git.proxmox.com Git - rustc.git/commitdiff
Imported Upstream version 1.9.0+dfsg1
author    Sylvestre Ledru <sylvestre@debian.org>
          Sun, 29 May 2016 15:53:46 +0000 (17:53 +0200)
committer Sylvestre Ledru <sylvestre@debian.org>
          Sun, 29 May 2016 15:53:46 +0000 (17:53 +0200)
1979 files changed:
CONTRIBUTING.md
README.md
RELEASES.md
configure
man/rustc.1
mk/cfg/i586-pc-windows-msvc.mk [new file with mode: 0644]
mk/cfg/i586-unknown-linux-gnu.mk
mk/cfg/i686-pc-windows-gnu.mk
mk/cfg/x86_64-pc-windows-gnu.mk
mk/crates.mk
mk/dist.mk
mk/main.mk
mk/rt.mk
mk/target.mk
mk/tests.mk
src/bootstrap/Cargo.lock
src/bootstrap/Cargo.toml
src/bootstrap/bootstrap.py
src/bootstrap/build/channel.rs
src/bootstrap/build/check.rs [new file with mode: 0644]
src/bootstrap/build/compile.rs
src/bootstrap/build/dist.rs [new file with mode: 0644]
src/bootstrap/build/doc.rs
src/bootstrap/build/mod.rs
src/bootstrap/build/native.rs
src/bootstrap/build/sanity.rs
src/bootstrap/build/step.rs
src/bootstrap/build/util.rs
src/bootstrap/main.rs
src/bootstrap/mk/Makefile.in
src/bootstrap/rustc.rs
src/bootstrap/rustdoc.rs [new file with mode: 0644]
src/build_helper/lib.rs
src/compiler-rt/cmake/config-ix.cmake
src/compiler-rt/lib/builtins/CMakeLists.txt
src/compiletest/common.rs
src/compiletest/compiletest.rs
src/compiletest/errors.rs
src/compiletest/header.rs
src/compiletest/procsrv.rs
src/compiletest/runtest.rs
src/doc/book/SUMMARY.md
src/doc/book/associated-types.md
src/doc/book/casting-between-types.md
src/doc/book/choosing-your-guarantees.md
src/doc/book/closures.md
src/doc/book/compiler-plugins.md
src/doc/book/concurrency.md
src/doc/book/const-and-static.md
src/doc/book/crates-and-modules.md
src/doc/book/drop.md
src/doc/book/error-handling.md
src/doc/book/functions.md
src/doc/book/getting-started.md
src/doc/book/guessing-game.md
src/doc/book/if.md
src/doc/book/inline-assembly.md
src/doc/book/iterators.md
src/doc/book/lifetimes.md
src/doc/book/macros.md
src/doc/book/match.md
src/doc/book/no-stdlib.md
src/doc/book/ownership.md
src/doc/book/patterns.md
src/doc/book/primitive-types.md
src/doc/book/references-and-borrowing.md
src/doc/book/strings.md
src/doc/book/syntax-index.md
src/doc/book/testing.md
src/doc/book/traits.md
src/doc/book/unsafe.md
src/doc/book/variable-bindings.md
src/doc/book/vectors.md
src/doc/guide-plugins.md
src/doc/nomicon/coercions.md
src/doc/nomicon/vec.md
src/doc/reference.md
src/doc/style/README.md
src/doc/style/features/functions-and-methods/input.md
src/doc/style/features/traits/generics.md
src/doc/style/style/naming/README.md
src/error_index_generator/main.rs [deleted file]
src/etc/CONFIGS.md
src/etc/platform-intrinsics/generator.py
src/etc/platform-intrinsics/x86/avx.json
src/etc/platform-intrinsics/x86/fma.json [new file with mode: 0644]
src/etc/tidy.py
src/etc/unicode.py
src/jemalloc/COPYING
src/jemalloc/ChangeLog
src/jemalloc/INSTALL
src/jemalloc/Makefile.in
src/jemalloc/VERSION [deleted file]
src/jemalloc/bin/jemalloc-config.in [new file with mode: 0644]
src/jemalloc/bin/jeprof.in [new file with mode: 0644]
src/jemalloc/bin/pprof [deleted file]
src/jemalloc/build-aux/config.guess [new file with mode: 0755]
src/jemalloc/build-aux/config.sub [new file with mode: 0755]
src/jemalloc/build-aux/install-sh [new file with mode: 0755]
src/jemalloc/config.guess [deleted file]
src/jemalloc/config.sub [deleted file]
src/jemalloc/configure
src/jemalloc/configure.ac
src/jemalloc/doc/jemalloc.xml.in
src/jemalloc/include/jemalloc/internal/arena.h
src/jemalloc/include/jemalloc/internal/assert.h [new file with mode: 0644]
src/jemalloc/include/jemalloc/internal/atomic.h
src/jemalloc/include/jemalloc/internal/base.h
src/jemalloc/include/jemalloc/internal/bitmap.h
src/jemalloc/include/jemalloc/internal/chunk.h
src/jemalloc/include/jemalloc/internal/chunk_dss.h
src/jemalloc/include/jemalloc/internal/chunk_mmap.h
src/jemalloc/include/jemalloc/internal/ckh.h
src/jemalloc/include/jemalloc/internal/ctl.h
src/jemalloc/include/jemalloc/internal/extent.h
src/jemalloc/include/jemalloc/internal/hash.h
src/jemalloc/include/jemalloc/internal/huge.h
src/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
src/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
src/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
src/jemalloc/include/jemalloc/internal/mutex.h
src/jemalloc/include/jemalloc/internal/nstime.h [new file with mode: 0644]
src/jemalloc/include/jemalloc/internal/pages.h [new file with mode: 0644]
src/jemalloc/include/jemalloc/internal/private_symbols.txt
src/jemalloc/include/jemalloc/internal/prng.h
src/jemalloc/include/jemalloc/internal/prof.h
src/jemalloc/include/jemalloc/internal/ql.h
src/jemalloc/include/jemalloc/internal/qr.h
src/jemalloc/include/jemalloc/internal/quarantine.h
src/jemalloc/include/jemalloc/internal/rb.h
src/jemalloc/include/jemalloc/internal/rtree.h
src/jemalloc/include/jemalloc/internal/size_classes.sh
src/jemalloc/include/jemalloc/internal/smoothstep.h [new file with mode: 0644]
src/jemalloc/include/jemalloc/internal/smoothstep.sh [new file with mode: 0755]
src/jemalloc/include/jemalloc/internal/stats.h
src/jemalloc/include/jemalloc/internal/tcache.h
src/jemalloc/include/jemalloc/internal/ticker.h [new file with mode: 0644]
src/jemalloc/include/jemalloc/internal/tsd.h
src/jemalloc/include/jemalloc/internal/util.h
src/jemalloc/include/jemalloc/jemalloc.sh
src/jemalloc/include/jemalloc/jemalloc_defs.h.in
src/jemalloc/include/jemalloc/jemalloc_macros.h.in
src/jemalloc/include/jemalloc/jemalloc_protos.h.in
src/jemalloc/include/jemalloc/jemalloc_typedefs.h.in
src/jemalloc/include/msvc_compat/C99/inttypes.h [deleted file]
src/jemalloc/include/msvc_compat/strings.h
src/jemalloc/include/msvc_compat/windows_extra.h [new file with mode: 0644]
src/jemalloc/install-sh [deleted file]
src/jemalloc/jemalloc.pc.in
src/jemalloc/msvc/ReadMe.txt [new file with mode: 0644]
src/jemalloc/msvc/jemalloc_vc2015.sln [new file with mode: 0644]
src/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj [new file with mode: 0644]
src/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters [new file with mode: 0644]
src/jemalloc/msvc/projects/vc2015/test_threads/test_threads.cpp [new file with mode: 0644]
src/jemalloc/msvc/projects/vc2015/test_threads/test_threads.h [new file with mode: 0644]
src/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj [new file with mode: 0644]
src/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters [new file with mode: 0644]
src/jemalloc/msvc/projects/vc2015/test_threads/test_threads_main.cpp [new file with mode: 0644]
src/jemalloc/src/arena.c
src/jemalloc/src/base.c
src/jemalloc/src/bitmap.c
src/jemalloc/src/chunk.c
src/jemalloc/src/chunk_dss.c
src/jemalloc/src/chunk_mmap.c
src/jemalloc/src/ckh.c
src/jemalloc/src/ctl.c
src/jemalloc/src/extent.c
src/jemalloc/src/huge.c
src/jemalloc/src/jemalloc.c
src/jemalloc/src/mutex.c
src/jemalloc/src/nstime.c [new file with mode: 0644]
src/jemalloc/src/pages.c [new file with mode: 0644]
src/jemalloc/src/prng.c [new file with mode: 0644]
src/jemalloc/src/prof.c
src/jemalloc/src/quarantine.c
src/jemalloc/src/rtree.c
src/jemalloc/src/stats.c
src/jemalloc/src/tcache.c
src/jemalloc/src/ticker.c [new file with mode: 0644]
src/jemalloc/src/tsd.c
src/jemalloc/src/util.c
src/jemalloc/src/zone.c
src/jemalloc/test/include/test/jemalloc_test.h.in
src/jemalloc/test/include/test/jemalloc_test_defs.h.in
src/jemalloc/test/include/test/math.h
src/jemalloc/test/include/test/mq.h
src/jemalloc/test/include/test/test.h
src/jemalloc/test/include/test/thd.h
src/jemalloc/test/include/test/timer.h
src/jemalloc/test/integration/chunk.c
src/jemalloc/test/integration/mallocx.c
src/jemalloc/test/integration/overflow.c [new file with mode: 0644]
src/jemalloc/test/integration/rallocx.c
src/jemalloc/test/integration/xallocx.c
src/jemalloc/test/src/mq.c [new file with mode: 0644]
src/jemalloc/test/src/test.c
src/jemalloc/test/src/timer.c
src/jemalloc/test/stress/microbench.c
src/jemalloc/test/unit/SFMT.c
src/jemalloc/test/unit/atomic.c
src/jemalloc/test/unit/bitmap.c
src/jemalloc/test/unit/ckh.c
src/jemalloc/test/unit/decay.c [new file with mode: 0644]
src/jemalloc/test/unit/hash.c
src/jemalloc/test/unit/junk.c
src/jemalloc/test/unit/junk_alloc.c [new file with mode: 0644]
src/jemalloc/test/unit/junk_free.c [new file with mode: 0644]
src/jemalloc/test/unit/lg_chunk.c [new file with mode: 0644]
src/jemalloc/test/unit/mallctl.c
src/jemalloc/test/unit/mq.c
src/jemalloc/test/unit/nstime.c [new file with mode: 0644]
src/jemalloc/test/unit/prng.c [new file with mode: 0644]
src/jemalloc/test/unit/prof_active.c
src/jemalloc/test/unit/prof_gdump.c
src/jemalloc/test/unit/prof_reset.c
src/jemalloc/test/unit/prof_thread_name.c
src/jemalloc/test/unit/rb.c
src/jemalloc/test/unit/rtree.c
src/jemalloc/test/unit/run_quantize.c [new file with mode: 0644]
src/jemalloc/test/unit/size_classes.c [new file with mode: 0644]
src/jemalloc/test/unit/smoothstep.c [new file with mode: 0644]
src/jemalloc/test/unit/stats.c
src/jemalloc/test/unit/ticker.c [new file with mode: 0644]
src/jemalloc/test/unit/tsd.c
src/jemalloc/test/unit/util.c
src/jemalloc/test/unit/zero.c
src/liballoc/arc.rs
src/liballoc/boxed.rs
src/liballoc/lib.rs
src/liballoc/rc.rs
src/liballoc_jemalloc/build.rs
src/liballoc_jemalloc/lib.rs
src/libarena/lib.rs
src/libcollections/binary_heap.rs
src/libcollections/borrow.rs
src/libcollections/btree/map.rs
src/libcollections/btree/set.rs
src/libcollections/enum_set.rs
src/libcollections/fmt.rs
src/libcollections/lib.rs
src/libcollections/linked_list.rs
src/libcollections/macros.rs
src/libcollections/range.rs
src/libcollections/slice.rs
src/libcollections/str.rs
src/libcollections/string.rs
src/libcollections/vec.rs
src/libcollections/vec_deque.rs
src/libcollectionstest/btree/map.rs
src/libcollectionstest/lib.rs
src/libcollectionstest/slice.rs
src/libcollectionstest/str.rs
src/libcollectionstest/vec_deque.rs
src/libcore/Cargo.toml
src/libcore/any.rs
src/libcore/array.rs
src/libcore/build.rs [new file with mode: 0644]
src/libcore/cell.rs
src/libcore/char.rs
src/libcore/clone.rs
src/libcore/cmp.rs
src/libcore/convert.rs
src/libcore/fmt/builders.rs
src/libcore/fmt/mod.rs
src/libcore/fmt/num.rs
src/libcore/fmt/rt/v1.rs
src/libcore/hash/mod.rs
src/libcore/hash/sip.rs
src/libcore/intrinsics.rs
src/libcore/iter.rs
src/libcore/lib.rs
src/libcore/macros.rs
src/libcore/mem.rs
src/libcore/num/dec2flt/mod.rs
src/libcore/num/f32.rs
src/libcore/num/f64.rs
src/libcore/num/flt2dec/mod.rs
src/libcore/num/i16.rs
src/libcore/num/i32.rs
src/libcore/num/i64.rs
src/libcore/num/i8.rs
src/libcore/num/int_macros.rs
src/libcore/num/isize.rs
src/libcore/num/mod.rs
src/libcore/num/u16.rs
src/libcore/num/u32.rs
src/libcore/num/u64.rs
src/libcore/num/u8.rs
src/libcore/num/uint_macros.rs
src/libcore/num/usize.rs
src/libcore/num/wrapping.rs
src/libcore/ops.rs
src/libcore/option.rs
src/libcore/ptr.rs
src/libcore/raw.rs
src/libcore/result.rs
src/libcore/slice.rs
src/libcore/str/mod.rs
src/libcore/str/pattern.rs
src/libcore/sync/atomic.rs
src/libcoretest/cell.rs
src/libcoretest/char.rs
src/libcoretest/fmt/builders.rs
src/libcoretest/fmt/num.rs
src/libcoretest/iter.rs
src/libcoretest/lib.rs
src/libcoretest/num/int_macros.rs
src/libcoretest/num/mod.rs
src/libcoretest/option.rs
src/libflate/lib.rs
src/libgetopts/lib.rs
src/libgraphviz/lib.rs
src/liblibc/.travis.yml
src/liblibc/Cargo.toml
src/liblibc/appveyor.yml
src/liblibc/ci/landing-page-footer.html
src/liblibc/ci/landing-page-head.html
src/liblibc/ci/run-travis.sh
src/liblibc/ci/style.rs [new file with mode: 0644]
src/liblibc/libc-test/Cargo.lock
src/liblibc/libc-test/build.rs
src/liblibc/libc-test/generate-files/Cargo.lock
src/liblibc/src/lib.rs
src/liblibc/src/macros.rs
src/liblibc/src/unix/bsd/apple/b32.rs
src/liblibc/src/unix/bsd/apple/b64.rs
src/liblibc/src/unix/bsd/apple/mod.rs
src/liblibc/src/unix/bsd/freebsdlike/dragonfly.rs [deleted file]
src/liblibc/src/unix/bsd/freebsdlike/dragonfly/mod.rs [new file with mode: 0644]
src/liblibc/src/unix/bsd/freebsdlike/freebsd.rs [deleted file]
src/liblibc/src/unix/bsd/freebsdlike/freebsd/mod.rs [new file with mode: 0644]
src/liblibc/src/unix/bsd/freebsdlike/freebsd/x86.rs [new file with mode: 0644]
src/liblibc/src/unix/bsd/freebsdlike/freebsd/x86_64.rs [new file with mode: 0644]
src/liblibc/src/unix/bsd/freebsdlike/mod.rs
src/liblibc/src/unix/bsd/freebsdlike/x86.rs [deleted file]
src/liblibc/src/unix/bsd/freebsdlike/x86_64.rs [deleted file]
src/liblibc/src/unix/bsd/mod.rs
src/liblibc/src/unix/bsd/openbsdlike/bitrig.rs
src/liblibc/src/unix/bsd/openbsdlike/mod.rs
src/liblibc/src/unix/bsd/openbsdlike/netbsd.rs
src/liblibc/src/unix/bsd/openbsdlike/openbsd.rs
src/liblibc/src/unix/mod.rs
src/liblibc/src/unix/notbsd/android/b32.rs
src/liblibc/src/unix/notbsd/android/b64.rs
src/liblibc/src/unix/notbsd/android/mod.rs
src/liblibc/src/unix/notbsd/linux/mips.rs
src/liblibc/src/unix/notbsd/linux/mod.rs
src/liblibc/src/unix/notbsd/linux/musl/b32/arm.rs
src/liblibc/src/unix/notbsd/linux/musl/b32/asmjs.rs
src/liblibc/src/unix/notbsd/linux/musl/b32/mips.rs
src/liblibc/src/unix/notbsd/linux/musl/b32/mod.rs
src/liblibc/src/unix/notbsd/linux/musl/b32/x86.rs
src/liblibc/src/unix/notbsd/linux/musl/b64/aarch64.rs [new file with mode: 0644]
src/liblibc/src/unix/notbsd/linux/musl/b64/mod.rs
src/liblibc/src/unix/notbsd/linux/musl/b64/powerpc64.rs [new file with mode: 0644]
src/liblibc/src/unix/notbsd/linux/musl/b64/x86_64.rs [new file with mode: 0644]
src/liblibc/src/unix/notbsd/linux/musl/mod.rs
src/liblibc/src/unix/notbsd/linux/other/b32/arm.rs
src/liblibc/src/unix/notbsd/linux/other/b32/mod.rs
src/liblibc/src/unix/notbsd/linux/other/b32/powerpc.rs
src/liblibc/src/unix/notbsd/linux/other/b32/x86.rs
src/liblibc/src/unix/notbsd/linux/other/b64/aarch64.rs
src/liblibc/src/unix/notbsd/linux/other/b64/mod.rs
src/liblibc/src/unix/notbsd/linux/other/b64/powerpc64.rs
src/liblibc/src/unix/notbsd/linux/other/b64/x86_64.rs
src/liblibc/src/unix/notbsd/linux/other/mod.rs
src/liblibc/src/unix/notbsd/mod.rs
src/liblibc/src/unix/solaris/mod.rs
src/liblibc/src/windows.rs
src/librand/lib.rs
src/librand/rand_impls.rs
src/librbml/lib.rs
src/librbml/opaque.rs
src/librustc/Cargo.toml
src/librustc/cfg/construct.rs [new file with mode: 0644]
src/librustc/cfg/graphviz.rs [new file with mode: 0644]
src/librustc/cfg/mod.rs [new file with mode: 0644]
src/librustc/dep_graph/README.md
src/librustc/dep_graph/dep_node.rs [new file with mode: 0644]
src/librustc/dep_graph/dep_tracking_map.rs
src/librustc/dep_graph/edges.rs
src/librustc/dep_graph/graph.rs [new file with mode: 0644]
src/librustc/dep_graph/mod.rs
src/librustc/dep_graph/query.rs
src/librustc/dep_graph/raii.rs
src/librustc/dep_graph/thread.rs
src/librustc/dep_graph/visit.rs [new file with mode: 0644]
src/librustc/diagnostics.rs
src/librustc/front/check_attr.rs [deleted file]
src/librustc/front/map/blocks.rs [deleted file]
src/librustc/front/map/collector.rs [deleted file]
src/librustc/front/map/definitions.rs [deleted file]
src/librustc/front/map/mod.rs [deleted file]
src/librustc/hir/check_attr.rs [new file with mode: 0644]
src/librustc/hir/def.rs [new file with mode: 0644]
src/librustc/hir/def_id.rs [new file with mode: 0644]
src/librustc/hir/fold.rs [new file with mode: 0644]
src/librustc/hir/intravisit.rs [new file with mode: 0644]
src/librustc/hir/lowering.rs [new file with mode: 0644]
src/librustc/hir/map/blocks.rs [new file with mode: 0644]
src/librustc/hir/map/collector.rs [new file with mode: 0644]
src/librustc/hir/map/definitions.rs [new file with mode: 0644]
src/librustc/hir/map/mod.rs [new file with mode: 0644]
src/librustc/hir/mod.rs [new file with mode: 0644]
src/librustc/hir/pat_util.rs [new file with mode: 0644]
src/librustc/hir/print.rs [new file with mode: 0644]
src/librustc/hir/svh.rs [new file with mode: 0644]
src/librustc/infer/README.md [new file with mode: 0644]
src/librustc/infer/bivariate.rs [new file with mode: 0644]
src/librustc/infer/combine.rs [new file with mode: 0644]
src/librustc/infer/equate.rs [new file with mode: 0644]
src/librustc/infer/error_reporting.rs [new file with mode: 0644]
src/librustc/infer/freshen.rs [new file with mode: 0644]
src/librustc/infer/glb.rs [new file with mode: 0644]
src/librustc/infer/higher_ranked/README.md [new file with mode: 0644]
src/librustc/infer/higher_ranked/mod.rs [new file with mode: 0644]
src/librustc/infer/lattice.rs [new file with mode: 0644]
src/librustc/infer/lub.rs [new file with mode: 0644]
src/librustc/infer/mod.rs [new file with mode: 0644]
src/librustc/infer/region_inference/README.md [new file with mode: 0644]
src/librustc/infer/region_inference/graphviz.rs [new file with mode: 0644]
src/librustc/infer/region_inference/mod.rs [new file with mode: 0644]
src/librustc/infer/resolve.rs [new file with mode: 0644]
src/librustc/infer/sub.rs [new file with mode: 0644]
src/librustc/infer/type_variable.rs [new file with mode: 0644]
src/librustc/infer/unify_key.rs [new file with mode: 0644]
src/librustc/lib.rs
src/librustc/lint/builtin.rs
src/librustc/lint/context.rs
src/librustc/lint/mod.rs
src/librustc/macros.rs
src/librustc/middle/astconv_util.rs
src/librustc/middle/cfg/construct.rs [deleted file]
src/librustc/middle/cfg/graphviz.rs [deleted file]
src/librustc/middle/cfg/mod.rs [deleted file]
src/librustc/middle/check_match.rs [deleted file]
src/librustc/middle/const_eval.rs [deleted file]
src/librustc/middle/const_val.rs [new file with mode: 0644]
src/librustc/middle/cstore.rs
src/librustc/middle/dataflow.rs
src/librustc/middle/dead.rs
src/librustc/middle/def.rs [deleted file]
src/librustc/middle/def_id.rs [deleted file]
src/librustc/middle/effect.rs
src/librustc/middle/entry.rs
src/librustc/middle/expr_use_visitor.rs
src/librustc/middle/free_region.rs
src/librustc/middle/infer/README.md [deleted file]
src/librustc/middle/infer/bivariate.rs [deleted file]
src/librustc/middle/infer/combine.rs [deleted file]
src/librustc/middle/infer/equate.rs [deleted file]
src/librustc/middle/infer/error_reporting.rs [deleted file]
src/librustc/middle/infer/freshen.rs [deleted file]
src/librustc/middle/infer/glb.rs [deleted file]
src/librustc/middle/infer/higher_ranked/README.md [deleted file]
src/librustc/middle/infer/higher_ranked/mod.rs [deleted file]
src/librustc/middle/infer/lattice.rs [deleted file]
src/librustc/middle/infer/lub.rs [deleted file]
src/librustc/middle/infer/mod.rs [deleted file]
src/librustc/middle/infer/region_inference/README.md [deleted file]
src/librustc/middle/infer/region_inference/graphviz.rs [deleted file]
src/librustc/middle/infer/region_inference/mod.rs [deleted file]
src/librustc/middle/infer/resolve.rs [deleted file]
src/librustc/middle/infer/sub.rs [deleted file]
src/librustc/middle/infer/type_variable.rs [deleted file]
src/librustc/middle/infer/unify_key.rs [deleted file]
src/librustc/middle/intrinsicck.rs
src/librustc/middle/lang_items.rs
src/librustc/middle/liveness.rs
src/librustc/middle/mem_categorization.rs
src/librustc/middle/pat_util.rs [deleted file]
src/librustc/middle/privacy.rs
src/librustc/middle/reachable.rs
src/librustc/middle/region.rs
src/librustc/middle/resolve_lifetime.rs
src/librustc/middle/stability.rs
src/librustc/middle/subst.rs [deleted file]
src/librustc/middle/traits/README.md [deleted file]
src/librustc/middle/traits/coherence.rs [deleted file]
src/librustc/middle/traits/error_reporting.rs [deleted file]
src/librustc/middle/traits/fulfill.rs [deleted file]
src/librustc/middle/traits/mod.rs [deleted file]
src/librustc/middle/traits/object_safety.rs [deleted file]
src/librustc/middle/traits/project.rs [deleted file]
src/librustc/middle/traits/select.rs [deleted file]
src/librustc/middle/traits/structural_impls.rs [deleted file]
src/librustc/middle/traits/util.rs [deleted file]
src/librustc/middle/ty/_match.rs [deleted file]
src/librustc/middle/ty/adjustment.rs [deleted file]
src/librustc/middle/ty/cast.rs [deleted file]
src/librustc/middle/ty/contents.rs [deleted file]
src/librustc/middle/ty/context.rs [deleted file]
src/librustc/middle/ty/error.rs [deleted file]
src/librustc/middle/ty/fast_reject.rs [deleted file]
src/librustc/middle/ty/flags.rs [deleted file]
src/librustc/middle/ty/fold.rs [deleted file]
src/librustc/middle/ty/ivar.rs [deleted file]
src/librustc/middle/ty/maps.rs [deleted file]
src/librustc/middle/ty/mod.rs [deleted file]
src/librustc/middle/ty/outlives.rs [deleted file]
src/librustc/middle/ty/relate.rs [deleted file]
src/librustc/middle/ty/structural_impls.rs [deleted file]
src/librustc/middle/ty/sty.rs [deleted file]
src/librustc/middle/ty/trait_def.rs [deleted file]
src/librustc/middle/ty/util.rs [deleted file]
src/librustc/middle/ty/walk.rs [deleted file]
src/librustc/middle/ty/wf.rs [deleted file]
src/librustc/middle/weak_lang_items.rs
src/librustc/mir/mir_map.rs
src/librustc/mir/repr.rs
src/librustc/mir/tcx.rs
src/librustc/mir/transform.rs
src/librustc/mir/visit.rs
src/librustc/session/config.rs
src/librustc/session/filesearch.rs
src/librustc/session/mod.rs
src/librustc/traits/README.md [new file with mode: 0644]
src/librustc/traits/coherence.rs [new file with mode: 0644]
src/librustc/traits/error_reporting.rs [new file with mode: 0644]
src/librustc/traits/fulfill.rs [new file with mode: 0644]
src/librustc/traits/mod.rs [new file with mode: 0644]
src/librustc/traits/object_safety.rs [new file with mode: 0644]
src/librustc/traits/project.rs [new file with mode: 0644]
src/librustc/traits/select.rs [new file with mode: 0644]
src/librustc/traits/specialize/mod.rs [new file with mode: 0644]
src/librustc/traits/specialize/specialization_graph.rs [new file with mode: 0644]
src/librustc/traits/structural_impls.rs [new file with mode: 0644]
src/librustc/traits/util.rs [new file with mode: 0644]
src/librustc/ty/_match.rs [new file with mode: 0644]
src/librustc/ty/adjustment.rs [new file with mode: 0644]
src/librustc/ty/cast.rs [new file with mode: 0644]
src/librustc/ty/contents.rs [new file with mode: 0644]
src/librustc/ty/context.rs [new file with mode: 0644]
src/librustc/ty/error.rs [new file with mode: 0644]
src/librustc/ty/fast_reject.rs [new file with mode: 0644]
src/librustc/ty/flags.rs [new file with mode: 0644]
src/librustc/ty/fold.rs [new file with mode: 0644]
src/librustc/ty/item_path.rs [new file with mode: 0644]
src/librustc/ty/ivar.rs [new file with mode: 0644]
src/librustc/ty/layout.rs [new file with mode: 0644]
src/librustc/ty/maps.rs [new file with mode: 0644]
src/librustc/ty/mod.rs [new file with mode: 0644]
src/librustc/ty/outlives.rs [new file with mode: 0644]
src/librustc/ty/relate.rs [new file with mode: 0644]
src/librustc/ty/structural_impls.rs [new file with mode: 0644]
src/librustc/ty/sty.rs [new file with mode: 0644]
src/librustc/ty/subst.rs [new file with mode: 0644]
src/librustc/ty/trait_def.rs [new file with mode: 0644]
src/librustc/ty/util.rs [new file with mode: 0644]
src/librustc/ty/walk.rs [new file with mode: 0644]
src/librustc/ty/wf.rs [new file with mode: 0644]
src/librustc/util/common.rs
src/librustc/util/nodemap.rs
src/librustc/util/ppaux.rs
src/librustc_back/Cargo.toml
src/librustc_back/abi.rs [deleted file]
src/librustc_back/dynamic_lib.rs [new file with mode: 0644]
src/librustc_back/lib.rs
src/librustc_back/svh.rs [deleted file]
src/librustc_back/target/aarch64_apple_ios.rs
src/librustc_back/target/aarch64_linux_android.rs
src/librustc_back/target/aarch64_unknown_linux_gnu.rs
src/librustc_back/target/arm_linux_androideabi.rs
src/librustc_back/target/arm_unknown_linux_gnueabi.rs
src/librustc_back/target/arm_unknown_linux_gnueabihf.rs
src/librustc_back/target/armv7_apple_ios.rs
src/librustc_back/target/armv7_unknown_linux_gnueabihf.rs
src/librustc_back/target/armv7s_apple_ios.rs
src/librustc_back/target/asmjs_unknown_emscripten.rs
src/librustc_back/target/i386_apple_ios.rs
src/librustc_back/target/i586_pc_windows_msvc.rs [new file with mode: 0644]
src/librustc_back/target/i586_unknown_linux_gnu.rs
src/librustc_back/target/i686_apple_darwin.rs
src/librustc_back/target/i686_linux_android.rs
src/librustc_back/target/i686_pc_windows_gnu.rs
src/librustc_back/target/i686_pc_windows_msvc.rs
src/librustc_back/target/i686_unknown_dragonfly.rs
src/librustc_back/target/i686_unknown_freebsd.rs
src/librustc_back/target/i686_unknown_linux_gnu.rs
src/librustc_back/target/i686_unknown_linux_musl.rs
src/librustc_back/target/le32_unknown_nacl.rs
src/librustc_back/target/mips_unknown_linux_gnu.rs
src/librustc_back/target/mips_unknown_linux_musl.rs
src/librustc_back/target/mipsel_unknown_linux_gnu.rs
src/librustc_back/target/mipsel_unknown_linux_musl.rs
src/librustc_back/target/mod.rs
src/librustc_back/target/powerpc64_unknown_linux_gnu.rs
src/librustc_back/target/powerpc64le_unknown_linux_gnu.rs
src/librustc_back/target/powerpc_unknown_linux_gnu.rs
src/librustc_back/target/x86_64_apple_darwin.rs
src/librustc_back/target/x86_64_apple_ios.rs
src/librustc_back/target/x86_64_pc_windows_gnu.rs
src/librustc_back/target/x86_64_pc_windows_msvc.rs
src/librustc_back/target/x86_64_rumprun_netbsd.rs
src/librustc_back/target/x86_64_sun_solaris.rs
src/librustc_back/target/x86_64_unknown_bitrig.rs
src/librustc_back/target/x86_64_unknown_dragonfly.rs
src/librustc_back/target/x86_64_unknown_freebsd.rs
src/librustc_back/target/x86_64_unknown_linux_gnu.rs
src/librustc_back/target/x86_64_unknown_linux_musl.rs
src/librustc_back/target/x86_64_unknown_netbsd.rs
src/librustc_back/target/x86_64_unknown_openbsd.rs
src/librustc_back/tempdir.rs
src/librustc_borrowck/Cargo.toml
src/librustc_borrowck/bitslice.rs [new file with mode: 0644]
src/librustc_borrowck/borrowck/check_loans.rs
src/librustc_borrowck/borrowck/fragments.rs
src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs
src/librustc_borrowck/borrowck/gather_loans/lifetime.rs
src/librustc_borrowck/borrowck/gather_loans/mod.rs
src/librustc_borrowck/borrowck/gather_loans/move_error.rs
src/librustc_borrowck/borrowck/gather_loans/restrictions.rs
src/librustc_borrowck/borrowck/mir/abs_domain.rs [new file with mode: 0644]
src/librustc_borrowck/borrowck/mir/dataflow.rs [new file with mode: 0644]
src/librustc_borrowck/borrowck/mir/gather_moves.rs [new file with mode: 0644]
src/librustc_borrowck/borrowck/mir/graphviz.rs [new file with mode: 0644]
src/librustc_borrowck/borrowck/mir/mod.rs [new file with mode: 0644]
src/librustc_borrowck/borrowck/mod.rs
src/librustc_borrowck/borrowck/move_data.rs
src/librustc_borrowck/graphviz.rs
src/librustc_borrowck/lib.rs
src/librustc_const_eval/Cargo.toml [new file with mode: 0644]
src/librustc_const_eval/check_match.rs [new file with mode: 0644]
src/librustc_const_eval/diagnostics.rs [new file with mode: 0644]
src/librustc_const_eval/eval.rs [new file with mode: 0644]
src/librustc_const_eval/lib.rs [new file with mode: 0644]
src/librustc_const_math/Cargo.toml [new file with mode: 0644]
src/librustc_const_math/err.rs [new file with mode: 0644]
src/librustc_const_math/int.rs [new file with mode: 0644]
src/librustc_const_math/is.rs [new file with mode: 0644]
src/librustc_const_math/lib.rs [new file with mode: 0644]
src/librustc_const_math/us.rs [new file with mode: 0644]
src/librustc_data_structures/bitvec.rs
src/librustc_data_structures/fnv.rs
src/librustc_data_structures/graph/mod.rs
src/librustc_data_structures/graph/tests.rs
src/librustc_data_structures/ivar.rs
src/librustc_data_structures/lib.rs
src/librustc_data_structures/obligation_forest/README.md
src/librustc_data_structures/obligation_forest/mod.rs
src/librustc_data_structures/obligation_forest/node_index.rs
src/librustc_data_structures/obligation_forest/test.rs
src/librustc_data_structures/obligation_forest/tree_index.rs
src/librustc_data_structures/snapshot_vec.rs
src/librustc_data_structures/transitive_relation.rs
src/librustc_data_structures/tuple_slice.rs
src/librustc_data_structures/unify/mod.rs
src/librustc_data_structures/unify/tests.rs
src/librustc_data_structures/veccell/mod.rs
src/librustc_driver/Cargo.toml
src/librustc_driver/driver.rs
src/librustc_driver/lib.rs
src/librustc_driver/pretty.rs
src/librustc_driver/test.rs
src/librustc_front/Cargo.toml [deleted file]
src/librustc_front/fold.rs [deleted file]
src/librustc_front/hir.rs [deleted file]
src/librustc_front/intravisit.rs [deleted file]
src/librustc_front/lib.rs [deleted file]
src/librustc_front/lowering.rs [deleted file]
src/librustc_front/print/pprust.rs [deleted file]
src/librustc_front/util.rs [deleted file]
src/librustc_incremental/Cargo.toml [new file with mode: 0644]
src/librustc_incremental/assert_dep_graph.rs [new file with mode: 0644]
src/librustc_incremental/calculate_svh.rs [new file with mode: 0644]
src/librustc_incremental/lib.rs [new file with mode: 0644]
src/librustc_incremental/persist/README.md [new file with mode: 0644]
src/librustc_incremental/persist/data.rs [new file with mode: 0644]
src/librustc_incremental/persist/directory.rs [new file with mode: 0644]
src/librustc_incremental/persist/dirty_clean.rs [new file with mode: 0644]
src/librustc_incremental/persist/load.rs [new file with mode: 0644]
src/librustc_incremental/persist/mod.rs [new file with mode: 0644]
src/librustc_incremental/persist/save.rs [new file with mode: 0644]
src/librustc_incremental/persist/serialize.rs [new file with mode: 0644]
src/librustc_incremental/persist/util.rs [new file with mode: 0644]
src/librustc_lint/Cargo.toml
src/librustc_lint/bad_style.rs
src/librustc_lint/builtin.rs
src/librustc_lint/lib.rs
src/librustc_lint/types.rs
src/librustc_lint/unused.rs
src/librustc_llvm/build.rs
src/librustc_llvm/lib.rs
src/librustc_metadata/Cargo.toml
src/librustc_metadata/astencode.rs
src/librustc_metadata/common.rs
src/librustc_metadata/creader.rs
src/librustc_metadata/csearch.rs
src/librustc_metadata/cstore.rs
src/librustc_metadata/decoder.rs
src/librustc_metadata/diagnostics.rs
src/librustc_metadata/encoder.rs
src/librustc_metadata/index.rs
src/librustc_metadata/lib.rs
src/librustc_metadata/loader.rs
src/librustc_metadata/macro_import.rs
src/librustc_metadata/tls_context.rs
src/librustc_metadata/tydecode.rs
src/librustc_metadata/tyencode.rs
src/librustc_mir/Cargo.toml
src/librustc_mir/build/block.rs
src/librustc_mir/build/cfg.rs
src/librustc_mir/build/expr/as_constant.rs
src/librustc_mir/build/expr/as_lvalue.rs
src/librustc_mir/build/expr/as_operand.rs
src/librustc_mir/build/expr/as_rvalue.rs
src/librustc_mir/build/expr/as_temp.rs
src/librustc_mir/build/expr/into.rs
src/librustc_mir/build/matches/mod.rs
src/librustc_mir/build/matches/simplify.rs
src/librustc_mir/build/matches/test.rs
src/librustc_mir/build/matches/util.rs
src/librustc_mir/build/misc.rs
src/librustc_mir/build/mod.rs
src/librustc_mir/build/scope.rs
src/librustc_mir/build/stmt.rs [deleted file]
src/librustc_mir/graphviz.rs
src/librustc_mir/hair/cx/block.rs
src/librustc_mir/hair/cx/expr.rs
src/librustc_mir/hair/cx/mod.rs
src/librustc_mir/hair/cx/pattern.rs
src/librustc_mir/hair/cx/to_ref.rs
src/librustc_mir/hair/mod.rs
src/librustc_mir/lib.rs
src/librustc_mir/mir_map.rs
src/librustc_mir/pretty.rs
src/librustc_mir/transform/break_critical_edges.rs [new file with mode: 0644]
src/librustc_mir/transform/clear_dead_blocks.rs [deleted file]
src/librustc_mir/transform/erase_regions.rs
src/librustc_mir/transform/mod.rs
src/librustc_mir/transform/no_landing_pads.rs
src/librustc_mir/transform/remove_dead_blocks.rs [new file with mode: 0644]
src/librustc_mir/transform/simplify_cfg.rs
src/librustc_mir/transform/type_check.rs
src/librustc_mir/traversal.rs [new file with mode: 0644]
src/librustc_passes/Cargo.toml
src/librustc_passes/const_fn.rs
src/librustc_passes/consts.rs
src/librustc_passes/lib.rs
src/librustc_passes/loops.rs
src/librustc_passes/rvalues.rs
src/librustc_passes/static_recursion.rs
src/librustc_platform_intrinsics/Cargo.toml
src/librustc_platform_intrinsics/aarch64.rs
src/librustc_platform_intrinsics/arm.rs
src/librustc_platform_intrinsics/lib.rs
src/librustc_platform_intrinsics/x86.rs
src/librustc_plugin/Cargo.toml
src/librustc_plugin/build.rs
src/librustc_plugin/lib.rs
src/librustc_plugin/load.rs
src/librustc_plugin/registry.rs
src/librustc_privacy/Cargo.toml
src/librustc_privacy/lib.rs
src/librustc_resolve/Cargo.toml
src/librustc_resolve/build_reduced_graph.rs
src/librustc_resolve/check_unused.rs
src/librustc_resolve/diagnostics.rs
src/librustc_resolve/lib.rs
src/librustc_resolve/resolve_imports.rs
src/librustc_save_analysis/Cargo.toml [new file with mode: 0644]
src/librustc_save_analysis/csv_dumper.rs [new file with mode: 0644]
src/librustc_save_analysis/data.rs [new file with mode: 0644]
src/librustc_save_analysis/dump.rs [new file with mode: 0644]
src/librustc_save_analysis/dump_visitor.rs [new file with mode: 0644]
src/librustc_save_analysis/lib.rs [new file with mode: 0644]
src/librustc_save_analysis/span_utils.rs [new file with mode: 0644]
src/librustc_trans/Cargo.toml
src/librustc_trans/_match.rs [new file with mode: 0644]
src/librustc_trans/abi.rs [new file with mode: 0644]
src/librustc_trans/adt.rs [new file with mode: 0644]
src/librustc_trans/asm.rs [new file with mode: 0644]
src/librustc_trans/attributes.rs [new file with mode: 0644]
src/librustc_trans/back/archive.rs
src/librustc_trans/back/link.rs
src/librustc_trans/back/linker.rs
src/librustc_trans/back/symbol_names.rs [new file with mode: 0644]
src/librustc_trans/back/write.rs
src/librustc_trans/base.rs [new file with mode: 0644]
src/librustc_trans/basic_block.rs [new file with mode: 0644]
src/librustc_trans/build.rs [new file with mode: 0644]
src/librustc_trans/builder.rs [new file with mode: 0644]
src/librustc_trans/cabi_aarch64.rs [new file with mode: 0644]
src/librustc_trans/cabi_arm.rs [new file with mode: 0644]
src/librustc_trans/cabi_asmjs.rs [new file with mode: 0644]
src/librustc_trans/cabi_mips.rs [new file with mode: 0644]
src/librustc_trans/cabi_powerpc.rs [new file with mode: 0644]
src/librustc_trans/cabi_powerpc64.rs [new file with mode: 0644]
src/librustc_trans/cabi_x86.rs [new file with mode: 0644]
src/librustc_trans/cabi_x86_64.rs [new file with mode: 0644]
src/librustc_trans/cabi_x86_win64.rs [new file with mode: 0644]
src/librustc_trans/callee.rs [new file with mode: 0644]
src/librustc_trans/cleanup.rs [new file with mode: 0644]
src/librustc_trans/closure.rs [new file with mode: 0644]
src/librustc_trans/collector.rs [new file with mode: 0644]
src/librustc_trans/common.rs [new file with mode: 0644]
src/librustc_trans/consts.rs [new file with mode: 0644]
src/librustc_trans/context.rs [new file with mode: 0644]
src/librustc_trans/controlflow.rs [new file with mode: 0644]
src/librustc_trans/datum.rs [new file with mode: 0644]
src/librustc_trans/debuginfo/create_scope_map.rs [new file with mode: 0644]
src/librustc_trans/debuginfo/doc.rs [new file with mode: 0644]
src/librustc_trans/debuginfo/gdb.rs [new file with mode: 0644]
src/librustc_trans/debuginfo/metadata.rs [new file with mode: 0644]
src/librustc_trans/debuginfo/mod.rs [new file with mode: 0644]
src/librustc_trans/debuginfo/namespace.rs [new file with mode: 0644]
src/librustc_trans/debuginfo/source_loc.rs [new file with mode: 0644]
src/librustc_trans/debuginfo/type_names.rs [new file with mode: 0644]
src/librustc_trans/debuginfo/utils.rs [new file with mode: 0644]
src/librustc_trans/declare.rs [new file with mode: 0644]
src/librustc_trans/diagnostics.rs
src/librustc_trans/disr.rs [new file with mode: 0644]
src/librustc_trans/expr.rs [new file with mode: 0644]
src/librustc_trans/glue.rs [new file with mode: 0644]
src/librustc_trans/inline.rs [new file with mode: 0644]
src/librustc_trans/intrinsic.rs [new file with mode: 0644]
src/librustc_trans/lib.rs
src/librustc_trans/machine.rs [new file with mode: 0644]
src/librustc_trans/macros.rs [new file with mode: 0644]
src/librustc_trans/meth.rs [new file with mode: 0644]
src/librustc_trans/mir/analyze.rs [new file with mode: 0644]
src/librustc_trans/mir/block.rs [new file with mode: 0644]
src/librustc_trans/mir/constant.rs [new file with mode: 0644]
src/librustc_trans/mir/drop.rs [new file with mode: 0644]
src/librustc_trans/mir/lvalue.rs [new file with mode: 0644]
src/librustc_trans/mir/mod.rs [new file with mode: 0644]
src/librustc_trans/mir/operand.rs [new file with mode: 0644]
src/librustc_trans/mir/rvalue.rs [new file with mode: 0644]
src/librustc_trans/mir/statement.rs [new file with mode: 0644]
src/librustc_trans/monomorphize.rs [new file with mode: 0644]
src/librustc_trans/save/dump_csv.rs [deleted file]
src/librustc_trans/save/mod.rs [deleted file]
src/librustc_trans/save/recorder.rs [deleted file]
src/librustc_trans/save/span_utils.rs [deleted file]
src/librustc_trans/symbol_names_test.rs [new file with mode: 0644]
src/librustc_trans/trans/_match.rs [deleted file]
src/librustc_trans/trans/adt.rs [deleted file]
src/librustc_trans/trans/asm.rs [deleted file]
src/librustc_trans/trans/assert_dep_graph.rs [deleted file]
src/librustc_trans/trans/attributes.rs [deleted file]
src/librustc_trans/trans/base.rs [deleted file]
src/librustc_trans/trans/basic_block.rs [deleted file]
src/librustc_trans/trans/build.rs [deleted file]
src/librustc_trans/trans/builder.rs [deleted file]
src/librustc_trans/trans/cabi.rs [deleted file]
src/librustc_trans/trans/cabi_aarch64.rs [deleted file]
src/librustc_trans/trans/cabi_arm.rs [deleted file]
src/librustc_trans/trans/cabi_asmjs.rs [deleted file]
src/librustc_trans/trans/cabi_mips.rs [deleted file]
src/librustc_trans/trans/cabi_powerpc.rs [deleted file]
src/librustc_trans/trans/cabi_powerpc64.rs [deleted file]
src/librustc_trans/trans/cabi_x86.rs [deleted file]
src/librustc_trans/trans/cabi_x86_64.rs [deleted file]
src/librustc_trans/trans/cabi_x86_win64.rs [deleted file]
src/librustc_trans/trans/callee.rs [deleted file]
src/librustc_trans/trans/cleanup.rs [deleted file]
src/librustc_trans/trans/closure.rs [deleted file]
src/librustc_trans/trans/collector.rs [deleted file]
src/librustc_trans/trans/common.rs [deleted file]
src/librustc_trans/trans/consts.rs [deleted file]
src/librustc_trans/trans/context.rs [deleted file]
src/librustc_trans/trans/controlflow.rs [deleted file]
src/librustc_trans/trans/datum.rs [deleted file]
src/librustc_trans/trans/debuginfo/create_scope_map.rs [deleted file]
src/librustc_trans/trans/debuginfo/doc.rs [deleted file]
src/librustc_trans/trans/debuginfo/gdb.rs [deleted file]
src/librustc_trans/trans/debuginfo/metadata.rs [deleted file]
src/librustc_trans/trans/debuginfo/mod.rs [deleted file]
src/librustc_trans/trans/debuginfo/namespace.rs [deleted file]
src/librustc_trans/trans/debuginfo/source_loc.rs [deleted file]
src/librustc_trans/trans/debuginfo/type_names.rs [deleted file]
src/librustc_trans/trans/debuginfo/utils.rs [deleted file]
src/librustc_trans/trans/declare.rs [deleted file]
src/librustc_trans/trans/disr.rs [deleted file]
src/librustc_trans/trans/expr.rs [deleted file]
src/librustc_trans/trans/foreign.rs [deleted file]
src/librustc_trans/trans/glue.rs [deleted file]
src/librustc_trans/trans/inline.rs [deleted file]
src/librustc_trans/trans/intrinsic.rs [deleted file]
src/librustc_trans/trans/llrepr.rs [deleted file]
src/librustc_trans/trans/machine.rs [deleted file]
src/librustc_trans/trans/macros.rs [deleted file]
src/librustc_trans/trans/meth.rs [deleted file]
src/librustc_trans/trans/mir/analyze.rs [deleted file]
src/librustc_trans/trans/mir/block.rs [deleted file]
src/librustc_trans/trans/mir/constant.rs [deleted file]
src/librustc_trans/trans/mir/did.rs [deleted file]
src/librustc_trans/trans/mir/lvalue.rs [deleted file]
src/librustc_trans/trans/mir/mod.rs [deleted file]
src/librustc_trans/trans/mir/operand.rs [deleted file]
src/librustc_trans/trans/mir/rvalue.rs [deleted file]
src/librustc_trans/trans/mir/statement.rs [deleted file]
src/librustc_trans/trans/mod.rs [deleted file]
src/librustc_trans/trans/monomorphize.rs [deleted file]
src/librustc_trans/trans/tvec.rs [deleted file]
src/librustc_trans/trans/type_.rs [deleted file]
src/librustc_trans/trans/type_of.rs [deleted file]
src/librustc_trans/trans/value.rs [deleted file]
src/librustc_trans/tvec.rs [new file with mode: 0644]
src/librustc_trans/type_.rs [new file with mode: 0644]
src/librustc_trans/type_of.rs [new file with mode: 0644]
src/librustc_trans/value.rs [new file with mode: 0644]
src/librustc_typeck/Cargo.toml
src/librustc_typeck/astconv.rs
src/librustc_typeck/check/_match.rs
src/librustc_typeck/check/assoc.rs
src/librustc_typeck/check/callee.rs
src/librustc_typeck/check/cast.rs
src/librustc_typeck/check/closure.rs
src/librustc_typeck/check/coercion.rs
src/librustc_typeck/check/compare_method.rs
src/librustc_typeck/check/demand.rs
src/librustc_typeck/check/dropck.rs
src/librustc_typeck/check/intrinsic.rs
src/librustc_typeck/check/method/confirm.rs
src/librustc_typeck/check/method/mod.rs
src/librustc_typeck/check/method/probe.rs
src/librustc_typeck/check/method/suggest.rs
src/librustc_typeck/check/mod.rs
src/librustc_typeck/check/op.rs
src/librustc_typeck/check/regionck.rs
src/librustc_typeck/check/upvar.rs
src/librustc_typeck/check/wfcheck.rs
src/librustc_typeck/check/writeback.rs
src/librustc_typeck/coherence/mod.rs
src/librustc_typeck/coherence/orphan.rs
src/librustc_typeck/coherence/overlap.rs
src/librustc_typeck/coherence/unsafety.rs
src/librustc_typeck/collect.rs
src/librustc_typeck/constrained_type_params.rs
src/librustc_typeck/diagnostics.rs
src/librustc_typeck/lib.rs
src/librustc_typeck/rscope.rs
src/librustc_typeck/variance/constraints.rs
src/librustc_typeck/variance/mod.rs
src/librustc_typeck/variance/solve.rs
src/librustc_typeck/variance/terms.rs
src/librustc_typeck/variance/xform.rs
src/librustc_unicode/char.rs
src/librustc_unicode/lib.rs
src/librustc_unicode/tables.rs
src/librustc_unicode/u_str.rs
src/librustdoc/Cargo.toml
src/librustdoc/clean/inline.rs
src/librustdoc/clean/mod.rs
src/librustdoc/clean/simplify.rs
src/librustdoc/core.rs
src/librustdoc/doctree.rs
src/librustdoc/externalfiles.rs
src/librustdoc/fold.rs
src/librustdoc/html/escape.rs
src/librustdoc/html/format.rs
src/librustdoc/html/highlight.rs
src/librustdoc/html/item_type.rs
src/librustdoc/html/layout.rs
src/librustdoc/html/markdown.rs
src/librustdoc/html/render.rs
src/librustdoc/html/static/main.js
src/librustdoc/html/static/rustdoc.css
src/librustdoc/html/toc.rs
src/librustdoc/lib.rs
src/librustdoc/markdown.rs
src/librustdoc/passes.rs
src/librustdoc/plugins.rs
src/librustdoc/test.rs
src/librustdoc/visit_ast.rs
src/libserialize/collection_impls.rs
src/libserialize/hex.rs
src/libserialize/json.rs
src/libserialize/lib.rs
src/libserialize/serialize.rs
src/libstd/ascii.rs
src/libstd/build.rs
src/libstd/collections/hash/map.rs
src/libstd/collections/hash/mod.rs
src/libstd/collections/hash/set.rs
src/libstd/collections/hash/state.rs [deleted file]
src/libstd/collections/hash/table.rs
src/libstd/collections/mod.rs
src/libstd/dynamic_lib.rs [deleted file]
src/libstd/env.rs
src/libstd/error.rs
src/libstd/ffi/c_str.rs
src/libstd/ffi/os_str.rs
src/libstd/fs.rs
src/libstd/io/buffered.rs
src/libstd/io/cursor.rs
src/libstd/io/error.rs
src/libstd/io/impls.rs
src/libstd/io/mod.rs
src/libstd/io/stdio.rs
src/libstd/io/util.rs
src/libstd/lib.rs
src/libstd/macros.rs
src/libstd/net/addr.rs
src/libstd/net/ip.rs
src/libstd/net/mod.rs
src/libstd/net/tcp.rs
src/libstd/net/udp.rs
src/libstd/num/f32.rs
src/libstd/num/f64.rs
src/libstd/num/mod.rs
src/libstd/os/dragonfly/fs.rs
src/libstd/os/emscripten/fs.rs [new file with mode: 0644]
src/libstd/os/emscripten/mod.rs [new file with mode: 0644]
src/libstd/os/emscripten/raw.rs [new file with mode: 0644]
src/libstd/os/mod.rs
src/libstd/panic.rs
src/libstd/panicking.rs
src/libstd/path.rs
src/libstd/prelude/mod.rs
src/libstd/primitive_docs.rs
src/libstd/process.rs
src/libstd/rt.rs
src/libstd/sync/barrier.rs
src/libstd/sync/condvar.rs
src/libstd/sync/mod.rs
src/libstd/sync/mpsc/mod.rs
src/libstd/sync/mutex.rs
src/libstd/sync/once.rs
src/libstd/sync/rwlock.rs
src/libstd/sync/semaphore.rs [deleted file]
src/libstd/sys/common/backtrace.rs
src/libstd/sys/common/gnu/libbacktrace.rs
src/libstd/sys/common/io.rs
src/libstd/sys/common/libunwind.rs
src/libstd/sys/common/net.rs
src/libstd/sys/common/remutex.rs
src/libstd/sys/common/unwind/gcc.rs
src/libstd/sys/common/wtf8.rs
src/libstd/sys/unix/backtrace/tracing/backtrace_fn.rs
src/libstd/sys/unix/backtrace/tracing/gcc_s.rs
src/libstd/sys/unix/ext/fs.rs
src/libstd/sys/unix/ext/mod.rs
src/libstd/sys/unix/ext/net.rs [new file with mode: 0644]
src/libstd/sys/unix/ext/process.rs
src/libstd/sys/unix/ext/thread.rs
src/libstd/sys/unix/fd.rs
src/libstd/sys/unix/fs.rs
src/libstd/sys/unix/mod.rs
src/libstd/sys/unix/net.rs
src/libstd/sys/unix/os.rs
src/libstd/sys/unix/pipe.rs
src/libstd/sys/unix/process.rs
src/libstd/sys/unix/rand.rs
src/libstd/sys/unix/stdio.rs
src/libstd/sys/unix/thread.rs
src/libstd/sys/unix/time.rs
src/libstd/sys/unix/weak.rs
src/libstd/sys/windows/backtrace.rs
src/libstd/sys/windows/c.rs
src/libstd/sys/windows/dynamic_lib.rs [new file with mode: 0644]
src/libstd/sys/windows/ext/thread.rs
src/libstd/sys/windows/fs.rs
src/libstd/sys/windows/handle.rs
src/libstd/sys/windows/mod.rs
src/libstd/sys/windows/net.rs
src/libstd/sys/windows/os.rs
src/libstd/sys/windows/pipe.rs
src/libstd/sys/windows/printing/gnu.rs
src/libstd/sys/windows/printing/msvc.rs
src/libstd/sys/windows/process.rs
src/libstd/sys/windows/stdio.rs
src/libstd/sys/windows/thread.rs
src/libstd/thread/mod.rs
src/libstd/time/duration.rs
src/libstd/time/mod.rs
src/libsyntax/Cargo.toml
src/libsyntax/ast.rs
src/libsyntax/ast_util.rs [deleted file]
src/libsyntax/codemap.rs
src/libsyntax/config.rs
src/libsyntax/diagnostics/metadata.rs
src/libsyntax/errors/emitter.rs
src/libsyntax/errors/json.rs
src/libsyntax/ext/base.rs
src/libsyntax/ext/build.rs
src/libsyntax/ext/expand.rs
src/libsyntax/ext/quote.rs
src/libsyntax/ext/tt/macro_parser.rs
src/libsyntax/ext/tt/macro_rules.rs
src/libsyntax/feature_gate.rs
src/libsyntax/fold.rs
src/libsyntax/lib.rs
src/libsyntax/parse/attr.rs
src/libsyntax/parse/lexer/comments.rs
src/libsyntax/parse/lexer/mod.rs
src/libsyntax/parse/mod.rs
src/libsyntax/parse/parser.rs
src/libsyntax/parse/token.rs
src/libsyntax/print/pp.rs
src/libsyntax/print/pprust.rs
src/libsyntax/test.rs
src/libsyntax/util/parser.rs
src/libsyntax/util/parser_testing.rs
src/libsyntax/visit.rs
src/libsyntax_ext/Cargo.toml
src/libsyntax_ext/asm.rs
src/libsyntax_ext/deriving/cmp/ord.rs
src/libsyntax_ext/deriving/cmp/partial_eq.rs
src/libsyntax_ext/deriving/cmp/partial_ord.rs
src/libsyntax_ext/deriving/decodable.rs
src/libsyntax_ext/deriving/encodable.rs
src/libsyntax_ext/deriving/generic/mod.rs
src/libsyntax_ext/deriving/hash.rs
src/libsyntax_ext/deriving/mod.rs
src/libsyntax_ext/lib.rs
src/libterm/lib.rs
src/libterm/terminfo/mod.rs
src/libterm/terminfo/parm.rs
src/libterm/terminfo/parser/compiled.rs
src/libtest/Cargo.toml
src/libtest/lib.rs
src/rt/rust_test_helpers.c
src/rustbook/Cargo.toml [deleted file]
src/rustbook/book.rs [deleted file]
src/rustbook/build.rs [deleted file]
src/rustbook/error.rs [deleted file]
src/rustbook/help.rs [deleted file]
src/rustbook/main.rs [deleted file]
src/rustbook/serve.rs [deleted file]
src/rustbook/static/rustbook.css [deleted file]
src/rustbook/static/rustbook.js [deleted file]
src/rustbook/subcommand.rs [deleted file]
src/rustbook/term.rs [deleted file]
src/rustbook/test.rs [deleted file]
src/rustc/Cargo.lock
src/rustc/Cargo.toml
src/rustc/rustbook.rs [deleted file]
src/rustc/test_shim/Cargo.lock [new file with mode: 0644]
src/rustc/test_shim/Cargo.toml [new file with mode: 0644]
src/rustc/test_shim/lib.rs [new file with mode: 0644]
src/rustllvm/RustWrapper.cpp
src/rustllvm/llvm-auto-clean-trigger
src/snapshots.txt
src/test/auxiliary/ambig_impl_2_lib.rs
src/test/auxiliary/cgu_test.rs [new file with mode: 0644]
src/test/auxiliary/cgu_test_a.rs [new file with mode: 0644]
src/test/auxiliary/cgu_test_b.rs [new file with mode: 0644]
src/test/auxiliary/derive-no-std.rs [new file with mode: 0644]
src/test/auxiliary/dummy_mir_pass.rs
src/test/auxiliary/empty.rs [new file with mode: 0644]
src/test/auxiliary/foreign_lib.rs
src/test/auxiliary/go_trait.rs
src/test/auxiliary/inline-default-methods.rs
src/test/auxiliary/issue-13698.rs
src/test/auxiliary/issue-15318.rs
src/test/auxiliary/issue-17476.rs
src/test/auxiliary/issue-17718-aux.rs [new file with mode: 0644]
src/test/auxiliary/issue-17718.rs [deleted file]
src/test/auxiliary/issue-19190-3.rs
src/test/auxiliary/issue-20646.rs
src/test/auxiliary/issue-20727.rs
src/test/auxiliary/issue-21092.rs
src/test/auxiliary/issue-21801.rs
src/test/auxiliary/issue-22025.rs
src/test/auxiliary/issue-27362.rs
src/test/auxiliary/issue-29584.rs
src/test/auxiliary/issue13507.rs
src/test/auxiliary/linkage-visibility.rs
src/test/auxiliary/lint_for_crate.rs
src/test/auxiliary/lint_group_plugin_test.rs
src/test/auxiliary/reexp_stripped.rs [new file with mode: 0644]
src/test/auxiliary/specialization_cross_crate.rs [new file with mode: 0644]
src/test/auxiliary/specialization_cross_crate_defaults.rs [new file with mode: 0755]
src/test/auxiliary/struct_field_privacy.rs
src/test/auxiliary/typeid-intrinsic-aux1.rs [new file with mode: 0644]
src/test/auxiliary/typeid-intrinsic-aux2.rs [new file with mode: 0644]
src/test/auxiliary/typeid-intrinsic.rs [deleted file]
src/test/auxiliary/typeid-intrinsic2.rs [deleted file]
src/test/auxiliary/variant-struct.rs [new file with mode: 0644]
src/test/auxiliary/xcrate_associated_type_defaults.rs
src/test/codegen-units/cross-crate-closures.rs
src/test/codegen-units/cross-crate-generic-functions.rs
src/test/codegen-units/cross-crate-trait-method.rs
src/test/codegen-units/generic-drop-glue.rs
src/test/codegen-units/generic-impl.rs
src/test/codegen-units/impl-in-non-instantiated-generic.rs
src/test/codegen-units/instantiation-through-vtable.rs
src/test/codegen-units/non-generic-drop-glue.rs
src/test/codegen-units/non-generic-functions.rs
src/test/codegen-units/overloaded-operators.rs
src/test/codegen-units/trait-implementations.rs
src/test/codegen-units/trait-method-as-argument.rs
src/test/codegen-units/transitive-drop-glue.rs
src/test/codegen-units/tuple-drop-glue.rs
src/test/codegen-units/unsizing.rs
src/test/codegen-units/unused-traits-and-generics.rs
src/test/codegen/adjustments.rs
src/test/codegen/coercions.rs
src/test/codegen/consts.rs
src/test/codegen/drop.rs
src/test/codegen/extern-functions.rs
src/test/codegen/float_math.rs [new file with mode: 0644]
src/test/codegen/naked-functions.rs [new file with mode: 0644]
src/test/codegen/refs.rs
src/test/codegen/stores.rs
src/test/compile-fail-fulldeps/derive-no-std-not-supported.rs [new file with mode: 0644]
src/test/compile-fail-fulldeps/dropck_tarena_cycle_checked.rs [new file with mode: 0644]
src/test/compile-fail-fulldeps/dropck_tarena_unsound_drop.rs [new file with mode: 0644]
src/test/compile-fail-fulldeps/macro-crate-doesnt-resolve.rs
src/test/compile-fail/allocator-dylib-is-system.rs
src/test/compile-fail/allocator-rust-dylib-is-jemalloc.rs
src/test/compile-fail/asm-out-assign-imm.rs
src/test/compile-fail/associated-item-duplicate-names-2.rs
src/test/compile-fail/associated-item-duplicate-names-3.rs
src/test/compile-fail/associated-item-duplicate-names.rs
src/test/compile-fail/associated-types-ICE-when-projecting-out-of-err.rs
src/test/compile-fail/associated-types-bound-failure.rs
src/test/compile-fail/associated-types-coherence-failure.rs
src/test/compile-fail/associated-types-for-unimpl-trait.rs
src/test/compile-fail/associated-types-invalid-trait-ref-issue-18865.rs
src/test/compile-fail/associated-types-no-suitable-bound.rs
src/test/compile-fail/associated-types-no-suitable-supertrait-2.rs
src/test/compile-fail/associated-types-no-suitable-supertrait.rs
src/test/compile-fail/associated-types-path-2.rs
src/test/compile-fail/associated-types-unsized.rs
src/test/compile-fail/bad-const-type.rs
src/test/compile-fail/bad-intrinsic-monomorphization.rs [new file with mode: 0644]
src/test/compile-fail/bad-method-typaram-kind.rs
src/test/compile-fail/bad-module.rs
src/test/compile-fail/bad-sized.rs
src/test/compile-fail/bad-type-env-capture.rs
src/test/compile-fail/binop-bitxor-str.rs
src/test/compile-fail/blind-item-block-item-shadow.rs
src/test/compile-fail/blind-item-block-middle.rs
src/test/compile-fail/blind-item-item-shadow.rs
src/test/compile-fail/borrowck/borrowck-box-insensitivity.rs
src/test/compile-fail/borrowck/borrowck-let-suggestion.rs
src/test/compile-fail/borrowck/borrowck-report-with-custom-diagnostic.rs
src/test/compile-fail/borrowck/borrowck-vec-pattern-nesting.rs
src/test/compile-fail/builtin-superkinds-double-superkind.rs
src/test/compile-fail/builtin-superkinds-in-metadata.rs
src/test/compile-fail/builtin-superkinds-simple.rs
src/test/compile-fail/builtin-superkinds-typaram-not-send.rs
src/test/compile-fail/cast-as-bool.rs
src/test/compile-fail/cast-rfc0401.rs
src/test/compile-fail/cfg-empty-codemap.rs
src/test/compile-fail/closure-bounds-cant-promote-superkind-in-struct.rs
src/test/compile-fail/closure-bounds-subtype.rs
src/test/compile-fail/coherence-blanket-conflicts-with-blanket-implemented.rs
src/test/compile-fail/coherence-blanket-conflicts-with-blanket-unimplemented.rs
src/test/compile-fail/coherence-blanket-conflicts-with-specific-multidispatch.rs
src/test/compile-fail/coherence-blanket-conflicts-with-specific-trait.rs
src/test/compile-fail/coherence-blanket-conflicts-with-specific.rs
src/test/compile-fail/coherence-conflicting-negative-trait-impl.rs
src/test/compile-fail/coherence-cow-1.rs [deleted file]
src/test/compile-fail/coherence-cow-2.rs [deleted file]
src/test/compile-fail/coherence-cow-no-cover.rs [deleted file]
src/test/compile-fail/coherence-cow.rs [new file with mode: 0644]
src/test/compile-fail/coherence-cross-crate-conflict.rs
src/test/compile-fail/coherence-default-trait-impl.rs
src/test/compile-fail/coherence-no-direct-lifetime-dispatch.rs [new file with mode: 0644]
src/test/compile-fail/coherence-overlap-all-t-and-tuple.rs
src/test/compile-fail/coherence-overlap-issue-23516.rs
src/test/compile-fail/coherence-overlap-messages.rs
src/test/compile-fail/coherence-projection-conflict-orphan.rs
src/test/compile-fail/coherence-projection-conflict-ty-param.rs
src/test/compile-fail/coherence-projection-conflict.rs
src/test/compile-fail/coherence-tuple-conflict.rs
src/test/compile-fail/coherence_copy_like_err_fundamental_struct_tuple.rs
src/test/compile-fail/coherence_copy_like_err_struct.rs
src/test/compile-fail/coherence_copy_like_err_tuple.rs
src/test/compile-fail/comm-not-freeze-receiver.rs [deleted file]
src/test/compile-fail/comm-not-freeze.rs [deleted file]
src/test/compile-fail/consider-removing-last-semi.rs [new file with mode: 0644]
src/test/compile-fail/const-err-early.rs [new file with mode: 0644]
src/test/compile-fail/const-err.rs
src/test/compile-fail/const-eval-overflow-2.rs
src/test/compile-fail/const-eval-overflow-3.rs
src/test/compile-fail/const-eval-overflow-4b.rs
src/test/compile-fail/const-eval-overflow.rs
src/test/compile-fail/const-eval-span.rs
src/test/compile-fail/const-integer-bool-ops.rs
src/test/compile-fail/const-len-underflow-separate-spans.rs
src/test/compile-fail/const-len-underflow-subspans.rs
src/test/compile-fail/const-pattern-irrefutable.rs
src/test/compile-fail/const-pattern-not-const-evaluable.rs
src/test/compile-fail/const-tup-index-span.rs
src/test/compile-fail/cross-fn-cache-hole.rs
src/test/compile-fail/dep-graph-assoc-type-trans.rs
src/test/compile-fail/dep-graph-caller-callee.rs
src/test/compile-fail/dep-graph-struct-signature.rs
src/test/compile-fail/dep-graph-trait-impl-two-traits-same-method.rs
src/test/compile-fail/dep-graph-trait-impl-two-traits.rs
src/test/compile-fail/dep-graph-trait-impl.rs
src/test/compile-fail/dep-graph-unrelated.rs
src/test/compile-fail/deprecation-in-staged-api.rs
src/test/compile-fail/deprecation-lint.rs
src/test/compile-fail/deprecation-sanity.rs
src/test/compile-fail/derive-no-std-not-supported.rs [deleted file]
src/test/compile-fail/deriving-no-inner-impl-error-message.rs
src/test/compile-fail/deriving-span-Default-struct.rs
src/test/compile-fail/destructure-trait-ref.rs
src/test/compile-fail/discrim-overflow-2.rs
src/test/compile-fail/discrim-overflow.rs
src/test/compile-fail/double-import.rs
src/test/compile-fail/dropck_no_diverge_on_nonregular_3.rs
src/test/compile-fail/dropck_tarena_cycle_checked.rs [deleted file]
src/test/compile-fail/dropck_tarena_unsound_drop.rs [deleted file]
src/test/compile-fail/dst-bad-assign-2.rs
src/test/compile-fail/dst-bad-assign.rs
src/test/compile-fail/dst-bad-coerce1.rs
src/test/compile-fail/dst-bad-deep.rs
src/test/compile-fail/dst-object-from-unsized-type.rs
src/test/compile-fail/dst-sized-trait-param.rs
src/test/compile-fail/dupe-symbols-8.rs [deleted file]
src/test/compile-fail/duplicate_entry_error.rs
src/test/compile-fail/enum-and-module-in-same-scope.rs
src/test/compile-fail/enum-discrim-autosizing.rs [new file with mode: 0644]
src/test/compile-fail/enum-discrim-too-small.rs
src/test/compile-fail/enum-discrim-too-small2.rs [new file with mode: 0644]
src/test/compile-fail/error-should-say-copy-not-pod.rs
src/test/compile-fail/eval-enum.rs
src/test/compile-fail/export-fully-qualified.rs
src/test/compile-fail/export-tag-variant.rs
src/test/compile-fail/export2.rs
src/test/compile-fail/extern-crate-visibility.rs
src/test/compile-fail/extern-wrong-value-type.rs
src/test/compile-fail/fat-ptr-cast.rs
src/test/compile-fail/feature-gate-negate-unsigned.rs
src/test/compile-fail/feature-gate-negate-unsigned0.rs [new file with mode: 0644]
src/test/compile-fail/feature-gate-try-operator.rs [new file with mode: 0644]
src/test/compile-fail/fn-item-type.rs
src/test/compile-fail/fn-trait-formatting.rs
src/test/compile-fail/for-loop-bogosity.rs
src/test/compile-fail/fully-qualified-type-name1.rs
src/test/compile-fail/fully-qualified-type-name4.rs
src/test/compile-fail/gated-naked_functions.rs [new file with mode: 0644]
src/test/compile-fail/generic-type-params-name-repr.rs
src/test/compile-fail/glob-cycles.rs [new file with mode: 0644]
src/test/compile-fail/hrtb-conflate-regions.rs
src/test/compile-fail/hrtb-higher-ranker-supertraits-transitive.rs
src/test/compile-fail/hrtb-higher-ranker-supertraits.rs
src/test/compile-fail/hrtb-just-for-static.rs
src/test/compile-fail/hrtb-perfect-forwarding.rs
src/test/compile-fail/ifmt-unimpl.rs
src/test/compile-fail/impl-bounds-checking.rs
src/test/compile-fail/impl-duplicate-methods.rs
src/test/compile-fail/impossible_range.rs [new file with mode: 0644]
src/test/compile-fail/indexing-requires-a-uint.rs
src/test/compile-fail/infinite-instantiation.rs
src/test/compile-fail/inherent-overlap.rs [new file with mode: 0644]
src/test/compile-fail/inner-static-type-parameter.rs
src/test/compile-fail/integral-indexing.rs
src/test/compile-fail/intrinsic-return-address.rs
src/test/compile-fail/invalid-intrinsic.rs [new file with mode: 0644]
src/test/compile-fail/issue-10291.rs
src/test/compile-fail/issue-10636-2.rs [new file with mode: 0644]
src/test/compile-fail/issue-11680.rs
src/test/compile-fail/issue-11714.rs
src/test/compile-fail/issue-12796.rs
src/test/compile-fail/issue-13058.rs
src/test/compile-fail/issue-13407.rs
src/test/compile-fail/issue-13428.rs
src/test/compile-fail/issue-13466.rs
src/test/compile-fail/issue-13482-2.rs
src/test/compile-fail/issue-13641.rs
src/test/compile-fail/issue-14084.rs
src/test/compile-fail/issue-14366.rs
src/test/compile-fail/issue-14853.rs
src/test/compile-fail/issue-15260.rs
src/test/compile-fail/issue-15524.rs
src/test/compile-fail/issue-15756.rs
src/test/compile-fail/issue-15783.rs
src/test/compile-fail/issue-16338.rs
src/test/compile-fail/issue-16401.rs
src/test/compile-fail/issue-16538.rs
src/test/compile-fail/issue-16747.rs
src/test/compile-fail/issue-17263.rs
src/test/compile-fail/issue-17441.rs
src/test/compile-fail/issue-17651.rs
src/test/compile-fail/issue-17718-static-sync.rs
src/test/compile-fail/issue-17728.rs
src/test/compile-fail/issue-17959.rs
src/test/compile-fail/issue-18107.rs
src/test/compile-fail/issue-18611.rs
src/test/compile-fail/issue-18919.rs
src/test/compile-fail/issue-1920-1.rs
src/test/compile-fail/issue-1920-2.rs
src/test/compile-fail/issue-1920-3.rs
src/test/compile-fail/issue-19498.rs
src/test/compile-fail/issue-19707.rs
src/test/compile-fail/issue-20005.rs
src/test/compile-fail/issue-20162.rs
src/test/compile-fail/issue-20427.rs [deleted file]
src/test/compile-fail/issue-20605.rs
src/test/compile-fail/issue-20616-8.rs
src/test/compile-fail/issue-20616-9.rs
src/test/compile-fail/issue-21160.rs
src/test/compile-fail/issue-21174.rs
src/test/compile-fail/issue-21221-1.rs
src/test/compile-fail/issue-21221-2.rs
src/test/compile-fail/issue-21221-3.rs
src/test/compile-fail/issue-21221-4.rs
src/test/compile-fail/issue-21332.rs
src/test/compile-fail/issue-2149.rs
src/test/compile-fail/issue-21546.rs
src/test/compile-fail/issue-21600.rs
src/test/compile-fail/issue-21659-show-relevant-trait-impls-1.rs
src/test/compile-fail/issue-21659-show-relevant-trait-impls-2.rs
src/test/compile-fail/issue-21763.rs
src/test/compile-fail/issue-21950.rs
src/test/compile-fail/issue-22034.rs
src/test/compile-fail/issue-22560.rs
src/test/compile-fail/issue-22638.rs
src/test/compile-fail/issue-22645.rs
src/test/compile-fail/issue-22684.rs [new file with mode: 0644]
src/test/compile-fail/issue-23024.rs
src/test/compile-fail/issue-24036.rs
src/test/compile-fail/issue-24081.rs
src/test/compile-fail/issue-24446.rs
src/test/compile-fail/issue-24883.rs [new file with mode: 0644]
src/test/compile-fail/issue-25076.rs
src/test/compile-fail/issue-25385.rs
src/test/compile-fail/issue-25386.rs
src/test/compile-fail/issue-25396.rs
src/test/compile-fail/issue-25793.rs
src/test/compile-fail/issue-2611-4.rs
src/test/compile-fail/issue-26638.rs
src/test/compile-fail/issue-26930.rs [new file with mode: 0644]
src/test/compile-fail/issue-27340.rs [new file with mode: 0644]
src/test/compile-fail/issue-28098.rs
src/test/compile-fail/issue-28472.rs
src/test/compile-fail/issue-28568.rs
src/test/compile-fail/issue-29124.rs [new file with mode: 0644]
src/test/compile-fail/issue-29147.rs
src/test/compile-fail/issue-29161.rs
src/test/compile-fail/issue-30079.rs [new file with mode: 0644]
src/test/compile-fail/issue-3021-b.rs
src/test/compile-fail/issue-3021-c.rs
src/test/compile-fail/issue-3021-d.rs
src/test/compile-fail/issue-3021.rs
src/test/compile-fail/issue-30302.rs
src/test/compile-fail/issue-30560.rs [new file with mode: 0644]
src/test/compile-fail/issue-30730.rs [new file with mode: 0644]
src/test/compile-fail/issue-3099-a.rs
src/test/compile-fail/issue-3099-b.rs
src/test/compile-fail/issue-3099.rs
src/test/compile-fail/issue-31511.rs [new file with mode: 0644]
src/test/compile-fail/issue-31804.rs [new file with mode: 0644]
src/test/compile-fail/issue-32119.rs [new file with mode: 0644]
src/test/compile-fail/issue-32128.rs [new file with mode: 0644]
src/test/compile-fail/issue-3214.rs
src/test/compile-fail/issue-32201.rs [new file with mode: 0644]
src/test/compile-fail/issue-32222.rs [new file with mode: 0644]
src/test/compile-fail/issue-32323.rs [new file with mode: 0644]
src/test/compile-fail/issue-32326.rs [new file with mode: 0644]
src/test/compile-fail/issue-32377.rs [new file with mode: 0644]
src/test/compile-fail/issue-32797.rs [new file with mode: 0644]
src/test/compile-fail/issue-3521-2.rs
src/test/compile-fail/issue-3521.rs
src/test/compile-fail/issue-3668-2.rs
src/test/compile-fail/issue-3668.rs
src/test/compile-fail/issue-3680.rs
src/test/compile-fail/issue-3907.rs
src/test/compile-fail/issue-4265.rs
src/test/compile-fail/issue-5035-2.rs
src/test/compile-fail/issue-5035.rs
src/test/compile-fail/issue-5883.rs
src/test/compile-fail/issue-5997-enum.rs
src/test/compile-fail/issue-5997-struct.rs
src/test/compile-fail/issue-6642.rs
src/test/compile-fail/issue-6702.rs
src/test/compile-fail/issue-6804.rs
src/test/compile-fail/issue-6936.rs
src/test/compile-fail/issue-7013.rs
src/test/compile-fail/issue-7044.rs
src/test/compile-fail/issue-7092.rs
src/test/compile-fail/issue-7364.rs
src/test/compile-fail/issue-7573.rs
src/test/compile-fail/issue-7575.rs
src/test/compile-fail/issue-7867.rs
src/test/compile-fail/issue-8153.rs
src/test/compile-fail/issue-8460-const.rs
src/test/compile-fail/issue-8640.rs
src/test/compile-fail/issue-8727.rs
src/test/compile-fail/issue-8761.rs
src/test/compile-fail/kindck-copy.rs
src/test/compile-fail/kindck-impl-type-params-2.rs
src/test/compile-fail/kindck-impl-type-params.rs
src/test/compile-fail/kindck-nonsendable-1.rs
src/test/compile-fail/kindck-send-object.rs
src/test/compile-fail/kindck-send-object1.rs
src/test/compile-fail/kindck-send-object2.rs
src/test/compile-fail/kindck-send-owned.rs
src/test/compile-fail/kindck-send-unsafe.rs
src/test/compile-fail/lexical-scopes.rs [new file with mode: 0644]
src/test/compile-fail/lifetime-elision-return-type-requires-explicit-lifetime.rs
src/test/compile-fail/lifetime-inference-give-expl-lifetime-param-3.rs
src/test/compile-fail/lifetime-inference-give-expl-lifetime-param.rs
src/test/compile-fail/linkage2.rs
src/test/compile-fail/lint-group-style.rs
src/test/compile-fail/lint-no-drop-on-repr-extern.rs
src/test/compile-fail/lint-removed-allow.rs [new file with mode: 0644]
src/test/compile-fail/lint-removed.rs
src/test/compile-fail/lint-renamed-allow.rs [new file with mode: 0644]
src/test/compile-fail/lint-type-limits.rs
src/test/compile-fail/lint-type-limits2.rs [new file with mode: 0644]
src/test/compile-fail/lint-type-limits3.rs [new file with mode: 0644]
src/test/compile-fail/lint-type-overflow.rs
src/test/compile-fail/lint-type-overflow2.rs [new file with mode: 0644]
src/test/compile-fail/lint-unconditional-recursion.rs
src/test/compile-fail/liveness-return-last-stmt-semi.rs
src/test/compile-fail/liveness-unused.rs
src/test/compile-fail/lub-if.rs
src/test/compile-fail/lub-match.rs
src/test/compile-fail/macro-backtrace-nested.rs
src/test/compile-fail/macro-backtrace-println.rs
src/test/compile-fail/macro-expanded-mod.rs [new file with mode: 0644]
src/test/compile-fail/macro-inner-attributes.rs
src/test/compile-fail/macro_expanded_mod_helper/foo/bar.rs [new file with mode: 0644]
src/test/compile-fail/macro_expanded_mod_helper/foo/mod.rs [new file with mode: 0644]
src/test/compile-fail/map-types.rs
src/test/compile-fail/meta-expected-error-correct-rev.rs [new file with mode: 0644]
src/test/compile-fail/meta-expected-error-wrong-rev.rs [new file with mode: 0644]
src/test/compile-fail/method-help-unsatisfied-bound.rs
src/test/compile-fail/method-macro-backtrace.rs
src/test/compile-fail/method-suggestion-no-duplication.rs
src/test/compile-fail/minus-string.rs
src/test/compile-fail/moves-based-on-type-cyclic-types-issue-4821.rs
src/test/compile-fail/mut-not-freeze.rs [deleted file]
src/test/compile-fail/mutable-enum-indirect.rs
src/test/compile-fail/no-link.rs
src/test/compile-fail/no-method-suggested-traits.rs
src/test/compile-fail/no-send-res-ports.rs
src/test/compile-fail/no-std-inject.rs
src/test/compile-fail/no_send-enum.rs
src/test/compile-fail/no_send-rc.rs
src/test/compile-fail/no_send-struct.rs
src/test/compile-fail/no_share-enum.rs
src/test/compile-fail/no_share-rc.rs [deleted file]
src/test/compile-fail/no_share-struct.rs
src/test/compile-fail/noexporttypeexe.rs
src/test/compile-fail/non-constant-enum-for-vec-repeat.rs
src/test/compile-fail/non-interger-atomic.rs [new file with mode: 0644]
src/test/compile-fail/not-panic-safe-3.rs
src/test/compile-fail/not-panic-safe-5.rs
src/test/compile-fail/not-panic-safe.rs
src/test/compile-fail/not-sync.rs [new file with mode: 0644]
src/test/compile-fail/object-does-not-impl-trait.rs
src/test/compile-fail/object-lifetime-default-mybox.rs
src/test/compile-fail/object-safety-generics.rs
src/test/compile-fail/on-unimplemented.rs
src/test/compile-fail/phantom-oibit.rs
src/test/compile-fail/placement-expr-unstable.rs
src/test/compile-fail/privacy-in-paths.rs [new file with mode: 0644]
src/test/compile-fail/privacy-ns2.rs
src/test/compile-fail/privacy-sanity.rs
src/test/compile-fail/privacy-ufcs.rs
src/test/compile-fail/privacy1.rs
src/test/compile-fail/privacy2.rs
src/test/compile-fail/privacy4.rs
src/test/compile-fail/privacy5.rs
src/test/compile-fail/private-impl-method.rs
src/test/compile-fail/private-in-public-warn.rs
src/test/compile-fail/range-1.rs
src/test/compile-fail/range-2.rs
src/test/compile-fail/range_inclusive_gate.rs [new file with mode: 0644]
src/test/compile-fail/recursion_limit.rs
src/test/compile-fail/ref-suggestion.rs
src/test/compile-fail/reflect-assoc.rs
src/test/compile-fail/reflect-object-param.rs
src/test/compile-fail/reflect.rs
src/test/compile-fail/region-lifetime-bounds-on-fns-where-clause.rs
src/test/compile-fail/region-multiple-lifetime-bounds-on-fns-where-clause.rs
src/test/compile-fail/regions-early-bound-error-method.rs
src/test/compile-fail/regions-early-bound-error.rs
src/test/compile-fail/regions-glb-free-free.rs
src/test/compile-fail/regions-lifetime-bounds-on-fns.rs
src/test/compile-fail/regions-nested-fns.rs
src/test/compile-fail/regions-static-bound.rs
src/test/compile-fail/reject-specialized-drops-8142.rs
src/test/compile-fail/repeat-to-run-dtor-twice.rs
src/test/compile-fail/repeat_count.rs
src/test/compile-fail/resolve-conflict-extern-crate-vs-extern-crate.rs
src/test/compile-fail/resolve-conflict-import-vs-extern-crate.rs
src/test/compile-fail/resolve-conflict-item-vs-extern-crate.rs
src/test/compile-fail/resolve-conflict-item-vs-import.rs
src/test/compile-fail/resolve-conflict-type-vs-import.rs
src/test/compile-fail/resolve-type-param-in-item-in-trait.rs
src/test/compile-fail/rfc1445/feature-gate.rs [new file with mode: 0644]
src/test/compile-fail/rfc1445/match-forbidden-without-eq.rs [new file with mode: 0644]
src/test/compile-fail/rfc1445/match-requires-both-partialeq-and-eq.rs [new file with mode: 0644]
src/test/compile-fail/shadowed-trait-methods.rs [new file with mode: 0644]
src/test/compile-fail/simd-intrinsic-generic-arithmetic.rs
src/test/compile-fail/simd-intrinsic-generic-cast.rs
src/test/compile-fail/simd-intrinsic-generic-comparison.rs
src/test/compile-fail/simd-intrinsic-generic-elements.rs
src/test/compile-fail/sized-cycle-note.rs
src/test/compile-fail/specialization/README.md [new file with mode: 0644]
src/test/compile-fail/specialization/specialization-default-projection.rs [new file with mode: 0644]
src/test/compile-fail/specialization/specialization-default-types.rs [new file with mode: 0644]
src/test/compile-fail/specialization/specialization-feature-gate-default.rs [new file with mode: 0644]
src/test/compile-fail/specialization/specialization-feature-gate-overlap.rs [new file with mode: 0644]
src/test/compile-fail/specialization/specialization-no-default.rs [new file with mode: 0644]
src/test/compile-fail/specialization/specialization-overlap-negative.rs [new file with mode: 0644]
src/test/compile-fail/specialization/specialization-overlap.rs [new file with mode: 0644]
src/test/compile-fail/specialization/specialization-polarity.rs [new file with mode: 0755]
src/test/compile-fail/str-idx.rs
src/test/compile-fail/str-mut-idx.rs
src/test/compile-fail/struct-field-privacy.rs
src/test/compile-fail/struct-variant-privacy-xc.rs
src/test/compile-fail/struct-variant-privacy.rs
src/test/compile-fail/substs-ppaux.rs [new file with mode: 0644]
src/test/compile-fail/suggest-path-instead-of-mod-dot-item.rs
src/test/compile-fail/suggest-private-fields.rs
src/test/compile-fail/super-at-top-level.rs
src/test/compile-fail/symbol-names/basic.rs [new file with mode: 0644]
src/test/compile-fail/symbol-names/impl1.rs [new file with mode: 0644]
src/test/compile-fail/tag-that-dare-not-speak-its-name.rs
src/test/compile-fail/task-rng-isnt-sendable.rs
src/test/compile-fail/token-error-correct-2.rs [new file with mode: 0644]
src/test/compile-fail/token-error-correct-3.rs [new file with mode: 0644]
src/test/compile-fail/token-error-correct.rs [new file with mode: 0644]
src/test/compile-fail/trait-bounds-impl-comparison-1.rs
src/test/compile-fail/trait-bounds-impl-comparison-2.rs
src/test/compile-fail/trait-bounds-not-on-bare-trait.rs
src/test/compile-fail/trait-bounds-on-structs-and-enums-in-fns.rs
src/test/compile-fail/trait-bounds-on-structs-and-enums-in-impls.rs
src/test/compile-fail/trait-bounds-on-structs-and-enums-locals.rs
src/test/compile-fail/trait-bounds-on-structs-and-enums-static.rs
src/test/compile-fail/trait-bounds-on-structs-and-enums-xc.rs
src/test/compile-fail/trait-bounds-on-structs-and-enums-xc1.rs
src/test/compile-fail/trait-bounds-on-structs-and-enums.rs
src/test/compile-fail/trait-coercion-generic-bad.rs
src/test/compile-fail/trait-duplicate-methods.rs
src/test/compile-fail/trait-not-accessible.rs
src/test/compile-fail/trait-object-reference-without-parens-suggestion.rs
src/test/compile-fail/trait-privacy.rs [new file with mode: 0644]
src/test/compile-fail/trait-suggest-where-clause.rs [new file with mode: 0644]
src/test/compile-fail/traits-inductive-overflow-auto-normal-auto.rs [new file with mode: 0644]
src/test/compile-fail/traits-negative-impls.rs
src/test/compile-fail/traits-repeated-supertrait-ambig.rs
src/test/compile-fail/transmute-from-fn-item-types-error.rs [new file with mode: 0644]
src/test/compile-fail/transmute-from-fn-item-types-lint.rs [new file with mode: 0644]
src/test/compile-fail/transmute-type-parameters.rs
src/test/compile-fail/trivial_casts.rs
src/test/compile-fail/type-macros-fail.rs
src/test/compile-fail/type-params-in-different-spaces-2.rs
src/test/compile-fail/typeck-default-trait-impl-assoc-type.rs
src/test/compile-fail/typeck-default-trait-impl-constituent-types-2.rs
src/test/compile-fail/typeck-default-trait-impl-constituent-types.rs
src/test/compile-fail/typeck-default-trait-impl-negation-send.rs
src/test/compile-fail/typeck-default-trait-impl-negation-sync.rs
src/test/compile-fail/typeck-default-trait-impl-negation.rs
src/test/compile-fail/typeck-default-trait-impl-precedence.rs
src/test/compile-fail/typeck-default-trait-impl-send-param.rs
src/test/compile-fail/typeck-default-trait-impl-supertrait.rs
src/test/compile-fail/typeck-default-trait-impl-trait-where-clause-2.rs
src/test/compile-fail/typeck-default-trait-impl-trait-where-clause.rs
src/test/compile-fail/typeck-unsafe-always-share.rs
src/test/compile-fail/ufcs-qpath-missing-params.rs
src/test/compile-fail/ufcs-qpath-self-mismatch.rs
src/test/compile-fail/unboxed-closure-sugar-default.rs
src/test/compile-fail/unboxed-closure-sugar-equiv.rs
src/test/compile-fail/unboxed-closures-fnmut-as-fn.rs
src/test/compile-fail/unboxed-closures-infer-argument-types-two-region-pointers.rs
src/test/compile-fail/unboxed-closures-unsafe-extern-fn.rs
src/test/compile-fail/unboxed-closures-wrong-abi.rs
src/test/compile-fail/unboxed-closures-wrong-arg-type-extern-fn.rs
src/test/compile-fail/unique-unique-kind.rs [deleted file]
src/test/compile-fail/unique-vec-res.rs [deleted file]
src/test/compile-fail/unnecessary-private.rs [deleted file]
src/test/compile-fail/unresolved-extern-mod-suggestion.rs
src/test/compile-fail/unsized-bare-typaram.rs
src/test/compile-fail/unsized-enum.rs
src/test/compile-fail/unsized-inherent-impl-self-type.rs
src/test/compile-fail/unsized-struct.rs
src/test/compile-fail/unsized-trait-impl-self-type.rs
src/test/compile-fail/unsized-trait-impl-trait-arg.rs
src/test/compile-fail/unsized3.rs
src/test/compile-fail/unsized5.rs
src/test/compile-fail/unsized6.rs
src/test/compile-fail/unsized7.rs
src/test/compile-fail/use-mod-3.rs
src/test/compile-fail/use-mod.rs
src/test/compile-fail/use-super-global-path.rs [new file with mode: 0644]
src/test/compile-fail/useless-pub.rs
src/test/compile-fail/variadic-ffi-3.rs
src/test/compile-fail/variance-unused-type-param.rs
src/test/compile-fail/variant-namespacing.rs
src/test/compile-fail/vtable-res-trait-param.rs
src/test/compile-fail/wf-impl-associated-type-trait.rs
src/test/compile-fail/wf-static-method.rs
src/test/compile-fail/where-clause-constraints-are-local-for-inherent-impl.rs
src/test/compile-fail/where-clause-constraints-are-local-for-trait-impl.rs
src/test/compile-fail/where-clause-method-substituion.rs
src/test/compile-fail/where-clauses-method-unsatisfied.rs
src/test/compile-fail/where-clauses-unsatisfied.rs
src/test/compile-fail/where-for-self-2.rs
src/test/compile-fail/xcrate-private-by-default.rs
src/test/debuginfo/constant-in-match-pattern.rs
src/test/incremental/dirty_clean.rs [new file with mode: 0644]
src/test/incremental/hello_world.rs [new file with mode: 0644]
src/test/incremental/string_constant.rs [new file with mode: 0644]
src/test/parse-fail/default.rs [new file with mode: 0644]
src/test/parse-fail/issue-10636-2.rs [deleted file]
src/test/parse-fail/issue-19096.rs
src/test/parse-fail/issue-20711-2.rs
src/test/parse-fail/issue-20711.rs
src/test/parse-fail/issue-2354-1.rs
src/test/parse-fail/issue-3036.rs
src/test/parse-fail/issue-32501.rs [new file with mode: 0644]
src/test/parse-fail/macro-mismatched-delim-paren-brace.rs
src/test/parse-fail/macros-no-semicolon.rs
src/test/parse-fail/match-refactor-to-expr.rs
src/test/parse-fail/paamayim-nekudotayim.rs
src/test/parse-fail/pub-method-macro.rs
src/test/parse-fail/range-3.rs
src/test/parse-fail/range-4.rs
src/test/parse-fail/range_inclusive.rs [new file with mode: 0644]
src/test/parse-fail/range_inclusive_gate.rs [new file with mode: 0644]
src/test/parse-fail/raw-str-unbalanced.rs
src/test/parse-fail/removed-syntax-mut-vec-expr.rs
src/test/parse-fail/removed-syntax-static-fn.rs
src/test/parse-fail/removed-syntax-uniq-mut-expr.rs
src/test/parse-fail/removed-syntax-with-1.rs
src/test/parse-fail/struct-literal-in-for.rs
src/test/parse-fail/struct-literal-in-if.rs
src/test/parse-fail/struct-literal-in-match-discriminant.rs
src/test/parse-fail/struct-literal-in-while.rs
src/test/parse-fail/struct-variant-no-pub.rs [deleted file]
src/test/parse-fail/unboxed-closure-sugar-used-on-struct-3.rs
src/test/pretty/issue-4264.pp
src/test/pretty/top-level-doc-comments.rs [new file with mode: 0644]
src/test/run-fail/divide-by-zero.rs
src/test/run-fail/meta-revision-bad.rs [new file with mode: 0644]
src/test/run-fail/meta-revision-ok.rs [new file with mode: 0644]
src/test/run-fail/mir_dynamic_drops_1.rs [new file with mode: 0644]
src/test/run-fail/mir_dynamic_drops_2.rs [new file with mode: 0644]
src/test/run-fail/mir_dynamic_drops_3.rs [new file with mode: 0644]
src/test/run-fail/mod-zero.rs
src/test/run-fail/overflowing-add.rs
src/test/run-fail/overflowing-lsh-1.rs
src/test/run-fail/overflowing-lsh-2.rs
src/test/run-fail/overflowing-lsh-3.rs
src/test/run-fail/overflowing-lsh-4.rs
src/test/run-fail/overflowing-mul.rs
src/test/run-fail/overflowing-neg.rs
src/test/run-fail/overflowing-rsh-1.rs
src/test/run-fail/overflowing-rsh-2.rs
src/test/run-fail/overflowing-rsh-3.rs
src/test/run-fail/overflowing-rsh-4.rs
src/test/run-fail/overflowing-rsh-5.rs
src/test/run-fail/overflowing-rsh-6.rs
src/test/run-fail/overflowing-sub.rs
src/test/run-make/a-b-a-linker-guard/Makefile [new file with mode: 0644]
src/test/run-make/a-b-a-linker-guard/a.rs [new file with mode: 0644]
src/test/run-make/a-b-a-linker-guard/b.rs [new file with mode: 0644]
src/test/run-make/debug-assertions/debug.rs
src/test/run-make/execution-engine/test.rs
src/test/run-make/extern-fn-reachable/main.rs
src/test/run-make/extern-overrides-distribution/Makefile
src/test/run-make/issue-19371/foo.rs
src/test/run-make/issue-25581/test.c
src/test/run-make/issue-26006/Makefile
src/test/run-make/json-errors/Makefile
src/test/run-make/pretty-print-path-suffix/foo_method.pp
src/test/run-make/relocation-model/Makefile
src/test/run-make/reproducible-build/Makefile [new file with mode: 0644]
src/test/run-make/reproducible-build/reproducible-build-aux.rs [new file with mode: 0644]
src/test/run-make/reproducible-build/reproducible-build.rs [new file with mode: 0644]
src/test/run-make/rustdoc-json/Makefile [deleted file]
src/test/run-make/rustdoc-json/foo.rs [deleted file]
src/test/run-make/stable-symbol-names/Makefile [new file with mode: 0644]
src/test/run-make/stable-symbol-names/stable-symbol-names1.rs [new file with mode: 0644]
src/test/run-make/stable-symbol-names/stable-symbol-names2.rs [new file with mode: 0644]
src/test/run-make/target-specs/my-awesome-platform.json
src/test/run-make/test-harness/Makefile
src/test/run-pass-fulldeps/conditional-debug-macro-off.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/deprecated-derive.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/deriving-encodable-decodable-box.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/deriving-encodable-decodable-cell-refcell.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/deriving-global.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/dropck_tarena_sound_drop.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/empty-struct-braces-derive.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/extern-mod-syntax.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/issue-11881.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/issue-14021.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/issue-15924.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/issue-24972.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/issue-2804.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/issue-4016.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/issue-4036.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/linkage-visibility.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/logging-enabled-debug.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/logging-enabled.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/logging-right-crate.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/logging-separate-lines.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/regions-mock-tcx.rs [new file with mode: 0644]
src/test/run-pass-fulldeps/rust-log-filter.rs [new file with mode: 0644]
src/test/run-pass-valgrind/cast-enum-with-dtor.rs
src/test/run-pass/allocator-default.rs
src/test/run-pass/allocator-jemalloc.rs [deleted file]
src/test/run-pass/associated-const-match-patterns.rs
src/test/run-pass/atomic-compare_exchange.rs
src/test/run-pass/augmented-assignments.rs
src/test/run-pass/autoderef-privacy.rs [new file with mode: 0644]
src/test/run-pass/backtrace-debuginfo-aux.rs
src/test/run-pass/backtrace-debuginfo.rs
src/test/run-pass/backtrace.rs
src/test/run-pass/bench/issue-32062.rs [new file with mode: 0644]
src/test/run-pass/binary-heap-panic-safe.rs
src/test/run-pass/borrow-by-val-method-receiver.rs
src/test/run-pass/borrowck/borrowck-binding-mutbl.rs
src/test/run-pass/cabi-int-widening.rs [new file with mode: 0644]
src/test/run-pass/cci_nested_exe.rs
src/test/run-pass/coerce-unify.rs [new file with mode: 0644]
src/test/run-pass/conditional-debug-macro-off.rs [deleted file]
src/test/run-pass/const-bitshift-rhs-inference.rs [new file with mode: 0644]
src/test/run-pass/const-fn.rs
src/test/run-pass/const-negation.rs [new file with mode: 0644]
src/test/run-pass/const-str-ptr.rs
src/test/run-pass/default-associated-types.rs
src/test/run-pass/deprecated-derive.rs [deleted file]
src/test/run-pass/derive-no-std.rs
src/test/run-pass/deriving-encodable-decodable-box.rs [deleted file]
src/test/run-pass/deriving-encodable-decodable-cell-refcell.rs [deleted file]
src/test/run-pass/deriving-global.rs [deleted file]
src/test/run-pass/deriving-hash.rs
src/test/run-pass/dropck_tarena_sound_drop.rs [deleted file]
src/test/run-pass/empty-struct-braces-derive.rs [deleted file]
src/test/run-pass/empty-struct-braces.rs
src/test/run-pass/empty-type-parameter-list.rs [new file with mode: 0644]
src/test/run-pass/enum-clike-ffi-as-int.rs
src/test/run-pass/enum-discrim-autosizing.rs
src/test/run-pass/enum-discrim-manual-sizing.rs
src/test/run-pass/env-funky-keys.rs
src/test/run-pass/extern-mod-syntax.rs [deleted file]
src/test/run-pass/float_math.rs [new file with mode: 0644]
src/test/run-pass/fn-item-type-zero-sized.rs [new file with mode: 0644]
src/test/run-pass/foreign-dupe.rs
src/test/run-pass/generic-ivec-leak.rs
src/test/run-pass/ifmt.rs
src/test/run-pass/intrinsic-atomics.rs
src/test/run-pass/issue-11881.rs [deleted file]
src/test/run-pass/issue-12860.rs
src/test/run-pass/issue-13507-2.rs
src/test/run-pass/issue-13763.rs
src/test/run-pass/issue-14021.rs [deleted file]
src/test/run-pass/issue-15924.rs [deleted file]
src/test/run-pass/issue-17121.rs
src/test/run-pass/issue-17718.rs
src/test/run-pass/issue-17816.rs
src/test/run-pass/issue-18514.rs
src/test/run-pass/issue-20427.rs [new file with mode: 0644]
src/test/run-pass/issue-20797.rs
src/test/run-pass/issue-21058.rs
src/test/run-pass/issue-21400.rs
src/test/run-pass/issue-22577.rs
src/test/run-pass/issue-23338-ensure-param-drop-order.rs
src/test/run-pass/issue-23550.rs
src/test/run-pass/issue-23833.rs [new file with mode: 0644]
src/test/run-pass/issue-24972.rs [deleted file]
src/test/run-pass/issue-2631-b.rs
src/test/run-pass/issue-26997.rs [new file with mode: 0644]
src/test/run-pass/issue-2723-b.rs
src/test/run-pass/issue-2804.rs [deleted file]
src/test/run-pass/issue-28950.rs [new file with mode: 0644]
src/test/run-pass/issue-29663.rs [new file with mode: 0644]
src/test/run-pass/issue-30615.rs [new file with mode: 0644]
src/test/run-pass/issue-31597.rs [new file with mode: 0644]
src/test/run-pass/issue-31776.rs [new file with mode: 0644]
src/test/run-pass/issue-32292.rs [new file with mode: 0644]
src/test/run-pass/issue-32324.rs [new file with mode: 0644]
src/test/run-pass/issue-32389.rs [new file with mode: 0644]
src/test/run-pass/issue-32518.rs [new file with mode: 0644]
src/test/run-pass/issue-33461.rs [new file with mode: 0644]
src/test/run-pass/issue-4016.rs [deleted file]
src/test/run-pass/issue-4036.rs [deleted file]
src/test/run-pass/issue-8460.rs
src/test/run-pass/ivec-pass-by-value.rs
src/test/run-pass/ivec-tag.rs
src/test/run-pass/linkage-visibility.rs [deleted file]
src/test/run-pass/lint-dead-code-associated-type.rs [new file with mode: 0644]
src/test/run-pass/logging-enabled-debug.rs [deleted file]
src/test/run-pass/logging-enabled.rs [deleted file]
src/test/run-pass/logging-right-crate.rs [deleted file]
src/test/run-pass/logging-separate-lines.rs [deleted file]
src/test/run-pass/macro-delimiter-significance.rs
src/test/run-pass/match-arm-statics.rs
src/test/run-pass/minmax-stability-issue-23687.rs
src/test/run-pass/mir_fat_ptr_drop.rs [new file with mode: 0644]
src/test/run-pass/mir_raw_fat_ptr.rs
src/test/run-pass/mir_refs_correct.rs
src/test/run-pass/mir_trans_critical_edge.rs [new file with mode: 0644]
src/test/run-pass/multi-panic.rs
src/test/run-pass/nullable-pointer-ffi-compat.rs
src/test/run-pass/nullable-pointer-iotareduction.rs
src/test/run-pass/num-wrapping.rs
src/test/run-pass/panic-handler-chain.rs
src/test/run-pass/panic-handler-flail-wildly.rs
src/test/run-pass/panic-handler-set-twice.rs
src/test/run-pass/panic-safe.rs
src/test/run-pass/parser-unicode-whitespace.rs [new file with mode: 0644]
src/test/run-pass/placement-new-arena.rs [deleted file]
src/test/run-pass/process-status-inherits-stdin.rs [new file with mode: 0644]
src/test/run-pass/range.rs
src/test/run-pass/range_inclusive.rs [new file with mode: 0644]
src/test/run-pass/range_inclusive_gate.rs [new file with mode: 0644]
src/test/run-pass/regions-dependent-autoslice.rs
src/test/run-pass/regions-lub-ref-ref-rc.rs [new file with mode: 0644]
src/test/run-pass/regions-mock-tcx.rs [deleted file]
src/test/run-pass/rfc1445/eq-allows-match-on-ty-in-macro.rs [new file with mode: 0644]
src/test/run-pass/rfc1445/eq-allows-match.rs [new file with mode: 0644]
src/test/run-pass/rust-log-filter.rs [deleted file]
src/test/run-pass/send_str_hashmap.rs
src/test/run-pass/send_str_treemap.rs
src/test/run-pass/simd-intrinsic-generic-elements.rs
src/test/run-pass/specialization/README.md [new file with mode: 0644]
src/test/run-pass/specialization/specialization-allowed-cross-crate.rs [new file with mode: 0644]
src/test/run-pass/specialization/specialization-assoc-fns.rs [new file with mode: 0644]
src/test/run-pass/specialization/specialization-basics.rs [new file with mode: 0644]
src/test/run-pass/specialization/specialization-cross-crate-defaults.rs [new file with mode: 0644]
src/test/run-pass/specialization/specialization-cross-crate-no-gate.rs [new file with mode: 0644]
src/test/run-pass/specialization/specialization-cross-crate.rs [new file with mode: 0644]
src/test/run-pass/specialization/specialization-default-methods.rs [new file with mode: 0644]
src/test/run-pass/specialization/specialization-on-projection.rs [new file with mode: 0644]
src/test/run-pass/specialization/specialization-out-of-order.rs [new file with mode: 0644]
src/test/run-pass/specialization/specialization-overlap-projection.rs [new file with mode: 0644]
src/test/run-pass/specialization/specialization-projection-alias.rs [new file with mode: 0644]
src/test/run-pass/specialization/specialization-projection.rs [new file with mode: 0644]
src/test/run-pass/specialization/specialization-super-traits.rs [new file with mode: 0644]
src/test/run-pass/specialization/specialization-translate-projections-with-params.rs [new file with mode: 0644]
src/test/run-pass/specialization/specialization-translate-projections.rs [new file with mode: 0644]
src/test/run-pass/std-sync-right-kind-impls.rs
src/test/run-pass/super-fast-paren-parsing.rs
src/test/run-pass/transmute-from-fn-item-types.rs [new file with mode: 0644]
src/test/run-pass/try-macro.rs [new file with mode: 0644]
src/test/run-pass/try-operator-hygiene.rs [new file with mode: 0644]
src/test/run-pass/try-operator.rs [new file with mode: 0644]
src/test/run-pass/typeid-intrinsic.rs
src/test/run-pass/ufcs-polymorphic-paths.rs
src/test/run-pass/unique-ffi-symbols.rs [new file with mode: 0644]
src/test/run-pass/variadic-ffi.rs
src/test/run-pass/vec-concat.rs
src/test/run-pass/vec-push.rs
src/test/run-pass/vector-sort-panic-safe.rs
src/test/run-pass/volatile-fat-ptr.rs [new file with mode: 0644]
src/test/run-pass/xcrate-associated-type-defaults.rs
src/test/run-pass/zero-size-type-destructors.rs
src/test/rustdoc/inline_local/issue-28537.rs [new file with mode: 0644]
src/test/rustdoc/inline_local/issue-32343.rs [new file with mode: 0644]
src/test/rustdoc/inline_local/please_inline.rs [new file with mode: 0644]
src/test/rustdoc/issue-15347.rs
src/test/rustdoc/issue-25001.rs
src/test/rustdoc/issue-27104.rs [new file with mode: 0644]
src/test/rustdoc/issue-27362.rs
src/test/rustdoc/issue-28478.rs [new file with mode: 0644]
src/test/rustdoc/issue-32395.rs [new file with mode: 0644]
src/test/rustdoc/recursion1.rs
src/test/rustdoc/recursion2.rs
src/test/rustdoc/recursion3.rs
src/test/rustdoc/redirect.rs [new file with mode: 0644]
src/test/rustdoc/structfields.rs [new file with mode: 0644]
src/tools/cargotest/Cargo.lock [new file with mode: 0644]
src/tools/cargotest/Cargo.toml [new file with mode: 0644]
src/tools/cargotest/lockfiles/iron-Cargo.lock [new file with mode: 0644]
src/tools/cargotest/main.rs [new file with mode: 0644]
src/tools/error_index_generator/Cargo.lock [new file with mode: 0644]
src/tools/error_index_generator/Cargo.toml [new file with mode: 0644]
src/tools/error_index_generator/main.rs [new file with mode: 0644]
src/tools/linkchecker/Cargo.lock [new file with mode: 0644]
src/tools/linkchecker/Cargo.toml [new file with mode: 0644]
src/tools/linkchecker/main.rs [new file with mode: 0644]
src/tools/rustbook/Cargo.lock [new file with mode: 0644]
src/tools/rustbook/Cargo.toml [new file with mode: 0644]
src/tools/rustbook/book.rs [new file with mode: 0644]
src/tools/rustbook/build.rs [new file with mode: 0644]
src/tools/rustbook/error.rs [new file with mode: 0644]
src/tools/rustbook/help.rs [new file with mode: 0644]
src/tools/rustbook/main.rs [new file with mode: 0644]
src/tools/rustbook/serve.rs [new file with mode: 0644]
src/tools/rustbook/static/rustbook.css [new file with mode: 0644]
src/tools/rustbook/static/rustbook.js [new file with mode: 0644]
src/tools/rustbook/subcommand.rs [new file with mode: 0644]
src/tools/rustbook/term.rs [new file with mode: 0644]
src/tools/rustbook/test.rs [new file with mode: 0644]

index 609bf03fb6c91ba8e2f978bb62e0d5dedcfb693d..10598e78ec5cb50c58f11e254d8c17362edf5e72 100644 (file)
@@ -71,7 +71,8 @@ which includes important information about what platform you're on, what
 version of Rust you're using, etc.
 
 Sometimes, a backtrace is helpful, and so including that is nice. To get
-a backtrace, set the `RUST_BACKTRACE` environment variable. The easiest way
+a backtrace, set the `RUST_BACKTRACE` environment variable to a value
+other than `0`. The easiest way
 to do this is to invoke `rustc` like this:
 
 ```bash
@@ -132,8 +133,8 @@ Some common make targets are:
 - `make check-stage1-std NO_REBUILD=1` - test the standard library without
   rebuilding the entire compiler
 - `make check TESTNAME=<substring-of-test-name>` - Run a matching set of tests.
-  - `TESTNAME` should be a substring of the tests to match against e.g. it could 
-    be the fully qualified test name, or just a part of it. 
+  - `TESTNAME` should be a substring of the tests to match against, e.g. it could
+    be the fully qualified test name, or just a part of it.
     `TESTNAME=collections::hash::map::test_map::test_capacity_not_less_than_len`
     or `TESTNAME=test_capacity_not_less_than_len`.
 - `make check-stage1-rpass TESTNAME=<substring-of-test-name>` - Run a single
index a83f51e157155cfcdd893f643549506de3d9c710..1acf5fd1f3df8216e80d04887a5150b47813dd9d 100644 (file)
--- a/README.md
+++ b/README.md
@@ -77,7 +77,7 @@ build.
 
 Download [MinGW from
 here](http://mingw-w64.org/doku.php/download/mingw-builds), and choose the
-`threads=win32,exceptions=dwarf/seh` flavor when installing. After installing,
+`version=4.9.x,threads=win32,exceptions=dwarf/seh` flavor when installing. Also, make sure to install to a path without spaces in it. After installing,
 add its `bin` directory to your `PATH`. This is due to [#28260](https://github.com/rust-lang/rust/issues/28260); in the future,
 installing from pacman should be just fine.
 
@@ -177,10 +177,11 @@ To contribute to Rust, please see [CONTRIBUTING](CONTRIBUTING.md).
 Rust has an [IRC] culture and most real-time collaboration happens in a
 variety of channels on Mozilla's IRC network, irc.mozilla.org. The
 most popular channel is [#rust], a venue for general discussion about
-Rust, and a good place to ask for help.
+Rust. A good place to ask for help is [#rust-beginners].
 
 [IRC]: https://en.wikipedia.org/wiki/Internet_Relay_Chat
 [#rust]: irc://irc.mozilla.org/rust
+[#rust-beginners]: irc://irc.mozilla.org/rust-beginners
 
 ## License
 
index 0871ae3eaf4a8237c54aa999ea011eb5bc3a1830..b19f4b07a316124680bafa155e23bf8cd2619dd8 100644 (file)
@@ -1,3 +1,291 @@
+Version 1.9.0 (2016-05-26)
+==========================
+
+Language
+--------
+
+* The `#[deprecated]` attribute when applied to an API will generate
+  warnings when used. The warnings may be suppressed with
+  `#[allow(deprecated)]`. [RFC 1270].
+* [`fn` item types are zero sized, and each `fn` names a unique
+  type][1.9fn]. This will break code that transmutes `fn`s, so calling
+  `transmute` on a `fn` type will generate a warning for a few release cycles,
+  then will be converted to an error.
+* [Field and method resolution understand visibility, so private
+  fields and methods cannot prevent the proper use of public fields
+  and methods][1.9fv].
+* [The parser considers unicode codepoints in the
+  `PATTERN_WHITE_SPACE` category to be whitespace][1.9ws].
+
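To make the `#[deprecated]` item above concrete, here is a minimal sketch of how the stabilized attribute behaves; the `old_api`/`new_api` names are invented for illustration and are not part of the release:

```rust
// Marking an item deprecated: callers get a warning, not an error.
#[deprecated(since = "1.9.0", note = "use `new_api` instead")]
pub fn old_api() {}

pub fn new_api() {}

// The warning can be suppressed where the use is intentional.
#[allow(deprecated)]
fn call_old_quietly() {
    old_api(); // no warning: the `deprecated` lint is allowed on this item
}

fn main() {
    new_api();
    call_old_quietly();
    old_api(); // warning: use of deprecated item `old_api`
}
```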
+Stabilized APIs
+---------------
+
+* [`std::panic`]
+* [`std::panic::catch_unwind`][] (renamed from `recover`)
+* [`std::panic::resume_unwind`][] (renamed from `propagate`)
+* [`std::panic::AssertUnwindSafe`][] (renamed from `AssertRecoverSafe`)
+* [`std::panic::UnwindSafe`][] (renamed from `RecoverSafe`)
+* [`str::is_char_boundary`]
+* [`<*const T>::as_ref`]
+* [`<*mut T>::as_ref`]
+* [`<*mut T>::as_mut`]
+* [`AsciiExt::make_ascii_uppercase`]
+* [`AsciiExt::make_ascii_lowercase`]
+* [`char::decode_utf16`]
+* [`char::DecodeUtf16`]
+* [`char::DecodeUtf16Error`]
+* [`char::DecodeUtf16Error::unpaired_surrogate`]
+* [`BTreeSet::take`]
+* [`BTreeSet::replace`]
+* [`BTreeSet::get`]
+* [`HashSet::take`]
+* [`HashSet::replace`]
+* [`HashSet::get`]
+* [`OsString::with_capacity`]
+* [`OsString::clear`]
+* [`OsString::capacity`]
+* [`OsString::reserve`]
+* [`OsString::reserve_exact`]
+* [`OsStr::is_empty`]
+* [`OsStr::len`]
+* [`std::os::unix::thread`]
+* [`RawPthread`]
+* [`JoinHandleExt`]
+* [`JoinHandleExt::as_pthread_t`]
+* [`JoinHandleExt::into_pthread_t`]
+* [`HashSet::hasher`]
+* [`HashMap::hasher`]
+* [`CommandExt::exec`]
+* [`File::try_clone`]
+* [`SocketAddr::set_ip`]
+* [`SocketAddr::set_port`]
+* [`SocketAddrV4::set_ip`]
+* [`SocketAddrV4::set_port`]
+* [`SocketAddrV6::set_ip`]
+* [`SocketAddrV6::set_port`]
+* [`SocketAddrV6::set_flowinfo`]
+* [`SocketAddrV6::set_scope_id`]
+* [`slice::copy_from_slice`]
+* [`ptr::read_volatile`]
+* [`ptr::write_volatile`]
+* [`OpenOptions::create_new`]
+* [`TcpStream::set_nodelay`]
+* [`TcpStream::nodelay`]
+* [`TcpStream::set_ttl`]
+* [`TcpStream::ttl`]
+* [`TcpStream::set_only_v6`]
+* [`TcpStream::only_v6`]
+* [`TcpStream::take_error`]
+* [`TcpStream::set_nonblocking`]
+* [`TcpListener::set_ttl`]
+* [`TcpListener::ttl`]
+* [`TcpListener::set_only_v6`]
+* [`TcpListener::only_v6`]
+* [`TcpListener::take_error`]
+* [`TcpListener::set_nonblocking`]
+* [`UdpSocket::set_broadcast`]
+* [`UdpSocket::broadcast`]
+* [`UdpSocket::set_multicast_loop_v4`]
+* [`UdpSocket::multicast_loop_v4`]
+* [`UdpSocket::set_multicast_ttl_v4`]
+* [`UdpSocket::multicast_ttl_v4`]
+* [`UdpSocket::set_multicast_loop_v6`]
+* [`UdpSocket::multicast_loop_v6`]
+* [`UdpSocket::set_multicast_ttl_v6`]
+* [`UdpSocket::multicast_ttl_v6`]
+* [`UdpSocket::set_ttl`]
+* [`UdpSocket::ttl`]
+* [`UdpSocket::set_only_v6`]
+* [`UdpSocket::only_v6`]
+* [`UdpSocket::join_multicast_v4`]
+* [`UdpSocket::join_multicast_v6`]
+* [`UdpSocket::leave_multicast_v4`]
+* [`UdpSocket::leave_multicast_v6`]
+* [`UdpSocket::take_error`]
+* [`UdpSocket::connect`]
+* [`UdpSocket::send`]
+* [`UdpSocket::recv`]
+* [`UdpSocket::set_nonblocking`]
+
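As a small, hedged illustration of the newly stabilized panic API above, the following sketch shows the intended use of `std::panic::catch_unwind`; it assumes nothing beyond the items listed in these notes:

```rust
use std::panic;

fn main() {
    // A panic inside the closure is caught and surfaced as an Err value
    // instead of unwinding out of main (the panic message is still printed).
    let caught = panic::catch_unwind(|| {
        panic!("boom");
    });
    assert!(caught.is_err());

    // A closure that returns normally yields Ok with its return value.
    let fine = panic::catch_unwind(|| 40 + 2);
    assert_eq!(fine.unwrap(), 42);
}
```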
+Libraries
+---------
+
+* [`std::sync::Once` is poisoned if its initialization function
+  fails][1.9o].
+* [`cell::Ref` and `cell::RefMut` can contain unsized types][1.9cu].
+* [Most types implement `fmt::Debug`][1.9db].
+* [The default buffer size used by `BufReader` and `BufWriter` was
+  reduced to 8K, from 64K][1.9bf]. This is in line with the buffer size
+  used by other languages.
+* [`Instant`, `SystemTime` and `Duration` implement `+=` and `-=`.
+  `Duration` additionally implements `*=` and `/=`][1.9ta].
+* [`Skip` is a `DoubleEndedIterator`][1.9sk].
+* [`From<[u8; 4]>` is implemented for `Ipv4Addr`][1.9fi].
+* [`Chain` implements `BufRead`][1.9ch].
+* [`HashMap`, `HashSet` and iterators are covariant][1.9hc].
+
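Two of the library changes above lend themselves to a short example. This is a sketch only, assuming just the `From<[u8; 4]>` impl for `Ipv4Addr` and the new compound-assignment operators on `Duration` described in these notes:

```rust
use std::net::Ipv4Addr;
use std::time::Duration;

fn main() {
    // `Ipv4Addr` can now be built directly from a four-byte array.
    let addr = Ipv4Addr::from([127, 0, 0, 1]);
    assert_eq!(addr, Ipv4Addr::new(127, 0, 0, 1));

    // `Duration` gains `+=`/`-=`, plus `*=` and `/=` with an integer factor.
    let mut d = Duration::from_secs(1);
    d += Duration::from_millis(500);
    d *= 2;
    assert_eq!(d, Duration::from_secs(3));
}
```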
+Cargo
+-----
+
+* [Cargo can now run concurrently][1.9cc].
+* [Top-level overrides allow specific revisions of crates to be
+  overridden through the entire crate graph][1.9ct].  This is intended
+  to make upgrades easier for large projects, by allowing crates to be
+  forked temporarily until they've been upgraded and republished.
+* [Cargo exports a `CARGO_PKG_AUTHORS` environment variable][1.9cp].
+* [Cargo will pass the contents of the `RUSTFLAGS` variable to `rustc`
+  on the commandline][1.9cf]. `rustc` arguments can also be specified
+  in the `build.rustflags` configuration key.
+
+Performance
+-----------
+
+* [During type unification, the complexity of comparing variables for
+  equivalence was reduced from `O(n!)` to `O(n)`][1.9tu]. This leads
+  to major compile-time improvements in some scenarios.
+* [`ToString` is specialized for `str`, giving it the same performance
+  as `to_owned`][1.9ts].
+* [Spawning processes with `Command::output` no longer creates extra
+  threads][1.9sp].
+* [`#[derive(PartialEq)]` and `#[derive(PartialOrd)]` emit less code
+  for C-like enums][1.9cl].
+
+Misc
+----
+
+* [Passing the `--quiet` flag to a test runner will produce
+  much-abbreviated output][1.9q].
+* The Rust Project now publishes std binaries for the
+  `mips-unknown-linux-musl`, `mipsel-unknown-linux-musl`, and
+  `i586-pc-windows-msvc` targets.
+
+Compatibility Notes
+-------------------
+
+* [`std::sync::Once` is poisoned if its initialization function
+  fails][1.9o].
+* [It is illegal to define methods with the same name in overlapping
+  inherent `impl` blocks][1.9sn].
+* [`fn` item types are zero sized, and each `fn` names a unique
+  type][1.9fn]. This will break code that transmutes `fn`s, so calling
+  `transmute` on a `fn` type will generate a warning for a few release cycles,
+  then will be converted to an error.
+* [Improvements to const evaluation may trigger new errors when integer
+  literals are out of range][1.9ce].
+
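For the inherent-`impl` compatibility note above, this is a hedged sketch of the kind of code that 1.9 now rejects; the `Widget` type and `id` method are invented for illustration:

```rust
struct Widget;

impl Widget {
    fn id(&self) -> u32 { 1 }
}

// A second inherent impl for the same type that defines a method with the
// same name is now rejected with a duplicate-definition error.
impl Widget {
    fn id(&self) -> u32 { 2 }
}
```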
+
+[1.9bf]: https://github.com/rust-lang/rust/pull/32695
+[1.9cc]: https://github.com/rust-lang/cargo/pull/2486
+[1.9ce]: https://github.com/rust-lang/rust/pull/30587
+[1.9cf]: https://github.com/rust-lang/cargo/pull/2241
+[1.9ch]: https://github.com/rust-lang/rust/pull/32541
+[1.9cl]: https://github.com/rust-lang/rust/pull/31977
+[1.9cp]: https://github.com/rust-lang/cargo/pull/2465
+[1.9ct]: https://github.com/rust-lang/cargo/pull/2385
+[1.9cu]: https://github.com/rust-lang/rust/pull/32652
+[1.9db]: https://github.com/rust-lang/rust/pull/32054
+[1.9fi]: https://github.com/rust-lang/rust/pull/32050
+[1.9fn]: https://github.com/rust-lang/rust/pull/31710
+[1.9fv]: https://github.com/rust-lang/rust/pull/31938
+[1.9hc]: https://github.com/rust-lang/rust/pull/32635
+[1.9o]: https://github.com/rust-lang/rust/pull/32325
+[1.9q]: https://github.com/rust-lang/rust/pull/31887
+[1.9sk]: https://github.com/rust-lang/rust/pull/31700
+[1.9sn]: https://github.com/rust-lang/rust/pull/31925
+[1.9sp]: https://github.com/rust-lang/rust/pull/31618
+[1.9ta]: https://github.com/rust-lang/rust/pull/32448
+[1.9ts]: https://github.com/rust-lang/rust/pull/32586
+[1.9tu]: https://github.com/rust-lang/rust/pull/32062
+[1.9ws]: https://github.com/rust-lang/rust/pull/29734
+[RFC 1270]: https://github.com/rust-lang/rfcs/blob/master/text/1270-deprecation.md
+[`<*const T>::as_ref`]: http://doc.rust-lang.org/nightly/std/primitive.pointer.html#method.as_ref
+[`<*mut T>::as_mut`]: http://doc.rust-lang.org/nightly/std/primitive.pointer.html#method.as_mut
+[`<*mut T>::as_ref`]: http://doc.rust-lang.org/nightly/std/primitive.pointer.html#method.as_ref
+[`slice::copy_from_slice`]: http://doc.rust-lang.org/nightly/std/primitive.slice.html#method.copy_from_slice
+[`AsciiExt::make_ascii_lowercase`]: http://doc.rust-lang.org/nightly/std/ascii/trait.AsciiExt.html#tymethod.make_ascii_lowercase
+[`AsciiExt::make_ascii_uppercase`]: http://doc.rust-lang.org/nightly/std/ascii/trait.AsciiExt.html#tymethod.make_ascii_uppercase
+[`BTreeSet::get`]: http://doc.rust-lang.org/nightly/collections/btree/set/struct.BTreeSet.html#method.get
+[`BTreeSet::replace`]: http://doc.rust-lang.org/nightly/collections/btree/set/struct.BTreeSet.html#method.replace
+[`BTreeSet::take`]: http://doc.rust-lang.org/nightly/collections/btree/set/struct.BTreeSet.html#method.take
+[`CommandExt::exec`]: http://doc.rust-lang.org/nightly/std/os/unix/process/trait.CommandExt.html#tymethod.exec
+[`File::try_clone`]: http://doc.rust-lang.org/nightly/std/fs/struct.File.html#method.try_clone
+[`HashMap::hasher`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashMap.html#method.hasher
+[`HashSet::get`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashSet.html#method.get
+[`HashSet::hasher`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashSet.html#method.hasher
+[`HashSet::replace`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashSet.html#method.replace
+[`HashSet::take`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashSet.html#method.take
+[`JoinHandleExt::as_pthread_t`]: http://doc.rust-lang.org/nightly/std/os/unix/thread/trait.JoinHandleExt.html#tymethod.as_pthread_t
+[`JoinHandleExt::into_pthread_t`]: http://doc.rust-lang.org/nightly/std/os/unix/thread/trait.JoinHandleExt.html#tymethod.into_pthread_t
+[`JoinHandleExt`]: http://doc.rust-lang.org/nightly/std/os/unix/thread/trait.JoinHandleExt.html
+[`OpenOptions::create_new`]: http://doc.rust-lang.org/nightly/std/fs/struct.OpenOptions.html#method.create_new
+[`OsStr::is_empty`]: http://doc.rust-lang.org/nightly/std/ffi/struct.OsStr.html#method.is_empty
+[`OsStr::len`]: http://doc.rust-lang.org/nightly/std/ffi/struct.OsStr.html#method.len
+[`OsString::capacity`]: http://doc.rust-lang.org/nightly/std/ffi/struct.OsString.html#method.capacity
+[`OsString::clear`]: http://doc.rust-lang.org/nightly/std/ffi/struct.OsString.html#method.clear
+[`OsString::reserve_exact`]: http://doc.rust-lang.org/nightly/std/ffi/struct.OsString.html#method.reserve_exact
+[`OsString::reserve`]: http://doc.rust-lang.org/nightly/std/ffi/struct.OsString.html#method.reserve
+[`OsString::with_capacity`]: http://doc.rust-lang.org/nightly/std/ffi/struct.OsString.html#method.with_capacity
+[`RawPthread`]: http://doc.rust-lang.org/nightly/std/os/unix/thread/type.RawPthread.html
+[`SocketAddr::set_ip`]: http://doc.rust-lang.org/nightly/std/net/enum.SocketAddr.html#method.set_ip
+[`SocketAddr::set_port`]: http://doc.rust-lang.org/nightly/std/net/enum.SocketAddr.html#method.set_port
+[`SocketAddrV4::set_ip`]: http://doc.rust-lang.org/nightly/std/net/struct.SocketAddrV4.html#method.set_ip
+[`SocketAddrV4::set_port`]: http://doc.rust-lang.org/nightly/std/net/struct.SocketAddrV4.html#method.set_port
+[`SocketAddrV6::set_flowinfo`]: http://doc.rust-lang.org/nightly/std/net/struct.SocketAddrV6.html#method.set_flowinfo
+[`SocketAddrV6::set_ip`]: http://doc.rust-lang.org/nightly/std/net/struct.SocketAddrV6.html#method.set_ip
+[`SocketAddrV6::set_port`]: http://doc.rust-lang.org/nightly/std/net/struct.SocketAddrV6.html#method.set_port
+[`SocketAddrV6::set_scope_id`]: http://doc.rust-lang.org/nightly/std/net/struct.SocketAddrV6.html#method.set_scope_id
+[`TcpListener::only_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpListener.html#method.only_v6
+[`TcpListener::set_nonblocking`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpListener.html#method.set_nonblocking
+[`TcpListener::set_only_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpListener.html#method.set_only_v6
+[`TcpListener::set_ttl`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpListener.html#method.set_ttl
+[`TcpListener::take_error`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpListener.html#method.take_error
+[`TcpListener::ttl`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpListener.html#method.ttl
+[`TcpStream::nodelay`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.nodelay
+[`TcpStream::only_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.only_v6
+[`TcpStream::set_nodelay`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.set_nodelay
+[`TcpStream::set_nonblocking`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.set_nonblocking
+[`TcpStream::set_only_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.set_only_v6
+[`TcpStream::set_ttl`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.set_ttl
+[`TcpStream::take_error`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.take_error
+[`TcpStream::ttl`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.ttl
+[`UdpSocket::broadcast`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.broadcast
+[`UdpSocket::connect`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.connect
+[`UdpSocket::join_multicast_v4`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.join_multicast_v4
+[`UdpSocket::join_multicast_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.join_multicast_v6
+[`UdpSocket::leave_multicast_v4`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.leave_multicast_v4
+[`UdpSocket::leave_multicast_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.leave_multicast_v6
+[`UdpSocket::multicast_loop_v4`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.multicast_loop_v4
+[`UdpSocket::multicast_loop_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.multicast_loop_v6
+[`UdpSocket::multicast_ttl_v4`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.multicast_ttl_v4
+[`UdpSocket::multicast_ttl_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.multicast_ttl_v6
+[`UdpSocket::only_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.only_v6
+[`UdpSocket::recv`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.recv
+[`UdpSocket::send`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.send
+[`UdpSocket::set_broadcast`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_broadcast
+[`UdpSocket::set_multicast_loop_v4`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_multicast_loop_v4
+[`UdpSocket::set_multicast_loop_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_multicast_loop_v6
+[`UdpSocket::set_multicast_ttl_v4`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_multicast_ttl_v4
+[`UdpSocket::set_multicast_ttl_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_multicast_ttl_v6
+[`UdpSocket::set_nonblocking`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_nonblocking
+[`UdpSocket::set_only_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_only_v6
+[`UdpSocket::set_ttl`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_ttl
+[`UdpSocket::take_error`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.take_error
+[`UdpSocket::ttl`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.ttl
+[`char::DecodeUtf16Error::unpaired_surrogate`]: http://doc.rust-lang.org/nightly/std/char/struct.DecodeUtf16Error.html#method.unpaired_surrogate
+[`char::DecodeUtf16Error`]: http://doc.rust-lang.org/nightly/std/char/struct.DecodeUtf16Error.html
+[`char::DecodeUtf16`]: http://doc.rust-lang.org/nightly/std/char/struct.DecodeUtf16.html
+[`char::decode_utf16`]: http://doc.rust-lang.org/nightly/std/char/fn.decode_utf16.html
+[`ptr::read_volatile`]: http://doc.rust-lang.org/nightly/std/ptr/fn.read_volatile.html
+[`ptr::write_volatile`]: http://doc.rust-lang.org/nightly/std/ptr/fn.write_volatile.html
+[`std::os::unix::thread`]: http://doc.rust-lang.org/nightly/std/os/unix/thread/index.html
+[`std::panic::AssertUnwindSafe`]: http://doc.rust-lang.org/nightly/std/panic/struct.AssertUnwindSafe.html
+[`std::panic::UnwindSafe`]: http://doc.rust-lang.org/nightly/std/panic/trait.UnwindSafe.html
+[`std::panic::catch_unwind`]: http://doc.rust-lang.org/nightly/std/panic/fn.catch_unwind.html
+[`std::panic::resume_unwind`]: http://doc.rust-lang.org/nightly/std/panic/fn.resume_unwind.html
+[`std::panic`]: http://doc.rust-lang.org/nightly/std/panic/index.html
+[`str::is_char_boundary`]: http://doc.rust-lang.org/nightly/std/primitive.str.html#method.is_char_boundary
+
+
 Version 1.8.0 (2016-04-14)
 ==========================
 
@@ -209,16 +497,6 @@ Compatibility Notes
 Version 1.7.0 (2016-03-03)
 ==========================
 
-Language
---------
-
-* Soundness fixes to the interactions between associated types and
-  lifetimes, specified in [RFC 1214], [now generate errors][1.7sf] for
-  code that violates the new rules. This is a significant change that
-  is known to break existing code, so it has emitted warnings for the
-  new error cases since 1.4 to give crate authors time to adapt. The
-  details of what is changing are subtle; read the RFC for more.
-
 Libraries
 ---------
 
@@ -267,6 +545,17 @@ Libraries
     * [`IntoStringError::into_cstring`]
     * [`IntoStringError::utf8_error`]
     * `Error for IntoStringError`
+  * Hashing
+    * [`std::hash::BuildHasher`]
+    * [`BuildHasher::Hasher`]
+    * [`BuildHasher::build_hasher`]
+    * [`std::hash::BuildHasherDefault`]
+    * [`HashMap::with_hasher`]
+    * [`HashMap::with_capacity_and_hasher`]
+    * [`HashSet::with_hasher`]
+    * [`HashSet::with_capacity_and_hasher`]
+    * [`std::collections::hash_map::RandomState`]
+    * [`RandomState::new`]
 * [Validating UTF-8 is faster by a factor of between 7 and 14x for
   ASCII input][1.7utf8]. This means that creating `String`s and `str`s
   from bytes is faster.
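The hashing constructors added to the 1.7 notes above can be exercised with a short sketch; this assumes only `HashMap::with_hasher` and `RandomState::new` as documented here:

```rust
use std::collections::HashMap;
use std::collections::hash_map::RandomState;

fn main() {
    // Explicitly supply the default RandomState hasher via the newly
    // stabilized with_hasher constructor.
    let mut map: HashMap<&str, u32> = HashMap::with_hasher(RandomState::new());
    map.insert("answer", 42);
    assert_eq!(map["answer"], 42);
}
```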
@@ -288,9 +577,6 @@ Libraries
 Misc
 ----
 
-* [The `--error-format=json` flag to `rustc` causes it to emit errors
-  in JSON format][1.7j]. This is an unstable flag and so also requires
-  the `-Z unstable-options` flag.
 * [When running tests with `--test`, rustdoc will pass `--cfg`
   arguments to the compiler][1.7dt].
 * [The compiler is built with RPATH information by default][1.7rpa].
@@ -312,6 +598,12 @@ Cargo
 Compatibility Notes
 -------------------
 
+* Soundness fixes to the interactions between associated types and
+  lifetimes, specified in [RFC 1214], [now generate errors][1.7sf] for
+  code that violates the new rules. This is a significant change that
+  is known to break existing code, so it has emitted warnings for the
+  new error cases since 1.4 to give crate authors time to adapt. The
+  details of what is changing are subtle; read the RFC for more.
 * [Several bugs in the compiler's visibility calculations were
   fixed][1.7v]. Since this was found to break significant amounts of
   code, the new errors will be emitted as warnings for several release
@@ -320,8 +612,8 @@ Compatibility Notes
   that were not intended. In this release, [defaulted type parameters
   appearing outside of type definitions will generate a
   warning][1.7d], which will become an error in future releases.
-* [Parsing "." as a float results in an error instead of
-  0][1.7p]. That is, `".".parse::<f32>()` returns `Err`, not `Ok(0)`.
+* [Parsing "." as a float results in an error instead of 0][1.7p].
+  That is, `".".parse::<f32>()` returns `Err`, not `Ok(0.0)`.
 * [Borrows of closure parameters may not outlive the closure][1.7bc].
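
The `".".parse::<f32>()` change noted above is easy to verify directly (a minimal sketch):

    fn main() {
        // A bare "." is now rejected rather than silently treated as zero.
        assert!(".".parse::<f32>().is_err());
        // Ordinary numeric strings are unaffected.
        assert_eq!("2.5".parse::<f32>().unwrap(), 2.5);
    }
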
 
 [1.7a]: https://github.com/rust-lang/rust/pull/30928
@@ -334,7 +626,6 @@ Compatibility Notes
 [1.7dta]: https://github.com/rust-lang/rust/pull/30394
 [1.7f]: https://github.com/rust-lang/rust/pull/30672
 [1.7h]: https://github.com/rust-lang/rust/pull/30818
-[1.7j]: https://github.com/rust-lang/rust/pull/30711
 [1.7ll]: https://github.com/rust-lang/rust/pull/30663
 [1.7m]: https://github.com/rust-lang/rust/pull/30381
 [1.7p]: https://github.com/rust-lang/rust/pull/30681
@@ -345,11 +636,15 @@ Compatibility Notes
 [1.7utf8]: https://github.com/rust-lang/rust/pull/30740
 [1.7v]: https://github.com/rust-lang/rust/pull/29973
 [RFC 1214]: https://github.com/rust-lang/rfcs/blob/master/text/1214-projections-lifetimes-and-wf.md
-[`clone_from_slice`]: http://doc.rust-lang.org/nightly/std/primitive.slice.html#method.clone_from_slice
-[`sort_by_key`]: http://doc.rust-lang.org/nightly/std/primitive.slice.html#method.sort_by_key
+[`BuildHasher::Hasher`]: http://doc.rust-lang.org/nightly/std/hash/trait.Hasher.html
+[`BuildHasher::build_hasher`]: http://doc.rust-lang.org/nightly/std/hash/trait.BuildHasher.html#tymethod.build_hasher
 [`CString::into_bytes_with_nul`]: http://doc.rust-lang.org/nightly/std/ffi/struct.CString.html#method.into_bytes_with_nul
 [`CString::into_bytes`]: http://doc.rust-lang.org/nightly/std/ffi/struct.CString.html#method.into_bytes
 [`CString::into_string`]: http://doc.rust-lang.org/nightly/std/ffi/struct.CString.html#method.into_string
+[`HashMap::with_capacity_and_hasher`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashMap.html#method.with_capacity_and_hasher
+[`HashMap::with_hasher`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashMap.html#method.with_hasher
+[`HashSet::with_capacity_and_hasher`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashSet.html#method.with_capacity_and_hasher
+[`HashSet::with_hasher`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashSet.html#method.with_hasher
 [`IntoStringError::into_cstring`]: http://doc.rust-lang.org/nightly/std/ffi/struct.IntoStringError.html#method.into_cstring
 [`IntoStringError::utf8_error`]: http://doc.rust-lang.org/nightly/std/ffi/struct.IntoStringError.html#method.utf8_error
 [`Ipv4Addr::is_broadcast`]: http://doc.rust-lang.org/nightly/std/net/struct.Ipv4Addr.html#method.is_broadcast
@@ -362,10 +657,12 @@ Compatibility Notes
 [`Ipv6Addr::is_multicast`]: http://doc.rust-lang.org/nightly/std/net/struct.Ipv6Addr.html#method.is_multicast
 [`Ipv6Addr::is_unspecified`]: http://doc.rust-lang.org/nightly/std/net/struct.Ipv6Addr.html#method.is_unspecified
 [`Path::strip_prefix`]: http://doc.rust-lang.org/nightly/std/path/struct.Path.html#method.strip_prefix
+[`RandomState::new`]: http://doc.rust-lang.org/nightly/std/collections/hash_map/struct.RandomState.html#method.new
 [`String::as_mut_str`]: http://doc.rust-lang.org/nightly/std/string/struct.String.html#method.as_mut_str
 [`String::as_str`]: http://doc.rust-lang.org/nightly/std/string/struct.String.html#method.as_str
 [`Vec::as_mut_slice`]: http://doc.rust-lang.org/nightly/std/vec/struct.Vec.html#method.as_mut_slice
 [`Vec::as_slice`]: http://doc.rust-lang.org/nightly/std/vec/struct.Vec.html#method.as_slice
+[`clone_from_slice`]: http://doc.rust-lang.org/nightly/std/primitive.slice.html#method.clone_from_slice
 [`ffi::IntoStringError`]: http://doc.rust-lang.org/nightly/std/ffi/struct.IntoStringError.html
 [`i32::checked_neg`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.checked_neg
 [`i32::checked_rem`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.checked_rem
@@ -381,8 +678,13 @@ Compatibility Notes
 [`i32::overflowing_sub`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.overflowing_sub
 [`i32::saturating_mul`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.saturating_mul
 [`path::StripPrefixError`]: http://doc.rust-lang.org/nightly/std/path/struct.StripPrefixError.html
+[`sort_by_key`]: http://doc.rust-lang.org/nightly/std/primitive.slice.html#method.sort_by_key
+[`std::collections::hash_map::RandomState`]: http://doc.rust-lang.org/nightly/std/collections/hash_map/struct.RandomState.html
+[`std::hash::BuildHasherDefault`]: http://doc.rust-lang.org/nightly/std/hash/struct.BuildHasherDefault.html
+[`std::hash::BuildHasher`]: http://doc.rust-lang.org/nightly/std/hash/trait.BuildHasher.html
 [`u32::checked_neg`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.checked_neg
 [`u32::checked_rem`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.checked_rem
+[`u32::checked_neg`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.checked_neg
 [`u32::checked_shl`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.checked_shl
 [`u32::overflowing_add`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.overflowing_add
 [`u32::overflowing_div`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.overflowing_div
index 086dd9f74e080d0d257c5135313f9e47584870b7..fdef550a6451aa520079776b0e26d3a58be8314e 100755 (executable)
--- a/configure
+++ b/configure
@@ -607,6 +607,8 @@ opt dist-host-only 0 "only install bins for the host architecture"
 opt inject-std-version 1 "inject the current compiler version of libstd into programs"
 opt llvm-version-check 1 "check if the LLVM version is supported, build anyway"
 opt rustbuild 0 "use the rust and cargo based build system"
+opt orbit 0 "get MIR where it belongs - everywhere; most importantly, in orbit"
+opt codegen-tests 1 "run the src/test/codegen tests"
 
 # Optimization and debugging options. These may be overridden by the release channel, etc.
 opt_nosave optimize 1 "build optimized rust code"
@@ -713,17 +715,7 @@ if [ -n "$CFG_ENABLE_DEBUG_ASSERTIONS" ]; then putvar CFG_ENABLE_DEBUG_ASSERTION
 if [ -n "$CFG_ENABLE_DEBUGINFO" ]; then putvar CFG_ENABLE_DEBUGINFO; fi
 if [ -n "$CFG_ENABLE_DEBUG_JEMALLOC" ]; then putvar CFG_ENABLE_DEBUG_JEMALLOC; fi
 
-# A magic value that allows the compiler to use unstable features
-# during the bootstrap even when doing so would normally be an error
-# because of feature staging or because the build turns on
-# warnings-as-errors and unstable features default to warnings.  The
-# build has to match this key in an env var. Meant to be a mild
-# deterrent from users just turning on unstable features on the stable
-# channel.
-# Basing CFG_BOOTSTRAP_KEY on CFG_BOOTSTRAP_KEY lets it get picked up
-# during a Makefile reconfig.
-CFG_BOOTSTRAP_KEY="${CFG_BOOTSTRAP_KEY-`date +%H:%M:%S`}"
-putvar CFG_BOOTSTRAP_KEY
+if [ -n "$CFG_ENABLE_ORBIT" ]; then putvar CFG_ENABLE_ORBIT; fi
 
 step_msg "looking for build programs"
 
@@ -966,11 +958,11 @@ then
     LLVM_VERSION=$($LLVM_CONFIG --version)
 
     case $LLVM_VERSION in
-        (3.[5-8]*)
+        (3.[6-8]*)
             msg "found ok version of LLVM: $LLVM_VERSION"
             ;;
         (*)
-            err "bad LLVM version: $LLVM_VERSION, need >=3.5"
+            err "bad LLVM version: $LLVM_VERSION, need >=3.6"
             ;;
     esac
 fi
@@ -1031,7 +1023,7 @@ then
         if [ -n "$CFG_OSX_CLANG_VERSION" ]
         then
             case $CFG_OSX_CLANG_VERSION in
-                (7.0* | 7.1* | 7.2*)
+                (7.0* | 7.1* | 7.2* | 7.3*)
                 step_msg "found ok version of APPLE CLANG: $CFG_OSX_CLANG_VERSION"
                 ;;
                 (*)
@@ -1249,7 +1241,7 @@ $ pacman -R cmake && pacman -S mingw-w64-x86_64-cmake
                     bits=x86_64
                     msvc_part=amd64
                     ;;
-                i686-*)
+                i*86-*)
                     bits=i386
                     msvc_part=
                     ;;
@@ -1494,7 +1486,9 @@ do
         LLVM_INST_DIR=$CFG_LLVM_ROOT
         do_reconfigure=0
         # Check that LLVM FileCheck is available. Needed for the tests
-        need_cmd $LLVM_INST_DIR/bin/FileCheck
+        if [ -z "$CFG_DISABLE_CODEGEN_TESTS" ]; then
+            need_cmd $LLVM_INST_DIR/bin/FileCheck
+        fi
     fi
 
     if [ ${do_reconfigure} -ne 0 ]
index 0b8b1559d90bd6252e7b6b7e603eeec058075838..a034e471b6efcd9b88800d7a8490bd587fb4539f 100644 (file)
@@ -268,7 +268,7 @@ the maximum number of threads used for this purpose.
 
 .TP
 \fBRUST_TEST_NOCAPTURE\fR
-A synonym for the --nocapture flag.
+If set to a value other than "0", a synonym for the --nocapture flag.
 
 .TP
 \fBRUST_MIN_STACK\fR
@@ -276,7 +276,7 @@ Sets the minimum stack size for new threads.
 
 .TP
 \fBRUST_BACKTRACE\fR
-If set, produces a backtrace in the output of a program which panics.
+If set to a value different than "0", produces a backtrace in the output of a program which panics.
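
An illustrative way to observe the RUST_BACKTRACE behavior described here (a sketch, not part of the man page itself):

    fn main() {
        // With RUST_BACKTRACE=1 (or any value other than "0") in the
        // environment, the panic below prints a backtrace; with
        // RUST_BACKTRACE=0 or the variable unset, it does not.
        panic!("demonstrating RUST_BACKTRACE");
    }
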
 
 .SH "EXAMPLES"
 To build an executable from a source file with a main function:
diff --git a/mk/cfg/i586-pc-windows-msvc.mk b/mk/cfg/i586-pc-windows-msvc.mk
new file mode 100644 (file)
index 0000000..da2680f
--- /dev/null
@@ -0,0 +1,28 @@
+# i586-pc-windows-msvc configuration
+CC_i586-pc-windows-msvc="$(CFG_MSVC_CL_i386)" -nologo
+LINK_i586-pc-windows-msvc="$(CFG_MSVC_LINK_i386)" -nologo
+CXX_i586-pc-windows-msvc="$(CFG_MSVC_CL_i386)" -nologo
+CPP_i586-pc-windows-msvc="$(CFG_MSVC_CL_i386)" -nologo
+AR_i586-pc-windows-msvc="$(CFG_MSVC_LIB_i386)" -nologo
+CFG_LIB_NAME_i586-pc-windows-msvc=$(1).dll
+CFG_STATIC_LIB_NAME_i586-pc-windows-msvc=$(1).lib
+CFG_LIB_GLOB_i586-pc-windows-msvc=$(1)-*.{dll,lib}
+CFG_LIB_DSYM_GLOB_i586-pc-windows-msvc=$(1)-*.dylib.dSYM
+CFG_JEMALLOC_CFLAGS_i586-pc-windows-msvc :=
+CFG_GCCISH_CFLAGS_i586-pc-windows-msvc := -MD -arch:IA32
+CFG_GCCISH_CXXFLAGS_i586-pc-windows-msvc := -MD -arch:IA32
+CFG_GCCISH_LINK_FLAGS_i586-pc-windows-msvc :=
+CFG_GCCISH_DEF_FLAG_i586-pc-windows-msvc :=
+CFG_LLC_FLAGS_i586-pc-windows-msvc :=
+CFG_INSTALL_NAME_i586-pc-windows-msvc =
+CFG_EXE_SUFFIX_i586-pc-windows-msvc := .exe
+CFG_WINDOWSY_i586-pc-windows-msvc := 1
+CFG_UNIXY_i586-pc-windows-msvc :=
+CFG_LDPATH_i586-pc-windows-msvc :=
+CFG_RUN_i586-pc-windows-msvc=$(2)
+CFG_RUN_TARG_i586-pc-windows-msvc=$(call CFG_RUN_i586-pc-windows-msvc,,$(2))
+CFG_GNU_TRIPLE_i586-pc-windows-msvc := i586-pc-win32
+
+# Currently the build system is not configured to build jemalloc
+# with MSVC, so we omit this optional dependency.
+CFG_DISABLE_JEMALLOC_i586-pc-windows-msvc := 1
index 0609f365de40839bc231303d84b75f5d87c6cd66..2b28550320d85b2b31ec74495dee6c1dbf24dc53 100644 (file)
@@ -7,9 +7,9 @@ CFG_LIB_NAME_i586-unknown-linux-gnu=lib$(1).so
 CFG_STATIC_LIB_NAME_i586-unknown-linux-gnu=lib$(1).a
 CFG_LIB_GLOB_i586-unknown-linux-gnu=lib$(1)-*.so
 CFG_LIB_DSYM_GLOB_i586-unknown-linux-gnu=lib$(1)-*.dylib.dSYM
-CFG_JEMALLOC_CFLAGS_i586-unknown-linux-gnu := -m32 $(CFLAGS)
-CFG_GCCISH_CFLAGS_i586-unknown-linux-gnu := -Wall -Werror -g -fPIC -m32 $(CFLAGS)
-CFG_GCCISH_CXXFLAGS_i586-unknown-linux-gnu := -fno-rtti $(CXXFLAGS)
+CFG_JEMALLOC_CFLAGS_i586-unknown-linux-gnu := -m32 $(CFLAGS) -march=pentium
+CFG_GCCISH_CFLAGS_i586-unknown-linux-gnu := -Wall -Werror -g -fPIC -m32 $(CFLAGS) -march=pentium
+CFG_GCCISH_CXXFLAGS_i586-unknown-linux-gnu := -fno-rtti $(CXXFLAGS) -march=pentium
 CFG_GCCISH_LINK_FLAGS_i586-unknown-linux-gnu := -shared -fPIC -ldl -pthread  -lrt -g -m32
 CFG_GCCISH_DEF_FLAG_i586-unknown-linux-gnu := -Wl,--export-dynamic,--dynamic-list=
 CFG_LLC_FLAGS_i586-unknown-linux-gnu :=
index 0ed65effbdf48b0d5206b0ae36c2cb705e95a2ac..3426b30aeeb85f9080d28f6f4dda698c8a11dd28 100644 (file)
@@ -25,5 +25,3 @@ CFG_GNU_TRIPLE_i686-pc-windows-gnu := i686-w64-mingw32
 CFG_THIRD_PARTY_OBJECTS_i686-pc-windows-gnu := crt2.o dllcrt2.o
 CFG_INSTALLED_OBJECTS_i686-pc-windows-gnu := crt2.o dllcrt2.o rsbegin.o rsend.o
 CFG_RUSTRT_HAS_STARTUP_OBJS_i686-pc-windows-gnu := 1
-# FIXME(#31030) - there's not a great reason to disable jemalloc here
-CFG_DISABLE_JEMALLOC_i686-pc-windows-gnu := 1
index f7fc4a2f26abedc6cc131402267d150125c92e86..f0732d08c71ea84c65b9c5e6140084bb69ac2aa2 100644 (file)
@@ -25,5 +25,3 @@ CFG_GNU_TRIPLE_x86_64-pc-windows-gnu := x86_64-w64-mingw32
 CFG_THIRD_PARTY_OBJECTS_x86_64-pc-windows-gnu := crt2.o dllcrt2.o
 CFG_INSTALLED_OBJECTS_x86_64-pc-windows-gnu := crt2.o dllcrt2.o rsbegin.o rsend.o
 CFG_RUSTRT_HAS_STARTUP_OBJS_x86_64-pc-windows-gnu := 1
-# FIXME(#31030) - there's not a great reason to disable jemalloc here
-CFG_DISABLE_JEMALLOC_x86_64-pc-windows-gnu := 1
index b7bb7c1083dd83e9491483fa42617388a757c26d..dafda75f5fe7d883ba564bd2b5003828ab1486c3 100644 (file)
 # automatically generated for all stage/host/target combinations.
 ################################################################################
 
-TARGET_CRATES := libc std flate arena term \
-                 serialize getopts collections test rand \
-                 log graphviz core rbml alloc \
+TARGET_CRATES := libc std term \
+                 getopts collections test rand \
+                 core alloc \
                  rustc_unicode rustc_bitflags \
                 alloc_system alloc_jemalloc
 RUSTC_CRATES := rustc rustc_typeck rustc_mir rustc_borrowck rustc_resolve rustc_driver \
                 rustc_trans rustc_back rustc_llvm rustc_privacy rustc_lint \
-                rustc_data_structures rustc_front rustc_platform_intrinsics \
-                rustc_plugin rustc_metadata rustc_passes
-HOST_CRATES := syntax syntax_ext $(RUSTC_CRATES) rustdoc fmt_macros
+                rustc_data_structures rustc_platform_intrinsics \
+                rustc_plugin rustc_metadata rustc_passes rustc_save_analysis \
+                rustc_const_eval rustc_const_math rustc_incremental
+HOST_CRATES := syntax syntax_ext $(RUSTC_CRATES) rustdoc fmt_macros \
+               flate arena graphviz rbml log serialize
 TOOLS := compiletest rustdoc rustc rustbook error_index_generator
 
 DEPS_core :=
@@ -84,40 +86,49 @@ DEPS_log := std
 DEPS_num := std
 DEPS_rbml := std log serialize
 DEPS_serialize := std log
-DEPS_term := std log
-DEPS_test := std getopts serialize rbml term native:rust_test_helpers
+DEPS_term := std
+DEPS_test := std getopts term native:rust_test_helpers
 
-DEPS_syntax := std term serialize log arena libc rustc_bitflags
+DEPS_syntax := std term serialize log arena libc rustc_bitflags rustc_unicode
 DEPS_syntax_ext := syntax fmt_macros
 
-DEPS_rustc := syntax fmt_macros flate arena serialize getopts rbml rustc_front\
-              log graphviz rustc_llvm rustc_back rustc_data_structures
-DEPS_rustc_back := std syntax rustc_llvm rustc_front flate log libc
-DEPS_rustc_borrowck := rustc rustc_front log graphviz syntax
+DEPS_rustc_const_math := std syntax log serialize
+DEPS_rustc_const_eval := rustc_const_math rustc syntax log serialize \
+                                            rustc_back graphviz
+
+DEPS_rustc := syntax fmt_macros flate arena serialize getopts rbml \
+              log graphviz rustc_back rustc_data_structures\
+                         rustc_const_math
+DEPS_rustc_back := std syntax flate log libc
+DEPS_rustc_borrowck := rustc rustc_mir log graphviz syntax
 DEPS_rustc_data_structures := std log serialize
 DEPS_rustc_driver := arena flate getopts graphviz libc rustc rustc_back rustc_borrowck \
                      rustc_typeck rustc_mir rustc_resolve log syntax serialize rustc_llvm \
-                    rustc_trans rustc_privacy rustc_lint rustc_front rustc_plugin \
-                     rustc_metadata syntax_ext rustc_passes
-DEPS_rustc_front := std syntax log serialize
-DEPS_rustc_lint := rustc log syntax
+                    rustc_trans rustc_privacy rustc_lint rustc_plugin \
+                     rustc_metadata syntax_ext rustc_passes rustc_save_analysis rustc_const_eval \
+                     rustc_incremental
+DEPS_rustc_lint := rustc log syntax rustc_const_eval
 DEPS_rustc_llvm := native:rustllvm libc std rustc_bitflags
-DEPS_rustc_metadata := rustc rustc_front syntax rbml
-DEPS_rustc_passes := syntax rustc core rustc_front
-DEPS_rustc_mir := rustc rustc_front syntax
-DEPS_rustc_resolve := arena rustc rustc_front log syntax
-DEPS_rustc_platform_intrinsics := rustc rustc_llvm
+DEPS_rustc_metadata := rustc syntax rbml rustc_const_math
+DEPS_rustc_passes := syntax rustc core rustc_const_eval
+DEPS_rustc_mir := rustc syntax rustc_const_math rustc_const_eval
+DEPS_rustc_resolve := arena rustc log syntax
+DEPS_rustc_platform_intrinsics := std
 DEPS_rustc_plugin := rustc rustc_metadata syntax rustc_mir
-DEPS_rustc_privacy := rustc rustc_front log syntax
+DEPS_rustc_privacy := rustc log syntax
 DEPS_rustc_trans := arena flate getopts graphviz libc rustc rustc_back rustc_mir \
-                    log syntax serialize rustc_llvm rustc_front rustc_platform_intrinsics
-DEPS_rustc_typeck := rustc syntax rustc_front rustc_platform_intrinsics
+                    log syntax serialize rustc_llvm rustc_platform_intrinsics \
+                    rustc_const_math rustc_const_eval rustc_incremental
+DEPS_rustc_incremental := rbml rustc serialize rustc_data_structures
+DEPS_rustc_save_analysis := rustc log syntax
+DEPS_rustc_typeck := rustc syntax rustc_platform_intrinsics rustc_const_math \
+                     rustc_const_eval
 
 DEPS_rustdoc := rustc rustc_driver native:hoedown serialize getopts \
-                test rustc_lint rustc_front
+                test rustc_lint rustc_const_eval
 
 
-TOOL_DEPS_compiletest := test getopts
+TOOL_DEPS_compiletest := test getopts log
 TOOL_DEPS_rustdoc := rustdoc
 TOOL_DEPS_rustc := rustc_driver
 TOOL_DEPS_rustbook := std rustdoc
@@ -125,8 +136,8 @@ TOOL_DEPS_error_index_generator := rustdoc syntax serialize
 TOOL_SOURCE_compiletest := $(S)src/compiletest/compiletest.rs
 TOOL_SOURCE_rustdoc := $(S)src/driver/driver.rs
 TOOL_SOURCE_rustc := $(S)src/driver/driver.rs
-TOOL_SOURCE_rustbook := $(S)src/rustbook/main.rs
-TOOL_SOURCE_error_index_generator := $(S)src/error_index_generator/main.rs
+TOOL_SOURCE_rustbook := $(S)src/tools/rustbook/main.rs
+TOOL_SOURCE_error_index_generator := $(S)src/tools/error_index_generator/main.rs
 
 ONLY_RLIB_core := 1
 ONLY_RLIB_libc := 1
index 38e0bcd338d235bce419a1423516d0719ad61be1..1273900608385f2faab7205b4b1cfea487dd0e36 100644 (file)
@@ -54,7 +54,6 @@ PKG_FILES := \
       doc                                      \
       driver                                   \
       etc                                      \
-      error_index_generator                    \
       $(foreach crate,$(CRATES),lib$(crate))   \
       libcollectionstest                       \
       libcoretest                              \
@@ -65,7 +64,7 @@ PKG_FILES := \
       rustc                                    \
       snapshots.txt                            \
       rust-installer                           \
-      rustbook                                 \
+      tools                                    \
       test)                                    \
     $(PKG_GITMODULES)                          \
     $(filter-out config.stamp, \
index a0a44cd345caf77f809dffc7d543c4eb18433e2b..fc537d3b7da46635b2e196f5beceb70409c6e39f 100644 (file)
 ######################################################################
 
 # The version number
-CFG_RELEASE_NUM=1.8.0
+CFG_RELEASE_NUM=1.9.0
 
 # An optional number to put after the label, e.g. '.2' -> '-beta.2'
 # NB Make sure it starts with a dot to conform to semver pre-release
 # versions (section 9)
-CFG_PRERELEASE_VERSION=.2
+CFG_PRERELEASE_VERSION=.3
 
 # Append a version-dependent hash to each library, so we can install different
 # versions in the same place
 CFG_FILENAME_EXTRA=$(shell printf '%s' $(CFG_RELEASE)$(CFG_EXTRA_FILENAME) | $(CFG_HASH_COMMAND))
 
+# A magic value that allows the compiler to use unstable features during the
+# bootstrap even when doing so would normally be an error because of feature
+# staging or because the build turns on warnings-as-errors and unstable features
+# default to warnings. The build has to match this key in an env var.
+#
+# This value is keyed off the release to ensure that all compilers for one
+# particular release have the same bootstrap key. Note that this is
+# intentionally not "secure" by any definition, this is largely just a deterrent
+# from users enabling unstable features on the stable compiler.
+CFG_BOOTSTRAP_KEY=$(CFG_FILENAME_EXTRA)
+
 ifeq ($(CFG_RELEASE_CHANNEL),stable)
 # This is the normal semver version string, e.g. "0.12.0", "0.12.0-nightly"
 CFG_RELEASE=$(CFG_RELEASE_NUM)
@@ -134,6 +145,11 @@ ifdef CFG_ENABLE_DEBUGINFO
   CFG_RUSTC_FLAGS += -g
 endif
 
+ifdef CFG_ENABLE_ORBIT
+  $(info cfg: launching MIR (CFG_ENABLE_ORBIT))
+  CFG_RUSTC_FLAGS += -Z orbit
+endif
+
 ifdef SAVE_TEMPS
   CFG_RUSTC_FLAGS += --save-temps
 endif
@@ -488,7 +504,7 @@ endif
 LD_LIBRARY_PATH_ENV_HOSTDIR$(1)_T_$(2)_H_$(3) := \
     $$(CURDIR)/$$(HLIB$(1)_H_$(3)):$$(CFG_LLVM_INST_DIR_$(3))/lib
 LD_LIBRARY_PATH_ENV_TARGETDIR$(1)_T_$(2)_H_$(3) := \
-    $$(CURDIR)/$$(TLIB1_T_$(2)_H_$(CFG_BUILD))
+    $$(CURDIR)/$$(TLIB$(1)_T_$(2)_H_$(3))
 
 HOST_RPATH_VAR$(1)_T_$(2)_H_$(3) := \
   $$(LD_LIBRARY_PATH_ENV_NAME$(1)_T_$(2)_H_$(3))=$$(LD_LIBRARY_PATH_ENV_HOSTDIR$(1)_T_$(2)_H_$(3)):$$$$$$(LD_LIBRARY_PATH_ENV_NAME$(1)_T_$(2)_H_$(3))
@@ -501,18 +517,14 @@ RPATH_VAR$(1)_T_$(2)_H_$(3) := $$(HOST_RPATH_VAR$(1)_T_$(2)_H_$(3))
 # if you're building a cross config, the host->* parts are
 # effectively stage1, since it uses the just-built stage0.
 #
-# This logic is similar to how the LD_LIBRARY_PATH variable must
-# change be slightly different when doing cross compilations.
-# The build doesn't copy over all target libraries into
-# a new directory, so we need to point the library path at
-# the build directory where all the target libraries came
-# from (the stage0 build host). Otherwise the relative rpaths
-# inside of the rustc binary won't get resolved correctly.
+# Also be sure to use the right rpath because we're loading libraries from the
+# CFG_BUILD's stage1 directory for our target, so switch this one instance of
+# `RPATH_VAR` to get the bootstrap working.
 ifeq ($(1),0)
 ifneq ($(strip $(CFG_BUILD)),$(strip $(3)))
 CFGFLAG$(1)_T_$(2)_H_$(3) = stage1
 
-RPATH_VAR$(1)_T_$(2)_H_$(3) := $$(TARGET_RPATH_VAR$(1)_T_$(2)_H_$(3))
+RPATH_VAR$(1)_T_$(2)_H_$(3) := $$(TARGET_RPATH_VAR1_T_$(2)_H_$$(CFG_BUILD))
 endif
 endif
 
index bd17490955d1e5bbabf97d257097da1af3b12a6b..6591812280122067c5ec3aee46686d7f4616c070 100644 (file)
--- a/mk/rt.mk
+++ b/mk/rt.mk
@@ -157,6 +157,8 @@ else ifeq ($(findstring android, $(OSTYPE_$(1))), android)
   # If the test suite passes, however, without symbol prefixes then we should be
   # good to go!
   JEMALLOC_ARGS_$(1) := --disable-tls --with-jemalloc-prefix=je_
+else ifeq ($(findstring dragonfly, $(OSTYPE_$(1))), dragonfly)
+  JEMALLOC_ARGS_$(1) := --with-jemalloc-prefix=je_
 endif
 
 ifdef CFG_ENABLE_DEBUG_JEMALLOC
@@ -236,11 +238,11 @@ COMPRT_LIB_$(1) := $$(RT_OUTPUT_DIR_$(1))/$$(COMPRT_NAME_$(1))
 COMPRT_BUILD_DIR_$(1) := $$(RT_OUTPUT_DIR_$(1))/compiler-rt
 
 ifeq ($$(findstring msvc,$(1)),msvc)
-$$(COMPRT_LIB_$(1)): $$(COMPRT_DEPS) $$(MKFILE_DEPS) $$(LLVM_CONFIG_$(1))
+$$(COMPRT_LIB_$(1)): $$(COMPRT_DEPS) $$(MKFILE_DEPS) $$(LLVM_CONFIG_$$(CFG_BUILD))
        @$$(call E, cmake: compiler-rt)
        $$(Q)cd "$$(COMPRT_BUILD_DIR_$(1))"; $$(CFG_CMAKE) "$(S)src/compiler-rt" \
                -DCMAKE_BUILD_TYPE=$$(LLVM_BUILD_CONFIG_MODE) \
-               -DLLVM_CONFIG_PATH=$$(LLVM_CONFIG_$(1)) \
+               -DLLVM_CONFIG_PATH=$$(LLVM_CONFIG_$$(CFG_BUILD)) \
                -G"$$(CFG_CMAKE_GENERATOR)"
        $$(Q)$$(CFG_CMAKE) --build "$$(COMPRT_BUILD_DIR_$(1))" \
                --target lib/builtins/builtins \
@@ -253,7 +255,7 @@ COMPRT_AR_$(1) := $$(AR_$(1))
 # We chomp -Werror here because GCC warns about the type signature of
 # builtins not matching its own and the build fails. It's a bit hacky,
 # but what can we do, we're building libclang-rt using GCC ......
-COMPRT_CFLAGS_$(1) := $$(filter-out -Werror -Werror=*,$$(CFG_GCCISH_CFLAGS_$(1))) -std=c99
+COMPRT_CFLAGS_$(1) := $$(CFG_GCCISH_CFLAGS_$(1)) -Wno-error -std=c99
 
 # FreeBSD Clang's packaging is problematic; it doesn't copy unwind.h to
 # the standard include directory. This should really be in our changes to
@@ -361,7 +363,7 @@ $$(BACKTRACE_BUILD_DIR_$(1))/Makefile: $$(BACKTRACE_DEPS) $$(MKFILE_DEPS)
              CC="$$(CC_$(1))" \
              AR="$$(AR_$(1))" \
              RANLIB="$$(AR_$(1)) s" \
-             CFLAGS="$$(CFG_GCCISH_CFLAGS_$(1):-Werror=) -fno-stack-protector" \
+             CFLAGS="$$(CFG_GCCISH_CFLAGS_$(1)) -Wno-error -fno-stack-protector" \
              $(S)src/libbacktrace/configure --build=$(CFG_GNU_TRIPLE_$(CFG_BUILD)) --host=$(CFG_GNU_TRIPLE_$(1)))
        $$(Q)echo '#undef HAVE_ATOMIC_FUNCTIONS' >> \
              $$(BACKTRACE_BUILD_DIR_$(1))/config.h
index cdd8abd7fb770002d7a6954595a2938e717f202d..261b9e79aa541a653f1e80d61078fa30e4b07c3f 100644 (file)
@@ -89,6 +89,7 @@ $$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$(4): \
                $$(RUSTFLAGS$(1)_$(4)_T_$(2)) \
                --out-dir $$(@D) \
                -C extra-filename=-$$(CFG_FILENAME_EXTRA) \
+               -C metadata=$$(CFG_FILENAME_EXTRA) \
                $$<
        @touch -r $$@.start_time $$@ && rm $$@.start_time
        $$(call LIST_ALL_OLD_GLOB_MATCHES, \
index ea88a7d34f54e2da90ca86df8468e9cd92558d93..20736165b73b0beb9b441cf6f640da17687a3d55 100644 (file)
@@ -299,24 +299,35 @@ check-stage$(1)-T-$(2)-H-$(3)-exec: \
        check-stage$(1)-T-$(2)-H-$(3)-cfail-exec \
        check-stage$(1)-T-$(2)-H-$(3)-pfail-exec \
        check-stage$(1)-T-$(2)-H-$(3)-rpass-valgrind-exec \
-       check-stage$(1)-T-$(2)-H-$(3)-rpass-full-exec \
-       check-stage$(1)-T-$(2)-H-$(3)-rfail-full-exec \
-       check-stage$(1)-T-$(2)-H-$(3)-cfail-full-exec \
        check-stage$(1)-T-$(2)-H-$(3)-rmake-exec \
        check-stage$(1)-T-$(2)-H-$(3)-rustdocck-exec \
        check-stage$(1)-T-$(2)-H-$(3)-crates-exec \
        check-stage$(1)-T-$(2)-H-$(3)-doc-crates-exec \
        check-stage$(1)-T-$(2)-H-$(3)-debuginfo-gdb-exec \
        check-stage$(1)-T-$(2)-H-$(3)-debuginfo-lldb-exec \
-       check-stage$(1)-T-$(2)-H-$(3)-codegen-exec \
-       check-stage$(1)-T-$(2)-H-$(3)-codegen-units-exec \
+       check-stage$(1)-T-$(2)-H-$(3)-incremental-exec \
        check-stage$(1)-T-$(2)-H-$(3)-doc-exec \
        check-stage$(1)-T-$(2)-H-$(3)-pretty-exec
 
+ifndef CFG_DISABLE_CODEGEN_TESTS
+check-stage$(1)-T-$(2)-H-$(3)-exec: \
+       check-stage$(1)-T-$(2)-H-$(3)-codegen-exec \
+       check-stage$(1)-T-$(2)-H-$(3)-codegen-units-exec
+endif
+
 # Only test the compiler-dependent crates when the target is
 # able to build a compiler (when the target triple is in the set of host triples)
 ifneq ($$(findstring $(2),$$(CFG_HOST)),)
 
+check-stage$(1)-T-$(2)-H-$(3)-exec: \
+       check-stage$(1)-T-$(2)-H-$(3)-rpass-full-exec \
+       check-stage$(1)-T-$(2)-H-$(3)-rfail-full-exec \
+       check-stage$(1)-T-$(2)-H-$(3)-cfail-full-exec
+
+check-stage$(1)-T-$(2)-H-$(3)-pretty-exec: \
+       check-stage$(1)-T-$(2)-H-$(3)-pretty-rpass-full-exec \
+       check-stage$(1)-T-$(2)-H-$(3)-pretty-rfail-full-exec
+
 check-stage$(1)-T-$(2)-H-$(3)-crates-exec: \
        $$(foreach crate,$$(TEST_CRATES), \
            check-stage$(1)-T-$(2)-H-$(3)-$$(crate)-exec)
@@ -340,9 +351,7 @@ check-stage$(1)-T-$(2)-H-$(3)-doc-exec: \
 check-stage$(1)-T-$(2)-H-$(3)-pretty-exec: \
        check-stage$(1)-T-$(2)-H-$(3)-pretty-rpass-exec \
     check-stage$(1)-T-$(2)-H-$(3)-pretty-rpass-valgrind-exec \
-       check-stage$(1)-T-$(2)-H-$(3)-pretty-rpass-full-exec \
        check-stage$(1)-T-$(2)-H-$(3)-pretty-rfail-exec \
-       check-stage$(1)-T-$(2)-H-$(3)-pretty-rfail-full-exec \
        check-stage$(1)-T-$(2)-H-$(3)-pretty-pretty-exec
 
 endef
@@ -379,7 +388,7 @@ $(3)/stage$(1)/test/$(4)test-$(2)$$(X_$(2)): \
        @$$(call E, rustc: $$@)
        $(Q)CFG_LLVM_LINKAGE_FILE=$$(LLVM_LINKAGE_PATH_$(2)) \
            $$(subst @,,$$(STAGE$(1)_T_$(2)_H_$(3))) -o $$@ $$< --test \
-               -L "$$(RT_OUTPUT_DIR_$(2))" \
+               -Cmetadata="test-crate" -L "$$(RT_OUTPUT_DIR_$(2))" \
                $$(LLVM_LIBDIR_RUSTFLAGS_$(2)) \
                $$(RUSTFLAGS_$(4))
 
@@ -473,6 +482,7 @@ DEBUGINFO_LLDB_RS := $(call rwildcard,$(S)src/test/debuginfo/,*.rs)
 CODEGEN_RS := $(call rwildcard,$(S)src/test/codegen/,*.rs)
 CODEGEN_CC := $(call rwildcard,$(S)src/test/codegen/,*.cc)
 CODEGEN_UNITS_RS := $(call rwildcard,$(S)src/test/codegen-units/,*.rs)
+INCREMENTAL_RS := $(call rwildcard,$(S)src/test/incremental/,*.rs)
 RUSTDOCCK_RS := $(call rwildcard,$(S)src/test/rustdoc/,*.rs)
 
 RPASS_TESTS := $(RPASS_RS)
@@ -488,6 +498,7 @@ DEBUGINFO_GDB_TESTS := $(DEBUGINFO_GDB_RS)
 DEBUGINFO_LLDB_TESTS := $(DEBUGINFO_LLDB_RS)
 CODEGEN_TESTS := $(CODEGEN_RS) $(CODEGEN_CC)
 CODEGEN_UNITS_TESTS := $(CODEGEN_UNITS_RS)
+INCREMENTAL_TESTS := $(INCREMENTAL_RS)
 RUSTDOCCK_TESTS := $(RUSTDOCCK_RS)
 
 CTEST_SRC_BASE_rpass = run-pass
@@ -550,6 +561,11 @@ CTEST_BUILD_BASE_codegen-units = codegen-units
 CTEST_MODE_codegen-units = codegen-units
 CTEST_RUNTOOL_codegen-units = $(CTEST_RUNTOOL)
 
+CTEST_SRC_BASE_incremental = incremental
+CTEST_BUILD_BASE_incremental = incremental
+CTEST_MODE_incremental = incremental
+CTEST_RUNTOOL_incremental = $(CTEST_RUNTOOL)
+
 CTEST_SRC_BASE_rustdocck = rustdoc
 CTEST_BUILD_BASE_rustdocck = rustdoc
 CTEST_MODE_rustdocck = rustdoc
@@ -673,6 +689,7 @@ CTEST_DEPS_debuginfo-lldb_$(1)-T-$(2)-H-$(3) = $$(DEBUGINFO_LLDB_TESTS) \
                                                $(S)src/etc/lldb_rust_formatters.py
 CTEST_DEPS_codegen_$(1)-T-$(2)-H-$(3) = $$(CODEGEN_TESTS)
 CTEST_DEPS_codegen-units_$(1)-T-$(2)-H-$(3) = $$(CODEGEN_UNITS_TESTS)
+CTEST_DEPS_incremental_$(1)-T-$(2)-H-$(3) = $$(INCREMENTAL_TESTS)
 CTEST_DEPS_rustdocck_$(1)-T-$(2)-H-$(3) = $$(RUSTDOCCK_TESTS) \
                $$(HBIN$(1)_H_$(3))/rustdoc$$(X_$(3)) \
                $(S)src/etc/htmldocck.py
@@ -739,7 +756,7 @@ endif
 endef
 
 CTEST_NAMES = rpass rpass-valgrind rpass-full rfail-full cfail-full rfail cfail pfail \
-       debuginfo-gdb debuginfo-lldb codegen codegen-units rustdocck
+       debuginfo-gdb debuginfo-lldb codegen codegen-units rustdocck incremental
 
 $(foreach host,$(CFG_HOST), \
  $(eval $(foreach target,$(CFG_TARGET), \
@@ -937,6 +954,7 @@ TEST_GROUPS = \
        debuginfo-lldb \
        codegen \
        codegen-units \
+       incremental \
        doc \
        $(foreach docname,$(DOC_NAMES),doc-$(docname)) \
        pretty \
index 05186d48ce2d1c9851f1d98a7214287695d597b3..722feab212eeb93f1ad8c6387a97986863d32882 100644 (file)
@@ -3,16 +3,17 @@ name = "bootstrap"
 version = "0.0.0"
 dependencies = [
  "build_helper 0.1.0",
- "cmake 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cmake 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
  "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "gcc 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.26 (registry+https://github.com/rust-lang/crates.io-index)",
  "getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "md5 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
- "toml 0.1.27 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -21,10 +22,10 @@ version = "0.1.0"
 
 [[package]]
 name = "cmake"
-version = "0.1.13"
+version = "0.1.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "gcc 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.26 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -32,12 +33,12 @@ name = "filetime"
 version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "gcc"
-version = "0.3.25"
+version = "0.3.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -50,13 +51,18 @@ name = "kernel32-sys"
 version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "libc"
-version = "0.2.7"
+version = "0.2.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "md5"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -64,25 +70,25 @@ name = "num_cpus"
 version = "0.2.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "rustc-serialize"
-version = "0.3.18"
+version = "0.3.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "toml"
-version = "0.1.27"
+version = "0.1.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "winapi"
-version = "0.2.5"
+version = "0.2.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
index 8321f93c90f61f23e039ffda2cc0b8e73cee57c5..88dd19a7697dcfe414d9226c7a5b68eeca4eb46a 100644 (file)
@@ -15,6 +15,10 @@ path = "main.rs"
 name = "rustc"
 path = "rustc.rs"
 
+[[bin]]
+name = "rustdoc"
+path = "rustdoc.rs"
+
 [dependencies]
 build_helper = { path = "../build_helper" }
 cmake = "0.1.10"
@@ -27,3 +31,4 @@ winapi = "0.2"
 kernel32-sys = "0.2"
 gcc = "0.3.17"
 libc = "0.2"
+md5 = "0.1"
index 6659894a171f04f191d2bd8b3a216755acd88ff6..5de7e6957c6f12d134446a291026add49fd66482 100644 (file)
@@ -73,7 +73,8 @@ class RustBuild:
 
         if self.rustc().startswith(self.bin_root()) and \
            (not os.path.exists(self.rustc()) or self.rustc_out_of_date()):
-            shutil.rmtree(self.bin_root())
+            if os.path.exists(self.bin_root()):
+                shutil.rmtree(self.bin_root())
             filename = "rust-std-nightly-" + self.build + ".tar.gz"
             url = "https://static.rust-lang.org/dist/" + self.snap_rustc_date()
             tarball = os.path.join(rustc_cache, filename)
index 628b1d764320b845f01030d9264b3511e1bc16f2..611e3475610a7108da9c170386de7abed36c5830 100644 (file)
@@ -8,15 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use std::env;
 use std::fs::{self, File};
 use std::io::prelude::*;
-use std::path::Path;
 use std::process::Command;
 
 use build_helper::output;
+use md5;
 
 use build::Build;
-use build::util::mtime;
 
 pub fn collect(build: &mut Build) {
     let mut main_mk = String::new();
@@ -36,19 +36,23 @@ pub fn collect(build: &mut Build) {
     match &build.config.channel[..] {
         "stable" => {
             build.release = release_num.to_string();
+            build.package_vers = build.release.clone();
             build.unstable_features = false;
         }
         "beta" => {
             build.release = format!("{}-beta{}", release_num,
                                    prerelease_version);
+            build.package_vers = "beta".to_string();
             build.unstable_features = false;
         }
         "nightly" => {
             build.release = format!("{}-nightly", release_num);
+            build.package_vers = "nightly".to_string();
             build.unstable_features = true;
         }
         _ => {
             build.release = format!("{}-dev", release_num);
+            build.package_vers = build.release.clone();
             build.unstable_features = true;
         }
     }
@@ -76,7 +80,8 @@ pub fn collect(build: &mut Build) {
         build.short_ver_hash = Some(short_ver_hash);
     }
 
-    build.bootstrap_key = mtime(Path::new("config.toml")).seconds()
-                                                        .to_string();
+    let key = md5::compute(build.release.as_bytes());
+    build.bootstrap_key = format!("{:02x}{:02x}{:02x}{:02x}",
+                                  key[0], key[1], key[2], key[3]);
+    env::set_var("RUSTC_BOOTSTRAP_KEY", &build.bootstrap_key);
 }
-
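
For reference, the key derivation introduced here can be reproduced standalone; a sketch using the same `md5` crate the bootstrap now depends on:

    extern crate md5; // the bootstrap's new `md5 = "0.1"` dependency

    /// First four bytes of md5(release), rendered as lowercase hex --
    /// the same value channel.rs stores in `build.bootstrap_key`.
    fn bootstrap_key(release: &str) -> String {
        let digest = md5::compute(release.as_bytes());
        format!("{:02x}{:02x}{:02x}{:02x}",
                digest[0], digest[1], digest[2], digest[3])
    }

    fn main() {
        // e.g. for the nightly release string this code produces,
        // so every 1.9.0-nightly rustbuild build agrees on the key
        println!("{}", bootstrap_key("1.9.0-nightly"));
    }
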
diff --git a/src/bootstrap/build/check.rs b/src/bootstrap/build/check.rs
new file mode 100644 (file)
index 0000000..a2445ae
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use build::{Build, Compiler};
+
+pub fn linkcheck(build: &Build, stage: u32, host: &str) {
+    println!("Linkcheck stage{} ({})", stage, host);
+    let compiler = Compiler::new(stage, host);
+    build.run(build.tool_cmd(&compiler, "linkchecker")
+                   .arg(build.out.join(host).join("doc")));
+}
+
+pub fn cargotest(build: &Build, stage: u32, host: &str) {
+
+    let ref compiler = Compiler::new(stage, host);
+
+    // Configure PATH to find the right rustc. NB. we have to use PATH
+    // and not RUSTC because the Cargo test suite has tests that will
+    // fail if rustc is not spelled `rustc`.
+    let path = build.sysroot(compiler).join("bin");
+    let old_path = ::std::env::var("PATH").expect("");
+    let sep = if cfg!(windows) { ";" } else {":" };
+    let ref newpath = format!("{}{}{}", path.display(), sep, old_path);
+
+    build.run(build.tool_cmd(compiler, "cargotest")
+              .env("PATH", newpath)
+              .arg(&build.cargo));
+}
index 3be4199352ca1e5da0f4e8e75680fded2a903803..a67f1ba48b560acbf36a5c33bd6d47163de1a1a0 100644 (file)
@@ -15,33 +15,31 @@ use std::process::Command;
 
 use build_helper::output;
 
-use build::util::{exe, staticlib, libdir, mtime, is_dylib};
-use build::{Build, Compiler};
+use build::util::{exe, staticlib, libdir, mtime, is_dylib, copy};
+use build::{Build, Compiler, Mode};
 
 /// Build the standard library.
 ///
 /// This will build the standard library for a particular stage of the build
 /// using the `compiler` targeting the `target` architecture. The artifacts
 /// created will also be linked into the sysroot directory.
-pub fn std<'a>(build: &'a Build, stage: u32, target: &str,
-               compiler: &Compiler<'a>) {
-    let host = compiler.host;
-    println!("Building stage{} std artifacts ({} -> {})", stage,
-             host, target);
+pub fn std<'a>(build: &'a Build, target: &str, compiler: &Compiler<'a>) {
+    println!("Building stage{} std artifacts ({} -> {})", compiler.stage,
+             compiler.host, target);
 
     // Move compiler-rt into place as it'll be required by the compiler when
     // building the standard library to link the dylib of libstd
-    let libdir = build.sysroot_libdir(stage, &host, target);
+    let libdir = build.sysroot_libdir(compiler, target);
     let _ = fs::remove_dir_all(&libdir);
     t!(fs::create_dir_all(&libdir));
-    t!(fs::hard_link(&build.compiler_rt_built.borrow()[target],
-                     libdir.join(staticlib("compiler-rt", target))));
+    copy(&build.compiler_rt_built.borrow()[target],
+         &libdir.join(staticlib("compiler-rt", target)));
 
     build_startup_objects(build, target, &libdir);
 
-    let out_dir = build.cargo_out(stage, &host, true, target);
+    let out_dir = build.cargo_out(compiler, Mode::Libstd, target);
     build.clear_if_dirty(&out_dir, &build.compiler_path(compiler));
-    let mut cargo = build.cargo(stage, compiler, true, target, "build");
+    let mut cargo = build.cargo(compiler, Mode::Libstd, target, "build");
     cargo.arg("--features").arg(build.std_features())
          .arg("--manifest-path")
          .arg(build.src.join("src/rustc/std_shim/Cargo.toml"));
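
The `Mode` values threaded through these calls are defined in build/mod.rs, outside this diff; judging only from the variants used in this file, the definition is presumably along these lines (an assumption, not the actual source):

    // Assumed shape, inferred from Mode::Libstd, Mode::Libtest,
    // Mode::Librustc and Mode::Tool as used in build/compile.rs;
    // the real definition lives in src/bootstrap/build/mod.rs.
    pub enum Mode {
        Libstd,
        Libtest,
        Librustc,
        Tool,
    }
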
@@ -58,7 +56,7 @@ pub fn std<'a>(build: &'a Build, stage: u32, target: &str,
     }
 
     build.run(&mut cargo);
-    std_link(build, stage, target, compiler, host);
+    std_link(build, target, compiler, compiler.host);
 }
 
 /// Link all libstd rlibs/dylibs into the sysroot location.
@@ -66,12 +64,12 @@ pub fn std<'a>(build: &'a Build, stage: u32, target: &str,
 /// Links those artifacts generated in the given `stage` for `target` produced
 /// by `compiler` into `host`'s sysroot.
 pub fn std_link(build: &Build,
-                stage: u32,
                 target: &str,
                 compiler: &Compiler,
                 host: &str) {
-    let libdir = build.sysroot_libdir(stage, host, target);
-    let out_dir = build.cargo_out(stage, compiler.host, true, target);
+    let target_compiler = Compiler::new(compiler.stage, host);
+    let libdir = build.sysroot_libdir(&target_compiler, target);
+    let out_dir = build.cargo_out(compiler, Mode::Libstd, target);
 
     // If we're linking one compiler host's output into another, then we weren't
     // called from the `std` method above. In that case we clean out what's
@@ -79,10 +77,24 @@ pub fn std_link(build: &Build,
     if host != compiler.host {
         let _ = fs::remove_dir_all(&libdir);
         t!(fs::create_dir_all(&libdir));
-        t!(fs::hard_link(&build.compiler_rt_built.borrow()[target],
-                         libdir.join(staticlib("compiler-rt", target))));
+        copy(&build.compiler_rt_built.borrow()[target],
+             &libdir.join(staticlib("compiler-rt", target)));
     }
     add_to_sysroot(&out_dir, &libdir);
+
+    if target.contains("musl") &&
+       (target.contains("x86_64") || target.contains("i686")) {
+        copy_third_party_objects(build, target, &libdir);
+    }
+}
+
+/// Copies the crt(1,i,n).o startup objects
+///
+/// Only required for musl targets that statically link to libc
+fn copy_third_party_objects(build: &Build, target: &str, into: &Path) {
+    for &obj in &["crt1.o", "crti.o", "crtn.o"] {
+        copy(&compiler_file(build.cc(target), obj), &into.join(obj));
+    }
 }
 
 /// Build and prepare startup objects like rsbegin.o and rsend.o
@@ -107,34 +119,59 @@ fn build_startup_objects(build: &Build, target: &str, into: &Path) {
     }
 
     for obj in ["crt2.o", "dllcrt2.o"].iter() {
-        t!(fs::copy(compiler_file(build.cc(target), obj), into.join(obj)));
+        copy(&compiler_file(build.cc(target), obj), &into.join(obj));
     }
 }
 
+/// Build libtest.
+///
+/// This will build libtest and supporting libraries for a particular stage of
+/// the build using the `compiler` targeting the `target` architecture. The
+/// artifacts created will also be linked into the sysroot directory.
+pub fn test<'a>(build: &'a Build, target: &str, compiler: &Compiler<'a>) {
+    println!("Building stage{} test artifacts ({} -> {})", compiler.stage,
+             compiler.host, target);
+    let out_dir = build.cargo_out(compiler, Mode::Libtest, target);
+    build.clear_if_dirty(&out_dir, &libstd_shim(build, compiler, target));
+    let mut cargo = build.cargo(compiler, Mode::Libtest, target, "build");
+    cargo.arg("--manifest-path")
+         .arg(build.src.join("src/rustc/test_shim/Cargo.toml"));
+    build.run(&mut cargo);
+    test_link(build, target, compiler, compiler.host);
+}
+
+/// Link all libtest rlibs/dylibs into the sysroot location.
+///
+/// Links those artifacts generated in the given `stage` for `target` produced
+/// by `compiler` into `host`'s sysroot.
+pub fn test_link(build: &Build,
+                 target: &str,
+                 compiler: &Compiler,
+                 host: &str) {
+    let target_compiler = Compiler::new(compiler.stage, host);
+    let libdir = build.sysroot_libdir(&target_compiler, target);
+    let out_dir = build.cargo_out(compiler, Mode::Libtest, target);
+    add_to_sysroot(&out_dir, &libdir);
+}
+
+
 /// Build the compiler.
 ///
 /// This will build the compiler for a particular stage of the build using
 /// the `compiler` targeting the `target` architecture. The artifacts
 /// created will also be linked into the sysroot directory.
-pub fn rustc<'a>(build: &'a Build, stage: u32, target: &str,
-                 compiler: &Compiler<'a>) {
-    let host = compiler.host;
-    println!("Building stage{} compiler artifacts ({} -> {})", stage,
-             host, target);
+pub fn rustc<'a>(build: &'a Build, target: &str, compiler: &Compiler<'a>) {
+    println!("Building stage{} compiler artifacts ({} -> {})",
+             compiler.stage, compiler.host, target);
 
-    let out_dir = build.cargo_out(stage, &host, false, target);
-    build.clear_if_dirty(&out_dir, &libstd_shim(build, stage, &host, target));
+    let out_dir = build.cargo_out(compiler, Mode::Librustc, target);
+    build.clear_if_dirty(&out_dir, &libtest_shim(build, compiler, target));
 
-    let mut cargo = build.cargo(stage, compiler, false, target, "build");
-    cargo.arg("--features").arg(build.rustc_features(stage))
+    let mut cargo = build.cargo(compiler, Mode::Librustc, target, "build");
+    cargo.arg("--features").arg(build.rustc_features())
          .arg("--manifest-path")
          .arg(build.src.join("src/rustc/Cargo.toml"));
 
-    // In stage0 we may not need to build as many executables
-    if stage == 0 {
-        cargo.arg("--bin").arg("rustc");
-    }
-
     // Set some configuration variables picked up by build scripts and
     // the compiler alike
     cargo.env("CFG_RELEASE", &build.release)
@@ -174,7 +211,7 @@ pub fn rustc<'a>(build: &'a Build, stage: u32, target: &str,
     }
     build.run(&mut cargo);
 
-    rustc_link(build, stage, target, compiler, compiler.host);
+    rustc_link(build, target, compiler, compiler.host);
 }
 
 /// Link all librustc rlibs/dylibs into the sysroot location.
@@ -182,24 +219,31 @@ pub fn rustc<'a>(build: &'a Build, stage: u32, target: &str,
 /// Links those artifacts generated in the given `stage` for `target` produced
 /// by `compiler` into `host`'s sysroot.
 pub fn rustc_link(build: &Build,
-                  stage: u32,
                   target: &str,
                   compiler: &Compiler,
                   host: &str) {
-    let libdir = build.sysroot_libdir(stage, host, target);
-    let out_dir = build.cargo_out(stage, compiler.host, false, target);
+    let target_compiler = Compiler::new(compiler.stage, host);
+    let libdir = build.sysroot_libdir(&target_compiler, target);
+    let out_dir = build.cargo_out(compiler, Mode::Librustc, target);
     add_to_sysroot(&out_dir, &libdir);
 }
 
 /// Cargo's output path for the standard library in a given stage, compiled
 /// by a particular compiler for the specified target.
-fn libstd_shim(build: &Build, stage: u32, host: &str, target: &str) -> PathBuf {
-    build.cargo_out(stage, host, true, target).join("libstd_shim.rlib")
+fn libstd_shim(build: &Build, compiler: &Compiler, target: &str) -> PathBuf {
+    build.cargo_out(compiler, Mode::Libstd, target).join("libstd_shim.rlib")
 }
 
-fn compiler_file(compiler: &Path, file: &str) -> String {
-    output(Command::new(compiler)
-                   .arg(format!("-print-file-name={}", file))).trim().to_string()
+/// Cargo's output path for libtest in a given stage, compiled by a particular
+/// compiler for the specified target.
+fn libtest_shim(build: &Build, compiler: &Compiler, target: &str) -> PathBuf {
+    build.cargo_out(compiler, Mode::Libtest, target).join("libtest_shim.rlib")
+}
+
+fn compiler_file(compiler: &Path, file: &str) -> PathBuf {
+    let out = output(Command::new(compiler)
+                            .arg(format!("-print-file-name={}", file)));
+    PathBuf::from(out.trim())
 }
 
 /// Prepare a new compiler from the artifacts in `stage`
@@ -209,24 +253,29 @@ fn compiler_file(compiler: &Path, file: &str) -> String {
 /// compiler.
 pub fn assemble_rustc(build: &Build, stage: u32, host: &str) {
     assert!(stage > 0, "the stage0 compiler isn't assembled, it's downloaded");
+    // The compiler that we're assembling
+    let target_compiler = Compiler::new(stage, host);
+
+    // The compiler that compiled the compiler we're assembling
+    let build_compiler = Compiler::new(stage - 1, &build.config.build);
 
     // Clear out old files
-    let sysroot = build.sysroot(stage, host);
+    let sysroot = build.sysroot(&target_compiler);
     let _ = fs::remove_dir_all(&sysroot);
     t!(fs::create_dir_all(&sysroot));
 
     // Link in all dylibs to the libdir
     let sysroot_libdir = sysroot.join(libdir(host));
     t!(fs::create_dir_all(&sysroot_libdir));
-    let src_libdir = build.sysroot_libdir(stage - 1, &build.config.build, host);
+    let src_libdir = build.sysroot_libdir(&build_compiler, host);
     for f in t!(fs::read_dir(&src_libdir)).map(|f| t!(f)) {
         let filename = f.file_name().into_string().unwrap();
         if is_dylib(&filename) {
-            t!(fs::hard_link(&f.path(), sysroot_libdir.join(&filename)));
+            copy(&f.path(), &sysroot_libdir.join(&filename));
         }
     }
 
-    let out_dir = build.cargo_out(stage - 1, &build.config.build, false, host);
+    let out_dir = build.cargo_out(&build_compiler, Mode::Librustc, host);
 
     // Link the compiler binary itself into place
     let rustc = out_dir.join(exe("rustc", host));
@@ -234,7 +283,7 @@ pub fn assemble_rustc(build: &Build, stage: u32, host: &str) {
     t!(fs::create_dir_all(&bindir));
     let compiler = build.compiler_path(&Compiler::new(stage, host));
     let _ = fs::remove_file(&compiler);
-    t!(fs::hard_link(rustc, compiler));
+    copy(&rustc, &compiler);
 
     // See if rustdoc exists to link it into place
     let rustdoc = exe("rustdoc", host);
@@ -242,7 +291,7 @@ pub fn assemble_rustc(build: &Build, stage: u32, host: &str) {
     let rustdoc_dst = bindir.join(&rustdoc);
     if fs::metadata(&rustdoc_src).is_ok() {
         let _ = fs::remove_file(&rustdoc_dst);
-        t!(fs::hard_link(&rustdoc_src, &rustdoc_dst));
+        copy(&rustdoc_src, &rustdoc_dst);
     }
 }
 
@@ -281,7 +330,30 @@ fn add_to_sysroot(out_dir: &Path, sysroot_dst: &Path) {
         let (_, path) = paths.iter().map(|path| {
             (mtime(&path).seconds(), path)
         }).max().unwrap();
-        t!(fs::hard_link(&path,
-                         sysroot_dst.join(path.file_name().unwrap())));
+        copy(&path, &sysroot_dst.join(path.file_name().unwrap()));
     }
 }
+
+/// Build a tool in `src/tools`
+///
+/// This will build the specified tool with the specified `host` compiler in
+/// `stage` into the normal cargo output directory.
+pub fn tool(build: &Build, stage: u32, host: &str, tool: &str) {
+    println!("Building stage{} tool {} ({})", stage, tool, host);
+
+    let compiler = Compiler::new(stage, host);
+
+    // FIXME: need to clear out previous tool and ideally deps, may require
+    //        isolating output directories or require a pseudo shim step to
+    //        clear out all the info.
+    //
+    //        Maybe when libstd is compiled it should clear out the rustc of the
+    //        corresponding stage?
+    // let out_dir = build.cargo_out(stage, &host, Mode::Librustc, target);
+    // build.clear_if_dirty(&out_dir, &libstd_shim(build, stage, &host, target));
+
+    let mut cargo = build.cargo(&compiler, Mode::Tool, host, "build");
+    cargo.arg("--manifest-path")
+         .arg(build.src.join(format!("src/tools/{}/Cargo.toml", tool)));
+    build.run(&mut cargo);
+}
diff --git a/src/bootstrap/build/dist.rs b/src/bootstrap/build/dist.rs
new file mode 100644 (file)
index 0000000..6ae652b
--- /dev/null
@@ -0,0 +1,292 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::fs::{self, File};
+use std::io::Write;
+use std::path::{PathBuf, Path};
+use std::process::Command;
+
+use build::{Build, Compiler};
+use build::util::{cp_r, libdir, is_dylib};
+
+fn package_vers(build: &Build) -> &str {
+    match &build.config.channel[..] {
+        "stable" => &build.release,
+        "beta" => "beta",
+        "nightly" => "nightly",
+        _ => &build.release,
+    }
+}
+
+fn distdir(build: &Build) -> PathBuf {
+    build.out.join("dist")
+}
+
+fn tmpdir(build: &Build) -> PathBuf {
+    build.out.join("tmp/dist")
+}
+
+pub fn docs(build: &Build, stage: u32, host: &str) {
+    println!("Dist docs stage{} ({})", stage, host);
+    let name = format!("rust-docs-{}", package_vers(build));
+    let image = tmpdir(build).join(format!("{}-{}-image", name, name));
+    let _ = fs::remove_dir_all(&image);
+
+    let dst = image.join("share/doc/rust/html");
+    t!(fs::create_dir_all(&dst));
+    let src = build.out.join(host).join("doc");
+    cp_r(&src, &dst);
+
+    let mut cmd = Command::new("sh");
+    cmd.arg(sanitize_sh(&build.src.join("src/rust-installer/gen-installer.sh")))
+       .arg("--product-name=Rust-Documentation")
+       .arg("--rel-manifest-dir=rustlib")
+       .arg("--success-message=Rust-documentation-is-installed.")
+       .arg(format!("--image-dir={}", sanitize_sh(&image)))
+       .arg(format!("--work-dir={}", sanitize_sh(&tmpdir(build))))
+       .arg(format!("--output-dir={}", sanitize_sh(&distdir(build))))
+       .arg(format!("--package-name={}-{}", name, host))
+       .arg("--component-name=rust-docs")
+       .arg("--legacy-manifest-dirs=rustlib,cargo")
+       .arg("--bulk-dirs=share/doc/rust/html");
+    build.run(&mut cmd);
+    t!(fs::remove_dir_all(&image));
+
+    // As part of this step, *also* copy the docs directory to a directory which
+    // buildbot typically uploads.
+    if host == build.config.build {
+        let dst = distdir(build).join("doc").join(&build.package_vers);
+        t!(fs::create_dir_all(&dst));
+        cp_r(&src, &dst);
+    }
+}
+
+pub fn mingw(build: &Build, host: &str) {
+    println!("Dist mingw ({})", host);
+    let name = format!("rust-mingw-{}", package_vers(build));
+    let image = tmpdir(build).join(format!("{}-{}-image", name, host));
+    let _ = fs::remove_dir_all(&image);
+
+    // The first argument to the script is a "temporary directory" which is just
+    // thrown away (this contains the runtime DLLs included in the rustc package
+    // above) and the second argument is where to place all the MinGW components
+    // (which is what we want).
+    //
+    // FIXME: this script should be rewritten into Rust
+    let mut cmd = Command::new("python");
+    cmd.arg(build.src.join("src/etc/make-win-dist.py"))
+       .arg(tmpdir(build))
+       .arg(&image)
+       .arg(host);
+    build.run(&mut cmd);
+
+    let mut cmd = Command::new("sh");
+    cmd.arg(sanitize_sh(&build.src.join("src/rust-installer/gen-installer.sh")))
+       .arg("--product-name=Rust-MinGW")
+       .arg("--rel-manifest-dir=rustlib")
+       .arg("--success-message=Rust-MinGW-is-installed.")
+       .arg(format!("--image-dir={}", sanitize_sh(&image)))
+       .arg(format!("--work-dir={}", sanitize_sh(&tmpdir(build))))
+       .arg(format!("--output-dir={}", sanitize_sh(&distdir(build))))
+       .arg(format!("--package-name={}-{}", name, host))
+       .arg("--component-name=rust-mingw")
+       .arg("--legacy-manifest-dirs=rustlib,cargo");
+    build.run(&mut cmd);
+    t!(fs::remove_dir_all(&image));
+}
+
+pub fn rustc(build: &Build, stage: u32, host: &str) {
+    println!("Dist rustc stage{} ({})", stage, host);
+    let name = format!("rustc-{}", package_vers(build));
+    let image = tmpdir(build).join(format!("{}-{}-image", name, host));
+    let _ = fs::remove_dir_all(&image);
+    let overlay = tmpdir(build).join(format!("{}-{}-overlay", name, host));
+    let _ = fs::remove_dir_all(&overlay);
+
+    // Prepare the rustc "image", what will actually end up getting installed
+    prepare_image(build, stage, host, &image);
+
+    // Prepare the overlay which is part of the tarball but won't actually be
+    // installed
+    t!(fs::create_dir_all(&overlay));
+    let cp = |file: &str| {
+        install(&build.src.join(file), &overlay, 0o644);
+    };
+    cp("COPYRIGHT");
+    cp("LICENSE-APACHE");
+    cp("LICENSE-MIT");
+    cp("README.md");
+    // tiny morsel of metadata is used by rust-packaging
+    let version = &build.version;
+    t!(t!(File::create(overlay.join("version"))).write_all(version.as_bytes()));
+
+    // On MinGW we've got a few runtime DLL dependencies that we need to
+    // include. The first argument to this script is where to put these DLLs
+    // (the image we're creating), and the second argument is a junk directory
+    // to ignore all other MinGW stuff the script creates.
+    //
+    // On 32-bit MinGW we're always including a DLL which needs some extra
+    // licenses to distribute. On 64-bit MinGW we don't actually distribute
+    // anything requiring us to distribute a license, but it's likely the
+    // install will *also* include the rust-mingw package, which also needs
+    // licenses, so to be safe we just include it here in all MinGW packages.
+    //
+    // FIXME: this script should be rewritten into Rust
+    if host.contains("pc-windows-gnu") {
+        let mut cmd = Command::new("python");
+        cmd.arg(build.src.join("src/etc/make-win-dist.py"))
+           .arg(&image)
+           .arg(tmpdir(build))
+           .arg(host);
+        build.run(&mut cmd);
+
+        let dst = image.join("share/doc");
+        t!(fs::create_dir_all(&dst));
+        cp_r(&build.src.join("src/etc/third-party"), &dst);
+    }
+
+    // Finally, wrap everything up in a nice tarball!
+    let mut cmd = Command::new("sh");
+    cmd.arg(sanitize_sh(&build.src.join("src/rust-installer/gen-installer.sh")))
+       .arg("--product-name=Rust")
+       .arg("--rel-manifest-dir=rustlib")
+       .arg("--success-message=Rust-is-ready-to-roll.")
+       .arg(format!("--image-dir={}", sanitize_sh(&image)))
+       .arg(format!("--work-dir={}", sanitize_sh(&tmpdir(build))))
+       .arg(format!("--output-dir={}", sanitize_sh(&distdir(build))))
+       .arg(format!("--non-installed-overlay={}", sanitize_sh(&overlay)))
+       .arg(format!("--package-name={}-{}", name, host))
+       .arg("--component-name=rustc")
+       .arg("--legacy-manifest-dirs=rustlib,cargo");
+    build.run(&mut cmd);
+    t!(fs::remove_dir_all(&image));
+    t!(fs::remove_dir_all(&overlay));
+
+    fn prepare_image(build: &Build, stage: u32, host: &str, image: &Path) {
+        let src = build.sysroot(&Compiler::new(stage, host));
+        let libdir = libdir(host);
+
+        // Copy rustc/rustdoc binaries
+        t!(fs::create_dir_all(image.join("bin")));
+        cp_r(&src.join("bin"), &image.join("bin"));
+
+        // Copy runtime DLLs needed by the compiler
+        if libdir != "bin" {
+            t!(fs::create_dir_all(image.join(libdir)));
+            for entry in t!(src.join(libdir).read_dir()).map(|e| t!(e)) {
+                let name = entry.file_name();
+                if let Some(s) = name.to_str() {
+                    if is_dylib(s) {
+                        install(&entry.path(), &image.join(libdir), 0o644);
+                    }
+                }
+            }
+        }
+
+        // Man pages
+        t!(fs::create_dir_all(image.join("share/man/man1")));
+        cp_r(&build.src.join("man"), &image.join("share/man/man1"));
+
+        // Debugger scripts
+        let cp_debugger_script = |file: &str| {
+            let dst = image.join("lib/rustlib/etc");
+            t!(fs::create_dir_all(&dst));
+            install(&build.src.join("src/etc/").join(file), &dst, 0o644);
+        };
+        if host.contains("windows") {
+            // no debugger scripts
+        } else if host.contains("darwin") {
+            // lldb debugger scripts
+            install(&build.src.join("src/etc/rust-lldb"), &image.join("bin"),
+                    0o755);
+
+            cp_debugger_script("lldb_rust_formatters.py");
+            cp_debugger_script("debugger_pretty_printers_common.py");
+        } else {
+            // gdb debugger scripts
+            install(&build.src.join("src/etc/rust-gdb"), &image.join("bin"),
+                    0o755);
+
+            cp_debugger_script("gdb_load_rust_pretty_printers.py");
+            cp_debugger_script("gdb_rust_pretty_printing.py");
+            cp_debugger_script("debugger_pretty_printers_common.py");
+        }
+
+        // Misc license info
+        let cp = |file: &str| {
+            install(&build.src.join(file), &image.join("share/doc/rust"), 0o644);
+        };
+        t!(fs::create_dir_all(&image.join("share/doc/rust")));
+        cp("COPYRIGHT");
+        cp("LICENSE-APACHE");
+        cp("LICENSE-MIT");
+        cp("README.md");
+    }
+}
+
+pub fn std(build: &Build, compiler: &Compiler, target: &str) {
+    println!("Dist std stage{} ({} -> {})", compiler.stage, compiler.host,
+             target);
+    let name = format!("rust-std-{}", package_vers(build));
+    let image = tmpdir(build).join(format!("{}-{}-image", name, target));
+    let _ = fs::remove_dir_all(&image);
+
+    let dst = image.join("lib/rustlib").join(target);
+    t!(fs::create_dir_all(&dst));
+    let src = build.sysroot(compiler).join("lib/rustlib");
+    cp_r(&src.join(target), &dst);
+
+    let mut cmd = Command::new("sh");
+    cmd.arg(sanitize_sh(&build.src.join("src/rust-installer/gen-installer.sh")))
+       .arg("--product-name=Rust")
+       .arg("--rel-manifest-dir=rustlib")
+       .arg("--success-message=std-is-standing-at-the-ready.")
+       .arg(format!("--image-dir={}", sanitize_sh(&image)))
+       .arg(format!("--work-dir={}", sanitize_sh(&tmpdir(build))))
+       .arg(format!("--output-dir={}", sanitize_sh(&distdir(build))))
+       .arg(format!("--package-name={}-{}", name, target))
+       .arg(format!("--component-name=rust-std-{}", target))
+       .arg("--legacy-manifest-dirs=rustlib,cargo");
+    build.run(&mut cmd);
+    t!(fs::remove_dir_all(&image));
+}
+
+fn install(src: &Path, dstdir: &Path, perms: u32) {
+    let dst = dstdir.join(src.file_name().unwrap());
+    t!(fs::copy(src, &dst));
+    chmod(&dst, perms);
+}
+
+#[cfg(unix)]
+fn chmod(path: &Path, perms: u32) {
+    use std::os::unix::fs::*;
+    t!(fs::set_permissions(path, fs::Permissions::from_mode(perms)));
+}
+#[cfg(windows)]
+fn chmod(_path: &Path, _perms: u32) {}
+
+// We have to run a few shell scripts, which choke quite a bit on both `\`
+// characters and on `C:\` paths, so normalize both of them away.
+fn sanitize_sh(path: &Path) -> String {
+    let path = path.to_str().unwrap().replace("\\", "/");
+    return change_drive(&path).unwrap_or(path);
+
+    fn change_drive(s: &str) -> Option<String> {
+        let mut ch = s.chars();
+        let drive = ch.next().unwrap_or('C');
+        if ch.next() != Some(':') {
+            return None
+        }
+        if ch.next() != Some('/') {
+            return None
+        }
+        Some(format!("/{}/{}", drive, &s[drive.len_utf8() + 2..]))
+    }
+}
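The dist steps above all funnel through gen-installer.sh via sh, and the trailing sanitize_sh/change_drive pair exists because MSYS shells choke on backslashes and on C:\-style drive prefixes. A minimal standalone sketch of that same normalization (the name to_msys_path and the paths in main are illustrative, not part of the diff):

    fn to_msys_path(path: &str) -> String {
        // Backslashes become forward slashes: C:\rust\build -> C:/rust/build
        let path = path.replace("\\", "/");
        let mut ch = path.chars();
        let drive = ch.next().unwrap_or('C');
        if ch.next() == Some(':') && ch.next() == Some('/') {
            // Drive prefixes become MSYS-style mount points: C:/rust -> /C/rust
            return format!("/{}/{}", drive, &path[drive.len_utf8() + 2..]);
        }
        path
    }

    fn main() {
        assert_eq!(to_msys_path(r"C:\rust\build"), "/C/rust/build");
        assert_eq!(to_msys_path("/home/user/build"), "/home/user/build");
    }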
index 937a234bec86dda5f21c380876d5a6ae51910c31..5782dd5ec28dd0c23b98ee9fc0e5b75e17458774 100644 (file)
@@ -8,25 +8,26 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use std::path::Path;
 use std::fs::{self, File};
 use std::io::prelude::*;
+use std::path::Path;
+use std::process::Command;
 
-use build::{Build, Compiler};
-use build::util::up_to_date;
+use build::{Build, Compiler, Mode};
+use build::util::{up_to_date, cp_r};
 
-pub fn rustbook(build: &Build, stage: u32, host: &str, name: &str, out: &Path) {
+pub fn rustbook(build: &Build, stage: u32, target: &str, name: &str, out: &Path) {
     t!(fs::create_dir_all(out));
 
     let out = out.join(name);
-    let compiler = Compiler::new(stage, host);
+    let compiler = Compiler::new(stage, &build.config.build);
     let src = build.src.join("src/doc").join(name);
     let index = out.join("index.html");
     let rustbook = build.tool(&compiler, "rustbook");
     if up_to_date(&src, &index) && up_to_date(&rustbook, &index) {
         return
     }
-    println!("Rustbook stage{} ({}) - {}", stage, host, name);
+    println!("Rustbook stage{} ({}) - {}", stage, target, name);
     let _ = fs::remove_dir_all(&out);
     build.run(build.tool_cmd(&compiler, "rustbook")
                    .arg("build")
@@ -34,11 +35,11 @@ pub fn rustbook(build: &Build, stage: u32, host: &str, name: &str, out: &Path) {
                    .arg(out));
 }
 
-pub fn standalone(build: &Build, stage: u32, host: &str, out: &Path) {
-    println!("Documenting stage{} standalone ({})", stage, host);
+pub fn standalone(build: &Build, stage: u32, target: &str, out: &Path) {
+    println!("Documenting stage{} standalone ({})", stage, target);
     t!(fs::create_dir_all(out));
 
-    let compiler = Compiler::new(stage, host);
+    let compiler = Compiler::new(stage, &build.config.build);
 
     let favicon = build.src.join("src/doc/favicon.inc");
     let footer = build.src.join("src/doc/footer.inc");
@@ -69,7 +70,7 @@ pub fn standalone(build: &Build, stage: u32, host: &str, out: &Path) {
         }
 
         let html = out.join(filename).with_extension("html");
-        let rustdoc = build.tool(&compiler, "rustdoc");
+        let rustdoc = build.rustdoc(&compiler);
         if up_to_date(&path, &html) &&
            up_to_date(&footer, &html) &&
            up_to_date(&favicon, &html) &&
@@ -79,7 +80,8 @@ pub fn standalone(build: &Build, stage: u32, host: &str, out: &Path) {
             continue
         }
 
-        let mut cmd = build.tool_cmd(&compiler, "rustdoc");
+        let mut cmd = Command::new(&rustdoc);
+        build.add_rustc_lib_path(&compiler, &mut cmd);
         cmd.arg("--html-after-content").arg(&footer)
            .arg("--html-before-content").arg(&version_info)
            .arg("--html-in-header").arg(&favicon)
@@ -102,3 +104,68 @@ pub fn standalone(build: &Build, stage: u32, host: &str, out: &Path) {
         build.run(&mut cmd);
     }
 }
+
+pub fn std(build: &Build, stage: u32, target: &str, out: &Path) {
+    println!("Documenting stage{} std ({})", stage, target);
+    t!(fs::create_dir_all(out));
+    let compiler = Compiler::new(stage, &build.config.build);
+    let out_dir = build.stage_out(&compiler, Mode::Libstd)
+                       .join(target).join("doc");
+    let rustdoc = build.rustdoc(&compiler);
+
+    build.clear_if_dirty(&out_dir, &rustdoc);
+
+    let mut cargo = build.cargo(&compiler, Mode::Libstd, target, "doc");
+    cargo.arg("--manifest-path")
+         .arg(build.src.join("src/rustc/std_shim/Cargo.toml"))
+         .arg("--features").arg(build.std_features());
+    build.run(&mut cargo);
+    cp_r(&out_dir, out)
+}
+
+pub fn test(build: &Build, stage: u32, target: &str, out: &Path) {
+    println!("Documenting stage{} test ({})", stage, target);
+    let compiler = Compiler::new(stage, &build.config.build);
+    let out_dir = build.stage_out(&compiler, Mode::Libtest)
+                       .join(target).join("doc");
+    let rustdoc = build.rustdoc(&compiler);
+
+    build.clear_if_dirty(&out_dir, &rustdoc);
+
+    let mut cargo = build.cargo(&compiler, Mode::Libtest, target, "doc");
+    cargo.arg("--manifest-path")
+         .arg(build.src.join("src/rustc/test_shim/Cargo.toml"));
+    build.run(&mut cargo);
+    cp_r(&out_dir, out)
+}
+
+pub fn rustc(build: &Build, stage: u32, target: &str, out: &Path) {
+    println!("Documenting stage{} compiler ({})", stage, target);
+    let compiler = Compiler::new(stage, &build.config.build);
+    let out_dir = build.stage_out(&compiler, Mode::Librustc)
+                       .join(target).join("doc");
+    let rustdoc = build.rustdoc(&compiler);
+    if !up_to_date(&rustdoc, &out_dir.join("rustc/index.html")) {
+        t!(fs::remove_dir_all(&out_dir));
+    }
+    let mut cargo = build.cargo(&compiler, Mode::Librustc, target, "doc");
+    cargo.arg("--manifest-path")
+         .arg(build.src.join("src/rustc/Cargo.toml"))
+         .arg("--features").arg(build.rustc_features());
+    build.run(&mut cargo);
+    cp_r(&out_dir, out)
+}
+
+pub fn error_index(build: &Build, stage: u32, target: &str, out: &Path) {
+    println!("Documenting stage{} error index ({})", stage, target);
+    t!(fs::create_dir_all(out));
+    let compiler = Compiler::new(stage, &build.config.build);
+    let mut index = build.tool_cmd(&compiler, "error_index_generator");
+    index.arg("html");
+    index.arg(out.join("error-index.html"));
+
+    // FIXME: shouldn't have to pass this env var
+    index.env("CFG_BUILD", &build.config.build);
+
+    build.run(&mut index);
+}
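The documentation steps above skip work by comparing modification times (up_to_date from build::util) and by wiping stale output whenever the rustdoc binary itself changed (clear_if_dirty). A simplified sketch of that kind of freshness check, reduced to a single input file (output_is_fresh and the paths in main are illustrative; the real helper also handles whole directories):

    use std::fs;
    use std::path::Path;

    fn output_is_fresh(src: &Path, dst: &Path) -> bool {
        let mtime = |p: &Path| fs::metadata(p).and_then(|m| m.modified()).ok();
        match (mtime(src), mtime(dst)) {
            // Output exists and is at least as new as the input: skip the rebuild.
            (Some(s), Some(d)) => s <= d,
            // Missing input or output: rebuild to be safe.
            _ => false,
        }
    }

    fn main() {
        let fresh = output_is_fresh(Path::new("src/doc/book/README.md"),
                                    Path::new("doc/book/index.html"));
        println!("book docs up to date: {}", fresh);
    }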
index 98d821b8b90235c71d4ed718e3e1b8544024a29a..248bf6cb4ea153f0e32b7dc67131f422a70e6ebc 100644 (file)
@@ -30,9 +30,11 @@ macro_rules! t {
 
 mod cc;
 mod channel;
+mod check;
 mod clean;
 mod compile;
 mod config;
+mod dist;
 mod doc;
 mod flags;
 mod native;
@@ -75,6 +77,7 @@ pub struct Build {
     short_ver_hash: Option<String>,
     ver_date: Option<String>,
     version: String,
+    package_vers: String,
     bootstrap_key: String,
 
     // Runtime state filled in later on
@@ -83,6 +86,13 @@ pub struct Build {
     compiler_rt_built: RefCell<HashMap<String, PathBuf>>,
 }
 
+pub enum Mode {
+    Libstd,
+    Libtest,
+    Librustc,
+    Tool,
+}
+
 impl Build {
     pub fn new(flags: Flags, config: Config) -> Build {
         let cwd = t!(env::current_dir());
@@ -114,6 +124,7 @@ impl Build {
             ver_date: None,
             version: String::new(),
             bootstrap_key: String::new(),
+            package_vers: String::new(),
             cc: HashMap::new(),
             cxx: HashMap::new(),
             compiler_rt_built: RefCell::new(HashMap::new()),
@@ -131,9 +142,13 @@ impl Build {
             return clean::clean(self);
         }
 
+        self.verbose("finding compilers");
         cc::find(self);
+        self.verbose("running sanity check");
         sanity::check(self);
+        self.verbose("collecting channel variables");
         channel::collect(self);
+        self.verbose("updating submodules");
         self.update_submodules();
 
         for target in step::all(self) {
@@ -145,19 +160,23 @@ impl Build {
                 CompilerRt { _dummy } => {
                     native::compiler_rt(self, target.target);
                 }
-                Libstd { stage, compiler } => {
-                    compile::std(self, stage, target.target, &compiler);
+                Libstd { compiler } => {
+                    compile::std(self, target.target, &compiler);
+                }
+                Libtest { compiler } => {
+                    compile::test(self, target.target, &compiler);
+                }
+                Librustc { compiler } => {
+                    compile::rustc(self, target.target, &compiler);
                 }
-                Librustc { stage, compiler } => {
-                    compile::rustc(self, stage, target.target, &compiler);
+                LibstdLink { compiler, host } => {
+                    compile::std_link(self, target.target, &compiler, host);
                 }
-                LibstdLink { stage, compiler, host } => {
-                    compile::std_link(self, stage, target.target,
-                                      &compiler, host);
+                LibtestLink { compiler, host } => {
+                    compile::test_link(self, target.target, &compiler, host);
                 }
-                LibrustcLink { stage, compiler, host } => {
-                    compile::rustc_link(self, stage, target.target,
-                                        &compiler, host);
+                LibrustcLink { compiler, host } => {
+                    compile::rustc_link(self, target.target, &compiler, host);
                 }
                 Rustc { stage: 0 } => {
                     // nothing to do...
@@ -165,6 +184,19 @@ impl Build {
                 Rustc { stage } => {
                     compile::assemble_rustc(self, stage, target.target);
                 }
+                ToolLinkchecker { stage } => {
+                    compile::tool(self, stage, target.target, "linkchecker");
+                }
+                ToolRustbook { stage } => {
+                    compile::tool(self, stage, target.target, "rustbook");
+                }
+                ToolErrorIndex { stage } => {
+                    compile::tool(self, stage, target.target,
+                                  "error_index_generator");
+                }
+                ToolCargoTest { stage } => {
+                    compile::tool(self, stage, target.target, "cargotest");
+                }
                 DocBook { stage } => {
                     doc::rustbook(self, stage, target.target, "book", &doc_out);
                 }
@@ -179,7 +211,34 @@ impl Build {
                 DocStandalone { stage } => {
                     doc::standalone(self, stage, target.target, &doc_out);
                 }
-                Doc { .. } => {} // pseudo-step
+                DocStd { stage } => {
+                    doc::std(self, stage, target.target, &doc_out);
+                }
+                DocTest { stage } => {
+                    doc::test(self, stage, target.target, &doc_out);
+                }
+                DocRustc { stage } => {
+                    doc::rustc(self, stage, target.target, &doc_out);
+                }
+                DocErrorIndex { stage } => {
+                    doc::error_index(self, stage, target.target, &doc_out);
+                }
+
+                CheckLinkcheck { stage } => {
+                    check::linkcheck(self, stage, target.target);
+                }
+                CheckCargoTest { stage } => {
+                    check::cargotest(self, stage, target.target);
+                }
+
+                DistDocs { stage } => dist::docs(self, stage, target.target),
+                DistMingw { _dummy } => dist::mingw(self, target.target),
+                DistRustc { stage } => dist::rustc(self, stage, target.target),
+                DistStd { compiler } => dist::std(self, &compiler, target.target),
+
+                Dist { .. } |
+                Doc { .. } | // pseudo-steps
+                Check { .. } => {}
             }
         }
     }
@@ -229,43 +288,51 @@ impl Build {
     /// This will create a `Command` that represents a pending execution of
     /// Cargo for the specified stage, whether or not the standard library is
     /// being built, and using the specified compiler targeting `target`.
-    // FIXME: aren't stage/compiler duplicated?
-    fn cargo(&self, stage: u32, compiler: &Compiler, is_std: bool,
-             target: &str, cmd: &str) -> Command {
+    fn cargo(&self,
+             compiler: &Compiler,
+             mode: Mode,
+             target: &str,
+             cmd: &str) -> Command {
         let mut cargo = Command::new(&self.cargo);
-        let host = compiler.host;
-        let out_dir = self.stage_out(stage, host, is_std);
+        let out_dir = self.stage_out(compiler, mode);
         cargo.env("CARGO_TARGET_DIR", out_dir)
              .arg(cmd)
-             .arg("--target").arg(target)
-             .arg("-j").arg(self.jobs().to_string());
+             .arg("-j").arg(self.jobs().to_string())
+             .arg("--target").arg(target);
 
         // Customize the compiler we're running. Specify the compiler to cargo
         // as our shim and then pass it some various options used to configure
         // how the actual compiler itself is called.
         cargo.env("RUSTC", self.out.join("bootstrap/debug/rustc"))
              .env("RUSTC_REAL", self.compiler_path(compiler))
-             .env("RUSTC_STAGE", self.stage_arg(stage, compiler).to_string())
+             .env("RUSTC_STAGE", compiler.stage.to_string())
              .env("RUSTC_DEBUGINFO", self.config.rust_debuginfo.to_string())
              .env("RUSTC_CODEGEN_UNITS",
                   self.config.rust_codegen_units.to_string())
              .env("RUSTC_DEBUG_ASSERTIONS",
                   self.config.rust_debug_assertions.to_string())
              .env("RUSTC_SNAPSHOT", &self.rustc)
-             .env("RUSTC_SYSROOT", self.sysroot(stage, host))
+             .env("RUSTC_SYSROOT", self.sysroot(compiler))
              .env("RUSTC_SNAPSHOT_LIBDIR", self.rustc_snapshot_libdir())
-             .env("RUSTC_FLAGS", self.rustc_flags(target).join(" "))
              .env("RUSTC_RPATH", self.config.rust_rpath.to_string())
-             .env("RUSTDOC", self.tool(compiler, "rustdoc"));
+             .env("RUSTDOC", self.out.join("bootstrap/debug/rustdoc"))
+             .env("RUSTDOC_REAL", self.rustdoc(compiler))
+             .env("RUSTC_FLAGS", self.rustc_flags(target).join(" "));
 
-        // Specify some variuos options for build scripts used throughout the
-        // build.
+        // Specify some various options for build scripts used throughout
+        // the build.
         //
         // FIXME: the guard against msvc shouldn't need to be here
         if !target.contains("msvc") {
             cargo.env(format!("CC_{}", target), self.cc(target))
                  .env(format!("AR_{}", target), self.ar(target))
-                 .env(format!("CFLAGS_{}", target), self.cflags(target));
+                 .env(format!("CFLAGS_{}", target), self.cflags(target).join(" "));
+        }
+
+        // If we're building for OSX, inform the compiler and the linker that
+        // we want to build a compiler runnable on 10.7
+        if target.contains("apple-darwin") {
+            cargo.env("MACOSX_DEPLOYMENT_TARGET", "10.7");
         }
 
         // Environment variables *required* throughout the build
@@ -288,43 +355,38 @@ impl Build {
         if compiler.is_snapshot(self) {
             self.rustc.clone()
         } else {
-            self.sysroot(compiler.stage, compiler.host).join("bin")
-                .join(exe("rustc", compiler.host))
+            self.sysroot(compiler).join("bin").join(exe("rustc", compiler.host))
         }
     }
 
-    /// Get the specified tool next to the specified compiler
+    /// Get the specified tool built by the specified compiler
     fn tool(&self, compiler: &Compiler, tool: &str) -> PathBuf {
-        if compiler.is_snapshot(self) {
-            assert!(tool == "rustdoc", "no tools other than rustdoc in stage0");
-            let mut rustdoc = self.rustc.clone();
-            rustdoc.pop();
-            rustdoc.push(exe("rustdoc", &self.config.build));
-            return rustdoc
-        }
-        let (stage, host) = (compiler.stage, compiler.host);
-        self.cargo_out(stage - 1, host, false, host).join(exe(tool, host))
+        self.cargo_out(compiler, Mode::Tool, compiler.host)
+            .join(exe(tool, compiler.host))
+    }
+
+    /// Get the `rustdoc` executable next to the specified compiler
+    fn rustdoc(&self, compiler: &Compiler) -> PathBuf {
+        let mut rustdoc = self.compiler_path(compiler);
+        rustdoc.pop();
+        rustdoc.push(exe("rustdoc", compiler.host));
+        return rustdoc
     }
 
     /// Get a `Command` which is ready to run `tool` in `stage` built for
     /// `host`.
-    #[allow(dead_code)] // this will be used soon
     fn tool_cmd(&self, compiler: &Compiler, tool: &str) -> Command {
         let mut cmd = Command::new(self.tool(&compiler, tool));
         let host = compiler.host;
-        let stage = compiler.stage;
         let paths = vec![
-            self.cargo_out(stage - 1, host, true, host).join("deps"),
-            self.cargo_out(stage - 1, host, false, host).join("deps"),
+            self.cargo_out(compiler, Mode::Libstd, host).join("deps"),
+            self.cargo_out(compiler, Mode::Libtest, host).join("deps"),
+            self.cargo_out(compiler, Mode::Librustc, host).join("deps"),
         ];
         add_lib_path(paths, &mut cmd);
         return cmd
     }
 
-    fn stage_arg(&self, stage: u32, compiler: &Compiler) -> u32 {
-        if stage == 0 && compiler.host != self.config.build {1} else {stage}
-    }
-
     /// Get the space-separated set of activated features for the standard
     /// library.
     fn std_features(&self) -> String {
@@ -339,15 +401,11 @@ impl Build {
     }
 
     /// Get the space-separated set of activated features for the compiler.
-    fn rustc_features(&self, stage: u32) -> String {
+    fn rustc_features(&self) -> String {
         let mut features = String::new();
         if self.config.use_jemalloc {
             features.push_str(" jemalloc");
         }
-        if stage > 0 {
-            features.push_str(" rustdoc");
-            features.push_str(" rustbook");
-        }
         return features
     }
 
@@ -357,35 +415,41 @@ impl Build {
         if self.config.rust_optimize {"release"} else {"debug"}
     }
 
-    fn sysroot(&self, stage: u32, host: &str) -> PathBuf {
-        if stage == 0 {
-            self.stage_out(stage, host, false)
+    fn sysroot(&self, compiler: &Compiler) -> PathBuf {
+        if compiler.stage == 0 {
+            self.out.join(compiler.host).join("stage0-sysroot")
         } else {
-            self.out.join(host).join(format!("stage{}", stage))
+            self.out.join(compiler.host).join(format!("stage{}", compiler.stage))
         }
     }
 
-    fn sysroot_libdir(&self, stage: u32, host: &str, target: &str) -> PathBuf {
-        self.sysroot(stage, host).join("lib").join("rustlib")
+    fn sysroot_libdir(&self, compiler: &Compiler, target: &str) -> PathBuf {
+        self.sysroot(compiler).join("lib").join("rustlib")
             .join(target).join("lib")
     }
 
     /// Returns the root directory for all output generated in a particular
     /// stage when running with a particular host compiler.
     ///
-    /// The `is_std` flag indicates whether the root directory is for the
-    /// bootstrap of the standard library or for the compiler.
-    fn stage_out(&self, stage: u32, host: &str, is_std: bool) -> PathBuf {
-        self.out.join(host)
-            .join(format!("stage{}{}", stage, if is_std {"-std"} else {"-rustc"}))
+    /// The mode indicates what the root directory is for.
+    fn stage_out(&self, compiler: &Compiler, mode: Mode) -> PathBuf {
+        let suffix = match mode {
+            Mode::Libstd => "-std",
+            Mode::Libtest => "-test",
+            Mode::Tool | Mode::Librustc => "-rustc",
+        };
+        self.out.join(compiler.host)
+                .join(format!("stage{}{}", compiler.stage, suffix))
     }
 
     /// Returns the root output directory for all Cargo output in a given stage,
     /// running a particular compiler, whether or not we're building the
     /// standard library, and targeting the specified architecture.
-    fn cargo_out(&self, stage: u32, host: &str, is_std: bool,
+    fn cargo_out(&self,
+                 compiler: &Compiler,
+                 mode: Mode,
                  target: &str) -> PathBuf {
-        self.stage_out(stage, host, is_std).join(target).join(self.cargo_dir())
+        self.stage_out(compiler, mode).join(target).join(self.cargo_dir())
     }
 
     /// Root output directory for LLVM compiled for `target`
@@ -411,8 +475,7 @@ impl Build {
         if compiler.is_snapshot(self) {
             self.rustc_snapshot_libdir()
         } else {
-            self.sysroot(compiler.stage, compiler.host)
-                .join(libdir(compiler.host))
+            self.sysroot(compiler).join(libdir(compiler.host))
         }
     }
 
@@ -440,11 +503,20 @@ impl Build {
         self.cc[target].0.path()
     }
 
-    fn cflags(&self, target: &str) -> String {
-        self.cc[target].0.args().iter()
-            .map(|s| s.to_string_lossy())
-            .collect::<Vec<_>>()
-            .join(" ")
+    fn cflags(&self, target: &str) -> Vec<String> {
+        let mut base = self.cc[target].0.args().iter()
+                           .map(|s| s.to_string_lossy().into_owned())
+                           .collect::<Vec<_>>();
+
+        // If we're compiling on OSX then we add a few unconditional flags
+        // indicating that we want libc++ (more filled out than libstdc++) and
+        // we want to compile for 10.7. This way we can ensure that
+        // LLVM/jemalloc/etc are all properly compiled.
+        if target.contains("apple-darwin") {
+            base.push("-stdlib=libc++".into());
+            base.push("-mmacosx-version-min=10.7".into());
+        }
+        return base
     }
 
     fn ar(&self, target: &str) -> &Path {
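The Mode enum introduced above replaces the old stage/is_std plumbing: cargo(), stage_out() and cargo_out() now key everything off the compiler in use plus what kind of crate is being built, and each mode gets its own per-stage output directory. A small illustration of the resulting directory naming (stage_dir is a hypothetical free-standing mirror of stage_out):

    enum Mode { Libstd, Libtest, Librustc, Tool }

    fn stage_dir(host: &str, stage: u32, mode: Mode) -> String {
        let suffix = match mode {
            Mode::Libstd => "-std",
            Mode::Libtest => "-test",
            // tools share the compiler's output directory
            Mode::Tool | Mode::Librustc => "-rustc",
        };
        format!("{}/stage{}{}", host, stage, suffix)
    }

    fn main() {
        assert_eq!(stage_dir("x86_64-unknown-linux-gnu", 1, Mode::Libtest),
                   "x86_64-unknown-linux-gnu/stage1-test");
    }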
index a9d84b60fbff81d244726559d3aadd2adcd2c5f3..bf0494bcd8c8c5a4240ca05579d8837d257b2700 100644 (file)
@@ -86,6 +86,9 @@ pub fn llvm(build: &Build, target: &str) {
               .define("CMAKE_CXX_COMPILER", build.cxx(target));
         }
         cfg.build_arg("-j").build_arg(build.jobs().to_string());
+
+        cfg.define("CMAKE_C_FLAGS", build.cflags(target).join(" "));
+        cfg.define("CMAKE_CXX_FLAGS", build.cflags(target).join(" "));
     }
 
     // FIXME: we don't actually need to build all LLVM tools and all LLVM
@@ -113,7 +116,9 @@ pub fn compiler_rt(build: &Build, target: &str) {
     let dst = build.compiler_rt_out(target);
     let arch = target.split('-').next().unwrap();
     let mode = if build.config.rust_optimize {"Release"} else {"Debug"};
-    let (dir, build_target, libname) = if target.contains("linux") {
+    let (dir, build_target, libname) = if target.contains("linux") ||
+                                          target.contains("freebsd") ||
+                                          target.contains("netbsd") {
         let os = if target.contains("android") {"-android"} else {""};
         let arch = if arch.starts_with("arm") && target.contains("eabihf") {
             "armhf"
index 40f4c7076092d78f5e70dcfe16182f4fcf0a195a..6ce2749638841b4e86cdc9dd64b56926bb9d257c 100644 (file)
@@ -79,7 +79,7 @@ pub fn check(build: &mut Build) {
         }
 
         // Make sure musl-root is valid if specified
-        if target.contains("musl") {
+        if target.contains("musl") && (target.contains("x86_64") || target.contains("i686")) {
             match build.config.musl_root {
                 Some(ref root) => {
                     if fs::metadata(root.join("lib/libc.a")).is_err() {
@@ -119,4 +119,16 @@ $ pacman -R cmake && pacman -S mingw-w64-x86_64-cmake
             }
         }
     }
+
+    for host in build.flags.host.iter() {
+        if !build.config.host.contains(host) {
+            panic!("specified host `{}` is not in the ./configure list", host);
+        }
+    }
+    for target in build.flags.target.iter() {
+        if !build.config.target.contains(target) {
+            panic!("specified target `{}` is not in the ./configure list",
+                   target);
+        }
+    }
 }
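Besides narrowing the musl-root requirement to the x86_64/i686 musl targets, the sanity check now rejects any --host or --target flag that was not also configured. In isolation the new check amounts to (check_configured is an illustrative free-standing form):

    fn check_configured(kind: &str, requested: &[String], configured: &[String]) {
        for name in requested {
            if !configured.contains(name) {
                panic!("specified {} `{}` is not in the ./configure list", kind, name);
            }
        }
    }

    fn main() {
        let configured = vec!["x86_64-unknown-linux-gnu".to_string()];
        // Passing only configured names succeeds; anything else panics early.
        check_configured("host", &configured, &configured);
    }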
index ba0095ce84980146dff3c10c620090ec9c5350f9..80fcc32e5371d5c4504e9d911750c0b6bee139a4 100644 (file)
@@ -25,26 +25,33 @@ macro_rules! targets {
             // compiler executable itself, not any of the support libraries
             (rustc, Rustc { stage: u32 }),
 
-            // Steps for the two main cargo builds, one for the standard library
-            // and one for the compiler itself. These are parameterized over the
-            // stage output they're going to be placed in along with the
-            // compiler which is producing the copy of libstd or librustc
-            (libstd, Libstd { stage: u32, compiler: Compiler<'a> }),
-            (librustc, Librustc { stage: u32, compiler: Compiler<'a> }),
-
-            // Links the standard library/librustc produced by the compiler
-            // provided into the host's directory also provided.
+            // Steps for the two main cargo builds. These are parameterized over
+            // the compiler which is producing the artifact.
+            (libstd, Libstd { compiler: Compiler<'a> }),
+            (libtest, Libtest { compiler: Compiler<'a> }),
+            (librustc, Librustc { compiler: Compiler<'a> }),
+
+            // Links the target produced by the compiler provided into the
+            // host's directory also provided.
             (libstd_link, LibstdLink {
-                stage: u32,
+                compiler: Compiler<'a>,
+                host: &'a str
+            }),
+            (libtest_link, LibtestLink {
                 compiler: Compiler<'a>,
                 host: &'a str
             }),
             (librustc_link, LibrustcLink {
-                stage: u32,
                 compiler: Compiler<'a>,
                 host: &'a str
             }),
 
+            // Various tools that we can build as part of the build.
+            (tool_linkchecker, ToolLinkchecker { stage: u32 }),
+            (tool_rustbook, ToolRustbook { stage: u32 }),
+            (tool_error_index, ToolErrorIndex { stage: u32 }),
+            (tool_cargotest, ToolCargoTest { stage: u32 }),
+
             // Steps for long-running native builds. Ideally these wouldn't
             // actually exist and would be part of build scripts, but for now
             // these are here.
@@ -53,11 +60,32 @@ macro_rules! targets {
             // with braces are unstable so we just pick something that works.
             (llvm, Llvm { _dummy: () }),
             (compiler_rt, CompilerRt { _dummy: () }),
+
+            // Steps for various pieces of documentation that we can generate,
+            // the 'doc' step is just a pseudo target to depend on a bunch of
+            // others.
             (doc, Doc { stage: u32 }),
             (doc_book, DocBook { stage: u32 }),
             (doc_nomicon, DocNomicon { stage: u32 }),
             (doc_style, DocStyle { stage: u32 }),
             (doc_standalone, DocStandalone { stage: u32 }),
+            (doc_std, DocStd { stage: u32 }),
+            (doc_test, DocTest { stage: u32 }),
+            (doc_rustc, DocRustc { stage: u32 }),
+            (doc_error_index, DocErrorIndex { stage: u32 }),
+
+            // Steps for running tests. The 'check' target is just a pseudo
+            // target to depend on a bunch of others.
+            (check, Check { stage: u32, compiler: Compiler<'a> }),
+            (check_linkcheck, CheckLinkcheck { stage: u32 }),
+            (check_cargotest, CheckCargoTest { stage: u32 }),
+
+            // Distribution targets, creating tarballs
+            (dist, Dist { stage: u32 }),
+            (dist_docs, DistDocs { stage: u32 }),
+            (dist_mingw, DistMingw { _dummy: () }),
+            (dist_rustc, DistRustc { stage: u32 }),
+            (dist_std, DistStd { compiler: Compiler<'a> }),
         }
     }
 }
@@ -127,10 +155,9 @@ fn top_level(build: &Build) -> Vec<Step> {
             }
             let host = t.target(host);
             if host.target == build.config.build {
-                targets.push(host.librustc(stage, host.compiler(stage)));
+                targets.push(host.librustc(host.compiler(stage)));
             } else {
-                targets.push(host.librustc_link(stage, t.compiler(stage),
-                                                host.target));
+                targets.push(host.librustc_link(t.compiler(stage), host.target));
             }
             for target in build.config.target.iter() {
                 if !build.flags.target.contains(target) {
@@ -139,11 +166,10 @@ fn top_level(build: &Build) -> Vec<Step> {
 
                 if host.target == build.config.build {
                     targets.push(host.target(target)
-                                     .libstd(stage, host.compiler(stage)));
+                                     .libtest(host.compiler(stage)));
                 } else {
                     targets.push(host.target(target)
-                                     .libstd_link(stage, t.compiler(stage),
-                                                  host.target));
+                                     .libtest_link(t.compiler(stage), host.target));
                 }
             }
         }
@@ -158,25 +184,37 @@ fn add_steps<'a>(build: &'a Build,
                  host: &Step<'a>,
                  target: &Step<'a>,
                  targets: &mut Vec<Step<'a>>) {
+    struct Context<'a> {
+        stage: u32,
+        compiler: Compiler<'a>,
+        _dummy: (),
+        host: &'a str,
+    }
     for step in build.flags.step.iter() {
-        let compiler = host.target(&build.config.build).compiler(stage);
-        match &step[..] {
-            "libstd" => targets.push(target.libstd(stage, compiler)),
-            "librustc" => targets.push(target.librustc(stage, compiler)),
-            "libstd-link" => targets.push(target.libstd_link(stage, compiler,
-                                                             host.target)),
-            "librustc-link" => targets.push(target.librustc_link(stage, compiler,
-                                                                 host.target)),
-            "rustc" => targets.push(host.rustc(stage)),
-            "llvm" => targets.push(target.llvm(())),
-            "compiler-rt" => targets.push(target.compiler_rt(())),
-            "doc-style" => targets.push(host.doc_style(stage)),
-            "doc-standalone" => targets.push(host.doc_standalone(stage)),
-            "doc-nomicon" => targets.push(host.doc_nomicon(stage)),
-            "doc-book" => targets.push(host.doc_book(stage)),
-            "doc" => targets.push(host.doc(stage)),
-            _ => panic!("unknown build target: `{}`", step),
+
+        // The macro below insists on hygienic access to all local variables, so
+        // we shove them all in a struct and subvert hygiene by accessing struct
+        // fields instead.
+        let cx = Context {
+            stage: stage,
+            compiler: host.target(&build.config.build).compiler(stage),
+            _dummy: (),
+            host: host.target,
+        };
+        macro_rules! add_step {
+            ($(($short:ident, $name:ident { $($arg:ident: $t:ty),* }),)*) => ({$(
+                let name = stringify!($short).replace("_", "-");
+                if &step[..] == &name[..] {
+                    targets.push(target.$short($(cx.$arg),*));
+                    continue
+                }
+                drop(name);
+            )*})
         }
+
+        targets!(add_step);
+
+        panic!("unknown step: {}", step);
     }
 }
 
@@ -209,36 +247,114 @@ impl<'a> Step<'a> {
             }
             Source::Rustc { stage } => {
                 let compiler = Compiler::new(stage - 1, &build.config.build);
-                vec![self.librustc(stage - 1, compiler)]
+                vec![self.librustc(compiler)]
             }
-            Source::Librustc { stage, compiler } => {
-                vec![self.libstd(stage, compiler), self.llvm(())]
+            Source::Librustc { compiler } => {
+                vec![self.libtest(compiler), self.llvm(())]
             }
-            Source::Libstd { stage: _, compiler } => {
+            Source::Libtest { compiler } => {
+                vec![self.libstd(compiler)]
+            }
+            Source::Libstd { compiler } => {
                 vec![self.compiler_rt(()),
                      self.rustc(compiler.stage).target(compiler.host)]
             }
-            Source::LibrustcLink { stage, compiler, host } => {
-                vec![self.librustc(stage, compiler),
-                     self.libstd_link(stage, compiler, host)]
+            Source::LibrustcLink { compiler, host } => {
+                vec![self.librustc(compiler),
+                     self.libtest_link(compiler, host)]
+            }
+            Source::LibtestLink { compiler, host } => {
+                vec![self.libtest(compiler), self.libstd_link(compiler, host)]
             }
-            Source::LibstdLink { stage, compiler, host } => {
-                vec![self.libstd(stage, compiler),
-                     self.target(host).rustc(stage)]
+            Source::LibstdLink { compiler, host } => {
+                vec![self.libstd(compiler),
+                     self.target(host).rustc(compiler.stage)]
             }
             Source::CompilerRt { _dummy } => {
                 vec![self.llvm(()).target(&build.config.build)]
             }
             Source::Llvm { _dummy } => Vec::new(),
+
+            // Note that all doc targets depend on artifacts from the build
+            // architecture, not the target (which is where we're generating
+            // docs into).
+            Source::DocStd { stage } => {
+                let compiler = self.target(&build.config.build).compiler(stage);
+                vec![self.libstd(compiler)]
+            }
+            Source::DocTest { stage } => {
+                let compiler = self.target(&build.config.build).compiler(stage);
+                vec![self.libtest(compiler)]
+            }
             Source::DocBook { stage } |
             Source::DocNomicon { stage } |
-            Source::DocStyle { stage } |
+            Source::DocStyle { stage } => {
+                vec![self.target(&build.config.build).tool_rustbook(stage)]
+            }
+            Source::DocErrorIndex { stage } => {
+                vec![self.target(&build.config.build).tool_error_index(stage)]
+            }
             Source::DocStandalone { stage } => {
-                vec![self.rustc(stage)]
+                vec![self.target(&build.config.build).rustc(stage)]
+            }
+            Source::DocRustc { stage } => {
+                vec![self.doc_test(stage)]
             }
             Source::Doc { stage } => {
                 vec![self.doc_book(stage), self.doc_nomicon(stage),
-                     self.doc_style(stage), self.doc_standalone(stage)]
+                     self.doc_style(stage), self.doc_standalone(stage),
+                     self.doc_std(stage),
+                     self.doc_error_index(stage)]
+            }
+            Source::Check { stage, compiler: _ } => {
+                vec![self.check_linkcheck(stage),
+                     self.dist(stage)]
+            }
+            Source::CheckLinkcheck { stage } => {
+                vec![self.tool_linkchecker(stage), self.doc(stage)]
+            }
+            Source::CheckCargoTest { stage } => {
+                vec![self.tool_cargotest(stage)]
+            }
+
+            Source::ToolLinkchecker { stage } => {
+                vec![self.libstd(self.compiler(stage))]
+            }
+            Source::ToolErrorIndex { stage } |
+            Source::ToolRustbook { stage } => {
+                vec![self.librustc(self.compiler(stage))]
+            }
+            Source::ToolCargoTest { stage } => {
+                vec![self.librustc(self.compiler(stage))]
+            }
+
+            Source::DistDocs { stage } => vec![self.doc(stage)],
+            Source::DistMingw { _dummy: _ } => Vec::new(),
+            Source::DistRustc { stage } => {
+                vec![self.rustc(stage)]
+            }
+            Source::DistStd { compiler } => {
+                vec![self.libtest(compiler)]
+            }
+
+            Source::Dist { stage } => {
+                let mut base = Vec::new();
+
+                for host in build.config.host.iter() {
+                    let host = self.target(host);
+                    base.push(host.dist_rustc(stage));
+                    if host.target.contains("windows-gnu") {
+                        base.push(host.dist_mingw(()));
+                    }
+
+                    let compiler = self.compiler(stage);
+                    for target in build.config.target.iter() {
+                        let target = self.target(target);
+                        base.push(target.dist_docs(stage));
+                        base.push(target.dist_std(compiler));
+                    }
+                }
+                return base
             }
         }
     }
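The rewritten add_steps above no longer hand-matches step names: targets! is re-invoked with add_step, which turns each step's snake_case name into its dashed form and, on a match, calls the generated constructor with arguments pulled out of a Context struct. The struct exists because macro_rules! hygiene keeps identifiers named inside the macro from binding to the caller's locals, while field access is not hygienic. A much-reduced sketch of the same trick with an invented step set (Cx, dispatch!, doc_book and dist_mingw are all illustrative):

    struct Cx { stage: u32, host: &'static str }

    fn doc_book(stage: u32) -> String { format!("doc-book, stage {}", stage) }
    fn dist_mingw(host: &str) -> String { format!("dist-mingw for {}", host) }

    macro_rules! dispatch {
        ($step:expr, $cx:expr, $(($short:ident, $fun:ident { $arg:ident }),)*) => ({
            $(
                if $step == stringify!($short).replace("_", "-") {
                    // $arg comes from the macro input, but field access is not
                    // hygienic, so it can still name a field of the context.
                    return $fun($cx.$arg);
                }
            )*
            panic!("unknown step: {}", $step)
        })
    }

    fn run(step: &str) -> String {
        let cx = Cx { stage: 1, host: "x86_64-pc-windows-gnu" };
        dispatch!(step, cx,
                  (doc_book, doc_book { stage }),
                  (dist_mingw, dist_mingw { host }),)
    }

    fn main() {
        assert_eq!(run("doc-book"), "doc-book, stage 1");
        assert_eq!(run("dist-mingw"), "dist-mingw for x86_64-pc-windows-gnu");
    }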
index 6c700671f11115d396897e1e537a5ca66f6364f0..41cf924d44a92dcf85ddce41ff058e3c5040398c 100644 (file)
@@ -30,7 +30,15 @@ pub fn mtime(path: &Path) -> FileTime {
     }).unwrap_or(FileTime::zero())
 }
 
-#[allow(dead_code)] // this will be used soon
+pub fn copy(src: &Path, dst: &Path) {
+    let res = fs::hard_link(src, dst);
+    let res = res.or_else(|_| fs::copy(src, dst).map(|_| ()));
+    if let Err(e) = res {
+        panic!("failed to copy `{}` to `{}`: {}", src.display(),
+               dst.display(), e)
+    }
+}
+
 pub fn cp_r(src: &Path, dst: &Path) {
     for f in t!(fs::read_dir(src)) {
         let f = t!(f);
@@ -43,7 +51,7 @@ pub fn cp_r(src: &Path, dst: &Path) {
             cp_r(&path, &dst);
         } else {
             let _ = fs::remove_file(&dst);
-            t!(fs::hard_link(&path, dst));
+            copy(&path, &dst);
         }
     }
 }
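cp_r above now goes through a copy helper that first attempts a hard link and only falls back to an actual byte copy when linking fails, for example across filesystems. The same idea in a free-standing, fallible form (link_or_copy and the paths in main are illustrative):

    use std::fs;
    use std::io;
    use std::path::Path;

    fn link_or_copy(src: &Path, dst: &Path) -> io::Result<()> {
        // A stale destination would make hard_link fail, so clear it first.
        let _ = fs::remove_file(dst);
        fs::hard_link(src, dst).or_else(|_| fs::copy(src, dst).map(|_| ()))
    }

    fn main() {
        if let Err(e) = link_or_copy(Path::new("a.txt"), Path::new("b.txt")) {
            eprintln!("copy failed: {}", e);
        }
    }

Hard links keep the many per-stage copies of large build artifacts cheap, while the fallback keeps the build working when the output directory spans filesystems.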
index 32432132c1730bfa42aa095ca2b3fe88b1189e3c..bf29ac107ffbf666a8244ad0f0d7730e814f82ec 100644 (file)
@@ -20,6 +20,7 @@ extern crate libc;
 extern crate num_cpus;
 extern crate rustc_serialize;
 extern crate toml;
+extern crate md5;
 
 use std::env;
 
index fa3dee2f358f94bbe397950679d8f50438a8a0c3..9199c755f603e4750127ccd6a3eb8eb42702862f 100644 (file)
@@ -36,3 +36,11 @@ book:
        $(Q)$(BOOTSTRAP) --step doc-book
 standalone-docs:
        $(Q)$(BOOTSTRAP) --step doc-standalone
+check:
+       $(Q)$(BOOTSTRAP) --step check
+check-cargotest:
+       $(Q)$(BOOTSTRAP) --step check-cargotest
+dist:
+       $(Q)$(BOOTSTRAP) --step dist
+
+.PHONY: dist
index 4e9d6da9157de2f9643d09002e0a53cf5c779426..d403d76bb1400469a289b3e8bcba592c4238f9b0 100644 (file)
@@ -8,6 +8,23 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+//! Shim which is passed to Cargo as "rustc" when running the bootstrap.
+//!
+//! This shim will take care of some various tasks that our build process
+//! requires that Cargo can't quite do through normal configuration:
+//!
+//! 1. When compiling build scripts and build dependencies, we need a guaranteed
+//!    full standard library available. The only compiler which actually has
+//!    this is the snapshot, so we detect this situation and always compile with
+//!    the snapshot compiler.
+//! 2. We pass a bunch of `--cfg` and other flags based on what we're compiling
+//!    (and this slightly differs based on a whether we're using a snapshot or
+//!    not), so we do that all here.
+//!
+//! This may one day be replaced by RUSTFLAGS, but the dynamic nature of
+//! switching compilers for the bootstrap and for build scripts will probably
+//! never get replaced.
+
 extern crate bootstrap;
 
 use std::env;
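Build scripts and host-only dependencies are compiled without a --target argument, which is one way to route them to the snapshot compiler as the module comment above describes. A rough sketch of that dispatch only (pick_compiler is illustrative; the real shim also rewrites flags and library paths, and uses the RUSTC_REAL/RUSTC_SNAPSHOT variables set by Build::cargo earlier in this commit):

    use std::env;
    use std::ffi::OsString;

    fn pick_compiler() -> OsString {
        let has_target = env::args().any(|a| a == "--target");
        let var = if has_target { "RUSTC_REAL" } else { "RUSTC_SNAPSHOT" };
        env::var_os(var).unwrap_or_else(|| OsString::from("rustc"))
    }

    fn main() {
        println!("would invoke: {:?}", pick_compiler());
    }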
diff --git a/src/bootstrap/rustdoc.rs b/src/bootstrap/rustdoc.rs
new file mode 100644 (file)
index 0000000..8c61819
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Shim which is passed to Cargo as "rustdoc" when running the bootstrap.
+//!
+//! See comments in `src/bootstrap/rustc.rs` for more information.
+
+use std::env;
+use std::process::Command;
+
+fn main() {
+    let args = env::args_os().skip(1).collect::<Vec<_>>();
+    let rustdoc = env::var_os("RUSTDOC_REAL").unwrap();
+
+    let mut cmd = Command::new(rustdoc);
+    cmd.args(&args)
+       .arg("--cfg").arg(format!("stage{}", env::var("RUSTC_STAGE").unwrap()))
+       .arg("--cfg").arg("dox");
+    std::process::exit(match cmd.status() {
+        Ok(s) => s.code().unwrap_or(1),
+        Err(e) => panic!("\n\nfailed to run {:?}: {}\n\n", cmd, e),
+    })
+}
+
index 092a1cabc746fb8ab44139c76e565eb8be397c09..8e1da69cf02e752e2a4c5622549092408b37bd70 100644 (file)
@@ -43,10 +43,16 @@ pub fn cc2ar(cc: &Path, target: &str) -> PathBuf {
     if target.contains("musl") || target.contains("msvc") {
         PathBuf::from("ar")
     } else {
+        let parent = cc.parent().unwrap();
         let file = cc.file_name().unwrap().to_str().unwrap();
-        cc.parent().unwrap().join(file.replace("gcc", "ar")
-                                      .replace("cc", "ar")
-                                      .replace("clang", "ar"))
+        for suffix in &["gcc", "cc", "clang"] {
+            if let Some(idx) = file.rfind(suffix) {
+                let mut file = file[..idx].to_owned();
+                file.push_str("ar");
+                return parent.join(&file);
+            }
+        }
+        parent.join(file)
     }
 }
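cc2ar above now derives the archiver name by replacing the last gcc/cc/clang component of the compiler's file name with ar (dropping anything after it), so cross prefixes such as arm-linux-gnueabihf- are kept intact. The rewrite in isolation (cc_to_ar is a hypothetical helper working on the file name only):

    fn cc_to_ar(file: &str) -> String {
        for suffix in &["gcc", "cc", "clang"] {
            if let Some(idx) = file.rfind(suffix) {
                let mut ar = file[..idx].to_owned();
                ar.push_str("ar");
                return ar;
            }
        }
        // No known compiler name found: keep the file name unchanged.
        file.to_owned()
    }

    fn main() {
        assert_eq!(cc_to_ar("arm-linux-gnueabihf-gcc"), "arm-linux-gnueabihf-ar");
        assert_eq!(cc_to_ar("cc"), "ar");
    }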
 
index f137d51c7f2039addc5df9a0e66cb544e48aad06..9ede3670801875e89fa9700fe1575fae813dda99 100644 (file)
@@ -207,8 +207,11 @@ elseif(NOT APPLE) # Supported archs for Apple platforms are generated later
     test_target_arch(mips "" "-mips32r2" "--target=mips-linux-gnu")
     test_target_arch(mips64 "" "-mips64r2" "--target=mips64-linux-gnu" "-mabi=n64")
   elseif("${COMPILER_RT_DEFAULT_TARGET_ARCH}" MATCHES "arm")
-    test_target_arch(arm "" "-march=armv7-a" "-mfloat-abi=soft")
-    test_target_arch(armhf "" "-march=armv7-a" "-mfloat-abi=hard")
+    if("${COMPILER_RT_DEFAULT_TARGET_TRIPLE}" MATCHES "eabihf")
+      test_target_arch(armhf "" "-march=armv7-a" "-mfloat-abi=hard")
+    else()
+      test_target_arch(arm "" "-march=armv7-a" "-mfloat-abi=soft")
+    endif()
   elseif("${COMPILER_RT_DEFAULT_TARGET_ARCH}" MATCHES "aarch32")
     test_target_arch(aarch32 "" "-march=armv8-a")
   elseif("${COMPILER_RT_DEFAULT_TARGET_ARCH}" MATCHES "aarch64")
index 97249ea730e073c7512e1b0032bd64f60fc8b613..9f9f4304d7347493109d3ddc9019e18b98caa687 100644 (file)
@@ -220,8 +220,6 @@ else () # MSVC
 endif () # if (NOT MSVC)
 
 set(arm_SOURCES
-  arm/adddf3vfp.S
-  arm/addsf3vfp.S
   arm/aeabi_cdcmp.S
   arm/aeabi_cdcmpeq_check_nan.c
   arm/aeabi_cfcmp.S
@@ -242,40 +240,11 @@ set(arm_SOURCES
   arm/bswapdi2.S
   arm/bswapsi2.S
   arm/comparesf2.S
-  arm/divdf3vfp.S
   arm/divmodsi4.S
-  arm/divsf3vfp.S
   arm/divsi3.S
-  arm/eqdf2vfp.S
-  arm/eqsf2vfp.S
-  arm/extendsfdf2vfp.S
-  arm/fixdfsivfp.S
-  arm/fixsfsivfp.S
-  arm/fixunsdfsivfp.S
-  arm/fixunssfsivfp.S
-  arm/floatsidfvfp.S
-  arm/floatsisfvfp.S
-  arm/floatunssidfvfp.S
-  arm/floatunssisfvfp.S
-  arm/gedf2vfp.S
-  arm/gesf2vfp.S
-  arm/gtdf2vfp.S
-  arm/gtsf2vfp.S
-  arm/ledf2vfp.S
-  arm/lesf2vfp.S
-  arm/ltdf2vfp.S
-  arm/ltsf2vfp.S
   arm/modsi3.S
-  arm/muldf3vfp.S
-  arm/mulsf3vfp.S
-  arm/nedf2vfp.S
   arm/negdf2vfp.S
   arm/negsf2vfp.S
-  arm/nesf2vfp.S
-  arm/restore_vfp_d8_d15_regs.S
-  arm/save_vfp_d8_d15_regs.S
-  arm/subdf3vfp.S
-  arm/subsf3vfp.S
   arm/switch16.S
   arm/switch32.S
   arm/switch8.S
@@ -301,12 +270,9 @@ set(arm_SOURCES
   arm/sync_fetch_and_xor_4.S
   arm/sync_fetch_and_xor_8.S
   arm/sync_synchronize.S
-  arm/truncdfsf2vfp.S
   arm/udivmodsi4.S
   arm/udivsi3.S
   arm/umodsi3.S
-  arm/unorddf2vfp.S
-  arm/unordsf2vfp.S
   ${GENERIC_SOURCES})
 
 set(aarch64_SOURCES
@@ -328,7 +294,42 @@ set(aarch64_SOURCES
   trunctfsf2.c
   ${GENERIC_SOURCES})
 
-set(armhf_SOURCES ${arm_SOURCES})
+set(armhf_SOURCES
+  arm/adddf3vfp.S
+  arm/addsf3vfp.S
+  arm/divdf3vfp.S
+  arm/divsf3vfp.S
+  arm/eqdf2vfp.S
+  arm/eqsf2vfp.S
+  arm/extendsfdf2vfp.S
+  arm/fixdfsivfp.S
+  arm/fixsfsivfp.S
+  arm/fixunsdfsivfp.S
+  arm/fixunssfsivfp.S
+  arm/floatsidfvfp.S
+  arm/floatsisfvfp.S
+  arm/floatunssidfvfp.S
+  arm/floatunssisfvfp.S
+  arm/gedf2vfp.S
+  arm/gesf2vfp.S
+  arm/gtdf2vfp.S
+  arm/gtsf2vfp.S
+  arm/ledf2vfp.S
+  arm/lesf2vfp.S
+  arm/ltdf2vfp.S
+  arm/ltsf2vfp.S
+  arm/muldf3vfp.S
+  arm/mulsf3vfp.S
+  arm/nedf2vfp.S
+  arm/nesf2vfp.S
+  arm/restore_vfp_d8_d15_regs.S
+  arm/save_vfp_d8_d15_regs.S
+  arm/subdf3vfp.S
+  arm/subsf3vfp.S
+  arm/truncdfsf2vfp.S
+  arm/unorddf2vfp.S
+  arm/unordsf2vfp.S
+  ${arm_SOURCES})
 set(armv7_SOURCES ${arm_SOURCES})
 set(armv7s_SOURCES ${arm_SOURCES})
 set(arm64_SOURCES ${aarch64_SOURCES})
index e66094dc3954b9d4bf7c13a0e8c3b6f3980e9373..a6960ff17854ecf27f0bda277f3e4c702f484f65 100644 (file)
@@ -25,7 +25,8 @@ pub enum Mode {
     DebugInfoLldb,
     Codegen,
     Rustdoc,
-    CodegenUnits
+    CodegenUnits,
+    Incremental,
 }
 
 impl FromStr for Mode {
@@ -43,6 +44,7 @@ impl FromStr for Mode {
           "codegen" => Ok(Codegen),
           "rustdoc" => Ok(Rustdoc),
           "codegen-units" => Ok(CodegenUnits),
+          "incremental" => Ok(Incremental),
           _ => Err(()),
         }
     }
@@ -62,6 +64,7 @@ impl fmt::Display for Mode {
             Codegen => "codegen",
             Rustdoc => "rustdoc",
             CodegenUnits => "codegen-units",
+            Incremental => "incremental",
         }, f)
     }
 }
@@ -69,10 +72,10 @@ impl fmt::Display for Mode {
 #[derive(Clone)]
 pub struct Config {
     // The library paths required for running the compiler
-    pub compile_lib_path: String,
+    pub compile_lib_path: PathBuf,
 
     // The library paths required for running compiled programs
-    pub run_lib_path: String,
+    pub run_lib_path: PathBuf,
 
     // The rustc executable
     pub rustc_path: PathBuf,
@@ -155,5 +158,8 @@ pub struct Config {
     pub lldb_python_dir: Option<String>,
 
     // Explain what's going on
-    pub verbose: bool
+    pub verbose: bool,
+
+    // Print one character per test instead of one line
+    pub quiet: bool,
 }
index bbace16f05928a5d0fb10c873d90dddeb7e02c5d..8d2558e4344e631fd1fde74c4eaea9660f50ed66 100644 (file)
 #![crate_type = "bin"]
 
 #![feature(box_syntax)]
-#![feature(dynamic_lib)]
 #![feature(libc)]
 #![feature(rustc_private)]
-#![feature(str_char)]
 #![feature(test)]
+#![feature(question_mark)]
 
 #![deny(warnings)]
 
@@ -71,13 +70,15 @@ pub fn parse_config(args: Vec<String> ) -> Config {
           reqopt("", "aux-base", "directory to find auxiliary test files", "PATH"),
           reqopt("", "stage-id", "the target-stage identifier", "stageN-TARGET"),
           reqopt("", "mode", "which sort of compile tests to run",
-                 "(compile-fail|parse-fail|run-fail|run-pass|run-pass-valgrind|pretty|debug-info)"),
+                 "(compile-fail|parse-fail|run-fail|run-pass|\
+                  run-pass-valgrind|pretty|debug-info|incremental)"),
           optflag("", "ignored", "run tests marked as ignored"),
           optopt("", "runtool", "supervisor program to run tests under \
                                  (eg. emulator, valgrind)", "PROGRAM"),
           optopt("", "host-rustcflags", "flags to pass to rustc for host", "FLAGS"),
           optopt("", "target-rustcflags", "flags to pass to rustc for target", "FLAGS"),
           optflag("", "verbose", "run tests verbosely, showing all output"),
+          optflag("", "quiet", "print one character per test instead of one line"),
           optopt("", "logfile", "file to log test execution to", "FILE"),
           optopt("", "target", "the target to build for", "TARGET"),
           optopt("", "host", "the host to build for", "HOST"),
@@ -117,15 +118,17 @@ pub fn parse_config(args: Vec<String> ) -> Config {
         }
     }
 
-    let filter = if !matches.free.is_empty() {
-        Some(matches.free[0].clone())
-    } else {
-        None
-    };
+    fn make_absolute(path: PathBuf) -> PathBuf {
+        if path.is_relative() {
+            env::current_dir().unwrap().join(path)
+        } else {
+            path
+        }
+    }
 
     Config {
-        compile_lib_path: matches.opt_str("compile-lib-path").unwrap(),
-        run_lib_path: matches.opt_str("run-lib-path").unwrap(),
+        compile_lib_path: make_absolute(opt_path(matches, "compile-lib-path")),
+        run_lib_path: make_absolute(opt_path(matches, "run-lib-path")),
         rustc_path: opt_path(matches, "rustc-path"),
         rustdoc_path: opt_path(matches, "rustdoc-path"),
         python: matches.opt_str("python").unwrap(),
@@ -138,7 +141,7 @@ pub fn parse_config(args: Vec<String> ) -> Config {
         stage_id: matches.opt_str("stage-id").unwrap(),
         mode: matches.opt_str("mode").unwrap().parse().ok().expect("invalid mode"),
         run_ignored: matches.opt_present("ignored"),
-        filter: filter,
+        filter: matches.free.first().cloned(),
         logfile: matches.opt_str("logfile").map(|s| PathBuf::from(&s)),
         runtool: matches.opt_str("runtool"),
         host_rustcflags: matches.opt_str("host-rustcflags"),
@@ -158,6 +161,7 @@ pub fn parse_config(args: Vec<String> ) -> Config {
             !opt_str2(matches.opt_str("adb-test-dir")).is_empty(),
         lldb_python_dir: matches.opt_str("lldb-python-dir"),
         verbose: matches.opt_present("verbose"),
+        quiet: matches.opt_present("quiet"),
     }
 }
 
@@ -191,6 +195,7 @@ pub fn log_config(config: &Config) {
     logv(c, format!("adb_device_status: {}",
                     config.adb_device_status));
     logv(c, format!("verbose: {}", config.verbose));
+    logv(c, format!("quiet: {}", config.quiet));
     logv(c, format!("\n"));
 }
 
@@ -252,15 +257,16 @@ pub fn run_tests(config: &Config) {
 
 pub fn test_opts(config: &Config) -> test::TestOpts {
     test::TestOpts {
-        filter: match config.filter {
-            None => None,
-            Some(ref filter) => Some(filter.clone()),
-        },
+        filter: config.filter.clone(),
         run_ignored: config.run_ignored,
+        quiet: config.quiet,
         logfile: config.logfile.clone(),
         run_tests: true,
         bench_benchmarks: true,
-        nocapture: env::var("RUST_TEST_NOCAPTURE").is_ok(),
+        nocapture: match env::var("RUST_TEST_NOCAPTURE") {
+            Ok(val) => &val != "0",
+            Err(_) => false
+        },
         color: test::AutoColor,
     }
 }
@@ -286,16 +292,16 @@ fn collect_tests_from_dir(config: &Config,
                           -> io::Result<()> {
     // Ignore directories that contain a file
     // `compiletest-ignore-dir`.
-    for file in try!(fs::read_dir(dir)) {
-        let file = try!(file);
+    for file in fs::read_dir(dir)? {
+        let file = file?;
         if file.file_name() == *"compiletest-ignore-dir" {
             return Ok(());
         }
     }
 
-    let dirs = try!(fs::read_dir(dir));
+    let dirs = fs::read_dir(dir)?;
     for file in dirs {
-        let file = try!(file);
+        let file = file?;
         let file_path = file.path();
         debug!("inspecting file {:?}", file_path.display());
         if is_test(config, &file_path) {
@@ -316,11 +322,11 @@ fn collect_tests_from_dir(config: &Config,
             tests.push(make_test(config, &paths))
         } else if file_path.is_dir() {
             let relative_file_path = relative_dir_path.join(file.file_name());
-            try!(collect_tests_from_dir(config,
-                                        base,
-                                        &file_path,
-                                        &relative_file_path,
-                                        tests));
+            collect_tests_from_dir(config,
+                                   base,
+                                   &file_path,
+                                   &relative_file_path,
+                                   tests)?;
         }
     }
     Ok(())
@@ -354,11 +360,25 @@ pub fn is_test(config: &Config, testfile: &Path) -> bool {
 }
 
 pub fn make_test(config: &Config, testpaths: &TestPaths) -> test::TestDescAndFn {
+    let early_props = header::early_props(config, &testpaths.file);
+
+    // The `should-fail` annotation doesn't apply to pretty tests,
+    // since we run the pretty printer across all tests by default.
+    // If desired, we could add a `should-fail-pretty` annotation.
+    let should_panic = match config.mode {
+        Pretty => test::ShouldPanic::No,
+        _ => if early_props.should_fail {
+            test::ShouldPanic::Yes
+        } else {
+            test::ShouldPanic::No
+        }
+    };
+
     test::TestDescAndFn {
         desc: test::TestDesc {
             name: make_test_name(config, testpaths),
-            ignore: header::is_test_ignored(config, &testpaths.file),
-            should_panic: test::ShouldPanic::No,
+            ignore: early_props.ignore,
+            should_panic: should_panic,
         },
         testfn: make_test_closure(config, testpaths),
     }
@@ -391,16 +411,26 @@ fn extract_gdb_version(full_version_line: Option<String>) -> Option<String> {
 
             // used to be a regex "(^|[^0-9])([0-9]\.[0-9]+)"
             for (pos, c) in full_version_line.char_indices() {
-                if !c.is_digit(10) { continue }
-                if pos + 2 >= full_version_line.len() { continue }
-                if full_version_line.char_at(pos + 1) != '.' { continue }
-                if !full_version_line.char_at(pos + 2).is_digit(10) { continue }
-                if pos > 0 && full_version_line.char_at_reverse(pos).is_digit(10) {
+                if !c.is_digit(10) {
+                    continue
+                }
+                if pos + 2 >= full_version_line.len() {
+                    continue
+                }
+                if full_version_line[pos + 1..].chars().next().unwrap() != '.' {
+                    continue
+                }
+                if !full_version_line[pos + 2..].chars().next().unwrap().is_digit(10) {
+                    continue
+                }
+                if pos > 0 && full_version_line[..pos].chars().next_back()
+                                                      .unwrap().is_digit(10) {
                     continue
                 }
                 let mut end = pos + 3;
                 while end < full_version_line.len() &&
-                      full_version_line.char_at(end).is_digit(10) {
+                      full_version_line[end..].chars().next()
+                                              .unwrap().is_digit(10) {
                     end += 1;
                 }
                 return Some(full_version_line[pos..end].to_owned());
@@ -432,13 +462,13 @@ fn extract_lldb_version(full_version_line: Option<String>) -> Option<String> {
             for (pos, l) in full_version_line.char_indices() {
                 if l != 'l' && l != 'L' { continue }
                 if pos + 5 >= full_version_line.len() { continue }
-                let l = full_version_line.char_at(pos + 1);
+                let l = full_version_line[pos + 1..].chars().next().unwrap();
                 if l != 'l' && l != 'L' { continue }
-                let d = full_version_line.char_at(pos + 2);
+                let d = full_version_line[pos + 2..].chars().next().unwrap();
                 if d != 'd' && d != 'D' { continue }
-                let b = full_version_line.char_at(pos + 3);
+                let b = full_version_line[pos + 3..].chars().next().unwrap();
                 if b != 'b' && b != 'B' { continue }
-                let dash = full_version_line.char_at(pos + 4);
+                let dash = full_version_line[pos + 4..].chars().next().unwrap();
                 if dash != '-' { continue }
 
                 let vers = full_version_line[pos + 5..].chars().take_while(|c| {
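
A hedged illustration, not from this commit, of the `should-fail` handling added to `make_test` above: a hypothetical run-pass test that fails to compile, so compiletest's test function panics and the `ShouldPanic::Yes` expectation is satisfied (in pretty mode the expectation is forced back to `No`, per the comment above).

    // should-fail
    fn main() {
        let x: u32 = "hello"; // does not compile, so this run-pass test fails as expected
    }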
index a3ad022ebd52faa65d16b11e1280ae5fa6263199..418a0bc7121cbd624be4797e47fd78b5226b59be 100644 (file)
@@ -9,14 +9,54 @@
 // except according to those terms.
 use self::WhichLine::*;
 
+use std::fmt;
 use std::fs::File;
 use std::io::BufReader;
 use std::io::prelude::*;
 use std::path::Path;
+use std::str::FromStr;
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum ErrorKind {
+    Help,
+    Error,
+    Note,
+    Suggestion,
+    Warning,
+}
+
+impl FromStr for ErrorKind {
+    type Err = ();
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match &s.trim_right_matches(':') as &str {
+            "HELP" => Ok(ErrorKind::Help),
+            "ERROR" => Ok(ErrorKind::Error),
+            "NOTE" => Ok(ErrorKind::Note),
+            "SUGGESTION" => Ok(ErrorKind::Suggestion),
+            "WARN" => Ok(ErrorKind::Warning),
+            "WARNING" => Ok(ErrorKind::Warning),
+            _ => Err(()),
+        }
+    }
+}
+
+impl fmt::Display for ErrorKind {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            ErrorKind::Help => write!(f, "help"),
+            ErrorKind::Error => write!(f, "error"),
+            ErrorKind::Note => write!(f, "note"),
+            ErrorKind::Suggestion => write!(f, "suggestion"),
+            ErrorKind::Warning => write!(f, "warning"),
+        }
+    }
+}
 
 pub struct ExpectedError {
-    pub line: usize,
-    pub kind: String,
+    pub line_num: usize,
+    /// What kind of message we expect (e.g. warning, error, suggestion).
+    /// `None` if not specified or unknown message kind.
+    pub kind: Option<ErrorKind>,
     pub msg: String,
 }
 
@@ -30,8 +70,10 @@ enum WhichLine { ThisLine, FollowPrevious(usize), AdjustBackward(usize) }
 /// Goal is to enable tests both like: //~^^^ ERROR go up three
 /// and also //~^ ERROR message one for the preceding line, and
 ///          //~| ERROR message two for that same line.
-// Load any test directives embedded in the file
-pub fn load_errors(testfile: &Path) -> Vec<ExpectedError> {
+///
+/// If cfg is not None (i.e., in an incremental test), then we look
+/// for `//[X]~` instead, where `X` is the current `cfg`.
+pub fn load_errors(testfile: &Path, cfg: Option<&str>) -> Vec<ExpectedError> {
     let rdr = BufReader::new(File::open(testfile).unwrap());
 
     // `last_nonfollow_error` tracks the most recently seen
@@ -44,55 +86,66 @@ pub fn load_errors(testfile: &Path) -> Vec<ExpectedError> {
     // updating it in the map callback below.)
     let mut last_nonfollow_error = None;
 
-    rdr.lines().enumerate().filter_map(|(line_no, ln)| {
-        parse_expected(last_nonfollow_error,
-                       line_no + 1,
-                       &ln.unwrap())
-            .map(|(which, error)| {
-                match which {
-                    FollowPrevious(_) => {}
-                    _ => last_nonfollow_error = Some(error.line),
-                }
-                error
-            })
-    }).collect()
+    let tag = match cfg {
+        Some(rev) => format!("//[{}]~", rev),
+        None => format!("//~")
+    };
+
+    rdr.lines()
+       .enumerate()
+       .filter_map(|(line_num, line)| {
+           parse_expected(last_nonfollow_error,
+                          line_num + 1,
+                          &line.unwrap(),
+                          &tag)
+               .map(|(which, error)| {
+                   match which {
+                       FollowPrevious(_) => {}
+                       _ => last_nonfollow_error = Some(error.line_num),
+                   }
+                   error
+               })
+       })
+       .collect()
 }
 
 fn parse_expected(last_nonfollow_error: Option<usize>,
                   line_num: usize,
-                  line: &str) -> Option<(WhichLine, ExpectedError)> {
-    let start = match line.find("//~") { Some(i) => i, None => return None };
-    let (follow, adjusts) = if line.char_at(start + 3) == '|' {
+                  line: &str,
+                  tag: &str)
+                  -> Option<(WhichLine, ExpectedError)> {
+    let start = match line.find(tag) { Some(i) => i, None => return None };
+    let (follow, adjusts) = if line[start + tag.len()..].chars().next().unwrap() == '|' {
         (true, 0)
     } else {
-        (false, line[start + 3..].chars().take_while(|c| *c == '^').count())
+        (false, line[start + tag.len()..].chars().take_while(|c| *c == '^').count())
     };
-    let kind_start = start + 3 + adjusts + (follow as usize);
-    let letters = line[kind_start..].chars();
-    let kind = letters.skip_while(|c| c.is_whitespace())
-                      .take_while(|c| !c.is_whitespace())
-                      .flat_map(|c| c.to_lowercase())
-                      .collect::<String>();
+    let kind_start = start + tag.len() + adjusts + (follow as usize);
+    let kind = line[kind_start..].split_whitespace()
+                                 .next()
+                                 .expect("Encountered unexpected empty comment")
+                                 .parse::<ErrorKind>()
+                                 .ok();
     let letters = line[kind_start..].chars();
     let msg = letters.skip_while(|c| c.is_whitespace())
                      .skip_while(|c| !c.is_whitespace())
                      .collect::<String>().trim().to_owned();
 
-    let (which, line) = if follow {
+    let (which, line_num) = if follow {
         assert!(adjusts == 0, "use either //~| or //~^, not both.");
-        let line = last_nonfollow_error.unwrap_or_else(|| {
-            panic!("encountered //~| without preceding //~^ line.")
-        });
-        (FollowPrevious(line), line)
+        let line_num = last_nonfollow_error.expect("encountered //~| without \
+                                                    preceding //~^ line.");
+        (FollowPrevious(line_num), line_num)
     } else {
         let which =
             if adjusts > 0 { AdjustBackward(adjusts) } else { ThisLine };
-        let line = line_num - adjusts;
-        (which, line)
+        let line_num = line_num - adjusts;
+        (which, line_num)
     };
 
-    debug!("line={} which={:?} kind={:?} msg={:?}", line_num, which, kind, msg);
-    Some((which, ExpectedError { line: line,
+    debug!("line={} tag={:?} which={:?} kind={:?} msg={:?}",
+           line_num, tag, which, kind, msg);
+    Some((which, ExpectedError { line_num: line_num,
                                  kind: kind,
                                  msg: msg, }))
 }
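
For context, a hedged sketch (not part of the commit) of what `load_errors(&testfile, Some("cfail2"))` would now match: with a revision the tag becomes `//[cfail2]~`, without one it stays `//~`. The revision name and messages are hypothetical.

    fn main() {
        let x: u32 = "hello";
        //[cfail2]~^ ERROR mismatched types
        //[cfail2]~| NOTE expected u32
    }
    // The first annotation parses to ExpectedError { line_num: 2, kind: Some(ErrorKind::Error), .. };
    // the second follows the previous annotation, so it also gets line_num 2 with kind Some(ErrorKind::Note).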
index 6efe6e608e8ad9cd83188462a2134caaffc48c9b..ef93fcfa013f89f10b99c00917a462cbe21aea18 100644 (file)
@@ -18,11 +18,12 @@ use common::Config;
 use common;
 use util;
 
+#[derive(Clone, Debug)]
 pub struct TestProps {
     // Lines that should be expected, in order, on standard out
     pub error_patterns: Vec<String> ,
     // Extra flags to pass to the compiler
-    pub compile_flags: Option<String>,
+    pub compile_flags: Vec<String>,
     // Extra flags to pass when the compiled code is run (such as --bench)
     pub run_flags: Option<String>,
     // If present, the name of a file that this test should match when
@@ -30,6 +31,8 @@ pub struct TestProps {
     pub pp_exact: Option<PathBuf>,
     // Modules from aux directory that should be compiled
     pub aux_builds: Vec<String> ,
+    // Environment settings to use for compiling
+    pub rustc_env: Vec<(String,String)> ,
     // Environment settings to use during execution
     pub exec_env: Vec<(String,String)> ,
     // Lines to check if they appear in the expected debugger output
@@ -50,119 +53,173 @@ pub struct TestProps {
     pub pretty_compare_only: bool,
     // Patterns which must not appear in the output of a cfail test.
     pub forbid_output: Vec<String>,
+    // Revisions to test for incremental compilation.
+    pub revisions: Vec<String>,
 }
 
 // Load any test directives embedded in the file
 pub fn load_props(testfile: &Path) -> TestProps {
-    let mut error_patterns = Vec::new();
-    let mut aux_builds = Vec::new();
-    let mut exec_env = Vec::new();
-    let mut compile_flags = None;
-    let mut run_flags = None;
-    let mut pp_exact = None;
-    let mut check_lines = Vec::new();
-    let mut build_aux_docs = false;
-    let mut force_host = false;
-    let mut check_stdout = false;
-    let mut no_prefer_dynamic = false;
-    let mut pretty_expanded = false;
-    let mut pretty_mode = None;
-    let mut pretty_compare_only = false;
-    let mut forbid_output = Vec::new();
-    iter_header(testfile, &mut |ln| {
+    let error_patterns = Vec::new();
+    let aux_builds = Vec::new();
+    let exec_env = Vec::new();
+    let run_flags = None;
+    let pp_exact = None;
+    let check_lines = Vec::new();
+    let build_aux_docs = false;
+    let force_host = false;
+    let check_stdout = false;
+    let no_prefer_dynamic = false;
+    let pretty_expanded = false;
+    let pretty_compare_only = false;
+    let forbid_output = Vec::new();
+    let mut props = TestProps {
+        error_patterns: error_patterns,
+        compile_flags: vec![],
+        run_flags: run_flags,
+        pp_exact: pp_exact,
+        aux_builds: aux_builds,
+        revisions: vec![],
+        rustc_env: vec![],
+        exec_env: exec_env,
+        check_lines: check_lines,
+        build_aux_docs: build_aux_docs,
+        force_host: force_host,
+        check_stdout: check_stdout,
+        no_prefer_dynamic: no_prefer_dynamic,
+        pretty_expanded: pretty_expanded,
+        pretty_mode: format!("normal"),
+        pretty_compare_only: pretty_compare_only,
+        forbid_output: forbid_output,
+    };
+    load_props_into(&mut props, testfile, None);
+    props
+}
+
+/// Load properties from `testfile` into `props`. If a property is
+/// tied to a particular revision `foo` (indicated by writing
+/// `//[foo]`), then the property is ignored unless `cfg` is
+/// `Some("foo")`.
+pub fn load_props_into(props: &mut TestProps, testfile: &Path, cfg: Option<&str>)  {
+    iter_header(testfile, cfg, &mut |ln| {
         if let Some(ep) = parse_error_pattern(ln) {
-           error_patterns.push(ep);
+            props.error_patterns.push(ep);
         }
 
-        if compile_flags.is_none() {
-            compile_flags = parse_compile_flags(ln);
+        if let Some(flags) = parse_compile_flags(ln) {
+            props.compile_flags.extend(
+                flags
+                    .split_whitespace()
+                    .map(|s| s.to_owned()));
         }
 
-        if run_flags.is_none() {
-            run_flags = parse_run_flags(ln);
+        if let Some(r) = parse_revisions(ln) {
+            props.revisions.extend(r);
         }
 
-        if pp_exact.is_none() {
-            pp_exact = parse_pp_exact(ln, testfile);
+        if props.run_flags.is_none() {
+            props.run_flags = parse_run_flags(ln);
         }
 
-        if !build_aux_docs {
-            build_aux_docs = parse_build_aux_docs(ln);
+        if props.pp_exact.is_none() {
+            props.pp_exact = parse_pp_exact(ln, testfile);
         }
 
-        if !force_host {
-            force_host = parse_force_host(ln);
+        if !props.build_aux_docs {
+            props.build_aux_docs = parse_build_aux_docs(ln);
         }
 
-        if !check_stdout {
-            check_stdout = parse_check_stdout(ln);
+        if !props.force_host {
+            props.force_host = parse_force_host(ln);
         }
 
-        if !no_prefer_dynamic {
-            no_prefer_dynamic = parse_no_prefer_dynamic(ln);
+        if !props.check_stdout {
+            props.check_stdout = parse_check_stdout(ln);
         }
 
-        if !pretty_expanded {
-            pretty_expanded = parse_pretty_expanded(ln);
+        if !props.no_prefer_dynamic {
+            props.no_prefer_dynamic = parse_no_prefer_dynamic(ln);
         }
 
-        if pretty_mode.is_none() {
-            pretty_mode = parse_pretty_mode(ln);
+        if !props.pretty_expanded {
+            props.pretty_expanded = parse_pretty_expanded(ln);
         }
 
-        if !pretty_compare_only {
-            pretty_compare_only = parse_pretty_compare_only(ln);
+        if let Some(m) = parse_pretty_mode(ln) {
+            props.pretty_mode = m;
+        }
+
+        if !props.pretty_compare_only {
+            props.pretty_compare_only = parse_pretty_compare_only(ln);
         }
 
         if let  Some(ab) = parse_aux_build(ln) {
-            aux_builds.push(ab);
+            props.aux_builds.push(ab);
         }
 
-        if let Some(ee) = parse_exec_env(ln) {
-            exec_env.push(ee);
+        if let Some(ee) = parse_env(ln, "exec-env") {
+            props.exec_env.push(ee);
+        }
+
+        if let Some(ee) = parse_env(ln, "rustc-env") {
+            props.rustc_env.push(ee);
         }
 
         if let Some(cl) =  parse_check_line(ln) {
-            check_lines.push(cl);
+            props.check_lines.push(cl);
         }
 
         if let Some(of) = parse_forbid_output(ln) {
-            forbid_output.push(of);
+            props.forbid_output.push(of);
         }
-
-        true
     });
 
     for key in vec!["RUST_TEST_NOCAPTURE", "RUST_TEST_THREADS"] {
         match env::var(key) {
             Ok(val) =>
-                if exec_env.iter().find(|&&(ref x, _)| *x == key).is_none() {
-                    exec_env.push((key.to_owned(), val))
+                if props.exec_env.iter().find(|&&(ref x, _)| *x == key).is_none() {
+                    props.exec_env.push((key.to_owned(), val))
                 },
             Err(..) => {}
         }
     }
+}
 
-    TestProps {
-        error_patterns: error_patterns,
-        compile_flags: compile_flags,
-        run_flags: run_flags,
-        pp_exact: pp_exact,
-        aux_builds: aux_builds,
-        exec_env: exec_env,
-        check_lines: check_lines,
-        build_aux_docs: build_aux_docs,
-        force_host: force_host,
-        check_stdout: check_stdout,
-        no_prefer_dynamic: no_prefer_dynamic,
-        pretty_expanded: pretty_expanded,
-        pretty_mode: pretty_mode.unwrap_or("normal".to_owned()),
-        pretty_compare_only: pretty_compare_only,
-        forbid_output: forbid_output,
-    }
+pub struct EarlyProps {
+    pub ignore: bool,
+    pub should_fail: bool,
 }
 
-pub fn is_test_ignored(config: &Config, testfile: &Path) -> bool {
+// scan the file to detect whether the test should be ignored and
+// whether it should panic; these are two things the test runner needs
+// to know early, before actually running the test
+pub fn early_props(config: &Config, testfile: &Path) -> EarlyProps {
+    let mut props = EarlyProps {
+        ignore: false,
+        should_fail: false,
+    };
+
+    iter_header(testfile, None, &mut |ln| {
+        props.ignore =
+            props.ignore ||
+            parse_name_directive(ln, "ignore-test") ||
+            parse_name_directive(ln, &ignore_target(config)) ||
+            parse_name_directive(ln, &ignore_architecture(config)) ||
+            parse_name_directive(ln, &ignore_stage(config)) ||
+            parse_name_directive(ln, &ignore_env(config)) ||
+            (config.mode == common::Pretty &&
+             parse_name_directive(ln, "ignore-pretty")) ||
+            (config.target != config.host &&
+             parse_name_directive(ln, "ignore-cross-compile")) ||
+            ignore_gdb(config, ln) ||
+            ignore_lldb(config, ln);
+
+        props.should_fail =
+            props.should_fail ||
+            parse_name_directive(ln, "should-fail");
+    });
+
+    return props;
+
     fn ignore_target(config: &Config) -> String {
         format!("ignore-{}", util::get_os(&config.target))
     }
@@ -229,39 +286,40 @@ pub fn is_test_ignored(config: &Config, testfile: &Path) -> bool {
             false
         }
     }
-
-    let val = iter_header(testfile, &mut |ln| {
-        !parse_name_directive(ln, "ignore-test") &&
-        !parse_name_directive(ln, &ignore_target(config)) &&
-        !parse_name_directive(ln, &ignore_architecture(config)) &&
-        !parse_name_directive(ln, &ignore_stage(config)) &&
-        !parse_name_directive(ln, &ignore_env(config)) &&
-        !(config.mode == common::Pretty && parse_name_directive(ln, "ignore-pretty")) &&
-        !(config.target != config.host && parse_name_directive(ln, "ignore-cross-compile")) &&
-        !ignore_gdb(config, ln) &&
-        !ignore_lldb(config, ln)
-    });
-
-    !val
 }
 
-fn iter_header(testfile: &Path, it: &mut FnMut(&str) -> bool) -> bool {
+fn iter_header(testfile: &Path,
+               cfg: Option<&str>,
+               it: &mut FnMut(&str)) {
     let rdr = BufReader::new(File::open(testfile).unwrap());
     for ln in rdr.lines() {
         // Assume that any directives will be found before the first
         // module or function. This doesn't seem to be an optimization
         // with a warm page cache. Maybe with a cold one.
         let ln = ln.unwrap();
-        if ln.starts_with("fn") ||
-                ln.starts_with("mod") {
-            return true;
-        } else {
-            if !(it(ln.trim())) {
-                return false;
+        let ln = ln.trim();
+        if ln.starts_with("fn") || ln.starts_with("mod") {
+            return;
+        } else if ln.starts_with("//[") {
+            // A comment like `//[foo]` is specific to revision `foo`
+            if let Some(close_brace) = ln.find("]") {
+                let lncfg = &ln[3..close_brace];
+                let matches = match cfg {
+                    Some(s) => s == &lncfg[..],
+                    None => false,
+                };
+                if matches {
+                    it(&ln[close_brace+1..]);
+                }
+            } else {
+                panic!("malformed condition directive: expected `//[foo]`, found `{}`",
+                       ln)
             }
+        } else if ln.starts_with("//") {
+            it(&ln[2..]);
         }
     }
-    return true;
+    return;
 }
 
 fn parse_error_pattern(line: &str) -> Option<String> {
@@ -280,6 +338,11 @@ fn parse_compile_flags(line: &str) -> Option<String> {
     parse_name_value_directive(line, "compile-flags")
 }
 
+fn parse_revisions(line: &str) -> Option<Vec<String>> {
+    parse_name_value_directive(line, "revisions")
+        .map(|r| r.split_whitespace().map(|t| t.to_string()).collect())
+}
+
 fn parse_run_flags(line: &str) -> Option<String> {
     parse_name_value_directive(line, "run-flags")
 }
@@ -316,8 +379,8 @@ fn parse_pretty_compare_only(line: &str) -> bool {
     parse_name_directive(line, "pretty-compare-only")
 }
 
-fn parse_exec_env(line: &str) -> Option<(String, String)> {
-    parse_name_value_directive(line, "exec-env").map(|nv| {
+fn parse_env(line: &str, name: &str) -> Option<(String, String)> {
+    parse_name_value_directive(line, name).map(|nv| {
         // nv is either FOO or FOO=BAR
         let mut strs: Vec<String> = nv
                                       .splitn(2, '=')
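
A hedged example, not from the commit, of the header directives parsed above; the revision names, env var, and flag are hypothetical. A plain `// name: value` line applies to every revision, while a `//[foo]` prefix restricts the directive to revision `foo`, for which `for_each_revision` in runtest.rs also passes `--cfg foo`.

    // revisions: rpass1 cfail2
    // compile-flags: -g
    //[cfail2] rustc-env:HYPOTHETICAL_VAR=1
    //[cfail2] exec-env:HYPOTHETICAL_VAR=2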
index 7c5397a1af989d474c2cd4994d1e57ce61b2af84..f418edf66866e83f0c97d3af8fcc367484c01b33 100644 (file)
@@ -8,9 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![allow(deprecated)]
-
-use std::dynamic_lib::DynamicLibrary;
+use std::env;
+use std::ffi::OsString;
 use std::io::prelude::*;
 use std::path::PathBuf;
 use std::process::{ExitStatus, Command, Child, Output, Stdio};
@@ -18,15 +17,22 @@ use std::process::{ExitStatus, Command, Child, Output, Stdio};
 fn add_target_env(cmd: &mut Command, lib_path: &str, aux_path: Option<&str>) {
     // Need to be sure to put both the lib_path and the aux path in the dylib
     // search path for the child.
-    let mut path = DynamicLibrary::search_path();
+    let var = if cfg!(windows) {
+        "PATH"
+    } else if cfg!(target_os = "macos") {
+        "DYLD_LIBRARY_PATH"
+    } else {
+        "LD_LIBRARY_PATH"
+    };
+    let mut path = env::split_paths(&env::var_os(var).unwrap_or(OsString::new()))
+                       .collect::<Vec<_>>();
     if let Some(p) = aux_path {
         path.insert(0, PathBuf::from(p))
     }
     path.insert(0, PathBuf::from(lib_path));
 
     // Add the new dylib search path var
-    let var = DynamicLibrary::envvar();
-    let newpath = DynamicLibrary::create_path(&path);
+    let newpath = env::join_paths(&path).unwrap();
     cmd.env(var, newpath);
 }
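
A hedged, standalone sketch of the std-only replacement used above for the removed `DynamicLibrary` helpers; the directory argument is a placeholder.

    use std::env;
    use std::ffi::OsString;
    use std::path::PathBuf;

    fn prepend_to_dylib_path(dir: &str) {
        let var = if cfg!(windows) {
            "PATH"
        } else if cfg!(target_os = "macos") {
            "DYLD_LIBRARY_PATH"
        } else {
            "LD_LIBRARY_PATH"
        };
        // split_paths/join_paths handle the platform-specific separator.
        let mut paths: Vec<PathBuf> =
            env::split_paths(&env::var_os(var).unwrap_or(OsString::new())).collect();
        paths.insert(0, PathBuf::from(dir));
        env::set_var(var, &env::join_paths(&paths).unwrap());
    }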
 
index 7cad5a4391c017e28eac0bda936827c17c71aa40..6773c34c7d76da8cadaafb314c9e2f3bbfdc3364 100644 (file)
@@ -11,7 +11,8 @@
 use common::Config;
 use common::{CompileFail, ParseFail, Pretty, RunFail, RunPass, RunPassValgrind};
 use common::{Codegen, DebugInfoLldb, DebugInfoGdb, Rustdoc, CodegenUnits};
-use errors;
+use common::{Incremental};
+use errors::{self, ErrorKind};
 use header::TestProps;
 use header;
 use procsrv;
@@ -59,6 +60,7 @@ pub fn run(config: Config, testpaths: &TestPaths) {
         Codegen => run_codegen_test(&config, &props, &testpaths),
         Rustdoc => run_rustdoc_test(&config, &props, &testpaths),
         CodegenUnits => run_codegen_units_test(&config, &props, &testpaths),
+        Incremental => run_incremental_test(&config, &props, &testpaths),
     }
 }
 
@@ -70,39 +72,77 @@ fn get_output(props: &TestProps, proc_res: &ProcRes) -> String {
     }
 }
 
+
+fn for_each_revision<OP>(config: &Config, props: &TestProps, testpaths: &TestPaths,
+                         mut op: OP)
+    where OP: FnMut(&Config, &TestProps, &TestPaths, Option<&str>)
+{
+    if props.revisions.is_empty() {
+        op(config, props, testpaths, None)
+    } else {
+        for revision in &props.revisions {
+            let mut revision_props = props.clone();
+            header::load_props_into(&mut revision_props,
+                                    &testpaths.file,
+                                    Some(&revision));
+            revision_props.compile_flags.extend(vec![
+                format!("--cfg"),
+                format!("{}", revision),
+            ]);
+            op(config, &revision_props, testpaths, Some(revision));
+        }
+    }
+}
+
 fn run_cfail_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
+    for_each_revision(config, props, testpaths, run_cfail_test_revision);
+}
+
+fn run_cfail_test_revision(config: &Config,
+                           props: &TestProps,
+                           testpaths: &TestPaths,
+                           revision: Option<&str>) {
     let proc_res = compile_test(config, props, testpaths);
 
     if proc_res.status.success() {
-        fatal_proc_rec(&format!("{} test compiled successfully!", config.mode)[..],
-                      &proc_res);
+        fatal_proc_rec(
+            revision,
+            &format!("{} test compiled successfully!", config.mode)[..],
+            &proc_res);
     }
 
-    check_correct_failure_status(&proc_res);
+    check_correct_failure_status(revision, &proc_res);
 
     if proc_res.status.success() {
-        fatal("process did not return an error status");
+        fatal(revision, "process did not return an error status");
     }
 
     let output_to_check = get_output(props, &proc_res);
-    let expected_errors = errors::load_errors(&testpaths.file);
+    let expected_errors = errors::load_errors(&testpaths.file, revision);
     if !expected_errors.is_empty() {
         if !props.error_patterns.is_empty() {
-            fatal("both error pattern and expected errors specified");
+            fatal(revision, "both error pattern and expected errors specified");
         }
-        check_expected_errors(expected_errors, testpaths, &proc_res);
+        check_expected_errors(revision, expected_errors, testpaths, &proc_res);
     } else {
-        check_error_patterns(props, testpaths, &output_to_check, &proc_res);
+        check_error_patterns(revision, props, testpaths, &output_to_check, &proc_res);
     }
-    check_no_compiler_crash(&proc_res);
-    check_forbid_output(props, &output_to_check, &proc_res);
+    check_no_compiler_crash(revision, &proc_res);
+    check_forbid_output(revision, props, &output_to_check, &proc_res);
 }
 
 fn run_rfail_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
+    for_each_revision(config, props, testpaths, run_rfail_test_revision);
+}
+
+fn run_rfail_test_revision(config: &Config,
+                           props: &TestProps,
+                           testpaths: &TestPaths,
+                           revision: Option<&str>) {
     let proc_res = compile_test(config, props, testpaths);
 
     if !proc_res.status.success() {
-        fatal_proc_rec("compilation failed!", &proc_res);
+        fatal_proc_rec(revision, "compilation failed!", &proc_res);
     }
 
     let proc_res = exec_compiled_test(config, props, testpaths);
@@ -110,19 +150,20 @@ fn run_rfail_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
     // The value our Makefile configures valgrind to return on failure
     const VALGRIND_ERR: i32 = 100;
     if proc_res.status.code() == Some(VALGRIND_ERR) {
-        fatal_proc_rec("run-fail test isn't valgrind-clean!", &proc_res);
+        fatal_proc_rec(revision, "run-fail test isn't valgrind-clean!", &proc_res);
     }
 
     let output_to_check = get_output(props, &proc_res);
-    check_correct_failure_status(&proc_res);
-    check_error_patterns(props, testpaths, &output_to_check, &proc_res);
+    check_correct_failure_status(revision, &proc_res);
+    check_error_patterns(revision, props, testpaths, &output_to_check, &proc_res);
 }
 
-fn check_correct_failure_status(proc_res: &ProcRes) {
+fn check_correct_failure_status(revision: Option<&str>, proc_res: &ProcRes) {
     // The value the rust runtime returns on failure
     const RUST_ERR: i32 = 101;
     if proc_res.status.code() != Some(RUST_ERR) {
         fatal_proc_rec(
+            revision,
             &format!("failure produced the wrong error: {}",
                      proc_res.status),
             proc_res);
@@ -130,20 +171,29 @@ fn check_correct_failure_status(proc_res: &ProcRes) {
 }
 
 fn run_rpass_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
+    for_each_revision(config, props, testpaths, run_rpass_test_revision);
+}
+
+fn run_rpass_test_revision(config: &Config,
+                           props: &TestProps,
+                           testpaths: &TestPaths,
+                           revision: Option<&str>) {
     let proc_res = compile_test(config, props, testpaths);
 
     if !proc_res.status.success() {
-        fatal_proc_rec("compilation failed!", &proc_res);
+        fatal_proc_rec(revision, "compilation failed!", &proc_res);
     }
 
     let proc_res = exec_compiled_test(config, props, testpaths);
 
     if !proc_res.status.success() {
-        fatal_proc_rec("test run failed!", &proc_res);
+        fatal_proc_rec(revision, "test run failed!", &proc_res);
     }
 }
 
 fn run_valgrind_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
+    assert!(props.revisions.is_empty(), "revisions not relevant here");
+
     if config.valgrind_path.is_none() {
         assert!(!config.force_valgrind);
         return run_rpass_test(config, props, testpaths);
@@ -152,7 +202,7 @@ fn run_valgrind_test(config: &Config, props: &TestProps, testpaths: &TestPaths)
     let mut proc_res = compile_test(config, props, testpaths);
 
     if !proc_res.status.success() {
-        fatal_proc_rec("compilation failed!", &proc_res);
+        fatal_proc_rec(None, "compilation failed!", &proc_res);
     }
 
     let mut new_config = config.clone();
@@ -160,11 +210,18 @@ fn run_valgrind_test(config: &Config, props: &TestProps, testpaths: &TestPaths)
     proc_res = exec_compiled_test(&new_config, props, testpaths);
 
     if !proc_res.status.success() {
-        fatal_proc_rec("test run failed!", &proc_res);
+        fatal_proc_rec(None, "test run failed!", &proc_res);
     }
 }
 
 fn run_pretty_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
+    for_each_revision(config, props, testpaths, run_pretty_test_revision);
+}
+
+fn run_pretty_test_revision(config: &Config,
+                            props: &TestProps,
+                            testpaths: &TestPaths,
+                            revision: Option<&str>) {
     if props.pp_exact.is_some() {
         logv(config, "testing for exact pretty-printing".to_owned());
     } else {
@@ -180,7 +237,8 @@ fn run_pretty_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
 
     let mut round = 0;
     while round < rounds {
-        logv(config, format!("pretty-printing round {}", round));
+        logv(config, format!("pretty-printing round {} revision {:?}",
+                             round, revision));
         let proc_res = print_source(config,
                                     props,
                                     testpaths,
@@ -188,8 +246,10 @@ fn run_pretty_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
                                     &props.pretty_mode);
 
         if !proc_res.status.success() {
-            fatal_proc_rec(&format!("pretty-printing failed in round {}", round),
-                          &proc_res);
+            fatal_proc_rec(revision,
+                           &format!("pretty-printing failed in round {} revision {:?}",
+                                    round, revision),
+                           &proc_res);
         }
 
         let ProcRes{ stdout, .. } = proc_res;
@@ -215,30 +275,32 @@ fn run_pretty_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
         expected = expected.replace(&cr, "").to_owned();
     }
 
-    compare_source(&expected, &actual);
+    compare_source(revision, &expected, &actual);
 
     // If we're only making sure that the output matches then just stop here
     if props.pretty_compare_only { return; }
 
     // Finally, let's make sure it actually appears to remain valid code
     let proc_res = typecheck_source(config, props, testpaths, actual);
-
     if !proc_res.status.success() {
-        fatal_proc_rec("pretty-printed source does not typecheck", &proc_res);
+        fatal_proc_rec(revision, "pretty-printed source does not typecheck", &proc_res);
     }
+
     if !props.pretty_expanded { return }
 
     // additionally, run `--pretty expanded` and try to build it.
     let proc_res = print_source(config, props, testpaths, srcs[round].clone(), "expanded");
     if !proc_res.status.success() {
-        fatal_proc_rec("pretty-printing (expanded) failed", &proc_res);
+        fatal_proc_rec(revision, "pretty-printing (expanded) failed", &proc_res);
     }
 
     let ProcRes{ stdout: expanded_src, .. } = proc_res;
     let proc_res = typecheck_source(config, props, testpaths, expanded_src);
     if !proc_res.status.success() {
-        fatal_proc_rec("pretty-printed source (expanded) does not typecheck",
-                      &proc_res);
+        fatal_proc_rec(
+            revision,
+            "pretty-printed source (expanded) does not typecheck",
+            &proc_res);
     }
 
     return;
@@ -256,7 +318,7 @@ fn run_pretty_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
                                      testpaths,
                                      pretty_type.to_owned()),
                         props.exec_env.clone(),
-                        &config.compile_lib_path,
+                        config.compile_lib_path.to_str().unwrap(),
                         Some(aux_dir.to_str().unwrap()),
                         Some(src))
     }
@@ -275,16 +337,16 @@ fn run_pretty_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
                             "-L".to_owned(),
                             aux_dir.to_str().unwrap().to_owned());
         args.extend(split_maybe_args(&config.target_rustcflags));
-        args.extend(split_maybe_args(&props.compile_flags));
+        args.extend(props.compile_flags.iter().cloned());
         return ProcArgs {
             prog: config.rustc_path.to_str().unwrap().to_owned(),
             args: args,
         };
     }
 
-    fn compare_source(expected: &str, actual: &str) {
+    fn compare_source(revision: Option<&str>, expected: &str, actual: &str) {
         if expected != actual {
-            error("pretty-printed source does not match expected source");
+            error(revision, "pretty-printed source does not match expected source");
             println!("\n\
 expected:\n\
 ------------------------------------------\n\
@@ -322,7 +384,7 @@ actual:\n\
                             "-L".to_owned(),
                             aux_dir.to_str().unwrap().to_owned());
         args.extend(split_maybe_args(&config.target_rustcflags));
-        args.extend(split_maybe_args(&props.compile_flags));
+        args.extend(props.compile_flags.iter().cloned());
         // FIXME (#9639): This needs to handle non-utf8 paths
         return ProcArgs {
             prog: config.rustc_path.to_str().unwrap().to_owned(),
@@ -332,6 +394,8 @@ actual:\n\
 }
 
 fn run_debuginfo_gdb_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
+    assert!(props.revisions.is_empty(), "revisions not relevant here");
+
     let mut config = Config {
         target_rustcflags: cleanup_debug_info_options(&config.target_rustcflags),
         host_rustcflags: cleanup_debug_info_options(&config.host_rustcflags),
@@ -349,7 +413,7 @@ fn run_debuginfo_gdb_test(config: &Config, props: &TestProps, testpaths: &TestPa
     // compile test file (it should have 'compile-flags:-g' in the header)
     let compiler_run_result = compile_test(config, props, testpaths);
     if !compiler_run_result.status.success() {
-        fatal_proc_rec("compilation failed!", &compiler_run_result);
+        fatal_proc_rec(None, "compilation failed!", &compiler_run_result);
     }
 
     let exe_file = make_exe_name(config, testpaths);
@@ -441,7 +505,7 @@ fn run_debuginfo_gdb_test(config: &Config, props: &TestProps, testpaths: &TestPa
 
             let tool_path = match config.android_cross_path.to_str() {
                 Some(x) => x.to_owned(),
-                None => fatal("cannot find android cross path")
+                None => fatal(None, "cannot find android cross path")
             };
 
             let debugger_script = make_out_name(config, testpaths, "debugger.script");
@@ -573,14 +637,14 @@ fn run_debuginfo_gdb_test(config: &Config, props: &TestProps, testpaths: &TestPa
                                                   testpaths,
                                                   proc_args,
                                                   environment,
-                                                  &config.run_lib_path,
+                                                  config.run_lib_path.to_str().unwrap(),
                                                   None,
                                                   None);
         }
     }
 
     if !debugger_run_result.status.success() {
-        fatal("gdb failed to execute");
+        fatal(None, "gdb failed to execute");
     }
 
     check_debugger_output(&debugger_run_result, &check_lines);
@@ -600,8 +664,10 @@ fn find_rust_src_root(config: &Config) -> Option<PathBuf> {
 }
 
 fn run_debuginfo_lldb_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
+    assert!(props.revisions.is_empty(), "revisions not relevant here");
+
     if config.lldb_python_dir.is_none() {
-        fatal("Can't run LLDB test because LLDB's python path is not set.");
+        fatal(None, "Can't run LLDB test because LLDB's python path is not set.");
     }
 
     let mut config = Config {
@@ -615,7 +681,7 @@ fn run_debuginfo_lldb_test(config: &Config, props: &TestProps, testpaths: &TestP
     // compile test file (it should have 'compile-flags:-g' in the header)
     let compile_result = compile_test(config, props, testpaths);
     if !compile_result.status.success() {
-        fatal_proc_rec("compilation failed!", &compile_result);
+        fatal_proc_rec(None, "compilation failed!", &compile_result);
     }
 
     let exe_file = make_exe_name(config, testpaths);
@@ -663,8 +729,11 @@ fn run_debuginfo_lldb_test(config: &Config, props: &TestProps, testpaths: &TestP
     script_str.push_str("type category enable Rust\n");
 
     // Set breakpoints on every line that contains the string "#break"
+    let source_file_name = testpaths.file.file_name().unwrap().to_string_lossy();
     for line in &breakpoint_lines {
-        script_str.push_str(&format!("breakpoint set --line {}\n", line));
+        script_str.push_str(&format!("breakpoint set --file '{}' --line {}\n",
+                                     source_file_name,
+                                     line));
     }
 
     // Append the other commands
@@ -692,7 +761,7 @@ fn run_debuginfo_lldb_test(config: &Config, props: &TestProps, testpaths: &TestP
                                        &rust_src_root);
 
     if !debugger_run_result.status.success() {
-        fatal_proc_rec("Error while running LLDB", &debugger_run_result);
+        fatal_proc_rec(None, "Error while running LLDB", &debugger_run_result);
     }
 
     check_debugger_output(&debugger_run_result, &check_lines);
@@ -725,7 +794,7 @@ fn cmd2procres(config: &Config, testpaths: &TestPaths, cmd: &mut Command)
              String::from_utf8(stderr).unwrap())
         },
         Err(e) => {
-            fatal(&format!("Failed to setup Python process for \
+            fatal(None, &format!("Failed to setup Python process for \
                             LLDB script: {}", e))
         }
     };
@@ -775,7 +844,7 @@ fn parse_debugger_commands(testpaths: &TestPaths, debugger_prefix: &str)
                 });
             }
             Err(e) => {
-                fatal(&format!("Error while parsing debugger commands: {}", e))
+                fatal(None, &format!("Error while parsing debugger commands: {}", e))
             }
         }
         counter += 1;
@@ -799,12 +868,28 @@ fn cleanup_debug_info_options(options: &Option<String>) -> Option<String> {
         "-g".to_owned(),
         "--debuginfo".to_owned()
     ];
-    let new_options =
+    let mut new_options =
         split_maybe_args(options).into_iter()
                                  .filter(|x| !options_to_remove.contains(x))
-                                 .collect::<Vec<String>>()
-                                 .join(" ");
-    Some(new_options)
+                                 .collect::<Vec<String>>();
+
+    let mut i = 0;
+    while i + 1 < new_options.len() {
+        if new_options[i] == "-Z" {
+            // FIXME #31005 MIR missing debuginfo currently.
+            if new_options[i + 1] == "orbit" {
+                // Remove "-Z" and "orbit".
+                new_options.remove(i);
+                new_options.remove(i);
+                continue;
+            }
+            // Always skip over -Z's argument.
+            i += 1;
+        }
+        i += 1;
+    }
+
+    Some(new_options.join(" "))
 }
 
 fn check_debugger_output(debugger_run_result: &ProcRes, check_lines: &[String]) {
@@ -857,19 +942,21 @@ fn check_debugger_output(debugger_run_result: &ProcRes, check_lines: &[String])
             }
         }
         if i != num_check_lines {
-            fatal_proc_rec(&format!("line not found in debugger output: {}",
+            fatal_proc_rec(None, &format!("line not found in debugger output: {}",
                                     check_lines.get(i).unwrap()),
                           debugger_run_result);
         }
     }
 }
 
-fn check_error_patterns(props: &TestProps,
+fn check_error_patterns(revision: Option<&str>,
+                        props: &TestProps,
                         testpaths: &TestPaths,
                         output_to_check: &str,
                         proc_res: &ProcRes) {
     if props.error_patterns.is_empty() {
-        fatal(&format!("no error pattern specified in {:?}",
+        fatal(revision,
+              &format!("no error pattern specified in {:?}",
                        testpaths.file.display()));
     }
     let mut next_err_idx = 0;
@@ -891,75 +978,68 @@ fn check_error_patterns(props: &TestProps,
 
     let missing_patterns = &props.error_patterns[next_err_idx..];
     if missing_patterns.len() == 1 {
-        fatal_proc_rec(&format!("error pattern '{}' not found!", missing_patterns[0]),
-                      proc_res);
+        fatal_proc_rec(
+            revision,
+            &format!("error pattern '{}' not found!", missing_patterns[0]),
+            proc_res);
     } else {
         for pattern in missing_patterns {
-            error(&format!("error pattern '{}' not found!", *pattern));
+            error(revision, &format!("error pattern '{}' not found!", *pattern));
         }
-        fatal_proc_rec("multiple error patterns not found", proc_res);
+        fatal_proc_rec(revision, "multiple error patterns not found", proc_res);
     }
 }
 
-fn check_no_compiler_crash(proc_res: &ProcRes) {
+fn check_no_compiler_crash(revision: Option<&str>, proc_res: &ProcRes) {
     for line in proc_res.stderr.lines() {
         if line.starts_with("error: internal compiler error:") {
-            fatal_proc_rec("compiler encountered internal error",
-                          proc_res);
+            fatal_proc_rec(revision,
+                           "compiler encountered internal error",
+                           proc_res);
         }
     }
 }
 
-fn check_forbid_output(props: &TestProps,
+fn check_forbid_output(revision: Option<&str>,
+                       props: &TestProps,
                        output_to_check: &str,
                        proc_res: &ProcRes) {
     for pat in &props.forbid_output {
         if output_to_check.contains(pat) {
-            fatal_proc_rec("forbidden pattern found in compiler output", proc_res);
+            fatal_proc_rec(revision,
+                           "forbidden pattern found in compiler output",
+                           proc_res);
         }
     }
 }
 
-fn check_expected_errors(expected_errors: Vec<errors::ExpectedError>,
+fn check_expected_errors(revision: Option<&str>,
+                         expected_errors: Vec<errors::ExpectedError>,
                          testpaths: &TestPaths,
                          proc_res: &ProcRes) {
-
     // true if we found the error in question
     let mut found_flags = vec![false; expected_errors.len()];
 
     if proc_res.status.success() {
-        fatal("process did not return an error status");
+        fatal_proc_rec(revision, "process did not return an error status", proc_res);
     }
 
     let prefixes = expected_errors.iter().map(|ee| {
-        let expected = format!("{}:{}:", testpaths.file.display(), ee.line);
+        let expected = format!("{}:{}:", testpaths.file.display(), ee.line_num);
         // On windows just translate all '\' path separators to '/'
         expected.replace(r"\", "/")
     }).collect::<Vec<String>>();
 
+    // If the testcase being checked contains at least one expected "help"
+    // message, then we'll ensure that all "help" messages are expected.
+    // Otherwise, all "help" messages reported by the compiler will be ignored.
+    // This logic also applies to "note" messages.
     let (expect_help, expect_note) =
         expected_errors.iter()
                         .fold((false, false),
                               |(acc_help, acc_note), ee|
-                                  (acc_help || ee.kind == "help:", acc_note ||
-                                   ee.kind == "note:"));
-
-    fn prefix_matches(line: &str, prefix: &str) -> bool {
-        use std::ascii::AsciiExt;
-        // On windows just translate all '\' path separators to '/'
-        let line = line.replace(r"\", "/");
-        if cfg!(windows) {
-            line.to_ascii_lowercase().starts_with(&prefix.to_ascii_lowercase())
-        } else {
-            line.starts_with(prefix)
-        }
-    }
-
-    // A multi-line error will have followup lines which start with a space
-    // or open paren.
-    fn continuation( line: &str) -> bool {
-        line.starts_with(" ") || line.starts_with("(")
-    }
+                                  (acc_help || ee.kind == Some(ErrorKind::Help),
+                                   acc_note || ee.kind == Some(ErrorKind::Note)));
 
     // Scan and extract our error/warning messages,
     // which look like:
@@ -970,20 +1050,22 @@ fn check_expected_errors(expected_errors: Vec<errors::ExpectedError>,
     //
     // This pattern is ambiguous on windows, because filename may contain
     // a colon, so any path prefix must be detected and removed first.
+    let mut unexpected = 0;
+    let mut not_found = 0;
     for line in proc_res.stderr.lines() {
         let mut was_expected = false;
         let mut prev = 0;
         for (i, ee) in expected_errors.iter().enumerate() {
             if !found_flags[i] {
-                debug!("prefix={} ee.kind={} ee.msg={} line={}",
+                debug!("prefix={} ee.kind={:?} ee.msg={} line={}",
                        prefixes[i],
                        ee.kind,
                        ee.msg,
                        line);
                 // Suggestions have no line number in their output, so take on the line number of
                 // the previous expected error
-                if ee.kind == "suggestion" {
-                    assert!(expected_errors[prev].kind == "help",
+                if ee.kind == Some(ErrorKind::Suggestion) {
+                    assert!(expected_errors[prev].kind == Some(ErrorKind::Help),
                             "SUGGESTIONs must be preceded by a HELP");
                     if line.contains(&ee.msg) {
                         found_flags[i] = true;
@@ -991,9 +1073,11 @@ fn check_expected_errors(expected_errors: Vec<errors::ExpectedError>,
                         break;
                     }
                 }
-                if (prefix_matches(line, &prefixes[i]) || continuation(line)) &&
-                    line.contains(&ee.kind) &&
-                    line.contains(&ee.msg) {
+                if
+                    (prefix_matches(line, &prefixes[i]) || continuation(line)) &&
+                    (ee.kind.is_none() || line.contains(&ee.kind.as_ref().unwrap().to_string())) &&
+                    line.contains(&ee.msg)
+                {
                     found_flags[i] = true;
                     was_expected = true;
                     break;
@@ -1008,20 +1092,47 @@ fn check_expected_errors(expected_errors: Vec<errors::ExpectedError>,
         }
 
         if !was_expected && is_unexpected_compiler_message(line, expect_help, expect_note) {
-            fatal_proc_rec(&format!("unexpected compiler message: '{}'",
-                                    line),
-                          proc_res);
+            error(revision, &format!("unexpected compiler message: '{}'", line));
+            unexpected += 1;
         }
     }
 
     for (i, &flag) in found_flags.iter().enumerate() {
         if !flag {
             let ee = &expected_errors[i];
-            fatal_proc_rec(&format!("expected {} on line {} not found: {}",
-                                    ee.kind, ee.line, ee.msg),
-                          proc_res);
+            error(revision, &format!("expected {} on line {} not found: {}",
+                                     ee.kind.as_ref()
+                                            .map_or("message".into(),
+                                                    |k| k.to_string()),
+                                     ee.line_num, ee.msg));
+            not_found += 1;
         }
     }
+
+    if unexpected > 0 || not_found > 0 {
+        fatal_proc_rec(
+            revision,
+            &format!("{} unexpected errors found, {} expected errors not found",
+                     unexpected, not_found),
+            proc_res);
+    }
+
+    fn prefix_matches(line: &str, prefix: &str) -> bool {
+        use std::ascii::AsciiExt;
+        // On windows just translate all '\' path separators to '/'
+        let line = line.replace(r"\", "/");
+        if cfg!(windows) {
+            line.to_ascii_lowercase().starts_with(&prefix.to_ascii_lowercase())
+        } else {
+            line.starts_with(prefix)
+        }
+    }
+
+    // A multi-line error will have followup lines which start with a space
+    // or open paren.
+    fn continuation( line: &str) -> bool {
+        line.starts_with(" ") || line.starts_with("(")
+    }
 }
 
 fn is_unexpected_compiler_message(line: &str, expect_help: bool, expect_note: bool) -> bool {
@@ -1066,7 +1177,7 @@ fn scan_char(haystack: &str, needle: char, idx: &mut usize) -> bool {
     if *idx >= haystack.len() {
         return false;
     }
-    let ch = haystack.char_at(*idx);
+    let ch = haystack[*idx..].chars().next().unwrap();
     if ch != needle {
         return false;
     }
@@ -1077,7 +1188,7 @@ fn scan_char(haystack: &str, needle: char, idx: &mut usize) -> bool {
 fn scan_integer(haystack: &str, idx: &mut usize) -> bool {
     let mut i = *idx;
     while i < haystack.len() {
-        let ch = haystack.char_at(i);
+        let ch = haystack[i..].chars().next().unwrap();
         if ch < '0' || '9' < ch {
             break;
         }
@@ -1097,7 +1208,7 @@ fn scan_string(haystack: &str, needle: &str, idx: &mut usize) -> bool {
         if haystack_i >= haystack.len() {
             return false;
         }
-        let ch = haystack.char_at(haystack_i);
+        let ch = haystack[haystack_i..].chars().next().unwrap();
         haystack_i += ch.len_utf8();
         if !scan_char(needle, ch, &mut needle_i) {
             return false;
@@ -1184,7 +1295,7 @@ fn document(config: &Config,
                         "-o".to_owned(),
                         out_dir.to_str().unwrap().to_owned(),
                         testpaths.file.to_str().unwrap().to_owned()];
-    args.extend(split_maybe_args(&props.compile_flags));
+    args.extend(props.compile_flags.iter().cloned());
     let args = ProcArgs {
         prog: config.rustdoc_path.to_str().unwrap().to_owned(),
         args: args,
@@ -1209,7 +1320,7 @@ fn exec_compiled_test(config: &Config, props: &TestProps,
                             testpaths,
                             make_run_args(config, props, testpaths),
                             env,
-                            &config.run_lib_path,
+                            config.run_lib_path.to_str().unwrap(),
                             Some(aux_dir.to_str().unwrap()),
                             None)
         }
@@ -1281,11 +1392,12 @@ fn compose_and_run_compiler(config: &Config, props: &TestProps,
                                      &aux_testpaths,
                                      aux_args,
                                      Vec::new(),
-                                     &config.compile_lib_path,
+                                     config.compile_lib_path.to_str().unwrap(),
                                      Some(aux_dir.to_str().unwrap()),
                                      None);
         if !auxres.status.success() {
             fatal_proc_rec(
+                None,
                 &format!("auxiliary build of {:?} failed to compile: ",
                         aux_testpaths.file.display()),
                 &auxres);
@@ -1302,8 +1414,8 @@ fn compose_and_run_compiler(config: &Config, props: &TestProps,
     compose_and_run(config,
                     testpaths,
                     args,
-                    Vec::new(),
-                    &config.compile_lib_path,
+                    props.rustc_env.clone(),
+                    config.compile_lib_path.to_str().unwrap(),
                     Some(aux_dir.to_str().unwrap()),
                     input)
 }
@@ -1369,7 +1481,7 @@ fn make_compile_args<F>(config: &Config,
     } else {
         args.extend(split_maybe_args(&config.target_rustcflags));
     }
-    args.extend(split_maybe_args(&props.compile_flags));
+    args.extend(props.compile_flags.iter().cloned());
     return ProcArgs {
         prog: config.rustc_path.to_str().unwrap().to_owned(),
         args: args,
@@ -1537,13 +1649,20 @@ fn maybe_dump_to_stdout(config: &Config, out: &str, err: &str) {
     }
 }
 
-fn error(err: &str) { println!("\nerror: {}", err); }
+fn error(revision: Option<&str>, err: &str) {
+    match revision {
+        Some(rev) => println!("\nerror in revision `{}`: {}", rev, err),
+        None => println!("\nerror: {}", err)
+    }
+}
 
-fn fatal(err: &str) -> ! { error(err); panic!(); }
+fn fatal(revision: Option<&str>, err: &str) -> ! {
+    error(revision, err); panic!();
+}
 
-fn fatal_proc_rec(err: &str, proc_res: &ProcRes) -> ! {
-    print!("\n\
-error: {}\n\
+fn fatal_proc_rec(revision: Option<&str>, err: &str, proc_res: &ProcRes) -> ! {
+    error(revision, err);
+    print!("\
 status: {}\n\
 command: {}\n\
 stdout:\n\
@@ -1555,7 +1674,7 @@ stderr:\n\
 {}\n\
 ------------------------------------------\n\
 \n",
-             err, proc_res.status, proc_res.cmdline, proc_res.stdout,
+             proc_res.status, proc_res.cmdline, proc_res.stdout,
              proc_res.stderr);
     panic!();
 }
@@ -1753,20 +1872,22 @@ fn check_ir_with_filecheck(config: &Config, testpaths: &TestPaths) -> ProcRes {
 }
 
 fn run_codegen_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
+    assert!(props.revisions.is_empty(), "revisions not relevant here");
 
     if config.llvm_bin_path.is_none() {
-        fatal("missing --llvm-bin-path");
+        fatal(None, "missing --llvm-bin-path");
     }
 
     let mut proc_res = compile_test_and_save_ir(config, props, testpaths);
     if !proc_res.status.success() {
-        fatal_proc_rec("compilation failed!", &proc_res);
+        fatal_proc_rec(None, "compilation failed!", &proc_res);
     }
 
     proc_res = check_ir_with_filecheck(config, testpaths);
     if !proc_res.status.success() {
-        fatal_proc_rec("verification with 'FileCheck' failed",
-                      &proc_res);
+        fatal_proc_rec(None,
+                       "verification with 'FileCheck' failed",
+                       &proc_res);
     }
 }
 
@@ -1782,13 +1903,15 @@ fn charset() -> &'static str {
 }
 
 fn run_rustdoc_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
+    assert!(props.revisions.is_empty(), "revisions not relevant here");
+
     let out_dir = output_base_name(config, testpaths);
     let _ = fs::remove_dir_all(&out_dir);
     ensure_dir(&out_dir);
 
     let proc_res = document(config, props, testpaths, &out_dir);
     if !proc_res.status.success() {
-        fatal_proc_rec("rustdoc failed!", &proc_res);
+        fatal_proc_rec(None, "rustdoc failed!", &proc_res);
     }
     let root = find_rust_src_root(config).unwrap();
 
@@ -1799,18 +1922,20 @@ fn run_rustdoc_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
                                   .arg(out_dir)
                                   .arg(&testpaths.file));
     if !res.status.success() {
-        fatal_proc_rec("htmldocck failed!", &res);
+        fatal_proc_rec(None, "htmldocck failed!", &res);
     }
 }
 
 fn run_codegen_units_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
+    assert!(props.revisions.is_empty(), "revisions not relevant here");
+
     let proc_res = compile_test(config, props, testpaths);
 
     if !proc_res.status.success() {
-        fatal_proc_rec("compilation failed!", &proc_res);
+        fatal_proc_rec(None, "compilation failed!", &proc_res);
     }
 
-    check_no_compiler_crash(&proc_res);
+    check_no_compiler_crash(None, &proc_res);
 
     let prefix = "TRANS_ITEM ";
 
@@ -1821,7 +1946,7 @@ fn run_codegen_units_test(config: &Config, props: &TestProps, testpaths: &TestPa
         .map(|s| (&s[prefix.len()..]).to_string())
         .collect();
 
-    let expected: HashSet<String> = errors::load_errors(&testpaths.file)
+    let expected: HashSet<String> = errors::load_errors(&testpaths.file, None)
         .iter()
         .map(|e| e.msg.trim().to_string())
         .collect();
@@ -1843,3 +1968,67 @@ fn run_codegen_units_test(config: &Config, props: &TestProps, testpaths: &TestPa
         panic!();
     }
 }
+
+fn run_incremental_test(config: &Config, props: &TestProps, testpaths: &TestPaths) {
+    // Basic plan for a test incremental/foo/bar.rs:
+    // - load the list of revisions, e.g. rpass1, cfail2, rpass3
+    //   - each must begin with `rpass`, `rfail`, or `cfail`
+    //   - if `rpass`, expect compilation and execution to succeed
+    //   - if `cfail`, expect compilation to fail
+    //   - if `rfail`, expect execution to fail
+    // - create a directory build/foo/bar.incremental
+    // - compile foo/bar.rs with -Z incremental=.../foo/bar.incremental and --cfg rpass1
+    //   - because the revision name starts with "rpass", expect success
+    // - compile foo/bar.rs with -Z incremental=.../foo/bar.incremental and --cfg cfail2
+    //   - because the revision name starts with "cfail", expect a compile error
+    //   - load expected errors as usual, but filter for those that end in `[cfail2]`
+    // - compile foo/bar.rs with -Z incremental=.../foo/bar.incremental and --cfg rpass3
+    //   - because the revision name starts with "rpass", expect success
+    // - execute build/foo/bar.exe and save output
+    //
+    // FIXME -- use non-incremental mode as an oracle? That doesn't apply
+    // to #[rustc_dirty] and clean tests I guess
+
+    assert!(!props.revisions.is_empty(), "incremental tests require a list of revisions");
+
+    let output_base_name = output_base_name(config, testpaths);
+
+    // Create the incremental workproduct directory.
+    let incremental_dir = output_base_name.with_extension("incremental");
+    if incremental_dir.exists() {
+        fs::remove_dir_all(&incremental_dir).unwrap();
+    }
+    fs::create_dir_all(&incremental_dir).unwrap();
+
+    if config.verbose {
+        print!("incremental_dir={}", incremental_dir.display());
+    }
+
+    for revision in &props.revisions {
+        let mut revision_props = props.clone();
+        header::load_props_into(&mut revision_props, &testpaths.file, Some(&revision));
+
+        revision_props.compile_flags.extend(vec![
+            format!("-Z"),
+            format!("incremental={}", incremental_dir.display()),
+            format!("--cfg"),
+            format!("{}", revision),
+        ]);
+
+        if config.verbose {
+            print!("revision={:?} revision_props={:#?}", revision, revision_props);
+        }
+
+        if revision.starts_with("rpass") {
+            run_rpass_test_revision(config, &revision_props, testpaths, Some(&revision));
+        } else if revision.starts_with("rfail") {
+            run_rfail_test_revision(config, &revision_props, testpaths, Some(&revision));
+        } else if revision.starts_with("cfail") {
+            run_cfail_test_revision(config, &revision_props, testpaths, Some(&revision));
+        } else {
+            fatal(
+                Some(revision),
+                "revision name must begin with rpass, rfail, or cfail");
+        }
+    }
+}
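Since each revision is compiled with `--cfg <revision>` and expected errors are filtered per revision, a test driven by this loop could look roughly like the sketch below. This is a hypothetical illustration, not a file from this commit, and the `// revisions:` header plus the revision names are assumptions:

```rust
// revisions: rpass1 cfail2

#[cfg(rpass1)]
fn value() -> i32 { 1 }      // first revision: compiles and runs

#[cfg(cfail2)]
fn value() -> bool { true }  // second revision: deliberately breaks the caller

fn main() {
    let x: i32 = value();    // type error expected only in the `cfail2` revision
    println!("{}", x);
}
```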
index fe5e1c3990c5c8412a41fd3efe9c6b7835531c03..18aa9f24580d5f373855d0fe9da2378a485d38b5 100644 (file)
@@ -9,6 +9,7 @@
     * [Comments](comments.md)
     * [if](if.md)
     * [Loops](loops.md)
+    * [Vectors](vectors.md)
     * [Ownership](ownership.md)
     * [References and Borrowing](references-and-borrowing.md)
     * [Lifetimes](lifetimes.md)
@@ -18,7 +19,6 @@
     * [Match](match.md)
     * [Patterns](patterns.md)
     * [Method Syntax](method-syntax.md)
-    * [Vectors](vectors.md)
     * [Strings](strings.md)
     * [Generics](generics.md)
     * [Traits](traits.md)
index a0676a33996fc34bf83a380806db5a3acaa11373..cb54ac2419ecd0d55baf0a780d2c4e87d7ddef24 100644 (file)
@@ -131,7 +131,7 @@ declarations.
 ## Trait objects with associated types
 
 There’s one more bit of syntax we should talk about: trait objects. If you
-try to create a trait object from an associated type, like this:
+try to create a trait object from a trait with an associated type, like this:
 
 ```rust,ignore
 # trait Graph {
index 5cafe1693690ddf1a3b9a1524295b5f9906b42ad..7d03d2991abdcc5133d753b77ba8e7deaf634826 100644 (file)
@@ -17,12 +17,12 @@ function result.
 The most common case of coercion is removing mutability from a reference:
 
  * `&mut T` to `&T`
+
 An analogous conversion is to remove mutability from a
 [raw pointer](raw-pointers.md):
 
  * `*mut T` to `*const T`
+
 References can also be coerced to raw pointers:
 
  * `&T` to `*const T`
@@ -32,7 +32,7 @@ References can also be coerced to raw pointers:
 Custom coercions may be defined using [`Deref`](deref-coercions.md).
 
 Coercion is transitive.
+
 # `as`
 
 The `as` keyword does safe casting:
@@ -64,7 +64,7 @@ A cast `e as U` is also valid in any of the following cases:
     and `U` is an integer type; *enum-cast*
  * `e` has type `bool` or `char` and `U` is an integer type; *prim-int-cast*
  * `e` has type `u8` and `U` is `char`; *u8-char-cast*
+
 For example
 
 ```rust
@@ -98,9 +98,9 @@ The semantics of numeric casts are:
 
 [float-int]: https://github.com/rust-lang/rust/issues/10184
 [float-float]: https://github.com/rust-lang/rust/issues/15536
+
 ## Pointer casts
+
 Perhaps surprisingly, it is safe to cast [raw pointers](raw-pointers.md) to and
 from integers, and to cast between pointers to different types subject to
 some constraints. It is only unsafe to dereference the pointer:
@@ -114,7 +114,7 @@ let b = a as u32;
 
 * `e` has type `*T`, `U` has type `*U_0`, and either `U_0: Sized` or
   `unsize_kind(T) == unsize_kind(U_0)`; a *ptr-ptr-cast*
-  
+
 * `e` has type `*T` and `U` is a numeric type, while `T: Sized`; *ptr-addr-cast*
 
 * `e` is an integer and `U` is `*U_0`, while `U_0: Sized`; *addr-ptr-cast*
index f2b92e6dec4ddd664d48b0be1834353557783709..50350213074bf3a2cd2368db61d0d7acce5c7a23 100644 (file)
@@ -204,7 +204,7 @@ borrow checker. Generally we know that such mutations won't happen in a nested f
 to check.
 
 For large, complicated programs, it becomes useful to put some things in `RefCell`s to make things
-simpler. For example, a lot of the maps in [the `ctxt` struct][ctxt] in the Rust compiler internals
+simpler. For example, a lot of the maps in the `ctxt` struct in the Rust compiler internals
 are inside this wrapper. These are only modified once (during creation, which is not right after
 initialization) or a couple of times in well-separated places. However, since this struct is
 pervasively used everywhere, juggling mutable and immutable pointers would be hard (perhaps
@@ -235,7 +235,6 @@ At runtime each borrow causes a modification/check of the refcount.
 [cell-mod]: ../std/cell/
 [cell]: ../std/cell/struct.Cell.html
 [refcell]: ../std/cell/struct.RefCell.html
-[ctxt]: ../rustc/middle/ty/struct.ctxt.html
 
 # Synchronous types
 
index 237545edc05bbf9231b516116bffdb1cba4a6f89..a8135ad384932bbf88916b454380825bb301ecf0 100644 (file)
@@ -371,14 +371,13 @@ assert_eq!(6, answer);
 This gives us these long, related errors:
 
 ```text
-error: the trait `core::marker::Sized` is not implemented for the type
-`core::ops::Fn(i32) -> i32` [E0277]
+error: the trait bound `core::ops::Fn(i32) -> i32 : core::marker::Sized` is not satisfied [E0277]
 fn factory() -> (Fn(i32) -> i32) {
                 ^~~~~~~~~~~~~~~~
 note: `core::ops::Fn(i32) -> i32` does not have a constant size known at compile-time
 fn factory() -> (Fn(i32) -> i32) {
                 ^~~~~~~~~~~~~~~~
-error: the trait `core::marker::Sized` is not implemented for the type `core::ops::Fn(i32) -> i32` [E0277]
+error: the trait bound `core::ops::Fn(i32) -> i32 : core::marker::Sized` is not satisfied [E0277]
 let f = factory();
     ^
 note: `core::ops::Fn(i32) -> i32` does not have a constant size known at compile-time
@@ -502,5 +501,5 @@ assert_eq!(6, answer);
 ```
 
 By making the inner closure a `move Fn`, we create a new stack frame for our
-closure. By `Box`ing it up, we’ve given it a known size, and allowing it to
+closure. By `Box`ing it up, we’ve given it a known size, allowing it to
 escape our stack frame.
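For reference, a factory along the lines the chapter describes might look like the sketch below (an illustration consistent with the surrounding text, not the book's own listing):

```rust
// Boxing the closure gives it a known size, so it can escape
// the factory's stack frame.
fn factory() -> Box<Fn(i32) -> i32> {
    let num = 5;
    Box::new(move |x| x + num)
}

fn main() {
    let f = factory();
    let answer = f(1);
    assert_eq!(6, answer);
}
```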
index 800be13a243fb9e09b17e2e6655e98ee0f80978b..1af05bfea19a210001ee73542036168f6e997fb9 100644 (file)
@@ -8,12 +8,12 @@ extend the compiler's behavior with new syntax extensions, lint checks, etc.
 A plugin is a dynamic library crate with a designated *registrar* function that
 registers extensions with `rustc`. Other crates can load these extensions using
 the crate attribute `#![plugin(...)]`.  See the
-[`rustc_plugin`](../rustc_plugin/index.html) documentation for more about the
+`rustc_plugin` documentation for more about the
 mechanics of defining and loading a plugin.
 
 If present, arguments passed as `#![plugin(foo(... args ...))]` are not
 interpreted by rustc itself.  They are provided to the plugin through the
-`Registry`'s [`args` method](../rustc_plugin/registry/struct.Registry.html#method.args).
+`Registry`'s `args` method.
 
 In the vast majority of cases, a plugin should *only* be used through
 `#![plugin]` and not through an `extern crate` item.  Linking a plugin would
@@ -30,7 +30,7 @@ of a library.
 Plugins can extend Rust's syntax in various ways. One kind of syntax extension
 is the procedural macro. These are invoked the same way as [ordinary
 macros](macros.html), but the expansion is performed by arbitrary Rust
-code that manipulates [syntax trees](../syntax/ast/index.html) at
+code that manipulates syntax trees at
 compile time.
 
 Let's write a plugin
@@ -120,11 +120,8 @@ The advantages over a simple `fn(&str) -> u32` are:
 
 In addition to procedural macros, you can define new
 [`derive`](../reference.html#derive)-like attributes and other kinds of
-extensions.  See
-[`Registry::register_syntax_extension`](../rustc_plugin/registry/struct.Registry.html#method.register_syntax_extension)
-and the [`SyntaxExtension`
-enum](https://doc.rust-lang.org/syntax/ext/base/enum.SyntaxExtension.html).  For
-a more involved macro example, see
+extensions.  See `Registry::register_syntax_extension` and the `SyntaxExtension`
+enum.  For a more involved macro example, see
 [`regex_macros`](https://github.com/rust-lang/regex/blob/master/regex_macros/src/lib.rs).
 
 
@@ -132,7 +129,7 @@ a more involved macro example, see
 
 Some of the [macro debugging tips](macros.html#debugging-macro-code) are applicable.
 
-You can use [`syntax::parse`](../syntax/parse/index.html) to turn token trees into
+You can use `syntax::parse` to turn token trees into
 higher-level syntax elements like expressions:
 
 ```ignore
@@ -148,30 +145,21 @@ Looking through [`libsyntax` parser
 code](https://github.com/rust-lang/rust/blob/master/src/libsyntax/parse/parser.rs)
 will give you a feel for how the parsing infrastructure works.
 
-Keep the [`Span`s](../syntax/codemap/struct.Span.html) of
-everything you parse, for better error reporting. You can wrap
-[`Spanned`](../syntax/codemap/struct.Spanned.html) around
-your custom data structures.
-
-Calling
-[`ExtCtxt::span_fatal`](../syntax/ext/base/struct.ExtCtxt.html#method.span_fatal)
-will immediately abort compilation. It's better to instead call
-[`ExtCtxt::span_err`](../syntax/ext/base/struct.ExtCtxt.html#method.span_err)
-and return
-[`DummyResult`](../syntax/ext/base/struct.DummyResult.html),
-so that the compiler can continue and find further errors.
-
-To print syntax fragments for debugging, you can use
-[`span_note`](../syntax/ext/base/struct.ExtCtxt.html#method.span_note) together
-with
-[`syntax::print::pprust::*_to_string`](https://doc.rust-lang.org/syntax/print/pprust/index.html#functions).
-
-The example above produced an integer literal using
-[`AstBuilder::expr_usize`](../syntax/ext/build/trait.AstBuilder.html#tymethod.expr_usize).
+Keep the `Span`s of everything you parse, for better error reporting. You can
+wrap `Spanned` around your custom data structures.
+
+Calling `ExtCtxt::span_fatal` will immediately abort compilation. It's better to
+instead call `ExtCtxt::span_err` and return `DummyResult` so that the compiler
+can continue and find further errors.
+
+To print syntax fragments for debugging, you can use `span_note` together with
+`syntax::print::pprust::*_to_string`.
+
+The example above produced an integer literal using `AstBuilder::expr_usize`.
 As an alternative to the `AstBuilder` trait, `libsyntax` provides a set of
-[quasiquote macros](../syntax/ext/quote/index.html).  They are undocumented and
-very rough around the edges.  However, the implementation may be a good
-starting point for an improved quasiquote as an ordinary plugin library.
+quasiquote macros. They are undocumented and very rough around the edges.
+However, the implementation may be a good starting point for an improved
+quasiquote as an ordinary plugin library.
 
 
 # Lint plugins
@@ -239,12 +227,11 @@ foo.rs:4 fn lintme() { }
 
 The components of a lint plugin are:
 
-* one or more `declare_lint!` invocations, which define static
-  [`Lint`](../rustc/lint/struct.Lint.html) structs;
+* one or more `declare_lint!` invocations, which define static `Lint` structs;
 
 * a struct holding any state needed by the lint pass (here, none);
 
-* a [`LintPass`](../rustc/lint/trait.LintPass.html)
+* a `LintPass`
   implementation defining how to check each syntax element. A single
   `LintPass` may call `span_lint` for several different `Lint`s, but should
   register them all through the `get_lints` method.
index 30e4ad7ba5b1213c1e0ad92e54a3b04d02844256..ba4496b93f3a18be9a7fbf86a7b23a075a75b236 100644 (file)
@@ -94,6 +94,52 @@ fn main() {
 }
 ```
 
+As closures can capture variables from their environment, we can also try to
+bring some data into the other thread:
+
+```rust,ignore
+use std::thread;
+
+fn main() {
+    let x = 1;
+    thread::spawn(|| {
+        println!("x is {}", x);
+    });
+}
+```
+
+However, this gives us an error:
+
+```text
+5:19: 7:6 error: closure may outlive the current function, but it
+                 borrows `x`, which is owned by the current function
+...
+5:19: 7:6 help: to force the closure to take ownership of `x` (and any other referenced variables),
+          use the `move` keyword, as shown:
+      thread::spawn(move || {
+          println!("x is {}", x);
+      });
+```
+
+This is because by default closures capture variables by reference, and thus the
+closure only captures a _reference to `x`_. This is a problem, because the
+thread may outlive the scope of `x`, leading to a dangling pointer.
+
+To fix this, we use a `move` closure as mentioned in the error message. `move`
+closures are explained in depth [here](closures.html#move-closures); basically
+they move variables from their environment into themselves.
+
+```rust
+use std::thread;
+
+fn main() {
+    let x = 1;
+    thread::spawn(move || {
+        println!("x is {}", x);
+    });
+}
+```
+
 Many languages have the ability to execute threads, but it's wildly unsafe.
 There are entire books about how to prevent errors that occur from shared
 mutable state. Rust helps out with its type system here as well, by preventing
@@ -116,7 +162,7 @@ The same [ownership system](ownership.html) that helps prevent using pointers
 incorrectly also helps rule out data races, one of the worst kinds of
 concurrency bugs.
 
-As an example, here is a Rust program that would have a data race in many
+As an example, here is a Rust program that could have a data race in many
 languages. It will not compile:
 
 ```ignore
@@ -145,23 +191,69 @@ This gives us an error:
 ```
 
 Rust knows this wouldn't be safe! If we had a reference to `data` in each
-thread, and the thread takes ownership of the reference, we'd have three
-owners!
+thread, and the thread takes ownership of the reference, we'd have three owners!
+`data` gets moved out of `main` in the first call to `spawn()`, so subsequent
+calls in the loop cannot use this variable.
 
-So, we need some type that lets us have more than one reference to a value and
-that we can share between threads, that is it must implement `Sync`.
+Note that this specific example will not cause a data race since different array
+indices are being accessed. But this can't be determined at compile time, and in
+a similar situation where `i` is a constant or is random, you would have a data
+race.
 
-We'll use `Arc<T>`, Rust's standard atomic reference count type, which
-wraps a value up with some extra runtime bookkeeping which allows us to
-share the ownership of the value between multiple references at the same time.
+So, we need some type that lets us have more than one owning reference to a
+value. Usually, we'd use `Rc<T>` for this, which is a reference counted type
+that provides shared ownership. It has some runtime bookkeeping that keeps track
+of the number of references to it, hence the "reference count" part of its name.
 
-The bookkeeping consists of a count of how many of these references exist to
-the value, hence the reference count part of the name.
+Calling `clone()` on an `Rc<T>` will return a new owned reference and bump the
+internal reference count. We create one of these for each thread:
+
+
+```ignore
+use std::thread;
+use std::time::Duration;
+use std::rc::Rc;
+
+fn main() {
+    let mut data = Rc::new(vec![1, 2, 3]);
+
+    for i in 0..3 {
+        // create a new owned reference
+        let data_ref = data.clone();
+
+        // use it in a thread
+        thread::spawn(move || {
+            data_ref[i] += 1;
+        });
+    }
+
+    thread::sleep(Duration::from_millis(50));
+}
+```
+
+This won't work, however, and will give us the error:
+
+```text
+13:9: 13:22 error: the trait bound `alloc::rc::Rc<collections::vec::Vec<i32>> : core::marker::Send`
+            is not satisfied
+...
+13:9: 13:22 note: `alloc::rc::Rc<collections::vec::Vec<i32>>`
+            cannot be sent between threads safely
+```
+
+As the error message mentions, `Rc` cannot be sent between threads safely. This
+is because the internal reference count is not maintained in a thread-safe
+manner and can have a data race.
+
+To solve this, we'll use `Arc<T>`, Rust's standard atomic reference count type.
 
 The Atomic part means `Arc<T>` can safely be accessed from multiple threads.
 To do this the compiler guarantees that mutations of the internal count use
 indivisible operations which can't have data races.
 
+In essence, `Arc<T>` is a type that lets us share ownership of data _across
+threads_.
+
 
 ```ignore
 use std::thread;
@@ -182,7 +274,7 @@ fn main() {
 }
 ```
 
-We now call `clone()` on our `Arc<T>`, which increases the internal count.
+Similarly to last time, we use `clone()` to create a new owned handle.
 This handle is then moved into the new thread.
 
 And... still gives us an error.
@@ -193,14 +285,21 @@ And... still gives us an error.
                              ^~~~
 ```
 
-`Arc<T>` assumes one more property about its contents to ensure that it is safe
-to share across threads: it assumes its contents are `Sync`. This is true for
-our value if it's immutable, but we want to be able to mutate it, so we need
-something else to persuade the borrow checker we know what we're doing.
+`Arc<T>` by default has immutable contents. It allows the _sharing_ of data
+between threads, but shared mutable data is unsafe and when threads are
+involved can cause data races!
+
 
-It looks like we need some type that allows us to safely mutate a shared value,
-for example a type that can ensure only one thread at a time is able to
-mutate the value inside it at any one time.
+Usually when we wish to make something in an immutable position mutable, we use
+`Cell<T>` or `RefCell<T>` which allow safe mutation via runtime checks or
+otherwise (see also: [Choosing Your Guarantees](choosing-your-guarantees.html)).
+However, similar to `Rc`, these are not thread safe. If we try using these, we
+will get an error about these types not being `Sync`, and the code will fail to
+compile.
+
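As a quick single-threaded illustration of the interior mutability mentioned above (a sketch, not one of the chapter's listings):

```rust
use std::cell::RefCell;

fn main() {
    // RefCell allows mutation through a shared handle, checked at runtime.
    let data = RefCell::new(vec![1, 2, 3]);
    data.borrow_mut().push(4);
    assert_eq!(data.borrow().len(), 4);
}
```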
+It looks like we need some type that allows us to safely mutate a shared value
+across threads, for example a type that can ensure only one thread at a time is
+able to mutate the value inside it at any one time.
 
 For that, we can use the `Mutex<T>` type!
 
@@ -229,7 +328,17 @@ fn main() {
 Note that the value of `i` is bound (copied) to the closure and not shared
 among the threads.
 
-Also note that [`lock`](../std/sync/struct.Mutex.html#method.lock) method of
+We're "locking" the mutex here. A mutex (short for "mutual exclusion"), as
+mentioned, only allows one thread at a time to access a value. When we wish to
+access the value, we use `lock()` on it. This will "lock" the mutex, and no
+other thread will be able to lock it (and hence, do anything with the value)
+until we're done with it. If a thread attempts to lock a mutex which is already
+locked, it will wait until the other thread releases the lock.
+
+The lock "release" here is implicit; when the result of the lock (in this case,
+`data`) goes out of scope, the lock is automatically released.
+
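A small sketch of that implicit release (illustrative only, not the chapter's listing):

```rust
use std::sync::Mutex;

fn main() {
    let data = Mutex::new(0);
    {
        // `guard` holds the lock; no other thread can lock it here.
        let mut guard = data.lock().unwrap();
        *guard += 1;
    } // `guard` goes out of scope and the lock is released.
    assert_eq!(*data.lock().unwrap(), 1);
}
```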
+Note that [`lock`](../std/sync/struct.Mutex.html#method.lock) method of
 [`Mutex`](../std/sync/struct.Mutex.html) has this signature:
 
 ```ignore
index b7042854bd27fc0248c47cdf769cf1c1a4fe7f67..08ff3894c9db57fcb4d7e062a76f30532952c20d 100644 (file)
@@ -72,7 +72,7 @@ a [`Drop`][drop] implementation.
 # Initializing
 
 Both `const` and `static` have requirements for giving them a value. They must
-be given a value that’s a constant expression. In other words, you cannot use 
+be given a value that’s a constant expression. In other words, you cannot use
 the result of a function call or anything similarly complex or computed at runtime.
 
 # Which construct should I use?
index 0c9ed0bf1228125746cb7110b570755ff123c16d..b3ccefe0a6b43c912dec430b693468bcebf1523b 100644 (file)
@@ -118,7 +118,7 @@ build  deps  examples  libphrases-a7448e02a0468eaa.rlib  native
 `libphrases-hash.rlib` is the compiled crate. Before we see how to use this
 crate from another crate, let’s break it up into multiple files.
 
-# Multiple file crates
+# Multiple File Crates
 
 If each crate were just one file, these files would get very large. It’s often
 easier to split up crates into multiple files, and Rust supports this in two
@@ -190,13 +190,19 @@ mod farewells;
 ```
 
 Again, these declarations tell Rust to look for either
-`src/english/greetings.rs` and `src/japanese/greetings.rs` or
-`src/english/farewells/mod.rs` and `src/japanese/farewells/mod.rs`. Because
-these sub-modules don’t have their own sub-modules, we’ve chosen to make them
-`src/english/greetings.rs` and `src/japanese/farewells.rs`. Whew!
-
-The contents of `src/english/greetings.rs` and `src/japanese/farewells.rs` are
-both empty at the moment. Let’s add some functions.
+`src/english/greetings.rs`, `src/english/farewells.rs`,
+`src/japanese/greetings.rs` and `src/japanese/farewells.rs` or
+`src/english/greetings/mod.rs`, `src/english/farewells/mod.rs`,
+`src/japanese/greetings/mod.rs` and
+`src/japanese/farewells/mod.rs`. Because these sub-modules don’t have
+their own sub-modules, we’ve chosen to make them
+`src/english/greetings.rs`, `src/english/farewells.rs`,
+`src/japanese/greetings.rs` and `src/japanese/farewells.rs`. Whew!
+
+The contents of `src/english/greetings.rs`,
+`src/english/farewells.rs`, `src/japanese/greetings.rs` and
+`src/japanese/farewells.rs` are all empty at the moment. Let’s add
+some functions.
 
 Put this in `src/english/greetings.rs`:
 
index 8bc25ef90d382391c45af0682a29d69d3abcf279..5513523e56b95373ca8d7332c8eeab7750f61937 100644 (file)
@@ -55,7 +55,7 @@ BOOM times 100!!!
 BOOM times 1!!!
 ```
 
-The TNT goes off before the firecracker does, because it was declared
+The `tnt` goes off before the `firecracker` does, because it was declared
 afterwards. Last in, first out.
 
 So what is `Drop` good for? Generally, `Drop` is used to clean up any resources
index 11086af10bd3daca6cecf3eafdc6071d7da8a72d..12cb71973ab25832f78306f24683e2c3e7753c12 100644 (file)
@@ -2019,6 +2019,16 @@ impl Error for CliError {
             CliError::NotFound => "not found",
         }
     }
+
+    fn cause(&self) -> Option<&error::Error> {
+        match *self {
+            CliError::Io(ref err) => Some(err),
+            CliError::Parse(ref err) => Some(err),
+            // Our custom error doesn't have an underlying cause, but we could
+            // modify it so that it does.
+            CliError::NotFound => None,
+        }
+    }
 }
 ```
 
index 31c9da3fadae2cd0f983938d99b81d6294094535..8a2444323f1ec61bf47c55297fe130a13b7b7d9d 100644 (file)
@@ -246,6 +246,19 @@ stack backtrace:
   13:                0x0 - <unknown>
 ```
 
+If you need to override an already set `RUST_BACKTRACE` in cases where you
+cannot just unset the variable, set it to `0` to avoid getting a backtrace.
+Any other value (even no value at all) turns the backtrace on.
+
+```text
+$ export RUST_BACKTRACE=1
+...
+$ RUST_BACKTRACE=0 ./diverges 
+thread '<main>' panicked at 'This function never returns!', hello.rs:2
+note: Run with `RUST_BACKTRACE=1` for a backtrace.
+```
+
 `RUST_BACKTRACE` also works with Cargo’s `run` command:
 
 ```text
index 31ee385a928d6dc0dc9dac309c11cc5284f7f5fb..16141d936ebccb71584ff8c4ba52752016bffef7 100644 (file)
@@ -93,8 +93,8 @@ unofficial locations.
 | `armv7-apple-ios`             |  ✓  |     |     | ARM iOS                    |
 | `armv7s-apple-ios`            |  ✓  |     |     | ARM iOS                    |
 | `aarch64-apple-ios`           |  ✓  |     |     | ARM64 iOS                  |
-| `i686-unknown-freebsd`        |  ✓  |  ✓  |     | 32-bit FreeBSD             |
-| `x86_64-unknown-freebsd`      |  ✓  |  ✓  |     | 64-bit FreeBSD             |
+| `i686-unknown-freebsd`        |  ✓  |  ✓  |    | 32-bit FreeBSD             |
+| `x86_64-unknown-freebsd`      |  ✓  |  ✓  |    | 64-bit FreeBSD             |
 | `x86_64-unknown-openbsd`      |  ✓  |  ✓  |     | 64-bit OpenBSD             |
 | `x86_64-unknown-netbsd`       |  ✓  |  ✓  |     | 64-bit NetBSD              |
 | `x86_64-unknown-bitrig`       |  ✓  |  ✓  |     | 64-bit Bitrig              |
@@ -119,19 +119,7 @@ This will download a script, and start the installation. If it all goes well,
 you’ll see this appear:
 
 ```text
-Welcome to Rust.
-
-This script will download the Rust compiler and its package manager, Cargo, and
-install them to /usr/local. You may install elsewhere by running this script
-with the --prefix=<path> option.
-
-The installer will run under ‘sudo’ and may ask you for your password. If you do
-not want the script to run ‘sudo’ then pass it the --disable-sudo flag.
-
-You may uninstall later by running /usr/local/lib/rustlib/uninstall.sh,
-or by running this script again with the --uninstall flag.
-
-Continue? (y/N)
+Rust is ready to roll.
 ```
 
 From here, press `y` for ‘yes’, and then follow the rest of the prompts.
@@ -176,13 +164,15 @@ installed. Doing so will depend on your specific system, consult its
 documentation for more details.
 
 If not, there are a number of places where we can get help. The easiest is
-[the #rust IRC channel on irc.mozilla.org][irc], which we can access through
-[Mibbit][mibbit]. Click that link, and we'll be chatting with other Rustaceans
-(a silly nickname we call ourselves) who can help us out. Other great resources
-include [the user’s forum][users], and [Stack Overflow][stackoverflow].
+[the #rust-beginners IRC channel on irc.mozilla.org][irc-beginners] and for
+general discussion [the #rust IRC channel on irc.mozilla.org][irc], which we
+can access through [Mibbit][mibbit]. Then we'll be chatting with other
+Rustaceans (a silly nickname we call ourselves) who can help us out. Other great
+resources include [the user’s forum][users] and [Stack Overflow][stackoverflow].
 
+[irc-beginners]: irc://irc.mozilla.org/#rust-beginners
 [irc]: irc://irc.mozilla.org/#rust
-[mibbit]: http://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust
+[mibbit]: http://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust-beginners,%23rust
 [users]: https://users.rust-lang.org/
 [stackoverflow]: http://stackoverflow.com/questions/tagged/rust
 
@@ -429,7 +419,7 @@ first. This leaves the top-level project directory (in this case,
 to your code. In this way, using Cargo helps you keep your projects nice and
 tidy. There's a place for everything, and everything is in its place.
 
-Now, copy *main.rs* to the *src* directory, and delete the compiled file you
+Now, move *main.rs* into the *src* directory, and delete the compiled file you
 created with `rustc`. As usual, replace `main` with `main.exe` if you're on
 Windows.
 
@@ -513,7 +503,7 @@ Cargo checks to see if any of your project’s files have been modified, and onl
 rebuilds your project if they’ve changed since the last time you built it.
 
 With simple projects, Cargo doesn't bring a whole lot over just using `rustc`,
-but it will become useful in future. This is especially true when you start
+but it will become useful in the future. This is especially true when you start
 using crates; these are synonymous with a ‘library’ or ‘package’ in other
 programming languages. For complex projects composed of multiple crates, it’s
 much easier to let Cargo coordinate the build. Using Cargo, you can run `cargo
index b9b6e9a4c9568b711390dc3be6a0cc6878b88bd5..590c7e848190a120632d5c1e8bfaabeae75ef35e 100644 (file)
@@ -295,7 +295,7 @@ Rust warns us that we haven’t used the `Result` value. This warning comes from
 a special annotation that `io::Result` has. Rust is trying to tell you that
 you haven’t handled a possible error. The right way to suppress the error is
 to actually write error handling. Luckily, if we want to crash if there’s
-a problem, we can use these two little methods. If we can recover from the
+a problem, we can use `expect()`. If we can recover from the
 error somehow, we’d do something else, but we’ll save that for a future
 project.
 
@@ -912,7 +912,7 @@ returned by `parse()`, this is an `enum`  like `Ordering`, but in this case,
 each variant has some data associated with it: `Ok` is a success, and `Err` is a
 failure. Each contains more information: the successfully parsed integer, or an
 error type. In this case, we `match` on `Ok(num)`, which sets the name `num` to
-the unwrapped `Ok` value (ythe integer), and then we  return it on the
+the unwrapped `Ok` value (the integer), and then we return it on the
 right-hand side. In the `Err` case, we don’t care what kind of error it is, so
 we just use the catch all `_` instead of a name. This catches everything that
 isn't `Ok`, and `continue` lets us move to the next iteration of the loop; in
index a532dabf8d12d894a7b9f13e7d6081d7b2f2040e..52d0dd888efef1edf3a9c461b5e19111c4415fbf 100644 (file)
@@ -4,7 +4,7 @@ Rust’s take on `if` is not particularly complex, but it’s much more like the
 `if` you’ll find in a dynamically typed language than in a more traditional
 systems language. So let’s talk about it, to make sure you grasp the nuances.
 
-`if` is a specific form of a more general concept, the ‘branch’. The name comes
+`if` is a specific form of a more general concept, the ‘branch’, whose name comes
 from a branch in a tree: a decision point, where depending on a choice,
 multiple paths can be taken.
 
index 7659c4ff88dae983d3a4985fe223c1da41d2c92c..a5a2d7ce74e7d741271a8dcd447a120f2147bab7 100644 (file)
@@ -2,8 +2,7 @@
 
 For extremely low-level manipulations and performance reasons, one
 might wish to control the CPU directly. Rust supports using inline
-assembly to do this via the `asm!` macro. The syntax roughly matches
-that of GCC & Clang:
+assembly to do this via the `asm!` macro.
 
 ```ignore
 asm!(assembly template
index 0c4f804126631fea066fdc402ad7e4a47a566dd8..c174d2d6bacb6e4c1a916f02c0f86775d0413cde 100644 (file)
@@ -14,6 +14,11 @@ Now that you know more Rust, we can talk in detail about how this works.
 Ranges (the `0..10`) are 'iterators'. An iterator is something that we can
 call the `.next()` method on repeatedly, and it gives us a sequence of things.
 
+(By the way, a range with two dots like `0..10` is inclusive on the left (so it
+starts at 0) and exclusive on the right (so it ends at 9). A mathematician
+would write "[0, 10)". To get a range that goes all the way up to 10 you can
+write `0...10`.)
+
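A quick check of the half-open behavior described in the note above (a sketch, not the book's own listing):

```rust
fn main() {
    let v: Vec<i32> = (0..10).collect();
    assert_eq!(v.first(), Some(&0)); // inclusive on the left
    assert_eq!(v.last(), Some(&9));  // exclusive on the right
    assert_eq!(v.len(), 10);
}
```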
 Like this:
 
 ```rust
index 4193c93c894c4fec4a97b19aa3a5ce1ccf1b0509..695b1614fb70c5cd3877cd0ed542adb6e1425e12 100644 (file)
@@ -56,8 +56,8 @@ To fix this, we have to make sure that step four never happens after step
 three. The ownership system in Rust does this through a concept called
 lifetimes, which describe the scope that a reference is valid for.
 
-When we have a function that takes a reference by argument, we can be implicit
-or explicit about the lifetime of the reference:
+When we have a function that takes an argument by reference, we can be
+implicit or explicit about the lifetime of the reference:
 
 ```rust
 // implicit
@@ -282,14 +282,12 @@ to it.
 
 ## Lifetime Elision
 
-Rust supports powerful local type inference in function bodies, but it’s
-forbidden in item signatures to allow reasoning about the types based on
-the item signature alone. However, for ergonomic reasons a very restricted
-secondary inference algorithm called “lifetime elision” applies in function
-signatures. It infers only based on the signature components themselves and not
-based on the body of the function, only infers lifetime parameters, and does
-this with only three easily memorizable and unambiguous rules. This makes
-lifetime elision a shorthand for writing an item signature, while not hiding
+Rust supports powerful local type inference in the bodies of functions, but not in their
+item signatures; this restriction exists so that types can be reasoned about from the item
+signature alone. However, for ergonomic reasons, a very restricted secondary inference
+algorithm called “lifetime elision” does apply to function signatures. It infers only
+lifetime parameters, and it does so using three easily memorizable and unambiguous rules.
+This makes lifetime elision a shorthand for writing an item signature, while not hiding
 away the actual types involved as full local inference would if applied to it.
 
 When talking about lifetime elision, we use the term *input lifetime* and
index 188abb316ab0b316677301a2aa9a92f872e93d30..c16e2ea4535e9a0f4e34e74be56c3e2d5d6a0b64 100644 (file)
@@ -337,8 +337,8 @@ fn main() {
 }
 ```
 
-Instead you need to pass the variable name into the invocation, so it’s tagged
-with the right syntax context.
+Instead you need to pass the variable name into the invocation, so that it’s
+tagged with the right syntax context.
 
 ```rust
 macro_rules! foo {
@@ -470,7 +470,7 @@ which syntactic form it matches.
 * `ty`: a type. Examples: `i32`; `Vec<(char, String)>`; `&T`.
 * `pat`: a pattern. Examples: `Some(t)`; `(17, 'a')`; `_`.
 * `stmt`: a single statement. Example: `let x = 3`.
-* `block`: a brace-delimited sequence of statements. Example:
+* `block`: a brace-delimited sequence of statements and optionally an expression. Example:
   `{ log(error, "hi"); return 12; }`.
 * `item`: an [item][item]. Examples: `fn foo() { }`; `struct Bar;`.
 * `meta`: a "meta item", as found in attributes. Example: `cfg(target_os = "windows")`.
index acffaf4544b10182466cc619365a42d62015969e..d01a20083efb580f86c670beb7b9361ac26afe7d 100644 (file)
@@ -28,18 +28,18 @@ patterns][patterns] that covers all the patterns that are possible here.
 
 [patterns]: patterns.html
 
-One of the many advantages of `match` is it enforces ‘exhaustiveness checking’. 
-For example if we remove the last arm with the underscore `_`, the compiler will 
+One of the many advantages of `match` is it enforces ‘exhaustiveness checking’.
+For example if we remove the last arm with the underscore `_`, the compiler will
 give us an error:
 
 ```text
 error: non-exhaustive patterns: `_` not covered
 ```
 
-Rust is telling us that we forgot a value. The compiler infers from `x` that it
-can have any positive 32bit value; for example 1 to 2,147,483,647. The `_` acts 
+Rust is telling us that we forgot some value. The compiler infers from `x` that it
+can have any 32-bit integer value, from -2,147,483,648 to 2,147,483,647. The `_` acts
 as a 'catch-all', and will catch all possible values that *aren't* specified in 
-an arm of `match`. As you can see with the previous example, we provide `match` 
+an arm of `match`. As you can see in the previous example, we provide `match` 
 arms for integers 1-5, if `x` is 6 or any other value, then it is caught by `_`.
 
 `match` is also an expression, which means we can use it on the right-hand
@@ -58,7 +58,7 @@ let number = match x {
 };
 ```
 
-Sometimes it’s a nice way of converting something from one type to another; in 
+Sometimes it’s a nice way of converting something from one type to another; in
 this example the integers are converted to `String`.
 
 # Matching on enums
@@ -90,7 +90,7 @@ fn process_message(msg: Message) {
 
 Again, the Rust compiler checks exhaustiveness, so it demands that you
 have a match arm for every variant of the enum. If you leave one off, it
-will give you a compile-time error unless you use `_` or provide all possible 
+will give you a compile-time error unless you use `_` or provide all possible
 arms.
 
 Unlike the previous uses of `match`, you can’t use the normal `if`
index 610940cde95e6f3509b3740a8221e3ec71a70979..43bd0507ebbb6f02e7354942af18330a593e6e2d 100644 (file)
@@ -38,7 +38,7 @@ fn start(_argc: isize, _argv: *const *const u8) -> isize {
 // for a bare-bones hello world. These are normally
 // provided by libstd.
 #[lang = "eh_personality"] extern fn eh_personality() {}
-#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
+#[lang = "panic_fmt"] extern fn panic_fmt() -> ! { loop {} }
 # #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {}
 # #[no_mangle] pub extern fn rust_eh_register_frames () {}
 # #[no_mangle] pub extern fn rust_eh_unregister_frames () {}
@@ -65,7 +65,7 @@ pub extern fn main(argc: i32, argv: *const *const u8) -> i32 {
 }
 
 #[lang = "eh_personality"] extern fn eh_personality() {}
-#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
+#[lang = "panic_fmt"] extern fn panic_fmt() -> ! { loop {} }
 # #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {}
 # #[no_mangle] pub extern fn rust_eh_register_frames () {}
 # #[no_mangle] pub extern fn rust_eh_unregister_frames () {}
index 70d71c14ddf1686da0714ea4612285ac716181db..f8938be30ed49f7d94ff56126de9eaf8ebcd780c 100644 (file)
@@ -51,7 +51,7 @@ fn foo() {
 }
 ```
 
-When `v` comes into scope, a new [vector] is created on [the stack][stack],
+When `v` comes into scope, a new [vector][vectors] is created on [the stack][stack],
 and it allocates space on [the heap][heap] for its elements. When `v` goes out
 of scope at the end of `foo()`, Rust will clean up everything related to the
 vector, even the heap-allocated memory. This happens deterministically, at the
@@ -124,7 +124,7 @@ special annotation here, it’s the default thing that Rust does.
 ## The details
 
 The reason that we cannot use a binding after we’ve moved it is subtle, but
-important. 
+important.
 
 When we write code like this:
 
@@ -148,7 +148,7 @@ The first line allocates memory for the vector object `v` on the stack like
 it does for `x` above. But in addition to that it also allocates some memory
 on the [heap][sh] for the actual data (`[1, 2, 3]`). Rust copies the address
 of this heap allocation to an internal pointer, which is part of the vector
-object placed on the stack (let's call it the data pointer). 
+object placed on the stack (let's call it the data pointer).
 
 It is worth pointing out (even at the risk of stating the obvious) that the
 vector object and its data live in separate memory regions instead of being a
@@ -163,7 +163,7 @@ does not create a copy of the heap allocation containing the actual data.
 Which means that there would be two pointers to the contents of the vector
 both pointing to the same memory allocation on the heap. It would violate
 Rust’s safety guarantees by introducing a data race if one could access both
-`v` and `v2` at the same time. 
+`v` and `v2` at the same time.
 
 For example if we truncated the vector to just two elements through `v2`:
 
index 7325d448962cf229a9aac6443895466d0ce89d24..7ecfdcfcc1e025d78fea05f938405de33fb1d201 100644 (file)
@@ -1,7 +1,7 @@
 % Patterns
 
 Patterns are quite common in Rust. We use them in [variable
-bindings][bindings], [match statements][match], and other places, too. Let’s go
+bindings][bindings], [match expressions][match], and other places, too. Let’s go
 on a whirlwind tour of all of the things patterns can do!
 
 [bindings]: variable-bindings.html
index 840609d1dd62954f00211ad1f9df1a7ae637c379..69040931de676584f6037480d2f6ba35dcb32e89 100644 (file)
@@ -7,7 +7,7 @@ of these ones, as well, but these are the most primitive.
 
 # Booleans
 
-Rust has a built in boolean type, named `bool`. It has two values, `true` and `false`:
+Rust has a built-in boolean type, named `bool`. It has two values, `true` and `false`:
 
 ```rust
 let x = true;
@@ -89,13 +89,13 @@ Unsigned types use a `u` for their category, and signed types use `i`. The `i`
 is for ‘integer’. So `u8` is an eight-bit unsigned number, and `i8` is an
 eight-bit signed number.
 
-## Fixed size types
+## Fixed-size types
 
-Fixed size types have a specific number of bits in their representation. Valid
+Fixed-size types have a specific number of bits in their representation. Valid
 bit sizes are `8`, `16`, `32`, and `64`. So, `u32` is an unsigned, 32-bit integer,
 and `i64` is a signed, 64-bit integer.
 
-## Variable sized types
+## Variable-size types
 
 Rust also provides types whose size depends on the size of a pointer of the
 underlying machine. These types have ‘size’ as the category, and come in signed
@@ -164,7 +164,7 @@ copying. For example, you might want to reference only one line of a file read
 into memory. By nature, a slice is not created directly, but from an existing
 variable binding. Slices have a defined length, can be mutable or immutable.
 
-Internally, slices are represented as a pointer to the beginning of the data 
+Internally, slices are represented as a pointer to the beginning of the data
 and a length.
 
 ## Slicing syntax
index e7faf174600a9381b040c6c03d9deba9476f7774..a08d53f958ba3eddcaa139646e7df4ef91f4f06a 100644 (file)
@@ -23,7 +23,7 @@ Before we get to the details, two important notes about the ownership system.
 Rust has a focus on safety and speed. It accomplishes these goals through many
 ‘zero-cost abstractions’, which means that in Rust, abstractions cost as little
 as possible in order to make them work. The ownership system is a prime example
-of a zero cost abstraction. All of the analysis we’ll talk about in this guide
+of a zero-cost abstraction. All of the analysis we’ll talk about in this guide
 is _done at compile time_. You do not pay any run-time cost for any of these
 features.
 
@@ -163,8 +163,8 @@ both at the same time:
 * exactly one mutable reference (`&mut T`).
 
 
-You may notice that this is very similar, though not exactly the same as,
-to the definition of a data race:
+You may notice that this is very similar to, though not exactly the same as,
+the definition of a data race:
 
 > There is a ‘data race’ when two or more pointers access the same memory
 > location at the same time, where at least one of them is writing, and the
@@ -211,9 +211,10 @@ fn main() {
 ```
 
 In other words, the mutable borrow is held through the rest of our example. What
-we want is for the mutable borrow to end _before_ we try to call `println!` and
-make an immutable borrow. In Rust, borrowing is tied to the scope that the
-borrow is valid for. And our scopes look like this:
+we want is for the mutable borrow by `y` to end so that the resource can be
+returned to the owner, `x`. `x` can then provide an immutable borrow to `println!`.
+In Rust, borrowing is tied to the scope that the borrow is valid for. And our
+scopes look like this:
 
 ```rust,ignore
 let mut x = 5;
@@ -378,4 +379,3 @@ statement 1 at 3:14
 
 In the above example, `y` is declared before `x`, meaning that `y` lives longer
 than `x`, which is not allowed.
-
index 68c7235975e8c39a1b68ee4ca43fb1d49def0de0..f5ebceedd363a9c77fb071788638253fee0a2f84 100644 (file)
@@ -44,7 +44,12 @@ let s = "foo\
 assert_eq!("foobar", s);
 ```
 
-Rust has more than only `&str`s though. A `String`, is a heap-allocated string.
+Note that you normally cannot access a `str` directly, but only through a `&str`
+reference. This is because `str` is an unsized type which requires additional
+runtime information to be usable. For more information see the chapter on
+[unsized types][ut].
+
+Rust has more than only `&str`s though. A `String` is a heap-allocated string.
 This string is growable, and is also guaranteed to be UTF-8. `String`s are
 commonly created by converting from a string slice using the `to_string`
 method.
@@ -89,7 +94,7 @@ Viewing a `String` as a `&str` is cheap, but converting the `&str` to a
 
 ## Indexing
 
-Because strings are valid UTF-8, strings do not support indexing:
+Because strings are valid UTF-8, they do not support indexing:
 
 ```rust,ignore
 let s = "hello";
@@ -185,5 +190,6 @@ let hello_world = hello + &world;
 This is because `&String` can automatically coerce to a `&str`. This is a
 feature called ‘[`Deref` coercions][dc]’.
 
+[ut]: unsized-types.html
 [dc]: deref-coercions.html
 [connect]: ../std/net/struct.TcpStream.html#method.connect
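A small sketch of the `&String`-to-`&str` coercion described above (illustrative only; the function name is made up):

```rust
fn takes_slice(slice: &str) {
    println!("Got: {}", slice);
}

fn main() {
    let owned: String = "Hello there.".to_string();
    // `&String` coerces to `&str` here via `Deref`.
    takes_slice(&owned);
}
```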
index 6782bdb4985ecc61f277551a1159944dce8d7eca..3e889f51f542d2d7541933d62771d05942c3e6c6 100644 (file)
 * `!` (`!expr`): bitwise or logical complement.  Overloadable (`Not`).
 * `!=` (`var != expr`): nonequality comparison.  Overloadable (`PartialEq`).
 * `%` (`expr % expr`): arithmetic remainder.  Overloadable (`Rem`).
-* `%=` (`var %= expr`): arithmetic remainder & assignment.
+* `%=` (`var %= expr`): arithmetic remainder & assignment. Overloadable (`RemAssign`).
 * `&` (`expr & expr`): bitwise and.  Overloadable (`BitAnd`).
 * `&` (`&expr`): borrow.  See [References and Borrowing].
 * `&` (`&type`, `&mut type`, `&'a type`, `&'a mut type`): borrowed pointer type.  See [References and Borrowing].
-* `&=` (`var &= expr`): bitwise and & assignment.
+* `&=` (`var &= expr`): bitwise and & assignment. Overloadable (`BitAndAssign`).
 * `&&` (`expr && expr`): logical and.
 * `*` (`expr * expr`): arithmetic multiplication.  Overloadable (`Mul`).
 * `*` (`*expr`): dereference.
 * `*` (`*const type`, `*mut type`): raw pointer.  See [Raw Pointers].
-* `*=` (`var *= expr`): arithmetic multiplication & assignment.
+* `*=` (`var *= expr`): arithmetic multiplication & assignment. Overloadable (`MulAssign`).
 * `+` (`expr + expr`): arithmetic addition.  Overloadable (`Add`).
 * `+` (`trait + trait`, `'a + trait`): compound type constraint.  See [Traits (Multiple Trait Bounds)].
-* `+=` (`var += expr`): arithmetic addition & assignment.
+* `+=` (`var += expr`): arithmetic addition & assignment. Overloadable (`AddAssign`).
 * `,`: argument and element separator.  See [Attributes], [Functions], [Structs], [Generics], [Match], [Closures], [Crates and Modules (Importing Modules with `use`)].
 * `-` (`expr - expr`): arithmetic subtraction.  Overloadable (`Sub`).
 * `-` (`- expr`): arithmetic negation.  Overloadable (`Neg`).
-* `-=` (`var -= expr`): arithmetic subtraction & assignment.
+* `-=` (`var -= expr`): arithmetic subtraction & assignment. Overloadable (`SubAssign`).
 * `->` (`fn(…) -> type`, `|…| -> type`): function and closure return type.  See [Functions], [Closures].
 * `-> !` (`fn(…) -> !`, `|…| -> !`): diverging function or closure. See [Diverging Functions].
 * `.` (`expr.ident`): member access.  See [Structs], [Method Syntax].
 * `..` (`..`, `expr..`, `..expr`, `expr..expr`): right-exclusive range literal.
 * `..` (`..expr`): struct literal update syntax.  See [Structs (Update syntax)].
 * `..` (`variant(x, ..)`, `struct_type { x, .. }`): "and the rest" pattern binding.  See [Patterns (Ignoring bindings)].
-* `...` (`expr ... expr`): inclusive range pattern.  See [Patterns (Ranges)].
+* `...` (`...expr`, `expr...expr`) *in an expression*: inclusive range expression. See [Iterators].
+* `...` (`expr...expr`) *in a pattern*: inclusive range pattern.  See [Patterns (Ranges)].
 * `/` (`expr / expr`): arithmetic division.  Overloadable (`Div`).
-* `/=` (`var /= expr`): arithmetic division & assignment.
+* `/=` (`var /= expr`): arithmetic division & assignment. Overloadable (`DivAssign`).
 * `:` (`pat: type`, `ident: type`): constraints.  See [Variable Bindings], [Functions], [Structs], [Traits].
 * `:` (`ident: expr`): struct field initializer.  See [Structs].
 * `:` (`'a: loop {…}`): loop label.  See [Loops (Loops Labels)].
 * `;`: statement and item terminator.
 * `;` (`[…; len]`): part of fixed-size array syntax.  See [Primitive Types (Arrays)].
 * `<<` (`expr << expr`): left-shift.  Overloadable (`Shl`).
-* `<<=` (`var <<= expr`): left-shift & assignment.
+* `<<=` (`var <<= expr`): left-shift & assignment. Overloadable (`ShlAssign`).
 * `<` (`expr < expr`): less-than comparison.  Overloadable (`PartialOrd`).
 * `<=` (`var <= expr`): less-than or equal-to comparison.  Overloadable (`PartialOrd`).
 * `=` (`var = expr`, `ident = type`): assignment/equivalence.  See [Variable Bindings], [`type` Aliases], generic parameter defaults.
 * `>` (`expr > expr`): greater-than comparison.  Overloadable (`PartialOrd`).
 * `>=` (`var >= expr`): greater-than or equal-to comparison.  Overloadable (`PartialOrd`).
 * `>>` (`expr >> expr`): right-shift.  Overloadable (`Shr`).
-* `>>=` (`var >>= expr`): right-shift & assignment.
+* `>>=` (`var >>= expr`): right-shift & assignment. Overloadable (`ShrAssign`).
 * `@` (`ident @ pat`): pattern binding.  See [Patterns (Bindings)].
 * `^` (`expr ^ expr`): bitwise exclusive or.  Overloadable (`BitXor`).
-* `^=` (`var ^= expr`): bitwise exclusive or & assignment.
+* `^=` (`var ^= expr`): bitwise exclusive or & assignment. Overloadable (`BitXorAssign`).
 * `|` (`expr | expr`): bitwise or.  Overloadable (`BitOr`).
 * `|` (`pat | pat`): pattern alternatives.  See [Patterns (Multiple patterns)].
 * `|` (`|…| expr`): closures.  See [Closures].
-* `|=` (`var |= expr`): bitwise or & assignment.
+* `|=` (`var |= expr`): bitwise or & assignment. Overloadable (`BitOrAssign`).
 * `||` (`expr || expr`): logical or.
 * `_`: "ignored" pattern binding.  See [Patterns (Ignoring bindings)].
 
 [Functions (Early Returns)]: functions.html#early-returns
 [Functions]: functions.html
 [Generics]: generics.html
+[Iterators]: iterators.html
 [Lifetimes]: lifetimes.html
 [Loops (`for`)]: loops.html#for
 [Loops (`loop`)]: loops.html#loop
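A minimal sketch of what the `*Assign` entries added above refer to, using a hypothetical `Meters` type that is not part of this diff; implementing `AddAssign` is what makes `+=` available on a custom type:

```rust
use std::ops::AddAssign;

#[derive(Debug, PartialEq)]
struct Meters(f64);

// Implementing `AddAssign` lets `+=` be used on `Meters` values.
impl AddAssign for Meters {
    fn add_assign(&mut self, other: Meters) {
        self.0 += other.0;
    }
}

fn main() {
    let mut distance = Meters(1.5);
    distance += Meters(2.5);
    assert_eq!(distance, Meters(4.0));
}
```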
index d57664bf07e4d8cda1fb217c40973224033e48c2..59d07e4f81c6e923aa08fa3967873395e47c1178 100644 (file)
@@ -515,7 +515,3 @@ you add more examples.
 
 We haven’t covered all of the details with writing documentation tests. For more,
 please see the [Documentation chapter](documentation.html).
-
-One final note: documentation tests *cannot* be run on binary crates.
-To see more on file arrangement see the [Crates and
-Modules](crates-and-modules.html) section.
index 2a164077683b29694e18628d32d32777a7c38eeb..b3b41979245681e2408a3626c0354b39e4f4fb42 100644 (file)
@@ -154,7 +154,7 @@ print_area(5);
 We get a compile-time error:
 
 ```text
-error: the trait `HasArea` is not implemented for the type `_` [E0277]
+error: the trait bound `_ : HasArea` is not satisfied [E0277]
 ```
 
 ## Trait bounds on generic structs
@@ -496,7 +496,7 @@ impl FooBar for Baz {
 If we forget to implement `Foo`, Rust will tell us:
 
 ```text
-error: the trait `main::Foo` is not implemented for the type `main::Baz` [E0277]
+error: the trait bound `main::Baz : main::Foo` is not satisfied [E0277]
 ```
 
 # Deriving
index ecd196a9f0d1fe5b3b79b4f3b54dd7d8df892ed9..af4e351569f7f55920fd2db72c6841682098fe1c 100644 (file)
@@ -4,7 +4,7 @@ Rust’s main draw is its powerful static guarantees about behavior. But safety
 checks are conservative by nature: there are some programs that are actually
 safe, but the compiler is not able to verify this is true. To write these kinds
 of programs, we need to tell the compiler to relax its restrictions a bit. For
-this, Rust has a keyword, `unsafe`. Code using `unsafe` has less restrictions
+this, Rust has a keyword, `unsafe`. Code using `unsafe` has fewer restrictions
 than normal code does.
 
 Let’s go over the syntax, and then we’ll talk semantics. `unsafe` is used in
index 29b59937a63fa6b4bf3d93dc230abe160102c662..1c8c03cf6793773acc04fa5d8c8718d5b8d54c16 100644 (file)
@@ -18,14 +18,14 @@ function, rather than leaving it off. Otherwise, you’ll get an error.
 
 In many languages, a variable binding would be called a *variable*, but Rust’s
 variable bindings have a few tricks up their sleeves. For example, the
-left-hand side of a `let` expression is a ‘[pattern][pattern]’, not a
+left-hand side of a `let` statement is a ‘[pattern][pattern]’, not a
 variable name. This means we can do things like:
 
 ```rust
 let (x, y) = (1, 2);
 ```
 
-After this expression is evaluated, `x` will be one, and `y` will be two.
+After this statement is evaluated, `x` will be one, and `y` will be two.
 Patterns are really powerful, and have [their own section][pattern] in the
 book. We don’t need those features for now, so we’ll keep this in the back
 of our minds as we go forward.
index f5a543d75b1b40baecb74f5605172b36157a4c0c..75e961e4c4a80504d6c2fefc27db9ebd6025d39e 100644 (file)
@@ -56,8 +56,8 @@ v[j];
 Indexing with a non-`usize` type gives an error that looks like this:
 
 ```text
-error: the trait `core::ops::Index<i32>` is not implemented for the type
-`collections::vec::Vec<_>` [E0277]
+error: the trait bound `collections::vec::Vec<_> : core::ops::Index<i32>`
+is not satisfied [E0277]
 v[j];
 ^~~~
 note: the type `collections::vec::Vec<_>` cannot be indexed by `i32`
@@ -115,6 +115,36 @@ for i in v {
 }
 ```
 
+Note: Once a `for` loop takes ownership of the vector, you cannot use the
+vector again. To iterate over the vector multiple times, take a reference to
+it instead. For example, the following code does not compile.
+
+```rust,ignore
+let v = vec![1, 2, 3, 4, 5];
+
+for i in v {
+    println!("Take ownership of the vector and its element {}", i);
+}
+
+for i in v {
+    println!("Take ownership of the vector and its element {}", i);
+}
+```
+
+Whereas the following works perfectly:
+
+```rust
+let v = vec![1, 2, 3, 4, 5];
+
+for i in &v {
+    println!("This is a reference to {}", i);
+}
+
+for i in &v {
+    println!("This is a reference to {}", i);
+}
+```
+
 Vectors have many more useful methods, which you can read about in [their
 API documentation][vec].
 
index d6495d02e1189fd1de39f68c3b5929b068b47338..742433b99ac5f56e42704782b5e4e2a6697287d5 100644 (file)
@@ -1,4 +1,4 @@
 % The (old) Rust Compiler Plugins Guide
 
 This content has moved into
-[the Rust Programming Language book](book/plugins.html).
+[the Rust Programming Language book](book/compiler-plugins.html).
index 1d2897ce3bd1f37e260389cde03328d0d6bb5cb6..6a9ebd6edf8fbc7f53968c25047ce266d719bb0a 100644 (file)
@@ -64,7 +64,7 @@ fn main() {
 ```
 
 ```text
-<anon>:10:5: 10:8 error: the trait `Trait` is not implemented for the type `&mut i32` [E0277]
+<anon>:10:5: 10:8 error: the trait bound `&mut i32 : Trait` is not satisfied [E0277]
 <anon>:10     foo(t);
               ^~~
 ```
index 63f83788c4bac55a8298fe17874d3fd8f99e94ce..691301946de4b0ff452ca7009249e5689cf40aa8 100644 (file)
@@ -2,7 +2,7 @@
 
 To bring everything together, we're going to write `std::Vec` from scratch.
 Because all the best tools for writing unsafe code are unstable, this
-project will only work on nightly (as of Rust 1.2.0). With the exception of the
+project will only work on nightly (as of Rust 1.9.0). With the exception of the
 allocator API, much of the unstable code we'll use is expected to be stabilized
 in a similar form as it is today.
 
index 6fb8de780942c3aeff85fcdffb4d2b674c173183..fcf9aefaba8474920178eac36f51f7adc8ad48fe 100644 (file)
@@ -379,6 +379,10 @@ Examples of integer literals of various forms:
 0usize;                            // type usize
 ```
 
+Note that the Rust syntax considers `-1i8` as an application of the [unary minus
+operator](#unary-operator-expressions) to an integer literal `1i8`, rather than
+a single integer literal.
+
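A small sketch illustrating the note above; the bindings here are only for demonstration:

```rust
fn main() {
    // `-1i8` is parsed as the unary minus operator applied to the literal `1i8`,
    // so it is equivalent to the explicitly parenthesized form.
    let a = -1i8;
    let b = -(1i8);
    assert_eq!(a, b);
}
```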
 ##### Floating-point literals
 
 A _floating-point literal_ has one of two forms:
@@ -1114,6 +1118,16 @@ type Point = (u8, u8);
 let p: Point = (41, 68);
 ```
 
+Currently a type alias to an enum type cannot be used to qualify the
+constructors:
+
+```
+enum E { A }
+type F = E;
+let _: F = E::A;  // OK
+// let _: F = F::A;  // Doesn't work
+```
+
 ### Structs
 
 A _struct_ is a nominal [struct type](#struct-types) defined with the
@@ -1191,7 +1205,8 @@ a = Animal::Cat { name: "Spotty".to_string(), weight: 2.7 };
 In this example, `Cat` is a _struct-like enum variant_,
 whereas `Dog` is simply called an enum variant.
 
-Enums have a discriminant. You can assign them explicitly:
+Each enum value has a _discriminant_, which is an integer associated with it. You
+can specify it explicitly:
 
 ```
 enum Foo {
@@ -1199,10 +1214,15 @@ enum Foo {
 }
 ```
 
-If a discriminant isn't assigned, they start at zero, and add one for each
+The right hand side of the specification is interpreted as an `isize` value,
+but the compiler is allowed to use a smaller type in the actual memory layout.
+The [`repr` attribute](#ffi-attributes) can be added in order to change
+the type of the right hand side and specify the memory layout.
+
+If a discriminant isn't specified, they start at zero, and add one for each
 variant, in order.
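A hedged sketch of the discriminant rules described above; the `Color` enum and the `repr(u8)` choice are illustrative, not taken from the diff:

```rust
// Explicit and implicit discriminants, with `repr` controlling the storage type.
#[repr(u8)]
enum Color {
    Red = 1,
    Green,      // implicitly 2: one more than the previous variant
    Blue = 10,
}

fn main() {
    // Casting a C-like enum yields its discriminant.
    assert_eq!(Color::Red as u8, 1);
    assert_eq!(Color::Green as u8, 2);
    assert_eq!(Color::Blue as u8, 10);
}
```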
 
-You can cast an enum to get this value:
+You can cast an enum to get its discriminant:
 
 ```
 # enum Foo { Bar = 123 }
@@ -1885,6 +1905,8 @@ type int8_t = i8;
 - `should_panic` - indicates that this test function should panic, inverting the success condition.
 - `cold` - The function is unlikely to be executed, so optimize it (and calls
   to it) differently.
+- `naked` - The function utilizes a custom ABI or custom inline ASM that requires
+  the prologue and epilogue to be skipped.
 
 ### Static-only attributes
 
@@ -2277,6 +2299,10 @@ The currently implemented features of the reference compiler are:
                     `#[derive_Foo] #[derive_Bar]`, which can be user-defined syntax
                     extensions.
 
+* `inclusive_range_syntax` - Allows use of the `a...b` and `...b` syntax for inclusive ranges.
+
+* `inclusive_range` - Allows use of the types that represent desugared inclusive ranges.
+
 * `intrinsics` - Allows use of the "rust-intrinsics" ABI. Compiler intrinsics
                  are inherently unstable and no promise about them is made.
 
@@ -2747,13 +2773,34 @@ let y = 0..10;
 assert_eq!(x, y);
 ```
 
+Similarly, the `...` operator will construct an object of one of the
+`std::ops::RangeInclusive` variants.
+
+```
+# #![feature(inclusive_range_syntax)]
+1...2;   // std::ops::RangeInclusive
+...4;    // std::ops::RangeToInclusive
+```
+
+The following expressions are equivalent.
+
+```
+# #![feature(inclusive_range_syntax, inclusive_range)]
+let x = std::ops::RangeInclusive::NonEmpty {start: 0, end: 10};
+let y = 0...10;
+
+assert_eq!(x, y);
+```
+
 ### Unary operator expressions
 
 Rust defines the following unary operators. They are all written as prefix operators,
 before the expression they apply to.
 
 * `-`
-  : Negation. May only be applied to numeric types.
+  : Negation. Signed integer types and floating-point types support negation. It
+    is an error to apply negation to unsigned types; for example, the compiler
+    rejects `-1u32`.
 * `*`
   : Dereference. When applied to a [pointer](#pointer-types) it denotes the
     pointed-to location. For pointers to mutable locations, the resulting
@@ -3283,6 +3330,10 @@ The primitive types are the following:
 * The boolean type `bool` with values `true` and `false`.
 * The machine types (integer and floating-point).
 * The machine-dependent integer types.
+* Arrays
+* Tuples
+* Slices
+* Function pointers
 
 #### Machine types
 
@@ -3860,6 +3911,9 @@ The _heap_ is a general term that describes boxes.  The lifetime of an
 allocation in the heap depends on the lifetime of the box values pointing to
 it. Since box values may themselves be passed in and out of frames, or stored
 in the heap, heap allocations may outlive the frame they are allocated within.
+An allocation in the heap is guaranteed to reside at a single location in the
+heap for the whole lifetime of the allocation - it will never be relocated as
+a result of moving a box value.
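A small sketch of what this stability guarantee means in practice, assuming only what the paragraph states:

```rust
fn main() {
    let b = Box::new(42);
    // Address of the heap allocation before the box value is moved.
    let before = &*b as *const i32;
    let moved = b; // moving the box copies the pointer; the allocation stays put
    let after = &*moved as *const i32;
    assert_eq!(before, after);
}
```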
 
 ### Memory ownership
 
index 5ab1a1d9c10f45e9de5b6beb04784e72fbd4e9df..8d837d1a1a9165b653e8019c8333c2608d3b8584 100644 (file)
@@ -53,7 +53,7 @@ This document is broken into four parts:
   cross-cutting topic, starting with
   [Ownership and resources](ownership/README.md).
 
-* **[APIs for a changing Rust](changing/README.md)**
+* **APIs for a changing Rust**
   discusses the forward-compatibility hazards, especially those that interact
   with the pre-1.0 library stabilization process.
 
index 9ea1d218161912b50fed472dc4df7e000036e626..9b243bc72ef7125e6540151a70838c3e7fe78464 100644 (file)
@@ -76,7 +76,7 @@ needs to make about its arguments.
 On the other hand, generics can make it more difficult to read and understand a
 function's signature. Aim for "natural" parameter types that are neither overly
 concrete nor overly abstract. See the discussion on
-[traits](../../traits/README.md) for more guidance.
+[traits](../traits/README.md) for more guidance.
 
 
 #### Minimizing ownership assumptions:
index 26ffda50ac53dc1a06b30dae8e7b0fc4e2a82d52..a09640c3055c2ac68bd7cdeab15074100da83a97 100644 (file)
@@ -27,8 +27,7 @@ explicitly implement to be used by this generic function.
 * _Inference_. Since the type parameters to generic functions can usually be
   inferred, generic functions can help cut down on verbosity in code where
   explicit conversions or other method calls would usually be necessary. See the
-  [overloading/implicits use case](#use-case-limited-overloading-andor-implicit-conversions)
-  below.
+  overloading/implicits use case below.
 * _Precise types_. Because generics give a _name_ to the specific type
   implementing a trait, it is possible to be precise about places where that
   exact type is required or produced. For example, a function
@@ -51,7 +50,7 @@ explicitly implement to be used by this generic function.
   a `Vec<T>` contains elements of a single concrete type (and, indeed, the
   vector representation is specialized to lay these out in line). Sometimes
   heterogeneous collections are useful; see
-  [trait objects](#use-case-trait-objects) below.
+  trait objects below.
 * _Signature verbosity_. Heavy use of generics can bloat function signatures.
   **[Ed. note]** This problem may be mitigated by some language improvements; stay tuned.
 
index 9d78721ad3644965a11e0fb1b6dd7115a20ae841..2106f32fafade8fdcc54320064b4353f1708aafc 100644 (file)
@@ -101,7 +101,7 @@ The convention for a field `foo: T` is:
   here may take `&T` or some other type, depending on the context.)
 
 Note that this convention is about getters/setters on ordinary data types, *not*
-on [builder objects](../ownership/builders.html).
+on [builder objects](../../ownership/builders.html).
 
 ### Escape hatches [FIXME]
 
diff --git a/src/error_index_generator/main.rs b/src/error_index_generator/main.rs
deleted file mode 100644 (file)
index db9dd00..0000000
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(rustc_private, rustdoc)]
-
-extern crate syntax;
-extern crate rustdoc;
-extern crate serialize as rustc_serialize;
-
-use std::collections::BTreeMap;
-use std::fs::{read_dir, File};
-use std::io::{Read, Write};
-use std::env;
-use std::path::Path;
-use std::error::Error;
-
-use syntax::diagnostics::metadata::{get_metadata_dir, ErrorMetadataMap, ErrorMetadata};
-
-use rustdoc::html::markdown::Markdown;
-use rustc_serialize::json;
-
-enum OutputFormat {
-    HTML(HTMLFormatter),
-    Markdown(MarkdownFormatter),
-    Unknown(String),
-}
-
-impl OutputFormat {
-    fn from(format: &str) -> OutputFormat {
-        match &*format.to_lowercase() {
-            "html"     => OutputFormat::HTML(HTMLFormatter),
-            "markdown" => OutputFormat::Markdown(MarkdownFormatter),
-            s          => OutputFormat::Unknown(s.to_owned()),
-        }
-    }
-}
-
-trait Formatter {
-    fn header(&self, output: &mut Write) -> Result<(), Box<Error>>;
-    fn title(&self, output: &mut Write) -> Result<(), Box<Error>>;
-    fn error_code_block(&self, output: &mut Write, info: &ErrorMetadata,
-                        err_code: &str) -> Result<(), Box<Error>>;
-    fn footer(&self, output: &mut Write) -> Result<(), Box<Error>>;
-}
-
-struct HTMLFormatter;
-struct MarkdownFormatter;
-
-impl Formatter for HTMLFormatter {
-    fn header(&self, output: &mut Write) -> Result<(), Box<Error>> {
-        try!(write!(output, r##"<!DOCTYPE html>
-<html>
-<head>
-<title>Rust Compiler Error Index</title>
-<meta charset="utf-8">
-<!-- Include rust.css after main.css so its rules take priority. -->
-<link rel="stylesheet" type="text/css" href="main.css"/>
-<link rel="stylesheet" type="text/css" href="rust.css"/>
-<style>
-.error-undescribed {{
-    display: none;
-}}
-</style>
-</head>
-<body>
-"##));
-        Ok(())
-    }
-
-    fn title(&self, output: &mut Write) -> Result<(), Box<Error>> {
-        try!(write!(output, "<h1>Rust Compiler Error Index</h1>\n"));
-        Ok(())
-    }
-
-    fn error_code_block(&self, output: &mut Write, info: &ErrorMetadata,
-                        err_code: &str) -> Result<(), Box<Error>> {
-        // Enclose each error in a div so they can be shown/hidden en masse.
-        let desc_desc = match info.description {
-            Some(_) => "error-described",
-            None => "error-undescribed",
-        };
-        let use_desc = match info.use_site {
-            Some(_) => "error-used",
-            None => "error-unused",
-        };
-        try!(write!(output, "<div class=\"{} {}\">", desc_desc, use_desc));
-
-        // Error title (with self-link).
-        try!(write!(output,
-                    "<h2 id=\"{0}\" class=\"section-header\"><a href=\"#{0}\">{0}</a></h2>\n",
-                    err_code));
-
-        // Description rendered as markdown.
-        match info.description {
-            Some(ref desc) => try!(write!(output, "{}", Markdown(desc))),
-            None => try!(write!(output, "<p>No description.</p>\n")),
-        }
-
-        try!(write!(output, "</div>\n"));
-        Ok(())
-    }
-
-    fn footer(&self, output: &mut Write) -> Result<(), Box<Error>> {
-        try!(write!(output, "</body>\n</html>"));
-        Ok(())
-    }
-}
-
-impl Formatter for MarkdownFormatter {
-    #[allow(unused_variables)]
-    fn header(&self, output: &mut Write) -> Result<(), Box<Error>> {
-        Ok(())
-    }
-
-    fn title(&self, output: &mut Write) -> Result<(), Box<Error>> {
-        try!(write!(output, "# Rust Compiler Error Index\n"));
-        Ok(())
-    }
-
-    fn error_code_block(&self, output: &mut Write, info: &ErrorMetadata,
-                        err_code: &str) -> Result<(), Box<Error>> {
-        Ok(match info.description {
-            Some(ref desc) => try!(write!(output, "## {}\n{}\n", err_code, desc)),
-            None => (),
-        })
-    }
-
-    #[allow(unused_variables)]
-    fn footer(&self, output: &mut Write) -> Result<(), Box<Error>> {
-        Ok(())
-    }
-}
-
-/// Load all the metadata files from `metadata_dir` into an in-memory map.
-fn load_all_errors(metadata_dir: &Path) -> Result<ErrorMetadataMap, Box<Error>> {
-    let mut all_errors = BTreeMap::new();
-
-    for entry in try!(read_dir(metadata_dir)) {
-        let path = try!(entry).path();
-
-        let mut metadata_str = String::new();
-        try!(File::open(&path).and_then(|mut f| f.read_to_string(&mut metadata_str)));
-
-        let some_errors: ErrorMetadataMap = try!(json::decode(&metadata_str));
-
-        for (err_code, info) in some_errors {
-            all_errors.insert(err_code, info);
-        }
-    }
-
-    Ok(all_errors)
-}
-
-/// Output an HTML page for the errors in `err_map` to `output_path`.
-fn render_error_page<T: Formatter>(err_map: &ErrorMetadataMap, output_path: &Path,
-                                   formatter: T) -> Result<(), Box<Error>> {
-    let mut output_file = try!(File::create(output_path));
-
-    try!(formatter.header(&mut output_file));
-    try!(formatter.title(&mut output_file));
-
-    for (err_code, info) in err_map {
-        try!(formatter.error_code_block(&mut output_file, info, err_code));
-    }
-
-    formatter.footer(&mut output_file)
-}
-
-fn main_with_result(format: OutputFormat) -> Result<(), Box<Error>> {
-    let build_arch = try!(env::var("CFG_BUILD"));
-    let metadata_dir = get_metadata_dir(&build_arch);
-    let err_map = try!(load_all_errors(&metadata_dir));
-    match format {
-        OutputFormat::Unknown(s)  => panic!("Unknown output format: {}", s),
-        OutputFormat::HTML(h)     => try!(render_error_page(&err_map,
-                                                            Path::new("doc/error-index.html"),
-                                                            h)),
-        OutputFormat::Markdown(m) => try!(render_error_page(&err_map,
-                                                            Path::new("doc/error-index.md"),
-                                                            m)),
-    }
-    Ok(())
-}
-
-fn parse_args() -> OutputFormat {
-    for arg in env::args().skip(1) {
-        return OutputFormat::from(&arg);
-    }
-    OutputFormat::from("html")
-}
-
-fn main() {
-    if let Err(e) = main_with_result(parse_args()) {
-        panic!("{}", e.description());
-    }
-}
index 74837a06faecd094d2c588293465dbe8b543801a..cde7094cec41e1cd795dab7bc7fc37a59d8aa897 100644 (file)
@@ -10,7 +10,3 @@ These are some links to repos with configs which ease the use of rust.
 * [kate-config](https://github.com/rust-lang/kate-config)
 * [nano-config](https://github.com/rust-lang/nano-config)
 * [zsh-config](https://github.com/rust-lang/zsh-config)
-
-## Community-maintained Configs
-
-* [.editorconfig](https://gist.github.com/derhuerst/c9d1b9309e308d9851fa) ([what is this?](http://editorconfig.org/))
index e3aa4e688d38beea06fa567826570c3bccc0f3a8..a21194962f9b061663203c969ce6c115b4bc097b 100644 (file)
@@ -117,7 +117,10 @@ class Void(Type):
         Type.__init__(self, 0)
 
     def compiler_ctor(self):
-        return 'void()'
+        return '::VOID'
+
+    def compiler_ctor_ref(self):
+        return '&' + self.compiler_ctor()
 
     def rust_name(self):
         return '()'
@@ -163,10 +166,12 @@ class Signed(Number):
 
     def compiler_ctor(self):
         if self._llvm_bitwidth is None:
-            return 'i({})'.format(self.bitwidth())
+            return '::I{}'.format(self.bitwidth())
         else:
-            return 'i_({}, {})'.format(self.bitwidth(),
-                                       self._llvm_bitwidth)
+            return '::I{}_{}'.format(self.bitwidth(), self._llvm_bitwidth)
+
+    def compiler_ctor_ref(self):
+        return '&' + self.compiler_ctor()
 
     def llvm_name(self):
         bw = self._llvm_bitwidth or self.bitwidth()
@@ -182,10 +187,12 @@ class Unsigned(Number):
 
     def compiler_ctor(self):
         if self._llvm_bitwidth is None:
-            return 'u({})'.format(self.bitwidth())
+            return '::U{}'.format(self.bitwidth())
         else:
-            return 'u_({}, {})'.format(self.bitwidth(),
-                                       self._llvm_bitwidth)
+            return '::U{}_{}'.format(self.bitwidth(), self._llvm_bitwidth)
+
+    def compiler_ctor_ref(self):
+        return '&' + self.compiler_ctor()
 
     def llvm_name(self):
         bw = self._llvm_bitwidth or self.bitwidth()
@@ -200,7 +207,10 @@ class Float(Number):
         Number.__init__(self, bitwidth)
 
     def compiler_ctor(self):
-        return 'f({})'.format(self.bitwidth())
+        return '::F{}'.format(self.bitwidth())
+
+    def compiler_ctor_ref(self):
+        return '&' + self.compiler_ctor()
 
     def llvm_name(self):
         return 'f{}'.format(self.bitwidth())
@@ -244,12 +254,16 @@ class Vector(Type):
 
     def compiler_ctor(self):
         if self._bitcast is None:
-            return 'v({}, {})'.format(self._elem.compiler_ctor(),
-                                      self._length)
+            return '{}x{}'.format(self._elem.compiler_ctor(),
+                                     self._length)
         else:
-            return 'v_({}, {}, {})'.format(self._elem.compiler_ctor(),
-                                           self._bitcast.compiler_ctor(),
-                                           self._length)
+            return '{}x{}_{}'.format(self._elem.compiler_ctor(),
+                                     self._length,
+                                     self._bitcast.compiler_ctor()
+                                         .replace('::', ''))
+
+    def compiler_ctor_ref(self):
+        return '&' + self.compiler_ctor()
 
     def rust_name(self):
         return '{}x{}'.format(self._elem.rust_name(), self._length)
@@ -284,10 +298,14 @@ class Pointer(Type):
         if self._llvm_elem is None:
             llvm_elem = 'None'
         else:
-            llvm_elem = 'Some({})'.format(self._llvm_elem.compiler_ctor())
-        return 'p({}, {}, {})'.format('true' if self._const else 'false',
-                                      self._elem.compiler_ctor(),
-                                      llvm_elem)
+            llvm_elem = 'Some({})'.format(self._llvm_elem.compiler_ctor_ref())
+        return 'Type::Pointer({}, {}, {})'.format(self._elem.compiler_ctor_ref(),
+                                                  llvm_elem,
+                                                  'true' if self._const else 'false')
+
+    def compiler_ctor_ref(self):
+        return "{{ static PTR: Type = {}; &PTR }}".format(self.compiler_ctor())
+
 
     def rust_name(self):
         return '*{} {}'.format('const' if self._const else 'mut',
@@ -322,8 +340,14 @@ class Aggregate(Type):
             raise NotImplementedError()
 
     def compiler_ctor(self):
-        return 'agg({}, vec![{}])'.format('true' if self._flatten else 'false',
-                                          ', '.join(elem.compiler_ctor() for elem in self._elems))
+        parts = "{{ static PARTS: [&'static Type; {}] = [{}]; &PARTS }}"
+        elems = ', '.join(elem.compiler_ctor_ref() for elem in self._elems)
+        parts = parts.format(len(self._elems), elems)
+        return 'Type::Aggregate({}, {})'.format('true' if self._flatten else 'false',
+                                                parts)
+
+    def compiler_ctor_ref(self):
+        return "{{ static AGG: Type = {}; &AGG }}".format(self.compiler_ctor())
 
     def rust_name(self):
         return '({})'.format(', '.join(elem.rust_name() for elem in self._elems))
@@ -518,10 +542,10 @@ class MonomorphicIntrinsic(object):
         return self._platform.platform().intrinsic_prefix() + self.intrinsic_suffix()
 
     def compiler_args(self):
-        return ', '.join(arg.compiler_ctor() for arg in self._args_raw)
+        return ', '.join(arg.compiler_ctor_ref() for arg in self._args_raw)
 
     def compiler_ret(self):
-        return self._ret_raw.compiler_ctor()
+        return self._ret_raw.compiler_ctor_ref()
 
     def compiler_signature(self):
         return '({}) -> {}'.format(self.compiler_args(), self.compiler_ret())
@@ -691,7 +715,7 @@ def parse_args():
     parser.add_argument('-o', '--out', type=argparse.FileType('w'), default=sys.stdout,
                         help = 'File to output to (default stdout).')
     parser.add_argument('-i', '--info', type=argparse.FileType('r'),
-                        help = 'File containing platform specific information to merge into'
+                        help = 'File containing platform specific information to merge into '
                                 'the input files\' header.')
     parser.add_argument('in_', metavar="FILE", type=argparse.FileType('r'), nargs='+',
                         help = 'JSON files to load')
@@ -733,24 +757,24 @@ class CompilerDefs(object):
 
 #![allow(unused_imports)]
 
-use {{Intrinsic, i, i_, u, u_, f, v, v_, agg, p, void}};
+use {{Intrinsic, Type}};
 use IntrinsicDef::Named;
-use rustc::middle::ty;
 
 // The default inlining settings trigger a pathological behaviour in
 // LLVM, which makes compilation very slow. See #28273.
 #[inline(never)]
-pub fn find<'tcx>(_tcx: &ty::ctxt<'tcx>, name: &str) -> Option<Intrinsic> {{
+pub fn find(name: &str) -> Option<Intrinsic> {{
     if !name.starts_with("{0}") {{ return None }}
     Some(match &name["{0}".len()..] {{'''.format(platform.intrinsic_prefix())
 
     def render(self, mono):
         return '''\
         "{}" => Intrinsic {{
-            inputs: vec![{}],
+            inputs: {{ static INPUTS: [&'static Type; {}] = [{}]; &INPUTS }},
             output: {},
             definition: Named("{}")
         }},'''.format(mono.intrinsic_suffix(),
+                      len(mono._args_raw),
                       mono.compiler_args(),
                       mono.compiler_ret(),
                       mono.llvm_name())
index 2c1492c2954c8e18dd8eb6296f523a903a252deb..05a4721aed8340e0ce56a1513d899e33e68e2b46 100644 (file)
@@ -8,6 +8,83 @@
             "ret": "f(32-64)",
             "args": ["0", "0"]
         },
+        {
+            "intrinsic": "256_blendv_{0.data_type}",
+            "width": [256],
+            "llvm": "blendv.{0.data_type}.256",
+            "ret": "f(32-64)",
+            "args": ["0", "0", "0"]
+        },
+        {
+            "intrinsic": "256_broadcast_{0.data_type}",
+            "width": [256],
+            "llvm": "vbroadcastf128.{0.data_type}.256",
+            "ret": "f(32-64)",
+            "args": ["s8SPc"]
+        },
+        {
+            "intrinsic": "256_cmp_{0.data_type}",
+            "width": [256],
+            "llvm": "cmp.{1.data_type}.256",
+            "ret": "f(32-64)",
+            "args": ["0", "0", "s8S"]
+        },
+        {
+            "intrinsic": "256_cvtepi32_pd",
+            "width": [256],
+            "llvm": "cvtdq2.pd.256",
+            "ret": "f64",
+            "args": ["s32h"]
+        },
+        {
+            "intrinsic": "256_cvtepi32_ps",
+            "width": [256],
+            "llvm": "cvtdq2.ps.256",
+            "ret": "f32",
+            "args": ["s32"]
+        },
+        {
+            "intrinsic": "256_cvtpd_epi32",
+            "width": [256],
+            "llvm": "cvt.pd2dq.256",
+            "ret": "s32h",
+            "args": ["f64"]
+        },
+        {
+            "intrinsic": "256_cvtpd_ps",
+            "width": [256],
+            "llvm": "cvt.pd2.ps.256",
+            "ret": "f32h",
+            "args": ["f64"]
+        },
+        {
+            "intrinsic": "256_cvtps_epi32",
+            "width": [256],
+            "llvm": "cvt.ps2dq.256",
+            "ret": "s32",
+            "args": ["f32"]
+        },
+        {
+            "intrinsic": "256_cvtps_pd",
+            "width": [256],
+            "llvm": "cvt.ps2.pd.256",
+            "ret": "f64",
+            "args": ["f32h"]
+        },
+        {
+            "intrinsic": "256_cvttpd_epi32",
+            "width": [256],
+            "llvm": "cvtt.pd2dq.256",
+            "ret": "s32h",
+            "args": ["f64"]
+        },
+        {
+            "intrinsic": "256_cvttps_epi32",
+            "width": [256],
+            "llvm": "cvtt.ps2dq.256",
+            "ret": "s32",
+            "args": ["f32"]
+        },
         {
             "intrinsic": "256_dp_ps",
             "width": [256],
diff --git a/src/etc/platform-intrinsics/x86/fma.json b/src/etc/platform-intrinsics/x86/fma.json
new file mode 100644 (file)
index 0000000..c922d16
--- /dev/null
@@ -0,0 +1,47 @@
+{
+    "llvm_prefix": "llvm.x86.fma.",
+    "intrinsics": [
+        {
+            "intrinsic": "{0.width_mm}_fmadd_{0.data_type}",
+            "width": [128, 256],
+            "llvm": "vfmadd.{0.data_type_short}{0.width_suffix}",
+            "ret": "f(32-64)",
+            "args": ["0", "0", "0"]
+        },
+        {
+            "intrinsic": "{0.width_mm}_fmaddsub_{0.data_type}",
+            "width": [128, 256],
+            "llvm": "vfmaddsub.{0.data_type_short}{0.width_suffix}",
+            "ret": "f(32-64)",
+            "args": ["0", "0", "0"]
+        },
+        {
+            "intrinsic": "{0.width_mm}_fmsub_{0.data_type}",
+            "width": [128, 256],
+            "llvm": "vfmsub.{0.data_type_short}{0.width_suffix}",
+            "ret": "f(32-64)",
+            "args": ["0", "0", "0"]
+        },
+        {
+            "intrinsic": "{0.width_mm}_fmsubadd_{0.data_type}",
+            "width": [128, 256],
+            "llvm": "vfmsubadd.{0.data_type_short}{0.width_suffix}",
+            "ret": "f(32-64)",
+            "args": ["0", "0", "0"]
+        },
+        {
+            "intrinsic": "{0.width_mm}_fnmadd_{0.data_type}",
+            "width": [128, 256],
+            "llvm": "vfnmadd.{0.data_type_short}{0.width_suffix}",
+            "ret": "f(32-64)",
+            "args": ["0", "0", "0"]
+        },
+        {
+            "intrinsic": "{0.width_mm}_fnmsub_{0.data_type}",
+            "width": [128, 256],
+            "llvm": "vfnmsub.{0.data_type_short}{0.width_suffix}",
+            "ret": "f(32-64)",
+            "args": ["0", "0", "0"]
+        }
+    ]
+}
index ce774d31b081fe2a34d727e8d17d291a30376b7a..9264646673b5b4b4bc74b7c7a4abaa7e24349477 100644 (file)
@@ -31,6 +31,7 @@ stable_whitelist = {
     'src/libcore',
     'src/libstd',
     'src/rustc/std_shim',
+    'src/rustc/test_shim',
     'src/test'
 }
 
index 10b864a902dc0a4bebb166b1d8ec0723fa3f4343..5a7632868e467202379324d00f36931ed15e583a 100755 (executable)
@@ -398,7 +398,7 @@ pub const UNICODE_VERSION: (u64, u64, u64) = (%s, %s, %s);
         derived = load_properties("DerivedCoreProperties.txt", want_derived)
         scripts = load_properties("Scripts.txt", [])
         props = load_properties("PropList.txt",
-                ["White_Space", "Join_Control", "Noncharacter_Code_Point"])
+                ["White_Space", "Join_Control", "Noncharacter_Code_Point", "Pattern_White_Space"])
         norm_props = load_properties("DerivedNormalizationProps.txt",
                      ["Full_Composition_Exclusion"])
 
@@ -408,7 +408,7 @@ pub const UNICODE_VERSION: (u64, u64, u64) = (%s, %s, %s);
         # category tables
         for (name, cat, pfuns) in ("general_category", gencats, ["N", "Cc"]), \
                                   ("derived_property", derived, want_derived), \
-                                  ("property", props, ["White_Space"]):
+                                  ("property", props, ["White_Space", "Pattern_White_Space"]):
             emit_property_module(rf, name, cat, pfuns)
 
         # normalizations and conversions module
index bdda0feb9e5d423741c7411725df48fab953852d..104b1f8b0177af4205b8dd49faa04747cc6abbf6 100644 (file)
@@ -1,10 +1,10 @@
 Unless otherwise specified, files in the jemalloc source distribution are
 subject to the following license:
 --------------------------------------------------------------------------------
-Copyright (C) 2002-2014 Jason Evans <jasone@canonware.com>.
+Copyright (C) 2002-2016 Jason Evans <jasone@canonware.com>.
 All rights reserved.
 Copyright (C) 2007-2012 Mozilla Foundation.  All rights reserved.
-Copyright (C) 2009-2014 Facebook, Inc.  All rights reserved.
+Copyright (C) 2009-2016 Facebook, Inc.  All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
index d56ee999e69cdaff28c8a0cb2b24ac67421b89d1..9cbfbf9636bf51af5167204720c45fb1ba51c222 100644 (file)
 Following are change highlights associated with official releases.  Important
-bug fixes are all mentioned, but internal enhancements are omitted here for
-brevity (even though they are more fun to write about).  Much more detail can be
-found in the git revision history:
+bug fixes are all mentioned, but some internal enhancements are omitted here for
+brevity.  Much more detail can be found in the git revision history:
 
     https://github.com/jemalloc/jemalloc
 
+* 4.1.0 (February 28, 2016)
+
+  This release is primarily about optimizations, but it also incorporates a lot
+  of portability-motivated refactoring and enhancements.  Many people worked on
+  this release, to an extent that even with the omission here of minor changes
+  (see git revision history), and of the people who reported and diagnosed
+  issues, so much of the work was contributed that starting with this release,
+  changes are annotated with author credits to help reflect the collaborative
+  effort involved.
+
+  New features:
+  - Implement decay-based unused dirty page purging, a major optimization with
+    mallctl API impact.  This is an alternative to the existing ratio-based
+    unused dirty page purging, and is intended to eventually become the sole
+    purging mechanism.  New mallctls:
+    + opt.purge
+    + opt.decay_time
+    + arena.<i>.decay
+    + arena.<i>.decay_time
+    + arenas.decay_time
+    + stats.arenas.<i>.decay_time
+    (@jasone, @cevans87)
+  - Add --with-malloc-conf, which makes it possible to embed a default
+    options string during configuration.  This was motivated by the desire to
+    specify --with-malloc-conf=purge:decay, since the default must remain
+    purge:ratio until the 5.0.0 release.  (@jasone)
+  - Add MS Visual Studio 2015 support.  (@rustyx, @yuslepukhin)
+  - Make *allocx() size class overflow behavior defined.  The maximum
+    size class is now less than PTRDIFF_MAX to protect applications against
+    numerical overflow, and all allocation functions are guaranteed to indicate
+    errors rather than potentially crashing if the request size exceeds the
+    maximum size class.  (@jasone)
+  - jeprof:
+    + Add raw heap profile support.  (@jasone)
+    + Add --retain and --exclude for backtrace symbol filtering.  (@jasone)
+
+  Optimizations:
+  - Optimize the fast path to combine various bootstrapping and configuration
+    checks and execute more streamlined code in the common case.  (@interwq)
+  - Use linear scan for small bitmaps (used for small object tracking).  In
+    addition to speeding up bitmap operations on 64-bit systems, this reduces
+    allocator metadata overhead by approximately 0.2%.  (@djwatson)
+  - Separate arena_avail trees, which substantially speeds up run tree
+    operations.  (@djwatson)
+  - Use memoization (boot-time-computed table) for run quantization.  Separate
+    arena_avail trees reduced the importance of this optimization.  (@jasone)
+  - Attempt mmap-based in-place huge reallocation.  This can dramatically speed
+    up incremental huge reallocation.  (@jasone)
+
+  Incompatible changes:
+  - Make opt.narenas unsigned rather than size_t.  (@jasone)
+
+  Bug fixes:
+  - Fix stats.cactive accounting regression.  (@rustyx, @jasone)
+  - Handle unaligned keys in hash().  This caused problems for some ARM systems.
+    (@jasone, Christopher Ferris)
+  - Refactor arenas array.  In addition to fixing a fork-related deadlock, this
+    makes arena lookups faster and simpler.  (@jasone)
+  - Move retained memory allocation out of the default chunk allocation
+    function, to a location that gets executed even if the application installs
+    a custom chunk allocation function.  This resolves a virtual memory leak.
+    (@buchgr)
+  - Fix a potential tsd cleanup leak.  (Christopher Ferris, @jasone)
+  - Fix run quantization.  In practice this bug had no impact unless
+    applications requested memory with alignment exceeding one page.
+    (@jasone, @djwatson)
+  - Fix LinuxThreads-specific bootstrapping deadlock.  (Cosmin Paraschiv)
+  - jeprof:
+    + Don't discard curl options if timeout is not defined.  (@djwatson)
+    + Detect failed profile fetches.  (@djwatson)
+  - Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for
+    --disable-stats case.  (@jasone)
+
+* 4.0.4 (October 24, 2015)
+
+  This bugfix release fixes another xallocx() regression.  No other regressions
+  have come to light in over a month, so this is likely a good starting point
+  for people who prefer to wait for "dot one" releases with all the major issues
+  shaken out.
+
+  Bug fixes:
+  - Fix xallocx(..., MALLOCX_ZERO) to zero the last full trailing page of large
+    allocations that have been randomly assigned an offset of 0 when
+    --enable-cache-oblivious configure option is enabled.
+
+* 4.0.3 (September 24, 2015)
+
+  This bugfix release continues the trend of xallocx() and heap profiling fixes.
+
+  Bug fixes:
+  - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large
+    allocations when --enable-cache-oblivious configure option is enabled.
+  - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations
+    when resizing from/to a size class that is not a multiple of the chunk size.
+  - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap
+    profile dumping started.
+  - Work around a potentially bad thread-specific data initialization
+    interaction with NPTL (glibc's pthreads implementation).
+
+* 4.0.2 (September 21, 2015)
+
+  This bugfix release addresses a few bugs specific to heap profiling.
+
+  Bug fixes:
+  - Fix ixallocx_prof_sample() to never modify nor create sampled small
+    allocations.  xallocx() is in general incapable of moving small allocations,
+    so this fix removes buggy code without loss of generality.
+  - Fix irallocx_prof_sample() to always allocate large regions, even when
+    alignment is non-zero.
+  - Fix prof_alloc_rollback() to read tdata from thread-specific data rather
+    than dereferencing a potentially invalid tctx.
+
+* 4.0.1 (September 15, 2015)
+
+  This is a bugfix release that is somewhat high risk due to the amount of
+  refactoring required to address deep xallocx() problems.  As a side effect of
+  these fixes, xallocx() now tries harder to partially fulfill requests for
+  optional extra space.  Note that a couple of minor heap profiling
+  optimizations are included, but these are better thought of as performance
+  fixes that were integral to discovering most of the other bugs.
+
+  Optimizations:
+  - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the
+    fast path when heap profiling is enabled.  Additionally, split a special
+    case out into arena_prof_tctx_reset(), which also avoids chunk metadata
+    reads.
+  - Optimize irallocx_prof() to optimistically update the sampler state.  The
+    prior implementation appears to have been a holdover from when
+    rallocx()/xallocx() functionality was combined as rallocm().
+
+  Bug fixes:
+  - Fix TLS configuration such that it is enabled by default for platforms on
+    which it works correctly.
+  - Fix arenas_cache_cleanup() and arena_get_hard() to handle
+    allocation/deallocation within the application's thread-specific data
+    cleanup functions even after arenas_cache is torn down.
+  - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS.
+  - Fix chunk purge hook calls for in-place huge shrinking reallocation to
+    specify the old chunk size rather than the new chunk size.  This bug caused
+    no correctness issues for the default chunk purge function, but was
+    visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
+  - Fix heap profiling bugs:
+    + Fix heap profiling to distinguish among otherwise identical sample sites
+      with interposed resets (triggered via the "prof.reset" mallctl).  This bug
+      could cause data structure corruption that would most likely result in a
+      segfault.
+    + Fix irealloc_prof() to prof_alloc_rollback() on OOM.
+    + Make one call to prof_active_get_unlocked() per allocation event, and use
+      the result throughout the relevant functions that handle an allocation
+      event.  Also add a missing check in prof_realloc().  These fixes protect
+      allocation events against concurrent prof_active changes.
+    + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample()
+      in the correct order.
+    + Fix prof_realloc() to call prof_free_sampled_object() after calling
+      prof_malloc_sample_object().  Prior to this fix, if tctx and old_tctx were
+      the same, the tctx could have been prematurely destroyed.
+  - Fix portability bugs:
+    + Don't bitshift by negative amounts when encoding/decoding run sizes in
+      chunk header maps.  This affected systems with page sizes greater than 8
+      KiB.
+    + Rename index_t to szind_t to avoid an existing type on Solaris.
+    + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to
+      match glibc and avoid compilation errors when including both
+      jemalloc/jemalloc.h and malloc.h in C++ code.
+    + Don't assume that /bin/sh is appropriate when running size_classes.sh
+      during configuration.
+    + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM.
+    + Link tests to librt if it contains clock_gettime(2).
+
+* 4.0.0 (August 17, 2015)
+
+  This version contains many speed and space optimizations, both minor and
+  major.  The major themes are generalization, unification, and simplification.
+  Although many of these optimizations cause no visible behavior change, their
+  cumulative effect is substantial.
+
+  New features:
+  - Normalize size class spacing to be consistent across the complete size
+    range.  By default there are four size classes per size doubling, but this
+    is now configurable via the --with-lg-size-class-group option.  Also add the
+    --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and
+    --with-lg-tiny-min options, which can be used to tweak page and size class
+    settings.  Impacts:
+    + Worst case performance for incrementally growing/shrinking reallocation
+      is improved because there are far fewer size classes, and therefore
+      copying happens less often.
+    + Internal fragmentation is limited to 20% for all but the smallest size
+      classes (those less than four times the quantum).  (1B + 4 KiB)
+      and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation.
+    + Chunk fragmentation tends to be lower because there are fewer distinct run
+      sizes to pack.
+  - Add support for explicit tcaches.  The "tcache.create", "tcache.flush", and
+    "tcache.destroy" mallctls control tcache lifetime and flushing, and the
+    MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API
+    control which tcache is used for each operation.
+  - Implement per thread heap profiling, as well as the ability to
+    enable/disable heap profiling on a per thread basis.  Add the "prof.reset",
+    "prof.lg_sample", "thread.prof.name", "thread.prof.active",
+    "opt.prof_thread_active_init", "prof.thread_active_init", and
+    "thread.prof.active" mallctls.
+  - Add support for per arena application-specified chunk allocators, configured
+    via the "arena.<i>.chunk_hooks" mallctl.
+  - Refactor huge allocation to be managed by arenas, so that arenas now
+    function as general purpose independent allocators.  This is important in
+    the context of user-specified chunk allocators, aside from the scalability
+    benefits.  Related new statistics:
+    + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc",
+      "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests"
+      mallctls provide high level per arena huge allocation statistics.
+    + The "arenas.nhchunks", "arenas.hchunk.<i>.size",
+      "stats.arenas.<i>.hchunks.<j>.nmalloc",
+      "stats.arenas.<i>.hchunks.<j>.ndalloc",
+      "stats.arenas.<i>.hchunks.<j>.nrequests", and
+      "stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class
+      statistics.
+  - Add the 'util' column to malloc_stats_print() output, which reports the
+    proportion of available regions that are currently in use for each small
+    size class.
+  - Add "alloc" and "free" modes for for junk filling (see the "opt.junk"
+    mallctl), so that it is possible to separately enable junk filling for
+    allocation versus deallocation.
+  - Add the jemalloc-config script, which provides information about how
+    jemalloc was configured, and how to integrate it into application builds.
+  - Add metadata statistics, which are accessible via the "stats.metadata",
+    "stats.arenas.<i>.metadata.mapped", and
+    "stats.arenas.<i>.metadata.allocated" mallctls.
+  - Add the "stats.resident" mallctl, which reports the upper limit of
+    physically resident memory mapped by the allocator.
+  - Add per arena control over unused dirty page purging, via the
+    "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and
+    "stats.arenas.<i>.lg_dirty_mult" mallctls.
+  - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump
+    feature on/off during program execution.
+  - Add sdallocx(), which implements sized deallocation.  The primary
+    optimization over dallocx() is the removal of a metadata read, which often
+    suffers an L1 cache miss.
+  - Add missing header includes in jemalloc/jemalloc.h, so that applications
+    only have to #include <jemalloc/jemalloc.h>.
+  - Add support for additional platforms:
+    + Bitrig
+    + Cygwin
+    + DragonFlyBSD
+    + iOS
+    + OpenBSD
+    + OpenRISC/or1k
+
+  Optimizations:
+  - Maintain dirty runs in per arena LRUs rather than in per arena trees of
+    dirty-run-containing chunks.  In practice this change significantly reduces
+    dirty page purging volume.
+  - Integrate whole chunks into the unused dirty page purging machinery.  This
+    reduces the cost of repeated huge allocation/deallocation, because it
+    effectively introduces a cache of chunks.
+  - Split the arena chunk map into two separate arrays, in order to increase
+    cache locality for the frequently accessed bits.
+  - Move small run metadata out of runs, into arena chunk headers.  This reduces
+    run fragmentation, smaller runs reduce external fragmentation for small size
+    classes, and packed (less uniformly aligned) metadata layout improves CPU
+    cache set distribution.
+  - Randomly distribute large allocation base pointer alignment relative to page
+    boundaries in order to more uniformly utilize CPU cache sets.  This can be
+    disabled via the --disable-cache-oblivious configure option, and queried via
+    the "config.cache_oblivious" mallctl.
+  - Micro-optimize the fast paths for the public API functions.
+  - Refactor thread-specific data to reside in a single structure.  This assures
+    that only a single TLS read is necessary per call into the public API.
+  - Implement in-place huge allocation growing and shrinking.
+  - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make
+    additional optimizations that reduce maximum lookup depth to one or two
+    levels.  This resolves what was a concurrency bottleneck for per arena huge
+    allocation, because a global data structure is critical for determining
+    which arenas own which huge allocations.
+
+  Incompatible changes:
+  - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious
+    warnings by default.
+  - Assure that the constness of malloc_usable_size()'s return type matches that
+    of the system implementation.
+  - Change the heap profile dump format to support per thread heap profiling,
+    rename pprof to jeprof, and enhance it with the --thread=<n> option.  As a
+    result, the bundled jeprof must now be used rather than the upstream
+    (gperftools) pprof.
+  - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can
+    internally deadlock on some platforms.
+  - Change the "arenas.nlruns" mallctl type from size_t to unsigned.
+  - Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with
+    "stats.arenas.<i>.bins.<j>.curregs".
+  - Ignore MALLOC_CONF in set{uid,gid,cap} binaries.
+  - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the
+    MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage.
+
+  Removed features:
+  - Remove the *allocm() API, which is superseded by the *allocx() API.
+  - Remove the --enable-dss options, and make dss non-optional on all platforms
+    which support sbrk(2).
+  - Remove the "arenas.purge" mallctl, which was obsoleted by the
+    "arena.<i>.purge" mallctl in 3.1.0.
+  - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically
+    detects whether it is running inside Valgrind.
+  - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and
+    "stats.huge.ndalloc" mallctls.
+  - Remove the --enable-mremap option.
+  - Remove the "stats.chunks.current", "stats.chunks.total", and
+    "stats.chunks.high" mallctls.
+
+  Bug fixes:
+  - Fix the cactive statistic to decrease (rather than increase) when active
+    memory decreases.  This regression was first released in 3.5.0.
+  - Fix OOM handling in memalign() and valloc().  A variant of this bug existed
+    in all releases since 2.0.0, which introduced these functions.
+  - Fix an OOM-related regression in arena_tcache_fill_small(), which could
+    cause cache corruption on OOM.  This regression was present in all releases
+    from 2.2.0 through 3.6.0.
+  - Fix size class overflow handling for malloc(), posix_memalign(), memalign(),
+    calloc(), and realloc() when profiling is enabled.
+  - Fix the "arena.<i>.dss" mallctl to return an error if "primary" or
+    "secondary" precedence is specified, but sbrk(2) is not supported.
+  - Fix fallback lg_floor() implementations to handle extremely large inputs.
+  - Ensure the default purgeable zone is after the default zone on OS X.
+  - Fix latent bugs in atomic_*().
+  - Fix the "arena.<i>.dss" mallctl to handle read-only calls.
+  - Fix tls_model configuration to enable the initial-exec model when possible.
+  - Mark malloc_conf as a weak symbol so that the application can override it.
+  - Correctly detect glibc's adaptive pthread mutexes.
+  - Fix the --without-export configure option.
+
 * 3.6.0 (March 31, 2014)
 
   This version contains a critical bug fix for a regression present in 3.5.0 and
@@ -21,7 +346,7 @@ found in the git revision history:
     backtracing to be reliable.
   - Use dss allocation precedence for huge allocations as well as small/large
     allocations.
-  - Fix test assertion failure message formatting.  This bug did not manifect on
+  - Fix test assertion failure message formatting.  This bug did not manifest on
     x86_64 systems because of implementation subtleties in va_list.
   - Fix inconsequential test failures for hash and SFMT code.
 
@@ -516,7 +841,7 @@ found in the git revision history:
   - Make it possible for the application to manually flush a thread's cache, via
     the "tcache.flush" mallctl.
   - Base maximum dirty page count on proportion of active memory.
-  - Compute various addtional run-time statistics, including per size class
+  - Compute various additional run-time statistics, including per size class
     statistics for large objects.
   - Expose malloc_stats_print(), which can be called repeatedly by the
     application.
index 9af233693b2714021c9a97557fceb0e61501a0e7..5c25054a88f9b17f5b9c794239c50b850c0d5556 100644 (file)
@@ -84,6 +84,14 @@ any of the following arguments (not a definitive list) to 'configure':
     versions of jemalloc can coexist in the same installation directory.  For
     example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0.
 
+--with-malloc-conf=<malloc_conf>
+    Embed <malloc_conf> as a run-time options string that is processed prior to
+    the malloc_conf global variable, the /etc/malloc.conf symlink, and the
+    MALLOC_CONF environment variable.  For example, to change the default chunk
+    size to 256 KiB:
+
+      --with-malloc-conf=lg_chunk:18
+
 --disable-cc-silence
     Disable code that silences non-useful compiler warnings.  This is mainly
     useful during development when auditing the set of warnings that are being
@@ -107,15 +115,15 @@ any of the following arguments (not a definitive list) to 'configure':
     there are interactions between the various coverage targets, so it is
     usually advisable to run 'make clean' between repeated code coverage runs.
 
---enable-ivsalloc
-    Enable validation code, which verifies that pointers reside within
-    jemalloc-owned chunks before dereferencing them.  This incurs a substantial
-    performance hit.
-
 --disable-stats
     Disable statistics gathering functionality.  See the "opt.stats_print"
     option documentation for usage details.
 
+--enable-ivsalloc
+    Enable validation code, which verifies that pointers reside within
+    jemalloc-owned chunks before dereferencing them.  This incurs a minor
+    performance hit.
+
 --enable-prof
     Enable heap profiling and leak detection functionality.  See the "opt.prof"
     option documentation for usage details.  When enabled, there are several
@@ -185,10 +193,106 @@ any of the following arguments (not a definitive list) to 'configure':
     thread-local variables via the __thread keyword.  If TLS is available,
     jemalloc uses it for several purposes.
 
+--disable-cache-oblivious
+    Disable cache-oblivious large allocation alignment for large allocation
+    requests with no alignment constraints.  If this feature is disabled, all
+    large allocations are page-aligned as an implementation artifact, which can
+    severely harm CPU cache utilization.  However, the cache-oblivious layout
+    comes at the cost of one extra page per large allocation, which in the
+    most extreme case increases physical memory usage for the 16 KiB size class
+    to 20 KiB.
+
 --with-xslroot=<path>
     Specify where to find DocBook XSL stylesheets when building the
     documentation.
 
+--with-lg-page=<lg-page>
+    Specify the base 2 log of the system page size.  This option is only useful
+    when cross compiling, since the configure script automatically determines
+    the host's page size by default.
+
+--with-lg-page-sizes=<lg-page-sizes>
+    Specify the comma-separated base 2 logs of the page sizes to support.  This
+    option may be useful when cross-compiling in combination with
+    --with-lg-page, but its primary use case is for integration with FreeBSD's
+    libc, wherein jemalloc is embedded.
+
+--with-lg-size-class-group=<lg-size-class-group>
+    Specify the base 2 log of how many size classes to use for each doubling in
+    size.  By default jemalloc uses <lg-size-class-group>=2, which results in
+    e.g. the following size classes:
+
+      [...], 64,
+      80, 96, 112, 128,
+      160, [...]
+
+    <lg-size-class-group>=3 results in e.g. the following size classes:
+
+      [...], 64,
+      72, 80, 88, 96, 104, 112, 120, 128,
+      144, [...]
+
+    The minimal <lg-size-class-group>=0 causes jemalloc to only provide size
+    classes that are powers of 2:
+
+      [...],
+      64,
+      128,
+      256,
+      [...]
+
+    An implementation detail currently limits the total number of small size
+    classes to 255, and a compilation error will result if the
+    <lg-size-class-group> you specify cannot be supported.  The limit is
+    roughly <lg-size-class-group>=4, depending on page size.
+
+--with-lg-quantum=<lg-quantum>
+    Specify the base 2 log of the minimum allocation alignment.  jemalloc needs
+    to know the minimum alignment that meets the following C standard
+    requirement (quoted from the April 12, 2011 draft of the C11 standard):
+
+      The pointer returned if the allocation succeeds is suitably aligned so
+      that it may be assigned to a pointer to any type of object with a
+      fundamental alignment requirement and then used to access such an object
+      or an array of such objects in the space allocated [...]
+
+    This setting is architecture-specific, and although jemalloc includes known
+    safe values for the most commonly used modern architectures, there is a
+    wrinkle related to GNU libc (glibc) that may impact your choice of
+    <lg-quantum>.  On most modern architectures, this mandates 16-byte alignment
+    (<lg-quantum>=4), but the glibc developers chose not to meet this
+    requirement for performance reasons.  An old discussion can be found at
+    https://sourceware.org/bugzilla/show_bug.cgi?id=206 .  Unlike glibc,
+    jemalloc does follow the C standard by default (caveat: jemalloc
+    technically cheats if --with-lg-tiny-min is smaller than
+    --with-lg-quantum), but the fact that Linux systems already work around
+    this allocator noncompliance means that it is generally safe in practice to
+    let jemalloc's minimum alignment follow glibc's lead.  If you specify
+    --with-lg-quantum=3 during configuration, jemalloc will provide additional
+    size classes that are not 16-byte-aligned (24, 40, and 56, assuming
+    --with-lg-size-class-group=2).
+
+--with-lg-tiny-min=<lg-tiny-min>
+    Specify the base 2 log of the minimum tiny size class to support.  Tiny
+    size classes are powers of 2 less than the quantum, and are only
+    incorporated if <lg-tiny-min> is less than <lg-quantum> (see
+    --with-lg-quantum).  Tiny size classes technically violate the C standard
+    requirement for minimum alignment, and crashes could conceivably result if
+    the compiler were to generate instructions that made alignment assumptions,
+    both because illegal instruction traps could result, and because accesses
+    could straddle page boundaries and cause segmentation faults due to
+    accessing unmapped addresses.
+
+    The default of <lg-tiny-min>=3 works well in practice even on architectures
+    that technically require 16-byte alignment, probably for the same reason
+    --with-lg-quantum=3 works.  Smaller tiny size classes can, and will, cause
+    crashes (see https://bugzilla.mozilla.org/show_bug.cgi?id=691003 for an
+    example).
+
+    This option is rarely useful, and is mainly provided as documentation of a
+    subtle implementation detail.  If you do use this option, specify a
+    value in [3, ..., <lg-quantum>].
+
 The following environment variables (not a definitive list) impact configure's
 behavior:
 
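A minimal sketch of how the configure options documented above might be combined when cross compiling; the target triple and the specific values chosen here are assumptions for illustration, not taken from this change:

      # Embed a default run-time options string, pin the page size to 4 KiB
      # (lg 12), and allow glibc-style 8-byte minimum alignment (lg 3).
      ./configure \
          --with-malloc-conf=lg_chunk:18 \
          --with-lg-page=12 \
          --with-lg-quantum=3 \
          --host=aarch64-linux-gnu
      make && make check
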
index 50f6596a6c742afb2db570f85202f9eb0fb9226d..f60823f59676682c6e46d9a2e932e6e383fea26a 100644 (file)
@@ -28,6 +28,7 @@ CFLAGS := @CFLAGS@
 LDFLAGS := @LDFLAGS@
 EXTRA_LDFLAGS := @EXTRA_LDFLAGS@
 LIBS := @LIBS@
+TESTLIBS := @TESTLIBS@
 RPATH_EXTRA := @RPATH_EXTRA@
 SO := @so@
 IMPORTLIB := @importlib@
@@ -48,8 +49,10 @@ cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@)
 cfgoutputs_out := @cfgoutputs_out@
 enable_autogen := @enable_autogen@
 enable_code_coverage := @enable_code_coverage@
+enable_prof := @enable_prof@
 enable_valgrind := @enable_valgrind@
 enable_zone_allocator := @enable_zone_allocator@
+MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF
 DSO_LDFLAGS = @DSO_LDFLAGS@
 SOREV = @SOREV@
 PIC_CFLAGS = @PIC_CFLAGS@
@@ -73,16 +76,34 @@ endif
 LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix)
 
 # Lists of files.
-BINS := $(srcroot)bin/pprof $(objroot)bin/jemalloc.sh
+BINS := $(objroot)bin/jemalloc-config $(objroot)bin/jemalloc.sh $(objroot)bin/jeprof
 C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h
-C_SRCS := $(srcroot)src/jemalloc.c $(srcroot)src/arena.c \
-       $(srcroot)src/atomic.c $(srcroot)src/base.c $(srcroot)src/bitmap.c \
-       $(srcroot)src/chunk.c $(srcroot)src/chunk_dss.c \
-       $(srcroot)src/chunk_mmap.c $(srcroot)src/ckh.c $(srcroot)src/ctl.c \
-       $(srcroot)src/extent.c $(srcroot)src/hash.c $(srcroot)src/huge.c \
-       $(srcroot)src/mb.c $(srcroot)src/mutex.c $(srcroot)src/prof.c \
-       $(srcroot)src/quarantine.c $(srcroot)src/rtree.c $(srcroot)src/stats.c \
-       $(srcroot)src/tcache.c $(srcroot)src/util.c $(srcroot)src/tsd.c
+C_SRCS := $(srcroot)src/jemalloc.c \
+       $(srcroot)src/arena.c \
+       $(srcroot)src/atomic.c \
+       $(srcroot)src/base.c \
+       $(srcroot)src/bitmap.c \
+       $(srcroot)src/chunk.c \
+       $(srcroot)src/chunk_dss.c \
+       $(srcroot)src/chunk_mmap.c \
+       $(srcroot)src/ckh.c \
+       $(srcroot)src/ctl.c \
+       $(srcroot)src/extent.c \
+       $(srcroot)src/hash.c \
+       $(srcroot)src/huge.c \
+       $(srcroot)src/mb.c \
+       $(srcroot)src/mutex.c \
+       $(srcroot)src/nstime.c \
+       $(srcroot)src/pages.c \
+       $(srcroot)src/prng.c \
+       $(srcroot)src/prof.c \
+       $(srcroot)src/quarantine.c \
+       $(srcroot)src/rtree.c \
+       $(srcroot)src/stats.c \
+       $(srcroot)src/tcache.c \
+       $(srcroot)src/ticker.c \
+       $(srcroot)src/tsd.c \
+       $(srcroot)src/util.c
 ifeq ($(enable_valgrind), 1)
 C_SRCS += $(srcroot)src/valgrind.c
 endif
@@ -104,24 +125,29 @@ endif
 PC := $(objroot)jemalloc.pc
 MAN3 := $(objroot)doc/jemalloc$(install_suffix).3
 DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml
-DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.html)
-DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.3)
+DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.html)
+DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.3)
 DOCS := $(DOCS_HTML) $(DOCS_MAN3)
 C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \
        $(srcroot)test/src/btalloc_1.c $(srcroot)test/src/math.c \
-       $(srcroot)test/src/mtx.c $(srcroot)test/src/SFMT.c \
-       $(srcroot)test/src/test.c $(srcroot)test/src/thd.c \
-       $(srcroot)test/src/timer.c
-C_UTIL_INTEGRATION_SRCS := $(srcroot)src/util.c
+       $(srcroot)test/src/mtx.c $(srcroot)test/src/mq.c \
+       $(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \
+       $(srcroot)test/src/thd.c $(srcroot)test/src/timer.c
+C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/util.c
 TESTS_UNIT := $(srcroot)test/unit/atomic.c \
        $(srcroot)test/unit/bitmap.c \
        $(srcroot)test/unit/ckh.c \
+       $(srcroot)test/unit/decay.c \
        $(srcroot)test/unit/hash.c \
        $(srcroot)test/unit/junk.c \
+       $(srcroot)test/unit/junk_alloc.c \
+       $(srcroot)test/unit/junk_free.c \
+       $(srcroot)test/unit/lg_chunk.c \
        $(srcroot)test/unit/mallctl.c \
        $(srcroot)test/unit/math.c \
        $(srcroot)test/unit/mq.c \
        $(srcroot)test/unit/mtx.c \
+       $(srcroot)test/unit/prng.c \
        $(srcroot)test/unit/prof_accum.c \
        $(srcroot)test/unit/prof_active.c \
        $(srcroot)test/unit/prof_gdump.c \
@@ -133,8 +159,13 @@ TESTS_UNIT := $(srcroot)test/unit/atomic.c \
        $(srcroot)test/unit/quarantine.c \
        $(srcroot)test/unit/rb.c \
        $(srcroot)test/unit/rtree.c \
+       $(srcroot)test/unit/run_quantize.c \
        $(srcroot)test/unit/SFMT.c \
+       $(srcroot)test/unit/size_classes.c \
+       $(srcroot)test/unit/smoothstep.c \
        $(srcroot)test/unit/stats.c \
+       $(srcroot)test/unit/ticker.c \
+       $(srcroot)test/unit/nstime.c \
        $(srcroot)test/unit/tsd.c \
        $(srcroot)test/unit/util.c \
        $(srcroot)test/unit/zero.c
@@ -143,6 +174,7 @@ TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
        $(srcroot)test/integration/sdallocx.c \
        $(srcroot)test/integration/mallocx.c \
        $(srcroot)test/integration/MALLOCX_ARENA.c \
+       $(srcroot)test/integration/overflow.c \
        $(srcroot)test/integration/posix_memalign.c \
        $(srcroot)test/integration/rallocx.c \
        $(srcroot)test/integration/thread_arena.c \
@@ -178,10 +210,10 @@ all: build_lib
 
 dist: build_doc
 
-$(srcroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl
+$(objroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl
        $(XSLTPROC) -o $@ $(objroot)doc/html.xsl $<
 
-$(srcroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl
+$(objroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl
        $(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $<
 
 build_doc_html: $(DOCS_HTML)
@@ -257,15 +289,15 @@ $(STATIC_LIBS):
 
 $(objroot)test/unit/%$(EXE): $(objroot)test/unit/%.$(O) $(TESTS_UNIT_LINK_OBJS) $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS)
        @mkdir -p $(@D)
-       $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(EXTRA_LDFLAGS)
+       $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(TESTLIBS) $(EXTRA_LDFLAGS)
 
 $(objroot)test/integration/%$(EXE): $(objroot)test/integration/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
        @mkdir -p $(@D)
-       $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(filter -lpthread,$(LIBS))) -lm $(EXTRA_LDFLAGS)
+       $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(filter -lpthread,$(LIBS))) -lm $(TESTLIBS) $(EXTRA_LDFLAGS)
 
 $(objroot)test/stress/%$(EXE): $(objroot)test/stress/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_STRESS_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
        @mkdir -p $(@D)
-       $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(EXTRA_LDFLAGS)
+       $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(TESTLIBS) $(EXTRA_LDFLAGS)
 
 build_lib_shared: $(DSOS)
 build_lib_static: $(STATIC_LIBS)
@@ -335,18 +367,27 @@ check_unit_dir:
        @mkdir -p $(objroot)test/unit
 check_integration_dir:
        @mkdir -p $(objroot)test/integration
-check_stress_dir:
+stress_dir:
        @mkdir -p $(objroot)test/stress
-check_dir: check_unit_dir check_integration_dir check_stress_dir
+check_dir: check_unit_dir check_integration_dir
 
 check_unit: tests_unit check_unit_dir
-       $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
+       $(MALLOC_CONF)="purge:ratio" $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
+       $(MALLOC_CONF)="purge:decay" $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
+check_integration_prof: tests_integration check_integration_dir
+ifeq ($(enable_prof), 1)
+       $(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+       $(MALLOC_CONF)="prof:true,prof_active:false" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+endif
+check_integration_decay: tests_integration check_integration_dir
+       $(MALLOC_CONF)="purge:decay,decay_time:-1" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+       $(MALLOC_CONF)="purge:decay,decay_time:0" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+       $(MALLOC_CONF)="purge:decay" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
 check_integration: tests_integration check_integration_dir
        $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
-check_stress: tests_stress check_stress_dir
+stress: tests_stress stress_dir
        $(SHELL) $(objroot)test/test.sh $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%)
-check: tests check_dir
-       $(SHELL) $(objroot)test/test.sh $(TESTS:$(srcroot)%.c=$(objroot)%)
+check: check_unit check_integration check_integration_decay check_integration_prof
 
 ifeq ($(enable_code_coverage), 1)
 coverage_unit: check_unit
@@ -360,7 +401,7 @@ coverage_integration: check_integration
        $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src integration $(C_TESTLIB_INTEGRATION_OBJS)
        $(SHELL) $(srcroot)coverage.sh $(srcroot)test/integration integration $(TESTS_INTEGRATION_OBJS)
 
-coverage_stress: check_stress
+coverage_stress: stress
        $(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS)
        $(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS)
        $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src stress $(C_TESTLIB_STRESS_OBJS)
@@ -405,7 +446,9 @@ clean:
        rm -f $(objroot)*.gcov.*
 
 distclean: clean
+       rm -f $(objroot)bin/jemalloc-config
        rm -f $(objroot)bin/jemalloc.sh
+       rm -f $(objroot)bin/jeprof
        rm -f $(objroot)config.log
        rm -f $(objroot)config.status
        rm -f $(objroot)config.stamp
@@ -414,7 +457,7 @@ distclean: clean
 
 relclean: distclean
        rm -f $(objroot)configure
-       rm -f $(srcroot)VERSION
+       rm -f $(objroot)VERSION
        rm -f $(DOCS_HTML)
        rm -f $(DOCS_MAN3)
 
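A short, assumed usage sketch of the reworked test targets above, as they might be invoked from a build tree (the invocations are illustrative only):

      # 'check' now chains the unit, integration, decay and prof suites;
      # stress tests moved from 'check_stress' to a standalone 'stress' target.
      make check
      make stress
      # A single suite can still be run on its own, e.g.:
      make check_integration
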
diff --git a/src/jemalloc/VERSION b/src/jemalloc/VERSION
deleted file mode 100644 (file)
index f1e1510..0000000
+++ /dev/null
@@ -1 +0,0 @@
-0.12.0-17867-gdb2939409db26ab4904372c82492cd3488e4c44e
diff --git a/src/jemalloc/bin/jemalloc-config.in b/src/jemalloc/bin/jemalloc-config.in
new file mode 100644 (file)
index 0000000..b016c8d
--- /dev/null
@@ -0,0 +1,79 @@
+#!/bin/sh
+
+usage() {
+       cat <<EOF
+Usage:
+  @BINDIR@/jemalloc-config <option>
+Options:
+  --help | -h  : Print usage.
+  --version    : Print jemalloc version.
+  --revision   : Print shared library revision number.
+  --config     : Print configure options used to build jemalloc.
+  --prefix     : Print installation directory prefix.
+  --bindir     : Print binary installation directory.
+  --datadir    : Print data installation directory.
+  --includedir : Print include installation directory.
+  --libdir     : Print library installation directory.
+  --mandir     : Print manual page installation directory.
+  --cc         : Print compiler used to build jemalloc.
+  --cflags     : Print compiler flags used to build jemalloc.
+  --cppflags   : Print preprocessor flags used to build jemalloc.
+  --ldflags    : Print library flags used to build jemalloc.
+  --libs       : Print libraries jemalloc was linked against.
+EOF
+}
+
+prefix="@prefix@"
+exec_prefix="@exec_prefix@"
+
+case "$1" in
+--help | -h)
+       usage
+       exit 0
+       ;;
+--version)
+       echo "@jemalloc_version@"
+       ;;
+--revision)
+       echo "@rev@"
+       ;;
+--config)
+       echo "@CONFIG@"
+       ;;
+--prefix)
+       echo "@PREFIX@"
+       ;;
+--bindir)
+       echo "@BINDIR@"
+       ;;
+--datadir)
+       echo "@DATADIR@"
+       ;;
+--includedir)
+       echo "@INCLUDEDIR@"
+       ;;
+--libdir)
+       echo "@LIBDIR@"
+       ;;
+--mandir)
+       echo "@MANDIR@"
+       ;;
+--cc)
+       echo "@CC@"
+       ;;
+--cflags)
+       echo "@CFLAGS@"
+       ;;
+--cppflags)
+       echo "@CPPFLAGS@"
+       ;;
+--ldflags)
+       echo "@LDFLAGS@ @EXTRA_LDFLAGS@"
+       ;;
+--libs)
+       echo "@LIBS@"
+       ;;
+*)
+       usage
+       exit 1
+esac
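A hypothetical example of consuming this script's output when building a program against an installed jemalloc; the source file name and compiler invocation are assumptions:

      # Pull include, library and link-time information from jemalloc-config.
      cc demo.c -o demo \
          -I"$(jemalloc-config --includedir)" \
          -L"$(jemalloc-config --libdir)" \
          -Wl,-rpath,"$(jemalloc-config --libdir)" \
          -ljemalloc $(jemalloc-config --libs)
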
diff --git a/src/jemalloc/bin/jeprof.in b/src/jemalloc/bin/jeprof.in
new file mode 100644 (file)
index 0000000..42087fc
--- /dev/null
@@ -0,0 +1,5611 @@
+#! /usr/bin/env perl
+
+# Copyright (c) 1998-2007, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# ---
+# Program for printing the profile generated by common/profiler.cc,
+# or by the heap profiler (common/debugallocation.cc)
+#
+# The profile contains a sequence of entries of the form:
+#       <count> <stack trace>
+# This program parses the profile, and generates user-readable
+# output.
+#
+# Examples:
+#
+# % tools/jeprof "program" "profile"
+#   Enters "interactive" mode
+#
+# % tools/jeprof --text "program" "profile"
+#   Generates one line per procedure
+#
+# % tools/jeprof --gv "program" "profile"
+#   Generates annotated call-graph and displays via "gv"
+#
+# % tools/jeprof --gv --focus=Mutex "program" "profile"
+#   Restrict to code paths that involve an entry that matches "Mutex"
+#
+# % tools/jeprof --gv --focus=Mutex --ignore=string "program" "profile"
+#   Restrict to code paths that involve an entry that matches "Mutex"
+#   and does not match "string"
+#
+# % tools/jeprof --list=IBF_CheckDocid "program" "profile"
+#   Generates disassembly listing of all routines with at least one
+#   sample that match the --list=<regexp> pattern.  The listing is
+#   annotated with the flat and cumulative sample counts at each line.
+#
+# % tools/jeprof --disasm=IBF_CheckDocid "program" "profile"
+#   Generates disassembly listing of all routines with at least one
+#   sample that match the --disasm=<regexp> pattern.  The listing is
+#   annotated with the flat and cumulative sample counts at each PC value.
+#
+# TODO: Use color to indicate files?
+
+use strict;
+use warnings;
+use Getopt::Long;
+
+my $JEPROF_VERSION = "@jemalloc_version@";
+my $PPROF_VERSION = "2.0";
+
+# These are the object tools we use which can come from a
+# user-specified location using --tools, from the JEPROF_TOOLS
+# environment variable, or from the environment.
+my %obj_tool_map = (
+  "objdump" => "objdump",
+  "nm" => "nm",
+  "addr2line" => "addr2line",
+  "c++filt" => "c++filt",
+  ## ConfigureObjTools may add architecture-specific entries:
+  #"nm_pdb" => "nm-pdb",       # for reading windows (PDB-format) executables
+  #"addr2line_pdb" => "addr2line-pdb",                                # ditto
+  #"otool" => "otool",         # equivalent of objdump on OS X
+);
+# NOTE: these are lists, so you can put in commandline flags if you want.
+my @DOT = ("dot");          # leave non-absolute, since it may be in /usr/local
+my @GV = ("gv");
+my @EVINCE = ("evince");    # could also be xpdf or perhaps acroread
+my @KCACHEGRIND = ("kcachegrind");
+my @PS2PDF = ("ps2pdf");
+# These are used for dynamic profiles
+my @URL_FETCHER = ("curl", "-s", "--fail");
+
+# These are the web pages that servers need to support for dynamic profiles
+my $HEAP_PAGE = "/pprof/heap";
+my $PROFILE_PAGE = "/pprof/profile";   # must support cgi-param "?seconds=#"
+my $PMUPROFILE_PAGE = "/pprof/pmuprofile(?:\\?.*)?"; # must support cgi-param
+                                                # ?seconds=#&event=x&period=n
+my $GROWTH_PAGE = "/pprof/growth";
+my $CONTENTION_PAGE = "/pprof/contention";
+my $WALL_PAGE = "/pprof/wall(?:\\?.*)?";  # accepts options like namefilter
+my $FILTEREDPROFILE_PAGE = "/pprof/filteredprofile(?:\\?.*)?";
+my $CENSUSPROFILE_PAGE = "/pprof/censusprofile(?:\\?.*)?"; # must support cgi-param
+                                                       # "?seconds=#",
+                                                       # "?tags_regexp=#" and
+                                                       # "?type=#".
+my $SYMBOL_PAGE = "/pprof/symbol";     # must support symbol lookup via POST
+my $PROGRAM_NAME_PAGE = "/pprof/cmdline";
+
+# These are the web pages that can be named on the command line.
+# All the alternatives must begin with /.
+my $PROFILES = "($HEAP_PAGE|$PROFILE_PAGE|$PMUPROFILE_PAGE|" .
+               "$GROWTH_PAGE|$CONTENTION_PAGE|$WALL_PAGE|" .
+               "$FILTEREDPROFILE_PAGE|$CENSUSPROFILE_PAGE)";
+
+# default binary name
+my $UNKNOWN_BINARY = "(unknown)";
+
+# There is a pervasive dependency on the length (in hex characters,
+# i.e., nibbles) of an address, distinguishing between 32-bit and
+# 64-bit profiles.  To err on the safe side, default to 64-bit here:
+my $address_length = 16;
+
+my $dev_null = "/dev/null";
+if (! -e $dev_null && $^O =~ /MSWin/) {    # $^O is the OS perl was built for
+  $dev_null = "nul";
+}
+
+# A list of paths to search for shared object files
+my @prefix_list = ();
+
+# Special routine name that should not have any symbols.
+# Used as separator to parse "addr2line -i" output.
+my $sep_symbol = '_fini';
+my $sep_address = undef;
+
+##### Argument parsing #####
+
+sub usage_string {
+  return <<EOF;
+Usage:
+jeprof [options] <program> <profiles>
+   <profiles> is a space separated list of profile names.
+jeprof [options] <symbolized-profiles>
+   <symbolized-profiles> is a list of profile files where each file contains
+   the necessary symbol mappings  as well as profile data (likely generated
+   with --raw).
+jeprof [options] <profile>
+   <profile> is a remote form.  Symbols are obtained from host:port$SYMBOL_PAGE
+
+   Each name can be:
+   /path/to/profile        - a path to a profile file
+   host:port[/<service>]   - a location of a service to get profile from
+
+   The /<service> can be $HEAP_PAGE, $PROFILE_PAGE, /pprof/pmuprofile,
+                         $GROWTH_PAGE, $CONTENTION_PAGE, /pprof/wall,
+                         $CENSUSPROFILE_PAGE, or /pprof/filteredprofile.
+   For instance:
+     jeprof http://myserver.com:80$HEAP_PAGE
+   If /<service> is omitted, the service defaults to $PROFILE_PAGE (cpu profiling).
+jeprof --symbols <program>
+   Maps addresses to symbol names.  In this mode, stdin should be a
+   list of library mappings, in the same format as is found in the heap-
+   and cpu-profile files (this loosely matches that of /proc/self/maps
+   on linux), followed by a list of hex addresses to map, one per line.
+
+   For more help with querying remote servers, including how to add the
+   necessary server-side support code, see this filename (or one like it):
+
+   /usr/doc/gperftools-$PPROF_VERSION/pprof_remote_servers.html
+
+Options:
+   --cum               Sort by cumulative data
+   --base=<base>       Subtract <base> from <profile> before display
+   --interactive       Run in interactive mode (interactive "help" gives help) [default]
+   --seconds=<n>       Length of time for dynamic profiles [default=30 secs]
+   --add_lib=<file>    Read additional symbols and line info from the given library
+   --lib_prefix=<dir>  Comma separated list of library path prefixes
+
+Reporting Granularity:
+   --addresses         Report at address level
+   --lines             Report at source line level
+   --functions         Report at function level [default]
+   --files             Report at source file level
+
+Output type:
+   --text              Generate text report
+   --callgrind         Generate callgrind format to stdout
+   --gv                Generate Postscript and display
+   --evince            Generate PDF and display
+   --web               Generate SVG and display
+   --list=<regexp>     Generate source listing of matching routines
+   --disasm=<regexp>   Generate disassembly of matching routines
+   --symbols           Print demangled symbol names found at given addresses
+   --dot               Generate DOT file to stdout
+   --ps                Generate Postscript to stdout
+   --pdf               Generate PDF to stdout
+   --svg               Generate SVG to stdout
+   --gif               Generate GIF to stdout
+   --raw               Generate symbolized jeprof data (useful with remote fetch)
+
+Heap-Profile Options:
+   --inuse_space       Display in-use (mega)bytes [default]
+   --inuse_objects     Display in-use objects
+   --alloc_space       Display allocated (mega)bytes
+   --alloc_objects     Display allocated objects
+   --show_bytes        Display space in bytes
+   --drop_negative     Ignore negative differences
+
+Contention-profile options:
+   --total_delay       Display total delay at each region [default]
+   --contentions       Display number of delays at each region
+   --mean_delay        Display mean delay at each region
+
+Call-graph Options:
+   --nodecount=<n>     Show at most so many nodes [default=80]
+   --nodefraction=<f>  Hide nodes below <f>*total [default=.005]
+   --edgefraction=<f>  Hide edges below <f>*total [default=.001]
+   --maxdegree=<n>     Max incoming/outgoing edges per node [default=8]
+   --focus=<regexp>    Focus on backtraces with nodes matching <regexp>
+   --thread=<n>        Show profile for thread <n>
+   --ignore=<regexp>   Ignore backtraces with nodes matching <regexp>
+   --scale=<n>         Set GV scaling [default=0]
+   --heapcheck         Make nodes with non-0 object counts
+                       (i.e. direct leak generators) more visible
+   --retain=<regexp>   Retain only nodes that match <regexp>
+   --exclude=<regexp>  Exclude all nodes that match <regexp>
+
+Miscellaneous:
+   --tools=<prefix or binary:fullpath>[,...]   \$PATH for object tool pathnames
+   --test              Run unit tests
+   --help              This message
+   --version           Version information
+
+Environment Variables:
+   JEPROF_TMPDIR        Profiles directory. Defaults to \$HOME/jeprof
+   JEPROF_TOOLS         Prefix for object tools pathnames
+
+Examples:
+
+jeprof /bin/ls ls.prof
+                       Enters "interactive" mode
+jeprof --text /bin/ls ls.prof
+                       Outputs one line per procedure
+jeprof --web /bin/ls ls.prof
+                       Displays annotated call-graph in web browser
+jeprof --gv /bin/ls ls.prof
+                       Displays annotated call-graph via 'gv'
+jeprof --gv --focus=Mutex /bin/ls ls.prof
+                       Restricts to code paths including a .*Mutex.* entry
+jeprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof
+                       Code paths including Mutex but not string
+jeprof --list=getdir /bin/ls ls.prof
+                       (Per-line) annotated source listing for getdir()
+jeprof --disasm=getdir /bin/ls ls.prof
+                       (Per-PC) annotated disassembly for getdir()
+
+jeprof http://localhost:1234/
+                       Enters "interactive" mode
+jeprof --text localhost:1234
+                       Outputs one line per procedure for localhost:1234
+jeprof --raw localhost:1234 > ./local.raw
+jeprof --text ./local.raw
+                       Fetches a remote profile for later analysis and then
+                       analyzes it in text mode.
+EOF
+}
+
+sub version_string {
+  return <<EOF
+jeprof (part of jemalloc $JEPROF_VERSION)
+based on pprof (part of gperftools $PPROF_VERSION)
+
+Copyright 1998-2007 Google Inc.
+
+This is BSD licensed software; see the source for copying conditions
+and license information.
+There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE.
+EOF
+}
+
+sub usage {
+  my $msg = shift;
+  print STDERR "$msg\n\n";
+  print STDERR usage_string();
+  print STDERR "\nFATAL ERROR: $msg\n";    # just as a reminder
+  exit(1);
+}
+
+sub Init() {
+  # Setup tmp-file name and handler to clean it up.
+  # We do this in the very beginning so that we can use
+  # error() and cleanup() functions anytime hereafter.
+  $main::tmpfile_sym = "/tmp/jeprof$$.sym";
+  $main::tmpfile_ps = "/tmp/jeprof$$";
+  $main::next_tmpfile = 0;
+  $SIG{'INT'} = \&sighandler;
+
+  # Cache from filename/linenumber to source code
+  $main::source_cache = ();
+
+  $main::opt_help = 0;
+  $main::opt_version = 0;
+
+  $main::opt_cum = 0;
+  $main::opt_base = '';
+  $main::opt_addresses = 0;
+  $main::opt_lines = 0;
+  $main::opt_functions = 0;
+  $main::opt_files = 0;
+  $main::opt_lib_prefix = "";
+
+  $main::opt_text = 0;
+  $main::opt_callgrind = 0;
+  $main::opt_list = "";
+  $main::opt_disasm = "";
+  $main::opt_symbols = 0;
+  $main::opt_gv = 0;
+  $main::opt_evince = 0;
+  $main::opt_web = 0;
+  $main::opt_dot = 0;
+  $main::opt_ps = 0;
+  $main::opt_pdf = 0;
+  $main::opt_gif = 0;
+  $main::opt_svg = 0;
+  $main::opt_raw = 0;
+
+  $main::opt_nodecount = 80;
+  $main::opt_nodefraction = 0.005;
+  $main::opt_edgefraction = 0.001;
+  $main::opt_maxdegree = 8;
+  $main::opt_focus = '';
+  $main::opt_thread = undef;
+  $main::opt_ignore = '';
+  $main::opt_scale = 0;
+  $main::opt_heapcheck = 0;
+  $main::opt_retain = '';
+  $main::opt_exclude = '';
+  $main::opt_seconds = 30;
+  $main::opt_lib = "";
+
+  $main::opt_inuse_space   = 0;
+  $main::opt_inuse_objects = 0;
+  $main::opt_alloc_space   = 0;
+  $main::opt_alloc_objects = 0;
+  $main::opt_show_bytes    = 0;
+  $main::opt_drop_negative = 0;
+  $main::opt_interactive   = 0;
+
+  $main::opt_total_delay = 0;
+  $main::opt_contentions = 0;
+  $main::opt_mean_delay = 0;
+
+  $main::opt_tools   = "";
+  $main::opt_debug   = 0;
+  $main::opt_test    = 0;
+
+  # These are undocumented flags used only by unittests.
+  $main::opt_test_stride = 0;
+
+  # Are we using $SYMBOL_PAGE?
+  $main::use_symbol_page = 0;
+
+  # Files returned by TempName.
+  %main::tempnames = ();
+
+  # Type of profile we are dealing with
+  # Supported types:
+  #     cpu
+  #     heap
+  #     growth
+  #     contention
+  $main::profile_type = '';     # Empty type means "unknown"
+
+  GetOptions("help!"          => \$main::opt_help,
+             "version!"       => \$main::opt_version,
+             "cum!"           => \$main::opt_cum,
+             "base=s"         => \$main::opt_base,
+             "seconds=i"      => \$main::opt_seconds,
+             "add_lib=s"      => \$main::opt_lib,
+             "lib_prefix=s"   => \$main::opt_lib_prefix,
+             "functions!"     => \$main::opt_functions,
+             "lines!"         => \$main::opt_lines,
+             "addresses!"     => \$main::opt_addresses,
+             "files!"         => \$main::opt_files,
+             "text!"          => \$main::opt_text,
+             "callgrind!"     => \$main::opt_callgrind,
+             "list=s"         => \$main::opt_list,
+             "disasm=s"       => \$main::opt_disasm,
+             "symbols!"       => \$main::opt_symbols,
+             "gv!"            => \$main::opt_gv,
+             "evince!"        => \$main::opt_evince,
+             "web!"           => \$main::opt_web,
+             "dot!"           => \$main::opt_dot,
+             "ps!"            => \$main::opt_ps,
+             "pdf!"           => \$main::opt_pdf,
+             "svg!"           => \$main::opt_svg,
+             "gif!"           => \$main::opt_gif,
+             "raw!"           => \$main::opt_raw,
+             "interactive!"   => \$main::opt_interactive,
+             "nodecount=i"    => \$main::opt_nodecount,
+             "nodefraction=f" => \$main::opt_nodefraction,
+             "edgefraction=f" => \$main::opt_edgefraction,
+             "maxdegree=i"    => \$main::opt_maxdegree,
+             "focus=s"        => \$main::opt_focus,
+             "thread=s"       => \$main::opt_thread,
+             "ignore=s"       => \$main::opt_ignore,
+             "scale=i"        => \$main::opt_scale,
+             "heapcheck"      => \$main::opt_heapcheck,
+             "retain=s"       => \$main::opt_retain,
+             "exclude=s"      => \$main::opt_exclude,
+             "inuse_space!"   => \$main::opt_inuse_space,
+             "inuse_objects!" => \$main::opt_inuse_objects,
+             "alloc_space!"   => \$main::opt_alloc_space,
+             "alloc_objects!" => \$main::opt_alloc_objects,
+             "show_bytes!"    => \$main::opt_show_bytes,
+             "drop_negative!" => \$main::opt_drop_negative,
+             "total_delay!"   => \$main::opt_total_delay,
+             "contentions!"   => \$main::opt_contentions,
+             "mean_delay!"    => \$main::opt_mean_delay,
+             "tools=s"        => \$main::opt_tools,
+             "test!"          => \$main::opt_test,
+             "debug!"         => \$main::opt_debug,
+             # Undocumented flags used only by unittests:
+             "test_stride=i"  => \$main::opt_test_stride,
+      ) || usage("Invalid option(s)");
+
+  # Deal with the standard --help and --version
+  if ($main::opt_help) {
+    print usage_string();
+    exit(0);
+  }
+
+  if ($main::opt_version) {
+    print version_string();
+    exit(0);
+  }
+
+  # Disassembly/listing/symbols mode requires address-level info
+  if ($main::opt_disasm || $main::opt_list || $main::opt_symbols) {
+    $main::opt_functions = 0;
+    $main::opt_lines = 0;
+    $main::opt_addresses = 1;
+    $main::opt_files = 0;
+  }
+
+  # Check heap-profiling flags
+  if ($main::opt_inuse_space +
+      $main::opt_inuse_objects +
+      $main::opt_alloc_space +
+      $main::opt_alloc_objects > 1) {
+    usage("Specify at most one of --inuse/--alloc options");
+  }
+
+  # Check output granularities
+  my $grains =
+      $main::opt_functions +
+      $main::opt_lines +
+      $main::opt_addresses +
+      $main::opt_files +
+      0;
+  if ($grains > 1) {
+    usage("Only specify one output granularity option");
+  }
+  if ($grains == 0) {
+    $main::opt_functions = 1;
+  }
+
+  # Check output modes
+  my $modes =
+      $main::opt_text +
+      $main::opt_callgrind +
+      ($main::opt_list eq '' ? 0 : 1) +
+      ($main::opt_disasm eq '' ? 0 : 1) +
+      ($main::opt_symbols == 0 ? 0 : 1) +
+      $main::opt_gv +
+      $main::opt_evince +
+      $main::opt_web +
+      $main::opt_dot +
+      $main::opt_ps +
+      $main::opt_pdf +
+      $main::opt_svg +
+      $main::opt_gif +
+      $main::opt_raw +
+      $main::opt_interactive +
+      0;
+  if ($modes > 1) {
+    usage("Only specify one output mode");
+  }
+  if ($modes == 0) {
+    if (-t STDOUT) {  # If STDOUT is a tty, activate interactive mode
+      $main::opt_interactive = 1;
+    } else {
+      $main::opt_text = 1;
+    }
+  }
+
+  if ($main::opt_test) {
+    RunUnitTests();
+    # Should not return
+    exit(1);
+  }
+
+  # Binary name and profile arguments list
+  $main::prog = "";
+  @main::pfile_args = ();
+
+  # Remote profiling without a binary (using $SYMBOL_PAGE instead)
+  if (@ARGV > 0) {
+    if (IsProfileURL($ARGV[0])) {
+      $main::use_symbol_page = 1;
+    } elsif (IsSymbolizedProfileFile($ARGV[0])) {
+      $main::use_symbolized_profile = 1;
+      $main::prog = $UNKNOWN_BINARY;  # will be set later from the profile file
+    }
+  }
+
+  if ($main::use_symbol_page || $main::use_symbolized_profile) {
+    # We don't need a binary!
+    my %disabled = ('--lines' => $main::opt_lines,
+                    '--disasm' => $main::opt_disasm);
+    for my $option (keys %disabled) {
+      usage("$option cannot be used without a binary") if $disabled{$option};
+    }
+    # Set $main::prog later...
+    scalar(@ARGV) || usage("Did not specify profile file");
+  } elsif ($main::opt_symbols) {
+    # --symbols needs a binary-name (to run nm on, etc) but not profiles
+    $main::prog = shift(@ARGV) || usage("Did not specify program");
+  } else {
+    $main::prog = shift(@ARGV) || usage("Did not specify program");
+    scalar(@ARGV) || usage("Did not specify profile file");
+  }
+
+  # Parse profile file/location arguments
+  foreach my $farg (@ARGV) {
+    if ($farg =~ m/(.*)\@([0-9]+)(|\/.*)$/ ) {
+      my $machine = $1;
+      my $num_machines = $2;
+      my $path = $3;
+      for (my $i = 0; $i < $num_machines; $i++) {
+        unshift(@main::pfile_args, "$i.$machine$path");
+      }
+    } else {
+      unshift(@main::pfile_args, $farg);
+    }
+  }
+
+  if ($main::use_symbol_page) {
+    unless (IsProfileURL($main::pfile_args[0])) {
+      error("The first profile should be a remote form to use $SYMBOL_PAGE\n");
+    }
+    CheckSymbolPage();
+    $main::prog = FetchProgramName();
+  } elsif (!$main::use_symbolized_profile) {  # may not need objtools!
+    ConfigureObjTools($main::prog)
+  }
+
+  # Break the opt_lib_prefix into the prefix_list array
+  @prefix_list = split (',', $main::opt_lib_prefix);
+
+  # Remove trailing / from the prefixes, in the list to prevent
+  # searching things like /my/path//lib/mylib.so
+  foreach (@prefix_list) {
+    s|/+$||;
+  }
+}
+
+sub FilterAndPrint {
+  my ($profile, $symbols, $libs, $thread) = @_;
+
+  # Get total data in profile
+  my $total = TotalProfile($profile);
+
+  # Remove uninteresting stack items
+  $profile = RemoveUninterestingFrames($symbols, $profile);
+
+  # Focus?
+  if ($main::opt_focus ne '') {
+    $profile = FocusProfile($symbols, $profile, $main::opt_focus);
+  }
+
+  # Ignore?
+  if ($main::opt_ignore ne '') {
+    $profile = IgnoreProfile($symbols, $profile, $main::opt_ignore);
+  }
+
+  my $calls = ExtractCalls($symbols, $profile);
+
+  # Reduce profiles to required output granularity, and also clean
+  # each stack trace so a given entry exists at most once.
+  my $reduced = ReduceProfile($symbols, $profile);
+
+  # Get derived profiles
+  my $flat = FlatProfile($reduced);
+  my $cumulative = CumulativeProfile($reduced);
+
+  # Print
+  if (!$main::opt_interactive) {
+    if ($main::opt_disasm) {
+      PrintDisassembly($libs, $flat, $cumulative, $main::opt_disasm);
+    } elsif ($main::opt_list) {
+      PrintListing($total, $libs, $flat, $cumulative, $main::opt_list, 0);
+    } elsif ($main::opt_text) {
+      # Make sure the output is empty when we have nothing to report
+      # (only matters when --heapcheck is given but we must be
+      # compatible with old branches that did not pass --heapcheck always):
+      if ($total != 0) {
+        printf("Total%s: %s %s\n",
+               (defined($thread) ? " (t$thread)" : ""),
+               Unparse($total), Units());
+      }
+      PrintText($symbols, $flat, $cumulative, -1);
+    } elsif ($main::opt_raw) {
+      PrintSymbolizedProfile($symbols, $profile, $main::prog);
+    } elsif ($main::opt_callgrind) {
+      PrintCallgrind($calls);
+    } else {
+      if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
+        if ($main::opt_gv) {
+          RunGV(TempName($main::next_tmpfile, "ps"), "");
+        } elsif ($main::opt_evince) {
+          RunEvince(TempName($main::next_tmpfile, "pdf"), "");
+        } elsif ($main::opt_web) {
+          my $tmp = TempName($main::next_tmpfile, "svg");
+          RunWeb($tmp);
+          # The command we run might hand the file name off
+          # to an already running browser instance and then exit.
+          # Normally, we'd remove $tmp on exit (right now),
+          # but fork a child to remove $tmp a little later, so that the
+          # browser has time to load it first.
+          delete $main::tempnames{$tmp};
+          if (fork() == 0) {
+            sleep 5;
+            unlink($tmp);
+            exit(0);
+          }
+        }
+      } else {
+        cleanup();
+        exit(1);
+      }
+    }
+  } else {
+    InteractiveMode($profile, $symbols, $libs, $total);
+  }
+}
+
+sub Main() {
+  Init();
+  $main::collected_profile = undef;
+  @main::profile_files = ();
+  $main::op_time = time();
+
+  # Printing symbols is special and requires a lot less info than most.
+  if ($main::opt_symbols) {
+    PrintSymbols(*STDIN);   # Get /proc/maps and symbols output from stdin
+    return;
+  }
+
+  # Fetch all profile data
+  FetchDynamicProfiles();
+
+  # this will hold symbols that we read from the profile files
+  my $symbol_map = {};
+
+  # Read one profile, pick the last item on the list
+  my $data = ReadProfile($main::prog, pop(@main::profile_files));
+  my $profile = $data->{profile};
+  my $pcs = $data->{pcs};
+  my $libs = $data->{libs};   # Info about main program and shared libraries
+  $symbol_map = MergeSymbols($symbol_map, $data->{symbols});
+
+  # Add additional profiles, if available.
+  if (scalar(@main::profile_files) > 0) {
+    foreach my $pname (@main::profile_files) {
+      my $data2 = ReadProfile($main::prog, $pname);
+      $profile = AddProfile($profile, $data2->{profile});
+      $pcs = AddPcs($pcs, $data2->{pcs});
+      $symbol_map = MergeSymbols($symbol_map, $data2->{symbols});
+    }
+  }
+
+  # Subtract base from profile, if specified
+  if ($main::opt_base ne '') {
+    my $base = ReadProfile($main::prog, $main::opt_base);
+    $profile = SubtractProfile($profile, $base->{profile});
+    $pcs = AddPcs($pcs, $base->{pcs});
+    $symbol_map = MergeSymbols($symbol_map, $base->{symbols});
+  }
+
+  # Collect symbols
+  my $symbols;
+  if ($main::use_symbolized_profile) {
+    $symbols = FetchSymbols($pcs, $symbol_map);
+  } elsif ($main::use_symbol_page) {
+    $symbols = FetchSymbols($pcs);
+  } else {
+    # TODO(csilvers): $libs uses the /proc/self/maps data from profile1,
+    # which may differ from the data from subsequent profiles, especially
+    # if they were run on different machines.  Use appropriate libs for
+    # each pc somehow.
+    $symbols = ExtractSymbols($libs, $pcs);
+  }
+
+  if (!defined($main::opt_thread)) {
+    FilterAndPrint($profile, $symbols, $libs);
+  }
+  if (defined($data->{threads})) {
+    foreach my $thread (sort { $a <=> $b } keys(%{$data->{threads}})) {
+      if (defined($main::opt_thread) &&
+          ($main::opt_thread eq '*' || $main::opt_thread == $thread)) {
+        my $thread_profile = $data->{threads}{$thread};
+        FilterAndPrint($thread_profile, $symbols, $libs, $thread);
+      }
+    }
+  }
+
+  cleanup();
+  exit(0);
+}
+
+##### Entry Point #####
+
+Main();
+
+# Temporary code to detect if we're running on a Goobuntu system.
+# These systems don't have the right stuff installed for the special
+# Readline libraries to work, so as a temporary workaround, we default
+# to using the normal stdio code, rather than the fancier readline-based
+# code
+sub ReadlineMightFail {
+  if (-e '/lib/libtermcap.so.2') {
+    return 0;  # libtermcap exists, so readline should be okay
+  } else {
+    return 1;
+  }
+}
+
+sub RunGV {
+  my $fname = shift;
+  my $bg = shift;       # "" or " &" if we should run in background
+  if (!system(ShellEscape(@GV, "--version") . " >$dev_null 2>&1")) {
+    # Options using double dash are supported by this gv version.
+    # Also, turn on noantialias to better handle bug in gv for
+    # postscript files with large dimensions.
+    # TODO: Maybe we should not pass the --noantialias flag
+    # if the gv version is known to work properly without the flag.
+    system(ShellEscape(@GV, "--scale=$main::opt_scale", "--noantialias", $fname)
+           . $bg);
+  } else {
+    # Old gv version - only supports options that use single dash.
+    print STDERR ShellEscape(@GV, "-scale", $main::opt_scale) . "\n";
+    system(ShellEscape(@GV, "-scale", "$main::opt_scale", $fname) . $bg);
+  }
+}
+
+sub RunEvince {
+  my $fname = shift;
+  my $bg = shift;       # "" or " &" if we should run in background
+  system(ShellEscape(@EVINCE, $fname) . $bg);
+}
+
+sub RunWeb {
+  my $fname = shift;
+  print STDERR "Loading web page file:///$fname\n";
+
+  if (`uname` =~ /Darwin/) {
+    # OS X: open will use standard preference for SVG files.
+    system("/usr/bin/open", $fname);
+    return;
+  }
+
+  # Some kind of Unix; try generic symlinks, then specific browsers.
+  # (Stop once we find one.)
+  # Works best if the browser is already running.
+  my @alt = (
+    "/etc/alternatives/gnome-www-browser",
+    "/etc/alternatives/x-www-browser",
+    "google-chrome",
+    "firefox",
+  );
+  foreach my $b (@alt) {
+    if (system($b, $fname) == 0) {
+      return;
+    }
+  }
+
+  print STDERR "Could not load web browser.\n";
+}
+
+sub RunKcachegrind {
+  my $fname = shift;
+  my $bg = shift;       # "" or " &" if we should run in background
+  print STDERR "Starting '@KCACHEGRIND " . $fname . $bg . "'\n";
+  system(ShellEscape(@KCACHEGRIND, $fname) . $bg);
+}
+
+
+##### Interactive helper routines #####
+
+sub InteractiveMode {
+  $| = 1;  # Make output unbuffered for interactive mode
+  my ($orig_profile, $symbols, $libs, $total) = @_;
+
+  print STDERR "Welcome to jeprof!  For help, type 'help'.\n";
+
+  # Use ReadLine if it's installed and input comes from a console.
+  if ( -t STDIN &&
+       !ReadlineMightFail() &&
+       defined(eval {require Term::ReadLine}) ) {
+    my $term = new Term::ReadLine 'jeprof';
+    while ( defined ($_ = $term->readline('(jeprof) '))) {
+      $term->addhistory($_) if /\S/;
+      if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
+        last;    # exit when we get an interactive command to quit
+      }
+    }
+  } else {       # don't have readline
+    while (1) {
+      print STDERR "(jeprof) ";
+      $_ = <STDIN>;
+      last if ! defined $_ ;
+      s/\r//g;         # turn windows-looking lines into unix-looking lines
+
+      # Save some flags that might be reset by InteractiveCommand()
+      my $save_opt_lines = $main::opt_lines;
+
+      if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
+        last;    # exit when we get an interactive command to quit
+      }
+
+      # Restore flags
+      $main::opt_lines = $save_opt_lines;
+    }
+  }
+}
+
+# Takes two args: orig profile, and command to run.
+# Returns 1 if we should keep going, or 0 if we were asked to quit
+sub InteractiveCommand {
+  my($orig_profile, $symbols, $libs, $total, $command) = @_;
+  $_ = $command;                # just to make future m//'s easier
+  if (!defined($_)) {
+    print STDERR "\n";
+    return 0;
+  }
+  if (m/^\s*quit/) {
+    return 0;
+  }
+  if (m/^\s*help/) {
+    InteractiveHelpMessage();
+    return 1;
+  }
+  # Clear all the mode options -- mode is controlled by "$command"
+  $main::opt_text = 0;
+  $main::opt_callgrind = 0;
+  $main::opt_disasm = 0;
+  $main::opt_list = 0;
+  $main::opt_gv = 0;
+  $main::opt_evince = 0;
+  $main::opt_cum = 0;
+
+  if (m/^\s*(text|top)(\d*)\s*(.*)/) {
+    $main::opt_text = 1;
+
+    my $line_limit = ($2 ne "") ? int($2) : 10;
+
+    my $routine;
+    my $ignore;
+    ($routine, $ignore) = ParseInteractiveArgs($3);
+
+    my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
+    my $reduced = ReduceProfile($symbols, $profile);
+
+    # Get derived profiles
+    my $flat = FlatProfile($reduced);
+    my $cumulative = CumulativeProfile($reduced);
+
+    PrintText($symbols, $flat, $cumulative, $line_limit);
+    return 1;
+  }
+  if (m/^\s*callgrind\s*([^ \n]*)/) {
+    $main::opt_callgrind = 1;
+
+    # Get derived profiles
+    my $calls = ExtractCalls($symbols, $orig_profile);
+    my $filename = $1;
+    if ( $1 eq '' ) {
+      $filename = TempName($main::next_tmpfile, "callgrind");
+    }
+    PrintCallgrind($calls, $filename);
+    if ( $1 eq '' ) {
+      RunKcachegrind($filename, " & ");
+      $main::next_tmpfile++;
+    }
+
+    return 1;
+  }
+  if (m/^\s*(web)?list\s*(.+)/) {
+    my $html = (defined($1) && ($1 eq "web"));
+    $main::opt_list = 1;
+
+    my $routine;
+    my $ignore;
+    ($routine, $ignore) = ParseInteractiveArgs($2);
+
+    my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
+    my $reduced = ReduceProfile($symbols, $profile);
+
+    # Get derived profiles
+    my $flat = FlatProfile($reduced);
+    my $cumulative = CumulativeProfile($reduced);
+
+    PrintListing($total, $libs, $flat, $cumulative, $routine, $html);
+    return 1;
+  }
+  if (m/^\s*disasm\s*(.+)/) {
+    $main::opt_disasm = 1;
+
+    my $routine;
+    my $ignore;
+    ($routine, $ignore) = ParseInteractiveArgs($1);
+
+    # Process current profile to account for various settings
+    my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
+    my $reduced = ReduceProfile($symbols, $profile);
+
+    # Get derived profiles
+    my $flat = FlatProfile($reduced);
+    my $cumulative = CumulativeProfile($reduced);
+
+    PrintDisassembly($libs, $flat, $cumulative, $routine);
+    return 1;
+  }
+  if (m/^\s*(gv|web|evince)\s*(.*)/) {
+    $main::opt_gv = 0;
+    $main::opt_evince = 0;
+    $main::opt_web = 0;
+    if ($1 eq "gv") {
+      $main::opt_gv = 1;
+    } elsif ($1 eq "evince") {
+      $main::opt_evince = 1;
+    } elsif ($1 eq "web") {
+      $main::opt_web = 1;
+    }
+
+    my $focus;
+    my $ignore;
+    ($focus, $ignore) = ParseInteractiveArgs($2);
+
+    # Process current profile to account for various settings
+    my $profile = ProcessProfile($total, $orig_profile, $symbols,
+                                 $focus, $ignore);
+    my $reduced = ReduceProfile($symbols, $profile);
+
+    # Get derived profiles
+    my $flat = FlatProfile($reduced);
+    my $cumulative = CumulativeProfile($reduced);
+
+    if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
+      if ($main::opt_gv) {
+        RunGV(TempName($main::next_tmpfile, "ps"), " &");
+      } elsif ($main::opt_evince) {
+        RunEvince(TempName($main::next_tmpfile, "pdf"), " &");
+      } elsif ($main::opt_web) {
+        RunWeb(TempName($main::next_tmpfile, "svg"));
+      }
+      $main::next_tmpfile++;
+    }
+    return 1;
+  }
+  if (m/^\s*$/) {
+    return 1;
+  }
+  print STDERR "Unknown command: try 'help'.\n";
+  return 1;
+}
+
+
+sub ProcessProfile {
+  my $total_count = shift;
+  my $orig_profile = shift;
+  my $symbols = shift;
+  my $focus = shift;
+  my $ignore = shift;
+
+  # Process current profile to account for various settings
+  my $profile = $orig_profile;
+  printf("Total: %s %s\n", Unparse($total_count), Units());
+  if ($focus ne '') {
+    $profile = FocusProfile($symbols, $profile, $focus);
+    my $focus_count = TotalProfile($profile);
+    printf("After focusing on '%s': %s %s of %s (%0.1f%%)\n",
+           $focus,
+           Unparse($focus_count), Units(),
+           Unparse($total_count), ($focus_count*100.0) / $total_count);
+  }
+  if ($ignore ne '') {
+    $profile = IgnoreProfile($symbols, $profile, $ignore);
+    my $ignore_count = TotalProfile($profile);
+    printf("After ignoring '%s': %s %s of %s (%0.1f%%)\n",
+           $ignore,
+           Unparse($ignore_count), Units(),
+           Unparse($total_count),
+           ($ignore_count*100.0) / $total_count);
+  }
+
+  return $profile;
+}
+
+sub InteractiveHelpMessage {
+  print STDERR <<ENDOFHELP;
+Interactive jeprof mode
+
+Commands:
+  gv
+  gv [focus] [-ignore1] [-ignore2]
+      Show graphical hierarchical display of current profile.  Without
+      any arguments, shows all samples in the profile.  With the optional
+      "focus" argument, restricts the samples shown to just those where
+      the "focus" regular expression matches a routine name on the stack
+      trace.
+
+  web
+  web [focus] [-ignore1] [-ignore2]
+      Like GV, but displays profile in your web browser instead of using
+      Ghostview. Works best if your web browser is already running.
+      To change the browser that gets used:
+      On Linux, set the /etc/alternatives/gnome-www-browser symlink.
+      On OS X, change the Finder association for SVG files.
+
+  list [routine_regexp] [-ignore1] [-ignore2]
+      Show source listing of routines whose names match "routine_regexp"
+
+  weblist [routine_regexp] [-ignore1] [-ignore2]
+     Displays a source listing of routines whose names match "routine_regexp"
+     in a web browser.  You can click on source lines to view the
+     corresponding disassembly.
+
+  top [--cum] [-ignore1] [-ignore2]
+  top20 [--cum] [-ignore1] [-ignore2]
+  top37 [--cum] [-ignore1] [-ignore2]
+      Show top lines ordered by flat profile count, or cumulative count
+      if --cum is specified.  If a number is present after 'top', the
+      top K routines will be shown (defaults to showing the top 10)
+
+  disasm [routine_regexp] [-ignore1] [-ignore2]
+      Show disassembly of routines whose names match "routine_regexp",
+      annotated with sample counts.
+
+  callgrind
+  callgrind [filename]
+      Generates callgrind file. If no filename is given, kcachegrind is called.
+
+  help - This listing
+  quit or ^D - End jeprof
+
+For commands that accept optional -ignore tags, samples where any routine in
+the stack trace matches the regular expression in any of the -ignore
+parameters will be ignored.
+
+Further pprof details are available at this location (or one similar):
+
+ /usr/doc/gperftools-$PPROF_VERSION/cpu_profiler.html
+ /usr/doc/gperftools-$PPROF_VERSION/heap_profiler.html
+
+ENDOFHELP
+}
+sub ParseInteractiveArgs {
+  my $args = shift;
+  my $focus = "";
+  my $ignore = "";
+  my @x = split(/ +/, $args);
+  foreach $a (@x) {
+    if ($a =~ m/^(--|-)lines$/) {
+      $main::opt_lines = 1;
+    } elsif ($a =~ m/^(--|-)cum$/) {
+      $main::opt_cum = 1;
+    } elsif ($a =~ m/^-(.*)/) {
+      $ignore .= (($ignore ne "") ? "|" : "" ) . $1;
+    } else {
+      $focus .= (($focus ne "") ? "|" : "" ) . $a;
+    }
+  }
+  if ($ignore ne "") {
+    print STDERR "Ignoring samples in call stacks that match '$ignore'\n";
+  }
+  return ($focus, $ignore);
+}
+
+##### Output code #####
+
+sub TempName {
+  my $fnum = shift;
+  my $ext = shift;
+  my $file = "$main::tmpfile_ps.$fnum.$ext";
+  $main::tempnames{$file} = 1;
+  return $file;
+}
+
+# Print profile data in packed binary format (64-bit) to standard out
+sub PrintProfileData {
+  my $profile = shift;
+
+  # print header (64-bit style)
+  # (zero) (header-size) (version) (sample-period) (zero)
+  print pack('L*', 0, 0, 3, 0, 0, 0, 1, 0, 0, 0);
+
+  foreach my $k (keys(%{$profile})) {
+    my $count = $profile->{$k};
+    my @addrs = split(/\n/, $k);
+    if ($#addrs >= 0) {
+      my $depth = $#addrs + 1;
+      # int(foo / 2**32) is the only reliable way to get rid of bottom
+      # 32 bits on both 32- and 64-bit systems.
+      print pack('L*', $count & 0xFFFFFFFF, int($count / 2**32));
+      print pack('L*', $depth & 0xFFFFFFFF, int($depth / 2**32));
+
+      foreach my $full_addr (@addrs) {
+        my $addr = $full_addr;
+        $addr =~ s/0x0*//;  # strip off leading 0x, zeroes
+        if (length($addr) > 16) {
+          print STDERR "Invalid address in profile: $full_addr\n";
+          next;
+        }
+        my $low_addr = substr($addr, -8);       # get last 8 hex chars
+        my $high_addr = substr($addr, -16, 8);  # get up to 8 more hex chars
+        print pack('L*', hex('0x' . $low_addr), hex('0x' . $high_addr));
+      }
+    }
+  }
+}
+
+# Print symbols and profile data
+sub PrintSymbolizedProfile {
+  my $symbols = shift;
+  my $profile = shift;
+  my $prog = shift;
+
+  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
+  my $symbol_marker = $&;
+
+  print '--- ', $symbol_marker, "\n";
+  if (defined($prog)) {
+    print 'binary=', $prog, "\n";
+  }
+  while (my ($pc, $name) = each(%{$symbols})) {
+    my $sep = ' ';
+    print '0x', $pc;
+    # We have a list of function names, which include the inlined
+    # calls.  They are separated (and terminated) by --, which is
+    # illegal in function names.
+    for (my $j = 2; $j <= $#{$name}; $j += 3) {
+      print $sep, $name->[$j];
+      $sep = '--';
+    }
+    print "\n";
+  }
+  print '---', "\n";
+
+  my $profile_marker;
+  if ($main::profile_type eq 'heap') {
+    $HEAP_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
+    $profile_marker = $&;
+  } elsif ($main::profile_type eq 'growth') {
+    $GROWTH_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
+    $profile_marker = $&;
+  } elsif ($main::profile_type eq 'contention') {
+    $CONTENTION_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
+    $profile_marker = $&;
+  } else { # elsif ($main::profile_type eq 'cpu')
+    $PROFILE_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
+    $profile_marker = $&;
+  }
+
+  print '--- ', $profile_marker, "\n";
+  if (defined($main::collected_profile)) {
+    # if used with remote fetch, simply dump the collected profile to output.
+    open(SRC, "<$main::collected_profile");
+    while (<SRC>) {
+      print $_;
+    }
+    close(SRC);
+  } else {
+    # --raw/http: For everything to work correctly for non-remote profiles, we
+    # would need to extend PrintProfileData() to handle all possible profile
+    # types, re-enable the code that is currently disabled in ReadCPUProfile()
+    # and FixCallerAddresses(), and remove the remote profile dumping code in
+    # the block above.
+    die "--raw/http: jeprof can only dump remote profiles for --raw\n";
+    # dump a cpu-format profile to standard out
+    PrintProfileData($profile);
+  }
+}
+
+# Print text output
+sub PrintText {
+  my $symbols = shift;
+  my $flat = shift;
+  my $cumulative = shift;
+  my $line_limit = shift;
+
+  my $total = TotalProfile($flat);
+
+  # Which profile to sort by?
+  my $s = $main::opt_cum ? $cumulative : $flat;
+
+  my $running_sum = 0;
+  my $lines = 0;
+  foreach my $k (sort { GetEntry($s, $b) <=> GetEntry($s, $a) || $a cmp $b }
+                 keys(%{$cumulative})) {
+    my $f = GetEntry($flat, $k);
+    my $c = GetEntry($cumulative, $k);
+    $running_sum += $f;
+
+    my $sym = $k;
+    if (exists($symbols->{$k})) {
+      $sym = $symbols->{$k}->[0] . " " . $symbols->{$k}->[1];
+      if ($main::opt_addresses) {
+        $sym = $k . " " . $sym;
+      }
+    }
+
+    if ($f != 0 || $c != 0) {
+      printf("%8s %6s %6s %8s %6s %s\n",
+             Unparse($f),
+             Percent($f, $total),
+             Percent($running_sum, $total),
+             Unparse($c),
+             Percent($c, $total),
+             $sym);
+    }
+    $lines++;
+    last if ($line_limit >= 0 && $lines >= $line_limit);
+  }
+}
+
+# Callgrind format has a compression for repeated function and file
+# names.  You show the name the first time, and just use its number
+# subsequently.  This can cut down the file to about a third or a
+# quarter of its uncompressed size.  $key and $val are the key/value
+# pair that would normally be printed by callgrind; $map is a map from
+# value to number.
+sub CompressedCGName {
+  my($key, $val, $map) = @_;
+  my $idx = $map->{$val};
+  # For very short keys, providing an index hurts rather than helps.
+  if (length($val) <= 3) {
+    return "$key=$val\n";
+  } elsif (defined($idx)) {
+    return "$key=($idx)\n";
+  } else {
+    # scalar(keys(%{$map})) gives the number of items in the map.
+    $idx = scalar(keys(%{$map})) + 1;
+    $map->{$val} = $idx;
+    return "$key=($idx) $val\n";
+  }
+}
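+# For illustration (hypothetical values): the first call for a long value,
+# e.g. CompressedCGName("fl", "src/foo.c", \%map), yields "fl=(1) src/foo.c\n";
+# later calls with the same value yield just "fl=(1)\n", and values of three
+# characters or fewer are always emitted verbatim, e.g. "fl=ab\n".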
+
+# Print the call graph in a way that's suitable for callgrind.
+sub PrintCallgrind {
+  my $calls = shift;
+  my $filename;
+  my %filename_to_index_map;
+  my %fnname_to_index_map;
+
+  if ($main::opt_interactive) {
+    $filename = shift;
+    print STDERR "Writing callgrind file to '$filename'.\n"
+  } else {
+    $filename = "&STDOUT";
+  }
+  open(CG, ">$filename");
+  printf CG ("events: Hits\n\n");
+  foreach my $call ( map { $_->[0] }
+                     sort { $a->[1] cmp $b ->[1] ||
+                            $a->[2] <=> $b->[2] }
+                     map { /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
+                           [$_, $1, $2] }
+                     keys %$calls ) {
+    my $count = int($calls->{$call});
+    $call =~ /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
+    my ( $caller_file, $caller_line, $caller_function,
+         $callee_file, $callee_line, $callee_function ) =
+       ( $1, $2, $3, $5, $6, $7 );
+
+    # TODO(csilvers): for better compression, collect all the
+    # caller/callee_files and functions first, before printing
+    # anything, and only compress those referenced more than once.
+    printf CG CompressedCGName("fl", $caller_file, \%filename_to_index_map);
+    printf CG CompressedCGName("fn", $caller_function, \%fnname_to_index_map);
+    if (defined $6) {
+      printf CG CompressedCGName("cfl", $callee_file, \%filename_to_index_map);
+      printf CG CompressedCGName("cfn", $callee_function, \%fnname_to_index_map);
+      printf CG ("calls=$count $callee_line\n");
+    }
+    printf CG ("$caller_line $count\n\n");
+  }
+}
+
+# Print disassembly for all routines that match $main::opt_disasm
+sub PrintDisassembly {
+  my $libs = shift;
+  my $flat = shift;
+  my $cumulative = shift;
+  my $disasm_opts = shift;
+
+  my $total = TotalProfile($flat);
+
+  foreach my $lib (@{$libs}) {
+    my $symbol_table = GetProcedureBoundaries($lib->[0], $disasm_opts);
+    my $offset = AddressSub($lib->[1], $lib->[3]);
+    foreach my $routine (sort ByName keys(%{$symbol_table})) {
+      my $start_addr = $symbol_table->{$routine}->[0];
+      my $end_addr = $symbol_table->{$routine}->[1];
+      # See if there are any samples in this routine
+      my $length = hex(AddressSub($end_addr, $start_addr));
+      my $addr = AddressAdd($start_addr, $offset);
+      for (my $i = 0; $i < $length; $i++) {
+        if (defined($cumulative->{$addr})) {
+          PrintDisassembledFunction($lib->[0], $offset,
+                                    $routine, $flat, $cumulative,
+                                    $start_addr, $end_addr, $total);
+          last;
+        }
+        $addr = AddressInc($addr);
+      }
+    }
+  }
+}
+
+# Return reference to array of tuples of the form:
+#       [start_address, filename, linenumber, instruction, limit_address]
+# E.g.,
+#       ["0x806c43d", "/foo/bar.cc", 131, "ret", "0x806c440"]
+sub Disassemble {
+  my $prog = shift;
+  my $offset = shift;
+  my $start_addr = shift;
+  my $end_addr = shift;
+
+  my $objdump = $obj_tool_map{"objdump"};
+  my $cmd = ShellEscape($objdump, "-C", "-d", "-l", "--no-show-raw-insn",
+                        "--start-address=0x$start_addr",
+                        "--stop-address=0x$end_addr", $prog);
+  open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
+  my @result = ();
+  my $filename = "";
+  my $linenumber = -1;
+  my $last = ["", "", "", ""];
+  while (<OBJDUMP>) {
+    s/\r//g;         # turn windows-looking lines into unix-looking lines
+    chop;
+    if (m|\s*([^:\s]+):(\d+)\s*$|) {
+      # Location line of the form:
+      #   <filename>:<linenumber>
+      $filename = $1;
+      $linenumber = $2;
+    } elsif (m/^ +([0-9a-f]+):\s*(.*)/) {
+      # Disassembly line -- zero-extend address to full length
+      my $addr = HexExtend($1);
+      my $k = AddressAdd($addr, $offset);
+      $last->[4] = $k;   # Store ending address for previous instruction
+      $last = [$k, $filename, $linenumber, $2, $end_addr];
+      push(@result, $last);
+    }
+  }
+  close(OBJDUMP);
+  return @result;
+}
+
+# The input file should contain lines that are either /proc/maps-style
+# output (the same format expected in the profiles) or hex addresses
+# (like "0xDEADBEEF").  We parse all the /proc/maps output, and for
+# each hex address we output its "short" symbol name, one per line, in
+# the same order as the input.
+sub PrintSymbols {
+  my $maps_and_symbols_file = shift;
+
+  # ParseLibraries expects pcs to be in a set.  Fine by us...
+  my @pclist = ();   # pcs in sorted order
+  my $pcs = {};
+  my $map = "";
+  foreach my $line (<$maps_and_symbols_file>) {
+    $line =~ s/\r//g;    # turn windows-looking lines into unix-looking lines
+    if ($line =~ /\b(0x[0-9a-f]+)\b/i) {
+      push(@pclist, HexExtend($1));
+      $pcs->{$pclist[-1]} = 1;
+    } else {
+      $map .= $line;
+    }
+  }
+
+  my $libs = ParseLibraries($main::prog, $map, $pcs);
+  my $symbols = ExtractSymbols($libs, $pcs);
+
+  foreach my $pc (@pclist) {
+    # ->[0] is the shortname, ->[2] is the full name
+    print(($symbols->{$pc}->[0] || "??") . "\n");
+  }
+}
+
+
+# For sorting functions by name
+sub ByName {
+  return ShortFunctionName($a) cmp ShortFunctionName($b);
+}
+
+# Print source-listing for all routines that match $list_opts
+sub PrintListing {
+  my $total = shift;
+  my $libs = shift;
+  my $flat = shift;
+  my $cumulative = shift;
+  my $list_opts = shift;
+  my $html = shift;
+
+  my $output = \*STDOUT;
+  my $fname = "";
+
+  if ($html) {
+    # Arrange to write the output to a temporary file
+    $fname = TempName($main::next_tmpfile, "html");
+    $main::next_tmpfile++;
+    if (!open(TEMP, ">$fname")) {
+      print STDERR "$fname: $!\n";
+      return;
+    }
+    $output = \*TEMP;
+    print $output HtmlListingHeader();
+    printf $output ("<div class=\"legend\">%s<br>Total: %s %s</div>\n",
+                    $main::prog, Unparse($total), Units());
+  }
+
+  my $listed = 0;
+  foreach my $lib (@{$libs}) {
+    my $symbol_table = GetProcedureBoundaries($lib->[0], $list_opts);
+    my $offset = AddressSub($lib->[1], $lib->[3]);
+    foreach my $routine (sort ByName keys(%{$symbol_table})) {
+      # Print if there are any samples in this routine
+      my $start_addr = $symbol_table->{$routine}->[0];
+      my $end_addr = $symbol_table->{$routine}->[1];
+      my $length = hex(AddressSub($end_addr, $start_addr));
+      my $addr = AddressAdd($start_addr, $offset);
+      for (my $i = 0; $i < $length; $i++) {
+        if (defined($cumulative->{$addr})) {
+          $listed += PrintSource(
+            $lib->[0], $offset,
+            $routine, $flat, $cumulative,
+            $start_addr, $end_addr,
+            $html,
+            $output);
+          last;
+        }
+        $addr = AddressInc($addr);
+      }
+    }
+  }
+
+  if ($html) {
+    if ($listed > 0) {
+      print $output HtmlListingFooter();
+      close($output);
+      RunWeb($fname);
+    } else {
+      close($output);
+      unlink($fname);
+    }
+  }
+}
+
+sub HtmlListingHeader {
+  return <<'EOF';
+<!DOCTYPE html>
+<html>
+<head>
+<title>Pprof listing</title>
+<style type="text/css">
+body {
+  font-family: sans-serif;
+}
+h1 {
+  font-size: 1.5em;
+  margin-bottom: 4px;
+}
+.legend {
+  font-size: 1.25em;
+}
+.line {
+  color: #aaaaaa;
+}
+.nop {
+  color: #aaaaaa;
+}
+.unimportant {
+  color: #cccccc;
+}
+.disasmloc {
+  color: #000000;
+}
+.deadsrc {
+  cursor: pointer;
+}
+.deadsrc:hover {
+  background-color: #eeeeee;
+}
+.livesrc {
+  color: #0000ff;
+  cursor: pointer;
+}
+.livesrc:hover {
+  background-color: #eeeeee;
+}
+.asm {
+  color: #008800;
+  display: none;
+}
+</style>
+<script type="text/javascript">
+function jeprof_toggle_asm(e) {
+  var target;
+  if (!e) e = window.event;
+  if (e.target) target = e.target;
+  else if (e.srcElement) target = e.srcElement;
+
+  if (target) {
+    var asm = target.nextSibling;
+    if (asm && asm.className == "asm") {
+      asm.style.display = (asm.style.display == "block" ? "" : "block");
+      e.preventDefault();
+      return false;
+    }
+  }
+}
+</script>
+</head>
+<body>
+EOF
+}
+
+sub HtmlListingFooter {
+  return <<'EOF';
+</body>
+</html>
+EOF
+}
+
+sub HtmlEscape {
+  my $text = shift;
+  $text =~ s/&/&amp;/g;
+  $text =~ s/</&lt;/g;
+  $text =~ s/>/&gt;/g;
+  return $text;
+}
+
+# Returns the indentation of the line, if it has any non-whitespace
+# characters.  Otherwise, returns -1.
+sub Indentation {
+  my $line = shift;
+  if ($line =~ m/^(\s*)\S/) {
+    return length($1);
+  } else {
+    return -1;
+  }
+}
+
+# If the symbol table contains inlining info, Disassemble() may tag an
+# instruction with a location inside an inlined function.  But for
+# source listings, we prefer to use the location in the function we
+# are listing.  So use MapToSymbols() to fetch full location
+# information for each instruction and then pick out the first
+# location from a location list (location list contains callers before
+# callees in case of inlining).
+#
+# After this routine has run, each entry in $instructions contains:
+#   [0] start address
+#   [1] filename for function we are listing
+#   [2] line number for function we are listing
+#   [3] disassembly
+#   [4] limit address
+#   [5] most specific filename (may be different from [1] due to inlining)
+#   [6] most specific line number (may be different from [2] due to inlining)
+sub GetTopLevelLineNumbers {
+  my ($lib, $offset, $instructions) = @_;
+  my $pcs = [];
+  for (my $i = 0; $i <= $#{$instructions}; $i++) {
+    push(@{$pcs}, $instructions->[$i]->[0]);
+  }
+  my $symbols = {};
+  MapToSymbols($lib, $offset, $pcs, $symbols);
+  for (my $i = 0; $i <= $#{$instructions}; $i++) {
+    my $e = $instructions->[$i];
+    push(@{$e}, $e->[1]);
+    push(@{$e}, $e->[2]);
+    my $addr = $e->[0];
+    my $sym = $symbols->{$addr};
+    if (defined($sym)) {
+      if ($#{$sym} >= 2 && $sym->[1] =~ m/^(.*):(\d+)$/) {
+        $e->[1] = $1;  # File name
+        $e->[2] = $2;  # Line number
+      }
+    }
+  }
+}
+
+# Print source-listing for one routine
+sub PrintSource {
+  my $prog = shift;
+  my $offset = shift;
+  my $routine = shift;
+  my $flat = shift;
+  my $cumulative = shift;
+  my $start_addr = shift;
+  my $end_addr = shift;
+  my $html = shift;
+  my $output = shift;
+
+  # Disassemble all instructions (just to get line numbers)
+  my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr);
+  GetTopLevelLineNumbers($prog, $offset, \@instructions);
+
+  # Hack 1: assume that the first source file encountered in the
+  # disassembly contains the routine
+  my $filename = undef;
+  for (my $i = 0; $i <= $#instructions; $i++) {
+    if ($instructions[$i]->[2] >= 0) {
+      $filename = $instructions[$i]->[1];
+      last;
+    }
+  }
+  if (!defined($filename)) {
+    print STDERR "no filename found in $routine\n";
+    return 0;
+  }
+
+  # Hack 2: assume that the largest line number from $filename is the
+  # end of the procedure.  This is typically safe since if P1 contains
+  # an inlined call to P2, then P2 usually occurs earlier in the
+  # source file.  If this does not work, we might have to compute a
+  # density profile or just print all regions we find.
+  my $lastline = 0;
+  for (my $i = 0; $i <= $#instructions; $i++) {
+    my $f = $instructions[$i]->[1];
+    my $l = $instructions[$i]->[2];
+    if (($f eq $filename) && ($l > $lastline)) {
+      $lastline = $l;
+    }
+  }
+
+  # Hack 3: assume the first source location from "filename" is the start of
+  # the source code.
+  my $firstline = 1;
+  for (my $i = 0; $i <= $#instructions; $i++) {
+    if ($instructions[$i]->[1] eq $filename) {
+      $firstline = $instructions[$i]->[2];
+      last;
+    }
+  }
+
+  # Hack 4: Extend last line forward until its indentation is less than
+  # the indentation we saw on $firstline
+  my $oldlastline = $lastline;
+  {
+    if (!open(FILE, "<$filename")) {
+      print STDERR "$filename: $!\n";
+      return 0;
+    }
+    my $l = 0;
+    my $first_indentation = -1;
+    while (<FILE>) {
+      s/\r//g;         # turn windows-looking lines into unix-looking lines
+      $l++;
+      my $indent = Indentation($_);
+      if ($l >= $firstline) {
+        if ($first_indentation < 0 && $indent >= 0) {
+          $first_indentation = $indent;
+          last if ($first_indentation == 0);
+        }
+      }
+      if ($l >= $lastline && $indent >= 0) {
+        if ($indent >= $first_indentation) {
+          $lastline = $l+1;
+        } else {
+          last;
+        }
+      }
+    }
+    close(FILE);
+  }
+
+  # Assign all samples to the range $firstline..$lastline.
+  # Hack 5: If an instruction does not occur in the range, its samples
+  # are moved to the next instruction that occurs in the range.
+  my $samples1 = {};        # Map from line number to flat count
+  my $samples2 = {};        # Map from line number to cumulative count
+  my $running1 = 0;         # Unassigned flat counts
+  my $running2 = 0;         # Unassigned cumulative counts
+  my $total1 = 0;           # Total flat counts
+  my $total2 = 0;           # Total cumulative counts
+  my %disasm = ();          # Map from line number to disassembly
+  my $running_disasm = "";  # Unassigned disassembly
+  my $skip_marker = "---\n";
+  if ($html) {
+    $skip_marker = "";
+    for (my $l = $firstline; $l <= $lastline; $l++) {
+      $disasm{$l} = "";
+    }
+  }
+  my $last_dis_filename = '';
+  my $last_dis_linenum = -1;
+  my $last_touched_line = -1;  # To detect gaps in disassembly for a line
+  foreach my $e (@instructions) {
+    # Add up counts for all addresses that fall inside this instruction
+    my $c1 = 0;
+    my $c2 = 0;
+    for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
+      $c1 += GetEntry($flat, $a);
+      $c2 += GetEntry($cumulative, $a);
+    }
+
+    if ($html) {
+      my $dis = sprintf("      %6s %6s \t\t%8s: %s ",
+                        HtmlPrintNumber($c1),
+                        HtmlPrintNumber($c2),
+                        UnparseAddress($offset, $e->[0]),
+                        CleanDisassembly($e->[3]));
+
+      # Append the most specific source line associated with this instruction
+      if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) };
+      $dis = HtmlEscape($dis);
+      my $f = $e->[5];
+      my $l = $e->[6];
+      if ($f ne $last_dis_filename) {
+        $dis .= sprintf("<span class=disasmloc>%s:%d</span>",
+                        HtmlEscape(CleanFileName($f)), $l);
+      } elsif ($l ne $last_dis_linenum) {
+        # De-emphasize the unchanged file name portion
+        $dis .= sprintf("<span class=unimportant>%s</span>" .
+                        "<span class=disasmloc>:%d</span>",
+                        HtmlEscape(CleanFileName($f)), $l);
+      } else {
+        # De-emphasize the entire location
+        $dis .= sprintf("<span class=unimportant>%s:%d</span>",
+                        HtmlEscape(CleanFileName($f)), $l);
+      }
+      $last_dis_filename = $f;
+      $last_dis_linenum = $l;
+      $running_disasm .= $dis;
+      $running_disasm .= "\n";
+    }
+
+    $running1 += $c1;
+    $running2 += $c2;
+    $total1 += $c1;
+    $total2 += $c2;
+    my $file = $e->[1];
+    my $line = $e->[2];
+    if (($file eq $filename) &&
+        ($line >= $firstline) &&
+        ($line <= $lastline)) {
+      # Assign all accumulated samples to this line
+      AddEntry($samples1, $line, $running1);
+      AddEntry($samples2, $line, $running2);
+      $running1 = 0;
+      $running2 = 0;
+      if ($html) {
+        if ($line != $last_touched_line && $disasm{$line} ne '') {
+          $disasm{$line} .= "\n";
+        }
+        $disasm{$line} .= $running_disasm;
+        $running_disasm = '';
+        $last_touched_line = $line;
+      }
+    }
+  }
+
+  # Assign any leftover samples to $lastline
+  AddEntry($samples1, $lastline, $running1);
+  AddEntry($samples2, $lastline, $running2);
+  if ($html) {
+    if ($lastline != $last_touched_line && $disasm{$lastline} ne '') {
+      $disasm{$lastline} .= "\n";
+    }
+    $disasm{$lastline} .= $running_disasm;
+  }
+
+  if ($html) {
+    printf $output (
+      "<h1>%s</h1>%s\n<pre onClick=\"jeprof_toggle_asm()\">\n" .
+      "Total:%6s %6s (flat / cumulative %s)\n",
+      HtmlEscape(ShortFunctionName($routine)),
+      HtmlEscape(CleanFileName($filename)),
+      Unparse($total1),
+      Unparse($total2),
+      Units());
+  } else {
+    printf $output (
+      "ROUTINE ====================== %s in %s\n" .
+      "%6s %6s Total %s (flat / cumulative)\n",
+      ShortFunctionName($routine),
+      CleanFileName($filename),
+      Unparse($total1),
+      Unparse($total2),
+      Units());
+  }
+  if (!open(FILE, "<$filename")) {
+    print STDERR "$filename: $!\n";
+    return 0;
+  }
+  my $l = 0;
+  while (<FILE>) {
+    s/\r//g;         # turn windows-looking lines into unix-looking lines
+    $l++;
+    if ($l >= $firstline - 5 &&
+        (($l <= $oldlastline + 5) || ($l <= $lastline))) {
+      chop;
+      my $text = $_;
+      if ($l == $firstline) { print $output $skip_marker; }
+      my $n1 = GetEntry($samples1, $l);
+      my $n2 = GetEntry($samples2, $l);
+      if ($html) {
+        # Emit a span that has one of the following classes:
+        #    livesrc -- has samples
+        #    deadsrc -- has disassembly, but with no samples
+        #    nop     -- has no matching disassembly
+        # Also emit an optional span containing disassembly.
+        my $dis = $disasm{$l};
+        my $asm = "";
+        if (defined($dis) && $dis ne '') {
+          $asm = "<span class=\"asm\">" . $dis . "</span>";
+        }
+        my $source_class = (($n1 + $n2 > 0)
+                            ? "livesrc"
+                            : (($asm ne "") ? "deadsrc" : "nop"));
+        printf $output (
+          "<span class=\"line\">%5d</span> " .
+          "<span class=\"%s\">%6s %6s %s</span>%s\n",
+          $l, $source_class,
+          HtmlPrintNumber($n1),
+          HtmlPrintNumber($n2),
+          HtmlEscape($text),
+          $asm);
+      } else {
+        printf $output (
+          "%6s %6s %4d: %s\n",
+          UnparseAlt($n1),
+          UnparseAlt($n2),
+          $l,
+          $text);
+      }
+      if ($l == $lastline)  { print $output $skip_marker; }
+    };
+  }
+  close(FILE);
+  if ($html) {
+    print $output "</pre>\n";
+  }
+  return 1;
+}
+
+# Return the source line for the specified file/linenumber.
+# Returns undef if not found.
+sub SourceLine {
+  my $file = shift;
+  my $line = shift;
+
+  # Look in cache
+  if (!defined($main::source_cache{$file})) {
+    if (100 < scalar keys(%main::source_cache)) {
+      # Clear the cache when it gets too big
+      %main::source_cache = ();
+    }
+
+    # Read all lines from the file
+    if (!open(FILE, "<$file")) {
+      print STDERR "$file: $!\n";
+      $main::source_cache{$file} = [];  # Cache the negative result
+      return undef;
+    }
+    my $lines = [];
+    push(@{$lines}, "");        # So we can use 1-based line numbers as indices
+    while (<FILE>) {
+      push(@{$lines}, $_);
+    }
+    close(FILE);
+
+    # Save the lines in the cache
+    $main::source_cache{$file} = $lines;
+  }
+
+  my $lines = $main::source_cache{$file};
+  if (($line < 0) || ($line > $#{$lines})) {
+    return undef;
+  } else {
+    return $lines->[$line];
+  }
+}
+
+# Print disassembly for one routine with interspersed source if available
+sub PrintDisassembledFunction {
+  my $prog = shift;
+  my $offset = shift;
+  my $routine = shift;
+  my $flat = shift;
+  my $cumulative = shift;
+  my $start_addr = shift;
+  my $end_addr = shift;
+  my $total = shift;
+
+  # Disassemble all instructions
+  my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr);
+
+  # Make array of counts per instruction
+  my @flat_count = ();
+  my @cum_count = ();
+  my $flat_total = 0;
+  my $cum_total = 0;
+  foreach my $e (@instructions) {
+    # Add up counts for all addresses that fall inside this instruction
+    my $c1 = 0;
+    my $c2 = 0;
+    for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
+      $c1 += GetEntry($flat, $a);
+      $c2 += GetEntry($cumulative, $a);
+    }
+    push(@flat_count, $c1);
+    push(@cum_count, $c2);
+    $flat_total += $c1;
+    $cum_total += $c2;
+  }
+
+  # Print header with total counts
+  printf("ROUTINE ====================== %s\n" .
+         "%6s %6s %s (flat, cumulative) %.1f%% of total\n",
+         ShortFunctionName($routine),
+         Unparse($flat_total),
+         Unparse($cum_total),
+         Units(),
+         ($cum_total * 100.0) / $total);
+
+  # Process instructions in order
+  my $current_file = "";
+  for (my $i = 0; $i <= $#instructions; ) {
+    my $e = $instructions[$i];
+
+    # Print the new file name whenever we switch files
+    if ($e->[1] ne $current_file) {
+      $current_file = $e->[1];
+      my $fname = $current_file;
+      $fname =~ s|^\./||;   # Trim leading "./"
+
+      # Shorten long file names
+      if (length($fname) >= 58) {
+        $fname = "..." . substr($fname, -55);
+      }
+      printf("-------------------- %s\n", $fname);
+    }
+
+    # TODO: Compute range of lines to print together to deal with
+    # small reorderings.
+    my $first_line = $e->[2];
+    my $last_line = $first_line;
+    my %flat_sum = ();
+    my %cum_sum = ();
+    for (my $l = $first_line; $l <= $last_line; $l++) {
+      $flat_sum{$l} = 0;
+      $cum_sum{$l} = 0;
+    }
+
+    # Find run of instructions for this range of source lines
+    my $first_inst = $i;
+    while (($i <= $#instructions) &&
+           ($instructions[$i]->[2] >= $first_line) &&
+           ($instructions[$i]->[2] <= $last_line)) {
+      $e = $instructions[$i];
+      $flat_sum{$e->[2]} += $flat_count[$i];
+      $cum_sum{$e->[2]} += $cum_count[$i];
+      $i++;
+    }
+    my $last_inst = $i - 1;
+
+    # Print source lines
+    for (my $l = $first_line; $l <= $last_line; $l++) {
+      my $line = SourceLine($current_file, $l);
+      if (!defined($line)) {
+        $line = "?\n";
+        next;
+      } else {
+        $line =~ s/^\s+//;
+      }
+      printf("%6s %6s %5d: %s",
+             UnparseAlt($flat_sum{$l}),
+             UnparseAlt($cum_sum{$l}),
+             $l,
+             $line);
+    }
+
+    # Print disassembly
+    for (my $x = $first_inst; $x <= $last_inst; $x++) {
+      my $e = $instructions[$x];
+      printf("%6s %6s    %8s: %6s\n",
+             UnparseAlt($flat_count[$x]),
+             UnparseAlt($cum_count[$x]),
+             UnparseAddress($offset, $e->[0]),
+             CleanDisassembly($e->[3]));
+    }
+  }
+}
+
+# Print DOT graph
+sub PrintDot {
+  my $prog = shift;
+  my $symbols = shift;
+  my $raw = shift;
+  my $flat = shift;
+  my $cumulative = shift;
+  my $overall_total = shift;
+
+  # Get total
+  my $local_total = TotalProfile($flat);
+  my $nodelimit = int($main::opt_nodefraction * $local_total);
+  my $edgelimit = int($main::opt_edgefraction * $local_total);
+  my $nodecount = $main::opt_nodecount;
+
+  # Find nodes to include
+  my @list = (sort { abs(GetEntry($cumulative, $b)) <=>
+                     abs(GetEntry($cumulative, $a))
+                     || $a cmp $b }
+              keys(%{$cumulative}));
+  my $last = $nodecount - 1;
+  if ($last > $#list) {
+    $last = $#list;
+  }
+  while (($last >= 0) &&
+         (abs(GetEntry($cumulative, $list[$last])) <= $nodelimit)) {
+    $last--;
+  }
+  if ($last < 0) {
+    print STDERR "No nodes to print\n";
+    return 0;
+  }
+
+  if ($nodelimit > 0 || $edgelimit > 0) {
+    printf STDERR ("Dropping nodes with <= %s %s; edges with <= %s abs(%s)\n",
+                   Unparse($nodelimit), Units(),
+                   Unparse($edgelimit), Units());
+  }
+
+  # Open DOT output file
+  my $output;
+  my $escaped_dot = ShellEscape(@DOT);
+  my $escaped_ps2pdf = ShellEscape(@PS2PDF);
+  if ($main::opt_gv) {
+    my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "ps"));
+    $output = "| $escaped_dot -Tps2 >$escaped_outfile";
+  } elsif ($main::opt_evince) {
+    my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "pdf"));
+    $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - $escaped_outfile";
+  } elsif ($main::opt_ps) {
+    $output = "| $escaped_dot -Tps2";
+  } elsif ($main::opt_pdf) {
+    $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - -";
+  } elsif ($main::opt_web || $main::opt_svg) {
+    # We need to post-process the SVG, so always write to a temporary file.
+    my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "svg"));
+    $output = "| $escaped_dot -Tsvg >$escaped_outfile";
+  } elsif ($main::opt_gif) {
+    $output = "| $escaped_dot -Tgif";
+  } else {
+    $output = ">&STDOUT";
+  }
+  open(DOT, $output) || error("$output: $!\n");
+
+  # Title
+  printf DOT ("digraph \"%s; %s %s\" {\n",
+              $prog,
+              Unparse($overall_total),
+              Units());
+  if ($main::opt_pdf) {
+    # The output is more printable if we set the page size for dot.
+    printf DOT ("size=\"8,11\"\n");
+  }
+  printf DOT ("node [width=0.375,height=0.25];\n");
+
+  # Print legend
+  printf DOT ("Legend [shape=box,fontsize=24,shape=plaintext," .
+              "label=\"%s\\l%s\\l%s\\l%s\\l%s\\l\"];\n",
+              $prog,
+              sprintf("Total %s: %s", Units(), Unparse($overall_total)),
+              sprintf("Focusing on: %s", Unparse($local_total)),
+              sprintf("Dropped nodes with <= %s abs(%s)",
+                      Unparse($nodelimit), Units()),
+              sprintf("Dropped edges with <= %s %s",
+                      Unparse($edgelimit), Units())
+              );
+
+  # Print nodes
+  my %node = ();
+  my $nextnode = 1;
+  foreach my $a (@list[0..$last]) {
+    # Pick font size
+    my $f = GetEntry($flat, $a);
+    my $c = GetEntry($cumulative, $a);
+
+    my $fs = 8;
+    if ($local_total > 0) {
+      $fs = 8 + (50.0 * sqrt(abs($f * 1.0 / $local_total)));
+    }
+
+    $node{$a} = $nextnode++;
+    my $sym = $a;
+    $sym =~ s/\s+/\\n/g;
+    $sym =~ s/::/\\n/g;
+
+    # Extra cumulative info to print for non-leaves
+    my $extra = "";
+    if ($f != $c) {
+      $extra = sprintf("\\rof %s (%s)",
+                       Unparse($c),
+                       Percent($c, $local_total));
+    }
+    my $style = "";
+    if ($main::opt_heapcheck) {
+      if ($f > 0) {
+        # make leak-causing nodes more visible (add a background)
+        $style = ",style=filled,fillcolor=gray"
+      } elsif ($f < 0) {
+        # make anti-leak-causing nodes (which almost never occur)
+        # stand out as well (triple border)
+        $style = ",peripheries=3"
+      }
+    }
+
+    printf DOT ("N%d [label=\"%s\\n%s (%s)%s\\r" .
+                "\",shape=box,fontsize=%.1f%s];\n",
+                $node{$a},
+                $sym,
+                Unparse($f),
+                Percent($f, $local_total),
+                $extra,
+                $fs,
+                $style,
+               );
+  }
+
+  # Get edges and counts per edge
+  my %edge = ();
+  my $n;
+  my $fullname_to_shortname_map = {};
+  FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map);
+  foreach my $k (keys(%{$raw})) {
+    # TODO: omit low %age edges
+    $n = $raw->{$k};
+    my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k);
+    for (my $i = 1; $i <= $#translated; $i++) {
+      my $src = $translated[$i];
+      my $dst = $translated[$i-1];
+      #next if ($src eq $dst);  # Avoid self-edges?
+      if (exists($node{$src}) && exists($node{$dst})) {
+        my $edge_label = "$src\001$dst";
+        if (!exists($edge{$edge_label})) {
+          $edge{$edge_label} = 0;
+        }
+        $edge{$edge_label} += $n;
+      }
+    }
+  }
+
+  # Print edges (process in order of decreasing counts)
+  my %indegree = ();   # Number of incoming edges added per node so far
+  my %outdegree = ();  # Number of outgoing edges added per node so far
+  foreach my $e (sort { $edge{$b} <=> $edge{$a} } keys(%edge)) {
+    my @x = split(/\001/, $e);
+    $n = $edge{$e};
+
+    # Initialize degree of kept incoming and outgoing edges if necessary
+    my $src = $x[0];
+    my $dst = $x[1];
+    if (!exists($outdegree{$src})) { $outdegree{$src} = 0; }
+    if (!exists($indegree{$dst})) { $indegree{$dst} = 0; }
+
+    my $keep;
+    if ($indegree{$dst} == 0) {
+      # Keep edge if needed for reachability
+      $keep = 1;
+    } elsif (abs($n) <= $edgelimit) {
+      # Drop if we are below --edgefraction
+      $keep = 0;
+    } elsif ($outdegree{$src} >= $main::opt_maxdegree ||
+             $indegree{$dst} >= $main::opt_maxdegree) {
+      # Keep limited number of in/out edges per node
+      $keep = 0;
+    } else {
+      $keep = 1;
+    }
+
+    if ($keep) {
+      $outdegree{$src}++;
+      $indegree{$dst}++;
+
+      # Compute line width based on edge count
+      my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0);
+      if ($fraction > 1) { $fraction = 1; }
+      my $w = $fraction * 2;
+      if ($w < 1 && ($main::opt_web || $main::opt_svg)) {
+        # SVG output treats line widths < 1 poorly.
+        $w = 1;
+      }
+
+      # Dot sometimes segfaults if given edge weights that are too large, so
+      # we cap the weights at a large value
+      my $edgeweight = abs($n) ** 0.7;
+      if ($edgeweight > 100000) { $edgeweight = 100000; }
+      $edgeweight = int($edgeweight);
+
+      my $style = sprintf("setlinewidth(%f)", $w);
+      if ($x[1] =~ m/\(inline\)/) {
+        $style .= ",dashed";
+      }
+
+      # Use a slightly squashed function of the edge count as the weight
+      printf DOT ("N%s -> N%s [label=%s, weight=%d, style=\"%s\"];\n",
+                  $node{$x[0]},
+                  $node{$x[1]},
+                  Unparse($n),
+                  $edgeweight,
+                  $style);
+    }
+  }
+
+  print DOT ("}\n");
+  close(DOT);
+
+  if ($main::opt_web || $main::opt_svg) {
+    # Rewrite SVG to be more usable inside web browser.
+    RewriteSvg(TempName($main::next_tmpfile, "svg"));
+  }
+
+  return 1;
+}
+
+sub RewriteSvg {
+  my $svgfile = shift;
+
+  open(SVG, $svgfile) || die "open temp svg: $!";
+  my @svg = <SVG>;
+  close(SVG);
+  unlink $svgfile;
+  my $svg = join('', @svg);
+
+  # Dot's SVG output is
+  #
+  #    <svg width="___" height="___"
+  #     viewBox="___" xmlns=...>
+  #    <g id="graph0" transform="...">
+  #    ...
+  #    </g>
+  #    </svg>
+  #
+  # Change it to
+  #
+  #    <svg width="100%" height="100%"
+  #     xmlns=...>
+  #    $svg_javascript
+  #    <g id="viewport" transform="translate(0,0)">
+  #    <g id="graph0" transform="...">
+  #    ...
+  #    </g>
+  #    </g>
+  #    </svg>
+
+  # Fix width, height; drop viewBox.
+  $svg =~ s/(?s)<svg width="[^"]+" height="[^"]+"(.*?)viewBox="[^"]+"/<svg width="100%" height="100%"$1/;
+
+  # Insert script, viewport <g> above first <g>
+  my $svg_javascript = SvgJavascript();
+  my $viewport = "<g id=\"viewport\" transform=\"translate(0,0)\">\n";
+  $svg =~ s/<g id="graph\d"/$svg_javascript$viewport$&/;
+
+  # Insert final </g> above </svg>.
+  $svg =~ s/(.*)(<\/svg>)/$1<\/g>$2/;
+  $svg =~ s/<g id="graph\d"(.*?)/<g id="viewport"$1/;
+
+  if ($main::opt_svg) {
+    # --svg: write to standard output.
+    print $svg;
+  } else {
+    # Write back to temporary file.
+    open(SVG, ">$svgfile") || die "open $svgfile: $!";
+    print SVG $svg;
+    close(SVG);
+  }
+}
+
+sub SvgJavascript {
+  return <<'EOF';
+<script type="text/ecmascript"><![CDATA[
+// SVGPan
+// http://www.cyberz.org/blog/2009/12/08/svgpan-a-javascript-svg-panzoomdrag-library/
+// Local modification: if(true || ...) below to force panning, never moving.
+
+/**
+ *  SVGPan library 1.2
+ * ====================
+ *
+ * Given a unique existing element with id "viewport", including
+ * the library into any SVG adds the following capabilities:
+ *
+ *  - Mouse panning
+ *  - Mouse zooming (using the wheel)
+ *  - Object dragging
+ *
+ * Known issues:
+ *
+ *  - Zooming (while panning) on Safari still has some issues
+ *
+ * Releases:
+ *
+ * 1.2, Sat Mar 20 08:42:50 GMT 2010, Zeng Xiaohui
+ *     Fixed a bug with browser mouse handler interaction
+ *
+ * 1.1, Wed Feb  3 17:39:33 GMT 2010, Zeng Xiaohui
+ *     Updated the zoom code to support the mouse wheel on Safari/Chrome
+ *
+ * 1.0, Andrea Leofreddi
+ *     First release
+ *
+ * This code is licensed under the following BSD license:
+ *
+ * Copyright 2009-2010 Andrea Leofreddi <a.leofreddi@itcharm.com>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ *    1. Redistributions of source code must retain the above copyright notice, this list of
+ *       conditions and the following disclaimer.
+ *
+ *    2. Redistributions in binary form must reproduce the above copyright notice, this list
+ *       of conditions and the following disclaimer in the documentation and/or other materials
+ *       provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Andrea Leofreddi ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Andrea Leofreddi OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are those of the
+ * authors and should not be interpreted as representing official policies, either expressed
+ * or implied, of Andrea Leofreddi.
+ */
+
+var root = document.documentElement;
+
+var state = 'none', stateTarget, stateOrigin, stateTf;
+
+setupHandlers(root);
+
+/**
+ * Register handlers
+ */
+function setupHandlers(root){
+       setAttributes(root, {
+               "onmouseup" : "add(evt)",
+               "onmousedown" : "handleMouseDown(evt)",
+               "onmousemove" : "handleMouseMove(evt)",
+               "onmouseup" : "handleMouseUp(evt)",
+               //"onmouseout" : "handleMouseUp(evt)", // Decomment this to stop the pan functionality when dragging out of the SVG element
+       });
+
+       if(navigator.userAgent.toLowerCase().indexOf('webkit') >= 0)
+               window.addEventListener('mousewheel', handleMouseWheel, false); // Chrome/Safari
+       else
+               window.addEventListener('DOMMouseScroll', handleMouseWheel, false); // Others
+
+       var g = svgDoc.getElementById("svg");
+       g.width = "100%";
+       g.height = "100%";
+}
+
+/**
+ * Instantiate an SVGPoint object with the given event coordinates.
+ */
+function getEventPoint(evt) {
+       var p = root.createSVGPoint();
+
+       p.x = evt.clientX;
+       p.y = evt.clientY;
+
+       return p;
+}
+
+/**
+ * Sets the current transform matrix of an element.
+ */
+function setCTM(element, matrix) {
+       var s = "matrix(" + matrix.a + "," + matrix.b + "," + matrix.c + "," + matrix.d + "," + matrix.e + "," + matrix.f + ")";
+
+       element.setAttribute("transform", s);
+}
+
+/**
+ * Dumps a matrix to a string (useful for debug).
+ */
+function dumpMatrix(matrix) {
+       var s = "[ " + matrix.a + ", " + matrix.c + ", " + matrix.e + "\n  " + matrix.b + ", " + matrix.d + ", " + matrix.f + "\n  0, 0, 1 ]";
+
+       return s;
+}
+
+/**
+ * Sets attributes of an element.
+ */
+function setAttributes(element, attributes){
+       for (i in attributes)
+               element.setAttributeNS(null, i, attributes[i]);
+}
+
+/**
+ * Handle mouse wheel event.
+ */
+function handleMouseWheel(evt) {
+       if(evt.preventDefault)
+               evt.preventDefault();
+
+       evt.returnValue = false;
+
+       var svgDoc = evt.target.ownerDocument;
+
+       var delta;
+
+       if(evt.wheelDelta)
+               delta = evt.wheelDelta / 3600; // Chrome/Safari
+       else
+               delta = evt.detail / -90; // Mozilla
+
+       var z = 1 + delta; // Zoom factor: 0.9/1.1
+
+       var g = svgDoc.getElementById("viewport");
+
+       var p = getEventPoint(evt);
+
+       p = p.matrixTransform(g.getCTM().inverse());
+
+       // Compute new scale matrix in current mouse position
+       var k = root.createSVGMatrix().translate(p.x, p.y).scale(z).translate(-p.x, -p.y);
+
+        setCTM(g, g.getCTM().multiply(k));
+
+       stateTf = stateTf.multiply(k.inverse());
+}
+
+/**
+ * Handle mouse move event.
+ */
+function handleMouseMove(evt) {
+       if(evt.preventDefault)
+               evt.preventDefault();
+
+       evt.returnValue = false;
+
+       var svgDoc = evt.target.ownerDocument;
+
+       var g = svgDoc.getElementById("viewport");
+
+       if(state == 'pan') {
+               // Pan mode
+               var p = getEventPoint(evt).matrixTransform(stateTf);
+
+               setCTM(g, stateTf.inverse().translate(p.x - stateOrigin.x, p.y - stateOrigin.y));
+       } else if(state == 'move') {
+               // Move mode
+               var p = getEventPoint(evt).matrixTransform(g.getCTM().inverse());
+
+               setCTM(stateTarget, root.createSVGMatrix().translate(p.x - stateOrigin.x, p.y - stateOrigin.y).multiply(g.getCTM().inverse()).multiply(stateTarget.getCTM()));
+
+               stateOrigin = p;
+       }
+}
+
+/**
+ * Handle mouse button press event.
+ */
+function handleMouseDown(evt) {
+       if(evt.preventDefault)
+               evt.preventDefault();
+
+       evt.returnValue = false;
+
+       var svgDoc = evt.target.ownerDocument;
+
+       var g = svgDoc.getElementById("viewport");
+
+       if(true || evt.target.tagName == "svg") {
+               // Pan mode
+               state = 'pan';
+
+               stateTf = g.getCTM().inverse();
+
+               stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
+       } else {
+               // Move mode
+               state = 'move';
+
+               stateTarget = evt.target;
+
+               stateTf = g.getCTM().inverse();
+
+               stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
+       }
+}
+
+/**
+ * Handle mouse button release event.
+ */
+function handleMouseUp(evt) {
+       if(evt.preventDefault)
+               evt.preventDefault();
+
+       evt.returnValue = false;
+
+       var svgDoc = evt.target.ownerDocument;
+
+       if(state == 'pan' || state == 'move') {
+               // Quit pan mode
+               state = '';
+       }
+}
+
+]]></script>
+EOF
+}
+
+# Provides a map from fullname to shortname for cases where the
+# shortname is ambiguous.  The symlist has both the fullname and
+# shortname for all symbols, which is usually fine, but sometimes --
+# such as with overloaded functions -- two different fullnames can map to
+# the same shortname.  In that case, we use the address of the
+# function to disambiguate the two.  This function fills in a map that
+# maps fullnames to modified shortnames in such cases.  If a fullname
+# is not present in the map, the 'normal' shortname provided by the
+# symlist is the appropriate one to use.
+sub FillFullnameToShortnameMap {
+  my $symbols = shift;
+  my $fullname_to_shortname_map = shift;
+  my $shortnames_seen_once = {};
+  my $shortnames_seen_more_than_once = {};
+
+  foreach my $symlist (values(%{$symbols})) {
+    # TODO(csilvers): deal with inlined symbols too.
+    my $shortname = $symlist->[0];
+    my $fullname = $symlist->[2];
+    if ($fullname !~ /<[0-9a-fA-F]+>$/) {  # fullname doesn't end in an address
+      next;       # the only collisions we care about are when addresses differ
+    }
+    if (defined($shortnames_seen_once->{$shortname}) &&
+        $shortnames_seen_once->{$shortname} ne $fullname) {
+      $shortnames_seen_more_than_once->{$shortname} = 1;
+    } else {
+      $shortnames_seen_once->{$shortname} = $fullname;
+    }
+  }
+
+  foreach my $symlist (values(%{$symbols})) {
+    my $shortname = $symlist->[0];
+    my $fullname = $symlist->[2];
+    # TODO(csilvers): take in a list of addresses we care about, and only
+    # store in the map if $symlist->[1] is in that list.  Saves space.
+    next if defined($fullname_to_shortname_map->{$fullname});
+    if (defined($shortnames_seen_more_than_once->{$shortname})) {
+      if ($fullname =~ /<0*([^>]*)>$/) {   # fullname has address at end of it
+        $fullname_to_shortname_map->{$fullname} = "$shortname\@$1";
+      }
+    }
+  }
+}
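+# For illustration (hypothetical symbols): if "Foo(int)<00001234>" and
+# "Foo(double)<00005678>" both have the shortname "Foo", the map gains
+# entries such as "Foo(int)<00001234>" => "Foo@1234", keeping the two
+# overloads distinct in the output.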
+
+# Return a small number that identifies the argument.
+# Multiple calls with the same argument will return the same number.
+# Calls with different arguments will return different numbers.
+sub ShortIdFor {
+  my $key = shift;
+  my $id = $main::uniqueid{$key};
+  if (!defined($id)) {
+    $id = keys(%main::uniqueid) + 1;
+    $main::uniqueid{$key} = $id;
+  }
+  return $id;
+}
+
+# Translate a stack of addresses into a stack of symbols
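+# Each $symbols entry is a flat list of (shortname, file:line, fullname)
+# triples with callers before callees; the walk below goes backwards so
+# that inlined callees (tagged "(inline)") come out before their caller.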
+sub TranslateStack {
+  my $symbols = shift;
+  my $fullname_to_shortname_map = shift;
+  my $k = shift;
+
+  my @addrs = split(/\n/, $k);
+  my @result = ();
+  for (my $i = 0; $i <= $#addrs; $i++) {
+    my $a = $addrs[$i];
+
+    # Skip large addresses since they sometimes show up as fake entries on RH9
+    if (length($a) > 8 && $a gt "7fffffffffffffff") {
+      next;
+    }
+
+    if ($main::opt_disasm || $main::opt_list) {
+      # We want just the address for the key
+      push(@result, $a);
+      next;
+    }
+
+    my $symlist = $symbols->{$a};
+    if (!defined($symlist)) {
+      $symlist = [$a, "", $a];
+    }
+
+    # We can have a sequence of symbols for a particular entry
+    # (more than one symbol in the case of inlining).  Callers
+    # come before callees in symlist, so walk backwards since
+    # the translated stack should contain callees before callers.
+    for (my $j = $#{$symlist}; $j >= 2; $j -= 3) {
+      my $func = $symlist->[$j-2];
+      my $fileline = $symlist->[$j-1];
+      my $fullfunc = $symlist->[$j];
+      if (defined($fullname_to_shortname_map->{$fullfunc})) {
+        $func = $fullname_to_shortname_map->{$fullfunc};
+      }
+      if ($j > 2) {
+        $func = "$func (inline)";
+      }
+
+      # Do not merge nodes corresponding to Callback::Run since that
+      # causes confusing cycles in dot display.  Instead, we synthesize
+      # a unique name for this frame per caller.
+      if ($func =~ m/Callback.*::Run$/) {
+        my $caller = ($i > 0) ? $addrs[$i-1] : 0;
+        $func = "Run#" . ShortIdFor($caller);
+      }
+
+      if ($main::opt_addresses) {
+        push(@result, "$a $func $fileline");
+      } elsif ($main::opt_lines) {
+        if ($func eq '??' && $fileline eq '??:0') {
+          push(@result, "$a");
+        } else {
+          push(@result, "$func $fileline");
+        }
+      } elsif ($main::opt_functions) {
+        if ($func eq '??') {
+          push(@result, "$a");
+        } else {
+          push(@result, $func);
+        }
+      } elsif ($main::opt_files) {
+        if ($fileline eq '??:0' || $fileline eq '') {
+          push(@result, "$a");
+        } else {
+          my $f = $fileline;
+          $f =~ s/:\d+$//;
+          push(@result, $f);
+        }
+      } else {
+        push(@result, $a);
+        last;  # Do not print inlined info
+      }
+    }
+  }
+
+  # print join(",", @addrs), " => ", join(",", @result), "\n";
+  return @result;
+}
+
+# Generate percent string for a number and a total
+sub Percent {
+  my $num = shift;
+  my $tot = shift;
+  if ($tot != 0) {
+    return sprintf("%.1f%%", $num * 100.0 / $tot);
+  } else {
+    return ($num == 0) ? "nan" : (($num > 0) ? "+inf" : "-inf");
+  }
+}
+
+# Generate pretty-printed form of number
+sub Unparse {
+  my $num = shift;
+  if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
+    if ($main::opt_inuse_objects || $main::opt_alloc_objects) {
+      return sprintf("%d", $num);
+    } else {
+      if ($main::opt_show_bytes) {
+        return sprintf("%d", $num);
+      } else {
+        return sprintf("%.1f", $num / 1048576.0);
+      }
+    }
+  } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) {
+    return sprintf("%.3f", $num / 1e9); # Convert nanoseconds to seconds
+  } else {
+    return sprintf("%d", $num);
+  }
+}
+
+# Alternate pretty-printed form: 0 maps to "."
+sub UnparseAlt {
+  my $num = shift;
+  if ($num == 0) {
+    return ".";
+  } else {
+    return Unparse($num);
+  }
+}
+
+# Alternate pretty-printed form: 0 maps to ""
+sub HtmlPrintNumber {
+  my $num = shift;
+  if ($num == 0) {
+    return "";
+  } else {
+    return Unparse($num);
+  }
+}
+
+# Return output units
+sub Units {
+  if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
+    if ($main::opt_inuse_objects || $main::opt_alloc_objects) {
+      return "objects";
+    } else {
+      if ($main::opt_show_bytes) {
+        return "B";
+      } else {
+        return "MB";
+      }
+    }
+  } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) {
+    return "seconds";
+  } else {
+    return "samples";
+  }
+}
+
+##### Profile manipulation code #####
+
+# Generate flattened profile:
+# If count is charged to stack [a,b,c,d], in generated profile,
+# it will be charged to [a]
+sub FlatProfile {
+  my $profile = shift;
+  my $result = {};
+  foreach my $k (keys(%{$profile})) {
+    my $count = $profile->{$k};
+    my @addrs = split(/\n/, $k);
+    if ($#addrs >= 0) {
+      AddEntry($result, $addrs[0], $count);
+    }
+  }
+  return $result;
+}
+
+# Generate cumulative profile:
+# If count is charged to stack [a,b,c,d], in generated profile,
+# it will be charged to [a], [b], [c], [d]
+sub CumulativeProfile {
+  my $profile = shift;
+  my $result = {};
+  foreach my $k (keys(%{$profile})) {
+    my $count = $profile->{$k};
+    my @addrs = split(/\n/, $k);
+    foreach my $a (@addrs) {
+      AddEntry($result, $a, $count);
+    }
+  }
+  return $result;
+}
+
+# If the second-youngest PC on the stack is always the same, returns
+# that pc.  Otherwise, returns undef.
+sub IsSecondPcAlwaysTheSame {
+  my $profile = shift;
+
+  my $second_pc = undef;
+  foreach my $k (keys(%{$profile})) {
+    my @addrs = split(/\n/, $k);
+    if ($#addrs < 1) {
+      return undef;
+    }
+    if (not defined $second_pc) {
+      $second_pc = $addrs[1];
+    } else {
+      if ($second_pc ne $addrs[1]) {
+        return undef;
+      }
+    }
+  }
+  return $second_pc;
+}
+
+sub ExtractSymbolLocation {
+  my $symbols = shift;
+  my $address = shift;
+  # 'addr2line' outputs "??:0" for unknown locations; we do the
+  # same to be consistent.
+  my $location = "??:0:unknown";
+  if (exists $symbols->{$address}) {
+    my $file = $symbols->{$address}->[1];
+    if ($file eq "?") {
+      $file = "??:0"
+    }
+    $location = $file . ":" . $symbols->{$address}->[0];
+  }
+  return $location;
+}
+
+# Extracts a graph of calls.
+sub ExtractCalls {
+  my $symbols = shift;
+  my $profile = shift;
+
+  my $calls = {};
+  while( my ($stack_trace, $count) = each %$profile ) {
+    my @address = split(/\n/, $stack_trace);
+    my $destination = ExtractSymbolLocation($symbols, $address[0]);
+    AddEntry($calls, $destination, $count);
+    for (my $i = 1; $i <= $#address; $i++) {
+      my $source = ExtractSymbolLocation($symbols, $address[$i]);
+      my $call = "$source -> $destination";
+      AddEntry($calls, $call, $count);
+      $destination = $source;
+    }
+  }
+
+  return $calls;
+}
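+# For illustration (hypothetical locations): a two-frame stack produces map
+# entries like "b.cc:20:caller -> a.cc:10:callee" for the edge and a plain
+# "a.cc:10:callee" entry for the leaf, each charged with the stack's count.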
+
+sub FilterFrames {
+  my $symbols = shift;
+  my $profile = shift;
+
+  if ($main::opt_retain eq '' && $main::opt_exclude eq '') {
+    return $profile;
+  }
+
+  my $result = {};
+  foreach my $k (keys(%{$profile})) {
+    my $count = $profile->{$k};
+    my @addrs = split(/\n/, $k);
+    my @path = ();
+    foreach my $a (@addrs) {
+      my $sym;
+      if (exists($symbols->{$a})) {
+        $sym = $symbols->{$a}->[0];
+      } else {
+        $sym = $a;
+      }
+      if ($main::opt_retain ne '' && $sym !~ m/$main::opt_retain/) {
+        next;
+      }
+      if ($main::opt_exclude ne '' && $sym =~ m/$main::opt_exclude/) {
+        next;
+      }
+      push(@path, $a);
+    }
+    if (scalar(@path) > 0) {
+      my $reduced_path = join("\n", @path);
+      AddEntry($result, $reduced_path, $count);
+    }
+  }
+
+  return $result;
+}
+
+sub RemoveUninterestingFrames {
+  my $symbols = shift;
+  my $profile = shift;
+
+  # List of function names to skip
+  my %skip = ();
+  my $skip_regexp = 'NOMATCH';
+  if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
+    foreach my $name ('calloc',
+                      'cfree',
+                      'malloc',
+                      'free',
+                      'memalign',
+                      'posix_memalign',
+                      'aligned_alloc',
+                      'pvalloc',
+                      'valloc',
+                      'realloc',
+                      'mallocx', # jemalloc
+                      'rallocx', # jemalloc
+                      'xallocx', # jemalloc
+                      'dallocx', # jemalloc
+                      'sdallocx', # jemalloc
+                      'tc_calloc',
+                      'tc_cfree',
+                      'tc_malloc',
+                      'tc_free',
+                      'tc_memalign',
+                      'tc_posix_memalign',
+                      'tc_pvalloc',
+                      'tc_valloc',
+                      'tc_realloc',
+                      'tc_new',
+                      'tc_delete',
+                      'tc_newarray',
+                      'tc_deletearray',
+                      'tc_new_nothrow',
+                      'tc_newarray_nothrow',
+                      'do_malloc',
+                      '::do_malloc',   # new name -- got moved to an unnamed ns
+                      '::do_malloc_or_cpp_alloc',
+                      'DoSampledAllocation',
+                      'simple_alloc::allocate',
+                      '__malloc_alloc_template::allocate',
+                      '__builtin_delete',
+                      '__builtin_new',
+                      '__builtin_vec_delete',
+                      '__builtin_vec_new',
+                      'operator new',
+                      'operator new[]',
+                      # The entry to our memory-allocation routines on OS X
+                      'malloc_zone_malloc',
+                      'malloc_zone_calloc',
+                      'malloc_zone_valloc',
+                      'malloc_zone_realloc',
+                      'malloc_zone_memalign',
+                      'malloc_zone_free',
+                      # These mark the beginning/end of our custom sections
+                      '__start_google_malloc',
+                      '__stop_google_malloc',
+                      '__start_malloc_hook',
+                      '__stop_malloc_hook') {
+      $skip{$name} = 1;
+      $skip{"_" . $name} = 1;   # Mach (OS X) adds a _ prefix to everything
+    }
+    # TODO: Remove TCMalloc once everything has been
+    # moved into the tcmalloc:: namespace and we have flushed
+    # old code out of the system.
+    $skip_regexp = "TCMalloc|^tcmalloc::";
+  } elsif ($main::profile_type eq 'contention') {
+    foreach my $vname ('base::RecordLockProfileData',
+                       'base::SubmitMutexProfileData',
+                       'base::SubmitSpinLockProfileData',
+                       'Mutex::Unlock',
+                       'Mutex::UnlockSlow',
+                       'Mutex::ReaderUnlock',
+                       'MutexLock::~MutexLock',
+                       'SpinLock::Unlock',
+                       'SpinLock::SlowUnlock',
+                       'SpinLockHolder::~SpinLockHolder') {
+      $skip{$vname} = 1;
+    }
+  } elsif ($main::profile_type eq 'cpu') {
+    # Drop signal handlers used for CPU profile collection
+    # TODO(dpeng): this should not be necessary; it's taken
+    # care of by the general 2nd-pc mechanism below.
+    foreach my $name ('ProfileData::Add',           # historical
+                      'ProfileData::prof_handler',  # historical
+                      'CpuProfiler::prof_handler',
+                      '__FRAME_END__',
+                      '__pthread_sighandler',
+                      '__restore') {
+      $skip{$name} = 1;
+    }
+  } else {
+    # Nothing skipped for unknown types
+  }
+
+  if ($main::profile_type eq 'cpu') {
+    # If all the second-youngest program counters are the same,
+    # this STRONGLY suggests that it is an artifact of measurement,
+    # i.e., stack frames pushed by the CPU profiler signal handler.
+    # Hence, we delete them.
+    # (The topmost PC is read from the signal structure, not from
+    # the stack, so it does not get involved.)
+    while (my $second_pc = IsSecondPcAlwaysTheSame($profile)) {
+      my $result = {};
+      my $func = '';
+      if (exists($symbols->{$second_pc})) {
+        $second_pc = $symbols->{$second_pc}->[0];
+      }
+      print STDERR "Removing $second_pc from all stack traces.\n";
+      foreach my $k (keys(%{$profile})) {
+        my $count = $profile->{$k};
+        my @addrs = split(/\n/, $k);
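+        # Drop the second-youngest pc (index 1) from this stack key.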
+        splice @addrs, 1, 1;
+        my $reduced_path = join("\n", @addrs);
+        AddEntry($result, $reduced_path, $count);
+      }
+      $profile = $result;
+    }
+  }
+
+  my $result = {};
+  foreach my $k (keys(%{$profile})) {
+    my $count = $profile->{$k};
+    my @addrs = split(/\n/, $k);
+    my @path = ();
+    foreach my $a (@addrs) {
+      if (exists($symbols->{$a})) {
+        my $func = $symbols->{$a}->[0];
+        if ($skip{$func} || ($func =~ m/$skip_regexp/)) {
+          # Throw away the portion of the backtrace seen so far, under the
+          # assumption that previous frames were for functions internal to the
+          # allocator.
+          @path = ();
+          next;
+        }
+      }
+      push(@path, $a);
+    }
+    my $reduced_path = join("\n", @path);
+    AddEntry($result, $reduced_path, $count);
+  }
+
+  $result = FilterFrames($symbols, $result);
+
+  return $result;
+}
+
+# Reduce profile to granularity given by user
+sub ReduceProfile {
+  my $symbols = shift;
+  my $profile = shift;
+  my $result = {};
+  my $fullname_to_shortname_map = {};
+  FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map);
+  foreach my $k (keys(%{$profile})) {
+    my $count = $profile->{$k};
+    my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k);
+    my @path = ();
+    my %seen = ();
+    $seen{''} = 1;      # So that empty keys are skipped
+    foreach my $e (@translated) {
+      # To avoid double-counting due to recursion, skip a stack-trace
+      # entry if it has already been seen
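+      # (For example, the translated entries f, g, f, main collapse to
+      # f, g, main.)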
+      if (!$seen{$e}) {
+        $seen{$e} = 1;
+        push(@path, $e);
+      }
+    }
+    my $reduced_path = join("\n", @path);
+    AddEntry($result, $reduced_path, $count);
+  }
+  return $result;
+}
+
+# Does the specified symbol array match the regexp?
+sub SymbolMatches {
+  my $sym = shift;
+  my $re = shift;
+  if (defined($sym)) {
+    for (my $i = 0; $i < $#{$sym}; $i += 3) {
+      if ($sym->[$i] =~ m/$re/ || $sym->[$i+1] =~ m/$re/) {
+        return 1;
+      }
+    }
+  }
+  return 0;
+}
+
+# Focus only on paths involving specified regexps
+sub FocusProfile {
+  my $symbols = shift;
+  my $profile = shift;
+  my $focus = shift;
+  my $result = {};
+  foreach my $k (keys(%{$profile})) {
+    my $count = $profile->{$k};
+    my @addrs = split(/\n/, $k);
+    foreach my $a (@addrs) {
+      # Keep this path if any frame matches the address, shortname, or fileline
+      if (($a =~ m/$focus/) || SymbolMatches($symbols->{$a}, $focus)) {
+        AddEntry($result, $k, $count);
+        last;
+      }
+    }
+  }
+  return $result;
+}
+
+# Focus only on paths not involving specified regexps
+sub IgnoreProfile {
+  my $symbols = shift;
+  my $profile = shift;
+  my $ignore = shift;
+  my $result = {};
+  foreach my $k (keys(%{$profile})) {
+    my $count = $profile->{$k};
+    my @addrs = split(/\n/, $k);
+    my $matched = 0;
+    foreach my $a (@addrs) {
+      # Mark as matched if any frame matches the address, shortname, or fileline
+      if (($a =~ m/$ignore/) || SymbolMatches($symbols->{$a}, $ignore)) {
+        $matched = 1;
+        last;
+      }
+    }
+    if (!$matched) {
+      AddEntry($result, $k, $count);
+    }
+  }
+  return $result;
+}
+
+# Get total count in profile
+sub TotalProfile {
+  my $profile = shift;
+  my $result = 0;
+  foreach my $k (keys(%{$profile})) {
+    $result += $profile->{$k};
+  }
+  return $result;
+}
+
+# Add A to B
+sub AddProfile {
+  my $A = shift;
+  my $B = shift;
+
+  my $R = {};
+  # add all keys in A
+  foreach my $k (keys(%{$A})) {
+    my $v = $A->{$k};
+    AddEntry($R, $k, $v);
+  }
+  # add all keys in B
+  foreach my $k (keys(%{$B})) {
+    my $v = $B->{$k};
+    AddEntry($R, $k, $v);
+  }
+  return $R;
+}
+
+# Merges symbol maps
+sub MergeSymbols {
+  my $A = shift;
+  my $B = shift;
+
+  my $R = {};
+  foreach my $k (keys(%{$A})) {
+    $R->{$k} = $A->{$k};
+  }
+  if (defined($B)) {
+    foreach my $k (keys(%{$B})) {
+      $R->{$k} = $B->{$k};
+    }
+  }
+  return $R;
+}
+
+
+# Add A to B
+sub AddPcs {
+  my $A = shift;
+  my $B = shift;
+
+  my $R = {};
+  # add all keys in A
+  foreach my $k (keys(%{$A})) {
+    $R->{$k} = 1
+  }
+  # add all keys in B
+  foreach my $k (keys(%{$B})) {
+    $R->{$k} = 1
+  }
+  return $R;
+}
+
+# Subtract B from A
+sub SubtractProfile {
+  my $A = shift;
+  my $B = shift;
+
+  my $R = {};
+  foreach my $k (keys(%{$A})) {
+    my $v = $A->{$k} - GetEntry($B, $k);
+    if ($v < 0 && $main::opt_drop_negative) {
+      $v = 0;
+    }
+    AddEntry($R, $k, $v);
+  }
+  if (!$main::opt_drop_negative) {
+    # Take care of when subtracted profile has more entries
+    foreach my $k (keys(%{$B})) {
+      if (!exists($A->{$k})) {
+        AddEntry($R, $k, 0 - $B->{$k});
+      }
+    }
+  }
+  return $R;
+}
+
+# Get entry from profile; zero if not present
+sub GetEntry {
+  my $profile = shift;
+  my $k = shift;
+  if (exists($profile->{$k})) {
+    return $profile->{$k};
+  } else {
+    return 0;
+  }
+}
+
+# Add entry to specified profile
+sub AddEntry {
+  my $profile = shift;
+  my $k = shift;
+  my $n = shift;
+  if (!exists($profile->{$k})) {
+    $profile->{$k} = 0;
+  }
+  $profile->{$k} += $n;
+}
+
+# Add a stack of entries to specified profile, and add them to the $pcs
+# list.
+sub AddEntries {
+  my $profile = shift;
+  my $pcs = shift;
+  my $stack = shift;
+  my $count = shift;
+  my @k = ();
+
+  foreach my $e (split(/\s+/, $stack)) {
+    my $pc = HexExtend($e);
+    $pcs->{$pc} = 1;
+    push @k, $pc;
+  }
+  AddEntry($profile, (join "\n", @k), $count);
+}
+
+##### Code to profile a server dynamically #####
+
+sub CheckSymbolPage {
+  my $url = SymbolPageURL();
+  my $command = ShellEscape(@URL_FETCHER, $url);
+  open(SYMBOL, "$command |") or error($command);
+  my $line = <SYMBOL>;
+  $line =~ s/\r//g;         # turn windows-looking lines into unix-looking lines
+  close(SYMBOL);
+  unless (defined($line)) {
+    error("$url doesn't exist\n");
+  }
+
+  if ($line =~ /^num_symbols:\s+(\d+)$/) {
+    if ($1 == 0) {
+      error("Stripped binary. No symbols available.\n");
+    }
+  } else {
+    error("Failed to get the number of symbols from $url\n");
+  }
+}
+
+sub IsProfileURL {
+  my $profile_name = shift;
+  if (-f $profile_name) {
+    printf STDERR "Using local file $profile_name.\n";
+    return 0;
+  }
+  return 1;
+}
+
+sub ParseProfileURL {
+  my $profile_name = shift;
+
+  if (!defined($profile_name) || $profile_name eq "") {
+    return ();
+  }
+
+  # Split profile URL - matches all non-empty strings, so no test.
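+  # e.g. a bare "myhost:8080" (no scheme, no path) yields host "myhost",
+  # baseurl "http://myhost:8080" and profile path "/".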
+  $profile_name =~ m,^(https?://)?([^/]+)(.*?)(/|$PROFILES)?$,;
+
+  my $proto = $1 || "http://";
+  my $hostport = $2;
+  my $prefix = $3;
+  my $profile = $4 || "/";
+
+  my $host = $hostport;
+  $host =~ s/:.*//;
+
+  my $baseurl = "$proto$hostport$prefix";
+  return ($host, $baseurl, $profile);
+}
+
+# We fetch symbols from the first profile argument.
+sub SymbolPageURL {
+  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
+  return "$baseURL$SYMBOL_PAGE";
+}
+
+sub FetchProgramName() {
+  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
+  my $url = "$baseURL$PROGRAM_NAME_PAGE";
+  my $command_line = ShellEscape(@URL_FETCHER, $url);
+  open(CMDLINE, "$command_line |") or error($command_line);
+  my $cmdline = <CMDLINE>;
+  $cmdline =~ s/\r//g;   # turn windows-looking lines into unix-looking lines
+  close(CMDLINE);
+  error("Failed to get program name from $url\n") unless defined($cmdline);
+  $cmdline =~ s/\x00.+//;  # Remove argv[1] and the following arguments.
+  $cmdline =~ s!\n!!g;  # Remove LFs.
+  return $cmdline;
+}
+
+# curl's -L (--location) option isn't reliable, at least as of version
+# 7.12.3: curl forgets to post data when it follows a redirection.  This
+# function works around that by resolving the redirect ourselves.
+# Redirection happens on borg hosts.
+sub ResolveRedirectionForCurl {
+  my $url = shift;
+  my $command_line = ShellEscape(@URL_FETCHER, "--head", $url);
+  open(CMDLINE, "$command_line |") or error($command_line);
+  while (<CMDLINE>) {
+    s/\r//g;         # turn windows-looking lines into unix-looking lines
+    if (/^Location: (.*)/) {
+      $url = $1;
+    }
+  }
+  close(CMDLINE);
+  return $url;
+}
+
+# Add a timeout flag to URL_FETCHER.  Returns a new list.
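+# For example, with a 30 second timeout a curl-based fetcher ("curl", "-s")
+# becomes ("curl", "-s", "--max-time", "30").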
+sub AddFetchTimeout {
+  my $timeout = shift;
+  my @fetcher = @_;
+  if (defined($timeout)) {
+    if (join(" ", @fetcher) =~ m/\bcurl -s/) {
+      push(@fetcher, "--max-time", sprintf("%d", $timeout));
+    } elsif (join(" ", @fetcher) =~ m/\brpcget\b/) {
+      push(@fetcher, sprintf("--deadline=%d", $timeout));
+    }
+  }
+  return @fetcher;
+}
+
+# Reads a symbol map from the file handle given as the first argument, returning
+# the resulting symbol map.  Also processes variables relating to symbols.
+# Currently, the only variable processed is 'binary=<value>' which updates
+# $main::prog to have the correct program name.
+sub ReadSymbols {
+  my $in = shift;
+  my $map = {};
+  while (<$in>) {
+    s/\r//g;         # turn windows-looking lines into unix-looking lines
+    # Removes all the leading zeroes from the symbols, see comment below.
+    if (m/^0x0*([0-9a-f]+)\s+(.+)/) {
+      $map->{$1} = $2;
+    } elsif (m/^---/) {
+      last;
+    } elsif (m/^([a-z][^=]*)=(.*)$/ ) {
+      my ($variable, $value) = ($1, $2);
+      for ($variable, $value) {
+        s/^\s+//;
+        s/\s+$//;
+      }
+      if ($variable eq "binary") {
+        if ($main::prog ne $UNKNOWN_BINARY && $main::prog ne $value) {
+          printf STDERR ("Warning: Mismatched binary name '%s', using '%s'.\n",
+                         $main::prog, $value);
+        }
+        $main::prog = $value;
+      } else {
+        printf STDERR ("Ignoring unknown variable in symbols list: " .
+            "'%s' = '%s'\n", $variable, $value);
+      }
+    }
+  }
+  return $map;
+}
+
+sub URLEncode {
+  my $str = shift;
+  $str =~ s/([^A-Za-z0-9\-_.!~*'()])/ sprintf "%%%02x", ord $1 /eg;
+  return $str;
+}
+
+sub AppendSymbolFilterParams {
+  my $url = shift;
+  my @params = ();
+  if ($main::opt_retain ne '') {
+    push(@params, sprintf("retain=%s", URLEncode($main::opt_retain)));
+  }
+  if ($main::opt_exclude ne '') {
+    push(@params, sprintf("exclude=%s", URLEncode($main::opt_exclude)));
+  }
+  if (scalar @params > 0) {
+    $url = sprintf("%s?%s", $url, join("&", @params));
+  }
+  return $url;
+}
+
+# Fetches and processes symbols to prepare them for use in the profile output
+# code.  If the optional 'symbol_map' arg is not given, fetches symbols from
+# $SYMBOL_PAGE for all PC values found in profile.  Otherwise, the raw symbols
+# are assumed to have already been fetched into 'symbol_map' and are simply
+# extracted and processed.
+sub FetchSymbols {
+  my $pcset = shift;
+  my $symbol_map = shift;
+
+  my %seen = ();
+  my @pcs = grep { !$seen{$_}++ } keys(%$pcset);  # uniq
+
+  if (!defined($symbol_map)) {
+    my $post_data = join("+", sort((map {"0x" . "$_"} @pcs)));
+
+    open(POSTFILE, ">$main::tmpfile_sym");
+    print POSTFILE $post_data;
+    close(POSTFILE);
+
+    my $url = SymbolPageURL();
+
+    my $command_line;
+    if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) {
+      $url = ResolveRedirectionForCurl($url);
+      $url = AppendSymbolFilterParams($url);
+      $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym",
+                                  $url);
+    } else {
+      $url = AppendSymbolFilterParams($url);
+      $command_line = (ShellEscape(@URL_FETCHER, "--post", $url)
+                       . " < " . ShellEscape($main::tmpfile_sym));
+    }
+    # We use c++filt in case $SYMBOL_PAGE gives us mangled symbols.
+    my $escaped_cppfilt = ShellEscape($obj_tool_map{"c++filt"});
+    open(SYMBOL, "$command_line | $escaped_cppfilt |") or error($command_line);
+    $symbol_map = ReadSymbols(*SYMBOL{IO});
+    close(SYMBOL);
+  }
+
+  my $symbols = {};
+  foreach my $pc (@pcs) {
+    my $fullname;
+    # For 64-bit binaries, symbols are extracted with 8 leading zeroes.
+    # Then /symbol reads the long symbols in as uint64, and outputs
+    # the result with a "0x%08llx" format which gets rid of the zeroes.
+    # By removing all the leading zeroes in both $pc and the symbols from
+    # /symbol, the symbols match and are retrievable from the map.
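+    # For example, a pc of "00000000004a1b2c" is looked up as "4a1b2c".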
+    my $shortpc = $pc;
+    $shortpc =~ s/^0*//;
+    # Each line may have a list of names, which includes the function
+    # and also other functions it has inlined.  They are separated (in
+    # PrintSymbolizedProfile), by --, which is illegal in function names.
+    my $fullnames;
+    if (defined($symbol_map->{$shortpc})) {
+      $fullnames = $symbol_map->{$shortpc};
+    } else {
+      $fullnames = "0x" . $pc;  # Just use addresses
+    }
+    my $sym = [];
+    $symbols->{$pc} = $sym;
+    foreach my $fullname (split("--", $fullnames)) {
+      my $name = ShortFunctionName($fullname);
+      push(@{$sym}, $name, "?", $fullname);
+    }
+  }
+  return $symbols;
+}
+
+sub BaseName {
+  my $file_name = shift;
+  $file_name =~ s!^.*/!!;  # Remove directory name
+  return $file_name;
+}
+
+sub MakeProfileBaseName {
+  my ($binary_name, $profile_name) = @_;
+  my ($host, $baseURL, $path) = ParseProfileURL($profile_name);
+  my $binary_shortname = BaseName($binary_name);
+  return sprintf("%s.%s.%s",
+                 $binary_shortname, $main::op_time, $host);
+}
+
+sub FetchDynamicProfile {
+  my $binary_name = shift;
+  my $profile_name = shift;
+  my $fetch_name_only = shift;
+  my $encourage_patience = shift;
+
+  if (!IsProfileURL($profile_name)) {
+    return $profile_name;
+  } else {
+    my ($host, $baseURL, $path) = ParseProfileURL($profile_name);
+    if ($path eq "" || $path eq "/") {
+      # Missing type specifier defaults to cpu-profile
+      $path = $PROFILE_PAGE;
+    }
+
+    my $profile_file = MakeProfileBaseName($binary_name, $profile_name);
+
+    my $url = "$baseURL$path";
+    my $fetch_timeout = undef;
+    if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE/) {
+      if ($path =~ m/[?]/) {
+        $url .= "&";
+      } else {
+        $url .= "?";
+      }
+      $url .= sprintf("seconds=%d", $main::opt_seconds);
+      $fetch_timeout = $main::opt_seconds * 1.01 + 60;
+      # Set $profile_type for consumption by PrintSymbolizedProfile.
+      $main::profile_type = 'cpu';
+    } else {
+      # For non-CPU profiles, we add a type-extension to
+      # the target profile file name.
+      my $suffix = $path;
+      $suffix =~ s,/,.,g;
+      $profile_file .= $suffix;
+      # Set $profile_type for consumption by PrintSymbolizedProfile.
+      if ($path =~ m/$HEAP_PAGE/) {
+        $main::profile_type = 'heap';
+      } elsif ($path =~ m/$GROWTH_PAGE/) {
+        $main::profile_type = 'growth';
+      } elsif ($path =~ m/$CONTENTION_PAGE/) {
+        $main::profile_type = 'contention';
+      }
+    }
+
+    my $profile_dir = $ENV{"JEPROF_TMPDIR"} || ($ENV{HOME} . "/jeprof");
+    if (! -d $profile_dir) {
+      mkdir($profile_dir)
+          || die("Unable to create profile directory $profile_dir: $!\n");
+    }
+    my $tmp_profile = "$profile_dir/.tmp.$profile_file";
+    my $real_profile = "$profile_dir/$profile_file";
+
+    if ($fetch_name_only > 0) {
+      return $real_profile;
+    }
+
+    my @fetcher = AddFetchTimeout($fetch_timeout, @URL_FETCHER);
+    my $cmd = ShellEscape(@fetcher, $url) . " > " . ShellEscape($tmp_profile);
+    if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE|$CENSUSPROFILE_PAGE/){
+      print STDERR "Gathering CPU profile from $url for $main::opt_seconds seconds to\n  ${real_profile}\n";
+      if ($encourage_patience) {
+        print STDERR "Be patient...\n";
+      }
+    } else {
+      print STDERR "Fetching $path profile from $url to\n  ${real_profile}\n";
+    }
+
+    (system($cmd) == 0) || error("Failed to get profile: $cmd: $!\n");
+    (system("mv", $tmp_profile, $real_profile) == 0) || error("Unable to rename profile\n");
+    print STDERR "Wrote profile to $real_profile\n";
+    $main::collected_profile = $real_profile;
+    return $main::collected_profile;
+  }
+}
+
+# Collect profiles in parallel
+sub FetchDynamicProfiles {
+  my $items = scalar(@main::pfile_args);
+  my $levels = log($items) / log(2);
+
+  if ($items == 1) {
+    $main::profile_files[0] = FetchDynamicProfile($main::prog, $main::pfile_args[0], 0, 1);
+  } else {
+    # log() may round down, so make sure 2**levels covers every item
+    if ((2 ** $levels) < $items) {
+     $levels++;
+    }
+    my $count = scalar(@main::pfile_args);
+    for (my $i = 0; $i < $count; $i++) {
+      $main::profile_files[$i] = FetchDynamicProfile($main::prog, $main::pfile_args[$i], 1, 0);
+    }
+    print STDERR "Fetching $count profiles, Be patient...\n";
+    FetchDynamicProfilesRecurse($levels, 0, 0);
+    $main::collected_profile = join(" \\\n    ", @main::profile_files);
+  }
+}
+
+# Recursively fork a process to get enough processes
+# collecting profiles
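+# Each level of the fork tree doubles the number of worker processes; a leaf
+# either fetches the profile at its $position or does nothing when $position
+# is past the end of @main::pfile_args.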
+sub FetchDynamicProfilesRecurse {
+  my $maxlevel = shift;
+  my $level = shift;
+  my $position = shift;
+
+  if (my $pid = fork()) {
+    $position = 0 | ($position << 1);
+    TryCollectProfile($maxlevel, $level, $position);
+    wait;
+  } else {
+    $position = 1 | ($position << 1);
+    TryCollectProfile($maxlevel, $level, $position);
+    cleanup();
+    exit(0);
+  }
+}
+
+# Collect a single profile
+sub TryCollectProfile {
+  my $maxlevel = shift;
+  my $level = shift;
+  my $position = shift;
+
+  if ($level >= ($maxlevel - 1)) {
+    if ($position < scalar(@main::pfile_args)) {
+      FetchDynamicProfile($main::prog, $main::pfile_args[$position], 0, 0);
+    }
+  } else {
+    FetchDynamicProfilesRecurse($maxlevel, $level+1, $position);
+  }
+}
+
+##### Parsing code #####
+
+# Provide a small streaming-read module to handle very large
+# cpu-profile files.  Stream in chunks along a sliding window.
+# Provides an interface to get one 'slot', correctly handling
+# endian-ness differences.  A slot is one 32-bit or 64-bit word
+# (depending on the input profile).  We tell endianness and bit-size
+# for the profile by looking at the first 8 bytes: in cpu profiles,
+# the second slot is always 3 (we'll accept anything that's not 0).
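+# For example, a 32-bit little-endian profile starts with the bytes
+# 00 00 00 00 03 00 00 00, while the first 8-byte slot of a 64-bit profile
+# is all zeroes.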
+BEGIN {
+  package CpuProfileStream;
+
+  sub new {
+    my ($class, $file, $fname) = @_;
+    my $self = { file        => $file,
+                 base        => 0,
+                 stride      => 512 * 1024,   # must be a multiple of bitsize/8
+                 slots       => [],
+                 unpack_code => "",           # N for big-endian, V for little
+                 perl_is_64bit => 1,          # matters if profile is 64-bit
+    };
+    bless $self, $class;
+    # Let unittests adjust the stride
+    if ($main::opt_test_stride > 0) {
+      $self->{stride} = $main::opt_test_stride;
+    }
+    # Read the first two slots to figure out bitsize and endianness.
+    my $slots = $self->{slots};
+    my $str;
+    read($self->{file}, $str, 8);
+    # Set the global $address_length based on what we see here.
+    # 8 is 32-bit (8 hexadecimal chars); 16 is 64-bit (16 hexadecimal chars).
+    $address_length = ($str eq (chr(0)x8)) ? 16 : 8;
+    if ($address_length == 8) {
+      if (substr($str, 6, 2) eq chr(0)x2) {
+        $self->{unpack_code} = 'V';  # Little-endian.
+      } elsif (substr($str, 4, 2) eq chr(0)x2) {
+        $self->{unpack_code} = 'N';  # Big-endian
+      } else {
+        ::error("$fname: header size >= 2**16\n");
+      }
+      @$slots = unpack($self->{unpack_code} . "*", $str);
+    } else {
+      # If we're a 64-bit profile, check if we're a 64-bit-capable
+      # perl.  Otherwise, each slot will be represented as a float
+      # instead of an int64, losing precision and making all the
+      # 64-bit addresses wrong.  We won't complain yet, but will
+      # later if we ever see a value that doesn't fit in 32 bits.
+      my $has_q = 0;
+      eval { $has_q = pack("Q", "1") ? 1 : 1; };
+      if (!$has_q) {
+        $self->{perl_is_64bit} = 0;
+      }
+      read($self->{file}, $str, 8);
+      if (substr($str, 4, 4) eq chr(0)x4) {
+        # We'd love to use 'Q', but it's a) not universal, b) not endian-proof.
+        $self->{unpack_code} = 'V';  # Little-endian.
+      } elsif (substr($str, 0, 4) eq chr(0)x4) {
+        $self->{unpack_code} = 'N';  # Big-endian
+      } else {
+        ::error("$fname: header size >= 2**32\n");
+      }
+      my @pair = unpack($self->{unpack_code} . "*", $str);
+      # Since we know one of the pair is 0, it's fine to just add them.
+      @$slots = (0, $pair[0] + $pair[1]);
+    }
+    return $self;
+  }
+
+  # Load more data when we access slots->get(X) which is not yet in memory.
+  sub overflow {
+    my ($self) = @_;
+    my $slots = $self->{slots};
+    $self->{base} += $#$slots + 1;   # skip over data we're replacing
+    my $str;
+    read($self->{file}, $str, $self->{stride});
+    if ($address_length == 8) {      # the 32-bit case
+      # This is the easy case: unpack provides 32-bit unpacking primitives.
+      @$slots = unpack($self->{unpack_code} . "*", $str);
+    } else {
+      # We need to unpack 32 bits at a time and combine.
+      my @b32_values = unpack($self->{unpack_code} . "*", $str);
+      my @b64_values = ();
+      for (my $i = 0; $i < $#b32_values; $i += 2) {
+        # TODO(csilvers): if this is a 32-bit perl, the math below
+        #    could end up in a too-large int, which perl will promote
+        #    to a double, losing necessary precision.  Deal with that.
+        #    Right now, we just die.
+        my ($lo, $hi) = ($b32_values[$i], $b32_values[$i+1]);
+        if ($self->{unpack_code} eq 'N') {    # big-endian
+          ($lo, $hi) = ($hi, $lo);
+        }
+        my $value = $lo + $hi * (2**32);
+        if (!$self->{perl_is_64bit} &&   # check value is exactly represented
+            (($value % (2**32)) != $lo || int($value / (2**32)) != $hi)) {
+          ::error("Need a 64-bit perl to process this 64-bit profile.\n");
+        }
+        push(@b64_values, $value);
+      }
+      @$slots = @b64_values;
+    }
+  }
+
+  # Access the i-th long in the file (logically), or -1 at EOF.
+  sub get {
+    my ($self, $idx) = @_;
+    my $slots = $self->{slots};
+    while ($#$slots >= 0) {
+      if ($idx < $self->{base}) {
+        # The only time we expect a reference to $slots[$i - something]
+        # after referencing $slots[$i] is reading the very first header.
+        # Since $stride > |header|, that shouldn't cause any lookback
+        # errors.  And everything after the header is sequential.
+        print STDERR "Unexpected look-back reading CPU profile";
+        return -1;   # shrug, don't know what better to return
+      } elsif ($idx > $self->{base} + $#$slots) {
+        $self->overflow();
+      } else {
+        return $slots->[$idx - $self->{base}];
+      }
+    }
+    # If we get here, $slots is [], which means we've reached EOF
+    return -1;  # unique since slots is supposed to hold unsigned numbers
+  }
+}
+
+# Reads the top, 'header' section of a profile, and returns the last
+# line of the header, commonly called a 'header line'.  The header
+# section of a profile consists of zero or more 'command' lines that
+# are instructions to jeprof, which jeprof executes when reading the
+# header.  All 'command' lines start with a %.  After the command
+# lines is the 'header line', which is a profile-specific line that
+# indicates what type of profile it is, and perhaps other global
+# information about the profile.  For instance, here's a header line
+# for a heap profile:
+#   heap profile:     53:    38236 [  5525:  1284029] @ heapprofile
+# For historical reasons, the CPU profile does not contain a text-
+# readable header line.  If the profile looks like a CPU profile,
+# this function returns "".  If no header line could be found, this
+# function returns undef.
+#
+# The following commands are recognized:
+#   %warn -- emit the rest of this line to stderr, prefixed by 'WARNING:'
+#
+# The input file should be in binmode.
+sub ReadProfileHeader {
+  local *PROFILE = shift;
+  my $firstchar = "";
+  my $line = "";
+  read(PROFILE, $firstchar, 1);
+  seek(PROFILE, -1, 1);                    # unread the firstchar
+  if ($firstchar !~ /[[:print:]]/) {       # is not a text character
+    return "";
+  }
+  while (defined($line = <PROFILE>)) {
+    $line =~ s/\r//g;   # turn windows-looking lines into unix-looking lines
+    if ($line =~ /^%warn\s+(.*)/) {        # 'warn' command
+      # Note this matches both '%warn blah\n' and '%warn\n'.
+      print STDERR "WARNING: $1\n";        # print the rest of the line
+    } elsif ($line =~ /^%/) {
+      print STDERR "Ignoring unknown command from profile header: $line";
+    } else {
+      # End of commands, must be the header line.
+      return $line;
+    }
+  }
+  return undef;     # got to EOF without seeing a header line
+}
+
+sub IsSymbolizedProfileFile {
+  my $file_name = shift;
+  if (!(-e $file_name) || !(-r $file_name)) {
+    return 0;
+  }
+  # Check if the file contains a symbol-section marker.
+  open(TFILE, "<$file_name");
+  binmode TFILE;
+  my $firstline = ReadProfileHeader(*TFILE);
+  close(TFILE);
+  if (!$firstline) {
+    return 0;
+  }
+  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
+  my $symbol_marker = $&;
+  return $firstline =~ /^--- *$symbol_marker/;
+}
+
+# Parse profile generated by common/profiler.cc and return a reference
+# to a map:
+#      $result->{version}     Version number of profile file
+#      $result->{period}      Sampling period (in microseconds)
+#      $result->{profile}     Profile object
+#      $result->{threads}     Map of thread IDs to profile objects
+#      $result->{map}         Memory map info from profile
+#      $result->{pcs}         Hash of all PC values seen, key is hex address
+sub ReadProfile {
+  my $prog = shift;
+  my $fname = shift;
+  my $result;            # return value
+
+  $CONTENTION_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
+  my $contention_marker = $&;
+  $GROWTH_PAGE  =~ m,[^/]+$,;    # matches everything after the last slash
+  my $growth_marker = $&;
+  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
+  my $symbol_marker = $&;
+  $PROFILE_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
+  my $profile_marker = $&;
+  $HEAP_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
+  my $heap_marker = $&;
+
+  # Look at first line to see if it is a heap or a CPU profile.
+  # CPU profile may start with no header at all, and just binary data
+  # (starting with \0\0\0\0) -- in that case, don't try to read the
+  # whole firstline, since it may be gigabytes(!) of data.
+  open(PROFILE, "<$fname") || error("$fname: $!\n");
+  binmode PROFILE;      # New perls do UTF-8 processing
+  my $header = ReadProfileHeader(*PROFILE);
+  if (!defined($header)) {   # means "at EOF"
+    error("Profile is empty.\n");
+  }
+
+  my $symbols;
+  if ($header =~ m/^--- *$symbol_marker/o) {
+    # Verify that the user asked for a symbolized profile
+    if (!$main::use_symbolized_profile) {
+      # we have both a binary and symbolized profiles, abort
+      error("FATAL ERROR: Symbolized profile\n   $fname\ncannot be used with " .
+            "a binary arg. Try again without passing\n   $prog\n");
+    }
+    # Read the symbol section of the symbolized profile file.
+    $symbols = ReadSymbols(*PROFILE{IO});
+    # Read the next line to get the header for the remaining profile.
+    $header = ReadProfileHeader(*PROFILE) || "";
+  }
+
+  if ($header =~ m/^--- *($heap_marker|$growth_marker)/o) {
+    # Skip "--- ..." line for profile types that have their own headers.
+    $header = ReadProfileHeader(*PROFILE) || "";
+  }
+
+  $main::profile_type = '';
+
+  if ($header =~ m/^heap profile:.*$growth_marker/o) {
+    $main::profile_type = 'growth';
+    $result =  ReadHeapProfile($prog, *PROFILE, $header);
+  } elsif ($header =~ m/^heap profile:/) {
+    $main::profile_type = 'heap';
+    $result =  ReadHeapProfile($prog, *PROFILE, $header);
+  } elsif ($header =~ m/^heap/) {
+    $main::profile_type = 'heap';
+    $result = ReadThreadedHeapProfile($prog, $fname, $header);
+  } elsif ($header =~ m/^--- *$contention_marker/o) {
+    $main::profile_type = 'contention';
+    $result = ReadSynchProfile($prog, *PROFILE);
+  } elsif ($header =~ m/^--- *Stacks:/) {
+    print STDERR
+      "Old format contention profile: mistakenly reports " .
+      "condition variable signals as lock contentions.\n";
+    $main::profile_type = 'contention';
+    $result = ReadSynchProfile($prog, *PROFILE);
+  } elsif ($header =~ m/^--- *$profile_marker/) {
+    # the binary cpu profile data starts immediately after this line
+    $main::profile_type = 'cpu';
+    $result = ReadCPUProfile($prog, $fname, *PROFILE);
+  } else {
+    if (defined($symbols)) {
+      # a symbolized profile contains a format we don't recognize, bail out
+      error("$fname: Cannot recognize profile section after symbols.\n");
+    }
+    # no ascii header present -- must be a CPU profile
+    $main::profile_type = 'cpu';
+    $result = ReadCPUProfile($prog, $fname, *PROFILE);
+  }
+
+  close(PROFILE);
+
+  # if we got symbols along with the profile, return those as well
+  if (defined($symbols)) {
+    $result->{symbols} = $symbols;
+  }
+
+  return $result;
+}
+
+# Subtract one from caller pc so we map back to call instr.
+# However, don't do this if we're reading a symbolized profile
+# file, in which case the subtract-one was done when the file
+# was written.
+#
+# We apply the same logic to all readers, though ReadCPUProfile uses an
+# independent implementation.
+sub FixCallerAddresses {
+  my $stack = shift;
+  # --raw/http: Always subtract one from pc's, because PrintSymbolizedProfile()
+  # dumps unadjusted profiles.
+  {
+    $stack =~ /(\s)/;
+    my $delimiter = $1;
+    my @addrs = split(' ', $stack);
+    my @fixedaddrs;
+    $#fixedaddrs = $#addrs;
+    if ($#addrs >= 0) {
+      $fixedaddrs[0] = $addrs[0];
+    }
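+    # Keep the leaf pc (first entry) as-is; decrement each caller address so
+    # it maps back to the call instruction.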
+    for (my $i = 1; $i <= $#addrs; $i++) {
+      $fixedaddrs[$i] = AddressSub($addrs[$i], "0x1");
+    }
+    return join $delimiter, @fixedaddrs;
+  }
+}
+
+# CPU profile reader
+sub ReadCPUProfile {
+  my $prog = shift;
+  my $fname = shift;       # just used for logging
+  local *PROFILE = shift;
+  my $version;
+  my $period;
+  my $i;
+  my $profile = {};
+  my $pcs = {};
+
+  # Parse string into array of slots.
+  my $slots = CpuProfileStream->new(*PROFILE, $fname);
+
+  # Read header.  The current header version is a 5-element structure
+  # containing:
+  #   0: header count (always 0)
+  #   1: header "words" (after this one: 3)
+  #   2: format version (0)
+  #   3: sampling period (usec)
+  #   4: unused padding (always 0)
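+  # For example, a header of 0 3 0 10000 0 means format version 0, a
+  # 10000 usec sampling period, and samples starting at slot index 5.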
+  if ($slots->get(0) != 0 ) {
+    error("$fname: not a profile file, or old format profile file\n");
+  }
+  $i = 2 + $slots->get(1);
+  $version = $slots->get(2);
+  $period = $slots->get(3);
+  # Do some sanity checking on these header values.
+  if ($version > (2**32) || $period > (2**32) || $i > (2**32) || $i < 5) {
+    error("$fname: not a profile file, or corrupted profile file\n");
+  }
+
+  # Parse profile
+  while ($slots->get($i) != -1) {
+    my $n = $slots->get($i++);
+    my $d = $slots->get($i++);
+    if ($d > (2**16)) {  # TODO(csilvers): what's a reasonable max-stack-depth?
+      my $addr = sprintf("0%o", $i * ($address_length == 8 ? 4 : 8));
+      print STDERR "At index $i (address $addr):\n";
+      error("$fname: stack trace depth > 2**16\n");
+    }
+    if ($slots->get($i) == 0) {
+      # End of profile data marker
+      $i += $d;
+      last;
+    }
+
+    # Make key out of the stack entries
+    my @k = ();
+    for (my $j = 0; $j < $d; $j++) {
+      my $pc = $slots->get($i+$j);
+      # Subtract one from caller pc so we map back to call instr.
+      $pc--;
+      $pc = sprintf("%0*x", $address_length, $pc);
+      $pcs->{$pc} = 1;
+      push @k, $pc;
+    }
+
+    AddEntry($profile, (join "\n", @k), $n);
+    $i += $d;
+  }
+
+  # Parse map
+  my $map = '';
+  seek(PROFILE, $i * 4, 0);
+  read(PROFILE, $map, (stat PROFILE)[7]);
+
+  my $r = {};
+  $r->{version} = $version;
+  $r->{period} = $period;
+  $r->{profile} = $profile;
+  $r->{libs} = ParseLibraries($prog, $map, $pcs);
+  $r->{pcs} = $pcs;
+
+  return $r;
+}
+
+sub HeapProfileIndex {
+  my $index = 1;
+  if ($main::opt_inuse_space) {
+    $index = 1;
+  } elsif ($main::opt_inuse_objects) {
+    $index = 0;
+  } elsif ($main::opt_alloc_space) {
+    $index = 3;
+  } elsif ($main::opt_alloc_objects) {
+    $index = 2;
+  }
+  return $index;
+}
+
+sub ReadMappedLibraries {
+  my $fh = shift;
+  my $map = "";
+  # Read the /proc/self/maps data
+  while (<$fh>) {
+    s/\r//g;         # turn windows-looking lines into unix-looking lines
+    $map .= $_;
+  }
+  return $map;
+}
+
+sub ReadMemoryMap {
+  my $fh = shift;
+  my $map = "";
+  # Read /proc/self/maps data as formatted by DumpAddressMap()
+  my $buildvar = "";
+  while (<PROFILE>) {
+    s/\r//g;         # turn windows-looking lines into unix-looking lines
+    # Parse "build=<dir>" specification if supplied
+    if (m/^\s*build=(.*)\n/) {
+      $buildvar = $1;
+    }
+
+    # Expand "$build" variable if available
+    $_ =~ s/\$build\b/$buildvar/g;
+
+    $map .= $_;
+  }
+  return $map;
+}
+
+sub AdjustSamples {
+  my ($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2) = @_;
+  if ($sample_adjustment) {
+    if ($sampling_algorithm == 2) {
+      # Remote-heap version 2
+      # The sampling frequency is the rate of a Poisson process.
+      # This means that the probability of sampling an allocation of
+      # size X with sampling rate Y is 1 - exp(-X/Y)
+      if ($n1 != 0) {
+        my $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
+        my $scale_factor = 1/(1 - exp(-$ratio));
+        $n1 *= $scale_factor;
+        $s1 *= $scale_factor;
+      }
+      if ($n2 != 0) {
+        my $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
+        my $scale_factor = 1/(1 - exp(-$ratio));
+        $n2 *= $scale_factor;
+        $s2 *= $scale_factor;
+      }
+    } else {
+      # Remote-heap version 1
+      my $ratio;
+      $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
+      if ($ratio < 1) {
+        $n1 /= $ratio;
+        $s1 /= $ratio;
+      }
+      $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
+      if ($ratio < 1) {
+        $n2 /= $ratio;
+        $s2 /= $ratio;
+      }
+    }
+  }
+  return ($n1, $s1, $n2, $s2);
+}
+
+sub ReadHeapProfile {
+  my $prog = shift;
+  local *PROFILE = shift;
+  my $header = shift;
+
+  my $index = HeapProfileIndex();
+
+  # Find the type of this profile.  The header line looks like:
+  #    heap profile:   1246:  8800744 [  1246:  8800744] @ <heap-url>/266053
+  # There are two pairs <count: size>, the first inuse objects/space, and the
+  # second allocated objects/space.  This is followed optionally by a profile
+  # type, and if that is present, optionally by a sampling frequency.
+  # For remote heap profiles (v1):
+  # The interpretation of the sampling frequency is that the profiler, for
+  # each sample, calculates a uniformly distributed random integer less than
+  # the given value, and records the next sample after that many bytes have
+  # been allocated.  Therefore, the expected sample interval is half of the
+  # given frequency.  By default, if not specified, the expected sample
+  # interval is 128KB.  Only remote-heap-page profiles are adjusted for
+  # sample size.
+  # For remote heap profiles (v2):
+  # The sampling frequency is the rate of a Poisson process. This means that
+  # the probability of sampling an allocation of size X with sampling rate Y
+  # is 1 - exp(-X/Y)
+  # For version 2, a typical header line might look like this:
+  # heap profile:   1922: 127792360 [  1922: 127792360] @ <heap-url>_v2/524288
+  # the trailing number (524288) is the sampling rate. (Version 1 showed
+  # double the 'rate' here)
+  my $sampling_algorithm = 0;
+  my $sample_adjustment = 0;
+  chomp($header);
+  my $type = "unknown";
+  if ($header =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") {
+    if (defined($6) && ($6 ne '')) {
+      $type = $6;
+      my $sample_period = $8;
+      # $type is "heapprofile" for profiles generated by the
+      # heap-profiler, and either "heap" or "heap_v2" for profiles
+      # generated by sampling directly within tcmalloc.  It can also
+      # be "growth" for heap-growth profiles.  The first is typically
+      # found for profiles generated locally, and the others for
+      # remote profiles.
+      if (($type eq "heapprofile") || ($type !~ /heap/) ) {
+        # No need to adjust for the sampling rate with heap-profiler-derived data
+        $sampling_algorithm = 0;
+      } elsif ($type =~ /_v2/) {
+        $sampling_algorithm = 2;     # version 2 sampling
+        if (defined($sample_period) && ($sample_period ne '')) {
+          $sample_adjustment = int($sample_period);
+        }
+      } else {
+        $sampling_algorithm = 1;     # version 1 sampling
+        if (defined($sample_period) && ($sample_period ne '')) {
+          $sample_adjustment = int($sample_period)/2;
+        }
+      }
+    } else {
+      # We detect whether or not this is a remote-heap profile by checking
+      # that the total-allocated stats ($n2,$s2) are exactly the
+      # same as the in-use stats ($n1,$s1).  It is remotely conceivable
+      # that a non-remote-heap profile may pass this check, but it is hard
+      # to imagine how that could happen.
+      # In this case it's so old it's guaranteed to be remote-heap version 1.
+      my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
+      if (($n1 == $n2) && ($s1 == $s2)) {
+        # This is likely to be a remote-heap based sample profile
+        $sampling_algorithm = 1;
+      }
+    }
+  }
+
+  if ($sampling_algorithm > 0) {
+    # For remote-heap generated profiles, adjust the counts and sizes to
+    # account for the sample rate (we sample once every 128KB by default).
+    if ($sample_adjustment == 0) {
+      # Turn on profile adjustment.
+      $sample_adjustment = 128*1024;
+      print STDERR "Adjusting heap profiles for 1-in-128KB sampling rate\n";
+    } else {
+      printf STDERR ("Adjusting heap profiles for 1-in-%d sampling rate\n",
+                     $sample_adjustment);
+    }
+    if ($sampling_algorithm > 1) {
+      # We don't bother printing anything for the original version (version 1)
+      printf STDERR "Heap version $sampling_algorithm\n";
+    }
+  }
+
+  my $profile = {};
+  my $pcs = {};
+  my $map = "";
+
+  while (<PROFILE>) {
+    s/\r//g;         # turn windows-looking lines into unix-looking lines
+    if (/^MAPPED_LIBRARIES:/) {
+      $map .= ReadMappedLibraries(*PROFILE);
+      last;
+    }
+
+    if (/^--- Memory map:/) {
+      $map .= ReadMemoryMap(*PROFILE);
+      last;
+    }
+
+    # Read entry of the form:
+    #  <count1>: <bytes1> [<count2>: <bytes2>] @ a1 a2 a3 ... an
+    s/^\s*//;
+    s/\s*$//;
+    if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) {
+      my $stack = $5;
+      my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
+      my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
+                                 $n1, $s1, $n2, $s2);
+      AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
+    }
+  }
+
+  my $r = {};
+  $r->{version} = "heap";
+  $r->{period} = 1;
+  $r->{profile} = $profile;
+  $r->{libs} = ParseLibraries($prog, $map, $pcs);
+  $r->{pcs} = $pcs;
+  return $r;
+}
+
+sub ReadThreadedHeapProfile {
+  my ($prog, $fname, $header) = @_;
+
+  my $index = HeapProfileIndex();
+  my $sampling_algorithm = 0;
+  my $sample_adjustment = 0;
+  chomp($header);
+  my $type = "unknown";
+  # Assuming a very specific type of header for now.
+  if ($header =~ m"^heap_v2/(\d+)") {
+    $type = "_v2";
+    $sampling_algorithm = 2;
+    $sample_adjustment = int($1);
+  }
+  if ($type ne "_v2" || !defined($sample_adjustment)) {
+    die "Threaded heap profiles require v2 sampling with a sample rate\n";
+  }
+
+  my $profile = {};
+  my $thread_profiles = {};
+  my $pcs = {};
+  my $map = "";
+  my $stack = "";
+
+  while (<PROFILE>) {
+    s/\r//g;
+    if (/^MAPPED_LIBRARIES:/) {
+      $map .= ReadMappedLibraries(*PROFILE);
+      last;
+    }
+
+    if (/^--- Memory map:/) {
+      $map .= ReadMemoryMap(*PROFILE);
+      last;
+    }
+
+    # Read entry of the form:
+    # @ a1 a2 ... an
+    #   t*: <count1>: <bytes1> [<count2>: <bytes2>]
+    #   t1: <count1>: <bytes1> [<count2>: <bytes2>]
+    #     ...
+    #   tn: <count1>: <bytes1> [<count2>: <bytes2>]
+    s/^\s*//;
+    s/\s*$//;
+    if (m/^@\s+(.*)$/) {
+      $stack = $1;
+    } elsif (m/^\s*(t(\*|\d+)):\s+(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]$/) {
+      if ($stack eq "") {
+        # Still in the header, so this is just a per-thread summary.
+        next;
+      }
+      my $thread = $2;
+      my ($n1, $s1, $n2, $s2) = ($3, $4, $5, $6);
+      my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
+                                 $n1, $s1, $n2, $s2);
+      if ($thread eq "*") {
+        AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
+      } else {
+        if (!exists($thread_profiles->{$thread})) {
+          $thread_profiles->{$thread} = {};
+        }
+        AddEntries($thread_profiles->{$thread}, $pcs,
+                   FixCallerAddresses($stack), $counts[$index]);
+      }
+    }
+  }
+
+  my $r = {};
+  $r->{version} = "heap";
+  $r->{period} = 1;
+  $r->{profile} = $profile;
+  $r->{threads} = $thread_profiles;
+  $r->{libs} = ParseLibraries($prog, $map, $pcs);
+  $r->{pcs} = $pcs;
+  return $r;
+}
+
+sub ReadSynchProfile {
+  my $prog = shift;
+  local *PROFILE = shift;
+  my $header = shift;
+
+  my $map = '';
+  my $profile = {};
+  my $pcs = {};
+  my $sampling_period = 1;
+  my $cyclespernanosec = 2.8;   # Default assumption for old binaries
+  my $seen_clockrate = 0;
+  my $line;
+
+  my $index = 0;
+  if ($main::opt_total_delay) {
+    $index = 0;
+  } elsif ($main::opt_contentions) {
+    $index = 1;
+  } elsif ($main::opt_mean_delay) {
+    $index = 2;
+  }
+
+  while ( $line = <PROFILE> ) {
+    $line =~ s/\r//g;      # turn windows-looking lines into unix-looking lines
+    if ( $line =~ /^\s*(\d+)\s+(\d+) \@\s*(.*?)\s*$/ ) {
+      my ($cycles, $count, $stack) = ($1, $2, $3);
+
+      # Convert cycles to nanoseconds
+      $cycles /= $cyclespernanosec;
+
+      # Adjust for sampling done by application
+      $cycles *= $sampling_period;
+      $count *= $sampling_period;
+
+      my @values = ($cycles, $count, $cycles / $count);
+      AddEntries($profile, $pcs, FixCallerAddresses($stack), $values[$index]);
+
+    } elsif ( $line =~ /^(slow release).*thread \d+  \@\s*(.*?)\s*$/ ||
+              $line =~ /^\s*(\d+) \@\s*(.*?)\s*$/ ) {
+      my ($cycles, $stack) = ($1, $2);
+      if ($cycles !~ /^\d+$/) {
+        next;
+      }
+
+      # Convert cycles to nanoseconds
+      $cycles /= $cyclespernanosec;
+
+      # Adjust for sampling done by application
+      $cycles *= $sampling_period;
+
+      AddEntries($profile, $pcs, FixCallerAddresses($stack), $cycles);
+
+    } elsif ( $line =~ m/^([a-z][^=]*)=(.*)$/ ) {
+      my ($variable, $value) = ($1,$2);
+      for ($variable, $value) {
+        s/^\s+//;
+        s/\s+$//;
+      }
+      if ($variable eq "cycles/second") {
+        $cyclespernanosec = $value / 1e9;
+        $seen_clockrate = 1;
+      } elsif ($variable eq "sampling period") {
+        $sampling_period = $value;
+      } elsif ($variable eq "ms since reset") {
+        # Currently nothing is done with this value in jeprof
+        # So we just silently ignore it for now
+      } elsif ($variable eq "discarded samples") {
+        # Currently nothing is done with this value in jeprof
+        # So we just silently ignore it for now
+      } else {
+        printf STDERR ("Ignoring unknown variable in /contention output: " .
+                       "'%s' = '%s'\n",$variable,$value);
+      }
+    } else {
+      # Memory map entry
+      $map .= $line;
+    }
+  }
+
+  if (!$seen_clockrate) {
+    printf STDERR ("No cycles/second entry in profile; Guessing %.1f GHz\n",
+                   $cyclespernanosec);
+  }
+
+  my $r = {};
+  $r->{version} = 0;
+  $r->{period} = $sampling_period;
+  $r->{profile} = $profile;
+  $r->{libs} = ParseLibraries($prog, $map, $pcs);
+  $r->{pcs} = $pcs;
+  return $r;
+}
+
+# Given a hex value in the form "0x1abcd" or "1abcd", return either
+# "0001abcd" or "000000000001abcd", depending on the current (global)
+# address length.
+sub HexExtend {
+  my $addr = shift;
+
+  $addr =~ s/^(0x)?0*//;
+  my $zeros_needed = $address_length - length($addr);
+  if ($zeros_needed < 0) {
+    printf STDERR "Warning: address $addr is longer than address length $address_length\n";
+    return $addr;
+  }
+  return ("0" x $zeros_needed) . $addr;
+}
+
+##### Symbol extraction #####
+
+# Aggressively search the lib_prefix values for the given library
+# If all else fails, just return the name of the library unmodified.
+# If the lib_prefix is "/my/path,/other/path" and $file is "/lib/dir/mylib.so"
+# it will search the following locations in this order, until it finds a file:
+#   /my/path/lib/dir/mylib.so
+#   /other/path/lib/dir/mylib.so
+#   /my/path/dir/mylib.so
+#   /other/path/dir/mylib.so
+#   /my/path/mylib.so
+#   /other/path/mylib.so
+#   /lib/dir/mylib.so              (returned as last resort)
+sub FindLibrary {
+  my $file = shift;
+  my $suffix = $file;
+
+  # Search for the library as described above
+  do {
+    foreach my $prefix (@prefix_list) {
+      my $fullpath = $prefix . $suffix;
+      if (-e $fullpath) {
+        return $fullpath;
+      }
+    }
+  } while ($suffix =~ s|^/[^/]+/|/|);
+  return $file;
+}
+
+# Return path to library with debugging symbols.
+# For libc libraries, the copy in /usr/lib/debug contains debugging symbols
+sub DebuggingLibrary {
+  my $file = shift;
+  if ($file =~ m|^/|) {
+      if (-f "/usr/lib/debug$file") {
+        return "/usr/lib/debug$file";
+      } elsif (-f "/usr/lib/debug$file.debug") {
+        return "/usr/lib/debug$file.debug";
+      }
+  }
+  return undef;
+}
+
+# Parse text section header of a library using objdump
+sub ParseTextSectionHeaderFromObjdump {
+  my $lib = shift;
+
+  my $size = undef;
+  my $vma;
+  my $file_offset;
+  # Get objdump output from the library file to figure out how to
+  # map between mapped addresses and addresses in the library.
+  my $cmd = ShellEscape($obj_tool_map{"objdump"}, "-h", $lib);
+  open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
+  while (<OBJDUMP>) {
+    s/\r//g;         # turn windows-looking lines into unix-looking lines
+    # Idx Name          Size      VMA       LMA       File off  Algn
+    #  10 .text         00104b2c  420156f0  420156f0  000156f0  2**4
+    # For 64-bit objects, VMA and LMA will be 16 hex digits, size and file
+    # offset may still be 8.  But AddressSub below will still handle that.
+    my @x = split;
+    if (($#x >= 6) && ($x[1] eq '.text')) {
+      $size = $x[2];
+      $vma = $x[3];
+      $file_offset = $x[5];
+      last;
+    }
+  }
+  close(OBJDUMP);
+
+  if (!defined($size)) {
+    return undef;
+  }
+
+  my $r = {};
+  $r->{size} = $size;
+  $r->{vma} = $vma;
+  $r->{file_offset} = $file_offset;
+
+  return $r;
+}
+
+# Parse text section header of a library using otool (on OS X)
+sub ParseTextSectionHeaderFromOtool {
+  my $lib = shift;
+
+  my $size = undef;
+  my $vma = undef;
+  my $file_offset = undef;
+  # Get otool output from the library file to figure out how to
+  # map between mapped addresses and addresses in the library.
+  my $command = ShellEscape($obj_tool_map{"otool"}, "-l", $lib);
+  open(OTOOL, "$command |") || error("$command: $!\n");
+  my $cmd = "";
+  my $sectname = "";
+  my $segname = "";
+  foreach my $line (<OTOOL>) {
+    $line =~ s/\r//g;      # turn windows-looking lines into unix-looking lines
+    # Load command <#>
+    #       cmd LC_SEGMENT
+    # [...]
+    # Section
+    #   sectname __text
+    #    segname __TEXT
+    #       addr 0x000009f8
+    #       size 0x00018b9e
+    #     offset 2552
+    #      align 2^2 (4)
+    # We will need to strip off the leading 0x from the hex addresses,
+    # and convert the offset into hex.
+    if ($line =~ /Load command/) {
+      $cmd = "";
+      $sectname = "";
+      $segname = "";
+    } elsif ($line =~ /Section/) {
+      $sectname = "";
+      $segname = "";
+    } elsif ($line =~ /cmd (\w+)/) {
+      $cmd = $1;
+    } elsif ($line =~ /sectname (\w+)/) {
+      $sectname = $1;
+    } elsif ($line =~ /segname (\w+)/) {
+      $segname = $1;
+    } elsif (!(($cmd eq "LC_SEGMENT" || $cmd eq "LC_SEGMENT_64") &&
+               $sectname eq "__text" &&
+               $segname eq "__TEXT")) {
+      next;
+    } elsif ($line =~ /\baddr 0x([0-9a-fA-F]+)/) {
+      $vma = $1;
+    } elsif ($line =~ /\bsize 0x([0-9a-fA-F]+)/) {
+      $size = $1;
+    } elsif ($line =~ /\boffset ([0-9]+)/) {
+      $file_offset = sprintf("%016x", $1);
+    }
+    if (defined($vma) && defined($size) && defined($file_offset)) {
+      last;
+    }
+  }
+  close(OTOOL);
+
+  if (!defined($vma) || !defined($size) || !defined($file_offset)) {
+     return undef;
+  }
+
+  my $r = {};
+  $r->{size} = $size;
+  $r->{vma} = $vma;
+  $r->{file_offset} = $file_offset;
+
+  return $r;
+}
+
+sub ParseTextSectionHeader {
+  # obj_tool_map("otool") is only defined if we're in a Mach-O environment
+  if (defined($obj_tool_map{"otool"})) {
+    my $r = ParseTextSectionHeaderFromOtool(@_);
+    if (defined($r)){
+      return $r;
+    }
+  }
+  # If otool doesn't work, or we don't have it, fall back to objdump
+  return ParseTextSectionHeaderFromObjdump(@_);
+}
+
+# Split /proc/pid/maps dump into a list of libraries
+sub ParseLibraries {
+  return if $main::use_symbol_page;  # We don't need libraries info.
+  my $prog = shift;
+  my $map = shift;
+  my $pcs = shift;
+
+  my $result = [];
+  my $h = "[a-f0-9]+";
+  my $zero_offset = HexExtend("0");
+
+  my $buildvar = "";
+  foreach my $l (split("\n", $map)) {
+    if ($l =~ m/^\s*build=(.*)$/) {
+      $buildvar = $1;
+    }
+
+    my $start;
+    my $finish;
+    my $offset;
+    my $lib;
+    if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.(so|dll|dylib|bundle)((\.\d+)+\w*(\.\d+){0,3})?)$/i) {
+      # Full line from /proc/self/maps.  Example:
+      #   40000000-40015000 r-xp 00000000 03:01 12845071   /lib/ld-2.3.2.so
+      $start = HexExtend($1);
+      $finish = HexExtend($2);
+      $offset = HexExtend($3);
+      $lib = $4;
+      $lib =~ s|\\|/|g;     # turn windows-style paths into unix-style paths
+    } elsif ($l =~ /^\s*($h)-($h):\s*(\S+\.so(\.\d+)*)/) {
+      # Cooked line from DumpAddressMap.  Example:
+      #   40000000-40015000: /lib/ld-2.3.2.so
+      $start = HexExtend($1);
+      $finish = HexExtend($2);
+      $offset = $zero_offset;
+      $lib = $3;
+    }
+    # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in
+    # function procfs_doprocmap (sys/fs/procfs/procfs_map.c)
+    #
+    # Example:
+    # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.so.1 NCH -1
+    elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) {
+      $start = HexExtend($1);
+      $finish = HexExtend($2);
+      $offset = $zero_offset;
+      $lib = FindLibrary($5);
+
+    } else {
+      next;
+    }
+
+    # Expand "$build" variable if available
+    $lib =~ s/\$build\b/$buildvar/g;
+
+    $lib = FindLibrary($lib);
+
+    # Check for pre-relocated libraries, which use pre-relocated symbol tables
+    # and thus require adjusting the offset that we'll use to translate
+    # VM addresses into symbol table addresses.
+    # Only do this if we're not going to fetch the symbol table from a
+    # debugging copy of the library.
+    if (!DebuggingLibrary($lib)) {
+      my $text = ParseTextSectionHeader($lib);
+      if (defined($text)) {
+         my $vma_offset = AddressSub($text->{vma}, $text->{file_offset});
+         $offset = AddressAdd($offset, $vma_offset);
+      }
+    }
+
+    if($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; }
+    push(@{$result}, [$lib, $start, $finish, $offset]);
+  }
+
+  # Append special entry for additional library (not relocated)
+  if ($main::opt_lib ne "") {
+    my $text = ParseTextSectionHeader($main::opt_lib);
+    if (defined($text)) {
+       my $start = $text->{vma};
+       my $finish = AddressAdd($start, $text->{size});
+
+       push(@{$result}, [$main::opt_lib, $start, $finish, $start]);
+    }
+  }
+
+  # Append special entry for the main program.  This covers
+  # 0..max_pc_value_seen, so that we assume pc values not found in one
+  # of the library ranges will be treated as coming from the main
+  # program binary.
+  my $min_pc = HexExtend("0");
+  my $max_pc = $min_pc;          # find the maximal PC value in any sample
+  foreach my $pc (keys(%{$pcs})) {
+    if (HexExtend($pc) gt $max_pc) { $max_pc = HexExtend($pc); }
+  }
+  push(@{$result}, [$prog, $min_pc, $max_pc, $zero_offset]);
+
+  return $result;
+}
+
+# Add two hex addresses of length $address_length.
+# Run jeprof --test for unit test if this is changed.
+sub AddressAdd {
+  my $addr1 = shift;
+  my $addr2 = shift;
+  my $sum;
+
+  if ($address_length == 8) {
+    # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
+    $sum = (hex($addr1)+hex($addr2)) % (0x10000000 * 16);
+    return sprintf("%08x", $sum);
+
+  } else {
+    # Do the addition in 7-nibble chunks to trivialize carry handling.
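+    # e.g. 00000000ffffffff + 0000000000000001 = 0000000100000000; the carry
+    # out of the low 7-nibble chunk propagates into the middle chunk.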
+
+    if ($main::opt_debug and $main::opt_test) {
+      print STDERR "AddressAdd $addr1 + $addr2 = ";
+    }
+
+    my $a1 = substr($addr1,-7);
+    $addr1 = substr($addr1,0,-7);
+    my $a2 = substr($addr2,-7);
+    $addr2 = substr($addr2,0,-7);
+    $sum = hex($a1) + hex($a2);
+    my $c = 0;
+    if ($sum > 0xfffffff) {
+      $c = 1;
+      $sum -= 0x10000000;
+    }
+    my $r = sprintf("%07x", $sum);
+
+    $a1 = substr($addr1,-7);
+    $addr1 = substr($addr1,0,-7);
+    $a2 = substr($addr2,-7);
+    $addr2 = substr($addr2,0,-7);
+    $sum = hex($a1) + hex($a2) + $c;
+    $c = 0;
+    if ($sum > 0xfffffff) {
+      $c = 1;
+      $sum -= 0x10000000;
+    }
+    $r = sprintf("%07x", $sum) . $r;
+
+    $sum = hex($addr1) + hex($addr2) + $c;
+    if ($sum > 0xff) { $sum -= 0x100; }
+    $r = sprintf("%02x", $sum) . $r;
+
+    if ($main::opt_debug and $main::opt_test) { print STDERR "$r\n"; }
+
+    return $r;
+  }
+}
+
+
+# Subtract two hex addresses of length $address_length.
+# Run jeprof --test for unit test if this is changed.
+sub AddressSub {
+  my $addr1 = shift;
+  my $addr2 = shift;
+  my $diff;
+
+  if ($address_length == 8) {
+    # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
+    $diff = (hex($addr1)-hex($addr2)) % (0x10000000 * 16);
+    return sprintf("%08x", $diff);
+
+  } else {
+    # Do the addition in 7-nibble chunks to trivialize borrow handling.
+    # if ($main::opt_debug) { print STDERR "AddressSub $addr1 - $addr2 = "; }
+
+    my $a1 = hex(substr($addr1,-7));
+    $addr1 = substr($addr1,0,-7);
+    my $a2 = hex(substr($addr2,-7));
+    $addr2 = substr($addr2,0,-7);
+    my $b = 0;
+    if ($a2 > $a1) {
+      $b = 1;
+      $a1 += 0x10000000;
+    }
+    $diff = $a1 - $a2;
+    my $r = sprintf("%07x", $diff);
+
+    $a1 = hex(substr($addr1,-7));
+    $addr1 = substr($addr1,0,-7);
+    $a2 = hex(substr($addr2,-7)) + $b;
+    $addr2 = substr($addr2,0,-7);
+    $b = 0;
+    if ($a2 > $a1) {
+      $b = 1;
+      $a1 += 0x10000000;
+    }
+    $diff = $a1 - $a2;
+    $r = sprintf("%07x", $diff) . $r;
+
+    $a1 = hex($addr1);
+    $a2 = hex($addr2) + $b;
+    if ($a2 > $a1) { $a1 += 0x100; }
+    $diff = $a1 - $a2;
+    $r = sprintf("%02x", $diff) . $r;
+
+    # if ($main::opt_debug) { print STDERR "$r\n"; }
+
+    return $r;
+  }
+}
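+# Worked example (illustrative, 16-nibble mode): subtracting
+# "0000000000000001" from "0000000100000000" borrows from the middle
+# chunk, yielding "00000000ffffffff".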
+
+# Increment a hex address of length $address_length.
+# Run jeprof --test for unit test if this is changed.
+sub AddressInc {
+  my $addr = shift;
+  my $sum;
+
+  if ($address_length == 8) {
+    # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
+    $sum = (hex($addr)+1) % (0x10000000 * 16);
+    return sprintf("%08x", $sum);
+
+  } else {
+    # Do the addition in 7-nibble chunks to trivialize carry handling.
+    # We are always doing this to step through the addresses in a function,
+    # and will almost never overflow the first chunk, so we check for this
+    # case and exit early.
+
+    # if ($main::opt_debug) { print STDERR "AddressInc $addr1 = "; }
+
+    my $a1 = substr($addr,-7);
+    $addr = substr($addr,0,-7);
+    $sum = hex($a1) + 1;
+    my $r = sprintf("%07x", $sum);
+    if ($sum <= 0xfffffff) {
+      $r = $addr . $r;
+      # if ($main::opt_debug) { print STDERR "$r\n"; }
+      return HexExtend($r);
+    } else {
+      $r = "0000000";
+    }
+
+    $a1 = substr($addr,-7);
+    $addr = substr($addr,0,-7);
+    $sum = hex($a1) + 1;
+    $r = sprintf("%07x", $sum) . $r;
+    if ($sum <= 0xfffffff) {
+      $r = $addr . $r;
+      # if ($main::opt_debug) { print STDERR "$r\n"; }
+      return HexExtend($r);
+    } else {
+      $r = "00000000000000";
+    }
+
+    $sum = hex($addr) + 1;
+    if ($sum > 0xff) { $sum -= 0x100; }
+    $r = sprintf("%02x", $sum) . $r;
+
+    # if ($main::opt_debug) { print STDERR "$r\n"; }
+    return $r;
+  }
+}
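+# Worked example (illustrative): AddressInc("000000000ffffffe") only has to
+# touch the low chunk and returns "000000000fffffff" via the early-return
+# path; the higher chunks are recomputed only when the low chunk overflows.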
+
+# Extract symbols for all PC values found in profile
+sub ExtractSymbols {
+  my $libs = shift;
+  my $pcset = shift;
+
+  my $symbols = {};
+
+  # Map each PC value to the containing library.  To make this faster,
+  # we sort libraries by their starting pc value (highest first), and
+  # advance through the libraries as we advance the pc.  Sometimes the
+  # addresses of libraries may overlap with the addresses of the main
+  # binary, so to make sure the libraries 'win', we iterate over the
+  # libraries in reverse order (which assumes the binary doesn't start
+  # in the middle of a library, which seems a fair assumption).
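+  # For instance (hypothetical ranges): with one library spanning
+  # 0x7f0000200000..0x7f0000400000 and another 0x7f0000000000..0x7f0000200000,
+  # the loop below first claims the pcs that fall in the higher range, then
+  # the lower one; whatever remains in @pcs is eventually attributed to the
+  # main-binary entry that was appended last.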
+  my @pcs = (sort { $a cmp $b } keys(%{$pcset}));  # pcset is 0-extended strings
+  foreach my $lib (sort {$b->[1] cmp $a->[1]} @{$libs}) {
+    my $libname = $lib->[0];
+    my $start = $lib->[1];
+    my $finish = $lib->[2];
+    my $offset = $lib->[3];
+
+    # Use debug library if it exists
+    my $debug_libname = DebuggingLibrary($libname);
+    if ($debug_libname) {
+        $libname = $debug_libname;
+    }
+
+    # Get list of pcs that belong in this library.
+    my $contained = [];
+    my ($start_pc_index, $finish_pc_index);
+    # Find smallest finish_pc_index such that $finish < $pc[$finish_pc_index].
+    for ($finish_pc_index = $#pcs + 1; $finish_pc_index > 0;
+         $finish_pc_index--) {
+      last if $pcs[$finish_pc_index - 1] le $finish;
+    }
+    # Find smallest start_pc_index such that $start <= $pc[$start_pc_index].
+    for ($start_pc_index = $finish_pc_index; $start_pc_index > 0;
+         $start_pc_index--) {
+      last if $pcs[$start_pc_index - 1] lt $start;
+    }
+    # This keeps PC values higher than $pc[$finish_pc_index] in @pcs,
+    # in case there are overlaps in libraries and the main binary.
+    @{$contained} = splice(@pcs, $start_pc_index,
+                           $finish_pc_index - $start_pc_index);
+    # Map to symbols
+    MapToSymbols($libname, AddressSub($start, $offset), $contained, $symbols);
+  }
+
+  return $symbols;
+}
+
+# Map list of PC values to symbols for a given image
+sub MapToSymbols {
+  my $image = shift;
+  my $offset = shift;
+  my $pclist = shift;
+  my $symbols = shift;
+
+  my $debug = 0;
+
+  # Ignore empty binaries
+  if ($#{$pclist} < 0) { return; }
+
+  # Figure out the addr2line command to use
+  my $addr2line = $obj_tool_map{"addr2line"};
+  my $cmd = ShellEscape($addr2line, "-f", "-C", "-e", $image);
+  if (exists $obj_tool_map{"addr2line_pdb"}) {
+    $addr2line = $obj_tool_map{"addr2line_pdb"};
+    $cmd = ShellEscape($addr2line, "--demangle", "-f", "-C", "-e", $image);
+  }
+
+  # If "addr2line" isn't installed on the system at all, just use
+  # nm to get what info we can (function names, but not line numbers).
+  if (system(ShellEscape($addr2line, "--help") . " >$dev_null 2>&1") != 0) {
+    MapSymbolsWithNM($image, $offset, $pclist, $symbols);
+    return;
+  }
+
+  # "addr2line -i" can produce a variable number of lines per input
+  # address, with no separator that allows us to tell when data for
+  # the next address starts.  So we find the address for a special
+  # symbol (_fini) and interleave this address between all real
+  # addresses passed to addr2line.  The name of this special symbol
+  # can then be used as a separator.
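+  # With inlining, the output for a single input address might look like
+  # this (illustrative):
+  #     inlined_callee
+  #     foo.cc:12
+  #     outer_caller
+  #     foo.cc:40
+  # and only the following _fini entry tells us where the next address
+  # begins.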
+  $sep_address = undef;  # May be filled in by MapSymbolsWithNM()
+  my $nm_symbols = {};
+  MapSymbolsWithNM($image, $offset, $pclist, $nm_symbols);
+  if (defined($sep_address)) {
+    # Only add " -i" to addr2line if the binary supports it.
+    # addr2line --help returns 0, but not if it sees an unknown flag first.
+    if (system("$cmd -i --help >$dev_null 2>&1") == 0) {
+      $cmd .= " -i";
+    } else {
+      $sep_address = undef;   # no need for sep_address if we don't support -i
+    }
+  }
+
+  # Make a file with all PC values, with an intervening 'sep_address' so
+  # that we can reliably detect the end of each inlined function list
+  open(ADDRESSES, ">$main::tmpfile_sym") || error("$main::tmpfile_sym: $!\n");
+  if ($debug) { print("---- $image ---\n"); }
+  for (my $i = 0; $i <= $#{$pclist}; $i++) {
+    # addr2line always reads hex addresses, and does not need '0x' prefix.
+    if ($debug) { printf STDERR ("%s\n", $pclist->[$i]); }
+    printf ADDRESSES ("%s\n", AddressSub($pclist->[$i], $offset));
+    if (defined($sep_address)) {
+      printf ADDRESSES ("%s\n", $sep_address);
+    }
+  }
+  close(ADDRESSES);
+  if ($debug) {
+    print("----\n");
+    system("cat", $main::tmpfile_sym);
+    print("----\n");
+    system("$cmd < " . ShellEscape($main::tmpfile_sym));
+    print("----\n");
+  }
+
+  open(SYMBOLS, "$cmd <" . ShellEscape($main::tmpfile_sym) . " |")
+      || error("$cmd: $!\n");
+  my $count = 0;   # Index in pclist
+  while (<SYMBOLS>) {
+    # Read fullfunction and filelineinfo from next pair of lines
+    s/\r?\n$//g;
+    my $fullfunction = $_;
+    $_ = <SYMBOLS>;
+    s/\r?\n$//g;
+    my $filelinenum = $_;
+
+    if (defined($sep_address) && $fullfunction eq $sep_symbol) {
+      # Terminating marker for data for this address
+      $count++;
+      next;
+    }
+
+    $filelinenum =~ s|\\|/|g; # turn windows-style paths into unix-style paths
+
+    my $pcstr = $pclist->[$count];
+    my $function = ShortFunctionName($fullfunction);
+    my $nms = $nm_symbols->{$pcstr};
+    if (defined($nms)) {
+      if ($fullfunction eq '??') {
+        # nm found a symbol for us.
+        $function = $nms->[0];
+        $fullfunction = $nms->[2];
+      } else {
+       # MapSymbolsWithNM tags each routine with its starting address,
+       # useful in case the image has multiple occurrences of this
+       # routine.  (It uses a syntax that resembles template parameters,
+       # which are automatically stripped out by ShortFunctionName().)
+       # addr2line does not provide the same information.  So we check
+       # if nm disambiguated our symbol, and if so take the annotated
+       # (nm) version of the routine-name.  TODO(csilvers): this won't
+       # catch overloaded, inlined symbols, which nm doesn't see.
+       # Better would be to do a check similar to nm's, in this fn.
+       if ($nms->[2] =~ m/^\Q$function\E/) {  # sanity check it's the right fn
+         $function = $nms->[0];
+         $fullfunction = $nms->[2];
+       }
+      }
+    }
+
+    # Prepend to accumulated symbols for pcstr
+    # (so that caller comes before callee)
+    my $sym = $symbols->{$pcstr};
+    if (!defined($sym)) {
+      $sym = [];
+      $symbols->{$pcstr} = $sym;
+    }
+    unshift(@{$sym}, $function, $filelinenum, $fullfunction);
+    if ($debug) { printf STDERR ("%s => [%s]\n", $pcstr, join(" ", @{$sym})); }
+    if (!defined($sep_address)) {
+      # Inlining is off, so this entry ends immediately
+      $count++;
+    }
+  }
+  close(SYMBOLS);
+}
+
+# Use nm to map the list of referenced PCs to symbols.  Return true iff we
+# are able to read procedure information via nm.
+sub MapSymbolsWithNM {
+  my $image = shift;
+  my $offset = shift;
+  my $pclist = shift;
+  my $symbols = shift;
+
+  # Get nm output sorted by increasing address
+  my $symbol_table = GetProcedureBoundaries($image, ".");
+  if (!%{$symbol_table}) {
+    return 0;
+  }
+  # Start addresses are already the right length (8 or 16 hex digits).
+  my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] }
+    keys(%{$symbol_table});
+
+  if ($#names < 0) {
+    # No symbols: just use addresses
+    foreach my $pc (@{$pclist}) {
+      my $pcstr = "0x" . $pc;
+      $symbols->{$pc} = [$pcstr, "?", $pcstr];
+    }
+    return 0;
+  }
+
+  # Sort addresses so we can do a join against nm output
+  my $index = 0;
+  my $fullname = $names[0];
+  my $name = ShortFunctionName($fullname);
+  foreach my $pc (sort { $a cmp $b } @{$pclist}) {
+    # Adjust for mapped offset
+    my $mpc = AddressSub($pc, $offset);
+    while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])){
+      $index++;
+      $fullname = $names[$index];
+      $name = ShortFunctionName($fullname);
+    }
+    if ($mpc lt $symbol_table->{$fullname}->[1]) {
+      $symbols->{$pc} = [$name, "?", $fullname];
+    } else {
+      my $pcstr = "0x" . $pc;
+      $symbols->{$pc} = [$pcstr, "?", $pcstr];
+    }
+  }
+  return 1;
+}
+
+sub ShortFunctionName {
+  my $function = shift;
+  while ($function =~ s/\([^()]*\)(\s*const)?//g) { }   # Argument types
+  while ($function =~ s/<[^<>]*>//g)  { }    # Remove template arguments
+  $function =~ s/^.*\s+(\w+::)/$1/;          # Remove leading type
+  return $function;
+}
+
+# Trim overly long symbols found in disassembler output
+sub CleanDisassembly {
+  my $d = shift;
+  while ($d =~ s/\([^()%]*\)(\s*const)?//g) { } # Argument types, not (%rax)
+  while ($d =~ s/(\w+)<[^<>]*>/$1/g)  { }       # Remove template arguments
+  return $d;
+}
+
+# Clean file name for display
+sub CleanFileName {
+  my ($f) = @_;
+  $f =~ s|^/proc/self/cwd/||;
+  $f =~ s|^\./||;
+  return $f;
+}
+
+# Make address relative to section and clean up for display
+sub UnparseAddress {
+  my ($offset, $address) = @_;
+  $address = AddressSub($address, $offset);
+  $address =~ s/^0x//;
+  $address =~ s/^0*//;
+  return $address;
+}
+
+##### Miscellaneous #####
+
+# Find the right versions of the above object tools to use.  The
+# argument is the program file being analyzed, and should be an ELF
+# 32-bit or ELF 64-bit executable file.  The location of the tools
+# is determined by considering the following options in this order:
+#   1) --tools option, if set
+#   2) JEPROF_TOOLS environment variable, if set
+#   3) the directory containing jeprof, then $PATH
+sub ConfigureObjTools {
+  my $prog_file = shift;
+
+  # Check for the existence of $prog_file because /usr/bin/file does not
+  # predictably return error status in prod.
+  (-e $prog_file)  || error("$prog_file does not exist.\n");
+
+  my $file_type = undef;
+  if (-e "/usr/bin/file") {
+    # Follow symlinks (at least for systems where "file" supports that).
+    my $escaped_prog_file = ShellEscape($prog_file);
+    $file_type = `/usr/bin/file -L $escaped_prog_file 2>$dev_null ||
+                  /usr/bin/file $escaped_prog_file`;
+  } elsif ($^O eq "MSWin32") {
+    $file_type = "MS Windows";
+  } else {
+    print STDERR "WARNING: Can't determine the file type of $prog_file\n";
+  }
+
+  if ($file_type =~ /64-bit/) {
+    # Change $address_length to 16 if the program file is ELF 64-bit.
+    # We can't detect this from many (most?) heap or lock contention
+    # profiles, since the actual addresses referenced are generally in low
+    # memory even for 64-bit programs.
+    $address_length = 16;
+  }
+
+  if ($file_type =~ /MS Windows/) {
+    # For Windows, we provide a version of nm and addr2line as part of
+    # the open-source release, which is capable of parsing
+    # Windows-style PDB executables.  It should live in the path, or
+    # in the same directory as jeprof.
+    $obj_tool_map{"nm_pdb"} = "nm-pdb";
+    $obj_tool_map{"addr2line_pdb"} = "addr2line-pdb";
+  }
+
+  if ($file_type =~ /Mach-O/) {
+    # OS X uses otool to examine Mach-O files, rather than objdump.
+    $obj_tool_map{"otool"} = "otool";
+    $obj_tool_map{"addr2line"} = "false";  # no addr2line
+    $obj_tool_map{"objdump"} = "false";  # no objdump
+  }
+
+  # Go fill in %obj_tool_map with the pathnames to use:
+  foreach my $tool (keys %obj_tool_map) {
+    $obj_tool_map{$tool} = ConfigureTool($obj_tool_map{$tool});
+  }
+}
+
+# Returns the path of a caller-specified object tool.  If --tools or
+# JEPROF_TOOLS are specified, then returns the full path to the tool
+# with that prefix.  Otherwise, returns the path unmodified (which
+# means we will look for it on PATH).
+sub ConfigureTool {
+  my $tool = shift;
+  my $path;
+
+  # --tools (or $JEPROF_TOOLS) is a comma separated list, where each
+  # item is either a) a pathname prefix, or b) a map of the form
+  # <tool>:<path>.  First we look for an entry of type (b) for our
+  # tool.  If one is found, we use it.  Otherwise, we consider all the
+  # pathname prefixes in turn, until one yields an existing file.  If
+  # none does, we use a default path.
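+  # Examples (hypothetical paths):
+  #   --tools=/opt/cross/bin/                              # prefix form
+  #   --tools=nm:/usr/bin/nm,addr2line:/usr/bin/addr2line  # per-tool form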
+  my $tools = $main::opt_tools || $ENV{"JEPROF_TOOLS"} || "";
+  if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) {
+    $path = $2;
+    # TODO(csilvers): sanity-check that $path exists?  Hard if it's relative.
+  } elsif ($tools ne '') {
+    foreach my $prefix (split(',', $tools)) {
+      next if ($prefix =~ /:/);    # ignore "tool:fullpath" entries in the list
+      if (-x $prefix . $tool) {
+        $path = $prefix . $tool;
+        last;
+      }
+    }
+    if (!$path) {
+      error("No '$tool' found with prefix specified by " .
+            "--tools (or \$JEPROF_TOOLS) '$tools'\n");
+    }
+  } else {
+    # ... otherwise use the version that exists in the same directory as
+    # jeprof.  If there's nothing there, use $PATH.
+    $0 =~ m,[^/]*$,;     # this is everything after the last slash
+    my $dirname = $`;    # this is everything up to and including the last slash
+    if (-x "$dirname$tool") {
+      $path = "$dirname$tool";
+    } else {
+      $path = $tool;
+    }
+  }
+  if ($main::opt_debug) { print STDERR "Using '$path' for '$tool'.\n"; }
+  return $path;
+}
+
+sub ShellEscape {
+  my @escaped_words = ();
+  foreach my $word (@_) {
+    my $escaped_word = $word;
+    if ($word =~ m![^a-zA-Z0-9/.,_=-]!) {  # check for anything not in whitelist
+      $escaped_word =~ s/'/'\\''/g;
+      $escaped_word = "'$escaped_word'";
+    }
+    push(@escaped_words, $escaped_word);
+  }
+  return join(" ", @escaped_words);
+}
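+# Illustrative example: ShellEscape("nm", "-n", "/tmp/my lib.so") returns
+# the string  nm -n '/tmp/my lib.so'  -- only the word containing a
+# character outside the whitelist gets single-quoted.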
+
+sub cleanup {
+  unlink($main::tmpfile_sym);
+  unlink(keys %main::tempnames);
+
+  # We leave any collected profiles in $HOME/jeprof in case the user wants
+  # to look at them later.  We print a message informing them of this.
+  if ((scalar(@main::profile_files) > 0) &&
+      defined($main::collected_profile)) {
+    if (scalar(@main::profile_files) == 1) {
+      print STDERR "Dynamically gathered profile is in $main::collected_profile\n";
+    }
+    print STDERR "If you want to investigate this profile further, you can do:\n";
+    print STDERR "\n";
+    print STDERR "  jeprof \\\n";
+    print STDERR "    $main::prog \\\n";
+    print STDERR "    $main::collected_profile\n";
+    print STDERR "\n";
+  }
+}
+
+sub sighandler {
+  cleanup();
+  exit(1);
+}
+
+sub error {
+  my $msg = shift;
+  print STDERR $msg;
+  cleanup();
+  exit(1);
+}
+
+
+# Run $nm_command and get all the resulting procedure boundaries whose
+# names match "$regexp" and returns them in a hashtable mapping from
+# procedure name to a two-element vector of [start address, end address]
+sub GetProcedureBoundariesViaNm {
+  my $escaped_nm_command = shift;    # shell-escaped
+  my $regexp = shift;
+
+  my $symbol_table = {};
+  open(NM, "$escaped_nm_command |") || error("$escaped_nm_command: $!\n");
+  my $last_start = "0";
+  my $routine = "";
+  while (<NM>) {
+    s/\r//g;         # turn windows-looking lines into unix-looking lines
+    if (m/^\s*([0-9a-f]+) (.) (..*)/) {
+      my $start_val = $1;
+      my $type = $2;
+      my $this_routine = $3;
+
+      # It's possible for two symbols to share the same address, if
+      # one is a zero-length variable (like __start_google_malloc) or
+      # one symbol is a weak alias to another (like __libc_malloc).
+      # In such cases, we want to ignore all values except for the
+      # actual symbol, which in nm-speak has type "T".  The logic
+      # below does this, though it's a bit tricky: when we have a series
+      # of lines with the same address, the first one gets queued up to
+      # be processed.  However, it won't
+      # *actually* be processed until later, when we read a line with
+      # a different address.  That means that as long as we're reading
+      # lines with the same address, we have a chance to replace that
+      # item in the queue, which we do whenever we see a 'T' entry --
+      # that is, a line with type 'T'.  If we never see a 'T' entry,
+      # we'll just go ahead and process the first entry (which never
+      # got touched in the queue), and ignore the others.
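+      # For example (illustrative nm output):
+      #     0000000000401000 W __libc_malloc
+      #     0000000000401000 T malloc
+      # Both lines share an address, so the 'T' line replaces the queued
+      # weak-alias entry and the range is recorded under "malloc".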
+      if ($start_val eq $last_start && $type =~ /t/i) {
+        # We are the 'T' symbol at this address, replace previous symbol.
+        $routine = $this_routine;
+        next;
+      } elsif ($start_val eq $last_start) {
+        # We're not the 'T' symbol at this address, so ignore us.
+        next;
+      }
+
+      if ($this_routine eq $sep_symbol) {
+        $sep_address = HexExtend($start_val);
+      }
+
+      # Tag this routine with the starting address in case the image
+      # has multiple occurrences of this routine.  We use a syntax
+      # that resembles template parameters that are automatically
+      # stripped out by ShortFunctionName()
+      $this_routine .= "<$start_val>";
+
+      if (defined($routine) && $routine =~ m/$regexp/) {
+        $symbol_table->{$routine} = [HexExtend($last_start),
+                                     HexExtend($start_val)];
+      }
+      $last_start = $start_val;
+      $routine = $this_routine;
+    } elsif (m/^Loaded image name: (.+)/) {
+      # The win32 nm workalike emits information about the binary it is using.
+      if ($main::opt_debug) { print STDERR "Using Image $1\n"; }
+    } elsif (m/^PDB file name: (.+)/) {
+      # The win32 nm workalike emits information about the pdb it is using.
+      if ($main::opt_debug) { print STDERR "Using PDB $1\n"; }
+    }
+  }
+  close(NM);
+  # Handle the last line in the nm output.  Unfortunately, we don't know
+  # how big this last symbol is, because we don't know how big the file
+  # is.  For now, we just give it a size of 0.
+  # TODO(csilvers): do better here.
+  if (defined($routine) && $routine =~ m/$regexp/) {
+    $symbol_table->{$routine} = [HexExtend($last_start),
+                                 HexExtend($last_start)];
+  }
+  return $symbol_table;
+}
+
+# Gets the procedure boundaries for all routines in "$image" whose names
+# match "$regexp" and returns them in a hashtable mapping from procedure
+# name to a two-element vector of [start address, end address].
+# Will return an empty map if nm is not installed or not working properly.
+sub GetProcedureBoundaries {
+  my $image = shift;
+  my $regexp = shift;
+
+  # If $image doesn't start with /, then put ./ in front of it.  This works
+  # around an obnoxious bug in our probing of nm -f behavior.
+  # "nm -f $image" is supposed to fail on GNU nm, but if:
+  #
+  # a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND
+  # b. you have a.out in your current directory (a not uncommon occurrence)
+  #
+  # then "nm -f $image" succeeds because -f only looks at the first letter of
+  # the argument, which looks valid because it's [BbSsPp], and then since
+  # there's no image provided, it looks for a.out and finds it.
+  #
+  # This regex makes sure that $image starts with . or /, forcing the -f
+  # parsing to fail since . and / are not valid formats.
+  $image =~ s#^[^/]#./$&#;
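+  # e.g. "bin/foo" becomes "./bin/foo", while an absolute path such as
+  # "/usr/bin/foo" is left alone.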
+
+  # For libc libraries, the copy in /usr/lib/debug contains debugging symbols
+  my $debugging = DebuggingLibrary($image);
+  if ($debugging) {
+    $image = $debugging;
+  }
+
+  my $nm = $obj_tool_map{"nm"};
+  my $cppfilt = $obj_tool_map{"c++filt"};
+
+  # nm can fail for two reasons: 1) $image isn't a debug library; 2) nm
+  # binary doesn't support --demangle.  In addition, for OS X we need
+  # to use the -f flag to get 'flat' nm output (otherwise we don't sort
+  # properly and get incorrect results).  Unfortunately, GNU nm uses -f
+  # in an incompatible way.  So first we test whether our nm supports
+  # --demangle and -f.
+  my $demangle_flag = "";
+  my $cppfilt_flag = "";
+  my $to_devnull = ">$dev_null 2>&1";
+  if (system(ShellEscape($nm, "--demangle", $image) . $to_devnull) == 0) {
+    # In this mode, we do "nm --demangle <foo>"
+    $demangle_flag = "--demangle";
+    $cppfilt_flag = "";
+  } elsif (system(ShellEscape($cppfilt, $image) . $to_devnull) == 0) {
+    # In this mode, we do "nm <foo> | c++filt"
+    $cppfilt_flag = " | " . ShellEscape($cppfilt);
+  };
+  my $flatten_flag = "";
+  if (system(ShellEscape($nm, "-f", $image) . $to_devnull) == 0) {
+    $flatten_flag = "-f";
+  }
+
+  # Finally, in case $image isn't a debug library, we try again with
+  # -D to at least get *exported* symbols.  If we can't use --demangle,
+  # we use c++filt instead, if it exists on this system.
+  my @nm_commands = (ShellEscape($nm, "-n", $flatten_flag, $demangle_flag,
+                                 $image) . " 2>$dev_null $cppfilt_flag",
+                     ShellEscape($nm, "-D", "-n", $flatten_flag, $demangle_flag,
+                                 $image) . " 2>$dev_null $cppfilt_flag",
+                     # 6nm is for Go binaries
+                     ShellEscape("6nm", "$image") . " 2>$dev_null | sort",
+                     );
+
+  # If the executable is an MS Windows PDB-format executable, we'll
+  # have set up obj_tool_map("nm_pdb").  In this case, we actually
+  # want to use both unix nm and windows-specific nm_pdb, since
+  # PDB-format executables can apparently include dwarf .o files.
+  if (exists $obj_tool_map{"nm_pdb"}) {
+    push(@nm_commands,
+         ShellEscape($obj_tool_map{"nm_pdb"}, "--demangle", $image)
+         . " 2>$dev_null");
+  }
+
+  foreach my $nm_command (@nm_commands) {
+    my $symbol_table = GetProcedureBoundariesViaNm($nm_command, $regexp);
+    return $symbol_table if (%{$symbol_table});
+  }
+  my $symbol_table = {};
+  return $symbol_table;
+}
+
+
+# The test vectors for AddressAdd/Sub/Inc are 8- and 16-nibble hex strings.
+# To make them more readable, we add underscores at interesting places.
+# This routine removes the underscores, producing the canonical representation
+# used by jeprof to represent addresses, particularly in the tested routines.
+sub CanonicalHex {
+  my $arg = shift;
+  return join '', (split '_',$arg);
+}
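+# For example, CanonicalHex("00_000000f_afafafa") returns "00000000fafafafa".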
+
+
+# Unit test for AddressAdd:
+sub AddressAddUnitTest {
+  my $test_data_8 = shift;
+  my $test_data_16 = shift;
+  my $error_count = 0;
+  my $fail_count = 0;
+  my $pass_count = 0;
+  # print STDERR "AddressAddUnitTest: ", 1+$#{$test_data_8}, " tests\n";
+
+  # First a few 8-nibble addresses.  Note that this implementation uses
+  # plain old arithmetic, so we do a quick sanity check and also verify
+  # what happens on overflow (we want it to wrap):
+  $address_length = 8;
+  foreach my $row (@{$test_data_8}) {
+    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+    my $sum = AddressAdd ($row->[0], $row->[1]);
+    if ($sum ne $row->[2]) {
+      printf STDERR "ERROR: %s != %s + %s = %s\n", $sum,
+             $row->[0], $row->[1], $row->[2];
+      ++$fail_count;
+    } else {
+      ++$pass_count;
+    }
+  }
+  printf STDERR "AddressAdd 32-bit tests: %d passes, %d failures\n",
+         $pass_count, $fail_count;
+  $error_count = $fail_count;
+  $fail_count = 0;
+  $pass_count = 0;
+
+  # Now 16-nibble addresses.
+  $address_length = 16;
+  foreach my $row (@{$test_data_16}) {
+    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+    my $sum = AddressAdd (CanonicalHex($row->[0]), CanonicalHex($row->[1]));
+    my $expected = CanonicalHex($row->[2]);
+    if ($sum ne $expected) {
+      printf STDERR "ERROR: %s != %s + %s = %s\n", $sum,
+             $row->[0], $row->[1], $row->[2];
+      ++$fail_count;
+    } else {
+      ++$pass_count;
+    }
+  }
+  printf STDERR "AddressAdd 64-bit tests: %d passes, %d failures\n",
+         $pass_count, $fail_count;
+  $error_count += $fail_count;
+
+  return $error_count;
+}
+
+
+# Unit test for AddressSub:
+sub AddressSubUnitTest {
+  my $test_data_8 = shift;
+  my $test_data_16 = shift;
+  my $error_count = 0;
+  my $fail_count = 0;
+  my $pass_count = 0;
+  # print STDERR "AddressSubUnitTest: ", 1+$#{$test_data_8}, " tests\n";
+
+  # First a few 8-nibble addresses.  Note that this implementation uses
+  # plain old arithmetic, so we do a quick sanity check and also verify
+  # what happens on overflow (we want it to wrap):
+  $address_length = 8;
+  foreach my $row (@{$test_data_8}) {
+    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+    my $sum = AddressSub ($row->[0], $row->[1]);
+    if ($sum ne $row->[3]) {
+      printf STDERR "ERROR: %s != %s - %s = %s\n", $sum,
+             $row->[0], $row->[1], $row->[3];
+      ++$fail_count;
+    } else {
+      ++$pass_count;
+    }
+  }
+  printf STDERR "AddressSub 32-bit tests: %d passes, %d failures\n",
+         $pass_count, $fail_count;
+  $error_count = $fail_count;
+  $fail_count = 0;
+  $pass_count = 0;
+
+  # Now 16-nibble addresses.
+  $address_length = 16;
+  foreach my $row (@{$test_data_16}) {
+    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+    my $sum = AddressSub (CanonicalHex($row->[0]), CanonicalHex($row->[1]));
+    if ($sum ne CanonicalHex($row->[3])) {
+      printf STDERR "ERROR: %s != %s - %s = %s\n", $sum,
+             $row->[0], $row->[1], $row->[3];
+      ++$fail_count;
+    } else {
+      ++$pass_count;
+    }
+  }
+  printf STDERR "AddressSub 64-bit tests: %d passes, %d failures\n",
+         $pass_count, $fail_count;
+  $error_count += $fail_count;
+
+  return $error_count;
+}
+
+
+# Unit test for AddressInc:
+sub AddressIncUnitTest {
+  my $test_data_8 = shift;
+  my $test_data_16 = shift;
+  my $error_count = 0;
+  my $fail_count = 0;
+  my $pass_count = 0;
+  # print STDERR "AddressIncUnitTest: ", 1+$#{$test_data_8}, " tests\n";
+
+  # First a few 8-nibble addresses.  Note that this implementation uses
+  # plain old arithmetic, so we do a quick sanity check and also verify
+  # what happens on overflow (we want it to wrap):
+  $address_length = 8;
+  foreach my $row (@{$test_data_8}) {
+    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+    my $sum = AddressInc ($row->[0]);
+    if ($sum ne $row->[4]) {
+      printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum,
+             $row->[0], $row->[4];
+      ++$fail_count;
+    } else {
+      ++$pass_count;
+    }
+  }
+  printf STDERR "AddressInc 32-bit tests: %d passes, %d failures\n",
+         $pass_count, $fail_count;
+  $error_count = $fail_count;
+  $fail_count = 0;
+  $pass_count = 0;
+
+  # Now 16-nibble addresses.
+  $address_length = 16;
+  foreach my $row (@{$test_data_16}) {
+    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+    my $sum = AddressInc (CanonicalHex($row->[0]));
+    if ($sum ne CanonicalHex($row->[4])) {
+      printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum,
+             $row->[0], $row->[4];
+      ++$fail_count;
+    } else {
+      ++$pass_count;
+    }
+  }
+  printf STDERR "AddressInc 64-bit tests: %d passes, %d failures\n",
+         $pass_count, $fail_count;
+  $error_count += $fail_count;
+
+  return $error_count;
+}
+
+
+# Driver for unit tests.
+# Currently just the address add/subtract/increment routines (32- and 64-bit).
+sub RunUnitTests {
+  my $error_count = 0;
+
+  # This is a list of tuples [a, b, a+b, a-b, a+1]
+  my $unit_test_data_8 = [
+    [qw(aaaaaaaa 50505050 fafafafa 5a5a5a5a aaaaaaab)],
+    [qw(50505050 aaaaaaaa fafafafa a5a5a5a6 50505051)],
+    [qw(ffffffff aaaaaaaa aaaaaaa9 55555555 00000000)],
+    [qw(00000001 ffffffff 00000000 00000002 00000002)],
+    [qw(00000001 fffffff0 fffffff1 00000011 00000002)],
+  ];
+  my $unit_test_data_16 = [
+    # The implementation handles data in 7-nibble chunks, so those are the
+    # interesting boundaries.
+    [qw(aaaaaaaa 50505050
+        00_000000f_afafafa 00_0000005_a5a5a5a 00_000000a_aaaaaab)],
+    [qw(50505050 aaaaaaaa
+        00_000000f_afafafa ff_ffffffa_5a5a5a6 00_0000005_0505051)],
+    [qw(ffffffff aaaaaaaa
+        00_000001a_aaaaaa9 00_0000005_5555555 00_0000010_0000000)],
+    [qw(00000001 ffffffff
+        00_0000010_0000000 ff_ffffff0_0000002 00_0000000_0000002)],
+    [qw(00000001 fffffff0
+        00_000000f_ffffff1 ff_ffffff0_0000011 00_0000000_0000002)],
+
+    [qw(00_a00000a_aaaaaaa 50505050
+        00_a00000f_afafafa 00_a000005_a5a5a5a 00_a00000a_aaaaaab)],
+    [qw(0f_fff0005_0505050 aaaaaaaa
+        0f_fff000f_afafafa 0f_ffefffa_5a5a5a6 0f_fff0005_0505051)],
+    [qw(00_000000f_fffffff 01_800000a_aaaaaaa
+        01_800001a_aaaaaa9 fe_8000005_5555555 00_0000010_0000000)],
+    [qw(00_0000000_0000001 ff_fffffff_fffffff
+        00_0000000_0000000 00_0000000_0000002 00_0000000_0000002)],
+    [qw(00_0000000_0000001 ff_fffffff_ffffff0
+        ff_fffffff_ffffff1 00_0000000_0000011 00_0000000_0000002)],
+  ];
+
+  $error_count += AddressAddUnitTest($unit_test_data_8, $unit_test_data_16);
+  $error_count += AddressSubUnitTest($unit_test_data_8, $unit_test_data_16);
+  $error_count += AddressIncUnitTest($unit_test_data_8, $unit_test_data_16);
+  if ($error_count > 0) {
+    print STDERR $error_count, " errors: FAILED\n";
+  } else {
+    print STDERR "PASS\n";
+  }
+  exit ($error_count);
+}
diff --git a/src/jemalloc/bin/pprof b/src/jemalloc/bin/pprof
deleted file mode 100755 (executable)
index 5a4c6cd..0000000
+++ /dev/null
@@ -1,5507 +0,0 @@
-#! /usr/bin/env perl
-
-# Copyright (c) 1998-2007, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# ---
-# Program for printing the profile generated by common/profiler.cc,
-# or by the heap profiler (common/debugallocation.cc)
-#
-# The profile contains a sequence of entries of the form:
-#       <count> <stack trace>
-# This program parses the profile, and generates user-readable
-# output.
-#
-# Examples:
-#
-# % tools/pprof "program" "profile"
-#   Enters "interactive" mode
-#
-# % tools/pprof --text "program" "profile"
-#   Generates one line per procedure
-#
-# % tools/pprof --gv "program" "profile"
-#   Generates annotated call-graph and displays via "gv"
-#
-# % tools/pprof --gv --focus=Mutex "program" "profile"
-#   Restrict to code paths that involve an entry that matches "Mutex"
-#
-# % tools/pprof --gv --focus=Mutex --ignore=string "program" "profile"
-#   Restrict to code paths that involve an entry that matches "Mutex"
-#   and does not match "string"
-#
-# % tools/pprof --list=IBF_CheckDocid "program" "profile"
-#   Generates disassembly listing of all routines with at least one
-#   sample that match the --list=<regexp> pattern.  The listing is
-#   annotated with the flat and cumulative sample counts at each line.
-#
-# % tools/pprof --disasm=IBF_CheckDocid "program" "profile"
-#   Generates disassembly listing of all routines with at least one
-#   sample that match the --disasm=<regexp> pattern.  The listing is
-#   annotated with the flat and cumulative sample counts at each PC value.
-#
-# TODO: Use color to indicate files?
-
-use strict;
-use warnings;
-use Getopt::Long;
-
-my $PPROF_VERSION = "2.0";
-
-# These are the object tools we use which can come from a
-# user-specified location using --tools, from the PPROF_TOOLS
-# environment variable, or from the environment.
-my %obj_tool_map = (
-  "objdump" => "objdump",
-  "nm" => "nm",
-  "addr2line" => "addr2line",
-  "c++filt" => "c++filt",
-  ## ConfigureObjTools may add architecture-specific entries:
-  #"nm_pdb" => "nm-pdb",       # for reading windows (PDB-format) executables
-  #"addr2line_pdb" => "addr2line-pdb",                                # ditto
-  #"otool" => "otool",         # equivalent of objdump on OS X
-);
-# NOTE: these are lists, so you can put in commandline flags if you want.
-my @DOT = ("dot");          # leave non-absolute, since it may be in /usr/local
-my @GV = ("gv");
-my @EVINCE = ("evince");    # could also be xpdf or perhaps acroread
-my @KCACHEGRIND = ("kcachegrind");
-my @PS2PDF = ("ps2pdf");
-# These are used for dynamic profiles
-my @URL_FETCHER = ("curl", "-s");
-
-# These are the web pages that servers need to support for dynamic profiles
-my $HEAP_PAGE = "/pprof/heap";
-my $PROFILE_PAGE = "/pprof/profile";   # must support cgi-param "?seconds=#"
-my $PMUPROFILE_PAGE = "/pprof/pmuprofile(?:\\?.*)?"; # must support cgi-param
-                                                # ?seconds=#&event=x&period=n
-my $GROWTH_PAGE = "/pprof/growth";
-my $CONTENTION_PAGE = "/pprof/contention";
-my $WALL_PAGE = "/pprof/wall(?:\\?.*)?";  # accepts options like namefilter
-my $FILTEREDPROFILE_PAGE = "/pprof/filteredprofile(?:\\?.*)?";
-my $CENSUSPROFILE_PAGE = "/pprof/censusprofile(?:\\?.*)?"; # must support cgi-param
-                                                       # "?seconds=#",
-                                                       # "?tags_regexp=#" and
-                                                       # "?type=#".
-my $SYMBOL_PAGE = "/pprof/symbol";     # must support symbol lookup via POST
-my $PROGRAM_NAME_PAGE = "/pprof/cmdline";
-
-# These are the web pages that can be named on the command line.
-# All the alternatives must begin with /.
-my $PROFILES = "($HEAP_PAGE|$PROFILE_PAGE|$PMUPROFILE_PAGE|" .
-               "$GROWTH_PAGE|$CONTENTION_PAGE|$WALL_PAGE|" .
-               "$FILTEREDPROFILE_PAGE|$CENSUSPROFILE_PAGE)";
-
-# default binary name
-my $UNKNOWN_BINARY = "(unknown)";
-
-# There is a pervasive dependency on the length (in hex characters,
-# i.e., nibbles) of an address, distinguishing between 32-bit and
-# 64-bit profiles.  To err on the safe side, default to 64-bit here:
-my $address_length = 16;
-
-my $dev_null = "/dev/null";
-if (! -e $dev_null && $^O =~ /MSWin/) {    # $^O is the OS perl was built for
-  $dev_null = "nul";
-}
-
-# A list of paths to search for shared object files
-my @prefix_list = ();
-
-# Special routine name that should not have any symbols.
-# Used as separator to parse "addr2line -i" output.
-my $sep_symbol = '_fini';
-my $sep_address = undef;
-
-##### Argument parsing #####
-
-sub usage_string {
-  return <<EOF;
-Usage:
-pprof [options] <program> <profiles>
-   <profiles> is a space separated list of profile names.
-pprof [options] <symbolized-profiles>
-   <symbolized-profiles> is a list of profile files where each file contains
-   the necessary symbol mappings  as well as profile data (likely generated
-   with --raw).
-pprof [options] <profile>
-   <profile> is a remote form.  Symbols are obtained from host:port$SYMBOL_PAGE
-
-   Each name can be:
-   /path/to/profile        - a path to a profile file
-   host:port[/<service>]   - a location of a service to get profile from
-
-   The /<service> can be $HEAP_PAGE, $PROFILE_PAGE, /pprof/pmuprofile,
-                         $GROWTH_PAGE, $CONTENTION_PAGE, /pprof/wall,
-                         $CENSUSPROFILE_PAGE, or /pprof/filteredprofile.
-   For instance:
-     pprof http://myserver.com:80$HEAP_PAGE
-   If /<service> is omitted, the service defaults to $PROFILE_PAGE (cpu profiling).
-pprof --symbols <program>
-   Maps addresses to symbol names.  In this mode, stdin should be a
-   list of library mappings, in the same format as is found in the heap-
-   and cpu-profile files (this loosely matches that of /proc/self/maps
-   on linux), followed by a list of hex addresses to map, one per line.
-
-   For more help with querying remote servers, including how to add the
-   necessary server-side support code, see this filename (or one like it):
-
-   /usr/doc/gperftools-$PPROF_VERSION/pprof_remote_servers.html
-
-Options:
-   --cum               Sort by cumulative data
-   --base=<base>       Subtract <base> from <profile> before display
-   --interactive       Run in interactive mode (interactive "help" gives help) [default]
-   --seconds=<n>       Length of time for dynamic profiles [default=30 secs]
-   --add_lib=<file>    Read additional symbols and line info from the given library
-   --lib_prefix=<dir>  Comma separated list of library path prefixes
-
-Reporting Granularity:
-   --addresses         Report at address level
-   --lines             Report at source line level
-   --functions         Report at function level [default]
-   --files             Report at source file level
-
-Output type:
-   --text              Generate text report
-   --callgrind         Generate callgrind format to stdout
-   --gv                Generate Postscript and display
-   --evince            Generate PDF and display
-   --web               Generate SVG and display
-   --list=<regexp>     Generate source listing of matching routines
-   --disasm=<regexp>   Generate disassembly of matching routines
-   --symbols           Print demangled symbol names found at given addresses
-   --dot               Generate DOT file to stdout
-   --ps                Generate Postscript to stdout
-   --pdf               Generate PDF to stdout
-   --svg               Generate SVG to stdout
-   --gif               Generate GIF to stdout
-   --raw               Generate symbolized pprof data (useful with remote fetch)
-
-Heap-Profile Options:
-   --inuse_space       Display in-use (mega)bytes [default]
-   --inuse_objects     Display in-use objects
-   --alloc_space       Display allocated (mega)bytes
-   --alloc_objects     Display allocated objects
-   --show_bytes        Display space in bytes
-   --drop_negative     Ignore negative differences
-
-Contention-profile options:
-   --total_delay       Display total delay at each region [default]
-   --contentions       Display number of delays at each region
-   --mean_delay        Display mean delay at each region
-
-Call-graph Options:
-   --nodecount=<n>     Show at most this many nodes [default=80]
-   --nodefraction=<f>  Hide nodes below <f>*total [default=.005]
-   --edgefraction=<f>  Hide edges below <f>*total [default=.001]
-   --maxdegree=<n>     Max incoming/outgoing edges per node [default=8]
-   --focus=<regexp>    Focus on nodes matching <regexp>
-   --thread=<n>        Show profile for thread <n>
-   --ignore=<regexp>   Ignore nodes matching <regexp>
-   --scale=<n>         Set GV scaling [default=0]
-   --heapcheck         Make nodes with non-0 object counts
-                       (i.e. direct leak generators) more visible
-
-Miscellaneous:
-   --tools=<prefix or binary:fullpath>[,...]   \$PATH for object tool pathnames
-   --test              Run unit tests
-   --help              This message
-   --version           Version information
-
-Environment Variables:
-   PPROF_TMPDIR        Profiles directory. Defaults to \$HOME/pprof
-   PPROF_TOOLS         Prefix for object tools pathnames
-
-Examples:
-
-pprof /bin/ls ls.prof
-                       Enters "interactive" mode
-pprof --text /bin/ls ls.prof
-                       Outputs one line per procedure
-pprof --web /bin/ls ls.prof
-                       Displays annotated call-graph in web browser
-pprof --gv /bin/ls ls.prof
-                       Displays annotated call-graph via 'gv'
-pprof --gv --focus=Mutex /bin/ls ls.prof
-                       Restricts to code paths including a .*Mutex.* entry
-pprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof
-                       Code paths including Mutex but not string
-pprof --list=getdir /bin/ls ls.prof
-                       (Per-line) annotated source listing for getdir()
-pprof --disasm=getdir /bin/ls ls.prof
-                       (Per-PC) annotated disassembly for getdir()
-
-pprof http://localhost:1234/
-                       Enters "interactive" mode
-pprof --text localhost:1234
-                       Outputs one line per procedure for localhost:1234
-pprof --raw localhost:1234 > ./local.raw
-pprof --text ./local.raw
-                       Fetches a remote profile for later analysis and then
-                       analyzes it in text mode.
-EOF
-}
-
-sub version_string {
-  return <<EOF
-pprof (part of gperftools $PPROF_VERSION)
-
-Copyright 1998-2007 Google Inc.
-
-This is BSD licensed software; see the source for copying conditions
-and license information.
-There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A
-PARTICULAR PURPOSE.
-EOF
-}
-
-sub usage {
-  my $msg = shift;
-  print STDERR "$msg\n\n";
-  print STDERR usage_string();
-  print STDERR "\nFATAL ERROR: $msg\n";    # just as a reminder
-  exit(1);
-}
-
-sub Init() {
-  # Setup tmp-file name and handler to clean it up.
-  # We do this at the very beginning so that we can use the
-  # error() and cleanup() functions at any time hereafter.
-  $main::tmpfile_sym = "/tmp/pprof$$.sym";
-  $main::tmpfile_ps = "/tmp/pprof$$";
-  $main::next_tmpfile = 0;
-  $SIG{'INT'} = \&sighandler;
-
-  # Cache from filename/linenumber to source code
-  $main::source_cache = ();
-
-  $main::opt_help = 0;
-  $main::opt_version = 0;
-
-  $main::opt_cum = 0;
-  $main::opt_base = '';
-  $main::opt_addresses = 0;
-  $main::opt_lines = 0;
-  $main::opt_functions = 0;
-  $main::opt_files = 0;
-  $main::opt_lib_prefix = "";
-
-  $main::opt_text = 0;
-  $main::opt_callgrind = 0;
-  $main::opt_list = "";
-  $main::opt_disasm = "";
-  $main::opt_symbols = 0;
-  $main::opt_gv = 0;
-  $main::opt_evince = 0;
-  $main::opt_web = 0;
-  $main::opt_dot = 0;
-  $main::opt_ps = 0;
-  $main::opt_pdf = 0;
-  $main::opt_gif = 0;
-  $main::opt_svg = 0;
-  $main::opt_raw = 0;
-
-  $main::opt_nodecount = 80;
-  $main::opt_nodefraction = 0.005;
-  $main::opt_edgefraction = 0.001;
-  $main::opt_maxdegree = 8;
-  $main::opt_focus = '';
-  $main::opt_thread = undef;
-  $main::opt_ignore = '';
-  $main::opt_scale = 0;
-  $main::opt_heapcheck = 0;
-  $main::opt_seconds = 30;
-  $main::opt_lib = "";
-
-  $main::opt_inuse_space   = 0;
-  $main::opt_inuse_objects = 0;
-  $main::opt_alloc_space   = 0;
-  $main::opt_alloc_objects = 0;
-  $main::opt_show_bytes    = 0;
-  $main::opt_drop_negative = 0;
-  $main::opt_interactive   = 0;
-
-  $main::opt_total_delay = 0;
-  $main::opt_contentions = 0;
-  $main::opt_mean_delay = 0;
-
-  $main::opt_tools   = "";
-  $main::opt_debug   = 0;
-  $main::opt_test    = 0;
-
-  # These are undocumented flags used only by unittests.
-  $main::opt_test_stride = 0;
-
-  # Are we using $SYMBOL_PAGE?
-  $main::use_symbol_page = 0;
-
-  # Files returned by TempName.
-  %main::tempnames = ();
-
-  # Type of profile we are dealing with
-  # Supported types:
-  #     cpu
-  #     heap
-  #     growth
-  #     contention
-  $main::profile_type = '';     # Empty type means "unknown"
-
-  GetOptions("help!"          => \$main::opt_help,
-             "version!"       => \$main::opt_version,
-             "cum!"           => \$main::opt_cum,
-             "base=s"         => \$main::opt_base,
-             "seconds=i"      => \$main::opt_seconds,
-             "add_lib=s"      => \$main::opt_lib,
-             "lib_prefix=s"   => \$main::opt_lib_prefix,
-             "functions!"     => \$main::opt_functions,
-             "lines!"         => \$main::opt_lines,
-             "addresses!"     => \$main::opt_addresses,
-             "files!"         => \$main::opt_files,
-             "text!"          => \$main::opt_text,
-             "callgrind!"     => \$main::opt_callgrind,
-             "list=s"         => \$main::opt_list,
-             "disasm=s"       => \$main::opt_disasm,
-             "symbols!"       => \$main::opt_symbols,
-             "gv!"            => \$main::opt_gv,
-             "evince!"        => \$main::opt_evince,
-             "web!"           => \$main::opt_web,
-             "dot!"           => \$main::opt_dot,
-             "ps!"            => \$main::opt_ps,
-             "pdf!"           => \$main::opt_pdf,
-             "svg!"           => \$main::opt_svg,
-             "gif!"           => \$main::opt_gif,
-             "raw!"           => \$main::opt_raw,
-             "interactive!"   => \$main::opt_interactive,
-             "nodecount=i"    => \$main::opt_nodecount,
-             "nodefraction=f" => \$main::opt_nodefraction,
-             "edgefraction=f" => \$main::opt_edgefraction,
-             "maxdegree=i"    => \$main::opt_maxdegree,
-             "focus=s"        => \$main::opt_focus,
-             "thread=i"       => \$main::opt_thread,
-             "ignore=s"       => \$main::opt_ignore,
-             "scale=i"        => \$main::opt_scale,
-             "heapcheck"      => \$main::opt_heapcheck,
-             "inuse_space!"   => \$main::opt_inuse_space,
-             "inuse_objects!" => \$main::opt_inuse_objects,
-             "alloc_space!"   => \$main::opt_alloc_space,
-             "alloc_objects!" => \$main::opt_alloc_objects,
-             "show_bytes!"    => \$main::opt_show_bytes,
-             "drop_negative!" => \$main::opt_drop_negative,
-             "total_delay!"   => \$main::opt_total_delay,
-             "contentions!"   => \$main::opt_contentions,
-             "mean_delay!"    => \$main::opt_mean_delay,
-             "tools=s"        => \$main::opt_tools,
-             "test!"          => \$main::opt_test,
-             "debug!"         => \$main::opt_debug,
-             # Undocumented flags used only by unittests:
-             "test_stride=i"  => \$main::opt_test_stride,
-      ) || usage("Invalid option(s)");
-
-  # Deal with the standard --help and --version
-  if ($main::opt_help) {
-    print usage_string();
-    exit(0);
-  }
-
-  if ($main::opt_version) {
-    print version_string();
-    exit(0);
-  }
-
-  # Disassembly/listing/symbols mode requires address-level info
-  if ($main::opt_disasm || $main::opt_list || $main::opt_symbols) {
-    $main::opt_functions = 0;
-    $main::opt_lines = 0;
-    $main::opt_addresses = 1;
-    $main::opt_files = 0;
-  }
-
-  # Check heap-profiling flags
-  if ($main::opt_inuse_space +
-      $main::opt_inuse_objects +
-      $main::opt_alloc_space +
-      $main::opt_alloc_objects > 1) {
-    usage("Specify at most one of --inuse/--alloc options");
-  }
-
-  # Check output granularities
-  my $grains =
-      $main::opt_functions +
-      $main::opt_lines +
-      $main::opt_addresses +
-      $main::opt_files +
-      0;
-  if ($grains > 1) {
-    usage("Only specify one output granularity option");
-  }
-  if ($grains == 0) {
-    $main::opt_functions = 1;
-  }
-
-  # Check output modes
-  my $modes =
-      $main::opt_text +
-      $main::opt_callgrind +
-      ($main::opt_list eq '' ? 0 : 1) +
-      ($main::opt_disasm eq '' ? 0 : 1) +
-      ($main::opt_symbols == 0 ? 0 : 1) +
-      $main::opt_gv +
-      $main::opt_evince +
-      $main::opt_web +
-      $main::opt_dot +
-      $main::opt_ps +
-      $main::opt_pdf +
-      $main::opt_svg +
-      $main::opt_gif +
-      $main::opt_raw +
-      $main::opt_interactive +
-      0;
-  if ($modes > 1) {
-    usage("Only specify one output mode");
-  }
-  if ($modes == 0) {
-    if (-t STDOUT) {  # If STDOUT is a tty, activate interactive mode
-      $main::opt_interactive = 1;
-    } else {
-      $main::opt_text = 1;
-    }
-  }
-
-  if ($main::opt_test) {
-    RunUnitTests();
-    # Should not return
-    exit(1);
-  }
-
-  # Binary name and profile arguments list
-  $main::prog = "";
-  @main::pfile_args = ();
-
-  # Remote profiling without a binary (using $SYMBOL_PAGE instead)
-  if (@ARGV > 0) {
-    if (IsProfileURL($ARGV[0])) {
-      $main::use_symbol_page = 1;
-    } elsif (IsSymbolizedProfileFile($ARGV[0])) {
-      $main::use_symbolized_profile = 1;
-      $main::prog = $UNKNOWN_BINARY;  # will be set later from the profile file
-    }
-  }
-
-  if ($main::use_symbol_page || $main::use_symbolized_profile) {
-    # We don't need a binary!
-    my %disabled = ('--lines' => $main::opt_lines,
-                    '--disasm' => $main::opt_disasm);
-    for my $option (keys %disabled) {
-      usage("$option cannot be used without a binary") if $disabled{$option};
-    }
-    # Set $main::prog later...
-    scalar(@ARGV) || usage("Did not specify profile file");
-  } elsif ($main::opt_symbols) {
-    # --symbols needs a binary-name (to run nm on, etc) but not profiles
-    $main::prog = shift(@ARGV) || usage("Did not specify program");
-  } else {
-    $main::prog = shift(@ARGV) || usage("Did not specify program");
-    scalar(@ARGV) || usage("Did not specify profile file");
-  }
-
-  # Parse profile file/location arguments
-  foreach my $farg (@ARGV) {
-    if ($farg =~ m/(.*)\@([0-9]+)(|\/.*)$/ ) {
-      my $machine = $1;
-      my $num_machines = $2;
-      my $path = $3;
-      for (my $i = 0; $i < $num_machines; $i++) {
-        unshift(@main::pfile_args, "$i.$machine$path");
-      }
-    } else {
-      unshift(@main::pfile_args, $farg);
-    }
-  }
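-  # For example (illustrative), a profile argument of "myhost@3/pprof/profile"
-  # is expanded by the loop above into 0.myhost/pprof/profile,
-  # 1.myhost/pprof/profile and 2.myhost/pprof/profile.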
-
-  if ($main::use_symbol_page) {
-    unless (IsProfileURL($main::pfile_args[0])) {
-      error("The first profile should be a remote form to use $SYMBOL_PAGE\n");
-    }
-    CheckSymbolPage();
-    $main::prog = FetchProgramName();
-  } elsif (!$main::use_symbolized_profile) {  # may not need objtools!
-    ConfigureObjTools($main::prog)
-  }
-
-  # Break the opt_lib_prefix into the prefix_list array
-  @prefix_list = split (',', $main::opt_lib_prefix);
-
-  # Remove trailing / from the prefixes in the list, to prevent
-  # searching things like /my/path//lib/mylib.so
-  foreach (@prefix_list) {
-    s|/+$||;
-  }
-}
-
-sub FilterAndPrint {
-  my ($profile, $symbols, $libs, $thread) = @_;
-
-  # Get total data in profile
-  my $total = TotalProfile($profile);
-
-  # Remove uninteresting stack items
-  $profile = RemoveUninterestingFrames($symbols, $profile);
-
-  # Focus?
-  if ($main::opt_focus ne '') {
-    $profile = FocusProfile($symbols, $profile, $main::opt_focus);
-  }
-
-  # Ignore?
-  if ($main::opt_ignore ne '') {
-    $profile = IgnoreProfile($symbols, $profile, $main::opt_ignore);
-  }
-
-  my $calls = ExtractCalls($symbols, $profile);
-
-  # Reduce profiles to required output granularity, and also clean
-  # each stack trace so a given entry exists at most once.
-  my $reduced = ReduceProfile($symbols, $profile);
-
-  # Get derived profiles
-  my $flat = FlatProfile($reduced);
-  my $cumulative = CumulativeProfile($reduced);
-
-  # Print
-  if (!$main::opt_interactive) {
-    if ($main::opt_disasm) {
-      PrintDisassembly($libs, $flat, $cumulative, $main::opt_disasm);
-    } elsif ($main::opt_list) {
-      PrintListing($total, $libs, $flat, $cumulative, $main::opt_list, 0);
-    } elsif ($main::opt_text) {
-      # Make sure the output is empty when we have nothing to report
-      # (only matters when --heapcheck is given but we must be
-      # compatible with old branches that did not pass --heapcheck always):
-      if ($total != 0) {
-        printf("Total%s: %s %s\n",
-               (defined($thread) ? " (t$thread)" : ""),
-               Unparse($total), Units());
-      }
-      PrintText($symbols, $flat, $cumulative, -1);
-    } elsif ($main::opt_raw) {
-      PrintSymbolizedProfile($symbols, $profile, $main::prog);
-    } elsif ($main::opt_callgrind) {
-      PrintCallgrind($calls);
-    } else {
-      if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
-        if ($main::opt_gv) {
-          RunGV(TempName($main::next_tmpfile, "ps"), "");
-        } elsif ($main::opt_evince) {
-          RunEvince(TempName($main::next_tmpfile, "pdf"), "");
-        } elsif ($main::opt_web) {
-          my $tmp = TempName($main::next_tmpfile, "svg");
-          RunWeb($tmp);
-          # The command we run might hand the file name off
-          # to an already running browser instance and then exit.
-          # Normally, we'd remove $tmp on exit (right now), but instead
-          # we fork a child to remove $tmp a little later, so that the
-          # browser has time to load it first.
-          delete $main::tempnames{$tmp};
-          if (fork() == 0) {
-            sleep 5;
-            unlink($tmp);
-            exit(0);
-          }
-        }
-      } else {
-        cleanup();
-        exit(1);
-      }
-    }
-  } else {
-    InteractiveMode($profile, $symbols, $libs, $total);
-  }
-}
-
-sub Main() {
-  Init();
-  $main::collected_profile = undef;
-  @main::profile_files = ();
-  $main::op_time = time();
-
-  # Printing symbols is special and requires a lot less info than most.
-  if ($main::opt_symbols) {
-    PrintSymbols(*STDIN);   # Get /proc/maps and symbols output from stdin
-    return;
-  }
-
-  # Fetch all profile data
-  FetchDynamicProfiles();
-
-  # this will hold symbols that we read from the profile files
-  my $symbol_map = {};
-
-  # Read one profile, pick the last item on the list
-  my $data = ReadProfile($main::prog, pop(@main::profile_files));
-  my $profile = $data->{profile};
-  my $pcs = $data->{pcs};
-  my $libs = $data->{libs};   # Info about main program and shared libraries
-  $symbol_map = MergeSymbols($symbol_map, $data->{symbols});
-
-  # Add additional profiles, if available.
-  if (scalar(@main::profile_files) > 0) {
-    foreach my $pname (@main::profile_files) {
-      my $data2 = ReadProfile($main::prog, $pname);
-      $profile = AddProfile($profile, $data2->{profile});
-      $pcs = AddPcs($pcs, $data2->{pcs});
-      $symbol_map = MergeSymbols($symbol_map, $data2->{symbols});
-    }
-  }
-
-  # Subtract base from profile, if specified
-  if ($main::opt_base ne '') {
-    my $base = ReadProfile($main::prog, $main::opt_base);
-    $profile = SubtractProfile($profile, $base->{profile});
-    $pcs = AddPcs($pcs, $base->{pcs});
-    $symbol_map = MergeSymbols($symbol_map, $base->{symbols});
-  }
-
-  # Collect symbols
-  my $symbols;
-  if ($main::use_symbolized_profile) {
-    $symbols = FetchSymbols($pcs, $symbol_map);
-  } elsif ($main::use_symbol_page) {
-    $symbols = FetchSymbols($pcs);
-  } else {
-    # TODO(csilvers): $libs uses the /proc/self/maps data from profile1,
-    # which may differ from the data from subsequent profiles, especially
-    # if they were run on different machines.  Use appropriate libs for
-    # each pc somehow.
-    $symbols = ExtractSymbols($libs, $pcs);
-  }
-
-  if (!defined($main::opt_thread)) {
-    FilterAndPrint($profile, $symbols, $libs);
-  }
-  if (defined($data->{threads})) {
-    foreach my $thread (sort { $a <=> $b } keys(%{$data->{threads}})) {
-      if (!defined($main::opt_thread) || $main::opt_thread == $thread) {
-        my $thread_profile = $data->{threads}{$thread};
-        FilterAndPrint($thread_profile, $symbols, $libs, $thread);
-      }
-    }
-  }
-
-  cleanup();
-  exit(0);
-}
-
-##### Entry Point #####
-
-Main();
-
-# Temporary code to detect if we're running on a Goobuntu system.
-# These systems don't have the right stuff installed for the special
-# Readline libraries to work, so as a temporary workaround, we default
-# to using the normal stdio code, rather than the fancier readline-based
-# code.
-sub ReadlineMightFail {
-  if (-e '/lib/libtermcap.so.2') {
-    return 0;  # libtermcap exists, so readline should be okay
-  } else {
-    return 1;
-  }
-}
-
-sub RunGV {
-  my $fname = shift;
-  my $bg = shift;       # "" or " &" if we should run in background
-  if (!system(ShellEscape(@GV, "--version") . " >$dev_null 2>&1")) {
-    # Options using double dash are supported by this gv version.
-    # Also, turn on noantialias to better handle a bug in gv for
-    # postscript files with large dimensions.
-    # TODO: Maybe we should not pass the --noantialias flag
-    # if the gv version is known to work properly without the flag.
-    system(ShellEscape(@GV, "--scale=$main::opt_scale", "--noantialias", $fname)
-           . $bg);
-  } else {
-    # Old gv version - only supports options that use single dash.
-    print STDERR ShellEscape(@GV, "-scale", $main::opt_scale) . "\n";
-    system(ShellEscape(@GV, "-scale", "$main::opt_scale", $fname) . $bg);
-  }
-}
-
-sub RunEvince {
-  my $fname = shift;
-  my $bg = shift;       # "" or " &" if we should run in background
-  system(ShellEscape(@EVINCE, $fname) . $bg);
-}
-
-sub RunWeb {
-  my $fname = shift;
-  print STDERR "Loading web page file:///$fname\n";
-
-  if (`uname` =~ /Darwin/) {
-    # OS X: open will use standard preference for SVG files.
-    system("/usr/bin/open", $fname);
-    return;
-  }
-
-  # Some kind of Unix; try generic symlinks, then specific browsers.
-  # (Stop once we find one.)
-  # Works best if the browser is already running.
-  my @alt = (
-    "/etc/alternatives/gnome-www-browser",
-    "/etc/alternatives/x-www-browser",
-    "google-chrome",
-    "firefox",
-  );
-  foreach my $b (@alt) {
-    if (system($b, $fname) == 0) {
-      return;
-    }
-  }
-
-  print STDERR "Could not load web browser.\n";
-}
-
-sub RunKcachegrind {
-  my $fname = shift;
-  my $bg = shift;       # "" or " &" if we should run in background
-  print STDERR "Starting '@KCACHEGRIND " . $fname . $bg . "'\n";
-  system(ShellEscape(@KCACHEGRIND, $fname) . $bg);
-}
-
-
-##### Interactive helper routines #####
-
-sub InteractiveMode {
-  $| = 1;  # Make output unbuffered for interactive mode
-  my ($orig_profile, $symbols, $libs, $total) = @_;
-
-  print STDERR "Welcome to pprof!  For help, type 'help'.\n";
-
-  # Use ReadLine if it's installed and input comes from a console.
-  if ( -t STDIN &&
-       !ReadlineMightFail() &&
-       defined(eval {require Term::ReadLine}) ) {
-    my $term = new Term::ReadLine 'pprof';
-    while ( defined ($_ = $term->readline('(pprof) '))) {
-      $term->addhistory($_) if /\S/;
-      if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
-        last;    # exit when we get an interactive command to quit
-      }
-    }
-  } else {       # don't have readline
-    while (1) {
-      print STDERR "(pprof) ";
-      $_ = <STDIN>;
-      last if ! defined $_ ;
-      s/\r//g;         # turn windows-looking lines into unix-looking lines
-
-      # Save some flags that might be reset by InteractiveCommand()
-      my $save_opt_lines = $main::opt_lines;
-
-      if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
-        last;    # exit when we get an interactive command to quit
-      }
-
-      # Restore flags
-      $main::opt_lines = $save_opt_lines;
-    }
-  }
-}
-
-# Takes the original profile, the symbol and library tables, the total
-# sample count, and the command to run.
-# Returns 1 if we should keep going, or 0 if we were asked to quit.
-sub InteractiveCommand {
-  my($orig_profile, $symbols, $libs, $total, $command) = @_;
-  $_ = $command;                # just to make future m//'s easier
-  if (!defined($_)) {
-    print STDERR "\n";
-    return 0;
-  }
-  if (m/^\s*quit/) {
-    return 0;
-  }
-  if (m/^\s*help/) {
-    InteractiveHelpMessage();
-    return 1;
-  }
-  # Clear all the mode options -- mode is controlled by "$command"
-  $main::opt_text = 0;
-  $main::opt_callgrind = 0;
-  $main::opt_disasm = 0;
-  $main::opt_list = 0;
-  $main::opt_gv = 0;
-  $main::opt_evince = 0;
-  $main::opt_cum = 0;
-
-  if (m/^\s*(text|top)(\d*)\s*(.*)/) {
-    $main::opt_text = 1;
-
-    my $line_limit = ($2 ne "") ? int($2) : 10;
-
-    my $routine;
-    my $ignore;
-    ($routine, $ignore) = ParseInteractiveArgs($3);
-
-    my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
-    my $reduced = ReduceProfile($symbols, $profile);
-
-    # Get derived profiles
-    my $flat = FlatProfile($reduced);
-    my $cumulative = CumulativeProfile($reduced);
-
-    PrintText($symbols, $flat, $cumulative, $line_limit);
-    return 1;
-  }
-  if (m/^\s*callgrind\s*([^ \n]*)/) {
-    $main::opt_callgrind = 1;
-
-    # Get derived profiles
-    my $calls = ExtractCalls($symbols, $orig_profile);
-    my $filename = $1;
-    if ( $1 eq '' ) {
-      $filename = TempName($main::next_tmpfile, "callgrind");
-    }
-    PrintCallgrind($calls, $filename);
-    if ( $1 eq '' ) {
-      RunKcachegrind($filename, " & ");
-      $main::next_tmpfile++;
-    }
-
-    return 1;
-  }
-  if (m/^\s*(web)?list\s*(.+)/) {
-    my $html = (defined($1) && ($1 eq "web"));
-    $main::opt_list = 1;
-
-    my $routine;
-    my $ignore;
-    ($routine, $ignore) = ParseInteractiveArgs($2);
-
-    my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
-    my $reduced = ReduceProfile($symbols, $profile);
-
-    # Get derived profiles
-    my $flat = FlatProfile($reduced);
-    my $cumulative = CumulativeProfile($reduced);
-
-    PrintListing($total, $libs, $flat, $cumulative, $routine, $html);
-    return 1;
-  }
-  if (m/^\s*disasm\s*(.+)/) {
-    $main::opt_disasm = 1;
-
-    my $routine;
-    my $ignore;
-    ($routine, $ignore) = ParseInteractiveArgs($1);
-
-    # Process current profile to account for various settings
-    my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
-    my $reduced = ReduceProfile($symbols, $profile);
-
-    # Get derived profiles
-    my $flat = FlatProfile($reduced);
-    my $cumulative = CumulativeProfile($reduced);
-
-    PrintDisassembly($libs, $flat, $cumulative, $routine);
-    return 1;
-  }
-  if (m/^\s*(gv|web|evince)\s*(.*)/) {
-    $main::opt_gv = 0;
-    $main::opt_evince = 0;
-    $main::opt_web = 0;
-    if ($1 eq "gv") {
-      $main::opt_gv = 1;
-    } elsif ($1 eq "evince") {
-      $main::opt_evince = 1;
-    } elsif ($1 eq "web") {
-      $main::opt_web = 1;
-    }
-
-    my $focus;
-    my $ignore;
-    ($focus, $ignore) = ParseInteractiveArgs($2);
-
-    # Process current profile to account for various settings
-    my $profile = ProcessProfile($total, $orig_profile, $symbols,
-                                 $focus, $ignore);
-    my $reduced = ReduceProfile($symbols, $profile);
-
-    # Get derived profiles
-    my $flat = FlatProfile($reduced);
-    my $cumulative = CumulativeProfile($reduced);
-
-    if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
-      if ($main::opt_gv) {
-        RunGV(TempName($main::next_tmpfile, "ps"), " &");
-      } elsif ($main::opt_evince) {
-        RunEvince(TempName($main::next_tmpfile, "pdf"), " &");
-      } elsif ($main::opt_web) {
-        RunWeb(TempName($main::next_tmpfile, "svg"));
-      }
-      $main::next_tmpfile++;
-    }
-    return 1;
-  }
-  if (m/^\s*$/) {
-    return 1;
-  }
-  print STDERR "Unknown command: try 'help'.\n";
-  return 1;
-}
-
-
-sub ProcessProfile {
-  my $total_count = shift;
-  my $orig_profile = shift;
-  my $symbols = shift;
-  my $focus = shift;
-  my $ignore = shift;
-
-  # Process current profile to account for various settings
-  my $profile = $orig_profile;
-  printf("Total: %s %s\n", Unparse($total_count), Units());
-  if ($focus ne '') {
-    $profile = FocusProfile($symbols, $profile, $focus);
-    my $focus_count = TotalProfile($profile);
-    printf("After focusing on '%s': %s %s of %s (%0.1f%%)\n",
-           $focus,
-           Unparse($focus_count), Units(),
-           Unparse($total_count), ($focus_count*100.0) / $total_count);
-  }
-  if ($ignore ne '') {
-    $profile = IgnoreProfile($symbols, $profile, $ignore);
-    my $ignore_count = TotalProfile($profile);
-    printf("After ignoring '%s': %s %s of %s (%0.1f%%)\n",
-           $ignore,
-           Unparse($ignore_count), Units(),
-           Unparse($total_count),
-           ($ignore_count*100.0) / $total_count);
-  }
-
-  return $profile;
-}
-
-sub InteractiveHelpMessage {
-  print STDERR <<ENDOFHELP;
-Interactive pprof mode
-
-Commands:
-  gv
-  gv [focus] [-ignore1] [-ignore2]
-      Show graphical hierarchical display of current profile.  Without
-      any arguments, shows all samples in the profile.  With the optional
-      "focus" argument, restricts the samples shown to just those where
-      the "focus" regular expression matches a routine name on the stack
-      trace.
-
-  web
-  web [focus] [-ignore1] [-ignore2]
-      Like gv, but displays the profile in your web browser instead of
-      using Ghostview. Works best if your web browser is already running.
-      To change the browser that gets used:
-      On Linux, set the /etc/alternatives/gnome-www-browser symlink.
-      On OS X, change the Finder association for SVG files.
-
-  list [routine_regexp] [-ignore1] [-ignore2]
-      Show source listing of routines whose names match "routine_regexp"
-
-  weblist [routine_regexp] [-ignore1] [-ignore2]
-      Displays a source listing of routines whose names match "routine_regexp"
-      in a web browser.  You can click on source lines to view the
-      corresponding disassembly.
-
-  top [--cum] [-ignore1] [-ignore2]
-  top20 [--cum] [-ignore1] [-ignore2]
-  top37 [--cum] [-ignore1] [-ignore2]
-      Show top lines ordered by flat profile count, or cumulative count
-      if --cum is specified.  If a number is present after 'top', the
-      top K routines will be shown (defaults to showing the top 10)
-
-  disasm [routine_regexp] [-ignore1] [-ignore2]
-      Show disassembly of routines whose names match "routine_regexp",
-      annotated with sample counts.
-
-  callgrind
-  callgrind [filename]
-      Generates callgrind file. If no filename is given, kcachegrind is called.
-
-  help - This listing
-  quit or ^D - End pprof
-
-For commands that accept optional -ignore tags, samples where any routine in
-the stack trace matches the regular expression in any of the -ignore
-parameters will be ignored.
-
-Further pprof details are available at this location (or one similar):
-
- /usr/doc/gperftools-$PPROF_VERSION/cpu_profiler.html
- /usr/doc/gperftools-$PPROF_VERSION/heap_profiler.html
-
-ENDOFHELP
-}
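-
-# Split the argument string of an interactive command into a focus
-# pattern and an ignore pattern.  Words starting with '-' (other than
-# the -lines/-cum flags) are OR-ed into the ignore regexp; everything
-# else is OR-ed into the focus regexp.  For example (hypothetical input),
-# ParseInteractiveArgs("foo -bar -baz") returns ("foo", "bar|baz") and
-# turns on no extra flags.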
-sub ParseInteractiveArgs {
-  my $args = shift;
-  my $focus = "";
-  my $ignore = "";
-  my @x = split(/ +/, $args);
-  foreach $a (@x) {
-    if ($a =~ m/^(--|-)lines$/) {
-      $main::opt_lines = 1;
-    } elsif ($a =~ m/^(--|-)cum$/) {
-      $main::opt_cum = 1;
-    } elsif ($a =~ m/^-(.*)/) {
-      $ignore .= (($ignore ne "") ? "|" : "" ) . $1;
-    } else {
-      $focus .= (($focus ne "") ? "|" : "" ) . $a;
-    }
-  }
-  if ($ignore ne "") {
-    print STDERR "Ignoring samples in call stacks that match '$ignore'\n";
-  }
-  return ($focus, $ignore);
-}
-
-##### Output code #####
-
-sub TempName {
-  my $fnum = shift;
-  my $ext = shift;
-  my $file = "$main::tmpfile_ps.$fnum.$ext";
-  $main::tempnames{$file} = 1;
-  return $file;
-}
-
-# Print profile data in packed binary format (64-bit) to standard out
-sub PrintProfileData {
-  my $profile = shift;
-
-  # print header (64-bit style)
-  # (zero) (header-size) (version) (sample-period) (zero)
-  print pack('L*', 0, 0, 3, 0, 0, 0, 1, 0, 0, 0);
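-  # For illustration: the ten 32-bit words above encode the five 64-bit
-  # header slots 0, 3, 0, 1, 0 -- a leading zero, a header size of 3,
-  # format version 0, a sample period of 1, and a trailing zero.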
-
-  foreach my $k (keys(%{$profile})) {
-    my $count = $profile->{$k};
-    my @addrs = split(/\n/, $k);
-    if ($#addrs >= 0) {
-      my $depth = $#addrs + 1;
-      # int(foo / 2**32) is the only reliable way to get rid of bottom
-      # 32 bits on both 32- and 64-bit systems.
-      print pack('L*', $count & 0xFFFFFFFF, int($count / 2**32));
-      print pack('L*', $depth & 0xFFFFFFFF, int($depth / 2**32));
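-      # For example, a count of 2**32 + 2 would be emitted as the two
-      # 32-bit words 2 (low half) and 1 (high half).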
-
-      foreach my $full_addr (@addrs) {
-        my $addr = $full_addr;
-        $addr =~ s/0x0*//;  # strip off leading 0x, zeroes
-        if (length($addr) > 16) {
-          print STDERR "Invalid address in profile: $full_addr\n";
-          next;
-        }
-        my $low_addr = substr($addr, -8);       # get last 8 hex chars
-        my $high_addr = substr($addr, -16, 8);  # get up to 8 more hex chars
-        print pack('L*', hex('0x' . $low_addr), hex('0x' . $high_addr));
-      }
-    }
-  }
-}
-
-# Print symbols and profile data
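-# The output consists of a symbol section followed by a profile section,
-# each introduced by a marker line derived from $SYMBOL_PAGE and
-# $PROFILE_PAGE.  Roughly (names and addresses below are illustrative,
-# assuming the pages end in "symbol" and "profile"):
-#   --- symbol
-#   binary=/path/to/prog
-#   0x0000000000400f20 main
-#   ---
-#   --- profile
-#   <binary or collected profile data>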
-sub PrintSymbolizedProfile {
-  my $symbols = shift;
-  my $profile = shift;
-  my $prog = shift;
-
-  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-  my $symbol_marker = $&;
-
-  print '--- ', $symbol_marker, "\n";
-  if (defined($prog)) {
-    print 'binary=', $prog, "\n";
-  }
-  while (my ($pc, $name) = each(%{$symbols})) {
-    my $sep = ' ';
-    print '0x', $pc;
-    # We have a list of function names, which include the inlined
-    # calls.  They are separated (and terminated) by --, which is
-    # illegal in function names.
-    for (my $j = 2; $j <= $#{$name}; $j += 3) {
-      print $sep, $name->[$j];
-      $sep = '--';
-    }
-    print "\n";
-  }
-  print '---', "\n";
-
-  $PROFILE_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-  my $profile_marker = $&;
-  print '--- ', $profile_marker, "\n";
-  if (defined($main::collected_profile)) {
-    # if used with remote fetch, simply dump the collected profile to output.
-    open(SRC, "<$main::collected_profile");
-    while (<SRC>) {
-      print $_;
-    }
-    close(SRC);
-  } else {
-    # dump a cpu-format profile to standard out
-    PrintProfileData($profile);
-  }
-}
-
-# Print text output
-sub PrintText {
-  my $symbols = shift;
-  my $flat = shift;
-  my $cumulative = shift;
-  my $line_limit = shift;
-
-  my $total = TotalProfile($flat);
-
-  # Which profile to sort by?
-  my $s = $main::opt_cum ? $cumulative : $flat;
-
-  my $running_sum = 0;
-  my $lines = 0;
-  foreach my $k (sort { GetEntry($s, $b) <=> GetEntry($s, $a) || $a cmp $b }
-                 keys(%{$cumulative})) {
-    my $f = GetEntry($flat, $k);
-    my $c = GetEntry($cumulative, $k);
-    $running_sum += $f;
-
-    my $sym = $k;
-    if (exists($symbols->{$k})) {
-      $sym = $symbols->{$k}->[0] . " " . $symbols->{$k}->[1];
-      if ($main::opt_addresses) {
-        $sym = $k . " " . $sym;
-      }
-    }
-
-    if ($f != 0 || $c != 0) {
-      printf("%8s %6s %6s %8s %6s %s\n",
-             Unparse($f),
-             Percent($f, $total),
-             Percent($running_sum, $total),
-             Unparse($c),
-             Percent($c, $total),
-             $sym);
-    }
-    $lines++;
-    last if ($line_limit >= 0 && $lines >= $line_limit);
-  }
-}
-
-# Callgrind format has a compression for repeated function and file
-# names.  You show the name the first time, and just use its number
-# subsequently.  This can cut down the file to about a third or a
-# quarter of its uncompressed size.  $key and $val are the key/value
-# pair that would normally be printed by callgrind; $map is a map from
-# value to number.
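-# For example (hypothetical values): the first reference to a file might
-# be printed as "fl=(1) /foo/bar.cc", while later references to the same
-# file are printed as just "fl=(1)".  Values of three characters or less
-# are always printed in full, since an index would not save any space.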
-sub CompressedCGName {
-  my($key, $val, $map) = @_;
-  my $idx = $map->{$val};
-  # For very short values, providing an index hurts rather than helps.
-  if (length($val) <= 3) {
-    return "$key=$val\n";
-  } elsif (defined($idx)) {
-    return "$key=($idx)\n";
-  } else {
-    # scalar(keys(%{$map})) gives the number of items in the map.
-    $idx = scalar(keys(%{$map})) + 1;
-    $map->{$val} = $idx;
-    return "$key=($idx) $val\n";
-  }
-}
-
-# Print the call graph in a way that's suitable for callgrind.
-sub PrintCallgrind {
-  my $calls = shift;
-  my $filename;
-  my %filename_to_index_map;
-  my %fnname_to_index_map;
-
-  if ($main::opt_interactive) {
-    $filename = shift;
-    print STDERR "Writing callgrind file to '$filename'.\n"
-  } else {
-    $filename = "&STDOUT";
-  }
-  open(CG, ">$filename");
-  printf CG ("events: Hits\n\n");
-  foreach my $call ( map { $_->[0] }
-                     sort { $a->[1] cmp $b ->[1] ||
-                            $a->[2] <=> $b->[2] }
-                     map { /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
-                           [$_, $1, $2] }
-                     keys %$calls ) {
-    my $count = int($calls->{$call});
-    $call =~ /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
-    my ( $caller_file, $caller_line, $caller_function,
-         $callee_file, $callee_line, $callee_function ) =
-       ( $1, $2, $3, $5, $6, $7 );
-
-    # TODO(csilvers): for better compression, collect all the
-    # caller/callee_files and functions first, before printing
-    # anything, and only compress those referenced more than once.
-    printf CG CompressedCGName("fl", $caller_file, \%filename_to_index_map);
-    printf CG CompressedCGName("fn", $caller_function, \%fnname_to_index_map);
-    if (defined $6) {
-      printf CG CompressedCGName("cfl", $callee_file, \%filename_to_index_map);
-      printf CG CompressedCGName("cfn", $callee_function, \%fnname_to_index_map);
-      printf CG ("calls=$count $callee_line\n");
-    }
-    printf CG ("$caller_line $count\n\n");
-  }
-}
-
-# Print disassembly for all routines that match $main::opt_disasm
-sub PrintDisassembly {
-  my $libs = shift;
-  my $flat = shift;
-  my $cumulative = shift;
-  my $disasm_opts = shift;
-
-  my $total = TotalProfile($flat);
-
-  foreach my $lib (@{$libs}) {
-    my $symbol_table = GetProcedureBoundaries($lib->[0], $disasm_opts);
-    my $offset = AddressSub($lib->[1], $lib->[3]);
-    foreach my $routine (sort ByName keys(%{$symbol_table})) {
-      my $start_addr = $symbol_table->{$routine}->[0];
-      my $end_addr = $symbol_table->{$routine}->[1];
-      # See if there are any samples in this routine
-      my $length = hex(AddressSub($end_addr, $start_addr));
-      my $addr = AddressAdd($start_addr, $offset);
-      for (my $i = 0; $i < $length; $i++) {
-        if (defined($cumulative->{$addr})) {
-          PrintDisassembledFunction($lib->[0], $offset,
-                                    $routine, $flat, $cumulative,
-                                    $start_addr, $end_addr, $total);
-          last;
-        }
-        $addr = AddressInc($addr);
-      }
-    }
-  }
-}
-
-# Return reference to array of tuples of the form:
-#       [start_address, filename, linenumber, instruction, limit_address]
-# E.g.,
-#       ["0x806c43d", "/foo/bar.cc", 131, "ret", "0x806c440"]
-sub Disassemble {
-  my $prog = shift;
-  my $offset = shift;
-  my $start_addr = shift;
-  my $end_addr = shift;
-
-  my $objdump = $obj_tool_map{"objdump"};
-  my $cmd = ShellEscape($objdump, "-C", "-d", "-l", "--no-show-raw-insn",
-                        "--start-address=0x$start_addr",
-                        "--stop-address=0x$end_addr", $prog);
-  open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
-  my @result = ();
-  my $filename = "";
-  my $linenumber = -1;
-  my $last = ["", "", "", ""];
-  while (<OBJDUMP>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    chop;
-    if (m|\s*([^:\s]+):(\d+)\s*$|) {
-      # Location line of the form:
-      #   <filename>:<linenumber>
-      $filename = $1;
-      $linenumber = $2;
-    } elsif (m/^ +([0-9a-f]+):\s*(.*)/) {
-      # Disassembly line -- zero-extend address to full length
-      my $addr = HexExtend($1);
-      my $k = AddressAdd($addr, $offset);
-      $last->[4] = $k;   # Store ending address for previous instruction
-      $last = [$k, $filename, $linenumber, $2, $end_addr];
-      push(@result, $last);
-    }
-  }
-  close(OBJDUMP);
-  return @result;
-}
-
-# The input file should contain lines that look like /proc/maps output
-# (the same format as expected from the profiles) or like hex
-# addresses (such as "0xDEADBEEF").  We will parse all the
-# /proc/maps output, and for all the hex addresses, we will output
-# "short" symbol names, one per line, in the same order as the input.
-sub PrintSymbols {
-  my $maps_and_symbols_file = shift;
-
-  # ParseLibraries expects pcs to be in a set.  Fine by us...
-  my @pclist = ();   # pcs in sorted order
-  my $pcs = {};
-  my $map = "";
-  foreach my $line (<$maps_and_symbols_file>) {
-    $line =~ s/\r//g;    # turn windows-looking lines into unix-looking lines
-    if ($line =~ /\b(0x[0-9a-f]+)\b/i) {
-      push(@pclist, HexExtend($1));
-      $pcs->{$pclist[-1]} = 1;
-    } else {
-      $map .= $line;
-    }
-  }
-
-  my $libs = ParseLibraries($main::prog, $map, $pcs);
-  my $symbols = ExtractSymbols($libs, $pcs);
-
-  foreach my $pc (@pclist) {
-    # ->[0] is the shortname, ->[2] is the full name
-    print(($symbols->{$pc}->[0] || "??") . "\n");
-  }
-}
-
-
-# For sorting functions by name
-sub ByName {
-  return ShortFunctionName($a) cmp ShortFunctionName($b);
-}
-
-# Print source-listing for all routines that match $list_opts
-sub PrintListing {
-  my $total = shift;
-  my $libs = shift;
-  my $flat = shift;
-  my $cumulative = shift;
-  my $list_opts = shift;
-  my $html = shift;
-
-  my $output = \*STDOUT;
-  my $fname = "";
-
-  if ($html) {
-    # Arrange to write the output to a temporary file
-    $fname = TempName($main::next_tmpfile, "html");
-    $main::next_tmpfile++;
-    if (!open(TEMP, ">$fname")) {
-      print STDERR "$fname: $!\n";
-      return;
-    }
-    $output = \*TEMP;
-    print $output HtmlListingHeader();
-    printf $output ("<div class=\"legend\">%s<br>Total: %s %s</div>\n",
-                    $main::prog, Unparse($total), Units());
-  }
-
-  my $listed = 0;
-  foreach my $lib (@{$libs}) {
-    my $symbol_table = GetProcedureBoundaries($lib->[0], $list_opts);
-    my $offset = AddressSub($lib->[1], $lib->[3]);
-    foreach my $routine (sort ByName keys(%{$symbol_table})) {
-      # Print if there are any samples in this routine
-      my $start_addr = $symbol_table->{$routine}->[0];
-      my $end_addr = $symbol_table->{$routine}->[1];
-      my $length = hex(AddressSub($end_addr, $start_addr));
-      my $addr = AddressAdd($start_addr, $offset);
-      for (my $i = 0; $i < $length; $i++) {
-        if (defined($cumulative->{$addr})) {
-          $listed += PrintSource(
-            $lib->[0], $offset,
-            $routine, $flat, $cumulative,
-            $start_addr, $end_addr,
-            $html,
-            $output);
-          last;
-        }
-        $addr = AddressInc($addr);
-      }
-    }
-  }
-
-  if ($html) {
-    if ($listed > 0) {
-      print $output HtmlListingFooter();
-      close($output);
-      RunWeb($fname);
-    } else {
-      close($output);
-      unlink($fname);
-    }
-  }
-}
-
-sub HtmlListingHeader {
-  return <<'EOF';
-<!DOCTYPE html>
-<html>
-<head>
-<title>Pprof listing</title>
-<style type="text/css">
-body {
-  font-family: sans-serif;
-}
-h1 {
-  font-size: 1.5em;
-  margin-bottom: 4px;
-}
-.legend {
-  font-size: 1.25em;
-}
-.line {
-  color: #aaaaaa;
-}
-.nop {
-  color: #aaaaaa;
-}
-.unimportant {
-  color: #cccccc;
-}
-.disasmloc {
-  color: #000000;
-}
-.deadsrc {
-  cursor: pointer;
-}
-.deadsrc:hover {
-  background-color: #eeeeee;
-}
-.livesrc {
-  color: #0000ff;
-  cursor: pointer;
-}
-.livesrc:hover {
-  background-color: #eeeeee;
-}
-.asm {
-  color: #008800;
-  display: none;
-}
-</style>
-<script type="text/javascript">
-function pprof_toggle_asm(e) {
-  var target;
-  if (!e) e = window.event;
-  if (e.target) target = e.target;
-  else if (e.srcElement) target = e.srcElement;
-
-  if (target) {
-    var asm = target.nextSibling;
-    if (asm && asm.className == "asm") {
-      asm.style.display = (asm.style.display == "block" ? "" : "block");
-      e.preventDefault();
-      return false;
-    }
-  }
-}
-</script>
-</head>
-<body>
-EOF
-}
-
-sub HtmlListingFooter {
-  return <<'EOF';
-</body>
-</html>
-EOF
-}
-
-sub HtmlEscape {
-  my $text = shift;
-  $text =~ s/&/&amp;/g;
-  $text =~ s/</&lt;/g;
-  $text =~ s/>/&gt;/g;
-  return $text;
-}
-
-# Returns the indentation of the line, if it has any non-whitespace
-# characters.  Otherwise, returns -1.
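-# For example, Indentation("  foo();") returns 2, while Indentation("   ")
-# returns -1 because the line is all whitespace.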
-sub Indentation {
-  my $line = shift;
-  if ($line =~ m/^(\s*)\S/) {
-    return length($1);
-  } else {
-    return -1;
-  }
-}
-
-# If the symbol table contains inlining info, Disassemble() may tag an
-# instruction with a location inside an inlined function.  But for
-# source listings, we prefer to use the location in the function we
-# are listing.  So use MapToSymbols() to fetch full location
-# information for each instruction and then pick out the first
-# location from a location list (location list contains callers before
-# callees in case of inlining).
-#
-# After this routine has run, each entry in $instructions contains:
-#   [0] start address
-#   [1] filename for function we are listing
-#   [2] line number for function we are listing
-#   [3] disassembly
-#   [4] limit address
-#   [5] most specific filename (may be different from [1] due to inlining)
-#   [6] most specific line number (may be different from [2] due to inlining)
-sub GetTopLevelLineNumbers {
-  my ($lib, $offset, $instructions) = @_;
-  my $pcs = [];
-  for (my $i = 0; $i <= $#{$instructions}; $i++) {
-    push(@{$pcs}, $instructions->[$i]->[0]);
-  }
-  my $symbols = {};
-  MapToSymbols($lib, $offset, $pcs, $symbols);
-  for (my $i = 0; $i <= $#{$instructions}; $i++) {
-    my $e = $instructions->[$i];
-    push(@{$e}, $e->[1]);
-    push(@{$e}, $e->[2]);
-    my $addr = $e->[0];
-    my $sym = $symbols->{$addr};
-    if (defined($sym)) {
-      if ($#{$sym} >= 2 && $sym->[1] =~ m/^(.*):(\d+)$/) {
-        $e->[1] = $1;  # File name
-        $e->[2] = $2;  # Line number
-      }
-    }
-  }
-}
-
-# Print source-listing for one routine
-sub PrintSource {
-  my $prog = shift;
-  my $offset = shift;
-  my $routine = shift;
-  my $flat = shift;
-  my $cumulative = shift;
-  my $start_addr = shift;
-  my $end_addr = shift;
-  my $html = shift;
-  my $output = shift;
-
-  # Disassemble all instructions (just to get line numbers)
-  my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr);
-  GetTopLevelLineNumbers($prog, $offset, \@instructions);
-
-  # Hack 1: assume that the first source file encountered in the
-  # disassembly contains the routine
-  my $filename = undef;
-  for (my $i = 0; $i <= $#instructions; $i++) {
-    if ($instructions[$i]->[2] >= 0) {
-      $filename = $instructions[$i]->[1];
-      last;
-    }
-  }
-  if (!defined($filename)) {
-    print STDERR "no filename found in $routine\n";
-    return 0;
-  }
-
-  # Hack 2: assume that the largest line number from $filename is the
-  # end of the procedure.  This is typically safe since if P1 contains
-  # an inlined call to P2, then P2 usually occurs earlier in the
-  # source file.  If this does not work, we might have to compute a
-  # density profile or just print all regions we find.
-  my $lastline = 0;
-  for (my $i = 0; $i <= $#instructions; $i++) {
-    my $f = $instructions[$i]->[1];
-    my $l = $instructions[$i]->[2];
-    if (($f eq $filename) && ($l > $lastline)) {
-      $lastline = $l;
-    }
-  }
-
-  # Hack 3: assume the first source location from "filename" is the start of
-  # the source code.
-  my $firstline = 1;
-  for (my $i = 0; $i <= $#instructions; $i++) {
-    if ($instructions[$i]->[1] eq $filename) {
-      $firstline = $instructions[$i]->[2];
-      last;
-    }
-  }
-
-  # Hack 4: Extend last line forward until its indentation is less than
-  # the indentation we saw on $firstline
-  my $oldlastline = $lastline;
-  {
-    if (!open(FILE, "<$filename")) {
-      print STDERR "$filename: $!\n";
-      return 0;
-    }
-    my $l = 0;
-    my $first_indentation = -1;
-    while (<FILE>) {
-      s/\r//g;         # turn windows-looking lines into unix-looking lines
-      $l++;
-      my $indent = Indentation($_);
-      if ($l >= $firstline) {
-        if ($first_indentation < 0 && $indent >= 0) {
-          $first_indentation = $indent;
-          last if ($first_indentation == 0);
-        }
-      }
-      if ($l >= $lastline && $indent >= 0) {
-        if ($indent >= $first_indentation) {
-          $lastline = $l+1;
-        } else {
-          last;
-        }
-      }
-    }
-    close(FILE);
-  }
-
-  # Assign all samples to the range $firstline..$lastline.
-  # Hack 5: If an instruction does not occur in the range, its samples
-  # are moved to the next instruction that occurs in the range.
-  my $samples1 = {};        # Map from line number to flat count
-  my $samples2 = {};        # Map from line number to cumulative count
-  my $running1 = 0;         # Unassigned flat counts
-  my $running2 = 0;         # Unassigned cumulative counts
-  my $total1 = 0;           # Total flat counts
-  my $total2 = 0;           # Total cumulative counts
-  my %disasm = ();          # Map from line number to disassembly
-  my $running_disasm = "";  # Unassigned disassembly
-  my $skip_marker = "---\n";
-  if ($html) {
-    $skip_marker = "";
-    for (my $l = $firstline; $l <= $lastline; $l++) {
-      $disasm{$l} = "";
-    }
-  }
-  my $last_dis_filename = '';
-  my $last_dis_linenum = -1;
-  my $last_touched_line = -1;  # To detect gaps in disassembly for a line
-  foreach my $e (@instructions) {
-    # Add up counts for all addresses that fall inside this instruction
-    my $c1 = 0;
-    my $c2 = 0;
-    for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
-      $c1 += GetEntry($flat, $a);
-      $c2 += GetEntry($cumulative, $a);
-    }
-
-    if ($html) {
-      my $dis = sprintf("      %6s %6s \t\t%8s: %s ",
-                        HtmlPrintNumber($c1),
-                        HtmlPrintNumber($c2),
-                        UnparseAddress($offset, $e->[0]),
-                        CleanDisassembly($e->[3]));
-
-      # Append the most specific source line associated with this instruction
-      if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) };
-      $dis = HtmlEscape($dis);
-      my $f = $e->[5];
-      my $l = $e->[6];
-      if ($f ne $last_dis_filename) {
-        $dis .= sprintf("<span class=disasmloc>%s:%d</span>",
-                        HtmlEscape(CleanFileName($f)), $l);
-      } elsif ($l ne $last_dis_linenum) {
-        # De-emphasize the unchanged file name portion
-        $dis .= sprintf("<span class=unimportant>%s</span>" .
-                        "<span class=disasmloc>:%d</span>",
-                        HtmlEscape(CleanFileName($f)), $l);
-      } else {
-        # De-emphasize the entire location
-        $dis .= sprintf("<span class=unimportant>%s:%d</span>",
-                        HtmlEscape(CleanFileName($f)), $l);
-      }
-      $last_dis_filename = $f;
-      $last_dis_linenum = $l;
-      $running_disasm .= $dis;
-      $running_disasm .= "\n";
-    }
-
-    $running1 += $c1;
-    $running2 += $c2;
-    $total1 += $c1;
-    $total2 += $c2;
-    my $file = $e->[1];
-    my $line = $e->[2];
-    if (($file eq $filename) &&
-        ($line >= $firstline) &&
-        ($line <= $lastline)) {
-      # Assign all accumulated samples to this line
-      AddEntry($samples1, $line, $running1);
-      AddEntry($samples2, $line, $running2);
-      $running1 = 0;
-      $running2 = 0;
-      if ($html) {
-        if ($line != $last_touched_line && $disasm{$line} ne '') {
-          $disasm{$line} .= "\n";
-        }
-        $disasm{$line} .= $running_disasm;
-        $running_disasm = '';
-        $last_touched_line = $line;
-      }
-    }
-  }
-
-  # Assign any leftover samples to $lastline
-  AddEntry($samples1, $lastline, $running1);
-  AddEntry($samples2, $lastline, $running2);
-  if ($html) {
-    if ($lastline != $last_touched_line && $disasm{$lastline} ne '') {
-      $disasm{$lastline} .= "\n";
-    }
-    $disasm{$lastline} .= $running_disasm;
-  }
-
-  if ($html) {
-    printf $output (
-      "<h1>%s</h1>%s\n<pre onClick=\"pprof_toggle_asm()\">\n" .
-      "Total:%6s %6s (flat / cumulative %s)\n",
-      HtmlEscape(ShortFunctionName($routine)),
-      HtmlEscape(CleanFileName($filename)),
-      Unparse($total1),
-      Unparse($total2),
-      Units());
-  } else {
-    printf $output (
-      "ROUTINE ====================== %s in %s\n" .
-      "%6s %6s Total %s (flat / cumulative)\n",
-      ShortFunctionName($routine),
-      CleanFileName($filename),
-      Unparse($total1),
-      Unparse($total2),
-      Units());
-  }
-  if (!open(FILE, "<$filename")) {
-    print STDERR "$filename: $!\n";
-    return 0;
-  }
-  my $l = 0;
-  while (<FILE>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    $l++;
-    if ($l >= $firstline - 5 &&
-        (($l <= $oldlastline + 5) || ($l <= $lastline))) {
-      chop;
-      my $text = $_;
-      if ($l == $firstline) { print $output $skip_marker; }
-      my $n1 = GetEntry($samples1, $l);
-      my $n2 = GetEntry($samples2, $l);
-      if ($html) {
-        # Emit a span that has one of the following classes:
-        #    livesrc -- has samples
-        #    deadsrc -- has disassembly, but with no samples
-        #    nop     -- has no matching disassembly
-        # Also emit an optional span containing disassembly.
-        my $dis = $disasm{$l};
-        my $asm = "";
-        if (defined($dis) && $dis ne '') {
-          $asm = "<span class=\"asm\">" . $dis . "</span>";
-        }
-        my $source_class = (($n1 + $n2 > 0)
-                            ? "livesrc"
-                            : (($asm ne "") ? "deadsrc" : "nop"));
-        printf $output (
-          "<span class=\"line\">%5d</span> " .
-          "<span class=\"%s\">%6s %6s %s</span>%s\n",
-          $l, $source_class,
-          HtmlPrintNumber($n1),
-          HtmlPrintNumber($n2),
-          HtmlEscape($text),
-          $asm);
-      } else {
-        printf $output(
-          "%6s %6s %4d: %s\n",
-          UnparseAlt($n1),
-          UnparseAlt($n2),
-          $l,
-          $text);
-      }
-      if ($l == $lastline)  { print $output $skip_marker; }
-    };
-  }
-  close(FILE);
-  if ($html) {
-    print $output "</pre>\n";
-  }
-  return 1;
-}
-
-# Return the source line for the specified file/linenumber.
-# Returns undef if not found.
-sub SourceLine {
-  my $file = shift;
-  my $line = shift;
-
-  # Look in cache
-  if (!defined($main::source_cache{$file})) {
-    if (100 < scalar keys(%main::source_cache)) {
-      # Clear the cache when it gets too big
-      $main::source_cache = ();
-    }
-
-    # Read all lines from the file
-    if (!open(FILE, "<$file")) {
-      print STDERR "$file: $!\n";
-      $main::source_cache{$file} = [];  # Cache the negative result
-      return undef;
-    }
-    my $lines = [];
-    push(@{$lines}, "");        # So we can use 1-based line numbers as indices
-    while (<FILE>) {
-      push(@{$lines}, $_);
-    }
-    close(FILE);
-
-    # Save the lines in the cache
-    $main::source_cache{$file} = $lines;
-  }
-
-  my $lines = $main::source_cache{$file};
-  if (($line < 0) || ($line > $#{$lines})) {
-    return undef;
-  } else {
-    return $lines->[$line];
-  }
-}
-
-# Print disassembly for one routine with interspersed source if available
-sub PrintDisassembledFunction {
-  my $prog = shift;
-  my $offset = shift;
-  my $routine = shift;
-  my $flat = shift;
-  my $cumulative = shift;
-  my $start_addr = shift;
-  my $end_addr = shift;
-  my $total = shift;
-
-  # Disassemble all instructions
-  my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr);
-
-  # Make array of counts per instruction
-  my @flat_count = ();
-  my @cum_count = ();
-  my $flat_total = 0;
-  my $cum_total = 0;
-  foreach my $e (@instructions) {
-    # Add up counts for all addresses that fall inside this instruction
-    my $c1 = 0;
-    my $c2 = 0;
-    for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
-      $c1 += GetEntry($flat, $a);
-      $c2 += GetEntry($cumulative, $a);
-    }
-    push(@flat_count, $c1);
-    push(@cum_count, $c2);
-    $flat_total += $c1;
-    $cum_total += $c2;
-  }
-
-  # Print header with total counts
-  printf("ROUTINE ====================== %s\n" .
-         "%6s %6s %s (flat, cumulative) %.1f%% of total\n",
-         ShortFunctionName($routine),
-         Unparse($flat_total),
-         Unparse($cum_total),
-         Units(),
-         ($cum_total * 100.0) / $total);
-
-  # Process instructions in order
-  my $current_file = "";
-  for (my $i = 0; $i <= $#instructions; ) {
-    my $e = $instructions[$i];
-
-    # Print the new file name whenever we switch files
-    if ($e->[1] ne $current_file) {
-      $current_file = $e->[1];
-      my $fname = $current_file;
-      $fname =~ s|^\./||;   # Trim leading "./"
-
-      # Shorten long file names
-      if (length($fname) >= 58) {
-        $fname = "..." . substr($fname, -55);
-      }
-      printf("-------------------- %s\n", $fname);
-    }
-
-    # TODO: Compute range of lines to print together to deal with
-    # small reorderings.
-    my $first_line = $e->[2];
-    my $last_line = $first_line;
-    my %flat_sum = ();
-    my %cum_sum = ();
-    for (my $l = $first_line; $l <= $last_line; $l++) {
-      $flat_sum{$l} = 0;
-      $cum_sum{$l} = 0;
-    }
-
-    # Find run of instructions for this range of source lines
-    my $first_inst = $i;
-    while (($i <= $#instructions) &&
-           ($instructions[$i]->[2] >= $first_line) &&
-           ($instructions[$i]->[2] <= $last_line)) {
-      $e = $instructions[$i];
-      $flat_sum{$e->[2]} += $flat_count[$i];
-      $cum_sum{$e->[2]} += $cum_count[$i];
-      $i++;
-    }
-    my $last_inst = $i - 1;
-
-    # Print source lines
-    for (my $l = $first_line; $l <= $last_line; $l++) {
-      my $line = SourceLine($current_file, $l);
-      if (!defined($line)) {
-        $line = "?\n";
-        next;
-      } else {
-        $line =~ s/^\s+//;
-      }
-      printf("%6s %6s %5d: %s",
-             UnparseAlt($flat_sum{$l}),
-             UnparseAlt($cum_sum{$l}),
-             $l,
-             $line);
-    }
-
-    # Print disassembly
-    for (my $x = $first_inst; $x <= $last_inst; $x++) {
-      my $e = $instructions[$x];
-      printf("%6s %6s    %8s: %6s\n",
-             UnparseAlt($flat_count[$x]),
-             UnparseAlt($cum_count[$x]),
-             UnparseAddress($offset, $e->[0]),
-             CleanDisassembly($e->[3]));
-    }
-  }
-}
-
-# Print DOT graph
-sub PrintDot {
-  my $prog = shift;
-  my $symbols = shift;
-  my $raw = shift;
-  my $flat = shift;
-  my $cumulative = shift;
-  my $overall_total = shift;
-
-  # Get total
-  my $local_total = TotalProfile($flat);
-  my $nodelimit = int($main::opt_nodefraction * $local_total);
-  my $edgelimit = int($main::opt_edgefraction * $local_total);
-  my $nodecount = $main::opt_nodecount;
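-  # For example, with a focused total of 10000 samples, a node fraction
-  # of 0.005 and an edge fraction of 0.001 (hypothetical option values)
-  # would drop nodes with at most 50 samples and edges with at most 10.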
-
-  # Find nodes to include
-  my @list = (sort { abs(GetEntry($cumulative, $b)) <=>
-                     abs(GetEntry($cumulative, $a))
-                     || $a cmp $b }
-              keys(%{$cumulative}));
-  my $last = $nodecount - 1;
-  if ($last > $#list) {
-    $last = $#list;
-  }
-  while (($last >= 0) &&
-         (abs(GetEntry($cumulative, $list[$last])) <= $nodelimit)) {
-    $last--;
-  }
-  if ($last < 0) {
-    print STDERR "No nodes to print\n";
-    return 0;
-  }
-
-  if ($nodelimit > 0 || $edgelimit > 0) {
-    printf STDERR ("Dropping nodes with <= %s %s; edges with <= %s abs(%s)\n",
-                   Unparse($nodelimit), Units(),
-                   Unparse($edgelimit), Units());
-  }
-
-  # Open DOT output file
-  my $output;
-  my $escaped_dot = ShellEscape(@DOT);
-  my $escaped_ps2pdf = ShellEscape(@PS2PDF);
-  if ($main::opt_gv) {
-    my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "ps"));
-    $output = "| $escaped_dot -Tps2 >$escaped_outfile";
-  } elsif ($main::opt_evince) {
-    my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "pdf"));
-    $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - $escaped_outfile";
-  } elsif ($main::opt_ps) {
-    $output = "| $escaped_dot -Tps2";
-  } elsif ($main::opt_pdf) {
-    $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - -";
-  } elsif ($main::opt_web || $main::opt_svg) {
-    # We need to post-process the SVG, so write to a temporary file always.
-    my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "svg"));
-    $output = "| $escaped_dot -Tsvg >$escaped_outfile";
-  } elsif ($main::opt_gif) {
-    $output = "| $escaped_dot -Tgif";
-  } else {
-    $output = ">&STDOUT";
-  }
-  open(DOT, $output) || error("$output: $!\n");
-
-  # Title
-  printf DOT ("digraph \"%s; %s %s\" {\n",
-              $prog,
-              Unparse($overall_total),
-              Units());
-  if ($main::opt_pdf) {
-    # The output is more printable if we set the page size for dot.
-    printf DOT ("size=\"8,11\"\n");
-  }
-  printf DOT ("node [width=0.375,height=0.25];\n");
-
-  # Print legend
-  printf DOT ("Legend [shape=box,fontsize=24,shape=plaintext," .
-              "label=\"%s\\l%s\\l%s\\l%s\\l%s\\l\"];\n",
-              $prog,
-              sprintf("Total %s: %s", Units(), Unparse($overall_total)),
-              sprintf("Focusing on: %s", Unparse($local_total)),
-              sprintf("Dropped nodes with <= %s abs(%s)",
-                      Unparse($nodelimit), Units()),
-              sprintf("Dropped edges with <= %s %s",
-                      Unparse($edgelimit), Units())
-              );
-
-  # Print nodes
-  my %node = ();
-  my $nextnode = 1;
-  foreach my $a (@list[0..$last]) {
-    # Pick font size
-    my $f = GetEntry($flat, $a);
-    my $c = GetEntry($cumulative, $a);
-
-    my $fs = 8;
-    if ($local_total > 0) {
-      $fs = 8 + (50.0 * sqrt(abs($f * 1.0 / $local_total)));
-    }
-
-    $node{$a} = $nextnode++;
-    my $sym = $a;
-    $sym =~ s/\s+/\\n/g;
-    $sym =~ s/::/\\n/g;
-
-    # Extra cumulative info to print for non-leaves
-    my $extra = "";
-    if ($f != $c) {
-      $extra = sprintf("\\rof %s (%s)",
-                       Unparse($c),
-                       Percent($c, $local_total));
-    }
-    my $style = "";
-    if ($main::opt_heapcheck) {
-      if ($f > 0) {
-        # make leak-causing nodes more visible (add a background)
-        $style = ",style=filled,fillcolor=gray"
-      } elsif ($f < 0) {
-        # make anti-leak-causing nodes (which almost never occur)
-        # stand out as well (triple border)
-        $style = ",peripheries=3"
-      }
-    }
-
-    printf DOT ("N%d [label=\"%s\\n%s (%s)%s\\r" .
-                "\",shape=box,fontsize=%.1f%s];\n",
-                $node{$a},
-                $sym,
-                Unparse($f),
-                Percent($f, $local_total),
-                $extra,
-                $fs,
-                $style,
-               );
-  }
-
-  # Get edges and counts per edge
-  my %edge = ();
-  my $n;
-  my $fullname_to_shortname_map = {};
-  FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map);
-  foreach my $k (keys(%{$raw})) {
-    # TODO: omit low %age edges
-    $n = $raw->{$k};
-    my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k);
-    for (my $i = 1; $i <= $#translated; $i++) {
-      my $src = $translated[$i];
-      my $dst = $translated[$i-1];
-      #next if ($src eq $dst);  # Avoid self-edges?
-      if (exists($node{$src}) && exists($node{$dst})) {
-        my $edge_label = "$src\001$dst";
-        if (!exists($edge{$edge_label})) {
-          $edge{$edge_label} = 0;
-        }
-        $edge{$edge_label} += $n;
-      }
-    }
-  }
-
-  # Print edges (process in order of decreasing counts)
-  my %indegree = ();   # Number of incoming edges added per node so far
-  my %outdegree = ();  # Number of outgoing edges added per node so far
-  foreach my $e (sort { $edge{$b} <=> $edge{$a} } keys(%edge)) {
-    my @x = split(/\001/, $e);
-    $n = $edge{$e};
-
-    # Initialize degree of kept incoming and outgoing edges if necessary
-    my $src = $x[0];
-    my $dst = $x[1];
-    if (!exists($outdegree{$src})) { $outdegree{$src} = 0; }
-    if (!exists($indegree{$dst})) { $indegree{$dst} = 0; }
-
-    my $keep;
-    if ($indegree{$dst} == 0) {
-      # Keep edge if needed for reachability
-      $keep = 1;
-    } elsif (abs($n) <= $edgelimit) {
-      # Drop if we are below --edgefraction
-      $keep = 0;
-    } elsif ($outdegree{$src} >= $main::opt_maxdegree ||
-             $indegree{$dst} >= $main::opt_maxdegree) {
-      # Keep limited number of in/out edges per node
-      $keep = 0;
-    } else {
-      $keep = 1;
-    }
-
-    if ($keep) {
-      $outdegree{$src}++;
-      $indegree{$dst}++;
-
-      # Compute line width based on edge count
-      my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0);
-      if ($fraction > 1) { $fraction = 1; }
-      my $w = $fraction * 2;
-      if ($w < 1 && ($main::opt_web || $main::opt_svg)) {
-        # SVG output treats line widths < 1 poorly.
-        $w = 1;
-      }
-
-      # Dot sometimes segfaults if given edge weights that are too large, so
-      # we cap the weights at a large value.
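-      # (For example, an edge count of 1000 maps to int(1000 ** 0.7) = 125.)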
-      my $edgeweight = abs($n) ** 0.7;
-      if ($edgeweight > 100000) { $edgeweight = 100000; }
-      $edgeweight = int($edgeweight);
-
-      my $style = sprintf("setlinewidth(%f)", $w);
-      if ($x[1] =~ m/\(inline\)/) {
-        $style .= ",dashed";
-      }
-
-      # Use a slightly squashed function of the edge count as the weight
-      printf DOT ("N%s -> N%s [label=%s, weight=%d, style=\"%s\"];\n",
-                  $node{$x[0]},
-                  $node{$x[1]},
-                  Unparse($n),
-                  $edgeweight,
-                  $style);
-    }
-  }
-
-  print DOT ("}\n");
-  close(DOT);
-
-  if ($main::opt_web || $main::opt_svg) {
-    # Rewrite SVG to be more usable inside a web browser.
-    RewriteSvg(TempName($main::next_tmpfile, "svg"));
-  }
-
-  return 1;
-}
-
-sub RewriteSvg {
-  my $svgfile = shift;
-
-  open(SVG, $svgfile) || die "open temp svg: $!";
-  my @svg = <SVG>;
-  close(SVG);
-  unlink $svgfile;
-  my $svg = join('', @svg);
-
-  # Dot's SVG output is
-  #
-  #    <svg width="___" height="___"
-  #     viewBox="___" xmlns=...>
-  #    <g id="graph0" transform="...">
-  #    ...
-  #    </g>
-  #    </svg>
-  #
-  # Change it to
-  #
-  #    <svg width="100%" height="100%"
-  #     xmlns=...>
-  #    $svg_javascript
-  #    <g id="viewport" transform="translate(0,0)">
-  #    <g id="graph0" transform="...">
-  #    ...
-  #    </g>
-  #    </g>
-  #    </svg>
-
-  # Fix width, height; drop viewBox.
-  $svg =~ s/(?s)<svg width="[^"]+" height="[^"]+"(.*?)viewBox="[^"]+"/<svg width="100%" height="100%"$1/;
-
-  # Insert script, viewport <g> above first <g>
-  my $svg_javascript = SvgJavascript();
-  my $viewport = "<g id=\"viewport\" transform=\"translate(0,0)\">\n";
-  $svg =~ s/<g id="graph\d"/$svg_javascript$viewport$&/;
-
-  # Insert final </g> above </svg>.
-  $svg =~ s/(.*)(<\/svg>)/$1<\/g>$2/;
-  $svg =~ s/<g id="graph\d"(.*?)/<g id="viewport"$1/;
-
-  if ($main::opt_svg) {
-    # --svg: write to standard output.
-    print $svg;
-  } else {
-    # Write back to temporary file.
-    open(SVG, ">$svgfile") || die "open $svgfile: $!";
-    print SVG $svg;
-    close(SVG);
-  }
-}
-
-sub SvgJavascript {
-  return <<'EOF';
-<script type="text/ecmascript"><![CDATA[
-// SVGPan
-// http://www.cyberz.org/blog/2009/12/08/svgpan-a-javascript-svg-panzoomdrag-library/
-// Local modification: if(true || ...) below to force panning, never moving.
-
-/**
- *  SVGPan library 1.2
- * ====================
- *
- * Given a unique existing element with id "viewport", including the
- * library into any SVG adds the following capabilities:
- *
- *  - Mouse panning
- *  - Mouse zooming (using the wheel)
- *  - Object dragging
- *
- * Known issues:
- *
- *  - Zooming (while panning) on Safari still has some issues
- *
- * Releases:
- *
- * 1.2, Sat Mar 20 08:42:50 GMT 2010, Zeng Xiaohui
- *     Fixed a bug with browser mouse handler interaction
- *
- * 1.1, Wed Feb  3 17:39:33 GMT 2010, Zeng Xiaohui
- *     Updated the zoom code to support the mouse wheel on Safari/Chrome
- *
- * 1.0, Andrea Leofreddi
- *     First release
- *
- * This code is licensed under the following BSD license:
- *
- * Copyright 2009-2010 Andrea Leofreddi <a.leofreddi@itcharm.com>. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without modification, are
- * permitted provided that the following conditions are met:
- *
- *    1. Redistributions of source code must retain the above copyright notice, this list of
- *       conditions and the following disclaimer.
- *
- *    2. Redistributions in binary form must reproduce the above copyright notice, this list
- *       of conditions and the following disclaimer in the documentation and/or other materials
- *       provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY Andrea Leofreddi ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
- * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Andrea Leofreddi OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are those of the
- * authors and should not be interpreted as representing official policies, either expressed
- * or implied, of Andrea Leofreddi.
- */
-
-var root = document.documentElement;
-
-var state = 'none', stateTarget, stateOrigin, stateTf;
-
-setupHandlers(root);
-
-/**
- * Register handlers
- */
-function setupHandlers(root){
-       setAttributes(root, {
-               "onmouseup" : "add(evt)",
-               "onmousedown" : "handleMouseDown(evt)",
-               "onmousemove" : "handleMouseMove(evt)",
-               "onmouseup" : "handleMouseUp(evt)",
-               //"onmouseout" : "handleMouseUp(evt)", // Decomment this to stop the pan functionality when dragging out of the SVG element
-       });
-
-       if(navigator.userAgent.toLowerCase().indexOf('webkit') >= 0)
-               window.addEventListener('mousewheel', handleMouseWheel, false); // Chrome/Safari
-       else
-               window.addEventListener('DOMMouseScroll', handleMouseWheel, false); // Others
-
-       var g = svgDoc.getElementById("svg");
-       g.width = "100%";
-       g.height = "100%";
-}
-
-/**
- * Instantiate an SVGPoint object with the given event coordinates.
- */
-function getEventPoint(evt) {
-       var p = root.createSVGPoint();
-
-       p.x = evt.clientX;
-       p.y = evt.clientY;
-
-       return p;
-}
-
-/**
- * Sets the current transform matrix of an element.
- */
-function setCTM(element, matrix) {
-       var s = "matrix(" + matrix.a + "," + matrix.b + "," + matrix.c + "," + matrix.d + "," + matrix.e + "," + matrix.f + ")";
-
-       element.setAttribute("transform", s);
-}
-
-/**
- * Dumps a matrix to a string (useful for debug).
- */
-function dumpMatrix(matrix) {
-       var s = "[ " + matrix.a + ", " + matrix.c + ", " + matrix.e + "\n  " + matrix.b + ", " + matrix.d + ", " + matrix.f + "\n  0, 0, 1 ]";
-
-       return s;
-}
-
-/**
- * Sets attributes of an element.
- */
-function setAttributes(element, attributes){
-       for (i in attributes)
-               element.setAttributeNS(null, i, attributes[i]);
-}
-
-/**
- * Handle mouse wheel event.
- */
-function handleMouseWheel(evt) {
-       if(evt.preventDefault)
-               evt.preventDefault();
-
-       evt.returnValue = false;
-
-       var svgDoc = evt.target.ownerDocument;
-
-       var delta;
-
-       if(evt.wheelDelta)
-               delta = evt.wheelDelta / 3600; // Chrome/Safari
-       else
-               delta = evt.detail / -90; // Mozilla
-
-       var z = 1 + delta; // Zoom factor: 0.9/1.1
-
-       var g = svgDoc.getElementById("viewport");
-
-       var p = getEventPoint(evt);
-
-       p = p.matrixTransform(g.getCTM().inverse());
-
-       // Compute new scale matrix in current mouse position
-       var k = root.createSVGMatrix().translate(p.x, p.y).scale(z).translate(-p.x, -p.y);
-
-        setCTM(g, g.getCTM().multiply(k));
-
-       stateTf = stateTf.multiply(k.inverse());
-}
-
-/**
- * Handle mouse move event.
- */
-function handleMouseMove(evt) {
-       if(evt.preventDefault)
-               evt.preventDefault();
-
-       evt.returnValue = false;
-
-       var svgDoc = evt.target.ownerDocument;
-
-       var g = svgDoc.getElementById("viewport");
-
-       if(state == 'pan') {
-               // Pan mode
-               var p = getEventPoint(evt).matrixTransform(stateTf);
-
-               setCTM(g, stateTf.inverse().translate(p.x - stateOrigin.x, p.y - stateOrigin.y));
-       } else if(state == 'move') {
-               // Move mode
-               var p = getEventPoint(evt).matrixTransform(g.getCTM().inverse());
-
-               setCTM(stateTarget, root.createSVGMatrix().translate(p.x - stateOrigin.x, p.y - stateOrigin.y).multiply(g.getCTM().inverse()).multiply(stateTarget.getCTM()));
-
-               stateOrigin = p;
-       }
-}
-
-/**
- * Handle mouse button press event.
- */
-function handleMouseDown(evt) {
-       if(evt.preventDefault)
-               evt.preventDefault();
-
-       evt.returnValue = false;
-
-       var svgDoc = evt.target.ownerDocument;
-
-       var g = svgDoc.getElementById("viewport");
-
-       if(true || evt.target.tagName == "svg") {
-               // Pan mode
-               state = 'pan';
-
-               stateTf = g.getCTM().inverse();
-
-               stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
-       } else {
-               // Move mode
-               state = 'move';
-
-               stateTarget = evt.target;
-
-               stateTf = g.getCTM().inverse();
-
-               stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
-       }
-}
-
-/**
- * Handle mouse button release event.
- */
-function handleMouseUp(evt) {
-       if(evt.preventDefault)
-               evt.preventDefault();
-
-       evt.returnValue = false;
-
-       var svgDoc = evt.target.ownerDocument;
-
-       if(state == 'pan' || state == 'move') {
-               // Quit pan mode
-               state = '';
-       }
-}
-
-]]></script>
-EOF
-}
-
-# Provides a map from fullname to shortname for cases where the
-# shortname is ambiguous.  The symlist has both the fullname and
-# shortname for all symbols, which is usually fine, but sometimes --
-# such as with overloaded functions -- two different fullnames can map to
-# the same shortname.  In that case, we use the address of the
-# function to disambiguate the two.  This function fills in a map that
-# maps fullnames to modified shortnames in such cases.  If a fullname
-# is not present in the map, the 'normal' shortname provided by the
-# symlist is the appropriate one to use.
-sub FillFullnameToShortnameMap {
-  my $symbols = shift;
-  my $fullname_to_shortname_map = shift;
-  my $shortnames_seen_once = {};
-  my $shortnames_seen_more_than_once = {};
-
-  foreach my $symlist (values(%{$symbols})) {
-    # TODO(csilvers): deal with inlined symbols too.
-    my $shortname = $symlist->[0];
-    my $fullname = $symlist->[2];
-    if ($fullname !~ /<[0-9a-fA-F]+>$/) {  # fullname doesn't end in an address
-      next;       # the only collisions we care about are when addresses differ
-    }
-    if (defined($shortnames_seen_once->{$shortname}) &&
-        $shortnames_seen_once->{$shortname} ne $fullname) {
-      $shortnames_seen_more_than_once->{$shortname} = 1;
-    } else {
-      $shortnames_seen_once->{$shortname} = $fullname;
-    }
-  }
-
-  foreach my $symlist (values(%{$symbols})) {
-    my $shortname = $symlist->[0];
-    my $fullname = $symlist->[2];
-    # TODO(csilvers): take in a list of addresses we care about, and only
-    # store in the map if $symlist->[1] is in that list.  Saves space.
-    next if defined($fullname_to_shortname_map->{$fullname});
-    if (defined($shortnames_seen_more_than_once->{$shortname})) {
-      if ($fullname =~ /<0*([^>]*)>$/) {   # fullname has address at end of it
-        $fullname_to_shortname_map->{$fullname} = "$shortname\@$1";
-      }
-    }
-  }
-}
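-
-# A minimal sketch of the disambiguation above, using made-up symbols: two
-# overloads share the shortname "Foo::bar" but have fullnames ending in
-# different addresses, so each fullname gets keyed to "shortname@address":
-#
-#   my %map;
-#   my $symbols = {
-#     "0x1000" => ["Foo::bar", "foo.cc:10", "Foo::bar(int)<1000>"],
-#     "0x2000" => ["Foo::bar", "foo.cc:20", "Foo::bar(double)<2000>"],
-#   };
-#   FillFullnameToShortnameMap($symbols, \%map);
-#   # %map now holds: "Foo::bar(int)<1000>"    => "Foo::bar@1000"
-#   #                 "Foo::bar(double)<2000>" => "Foo::bar@2000"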
-
-# Return a small number that identifies the argument.
-# Multiple calls with the same argument will return the same number.
-# Calls with different arguments will return different numbers.
-sub ShortIdFor {
-  my $key = shift;
-  my $id = $main::uniqueid{$key};
-  if (!defined($id)) {
-    $id = keys(%main::uniqueid) + 1;
-    $main::uniqueid{$key} = $id;
-  }
-  return $id;
-}
-
-# Translate a stack of addresses into a stack of symbols
-sub TranslateStack {
-  my $symbols = shift;
-  my $fullname_to_shortname_map = shift;
-  my $k = shift;
-
-  my @addrs = split(/\n/, $k);
-  my @result = ();
-  for (my $i = 0; $i <= $#addrs; $i++) {
-    my $a = $addrs[$i];
-
-    # Skip large addresses since they sometimes show up as fake entries on RH9
-    if (length($a) > 8 && $a gt "7fffffffffffffff") {
-      next;
-    }
-
-    if ($main::opt_disasm || $main::opt_list) {
-      # We want just the address for the key
-      push(@result, $a);
-      next;
-    }
-
-    my $symlist = $symbols->{$a};
-    if (!defined($symlist)) {
-      $symlist = [$a, "", $a];
-    }
-
-    # We can have a sequence of symbols for a particular entry
-    # (more than one symbol in the case of inlining).  Callers
-    # come before callees in symlist, so walk backwards since
-    # the translated stack should contain callees before callers.
-    for (my $j = $#{$symlist}; $j >= 2; $j -= 3) {
-      my $func = $symlist->[$j-2];
-      my $fileline = $symlist->[$j-1];
-      my $fullfunc = $symlist->[$j];
-      if (defined($fullname_to_shortname_map->{$fullfunc})) {
-        $func = $fullname_to_shortname_map->{$fullfunc};
-      }
-      if ($j > 2) {
-        $func = "$func (inline)";
-      }
-
-      # Do not merge nodes corresponding to Callback::Run since that
-      # causes confusing cycles in dot display.  Instead, we synthesize
-      # a unique name for this frame per caller.
-      if ($func =~ m/Callback.*::Run$/) {
-        my $caller = ($i > 0) ? $addrs[$i-1] : 0;
-        $func = "Run#" . ShortIdFor($caller);
-      }
-
-      if ($main::opt_addresses) {
-        push(@result, "$a $func $fileline");
-      } elsif ($main::opt_lines) {
-        if ($func eq '??' && $fileline eq '??:0') {
-          push(@result, "$a");
-        } else {
-          push(@result, "$func $fileline");
-        }
-      } elsif ($main::opt_functions) {
-        if ($func eq '??') {
-          push(@result, "$a");
-        } else {
-          push(@result, $func);
-        }
-      } elsif ($main::opt_files) {
-        if ($fileline eq '??:0' || $fileline eq '') {
-          push(@result, "$a");
-        } else {
-          my $f = $fileline;
-          $f =~ s/:\d+$//;
-          push(@result, $f);
-        }
-      } else {
-        push(@result, $a);
-        last;  # Do not print inlined info
-      }
-    }
-  }
-
-  # print join(",", @addrs), " => ", join(",", @result), "\n";
-  return @result;
-}
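-
-# A small, hypothetical illustration of the symlist layout walked above: each
-# address maps to one or more (shortname, fileline, fullname) triples, with
-# the out-of-line caller first and inlined callees after it, e.g.
-#
-#   $symbols->{"00401000"} =
-#     ["Outer", "outer.cc:12", "Outer()",     # actual function at this PC
-#      "Inner", "inner.cc:34", "Inner()"];    # function inlined into it
-#
-# Walking the triples from the end, the --functions output for this frame is
-# ("Inner (inline)", "Outer"): callees before callers.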
-
-# Generate percent string for a number and a total
-sub Percent {
-  my $num = shift;
-  my $tot = shift;
-  if ($tot != 0) {
-    return sprintf("%.1f%%", $num * 100.0 / $tot);
-  } else {
-    return ($num == 0) ? "nan" : (($num > 0) ? "+inf" : "-inf");
-  }
-}
-
-# Generate pretty-printed form of number
-sub Unparse {
-  my $num = shift;
-  if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
-    if ($main::opt_inuse_objects || $main::opt_alloc_objects) {
-      return sprintf("%d", $num);
-    } else {
-      if ($main::opt_show_bytes) {
-        return sprintf("%d", $num);
-      } else {
-        return sprintf("%.1f", $num / 1048576.0);
-      }
-    }
-  } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) {
-    return sprintf("%.3f", $num / 1e9); # Convert nanoseconds to seconds
-  } else {
-    return sprintf("%d", $num);
-  }
-}
-
-# Alternate pretty-printed form: 0 maps to "."
-sub UnparseAlt {
-  my $num = shift;
-  if ($num == 0) {
-    return ".";
-  } else {
-    return Unparse($num);
-  }
-}
-
-# Alternate pretty-printed form: 0 maps to ""
-sub HtmlPrintNumber {
-  my $num = shift;
-  if ($num == 0) {
-    return "";
-  } else {
-    return Unparse($num);
-  }
-}
-
-# Return output units
-sub Units {
-  if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
-    if ($main::opt_inuse_objects || $main::opt_alloc_objects) {
-      return "objects";
-    } else {
-      if ($main::opt_show_bytes) {
-        return "B";
-      } else {
-        return "MB";
-      }
-    }
-  } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) {
-    return "seconds";
-  } else {
-    return "samples";
-  }
-}
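-
-# For example (made-up value): for a heap profile with neither --show_bytes
-# nor an objects flag set, Unparse(3145728) yields "3.0" and Units() yields
-# "MB", so a 3 MiB total is reported as "3.0 MB".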
-
-##### Profile manipulation code #####
-
-# Generate flattened profile:
-# If count is charged to stack [a,b,c,d], in generated profile,
-# it will be charged to [a]
-sub FlatProfile {
-  my $profile = shift;
-  my $result = {};
-  foreach my $k (keys(%{$profile})) {
-    my $count = $profile->{$k};
-    my @addrs = split(/\n/, $k);
-    if ($#addrs >= 0) {
-      AddEntry($result, $addrs[0], $count);
-    }
-  }
-  return $result;
-}
-
-# Generate cumulative profile:
-# If count is charged to stack [a,b,c,d], in generated profile,
-# it will be charged to [a], [b], [c], [d]
-sub CumulativeProfile {
-  my $profile = shift;
-  my $result = {};
-  foreach my $k (keys(%{$profile})) {
-    my $count = $profile->{$k};
-    my @addrs = split(/\n/, $k);
-    foreach my $a (@addrs) {
-      AddEntry($result, $a, $count);
-    }
-  }
-  return $result;
-}
-
-# If the second-youngest PC on the stack is always the same, returns
-# that pc.  Otherwise, returns undef.
-sub IsSecondPcAlwaysTheSame {
-  my $profile = shift;
-
-  my $second_pc = undef;
-  foreach my $k (keys(%{$profile})) {
-    my @addrs = split(/\n/, $k);
-    if ($#addrs < 1) {
-      return undef;
-    }
-    if (not defined $second_pc) {
-      $second_pc = $addrs[1];
-    } else {
-      if ($second_pc ne $addrs[1]) {
-        return undef;
-      }
-    }
-  }
-  return $second_pc;
-}
-
-sub ExtractSymbolLocation {
-  my $symbols = shift;
-  my $address = shift;
-  # 'addr2line' outputs "??:0" for unknown locations; we do the
-  # same to be consistent.
-  my $location = "??:0:unknown";
-  if (exists $symbols->{$address}) {
-    my $file = $symbols->{$address}->[1];
-    if ($file eq "?") {
-      $file = "??:0"
-    }
-    $location = $file . ":" . $symbols->{$address}->[0];
-  }
-  return $location;
-}
-
-# Extracts a graph of calls.
-sub ExtractCalls {
-  my $symbols = shift;
-  my $profile = shift;
-
-  my $calls = {};
-  while( my ($stack_trace, $count) = each %$profile ) {
-    my @address = split(/\n/, $stack_trace);
-    my $destination = ExtractSymbolLocation($symbols, $address[0]);
-    AddEntry($calls, $destination, $count);
-    for (my $i = 1; $i <= $#address; $i++) {
-      my $source = ExtractSymbolLocation($symbols, $address[$i]);
-      my $call = "$source -> $destination";
-      AddEntry($calls, $call, $count);
-      $destination = $source;
-    }
-  }
-
-  return $calls;
-}
-
-sub RemoveUninterestingFrames {
-  my $symbols = shift;
-  my $profile = shift;
-
-  # List of function names to skip
-  my %skip = ();
-  my $skip_regexp = 'NOMATCH';
-  if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
-    foreach my $name ('calloc',
-                      'cfree',
-                      'malloc',
-                      'free',
-                      'memalign',
-                      'posix_memalign',
-                      'aligned_alloc',
-                      'pvalloc',
-                      'valloc',
-                      'realloc',
-                      'mallocx', # jemalloc
-                      'rallocx', # jemalloc
-                      'xallocx', # jemalloc
-                      'dallocx', # jemalloc
-                      'sdallocx', # jemalloc
-                      'tc_calloc',
-                      'tc_cfree',
-                      'tc_malloc',
-                      'tc_free',
-                      'tc_memalign',
-                      'tc_posix_memalign',
-                      'tc_pvalloc',
-                      'tc_valloc',
-                      'tc_realloc',
-                      'tc_new',
-                      'tc_delete',
-                      'tc_newarray',
-                      'tc_deletearray',
-                      'tc_new_nothrow',
-                      'tc_newarray_nothrow',
-                      'do_malloc',
-                      '::do_malloc',   # new name -- got moved to an unnamed ns
-                      '::do_malloc_or_cpp_alloc',
-                      'DoSampledAllocation',
-                      'simple_alloc::allocate',
-                      '__malloc_alloc_template::allocate',
-                      '__builtin_delete',
-                      '__builtin_new',
-                      '__builtin_vec_delete',
-                      '__builtin_vec_new',
-                      'operator new',
-                      'operator new[]',
-                      # The entry to our memory-allocation routines on OS X
-                      'malloc_zone_malloc',
-                      'malloc_zone_calloc',
-                      'malloc_zone_valloc',
-                      'malloc_zone_realloc',
-                      'malloc_zone_memalign',
-                      'malloc_zone_free',
-                      # These mark the beginning/end of our custom sections
-                      '__start_google_malloc',
-                      '__stop_google_malloc',
-                      '__start_malloc_hook',
-                      '__stop_malloc_hook') {
-      $skip{$name} = 1;
-      $skip{"_" . $name} = 1;   # Mach (OS X) adds a _ prefix to everything
-    }
-    # TODO: Remove TCMalloc once everything has been
-    # moved into the tcmalloc:: namespace and we have flushed
-    # old code out of the system.
-    $skip_regexp = "TCMalloc|^tcmalloc::";
-  } elsif ($main::profile_type eq 'contention') {
-    foreach my $vname ('base::RecordLockProfileData',
-                       'base::SubmitMutexProfileData',
-                       'base::SubmitSpinLockProfileData',
-                       'Mutex::Unlock',
-                       'Mutex::UnlockSlow',
-                       'Mutex::ReaderUnlock',
-                       'MutexLock::~MutexLock',
-                       'SpinLock::Unlock',
-                       'SpinLock::SlowUnlock',
-                       'SpinLockHolder::~SpinLockHolder') {
-      $skip{$vname} = 1;
-    }
-  } elsif ($main::profile_type eq 'cpu') {
-    # Drop signal handlers used for CPU profile collection
-    # TODO(dpeng): this should not be necessary; it's taken
-    # care of by the general 2nd-pc mechanism below.
-    foreach my $name ('ProfileData::Add',           # historical
-                      'ProfileData::prof_handler',  # historical
-                      'CpuProfiler::prof_handler',
-                      '__FRAME_END__',
-                      '__pthread_sighandler',
-                      '__restore') {
-      $skip{$name} = 1;
-    }
-  } else {
-    # Nothing skipped for unknown types
-  }
-
-  if ($main::profile_type eq 'cpu') {
-    # If all the second-youngest program counters are the same,
-    # this STRONGLY suggests that it is an artifact of measurement,
-    # i.e., stack frames pushed by the CPU profiler signal handler.
-    # Hence, we delete them.
-    # (The topmost PC is read from the signal structure, not from
-    # the stack, so it does not get involved.)
-    while (my $second_pc = IsSecondPcAlwaysTheSame($profile)) {
-      my $result = {};
-      my $func = '';
-      if (exists($symbols->{$second_pc})) {
-        $second_pc = $symbols->{$second_pc}->[0];
-      }
-      print STDERR "Removing $second_pc from all stack traces.\n";
-      foreach my $k (keys(%{$profile})) {
-        my $count = $profile->{$k};
-        my @addrs = split(/\n/, $k);
-        splice @addrs, 1, 1;
-        my $reduced_path = join("\n", @addrs);
-        AddEntry($result, $reduced_path, $count);
-      }
-      $profile = $result;
-    }
-  }
-
-  my $result = {};
-  foreach my $k (keys(%{$profile})) {
-    my $count = $profile->{$k};
-    my @addrs = split(/\n/, $k);
-    my @path = ();
-    foreach my $a (@addrs) {
-      if (exists($symbols->{$a})) {
-        my $func = $symbols->{$a}->[0];
-        if ($skip{$func} || ($func =~ m/$skip_regexp/)) {
-          # Throw away the portion of the backtrace seen so far, under the
-          # assumption that previous frames were for functions internal to the
-          # allocator.
-          @path = ();
-          next;
-        }
-      }
-      push(@path, $a);
-    }
-    my $reduced_path = join("\n", @path);
-    AddEntry($result, $reduced_path, $count);
-  }
-  return $result;
-}
-
-# Reduce profile to granularity given by user
-sub ReduceProfile {
-  my $symbols = shift;
-  my $profile = shift;
-  my $result = {};
-  my $fullname_to_shortname_map = {};
-  FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map);
-  foreach my $k (keys(%{$profile})) {
-    my $count = $profile->{$k};
-    my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k);
-    my @path = ();
-    my %seen = ();
-    $seen{''} = 1;      # So that empty keys are skipped
-    foreach my $e (@translated) {
-      # To avoid double-counting due to recursion, skip a stack-trace
-      # entry if it has already been seen
-      if (!$seen{$e}) {
-        $seen{$e} = 1;
-        push(@path, $e);
-      }
-    }
-    my $reduced_path = join("\n", @path);
-    AddEntry($result, $reduced_path, $count);
-  }
-  return $result;
-}
-
-# Does the specified symbol array match the regexp?
-sub SymbolMatches {
-  my $sym = shift;
-  my $re = shift;
-  if (defined($sym)) {
-    for (my $i = 0; $i < $#{$sym}; $i += 3) {
-      if ($sym->[$i] =~ m/$re/ || $sym->[$i+1] =~ m/$re/) {
-        return 1;
-      }
-    }
-  }
-  return 0;
-}
-
-# Focus only on paths involving specified regexps
-sub FocusProfile {
-  my $symbols = shift;
-  my $profile = shift;
-  my $focus = shift;
-  my $result = {};
-  foreach my $k (keys(%{$profile})) {
-    my $count = $profile->{$k};
-    my @addrs = split(/\n/, $k);
-    foreach my $a (@addrs) {
-      # Keep this stack if any frame matches the address/shortname/fileline
-      if (($a =~ m/$focus/) || SymbolMatches($symbols->{$a}, $focus)) {
-        AddEntry($result, $k, $count);
-        last;
-      }
-    }
-  }
-  return $result;
-}
-
-# Focus only on paths not involving specified regexps
-sub IgnoreProfile {
-  my $symbols = shift;
-  my $profile = shift;
-  my $ignore = shift;
-  my $result = {};
-  foreach my $k (keys(%{$profile})) {
-    my $count = $profile->{$k};
-    my @addrs = split(/\n/, $k);
-    my $matched = 0;
-    foreach my $a (@addrs) {
-      # Mark this stack if any frame matches the address/shortname/fileline
-      if (($a =~ m/$ignore/) || SymbolMatches($symbols->{$a}, $ignore)) {
-        $matched = 1;
-        last;
-      }
-    }
-    if (!$matched) {
-      AddEntry($result, $k, $count);
-    }
-  }
-  return $result;
-}
-
-# Get total count in profile
-sub TotalProfile {
-  my $profile = shift;
-  my $result = 0;
-  foreach my $k (keys(%{$profile})) {
-    $result += $profile->{$k};
-  }
-  return $result;
-}
-
-# Add A to B
-sub AddProfile {
-  my $A = shift;
-  my $B = shift;
-
-  my $R = {};
-  # add all keys in A
-  foreach my $k (keys(%{$A})) {
-    my $v = $A->{$k};
-    AddEntry($R, $k, $v);
-  }
-  # add all keys in B
-  foreach my $k (keys(%{$B})) {
-    my $v = $B->{$k};
-    AddEntry($R, $k, $v);
-  }
-  return $R;
-}
-
-# Merges symbol maps
-sub MergeSymbols {
-  my $A = shift;
-  my $B = shift;
-
-  my $R = {};
-  foreach my $k (keys(%{$A})) {
-    $R->{$k} = $A->{$k};
-  }
-  if (defined($B)) {
-    foreach my $k (keys(%{$B})) {
-      $R->{$k} = $B->{$k};
-    }
-  }
-  return $R;
-}
-
-
-# Add A to B
-sub AddPcs {
-  my $A = shift;
-  my $B = shift;
-
-  my $R = {};
-  # add all keys in A
-  foreach my $k (keys(%{$A})) {
-    $R->{$k} = 1
-  }
-  # add all keys in B
-  foreach my $k (keys(%{$B})) {
-    $R->{$k} = 1
-  }
-  return $R;
-}
-
-# Subtract B from A
-sub SubtractProfile {
-  my $A = shift;
-  my $B = shift;
-
-  my $R = {};
-  foreach my $k (keys(%{$A})) {
-    my $v = $A->{$k} - GetEntry($B, $k);
-    if ($v < 0 && $main::opt_drop_negative) {
-      $v = 0;
-    }
-    AddEntry($R, $k, $v);
-  }
-  if (!$main::opt_drop_negative) {
-    # Take care of when subtracted profile has more entries
-    foreach my $k (keys(%{$B})) {
-      if (!exists($A->{$k})) {
-        AddEntry($R, $k, 0 - $B->{$k});
-      }
-    }
-  }
-  return $R;
-}
-
-# Get entry from profile; zero if not present
-sub GetEntry {
-  my $profile = shift;
-  my $k = shift;
-  if (exists($profile->{$k})) {
-    return $profile->{$k};
-  } else {
-    return 0;
-  }
-}
-
-# Add entry to specified profile
-sub AddEntry {
-  my $profile = shift;
-  my $k = shift;
-  my $n = shift;
-  if (!exists($profile->{$k})) {
-    $profile->{$k} = 0;
-  }
-  $profile->{$k} += $n;
-}
-
-# Add a stack of entries to specified profile, and add them to the $pcs
-# list.
-sub AddEntries {
-  my $profile = shift;
-  my $pcs = shift;
-  my $stack = shift;
-  my $count = shift;
-  my @k = ();
-
-  foreach my $e (split(/\s+/, $stack)) {
-    my $pc = HexExtend($e);
-    $pcs->{$pc} = 1;
-    push @k, $pc;
-  }
-  AddEntry($profile, (join "\n", @k), $count);
-}
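-
-# A small sketch (made-up addresses) of the profile representation built here:
-# each stack becomes a key of newline-joined, zero-extended hex PCs.  With
-# 64-bit addresses ($address_length == 16):
-#
-#   AddEntries($profile, $pcs, "0x401000 0x400f00", 7);
-#   # $profile->{"0000000000401000\n0000000000400f00"} += 7;
-#   # $pcs->{"0000000000401000"} = 1;  $pcs->{"0000000000400f00"} = 1;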
-
-##### Code to profile a server dynamically #####
-
-sub CheckSymbolPage {
-  my $url = SymbolPageURL();
-  my $command = ShellEscape(@URL_FETCHER, $url);
-  open(SYMBOL, "$command |") or error($command);
-  my $line = <SYMBOL>;
-  $line =~ s/\r//g;         # turn windows-looking lines into unix-looking lines
-  close(SYMBOL);
-  unless (defined($line)) {
-    error("$url doesn't exist\n");
-  }
-
-  if ($line =~ /^num_symbols:\s+(\d+)$/) {
-    if ($1 == 0) {
-      error("Stripped binary. No symbols available.\n");
-    }
-  } else {
-    error("Failed to get the number of symbols from $url\n");
-  }
-}
-
-sub IsProfileURL {
-  my $profile_name = shift;
-  if (-f $profile_name) {
-    printf STDERR "Using local file $profile_name.\n";
-    return 0;
-  }
-  return 1;
-}
-
-sub ParseProfileURL {
-  my $profile_name = shift;
-
-  if (!defined($profile_name) || $profile_name eq "") {
-    return ();
-  }
-
-  # Split profile URL - matches all non-empty strings, so no test.
-  $profile_name =~ m,^(https?://)?([^/]+)(.*?)(/|$PROFILES)?$,;
-
-  my $proto = $1 || "http://";
-  my $hostport = $2;
-  my $prefix = $3;
-  my $profile = $4 || "/";
-
-  my $host = $hostport;
-  $host =~ s/:.*//;
-
-  my $baseurl = "$proto$hostport$prefix";
-  return ($host, $baseurl, $profile);
-}
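-
-# For illustration (hypothetical host; assumes $PROFILES matches the standard
-# /pprof/... handler paths defined earlier in this script):
-#
-#   ParseProfileURL("http://myhost:8000/pprof/profile")
-#   # returns ("myhost", "http://myhost:8000", "/pprof/profile")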
-
-# We fetch symbols from the first profile argument.
-sub SymbolPageURL {
-  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
-  return "$baseURL$SYMBOL_PAGE";
-}
-
-sub FetchProgramName() {
-  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
-  my $url = "$baseURL$PROGRAM_NAME_PAGE";
-  my $command_line = ShellEscape(@URL_FETCHER, $url);
-  open(CMDLINE, "$command_line |") or error($command_line);
-  my $cmdline = <CMDLINE>;
-  $cmdline =~ s/\r//g;   # turn windows-looking lines into unix-looking lines
-  close(CMDLINE);
-  error("Failed to get program name from $url\n") unless defined($cmdline);
-  $cmdline =~ s/\x00.+//;  # Remove argv[1] and the rest.
-  $cmdline =~ s!\n!!g;  # Remove LFs.
-  return $cmdline;
-}
-
-# Gee, curl's -L (--location) option isn't reliable, at least as of
-# version 7.12.3: curl will forget to post data if it follows a
-# redirection.  This function works around that by resolving the
-# redirection ourselves.  Redirection happens on borg hosts.
-sub ResolveRedirectionForCurl {
-  my $url = shift;
-  my $command_line = ShellEscape(@URL_FETCHER, "--head", $url);
-  open(CMDLINE, "$command_line |") or error($command_line);
-  while (<CMDLINE>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    if (/^Location: (.*)/) {
-      $url = $1;
-    }
-  }
-  close(CMDLINE);
-  return $url;
-}
-
-# Add a timeout flag to URL_FETCHER.  Returns a new list.
-sub AddFetchTimeout {
-  my $timeout = shift;
-  my @fetcher = @_;   # the remaining args are the fetcher command and its flags
-  if (defined($timeout)) {
-    if (join(" ", @fetcher) =~ m/\bcurl -s/) {
-      push(@fetcher, "--max-time", sprintf("%d", $timeout));
-    } elsif (join(" ", @fetcher) =~ m/\brpcget\b/) {
-      push(@fetcher, sprintf("--deadline=%d", $timeout));
-    }
-  }
-  return @fetcher;
-}
-
-# Reads a symbol map from the file handle passed as the first argument, returning
-# the resulting symbol map.  Also processes variables relating to symbols.
-# Currently, the only variable processed is 'binary=<value>' which updates
-# $main::prog to have the correct program name.
-sub ReadSymbols {
-  my $in = shift;
-  my $map = {};
-  while (<$in>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    # Removes all the leading zeroes from the symbols, see comment below.
-    if (m/^0x0*([0-9a-f]+)\s+(.+)/) {
-      $map->{$1} = $2;
-    } elsif (m/^---/) {
-      last;
-    } elsif (m/^([a-z][^=]*)=(.*)$/ ) {
-      my ($variable, $value) = ($1, $2);
-      for ($variable, $value) {
-        s/^\s+//;
-        s/\s+$//;
-      }
-      if ($variable eq "binary") {
-        if ($main::prog ne $UNKNOWN_BINARY && $main::prog ne $value) {
-          printf STDERR ("Warning: Mismatched binary name '%s', using '%s'.\n",
-                         $main::prog, $value);
-        }
-        $main::prog = $value;
-      } else {
-        printf STDERR ("Ignoring unknown variable in symbols list: " .
-            "'%s' = '%s'\n", $variable, $value);
-      }
-    }
-  }
-  return $map;
-}
-
-# Fetches and processes symbols to prepare them for use in the profile output
-# code.  If the optional 'symbol_map' arg is not given, fetches symbols from
-# $SYMBOL_PAGE for all PC values found in profile.  Otherwise, the raw symbols
-# are assumed to have already been fetched into 'symbol_map' and are simply
-# extracted and processed.
-sub FetchSymbols {
-  my $pcset = shift;
-  my $symbol_map = shift;
-
-  my %seen = ();
-  my @pcs = grep { !$seen{$_}++ } keys(%$pcset);  # uniq
-
-  if (!defined($symbol_map)) {
-    my $post_data = join("+", sort((map {"0x" . "$_"} @pcs)));
-
-    open(POSTFILE, ">$main::tmpfile_sym");
-    print POSTFILE $post_data;
-    close(POSTFILE);
-
-    my $url = SymbolPageURL();
-
-    my $command_line;
-    if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) {
-      $url = ResolveRedirectionForCurl($url);
-      $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym",
-                                  $url);
-    } else {
-      $command_line = (ShellEscape(@URL_FETCHER, "--post", $url)
-                       . " < " . ShellEscape($main::tmpfile_sym));
-    }
-    # We use c++filt in case $SYMBOL_PAGE gives us mangled symbols.
-    my $escaped_cppfilt = ShellEscape($obj_tool_map{"c++filt"});
-    open(SYMBOL, "$command_line | $escaped_cppfilt |") or error($command_line);
-    $symbol_map = ReadSymbols(*SYMBOL{IO});
-    close(SYMBOL);
-  }
-
-  my $symbols = {};
-  foreach my $pc (@pcs) {
-    my $fullname;
-    # For 64-bit binaries, symbols are extracted with 8 leading zeroes.
-    # Then /symbol reads the long symbols in as uint64, and outputs
-    # the result with a "0x%08llx" format which gets rid of the zeroes.
-    # By removing all the leading zeroes in both $pc and the symbols from
-    # /symbol, the symbols match and are retrievable from the map.
-    my $shortpc = $pc;
-    $shortpc =~ s/^0*//;
-    # Each line may have a list of names, which includes the function
-    # and also other functions it has inlined.  They are separated (in
-    # PrintSymbolizedProfile) by --, which is illegal in function names.
-    my $fullnames;
-    if (defined($symbol_map->{$shortpc})) {
-      $fullnames = $symbol_map->{$shortpc};
-    } else {
-      $fullnames = "0x" . $pc;  # Just use addresses
-    }
-    my $sym = [];
-    $symbols->{$pc} = $sym;
-    foreach my $fullname (split("--", $fullnames)) {
-      my $name = ShortFunctionName($fullname);
-      push(@{$sym}, $name, "?", $fullname);
-    }
-  }
-  return $symbols;
-}
-
-sub BaseName {
-  my $file_name = shift;
-  $file_name =~ s!^.*/!!;  # Remove directory name
-  return $file_name;
-}
-
-sub MakeProfileBaseName {
-  my ($binary_name, $profile_name) = @_;
-  my ($host, $baseURL, $path) = ParseProfileURL($profile_name);
-  my $binary_shortname = BaseName($binary_name);
-  return sprintf("%s.%s.%s",
-                 $binary_shortname, $main::op_time, $host);
-}
-
-sub FetchDynamicProfile {
-  my $binary_name = shift;
-  my $profile_name = shift;
-  my $fetch_name_only = shift;
-  my $encourage_patience = shift;
-
-  if (!IsProfileURL($profile_name)) {
-    return $profile_name;
-  } else {
-    my ($host, $baseURL, $path) = ParseProfileURL($profile_name);
-    if ($path eq "" || $path eq "/") {
-      # Missing type specifier defaults to cpu-profile
-      $path = $PROFILE_PAGE;
-    }
-
-    my $profile_file = MakeProfileBaseName($binary_name, $profile_name);
-
-    my $url = "$baseURL$path";
-    my $fetch_timeout = undef;
-    if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE/) {
-      if ($path =~ m/[?]/) {
-        $url .= "&";
-      } else {
-        $url .= "?";
-      }
-      $url .= sprintf("seconds=%d", $main::opt_seconds);
-      $fetch_timeout = $main::opt_seconds * 1.01 + 60;
-    } else {
-      # For non-CPU profiles, we add a type-extension to
-      # the target profile file name.
-      my $suffix = $path;
-      $suffix =~ s,/,.,g;
-      $profile_file .= $suffix;
-    }
-
-    my $profile_dir = $ENV{"PPROF_TMPDIR"} || ($ENV{HOME} . "/pprof");
-    if (! -d $profile_dir) {
-      mkdir($profile_dir)
-          || die("Unable to create profile directory $profile_dir: $!\n");
-    }
-    my $tmp_profile = "$profile_dir/.tmp.$profile_file";
-    my $real_profile = "$profile_dir/$profile_file";
-
-    if ($fetch_name_only > 0) {
-      return $real_profile;
-    }
-
-    my @fetcher = AddFetchTimeout($fetch_timeout, @URL_FETCHER);
-    my $cmd = ShellEscape(@fetcher, $url) . " > " . ShellEscape($tmp_profile);
-    if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE|$CENSUSPROFILE_PAGE/){
-      print STDERR "Gathering CPU profile from $url for $main::opt_seconds seconds to\n  ${real_profile}\n";
-      if ($encourage_patience) {
-        print STDERR "Be patient...\n";
-      }
-    } else {
-      print STDERR "Fetching $path profile from $url to\n  ${real_profile}\n";
-    }
-
-    (system($cmd) == 0) || error("Failed to get profile: $cmd: $!\n");
-    (system("mv", $tmp_profile, $real_profile) == 0) || error("Unable to rename profile\n");
-    print STDERR "Wrote profile to $real_profile\n";
-    $main::collected_profile = $real_profile;
-    return $main::collected_profile;
-  }
-}
-
-# Collect profiles in parallel
-sub FetchDynamicProfiles {
-  my $items = scalar(@main::pfile_args);
-  my $levels = log($items) / log(2);
-
-  if ($items == 1) {
-    $main::profile_files[0] = FetchDynamicProfile($main::prog, $main::pfile_args[0], 0, 1);
-  } else {
-    # Compensate for floating-point rounding in the log() computation above
-    if ((2 ** $levels) < $items) {
-     $levels++;
-    }
-    my $count = scalar(@main::pfile_args);
-    for (my $i = 0; $i < $count; $i++) {
-      $main::profile_files[$i] = FetchDynamicProfile($main::prog, $main::pfile_args[$i], 1, 0);
-    }
-    print STDERR "Fetching $count profiles. Be patient...\n";
-    FetchDynamicProfilesRecurse($levels, 0, 0);
-    $main::collected_profile = join(" \\\n    ", @main::profile_files);
-  }
-}
-
-# Recursively fork a process to get enough processes
-# collecting profiles
-sub FetchDynamicProfilesRecurse {
-  my $maxlevel = shift;
-  my $level = shift;
-  my $position = shift;
-
-  if (my $pid = fork()) {
-    $position = 0 | ($position << 1);
-    TryCollectProfile($maxlevel, $level, $position);
-    wait;
-  } else {
-    $position = 1 | ($position << 1);
-    TryCollectProfile($maxlevel, $level, $position);
-    cleanup();
-    exit(0);
-  }
-}
-
-# Collect a single profile
-sub TryCollectProfile {
-  my $maxlevel = shift;
-  my $level = shift;
-  my $position = shift;
-
-  if ($level >= ($maxlevel - 1)) {
-    if ($position < scalar(@main::pfile_args)) {
-      FetchDynamicProfile($main::prog, $main::pfile_args[$position], 0, 0);
-    }
-  } else {
-    FetchDynamicProfilesRecurse($maxlevel, $level+1, $position);
-  }
-}
-
-##### Parsing code #####
-
-# Provide a small streaming-read module to handle very large
-# cpu-profile files.  Stream in chunks along a sliding window.
-# Provides an interface to get one 'slot', correctly handling
-# endian-ness differences.  A slot is one 32-bit or 64-bit word
-# (depending on the input profile).  We tell endianness and bit-size
-# for the profile by looking at the first 8 bytes: in cpu profiles,
-# the second slot is always 3 (we'll accept anything that's not 0).
-BEGIN {
-  package CpuProfileStream;
-
-  sub new {
-    my ($class, $file, $fname) = @_;
-    my $self = { file        => $file,
-                 base        => 0,
-                 stride      => 512 * 1024,   # must be a multiple of bitsize/8
-                 slots       => [],
-                 unpack_code => "",           # N for big-endian, V for little
-                 perl_is_64bit => 1,          # matters if profile is 64-bit
-    };
-    bless $self, $class;
-    # Let unittests adjust the stride
-    if ($main::opt_test_stride > 0) {
-      $self->{stride} = $main::opt_test_stride;
-    }
-    # Read the first two slots to figure out bitsize and endianness.
-    my $slots = $self->{slots};
-    my $str;
-    read($self->{file}, $str, 8);
-    # Set the global $address_length based on what we see here.
-    # 8 is 32-bit (8 hexadecimal chars); 16 is 64-bit (16 hexadecimal chars).
-    $address_length = ($str eq (chr(0)x8)) ? 16 : 8;
-    if ($address_length == 8) {
-      if (substr($str, 6, 2) eq chr(0)x2) {
-        $self->{unpack_code} = 'V';  # Little-endian.
-      } elsif (substr($str, 4, 2) eq chr(0)x2) {
-        $self->{unpack_code} = 'N';  # Big-endian
-      } else {
-        ::error("$fname: header size >= 2**16\n");
-      }
-      @$slots = unpack($self->{unpack_code} . "*", $str);
-    } else {
-      # If we're a 64-bit profile, check if we're a 64-bit-capable
-      # perl.  Otherwise, each slot will be represented as a float
-      # instead of an int64, losing precision and making all the
-      # 64-bit addresses wrong.  We won't complain yet, but will
-      # later if we ever see a value that doesn't fit in 32 bits.
-      my $has_q = 0;
-      # pack("Q") dies on perls built without 64-bit integer support; the
-      # eval turns that die into $has_q remaining 0.
-      eval { $has_q = pack("Q", "1") ? 1 : 1; };
-      if (!$has_q) {
-        $self->{perl_is_64bit} = 0;
-      }
-      read($self->{file}, $str, 8);
-      if (substr($str, 4, 4) eq chr(0)x4) {
-        # We'd love to use 'Q', but it's a) not universal, b) not endian-proof.
-        $self->{unpack_code} = 'V';  # Little-endian.
-      } elsif (substr($str, 0, 4) eq chr(0)x4) {
-        $self->{unpack_code} = 'N';  # Big-endian
-      } else {
-        ::error("$fname: header size >= 2**32\n");
-      }
-      my @pair = unpack($self->{unpack_code} . "*", $str);
-      # Since we know one of the pair is 0, it's fine to just add them.
-      @$slots = (0, $pair[0] + $pair[1]);
-    }
-    return $self;
-  }
-
-  # Load more data when we access slots->get(X) which is not yet in memory.
-  sub overflow {
-    my ($self) = @_;
-    my $slots = $self->{slots};
-    $self->{base} += $#$slots + 1;   # skip over data we're replacing
-    my $str;
-    read($self->{file}, $str, $self->{stride});
-    if ($address_length == 8) {      # the 32-bit case
-      # This is the easy case: unpack provides 32-bit unpacking primitives.
-      @$slots = unpack($self->{unpack_code} . "*", $str);
-    } else {
-      # We need to unpack 32 bits at a time and combine.
-      my @b32_values = unpack($self->{unpack_code} . "*", $str);
-      my @b64_values = ();
-      for (my $i = 0; $i < $#b32_values; $i += 2) {
-        # TODO(csilvers): if this is a 32-bit perl, the math below
-        #    could end up in a too-large int, which perl will promote
-        #    to a double, losing necessary precision.  Deal with that.
-        #    Right now, we just die.
-        my ($lo, $hi) = ($b32_values[$i], $b32_values[$i+1]);
-        if ($self->{unpack_code} eq 'N') {    # big-endian
-          ($lo, $hi) = ($hi, $lo);
-        }
-        my $value = $lo + $hi * (2**32);
-        if (!$self->{perl_is_64bit} &&   # check value is exactly represented
-            (($value % (2**32)) != $lo || int($value / (2**32)) != $hi)) {
-          ::error("Need a 64-bit perl to process this 64-bit profile.\n");
-        }
-        push(@b64_values, $value);
-      }
-      @$slots = @b64_values;
-    }
-  }
-
-  # Access the i-th long in the file (logically), or -1 at EOF.
-  sub get {
-    my ($self, $idx) = @_;
-    my $slots = $self->{slots};
-    while ($#$slots >= 0) {
-      if ($idx < $self->{base}) {
-        # The only time we expect a reference to $slots[$i - something]
-        # after referencing $slots[$i] is reading the very first header.
-        # Since $stride > |header|, that shouldn't cause any lookback
-        # errors.  And everything after the header is sequential.
-        print STDERR "Unexpected look-back reading CPU profile";
-        return -1;   # shrug, don't know what better to return
-      } elsif ($idx > $self->{base} + $#$slots) {
-        $self->overflow();
-      } else {
-        return $slots->[$idx - $self->{base}];
-      }
-    }
-    # If we get here, $slots is [], which means we've reached EOF
-    return -1;  # unique since slots is supposed to hold unsigned numbers
-  }
-}
-
-# Reads the top, 'header' section of a profile, and returns the last
-# line of the header, commonly called a 'header line'.  The header
-# section of a profile consists of zero or more 'command' lines that
-# are instructions to pprof, which pprof executes when reading the
-# header.  All 'command' lines start with a %.  After the command
-# lines is the 'header line', which is a profile-specific line that
-# indicates what type of profile it is, and perhaps other global
-# information about the profile.  For instance, here's a header line
-# for a heap profile:
-#   heap profile:     53:    38236 [  5525:  1284029] @ heapprofile
-# For historical reasons, the CPU profile does not contain a text-
-# readable header line.  If the profile looks like a CPU profile,
-# this function returns "".  If no header line could be found, this
-# function returns undef.
-#
-# The following commands are recognized:
-#   %warn -- emit the rest of this line to stderr, prefixed by 'WARNING:'
-#
-# The input file should be in binmode.
-sub ReadProfileHeader {
-  local *PROFILE = shift;
-  my $firstchar = "";
-  my $line = "";
-  read(PROFILE, $firstchar, 1);
-  seek(PROFILE, -1, 1);                    # unread the firstchar
-  if ($firstchar !~ /[[:print:]]/) {       # is not a text character
-    return "";
-  }
-  while (defined($line = <PROFILE>)) {
-    $line =~ s/\r//g;   # turn windows-looking lines into unix-looking lines
-    if ($line =~ /^%warn\s+(.*)/) {        # 'warn' command
-      # Note this matches both '%warn blah\n' and '%warn\n'.
-      print STDERR "WARNING: $1\n";        # print the rest of the line
-    } elsif ($line =~ /^%/) {
-      print STDERR "Ignoring unknown command from profile header: $line";
-    } else {
-      # End of commands, must be the header line.
-      return $line;
-    }
-  }
-  return undef;     # got to EOF without seeing a header line
-}
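-
-# For example, a (made-up) profile beginning with
-#
-#   %warn This profile is several hours old
-#   heap profile:     10:     4096 [    20:    8192] @ heapprofile
-#
-# makes ReadProfileHeader print "WARNING: This profile is several hours old"
-# to stderr and return the "heap profile: ..." line as the header line.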
-
-sub IsSymbolizedProfileFile {
-  my $file_name = shift;
-  if (!(-e $file_name) || !(-r $file_name)) {
-    return 0;
-  }
-  # Check if the file contains a symbol-section marker.
-  open(TFILE, "<$file_name");
-  binmode TFILE;
-  my $firstline = ReadProfileHeader(*TFILE);
-  close(TFILE);
-  if (!$firstline) {
-    return 0;
-  }
-  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-  my $symbol_marker = $&;
-  return $firstline =~ /^--- *$symbol_marker/;
-}
-
-# Parse profile generated by common/profiler.cc and return a reference
-# to a map:
-#      $result->{version}     Version number of profile file
-#      $result->{period}      Sampling period (in microseconds)
-#      $result->{profile}     Profile object
-#      $result->{threads}     Map of thread IDs to profile objects
-#      $result->{map}         Memory map info from profile
-#      $result->{pcs}         Hash of all PC values seen, key is hex address
-sub ReadProfile {
-  my $prog = shift;
-  my $fname = shift;
-  my $result;            # return value
-
-  $CONTENTION_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-  my $contention_marker = $&;
-  $GROWTH_PAGE  =~ m,[^/]+$,;    # matches everything after the last slash
-  my $growth_marker = $&;
-  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-  my $symbol_marker = $&;
-  $PROFILE_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-  my $profile_marker = $&;
-
-  # Look at first line to see if it is a heap or a CPU profile.
-  # CPU profile may start with no header at all, and just binary data
-  # (starting with \0\0\0\0) -- in that case, don't try to read the
-  # whole firstline, since it may be gigabytes(!) of data.
-  open(PROFILE, "<$fname") || error("$fname: $!\n");
-  binmode PROFILE;      # New perls do UTF-8 processing
-  my $header = ReadProfileHeader(*PROFILE);
-  if (!defined($header)) {   # means "at EOF"
-    error("Profile is empty.\n");
-  }
-
-  my $symbols;
-  if ($header =~ m/^--- *$symbol_marker/o) {
-    # Verify that the user asked for a symbolized profile
-    if (!$main::use_symbolized_profile) {
-      # we have both a binary and symbolized profiles, abort
-      error("FATAL ERROR: Symbolized profile\n   $fname\ncannot be used with " .
-            "a binary arg. Try again without passing\n   $prog\n");
-    }
-    # Read the symbol section of the symbolized profile file.
-    $symbols = ReadSymbols(*PROFILE{IO});
-    # Read the next line to get the header for the remaining profile.
-    $header = ReadProfileHeader(*PROFILE) || "";
-  }
-
-  $main::profile_type = '';
-  if ($header =~ m/^heap profile:.*$growth_marker/o) {
-    $main::profile_type = 'growth';
-    $result =  ReadHeapProfile($prog, *PROFILE, $header);
-  } elsif ($header =~ m/^heap profile:/) {
-    $main::profile_type = 'heap';
-    $result =  ReadHeapProfile($prog, *PROFILE, $header);
-  } elsif ($header =~ m/^heap/) {
-    $main::profile_type = 'heap';
-    $result = ReadThreadedHeapProfile($prog, $fname, $header);
-  } elsif ($header =~ m/^--- *$contention_marker/o) {
-    $main::profile_type = 'contention';
-    $result = ReadSynchProfile($prog, *PROFILE);
-  } elsif ($header =~ m/^--- *Stacks:/) {
-    print STDERR
-      "Old format contention profile: mistakenly reports " .
-      "condition variable signals as lock contentions.\n";
-    $main::profile_type = 'contention';
-    $result = ReadSynchProfile($prog, *PROFILE);
-  } elsif ($header =~ m/^--- *$profile_marker/) {
-    # the binary cpu profile data starts immediately after this line
-    $main::profile_type = 'cpu';
-    $result = ReadCPUProfile($prog, $fname, *PROFILE);
-  } else {
-    if (defined($symbols)) {
-      # a symbolized profile contains a format we don't recognize, bail out
-      error("$fname: Cannot recognize profile section after symbols.\n");
-    }
-    # no ascii header present -- must be a CPU profile
-    $main::profile_type = 'cpu';
-    $result = ReadCPUProfile($prog, $fname, *PROFILE);
-  }
-
-  close(PROFILE);
-
-  # if we got symbols along with the profile, return those as well
-  if (defined($symbols)) {
-    $result->{symbols} = $symbols;
-  }
-
-  return $result;
-}
-
-# Subtract one from caller pc so we map back to call instr.
-# However, don't do this if we're reading a symbolized profile
-# file, in which case the subtract-one was done when the file
-# was written.
-#
-# We apply the same logic to all readers, though ReadCPUProfile uses an
-# independent implementation.
-sub FixCallerAddresses {
-  my $stack = shift;
-  if ($main::use_symbolized_profile) {
-    return $stack;
-  } else {
-    $stack =~ /(\s)/;
-    my $delimiter = $1;
-    my @addrs = split(' ', $stack);
-    my @fixedaddrs;
-    $#fixedaddrs = $#addrs;
-    if ($#addrs >= 0) {
-      $fixedaddrs[0] = $addrs[0];
-    }
-    for (my $i = 1; $i <= $#addrs; $i++) {
-      $fixedaddrs[$i] = AddressSub($addrs[$i], "0x1");
-    }
-    return join $delimiter, @fixedaddrs;
-  }
-}
-
-# CPU profile reader
-sub ReadCPUProfile {
-  my $prog = shift;
-  my $fname = shift;       # just used for logging
-  local *PROFILE = shift;
-  my $version;
-  my $period;
-  my $i;
-  my $profile = {};
-  my $pcs = {};
-
-  # Parse string into array of slots.
-  my $slots = CpuProfileStream->new(*PROFILE, $fname);
-
-  # Read header.  The current header version is a 5-element structure
-  # containing:
-  #   0: header count (always 0)
-  #   1: header "words" (after this one: 3)
-  #   2: format version (0)
-  #   3: sampling period (usec)
-  #   4: unused padding (always 0)
-  if ($slots->get(0) != 0 ) {
-    error("$fname: not a profile file, or old format profile file\n");
-  }
-  $i = 2 + $slots->get(1);
-  $version = $slots->get(2);
-  $period = $slots->get(3);
-  # Do some sanity checking on these header values.
-  if ($version > (2**32) || $period > (2**32) || $i > (2**32) || $i < 5) {
-    error("$fname: not a profile file, or corrupted profile file\n");
-  }
-
-  # Parse profile
-  while ($slots->get($i) != -1) {
-    my $n = $slots->get($i++);
-    my $d = $slots->get($i++);
-    if ($d > (2**16)) {  # TODO(csilvers): what's a reasonable max-stack-depth?
-      my $addr = sprintf("0%o", $i * ($address_length == 8 ? 4 : 8));
-      print STDERR "At index $i (address $addr):\n";
-      error("$fname: stack trace depth >= 2**16\n");
-    }
-    if ($slots->get($i) == 0) {
-      # End of profile data marker
-      $i += $d;
-      last;
-    }
-
-    # Make key out of the stack entries
-    my @k = ();
-    for (my $j = 0; $j < $d; $j++) {
-      my $pc = $slots->get($i+$j);
-      # Subtract one from caller pc so we map back to call instr.
-      # However, don't do this if we're reading a symbolized profile
-      # file, in which case the subtract-one was done when the file
-      # was written.
-      if ($j > 0 && !$main::use_symbolized_profile) {
-        $pc--;
-      }
-      $pc = sprintf("%0*x", $address_length, $pc);
-      $pcs->{$pc} = 1;
-      push @k, $pc;
-    }
-
-    AddEntry($profile, (join "\n", @k), $n);
-    $i += $d;
-  }
-
-  # Parse map
-  my $map = '';
-  seek(PROFILE, $i * 4, 0);
-  read(PROFILE, $map, (stat PROFILE)[7]);
-
-  my $r = {};
-  $r->{version} = $version;
-  $r->{period} = $period;
-  $r->{profile} = $profile;
-  $r->{libs} = ParseLibraries($prog, $map, $pcs);
-  $r->{pcs} = $pcs;
-
-  return $r;
-}
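-
-# A minimal sketch (hypothetical values) of the binary layout ReadCPUProfile
-# expects, here for a 32-bit little-endian profile: the 5-word header, one
-# sample with hit count 2 and a 1-deep stack, the end-of-data trailer
-# (count 0, depth 1, pc 0), then the textual memory map:
-#
-#   my $fake_cpu_profile =
-#       pack("V*", 0, 3, 0, 10000, 0,    # header: count, words, version, period (usec), padding
-#                  2, 1, 0x401000,       # sample: count 2, depth 1, one PC
-#                  0, 1, 0)              # trailer marking the end of the samples
-#       . "build=/some/path\n";          # memory-map section handed to ParseLibraries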
-
-sub HeapProfileIndex {
-  my $index = 1;
-  if ($main::opt_inuse_space) {
-    $index = 1;
-  } elsif ($main::opt_inuse_objects) {
-    $index = 0;
-  } elsif ($main::opt_alloc_space) {
-    $index = 3;
-  } elsif ($main::opt_alloc_objects) {
-    $index = 2;
-  }
-  return $index;
-}
-
-sub ReadMappedLibraries {
-  my $fh = shift;
-  my $map = "";
-  # Read the /proc/self/maps data
-  while (<$fh>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    $map .= $_;
-  }
-  return $map;
-}
-
-sub ReadMemoryMap {
-  my $fh = shift;
-  my $map = "";
-  # Read /proc/self/maps data as formatted by DumpAddressMap()
-  my $buildvar = "";
-  while (<$fh>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    # Parse "build=<dir>" specification if supplied
-    if (m/^\s*build=(.*)\n/) {
-      $buildvar = $1;
-    }
-
-    # Expand "$build" variable if available
-    $_ =~ s/\$build\b/$buildvar/g;
-
-    $map .= $_;
-  }
-  return $map;
-}
-
-sub AdjustSamples {
-  my ($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2) = @_;
-  if ($sample_adjustment) {
-    if ($sampling_algorithm == 2) {
-      # Remote-heap version 2
-      # The sampling frequency is the rate of a Poisson process.
-      # This means that the probability of sampling an allocation of
-      # size X with sampling rate Y is 1 - exp(-X/Y)
-      if ($n1 != 0) {
-        my $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
-        my $scale_factor = 1/(1 - exp(-$ratio));
-        $n1 *= $scale_factor;
-        $s1 *= $scale_factor;
-      }
-      if ($n2 != 0) {
-        my $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
-        my $scale_factor = 1/(1 - exp(-$ratio));
-        $n2 *= $scale_factor;
-        $s2 *= $scale_factor;
-      }
-    } else {
-      # Remote-heap version 1
-      my $ratio;
-      $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
-      if ($ratio < 1) {
-        $n1 /= $ratio;
-        $s1 /= $ratio;
-      }
-      $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
-      if ($ratio < 1) {
-        $n2 /= $ratio;
-        $s2 /= $ratio;
-      }
-    }
-  }
-  return ($n1, $s1, $n2, $s2);
-}
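-
-# A worked example with made-up numbers for the version-2 adjustment above:
-# with sampling rate Y = 524288 bytes and one sampled allocation of
-# X = 4096 bytes (n1 = 1, s1 = 4096), the sampling probability is
-# 1 - exp(-4096/524288) ~= 0.0078, giving a scale factor of ~128.5, so the
-# adjusted stats become roughly n1 ~= 128.5 and s1 ~= 526,000 bytes.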
-
-sub ReadHeapProfile {
-  my $prog = shift;
-  local *PROFILE = shift;
-  my $header = shift;
-
-  my $index = HeapProfileIndex();
-
-  # Find the type of this profile.  The header line looks like:
-  #    heap profile:   1246:  8800744 [  1246:  8800744] @ <heap-url>/266053
-  # There are two pairs <count: size>, the first inuse objects/space, and the
-  # second allocated objects/space.  This is followed optionally by a profile
-  # type, and if that is present, optionally by a sampling frequency.
-  # For remote heap profiles (v1):
-  # The interpretation of the sampling frequency is that the profiler, for
-  # each sample, calculates a uniformly distributed random integer less than
-  # the given value, and records the next sample after that many bytes have
-  # been allocated.  Therefore, the expected sample interval is half of the
-  # given frequency.  By default, if not specified, the expected sample
-  # interval is 128KB.  Only remote-heap-page profiles are adjusted for
-  # sample size.
-  # For remote heap profiles (v2):
-  # The sampling frequency is the rate of a Poisson process. This means that
-  # the probability of sampling an allocation of size X with sampling rate Y
-  # is 1 - exp(-X/Y)
-  # For version 2, a typical header line might look like this:
-  # heap profile:   1922: 127792360 [  1922: 127792360] @ <heap-url>_v2/524288
-  # the trailing number (524288) is the sampling rate. (Version 1 showed
-  # double the 'rate' here)
-  my $sampling_algorithm = 0;
-  my $sample_adjustment = 0;
-  chomp($header);
-  my $type = "unknown";
-  if ($header =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") {
-    if (defined($6) && ($6 ne '')) {
-      $type = $6;
-      my $sample_period = $8;
-      # $type is "heapprofile" for profiles generated by the
-      # heap-profiler, and either "heap" or "heap_v2" for profiles
-      # generated by sampling directly within tcmalloc.  It can also
-      # be "growth" for heap-growth profiles.  The first is typically
-      # found for profiles generated locally, and the others for
-      # remote profiles.
-      if (($type eq "heapprofile") || ($type !~ /heap/) ) {
-        # No need to adjust for the sampling rate with heap-profiler-derived data
-        $sampling_algorithm = 0;
-      } elsif ($type =~ /_v2/) {
-        $sampling_algorithm = 2;     # version 2 sampling
-        if (defined($sample_period) && ($sample_period ne '')) {
-          $sample_adjustment = int($sample_period);
-        }
-      } else {
-        $sampling_algorithm = 1;     # version 1 sampling
-        if (defined($sample_period) && ($sample_period ne '')) {
-          $sample_adjustment = int($sample_period)/2;
-        }
-      }
-    } else {
-      # We detect whether or not this is a remote-heap profile by checking
-      # that the total-allocated stats ($n2,$s2) are exactly the
-      # same as the in-use stats ($n1,$s1).  It is remotely conceivable
-      # that a non-remote-heap profile may pass this check, but it is hard
-      # to imagine how that could happen.
-      # In this case it's so old it's guaranteed to be remote-heap version 1.
-      my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
-      if (($n1 == $n2) && ($s1 == $s2)) {
-        # This is likely to be a remote-heap based sample profile
-        $sampling_algorithm = 1;
-      }
-    }
-  }
-
-  if ($sampling_algorithm > 0) {
-    # For remote-heap generated profiles, adjust the counts and sizes to
-    # account for the sample rate (we sample once every 128KB by default).
-    if ($sample_adjustment == 0) {
-      # Turn on profile adjustment.
-      $sample_adjustment = 128*1024;
-      print STDERR "Adjusting heap profiles for 1-in-128KB sampling rate\n";
-    } else {
-      printf STDERR ("Adjusting heap profiles for 1-in-%d sampling rate\n",
-                     $sample_adjustment);
-    }
-    if ($sampling_algorithm > 1) {
-      # We don't bother printing anything for the original version (version 1)
-      print STDERR "Heap version $sampling_algorithm\n";
-    }
-  }
-
-  my $profile = {};
-  my $pcs = {};
-  my $map = "";
-
-  while (<PROFILE>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    if (/^MAPPED_LIBRARIES:/) {
-      $map .= ReadMappedLibraries(*PROFILE);
-      last;
-    }
-
-    if (/^--- Memory map:/) {
-      $map .= ReadMemoryMap(*PROFILE);
-      last;
-    }
-
-    # Read entry of the form:
-    #  <count1>: <bytes1> [<count2>: <bytes2>] @ a1 a2 a3 ... an
-    s/^\s*//;
-    s/\s*$//;
-    if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) {
-      my $stack = $5;
-      my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
-      my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
-                                 $n1, $s1, $n2, $s2);
-      AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
-    }
-  }
-
-  my $r = {};
-  $r->{version} = "heap";
-  $r->{period} = 1;
-  $r->{profile} = $profile;
-  $r->{libs} = ParseLibraries($prog, $map, $pcs);
-  $r->{pcs} = $pcs;
-  return $r;
-}
-
-sub ReadThreadedHeapProfile {
-  my ($prog, $fname, $header) = @_;
-
-  my $index = HeapProfileIndex();
-  my $sampling_algorithm = 0;
-  my $sample_adjustment = 0;
-  chomp($header);
-  my $type = "unknown";
-  # Assuming a very specific type of header for now.
-  if ($header =~ m"^heap_v2/(\d+)") {
-    $type = "_v2";
-    $sampling_algorithm = 2;
-    $sample_adjustment = int($1);
-  }
-  if ($type ne "_v2" || !defined($sample_adjustment)) {
-    die "Threaded heap profiles require v2 sampling with a sample rate\n";
-  }
-
-  my $profile = {};
-  my $thread_profiles = {};
-  my $pcs = {};
-  my $map = "";
-  my $stack = "";
-
-  while (<PROFILE>) {
-    s/\r//g;
-    if (/^MAPPED_LIBRARIES:/) {
-      $map .= ReadMappedLibraries(*PROFILE);
-      last;
-    }
-
-    if (/^--- Memory map:/) {
-      $map .= ReadMemoryMap(*PROFILE);
-      last;
-    }
-
-    # Read entry of the form:
-    # @ a1 a2 ... an
-    #   t*: <count1>: <bytes1> [<count2>: <bytes2>]
-    #   t1: <count1>: <bytes1> [<count2>: <bytes2>]
-    #     ...
-    #   tn: <count1>: <bytes1> [<count2>: <bytes2>]
-    s/^\s*//;
-    s/\s*$//;
-    if (m/^@\s+(.*)$/) {
-      $stack = $1;
-    } elsif (m/^\s*(t(\*|\d+)):\s+(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]$/) {
-      if ($stack eq "") {
-        # Still in the header, so this is just a per-thread summary.
-        next;
-      }
-      my $thread = $2;
-      my ($n1, $s1, $n2, $s2) = ($3, $4, $5, $6);
-      my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
-                                 $n1, $s1, $n2, $s2);
-      if ($thread eq "*") {
-        AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
-      } else {
-        if (!exists($thread_profiles->{$thread})) {
-          $thread_profiles->{$thread} = {};
-        }
-        AddEntries($thread_profiles->{$thread}, $pcs,
-                   FixCallerAddresses($stack), $counts[$index]);
-      }
-    }
-  }
-
-  my $r = {};
-  $r->{version} = "heap";
-  $r->{period} = 1;
-  $r->{profile} = $profile;
-  $r->{threads} = $thread_profiles;
-  $r->{libs} = ParseLibraries($prog, $map, $pcs);
-  $r->{pcs} = $pcs;
-  return $r;
-}
-
-sub ReadSynchProfile {
-  my $prog = shift;
-  local *PROFILE = shift;
-  my $header = shift;
-
-  my $map = '';
-  my $profile = {};
-  my $pcs = {};
-  my $sampling_period = 1;
-  my $cyclespernanosec = 2.8;   # Default assumption for old binaries
-  my $seen_clockrate = 0;
-  my $line;
-
-  my $index = 0;
-  if ($main::opt_total_delay) {
-    $index = 0;
-  } elsif ($main::opt_contentions) {
-    $index = 1;
-  } elsif ($main::opt_mean_delay) {
-    $index = 2;
-  }
-
-  while ( $line = <PROFILE> ) {
-    $line =~ s/\r//g;      # turn windows-looking lines into unix-looking lines
-    if ( $line =~ /^\s*(\d+)\s+(\d+) \@\s*(.*?)\s*$/ ) {
-      my ($cycles, $count, $stack) = ($1, $2, $3);
-
-      # Convert cycles to nanoseconds
-      $cycles /= $cyclespernanosec;
-
-      # Adjust for sampling done by application
-      $cycles *= $sampling_period;
-      $count *= $sampling_period;
-
-      my @values = ($cycles, $count, $cycles / $count);
-      AddEntries($profile, $pcs, FixCallerAddresses($stack), $values[$index]);
-
-    } elsif ( $line =~ /^(slow release).*thread \d+  \@\s*(.*?)\s*$/ ||
-              $line =~ /^\s*(\d+) \@\s*(.*?)\s*$/ ) {
-      my ($cycles, $stack) = ($1, $2);
-      if ($cycles !~ /^\d+$/) {
-        next;
-      }
-
-      # Convert cycles to nanoseconds
-      $cycles /= $cyclespernanosec;
-
-      # Adjust for sampling done by application
-      $cycles *= $sampling_period;
-
-      AddEntries($profile, $pcs, FixCallerAddresses($stack), $cycles);
-
-    } elsif ( $line =~ m/^([a-z][^=]*)=(.*)$/ ) {
-      my ($variable, $value) = ($1,$2);
-      for ($variable, $value) {
-        s/^\s+//;
-        s/\s+$//;
-      }
-      if ($variable eq "cycles/second") {
-        $cyclespernanosec = $value / 1e9;
-        $seen_clockrate = 1;
-      } elsif ($variable eq "sampling period") {
-        $sampling_period = $value;
-      } elsif ($variable eq "ms since reset") {
-        # Currently nothing is done with this value in pprof
-        # So we just silently ignore it for now
-      } elsif ($variable eq "discarded samples") {
-        # Currently nothing is done with this value in pprof
-        # So we just silently ignore it for now
-      } else {
-        printf STDERR ("Ignoring unknown variable in /contention output: " .
-                       "'%s' = '%s'\n",$variable,$value);
-      }
-    } else {
-      # Memory map entry
-      $map .= $line;
-    }
-  }
-
-  if (!$seen_clockrate) {
-    printf STDERR ("No cycles/second entry in profile; Guessing %.1f GHz\n",
-                   $cyclespernanosec);
-  }
-
-  my $r = {};
-  $r->{version} = 0;
-  $r->{period} = $sampling_period;
-  $r->{profile} = $profile;
-  $r->{libs} = ParseLibraries($prog, $map, $pcs);
-  $r->{pcs} = $pcs;
-  return $r;
-}
-
-# Given a hex value in the form "0x1abcd" or "1abcd", return either
-# "0001abcd" or "000000000001abcd", depending on the current (global)
-# address length.
-sub HexExtend {
-  my $addr = shift;
-
-  $addr =~ s/^(0x)?0*//;
-  my $zeros_needed = $address_length - length($addr);
-  if ($zeros_needed < 0) {
-    printf STDERR "Warning: address $addr is longer than address length $address_length\n";
-    return $addr;
-  }
-  return ("0" x $zeros_needed) . $addr;
-}
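# Illustrative sketch only, not part of pprof: the same zero-padding as a
# standalone helper.  The name HexExtendExample and the explicit $width
# argument are ours; $width stands in for the global $address_length.
sub HexExtendExample {
  my ($addr, $width) = @_;
  $addr =~ s/^(0x)?0*//;                   # drop any "0x" prefix and leading zeros
  return $addr if length($addr) > $width;  # already longer: return unchanged
  return ("0" x ($width - length($addr))) . $addr;
}
# HexExtendExample("0x1abcd", 8)  yields "0001abcd"
# HexExtendExample("0x1abcd", 16) yields "000000000001abcd"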
-
-##### Symbol extraction #####
-
-# Aggressively search the lib_prefix values for the given library
-# If all else fails, just return the name of the library unmodified.
-# If the lib_prefix is "/my/path,/other/path" and $file is "/lib/dir/mylib.so"
-# it will search the following locations in this order, until it finds a file:
-#   /my/path/lib/dir/mylib.so
-#   /other/path/lib/dir/mylib.so
-#   /my/path/dir/mylib.so
-#   /other/path/dir/mylib.so
-#   /my/path/mylib.so
-#   /other/path/mylib.so
-#   /lib/dir/mylib.so              (returned as last resort)
-sub FindLibrary {
-  my $file = shift;
-  my $suffix = $file;
-
-  # Search for the library as described above
-  do {
-    foreach my $prefix (@prefix_list) {
-      my $fullpath = $prefix . $suffix;
-      if (-e $fullpath) {
-        return $fullpath;
-      }
-    }
-  } while ($suffix =~ s|^/[^/]+/|/|);
-  return $file;
-}
-
-# Return path to library with debugging symbols.
-# For libc libraries, the copy in /usr/lib/debug contains debugging symbols
-sub DebuggingLibrary {
-  my $file = shift;
-  if ($file =~ m|^/|) {
-      if (-f "/usr/lib/debug$file") {
-        return "/usr/lib/debug$file";
-      } elsif (-f "/usr/lib/debug$file.debug") {
-        return "/usr/lib/debug$file.debug";
-      }
-  }
-  return undef;
-}
-
-# Parse text section header of a library using objdump
-sub ParseTextSectionHeaderFromObjdump {
-  my $lib = shift;
-
-  my $size = undef;
-  my $vma;
-  my $file_offset;
-  # Get objdump output from the library file to figure out how to
-  # map between mapped addresses and addresses in the library.
-  my $cmd = ShellEscape($obj_tool_map{"objdump"}, "-h", $lib);
-  open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
-  while (<OBJDUMP>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    # Idx Name          Size      VMA       LMA       File off  Algn
-    #  10 .text         00104b2c  420156f0  420156f0  000156f0  2**4
-    # For 64-bit objects, VMA and LMA will be 16 hex digits, size and file
-    # offset may still be 8.  But AddressSub below will still handle that.
-    my @x = split;
-    if (($#x >= 6) && ($x[1] eq '.text')) {
-      $size = $x[2];
-      $vma = $x[3];
-      $file_offset = $x[5];
-      last;
-    }
-  }
-  close(OBJDUMP);
-
-  if (!defined($size)) {
-    return undef;
-  }
-
-  my $r = {};
-  $r->{size} = $size;
-  $r->{vma} = $vma;
-  $r->{file_offset} = $file_offset;
-
-  return $r;
-}
-
-# Parse text section header of a library using otool (on OS X)
-sub ParseTextSectionHeaderFromOtool {
-  my $lib = shift;
-
-  my $size = undef;
-  my $vma = undef;
-  my $file_offset = undef;
-  # Get otool output from the library file to figure out how to
-  # map between mapped addresses and addresses in the library.
-  my $command = ShellEscape($obj_tool_map{"otool"}, "-l", $lib);
-  open(OTOOL, "$command |") || error("$command: $!\n");
-  my $cmd = "";
-  my $sectname = "";
-  my $segname = "";
-  foreach my $line (<OTOOL>) {
-    $line =~ s/\r//g;      # turn windows-looking lines into unix-looking lines
-    # Load command <#>
-    #       cmd LC_SEGMENT
-    # [...]
-    # Section
-    #   sectname __text
-    #    segname __TEXT
-    #       addr 0x000009f8
-    #       size 0x00018b9e
-    #     offset 2552
-    #      align 2^2 (4)
-    # We will need to strip off the leading 0x from the hex addresses,
-    # and convert the offset into hex.
-    if ($line =~ /Load command/) {
-      $cmd = "";
-      $sectname = "";
-      $segname = "";
-    } elsif ($line =~ /Section/) {
-      $sectname = "";
-      $segname = "";
-    } elsif ($line =~ /cmd (\w+)/) {
-      $cmd = $1;
-    } elsif ($line =~ /sectname (\w+)/) {
-      $sectname = $1;
-    } elsif ($line =~ /segname (\w+)/) {
-      $segname = $1;
-    } elsif (!(($cmd eq "LC_SEGMENT" || $cmd eq "LC_SEGMENT_64") &&
-               $sectname eq "__text" &&
-               $segname eq "__TEXT")) {
-      next;
-    } elsif ($line =~ /\baddr 0x([0-9a-fA-F]+)/) {
-      $vma = $1;
-    } elsif ($line =~ /\bsize 0x([0-9a-fA-F]+)/) {
-      $size = $1;
-    } elsif ($line =~ /\boffset ([0-9]+)/) {
-      $file_offset = sprintf("%016x", $1);
-    }
-    if (defined($vma) && defined($size) && defined($file_offset)) {
-      last;
-    }
-  }
-  close(OTOOL);
-
-  if (!defined($vma) || !defined($size) || !defined($file_offset)) {
-     return undef;
-  }
-
-  my $r = {};
-  $r->{size} = $size;
-  $r->{vma} = $vma;
-  $r->{file_offset} = $file_offset;
-
-  return $r;
-}
-
-sub ParseTextSectionHeader {
-  # obj_tool_map("otool") is only defined if we're in a Mach-O environment
-  if (defined($obj_tool_map{"otool"})) {
-    my $r = ParseTextSectionHeaderFromOtool(@_);
-    if (defined($r)){
-      return $r;
-    }
-  }
-  # If otool doesn't work, or we don't have it, fall back to objdump
-  return ParseTextSectionHeaderFromObjdump(@_);
-}
-
-# Split /proc/pid/maps dump into a list of libraries
-sub ParseLibraries {
-  return if $main::use_symbol_page;  # We don't need libraries info.
-  my $prog = shift;
-  my $map = shift;
-  my $pcs = shift;
-
-  my $result = [];
-  my $h = "[a-f0-9]+";
-  my $zero_offset = HexExtend("0");
-
-  my $buildvar = "";
-  foreach my $l (split("\n", $map)) {
-    if ($l =~ m/^\s*build=(.*)$/) {
-      $buildvar = $1;
-    }
-
-    my $start;
-    my $finish;
-    my $offset;
-    my $lib;
-    if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.(so|dll|dylib|bundle)((\.\d+)+\w*(\.\d+){0,3})?)$/i) {
-      # Full line from /proc/self/maps.  Example:
-      #   40000000-40015000 r-xp 00000000 03:01 12845071   /lib/ld-2.3.2.so
-      $start = HexExtend($1);
-      $finish = HexExtend($2);
-      $offset = HexExtend($3);
-      $lib = $4;
-      $lib =~ s|\\|/|g;     # turn windows-style paths into unix-style paths
-    } elsif ($l =~ /^\s*($h)-($h):\s*(\S+\.so(\.\d+)*)/) {
-      # Cooked line from DumpAddressMap.  Example:
-      #   40000000-40015000: /lib/ld-2.3.2.so
-      $start = HexExtend($1);
-      $finish = HexExtend($2);
-      $offset = $zero_offset;
-      $lib = $3;
-    }
-    # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in
-    # function procfs_doprocmap (sys/fs/procfs/procfs_map.c)
-    #
-    # Example:
-    # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.s
-    # o.1 NCH -1
-    elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) {
-      $start = HexExtend($1);
-      $finish = HexExtend($2);
-      $offset = $zero_offset;
-      $lib = FindLibrary($5);
-
-    } else {
-      next;
-    }
-
-    # Expand "$build" variable if available
-    $lib =~ s/\$build\b/$buildvar/g;
-
-    $lib = FindLibrary($lib);
-
-    # Check for pre-relocated libraries, which use pre-relocated symbol tables
-    # and thus require adjusting the offset that we'll use to translate
-    # VM addresses into symbol table addresses.
-    # Only do this if we're not going to fetch the symbol table from a
-    # debugging copy of the library.
-    if (!DebuggingLibrary($lib)) {
-      my $text = ParseTextSectionHeader($lib);
-      if (defined($text)) {
-         my $vma_offset = AddressSub($text->{vma}, $text->{file_offset});
-         $offset = AddressAdd($offset, $vma_offset);
-      }
-    }
-
-    if($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; }
-    push(@{$result}, [$lib, $start, $finish, $offset]);
-  }
-
-  # Append special entry for additional library (not relocated)
-  if ($main::opt_lib ne "") {
-    my $text = ParseTextSectionHeader($main::opt_lib);
-    if (defined($text)) {
-       my $start = $text->{vma};
-       my $finish = AddressAdd($start, $text->{size});
-
-       push(@{$result}, [$main::opt_lib, $start, $finish, $start]);
-    }
-  }
-
-  # Append special entry for the main program.  This covers
-  # 0..max_pc_value_seen, so that we assume pc values not found in one
-  # of the library ranges will be treated as coming from the main
-  # program binary.
-  my $min_pc = HexExtend("0");
-  my $max_pc = $min_pc;          # find the maximal PC value in any sample
-  foreach my $pc (keys(%{$pcs})) {
-    if (HexExtend($pc) gt $max_pc) { $max_pc = HexExtend($pc); }
-  }
-  push(@{$result}, [$prog, $min_pc, $max_pc, $zero_offset]);
-
-  return $result;
-}
-
-# Add two hex addresses of length $address_length.
-# Run pprof --test for unit test if this is changed.
-sub AddressAdd {
-  my $addr1 = shift;
-  my $addr2 = shift;
-  my $sum;
-
-  if ($address_length == 8) {
-    # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
-    $sum = (hex($addr1)+hex($addr2)) % (0x10000000 * 16);
-    return sprintf("%08x", $sum);
-
-  } else {
-    # Do the addition in 7-nibble chunks to trivialize carry handling.
-
-    if ($main::opt_debug and $main::opt_test) {
-      print STDERR "AddressAdd $addr1 + $addr2 = ";
-    }
-
-    my $a1 = substr($addr1,-7);
-    $addr1 = substr($addr1,0,-7);
-    my $a2 = substr($addr2,-7);
-    $addr2 = substr($addr2,0,-7);
-    $sum = hex($a1) + hex($a2);
-    my $c = 0;
-    if ($sum > 0xfffffff) {
-      $c = 1;
-      $sum -= 0x10000000;
-    }
-    my $r = sprintf("%07x", $sum);
-
-    $a1 = substr($addr1,-7);
-    $addr1 = substr($addr1,0,-7);
-    $a2 = substr($addr2,-7);
-    $addr2 = substr($addr2,0,-7);
-    $sum = hex($a1) + hex($a2) + $c;
-    $c = 0;
-    if ($sum > 0xfffffff) {
-      $c = 1;
-      $sum -= 0x10000000;
-    }
-    $r = sprintf("%07x", $sum) . $r;
-
-    $sum = hex($addr1) + hex($addr2) + $c;
-    if ($sum > 0xff) { $sum -= 0x100; }
-    $r = sprintf("%02x", $sum) . $r;
-
-    if ($main::opt_debug and $main::opt_test) { print STDERR "$r\n"; }
-
-    return $r;
-  }
-}
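# Illustrative cross-check only, not part of pprof: the 7-nibble chunking
# above exists because a 16-digit hex sum can exceed the range Perl keeps
# exact in a native number.  Assuming the core Math::BigInt module, the same
# wrapping sum can be written directly (the helper name CheckedHexAdd is ours):
use Math::BigInt;
sub CheckedHexAdd {
  my ($addr1, $addr2, $width) = @_;               # $width is 8 or 16 nibbles
  my $sum = Math::BigInt->from_hex("0x$addr1") + Math::BigInt->from_hex("0x$addr2");
  $sum = $sum->bmod(Math::BigInt->new(2)->bpow(4 * $width));  # wrap like AddressAdd
  my $hex = substr($sum->as_hex(), 2);            # strip the leading "0x"
  return ("0" x ($width - length($hex))) . $hex;
}
# CheckedHexAdd("ffffffff", "aaaaaaaa", 8) yields "aaaaaaa9", matching the
# 32-bit test vector used by RunUnitTests below.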
-
-
-# Subtract two hex addresses of length $address_length.
-# Run pprof --test for unit test if this is changed.
-sub AddressSub {
-  my $addr1 = shift;
-  my $addr2 = shift;
-  my $diff;
-
-  if ($address_length == 8) {
-    # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
-    $diff = (hex($addr1)-hex($addr2)) % (0x10000000 * 16);
-    return sprintf("%08x", $diff);
-
-  } else {
-    # Do the addition in 7-nibble chunks to trivialize borrow handling.
-    # if ($main::opt_debug) { print STDERR "AddressSub $addr1 - $addr2 = "; }
-
-    my $a1 = hex(substr($addr1,-7));
-    $addr1 = substr($addr1,0,-7);
-    my $a2 = hex(substr($addr2,-7));
-    $addr2 = substr($addr2,0,-7);
-    my $b = 0;
-    if ($a2 > $a1) {
-      $b = 1;
-      $a1 += 0x10000000;
-    }
-    $diff = $a1 - $a2;
-    my $r = sprintf("%07x", $diff);
-
-    $a1 = hex(substr($addr1,-7));
-    $addr1 = substr($addr1,0,-7);
-    $a2 = hex(substr($addr2,-7)) + $b;
-    $addr2 = substr($addr2,0,-7);
-    $b = 0;
-    if ($a2 > $a1) {
-      $b = 1;
-      $a1 += 0x10000000;
-    }
-    $diff = $a1 - $a2;
-    $r = sprintf("%07x", $diff) . $r;
-
-    $a1 = hex($addr1);
-    $a2 = hex($addr2) + $b;
-    if ($a2 > $a1) { $a1 += 0x100; }
-    $diff = $a1 - $a2;
-    $r = sprintf("%02x", $diff) . $r;
-
-    # if ($main::opt_debug) { print STDERR "$r\n"; }
-
-    return $r;
-  }
-}
-
-# Increment a hex address of length $address_length.
-# Run pprof --test for unit test if this is changed.
-sub AddressInc {
-  my $addr = shift;
-  my $sum;
-
-  if ($address_length == 8) {
-    # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
-    $sum = (hex($addr)+1) % (0x10000000 * 16);
-    return sprintf("%08x", $sum);
-
-  } else {
-    # Do the addition in 7-nibble chunks to trivialize carry handling.
-    # We are always doing this to step through the addresses in a function,
-    # and will almost never overflow the first chunk, so we check for this
-    # case and exit early.
-
-    # if ($main::opt_debug) { print STDERR "AddressInc $addr1 = "; }
-
-    my $a1 = substr($addr,-7);
-    $addr = substr($addr,0,-7);
-    $sum = hex($a1) + 1;
-    my $r = sprintf("%07x", $sum);
-    if ($sum <= 0xfffffff) {
-      $r = $addr . $r;
-      # if ($main::opt_debug) { print STDERR "$r\n"; }
-      return HexExtend($r);
-    } else {
-      $r = "0000000";
-    }
-
-    $a1 = substr($addr,-7);
-    $addr = substr($addr,0,-7);
-    $sum = hex($a1) + 1;
-    $r = sprintf("%07x", $sum) . $r;
-    if ($sum <= 0xfffffff) {
-      $r = $addr . $r;
-      # if ($main::opt_debug) { print STDERR "$r\n"; }
-      return HexExtend($r);
-    } else {
-      $r = "00000000000000";
-    }
-
-    $sum = hex($addr) + 1;
-    if ($sum > 0xff) { $sum -= 0x100; }
-    $r = sprintf("%02x", $sum) . $r;
-
-    # if ($main::opt_debug) { print STDERR "$r\n"; }
-    return $r;
-  }
-}
-
-# Extract symbols for all PC values found in profile
-sub ExtractSymbols {
-  my $libs = shift;
-  my $pcset = shift;
-
-  my $symbols = {};
-
-  # Map each PC value to the containing library.  To make this faster,
-  # we sort libraries by their starting pc value (highest first), and
-  # advance through the libraries as we advance the pc.  Sometimes the
-  # addresses of libraries may overlap with the addresses of the main
-  # binary, so to make sure the libraries 'win', we iterate over the
-  # libraries in reverse order (which assumes the binary doesn't start
-  # in the middle of a library, which seems a fair assumption).
-  my @pcs = (sort { $a cmp $b } keys(%{$pcset}));  # pcset is 0-extended strings
-  foreach my $lib (sort {$b->[1] cmp $a->[1]} @{$libs}) {
-    my $libname = $lib->[0];
-    my $start = $lib->[1];
-    my $finish = $lib->[2];
-    my $offset = $lib->[3];
-
-    # Use debug library if it exists
-    my $debug_libname = DebuggingLibrary($libname);
-    if ($debug_libname) {
-        $libname = $debug_libname;
-    }
-
-    # Get list of pcs that belong in this library.
-    my $contained = [];
-    my ($start_pc_index, $finish_pc_index);
-    # Find smallest finish_pc_index such that $finish < $pc[$finish_pc_index].
-    for ($finish_pc_index = $#pcs + 1; $finish_pc_index > 0;
-         $finish_pc_index--) {
-      last if $pcs[$finish_pc_index - 1] le $finish;
-    }
-    # Find smallest start_pc_index such that $start <= $pc[$start_pc_index].
-    for ($start_pc_index = $finish_pc_index; $start_pc_index > 0;
-         $start_pc_index--) {
-      last if $pcs[$start_pc_index - 1] lt $start;
-    }
-    # This keeps PC values higher than $pc[$finish_pc_index] in @pcs,
-    # in case there are overlaps in libraries and the main binary.
-    @{$contained} = splice(@pcs, $start_pc_index,
-                           $finish_pc_index - $start_pc_index);
-    # Map to symbols
-    MapToSymbols($libname, AddressSub($start, $offset), $contained, $symbols);
-  }
-
-  return $symbols;
-}
-
-# Map list of PC values to symbols for a given image
-sub MapToSymbols {
-  my $image = shift;
-  my $offset = shift;
-  my $pclist = shift;
-  my $symbols = shift;
-
-  my $debug = 0;
-
-  # Ignore empty binaries
-  if ($#{$pclist} < 0) { return; }
-
-  # Figure out the addr2line command to use
-  my $addr2line = $obj_tool_map{"addr2line"};
-  my $cmd = ShellEscape($addr2line, "-f", "-C", "-e", $image);
-  if (exists $obj_tool_map{"addr2line_pdb"}) {
-    $addr2line = $obj_tool_map{"addr2line_pdb"};
-    $cmd = ShellEscape($addr2line, "--demangle", "-f", "-C", "-e", $image);
-  }
-
-  # If "addr2line" isn't installed on the system at all, just use
-  # nm to get what info we can (function names, but not line numbers).
-  if (system(ShellEscape($addr2line, "--help") . " >$dev_null 2>&1") != 0) {
-    MapSymbolsWithNM($image, $offset, $pclist, $symbols);
-    return;
-  }
-
-  # "addr2line -i" can produce a variable number of lines per input
-  # address, with no separator that allows us to tell when data for
-  # the next address starts.  So we find the address for a special
-  # symbol (_fini) and interleave this address between all real
-  # addresses passed to addr2line.  The name of this special symbol
-  # can then be used as a separator.
-  $sep_address = undef;  # May be filled in by MapSymbolsWithNM()
-  my $nm_symbols = {};
-  MapSymbolsWithNM($image, $offset, $pclist, $nm_symbols);
-  if (defined($sep_address)) {
-    # Only add " -i" to addr2line if the binary supports it.
-    # addr2line --help returns 0, but not if it sees an unknown flag first.
-    if (system("$cmd -i --help >$dev_null 2>&1") == 0) {
-      $cmd .= " -i";
-    } else {
-      $sep_address = undef;   # no need for sep_address if we don't support -i
-    }
-  }
-
-  # Make file with all PC values with intervening 'sep_address' so
-  # that we can reliably detect the end of inlined function list
-  open(ADDRESSES, ">$main::tmpfile_sym") || error("$main::tmpfile_sym: $!\n");
-  if ($debug) { print("---- $image ---\n"); }
-  for (my $i = 0; $i <= $#{$pclist}; $i++) {
-    # addr2line always reads hex addresses, and does not need '0x' prefix.
-    if ($debug) { printf STDERR ("%s\n", $pclist->[$i]); }
-    printf ADDRESSES ("%s\n", AddressSub($pclist->[$i], $offset));
-    if (defined($sep_address)) {
-      printf ADDRESSES ("%s\n", $sep_address);
-    }
-  }
-  close(ADDRESSES);
-  if ($debug) {
-    print("----\n");
-    system("cat", $main::tmpfile_sym);
-    print("----\n");
-    system("$cmd < " . ShellEscape($main::tmpfile_sym));
-    print("----\n");
-  }
-
-  open(SYMBOLS, "$cmd <" . ShellEscape($main::tmpfile_sym) . " |")
-      || error("$cmd: $!\n");
-  my $count = 0;   # Index in pclist
-  while (<SYMBOLS>) {
-    # Read fullfunction and filelineinfo from next pair of lines
-    s/\r?\n$//g;
-    my $fullfunction = $_;
-    $_ = <SYMBOLS>;
-    s/\r?\n$//g;
-    my $filelinenum = $_;
-
-    if (defined($sep_address) && $fullfunction eq $sep_symbol) {
-      # Terminating marker for data for this address
-      $count++;
-      next;
-    }
-
-    $filelinenum =~ s|\\|/|g; # turn windows-style paths into unix-style paths
-
-    my $pcstr = $pclist->[$count];
-    my $function = ShortFunctionName($fullfunction);
-    my $nms = $nm_symbols->{$pcstr};
-    if (defined($nms)) {
-      if ($fullfunction eq '??') {
-        # nm found a symbol for us.
-        $function = $nms->[0];
-        $fullfunction = $nms->[2];
-      } else {
-       # MapSymbolsWithNM tags each routine with its starting address,
-       # useful in case the image has multiple occurrences of this
-       # routine.  (It uses a syntax that resembles template parameters,
-       # which are automatically stripped out by ShortFunctionName().)
-       # addr2line does not provide the same information.  So we check
-       # if nm disambiguated our symbol, and if so take the annotated
-       # (nm) version of the routine-name.  TODO(csilvers): this won't
-       # catch overloaded, inlined symbols, which nm doesn't see.
-       # Better would be to do a check similar to nm's, in this fn.
-       if ($nms->[2] =~ m/^\Q$function\E/) {  # sanity check it's the right fn
-         $function = $nms->[0];
-         $fullfunction = $nms->[2];
-       }
-      }
-    }
-
-    # Prepend to accumulated symbols for pcstr
-    # (so that caller comes before callee)
-    my $sym = $symbols->{$pcstr};
-    if (!defined($sym)) {
-      $sym = [];
-      $symbols->{$pcstr} = $sym;
-    }
-    unshift(@{$sym}, $function, $filelinenum, $fullfunction);
-    if ($debug) { printf STDERR ("%s => [%s]\n", $pcstr, join(" ", @{$sym})); }
-    if (!defined($sep_address)) {
-      # Inlining is off, so this entry ends immediately
-      $count++;
-    }
-  }
-  close(SYMBOLS);
-}
-
-# Use nm to map the list of referenced PCs to symbols.  Return true iff we
-# are able to read procedure information via nm.
-sub MapSymbolsWithNM {
-  my $image = shift;
-  my $offset = shift;
-  my $pclist = shift;
-  my $symbols = shift;
-
-  # Get nm output sorted by increasing address
-  my $symbol_table = GetProcedureBoundaries($image, ".");
-  if (!%{$symbol_table}) {
-    return 0;
-  }
-  # Start addresses are already the right length (8 or 16 hex digits).
-  my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] }
-    keys(%{$symbol_table});
-
-  if ($#names < 0) {
-    # No symbols: just use addresses
-    foreach my $pc (@{$pclist}) {
-      my $pcstr = "0x" . $pc;
-      $symbols->{$pc} = [$pcstr, "?", $pcstr];
-    }
-    return 0;
-  }
-
-  # Sort addresses so we can do a join against nm output
-  my $index = 0;
-  my $fullname = $names[0];
-  my $name = ShortFunctionName($fullname);
-  foreach my $pc (sort { $a cmp $b } @{$pclist}) {
-    # Adjust for mapped offset
-    my $mpc = AddressSub($pc, $offset);
-    while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])){
-      $index++;
-      $fullname = $names[$index];
-      $name = ShortFunctionName($fullname);
-    }
-    if ($mpc lt $symbol_table->{$fullname}->[1]) {
-      $symbols->{$pc} = [$name, "?", $fullname];
-    } else {
-      my $pcstr = "0x" . $pc;
-      $symbols->{$pc} = [$pcstr, "?", $pcstr];
-    }
-  }
-  return 1;
-}
-
-sub ShortFunctionName {
-  my $function = shift;
-  while ($function =~ s/\([^()]*\)(\s*const)?//g) { }   # Argument types
-  while ($function =~ s/<[^<>]*>//g)  { }    # Remove template arguments
-  $function =~ s/^.*\s+(\w+::)/$1/;          # Remove leading type
-  return $function;
-}
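# Illustrative examples only (ours, not part of pprof) of what
# ShortFunctionName() yields for typical demangled C++ names:
#   "std::vector<int>::push_back(int const&)"  becomes  "std::vector::push_back"
#   "void Foo::Bar(char const*) const"         becomes  "Foo::Bar"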
-
-# Trim overly long symbols found in disassembler output
-sub CleanDisassembly {
-  my $d = shift;
-  while ($d =~ s/\([^()%]*\)(\s*const)?//g) { } # Argument types, not (%rax)
-  while ($d =~ s/(\w+)<[^<>]*>/$1/g)  { }       # Remove template arguments
-  return $d;
-}
-
-# Clean file name for display
-sub CleanFileName {
-  my ($f) = @_;
-  $f =~ s|^/proc/self/cwd/||;
-  $f =~ s|^\./||;
-  return $f;
-}
-
-# Make address relative to section and clean up for display
-sub UnparseAddress {
-  my ($offset, $address) = @_;
-  $address = AddressSub($address, $offset);
-  $address =~ s/^0x//;
-  $address =~ s/^0*//;
-  return $address;
-}
-
-##### Miscellaneous #####
-
-# Find the right versions of the above object tools to use.  The
-# argument is the program file being analyzed, and should be an ELF
-# 32-bit or ELF 64-bit executable file.  The location of the tools
-# is determined by considering the following options in this order:
-#   1) --tools option, if set
-#   2) PPROF_TOOLS environment variable, if set
-#   3) the environment
-sub ConfigureObjTools {
-  my $prog_file = shift;
-
-  # Check for the existence of $prog_file because /usr/bin/file does not
-  # predictably return error status in prod.
-  (-e $prog_file)  || error("$prog_file does not exist.\n");
-
-  my $file_type = undef;
-  if (-e "/usr/bin/file") {
-    # Follow symlinks (at least for systems where "file" supports that).
-    my $escaped_prog_file = ShellEscape($prog_file);
-    $file_type = `/usr/bin/file -L $escaped_prog_file 2>$dev_null ||
-                  /usr/bin/file $escaped_prog_file`;
-  } elsif ($^O eq "MSWin32") {
-    $file_type = "MS Windows";
-  } else {
-    print STDERR "WARNING: Can't determine the file type of $prog_file";
-  }
-
-  if ($file_type =~ /64-bit/) {
-    # Change $address_length to 16 if the program file is ELF 64-bit.
-    # We can't detect this from many (most?) heap or lock contention
-    # profiles, since the actual addresses referenced are generally in low
-    # memory even for 64-bit programs.
-    $address_length = 16;
-  }
-
-  if ($file_type =~ /MS Windows/) {
-    # For windows, we provide a version of nm and addr2line as part of
-    # the opensource release, which is capable of parsing
-    # Windows-style PDB executables.  It should live in the path, or
-    # in the same directory as pprof.
-    $obj_tool_map{"nm_pdb"} = "nm-pdb";
-    $obj_tool_map{"addr2line_pdb"} = "addr2line-pdb";
-  }
-
-  if ($file_type =~ /Mach-O/) {
-    # OS X uses otool to examine Mach-O files, rather than objdump.
-    $obj_tool_map{"otool"} = "otool";
-    $obj_tool_map{"addr2line"} = "false";  # no addr2line
-    $obj_tool_map{"objdump"} = "false";  # no objdump
-  }
-
-  # Go fill in %obj_tool_map with the pathnames to use:
-  foreach my $tool (keys %obj_tool_map) {
-    $obj_tool_map{$tool} = ConfigureTool($obj_tool_map{$tool});
-  }
-}
-
-# Returns the path of a caller-specified object tool.  If --tools or
-# PPROF_TOOLS are specified, then returns the full path to the tool
-# with that prefix.  Otherwise, returns the path unmodified (which
-# means we will look for it on PATH).
-sub ConfigureTool {
-  my $tool = shift;
-  my $path;
-
-  # --tools (or $PPROF_TOOLS) is a comma separated list, where each
-  # item is either a) a pathname prefix, or b) a map of the form
-  # <tool>:<path>.  First we look for an entry of type (b) for our
-  # tool.  If one is found, we use it.  Otherwise, we consider all the
-  # pathname prefixes in turn, until one yields an existing file.  If
-  # none does, we use a default path.
-  my $tools = $main::opt_tools || $ENV{"PPROF_TOOLS"} || "";
-  if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) {
-    $path = $2;
-    # TODO(csilvers): sanity-check that $path exists?  Hard if it's relative.
-  } elsif ($tools ne '') {
-    foreach my $prefix (split(',', $tools)) {
-      next if ($prefix =~ /:/);    # ignore "tool:fullpath" entries in the list
-      if (-x $prefix . $tool) {
-        $path = $prefix . $tool;
-        last;
-      }
-    }
-    if (!$path) {
-      error("No '$tool' found with prefix specified by " .
-            "--tools (or \$PPROF_TOOLS) '$tools'\n");
-    }
-  } else {
-    # ... otherwise use the version that exists in the same directory as
-    # pprof.  If there's nothing there, use $PATH.
-    $0 =~ m,[^/]*$,;     # this is everything after the last slash
-    my $dirname = $`;    # this is everything up to and including the last slash
-    if (-x "$dirname$tool") {
-      $path = "$dirname$tool";
-    } else {
-      $path = $tool;
-    }
-  }
-  if ($main::opt_debug) { print STDERR "Using '$path' for '$tool'.\n"; }
-  return $path;
-}
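# Illustrative examples only (ours, not part of pprof; the paths are made up)
# of the two forms the --tools / $PPROF_TOOLS list accepts, per the parsing above:
#   --tools=/opt/cross/bin/prefix-             every tool gets this pathname prefix
#   --tools=nm:/opt/cross/bin/gnm,/usr/bin/    nm is mapped explicitly; the rest
#                                              fall back to the /usr/bin/ prefix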
-
-sub ShellEscape {
-  my @escaped_words = ();
-  foreach my $word (@_) {
-    my $escaped_word = $word;
-    if ($word =~ m![^a-zA-Z0-9/.,_=-]!) {  # check for anything not in whitelist
-      $escaped_word =~ s/'/'\\''/g;
-      $escaped_word = "'$escaped_word'";
-    }
-    push(@escaped_words, $escaped_word);
-  }
-  return join(" ", @escaped_words);
-}
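# Illustrative usage only, not part of pprof (the path is made up): words made
# entirely of whitelisted characters pass through unquoted, anything else is
# wrapped in single quotes, e.g.
#   ShellEscape("nm", "--demangle", "/tmp/my lib.so")
# returns the single string
#   nm --demangle '/tmp/my lib.so'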
-
-sub cleanup {
-  unlink($main::tmpfile_sym);
-  unlink(keys %main::tempnames);
-
-  # We leave any collected profiles in $HOME/pprof in case the user wants
-  # to look at them later.  We print a message informing them of this.
-  if ((scalar(@main::profile_files) > 0) &&
-      defined($main::collected_profile)) {
-    if (scalar(@main::profile_files) == 1) {
-      print STDERR "Dynamically gathered profile is in $main::collected_profile\n";
-    }
-    print STDERR "If you want to investigate this profile further, you can do:\n";
-    print STDERR "\n";
-    print STDERR "  pprof \\\n";
-    print STDERR "    $main::prog \\\n";
-    print STDERR "    $main::collected_profile\n";
-    print STDERR "\n";
-  }
-}
-
-sub sighandler {
-  cleanup();
-  exit(1);
-}
-
-sub error {
-  my $msg = shift;
-  print STDERR $msg;
-  cleanup();
-  exit(1);
-}
-
-
-# Run $nm_command and get all the resulting procedure boundaries whose
-# names match "$regexp" and returns them in a hashtable mapping from
-# procedure name to a two-element vector of [start address, end address]
-sub GetProcedureBoundariesViaNm {
-  my $escaped_nm_command = shift;    # shell-escaped
-  my $regexp = shift;
-
-  my $symbol_table = {};
-  open(NM, "$escaped_nm_command |") || error("$escaped_nm_command: $!\n");
-  my $last_start = "0";
-  my $routine = "";
-  while (<NM>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    if (m/^\s*([0-9a-f]+) (.) (..*)/) {
-      my $start_val = $1;
-      my $type = $2;
-      my $this_routine = $3;
-
-      # It's possible for two symbols to share the same address, if
-      # one is a zero-length variable (like __start_google_malloc) or
-      # one symbol is a weak alias to another (like __libc_malloc).
-      # In such cases, we want to ignore all values except for the
-      # actual symbol, which in nm-speak has type "T".  The logic
-      # below does this, though it's a bit tricky: when we have a series
-      # of lines with the same address, the first one gets queued up to
-      # be processed.  However, it won't
-      # *actually* be processed until later, when we read a line with
-      # a different address.  That means that as long as we're reading
-      # lines with the same address, we have a chance to replace that
-      # item in the queue, which we do whenever we see a 'T' entry --
-      # that is, a line with type 'T'.  If we never see a 'T' entry,
-      # we'll just go ahead and process the first entry (which never
-      # got touched in the queue), and ignore the others.
-      if ($start_val eq $last_start && $type =~ /t/i) {
-        # We are the 'T' symbol at this address, replace previous symbol.
-        $routine = $this_routine;
-        next;
-      } elsif ($start_val eq $last_start) {
-        # We're not the 'T' symbol at this address, so ignore us.
-        next;
-      }
-
-      if ($this_routine eq $sep_symbol) {
-        $sep_address = HexExtend($start_val);
-      }
-
-      # Tag this routine with the starting address in case the image
-      # has multiple occurrences of this routine.  We use a syntax
-      # that resembles template parameters that are automatically
-      # stripped out by ShortFunctionName()
-      $this_routine .= "<$start_val>";
-
-      if (defined($routine) && $routine =~ m/$regexp/) {
-        $symbol_table->{$routine} = [HexExtend($last_start),
-                                     HexExtend($start_val)];
-      }
-      $last_start = $start_val;
-      $routine = $this_routine;
-    } elsif (m/^Loaded image name: (.+)/) {
-      # The win32 nm workalike emits information about the binary it is using.
-      if ($main::opt_debug) { print STDERR "Using Image $1\n"; }
-    } elsif (m/^PDB file name: (.+)/) {
-      # The win32 nm workalike emits information about the pdb it is using.
-      if ($main::opt_debug) { print STDERR "Using PDB $1\n"; }
-    }
-  }
-  close(NM);
-  # Handle the last line in the nm output.  Unfortunately, we don't know
-  # how big this last symbol is, because we don't know how big the file
-  # is.  For now, we just give it a size of 0.
-  # TODO(csilvers): do better here.
-  if (defined($routine) && $routine =~ m/$regexp/) {
-    $symbol_table->{$routine} = [HexExtend($last_start),
-                                 HexExtend($last_start)];
-  }
-  return $symbol_table;
-}
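# Illustrative only (ours, not part of pprof): a typical "nm -n" output line
# matched by the regex above,
#   0000000000401b40 T main
# gives $start_val = "0000000000401b40", $type = "T", $this_routine = "main".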
-
-# Gets the procedure boundaries for all routines in "$image" whose names
-# match "$regexp" and returns them in a hashtable mapping from procedure
-# name to a two-element vector of [start address, end address].
-# Will return an empty map if nm is not installed or not working properly.
-sub GetProcedureBoundaries {
-  my $image = shift;
-  my $regexp = shift;
-
-  # If $image doesn't start with /, then put ./ in front of it.  This works
-  # around an obnoxious bug in our probing of nm -f behavior.
-  # "nm -f $image" is supposed to fail on GNU nm, but if:
-  #
-  # a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND
-  # b. you have a.out in your current directory (a not uncommon occurrence)
-  #
-  # then "nm -f $image" succeeds because -f only looks at the first letter of
-  # the argument, which looks valid because it's [BbSsPp], and then since
-  # there's no image provided, it looks for a.out and finds it.
-  #
-  # This regex makes sure that $image starts with . or /, forcing the -f
-  # parsing to fail since . and / are not valid formats.
-  $image =~ s#^[^/]#./$&#;
-
-  # For libc libraries, the copy in /usr/lib/debug contains debugging symbols
-  my $debugging = DebuggingLibrary($image);
-  if ($debugging) {
-    $image = $debugging;
-  }
-
-  my $nm = $obj_tool_map{"nm"};
-  my $cppfilt = $obj_tool_map{"c++filt"};
-
-  # nm can fail for two reasons: 1) $image isn't a debug library; 2) nm
-  # binary doesn't support --demangle.  In addition, for OS X we need
-  # to use the -f flag to get 'flat' nm output (otherwise we don't sort
-  # properly and get incorrect results).  Unfortunately, GNU nm uses -f
-  # in an incompatible way.  So first we test whether our nm supports
-  # --demangle and -f.
-  my $demangle_flag = "";
-  my $cppfilt_flag = "";
-  my $to_devnull = ">$dev_null 2>&1";
-  if (system(ShellEscape($nm, "--demangle", "image") . $to_devnull) == 0) {
-    # In this mode, we do "nm --demangle <foo>"
-    $demangle_flag = "--demangle";
-    $cppfilt_flag = "";
-  } elsif (system(ShellEscape($cppfilt, $image) . $to_devnull) == 0) {
-    # In this mode, we do "nm <foo> | c++filt"
-    $cppfilt_flag = " | " . ShellEscape($cppfilt);
-  };
-  my $flatten_flag = "";
-  if (system(ShellEscape($nm, "-f", $image) . $to_devnull) == 0) {
-    $flatten_flag = "-f";
-  }
-
-  # Finally, in case $image isn't a debug library, we try again with
-  # -D to at least get *exported* symbols.  If we can't use --demangle,
-  # we use c++filt instead, if it exists on this system.
-  my @nm_commands = (ShellEscape($nm, "-n", $flatten_flag, $demangle_flag,
-                                 $image) . " 2>$dev_null $cppfilt_flag",
-                     ShellEscape($nm, "-D", "-n", $flatten_flag, $demangle_flag,
-                                 $image) . " 2>$dev_null $cppfilt_flag",
-                     # 6nm is for Go binaries
-                     ShellEscape("6nm", "$image") . " 2>$dev_null | sort",
-                     );
-
-  # If the executable is an MS Windows PDB-format executable, we'll
-  # have set up obj_tool_map("nm_pdb").  In this case, we actually
-  # want to use both unix nm and windows-specific nm_pdb, since
-  # PDB-format executables can apparently include dwarf .o files.
-  if (exists $obj_tool_map{"nm_pdb"}) {
-    push(@nm_commands,
-         ShellEscape($obj_tool_map{"nm_pdb"}, "--demangle", $image)
-         . " 2>$dev_null");
-  }
-
-  foreach my $nm_command (@nm_commands) {
-    my $symbol_table = GetProcedureBoundariesViaNm($nm_command, $regexp);
-    return $symbol_table if (%{$symbol_table});
-  }
-  my $symbol_table = {};
-  return $symbol_table;
-}
-
-
-# The test vectors for AddressAdd/Sub/Inc are 8-16-nibble hex strings.
-# To make them more readable, we add underscores at interesting places.
-# This routine removes the underscores, producing the canonical representation
-# used by pprof to represent addresses, particularly in the tested routines.
-sub CanonicalHex {
-  my $arg = shift;
-  return join '', (split '_',$arg);
-}
-
-
-# Unit test for AddressAdd:
-sub AddressAddUnitTest {
-  my $test_data_8 = shift;
-  my $test_data_16 = shift;
-  my $error_count = 0;
-  my $fail_count = 0;
-  my $pass_count = 0;
-  # print STDERR "AddressAddUnitTest: ", 1+$#{$test_data_8}, " tests\n";
-
-  # First a few 8-nibble addresses.  Note that this implementation uses
-  # plain old arithmetic, so a quick sanity check along with verifying what
-  # happens to overflow (we want it to wrap):
-  $address_length = 8;
-  foreach my $row (@{$test_data_8}) {
-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
-    my $sum = AddressAdd ($row->[0], $row->[1]);
-    if ($sum ne $row->[2]) {
-      printf STDERR "ERROR: %s != %s + %s = %s\n", $sum,
-             $row->[0], $row->[1], $row->[2];
-      ++$fail_count;
-    } else {
-      ++$pass_count;
-    }
-  }
-  printf STDERR "AddressAdd 32-bit tests: %d passes, %d failures\n",
-         $pass_count, $fail_count;
-  $error_count = $fail_count;
-  $fail_count = 0;
-  $pass_count = 0;
-
-  # Now 16-nibble addresses.
-  $address_length = 16;
-  foreach my $row (@{$test_data_16}) {
-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
-    my $sum = AddressAdd (CanonicalHex($row->[0]), CanonicalHex($row->[1]));
-    if ($sum ne CanonicalHex($row->[2])) {
-      printf STDERR "ERROR: %s != %s + %s = %s\n", $sum,
-             $row->[0], $row->[1], $row->[2];
-      ++$fail_count;
-    } else {
-      ++$pass_count;
-    }
-  }
-  printf STDERR "AddressAdd 64-bit tests: %d passes, %d failures\n",
-         $pass_count, $fail_count;
-  $error_count += $fail_count;
-
-  return $error_count;
-}
-
-
-# Unit test for AddressSub:
-sub AddressSubUnitTest {
-  my $test_data_8 = shift;
-  my $test_data_16 = shift;
-  my $error_count = 0;
-  my $fail_count = 0;
-  my $pass_count = 0;
-  # print STDERR "AddressSubUnitTest: ", 1+$#{$test_data_8}, " tests\n";
-
-  # First a few 8-nibble addresses.  Note that this implementation uses
-  # plain old arithmetic, so a quick sanity check along with verifying what
-  # happens to overflow (we want it to wrap):
-  $address_length = 8;
-  foreach my $row (@{$test_data_8}) {
-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
-    my $sum = AddressSub ($row->[0], $row->[1]);
-    if ($sum ne $row->[3]) {
-      printf STDERR "ERROR: %s != %s - %s = %s\n", $sum,
-             $row->[0], $row->[1], $row->[3];
-      ++$fail_count;
-    } else {
-      ++$pass_count;
-    }
-  }
-  printf STDERR "AddressSub 32-bit tests: %d passes, %d failures\n",
-         $pass_count, $fail_count;
-  $error_count = $fail_count;
-  $fail_count = 0;
-  $pass_count = 0;
-
-  # Now 16-nibble addresses.
-  $address_length = 16;
-  foreach my $row (@{$test_data_16}) {
-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
-    my $sum = AddressSub (CanonicalHex($row->[0]), CanonicalHex($row->[1]));
-    if ($sum ne CanonicalHex($row->[3])) {
-      printf STDERR "ERROR: %s != %s - %s = %s\n", $sum,
-             $row->[0], $row->[1], $row->[3];
-      ++$fail_count;
-    } else {
-      ++$pass_count;
-    }
-  }
-  printf STDERR "AddressSub 64-bit tests: %d passes, %d failures\n",
-         $pass_count, $fail_count;
-  $error_count += $fail_count;
-
-  return $error_count;
-}
-
-
-# Unit test for AddressInc:
-sub AddressIncUnitTest {
-  my $test_data_8 = shift;
-  my $test_data_16 = shift;
-  my $error_count = 0;
-  my $fail_count = 0;
-  my $pass_count = 0;
-  # print STDERR "AddressIncUnitTest: ", 1+$#{$test_data_8}, " tests\n";
-
-  # First a few 8-nibble addresses.  Note that this implementation uses
-  # plain old arithmetic, so a quick sanity check along with verifying what
-  # happens to overflow (we want it to wrap):
-  $address_length = 8;
-  foreach my $row (@{$test_data_8}) {
-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
-    my $sum = AddressInc ($row->[0]);
-    if ($sum ne $row->[4]) {
-      printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum,
-             $row->[0], $row->[4];
-      ++$fail_count;
-    } else {
-      ++$pass_count;
-    }
-  }
-  printf STDERR "AddressInc 32-bit tests: %d passes, %d failures\n",
-         $pass_count, $fail_count;
-  $error_count = $fail_count;
-  $fail_count = 0;
-  $pass_count = 0;
-
-  # Now 16-nibble addresses.
-  $address_length = 16;
-  foreach my $row (@{$test_data_16}) {
-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
-    my $sum = AddressInc (CanonicalHex($row->[0]));
-    if ($sum ne CanonicalHex($row->[4])) {
-      printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum,
-             $row->[0], $row->[4];
-      ++$fail_count;
-    } else {
-      ++$pass_count;
-    }
-  }
-  printf STDERR "AddressInc 64-bit tests: %d passes, %d failures\n",
-         $pass_count, $fail_count;
-  $error_count += $fail_count;
-
-  return $error_count;
-}
-
-
-# Driver for unit tests.
-# Currently just the address add/subtract/increment routines for 64-bit.
-sub RunUnitTests {
-  my $error_count = 0;
-
-  # This is a list of tuples [a, b, a+b, a-b, a+1]
-  my $unit_test_data_8 = [
-    [qw(aaaaaaaa 50505050 fafafafa 5a5a5a5a aaaaaaab)],
-    [qw(50505050 aaaaaaaa fafafafa a5a5a5a6 50505051)],
-    [qw(ffffffff aaaaaaaa aaaaaaa9 55555555 00000000)],
-    [qw(00000001 ffffffff 00000000 00000002 00000002)],
-    [qw(00000001 fffffff0 fffffff1 00000011 00000002)],
-  ];
-  my $unit_test_data_16 = [
-    # The implementation handles data in 7-nibble chunks, so those are the
-    # interesting boundaries.
-    [qw(aaaaaaaa 50505050
-        00_000000f_afafafa 00_0000005_a5a5a5a 00_000000a_aaaaaab)],
-    [qw(50505050 aaaaaaaa
-        00_000000f_afafafa ff_ffffffa_5a5a5a6 00_0000005_0505051)],
-    [qw(ffffffff aaaaaaaa
-        00_000001a_aaaaaa9 00_0000005_5555555 00_0000010_0000000)],
-    [qw(00000001 ffffffff
-        00_0000010_0000000 ff_ffffff0_0000002 00_0000000_0000002)],
-    [qw(00000001 fffffff0
-        00_000000f_ffffff1 ff_ffffff0_0000011 00_0000000_0000002)],
-
-    [qw(00_a00000a_aaaaaaa 50505050
-        00_a00000f_afafafa 00_a000005_a5a5a5a 00_a00000a_aaaaaab)],
-    [qw(0f_fff0005_0505050 aaaaaaaa
-        0f_fff000f_afafafa 0f_ffefffa_5a5a5a6 0f_fff0005_0505051)],
-    [qw(00_000000f_fffffff 01_800000a_aaaaaaa
-        01_800001a_aaaaaa9 fe_8000005_5555555 00_0000010_0000000)],
-    [qw(00_0000000_0000001 ff_fffffff_fffffff
-        00_0000000_0000000 00_0000000_0000002 00_0000000_0000002)],
-    [qw(00_0000000_0000001 ff_fffffff_ffffff0
-        ff_fffffff_ffffff1 00_0000000_0000011 00_0000000_0000002)],
-  ];
-
-  $error_count += AddressAddUnitTest($unit_test_data_8, $unit_test_data_16);
-  $error_count += AddressSubUnitTest($unit_test_data_8, $unit_test_data_16);
-  $error_count += AddressIncUnitTest($unit_test_data_8, $unit_test_data_16);
-  if ($error_count > 0) {
-    print STDERR $error_count, " errors: FAILED\n";
-  } else {
-    print STDERR "PASS\n";
-  }
-  exit ($error_count);
-}
diff --git a/src/jemalloc/build-aux/config.guess b/src/jemalloc/build-aux/config.guess
new file mode 100755 (executable)
index 0000000..1f5c50c
--- /dev/null
@@ -0,0 +1,1420 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+#   Copyright 1992-2014 Free Software Foundation, Inc.
+
+timestamp='2014-03-23'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that
+# program.  This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+#
+# Originally written by Per Bothner.
+#
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+#
+# Please send patches with a ChangeLog entry to config-patches@gnu.org.
+
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Operation modes:
+  -h, --help         print this help, then exit
+  -t, --time-stamp   print date of last modification, then exit
+  -v, --version      print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright 1992-2014 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions.  There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+  case $1 in
+    --time-stamp | --time* | -t )
+       echo "$timestamp" ; exit ;;
+    --version | -v )
+       echo "$version" ; exit ;;
+    --help | --h* | -h )
+       echo "$usage"; exit ;;
+    -- )     # Stop option processing
+       shift; break ;;
+    - )        # Use stdin as input.
+       break ;;
+    -* )
+       echo "$me: invalid option $1$help" >&2
+       exit 1 ;;
+    * )
+       break ;;
+  esac
+done
+
+if test $# != 0; then
+  echo "$me: too many arguments$help" >&2
+  exit 1
+fi
+
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# Portable tmp directory creation inspired by the Autoconf team.
+
+set_cc_for_build='
+trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
+trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
+: ${TMPDIR=/tmp} ;
+ { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+ { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
+ { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+ { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
+dummy=$tmp/dummy ;
+tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,)    echo "int x;" > $dummy.c ;
+       for c in cc gcc c89 c99 ; do
+         if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
+            CC_FOR_BUILD="$c"; break ;
+         fi ;
+       done ;
+       if test x"$CC_FOR_BUILD" = x ; then
+         CC_FOR_BUILD=no_compiler_found ;
+       fi
+       ;;
+ ,,*)   CC_FOR_BUILD=$CC ;;
+ ,*,*)  CC_FOR_BUILD=$HOST_CC ;;
+esac ; set_cc_for_build= ;'
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+       PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null`  || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+case "${UNAME_SYSTEM}" in
+Linux|GNU|GNU/*)
+       # If the system lacks a compiler, then just pick glibc.
+       # We could probably try harder.
+       LIBC=gnu
+
+       eval $set_cc_for_build
+       cat <<-EOF > $dummy.c
+       #include <features.h>
+       #if defined(__UCLIBC__)
+       LIBC=uclibc
+       #elif defined(__dietlibc__)
+       LIBC=dietlibc
+       #else
+       LIBC=gnu
+       #endif
+       EOF
+       eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`
+       ;;
+esac
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+    *:NetBSD:*:*)
+       # NetBSD (nbsd) targets should (where applicable) match one or
+       # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
+       # *-*-netbsdecoff* and *-*-netbsd*.  For targets that recently
+       # switched to ELF, *-*-netbsd* would select the old
+       # object file format.  This provides both forward
+       # compatibility and a consistent mechanism for selecting the
+       # object file format.
+       #
+       # Note: NetBSD doesn't particularly care about the vendor
+       # portion of the name.  We always set it to "unknown".
+       sysctl="sysctl -n hw.machine_arch"
+       UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
+           /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+       case "${UNAME_MACHINE_ARCH}" in
+           armeb) machine=armeb-unknown ;;
+           arm*) machine=arm-unknown ;;
+           sh3el) machine=shl-unknown ;;
+           sh3eb) machine=sh-unknown ;;
+           sh5el) machine=sh5le-unknown ;;
+           *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+       esac
+       # The Operating System including object format, if it has switched
+       # to ELF recently, or will in the future.
+       case "${UNAME_MACHINE_ARCH}" in
+           arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+               eval $set_cc_for_build
+               if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+                       | grep -q __ELF__
+               then
+                   # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+                   # Return netbsd for either.  FIX?
+                   os=netbsd
+               else
+                   os=netbsdelf
+               fi
+               ;;
+           *)
+               os=netbsd
+               ;;
+       esac
+       # The OS release
+       # Debian GNU/NetBSD machines have a different userland, and
+       # thus, need a distinct triplet. However, they do not need
+       # kernel version information, so it can be replaced with a
+       # suitable tag, in the style of linux-gnu.
+       case "${UNAME_VERSION}" in
+           Debian*)
+               release='-gnu'
+               ;;
+           *)
+               release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+               ;;
+       esac
+       # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+       # contains redundant information, the shorter form:
+       # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+       echo "${machine}-${os}${release}"
+       exit ;;
+    *:Bitrig:*:*)
+       UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
+       echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE}
+       exit ;;
+    *:OpenBSD:*:*)
+       UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+       echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+       exit ;;
+    *:ekkoBSD:*:*)
+       echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
+       exit ;;
+    *:SolidBSD:*:*)
+       echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
+       exit ;;
+    macppc:MirBSD:*:*)
+       echo powerpc-unknown-mirbsd${UNAME_RELEASE}
+       exit ;;
+    *:MirBSD:*:*)
+       echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
+       exit ;;
+    alpha:OSF1:*:*)
+       case $UNAME_RELEASE in
+       *4.0)
+               UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+               ;;
+       *5.*)
+               UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+               ;;
+       esac
+       # According to Compaq, /usr/sbin/psrinfo has been available on
+       # OSF/1 and Tru64 systems produced since 1995.  I hope that
+       # covers most systems running today.  This code pipes the CPU
+       # types through head -n 1, so we only detect the type of CPU 0.
+       ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^  The alpha \(.*\) processor.*$/\1/p' | head -n 1`
+       case "$ALPHA_CPU_TYPE" in
+           "EV4 (21064)")
+               UNAME_MACHINE="alpha" ;;
+           "EV4.5 (21064)")
+               UNAME_MACHINE="alpha" ;;
+           "LCA4 (21066/21068)")
+               UNAME_MACHINE="alpha" ;;
+           "EV5 (21164)")
+               UNAME_MACHINE="alphaev5" ;;
+           "EV5.6 (21164A)")
+               UNAME_MACHINE="alphaev56" ;;
+           "EV5.6 (21164PC)")
+               UNAME_MACHINE="alphapca56" ;;
+           "EV5.7 (21164PC)")
+               UNAME_MACHINE="alphapca57" ;;
+           "EV6 (21264)")
+               UNAME_MACHINE="alphaev6" ;;
+           "EV6.7 (21264A)")
+               UNAME_MACHINE="alphaev67" ;;
+           "EV6.8CB (21264C)")
+               UNAME_MACHINE="alphaev68" ;;
+           "EV6.8AL (21264B)")
+               UNAME_MACHINE="alphaev68" ;;
+           "EV6.8CX (21264D)")
+               UNAME_MACHINE="alphaev68" ;;
+           "EV6.9A (21264/EV69A)")
+               UNAME_MACHINE="alphaev69" ;;
+           "EV7 (21364)")
+               UNAME_MACHINE="alphaev7" ;;
+           "EV7.9 (21364A)")
+               UNAME_MACHINE="alphaev79" ;;
+       esac
+       # A Pn.n version is a patched version.
+       # A Vn.n version is a released version.
+       # A Tn.n version is a released field test version.
+       # A Xn.n version is an unreleased experimental baselevel.
+       # 1.2 uses "1.2" for uname -r.
+       echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+       # Reset EXIT trap before exiting to avoid spurious non-zero exit code.
+       exitcode=$?
+       trap '' 0
+       exit $exitcode ;;
+    Alpha\ *:Windows_NT*:*)
+       # How do we know it's Interix rather than the generic POSIX subsystem?
+       # Should we change UNAME_MACHINE based on the output of uname instead
+       # of the specific Alpha model?
+       echo alpha-pc-interix
+       exit ;;
+    21064:Windows_NT:50:3)
+       echo alpha-dec-winnt3.5
+       exit ;;
+    Amiga*:UNIX_System_V:4.0:*)
+       echo m68k-unknown-sysv4
+       exit ;;
+    *:[Aa]miga[Oo][Ss]:*:*)
+       echo ${UNAME_MACHINE}-unknown-amigaos
+       exit ;;
+    *:[Mm]orph[Oo][Ss]:*:*)
+       echo ${UNAME_MACHINE}-unknown-morphos
+       exit ;;
+    *:OS/390:*:*)
+       echo i370-ibm-openedition
+       exit ;;
+    *:z/VM:*:*)
+       echo s390-ibm-zvmoe
+       exit ;;
+    *:OS400:*:*)
+       echo powerpc-ibm-os400
+       exit ;;
+    arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+       echo arm-acorn-riscix${UNAME_RELEASE}
+       exit ;;
+    arm*:riscos:*:*|arm*:RISCOS:*:*)
+       echo arm-unknown-riscos
+       exit ;;
+    SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+       echo hppa1.1-hitachi-hiuxmpp
+       exit ;;
+    Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+       # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+       if test "`(/bin/universe) 2>/dev/null`" = att ; then
+               echo pyramid-pyramid-sysv3
+       else
+               echo pyramid-pyramid-bsd
+       fi
+       exit ;;
+    NILE*:*:*:dcosx)
+       echo pyramid-pyramid-svr4
+       exit ;;
+    DRS?6000:unix:4.0:6*)
+       echo sparc-icl-nx6
+       exit ;;
+    DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
+       case `/usr/bin/uname -p` in
+           sparc) echo sparc-icl-nx7; exit ;;
+       esac ;;
+    s390x:SunOS:*:*)
+       echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+       exit ;;
+    sun4H:SunOS:5.*:*)
+       echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+       exit ;;
+    sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+       echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+       exit ;;
+    i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
+       echo i386-pc-auroraux${UNAME_RELEASE}
+       exit ;;
+    i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
+       eval $set_cc_for_build
+       SUN_ARCH="i386"
+       # If there is a compiler, see if it is configured for 64-bit objects.
+       # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+       # This test works for both compilers.
+       if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+           if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+               (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+               grep IS_64BIT_ARCH >/dev/null
+           then
+               SUN_ARCH="x86_64"
+           fi
+       fi
+       echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+       exit ;;
+    sun4*:SunOS:6*:*)
+       # According to config.sub, this is the proper way to canonicalize
+       # SunOS6.  Hard to guess exactly what SunOS6 will be like, but
+       # it's likely to be more like Solaris than SunOS4.
+       echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+       exit ;;
+    sun4*:SunOS:*:*)
+       case "`/usr/bin/arch -k`" in
+           Series*|S4*)
+               UNAME_RELEASE=`uname -v`
+               ;;
+       esac
+       # Japanese Language versions have a version number like `4.1.3-JL'.
+       echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+       exit ;;
+    sun3*:SunOS:*:*)
+       echo m68k-sun-sunos${UNAME_RELEASE}
+       exit ;;
+    sun*:*:4.2BSD:*)
+       UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+       test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+       case "`/bin/arch`" in
+           sun3)
+               echo m68k-sun-sunos${UNAME_RELEASE}
+               ;;
+           sun4)
+               echo sparc-sun-sunos${UNAME_RELEASE}
+               ;;
+       esac
+       exit ;;
+    aushp:SunOS:*:*)
+       echo sparc-auspex-sunos${UNAME_RELEASE}
+       exit ;;
+    # The situation for MiNT is a little confusing.  The machine name
+    # can be virtually anything (anything which is not
+    # "atarist" or "atariste" should at least have a processor
+    # > m68000).  The system name ranges from "MiNT" through "FreeMiNT"
+    # to the lowercase versions "mint" and "freemint".  Finally,
+    # the system name "TOS" denotes a system which is actually not
+    # MiNT.  But MiNT is downward compatible with TOS, so this should
+    # be no problem.
+    atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+       echo m68k-atari-mint${UNAME_RELEASE}
+       exit ;;
+    atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+       echo m68k-atari-mint${UNAME_RELEASE}
+       exit ;;
+    *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+       echo m68k-atari-mint${UNAME_RELEASE}
+       exit ;;
+    milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+       echo m68k-milan-mint${UNAME_RELEASE}
+       exit ;;
+    hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+       echo m68k-hades-mint${UNAME_RELEASE}
+       exit ;;
+    *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+       echo m68k-unknown-mint${UNAME_RELEASE}
+       exit ;;
+    m68k:machten:*:*)
+       echo m68k-apple-machten${UNAME_RELEASE}
+       exit ;;
+    powerpc:machten:*:*)
+       echo powerpc-apple-machten${UNAME_RELEASE}
+       exit ;;
+    RISC*:Mach:*:*)
+       echo mips-dec-mach_bsd4.3
+       exit ;;
+    RISC*:ULTRIX:*:*)
+       echo mips-dec-ultrix${UNAME_RELEASE}
+       exit ;;
+    VAX*:ULTRIX*:*:*)
+       echo vax-dec-ultrix${UNAME_RELEASE}
+       exit ;;
+    2020:CLIX:*:* | 2430:CLIX:*:*)
+       echo clipper-intergraph-clix${UNAME_RELEASE}
+       exit ;;
+    mips:*:*:UMIPS | mips:*:*:RISCos)
+       eval $set_cc_for_build
+       sed 's/^        //' << EOF >$dummy.c
+#ifdef __cplusplus
+#include <stdio.h>  /* for printf() prototype */
+       int main (int argc, char *argv[]) {
+#else
+       int main (argc, argv) int argc; char *argv[]; {
+#endif
+       #if defined (host_mips) && defined (MIPSEB)
+       #if defined (SYSTYPE_SYSV)
+         printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+       #endif
+       #if defined (SYSTYPE_SVR4)
+         printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+       #endif
+       #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+         printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+       #endif
+       #endif
+         exit (-1);
+       }
+EOF
+       $CC_FOR_BUILD -o $dummy $dummy.c &&
+         dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+         SYSTEM_NAME=`$dummy $dummyarg` &&
+           { echo "$SYSTEM_NAME"; exit; }
+       echo mips-mips-riscos${UNAME_RELEASE}
+       exit ;;
+    Motorola:PowerMAX_OS:*:*)
+       echo powerpc-motorola-powermax
+       exit ;;
+    Motorola:*:4.3:PL8-*)
+       echo powerpc-harris-powermax
+       exit ;;
+    Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
+       echo powerpc-harris-powermax
+       exit ;;
+    Night_Hawk:Power_UNIX:*:*)
+       echo powerpc-harris-powerunix
+       exit ;;
+    m88k:CX/UX:7*:*)
+       echo m88k-harris-cxux7
+       exit ;;
+    m88k:*:4*:R4*)
+       echo m88k-motorola-sysv4
+       exit ;;
+    m88k:*:3*:R3*)
+       echo m88k-motorola-sysv3
+       exit ;;
+    AViiON:dgux:*:*)
+       # DG/UX returns AViiON for all architectures
+       UNAME_PROCESSOR=`/usr/bin/uname -p`
+       if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+       then
+           if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+              [ ${TARGET_BINARY_INTERFACE}x = x ]
+           then
+               echo m88k-dg-dgux${UNAME_RELEASE}
+           else
+               echo m88k-dg-dguxbcs${UNAME_RELEASE}
+           fi
+       else
+           echo i586-dg-dgux${UNAME_RELEASE}
+       fi
+       exit ;;
+    M88*:DolphinOS:*:*)        # DolphinOS (SVR3)
+       echo m88k-dolphin-sysv3
+       exit ;;
+    M88*:*:R3*:*)
+       # Delta 88k system running SVR3
+       echo m88k-motorola-sysv3
+       exit ;;
+    XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+       echo m88k-tektronix-sysv3
+       exit ;;
+    Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+       echo m68k-tektronix-bsd
+       exit ;;
+    *:IRIX*:*:*)
+       echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+       exit ;;
+    ????????:AIX?:[12].1:2)   # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+       echo romp-ibm-aix     # uname -m gives an 8 hex-code CPU id
+       exit ;;               # Note that: echo "'`uname -s`'" gives 'AIX '
+    i*86:AIX:*:*)
+       echo i386-ibm-aix
+       exit ;;
+    ia64:AIX:*:*)
+       if [ -x /usr/bin/oslevel ] ; then
+               IBM_REV=`/usr/bin/oslevel`
+       else
+               IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+       fi
+       echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+       exit ;;
+    *:AIX:2:3)
+       if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+               eval $set_cc_for_build
+               sed 's/^                //' << EOF >$dummy.c
+               #include <sys/systemcfg.h>
+
+               main()
+                       {
+                       if (!__power_pc())
+                               exit(1);
+                       puts("powerpc-ibm-aix3.2.5");
+                       exit(0);
+                       }
+EOF
+               if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+               then
+                       echo "$SYSTEM_NAME"
+               else
+                       echo rs6000-ibm-aix3.2.5
+               fi
+       elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+               echo rs6000-ibm-aix3.2.4
+       else
+               echo rs6000-ibm-aix3.2
+       fi
+       exit ;;
+    *:AIX:*:[4567])
+       IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+       if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+               IBM_ARCH=rs6000
+       else
+               IBM_ARCH=powerpc
+       fi
+       if [ -x /usr/bin/oslevel ] ; then
+               IBM_REV=`/usr/bin/oslevel`
+       else
+               IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+       fi
+       echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+       exit ;;
+    *:AIX:*:*)
+       echo rs6000-ibm-aix
+       exit ;;
+    ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+       echo romp-ibm-bsd4.4
+       exit ;;
+    ibmrt:*BSD:*|romp-ibm:BSD:*)            # covers RT/PC BSD and
+       echo romp-ibm-bsd${UNAME_RELEASE}   # 4.3 with uname added to
+       exit ;;                             # report: romp-ibm BSD 4.3
+    *:BOSX:*:*)
+       echo rs6000-bull-bosx
+       exit ;;
+    DPX/2?00:B.O.S.:*:*)
+       echo m68k-bull-sysv3
+       exit ;;
+    9000/[34]??:4.3bsd:1.*:*)
+       echo m68k-hp-bsd
+       exit ;;
+    hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+       echo m68k-hp-bsd4.4
+       exit ;;
+    9000/[34678]??:HP-UX:*:*)
+       HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+       case "${UNAME_MACHINE}" in
+           9000/31? )            HP_ARCH=m68000 ;;
+           9000/[34]?? )         HP_ARCH=m68k ;;
+           9000/[678][0-9][0-9])
+               if [ -x /usr/bin/getconf ]; then
+                   sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+                   sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+                   case "${sc_cpu_version}" in
+                     523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+                     528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+                     532)                      # CPU_PA_RISC2_0
+                       case "${sc_kernel_bits}" in
+                         32) HP_ARCH="hppa2.0n" ;;
+                         64) HP_ARCH="hppa2.0w" ;;
+                         '') HP_ARCH="hppa2.0" ;;   # HP-UX 10.20
+                       esac ;;
+                   esac
+               fi
+               if [ "${HP_ARCH}" = "" ]; then
+                   eval $set_cc_for_build
+                   sed 's/^            //' << EOF >$dummy.c
+
+               #define _HPUX_SOURCE
+               #include <stdlib.h>
+               #include <unistd.h>
+
+               int main ()
+               {
+               #if defined(_SC_KERNEL_BITS)
+                   long bits = sysconf(_SC_KERNEL_BITS);
+               #endif
+                   long cpu  = sysconf (_SC_CPU_VERSION);
+
+                   switch (cpu)
+                       {
+                       case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+                       case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+                       case CPU_PA_RISC2_0:
+               #if defined(_SC_KERNEL_BITS)
+                           switch (bits)
+                               {
+                               case 64: puts ("hppa2.0w"); break;
+                               case 32: puts ("hppa2.0n"); break;
+                               default: puts ("hppa2.0"); break;
+                               } break;
+               #else  /* !defined(_SC_KERNEL_BITS) */
+                           puts ("hppa2.0"); break;
+               #endif
+                       default: puts ("hppa1.0"); break;
+                       }
+                   exit (0);
+               }
+EOF
+                   (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+                   test -z "$HP_ARCH" && HP_ARCH=hppa
+               fi ;;
+       esac
+       if [ ${HP_ARCH} = "hppa2.0w" ]
+       then
+           eval $set_cc_for_build
+
+           # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+           # 32-bit code.  hppa64-hp-hpux* has the same kernel and a compiler
+           # generating 64-bit code.  GNU and HP use different nomenclature:
+           #
+           # $ CC_FOR_BUILD=cc ./config.guess
+           # => hppa2.0w-hp-hpux11.23
+           # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+           # => hppa64-hp-hpux11.23
+
+           if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+               grep -q __LP64__
+           then
+               HP_ARCH="hppa2.0w"
+           else
+               HP_ARCH="hppa64"
+           fi
+       fi
+       echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+       exit ;;
+    ia64:HP-UX:*:*)
+       HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+       echo ia64-hp-hpux${HPUX_REV}
+       exit ;;
+    3050*:HI-UX:*:*)
+       eval $set_cc_for_build
+       sed 's/^        //' << EOF >$dummy.c
+       #include <unistd.h>
+       int
+       main ()
+       {
+         long cpu = sysconf (_SC_CPU_VERSION);
+         /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+            true for CPU_PA_RISC1_0.  CPU_IS_PA_RISC returns correct
+            results, however.  */
+         if (CPU_IS_PA_RISC (cpu))
+           {
+             switch (cpu)
+               {
+                 case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+                 case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+                 case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+                 default: puts ("hppa-hitachi-hiuxwe2"); break;
+               }
+           }
+         else if (CPU_IS_HP_MC68K (cpu))
+           puts ("m68k-hitachi-hiuxwe2");
+         else puts ("unknown-hitachi-hiuxwe2");
+         exit (0);
+       }
+EOF
+       $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+               { echo "$SYSTEM_NAME"; exit; }
+       echo unknown-hitachi-hiuxwe2
+       exit ;;
+    9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+       echo hppa1.1-hp-bsd
+       exit ;;
+    9000/8??:4.3bsd:*:*)
+       echo hppa1.0-hp-bsd
+       exit ;;
+    *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+       echo hppa1.0-hp-mpeix
+       exit ;;
+    hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+       echo hppa1.1-hp-osf
+       exit ;;
+    hp8??:OSF1:*:*)
+       echo hppa1.0-hp-osf
+       exit ;;
+    i*86:OSF1:*:*)
+       if [ -x /usr/sbin/sysversion ] ; then
+           echo ${UNAME_MACHINE}-unknown-osf1mk
+       else
+           echo ${UNAME_MACHINE}-unknown-osf1
+       fi
+       exit ;;
+    parisc*:Lites*:*:*)
+       echo hppa1.1-hp-lites
+       exit ;;
+    C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+       echo c1-convex-bsd
+       exit ;;
+    C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+       if getsysinfo -f scalar_acc
+       then echo c32-convex-bsd
+       else echo c2-convex-bsd
+       fi
+       exit ;;
+    C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+       echo c34-convex-bsd
+       exit ;;
+    C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+       echo c38-convex-bsd
+       exit ;;
+    C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+       echo c4-convex-bsd
+       exit ;;
+    CRAY*Y-MP:*:*:*)
+       echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+       exit ;;
+    CRAY*[A-Z]90:*:*:*)
+       echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+       | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+             -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+             -e 's/\.[^.]*$/.X/'
+       exit ;;
+    CRAY*TS:*:*:*)
+       echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+       exit ;;
+    CRAY*T3E:*:*:*)
+       echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+       exit ;;
+    CRAY*SV1:*:*:*)
+       echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+       exit ;;
+    *:UNICOS/mp:*:*)
+       echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+       exit ;;
+    F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+       FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+       FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+       FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+       echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+       exit ;;
+    5000:UNIX_System_V:4.*:*)
+       FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+       FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+       echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+       exit ;;
+    i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+       echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+       exit ;;
+    sparc*:BSD/OS:*:*)
+       echo sparc-unknown-bsdi${UNAME_RELEASE}
+       exit ;;
+    *:BSD/OS:*:*)
+       echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+       exit ;;
+    *:FreeBSD:*:*)
+       UNAME_PROCESSOR=`/usr/bin/uname -p`
+       case ${UNAME_PROCESSOR} in
+           amd64)
+               echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+           *)
+               echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+       esac
+       exit ;;
+    i*:CYGWIN*:*)
+       echo ${UNAME_MACHINE}-pc-cygwin
+       exit ;;
+    *:MINGW64*:*)
+       echo ${UNAME_MACHINE}-pc-mingw64
+       exit ;;
+    *:MINGW*:*)
+       echo ${UNAME_MACHINE}-pc-mingw32
+       exit ;;
+    *:MSYS*:*)
+       echo ${UNAME_MACHINE}-pc-msys
+       exit ;;
+    i*:windows32*:*)
+       # uname -m includes "-pc" on this system.
+       echo ${UNAME_MACHINE}-mingw32
+       exit ;;
+    i*:PW*:*)
+       echo ${UNAME_MACHINE}-pc-pw32
+       exit ;;
+    *:Interix*:*)
+       case ${UNAME_MACHINE} in
+           x86)
+               echo i586-pc-interix${UNAME_RELEASE}
+               exit ;;
+           authenticamd | genuineintel | EM64T)
+               echo x86_64-unknown-interix${UNAME_RELEASE}
+               exit ;;
+           IA64)
+               echo ia64-unknown-interix${UNAME_RELEASE}
+               exit ;;
+       esac ;;
+    [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
+       echo i${UNAME_MACHINE}-pc-mks
+       exit ;;
+    8664:Windows_NT:*)
+       echo x86_64-pc-mks
+       exit ;;
+    i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+       # How do we know it's Interix rather than the generic POSIX subsystem?
+       # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
+       # change UNAME_MACHINE based on the output of uname instead of i386?
+       echo i586-pc-interix
+       exit ;;
+    i*:UWIN*:*)
+       echo ${UNAME_MACHINE}-pc-uwin
+       exit ;;
+    amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+       echo x86_64-unknown-cygwin
+       exit ;;
+    p*:CYGWIN*:*)
+       echo powerpcle-unknown-cygwin
+       exit ;;
+    prep*:SunOS:5.*:*)
+       echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+       exit ;;
+    *:GNU:*:*)
+       # the GNU system
+       echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+       exit ;;
+    *:GNU/*:*:*)
+       # other systems with GNU libc and userland
+       echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC}
+       exit ;;
+    i*86:Minix:*:*)
+       echo ${UNAME_MACHINE}-pc-minix
+       exit ;;
+    aarch64:Linux:*:*)
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    aarch64_be:Linux:*:*)
+       UNAME_MACHINE=aarch64_be
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    alpha:Linux:*:*)
+       case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+         EV5)   UNAME_MACHINE=alphaev5 ;;
+         EV56)  UNAME_MACHINE=alphaev56 ;;
+         PCA56) UNAME_MACHINE=alphapca56 ;;
+         PCA57) UNAME_MACHINE=alphapca56 ;;
+         EV6)   UNAME_MACHINE=alphaev6 ;;
+         EV67)  UNAME_MACHINE=alphaev67 ;;
+         EV68*) UNAME_MACHINE=alphaev68 ;;
+       esac
+       objdump --private-headers /bin/sh | grep -q ld.so.1
+       if test "$?" = 0 ; then LIBC="gnulibc1" ; fi
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    arc:Linux:*:* | arceb:Linux:*:*)
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    arm*:Linux:*:*)
+       eval $set_cc_for_build
+       if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+           | grep -q __ARM_EABI__
+       then
+           echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       else
+           if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+               | grep -q __ARM_PCS_VFP
+           then
+               echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi
+           else
+               echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf
+           fi
+       fi
+       exit ;;
+    avr32*:Linux:*:*)
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    cris:Linux:*:*)
+       echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+       exit ;;
+    crisv32:Linux:*:*)
+       echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+       exit ;;
+    frv:Linux:*:*)
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    hexagon:Linux:*:*)
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    i*86:Linux:*:*)
+       echo ${UNAME_MACHINE}-pc-linux-${LIBC}
+       exit ;;
+    ia64:Linux:*:*)
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    m32r*:Linux:*:*)
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    m68*:Linux:*:*)
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    mips:Linux:*:* | mips64:Linux:*:*)
+       eval $set_cc_for_build
+       sed 's/^        //' << EOF >$dummy.c
+       #undef CPU
+       #undef ${UNAME_MACHINE}
+       #undef ${UNAME_MACHINE}el
+       #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+       CPU=${UNAME_MACHINE}el
+       #else
+       #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+       CPU=${UNAME_MACHINE}
+       #else
+       CPU=
+       #endif
+       #endif
+EOF
+       eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
+       test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; }
+       ;;
+    openrisc*:Linux:*:*)
+       echo or1k-unknown-linux-${LIBC}
+       exit ;;
+    or32:Linux:*:* | or1k*:Linux:*:*)
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    padre:Linux:*:*)
+       echo sparc-unknown-linux-${LIBC}
+       exit ;;
+    parisc64:Linux:*:* | hppa64:Linux:*:*)
+       echo hppa64-unknown-linux-${LIBC}
+       exit ;;
+    parisc:Linux:*:* | hppa:Linux:*:*)
+       # Look for CPU level
+       case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+         PA7*) echo hppa1.1-unknown-linux-${LIBC} ;;
+         PA8*) echo hppa2.0-unknown-linux-${LIBC} ;;
+         *)    echo hppa-unknown-linux-${LIBC} ;;
+       esac
+       exit ;;
+    ppc64:Linux:*:*)
+       echo powerpc64-unknown-linux-${LIBC}
+       exit ;;
+    ppc:Linux:*:*)
+       echo powerpc-unknown-linux-${LIBC}
+       exit ;;
+    ppc64le:Linux:*:*)
+       echo powerpc64le-unknown-linux-${LIBC}
+       exit ;;
+    ppcle:Linux:*:*)
+       echo powerpcle-unknown-linux-${LIBC}
+       exit ;;
+    s390:Linux:*:* | s390x:Linux:*:*)
+       echo ${UNAME_MACHINE}-ibm-linux-${LIBC}
+       exit ;;
+    sh64*:Linux:*:*)
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    sh*:Linux:*:*)
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    sparc:Linux:*:* | sparc64:Linux:*:*)
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    tile*:Linux:*:*)
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    vax:Linux:*:*)
+       echo ${UNAME_MACHINE}-dec-linux-${LIBC}
+       exit ;;
+    x86_64:Linux:*:*)
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    xtensa*:Linux:*:*)
+       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+       exit ;;
+    i*86:DYNIX/ptx:4*:*)
+       # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+       # earlier versions are messed up and put the nodename in both
+       # sysname and nodename.
+       echo i386-sequent-sysv4
+       exit ;;
+    i*86:UNIX_SV:4.2MP:2.*)
+       # Unixware is an offshoot of SVR4, but it has its own version
+       # number series starting with 2...
+       # I am not positive that other SVR4 systems won't match this,
+       # I just have to hope.  -- rms.
+       # Use sysv4.2uw... so that sysv4* matches it.
+       echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+       exit ;;
+    i*86:OS/2:*:*)
+       # If we were able to find `uname', then EMX Unix compatibility
+       # is probably installed.
+       echo ${UNAME_MACHINE}-pc-os2-emx
+       exit ;;
+    i*86:XTS-300:*:STOP)
+       echo ${UNAME_MACHINE}-unknown-stop
+       exit ;;
+    i*86:atheos:*:*)
+       echo ${UNAME_MACHINE}-unknown-atheos
+       exit ;;
+    i*86:syllable:*:*)
+       echo ${UNAME_MACHINE}-pc-syllable
+       exit ;;
+    i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
+       echo i386-unknown-lynxos${UNAME_RELEASE}
+       exit ;;
+    i*86:*DOS:*:*)
+       echo ${UNAME_MACHINE}-pc-msdosdjgpp
+       exit ;;
+    i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+       UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+       if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+               echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+       else
+               echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+       fi
+       exit ;;
+    i*86:*:5:[678]*)
+       # UnixWare 7.x, OpenUNIX and OpenServer 6.
+       case `/bin/uname -X | grep "^Machine"` in
+           *486*)           UNAME_MACHINE=i486 ;;
+           *Pentium)        UNAME_MACHINE=i586 ;;
+           *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+       esac
+       echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+       exit ;;
+    i*86:*:3.2:*)
+       if test -f /usr/options/cb.name; then
+               UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+               echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+       elif /bin/uname -X 2>/dev/null >/dev/null ; then
+               UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+               (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+               (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+                       && UNAME_MACHINE=i586
+               (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+                       && UNAME_MACHINE=i686
+               (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+                       && UNAME_MACHINE=i686
+               echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+       else
+               echo ${UNAME_MACHINE}-pc-sysv32
+       fi
+       exit ;;
+    pc:*:*:*)
+       # Left here for compatibility:
+       # On DJGPP, uname -m always prints 'pc' and says nothing about
+       # the processor, so we play it safe by assuming i586.
+       # Note: whatever this is, it MUST be the same as what config.sub
+       # prints for the "djgpp" host, or else GDB configury will decide that
+       # this is a cross-build.
+       echo i586-pc-msdosdjgpp
+       exit ;;
+    Intel:Mach:3*:*)
+       echo i386-pc-mach3
+       exit ;;
+    paragon:*:*:*)
+       echo i860-intel-osf1
+       exit ;;
+    i860:*:4.*:*) # i860-SVR4
+       if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+         echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+       else # Add other i860-SVR4 vendors below as they are discovered.
+         echo i860-unknown-sysv${UNAME_RELEASE}  # Unknown i860-SVR4
+       fi
+       exit ;;
+    mini*:CTIX:SYS*5:*)
+       # "miniframe"
+       echo m68010-convergent-sysv
+       exit ;;
+    mc68k:UNIX:SYSTEM5:3.51m)
+       echo m68k-convergent-sysv
+       exit ;;
+    M680?0:D-NIX:5.3:*)
+       echo m68k-diab-dnix
+       exit ;;
+    M68*:*:R3V[5678]*:*)
+       test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+    3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
+       OS_REL=''
+       test -r /etc/.relid \
+       && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+       /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+         && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+       /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+         && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+    3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+       /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+         && { echo i486-ncr-sysv4; exit; } ;;
+    NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+       OS_REL='.3'
+       test -r /etc/.relid \
+           && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+       /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+           && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+       /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+           && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+       /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+           && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+    m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+       echo m68k-unknown-lynxos${UNAME_RELEASE}
+       exit ;;
+    mc68030:UNIX_System_V:4.*:*)
+       echo m68k-atari-sysv4
+       exit ;;
+    TSUNAMI:LynxOS:2.*:*)
+       echo sparc-unknown-lynxos${UNAME_RELEASE}
+       exit ;;
+    rs6000:LynxOS:2.*:*)
+       echo rs6000-unknown-lynxos${UNAME_RELEASE}
+       exit ;;
+    PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
+       echo powerpc-unknown-lynxos${UNAME_RELEASE}
+       exit ;;
+    SM[BE]S:UNIX_SV:*:*)
+       echo mips-dde-sysv${UNAME_RELEASE}
+       exit ;;
+    RM*:ReliantUNIX-*:*:*)
+       echo mips-sni-sysv4
+       exit ;;
+    RM*:SINIX-*:*:*)
+       echo mips-sni-sysv4
+       exit ;;
+    *:SINIX-*:*:*)
+       if uname -p 2>/dev/null >/dev/null ; then
+               UNAME_MACHINE=`(uname -p) 2>/dev/null`
+               echo ${UNAME_MACHINE}-sni-sysv4
+       else
+               echo ns32k-sni-sysv
+       fi
+       exit ;;
+    PENTIUM:*:4.0*:*)  # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+                       # says <Richard.M.Bartel@ccMail.Census.GOV>
+       echo i586-unisys-sysv4
+       exit ;;
+    *:UNIX_System_V:4*:FTX*)
+       # From Gerald Hewes <hewes@openmarket.com>.
+       # How about differentiating between stratus architectures? -djm
+       echo hppa1.1-stratus-sysv4
+       exit ;;
+    *:*:*:FTX*)
+       # From seanf@swdc.stratus.com.
+       echo i860-stratus-sysv4
+       exit ;;
+    i*86:VOS:*:*)
+       # From Paul.Green@stratus.com.
+       echo ${UNAME_MACHINE}-stratus-vos
+       exit ;;
+    *:VOS:*:*)
+       # From Paul.Green@stratus.com.
+       echo hppa1.1-stratus-vos
+       exit ;;
+    mc68*:A/UX:*:*)
+       echo m68k-apple-aux${UNAME_RELEASE}
+       exit ;;
+    news*:NEWS-OS:6*:*)
+       echo mips-sony-newsos6
+       exit ;;
+    R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+       if [ -d /usr/nec ]; then
+               echo mips-nec-sysv${UNAME_RELEASE}
+       else
+               echo mips-unknown-sysv${UNAME_RELEASE}
+       fi
+       exit ;;
+    BeBox:BeOS:*:*)    # BeOS running on hardware made by Be, PPC only.
+       echo powerpc-be-beos
+       exit ;;
+    BeMac:BeOS:*:*)    # BeOS running on Mac or Mac clone, PPC only.
+       echo powerpc-apple-beos
+       exit ;;
+    BePC:BeOS:*:*)     # BeOS running on Intel PC compatible.
+       echo i586-pc-beos
+       exit ;;
+    BePC:Haiku:*:*)    # Haiku running on Intel PC compatible.
+       echo i586-pc-haiku
+       exit ;;
+    x86_64:Haiku:*:*)
+       echo x86_64-unknown-haiku
+       exit ;;
+    SX-4:SUPER-UX:*:*)
+       echo sx4-nec-superux${UNAME_RELEASE}
+       exit ;;
+    SX-5:SUPER-UX:*:*)
+       echo sx5-nec-superux${UNAME_RELEASE}
+       exit ;;
+    SX-6:SUPER-UX:*:*)
+       echo sx6-nec-superux${UNAME_RELEASE}
+       exit ;;
+    SX-7:SUPER-UX:*:*)
+       echo sx7-nec-superux${UNAME_RELEASE}
+       exit ;;
+    SX-8:SUPER-UX:*:*)
+       echo sx8-nec-superux${UNAME_RELEASE}
+       exit ;;
+    SX-8R:SUPER-UX:*:*)
+       echo sx8r-nec-superux${UNAME_RELEASE}
+       exit ;;
+    Power*:Rhapsody:*:*)
+       echo powerpc-apple-rhapsody${UNAME_RELEASE}
+       exit ;;
+    *:Rhapsody:*:*)
+       echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+       exit ;;
+    *:Darwin:*:*)
+       UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+       eval $set_cc_for_build
+       if test "$UNAME_PROCESSOR" = unknown ; then
+           UNAME_PROCESSOR=powerpc
+       fi
+       if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then
+           if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+               if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+                   (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+                   grep IS_64BIT_ARCH >/dev/null
+               then
+                   case $UNAME_PROCESSOR in
+                       i386) UNAME_PROCESSOR=x86_64 ;;
+                       powerpc) UNAME_PROCESSOR=powerpc64 ;;
+                   esac
+               fi
+           fi
+       elif test "$UNAME_PROCESSOR" = i386 ; then
+           # Avoid executing cc on OS X 10.9, as it ships with a stub
+           # that puts up a graphical alert prompting the user to install
+           # developer tools.  Any system running Mac OS X 10.7 or
+           # later (Darwin 11 and later) is required to have a 64-bit
+           # processor. This is not true of the ARM version of Darwin
+           # that Apple uses in portable devices.
+           UNAME_PROCESSOR=x86_64
+       fi
+       echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+       exit ;;
+    *:procnto*:*:* | *:QNX:[0123456789]*:*)
+       UNAME_PROCESSOR=`uname -p`
+       if test "$UNAME_PROCESSOR" = "x86"; then
+               UNAME_PROCESSOR=i386
+               UNAME_MACHINE=pc
+       fi
+       echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+       exit ;;
+    *:QNX:*:4*)
+       echo i386-pc-qnx
+       exit ;;
+    NEO-?:NONSTOP_KERNEL:*:*)
+       echo neo-tandem-nsk${UNAME_RELEASE}
+       exit ;;
+    NSE-*:NONSTOP_KERNEL:*:*)
+       echo nse-tandem-nsk${UNAME_RELEASE}
+       exit ;;
+    NSR-?:NONSTOP_KERNEL:*:*)
+       echo nsr-tandem-nsk${UNAME_RELEASE}
+       exit ;;
+    *:NonStop-UX:*:*)
+       echo mips-compaq-nonstopux
+       exit ;;
+    BS2000:POSIX*:*:*)
+       echo bs2000-siemens-sysv
+       exit ;;
+    DS/*:UNIX_System_V:*:*)
+       echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+       exit ;;
+    *:Plan9:*:*)
+       # "uname -m" is not consistent, so use $cputype instead. 386
+       # is converted to i386 for consistency with other x86
+       # operating systems.
+       if test "$cputype" = "386"; then
+           UNAME_MACHINE=i386
+       else
+           UNAME_MACHINE="$cputype"
+       fi
+       echo ${UNAME_MACHINE}-unknown-plan9
+       exit ;;
+    *:TOPS-10:*:*)
+       echo pdp10-unknown-tops10
+       exit ;;
+    *:TENEX:*:*)
+       echo pdp10-unknown-tenex
+       exit ;;
+    KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+       echo pdp10-dec-tops20
+       exit ;;
+    XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+       echo pdp10-xkl-tops20
+       exit ;;
+    *:TOPS-20:*:*)
+       echo pdp10-unknown-tops20
+       exit ;;
+    *:ITS:*:*)
+       echo pdp10-unknown-its
+       exit ;;
+    SEI:*:*:SEIUX)
+       echo mips-sei-seiux${UNAME_RELEASE}
+       exit ;;
+    *:DragonFly:*:*)
+       echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+       exit ;;
+    *:*VMS:*:*)
+       UNAME_MACHINE=`(uname -p) 2>/dev/null`
+       case "${UNAME_MACHINE}" in
+           A*) echo alpha-dec-vms ; exit ;;
+           I*) echo ia64-dec-vms ; exit ;;
+           V*) echo vax-dec-vms ; exit ;;
+       esac ;;
+    *:XENIX:*:SysV)
+       echo i386-pc-xenix
+       exit ;;
+    i*86:skyos:*:*)
+       echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+       exit ;;
+    i*86:rdos:*:*)
+       echo ${UNAME_MACHINE}-pc-rdos
+       exit ;;
+    i*86:AROS:*:*)
+       echo ${UNAME_MACHINE}-pc-aros
+       exit ;;
+    x86_64:VMkernel:*:*)
+       echo ${UNAME_MACHINE}-unknown-esx
+       exit ;;
+esac
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+This script, last modified $timestamp, has failed to recognize
+the operating system you are using. It is advised that you
+download the most up to date version of the config scripts from
+
+  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+and
+  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+If the version you run ($0) is already up to date, please
+send the following data and any information you think might be
+pertinent to <config-patches@gnu.org> in order to provide the needed
+information to handle your system.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X     = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo               = `(hostinfo) 2>/dev/null`
+/bin/universe          = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch              = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM  = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/src/jemalloc/build-aux/config.sub b/src/jemalloc/build-aux/config.sub
new file mode 100755 (executable)
index 0000000..0ccff77
--- /dev/null
+++ b/src/jemalloc/build-aux/config.sub
@@ -0,0 +1,1797 @@
+#! /bin/sh
+# Configuration validation subroutine script.
+#   Copyright 1992-2014 Free Software Foundation, Inc.
+
+timestamp='2014-05-01'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that
+# program.  This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+
+
+# Please send patches with a ChangeLog entry to config-patches@gnu.org.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support.  The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+#      CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+#      CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
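+#
+# For example (illustrative invocations only; the exact output depends on
+# the alias and default tables below):
+#      config.sub x86_64-linux-gnu   =>  x86_64-pc-linux-gnu
+#      config.sub amd64-linux-gnu    =>  x86_64-pc-linux-gnu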
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS
+       $0 [OPTION] ALIAS
+
+Canonicalize a configuration name.
+
+Operation modes:
+  -h, --help         print this help, then exit
+  -t, --time-stamp   print date of last modification, then exit
+  -v, --version      print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright 1992-2014 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions.  There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+  case $1 in
+    --time-stamp | --time* | -t )
+       echo "$timestamp" ; exit ;;
+    --version | -v )
+       echo "$version" ; exit ;;
+    --help | --h* | -h )
+       echo "$usage"; exit ;;
+    -- )     # Stop option processing
+       shift; break ;;
+    - )        # Use stdin as input.
+       break ;;
+    -* )
+       echo "$me: invalid option $1$help"
+       exit 1 ;;
+
+    *local*)
+       # First pass through any local machine types.
+       echo $1
+       exit ;;
+
+    * )
+       break ;;
+  esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+    exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+    exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
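+# For instance (illustrative): "x86_64-linux-gnu" splits into
+# basic_machine=x86_64 and os=-linux-gnu, while "i386-pc-solaris2.10"
+# splits into basic_machine=i386-pc and os=-solaris2.10.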
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+  nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
+  linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
+  knetbsd*-gnu* | netbsd*-gnu* | \
+  kopensolaris*-gnu* | \
+  storm-chaos* | os2-emx* | rtmk-nova*)
+    os=-$maybe_os
+    basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+    ;;
+  android-linux)
+    os=-linux-android
+    basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
+    ;;
+  *)
+    basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+    if [ $basic_machine != $1 ]
+    then os=`echo $1 | sed 's/.*-/-/'`
+    else os=; fi
+    ;;
+esac
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work.  We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+       -sun*os*)
+               # Prevent following clause from handling this invalid input.
+               ;;
+       -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+       -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+       -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+       -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+       -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+       -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+       -apple | -axis | -knuth | -cray | -microblaze*)
+               os=
+               basic_machine=$1
+               ;;
+       -bluegene*)
+               os=-cnk
+               ;;
+       -sim | -cisco | -oki | -wec | -winbond)
+               os=
+               basic_machine=$1
+               ;;
+       -scout)
+               ;;
+       -wrs)
+               os=-vxworks
+               basic_machine=$1
+               ;;
+       -chorusos*)
+               os=-chorusos
+               basic_machine=$1
+               ;;
+       -chorusrdb)
+               os=-chorusrdb
+               basic_machine=$1
+               ;;
+       -hiux*)
+               os=-hiuxwe2
+               ;;
+       -sco6)
+               os=-sco5v6
+               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+               ;;
+       -sco5)
+               os=-sco3.2v5
+               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+               ;;
+       -sco4)
+               os=-sco3.2v4
+               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+               ;;
+       -sco3.2.[4-9]*)
+               os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+               ;;
+       -sco3.2v[4-9]*)
+               # Don't forget version if it is 3.2v4 or newer.
+               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+               ;;
+       -sco5v6*)
+               # Don't forget version if it is 3.2v4 or newer.
+               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+               ;;
+       -sco*)
+               os=-sco3.2v2
+               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+               ;;
+       -udk*)
+               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+               ;;
+       -isc)
+               os=-isc2.2
+               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+               ;;
+       -clix*)
+               basic_machine=clipper-intergraph
+               ;;
+       -isc*)
+               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+               ;;
+       -lynx*178)
+               os=-lynxos178
+               ;;
+       -lynx*5)
+               os=-lynxos5
+               ;;
+       -lynx*)
+               os=-lynxos
+               ;;
+       -ptx*)
+               basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+               ;;
+       -windowsnt*)
+               os=`echo $os | sed -e 's/windowsnt/winnt/'`
+               ;;
+       -psos*)
+               os=-psos
+               ;;
+       -mint | -mint[0-9]*)
+               basic_machine=m68k-atari
+               os=-mint
+               ;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+       # Recognize the basic CPU types without company name.
+       # Some are omitted here because they have special meanings below.
+       1750a | 580 \
+       | a29k \
+       | aarch64 | aarch64_be \
+       | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
+       | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
+       | am33_2.0 \
+       | arc | arceb \
+       | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \
+       | avr | avr32 \
+       | be32 | be64 \
+       | bfin \
+       | c4x | c8051 | clipper \
+       | d10v | d30v | dlx | dsp16xx \
+       | epiphany \
+       | fido | fr30 | frv \
+       | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+       | hexagon \
+       | i370 | i860 | i960 | ia64 \
+       | ip2k | iq2000 \
+       | k1om \
+       | le32 | le64 \
+       | lm32 \
+       | m32c | m32r | m32rle | m68000 | m68k | m88k \
+       | maxq | mb | microblaze | microblazeel | mcore | mep | metag \
+       | mips | mipsbe | mipseb | mipsel | mipsle \
+       | mips16 \
+       | mips64 | mips64el \
+       | mips64octeon | mips64octeonel \
+       | mips64orion | mips64orionel \
+       | mips64r5900 | mips64r5900el \
+       | mips64vr | mips64vrel \
+       | mips64vr4100 | mips64vr4100el \
+       | mips64vr4300 | mips64vr4300el \
+       | mips64vr5000 | mips64vr5000el \
+       | mips64vr5900 | mips64vr5900el \
+       | mipsisa32 | mipsisa32el \
+       | mipsisa32r2 | mipsisa32r2el \
+       | mipsisa32r6 | mipsisa32r6el \
+       | mipsisa64 | mipsisa64el \
+       | mipsisa64r2 | mipsisa64r2el \
+       | mipsisa64r6 | mipsisa64r6el \
+       | mipsisa64sb1 | mipsisa64sb1el \
+       | mipsisa64sr71k | mipsisa64sr71kel \
+       | mipsr5900 | mipsr5900el \
+       | mipstx39 | mipstx39el \
+       | mn10200 | mn10300 \
+       | moxie \
+       | mt \
+       | msp430 \
+       | nds32 | nds32le | nds32be \
+       | nios | nios2 | nios2eb | nios2el \
+       | ns16k | ns32k \
+       | open8 | or1k | or1knd | or32 \
+       | pdp10 | pdp11 | pj | pjl \
+       | powerpc | powerpc64 | powerpc64le | powerpcle \
+       | pyramid \
+       | rl78 | rx \
+       | score \
+       | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+       | sh64 | sh64le \
+       | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
+       | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
+       | spu \
+       | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
+       | ubicom32 \
+       | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
+       | we32k \
+       | x86 | xc16x | xstormy16 | xtensa \
+       | z8k | z80)
+               basic_machine=$basic_machine-unknown
+               ;;
+       c54x)
+               basic_machine=tic54x-unknown
+               ;;
+       c55x)
+               basic_machine=tic55x-unknown
+               ;;
+       c6x)
+               basic_machine=tic6x-unknown
+               ;;
+       m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip)
+               basic_machine=$basic_machine-unknown
+               os=-none
+               ;;
+       m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+               ;;
+       ms1)
+               basic_machine=mt-unknown
+               ;;
+
+       strongarm | thumb | xscale)
+               basic_machine=arm-unknown
+               ;;
+       xgate)
+               basic_machine=$basic_machine-unknown
+               os=-none
+               ;;
+       xscaleeb)
+               basic_machine=armeb-unknown
+               ;;
+
+       xscaleel)
+               basic_machine=armel-unknown
+               ;;
+
+       # We use `pc' rather than `unknown'
+       # because (1) that's what they normally are, and
+       # (2) the word "unknown" tends to confuse beginning users.
+       i*86 | x86_64)
+         basic_machine=$basic_machine-pc
+         ;;
+       # Object if more than one company name word.
+       *-*-*)
+               echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+               exit 1
+               ;;
+       # Recognize the basic CPU types with company name.
+       580-* \
+       | a29k-* \
+       | aarch64-* | aarch64_be-* \
+       | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
+       | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
+       | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \
+       | arm-*  | armbe-* | armle-* | armeb-* | armv*-* \
+       | avr-* | avr32-* \
+       | be32-* | be64-* \
+       | bfin-* | bs2000-* \
+       | c[123]* | c30-* | [cjt]90-* | c4x-* \
+       | c8051-* | clipper-* | craynv-* | cydra-* \
+       | d10v-* | d30v-* | dlx-* \
+       | elxsi-* \
+       | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
+       | h8300-* | h8500-* \
+       | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+       | hexagon-* \
+       | i*86-* | i860-* | i960-* | ia64-* \
+       | ip2k-* | iq2000-* \
+       | k1om-* \
+       | le32-* | le64-* \
+       | lm32-* \
+       | m32c-* | m32r-* | m32rle-* \
+       | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
+       | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
+       | microblaze-* | microblazeel-* \
+       | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
+       | mips16-* \
+       | mips64-* | mips64el-* \
+       | mips64octeon-* | mips64octeonel-* \
+       | mips64orion-* | mips64orionel-* \
+       | mips64r5900-* | mips64r5900el-* \
+       | mips64vr-* | mips64vrel-* \
+       | mips64vr4100-* | mips64vr4100el-* \
+       | mips64vr4300-* | mips64vr4300el-* \
+       | mips64vr5000-* | mips64vr5000el-* \
+       | mips64vr5900-* | mips64vr5900el-* \
+       | mipsisa32-* | mipsisa32el-* \
+       | mipsisa32r2-* | mipsisa32r2el-* \
+       | mipsisa32r6-* | mipsisa32r6el-* \
+       | mipsisa64-* | mipsisa64el-* \
+       | mipsisa64r2-* | mipsisa64r2el-* \
+       | mipsisa64r6-* | mipsisa64r6el-* \
+       | mipsisa64sb1-* | mipsisa64sb1el-* \
+       | mipsisa64sr71k-* | mipsisa64sr71kel-* \
+       | mipsr5900-* | mipsr5900el-* \
+       | mipstx39-* | mipstx39el-* \
+       | mmix-* \
+       | mt-* \
+       | msp430-* \
+       | nds32-* | nds32le-* | nds32be-* \
+       | nios-* | nios2-* | nios2eb-* | nios2el-* \
+       | none-* | np1-* | ns16k-* | ns32k-* \
+       | open8-* \
+       | or1k*-* \
+       | orion-* \
+       | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+       | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
+       | pyramid-* \
+       | rl78-* | romp-* | rs6000-* | rx-* \
+       | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
+       | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+       | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
+       | sparclite-* \
+       | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \
+       | tahoe-* \
+       | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+       | tile*-* \
+       | tron-* \
+       | ubicom32-* \
+       | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
+       | vax-* \
+       | we32k-* \
+       | x86-* | x86_64-* | xc16x-* | xps100-* \
+       | xstormy16-* | xtensa*-* \
+       | ymp-* \
+       | z8k-* | z80-*)
+               ;;
+       # Recognize the basic CPU types without company name, with glob match.
+       xtensa*)
+               basic_machine=$basic_machine-unknown
+               ;;
+       # Recognize the various machine names and aliases which stand
+       # for a CPU type and a company and sometimes even an OS.
+       386bsd)
+               basic_machine=i386-unknown
+               os=-bsd
+               ;;
+       3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+               basic_machine=m68000-att
+               ;;
+       3b*)
+               basic_machine=we32k-att
+               ;;
+       a29khif)
+               basic_machine=a29k-amd
+               os=-udi
+               ;;
+       abacus)
+               basic_machine=abacus-unknown
+               ;;
+       adobe68k)
+               basic_machine=m68010-adobe
+               os=-scout
+               ;;
+       alliant | fx80)
+               basic_machine=fx80-alliant
+               ;;
+       altos | altos3068)
+               basic_machine=m68k-altos
+               ;;
+       am29k)
+               basic_machine=a29k-none
+               os=-bsd
+               ;;
+       amd64)
+               basic_machine=x86_64-pc
+               ;;
+       amd64-*)
+               basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
+               ;;
+       amdahl)
+               basic_machine=580-amdahl
+               os=-sysv
+               ;;
+       amiga | amiga-*)
+               basic_machine=m68k-unknown
+               ;;
+       amigaos | amigados)
+               basic_machine=m68k-unknown
+               os=-amigaos
+               ;;
+       amigaunix | amix)
+               basic_machine=m68k-unknown
+               os=-sysv4
+               ;;
+       apollo68)
+               basic_machine=m68k-apollo
+               os=-sysv
+               ;;
+       apollo68bsd)
+               basic_machine=m68k-apollo
+               os=-bsd
+               ;;
+       aros)
+               basic_machine=i386-pc
+               os=-aros
+               ;;
+       aux)
+               basic_machine=m68k-apple
+               os=-aux
+               ;;
+       balance)
+               basic_machine=ns32k-sequent
+               os=-dynix
+               ;;
+       blackfin)
+               basic_machine=bfin-unknown
+               os=-linux
+               ;;
+       blackfin-*)
+               basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
+               os=-linux
+               ;;
+       bluegene*)
+               basic_machine=powerpc-ibm
+               os=-cnk
+               ;;
+       c54x-*)
+               basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
+               ;;
+       c55x-*)
+               basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
+               ;;
+       c6x-*)
+               basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
+               ;;
+       c90)
+               basic_machine=c90-cray
+               os=-unicos
+               ;;
+       cegcc)
+               basic_machine=arm-unknown
+               os=-cegcc
+               ;;
+       convex-c1)
+               basic_machine=c1-convex
+               os=-bsd
+               ;;
+       convex-c2)
+               basic_machine=c2-convex
+               os=-bsd
+               ;;
+       convex-c32)
+               basic_machine=c32-convex
+               os=-bsd
+               ;;
+       convex-c34)
+               basic_machine=c34-convex
+               os=-bsd
+               ;;
+       convex-c38)
+               basic_machine=c38-convex
+               os=-bsd
+               ;;
+       cray | j90)
+               basic_machine=j90-cray
+               os=-unicos
+               ;;
+       craynv)
+               basic_machine=craynv-cray
+               os=-unicosmp
+               ;;
+       cr16 | cr16-*)
+               basic_machine=cr16-unknown
+               os=-elf
+               ;;
+       crds | unos)
+               basic_machine=m68k-crds
+               ;;
+       crisv32 | crisv32-* | etraxfs*)
+               basic_machine=crisv32-axis
+               ;;
+       cris | cris-* | etrax*)
+               basic_machine=cris-axis
+               ;;
+       crx)
+               basic_machine=crx-unknown
+               os=-elf
+               ;;
+       da30 | da30-*)
+               basic_machine=m68k-da30
+               ;;
+       decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+               basic_machine=mips-dec
+               ;;
+       decsystem10* | dec10*)
+               basic_machine=pdp10-dec
+               os=-tops10
+               ;;
+       decsystem20* | dec20*)
+               basic_machine=pdp10-dec
+               os=-tops20
+               ;;
+       delta | 3300 | motorola-3300 | motorola-delta \
+             | 3300-motorola | delta-motorola)
+               basic_machine=m68k-motorola
+               ;;
+       delta88)
+               basic_machine=m88k-motorola
+               os=-sysv3
+               ;;
+       dicos)
+               basic_machine=i686-pc
+               os=-dicos
+               ;;
+       djgpp)
+               basic_machine=i586-pc
+               os=-msdosdjgpp
+               ;;
+       dpx20 | dpx20-*)
+               basic_machine=rs6000-bull
+               os=-bosx
+               ;;
+       dpx2* | dpx2*-bull)
+               basic_machine=m68k-bull
+               os=-sysv3
+               ;;
+       ebmon29k)
+               basic_machine=a29k-amd
+               os=-ebmon
+               ;;
+       elxsi)
+               basic_machine=elxsi-elxsi
+               os=-bsd
+               ;;
+       encore | umax | mmax)
+               basic_machine=ns32k-encore
+               ;;
+       es1800 | OSE68k | ose68k | ose | OSE)
+               basic_machine=m68k-ericsson
+               os=-ose
+               ;;
+       fx2800)
+               basic_machine=i860-alliant
+               ;;
+       genix)
+               basic_machine=ns32k-ns
+               ;;
+       gmicro)
+               basic_machine=tron-gmicro
+               os=-sysv
+               ;;
+       go32)
+               basic_machine=i386-pc
+               os=-go32
+               ;;
+       h3050r* | hiux*)
+               basic_machine=hppa1.1-hitachi
+               os=-hiuxwe2
+               ;;
+       h8300hms)
+               basic_machine=h8300-hitachi
+               os=-hms
+               ;;
+       h8300xray)
+               basic_machine=h8300-hitachi
+               os=-xray
+               ;;
+       h8500hms)
+               basic_machine=h8500-hitachi
+               os=-hms
+               ;;
+       harris)
+               basic_machine=m88k-harris
+               os=-sysv3
+               ;;
+       hp300-*)
+               basic_machine=m68k-hp
+               ;;
+       hp300bsd)
+               basic_machine=m68k-hp
+               os=-bsd
+               ;;
+       hp300hpux)
+               basic_machine=m68k-hp
+               os=-hpux
+               ;;
+       hp3k9[0-9][0-9] | hp9[0-9][0-9])
+               basic_machine=hppa1.0-hp
+               ;;
+       hp9k2[0-9][0-9] | hp9k31[0-9])
+               basic_machine=m68000-hp
+               ;;
+       hp9k3[2-9][0-9])
+               basic_machine=m68k-hp
+               ;;
+       hp9k6[0-9][0-9] | hp6[0-9][0-9])
+               basic_machine=hppa1.0-hp
+               ;;
+       hp9k7[0-79][0-9] | hp7[0-79][0-9])
+               basic_machine=hppa1.1-hp
+               ;;
+       hp9k78[0-9] | hp78[0-9])
+               # FIXME: really hppa2.0-hp
+               basic_machine=hppa1.1-hp
+               ;;
+       hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+               # FIXME: really hppa2.0-hp
+               basic_machine=hppa1.1-hp
+               ;;
+       hp9k8[0-9][13679] | hp8[0-9][13679])
+               basic_machine=hppa1.1-hp
+               ;;
+       hp9k8[0-9][0-9] | hp8[0-9][0-9])
+               basic_machine=hppa1.0-hp
+               ;;
+       hppa-next)
+               os=-nextstep3
+               ;;
+       hppaosf)
+               basic_machine=hppa1.1-hp
+               os=-osf
+               ;;
+       hppro)
+               basic_machine=hppa1.1-hp
+               os=-proelf
+               ;;
+       i370-ibm* | ibm*)
+               basic_machine=i370-ibm
+               ;;
+       i*86v32)
+               basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+               os=-sysv32
+               ;;
+       i*86v4*)
+               basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+               os=-sysv4
+               ;;
+       i*86v)
+               basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+               os=-sysv
+               ;;
+       i*86sol2)
+               basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+               os=-solaris2
+               ;;
+       i386mach)
+               basic_machine=i386-mach
+               os=-mach
+               ;;
+       i386-vsta | vsta)
+               basic_machine=i386-unknown
+               os=-vsta
+               ;;
+       iris | iris4d)
+               basic_machine=mips-sgi
+               case $os in
+                   -irix*)
+                       ;;
+                   *)
+                       os=-irix4
+                       ;;
+               esac
+               ;;
+       isi68 | isi)
+               basic_machine=m68k-isi
+               os=-sysv
+               ;;
+       m68knommu)
+               basic_machine=m68k-unknown
+               os=-linux
+               ;;
+       m68knommu-*)
+               basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
+               os=-linux
+               ;;
+       m88k-omron*)
+               basic_machine=m88k-omron
+               ;;
+       magnum | m3230)
+               basic_machine=mips-mips
+               os=-sysv
+               ;;
+       merlin)
+               basic_machine=ns32k-utek
+               os=-sysv
+               ;;
+       microblaze*)
+               basic_machine=microblaze-xilinx
+               ;;
+       mingw64)
+               basic_machine=x86_64-pc
+               os=-mingw64
+               ;;
+       mingw32)
+               basic_machine=i686-pc
+               os=-mingw32
+               ;;
+       mingw32ce)
+               basic_machine=arm-unknown
+               os=-mingw32ce
+               ;;
+       miniframe)
+               basic_machine=m68000-convergent
+               ;;
+       *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+               basic_machine=m68k-atari
+               os=-mint
+               ;;
+       mips3*-*)
+               basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+               ;;
+       mips3*)
+               basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+               ;;
+       monitor)
+               basic_machine=m68k-rom68k
+               os=-coff
+               ;;
+       morphos)
+               basic_machine=powerpc-unknown
+               os=-morphos
+               ;;
+       msdos)
+               basic_machine=i386-pc
+               os=-msdos
+               ;;
+       ms1-*)
+               basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
+               ;;
+       msys)
+               basic_machine=i686-pc
+               os=-msys
+               ;;
+       mvs)
+               basic_machine=i370-ibm
+               os=-mvs
+               ;;
+       nacl)
+               basic_machine=le32-unknown
+               os=-nacl
+               ;;
+       ncr3000)
+               basic_machine=i486-ncr
+               os=-sysv4
+               ;;
+       netbsd386)
+               basic_machine=i386-unknown
+               os=-netbsd
+               ;;
+       netwinder)
+               basic_machine=armv4l-rebel
+               os=-linux
+               ;;
+       news | news700 | news800 | news900)
+               basic_machine=m68k-sony
+               os=-newsos
+               ;;
+       news1000)
+               basic_machine=m68030-sony
+               os=-newsos
+               ;;
+       news-3600 | risc-news)
+               basic_machine=mips-sony
+               os=-newsos
+               ;;
+       necv70)
+               basic_machine=v70-nec
+               os=-sysv
+               ;;
+       next | m*-next )
+               basic_machine=m68k-next
+               case $os in
+                   -nextstep* )
+                       ;;
+                   -ns2*)
+                     os=-nextstep2
+                       ;;
+                   *)
+                     os=-nextstep3
+                       ;;
+               esac
+               ;;
+       nh3000)
+               basic_machine=m68k-harris
+               os=-cxux
+               ;;
+       nh[45]000)
+               basic_machine=m88k-harris
+               os=-cxux
+               ;;
+       nindy960)
+               basic_machine=i960-intel
+               os=-nindy
+               ;;
+       mon960)
+               basic_machine=i960-intel
+               os=-mon960
+               ;;
+       nonstopux)
+               basic_machine=mips-compaq
+               os=-nonstopux
+               ;;
+       np1)
+               basic_machine=np1-gould
+               ;;
+       neo-tandem)
+               basic_machine=neo-tandem
+               ;;
+       nse-tandem)
+               basic_machine=nse-tandem
+               ;;
+       nsr-tandem)
+               basic_machine=nsr-tandem
+               ;;
+       op50n-* | op60c-*)
+               basic_machine=hppa1.1-oki
+               os=-proelf
+               ;;
+       openrisc | openrisc-*)
+               basic_machine=or32-unknown
+               ;;
+       os400)
+               basic_machine=powerpc-ibm
+               os=-os400
+               ;;
+       OSE68000 | ose68000)
+               basic_machine=m68000-ericsson
+               os=-ose
+               ;;
+       os68k)
+               basic_machine=m68k-none
+               os=-os68k
+               ;;
+       pa-hitachi)
+               basic_machine=hppa1.1-hitachi
+               os=-hiuxwe2
+               ;;
+       paragon)
+               basic_machine=i860-intel
+               os=-osf
+               ;;
+       parisc)
+               basic_machine=hppa-unknown
+               os=-linux
+               ;;
+       parisc-*)
+               basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
+               os=-linux
+               ;;
+       pbd)
+               basic_machine=sparc-tti
+               ;;
+       pbb)
+               basic_machine=m68k-tti
+               ;;
+       pc532 | pc532-*)
+               basic_machine=ns32k-pc532
+               ;;
+       pc98)
+               basic_machine=i386-pc
+               ;;
+       pc98-*)
+               basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
+               ;;
+       pentium | p5 | k5 | k6 | nexgen | viac3)
+               basic_machine=i586-pc
+               ;;
+       pentiumpro | p6 | 6x86 | athlon | athlon_*)
+               basic_machine=i686-pc
+               ;;
+       pentiumii | pentium2 | pentiumiii | pentium3)
+               basic_machine=i686-pc
+               ;;
+       pentium4)
+               basic_machine=i786-pc
+               ;;
+       pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+               basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+               ;;
+       pentiumpro-* | p6-* | 6x86-* | athlon-*)
+               basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+               ;;
+       pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
+               basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+               ;;
+       pentium4-*)
+               basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
+               ;;
+       pn)
+               basic_machine=pn-gould
+               ;;
+       power)  basic_machine=power-ibm
+               ;;
+       ppc | ppcbe)    basic_machine=powerpc-unknown
+               ;;
+       ppc-* | ppcbe-*)
+               basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+               ;;
+       ppcle | powerpclittle | ppc-le | powerpc-little)
+               basic_machine=powerpcle-unknown
+               ;;
+       ppcle-* | powerpclittle-*)
+               basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+               ;;
+       ppc64)  basic_machine=powerpc64-unknown
+               ;;
+       ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
+               ;;
+       ppc64le | powerpc64little | ppc64-le | powerpc64-little)
+               basic_machine=powerpc64le-unknown
+               ;;
+       ppc64le-* | powerpc64little-*)
+               basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+               ;;
+       ps2)
+               basic_machine=i386-ibm
+               ;;
+       pw32)
+               basic_machine=i586-unknown
+               os=-pw32
+               ;;
+       rdos | rdos64)
+               basic_machine=x86_64-pc
+               os=-rdos
+               ;;
+       rdos32)
+               basic_machine=i386-pc
+               os=-rdos
+               ;;
+       rom68k)
+               basic_machine=m68k-rom68k
+               os=-coff
+               ;;
+       rm[46]00)
+               basic_machine=mips-siemens
+               ;;
+       rtpc | rtpc-*)
+               basic_machine=romp-ibm
+               ;;
+       s390 | s390-*)
+               basic_machine=s390-ibm
+               ;;
+       s390x | s390x-*)
+               basic_machine=s390x-ibm
+               ;;
+       sa29200)
+               basic_machine=a29k-amd
+               os=-udi
+               ;;
+       sb1)
+               basic_machine=mipsisa64sb1-unknown
+               ;;
+       sb1el)
+               basic_machine=mipsisa64sb1el-unknown
+               ;;
+       sde)
+               basic_machine=mipsisa32-sde
+               os=-elf
+               ;;
+       sei)
+               basic_machine=mips-sei
+               os=-seiux
+               ;;
+       sequent)
+               basic_machine=i386-sequent
+               ;;
+       sh)
+               basic_machine=sh-hitachi
+               os=-hms
+               ;;
+       sh5el)
+               basic_machine=sh5le-unknown
+               ;;
+       sh64)
+               basic_machine=sh64-unknown
+               ;;
+       sparclite-wrs | simso-wrs)
+               basic_machine=sparclite-wrs
+               os=-vxworks
+               ;;
+       sps7)
+               basic_machine=m68k-bull
+               os=-sysv2
+               ;;
+       spur)
+               basic_machine=spur-unknown
+               ;;
+       st2000)
+               basic_machine=m68k-tandem
+               ;;
+       stratus)
+               basic_machine=i860-stratus
+               os=-sysv4
+               ;;
+       strongarm-* | thumb-*)
+               basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
+               ;;
+       sun2)
+               basic_machine=m68000-sun
+               ;;
+       sun2os3)
+               basic_machine=m68000-sun
+               os=-sunos3
+               ;;
+       sun2os4)
+               basic_machine=m68000-sun
+               os=-sunos4
+               ;;
+       sun3os3)
+               basic_machine=m68k-sun
+               os=-sunos3
+               ;;
+       sun3os4)
+               basic_machine=m68k-sun
+               os=-sunos4
+               ;;
+       sun4os3)
+               basic_machine=sparc-sun
+               os=-sunos3
+               ;;
+       sun4os4)
+               basic_machine=sparc-sun
+               os=-sunos4
+               ;;
+       sun4sol2)
+               basic_machine=sparc-sun
+               os=-solaris2
+               ;;
+       sun3 | sun3-*)
+               basic_machine=m68k-sun
+               ;;
+       sun4)
+               basic_machine=sparc-sun
+               ;;
+       sun386 | sun386i | roadrunner)
+               basic_machine=i386-sun
+               ;;
+       sv1)
+               basic_machine=sv1-cray
+               os=-unicos
+               ;;
+       symmetry)
+               basic_machine=i386-sequent
+               os=-dynix
+               ;;
+       t3e)
+               basic_machine=alphaev5-cray
+               os=-unicos
+               ;;
+       t90)
+               basic_machine=t90-cray
+               os=-unicos
+               ;;
+       tile*)
+               basic_machine=$basic_machine-unknown
+               os=-linux-gnu
+               ;;
+       tx39)
+               basic_machine=mipstx39-unknown
+               ;;
+       tx39el)
+               basic_machine=mipstx39el-unknown
+               ;;
+       toad1)
+               basic_machine=pdp10-xkl
+               os=-tops20
+               ;;
+       tower | tower-32)
+               basic_machine=m68k-ncr
+               ;;
+       tpf)
+               basic_machine=s390x-ibm
+               os=-tpf
+               ;;
+       udi29k)
+               basic_machine=a29k-amd
+               os=-udi
+               ;;
+       ultra3)
+               basic_machine=a29k-nyu
+               os=-sym1
+               ;;
+       v810 | necv810)
+               basic_machine=v810-nec
+               os=-none
+               ;;
+       vaxv)
+               basic_machine=vax-dec
+               os=-sysv
+               ;;
+       vms)
+               basic_machine=vax-dec
+               os=-vms
+               ;;
+       vpp*|vx|vx-*)
+               basic_machine=f301-fujitsu
+               ;;
+       vxworks960)
+               basic_machine=i960-wrs
+               os=-vxworks
+               ;;
+       vxworks68)
+               basic_machine=m68k-wrs
+               os=-vxworks
+               ;;
+       vxworks29k)
+               basic_machine=a29k-wrs
+               os=-vxworks
+               ;;
+       w65*)
+               basic_machine=w65-wdc
+               os=-none
+               ;;
+       w89k-*)
+               basic_machine=hppa1.1-winbond
+               os=-proelf
+               ;;
+       xbox)
+               basic_machine=i686-pc
+               os=-mingw32
+               ;;
+       xps | xps100)
+               basic_machine=xps100-honeywell
+               ;;
+       xscale-* | xscalee[bl]-*)
+               basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
+               ;;
+       ymp)
+               basic_machine=ymp-cray
+               os=-unicos
+               ;;
+       z8k-*-coff)
+               basic_machine=z8k-unknown
+               os=-sim
+               ;;
+       z80-*-coff)
+               basic_machine=z80-unknown
+               os=-sim
+               ;;
+       none)
+               basic_machine=none-none
+               os=-none
+               ;;
+
+# Here we handle the default manufacturer of certain CPU types.  It is in
+# some cases the only manufacturer, in others, it is the most popular.
+       w89k)
+               basic_machine=hppa1.1-winbond
+               ;;
+       op50n)
+               basic_machine=hppa1.1-oki
+               ;;
+       op60c)
+               basic_machine=hppa1.1-oki
+               ;;
+       romp)
+               basic_machine=romp-ibm
+               ;;
+       mmix)
+               basic_machine=mmix-knuth
+               ;;
+       rs6000)
+               basic_machine=rs6000-ibm
+               ;;
+       vax)
+               basic_machine=vax-dec
+               ;;
+       pdp10)
+               # there are many clones, so DEC is not a safe bet
+               basic_machine=pdp10-unknown
+               ;;
+       pdp11)
+               basic_machine=pdp11-dec
+               ;;
+       we32k)
+               basic_machine=we32k-att
+               ;;
+       sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
+               basic_machine=sh-unknown
+               ;;
+       sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
+               basic_machine=sparc-sun
+               ;;
+       cydra)
+               basic_machine=cydra-cydrome
+               ;;
+       orion)
+               basic_machine=orion-highlevel
+               ;;
+       orion105)
+               basic_machine=clipper-highlevel
+               ;;
+       mac | mpw | mac-mpw)
+               basic_machine=m68k-apple
+               ;;
+       pmac | pmac-mpw)
+               basic_machine=powerpc-apple
+               ;;
+       *-unknown)
+               # Make sure to match an already-canonicalized machine name.
+               ;;
+       *)
+               echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+               exit 1
+               ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+       *-digital*)
+               basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+               ;;
+       *-commodore*)
+               basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+               ;;
+       *)
+               ;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+       # First match some system type aliases
+       # that might get confused with valid system types.
+       # -solaris* is a basic system type, with this one exception.
+       -auroraux)
+               os=-auroraux
+               ;;
+       -solaris1 | -solaris1.*)
+               os=`echo $os | sed -e 's|solaris1|sunos4|'`
+               ;;
+       -solaris)
+               os=-solaris2
+               ;;
+       -svr4*)
+               os=-sysv4
+               ;;
+       -unixware*)
+               os=-sysv4.2uw
+               ;;
+       -gnu/linux*)
+               os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+               ;;
+       # First accept the basic system types.
+       # The portable systems come first.
+       # Each alternative MUST END IN A *, to match a version number.
+       # -sysv* is not here because it comes later, after sysvr4.
+       -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+             | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
+             | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
+             | -sym* | -kopensolaris* | -plan9* \
+             | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+             | -aos* | -aros* \
+             | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+             | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+             | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
+             | -bitrig* | -openbsd* | -solidbsd* \
+             | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
+             | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+             | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+             | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+             | -chorusos* | -chorusrdb* | -cegcc* \
+             | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+             | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
+             | -linux-newlib* | -linux-musl* | -linux-uclibc* \
+             | -uxpv* | -beos* | -mpeix* | -udk* \
+             | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
+             | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
+             | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
+             | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
+             | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
+             | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
+             | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* | -tirtos*)
+       # Remember, each alternative MUST END IN *, to match a version number.
+               ;;
+       -qnx*)
+               case $basic_machine in
+                   x86-* | i*86-*)
+                       ;;
+                   *)
+                       os=-nto$os
+                       ;;
+               esac
+               ;;
+       -nto-qnx*)
+               ;;
+       -nto*)
+               os=`echo $os | sed -e 's|nto|nto-qnx|'`
+               ;;
+       -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+             | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
+             | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+               ;;
+       -mac*)
+               os=`echo $os | sed -e 's|mac|macos|'`
+               ;;
+       # Apple iOS
+       -ios*)
+               ;;
+       -linux-dietlibc)
+               os=-linux-dietlibc
+               ;;
+       -linux*)
+               os=`echo $os | sed -e 's|linux|linux-gnu|'`
+               ;;
+       -sunos5*)
+               os=`echo $os | sed -e 's|sunos5|solaris2|'`
+               ;;
+       -sunos6*)
+               os=`echo $os | sed -e 's|sunos6|solaris3|'`
+               ;;
+       -opened*)
+               os=-openedition
+               ;;
+       -os400*)
+               os=-os400
+               ;;
+       -wince*)
+               os=-wince
+               ;;
+       -osfrose*)
+               os=-osfrose
+               ;;
+       -osf*)
+               os=-osf
+               ;;
+       -utek*)
+               os=-bsd
+               ;;
+       -dynix*)
+               os=-bsd
+               ;;
+       -acis*)
+               os=-aos
+               ;;
+       -atheos*)
+               os=-atheos
+               ;;
+       -syllable*)
+               os=-syllable
+               ;;
+       -386bsd)
+               os=-bsd
+               ;;
+       -ctix* | -uts*)
+               os=-sysv
+               ;;
+       -nova*)
+               os=-rtmk-nova
+               ;;
+       -ns2 )
+               os=-nextstep2
+               ;;
+       -nsk*)
+               os=-nsk
+               ;;
+       # Preserve the version number of sinix5.
+       -sinix5.*)
+               os=`echo $os | sed -e 's|sinix|sysv|'`
+               ;;
+       -sinix*)
+               os=-sysv4
+               ;;
+       -tpf*)
+               os=-tpf
+               ;;
+       -triton*)
+               os=-sysv3
+               ;;
+       -oss*)
+               os=-sysv3
+               ;;
+       -svr4)
+               os=-sysv4
+               ;;
+       -svr3)
+               os=-sysv3
+               ;;
+       -sysvr4)
+               os=-sysv4
+               ;;
+       # This must come after -sysvr4.
+       -sysv*)
+               ;;
+       -ose*)
+               os=-ose
+               ;;
+       -es1800*)
+               os=-ose
+               ;;
+       -xenix)
+               os=-xenix
+               ;;
+       -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+               os=-mint
+               ;;
+       -aros*)
+               os=-aros
+               ;;
+       -zvmoe)
+               os=-zvmoe
+               ;;
+       -dicos*)
+               os=-dicos
+               ;;
+       -nacl*)
+               ;;
+       -none)
+               ;;
+       *)
+               # Get rid of the `-' at the beginning of $os.
+               os=`echo $os | sed 's/[^-]*-//'`
+               echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+               exit 1
+               ;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine or, put another way, the most popular os provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system.  Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+       score-*)
+               os=-elf
+               ;;
+       spu-*)
+               os=-elf
+               ;;
+       *-acorn)
+               os=-riscix1.2
+               ;;
+       arm*-rebel)
+               os=-linux
+               ;;
+       arm*-semi)
+               os=-aout
+               ;;
+       c4x-* | tic4x-*)
+               os=-coff
+               ;;
+       c8051-*)
+               os=-elf
+               ;;
+       hexagon-*)
+               os=-elf
+               ;;
+       tic54x-*)
+               os=-coff
+               ;;
+       tic55x-*)
+               os=-coff
+               ;;
+       tic6x-*)
+               os=-coff
+               ;;
+       # This must come before the *-dec entry.
+       pdp10-*)
+               os=-tops20
+               ;;
+       pdp11-*)
+               os=-none
+               ;;
+       *-dec | vax-*)
+               os=-ultrix4.2
+               ;;
+       m68*-apollo)
+               os=-domain
+               ;;
+       i386-sun)
+               os=-sunos4.0.2
+               ;;
+       m68000-sun)
+               os=-sunos3
+               ;;
+       m68*-cisco)
+               os=-aout
+               ;;
+       mep-*)
+               os=-elf
+               ;;
+       mips*-cisco)
+               os=-elf
+               ;;
+       mips*-*)
+               os=-elf
+               ;;
+       or32-*)
+               os=-coff
+               ;;
+       *-tti)  # must be before sparc entry or we get the wrong os.
+               os=-sysv3
+               ;;
+       sparc-* | *-sun)
+               os=-sunos4.1.1
+               ;;
+       *-be)
+               os=-beos
+               ;;
+       *-haiku)
+               os=-haiku
+               ;;
+       *-ibm)
+               os=-aix
+               ;;
+       *-knuth)
+               os=-mmixware
+               ;;
+       *-wec)
+               os=-proelf
+               ;;
+       *-winbond)
+               os=-proelf
+               ;;
+       *-oki)
+               os=-proelf
+               ;;
+       *-hp)
+               os=-hpux
+               ;;
+       *-hitachi)
+               os=-hiux
+               ;;
+       i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+               os=-sysv
+               ;;
+       *-cbm)
+               os=-amigaos
+               ;;
+       *-dg)
+               os=-dgux
+               ;;
+       *-dolphin)
+               os=-sysv3
+               ;;
+       m68k-ccur)
+               os=-rtu
+               ;;
+       m88k-omron*)
+               os=-luna
+               ;;
+       *-next )
+               os=-nextstep
+               ;;
+       *-sequent)
+               os=-ptx
+               ;;
+       *-crds)
+               os=-unos
+               ;;
+       *-ns)
+               os=-genix
+               ;;
+       i370-*)
+               os=-mvs
+               ;;
+       *-next)
+               os=-nextstep3
+               ;;
+       *-gould)
+               os=-sysv
+               ;;
+       *-highlevel)
+               os=-bsd
+               ;;
+       *-encore)
+               os=-bsd
+               ;;
+       *-sgi)
+               os=-irix
+               ;;
+       *-siemens)
+               os=-sysv4
+               ;;
+       *-masscomp)
+               os=-rtu
+               ;;
+       f30[01]-fujitsu | f700-fujitsu)
+               os=-uxpv
+               ;;
+       *-rom68k)
+               os=-coff
+               ;;
+       *-*bug)
+               os=-coff
+               ;;
+       *-apple)
+               os=-macos
+               ;;
+       *-atari*)
+               os=-mint
+               ;;
+       *)
+               os=-none
+               ;;
+esac
+fi
+
+# Here we handle the case where we know the os, and the CPU type, but not the
+# manufacturer.  We pick the logical manufacturer.
+vendor=unknown
+case $basic_machine in
+       *-unknown)
+               case $os in
+                       -riscix*)
+                               vendor=acorn
+                               ;;
+                       -sunos*)
+                               vendor=sun
+                               ;;
+                       -cnk*|-aix*)
+                               vendor=ibm
+                               ;;
+                       -beos*)
+                               vendor=be
+                               ;;
+                       -hpux*)
+                               vendor=hp
+                               ;;
+                       -mpeix*)
+                               vendor=hp
+                               ;;
+                       -hiux*)
+                               vendor=hitachi
+                               ;;
+                       -unos*)
+                               vendor=crds
+                               ;;
+                       -dgux*)
+                               vendor=dg
+                               ;;
+                       -luna*)
+                               vendor=omron
+                               ;;
+                       -genix*)
+                               vendor=ns
+                               ;;
+                       -mvs* | -opened*)
+                               vendor=ibm
+                               ;;
+                       -os400*)
+                               vendor=ibm
+                               ;;
+                       -ptx*)
+                               vendor=sequent
+                               ;;
+                       -tpf*)
+                               vendor=ibm
+                               ;;
+                       -vxsim* | -vxworks* | -windiss*)
+                               vendor=wrs
+                               ;;
+                       -aux*)
+                               vendor=apple
+                               ;;
+                       -hms*)
+                               vendor=hitachi
+                               ;;
+                       -mpw* | -macos*)
+                               vendor=apple
+                               ;;
+                       -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+                               vendor=atari
+                               ;;
+                       -vos*)
+                               vendor=stratus
+                               ;;
+               esac
+               basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+               ;;
+esac
+
+echo $basic_machine$os
+exit
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/src/jemalloc/build-aux/install-sh b/src/jemalloc/build-aux/install-sh
new file mode 100755 (executable)
index 0000000..ebc6691
--- /dev/null
+++ b/src/jemalloc/build-aux/install-sh
@@ -0,0 +1,250 @@
+#! /bin/sh
+#
+# install - install a program, script, or datafile
+# This comes from X11R5 (mit/util/scripts/install.sh).
+#
+# Copyright 1991 by the Massachusetts Institute of Technology
+#
+# Permission to use, copy, modify, distribute, and sell this software and its
+# documentation for any purpose is hereby granted without fee, provided that
+# the above copyright notice appear in all copies and that both that
+# copyright notice and this permission notice appear in supporting
+# documentation, and that the name of M.I.T. not be used in advertising or
+# publicity pertaining to distribution of the software without specific,
+# written prior permission.  M.I.T. makes no representations about the
+# suitability of this software for any purpose.  It is provided "as is"
+# without express or implied warranty.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch.  It can only install one file at a time, a restriction
+# shared with many OS's install programs.
+
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit="${DOITPROG-}"
+
+
+# put in absolute paths if you don't have them in your path; or use env. vars.
+
+mvprog="${MVPROG-mv}"
+cpprog="${CPPROG-cp}"
+chmodprog="${CHMODPROG-chmod}"
+chownprog="${CHOWNPROG-chown}"
+chgrpprog="${CHGRPPROG-chgrp}"
+stripprog="${STRIPPROG-strip}"
+rmprog="${RMPROG-rm}"
+mkdirprog="${MKDIRPROG-mkdir}"
+
+transformbasename=""
+transform_arg=""
+instcmd="$mvprog"
+chmodcmd="$chmodprog 0755"
+chowncmd=""
+chgrpcmd=""
+stripcmd=""
+rmcmd="$rmprog -f"
+mvcmd="$mvprog"
+src=""
+dst=""
+dir_arg=""
+
+while [ x"$1" != x ]; do
+    case $1 in
+       -c) instcmd="$cpprog"
+           shift
+           continue;;
+
+       -d) dir_arg=true
+           shift
+           continue;;
+
+       -m) chmodcmd="$chmodprog $2"
+           shift
+           shift
+           continue;;
+
+       -o) chowncmd="$chownprog $2"
+           shift
+           shift
+           continue;;
+
+       -g) chgrpcmd="$chgrpprog $2"
+           shift
+           shift
+           continue;;
+
+       -s) stripcmd="$stripprog"
+           shift
+           continue;;
+
+       -t=*) transformarg=`echo $1 | sed 's/-t=//'`
+           shift
+           continue;;
+
+       -b=*) transformbasename=`echo $1 | sed 's/-b=//'`
+           shift
+           continue;;
+
+       *)  if [ x"$src" = x ]
+           then
+               src=$1
+           else
+               # this colon is to work around a 386BSD /bin/sh bug
+               :
+               dst=$1
+           fi
+           shift
+           continue;;
+    esac
+done
+
+if [ x"$src" = x ]
+then
+       echo "install:  no input file specified"
+       exit 1
+else
+       true
+fi
+
+if [ x"$dir_arg" != x ]; then
+       dst=$src
+       src=""
+       
+       if [ -d $dst ]; then
+               instcmd=:
+       else
+               instcmd=mkdir
+       fi
+else
+
+# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
+# might cause directories to be created, which would be especially bad 
+# if $src (and thus $dsttmp) contains '*'.
+
+       if [ -f $src -o -d $src ]
+       then
+               true
+       else
+               echo "install:  $src does not exist"
+               exit 1
+       fi
+       
+       if [ x"$dst" = x ]
+       then
+               echo "install:  no destination specified"
+               exit 1
+       else
+               true
+       fi
+
+# If destination is a directory, append the input filename; if your system
+# does not like double slashes in filenames, you may need to add some logic
+
+       if [ -d $dst ]
+       then
+               dst="$dst"/`basename $src`
+       else
+               true
+       fi
+fi
+
+## this sed command emulates the dirname command
+dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
+
+# Make sure that the destination directory exists.
+#  this part is taken from Noah Friedman's mkinstalldirs script
+
+# Skip lots of stat calls in the usual case.
+if [ ! -d "$dstdir" ]; then
+defaultIFS='   
+'
+IFS="${IFS-${defaultIFS}}"
+
+oIFS="${IFS}"
+# Some sh's can't handle IFS=/ for some reason.
+IFS='%'
+set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
+IFS="${oIFS}"
+
+pathcomp=''
+
+while [ $# -ne 0 ] ; do
+       pathcomp="${pathcomp}${1}"
+       shift
+
+       if [ ! -d "${pathcomp}" ] ;
+        then
+               $mkdirprog "${pathcomp}"
+       else
+               true
+       fi
+
+       pathcomp="${pathcomp}/"
+done
+fi
+
+if [ x"$dir_arg" != x ]
+then
+       $doit $instcmd $dst &&
+
+       if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
+       if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
+       if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
+       if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
+else
+
+# If we're going to rename the final executable, determine the name now.
+
+       if [ x"$transformarg" = x ] 
+       then
+               dstfile=`basename $dst`
+       else
+               dstfile=`basename $dst $transformbasename | 
+                       sed $transformarg`$transformbasename
+       fi
+
+# don't allow the sed command to completely eliminate the filename
+
+       if [ x"$dstfile" = x ] 
+       then
+               dstfile=`basename $dst`
+       else
+               true
+       fi
+
+# Make a temp file name in the proper directory.
+
+       dsttmp=$dstdir/#inst.$$#
+
+# Move or copy the file name to the temp name
+
+       $doit $instcmd $src $dsttmp &&
+
+       trap "rm -f ${dsttmp}" 0 &&
+
+# and set any options; do chmod last to preserve setuid bits
+
+# If any of these fail, we abort the whole thing.  If we want to
+# ignore errors from any of these, just make sure not to ignore
+# errors from the above "$doit $instcmd $src $dsttmp" command.
+
+       if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
+       if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
+       if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
+       if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
+
+# Now rename the file to the real destination.
+
+       $doit $rmcmd -f $dstdir/$dstfile &&
+       $doit $mvcmd $dsttmp $dstdir/$dstfile 
+
+fi &&
+
+
+exit 0
diff --git a/src/jemalloc/config.guess b/src/jemalloc/config.guess
deleted file mode 100755 (executable)
index 1f5c50c..0000000
--- a/src/jemalloc/config.guess
+++ /dev/null
@@ -1,1420 +0,0 @@
-#! /bin/sh
-# Attempt to guess a canonical system name.
-#   Copyright 1992-2014 Free Software Foundation, Inc.
-
-timestamp='2014-03-23'
-
-# This file is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>.
-#
-# As a special exception to the GNU General Public License, if you
-# distribute this file as part of a program that contains a
-# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that
-# program.  This Exception is an additional permission under section 7
-# of the GNU General Public License, version 3 ("GPLv3").
-#
-# Originally written by Per Bothner.
-#
-# You can get the latest version of this script from:
-# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
-#
-# Please send patches with a ChangeLog entry to config-patches@gnu.org.
-
-
-me=`echo "$0" | sed -e 's,.*/,,'`
-
-usage="\
-Usage: $0 [OPTION]
-
-Output the configuration name of the system \`$me' is run on.
-
-Operation modes:
-  -h, --help         print this help, then exit
-  -t, --time-stamp   print date of last modification, then exit
-  -v, --version      print version number, then exit
-
-Report bugs and patches to <config-patches@gnu.org>."
-
-version="\
-GNU config.guess ($timestamp)
-
-Originally written by Per Bothner.
-Copyright 1992-2014 Free Software Foundation, Inc.
-
-This is free software; see the source for copying conditions.  There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
-
-help="
-Try \`$me --help' for more information."
-
-# Parse command line
-while test $# -gt 0 ; do
-  case $1 in
-    --time-stamp | --time* | -t )
-       echo "$timestamp" ; exit ;;
-    --version | -v )
-       echo "$version" ; exit ;;
-    --help | --h* | -h )
-       echo "$usage"; exit ;;
-    -- )     # Stop option processing
-       shift; break ;;
-    - )        # Use stdin as input.
-       break ;;
-    -* )
-       echo "$me: invalid option $1$help" >&2
-       exit 1 ;;
-    * )
-       break ;;
-  esac
-done
-
-if test $# != 0; then
-  echo "$me: too many arguments$help" >&2
-  exit 1
-fi
-
-trap 'exit 1' 1 2 15
-
-# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
-# compiler to aid in system detection is discouraged as it requires
-# temporary files to be created and, as you can see below, it is a
-# headache to deal with in a portable fashion.
-
-# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
-# use `HOST_CC' if defined, but it is deprecated.
-
-# Portable tmp directory creation inspired by the Autoconf team.
-
-set_cc_for_build='
-trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
-trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
-: ${TMPDIR=/tmp} ;
- { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
- { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
- { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
- { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
-dummy=$tmp/dummy ;
-tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
-case $CC_FOR_BUILD,$HOST_CC,$CC in
- ,,)    echo "int x;" > $dummy.c ;
-       for c in cc gcc c89 c99 ; do
-         if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
-            CC_FOR_BUILD="$c"; break ;
-         fi ;
-       done ;
-       if test x"$CC_FOR_BUILD" = x ; then
-         CC_FOR_BUILD=no_compiler_found ;
-       fi
-       ;;
- ,,*)   CC_FOR_BUILD=$CC ;;
- ,*,*)  CC_FOR_BUILD=$HOST_CC ;;
-esac ; set_cc_for_build= ;'
-
-# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
-# (ghazi@noc.rutgers.edu 1994-08-24)
-if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
-       PATH=$PATH:/.attbin ; export PATH
-fi
-
-UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
-UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
-UNAME_SYSTEM=`(uname -s) 2>/dev/null`  || UNAME_SYSTEM=unknown
-UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
-
-case "${UNAME_SYSTEM}" in
-Linux|GNU|GNU/*)
-       # If the system lacks a compiler, then just pick glibc.
-       # We could probably try harder.
-       LIBC=gnu
-
-       eval $set_cc_for_build
-       cat <<-EOF > $dummy.c
-       #include <features.h>
-       #if defined(__UCLIBC__)
-       LIBC=uclibc
-       #elif defined(__dietlibc__)
-       LIBC=dietlibc
-       #else
-       LIBC=gnu
-       #endif
-       EOF
-       eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`
-       ;;
-esac
-
-# Note: order is significant - the case branches are not exclusive.
-
-case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
-    *:NetBSD:*:*)
-       # NetBSD (nbsd) targets should (where applicable) match one or
-       # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
-       # *-*-netbsdecoff* and *-*-netbsd*.  For targets that recently
-       # switched to ELF, *-*-netbsd* would select the old
-       # object file format.  This provides both forward
-       # compatibility and a consistent mechanism for selecting the
-       # object file format.
-       #
-       # Note: NetBSD doesn't particularly care about the vendor
-       # portion of the name.  We always set it to "unknown".
-       sysctl="sysctl -n hw.machine_arch"
-       UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
-           /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
-       case "${UNAME_MACHINE_ARCH}" in
-           armeb) machine=armeb-unknown ;;
-           arm*) machine=arm-unknown ;;
-           sh3el) machine=shl-unknown ;;
-           sh3eb) machine=sh-unknown ;;
-           sh5el) machine=sh5le-unknown ;;
-           *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
-       esac
-       # The Operating System including object format, if it has switched
-       # to ELF recently, or will in the future.
-       case "${UNAME_MACHINE_ARCH}" in
-           arm*|i386|m68k|ns32k|sh3*|sparc|vax)
-               eval $set_cc_for_build
-               if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
-                       | grep -q __ELF__
-               then
-                   # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
-                   # Return netbsd for either.  FIX?
-                   os=netbsd
-               else
-                   os=netbsdelf
-               fi
-               ;;
-           *)
-               os=netbsd
-               ;;
-       esac
-       # The OS release
-       # Debian GNU/NetBSD machines have a different userland, and
-       # thus, need a distinct triplet. However, they do not need
-       # kernel version information, so it can be replaced with a
-       # suitable tag, in the style of linux-gnu.
-       case "${UNAME_VERSION}" in
-           Debian*)
-               release='-gnu'
-               ;;
-           *)
-               release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
-               ;;
-       esac
-       # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
-       # contains redundant information, the shorter form:
-       # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
-       echo "${machine}-${os}${release}"
-       exit ;;
-    *:Bitrig:*:*)
-       UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
-       echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE}
-       exit ;;
-    *:OpenBSD:*:*)
-       UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
-       echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
-       exit ;;
-    *:ekkoBSD:*:*)
-       echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
-       exit ;;
-    *:SolidBSD:*:*)
-       echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
-       exit ;;
-    macppc:MirBSD:*:*)
-       echo powerpc-unknown-mirbsd${UNAME_RELEASE}
-       exit ;;
-    *:MirBSD:*:*)
-       echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
-       exit ;;
-    alpha:OSF1:*:*)
-       case $UNAME_RELEASE in
-       *4.0)
-               UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
-               ;;
-       *5.*)
-               UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
-               ;;
-       esac
-       # According to Compaq, /usr/sbin/psrinfo has been available on
-       # OSF/1 and Tru64 systems produced since 1995.  I hope that
-       # covers most systems running today.  This code pipes the CPU
-       # types through head -n 1, so we only detect the type of CPU 0.
-       ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^  The alpha \(.*\) processor.*$/\1/p' | head -n 1`
-       case "$ALPHA_CPU_TYPE" in
-           "EV4 (21064)")
-               UNAME_MACHINE="alpha" ;;
-           "EV4.5 (21064)")
-               UNAME_MACHINE="alpha" ;;
-           "LCA4 (21066/21068)")
-               UNAME_MACHINE="alpha" ;;
-           "EV5 (21164)")
-               UNAME_MACHINE="alphaev5" ;;
-           "EV5.6 (21164A)")
-               UNAME_MACHINE="alphaev56" ;;
-           "EV5.6 (21164PC)")
-               UNAME_MACHINE="alphapca56" ;;
-           "EV5.7 (21164PC)")
-               UNAME_MACHINE="alphapca57" ;;
-           "EV6 (21264)")
-               UNAME_MACHINE="alphaev6" ;;
-           "EV6.7 (21264A)")
-               UNAME_MACHINE="alphaev67" ;;
-           "EV6.8CB (21264C)")
-               UNAME_MACHINE="alphaev68" ;;
-           "EV6.8AL (21264B)")
-               UNAME_MACHINE="alphaev68" ;;
-           "EV6.8CX (21264D)")
-               UNAME_MACHINE="alphaev68" ;;
-           "EV6.9A (21264/EV69A)")
-               UNAME_MACHINE="alphaev69" ;;
-           "EV7 (21364)")
-               UNAME_MACHINE="alphaev7" ;;
-           "EV7.9 (21364A)")
-               UNAME_MACHINE="alphaev79" ;;
-       esac
-       # A Pn.n version is a patched version.
-       # A Vn.n version is a released version.
-       # A Tn.n version is a released field test version.
-       # A Xn.n version is an unreleased experimental baselevel.
-       # 1.2 uses "1.2" for uname -r.
-       echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
-       # Reset EXIT trap before exiting to avoid spurious non-zero exit code.
-       exitcode=$?
-       trap '' 0
-       exit $exitcode ;;
-    Alpha\ *:Windows_NT*:*)
-       # How do we know it's Interix rather than the generic POSIX subsystem?
-       # Should we change UNAME_MACHINE based on the output of uname instead
-       # of the specific Alpha model?
-       echo alpha-pc-interix
-       exit ;;
-    21064:Windows_NT:50:3)
-       echo alpha-dec-winnt3.5
-       exit ;;
-    Amiga*:UNIX_System_V:4.0:*)
-       echo m68k-unknown-sysv4
-       exit ;;
-    *:[Aa]miga[Oo][Ss]:*:*)
-       echo ${UNAME_MACHINE}-unknown-amigaos
-       exit ;;
-    *:[Mm]orph[Oo][Ss]:*:*)
-       echo ${UNAME_MACHINE}-unknown-morphos
-       exit ;;
-    *:OS/390:*:*)
-       echo i370-ibm-openedition
-       exit ;;
-    *:z/VM:*:*)
-       echo s390-ibm-zvmoe
-       exit ;;
-    *:OS400:*:*)
-       echo powerpc-ibm-os400
-       exit ;;
-    arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
-       echo arm-acorn-riscix${UNAME_RELEASE}
-       exit ;;
-    arm*:riscos:*:*|arm*:RISCOS:*:*)
-       echo arm-unknown-riscos
-       exit ;;
-    SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
-       echo hppa1.1-hitachi-hiuxmpp
-       exit ;;
-    Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
-       # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
-       if test "`(/bin/universe) 2>/dev/null`" = att ; then
-               echo pyramid-pyramid-sysv3
-       else
-               echo pyramid-pyramid-bsd
-       fi
-       exit ;;
-    NILE*:*:*:dcosx)
-       echo pyramid-pyramid-svr4
-       exit ;;
-    DRS?6000:unix:4.0:6*)
-       echo sparc-icl-nx6
-       exit ;;
-    DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
-       case `/usr/bin/uname -p` in
-           sparc) echo sparc-icl-nx7; exit ;;
-       esac ;;
-    s390x:SunOS:*:*)
-       echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
-       exit ;;
-    sun4H:SunOS:5.*:*)
-       echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
-       exit ;;
-    sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
-       echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
-       exit ;;
-    i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
-       echo i386-pc-auroraux${UNAME_RELEASE}
-       exit ;;
-    i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
-       eval $set_cc_for_build
-       SUN_ARCH="i386"
-       # If there is a compiler, see if it is configured for 64-bit objects.
-       # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
-       # This test works for both compilers.
-       if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
-           if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
-               (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
-               grep IS_64BIT_ARCH >/dev/null
-           then
-               SUN_ARCH="x86_64"
-           fi
-       fi
-       echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
-       exit ;;
-    sun4*:SunOS:6*:*)
-       # According to config.sub, this is the proper way to canonicalize
-       # SunOS6.  Hard to guess exactly what SunOS6 will be like, but
-       # it's likely to be more like Solaris than SunOS4.
-       echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
-       exit ;;
-    sun4*:SunOS:*:*)
-       case "`/usr/bin/arch -k`" in
-           Series*|S4*)
-               UNAME_RELEASE=`uname -v`
-               ;;
-       esac
-       # Japanese Language versions have a version number like `4.1.3-JL'.
-       echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
-       exit ;;
-    sun3*:SunOS:*:*)
-       echo m68k-sun-sunos${UNAME_RELEASE}
-       exit ;;
-    sun*:*:4.2BSD:*)
-       UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
-       test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
-       case "`/bin/arch`" in
-           sun3)
-               echo m68k-sun-sunos${UNAME_RELEASE}
-               ;;
-           sun4)
-               echo sparc-sun-sunos${UNAME_RELEASE}
-               ;;
-       esac
-       exit ;;
-    aushp:SunOS:*:*)
-       echo sparc-auspex-sunos${UNAME_RELEASE}
-       exit ;;
-    # The situation for MiNT is a little confusing.  The machine name
-    # can be virtually everything (everything which is not
-    # "atarist" or "atariste" at least should have a processor
-    # > m68000).  The system name ranges from "MiNT" over "FreeMiNT"
-    # to the lowercase version "mint" (or "freemint").  Finally
-    # the system name "TOS" denotes a system which is actually not
-    # MiNT.  But MiNT is downward compatible to TOS, so this should
-    # be no problem.
-    atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
-       echo m68k-atari-mint${UNAME_RELEASE}
-       exit ;;
-    atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
-       echo m68k-atari-mint${UNAME_RELEASE}
-       exit ;;
-    *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
-       echo m68k-atari-mint${UNAME_RELEASE}
-       exit ;;
-    milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
-       echo m68k-milan-mint${UNAME_RELEASE}
-       exit ;;
-    hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
-       echo m68k-hades-mint${UNAME_RELEASE}
-       exit ;;
-    *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
-       echo m68k-unknown-mint${UNAME_RELEASE}
-       exit ;;
-    m68k:machten:*:*)
-       echo m68k-apple-machten${UNAME_RELEASE}
-       exit ;;
-    powerpc:machten:*:*)
-       echo powerpc-apple-machten${UNAME_RELEASE}
-       exit ;;
-    RISC*:Mach:*:*)
-       echo mips-dec-mach_bsd4.3
-       exit ;;
-    RISC*:ULTRIX:*:*)
-       echo mips-dec-ultrix${UNAME_RELEASE}
-       exit ;;
-    VAX*:ULTRIX*:*:*)
-       echo vax-dec-ultrix${UNAME_RELEASE}
-       exit ;;
-    2020:CLIX:*:* | 2430:CLIX:*:*)
-       echo clipper-intergraph-clix${UNAME_RELEASE}
-       exit ;;
-    mips:*:*:UMIPS | mips:*:*:RISCos)
-       eval $set_cc_for_build
-       sed 's/^        //' << EOF >$dummy.c
-#ifdef __cplusplus
-#include <stdio.h>  /* for printf() prototype */
-       int main (int argc, char *argv[]) {
-#else
-       int main (argc, argv) int argc; char *argv[]; {
-#endif
-       #if defined (host_mips) && defined (MIPSEB)
-       #if defined (SYSTYPE_SYSV)
-         printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
-       #endif
-       #if defined (SYSTYPE_SVR4)
-         printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
-       #endif
-       #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
-         printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
-       #endif
-       #endif
-         exit (-1);
-       }
-EOF
-       $CC_FOR_BUILD -o $dummy $dummy.c &&
-         dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
-         SYSTEM_NAME=`$dummy $dummyarg` &&
-           { echo "$SYSTEM_NAME"; exit; }
-       echo mips-mips-riscos${UNAME_RELEASE}
-       exit ;;
-    Motorola:PowerMAX_OS:*:*)
-       echo powerpc-motorola-powermax
-       exit ;;
-    Motorola:*:4.3:PL8-*)
-       echo powerpc-harris-powermax
-       exit ;;
-    Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
-       echo powerpc-harris-powermax
-       exit ;;
-    Night_Hawk:Power_UNIX:*:*)
-       echo powerpc-harris-powerunix
-       exit ;;
-    m88k:CX/UX:7*:*)
-       echo m88k-harris-cxux7
-       exit ;;
-    m88k:*:4*:R4*)
-       echo m88k-motorola-sysv4
-       exit ;;
-    m88k:*:3*:R3*)
-       echo m88k-motorola-sysv3
-       exit ;;
-    AViiON:dgux:*:*)
-       # DG/UX returns AViiON for all architectures
-       UNAME_PROCESSOR=`/usr/bin/uname -p`
-       if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
-       then
-           if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
-              [ ${TARGET_BINARY_INTERFACE}x = x ]
-           then
-               echo m88k-dg-dgux${UNAME_RELEASE}
-           else
-               echo m88k-dg-dguxbcs${UNAME_RELEASE}
-           fi
-       else
-           echo i586-dg-dgux${UNAME_RELEASE}
-       fi
-       exit ;;
-    M88*:DolphinOS:*:*)        # DolphinOS (SVR3)
-       echo m88k-dolphin-sysv3
-       exit ;;
-    M88*:*:R3*:*)
-       # Delta 88k system running SVR3
-       echo m88k-motorola-sysv3
-       exit ;;
-    XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
-       echo m88k-tektronix-sysv3
-       exit ;;
-    Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
-       echo m68k-tektronix-bsd
-       exit ;;
-    *:IRIX*:*:*)
-       echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
-       exit ;;
-    ????????:AIX?:[12].1:2)   # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
-       echo romp-ibm-aix     # uname -m gives an 8 hex-code CPU id
-       exit ;;               # Note that: echo "'`uname -s`'" gives 'AIX '
-    i*86:AIX:*:*)
-       echo i386-ibm-aix
-       exit ;;
-    ia64:AIX:*:*)
-       if [ -x /usr/bin/oslevel ] ; then
-               IBM_REV=`/usr/bin/oslevel`
-       else
-               IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
-       fi
-       echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
-       exit ;;
-    *:AIX:2:3)
-       if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
-               eval $set_cc_for_build
-               sed 's/^                //' << EOF >$dummy.c
-               #include <sys/systemcfg.h>
-
-               main()
-                       {
-                       if (!__power_pc())
-                               exit(1);
-                       puts("powerpc-ibm-aix3.2.5");
-                       exit(0);
-                       }
-EOF
-               if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
-               then
-                       echo "$SYSTEM_NAME"
-               else
-                       echo rs6000-ibm-aix3.2.5
-               fi
-       elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
-               echo rs6000-ibm-aix3.2.4
-       else
-               echo rs6000-ibm-aix3.2
-       fi
-       exit ;;
-    *:AIX:*:[4567])
-       IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
-       if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
-               IBM_ARCH=rs6000
-       else
-               IBM_ARCH=powerpc
-       fi
-       if [ -x /usr/bin/oslevel ] ; then
-               IBM_REV=`/usr/bin/oslevel`
-       else
-               IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
-       fi
-       echo ${IBM_ARCH}-ibm-aix${IBM_REV}
-       exit ;;
-    *:AIX:*:*)
-       echo rs6000-ibm-aix
-       exit ;;
-    ibmrt:4.4BSD:*|romp-ibm:BSD:*)
-       echo romp-ibm-bsd4.4
-       exit ;;
-    ibmrt:*BSD:*|romp-ibm:BSD:*)            # covers RT/PC BSD and
-       echo romp-ibm-bsd${UNAME_RELEASE}   # 4.3 with uname added to
-       exit ;;                             # report: romp-ibm BSD 4.3
-    *:BOSX:*:*)
-       echo rs6000-bull-bosx
-       exit ;;
-    DPX/2?00:B.O.S.:*:*)
-       echo m68k-bull-sysv3
-       exit ;;
-    9000/[34]??:4.3bsd:1.*:*)
-       echo m68k-hp-bsd
-       exit ;;
-    hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
-       echo m68k-hp-bsd4.4
-       exit ;;
-    9000/[34678]??:HP-UX:*:*)
-       HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
-       case "${UNAME_MACHINE}" in
-           9000/31? )            HP_ARCH=m68000 ;;
-           9000/[34]?? )         HP_ARCH=m68k ;;
-           9000/[678][0-9][0-9])
-               if [ -x /usr/bin/getconf ]; then
-                   sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
-                   sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
-                   case "${sc_cpu_version}" in
-                     523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
-                     528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
-                     532)                      # CPU_PA_RISC2_0
-                       case "${sc_kernel_bits}" in
-                         32) HP_ARCH="hppa2.0n" ;;
-                         64) HP_ARCH="hppa2.0w" ;;
-                         '') HP_ARCH="hppa2.0" ;;   # HP-UX 10.20
-                       esac ;;
-                   esac
-               fi
-               if [ "${HP_ARCH}" = "" ]; then
-                   eval $set_cc_for_build
-                   sed 's/^            //' << EOF >$dummy.c
-
-               #define _HPUX_SOURCE
-               #include <stdlib.h>
-               #include <unistd.h>
-
-               int main ()
-               {
-               #if defined(_SC_KERNEL_BITS)
-                   long bits = sysconf(_SC_KERNEL_BITS);
-               #endif
-                   long cpu  = sysconf (_SC_CPU_VERSION);
-
-                   switch (cpu)
-                       {
-                       case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
-                       case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
-                       case CPU_PA_RISC2_0:
-               #if defined(_SC_KERNEL_BITS)
-                           switch (bits)
-                               {
-                               case 64: puts ("hppa2.0w"); break;
-                               case 32: puts ("hppa2.0n"); break;
-                               default: puts ("hppa2.0"); break;
-                               } break;
-               #else  /* !defined(_SC_KERNEL_BITS) */
-                           puts ("hppa2.0"); break;
-               #endif
-                       default: puts ("hppa1.0"); break;
-                       }
-                   exit (0);
-               }
-EOF
-                   (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
-                   test -z "$HP_ARCH" && HP_ARCH=hppa
-               fi ;;
-       esac
-       if [ ${HP_ARCH} = "hppa2.0w" ]
-       then
-           eval $set_cc_for_build
-
-           # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
-           # 32-bit code.  hppa64-hp-hpux* has the same kernel and a compiler
-           # generating 64-bit code.  GNU and HP use different nomenclature:
-           #
-           # $ CC_FOR_BUILD=cc ./config.guess
-           # => hppa2.0w-hp-hpux11.23
-           # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
-           # => hppa64-hp-hpux11.23
-
-           if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
-               grep -q __LP64__
-           then
-               HP_ARCH="hppa2.0w"
-           else
-               HP_ARCH="hppa64"
-           fi
-       fi
-       echo ${HP_ARCH}-hp-hpux${HPUX_REV}
-       exit ;;
-    ia64:HP-UX:*:*)
-       HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
-       echo ia64-hp-hpux${HPUX_REV}
-       exit ;;
-    3050*:HI-UX:*:*)
-       eval $set_cc_for_build
-       sed 's/^        //' << EOF >$dummy.c
-       #include <unistd.h>
-       int
-       main ()
-       {
-         long cpu = sysconf (_SC_CPU_VERSION);
-         /* The order matters, because CPU_IS_HP_MC68K erroneously returns
-            true for CPU_PA_RISC1_0.  CPU_IS_PA_RISC returns correct
-            results, however.  */
-         if (CPU_IS_PA_RISC (cpu))
-           {
-             switch (cpu)
-               {
-                 case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
-                 case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
-                 case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
-                 default: puts ("hppa-hitachi-hiuxwe2"); break;
-               }
-           }
-         else if (CPU_IS_HP_MC68K (cpu))
-           puts ("m68k-hitachi-hiuxwe2");
-         else puts ("unknown-hitachi-hiuxwe2");
-         exit (0);
-       }
-EOF
-       $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
-               { echo "$SYSTEM_NAME"; exit; }
-       echo unknown-hitachi-hiuxwe2
-       exit ;;
-    9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
-       echo hppa1.1-hp-bsd
-       exit ;;
-    9000/8??:4.3bsd:*:*)
-       echo hppa1.0-hp-bsd
-       exit ;;
-    *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
-       echo hppa1.0-hp-mpeix
-       exit ;;
-    hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
-       echo hppa1.1-hp-osf
-       exit ;;
-    hp8??:OSF1:*:*)
-       echo hppa1.0-hp-osf
-       exit ;;
-    i*86:OSF1:*:*)
-       if [ -x /usr/sbin/sysversion ] ; then
-           echo ${UNAME_MACHINE}-unknown-osf1mk
-       else
-           echo ${UNAME_MACHINE}-unknown-osf1
-       fi
-       exit ;;
-    parisc*:Lites*:*:*)
-       echo hppa1.1-hp-lites
-       exit ;;
-    C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
-       echo c1-convex-bsd
-       exit ;;
-    C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
-       if getsysinfo -f scalar_acc
-       then echo c32-convex-bsd
-       else echo c2-convex-bsd
-       fi
-       exit ;;
-    C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
-       echo c34-convex-bsd
-       exit ;;
-    C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
-       echo c38-convex-bsd
-       exit ;;
-    C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
-       echo c4-convex-bsd
-       exit ;;
-    CRAY*Y-MP:*:*:*)
-       echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
-       exit ;;
-    CRAY*[A-Z]90:*:*:*)
-       echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
-       | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
-             -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
-             -e 's/\.[^.]*$/.X/'
-       exit ;;
-    CRAY*TS:*:*:*)
-       echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
-       exit ;;
-    CRAY*T3E:*:*:*)
-       echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
-       exit ;;
-    CRAY*SV1:*:*:*)
-       echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
-       exit ;;
-    *:UNICOS/mp:*:*)
-       echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
-       exit ;;
-    F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
-       FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
-       FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
-       FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
-       echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
-       exit ;;
-    5000:UNIX_System_V:4.*:*)
-       FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
-       FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
-       echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
-       exit ;;
-    i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
-       echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
-       exit ;;
-    sparc*:BSD/OS:*:*)
-       echo sparc-unknown-bsdi${UNAME_RELEASE}
-       exit ;;
-    *:BSD/OS:*:*)
-       echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
-       exit ;;
-    *:FreeBSD:*:*)
-       UNAME_PROCESSOR=`/usr/bin/uname -p`
-       case ${UNAME_PROCESSOR} in
-           amd64)
-               echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
-           *)
-               echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
-       esac
-       exit ;;
-    i*:CYGWIN*:*)
-       echo ${UNAME_MACHINE}-pc-cygwin
-       exit ;;
-    *:MINGW64*:*)
-       echo ${UNAME_MACHINE}-pc-mingw64
-       exit ;;
-    *:MINGW*:*)
-       echo ${UNAME_MACHINE}-pc-mingw32
-       exit ;;
-    *:MSYS*:*)
-       echo ${UNAME_MACHINE}-pc-msys
-       exit ;;
-    i*:windows32*:*)
-       # uname -m includes "-pc" on this system.
-       echo ${UNAME_MACHINE}-mingw32
-       exit ;;
-    i*:PW*:*)
-       echo ${UNAME_MACHINE}-pc-pw32
-       exit ;;
-    *:Interix*:*)
-       case ${UNAME_MACHINE} in
-           x86)
-               echo i586-pc-interix${UNAME_RELEASE}
-               exit ;;
-           authenticamd | genuineintel | EM64T)
-               echo x86_64-unknown-interix${UNAME_RELEASE}
-               exit ;;
-           IA64)
-               echo ia64-unknown-interix${UNAME_RELEASE}
-               exit ;;
-       esac ;;
-    [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
-       echo i${UNAME_MACHINE}-pc-mks
-       exit ;;
-    8664:Windows_NT:*)
-       echo x86_64-pc-mks
-       exit ;;
-    i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
-       # How do we know it's Interix rather than the generic POSIX subsystem?
-       # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
-       # UNAME_MACHINE based on the output of uname instead of i386?
-       echo i586-pc-interix
-       exit ;;
-    i*:UWIN*:*)
-       echo ${UNAME_MACHINE}-pc-uwin
-       exit ;;
-    amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
-       echo x86_64-unknown-cygwin
-       exit ;;
-    p*:CYGWIN*:*)
-       echo powerpcle-unknown-cygwin
-       exit ;;
-    prep*:SunOS:5.*:*)
-       echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
-       exit ;;
-    *:GNU:*:*)
-       # the GNU system
-       echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
-       exit ;;
-    *:GNU/*:*:*)
-       # other systems with GNU libc and userland
-       echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC}
-       exit ;;
-    i*86:Minix:*:*)
-       echo ${UNAME_MACHINE}-pc-minix
-       exit ;;
-    aarch64:Linux:*:*)
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    aarch64_be:Linux:*:*)
-       UNAME_MACHINE=aarch64_be
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    alpha:Linux:*:*)
-       case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
-         EV5)   UNAME_MACHINE=alphaev5 ;;
-         EV56)  UNAME_MACHINE=alphaev56 ;;
-         PCA56) UNAME_MACHINE=alphapca56 ;;
-         PCA57) UNAME_MACHINE=alphapca56 ;;
-         EV6)   UNAME_MACHINE=alphaev6 ;;
-         EV67)  UNAME_MACHINE=alphaev67 ;;
-         EV68*) UNAME_MACHINE=alphaev68 ;;
-       esac
-       objdump --private-headers /bin/sh | grep -q ld.so.1
-       if test "$?" = 0 ; then LIBC="gnulibc1" ; fi
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    arc:Linux:*:* | arceb:Linux:*:*)
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    arm*:Linux:*:*)
-       eval $set_cc_for_build
-       if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
-           | grep -q __ARM_EABI__
-       then
-           echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       else
-           if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
-               | grep -q __ARM_PCS_VFP
-           then
-               echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi
-           else
-               echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf
-           fi
-       fi
-       exit ;;
-    avr32*:Linux:*:*)
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    cris:Linux:*:*)
-       echo ${UNAME_MACHINE}-axis-linux-${LIBC}
-       exit ;;
-    crisv32:Linux:*:*)
-       echo ${UNAME_MACHINE}-axis-linux-${LIBC}
-       exit ;;
-    frv:Linux:*:*)
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    hexagon:Linux:*:*)
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    i*86:Linux:*:*)
-       echo ${UNAME_MACHINE}-pc-linux-${LIBC}
-       exit ;;
-    ia64:Linux:*:*)
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    m32r*:Linux:*:*)
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    m68*:Linux:*:*)
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    mips:Linux:*:* | mips64:Linux:*:*)
-       eval $set_cc_for_build
-       sed 's/^        //' << EOF >$dummy.c
-       #undef CPU
-       #undef ${UNAME_MACHINE}
-       #undef ${UNAME_MACHINE}el
-       #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
-       CPU=${UNAME_MACHINE}el
-       #else
-       #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
-       CPU=${UNAME_MACHINE}
-       #else
-       CPU=
-       #endif
-       #endif
-EOF
-       eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
-       test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; }
-       ;;
-    openrisc*:Linux:*:*)
-       echo or1k-unknown-linux-${LIBC}
-       exit ;;
-    or32:Linux:*:* | or1k*:Linux:*:*)
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    padre:Linux:*:*)
-       echo sparc-unknown-linux-${LIBC}
-       exit ;;
-    parisc64:Linux:*:* | hppa64:Linux:*:*)
-       echo hppa64-unknown-linux-${LIBC}
-       exit ;;
-    parisc:Linux:*:* | hppa:Linux:*:*)
-       # Look for CPU level
-       case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
-         PA7*) echo hppa1.1-unknown-linux-${LIBC} ;;
-         PA8*) echo hppa2.0-unknown-linux-${LIBC} ;;
-         *)    echo hppa-unknown-linux-${LIBC} ;;
-       esac
-       exit ;;
-    ppc64:Linux:*:*)
-       echo powerpc64-unknown-linux-${LIBC}
-       exit ;;
-    ppc:Linux:*:*)
-       echo powerpc-unknown-linux-${LIBC}
-       exit ;;
-    ppc64le:Linux:*:*)
-       echo powerpc64le-unknown-linux-${LIBC}
-       exit ;;
-    ppcle:Linux:*:*)
-       echo powerpcle-unknown-linux-${LIBC}
-       exit ;;
-    s390:Linux:*:* | s390x:Linux:*:*)
-       echo ${UNAME_MACHINE}-ibm-linux-${LIBC}
-       exit ;;
-    sh64*:Linux:*:*)
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    sh*:Linux:*:*)
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    sparc:Linux:*:* | sparc64:Linux:*:*)
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    tile*:Linux:*:*)
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    vax:Linux:*:*)
-       echo ${UNAME_MACHINE}-dec-linux-${LIBC}
-       exit ;;
-    x86_64:Linux:*:*)
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    xtensa*:Linux:*:*)
-       echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
-       exit ;;
-    i*86:DYNIX/ptx:4*:*)
-       # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
-       # earlier versions are messed up and put the nodename in both
-       # sysname and nodename.
-       echo i386-sequent-sysv4
-       exit ;;
-    i*86:UNIX_SV:4.2MP:2.*)
-       # Unixware is an offshoot of SVR4, but it has its own version
-       # number series starting with 2...
-       # I am not positive that other SVR4 systems won't match this,
-       # I just have to hope.  -- rms.
-       # Use sysv4.2uw... so that sysv4* matches it.
-       echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
-       exit ;;
-    i*86:OS/2:*:*)
-       # If we were able to find `uname', then EMX Unix compatibility
-       # is probably installed.
-       echo ${UNAME_MACHINE}-pc-os2-emx
-       exit ;;
-    i*86:XTS-300:*:STOP)
-       echo ${UNAME_MACHINE}-unknown-stop
-       exit ;;
-    i*86:atheos:*:*)
-       echo ${UNAME_MACHINE}-unknown-atheos
-       exit ;;
-    i*86:syllable:*:*)
-       echo ${UNAME_MACHINE}-pc-syllable
-       exit ;;
-    i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
-       echo i386-unknown-lynxos${UNAME_RELEASE}
-       exit ;;
-    i*86:*DOS:*:*)
-       echo ${UNAME_MACHINE}-pc-msdosdjgpp
-       exit ;;
-    i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
-       UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
-       if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
-               echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
-       else
-               echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
-       fi
-       exit ;;
-    i*86:*:5:[678]*)
-       # UnixWare 7.x, OpenUNIX and OpenServer 6.
-       case `/bin/uname -X | grep "^Machine"` in
-           *486*)           UNAME_MACHINE=i486 ;;
-           *Pentium)        UNAME_MACHINE=i586 ;;
-           *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
-       esac
-       echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
-       exit ;;
-    i*86:*:3.2:*)
-       if test -f /usr/options/cb.name; then
-               UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
-               echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
-       elif /bin/uname -X 2>/dev/null >/dev/null ; then
-               UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
-               (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
-               (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
-                       && UNAME_MACHINE=i586
-               (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
-                       && UNAME_MACHINE=i686
-               (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
-                       && UNAME_MACHINE=i686
-               echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
-       else
-               echo ${UNAME_MACHINE}-pc-sysv32
-       fi
-       exit ;;
-    pc:*:*:*)
-       # Left here for compatibility:
-       # uname -m prints for DJGPP always 'pc', but it prints nothing about
-       # the processor, so we play safe by assuming i586.
-       # Note: whatever this is, it MUST be the same as what config.sub
-       # prints for the "djgpp" host, or else GDB configury will decide that
-       # this is a cross-build.
-       echo i586-pc-msdosdjgpp
-       exit ;;
-    Intel:Mach:3*:*)
-       echo i386-pc-mach3
-       exit ;;
-    paragon:*:*:*)
-       echo i860-intel-osf1
-       exit ;;
-    i860:*:4.*:*) # i860-SVR4
-       if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
-         echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
-       else # Add other i860-SVR4 vendors below as they are discovered.
-         echo i860-unknown-sysv${UNAME_RELEASE}  # Unknown i860-SVR4
-       fi
-       exit ;;
-    mini*:CTIX:SYS*5:*)
-       # "miniframe"
-       echo m68010-convergent-sysv
-       exit ;;
-    mc68k:UNIX:SYSTEM5:3.51m)
-       echo m68k-convergent-sysv
-       exit ;;
-    M680?0:D-NIX:5.3:*)
-       echo m68k-diab-dnix
-       exit ;;
-    M68*:*:R3V[5678]*:*)
-       test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
-    3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
-       OS_REL=''
-       test -r /etc/.relid \
-       && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
-       /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
-         && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
-       /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
-         && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
-    3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
-       /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
-         && { echo i486-ncr-sysv4; exit; } ;;
-    NCR*:*:4.2:* | MPRAS*:*:4.2:*)
-       OS_REL='.3'
-       test -r /etc/.relid \
-           && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
-       /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
-           && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
-       /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
-           && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
-       /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
-           && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
-    m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
-       echo m68k-unknown-lynxos${UNAME_RELEASE}
-       exit ;;
-    mc68030:UNIX_System_V:4.*:*)
-       echo m68k-atari-sysv4
-       exit ;;
-    TSUNAMI:LynxOS:2.*:*)
-       echo sparc-unknown-lynxos${UNAME_RELEASE}
-       exit ;;
-    rs6000:LynxOS:2.*:*)
-       echo rs6000-unknown-lynxos${UNAME_RELEASE}
-       exit ;;
-    PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
-       echo powerpc-unknown-lynxos${UNAME_RELEASE}
-       exit ;;
-    SM[BE]S:UNIX_SV:*:*)
-       echo mips-dde-sysv${UNAME_RELEASE}
-       exit ;;
-    RM*:ReliantUNIX-*:*:*)
-       echo mips-sni-sysv4
-       exit ;;
-    RM*:SINIX-*:*:*)
-       echo mips-sni-sysv4
-       exit ;;
-    *:SINIX-*:*:*)
-       if uname -p 2>/dev/null >/dev/null ; then
-               UNAME_MACHINE=`(uname -p) 2>/dev/null`
-               echo ${UNAME_MACHINE}-sni-sysv4
-       else
-               echo ns32k-sni-sysv
-       fi
-       exit ;;
-    PENTIUM:*:4.0*:*)  # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
-                       # says <Richard.M.Bartel@ccMail.Census.GOV>
-       echo i586-unisys-sysv4
-       exit ;;
-    *:UNIX_System_V:4*:FTX*)
-       # From Gerald Hewes <hewes@openmarket.com>.
-       # How about differentiating between stratus architectures? -djm
-       echo hppa1.1-stratus-sysv4
-       exit ;;
-    *:*:*:FTX*)
-       # From seanf@swdc.stratus.com.
-       echo i860-stratus-sysv4
-       exit ;;
-    i*86:VOS:*:*)
-       # From Paul.Green@stratus.com.
-       echo ${UNAME_MACHINE}-stratus-vos
-       exit ;;
-    *:VOS:*:*)
-       # From Paul.Green@stratus.com.
-       echo hppa1.1-stratus-vos
-       exit ;;
-    mc68*:A/UX:*:*)
-       echo m68k-apple-aux${UNAME_RELEASE}
-       exit ;;
-    news*:NEWS-OS:6*:*)
-       echo mips-sony-newsos6
-       exit ;;
-    R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
-       if [ -d /usr/nec ]; then
-               echo mips-nec-sysv${UNAME_RELEASE}
-       else
-               echo mips-unknown-sysv${UNAME_RELEASE}
-       fi
-       exit ;;
-    BeBox:BeOS:*:*)    # BeOS running on hardware made by Be, PPC only.
-       echo powerpc-be-beos
-       exit ;;
-    BeMac:BeOS:*:*)    # BeOS running on Mac or Mac clone, PPC only.
-       echo powerpc-apple-beos
-       exit ;;
-    BePC:BeOS:*:*)     # BeOS running on Intel PC compatible.
-       echo i586-pc-beos
-       exit ;;
-    BePC:Haiku:*:*)    # Haiku running on Intel PC compatible.
-       echo i586-pc-haiku
-       exit ;;
-    x86_64:Haiku:*:*)
-       echo x86_64-unknown-haiku
-       exit ;;
-    SX-4:SUPER-UX:*:*)
-       echo sx4-nec-superux${UNAME_RELEASE}
-       exit ;;
-    SX-5:SUPER-UX:*:*)
-       echo sx5-nec-superux${UNAME_RELEASE}
-       exit ;;
-    SX-6:SUPER-UX:*:*)
-       echo sx6-nec-superux${UNAME_RELEASE}
-       exit ;;
-    SX-7:SUPER-UX:*:*)
-       echo sx7-nec-superux${UNAME_RELEASE}
-       exit ;;
-    SX-8:SUPER-UX:*:*)
-       echo sx8-nec-superux${UNAME_RELEASE}
-       exit ;;
-    SX-8R:SUPER-UX:*:*)
-       echo sx8r-nec-superux${UNAME_RELEASE}
-       exit ;;
-    Power*:Rhapsody:*:*)
-       echo powerpc-apple-rhapsody${UNAME_RELEASE}
-       exit ;;
-    *:Rhapsody:*:*)
-       echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
-       exit ;;
-    *:Darwin:*:*)
-       UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
-       eval $set_cc_for_build
-       if test "$UNAME_PROCESSOR" = unknown ; then
-           UNAME_PROCESSOR=powerpc
-       fi
-       if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then
-           if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
-               if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
-                   (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
-                   grep IS_64BIT_ARCH >/dev/null
-               then
-                   case $UNAME_PROCESSOR in
-                       i386) UNAME_PROCESSOR=x86_64 ;;
-                       powerpc) UNAME_PROCESSOR=powerpc64 ;;
-                   esac
-               fi
-           fi
-       elif test "$UNAME_PROCESSOR" = i386 ; then
-           # Avoid executing cc on OS X 10.9, as it ships with a stub
-           # that puts up a graphical alert prompting to install
-           # developer tools.  Any system running Mac OS X 10.7 or
-           # later (Darwin 11 and later) is required to have a 64-bit
-           # processor. This is not true of the ARM version of Darwin
-           # that Apple uses in portable devices.
-           UNAME_PROCESSOR=x86_64
-       fi
-       echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
-       exit ;;
-    *:procnto*:*:* | *:QNX:[0123456789]*:*)
-       UNAME_PROCESSOR=`uname -p`
-       if test "$UNAME_PROCESSOR" = "x86"; then
-               UNAME_PROCESSOR=i386
-               UNAME_MACHINE=pc
-       fi
-       echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
-       exit ;;
-    *:QNX:*:4*)
-       echo i386-pc-qnx
-       exit ;;
-    NEO-?:NONSTOP_KERNEL:*:*)
-       echo neo-tandem-nsk${UNAME_RELEASE}
-       exit ;;
-    NSE-*:NONSTOP_KERNEL:*:*)
-       echo nse-tandem-nsk${UNAME_RELEASE}
-       exit ;;
-    NSR-?:NONSTOP_KERNEL:*:*)
-       echo nsr-tandem-nsk${UNAME_RELEASE}
-       exit ;;
-    *:NonStop-UX:*:*)
-       echo mips-compaq-nonstopux
-       exit ;;
-    BS2000:POSIX*:*:*)
-       echo bs2000-siemens-sysv
-       exit ;;
-    DS/*:UNIX_System_V:*:*)
-       echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
-       exit ;;
-    *:Plan9:*:*)
-       # "uname -m" is not consistent, so use $cputype instead. 386
-       # is converted to i386 for consistency with other x86
-       # operating systems.
-       if test "$cputype" = "386"; then
-           UNAME_MACHINE=i386
-       else
-           UNAME_MACHINE="$cputype"
-       fi
-       echo ${UNAME_MACHINE}-unknown-plan9
-       exit ;;
-    *:TOPS-10:*:*)
-       echo pdp10-unknown-tops10
-       exit ;;
-    *:TENEX:*:*)
-       echo pdp10-unknown-tenex
-       exit ;;
-    KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
-       echo pdp10-dec-tops20
-       exit ;;
-    XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
-       echo pdp10-xkl-tops20
-       exit ;;
-    *:TOPS-20:*:*)
-       echo pdp10-unknown-tops20
-       exit ;;
-    *:ITS:*:*)
-       echo pdp10-unknown-its
-       exit ;;
-    SEI:*:*:SEIUX)
-       echo mips-sei-seiux${UNAME_RELEASE}
-       exit ;;
-    *:DragonFly:*:*)
-       echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
-       exit ;;
-    *:*VMS:*:*)
-       UNAME_MACHINE=`(uname -p) 2>/dev/null`
-       case "${UNAME_MACHINE}" in
-           A*) echo alpha-dec-vms ; exit ;;
-           I*) echo ia64-dec-vms ; exit ;;
-           V*) echo vax-dec-vms ; exit ;;
-       esac ;;
-    *:XENIX:*:SysV)
-       echo i386-pc-xenix
-       exit ;;
-    i*86:skyos:*:*)
-       echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
-       exit ;;
-    i*86:rdos:*:*)
-       echo ${UNAME_MACHINE}-pc-rdos
-       exit ;;
-    i*86:AROS:*:*)
-       echo ${UNAME_MACHINE}-pc-aros
-       exit ;;
-    x86_64:VMkernel:*:*)
-       echo ${UNAME_MACHINE}-unknown-esx
-       exit ;;
-esac
-
-cat >&2 <<EOF
-$0: unable to guess system type
-
-This script, last modified $timestamp, has failed to recognize
-the operating system you are using. It is advised that you
-download the most up to date version of the config scripts from
-
-  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
-and
-  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
-
-If the version you run ($0) is already up to date, please
-send the following data and any information you think might be
-pertinent to <config-patches@gnu.org> in order to provide the needed
-information to handle your system.
-
-config.guess timestamp = $timestamp
-
-uname -m = `(uname -m) 2>/dev/null || echo unknown`
-uname -r = `(uname -r) 2>/dev/null || echo unknown`
-uname -s = `(uname -s) 2>/dev/null || echo unknown`
-uname -v = `(uname -v) 2>/dev/null || echo unknown`
-
-/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
-/bin/uname -X     = `(/bin/uname -X) 2>/dev/null`
-
-hostinfo               = `(hostinfo) 2>/dev/null`
-/bin/universe          = `(/bin/universe) 2>/dev/null`
-/usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null`
-/bin/arch              = `(/bin/arch) 2>/dev/null`
-/usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null`
-/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
-
-UNAME_MACHINE = ${UNAME_MACHINE}
-UNAME_RELEASE = ${UNAME_RELEASE}
-UNAME_SYSTEM  = ${UNAME_SYSTEM}
-UNAME_VERSION = ${UNAME_VERSION}
-EOF
-
-exit 1
-
-# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
-# time-stamp-start: "timestamp='"
-# time-stamp-format: "%:y-%02m-%02d"
-# time-stamp-end: "'"
-# End:
diff --git a/src/jemalloc/config.sub b/src/jemalloc/config.sub
deleted file mode 100755 (executable)
index 0ccff77..0000000
--- a/src/jemalloc/config.sub
+++ /dev/null
@@ -1,1797 +0,0 @@
-#! /bin/sh
-# Configuration validation subroutine script.
-#   Copyright 1992-2014 Free Software Foundation, Inc.
-
-timestamp='2014-05-01'
-
-# This file is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>.
-#
-# As a special exception to the GNU General Public License, if you
-# distribute this file as part of a program that contains a
-# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that
-# program.  This Exception is an additional permission under section 7
-# of the GNU General Public License, version 3 ("GPLv3").
-
-
-# Please send patches with a ChangeLog entry to config-patches@gnu.org.
-#
-# Configuration subroutine to validate and canonicalize a configuration type.
-# Supply the specified configuration type as an argument.
-# If it is invalid, we print an error message on stderr and exit with code 1.
-# Otherwise, we print the canonical config type on stdout and succeed.
-
-# You can get the latest version of this script from:
-# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
-
-# This file is supposed to be the same for all GNU packages
-# and recognize all the CPU types, system types and aliases
-# that are meaningful with *any* GNU software.
-# Each package is responsible for reporting which valid configurations
-# it does not support.  The user should be able to distinguish
-# a failure to support a valid configuration from a meaningless
-# configuration.
-
-# The goal of this file is to map all the various variations of a given
-# machine specification into a single specification in the form:
-#      CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
-# or in some cases, the newer four-part form:
-#      CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
-# It is wrong to echo any other type of specification.
-
-me=`echo "$0" | sed -e 's,.*/,,'`
-
-usage="\
-Usage: $0 [OPTION] CPU-MFR-OPSYS
-       $0 [OPTION] ALIAS
-
-Canonicalize a configuration name.
-
-Operation modes:
-  -h, --help         print this help, then exit
-  -t, --time-stamp   print date of last modification, then exit
-  -v, --version      print version number, then exit
-
-Report bugs and patches to <config-patches@gnu.org>."
-
-version="\
-GNU config.sub ($timestamp)
-
-Copyright 1992-2014 Free Software Foundation, Inc.
-
-This is free software; see the source for copying conditions.  There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
-
-help="
-Try \`$me --help' for more information."
-
-# Parse command line
-while test $# -gt 0 ; do
-  case $1 in
-    --time-stamp | --time* | -t )
-       echo "$timestamp" ; exit ;;
-    --version | -v )
-       echo "$version" ; exit ;;
-    --help | --h* | -h )
-       echo "$usage"; exit ;;
-    -- )     # Stop option processing
-       shift; break ;;
-    - )        # Use stdin as input.
-       break ;;
-    -* )
-       echo "$me: invalid option $1$help"
-       exit 1 ;;
-
-    *local*)
-       # First pass through any local machine types.
-       echo $1
-       exit ;;
-
-    * )
-       break ;;
-  esac
-done
-
-case $# in
- 0) echo "$me: missing argument$help" >&2
-    exit 1;;
- 1) ;;
- *) echo "$me: too many arguments$help" >&2
-    exit 1;;
-esac
-
-# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
-# Here we must recognize all the valid KERNEL-OS combinations.
-maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
-case $maybe_os in
-  nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
-  linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
-  knetbsd*-gnu* | netbsd*-gnu* | \
-  kopensolaris*-gnu* | \
-  storm-chaos* | os2-emx* | rtmk-nova*)
-    os=-$maybe_os
-    basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
-    ;;
-  android-linux)
-    os=-linux-android
-    basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
-    ;;
-  *)
-    basic_machine=`echo $1 | sed 's/-[^-]*$//'`
-    if [ $basic_machine != $1 ]
-    then os=`echo $1 | sed 's/.*-/-/'`
-    else os=; fi
-    ;;
-esac
-
-### Let's recognize common machines as not being operating systems so
-### that things like config.sub decstation-3100 work.  We also
-### recognize some manufacturers as not being operating systems, so we
-### can provide default operating systems below.
-case $os in
-       -sun*os*)
-               # Prevent following clause from handling this invalid input.
-               ;;
-       -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
-       -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
-       -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
-       -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
-       -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
-       -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
-       -apple | -axis | -knuth | -cray | -microblaze*)
-               os=
-               basic_machine=$1
-               ;;
-       -bluegene*)
-               os=-cnk
-               ;;
-       -sim | -cisco | -oki | -wec | -winbond)
-               os=
-               basic_machine=$1
-               ;;
-       -scout)
-               ;;
-       -wrs)
-               os=-vxworks
-               basic_machine=$1
-               ;;
-       -chorusos*)
-               os=-chorusos
-               basic_machine=$1
-               ;;
-       -chorusrdb)
-               os=-chorusrdb
-               basic_machine=$1
-               ;;
-       -hiux*)
-               os=-hiuxwe2
-               ;;
-       -sco6)
-               os=-sco5v6
-               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
-               ;;
-       -sco5)
-               os=-sco3.2v5
-               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
-               ;;
-       -sco4)
-               os=-sco3.2v4
-               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
-               ;;
-       -sco3.2.[4-9]*)
-               os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
-               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
-               ;;
-       -sco3.2v[4-9]*)
-               # Don't forget version if it is 3.2v4 or newer.
-               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
-               ;;
-       -sco5v6*)
-               # Don't forget version if it is 3.2v4 or newer.
-               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
-               ;;
-       -sco*)
-               os=-sco3.2v2
-               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
-               ;;
-       -udk*)
-               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
-               ;;
-       -isc)
-               os=-isc2.2
-               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
-               ;;
-       -clix*)
-               basic_machine=clipper-intergraph
-               ;;
-       -isc*)
-               basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
-               ;;
-       -lynx*178)
-               os=-lynxos178
-               ;;
-       -lynx*5)
-               os=-lynxos5
-               ;;
-       -lynx*)
-               os=-lynxos
-               ;;
-       -ptx*)
-               basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
-               ;;
-       -windowsnt*)
-               os=`echo $os | sed -e 's/windowsnt/winnt/'`
-               ;;
-       -psos*)
-               os=-psos
-               ;;
-       -mint | -mint[0-9]*)
-               basic_machine=m68k-atari
-               os=-mint
-               ;;
-esac
-
-# Decode aliases for certain CPU-COMPANY combinations.
-case $basic_machine in
-       # Recognize the basic CPU types without company name.
-       # Some are omitted here because they have special meanings below.
-       1750a | 580 \
-       | a29k \
-       | aarch64 | aarch64_be \
-       | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
-       | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
-       | am33_2.0 \
-       | arc | arceb \
-       | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \
-       | avr | avr32 \
-       | be32 | be64 \
-       | bfin \
-       | c4x | c8051 | clipper \
-       | d10v | d30v | dlx | dsp16xx \
-       | epiphany \
-       | fido | fr30 | frv \
-       | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
-       | hexagon \
-       | i370 | i860 | i960 | ia64 \
-       | ip2k | iq2000 \
-       | k1om \
-       | le32 | le64 \
-       | lm32 \
-       | m32c | m32r | m32rle | m68000 | m68k | m88k \
-       | maxq | mb | microblaze | microblazeel | mcore | mep | metag \
-       | mips | mipsbe | mipseb | mipsel | mipsle \
-       | mips16 \
-       | mips64 | mips64el \
-       | mips64octeon | mips64octeonel \
-       | mips64orion | mips64orionel \
-       | mips64r5900 | mips64r5900el \
-       | mips64vr | mips64vrel \
-       | mips64vr4100 | mips64vr4100el \
-       | mips64vr4300 | mips64vr4300el \
-       | mips64vr5000 | mips64vr5000el \
-       | mips64vr5900 | mips64vr5900el \
-       | mipsisa32 | mipsisa32el \
-       | mipsisa32r2 | mipsisa32r2el \
-       | mipsisa32r6 | mipsisa32r6el \
-       | mipsisa64 | mipsisa64el \
-       | mipsisa64r2 | mipsisa64r2el \
-       | mipsisa64r6 | mipsisa64r6el \
-       | mipsisa64sb1 | mipsisa64sb1el \
-       | mipsisa64sr71k | mipsisa64sr71kel \
-       | mipsr5900 | mipsr5900el \
-       | mipstx39 | mipstx39el \
-       | mn10200 | mn10300 \
-       | moxie \
-       | mt \
-       | msp430 \
-       | nds32 | nds32le | nds32be \
-       | nios | nios2 | nios2eb | nios2el \
-       | ns16k | ns32k \
-       | open8 | or1k | or1knd | or32 \
-       | pdp10 | pdp11 | pj | pjl \
-       | powerpc | powerpc64 | powerpc64le | powerpcle \
-       | pyramid \
-       | rl78 | rx \
-       | score \
-       | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
-       | sh64 | sh64le \
-       | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
-       | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
-       | spu \
-       | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
-       | ubicom32 \
-       | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
-       | we32k \
-       | x86 | xc16x | xstormy16 | xtensa \
-       | z8k | z80)
-               basic_machine=$basic_machine-unknown
-               ;;
-       c54x)
-               basic_machine=tic54x-unknown
-               ;;
-       c55x)
-               basic_machine=tic55x-unknown
-               ;;
-       c6x)
-               basic_machine=tic6x-unknown
-               ;;
-       m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip)
-               basic_machine=$basic_machine-unknown
-               os=-none
-               ;;
-       m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
-               ;;
-       ms1)
-               basic_machine=mt-unknown
-               ;;
-
-       strongarm | thumb | xscale)
-               basic_machine=arm-unknown
-               ;;
-       xgate)
-               basic_machine=$basic_machine-unknown
-               os=-none
-               ;;
-       xscaleeb)
-               basic_machine=armeb-unknown
-               ;;
-
-       xscaleel)
-               basic_machine=armel-unknown
-               ;;
-
-       # We use `pc' rather than `unknown'
-       # because (1) that's what they normally are, and
-       # (2) the word "unknown" tends to confuse beginning users.
-       i*86 | x86_64)
-         basic_machine=$basic_machine-pc
-         ;;
-       # Object if more than one company name word.
-       *-*-*)
-               echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
-               exit 1
-               ;;
-       # Recognize the basic CPU types with company name.
-       580-* \
-       | a29k-* \
-       | aarch64-* | aarch64_be-* \
-       | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
-       | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
-       | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \
-       | arm-*  | armbe-* | armle-* | armeb-* | armv*-* \
-       | avr-* | avr32-* \
-       | be32-* | be64-* \
-       | bfin-* | bs2000-* \
-       | c[123]* | c30-* | [cjt]90-* | c4x-* \
-       | c8051-* | clipper-* | craynv-* | cydra-* \
-       | d10v-* | d30v-* | dlx-* \
-       | elxsi-* \
-       | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
-       | h8300-* | h8500-* \
-       | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
-       | hexagon-* \
-       | i*86-* | i860-* | i960-* | ia64-* \
-       | ip2k-* | iq2000-* \
-       | k1om-* \
-       | le32-* | le64-* \
-       | lm32-* \
-       | m32c-* | m32r-* | m32rle-* \
-       | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
-       | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
-       | microblaze-* | microblazeel-* \
-       | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
-       | mips16-* \
-       | mips64-* | mips64el-* \
-       | mips64octeon-* | mips64octeonel-* \
-       | mips64orion-* | mips64orionel-* \
-       | mips64r5900-* | mips64r5900el-* \
-       | mips64vr-* | mips64vrel-* \
-       | mips64vr4100-* | mips64vr4100el-* \
-       | mips64vr4300-* | mips64vr4300el-* \
-       | mips64vr5000-* | mips64vr5000el-* \
-       | mips64vr5900-* | mips64vr5900el-* \
-       | mipsisa32-* | mipsisa32el-* \
-       | mipsisa32r2-* | mipsisa32r2el-* \
-       | mipsisa32r6-* | mipsisa32r6el-* \
-       | mipsisa64-* | mipsisa64el-* \
-       | mipsisa64r2-* | mipsisa64r2el-* \
-       | mipsisa64r6-* | mipsisa64r6el-* \
-       | mipsisa64sb1-* | mipsisa64sb1el-* \
-       | mipsisa64sr71k-* | mipsisa64sr71kel-* \
-       | mipsr5900-* | mipsr5900el-* \
-       | mipstx39-* | mipstx39el-* \
-       | mmix-* \
-       | mt-* \
-       | msp430-* \
-       | nds32-* | nds32le-* | nds32be-* \
-       | nios-* | nios2-* | nios2eb-* | nios2el-* \
-       | none-* | np1-* | ns16k-* | ns32k-* \
-       | open8-* \
-       | or1k*-* \
-       | orion-* \
-       | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
-       | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
-       | pyramid-* \
-       | rl78-* | romp-* | rs6000-* | rx-* \
-       | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
-       | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
-       | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
-       | sparclite-* \
-       | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \
-       | tahoe-* \
-       | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
-       | tile*-* \
-       | tron-* \
-       | ubicom32-* \
-       | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
-       | vax-* \
-       | we32k-* \
-       | x86-* | x86_64-* | xc16x-* | xps100-* \
-       | xstormy16-* | xtensa*-* \
-       | ymp-* \
-       | z8k-* | z80-*)
-               ;;
-       # Recognize the basic CPU types without company name, with glob match.
-       xtensa*)
-               basic_machine=$basic_machine-unknown
-               ;;
-       # Recognize the various machine names and aliases which stand
-       # for a CPU type and a company and sometimes even an OS.
-       386bsd)
-               basic_machine=i386-unknown
-               os=-bsd
-               ;;
-       3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
-               basic_machine=m68000-att
-               ;;
-       3b*)
-               basic_machine=we32k-att
-               ;;
-       a29khif)
-               basic_machine=a29k-amd
-               os=-udi
-               ;;
-       abacus)
-               basic_machine=abacus-unknown
-               ;;
-       adobe68k)
-               basic_machine=m68010-adobe
-               os=-scout
-               ;;
-       alliant | fx80)
-               basic_machine=fx80-alliant
-               ;;
-       altos | altos3068)
-               basic_machine=m68k-altos
-               ;;
-       am29k)
-               basic_machine=a29k-none
-               os=-bsd
-               ;;
-       amd64)
-               basic_machine=x86_64-pc
-               ;;
-       amd64-*)
-               basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
-               ;;
-       amdahl)
-               basic_machine=580-amdahl
-               os=-sysv
-               ;;
-       amiga | amiga-*)
-               basic_machine=m68k-unknown
-               ;;
-       amigaos | amigados)
-               basic_machine=m68k-unknown
-               os=-amigaos
-               ;;
-       amigaunix | amix)
-               basic_machine=m68k-unknown
-               os=-sysv4
-               ;;
-       apollo68)
-               basic_machine=m68k-apollo
-               os=-sysv
-               ;;
-       apollo68bsd)
-               basic_machine=m68k-apollo
-               os=-bsd
-               ;;
-       aros)
-               basic_machine=i386-pc
-               os=-aros
-               ;;
-       aux)
-               basic_machine=m68k-apple
-               os=-aux
-               ;;
-       balance)
-               basic_machine=ns32k-sequent
-               os=-dynix
-               ;;
-       blackfin)
-               basic_machine=bfin-unknown
-               os=-linux
-               ;;
-       blackfin-*)
-               basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
-               os=-linux
-               ;;
-       bluegene*)
-               basic_machine=powerpc-ibm
-               os=-cnk
-               ;;
-       c54x-*)
-               basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
-               ;;
-       c55x-*)
-               basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
-               ;;
-       c6x-*)
-               basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
-               ;;
-       c90)
-               basic_machine=c90-cray
-               os=-unicos
-               ;;
-       cegcc)
-               basic_machine=arm-unknown
-               os=-cegcc
-               ;;
-       convex-c1)
-               basic_machine=c1-convex
-               os=-bsd
-               ;;
-       convex-c2)
-               basic_machine=c2-convex
-               os=-bsd
-               ;;
-       convex-c32)
-               basic_machine=c32-convex
-               os=-bsd
-               ;;
-       convex-c34)
-               basic_machine=c34-convex
-               os=-bsd
-               ;;
-       convex-c38)
-               basic_machine=c38-convex
-               os=-bsd
-               ;;
-       cray | j90)
-               basic_machine=j90-cray
-               os=-unicos
-               ;;
-       craynv)
-               basic_machine=craynv-cray
-               os=-unicosmp
-               ;;
-       cr16 | cr16-*)
-               basic_machine=cr16-unknown
-               os=-elf
-               ;;
-       crds | unos)
-               basic_machine=m68k-crds
-               ;;
-       crisv32 | crisv32-* | etraxfs*)
-               basic_machine=crisv32-axis
-               ;;
-       cris | cris-* | etrax*)
-               basic_machine=cris-axis
-               ;;
-       crx)
-               basic_machine=crx-unknown
-               os=-elf
-               ;;
-       da30 | da30-*)
-               basic_machine=m68k-da30
-               ;;
-       decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
-               basic_machine=mips-dec
-               ;;
-       decsystem10* | dec10*)
-               basic_machine=pdp10-dec
-               os=-tops10
-               ;;
-       decsystem20* | dec20*)
-               basic_machine=pdp10-dec
-               os=-tops20
-               ;;
-       delta | 3300 | motorola-3300 | motorola-delta \
-             | 3300-motorola | delta-motorola)
-               basic_machine=m68k-motorola
-               ;;
-       delta88)
-               basic_machine=m88k-motorola
-               os=-sysv3
-               ;;
-       dicos)
-               basic_machine=i686-pc
-               os=-dicos
-               ;;
-       djgpp)
-               basic_machine=i586-pc
-               os=-msdosdjgpp
-               ;;
-       dpx20 | dpx20-*)
-               basic_machine=rs6000-bull
-               os=-bosx
-               ;;
-       dpx2* | dpx2*-bull)
-               basic_machine=m68k-bull
-               os=-sysv3
-               ;;
-       ebmon29k)
-               basic_machine=a29k-amd
-               os=-ebmon
-               ;;
-       elxsi)
-               basic_machine=elxsi-elxsi
-               os=-bsd
-               ;;
-       encore | umax | mmax)
-               basic_machine=ns32k-encore
-               ;;
-       es1800 | OSE68k | ose68k | ose | OSE)
-               basic_machine=m68k-ericsson
-               os=-ose
-               ;;
-       fx2800)
-               basic_machine=i860-alliant
-               ;;
-       genix)
-               basic_machine=ns32k-ns
-               ;;
-       gmicro)
-               basic_machine=tron-gmicro
-               os=-sysv
-               ;;
-       go32)
-               basic_machine=i386-pc
-               os=-go32
-               ;;
-       h3050r* | hiux*)
-               basic_machine=hppa1.1-hitachi
-               os=-hiuxwe2
-               ;;
-       h8300hms)
-               basic_machine=h8300-hitachi
-               os=-hms
-               ;;
-       h8300xray)
-               basic_machine=h8300-hitachi
-               os=-xray
-               ;;
-       h8500hms)
-               basic_machine=h8500-hitachi
-               os=-hms
-               ;;
-       harris)
-               basic_machine=m88k-harris
-               os=-sysv3
-               ;;
-       hp300-*)
-               basic_machine=m68k-hp
-               ;;
-       hp300bsd)
-               basic_machine=m68k-hp
-               os=-bsd
-               ;;
-       hp300hpux)
-               basic_machine=m68k-hp
-               os=-hpux
-               ;;
-       hp3k9[0-9][0-9] | hp9[0-9][0-9])
-               basic_machine=hppa1.0-hp
-               ;;
-       hp9k2[0-9][0-9] | hp9k31[0-9])
-               basic_machine=m68000-hp
-               ;;
-       hp9k3[2-9][0-9])
-               basic_machine=m68k-hp
-               ;;
-       hp9k6[0-9][0-9] | hp6[0-9][0-9])
-               basic_machine=hppa1.0-hp
-               ;;
-       hp9k7[0-79][0-9] | hp7[0-79][0-9])
-               basic_machine=hppa1.1-hp
-               ;;
-       hp9k78[0-9] | hp78[0-9])
-               # FIXME: really hppa2.0-hp
-               basic_machine=hppa1.1-hp
-               ;;
-       hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
-               # FIXME: really hppa2.0-hp
-               basic_machine=hppa1.1-hp
-               ;;
-       hp9k8[0-9][13679] | hp8[0-9][13679])
-               basic_machine=hppa1.1-hp
-               ;;
-       hp9k8[0-9][0-9] | hp8[0-9][0-9])
-               basic_machine=hppa1.0-hp
-               ;;
-       hppa-next)
-               os=-nextstep3
-               ;;
-       hppaosf)
-               basic_machine=hppa1.1-hp
-               os=-osf
-               ;;
-       hppro)
-               basic_machine=hppa1.1-hp
-               os=-proelf
-               ;;
-       i370-ibm* | ibm*)
-               basic_machine=i370-ibm
-               ;;
-       i*86v32)
-               basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
-               os=-sysv32
-               ;;
-       i*86v4*)
-               basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
-               os=-sysv4
-               ;;
-       i*86v)
-               basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
-               os=-sysv
-               ;;
-       i*86sol2)
-               basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
-               os=-solaris2
-               ;;
-       i386mach)
-               basic_machine=i386-mach
-               os=-mach
-               ;;
-       i386-vsta | vsta)
-               basic_machine=i386-unknown
-               os=-vsta
-               ;;
-       iris | iris4d)
-               basic_machine=mips-sgi
-               case $os in
-                   -irix*)
-                       ;;
-                   *)
-                       os=-irix4
-                       ;;
-               esac
-               ;;
-       isi68 | isi)
-               basic_machine=m68k-isi
-               os=-sysv
-               ;;
-       m68knommu)
-               basic_machine=m68k-unknown
-               os=-linux
-               ;;
-       m68knommu-*)
-               basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
-               os=-linux
-               ;;
-       m88k-omron*)
-               basic_machine=m88k-omron
-               ;;
-       magnum | m3230)
-               basic_machine=mips-mips
-               os=-sysv
-               ;;
-       merlin)
-               basic_machine=ns32k-utek
-               os=-sysv
-               ;;
-       microblaze*)
-               basic_machine=microblaze-xilinx
-               ;;
-       mingw64)
-               basic_machine=x86_64-pc
-               os=-mingw64
-               ;;
-       mingw32)
-               basic_machine=i686-pc
-               os=-mingw32
-               ;;
-       mingw32ce)
-               basic_machine=arm-unknown
-               os=-mingw32ce
-               ;;
-       miniframe)
-               basic_machine=m68000-convergent
-               ;;
-       *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
-               basic_machine=m68k-atari
-               os=-mint
-               ;;
-       mips3*-*)
-               basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
-               ;;
-       mips3*)
-               basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
-               ;;
-       monitor)
-               basic_machine=m68k-rom68k
-               os=-coff
-               ;;
-       morphos)
-               basic_machine=powerpc-unknown
-               os=-morphos
-               ;;
-       msdos)
-               basic_machine=i386-pc
-               os=-msdos
-               ;;
-       ms1-*)
-               basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
-               ;;
-       msys)
-               basic_machine=i686-pc
-               os=-msys
-               ;;
-       mvs)
-               basic_machine=i370-ibm
-               os=-mvs
-               ;;
-       nacl)
-               basic_machine=le32-unknown
-               os=-nacl
-               ;;
-       ncr3000)
-               basic_machine=i486-ncr
-               os=-sysv4
-               ;;
-       netbsd386)
-               basic_machine=i386-unknown
-               os=-netbsd
-               ;;
-       netwinder)
-               basic_machine=armv4l-rebel
-               os=-linux
-               ;;
-       news | news700 | news800 | news900)
-               basic_machine=m68k-sony
-               os=-newsos
-               ;;
-       news1000)
-               basic_machine=m68030-sony
-               os=-newsos
-               ;;
-       news-3600 | risc-news)
-               basic_machine=mips-sony
-               os=-newsos
-               ;;
-       necv70)
-               basic_machine=v70-nec
-               os=-sysv
-               ;;
-       next | m*-next )
-               basic_machine=m68k-next
-               case $os in
-                   -nextstep* )
-                       ;;
-                   -ns2*)
-                     os=-nextstep2
-                       ;;
-                   *)
-                     os=-nextstep3
-                       ;;
-               esac
-               ;;
-       nh3000)
-               basic_machine=m68k-harris
-               os=-cxux
-               ;;
-       nh[45]000)
-               basic_machine=m88k-harris
-               os=-cxux
-               ;;
-       nindy960)
-               basic_machine=i960-intel
-               os=-nindy
-               ;;
-       mon960)
-               basic_machine=i960-intel
-               os=-mon960
-               ;;
-       nonstopux)
-               basic_machine=mips-compaq
-               os=-nonstopux
-               ;;
-       np1)
-               basic_machine=np1-gould
-               ;;
-       neo-tandem)
-               basic_machine=neo-tandem
-               ;;
-       nse-tandem)
-               basic_machine=nse-tandem
-               ;;
-       nsr-tandem)
-               basic_machine=nsr-tandem
-               ;;
-       op50n-* | op60c-*)
-               basic_machine=hppa1.1-oki
-               os=-proelf
-               ;;
-       openrisc | openrisc-*)
-               basic_machine=or32-unknown
-               ;;
-       os400)
-               basic_machine=powerpc-ibm
-               os=-os400
-               ;;
-       OSE68000 | ose68000)
-               basic_machine=m68000-ericsson
-               os=-ose
-               ;;
-       os68k)
-               basic_machine=m68k-none
-               os=-os68k
-               ;;
-       pa-hitachi)
-               basic_machine=hppa1.1-hitachi
-               os=-hiuxwe2
-               ;;
-       paragon)
-               basic_machine=i860-intel
-               os=-osf
-               ;;
-       parisc)
-               basic_machine=hppa-unknown
-               os=-linux
-               ;;
-       parisc-*)
-               basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
-               os=-linux
-               ;;
-       pbd)
-               basic_machine=sparc-tti
-               ;;
-       pbb)
-               basic_machine=m68k-tti
-               ;;
-       pc532 | pc532-*)
-               basic_machine=ns32k-pc532
-               ;;
-       pc98)
-               basic_machine=i386-pc
-               ;;
-       pc98-*)
-               basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
-               ;;
-       pentium | p5 | k5 | k6 | nexgen | viac3)
-               basic_machine=i586-pc
-               ;;
-       pentiumpro | p6 | 6x86 | athlon | athlon_*)
-               basic_machine=i686-pc
-               ;;
-       pentiumii | pentium2 | pentiumiii | pentium3)
-               basic_machine=i686-pc
-               ;;
-       pentium4)
-               basic_machine=i786-pc
-               ;;
-       pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
-               basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
-               ;;
-       pentiumpro-* | p6-* | 6x86-* | athlon-*)
-               basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
-               ;;
-       pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
-               basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
-               ;;
-       pentium4-*)
-               basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
-               ;;
-       pn)
-               basic_machine=pn-gould
-               ;;
-       power)  basic_machine=power-ibm
-               ;;
-       ppc | ppcbe)    basic_machine=powerpc-unknown
-               ;;
-       ppc-* | ppcbe-*)
-               basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
-               ;;
-       ppcle | powerpclittle | ppc-le | powerpc-little)
-               basic_machine=powerpcle-unknown
-               ;;
-       ppcle-* | powerpclittle-*)
-               basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
-               ;;
-       ppc64)  basic_machine=powerpc64-unknown
-               ;;
-       ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
-               ;;
-       ppc64le | powerpc64little | ppc64-le | powerpc64-little)
-               basic_machine=powerpc64le-unknown
-               ;;
-       ppc64le-* | powerpc64little-*)
-               basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
-               ;;
-       ps2)
-               basic_machine=i386-ibm
-               ;;
-       pw32)
-               basic_machine=i586-unknown
-               os=-pw32
-               ;;
-       rdos | rdos64)
-               basic_machine=x86_64-pc
-               os=-rdos
-               ;;
-       rdos32)
-               basic_machine=i386-pc
-               os=-rdos
-               ;;
-       rom68k)
-               basic_machine=m68k-rom68k
-               os=-coff
-               ;;
-       rm[46]00)
-               basic_machine=mips-siemens
-               ;;
-       rtpc | rtpc-*)
-               basic_machine=romp-ibm
-               ;;
-       s390 | s390-*)
-               basic_machine=s390-ibm
-               ;;
-       s390x | s390x-*)
-               basic_machine=s390x-ibm
-               ;;
-       sa29200)
-               basic_machine=a29k-amd
-               os=-udi
-               ;;
-       sb1)
-               basic_machine=mipsisa64sb1-unknown
-               ;;
-       sb1el)
-               basic_machine=mipsisa64sb1el-unknown
-               ;;
-       sde)
-               basic_machine=mipsisa32-sde
-               os=-elf
-               ;;
-       sei)
-               basic_machine=mips-sei
-               os=-seiux
-               ;;
-       sequent)
-               basic_machine=i386-sequent
-               ;;
-       sh)
-               basic_machine=sh-hitachi
-               os=-hms
-               ;;
-       sh5el)
-               basic_machine=sh5le-unknown
-               ;;
-       sh64)
-               basic_machine=sh64-unknown
-               ;;
-       sparclite-wrs | simso-wrs)
-               basic_machine=sparclite-wrs
-               os=-vxworks
-               ;;
-       sps7)
-               basic_machine=m68k-bull
-               os=-sysv2
-               ;;
-       spur)
-               basic_machine=spur-unknown
-               ;;
-       st2000)
-               basic_machine=m68k-tandem
-               ;;
-       stratus)
-               basic_machine=i860-stratus
-               os=-sysv4
-               ;;
-       strongarm-* | thumb-*)
-               basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
-               ;;
-       sun2)
-               basic_machine=m68000-sun
-               ;;
-       sun2os3)
-               basic_machine=m68000-sun
-               os=-sunos3
-               ;;
-       sun2os4)
-               basic_machine=m68000-sun
-               os=-sunos4
-               ;;
-       sun3os3)
-               basic_machine=m68k-sun
-               os=-sunos3
-               ;;
-       sun3os4)
-               basic_machine=m68k-sun
-               os=-sunos4
-               ;;
-       sun4os3)
-               basic_machine=sparc-sun
-               os=-sunos3
-               ;;
-       sun4os4)
-               basic_machine=sparc-sun
-               os=-sunos4
-               ;;
-       sun4sol2)
-               basic_machine=sparc-sun
-               os=-solaris2
-               ;;
-       sun3 | sun3-*)
-               basic_machine=m68k-sun
-               ;;
-       sun4)
-               basic_machine=sparc-sun
-               ;;
-       sun386 | sun386i | roadrunner)
-               basic_machine=i386-sun
-               ;;
-       sv1)
-               basic_machine=sv1-cray
-               os=-unicos
-               ;;
-       symmetry)
-               basic_machine=i386-sequent
-               os=-dynix
-               ;;
-       t3e)
-               basic_machine=alphaev5-cray
-               os=-unicos
-               ;;
-       t90)
-               basic_machine=t90-cray
-               os=-unicos
-               ;;
-       tile*)
-               basic_machine=$basic_machine-unknown
-               os=-linux-gnu
-               ;;
-       tx39)
-               basic_machine=mipstx39-unknown
-               ;;
-       tx39el)
-               basic_machine=mipstx39el-unknown
-               ;;
-       toad1)
-               basic_machine=pdp10-xkl
-               os=-tops20
-               ;;
-       tower | tower-32)
-               basic_machine=m68k-ncr
-               ;;
-       tpf)
-               basic_machine=s390x-ibm
-               os=-tpf
-               ;;
-       udi29k)
-               basic_machine=a29k-amd
-               os=-udi
-               ;;
-       ultra3)
-               basic_machine=a29k-nyu
-               os=-sym1
-               ;;
-       v810 | necv810)
-               basic_machine=v810-nec
-               os=-none
-               ;;
-       vaxv)
-               basic_machine=vax-dec
-               os=-sysv
-               ;;
-       vms)
-               basic_machine=vax-dec
-               os=-vms
-               ;;
-       vpp*|vx|vx-*)
-               basic_machine=f301-fujitsu
-               ;;
-       vxworks960)
-               basic_machine=i960-wrs
-               os=-vxworks
-               ;;
-       vxworks68)
-               basic_machine=m68k-wrs
-               os=-vxworks
-               ;;
-       vxworks29k)
-               basic_machine=a29k-wrs
-               os=-vxworks
-               ;;
-       w65*)
-               basic_machine=w65-wdc
-               os=-none
-               ;;
-       w89k-*)
-               basic_machine=hppa1.1-winbond
-               os=-proelf
-               ;;
-       xbox)
-               basic_machine=i686-pc
-               os=-mingw32
-               ;;
-       xps | xps100)
-               basic_machine=xps100-honeywell
-               ;;
-       xscale-* | xscalee[bl]-*)
-               basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
-               ;;
-       ymp)
-               basic_machine=ymp-cray
-               os=-unicos
-               ;;
-       z8k-*-coff)
-               basic_machine=z8k-unknown
-               os=-sim
-               ;;
-       z80-*-coff)
-               basic_machine=z80-unknown
-               os=-sim
-               ;;
-       none)
-               basic_machine=none-none
-               os=-none
-               ;;
-
-# Here we handle the default manufacturer of certain CPU types.  It is in
-# some cases the only manufacturer, in others, it is the most popular.
-       w89k)
-               basic_machine=hppa1.1-winbond
-               ;;
-       op50n)
-               basic_machine=hppa1.1-oki
-               ;;
-       op60c)
-               basic_machine=hppa1.1-oki
-               ;;
-       romp)
-               basic_machine=romp-ibm
-               ;;
-       mmix)
-               basic_machine=mmix-knuth
-               ;;
-       rs6000)
-               basic_machine=rs6000-ibm
-               ;;
-       vax)
-               basic_machine=vax-dec
-               ;;
-       pdp10)
-               # there are many clones, so DEC is not a safe bet
-               basic_machine=pdp10-unknown
-               ;;
-       pdp11)
-               basic_machine=pdp11-dec
-               ;;
-       we32k)
-               basic_machine=we32k-att
-               ;;
-       sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
-               basic_machine=sh-unknown
-               ;;
-       sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
-               basic_machine=sparc-sun
-               ;;
-       cydra)
-               basic_machine=cydra-cydrome
-               ;;
-       orion)
-               basic_machine=orion-highlevel
-               ;;
-       orion105)
-               basic_machine=clipper-highlevel
-               ;;
-       mac | mpw | mac-mpw)
-               basic_machine=m68k-apple
-               ;;
-       pmac | pmac-mpw)
-               basic_machine=powerpc-apple
-               ;;
-       *-unknown)
-               # Make sure to match an already-canonicalized machine name.
-               ;;
-       *)
-               echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
-               exit 1
-               ;;
-esac
-
-# Here we canonicalize certain aliases for manufacturers.
-case $basic_machine in
-       *-digital*)
-               basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
-               ;;
-       *-commodore*)
-               basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
-               ;;
-       *)
-               ;;
-esac
-
-# Decode manufacturer-specific aliases for certain operating systems.
-
-if [ x"$os" != x"" ]
-then
-case $os in
-       # First match some system type aliases
-       # that might get confused with valid system types.
-       # -solaris* is a basic system type, with this one exception.
-       -auroraux)
-               os=-auroraux
-               ;;
-       -solaris1 | -solaris1.*)
-               os=`echo $os | sed -e 's|solaris1|sunos4|'`
-               ;;
-       -solaris)
-               os=-solaris2
-               ;;
-       -svr4*)
-               os=-sysv4
-               ;;
-       -unixware*)
-               os=-sysv4.2uw
-               ;;
-       -gnu/linux*)
-               os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
-               ;;
-       # First accept the basic system types.
-       # The portable systems comes first.
-       # Each alternative MUST END IN A *, to match a version number.
-       # -sysv* is not here because it comes later, after sysvr4.
-       -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
-             | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
-             | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
-             | -sym* | -kopensolaris* | -plan9* \
-             | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
-             | -aos* | -aros* \
-             | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
-             | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
-             | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
-             | -bitrig* | -openbsd* | -solidbsd* \
-             | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
-             | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
-             | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
-             | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
-             | -chorusos* | -chorusrdb* | -cegcc* \
-             | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
-             | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
-             | -linux-newlib* | -linux-musl* | -linux-uclibc* \
-             | -uxpv* | -beos* | -mpeix* | -udk* \
-             | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
-             | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
-             | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
-             | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
-             | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
-             | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
-             | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* | -tirtos*)
-       # Remember, each alternative MUST END IN *, to match a version number.
-               ;;
-       -qnx*)
-               case $basic_machine in
-                   x86-* | i*86-*)
-                       ;;
-                   *)
-                       os=-nto$os
-                       ;;
-               esac
-               ;;
-       -nto-qnx*)
-               ;;
-       -nto*)
-               os=`echo $os | sed -e 's|nto|nto-qnx|'`
-               ;;
-       -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
-             | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
-             | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
-               ;;
-       -mac*)
-               os=`echo $os | sed -e 's|mac|macos|'`
-               ;;
-       # Apple iOS
-       -ios*)
-               ;;
-       -linux-dietlibc)
-               os=-linux-dietlibc
-               ;;
-       -linux*)
-               os=`echo $os | sed -e 's|linux|linux-gnu|'`
-               ;;
-       -sunos5*)
-               os=`echo $os | sed -e 's|sunos5|solaris2|'`
-               ;;
-       -sunos6*)
-               os=`echo $os | sed -e 's|sunos6|solaris3|'`
-               ;;
-       -opened*)
-               os=-openedition
-               ;;
-       -os400*)
-               os=-os400
-               ;;
-       -wince*)
-               os=-wince
-               ;;
-       -osfrose*)
-               os=-osfrose
-               ;;
-       -osf*)
-               os=-osf
-               ;;
-       -utek*)
-               os=-bsd
-               ;;
-       -dynix*)
-               os=-bsd
-               ;;
-       -acis*)
-               os=-aos
-               ;;
-       -atheos*)
-               os=-atheos
-               ;;
-       -syllable*)
-               os=-syllable
-               ;;
-       -386bsd)
-               os=-bsd
-               ;;
-       -ctix* | -uts*)
-               os=-sysv
-               ;;
-       -nova*)
-               os=-rtmk-nova
-               ;;
-       -ns2 )
-               os=-nextstep2
-               ;;
-       -nsk*)
-               os=-nsk
-               ;;
-       # Preserve the version number of sinix5.
-       -sinix5.*)
-               os=`echo $os | sed -e 's|sinix|sysv|'`
-               ;;
-       -sinix*)
-               os=-sysv4
-               ;;
-       -tpf*)
-               os=-tpf
-               ;;
-       -triton*)
-               os=-sysv3
-               ;;
-       -oss*)
-               os=-sysv3
-               ;;
-       -svr4)
-               os=-sysv4
-               ;;
-       -svr3)
-               os=-sysv3
-               ;;
-       -sysvr4)
-               os=-sysv4
-               ;;
-       # This must come after -sysvr4.
-       -sysv*)
-               ;;
-       -ose*)
-               os=-ose
-               ;;
-       -es1800*)
-               os=-ose
-               ;;
-       -xenix)
-               os=-xenix
-               ;;
-       -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
-               os=-mint
-               ;;
-       -aros*)
-               os=-aros
-               ;;
-       -zvmoe)
-               os=-zvmoe
-               ;;
-       -dicos*)
-               os=-dicos
-               ;;
-       -nacl*)
-               ;;
-       -none)
-               ;;
-       *)
-               # Get rid of the `-' at the beginning of $os.
-               os=`echo $os | sed 's/[^-]*-//'`
-               echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
-               exit 1
-               ;;
-esac
-else
-
-# Here we handle the default operating systems that come with various machines.
-# The value should be what the vendor currently ships out the door with their
-# machine or put another way, the most popular os provided with the machine.
-
-# Note that if you're going to try to match "-MANUFACTURER" here (say,
-# "-sun"), then you have to tell the case statement up towards the top
-# that MANUFACTURER isn't an operating system.  Otherwise, code above
-# will signal an error saying that MANUFACTURER isn't an operating
-# system, and we'll never get to this point.
-
-case $basic_machine in
-       score-*)
-               os=-elf
-               ;;
-       spu-*)
-               os=-elf
-               ;;
-       *-acorn)
-               os=-riscix1.2
-               ;;
-       arm*-rebel)
-               os=-linux
-               ;;
-       arm*-semi)
-               os=-aout
-               ;;
-       c4x-* | tic4x-*)
-               os=-coff
-               ;;
-       c8051-*)
-               os=-elf
-               ;;
-       hexagon-*)
-               os=-elf
-               ;;
-       tic54x-*)
-               os=-coff
-               ;;
-       tic55x-*)
-               os=-coff
-               ;;
-       tic6x-*)
-               os=-coff
-               ;;
-       # This must come before the *-dec entry.
-       pdp10-*)
-               os=-tops20
-               ;;
-       pdp11-*)
-               os=-none
-               ;;
-       *-dec | vax-*)
-               os=-ultrix4.2
-               ;;
-       m68*-apollo)
-               os=-domain
-               ;;
-       i386-sun)
-               os=-sunos4.0.2
-               ;;
-       m68000-sun)
-               os=-sunos3
-               ;;
-       m68*-cisco)
-               os=-aout
-               ;;
-       mep-*)
-               os=-elf
-               ;;
-       mips*-cisco)
-               os=-elf
-               ;;
-       mips*-*)
-               os=-elf
-               ;;
-       or32-*)
-               os=-coff
-               ;;
-       *-tti)  # must be before sparc entry or we get the wrong os.
-               os=-sysv3
-               ;;
-       sparc-* | *-sun)
-               os=-sunos4.1.1
-               ;;
-       *-be)
-               os=-beos
-               ;;
-       *-haiku)
-               os=-haiku
-               ;;
-       *-ibm)
-               os=-aix
-               ;;
-       *-knuth)
-               os=-mmixware
-               ;;
-       *-wec)
-               os=-proelf
-               ;;
-       *-winbond)
-               os=-proelf
-               ;;
-       *-oki)
-               os=-proelf
-               ;;
-       *-hp)
-               os=-hpux
-               ;;
-       *-hitachi)
-               os=-hiux
-               ;;
-       i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
-               os=-sysv
-               ;;
-       *-cbm)
-               os=-amigaos
-               ;;
-       *-dg)
-               os=-dgux
-               ;;
-       *-dolphin)
-               os=-sysv3
-               ;;
-       m68k-ccur)
-               os=-rtu
-               ;;
-       m88k-omron*)
-               os=-luna
-               ;;
-       *-next )
-               os=-nextstep
-               ;;
-       *-sequent)
-               os=-ptx
-               ;;
-       *-crds)
-               os=-unos
-               ;;
-       *-ns)
-               os=-genix
-               ;;
-       i370-*)
-               os=-mvs
-               ;;
-       *-next)
-               os=-nextstep3
-               ;;
-       *-gould)
-               os=-sysv
-               ;;
-       *-highlevel)
-               os=-bsd
-               ;;
-       *-encore)
-               os=-bsd
-               ;;
-       *-sgi)
-               os=-irix
-               ;;
-       *-siemens)
-               os=-sysv4
-               ;;
-       *-masscomp)
-               os=-rtu
-               ;;
-       f30[01]-fujitsu | f700-fujitsu)
-               os=-uxpv
-               ;;
-       *-rom68k)
-               os=-coff
-               ;;
-       *-*bug)
-               os=-coff
-               ;;
-       *-apple)
-               os=-macos
-               ;;
-       *-atari*)
-               os=-mint
-               ;;
-       *)
-               os=-none
-               ;;
-esac
-fi
-
-# Here we handle the case where we know the os, and the CPU type, but not the
-# manufacturer.  We pick the logical manufacturer.
-vendor=unknown
-case $basic_machine in
-       *-unknown)
-               case $os in
-                       -riscix*)
-                               vendor=acorn
-                               ;;
-                       -sunos*)
-                               vendor=sun
-                               ;;
-                       -cnk*|-aix*)
-                               vendor=ibm
-                               ;;
-                       -beos*)
-                               vendor=be
-                               ;;
-                       -hpux*)
-                               vendor=hp
-                               ;;
-                       -mpeix*)
-                               vendor=hp
-                               ;;
-                       -hiux*)
-                               vendor=hitachi
-                               ;;
-                       -unos*)
-                               vendor=crds
-                               ;;
-                       -dgux*)
-                               vendor=dg
-                               ;;
-                       -luna*)
-                               vendor=omron
-                               ;;
-                       -genix*)
-                               vendor=ns
-                               ;;
-                       -mvs* | -opened*)
-                               vendor=ibm
-                               ;;
-                       -os400*)
-                               vendor=ibm
-                               ;;
-                       -ptx*)
-                               vendor=sequent
-                               ;;
-                       -tpf*)
-                               vendor=ibm
-                               ;;
-                       -vxsim* | -vxworks* | -windiss*)
-                               vendor=wrs
-                               ;;
-                       -aux*)
-                               vendor=apple
-                               ;;
-                       -hms*)
-                               vendor=hitachi
-                               ;;
-                       -mpw* | -macos*)
-                               vendor=apple
-                               ;;
-                       -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
-                               vendor=atari
-                               ;;
-                       -vos*)
-                               vendor=stratus
-                               ;;
-               esac
-               basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
-               ;;
-esac
-
-echo $basic_machine$os
-exit
-
-# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
-# time-stamp-start: "timestamp='"
-# time-stamp-format: "%:y-%02m-%02d"
-# time-stamp-end: "'"
-# End:
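The block of removed lines above is the old copy of GNU config.sub, the helper that canonicalizes abbreviated machine aliases into full cpu-vendor-os triplets; the configure hunks further down now look for it under build-aux/ instead. As a rough illustration of what the deleted script does, using aliases taken from its own case tables (the invocation path is an assumption):

    sh ./config.sub sun4sol2      # -> sparc-sun-solaris2   (the alias supplies cpu, vendor and os)
    sh ./config.sub go32          # -> i386-pc-go32
    sh ./config.sub netbsd386     # -> i386-unknown-netbsd  (no vendor default for -netbsd, so "unknown" stays)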
index 5a2b1c91754da3fb4f33a15fe6fcb657e0a4463a..649096c90941af077b1fb304dba97b764ea27e25 100755 (executable)
@@ -628,12 +628,14 @@ cfghdrs_in
 enable_zone_allocator
 enable_tls
 enable_lazy_lock
+TESTLIBS
 jemalloc_version_gid
 jemalloc_version_nrev
 jemalloc_version_bugfix
 jemalloc_version_minor
 jemalloc_version_major
 jemalloc_version
+enable_cache_oblivious
 enable_xmalloc
 enable_valgrind
 enable_utrace
@@ -646,6 +648,7 @@ enable_debug
 je_
 install_suffix
 private_namespace
+JEMALLOC_CPREFIX
 enable_code_coverage
 AUTOCONF
 LD
@@ -706,6 +709,7 @@ objroot
 abs_srcroot
 srcroot
 rev
+CONFIG
 target_alias
 host_alias
 build_alias
@@ -756,6 +760,7 @@ with_jemalloc_prefix
 with_export
 with_private_namespace
 with_install_suffix
+with_malloc_conf
 enable_cc_silence
 enable_debug
 enable_ivsalloc
@@ -771,6 +776,12 @@ enable_fill
 enable_utrace
 enable_valgrind
 enable_xmalloc
+enable_cache_oblivious
+with_lg_tiny_min
+with_lg_quantum
+with_lg_page
+with_lg_page_sizes
+with_lg_size_class_group
 enable_lazy_lock
 enable_tls
 enable_zone_allocator
@@ -1412,6 +1423,9 @@ Optional Features:
   --enable-utrace         Enable utrace(2)-based tracing
   --disable-valgrind      Disable support for Valgrind
   --enable-xmalloc        Support xmalloc option
+  --disable-cache-oblivious
+                          Disable support for cache-oblivious allocation
+                          alignment
   --enable-lazy-lock      Enable lazy locking (only lock when multi-threaded)
   --disable-tls           Disable thread-local storage (__thread keyword)
   --disable-zone-allocator
@@ -1430,9 +1444,21 @@ Optional Packages:
                           Prefix to prepend to all library-private APIs
   --with-install-suffix=<suffix>
                           Suffix to append to all installed files
+  --with-malloc-conf=<malloc_conf>
+                          config.malloc_conf options string
   --with-static-libunwind=<libunwind.a>
                           Path to static libunwind library; use rather than
                           dynamically linking
+  --with-lg-tiny-min=<lg-tiny-min>
+                          Base 2 log of minimum tiny size class to support
+  --with-lg-quantum=<lg-quantum>
+                          Base 2 log of minimum allocation alignment
+  --with-lg-page=<lg-page>
+                          Base 2 log of system page size
+  --with-lg-page-sizes=<lg-page-sizes>
+                          Base 2 logs of system page sizes to support
+  --with-lg-size-class-group=<lg-size-class-group>
+                          Base 2 log of size classes per doubling
 
 Some influential environment variables:
   CC          C compiler command
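The two help-text hunks above document configure switches that are new in this import. A hypothetical invocation exercising them; every value shown is illustrative only, not a recommendation:

    ./configure --with-malloc-conf="narenas:1" \
                --with-lg-page=12 \
                --with-lg-quantum=4 \
                --disable-cache-oblivious
    # lg-page=12 means 2^12 = 4096-byte pages and lg-quantum=4 means 2^4 = 16-byte
    # minimum allocation alignment, per the "Base 2 log" descriptions above.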
@@ -2461,11 +2487,44 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
 
 
 
+ac_aux_dir=
+for ac_dir in build-aux "$srcdir"/build-aux; do
+  if test -f "$ac_dir/install-sh"; then
+    ac_aux_dir=$ac_dir
+    ac_install_sh="$ac_aux_dir/install-sh -c"
+    break
+  elif test -f "$ac_dir/install.sh"; then
+    ac_aux_dir=$ac_dir
+    ac_install_sh="$ac_aux_dir/install.sh -c"
+    break
+  elif test -f "$ac_dir/shtool"; then
+    ac_aux_dir=$ac_dir
+    ac_install_sh="$ac_aux_dir/shtool install -c"
+    break
+  fi
+done
+if test -z "$ac_aux_dir"; then
+  as_fn_error $? "cannot find install-sh, install.sh, or shtool in build-aux \"$srcdir\"/build-aux" "$LINENO" 5
+fi
+
+# These three variables are undocumented and unsupported,
+# and are intended to be withdrawn in a future Autoconf release.
+# They can cause serious problems if a builder's source tree is in a directory
+# whose full name contains unusual characters.
+ac_config_guess="$SHELL $ac_aux_dir/config.guess"  # Please don't use this var.
+ac_config_sub="$SHELL $ac_aux_dir/config.sub"  # Please don't use this var.
+ac_configure="$SHELL $ac_aux_dir/configure"  # Please don't use this var.
+
+
+
+
 
 
 
 
 
+CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'`
+
 
 rev=2
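The added CONFIG= assignment records the configure arguments with the shell quoting stripped. A minimal sketch of its effect, with made-up argument values:

    ac_configure_args=" '--prefix=/usr' '--enable-debug'"
    CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'`
    echo "$CONFIG"      # -> --prefix=/usr --enable-debug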
 
@@ -3479,6 +3538,78 @@ fi
 rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
 
 
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror=declaration-after-statement" >&5
+$as_echo_n "checking whether compiler supports -Werror=declaration-after-statement... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+  CFLAGS="-Werror=declaration-after-statement"
+else
+  CFLAGS="${CFLAGS} -Werror=declaration-after-statement"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+
+int
+main ()
+{
+
+    return 0;
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  je_cv_cflags_appended=-Werror=declaration-after-statement
+              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+  je_cv_cflags_appended=
+              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+              CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wshorten-64-to-32" >&5
+$as_echo_n "checking whether compiler supports -Wshorten-64-to-32... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+  CFLAGS="-Wshorten-64-to-32"
+else
+  CFLAGS="${CFLAGS} -Wshorten-64-to-32"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+
+int
+main ()
+{
+
+    return 0;
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  je_cv_cflags_appended=-Wshorten-64-to-32
+              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+  je_cv_cflags_appended=
+              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+              CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -pipe" >&5
 $as_echo_n "checking whether compiler supports -pipe... " >&6; }
 TCFLAGS="${CFLAGS}"
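The hunk above adds two more instances of the probe pattern configure already uses for -pipe: tentatively append a warning flag to CFLAGS, compile an empty program, and restore the previous CFLAGS if the compile fails. A hand-run equivalent of one probe, assuming cc as the compiler:

    printf 'int main(void) { return 0; }\n' > conftest.c
    if cc -Werror=declaration-after-statement -c conftest.c -o conftest.o 2>/dev/null; then
        CFLAGS="${CFLAGS} -Werror=declaration-after-statement"   # flag accepted, keep it
    fi                                                           # otherwise CFLAGS stays as it was
    rm -f conftest.c conftest.o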
@@ -4369,7 +4500,12 @@ if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then
   CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat/C99"
 fi
 
-# The cast to long int works around a bug in the HP C Compiler
+if test "x${je_cv_msvc}" = "xyes" ; then
+  LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit" >&5
+$as_echo "Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit" >&6; }
+else
+  # The cast to long int works around a bug in the HP C Compiler
 # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
 # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
 # This bug is HP SR number 8606223364.
@@ -4402,12 +4538,13 @@ cat >>confdefs.h <<_ACEOF
 _ACEOF
 
 
-if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
-  LG_SIZEOF_PTR=3
-elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
-  LG_SIZEOF_PTR=2
-else
-  as_fn_error $? "Unsupported pointer size: ${ac_cv_sizeof_void_p}" "$LINENO" 5
+  if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
+    LG_SIZEOF_PTR=3
+  elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
+    LG_SIZEOF_PTR=2
+  else
+    as_fn_error $? "Unsupported pointer size: ${ac_cv_sizeof_void_p}" "$LINENO" 5
+  fi
 fi
 cat >>confdefs.h <<_ACEOF
 #define LG_SIZEOF_PTR $LG_SIZEOF_PTR
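For orientation, the LG_SIZEOF_PTR block re-indented above encodes the base-2 log of the probed pointer size, and MSVC builds now take the predefined LG_SIZEOF_PTR_WIN shortcut instead of running the sizeof probe. The mapping, spelled out:

    # sizeof(void *) = 8 = 2^3  ->  LG_SIZEOF_PTR=3
    # sizeof(void *) = 4 = 2^2  ->  LG_SIZEOF_PTR=2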
@@ -4504,6 +4641,51 @@ cat >>confdefs.h <<_ACEOF
 _ACEOF
 
 
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long long" >&5
+$as_echo_n "checking size of long long... " >&6; }
+if ${ac_cv_sizeof_long_long+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long long))" "ac_cv_sizeof_long_long"        "$ac_includes_default"; then :
+
+else
+  if test "$ac_cv_type_long_long" = yes; then
+     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (long long)
+See \`config.log' for more details" "$LINENO" 5; }
+   else
+     ac_cv_sizeof_long_long=0
+   fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_long" >&5
+$as_echo "$ac_cv_sizeof_long_long" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_LONG_LONG $ac_cv_sizeof_long_long
+_ACEOF
+
+
+if test "x${ac_cv_sizeof_long_long}" = "x8" ; then
+  LG_SIZEOF_LONG_LONG=3
+elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then
+  LG_SIZEOF_LONG_LONG=2
+else
+  as_fn_error $? "Unsupported long long size: ${ac_cv_sizeof_long_long}" "$LINENO" 5
+fi
+cat >>confdefs.h <<_ACEOF
+#define LG_SIZEOF_LONG_LONG $LG_SIZEOF_LONG_LONG
+_ACEOF
+
+
 # The cast to long int works around a bug in the HP C Compiler
 # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
 # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
@@ -4551,35 +4733,6 @@ cat >>confdefs.h <<_ACEOF
 _ACEOF
 
 
-ac_aux_dir=
-for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do
-  if test -f "$ac_dir/install-sh"; then
-    ac_aux_dir=$ac_dir
-    ac_install_sh="$ac_aux_dir/install-sh -c"
-    break
-  elif test -f "$ac_dir/install.sh"; then
-    ac_aux_dir=$ac_dir
-    ac_install_sh="$ac_aux_dir/install.sh -c"
-    break
-  elif test -f "$ac_dir/shtool"; then
-    ac_aux_dir=$ac_dir
-    ac_install_sh="$ac_aux_dir/shtool install -c"
-    break
-  fi
-done
-if test -z "$ac_aux_dir"; then
-  as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5
-fi
-
-# These three variables are undocumented and unsupported,
-# and are intended to be withdrawn in a future Autoconf release.
-# They can cause serious problems if a builder's source tree is in a directory
-# whose full name contains unusual characters.
-ac_config_guess="$SHELL $ac_aux_dir/config.guess"  # Please don't use this var.
-ac_config_sub="$SHELL $ac_aux_dir/config.sub"  # Please don't use this var.
-ac_configure="$SHELL $ac_aux_dir/configure"  # Please don't use this var.
-
-
 # Make sure we can run config.sub.
 $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
   as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
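The sanity check above runs config.sub on the alias sun4. Per the case tables of the copy removed earlier in this diff, that expands as follows (the build-aux path is illustrative):

    sh build-aux/config.sub sun4      # -> sparc-sun-sunos4.1.1
    # sun4 selects sparc-sun, and the "sparc-* | *-sun" default-OS entry supplies -sunos4.1.1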
@@ -4653,13 +4806,15 @@ case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
 
 CPU_SPINWAIT=""
 case "${host_cpu}" in
-  i[345]86)
-       ;;
   i686|x86_64)
+       if test "x${je_cv_msvc}" = "xyes" ; then
+           if ${je_cv_pause_msvc+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
 
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction is compilable" >&5
-$as_echo_n "checking whether pause instruction is compilable... " >&6; }
-if ${je_cv_pause+:} false; then :
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction MSVC is compilable" >&5
+$as_echo_n "checking whether pause instruction MSVC is compilable... " >&6; }
+if ${je_cv_pause_msvc+:} false; then :
   $as_echo_n "(cached) " >&6
 else
   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -4668,60 +4823,64 @@ else
 int
 main ()
 {
-__asm__ volatile("pause"); return 0;
+_mm_pause(); return 0;
   ;
   return 0;
 }
 _ACEOF
 if ac_fn_c_try_link "$LINENO"; then :
-  je_cv_pause=yes
+  je_cv_pause_msvc=yes
 else
-  je_cv_pause=no
+  je_cv_pause_msvc=no
 fi
 rm -f core conftest.err conftest.$ac_objext \
     conftest$ac_exeext conftest.$ac_ext
 fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause" >&5
-$as_echo "$je_cv_pause" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause_msvc" >&5
+$as_echo "$je_cv_pause_msvc" >&6; }
 
-       if test "x${je_cv_pause}" = "xyes" ; then
-           CPU_SPINWAIT='__asm__ volatile("pause")'
-       fi
+fi
+
+           if test "x${je_cv_pause_msvc}" = "xyes" ; then
+               CPU_SPINWAIT='_mm_pause()'
+           fi
+       else
+           if ${je_cv_pause+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
 
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether SSE2 intrinsics is compilable" >&5
-$as_echo_n "checking whether SSE2 intrinsics is compilable... " >&6; }
-if ${je_cv_sse2+:} false; then :
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction is compilable" >&5
+$as_echo_n "checking whether pause instruction is compilable... " >&6; }
+if ${je_cv_pause+:} false; then :
   $as_echo_n "(cached) " >&6
 else
   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
 
-#include <emmintrin.h>
-
 int
 main ()
 {
-
+__asm__ volatile("pause"); return 0;
   ;
   return 0;
 }
 _ACEOF
 if ac_fn_c_try_link "$LINENO"; then :
-  je_cv_sse2=yes
+  je_cv_pause=yes
 else
-  je_cv_sse2=no
+  je_cv_pause=no
 fi
 rm -f core conftest.err conftest.$ac_objext \
     conftest$ac_exeext conftest.$ac_ext
 fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_sse2" >&5
-$as_echo "$je_cv_sse2" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause" >&5
+$as_echo "$je_cv_pause" >&6; }
 
-       if test "x${je_cv_sse2}" = "xyes" ; then
-         cat >>confdefs.h <<_ACEOF
-#define HAVE_SSE2
-_ACEOF
+fi
 
+           if test "x${je_cv_pause}" = "xyes" ; then
+               CPU_SPINWAIT='__asm__ volatile("pause")'
+           fi
        fi
        ;;
   powerpc)
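The hunk above splits the old single pause probe into an MSVC branch and a GCC-style branch. Stripped of the compile tests, the outcome amounts to the following sketch (not the literal generated code):

    if test "x${je_cv_msvc}" = "xyes" ; then
        CPU_SPINWAIT='_mm_pause()'                   # MSVC intrinsic, if it links
    else
        CPU_SPINWAIT='__asm__ volatile("pause")'     # GNU-style inline asm, if it links
    fi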
@@ -4853,6 +5012,7 @@ fi
 
 
 default_munmap="1"
+maps_coalesce="1"
 case "${host}" in
   *-*-darwin* | *-*-ios*)
        CFLAGS="$CFLAGS"
@@ -4864,7 +5024,7 @@ case "${host}" in
        so="dylib"
        importlib="${so}"
        force_tls="0"
-       DSO_LDFLAGS='-shared -Wl,-dylib_install_name,$(@F)'
+       DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)'
        SOREV="${rev}.${so}"
        sbrk_deprecated="1"
        ;;
@@ -4881,7 +5041,14 @@ case "${host}" in
        $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE  " >>confdefs.h
 
        ;;
-  *-*-openbsd*|*-*-bitrig*)
+  *-*-openbsd*)
+       CFLAGS="$CFLAGS"
+       abi="elf"
+       $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE  " >>confdefs.h
+
+       force_tls="0"
+       ;;
+  *-*-bitrig*)
        CFLAGS="$CFLAGS"
        abi="elf"
        $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE  " >>confdefs.h
@@ -4897,6 +5064,8 @@ case "${host}" in
 
        $as_echo "#define JEMALLOC_THREADED_INIT  " >>confdefs.h
 
+       $as_echo "#define JEMALLOC_USE_CXX_THROW  " >>confdefs.h
+
        default_munmap="0"
        ;;
   *-*-netbsd*)
@@ -4949,6 +5118,8 @@ $as_echo "$abi" >&6; }
   *-*-mingw* | *-*-cygwin*)
        abi="pecoff"
        force_tls="0"
+       force_lazy_lock="1"
+       maps_coalesce="0"
        RPATH=""
        so="dll"
        if test "x$je_cv_msvc" = "xyes" ; then
@@ -5045,45 +5216,229 @@ _ACEOF
 
 
 
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __attribute__ syntax is compilable" >&5
-$as_echo_n "checking whether __attribute__ syntax is compilable... " >&6; }
-if ${je_cv_attribute+:} false; then :
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __attribute__ syntax is compilable" >&5
+$as_echo_n "checking whether __attribute__ syntax is compilable... " >&6; }
+if ${je_cv_attribute+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+static __attribute__((unused)) void foo(void){}
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  je_cv_attribute=yes
+else
+  je_cv_attribute=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_attribute" >&5
+$as_echo "$je_cv_attribute" >&6; }
+
+if test "x${je_cv_attribute}" = "xyes" ; then
+  $as_echo "#define JEMALLOC_HAVE_ATTR  " >>confdefs.h
+
+  if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fvisibility=hidden" >&5
+$as_echo_n "checking whether compiler supports -fvisibility=hidden... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+  CFLAGS="-fvisibility=hidden"
+else
+  CFLAGS="${CFLAGS} -fvisibility=hidden"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+
+int
+main ()
+{
+
+    return 0;
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  je_cv_cflags_appended=-fvisibility=hidden
+              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+  je_cv_cflags_appended=
+              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+              CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+  fi
+fi
+SAVED_CFLAGS="${CFLAGS}"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
+$as_echo_n "checking whether compiler supports -Werror... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+  CFLAGS="-Werror"
+else
+  CFLAGS="${CFLAGS} -Werror"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+
+int
+main ()
+{
+
+    return 0;
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  je_cv_cflags_appended=-Werror
+              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+  je_cv_cflags_appended=
+              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+              CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether tls_model attribute is compilable" >&5
+$as_echo_n "checking whether tls_model attribute is compilable... " >&6; }
+if ${je_cv_tls_model+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+static __thread int
+               __attribute__((tls_model("initial-exec"), unused)) foo;
+               foo = 0;
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  je_cv_tls_model=yes
+else
+  je_cv_tls_model=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_tls_model" >&5
+$as_echo "$je_cv_tls_model" >&6; }
+
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_tls_model}" = "xyes" ; then
+  $as_echo "#define JEMALLOC_TLS_MODEL __attribute__((tls_model(\"initial-exec\")))" >>confdefs.h
+
+else
+  $as_echo "#define JEMALLOC_TLS_MODEL  " >>confdefs.h
+
+fi
+SAVED_CFLAGS="${CFLAGS}"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
+$as_echo_n "checking whether compiler supports -Werror... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+  CFLAGS="-Werror"
+else
+  CFLAGS="${CFLAGS} -Werror"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+
+int
+main ()
+{
+
+    return 0;
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  je_cv_cflags_appended=-Werror
+              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+  je_cv_cflags_appended=
+              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+              CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether alloc_size attribute is compilable" >&5
+$as_echo_n "checking whether alloc_size attribute is compilable... " >&6; }
+if ${je_cv_alloc_size+:} false; then :
   $as_echo_n "(cached) " >&6
 else
   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
-static __attribute__((unused)) void foo(void){}
+#include <stdlib.h>
 int
 main ()
 {
-
+void *foo(size_t size) __attribute__((alloc_size(1)));
   ;
   return 0;
 }
 _ACEOF
 if ac_fn_c_try_link "$LINENO"; then :
-  je_cv_attribute=yes
+  je_cv_alloc_size=yes
 else
-  je_cv_attribute=no
+  je_cv_alloc_size=no
 fi
 rm -f core conftest.err conftest.$ac_objext \
     conftest$ac_exeext conftest.$ac_ext
 fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_attribute" >&5
-$as_echo "$je_cv_attribute" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_alloc_size" >&5
+$as_echo "$je_cv_alloc_size" >&6; }
 
-if test "x${je_cv_attribute}" = "xyes" ; then
-  $as_echo "#define JEMALLOC_HAVE_ATTR  " >>confdefs.h
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_alloc_size}" = "xyes" ; then
+  $as_echo "#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE  " >>confdefs.h
 
-  if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then
+fi
+SAVED_CFLAGS="${CFLAGS}"
 
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fvisibility=hidden" >&5
-$as_echo_n "checking whether compiler supports -fvisibility=hidden... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
+$as_echo_n "checking whether compiler supports -Werror... " >&6; }
 TCFLAGS="${CFLAGS}"
 if test "x${CFLAGS}" = "x" ; then
-  CFLAGS="-fvisibility=hidden"
+  CFLAGS="-Werror"
 else
-  CFLAGS="${CFLAGS} -fvisibility=hidden"
+  CFLAGS="${CFLAGS} -Werror"
 fi
 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
@@ -5100,7 +5455,7 @@ main ()
 }
 _ACEOF
 if ac_fn_c_try_compile "$LINENO"; then :
-  je_cv_cflags_appended=-fvisibility=hidden
+  je_cv_cflags_appended=-Werror
               { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
 $as_echo "yes" >&6; }
 else
@@ -5112,7 +5467,38 @@ $as_echo "no" >&6; }
 fi
 rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
 
-  fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(gnu_printf, ...) attribute is compilable" >&5
+$as_echo_n "checking whether format(gnu_printf, ...) attribute is compilable... " >&6; }
+if ${je_cv_format_gnu_printf+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdlib.h>
+int
+main ()
+{
+void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  je_cv_format_gnu_printf=yes
+else
+  je_cv_format_gnu_printf=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_gnu_printf" >&5
+$as_echo "$je_cv_format_gnu_printf" >&6; }
+
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_format_gnu_printf}" = "xyes" ; then
+  $as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF  " >>confdefs.h
+
 fi
 SAVED_CFLAGS="${CFLAGS}"
 
@@ -5152,41 +5538,36 @@ fi
 rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
 
 
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether tls_model attribute is compilable" >&5
-$as_echo_n "checking whether tls_model attribute is compilable... " >&6; }
-if ${je_cv_tls_model+:} false; then :
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(printf, ...) attribute is compilable" >&5
+$as_echo_n "checking whether format(printf, ...) attribute is compilable... " >&6; }
+if ${je_cv_format_printf+:} false; then :
   $as_echo_n "(cached) " >&6
 else
   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
-
+#include <stdlib.h>
 int
 main ()
 {
-static __thread int
-               __attribute__((tls_model("initial-exec"), unused)) foo;
-               foo = 0;
+void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));
   ;
   return 0;
 }
 _ACEOF
 if ac_fn_c_try_link "$LINENO"; then :
-  je_cv_tls_model=yes
+  je_cv_format_printf=yes
 else
-  je_cv_tls_model=no
+  je_cv_format_printf=no
 fi
 rm -f core conftest.err conftest.$ac_objext \
     conftest$ac_exeext conftest.$ac_ext
 fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_tls_model" >&5
-$as_echo "$je_cv_tls_model" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_printf" >&5
+$as_echo "$je_cv_format_printf" >&6; }
 
 CFLAGS="${SAVED_CFLAGS}"
-if test "x${je_cv_tls_model}" = "xyes" ; then
-  $as_echo "#define JEMALLOC_TLS_MODEL __attribute__((tls_model(\"initial-exec\")))" >>confdefs.h
-
-else
-  $as_echo "#define JEMALLOC_TLS_MODEL  " >>confdefs.h
+if test "x${je_cv_format_printf}" = "xyes" ; then
+  $as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF  " >>confdefs.h
 
 fi
 
@@ -5637,6 +6018,7 @@ _ACEOF
 fi
 
 
+
 # Check whether --with-export was given.
 if test "${with_export+set}" = set; then :
   withval=$with_export; if test "x$with_export" = "xno"; then
@@ -5675,6 +6057,21 @@ fi
 install_suffix="$INSTALL_SUFFIX"
 
 
+
+# Check whether --with-malloc_conf was given.
+if test "${with_malloc_conf+set}" = set; then :
+  withval=$with_malloc_conf; JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf"
+else
+  JEMALLOC_CONFIG_MALLOC_CONF=""
+
+fi
+
+config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF"
+cat >>confdefs.h <<_ACEOF
+#define JEMALLOC_CONFIG_MALLOC_CONF "$config_malloc_conf"
+_ACEOF
+
+
 je_="je_"
 
 
@@ -5777,6 +6174,10 @@ else
 
 fi
 
+if test "x$enable_debug" = "x1" ; then
+  $as_echo "#define JEMALLOC_DEBUG  " >>confdefs.h
+
+fi
 if test "x$enable_debug" = "x1" ; then
   $as_echo "#define JEMALLOC_DEBUG  " >>confdefs.h
 
@@ -6267,6 +6668,11 @@ if test "x$enable_tcache" = "x1" ; then
 fi
 
 
+if test "x${maps_coalesce}" = "x1" ; then
+  $as_echo "#define JEMALLOC_MAPS_COALESCE  " >>confdefs.h
+
+fi
+
 # Check whether --enable-munmap was given.
 if test "${enable_munmap+set}" = set; then :
   enableval=$enable_munmap; if test "x$enable_munmap" = "xno" ; then
@@ -6464,6 +6870,25 @@ if test "x$enable_xmalloc" = "x1" ; then
 fi
 
 
+# Check whether --enable-cache-oblivious was given.
+if test "${enable_cache_oblivious+set}" = set; then :
+  enableval=$enable_cache_oblivious; if test "x$enable_cache_oblivious" = "xno" ; then
+  enable_cache_oblivious="0"
+else
+  enable_cache_oblivious="1"
+fi
+
+else
+  enable_cache_oblivious="1"
+
+fi
+
+if test "x$enable_cache_oblivious" = "x1" ; then
+  $as_echo "#define JEMALLOC_CACHE_OBLIVIOUS  " >>confdefs.h
+
+fi
+
+
 
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_ffsl is compilable" >&5
 $as_echo_n "checking whether a program using __builtin_ffsl is compilable... " >&6; }
@@ -6502,6 +6927,8 @@ fi
 $as_echo "$je_cv_gcc_builtin_ffsl" >&6; }
 
 if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then
+  $as_echo "#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll" >>confdefs.h
+
   $as_echo "#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl" >>confdefs.h
 
   $as_echo "#define JEMALLOC_INTERNAL_FFS __builtin_ffs" >>confdefs.h
@@ -6545,6 +6972,8 @@ fi
 $as_echo "$je_cv_function_ffsl" >&6; }
 
   if test "x${je_cv_function_ffsl}" = "xyes" ; then
+    $as_echo "#define JEMALLOC_INTERNAL_FFSLL ffsll" >>confdefs.h
+
     $as_echo "#define JEMALLOC_INTERNAL_FFSL ffsl" >>confdefs.h
 
     $as_echo "#define JEMALLOC_INTERNAL_FFS ffs" >>confdefs.h
@@ -6554,13 +6983,50 @@ $as_echo "$je_cv_function_ffsl" >&6; }
   fi
 fi
 
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking STATIC_PAGE_SHIFT" >&5
-$as_echo_n "checking STATIC_PAGE_SHIFT... " >&6; }
-if ${je_cv_static_page_shift+:} false; then :
+
+# Check whether --with-lg_tiny_min was given.
+if test "${with_lg_tiny_min+set}" = set; then :
+  withval=$with_lg_tiny_min; LG_TINY_MIN="$with_lg_tiny_min"
+else
+  LG_TINY_MIN="3"
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define LG_TINY_MIN $LG_TINY_MIN
+_ACEOF
+
+
+
+# Check whether --with-lg_quantum was given.
+if test "${with_lg_quantum+set}" = set; then :
+  withval=$with_lg_quantum; LG_QUANTA="$with_lg_quantum"
+else
+  LG_QUANTA="3 4"
+fi
+
+if test "x$with_lg_quantum" != "x" ; then
+  cat >>confdefs.h <<_ACEOF
+#define LG_QUANTUM $with_lg_quantum
+_ACEOF
+
+fi
+
+
+# Check whether --with-lg_page was given.
+if test "${with_lg_page+set}" = set; then :
+  withval=$with_lg_page; LG_PAGE="$with_lg_page"
+else
+  LG_PAGE="detect"
+fi
+
+if test "x$LG_PAGE" = "xdetect"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking LG_PAGE" >&5
+$as_echo_n "checking LG_PAGE... " >&6; }
+if ${je_cv_lg_page+:} false; then :
   $as_echo_n "(cached) " >&6
 else
   if test "$cross_compiling" = yes; then :
-  je_cv_static_page_shift=12
+  je_cv_lg_page=12
 else
   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
@@ -6596,7 +7062,7 @@ main ()
     if (f == NULL) {
        return 1;
     }
-    fprintf(f, "%d\n", result);
+    fprintf(f, "%d", result);
     fclose(f);
 
     return 0;
@@ -6606,51 +7072,76 @@ main ()
 }
 _ACEOF
 if ac_fn_c_try_run "$LINENO"; then :
-  je_cv_static_page_shift=`cat conftest.out`
+  je_cv_lg_page=`cat conftest.out`
 else
-  je_cv_static_page_shift=undefined
+  je_cv_lg_page=undefined
 fi
 rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
   conftest.$ac_objext conftest.beam conftest.$ac_ext
 fi
 
 fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_static_page_shift" >&5
-$as_echo "$je_cv_static_page_shift" >&6; }
-
-if test "x$je_cv_static_page_shift" != "xundefined"; then
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_lg_page" >&5
+$as_echo "$je_cv_lg_page" >&6; }
+fi
+if test "x${je_cv_lg_page}" != "x" ; then
+  LG_PAGE="${je_cv_lg_page}"
+fi
+if test "x${LG_PAGE}" != "xundefined" ; then
    cat >>confdefs.h <<_ACEOF
-#define STATIC_PAGE_SHIFT $je_cv_static_page_shift
+#define LG_PAGE $LG_PAGE
 _ACEOF
 
 else
-   as_fn_error $? "cannot determine value for STATIC_PAGE_SHIFT" "$LINENO" 5
+   as_fn_error $? "cannot determine value for LG_PAGE" "$LINENO" 5
+fi
+
+
+# Check whether --with-lg_page_sizes was given.
+if test "${with_lg_page_sizes+set}" = set; then :
+  withval=$with_lg_page_sizes; LG_PAGE_SIZES="$with_lg_page_sizes"
+else
+  LG_PAGE_SIZES="$LG_PAGE"
+fi
+
+
+
+# Check whether --with-lg_size_class_group was given.
+if test "${with_lg_size_class_group+set}" = set; then :
+  withval=$with_lg_size_class_group; LG_SIZE_CLASS_GROUP="$with_lg_size_class_group"
+else
+  LG_SIZE_CLASS_GROUP="2"
 fi
 
 
-if test "x`git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
-        rm -f "${srcroot}VERSION"
+
+if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
+        rm -f "${objroot}VERSION"
   for pattern in '[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \
                  '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \
                  '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \
                  '[0-9][0-9].[0-9][0-9].[0-9]' \
                  '[0-9][0-9].[0-9][0-9].[0-9][0-9]'; do
-    if test ! -e "${srcroot}VERSION" ; then
-      git describe --long --abbrev=40 --match="${pattern}" > "${srcroot}VERSION.tmp" 2>/dev/null
+    if test ! -e "${objroot}VERSION" ; then
+      (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null
       if test $? -eq 0 ; then
-        mv "${srcroot}VERSION.tmp" "${srcroot}VERSION"
+        mv "${objroot}VERSION.tmp" "${objroot}VERSION"
         break
       fi
     fi
   done
 fi
-rm -f "${srcroot}VERSION.tmp"
-if test ! -e "${srcroot}VERSION" ; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: Missing VERSION file, and unable to generate it; creating bogus VERSION" >&5
+rm -f "${objroot}VERSION.tmp"
+if test ! -e "${objroot}VERSION" ; then
+  if test ! -e "${srcroot}VERSION" ; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: Missing VERSION file, and unable to generate it; creating bogus VERSION" >&5
 $as_echo "Missing VERSION file, and unable to generate it; creating bogus VERSION" >&6; }
-  echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${srcroot}VERSION"
+    echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION"
+  else
+    cp ${srcroot}VERSION ${objroot}VERSION
+  fi
 fi
-jemalloc_version=`cat "${srcroot}VERSION"`
+jemalloc_version=`cat "${objroot}VERSION"`
 jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $1}'`
 jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $2}'`
 jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $3}'`
@@ -6782,6 +7273,93 @@ fi
 
 CPPFLAGS="$CPPFLAGS -D_REENTRANT"
 
+SAVED_LIBS="${LIBS}"
+LIBS=
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5
+$as_echo_n "checking for library containing clock_gettime... " >&6; }
+if ${ac_cv_search_clock_gettime+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_func_search_save_LIBS=$LIBS
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char clock_gettime ();
+int
+main ()
+{
+return clock_gettime ();
+  ;
+  return 0;
+}
+_ACEOF
+for ac_lib in '' rt; do
+  if test -z "$ac_lib"; then
+    ac_res="none required"
+  else
+    ac_res=-l$ac_lib
+    LIBS="-l$ac_lib  $ac_func_search_save_LIBS"
+  fi
+  if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_search_clock_gettime=$ac_res
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext
+  if ${ac_cv_search_clock_gettime+:} false; then :
+  break
+fi
+done
+if ${ac_cv_search_clock_gettime+:} false; then :
+
+else
+  ac_cv_search_clock_gettime=no
+fi
+rm conftest.$ac_ext
+LIBS=$ac_func_search_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5
+$as_echo "$ac_cv_search_clock_gettime" >&6; }
+ac_res=$ac_cv_search_clock_gettime
+if test "$ac_res" != no; then :
+  test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
+  TESTLIBS="${LIBS}"
+fi
+
+
+LIBS="${SAVED_LIBS}"
+
+ac_fn_c_check_func "$LINENO" "secure_getenv" "ac_cv_func_secure_getenv"
+if test "x$ac_cv_func_secure_getenv" = xyes; then :
+  have_secure_getenv="1"
+else
+  have_secure_getenv="0"
+
+fi
+
+if test "x$have_secure_getenv" = "x1" ; then
+  $as_echo "#define JEMALLOC_HAVE_SECURE_GETENV  " >>confdefs.h
+
+fi
+
+ac_fn_c_check_func "$LINENO" "issetugid" "ac_cv_func_issetugid"
+if test "x$ac_cv_func_issetugid" = xyes; then :
+  have_issetugid="1"
+else
+  have_issetugid="0"
+
+fi
+
+if test "x$have_issetugid" = "x1" ; then
+  $as_echo "#define JEMALLOC_HAVE_ISSETUGID  " >>confdefs.h
+
+fi
+
 ac_fn_c_check_func "$LINENO" "_malloc_thread_cleanup" "ac_cv_func__malloc_thread_cleanup"
 if test "x$ac_cv_func__malloc_thread_cleanup" = xyes; then :
   have__malloc_thread_cleanup="1"
@@ -6818,11 +7396,11 @@ else
 fi
 
 else
-  enable_lazy_lock="0"
+  enable_lazy_lock=""
 
 fi
 
-if test "x$enable_lazy_lock" = "x0" -a "x${force_lazy_lock}" = "x1" ; then
+if test "x$enable_lazy_lock" = "x" -a "x${force_lazy_lock}" = "x1" ; then
   { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&5
 $as_echo "Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&6; }
   enable_lazy_lock="1"
@@ -6895,6 +7473,8 @@ fi
   fi
   $as_echo "#define JEMALLOC_LAZY_LOCK  " >>confdefs.h
 
+else
+  enable_lazy_lock="0"
 fi
 
 
@@ -6907,19 +7487,22 @@ else
 fi
 
 else
-  enable_tls="1"
+  enable_tls=""
 
 fi
 
-if test "x${enable_tls}" = "x0" -a "x${force_tls}" = "x1" ; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing TLS to avoid allocator/threading bootstrap issues" >&5
+if test "x${enable_tls}" = "x" ; then
+  if test "x${force_tls}" = "x1" ; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing TLS to avoid allocator/threading bootstrap issues" >&5
 $as_echo "Forcing TLS to avoid allocator/threading bootstrap issues" >&6; }
-  enable_tls="1"
-fi
-if test "x${enable_tls}" = "x1" -a "x${force_tls}" = "x0" ; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no TLS to avoid allocator/threading bootstrap issues" >&5
+    enable_tls="1"
+  elif test "x${force_tls}" = "x0" ; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no TLS to avoid allocator/threading bootstrap issues" >&5
 $as_echo "Forcing no TLS to avoid allocator/threading bootstrap issues" >&6; }
-  enable_tls="0"
+    enable_tls="0"
+  else
+    enable_tls="1"
+  fi
 fi
 if test "x${enable_tls}" = "x1" ; then
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for TLS" >&5
@@ -6950,15 +7533,69 @@ $as_echo "no" >&6; }
               enable_tls="0"
 fi
 rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+  enable_tls="0"
 fi
 
 if test "x${enable_tls}" = "x1" ; then
+  if test "x${force_tls}" = "x0" ; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: TLS enabled despite being marked unusable on this platform" >&5
+$as_echo "$as_me: WARNING: TLS enabled despite being marked unusable on this platform" >&2;}
+  fi
   cat >>confdefs.h <<_ACEOF
 #define JEMALLOC_TLS
 _ACEOF
 
 elif test "x${force_tls}" = "x1" ; then
-  as_fn_error $? "Failed to configure TLS, which is mandatory for correct function" "$LINENO" 5
+  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: TLS disabled despite being marked critical on this platform" >&5
+$as_echo "$as_me: WARNING: TLS disabled despite being marked critical on this platform" >&2;}
+fi
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C11 atomics is compilable" >&5
+$as_echo_n "checking whether C11 atomics is compilable... " >&6; }
+if ${je_cv_c11atomics+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+#include <stdint.h>
+#if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
+#include <stdatomic.h>
+#else
+#error Atomics not available
+#endif
+
+int
+main ()
+{
+
+    uint64_t *p = (uint64_t *)0;
+    uint64_t x = 1;
+    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+    uint64_t r = atomic_fetch_add(a, x) + x;
+    return (r == 0);
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  je_cv_c11atomics=yes
+else
+  je_cv_c11atomics=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_c11atomics" >&5
+$as_echo "$je_cv_c11atomics" >&6; }
+
+if test "x${je_cv_c11atomics}" = "xyes" ; then
+  $as_echo "#define JEMALLOC_C11ATOMICS 1" >>confdefs.h
+
 fi
 
 
@@ -7300,8 +7937,6 @@ if test "x${enable_zone_allocator}" = "x1" ; then
   if test "x${abi}" != "xmacho"; then
     as_fn_error $? "--enable-zone-allocator is only supported on Darwin" "$LINENO" 5
   fi
-  $as_echo "#define JEMALLOC_IVSALLOC  " >>confdefs.h
-
   $as_echo "#define JEMALLOC_ZONE  " >>confdefs.h
 
 
@@ -7315,7 +7950,7 @@ $as_echo_n "checking malloc zone version... " >&6; }
 int
 main ()
 {
-static foo[sizeof(malloc_zone_t) == sizeof(void *) * 14 ? 1 : -1]
+static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 14 ? 1 : -1]
 
   ;
   return 0;
@@ -7331,7 +7966,7 @@ else
 int
 main ()
 {
-static foo[sizeof(malloc_zone_t) == sizeof(void *) * 15 ? 1 : -1]
+static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 15 ? 1 : -1]
 
   ;
   return 0;
@@ -7347,7 +7982,7 @@ else
 int
 main ()
 {
-static foo[sizeof(malloc_zone_t) == sizeof(void *) * 16 ? 1 : -1]
+static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 16 ? 1 : -1]
 
   ;
   return 0;
@@ -7361,7 +7996,7 @@ if ac_fn_c_try_compile "$LINENO"; then :
 int
 main ()
 {
-static foo[sizeof(malloc_introspection_t) == sizeof(void *) * 9 ? 1 : -1]
+static int foo[sizeof(malloc_introspection_t) == sizeof(void *) * 9 ? 1 : -1]
 
   ;
   return 0;
@@ -7377,7 +8012,7 @@ else
 int
 main ()
 {
-static foo[sizeof(malloc_introspection_t) == sizeof(void *) * 13 ? 1 : -1]
+static int foo[sizeof(malloc_introspection_t) == sizeof(void *) * 13 ? 1 : -1]
 
   ;
   return 0;
@@ -7400,7 +8035,7 @@ else
 int
 main ()
 {
-static foo[sizeof(malloc_zone_t) == sizeof(void *) * 17 ? 1 : -1]
+static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 17 ? 1 : -1]
 
   ;
   return 0;
@@ -7416,7 +8051,7 @@ else
 int
 main ()
 {
-static foo[sizeof(malloc_zone_t) > sizeof(void *) * 17 ? 1 : -1]
+static int foo[sizeof(malloc_zone_t) > sizeof(void *) * 17 ? 1 : -1]
 
   ;
   return 0;
@@ -7705,7 +8340,7 @@ ac_config_headers="$ac_config_headers $cfghdrs_tup"
 
 
 
-ac_config_files="$ac_config_files $cfgoutputs_tup config.stamp bin/jemalloc.sh"
+ac_config_files="$ac_config_files $cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof"
 
 
 
@@ -8423,8 +9058,13 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
   objroot="${objroot}"
 
 
+  SHELL="${SHELL}"
   srcdir="${srcdir}"
   objroot="${objroot}"
+  LG_QUANTA="${LG_QUANTA}"
+  LG_TINY_MIN=${LG_TINY_MIN}
+  LG_PAGE_SIZES="${LG_PAGE_SIZES}"
+  LG_SIZE_CLASS_GROUP=${LG_SIZE_CLASS_GROUP}
 
 
   srcdir="${srcdir}"
@@ -8470,7 +9110,9 @@ do
     "$cfghdrs_tup") CONFIG_HEADERS="$CONFIG_HEADERS $cfghdrs_tup" ;;
     "$cfgoutputs_tup") CONFIG_FILES="$CONFIG_FILES $cfgoutputs_tup" ;;
     "config.stamp") CONFIG_FILES="$CONFIG_FILES config.stamp" ;;
+    "bin/jemalloc-config") CONFIG_FILES="$CONFIG_FILES bin/jemalloc-config" ;;
     "bin/jemalloc.sh") CONFIG_FILES="$CONFIG_FILES bin/jemalloc.sh" ;;
+    "bin/jeprof") CONFIG_FILES="$CONFIG_FILES bin/jeprof" ;;
 
   *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
   esac
@@ -9060,7 +9702,7 @@ $as_echo "$as_me: executing $ac_file commands" >&6;}
  ;;
     "include/jemalloc/internal/size_classes.h":C)
   mkdir -p "${objroot}include/jemalloc/internal"
-  "${srcdir}/include/jemalloc/internal/size_classes.sh" > "${objroot}include/jemalloc/internal/size_classes.h"
+  "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" ${LG_TINY_MIN} "${LG_PAGE_SIZES}" ${LG_SIZE_CLASS_GROUP} > "${objroot}include/jemalloc/internal/size_classes.h"
  ;;
     "include/jemalloc/jemalloc_protos_jet.h":C)
   mkdir -p "${objroot}include/jemalloc"
@@ -9129,18 +9771,22 @@ $as_echo "jemalloc version   : ${jemalloc_version}" >&6; }
 $as_echo "library revision   : ${rev}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
 $as_echo "" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIG             : ${CONFIG}" >&5
+$as_echo "CONFIG             : ${CONFIG}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: CC                 : ${CC}" >&5
 $as_echo "CC                 : ${CC}" >&6; }
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CPPFLAGS           : ${CPPFLAGS}" >&5
-$as_echo "CPPFLAGS           : ${CPPFLAGS}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: CFLAGS             : ${CFLAGS}" >&5
 $as_echo "CFLAGS             : ${CFLAGS}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CPPFLAGS           : ${CPPFLAGS}" >&5
+$as_echo "CPPFLAGS           : ${CPPFLAGS}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: LDFLAGS            : ${LDFLAGS}" >&5
 $as_echo "LDFLAGS            : ${LDFLAGS}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_LDFLAGS      : ${EXTRA_LDFLAGS}" >&5
 $as_echo "EXTRA_LDFLAGS      : ${EXTRA_LDFLAGS}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBS               : ${LIBS}" >&5
 $as_echo "LIBS               : ${LIBS}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: TESTLIBS           : ${TESTLIBS}" >&5
+$as_echo "TESTLIBS           : ${TESTLIBS}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: RPATH_EXTRA        : ${RPATH_EXTRA}" >&5
 $as_echo "RPATH_EXTRA        : ${RPATH_EXTRA}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
@@ -9155,12 +9801,12 @@ $as_echo "" >&6; }
 $as_echo "PREFIX             : ${PREFIX}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: BINDIR             : ${BINDIR}" >&5
 $as_echo "BINDIR             : ${BINDIR}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: DATADIR            : ${DATADIR}" >&5
+$as_echo "DATADIR            : ${DATADIR}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: INCLUDEDIR         : ${INCLUDEDIR}" >&5
 $as_echo "INCLUDEDIR         : ${INCLUDEDIR}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBDIR             : ${LIBDIR}" >&5
 $as_echo "LIBDIR             : ${LIBDIR}" >&6; }
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: DATADIR            : ${DATADIR}" >&5
-$as_echo "DATADIR            : ${DATADIR}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: MANDIR             : ${MANDIR}" >&5
 $as_echo "MANDIR             : ${MANDIR}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
@@ -9183,6 +9829,8 @@ $as_echo "JEMALLOC_PRIVATE_NAMESPACE" >&6; }
 $as_echo "                   : ${JEMALLOC_PRIVATE_NAMESPACE}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: install_suffix     : ${install_suffix}" >&5
 $as_echo "install_suffix     : ${install_suffix}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: malloc_conf        : ${config_malloc_conf}" >&5
+$as_echo "malloc_conf        : ${config_malloc_conf}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: autogen            : ${enable_autogen}" >&5
 $as_echo "autogen            : ${enable_autogen}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: cc-silence         : ${enable_cc_silence}" >&5
@@ -9217,5 +9865,7 @@ $as_echo "munmap             : ${enable_munmap}" >&6; }
 $as_echo "lazy_lock          : ${enable_lazy_lock}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: tls                : ${enable_tls}" >&5
 $as_echo "tls                : ${enable_tls}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: cache-oblivious    : ${enable_cache_oblivious}" >&5
+$as_echo "cache-oblivious    : ${enable_cache_oblivious}" >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5
 $as_echo "===============================================================================" >&6; }
index fd7554d668e94d52e61ab6034dc8066e1be03cea..eb387ed95731ee1622c58b27d39ab63322c3550c 100644 (file)
@@ -1,6 +1,8 @@
 dnl Process this file with autoconf to produce a configure script.
 AC_INIT([Makefile.in])
 
+AC_CONFIG_AUX_DIR([build-aux])
+
 dnl ============================================================================
 dnl Custom macro definitions.
 
@@ -43,6 +45,9 @@ AC_CACHE_CHECK([whether $1 is compilable],
 
 dnl ============================================================================
 
+CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'`
+AC_SUBST([CONFIG])
+
 dnl Library revision.
 rev=2
 AC_SUBST([rev])
@@ -134,6 +139,8 @@ if test "x$CFLAGS" = "x" ; then
       AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
     fi
     JE_CFLAGS_APPEND([-Wall])
+    JE_CFLAGS_APPEND([-Werror=declaration-after-statement])
+    JE_CFLAGS_APPEND([-Wshorten-64-to-32])
     JE_CFLAGS_APPEND([-pipe])
     JE_CFLAGS_APPEND([-g3])
   elif test "x$je_cv_msvc" = "xyes" ; then
@@ -160,13 +167,18 @@ if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then
   CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat/C99"
 fi
 
-AC_CHECK_SIZEOF([void *])
-if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
-  LG_SIZEOF_PTR=3
-elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
-  LG_SIZEOF_PTR=2
+if test "x${je_cv_msvc}" = "xyes" ; then
+  LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN
+  AC_MSG_RESULT([Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit])
 else
-  AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}])
+  AC_CHECK_SIZEOF([void *])
+  if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
+    LG_SIZEOF_PTR=3
+  elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
+    LG_SIZEOF_PTR=2
+  else
+    AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}])
+  fi
 fi
 AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR])
 
@@ -190,6 +202,16 @@ else
 fi
 AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG], [$LG_SIZEOF_LONG])
 
+AC_CHECK_SIZEOF([long long])
+if test "x${ac_cv_sizeof_long_long}" = "x8" ; then
+  LG_SIZEOF_LONG_LONG=3
+elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then
+  LG_SIZEOF_LONG_LONG=2
+else
+  AC_MSG_ERROR([Unsupported long long size: ${ac_cv_sizeof_long_long}])
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG_LONG], [$LG_SIZEOF_LONG_LONG])
+
 AC_CHECK_SIZEOF([intmax_t])
 if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then
   LG_SIZEOF_INTMAX_T=4
@@ -206,22 +228,23 @@ AC_CANONICAL_HOST
 dnl CPU-specific settings.
 CPU_SPINWAIT=""
 case "${host_cpu}" in
-  i[[345]]86)
-       ;;
   i686|x86_64)
-       JE_COMPILABLE([pause instruction], [],
-                     [[__asm__ volatile("pause"); return 0;]],
-                     [je_cv_pause])
-       if test "x${je_cv_pause}" = "xyes" ; then
-           CPU_SPINWAIT='__asm__ volatile("pause")'
-       fi
-       dnl emmintrin.h fails to compile unless MMX, SSE, and SSE2 are
-       dnl supported.
-       JE_COMPILABLE([SSE2 intrinsics], [
-#include <emmintrin.h>
-], [], [je_cv_sse2])
-       if test "x${je_cv_sse2}" = "xyes" ; then
-         AC_DEFINE_UNQUOTED([HAVE_SSE2], [ ])
+       if test "x${je_cv_msvc}" = "xyes" ; then
+           AC_CACHE_VAL([je_cv_pause_msvc],
+             [JE_COMPILABLE([pause instruction MSVC], [],
+                                       [[_mm_pause(); return 0;]],
+                                       [je_cv_pause_msvc])])
+           if test "x${je_cv_pause_msvc}" = "xyes" ; then
+               CPU_SPINWAIT='_mm_pause()'
+           fi
+       else
+           AC_CACHE_VAL([je_cv_pause],
+             [JE_COMPILABLE([pause instruction], [],
+                                       [[__asm__ volatile("pause"); return 0;]],
+                                       [je_cv_pause])])
+           if test "x${je_cv_pause}" = "xyes" ; then
+               CPU_SPINWAIT='__asm__ volatile("pause")'
+           fi
        fi
        ;;
   powerpc)
@@ -263,6 +286,7 @@ dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
 dnl definitions need to be seen before any headers are included, which is a pain
 dnl to make happen otherwise.
 default_munmap="1"
+maps_coalesce="1"
 case "${host}" in
   *-*-darwin* | *-*-ios*)
        CFLAGS="$CFLAGS"
@@ -273,7 +297,7 @@ case "${host}" in
        so="dylib"
        importlib="${so}"
        force_tls="0"
-       DSO_LDFLAGS='-shared -Wl,-dylib_install_name,$(@F)'
+       DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)'
        SOREV="${rev}.${so}"
        sbrk_deprecated="1"
        ;;
@@ -288,7 +312,13 @@ case "${host}" in
        abi="elf"
        AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
        ;;
-  *-*-openbsd*|*-*-bitrig*)
+  *-*-openbsd*)
+       CFLAGS="$CFLAGS"
+       abi="elf"
+       AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+       force_tls="0"
+       ;;
+  *-*-bitrig*)
        CFLAGS="$CFLAGS"
        abi="elf"
        AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
@@ -300,6 +330,7 @@ case "${host}" in
        AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
        AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ])
        AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
+       AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ])
        default_munmap="0"
        ;;
   *-*-netbsd*)
@@ -338,6 +369,8 @@ case "${host}" in
   *-*-mingw* | *-*-cygwin*)
        abi="pecoff"
        force_tls="0"
+       force_lazy_lock="1"
+       maps_coalesce="0"
        RPATH=""
        so="dll"
        if test "x$je_cv_msvc" = "xyes" ; then
@@ -426,6 +459,36 @@ if test "x${je_cv_tls_model}" = "xyes" ; then
 else
   AC_DEFINE([JEMALLOC_TLS_MODEL], [ ])
 fi
+dnl Check for alloc_size attribute support.
+SAVED_CFLAGS="${CFLAGS}"
+JE_CFLAGS_APPEND([-Werror])
+JE_COMPILABLE([alloc_size attribute], [#include <stdlib.h>],
+              [void *foo(size_t size) __attribute__((alloc_size(1)));],
+              [je_cv_alloc_size])
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_alloc_size}" = "xyes" ; then
+  AC_DEFINE([JEMALLOC_HAVE_ATTR_ALLOC_SIZE], [ ])
+fi
+dnl Check for format(gnu_printf, ...) attribute support.
+SAVED_CFLAGS="${CFLAGS}"
+JE_CFLAGS_APPEND([-Werror])
+JE_COMPILABLE([format(gnu_printf, ...) attribute], [#include <stdlib.h>],
+              [void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));],
+              [je_cv_format_gnu_printf])
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_format_gnu_printf}" = "xyes" ; then
+  AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF], [ ])
+fi
+dnl Check for format(printf, ...) attribute support.
+SAVED_CFLAGS="${CFLAGS}"
+JE_CFLAGS_APPEND([-Werror])
+JE_COMPILABLE([format(printf, ...) attribute], [#include <stdlib.h>],
+              [void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));],
+              [je_cv_format_printf])
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_format_printf}" = "xyes" ; then
+  AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ])
+fi
 
 dnl Support optional additions to rpath.
 AC_ARG_WITH([rpath],
@@ -512,6 +575,7 @@ if test "x$JEMALLOC_PREFIX" != "x" ; then
   AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"])
   AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"])
 fi
+AC_SUBST([JEMALLOC_CPREFIX])
 
 AC_ARG_WITH([export],
   [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])],
@@ -539,6 +603,15 @@ AC_ARG_WITH([install_suffix],
 install_suffix="$INSTALL_SUFFIX"
 AC_SUBST([install_suffix])
 
+dnl Specify default malloc_conf.
+AC_ARG_WITH([malloc_conf],
+  [AS_HELP_STRING([--with-malloc-conf=<malloc_conf>], [config.malloc_conf options string])],
+  [JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf"],
+  [JEMALLOC_CONFIG_MALLOC_CONF=""]
+)
+config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF"
+AC_DEFINE_UNQUOTED([JEMALLOC_CONFIG_MALLOC_CONF], ["$config_malloc_conf"])
+
 dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of
 dnl jemalloc_protos_jet.h easy.
 je_="je_"
@@ -630,7 +703,8 @@ fi
 
 dnl Do not compile with debugging by default.
 AC_ARG_ENABLE([debug],
-  [AS_HELP_STRING([--enable-debug], [Build debugging code (implies --enable-ivsalloc)])],
+  [AS_HELP_STRING([--enable-debug],
+                  [Build debugging code (implies --enable-ivsalloc)])],
 [if test "x$enable_debug" = "xno" ; then
   enable_debug="0"
 else
@@ -639,6 +713,9 @@ fi
 ],
 [enable_debug="0"]
 )
+if test "x$enable_debug" = "x1" ; then
+  AC_DEFINE([JEMALLOC_DEBUG], [ ])
+fi
 if test "x$enable_debug" = "x1" ; then
   AC_DEFINE([JEMALLOC_DEBUG], [ ])
   enable_ivsalloc="1"
@@ -647,7 +724,8 @@ AC_SUBST([enable_debug])
 
 dnl Do not validate pointers by default.
 AC_ARG_ENABLE([ivsalloc],
-  [AS_HELP_STRING([--enable-ivsalloc], [Validate pointers passed through the public API])],
+  [AS_HELP_STRING([--enable-ivsalloc],
+                  [Validate pointers passed through the public API])],
 [if test "x$enable_ivsalloc" = "xno" ; then
   enable_ivsalloc="0"
 else
@@ -823,6 +901,12 @@ if test "x$enable_tcache" = "x1" ; then
 fi
 AC_SUBST([enable_tcache])
 
+dnl Indicate whether adjacent virtual memory mappings automatically coalesce
+dnl (and fragment on demand).
+if test "x${maps_coalesce}" = "x1" ; then
+  AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ])
+fi
+
 dnl Enable VM deallocation via munmap() by default.
 AC_ARG_ENABLE([munmap],
   [AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])],
@@ -946,11 +1030,28 @@ if test "x$enable_xmalloc" = "x1" ; then
 fi
 AC_SUBST([enable_xmalloc])
 
+dnl Support cache-oblivious allocation alignment by default.
+AC_ARG_ENABLE([cache-oblivious],
+  [AS_HELP_STRING([--disable-cache-oblivious],
+                  [Disable support for cache-oblivious allocation alignment])],
+[if test "x$enable_cache_oblivious" = "xno" ; then
+  enable_cache_oblivious="0"
+else
+  enable_cache_oblivious="1"
+fi
+],
+[enable_cache_oblivious="1"]
+)
+if test "x$enable_cache_oblivious" = "x1" ; then
+  AC_DEFINE([JEMALLOC_CACHE_OBLIVIOUS], [ ])
+fi
+AC_SUBST([enable_cache_oblivious])
+
 dnl ============================================================================
 dnl Check for  __builtin_ffsl(), then ffsl(3), and fail if neither are found.
 dnl One of those two functions should (theoretically) exist on all platforms
 dnl that jemalloc currently has a chance of functioning on without modification.
-dnl We additionally assume ffs() or __builtin_ffs() are defined if
+dnl We additionally assume ffs[ll]() or __builtin_ffs[ll]() are defined if
 dnl ffsl() or __builtin_ffsl() are defined, respectively.
 JE_COMPILABLE([a program using __builtin_ffsl], [
 #include <stdio.h>
@@ -963,6 +1064,7 @@ JE_COMPILABLE([a program using __builtin_ffsl], [
        }
 ], [je_cv_gcc_builtin_ffsl])
 if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then
+  AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [__builtin_ffsll])
   AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [__builtin_ffsl])
   AC_DEFINE([JEMALLOC_INTERNAL_FFS], [__builtin_ffs])
 else
@@ -977,6 +1079,7 @@ else
        }
   ], [je_cv_function_ffsl])
   if test "x${je_cv_function_ffsl}" = "xyes" ; then
+    AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [ffsll])
     AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [ffsl])
     AC_DEFINE([JEMALLOC_INTERNAL_FFS], [ffs])
   else
@@ -984,8 +1087,28 @@ else
   fi
 fi
 
-AC_CACHE_CHECK([STATIC_PAGE_SHIFT],
-               [je_cv_static_page_shift],
+AC_ARG_WITH([lg_tiny_min],
+  [AS_HELP_STRING([--with-lg-tiny-min=<lg-tiny-min>],
+   [Base 2 log of minimum tiny size class to support])],
+  [LG_TINY_MIN="$with_lg_tiny_min"],
+  [LG_TINY_MIN="3"])
+AC_DEFINE_UNQUOTED([LG_TINY_MIN], [$LG_TINY_MIN])
+
+AC_ARG_WITH([lg_quantum],
+  [AS_HELP_STRING([--with-lg-quantum=<lg-quantum>],
+   [Base 2 log of minimum allocation alignment])],
+  [LG_QUANTA="$with_lg_quantum"],
+  [LG_QUANTA="3 4"])
+if test "x$with_lg_quantum" != "x" ; then
+  AC_DEFINE_UNQUOTED([LG_QUANTUM], [$with_lg_quantum])
+fi
+
+AC_ARG_WITH([lg_page],
+  [AS_HELP_STRING([--with-lg-page=<lg-page>], [Base 2 log of system page size])],
+  [LG_PAGE="$with_lg_page"], [LG_PAGE="detect"])
+if test "x$LG_PAGE" = "xdetect"; then
+  AC_CACHE_CHECK([LG_PAGE],
+               [je_cv_lg_page],
                AC_RUN_IFELSE([AC_LANG_PROGRAM(
 [[
 #include <strings.h>
@@ -1016,52 +1139,70 @@ AC_CACHE_CHECK([STATIC_PAGE_SHIFT],
     if (f == NULL) {
        return 1;
     }
-    fprintf(f, "%d\n", result);
+    fprintf(f, "%d", result);
     fclose(f);
 
     return 0;
 ]])],
-                             [je_cv_static_page_shift=`cat conftest.out`],
-                             [je_cv_static_page_shift=undefined],
-                             [je_cv_static_page_shift=12]))
-
-if test "x$je_cv_static_page_shift" != "xundefined"; then
-   AC_DEFINE_UNQUOTED([STATIC_PAGE_SHIFT], [$je_cv_static_page_shift])
+                             [je_cv_lg_page=`cat conftest.out`],
+                             [je_cv_lg_page=undefined],
+                             [je_cv_lg_page=12]))
+fi
+if test "x${je_cv_lg_page}" != "x" ; then
+  LG_PAGE="${je_cv_lg_page}"
+fi
+if test "x${LG_PAGE}" != "xundefined" ; then
+   AC_DEFINE_UNQUOTED([LG_PAGE], [$LG_PAGE])
 else
-   AC_MSG_ERROR([cannot determine value for STATIC_PAGE_SHIFT])
+   AC_MSG_ERROR([cannot determine value for LG_PAGE])
 fi
 
+AC_ARG_WITH([lg_page_sizes],
+  [AS_HELP_STRING([--with-lg-page-sizes=<lg-page-sizes>],
+   [Base 2 logs of system page sizes to support])],
+  [LG_PAGE_SIZES="$with_lg_page_sizes"], [LG_PAGE_SIZES="$LG_PAGE"])
+
+AC_ARG_WITH([lg_size_class_group],
+  [AS_HELP_STRING([--with-lg-size-class-group=<lg-size-class-group>],
+   [Base 2 log of size classes per doubling])],
+  [LG_SIZE_CLASS_GROUP="$with_lg_size_class_group"],
+  [LG_SIZE_CLASS_GROUP="2"])
+
 dnl ============================================================================
 dnl jemalloc configuration.
 dnl 
 
 dnl Set VERSION if source directory is inside a git repository.
-if test "x`git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
+if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
   dnl Pattern globs aren't powerful enough to match both single- and
   dnl double-digit version numbers, so iterate over patterns to support up to
   dnl version 99.99.99 without any accidental matches.
-  rm -f "${srcroot}VERSION"
+  rm -f "${objroot}VERSION"
   for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \
                  '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \
                  '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \
                  '[0-9][0-9].[0-9][0-9].[0-9]' \
                  '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do
-    if test ! -e "${srcroot}VERSION" ; then
-      git describe --long --abbrev=40 --match="${pattern}" > "${srcroot}VERSION.tmp" 2>/dev/null
+    if test ! -e "${objroot}VERSION" ; then
+      (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null
       if test $? -eq 0 ; then
-        mv "${srcroot}VERSION.tmp" "${srcroot}VERSION"
+        mv "${objroot}VERSION.tmp" "${objroot}VERSION"
         break
       fi
     fi
   done
 fi
-rm -f "${srcroot}VERSION.tmp"
-if test ! -e "${srcroot}VERSION" ; then
-  AC_MSG_RESULT(
-    [Missing VERSION file, and unable to generate it; creating bogus VERSION])
-  echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${srcroot}VERSION"
+rm -f "${objroot}VERSION.tmp"
+if test ! -e "${objroot}VERSION" ; then
+  if test ! -e "${srcroot}VERSION" ; then
+    AC_MSG_RESULT(
+      [Missing VERSION file, and unable to generate it; creating bogus VERSION])
+    echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION"
+  else
+    cp ${srcroot}VERSION ${objroot}VERSION
+  fi
 fi
-jemalloc_version=`cat "${srcroot}VERSION"`
+jemalloc_version=`cat "${objroot}VERSION"`
 jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]1}'`
 jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]2}'`
 jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]3}'`
@@ -1088,6 +1229,32 @@ fi
 
 CPPFLAGS="$CPPFLAGS -D_REENTRANT"
 
+dnl Check whether clock_gettime(2) is in libc or librt.  This function is only
+dnl used in test code, so save the result to TESTLIBS to avoid polluting LIBS.
+SAVED_LIBS="${LIBS}"
+LIBS=
+AC_SEARCH_LIBS([clock_gettime], [rt], [TESTLIBS="${LIBS}"])
+AC_SUBST([TESTLIBS])
+LIBS="${SAVED_LIBS}"
+
+dnl Check if the GNU-specific secure_getenv function exists.
+AC_CHECK_FUNC([secure_getenv],
+              [have_secure_getenv="1"],
+              [have_secure_getenv="0"]
+             )
+if test "x$have_secure_getenv" = "x1" ; then
+  AC_DEFINE([JEMALLOC_HAVE_SECURE_GETENV], [ ])
+fi
+
+dnl Check if the Solaris/BSD issetugid function exists.
+AC_CHECK_FUNC([issetugid],
+              [have_issetugid="1"],
+              [have_issetugid="0"]
+             )
+if test "x$have_issetugid" = "x1" ; then
+  AC_DEFINE([JEMALLOC_HAVE_ISSETUGID], [ ])
+fi
+
 dnl Check whether the BSD-specific _malloc_thread_cleanup() exists.  If so, use
 dnl it rather than pthreads TSD cleanup functions to support cleanup during
 dnl thread exit, in order to avoid pthreads library recursion during
@@ -1122,9 +1289,9 @@ else
   enable_lazy_lock="1"
 fi
 ],
-[enable_lazy_lock="0"]
+[enable_lazy_lock=""]
 )
-if test "x$enable_lazy_lock" = "x0" -a "x${force_lazy_lock}" = "x1" ; then
+if test "x$enable_lazy_lock" = "x" -a "x${force_lazy_lock}" = "x1" ; then
   AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues])
   enable_lazy_lock="1"
 fi
@@ -1137,6 +1304,8 @@ if test "x$enable_lazy_lock" = "x1" ; then
       ])
   fi
   AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ])
+else
+  enable_lazy_lock="0"
 fi
 AC_SUBST([enable_lazy_lock])
 
@@ -1148,15 +1317,18 @@ else
   enable_tls="1"
 fi
 ,
-enable_tls="1"
+enable_tls=""
 )
-if test "x${enable_tls}" = "x0" -a "x${force_tls}" = "x1" ; then
-  AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues])
-  enable_tls="1"
-fi
-if test "x${enable_tls}" = "x1" -a "x${force_tls}" = "x0" ; then
-  AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues])
-  enable_tls="0"
+if test "x${enable_tls}" = "x" ; then
+  if test "x${force_tls}" = "x1" ; then
+    AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues])
+    enable_tls="1"
+  elif test "x${force_tls}" = "x0" ; then
+    AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues])
+    enable_tls="0"
+  else
+    enable_tls="1"
+  fi
 fi
 if test "x${enable_tls}" = "x1" ; then
 AC_MSG_CHECKING([for TLS])
@@ -1171,12 +1343,38 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
               AC_MSG_RESULT([yes]),
               AC_MSG_RESULT([no])
               enable_tls="0")
+else
+  enable_tls="0"
 fi
 AC_SUBST([enable_tls])
 if test "x${enable_tls}" = "x1" ; then
+  if test "x${force_tls}" = "x0" ; then
+    AC_MSG_WARN([TLS enabled despite being marked unusable on this platform])
+  fi
   AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ])
 elif test "x${force_tls}" = "x1" ; then
-  AC_MSG_ERROR([Failed to configure TLS, which is mandatory for correct function])
+  AC_MSG_WARN([TLS disabled despite being marked critical on this platform])
+fi
+
+dnl ============================================================================
+dnl Check for C11 atomics.
+
+JE_COMPILABLE([C11 atomics], [
+#include <stdint.h>
+#if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
+#include <stdatomic.h>
+#else
+#error Atomics not available
+#endif
+], [
+    uint64_t *p = (uint64_t *)0;
+    uint64_t x = 1;
+    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+    uint64_t r = atomic_fetch_add(a, x) + x;
+    return (r == 0);
+], [je_cv_c11atomics])
+if test "x${je_cv_c11atomics}" = "xyes" ; then
+  AC_DEFINE([JEMALLOC_C11ATOMICS])
 fi
 
 dnl ============================================================================
@@ -1333,7 +1531,6 @@ if test "x${enable_zone_allocator}" = "x1" ; then
   if test "x${abi}" != "xmacho"; then
     AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin])
   fi
-  AC_DEFINE([JEMALLOC_IVSALLOC], [ ])
   AC_DEFINE([JEMALLOC_ZONE], [ ])
 
   dnl The szone version jumped from 3 to 6 between the OS X 10.5.x and 10.6
@@ -1343,7 +1540,7 @@ if test "x${enable_zone_allocator}" = "x1" ; then
   AC_DEFUN([JE_ZONE_PROGRAM],
     [AC_LANG_PROGRAM(
       [#include <malloc/malloc.h>],
-      [static foo[[sizeof($1) $2 sizeof(void *) * $3 ? 1 : -1]]]
+      [static int foo[[sizeof($1) $2 sizeof(void *) * $3 ? 1 : -1]]]
     )])
 
   AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,14)],[JEMALLOC_ZONE_VERSION=3],[
@@ -1471,10 +1668,15 @@ AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [
 ])
 AC_CONFIG_COMMANDS([include/jemalloc/internal/size_classes.h], [
   mkdir -p "${objroot}include/jemalloc/internal"
-  "${srcdir}/include/jemalloc/internal/size_classes.sh" > "${objroot}include/jemalloc/internal/size_classes.h"
+  "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" ${LG_TINY_MIN} "${LG_PAGE_SIZES}" ${LG_SIZE_CLASS_GROUP} > "${objroot}include/jemalloc/internal/size_classes.h"
 ], [
+  SHELL="${SHELL}"
   srcdir="${srcdir}"
   objroot="${objroot}"
+  LG_QUANTA="${LG_QUANTA}"
+  LG_TINY_MIN=${LG_TINY_MIN}
+  LG_PAGE_SIZES="${LG_PAGE_SIZES}"
+  LG_SIZE_CLASS_GROUP=${LG_SIZE_CLASS_GROUP}
 ])
 AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [
   mkdir -p "${objroot}include/jemalloc"
@@ -1521,7 +1723,7 @@ AC_CONFIG_HEADERS([$cfghdrs_tup])
 dnl ============================================================================
 dnl Generate outputs.
 
-AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc.sh])
+AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof])
 AC_SUBST([cfgoutputs_in])
 AC_SUBST([cfgoutputs_out])
 AC_OUTPUT
@@ -1532,12 +1734,14 @@ AC_MSG_RESULT([=================================================================
 AC_MSG_RESULT([jemalloc version   : ${jemalloc_version}])
 AC_MSG_RESULT([library revision   : ${rev}])
 AC_MSG_RESULT([])
+AC_MSG_RESULT([CONFIG             : ${CONFIG}])
 AC_MSG_RESULT([CC                 : ${CC}])
-AC_MSG_RESULT([CPPFLAGS           : ${CPPFLAGS}])
 AC_MSG_RESULT([CFLAGS             : ${CFLAGS}])
+AC_MSG_RESULT([CPPFLAGS           : ${CPPFLAGS}])
 AC_MSG_RESULT([LDFLAGS            : ${LDFLAGS}])
 AC_MSG_RESULT([EXTRA_LDFLAGS      : ${EXTRA_LDFLAGS}])
 AC_MSG_RESULT([LIBS               : ${LIBS}])
+AC_MSG_RESULT([TESTLIBS           : ${TESTLIBS}])
 AC_MSG_RESULT([RPATH_EXTRA        : ${RPATH_EXTRA}])
 AC_MSG_RESULT([])
 AC_MSG_RESULT([XSLTPROC           : ${XSLTPROC}])
@@ -1545,9 +1749,9 @@ AC_MSG_RESULT([XSLROOT            : ${XSLROOT}])
 AC_MSG_RESULT([])
 AC_MSG_RESULT([PREFIX             : ${PREFIX}])
 AC_MSG_RESULT([BINDIR             : ${BINDIR}])
+AC_MSG_RESULT([DATADIR            : ${DATADIR}])
 AC_MSG_RESULT([INCLUDEDIR         : ${INCLUDEDIR}])
 AC_MSG_RESULT([LIBDIR             : ${LIBDIR}])
-AC_MSG_RESULT([DATADIR            : ${DATADIR}])
 AC_MSG_RESULT([MANDIR             : ${MANDIR}])
 AC_MSG_RESULT([])
 AC_MSG_RESULT([srcroot            : ${srcroot}])
@@ -1559,6 +1763,7 @@ AC_MSG_RESULT([JEMALLOC_PREFIX    : ${JEMALLOC_PREFIX}])
 AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE])
 AC_MSG_RESULT([                   : ${JEMALLOC_PRIVATE_NAMESPACE}])
 AC_MSG_RESULT([install_suffix     : ${install_suffix}])
+AC_MSG_RESULT([malloc_conf        : ${config_malloc_conf}])
 AC_MSG_RESULT([autogen            : ${enable_autogen}])
 AC_MSG_RESULT([cc-silence         : ${enable_cc_silence}])
 AC_MSG_RESULT([debug              : ${enable_debug}])
@@ -1576,4 +1781,5 @@ AC_MSG_RESULT([xmalloc            : ${enable_xmalloc}])
 AC_MSG_RESULT([munmap             : ${enable_munmap}])
 AC_MSG_RESULT([lazy_lock          : ${enable_lazy_lock}])
 AC_MSG_RESULT([tls                : ${enable_tls}])
+AC_MSG_RESULT([cache-oblivious    : ${enable_cache_oblivious}])
 AC_MSG_RESULT([===============================================================================])
index 1f692f78b923a53f7924bb2a481109011c669fca..bc5dbd1d7a9f60930ce92dc3c1ae0941320088d6 100644 (file)
       relevant.  Use bitwise or (<code language="C">|</code>) operations to
       specify one or more of the following:
         <variablelist>
-          <varlistentry>
+          <varlistentry id="MALLOCX_LG_ALIGN">
             <term><constant>MALLOCX_LG_ALIGN(<parameter>la</parameter>)
             </constant></term>
 
             that <parameter>la</parameter> is within the valid
             range.</para></listitem>
           </varlistentry>
-          <varlistentry>
+          <varlistentry id="MALLOCX_ALIGN">
             <term><constant>MALLOCX_ALIGN(<parameter>a</parameter>)
             </constant></term>
 
             validate that <parameter>a</parameter> is a power of 2.
             </para></listitem>
           </varlistentry>
-          <varlistentry>
+          <varlistentry id="MALLOCX_ZERO">
             <term><constant>MALLOCX_ZERO</constant></term>
 
             <listitem><para>Initialize newly allocated memory to contain zero
             that are initialized to contain zero bytes.  If this macro is
             absent, newly allocated memory is uninitialized.</para></listitem>
           </varlistentry>
-          <varlistentry>
+          <varlistentry id="MALLOCX_TCACHE">
+            <term><constant>MALLOCX_TCACHE(<parameter>tc</parameter>)
+            </constant></term>
+
+            <listitem><para>Use the thread-specific cache (tcache) specified by
+            the identifier <parameter>tc</parameter>, which must have been
+            acquired via the <link
+            linkend="tcache.create"><mallctl>tcache.create</mallctl></link>
+            mallctl.  This macro does not validate that
+            <parameter>tc</parameter> specifies a valid
+            identifier.</para></listitem>
+          </varlistentry>
+          <varlistentry id="MALLOC_TCACHE_NONE">
+            <term><constant>MALLOCX_TCACHE_NONE</constant></term>
+
+            <listitem><para>Do not use a thread-specific cache (tcache).  Unless
+            <constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant> or
+            <constant>MALLOCX_TCACHE_NONE</constant> is specified, an
+            automatically managed tcache will be used under many circumstances.
+            This macro cannot be used in the same <parameter>flags</parameter>
+            argument as
+            <constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant>.</para></listitem>
+          </varlistentry>
+          <varlistentry id="MALLOCX_ARENA">
             <term><constant>MALLOCX_ARENA(<parameter>a</parameter>)
             </constant></term>
 
             <listitem><para>Use the arena specified by the index
-            <parameter>a</parameter> (and by necessity bypass the thread
-            cache).  This macro has no effect for regions that were allocated
-            via an arena other than the one specified.  This macro does not
-            validate that <parameter>a</parameter> specifies an arena index in
-            the valid range.</para></listitem>
+            <parameter>a</parameter>.  This macro has no effect for regions that
+            were allocated via an arena other than the one specified.  This
+            macro does not validate that <parameter>a</parameter> specifies an
+            arena index in the valid range.</para></listitem>
           </varlistentry>
         </variablelist>
       </para>
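
As a quick illustration of the flag macros described above (a hedged sketch, not part of the manual text: it assumes a jemalloc build with tcache support, and the variable names are illustrative), flags compose with bitwise OR in the second argument of mallocx():

#include <stddef.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    unsigned tc;
    size_t sz = sizeof(tc);

    /* Acquire an explicit thread-specific cache via the tcache.create mallctl. */
    if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
        return 1;

    /* 64-byte-aligned, zero-filled allocation routed through that tcache. */
    void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE(tc));
    if (p == NULL)
        return 1;

    /* Free through the same *allocx family, reusing the tcache flag. */
    dallocx(p, MALLOCX_TCACHE(tc));
    return 0;
}
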
       <para>The <function>mallocx<parameter/></function> function allocates at
       least <parameter>size</parameter> bytes of memory, and returns a pointer
       to the base address of the allocation.  Behavior is undefined if
-      <parameter>size</parameter> is <constant>0</constant>, or if request size
-      overflows due to size class and/or alignment constraints.</para>
+      <parameter>size</parameter> is <constant>0</constant>.</para>
 
       <para>The <function>rallocx<parameter/></function> function resizes the
       allocation at <parameter>ptr</parameter> to be at least
       <parameter>size</parameter> bytes, and returns a pointer to the base
       address of the resulting allocation, which may or may not have moved from
       its original location.  Behavior is undefined if
-      <parameter>size</parameter> is <constant>0</constant>, or if request size
-      overflows due to size class and/or alignment constraints.</para>
+      <parameter>size</parameter> is <constant>0</constant>.</para>
 
       <para>The <function>xallocx<parameter/></function> function resizes the
       allocation at <parameter>ptr</parameter> in place to be at least
       memory, but it performs the same size computation as the
       <function>mallocx<parameter/></function> function, and returns the real
       size of the allocation that would result from the equivalent
-      <function>mallocx<parameter/></function> function call.  Behavior is
-      undefined if <parameter>size</parameter> is <constant>0</constant>, or if
-      request size overflows due to size class and/or alignment
-      constraints.</para>
+      <function>mallocx<parameter/></function> function call, or
+      <constant>0</constant> if the inputs exceed the maximum supported size
+      class and/or alignment.  Behavior is undefined if
+      <parameter>size</parameter> is <constant>0</constant>.</para>
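
A companion sketch for the resizing and size-computation behavior just described; in jemalloc's *allocx family the size-only computation is nallocx() (assumed here, since the name falls outside this hunk) and rallocx() performs the resize. Sizes and alignment below are arbitrary.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    /* nallocx() allocates nothing; it reports the real size mallocx() would
     * return for this request, or 0 if no size class can satisfy it. */
    size_t real = nallocx(100, MALLOCX_ALIGN(16));
    printf("a 100-byte, 16-byte-aligned request occupies %zu bytes\n", real);

    void *p = mallocx(100, MALLOCX_ALIGN(16));
    if (p == NULL)
        return 1;

    /* rallocx() may or may not move the allocation, so keep the old pointer
     * until the call succeeds. */
    void *q = rallocx(p, 1000, MALLOCX_ALIGN(16));
    if (q == NULL) {
        dallocx(p, 0);
        return 1;
    }
    dallocx(q, 0);
    return 0;
}
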
 
       <para>The <function>mallctl<parameter/></function> function provides a
       general interface for introspecting the memory allocator, as well as
@@ -406,11 +426,12 @@ for (i = 0; i < nbins; i++) {
       functions simultaneously.  If <option>--enable-stats</option> is
       specified during configuration, &ldquo;m&rdquo; and &ldquo;a&rdquo; can
       be specified to omit merged arena and per arena statistics, respectively;
-      &ldquo;b&rdquo; and &ldquo;l&rdquo; can be specified to omit per size
-      class statistics for bins and large objects, respectively.  Unrecognized
-      characters are silently ignored.  Note that thread caching may prevent
-      some statistics from being completely up to date, since extra locking
-      would be required to merge counters that track thread cache operations.
+      &ldquo;b&rdquo;, &ldquo;l&rdquo;, and &ldquo;h&rdquo; can be specified to
+      omit per size class statistics for bins, large objects, and huge objects,
+      respectively.  Unrecognized characters are silently ignored.  Note that
+      thread caching may prevent some statistics from being completely up to
+      date, since extra locking would be required to merge counters that track
+      thread cache operations.
       </para>
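
These option characters make up the opts string of malloc_stats_print(), the function this paragraph documents; a brief hedged sketch, assuming a build configured with --enable-stats:

#include <jemalloc/jemalloc.h>

int main(void) {
    /* NULL write callback and opaque pointer: statistics go to stderr.
     * "blh" omits the per-size-class tables for bins, large objects, and
     * huge objects; NULL or "" prints everything. */
    malloc_stats_print(NULL, NULL, "blh");
    return 0;
}
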
 
       <para>The <function>malloc_usable_size<parameter/></function> function
@@ -432,19 +453,20 @@ for (i = 0; i < nbins; i++) {
     routines, the allocator initializes its internals based in part on various
     options that can be specified at compile- or run-time.</para>
 
-    <para>The string pointed to by the global variable
-    <varname>malloc_conf</varname>, the &ldquo;name&rdquo; of the file
-    referenced by the symbolic link named <filename
-    class="symlink">/etc/malloc.conf</filename>, and the value of the
+    <para>The string specified via <option>--with-malloc-conf</option>, the
+    string pointed to by the global variable <varname>malloc_conf</varname>, the
+    &ldquo;name&rdquo; of the file referenced by the symbolic link named
+    <filename class="symlink">/etc/malloc.conf</filename>, and the value of the
     environment variable <envar>MALLOC_CONF</envar>, will be interpreted, in
     that order, from left to right as options.  Note that
     <varname>malloc_conf</varname> may be read before
     <function>main<parameter/></function> is entered, so the declaration of
     <varname>malloc_conf</varname> should specify an initializer that contains
-    the final value to be read by jemalloc.  <varname>malloc_conf</varname> is
-    a compile-time setting, whereas <filename
-    class="symlink">/etc/malloc.conf</filename> and <envar>MALLOC_CONF</envar>
-    can be safely set any time prior to program invocation.</para>
+    the final value to be read by jemalloc.  <option>--with-malloc-conf</option>
+    and <varname>malloc_conf</varname> are compile-time mechanisms, whereas
+    <filename class="symlink">/etc/malloc.conf</filename> and
+    <envar>MALLOC_CONF</envar> can be safely set any time prior to program
+    invocation.</para>
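+
+    <para>As an illustrative sketch (not part of the original manual), a
+    program can embed its final option string at compile time via the
+    <varname>malloc_conf</varname> global; the option names used here
+    correspond to the <link
+    linkend="opt.narenas"><mallctl>opt.narenas</mallctl></link> and <link
+    linkend="opt.lg_chunk"><mallctl>opt.lg_chunk</mallctl></link> options
+    described below, and the header path is assumed to be the conventional
+    one.</para>
+
+    <programlisting language="C"><![CDATA[
+#include <stdlib.h>
+#include <jemalloc/jemalloc.h>
+
+/* jemalloc may read this before main() is entered, so the initializer must
+ * already hold the final value. */
+const char *malloc_conf = "narenas:4,lg_chunk:21";
+
+int
+main(void)
+{
+	void *p = malloc(1);	/* Triggers allocator initialization if not already done. */
+
+	free(p);
+	return (0);
+}]]></programlisting>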
 
     <para>An options string is a comma-separated list of option:value pairs.
     There is one key corresponding to each <link
@@ -494,39 +516,32 @@ for (i = 0; i < nbins; i++) {
     common case, but it increases memory usage and fragmentation, since a
     bounded number of objects can remain allocated in each thread cache.</para>
 
-    <para>Memory is conceptually broken into equal-sized chunks, where the
-    chunk size is a power of two that is greater than the page size.  Chunks
-    are always aligned to multiples of the chunk size.  This alignment makes it
-    possible to find metadata for user objects very quickly.</para>
-
-    <para>User objects are broken into three categories according to size:
-    small, large, and huge.  Small objects are smaller than one page.  Large
-    objects are smaller than the chunk size.  Huge objects are a multiple of
-    the chunk size.  Small and large objects are managed entirely by arenas;
-    huge objects are additionally aggregated in a single data structure that is
-    shared by all threads.  Huge objects are typically used by applications
-    infrequently enough that this single data structure is not a scalability
-    issue.</para>
-
-    <para>Each chunk that is managed by an arena tracks its contents as runs of
+    <para>Memory is conceptually broken into equal-sized chunks, where the chunk
+    size is a power of two that is greater than the page size.  Chunks are
+    always aligned to multiples of the chunk size.  This alignment makes it
+    possible to find metadata for user objects very quickly.  User objects are
+    broken into three categories according to size: small, large, and huge.
+    Multiple small and large objects can reside within a single chunk, whereas
+    huge objects each have one or more chunks backing them.  Each chunk that
+    contains small and/or large objects tracks its contents as runs of
     contiguous pages (unused, backing a set of small objects, or backing one
-    large object).  The combination of chunk alignment and chunk page maps
-    makes it possible to determine all metadata regarding small and large
-    allocations in constant time.</para>
+    large object).  The combination of chunk alignment and chunk page maps makes
+    it possible to determine all metadata regarding small and large allocations
+    in constant time.</para>
 
     <para>Small objects are managed in groups by page runs.  Each run maintains
-    a frontier and free list to track which regions are in use.  Allocation
-    requests that are no more than half the quantum (8 or 16, depending on
-    architecture) are rounded up to the nearest power of two that is at least
-    <code language="C">sizeof(<type>double</type>)</code>.  All other small
-    object size classes are multiples of the quantum, spaced such that internal
-    fragmentation is limited to approximately 25% for all but the smallest size
-    classes.  Allocation requests that are larger than the maximum small size
-    class, but small enough to fit in an arena-managed chunk (see the <link
-    linkend="opt.lg_chunk"><mallctl>opt.lg_chunk</mallctl></link> option), are
-    rounded up to the nearest run size.  Allocation requests that are too large
-    to fit in an arena-managed chunk are rounded up to the nearest multiple of
-    the chunk size.</para>
+    a bitmap to track which regions are in use.  Allocation requests that are no
+    more than half the quantum (8 or 16, depending on architecture) are rounded
+    up to the nearest power of two that is at least <code
+    language="C">sizeof(<type>double</type>)</code>.  All other object size
+    classes are multiples of the quantum, spaced such that there are four size
+    classes for each doubling in size, which limits internal fragmentation to
+    approximately 20% for all but the smallest size classes.  Small size classes
+    are smaller than four times the page size, large size classes are smaller
+    than the chunk size (see the <link
+    linkend="opt.lg_chunk"><mallctl>opt.lg_chunk</mallctl></link> option), and
+    huge size classes extend from the chunk size up to one size class less than
+    the full address space size.</para>
 
     <para>Allocations are packed tightly together, which can be an issue for
     multi-threaded applications.  If you need to assure that allocations do not
@@ -534,8 +549,29 @@ for (i = 0; i < nbins; i++) {
     nearest multiple of the cacheline size, or specify cacheline alignment when
     allocating.</para>
 
-    <para>Assuming 4 MiB chunks, 4 KiB pages, and a 16-byte quantum on a 64-bit
-    system, the size classes in each category are as shown in <xref
+    <para>The <function>realloc<parameter/></function>,
+    <function>rallocx<parameter/></function>, and
+    <function>xallocx<parameter/></function> functions may resize allocations
+    without moving them under limited circumstances.  Unlike the
+    <function>*allocx<parameter/></function> API, the standard API does not
+    officially round up the usable size of an allocation to the nearest size
+    class, so technically it is necessary to call
+    <function>realloc<parameter/></function> to grow e.g. a 9-byte allocation to
+    16 bytes, or shrink a 16-byte allocation to 9 bytes.  Growth and shrinkage
+    trivially succeed in place as long as the pre-size and post-size both round
+    up to the same size class.  No other API guarantees are made regarding
+    in-place resizing, but the current implementation also tries to resize large
+    and huge allocations in place, as long as the pre-size and post-size are
+    both large or both huge.  In such cases shrinkage always succeeds for large
+    size classes, but for huge size classes the chunk allocator must support
+    splitting (see <link
+    linkend="arena.i.chunk_hooks"><mallctl>arena.&lt;i&gt;.chunk_hooks</mallctl></link>).
+    Growth only succeeds if the trailing memory is currently available, and
+    additionally for huge size classes the chunk allocator must support
+    merging.</para>
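+
+    <para>As a minimal sketch (not part of the original manual), the helper
+    below attempts in-place growth with
+    <function>xallocx<parameter/></function> and falls back to a possibly
+    moving <function>rallocx<parameter/></function>; the grow() name is purely
+    illustrative, and the default unprefixed API names and conventional header
+    path are assumed.</para>
+
+    <programlisting language="C"><![CDATA[
+#include <jemalloc/jemalloc.h>
+
+static void *
+grow(void *p, size_t newsz)
+{
+	/* xallocx() returns the resulting real size; anything smaller than
+	 * newsz means in-place growth was not possible. */
+	if (xallocx(p, newsz, 0, 0) >= newsz)
+		return (p);
+	return (rallocx(p, newsz, 0));	/* May move; returns NULL on failure. */
+}
+
+int
+main(void)
+{
+	void *p = mallocx(9, 0);
+
+	if (p == NULL)
+		return (1);
+	/* 9 and 16 round up to the same size class, so this stays in place. */
+	p = grow(p, 16);
+	if (p != NULL)
+		dallocx(p, 0);
+	return (0);
+}]]></programlisting>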
+
+    <para>Assuming 2 MiB chunks, 4 KiB pages, and a 16-byte quantum on a
+    64-bit system, the size classes in each category are as shown in <xref
     linkend="size_classes" xrefstyle="template:Table %n"/>.</para>
 
     <table xml:id="size_classes" frame="all">
@@ -553,13 +589,13 @@ for (i = 0; i < nbins; i++) {
       </thead>
       <tbody>
         <row>
-          <entry morerows="6">Small</entry>
+          <entry morerows="8">Small</entry>
           <entry>lg</entry>
           <entry>[8]</entry>
         </row>
         <row>
           <entry>16</entry>
-          <entry>[16, 32, 48, ..., 128]</entry>
+          <entry>[16, 32, 48, 64, 80, 96, 112, 128]</entry>
         </row>
         <row>
           <entry>32</entry>
@@ -579,17 +615,77 @@ for (i = 0; i < nbins; i++) {
         </row>
         <row>
           <entry>512</entry>
-          <entry>[2560, 3072, 3584]</entry>
+          <entry>[2560, 3072, 3584, 4096]</entry>
+        </row>
+        <row>
+          <entry>1 KiB</entry>
+          <entry>[5 KiB, 6 KiB, 7 KiB, 8 KiB]</entry>
+        </row>
+        <row>
+          <entry>2 KiB</entry>
+          <entry>[10 KiB, 12 KiB, 14 KiB]</entry>
+        </row>
+        <row>
+          <entry morerows="7">Large</entry>
+          <entry>2 KiB</entry>
+          <entry>[16 KiB]</entry>
         </row>
         <row>
-          <entry>Large</entry>
           <entry>4 KiB</entry>
-          <entry>[4 KiB, 8 KiB, 12 KiB, ..., 4072 KiB]</entry>
+          <entry>[20 KiB, 24 KiB, 28 KiB, 32 KiB]</entry>
+        </row>
+        <row>
+          <entry>8 KiB</entry>
+          <entry>[40 KiB, 48 KiB, 56 KiB, 64 KiB]</entry>
+        </row>
+        <row>
+          <entry>16 KiB</entry>
+          <entry>[80 KiB, 96 KiB, 112 KiB, 128 KiB]</entry>
+        </row>
+        <row>
+          <entry>32 KiB</entry>
+          <entry>[160 KiB, 192 KiB, 224 KiB, 256 KiB]</entry>
+        </row>
+        <row>
+          <entry>64 KiB</entry>
+          <entry>[320 KiB, 384 KiB, 448 KiB, 512 KiB]</entry>
+        </row>
+        <row>
+          <entry>128 KiB</entry>
+          <entry>[640 KiB, 768 KiB, 896 KiB, 1 MiB]</entry>
+        </row>
+        <row>
+          <entry>256 KiB</entry>
+          <entry>[1280 KiB, 1536 KiB, 1792 KiB]</entry>
+        </row>
+        <row>
+          <entry morerows="6">Huge</entry>
+          <entry>256 KiB</entry>
+          <entry>[2 MiB]</entry>
+        </row>
+        <row>
+          <entry>512 KiB</entry>
+          <entry>[2560 KiB, 3 MiB, 3584 KiB, 4 MiB]</entry>
+        </row>
+        <row>
+          <entry>1 MiB</entry>
+          <entry>[5 MiB, 6 MiB, 7 MiB, 8 MiB]</entry>
+        </row>
+        <row>
+          <entry>2 MiB</entry>
+          <entry>[10 MiB, 12 MiB, 14 MiB, 16 MiB]</entry>
         </row>
         <row>
-          <entry>Huge</entry>
           <entry>4 MiB</entry>
-          <entry>[4 MiB, 8 MiB, 12 MiB, ...]</entry>
+          <entry>[20 MiB, 24 MiB, 28 MiB, 32 MiB]</entry>
+        </row>
+        <row>
+          <entry>8 MiB</entry>
+          <entry>[40 MiB, 48 MiB, 56 MiB, 64 MiB]</entry>
+        </row>
+        <row>
+          <entry>...</entry>
+          <entry>...</entry>
         </row>
       </tbody>
       </tgroup>
@@ -634,6 +730,16 @@ for (i = 0; i < nbins; i++) {
         detecting whether another thread caused a refresh.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="config.cache_oblivious">
+        <term>
+          <mallctl>config.cache_oblivious</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--enable-cache-oblivious</option> was specified
+        during build configuration.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="config.debug">
         <term>
           <mallctl>config.debug</mallctl>
@@ -664,6 +770,17 @@ for (i = 0; i < nbins; i++) {
         during build configuration.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="config.malloc_conf">
+        <term>
+          <mallctl>config.malloc_conf</mallctl>
+          (<type>const char *</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Embedded configure-time-specified run-time options
+        string, empty unless <option>--with-malloc-conf</option> was specified
+        during build configuration.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="config.munmap">
         <term>
           <mallctl>config.munmap</mallctl>
@@ -810,14 +927,14 @@ for (i = 0; i < nbins; i++) {
         <listitem><para>Virtual memory chunk size (log base 2).  If a chunk
         size outside the supported size range is specified, the size is
         silently clipped to the minimum/maximum supported size.  The default
-        chunk size is 4 MiB (2^22).
+        chunk size is 2 MiB (2^21).
         </para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.narenas">
         <term>
           <mallctl>opt.narenas</mallctl>
-          (<type>size_t</type>)
+          (<type>unsigned</type>)
           <literal>r-</literal>
         </term>
         <listitem><para>Maximum number of arenas to use for automatic
@@ -825,6 +942,20 @@ for (i = 0; i < nbins; i++) {
         number of CPUs, or one if there is a single CPU.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="opt.purge">
+        <term>
+          <mallctl>opt.purge</mallctl>
+          (<type>const char *</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Purge mode is &ldquo;ratio&rdquo; (default) or
+        &ldquo;decay&rdquo;.  See <link
+        linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
+        for details of the ratio mode.  See <link
+        linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
+        details of the decay mode.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="opt.lg_dirty_mult">
         <term>
           <mallctl>opt.lg_dirty_mult</mallctl>
@@ -840,7 +971,31 @@ for (i = 0; i < nbins; i++) {
         provides the kernel with sufficient information to recycle dirty pages
         if physical memory becomes scarce and the pages remain unused.  The
         default minimum ratio is 8:1 (2^3:1); an option value of -1 will
-        disable dirty page purging.</para></listitem>
+        disable dirty page purging.  See <link
+        linkend="arenas.lg_dirty_mult"><mallctl>arenas.lg_dirty_mult</mallctl></link>
+        and <link
+        linkend="arena.i.lg_dirty_mult"><mallctl>arena.&lt;i&gt;.lg_dirty_mult</mallctl></link>
+        for related dynamic control options.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.decay_time">
+        <term>
+          <mallctl>opt.decay_time</mallctl>
+          (<type>ssize_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Approximate time in seconds from the creation of a set
+        of unused dirty pages until an equivalent set of unused dirty pages is
+        purged and/or reused.  The pages are incrementally purged according to a
+        sigmoidal decay curve that starts and ends with zero purge rate.  A
+        decay time of 0 causes all unused dirty pages to be purged immediately
+        upon creation.  A decay time of -1 disables purging.  The default decay
+        time is 10 seconds.  See <link
+        linkend="arenas.decay_time"><mallctl>arenas.decay_time</mallctl></link>
+        and <link
+        linkend="arena.i.decay_time"><mallctl>arena.&lt;i&gt;.decay_time</mallctl></link>
+        for related dynamic control options.
+        </para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.stats_print">
@@ -857,26 +1012,34 @@ for (i = 0; i < nbins; i++) {
         <option>--enable-stats</option> is specified during configuration, this
         has the potential to cause deadlock for a multi-threaded process that
         exits while one or more threads are executing in the memory allocation
-        functions.  Therefore, this option should only be used with care; it is
-        primarily intended as a performance tuning aid during application
+        functions.  Furthermore, <function>atexit<parameter/></function> may
+        allocate memory during application initialization and then deadlock
+        internally when jemalloc in turn calls
+        <function>atexit<parameter/></function>, so this option is not
+        universally usable (though the application can register its own
+        <function>atexit<parameter/></function> function with equivalent
+        functionality).  Therefore, this option should only be used with care;
+        it is primarily intended as a performance tuning aid during application
         development.  This option is disabled by default.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.junk">
         <term>
           <mallctl>opt.junk</mallctl>
-          (<type>bool</type>)
+          (<type>const char *</type>)
           <literal>r-</literal>
           [<option>--enable-fill</option>]
         </term>
-        <listitem><para>Junk filling enabled/disabled.  If enabled, each byte
-        of uninitialized allocated memory will be initialized to
-        <literal>0xa5</literal>.  All deallocated memory will be initialized to
-        <literal>0x5a</literal>.  This is intended for debugging and will
-        impact performance negatively.  This option is disabled by default
-        unless <option>--enable-debug</option> is specified during
-        configuration, in which case it is enabled by default unless running
-        inside <ulink
+        <listitem><para>Junk filling.  If set to "alloc", each byte of
+        uninitialized allocated memory will be initialized to
+        <literal>0xa5</literal>.  If set to "free", all deallocated memory will
+        be initialized to <literal>0x5a</literal>.  If set to "true", both
+        allocated and deallocated memory will be initialized, and if set to
+        "false", junk filling be disabled entirely.  This is intended for
+        debugging and will impact performance negatively.  This option is
+        "false" by default unless <option>--enable-debug</option> is specified
+        during configuration, in which case it is "true" by default unless
+        running inside <ulink
         url="http://valgrind.org/">Valgrind</ulink>.</para></listitem>
       </varlistentry>
 
@@ -977,12 +1140,11 @@ malloc_conf = "xmalloc:true";]]></programlisting>
           <literal>r-</literal>
           [<option>--enable-tcache</option>]
         </term>
-        <listitem><para>Thread-specific caching enabled/disabled.  When there
-        are multiple threads, each thread uses a thread-specific cache for
-        objects up to a certain size.  Thread-specific caching allows many
-        allocations to be satisfied without performing any thread
-        synchronization, at the cost of increased memory use.  See the
-        <link
+        <listitem><para>Thread-specific caching (tcache) enabled/disabled.  When
+        there are multiple threads, each thread uses a tcache for objects up to
+        a certain size.  Thread-specific caching allows many allocations to be
+        satisfied without performing any thread synchronization, at the cost of
+        increased memory use.  See the <link
         linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link>
         option for related tuning information.  This option is enabled by
         default unless running inside <ulink
@@ -998,8 +1160,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
           [<option>--enable-tcache</option>]
         </term>
         <listitem><para>Maximum size class (log base 2) to cache in the
-        thread-specific cache.  At a minimum, all small size classes are
-        cached, and at a maximum all large size classes are cached.  The
+        thread-specific cache (tcache).  At a minimum, all small size classes
+        are cached, and at a maximum all large size classes are cached.  The
         default maximum is 32 KiB (2^15).</para></listitem>
       </varlistentry>
 
@@ -1024,9 +1186,11 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         option for information on high-water-triggered profile dumping, and the
         <link linkend="opt.prof_final"><mallctl>opt.prof_final</mallctl></link>
         option for final profile dumping.  Profile output is compatible with
-        the included <command>pprof</command> Perl script, which originates
-        from the <ulink url="http://code.google.com/p/gperftools/">gperftools
-        package</ulink>.</para></listitem>
+        the <command>jeprof</command> command, which is based on the
+        <command>pprof</command> that is developed as part of the <ulink
+        url="http://code.google.com/p/gperftools/">gperftools
+        package</ulink>.  See <link linkend="heap_profile_format">HEAP PROFILE
+        FORMAT</link> for heap profile format documentation.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.prof_prefix">
@@ -1047,7 +1211,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         <term>
           <mallctl>opt.prof_active</mallctl>
           (<type>bool</type>)
-          <literal>rw</literal>
+          <literal>r-</literal>
           [<option>--enable-prof</option>]
         </term>
         <listitem><para>Profiling activated/deactivated.  This is a secondary
@@ -1132,13 +1296,11 @@ malloc_conf = "xmalloc:true";]]></programlisting>
           <literal>r-</literal>
           [<option>--enable-prof</option>]
         </term>
-        <listitem><para>Trigger a memory profile dump every time the total
-        virtual memory exceeds the previous maximum.  Profiles are dumped to
-        files named according to the pattern
-        <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</filename>,
-        where <literal>&lt;prefix&gt;</literal> is controlled by the <link
-        linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
-        option.  This option is disabled by default.</para></listitem>
+        <listitem><para>Set the initial state of <link
+        linkend="prof.gdump"><mallctl>prof.gdump</mallctl></link>, which when
+        enabled triggers a memory profile dump every time the total virtual
+        memory exceeds the previous maximum.  This option is disabled by
+        default.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.prof_final">
@@ -1155,7 +1317,13 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.f.heap</filename>,
         where <literal>&lt;prefix&gt;</literal> is controlled by the <link
         linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
-        option.  This option is enabled by default.</para></listitem>
+        option.  Note that <function>atexit<parameter/></function> may allocate
+        memory during application initialization and then deadlock internally
+        when jemalloc in turn calls <function>atexit<parameter/></function>, so
+        this option is not universally usable (though the application can
+        register its own <function>atexit<parameter/></function> function with
+        equivalent functionality).  This option is disabled by
+        default.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.prof_leak">
@@ -1252,7 +1420,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         <listitem><para>Enable/disable calling thread's tcache.  The tcache is
         implicitly flushed as a side effect of becoming
         disabled (see <link
-        lenkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>).
+        linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>).
         </para></listitem>
       </varlistentry>
 
@@ -1263,9 +1431,9 @@ malloc_conf = "xmalloc:true";]]></programlisting>
           <literal>--</literal>
           [<option>--enable-tcache</option>]
         </term>
-        <listitem><para>Flush calling thread's tcache.  This interface releases
-        all cached objects and internal data structures associated with the
-        calling thread's thread-specific cache.  Ordinarily, this interface
+        <listitem><para>Flush calling thread's thread-specific cache (tcache).
+        This interface releases all cached objects and internal data structures
+        associated with the calling thread's tcache.  Ordinarily, this interface
         need not be called, since automatic periodic incremental garbage
         collection occurs, and the thread cache is automatically discarded when
         a thread exits.  However, garbage collection is triggered by allocation
@@ -1290,8 +1458,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         can cause asynchronous string deallocation.  Furthermore, each
         invocation of this interface can only read or write; simultaneous
         read/write is not supported due to string lifetime limitations.  The
-        name string must nil-terminated and comprised only of characters in the
-        sets recognized
+        name string must be nil-terminated and comprised only of characters in
+        the sets recognized
         by <citerefentry><refentrytitle>isgraph</refentrytitle>
         <manvolnum>3</manvolnum></citerefentry> and
         <citerefentry><refentrytitle>isblank</refentrytitle>
@@ -1312,18 +1480,76 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         default.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="tcache.create">
+        <term>
+          <mallctl>tcache.create</mallctl>
+          (<type>unsigned</type>)
+          <literal>r-</literal>
+          [<option>--enable-tcache</option>]
+        </term>
+        <listitem><para>Create an explicit thread-specific cache (tcache) and
+        return an identifier that can be passed to the <link
+        linkend="MALLOCX_TCACHE"><constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant></link>
+        macro to explicitly use the specified cache rather than the
+        automatically managed one that is used by default.  Each explicit cache
+        can be used by only one thread at a time; the application must assure
+        that this constraint holds.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry id="tcache.flush">
+        <term>
+          <mallctl>tcache.flush</mallctl>
+          (<type>unsigned</type>)
+          <literal>-w</literal>
+          [<option>--enable-tcache</option>]
+        </term>
+        <listitem><para>Flush the specified thread-specific cache (tcache).  The
+        same considerations apply to this interface as to <link
+        linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>,
+        except that the tcache will never be automatically discarded.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry id="tcache.destroy">
+        <term>
+          <mallctl>tcache.destroy</mallctl>
+          (<type>unsigned</type>)
+          <literal>-w</literal>
+          [<option>--enable-tcache</option>]
+        </term>
+        <listitem><para>Flush the specified thread-specific cache (tcache) and
+        make the identifier available for use during a future tcache creation.
+        </para></listitem>
+      </varlistentry>
+
       <varlistentry id="arena.i.purge">
         <term>
           <mallctl>arena.&lt;i&gt;.purge</mallctl>
           (<type>void</type>)
           <literal>--</literal>
         </term>
-        <listitem><para>Purge unused dirty pages for arena &lt;i&gt;, or for
+        <listitem><para>Purge all unused dirty pages for arena &lt;i&gt;, or for
         all arenas if &lt;i&gt; equals <link
         linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.
         </para></listitem>
       </varlistentry>
 
+      <varlistentry id="arena.i.decay">
+        <term>
+          <mallctl>arena.&lt;i&gt;.decay</mallctl>
+          (<type>void</type>)
+          <literal>--</literal>
+        </term>
+        <listitem><para>Trigger decay-based purging of unused dirty pages for
+        arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals <link
+        linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.
+        The proportion of unused dirty pages to be purged depends on the current
+        time; see <link
+        linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
+        details.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="arena.i.dss">
         <term>
           <mallctl>arena.&lt;i&gt;.dss</mallctl>
@@ -1338,75 +1564,233 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         settings.</para></listitem>
       </varlistentry>
 
-      <varlistentry id="arena.i.chunk.alloc">
+      <varlistentry id="arena.i.lg_dirty_mult">
         <term>
-          <mallctl>arena.&lt;i&gt;.chunk.alloc</mallctl>
-          (<type>chunk_alloc_t *</type>)
+          <mallctl>arena.&lt;i&gt;.lg_dirty_mult</mallctl>
+          (<type>ssize_t</type>)
           <literal>rw</literal>
         </term>
-        <listitem><para>Get or set the chunk allocation function for arena
-        &lt;i&gt;.  If setting, the chunk deallocation function should
-        also be set via <link linkend="arena.i.chunk.dalloc">
-        <mallctl>arena.&lt;i&gt;.chunk.dalloc</mallctl></link> to a companion
-        function that knows how to deallocate the chunks.
+        <listitem><para>Current per-arena minimum ratio (log base 2) of active
+        to dirty pages for arena &lt;i&gt;.  Each time this interface is set and
+        the ratio is increased, pages are synchronously purged as necessary to
+        impose the new ratio.  See <link
+        linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
+        for additional information.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="arena.i.decay_time">
+        <term>
+          <mallctl>arena.&lt;i&gt;.decay_time</mallctl>
+          (<type>ssize_t</type>)
+          <literal>rw</literal>
+        </term>
+        <listitem><para>Current per-arena approximate time in seconds from the
+        creation of a set of unused dirty pages until an equivalent set of
+        unused dirty pages is purged and/or reused.  Each time this interface is
+        set, all currently unused dirty pages are considered to have fully
+        decayed, which causes immediate purging of all unused dirty pages unless
+        the decay time is set to -1 (i.e. purging disabled).  See <link
+        linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
+        additional information.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="arena.i.chunk_hooks">
+        <term>
+          <mallctl>arena.&lt;i&gt;.chunk_hooks</mallctl>
+          (<type>chunk_hooks_t</type>)
+          <literal>rw</literal>
+        </term>
+        <listitem><para>Get or set the chunk management hook functions for arena
+        &lt;i&gt;.  The functions must be capable of operating on all extant
+        chunks associated with arena &lt;i&gt;, usually by passing unknown
+        chunks to the replaced functions.  In practice, it is feasible to
+        control allocation for arenas created via <link
+        linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link> such
+        that all chunks originate from an application-supplied chunk allocator
+        (by setting custom chunk hook functions just after arena creation), but
+        the automatically created arenas may have already created chunks prior
+        to the application having an opportunity to take over chunk
+        allocation.</para>
+
+        <programlisting language="C"><![CDATA[
+typedef struct {
+       chunk_alloc_t           *alloc;
+       chunk_dalloc_t          *dalloc;
+       chunk_commit_t          *commit;
+       chunk_decommit_t        *decommit;
+       chunk_purge_t           *purge;
+       chunk_split_t           *split;
+       chunk_merge_t           *merge;
+} chunk_hooks_t;]]></programlisting>
+        <para>The <type>chunk_hooks_t</type> structure comprises function
+        pointers which are described individually below.  jemalloc uses these
+        functions to manage chunk lifetime, which starts off with allocation of
+        mapped committed memory, in the simplest case followed by deallocation.
+        However, there are performance and platform reasons to retain chunks for
+        later reuse.  Cleanup attempts cascade from deallocation to decommit to
+        purging, which gives the chunk management functions opportunities to
+        reject the most permanent cleanup operations in favor of less permanent
+        (and often less costly) operations.  The chunk splitting and merging
+        operations can also be opted out of, but this is mainly intended to
+        support platforms on which virtual memory mappings provided by the
+        operating system kernel do not automatically coalesce and split, e.g.
+        Windows.</para>
+
         <funcsynopsis><funcprototype>
           <funcdef>typedef void *<function>(chunk_alloc_t)</function></funcdef>
           <paramdef>void *<parameter>chunk</parameter></paramdef>
           <paramdef>size_t <parameter>size</parameter></paramdef>
           <paramdef>size_t <parameter>alignment</parameter></paramdef>
           <paramdef>bool *<parameter>zero</parameter></paramdef>
+          <paramdef>bool *<parameter>commit</parameter></paramdef>
           <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
         </funcprototype></funcsynopsis>
-        A chunk allocation function conforms to the <type>chunk_alloc_t</type>
-        type and upon success returns a pointer to <parameter>size</parameter>
-        bytes of memory on behalf of arena <parameter>arena_ind</parameter> such
-        that the chunk's base address is a multiple of
-        <parameter>alignment</parameter>, as well as setting
-        <parameter>*zero</parameter> to indicate whether the chunk is zeroed.
-        Upon error the function returns <constant>NULL</constant> and leaves
-        <parameter>*zero</parameter> unmodified.  The
+        <literallayout></literallayout>
+        <para>A chunk allocation function conforms to the
+        <type>chunk_alloc_t</type> type and upon success returns a pointer to
+        <parameter>size</parameter> bytes of mapped memory on behalf of arena
+        <parameter>arena_ind</parameter> such that the chunk's base address is a
+        multiple of <parameter>alignment</parameter>, as well as setting
+        <parameter>*zero</parameter> to indicate whether the chunk is zeroed and
+        <parameter>*commit</parameter> to indicate whether the chunk is
+        committed.  Upon error the function returns <constant>NULL</constant>
+        and leaves <parameter>*zero</parameter> and
+        <parameter>*commit</parameter> unmodified.  The
         <parameter>size</parameter> parameter is always a multiple of the chunk
         size.  The <parameter>alignment</parameter> parameter is always a power
         of two at least as large as the chunk size.  Zeroing is mandatory if
-        <parameter>*zero</parameter> is true upon function entry.  If
-        <parameter>chunk</parameter> is not <constant>NULL</constant>, the
-        returned pointer must be <parameter>chunk</parameter> or
-        <constant>NULL</constant> if it could not be allocated.</para>
-
-        <para>Note that replacing the default chunk allocation function makes
-        the arena's <link
+        <parameter>*zero</parameter> is true upon function entry.  Committing is
+        mandatory if <parameter>*commit</parameter> is true upon function entry.
+        If <parameter>chunk</parameter> is not <constant>NULL</constant>, the
+        returned pointer must be <parameter>chunk</parameter> on success or
+        <constant>NULL</constant> on error.  Committed memory may be committed
+        in absolute terms as on a system that does not overcommit, or in
+        implicit terms as on a system that overcommits and satisfies physical
+        memory needs on demand via soft page faults.  Note that replacing the
+        default chunk allocation function makes the arena's <link
         linkend="arena.i.dss"><mallctl>arena.&lt;i&gt;.dss</mallctl></link>
-        setting irrelevant.</para></listitem>
-      </varlistentry>
+        setting irrelevant.</para>
 
-      <varlistentry id="arena.i.chunk.dalloc">
-        <term>
-          <mallctl>arena.&lt;i&gt;.chunk.dalloc</mallctl>
-          (<type>chunk_dalloc_t *</type>)
-          <literal>rw</literal>
-        </term>
-        <listitem><para>Get or set the chunk deallocation function for arena
-        &lt;i&gt;.  If setting, the chunk deallocation function must
-        be capable of deallocating all extant chunks associated with arena
-        &lt;i&gt;, usually by passing unknown chunks to the deallocation
-        function that was replaced.  In practice, it is feasible to control
-        allocation for arenas created via <link
-        linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link> such
-        that all chunks originate from an application-supplied chunk allocator
-        (by setting custom chunk allocation/deallocation functions just after
-        arena creation), but the automatically created arenas may have already
-        created chunks prior to the application having an opportunity to take
-        over chunk allocation.
         <funcsynopsis><funcprototype>
-          <funcdef>typedef void <function>(chunk_dalloc_t)</function></funcdef>
+          <funcdef>typedef bool <function>(chunk_dalloc_t)</function></funcdef>
           <paramdef>void *<parameter>chunk</parameter></paramdef>
           <paramdef>size_t <parameter>size</parameter></paramdef>
+          <paramdef>bool <parameter>committed</parameter></paramdef>
           <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
         </funcprototype></funcsynopsis>
+        <literallayout></literallayout>
+        <para>
         A chunk deallocation function conforms to the
         <type>chunk_dalloc_t</type> type and deallocates a
-        <parameter>chunk</parameter> of given <parameter>size</parameter> on
-        behalf of arena <parameter>arena_ind</parameter>.</para></listitem>
+        <parameter>chunk</parameter> of given <parameter>size</parameter> with
+        <parameter>committed</parameter>/decommitted memory as indicated, on
+        behalf of arena <parameter>arena_ind</parameter>, returning false upon
+        success.  If the function returns true, this indicates opt-out from
+        deallocation; the virtual memory mapping associated with the chunk
+        remains mapped, in the same commit state, and available for future use,
+        in which case it will be automatically retained for later reuse.</para>
+
+        <funcsynopsis><funcprototype>
+          <funcdef>typedef bool <function>(chunk_commit_t)</function></funcdef>
+          <paramdef>void *<parameter>chunk</parameter></paramdef>
+          <paramdef>size_t <parameter>size</parameter></paramdef>
+          <paramdef>size_t <parameter>offset</parameter></paramdef>
+          <paramdef>size_t <parameter>length</parameter></paramdef>
+          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+        </funcprototype></funcsynopsis>
+        <literallayout></literallayout>
+        <para>A chunk commit function conforms to the
+        <type>chunk_commit_t</type> type and commits zeroed physical memory to
+        back pages within a <parameter>chunk</parameter> of given
+        <parameter>size</parameter> at <parameter>offset</parameter> bytes,
+        extending for <parameter>length</parameter> on behalf of arena
+        <parameter>arena_ind</parameter>, returning false upon success.
+        Committed memory may be committed in absolute terms as on a system that
+        does not overcommit, or in implicit terms as on a system that
+        overcommits and satisfies physical memory needs on demand via soft page
+        faults.  If the function returns true, this indicates insufficient
+        physical memory to satisfy the request.</para>
+
+        <funcsynopsis><funcprototype>
+          <funcdef>typedef bool <function>(chunk_decommit_t)</function></funcdef>
+          <paramdef>void *<parameter>chunk</parameter></paramdef>
+          <paramdef>size_t <parameter>size</parameter></paramdef>
+          <paramdef>size_t <parameter>offset</parameter></paramdef>
+          <paramdef>size_t <parameter>length</parameter></paramdef>
+          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+        </funcprototype></funcsynopsis>
+        <literallayout></literallayout>
+        <para>A chunk decommit function conforms to the
+        <type>chunk_decommit_t</type> type and decommits any physical memory
+        that is backing pages within a <parameter>chunk</parameter> of given
+        <parameter>size</parameter> at <parameter>offset</parameter> bytes,
+        extending for <parameter>length</parameter> on behalf of arena
+        <parameter>arena_ind</parameter>, returning false upon success, in which
+        case the pages will be committed via the chunk commit function before
+        being reused.  If the function returns true, this indicates opt-out from
+        decommit; the memory remains committed and available for future use, in
+        which case it will be automatically retained for later reuse.</para>
+
+        <funcsynopsis><funcprototype>
+          <funcdef>typedef bool <function>(chunk_purge_t)</function></funcdef>
+          <paramdef>void *<parameter>chunk</parameter></paramdef>
+          <paramdef>size_t <parameter>size</parameter></paramdef>
+          <paramdef>size_t <parameter>offset</parameter></paramdef>
+          <paramdef>size_t <parameter>length</parameter></paramdef>
+          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+        </funcprototype></funcsynopsis>
+        <literallayout></literallayout>
+        <para>A chunk purge function conforms to the <type>chunk_purge_t</type>
+        type and optionally discards physical pages within the virtual memory
+        mapping associated with <parameter>chunk</parameter> of given
+        <parameter>size</parameter> at <parameter>offset</parameter> bytes,
+        extending for <parameter>length</parameter> on behalf of arena
+        <parameter>arena_ind</parameter>, returning false if pages within the
+        purged virtual memory range will be zero-filled the next time they are
+        accessed.</para>
+
+        <funcsynopsis><funcprototype>
+          <funcdef>typedef bool <function>(chunk_split_t)</function></funcdef>
+          <paramdef>void *<parameter>chunk</parameter></paramdef>
+          <paramdef>size_t <parameter>size</parameter></paramdef>
+          <paramdef>size_t <parameter>size_a</parameter></paramdef>
+          <paramdef>size_t <parameter>size_b</parameter></paramdef>
+          <paramdef>bool <parameter>committed</parameter></paramdef>
+          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+        </funcprototype></funcsynopsis>
+        <literallayout></literallayout>
+        <para>A chunk split function conforms to the <type>chunk_split_t</type>
+        type and optionally splits <parameter>chunk</parameter> of given
+        <parameter>size</parameter> into two adjacent chunks, the first of
+        <parameter>size_a</parameter> bytes, and the second of
+        <parameter>size_b</parameter> bytes, operating on
+        <parameter>committed</parameter>/decommitted memory as indicated, on
+        behalf of arena <parameter>arena_ind</parameter>, returning false upon
+        success.  If the function returns true, this indicates that the chunk
+        remains unsplit and therefore should continue to be operated on as a
+        whole.</para>
+
+        <funcsynopsis><funcprototype>
+          <funcdef>typedef bool <function>(chunk_merge_t)</function></funcdef>
+          <paramdef>void *<parameter>chunk_a</parameter></paramdef>
+          <paramdef>size_t <parameter>size_a</parameter></paramdef>
+          <paramdef>void *<parameter>chunk_b</parameter></paramdef>
+          <paramdef>size_t <parameter>size_b</parameter></paramdef>
+          <paramdef>bool <parameter>committed</parameter></paramdef>
+          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+        </funcprototype></funcsynopsis>
+        <literallayout></literallayout>
+        <para>A chunk merge function conforms to the <type>chunk_merge_t</type>
+        type and optionally merges adjacent chunks,
+        <parameter>chunk_a</parameter> of given <parameter>size_a</parameter>
+        and <parameter>chunk_b</parameter> of given
+        <parameter>size_b</parameter> into one contiguous chunk, operating on
+        <parameter>committed</parameter>/decommitted memory as indicated, on
+        behalf of arena <parameter>arena_ind</parameter>, returning false upon
+        success.  If the function returns true, this indicates that the chunks
+        remain distinct mappings and therefore should continue to be operated on
+        independently.</para>
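+
+        <para>As an illustrative sketch (not part of the original manual), the
+        program below creates a fresh arena via <link
+        linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link> and
+        wraps that arena's default chunk allocation hook with a logging shim.
+        It assumes the default unprefixed API names, the conventional
+        <filename>jemalloc/jemalloc.h</filename> header, and the
+        <constant>MALLOCX_ARENA(<parameter>a</parameter>)</constant> flag;
+        error handling is elided for brevity.</para>
+
+        <programlisting language="C"><![CDATA[
+#include <stdbool.h>
+#include <stdio.h>
+#include <jemalloc/jemalloc.h>
+
+static chunk_hooks_t old_hooks;
+
+/* Conforms to chunk_alloc_t: log the request, then delegate to the hook that
+ * was previously installed for this arena. */
+static void *
+log_chunk_alloc(void *chunk, size_t size, size_t alignment, bool *zero,
+    bool *commit, unsigned arena_ind)
+{
+	fprintf(stderr, "chunk alloc: %zu bytes for arena %u\n", size, arena_ind);
+	return (old_hooks.alloc(chunk, size, alignment, zero, commit, arena_ind));
+}
+
+int
+main(void)
+{
+	unsigned arena_ind;
+	size_t usz = sizeof(arena_ind);
+	size_t hsz = sizeof(chunk_hooks_t);
+	chunk_hooks_t new_hooks;
+	char cmd[64];
+	void *p;
+
+	/* Create a new arena so that no chunks predate the custom hooks. */
+	mallctl("arenas.extend", &arena_ind, &usz, NULL, 0);
+
+	/* Read the current hooks, substitute the alloc hook, and write back. */
+	snprintf(cmd, sizeof(cmd), "arena.%u.chunk_hooks", arena_ind);
+	mallctl(cmd, &old_hooks, &hsz, NULL, 0);
+	new_hooks = old_hooks;
+	new_hooks.alloc = log_chunk_alloc;
+	mallctl(cmd, NULL, NULL, &new_hooks, sizeof(new_hooks));
+
+	/* Allocate from the new arena so the wrapped hook fires. */
+	p = mallocx(1, MALLOCX_ARENA(arena_ind));
+	dallocx(p, 0);
+	return (0);
+}]]></programlisting>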
+        </listitem>
       </varlistentry>
 
       <varlistentry id="arenas.narenas">
@@ -1430,6 +1814,35 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         initialized.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="arenas.lg_dirty_mult">
+        <term>
+          <mallctl>arenas.lg_dirty_mult</mallctl>
+          (<type>ssize_t</type>)
+          <literal>rw</literal>
+        </term>
+        <listitem><para>Current default per-arena minimum ratio (log base 2) of
+        active to dirty pages, used to initialize <link
+        linkend="arena.i.lg_dirty_mult"><mallctl>arena.&lt;i&gt;.lg_dirty_mult</mallctl></link>
+        during arena creation.  See <link
+        linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
+        for additional information.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="arenas.decay_time">
+        <term>
+          <mallctl>arenas.decay_time</mallctl>
+          (<type>ssize_t</type>)
+          <literal>rw</literal>
+        </term>
+        <listitem><para>Current default per-arena approximate time in seconds
+        from the creation of a set of unused dirty pages until an equivalent set
+        of unused dirty pages is purged and/or reused, used to initialize <link
+        linkend="arena.i.decay_time"><mallctl>arena.&lt;i&gt;.decay_time</mallctl></link>
+        during arena creation.  See <link
+        linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
+        additional information.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="arenas.quantum">
         <term>
           <mallctl>arenas.quantum</mallctl>
@@ -1508,7 +1921,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
       <varlistentry id="arenas.nlruns">
         <term>
           <mallctl>arenas.nlruns</mallctl>
-          (<type>size_t</type>)
+          (<type>unsigned</type>)
           <literal>r-</literal>
         </term>
         <listitem><para>Total number of large size classes.</para></listitem>
@@ -1524,6 +1937,25 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         class.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="arenas.nhchunks">
+        <term>
+          <mallctl>arenas.nhchunks</mallctl>
+          (<type>unsigned</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Total number of huge size classes.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="arenas.hchunk.i.size">
+        <term>
+          <mallctl>arenas.hchunk.&lt;i&gt;.size</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Maximum size supported by this huge size
+        class.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="arenas.extend">
         <term>
           <mallctl>arenas.extend</mallctl>
@@ -1579,6 +2011,22 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         option.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="prof.gdump">
+        <term>
+          <mallctl>prof.gdump</mallctl>
+          (<type>bool</type>)
+          <literal>rw</literal>
+          [<option>--enable-prof</option>]
+        </term>
+        <listitem><para>When enabled, trigger a memory profile dump every time
+        the total virtual memory exceeds the previous maximum.  Profiles are
+        dumped to files named according to the pattern
+        <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</filename>,
+        where <literal>&lt;prefix&gt;</literal> is controlled by the <link
+        linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+        option.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="prof.reset">
         <term>
           <mallctl>prof.reset</mallctl>
@@ -1629,9 +2077,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         </term>
         <listitem><para>Pointer to a counter that contains an approximate count
         of the current number of bytes in active pages.  The estimate may be
-        high, but never low, because each arena rounds up to the nearest
-        multiple of the chunk size when computing its contribution to the
-        counter.  Note that the <link
+        high, but never low, because each arena rounds up when computing its
+        contribution to the counter.  Note that the <link
         linkend="epoch"><mallctl>epoch</mallctl></link> mallctl has no bearing
         on this counter.  Furthermore, counter consistency is maintained via
         atomic operations, so it is necessary to use an atomic operation in
@@ -1662,55 +2109,56 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         equal to <link
         linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link>.
         This does not include <link linkend="stats.arenas.i.pdirty">
-        <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl></link> and pages
+        <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl></link>, nor pages
         entirely devoted to allocator metadata.</para></listitem>
       </varlistentry>
 
-      <varlistentry id="stats.mapped">
+      <varlistentry id="stats.metadata">
         <term>
-          <mallctl>stats.mapped</mallctl>
+          <mallctl>stats.metadata</mallctl>
           (<type>size_t</type>)
           <literal>r-</literal>
           [<option>--enable-stats</option>]
         </term>
-        <listitem><para>Total number of bytes in chunks mapped on behalf of the
-        application.  This is a multiple of the chunk size, and is at least as
-        large as <link
-        linkend="stats.active"><mallctl>stats.active</mallctl></link>.  This
-        does not include inactive chunks.</para></listitem>
+        <listitem><para>Total number of bytes dedicated to metadata, which
+        comprise base allocations used for bootstrap-sensitive internal
+        allocator data structures, arena chunk headers (see <link
+        linkend="stats.arenas.i.metadata.mapped"><mallctl>stats.arenas.&lt;i&gt;.metadata.mapped</mallctl></link>),
+        and internal allocations (see <link
+        linkend="stats.arenas.i.metadata.allocated"><mallctl>stats.arenas.&lt;i&gt;.metadata.allocated</mallctl></link>).</para></listitem>
       </varlistentry>
 
-      <varlistentry id="stats.chunks.current">
+      <varlistentry id="stats.resident">
         <term>
-          <mallctl>stats.chunks.current</mallctl>
+          <mallctl>stats.resident</mallctl>
           (<type>size_t</type>)
           <literal>r-</literal>
           [<option>--enable-stats</option>]
         </term>
-        <listitem><para>Total number of chunks actively mapped on behalf of the
-        application.  This does not include inactive chunks.
-        </para></listitem>
+        <listitem><para>Maximum number of bytes in physically resident data
+        pages mapped by the allocator, comprising all pages dedicated to
+        allocator metadata, pages backing active allocations, and unused dirty
+        pages.  This is a maximum rather than precise because pages may not
+        actually be physically resident if they correspond to demand-zeroed
+        virtual memory that has not yet been touched.  This is a multiple of the
+        page size, and is larger than <link
+        linkend="stats.active"><mallctl>stats.active</mallctl></link>.</para></listitem>
       </varlistentry>
 
-      <varlistentry id="stats.chunks.total">
-        <term>
-          <mallctl>stats.chunks.total</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of chunks allocated.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.chunks.high">
+      <varlistentry id="stats.mapped">
         <term>
-          <mallctl>stats.chunks.high</mallctl>
+          <mallctl>stats.mapped</mallctl>
           (<type>size_t</type>)
           <literal>r-</literal>
           [<option>--enable-stats</option>]
         </term>
-        <listitem><para>Maximum number of active chunks at any time thus far.
-        </para></listitem>
+        <listitem><para>Total number of bytes in active chunks mapped by the
+        allocator.  This is a multiple of the chunk size, and is larger than
+        <link linkend="stats.active"><mallctl>stats.active</mallctl></link>.
+        This does not include inactive chunks, even those that contain unused
+        dirty pages, which means that there is no strict ordering between this
+        and <link
+        linkend="stats.resident"><mallctl>stats.resident</mallctl></link>.</para></listitem>
       </varlistentry>
 
       <varlistentry id="stats.arenas.i.dss">
@@ -1727,6 +2175,31 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         </para></listitem>
       </varlistentry>
 
+      <varlistentry id="stats.arenas.i.lg_dirty_mult">
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.lg_dirty_mult</mallctl>
+          (<type>ssize_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Minimum ratio (log base 2) of active to dirty pages.
+        See <link
+        linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
+        for details.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="stats.arenas.i.decay_time">
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.decay_time</mallctl>
+          (<type>ssize_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Approximate time in seconds from the creation of a set
+        of unused dirty pages until an equivalent set of unused dirty pages is
+        purged and/or reused.  See <link
+        linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link>
+        for details.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="stats.arenas.i.nthreads">
         <term>
           <mallctl>stats.arenas.&lt;i&gt;.nthreads</mallctl>
@@ -1768,6 +2241,38 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         <listitem><para>Number of mapped bytes.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="stats.arenas.i.metadata.mapped">
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.metadata.mapped</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Number of mapped bytes in arena chunk headers, which
+        track the states of the non-metadata pages.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="stats.arenas.i.metadata.allocated">
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.metadata.allocated</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Number of bytes dedicated to internal allocations.
+        Internal allocations differ from application-originated allocations in
+        that they are for internal use, and that they are omitted from heap
+        profiles.  This statistic is reported separately from <link
+        linkend="stats.metadata"><mallctl>stats.metadata</mallctl></link> and
+        <link
+        linkend="stats.arenas.i.metadata.mapped"><mallctl>stats.arenas.&lt;i&gt;.metadata.mapped</mallctl></link>
+        because it overlaps with e.g. the <link
+        linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link> and
+        <link linkend="stats.active"><mallctl>stats.active</mallctl></link>
+        statistics, whereas the other metadata statistics do
+        not.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="stats.arenas.i.npurge">
         <term>
           <mallctl>stats.arenas.&lt;i&gt;.npurge</mallctl>
@@ -1933,17 +2438,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         </para></listitem>
       </varlistentry>
 
-      <varlistentry id="stats.arenas.i.bins.j.allocated">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.allocated</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Current number of bytes allocated by
-        bin.</para></listitem>
-      </varlistentry>
-
       <varlistentry id="stats.arenas.i.bins.j.nmalloc">
         <term>
           <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</mallctl>
@@ -1977,6 +2471,17 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         requests.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="stats.arenas.i.bins.j.curregs">
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curregs</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Current number of regions for this size
+        class.</para></listitem>
+      </varlistentry>
+
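Since &lt;i&gt; and &lt;j&gt; are arena and size-class indices, per-bin counters such as curregs are normally read through the MIB interface so the indices can be substituted without re-parsing the control name. A rough sketch under a stats-enabled build; error handling is omitted:

/* Sketch: print curregs for every small bin of the given arena. */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_curregs(unsigned arena_ind)
{
	size_t mib[6], miblen = sizeof(mib) / sizeof(size_t);
	unsigned nbins, j;
	size_t sz = sizeof(nbins);

	mallctl("arenas.nbins", &nbins, &sz, NULL, 0);
	mallctlnametomib("stats.arenas.0.bins.0.curregs", mib, &miblen);
	mib[2] = (size_t)arena_ind;	/* Arena index. */
	for (j = 0; j < nbins; j++) {
		size_t curregs;

		mib[4] = (size_t)j;	/* Size-class (bin) index. */
		sz = sizeof(curregs);
		mallctlbymib(mib, miblen, &curregs, &sz, NULL, 0);
		printf("bin %u: %zu regions in use\n", j, curregs);
	}
}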
       <varlistentry id="stats.arenas.i.bins.j.nfills">
         <term>
           <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</mallctl>
@@ -2071,8 +2576,99 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         <listitem><para>Current number of runs for this size class.
         </para></listitem>
       </varlistentry>
+
+      <varlistentry id="stats.arenas.i.hchunks.j.nmalloc">
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nmalloc</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of allocation requests for this size
+        class served directly by the arena.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="stats.arenas.i.hchunks.j.ndalloc">
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.ndalloc</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of deallocation requests for this
+        size class served directly by the arena.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="stats.arenas.i.hchunks.j.nrequests">
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nrequests</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of allocation requests for this size
+        class.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="stats.arenas.i.hchunks.j.curhchunks">
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.curhchunks</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Current number of huge allocations for this size class.
+        </para></listitem>
+      </varlistentry>
     </variablelist>
   </refsect1>
+  <refsect1 id="heap_profile_format">
+    <title>HEAP PROFILE FORMAT</title>
+    <para>Although the heap profiling functionality was originally designed to
+    be compatible with the
+    <command>pprof</command> command that is developed as part of the <ulink
+    url="http://code.google.com/p/gperftools/">gperftools
+    package</ulink>, the addition of per thread heap profiling functionality
+    required a different heap profile format.  The <command>jeprof</command>
+    command is derived from <command>pprof</command>, with enhancements to
+    support the heap profile format described here.</para>
+
+    <para>In the following hypothetical heap profile, <constant>[...]</constant>
+    indicates elision for the sake of compactness.  <programlisting><![CDATA[
+heap_v2/524288
+  t*: 28106: 56637512 [0: 0]
+  [...]
+  t3: 352: 16777344 [0: 0]
+  [...]
+  t99: 17754: 29341640 [0: 0]
+  [...]
+@ 0x5f86da8 0x5f5a1dc [...] 0x29e4d4e 0xa200316 0xabb2988 [...]
+  t*: 13: 6688 [0: 0]
+  t3: 12: 6496 [0: 0]
+  t99: 1: 192 [0: 0]
+[...]
+
+MAPPED_LIBRARIES:
+[...]]]></programlisting> The following matches the above heap profile, but most
+tokens are replaced with <constant>&lt;description&gt;</constant> to indicate
+descriptions of the corresponding fields.  <programlisting><![CDATA[
+<heap_profile_format_version>/<mean_sample_interval>
+  <aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+  [...]
+  <thread_3_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+  [...]
+  <thread_99_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+  [...]
+@ <top_frame> <frame> [...] <frame> <frame> <frame> [...]
+  <backtrace_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+  <backtrace_thread_3>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+  <backtrace_thread_99>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+[...]
+
+MAPPED_LIBRARIES:
+</proc/<pid>/maps>]]></programlisting></para>
+  </refsect1>
+
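Profiles in this format are emitted automatically according to the opt.prof_* options, or on demand. A minimal on-demand sketch, assuming jemalloc was built with --enable-prof and run with profiling active (e.g. MALLOC_CONF="prof:true"); the file name is arbitrary:

/* Sketch: force a heap profile dump in the format documented above. */
#include <jemalloc/jemalloc.h>

static void
dump_heap_profile(void)
{
	const char *filename = "jeprof.example.heap";

	/*
	 * prof.dump writes a profile to the named file; passing NULL instead
	 * lets jemalloc pick a name based on opt.prof_prefix.
	 */
	mallctl("prof.dump", NULL, NULL, &filename, sizeof(const char *));
}

The resulting file can then be post-processed with the jeprof command described above, for example: jeprof --text ./myprog jeprof.example.heap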
   <refsect1 id="debugging_malloc_problems">
     <title>DEBUGGING MALLOC PROBLEMS</title>
     <para>When debugging, it is a good idea to configure/build jemalloc with
index 1f9857239c8ac11a4eea1986a02be3202224ccf3..3519873ce5009b99759cef37e846c8fd37eca8f4 100644 (file)
@@ -1,6 +1,8 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
+#define        LARGE_MINCLASS          (ZU(1) << LG_LARGE_MINCLASS)
+
 /* Maximum number of regions in one run. */
 #define        LG_RUN_MAXREGS          (LG_PAGE - LG_TINY_MIN)
 #define        RUN_MAXREGS             (1U << LG_RUN_MAXREGS)
 /*
  * The minimum ratio of active:dirty pages per arena is computed as:
  *
- *   (nactive >> opt_lg_dirty_mult) >= ndirty
+ *   (nactive >> lg_dirty_mult) >= ndirty
  *
- * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times
- * as many active pages as dirty pages.
+ * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as
+ * many active pages as dirty pages.
  */
 #define        LG_DIRTY_MULT_DEFAULT   3
 
+typedef enum {
+       purge_mode_ratio = 0,
+       purge_mode_decay = 1,
+
+       purge_mode_limit = 2
+} purge_mode_t;
+#define        PURGE_DEFAULT           purge_mode_ratio
+/* Default decay time in seconds. */
+#define        DECAY_TIME_DEFAULT      10
+/* Number of event ticks between time checks. */
+#define        DECAY_NTICKS_PER_UPDATE 1000
+
+typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
 typedef struct arena_run_s arena_run_t;
 typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
 typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
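The hunk above adds decay-based purging alongside the existing ratio-based scheme; the reworded comment restates the ratio invariant (nactive >> lg_dirty_mult) >= ndirty. As a concrete reading (a standalone sketch, not the allocator's internal code): with lg_dirty_mult = 3 and 1024 active pages, purging becomes necessary once more than 1024 >> 3 = 128 dirty pages accumulate.

#include <stdbool.h>
#include <stddef.h>
#include <sys/types.h>

/*
 * Sketch of the ratio-mode trigger implied by the invariant; a negative
 * lg_dirty_mult disables ratio-based purging entirely.
 */
static bool
ratio_purge_needed(size_t nactive, size_t ndirty, ssize_t lg_dirty_mult)
{

	if (lg_dirty_mult < 0)
		return (false);
	return (ndirty > (nactive >> lg_dirty_mult));
}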
@@ -28,17 +43,16 @@ typedef struct arena_chunk_s arena_chunk_t;
 typedef struct arena_bin_info_s arena_bin_info_t;
 typedef struct arena_bin_s arena_bin_t;
 typedef struct arena_s arena_t;
+typedef struct arena_tdata_s arena_tdata_t;
 
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
+#ifdef JEMALLOC_ARENA_STRUCTS_A
 struct arena_run_s {
-       /* Bin this run is associated with. */
-       arena_bin_t     *bin;
-
-       /* Index of next region that has never been allocated, or nregs. */
-       uint32_t        nextind;
+       /* Index of bin this run is associated with. */
+       szind_t         binind;
 
        /* Number of free regions in run. */
        unsigned        nfree;
@@ -53,15 +67,16 @@ struct arena_chunk_map_bits_s {
         * Run address (or size) and various flags are stored together.  The bit
         * layout looks like (assuming 32-bit system):
         *
-        *   ???????? ???????? ????nnnn nnnndula
+        *   ???????? ???????? ???nnnnn nnndumla
         *
         * ? : Unallocated: Run address for first/last pages, unset for internal
         *                  pages.
         *     Small: Run page offset.
-        *     Large: Run size for first page, unset for trailing pages.
+        *     Large: Run page count for first page, unset for trailing pages.
         * n : binind for small size class, BININD_INVALID for large size class.
         * d : dirty?
         * u : unzeroed?
+        * m : decommitted?
         * l : large?
         * a : allocated?
         *
@@ -73,47 +88,62 @@ struct arena_chunk_map_bits_s {
         * x : don't care
         * - : 0
         * + : 1
-        * [DULA] : bit set
-        * [dula] : bit unset
+        * [DUMLA] : bit set
+        * [dumla] : bit unset
         *
         *   Unallocated (clean):
-        *     ssssssss ssssssss ssss++++ ++++du-a
-        *     xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
-        *     ssssssss ssssssss ssss++++ ++++dU-a
+        *     ssssssss ssssssss sss+++++ +++dum-a
+        *     xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx
+        *     ssssssss ssssssss sss+++++ +++dUm-a
         *
         *   Unallocated (dirty):
-        *     ssssssss ssssssss ssss++++ ++++D--a
+        *     ssssssss ssssssss sss+++++ +++D-m-a
         *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
-        *     ssssssss ssssssss ssss++++ ++++D--a
+        *     ssssssss ssssssss sss+++++ +++D-m-a
         *
         *   Small:
-        *     pppppppp pppppppp ppppnnnn nnnnd--A
-        *     pppppppp pppppppp ppppnnnn nnnn---A
-        *     pppppppp pppppppp ppppnnnn nnnnd--A
+        *     pppppppp pppppppp pppnnnnn nnnd---A
+        *     pppppppp pppppppp pppnnnnn nnn----A
+        *     pppppppp pppppppp pppnnnnn nnnd---A
         *
         *   Large:
-        *     ssssssss ssssssss ssss++++ ++++D-LA
+        *     ssssssss ssssssss sss+++++ +++D--LA
         *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
-        *     -------- -------- ----++++ ++++D-LA
+        *     -------- -------- ---+++++ +++D--LA
         *
-        *   Large (sampled, size <= PAGE):
-        *     ssssssss ssssssss ssssnnnn nnnnD-LA
+        *   Large (sampled, size <= LARGE_MINCLASS):
+        *     ssssssss ssssssss sssnnnnn nnnD--LA
+        *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+        *     -------- -------- ---+++++ +++D--LA
         *
-        *   Large (not sampled, size == PAGE):
-        *     ssssssss ssssssss ssss++++ ++++D-LA
+        *   Large (not sampled, size == LARGE_MINCLASS):
+        *     ssssssss ssssssss sss+++++ +++D--LA
+        *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+        *     -------- -------- ---+++++ +++D--LA
         */
        size_t                          bits;
-#define        CHUNK_MAP_BININD_SHIFT  4
+#define        CHUNK_MAP_ALLOCATED     ((size_t)0x01U)
+#define        CHUNK_MAP_LARGE         ((size_t)0x02U)
+#define        CHUNK_MAP_STATE_MASK    ((size_t)0x3U)
+
+#define        CHUNK_MAP_DECOMMITTED   ((size_t)0x04U)
+#define        CHUNK_MAP_UNZEROED      ((size_t)0x08U)
+#define        CHUNK_MAP_DIRTY         ((size_t)0x10U)
+#define        CHUNK_MAP_FLAGS_MASK    ((size_t)0x1cU)
+
+#define        CHUNK_MAP_BININD_SHIFT  5
 #define        BININD_INVALID          ((size_t)0xffU)
-/*     CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */
-#define        CHUNK_MAP_BININD_MASK   ((size_t)0xff0U)
+#define        CHUNK_MAP_BININD_MASK   (BININD_INVALID << CHUNK_MAP_BININD_SHIFT)
 #define        CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
-#define        CHUNK_MAP_FLAGS_MASK    ((size_t)0xcU)
-#define        CHUNK_MAP_DIRTY         ((size_t)0x8U)
-#define        CHUNK_MAP_UNZEROED      ((size_t)0x4U)
-#define        CHUNK_MAP_LARGE         ((size_t)0x2U)
-#define        CHUNK_MAP_ALLOCATED     ((size_t)0x1U)
-#define        CHUNK_MAP_KEY           CHUNK_MAP_ALLOCATED
+
+#define        CHUNK_MAP_RUNIND_SHIFT  (CHUNK_MAP_BININD_SHIFT + 8)
+#define        CHUNK_MAP_SIZE_SHIFT    (CHUNK_MAP_RUNIND_SHIFT - LG_PAGE)
+#define        CHUNK_MAP_SIZE_MASK                                             \
+    (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK))
+};
+
+struct arena_runs_dirty_link_s {
+       qr(arena_runs_dirty_link_t)     rd_link;
 };
 
 /*
@@ -127,29 +157,36 @@ struct arena_chunk_map_misc_s {
         *
         * 1) arena_t's runs_avail tree.
         * 2) arena_run_t conceptually uses this linkage for in-use non-full
-        * runs, rather than directly embedding linkage.
+        *    runs, rather than directly embedding linkage.
         */
        rb_node(arena_chunk_map_misc_t)         rb_link;
 
        union {
                /* Linkage for list of dirty runs. */
-               ql_elm(arena_chunk_map_misc_t)  dr_link;
+               arena_runs_dirty_link_t         rd;
 
                /* Profile counters, used for large object runs. */
-               prof_tctx_t                     *prof_tctx;
+               union {
+                       void                    *prof_tctx_pun;
+                       prof_tctx_t             *prof_tctx;
+               };
 
                /* Small region run metadata. */
                arena_run_t                     run;
        };
 };
-typedef rb_tree(arena_chunk_map_misc_t) arena_avail_tree_t;
 typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
-typedef ql_head(arena_chunk_map_misc_t) arena_chunk_miscelms_t;
+#endif /* JEMALLOC_ARENA_STRUCTS_A */
 
+#ifdef JEMALLOC_ARENA_STRUCTS_B
 /* Arena chunk header. */
 struct arena_chunk_s {
-       /* Arena that owns the chunk. */
-       arena_t                 *arena;
+       /*
+        * A pointer to the arena that owns the chunk is stored within the node.
+        * This field as a whole is used by chunks_rtree to support both
+        * ivsalloc() and core-based debugging.
+        */
+       extent_node_t           node;
 
        /*
         * Map of pages within chunk that keeps track of free/large/small.  The
@@ -195,28 +232,28 @@ struct arena_chunk_s {
  */
 struct arena_bin_info_s {
        /* Size of regions in a run for this bin's size class. */
-       size_t          reg_size;
+       size_t                  reg_size;
 
        /* Redzone size. */
-       size_t          redzone_size;
+       size_t                  redzone_size;
 
        /* Interval between regions (reg_size + (redzone_size << 1)). */
-       size_t          reg_interval;
+       size_t                  reg_interval;
 
        /* Total size of a run for this bin's size class. */
-       size_t          run_size;
+       size_t                  run_size;
 
        /* Total number of regions in a run for this bin's size class. */
-       uint32_t        nregs;
+       uint32_t                nregs;
 
        /*
         * Metadata used to manipulate bitmaps for runs associated with this
         * bin.
         */
-       bitmap_info_t   bitmap_info;
+       bitmap_info_t           bitmap_info;
 
        /* Offset of first region in a run for this bin's size class. */
-       uint32_t        reg0_offset;
+       uint32_t                reg0_offset;
 };
 
 struct arena_bin_s {
@@ -226,13 +263,13 @@ struct arena_bin_s {
         * which may be acquired while holding one or more bin locks, but not
         * vice versa.
         */
-       malloc_mutex_t  lock;
+       malloc_mutex_t          lock;
 
        /*
         * Current run being used to service allocations of this bin's size
         * class.
         */
-       arena_run_t     *runcur;
+       arena_run_t             *runcur;
 
        /*
         * Tree of non-full runs.  This tree is used when looking for an
@@ -241,10 +278,10 @@ struct arena_bin_s {
         * objects packed well, and it can also help reduce the number of
         * almost-empty chunks.
         */
-       arena_run_tree_t runs;
+       arena_run_tree_t        runs;
 
        /* Bin statistics. */
-       malloc_bin_stats_t stats;
+       malloc_bin_stats_t      stats;
 };
 
 struct arena_s {
@@ -253,15 +290,14 @@ struct arena_s {
 
        /*
         * Number of threads currently assigned to this arena.  This field is
-        * protected by arenas_lock.
+        * synchronized via atomic operations.
         */
        unsigned                nthreads;
 
        /*
         * There are three classes of arena operations from a locking
         * perspective:
-        * 1) Thread asssignment (modifies nthreads) is protected by
-        *    arenas_lock.
+        * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
         * 2) Bin-related operations are protected by bin locks.
         * 3) Chunk- and run-related operations are protected by this mutex.
         */
@@ -270,12 +306,19 @@ struct arena_s {
        arena_stats_t           stats;
        /*
         * List of tcaches for extant threads associated with this arena.
-        * Stats from these are merged incrementally, and at exit.
+        * Stats from these are merged incrementally, and at exit if
+        * opt_stats_print is enabled.
         */
        ql_head(tcache_t)       tcache_ql;
 
        uint64_t                prof_accumbytes;
 
+       /*
+        * PRNG state for cache index randomization of large allocation base
+        * pointers.
+        */
+       uint64_t                offset_state;
+
        dss_prec_t              dss_prec;
 
        /*
@@ -290,6 +333,12 @@ struct arena_s {
         */
        arena_chunk_t           *spare;
 
+       /* Minimum ratio (log base 2) of nactive:ndirty. */
+       ssize_t                 lg_dirty_mult;
+
+       /* True if a thread is currently executing arena_purge_to_limit(). */
+       bool                    purging;
+
        /* Number of pages in active runs and huge regions. */
        size_t                  nactive;
 
@@ -302,53 +351,179 @@ struct arena_s {
        size_t                  ndirty;
 
        /*
-        * Size/address-ordered trees of this arena's available runs.  The trees
-        * are used for first-best-fit run allocation.
+        * Unused dirty memory this arena manages.  Dirty memory is conceptually
+        * tracked as an arbitrarily interleaved LRU of dirty runs and cached
+        * chunks, but the list linkage is actually semi-duplicated in order to
+        * avoid extra arena_chunk_map_misc_t space overhead.
+        *
+        *   LRU-----------------------------------------------------------MRU
+        *
+        *        /-- arena ---\
+        *        |            |
+        *        |            |
+        *        |------------|                             /- chunk -\
+        *   ...->|chunks_cache|<--------------------------->|  /----\ |<--...
+        *        |------------|                             |  |node| |
+        *        |            |                             |  |    | |
+        *        |            |    /- run -\    /- run -\   |  |    | |
+        *        |            |    |       |    |       |   |  |    | |
+        *        |            |    |       |    |       |   |  |    | |
+        *        |------------|    |-------|    |-------|   |  |----| |
+        *   ...->|runs_dirty  |<-->|rd     |<-->|rd     |<---->|rd  |<----...
+        *        |------------|    |-------|    |-------|   |  |----| |
+        *        |            |    |       |    |       |   |  |    | |
+        *        |            |    |       |    |       |   |  \----/ |
+        *        |            |    \-------/    \-------/   |         |
+        *        |            |                             |         |
+        *        |            |                             |         |
+        *        \------------/                             \---------/
+        */
+       arena_runs_dirty_link_t runs_dirty;
+       extent_node_t           chunks_cache;
+
+       /*
+        * Approximate time in seconds from the creation of a set of unused
+        * dirty pages until an equivalent set of unused dirty pages is purged
+        * and/or reused.
+        */
+       ssize_t                 decay_time;
+       /* decay_time / SMOOTHSTEP_NSTEPS. */
+       nstime_t                decay_interval;
+       /*
+        * Time at which the current decay interval logically started.  We do
+        * not actually advance to a new epoch until sometime after it starts
+        * because of scheduling and computation delays, and it is even possible
+        * to completely skip epochs.  In all cases, during epoch advancement we
+        * merge all relevant activity into the most recently recorded epoch.
+        */
+       nstime_t                decay_epoch;
+       /* decay_deadline randomness generator. */
+       uint64_t                decay_jitter_state;
+       /*
+        * Deadline for current epoch.  This is the sum of decay_interval and
+        * per epoch jitter which is a uniform random variable in
+        * [0..decay_interval).  Epochs always advance by precise multiples of
+        * decay_interval, but we randomize the deadline to reduce the
+        * likelihood of arenas purging in lockstep.
+        */
+       nstime_t                decay_deadline;
+       /*
+        * Number of dirty pages at beginning of current epoch.  During epoch
+        * advancement we use the delta between decay_ndirty and ndirty to
+        * determine how many dirty pages, if any, were generated, and record
+        * the result in decay_backlog.
+        */
+       size_t                  decay_ndirty;
+       /*
+        * Memoized result of arena_decay_backlog_npages_limit() corresponding
+        * to the current contents of decay_backlog, i.e. the limit on how many
+        * pages are allowed to exist for the decay epochs.
+        */
+       size_t                  decay_backlog_npages_limit;
+       /*
+        * Trailing log of how many unused dirty pages were generated during
+        * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
+        * element is the most recent epoch.  Corresponding epoch times are
+        * relative to decay_epoch.
         */
-       arena_avail_tree_t      runs_avail;
+       size_t                  decay_backlog[SMOOTHSTEP_NSTEPS];
 
-       /* List of dirty runs this arena manages. */
-       arena_chunk_miscelms_t  runs_dirty;
+       /* Extant huge allocations. */
+       ql_head(extent_node_t)  huge;
+       /* Synchronizes all huge allocation/update/deallocation. */
+       malloc_mutex_t          huge_mtx;
 
        /*
-        * user-configureable chunk allocation and deallocation functions.
+        * Trees of chunks that were previously allocated (trees differ only in
+        * node ordering).  These are used when allocating chunks, in an attempt
+        * to re-use address space.  Depending on function, different tree
+        * orderings are needed, which is why there are two trees with the same
+        * contents.
         */
-       chunk_alloc_t           *chunk_alloc;
-       chunk_dalloc_t          *chunk_dalloc;
+       extent_tree_t           chunks_szad_cached;
+       extent_tree_t           chunks_ad_cached;
+       extent_tree_t           chunks_szad_retained;
+       extent_tree_t           chunks_ad_retained;
+
+       malloc_mutex_t          chunks_mtx;
+       /* Cache of nodes that were allocated via base_alloc(). */
+       ql_head(extent_node_t)  node_cache;
+       malloc_mutex_t          node_cache_mtx;
+
+       /* User-configurable chunk hook functions. */
+       chunk_hooks_t           chunk_hooks;
 
        /* bins is used to store trees of free regions. */
        arena_bin_t             bins[NBINS];
+
+       /*
+        * Quantized address-ordered trees of this arena's available runs.  The
+        * trees are used for first-best-fit run allocation.
+        */
+       arena_run_tree_t        runs_avail[1]; /* Dynamically sized. */
 };
 
+/* Used in conjunction with tsd for fast arena-related context lookup. */
+struct arena_tdata_s {
+       ticker_t                decay_ticker;
+};
+#endif /* JEMALLOC_ARENA_STRUCTS_B */
+
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-extern ssize_t opt_lg_dirty_mult;
-/*
- * small_size2bin_tab is a compact lookup table that rounds request sizes up to
- * size classes.  In order to reduce cache footprint, the table is compressed,
- * and all accesses are via small_size2bin().
- */
-extern uint8_t const   small_size2bin_tab[];
-/*
- * small_bin2size_tab duplicates information in arena_bin_info, but in a const
- * array, for which it is easier for the compiler to optimize repeated
- * dereferences.
- */
-extern uint32_t const  small_bin2size_tab[NBINS];
+static const size_t    large_pad =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+    PAGE
+#else
+    0
+#endif
+    ;
+
+extern purge_mode_t    opt_purge;
+extern const char      *purge_mode_names[];
+extern ssize_t         opt_lg_dirty_mult;
+extern ssize_t         opt_decay_time;
 
 extern arena_bin_info_t        arena_bin_info[NBINS];
 
-/* Number of large size classes. */
-#define                        nlclasses (chunk_npages - map_bias)
+extern size_t          map_bias; /* Number of arena chunk header pages. */
+extern size_t          map_misc_offset;
+extern size_t          arena_maxrun; /* Max run size for arenas. */
+extern size_t          large_maxclass; /* Max large size class. */
+extern size_t          run_quantize_max; /* Max run_quantize_*() input. */
+extern unsigned                nlclasses; /* Number of large size classes. */
+extern unsigned                nhclasses; /* Number of huge size classes. */
 
-void   *arena_chunk_alloc_huge(arena_t *arena, void *new_addr, size_t size,
-    size_t alignment, bool *zero);
-void   arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size);
-void   arena_purge_all(arena_t *arena);
-void   arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
-    size_t binind, uint64_t prof_accumbytes);
+#ifdef JEMALLOC_JET
+typedef size_t (run_quantize_t)(size_t);
+extern run_quantize_t *run_quantize_floor;
+extern run_quantize_t *run_quantize_ceil;
+#endif
+void   arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
+    bool cache);
+void   arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
+    bool cache);
+extent_node_t  *arena_node_alloc(arena_t *arena);
+void   arena_node_dalloc(arena_t *arena, extent_node_t *node);
+void   *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
+    bool *zero);
+void   arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
+void   arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
+    size_t oldsize, size_t usize);
+void   arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
+    size_t oldsize, size_t usize);
+bool   arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
+    size_t oldsize, size_t usize, bool *zero);
+ssize_t        arena_lg_dirty_mult_get(arena_t *arena);
+bool   arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
+ssize_t        arena_decay_time_get(arena_t *arena);
+bool   arena_decay_time_set(arena_t *arena, ssize_t decay_time);
+void   arena_maybe_purge(arena_t *arena);
+void   arena_purge(arena_t *arena, bool all);
+void   arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
+    szind_t binind, uint64_t prof_accumbytes);
 void   arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
     bool zero);
 #ifdef JEMALLOC_JET
@@ -361,39 +536,54 @@ extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
 void   arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
 #endif
 void   arena_quarantine_junk_small(void *ptr, size_t usize);
-void   *arena_malloc_small(arena_t *arena, size_t size, bool zero);
-void   *arena_malloc_large(arena_t *arena, size_t size, bool zero);
-void   *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
+void   *arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t ind, bool zero);
+void   *arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
+    bool zero, tcache_t *tcache);
+void   *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache);
 void   arena_prof_promoted(const void *ptr, size_t size);
-void   arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    arena_chunk_map_bits_t *bitselm);
+void   arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, arena_chunk_map_bits_t *bitselm);
 void   arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t pageind, arena_chunk_map_bits_t *bitselm);
-void   arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t pageind);
+void   arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, size_t pageind);
 #ifdef JEMALLOC_JET
 typedef void (arena_dalloc_junk_large_t)(void *, size_t);
 extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
+#else
+void   arena_dalloc_junk_large(void *ptr, size_t usize);
 #endif
-void   arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
+void   arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr);
+void   arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
     void *ptr);
-void   arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
 #ifdef JEMALLOC_JET
 typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
 extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
 #endif
-bool   arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+bool   arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero);
 void   *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
-    size_t size, size_t extra, size_t alignment, bool zero,
-    bool try_tcache_alloc, bool try_tcache_dalloc);
+    size_t size, size_t alignment, bool zero, tcache_t *tcache);
 dss_prec_t     arena_dss_prec_get(arena_t *arena);
 bool   arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
-void   arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
+ssize_t        arena_lg_dirty_mult_default_get(void);
+bool   arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
+ssize_t        arena_decay_time_default_get(void);
+bool   arena_decay_time_default_set(ssize_t decay_time);
+void   arena_basic_stats_merge(arena_t *arena, unsigned *nthreads,
+    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+    size_t *nactive, size_t *ndirty);
+void   arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
+    ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
     size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
-    malloc_large_stats_t *lstats);
-bool   arena_new(arena_t *arena, unsigned ind);
-void   arena_boot(void);
+    malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
+unsigned       arena_nthreads_get(arena_t *arena);
+void   arena_nthreads_inc(arena_t *arena);
+void   arena_nthreads_dec(arena_t *arena);
+arena_t        *arena_new(unsigned ind);
+bool   arena_boot(void);
 void   arena_prefork(arena_t *arena);
 void   arena_postfork_parent(arena_t *arena);
 void   arena_postfork_child(arena_t *arena);
@@ -403,209 +593,68 @@ void     arena_postfork_child(arena_t *arena);
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
-size_t small_size2bin_compute(size_t size);
-size_t small_size2bin_lookup(size_t size);
-size_t small_size2bin(size_t size);
-size_t small_bin2size_compute(size_t binind);
-size_t small_bin2size_lookup(size_t binind);
-size_t small_bin2size(size_t binind);
-size_t small_s2u_compute(size_t size);
-size_t small_s2u_lookup(size_t size);
-size_t small_s2u(size_t size);
 arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk,
     size_t pageind);
 arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk,
     size_t pageind);
-size_t arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm);
+size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
 void   *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
+arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
 arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
 size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbitsp_read(size_t *mapbitsp);
 size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_size_decode(size_t mapbits);
 size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
     size_t pageind);
 size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
+szind_t        arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
 void   arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
+size_t arena_mapbits_size_encode(size_t size);
 void   arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
     size_t size, size_t flags);
 void   arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
     size_t size);
+void   arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
+    size_t flags);
 void   arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
     size_t size, size_t flags);
 void   arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
-    size_t binind);
+    szind_t binind);
 void   arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
-    size_t runind, size_t binind, size_t flags);
-void   arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
-    size_t unzeroed);
+    size_t runind, szind_t binind, size_t flags);
+void   arena_metadata_allocated_add(arena_t *arena, size_t size);
+void   arena_metadata_allocated_sub(arena_t *arena, size_t size);
+size_t arena_metadata_allocated_get(arena_t *arena);
 bool   arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
 bool   arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
 bool   arena_prof_accum(arena_t *arena, uint64_t accumbytes);
-size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
-size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
-unsigned       arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
+szind_t        arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
+szind_t        arena_bin_index(arena_t *arena, arena_bin_t *bin);
+size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
     const void *ptr);
 prof_tctx_t    *arena_prof_tctx_get(const void *ptr);
-void   arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
-void   *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
-    bool try_tcache);
+void   arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
+void   arena_prof_tctx_reset(const void *ptr, size_t usize,
+    const void *old_ptr, prof_tctx_t *old_tctx);
+void   arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks);
+void   arena_decay_tick(tsd_t *tsd, arena_t *arena);
+void   *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
+    bool zero, tcache_t *tcache, bool slow_path);
+arena_t        *arena_aalloc(const void *ptr);
 size_t arena_salloc(const void *ptr, bool demote);
-void   arena_dalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr,
-    bool try_tcache);
-void   arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
-    bool try_tcache);
+void   arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
+void   arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
 #  ifdef JEMALLOC_ARENA_INLINE_A
-JEMALLOC_INLINE size_t
-small_size2bin_compute(size_t size)
-{
-#if (NTBINS != 0)
-       if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
-               size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
-               size_t lg_ceil = lg_floor(pow2_ceil(size));
-               return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
-       } else
-#endif
-       {
-               size_t x = lg_floor((size<<1)-1);
-               size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
-                   x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
-               size_t grp = shift << LG_SIZE_CLASS_GROUP;
-
-               size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
-                   ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
-
-               size_t delta_inverse_mask = ZI(-1) << lg_delta;
-               size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
-                   ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
-               size_t bin = NTBINS + grp + mod;
-               return (bin);
-       }
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-small_size2bin_lookup(size_t size)
-{
-
-       assert(size <= LOOKUP_MAXCLASS);
-       {
-               size_t ret = ((size_t)(small_size2bin_tab[(size-1) >>
-                   LG_TINY_MIN]));
-               assert(ret == small_size2bin_compute(size));
-               return (ret);
-       }
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-small_size2bin(size_t size)
-{
-
-       assert(size > 0);
-       if (likely(size <= LOOKUP_MAXCLASS))
-               return (small_size2bin_lookup(size));
-       else
-               return (small_size2bin_compute(size));
-}
-
-JEMALLOC_INLINE size_t
-small_bin2size_compute(size_t binind)
-{
-#if (NTBINS > 0)
-       if (binind < NTBINS)
-               return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + binind));
-       else
-#endif
-       {
-               size_t reduced_binind = binind - NTBINS;
-               size_t grp = reduced_binind >> LG_SIZE_CLASS_GROUP;
-               size_t mod = reduced_binind & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
-                   1);
-
-               size_t grp_size_mask = ~((!!grp)-1);
-               size_t grp_size = ((ZU(1) << (LG_QUANTUM +
-                   (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
-
-               size_t shift = (grp == 0) ? 1 : grp;
-               size_t lg_delta = shift + (LG_QUANTUM-1);
-               size_t mod_size = (mod+1) << lg_delta;
-
-               size_t usize = grp_size + mod_size;
-               return (usize);
-       }
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-small_bin2size_lookup(size_t binind)
-{
-
-       assert(binind < NBINS);
-       {
-               size_t ret = (size_t)small_bin2size_tab[binind];
-               assert(ret == small_bin2size_compute(binind));
-               return (ret);
-       }
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-small_bin2size(size_t binind)
-{
-
-       return (small_bin2size_lookup(binind));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-small_s2u_compute(size_t size)
-{
-#if (NTBINS > 0)
-       if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
-               size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
-               size_t lg_ceil = lg_floor(pow2_ceil(size));
-               return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
-                   (ZU(1) << lg_ceil));
-       } else
-#endif
-       {
-               size_t x = lg_floor((size<<1)-1);
-               size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
-                   ?  LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
-               size_t delta = ZU(1) << lg_delta;
-               size_t delta_mask = delta - 1;
-               size_t usize = (size + delta_mask) & ~delta_mask;
-               return (usize);
-       }
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-small_s2u_lookup(size_t size)
-{
-       size_t ret = small_bin2size(small_size2bin(size));
-
-       assert(ret == small_s2u_compute(size));
-       return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-small_s2u(size_t size)
-{
-
-       assert(size > 0);
-       if (likely(size <= LOOKUP_MAXCLASS))
-               return (small_s2u_lookup(size));
-       else
-               return (small_s2u_compute(size));
-}
-#  endif /* JEMALLOC_ARENA_INLINE_A */
-
-#  ifdef JEMALLOC_ARENA_INLINE_B
 JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
 arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
 {
@@ -628,7 +677,7 @@ arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm)
+arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
 {
        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
        size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
@@ -649,6 +698,18 @@ arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
        return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
 }
 
+JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
+arena_rd_to_miscelm(arena_runs_dirty_link_t *rd)
+{
+       arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
+           *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));
+
+       assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
+       assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
+
+       return (miscelm);
+}
+
 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
 arena_run_to_miscelm(arena_run_t *run)
 {
@@ -682,6 +743,22 @@ arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
        return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
 }
 
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_size_decode(size_t mapbits)
+{
+       size_t size;
+
+#if CHUNK_MAP_SIZE_SHIFT > 0
+       size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
+#elif CHUNK_MAP_SIZE_SHIFT == 0
+       size = mapbits & CHUNK_MAP_SIZE_MASK;
+#else
+       size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
+#endif
+
+       return (size);
+}
+
 JEMALLOC_ALWAYS_INLINE size_t
 arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
 {
@@ -689,7 +766,7 @@ arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
 
        mapbits = arena_mapbits_get(chunk, pageind);
        assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
-       return (mapbits & ~PAGE_MASK);
+       return (arena_mapbits_size_decode(mapbits));
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
@@ -700,7 +777,7 @@ arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
        mapbits = arena_mapbits_get(chunk, pageind);
        assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
            (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
-       return (mapbits & ~PAGE_MASK);
+       return (arena_mapbits_size_decode(mapbits));
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
@@ -711,14 +788,14 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
        mapbits = arena_mapbits_get(chunk, pageind);
        assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
            CHUNK_MAP_ALLOCATED);
-       return (mapbits >> LG_PAGE);
+       return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
 }
 
-JEMALLOC_ALWAYS_INLINE size_t
+JEMALLOC_ALWAYS_INLINE szind_t
 arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
 {
        size_t mapbits;
-       size_t binind;
+       szind_t binind;
 
        mapbits = arena_mapbits_get(chunk, pageind);
        binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
@@ -732,6 +809,8 @@ arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
        size_t mapbits;
 
        mapbits = arena_mapbits_get(chunk, pageind);
+       assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
+           (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
        return (mapbits & CHUNK_MAP_DIRTY);
 }
 
@@ -741,9 +820,22 @@ arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
        size_t mapbits;
 
        mapbits = arena_mapbits_get(chunk, pageind);
+       assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
+           (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
        return (mapbits & CHUNK_MAP_UNZEROED);
 }
 
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
+{
+       size_t mapbits;
+
+       mapbits = arena_mapbits_get(chunk, pageind);
+       assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
+           (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
+       return (mapbits & CHUNK_MAP_DECOMMITTED);
+}
+
 JEMALLOC_ALWAYS_INLINE size_t
 arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
 {
@@ -769,6 +861,23 @@ arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
        *mapbitsp = mapbits;
 }
 
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_size_encode(size_t size)
+{
+       size_t mapbits;
+
+#if CHUNK_MAP_SIZE_SHIFT > 0
+       mapbits = size << CHUNK_MAP_SIZE_SHIFT;
+#elif CHUNK_MAP_SIZE_SHIFT == 0
+       mapbits = size;
+#else
+       mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
+#endif
+
+       assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
+       return (mapbits);
+}
+
 JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
     size_t flags)
@@ -776,9 +885,11 @@ arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
        size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
 
        assert((size & PAGE_MASK) == 0);
-       assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
-       assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
-       arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags);
+       assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
+       assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
+           (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
+       arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
+           CHUNK_MAP_BININD_INVALID | flags);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -790,7 +901,17 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
 
        assert((size & PAGE_MASK) == 0);
        assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
-       arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK));
+       arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
+           (mapbits & ~CHUNK_MAP_SIZE_MASK));
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
+{
+       size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+
+       assert((flags & CHUNK_MAP_UNZEROED) == flags);
+       arena_mapbitsp_write(mapbitsp, flags);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -798,54 +919,62 @@ arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
     size_t flags)
 {
        size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
-       size_t mapbits = arena_mapbitsp_read(mapbitsp);
-       size_t unzeroed;
 
        assert((size & PAGE_MASK) == 0);
-       assert((flags & CHUNK_MAP_DIRTY) == flags);
-       unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
-       arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags
-           | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
+       assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
+       assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
+           (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
+       arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
+           CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
+           CHUNK_MAP_ALLOCATED);
 }
 
 JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
-    size_t binind)
+    szind_t binind)
 {
        size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
        size_t mapbits = arena_mapbitsp_read(mapbitsp);
 
        assert(binind <= BININD_INVALID);
-       assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
+       assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
+           large_pad);
        arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
            (binind << CHUNK_MAP_BININD_SHIFT));
 }
 
 JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
-    size_t binind, size_t flags)
+    szind_t binind, size_t flags)
 {
        size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
-       size_t mapbits = arena_mapbitsp_read(mapbitsp);
-       size_t unzeroed;
 
        assert(binind < BININD_INVALID);
        assert(pageind - runind >= map_bias);
-       assert((flags & CHUNK_MAP_DIRTY) == flags);
-       unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
-       arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind <<
-           CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED);
+       assert((flags & CHUNK_MAP_UNZEROED) == flags);
+       arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) |
+           (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED);
 }
 
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
-    size_t unzeroed)
+JEMALLOC_INLINE void
+arena_metadata_allocated_add(arena_t *arena, size_t size)
 {
-       size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
-       size_t mapbits = arena_mapbitsp_read(mapbitsp);
 
-       arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) |
-           unzeroed);
+       atomic_add_z(&arena->stats.metadata_allocated, size);
+}
+
+JEMALLOC_INLINE void
+arena_metadata_allocated_sub(arena_t *arena, size_t size)
+{
+
+       atomic_sub_z(&arena->stats.metadata_allocated, size);
+}
+
+JEMALLOC_INLINE size_t
+arena_metadata_allocated_get(arena_t *arena)
+{
+
+       return (atomic_read_z(&arena->stats.metadata_allocated));
 }
 
 JEMALLOC_INLINE bool
@@ -893,10 +1022,10 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
        }
 }
 
-JEMALLOC_ALWAYS_INLINE size_t
+JEMALLOC_ALWAYS_INLINE szind_t
 arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 {
-       size_t binind;
+       szind_t binind;
 
        binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
 
@@ -908,7 +1037,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
                size_t rpages_ind;
                arena_run_t *run;
                arena_bin_t *bin;
-               size_t actual_binind;
+               szind_t run_binind, actual_binind;
                arena_bin_info_t *bin_info;
                arena_chunk_map_misc_t *miscelm;
                void *rpages;
@@ -916,7 +1045,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
                assert(binind != BININD_INVALID);
                assert(binind < NBINS);
                chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-               arena = chunk->arena;
+               arena = extent_node_arena_get(&chunk->node);
                pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
                actual_mapbits = arena_mapbits_get(chunk, pageind);
                assert(mapbits == actual_mapbits);
@@ -926,9 +1055,10 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
                    pageind);
                miscelm = arena_miscelm_get(chunk, rpages_ind);
                run = &miscelm->run;
-               bin = run->bin;
-               actual_binind = bin - arena->bins;
-               assert(binind == actual_binind);
+               run_binind = run->binind;
+               bin = &arena->bins[run_binind];
+               actual_binind = (szind_t)(bin - arena->bins);
+               assert(run_binind == actual_binind);
                bin_info = &arena_bin_info[actual_binind];
                rpages = arena_miscelm_to_rpages(miscelm);
                assert(((uintptr_t)ptr - ((uintptr_t)rpages +
@@ -938,22 +1068,21 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 
        return (binind);
 }
-#  endif /* JEMALLOC_ARENA_INLINE_B */
+#  endif /* JEMALLOC_ARENA_INLINE_A */
 
-#  ifdef JEMALLOC_ARENA_INLINE_C
-JEMALLOC_INLINE size_t
+#  ifdef JEMALLOC_ARENA_INLINE_B
+JEMALLOC_INLINE szind_t
 arena_bin_index(arena_t *arena, arena_bin_t *bin)
 {
-       size_t binind = bin - arena->bins;
+       szind_t binind = (szind_t)(bin - arena->bins);
        assert(binind < NBINS);
        return (binind);
 }
 
-JEMALLOC_INLINE unsigned
+JEMALLOC_INLINE size_t
 arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
 {
-       unsigned shift, diff, regind;
-       size_t interval;
+       size_t diff, interval, shift, regind;
        arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
        void *rpages = arena_miscelm_to_rpages(miscelm);
 
@@ -968,12 +1097,12 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
         * Avoid doing division with a variable divisor if possible.  Using
         * actual division here can reduce allocator throughput by over 20%!
         */
-       diff = (unsigned)((uintptr_t)ptr - (uintptr_t)rpages -
+       diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
            bin_info->reg0_offset);
 
        /* Rescale (factor powers of 2 out of the numerator and denominator). */
        interval = bin_info->reg_interval;
-       shift = jemalloc_ffs(interval) - 1;
+       shift = ffs_zu(interval) - 1;
        diff >>= shift;
        interval >>= shift;
 
@@ -995,9 +1124,9 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
                 * divide by 0, and 1 and 2 are both powers of two, which are
                 * handled above.
                 */
-#define        SIZE_INV_SHIFT  ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
-#define        SIZE_INV(s)     (((1U << SIZE_INV_SHIFT) / (s)) + 1)
-               static const unsigned interval_invs[] = {
+#define        SIZE_INV_SHIFT  ((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
+#define        SIZE_INV(s)     (((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
+               static const size_t interval_invs[] = {
                    SIZE_INV(3),
                    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
                    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
@@ -1008,8 +1137,8 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
                    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
                };
 
-               if (likely(interval <= ((sizeof(interval_invs) /
-                   sizeof(unsigned)) + 2))) {
+               if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t))
+                   + 2))) {
                        regind = (diff * interval_invs[interval - 3]) >>
                            SIZE_INV_SHIFT;
                } else
@@ -1028,72 +1157,146 @@ arena_prof_tctx_get(const void *ptr)
 {
        prof_tctx_t *ret;
        arena_chunk_t *chunk;
-       size_t pageind, mapbits;
 
        cassert(config_prof);
        assert(ptr != NULL);
-       assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-       pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-       mapbits = arena_mapbits_get(chunk, pageind);
-       assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
-       if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
-               ret = (prof_tctx_t *)(uintptr_t)1U;
-       else
-               ret = arena_miscelm_get(chunk, pageind)->prof_tctx;
+       if (likely(chunk != ptr)) {
+               size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+               size_t mapbits = arena_mapbits_get(chunk, pageind);
+               assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+               if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
+                       ret = (prof_tctx_t *)(uintptr_t)1U;
+               else {
+                       arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
+                           pageind);
+                       ret = atomic_read_p(&elm->prof_tctx_pun);
+               }
+       } else
+               ret = huge_prof_tctx_get(ptr);
 
        return (ret);
 }
 
 JEMALLOC_INLINE void
-arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
+arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
 {
        arena_chunk_t *chunk;
-       size_t pageind;
 
        cassert(config_prof);
        assert(ptr != NULL);
-       assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-       pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-       assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+       if (likely(chunk != ptr)) {
+               size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+
+               assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
 
-       if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0))
-               arena_miscelm_get(chunk, pageind)->prof_tctx = tctx;
+               if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
+                   (uintptr_t)1U)) {
+                       arena_chunk_map_misc_t *elm;
+
+                       assert(arena_mapbits_large_get(chunk, pageind) != 0);
+
+                       elm = arena_miscelm_get(chunk, pageind);
+                       atomic_write_p(&elm->prof_tctx_pun, tctx);
+               } else {
+                       /*
+                        * tctx must always be initialized for large runs.
+                        * Assert that the surrounding conditional logic is
+                        * equivalent to checking whether ptr refers to a large
+                        * run.
+                        */
+                       assert(arena_mapbits_large_get(chunk, pageind) == 0);
+               }
+       } else
+               huge_prof_tctx_set(ptr, tctx);
+}
+
+JEMALLOC_INLINE void
+arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+    prof_tctx_t *old_tctx)
+{
+
+       cassert(config_prof);
+       assert(ptr != NULL);
+
+       if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
+           (uintptr_t)old_tctx > (uintptr_t)1U))) {
+               arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+               if (likely(chunk != ptr)) {
+                       size_t pageind;
+                       arena_chunk_map_misc_t *elm;
+
+                       pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
+                           LG_PAGE;
+                       assert(arena_mapbits_allocated_get(chunk, pageind) !=
+                           0);
+                       assert(arena_mapbits_large_get(chunk, pageind) != 0);
+
+                       elm = arena_miscelm_get(chunk, pageind);
+                       atomic_write_p(&elm->prof_tctx_pun,
+                           (prof_tctx_t *)(uintptr_t)1U);
+               } else
+                       huge_prof_tctx_reset(ptr);
+       }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks)
+{
+       ticker_t *decay_ticker;
+
+       if (unlikely(tsd == NULL))
+               return;
+       decay_ticker = decay_ticker_get(tsd, arena->ind);
+       if (unlikely(decay_ticker == NULL))
+               return;
+       if (unlikely(ticker_ticks(decay_ticker, nticks)))
+               arena_purge(arena, false);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_decay_tick(tsd_t *tsd, arena_t *arena)
+{
+
+       arena_decay_ticks(tsd, arena, 1);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
-    bool try_tcache)
+arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, bool zero,
+    tcache_t *tcache, bool slow_path)
 {
-       tcache_t *tcache;
 
        assert(size != 0);
-       assert(size <= arena_maxclass);
 
-       if (likely(size <= SMALL_MAXCLASS)) {
-               if (likely(try_tcache) && likely((tcache = tcache_get(tsd,
-                   true)) != NULL))
-                       return (tcache_alloc_small(tcache, size, zero));
-               else {
-                       return (arena_malloc_small(choose_arena(tsd, arena),
-                           size, zero));
+       if (likely(tcache != NULL)) {
+               if (likely(size <= SMALL_MAXCLASS)) {
+                       return (tcache_alloc_small(tsd, arena, tcache, size,
+                           ind, zero, slow_path));
                }
-       } else {
-               /*
-                * Initialize tcache after checking size in order to avoid
-                * infinite recursion during tcache initialization.
-                */
-               if (try_tcache && size <= tcache_maxclass && likely((tcache =
-                   tcache_get(tsd, true)) != NULL))
-                       return (tcache_alloc_large(tcache, size, zero));
-               else {
-                       return (arena_malloc_large(choose_arena(tsd, arena),
-                           size, zero));
+               if (likely(size <= tcache_maxclass)) {
+                       return (tcache_alloc_large(tsd, arena, tcache, size,
+                           ind, zero, slow_path));
                }
+               /* (size > tcache_maxclass) case falls through. */
+               assert(size > tcache_maxclass);
        }
+
+       return (arena_malloc_hard(tsd, arena, size, ind, zero, tcache));
+}
+
+JEMALLOC_ALWAYS_INLINE arena_t *
+arena_aalloc(const void *ptr)
+{
+       arena_chunk_t *chunk;
+
+       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+       if (likely(chunk != ptr))
+               return (extent_node_arena_get(&chunk->node));
+       else
+               return (huge_aalloc(ptr));
 }
 
 /* Return the size of the allocation pointed to by ptr. */
@@ -1102,108 +1305,145 @@ arena_salloc(const void *ptr, bool demote)
 {
        size_t ret;
        arena_chunk_t *chunk;
-       size_t pageind, binind;
+       size_t pageind;
+       szind_t binind;
 
        assert(ptr != NULL);
-       assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-       pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-       assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-       binind = arena_mapbits_binind_get(chunk, pageind);
-       if (unlikely(binind == BININD_INVALID || (config_prof && !demote &&
-           arena_mapbits_large_get(chunk, pageind) != 0))) {
-               /*
-                * Large allocation.  In the common case (demote), and as this
-                * is an inline function, most callers will only end up looking
-                * at binind to determine that ptr is a small allocation.
-                */
-               assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-               ret = arena_mapbits_large_size_get(chunk, pageind);
-               assert(ret != 0);
-               assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
-               assert(ret == PAGE || arena_mapbits_large_size_get(chunk,
-                   pageind+(ret>>LG_PAGE)-1) == 0);
-               assert(binind == arena_mapbits_binind_get(chunk,
-                   pageind+(ret>>LG_PAGE)-1));
-               assert(arena_mapbits_dirty_get(chunk, pageind) ==
-                   arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
-       } else {
-               /* Small allocation (possibly promoted to a large object). */
-               assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
-                   arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
-                   pageind)) == binind);
-               ret = small_bin2size(binind);
-       }
+       if (likely(chunk != ptr)) {
+               pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+               assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+               binind = arena_mapbits_binind_get(chunk, pageind);
+               if (unlikely(binind == BININD_INVALID || (config_prof && !demote
+                   && arena_mapbits_large_get(chunk, pageind) != 0))) {
+                       /*
+                        * Large allocation.  In the common case (demote), and
+                        * as this is an inline function, most callers will only
+                        * end up looking at binind to determine that ptr is a
+                        * small allocation.
+                        */
+                       assert(config_cache_oblivious || ((uintptr_t)ptr &
+                           PAGE_MASK) == 0);
+                       ret = arena_mapbits_large_size_get(chunk, pageind) -
+                           large_pad;
+                       assert(ret != 0);
+                       assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
+                           chunk_npages);
+                       assert(arena_mapbits_dirty_get(chunk, pageind) ==
+                           arena_mapbits_dirty_get(chunk,
+                           pageind+((ret+large_pad)>>LG_PAGE)-1));
+               } else {
+                       /*
+                        * Small allocation (possibly promoted to a large
+                        * object).
+                        */
+                       assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
+                           arena_ptr_small_binind_get(ptr,
+                           arena_mapbits_get(chunk, pageind)) == binind);
+                       ret = index2size(binind);
+               }
+       } else
+               ret = huge_salloc(ptr);
 
        return (ret);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_dalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, bool try_tcache)
+arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
 {
+       arena_chunk_t *chunk;
        size_t pageind, mapbits;
-       tcache_t *tcache;
 
        assert(ptr != NULL);
-       assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
-       pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-       mapbits = arena_mapbits_get(chunk, pageind);
-       assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-       if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
-               /* Small allocation. */
-               if (likely(try_tcache) && likely((tcache = tcache_get(tsd,
-                   false)) != NULL)) {
-                       size_t binind = arena_ptr_small_binind_get(ptr,
-                           mapbits);
-                       tcache_dalloc_small(tcache, ptr, binind);
-               } else
-                       arena_dalloc_small(chunk->arena, chunk, ptr, pageind);
-       } else {
-               size_t size = arena_mapbits_large_size_get(chunk, pageind);
-
-               assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-
-               if (try_tcache && size <= tcache_maxclass && likely((tcache =
-                   tcache_get(tsd, false)) != NULL)) {
-                       tcache_dalloc_large(tcache, ptr, size);
-               } else
-                       arena_dalloc_large(chunk->arena, chunk, ptr);
-       }
+       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+       if (likely(chunk != ptr)) {
+               pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+               mapbits = arena_mapbits_get(chunk, pageind);
+               assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+               if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
+                       /* Small allocation. */
+                       if (likely(tcache != NULL)) {
+                               szind_t binind = arena_ptr_small_binind_get(ptr,
+                                   mapbits);
+                               tcache_dalloc_small(tsd, tcache, ptr, binind,
+                                   slow_path);
+                       } else {
+                               arena_dalloc_small(tsd, extent_node_arena_get(
+                                   &chunk->node), chunk, ptr, pageind);
+                       }
+               } else {
+                       size_t size = arena_mapbits_large_size_get(chunk,
+                           pageind);
+
+                       assert(config_cache_oblivious || ((uintptr_t)ptr &
+                           PAGE_MASK) == 0);
+
+                       if (likely(tcache != NULL) && size - large_pad <=
+                           tcache_maxclass) {
+                               tcache_dalloc_large(tsd, tcache, ptr, size -
+                                   large_pad, slow_path);
+                       } else {
+                               arena_dalloc_large(tsd, extent_node_arena_get(
+                                   &chunk->node), chunk, ptr);
+                       }
+               }
+       } else
+               huge_dalloc(tsd, ptr, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
-    bool try_tcache)
+arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
 {
-       tcache_t *tcache;
+       arena_chunk_t *chunk;
 
-       assert(ptr != NULL);
-       assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
-       if (likely(size <= SMALL_MAXCLASS)) {
-               /* Small allocation. */
-               if (likely(try_tcache) && likely((tcache = tcache_get(tsd,
-                   false)) != NULL)) {
-                       size_t binind = small_size2bin(size);
-                       tcache_dalloc_small(tcache, ptr, binind);
-               } else {
+       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+       if (likely(chunk != ptr)) {
+               if (config_prof && opt_prof) {
                        size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
                            LG_PAGE;
-                       arena_dalloc_small(chunk->arena, chunk, ptr, pageind);
+                       assert(arena_mapbits_allocated_get(chunk, pageind) !=
+                           0);
+                       if (arena_mapbits_large_get(chunk, pageind) != 0) {
+                               /*
+                                * Make sure to use promoted size, not request
+                                * size.
+                                */
+                               size = arena_mapbits_large_size_get(chunk,
+                                   pageind) - large_pad;
+                       }
                }
-       } else {
-               assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-
-               if (try_tcache && size <= tcache_maxclass && (tcache =
-                   tcache_get(tsd, false)) != NULL) {
-                       tcache_dalloc_large(tcache, ptr, size);
-               } else
-                       arena_dalloc_large(chunk->arena, chunk, ptr);
-       }
+               assert(s2u(size) == s2u(arena_salloc(ptr, false)));
+
+               if (likely(size <= SMALL_MAXCLASS)) {
+                       /* Small allocation. */
+                       if (likely(tcache != NULL)) {
+                               szind_t binind = size2index(size);
+                               tcache_dalloc_small(tsd, tcache, ptr, binind,
+                                   true);
+                       } else {
+                               size_t pageind = ((uintptr_t)ptr -
+                                   (uintptr_t)chunk) >> LG_PAGE;
+                               arena_dalloc_small(tsd, extent_node_arena_get(
+                                   &chunk->node), chunk, ptr, pageind);
+                       }
+               } else {
+                       assert(config_cache_oblivious || ((uintptr_t)ptr &
+                           PAGE_MASK) == 0);
+
+                       if (likely(tcache != NULL) && size <= tcache_maxclass) {
+                               tcache_dalloc_large(tsd, tcache, ptr, size,
+                                   true);
+                       } else {
+                               arena_dalloc_large(tsd, extent_node_arena_get(
+                                   &chunk->node), chunk, ptr);
+                       }
+               }
+       } else
+               huge_dalloc(tsd, ptr, tcache);
 }
-#  endif /* JEMALLOC_ARENA_INLINE_C */
+#  endif /* JEMALLOC_ARENA_INLINE_B */
 #endif
 
 #endif /* JEMALLOC_H_INLINES */
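
A recurring change in the arena inlines above: the old precondition assert(CHUNK_ADDR2BASE(ptr) != ptr) becomes a runtime branch, because huge allocations are now looked up by address as well. Huge allocations are chunk-aligned, so a pointer that equals its own chunk base must be huge and is routed to the huge_*() paths; anything else lives inside an arena chunk. A small sketch of the address arithmetic behind that test, with an illustrative chunk size:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative chunk size; jemalloc derives the real one from opt_lg_chunk. */
#define LG_CHUNK        21
#define CHUNKSIZE       ((uintptr_t)1 << LG_CHUNK)
#define CHUNK_ADDR2BASE(a) ((void *)((uintptr_t)(a) & ~(CHUNKSIZE - 1)))

int
main(void)
{
        uintptr_t base = 40 * CHUNKSIZE;        /* Some chunk-aligned address. */
        void *interior = (void *)(base + 4096); /* Small/large: inside a chunk. */
        void *huge = (void *)base;              /* Huge: chunk-aligned. */

        assert(CHUNK_ADDR2BASE(interior) != interior);
        assert(CHUNK_ADDR2BASE(huge) == huge);
        printf("chunk base of %p is %p\n", interior, CHUNK_ADDR2BASE(interior));
        return (0);
}
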
diff --git a/src/jemalloc/include/jemalloc/internal/assert.h b/src/jemalloc/include/jemalloc/internal/assert.h
new file mode 100644 (file)
index 0000000..6f8f7eb
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Define a custom assert() in order to reduce the chances of deadlock during
+ * assertion failure.
+ */
+#ifndef assert
+#define        assert(e) do {                                                  \
+       if (unlikely(config_debug && !(e))) {                           \
+               malloc_printf(                                          \
+                   "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n",    \
+                   __FILE__, __LINE__, #e);                            \
+               abort();                                                \
+       }                                                               \
+} while (0)
+#endif
+
+#ifndef not_reached
+#define        not_reached() do {                                              \
+       if (config_debug) {                                             \
+               malloc_printf(                                          \
+                   "<jemalloc>: %s:%d: Unreachable code reached\n",    \
+                   __FILE__, __LINE__);                                \
+               abort();                                                \
+       }                                                               \
+       unreachable();                                                  \
+} while (0)
+#endif
+
+#ifndef not_implemented
+#define        not_implemented() do {                                          \
+       if (config_debug) {                                             \
+               malloc_printf("<jemalloc>: %s:%d: Not implemented\n",   \
+                   __FILE__, __LINE__);                                \
+               abort();                                                \
+       }                                                               \
+} while (0)
+#endif
+
+#ifndef assert_not_implemented
+#define        assert_not_implemented(e) do {                                  \
+       if (unlikely(config_debug && !(e)))                             \
+               not_implemented();                                      \
+} while (0)
+#endif
+
+
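
The custom assert() in the new header reports failures through malloc_printf() rather than the C library's assert(), so a failing assertion inside the allocator does not re-enter malloc() via stdio and deadlock. A standalone sketch of the same pattern; config_debug, malloc_printf() and unlikely() below are stand-ins for this example, not the real jemalloc definitions:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for jemalloc's config_debug, malloc_printf() and unlikely(). */
static const bool config_debug = true;
#define malloc_printf(...)      fprintf(stderr, __VA_ARGS__)
#define unlikely(x)             __builtin_expect(!!(x), 0)

#define assert(e) do {                                                  \
        if (unlikely(config_debug && !(e))) {                           \
                malloc_printf(                                          \
                    "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n",    \
                    __FILE__, __LINE__, #e);                            \
                abort();                                                \
        }                                                               \
} while (0)

int
main(void)
{
        int nregs = 8;

        assert(nregs > 0);      /* Passes silently. */
        /* assert(nregs < 0) would print the diagnostic above and abort(). */
        printf("nregs = %d\n", nregs);
        return (0);
}
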
diff --git a/src/jemalloc/include/jemalloc/internal/atomic.h b/src/jemalloc/include/jemalloc/internal/atomic.h
index a04881579674a59e10839685771e0f1b417603c2..3f15ea1499ca91441be32dd47b3872b986395218 100644 (file)
@@ -11,6 +11,7 @@
 
 #define        atomic_read_uint64(p)   atomic_add_uint64(p, 0)
 #define        atomic_read_uint32(p)   atomic_add_uint32(p, 0)
+#define        atomic_read_p(p)        atomic_add_p(p, NULL)
 #define        atomic_read_z(p)        atomic_add_z(p, 0)
 #define        atomic_read_u(p)        atomic_add_u(p, 0)
 
 #ifdef JEMALLOC_H_INLINES
 
 /*
- * All functions return the arithmetic result of the atomic operation.  Some
- * atomic operation APIs return the value prior to mutation, in which case the
- * following functions must redundantly compute the result so that it can be
- * returned.  These functions are normally inlined, so the extra operations can
- * be optimized away if the return values aren't used by the callers.
+ * All arithmetic functions return the arithmetic result of the atomic
+ * operation.  Some atomic operation APIs return the value prior to mutation, in
+ * which case the following functions must redundantly compute the result so
+ * that it can be returned.  These functions are normally inlined, so the extra
+ * operations can be optimized away if the return values aren't used by the
+ * callers.
  *
- *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p + x); }
- *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p - x); }
+ *   <t> atomic_read_<t>(<t> *p) { return (*p); }
+ *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
+ *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
+ *   bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
+ *   {
+ *     if (*p != c)
+ *       return (true);
+ *     *p = s;
+ *     return (false);
+ *   }
+ *   void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
  */
 
 #ifndef JEMALLOC_ENABLE_INLINE
 uint64_t       atomic_add_uint64(uint64_t *p, uint64_t x);
 uint64_t       atomic_sub_uint64(uint64_t *p, uint64_t x);
+bool   atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s);
+void   atomic_write_uint64(uint64_t *p, uint64_t x);
 uint32_t       atomic_add_uint32(uint32_t *p, uint32_t x);
 uint32_t       atomic_sub_uint32(uint32_t *p, uint32_t x);
+bool   atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s);
+void   atomic_write_uint32(uint32_t *p, uint32_t x);
+void   *atomic_add_p(void **p, void *x);
+void   *atomic_sub_p(void **p, void *x);
+bool   atomic_cas_p(void **p, void *c, void *s);
+void   atomic_write_p(void **p, const void *x);
 size_t atomic_add_z(size_t *p, size_t x);
 size_t atomic_sub_z(size_t *p, size_t x);
+bool   atomic_cas_z(size_t *p, size_t c, size_t s);
+void   atomic_write_z(size_t *p, size_t x);
 unsigned       atomic_add_u(unsigned *p, unsigned x);
 unsigned       atomic_sub_u(unsigned *p, unsigned x);
+bool   atomic_cas_u(unsigned *p, unsigned c, unsigned s);
+void   atomic_write_u(unsigned *p, unsigned x);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
 /******************************************************************************/
 /* 64-bit operations. */
 #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
-#  ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
+#  if (defined(__amd64__) || defined(__x86_64__))
 JEMALLOC_INLINE uint64_t
 atomic_add_uint64(uint64_t *p, uint64_t x)
 {
+       uint64_t t = x;
 
-       return (__sync_add_and_fetch(p, x));
+       asm volatile (
+           "lock; xaddq %0, %1;"
+           : "+r" (t), "=m" (*p) /* Outputs. */
+           : "m" (*p) /* Inputs. */
+           );
+
+       return (t + x);
 }
 
 JEMALLOC_INLINE uint64_t
 atomic_sub_uint64(uint64_t *p, uint64_t x)
 {
+       uint64_t t;
 
-       return (__sync_sub_and_fetch(p, x));
+       x = (uint64_t)(-(int64_t)x);
+       t = x;
+       asm volatile (
+           "lock; xaddq %0, %1;"
+           : "+r" (t), "=m" (*p) /* Outputs. */
+           : "m" (*p) /* Inputs. */
+           );
+
+       return (t + x);
 }
-#elif (defined(_MSC_VER))
+
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+       uint8_t success;
+
+       asm volatile (
+           "lock; cmpxchgq %4, %0;"
+           "sete %1;"
+           : "=m" (*p), "=a" (success) /* Outputs. */
+           : "m" (*p), "a" (c), "r" (s) /* Inputs. */
+           : "memory" /* Clobbers. */
+           );
+
+       return (!(bool)success);
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+
+       asm volatile (
+           "xchgq %1, %0;" /* Lock is implied by xchgq. */
+           : "=m" (*p), "+r" (x) /* Outputs. */
+           : "m" (*p) /* Inputs. */
+           : "memory" /* Clobbers. */
+           );
+}
+#  elif (defined(JEMALLOC_C11ATOMICS))
 JEMALLOC_INLINE uint64_t
 atomic_add_uint64(uint64_t *p, uint64_t x)
 {
-
-       return (InterlockedExchangeAdd64(p, x) + x);
+       volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+       return (atomic_fetch_add(a, x) + x);
 }
 
 JEMALLOC_INLINE uint64_t
 atomic_sub_uint64(uint64_t *p, uint64_t x)
 {
+       volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+       return (atomic_fetch_sub(a, x) - x);
+}
 
-       return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+       volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+       return (!atomic_compare_exchange_strong(a, &c, s));
 }
-#elif (defined(JEMALLOC_OSATOMIC))
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+       volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+       atomic_store(a, x);
+}
+#  elif (defined(JEMALLOC_ATOMIC9))
 JEMALLOC_INLINE uint64_t
 atomic_add_uint64(uint64_t *p, uint64_t x)
 {
 
-       return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
+       /*
+        * atomic_fetchadd_64() doesn't exist, but we only ever use this
+        * function on LP64 systems, so atomic_fetchadd_long() will do.
+        */
+       assert(sizeof(uint64_t) == sizeof(unsigned long));
+
+       return (atomic_fetchadd_long(p, (unsigned long)x) + x);
 }
 
 JEMALLOC_INLINE uint64_t
 atomic_sub_uint64(uint64_t *p, uint64_t x)
 {
 
-       return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
+       assert(sizeof(uint64_t) == sizeof(unsigned long));
+
+       return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+
+       assert(sizeof(uint64_t) == sizeof(unsigned long));
+
+       return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
 }
-#  elif (defined(__amd64__) || defined(__x86_64__))
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+
+       assert(sizeof(uint64_t) == sizeof(unsigned long));
+
+       atomic_store_rel_long(p, x);
+}
+#  elif (defined(JEMALLOC_OSATOMIC))
 JEMALLOC_INLINE uint64_t
 atomic_add_uint64(uint64_t *p, uint64_t x)
 {
-       uint64_t t = x;
-
-       asm volatile (
-           "lock; xaddq %0, %1;"
-           : "+r" (t), "=m" (*p) /* Outputs. */
-           : "m" (*p) /* Inputs. */
-           );
 
-       return (t + x);
+       return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
 }
 
 JEMALLOC_INLINE uint64_t
 atomic_sub_uint64(uint64_t *p, uint64_t x)
 {
-       uint64_t t;
 
-       x = (uint64_t)(-(int64_t)x);
-       t = x;
-       asm volatile (
-           "lock; xaddq %0, %1;"
-           : "+r" (t), "=m" (*p) /* Outputs. */
-           : "m" (*p) /* Inputs. */
-           );
+       return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
+}
 
-       return (t + x);
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+
+       return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
 }
-#  elif (defined(JEMALLOC_ATOMIC9))
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+       uint64_t o;
+
+       /* The documented OSAtomic*() API does not expose an atomic exchange. */
+       do {
+               o = atomic_read_uint64(p);
+       } while (atomic_cas_uint64(p, o, x));
+}
+#  elif (defined(_MSC_VER))
 JEMALLOC_INLINE uint64_t
 atomic_add_uint64(uint64_t *p, uint64_t x)
 {
 
-       /*
-        * atomic_fetchadd_64() doesn't exist, but we only ever use this
-        * function on LP64 systems, so atomic_fetchadd_long() will do.
-        */
-       assert(sizeof(uint64_t) == sizeof(unsigned long));
-
-       return (atomic_fetchadd_long(p, (unsigned long)x) + x);
+       return (InterlockedExchangeAdd64(p, x) + x);
 }
 
 JEMALLOC_INLINE uint64_t
 atomic_sub_uint64(uint64_t *p, uint64_t x)
 {
 
-       assert(sizeof(uint64_t) == sizeof(unsigned long));
+       return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
+}
 
-       return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+       uint64_t o;
+
+       o = InterlockedCompareExchange64(p, s, c);
+       return (o != c);
 }
-#  elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+
+       InterlockedExchange64(p, x);
+}
+#  elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
+    defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
 JEMALLOC_INLINE uint64_t
 atomic_add_uint64(uint64_t *p, uint64_t x)
 {
@@ -152,6 +270,20 @@ atomic_sub_uint64(uint64_t *p, uint64_t x)
 
        return (__sync_sub_and_fetch(p, x));
 }
+
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+
+       return (!__sync_bool_compare_and_swap(p, c, s));
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+
+       __sync_lock_test_and_set(p, x);
+}
 #  else
 #    error "Missing implementation for 64-bit atomic operations"
 #  endif
@@ -159,93 +291,184 @@ atomic_sub_uint64(uint64_t *p, uint64_t x)
 
 /******************************************************************************/
 /* 32-bit operations. */
-#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
+#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
 JEMALLOC_INLINE uint32_t
 atomic_add_uint32(uint32_t *p, uint32_t x)
 {
+       uint32_t t = x;
 
-       return (__sync_add_and_fetch(p, x));
+       asm volatile (
+           "lock; xaddl %0, %1;"
+           : "+r" (t), "=m" (*p) /* Outputs. */
+           : "m" (*p) /* Inputs. */
+           );
+
+       return (t + x);
 }
 
 JEMALLOC_INLINE uint32_t
 atomic_sub_uint32(uint32_t *p, uint32_t x)
 {
+       uint32_t t;
 
-       return (__sync_sub_and_fetch(p, x));
+       x = (uint32_t)(-(int32_t)x);
+       t = x;
+       asm volatile (
+           "lock; xaddl %0, %1;"
+           : "+r" (t), "=m" (*p) /* Outputs. */
+           : "m" (*p) /* Inputs. */
+           );
+
+       return (t + x);
 }
-#elif (defined(_MSC_VER))
+
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+       uint8_t success;
+
+       asm volatile (
+           "lock; cmpxchgl %4, %0;"
+           "sete %1;"
+           : "=m" (*p), "=a" (success) /* Outputs. */
+           : "m" (*p), "a" (c), "r" (s) /* Inputs. */
+           : "memory"
+           );
+
+       return (!(bool)success);
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+
+       asm volatile (
+           "xchgl %1, %0;" /* Lock is implied by xchgl. */
+           : "=m" (*p), "+r" (x) /* Outputs. */
+           : "m" (*p) /* Inputs. */
+           : "memory" /* Clobbers. */
+           );
+}
+#  elif (defined(JEMALLOC_C11ATOMICS))
 JEMALLOC_INLINE uint32_t
 atomic_add_uint32(uint32_t *p, uint32_t x)
 {
-
-       return (InterlockedExchangeAdd(p, x) + x);
+       volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
+       return (atomic_fetch_add(a, x) + x);
 }
 
 JEMALLOC_INLINE uint32_t
 atomic_sub_uint32(uint32_t *p, uint32_t x)
 {
+       volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
+       return (atomic_fetch_sub(a, x) - x);
+}
 
-       return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+       volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
+       return (!atomic_compare_exchange_strong(a, &c, s));
 }
-#elif (defined(JEMALLOC_OSATOMIC))
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+       volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
+       atomic_store(a, x);
+}
+#elif (defined(JEMALLOC_ATOMIC9))
 JEMALLOC_INLINE uint32_t
 atomic_add_uint32(uint32_t *p, uint32_t x)
 {
 
-       return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
+       return (atomic_fetchadd_32(p, x) + x);
 }
 
 JEMALLOC_INLINE uint32_t
 atomic_sub_uint32(uint32_t *p, uint32_t x)
 {
 
-       return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
+       return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
 }
-#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
+
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+
+       return (!atomic_cmpset_32(p, c, s));
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+
+       atomic_store_rel_32(p, x);
+}
+#elif (defined(JEMALLOC_OSATOMIC))
 JEMALLOC_INLINE uint32_t
 atomic_add_uint32(uint32_t *p, uint32_t x)
 {
-       uint32_t t = x;
 
-       asm volatile (
-           "lock; xaddl %0, %1;"
-           : "+r" (t), "=m" (*p) /* Outputs. */
-           : "m" (*p) /* Inputs. */
-           );
-
-       return (t + x);
+       return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
 }
 
 JEMALLOC_INLINE uint32_t
 atomic_sub_uint32(uint32_t *p, uint32_t x)
 {
-       uint32_t t;
 
-       x = (uint32_t)(-(int32_t)x);
-       t = x;
-       asm volatile (
-           "lock; xaddl %0, %1;"
-           : "+r" (t), "=m" (*p) /* Outputs. */
-           : "m" (*p) /* Inputs. */
-           );
+       return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
+}
 
-       return (t + x);
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+
+       return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
 }
-#elif (defined(JEMALLOC_ATOMIC9))
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+       uint32_t o;
+
+       /* The documented OSAtomic*() API does not expose an atomic exchange. */
+       do {
+               o = atomic_read_uint32(p);
+       } while (atomic_cas_uint32(p, o, x));
+}
+#elif (defined(_MSC_VER))
 JEMALLOC_INLINE uint32_t
 atomic_add_uint32(uint32_t *p, uint32_t x)
 {
 
-       return (atomic_fetchadd_32(p, x) + x);
+       return (InterlockedExchangeAdd(p, x) + x);
 }
 
 JEMALLOC_INLINE uint32_t
 atomic_sub_uint32(uint32_t *p, uint32_t x)
 {
 
-       return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
+       return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+       uint32_t o;
+
+       o = InterlockedCompareExchange(p, s, c);
+       return (o != c);
 }
-#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+
+       InterlockedExchange(p, x);
+}
+#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
+ defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
 JEMALLOC_INLINE uint32_t
 atomic_add_uint32(uint32_t *p, uint32_t x)
 {
@@ -259,10 +482,72 @@ atomic_sub_uint32(uint32_t *p, uint32_t x)
 
        return (__sync_sub_and_fetch(p, x));
 }
+
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+
+       return (!__sync_bool_compare_and_swap(p, c, s));
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+
+       __sync_lock_test_and_set(p, x);
+}
 #else
 #  error "Missing implementation for 32-bit atomic operations"
 #endif
 
+/******************************************************************************/
+/* Pointer operations. */
+JEMALLOC_INLINE void *
+atomic_add_p(void **p, void *x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+       return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
+#elif (LG_SIZEOF_PTR == 2)
+       return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
+#endif
+}
+
+JEMALLOC_INLINE void *
+atomic_sub_p(void **p, void *x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+       return ((void *)atomic_add_uint64((uint64_t *)p,
+           (uint64_t)-((int64_t)x)));
+#elif (LG_SIZEOF_PTR == 2)
+       return ((void *)atomic_add_uint32((uint32_t *)p,
+           (uint32_t)-((int32_t)x)));
+#endif
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_p(void **p, void *c, void *s)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+       return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
+#elif (LG_SIZEOF_PTR == 2)
+       return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
+#endif
+}
+
+JEMALLOC_INLINE void
+atomic_write_p(void **p, const void *x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+       atomic_write_uint64((uint64_t *)p, (uint64_t)x);
+#elif (LG_SIZEOF_PTR == 2)
+       atomic_write_uint32((uint32_t *)p, (uint32_t)x);
+#endif
+}
+
 /******************************************************************************/
 /* size_t operations. */
 JEMALLOC_INLINE size_t
@@ -289,6 +574,28 @@ atomic_sub_z(size_t *p, size_t x)
 #endif
 }
 
+JEMALLOC_INLINE bool
+atomic_cas_z(size_t *p, size_t c, size_t s)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+       return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
+#elif (LG_SIZEOF_PTR == 2)
+       return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
+#endif
+}
+
+JEMALLOC_INLINE void
+atomic_write_z(size_t *p, size_t x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+       atomic_write_uint64((uint64_t *)p, (uint64_t)x);
+#elif (LG_SIZEOF_PTR == 2)
+       atomic_write_uint32((uint32_t *)p, (uint32_t)x);
+#endif
+}
+
 /******************************************************************************/
 /* unsigned operations. */
 JEMALLOC_INLINE unsigned
@@ -314,6 +621,29 @@ atomic_sub_u(unsigned *p, unsigned x)
            (uint32_t)-((int32_t)x)));
 #endif
 }
+
+JEMALLOC_INLINE bool
+atomic_cas_u(unsigned *p, unsigned c, unsigned s)
+{
+
+#if (LG_SIZEOF_INT == 3)
+       return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
+#elif (LG_SIZEOF_INT == 2)
+       return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
+#endif
+}
+
+JEMALLOC_INLINE void
+atomic_write_u(unsigned *p, unsigned x)
+{
+
+#if (LG_SIZEOF_INT == 3)
+       atomic_write_uint64((uint64_t *)p, (uint64_t)x);
+#elif (LG_SIZEOF_INT == 2)
+       atomic_write_uint32((uint32_t *)p, (uint32_t)x);
+#endif
+}
+
 /******************************************************************************/
 #endif
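
Every atomic_cas_*() variant above follows the same convention: false means the swap happened, true means it did not, so callers can retry with a bare while loop, and atomic_write_*() can be emulated with a CAS loop where no native exchange exists (the OSAtomic fallback does exactly that). A minimal sketch of both conventions using the GCC __sync builtins, one of the fallback families the header already relies on:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* jemalloc convention: CAS returns false on success, true on failure. */
static bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{

        return (!__sync_bool_compare_and_swap(p, c, s));
}

/* Read by adding zero, as the atomic_read_*() macros above do. */
static uint32_t
atomic_read_uint32(uint32_t *p)
{

        return (__sync_add_and_fetch(p, 0));
}

/* Emulate an atomic store with a CAS loop, as the OSAtomic fallback does. */
static void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
        uint32_t o;

        do {
                o = atomic_read_uint32(p);
        } while (atomic_cas_uint32(p, o, x));
}

int
main(void)
{
        uint32_t v = 7;

        atomic_write_uint32(&v, 42);
        printf("%u\n", v);      /* 42 */
        return (0);
}
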
 
diff --git a/src/jemalloc/include/jemalloc/internal/base.h b/src/jemalloc/include/jemalloc/internal/base.h
index 3fb80b92a9874040d3c4cbb2ca32367ab9717dab..39e46ee445d378a20b2731b6320790a3c5eb009e 100644 (file)
@@ -10,9 +10,7 @@
 #ifdef JEMALLOC_H_EXTERNS
 
 void   *base_alloc(size_t size);
-void   *base_calloc(size_t number, size_t size);
-extent_node_t *base_node_alloc(void);
-void   base_node_dalloc(extent_node_t *node);
+void   base_stats_get(size_t *allocated, size_t *resident, size_t *mapped);
 bool   base_boot(void);
 void   base_prefork(void);
 void   base_postfork_parent(void);
diff --git a/src/jemalloc/include/jemalloc/internal/bitmap.h b/src/jemalloc/include/jemalloc/internal/bitmap.h
index fcc6005c795be71a7480f48953f44cdec2eefb18..2594e3a49259d5f00b73ceede205dcdca0e56e9f 100644 (file)
@@ -15,6 +15,15 @@ typedef unsigned long bitmap_t;
 #define        BITMAP_GROUP_NBITS              (ZU(1) << LG_BITMAP_GROUP_NBITS)
 #define        BITMAP_GROUP_NBITS_MASK         (BITMAP_GROUP_NBITS-1)
 
+/*
+ * Do some analysis on how big the bitmap is before we use a tree.  For a brute
+ * force linear search, if we would have to call ffsl more than 2^3 times, use a
+ * tree instead.
+ */
+#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
+#  define USE_TREE
+#endif
+
 /* Number of groups required to store a given number of bits. */
 #define        BITMAP_BITS2GROUPS(nbits)                                       \
     ((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
@@ -48,6 +57,8 @@ typedef unsigned long bitmap_t;
 /*
  * Maximum number of groups required to support LG_BITMAP_MAXBITS.
  */
+#ifdef USE_TREE
+
 #if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
 #  define BITMAP_GROUPS_MAX    BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
 #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
@@ -65,6 +76,12 @@ typedef unsigned long bitmap_t;
     (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP)                             \
     + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
 
+#else /* USE_TREE */
+
+#define        BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
+
+#endif /* USE_TREE */
+
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
@@ -78,6 +95,7 @@ struct bitmap_info_s {
        /* Logical number of bits in bitmap (stored at bottom level). */
        size_t nbits;
 
+#ifdef USE_TREE
        /* Number of levels necessary for nbits. */
        unsigned nlevels;
 
@@ -86,6 +104,10 @@ struct bitmap_info_s {
         * bottom to top (e.g. the bottom level is stored in levels[0]).
         */
        bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
+#else /* USE_TREE */
+       /* Number of groups necessary for nbits. */
+       size_t ngroups;
+#endif /* USE_TREE */
 };
 
 #endif /* JEMALLOC_H_STRUCTS */
@@ -93,9 +115,8 @@ struct bitmap_info_s {
 #ifdef JEMALLOC_H_EXTERNS
 
 void   bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
-size_t bitmap_info_ngroups(const bitmap_info_t *binfo);
-size_t bitmap_size(size_t nbits);
 void   bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
+size_t bitmap_size(const bitmap_info_t *binfo);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -113,10 +134,20 @@ void      bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
 JEMALLOC_INLINE bool
 bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
 {
-       unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
+#ifdef USE_TREE
+       size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
        bitmap_t rg = bitmap[rgoff];
        /* The bitmap is full iff the root group is 0. */
        return (rg == 0);
+#else
+       size_t i;
+
+       for (i = 0; i < binfo->ngroups; i++) {
+               if (bitmap[i] != 0)
+                       return (false);
+       }
+       return (true);
+#endif
 }
 
 JEMALLOC_INLINE bool
@@ -128,7 +159,7 @@ bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
        assert(bit < binfo->nbits);
        goff = bit >> LG_BITMAP_GROUP_NBITS;
        g = bitmap[goff];
-       return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
+       return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))));
 }
 
 JEMALLOC_INLINE void
@@ -143,10 +174,11 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
        goff = bit >> LG_BITMAP_GROUP_NBITS;
        gp = &bitmap[goff];
        g = *gp;
-       assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
-       g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+       assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
+       g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
        *gp = g;
        assert(bitmap_get(bitmap, binfo, bit));
+#ifdef USE_TREE
        /* Propagate group state transitions up the tree. */
        if (g == 0) {
                unsigned i;
@@ -155,13 +187,14 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
                        goff = bit >> LG_BITMAP_GROUP_NBITS;
                        gp = &bitmap[binfo->levels[i].group_offset + goff];
                        g = *gp;
-                       assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
-                       g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+                       assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
+                       g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
                        *gp = g;
                        if (g != 0)
                                break;
                }
        }
+#endif
 }
 
 /* sfu: set first unset. */
@@ -174,15 +207,24 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
 
        assert(!bitmap_full(bitmap, binfo));
 
+#ifdef USE_TREE
        i = binfo->nlevels - 1;
        g = bitmap[binfo->levels[i].group_offset];
-       bit = jemalloc_ffsl(g) - 1;
+       bit = ffs_lu(g) - 1;
        while (i > 0) {
                i--;
                g = bitmap[binfo->levels[i].group_offset + bit];
-               bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1);
+               bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
        }
-
+#else
+       i = 0;
+       g = bitmap[0];
+       while ((bit = ffs_lu(g)) == 0) {
+               i++;
+               g = bitmap[i];
+       }
+       bit = (bit - 1) + (i << 6);
+#endif
        bitmap_set(bitmap, binfo, bit);
        return (bit);
 }
@@ -193,7 +235,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
        size_t goff;
        bitmap_t *gp;
        bitmap_t g;
-       bool propagate;
+       UNUSED bool propagate;
 
        assert(bit < binfo->nbits);
        assert(bitmap_get(bitmap, binfo, bit));
@@ -201,10 +243,11 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
        gp = &bitmap[goff];
        g = *gp;
        propagate = (g == 0);
-       assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
-       g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+       assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
+       g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
        *gp = g;
        assert(!bitmap_get(bitmap, binfo, bit));
+#ifdef USE_TREE
        /* Propagate group state transitions up the tree. */
        if (propagate) {
                unsigned i;
@@ -214,14 +257,15 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
                        gp = &bitmap[binfo->levels[i].group_offset + goff];
                        g = *gp;
                        propagate = (g == 0);
-                       assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
+                       assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
                            == 0);
-                       g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+                       g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
                        *gp = g;
                        if (!propagate)
                                break;
                }
        }
+#endif /* USE_TREE */
 }
 
 #endif
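
For bitmaps small enough to skip the tree, the new non-tree path scans groups linearly and takes the first set bit it finds; the result combines the in-group bit offset with the group index shifted by a hard-coded 6 (64-bit groups). A standalone sketch of that scan, approximating ffs_lu() with GCC's __builtin_ffsl():

#include <stddef.h>
#include <stdio.h>

/* Linear find-first-set over an array of 64-bit groups (a sketch only;
 * jemalloc's bitmap_sfu() additionally asserts the bitmap is not full). */
static size_t
first_set_bit(const unsigned long *groups, size_t ngroups)
{
        size_t i;

        for (i = 0; i < ngroups; i++) {
                int bit = __builtin_ffsl((long)groups[i]);
                if (bit != 0)
                        return ((size_t)(bit - 1) + (i << 6));
        }
        return ((size_t)-1);    /* No bit set anywhere. */
}

int
main(void)
{
        unsigned long groups[3] = {0, 0, 1UL << 5};

        printf("first set bit: %zu\n", first_set_bit(groups, 3)); /* 133 */
        return (0);
}
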
diff --git a/src/jemalloc/include/jemalloc/internal/chunk.h b/src/jemalloc/include/jemalloc/internal/chunk.h
index 2e68a020296ec43a02783b7048853a7f5bb83147..5d19383530334419fb24cb8332e55044d84dc82d 100644 (file)
@@ -5,7 +5,7 @@
  * Size and alignment of memory chunks that are allocated by the OS's virtual
  * memory system.
  */
-#define        LG_CHUNK_DEFAULT        22
+#define        LG_CHUNK_DEFAULT        21
 
 /* Return the chunk address for allocation address a. */
 #define        CHUNK_ADDR2BASE(a)                                              \
 #define        CHUNK_CEILING(s)                                                \
        (((s) + chunksize_mask) & ~chunksize_mask)
 
+#define        CHUNK_HOOKS_INITIALIZER {                                       \
+    NULL,                                                              \
+    NULL,                                                              \
+    NULL,                                                              \
+    NULL,                                                              \
+    NULL,                                                              \
+    NULL,                                                              \
+    NULL                                                               \
+}
+
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 extern size_t          opt_lg_chunk;
 extern const char      *opt_dss;
 
-/* Protects stats_chunks; currently not used for any other purpose. */
-extern malloc_mutex_t  chunks_mtx;
-/* Chunk statistics. */
-extern chunk_stats_t   stats_chunks;
-
-extern rtree_t         *chunks_rtree;
+extern rtree_t         chunks_rtree;
 
 extern size_t          chunksize;
 extern size_t          chunksize_mask; /* (chunksize - 1). */
 extern size_t          chunk_npages;
-extern size_t          map_bias; /* Number of arena chunk header pages. */
-extern size_t          map_misc_offset;
-extern size_t          arena_maxclass; /* Max size class for arenas. */
 
+extern const chunk_hooks_t     chunk_hooks_default;
+
+chunk_hooks_t  chunk_hooks_get(arena_t *arena);
+chunk_hooks_t  chunk_hooks_set(arena_t *arena,
+    const chunk_hooks_t *chunk_hooks);
+
+bool   chunk_register(const void *chunk, const extent_node_t *node);
+void   chunk_deregister(const void *chunk, const extent_node_t *node);
 void   *chunk_alloc_base(size_t size);
-void   *chunk_alloc_arena(chunk_alloc_t *chunk_alloc,
-    chunk_dalloc_t *chunk_dalloc, unsigned arena_ind, void *new_addr,
-    size_t size, size_t alignment, bool *zero);
-void   *chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
-    bool *zero, unsigned arena_ind);
-void   chunk_unmap(void *chunk, size_t size);
-bool   chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
+void   *chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool dalloc_node);
+void   *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
+void   chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool committed);
+void   chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool zeroed, bool committed);
+void   chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool committed);
+bool   chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
+    size_t length);
+bool   chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, size_t offset, size_t length);
 bool   chunk_boot(void);
 void   chunk_prefork(void);
 void   chunk_postfork_parent(void);
@@ -61,6 +79,19 @@ void chunk_postfork_child(void);
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
+#ifndef JEMALLOC_ENABLE_INLINE
+extent_node_t  *chunk_lookup(const void *chunk, bool dependent);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
+JEMALLOC_INLINE extent_node_t *
+chunk_lookup(const void *ptr, bool dependent)
+{
+
+       return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent));
+}
+#endif
+
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
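
Because the chunk size is a power of two (the diff halves the default from 4 MiB to 2 MiB), chunk.h does all of its rounding with mask arithmetic: CHUNK_ADDR2BASE() masks low bits off an address, and CHUNK_CEILING() rounds a size up with an add-and-mask. A quick sketch using the new default:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Mirror of the rounding arithmetic with the new 2 MiB default. */
#define LG_CHUNK_DEFAULT        21
static const size_t chunksize = (size_t)1 << LG_CHUNK_DEFAULT;
static const size_t chunksize_mask = ((size_t)1 << LG_CHUNK_DEFAULT) - 1;

#define CHUNK_CEILING(s)        (((s) + chunksize_mask) & ~chunksize_mask)

int
main(void)
{

        assert(CHUNK_CEILING(1) == chunksize);
        assert(CHUNK_CEILING(chunksize) == chunksize);
        assert(CHUNK_CEILING(chunksize + 1) == 2 * chunksize);
        printf("chunk size: %zu bytes\n", chunksize);
        return (0);
}
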
 
diff --git a/src/jemalloc/include/jemalloc/internal/chunk_dss.h b/src/jemalloc/include/jemalloc/internal/chunk_dss.h
index 4535ce09c09a822b2cb74bc1b13c5b24feb5c5d3..388f46be0801be97ff341e75d939383ad13eae3a 100644 (file)
@@ -23,7 +23,8 @@ extern const char *dss_prec_names[];
 
 dss_prec_t     chunk_dss_prec_get(void);
 bool   chunk_dss_prec_set(dss_prec_t dss_prec);
-void   *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
+void   *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
+    size_t alignment, bool *zero, bool *commit);
 bool   chunk_in_dss(void *chunk);
 bool   chunk_dss_boot(void);
 void   chunk_dss_prefork(void);
diff --git a/src/jemalloc/include/jemalloc/internal/chunk_mmap.h b/src/jemalloc/include/jemalloc/internal/chunk_mmap.h
index c5d5c6c0c7ac41fa42bb1766a21e959a3d1db84f..6f2d0ac2ed37312acdb43e3520f801ce0efb34fe 100644 (file)
@@ -9,9 +9,8 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-bool   pages_purge(void *addr, size_t length);
-
-void   *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
+void   *chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment,
+    bool *zero, bool *commit);
 bool   chunk_dalloc_mmap(void *chunk, size_t size);
 
 #endif /* JEMALLOC_H_EXTERNS */
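
chunk_alloc_mmap() must hand back chunk-aligned memory, but mmap() itself only guarantees page alignment. The usual remedy, and roughly what jemalloc's slow path does, is to over-map by the alignment and unmap the misaligned head and tail (the fast path first tries a plain map and keeps it if it happens to be aligned). A hedged sketch of the over-map-and-trim approach, assuming Linux-style MAP_ANONYMOUS and omitting most error handling; this is not jemalloc's actual code:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

/* Over-map by the alignment, then trim the unaligned head and tail. */
static void *
alloc_aligned_mmap(size_t size, size_t alignment)
{
        size_t maplen = size + alignment;
        char *addr, *ret;
        size_t lead, trail;

        addr = mmap(NULL, maplen, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (addr == MAP_FAILED)
                return (NULL);

        ret = (char *)(((uintptr_t)addr + alignment - 1) & ~(alignment - 1));
        lead = (size_t)(ret - addr);
        trail = maplen - lead - size;
        if (lead != 0)
                munmap(addr, lead);             /* Unaligned head. */
        if (trail != 0)
                munmap(ret + size, trail);      /* Leftover tail. */
        return (ret);
}

int
main(void)
{
        void *p = alloc_aligned_mmap((size_t)1 << 21, (size_t)1 << 21);

        printf("2 MiB-aligned mapping at %p\n", p);
        return (0);
}
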
diff --git a/src/jemalloc/include/jemalloc/internal/ckh.h b/src/jemalloc/include/jemalloc/internal/ckh.h
index 75c1c979f2704c1acb985f5ee16366dadd254314..f75ad90b73ccdf2a431ed69e744ae78c3cf7048d 100644 (file)
@@ -40,9 +40,7 @@ struct ckh_s {
 #endif
 
        /* Used for pseudo-random number generation. */
-#define        CKH_A           1103515241
-#define        CKH_C           12347
-       uint32_t        prng_state;
+       uint64_t        prng_state;
 
        /* Total number of items. */
        size_t          count;
@@ -74,7 +72,7 @@ bool  ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
 bool   ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
 bool   ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
     void **data);
-bool   ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data);
+bool   ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
 void   ckh_string_hash(const void *key, size_t r_hash[2]);
 bool   ckh_string_keycomp(const void *k1, const void *k2);
 void   ckh_pointer_hash(const void *key, size_t r_hash[2]);
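
ckh's prng_state grows from 32 to 64 bits and the old CKH_A/CKH_C constants leave the header; the state is still advanced as a linear congruential generator, just with more usable high bits for bucket selection. A sketch of a 64-bit LCG step; the constants below are the well-known Knuth multipliers, used purely for illustration and not necessarily what jemalloc's prng code uses:

#include <stdint.h>
#include <stdio.h>

/* One 64-bit LCG step; illustrative constants, not jemalloc's. */
static uint64_t
lcg_next(uint64_t *state)
{

        *state = *state * 6364136223846793005ULL + 1442695040888963407ULL;
        return (*state);
}

int
main(void)
{
        uint64_t state = 42;
        int i;

        for (i = 0; i < 3; i++)
                printf("%016llx\n", (unsigned long long)lcg_next(&state));
        return (0);
}
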
diff --git a/src/jemalloc/include/jemalloc/internal/ctl.h b/src/jemalloc/include/jemalloc/internal/ctl.h
index 2d301bf1786cfb897b349a27f72e81d68ad2c16c..9c5e9328bd7d868f78bf75b4a33db1c9e24a457b 100644 (file)
@@ -34,8 +34,13 @@ struct ctl_arena_stats_s {
        bool                    initialized;
        unsigned                nthreads;
        const char              *dss;
+       ssize_t                 lg_dirty_mult;
+       ssize_t                 decay_time;
        size_t                  pactive;
        size_t                  pdirty;
+
+       /* The remainder are only populated if config_stats is true. */
+
        arena_stats_t           astats;
 
        /* Aggregate stats for small size classes, based on bin stats. */
@@ -46,17 +51,15 @@ struct ctl_arena_stats_s {
 
        malloc_bin_stats_t      bstats[NBINS];
        malloc_large_stats_t    *lstats;        /* nlclasses elements. */
+       malloc_huge_stats_t     *hstats;        /* nhclasses elements. */
 };
 
 struct ctl_stats_s {
        size_t                  allocated;
        size_t                  active;
+       size_t                  metadata;
+       size_t                  resident;
        size_t                  mapped;
-       struct {
-               size_t          current;        /* stats_chunks.curchunks */
-               uint64_t        total;          /* stats_chunks.nchunks */
-               size_t          high;           /* stats_chunks.highchunks */
-       } chunks;
        unsigned                narenas;
        ctl_arena_stats_t       *arenas;        /* (narenas + 1) elements. */
 };
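
ctl_stats_s drops the per-chunk counters and gains metadata and resident totals, which surface through the public mallctl() namespace. A usage sketch reading them, assuming an unprefixed jemalloc build with statistics enabled and the standard "stats.metadata"/"stats.resident" names wired up to the fields added above (the "epoch" write refreshes the cached snapshot first):

#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t epoch = 1;
        size_t sz, allocated, resident, metadata;

        /* Advance the epoch so the cached stats snapshot is refreshed. */
        sz = sizeof(epoch);
        mallctl("epoch", &epoch, &sz, &epoch, sz);

        sz = sizeof(size_t);
        mallctl("stats.allocated", &allocated, &sz, NULL, 0);
        mallctl("stats.resident", &resident, &sz, NULL, 0);
        mallctl("stats.metadata", &metadata, &sz, NULL, 0);

        printf("allocated=%zu resident=%zu metadata=%zu\n", allocated,
            resident, metadata);
        return (0);
}
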
diff --git a/src/jemalloc/include/jemalloc/internal/extent.h b/src/jemalloc/include/jemalloc/internal/extent.h
index 5b00076f26d2c644f836424eef4716d856d003fa..386d50ef4cd5eda34cfb0bb232c8484163c7eeee 100644 (file)
@@ -7,28 +7,53 @@ typedef struct extent_node_s extent_node_t;
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
-/* Tree of extents. */
+/* Tree of extents.  Use accessor functions for en_* fields. */
 struct extent_node_s {
-       /* Linkage for the size/address-ordered tree. */
-       rb_node(extent_node_t)  link_szad;
+       /* Arena from which this extent came, if any. */
+       arena_t                 *en_arena;
 
-       /* Linkage for the address-ordered tree. */
-       rb_node(extent_node_t)  link_ad;
+       /* Pointer to the extent that this tree node is responsible for. */
+       void                    *en_addr;
+
+       /* Total region size. */
+       size_t                  en_size;
+
+       /*
+        * The zeroed flag is used by chunk recycling code to track whether
+        * memory is zero-filled.
+        */
+       bool                    en_zeroed;
+
+       /*
+        * True if physical memory is committed to the extent, whether
+        * explicitly or implicitly as on a system that overcommits and
+        * satisfies physical memory needs on demand via soft page faults.
+        */
+       bool                    en_committed;
+
+       /*
+        * The achunk flag is used to validate that huge allocation lookups
+        * don't return arena chunks.
+        */
+       bool                    en_achunk;
 
        /* Profile counters, used for huge objects. */
-       prof_tctx_t             *prof_tctx;
+       prof_tctx_t             *en_prof_tctx;
 
-       /* Pointer to the extent that this tree node is responsible for. */
-       void                    *addr;
+       /* Linkage for arena's runs_dirty and chunks_cache rings. */
+       arena_runs_dirty_link_t rd;
+       qr(extent_node_t)       cc_link;
 
-       /* Total region size. */
-       size_t                  size;
+       union {
+               /* Linkage for the size/address-ordered tree. */
+               rb_node(extent_node_t)  szad_link;
 
-       /* Arena from which this extent came, if any */
-       arena_t                 *arena;
+               /* Linkage for arena's huge and node_cache lists. */
+               ql_elm(extent_node_t)   ql_link;
+       };
 
-       /* True if zero-filled; used by chunk recycling code. */
-       bool                    zeroed;
+       /* Linkage for the address-ordered tree. */
+       rb_node(extent_node_t)  ad_link;
 };
 typedef rb_tree(extent_node_t) extent_tree_t;
 
@@ -44,6 +69,171 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
+#ifndef JEMALLOC_ENABLE_INLINE
+arena_t        *extent_node_arena_get(const extent_node_t *node);
+void   *extent_node_addr_get(const extent_node_t *node);
+size_t extent_node_size_get(const extent_node_t *node);
+bool   extent_node_zeroed_get(const extent_node_t *node);
+bool   extent_node_committed_get(const extent_node_t *node);
+bool   extent_node_achunk_get(const extent_node_t *node);
+prof_tctx_t    *extent_node_prof_tctx_get(const extent_node_t *node);
+void   extent_node_arena_set(extent_node_t *node, arena_t *arena);
+void   extent_node_addr_set(extent_node_t *node, void *addr);
+void   extent_node_size_set(extent_node_t *node, size_t size);
+void   extent_node_zeroed_set(extent_node_t *node, bool zeroed);
+void   extent_node_committed_set(extent_node_t *node, bool committed);
+void   extent_node_achunk_set(extent_node_t *node, bool achunk);
+void   extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
+void   extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
+    size_t size, bool zeroed, bool committed);
+void   extent_node_dirty_linkage_init(extent_node_t *node);
+void   extent_node_dirty_insert(extent_node_t *node,
+    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
+void   extent_node_dirty_remove(extent_node_t *node);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
+JEMALLOC_INLINE arena_t *
+extent_node_arena_get(const extent_node_t *node)
+{
+
+       return (node->en_arena);
+}
+
+JEMALLOC_INLINE void *
+extent_node_addr_get(const extent_node_t *node)
+{
+
+       return (node->en_addr);
+}
+
+JEMALLOC_INLINE size_t
+extent_node_size_get(const extent_node_t *node)
+{
+
+       return (node->en_size);
+}
+
+JEMALLOC_INLINE bool
+extent_node_zeroed_get(const extent_node_t *node)
+{
+
+       return (node->en_zeroed);
+}
+
+JEMALLOC_INLINE bool
+extent_node_committed_get(const extent_node_t *node)
+{
+
+       assert(!node->en_achunk);
+       return (node->en_committed);
+}
+
+JEMALLOC_INLINE bool
+extent_node_achunk_get(const extent_node_t *node)
+{
+
+       return (node->en_achunk);
+}
+
+JEMALLOC_INLINE prof_tctx_t *
+extent_node_prof_tctx_get(const extent_node_t *node)
+{
+
+       return (node->en_prof_tctx);
+}
+
+JEMALLOC_INLINE void
+extent_node_arena_set(extent_node_t *node, arena_t *arena)
+{
+
+       node->en_arena = arena;
+}
+
+JEMALLOC_INLINE void
+extent_node_addr_set(extent_node_t *node, void *addr)
+{
+
+       node->en_addr = addr;
+}
+
+JEMALLOC_INLINE void
+extent_node_size_set(extent_node_t *node, size_t size)
+{
+
+       node->en_size = size;
+}
+
+JEMALLOC_INLINE void
+extent_node_zeroed_set(extent_node_t *node, bool zeroed)
+{
+
+       node->en_zeroed = zeroed;
+}
+
+JEMALLOC_INLINE void
+extent_node_committed_set(extent_node_t *node, bool committed)
+{
+
+       node->en_committed = committed;
+}
+
+JEMALLOC_INLINE void
+extent_node_achunk_set(extent_node_t *node, bool achunk)
+{
+
+       node->en_achunk = achunk;
+}
+
+JEMALLOC_INLINE void
+extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
+{
+
+       node->en_prof_tctx = tctx;
+}
+
+JEMALLOC_INLINE void
+extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
+    bool zeroed, bool committed)
+{
+
+       extent_node_arena_set(node, arena);
+       extent_node_addr_set(node, addr);
+       extent_node_size_set(node, size);
+       extent_node_zeroed_set(node, zeroed);
+       extent_node_committed_set(node, committed);
+       extent_node_achunk_set(node, false);
+       if (config_prof)
+               extent_node_prof_tctx_set(node, NULL);
+}
+
+JEMALLOC_INLINE void
+extent_node_dirty_linkage_init(extent_node_t *node)
+{
+
+       qr_new(&node->rd, rd_link);
+       qr_new(node, cc_link);
+}
+
+JEMALLOC_INLINE void
+extent_node_dirty_insert(extent_node_t *node,
+    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
+{
+
+       qr_meld(runs_dirty, &node->rd, rd_link);
+       qr_meld(chunks_dirty, node, cc_link);
+}
+
+JEMALLOC_INLINE void
+extent_node_dirty_remove(extent_node_t *node)
+{
+
+       qr_remove(&node->rd, rd_link);
+       qr_remove(node, cc_link);
+}
+
+#endif
+
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
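
The reworked extent_node_t above places its tree linkage and its list linkage in a union: at any moment a node sits either in the size/address-ordered tree or on one of the arena's huge/node_cache lists, so the two links can share storage. A minimal standalone sketch of that space-saving pattern, with made-up field names rather than jemalloc's:

#include <stddef.h>
#include <stdio.h>

/*
 * Illustrative only (made-up names): two mutually exclusive linkages
 * sharing storage, as extent_node_t does with szad_link/ql_link.
 * The anonymous union requires C11 or a GNU extension, as in the diff.
 */
typedef struct node_s node_t;
struct node_s {
        void    *addr;
        size_t  size;
        union {
                struct { node_t *left, *right; }        tree_link;
                struct { node_t *next; }                list_link;
        };
};

int
main(void)
{

        printf("tree_link offset %zu, list_link offset %zu, total %zu bytes\n",
            offsetof(node_t, tree_link), offsetof(node_t, list_link),
            sizeof(node_t));
        return (0);
}
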
 
index a43bbbeccd4615ab96b00ddcf23276db76e2eb35..864fda81710805f79bd36e2d117ae8049b79c1e2 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * The following hash function is based on MurmurHash3, placed into the public
- * domain by Austin Appleby.  See http://code.google.com/p/smhasher/ for
+ * domain by Austin Appleby.  See https://github.com/aappleby/smhasher for
  * details.
  */
 /******************************************************************************/
@@ -35,19 +35,28 @@ JEMALLOC_INLINE uint32_t
 hash_rotl_32(uint32_t x, int8_t r)
 {
 
-       return (x << r) | (x >> (32 - r));
+       return ((x << r) | (x >> (32 - r)));
 }
 
 JEMALLOC_INLINE uint64_t
 hash_rotl_64(uint64_t x, int8_t r)
 {
-       return (x << r) | (x >> (64 - r));
+
+       return ((x << r) | (x >> (64 - r)));
 }
 
 JEMALLOC_INLINE uint32_t
 hash_get_block_32(const uint32_t *p, int i)
 {
 
+       /* Handle unaligned read. */
+       if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
+               uint32_t ret;
+
+               memcpy(&ret, &p[i], sizeof(uint32_t));
+               return (ret);
+       }
+
        return (p[i]);
 }
 
@@ -55,6 +64,14 @@ JEMALLOC_INLINE uint64_t
 hash_get_block_64(const uint64_t *p, int i)
 {
 
+       /* Handle unaligned read. */
+       if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
+               uint64_t ret;
+
+               memcpy(&ret, &p[i], sizeof(uint64_t));
+               return (ret);
+       }
+
        return (p[i]);
 }
 
@@ -320,13 +337,18 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
 JEMALLOC_INLINE void
 hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
 {
+
+       assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
+
 #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
-       hash_x64_128(key, len, seed, (uint64_t *)r_hash);
+       hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
 #else
-       uint64_t hashes[2];
-       hash_x86_128(key, len, seed, hashes);
-       r_hash[0] = (size_t)hashes[0];
-       r_hash[1] = (size_t)hashes[1];
+       {
+               uint64_t hashes[2];
+               hash_x86_128(key, (int)len, seed, hashes);
+               r_hash[0] = (size_t)hashes[0];
+               r_hash[1] = (size_t)hashes[1];
+       }
 #endif
 }
 #endif
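
The new hash_get_block_32()/hash_get_block_64() above fall back to memcpy() when the input pointer is not naturally aligned, which keeps the MurmurHash3 loops well defined on strict-alignment targets. A standalone illustration of the same technique (assumed helper name, not jemalloc code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a 32-bit block in native byte order; the pointer may be unaligned. */
static uint32_t
load32(const void *p)
{
        uint32_t v;

        if (((uintptr_t)p & (sizeof(uint32_t) - 1)) != 0) {
                /* Unaligned: memcpy compiles down to a safe load. */
                memcpy(&v, p, sizeof(v));
                return (v);
        }
        return (*(const uint32_t *)p);
}

int
main(void)
{
        unsigned char buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};

        /* &buf[1] is deliberately misaligned for uint32_t. */
        printf("%08x\n", (unsigned)load32(&buf[1]));
        return (0);
}
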
index 00d8c09dd8b9ed22ad137c5e7d5a59845feeacb9..cb6f69e63593ade3a7ab0f8e5aa79f8c414dc601 100644 (file)
@@ -9,26 +9,24 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-void   *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero);
-void   *huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
-    bool zero);
-bool   huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
-    size_t extra, bool zero);
+void   *huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
+    tcache_t *tcache);
+void   *huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
+    bool zero, tcache_t *tcache);
+bool   huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize,
+    size_t usize_min, size_t usize_max, bool zero);
 void   *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
-    size_t size, size_t extra, size_t alignment, bool zero,
-    bool try_tcache_dalloc);
+    size_t usize, size_t alignment, bool zero, tcache_t *tcache);
 #ifdef JEMALLOC_JET
 typedef void (huge_dalloc_junk_t)(void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
 #endif
-void   huge_dalloc(void *ptr);
+void   huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+arena_t        *huge_aalloc(const void *ptr);
 size_t huge_salloc(const void *ptr);
 prof_tctx_t    *huge_prof_tctx_get(const void *ptr);
 void   huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
-bool   huge_boot(void);
-void   huge_prefork(void);
-void   huge_postfork_parent(void);
-void   huge_postfork_child(void);
+void   huge_prof_tctx_reset(const void *ptr);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
index a169221bb39ef1fdc096bdc76859129f51bd4c90..3f54391f05f8b2a0077ee41bcd62bd3655a17a3c 100644 (file)
@@ -49,6 +49,7 @@ static const bool config_lazy_lock =
     false
 #endif
     ;
+static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
 static const bool config_prof =
 #ifdef JEMALLOC_PROF
     true
@@ -70,6 +71,13 @@ static const bool config_prof_libunwind =
     false
 #endif
     ;
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
+    true
+#else
+    false
+#endif
+    ;
 static const bool config_munmap =
 #ifdef JEMALLOC_MUNMAP
     true
@@ -126,6 +134,17 @@ static const bool config_ivsalloc =
     false
 #endif
     ;
+static const bool config_cache_oblivious =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+    true
+#else
+    false
+#endif
+    ;
+
+#ifdef JEMALLOC_C11ATOMICS
+#include <stdatomic.h>
+#endif
 
 #ifdef JEMALLOC_ATOMIC9
 #include <machine/atomic.h>
@@ -165,7 +184,24 @@ static const bool config_ivsalloc =
 
 #include "jemalloc/internal/jemalloc_internal_macros.h"
 
-#define        MALLOCX_ARENA_MASK      ((int)~0xff)
+/* Size class index type. */
+typedef unsigned szind_t;
+
+/*
+ * Flags bits:
+ *
+ * a: arena
+ * t: tcache
+ * 0: unused
+ * z: zero
+ * n: alignment
+ *
+ * aaaaaaaa aaaatttt tttttttt 0znnnnnn
+ */
+#define        MALLOCX_ARENA_MASK      ((int)~0xfffff)
+#define        MALLOCX_ARENA_MAX       0xffe
+#define        MALLOCX_TCACHE_MASK     ((int)~0xfff000ffU)
+#define        MALLOCX_TCACHE_MAX      0xffd
 #define        MALLOCX_LG_ALIGN_MASK   ((int)0x3f)
 /* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
 #define        MALLOCX_ALIGN_GET_SPECIFIED(flags)                              \
@@ -174,15 +210,17 @@ static const bool config_ivsalloc =
     (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
 #define        MALLOCX_ZERO_GET(flags)                                         \
     ((bool)(flags & MALLOCX_ZERO))
+
+#define        MALLOCX_TCACHE_GET(flags)                                       \
+    (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
 #define        MALLOCX_ARENA_GET(flags)                                        \
-    (((unsigned)(flags >> 8)) - 1)
+    (((unsigned)(((unsigned)flags) >> 20)) - 1)
 
 /* Smallest size class to support. */
-#define        LG_TINY_MIN             3
 #define        TINY_MIN                (1U << LG_TINY_MIN)
 
 /*
- * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
  * classes).
  */
 #ifndef LG_QUANTUM
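
The widened MALLOCX flag word documented above packs lg(alignment), the zero flag, a tcache index and an arena index into a single int (aaaaaaaa aaaatttt tttttttt 0znnnnnn), with small biases applied so that an all-zero field keeps its default meaning. A hedged standalone sketch of encoding and decoding that layout; the EX_* names are illustrative, only the bit positions are taken from the hunk:

#include <stdio.h>

/*
 * Layout from the diff: aaaaaaaa aaaatttt tttttttt 0znnnnnn
 *   n: lg(alignment), z: zero, t: tcache index + 2, a: arena index + 1
 */
#define EX_LG_ALIGN_MASK        0x3f
#define EX_ZERO_BIT             0x40
#define EX_TCACHE_SHIFT         8
#define EX_ARENA_SHIFT          20

static int
ex_flags(unsigned lg_align, int zero, unsigned tcache_ind, unsigned arena_ind)
{

        return ((int)((lg_align & EX_LG_ALIGN_MASK) |
            (zero ? EX_ZERO_BIT : 0) |
            ((tcache_ind + 2) << EX_TCACHE_SHIFT) |
            ((arena_ind + 1) << EX_ARENA_SHIFT)));
}

int
main(void)
{
        int flags = ex_flags(6, 1, 3, 7);       /* 64-byte aligned, zeroed */

        printf("lg_align=%d zero=%d tcache=%d arena=%d\n",
            flags & EX_LG_ALIGN_MASK, !!(flags & EX_ZERO_BIT),
            (int)(((unsigned)flags >> EX_TCACHE_SHIFT) & 0xfff) - 2,
            (int)(((unsigned)flags >> EX_ARENA_SHIFT) & 0xfff) - 1);
        return (0);
}
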
@@ -195,7 +233,7 @@ static const bool config_ivsalloc =
 #  ifdef __alpha__
 #    define LG_QUANTUM         4
 #  endif
-#  ifdef __sparc64__
+#  if (defined(__sparc64__) || defined(__sparcv9))
 #    define LG_QUANTUM         4
 #  endif
 #  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
@@ -232,7 +270,8 @@ static const bool config_ivsalloc =
 #    define LG_QUANTUM         4
 #  endif
 #  ifndef LG_QUANTUM
-#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
+#    error "Unknown minimum alignment for architecture; specify via "
+        "--with-lg-quantum"
 #  endif
 #endif
 
@@ -272,14 +311,17 @@ static const bool config_ivsalloc =
 #define        CACHELINE_CEILING(s)                                            \
        (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
 
-/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
+/* Page size.  LG_PAGE is determined by the configure script. */
 #ifdef PAGE_MASK
 #  undef PAGE_MASK
 #endif
-#define        LG_PAGE         STATIC_PAGE_SHIFT
-#define        PAGE            ((size_t)(1U << STATIC_PAGE_SHIFT))
+#define        PAGE            ((size_t)(1U << LG_PAGE))
 #define        PAGE_MASK       ((size_t)(PAGE - 1))
 
+/* Return the page base address for the page containing address a. */
+#define        PAGE_ADDR2BASE(a)                                               \
+       ((void *)((uintptr_t)(a) & ~PAGE_MASK))
+
 /* Return the smallest pagesize multiple that is >= s. */
 #define        PAGE_CEILING(s)                                                 \
        (((s) + PAGE_MASK) & ~PAGE_MASK)
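
PAGE_ADDR2BASE() and PAGE_CEILING() are plain power-of-two mask arithmetic over LG_PAGE. A quick standalone check, assuming 4 KiB pages (LG_PAGE = 12) purely for the example:

#include <stdint.h>
#include <stdio.h>

#define EX_LG_PAGE      12                      /* assume 4 KiB pages */
#define EX_PAGE         ((size_t)(1U << EX_LG_PAGE))
#define EX_PAGE_MASK    ((size_t)(EX_PAGE - 1))

/* Base of the page containing address a. */
#define EX_PAGE_ADDR2BASE(a)    ((void *)((uintptr_t)(a) & ~EX_PAGE_MASK))
/* Smallest page-size multiple that is >= s. */
#define EX_PAGE_CEILING(s)      (((s) + EX_PAGE_MASK) & ~EX_PAGE_MASK)

int
main(void)
{
        uintptr_t a = 0x12345;

        printf("base(0x%lx)   = 0x%lx\n", (unsigned long)a,
            (unsigned long)(uintptr_t)EX_PAGE_ADDR2BASE(a));
        printf("ceiling(5000) = %zu\n", EX_PAGE_CEILING((size_t)5000));
        return (0);
}
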
@@ -296,7 +338,7 @@ static const bool config_ivsalloc =
 #define        ALIGNMENT_CEILING(s, alignment)                                 \
        (((s) + (alignment - 1)) & (-(alignment)))
 
-/* Declare a variable length array */
+/* Declare a variable-length array. */
 #if __STDC_VERSION__ < 199901L
 #  ifdef _MSC_VER
 #    include <malloc.h>
@@ -314,12 +356,15 @@ static const bool config_ivsalloc =
 #  define VARIABLE_ARRAY(type, name, count) type name[(count)]
 #endif
 
+#include "jemalloc/internal/nstime.h"
 #include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ticker.h"
 #include "jemalloc/internal/ckh.h"
 #include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/smoothstep.h"
 #include "jemalloc/internal/stats.h"
 #include "jemalloc/internal/ctl.h"
 #include "jemalloc/internal/mutex.h"
@@ -329,9 +374,10 @@ static const bool config_ivsalloc =
 #include "jemalloc/internal/arena.h"
 #include "jemalloc/internal/bitmap.h"
 #include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
 #include "jemalloc/internal/quarantine.h"
@@ -341,23 +387,32 @@ static const bool config_ivsalloc =
 /******************************************************************************/
 #define        JEMALLOC_H_STRUCTS
 
+#include "jemalloc/internal/nstime.h"
 #include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ticker.h"
 #include "jemalloc/internal/ckh.h"
 #include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/smoothstep.h"
 #include "jemalloc/internal/stats.h"
 #include "jemalloc/internal/ctl.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/mb.h"
 #include "jemalloc/internal/bitmap.h"
+#define        JEMALLOC_ARENA_STRUCTS_A
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_A
 #include "jemalloc/internal/extent.h"
+#define        JEMALLOC_ARENA_STRUCTS_B
 #include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_B
 #include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
 #include "jemalloc/internal/quarantine.h"
@@ -370,48 +425,69 @@ static const bool config_ivsalloc =
 #define        JEMALLOC_H_EXTERNS
 
 extern bool    opt_abort;
-extern bool    opt_junk;
+extern const char      *opt_junk;
+extern bool    opt_junk_alloc;
+extern bool    opt_junk_free;
 extern size_t  opt_quarantine;
 extern bool    opt_redzone;
 extern bool    opt_utrace;
 extern bool    opt_xmalloc;
 extern bool    opt_zero;
-extern size_t  opt_narenas;
+extern unsigned        opt_narenas;
 
 extern bool    in_valgrind;
 
 /* Number of CPUs. */
-extern unsigned                ncpus;
+extern unsigned        ncpus;
 
-/* Protects arenas initialization (arenas, arenas_total). */
-extern malloc_mutex_t  arenas_lock;
 /*
  * Arenas that are used to service external requests.  Not all elements of the
  * arenas array are necessarily used; arenas are created lazily as needed.
- *
- * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
- * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
- * takes some action to create them and allocate from them.
  */
-extern arena_t         **arenas;
-extern unsigned                narenas_total;
-extern unsigned                narenas_auto; /* Read-only after initialization. */
+extern arena_t **arenas;
+
+/*
+ * index2size_tab encodes the same information as could be computed (at
+ * unacceptable cost in some code paths) by index2size_compute().
+ */
+extern size_t const    index2size_tab[NSIZES+1];
+/*
+ * size2index_tab is a compact lookup table that rounds request sizes up to
+ * size classes.  In order to reduce cache footprint, the table is compressed,
+ * and all accesses are via size2index().
+ */
+extern uint8_t const   size2index_tab[];
 
+void   *a0malloc(size_t size);
+void   a0dalloc(void *ptr);
+void   *bootstrap_malloc(size_t size);
+void   *bootstrap_calloc(size_t num, size_t size);
+void   bootstrap_free(void *ptr);
 arena_t        *arenas_extend(unsigned ind);
-arena_t        *choose_arena_hard(tsd_t *tsd);
+unsigned       narenas_total_get(void);
+arena_t        *arena_init(unsigned ind);
+arena_tdata_t  *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
+arena_t        *arena_choose_hard(tsd_t *tsd);
+void   arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
 void   thread_allocated_cleanup(tsd_t *tsd);
 void   thread_deallocated_cleanup(tsd_t *tsd);
 void   arena_cleanup(tsd_t *tsd);
+void   arenas_tdata_cleanup(tsd_t *tsd);
+void   narenas_tdata_cleanup(tsd_t *tsd);
+void   arenas_tdata_bypass_cleanup(tsd_t *tsd);
 void   jemalloc_prefork(void);
 void   jemalloc_postfork_parent(void);
 void   jemalloc_postfork_child(void);
 
+#include "jemalloc/internal/nstime.h"
 #include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ticker.h"
 #include "jemalloc/internal/ckh.h"
 #include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/smoothstep.h"
 #include "jemalloc/internal/stats.h"
 #include "jemalloc/internal/ctl.h"
 #include "jemalloc/internal/mutex.h"
@@ -420,9 +496,10 @@ void       jemalloc_postfork_child(void);
 #include "jemalloc/internal/extent.h"
 #include "jemalloc/internal/arena.h"
 #include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
 #include "jemalloc/internal/quarantine.h"
@@ -433,12 +510,15 @@ void      jemalloc_postfork_child(void);
 /******************************************************************************/
 #define        JEMALLOC_H_INLINES
 
+#include "jemalloc/internal/nstime.h"
 #include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ticker.h"
 #include "jemalloc/internal/ckh.h"
 #include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/smoothstep.h"
 #include "jemalloc/internal/stats.h"
 #include "jemalloc/internal/ctl.h"
 #include "jemalloc/internal/mutex.h"
@@ -446,25 +526,160 @@ void     jemalloc_postfork_child(void);
 #include "jemalloc/internal/mb.h"
 #include "jemalloc/internal/extent.h"
 #include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
 
-/*
- * Include arena.h the first time in order to provide inline functions for this
- * header's inlines.
- */
-#define        JEMALLOC_ARENA_INLINE_A
-#include "jemalloc/internal/arena.h"
-#undef JEMALLOC_ARENA_INLINE_A
-
 #ifndef JEMALLOC_ENABLE_INLINE
+szind_t        size2index_compute(size_t size);
+szind_t        size2index_lookup(size_t size);
+szind_t        size2index(size_t size);
+size_t index2size_compute(szind_t index);
+size_t index2size_lookup(szind_t index);
+size_t index2size(szind_t index);
+size_t s2u_compute(size_t size);
+size_t s2u_lookup(size_t size);
 size_t s2u(size_t size);
 size_t sa2u(size_t size, size_t alignment);
-unsigned       narenas_total_get(void);
-arena_t        *choose_arena(tsd_t *tsd, arena_t *arena);
+arena_t        *arena_choose(tsd_t *tsd, arena_t *arena);
+arena_tdata_t  *arena_tdata_get(tsd_t *tsd, unsigned ind,
+    bool refresh_if_missing);
+arena_t        *arena_get(unsigned ind, bool init_if_missing);
+ticker_t       *decay_ticker_get(tsd_t *tsd, unsigned ind);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
+JEMALLOC_INLINE szind_t
+size2index_compute(size_t size)
+{
+
+#if (NTBINS != 0)
+       if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
+               szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+               szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+               return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
+       }
+#endif
+       {
+               szind_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
+                   (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
+                   : lg_floor((size<<1)-1);
+               szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
+                   x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
+               szind_t grp = shift << LG_SIZE_CLASS_GROUP;
+
+               szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
+                   ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
+
+               size_t delta_inverse_mask = ZI(-1) << lg_delta;
+               szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
+                   ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
+
+               szind_t index = NTBINS + grp + mod;
+               return (index);
+       }
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+size2index_lookup(size_t size)
+{
+
+       assert(size <= LOOKUP_MAXCLASS);
+       {
+               szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
+               assert(ret == size2index_compute(size));
+               return (ret);
+       }
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+size2index(size_t size)
+{
+
+       assert(size > 0);
+       if (likely(size <= LOOKUP_MAXCLASS))
+               return (size2index_lookup(size));
+       return (size2index_compute(size));
+}
+
+JEMALLOC_INLINE size_t
+index2size_compute(szind_t index)
+{
+
+#if (NTBINS > 0)
+       if (index < NTBINS)
+               return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
+#endif
+       {
+               size_t reduced_index = index - NTBINS;
+               size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
+               size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
+                   1);
+
+               size_t grp_size_mask = ~((!!grp)-1);
+               size_t grp_size = ((ZU(1) << (LG_QUANTUM +
+                   (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
+
+               size_t shift = (grp == 0) ? 1 : grp;
+               size_t lg_delta = shift + (LG_QUANTUM-1);
+               size_t mod_size = (mod+1) << lg_delta;
+
+               size_t usize = grp_size + mod_size;
+               return (usize);
+       }
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+index2size_lookup(szind_t index)
+{
+       size_t ret = (size_t)index2size_tab[index];
+       assert(ret == index2size_compute(index));
+       return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+index2size(szind_t index)
+{
+
+       assert(index < NSIZES);
+       return (index2size_lookup(index));
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+s2u_compute(size_t size)
+{
+
+#if (NTBINS > 0)
+       if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
+               size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+               size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+               return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
+                   (ZU(1) << lg_ceil));
+       }
+#endif
+       {
+               size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
+                   (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
+                   : lg_floor((size<<1)-1);
+               size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
+                   ?  LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
+               size_t delta = ZU(1) << lg_delta;
+               size_t delta_mask = delta - 1;
+               size_t usize = (size + delta_mask) & ~delta_mask;
+               return (usize);
+       }
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+s2u_lookup(size_t size)
+{
+       size_t ret = index2size_lookup(size2index_lookup(size));
+
+       assert(ret == s2u_compute(size));
+       return (ret);
+}
+
 /*
  * Compute usable size that would result from allocating an object with the
  * specified size.
@@ -473,11 +688,10 @@ JEMALLOC_ALWAYS_INLINE size_t
 s2u(size_t size)
 {
 
-       if (size <= SMALL_MAXCLASS)
-               return (small_s2u(size));
-       if (size <= arena_maxclass)
-               return (PAGE_CEILING(size));
-       return (CHUNK_CEILING(size));
+       assert(size > 0);
+       if (likely(size <= LOOKUP_MAXCLASS))
+               return (s2u_lookup(size));
+       return (s2u_compute(size));
 }
 
 /*
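
size2index_compute() and s2u_compute() above encode jemalloc's size-class grammar: above the quantum, every doubling of the request size is split into 2^LG_SIZE_CLASS_GROUP classes, so rounding a request up means picking the class spacing (delta) for its size range and aligning up to it. A standalone sketch of that rounding, assuming LG_QUANTUM = 4 and LG_SIZE_CLASS_GROUP = 2 as on common configurations, and ignoring the tiny classes and overflow handling the real code performs:

#include <stddef.h>
#include <stdio.h>

#define EX_LG_QUANTUM           4       /* 16-byte quantum (assumed) */
#define EX_LG_SIZE_CLASS_GROUP  2       /* 4 classes per size doubling */

/* Floor of log2(x), x > 0. */
static size_t
ex_lg_floor(size_t x)
{
        size_t lg = 0;

        while (x >>= 1)
                lg++;
        return (lg);
}

/* Round a request up to its size class, mirroring s2u_compute(). */
static size_t
ex_s2u(size_t size)
{
        size_t x = ex_lg_floor((size << 1) - 1);        /* ceil(log2(size)) */
        size_t lg_delta = (x < EX_LG_SIZE_CLASS_GROUP + EX_LG_QUANTUM + 1) ?
            EX_LG_QUANTUM : x - EX_LG_SIZE_CLASS_GROUP - 1;
        size_t delta_mask = ((size_t)1 << lg_delta) - 1;

        return ((size + delta_mask) & ~delta_mask);
}

int
main(void)
{
        size_t sizes[] = {1, 17, 100, 200, 1000, 5000};
        size_t i;

        /* Expected: 16, 32, 112, 224, 1024, 5120. */
        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("%4zu -> %4zu\n", sizes[i], ex_s2u(sizes[i]));
        return (0);
}
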
@@ -491,247 +705,317 @@ sa2u(size_t size, size_t alignment)
 
        assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
 
-       /*
-        * Round size up to the nearest multiple of alignment.
-        *
-        * This done, we can take advantage of the fact that for each small
-        * size class, every object is aligned at the smallest power of two
-        * that is non-zero in the base two representation of the size.  For
-        * example:
-        *
-        *   Size |   Base 2 | Minimum alignment
-        *   -----+----------+------------------
-        *     96 |  1100000 |  32
-        *    144 | 10100000 |  32
-        *    192 | 11000000 |  64
-        */
-       usize = ALIGNMENT_CEILING(size, alignment);
-       /*
-        * (usize < size) protects against the combination of maximal
-        * alignment and size greater than maximal alignment.
-        */
-       if (usize < size) {
-               /* size_t overflow. */
-               return (0);
+       /* Try for a small size class. */
+       if (size <= SMALL_MAXCLASS && alignment < PAGE) {
+               /*
+                * Round size up to the nearest multiple of alignment.
+                *
+                * This done, we can take advantage of the fact that for each
+                * small size class, every object is aligned at the smallest
+                * power of two that is non-zero in the base two representation
+                * of the size.  For example:
+                *
+                *   Size |   Base 2 | Minimum alignment
+                *   -----+----------+------------------
+                *     96 |  1100000 |  32
+                *    144 | 10100000 |  32
+                *    192 | 11000000 |  64
+                */
+               usize = s2u(ALIGNMENT_CEILING(size, alignment));
+               if (usize < LARGE_MINCLASS)
+                       return (usize);
        }
 
-       if (usize <= arena_maxclass && alignment <= PAGE) {
-               if (usize <= SMALL_MAXCLASS)
-                       return (small_s2u(usize));
-               return (PAGE_CEILING(usize));
-       } else {
-               size_t run_size;
-
+       /* Try for a large size class. */
+       if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
                /*
                 * We can't achieve subpage alignment, so round up alignment
-                * permanently; it makes later calculations simpler.
+                * to the minimum that can actually be supported.
                 */
                alignment = PAGE_CEILING(alignment);
-               usize = PAGE_CEILING(size);
-               /*
-                * (usize < size) protects against very large sizes within
-                * PAGE of SIZE_T_MAX.
-                *
-                * (usize + alignment < usize) protects against the
-                * combination of maximal alignment and usize large enough
-                * to cause overflow.  This is similar to the first overflow
-                * check above, but it needs to be repeated due to the new
-                * usize value, which may now be *equal* to maximal
-                * alignment, whereas before we only detected overflow if the
-                * original size was *greater* than maximal alignment.
-                */
-               if (usize < size || usize + alignment < usize) {
-                       /* size_t overflow. */
-                       return (0);
-               }
+
+               /* Make sure result is a large size class. */
+               usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);
 
                /*
                 * Calculate the size of the over-size run that arena_palloc()
                 * would need to allocate in order to guarantee the alignment.
-                * If the run wouldn't fit within a chunk, round up to a huge
-                * allocation size.
                 */
-               run_size = usize + alignment - PAGE;
-               if (run_size <= arena_maxclass)
-                       return (PAGE_CEILING(usize));
-               return (CHUNK_CEILING(usize));
+               if (usize + large_pad + alignment - PAGE <= arena_maxrun)
+                       return (usize);
        }
-}
 
-JEMALLOC_INLINE unsigned
-narenas_total_get(void)
-{
-       unsigned narenas;
+       /* Huge size class.  Beware of overflow. */
 
-       malloc_mutex_lock(&arenas_lock);
-       narenas = narenas_total;
-       malloc_mutex_unlock(&arenas_lock);
+       if (unlikely(alignment > HUGE_MAXCLASS))
+               return (0);
 
-       return (narenas);
+       /*
+        * We can't achieve subchunk alignment, so round up alignment to the
+        * minimum that can actually be supported.
+        */
+       alignment = CHUNK_CEILING(alignment);
+
+       /* Make sure result is a huge size class. */
+       if (size <= chunksize)
+               usize = chunksize;
+       else {
+               usize = s2u(size);
+               if (usize < size) {
+                       /* size_t overflow. */
+                       return (0);
+               }
+       }
+
+       /*
+        * Calculate the multi-chunk mapping that huge_palloc() would need in
+        * order to guarantee the alignment.
+        */
+       if (usize + alignment - PAGE < usize) {
+               /* size_t overflow. */
+               return (0);
+       }
+       return (usize);
 }
 
 /* Choose an arena based on a per-thread value. */
 JEMALLOC_INLINE arena_t *
-choose_arena(tsd_t *tsd, arena_t *arena)
+arena_choose(tsd_t *tsd, arena_t *arena)
 {
        arena_t *ret;
 
        if (arena != NULL)
                return (arena);
 
-       if (unlikely((ret = tsd_arena_get(tsd)) == NULL)) {
-               ret = choose_arena_hard(tsd);
-               assert(ret != NULL);
+       if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
+               ret = arena_choose_hard(tsd);
+
+       return (ret);
+}
+
+JEMALLOC_INLINE arena_tdata_t *
+arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
+{
+       arena_tdata_t *tdata;
+       arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
+
+       if (unlikely(arenas_tdata == NULL)) {
+               /* arenas_tdata hasn't been initialized yet. */
+               return (arena_tdata_get_hard(tsd, ind));
+       }
+       if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
+               /*
+                * ind is invalid, cache is old (too small), or tdata to be
+                * initialized.
+                */
+               return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
+                   NULL);
        }
 
+       tdata = &arenas_tdata[ind];
+       if (likely(tdata != NULL) || !refresh_if_missing)
+               return (tdata);
+       return (arena_tdata_get_hard(tsd, ind));
+}
+
+JEMALLOC_INLINE arena_t *
+arena_get(unsigned ind, bool init_if_missing)
+{
+       arena_t *ret;
+
+       assert(ind <= MALLOCX_ARENA_MAX);
+
+       ret = arenas[ind];
+       if (unlikely(ret == NULL)) {
+               ret = atomic_read_p((void *)&arenas[ind]);
+               if (init_if_missing && unlikely(ret == NULL))
+                       ret = arena_init(ind);
+       }
        return (ret);
 }
+
+JEMALLOC_INLINE ticker_t *
+decay_ticker_get(tsd_t *tsd, unsigned ind)
+{
+       arena_tdata_t *tdata;
+
+       tdata = arena_tdata_get(tsd, ind, true);
+       if (unlikely(tdata == NULL))
+               return (NULL);
+       return (&tdata->decay_ticker);
+}
 #endif
 
 #include "jemalloc/internal/bitmap.h"
-#include "jemalloc/internal/rtree.h"
 /*
- * Include arena.h the second and third times in order to resolve circular
- * dependencies with tcache.h.
+ * Include portions of arena.h interleaved with tcache.h in order to resolve
+ * circular dependencies.
  */
-#define        JEMALLOC_ARENA_INLINE_B
+#define        JEMALLOC_ARENA_INLINE_A
 #include "jemalloc/internal/arena.h"
-#undef JEMALLOC_ARENA_INLINE_B
+#undef JEMALLOC_ARENA_INLINE_A
 #include "jemalloc/internal/tcache.h"
-#define        JEMALLOC_ARENA_INLINE_C
+#define        JEMALLOC_ARENA_INLINE_B
 #include "jemalloc/internal/arena.h"
-#undef JEMALLOC_ARENA_INLINE_C
+#undef JEMALLOC_ARENA_INLINE_B
 #include "jemalloc/internal/hash.h"
 #include "jemalloc/internal/quarantine.h"
 
 #ifndef JEMALLOC_ENABLE_INLINE
-void   *imalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena);
-void   *imalloc(tsd_t *tsd, size_t size);
-void   *icalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena);
-void   *icalloc(tsd_t *tsd, size_t size);
+arena_t        *iaalloc(const void *ptr);
+size_t isalloc(const void *ptr, bool demote);
+void   *iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero,
+    tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
+void   *imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache,
+    arena_t *arena);
+void   *imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path);
+void   *icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache,
+    arena_t *arena);
+void   *icalloc(tsd_t *tsd, size_t size, szind_t ind);
+void   *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+    tcache_t *tcache, bool is_metadata, arena_t *arena);
 void   *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
-    bool try_tcache, arena_t *arena);
+    tcache_t *tcache, arena_t *arena);
 void   *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
-size_t isalloc(const void *ptr, bool demote);
 size_t ivsalloc(const void *ptr, bool demote);
 size_t u2rz(size_t usize);
 size_t p2rz(const void *ptr);
-void   idalloct(tsd_t *tsd, void *ptr, bool try_tcache);
-void   isdalloct(tsd_t *tsd, void *ptr, size_t size, bool try_tcache);
+void   idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
+    bool slow_path);
+void   idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
 void   idalloc(tsd_t *tsd, void *ptr);
-void   iqalloc(tsd_t *tsd, void *ptr, bool try_tcache);
-void   isqalloc(tsd_t *tsd, void *ptr, size_t size, bool try_tcache);
+void   iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
+void   isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
+void   isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
 void   *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
-    bool try_tcache_dalloc, arena_t *arena);
-void   *iralloct(tsd_t *tsd, void *ptr, size_t size, size_t alignment,
-    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
-void   *iralloc(tsd_t *tsd, void *ptr, size_t size, size_t alignment,
-    bool zero);
-bool   ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
-    bool zero);
+    size_t extra, size_t alignment, bool zero, tcache_t *tcache,
+    arena_t *arena);
+void   *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
+void   *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+    size_t alignment, bool zero);
+bool   ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+    size_t extra, size_t alignment, bool zero);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
+JEMALLOC_ALWAYS_INLINE arena_t *
+iaalloc(const void *ptr)
+{
+
+       assert(ptr != NULL);
+
+       return (arena_aalloc(ptr));
+}
+
+/*
+ * Typical usage:
+ *   void *ptr = [...]
+ *   size_t sz = isalloc(ptr, config_prof);
+ */
+JEMALLOC_ALWAYS_INLINE size_t
+isalloc(const void *ptr, bool demote)
+{
+
+       assert(ptr != NULL);
+       /* Demotion only makes sense if config_prof is true. */
+       assert(config_prof || !demote);
+
+       return (arena_salloc(ptr, demote));
+}
+
 JEMALLOC_ALWAYS_INLINE void *
-imalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
+iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero, tcache_t *tcache,
+    bool is_metadata, arena_t *arena, bool slow_path)
 {
+       void *ret;
 
        assert(size != 0);
 
-       if (size <= arena_maxclass)
-               return (arena_malloc(tsd, arena, size, false, try_tcache));
-       else
-               return (huge_malloc(tsd, arena, size, false));
+       ret = arena_malloc(tsd, arena, size, ind, zero, tcache, slow_path);
+       if (config_stats && is_metadata && likely(ret != NULL)) {
+               arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
+                   config_prof));
+       }
+       return (ret);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-imalloc(tsd_t *tsd, size_t size)
+imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena)
 {
 
-       return (imalloct(tsd, size, true, NULL));
+       return (iallocztm(tsd, size, ind, false, tcache, false, arena, true));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-icalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
+imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path)
 {
 
-       if (size <= arena_maxclass)
-               return (arena_malloc(tsd, arena, size, true, try_tcache));
-       else
-               return (huge_malloc(tsd, arena, size, true));
+       return (iallocztm(tsd, size, ind, false, tcache_get(tsd, true), false,
+           NULL, slow_path));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-icalloc(tsd_t *tsd, size_t size)
+icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena)
 {
 
-       return (icalloct(tsd, size, true, NULL));
+       return (iallocztm(tsd, size, ind, true, tcache, false, arena, true));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, bool try_tcache,
-    arena_t *arena)
+icalloc(tsd_t *tsd, size_t size, szind_t ind)
+{
+
+       return (iallocztm(tsd, size, ind, true, tcache_get(tsd, true), false,
+           NULL, true));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+    tcache_t *tcache, bool is_metadata, arena_t *arena)
 {
        void *ret;
 
        assert(usize != 0);
        assert(usize == sa2u(usize, alignment));
 
-       if (usize <= arena_maxclass && alignment <= PAGE)
-               ret = arena_malloc(tsd, arena, usize, zero, try_tcache);
-       else {
-               if (usize <= arena_maxclass) {
-                       ret = arena_palloc(choose_arena(tsd, arena), usize,
-                           alignment, zero);
-               } else if (alignment <= chunksize)
-                       ret = huge_malloc(tsd, arena, usize, zero);
-               else
-                       ret = huge_palloc(tsd, arena, usize, alignment, zero);
-       }
-
+       ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
        assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
+       if (config_stats && is_metadata && likely(ret != NULL)) {
+               arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
+                   config_prof));
+       }
        return (ret);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
+ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+    tcache_t *tcache, arena_t *arena)
 {
 
-       return (ipalloct(tsd, usize, alignment, zero, true, NULL));
+       return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
 }
 
-/*
- * Typical usage:
- *   void *ptr = [...]
- *   size_t sz = isalloc(ptr, config_prof);
- */
-JEMALLOC_ALWAYS_INLINE size_t
-isalloc(const void *ptr, bool demote)
+JEMALLOC_ALWAYS_INLINE void *
+ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
 {
-       size_t ret;
-       arena_chunk_t *chunk;
-
-       assert(ptr != NULL);
-       /* Demotion only makes sense if config_prof is true. */
-       assert(config_prof || !demote);
 
-       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-       if (chunk != ptr)
-               ret = arena_salloc(ptr, demote);
-       else
-               ret = huge_salloc(ptr);
-
-       return (ret);
+       return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd, true),
+           false, NULL));
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
 ivsalloc(const void *ptr, bool demote)
 {
+       extent_node_t *node;
 
        /* Return 0 if ptr is not within a chunk managed by jemalloc. */
-       if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
+       node = chunk_lookup(ptr, false);
+       if (node == NULL)
                return (0);
+       /* Only arena chunks should be looked up via interior pointers. */
+       assert(extent_node_addr_get(node) == ptr ||
+           extent_node_achunk_get(node));
 
        return (isalloc(ptr, demote));
 }
@@ -742,7 +1026,7 @@ u2rz(size_t usize)
        size_t ret;
 
        if (usize <= SMALL_MAXCLASS) {
-               size_t binind = small_size2bin(usize);
+               szind_t binind = size2index(usize);
                ret = arena_bin_info[binind].redzone_size;
        } else
                ret = 0;
@@ -759,81 +1043,79 @@ p2rz(const void *ptr)
 }
 
 JEMALLOC_ALWAYS_INLINE void
-idalloct(tsd_t *tsd, void *ptr, bool try_tcache)
+idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
+    bool slow_path)
 {
-       arena_chunk_t *chunk;
 
        assert(ptr != NULL);
+       if (config_stats && is_metadata) {
+               arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
+                   config_prof));
+       }
 
-       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-       if (chunk != ptr)
-               arena_dalloc(tsd, chunk, ptr, try_tcache);
-       else
-               huge_dalloc(ptr);
+       arena_dalloc(tsd, ptr, tcache, slow_path);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-isdalloct(tsd_t *tsd, void *ptr, size_t size, bool try_tcache)
+idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
 {
-       arena_chunk_t *chunk;
-
-       assert(ptr != NULL);
 
-       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-       if (chunk != ptr)
-               arena_sdalloc(tsd, chunk, ptr, size, try_tcache);
-       else
-               huge_dalloc(ptr);
+       idalloctm(tsd, ptr, tcache, false, true);
 }
 
 JEMALLOC_ALWAYS_INLINE void
 idalloc(tsd_t *tsd, void *ptr)
 {
 
-       idalloct(tsd, ptr, true);
+       idalloctm(tsd, ptr, tcache_get(tsd, false), false, true);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-iqalloc(tsd_t *tsd, void *ptr, bool try_tcache)
+iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
 {
 
-       if (config_fill && unlikely(opt_quarantine))
+       if (slow_path && config_fill && unlikely(opt_quarantine))
                quarantine(tsd, ptr);
        else
-               idalloct(tsd, ptr, try_tcache);
+               idalloctm(tsd, ptr, tcache, false, slow_path);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-isqalloc(tsd_t *tsd, void *ptr, size_t size, bool try_tcache)
+isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
+{
+
+       arena_sdalloc(tsd, ptr, size, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
 {
 
        if (config_fill && unlikely(opt_quarantine))
                quarantine(tsd, ptr);
        else
-               isdalloct(tsd, ptr, size, try_tcache);
+               isdalloct(tsd, ptr, size, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
 iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
-    bool try_tcache_dalloc, arena_t *arena)
+    size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
 {
        void *p;
        size_t usize, copysize;
 
        usize = sa2u(size + extra, alignment);
-       if (usize == 0)
+       if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
                return (NULL);
-       p = ipalloct(tsd, usize, alignment, zero, try_tcache_alloc, arena);
+       p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
        if (p == NULL) {
                if (extra == 0)
                        return (NULL);
                /* Try again, without extra this time. */
                usize = sa2u(size, alignment);
-               if (usize == 0)
+               if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
                        return (NULL);
-               p = ipalloct(tsd, usize, alignment, zero, try_tcache_alloc,
-                   arena);
+               p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
                if (p == NULL)
                        return (NULL);
        }
@@ -843,21 +1125,18 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
         */
        copysize = (size < oldsize) ? size : oldsize;
        memcpy(p, ptr, copysize);
-       iqalloc(tsd, ptr, try_tcache_dalloc);
+       isqalloc(tsd, ptr, oldsize, tcache);
        return (p);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-iralloct(tsd_t *tsd, void *ptr, size_t size, size_t alignment, bool zero,
-    bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
+iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
+    bool zero, tcache_t *tcache, arena_t *arena)
 {
-       size_t oldsize;
 
        assert(ptr != NULL);
        assert(size != 0);
 
-       oldsize = isalloc(ptr, config_prof);
-
        if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
            != 0) {
                /*
@@ -865,44 +1144,37 @@ iralloct(tsd_t *tsd, void *ptr, size_t size, size_t alignment, bool zero,
                 * and copy.
                 */
                return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
-                   zero, try_tcache_alloc, try_tcache_dalloc, arena));
+                   zero, tcache, arena));
        }
 
-       if (size <= arena_maxclass) {
-               return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0,
-                   alignment, zero, try_tcache_alloc, try_tcache_dalloc));
-       } else {
-               return (huge_ralloc(tsd, arena, ptr, oldsize, size, 0,
-                   alignment, zero, try_tcache_dalloc));
-       }
+       return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
+           tcache));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-iralloc(tsd_t *tsd, void *ptr, size_t size, size_t alignment, bool zero)
+iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
+    bool zero)
 {
 
-       return (iralloct(tsd, ptr, size, alignment, zero, true, true, NULL));
+       return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
+           tcache_get(tsd, true), NULL));
 }
 
 JEMALLOC_ALWAYS_INLINE bool
-ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
+ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra,
+    size_t alignment, bool zero)
 {
-       size_t oldsize;
 
        assert(ptr != NULL);
        assert(size != 0);
 
-       oldsize = isalloc(ptr, config_prof);
        if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
            != 0) {
                /* Existing object alignment is inadequate. */
                return (true);
        }
 
-       if (size <= arena_maxclass)
-               return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
-       else
-               return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
+       return (arena_ralloc_no_move(tsd, ptr, oldsize, size, extra, zero));
 }
 #endif
 
index fa590404708baf8129564203da583fe829be60bb..2b8ca5d04a61c45c15606f1445403082d8db126a 100644 (file)
@@ -4,14 +4,8 @@
 #include <math.h>
 #ifdef _WIN32
 #  include <windows.h>
-#  define ENOENT ERROR_PATH_NOT_FOUND
-#  define EINVAL ERROR_BAD_ARGUMENTS
-#  define EAGAIN ERROR_OUTOFMEMORY
-#  define EPERM  ERROR_WRITE_FAULT
-#  define EFAULT ERROR_INVALID_ADDRESS
-#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
-#  undef ERANGE
-#  define ERANGE ERROR_INVALID_DATA
+#  include "msvc_compat/windows_extra.h"
+
 #else
 #  include <sys/param.h>
 #  include <sys/mman.h>
@@ -24,6 +18,7 @@
 #  endif
 #  include <pthread.h>
 #  include <errno.h>
+#  include <sys/time.h>
 #endif
 #include <sys/types.h>
 
@@ -40,7 +35,6 @@
 #ifndef offsetof
 #  define offsetof(type, member)       ((size_t)&(((type *)NULL)->member))
 #endif
-#include <inttypes.h>
 #include <string.h>
 #include <strings.h>
 #include <ctype.h>
@@ -50,8 +44,19 @@ typedef intptr_t ssize_t;
 #  define PATH_MAX 1024
 #  define STDERR_FILENO 2
 #  define __func__ __FUNCTION__
-/* Disable warnings about deprecated system functions */
+#  ifdef JEMALLOC_HAS_RESTRICT
+#    define restrict __restrict
+#  endif
+/* Disable warnings about deprecated system functions. */
 #  pragma warning(disable: 4996)
+#if _MSC_VER < 1800
+static int
+isblank(int c)
+{

+
+       return (c == '\t' || c == ' ');
+}
+#endif
 #else
 #  include <unistd.h>
 #endif
index fd85e5cf1323683798ca80e8d3096367d4422c27..2c753719811c4e52ed74bd54eaef63e597a6c9c3 100644 (file)
@@ -22,6 +22,9 @@
  */
 #undef CPU_SPINWAIT
 
+/* Defined if C11 atomics are available. */
+#undef JEMALLOC_C11ATOMICS
+
 /* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
 #undef JEMALLOC_ATOMIC9
 
@@ -35,7 +38,7 @@
  * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
  * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
  * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
- * functions are defined in libgcc instead of being inlines)
+ * functions are defined in libgcc instead of being inlines).
  */
 #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
 
@@ -43,7 +46,7 @@
  * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
  * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
  * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
- * functions are defined in libgcc instead of being inlines)
+ * functions are defined in libgcc instead of being inlines).
  */
 #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
 
  */
 #undef JEMALLOC_OSSPIN
 
+/*
+ * Defined if secure_getenv(3) is available.
+ */
+#undef JEMALLOC_HAVE_SECURE_GETENV
+
+/*
+ * Defined if issetugid(2) is available.
+ */
+#undef JEMALLOC_HAVE_ISSETUGID
+
 /*
  * Defined if _malloc_thread_cleanup() exists.  At least in the case of
  * FreeBSD, pthread_key_create() allocates, which if used during malloc
 /* Support lazy locking (avoid locking unless a second thread is launched). */
 #undef JEMALLOC_LAZY_LOCK
 
-/* One page is 2^STATIC_PAGE_SHIFT bytes. */
-#undef STATIC_PAGE_SHIFT
+/* Minimum size class to support is 2^LG_TINY_MIN bytes. */
+#undef LG_TINY_MIN
+
+/*
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
+ * classes).
+ */
+#undef LG_QUANTUM
+
+/* One page is 2^LG_PAGE bytes. */
+#undef LG_PAGE
+
+/*
+ * If defined, adjacent virtual memory mappings with identical attributes
+ * automatically coalesce, and they fragment when changes are made to subranges.
+ * This is the normal order of things for mmap()/munmap(), but on Windows
+ * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
+ * mappings do *not* coalesce/fragment.
+ */
+#undef JEMALLOC_MAPS_COALESCE
 
 /*
  * If defined, use munmap() to unmap freed chunks, rather than storing them for
 #undef JEMALLOC_TLS
 
 /*
- * ffs()/ffsl() functions to use for bitmapping.  Don't use these directly;
- * instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h.
+ * ffs*() functions to use for bitmapping.  Don't use these directly; instead,
+ * use ffs_*() from util.h.
  */
+#undef JEMALLOC_INTERNAL_FFSLL
 #undef JEMALLOC_INTERNAL_FFSL
 #undef JEMALLOC_INTERNAL_FFS
 
  */
 #undef JEMALLOC_IVSALLOC
 
+/*
+ * If defined, explicitly attempt to more uniformly distribute large allocation
+ * pointer alignments across all cache indices.
+ */
+#undef JEMALLOC_CACHE_OBLIVIOUS
+
 /*
  * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
  */
 #undef JEMALLOC_PURGE_MADVISE_DONTNEED
 #undef JEMALLOC_PURGE_MADVISE_FREE
 
-/*
- * Define if operating system has alloca.h header.
- */
+/* Define if operating system has alloca.h header. */
 #undef JEMALLOC_HAS_ALLOCA_H
 
 /* C99 restrict keyword supported. */
 /* sizeof(long) == 2^LG_SIZEOF_LONG. */
 #undef LG_SIZEOF_LONG
 
+/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
+#undef LG_SIZEOF_LONG_LONG
+
 /* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
 #undef LG_SIZEOF_INTMAX_T
 
-/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook) */
+/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
 #undef JEMALLOC_GLIBC_MALLOC_HOOK
 
-/* glibc memalign hook */
+/* glibc memalign hook. */
 #undef JEMALLOC_GLIBC_MEMALIGN_HOOK
 
-/* adaptive mutex support in pthreads */
+/* Adaptive mutex support in pthreads. */
 #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
 
+/*
+ * If defined, jemalloc symbols are not exported (doesn't work when
+ * JEMALLOC_PREFIX is not defined).
+ */
+#undef JEMALLOC_EXPORT
+
+/* config.malloc_conf options string. */
+#undef JEMALLOC_CONFIG_MALLOC_CONF
+
 #endif /* JEMALLOC_INTERNAL_DEFS_H_ */
index 8a03d82504ecc5e3af73d0517dfb5ae2b7ea2d48..f051f2917b3b7611024a3b2cfb487b2875f28466 100644 (file)
@@ -26,7 +26,11 @@ typedef struct malloc_mutex_s malloc_mutex_t;
 
 struct malloc_mutex_s {
 #ifdef _WIN32
+#  if _WIN32_WINNT >= 0x0600
+       SRWLOCK                 lock;
+#  else
        CRITICAL_SECTION        lock;
+#  endif
 #elif (defined(JEMALLOC_OSSPIN))
        OSSpinLock              lock;
 #elif (defined(JEMALLOC_MUTEX_INIT_CB))
@@ -70,7 +74,11 @@ malloc_mutex_lock(malloc_mutex_t *mutex)
 
        if (isthreaded) {
 #ifdef _WIN32
+#  if _WIN32_WINNT >= 0x0600
+               AcquireSRWLockExclusive(&mutex->lock);
+#  else
                EnterCriticalSection(&mutex->lock);
+#  endif
 #elif (defined(JEMALLOC_OSSPIN))
                OSSpinLockLock(&mutex->lock);
 #else
@@ -85,7 +93,11 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)
 
        if (isthreaded) {
 #ifdef _WIN32
+#  if _WIN32_WINNT >= 0x0600
+               ReleaseSRWLockExclusive(&mutex->lock);
+#  else
                LeaveCriticalSection(&mutex->lock);
+#  endif
 #elif (defined(JEMALLOC_OSSPIN))
                OSSpinLockUnlock(&mutex->lock);
 #else
diff --git a/src/jemalloc/include/jemalloc/internal/nstime.h b/src/jemalloc/include/jemalloc/internal/nstime.h
new file mode 100644 (file)
index 0000000..bd04f04
--- /dev/null
@@ -0,0 +1,48 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#define JEMALLOC_CLOCK_GETTIME defined(_POSIX_MONOTONIC_CLOCK) \
+    && _POSIX_MONOTONIC_CLOCK >= 0
+
+typedef struct nstime_s nstime_t;
+
+/* Maximum supported number of seconds (~584 years). */
+#define        NSTIME_SEC_MAX  18446744072
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct nstime_s {
+       uint64_t        ns;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void   nstime_init(nstime_t *time, uint64_t ns);
+void   nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
+uint64_t       nstime_ns(const nstime_t *time);
+uint64_t       nstime_sec(const nstime_t *time);
+uint64_t       nstime_nsec(const nstime_t *time);
+void   nstime_copy(nstime_t *time, const nstime_t *source);
+int    nstime_compare(const nstime_t *a, const nstime_t *b);
+void   nstime_add(nstime_t *time, const nstime_t *addend);
+void   nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
+void   nstime_imultiply(nstime_t *time, uint64_t multiplier);
+void   nstime_idivide(nstime_t *time, uint64_t divisor);
+uint64_t       nstime_divide(const nstime_t *time, const nstime_t *divisor);
+#ifdef JEMALLOC_JET
+typedef bool (nstime_update_t)(nstime_t *);
+extern nstime_update_t *nstime_update;
+#else
+bool   nstime_update(nstime_t *time);
+#endif
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
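
The new nstime module keeps a timestamp as a single uint64_t nanosecond count, which is why NSTIME_SEC_MAX tops out near 2^64 / 10^9 seconds (about 584 years). A hedged miniature of a few of the declared operations, only to show the representation; it is not the real implementation, which also provides monotonic-clock updates:

#include <stdint.h>
#include <stdio.h>

#define EX_BILLION      UINT64_C(1000000000)

typedef struct {
        uint64_t        ns;     /* seconds and nanoseconds folded together */
} ex_nstime_t;

static void
ex_nstime_init2(ex_nstime_t *time, uint64_t sec, uint64_t nsec)
{

        time->ns = sec * EX_BILLION + nsec;
}

static uint64_t
ex_nstime_sec(const ex_nstime_t *time)
{

        return (time->ns / EX_BILLION);
}

static void
ex_nstime_add(ex_nstime_t *time, const ex_nstime_t *addend)
{

        time->ns += addend->ns;
}

int
main(void)
{
        ex_nstime_t a, b;

        ex_nstime_init2(&a, 2, 500000000);      /* 2.5 s */
        ex_nstime_init2(&b, 1, 750000000);      /* 1.75 s */
        ex_nstime_add(&a, &b);
        printf("sum: %llu s (%llu ns total)\n",
            (unsigned long long)ex_nstime_sec(&a), (unsigned long long)a.ns);
        return (0);
}
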
diff --git a/src/jemalloc/include/jemalloc/internal/pages.h b/src/jemalloc/include/jemalloc/internal/pages.h
new file mode 100644 (file)
index 0000000..da7eb96
--- /dev/null
@@ -0,0 +1,26 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void   *pages_map(void *addr, size_t size);
+void   pages_unmap(void *addr, size_t size);
+void   *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
+    size_t size);
+bool   pages_commit(void *addr, size_t size);
+bool   pages_decommit(void *addr, size_t size);
+bool   pages_purge(void *addr, size_t size);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
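Editor's note (not part of the diff): the prototypes above describe a page-level mapping layer. A sketch of the implied lifecycle, assuming page/chunk-aligned sizes and jemalloc's usual convention that a true return from the bool functions means failure:

    size_t size = 4 << 20;                     /* one 4 MiB chunk */
    void *addr = pages_map(NULL, size);        /* reserve and map */
    if (addr != NULL) {
            if (pages_decommit(addr, size)) {
                    /* Decommit unsupported here; purge the pages instead. */
                    pages_purge(addr, size);
            }
            pages_unmap(addr, size);           /* release the reservation */
    }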
index 4ea9a953d7eb88b1ad473f0477e1ff62d5d10a30..5880996adfc02975895d67cfbb67bbed3e72db54 100644 (file)
@@ -1,50 +1,83 @@
-a0calloc
-a0free
+a0dalloc
 a0malloc
+arena_aalloc
 arena_alloc_junk_small
+arena_basic_stats_merge
 arena_bin_index
 arena_bin_info
 arena_bitselm_get
 arena_boot
+arena_choose
+arena_choose_hard
 arena_chunk_alloc_huge
+arena_chunk_cache_maybe_insert
+arena_chunk_cache_maybe_remove
 arena_chunk_dalloc_huge
+arena_chunk_ralloc_huge_expand
+arena_chunk_ralloc_huge_shrink
+arena_chunk_ralloc_huge_similar
 arena_cleanup
 arena_dalloc
 arena_dalloc_bin
-arena_dalloc_bin_locked
+arena_dalloc_bin_junked_locked
 arena_dalloc_junk_large
 arena_dalloc_junk_small
 arena_dalloc_large
-arena_dalloc_large_locked
+arena_dalloc_large_junked_locked
 arena_dalloc_small
+arena_decay_tick
+arena_decay_ticks
+arena_decay_time_default_get
+arena_decay_time_default_set
+arena_decay_time_get
+arena_decay_time_set
 arena_dss_prec_get
 arena_dss_prec_set
+arena_get
+arena_init
+arena_lg_dirty_mult_default_get
+arena_lg_dirty_mult_default_set
+arena_lg_dirty_mult_get
+arena_lg_dirty_mult_set
 arena_malloc
+arena_malloc_hard
 arena_malloc_large
-arena_malloc_small
 arena_mapbits_allocated_get
 arena_mapbits_binind_get
+arena_mapbits_decommitted_get
 arena_mapbits_dirty_get
 arena_mapbits_get
+arena_mapbits_internal_set
 arena_mapbits_large_binind_set
 arena_mapbits_large_get
 arena_mapbits_large_set
 arena_mapbits_large_size_get
+arena_mapbits_size_decode
+arena_mapbits_size_encode
 arena_mapbits_small_runind_get
 arena_mapbits_small_set
 arena_mapbits_unallocated_set
 arena_mapbits_unallocated_size_get
 arena_mapbits_unallocated_size_set
 arena_mapbits_unzeroed_get
-arena_mapbits_unzeroed_set
 arena_mapbitsp_get
 arena_mapbitsp_read
 arena_mapbitsp_write
-arena_maxclass
+arena_maxrun
+arena_maybe_purge
+arena_metadata_allocated_add
+arena_metadata_allocated_get
+arena_metadata_allocated_sub
+arena_migrate
 arena_miscelm_get
 arena_miscelm_to_pageind
 arena_miscelm_to_rpages
 arena_new
+arena_node_alloc
+arena_node_dalloc
+arena_nthreads_dec
+arena_nthreads_get
+arena_nthreads_inc
 arena_palloc
 arena_postfork_child
 arena_postfork_parent
@@ -54,13 +87,15 @@ arena_prof_accum_impl
 arena_prof_accum_locked
 arena_prof_promoted
 arena_prof_tctx_get
+arena_prof_tctx_reset
 arena_prof_tctx_set
 arena_ptr_small_binind_get
-arena_purge_all
+arena_purge
 arena_quarantine_junk_small
 arena_ralloc
 arena_ralloc_junk_large
 arena_ralloc_no_move
+arena_rd_to_miscelm
 arena_redzone_corruption
 arena_run_regind
 arena_run_to_miscelm
@@ -68,80 +103,88 @@ arena_salloc
 arena_sdalloc
 arena_stats_merge
 arena_tcache_fill_small
+arena_tdata_get
+arena_tdata_get_hard
 arenas
-arenas_cleanup
-arenas_extend
-arenas_lock
+arenas_tdata_bypass_cleanup
+arenas_tdata_cleanup
+atomic_add_p
 atomic_add_u
 atomic_add_uint32
 atomic_add_uint64
 atomic_add_z
+atomic_cas_p
+atomic_cas_u
+atomic_cas_uint32
+atomic_cas_uint64
+atomic_cas_z
+atomic_sub_p
 atomic_sub_u
 atomic_sub_uint32
 atomic_sub_uint64
 atomic_sub_z
 base_alloc
 base_boot
-base_calloc
-base_node_alloc
-base_node_dalloc
 base_postfork_child
 base_postfork_parent
 base_prefork
+base_stats_get
 bitmap_full
 bitmap_get
 bitmap_info_init
-bitmap_info_ngroups
 bitmap_init
 bitmap_set
 bitmap_sfu
 bitmap_size
 bitmap_unset
+bootstrap_calloc
+bootstrap_free
+bootstrap_malloc
 bt_init
 buferror
-choose_arena
-choose_arena_hard
-chunk_alloc_arena
 chunk_alloc_base
-chunk_alloc_default
+chunk_alloc_cache
 chunk_alloc_dss
 chunk_alloc_mmap
+chunk_alloc_wrapper
 chunk_boot
-chunk_dalloc_default
+chunk_dalloc_arena
+chunk_dalloc_cache
 chunk_dalloc_mmap
+chunk_dalloc_wrapper
+chunk_deregister
 chunk_dss_boot
 chunk_dss_postfork_child
 chunk_dss_postfork_parent
 chunk_dss_prec_get
 chunk_dss_prec_set
 chunk_dss_prefork
+chunk_hooks_default
+chunk_hooks_get
+chunk_hooks_set
 chunk_in_dss
+chunk_lookup
 chunk_npages
 chunk_postfork_child
 chunk_postfork_parent
 chunk_prefork
-chunk_unmap
-chunks_mtx
+chunk_purge_arena
+chunk_purge_wrapper
+chunk_register
 chunks_rtree
 chunksize
 chunksize_mask
-ckh_bucket_search
 ckh_count
 ckh_delete
-ckh_evict_reloc_insert
 ckh_insert
-ckh_isearch
 ckh_iter
 ckh_new
 ckh_pointer_hash
 ckh_pointer_keycomp
-ckh_rebuild
 ckh_remove
 ckh_search
 ckh_string_hash
 ckh_string_keycomp
-ckh_try_bucket_insert
-ckh_try_insert
 ctl_boot
 ctl_bymib
 ctl_byname
@@ -149,7 +192,25 @@ ctl_nametomib
 ctl_postfork_child
 ctl_postfork_parent
 ctl_prefork
+decay_ticker_get
 dss_prec_names
+extent_node_achunk_get
+extent_node_achunk_set
+extent_node_addr_get
+extent_node_addr_set
+extent_node_arena_get
+extent_node_arena_set
+extent_node_dirty_insert
+extent_node_dirty_linkage_init
+extent_node_dirty_remove
+extent_node_init
+extent_node_prof_tctx_get
+extent_node_prof_tctx_set
+extent_node_size_get
+extent_node_size_set
+extent_node_zeroed_get
+extent_node_zeroed_set
+extent_tree_ad_empty
 extent_tree_ad_first
 extent_tree_ad_insert
 extent_tree_ad_iter
@@ -166,6 +227,7 @@ extent_tree_ad_reverse_iter
 extent_tree_ad_reverse_iter_recurse
 extent_tree_ad_reverse_iter_start
 extent_tree_ad_search
+extent_tree_szad_empty
 extent_tree_szad_first
 extent_tree_szad_insert
 extent_tree_szad_iter
@@ -182,6 +244,12 @@ extent_tree_szad_reverse_iter
 extent_tree_szad_reverse_iter_recurse
 extent_tree_szad_reverse_iter_start
 extent_tree_szad_search
+ffs_llu
+ffs_lu
+ffs_u
+ffs_u32
+ffs_u64
+ffs_zu
 get_errno
 hash
 hash_fmix_32
@@ -193,44 +261,48 @@ hash_rotl_64
 hash_x64_128
 hash_x86_128
 hash_x86_32
-huge_allocated
-huge_boot
+huge_aalloc
 huge_dalloc
 huge_dalloc_junk
 huge_malloc
-huge_ndalloc
-huge_nmalloc
 huge_palloc
-huge_postfork_child
-huge_postfork_parent
-huge_prefork
 huge_prof_tctx_get
+huge_prof_tctx_reset
 huge_prof_tctx_set
 huge_ralloc
 huge_ralloc_no_move
 huge_salloc
+iaalloc
+iallocztm
 icalloc
 icalloct
 idalloc
 idalloct
+idalloctm
 imalloc
 imalloct
 in_valgrind
+index2size
+index2size_compute
+index2size_lookup
+index2size_tab
 ipalloc
 ipalloct
+ipallocztm
 iqalloc
 iralloc
 iralloct
 iralloct_realign
 isalloc
 isdalloct
-isthreaded
 isqalloc
+isthreaded
 ivsalloc
 ixalloc
 jemalloc_postfork_child
 jemalloc_postfork_parent
 jemalloc_prefork
+large_maxclass
 lg_floor
 malloc_cprintf
 malloc_mutex_init
@@ -242,7 +314,8 @@ malloc_mutex_unlock
 malloc_printf
 malloc_snprintf
 malloc_strtoumax
-malloc_tsd_boot
+malloc_tsd_boot0
+malloc_tsd_boot1
 malloc_tsd_cleanup_register
 malloc_tsd_dalloc
 malloc_tsd_malloc
@@ -254,14 +327,29 @@ map_bias
 map_misc_offset
 mb_write
 mutex_boot
-narenas_auto
-narenas_total
+narenas_tdata_cleanup
 narenas_total_get
 ncpus
 nhbins
+nstime_add
+nstime_compare
+nstime_copy
+nstime_divide
+nstime_idivide
+nstime_imultiply
+nstime_init
+nstime_init2
+nstime_ns
+nstime_nsec
+nstime_sec
+nstime_subtract
+nstime_update
 opt_abort
+opt_decay_time
 opt_dss
 opt_junk
+opt_junk_alloc
+opt_junk_free
 opt_lg_chunk
 opt_lg_dirty_mult
 opt_lg_prof_interval
@@ -275,6 +363,8 @@ opt_prof_final
 opt_prof_gdump
 opt_prof_leak
 opt_prof_prefix
+opt_prof_thread_active_init
+opt_purge
 opt_quarantine
 opt_redzone
 opt_stats_print
@@ -283,8 +373,17 @@ opt_utrace
 opt_xmalloc
 opt_zero
 p2rz
+pages_commit
+pages_decommit
+pages_map
 pages_purge
-pow2_ceil
+pages_trim
+pages_unmap
+pow2_ceil_u32
+pow2_ceil_u64
+pow2_ceil_zu
+prng_lg_range
+prng_range
 prof_active_get
 prof_active_get_unlocked
 prof_active_set
@@ -294,12 +393,15 @@ prof_backtrace
 prof_boot0
 prof_boot1
 prof_boot2
-prof_bt_count
 prof_dump_header
 prof_dump_open
 prof_free
 prof_free_sampled_object
 prof_gdump
+prof_gdump_get
+prof_gdump_get_unlocked
+prof_gdump_set
+prof_gdump_val
 prof_idump
 prof_interval
 prof_lookup
@@ -314,48 +416,55 @@ prof_reset
 prof_sample_accum_update
 prof_sample_threshold_update
 prof_tctx_get
+prof_tctx_reset
 prof_tctx_set
 prof_tdata_cleanup
 prof_tdata_get
 prof_tdata_init
+prof_tdata_reinit
 prof_thread_active_get
 prof_thread_active_init_get
 prof_thread_active_init_set
 prof_thread_active_set
 prof_thread_name_get
 prof_thread_name_set
+purge_mode_names
 quarantine
 quarantine_alloc_hook
+quarantine_alloc_hook_work
 quarantine_cleanup
-quarantine_init
 register_zone
+rtree_child_read
+rtree_child_read_hard
+rtree_child_tryread
 rtree_delete
 rtree_get
-rtree_get_locked
 rtree_new
-rtree_postfork_child
-rtree_postfork_parent
-rtree_prefork
+rtree_node_valid
 rtree_set
+rtree_start_level
+rtree_subkey
+rtree_subtree_read
+rtree_subtree_read_hard
+rtree_subtree_tryread
+rtree_val_read
+rtree_val_write
+run_quantize_ceil
+run_quantize_floor
+run_quantize_max
 s2u
+s2u_compute
+s2u_lookup
 sa2u
 set_errno
-small_bin2size
-small_bin2size_compute
-small_bin2size_lookup
-small_bin2size_tab
-small_s2u
-small_s2u_compute
-small_s2u_lookup
-small_size2bin
-small_size2bin_compute
-small_size2bin_lookup
-small_size2bin_tab
+size2index
+size2index_compute
+size2index_lookup
+size2index_tab
 stats_cactive
 stats_cactive_add
 stats_cactive_get
 stats_cactive_sub
-stats_chunks
 stats_print
 tcache_alloc_easy
 tcache_alloc_large
@@ -363,6 +472,7 @@ tcache_alloc_small
 tcache_alloc_small_hard
 tcache_arena_associate
 tcache_arena_dissociate
+tcache_arena_reassociate
 tcache_bin_flush_large
 tcache_bin_flush_small
 tcache_bin_info
@@ -382,22 +492,37 @@ tcache_get_hard
 tcache_maxclass
 tcache_salloc
 tcache_stats_merge
+tcaches
+tcaches_create
+tcaches_destroy
+tcaches_flush
+tcaches_get
 thread_allocated_cleanup
 thread_deallocated_cleanup
-tsd_booted
+ticker_copy
+ticker_init
+ticker_read
+ticker_tick
+ticker_ticks
 tsd_arena_get
 tsd_arena_set
 tsd_boot
+tsd_boot0
+tsd_boot1
+tsd_booted
 tsd_cleanup
 tsd_cleanup_wrapper
 tsd_fetch
 tsd_get
-tsd_get_wrapper
+tsd_wrapper_get
+tsd_wrapper_set
 tsd_initialized
 tsd_init_check_recursion
 tsd_init_finish
 tsd_init_head
 tsd_nominal
+tsd_prof_tdata_get
+tsd_prof_tdata_set
 tsd_quarantine_get
 tsd_quarantine_set
 tsd_set
@@ -405,14 +530,12 @@ tsd_tcache_enabled_get
 tsd_tcache_enabled_set
 tsd_tcache_get
 tsd_tcache_set
-tsd_tls
-tsd_tsd
-tsd_prof_tdata_get
-tsd_prof_tdata_set
 tsd_thread_allocated_get
 tsd_thread_allocated_set
 tsd_thread_deallocated_get
 tsd_thread_deallocated_set
+tsd_tls
+tsd_tsd
 u2rz
 valgrind_freelike_block
 valgrind_make_mem_defined
index c6b1797226e0fb2f58e26897020b13c0f2f99d0c..5830f8b7b32747a4fe8a02f1a49d692fb8ba9211 100644 (file)
  * proportional to bit position.  For example, the lowest bit has a cycle of 2,
  * the next has a cycle of 4, etc.  For this reason, we prefer to use the upper
  * bits.
- *
- * Macro parameters:
- *   uint32_t r          : Result.
- *   unsigned lg_range   : (0..32], number of least significant bits to return.
- *   uint32_t state      : Seed value.
- *   const uint32_t a, c : See above discussion.
  */
-#define        prng32(r, lg_range, state, a, c) do {                           \
-       assert(lg_range > 0);                                           \
-       assert(lg_range <= 32);                                         \
-                                                                       \
-       r = (state * (a)) + (c);                                        \
-       state = r;                                                      \
-       r >>= (32 - lg_range);                                          \
-} while (false)
-
-/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
-#define        prng64(r, lg_range, state, a, c) do {                           \
-       assert(lg_range > 0);                                           \
-       assert(lg_range <= 64);                                         \
-                                                                       \
-       r = (state * (a)) + (c);                                        \
-       state = r;                                                      \
-       r >>= (64 - lg_range);                                          \
-} while (false)
+#define        PRNG_A  UINT64_C(6364136223846793005)
+#define        PRNG_C  UINT64_C(1442695040888963407)
 
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
+#ifndef JEMALLOC_ENABLE_INLINE
+uint64_t       prng_lg_range(uint64_t *state, unsigned lg_range);
+uint64_t       prng_range(uint64_t *state, uint64_t range);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
+JEMALLOC_ALWAYS_INLINE uint64_t
+prng_lg_range(uint64_t *state, unsigned lg_range)
+{
+       uint64_t ret;
+
+       assert(lg_range > 0);
+       assert(lg_range <= 64);
+
+       ret = (*state * PRNG_A) + PRNG_C;
+       *state = ret;
+       ret >>= (64 - lg_range);
+
+       return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE uint64_t
+prng_range(uint64_t *state, uint64_t range)
+{
+       uint64_t ret;
+       unsigned lg_range;
+
+       assert(range > 1);
+
+       /* Compute the ceiling of lg(range). */
+       lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
+
+       /* Generate a result in [0..range) via repeated trial. */
+       do {
+               ret = prng_lg_range(state, lg_range);
+       } while (ret >= range);
+
+       return (ret);
+}
+#endif
+
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
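Editor's note (not part of the diff): a short sketch of drawing values with the inline PRNG added above. The seed is arbitrary (the nonzero increment PRNG_C keeps even a zero state moving), and the retry loop in prng_range() rejects out-of-range draws, so non-power-of-two ranges stay unbiased at a cost of fewer than two expected iterations:

    uint64_t state = 42;                         /* arbitrary seed */
    uint64_t coin  = prng_lg_range(&state, 1);   /* top bit: 0 or 1 */
    uint64_t slot  = prng_range(&state, 37);     /* uniform in [0, 37) */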
index c8014717ece797d7e864ef9da27b2e08c25a979c..a25502a9f3989aad83203f241aefc653e8d0d4a2 100644 (file)
@@ -89,12 +89,34 @@ struct prof_tctx_s {
        /* Thread data for thread that performed the allocation. */
        prof_tdata_t            *tdata;
 
+       /*
+        * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
+        * defunct during teardown.
+        */
+       uint64_t                thr_uid;
+       uint64_t                thr_discrim;
+
        /* Profiling counters, protected by tdata->lock. */
        prof_cnt_t              cnts;
 
        /* Associated global context. */
        prof_gctx_t             *gctx;
 
+       /*
+        * UID that distinguishes multiple tctx's created by the same thread,
+        * but coexisting in gctx->tctxs.  There are two ways that such
+        * coexistence can occur:
+        * - A dumper thread can cause a tctx to be retained in the purgatory
+        *   state.
+        * - Although a single "producer" thread must create all tctx's which
+        *   share the same thr_uid, multiple "consumers" can each concurrently
+        *   execute portions of prof_tctx_destroy().  prof_tctx_destroy() only
+        *   gets called once each time cnts.cur{objs,bytes} drop to 0, but this
+        *   threshold can be hit again before the first consumer finishes
+        *   executing prof_tctx_destroy().
+        */
+       uint64_t                tctx_uid;
+
        /* Linkage into gctx's tctxs. */
        rb_node(prof_tctx_t)    tctx_link;
 
@@ -171,6 +193,13 @@ struct prof_tdata_s {
 
        rb_node(prof_tdata_t)   tdata_link;
 
+       /*
+        * Counter used to initialize prof_tctx_t's tctx_uid.  No locking is
+        * necessary when incrementing this field, because only one thread ever
+        * does so.
+        */
+       uint64_t                tctx_uid_next;
+
        /*
         * Hash of (prof_bt_t *)-->(prof_tctx_t *).  Each thread tracks
         * backtraces for which it has non-zero allocation/deallocation counters
@@ -233,6 +262,9 @@ extern char opt_prof_prefix[
 /* Accessed via prof_active_[gs]et{_unlocked,}(). */
 extern bool    prof_active;
 
+/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
+extern bool    prof_gdump_val;
+
 /*
  * Profile dump interval, measured in bytes allocated.  Each arena triggers a
  * profile dump when it reaches this threshold.  The effect is that the
@@ -279,6 +311,8 @@ bool        prof_thread_active_get(void);
 bool   prof_thread_active_set(bool active);
 bool   prof_thread_active_init_get(void);
 bool   prof_thread_active_init_set(bool active_init);
+bool   prof_gdump_get(void);
+bool   prof_gdump_set(bool active);
 void   prof_boot0(void);
 void   prof_boot1(void);
 bool   prof_boot2(void);
@@ -293,17 +327,22 @@ void      prof_sample_threshold_update(prof_tdata_t *tdata);
 
 #ifndef JEMALLOC_ENABLE_INLINE
 bool   prof_active_get_unlocked(void);
+bool   prof_gdump_get_unlocked(void);
 prof_tdata_t   *prof_tdata_get(tsd_t *tsd, bool create);
 bool   prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
     prof_tdata_t **tdata_out);
-prof_tctx_t    *prof_alloc_prep(tsd_t *tsd, size_t usize, bool update);
+prof_tctx_t    *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
+    bool update);
 prof_tctx_t    *prof_tctx_get(const void *ptr);
-void   prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
+void   prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
+void   prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+    prof_tctx_t *tctx);
 void   prof_malloc_sample_object(const void *ptr, size_t usize,
     prof_tctx_t *tctx);
 void   prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
 void   prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
-    prof_tctx_t *tctx, bool updated, size_t old_usize, prof_tctx_t *old_tctx);
+    prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
+    size_t old_usize, prof_tctx_t *old_tctx);
 void   prof_free(tsd_t *tsd, const void *ptr, size_t usize);
 #endif
 
@@ -321,6 +360,18 @@ prof_active_get_unlocked(void)
        return (prof_active);
 }
 
+JEMALLOC_ALWAYS_INLINE bool
+prof_gdump_get_unlocked(void)
+{
+
+       /*
+        * No locking is used when reading prof_gdump_val in the fast path, so
+        * there are no guarantees regarding how long it will take for all
+        * threads to notice state changes.
+        */
+       return (prof_gdump_val);
+}
+
 JEMALLOC_ALWAYS_INLINE prof_tdata_t *
 prof_tdata_get(tsd_t *tsd, bool create)
 {
@@ -348,36 +399,32 @@ prof_tdata_get(tsd_t *tsd, bool create)
 JEMALLOC_ALWAYS_INLINE prof_tctx_t *
 prof_tctx_get(const void *ptr)
 {
-       prof_tctx_t *ret;
-       arena_chunk_t *chunk;
 
        cassert(config_prof);
        assert(ptr != NULL);
 
-       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-       if (chunk != ptr) {
-               /* Region. */
-               ret = arena_prof_tctx_get(ptr);
-       } else
-               ret = huge_prof_tctx_get(ptr);
+       return (arena_prof_tctx_get(ptr));
+}
 
-       return (ret);
+JEMALLOC_ALWAYS_INLINE void
+prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
+{
+
+       cassert(config_prof);
+       assert(ptr != NULL);
+
+       arena_prof_tctx_set(ptr, usize, tctx);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
+prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+    prof_tctx_t *old_tctx)
 {
-       arena_chunk_t *chunk;
 
        cassert(config_prof);
        assert(ptr != NULL);
 
-       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-       if (chunk != ptr) {
-               /* Region. */
-               arena_prof_tctx_set(ptr, tctx);
-       } else
-               huge_prof_tctx_set(ptr, tctx);
+       arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
 }
 
 JEMALLOC_ALWAYS_INLINE bool
@@ -389,16 +436,16 @@ prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
        cassert(config_prof);
 
        tdata = prof_tdata_get(tsd, true);
-       if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+       if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
                tdata = NULL;
 
        if (tdata_out != NULL)
                *tdata_out = tdata;
 
-       if (tdata == NULL)
+       if (unlikely(tdata == NULL))
                return (true);
 
-       if (tdata->bytes_until_sample >= usize) {
+       if (likely(tdata->bytes_until_sample >= usize)) {
                if (update)
                        tdata->bytes_until_sample -= usize;
                return (true);
@@ -411,7 +458,7 @@ prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
 }
 
 JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_alloc_prep(tsd_t *tsd, size_t usize, bool update)
+prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
 {
        prof_tctx_t *ret;
        prof_tdata_t *tdata;
@@ -419,8 +466,8 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool update)
 
        assert(usize == s2u(usize));
 
-       if (!prof_active_get_unlocked() || likely(prof_sample_accum_update(tsd,
-           usize, update, &tdata)))
+       if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
+           &tdata)))
                ret = (prof_tctx_t *)(uintptr_t)1U;
        else {
                bt_init(&bt, tdata->vec);
@@ -442,22 +489,24 @@ prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
        if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
                prof_malloc_sample_object(ptr, usize, tctx);
        else
-               prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
+               prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
 }
 
 JEMALLOC_ALWAYS_INLINE void
 prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
-    bool updated, size_t old_usize, prof_tctx_t *old_tctx)
+    bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
+    prof_tctx_t *old_tctx)
 {
+       bool sampled, old_sampled;
 
        cassert(config_prof);
        assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
 
-       if (!updated && ptr != NULL) {
+       if (prof_active && !updated && ptr != NULL) {
                assert(usize == isalloc(ptr, true));
                if (prof_sample_accum_update(tsd, usize, true, NULL)) {
                        /*
-                        * Don't sample.  The usize passed to PROF_ALLOC_PREP()
+                        * Don't sample.  The usize passed to prof_alloc_prep()
                         * was larger than what actually got allocated, so a
                         * backtrace was captured for this allocation, even
                         * though its actual usize was insufficient to cross the
@@ -467,12 +516,16 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
                }
        }
 
-       if (unlikely((uintptr_t)old_tctx > (uintptr_t)1U))
-               prof_free_sampled_object(tsd, old_usize, old_tctx);
-       if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
+       sampled = ((uintptr_t)tctx > (uintptr_t)1U);
+       old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
+
+       if (unlikely(sampled))
                prof_malloc_sample_object(ptr, usize, tctx);
        else
-               prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
+               prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
+
+       if (unlikely(old_sampled))
+               prof_free_sampled_object(tsd, old_usize, old_tctx);
 }
 
 JEMALLOC_ALWAYS_INLINE void
index f70c5f6f3919aaea8338108aab59b01b278ff8f0..1834bb8557ac6c26956a620d3e1b3467cd820ddb 100644 (file)
@@ -1,6 +1,4 @@
-/*
- * List definitions.
- */
+/* List definitions. */
 #define        ql_head(a_type)                                                 \
 struct {                                                               \
        a_type *qlh_first;                                              \
index 602944b9b4fa4281bde94f7ddce35ab9ba62b845..0fbaec25e7c063c0df37db41f713f16e83963073 100644 (file)
@@ -40,8 +40,10 @@ struct {                                                             \
        (a_qr_b)->a_field.qre_prev = t;                                 \
 } while (0)
 
-/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
- * have two copies of the code. */
+/*
+ * qr_meld() and qr_split() are functionally equivalent, so there's no need to
+ * have two copies of the code.
+ */
 #define        qr_split(a_qr_a, a_qr_b, a_field)                               \
        qr_meld((a_qr_a), (a_qr_b), a_field)
 
index 4e9c710ae4ee2c349ffd29f02402042fcd1504fd..ae607399f6d7eef9b43af481bb10c0e05298f3ea 100644 (file)
@@ -29,7 +29,7 @@ struct quarantine_s {
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-quarantine_t   *quarantine_init(tsd_t *tsd, size_t lg_maxobjs);
+void   quarantine_alloc_hook_work(tsd_t *tsd);
 void   quarantine(tsd_t *tsd, void *ptr);
 void   quarantine_cleanup(tsd_t *tsd);
 
@@ -50,8 +50,8 @@ quarantine_alloc_hook(void)
        assert(config_fill && opt_quarantine);
 
        tsd = tsd_fetch();
-       if (tsd_quarantine_get(tsd) == NULL && tsd_nominal(tsd))
-               tsd_quarantine_set(tsd, quarantine_init(tsd, LG_MAXOBJS_INIT));
+       if (tsd_quarantine_get(tsd) == NULL)
+               quarantine_alloc_hook_work(tsd);
 }
 #endif
 
index 64fab89c009abe4e592fabd1e5082bdb6e6401a8..3770342f805a04dbb98989bcd8e8d5d89fc7073f 100644 (file)
@@ -42,7 +42,6 @@ struct {                                                              \
 #define        rb_tree(a_type)                                                 \
 struct {                                                               \
     a_type *rbt_root;                                                  \
-    a_type rbt_nil;                                                    \
 }
 
 /* Left accessors. */
@@ -79,6 +78,15 @@ struct {                                                             \
     (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t)          \
       (a_node)->a_field.rbn_right_red) & ((ssize_t)-2));               \
 } while (0)
+
+/* Node initializer. */
+#define        rbt_node_new(a_type, a_field, a_rbt, a_node) do {               \
+    /* Bookkeeping bit cannot be used by node pointer. */              \
+    assert(((uintptr_t)(a_node) & 0x1) == 0);                          \
+    rbtn_left_set(a_type, a_field, (a_node), NULL);    \
+    rbtn_right_set(a_type, a_field, (a_node), NULL);   \
+    rbtn_red_set(a_type, a_field, (a_node));                           \
+} while (0)
 #else
 /* Right accessors. */
 #define        rbtn_right_get(a_type, a_field, a_node)                         \
@@ -99,28 +107,26 @@ struct {                                                           \
 #define        rbtn_black_set(a_type, a_field, a_node) do {                    \
     (a_node)->a_field.rbn_red = false;                                 \
 } while (0)
-#endif
 
 /* Node initializer. */
 #define        rbt_node_new(a_type, a_field, a_rbt, a_node) do {               \
-    rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil);       \
-    rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil);      \
+    rbtn_left_set(a_type, a_field, (a_node), NULL);    \
+    rbtn_right_set(a_type, a_field, (a_node), NULL);   \
     rbtn_red_set(a_type, a_field, (a_node));                           \
 } while (0)
+#endif
 
 /* Tree initializer. */
 #define        rb_new(a_type, a_field, a_rbt) do {                             \
-    (a_rbt)->rbt_root = &(a_rbt)->rbt_nil;                             \
-    rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil);           \
-    rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil);                        \
+    (a_rbt)->rbt_root = NULL;                                          \
 } while (0)
 
 /* Internal utility macros. */
 #define        rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do {         \
     (r_node) = (a_root);                                               \
-    if ((r_node) != &(a_rbt)->rbt_nil) {                               \
+    if ((r_node) != NULL) {                                            \
        for (;                                                          \
-         rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\
+         rbtn_left_get(a_type, a_field, (r_node)) != NULL;             \
          (r_node) = rbtn_left_get(a_type, a_field, (r_node))) {        \
        }                                                               \
     }                                                                  \
@@ -128,10 +134,9 @@ struct {                                                           \
 
 #define        rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do {          \
     (r_node) = (a_root);                                               \
-    if ((r_node) != &(a_rbt)->rbt_nil) {                               \
-       for (; rbtn_right_get(a_type, a_field, (r_node)) !=             \
-         &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \
-         (r_node))) {                                                  \
+    if ((r_node) != NULL) {                                            \
+       for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL;       \
+         (r_node) = rbtn_right_get(a_type, a_field, (r_node))) {       \
        }                                                               \
     }                                                                  \
 } while (0)
@@ -169,11 +174,11 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node);                 \
 a_attr a_type *                                                                \
 a_prefix##prev(a_rbt_type *rbtree, a_type *node);                      \
 a_attr a_type *                                                                \
-a_prefix##search(a_rbt_type *rbtree, a_type *key);                     \
+a_prefix##search(a_rbt_type *rbtree, const a_type *key);               \
 a_attr a_type *                                                                \
-a_prefix##nsearch(a_rbt_type *rbtree, a_type *key);                    \
+a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key);              \
 a_attr a_type *                                                                \
-a_prefix##psearch(a_rbt_type *rbtree, a_type *key);                    \
+a_prefix##psearch(a_rbt_type *rbtree, const a_type *key);              \
 a_attr void                                                            \
 a_prefix##insert(a_rbt_type *rbtree, a_type *node);                    \
 a_attr void                                                            \
@@ -183,7 +188,10 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)(   \
   a_rbt_type *, a_type *, void *), void *arg);                         \
 a_attr a_type *                                                                \
 a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start,              \
-  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);
+  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);           \
+a_attr void                                                            \
+a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *),    \
+  void *arg);
 
 /*
  * The rb_gen() macro generates a type-specific red-black tree implementation,
@@ -200,7 +208,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start,           \
  *                 int (a_cmp *)(a_type *a_node, a_type *a_other);
  *                                       ^^^^^^
  *                                    or a_key
- *               Interpretation of comparision function return values:
+ *               Interpretation of comparison function return values:
  *                 -1 : a_node <  a_other
  *                  0 : a_node == a_other
  *                  1 : a_node >  a_other
@@ -254,7 +262,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start,           \
  *            last/first.
  *
  *   static ex_node_t *
- *   ex_search(ex_t *tree, ex_node_t *key);
+ *   ex_search(ex_t *tree, const ex_node_t *key);
  *       Description: Search for node that matches key.
  *       Args:
  *         tree: Pointer to an initialized red-black tree object.
@@ -262,9 +270,9 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start,           \
  *       Ret: Node in tree that matches key, or NULL if no match.
  *
  *   static ex_node_t *
- *   ex_nsearch(ex_t *tree, ex_node_t *key);
+ *   ex_nsearch(ex_t *tree, const ex_node_t *key);
  *   static ex_node_t *
- *   ex_psearch(ex_t *tree, ex_node_t *key);
+ *   ex_psearch(ex_t *tree, const ex_node_t *key);
  *       Description: Search for node that matches key.  If no match is found,
  *                    return what would be key's successor/predecessor, were
  *                    key in tree.
@@ -312,6 +320,20 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start,          \
  *         arg  : Opaque pointer passed to cb().
  *       Ret: NULL if iteration completed, or the non-NULL callback return value
  *            that caused termination of the iteration.
+ *
+ *   static void
+ *   ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
+ *       Description: Iterate over the tree with post-order traversal, remove
+ *                    each node, and run the callback if non-null.  This is
+ *                    used for destroying a tree without paying the cost to
+ *                    rebalance it.  The tree must not be otherwise altered
+ *                    during traversal.
+ *       Args:
+ *         tree: Pointer to an initialized red-black tree object.
+ *         cb  : Callback function, which, if non-null, is called for each node
+ *               during iteration.  There is no way to stop iteration once it
+ *               has begun.
+ *         arg : Opaque pointer passed to cb().
  */
 #define        rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp)    \
 a_attr void                                                            \
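Editor's note (not part of the diff): a sketch of the generated API described in the comment above, using the documentation's hypothetical ex_* names and exercising the new ex_destroy() teardown path; free() stands in for whatever deallocation the caller actually uses:

    typedef struct ex_node_s ex_node_t;
    struct ex_node_s {
            rb_node(ex_node_t) ex_link;
            int                key;
    };
    typedef rb_tree(ex_node_t) ex_t;

    static int
    ex_cmp(const ex_node_t *a, const ex_node_t *b)
    {
            return ((a->key > b->key) - (a->key < b->key));
    }
    rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)

    static void
    ex_free_cb(ex_node_t *node, void *arg)
    {
            free(node);   /* runs once per node, post-order, during teardown */
    }

    /* ... */
    ex_t tree;
    ex_new(&tree);
    /* ex_insert(&tree, node); node = ex_search(&tree, &key_node); ... */
    ex_destroy(&tree, ex_free_cb, NULL);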
@@ -320,36 +342,30 @@ a_prefix##new(a_rbt_type *rbtree) {                                       \
 }                                                                      \
 a_attr bool                                                            \
 a_prefix##empty(a_rbt_type *rbtree) {                                  \
-    return (rbtree->rbt_root == &rbtree->rbt_nil);                     \
+    return (rbtree->rbt_root == NULL);                                 \
 }                                                                      \
 a_attr a_type *                                                                \
 a_prefix##first(a_rbt_type *rbtree) {                                  \
     a_type *ret;                                                       \
     rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret);                \
-    if (ret == &rbtree->rbt_nil) {                                     \
-       ret = NULL;                                                     \
-    }                                                                  \
     return (ret);                                                      \
 }                                                                      \
 a_attr a_type *                                                                \
 a_prefix##last(a_rbt_type *rbtree) {                                   \
     a_type *ret;                                                       \
     rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret);         \
-    if (ret == &rbtree->rbt_nil) {                                     \
-       ret = NULL;                                                     \
-    }                                                                  \
     return (ret);                                                      \
 }                                                                      \
 a_attr a_type *                                                                \
 a_prefix##next(a_rbt_type *rbtree, a_type *node) {                     \
     a_type *ret;                                                       \
-    if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) {   \
+    if (rbtn_right_get(a_type, a_field, node) != NULL) {               \
        rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type,      \
          a_field, node), ret);                                         \
     } else {                                                           \
        a_type *tnode = rbtree->rbt_root;                               \
-       assert(tnode != &rbtree->rbt_nil);                              \
-       ret = &rbtree->rbt_nil;                                         \
+       assert(tnode != NULL);                                          \
+       ret = NULL;                                                     \
        while (true) {                                                  \
            int cmp = (a_cmp)(node, tnode);                             \
            if (cmp < 0) {                                              \
@@ -360,24 +376,21 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) {                        \
            } else {                                                    \
                break;                                                  \
            }                                                           \
-           assert(tnode != &rbtree->rbt_nil);                          \
+           assert(tnode != NULL);                                      \
        }                                                               \
     }                                                                  \
-    if (ret == &rbtree->rbt_nil) {                                     \
-       ret = (NULL);                                                   \
-    }                                                                  \
     return (ret);                                                      \
 }                                                                      \
 a_attr a_type *                                                                \
 a_prefix##prev(a_rbt_type *rbtree, a_type *node) {                     \
     a_type *ret;                                                       \
-    if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) {    \
+    if (rbtn_left_get(a_type, a_field, node) != NULL) {                        \
        rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type,        \
          a_field, node), ret);                                         \
     } else {                                                           \
        a_type *tnode = rbtree->rbt_root;                               \
-       assert(tnode != &rbtree->rbt_nil);                              \
-       ret = &rbtree->rbt_nil;                                         \
+       assert(tnode != NULL);                                          \
+       ret = NULL;                                                     \
        while (true) {                                                  \
            int cmp = (a_cmp)(node, tnode);                             \
            if (cmp < 0) {                                              \
@@ -388,20 +401,17 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) {                        \
            } else {                                                    \
                break;                                                  \
            }                                                           \
-           assert(tnode != &rbtree->rbt_nil);                          \
+           assert(tnode != NULL);                                      \
        }                                                               \
     }                                                                  \
-    if (ret == &rbtree->rbt_nil) {                                     \
-       ret = (NULL);                                                   \
-    }                                                                  \
     return (ret);                                                      \
 }                                                                      \
 a_attr a_type *                                                                \
-a_prefix##search(a_rbt_type *rbtree, a_type *key) {                    \
+a_prefix##search(a_rbt_type *rbtree, const a_type *key) {              \
     a_type *ret;                                                       \
     int cmp;                                                           \
     ret = rbtree->rbt_root;                                            \
-    while (ret != &rbtree->rbt_nil                                     \
+    while (ret != NULL                                                 \
       && (cmp = (a_cmp)(key, ret)) != 0) {                             \
        if (cmp < 0) {                                                  \
            ret = rbtn_left_get(a_type, a_field, ret);                  \
@@ -409,17 +419,14 @@ a_prefix##search(a_rbt_type *rbtree, a_type *key) {                       \
            ret = rbtn_right_get(a_type, a_field, ret);                 \
        }                                                               \
     }                                                                  \
-    if (ret == &rbtree->rbt_nil) {                                     \
-       ret = (NULL);                                                   \
-    }                                                                  \
     return (ret);                                                      \
 }                                                                      \
 a_attr a_type *                                                                \
-a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) {                   \
+a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) {             \
     a_type *ret;                                                       \
     a_type *tnode = rbtree->rbt_root;                                  \
-    ret = &rbtree->rbt_nil;                                            \
-    while (tnode != &rbtree->rbt_nil) {                                        \
+    ret = NULL;                                                                \
+    while (tnode != NULL) {                                            \
        int cmp = (a_cmp)(key, tnode);                                  \
        if (cmp < 0) {                                                  \
            ret = tnode;                                                \
@@ -431,17 +438,14 @@ a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) {                      \
            break;                                                      \
        }                                                               \
     }                                                                  \
-    if (ret == &rbtree->rbt_nil) {                                     \
-       ret = (NULL);                                                   \
-    }                                                                  \
     return (ret);                                                      \
 }                                                                      \
 a_attr a_type *                                                                \
-a_prefix##psearch(a_rbt_type *rbtree, a_type *key) {                   \
+a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) {             \
     a_type *ret;                                                       \
     a_type *tnode = rbtree->rbt_root;                                  \
-    ret = &rbtree->rbt_nil;                                            \
-    while (tnode != &rbtree->rbt_nil) {                                        \
+    ret = NULL;                                                                \
+    while (tnode != NULL) {                                            \
        int cmp = (a_cmp)(key, tnode);                                  \
        if (cmp < 0) {                                                  \
            tnode = rbtn_left_get(a_type, a_field, tnode);              \
@@ -453,9 +457,6 @@ a_prefix##psearch(a_rbt_type *rbtree, a_type *key) {                        \
            break;                                                      \
        }                                                               \
     }                                                                  \
-    if (ret == &rbtree->rbt_nil) {                                     \
-       ret = (NULL);                                                   \
-    }                                                                  \
     return (ret);                                                      \
 }                                                                      \
 a_attr void                                                            \
@@ -467,7 +468,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) {                        \
     rbt_node_new(a_type, a_field, rbtree, node);                       \
     /* Wind. */                                                                \
     path->node = rbtree->rbt_root;                                     \
-    for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) {     \
+    for (pathp = path; pathp->node != NULL; pathp++) {                 \
        int cmp = pathp->cmp = a_cmp(node, pathp->node);                \
        assert(cmp != 0);                                               \
        if (cmp < 0) {                                                  \
@@ -487,7 +488,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) {                        \
            rbtn_left_set(a_type, a_field, cnode, left);                \
            if (rbtn_red_get(a_type, a_field, left)) {                  \
                a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
-               if (rbtn_red_get(a_type, a_field, leftleft)) {          \
+               if (leftleft != NULL && rbtn_red_get(a_type, a_field,   \
+                 leftleft)) {                                          \
                    /* Fix up 4-node. */                                \
                    a_type *tnode;                                      \
                    rbtn_black_set(a_type, a_field, leftleft);          \
@@ -502,7 +504,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) {                        \
            rbtn_right_set(a_type, a_field, cnode, right);              \
            if (rbtn_red_get(a_type, a_field, right)) {                 \
                a_type *left = rbtn_left_get(a_type, a_field, cnode);   \
-               if (rbtn_red_get(a_type, a_field, left)) {              \
+               if (left != NULL && rbtn_red_get(a_type, a_field,       \
+                 left)) {                                              \
                    /* Split 4-node. */                                 \
                    rbtn_black_set(a_type, a_field, left);              \
                    rbtn_black_set(a_type, a_field, right);             \
@@ -535,7 +538,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {                        \
     /* Wind. */                                                                \
     nodep = NULL; /* Silence compiler warning. */                      \
     path->node = rbtree->rbt_root;                                     \
-    for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) {     \
+    for (pathp = path; pathp->node != NULL; pathp++) {                 \
        int cmp = pathp->cmp = a_cmp(node, pathp->node);                \
        if (cmp < 0) {                                                  \
            pathp[1].node = rbtn_left_get(a_type, a_field,              \
@@ -547,7 +550,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {                        \
                /* Find node's successor, in preparation for swap. */   \
                pathp->cmp = 1;                                         \
                nodep = pathp;                                          \
-               for (pathp++; pathp->node != &rbtree->rbt_nil;          \
+               for (pathp++; pathp->node != NULL;                      \
                  pathp++) {                                            \
                    pathp->cmp = -1;                                    \
                    pathp[1].node = rbtn_left_get(a_type, a_field,      \
@@ -590,7 +593,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {                        \
        }                                                               \
     } else {                                                           \
        a_type *left = rbtn_left_get(a_type, a_field, node);            \
-       if (left != &rbtree->rbt_nil) {                                 \
+       if (left != NULL) {                                             \
            /* node has no successor, but it has a left child.        */\
            /* Splice node out, without losing the left child.        */\
            assert(!rbtn_red_get(a_type, a_field, node));               \
@@ -610,33 +613,32 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {                      \
            return;                                                     \
        } else if (pathp == path) {                                     \
            /* The tree only contained one node. */                     \
-           rbtree->rbt_root = &rbtree->rbt_nil;                        \
+           rbtree->rbt_root = NULL;                                    \
            return;                                                     \
        }                                                               \
     }                                                                  \
     if (rbtn_red_get(a_type, a_field, pathp->node)) {                  \
        /* Prune red node, which requires no fixup. */                  \
        assert(pathp[-1].cmp < 0);                                      \
-       rbtn_left_set(a_type, a_field, pathp[-1].node,                  \
-         &rbtree->rbt_nil);                                            \
+       rbtn_left_set(a_type, a_field, pathp[-1].node, NULL);           \
        return;                                                         \
     }                                                                  \
     /* The node to be pruned is black, so unwind until balance is     */\
     /* restored.                                                      */\
-    pathp->node = &rbtree->rbt_nil;                                    \
+    pathp->node = NULL;                                                        \
     for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) {      \
        assert(pathp->cmp != 0);                                        \
        if (pathp->cmp < 0) {                                           \
            rbtn_left_set(a_type, a_field, pathp->node,                 \
              pathp[1].node);                                           \
-           assert(!rbtn_red_get(a_type, a_field, pathp[1].node));      \
            if (rbtn_red_get(a_type, a_field, pathp->node)) {           \
                a_type *right = rbtn_right_get(a_type, a_field,         \
                  pathp->node);                                         \
                a_type *rightleft = rbtn_left_get(a_type, a_field,      \
                  right);                                               \
                a_type *tnode;                                          \
-               if (rbtn_red_get(a_type, a_field, rightleft)) {         \
+               if (rightleft != NULL && rbtn_red_get(a_type, a_field,  \
+                 rightleft)) {                                         \
                    /* In the following diagrams, ||, //, and \\      */\
                    /* indicate the path to the removed node.         */\
                    /*                                                */\
@@ -679,7 +681,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {                        \
                  pathp->node);                                         \
                a_type *rightleft = rbtn_left_get(a_type, a_field,      \
                  right);                                               \
-               if (rbtn_red_get(a_type, a_field, rightleft)) {         \
+               if (rightleft != NULL && rbtn_red_get(a_type, a_field,  \
+                 rightleft)) {                                         \
                    /*      ||                                        */\
                    /*    pathp(b)                                    */\
                    /*  //        \                                   */\
@@ -693,7 +696,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {                        \
                    rbtn_rotate_left(a_type, a_field, pathp->node,      \
                      tnode);                                           \
                    /* Balance restored, but rotation modified        */\
-                   /* subree root, which may actually be the tree    */\
+                   /* subtree root, which may actually be the tree   */\
                    /* root.                                          */\
                    if (pathp == path) {                                \
                        /* Set root. */                                 \
@@ -733,7 +736,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {                        \
                  left);                                                \
                a_type *leftrightleft = rbtn_left_get(a_type, a_field,  \
                  leftright);                                           \
-               if (rbtn_red_get(a_type, a_field, leftrightleft)) {     \
+               if (leftrightleft != NULL && rbtn_red_get(a_type,       \
+                 a_field, leftrightleft)) {                            \
                    /*      ||                                        */\
                    /*    pathp(b)                                    */\
                    /*   /        \\                                  */\
@@ -759,7 +763,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {                        \
                    /*   (b)                                          */\
                    /*   /                                            */\
                    /* (b)                                            */\
-                   assert(leftright != &rbtree->rbt_nil);              \
+                   assert(leftright != NULL);                          \
                    rbtn_red_set(a_type, a_field, leftright);           \
                    rbtn_rotate_right(a_type, a_field, pathp->node,     \
                      tnode);                                           \
@@ -782,7 +786,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {                        \
                return;                                                 \
            } else if (rbtn_red_get(a_type, a_field, pathp->node)) {    \
                a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
-               if (rbtn_red_get(a_type, a_field, leftleft)) {          \
+               if (leftleft != NULL && rbtn_red_get(a_type, a_field,   \
+                 leftleft)) {                                          \
                    /*        ||                                      */\
                    /*      pathp(r)                                  */\
                    /*     /        \\                                */\
@@ -820,7 +825,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {                        \
                }                                                       \
            } else {                                                    \
                a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
-               if (rbtn_red_get(a_type, a_field, leftleft)) {          \
+               if (leftleft != NULL && rbtn_red_get(a_type, a_field,   \
+                 leftleft)) {                                          \
                    /*               ||                               */\
                    /*             pathp(b)                           */\
                    /*            /        \\                         */\
@@ -866,13 +872,13 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {                      \
 a_attr a_type *                                                                \
 a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node,               \
   a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) {          \
-    if (node == &rbtree->rbt_nil) {                                    \
-       return (&rbtree->rbt_nil);                                      \
+    if (node == NULL) {                                                        \
+       return (NULL);                                                  \
     } else {                                                           \
        a_type *ret;                                                    \
        if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
-         a_field, node), cb, arg)) != &rbtree->rbt_nil                 \
-         || (ret = cb(rbtree, node, arg)) != NULL) {                   \
+         a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node,  \
+         arg)) != NULL) {                                              \
            return (ret);                                               \
        }                                                               \
        return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type,   \
@@ -886,8 +892,8 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node,       \
     if (cmp < 0) {                                                     \
        a_type *ret;                                                    \
        if ((ret = a_prefix##iter_start(rbtree, start,                  \
-         rbtn_left_get(a_type, a_field, node), cb, arg)) !=            \
-         &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) {  \
+         rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL ||    \
+         (ret = cb(rbtree, node, arg)) != NULL) {                      \
            return (ret);                                               \
        }                                                               \
        return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type,   \
@@ -914,21 +920,18 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)(  \
     } else {                                                           \
        ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
     }                                                                  \
-    if (ret == &rbtree->rbt_nil) {                                     \
-       ret = NULL;                                                     \
-    }                                                                  \
     return (ret);                                                      \
 }                                                                      \
 a_attr a_type *                                                                \
 a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node,       \
   a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) {          \
-    if (node == &rbtree->rbt_nil) {                                    \
-       return (&rbtree->rbt_nil);                                      \
+    if (node == NULL) {                                                        \
+       return (NULL);                                                  \
     } else {                                                           \
        a_type *ret;                                                    \
        if ((ret = a_prefix##reverse_iter_recurse(rbtree,               \
-         rbtn_right_get(a_type, a_field, node), cb, arg)) !=           \
-         &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) {  \
+         rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL ||   \
+         (ret = cb(rbtree, node, arg)) != NULL) {                      \
            return (ret);                                               \
        }                                                               \
        return (a_prefix##reverse_iter_recurse(rbtree,                  \
@@ -943,8 +946,8 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start,             \
     if (cmp > 0) {                                                     \
        a_type *ret;                                                    \
        if ((ret = a_prefix##reverse_iter_start(rbtree, start,          \
-         rbtn_right_get(a_type, a_field, node), cb, arg)) !=           \
-         &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) {  \
+         rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL ||   \
+         (ret = cb(rbtree, node, arg)) != NULL) {                      \
            return (ret);                                               \
        }                                                               \
        return (a_prefix##reverse_iter_recurse(rbtree,                  \
@@ -972,10 +975,29 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start,         \
        ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root,  \
          cb, arg);                                                     \
     }                                                                  \
-    if (ret == &rbtree->rbt_nil) {                                     \
-       ret = NULL;                                                     \
-    }                                                                  \
     return (ret);                                                      \
+}                                                                      \
+a_attr void                                                            \
+a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)(        \
+  a_type *, void *), void *arg) {                                      \
+    if (node == NULL) {                                                        \
+       return;                                                         \
+    }                                                                  \
+    a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field,   \
+      node), cb, arg);                                                 \
+    rbtn_left_set(a_type, a_field, (node), NULL);                      \
+    a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field,  \
+      node), cb, arg);                                                 \
+    rbtn_right_set(a_type, a_field, (node), NULL);                     \
+    if (cb) {                                                          \
+       cb(node, arg);                                                  \
+    }                                                                  \
+}                                                                      \
+a_attr void                                                            \
+a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *),    \
+  void *arg) {                                                         \
+    a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg);      \
+    rbtree->rbt_root = NULL;                                           \
 }
 
 #endif /* RB_H_ */
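The destroy()/destroy_recurse() entry points added above tear a tree down post-order, clearing child links and handing each node to an optional callback before resetting rbt_root to NULL. A minimal sketch of how a generated tree type might use the new call, assuming the usual rb_node()/rb_tree()/rb_gen() instantiation from this header; the node type, comparator, and callback below are illustrative and not part of the commit:

#include <stdlib.h>	/* free(); rb.h itself is assumed to be included. */

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
	int			key;
	rb_node(ex_node_t)	link;	/* Embedded red-black linkage. */
};
typedef rb_tree(ex_node_t) ex_tree_t;

static int
ex_cmp(ex_node_t *a, ex_node_t *b)
{
	return ((a->key > b->key) - (a->key < b->key));
}
rb_gen(static, ex_tree_, ex_tree_t, ex_node_t, link, ex_cmp)

static void
ex_node_dalloc(ex_node_t *node, void *arg)
{
	(void)arg;
	free(node);
}

/* After ex_tree_new(&tree) and some ex_tree_insert(&tree, node) calls: */
/*   ex_tree_destroy(&tree, ex_node_dalloc, NULL);                      */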
index bc74769f50ed68588850c36366b9ec7df9896f8d..28ae9d1dd2d1a51906614eeab1533e121c17f303 100644 (file)
 /*
  * This radix tree implementation is tailored to the singular purpose of
- * tracking which chunks are currently owned by jemalloc.  This functionality
- * is mandatory for OS X, where jemalloc must be able to respond to object
- * ownership queries.
+ * associating metadata with chunks that are currently owned by jemalloc.
  *
  *******************************************************************************
  */
 #ifdef JEMALLOC_H_TYPES
 
+typedef struct rtree_node_elm_s rtree_node_elm_t;
+typedef struct rtree_level_s rtree_level_t;
 typedef struct rtree_s rtree_t;
 
 /*
- * Size of each radix tree node (must be a power of 2).  This impacts tree
- * depth.
+ * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
+ * machine address width.
  */
-#define        RTREE_NODESIZE (1U << 16)
+#define        LG_RTREE_BITS_PER_LEVEL 4
+#define        RTREE_BITS_PER_LEVEL    (ZU(1) << LG_RTREE_BITS_PER_LEVEL)
+#define        RTREE_HEIGHT_MAX                                                \
+    ((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
 
-typedef void *(rtree_alloc_t)(size_t);
-typedef void (rtree_dalloc_t)(void *);
+/* Used for two-stage lock-free node initialization. */
+#define        RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
+
+/*
+ * The node allocation callback function's argument is the number of contiguous
+ * rtree_node_elm_t structures to allocate, and the resulting memory must be
+ * zeroed.
+ */
+typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t);
+typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);
 
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
+struct rtree_node_elm_s {
+       union {
+               void                    *pun;
+               rtree_node_elm_t        *child;
+               extent_node_t           *val;
+       };
+};
+
+struct rtree_level_s {
+       /*
+        * A non-NULL subtree points to a subtree rooted along the hypothetical
+        * path to the leaf node corresponding to key 0.  Depending on what keys
+        * have been used to store to the tree, an arbitrary combination of
+        * subtree pointers may remain NULL.
+        *
+        * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4.
+        * This results in a 3-level tree, and the leftmost leaf can be directly
+        * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding
+        * 0x00000000) can be accessed via subtrees[1], and the remainder of the
+        * tree can be accessed via subtrees[0].
+        *
+        *   levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...]
+        *
+        *   levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ... ]
+        *
+        *   levels[2] : [val(0x000000000000) | val(0x000000000001) | ...]
+        *
+        * This has practical implications on x64, which currently uses only the
+        * lower 47 bits of virtual address space in userland, thus leaving
+        * subtrees[0] unused and avoiding a level of tree traversal.
+        */
+       union {
+               void                    *subtree_pun;
+               rtree_node_elm_t        *subtree;
+       };
+       /* Number of key bits distinguished by this level. */
+       unsigned                bits;
+       /*
+        * Cumulative number of key bits distinguished by traversing to
+        * corresponding tree level.
+        */
+       unsigned                cumbits;
+};
+
 struct rtree_s {
-       rtree_alloc_t   *alloc;
-       rtree_dalloc_t  *dalloc;
-       malloc_mutex_t  mutex;
-       void            **root;
-       unsigned        height;
-       unsigned        level2bits[1]; /* Dynamically sized. */
+       rtree_node_alloc_t      *alloc;
+       rtree_node_dalloc_t     *dalloc;
+       unsigned                height;
+       /*
+        * Precomputed table used to convert from the number of leading 0 key
+        * bits to which subtree level to start at.
+        */
+       unsigned                start_level[RTREE_HEIGHT_MAX];
+       rtree_level_t           levels[RTREE_HEIGHT_MAX];
 };
 
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-rtree_t        *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
+bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
+    rtree_node_dalloc_t *dalloc);
 void   rtree_delete(rtree_t *rtree);
-void   rtree_prefork(rtree_t *rtree);
-void   rtree_postfork_parent(rtree_t *rtree);
-void   rtree_postfork_child(rtree_t *rtree);
+rtree_node_elm_t       *rtree_subtree_read_hard(rtree_t *rtree,
+    unsigned level);
+rtree_node_elm_t       *rtree_child_read_hard(rtree_t *rtree,
+    rtree_node_elm_t *elm, unsigned level);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
-#ifdef JEMALLOC_DEBUG
-uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
-#endif
-uint8_t        rtree_get(rtree_t *rtree, uintptr_t key);
-bool   rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
+unsigned       rtree_start_level(rtree_t *rtree, uintptr_t key);
+uintptr_t      rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
+
+bool   rtree_node_valid(rtree_node_elm_t *node);
+rtree_node_elm_t       *rtree_child_tryread(rtree_node_elm_t *elm);
+rtree_node_elm_t       *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
+    unsigned level);
+extent_node_t  *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
+    bool dependent);
+void   rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
+    const extent_node_t *val);
+rtree_node_elm_t       *rtree_subtree_tryread(rtree_t *rtree, unsigned level);
+rtree_node_elm_t       *rtree_subtree_read(rtree_t *rtree, unsigned level);
+
+extent_node_t  *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
+bool   rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
-#define        RTREE_GET_GENERATE(f)                                           \
-/* The least significant bits of the key are ignored. */               \
-JEMALLOC_INLINE uint8_t                                                        \
-f(rtree_t *rtree, uintptr_t key)                                       \
-{                                                                      \
-       uint8_t ret;                                                    \
-       uintptr_t subkey;                                               \
-       unsigned i, lshift, height, bits;                               \
-       void **node, **child;                                           \
-                                                                       \
-       RTREE_LOCK(&rtree->mutex);                                      \
-       for (i = lshift = 0, height = rtree->height, node = rtree->root;\
-           i < height - 1;                                             \
-           i++, lshift += bits, node = child) {                        \
-               bits = rtree->level2bits[i];                            \
-               subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
-                   3)) - bits);                                        \
-               child = (void**)node[subkey];                           \
-               if (child == NULL) {                                    \
-                       RTREE_UNLOCK(&rtree->mutex);                    \
-                       return (0);                                     \
-               }                                                       \
-       }                                                               \
-                                                                       \
-       /*                                                              \
-        * node is a leaf, so it contains values rather than node       \
-        * pointers.                                                    \
-        */                                                             \
-       bits = rtree->level2bits[i];                                    \
-       subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -     \
-           bits);                                                      \
-       {                                                               \
-               uint8_t *leaf = (uint8_t *)node;                        \
-               ret = leaf[subkey];                                     \
-       }                                                               \
-       RTREE_UNLOCK(&rtree->mutex);                                    \
-                                                                       \
-       RTREE_GET_VALIDATE                                              \
-       return (ret);                                                   \
+JEMALLOC_INLINE unsigned
+rtree_start_level(rtree_t *rtree, uintptr_t key)
+{
+       unsigned start_level;
+
+       if (unlikely(key == 0))
+               return (rtree->height - 1);
+
+       start_level = rtree->start_level[lg_floor(key) >>
+           LG_RTREE_BITS_PER_LEVEL];
+       assert(start_level < rtree->height);
+       return (start_level);
 }
 
-#ifdef JEMALLOC_DEBUG
-#  define RTREE_LOCK(l)                malloc_mutex_lock(l)
-#  define RTREE_UNLOCK(l)      malloc_mutex_unlock(l)
-#  define RTREE_GET_VALIDATE
-RTREE_GET_GENERATE(rtree_get_locked)
-#  undef RTREE_LOCK
-#  undef RTREE_UNLOCK
-#  undef RTREE_GET_VALIDATE
-#endif
+JEMALLOC_INLINE uintptr_t
+rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
+{
 
-#define        RTREE_LOCK(l)
-#define        RTREE_UNLOCK(l)
-#ifdef JEMALLOC_DEBUG
-   /*
-    * Suppose that it were possible for a jemalloc-allocated chunk to be
-    * munmap()ped, followed by a different allocator in another thread re-using
-    * overlapping virtual memory, all without invalidating the cached rtree
-    * value.  The result would be a false positive (the rtree would claim that
-    * jemalloc owns memory that it had actually discarded).  This scenario
-    * seems impossible, but the following assertion is a prudent sanity check.
-    */
-#  define RTREE_GET_VALIDATE                                           \
-       assert(rtree_get_locked(rtree, key) == ret);
-#else
-#  define RTREE_GET_VALIDATE
-#endif
-RTREE_GET_GENERATE(rtree_get)
-#undef RTREE_LOCK
-#undef RTREE_UNLOCK
-#undef RTREE_GET_VALIDATE
+       return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
+           rtree->levels[level].cumbits)) & ((ZU(1) <<
+           rtree->levels[level].bits) - 1));
+}
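To make the shift/mask above concrete, here is a worked decomposition assuming 64-bit pointers (key width 1 << (LG_SIZEOF_PTR+3) == 64), the LG_RTREE_BITS_PER_LEVEL == 4 setting from this header, and a full-height tree (RTREE_HEIGHT_MAX == 4 levels); the key value is illustrative:

/*
 * With 16 bits per level, levels[i].bits == 16 and
 * levels[i].cumbits == 16 * (i + 1), so
 * rtree_subkey(key, i) == (key >> (64 - 16 * (i + 1))) & 0xffff.
 *
 * For key == 0x0000123456789abc:
 *   level 0: (key >> 48) & 0xffff == 0x0000
 *   level 1: (key >> 32) & 0xffff == 0x1234
 *   level 2: (key >> 16) & 0xffff == 0x5678
 *   level 3: (key >>  0) & 0xffff == 0x9abc
 *
 * Because the level-0 subkey is zero for any key below 2^48,
 * rtree_start_level() lets traversal begin one level down, which is the
 * "subtrees[0] unused" case described in the rtree_level_s comment.
 */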
 
 JEMALLOC_INLINE bool
-rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
+rtree_node_valid(rtree_node_elm_t *node)
+{
+
+       return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
+}
+
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_child_tryread(rtree_node_elm_t *elm)
+{
+       rtree_node_elm_t *child;
+
+	/* Double-checked read (first read may be stale). */
+       child = elm->child;
+       if (!rtree_node_valid(child))
+               child = atomic_read_p(&elm->pun);
+       return (child);
+}
+
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
+{
+       rtree_node_elm_t *child;
+
+       child = rtree_child_tryread(elm);
+       if (unlikely(!rtree_node_valid(child)))
+               child = rtree_child_read_hard(rtree, elm, level);
+       return (child);
+}
+
+JEMALLOC_INLINE extent_node_t *
+rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
+{
+
+       if (dependent) {
+               /*
+                * Reading a val on behalf of a pointer to a valid allocation is
+                * guaranteed to be a clean read even without synchronization,
+                * because the rtree update became visible in memory before the
+                * pointer came into existence.
+                */
+               return (elm->val);
+       } else {
+               /*
+                * An arbitrary read, e.g. on behalf of ivsalloc(), may not be
+                * dependent on a previous rtree write, which means a stale read
+                * could result if synchronization were omitted here.
+                */
+               return (atomic_read_p(&elm->pun));
+       }
+}
+
+JEMALLOC_INLINE void
+rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
+{
+
+       atomic_write_p(&elm->pun, val);
+}
+
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_subtree_tryread(rtree_t *rtree, unsigned level)
+{
+       rtree_node_elm_t *subtree;
+
+	/* Double-checked read (first read may be stale). */
+       subtree = rtree->levels[level].subtree;
+       if (!rtree_node_valid(subtree))
+               subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
+       return (subtree);
+}
+
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_subtree_read(rtree_t *rtree, unsigned level)
+{
+       rtree_node_elm_t *subtree;
+
+       subtree = rtree_subtree_tryread(rtree, level);
+       if (unlikely(!rtree_node_valid(subtree)))
+               subtree = rtree_subtree_read_hard(rtree, level);
+       return (subtree);
+}
+
+JEMALLOC_INLINE extent_node_t *
+rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
 {
        uintptr_t subkey;
-       unsigned i, lshift, height, bits;
-       void **node, **child;
-
-       malloc_mutex_lock(&rtree->mutex);
-       for (i = lshift = 0, height = rtree->height, node = rtree->root;
-           i < height - 1;
-           i++, lshift += bits, node = child) {
-               bits = rtree->level2bits[i];
-               subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
-                   bits);
-               child = (void**)node[subkey];
-               if (child == NULL) {
-                       size_t size = ((i + 1 < height - 1) ? sizeof(void *)
-                           : (sizeof(uint8_t))) << rtree->level2bits[i+1];
-                       child = (void**)rtree->alloc(size);
-                       if (child == NULL) {
-                               malloc_mutex_unlock(&rtree->mutex);
-                               return (true);
-                       }
-                       memset(child, 0, size);
-                       node[subkey] = child;
+       unsigned i, start_level;
+       rtree_node_elm_t *node, *child;
+
+       start_level = rtree_start_level(rtree, key);
+
+       for (i = start_level, node = rtree_subtree_tryread(rtree, start_level);
+           /**/; i++, node = child) {
+               if (!dependent && unlikely(!rtree_node_valid(node)))
+                       return (NULL);
+               subkey = rtree_subkey(rtree, key, i);
+               if (i == rtree->height - 1) {
+                       /*
+                        * node is a leaf, so it contains values rather than
+                        * child pointers.
+                        */
+                       return (rtree_val_read(rtree, &node[subkey],
+                           dependent));
                }
+               assert(i < rtree->height - 1);
+               child = rtree_child_tryread(&node[subkey]);
        }
+       not_reached();
+}
 
-       /* node is a leaf, so it contains values rather than node pointers. */
-       bits = rtree->level2bits[i];
-       subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
-       {
-               uint8_t *leaf = (uint8_t *)node;
-               leaf[subkey] = val;
-       }
-       malloc_mutex_unlock(&rtree->mutex);
+JEMALLOC_INLINE bool
+rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
+{
+       uintptr_t subkey;
+       unsigned i, start_level;
+       rtree_node_elm_t *node, *child;
 
-       return (false);
+       start_level = rtree_start_level(rtree, key);
+
+       node = rtree_subtree_read(rtree, start_level);
+       if (node == NULL)
+               return (true);
+       for (i = start_level; /**/; i++, node = child) {
+               subkey = rtree_subkey(rtree, key, i);
+               if (i == rtree->height - 1) {
+                       /*
+                        * node is a leaf, so it contains values rather than
+                        * child pointers.
+                        */
+                       rtree_val_write(rtree, &node[subkey], val);
+                       return (false);
+               }
+               assert(i + 1 < rtree->height);
+               child = rtree_child_read(rtree, &node[subkey], i);
+               if (child == NULL)
+                       return (true);
+       }
+       not_reached();
 }
 #endif
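A hedged usage sketch of the reworked interface (not part of the commit): the callback bodies, the 48-bit key width, and the true-on-failure return convention are assumptions based on the declarations and typedef comments above.

/* Assumes <stdlib.h> and the jemalloc internal headers are included. */
static rtree_node_elm_t *
rt_node_alloc(size_t nelms)
{
	/* Per the typedef comment, the returned memory must be zeroed. */
	return ((rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t)));
}

static void
rt_node_dalloc(rtree_node_elm_t *node)
{
	free(node);
}

static extent_node_t *
rt_track_chunk(rtree_t *rtree, uintptr_t chunk_addr, extent_node_t *node)
{
	if (rtree_new(rtree, 48, rt_node_alloc, rt_node_dalloc))
		return (NULL);		/* Initialization failed. */
	if (rtree_set(rtree, chunk_addr, node))
		return (NULL);		/* Node allocation failed. */
	/* A non-dependent lookup may legitimately return NULL. */
	return (rtree_get(rtree, chunk_addr, false));
}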
 
index 0cfac72dede2a1ea8b6e261e68f1102d7778f7a8..2b0ca29ac5debcd76e79ca2b528acdf59fa5121f 100755 (executable)
@@ -1,4 +1,6 @@
 #!/bin/sh
+#
+# Usage: size_classes.sh <lg_qarr> <lg_tmin> <lg_parr> <lg_g>
 
 # The following limits are chosen such that they cover all supported platforms.
 
@@ -6,19 +8,19 @@
 lg_zarr="2 3"
 
 # Quanta.
-lg_qarr="3 4"
+lg_qarr=$1
 
 # The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)].
-lg_tmin=3
+lg_tmin=$2
 
 # Maximum lookup size.
 lg_kmax=12
 
 # Page sizes.
-lg_parr="12 13 16"
+lg_parr=`echo $3 | tr ',' ' '`
 
 # Size class group size (number of size classes for each size doubling).
-lg_g=2
+lg_g=$4
 
 pow2() {
   e=$1
@@ -61,7 +63,7 @@ size_class() {
     rem="yes"
   fi
 
-  if [ ${lg_size} -lt ${lg_p} ] ; then
+  if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then
     bin="yes"
   else
     bin="no"
@@ -140,10 +142,10 @@ size_classes() {
 
   # All remaining groups.
   lg_grp=$((${lg_grp} + ${lg_g}))
-  while [ ${lg_grp} -lt ${ptr_bits} ] ; do
+  while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do
     sep_line
     ndelta=1
-    if [ ${lg_grp} -eq $((${ptr_bits} - 1)) ] ; then
+    if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then
       ndelta_limit=$((${g} - 1))
     else
       ndelta_limit=${g}
@@ -159,7 +161,14 @@ size_classes() {
         nbins=$((${index} + 1))
         # Final written value is correct:
         small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+        if [ ${lg_g} -gt 0 ] ; then
+          lg_large_minclass=$((${lg_grp} + 1))
+        else
+          lg_large_minclass=$((${lg_grp} + 2))
+        fi
       fi
+      # Final written value is correct:
+      huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
       index=$((${index} + 1))
       ndelta=$((${ndelta} + 1))
     done
@@ -167,14 +176,18 @@ size_classes() {
     lg_delta=$((${lg_delta} + 1))
   done
   echo
+  nsizes=${index}
 
   # Defined upon completion:
   # - ntbins
   # - nlbins
   # - nbins
+  # - nsizes
   # - lg_tiny_maxclass
   # - lookup_maxclass
   # - small_maxclass
+  # - lg_large_minclass
+  # - huge_maxclass
 }
 
 cat <<EOF
@@ -188,7 +201,8 @@ cat <<EOF
  *
  *   LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
  *   SIZE_CLASSES: Complete table of
- *                 SC(index, lg_delta, size, bin, lg_delta_lookup) tuples.
+ *                 SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)
+ *                 tuples.
  *     index: Size class index.
  *     lg_grp: Lg group base size (no deltas added).
  *     lg_delta: Lg delta to previous size class.
@@ -199,10 +213,12 @@ cat <<EOF
  *   NTBINS: Number of tiny bins.
  *   NLBINS: Number of bins supported by the lookup table.
  *   NBINS: Number of small size class bins.
+ *   NSIZES: Number of size classes.
  *   LG_TINY_MAXCLASS: Lg of maximum tiny size class.
  *   LOOKUP_MAXCLASS: Maximum size class included in lookup table.
  *   SMALL_MAXCLASS: Maximum small size class.
- *   LARGE_MINCLASS: Minimum large size class.
+ *   LG_LARGE_MINCLASS: Lg of minimum large size class.
+ *   HUGE_MAXCLASS: Maximum (huge) size class.
  */
 
 #define        LG_SIZE_CLASS_GROUP     ${lg_g}
@@ -221,9 +237,12 @@ for lg_z in ${lg_zarr} ; do
         echo "#define  NTBINS                  ${ntbins}"
         echo "#define  NLBINS                  ${nlbins}"
         echo "#define  NBINS                   ${nbins}"
+        echo "#define  NSIZES                  ${nsizes}"
         echo "#define  LG_TINY_MAXCLASS        ${lg_tiny_maxclass}"
         echo "#define  LOOKUP_MAXCLASS         ${lookup_maxclass}"
         echo "#define  SMALL_MAXCLASS          ${small_maxclass}"
+        echo "#define  LG_LARGE_MINCLASS       ${lg_large_minclass}"
+        echo "#define  HUGE_MAXCLASS           ${huge_maxclass}"
         echo "#endif"
         echo
       done
@@ -238,7 +257,7 @@ cat <<EOF
 #endif
 #undef SIZE_CLASSES_DEFINED
 /*
- * The small_size2bin lookup table uses uint8_t to encode each bin index, so we
+ * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
  * cannot support more than 256 small size classes.  Further constrain NBINS to
  * 255 since all small size classes, plus a "not small" size class must be
  * stored in 8 bits of arena_chunk_map_bits_t's bits field.
@@ -247,8 +266,6 @@ cat <<EOF
 #  error "Too many small size classes"
 #endif
 
-#define        LARGE_MINCLASS (PAGE_CEILING(SMALL_MAXCLASS+1))
-
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
diff --git a/src/jemalloc/include/jemalloc/internal/smoothstep.h b/src/jemalloc/include/jemalloc/internal/smoothstep.h
new file mode 100644 (file)
index 0000000..c5333cc
--- /dev/null
@@ -0,0 +1,246 @@
+/*
+ * This file was generated by the following command:
+ *   sh smoothstep.sh smoother 200 24 3 15
+ */
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/*
+ * This header defines a precomputed table based on the smoothstep family of
+ * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
+ * to 1 in 0 <= x <= 1.  The table is stored as integer fixed point values so
+ * that floating point math can be avoided.
+ *
+ *                      3     2
+ *   smoothstep(x) = -2x  + 3x
+ *
+ *                       5      4      3
+ *   smootherstep(x) = 6x  - 15x  + 10x
+ *
+ *                          7      6      5      4
+ *   smootheststep(x) = -20x  + 70x  - 84x  + 35x
+ */
+
+#define        SMOOTHSTEP_VARIANT      "smoother"
+#define        SMOOTHSTEP_NSTEPS       200
+#define        SMOOTHSTEP_BFP          24
+#define        SMOOTHSTEP \
+ /* STEP(step, h,                            x,     y) */ \
+    STEP(   1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
+    STEP(   2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
+    STEP(   3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
+    STEP(   4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
+    STEP(   5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
+    STEP(   6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
+    STEP(   7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
+    STEP(   8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
+    STEP(   9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
+    STEP(  10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
+    STEP(  11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
+    STEP(  12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
+    STEP(  13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
+    STEP(  14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
+    STEP(  15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
+    STEP(  16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
+    STEP(  17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
+    STEP(  18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
+    STEP(  19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
+    STEP(  20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
+    STEP(  21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
+    STEP(  22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
+    STEP(  23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
+    STEP(  24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
+    STEP(  25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
+    STEP(  26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
+    STEP(  27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
+    STEP(  28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
+    STEP(  29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
+    STEP(  30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
+    STEP(  31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
+    STEP(  32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
+    STEP(  33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
+    STEP(  34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
+    STEP(  35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
+    STEP(  36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
+    STEP(  37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
+    STEP(  38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
+    STEP(  39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
+    STEP(  40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
+    STEP(  41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
+    STEP(  42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
+    STEP(  43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
+    STEP(  44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
+    STEP(  45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
+    STEP(  46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
+    STEP(  47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
+    STEP(  48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
+    STEP(  49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
+    STEP(  50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
+    STEP(  51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
+    STEP(  52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
+    STEP(  53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
+    STEP(  54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
+    STEP(  55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
+    STEP(  56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
+    STEP(  57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
+    STEP(  58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
+    STEP(  59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
+    STEP(  60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
+    STEP(  61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
+    STEP(  62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
+    STEP(  63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
+    STEP(  64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
+    STEP(  65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
+    STEP(  66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
+    STEP(  67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
+    STEP(  68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
+    STEP(  69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
+    STEP(  70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
+    STEP(  71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
+    STEP(  72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
+    STEP(  73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
+    STEP(  74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
+    STEP(  75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
+    STEP(  76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
+    STEP(  77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
+    STEP(  78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
+    STEP(  79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
+    STEP(  80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
+    STEP(  81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
+    STEP(  82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
+    STEP(  83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
+    STEP(  84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
+    STEP(  85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
+    STEP(  86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
+    STEP(  87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
+    STEP(  88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
+    STEP(  89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
+    STEP(  90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
+    STEP(  91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
+    STEP(  92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
+    STEP(  93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
+    STEP(  94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
+    STEP(  95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
+    STEP(  96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
+    STEP(  97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
+    STEP(  98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
+    STEP(  99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
+    STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
+    STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
+    STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
+    STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
+    STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
+    STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
+    STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
+    STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
+    STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
+    STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
+    STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
+    STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
+    STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
+    STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
+    STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
+    STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
+    STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
+    STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
+    STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
+    STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
+    STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
+    STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
+    STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
+    STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
+    STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
+    STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
+    STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
+    STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
+    STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
+    STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
+    STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
+    STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
+    STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
+    STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
+    STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
+    STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
+    STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
+    STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
+    STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
+    STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
+    STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
+    STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
+    STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
+    STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
+    STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
+    STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
+    STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
+    STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
+    STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
+    STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
+    STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
+    STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
+    STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
+    STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
+    STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
+    STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
+    STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
+    STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
+    STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
+    STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
+    STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
+    STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
+    STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
+    STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
+    STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
+    STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
+    STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
+    STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
+    STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
+    STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
+    STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
+    STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
+    STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
+    STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
+    STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
+    STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
+    STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
+    STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
+    STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
+    STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
+    STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
+    STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
+    STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
+    STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
+    STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
+    STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
+    STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
+    STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
+    STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
+    STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
+    STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
+    STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
+    STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
+    STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
+    STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
+    STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
+    STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
+    STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
+    STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
+    STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
+    STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
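The encoding can be checked directly against the polynomial in the header comment: each h entry is y scaled by 2^SMOOTHSTEP_BFP (2^24 here) and truncated to an integer. For step 100, x = 0.5 and

    smootherstep(0.5) = 6(0.5)^5 - 15(0.5)^4 + 10(0.5)^3
                      = 0.1875 - 0.9375 + 1.25 = 0.5,

so h = 0.5 * 2^24 = 0x800000, matching STEP(100, ...) above; step 200 gives y = 1 and h = 2^24 = 0x1000000.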
diff --git a/src/jemalloc/include/jemalloc/internal/smoothstep.sh b/src/jemalloc/include/jemalloc/internal/smoothstep.sh
new file mode 100755 (executable)
index 0000000..8124693
--- /dev/null
@@ -0,0 +1,115 @@
+#!/bin/sh
+#
+# Generate a discrete lookup table for a sigmoid function in the smoothstep
+# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table
+# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps].  Encode
+# the entries using a binary fixed point representation.
+#
+# Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec>
+#
+#        <variant> is in {smooth, smoother, smoothest}.
+#        <nsteps> must be greater than zero.
+#        <bfp> must be in [0..62]; reasonable values are roughly [10..30].
+#        <xprec> is x decimal precision.
+#        <yprec> is y decimal precision.
+
+#set -x
+
+cmd="sh smoothstep.sh $*"
+variant=$1
+nsteps=$2
+bfp=$3
+xprec=$4
+yprec=$5
+
+case "${variant}" in
+  smooth)
+    ;;
+  smoother)
+    ;;
+  smoothest)
+    ;;
+  *)
+    echo "Unsupported variant"
+    exit 1
+    ;;
+esac
+
+smooth() {
+  step=$1
+  y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
+}
+
+smoother() {
+  step=$1
+  y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
+}
+
+smoothest() {
+  step=$1
+  y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
+}
+
+cat <<EOF
+/*
+ * This file was generated by the following command:
+ *   $cmd
+ */
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/*
+ * This header defines a precomputed table based on the smoothstep family of
+ * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
+ * to 1 in 0 <= x <= 1.  The table is stored as integer fixed point values so
+ * that floating point math can be avoided.
+ *
+ *                      3     2
+ *   smoothstep(x) = -2x  + 3x
+ *
+ *                       5      4      3
+ *   smootherstep(x) = 6x  - 15x  + 10x
+ *
+ *                          7      6      5      4
+ *   smootheststep(x) = -20x  + 70x  - 84x  + 35x
+ */
+
+#define        SMOOTHSTEP_VARIANT      "${variant}"
+#define        SMOOTHSTEP_NSTEPS       ${nsteps}
+#define        SMOOTHSTEP_BFP          ${bfp}
+#define        SMOOTHSTEP \\
+ /* STEP(step, h,                            x,     y) */ \\
+EOF
+
+s=1
+while [ $s -le $nsteps ] ; do
+  $variant ${s}
+  x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+  printf '    STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n' ${s} ${h} ${x} ${y}
+
+  s=$((s+1))
+done
+echo
+
+cat <<EOF
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+EOF
index ce96476ad75d558125683118a4aca4cc812c94bd..705903adf45430119885aeedf12cce2a357212b5 100644 (file)
@@ -4,6 +4,7 @@
 typedef struct tcache_bin_stats_s tcache_bin_stats_t;
 typedef struct malloc_bin_stats_s malloc_bin_stats_t;
 typedef struct malloc_large_stats_s malloc_large_stats_t;
+typedef struct malloc_huge_stats_s malloc_huge_stats_t;
 typedef struct arena_stats_s arena_stats_t;
 typedef struct chunk_stats_s chunk_stats_t;
 
@@ -20,12 +21,6 @@ struct tcache_bin_stats_s {
 };
 
 struct malloc_bin_stats_s {
-       /*
-        * Current number of bytes allocated, including objects currently
-        * cached by tcache.
-        */
-       size_t          allocated;
-
        /*
         * Total number of allocation/deallocation requests served directly by
         * the bin.  Note that tcache may allocate an object, then recycle it
@@ -42,6 +37,12 @@ struct malloc_bin_stats_s {
         */
        uint64_t        nrequests;
 
+       /*
+        * Current number of regions of this size class, including regions
+        * currently cached by tcache.
+        */
+       size_t          curregs;
+
        /* Number of tcache fills from this bin. */
        uint64_t        nfills;
 
@@ -78,10 +79,25 @@ struct malloc_large_stats_s {
         */
        uint64_t        nrequests;
 
-       /* Current number of runs of this size class. */
+       /*
+        * Current number of runs of this size class, including runs currently
+        * cached by tcache.
+        */
        size_t          curruns;
 };
 
+struct malloc_huge_stats_s {
+       /*
+        * Total number of allocation/deallocation requests served directly by
+        * the arena.
+        */
+       uint64_t        nmalloc;
+       uint64_t        ndalloc;
+
+       /* Current number of (multi-)chunk allocations of this size class. */
+       size_t          curhchunks;
+};
+
 struct arena_stats_s {
        /* Number of bytes currently mapped. */
        size_t          mapped;
@@ -95,6 +111,13 @@ struct arena_stats_s {
        uint64_t        nmadvise;
        uint64_t        purged;
 
+       /*
+        * Number of bytes currently mapped purely for metadata purposes, and
+        * number of bytes currently allocated for internal metadata.
+        */
+       size_t          metadata_mapped;
+       size_t          metadata_allocated; /* Protected via atomic_*_z(). */
+
        /* Per-size-category statistics. */
        size_t          allocated_large;
        uint64_t        nmalloc_large;
@@ -104,30 +127,12 @@ struct arena_stats_s {
        size_t          allocated_huge;
        uint64_t        nmalloc_huge;
        uint64_t        ndalloc_huge;
-       uint64_t        nrequests_huge;
 
-       /*
-        * One element for each possible size class, including sizes that
-        * overlap with bin size classes.  This is necessary because ipalloc()
-        * sometimes has to use such large objects in order to assure proper
-        * alignment.
-        */
+       /* One element for each large size class. */
        malloc_large_stats_t    *lstats;
-};
-
-struct chunk_stats_s {
-       /* Number of chunks that were allocated. */
-       uint64_t        nchunks;
 
-       /* High-water mark for number of chunks allocated. */
-       size_t          highchunks;
-
-       /*
-        * Current number of chunks allocated.  This value isn't maintained for
-        * any other purpose, so keep track of it in order to be able to set
-        * highchunks.
-        */
-       size_t          curchunks;
+       /* One element for each huge size class. */
+       malloc_huge_stats_t     *hstats;
 };
 
 #endif /* JEMALLOC_H_STRUCTS */
@@ -162,15 +167,25 @@ stats_cactive_get(void)
 JEMALLOC_INLINE void
 stats_cactive_add(size_t size)
 {
+       UNUSED size_t cactive;
+
+       assert(size > 0);
+       assert((size & chunksize_mask) == 0);
 
-       atomic_add_z(&stats_cactive, size);
+       cactive = atomic_add_z(&stats_cactive, size);
+       assert(cactive - size < cactive);
 }
 
 JEMALLOC_INLINE void
 stats_cactive_sub(size_t size)
 {
+       UNUSED size_t cactive;
+
+       assert(size > 0);
+       assert((size & chunksize_mask) == 0);
 
-       atomic_sub_z(&stats_cactive, size);
+       cactive = atomic_sub_z(&stats_cactive, size);
+       assert(cactive + size > cactive);
 }
 #endif
 
index 1b1d8d98b9c0a4fbca1b804937ba05f483ad89f7..8357820bdfd738e99fafe61ef30533a946100965 100644 (file)
@@ -4,6 +4,7 @@
 typedef struct tcache_bin_info_s tcache_bin_info_t;
 typedef struct tcache_bin_s tcache_bin_t;
 typedef struct tcache_s tcache_t;
+typedef struct tcaches_s tcaches_t;
 
 /*
  * tcache pointers close to NULL are used to encode state information that is
@@ -15,6 +16,11 @@ typedef struct tcache_s tcache_t;
 #define        TCACHE_STATE_PURGATORY          ((tcache_t *)(uintptr_t)3)
 #define        TCACHE_STATE_MAX                TCACHE_STATE_PURGATORY
 
+/*
+ * Absolute minimum number of cache slots for each small bin.
+ */
+#define        TCACHE_NSLOTS_SMALL_MIN         20
+
 /*
  * Absolute maximum number of cache slots for each small bin in the thread
  * cache.  This is an additional constraint beyond that imposed as: twice the
@@ -64,15 +70,21 @@ struct tcache_bin_s {
        int             low_water;      /* Min # cached since last GC. */
        unsigned        lg_fill_div;    /* Fill (ncached_max >> lg_fill_div). */
        unsigned        ncached;        /* # of cached objects. */
+       /*
+        * To make use of adjacent cacheline prefetch, the items in the avail
+        * stack go to higher addresses for newer allocations.  avail points
+        * just above the available space, which means that
+        * avail[-ncached, ... -1] are available items and the lowest item will
+        * be allocated first.
+        */
        void            **avail;        /* Stack of available objects. */
 };
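A minimal pop consistent with the layout described in the comment above (illustrative only; the real fast path lives in the tcache inline functions, which this hunk does not touch):

static void *
tbin_pop_sketch(tcache_bin_t *tbin)
{
	void *ret;

	if (tbin->ncached == 0)
		return (NULL);
	/* avail[-ncached .. -1] hold cached objects; hand out the lowest. */
	ret = *(tbin->avail - tbin->ncached);
	tbin->ncached--;
	return (ret);
}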
 
 struct tcache_s {
        ql_elm(tcache_t) link;          /* Used for aggregating stats. */
-       uint64_t        prof_accumbytes;/* Cleared after arena_prof_accum() */
-       arena_t         *arena;         /* This thread's arena. */
-       unsigned        ev_cnt;         /* Event count since incremental GC. */
-       unsigned        next_gc_bin;    /* Next bin to GC. */
+       uint64_t        prof_accumbytes;/* Cleared after arena_prof_accum(). */
+       ticker_t        gc_ticker;      /* Drives incremental GC. */
+       szind_t         next_gc_bin;    /* Next bin to GC. */
        tcache_bin_t    tbins[1];       /* Dynamically sized. */
        /*
         * The pointer stacks associated with tbins follow as a contiguous
@@ -82,6 +94,14 @@ struct tcache_s {
         */
 };
 
+/* Linkage for list of available (previously used) explicit tcache IDs. */
+struct tcaches_s {
+       union {
+               tcache_t        *tcache;
+               tcaches_t       *next;
+       };
+};
+
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
@@ -95,26 +115,41 @@ extern tcache_bin_info_t   *tcache_bin_info;
  * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
  * large-object bins.
  */
-extern size_t                  nhbins;
+extern unsigned        nhbins;
 
 /* Maximum cached size class. */
-extern size_t                  tcache_maxclass;
+extern size_t  tcache_maxclass;
+
+/*
+ * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
+ * usable via the MALLOCX_TCACHE() flag.  The automatic per thread tcaches are
+ * completely disjoint from this data structure.  tcaches starts off as a sparse
+ * array, so it has no physical memory footprint until individual pages are
+ * touched.  This allows the entire array to be allocated the first time an
+ * explicit tcache is created without a disproportionate impact on memory usage.
+ */
+extern tcaches_t       *tcaches;
 
 size_t tcache_salloc(const void *ptr);
-void   tcache_event_hard(tcache_t *tcache);
-void   *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
-    size_t binind);
-void   tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
-    tcache_t *tcache);
-void   tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
-    tcache_t *tcache);
+void   tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
+void   *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
+void   tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+    szind_t binind, unsigned rem);
+void   tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+    unsigned rem, tcache_t *tcache);
 void   tcache_arena_associate(tcache_t *tcache, arena_t *arena);
-void   tcache_arena_dissociate(tcache_t *tcache);
+void   tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
+    arena_t *newarena);
+void   tcache_arena_dissociate(tcache_t *tcache, arena_t *arena);
 tcache_t *tcache_get_hard(tsd_t *tsd);
-tcache_t *tcache_create(arena_t *arena);
+tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
 void   tcache_cleanup(tsd_t *tsd);
 void   tcache_enabled_cleanup(tsd_t *tsd);
 void   tcache_stats_merge(tcache_t *tcache, arena_t *arena);
+bool   tcaches_create(tsd_t *tsd, unsigned *r_ind);
+void   tcaches_flush(tsd_t *tsd, unsigned ind);
+void   tcaches_destroy(tsd_t *tsd, unsigned ind);
 bool   tcache_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
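
tcaches_create()/tcaches_destroy() hand out small integer IDs, and the tcaches_s union above lets each slot hold either a live tcache pointer or a link in a free list of previously used IDs, so destroyed IDs can be recycled without scanning the array. A rough sketch of that ID-recycling idea (fixed-size array and hypothetical names; the real functions take a tsd_t and allocate the array lazily):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct tcache_s { int dummy; } tcache_t;

    /* A slot is either a live cache or a link in the free list of spare IDs. */
    typedef struct elm_s {
        union {
            tcache_t     *tcache;
            struct elm_s *next;
        };
    } elm_t;

    #define NSLOTS 64
    static elm_t     slots[NSLOTS];     /* sparse in jemalloc; fixed here */
    static elm_t    *avail_list;        /* head of the recycled-ID free list */
    static unsigned  past;              /* high-water mark of handed-out IDs */

    static bool
    id_create(tcache_t *tc, unsigned *r_ind)
    {
        elm_t *elm;

        if (avail_list != NULL) {        /* reuse a previously destroyed ID */
            elm = avail_list;
            avail_list = avail_list->next;
            *r_ind = (unsigned)(elm - slots);
        } else {
            if (past == NSLOTS)
                return true;             /* out of IDs */
            elm = &slots[past];
            *r_ind = past++;
        }
        elm->tcache = tc;
        return false;
    }

    static void
    id_destroy(unsigned ind)
    {
        /* (the real code tears the tcache down first) */
        slots[ind].next = avail_list;    /* ID goes onto the free list for reuse */
        avail_list = &slots[ind];
    }

    int
    main(void)
    {
        tcache_t tc;
        unsigned a, b;

        id_create(&tc, &a);
        id_create(&tc, &b);
        id_destroy(a);
        id_create(&tc, &a);      /* gets the recycled ID back */
        printf("%u %u\n", a, b); /* prints: 0 1 */
        return 0;
    }
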
@@ -122,16 +157,21 @@ bool      tcache_boot(void);
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
-void   tcache_event(tcache_t *tcache);
+void   tcache_event(tsd_t *tsd, tcache_t *tcache);
 void   tcache_flush(void);
 bool   tcache_enabled_get(void);
 tcache_t *tcache_get(tsd_t *tsd, bool create);
 void   tcache_enabled_set(bool enabled);
-void   *tcache_alloc_easy(tcache_bin_t *tbin);
-void   *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
-void   *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
-void   tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
-void   tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
+void   *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
+void   *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    size_t size, szind_t ind, bool zero, bool slow_path);
+void   *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    size_t size, szind_t ind, bool zero, bool slow_path);
+void   tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
+    szind_t binind, bool slow_path);
+void   tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
+    size_t size, bool slow_path);
+tcache_t       *tcaches_get(tsd_t *tsd, unsigned ind);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
@@ -201,100 +241,137 @@ tcache_get(tsd_t *tsd, bool create)
 }
 
 JEMALLOC_ALWAYS_INLINE void
-tcache_event(tcache_t *tcache)
+tcache_event(tsd_t *tsd, tcache_t *tcache)
 {
 
        if (TCACHE_GC_INCR == 0)
                return;
 
-       tcache->ev_cnt++;
-       assert(tcache->ev_cnt <= TCACHE_GC_INCR);
-       if (unlikely(tcache->ev_cnt == TCACHE_GC_INCR))
-               tcache_event_hard(tcache);
+       if (unlikely(ticker_tick(&tcache->gc_ticker)))
+               tcache_event_hard(tsd, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_easy(tcache_bin_t *tbin)
+tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
 {
        void *ret;
 
        if (unlikely(tbin->ncached == 0)) {
                tbin->low_water = -1;
+               *tcache_success = false;
                return (NULL);
        }
+       /*
+        * tcache_success (instead of ret) should be checked upon the return of
+        * this function.  We avoid checking (ret == NULL) because there is
+        * never a null stored on the avail stack (which is unknown to the
+        * compiler), and eagerly checking ret would cause pipeline stall
+        * (waiting for the cacheline).
+        */
+       *tcache_success = true;
+       ret = *(tbin->avail - tbin->ncached);
        tbin->ncached--;
+
        if (unlikely((int)tbin->ncached < tbin->low_water))
                tbin->low_water = tbin->ncached;
-       ret = tbin->avail[tbin->ncached];
+
        return (ret);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
+tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+    szind_t binind, bool zero, bool slow_path)
 {
        void *ret;
-       size_t binind;
        tcache_bin_t *tbin;
+       bool tcache_success;
+       size_t usize JEMALLOC_CC_SILENCE_INIT(0);
 
-       binind = small_size2bin(size);
        assert(binind < NBINS);
        tbin = &tcache->tbins[binind];
-       size = small_bin2size(binind);
-       ret = tcache_alloc_easy(tbin);
-       if (unlikely(ret == NULL)) {
-               ret = tcache_alloc_small_hard(tcache, tbin, binind);
-               if (ret == NULL)
+       ret = tcache_alloc_easy(tbin, &tcache_success);
+       assert(tcache_success == (ret != NULL));
+       if (unlikely(!tcache_success)) {
+               bool tcache_hard_success;
+               arena = arena_choose(tsd, arena);
+               if (unlikely(arena == NULL))
                        return (NULL);
+
+               ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind,
+                       &tcache_hard_success);
+               if (tcache_hard_success == false)
+                       return (NULL);
+       }
+
+       assert(ret);
+       /*
+        * Only compute usize if required.  The checks in the following if
+        * statement are all static.
+        */
+       if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
+               usize = index2size(binind);
+               assert(tcache_salloc(ret) == usize);
        }
-       assert(tcache_salloc(ret) == size);
 
        if (likely(!zero)) {
-               if (config_fill) {
-                       if (unlikely(opt_junk)) {
+               if (slow_path && config_fill) {
+                       if (unlikely(opt_junk_alloc)) {
                                arena_alloc_junk_small(ret,
                                    &arena_bin_info[binind], false);
                        } else if (unlikely(opt_zero))
-                               memset(ret, 0, size);
+                               memset(ret, 0, usize);
                }
        } else {
-               if (config_fill && unlikely(opt_junk)) {
+               if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
                        arena_alloc_junk_small(ret, &arena_bin_info[binind],
                            true);
                }
-               memset(ret, 0, size);
+               memset(ret, 0, usize);
        }
 
        if (config_stats)
                tbin->tstats.nrequests++;
        if (config_prof)
-               tcache->prof_accumbytes += size;
-       tcache_event(tcache);
+               tcache->prof_accumbytes += usize;
+       tcache_event(tsd, tcache);
        return (ret);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
+tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+    szind_t binind, bool zero, bool slow_path)
 {
        void *ret;
-       size_t binind;
        tcache_bin_t *tbin;
+       bool tcache_success;
 
-       size = PAGE_CEILING(size);
-       assert(size <= tcache_maxclass);
-       binind = NBINS + (size >> LG_PAGE) - 1;
        assert(binind < nhbins);
        tbin = &tcache->tbins[binind];
-       ret = tcache_alloc_easy(tbin);
-       if (unlikely(ret == NULL)) {
+       ret = tcache_alloc_easy(tbin, &tcache_success);
+       assert(tcache_success == (ret != NULL));
+       if (unlikely(!tcache_success)) {
                /*
                 * Only allocate one large object at a time, because it's quite
                 * expensive to create one and not use it.
                 */
-               ret = arena_malloc_large(tcache->arena, size, zero);
+               arena = arena_choose(tsd, arena);
+               if (unlikely(arena == NULL))
+                       return (NULL);
+
+               ret = arena_malloc_large(tsd, arena, binind, zero);
                if (ret == NULL)
                        return (NULL);
        } else {
-               if (config_prof && size == PAGE) {
+               size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+
+               /* Only compute usize on demand */
+               if (config_prof || (slow_path && config_fill) ||
+                   unlikely(zero)) {
+                       usize = index2size(binind);
+                       assert(usize <= tcache_maxclass);
+               }
+
+               if (config_prof && usize == LARGE_MINCLASS) {
                        arena_chunk_t *chunk =
                            (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
                        size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
@@ -303,53 +380,55 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
                            BININD_INVALID);
                }
                if (likely(!zero)) {
-                       if (config_fill) {
-                               if (unlikely(opt_junk))
-                                       memset(ret, 0xa5, size);
+                       if (slow_path && config_fill) {
+                               if (unlikely(opt_junk_alloc))
+                                       memset(ret, 0xa5, usize);
                                else if (unlikely(opt_zero))
-                                       memset(ret, 0, size);
+                                       memset(ret, 0, usize);
                        }
                } else
-                       memset(ret, 0, size);
+                       memset(ret, 0, usize);
 
                if (config_stats)
                        tbin->tstats.nrequests++;
                if (config_prof)
-                       tcache->prof_accumbytes += size;
+                       tcache->prof_accumbytes += usize;
        }
 
-       tcache_event(tcache);
+       tcache_event(tsd, tcache);
        return (ret);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
+tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
+    bool slow_path)
 {
        tcache_bin_t *tbin;
        tcache_bin_info_t *tbin_info;
 
        assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
 
-       if (config_fill && unlikely(opt_junk))
+       if (slow_path && config_fill && unlikely(opt_junk_free))
                arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
 
        tbin = &tcache->tbins[binind];
        tbin_info = &tcache_bin_info[binind];
        if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
-               tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
-                   1), tcache);
+               tcache_bin_flush_small(tsd, tcache, tbin, binind,
+                   (tbin_info->ncached_max >> 1));
        }
        assert(tbin->ncached < tbin_info->ncached_max);
-       tbin->avail[tbin->ncached] = ptr;
        tbin->ncached++;
+       *(tbin->avail - tbin->ncached) = ptr;
 
-       tcache_event(tcache);
+       tcache_event(tsd, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
+tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
+    bool slow_path)
 {
-       size_t binind;
+       szind_t binind;
        tcache_bin_t *tbin;
        tcache_bin_info_t *tbin_info;
 
@@ -357,22 +436,31 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
        assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
        assert(tcache_salloc(ptr) <= tcache_maxclass);
 
-       binind = NBINS + (size >> LG_PAGE) - 1;
+       binind = size2index(size);
 
-       if (config_fill && unlikely(opt_junk))
-               memset(ptr, 0x5a, size);
+       if (slow_path && config_fill && unlikely(opt_junk_free))
+               arena_dalloc_junk_large(ptr, size);
 
        tbin = &tcache->tbins[binind];
        tbin_info = &tcache_bin_info[binind];
        if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
-               tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
-                   1), tcache);
+               tcache_bin_flush_large(tsd, tbin, binind,
+                   (tbin_info->ncached_max >> 1), tcache);
        }
        assert(tbin->ncached < tbin_info->ncached_max);
-       tbin->avail[tbin->ncached] = ptr;
        tbin->ncached++;
+       *(tbin->avail - tbin->ncached) = ptr;
 
-       tcache_event(tcache);
+       tcache_event(tsd, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_t *
+tcaches_get(tsd_t *tsd, unsigned ind)
+{
+       tcaches_t *elm = &tcaches[ind];
+       if (unlikely(elm->tcache == NULL))
+               elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
+       return (elm->tcache);
 }
 #endif
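
Throughout these inlines the new slow_path argument is, per the comments above, a static (compile-time) input at each call site, so the junk/zero-fill branches guarded by slow_path && config_fill can fold away in the fast path while remaining in the slow one. A generic, hedged sketch of that specialization pattern; alloc_impl/alloc_fast/alloc_slow and the flags below are made up for illustration:

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    static const bool config_fill = true;   /* stands in for a build-time option */
    static bool opt_junk_alloc = false;     /* runtime option, normally off */

    /* Inline worker: `slow_path` is expected to be a literal at the call site,
     * so the fill branch disappears from the fast-path expansion entirely. */
    static inline void *
    alloc_impl(size_t size, bool slow_path)
    {
        void *ret = malloc(size);

        if (ret != NULL && slow_path && config_fill && opt_junk_alloc)
            memset(ret, 0xa5, size);        /* junk fill only on the slow path */
        return ret;
    }

    void *alloc_fast(size_t size) { return alloc_impl(size, false); }
    void *alloc_slow(size_t size) { return alloc_impl(size, true); }

    int
    main(void)
    {
        void *p = alloc_fast(64);
        void *q = alloc_slow(64);
        free(p);
        free(q);
        return 0;
    }
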
 
diff --git a/src/jemalloc/include/jemalloc/internal/ticker.h b/src/jemalloc/include/jemalloc/internal/ticker.h
new file mode 100644 (file)
index 0000000..4696e56
--- /dev/null
@@ -0,0 +1,75 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct ticker_s ticker_t;
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct ticker_s {
+       int32_t tick;
+       int32_t nticks;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+void   ticker_init(ticker_t *ticker, int32_t nticks);
+void   ticker_copy(ticker_t *ticker, const ticker_t *other);
+int32_t        ticker_read(const ticker_t *ticker);
+bool   ticker_ticks(ticker_t *ticker, int32_t nticks);
+bool   ticker_tick(ticker_t *ticker);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_))
+JEMALLOC_INLINE void
+ticker_init(ticker_t *ticker, int32_t nticks)
+{
+
+       ticker->tick = nticks;
+       ticker->nticks = nticks;
+}
+
+JEMALLOC_INLINE void
+ticker_copy(ticker_t *ticker, const ticker_t *other)
+{
+
+       *ticker = *other;
+}
+
+JEMALLOC_INLINE int32_t
+ticker_read(const ticker_t *ticker)
+{
+
+       return (ticker->tick);
+}
+
+JEMALLOC_INLINE bool
+ticker_ticks(ticker_t *ticker, int32_t nticks)
+{
+
+       if (unlikely(ticker->tick < nticks)) {
+               ticker->tick = ticker->nticks;
+               return (true);
+       }
+       ticker->tick -= nticks;
+       return (false);
+}
+
+JEMALLOC_INLINE bool
+ticker_tick(ticker_t *ticker)
+{
+
+       return (ticker_ticks(ticker, 1));
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
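
tcache_event() earlier in this diff now defers to this ticker: it counts down from nticks and reports true once per period, which is when tcache_event_hard() runs. A self-contained copy of the countdown logic plus an illustrative event loop (the period and the loop are arbitrary here):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Standalone copy of the countdown in ticker_ticks()/ticker_tick() above. */
    typedef struct { int32_t tick, nticks; } ticker_t;

    static void
    ticker_init(ticker_t *t, int32_t nticks)
    {
        t->tick = nticks;
        t->nticks = nticks;
    }

    /* Returns true (and rearms) once the countdown is exhausted. */
    static bool
    ticker_tick(ticker_t *t)
    {
        if (t->tick < 1) {
            t->tick = t->nticks;
            return true;
        }
        t->tick -= 1;
        return false;
    }

    int
    main(void)
    {
        ticker_t gc_ticker;
        int i;

        ticker_init(&gc_ticker, 4);   /* arbitrary period for illustration */
        for (i = 1; i <= 10; i++) {
            if (ticker_tick(&gc_ticker))
                printf("event %d: run incremental GC\n", i);   /* fires at 5 and 10 */
        }
        return 0;
    }
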
index 25450391cecc7a2c84f6076a86723c282cc3d98d..16cc2f1720ec9c27b89097a16a48244f9d37093e 100644 (file)
@@ -2,7 +2,7 @@
 #ifdef JEMALLOC_H_TYPES
 
 /* Maximum number of malloc_tsd users with cleanup functions. */
-#define        MALLOC_TSD_CLEANUPS_MAX 8
+#define        MALLOC_TSD_CLEANUPS_MAX 2
 
 typedef bool (*malloc_tsd_cleanup_t)(void);
 
@@ -23,7 +23,7 @@ typedef enum {
 
 /*
  * TLS/TSD-agnostic macro-based implementation of thread-specific data.  There
- * are four macros that support (at least) three use cases: file-private,
+ * are five macros that support (at least) three use cases: file-private,
  * library-private, and library-private inlined.  Following is an example
  * library-private tsd variable:
  *
@@ -33,21 +33,22 @@ typedef enum {
  *           int y;
  *   } example_t;
  *   #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
- *   malloc_tsd_protos(, example_, example_t *)
- *   malloc_tsd_externs(example_, example_t *)
+ *   malloc_tsd_types(example_, example_t)
+ *   malloc_tsd_protos(, example_, example_t)
+ *   malloc_tsd_externs(example_, example_t)
  * In example.c:
- *   malloc_tsd_data(, example_, example_t *, EX_INITIALIZER)
- *   malloc_tsd_funcs(, example_, example_t *, EX_INITIALIZER,
+ *   malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
+ *   malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
  *       example_tsd_cleanup)
  *
  * The result is a set of generated functions, e.g.:
  *
  *   bool example_tsd_boot(void) {...}
- *   example_t **example_tsd_get() {...}
- *   void example_tsd_set(example_t **val) {...}
+ *   example_t *example_tsd_get() {...}
+ *   void example_tsd_set(example_t *val) {...}
  *
  * Note that all of the functions deal in terms of (a_type *) rather than
- * (a_type)  so that it is possible to support non-pointer types (unlike
+ * (a_type) so that it is possible to support non-pointer types (unlike
  * pthreads TSD).  example_tsd_cleanup() is passed an (a_type *) pointer that is
  * cast to (void *).  This means that the cleanup function needs to cast the
  * function argument to (a_type *), then dereference the resulting pointer to
@@ -70,9 +71,32 @@ typedef enum {
  * non-NULL.
  */
 
+/* malloc_tsd_types(). */
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+#define        malloc_tsd_types(a_name, a_type)
+#elif (defined(JEMALLOC_TLS))
+#define        malloc_tsd_types(a_name, a_type)
+#elif (defined(_WIN32))
+#define        malloc_tsd_types(a_name, a_type)                                \
+typedef struct {                                                       \
+       bool    initialized;                                            \
+       a_type  val;                                                    \
+} a_name##tsd_wrapper_t;
+#else
+#define        malloc_tsd_types(a_name, a_type)                                \
+typedef struct {                                                       \
+       bool    initialized;                                            \
+       a_type  val;                                                    \
+} a_name##tsd_wrapper_t;
+#endif
+
 /* malloc_tsd_protos(). */
 #define        malloc_tsd_protos(a_attr, a_name, a_type)                       \
 a_attr bool                                                            \
+a_name##tsd_boot0(void);                                               \
+a_attr void                                                            \
+a_name##tsd_boot1(void);                                               \
+a_attr bool                                                            \
 a_name##tsd_boot(void);                                                        \
 a_attr a_type *                                                                \
 a_name##tsd_get(void);                                                 \
@@ -93,11 +117,13 @@ extern bool                a_name##tsd_booted;
 #elif (defined(_WIN32))
 #define        malloc_tsd_externs(a_name, a_type)                              \
 extern DWORD           a_name##tsd_tsd;                                \
+extern a_name##tsd_wrapper_t   a_name##tsd_boot_wrapper;               \
 extern bool            a_name##tsd_booted;
 #else
 #define        malloc_tsd_externs(a_name, a_type)                              \
 extern pthread_key_t   a_name##tsd_tsd;                                \
 extern tsd_init_head_t a_name##tsd_init_head;                          \
+extern a_name##tsd_wrapper_t   a_name##tsd_boot_wrapper;               \
 extern bool            a_name##tsd_booted;
 #endif
 
@@ -118,6 +144,10 @@ a_attr bool                a_name##tsd_booted = false;
 #elif (defined(_WIN32))
 #define        malloc_tsd_data(a_attr, a_name, a_type, a_initializer)          \
 a_attr DWORD           a_name##tsd_tsd;                                \
+a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = {              \
+       false,                                                          \
+       a_initializer                                                   \
+};                                                                     \
 a_attr bool            a_name##tsd_booted = false;
 #else
 #define        malloc_tsd_data(a_attr, a_name, a_type, a_initializer)          \
@@ -126,6 +156,10 @@ a_attr tsd_init_head_t     a_name##tsd_init_head = {                       \
        ql_head_initializer(blocks),                                    \
        MALLOC_MUTEX_INITIALIZER                                        \
 };                                                                     \
+a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = {              \
+       false,                                                          \
+       a_initializer                                                   \
+};                                                                     \
 a_attr bool            a_name##tsd_booted = false;
 #endif
 
@@ -145,7 +179,7 @@ a_name##tsd_cleanup_wrapper(void)                                   \
        return (a_name##tsd_initialized);                               \
 }                                                                      \
 a_attr bool                                                            \
-a_name##tsd_boot(void)                                                 \
+a_name##tsd_boot0(void)                                                        \
 {                                                                      \
                                                                        \
        if (a_cleanup != malloc_tsd_no_cleanup) {                       \
@@ -155,6 +189,18 @@ a_name##tsd_boot(void)                                                     \
        a_name##tsd_booted = true;                                      \
        return (false);                                                 \
 }                                                                      \
+a_attr void                                                            \
+a_name##tsd_boot1(void)                                                        \
+{                                                                      \
+                                                                       \
+       /* Do nothing. */                                               \
+}                                                                      \
+a_attr bool                                                            \
+a_name##tsd_boot(void)                                                 \
+{                                                                      \
+                                                                       \
+       return (a_name##tsd_boot0());                                   \
+}                                                                      \
 /* Get/set. */                                                         \
 a_attr a_type *                                                                \
 a_name##tsd_get(void)                                                  \
@@ -177,7 +223,7 @@ a_name##tsd_set(a_type *val)                                                \
     a_cleanup)                                                         \
 /* Initialization/cleanup. */                                          \
 a_attr bool                                                            \
-a_name##tsd_boot(void)                                                 \
+a_name##tsd_boot0(void)                                                        \
 {                                                                      \
                                                                        \
        if (a_cleanup != malloc_tsd_no_cleanup) {                       \
@@ -188,6 +234,18 @@ a_name##tsd_boot(void)                                                     \
        a_name##tsd_booted = true;                                      \
        return (false);                                                 \
 }                                                                      \
+a_attr void                                                            \
+a_name##tsd_boot1(void)                                                        \
+{                                                                      \
+                                                                       \
+       /* Do nothing. */                                               \
+}                                                                      \
+a_attr bool                                                            \
+a_name##tsd_boot(void)                                                 \
+{                                                                      \
+                                                                       \
+       return (a_name##tsd_boot0());                                   \
+}                                                                      \
 /* Get/set. */                                                         \
 a_attr a_type *                                                                \
 a_name##tsd_get(void)                                                  \
@@ -215,18 +273,15 @@ a_name##tsd_set(a_type *val)                                              \
 #elif (defined(_WIN32))
 #define        malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer,         \
     a_cleanup)                                                         \
-/* Data structure. */                                                  \
-typedef struct {                                                       \
-       bool    initialized;                                            \
-       a_type  val;                                                    \
-} a_name##tsd_wrapper_t;                                               \
 /* Initialization/cleanup. */                                          \
 a_attr bool                                                            \
 a_name##tsd_cleanup_wrapper(void)                                      \
 {                                                                      \
-       a_name##tsd_wrapper_t *wrapper;                                 \
+       DWORD error = GetLastError();                                   \
+       a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)      \
+           TlsGetValue(a_name##tsd_tsd);                               \
+       SetLastError(error);                                            \
                                                                        \
-       wrapper = (a_name##tsd_wrapper_t *)TlsGetValue(a_name##tsd_tsd);\
        if (wrapper == NULL)                                            \
                return (false);                                         \
        if (a_cleanup != malloc_tsd_no_cleanup &&                       \
@@ -241,26 +296,23 @@ a_name##tsd_cleanup_wrapper(void)                                 \
        malloc_tsd_dalloc(wrapper);                                     \
        return (false);                                                 \
 }                                                                      \
-a_attr bool                                                            \
-a_name##tsd_boot(void)                                                 \
+a_attr void                                                            \
+a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper)                        \
 {                                                                      \
                                                                        \
-       a_name##tsd_tsd = TlsAlloc();                                   \
-       if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES)                      \
-               return (true);                                          \
-       if (a_cleanup != malloc_tsd_no_cleanup) {                       \
-               malloc_tsd_cleanup_register(                            \
-                   &a_name##tsd_cleanup_wrapper);                      \
+       if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) {           \
+               malloc_write("<jemalloc>: Error setting"                \
+                   " TSD for "#a_name"\n");                            \
+               abort();                                                \
        }                                                               \
-       a_name##tsd_booted = true;                                      \
-       return (false);                                                 \
 }                                                                      \
-/* Get/set. */                                                         \
 a_attr a_name##tsd_wrapper_t *                                         \
-a_name##tsd_get_wrapper(void)                                          \
+a_name##tsd_wrapper_get(void)                                          \
 {                                                                      \
+       DWORD error = GetLastError();                                   \
        a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)      \
            TlsGetValue(a_name##tsd_tsd);                               \
+       SetLastError(error);                                            \
                                                                        \
        if (unlikely(wrapper == NULL)) {                                \
                wrapper = (a_name##tsd_wrapper_t *)                     \
@@ -273,21 +325,57 @@ a_name##tsd_get_wrapper(void)                                             \
                        wrapper->initialized = false;                   \
                        wrapper->val = a_initializer;                   \
                }                                                       \
-               if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) {   \
-                       malloc_write("<jemalloc>: Error setting"        \
-                           " TSD for "#a_name"\n");                    \
-                       abort();                                        \
-               }                                                       \
+               a_name##tsd_wrapper_set(wrapper);                       \
        }                                                               \
        return (wrapper);                                               \
 }                                                                      \
+a_attr bool                                                            \
+a_name##tsd_boot0(void)                                                        \
+{                                                                      \
+                                                                       \
+       a_name##tsd_tsd = TlsAlloc();                                   \
+       if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES)                      \
+               return (true);                                          \
+       if (a_cleanup != malloc_tsd_no_cleanup) {                       \
+               malloc_tsd_cleanup_register(                            \
+                   &a_name##tsd_cleanup_wrapper);                      \
+       }                                                               \
+       a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper);             \
+       a_name##tsd_booted = true;                                      \
+       return (false);                                                 \
+}                                                                      \
+a_attr void                                                            \
+a_name##tsd_boot1(void)                                                        \
+{                                                                      \
+       a_name##tsd_wrapper_t *wrapper;                                 \
+       wrapper = (a_name##tsd_wrapper_t *)                             \
+           malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t));           \
+       if (wrapper == NULL) {                                          \
+               malloc_write("<jemalloc>: Error allocating"             \
+                   " TSD for "#a_name"\n");                            \
+               abort();                                                \
+       }                                                               \
+       memcpy(wrapper, &a_name##tsd_boot_wrapper,                      \
+           sizeof(a_name##tsd_wrapper_t));                             \
+       a_name##tsd_wrapper_set(wrapper);                               \
+}                                                                      \
+a_attr bool                                                            \
+a_name##tsd_boot(void)                                                 \
+{                                                                      \
+                                                                       \
+       if (a_name##tsd_boot0())                                        \
+               return (true);                                          \
+       a_name##tsd_boot1();                                            \
+       return (false);                                                 \
+}                                                                      \
+/* Get/set. */                                                         \
 a_attr a_type *                                                                \
 a_name##tsd_get(void)                                                  \
 {                                                                      \
        a_name##tsd_wrapper_t *wrapper;                                 \
                                                                        \
        assert(a_name##tsd_booted);                                     \
-       wrapper = a_name##tsd_get_wrapper();                            \
+       wrapper = a_name##tsd_wrapper_get();                            \
        return (&wrapper->val);                                         \
 }                                                                      \
 a_attr void                                                            \
@@ -296,7 +384,7 @@ a_name##tsd_set(a_type *val)                                                \
        a_name##tsd_wrapper_t *wrapper;                                 \
                                                                        \
        assert(a_name##tsd_booted);                                     \
-       wrapper = a_name##tsd_get_wrapper();                            \
+       wrapper = a_name##tsd_wrapper_get();                            \
        wrapper->val = *(val);                                          \
        if (a_cleanup != malloc_tsd_no_cleanup)                         \
                wrapper->initialized = true;                            \
@@ -304,11 +392,6 @@ a_name##tsd_set(a_type *val)                                               \
 #else
 #define        malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer,         \
     a_cleanup)                                                         \
-/* Data structure. */                                                  \
-typedef struct {                                                       \
-       bool    initialized;                                            \
-       a_type  val;                                                    \
-} a_name##tsd_wrapper_t;                                               \
 /* Initialization/cleanup. */                                          \
 a_attr void                                                            \
 a_name##tsd_cleanup_wrapper(void *arg)                                 \
@@ -333,19 +416,19 @@ a_name##tsd_cleanup_wrapper(void *arg)                                    \
        }                                                               \
        malloc_tsd_dalloc(wrapper);                                     \
 }                                                                      \
-a_attr bool                                                            \
-a_name##tsd_boot(void)                                                 \
+a_attr void                                                            \
+a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper)                        \
 {                                                                      \
                                                                        \
-       if (pthread_key_create(&a_name##tsd_tsd,                        \
-           a_name##tsd_cleanup_wrapper) != 0)                          \
-               return (true);                                          \
-       a_name##tsd_booted = true;                                      \
-       return (false);                                                 \
+       if (pthread_setspecific(a_name##tsd_tsd,                        \
+           (void *)wrapper)) {                                         \
+               malloc_write("<jemalloc>: Error setting"                \
+                   " TSD for "#a_name"\n");                            \
+               abort();                                                \
+       }                                                               \
 }                                                                      \
-/* Get/set. */                                                         \
 a_attr a_name##tsd_wrapper_t *                                         \
-a_name##tsd_get_wrapper(void)                                          \
+a_name##tsd_wrapper_get(void)                                          \
 {                                                                      \
        a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)      \
            pthread_getspecific(a_name##tsd_tsd);                       \
@@ -367,23 +450,54 @@ a_name##tsd_get_wrapper(void)                                             \
                        wrapper->initialized = false;                   \
                        wrapper->val = a_initializer;                   \
                }                                                       \
-               if (pthread_setspecific(a_name##tsd_tsd,                \
-                   (void *)wrapper)) {                                 \
-                       malloc_write("<jemalloc>: Error setting"        \
-                           " TSD for "#a_name"\n");                    \
-                       abort();                                        \
-               }                                                       \
+               a_name##tsd_wrapper_set(wrapper);                       \
                tsd_init_finish(&a_name##tsd_init_head, &block);        \
        }                                                               \
        return (wrapper);                                               \
 }                                                                      \
+a_attr bool                                                            \
+a_name##tsd_boot0(void)                                                        \
+{                                                                      \
+                                                                       \
+       if (pthread_key_create(&a_name##tsd_tsd,                        \
+           a_name##tsd_cleanup_wrapper) != 0)                          \
+               return (true);                                          \
+       a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper);             \
+       a_name##tsd_booted = true;                                      \
+       return (false);                                                 \
+}                                                                      \
+a_attr void                                                            \
+a_name##tsd_boot1(void)                                                        \
+{                                                                      \
+       a_name##tsd_wrapper_t *wrapper;                                 \
+       wrapper = (a_name##tsd_wrapper_t *)                             \
+           malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t));           \
+       if (wrapper == NULL) {                                          \
+               malloc_write("<jemalloc>: Error allocating"             \
+                   " TSD for "#a_name"\n");                            \
+               abort();                                                \
+       }                                                               \
+       memcpy(wrapper, &a_name##tsd_boot_wrapper,                      \
+           sizeof(a_name##tsd_wrapper_t));                             \
+       a_name##tsd_wrapper_set(wrapper);                               \
+}                                                                      \
+a_attr bool                                                            \
+a_name##tsd_boot(void)                                                 \
+{                                                                      \
+                                                                       \
+       if (a_name##tsd_boot0())                                        \
+               return (true);                                          \
+       a_name##tsd_boot1();                                            \
+       return (false);                                                 \
+}                                                                      \
+/* Get/set. */                                                         \
 a_attr a_type *                                                                \
 a_name##tsd_get(void)                                                  \
 {                                                                      \
        a_name##tsd_wrapper_t *wrapper;                                 \
                                                                        \
        assert(a_name##tsd_booted);                                     \
-       wrapper = a_name##tsd_get_wrapper();                            \
+       wrapper = a_name##tsd_wrapper_get();                            \
        return (&wrapper->val);                                         \
 }                                                                      \
 a_attr void                                                            \
@@ -392,7 +506,7 @@ a_name##tsd_set(a_type *val)                                                \
        a_name##tsd_wrapper_t *wrapper;                                 \
                                                                        \
        assert(a_name##tsd_booted);                                     \
-       wrapper = a_name##tsd_get_wrapper();                            \
+       wrapper = a_name##tsd_wrapper_get();                            \
        wrapper->val = *(val);                                          \
        if (a_cleanup != malloc_tsd_no_cleanup)                         \
                wrapper->initialized = true;                            \
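
The boot0/boot1 split brings TSD up in two stages: boot0 creates the key and points it at a statically allocated boot wrapper (usable before any allocation works), and boot1 later copies that wrapper into memory from malloc_tsd_malloc() once the allocator is ready. A simplified pthreads-only sketch of the same two-phase hand-off (one variable, no macro layer; names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct { bool initialized; int val; } wrapper_t;

    static pthread_key_t key;
    static wrapper_t boot_wrapper = { false, 0 };   /* static storage: no malloc needed */

    static void set_wrapper(wrapper_t *w) { pthread_setspecific(key, w); }
    static wrapper_t *get_wrapper(void) { return pthread_getspecific(key); }

    /* Phase 0: create the key and point it at the static boot wrapper. */
    static bool
    tsd_boot0(void)
    {
        if (pthread_key_create(&key, NULL) != 0)
            return true;
        set_wrapper(&boot_wrapper);
        return false;
    }

    /* Phase 1: once allocation works, migrate to a heap-allocated wrapper. */
    static void
    tsd_boot1(void)
    {
        wrapper_t *w = malloc(sizeof(*w));
        if (w == NULL)
            abort();
        memcpy(w, &boot_wrapper, sizeof(*w));
        set_wrapper(w);
    }

    int
    main(void)
    {
        if (tsd_boot0())
            return 1;
        get_wrapper()->val = 42;    /* TSD already usable between the two phases */
        tsd_boot1();
        printf("%d\n", get_wrapper()->val);   /* 42: carried over by the copy */
        return 0;
    }
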
@@ -423,6 +537,9 @@ struct tsd_init_head_s {
     O(thread_deallocated,      uint64_t)                               \
     O(prof_tdata,              prof_tdata_t *)                         \
     O(arena,                   arena_t *)                              \
+    O(arenas_tdata,            arena_tdata_t *)                        \
+    O(narenas_tdata,           unsigned)                               \
+    O(arenas_tdata_bypass,     bool)                                   \
     O(tcache_enabled,          tcache_enabled_t)                       \
     O(quarantine,              quarantine_t *)                         \
 
@@ -433,6 +550,9 @@ struct tsd_init_head_s {
     0,                                                                 \
     NULL,                                                              \
     NULL,                                                              \
+    NULL,                                                              \
+    0,                                                                 \
+    false,                                                             \
     tcache_enabled_default,                                            \
     NULL                                                               \
 }
@@ -447,6 +567,8 @@ MALLOC_TSD
 
 static const tsd_t tsd_initializer = TSD_INITIALIZER;
 
+malloc_tsd_types(, tsd_t)
+
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
@@ -455,7 +577,8 @@ void        *malloc_tsd_malloc(size_t size);
 void   malloc_tsd_dalloc(void *wrapper);
 void   malloc_tsd_no_cleanup(void *arg);
 void   malloc_tsd_cleanup_register(bool (*f)(void));
-bool   malloc_tsd_boot(void);
+bool   malloc_tsd_boot0(void);
+void   malloc_tsd_boot1(void);
 #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
     !defined(_WIN32))
 void   *tsd_init_check_recursion(tsd_init_head_t *head,
index 5af6832949757ebae3aea2fdc5968d9622946bbb..b8885bfaabdf21bd7e56127de5c84657ea8a6e6c 100644 (file)
@@ -1,6 +1,36 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
+#ifdef _WIN32
+#  ifdef _WIN64
+#    define FMT64_PREFIX "ll"
+#    define FMTPTR_PREFIX "ll"
+#  else
+#    define FMT64_PREFIX "ll"
+#    define FMTPTR_PREFIX ""
+#  endif
+#  define FMTd32 "d"
+#  define FMTu32 "u"
+#  define FMTx32 "x"
+#  define FMTd64 FMT64_PREFIX "d"
+#  define FMTu64 FMT64_PREFIX "u"
+#  define FMTx64 FMT64_PREFIX "x"
+#  define FMTdPTR FMTPTR_PREFIX "d"
+#  define FMTuPTR FMTPTR_PREFIX "u"
+#  define FMTxPTR FMTPTR_PREFIX "x"
+#else
+#  include <inttypes.h>
+#  define FMTd32 PRId32
+#  define FMTu32 PRIu32
+#  define FMTx32 PRIx32
+#  define FMTd64 PRId64
+#  define FMTu64 PRIu64
+#  define FMTx64 PRIx64
+#  define FMTdPTR PRIdPTR
+#  define FMTuPTR PRIuPTR
+#  define FMTxPTR PRIxPTR
+#endif
+
 /* Size of stack-allocated buffer passed to buferror(). */
 #define        BUFERROR_BUF            64
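
These FMT* macros pick a platform-appropriate printf length prefix, because MSVC's runtime has historically lacked the C99 <inttypes.h> specifiers; callers splice them into format strings by string-literal concatenation. A short usage sketch with plain printf (the two macros shown are stand-ins for a non-Windows build):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for FMTu64/FMTxPTR on a non-Windows build. */
    #define FMTu64  PRIu64
    #define FMTxPTR PRIxPTR

    int
    main(void)
    {
        uint64_t  nrequests = 123456789012345ULL;
        uintptr_t addr = (uintptr_t)&nrequests;

        /* Adjacent string literals splice the right specifier into the format. */
        printf("nrequests: %" FMTu64 "\n", nrequests);
        printf("addr:      0x%" FMTxPTR "\n", addr);
        return 0;
    }
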
 
  * uninitialized.
  */
 #ifdef JEMALLOC_CC_SILENCE
-#  define JEMALLOC_CC_SILENCE_INIT(v) = v
-#else
-#  define JEMALLOC_CC_SILENCE_INIT(v)
-#endif
-
-#ifdef __GNUC__
-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
+#      define JEMALLOC_CC_SILENCE_INIT(v) = v
 #else
-#define likely(x) !!(x)
-#define unlikely(x) !!(x)
-#endif
-
-/*
- * Define a custom assert() in order to reduce the chances of deadlock during
- * assertion failure.
- */
-#ifndef assert
-#define        assert(e) do {                                                  \
-       if (unlikely(config_debug && !(e))) {                           \
-               malloc_printf(                                          \
-                   "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n",    \
-                   __FILE__, __LINE__, #e);                            \
-               abort();                                                \
-       }                                                               \
-} while (0)
+#      define JEMALLOC_CC_SILENCE_INIT(v)
 #endif
 
-#ifndef not_reached
-#define        not_reached() do {                                              \
-       if (config_debug) {                                             \
-               malloc_printf(                                          \
-                   "<jemalloc>: %s:%d: Unreachable code reached\n",    \
-                   __FILE__, __LINE__);                                \
-               abort();                                                \
-       }                                                               \
-} while (0)
+#define        JEMALLOC_GNUC_PREREQ(major, minor)                              \
+    (!defined(__clang__) &&                                            \
+    (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))))
+#ifndef __has_builtin
+#  define __has_builtin(builtin) (0)
 #endif
+#define        JEMALLOC_CLANG_HAS_BUILTIN(builtin)                             \
+    (defined(__clang__) && __has_builtin(builtin))
 
-#ifndef not_implemented
-#define        not_implemented() do {                                          \
-       if (config_debug) {                                             \
-               malloc_printf("<jemalloc>: %s:%d: Not implemented\n",   \
-                   __FILE__, __LINE__);                                \
-               abort();                                                \
-       }                                                               \
-} while (0)
+#ifdef __GNUC__
+#      define likely(x)   __builtin_expect(!!(x), 1)
+#      define unlikely(x) __builtin_expect(!!(x), 0)
+#  if JEMALLOC_GNUC_PREREQ(4, 6) ||                                    \
+      JEMALLOC_CLANG_HAS_BUILTIN(__builtin_unreachable)
+#      define unreachable() __builtin_unreachable()
+#  else
+#      define unreachable()
+#  endif
+#else
+#      define likely(x)   !!(x)
+#      define unlikely(x) !!(x)
+#      define unreachable()
 #endif
 
-#ifndef assert_not_implemented
-#define        assert_not_implemented(e) do {                                  \
-       if (unlikely(config_debug && !(e)))                             \
-               not_implemented();                                      \
-} while (0)
-#endif
+#include "jemalloc/internal/assert.h"
 
 /* Use to assert a particular configuration, e.g., cassert(config_debug). */
 #define        cassert(c) do {                                                 \
@@ -104,51 +109,103 @@ void     malloc_write(const char *s);
 int    malloc_vsnprintf(char *str, size_t size, const char *format,
     va_list ap);
 int    malloc_snprintf(char *str, size_t size, const char *format, ...)
-    JEMALLOC_ATTR(format(printf, 3, 4));
+    JEMALLOC_FORMAT_PRINTF(3, 4);
 void   malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
     const char *format, va_list ap);
 void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
-    const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
-void   malloc_printf(const char *format, ...)
-    JEMALLOC_ATTR(format(printf, 1, 2));
+    const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
+void   malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
-int    jemalloc_ffsl(long bitmap);
-int    jemalloc_ffs(int bitmap);
-size_t pow2_ceil(size_t x);
-size_t lg_floor(size_t x);
+unsigned       ffs_llu(unsigned long long bitmap);
+unsigned       ffs_lu(unsigned long bitmap);
+unsigned       ffs_u(unsigned bitmap);
+unsigned       ffs_zu(size_t bitmap);
+unsigned       ffs_u64(uint64_t bitmap);
+unsigned       ffs_u32(uint32_t bitmap);
+uint64_t       pow2_ceil_u64(uint64_t x);
+uint32_t       pow2_ceil_u32(uint32_t x);
+size_t pow2_ceil_zu(size_t x);
+unsigned       lg_floor(size_t x);
 void   set_errno(int errnum);
 int    get_errno(void);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
 
-/* Sanity check: */
-#if !defined(JEMALLOC_INTERNAL_FFSL) || !defined(JEMALLOC_INTERNAL_FFS)
-#  error Both JEMALLOC_INTERNAL_FFSL && JEMALLOC_INTERNAL_FFS should have been defined by configure
+/* Sanity check. */
+#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
+    || !defined(JEMALLOC_INTERNAL_FFS)
+#  error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
 #endif
 
-JEMALLOC_ALWAYS_INLINE int
-jemalloc_ffsl(long bitmap)
+JEMALLOC_ALWAYS_INLINE unsigned
+ffs_llu(unsigned long long bitmap)
 {
 
-        return (JEMALLOC_INTERNAL_FFSL(bitmap));
+       return (JEMALLOC_INTERNAL_FFSLL(bitmap));
 }
 
-JEMALLOC_ALWAYS_INLINE int
-jemalloc_ffs(int bitmap)
+JEMALLOC_ALWAYS_INLINE unsigned
+ffs_lu(unsigned long bitmap)
 {
 
-        return (JEMALLOC_INTERNAL_FFS(bitmap));
+       return (JEMALLOC_INTERNAL_FFSL(bitmap));
 }
 
-/* Compute the smallest power of 2 that is >= x. */
-JEMALLOC_INLINE size_t
-pow2_ceil(size_t x)
+JEMALLOC_ALWAYS_INLINE unsigned
+ffs_u(unsigned bitmap)
+{
+
+       return (JEMALLOC_INTERNAL_FFS(bitmap));
+}
+
+JEMALLOC_ALWAYS_INLINE unsigned
+ffs_zu(size_t bitmap)
+{
+
+#if LG_SIZEOF_PTR == LG_SIZEOF_INT
+       return (ffs_u(bitmap));
+#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
+       return (ffs_lu(bitmap));
+#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
+       return (ffs_llu(bitmap));
+#else
+#error No implementation for size_t ffs()
+#endif
+}
+
+JEMALLOC_ALWAYS_INLINE unsigned
+ffs_u64(uint64_t bitmap)
+{
+
+#if LG_SIZEOF_LONG == 3
+       return (ffs_lu(bitmap));
+#elif LG_SIZEOF_LONG_LONG == 3
+       return (ffs_llu(bitmap));
+#else
+#error No implementation for 64-bit ffs()
+#endif
+}
+
+JEMALLOC_ALWAYS_INLINE unsigned
+ffs_u32(uint32_t bitmap)
+{
+
+#if LG_SIZEOF_INT == 2
+       return (ffs_u(bitmap));
+#else
+#error No implementation for 32-bit ffs()
+#endif
+       return (ffs_u(bitmap));
+}
+
+JEMALLOC_INLINE uint64_t
+pow2_ceil_u64(uint64_t x)
 {
 
        x--;
@@ -157,81 +214,108 @@ pow2_ceil(size_t x)
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
-#if (LG_SIZEOF_PTR == 3)
        x |= x >> 32;
-#endif
        x++;
        return (x);
 }
 
-#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
+JEMALLOC_INLINE uint32_t
+pow2_ceil_u32(uint32_t x)
+{
+
+       x--;
+       x |= x >> 1;
+       x |= x >> 2;
+       x |= x >> 4;
+       x |= x >> 8;
+       x |= x >> 16;
+       x++;
+       return (x);
+}
+
+/* Compute the smallest power of 2 that is >= x. */
 JEMALLOC_INLINE size_t
+pow2_ceil_zu(size_t x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+       return (pow2_ceil_u64(x));
+#else
+       return (pow2_ceil_u32(x));
+#endif
+}
+
+#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
+JEMALLOC_INLINE unsigned
 lg_floor(size_t x)
 {
        size_t ret;
 
+       assert(x != 0);
+
        asm ("bsr %1, %0"
            : "=r"(ret) // Outputs.
            : "r"(x)    // Inputs.
            );
-       return (ret);
+       assert(ret < UINT_MAX);
+       return ((unsigned)ret);
 }
 #elif (defined(_MSC_VER))
-JEMALLOC_INLINE size_t
+JEMALLOC_INLINE unsigned
 lg_floor(size_t x)
 {
-    unsigned long ret;
+       unsigned long ret;
+
+       assert(x != 0);
 
 #if (LG_SIZEOF_PTR == 3)
-    _BitScanReverse64(&ret, x);
+       _BitScanReverse64(&ret, x);
 #elif (LG_SIZEOF_PTR == 2)
-    _BitScanReverse(&ret, x);
+       _BitScanReverse(&ret, x);
 #else
-#  error "Unsupported type sizes for lg_floor()"
+#  error "Unsupported type size for lg_floor()"
 #endif
-    return (ret);
+       assert(ret < UINT_MAX);
+       return ((unsigned)ret);
 }
 #elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
-JEMALLOC_INLINE size_t
+JEMALLOC_INLINE unsigned
 lg_floor(size_t x)
 {
 
+       assert(x != 0);
+
 #if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
        return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x));
 #elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
        return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
 #else
-#  error "Unsupported type sizes for lg_floor()"
+#  error "Unsupported type size for lg_floor()"
 #endif
 }
 #else
-JEMALLOC_INLINE size_t
+JEMALLOC_INLINE unsigned
 lg_floor(size_t x)
 {
 
+       assert(x != 0);
+
        x |= (x >> 1);
        x |= (x >> 2);
        x |= (x >> 4);
        x |= (x >> 8);
        x |= (x >> 16);
-#if (LG_SIZEOF_PTR == 3 && LG_SIZEOF_PTR == LG_SIZEOF_LONG)
+#if (LG_SIZEOF_PTR == 3)
        x |= (x >> 32);
-       if (x == KZU(0xffffffffffffffff))
-               return (63);
-       x++;
-       return (jemalloc_ffsl(x) - 2);
-#elif (LG_SIZEOF_PTR == 2)
-       if (x == KZU(0xffffffff))
-               return (31);
-       x++;
-       return (jemalloc_ffs(x) - 2);
-#else
-#  error "Unsupported type sizes for lg_floor()"
 #endif
+       if (x == SIZE_T_MAX)
+               return ((8 << LG_SIZEOF_PTR) - 1);
+       x++;
+       return (ffs_zu(x) - 2);
 }
 #endif
 
-/* Sets error code */
+/* Set error code. */
 JEMALLOC_INLINE void
 set_errno(int errnum)
 {
@@ -243,7 +327,7 @@ set_errno(int errnum)
 #endif
 }
 
-/* Get last error code */
+/* Get last error code. */
 JEMALLOC_INLINE int
 get_errno(void)
 {
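As an aside, and not part of the patch: every pow2_ceil_*() variant added above relies on the same bit-smearing trick, so a tiny standalone C sketch of the 32-bit case, using hypothetical demo names, may help when reviewing the hunk.

        #include <assert.h>
        #include <stdint.h>

        /* Mirrors pow2_ceil_u32() above: smear the highest set bit into every
         * lower position, then add one to land on the next power of two. */
        static uint32_t
        pow2_ceil_u32_demo(uint32_t x)
        {
                x--;
                x |= x >> 1;
                x |= x >> 2;
                x |= x >> 4;
                x |= x >> 8;
                x |= x >> 16;
                x++;
                return (x);
        }

        int
        main(void)
        {
                assert(pow2_ceil_u32_demo(1) == 1);
                assert(pow2_ceil_u32_demo(3) == 4);
                assert(pow2_ceil_u32_demo(4) == 4);
                assert(pow2_ceil_u32_demo(4097) == 8192);
                return (0);
        }

After decrementing, the smear turns the value into a mask of all ones below the next power of two, so the final increment lands exactly on that power; the u64 and size_t variants above only differ in how far the smear has to reach.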
index 7e1c8be18f76f3e5ab08517c568653a06db5a295..c085814f204046d532b8162ead4ef536f747ed4e 100755 (executable)
@@ -22,7 +22,7 @@ done
 
 cat <<EOF
 #ifdef __cplusplus
-};
+}
 #endif
 #endif /* JEMALLOC_H_ */
 EOF
index ce6c6987c5b07d4fbea4b3f62b4d8c5e31d78cf0..6d89435c296ed72b69fe95786361167ec7600cf8 100644 (file)
@@ -1,6 +1,15 @@
 /* Defined if __attribute__((...)) syntax is supported. */
 #undef JEMALLOC_HAVE_ATTR
 
+/* Defined if alloc_size attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+
+/* Defined if format(gnu_printf, ...) attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+
+/* Defined if format(printf, ...) attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
+
 /*
  * Define overrides for non-standard allocator-related functions if they are
  * present on the system.
  */
 #undef JEMALLOC_USABLE_SIZE_CONST
 
+/*
+ * If defined, specify throw() for the public function prototypes when compiling
+ * with C++.  The only justification for this is to match the prototypes that
+ * glibc defines.
+ */
+#undef JEMALLOC_USE_CXX_THROW
+
+#ifdef _MSC_VER
+#  ifdef _WIN64
+#    define LG_SIZEOF_PTR_WIN 3
+#  else
+#    define LG_SIZEOF_PTR_WIN 2
+#  endif
+#endif
+
 /* sizeof(void *) == 2^LG_SIZEOF_PTR. */
 #undef LG_SIZEOF_PTR
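A compile-time restatement of the invariant noted above (sizeof(void *) == 2^LG_SIZEOF_PTR) can make the relationship concrete; the value 3 below is only an assumed 64-bit example, since the template itself leaves LG_SIZEOF_PTR for configure to substitute.

        #include <stddef.h>

        #define LG_SIZEOF_PTR 3 /* assumed: a typical 64-bit target */

        /* Fails to compile if the invariant does not hold. */
        typedef char lg_sizeof_ptr_check
            [sizeof(void *) == ((size_t)1 << LG_SIZEOF_PTR) ? 1 : -1];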
index 99f12611d6bfad64815379d4d186feb41eb60461..9f356f98ed8d9931f1d9c3a5a43919261bf9050c 100644 (file)
 #define        JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
 #define        JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
 
-#  define MALLOCX_LG_ALIGN(la) (la)
+#  define MALLOCX_LG_ALIGN(la) ((int)(la))
 #  if LG_SIZEOF_PTR == 2
-#    define MALLOCX_ALIGN(a)   (ffs(a)-1)
+#    define MALLOCX_ALIGN(a)   ((int)(ffs(a)-1))
 #  else
 #    define MALLOCX_ALIGN(a)                                           \
-        ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
+       ((int)(((a) < (size_t)INT_MAX) ? ffs((int)(a))-1 :              \
+       ffs((int)((a)>>32))+31))
 #  endif
 #  define MALLOCX_ZERO ((int)0x40)
-/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
-#  define MALLOCX_ARENA(a)     ((int)(((a)+1) << 8))
+/*
+ * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
+ * encodes MALLOCX_TCACHE_NONE.
+ */
+#  define MALLOCX_TCACHE(tc)   ((int)(((tc)+2) << 8))
+#  define MALLOCX_TCACHE_NONE  MALLOCX_TCACHE(-1)
+/*
+ * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
+ */
+#  define MALLOCX_ARENA(a)     ((int)(((a)+1) << 20))
 
-#ifdef JEMALLOC_HAVE_ATTR
-#  define JEMALLOC_ATTR(s) __attribute__((s))
-#  define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
-#  define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
-#  define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
-#  define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
-#elif _MSC_VER
+#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
+#  define JEMALLOC_CXX_THROW throw()
+#else
+#  define JEMALLOC_CXX_THROW
+#endif
+
+#if _MSC_VER
 #  define JEMALLOC_ATTR(s)
-#  ifdef DLLEXPORT
-#    define JEMALLOC_EXPORT __declspec(dllexport)
+#  define JEMALLOC_ALIGNED(s) __declspec(align(s))
+#  define JEMALLOC_ALLOC_SIZE(s)
+#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
+#  ifndef JEMALLOC_EXPORT
+#    ifdef DLLEXPORT
+#      define JEMALLOC_EXPORT __declspec(dllexport)
+#    else
+#      define JEMALLOC_EXPORT __declspec(dllimport)
+#    endif
+#  endif
+#  define JEMALLOC_FORMAT_PRINTF(s, i)
+#  define JEMALLOC_NOINLINE __declspec(noinline)
+#  ifdef __cplusplus
+#    define JEMALLOC_NOTHROW __declspec(nothrow)
 #  else
-#    define JEMALLOC_EXPORT __declspec(dllimport)
+#    define JEMALLOC_NOTHROW
 #  endif
-#  define JEMALLOC_ALIGNED(s) __declspec(align(s))
 #  define JEMALLOC_SECTION(s) __declspec(allocate(s))
-#  define JEMALLOC_NOINLINE __declspec(noinline)
+#  define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
+#  if _MSC_VER >= 1900 && !defined(__EDG__)
+#    define JEMALLOC_ALLOCATOR __declspec(allocator)
+#  else
+#    define JEMALLOC_ALLOCATOR
+#  endif
+#elif defined(JEMALLOC_HAVE_ATTR)
+#  define JEMALLOC_ATTR(s) __attribute__((s))
+#  define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
+#  ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+#    define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
+#    define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
+#  else
+#    define JEMALLOC_ALLOC_SIZE(s)
+#    define JEMALLOC_ALLOC_SIZE2(s1, s2)
+#  endif
+#  ifndef JEMALLOC_EXPORT
+#    define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
+#  endif
+#  ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
+#  elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
+#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
+#  else
+#    define JEMALLOC_FORMAT_PRINTF(s, i)
+#  endif
+#  define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
+#  define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
+#  define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
+#  define JEMALLOC_RESTRICT_RETURN
+#  define JEMALLOC_ALLOCATOR
 #else
 #  define JEMALLOC_ATTR(s)
-#  define JEMALLOC_EXPORT
 #  define JEMALLOC_ALIGNED(s)
-#  define JEMALLOC_SECTION(s)
+#  define JEMALLOC_ALLOC_SIZE(s)
+#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
+#  define JEMALLOC_EXPORT
+#  define JEMALLOC_FORMAT_PRINTF(s, i)
 #  define JEMALLOC_NOINLINE
+#  define JEMALLOC_NOTHROW
+#  define JEMALLOC_SECTION(s)
+#  define JEMALLOC_RESTRICT_RETURN
+#  define JEMALLOC_ALLOCATOR
 #endif
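For reviewers of the flag re-encoding above, a minimal usage sketch (not part of the patch) that composes the new MALLOCX_TCACHE_NONE with the existing alignment and zeroing flags; the <jemalloc/jemalloc.h> include path is assumed from a standard install.

        #include <jemalloc/jemalloc.h>

        int
        main(void)
        {
                /* 64-byte aligned, zeroed allocation that bypasses the tcache. */
                void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO |
                    MALLOCX_TCACHE_NONE);

                if (p != NULL)
                        dallocx(p, MALLOCX_TCACHE_NONE);
                return (0);
        }

Because the tcache bits now sit in bits 8..19 and the arena index moved up to bit 20, the two selectors can be OR'd into the same flags word without colliding, which is the point of the re-biasing above.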
index f81adc14a7e70464f9b89f04593f338685db2c07..a78414b196fc4063f90ba71d4c3703fcdb7bee18 100644 (file)
@@ -7,44 +7,60 @@ extern JEMALLOC_EXPORT const char     *@je_@malloc_conf;
 extern JEMALLOC_EXPORT void            (*@je_@malloc_message)(void *cbopaque,
     const char *s);
 
-JEMALLOC_EXPORT void   *@je_@malloc(size_t size) JEMALLOC_ATTR(malloc);
-JEMALLOC_EXPORT void   *@je_@calloc(size_t num, size_t size)
-    JEMALLOC_ATTR(malloc);
-JEMALLOC_EXPORT int    @je_@posix_memalign(void **memptr, size_t alignment,
-    size_t size) JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT void   *@je_@aligned_alloc(size_t alignment, size_t size)
-    JEMALLOC_ATTR(malloc);
-JEMALLOC_EXPORT void   *@je_@realloc(void *ptr, size_t size);
-JEMALLOC_EXPORT void   @je_@free(void *ptr);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW      *@je_@malloc(size_t size)
+    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW      *@je_@calloc(size_t num, size_t size)
+    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW   @je_@posix_memalign(void **memptr,
+    size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW      *@je_@aligned_alloc(size_t alignment,
+    size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
+    JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW      *@je_@realloc(void *ptr, size_t size)
+    JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW  @je_@free(void *ptr)
+    JEMALLOC_CXX_THROW;
 
-JEMALLOC_EXPORT void   *@je_@mallocx(size_t size, int flags)
-    JEMALLOC_ATTR(malloc);
-JEMALLOC_EXPORT void   *@je_@rallocx(void *ptr, size_t size, int flags);
-JEMALLOC_EXPORT size_t @je_@xallocx(void *ptr, size_t size, size_t extra,
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW      *@je_@mallocx(size_t size, int flags)
+    JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW      *@je_@rallocx(void *ptr, size_t size,
+    int flags) JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW        @je_@xallocx(void *ptr, size_t size,
+    size_t extra, int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW        @je_@sallocx(const void *ptr,
+    int flags) JEMALLOC_ATTR(pure);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW  @je_@dallocx(void *ptr, int flags);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW  @je_@sdallocx(void *ptr, size_t size,
     int flags);
-JEMALLOC_EXPORT size_t @je_@sallocx(const void *ptr, int flags)
-    JEMALLOC_ATTR(pure);
-JEMALLOC_EXPORT void   @je_@dallocx(void *ptr, int flags);
-JEMALLOC_EXPORT void   @je_@sdallocx(void *ptr, size_t size, int flags);
-JEMALLOC_EXPORT size_t @je_@nallocx(size_t size, int flags)
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW        @je_@nallocx(size_t size, int flags)
     JEMALLOC_ATTR(pure);
 
-JEMALLOC_EXPORT int    @je_@mallctl(const char *name, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int    @je_@mallctlnametomib(const char *name, size_t *mibp,
-    size_t *miblenp);
-JEMALLOC_EXPORT int    @je_@mallctlbymib(const size_t *mib, size_t miblen,
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW   @je_@mallctl(const char *name,
     void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT void   @je_@malloc_stats_print(void (*write_cb)(void *,
-    const char *), void *@je_@cbopaque, const char *opts);
-JEMALLOC_EXPORT size_t @je_@malloc_usable_size(
-    JEMALLOC_USABLE_SIZE_CONST void *ptr);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW   @je_@mallctlnametomib(const char *name,
+    size_t *mibp, size_t *miblenp);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW   @je_@mallctlbymib(const size_t *mib,
+    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW  @je_@malloc_stats_print(
+    void (*write_cb)(void *, const char *), void *@je_@cbopaque,
+    const char *opts);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW        @je_@malloc_usable_size(
+    JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
 
 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_EXPORT void * @je_@memalign(size_t alignment, size_t size)
-    JEMALLOC_ATTR(malloc);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW      *@je_@memalign(size_t alignment, size_t size)
+    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
 #endif
 
 #ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_EXPORT void * @je_@valloc(size_t size) JEMALLOC_ATTR(malloc);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW      *@je_@valloc(size_t size) JEMALLOC_CXX_THROW
+    JEMALLOC_ATTR(malloc);
 #endif
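The reworked prototypes above add attributes (nothrow, alloc_size, restrict-return) without changing the call-side API, so a short sketch exercising two of them, malloc() and malloc_usable_size(), is enough to see the intent; the include path is again assumed from a standard install.

        #include <stdio.h>
        #include <stdlib.h>
        #include <jemalloc/jemalloc.h>

        int
        main(void)
        {
                void *p = malloc(100);

                if (p != NULL) {
                        /* Usable size is >= the request due to size-class rounding. */
                        printf("requested 100, usable %zu\n", malloc_usable_size(p));
                        free(p);
                }
                return (0);
        }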
index 8092f1b155e6ed58c8afd0f67fdab8e7416c8d40..fa7b350adcda9884f3d6b315323c8edf7e7bd086 100644 (file)
@@ -1,2 +1,57 @@
-typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, unsigned);
-typedef bool (chunk_dalloc_t)(void *, size_t, unsigned);
+/*
+ * void *
+ * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
+ *     bool *commit, unsigned arena_ind);
+ */
+typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned);
+
+/*
+ * bool
+ * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind);
+ */
+typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned);
+
+/*
+ * bool
+ * chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
+ *     unsigned arena_ind);
+ */
+typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
+ *     unsigned arena_ind);
+ */
+typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
+ *     unsigned arena_ind);
+ */
+typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
+ *     bool committed, unsigned arena_ind);
+ */
+typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned);
+
+/*
+ * bool
+ * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ *     bool committed, unsigned arena_ind);
+ */
+typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned);
+
+typedef struct {
+       chunk_alloc_t           *alloc;
+       chunk_dalloc_t          *dalloc;
+       chunk_commit_t          *commit;
+       chunk_decommit_t        *decommit;
+       chunk_purge_t           *purge;
+       chunk_split_t           *split;
+       chunk_merge_t           *merge;
+} chunk_hooks_t;
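A hedged sketch of what a consumer-side chunk hook could look like: only the chunk_alloc_t signature is taken from the typedefs above, the function body and names are hypothetical, and installing a fully populated chunk_hooks_t table is outside this header's scope.

        #include <stdbool.h>
        #include <stddef.h>

        typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned);

        /* Hypothetical hook: matches the documented signature but never supplies
         * a chunk; returning NULL reports that no memory was provided. */
        static void *
        my_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
            bool *commit, unsigned arena_ind)
        {
                (void)new_addr; (void)size; (void)alignment;
                (void)zero; (void)commit; (void)arena_ind;
                return (NULL);
        }

        int
        main(void)
        {
                chunk_alloc_t *alloc = my_chunk_alloc;

                (void)alloc; /* A real chunk_hooks_t carries seven such pointers. */
                return (0);
        }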
diff --git a/src/jemalloc/include/msvc_compat/C99/inttypes.h b/src/jemalloc/include/msvc_compat/C99/inttypes.h
deleted file mode 100644 (file)
index a4e6b75..0000000
+++ /dev/null
@@ -1,313 +0,0 @@
-// ISO C9x  compliant inttypes.h for Microsoft Visual Studio
-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 
-// 
-//  Copyright (c) 2006 Alexander Chemeris
-// 
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-// 
-//   1. Redistributions of source code must retain the above copyright notice,
-//      this list of conditions and the following disclaimer.
-// 
-//   2. Redistributions in binary form must reproduce the above copyright
-//      notice, this list of conditions and the following disclaimer in the
-//      documentation and/or other materials provided with the distribution.
-// 
-//   3. The name of the author may be used to endorse or promote products
-//      derived from this software without specific prior written permission.
-// 
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
-// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// 
-///////////////////////////////////////////////////////////////////////////////
-
-#ifndef _MSC_VER // [
-#error "Use this header only with Microsoft Visual C++ compilers!"
-#endif // _MSC_VER ]
-
-#ifndef _MSC_INTTYPES_H_ // [
-#define _MSC_INTTYPES_H_
-
-#if _MSC_VER > 1000
-#pragma once
-#endif
-
-#include "stdint.h"
-
-// 7.8 Format conversion of integer types
-
-typedef struct {
-   intmax_t quot;
-   intmax_t rem;
-} imaxdiv_t;
-
-// 7.8.1 Macros for format specifiers
-
-#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [   See footnote 185 at page 198
-
-#ifdef _WIN64
-#  define __PRI64_PREFIX        "l"
-#  define __PRIPTR_PREFIX       "l"
-#else
-#  define __PRI64_PREFIX        "ll"
-#  define __PRIPTR_PREFIX
-#endif
-
-// The fprintf macros for signed integers are:
-#define PRId8       "d"
-#define PRIi8       "i"
-#define PRIdLEAST8  "d"
-#define PRIiLEAST8  "i"
-#define PRIdFAST8   "d"
-#define PRIiFAST8   "i"
-
-#define PRId16       "hd"
-#define PRIi16       "hi"
-#define PRIdLEAST16  "hd"
-#define PRIiLEAST16  "hi"
-#define PRIdFAST16   "hd"
-#define PRIiFAST16   "hi"
-
-#define PRId32       "d"
-#define PRIi32       "i"
-#define PRIdLEAST32  "d"
-#define PRIiLEAST32  "i"
-#define PRIdFAST32   "d"
-#define PRIiFAST32   "i"
-
-#define PRId64       __PRI64_PREFIX "d"
-#define PRIi64       __PRI64_PREFIX "i"
-#define PRIdLEAST64  __PRI64_PREFIX "d"
-#define PRIiLEAST64  __PRI64_PREFIX "i"
-#define PRIdFAST64   __PRI64_PREFIX "d"
-#define PRIiFAST64   __PRI64_PREFIX "i"
-
-#define PRIdMAX     __PRI64_PREFIX "d"
-#define PRIiMAX     __PRI64_PREFIX "i"
-
-#define PRIdPTR     __PRIPTR_PREFIX "d"
-#define PRIiPTR     __PRIPTR_PREFIX "i"
-
-// The fprintf macros for unsigned integers are:
-#define PRIo8       "o"
-#define PRIu8       "u"
-#define PRIx8       "x"
-#define PRIX8       "X"
-#define PRIoLEAST8  "o"
-#define PRIuLEAST8  "u"
-#define PRIxLEAST8  "x"
-#define PRIXLEAST8  "X"
-#define PRIoFAST8   "o"
-#define PRIuFAST8   "u"
-#define PRIxFAST8   "x"
-#define PRIXFAST8   "X"
-
-#define PRIo16       "ho"
-#define PRIu16       "hu"
-#define PRIx16       "hx"
-#define PRIX16       "hX"
-#define PRIoLEAST16  "ho"
-#define PRIuLEAST16  "hu"
-#define PRIxLEAST16  "hx"
-#define PRIXLEAST16  "hX"
-#define PRIoFAST16   "ho"
-#define PRIuFAST16   "hu"
-#define PRIxFAST16   "hx"
-#define PRIXFAST16   "hX"
-
-#define PRIo32       "o"
-#define PRIu32       "u"
-#define PRIx32       "x"
-#define PRIX32       "X"
-#define PRIoLEAST32  "o"
-#define PRIuLEAST32  "u"
-#define PRIxLEAST32  "x"
-#define PRIXLEAST32  "X"
-#define PRIoFAST32   "o"
-#define PRIuFAST32   "u"
-#define PRIxFAST32   "x"
-#define PRIXFAST32   "X"
-
-#define PRIo64       __PRI64_PREFIX "o"
-#define PRIu64       __PRI64_PREFIX "u"
-#define PRIx64       __PRI64_PREFIX "x"
-#define PRIX64       __PRI64_PREFIX "X"
-#define PRIoLEAST64  __PRI64_PREFIX "o"
-#define PRIuLEAST64  __PRI64_PREFIX "u"
-#define PRIxLEAST64  __PRI64_PREFIX "x"
-#define PRIXLEAST64  __PRI64_PREFIX "X"
-#define PRIoFAST64   __PRI64_PREFIX "o"
-#define PRIuFAST64   __PRI64_PREFIX "u"
-#define PRIxFAST64   __PRI64_PREFIX "x"
-#define PRIXFAST64   __PRI64_PREFIX "X"
-
-#define PRIoMAX     __PRI64_PREFIX "o"
-#define PRIuMAX     __PRI64_PREFIX "u"
-#define PRIxMAX     __PRI64_PREFIX "x"
-#define PRIXMAX     __PRI64_PREFIX "X"
-
-#define PRIoPTR     __PRIPTR_PREFIX "o"
-#define PRIuPTR     __PRIPTR_PREFIX "u"
-#define PRIxPTR     __PRIPTR_PREFIX "x"
-#define PRIXPTR     __PRIPTR_PREFIX "X"
-
-// The fscanf macros for signed integers are:
-#define SCNd8       "d"
-#define SCNi8       "i"
-#define SCNdLEAST8  "d"
-#define SCNiLEAST8  "i"
-#define SCNdFAST8   "d"
-#define SCNiFAST8   "i"
-
-#define SCNd16       "hd"
-#define SCNi16       "hi"
-#define SCNdLEAST16  "hd"
-#define SCNiLEAST16  "hi"
-#define SCNdFAST16   "hd"
-#define SCNiFAST16   "hi"
-
-#define SCNd32       "ld"
-#define SCNi32       "li"
-#define SCNdLEAST32  "ld"
-#define SCNiLEAST32  "li"
-#define SCNdFAST32   "ld"
-#define SCNiFAST32   "li"
-
-#define SCNd64       "I64d"
-#define SCNi64       "I64i"
-#define SCNdLEAST64  "I64d"
-#define SCNiLEAST64  "I64i"
-#define SCNdFAST64   "I64d"
-#define SCNiFAST64   "I64i"
-
-#define SCNdMAX     "I64d"
-#define SCNiMAX     "I64i"
-
-#ifdef _WIN64 // [
-#  define SCNdPTR     "I64d"
-#  define SCNiPTR     "I64i"
-#else  // _WIN64 ][
-#  define SCNdPTR     "ld"
-#  define SCNiPTR     "li"
-#endif  // _WIN64 ]
-
-// The fscanf macros for unsigned integers are:
-#define SCNo8       "o"
-#define SCNu8       "u"
-#define SCNx8       "x"
-#define SCNX8       "X"
-#define SCNoLEAST8  "o"
-#define SCNuLEAST8  "u"
-#define SCNxLEAST8  "x"
-#define SCNXLEAST8  "X"
-#define SCNoFAST8   "o"
-#define SCNuFAST8   "u"
-#define SCNxFAST8   "x"
-#define SCNXFAST8   "X"
-
-#define SCNo16       "ho"
-#define SCNu16       "hu"
-#define SCNx16       "hx"
-#define SCNX16       "hX"
-#define SCNoLEAST16  "ho"
-#define SCNuLEAST16  "hu"
-#define SCNxLEAST16  "hx"
-#define SCNXLEAST16  "hX"
-#define SCNoFAST16   "ho"
-#define SCNuFAST16   "hu"
-#define SCNxFAST16   "hx"
-#define SCNXFAST16   "hX"
-
-#define SCNo32       "lo"
-#define SCNu32       "lu"
-#define SCNx32       "lx"
-#define SCNX32       "lX"
-#define SCNoLEAST32  "lo"
-#define SCNuLEAST32  "lu"
-#define SCNxLEAST32  "lx"
-#define SCNXLEAST32  "lX"
-#define SCNoFAST32   "lo"
-#define SCNuFAST32   "lu"
-#define SCNxFAST32   "lx"
-#define SCNXFAST32   "lX"
-
-#define SCNo64       "I64o"
-#define SCNu64       "I64u"
-#define SCNx64       "I64x"
-#define SCNX64       "I64X"
-#define SCNoLEAST64  "I64o"
-#define SCNuLEAST64  "I64u"
-#define SCNxLEAST64  "I64x"
-#define SCNXLEAST64  "I64X"
-#define SCNoFAST64   "I64o"
-#define SCNuFAST64   "I64u"
-#define SCNxFAST64   "I64x"
-#define SCNXFAST64   "I64X"
-
-#define SCNoMAX     "I64o"
-#define SCNuMAX     "I64u"
-#define SCNxMAX     "I64x"
-#define SCNXMAX     "I64X"
-
-#ifdef _WIN64 // [
-#  define SCNoPTR     "I64o"
-#  define SCNuPTR     "I64u"
-#  define SCNxPTR     "I64x"
-#  define SCNXPTR     "I64X"
-#else  // _WIN64 ][
-#  define SCNoPTR     "lo"
-#  define SCNuPTR     "lu"
-#  define SCNxPTR     "lx"
-#  define SCNXPTR     "lX"
-#endif  // _WIN64 ]
-
-#endif // __STDC_FORMAT_MACROS ]
-
-// 7.8.2 Functions for greatest-width integer types
-
-// 7.8.2.1 The imaxabs function
-#define imaxabs _abs64
-
-// 7.8.2.2 The imaxdiv function
-
-// This is modified version of div() function from Microsoft's div.c found
-// in %MSVC.NET%\crt\src\div.c
-#ifdef STATIC_IMAXDIV // [
-static
-#else // STATIC_IMAXDIV ][
-_inline
-#endif // STATIC_IMAXDIV ]
-imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
-{
-   imaxdiv_t result;
-
-   result.quot = numer / denom;
-   result.rem = numer % denom;
-
-   if (numer < 0 && result.rem > 0) {
-      // did division wrong; must fix up
-      ++result.quot;
-      result.rem -= denom;
-   }
-
-   return result;
-}
-
-// 7.8.2.3 The strtoimax and strtoumax functions
-#define strtoimax _strtoi64
-#define strtoumax _strtoui64
-
-// 7.8.2.4 The wcstoimax and wcstoumax functions
-#define wcstoimax _wcstoi64
-#define wcstoumax _wcstoui64
-
-
-#endif // _MSC_INTTYPES_H_ ]
index c84975b6b8e1d682133614705f077f863ffef0a1..a3ee250639c838dd77a5a700c09de42e92dc7b1e 100644 (file)
@@ -3,8 +3,9 @@
 
 /* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided
  * for both */
-#include <intrin.h>
-#pragma intrinsic(_BitScanForward)
+#ifdef _MSC_VER
+#  include <intrin.h>
+#  pragma intrinsic(_BitScanForward)
 static __forceinline int ffsl(long x)
 {
        unsigned long i;
@@ -20,4 +21,39 @@ static __forceinline int ffs(int x)
        return (ffsl(x));
 }
 
+#  ifdef  _M_X64
+#    pragma intrinsic(_BitScanForward64)
+#  endif
+
+static __forceinline int ffsll(unsigned __int64 x)
+{
+       unsigned long i;
+#ifdef  _M_X64
+       if (_BitScanForward64(&i, x))
+               return (i + 1);
+       return (0);
+#else
+// Fallback for 32-bit build where 64-bit version not available
+// assuming little endian
+       union {
+               unsigned __int64 ll;
+               unsigned   long l[2];
+       } s;
+
+       s.ll = x;
+
+       if (_BitScanForward(&i, s.l[0]))
+               return (i + 1);
+       else if(_BitScanForward(&i, s.l[1]))
+               return (i + 33);
+       return (0);
+#endif
+}
+
+#else
+#  define ffsll(x) __builtin_ffsll(x)
+#  define ffsl(x) __builtin_ffsl(x)
+#  define ffs(x) __builtin_ffs(x)
 #endif
+
+#endif /* strings_h */
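The MSVC shims above preserve the usual ffs() contract (one-based index of the least significant set bit, 0 for a zero argument), which a quick portable check makes explicit; on non-MSVC toolchains the builtin mappings at the end of the hunk apply instead.

        #include <assert.h>
        #include <strings.h> /* the msvc_compat shim above stands in for this on MSVC */

        int
        main(void)
        {
                assert(ffs(0) == 0);    /* no bit set */
                assert(ffs(1) == 1);    /* bit 0 -> index 1 */
                assert(ffs(0x10) == 5); /* bit 4 -> index 5 */
                return (0);
        }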
diff --git a/src/jemalloc/include/msvc_compat/windows_extra.h b/src/jemalloc/include/msvc_compat/windows_extra.h
new file mode 100644 (file)
index 0000000..3008faa
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
+#define        MSVC_COMPAT_WINDOWS_EXTRA_H
+
+#include <errno.h>
+
+#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
diff --git a/src/jemalloc/install-sh b/src/jemalloc/install-sh
deleted file mode 100755 (executable)
index ebc6691..0000000
+++ /dev/null
@@ -1,250 +0,0 @@
-#! /bin/sh
-#
-# install - install a program, script, or datafile
-# This comes from X11R5 (mit/util/scripts/install.sh).
-#
-# Copyright 1991 by the Massachusetts Institute of Technology
-#
-# Permission to use, copy, modify, distribute, and sell this software and its
-# documentation for any purpose is hereby granted without fee, provided that
-# the above copyright notice appear in all copies and that both that
-# copyright notice and this permission notice appear in supporting
-# documentation, and that the name of M.I.T. not be used in advertising or
-# publicity pertaining to distribution of the software without specific,
-# written prior permission.  M.I.T. makes no representations about the
-# suitability of this software for any purpose.  It is provided "as is"
-# without express or implied warranty.
-#
-# Calling this script install-sh is preferred over install.sh, to prevent
-# `make' implicit rules from creating a file called install from it
-# when there is no Makefile.
-#
-# This script is compatible with the BSD install script, but was written
-# from scratch.  It can only install one file at a time, a restriction
-# shared with many OS's install programs.
-
-
-# set DOITPROG to echo to test this script
-
-# Don't use :- since 4.3BSD and earlier shells don't like it.
-doit="${DOITPROG-}"
-
-
-# put in absolute paths if you don't have them in your path; or use env. vars.
-
-mvprog="${MVPROG-mv}"
-cpprog="${CPPROG-cp}"
-chmodprog="${CHMODPROG-chmod}"
-chownprog="${CHOWNPROG-chown}"
-chgrpprog="${CHGRPPROG-chgrp}"
-stripprog="${STRIPPROG-strip}"
-rmprog="${RMPROG-rm}"
-mkdirprog="${MKDIRPROG-mkdir}"
-
-transformbasename=""
-transform_arg=""
-instcmd="$mvprog"
-chmodcmd="$chmodprog 0755"
-chowncmd=""
-chgrpcmd=""
-stripcmd=""
-rmcmd="$rmprog -f"
-mvcmd="$mvprog"
-src=""
-dst=""
-dir_arg=""
-
-while [ x"$1" != x ]; do
-    case $1 in
-       -c) instcmd="$cpprog"
-           shift
-           continue;;
-
-       -d) dir_arg=true
-           shift
-           continue;;
-
-       -m) chmodcmd="$chmodprog $2"
-           shift
-           shift
-           continue;;
-
-       -o) chowncmd="$chownprog $2"
-           shift
-           shift
-           continue;;
-
-       -g) chgrpcmd="$chgrpprog $2"
-           shift
-           shift
-           continue;;
-
-       -s) stripcmd="$stripprog"
-           shift
-           continue;;
-
-       -t=*) transformarg=`echo $1 | sed 's/-t=//'`
-           shift
-           continue;;
-
-       -b=*) transformbasename=`echo $1 | sed 's/-b=//'`
-           shift
-           continue;;
-
-       *)  if [ x"$src" = x ]
-           then
-               src=$1
-           else
-               # this colon is to work around a 386BSD /bin/sh bug
-               :
-               dst=$1
-           fi
-           shift
-           continue;;
-    esac
-done
-
-if [ x"$src" = x ]
-then
-       echo "install:  no input file specified"
-       exit 1
-else
-       true
-fi
-
-if [ x"$dir_arg" != x ]; then
-       dst=$src
-       src=""
-       
-       if [ -d $dst ]; then
-               instcmd=:
-       else
-               instcmd=mkdir
-       fi
-else
-
-# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
-# might cause directories to be created, which would be especially bad 
-# if $src (and thus $dsttmp) contains '*'.
-
-       if [ -f $src -o -d $src ]
-       then
-               true
-       else
-               echo "install:  $src does not exist"
-               exit 1
-       fi
-       
-       if [ x"$dst" = x ]
-       then
-               echo "install:  no destination specified"
-               exit 1
-       else
-               true
-       fi
-
-# If destination is a directory, append the input filename; if your system
-# does not like double slashes in filenames, you may need to add some logic
-
-       if [ -d $dst ]
-       then
-               dst="$dst"/`basename $src`
-       else
-               true
-       fi
-fi
-
-## this sed command emulates the dirname command
-dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
-
-# Make sure that the destination directory exists.
-#  this part is taken from Noah Friedman's mkinstalldirs script
-
-# Skip lots of stat calls in the usual case.
-if [ ! -d "$dstdir" ]; then
-defaultIFS='   
-'
-IFS="${IFS-${defaultIFS}}"
-
-oIFS="${IFS}"
-# Some sh's can't handle IFS=/ for some reason.
-IFS='%'
-set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
-IFS="${oIFS}"
-
-pathcomp=''
-
-while [ $# -ne 0 ] ; do
-       pathcomp="${pathcomp}${1}"
-       shift
-
-       if [ ! -d "${pathcomp}" ] ;
-        then
-               $mkdirprog "${pathcomp}"
-       else
-               true
-       fi
-
-       pathcomp="${pathcomp}/"
-done
-fi
-
-if [ x"$dir_arg" != x ]
-then
-       $doit $instcmd $dst &&
-
-       if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
-       if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
-       if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
-       if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
-else
-
-# If we're going to rename the final executable, determine the name now.
-
-       if [ x"$transformarg" = x ] 
-       then
-               dstfile=`basename $dst`
-       else
-               dstfile=`basename $dst $transformbasename | 
-                       sed $transformarg`$transformbasename
-       fi
-
-# don't allow the sed command to completely eliminate the filename
-
-       if [ x"$dstfile" = x ] 
-       then
-               dstfile=`basename $dst`
-       else
-               true
-       fi
-
-# Make a temp file name in the proper directory.
-
-       dsttmp=$dstdir/#inst.$$#
-
-# Move or copy the file name to the temp name
-
-       $doit $instcmd $src $dsttmp &&
-
-       trap "rm -f ${dsttmp}" 0 &&
-
-# and set any options; do chmod last to preserve setuid bits
-
-# If any of these fail, we abort the whole thing.  If we want to
-# ignore errors from any of these, just make sure not to ignore
-# errors from the above "$doit $instcmd $src $dsttmp" command.
-
-       if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
-       if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
-       if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
-       if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
-
-# Now rename the file to the real destination.
-
-       $doit $rmcmd -f $dstdir/$dstfile &&
-       $doit $mvcmd $dsttmp $dstdir/$dstfile 
-
-fi &&
-
-
-exit 0
index af3f945d42f7f77c99b880e20bad332af05546a1..1a3ad9b34d9200d36ab8bb03c434542f6a8469b3 100644 (file)
@@ -2,10 +2,11 @@ prefix=@prefix@
 exec_prefix=@exec_prefix@
 libdir=@libdir@
 includedir=@includedir@
+install_suffix=@install_suffix@
 
 Name: jemalloc
 Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support.
 URL: http://www.canonware.com/jemalloc
 Version: @jemalloc_version@
 Cflags: -I${includedir}
-Libs: -L${libdir} -ljemalloc
+Libs: -L${libdir} -ljemalloc${install_suffix}
diff --git a/src/jemalloc/msvc/ReadMe.txt b/src/jemalloc/msvc/ReadMe.txt
new file mode 100644 (file)
index 0000000..02b97f7
--- /dev/null
@@ -0,0 +1,24 @@
+
+How to build jemalloc for Windows
+=================================
+
+1. Install Cygwin with at least the following packages:
+   * autoconf
+   * autogen
+   * gawk
+   * grep
+   * sed
+
+2. Install Visual Studio 2015 with Visual C++
+
+3. Add Cygwin\bin to the PATH environment variable
+
+4. Open "VS2015 x86 Native Tools Command Prompt"
+   (note: x86/x64 doesn't matter at this point)
+
+5. Generate header files:
+   sh -c "./autogen.sh CC=cl --enable-lazy-lock=no"
+
+6. Now the project can be opened and built in Visual Studio:
+   msvc\jemalloc_vc2015.sln
+
diff --git a/src/jemalloc/msvc/jemalloc_vc2015.sln b/src/jemalloc/msvc/jemalloc_vc2015.sln
new file mode 100644 (file)
index 0000000..aedd5e5
--- /dev/null
@@ -0,0 +1,63 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio 14
+VisualStudioVersion = 14.0.24720.0
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}"
+       ProjectSection(SolutionItems) = preProject
+               ReadMe.txt = ReadMe.txt
+       EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2015\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2015\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}"
+EndProject
+Global
+       GlobalSection(SolutionConfigurationPlatforms) = preSolution
+               Debug|x64 = Debug|x64
+               Debug|x86 = Debug|x86
+               Debug-static|x64 = Debug-static|x64
+               Debug-static|x86 = Debug-static|x86
+               Release|x64 = Release|x64
+               Release|x86 = Release|x86
+               Release-static|x64 = Release-static|x64
+               Release-static|x86 = Release-static|x86
+       EndGlobalSection
+       GlobalSection(ProjectConfigurationPlatforms) = postSolution
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32
+               {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32
+               {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32
+       EndGlobalSection
+       GlobalSection(SolutionProperties) = preSolution
+               HideSolutionNode = FALSE
+       EndGlobalSection
+EndGlobal
diff --git a/src/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/src/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
new file mode 100644 (file)
index 0000000..f3f0260
--- /dev/null
@@ -0,0 +1,396 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup Label="ProjectConfigurations">
+    <ProjectConfiguration Include="Debug-static|Win32">
+      <Configuration>Debug-static</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Debug-static|x64">
+      <Configuration>Debug-static</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Debug|Win32">
+      <Configuration>Debug</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release-static|Win32">
+      <Configuration>Release-static</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release-static|x64">
+      <Configuration>Release-static</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|Win32">
+      <Configuration>Release</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Debug|x64">
+      <Configuration>Debug</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|x64">
+      <Configuration>Release</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+  </ItemGroup>
+  <ItemGroup>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\arena.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\assert.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\atomic.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\base.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_dss.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_mmap.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ctl.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\hash.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\huge.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_decls.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_defs.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\prof.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_namespace.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_unnamespace.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ql.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\qr.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\quarantine.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\rb.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\valgrind.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_mangle.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos_jet.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_rename.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_typedefs.h" />
+    <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdbool.h" />
+    <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdint.h" />
+    <ClInclude Include="..\..\..\..\include\msvc_compat\strings.h" />
+    <ClInclude Include="..\..\..\..\include\msvc_compat\windows_extra.h" />
+  </ItemGroup>
+  <ItemGroup>
+    <ClCompile Include="..\..\..\..\src\arena.c" />
+    <ClCompile Include="..\..\..\..\src\atomic.c" />
+    <ClCompile Include="..\..\..\..\src\base.c" />
+    <ClCompile Include="..\..\..\..\src\bitmap.c" />
+    <ClCompile Include="..\..\..\..\src\chunk.c" />
+    <ClCompile Include="..\..\..\..\src\chunk_dss.c" />
+    <ClCompile Include="..\..\..\..\src\chunk_mmap.c" />
+    <ClCompile Include="..\..\..\..\src\ckh.c" />
+    <ClCompile Include="..\..\..\..\src\ctl.c" />
+    <ClCompile Include="..\..\..\..\src\extent.c" />
+    <ClCompile Include="..\..\..\..\src\hash.c" />
+    <ClCompile Include="..\..\..\..\src\huge.c" />
+    <ClCompile Include="..\..\..\..\src\jemalloc.c" />
+    <ClCompile Include="..\..\..\..\src\mb.c" />
+    <ClCompile Include="..\..\..\..\src\mutex.c" />
+    <ClCompile Include="..\..\..\..\src\nstime.c" />
+    <ClCompile Include="..\..\..\..\src\pages.c" />
+    <ClCompile Include="..\..\..\..\src\prng.c" />
+    <ClCompile Include="..\..\..\..\src\prof.c" />
+    <ClCompile Include="..\..\..\..\src\quarantine.c" />
+    <ClCompile Include="..\..\..\..\src\rtree.c" />
+    <ClCompile Include="..\..\..\..\src\stats.c" />
+    <ClCompile Include="..\..\..\..\src\tcache.c" />
+    <ClCompile Include="..\..\..\..\src\ticker.c" />
+    <ClCompile Include="..\..\..\..\src\tsd.c" />
+    <ClCompile Include="..\..\..\..\src\util.c" />
+  </ItemGroup>
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
+    <Keyword>Win32Proj</Keyword>
+    <RootNamespace>jemalloc</RootNamespace>
+    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+    <ConfigurationType>DynamicLibrary</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
+    <ConfigurationType>StaticLibrary</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+    <ConfigurationType>DynamicLibrary</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
+    <ConfigurationType>StaticLibrary</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+    <ConfigurationType>DynamicLibrary</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
+    <ConfigurationType>StaticLibrary</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+    <ConfigurationType>DynamicLibrary</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
+    <ConfigurationType>StaticLibrary</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+  <ImportGroup Label="ExtensionSettings">
+  </ImportGroup>
+  <ImportGroup Label="Shared">
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+    <IntDir>$(Platform)\$(Configuration)\</IntDir>
+    <TargetName>$(ProjectName)d</TargetName>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+    <IntDir>$(Platform)\$(Configuration)\</IntDir>
+    <TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+    <IntDir>$(Platform)\$(Configuration)\</IntDir>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+    <IntDir>$(Platform)\$(Configuration)\</IntDir>
+    <TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+    <IntDir>$(Platform)\$(Configuration)\</IntDir>
+    <TargetName>$(ProjectName)d</TargetName>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+    <IntDir>$(Platform)\$(Configuration)\</IntDir>
+    <TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+    <IntDir>$(Platform)\$(Configuration)\</IntDir>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+    <IntDir>$(Platform)\$(Configuration)\</IntDir>
+    <TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
+  </PropertyGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <ClCompile>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings>
+      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
+    </ClCompile>
+    <Link>
+      <SubSystem>Windows</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
+    <ClCompile>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+      <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings>
+      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
+    </ClCompile>
+    <Link>
+      <SubSystem>Windows</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <ClCompile>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings>
+      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
+    </ClCompile>
+    <Link>
+      <SubSystem>Windows</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
+    <ClCompile>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+      <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings>
+      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
+    </ClCompile>
+    <Link>
+      <SubSystem>Windows</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <ClCompile>
+      <WarningLevel>Level3</WarningLevel>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings>
+      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
+    </ClCompile>
+    <Link>
+      <SubSystem>Windows</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
+    <ClCompile>
+      <WarningLevel>Level3</WarningLevel>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+      <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings>
+      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
+    </ClCompile>
+    <Link>
+      <SubSystem>Windows</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <ClCompile>
+      <WarningLevel>Level3</WarningLevel>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings>
+      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
+    </ClCompile>
+    <Link>
+      <SubSystem>Windows</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
+    <ClCompile>
+      <WarningLevel>Level3</WarningLevel>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+      <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings>
+      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
+    </ClCompile>
+    <Link>
+      <SubSystem>Windows</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+    </Link>
+  </ItemDefinitionGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+  <ImportGroup Label="ExtensionTargets">
+  </ImportGroup>
+</Project>
\ No newline at end of file
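The configuration blocks above switch jemalloc between a DLL build (DLLEXPORT in the preprocessor definitions) and a static build (JEMALLOC_EXPORT defined to nothing, plus _LIB). As a rough, non-authoritative sketch of why that matters -- a paraphrase of the Windows export-macro handling in jemalloc's public headers, not text from this commit:

#include <stddef.h>

/* Paraphrased sketch: how the DLLEXPORT / JEMALLOC_EXPORT= definitions are
 * typically consumed on MSVC.  Building the DLL exports the symbols, a DLL
 * consumer imports them, and the -static configurations neutralize the macro
 * so the prototypes become plain externs suitable for a static .lib link. */
#ifndef JEMALLOC_EXPORT
#  ifdef DLLEXPORT
#    define JEMALLOC_EXPORT __declspec(dllexport)
#  else
#    define JEMALLOC_EXPORT __declspec(dllimport)
#  endif
#endif

JEMALLOC_EXPORT void *je_malloc(size_t size);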
diff --git a/src/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/src/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
new file mode 100644 (file)
index 0000000..ce70632
--- /dev/null
@@ -0,0 +1,257 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup>
+    <Filter Include="Source Files">
+      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+    </Filter>
+    <Filter Include="Header Files">
+      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
+    </Filter>
+    <Filter Include="Header Files\internal">
+      <UniqueIdentifier>{5697dfa3-16cf-4932-b428-6e0ec6e9f98e}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Header Files\msvc_compat">
+      <UniqueIdentifier>{0cbd2ca6-42a7-4f82-8517-d7e7a14fd986}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Header Files\msvc_compat\C99">
+      <UniqueIdentifier>{0abe6f30-49b5-46dd-8aca-6e33363fa52c}</UniqueIdentifier>
+    </Filter>
+  </ItemGroup>
+  <ItemGroup>
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_mangle.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos_jet.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_rename.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_typedefs.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\arena.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\assert.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\atomic.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\base.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_dss.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_mmap.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ctl.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\hash.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\huge.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_decls.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_defs.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\prof.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_namespace.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_unnamespace.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ql.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\qr.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\quarantine.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\rb.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\valgrind.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\msvc_compat\strings.h">
+      <Filter>Header Files\msvc_compat</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\msvc_compat\windows_extra.h">
+      <Filter>Header Files\msvc_compat</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdbool.h">
+      <Filter>Header Files\msvc_compat\C99</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdint.h">
+      <Filter>Header Files\msvc_compat\C99</Filter>
+    </ClInclude>
+  </ItemGroup>
+  <ItemGroup>
+    <ClCompile Include="..\..\..\..\src\arena.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\atomic.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\base.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\bitmap.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\chunk.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\chunk_dss.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\chunk_mmap.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\ckh.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\ctl.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\extent.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\hash.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\huge.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\jemalloc.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\mb.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\mutex.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\nstime.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\pages.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\prng.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\prof.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\quarantine.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\rtree.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\stats.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\tcache.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\ticker.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\tsd.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\util.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+  </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/src/jemalloc/msvc/projects/vc2015/test_threads/test_threads.cpp b/src/jemalloc/msvc/projects/vc2015/test_threads/test_threads.cpp
new file mode 100644 (file)
index 0000000..c8cb7d6
--- /dev/null
@@ -0,0 +1,89 @@
+// jemalloc C++ threaded test
+// Author: Rustam Abdullaev
+// Public Domain
+
+#include <atomic>
+#include <functional>
+#include <future>
+#include <random>
+#include <thread>
+#include <vector>
+#include <stdio.h>
+#include <jemalloc/jemalloc.h>
+
+using std::vector;
+using std::thread;
+using std::uniform_int_distribution;
+using std::minstd_rand;
+
+int test_threads()
+{
+  je_malloc_conf = "narenas:3";
+  int narenas = 0;
+  size_t sz = sizeof(narenas);
+  je_mallctl("opt.narenas", &narenas, &sz, NULL, 0);
+  if (narenas != 3) {
+    printf("Error: unexpected number of arenas: %d\n", narenas);
+    return 1;
+  }
+  static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199, 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123, 255265, 2333111 };
+  static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0]));
+  vector<thread> workers;
+  static const int numThreads = narenas + 1, numAllocsMax = 25, numIter1 = 50, numIter2 = 50;
+  je_malloc_stats_print(NULL, NULL, NULL);
+  size_t allocated1;
+  size_t sz1 = sizeof(allocated1);
+  je_mallctl("stats.active", &allocated1, &sz1, NULL, 0);
+  printf("\nPress Enter to start threads...\n");
+  getchar();
+  printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2);
+  for (int i = 0; i < numThreads; i++) {
+    workers.emplace_back([tid=i]() {
+      uniform_int_distribution<int> sizeDist(0, numSizes - 1);
+      minstd_rand rnd(tid * 17);
+      uint8_t* ptrs[numAllocsMax];
+      int ptrsz[numAllocsMax];
+      for (int i = 0; i < numIter1; ++i) {
+        thread t([&]() {
+          for (int i = 0; i < numIter2; ++i) {
+            const int numAllocs = numAllocsMax - sizeDist(rnd);
+            for (int j = 0; j < numAllocs; j += 64) {
+              const int x = sizeDist(rnd);
+              const int sz = sizes[x];
+              ptrsz[j] = sz;
+              ptrs[j] = (uint8_t*)je_malloc(sz);
+              if (!ptrs[j]) {
+                printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. %d\n", sz, tid, i, j, x);
+                exit(1);
+              }
+              for (int k = 0; k < sz; k++)
+                ptrs[j][k] = tid + k;
+            }
+            for (int j = 0; j < numAllocs; j += 64) {
+              for (int k = 0, sz = ptrsz[j]; k < sz; k++)
+                if (ptrs[j][k] != (uint8_t)(tid + k)) {
+                  printf("Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(tid + k));
+                  exit(1);
+                }
+              je_free(ptrs[j]);
+            }
+          }
+        });
+        t.join();
+      }
+    });
+  }
+  for (thread& t : workers) {
+    t.join();
+  }
+  je_malloc_stats_print(NULL, NULL, NULL);
+  size_t allocated2;
+  je_mallctl("stats.active", &allocated2, &sz1, NULL, 0);
+  size_t leaked = allocated2 - allocated1;
+  printf("\nDone. Leaked: %zd bytes\n", leaked);
+  bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet)
+  printf("\nTest %s!\n", (failed ? "FAILED" : "successful"));
+  printf("\nPress Enter to continue...\n");
+  getchar();
+  return failed ? 1 : 0;
+}
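A distilled sketch of the per-thread work the test above performs: each worker repeatedly allocates through je_malloc, stamps a thread-specific byte pattern, verifies it, and frees. The helper name and the simplified control flow below are illustrative only and are not part of the commit.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <jemalloc/jemalloc.h>

// One allocate/verify/free round of the kind each worker in test_threads()
// runs many times, across several arenas and many threads at once.
void churn_once(int tid, size_t sz) {
    uint8_t *p = static_cast<uint8_t *>(je_malloc(sz));
    if (p == nullptr) {
        std::fprintf(stderr, "unable to allocate %zu bytes\n", sz);
        std::exit(1);
    }
    for (size_t k = 0; k < sz; k++)
        p[k] = static_cast<uint8_t>(tid + k);         // thread-specific pattern
    for (size_t k = 0; k < sz; k++) {
        if (p[k] != static_cast<uint8_t>(tid + k)) {  // mismatch => corruption
            std::fprintf(stderr, "memory error at offset %zu\n", k);
            std::exit(1);
        }
    }
    je_free(p);
}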
diff --git a/src/jemalloc/msvc/projects/vc2015/test_threads/test_threads.h b/src/jemalloc/msvc/projects/vc2015/test_threads/test_threads.h
new file mode 100644 (file)
index 0000000..64d0cdb
--- /dev/null
@@ -0,0 +1,3 @@
+#pragma once
+
+int test_threads();
diff --git a/src/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj b/src/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj
new file mode 100644 (file)
index 0000000..f5e9898
--- /dev/null
@@ -0,0 +1,327 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup Label="ProjectConfigurations">
+    <ProjectConfiguration Include="Debug-static|Win32">
+      <Configuration>Debug-static</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Debug-static|x64">
+      <Configuration>Debug-static</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Debug|Win32">
+      <Configuration>Debug</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release-static|Win32">
+      <Configuration>Release-static</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release-static|x64">
+      <Configuration>Release-static</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|Win32">
+      <Configuration>Release</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Debug|x64">
+      <Configuration>Debug</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|x64">
+      <Configuration>Release</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+  </ItemGroup>
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>{09028CFD-4EB7-491D-869C-0708DB97ED44}</ProjectGuid>
+    <Keyword>Win32Proj</Keyword>
+    <RootNamespace>test_threads</RootNamespace>
+    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>v140</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+  <ImportGroup Label="ExtensionSettings">
+  </ImportGroup>
+  <ImportGroup Label="Shared">
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+    <IntDir>$(Platform)\$(Configuration)\</IntDir>
+    <LinkIncremental>true</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+    <IntDir>$(Platform)\$(Configuration)\</IntDir>
+    <LinkIncremental>true</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <LinkIncremental>true</LinkIncremental>
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
+    <LinkIncremental>true</LinkIncremental>
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+    <IntDir>$(Platform)\$(Configuration)\</IntDir>
+    <LinkIncremental>false</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+    <IntDir>$(Platform)\$(Configuration)\</IntDir>
+    <LinkIncremental>false</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+    <IntDir>$(Platform)\$(Configuration)\</IntDir>
+    <LinkIncremental>false</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
+    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+    <IntDir>$(Platform)\$(Configuration)\</IntDir>
+    <LinkIncremental>false</LinkIncremental>
+  </PropertyGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <ClCompile>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+      <AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
+    <ClCompile>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+      <AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <ClCompile>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
+    <ClCompile>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <ClCompile>
+      <WarningLevel>Level3</WarningLevel>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+      <AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
+    <ClCompile>
+      <WarningLevel>Level3</WarningLevel>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+      <AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <ClCompile>
+      <WarningLevel>Level3</WarningLevel>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+      <AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
+    <ClCompile>
+      <WarningLevel>Level3</WarningLevel>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+      <AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemGroup>
+    <ClCompile Include="test_threads.cpp" />
+    <ClCompile Include="test_threads_main.cpp" />
+  </ItemGroup>
+  <ItemGroup>
+    <ProjectReference Include="..\jemalloc\jemalloc.vcxproj">
+      <Project>{8d6bb292-9e1c-413d-9f98-4864bdc1514a}</Project>
+    </ProjectReference>
+  </ItemGroup>
+  <ItemGroup>
+    <ClInclude Include="test_threads.h" />
+  </ItemGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+  <ImportGroup Label="ExtensionTargets">
+  </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/src/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters b/src/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters
new file mode 100644 (file)
index 0000000..4c23340
--- /dev/null
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup>
+    <Filter Include="Source Files">
+      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+    </Filter>
+    <Filter Include="Header Files">
+      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
+    </Filter>
+  </ItemGroup>
+  <ItemGroup>
+    <ClCompile Include="test_threads.cpp">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="test_threads_main.cpp">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+  </ItemGroup>
+  <ItemGroup>
+    <ClInclude Include="test_threads.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+  </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/src/jemalloc/msvc/projects/vc2015/test_threads/test_threads_main.cpp b/src/jemalloc/msvc/projects/vc2015/test_threads/test_threads_main.cpp
new file mode 100644 (file)
index 0000000..ffd96e6
--- /dev/null
@@ -0,0 +1,12 @@
+#include "test_threads.h"
+#include <future>
+#include <functional>
+#include <chrono>
+
+using namespace std::chrono_literals;
+
+int main(int argc, char** argv)
+{
+  int rc = test_threads();
+  return rc;
+}
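The harness above (test_threads_main.cpp calling test_threads()) brackets the threaded workload with two reads of jemalloc's "stats.active" counter and reports a failure if the value grows by more than 64 KiB. Below is a minimal sketch of that mallctl read; only the je_mallctl call itself comes from the test, and the wrapper name and error handling are assumptions for illustration.

#include <cstddef>
#include <cstdio>
#include <jemalloc/jemalloc.h>

// Read the "stats.active" counter the way test_threads() does before and
// after its workers run.  je_mallctl() returns 0 on success.
bool read_active_bytes(size_t *out) {
    size_t len = sizeof(*out);
    if (je_mallctl("stats.active", out, &len, nullptr, 0) != 0) {
        std::fprintf(stderr, "je_mallctl(\"stats.active\") failed\n");
        return false;
    }
    return true;
}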
index b7300a92413a8aa7099bb9981c98e21d889a297c..965c0fe2e88472f4f2547d262a166121d38623fd 100644 (file)
@@ -4,45 +4,32 @@
 /******************************************************************************/
 /* Data. */
 
+purge_mode_t   opt_purge = PURGE_DEFAULT;
+const char     *purge_mode_names[] = {
+       "ratio",
+       "decay",
+       "N/A"
+};
 ssize_t                opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
-arena_bin_info_t       arena_bin_info[NBINS];
+static ssize_t lg_dirty_mult_default;
+ssize_t                opt_decay_time = DECAY_TIME_DEFAULT;
+static ssize_t decay_time_default;
 
-JEMALLOC_ALIGNED(CACHELINE)
-const uint32_t small_bin2size_tab[NBINS] = {
-#define        B2S_bin_yes(size) \
-       size,
-#define        B2S_bin_no(size)
-#define        SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
-       B2S_bin_##bin((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
-       SIZE_CLASSES
-#undef B2S_bin_yes
-#undef B2S_bin_no
-#undef SC
-};
+arena_bin_info_t       arena_bin_info[NBINS];
 
-JEMALLOC_ALIGNED(CACHELINE)
-const uint8_t  small_size2bin_tab[] = {
-#define        S2B_3(i)        i,
-#define        S2B_4(i)        S2B_3(i) S2B_3(i)
-#define        S2B_5(i)        S2B_4(i) S2B_4(i)
-#define        S2B_6(i)        S2B_5(i) S2B_5(i)
-#define        S2B_7(i)        S2B_6(i) S2B_6(i)
-#define        S2B_8(i)        S2B_7(i) S2B_7(i)
-#define        S2B_9(i)        S2B_8(i) S2B_8(i)
-#define        S2B_no(i)
-#define        SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
-       S2B_##lg_delta_lookup(index)
-       SIZE_CLASSES
-#undef S2B_3
-#undef S2B_4
-#undef S2B_5
-#undef S2B_6
-#undef S2B_7
-#undef S2B_8
-#undef S2B_9
-#undef S2B_no
-#undef SC
-};
+size_t         map_bias;
+size_t         map_misc_offset;
+size_t         arena_maxrun; /* Max run size for arenas. */
+size_t         large_maxclass; /* Max large size class. */
+size_t         run_quantize_max; /* Max run_quantize_*() input. */
+static size_t  small_maxrun; /* Max run size for small size classes. */
+static bool    *small_run_tab; /* Valid small run page multiples. */
+static size_t  *run_quantize_floor_tab; /* run_quantize_floor() memoization. */
+static size_t  *run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */
+unsigned       nlclasses; /* Number of large size classes. */
+unsigned       nhclasses; /* Number of huge size classes. */
+static szind_t runs_avail_bias; /* Size index for first runs_avail tree. */
+static szind_t runs_avail_nclasses; /* Number of runs_avail trees. */
 
 /******************************************************************************/
 /*
@@ -50,9 +37,9 @@ const uint8_t small_size2bin_tab[] = {
  * definition.
  */
 
-static void    arena_purge(arena_t *arena, bool all);
+static void    arena_purge_to_limit(arena_t *arena, size_t ndirty_limit);
 static void    arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
-    bool cleaned);
+    bool cleaned, bool decommitted);
 static void    arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
     arena_run_t *run, arena_bin_t *bin);
 static void    arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
@@ -61,16 +48,20 @@ static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
 /******************************************************************************/
 
 JEMALLOC_INLINE_C size_t
-arena_miscelm_to_bits(arena_chunk_map_misc_t *miscelm)
+arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
 {
-       arena_chunk_t *chunk = CHUNK_ADDR2BASE(miscelm);
-       size_t pageind = arena_miscelm_to_pageind(miscelm);
+       arena_chunk_t *chunk;
+       size_t pageind, mapbits;
 
-       return arena_mapbits_get(chunk, pageind);
+       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+       pageind = arena_miscelm_to_pageind(miscelm);
+       mapbits = arena_mapbits_get(chunk, pageind);
+       return (arena_mapbits_size_decode(mapbits));
 }
 
-static inline int
-arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
+JEMALLOC_INLINE_C int
+arena_run_addr_comp(const arena_chunk_map_misc_t *a,
+    const arena_chunk_map_misc_t *b)
 {
        uintptr_t a_miscelm = (uintptr_t)a;
        uintptr_t b_miscelm = (uintptr_t)b;
@@ -83,124 +74,258 @@ arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
 
 /* Generate red-black tree functions. */
 rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
-    rb_link, arena_run_comp)
+    rb_link, arena_run_addr_comp)
 
-static inline int
-arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
+static size_t
+run_quantize_floor_compute(size_t size)
 {
-       int ret;
-       size_t a_size;
-       size_t b_size = arena_miscelm_to_bits(b) & ~PAGE_MASK;
-       uintptr_t a_miscelm = (uintptr_t)a;
-       uintptr_t b_miscelm = (uintptr_t)b;
+       size_t qsize;
 
-       if (a_miscelm & CHUNK_MAP_KEY)
-               a_size = a_miscelm & ~PAGE_MASK;
-       else
-               a_size = arena_miscelm_to_bits(a) & ~PAGE_MASK;
+       assert(size != 0);
+       assert(size == PAGE_CEILING(size));
 
-       ret = (a_size > b_size) - (a_size < b_size);
-       if (ret == 0) {
-               if (!(a_miscelm & CHUNK_MAP_KEY))
-                       ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
-               else {
-                       /*
-                        * Treat keys as if they are lower than anything else.
-                        */
-                       ret = -1;
+       /* Don't change sizes that are valid small run sizes. */
+       if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
+               return (size);
+
+       /*
+        * Round down to the nearest run size that can actually be requested
+        * during normal large allocation.  Add large_pad so that cache index
+        * randomization can offset the allocation from the page boundary.
+        */
+       qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
+       if (qsize <= SMALL_MAXCLASS + large_pad)
+               return (run_quantize_floor_compute(size - large_pad));
+       assert(qsize <= size);
+       return (qsize);
+}
+
+static size_t
+run_quantize_ceil_compute_hard(size_t size)
+{
+       size_t large_run_size_next;
+
+       assert(size != 0);
+       assert(size == PAGE_CEILING(size));
+
+       /*
+        * Return the next quantized size greater than the input size.
+        * Quantized sizes comprise the union of run sizes that back small
+        * region runs, and run sizes that back large regions with no explicit
+        * alignment constraints.
+        */
+
+       if (size > SMALL_MAXCLASS) {
+               large_run_size_next = PAGE_CEILING(index2size(size2index(size -
+                   large_pad) + 1) + large_pad);
+       } else
+               large_run_size_next = SIZE_T_MAX;
+       if (size >= small_maxrun)
+               return (large_run_size_next);
+
+       while (true) {
+               size += PAGE;
+               assert(size <= small_maxrun);
+               if (small_run_tab[size >> LG_PAGE]) {
+                       if (large_run_size_next < size)
+                               return (large_run_size_next);
+                       return (size);
                }
        }
+}
+
+static size_t
+run_quantize_ceil_compute(size_t size)
+{
+       size_t qsize = run_quantize_floor_compute(size);
+
+       if (qsize < size) {
+               /*
+                * Skip a quantization that may have an adequately large run,
+                * because under-sized runs may be mixed in.  This only happens
+                * when an unusual size is requested, i.e. for aligned
+                * allocation, and is just one of several places where linear
+                * search would potentially find sufficiently aligned available
+                * memory somewhere lower.
+                */
+               qsize = run_quantize_ceil_compute_hard(qsize);
+       }
+       return (qsize);
+}
+
+#ifdef JEMALLOC_JET
+#undef run_quantize_floor
+#define        run_quantize_floor JEMALLOC_N(run_quantize_floor_impl)
+#endif
+static size_t
+run_quantize_floor(size_t size)
+{
+       size_t ret;
+
+       assert(size > 0);
+       assert(size <= run_quantize_max);
+       assert((size & PAGE_MASK) == 0);
 
+       ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1];
+       assert(ret == run_quantize_floor_compute(size));
        return (ret);
 }
+#ifdef JEMALLOC_JET
+#undef run_quantize_floor
+#define        run_quantize_floor JEMALLOC_N(run_quantize_floor)
+run_quantize_t *run_quantize_floor = JEMALLOC_N(run_quantize_floor_impl);
+#endif
 
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
-    arena_chunk_map_misc_t, rb_link, arena_avail_comp)
+#ifdef JEMALLOC_JET
+#undef run_quantize_ceil
+#define        run_quantize_ceil JEMALLOC_N(run_quantize_ceil_impl)
+#endif
+static size_t
+run_quantize_ceil(size_t size)
+{
+       size_t ret;
+
+       assert(size > 0);
+       assert(size <= run_quantize_max);
+       assert((size & PAGE_MASK) == 0);
+
+       ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1];
+       assert(ret == run_quantize_ceil_compute(size));
+       return (ret);
+}
+#ifdef JEMALLOC_JET
+#undef run_quantize_ceil
+#define        run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
+run_quantize_t *run_quantize_ceil = JEMALLOC_N(run_quantize_ceil_impl);
+#endif
+
+static arena_run_tree_t *
+arena_runs_avail_get(arena_t *arena, szind_t ind)
+{
+
+       assert(ind >= runs_avail_bias);
+       assert(ind - runs_avail_bias < runs_avail_nclasses);
+
+       return (&arena->runs_avail[ind - runs_avail_bias]);
+}
 
 static void
 arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
     size_t npages)
 {
-
+       szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
+           arena_miscelm_get(chunk, pageind))));
        assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
            LG_PAGE));
-       arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
-           pageind));
+       arena_run_tree_insert(arena_runs_avail_get(arena, ind),
+           arena_miscelm_get(chunk, pageind));
 }
 
 static void
 arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
     size_t npages)
 {
-
+       szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
+           arena_miscelm_get(chunk, pageind))));
        assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
            LG_PAGE));
-       arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
-           pageind));
+       arena_run_tree_remove(arena_runs_avail_get(arena, ind),
+           arena_miscelm_get(chunk, pageind));
 }
 
 static void
-arena_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
     size_t npages)
 {
        arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
+
        assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
            LG_PAGE));
        assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
        assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
            CHUNK_MAP_DIRTY);
-       ql_elm_new(miscelm, dr_link);
-       ql_tail_insert(&arena->runs_dirty, miscelm, dr_link);
+
+       qr_new(&miscelm->rd, rd_link);
+       qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
        arena->ndirty += npages;
 }
 
 static void
-arena_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
     size_t npages)
 {
        arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
+
        assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
            LG_PAGE));
        assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
        assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
            CHUNK_MAP_DIRTY);
-       ql_remove(&arena->runs_dirty, miscelm, dr_link);
+
+       qr_remove(&miscelm->rd, rd_link);
+       assert(arena->ndirty >= npages);
        arena->ndirty -= npages;
 }
 
-static inline void *
+static size_t
+arena_chunk_dirty_npages(const extent_node_t *node)
+{
+
+       return (extent_node_size_get(node) >> LG_PAGE);
+}
+
+void
+arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
+{
+
+       if (cache) {
+               extent_node_dirty_linkage_init(node);
+               extent_node_dirty_insert(node, &arena->runs_dirty,
+                   &arena->chunks_cache);
+               arena->ndirty += arena_chunk_dirty_npages(node);
+       }
+}
+
+void
+arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
+{
+
+       if (dirty) {
+               extent_node_dirty_remove(node);
+               assert(arena->ndirty >= arena_chunk_dirty_npages(node));
+               arena->ndirty -= arena_chunk_dirty_npages(node);
+       }
+}
+
+JEMALLOC_INLINE_C void *
 arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
 {
        void *ret;
-       unsigned regind;
+       size_t regind;
        arena_chunk_map_misc_t *miscelm;
        void *rpages;
 
        assert(run->nfree > 0);
        assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
 
-       regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
+       regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
        miscelm = arena_run_to_miscelm(run);
        rpages = arena_miscelm_to_rpages(miscelm);
        ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
            (uintptr_t)(bin_info->reg_interval * regind));
        run->nfree--;
-       if (regind == run->nextind)
-               run->nextind++;
-       assert(regind < run->nextind);
        return (ret);
 }
 
-static inline void
+JEMALLOC_INLINE_C void
 arena_run_reg_dalloc(arena_run_t *run, void *ptr)
 {
        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
        size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
        size_t mapbits = arena_mapbits_get(chunk, pageind);
-       size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
+       szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
        arena_bin_info_t *bin_info = &arena_bin_info[binind];
-       unsigned regind = arena_run_regind(run, bin_info, ptr);
+       size_t regind = arena_run_regind(run, bin_info, ptr);
 
        assert(run->nfree < bin_info->nregs);
        /* Freeing an interior pointer can cause assertion failure. */
@@ -218,7 +343,7 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
        run->nfree++;
 }
 
-static inline void
+JEMALLOC_INLINE_C void
 arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
 {
 
@@ -228,7 +353,7 @@ arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
            (npages << LG_PAGE));
 }
 
-static inline void
+JEMALLOC_INLINE_C void
 arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
 {
 
@@ -236,7 +361,7 @@ arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
            << LG_PAGE)), PAGE);
 }
 
-static inline void
+JEMALLOC_INLINE_C void
 arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
 {
        size_t i;
@@ -248,24 +373,40 @@ arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
 }
 
 static void
-arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
+arena_nactive_add(arena_t *arena, size_t add_pages)
 {
 
        if (config_stats) {
-               ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages
-                   - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
+               size_t cactive_add = CHUNK_CEILING((arena->nactive +
+                   add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
                    LG_PAGE);
-               if (cactive_diff != 0)
-                       stats_cactive_add(cactive_diff);
+               if (cactive_add != 0)
+                       stats_cactive_add(cactive_add);
+       }
+       arena->nactive += add_pages;
+}
+
+static void
+arena_nactive_sub(arena_t *arena, size_t sub_pages)
+{
+
+       if (config_stats) {
+               size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
+                   CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
+               if (cactive_sub != 0)
+                       stats_cactive_sub(cactive_sub);
        }
+       arena->nactive -= sub_pages;
 }
 
 static void
 arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
-    size_t flag_dirty, size_t need_pages)
+    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
 {
        size_t total_pages, rem_pages;
 
+       assert(flag_dirty == 0 || flag_decommitted == 0);
+
        total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
            LG_PAGE;
        assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
@@ -275,61 +416,72 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
 
        arena_avail_remove(arena, chunk, run_ind, total_pages);
        if (flag_dirty != 0)
-               arena_dirty_remove(arena, chunk, run_ind, total_pages);
-       arena_cactive_update(arena, need_pages, 0);
-       arena->nactive += need_pages;
+               arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
+       arena_nactive_add(arena, need_pages);
 
        /* Keep track of trailing unused pages for later use. */
        if (rem_pages > 0) {
+               size_t flags = flag_dirty | flag_decommitted;
+               size_t flag_unzeroed_mask = (flags == 0) ?  CHUNK_MAP_UNZEROED :
+                   0;
+
+               arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
+                   (rem_pages << LG_PAGE), flags |
+                   (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
+                   flag_unzeroed_mask));
+               arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
+                   (rem_pages << LG_PAGE), flags |
+                   (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
+                   flag_unzeroed_mask));
                if (flag_dirty != 0) {
-                       arena_mapbits_unallocated_set(chunk,
-                           run_ind+need_pages, (rem_pages << LG_PAGE),
-                           flag_dirty);
-                       arena_mapbits_unallocated_set(chunk,
-                           run_ind+total_pages-1, (rem_pages << LG_PAGE),
-                           flag_dirty);
-                       arena_dirty_insert(arena, chunk, run_ind+need_pages,
+                       arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
                            rem_pages);
-               } else {
-                       arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
-                           (rem_pages << LG_PAGE),
-                           arena_mapbits_unzeroed_get(chunk,
-                           run_ind+need_pages));
-                       arena_mapbits_unallocated_set(chunk,
-                           run_ind+total_pages-1, (rem_pages << LG_PAGE),
-                           arena_mapbits_unzeroed_get(chunk,
-                           run_ind+total_pages-1));
                }
                arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
        }
 }
 
-static void
+static bool
 arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
     bool remove, bool zero)
 {
        arena_chunk_t *chunk;
        arena_chunk_map_misc_t *miscelm;
-       size_t flag_dirty, run_ind, need_pages, i;
+       size_t flag_dirty, flag_decommitted, run_ind, need_pages;
+       size_t flag_unzeroed_mask;
 
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
        miscelm = arena_run_to_miscelm(run);
        run_ind = arena_miscelm_to_pageind(miscelm);
        flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
+       flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
        need_pages = (size >> LG_PAGE);
        assert(need_pages > 0);
 
+       if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
+           run_ind << LG_PAGE, size, arena->ind))
+               return (true);
+
        if (remove) {
                arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
-                   need_pages);
+                   flag_decommitted, need_pages);
        }
 
        if (zero) {
-               if (flag_dirty == 0) {
+               if (flag_decommitted != 0) {
+                       /* The run is untouched, and therefore zeroed. */
+                       JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
+                           *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
+                           (need_pages << LG_PAGE));
+               } else if (flag_dirty != 0) {
+                       /* The run is dirty, so all pages must be zeroed. */
+                       arena_run_zero(chunk, run_ind, need_pages);
+               } else {
                        /*
                         * The run is clean, so some pages may be zeroed (i.e.
                         * never before touched).
                         */
+                       size_t i;
                        for (i = 0; i < need_pages; i++) {
                                if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
                                    != 0)
@@ -342,9 +494,6 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
                                            run_ind+i);
                                }
                        }
-               } else {
-                       /* The run is dirty, so all pages must be zeroed. */
-                       arena_run_zero(chunk, run_ind, need_pages);
                }
        } else {
                JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
@@ -355,31 +504,37 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
         * Set the last element first, in case the run only contains one page
         * (i.e. both statements set the same element).
         */
-       arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
-       arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
+       flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+           CHUNK_MAP_UNZEROED : 0;
+       arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
+           (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+           run_ind+need_pages-1)));
+       arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
+           (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
+       return (false);
 }
 
-static void
+static bool
 arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
 {
 
-       arena_run_split_large_helper(arena, run, size, true, zero);
+       return (arena_run_split_large_helper(arena, run, size, true, zero));
 }
 
-static void
+static bool
 arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
 {
 
-       arena_run_split_large_helper(arena, run, size, false, zero);
+       return (arena_run_split_large_helper(arena, run, size, false, zero));
 }
 
-static void
+static bool
 arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
-    size_t binind)
+    szind_t binind)
 {
        arena_chunk_t *chunk;
        arena_chunk_map_misc_t *miscelm;
-       size_t flag_dirty, run_ind, need_pages, i;
+       size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
 
        assert(binind != BININD_INVALID);
 
@@ -387,33 +542,28 @@ arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
        miscelm = arena_run_to_miscelm(run);
        run_ind = arena_miscelm_to_pageind(miscelm);
        flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
+       flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
        need_pages = (size >> LG_PAGE);
        assert(need_pages > 0);
 
-       arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);
+       if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
+           run_ind << LG_PAGE, size, arena->ind))
+               return (true);
 
-       /*
-        * Propagate the dirty and unzeroed flags to the allocated small run,
-        * so that arena_dalloc_bin_run() has the ability to conditionally trim
-        * clean pages.
-        */
-       arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
-       if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
-           run_ind) == 0)
-               arena_run_page_validate_zeroed(chunk, run_ind);
-       for (i = 1; i < need_pages - 1; i++) {
-               arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
-               if (config_debug && flag_dirty == 0 &&
-                   arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
+       arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
+           flag_decommitted, need_pages);
+
+       for (i = 0; i < need_pages; i++) {
+               size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
+                   run_ind+i);
+               arena_mapbits_small_set(chunk, run_ind+i, i, binind,
+                   flag_unzeroed);
+               if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
                        arena_run_page_validate_zeroed(chunk, run_ind+i);
        }
-       arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1,
-           binind, flag_dirty);
-       if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
-           run_ind+need_pages-1) == 0)
-               arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1);
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
            (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
+       return (false);
 }
 
 static arena_chunk_t *
@@ -429,97 +579,117 @@ arena_chunk_init_spare(arena_t *arena)
        assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
        assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
        assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
-           arena_maxclass);
+           arena_maxrun);
        assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
-           arena_maxclass);
+           arena_maxrun);
        assert(arena_mapbits_dirty_get(chunk, map_bias) ==
            arena_mapbits_dirty_get(chunk, chunk_npages-1));
 
        return (chunk);
 }
 
+static bool
+arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
+{
+
+       /*
+        * The extent node notion of "committed" doesn't directly apply to
+        * arena chunks.  Arbitrarily mark them as committed.  The commit state
+        * of runs is tracked individually, and upon chunk deallocation the
+        * entire chunk is in a consistent commit state.
+        */
+       extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
+       extent_node_achunk_set(&chunk->node, true);
+       return (chunk_register(chunk, &chunk->node));
+}
+
 static arena_chunk_t *
-arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
-    bool *zero)
+arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    bool *zero, bool *commit)
 {
        arena_chunk_t *chunk;
-       chunk_alloc_t *chunk_alloc;
-       chunk_dalloc_t *chunk_dalloc;
 
-       chunk_alloc = arena->chunk_alloc;
-       chunk_dalloc = arena->chunk_dalloc;
        malloc_mutex_unlock(&arena->lock);
-       chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
-           arena->ind, NULL, size, alignment, zero);
-       malloc_mutex_lock(&arena->lock);
-       if (config_stats && chunk != NULL)
-               arena->stats.mapped += chunksize;
 
+       chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
+           chunksize, chunksize, zero, commit);
+       if (chunk != NULL && !*commit) {
+               /* Commit header. */
+               if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
+                   LG_PAGE, arena->ind)) {
+                       chunk_dalloc_wrapper(arena, chunk_hooks,
+                           (void *)chunk, chunksize, *commit);
+                       chunk = NULL;
+               }
+       }
+       if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
+               if (!*commit) {
+                       /* Undo commit of header. */
+                       chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
+                           LG_PAGE, arena->ind);
+               }
+               chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
+                   chunksize, *commit);
+               chunk = NULL;
+       }
+
+       malloc_mutex_lock(&arena->lock);
        return (chunk);
 }
 
-void *
-arena_chunk_alloc_huge(arena_t *arena, void *new_addr, size_t size,
-    size_t alignment, bool *zero)
+static arena_chunk_t *
+arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
 {
-       void *ret;
-       chunk_alloc_t *chunk_alloc;
-       chunk_dalloc_t *chunk_dalloc;
+       arena_chunk_t *chunk;
+       chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 
-       malloc_mutex_lock(&arena->lock);
-       chunk_alloc = arena->chunk_alloc;
-       chunk_dalloc = arena->chunk_dalloc;
-       if (config_stats) {
-               /* Optimistically update stats prior to unlocking. */
-               arena->stats.mapped += size;
-               arena->stats.allocated_huge += size;
-               arena->stats.nmalloc_huge++;
-               arena->stats.nrequests_huge++;
+       chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
+           chunksize, zero, true);
+       if (chunk != NULL) {
+               if (arena_chunk_register(arena, chunk, *zero)) {
+                       chunk_dalloc_cache(arena, &chunk_hooks, chunk,
+                           chunksize, true);
+                       return (NULL);
+               }
+               *commit = true;
+       }
+       if (chunk == NULL) {
+               chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
+                   zero, commit);
        }
-       arena->nactive += (size >> LG_PAGE);
-       malloc_mutex_unlock(&arena->lock);
 
-       ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind,
-           new_addr, size, alignment, zero);
-       if (config_stats) {
-               if (ret != NULL)
-                       stats_cactive_add(size);
-               else {
-                       /* Revert optimistic stats updates. */
-                       malloc_mutex_lock(&arena->lock);
-                       arena->stats.mapped -= size;
-                       arena->stats.allocated_huge -= size;
-                       arena->stats.nmalloc_huge--;
-                       malloc_mutex_unlock(&arena->lock);
-               }
+       if (config_stats && chunk != NULL) {
+               arena->stats.mapped += chunksize;
+               arena->stats.metadata_mapped += (map_bias << LG_PAGE);
        }
 
-       return (ret);
+       return (chunk);
 }
 
 static arena_chunk_t *
 arena_chunk_init_hard(arena_t *arena)
 {
        arena_chunk_t *chunk;
-       bool zero;
-       size_t unzeroed, i;
+       bool zero, commit;
+       size_t flag_unzeroed, flag_decommitted, i;
 
        assert(arena->spare == NULL);
 
        zero = false;
-       chunk = arena_chunk_alloc_internal(arena, chunksize, chunksize, &zero);
+       commit = false;
+       chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
        if (chunk == NULL)
                return (NULL);
 
-       chunk->arena = arena;
-
        /*
         * Initialize the map to contain one maximal free untouched run.  Mark
-        * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
+        * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
+        * chunk.
         */
-       unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
-       arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
-           unzeroed);
+       flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
+       flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
+       arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
+           flag_unzeroed | flag_decommitted);
        /*
         * There is no need to initialize the internal page map entries unless
         * the chunk is not zeroed.
@@ -531,7 +701,7 @@ arena_chunk_init_hard(arena_t *arena)
                    chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
                    map_bias+1)));
                for (i = map_bias+1; i < chunk_npages-1; i++)
-                       arena_mapbits_unzeroed_set(chunk, i, unzeroed);
+                       arena_mapbits_internal_set(chunk, i, flag_unzeroed);
        } else {
                JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
                    *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
@@ -540,12 +710,12 @@ arena_chunk_init_hard(arena_t *arena)
                if (config_debug) {
                        for (i = map_bias+1; i < chunk_npages-1; i++) {
                                assert(arena_mapbits_unzeroed_get(chunk, i) ==
-                                   unzeroed);
+                                   flag_unzeroed);
                        }
                }
        }
-       arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass,
-           unzeroed);
+       arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
+           flag_unzeroed);
 
        return (chunk);
 }
@@ -563,43 +733,11 @@ arena_chunk_alloc(arena_t *arena)
                        return (NULL);
        }
 
-       /* Insert the run into the runs_avail tree. */
        arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
 
        return (chunk);
 }
 
-static void
-arena_chunk_dalloc_internal(arena_t *arena, arena_chunk_t *chunk)
-{
-       chunk_dalloc_t *chunk_dalloc;
-
-       chunk_dalloc = arena->chunk_dalloc;
-       malloc_mutex_unlock(&arena->lock);
-       chunk_dalloc((void *)chunk, chunksize, arena->ind);
-       malloc_mutex_lock(&arena->lock);
-       if (config_stats)
-               arena->stats.mapped -= chunksize;
-}
-
-void
-arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size)
-{
-       chunk_dalloc_t *chunk_dalloc;
-
-       malloc_mutex_lock(&arena->lock);
-       chunk_dalloc = arena->chunk_dalloc;
-       if (config_stats) {
-               arena->stats.mapped -= size;
-               arena->stats.allocated_huge -= size;
-               arena->stats.ndalloc_huge++;
-               stats_cactive_sub(size);
-       }
-       arena->nactive -= (size >> LG_PAGE);
-       malloc_mutex_unlock(&arena->lock);
-       chunk_dalloc(chunk, size, arena->ind);
-}
-
 static void
 arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
 {
@@ -607,56 +745,368 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
        assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
        assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
        assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
-           arena_maxclass);
+           arena_maxrun);
        assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
-           arena_maxclass);
+           arena_maxrun);
        assert(arena_mapbits_dirty_get(chunk, map_bias) ==
            arena_mapbits_dirty_get(chunk, chunk_npages-1));
+       assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
+           arena_mapbits_decommitted_get(chunk, chunk_npages-1));
 
-       /*
-        * Remove run from the runs_avail tree, so that the arena does not use
-        * it.
-        */
+       /* Remove run from runs_avail, so that the arena does not use it. */
        arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
 
        if (arena->spare != NULL) {
                arena_chunk_t *spare = arena->spare;
+               chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+               bool committed;
 
                arena->spare = chunk;
                if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
-                       arena_dirty_remove(arena, spare, map_bias,
+                       arena_run_dirty_remove(arena, spare, map_bias,
                            chunk_npages-map_bias);
                }
-               arena_chunk_dalloc_internal(arena, spare);
+
+               chunk_deregister(spare, &spare->node);
+
+               committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
+                   0);
+               if (!committed) {
+                       /*
+                        * Decommit the header.  Mark the chunk as decommitted
+                        * even if header decommit fails, since treating a
+                        * partially committed chunk as committed has a high
+                        * potential for causing later access of decommitted
+                        * memory.
+                        */
+                       chunk_hooks = chunk_hooks_get(arena);
+                       chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
+                           LG_PAGE, arena->ind);
+               }
+
+               chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
+                   chunksize, committed);
+
+               if (config_stats) {
+                       arena->stats.mapped -= chunksize;
+                       arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
+               }
        } else
                arena->spare = chunk;
 }
 
+static void
+arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
+{
+       szind_t index = size2index(usize) - nlclasses - NBINS;
+
+       cassert(config_stats);
+
+       arena->stats.nmalloc_huge++;
+       arena->stats.allocated_huge += usize;
+       arena->stats.hstats[index].nmalloc++;
+       arena->stats.hstats[index].curhchunks++;
+}
+
+static void
+arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
+{
+       szind_t index = size2index(usize) - nlclasses - NBINS;
+
+       cassert(config_stats);
+
+       arena->stats.nmalloc_huge--;
+       arena->stats.allocated_huge -= usize;
+       arena->stats.hstats[index].nmalloc--;
+       arena->stats.hstats[index].curhchunks--;
+}
+
+static void
+arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
+{
+       szind_t index = size2index(usize) - nlclasses - NBINS;
+
+       cassert(config_stats);
+
+       arena->stats.ndalloc_huge++;
+       arena->stats.allocated_huge -= usize;
+       arena->stats.hstats[index].ndalloc++;
+       arena->stats.hstats[index].curhchunks--;
+}
+
+static void
+arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
+{
+       szind_t index = size2index(usize) - nlclasses - NBINS;
+
+       cassert(config_stats);
+
+       arena->stats.ndalloc_huge--;
+       arena->stats.allocated_huge += usize;
+       arena->stats.hstats[index].ndalloc--;
+       arena->stats.hstats[index].curhchunks++;
+}
+
+static void
+arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
+{
+
+       arena_huge_dalloc_stats_update(arena, oldsize);
+       arena_huge_malloc_stats_update(arena, usize);
+}
+
+static void
+arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
+    size_t usize)
+{
+
+       arena_huge_dalloc_stats_update_undo(arena, oldsize);
+       arena_huge_malloc_stats_update_undo(arena, usize);
+}
+
+extent_node_t *
+arena_node_alloc(arena_t *arena)
+{
+       extent_node_t *node;
+
+       malloc_mutex_lock(&arena->node_cache_mtx);
+       node = ql_last(&arena->node_cache, ql_link);
+       if (node == NULL) {
+               malloc_mutex_unlock(&arena->node_cache_mtx);
+               return (base_alloc(sizeof(extent_node_t)));
+       }
+       ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
+       malloc_mutex_unlock(&arena->node_cache_mtx);
+       return (node);
+}
+
+void
+arena_node_dalloc(arena_t *arena, extent_node_t *node)
+{
+
+       malloc_mutex_lock(&arena->node_cache_mtx);
+       ql_elm_new(node, ql_link);
+       ql_tail_insert(&arena->node_cache, node, ql_link);
+       malloc_mutex_unlock(&arena->node_cache_mtx);
+}
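+
The arena_node_alloc()/arena_node_dalloc() pair above keeps retired extent nodes
on a per-arena list so a later allocation can reuse one instead of going back to
base_alloc().  A minimal standalone sketch of that reuse pattern; the plain
singly linked free list and malloc() here are hypothetical stand-ins for
jemalloc's ql_* macros, node_cache_mtx, and base_alloc():

    #include <stdlib.h>

    typedef struct node_s node_t;
    struct node_s {
        node_t  *next;  /* Free-list link, only meaningful while cached. */
        /* ... payload fields would live here ... */
    };

    static node_t *node_cache;      /* Free-list head (assume locking elsewhere). */

    static node_t *
    node_alloc(void)
    {
        node_t *node = node_cache;

        if (node == NULL)
            return (malloc(sizeof(node_t)));    /* Cache empty: fall back. */
        node_cache = node->next;                /* Pop a cached node. */
        return (node);
    }

    static void
    node_dalloc(node_t *node)
    {
        /* Push the node back onto the cache instead of freeing it. */
        node->next = node_cache;
        node_cache = node;
    }

    int
    main(void)
    {
        node_t *n = node_alloc();

        if (n == NULL)
            return (1);
        node_dalloc(n);                         /* Cached... */
        return (node_alloc() == n ? 0 : 1);     /* ...and reused. */
    }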
+
+static void *
+arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    size_t usize, size_t alignment, bool *zero, size_t csize)
+{
+       void *ret;
+       bool commit = true;
+
+       ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
+           zero, &commit);
+       if (ret == NULL) {
+               /* Revert optimistic stats updates. */
+               malloc_mutex_lock(&arena->lock);
+               if (config_stats) {
+                       arena_huge_malloc_stats_update_undo(arena, usize);
+                       arena->stats.mapped -= usize;
+               }
+               arena_nactive_sub(arena, usize >> LG_PAGE);
+               malloc_mutex_unlock(&arena->lock);
+       }
+
+       return (ret);
+}
+
+void *
+arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
+    bool *zero)
+{
+       void *ret;
+       chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+       size_t csize = CHUNK_CEILING(usize);
+
+       malloc_mutex_lock(&arena->lock);
+
+       /* Optimistically update stats. */
+       if (config_stats) {
+               arena_huge_malloc_stats_update(arena, usize);
+               arena->stats.mapped += usize;
+       }
+       arena_nactive_add(arena, usize >> LG_PAGE);
+
+       ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
+           zero, true);
+       malloc_mutex_unlock(&arena->lock);
+       if (ret == NULL) {
+               ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
+                   alignment, zero, csize);
+       }
+
+       return (ret);
+}
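+
arena_chunk_alloc_huge() above bumps the mapped/allocated counters before
dropping the lock and only rolls them back (via the *_undo helpers) if the
slow-path allocation fails, so the common case takes the lock once.  A minimal
standalone sketch of that optimistic-update/undo pattern; the counters, the
pthread mutex, and malloc() as the unlocked slow path are simplified stand-ins:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static size_t allocated, nmalloc;

    static void *
    alloc_huge(size_t usize)
    {
        void *ret;

        /* Optimistically update stats while the lock is held. */
        pthread_mutex_lock(&lock);
        allocated += usize;
        nmalloc++;
        pthread_mutex_unlock(&lock);

        ret = malloc(usize);            /* Stand-in for the unlocked slow path. */
        if (ret == NULL) {
            /* Revert the optimistic updates on failure. */
            pthread_mutex_lock(&lock);
            allocated -= usize;
            nmalloc--;
            pthread_mutex_unlock(&lock);
        }
        return (ret);
    }

    int
    main(void)
    {
        void *p = alloc_huge((size_t)1 << 20);

        free(p);
        return (0);
    }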
+
+void
+arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
+{
+       chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+       size_t csize;
+
+       csize = CHUNK_CEILING(usize);
+       malloc_mutex_lock(&arena->lock);
+       if (config_stats) {
+               arena_huge_dalloc_stats_update(arena, usize);
+               arena->stats.mapped -= usize;
+       }
+       arena_nactive_sub(arena, usize >> LG_PAGE);
+
+       chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
+       malloc_mutex_unlock(&arena->lock);
+}
+
+void
+arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
+    size_t usize)
+{
+
+       assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
+       assert(oldsize != usize);
+
+       malloc_mutex_lock(&arena->lock);
+       if (config_stats)
+               arena_huge_ralloc_stats_update(arena, oldsize, usize);
+       if (oldsize < usize)
+               arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
+       else
+               arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
+       malloc_mutex_unlock(&arena->lock);
+}
+
+void
+arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
+    size_t usize)
+{
+       size_t udiff = oldsize - usize;
+       size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
+
+       malloc_mutex_lock(&arena->lock);
+       if (config_stats) {
+               arena_huge_ralloc_stats_update(arena, oldsize, usize);
+               if (cdiff != 0)
+                       arena->stats.mapped -= cdiff;
+       }
+       arena_nactive_sub(arena, udiff >> LG_PAGE);
+
+       if (cdiff != 0) {
+               chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+               void *nchunk = (void *)((uintptr_t)chunk +
+                   CHUNK_CEILING(usize));
+
+               chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
+       }
+       malloc_mutex_unlock(&arena->lock);
+}
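+
In arena_chunk_ralloc_huge_shrink() above, only whole trailing chunks can be
handed back: udiff is the shrink in usable bytes, cdiff is the shrink after
rounding both sizes up to chunk granularity, and the freed region starts at
chunk + CHUNK_CEILING(usize).  A standalone sketch of that arithmetic; the 2 MiB
chunk size and the base address are illustrative, not jemalloc's values:

    #include <inttypes.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CHUNKSIZE        ((size_t)2 << 20)        /* 2 MiB, illustrative. */
    #define CHUNK_CEILING(s) (((s) + CHUNKSIZE - 1) & ~(CHUNKSIZE - 1))

    int
    main(void)
    {
        uintptr_t chunk = 0x40000000;           /* Hypothetical chunk base. */
        size_t oldsize = 5 * CHUNKSIZE;         /* 10 MiB in use. */
        size_t usize = (size_t)3 << 20;         /* Shrink to 3 MiB. */
        size_t udiff = oldsize - usize;
        size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
        uintptr_t nchunk = chunk + CHUNK_CEILING(usize);

        /* 7 MiB of usable space goes away, but only 6 MiB (three whole chunks)
         * can be returned, starting at the chunk boundary past the new size. */
        printf("udiff=%zu cdiff=%zu trailing=0x%" PRIxPTR "\n", udiff, cdiff,
            nchunk);
        return (0);
    }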
+
+static bool
+arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
+    size_t udiff, size_t cdiff)
+{
+       bool err;
+       bool commit = true;
+
+       err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
+           zero, &commit) == NULL);
+       if (err) {
+               /* Revert optimistic stats updates. */
+               malloc_mutex_lock(&arena->lock);
+               if (config_stats) {
+                       arena_huge_ralloc_stats_update_undo(arena, oldsize,
+                           usize);
+                       arena->stats.mapped -= cdiff;
+               }
+               arena_nactive_sub(arena, udiff >> LG_PAGE);
+               malloc_mutex_unlock(&arena->lock);
+       } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
+           cdiff, true, arena->ind)) {
+               chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
+                   true);
+               err = true;
+       }
+       return (err);
+}
+
+bool
+arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
+    size_t usize, bool *zero)
+{
+       bool err;
+       chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
+       void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
+       size_t udiff = usize - oldsize;
+       size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+
+       malloc_mutex_lock(&arena->lock);
+
+       /* Optimistically update stats. */
+       if (config_stats) {
+               arena_huge_ralloc_stats_update(arena, oldsize, usize);
+               arena->stats.mapped += cdiff;
+       }
+       arena_nactive_add(arena, udiff >> LG_PAGE);
+
+       err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
+           chunksize, zero, true) == NULL);
+       malloc_mutex_unlock(&arena->lock);
+       if (err) {
+               err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
+                   chunk, oldsize, usize, zero, nchunk, udiff,
+                   cdiff);
+       } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
+           cdiff, true, arena->ind)) {
+               chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
+                   true);
+               err = true;
+       }
+
+       return (err);
+}
+
+/*
+ * Do first-best-fit run selection, i.e. select the lowest run that best fits.
+ * Run sizes are indexed, so not all candidate runs are necessarily exactly the
+ * same size.
+ */
 static arena_run_t *
-arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
+arena_run_first_best_fit(arena_t *arena, size_t size)
 {
-       arena_chunk_map_misc_t *miscelm;
-       arena_chunk_map_misc_t *key;
+       szind_t ind, i;
 
-       key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
-       miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
-       if (miscelm != NULL) {
-               arena_run_t *run = &miscelm->run;
-               arena_run_split_large(arena, &miscelm->run, size, zero);
-               return (run);
+       ind = size2index(run_quantize_ceil(size));
+       for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) {
+               arena_chunk_map_misc_t *miscelm = arena_run_tree_first(
+                   arena_runs_avail_get(arena, i));
+               if (miscelm != NULL)
+                       return (&miscelm->run);
        }
 
        return (NULL);
 }
 
+static arena_run_t *
+arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
+{
+       arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
+       if (run != NULL) {
+               if (arena_run_split_large(arena, run, size, zero))
+                       run = NULL;
+       }
+       return (run);
+}
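+
The replacement for the old nsearch-based helper above keeps one container of
available runs per quantized size class and scans upward from the requested
class, taking the first non-empty one; because run sizes are quantized, the run
found may be somewhat larger than requested and is then split.  A minimal
standalone sketch of that scan over an array of per-class free lists; the fixed
class table and list representation are simplifications, not jemalloc's
run_quantize/size2index machinery:

    #include <stddef.h>
    #include <stdio.h>

    #define NCLASSES 4

    typedef struct run_s { struct run_s *next; size_t size; } run_t;

    /* Smallest run size each class holds, in pages (illustrative). */
    static const size_t class_pages[NCLASSES] = {1, 2, 4, 8};
    static run_t *avail[NCLASSES];      /* One free list per size class. */

    /* Smallest class able to satisfy npages (assumes npages <= 8). */
    static size_t
    size2class(size_t npages)
    {
        size_t i;

        for (i = 0; i < NCLASSES - 1 && class_pages[i] < npages; i++)
            ;
        return (i);
    }

    /* First-best-fit: lowest class at or above the request with a run. */
    static run_t *
    run_first_best_fit(size_t npages)
    {
        size_t i;

        for (i = size2class(npages); i < NCLASSES; i++) {
            if (avail[i] != NULL)
                return (avail[i]);
        }
        return (NULL);
    }

    int
    main(void)
    {
        run_t big = {NULL, 8};

        avail[3] = &big;        /* Only an 8-page run is available. */
        /* A 3-page request maps to class 2 (4 pages), which is empty, so the
         * scan falls through to class 3 and returns the larger run. */
        printf("%zu\n", run_first_best_fit(3)->size);
        return (0);
    }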
+
 static arena_run_t *
 arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
 {
        arena_chunk_t *chunk;
        arena_run_t *run;
 
-       assert(size <= arena_maxclass);
-       assert((size & PAGE_MASK) == 0);
+       assert(size <= arena_maxrun);
+       assert(size == PAGE_CEILING(size));
 
        /* Search the arena's chunks for the lowest best fit. */
        run = arena_run_alloc_large_helper(arena, size, zero);
@@ -669,7 +1119,8 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
        chunk = arena_chunk_alloc(arena);
        if (chunk != NULL) {
                run = &arena_miscelm_get(chunk, map_bias)->run;
-               arena_run_split_large(arena, run, size, zero);
+               if (arena_run_split_large(arena, run, size, zero))
+                       run = NULL;
                return (run);
        }
 
@@ -682,31 +1133,24 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
 }
 
 static arena_run_t *
-arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
+arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
 {
-       arena_run_t *run;
-       arena_chunk_map_misc_t *miscelm;
-       arena_chunk_map_misc_t *key;
-
-       key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
-       miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
-       if (miscelm != NULL) {
-               run = &miscelm->run;
-               arena_run_split_small(arena, run, size, binind);
-               return (run);
+       arena_run_t *run = arena_run_first_best_fit(arena, size);
+       if (run != NULL) {
+               if (arena_run_split_small(arena, run, size, binind))
+                       run = NULL;
        }
-
-       return (NULL);
+       return (run);
 }
 
 static arena_run_t *
-arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
+arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
 {
        arena_chunk_t *chunk;
        arena_run_t *run;
 
-       assert(size <= arena_maxclass);
-       assert((size & PAGE_MASK) == 0);
+       assert(size <= arena_maxrun);
+       assert(size == PAGE_CEILING(size));
        assert(binind != BININD_INVALID);
 
        /* Search the arena's chunks for the lowest best fit. */
@@ -720,7 +1164,8 @@ arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
        chunk = arena_chunk_alloc(arena);
        if (chunk != NULL) {
                run = &arena_miscelm_get(chunk, map_bias)->run;
-               arena_run_split_small(arena, run, size, binind);
+               if (arena_run_split_small(arena, run, size, binind))
+                       run = NULL;
                return (run);
        }
 
@@ -732,104 +1177,414 @@ arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
        return (arena_run_alloc_small_helper(arena, size, binind));
 }
 
-static inline void
-arena_maybe_purge(arena_t *arena)
+static bool
+arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
 {
-       size_t threshold;
 
-       /* Don't purge if the option is disabled. */
-       if (opt_lg_dirty_mult < 0)
-               return;
-       threshold = (arena->nactive >> opt_lg_dirty_mult);
+       return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
+           << 3));
+}
+
+ssize_t
+arena_lg_dirty_mult_get(arena_t *arena)
+{
+       ssize_t lg_dirty_mult;
+
+       malloc_mutex_lock(&arena->lock);
+       lg_dirty_mult = arena->lg_dirty_mult;
+       malloc_mutex_unlock(&arena->lock);
+
+       return (lg_dirty_mult);
+}
+
+bool
+arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
+{
+
+       if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
+               return (true);
+
+       malloc_mutex_lock(&arena->lock);
+       arena->lg_dirty_mult = lg_dirty_mult;
+       arena_maybe_purge(arena);
+       malloc_mutex_unlock(&arena->lock);
+
+       return (false);
+}
+
+static void
+arena_decay_deadline_init(arena_t *arena)
+{
+
+       assert(opt_purge == purge_mode_decay);
+
        /*
-        * Don't purge unless the number of purgeable pages exceeds the
-        * threshold.
+        * Generate a new deadline that is uniformly random within the next
+        * epoch after the current one.
         */
-       if (arena->ndirty <= threshold)
-               return;
+       nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
+       nstime_add(&arena->decay_deadline, &arena->decay_interval);
+       if (arena->decay_time > 0) {
+               nstime_t jitter;
+
+               nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
+                   nstime_ns(&arena->decay_interval)));
+               nstime_add(&arena->decay_deadline, &jitter);
+       }
+}
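+
arena_decay_deadline_init() above pushes the next deadline one decay interval
past the current epoch and then adds a uniformly random jitter of up to one
interval, so arenas created together do not all purge in lockstep.  A standalone
sketch of the same computation in plain nanoseconds; rand() is a crude stand-in
for jemalloc's prng_range(), and the epoch/interval values are hypothetical:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
        uint64_t epoch_ns = 1000000000;         /* Current epoch (hypothetical). */
        uint64_t interval_ns = 250000000;       /* decay_time / SMOOTHSTEP_NSTEPS. */
        uint64_t jitter_ns, deadline_ns;

        /* Uniform jitter in [0, interval). */
        jitter_ns = (uint64_t)rand() % interval_ns;
        /* The deadline lands somewhere within the epoch after the next one
         * begins, i.e. in [epoch + interval, epoch + 2*interval). */
        deadline_ns = epoch_ns + interval_ns + jitter_ns;

        printf("deadline in [%llu, %llu): %llu\n",
            (unsigned long long)(epoch_ns + interval_ns),
            (unsigned long long)(epoch_ns + 2 * interval_ns),
            (unsigned long long)deadline_ns);
        return (0);
    }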
 
-       arena_purge(arena, false);
+static bool
+arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
+{
+
+       assert(opt_purge == purge_mode_decay);
+
+       return (nstime_compare(&arena->decay_deadline, time) <= 0);
 }
 
 static size_t
-arena_dirty_count(arena_t *arena)
+arena_decay_backlog_npages_limit(const arena_t *arena)
+{
+       static const uint64_t h_steps[] = {
+#define        STEP(step, h, x, y) \
+               h,
+               SMOOTHSTEP
+#undef STEP
+       };
+       uint64_t sum;
+       size_t npages_limit_backlog;
+       unsigned i;
+
+       assert(opt_purge == purge_mode_decay);
+
+       /*
+        * For each element of decay_backlog, multiply by the corresponding
+        * fixed-point smoothstep decay factor.  Sum the products, then divide
+        * to round down to the nearest whole number of pages.
+        */
+       sum = 0;
+       for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
+               sum += arena->decay_backlog[i] * h_steps[i];
+       npages_limit_backlog = (sum >> SMOOTHSTEP_BFP);
+
+       return (npages_limit_backlog);
+}
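+
The backlog limit above is a weighted sum: each of the last SMOOTHSTEP_NSTEPS
epochs contributes its count of newly dirtied pages scaled by a fixed-point
decay coefficient, and the sum is shifted right by the fixed-point precision to
get whole pages.  A standalone sketch of the same fixed-point arithmetic with a
made-up 4-step table; jemalloc's real SMOOTHSTEP table, step count, and
SMOOTHSTEP_BFP precision differ:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSTEPS  4
    #define BFP     16      /* Bits of fixed-point fraction (illustrative). */

    int
    main(void)
    {
        /* Coefficients in 16.16 fixed point, oldest epoch first: roughly 0.1,
         * 0.3, 0.6, and 0.9 of each epoch's pages may still remain dirty. */
        static const uint64_t h_steps[NSTEPS] = {6554, 19661, 39322, 58982};
        /* Pages dirtied in each of the last four epochs, oldest first. */
        static const size_t backlog[NSTEPS] = {100, 200, 50, 400};
        uint64_t sum = 0;
        size_t i, npages_limit;

        for (i = 0; i < NSTEPS; i++)
            sum += backlog[i] * h_steps[i];
        npages_limit = (size_t)(sum >> BFP);    /* Round down to whole pages. */

        printf("allowed dirty pages: %zu\n", npages_limit);
        return (0);
    }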
+
+static void
+arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
 {
-       size_t ndirty = 0;
-       arena_chunk_map_misc_t *miscelm;
-       arena_chunk_t *chunk;
-       size_t pageind, npages;
-
-       ql_foreach(miscelm, &arena->runs_dirty, dr_link) {
-               chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
-               pageind = arena_miscelm_to_pageind(miscelm);
-               assert(arena_mapbits_allocated_get(chunk, pageind) == 0);
-               assert(arena_mapbits_large_get(chunk, pageind) == 0);
-               assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
-               npages = arena_mapbits_unallocated_size_get(chunk, pageind) >>
-                   LG_PAGE;
-               ndirty += npages;
-       }
+       uint64_t nadvance;
+       nstime_t delta;
+       size_t ndirty_delta;
+
+       assert(opt_purge == purge_mode_decay);
+       assert(arena_decay_deadline_reached(arena, time));
 
-       return (ndirty);
+       nstime_copy(&delta, time);
+       nstime_subtract(&delta, &arena->decay_epoch);
+       nadvance = nstime_divide(&delta, &arena->decay_interval);
+       assert(nadvance > 0);
+
+       /* Add nadvance decay intervals to epoch. */
+       nstime_copy(&delta, &arena->decay_interval);
+       nstime_imultiply(&delta, nadvance);
+       nstime_add(&arena->decay_epoch, &delta);
+
+       /* Set a new deadline. */
+       arena_decay_deadline_init(arena);
+
+       /* Update the backlog. */
+       if (nadvance >= SMOOTHSTEP_NSTEPS) {
+               memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
+                   sizeof(size_t));
+       } else {
+               memmove(arena->decay_backlog, &arena->decay_backlog[nadvance],
+                   (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t));
+               if (nadvance > 1) {
+                       memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
+                           nadvance], 0, (nadvance-1) * sizeof(size_t));
+               }
+       }
+       ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
+           arena->decay_ndirty : 0;
+       arena->decay_ndirty = arena->ndirty;
+       arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
+       arena->decay_backlog_npages_limit =
+           arena_decay_backlog_npages_limit(arena);
 }
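
When one or more whole intervals have elapsed, arena_decay_epoch_advance() above
slides the backlog left by nadvance slots (dropping the oldest entries),
zero-fills the vacated tail, and records the pages dirtied during the epoch that
just ended in the newest slot.  A standalone sketch of just that array
bookkeeping, with SMOOTHSTEP_NSTEPS shrunk to 8 for readability:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NSTEPS 8

    static void
    backlog_advance(size_t backlog[NSTEPS], uint64_t nadvance, size_t ndirty_delta)
    {
        if (nadvance >= NSTEPS) {
            /* Everything in the backlog is older than the window. */
            memset(backlog, 0, (NSTEPS - 1) * sizeof(size_t));
        } else {
            /* Drop the nadvance oldest entries and zero the vacated tail. */
            memmove(backlog, &backlog[nadvance],
                (NSTEPS - nadvance) * sizeof(size_t));
            if (nadvance > 1) {
                memset(&backlog[NSTEPS - nadvance], 0,
                    (nadvance - 1) * sizeof(size_t));
            }
        }
        /* Pages dirtied during the epoch that just ended. */
        backlog[NSTEPS - 1] = ndirty_delta;
    }

    int
    main(void)
    {
        size_t backlog[NSTEPS] = {1, 2, 3, 4, 5, 6, 7, 8};
        size_t i;

        backlog_advance(backlog, 3, 42);        /* Three epochs passed at once. */
        for (i = 0; i < NSTEPS; i++)
            printf("%zu ", backlog[i]);
        printf("\n");                           /* 4 5 6 7 8 0 0 42 */
        return (0);
    }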
 
 static size_t
-arena_compute_npurge(arena_t *arena, bool all)
+arena_decay_npages_limit(arena_t *arena)
+{
+       size_t npages_limit;
+
+       assert(opt_purge == purge_mode_decay);
+
+       npages_limit = arena->decay_backlog_npages_limit;
+
+       /* Add in any dirty pages created during the current epoch. */
+       if (arena->ndirty > arena->decay_ndirty)
+               npages_limit += arena->ndirty - arena->decay_ndirty;
+
+       return (npages_limit);
+}
+
+static void
+arena_decay_init(arena_t *arena, ssize_t decay_time)
+{
+
+       arena->decay_time = decay_time;
+       if (decay_time > 0) {
+               nstime_init2(&arena->decay_interval, decay_time, 0);
+               nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
+       }
+
+       nstime_init(&arena->decay_epoch, 0);
+       nstime_update(&arena->decay_epoch);
+       arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
+       arena_decay_deadline_init(arena);
+       arena->decay_ndirty = arena->ndirty;
+       arena->decay_backlog_npages_limit = 0;
+       memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
+}
+
+static bool
+arena_decay_time_valid(ssize_t decay_time)
+{
+
+       return (decay_time >= -1 && decay_time <= NSTIME_SEC_MAX);
+}
+
+ssize_t
+arena_decay_time_get(arena_t *arena)
+{
+       ssize_t decay_time;
+
+       malloc_mutex_lock(&arena->lock);
+       decay_time = arena->decay_time;
+       malloc_mutex_unlock(&arena->lock);
+
+       return (decay_time);
+}
+
+bool
+arena_decay_time_set(arena_t *arena, ssize_t decay_time)
 {
-       size_t npurge;
 
+       if (!arena_decay_time_valid(decay_time))
+               return (true);
+
+       malloc_mutex_lock(&arena->lock);
        /*
-        * Compute the minimum number of pages that this thread should try to
-        * purge.
+        * Restart decay backlog from scratch, which may cause many dirty pages
+        * to be immediately purged.  It would conceptually be possible to map
+        * the old backlog onto the new backlog, but there is no justification
+        * for such complexity since decay_time changes are intended to be
+        * infrequent, either between the {-1, 0, >0} states, or a one-time
+        * arbitrary change during initial arena configuration.
         */
-       if (!all) {
-               size_t threshold = (arena->nactive >> opt_lg_dirty_mult);
+       arena_decay_init(arena, decay_time);
+       arena_maybe_purge(arena);
+       malloc_mutex_unlock(&arena->lock);
 
-               npurge = arena->ndirty - threshold;
-       } else
-               npurge = arena->ndirty;
+       return (false);
+}
+
+static void
+arena_maybe_purge_ratio(arena_t *arena)
+{
+
+       assert(opt_purge == purge_mode_ratio);
+
+       /* Don't purge if the option is disabled. */
+       if (arena->lg_dirty_mult < 0)
+               return;
+
+       /*
+        * Iterate, since preventing recursive purging could otherwise leave too
+        * many dirty pages.
+        */
+       while (true) {
+               size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
+               if (threshold < chunk_npages)
+                       threshold = chunk_npages;
+               /*
+                * Don't purge unless the number of purgeable pages exceeds the
+                * threshold.
+                */
+               if (arena->ndirty <= threshold)
+                       return;
+               arena_purge_to_limit(arena, threshold);
+       }
+}
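+
In ratio mode, the purge trigger above is arithmetic on two counters: the arena
may keep at most nactive >> lg_dirty_mult dirty pages (but never fewer than one
chunk's worth), and purging repeats until ndirty is back under that threshold.
A standalone sketch of the threshold check; the 512-page chunk and the value 3
for lg_dirty_mult (a 1:8 dirty:active ratio) are just example numbers:

    #include <stddef.h>
    #include <stdio.h>

    #define CHUNK_NPAGES 512        /* Pages per chunk (illustrative). */

    /* Dirty pages that must be purged to get back under the ratio, or 0 if no
     * purge is needed. */
    static size_t
    pages_to_purge(size_t nactive, size_t ndirty, int lg_dirty_mult)
    {
        size_t threshold;

        if (lg_dirty_mult < 0)
            return (0);             /* Ratio purging disabled. */
        threshold = nactive >> lg_dirty_mult;
        if (threshold < CHUNK_NPAGES)
            threshold = CHUNK_NPAGES;
        return (ndirty > threshold ? ndirty - threshold : 0);
    }

    int
    main(void)
    {
        /* 64Ki active pages at a 1:8 ratio allow 8192 dirty pages, so 10000
         * dirty pages means 1808 must be purged. */
        printf("%zu\n", pages_to_purge(65536, 10000, 3));
        /* A small arena never purges below one chunk's worth of dirty pages. */
        printf("%zu\n", pages_to_purge(1024, 400, 3));
        return (0);
    }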
+
+static void
+arena_maybe_purge_decay(arena_t *arena)
+{
+       nstime_t time;
+       size_t ndirty_limit;
+
+       assert(opt_purge == purge_mode_decay);
+
+       /* Purge all or nothing if the option is disabled. */
+       if (arena->decay_time <= 0) {
+               if (arena->decay_time == 0)
+                       arena_purge_to_limit(arena, 0);
+               return;
+       }
+
+       nstime_copy(&time, &arena->decay_epoch);
+       if (unlikely(nstime_update(&time))) {
+               /* Time went backwards.  Force an epoch advance. */
+               nstime_copy(&time, &arena->decay_deadline);
+       }
+
+       if (arena_decay_deadline_reached(arena, &time))
+               arena_decay_epoch_advance(arena, &time);
+
+       ndirty_limit = arena_decay_npages_limit(arena);
+
+       /*
+        * Don't try to purge unless the number of purgeable pages exceeds the
+        * current limit.
+        */
+       if (arena->ndirty <= ndirty_limit)
+               return;
+       arena_purge_to_limit(arena, ndirty_limit);
+}
+
+void
+arena_maybe_purge(arena_t *arena)
+{
+
+       /* Don't recursively purge. */
+       if (arena->purging)
+               return;
 
-       return (npurge);
+       if (opt_purge == purge_mode_ratio)
+               arena_maybe_purge_ratio(arena);
+       else
+               arena_maybe_purge_decay(arena);
 }
 
 static size_t
-arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
-    arena_chunk_miscelms_t *miscelms)
+arena_dirty_count(arena_t *arena)
 {
-       arena_chunk_map_misc_t *miscelm;
+       size_t ndirty = 0;
+       arena_runs_dirty_link_t *rdelm;
+       extent_node_t *chunkselm;
+
+       for (rdelm = qr_next(&arena->runs_dirty, rd_link),
+           chunkselm = qr_next(&arena->chunks_cache, cc_link);
+           rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
+               size_t npages;
+
+               if (rdelm == &chunkselm->rd) {
+                       npages = extent_node_size_get(chunkselm) >> LG_PAGE;
+                       chunkselm = qr_next(chunkselm, cc_link);
+               } else {
+                       arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+                           rdelm);
+                       arena_chunk_map_misc_t *miscelm =
+                           arena_rd_to_miscelm(rdelm);
+                       size_t pageind = arena_miscelm_to_pageind(miscelm);
+                       assert(arena_mapbits_allocated_get(chunk, pageind) ==
+                           0);
+                       assert(arena_mapbits_large_get(chunk, pageind) == 0);
+                       assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
+                       npages = arena_mapbits_unallocated_size_get(chunk,
+                           pageind) >> LG_PAGE;
+               }
+               ndirty += npages;
+       }
+
+       return (ndirty);
+}
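+
A minimal standalone model of the interleaved walk in arena_dirty_count() above:
cached chunks carry an rd link that is spliced into the same ring as dirty runs,
and the walker tells the two apart by comparing the current ring element against
the embedded link of the current chunk cursor.  The struct names, the simple
circular list, and the NULL-terminated chunk list here are simplifications of
jemalloc's qr/ql rings and extent nodes:

    #include <stddef.h>
    #include <stdio.h>

    /* Link embedded both in dirty runs and in cached-chunk nodes. */
    typedef struct rd_link_s { struct rd_link_s *next; } rd_link_t;

    typedef struct chunk_node_s {
        rd_link_t           rd;         /* Spliced into the dirty ring, in order. */
        struct chunk_node_s *cc_next;   /* Next cached chunk. */
        size_t              npages;
    } chunk_node_t;

    typedef struct dirty_run_s {
        rd_link_t   rd;
        size_t      npages;
    } dirty_run_t;

    /* Count dirty pages by walking the ring; an element that coincides with the
     * current cached-chunk cursor is a whole chunk, everything else is a run. */
    static size_t
    dirty_count(rd_link_t *runs_dirty, chunk_node_t *chunks_cache)
    {
        rd_link_t *rdelm;
        chunk_node_t *chunkselm = chunks_cache;
        size_t ndirty = 0;

        for (rdelm = runs_dirty->next; rdelm != runs_dirty;
            rdelm = rdelm->next) {
            if (chunkselm != NULL && rdelm == &chunkselm->rd) {
                ndirty += chunkselm->npages;
                chunkselm = chunkselm->cc_next;
            } else
                ndirty += ((dirty_run_t *)rdelm)->npages;
        }
        return (ndirty);
    }

    int
    main(void)
    {
        rd_link_t sentinel;
        dirty_run_t run1 = {{NULL}, 3}, run2 = {{NULL}, 5};
        chunk_node_t chunk1 = {{NULL}, NULL, 512};

        /* Ring order: run1, chunk1, run2. */
        sentinel.next = &run1.rd;
        run1.rd.next = &chunk1.rd;
        chunk1.rd.next = &run2.rd;
        run2.rd.next = &sentinel;

        printf("%zu\n", dirty_count(&sentinel, &chunk1));   /* 3+512+5 = 520 */
        return (0);
    }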
+
+static size_t
+arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
+    extent_node_t *purge_chunks_sentinel)
+{
+       arena_runs_dirty_link_t *rdelm, *rdelm_next;
+       extent_node_t *chunkselm;
        size_t nstashed = 0;
 
-       /* Add at least npurge pages to purge_list. */
-       for (miscelm = ql_first(&arena->runs_dirty); miscelm != NULL;
-           miscelm = ql_first(&arena->runs_dirty)) {
-               arena_chunk_t *chunk =
-                   (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
-               size_t pageind = arena_miscelm_to_pageind(miscelm);
-               size_t run_size = arena_mapbits_unallocated_size_get(chunk,
-                   pageind);
-               size_t npages = run_size >> LG_PAGE;
-               arena_run_t *run = &miscelm->run;
-
-               assert(pageind + npages <= chunk_npages);
-               assert(arena_mapbits_dirty_get(chunk, pageind) ==
-                   arena_mapbits_dirty_get(chunk, pageind+npages-1));
+       /* Stash runs/chunks according to ndirty_limit. */
+       for (rdelm = qr_next(&arena->runs_dirty, rd_link),
+           chunkselm = qr_next(&arena->chunks_cache, cc_link);
+           rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
+               size_t npages;
+               rdelm_next = qr_next(rdelm, rd_link);
 
-               /*
-                * If purging the spare chunk's run, make it available prior to
-                * allocation.
-                */
-               if (chunk == arena->spare)
-                       arena_chunk_alloc(arena);
+               if (rdelm == &chunkselm->rd) {
+                       extent_node_t *chunkselm_next;
+                       bool zero;
+                       UNUSED void *chunk;
 
-               /* Temporarily allocate the free dirty run. */
-               arena_run_split_large(arena, run, run_size, false);
-               /* Append to purge_list for later processing. */
-               ql_elm_new(miscelm, dr_link);
-               ql_tail_insert(miscelms, miscelm, dr_link);
+                       npages = extent_node_size_get(chunkselm) >> LG_PAGE;
+                       if (opt_purge == purge_mode_decay && arena->ndirty -
+                           (nstashed + npages) < ndirty_limit)
+                               break;
 
-               nstashed += npages;
+                       chunkselm_next = qr_next(chunkselm, cc_link);
+                       /*
+                        * Allocate.  chunkselm remains valid due to the
+                        * dalloc_node=false argument to chunk_alloc_cache().
+                        */
+                       zero = false;
+                       chunk = chunk_alloc_cache(arena, chunk_hooks,
+                           extent_node_addr_get(chunkselm),
+                           extent_node_size_get(chunkselm), chunksize, &zero,
+                           false);
+                       assert(chunk == extent_node_addr_get(chunkselm));
+                       assert(zero == extent_node_zeroed_get(chunkselm));
+                       extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
+                           purge_chunks_sentinel);
+                       assert(npages == (extent_node_size_get(chunkselm) >>
+                           LG_PAGE));
+                       chunkselm = chunkselm_next;
+               } else {
+                       arena_chunk_t *chunk =
+                           (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+                       arena_chunk_map_misc_t *miscelm =
+                           arena_rd_to_miscelm(rdelm);
+                       size_t pageind = arena_miscelm_to_pageind(miscelm);
+                       arena_run_t *run = &miscelm->run;
+                       size_t run_size =
+                           arena_mapbits_unallocated_size_get(chunk, pageind);
+
+                       npages = run_size >> LG_PAGE;
+                       if (opt_purge == purge_mode_decay && arena->ndirty -
+                           (nstashed + npages) < ndirty_limit)
+                               break;
+
+                       assert(pageind + npages <= chunk_npages);
+                       assert(arena_mapbits_dirty_get(chunk, pageind) ==
+                           arena_mapbits_dirty_get(chunk, pageind+npages-1));
+
+                       /*
+                        * If purging the spare chunk's run, make it available
+                        * prior to allocation.
+                        */
+                       if (chunk == arena->spare)
+                               arena_chunk_alloc(arena);
+
+                       /* Temporarily allocate the free dirty run. */
+                       arena_run_split_large(arena, run, run_size, false);
+                       /* Stash. */
+                       if (false)
+                               qr_new(rdelm, rd_link); /* Redundant. */
+                       else {
+                               assert(qr_next(rdelm, rd_link) == rdelm);
+                               assert(qr_prev(rdelm, rd_link) == rdelm);
+                       }
+                       qr_meld(purge_runs_sentinel, rdelm, rd_link);
+               }
 
-               if (!all && nstashed >= npurge)
+               nstashed += npages;
+               if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
+                   ndirty_limit)
                        break;
        }
 
@@ -837,52 +1592,88 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
 }
 
 static size_t
-arena_purge_stashed(arena_t *arena, arena_chunk_miscelms_t *miscelms)
+arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    arena_runs_dirty_link_t *purge_runs_sentinel,
+    extent_node_t *purge_chunks_sentinel)
 {
        size_t npurged, nmadvise;
-       arena_chunk_map_misc_t *miscelm;
+       arena_runs_dirty_link_t *rdelm;
+       extent_node_t *chunkselm;
 
        if (config_stats)
                nmadvise = 0;
        npurged = 0;
 
        malloc_mutex_unlock(&arena->lock);
+       for (rdelm = qr_next(purge_runs_sentinel, rd_link),
+           chunkselm = qr_next(purge_chunks_sentinel, cc_link);
+           rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
+               size_t npages;
 
-       ql_foreach(miscelm, miscelms, dr_link) {
-               arena_chunk_t *chunk;
-               size_t pageind, run_size, npages, flag_unzeroed, i;
-               bool unzeroed;
-
-               chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
-               pageind = arena_miscelm_to_pageind(miscelm);
-               run_size = arena_mapbits_large_size_get(chunk, pageind);
-               npages = run_size >> LG_PAGE;
-
-               assert(pageind + npages <= chunk_npages);
-               unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
-                   LG_PAGE)), run_size);
-               flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
+               if (rdelm == &chunkselm->rd) {
+                       /*
+                        * Don't actually purge the chunk here because 1)
+                        * chunkselm is embedded in the chunk and must remain
+                        * valid, and 2) we deallocate the chunk in
+                        * arena_unstash_purged(), where it is destroyed,
+                        * decommitted, or purged, depending on chunk
+                        * deallocation policy.
+                        */
+                       size_t size = extent_node_size_get(chunkselm);
+                       npages = size >> LG_PAGE;
+                       chunkselm = qr_next(chunkselm, cc_link);
+               } else {
+                       size_t pageind, run_size, flag_unzeroed, flags, i;
+                       bool decommitted;
+                       arena_chunk_t *chunk =
+                           (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+                       arena_chunk_map_misc_t *miscelm =
+                           arena_rd_to_miscelm(rdelm);
+                       pageind = arena_miscelm_to_pageind(miscelm);
+                       run_size = arena_mapbits_large_size_get(chunk, pageind);
+                       npages = run_size >> LG_PAGE;
+
+                       assert(pageind + npages <= chunk_npages);
+                       assert(!arena_mapbits_decommitted_get(chunk, pageind));
+                       assert(!arena_mapbits_decommitted_get(chunk,
+                           pageind+npages-1));
+                       decommitted = !chunk_hooks->decommit(chunk, chunksize,
+                           pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
+                       if (decommitted) {
+                               flag_unzeroed = 0;
+                               flags = CHUNK_MAP_DECOMMITTED;
+                       } else {
+                               flag_unzeroed = chunk_purge_wrapper(arena,
+                                   chunk_hooks, chunk, chunksize, pageind <<
+                                   LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
+                               flags = flag_unzeroed;
+                       }
+                       arena_mapbits_large_set(chunk, pageind+npages-1, 0,
+                           flags);
+                       arena_mapbits_large_set(chunk, pageind, run_size,
+                           flags);
 
-               /*
-                * Set the unzeroed flag for all pages, now that pages_purge()
-                * has returned whether the pages were zeroed as a side effect
-                * of purging.  This chunk map modification is safe even though
-                * the arena mutex isn't currently owned by this thread,
-                * because the run is marked as allocated, thus protecting it
-                * from being modified by any other thread.  As long as these
-                * writes don't perturb the first and last elements'
-                * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
-                */
-               for (i = 0; i < npages; i++) {
-                       arena_mapbits_unzeroed_set(chunk, pageind+i,
-                           flag_unzeroed);
+                       /*
+                        * Set the unzeroed flag for internal pages, now that
+                        * chunk_purge_wrapper() has returned whether the pages
+                        * were zeroed as a side effect of purging.  This chunk
+                        * map modification is safe even though the arena mutex
+                        * isn't currently owned by this thread, because the run
+                        * is marked as allocated, thus protecting it from being
+                        * modified by any other thread.  As long as these
+                        * writes don't perturb the first and last elements'
+                        * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
+                        */
+                       for (i = 1; i < npages-1; i++) {
+                               arena_mapbits_internal_set(chunk, pageind+i,
+                                   flag_unzeroed);
+                       }
                }
 
                npurged += npages;
                if (config_stats)
                        nmadvise++;
        }
-
        malloc_mutex_lock(&arena->lock);
 
        if (config_stats) {
@@ -894,55 +1685,111 @@ arena_purge_stashed(arena_t *arena, arena_chunk_miscelms_t *miscelms)
 }
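/*
 * Editor's sketch (illustrative only, not part of this patch): the indexing
 * pattern used by the unzeroed-flag loop above, reduced to a standalone
 * function.  Only the interior map entries (i = 1 .. npages-2) are rewritten;
 * the first and last entries are left untouched so their CHUNK_MAP_ALLOCATED
 * bits are never perturbed without the arena lock.  The names below are
 * hypothetical simplifications of jemalloc's chunk map.
 */
#include <stddef.h>

static void
sketch_set_unzeroed_internal(unsigned *map, size_t npages,
    unsigned flag_unzeroed)
{
        size_t i;

        /* Interior entries only; first and last entries are skipped. */
        for (i = 1; i + 1 < npages; i++)
                map[i] = flag_unzeroed;
}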
 
 static void
-arena_unstash_purged(arena_t *arena, arena_chunk_miscelms_t *miscelms)
-{
-       arena_chunk_map_misc_t *miscelm;
-
-       /* Deallocate runs. */
-       for (miscelm = ql_first(miscelms); miscelm != NULL;
-           miscelm = ql_first(miscelms)) {
-               arena_run_t *run = &miscelm->run;
-               ql_remove(miscelms, miscelm, dr_link);
-               arena_run_dalloc(arena, run, false, true);
+arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    arena_runs_dirty_link_t *purge_runs_sentinel,
+    extent_node_t *purge_chunks_sentinel)
+{
+       arena_runs_dirty_link_t *rdelm, *rdelm_next;
+       extent_node_t *chunkselm;
+
+       /* Deallocate chunks/runs. */
+       for (rdelm = qr_next(purge_runs_sentinel, rd_link),
+           chunkselm = qr_next(purge_chunks_sentinel, cc_link);
+           rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
+               rdelm_next = qr_next(rdelm, rd_link);
+               if (rdelm == &chunkselm->rd) {
+                       extent_node_t *chunkselm_next = qr_next(chunkselm,
+                           cc_link);
+                       void *addr = extent_node_addr_get(chunkselm);
+                       size_t size = extent_node_size_get(chunkselm);
+                       bool zeroed = extent_node_zeroed_get(chunkselm);
+                       bool committed = extent_node_committed_get(chunkselm);
+                       extent_node_dirty_remove(chunkselm);
+                       arena_node_dalloc(arena, chunkselm);
+                       chunkselm = chunkselm_next;
+                       chunk_dalloc_arena(arena, chunk_hooks, addr, size,
+                           zeroed, committed);
+               } else {
+                       arena_chunk_t *chunk =
+                           (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+                       arena_chunk_map_misc_t *miscelm =
+                           arena_rd_to_miscelm(rdelm);
+                       size_t pageind = arena_miscelm_to_pageind(miscelm);
+                       bool decommitted = (arena_mapbits_decommitted_get(chunk,
+                           pageind) != 0);
+                       arena_run_t *run = &miscelm->run;
+                       qr_remove(rdelm, rd_link);
+                       arena_run_dalloc(arena, run, false, true, decommitted);
+               }
        }
 }
 
-void
-arena_purge(arena_t *arena, bool all)
+/*
+ * NB: ndirty_limit is interpreted differently depending on opt_purge:
+ *   - purge_mode_ratio: Purge as few dirty runs/chunks as possible to reach the
+ *                       desired state:
+ *                       (arena->ndirty <= ndirty_limit)
+ *   - purge_mode_decay: Purge as many dirty runs/chunks as possible without
+ *                       violating the invariant:
+ *                       (arena->ndirty >= ndirty_limit)
+ */
+static void
+arena_purge_to_limit(arena_t *arena, size_t ndirty_limit)
 {
-       size_t npurge, npurgeable, npurged;
-       arena_chunk_miscelms_t purge_list;
+       chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
+       size_t npurge, npurged;
+       arena_runs_dirty_link_t purge_runs_sentinel;
+       extent_node_t purge_chunks_sentinel;
 
-       if (config_debug) {
+       arena->purging = true;
+
+       /*
+        * Calls to arena_dirty_count() are disabled even for debug builds
+        * because overhead grows nonlinearly as memory usage increases.
+        */
+       if (false && config_debug) {
                size_t ndirty = arena_dirty_count(arena);
                assert(ndirty == arena->ndirty);
        }
-       assert((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty || all);
+       assert(opt_purge != purge_mode_ratio || (arena->nactive >>
+           arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
+
+       qr_new(&purge_runs_sentinel, rd_link);
+       extent_node_dirty_linkage_init(&purge_chunks_sentinel);
+
+       npurge = arena_stash_dirty(arena, &chunk_hooks, ndirty_limit,
+           &purge_runs_sentinel, &purge_chunks_sentinel);
+       if (npurge == 0)
+               goto label_return;
+       npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
+           &purge_chunks_sentinel);
+       assert(npurged == npurge);
+       arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
+           &purge_chunks_sentinel);
 
        if (config_stats)
                arena->stats.npurge++;
 
-       npurge = arena_compute_npurge(arena, all);
-       ql_new(&purge_list);
-       npurgeable = arena_stash_dirty(arena, all, npurge, &purge_list);
-       assert(npurgeable >= npurge);
-       npurged = arena_purge_stashed(arena, &purge_list);
-       assert(npurged == npurgeable);
-       arena_unstash_purged(arena, &purge_list);
+label_return:
+       arena->purging = false;
 }
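/*
 * Editor's sketch (not part of the patch): one way to read the ndirty_limit
 * contract described in the comment above arena_purge_to_limit().  The enum
 * and helper are simplified stand-ins; only the two inequalities are meant to
 * mirror the ratio and decay behaviors.
 */
#include <stdbool.h>
#include <stddef.h>

enum sketch_purge_mode { sketch_purge_ratio, sketch_purge_decay };

static bool
sketch_can_purge_next(enum sketch_purge_mode mode, size_t ndirty,
    size_t next_npages, size_t ndirty_limit)
{
        if (mode == sketch_purge_ratio) {
                /* Purge as few runs/chunks as possible: stop as soon as
                 * (ndirty <= ndirty_limit) holds. */
                return (ndirty > ndirty_limit);
        }
        /* Decay mode: purge as much as possible while preserving
         * (ndirty >= ndirty_limit) after the purge. */
        return (ndirty >= next_npages &&
            ndirty - next_npages >= ndirty_limit);
}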
 
 void
-arena_purge_all(arena_t *arena)
+arena_purge(arena_t *arena, bool all)
 {
 
        malloc_mutex_lock(&arena->lock);
-       arena_purge(arena, true);
+       if (all)
+               arena_purge_to_limit(arena, 0);
+       else
+               arena_maybe_purge(arena);
        malloc_mutex_unlock(&arena->lock);
 }
 
 static void
 arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
-    size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
+    size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
+    size_t flag_decommitted)
 {
        size_t size = *p_size;
        size_t run_ind = *p_run_ind;
@@ -951,7 +1798,9 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
        /* Try to coalesce forward. */
        if (run_ind + run_pages < chunk_npages &&
            arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
-           arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
+           arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
+           arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
+           flag_decommitted) {
                size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
                    run_ind+run_pages);
                size_t nrun_pages = nrun_size >> LG_PAGE;
@@ -964,11 +1813,16 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
                    run_ind+run_pages+nrun_pages-1) == nrun_size);
                assert(arena_mapbits_dirty_get(chunk,
                    run_ind+run_pages+nrun_pages-1) == flag_dirty);
+               assert(arena_mapbits_decommitted_get(chunk,
+                   run_ind+run_pages+nrun_pages-1) == flag_decommitted);
                arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
 
-               /* If the successor is dirty, remove it from runs_dirty. */
+               /*
+                * If the successor is dirty, remove it from the set of dirty
+                * pages.
+                */
                if (flag_dirty != 0) {
-                       arena_dirty_remove(arena, chunk, run_ind+run_pages,
+                       arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
                            nrun_pages);
                }
 
@@ -983,7 +1837,8 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
        /* Try to coalesce backward. */
        if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
            run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
-           flag_dirty) {
+           flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
+           flag_decommitted) {
                size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
                    run_ind-1);
                size_t prun_pages = prun_size >> LG_PAGE;
@@ -997,11 +1852,18 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
                assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
                    prun_size);
                assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
+               assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
+                   flag_decommitted);
                arena_avail_remove(arena, chunk, run_ind, prun_pages);
 
-               /* If the predecessor is dirty, remove it from runs_dirty. */
-               if (flag_dirty != 0)
-                       arena_dirty_remove(arena, chunk, run_ind, prun_pages);
+               /*
+                * If the predecessor is dirty, remove it from the set of dirty
+                * pages.
+                */
+               if (flag_dirty != 0) {
+                       arena_run_dirty_remove(arena, chunk, run_ind,
+                           prun_pages);
+               }
 
                size += prun_size;
                run_pages += prun_pages;
@@ -1016,31 +1878,43 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
        *p_run_pages = run_pages;
 }
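/*
 * Editor's sketch (illustrative only): the neighbor test that
 * arena_run_coalesce() now applies in both directions.  A run may only absorb
 * an adjacent run when that run is unallocated and its dirty and decommitted
 * states match; the struct is a hypothetical simplification of the chunk map
 * bits consulted above.
 */
#include <stdbool.h>

struct sketch_mapbits {
        bool allocated;
        bool dirty;
        bool decommitted;
};

static bool
sketch_can_coalesce(const struct sketch_mapbits *neighbor, bool flag_dirty,
    bool flag_decommitted)
{
        return (!neighbor->allocated &&
            neighbor->dirty == flag_dirty &&
            neighbor->decommitted == flag_decommitted);
}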
 
-static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
+static size_t
+arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+    size_t run_ind)
 {
-       arena_chunk_t *chunk;
-       arena_chunk_map_misc_t *miscelm;
-       size_t size, run_ind, run_pages, flag_dirty;
+       size_t size;
 
-       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
-       miscelm = arena_run_to_miscelm(run);
-       run_ind = arena_miscelm_to_pageind(miscelm);
        assert(run_ind >= map_bias);
        assert(run_ind < chunk_npages);
+
        if (arena_mapbits_large_get(chunk, run_ind) != 0) {
                size = arena_mapbits_large_size_get(chunk, run_ind);
-               assert(size == PAGE ||
-                   arena_mapbits_large_size_get(chunk,
+               assert(size == PAGE || arena_mapbits_large_size_get(chunk,
                    run_ind+(size>>LG_PAGE)-1) == 0);
        } else {
-               size_t binind = arena_bin_index(arena, run->bin);
-               arena_bin_info_t *bin_info = &arena_bin_info[binind];
+               arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
                size = bin_info->run_size;
        }
+
+       return (size);
+}
+
+static void
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
+    bool decommitted)
+{
+       arena_chunk_t *chunk;
+       arena_chunk_map_misc_t *miscelm;
+       size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
+
+       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+       miscelm = arena_run_to_miscelm(run);
+       run_ind = arena_miscelm_to_pageind(miscelm);
+       assert(run_ind >= map_bias);
+       assert(run_ind < chunk_npages);
+       size = arena_run_size_get(arena, chunk, run, run_ind);
        run_pages = (size >> LG_PAGE);
-       arena_cactive_update(arena, 0, run_pages);
-       arena->nactive -= run_pages;
+       arena_nactive_sub(arena, run_pages);
 
        /*
         * The run is dirty if the caller claims to have dirtied it, as well as
@@ -1049,16 +1923,18 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
         */
        assert(arena_mapbits_dirty_get(chunk, run_ind) ==
            arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
-       if (!cleaned && arena_mapbits_dirty_get(chunk, run_ind) != 0)
+       if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
+           != 0)
                dirty = true;
        flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
+       flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
 
        /* Mark pages as unallocated in the chunk map. */
-       if (dirty) {
-               arena_mapbits_unallocated_set(chunk, run_ind, size,
-                   CHUNK_MAP_DIRTY);
+       if (dirty || decommitted) {
+               size_t flags = flag_dirty | flag_decommitted;
+               arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
                arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
-                   CHUNK_MAP_DIRTY);
+                   flags);
        } else {
                arena_mapbits_unallocated_set(chunk, run_ind, size,
                    arena_mapbits_unzeroed_get(chunk, run_ind));
@@ -1066,22 +1942,25 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
                    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
        }
 
-       arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, flag_dirty);
+       arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
+           flag_dirty, flag_decommitted);
 
        /* Insert into runs_avail, now that coalescing is complete. */
        assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
            arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
        assert(arena_mapbits_dirty_get(chunk, run_ind) ==
            arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
+       assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
+           arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
        arena_avail_insert(arena, chunk, run_ind, run_pages);
 
        if (dirty)
-               arena_dirty_insert(arena, chunk, run_ind, run_pages);
+               arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
 
        /* Deallocate chunk if it is now completely unused. */
-       if (size == arena_maxclass) {
+       if (size == arena_maxrun) {
                assert(run_ind == map_bias);
-               assert(run_pages == (arena_maxclass >> LG_PAGE));
+               assert(run_pages == (arena_maxrun >> LG_PAGE));
                arena_chunk_dalloc(arena, chunk);
        }
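/*
 * Editor's sketch (not from the patch): the dirty determination made by
 * arena_run_dalloc() above, as a standalone predicate.  A freed run is treated
 * as dirty if the caller dirtied it, or if its pages were already dirty and it
 * was neither cleaned nor decommitted; names are simplified stand-ins.
 */
#include <stdbool.h>

static bool
sketch_run_is_dirty(bool caller_dirty, bool cleaned, bool decommitted,
    bool mapbits_dirty)
{
        return (caller_dirty || (!cleaned && !decommitted && mapbits_dirty));
}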
 
@@ -1104,6 +1983,9 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
        size_t pageind = arena_miscelm_to_pageind(miscelm);
        size_t head_npages = (oldsize - newsize) >> LG_PAGE;
        size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
+       size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
+       size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+           CHUNK_MAP_UNZEROED : 0;
 
        assert(oldsize > newsize);
 
@@ -1113,8 +1995,11 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
         * run first, in case of single-page runs.
         */
        assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
-       arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
-       arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
+       arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
+           (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+           pageind+head_npages-1)));
+       arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
+           (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
 
        if (config_debug) {
                UNUSED size_t tail_npages = newsize >> LG_PAGE;
@@ -1124,9 +2009,10 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
                    pageind+head_npages+tail_npages-1) == flag_dirty);
        }
        arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
-           flag_dirty);
+           flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+           pageind+head_npages)));
 
-       arena_run_dalloc(arena, run, false, false);
+       arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0));
 }
 
 static void
@@ -1137,6 +2023,9 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
        size_t pageind = arena_miscelm_to_pageind(miscelm);
        size_t head_npages = newsize >> LG_PAGE;
        size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
+       size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
+       size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+           CHUNK_MAP_UNZEROED : 0;
        arena_chunk_map_misc_t *tail_miscelm;
        arena_run_t *tail_run;
 
@@ -1148,8 +2037,11 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
         * run first, in case of single-page runs.
         */
        assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
-       arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
-       arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
+       arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
+           (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+           pageind+head_npages-1)));
+       arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
+           (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
 
        if (config_debug) {
                UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
@@ -1159,11 +2051,13 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
                    pageind+head_npages+tail_npages-1) == flag_dirty);
        }
        arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
-           flag_dirty);
+           flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+           pageind+head_npages)));
 
        tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
        tail_run = &tail_miscelm->run;
-       arena_run_dalloc(arena, tail_run, dirty, false);
+       arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted !=
+           0));
 }
 
 static arena_run_t *
@@ -1212,7 +2106,7 @@ static arena_run_t *
 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 {
        arena_run_t *run;
-       size_t binind;
+       szind_t binind;
        arena_bin_info_t *bin_info;
 
        /* Look for a usable run. */
@@ -1231,8 +2125,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
        run = arena_run_alloc_small(arena, bin_info->run_size, binind);
        if (run != NULL) {
                /* Initialize run internals. */
-               run->bin = bin;
-               run->nextind = 0;
+               run->binind = binind;
                run->nfree = bin_info->nregs;
                bitmap_init(run->bitmap, &bin_info->bitmap_info);
        }
@@ -1263,8 +2156,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 static void *
 arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
 {
-       void *ret;
-       size_t binind;
+       szind_t binind;
        arena_bin_info_t *bin_info;
        arena_run_t *run;
 
@@ -1277,6 +2169,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
                 * Another thread updated runcur while this one ran without the
                 * bin lock in arena_bin_nonfull_run_get().
                 */
+               void *ret;
                assert(bin->runcur->nfree > 0);
                ret = arena_run_reg_alloc(bin->runcur, bin_info);
                if (run != NULL) {
@@ -1310,13 +2203,11 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
 }
 
 void
-arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
-    uint64_t prof_accumbytes)
+arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
+    szind_t binind, uint64_t prof_accumbytes)
 {
        unsigned i, nfill;
        arena_bin_t *bin;
-       arena_run_t *run;
-       void *ptr;
 
        assert(tbin->ncached == 0);
 
@@ -1326,6 +2217,8 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
        malloc_mutex_lock(&bin->lock);
        for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
            tbin->lg_fill_div); i < nfill; i++) {
+               arena_run_t *run;
+               void *ptr;
                if ((run = bin->runcur) != NULL && run->nfree > 0)
                        ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
                else
@@ -1334,31 +2227,31 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
                        /*
                         * OOM.  tbin->avail isn't yet filled down to its first
                         * element, so the successful allocations (if any) must
-                        * be moved to the base of tbin->avail before bailing
-                        * out.
+                        * be moved just before tbin->avail before bailing out.
                         */
                        if (i > 0) {
-                               memmove(tbin->avail, &tbin->avail[nfill - i],
+                               memmove(tbin->avail - i, tbin->avail - nfill,
                                    i * sizeof(void *));
                        }
                        break;
                }
-               if (config_fill && unlikely(opt_junk)) {
+               if (config_fill && unlikely(opt_junk_alloc)) {
                        arena_alloc_junk_small(ptr, &arena_bin_info[binind],
                            true);
                }
                /* Insert such that low regions get used first. */
-               tbin->avail[nfill - 1 - i] = ptr;
+               *(tbin->avail - nfill + i) = ptr;
        }
        if (config_stats) {
-               bin->stats.allocated += i * arena_bin_info[binind].reg_size;
                bin->stats.nmalloc += i;
                bin->stats.nrequests += tbin->tstats.nrequests;
+               bin->stats.curregs += i;
                bin->stats.nfills++;
                tbin->tstats.nrequests = 0;
        }
        malloc_mutex_unlock(&bin->lock);
        tbin->ncached = i;
+       arena_decay_tick(tsd, arena);
 }
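/*
 * Editor's sketch (illustrative, not part of the patch): the tcache avail
 * layout assumed by the rewritten fill loop above.  "avail" points one past
 * the cached region, slots live at avail[-ncached .. -1], and a partial fill
 * is shifted so it ends flush against avail.  Types are simplified; only the
 * index arithmetic mirrors the code.
 */
#include <string.h>

static unsigned
sketch_fill_avail(void **avail, unsigned nfill, void *(*alloc_one)(void))
{
        unsigned i;

        for (i = 0; i < nfill; i++) {
                void *ptr = alloc_one();
                if (ptr == NULL) {
                        /* Move the i successful allocations so they end just
                         * before avail, matching the memmove above. */
                        if (i > 0) {
                                memmove(avail - i, avail - nfill,
                                    i * sizeof(void *));
                        }
                        break;
                }
                /* Insert such that low regions get used first. */
                *(avail - nfill + i) = ptr;
        }
        return (i);
}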
 
 void
@@ -1400,29 +2293,35 @@ arena_redzone_corruption_t *arena_redzone_corruption =
 static void
 arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
 {
-       size_t size = bin_info->reg_size;
-       size_t redzone_size = bin_info->redzone_size;
-       size_t i;
        bool error = false;
 
-       for (i = 1; i <= redzone_size; i++) {
-               uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
-               if (*byte != 0xa5) {
-                       error = true;
-                       arena_redzone_corruption(ptr, size, false, i, *byte);
-                       if (reset)
-                               *byte = 0xa5;
+       if (opt_junk_alloc) {
+               size_t size = bin_info->reg_size;
+               size_t redzone_size = bin_info->redzone_size;
+               size_t i;
+
+               for (i = 1; i <= redzone_size; i++) {
+                       uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
+                       if (*byte != 0xa5) {
+                               error = true;
+                               arena_redzone_corruption(ptr, size, false, i,
+                                   *byte);
+                               if (reset)
+                                       *byte = 0xa5;
+                       }
                }
-       }
-       for (i = 0; i < redzone_size; i++) {
-               uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
-               if (*byte != 0xa5) {
-                       error = true;
-                       arena_redzone_corruption(ptr, size, true, i, *byte);
-                       if (reset)
-                               *byte = 0xa5;
+               for (i = 0; i < redzone_size; i++) {
+                       uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
+                       if (*byte != 0xa5) {
+                               error = true;
+                               arena_redzone_corruption(ptr, size, true, i,
+                                   *byte);
+                               if (reset)
+                                       *byte = 0xa5;
+                       }
                }
        }
+
        if (opt_abort && error)
                abort();
 }
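/*
 * Editor's sketch (not part of the patch): a standalone version of the
 * redzone scan above.  Bytes immediately before and after a region are
 * expected to hold the junk pattern 0xa5; any other value indicates an
 * out-of-bounds write.  Names and bounds are simplified stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
sketch_redzones_intact(const void *ptr, size_t size, size_t redzone_size)
{
        const uint8_t *p = (const uint8_t *)ptr;
        size_t i;

        for (i = 1; i <= redzone_size; i++) {
                if (p[-(ptrdiff_t)i] != 0xa5)
                        return (false);
        }
        for (i = 0; i < redzone_size; i++) {
                if (p[size + i] != 0xa5)
                        return (false);
        }
        return (true);
}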
@@ -1450,30 +2349,29 @@ arena_dalloc_junk_small_t *arena_dalloc_junk_small =
 void
 arena_quarantine_junk_small(void *ptr, size_t usize)
 {
-       size_t binind;
+       szind_t binind;
        arena_bin_info_t *bin_info;
        cassert(config_fill);
-       assert(opt_junk);
+       assert(opt_junk_free);
        assert(opt_quarantine);
        assert(usize <= SMALL_MAXCLASS);
 
-       binind = small_size2bin(usize);
+       binind = size2index(usize);
        bin_info = &arena_bin_info[binind];
        arena_redzones_validate(ptr, bin_info, true);
 }
 
-void *
-arena_malloc_small(arena_t *arena, size_t size, bool zero)
+static void *
+arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
 {
        void *ret;
        arena_bin_t *bin;
+       size_t usize;
        arena_run_t *run;
-       size_t binind;
 
-       binind = small_size2bin(size);
        assert(binind < NBINS);
        bin = &arena->bins[binind];
-       size = small_bin2size(binind);
+       usize = index2size(binind);
 
        malloc_mutex_lock(&bin->lock);
        if ((run = bin->runcur) != NULL && run->nfree > 0)
@@ -1487,82 +2385,118 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
        }
 
        if (config_stats) {
-               bin->stats.allocated += size;
                bin->stats.nmalloc++;
                bin->stats.nrequests++;
+               bin->stats.curregs++;
        }
        malloc_mutex_unlock(&bin->lock);
-       if (config_prof && !isthreaded && arena_prof_accum(arena, size))
+       if (config_prof && !isthreaded && arena_prof_accum(arena, usize))
                prof_idump();
 
        if (!zero) {
                if (config_fill) {
-                       if (unlikely(opt_junk)) {
+                       if (unlikely(opt_junk_alloc)) {
                                arena_alloc_junk_small(ret,
                                    &arena_bin_info[binind], false);
                        } else if (unlikely(opt_zero))
-                               memset(ret, 0, size);
+                               memset(ret, 0, usize);
                }
-               JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+               JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
        } else {
-               if (config_fill && unlikely(opt_junk)) {
+               if (config_fill && unlikely(opt_junk_alloc)) {
                        arena_alloc_junk_small(ret, &arena_bin_info[binind],
                            true);
                }
-               JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
-               memset(ret, 0, size);
+               JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
+               memset(ret, 0, usize);
        }
 
+       arena_decay_tick(tsd, arena);
        return (ret);
 }
 
 void *
-arena_malloc_large(arena_t *arena, size_t size, bool zero)
+arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
 {
        void *ret;
+       size_t usize;
+       uintptr_t random_offset;
        arena_run_t *run;
        arena_chunk_map_misc_t *miscelm;
-       UNUSED bool idump;
+       UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
 
        /* Large allocation. */
-       size = PAGE_CEILING(size);
+       usize = index2size(binind);
        malloc_mutex_lock(&arena->lock);
-       run = arena_run_alloc_large(arena, size, zero);
+       if (config_cache_oblivious) {
+               uint64_t r;
+
+               /*
+                * Compute a uniformly distributed offset within the first page
+                * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
+                * for 4 KiB pages and 64-byte cachelines.
+                */
+               r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE);
+               random_offset = ((uintptr_t)r) << LG_CACHELINE;
+       } else
+               random_offset = 0;
+       run = arena_run_alloc_large(arena, usize + large_pad, zero);
        if (run == NULL) {
                malloc_mutex_unlock(&arena->lock);
                return (NULL);
        }
        miscelm = arena_run_to_miscelm(run);
-       ret = arena_miscelm_to_rpages(miscelm);
+       ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
+           random_offset);
        if (config_stats) {
+               szind_t index = binind - NBINS;
+
                arena->stats.nmalloc_large++;
                arena->stats.nrequests_large++;
-               arena->stats.allocated_large += size;
-               arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
-               arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
-               arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+               arena->stats.allocated_large += usize;
+               arena->stats.lstats[index].nmalloc++;
+               arena->stats.lstats[index].nrequests++;
+               arena->stats.lstats[index].curruns++;
        }
        if (config_prof)
-               idump = arena_prof_accum_locked(arena, size);
+               idump = arena_prof_accum_locked(arena, usize);
        malloc_mutex_unlock(&arena->lock);
        if (config_prof && idump)
                prof_idump();
 
        if (!zero) {
                if (config_fill) {
-                       if (unlikely(opt_junk))
-                               memset(ret, 0xa5, size);
+                       if (unlikely(opt_junk_alloc))
+                               memset(ret, 0xa5, usize);
                        else if (unlikely(opt_zero))
-                               memset(ret, 0, size);
+                               memset(ret, 0, usize);
                }
        }
 
+       arena_decay_tick(tsd, arena);
        return (ret);
 }
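/*
 * Editor's sketch (illustrative only): the cache-oblivious offset computed
 * above, as a standalone calculation.  With 4 KiB pages (LG_PAGE == 12) and
 * 64-byte cachelines (LG_CACHELINE == 6), the random value is drawn from
 * [0, 64) and the returned offset is one of 0, 64, 128, ..., 4032.  rand() is
 * used purely as a placeholder for jemalloc's prng_lg_range().
 */
#include <stdint.h>
#include <stdlib.h>

#define SKETCH_LG_PAGE      12
#define SKETCH_LG_CACHELINE 6

static uintptr_t
sketch_random_large_offset(void)
{
        uint64_t r = (uint64_t)rand() &
            ((1u << (SKETCH_LG_PAGE - SKETCH_LG_CACHELINE)) - 1);
        return ((uintptr_t)r << SKETCH_LG_CACHELINE);
}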
 
-/* Only handles large allocations that require more than page alignment. */
 void *
-arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
+arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
+    bool zero, tcache_t *tcache)
+{
+
+       arena = arena_choose(tsd, arena);
+       if (unlikely(arena == NULL))
+               return (NULL);
+
+       if (likely(size <= SMALL_MAXCLASS))
+               return (arena_malloc_small(tsd, arena, ind, zero));
+       if (likely(size <= large_maxclass))
+               return (arena_malloc_large(tsd, arena, ind, zero));
+       return (huge_malloc(tsd, arena, index2size(ind), zero, tcache));
+}
+
+/* Only handles large allocations that require more than page alignment. */
+static void *
+arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
+    bool zero)
 {
        void *ret;
        size_t alloc_size, leadsize, trailsize;
@@ -1571,10 +2505,14 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
        arena_chunk_map_misc_t *miscelm;
        void *rpages;
 
-       assert((size & PAGE_MASK) == 0);
+       assert(usize == PAGE_CEILING(usize));
+
+       arena = arena_choose(tsd, arena);
+       if (unlikely(arena == NULL))
+               return (NULL);
 
        alignment = PAGE_CEILING(alignment);
-       alloc_size = size + alignment - PAGE;
+       alloc_size = usize + large_pad + alignment - PAGE;
 
        malloc_mutex_lock(&arena->lock);
        run = arena_run_alloc_large(arena, alloc_size, false);
@@ -1588,8 +2526,8 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
 
        leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
            (uintptr_t)rpages;
-       assert(alloc_size >= leadsize + size);
-       trailsize = alloc_size - leadsize - size;
+       assert(alloc_size >= leadsize + usize);
+       trailsize = alloc_size - leadsize - usize - large_pad;
        if (leadsize != 0) {
                arena_chunk_map_misc_t *head_miscelm = miscelm;
                arena_run_t *head_run = run;
@@ -1603,27 +2541,77 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
                    alloc_size - leadsize);
        }
        if (trailsize != 0) {
-               arena_run_trim_tail(arena, chunk, run, size + trailsize, size,
-                   false);
+               arena_run_trim_tail(arena, chunk, run, usize + large_pad +
+                   trailsize, usize + large_pad, false);
+       }
+       if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
+               size_t run_ind =
+                   arena_miscelm_to_pageind(arena_run_to_miscelm(run));
+               bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
+               bool decommitted = (arena_mapbits_decommitted_get(chunk,
+                   run_ind) != 0);
+
+               assert(decommitted); /* Cause of OOM. */
+               arena_run_dalloc(arena, run, dirty, false, decommitted);
+               malloc_mutex_unlock(&arena->lock);
+               return (NULL);
        }
-       arena_run_init_large(arena, run, size, zero);
        ret = arena_miscelm_to_rpages(miscelm);
 
        if (config_stats) {
+               szind_t index = size2index(usize) - NBINS;
+
                arena->stats.nmalloc_large++;
                arena->stats.nrequests_large++;
-               arena->stats.allocated_large += size;
-               arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
-               arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
-               arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+               arena->stats.allocated_large += usize;
+               arena->stats.lstats[index].nmalloc++;
+               arena->stats.lstats[index].nrequests++;
+               arena->stats.lstats[index].curruns++;
        }
        malloc_mutex_unlock(&arena->lock);
 
        if (config_fill && !zero) {
-               if (unlikely(opt_junk))
-                       memset(ret, 0xa5, size);
+               if (unlikely(opt_junk_alloc))
+                       memset(ret, 0xa5, usize);
                else if (unlikely(opt_zero))
-                       memset(ret, 0, size);
+                       memset(ret, 0, usize);
+       }
+       arena_decay_tick(tsd, arena);
+       return (ret);
+}
+
+void *
+arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
+    bool zero, tcache_t *tcache)
+{
+       void *ret;
+
+       if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
+           && (usize & PAGE_MASK) == 0))) {
+               /* Small; alignment doesn't require special run placement. */
+               ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
+                   tcache, true);
+       } else if (usize <= large_maxclass && alignment <= PAGE) {
+               /*
+                * Large; alignment doesn't require special run placement.
+                * However, the cached pointer may be at a random offset from
+                * the base of the run, so do some bit manipulation to retrieve
+                * the base.
+                */
+               ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
+                   tcache, true);
+               if (config_cache_oblivious)
+                       ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
+       } else {
+               if (likely(usize <= large_maxclass)) {
+                       ret = arena_palloc_large(tsd, arena, usize, alignment,
+                           zero);
+               } else if (likely(alignment <= chunksize))
+                       ret = huge_malloc(tsd, arena, usize, zero, tcache);
+               else {
+                       ret = huge_palloc(tsd, arena, usize, alignment, zero,
+                           tcache);
+               }
        }
        return (ret);
 }
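/*
 * Editor's sketch (not from the patch): because large runs may be returned at
 * a random cacheline offset (see arena_malloc_large above), arena_palloc()
 * recovers the page-aligned base by masking.  This standalone helper performs
 * the same operation; PAGE_MASK_SKETCH is a stand-in for jemalloc's PAGE_MASK.
 */
#include <stdint.h>

#define PAGE_SIZE_SKETCH 4096u
#define PAGE_MASK_SKETCH (PAGE_SIZE_SKETCH - 1)

static void *
sketch_page_base(void *ptr)
{
        return ((void *)((uintptr_t)ptr & ~(uintptr_t)PAGE_MASK_SKETCH));
}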
@@ -1632,22 +2620,23 @@ void
 arena_prof_promoted(const void *ptr, size_t size)
 {
        arena_chunk_t *chunk;
-       size_t pageind, binind;
+       size_t pageind;
+       szind_t binind;
 
        cassert(config_prof);
        assert(ptr != NULL);
        assert(CHUNK_ADDR2BASE(ptr) != ptr);
-       assert(isalloc(ptr, false) == PAGE);
-       assert(isalloc(ptr, true) == PAGE);
+       assert(isalloc(ptr, false) == LARGE_MINCLASS);
+       assert(isalloc(ptr, true) == LARGE_MINCLASS);
        assert(size <= SMALL_MAXCLASS);
 
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-       binind = small_size2bin(size);
+       binind = size2index(size);
        assert(binind < NBINS);
        arena_mapbits_large_binind_set(chunk, pageind, binind);
 
-       assert(isalloc(ptr, false) == PAGE);
+       assert(isalloc(ptr, false) == LARGE_MINCLASS);
        assert(isalloc(ptr, true) == size);
 }
 
@@ -1660,7 +2649,8 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
        if (run == bin->runcur)
                bin->runcur = NULL;
        else {
-               size_t binind = arena_bin_index(chunk->arena, bin);
+               szind_t binind = arena_bin_index(extent_node_arena_get(
+                   &chunk->node), bin);
                arena_bin_info_t *bin_info = &arena_bin_info[binind];
 
                if (bin_info->nregs != 1) {
@@ -1678,54 +2668,15 @@ static void
 arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
     arena_bin_t *bin)
 {
-       size_t binind;
-       arena_bin_info_t *bin_info;
-       size_t npages, run_ind, past;
-       arena_chunk_map_misc_t *miscelm;
-       void *rpages;
 
        assert(run != bin->runcur);
        assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
            NULL);
 
-       binind = arena_bin_index(chunk->arena, run->bin);
-       bin_info = &arena_bin_info[binind];
-
        malloc_mutex_unlock(&bin->lock);
        /******************************/
-       npages = bin_info->run_size >> LG_PAGE;
-       miscelm = arena_run_to_miscelm(run);
-       run_ind = arena_miscelm_to_pageind(miscelm);
-       rpages = arena_miscelm_to_rpages(miscelm);
-       past = (size_t)(PAGE_CEILING((uintptr_t)rpages +
-           (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
-           bin_info->reg_interval - bin_info->redzone_size) -
-           (uintptr_t)chunk) >> LG_PAGE);
        malloc_mutex_lock(&arena->lock);
-
-       /*
-        * If the run was originally clean, and some pages were never touched,
-        * trim the clean pages before deallocating the dirty portion of the
-        * run.
-        */
-       assert(arena_mapbits_dirty_get(chunk, run_ind) ==
-           arena_mapbits_dirty_get(chunk, run_ind+npages-1));
-       if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
-           npages) {
-               /* Trim clean pages.  Convert to large run beforehand. */
-               assert(npages > 0);
-               if (past > run_ind) {
-                       arena_mapbits_large_set(chunk, run_ind,
-                           bin_info->run_size, 0);
-                       arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
-                       arena_run_trim_tail(arena, chunk, run, (npages <<
-                           LG_PAGE), ((past - run_ind) << LG_PAGE), false);
-                       arena_run_dalloc(arena, run, true, false);
-               } else
-                       arena_run_dalloc(arena, run, false, false);
-               /* npages = past - run_ind; */
-       } else
-               arena_run_dalloc(arena, run, true, false);
+       arena_run_dalloc(arena, run, true, false, false);
        malloc_mutex_unlock(&arena->lock);
        /****************************/
        malloc_mutex_lock(&bin->lock);
@@ -1754,27 +2705,24 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
                arena_bin_runs_insert(bin, run);
 }
 
-void
-arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    arena_chunk_map_bits_t *bitselm)
+static void
+arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    arena_chunk_map_bits_t *bitselm, bool junked)
 {
        size_t pageind, rpages_ind;
        arena_run_t *run;
        arena_bin_t *bin;
        arena_bin_info_t *bin_info;
-       size_t size, binind;
+       szind_t binind;
 
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
        rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
        run = &arena_miscelm_get(chunk, rpages_ind)->run;
-       bin = run->bin;
-       binind = arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
-           pageind));
+       binind = run->binind;
+       bin = &arena->bins[binind];
        bin_info = &arena_bin_info[binind];
-       if (config_fill || config_stats)
-               size = bin_info->reg_size;
 
-       if (config_fill && unlikely(opt_junk))
+       if (!junked && config_fill && unlikely(opt_junk_free))
                arena_dalloc_junk_small(ptr, bin_info);
 
        arena_run_reg_dalloc(run, ptr);
@@ -1785,11 +2733,19 @@ arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
                arena_bin_lower_run(arena, chunk, run, bin);
 
        if (config_stats) {
-               bin->stats.allocated -= size;
                bin->stats.ndalloc++;
+               bin->stats.curregs--;
        }
 }
 
+void
+arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    arena_chunk_map_bits_t *bitselm)
+{
+
+       arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
+}
+
 void
 arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t pageind, arena_chunk_map_bits_t *bitselm)
@@ -1800,14 +2756,14 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 
        rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
        run = &arena_miscelm_get(chunk, rpages_ind)->run;
-       bin = run->bin;
+       bin = &arena->bins[run->binind];
        malloc_mutex_lock(&bin->lock);
-       arena_dalloc_bin_locked(arena, chunk, ptr, bitselm);
+       arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
        malloc_mutex_unlock(&bin->lock);
 }
 
 void
-arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t pageind)
 {
        arena_chunk_map_bits_t *bitselm;
@@ -1819,17 +2775,18 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
        }
        bitselm = arena_bitselm_get(chunk, pageind);
        arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
+       arena_decay_tick(tsd, arena);
 }
 
 #ifdef JEMALLOC_JET
 #undef arena_dalloc_junk_large
 #define        arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
 #endif
-static void
+void
 arena_dalloc_junk_large(void *ptr, size_t usize)
 {
 
-       if (config_fill && unlikely(opt_junk))
+       if (config_fill && unlikely(opt_junk_free))
                memset(ptr, 0x5a, usize);
 }
 #ifdef JEMALLOC_JET
@@ -1839,35 +2796,49 @@ arena_dalloc_junk_large_t *arena_dalloc_junk_large =
     JEMALLOC_N(arena_dalloc_junk_large_impl);
 #endif
 
-void
-arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+static void
+arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, bool junked)
 {
        size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
        arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
        arena_run_t *run = &miscelm->run;
 
        if (config_fill || config_stats) {
-               size_t usize = arena_mapbits_large_size_get(chunk, pageind);
+               size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
+                   large_pad;
 
-               arena_dalloc_junk_large(ptr, usize);
+               if (!junked)
+                       arena_dalloc_junk_large(ptr, usize);
                if (config_stats) {
+                       szind_t index = size2index(usize) - NBINS;
+
                        arena->stats.ndalloc_large++;
                        arena->stats.allocated_large -= usize;
-                       arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++;
-                       arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--;
+                       arena->stats.lstats[index].ndalloc++;
+                       arena->stats.lstats[index].curruns--;
                }
        }
 
-       arena_run_dalloc(arena, run, true, false);
+       arena_run_dalloc(arena, run, true, false, false);
+}
+
+void
+arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr)
+{
+
+       arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
 }
 
 void
-arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr)
 {
 
        malloc_mutex_lock(&arena->lock);
-       arena_dalloc_large_locked(arena, chunk, ptr);
+       arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
        malloc_mutex_unlock(&arena->lock);
+       arena_decay_tick(tsd, arena);
 }
 
 static void
@@ -1885,54 +2856,84 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
         * allocations.
         */
        malloc_mutex_lock(&arena->lock);
-       arena_run_trim_tail(arena, chunk, run, oldsize, size, true);
+       arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
+           large_pad, true);
        if (config_stats) {
+               szind_t oldindex = size2index(oldsize) - NBINS;
+               szind_t index = size2index(size) - NBINS;
+
                arena->stats.ndalloc_large++;
                arena->stats.allocated_large -= oldsize;
-               arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
-               arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
+               arena->stats.lstats[oldindex].ndalloc++;
+               arena->stats.lstats[oldindex].curruns--;
 
                arena->stats.nmalloc_large++;
                arena->stats.nrequests_large++;
                arena->stats.allocated_large += size;
-               arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
-               arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
-               arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+               arena->stats.lstats[index].nmalloc++;
+               arena->stats.lstats[index].nrequests++;
+               arena->stats.lstats[index].curruns++;
        }
        malloc_mutex_unlock(&arena->lock);
 }
 
 static bool
 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t oldsize, size_t size, size_t extra, bool zero)
+    size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
 {
        size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-       size_t npages = oldsize >> LG_PAGE;
+       size_t npages = (oldsize + large_pad) >> LG_PAGE;
        size_t followsize;
 
-       assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
+       assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
+           large_pad);
 
        /* Try to extend the run. */
-       assert(size + extra > oldsize);
        malloc_mutex_lock(&arena->lock);
-       if (pageind + npages < chunk_npages &&
-           arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
-           (followsize = arena_mapbits_unallocated_size_get(chunk,
-           pageind+npages)) >= size - oldsize) {
+       if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
+           pageind+npages) != 0)
+               goto label_fail;
+       followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
+       if (oldsize + followsize >= usize_min) {
                /*
                 * The next run is available and sufficiently large.  Split the
                 * following run, then merge the first part with the existing
                 * allocation.
                 */
-               size_t flag_dirty;
-               size_t splitsize = (oldsize + followsize <= size + extra)
-                   ? followsize : size + extra - oldsize;
-               arena_run_t *run = &arena_miscelm_get(chunk,
-                   pageind+npages)->run;
-               arena_run_split_large(arena, run, splitsize, zero);
+               arena_run_t *run;
+               size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
+
+               usize = usize_max;
+               while (oldsize + followsize < usize)
+                       usize = index2size(size2index(usize)-1);
+               assert(usize >= usize_min);
+               assert(usize >= oldsize);
+               splitsize = usize - oldsize;
+               if (splitsize == 0)
+                       goto label_fail;
+
+               run = &arena_miscelm_get(chunk, pageind+npages)->run;
+               if (arena_run_split_large(arena, run, splitsize, zero))
+                       goto label_fail;
+
+               if (config_cache_oblivious && zero) {
+                       /*
+                        * Zero the trailing bytes of the original allocation's
+                        * last page, since they are in an indeterminate state.
+                        * There will always be trailing bytes, because ptr's
+                        * offset from the beginning of the run is a multiple of
+                        * CACHELINE in [0 .. PAGE).
+                        */
+                       void *zbase = (void *)((uintptr_t)ptr + oldsize);
+                       void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
+                           PAGE));
+                       size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
+                       assert(nzero > 0);
+                       memset(zbase, 0, nzero);
+               }
 
                size = oldsize + splitsize;
-               npages = size >> LG_PAGE;
+               npages = (size + large_pad) >> LG_PAGE;
 
                /*
                 * Mark the extended run as dirty if either portion of the run
@@ -1944,27 +2945,35 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
                 */
                flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
                    arena_mapbits_dirty_get(chunk, pageind+npages-1);
-               arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
-               arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
+               flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
+               arena_mapbits_large_set(chunk, pageind, size + large_pad,
+                   flag_dirty | (flag_unzeroed_mask &
+                   arena_mapbits_unzeroed_get(chunk, pageind)));
+               arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
+                   (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+                   pageind+npages-1)));
 
                if (config_stats) {
+                       szind_t oldindex = size2index(oldsize) - NBINS;
+                       szind_t index = size2index(size) - NBINS;
+
                        arena->stats.ndalloc_large++;
                        arena->stats.allocated_large -= oldsize;
-                       arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
-                       arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
+                       arena->stats.lstats[oldindex].ndalloc++;
+                       arena->stats.lstats[oldindex].curruns--;
 
                        arena->stats.nmalloc_large++;
                        arena->stats.nrequests_large++;
                        arena->stats.allocated_large += size;
-                       arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
-                       arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
-                       arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+                       arena->stats.lstats[index].nmalloc++;
+                       arena->stats.lstats[index].nrequests++;
+                       arena->stats.lstats[index].curruns++;
                }
                malloc_mutex_unlock(&arena->lock);
                return (false);
        }
+label_fail:
        malloc_mutex_unlock(&arena->lock);
-
        return (true);
 }
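/*
 * Editor's sketch (illustrative, not part of the patch): the size-class
 * walk-down in arena_ralloc_large_grow() picks the largest class that fits in
 * oldsize + followsize, starting from usize_max.  Size classes are modelled
 * here as a sorted array; index2size()/size2index() play that role above.
 */
#include <stddef.h>

static size_t
sketch_largest_fitting(const size_t *classes, size_t start_idx, size_t avail)
{
        size_t idx = start_idx;

        /* Mirror of: while (oldsize + followsize < usize)
         *                    usize = index2size(size2index(usize)-1); */
        while (idx > 0 && classes[idx] > avail)
                idx--;
        return (classes[idx]);
}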
 
@@ -1976,7 +2985,7 @@ static void
 arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
 {
 
-       if (config_fill && unlikely(opt_junk)) {
+       if (config_fill && unlikely(opt_junk_free)) {
                memset((void *)((uintptr_t)ptr + usize), 0x5a,
                    old_usize - usize);
        }
@@ -1993,136 +3002,142 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large =
  * always fail if growing an object, and the following run is already in use.
  */
 static bool
-arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
-    bool zero)
+arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero)
 {
-       size_t psize;
+       arena_chunk_t *chunk;
+       arena_t *arena;
 
-       psize = PAGE_CEILING(size + extra);
-       if (psize == oldsize) {
-               /* Same size class. */
+       if (oldsize == usize_max) {
+               /* Current size class is compatible and maximal. */
                return (false);
-       } else {
-               arena_chunk_t *chunk;
-               arena_t *arena;
+       }
 
-               chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-               arena = chunk->arena;
-
-               if (psize < oldsize) {
-                       /* Fill before shrinking in order avoid a race. */
-                       arena_ralloc_junk_large(ptr, oldsize, psize);
-                       arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
-                           psize);
-                       return (false);
-               } else {
-                       bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
-                           oldsize, PAGE_CEILING(size),
-                           psize - PAGE_CEILING(size), zero);
-                       if (config_fill && !ret && !zero) {
-                               if (unlikely(opt_junk)) {
-                                       memset((void *)((uintptr_t)ptr +
-                                           oldsize), 0xa5, isalloc(ptr,
-                                           config_prof) - oldsize);
-                               } else if (unlikely(opt_zero)) {
-                                       memset((void *)((uintptr_t)ptr +
-                                           oldsize), 0, isalloc(ptr,
-                                           config_prof) - oldsize);
-                               }
+       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+       arena = extent_node_arena_get(&chunk->node);
+
+       if (oldsize < usize_max) {
+               bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
+                   usize_min, usize_max, zero);
+               if (config_fill && !ret && !zero) {
+                       if (unlikely(opt_junk_alloc)) {
+                               memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
+                                   isalloc(ptr, config_prof) - oldsize);
+                       } else if (unlikely(opt_zero)) {
+                               memset((void *)((uintptr_t)ptr + oldsize), 0,
+                                   isalloc(ptr, config_prof) - oldsize);
                        }
-                       return (ret);
                }
+               return (ret);
        }
+
+       assert(oldsize > usize_max);
+       /* Fill before shrinking in order to avoid a race. */
+       arena_ralloc_junk_large(ptr, oldsize, usize_max);
+       arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
+       return (false);
 }
 
 bool
-arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
-    bool zero)
+arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+    size_t extra, bool zero)
 {
+       size_t usize_min, usize_max;
 
-       /*
-        * Avoid moving the allocation if the size class can be left the same.
-        */
-       if (oldsize <= arena_maxclass) {
+       /* Calls with non-zero extra had to clamp extra. */
+       assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
+
+       if (unlikely(size > HUGE_MAXCLASS))
+               return (true);
+
+       usize_min = s2u(size);
+       usize_max = s2u(size + extra);
+       if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
+               arena_chunk_t *chunk;
+
+               /*
+                * Avoid moving the allocation if the size class can be left the
+                * same.
+                */
                if (oldsize <= SMALL_MAXCLASS) {
-                       assert(arena_bin_info[small_size2bin(oldsize)].reg_size
-                           == oldsize);
-                       if ((size + extra <= SMALL_MAXCLASS &&
-                           small_size2bin(size + extra) ==
-                           small_size2bin(oldsize)) || (size <= oldsize &&
-                           size + extra >= oldsize))
-                               return (false);
+                       assert(arena_bin_info[size2index(oldsize)].reg_size ==
+                           oldsize);
+                       if ((usize_max > SMALL_MAXCLASS ||
+                           size2index(usize_max) != size2index(oldsize)) &&
+                           (size > oldsize || usize_max < oldsize))
+                               return (true);
                } else {
-                       assert(size <= arena_maxclass);
-                       if (size + extra > SMALL_MAXCLASS) {
-                               if (!arena_ralloc_large(ptr, oldsize, size,
-                                   extra, zero))
-                                       return (false);
-                       }
+                       if (usize_max <= SMALL_MAXCLASS)
+                               return (true);
+                       if (arena_ralloc_large(ptr, oldsize, usize_min,
+                           usize_max, zero))
+                               return (true);
                }
+
+               chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+               arena_decay_tick(tsd, extent_node_arena_get(&chunk->node));
+               return (false);
+       } else {
+               return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min,
+                   usize_max, zero));
        }
+}
 
-       /* Reallocation would require a move. */
-       return (true);
+static void *
+arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache)
+{
+
+       if (alignment == 0)
+               return (arena_malloc(tsd, arena, usize, size2index(usize), zero,
+                   tcache, true));
+       usize = sa2u(usize, alignment);
+       if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+               return (NULL);
+       return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
 }
 
 void *
 arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
-    bool try_tcache_dalloc)
+    size_t alignment, bool zero, tcache_t *tcache)
 {
        void *ret;
-       size_t copysize;
+       size_t usize;
 
-       /* Try to avoid moving the allocation. */
-       if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
-               return (ptr);
+       usize = s2u(size);
+       if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
+               return (NULL);
 
-       /*
-        * size and oldsize are different enough that we need to move the
-        * object.  In that case, fall back to allocating new space and
-        * copying.
-        */
-       if (alignment != 0) {
-               size_t usize = sa2u(size + extra, alignment);
-               if (usize == 0)
-                       return (NULL);
-               ret = ipalloct(tsd, usize, alignment, zero, try_tcache_alloc,
-                   arena);
-       } else {
-               ret = arena_malloc(tsd, arena, size + extra, zero,
-                   try_tcache_alloc);
-       }
+       if (likely(usize <= large_maxclass)) {
+               size_t copysize;
 
-       if (ret == NULL) {
-               if (extra == 0)
-                       return (NULL);
-               /* Try again, this time without extra. */
-               if (alignment != 0) {
-                       size_t usize = sa2u(size, alignment);
-                       if (usize == 0)
-                               return (NULL);
-                       ret = ipalloct(tsd, usize, alignment, zero,
-                           try_tcache_alloc, arena);
-               } else {
-                       ret = arena_malloc(tsd, arena, size, zero,
-                           try_tcache_alloc);
-               }
+               /* Try to avoid moving the allocation. */
+               if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero))
+                       return (ptr);
 
+               /*
+                * size and oldsize are different enough that we need to move
+                * the object.  In that case, fall back to allocating new space
+                * and copying.
+                */
+               ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
+                   zero, tcache);
                if (ret == NULL)
                        return (NULL);
-       }
 
-       /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
+               /*
+                * Junk/zero-filling were already done by
+                * ipalloc()/arena_malloc().
+                */
 
-       /*
-        * Copy at most size bytes (not size+extra), since the caller has no
-        * expectation that the extra bytes will be reliably preserved.
-        */
-       copysize = (size < oldsize) ? size : oldsize;
-       JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
-       memcpy(ret, ptr, copysize);
-       iqalloc(tsd, ptr, try_tcache_dalloc);
+               copysize = (usize < oldsize) ? usize : oldsize;
+               JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
+               memcpy(ret, ptr, copysize);
+               isqalloc(tsd, ptr, oldsize, tcache);
+       } else {
+               ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
+                   zero, tcache);
+       }
        return (ret);
 }
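
The arena_ralloc() path above follows a common reallocation shape: first try to resize in place, and only fall back to allocating new space, copying, and freeing the old object when that fails. Below is a minimal sketch of that shape using plain libc calls; sketch_ralloc() and try_expand_in_place() are hypothetical names standing in for arena_ralloc_no_move() and the jemalloc internals; they are not part of this diff.

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for arena_ralloc_no_move(); never succeeds here. */
static int
try_expand_in_place(void *ptr, size_t oldsize, size_t newsize)
{
	(void)ptr; (void)oldsize; (void)newsize;
	return (0);
}

void *
sketch_ralloc(void *ptr, size_t oldsize, size_t newsize)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	if (try_expand_in_place(ptr, oldsize, newsize))
		return (ptr);

	/* Fall back to allocating new space and copying. */
	ret = malloc(newsize);
	if (ret == NULL)
		return (NULL);
	copysize = (newsize < oldsize) ? newsize : oldsize;
	memcpy(ret, ptr, copysize);
	free(ptr);
	return (ret);
}
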
 
@@ -2149,22 +3164,90 @@ arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
        return (false);
 }
 
+ssize_t
+arena_lg_dirty_mult_default_get(void)
+{
+
+       return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
+}
+
+bool
+arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
+{
+
+       if (opt_purge != purge_mode_ratio)
+               return (true);
+       if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
+               return (true);
+       atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
+       return (false);
+}
+
+ssize_t
+arena_decay_time_default_get(void)
+{
+
+       return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
+}
+
+bool
+arena_decay_time_default_set(ssize_t decay_time)
+{
+
+       if (opt_purge != purge_mode_decay)
+               return (true);
+       if (!arena_decay_time_valid(decay_time))
+               return (true);
+       atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
+       return (false);
+}
+
+static void
+arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
+    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+    size_t *nactive, size_t *ndirty)
+{
+
+       *nthreads += arena_nthreads_get(arena);
+       *dss = dss_prec_names[arena->dss_prec];
+       *lg_dirty_mult = arena->lg_dirty_mult;
+       *decay_time = arena->decay_time;
+       *nactive += arena->nactive;
+       *ndirty += arena->ndirty;
+}
+
 void
-arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
+arena_basic_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
+    ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
+    size_t *ndirty)
+{
+
+       malloc_mutex_lock(&arena->lock);
+       arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
+           decay_time, nactive, ndirty);
+       malloc_mutex_unlock(&arena->lock);
+}
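
arena_basic_stats_merge() above is a thin locking wrapper around a *_locked helper that expects the caller to hold the arena lock, which lets arena_stats_merge() reuse the same helper while it already holds the lock for the full stats merge. A minimal sketch of that split, assuming a pthread mutex and a made-up sketch_stats_t:

#include <pthread.h>

typedef struct {
	pthread_mutex_t	lock;
	unsigned long	nactive;
} sketch_stats_t;

/* Caller must hold src->lock. */
static void
sketch_stats_merge_locked(sketch_stats_t *src, unsigned long *nactive)
{
	*nactive += src->nactive;
}

/* Locking wrapper for callers that do not already hold the lock. */
void
sketch_stats_merge(sketch_stats_t *src, unsigned long *nactive)
{
	pthread_mutex_lock(&src->lock);
	sketch_stats_merge_locked(src, nactive);
	pthread_mutex_unlock(&src->lock);
}
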
+
+void
+arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
+    ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
     size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
-    malloc_large_stats_t *lstats)
+    malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats)
 {
        unsigned i;
 
+       cassert(config_stats);
+
        malloc_mutex_lock(&arena->lock);
-       *dss = dss_prec_names[arena->dss_prec];
-       *nactive += arena->nactive;
-       *ndirty += arena->ndirty;
+       arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
+           decay_time, nactive, ndirty);
 
        astats->mapped += arena->stats.mapped;
        astats->npurge += arena->stats.npurge;
        astats->nmadvise += arena->stats.nmadvise;
        astats->purged += arena->stats.purged;
+       astats->metadata_mapped += arena->stats.metadata_mapped;
+       astats->metadata_allocated += arena_metadata_allocated_get(arena);
        astats->allocated_large += arena->stats.allocated_large;
        astats->nmalloc_large += arena->stats.nmalloc_large;
        astats->ndalloc_large += arena->stats.ndalloc_large;
@@ -2172,7 +3255,6 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
        astats->allocated_huge += arena->stats.allocated_huge;
        astats->nmalloc_huge += arena->stats.nmalloc_huge;
        astats->ndalloc_huge += arena->stats.ndalloc_huge;
-       astats->nrequests_huge += arena->stats.nrequests_huge;
 
        for (i = 0; i < nlclasses; i++) {
                lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
@@ -2180,16 +3262,22 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
                lstats[i].nrequests += arena->stats.lstats[i].nrequests;
                lstats[i].curruns += arena->stats.lstats[i].curruns;
        }
+
+       for (i = 0; i < nhclasses; i++) {
+               hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
+               hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
+               hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
+       }
        malloc_mutex_unlock(&arena->lock);
 
        for (i = 0; i < NBINS; i++) {
                arena_bin_t *bin = &arena->bins[i];
 
                malloc_mutex_lock(&bin->lock);
-               bstats[i].allocated += bin->stats.allocated;
                bstats[i].nmalloc += bin->stats.nmalloc;
                bstats[i].ndalloc += bin->stats.ndalloc;
                bstats[i].nrequests += bin->stats.nrequests;
+               bstats[i].curregs += bin->stats.curregs;
                if (config_tcache) {
                        bstats[i].nfills += bin->stats.nfills;
                        bstats[i].nflushes += bin->stats.nflushes;
@@ -2201,29 +3289,67 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
        }
 }
 
-bool
-arena_new(arena_t *arena, unsigned ind)
+unsigned
+arena_nthreads_get(arena_t *arena)
+{
+
+       return (atomic_read_u(&arena->nthreads));
+}
+
+void
+arena_nthreads_inc(arena_t *arena)
+{
+
+       atomic_add_u(&arena->nthreads, 1);
+}
+
+void
+arena_nthreads_dec(arena_t *arena)
 {
+
+       atomic_sub_u(&arena->nthreads, 1);
+}
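
The arena_nthreads_*() trio above maintains the per-arena thread count with jemalloc's atomic_*_u() helpers, so no arena lock is needed just to read or adjust it. A minimal sketch of the same get/inc/dec wrappers written against C11 <stdatomic.h>; the sketch_* names are assumptions, not jemalloc API:

#include <stdatomic.h>

typedef struct {
	atomic_uint	nthreads;
} sketch_arena_t;

unsigned
sketch_nthreads_get(sketch_arena_t *arena)
{
	return (atomic_load(&arena->nthreads));
}

void
sketch_nthreads_inc(sketch_arena_t *arena)
{
	atomic_fetch_add(&arena->nthreads, 1);
}

void
sketch_nthreads_dec(sketch_arena_t *arena)
{
	atomic_fetch_sub(&arena->nthreads, 1);
}
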
+
+arena_t *
+arena_new(unsigned ind)
+{
+       arena_t *arena;
+       size_t arena_size;
        unsigned i;
        arena_bin_t *bin;
 
+       /* Compute arena size to incorporate sufficient runs_avail elements. */
+       arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_tree_t) *
+           runs_avail_nclasses);
+       /*
+        * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
+        * because there is no way to clean up if base_alloc() OOMs.
+        */
+       if (config_stats) {
+               arena = (arena_t *)base_alloc(CACHELINE_CEILING(arena_size) +
+                   QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)) +
+                   (nhclasses * sizeof(malloc_huge_stats_t)));
+       } else
+               arena = (arena_t *)base_alloc(arena_size);
+       if (arena == NULL)
+               return (NULL);
+
        arena->ind = ind;
        arena->nthreads = 0;
-       arena->chunk_alloc = chunk_alloc_default;
-       arena->chunk_dalloc = chunk_dalloc_default;
-
        if (malloc_mutex_init(&arena->lock))
-               return (true);
+               return (NULL);
 
        if (config_stats) {
                memset(&arena->stats, 0, sizeof(arena_stats_t));
-               arena->stats.lstats =
-                   (malloc_large_stats_t *)base_alloc(nlclasses *
-                   sizeof(malloc_large_stats_t));
-               if (arena->stats.lstats == NULL)
-                       return (true);
+               arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
+                   + CACHELINE_CEILING(arena_size));
                memset(arena->stats.lstats, 0, nlclasses *
                    sizeof(malloc_large_stats_t));
+               arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
+                   + CACHELINE_CEILING(arena_size) +
+                   QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
+               memset(arena->stats.hstats, 0, nhclasses *
+                   sizeof(malloc_huge_stats_t));
                if (config_tcache)
                        ql_new(&arena->tcache_ql);
        }
@@ -2231,34 +3357,69 @@ arena_new(arena_t *arena, unsigned ind)
        if (config_prof)
                arena->prof_accumbytes = 0;
 
+       if (config_cache_oblivious) {
+               /*
+                * A nondeterministic seed based on the address of arena reduces
+                * the likelihood of lockstep non-uniform cache index
+                * utilization among identical concurrent processes, but at the
+                * cost of test repeatability.  For debug builds, instead use a
+                * deterministic seed.
+                */
+               arena->offset_state = config_debug ? ind :
+                   (uint64_t)(uintptr_t)arena;
+       }
+
        arena->dss_prec = chunk_dss_prec_get();
 
        arena->spare = NULL;
 
+       arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
+       arena->purging = false;
        arena->nactive = 0;
        arena->ndirty = 0;
 
-       arena_avail_tree_new(&arena->runs_avail);
-       ql_new(&arena->runs_dirty);
+       for (i = 0; i < runs_avail_nclasses; i++)
+               arena_run_tree_new(&arena->runs_avail[i]);
+       qr_new(&arena->runs_dirty, rd_link);
+       qr_new(&arena->chunks_cache, cc_link);
+
+       if (opt_purge == purge_mode_decay)
+               arena_decay_init(arena, arena_decay_time_default_get());
+
+       ql_new(&arena->huge);
+       if (malloc_mutex_init(&arena->huge_mtx))
+               return (NULL);
+
+       extent_tree_szad_new(&arena->chunks_szad_cached);
+       extent_tree_ad_new(&arena->chunks_ad_cached);
+       extent_tree_szad_new(&arena->chunks_szad_retained);
+       extent_tree_ad_new(&arena->chunks_ad_retained);
+       if (malloc_mutex_init(&arena->chunks_mtx))
+               return (NULL);
+       ql_new(&arena->node_cache);
+       if (malloc_mutex_init(&arena->node_cache_mtx))
+               return (NULL);
+
+       arena->chunk_hooks = chunk_hooks_default;
 
        /* Initialize bins. */
        for (i = 0; i < NBINS; i++) {
                bin = &arena->bins[i];
                if (malloc_mutex_init(&bin->lock))
-                       return (true);
+                       return (NULL);
                bin->runcur = NULL;
                arena_run_tree_new(&bin->runs);
                if (config_stats)
                        memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
        }
 
-       return (false);
+       return (arena);
 }
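
arena_new() above computes arena_size with offsetof() so that the variable-length runs_avail array (and, with stats enabled, the lstats/hstats arrays) lands in one base_alloc() block, since a partially built arena could not be cleaned up on OOM. A minimal sketch of sizing a struct with a variable-length tail the same way; sketch_arena_t and its fields are assumptions:

#include <stddef.h>
#include <stdlib.h>

typedef struct {
	unsigned	ind;
	size_t		ntrees;
	int		trees[1];	/* really ntrees entries */
} sketch_arena_t;

sketch_arena_t *
sketch_arena_new(unsigned ind, size_t ntrees)
{
	/* Header plus exactly ntrees trailing elements, in one allocation. */
	size_t sz = offsetof(sketch_arena_t, trees) + ntrees * sizeof(int);
	sketch_arena_t *arena = calloc(1, sz);

	if (arena == NULL)
		return (NULL);
	arena->ind = ind;
	arena->ntrees = ntrees;
	return (arena);
}
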
 
 /*
  * Calculate bin_info->run_size such that it meets the following constraints:
  *
- *   *) bin_info->run_size <= arena_maxclass
+ *   *) bin_info->run_size <= arena_maxrun
  *   *) bin_info->nregs <= RUN_MAXREGS
  *
  * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
@@ -2279,8 +3440,7 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
         * be twice as large in order to maintain alignment.
         */
        if (config_fill && unlikely(opt_redzone)) {
-               size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
-                   1);
+               size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
                if (align_min <= REDZONE_MINSIZE) {
                        bin_info->redzone_size = REDZONE_MINSIZE;
                        pad_size = 0;
@@ -2300,18 +3460,19 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
         * size).
         */
        try_run_size = PAGE;
-       try_nregs = try_run_size / bin_info->reg_size;
+       try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
        do {
                perfect_run_size = try_run_size;
                perfect_nregs = try_nregs;
 
                try_run_size += PAGE;
-               try_nregs = try_run_size / bin_info->reg_size;
+               try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
        } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
        assert(perfect_nregs <= RUN_MAXREGS);
 
        actual_run_size = perfect_run_size;
-       actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;
+       actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+           bin_info->reg_interval);
 
        /*
         * Redzones can require enough padding that not even a single region can
@@ -2323,25 +3484,29 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
                assert(config_fill && unlikely(opt_redzone));
 
                actual_run_size += PAGE;
-               actual_nregs = (actual_run_size - pad_size) /
-                   bin_info->reg_interval;
+               actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+                   bin_info->reg_interval);
        }
 
        /*
         * Make sure that the run will fit within an arena chunk.
         */
-       while (actual_run_size > arena_maxclass) {
+       while (actual_run_size > arena_maxrun) {
                actual_run_size -= PAGE;
-               actual_nregs = (actual_run_size - pad_size) /
-                   bin_info->reg_interval;
+               actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+                   bin_info->reg_interval);
        }
        assert(actual_nregs > 0);
+       assert(actual_run_size == s2u(actual_run_size));
 
        /* Copy final settings. */
        bin_info->run_size = actual_run_size;
        bin_info->nregs = actual_nregs;
-       bin_info->reg0_offset = actual_run_size - (actual_nregs *
-           bin_info->reg_interval) - pad_size + bin_info->redzone_size;
+       bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
+           bin_info->reg_interval) - pad_size + bin_info->redzone_size);
+
+       if (actual_run_size > small_maxrun)
+               small_maxrun = actual_run_size;
 
        assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
            * bin_info->reg_interval) + pad_size == bin_info->run_size);
@@ -2352,7 +3517,7 @@ bin_info_init(void)
 {
        arena_bin_info_t *bin_info;
 
-#define        BIN_INFO_INIT_bin_yes(index, size) \
+#define        BIN_INFO_INIT_bin_yes(index, size)                              \
        bin_info = &arena_bin_info[index];                              \
        bin_info->reg_size = size;                                      \
        bin_info_run_size_calc(bin_info);                               \
@@ -2366,12 +3531,69 @@ bin_info_init(void)
 #undef SC
 }
 
-void
+static bool
+small_run_size_init(void)
+{
+
+       assert(small_maxrun != 0);
+
+       small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
+           LG_PAGE));
+       if (small_run_tab == NULL)
+               return (true);
+
+#define        TAB_INIT_bin_yes(index, size) {                                 \
+               arena_bin_info_t *bin_info = &arena_bin_info[index];    \
+               small_run_tab[bin_info->run_size >> LG_PAGE] = true;    \
+       }
+#define        TAB_INIT_bin_no(index, size)
+#define        SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)       \
+       TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
+       SIZE_CLASSES
+#undef TAB_INIT_bin_yes
+#undef TAB_INIT_bin_no
+#undef SC
+
+       return (false);
+}
+
+static bool
+run_quantize_init(void)
+{
+       unsigned i;
+
+       run_quantize_max = chunksize + large_pad;
+
+       run_quantize_floor_tab = (size_t *)base_alloc(sizeof(size_t) *
+           (run_quantize_max >> LG_PAGE));
+       if (run_quantize_floor_tab == NULL)
+               return (true);
+
+       run_quantize_ceil_tab = (size_t *)base_alloc(sizeof(size_t) *
+           (run_quantize_max >> LG_PAGE));
+       if (run_quantize_ceil_tab == NULL)
+               return (true);
+
+       for (i = 1; i <= run_quantize_max >> LG_PAGE; i++) {
+               size_t run_size = i << LG_PAGE;
+
+               run_quantize_floor_tab[i-1] =
+                   run_quantize_floor_compute(run_size);
+               run_quantize_ceil_tab[i-1] =
+                   run_quantize_ceil_compute(run_size);
+       }
+
+       return (false);
+}
+
+bool
 arena_boot(void)
 {
-       size_t header_size;
        unsigned i;
 
+       arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
+       arena_decay_time_default_set(opt_decay_time);
+
        /*
         * Compute the header size such that it is large enough to contain the
         * page map.  The page map is biased to omit entries for the header
@@ -2386,7 +3608,7 @@ arena_boot(void)
         */
        map_bias = 0;
        for (i = 0; i < 3; i++) {
-               header_size = offsetof(arena_chunk_t, map_bits) +
+               size_t header_size = offsetof(arena_chunk_t, map_bits) +
                    ((sizeof(arena_chunk_map_bits_t) +
                    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
                map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
@@ -2396,9 +3618,31 @@ arena_boot(void)
        map_misc_offset = offsetof(arena_chunk_t, map_bits) +
            sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
 
-       arena_maxclass = chunksize - (map_bias << LG_PAGE);
+       arena_maxrun = chunksize - (map_bias << LG_PAGE);
+       assert(arena_maxrun > 0);
+       large_maxclass = index2size(size2index(chunksize)-1);
+       if (large_maxclass > arena_maxrun) {
+               /*
+                * For small chunk sizes it's possible for there to be fewer
+                * non-header pages available than are necessary to serve the
+                * size classes just below chunksize.
+                */
+               large_maxclass = arena_maxrun;
+       }
+       assert(large_maxclass > 0);
+       nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
+       nhclasses = NSIZES - nlclasses - NBINS;
 
        bin_info_init();
+       if (small_run_size_init())
+               return (true);
+       if (run_quantize_init())
+               return (true);
+
+       runs_avail_bias = size2index(PAGE);
+       runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias;
+
+       return (false);
 }
 
 void
@@ -2407,6 +3651,9 @@ arena_prefork(arena_t *arena)
        unsigned i;
 
        malloc_mutex_prefork(&arena->lock);
+       malloc_mutex_prefork(&arena->huge_mtx);
+       malloc_mutex_prefork(&arena->chunks_mtx);
+       malloc_mutex_prefork(&arena->node_cache_mtx);
        for (i = 0; i < NBINS; i++)
                malloc_mutex_prefork(&arena->bins[i].lock);
 }
@@ -2418,6 +3665,9 @@ arena_postfork_parent(arena_t *arena)
 
        for (i = 0; i < NBINS; i++)
                malloc_mutex_postfork_parent(&arena->bins[i].lock);
+       malloc_mutex_postfork_parent(&arena->node_cache_mtx);
+       malloc_mutex_postfork_parent(&arena->chunks_mtx);
+       malloc_mutex_postfork_parent(&arena->huge_mtx);
        malloc_mutex_postfork_parent(&arena->lock);
 }
 
@@ -2428,5 +3678,8 @@ arena_postfork_child(arena_t *arena)
 
        for (i = 0; i < NBINS; i++)
                malloc_mutex_postfork_child(&arena->bins[i].lock);
+       malloc_mutex_postfork_child(&arena->node_cache_mtx);
+       malloc_mutex_postfork_child(&arena->chunks_mtx);
+       malloc_mutex_postfork_child(&arena->huge_mtx);
        malloc_mutex_postfork_child(&arena->lock);
 }
index 409c7bb78c1e50a1d9ca5c6a2e27934878fea2c4..7cdcfed86bd8d435d8d715526dbba62647ab8a7a 100644 (file)
 /* Data. */
 
 static malloc_mutex_t  base_mtx;
-
-/*
- * Current pages that are being used for internal memory allocations.  These
- * pages are carved up in cacheline-size quanta, so that there is no chance of
- * false cache line sharing.
- */
-static void            *base_pages;
-static void            *base_next_addr;
-static void            *base_past_addr; /* Addr immediately past base_pages. */
+static extent_tree_t   base_avail_szad;
 static extent_node_t   *base_nodes;
+static size_t          base_allocated;
+static size_t          base_resident;
+static size_t          base_mapped;
 
 /******************************************************************************/
 
-static bool
-base_pages_alloc(size_t minsize)
+/* base_mtx must be held. */
+static extent_node_t *
+base_node_try_alloc(void)
 {
-       size_t csize;
-
-       assert(minsize != 0);
-       csize = CHUNK_CEILING(minsize);
-       base_pages = chunk_alloc_base(csize);
-       if (base_pages == NULL)
-               return (true);
-       base_next_addr = base_pages;
-       base_past_addr = (void *)((uintptr_t)base_pages + csize);
+       extent_node_t *node;
 
-       return (false);
+       if (base_nodes == NULL)
+               return (NULL);
+       node = base_nodes;
+       base_nodes = *(extent_node_t **)node;
+       JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
+       return (node);
 }
 
-void *
-base_alloc(size_t size)
+/* base_mtx must be held. */
+static void
+base_node_dalloc(extent_node_t *node)
 {
-       void *ret;
-       size_t csize;
 
-       /* Round size up to nearest multiple of the cacheline size. */
-       csize = CACHELINE_CEILING(size);
-
-       malloc_mutex_lock(&base_mtx);
-       /* Make sure there's enough space for the allocation. */
-       if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
-               if (base_pages_alloc(csize)) {
-                       malloc_mutex_unlock(&base_mtx);
-                       return (NULL);
-               }
-       }
-       /* Allocate. */
-       ret = base_next_addr;
-       base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
-       malloc_mutex_unlock(&base_mtx);
-       JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
-
-       return (ret);
+       JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
+       *(extent_node_t **)node = base_nodes;
+       base_nodes = node;
 }
 
-void *
-base_calloc(size_t number, size_t size)
+/* base_mtx must be held. */
+static extent_node_t *
+base_chunk_alloc(size_t minsize)
 {
-       void *ret = base_alloc(number * size);
-
-       if (ret != NULL)
-               memset(ret, 0, number * size);
+       extent_node_t *node;
+       size_t csize, nsize;
+       void *addr;
 
-       return (ret);
+       assert(minsize != 0);
+       node = base_node_try_alloc();
+       /* Allocate enough space to also carve a node out if necessary. */
+       nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
+       csize = CHUNK_CEILING(minsize + nsize);
+       addr = chunk_alloc_base(csize);
+       if (addr == NULL) {
+               if (node != NULL)
+                       base_node_dalloc(node);
+               return (NULL);
+       }
+       base_mapped += csize;
+       if (node == NULL) {
+               node = (extent_node_t *)addr;
+               addr = (void *)((uintptr_t)addr + nsize);
+               csize -= nsize;
+               if (config_stats) {
+                       base_allocated += nsize;
+                       base_resident += PAGE_CEILING(nsize);
+               }
+       }
+       extent_node_init(node, NULL, addr, csize, true, true);
+       return (node);
 }
 
-extent_node_t *
-base_node_alloc(void)
+/*
+ * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
+ * sparse data structures such as radix tree nodes efficient with respect to
+ * physical memory usage.
+ */
+void *
+base_alloc(size_t size)
 {
-       extent_node_t *ret;
+       void *ret;
+       size_t csize, usize;
+       extent_node_t *node;
+       extent_node_t key;
+
+       /*
+        * Round size up to nearest multiple of the cacheline size, so that
+        * there is no chance of false cache line sharing.
+        */
+       csize = CACHELINE_CEILING(size);
 
+       usize = s2u(csize);
+       extent_node_init(&key, NULL, NULL, usize, false, false);
        malloc_mutex_lock(&base_mtx);
-       if (base_nodes != NULL) {
-               ret = base_nodes;
-               base_nodes = *(extent_node_t **)ret;
-               malloc_mutex_unlock(&base_mtx);
-               JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret,
-                   sizeof(extent_node_t));
+       node = extent_tree_szad_nsearch(&base_avail_szad, &key);
+       if (node != NULL) {
+               /* Use existing space. */
+               extent_tree_szad_remove(&base_avail_szad, node);
        } else {
-               malloc_mutex_unlock(&base_mtx);
-               ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
+               /* Try to allocate more space. */
+               node = base_chunk_alloc(csize);
+       }
+       if (node == NULL) {
+               ret = NULL;
+               goto label_return;
        }
 
+       ret = extent_node_addr_get(node);
+       if (extent_node_size_get(node) > csize) {
+               extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
+               extent_node_size_set(node, extent_node_size_get(node) - csize);
+               extent_tree_szad_insert(&base_avail_szad, node);
+       } else
+               base_node_dalloc(node);
+       if (config_stats) {
+               base_allocated += csize;
+               /*
+                * Add one PAGE to base_resident for every page boundary that is
+                * crossed by the new allocation.
+                */
+               base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
+                   PAGE_CEILING((uintptr_t)ret);
+       }
+       JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
+label_return:
+       malloc_mutex_unlock(&base_mtx);
        return (ret);
 }
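
The rewritten base_alloc() above serves a request by removing the best-fitting extent from a size/address-ordered tree, handing out the requested bytes, and re-inserting any remainder as a smaller extent. A minimal sketch of that carve-and-reinsert shape, with the tree replaced by an unsorted singly linked free list; every sketch_* name is an assumption, and nodes here are heap-allocated so the exhausted one can simply be free()d:

#include <stddef.h>
#include <stdlib.h>

typedef struct sketch_extent_s {
	void			*addr;
	size_t			size;
	struct sketch_extent_s	*next;
} sketch_extent_t;

static sketch_extent_t	*avail;	/* free extents; unsorted in this sketch */

/* Unlink and return the first extent large enough for the request. */
static sketch_extent_t *
sketch_extent_take(size_t size)
{
	sketch_extent_t **p;

	for (p = &avail; *p != NULL; p = &(*p)->next) {
		if ((*p)->size >= size) {
			sketch_extent_t *node = *p;
			*p = node->next;
			return (node);
		}
	}
	return (NULL);
}

void *
sketch_base_alloc(size_t size)
{
	sketch_extent_t *node = sketch_extent_take(size);
	void *ret;

	if (node == NULL)
		return (NULL);	/* the real code maps a fresh chunk instead */
	ret = node->addr;
	if (node->size > size) {
		/* Re-insert the unused tail as a smaller extent. */
		node->addr = (void *)((char *)node->addr + size);
		node->size -= size;
		node->next = avail;
		avail = node;
	} else
		free(node);
	return (ret);
}
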
 
 void
-base_node_dalloc(extent_node_t *node)
+base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
 {
 
-       JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
        malloc_mutex_lock(&base_mtx);
-       *(extent_node_t **)node = base_nodes;
-       base_nodes = node;
+       assert(base_allocated <= base_resident);
+       assert(base_resident <= base_mapped);
+       *allocated = base_allocated;
+       *resident = base_resident;
+       *mapped = base_mapped;
        malloc_mutex_unlock(&base_mtx);
 }
 
@@ -106,9 +144,10 @@ bool
 base_boot(void)
 {
 
-       base_nodes = NULL;
        if (malloc_mutex_init(&base_mtx))
                return (true);
+       extent_tree_szad_new(&base_avail_szad);
+       base_nodes = NULL;
 
        return (false);
 }
index c733372b4cb2f47fcbe1b49628a00b20fc26cde6..b1e66271e67889ff04d19d242bf210d5df372db9 100644 (file)
@@ -3,6 +3,8 @@
 
 /******************************************************************************/
 
+#ifdef USE_TREE
+
 void
 bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
 {
@@ -32,20 +34,11 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
        binfo->nbits = nbits;
 }
 
-size_t
+static size_t
 bitmap_info_ngroups(const bitmap_info_t *binfo)
 {
 
-       return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
-}
-
-size_t
-bitmap_size(size_t nbits)
-{
-       bitmap_info_t binfo;
-
-       bitmap_info_init(&binfo, nbits);
-       return (bitmap_info_ngroups(&binfo));
+       return (binfo->levels[binfo->nlevels].group_offset);
 }
 
 void
@@ -61,8 +54,7 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
         * correspond to the first logical bit in the group, so extra bits
         * are the most significant bits of the last group.
         */
-       memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
-           LG_SIZEOF_BITMAP);
+       memset(bitmap, 0xffU, bitmap_size(binfo));
        extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
            & BITMAP_GROUP_NBITS_MASK;
        if (extra != 0)
@@ -76,3 +68,47 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
                        bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
        }
 }
+
+#else /* USE_TREE */
+
+void
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
+{
+       size_t i;
+
+       assert(nbits > 0);
+       assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
+
+       i = nbits >> LG_BITMAP_GROUP_NBITS;
+       if (nbits % BITMAP_GROUP_NBITS != 0)
+               i++;
+       binfo->ngroups = i;
+       binfo->nbits = nbits;
+}
+
+static size_t
+bitmap_info_ngroups(const bitmap_info_t *binfo)
+{
+
+       return (binfo->ngroups);
+}
+
+void
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
+{
+       size_t extra;
+
+       memset(bitmap, 0xffU, bitmap_size(binfo));
+       extra = (binfo->nbits % (binfo->ngroups * BITMAP_GROUP_NBITS));
+       if (extra != 0)
+               bitmap[binfo->ngroups - 1] >>= (BITMAP_GROUP_NBITS - extra);
+}
+
+#endif /* USE_TREE */
+
+size_t
+bitmap_size(const bitmap_info_t *binfo)
+{
+
+       return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
+}
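
In the non-tree path above, bitmap_info_init() sizes a flat bitmap by rounding the bit count up to whole groups. A minimal sketch of that ceiling division, assuming the group width of an unsigned long; sketch_bitmap_ngroups() is a made-up name:

#include <limits.h>
#include <stddef.h>

static size_t
sketch_bitmap_ngroups(size_t nbits)
{
	size_t group_nbits = sizeof(unsigned long) * CHAR_BIT;

	/* ceil(nbits / group_nbits): a partial tail costs one extra group. */
	return ((nbits + group_nbits - 1) / group_nbits);
}
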
index 32b8b3a6c6bd278025486a0537a0b1ce4b89a347..b179d213557202644869a3e433ef92db0c3a39e4 100644 (file)
@@ -5,31 +5,43 @@
 /* Data. */
 
 const char     *opt_dss = DSS_DEFAULT;
-size_t         opt_lg_chunk = LG_CHUNK_DEFAULT;
+size_t         opt_lg_chunk = 0;
 
-malloc_mutex_t chunks_mtx;
-chunk_stats_t  stats_chunks;
+/* Used exclusively for gdump triggering. */
+static size_t  curchunks;
+static size_t  highchunks;
 
-/*
- * Trees of chunks that were previously allocated (trees differ only in node
- * ordering).  These are used when allocating chunks, in an attempt to re-use
- * address space.  Depending on function, different tree orderings are needed,
- * which is why there are two trees with the same contents.
- */
-static extent_tree_t   chunks_szad_mmap;
-static extent_tree_t   chunks_ad_mmap;
-static extent_tree_t   chunks_szad_dss;
-static extent_tree_t   chunks_ad_dss;
-
-rtree_t                *chunks_rtree;
+rtree_t                chunks_rtree;
 
 /* Various chunk-related settings. */
 size_t         chunksize;
 size_t         chunksize_mask; /* (chunksize - 1). */
 size_t         chunk_npages;
-size_t         map_bias;
-size_t         map_misc_offset;
-size_t         arena_maxclass; /* Max size class for arenas. */
+
+static void    *chunk_alloc_default(void *new_addr, size_t size,
+    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
+static bool    chunk_dalloc_default(void *chunk, size_t size, bool committed,
+    unsigned arena_ind);
+static bool    chunk_commit_default(void *chunk, size_t size, size_t offset,
+    size_t length, unsigned arena_ind);
+static bool    chunk_decommit_default(void *chunk, size_t size, size_t offset,
+    size_t length, unsigned arena_ind);
+static bool    chunk_purge_default(void *chunk, size_t size, size_t offset,
+    size_t length, unsigned arena_ind);
+static bool    chunk_split_default(void *chunk, size_t size, size_t size_a,
+    size_t size_b, bool committed, unsigned arena_ind);
+static bool    chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
+    size_t size_b, bool committed, unsigned arena_ind);
+
+const chunk_hooks_t    chunk_hooks_default = {
+       chunk_alloc_default,
+       chunk_dalloc_default,
+       chunk_commit_default,
+       chunk_decommit_default,
+       chunk_purge_default,
+       chunk_split_default,
+       chunk_merge_default
+};
 
 /******************************************************************************/
 /*
@@ -37,89 +49,263 @@ size_t             arena_maxclass; /* Max size class for arenas. */
  * definition.
  */
 
-static void    chunk_dalloc_core(void *chunk, size_t size);
+static void    chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+    void *chunk, size_t size, bool zeroed, bool committed);
 
 /******************************************************************************/
 
+static chunk_hooks_t
+chunk_hooks_get_locked(arena_t *arena)
+{
+
+       return (arena->chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_get(arena_t *arena)
+{
+       chunk_hooks_t chunk_hooks;
+
+       malloc_mutex_lock(&arena->chunks_mtx);
+       chunk_hooks = chunk_hooks_get_locked(arena);
+       malloc_mutex_unlock(&arena->chunks_mtx);
+
+       return (chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
+{
+       chunk_hooks_t old_chunk_hooks;
+
+       malloc_mutex_lock(&arena->chunks_mtx);
+       old_chunk_hooks = arena->chunk_hooks;
+       /*
+        * Copy each field atomically so that it is impossible for readers to
+        * see partially updated pointers.  There are places where readers only
+        * need one hook function pointer (therefore no need to copy the
+        * entirety of arena->chunk_hooks), and stale reads do not affect
+        * correctness, so they perform unlocked reads.
+        */
+#define        ATOMIC_COPY_HOOK(n) do {                                        \
+       union {                                                         \
+               chunk_##n##_t   **n;                                    \
+               void            **v;                                    \
+       } u;                                                            \
+       u.n = &arena->chunk_hooks.n;                                    \
+       atomic_write_p(u.v, chunk_hooks->n);                            \
+} while (0)
+       ATOMIC_COPY_HOOK(alloc);
+       ATOMIC_COPY_HOOK(dalloc);
+       ATOMIC_COPY_HOOK(commit);
+       ATOMIC_COPY_HOOK(decommit);
+       ATOMIC_COPY_HOOK(purge);
+       ATOMIC_COPY_HOOK(split);
+       ATOMIC_COPY_HOOK(merge);
+#undef ATOMIC_COPY_HOOK
+       malloc_mutex_unlock(&arena->chunks_mtx);
+
+       return (old_chunk_hooks);
+}
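
chunk_hooks_set() above publishes every hook with an atomic pointer store, so lock-free readers that only need a single function pointer can never observe a torn value. A minimal sketch of that per-field publication using C11 atomics; the sketch_* types and names are assumptions, not jemalloc's:

#include <stdatomic.h>
#include <stddef.h>

typedef void	*(sketch_alloc_t)(size_t);
typedef void	(sketch_dalloc_t)(void *);

typedef struct {
	_Atomic(sketch_alloc_t *)	alloc;
	_Atomic(sketch_dalloc_t *)	dalloc;
} sketch_hooks_t;

void
sketch_hooks_set(sketch_hooks_t *hooks, sketch_alloc_t *alloc,
    sketch_dalloc_t *dalloc)
{
	/* Each pointer is stored atomically, so a reader loading a single
	 * hook sees either the old or the new pointer, never a torn value. */
	atomic_store(&hooks->alloc, alloc);
	atomic_store(&hooks->dalloc, dalloc);
}
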
+
+static void
+chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    bool locked)
+{
+       static const chunk_hooks_t uninitialized_hooks =
+           CHUNK_HOOKS_INITIALIZER;
+
+       if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
+           0) {
+               *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
+                   chunk_hooks_get(arena);
+       }
+}
+
+static void
+chunk_hooks_assure_initialized_locked(arena_t *arena,
+    chunk_hooks_t *chunk_hooks)
+{
+
+       chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
+}
+
+static void
+chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
+{
+
+       chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
+}
+
+bool
+chunk_register(const void *chunk, const extent_node_t *node)
+{
+
+       assert(extent_node_addr_get(node) == chunk);
+
+       if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
+               return (true);
+       if (config_prof && opt_prof) {
+               size_t size = extent_node_size_get(node);
+               size_t nadd = (size == 0) ? 1 : size / chunksize;
+               size_t cur = atomic_add_z(&curchunks, nadd);
+               size_t high = atomic_read_z(&highchunks);
+               while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
+                       /*
+                        * Don't refresh cur, because it may have decreased
+                        * since this thread lost the highchunks update race.
+                        */
+                       high = atomic_read_z(&highchunks);
+               }
+               if (cur > high && prof_gdump_get_unlocked())
+                       prof_gdump();
+       }
+
+       return (false);
+}
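
chunk_register() above counts chunks and maintains a high-water mark with a compare-and-swap loop, only triggering a profile gdump when a new maximum is reached. A minimal sketch of that high-water-mark update using C11 atomics; the sketch_* names are assumptions:

#include <stdatomic.h>
#include <stddef.h>

static _Atomic size_t	sketch_cur;
static _Atomic size_t	sketch_high;

void
sketch_track_add(size_t nadd)
{
	size_t cur = atomic_fetch_add(&sketch_cur, nadd) + nadd;
	size_t high = atomic_load(&sketch_high);

	/* Raise the high-water mark; a failed CAS reloads high with the value
	 * written by the competing thread, and the loop retries only while
	 * cur is still larger. */
	while (cur > high &&
	    !atomic_compare_exchange_weak(&sketch_high, &high, cur))
		;
}
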
+
+void
+chunk_deregister(const void *chunk, const extent_node_t *node)
+{
+       bool err;
+
+       err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
+       assert(!err);
+       if (config_prof && opt_prof) {
+               size_t size = extent_node_size_get(node);
+               size_t nsub = (size == 0) ? 1 : size / chunksize;
+               assert(atomic_read_z(&curchunks) >= nsub);
+               atomic_sub_z(&curchunks, nsub);
+       }
+}
+
+/*
+ * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
+ * fits.
+ */
+static extent_node_t *
+chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
+    extent_tree_t *chunks_ad, size_t size)
+{
+       extent_node_t key;
+
+       assert(size == CHUNK_CEILING(size));
+
+       extent_node_init(&key, arena, NULL, size, false, false);
+       return (extent_tree_szad_nsearch(chunks_szad, &key));
+}
+
 static void *
-chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
-    void *new_addr, size_t size, size_t alignment, bool base, bool *zero)
+chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
+    bool dalloc_node)
 {
        void *ret;
        extent_node_t *node;
-       extent_node_t key;
        size_t alloc_size, leadsize, trailsize;
-       bool zeroed;
+       bool zeroed, committed;
 
-       if (base) {
-               /*
-                * This function may need to call base_node_{,de}alloc(), but
-                * the current chunk allocation request is on behalf of the
-                * base allocator.  Avoid deadlock (and if that weren't an
-                * issue, potential for infinite recursion) by returning NULL.
-                */
-               return (NULL);
-       }
+       assert(new_addr == NULL || alignment == chunksize);
+       /*
+        * Cached chunks use the node linkage embedded in their headers, in
+        * which case dalloc_node is true, and new_addr is non-NULL because
+        * we're operating on a specific chunk.
+        */
+       assert(dalloc_node || new_addr != NULL);
 
-       alloc_size = size + alignment - chunksize;
+       alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
        /* Beware size_t wrap-around. */
        if (alloc_size < size)
                return (NULL);
-       key.addr = new_addr;
-       key.size = alloc_size;
-       malloc_mutex_lock(&chunks_mtx);
-       node = extent_tree_szad_nsearch(chunks_szad, &key);
-       if (node == NULL || (new_addr && node->addr != new_addr)) {
-               malloc_mutex_unlock(&chunks_mtx);
+       malloc_mutex_lock(&arena->chunks_mtx);
+       chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+       if (new_addr != NULL) {
+               extent_node_t key;
+               extent_node_init(&key, arena, new_addr, alloc_size, false,
+                   false);
+               node = extent_tree_ad_search(chunks_ad, &key);
+       } else {
+               node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
+                   alloc_size);
+       }
+       if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
+           size)) {
+               malloc_mutex_unlock(&arena->chunks_mtx);
                return (NULL);
        }
-       leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
-           (uintptr_t)node->addr;
-       assert(node->size >= leadsize + size);
-       trailsize = node->size - leadsize - size;
-       ret = (void *)((uintptr_t)node->addr + leadsize);
-       zeroed = node->zeroed;
+       leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
+           alignment) - (uintptr_t)extent_node_addr_get(node);
+       assert(new_addr == NULL || leadsize == 0);
+       assert(extent_node_size_get(node) >= leadsize + size);
+       trailsize = extent_node_size_get(node) - leadsize - size;
+       ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
+       zeroed = extent_node_zeroed_get(node);
        if (zeroed)
-           *zero = true;
+               *zero = true;
+       committed = extent_node_committed_get(node);
+       if (committed)
+               *commit = true;
+       /* Split the lead. */
+       if (leadsize != 0 &&
+           chunk_hooks->split(extent_node_addr_get(node),
+           extent_node_size_get(node), leadsize, size, false, arena->ind)) {
+               malloc_mutex_unlock(&arena->chunks_mtx);
+               return (NULL);
+       }
        /* Remove node from the tree. */
        extent_tree_szad_remove(chunks_szad, node);
        extent_tree_ad_remove(chunks_ad, node);
+       arena_chunk_cache_maybe_remove(arena, node, cache);
        if (leadsize != 0) {
                /* Insert the leading space as a smaller chunk. */
-               node->size = leadsize;
+               extent_node_size_set(node, leadsize);
                extent_tree_szad_insert(chunks_szad, node);
                extent_tree_ad_insert(chunks_ad, node);
+               arena_chunk_cache_maybe_insert(arena, node, cache);
                node = NULL;
        }
        if (trailsize != 0) {
+               /* Split the trail. */
+               if (chunk_hooks->split(ret, size + trailsize, size,
+                   trailsize, false, arena->ind)) {
+                       if (dalloc_node && node != NULL)
+                               arena_node_dalloc(arena, node);
+                       malloc_mutex_unlock(&arena->chunks_mtx);
+                       chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
+                           cache, ret, size + trailsize, zeroed, committed);
+                       return (NULL);
+               }
                /* Insert the trailing space as a smaller chunk. */
                if (node == NULL) {
-                       /*
-                        * An additional node is required, but
-                        * base_node_alloc() can cause a new base chunk to be
-                        * allocated.  Drop chunks_mtx in order to avoid
-                        * deadlock, and if node allocation fails, deallocate
-                        * the result before returning an error.
-                        */
-                       malloc_mutex_unlock(&chunks_mtx);
-                       node = base_node_alloc();
+                       node = arena_node_alloc(arena);
                        if (node == NULL) {
-                               chunk_dalloc_core(ret, size);
+                               malloc_mutex_unlock(&arena->chunks_mtx);
+                               chunk_record(arena, chunk_hooks, chunks_szad,
+                                   chunks_ad, cache, ret, size + trailsize,
+                                   zeroed, committed);
                                return (NULL);
                        }
-                       malloc_mutex_lock(&chunks_mtx);
                }
-               node->addr = (void *)((uintptr_t)(ret) + size);
-               node->size = trailsize;
-               node->zeroed = zeroed;
+               extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
+                   trailsize, zeroed, committed);
                extent_tree_szad_insert(chunks_szad, node);
                extent_tree_ad_insert(chunks_ad, node);
+               arena_chunk_cache_maybe_insert(arena, node, cache);
                node = NULL;
        }
-       malloc_mutex_unlock(&chunks_mtx);
+       if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
+               malloc_mutex_unlock(&arena->chunks_mtx);
+               chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
+                   ret, size, zeroed, committed);
+               return (NULL);
+       }
+       malloc_mutex_unlock(&arena->chunks_mtx);
 
-       if (node != NULL)
-               base_node_dalloc(node);
+       assert(dalloc_node || node != NULL);
+       if (dalloc_node && node != NULL)
+               arena_node_dalloc(arena, node);
        if (*zero) {
                if (!zeroed)
                        memset(ret, 0, size);
@@ -142,8 +328,8 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
  * them if they are returned.
  */
 static void *
-chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,
-    bool *zero, dss_prec_t dss_prec)
+chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
+    bool *zero, bool *commit, dss_prec_t dss_prec)
 {
        void *ret;
 
@@ -153,174 +339,206 @@ chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,
        assert((alignment & chunksize_mask) == 0);
 
        /* "primary" dss. */
-       if (have_dss && dss_prec == dss_prec_primary) {
-               if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
-                   new_addr, size, alignment, base, zero)) != NULL)
-                       return (ret);
-               /* requesting an address only implemented for recycle */
-               if (new_addr == NULL
-                   && (ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
-                       return (ret);
-       }
-       /* mmap. */
-       if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, new_addr,
-           size, alignment, base, zero)) != NULL)
+       if (have_dss && dss_prec == dss_prec_primary && (ret =
+           chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
+           NULL)
                return (ret);
-       /* requesting an address only implemented for recycle */
-       if (new_addr == NULL &&
-           (ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
+       /* mmap. */
+       if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
+           NULL)
                return (ret);
        /* "secondary" dss. */
-       if (have_dss && dss_prec == dss_prec_secondary) {
-               if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
-                   new_addr, size, alignment, base, zero)) != NULL)
-                       return (ret);
-               /* requesting an address only implemented for recycle */
-               if (new_addr == NULL &&
-                   (ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
-                       return (ret);
-       }
+       if (have_dss && dss_prec == dss_prec_secondary && (ret =
+           chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
+           NULL)
+               return (ret);
 
        /* All strategies for allocation failed. */
        return (NULL);
 }
 
-static bool
-chunk_register(void *chunk, size_t size, bool base)
+void *
+chunk_alloc_base(size_t size)
 {
+       void *ret;
+       bool zero, commit;
 
-       assert(chunk != NULL);
-       assert(CHUNK_ADDR2BASE(chunk) == chunk);
-
-       if (config_ivsalloc && !base) {
-               if (rtree_set(chunks_rtree, (uintptr_t)chunk, 1))
-                       return (true);
-       }
-       if (config_stats || config_prof) {
-               bool gdump;
-               malloc_mutex_lock(&chunks_mtx);
-               if (config_stats)
-                       stats_chunks.nchunks += (size / chunksize);
-               stats_chunks.curchunks += (size / chunksize);
-               if (stats_chunks.curchunks > stats_chunks.highchunks) {
-                       stats_chunks.highchunks =
-                           stats_chunks.curchunks;
-                       if (config_prof)
-                               gdump = true;
-               } else if (config_prof)
-                       gdump = false;
-               malloc_mutex_unlock(&chunks_mtx);
-               if (config_prof && opt_prof && opt_prof_gdump && gdump)
-                       prof_gdump();
-       }
+       /*
+        * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
+        * because it's critical that chunk_alloc_base() return untouched
+        * demand-zeroed virtual memory.
+        */
+       zero = true;
+       commit = true;
+       ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
+       if (ret == NULL)
+               return (NULL);
        if (config_valgrind)
-               JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(chunk, size);
-       return (false);
+               JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+
+       return (ret);
 }
 
 void *
-chunk_alloc_base(size_t size)
+chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
+    size_t size, size_t alignment, bool *zero, bool dalloc_node)
 {
        void *ret;
-       bool zero;
+       bool commit;
 
-       zero = false;
-       ret = chunk_alloc_core(NULL, size, chunksize, true, &zero,
-           chunk_dss_prec_get());
+       assert(size != 0);
+       assert((size & chunksize_mask) == 0);
+       assert(alignment != 0);
+       assert((alignment & chunksize_mask) == 0);
+
+       commit = true;
+       ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
+           &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
+           &commit, dalloc_node);
        if (ret == NULL)
                return (NULL);
-       if (chunk_register(ret, size, true)) {
-               chunk_dalloc_core(ret, size);
-               return (NULL);
-       }
+       assert(commit);
+       if (config_valgrind)
+               JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        return (ret);
 }
 
-void *
-chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
-    unsigned arena_ind, void *new_addr, size_t size, size_t alignment,
-    bool *zero)
+static arena_t *
+chunk_arena_get(unsigned arena_ind)
+{
+       arena_t *arena;
+
+       arena = arena_get(arena_ind, false);
+       /*
+        * The arena we're allocating on behalf of must have been initialized
+        * already.
+        */
+       assert(arena != NULL);
+       return (arena);
+}
+
+static void *
+chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool *commit, unsigned arena_ind)
 {
        void *ret;
+       arena_t *arena;
 
-       ret = chunk_alloc(new_addr, size, alignment, zero, arena_ind);
-       if (ret != NULL && chunk_register(ret, size, false)) {
-               chunk_dalloc(ret, size, arena_ind);
-               ret = NULL;
-       }
+       arena = chunk_arena_get(arena_ind);
+       ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
+           commit, arena->dss_prec);
+       if (ret == NULL)
+               return (NULL);
+       if (config_valgrind)
+               JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
        return (ret);
 }
 
-/* Default arena chunk allocation routine in the absence of user override. */
+static void *
+chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
+    size_t size, size_t alignment, bool *zero, bool *commit)
+{
+
+       assert(size != 0);
+       assert((size & chunksize_mask) == 0);
+       assert(alignment != 0);
+       assert((alignment & chunksize_mask) == 0);
+
+       return (chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_retained,
+           &arena->chunks_ad_retained, false, new_addr, size, alignment, zero,
+           commit, true));
+}
+
 void *
-chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
-    unsigned arena_ind)
+chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
+    size_t size, size_t alignment, bool *zero, bool *commit)
 {
+       void *ret;
+
+       chunk_hooks_assure_initialized(arena, chunk_hooks);
 
-       return (chunk_alloc_core(new_addr, size, alignment, false, zero,
-           arenas[arena_ind]->dss_prec));
+       ret = chunk_alloc_retained(arena, chunk_hooks, new_addr, size,
+           alignment, zero, commit);
+       if (ret == NULL) {
+               ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
+                   commit, arena->ind);
+               if (ret == NULL)
+                       return (NULL);
+       }
+
+       if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
+               JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
+       return (ret);
 }
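/*
 * Reading of chunk_alloc_wrapper() above: retained virtual memory is
 * recycled via chunk_alloc_retained() first, and the installed chunk_hooks_t
 * alloc hook is consulted only when nothing suitable is retained.  A rough
 * sketch of the call order, assuming a hypothetical chunk-sized,
 * chunk-aligned request:
 *
 *     bool zero = false, commit = true;
 *     void *p = chunk_alloc_retained(arena, chunk_hooks, NULL, chunksize,
 *         chunksize, &zero, &commit);
 *     if (p == NULL) {
 *             p = chunk_hooks->alloc(NULL, chunksize, chunksize, &zero,
 *                 &commit, arena->ind);
 *     }
 */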
 
 static void
-chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
-    size_t size)
+chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+    void *chunk, size_t size, bool zeroed, bool committed)
 {
        bool unzeroed;
-       extent_node_t *xnode, *node, *prev, *xprev, key;
+       extent_node_t *node, *prev;
+       extent_node_t key;
 
-       unzeroed = pages_purge(chunk, size);
+       assert(!cache || !zeroed);
+       unzeroed = cache || !zeroed;
        JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
 
-       /*
-        * Allocate a node before acquiring chunks_mtx even though it might not
-        * be needed, because base_node_alloc() may cause a new base chunk to
-        * be allocated, which could cause deadlock if chunks_mtx were already
-        * held.
-        */
-       xnode = base_node_alloc();
-       /* Use xprev to implement conditional deferred deallocation of prev. */
-       xprev = NULL;
-
-       malloc_mutex_lock(&chunks_mtx);
-       key.addr = (void *)((uintptr_t)chunk + size);
+       malloc_mutex_lock(&arena->chunks_mtx);
+       chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+       extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
+           false, false);
        node = extent_tree_ad_nsearch(chunks_ad, &key);
        /* Try to coalesce forward. */
-       if (node != NULL && node->addr == key.addr) {
+       if (node != NULL && extent_node_addr_get(node) ==
+           extent_node_addr_get(&key) && extent_node_committed_get(node) ==
+           committed && !chunk_hooks->merge(chunk, size,
+           extent_node_addr_get(node), extent_node_size_get(node), false,
+           arena->ind)) {
                /*
                 * Coalesce chunk with the following address range.  This does
                 * not change the position within chunks_ad, so only
                 * remove/insert from/into chunks_szad.
                 */
                extent_tree_szad_remove(chunks_szad, node);
-               node->addr = chunk;
-               node->size += size;
-               node->zeroed = (node->zeroed && !unzeroed);
+               arena_chunk_cache_maybe_remove(arena, node, cache);
+               extent_node_addr_set(node, chunk);
+               extent_node_size_set(node, size + extent_node_size_get(node));
+               extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
+                   !unzeroed);
                extent_tree_szad_insert(chunks_szad, node);
+               arena_chunk_cache_maybe_insert(arena, node, cache);
        } else {
                /* Coalescing forward failed, so insert a new node. */
-               if (xnode == NULL) {
+               node = arena_node_alloc(arena);
+               if (node == NULL) {
                        /*
-                        * base_node_alloc() failed, which is an exceedingly
-                        * unlikely failure.  Leak chunk; its pages have
-                        * already been purged, so this is only a virtual
-                        * memory leak.
+                        * Node allocation failed, which is an exceedingly
+                        * unlikely failure.  Leak chunk after making sure its
+                        * pages have already been purged, so that this is only
+                        * a virtual memory leak.
                         */
+                       if (cache) {
+                               chunk_purge_wrapper(arena, chunk_hooks, chunk,
+                                   size, 0, size);
+                       }
                        goto label_return;
                }
-               node = xnode;
-               xnode = NULL; /* Prevent deallocation below. */
-               node->addr = chunk;
-               node->size = size;
-               node->zeroed = !unzeroed;
+               extent_node_init(node, arena, chunk, size, !unzeroed,
+                   committed);
                extent_tree_ad_insert(chunks_ad, node);
                extent_tree_szad_insert(chunks_szad, node);
+               arena_chunk_cache_maybe_insert(arena, node, cache);
        }
 
        /* Try to coalesce backward. */
        prev = extent_tree_ad_prev(chunks_ad, node);
-       if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
-           chunk) {
+       if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
+           extent_node_size_get(prev)) == chunk &&
+           extent_node_committed_get(prev) == committed &&
+           !chunk_hooks->merge(extent_node_addr_get(prev),
+           extent_node_size_get(prev), chunk, size, false, arena->ind)) {
                /*
                 * Coalesce chunk with the previous address range.  This does
                 * not change the position within chunks_ad, so only
@@ -328,44 +546,42 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
                 */
                extent_tree_szad_remove(chunks_szad, prev);
                extent_tree_ad_remove(chunks_ad, prev);
-
+               arena_chunk_cache_maybe_remove(arena, prev, cache);
                extent_tree_szad_remove(chunks_szad, node);
-               node->addr = prev->addr;
-               node->size += prev->size;
-               node->zeroed = (node->zeroed && prev->zeroed);
+               arena_chunk_cache_maybe_remove(arena, node, cache);
+               extent_node_addr_set(node, extent_node_addr_get(prev));
+               extent_node_size_set(node, extent_node_size_get(prev) +
+                   extent_node_size_get(node));
+               extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
+                   extent_node_zeroed_get(node));
                extent_tree_szad_insert(chunks_szad, node);
+               arena_chunk_cache_maybe_insert(arena, node, cache);
 
-               xprev = prev;
+               arena_node_dalloc(arena, prev);
        }
 
 label_return:
-       malloc_mutex_unlock(&chunks_mtx);
-       /*
-        * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
-        * avoid potential deadlock.
-        */
-       if (xnode != NULL)
-               base_node_dalloc(xnode);
-       if (xprev != NULL)
-               base_node_dalloc(xprev);
+       malloc_mutex_unlock(&arena->chunks_mtx);
 }
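/*
 * chunk_record() above coalesces the freed range with its address-space
 * neighbors whenever the commit state matches and the merge hook agrees
 * (the hook returning false means the merge succeeded).  Illustrative
 * effect, with hypothetical addresses and a 4 MiB chunk size: if
 * [0x400000, +4M) is already in the trees and [0x800000, +4M) is being
 * recorded, the result is a single [0x400000, +8M) extent, and the node that
 * described the absorbed preceding range is returned via arena_node_dalloc().
 */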
 
 void
-chunk_unmap(void *chunk, size_t size)
+chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+    size_t size, bool committed)
 {
+
        assert(chunk != NULL);
        assert(CHUNK_ADDR2BASE(chunk) == chunk);
        assert(size != 0);
        assert((size & chunksize_mask) == 0);
 
-       if (have_dss && chunk_in_dss(chunk))
-               chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
-       else if (chunk_dalloc_mmap(chunk, size))
-               chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
+       chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
+           &arena->chunks_ad_cached, true, chunk, size, false, committed);
+       arena_maybe_purge(arena);
 }
 
-static void
-chunk_dalloc_core(void *chunk, size_t size)
+void
+chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+    size_t size, bool zeroed, bool committed)
 {
 
        assert(chunk != NULL);
@@ -373,30 +589,149 @@ chunk_dalloc_core(void *chunk, size_t size)
        assert(size != 0);
        assert((size & chunksize_mask) == 0);
 
-       if (config_ivsalloc)
-               rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
-       if (config_stats || config_prof) {
-               malloc_mutex_lock(&chunks_mtx);
-               assert(stats_chunks.curchunks >= (size / chunksize));
-               stats_chunks.curchunks -= (size / chunksize);
-               malloc_mutex_unlock(&chunks_mtx);
+       chunk_hooks_assure_initialized(arena, chunk_hooks);
+       /* Try to deallocate. */
+       if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
+               return;
+       /* Try to decommit; purge if that fails. */
+       if (committed) {
+               committed = chunk_hooks->decommit(chunk, size, 0, size,
+                   arena->ind);
        }
+       zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
+           arena->ind);
+       chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
+           &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
+}
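/*
 * Deallocation cascade in chunk_dalloc_arena() above: the dalloc hook
 * returning false means it fully disposed of the chunk, so nothing further
 * is needed; returning true means the mapping is kept, in which case the
 * chunk is decommitted if possible, purged otherwise, and then recorded in
 * the retained trees for later reuse by chunk_alloc_retained().
 */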
+
+static bool
+chunk_dalloc_default(void *chunk, size_t size, bool committed,
+    unsigned arena_ind)
+{
+
+       if (!have_dss || !chunk_in_dss(chunk))
+               return (chunk_dalloc_mmap(chunk, size));
+       return (true);
+}
+
+void
+chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+    size_t size, bool committed)
+{
+
+       chunk_hooks_assure_initialized(arena, chunk_hooks);
+       chunk_hooks->dalloc(chunk, size, committed, arena->ind);
+       if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
+               JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+}
 
-       chunk_unmap(chunk, size);
+static bool
+chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
+    unsigned arena_ind)
+{
+
+       return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
+           length));
+}
+
+static bool
+chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
+    unsigned arena_ind)
+{
+
+       return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
+           length));
 }
 
-/* Default arena chunk deallocation routine in the absence of user override. */
 bool
-chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
+chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
+{
+
+       assert(chunk != NULL);
+       assert(CHUNK_ADDR2BASE(chunk) == chunk);
+       assert((offset & PAGE_MASK) == 0);
+       assert(length != 0);
+       assert((length & PAGE_MASK) == 0);
+
+       return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
+           length));
+}
+
+static bool
+chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
+    unsigned arena_ind)
+{
+
+       return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
+           length));
+}
+
+bool
+chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+    size_t size, size_t offset, size_t length)
+{
+
+       chunk_hooks_assure_initialized(arena, chunk_hooks);
+       return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
+}
+
+static bool
+chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
+    bool committed, unsigned arena_ind)
+{
+
+       if (!maps_coalesce)
+               return (true);
+       return (false);
+}
+
+static bool
+chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+    bool committed, unsigned arena_ind)
 {
 
-       chunk_dalloc_core(chunk, size);
+       if (!maps_coalesce)
+               return (true);
+       if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
+               return (true);
+
        return (false);
 }
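/*
 * Convention for the default hooks: returning true means the operation is
 * refused.  chunk_split_default() and chunk_merge_default() refuse whenever
 * the platform cannot coalesce mappings (maps_coalesce is false), and
 * merging additionally refuses to combine an sbrk()-backed chunk with an
 * mmap()-backed one (chunk_in_dss() differs), since the two are released by
 * different mechanisms.
 */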
 
+static rtree_node_elm_t *
+chunks_rtree_node_alloc(size_t nelms)
+{
+
+       return ((rtree_node_elm_t *)base_alloc(nelms *
+           sizeof(rtree_node_elm_t)));
+}
+
 bool
 chunk_boot(void)
 {
+#ifdef _WIN32
+       SYSTEM_INFO info;
+       GetSystemInfo(&info);
+
+       /*
+        * Verify actual page size is equal to or an integral multiple of
+        * configured page size.
+        */
+       if (info.dwPageSize & ((1U << LG_PAGE) - 1))
+               return (true);
+
+       /*
+        * Configure chunksize (if not set) to match granularity (usually 64K),
+        * so pages_map will always take fast path.
+        */
+       if (!opt_lg_chunk) {
+               opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
+                   - 1;
+       }
+#else
+       if (!opt_lg_chunk)
+               opt_lg_chunk = LG_CHUNK_DEFAULT;
+#endif
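/*
 * Worked example for the Windows branch above, assuming the usual 64 KiB
 * allocation granularity: dwAllocationGranularity == 0x10000, ffs_u(0x10000)
 * == 17, so opt_lg_chunk becomes 16 and chunksize is set to 64 KiB.  Chunk
 * allocations then line up with the granularity, which is what lets
 * pages_map() take its fast path per the comment above.
 */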
 
        /* Set variables according to the value of opt_lg_chunk. */
        chunksize = (ZU(1) << opt_lg_chunk);
@@ -404,23 +739,11 @@ chunk_boot(void)
        chunksize_mask = chunksize - 1;
        chunk_npages = (chunksize >> LG_PAGE);
 
-       if (config_stats || config_prof) {
-               if (malloc_mutex_init(&chunks_mtx))
-                       return (true);
-               memset(&stats_chunks, 0, sizeof(chunk_stats_t));
-       }
        if (have_dss && chunk_dss_boot())
                return (true);
-       extent_tree_szad_new(&chunks_szad_mmap);
-       extent_tree_ad_new(&chunks_ad_mmap);
-       extent_tree_szad_new(&chunks_szad_dss);
-       extent_tree_ad_new(&chunks_ad_dss);
-       if (config_ivsalloc) {
-               chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
-                   opt_lg_chunk, base_alloc, NULL);
-               if (chunks_rtree == NULL)
-                       return (true);
-       }
+       if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
+           opt_lg_chunk), chunks_rtree_node_alloc, NULL))
+               return (true);
 
        return (false);
 }
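/*
 * Sizing note for the rtree_new() call above: chunks_rtree is keyed by the
 * chunk-index bits of an address, i.e. (8 * sizeof(void *)) - opt_lg_chunk
 * significant bits.  For example, with 64-bit pointers (LG_SIZEOF_PTR == 3)
 * and hypothetical 2 MiB chunks (opt_lg_chunk == 21), that is 64 - 21 = 43
 * key bits.
 */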
@@ -429,9 +752,6 @@ void
 chunk_prefork(void)
 {
 
-       malloc_mutex_prefork(&chunks_mtx);
-       if (config_ivsalloc)
-               rtree_prefork(chunks_rtree);
        chunk_dss_prefork();
 }
 
@@ -440,9 +760,6 @@ chunk_postfork_parent(void)
 {
 
        chunk_dss_postfork_parent();
-       if (config_ivsalloc)
-               rtree_postfork_parent(chunks_rtree);
-       malloc_mutex_postfork_parent(&chunks_mtx);
 }
 
 void
@@ -450,7 +767,4 @@ chunk_postfork_child(void)
 {
 
        chunk_dss_postfork_child();
-       if (config_ivsalloc)
-               rtree_postfork_child(chunks_rtree);
-       malloc_mutex_postfork_child(&chunks_mtx);
 }
index cce71041c4f61742bed4432c885c2eeec2fd6497..61fc91696192476304939721e8c338779575614f 100644 (file)
@@ -66,10 +66,9 @@ chunk_dss_prec_set(dss_prec_t dss_prec)
 }
 
 void *
-chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
+chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
+    bool *zero, bool *commit)
 {
-       void *ret;
-
        cassert(have_dss);
        assert(size > 0 && (size & chunksize_mask) == 0);
        assert(alignment > 0 && (alignment & chunksize_mask) == 0);
@@ -83,9 +82,6 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
 
        malloc_mutex_lock(&dss_mtx);
        if (dss_prev != (void *)-1) {
-               size_t gap_size, cpad_size;
-               void *cpad, *dss_next;
-               intptr_t incr;
 
                /*
                 * The loop is necessary to recover from races with other
@@ -93,8 +89,20 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
                 * malloc.
                 */
                do {
+                       void *ret, *cpad, *dss_next;
+                       size_t gap_size, cpad_size;
+                       intptr_t incr;
+                       /* Avoid an unnecessary system call. */
+                       if (new_addr != NULL && dss_max != new_addr)
+                               break;
+
                        /* Get the current end of the DSS. */
                        dss_max = chunk_dss_sbrk(0);
+
+                       /* Make sure the earlier condition still holds. */
+                       if (new_addr != NULL && dss_max != new_addr)
+                               break;
+
                        /*
                         * Calculate how much padding is necessary to
                         * chunk-align the end of the DSS.
@@ -123,13 +131,20 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
                                /* Success. */
                                dss_max = dss_next;
                                malloc_mutex_unlock(&dss_mtx);
-                               if (cpad_size != 0)
-                                       chunk_unmap(cpad, cpad_size);
+                               if (cpad_size != 0) {
+                                       chunk_hooks_t chunk_hooks =
+                                           CHUNK_HOOKS_INITIALIZER;
+                                       chunk_dalloc_wrapper(arena,
+                                           &chunk_hooks, cpad, cpad_size,
+                                           true);
+                               }
                                if (*zero) {
                                        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
                                            ret, size);
                                        memset(ret, 0, size);
                                }
+                               if (!*commit)
+                                       *commit = pages_decommit(ret, size);
                                return (ret);
                        }
                } while (dss_prev != (void *)-1);
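/*
 * Alignment handling in the DSS loop above, in rough terms: gap_size pads
 * dss_max up to the next chunk boundary, cpad names that boundary, ret is
 * the alignment-rounded allocation base, and the sbrk() increment covers the
 * gap, the padding and the request itself.  Any chunk-aligned padding that
 * ends up between cpad and ret (cpad_size bytes) is released through
 * chunk_dalloc_wrapper() rather than leaked.
 */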
index 7e02c10223ea93b4cb4a0a9cb07e2883e944a931..56b2ee422ab88f289d67339fed97ac21edf1a9a5 100644 (file)
 #define        JEMALLOC_CHUNK_MMAP_C_
 #include "jemalloc/internal/jemalloc_internal.h"
 
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void    *pages_map(void *addr, size_t size);
-static void    pages_unmap(void *addr, size_t size);
-static void    *chunk_alloc_mmap_slow(size_t size, size_t alignment,
-    bool *zero);
-
 /******************************************************************************/
 
 static void *
-pages_map(void *addr, size_t size)
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
 {
        void *ret;
-
-       assert(size != 0);
-
-#ifdef _WIN32
-       /*
-        * If VirtualAlloc can't allocate at the given address when one is
-        * given, it fails and returns NULL.
-        */
-       ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
-           PAGE_READWRITE);
-#else
-       /*
-        * We don't use MAP_FIXED here, because it can cause the *replacement*
-        * of existing mappings, and we only want to create new mappings.
-        */
-       ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-           -1, 0);
-       assert(ret != NULL);
-
-       if (ret == MAP_FAILED)
-               ret = NULL;
-       else if (addr != NULL && ret != addr) {
-               /*
-                * We succeeded in mapping memory, but not in the right place.
-                */
-               if (munmap(ret, size) == -1) {
-                       char buf[BUFERROR_BUF];
-
-                       buferror(get_errno(), buf, sizeof(buf));
-                       malloc_printf("<jemalloc: Error in munmap(): %s\n",
-                           buf);
-                       if (opt_abort)
-                               abort();
-               }
-               ret = NULL;
-       }
-#endif
-       assert(ret == NULL || (addr == NULL && ret != addr)
-           || (addr != NULL && ret == addr));
-       return (ret);
-}
-
-static void
-pages_unmap(void *addr, size_t size)
-{
-
-#ifdef _WIN32
-       if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
-#else
-       if (munmap(addr, size) == -1)
-#endif
-       {
-               char buf[BUFERROR_BUF];
-
-               buferror(get_errno(), buf, sizeof(buf));
-               malloc_printf("<jemalloc>: Error in "
-#ifdef _WIN32
-                             "VirtualFree"
-#else
-                             "munmap"
-#endif
-                             "(): %s\n", buf);
-               if (opt_abort)
-                       abort();
-       }
-}
-
-static void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
-{
-       void *ret = (void *)((uintptr_t)addr + leadsize);
-
-       assert(alloc_size >= leadsize + size);
-#ifdef _WIN32
-       {
-               void *new_addr;
-
-               pages_unmap(addr, alloc_size);
-               new_addr = pages_map(ret, size);
-               if (new_addr == ret)
-                       return (ret);
-               if (new_addr)
-                       pages_unmap(new_addr, size);
-               return (NULL);
-       }
-#else
-       {
-               size_t trailsize = alloc_size - leadsize - size;
-
-               if (leadsize != 0)
-                       pages_unmap(addr, leadsize);
-               if (trailsize != 0)
-                       pages_unmap((void *)((uintptr_t)ret + size), trailsize);
-               return (ret);
-       }
-#endif
-}
-
-bool
-pages_purge(void *addr, size_t length)
-{
-       bool unzeroed;
-
-#ifdef _WIN32
-       VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
-       unzeroed = true;
-#elif defined(JEMALLOC_HAVE_MADVISE)
-#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
-#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
-#    define JEMALLOC_MADV_ZEROS true
-#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
-#    define JEMALLOC_MADV_PURGE MADV_FREE
-#    define JEMALLOC_MADV_ZEROS false
-#  else
-#    error "No madvise(2) flag defined for purging unused dirty pages."
-#  endif
-       int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
-       unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
-#  undef JEMALLOC_MADV_PURGE
-#  undef JEMALLOC_MADV_ZEROS
-#else
-       /* Last resort no-op. */
-       unzeroed = true;
-#endif
-       return (unzeroed);
-}
-
-static void *
-chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
-{
-       void *ret, *pages;
-       size_t alloc_size, leadsize;
+       size_t alloc_size;
 
        alloc_size = size + alignment - PAGE;
        /* Beware size_t wrap-around. */
        if (alloc_size < size)
                return (NULL);
        do {
+               void *pages;
+               size_t leadsize;
                pages = pages_map(NULL, alloc_size);
                if (pages == NULL)
                        return (NULL);
@@ -163,11 +26,14 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
 
        assert(ret != NULL);
        *zero = true;
+       if (!*commit)
+               *commit = pages_decommit(ret, size);
        return (ret);
 }
 
 void *
-chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
+chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool *commit)
 {
        void *ret;
        size_t offset;
@@ -188,17 +54,20 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
        assert(alignment != 0);
        assert((alignment & chunksize_mask) == 0);
 
-       ret = pages_map(NULL, size);
-       if (ret == NULL)
-               return (NULL);
+       ret = pages_map(new_addr, size);
+       if (ret == NULL || ret == new_addr)
+               return (ret);
+       assert(new_addr == NULL);
        offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
        if (offset != 0) {
                pages_unmap(ret, size);
-               return (chunk_alloc_mmap_slow(size, alignment, zero));
+               return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
        }
 
        assert(ret != NULL);
        *zero = true;
+       if (!*commit)
+               *commit = pages_decommit(ret, size);
        return (ret);
 }
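/*
 * Fast/slow split in chunk_alloc_mmap() above: the fast path maps exactly
 * size bytes and keeps the result if it already satisfies the requested
 * alignment; otherwise the mapping is released and chunk_alloc_mmap_slow()
 * over-allocates size + alignment - PAGE bytes so that an aligned sub-range
 * must exist, then unmaps the excess.  A sketch of the trim arithmetic
 * (assumed, not shown in this hunk):
 *
 *     leadsize  = ALIGNMENT_CEILING((uintptr_t)pages, alignment)
 *                   - (uintptr_t)pages;
 *     ret       = (void *)((uintptr_t)pages + leadsize);
 *     trailsize = alloc_size - leadsize - size;
 *
 * with the leadsize/trailsize regions handed back to pages_unmap().
 */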
 
index 3a545966abbf3c03e50bf8bf63f896ba34696d0c..3b423aa22a3e56e7b00a8a02dfb90c829c3dec5d 100644 (file)
@@ -99,7 +99,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
         * Cycle through the cells in the bucket, starting at a random position.
         * The randomness avoids worst-case search overhead as buckets fill up.
         */
-       prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
+       offset = (unsigned)prng_lg_range(&ckh->prng_state, LG_CKH_BUCKET_CELLS);
        for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
                cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
                    ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
@@ -141,7 +141,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
                 * were an item for which both hashes indicated the same
                 * bucket.
                 */
-               prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
+               i = (unsigned)prng_lg_range(&ckh->prng_state,
+                   LG_CKH_BUCKET_CELLS);
                cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
                assert(cell->key != NULL);
 
@@ -247,8 +248,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
 {
        bool ret;
        ckhc_t *tab, *ttab;
-       size_t lg_curcells;
-       unsigned lg_prevbuckets;
+       unsigned lg_prevbuckets, lg_curcells;
 
 #ifdef CKH_COUNT
        ckh->ngrows++;
@@ -266,11 +266,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
 
                lg_curcells++;
                usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-               if (usize == 0) {
+               if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
                        ret = true;
                        goto label_return;
                }
-               tab = (ckhc_t *)ipalloc(tsd, usize, CACHELINE, true);
+               tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
+                   true, NULL);
                if (tab == NULL) {
                        ret = true;
                        goto label_return;
@@ -282,12 +283,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
                ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
 
                if (!ckh_rebuild(ckh, tab)) {
-                       idalloc(tsd, tab);
+                       idalloctm(tsd, tab, tcache_get(tsd, false), true, true);
                        break;
                }
 
                /* Rebuilding failed, so back out partially rebuilt table. */
-               idalloc(tsd, ckh->tab);
+               idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true, true);
                ckh->tab = tab;
                ckh->lg_curbuckets = lg_prevbuckets;
        }
@@ -301,8 +302,8 @@ static void
 ckh_shrink(tsd_t *tsd, ckh_t *ckh)
 {
        ckhc_t *tab, *ttab;
-       size_t lg_curcells, usize;
-       unsigned lg_prevbuckets;
+       size_t usize;
+       unsigned lg_prevbuckets, lg_curcells;
 
        /*
         * It is possible (though unlikely, given well behaved hashes) that the
@@ -311,9 +312,10 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
        lg_prevbuckets = ckh->lg_curbuckets;
        lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
        usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-       if (usize == 0)
+       if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
                return;
-       tab = (ckhc_t *)ipalloc(tsd, usize, CACHELINE, true);
+       tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
+           NULL);
        if (tab == NULL) {
                /*
                 * An OOM error isn't worth propagating, since it doesn't
@@ -328,7 +330,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
        ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
 
        if (!ckh_rebuild(ckh, tab)) {
-               idalloc(tsd, tab);
+               idalloctm(tsd, tab, tcache_get(tsd, false), true, true);
 #ifdef CKH_COUNT
                ckh->nshrinks++;
 #endif
@@ -336,7 +338,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
        }
 
        /* Rebuilding failed, so back out partially rebuilt table. */
-       idalloc(tsd, ckh->tab);
+       idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true, true);
        ckh->tab = tab;
        ckh->lg_curbuckets = lg_prevbuckets;
 #ifdef CKH_COUNT
@@ -367,10 +369,10 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
        ckh->count = 0;
 
        /*
-        * Find the minimum power of 2 that is large enough to fit aBaseCount
+        * Find the minimum power of 2 that is large enough to fit minitems
         * entries.  We are using (2+,2) cuckoo hashing, which has an expected
         * maximum load factor of at least ~0.86, so 0.75 is a conservative load
-        * factor that will typically allow 2^aLgMinItems to fit without ever
+        * factor that will typically allow mincells items to fit without ever
         * growing the table.
         */
        assert(LG_CKH_BUCKET_CELLS > 0);
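/*
 * Worked example for the load-factor comment above, assuming a hypothetical
 * minitems of 100 and LG_CKH_BUCKET_CELLS == 2 (4 cells per bucket): a 0.75
 * load factor needs at least ceil(100 / 0.75) = 134 cells, the next power of
 * two is 256 cells, i.e. 64 buckets, which typically holds the initial items
 * without ever growing the table.
 */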
@@ -385,11 +387,12 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
        ckh->keycomp = keycomp;
 
        usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
-       if (usize == 0) {
+       if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
                ret = true;
                goto label_return;
        }
-       ckh->tab = (ckhc_t *)ipalloc(tsd, usize, CACHELINE, true);
+       ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
+           NULL);
        if (ckh->tab == NULL) {
                ret = true;
                goto label_return;
@@ -408,9 +411,9 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
 
 #ifdef CKH_VERBOSE
        malloc_printf(
-           "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
-           " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
-           " nrelocs: %"PRIu64"\n", __func__, ckh,
+           "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
+           " nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
+           " nrelocs: %"FMTu64"\n", __func__, ckh,
            (unsigned long long)ckh->ngrows,
            (unsigned long long)ckh->nshrinks,
            (unsigned long long)ckh->nshrinkfails,
@@ -418,7 +421,7 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
            (unsigned long long)ckh->nrelocs);
 #endif
 
-       idalloc(tsd, ckh->tab);
+       idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true, true);
        if (config_debug)
                memset(ckh, 0x5a, sizeof(ckh_t));
 }
index 309f1f65d9858cf451d53bedcb86785e36052f79..17bd0719735fbd264f8f3a0f891d84fc71323a63 100644 (file)
@@ -16,22 +16,22 @@ static ctl_stats_t  ctl_stats;
 /******************************************************************************/
 /* Helpers for named and indexed nodes. */
 
-static inline const ctl_named_node_t *
+JEMALLOC_INLINE_C const ctl_named_node_t *
 ctl_named_node(const ctl_node_t *node)
 {
 
        return ((node->named) ? (const ctl_named_node_t *)node : NULL);
 }
 
-static inline const ctl_named_node_t *
-ctl_named_children(const ctl_named_node_t *node, int index)
+JEMALLOC_INLINE_C const ctl_named_node_t *
+ctl_named_children(const ctl_named_node_t *node, size_t index)
 {
        const ctl_named_node_t *children = ctl_named_node(node->children);
 
        return (children ? &children[index] : NULL);
 }
 
-static inline const ctl_indexed_node_t *
+JEMALLOC_INLINE_C const ctl_indexed_node_t *
 ctl_indexed_node(const ctl_node_t *node)
 {
 
@@ -73,9 +73,11 @@ CTL_PROTO(thread_allocated)
 CTL_PROTO(thread_allocatedp)
 CTL_PROTO(thread_deallocated)
 CTL_PROTO(thread_deallocatedp)
+CTL_PROTO(config_cache_oblivious)
 CTL_PROTO(config_debug)
 CTL_PROTO(config_fill)
 CTL_PROTO(config_lazy_lock)
+CTL_PROTO(config_malloc_conf)
 CTL_PROTO(config_munmap)
 CTL_PROTO(config_prof)
 CTL_PROTO(config_prof_libgcc)
@@ -90,7 +92,9 @@ CTL_PROTO(opt_abort)
 CTL_PROTO(opt_dss)
 CTL_PROTO(opt_lg_chunk)
 CTL_PROTO(opt_narenas)
+CTL_PROTO(opt_purge)
 CTL_PROTO(opt_lg_dirty_mult)
+CTL_PROTO(opt_decay_time)
 CTL_PROTO(opt_stats_print)
 CTL_PROTO(opt_junk)
 CTL_PROTO(opt_zero)
@@ -110,11 +114,16 @@ CTL_PROTO(opt_prof_gdump)
 CTL_PROTO(opt_prof_final)
 CTL_PROTO(opt_prof_leak)
 CTL_PROTO(opt_prof_accum)
+CTL_PROTO(tcache_create)
+CTL_PROTO(tcache_flush)
+CTL_PROTO(tcache_destroy)
+static void    arena_i_purge(unsigned arena_ind, bool all);
 CTL_PROTO(arena_i_purge)
-static void    arena_purge(unsigned arena_ind);
+CTL_PROTO(arena_i_decay)
 CTL_PROTO(arena_i_dss)
-CTL_PROTO(arena_i_chunk_alloc)
-CTL_PROTO(arena_i_chunk_dalloc)
+CTL_PROTO(arena_i_lg_dirty_mult)
+CTL_PROTO(arena_i_decay_time)
+CTL_PROTO(arena_i_chunk_hooks)
 INDEX_PROTO(arena_i)
 CTL_PROTO(arenas_bin_i_size)
 CTL_PROTO(arenas_bin_i_nregs)
@@ -122,24 +131,27 @@ CTL_PROTO(arenas_bin_i_run_size)
 INDEX_PROTO(arenas_bin_i)
 CTL_PROTO(arenas_lrun_i_size)
 INDEX_PROTO(arenas_lrun_i)
+CTL_PROTO(arenas_hchunk_i_size)
+INDEX_PROTO(arenas_hchunk_i)
 CTL_PROTO(arenas_narenas)
 CTL_PROTO(arenas_initialized)
+CTL_PROTO(arenas_lg_dirty_mult)
+CTL_PROTO(arenas_decay_time)
 CTL_PROTO(arenas_quantum)
 CTL_PROTO(arenas_page)
 CTL_PROTO(arenas_tcache_max)
 CTL_PROTO(arenas_nbins)
 CTL_PROTO(arenas_nhbins)
 CTL_PROTO(arenas_nlruns)
+CTL_PROTO(arenas_nhchunks)
 CTL_PROTO(arenas_extend)
 CTL_PROTO(prof_thread_active_init)
 CTL_PROTO(prof_active)
 CTL_PROTO(prof_dump)
+CTL_PROTO(prof_gdump)
 CTL_PROTO(prof_reset)
 CTL_PROTO(prof_interval)
 CTL_PROTO(lg_prof_sample)
-CTL_PROTO(stats_chunks_current)
-CTL_PROTO(stats_chunks_total)
-CTL_PROTO(stats_chunks_high)
 CTL_PROTO(stats_arenas_i_small_allocated)
 CTL_PROTO(stats_arenas_i_small_nmalloc)
 CTL_PROTO(stats_arenas_i_small_ndalloc)
@@ -152,10 +164,10 @@ CTL_PROTO(stats_arenas_i_huge_allocated)
 CTL_PROTO(stats_arenas_i_huge_nmalloc)
 CTL_PROTO(stats_arenas_i_huge_ndalloc)
 CTL_PROTO(stats_arenas_i_huge_nrequests)
-CTL_PROTO(stats_arenas_i_bins_j_allocated)
 CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
 CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
 CTL_PROTO(stats_arenas_i_bins_j_nrequests)
+CTL_PROTO(stats_arenas_i_bins_j_curregs)
 CTL_PROTO(stats_arenas_i_bins_j_nfills)
 CTL_PROTO(stats_arenas_i_bins_j_nflushes)
 CTL_PROTO(stats_arenas_i_bins_j_nruns)
@@ -167,18 +179,29 @@ CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
 CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
 CTL_PROTO(stats_arenas_i_lruns_j_curruns)
 INDEX_PROTO(stats_arenas_i_lruns_j)
+CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
+CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
+CTL_PROTO(stats_arenas_i_hchunks_j_nrequests)
+CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks)
+INDEX_PROTO(stats_arenas_i_hchunks_j)
 CTL_PROTO(stats_arenas_i_nthreads)
 CTL_PROTO(stats_arenas_i_dss)
+CTL_PROTO(stats_arenas_i_lg_dirty_mult)
+CTL_PROTO(stats_arenas_i_decay_time)
 CTL_PROTO(stats_arenas_i_pactive)
 CTL_PROTO(stats_arenas_i_pdirty)
 CTL_PROTO(stats_arenas_i_mapped)
 CTL_PROTO(stats_arenas_i_npurge)
 CTL_PROTO(stats_arenas_i_nmadvise)
 CTL_PROTO(stats_arenas_i_purged)
+CTL_PROTO(stats_arenas_i_metadata_mapped)
+CTL_PROTO(stats_arenas_i_metadata_allocated)
 INDEX_PROTO(stats_arenas_i)
 CTL_PROTO(stats_cactive)
 CTL_PROTO(stats_allocated)
 CTL_PROTO(stats_active)
+CTL_PROTO(stats_metadata)
+CTL_PROTO(stats_resident)
 CTL_PROTO(stats_mapped)
 
 /******************************************************************************/
@@ -221,60 +244,68 @@ static const ctl_named_node_t     thread_node[] = {
 };
 
 static const ctl_named_node_t  config_node[] = {
-       {NAME("debug"),                 CTL(config_debug)},
-       {NAME("fill"),                  CTL(config_fill)},
-       {NAME("lazy_lock"),             CTL(config_lazy_lock)},
-       {NAME("munmap"),                CTL(config_munmap)},
-       {NAME("prof"),                  CTL(config_prof)},
-       {NAME("prof_libgcc"),           CTL(config_prof_libgcc)},
-       {NAME("prof_libunwind"),        CTL(config_prof_libunwind)},
-       {NAME("stats"),                 CTL(config_stats)},
-       {NAME("tcache"),                CTL(config_tcache)},
-       {NAME("tls"),                   CTL(config_tls)},
-       {NAME("utrace"),                CTL(config_utrace)},
-       {NAME("valgrind"),              CTL(config_valgrind)},
-       {NAME("xmalloc"),               CTL(config_xmalloc)}
+       {NAME("cache_oblivious"), CTL(config_cache_oblivious)},
+       {NAME("debug"),         CTL(config_debug)},
+       {NAME("fill"),          CTL(config_fill)},
+       {NAME("lazy_lock"),     CTL(config_lazy_lock)},
+       {NAME("malloc_conf"),   CTL(config_malloc_conf)},
+       {NAME("munmap"),        CTL(config_munmap)},
+       {NAME("prof"),          CTL(config_prof)},
+       {NAME("prof_libgcc"),   CTL(config_prof_libgcc)},
+       {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
+       {NAME("stats"),         CTL(config_stats)},
+       {NAME("tcache"),        CTL(config_tcache)},
+       {NAME("tls"),           CTL(config_tls)},
+       {NAME("utrace"),        CTL(config_utrace)},
+       {NAME("valgrind"),      CTL(config_valgrind)},
+       {NAME("xmalloc"),       CTL(config_xmalloc)}
 };
 
 static const ctl_named_node_t opt_node[] = {
-       {NAME("abort"),                 CTL(opt_abort)},
-       {NAME("dss"),                   CTL(opt_dss)},
-       {NAME("lg_chunk"),              CTL(opt_lg_chunk)},
-       {NAME("narenas"),               CTL(opt_narenas)},
-       {NAME("lg_dirty_mult"),         CTL(opt_lg_dirty_mult)},
-       {NAME("stats_print"),           CTL(opt_stats_print)},
-       {NAME("junk"),                  CTL(opt_junk)},
-       {NAME("zero"),                  CTL(opt_zero)},
-       {NAME("quarantine"),            CTL(opt_quarantine)},
-       {NAME("redzone"),               CTL(opt_redzone)},
-       {NAME("utrace"),                CTL(opt_utrace)},
-       {NAME("xmalloc"),               CTL(opt_xmalloc)},
-       {NAME("tcache"),                CTL(opt_tcache)},
-       {NAME("lg_tcache_max"),         CTL(opt_lg_tcache_max)},
-       {NAME("prof"),                  CTL(opt_prof)},
-       {NAME("prof_prefix"),           CTL(opt_prof_prefix)},
-       {NAME("prof_active"),           CTL(opt_prof_active)},
+       {NAME("abort"),         CTL(opt_abort)},
+       {NAME("dss"),           CTL(opt_dss)},
+       {NAME("lg_chunk"),      CTL(opt_lg_chunk)},
+       {NAME("narenas"),       CTL(opt_narenas)},
+       {NAME("purge"),         CTL(opt_purge)},
+       {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
+       {NAME("decay_time"),    CTL(opt_decay_time)},
+       {NAME("stats_print"),   CTL(opt_stats_print)},
+       {NAME("junk"),          CTL(opt_junk)},
+       {NAME("zero"),          CTL(opt_zero)},
+       {NAME("quarantine"),    CTL(opt_quarantine)},
+       {NAME("redzone"),       CTL(opt_redzone)},
+       {NAME("utrace"),        CTL(opt_utrace)},
+       {NAME("xmalloc"),       CTL(opt_xmalloc)},
+       {NAME("tcache"),        CTL(opt_tcache)},
+       {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
+       {NAME("prof"),          CTL(opt_prof)},
+       {NAME("prof_prefix"),   CTL(opt_prof_prefix)},
+       {NAME("prof_active"),   CTL(opt_prof_active)},
        {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
-       {NAME("lg_prof_sample"),        CTL(opt_lg_prof_sample)},
-       {NAME("lg_prof_interval"),      CTL(opt_lg_prof_interval)},
-       {NAME("prof_gdump"),            CTL(opt_prof_gdump)},
-       {NAME("prof_final"),            CTL(opt_prof_final)},
-       {NAME("prof_leak"),             CTL(opt_prof_leak)},
-       {NAME("prof_accum"),            CTL(opt_prof_accum)}
+       {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
+       {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
+       {NAME("prof_gdump"),    CTL(opt_prof_gdump)},
+       {NAME("prof_final"),    CTL(opt_prof_final)},
+       {NAME("prof_leak"),     CTL(opt_prof_leak)},
+       {NAME("prof_accum"),    CTL(opt_prof_accum)}
 };
 
-static const ctl_named_node_t chunk_node[] = {
-       {NAME("alloc"),                 CTL(arena_i_chunk_alloc)},
-       {NAME("dalloc"),                CTL(arena_i_chunk_dalloc)}
+static const ctl_named_node_t  tcache_node[] = {
+       {NAME("create"),        CTL(tcache_create)},
+       {NAME("flush"),         CTL(tcache_flush)},
+       {NAME("destroy"),       CTL(tcache_destroy)}
 };
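/*
 * The new tcache.* entries expose explicit thread-cache management through
 * mallctl.  A minimal usage sketch, assuming the documented jemalloc
 * interface for these controls (not shown in this diff):
 *
 *     unsigned tc;
 *     size_t sz = sizeof(tc);
 *     if (mallctl("tcache.create", &tc, &sz, NULL, 0) == 0) {
 *             void *p = mallocx(64, MALLOCX_TCACHE(tc));
 *             dallocx(p, MALLOCX_TCACHE(tc));
 *             mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
 *     }
 */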
 
 static const ctl_named_node_t arena_i_node[] = {
-       {NAME("purge"),                 CTL(arena_i_purge)},
-       {NAME("dss"),                   CTL(arena_i_dss)},
-       {NAME("chunk"),                 CHILD(named, chunk)},
+       {NAME("purge"),         CTL(arena_i_purge)},
+       {NAME("decay"),         CTL(arena_i_decay)},
+       {NAME("dss"),           CTL(arena_i_dss)},
+       {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
+       {NAME("decay_time"),    CTL(arena_i_decay_time)},
+       {NAME("chunk_hooks"),   CTL(arena_i_chunk_hooks)}
 };
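/*
 * Likewise, the new "chunk_hooks" entry makes the per-arena chunk_hooks_t
 * readable and writable via mallctl.  A minimal usage sketch, assuming the
 * documented interface (the my_* callbacks are hypothetical):
 *
 *     chunk_hooks_t old_hooks, new_hooks = { my_alloc, my_dalloc, my_commit,
 *         my_decommit, my_purge, my_split, my_merge };
 *     size_t sz = sizeof(old_hooks);
 *     mallctl("arena.0.chunk_hooks", &old_hooks, &sz, &new_hooks,
 *         sizeof(new_hooks));
 */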
 static const ctl_named_node_t super_arena_i_node[] = {
-       {NAME(""),                      CHILD(named, arena_i)}
+       {NAME(""),              CHILD(named, arena_i)}
 };
 
 static const ctl_indexed_node_t arena_node[] = {
@@ -282,12 +313,12 @@ static const ctl_indexed_node_t arena_node[] = {
 };
 
 static const ctl_named_node_t arenas_bin_i_node[] = {
-       {NAME("size"),                  CTL(arenas_bin_i_size)},
-       {NAME("nregs"),                 CTL(arenas_bin_i_nregs)},
-       {NAME("run_size"),              CTL(arenas_bin_i_run_size)}
+       {NAME("size"),          CTL(arenas_bin_i_size)},
+       {NAME("nregs"),         CTL(arenas_bin_i_nregs)},
+       {NAME("run_size"),      CTL(arenas_bin_i_run_size)}
 };
 static const ctl_named_node_t super_arenas_bin_i_node[] = {
-       {NAME(""),                      CHILD(named, arenas_bin_i)}
+       {NAME(""),              CHILD(named, arenas_bin_i)}
 };
 
 static const ctl_indexed_node_t arenas_bin_node[] = {
@@ -295,79 +326,94 @@ static const ctl_indexed_node_t arenas_bin_node[] = {
 };
 
 static const ctl_named_node_t arenas_lrun_i_node[] = {
-       {NAME("size"),                  CTL(arenas_lrun_i_size)}
+       {NAME("size"),          CTL(arenas_lrun_i_size)}
 };
 static const ctl_named_node_t super_arenas_lrun_i_node[] = {
-       {NAME(""),                      CHILD(named, arenas_lrun_i)}
+       {NAME(""),              CHILD(named, arenas_lrun_i)}
 };
 
 static const ctl_indexed_node_t arenas_lrun_node[] = {
        {INDEX(arenas_lrun_i)}
 };
 
+static const ctl_named_node_t arenas_hchunk_i_node[] = {
+       {NAME("size"),          CTL(arenas_hchunk_i_size)}
+};
+static const ctl_named_node_t super_arenas_hchunk_i_node[] = {
+       {NAME(""),              CHILD(named, arenas_hchunk_i)}
+};
+
+static const ctl_indexed_node_t arenas_hchunk_node[] = {
+       {INDEX(arenas_hchunk_i)}
+};
+
 static const ctl_named_node_t arenas_node[] = {
-       {NAME("narenas"),               CTL(arenas_narenas)},
-       {NAME("initialized"),           CTL(arenas_initialized)},
-       {NAME("quantum"),               CTL(arenas_quantum)},
-       {NAME("page"),                  CTL(arenas_page)},
-       {NAME("tcache_max"),            CTL(arenas_tcache_max)},
-       {NAME("nbins"),                 CTL(arenas_nbins)},
-       {NAME("nhbins"),                CTL(arenas_nhbins)},
-       {NAME("bin"),                   CHILD(indexed, arenas_bin)},
-       {NAME("nlruns"),                CTL(arenas_nlruns)},
-       {NAME("lrun"),                  CHILD(indexed, arenas_lrun)},
-       {NAME("extend"),                CTL(arenas_extend)}
+       {NAME("narenas"),       CTL(arenas_narenas)},
+       {NAME("initialized"),   CTL(arenas_initialized)},
+       {NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)},
+       {NAME("decay_time"),    CTL(arenas_decay_time)},
+       {NAME("quantum"),       CTL(arenas_quantum)},
+       {NAME("page"),          CTL(arenas_page)},
+       {NAME("tcache_max"),    CTL(arenas_tcache_max)},
+       {NAME("nbins"),         CTL(arenas_nbins)},
+       {NAME("nhbins"),        CTL(arenas_nhbins)},
+       {NAME("bin"),           CHILD(indexed, arenas_bin)},
+       {NAME("nlruns"),        CTL(arenas_nlruns)},
+       {NAME("lrun"),          CHILD(indexed, arenas_lrun)},
+       {NAME("nhchunks"),      CTL(arenas_nhchunks)},
+       {NAME("hchunk"),        CHILD(indexed, arenas_hchunk)},
+       {NAME("extend"),        CTL(arenas_extend)}
 };
 
 static const ctl_named_node_t  prof_node[] = {
        {NAME("thread_active_init"), CTL(prof_thread_active_init)},
        {NAME("active"),        CTL(prof_active)},
        {NAME("dump"),          CTL(prof_dump)},
+       {NAME("gdump"),         CTL(prof_gdump)},
        {NAME("reset"),         CTL(prof_reset)},
        {NAME("interval"),      CTL(prof_interval)},
        {NAME("lg_sample"),     CTL(lg_prof_sample)}
 };
 
-static const ctl_named_node_t stats_chunks_node[] = {
-       {NAME("current"),               CTL(stats_chunks_current)},
-       {NAME("total"),                 CTL(stats_chunks_total)},
-       {NAME("high"),                  CTL(stats_chunks_high)}
+static const ctl_named_node_t stats_arenas_i_metadata_node[] = {
+       {NAME("mapped"),        CTL(stats_arenas_i_metadata_mapped)},
+       {NAME("allocated"),     CTL(stats_arenas_i_metadata_allocated)}
 };
 
 static const ctl_named_node_t stats_arenas_i_small_node[] = {
-       {NAME("allocated"),             CTL(stats_arenas_i_small_allocated)},
-       {NAME("nmalloc"),               CTL(stats_arenas_i_small_nmalloc)},
-       {NAME("ndalloc"),               CTL(stats_arenas_i_small_ndalloc)},
-       {NAME("nrequests"),             CTL(stats_arenas_i_small_nrequests)}
+       {NAME("allocated"),     CTL(stats_arenas_i_small_allocated)},
+       {NAME("nmalloc"),       CTL(stats_arenas_i_small_nmalloc)},
+       {NAME("ndalloc"),       CTL(stats_arenas_i_small_ndalloc)},
+       {NAME("nrequests"),     CTL(stats_arenas_i_small_nrequests)}
 };
 
 static const ctl_named_node_t stats_arenas_i_large_node[] = {
-       {NAME("allocated"),             CTL(stats_arenas_i_large_allocated)},
-       {NAME("nmalloc"),               CTL(stats_arenas_i_large_nmalloc)},
-       {NAME("ndalloc"),               CTL(stats_arenas_i_large_ndalloc)},
-       {NAME("nrequests"),             CTL(stats_arenas_i_large_nrequests)}
+       {NAME("allocated"),     CTL(stats_arenas_i_large_allocated)},
+       {NAME("nmalloc"),       CTL(stats_arenas_i_large_nmalloc)},
+       {NAME("ndalloc"),       CTL(stats_arenas_i_large_ndalloc)},
+       {NAME("nrequests"),     CTL(stats_arenas_i_large_nrequests)}
 };
 
 static const ctl_named_node_t stats_arenas_i_huge_node[] = {
-       {NAME("allocated"),             CTL(stats_arenas_i_huge_allocated)},
-       {NAME("nmalloc"),               CTL(stats_arenas_i_huge_nmalloc)},
-       {NAME("ndalloc"),               CTL(stats_arenas_i_huge_ndalloc)},
-       {NAME("nrequests"),             CTL(stats_arenas_i_huge_nrequests)},
+       {NAME("allocated"),     CTL(stats_arenas_i_huge_allocated)},
+       {NAME("nmalloc"),       CTL(stats_arenas_i_huge_nmalloc)},
+       {NAME("ndalloc"),       CTL(stats_arenas_i_huge_ndalloc)},
+       {NAME("nrequests"),     CTL(stats_arenas_i_huge_nrequests)}
 };
 
 static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
-       {NAME("allocated"),             CTL(stats_arenas_i_bins_j_allocated)},
-       {NAME("nmalloc"),               CTL(stats_arenas_i_bins_j_nmalloc)},
-       {NAME("ndalloc"),               CTL(stats_arenas_i_bins_j_ndalloc)},
-       {NAME("nrequests"),             CTL(stats_arenas_i_bins_j_nrequests)},
-       {NAME("nfills"),                CTL(stats_arenas_i_bins_j_nfills)},
-       {NAME("nflushes"),              CTL(stats_arenas_i_bins_j_nflushes)},
-       {NAME("nruns"),                 CTL(stats_arenas_i_bins_j_nruns)},
-       {NAME("nreruns"),               CTL(stats_arenas_i_bins_j_nreruns)},
-       {NAME("curruns"),               CTL(stats_arenas_i_bins_j_curruns)}
+       {NAME("nmalloc"),       CTL(stats_arenas_i_bins_j_nmalloc)},
+       {NAME("ndalloc"),       CTL(stats_arenas_i_bins_j_ndalloc)},
+       {NAME("nrequests"),     CTL(stats_arenas_i_bins_j_nrequests)},
+       {NAME("curregs"),       CTL(stats_arenas_i_bins_j_curregs)},
+       {NAME("nfills"),        CTL(stats_arenas_i_bins_j_nfills)},
+       {NAME("nflushes"),      CTL(stats_arenas_i_bins_j_nflushes)},
+       {NAME("nruns"),         CTL(stats_arenas_i_bins_j_nruns)},
+       {NAME("nreruns"),       CTL(stats_arenas_i_bins_j_nreruns)},
+       {NAME("curruns"),       CTL(stats_arenas_i_bins_j_curruns)}
 };
 static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
-       {NAME(""),                      CHILD(named, stats_arenas_i_bins_j)}
+       {NAME(""),              CHILD(named, stats_arenas_i_bins_j)}
 };
 
 static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
@@ -375,36 +421,54 @@ static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
 };
 
 static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
-       {NAME("nmalloc"),               CTL(stats_arenas_i_lruns_j_nmalloc)},
-       {NAME("ndalloc"),               CTL(stats_arenas_i_lruns_j_ndalloc)},
-       {NAME("nrequests"),             CTL(stats_arenas_i_lruns_j_nrequests)},
-       {NAME("curruns"),               CTL(stats_arenas_i_lruns_j_curruns)}
+       {NAME("nmalloc"),       CTL(stats_arenas_i_lruns_j_nmalloc)},
+       {NAME("ndalloc"),       CTL(stats_arenas_i_lruns_j_ndalloc)},
+       {NAME("nrequests"),     CTL(stats_arenas_i_lruns_j_nrequests)},
+       {NAME("curruns"),       CTL(stats_arenas_i_lruns_j_curruns)}
 };
 static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
-       {NAME(""),                      CHILD(named, stats_arenas_i_lruns_j)}
+       {NAME(""),              CHILD(named, stats_arenas_i_lruns_j)}
 };
 
 static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
        {INDEX(stats_arenas_i_lruns_j)}
 };
 
+static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = {
+       {NAME("nmalloc"),       CTL(stats_arenas_i_hchunks_j_nmalloc)},
+       {NAME("ndalloc"),       CTL(stats_arenas_i_hchunks_j_ndalloc)},
+       {NAME("nrequests"),     CTL(stats_arenas_i_hchunks_j_nrequests)},
+       {NAME("curhchunks"),    CTL(stats_arenas_i_hchunks_j_curhchunks)}
+};
+static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = {
+       {NAME(""),              CHILD(named, stats_arenas_i_hchunks_j)}
+};
+
+static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = {
+       {INDEX(stats_arenas_i_hchunks_j)}
+};
+
 static const ctl_named_node_t stats_arenas_i_node[] = {
-       {NAME("nthreads"),              CTL(stats_arenas_i_nthreads)},
-       {NAME("dss"),                   CTL(stats_arenas_i_dss)},
-       {NAME("pactive"),               CTL(stats_arenas_i_pactive)},
-       {NAME("pdirty"),                CTL(stats_arenas_i_pdirty)},
-       {NAME("mapped"),                CTL(stats_arenas_i_mapped)},
-       {NAME("npurge"),                CTL(stats_arenas_i_npurge)},
-       {NAME("nmadvise"),              CTL(stats_arenas_i_nmadvise)},
-       {NAME("purged"),                CTL(stats_arenas_i_purged)},
-       {NAME("small"),                 CHILD(named, stats_arenas_i_small)},
-       {NAME("large"),                 CHILD(named, stats_arenas_i_large)},
-       {NAME("huge"),                  CHILD(named, stats_arenas_i_huge)},
-       {NAME("bins"),                  CHILD(indexed, stats_arenas_i_bins)},
-       {NAME("lruns"),                 CHILD(indexed, stats_arenas_i_lruns)}
+       {NAME("nthreads"),      CTL(stats_arenas_i_nthreads)},
+       {NAME("dss"),           CTL(stats_arenas_i_dss)},
+       {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)},
+       {NAME("decay_time"),    CTL(stats_arenas_i_decay_time)},
+       {NAME("pactive"),       CTL(stats_arenas_i_pactive)},
+       {NAME("pdirty"),        CTL(stats_arenas_i_pdirty)},
+       {NAME("mapped"),        CTL(stats_arenas_i_mapped)},
+       {NAME("npurge"),        CTL(stats_arenas_i_npurge)},
+       {NAME("nmadvise"),      CTL(stats_arenas_i_nmadvise)},
+       {NAME("purged"),        CTL(stats_arenas_i_purged)},
+       {NAME("metadata"),      CHILD(named, stats_arenas_i_metadata)},
+       {NAME("small"),         CHILD(named, stats_arenas_i_small)},
+       {NAME("large"),         CHILD(named, stats_arenas_i_large)},
+       {NAME("huge"),          CHILD(named, stats_arenas_i_huge)},
+       {NAME("bins"),          CHILD(indexed, stats_arenas_i_bins)},
+       {NAME("lruns"),         CHILD(indexed, stats_arenas_i_lruns)},
+       {NAME("hchunks"),       CHILD(indexed, stats_arenas_i_hchunks)}
 };
 static const ctl_named_node_t super_stats_arenas_i_node[] = {
-       {NAME(""),                      CHILD(named, stats_arenas_i)}
+       {NAME(""),              CHILD(named, stats_arenas_i)}
 };
 
 static const ctl_indexed_node_t stats_arenas_node[] = {
@@ -412,12 +476,13 @@ static const ctl_indexed_node_t stats_arenas_node[] = {
 };
 
 static const ctl_named_node_t stats_node[] = {
-       {NAME("cactive"),               CTL(stats_cactive)},
-       {NAME("allocated"),             CTL(stats_allocated)},
-       {NAME("active"),                CTL(stats_active)},
-       {NAME("mapped"),                CTL(stats_mapped)},
-       {NAME("chunks"),                CHILD(named, stats_chunks)},
-       {NAME("arenas"),                CHILD(indexed, stats_arenas)}
+       {NAME("cactive"),       CTL(stats_cactive)},
+       {NAME("allocated"),     CTL(stats_allocated)},
+       {NAME("active"),        CTL(stats_active)},
+       {NAME("metadata"),      CTL(stats_metadata)},
+       {NAME("resident"),      CTL(stats_resident)},
+       {NAME("mapped"),        CTL(stats_mapped)},
+       {NAME("arenas"),        CHILD(indexed, stats_arenas)}
 };
 
 static const ctl_named_node_t  root_node[] = {
@@ -426,6 +491,7 @@ static const ctl_named_node_t       root_node[] = {
        {NAME("thread"),        CHILD(named, thread)},
        {NAME("config"),        CHILD(named, config)},
        {NAME("opt"),           CHILD(named, opt)},
+       {NAME("tcache"),        CHILD(named, tcache)},
        {NAME("arena"),         CHILD(indexed, arena)},
        {NAME("arenas"),        CHILD(named, arenas)},
        {NAME("prof"),          CHILD(named, prof)},
@@ -447,12 +513,19 @@ ctl_arena_init(ctl_arena_stats_t *astats)
 {
 
        if (astats->lstats == NULL) {
-               astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
+               astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses *
                    sizeof(malloc_large_stats_t));
                if (astats->lstats == NULL)
                        return (true);
        }
 
+       if (astats->hstats == NULL) {
+               astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
+                   sizeof(malloc_huge_stats_t));
+               if (astats->hstats == NULL)
+                       return (true);
+       }
+
        return (false);
 }
 
@@ -460,7 +533,10 @@ static void
 ctl_arena_clear(ctl_arena_stats_t *astats)
 {
 
+       astats->nthreads = 0;
        astats->dss = dss_prec_names[dss_prec_limit];
+       astats->lg_dirty_mult = -1;
+       astats->decay_time = -1;
        astats->pactive = 0;
        astats->pdirty = 0;
        if (config_stats) {
@@ -472,6 +548,8 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
                memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
                memset(astats->lstats, 0, nlclasses *
                    sizeof(malloc_large_stats_t));
+               memset(astats->hstats, 0, nhclasses *
+                   sizeof(malloc_huge_stats_t));
        }
 }
 
@@ -480,14 +558,23 @@ ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
 {
        unsigned i;
 
-       arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
-           &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);
-
-       for (i = 0; i < NBINS; i++) {
-               cstats->allocated_small += cstats->bstats[i].allocated;
-               cstats->nmalloc_small += cstats->bstats[i].nmalloc;
-               cstats->ndalloc_small += cstats->bstats[i].ndalloc;
-               cstats->nrequests_small += cstats->bstats[i].nrequests;
+       if (config_stats) {
+               arena_stats_merge(arena, &cstats->nthreads, &cstats->dss,
+                   &cstats->lg_dirty_mult, &cstats->decay_time,
+                   &cstats->pactive, &cstats->pdirty, &cstats->astats,
+                   cstats->bstats, cstats->lstats, cstats->hstats);
+
+               for (i = 0; i < NBINS; i++) {
+                       cstats->allocated_small += cstats->bstats[i].curregs *
+                           index2size(i);
+                       cstats->nmalloc_small += cstats->bstats[i].nmalloc;
+                       cstats->ndalloc_small += cstats->bstats[i].ndalloc;
+                       cstats->nrequests_small += cstats->bstats[i].nrequests;
+               }
+       } else {
+               arena_basic_stats_merge(arena, &cstats->nthreads, &cstats->dss,
+                   &cstats->lg_dirty_mult, &cstats->decay_time,
+                   &cstats->pactive, &cstats->pdirty);
        }
 }
 
@@ -496,49 +583,68 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
 {
        unsigned i;
 
+       sstats->nthreads += astats->nthreads;
        sstats->pactive += astats->pactive;
        sstats->pdirty += astats->pdirty;
 
-       sstats->astats.mapped += astats->astats.mapped;
-       sstats->astats.npurge += astats->astats.npurge;
-       sstats->astats.nmadvise += astats->astats.nmadvise;
-       sstats->astats.purged += astats->astats.purged;
-
-       sstats->allocated_small += astats->allocated_small;
-       sstats->nmalloc_small += astats->nmalloc_small;
-       sstats->ndalloc_small += astats->ndalloc_small;
-       sstats->nrequests_small += astats->nrequests_small;
-
-       sstats->astats.allocated_large += astats->astats.allocated_large;
-       sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
-       sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
-       sstats->astats.nrequests_large += astats->astats.nrequests_large;
-
-       sstats->astats.allocated_huge += astats->astats.allocated_huge;
-       sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
-       sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
-       sstats->astats.nrequests_huge += astats->astats.nrequests_huge;
-
-       for (i = 0; i < nlclasses; i++) {
-               sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
-               sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
-               sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
-               sstats->lstats[i].curruns += astats->lstats[i].curruns;
-       }
+       if (config_stats) {
+               sstats->astats.mapped += astats->astats.mapped;
+               sstats->astats.npurge += astats->astats.npurge;
+               sstats->astats.nmadvise += astats->astats.nmadvise;
+               sstats->astats.purged += astats->astats.purged;
+
+               sstats->astats.metadata_mapped +=
+                   astats->astats.metadata_mapped;
+               sstats->astats.metadata_allocated +=
+                   astats->astats.metadata_allocated;
+
+               sstats->allocated_small += astats->allocated_small;
+               sstats->nmalloc_small += astats->nmalloc_small;
+               sstats->ndalloc_small += astats->ndalloc_small;
+               sstats->nrequests_small += astats->nrequests_small;
+
+               sstats->astats.allocated_large +=
+                   astats->astats.allocated_large;
+               sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
+               sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
+               sstats->astats.nrequests_large +=
+                   astats->astats.nrequests_large;
+
+               sstats->astats.allocated_huge += astats->astats.allocated_huge;
+               sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
+               sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
+
+               for (i = 0; i < NBINS; i++) {
+                       sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
+                       sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
+                       sstats->bstats[i].nrequests +=
+                           astats->bstats[i].nrequests;
+                       sstats->bstats[i].curregs += astats->bstats[i].curregs;
+                       if (config_tcache) {
+                               sstats->bstats[i].nfills +=
+                                   astats->bstats[i].nfills;
+                               sstats->bstats[i].nflushes +=
+                                   astats->bstats[i].nflushes;
+                       }
+                       sstats->bstats[i].nruns += astats->bstats[i].nruns;
+                       sstats->bstats[i].reruns += astats->bstats[i].reruns;
+                       sstats->bstats[i].curruns += astats->bstats[i].curruns;
+               }
 
-       for (i = 0; i < NBINS; i++) {
-               sstats->bstats[i].allocated += astats->bstats[i].allocated;
-               sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
-               sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
-               sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
-               if (config_tcache) {
-                       sstats->bstats[i].nfills += astats->bstats[i].nfills;
-                       sstats->bstats[i].nflushes +=
-                           astats->bstats[i].nflushes;
+               for (i = 0; i < nlclasses; i++) {
+                       sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
+                       sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
+                       sstats->lstats[i].nrequests +=
+                           astats->lstats[i].nrequests;
+                       sstats->lstats[i].curruns += astats->lstats[i].curruns;
+               }
+
+               for (i = 0; i < nhclasses; i++) {
+                       sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
+                       sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
+                       sstats->hstats[i].curhchunks +=
+                           astats->hstats[i].curhchunks;
                }
-               sstats->bstats[i].nruns += astats->bstats[i].nruns;
-               sstats->bstats[i].reruns += astats->bstats[i].reruns;
-               sstats->bstats[i].curruns += astats->bstats[i].curruns;
        }
 }
 
@@ -549,49 +655,32 @@ ctl_arena_refresh(arena_t *arena, unsigned i)
        ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
 
        ctl_arena_clear(astats);
-
-       sstats->nthreads += astats->nthreads;
-       if (config_stats) {
-               ctl_arena_stats_amerge(astats, arena);
-               /* Merge into sum stats as well. */
-               ctl_arena_stats_smerge(sstats, astats);
-       } else {
-               astats->pactive += arena->nactive;
-               astats->pdirty += arena->ndirty;
-               /* Merge into sum stats as well. */
-               sstats->pactive += arena->nactive;
-               sstats->pdirty += arena->ndirty;
-       }
+       ctl_arena_stats_amerge(astats, arena);
+       /* Merge into sum stats as well. */
+       ctl_arena_stats_smerge(sstats, astats);
 }
 
 static bool
 ctl_grow(void)
 {
-       tsd_t *tsd;
        ctl_arena_stats_t *astats;
-       arena_t **tarenas;
 
-       tsd = tsd_fetch();
+       /* Initialize new arena. */
+       if (arena_init(ctl_stats.narenas) == NULL)
+               return (true);
 
-       /* Allocate extended arena stats and arenas arrays. */
-       astats = (ctl_arena_stats_t *)imalloc(tsd, (ctl_stats.narenas + 2) *
+       /* Allocate extended arena stats. */
+       astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) *
            sizeof(ctl_arena_stats_t));
        if (astats == NULL)
                return (true);
-       tarenas = (arena_t **)imalloc(tsd, (ctl_stats.narenas + 1) *
-           sizeof(arena_t *));
-       if (tarenas == NULL) {
-               idalloc(tsd, astats);
-               return (true);
-       }
 
        /* Initialize the new astats element. */
        memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
            sizeof(ctl_arena_stats_t));
        memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
        if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
-               idalloc(tsd, tarenas);
-               idalloc(tsd, astats);
+               a0dalloc(astats);
                return (true);
        }
        /* Swap merged stats to their new location. */
@@ -604,32 +693,7 @@ ctl_grow(void)
                memcpy(&astats[ctl_stats.narenas + 1], &tstats,
                    sizeof(ctl_arena_stats_t));
        }
-       /* Initialize the new arenas element. */
-       tarenas[ctl_stats.narenas] = NULL;
-       {
-               arena_t **arenas_old = arenas;
-               /*
-                * Swap extended arenas array into place.  Although ctl_mtx
-                * protects this function from other threads extending the
-                * array, it does not protect from other threads mutating it
-                * (i.e. initializing arenas and setting array elements to
-                * point to them).  Therefore, array copying must happen under
-                * the protection of arenas_lock.
-                */
-               malloc_mutex_lock(&arenas_lock);
-               arenas = tarenas;
-               memcpy(arenas, arenas_old, ctl_stats.narenas *
-                   sizeof(arena_t *));
-               narenas_total++;
-               arenas_extend(narenas_total - 1);
-               malloc_mutex_unlock(&arenas_lock);
-               /*
-                * Deallocate arenas_old only if it came from imalloc() (not
-                * base_alloc()).
-                */
-               if (ctl_stats.narenas != narenas_auto)
-                       idalloc(tsd, arenas_old);
-       }
+       a0dalloc(ctl_stats.arenas);
        ctl_stats.arenas = astats;
        ctl_stats.narenas++;
 
@@ -642,30 +706,15 @@ ctl_refresh(void)
        unsigned i;
        VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
 
-       if (config_stats) {
-               malloc_mutex_lock(&chunks_mtx);
-               ctl_stats.chunks.current = stats_chunks.curchunks;
-               ctl_stats.chunks.total = stats_chunks.nchunks;
-               ctl_stats.chunks.high = stats_chunks.highchunks;
-               malloc_mutex_unlock(&chunks_mtx);
-       }
-
        /*
         * Clear sum stats, since they will be merged into by
         * ctl_arena_refresh().
         */
-       ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
        ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
 
-       malloc_mutex_lock(&arenas_lock);
-       memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
-       for (i = 0; i < ctl_stats.narenas; i++) {
-               if (arenas[i] != NULL)
-                       ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
-               else
-                       ctl_stats.arenas[i].nthreads = 0;
-       }
-       malloc_mutex_unlock(&arenas_lock);
+       for (i = 0; i < ctl_stats.narenas; i++)
+               tarenas[i] = arena_get(i, false);
+
        for (i = 0; i < ctl_stats.narenas; i++) {
                bool initialized = (tarenas[i] != NULL);
 
@@ -675,13 +724,24 @@ ctl_refresh(void)
        }
 
        if (config_stats) {
+               size_t base_allocated, base_resident, base_mapped;
+               base_stats_get(&base_allocated, &base_resident, &base_mapped);
                ctl_stats.allocated =
-                   ctl_stats.arenas[ctl_stats.narenas].allocated_small
-                   + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
-                   + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
+                   ctl_stats.arenas[ctl_stats.narenas].allocated_small +
+                   ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
+                   ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
                ctl_stats.active =
                    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
-               ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
+               ctl_stats.metadata = base_allocated +
+                   ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
+                   ctl_stats.arenas[ctl_stats.narenas].astats
+                   .metadata_allocated;
+               ctl_stats.resident = base_resident +
+                   ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
+                   ((ctl_stats.arenas[ctl_stats.narenas].pactive +
+                   ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
+               ctl_stats.mapped = base_mapped +
+                   ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
        }
 
        ctl_epoch++;
@@ -698,9 +758,8 @@ ctl_init(void)
                 * Allocate space for one extra arena stats element, which
                 * contains summed stats across all arenas.
                 */
-               assert(narenas_auto == narenas_total_get());
-               ctl_stats.narenas = narenas_auto;
-               ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
+               ctl_stats.narenas = narenas_total_get();
+               ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc(
                    (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
                if (ctl_stats.arenas == NULL) {
                        ret = true;
@@ -718,6 +777,15 @@ ctl_init(void)
                        unsigned i;
                        for (i = 0; i <= ctl_stats.narenas; i++) {
                                if (ctl_arena_init(&ctl_stats.arenas[i])) {
+                                       unsigned j;
+                                       for (j = 0; j < i; j++) {
+                                               a0dalloc(
+                                                   ctl_stats.arenas[j].lstats);
+                                               a0dalloc(
+                                                   ctl_stats.arenas[j].hstats);
+                                       }
+                                       a0dalloc(ctl_stats.arenas);
+                                       ctl_stats.arenas = NULL;
                                        ret = true;
                                        goto label_return;
                                }
@@ -900,7 +968,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
                assert(node->nchildren > 0);
                if (ctl_named_node(node->children) != NULL) {
                        /* Children are named. */
-                       if (node->nchildren <= mib[i]) {
+                       if (node->nchildren <= (unsigned)mib[i]) {
                                ret = ENOENT;
                                goto label_return;
                        }
@@ -996,8 +1064,8 @@ ctl_postfork_child(void)
                        memcpy(oldp, (void *)&(v), copylen);            \
                        ret = EINVAL;                                   \
                        goto label_return;                              \
-               } else                                                  \
-                       *(t *)oldp = (v);                               \
+               }                                                       \
+               *(t *)oldp = (v);                                       \
        }                                                               \
 } while (0)
 
@@ -1139,17 +1207,17 @@ label_return:                                                           \
        return (ret);                                                   \
 }
 
-#define        CTL_RO_BOOL_CONFIG_GEN(n)                                       \
+#define        CTL_RO_CONFIG_GEN(n, t)                                         \
 static int                                                             \
 n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
     void *newp, size_t newlen)                                         \
 {                                                                      \
        int ret;                                                        \
-       bool oldval;                                                    \
+       t oldval;                                                       \
                                                                        \
        READONLY();                                                     \
        oldval = n;                                                     \
-       READ(oldval, bool);                                             \
+       READ(oldval, t);                                                \
                                                                        \
        ret = 0;                                                        \
 label_return:                                                          \
@@ -1181,29 +1249,33 @@ label_return:
 
 /******************************************************************************/
 
-CTL_RO_BOOL_CONFIG_GEN(config_debug)
-CTL_RO_BOOL_CONFIG_GEN(config_fill)
-CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
-CTL_RO_BOOL_CONFIG_GEN(config_munmap)
-CTL_RO_BOOL_CONFIG_GEN(config_prof)
-CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
-CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
-CTL_RO_BOOL_CONFIG_GEN(config_stats)
-CTL_RO_BOOL_CONFIG_GEN(config_tcache)
-CTL_RO_BOOL_CONFIG_GEN(config_tls)
-CTL_RO_BOOL_CONFIG_GEN(config_utrace)
-CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
-CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
+CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
+CTL_RO_CONFIG_GEN(config_debug, bool)
+CTL_RO_CONFIG_GEN(config_fill, bool)
+CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
+CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
+CTL_RO_CONFIG_GEN(config_munmap, bool)
+CTL_RO_CONFIG_GEN(config_prof, bool)
+CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
+CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
+CTL_RO_CONFIG_GEN(config_stats, bool)
+CTL_RO_CONFIG_GEN(config_tcache, bool)
+CTL_RO_CONFIG_GEN(config_tls, bool)
+CTL_RO_CONFIG_GEN(config_utrace, bool)
+CTL_RO_CONFIG_GEN(config_valgrind, bool)
+CTL_RO_CONFIG_GEN(config_xmalloc, bool)
 
 /******************************************************************************/
 
 CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
 CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
 CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
-CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
+CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
+CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *)
 CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
+CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
 CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
-CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
+CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
 CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
 CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
 CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
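
The hunks above switch several read-only mallctls to string values: opt.junk is now reported as a string rather than a bool, and config.malloc_conf is a new const char * node generated by the widened CTL_RO_CONFIG_GEN macro. A minimal caller-side sketch of reading such string ctls, assuming an unprefixed jemalloc build exposing mallctl() via <jemalloc/jemalloc.h>; the ctl names are inferred from the generator arguments above:

#include <jemalloc/jemalloc.h>
#include <stdio.h>

static void
print_string_ctl(const char *name)
{
        const char *val;
        size_t sz = sizeof(val);

        /* String-valued ctls hand back a pointer; READ() above copies it into oldp. */
        if (mallctl(name, &val, &sz, NULL, 0) == 0)
                printf("%s: %s\n", name, val);
}

/* e.g. print_string_ctl("opt.junk"); print_string_ctl("config.malloc_conf"); */
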
@@ -1231,16 +1303,20 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 {
        int ret;
        tsd_t *tsd;
+       arena_t *oldarena;
        unsigned newind, oldind;
 
        tsd = tsd_fetch();
+       oldarena = arena_choose(tsd, NULL);
+       if (oldarena == NULL)
+               return (EAGAIN);
 
        malloc_mutex_lock(&ctl_mtx);
-       newind = oldind = choose_arena(tsd, NULL)->ind;
+       newind = oldind = oldarena->ind;
        WRITE(newind, unsigned);
        READ(oldind, unsigned);
        if (newind != oldind) {
-               arena_t *arena;
+               arena_t *newarena;
 
                if (newind >= ctl_stats.narenas) {
                        /* New arena index is out of range. */
@@ -1249,28 +1325,20 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
                }
 
                /* Initialize arena if necessary. */
-               malloc_mutex_lock(&arenas_lock);
-               if ((arena = arenas[newind]) == NULL && (arena =
-                   arenas_extend(newind)) == NULL) {
-                       malloc_mutex_unlock(&arenas_lock);
+               newarena = arena_get(newind, true);
+               if (newarena == NULL) {
                        ret = EAGAIN;
                        goto label_return;
                }
-               assert(arena == arenas[newind]);
-               arenas[oldind]->nthreads--;
-               arenas[newind]->nthreads++;
-               malloc_mutex_unlock(&arenas_lock);
-
-               /* Set new arena association. */
+               /* Set new arena/tcache associations. */
+               arena_migrate(tsd, oldind, newind);
                if (config_tcache) {
                        tcache_t *tcache = tsd_tcache_get(tsd);
                        if (tcache != NULL) {
-                               tcache_arena_dissociate(tcache);
-                               tcache_arena_associate(tcache, arena);
+                               tcache_arena_reassociate(tcache, oldarena,
+                                   newarena);
                        }
                }
-
-               tsd_arena_set(tsd, arena);
        }
 
        ret = 0;
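
The rewritten handler above keeps the external thread.arena behavior (read the calling thread's arena index, optionally write a new one) while routing the switch through arena_get(), arena_migrate() and tcache_arena_reassociate(). A caller-side sketch, assuming an unprefixed build; the ctl name follows this file's <path>_ctl naming convention:

#include <jemalloc/jemalloc.h>

static int
switch_thread_arena(unsigned new_ind)
{
        unsigned old_ind;
        size_t sz = sizeof(old_ind);

        /* Reads the previous binding and migrates the calling thread to new_ind. */
        return (mallctl("thread.arena", &old_ind, &sz, &new_ind,
            sizeof(new_ind)));
}
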
@@ -1396,26 +1464,127 @@ label_return:
 
 /******************************************************************************/
 
-/* ctl_mutex must be held during execution of this function. */
+static int
+tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+       int ret;
+       tsd_t *tsd;
+       unsigned tcache_ind;
+
+       if (!config_tcache)
+               return (ENOENT);
+
+       tsd = tsd_fetch();
+
+       malloc_mutex_lock(&ctl_mtx);
+       READONLY();
+       if (tcaches_create(tsd, &tcache_ind)) {
+               ret = EFAULT;
+               goto label_return;
+       }
+       READ(tcache_ind, unsigned);
+
+       ret = 0;
+label_return:
+       malloc_mutex_unlock(&ctl_mtx);
+       return (ret);
+}
+
+static int
+tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+       int ret;
+       tsd_t *tsd;
+       unsigned tcache_ind;
+
+       if (!config_tcache)
+               return (ENOENT);
+
+       tsd = tsd_fetch();
+
+       WRITEONLY();
+       tcache_ind = UINT_MAX;
+       WRITE(tcache_ind, unsigned);
+       if (tcache_ind == UINT_MAX) {
+               ret = EFAULT;
+               goto label_return;
+       }
+       tcaches_flush(tsd, tcache_ind);
+
+       ret = 0;
+label_return:
+       return (ret);
+}
+
+static int
+tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+       int ret;
+       tsd_t *tsd;
+       unsigned tcache_ind;
+
+       if (!config_tcache)
+               return (ENOENT);
+
+       tsd = tsd_fetch();
+
+       WRITEONLY();
+       tcache_ind = UINT_MAX;
+       WRITE(tcache_ind, unsigned);
+       if (tcache_ind == UINT_MAX) {
+               ret = EFAULT;
+               goto label_return;
+       }
+       tcaches_destroy(tsd, tcache_ind);
+
+       ret = 0;
+label_return:
+       return (ret);
+}
+
+/******************************************************************************/
+
 static void
-arena_purge(unsigned arena_ind)
+arena_i_purge(unsigned arena_ind, bool all)
 {
-       VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
 
-       malloc_mutex_lock(&arenas_lock);
-       memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
-       malloc_mutex_unlock(&arenas_lock);
+       malloc_mutex_lock(&ctl_mtx);
+       {
+               unsigned narenas = ctl_stats.narenas;
+
+               if (arena_ind == narenas) {
+                       unsigned i;
+                       VARIABLE_ARRAY(arena_t *, tarenas, narenas);
+
+                       for (i = 0; i < narenas; i++)
+                               tarenas[i] = arena_get(i, false);
+
+                       /*
+                        * No further need to hold ctl_mtx, since narenas and
+                        * tarenas contain everything needed below.
+                        */
+                       malloc_mutex_unlock(&ctl_mtx);
+
+                       for (i = 0; i < narenas; i++) {
+                               if (tarenas[i] != NULL)
+                                       arena_purge(tarenas[i], all);
+                       }
+               } else {
+                       arena_t *tarena;
+
+                       assert(arena_ind < narenas);
+
+                       tarena = arena_get(arena_ind, false);
 
-       if (arena_ind == ctl_stats.narenas) {
-               unsigned i;
-               for (i = 0; i < ctl_stats.narenas; i++) {
-                       if (tarenas[i] != NULL)
-                               arena_purge_all(tarenas[i]);
+                       /* No further need to hold ctl_mtx. */
+                       malloc_mutex_unlock(&ctl_mtx);
+
+                       if (tarena != NULL)
+                               arena_purge(tarena, all);
                }
-       } else {
-               assert(arena_ind < ctl_stats.narenas);
-               if (tarenas[arena_ind] != NULL)
-                       arena_purge_all(tarenas[arena_ind]);
        }
 }
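
The three handlers added above back the new tcache.create, tcache.flush and tcache.destroy ctls, wired up via the tcache child added to root_node earlier in this diff. A caller-side sketch of explicit tcache management, assuming an unprefixed build; mallocx(), dallocx() and MALLOCX_TCACHE() are jemalloc's existing non-standard API, not something introduced by this change:

#include <jemalloc/jemalloc.h>

static void
explicit_tcache_example(void)
{
        unsigned tci;
        size_t sz = sizeof(tci);
        void *p;

        if (mallctl("tcache.create", &tci, &sz, NULL, 0) != 0)
                return;
        /* Allocate and free through the explicit cache. */
        p = mallocx(64, MALLOCX_TCACHE(tci));
        if (p != NULL)
                dallocx(p, MALLOCX_TCACHE(tci));
        mallctl("tcache.flush", NULL, NULL, &tci, sizeof(tci));
        mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
}
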
 
@@ -1427,9 +1596,22 @@ arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 
        READONLY();
        WRITEONLY();
-       malloc_mutex_lock(&ctl_mtx);
-       arena_purge(mib[1]);
-       malloc_mutex_unlock(&ctl_mtx);
+       arena_i_purge((unsigned)mib[1], true);
+
+       ret = 0;
+label_return:
+       return (ret);
+}
+
+static int
+arena_i_decay_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+       int ret;
+
+       READONLY();
+       WRITEONLY();
+       arena_i_purge((unsigned)mib[1], false);
 
        ret = 0;
 label_return:
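
Both wrappers above are READONLY() plus WRITEONLY(), i.e. pure trigger ctls invoked with no old or new data; per arena_i_purge() above, passing an index equal to arenas.narenas acts on every arena. A sketch, with the arena.<i>.purge and arena.<i>.decay names inferred from the handler names and this file's conventions:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
purge_arena(unsigned ind, int use_decay)
{
        char cmd[64];

        snprintf(cmd, sizeof(cmd), "arena.%u.%s", ind,
            use_decay ? "decay" : "purge");
        if (mallctl(cmd, NULL, NULL, NULL, 0) != 0)
                fprintf(stderr, "%s failed\n", cmd);
}
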
@@ -1442,7 +1624,7 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 {
        int ret;
        const char *dss = NULL;
-       unsigned arena_ind = mib[1];
+       unsigned arena_ind = (unsigned)mib[1];
        dss_prec_t dss_prec_old = dss_prec_limit;
        dss_prec_t dss_prec = dss_prec_limit;
 
@@ -1467,7 +1649,7 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
        }
 
        if (arena_ind < ctl_stats.narenas) {
-               arena_t *arena = arenas[arena_ind];
+               arena_t *arena = arena_get(arena_ind, false);
                if (arena == NULL || (dss_prec != dss_prec_limit &&
                    arena_dss_prec_set(arena, dss_prec))) {
                        ret = EFAULT;
@@ -1493,52 +1675,100 @@ label_return:
 }
 
 static int
-arena_i_chunk_alloc_ctl(const size_t *mib, size_t miblen, void *oldp,
+arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
     size_t *oldlenp, void *newp, size_t newlen)
 {
        int ret;
-       unsigned arena_ind = mib[1];
+       unsigned arena_ind = (unsigned)mib[1];
        arena_t *arena;
 
-       malloc_mutex_lock(&ctl_mtx);
-       if (arena_ind < narenas_total && (arena = arenas[arena_ind]) != NULL) {
-               malloc_mutex_lock(&arena->lock);
-               READ(arena->chunk_alloc, chunk_alloc_t *);
-               WRITE(arena->chunk_alloc, chunk_alloc_t *);
-       } else {
+       arena = arena_get(arena_ind, false);
+       if (arena == NULL) {
                ret = EFAULT;
-               goto label_outer_return;
+               goto label_return;
+       }
+
+       if (oldp != NULL && oldlenp != NULL) {
+               size_t oldval = arena_lg_dirty_mult_get(arena);
+               READ(oldval, ssize_t);
+       }
+       if (newp != NULL) {
+               if (newlen != sizeof(ssize_t)) {
+                       ret = EINVAL;
+                       goto label_return;
+               }
+               if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) {
+                       ret = EFAULT;
+                       goto label_return;
+               }
        }
+
        ret = 0;
 label_return:
-       malloc_mutex_unlock(&arena->lock);
-label_outer_return:
-       malloc_mutex_unlock(&ctl_mtx);
        return (ret);
 }
 
 static int
-arena_i_chunk_dalloc_ctl(const size_t *mib, size_t miblen, void *oldp,
+arena_i_decay_time_ctl(const size_t *mib, size_t miblen, void *oldp,
     size_t *oldlenp, void *newp, size_t newlen)
 {
+       int ret;
+       unsigned arena_ind = (unsigned)mib[1];
+       arena_t *arena;
+
+       arena = arena_get(arena_ind, false);
+       if (arena == NULL) {
+               ret = EFAULT;
+               goto label_return;
+       }
 
+       if (oldp != NULL && oldlenp != NULL) {
+               size_t oldval = arena_decay_time_get(arena);
+               READ(oldval, ssize_t);
+       }
+       if (newp != NULL) {
+               if (newlen != sizeof(ssize_t)) {
+                       ret = EINVAL;
+                       goto label_return;
+               }
+               if (arena_decay_time_set(arena, *(ssize_t *)newp)) {
+                       ret = EFAULT;
+                       goto label_return;
+               }
+       }
+
+       ret = 0;
+label_return:
+       return (ret);
+}
+
+static int
+arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
        int ret;
-       unsigned arena_ind = mib[1];
+       unsigned arena_ind = (unsigned)mib[1];
        arena_t *arena;
 
        malloc_mutex_lock(&ctl_mtx);
-       if (arena_ind < narenas_total && (arena = arenas[arena_ind]) != NULL) {
-               malloc_mutex_lock(&arena->lock);
-               READ(arena->chunk_dalloc, chunk_dalloc_t *);
-               WRITE(arena->chunk_dalloc, chunk_dalloc_t *);
+       if (arena_ind < narenas_total_get() && (arena =
+           arena_get(arena_ind, false)) != NULL) {
+               if (newp != NULL) {
+                       chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
+                       WRITE(new_chunk_hooks, chunk_hooks_t);
+                       old_chunk_hooks = chunk_hooks_set(arena,
+                           &new_chunk_hooks);
+                       READ(old_chunk_hooks, chunk_hooks_t);
+               } else {
+                       chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena);
+                       READ(old_chunk_hooks, chunk_hooks_t);
+               }
        } else {
                ret = EFAULT;
-               goto label_outer_return;
+               goto label_return;
        }
        ret = 0;
 label_return:
-       malloc_mutex_unlock(&arena->lock);
-label_outer_return:
        malloc_mutex_unlock(&ctl_mtx);
        return (ret);
 }
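
The lg_dirty_mult and decay_time handlers above take an ssize_t in both directions and reject writes whose newlen is not exactly sizeof(ssize_t). A read-then-write sketch for one arena, with the arena.0.decay_time name inferred from the handler name; the value 10 (seconds) is only an example:

#include <sys/types.h>
#include <jemalloc/jemalloc.h>

static void
adjust_decay_time(void)
{
        ssize_t decay_time;
        size_t sz = sizeof(decay_time);

        if (mallctl("arena.0.decay_time", &decay_time, &sz, NULL, 0) != 0)
                return;
        decay_time = 10;        /* example; -1 disables decay-based purging */
        /* newlen must be sizeof(ssize_t), or the handler returns EINVAL. */
        mallctl("arena.0.decay_time", NULL, NULL, &decay_time,
            sizeof(decay_time));
}
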
@@ -1596,7 +1826,7 @@ arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
        if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
                ret = EINVAL;
                nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
-                   ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
+                   ? (unsigned)(*oldlenp / sizeof(bool)) : ctl_stats.narenas;
        } else {
                ret = 0;
                nread = ctl_stats.narenas;
@@ -1610,6 +1840,58 @@ label_return:
        return (ret);
 }
 
+static int
+arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+       int ret;
+
+       if (oldp != NULL && oldlenp != NULL) {
+               size_t oldval = arena_lg_dirty_mult_default_get();
+               READ(oldval, ssize_t);
+       }
+       if (newp != NULL) {
+               if (newlen != sizeof(ssize_t)) {
+                       ret = EINVAL;
+                       goto label_return;
+               }
+               if (arena_lg_dirty_mult_default_set(*(ssize_t *)newp)) {
+                       ret = EFAULT;
+                       goto label_return;
+               }
+       }
+
+       ret = 0;
+label_return:
+       return (ret);
+}
+
+static int
+arenas_decay_time_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+       int ret;
+
+       if (oldp != NULL && oldlenp != NULL) {
+               size_t oldval = arena_decay_time_default_get();
+               READ(oldval, ssize_t);
+       }
+       if (newp != NULL) {
+               if (newlen != sizeof(ssize_t)) {
+                       ret = EINVAL;
+                       goto label_return;
+               }
+               if (arena_decay_time_default_set(*(ssize_t *)newp)) {
+                       ret = EFAULT;
+                       goto label_return;
+               }
+       }
+
+       ret = 0;
+label_return:
+       return (ret);
+}
+
 CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
 CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
 CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
@@ -1627,8 +1909,8 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
        return (super_arenas_bin_i_node);
 }
 
-CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
-CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
+CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
+CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
 static const ctl_named_node_t *
 arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
 {
@@ -1638,6 +1920,18 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
        return (super_arenas_lrun_i_node);
 }
 
+CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
+CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
+    size_t)
+static const ctl_named_node_t *
+arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+
+       if (i > nhclasses)
+               return (NULL);
+       return (super_arenas_hchunk_i_node);
+}
+
 static int
 arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen)
@@ -1735,6 +2029,31 @@ label_return:
        return (ret);
 }
 
+static int
+prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+       int ret;
+       bool oldval;
+
+       if (!config_prof)
+               return (ENOENT);
+
+       if (newp != NULL) {
+               if (newlen != sizeof(bool)) {
+                       ret = EINVAL;
+                       goto label_return;
+               }
+               oldval = prof_gdump_set(*(bool *)newp);
+       } else
+               oldval = prof_gdump_get();
+       READ(oldval, bool);
+
+       ret = 0;
+label_return:
+       return (ret);
+}
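
prof_gdump_ctl above makes the gdump flag toggleable at run time and returns ENOENT unless jemalloc was built with profiling support (--enable-prof). A sketch, with the prof.gdump name inferred from the handler name; <stdbool.h> is needed because the ctl traffics in bool:

#include <stdbool.h>
#include <jemalloc/jemalloc.h>

static bool
set_prof_gdump(bool enable)
{
        bool was_enabled = false;
        size_t sz = sizeof(was_enabled);

        /* ENOENT without profiling; otherwise the previous value is read back. */
        (void)mallctl("prof.gdump", &was_enabled, &sz, &enable, sizeof(enable));
        return (was_enabled);
}
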
+
 static int
 prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen)
@@ -1768,14 +2087,15 @@ CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
 CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
 CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
 CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
+CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
+CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
 CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
 
-CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
-    size_t)
-CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
-CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
-
 CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
+CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
+    ssize_t)
+CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time,
+    ssize_t)
 CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
 CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
 CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
@@ -1787,6 +2107,10 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
     ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
     ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped,
+    ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated,
+    ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t)
 
 CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
     ctl_stats.arenas[mib[2]].allocated_small, size_t)
@@ -1811,16 +2135,16 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
 CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
     ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
-    ctl_stats.arenas[mib[2]].astats.nrequests_huge, uint64_t)
+    ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. */
 
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
-    ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
+    ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t)
 CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
 CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
@@ -1859,6 +2183,25 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
        return (super_stats_arenas_i_lruns_j_node);
 }
 
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc,
+    ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc,
+    ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests,
+    ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */
+    uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
+    ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
+
+static const ctl_named_node_t *
+stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j)
+{
+
+       if (j > nhclasses)
+               return (NULL);
+       return (super_stats_arenas_i_hchunks_j_node);
+}
+
 static const ctl_named_node_t *
 stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
 {
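
This ends the ctl.c changes: the global stats gain stats.metadata and stats.resident next to the existing stats.allocated/active/mapped, the stats.chunks.* counters are gone, and per-arena hchunks nodes mirror the new huge-class stats. The snapshot in ctl_stats is only recomputed when the epoch ctl is written (see ctl_epoch++ at the end of the ctl_refresh() hunk above), so readers bump it first. A sketch, assuming an unprefixed build:

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static size_t
read_size_stat(const char *name)
{
        size_t val = 0, sz = sizeof(val);

        (void)mallctl(name, &val, &sz, NULL, 0);
        return (val);
}

static void
print_global_stats(void)
{
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);

        /* Advance the epoch so ctl_refresh() repopulates ctl_stats. */
        mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

        printf("allocated=%zu metadata=%zu resident=%zu mapped=%zu\n",
            read_size_stat("stats.allocated"),
            read_size_stat("stats.metadata"),
            read_size_stat("stats.resident"),
            read_size_stat("stats.mapped"));
}
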
index 8c09b486ed819a4af685488031ed17e70498fcfd..9f5146e5ff5c0477e3c772ffa800162367530452 100644 (file)
@@ -3,17 +3,32 @@
 
 /******************************************************************************/
 
-static inline int
-extent_szad_comp(extent_node_t *a, extent_node_t *b)
+JEMALLOC_INLINE_C size_t
+extent_quantize(size_t size)
 {
-       int ret;
-       size_t a_size = a->size;
-       size_t b_size = b->size;
 
-       ret = (a_size > b_size) - (a_size < b_size);
+       /*
+        * Round down to the nearest chunk size that can actually be requested
+        * during normal huge allocation.
+        */
+       return (index2size(size2index(size + 1) - 1));
+}
+
+JEMALLOC_INLINE_C int
+extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
+{
+       int ret;
+       size_t a_qsize = extent_quantize(extent_node_size_get(a));
+       size_t b_qsize = extent_quantize(extent_node_size_get(b));
+
+       /*
+        * Compare based on quantized size rather than size, in order to sort
+        * equally useful extents only by address.
+        */
+       ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
        if (ret == 0) {
-               uintptr_t a_addr = (uintptr_t)a->addr;
-               uintptr_t b_addr = (uintptr_t)b->addr;
+               uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+               uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
 
                ret = (a_addr > b_addr) - (a_addr < b_addr);
        }
@@ -22,18 +37,17 @@ extent_szad_comp(extent_node_t *a, extent_node_t *b)
 }
 
 /* Generate red-black tree functions. */
-rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
+rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
     extent_szad_comp)
 
-static inline int
-extent_ad_comp(extent_node_t *a, extent_node_t *b)
+JEMALLOC_INLINE_C int
+extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
 {
-       uintptr_t a_addr = (uintptr_t)a->addr;
-       uintptr_t b_addr = (uintptr_t)b->addr;
+       uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+       uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
 
        return ((a_addr > b_addr) - (a_addr < b_addr));
 }
 
 /* Generate red-black tree functions. */
-rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
-    extent_ad_comp)
+rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
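
The comparator changes above key the size/address tree on extent_quantize(size) rather than the raw extent size, so extents that can satisfy the same set of requests compare equal on size and are ordered only by address. A standalone, toy illustration of the round-down idea; the size-class table here is hypothetical and is not jemalloc's size2index()/index2size() machinery:

#include <stddef.h>

/* Hypothetical size classes, for illustration only. */
static const size_t toy_classes[] = { 4096, 8192, 12288, 16384, 20480 };

/* Round size down to the largest class <= size, mirroring the intent of
 * index2size(size2index(size + 1) - 1) above. */
static size_t
toy_quantize(size_t size)
{
        size_t q = toy_classes[0];
        size_t i;

        for (i = 0; i < sizeof(toy_classes) / sizeof(toy_classes[0]); i++) {
                if (toy_classes[i] <= size)
                        q = toy_classes[i];
        }
        return (q);
}
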
index 6bdc0767f177e58dbc9d401c66d0230562a00df4..5f7ceaf191c195facef3290c9a8a6439b33ca637 100644 (file)
@@ -2,42 +2,63 @@
 #include "jemalloc/internal/jemalloc_internal.h"
 
 /******************************************************************************/
-/* Data. */
 
-/* Protects chunk-related data structures. */
-static malloc_mutex_t  huge_mtx;
+static extent_node_t *
+huge_node_get(const void *ptr)
+{
+       extent_node_t *node;
 
-/******************************************************************************/
+       node = chunk_lookup(ptr, true);
+       assert(!extent_node_achunk_get(node));
+
+       return (node);
+}
+
+static bool
+huge_node_set(const void *ptr, extent_node_t *node)
+{
+
+       assert(extent_node_addr_get(node) == ptr);
+       assert(!extent_node_achunk_get(node));
+       return (chunk_register(ptr, node));
+}
 
-/* Tree of chunks that are stand-alone huge allocations. */
-static extent_tree_t   huge;
+static void
+huge_node_unset(const void *ptr, const extent_node_t *node)
+{
+
+       chunk_deregister(ptr, node);
+}
 
 void *
-huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero)
+huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
+    tcache_t *tcache)
 {
 
-       return (huge_palloc(tsd, arena, size, chunksize, zero));
+       assert(usize == s2u(usize));
+
+       return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
 }
 
 void *
-huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
-    bool zero)
+huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
+    bool zero, tcache_t *tcache)
 {
        void *ret;
-       size_t csize;
+       size_t ausize;
        extent_node_t *node;
        bool is_zeroed;
 
        /* Allocate one or more contiguous chunks for this request. */
 
-       csize = CHUNK_CEILING(size);
-       if (csize == 0) {
-               /* size is large enough to cause size_t wrap-around. */
+       ausize = sa2u(usize, alignment);
+       if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
                return (NULL);
-       }
+       assert(ausize >= chunksize);
 
        /* Allocate an extent node with which to track the chunk. */
-       node = base_node_alloc();
+       node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
+           CACHELINE, false, tcache, true, arena);
        if (node == NULL)
                return (NULL);
 
@@ -46,29 +67,34 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
         * it is possible to make correct junk/zero fill decisions below.
         */
        is_zeroed = zero;
-       arena = choose_arena(tsd, arena);
-       ret = arena_chunk_alloc_huge(arena, NULL, csize, alignment, &is_zeroed);
-       if (ret == NULL) {
-               base_node_dalloc(node);
+       arena = arena_choose(tsd, arena);
+       if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
+           usize, alignment, &is_zeroed)) == NULL) {
+               idalloctm(tsd, node, tcache, true, true);
                return (NULL);
        }
 
-       /* Insert node into huge. */
-       node->addr = ret;
-       node->size = csize;
-       node->arena = arena;
-
-       malloc_mutex_lock(&huge_mtx);
-       extent_tree_ad_insert(&huge, node);
-       malloc_mutex_unlock(&huge_mtx);
-
-       if (config_fill && !zero) {
-               if (unlikely(opt_junk))
-                       memset(ret, 0xa5, csize);
-               else if (unlikely(opt_zero) && !is_zeroed)
-                       memset(ret, 0, csize);
+       extent_node_init(node, arena, ret, usize, is_zeroed, true);
+
+       if (huge_node_set(ret, node)) {
+               arena_chunk_dalloc_huge(arena, ret, usize);
+               idalloctm(tsd, node, tcache, true, true);
+               return (NULL);
        }
 
+       /* Insert node into huge. */
+       malloc_mutex_lock(&arena->huge_mtx);
+       ql_elm_new(node, ql_link);
+       ql_tail_insert(&arena->huge, node, ql_link);
+       malloc_mutex_unlock(&arena->huge_mtx);
+
+       if (zero || (config_fill && unlikely(opt_zero))) {
+               if (!is_zeroed)
+                       memset(ret, 0, usize);
+       } else if (config_fill && unlikely(opt_junk_alloc))
+               memset(ret, 0xa5, usize);
+
+       arena_decay_tick(tsd, arena);
        return (ret);
 }
 
@@ -80,7 +106,7 @@ static void
 huge_dalloc_junk(void *ptr, size_t usize)
 {
 
-       if (config_fill && have_dss && unlikely(opt_junk)) {
+       if (config_fill && have_dss && unlikely(opt_junk_free)) {
                /*
                 * Only bother junk filling if the chunk isn't about to be
                 * unmapped.
@@ -95,276 +121,328 @@ huge_dalloc_junk(void *ptr, size_t usize)
 huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
 #endif
 
+static void
+huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero)
+{
+       size_t usize, usize_next;
+       extent_node_t *node;
+       arena_t *arena;
+       chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+       bool pre_zeroed, post_zeroed;
+
+       /* Increase usize to incorporate extra. */
+       for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
+           <= oldsize; usize = usize_next)
+               ; /* Do nothing. */
+
+       if (oldsize == usize)
+               return;
+
+       node = huge_node_get(ptr);
+       arena = extent_node_arena_get(node);
+       pre_zeroed = extent_node_zeroed_get(node);
+
+       /* Fill if necessary (shrinking). */
+       if (oldsize > usize) {
+               size_t sdiff = oldsize - usize;
+               if (config_fill && unlikely(opt_junk_free)) {
+                       memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
+                       post_zeroed = false;
+               } else {
+                       post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
+                           ptr, CHUNK_CEILING(oldsize), usize, sdiff);
+               }
+       } else
+               post_zeroed = pre_zeroed;
+
+       malloc_mutex_lock(&arena->huge_mtx);
+       /* Update the size of the huge allocation. */
+       assert(extent_node_size_get(node) != usize);
+       extent_node_size_set(node, usize);
+       /* Update zeroed. */
+       extent_node_zeroed_set(node, post_zeroed);
+       malloc_mutex_unlock(&arena->huge_mtx);
+
+       arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
+
+       /* Fill if necessary (growing). */
+       if (oldsize < usize) {
+               if (zero || (config_fill && unlikely(opt_zero))) {
+                       if (!pre_zeroed) {
+                               memset((void *)((uintptr_t)ptr + oldsize), 0,
+                                   usize - oldsize);
+                       }
+               } else if (config_fill && unlikely(opt_junk_alloc)) {
+                       memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
+                           oldsize);
+               }
+       }
+}
+
 static bool
-huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
-       size_t csize;
-       void *expand_addr;
-       size_t expand_size;
-       extent_node_t *node, key;
+huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
+{
+       extent_node_t *node;
        arena_t *arena;
-       bool is_zeroed;
-       void *ret;
+       chunk_hooks_t chunk_hooks;
+       size_t cdiff;
+       bool pre_zeroed, post_zeroed;
 
-       csize = CHUNK_CEILING(size);
-       if (csize == 0) {
-               /* size is large enough to cause size_t wrap-around. */
+       node = huge_node_get(ptr);
+       arena = extent_node_arena_get(node);
+       pre_zeroed = extent_node_zeroed_get(node);
+       chunk_hooks = chunk_hooks_get(arena);
+
+       assert(oldsize > usize);
+
+       /* Split excess chunks. */
+       cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
+       if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
+           CHUNK_CEILING(usize), cdiff, true, arena->ind))
                return (true);
-       }
 
-       expand_addr = ptr + oldsize;
-       expand_size = csize - oldsize;
+       if (oldsize > usize) {
+               size_t sdiff = oldsize - usize;
+               if (config_fill && unlikely(opt_junk_free)) {
+                       huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
+                           sdiff);
+                       post_zeroed = false;
+               } else {
+                       post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
+                           CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
+                           CHUNK_CEILING(oldsize),
+                           CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
+               }
+       } else
+               post_zeroed = pre_zeroed;
+
+       malloc_mutex_lock(&arena->huge_mtx);
+       /* Update the size of the huge allocation. */
+       extent_node_size_set(node, usize);
+       /* Update zeroed. */
+       extent_node_zeroed_set(node, post_zeroed);
+       malloc_mutex_unlock(&arena->huge_mtx);
 
-       malloc_mutex_lock(&huge_mtx);
+       /* Zap the excess chunks. */
+       arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
 
-       key.addr = ptr;
-       node = extent_tree_ad_search(&huge, &key);
-       assert(node != NULL);
-       assert(node->addr == ptr);
+       return (false);
+}
 
-       /* Find the current arena. */
-       arena = node->arena;
+static bool
+huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
+       extent_node_t *node;
+       arena_t *arena;
+       bool is_zeroed_subchunk, is_zeroed_chunk;
 
-       malloc_mutex_unlock(&huge_mtx);
+       node = huge_node_get(ptr);
+       arena = extent_node_arena_get(node);
+       malloc_mutex_lock(&arena->huge_mtx);
+       is_zeroed_subchunk = extent_node_zeroed_get(node);
+       malloc_mutex_unlock(&arena->huge_mtx);
 
        /*
-        * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
-        * it is possible to make correct junk/zero fill decisions below.
+        * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
+        * that it is possible to make correct junk/zero fill decisions below.
         */
-       is_zeroed = zero;
-       ret = arena_chunk_alloc_huge(arena, expand_addr, expand_size, chunksize,
-                                    &is_zeroed);
-       if (ret == NULL)
-               return (true);
+       is_zeroed_chunk = zero;
 
-       assert(ret == expand_addr);
+       if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
+            &is_zeroed_chunk))
+               return (true);
 
-       malloc_mutex_lock(&huge_mtx);
+       malloc_mutex_lock(&arena->huge_mtx);
        /* Update the size of the huge allocation. */
-       node->size = csize;
-       malloc_mutex_unlock(&huge_mtx);
-
-       if (config_fill && !zero) {
-               if (unlikely(opt_junk))
-                       memset(expand_addr, 0xa5, expand_size);
-               else if (unlikely(opt_zero) && !is_zeroed)
-                       memset(expand_addr, 0, expand_size);
+       extent_node_size_set(node, usize);
+       malloc_mutex_unlock(&arena->huge_mtx);
+
+       if (zero || (config_fill && unlikely(opt_zero))) {
+               if (!is_zeroed_subchunk) {
+                       memset((void *)((uintptr_t)ptr + oldsize), 0,
+                           CHUNK_CEILING(oldsize) - oldsize);
+               }
+               if (!is_zeroed_chunk) {
+                       memset((void *)((uintptr_t)ptr +
+                           CHUNK_CEILING(oldsize)), 0, usize -
+                           CHUNK_CEILING(oldsize));
+               }
+       } else if (config_fill && unlikely(opt_junk_alloc)) {
+               memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
+                   oldsize);
        }
+
        return (false);
 }
 
 bool
-huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
-    bool zero)
+huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero)
 {
 
+       assert(s2u(oldsize) == oldsize);
+       /* The following should have been caught by callers. */
+       assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
+
        /* Both allocations must be huge to avoid a move. */
-       if (oldsize <= arena_maxclass)
+       if (oldsize < chunksize || usize_max < chunksize)
                return (true);
 
-       assert(CHUNK_CEILING(oldsize) == oldsize);
+       if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
+               /* Attempt to expand the allocation in-place. */
+               if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max,
+                   zero)) {
+                       arena_decay_tick(tsd, huge_aalloc(ptr));
+                       return (false);
+               }
+               /* Try again, this time with usize_min. */
+               if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
+                   CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
+                   oldsize, usize_min, zero)) {
+                       arena_decay_tick(tsd, huge_aalloc(ptr));
+                       return (false);
+               }
+       }
 
        /*
-        * Avoid moving the allocation if the size class can be left the same.
+        * Avoid moving the allocation if the existing chunk size accommodates
+        * the new size.
         */
-       if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
-           && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
+       if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
+           && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
+               huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
+                   zero);
+               arena_decay_tick(tsd, huge_aalloc(ptr));
                return (false);
        }
 
-       /* Overflow. */
-       if (CHUNK_CEILING(size) == 0)
-               return (true);
-
-       /* Shrink the allocation in-place. */
-       if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(size)) {
-               extent_node_t *node, key;
-               void *excess_addr;
-               size_t excess_size;
-
-               malloc_mutex_lock(&huge_mtx);
-
-               key.addr = ptr;
-               node = extent_tree_ad_search(&huge, &key);
-               assert(node != NULL);
-               assert(node->addr == ptr);
-
-               /* Update the size of the huge allocation. */
-               node->size = CHUNK_CEILING(size);
-
-               malloc_mutex_unlock(&huge_mtx);
-
-               excess_addr = node->addr + CHUNK_CEILING(size);
-               excess_size = CHUNK_CEILING(oldsize) - CHUNK_CEILING(size);
-
-               /* Zap the excess chunks. */
-               huge_dalloc_junk(excess_addr, excess_size);
-               arena_chunk_dalloc_huge(node->arena, excess_addr, excess_size);
-
-               return (false);
+       /* Attempt to shrink the allocation in-place. */
+       if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
+               if (!huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)) {
+                       arena_decay_tick(tsd, huge_aalloc(ptr));
+                       return (false);
+               }
        }
+       return (true);
+}
 
-       /* Attempt to expand the allocation in-place. */
-       if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra, zero)) {
-               if (extra == 0)
-                       return (true);
+static void *
+huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache)
+{
 
-               /* Try again, this time without extra. */
-               return (huge_ralloc_no_move_expand(ptr, oldsize, size, zero));
-       }
-       return (false);
+       if (alignment <= chunksize)
+               return (huge_malloc(tsd, arena, usize, zero, tcache));
+       return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
 }
 
 void *
-huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc)
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache)
 {
        void *ret;
        size_t copysize;
 
+       /* The following should have been caught by callers. */
+       assert(usize > 0 && usize <= HUGE_MAXCLASS);
+
        /* Try to avoid moving the allocation. */
-       if (!huge_ralloc_no_move(ptr, oldsize, size, extra, zero))
+       if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero))
                return (ptr);
 
        /*
-        * size and oldsize are different enough that we need to use a
+        * usize and oldsize are different enough that we need to use a
         * different size class.  In that case, fall back to allocating new
         * space and copying.
         */
-       if (alignment > chunksize)
-               ret = huge_palloc(tsd, arena, size + extra, alignment, zero);
-       else
-               ret = huge_malloc(tsd, arena, size + extra, zero);
-
-       if (ret == NULL) {
-               if (extra == 0)
-                       return (NULL);
-               /* Try again, this time without extra. */
-               if (alignment > chunksize)
-                       ret = huge_palloc(tsd, arena, size, alignment, zero);
-               else
-                       ret = huge_malloc(tsd, arena, size, zero);
-
-               if (ret == NULL)
-                       return (NULL);
-       }
+       ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
+           tcache);
+       if (ret == NULL)
+               return (NULL);
 
-       /*
-        * Copy at most size bytes (not size+extra), since the caller has no
-        * expectation that the extra bytes will be reliably preserved.
-        */
-       copysize = (size < oldsize) ? size : oldsize;
+       copysize = (usize < oldsize) ? usize : oldsize;
        memcpy(ret, ptr, copysize);
-       iqalloc(tsd, ptr, try_tcache_dalloc);
+       isqalloc(tsd, ptr, oldsize, tcache);
        return (ret);
 }
 
 void
-huge_dalloc(void *ptr)
+huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 {
-       extent_node_t *node, key;
+       extent_node_t *node;
+       arena_t *arena;
 
-       malloc_mutex_lock(&huge_mtx);
+       node = huge_node_get(ptr);
+       arena = extent_node_arena_get(node);
+       huge_node_unset(ptr, node);
+       malloc_mutex_lock(&arena->huge_mtx);
+       ql_remove(&arena->huge, node, ql_link);
+       malloc_mutex_unlock(&arena->huge_mtx);
 
-       /* Extract from tree of huge allocations. */
-       key.addr = ptr;
-       node = extent_tree_ad_search(&huge, &key);
-       assert(node != NULL);
-       assert(node->addr == ptr);
-       extent_tree_ad_remove(&huge, node);
+       huge_dalloc_junk(extent_node_addr_get(node),
+           extent_node_size_get(node));
+       arena_chunk_dalloc_huge(extent_node_arena_get(node),
+           extent_node_addr_get(node), extent_node_size_get(node));
+       idalloctm(tsd, node, tcache, true, true);
 
-       malloc_mutex_unlock(&huge_mtx);
+       arena_decay_tick(tsd, arena);
+}
 
-       huge_dalloc_junk(node->addr, node->size);
-       arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
-       base_node_dalloc(node);
+arena_t *
+huge_aalloc(const void *ptr)
+{
+
+       return (extent_node_arena_get(huge_node_get(ptr)));
 }
 
 size_t
 huge_salloc(const void *ptr)
 {
-       size_t ret;
-       extent_node_t *node, key;
-
-       malloc_mutex_lock(&huge_mtx);
-
-       /* Extract from tree of huge allocations. */
-       key.addr = __DECONST(void *, ptr);
-       node = extent_tree_ad_search(&huge, &key);
-       assert(node != NULL);
-
-       ret = node->size;
+       size_t size;
+       extent_node_t *node;
+       arena_t *arena;
 
-       malloc_mutex_unlock(&huge_mtx);
+       node = huge_node_get(ptr);
+       arena = extent_node_arena_get(node);
+       malloc_mutex_lock(&arena->huge_mtx);
+       size = extent_node_size_get(node);
+       malloc_mutex_unlock(&arena->huge_mtx);
 
-       return (ret);
+       return (size);
 }
 
 prof_tctx_t *
 huge_prof_tctx_get(const void *ptr)
 {
-       prof_tctx_t *ret;
-       extent_node_t *node, key;
-
-       malloc_mutex_lock(&huge_mtx);
-
-       /* Extract from tree of huge allocations. */
-       key.addr = __DECONST(void *, ptr);
-       node = extent_tree_ad_search(&huge, &key);
-       assert(node != NULL);
-
-       ret = node->prof_tctx;
+       prof_tctx_t *tctx;
+       extent_node_t *node;
+       arena_t *arena;
 
-       malloc_mutex_unlock(&huge_mtx);
+       node = huge_node_get(ptr);
+       arena = extent_node_arena_get(node);
+       malloc_mutex_lock(&arena->huge_mtx);
+       tctx = extent_node_prof_tctx_get(node);
+       malloc_mutex_unlock(&arena->huge_mtx);
 
-       return (ret);
+       return (tctx);
 }
 
 void
 huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
 {
-       extent_node_t *node, key;
-
-       malloc_mutex_lock(&huge_mtx);
-
-       /* Extract from tree of huge allocations. */
-       key.addr = __DECONST(void *, ptr);
-       node = extent_tree_ad_search(&huge, &key);
-       assert(node != NULL);
-
-       node->prof_tctx = tctx;
-
-       malloc_mutex_unlock(&huge_mtx);
-}
-
-bool
-huge_boot(void)
-{
-
-       /* Initialize chunks data. */
-       if (malloc_mutex_init(&huge_mtx))
-               return (true);
-       extent_tree_ad_new(&huge);
-
-       return (false);
-}
-
-void
-huge_prefork(void)
-{
-
-       malloc_mutex_prefork(&huge_mtx);
-}
-
-void
-huge_postfork_parent(void)
-{
+       extent_node_t *node;
+       arena_t *arena;
 
-       malloc_mutex_postfork_parent(&huge_mtx);
+       node = huge_node_get(ptr);
+       arena = extent_node_arena_get(node);
+       malloc_mutex_lock(&arena->huge_mtx);
+       extent_node_prof_tctx_set(node, tctx);
+       malloc_mutex_unlock(&arena->huge_mtx);
 }
 
 void
-huge_postfork_child(void)
+huge_prof_tctx_reset(const void *ptr)
 {
 
-       malloc_mutex_postfork_child(&huge_mtx);
+       huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
 }
index 3490ecdf6c291f4b5f39375099a4f33aa5365197..0735376ec01f5b88829a3a312226644e5d80009b 100644 (file)
@@ -4,8 +4,6 @@
 /******************************************************************************/
 /* Data. */
 
-malloc_tsd_data(, arenas, arena_t *, NULL)
-
 /* Runtime configuration options. */
 const char     *je_malloc_conf JEMALLOC_ATTR(weak);
 bool   opt_abort =
@@ -15,32 +13,169 @@ bool       opt_abort =
     false
 #endif
     ;
-bool   opt_junk =
+const char     *opt_junk =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+    "true"
+#else
+    "false"
+#endif
+    ;
+bool   opt_junk_alloc =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+    true
+#else
+    false
+#endif
+    ;
+bool   opt_junk_free =
 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
     true
 #else
     false
 #endif
     ;
+
 size_t opt_quarantine = ZU(0);
 bool   opt_redzone = false;
 bool   opt_utrace = false;
 bool   opt_xmalloc = false;
 bool   opt_zero = false;
-size_t opt_narenas = 0;
+unsigned       opt_narenas = 0;
 
 /* Initialized to true if the process is running inside Valgrind. */
 bool   in_valgrind;
 
 unsigned       ncpus;
 
-malloc_mutex_t         arenas_lock;
+/* Protects arenas initialization. */
+static malloc_mutex_t  arenas_lock;
+/*
+ * Arenas that are used to service external requests.  Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ *
+ * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
+ * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
+ * takes some action to create them and allocate from them.
+ */
 arena_t                        **arenas;
-unsigned               narenas_total;
-unsigned               narenas_auto;
-
-/* Set to true once the allocator has been initialized. */
-static bool            malloc_initialized = false;
+static unsigned                narenas_total; /* Use narenas_total_*(). */
+static arena_t         *a0; /* arenas[0]; read-only after initialization. */
+static unsigned                narenas_auto; /* Read-only after initialization. */
+
+typedef enum {
+       malloc_init_uninitialized       = 3,
+       malloc_init_a0_initialized      = 2,
+       malloc_init_recursible          = 1,
+       malloc_init_initialized         = 0 /* Common case --> jnz. */
+} malloc_init_t;
+static malloc_init_t   malloc_init_state = malloc_init_uninitialized;
+
+/*
+ * false is the common case.  Starts true so that early calls take the slow
+ * path, which performs initialization.
+ */
+static bool    malloc_slow = true;
+
+/* Records which options forced malloc_slow, for sanity checking. */
+enum {
+       flag_opt_junk_alloc     = (1U),
+       flag_opt_junk_free      = (1U << 1),
+       flag_opt_quarantine     = (1U << 2),
+       flag_opt_zero           = (1U << 3),
+       flag_opt_utrace         = (1U << 4),
+       flag_in_valgrind        = (1U << 5),
+       flag_opt_xmalloc        = (1U << 6)
+};
+static uint8_t malloc_slow_flags;
+
+/* Last entry for overflow detection only.  */
+JEMALLOC_ALIGNED(CACHELINE)
+const size_t   index2size_tab[NSIZES+1] = {
+#define        SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+       ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
+       SIZE_CLASSES
+#undef SC
+       ZU(0)
+};
+
+JEMALLOC_ALIGNED(CACHELINE)
+const uint8_t  size2index_tab[] = {
+#if LG_TINY_MIN == 0
+#warning "Dangerous LG_TINY_MIN"
+#define        S2B_0(i)        i,
+#elif LG_TINY_MIN == 1
+#warning "Dangerous LG_TINY_MIN"
+#define        S2B_1(i)        i,
+#elif LG_TINY_MIN == 2
+#warning "Dangerous LG_TINY_MIN"
+#define        S2B_2(i)        i,
+#elif LG_TINY_MIN == 3
+#define        S2B_3(i)        i,
+#elif LG_TINY_MIN == 4
+#define        S2B_4(i)        i,
+#elif LG_TINY_MIN == 5
+#define        S2B_5(i)        i,
+#elif LG_TINY_MIN == 6
+#define        S2B_6(i)        i,
+#elif LG_TINY_MIN == 7
+#define        S2B_7(i)        i,
+#elif LG_TINY_MIN == 8
+#define        S2B_8(i)        i,
+#elif LG_TINY_MIN == 9
+#define        S2B_9(i)        i,
+#elif LG_TINY_MIN == 10
+#define        S2B_10(i)       i,
+#elif LG_TINY_MIN == 11
+#define        S2B_11(i)       i,
+#else
+#error "Unsupported LG_TINY_MIN"
+#endif
+#if LG_TINY_MIN < 1
+#define        S2B_1(i)        S2B_0(i) S2B_0(i)
+#endif
+#if LG_TINY_MIN < 2
+#define        S2B_2(i)        S2B_1(i) S2B_1(i)
+#endif
+#if LG_TINY_MIN < 3
+#define        S2B_3(i)        S2B_2(i) S2B_2(i)
+#endif
+#if LG_TINY_MIN < 4
+#define        S2B_4(i)        S2B_3(i) S2B_3(i)
+#endif
+#if LG_TINY_MIN < 5
+#define        S2B_5(i)        S2B_4(i) S2B_4(i)
+#endif
+#if LG_TINY_MIN < 6
+#define        S2B_6(i)        S2B_5(i) S2B_5(i)
+#endif
+#if LG_TINY_MIN < 7
+#define        S2B_7(i)        S2B_6(i) S2B_6(i)
+#endif
+#if LG_TINY_MIN < 8
+#define        S2B_8(i)        S2B_7(i) S2B_7(i)
+#endif
+#if LG_TINY_MIN < 9
+#define        S2B_9(i)        S2B_8(i) S2B_8(i)
+#endif
+#if LG_TINY_MIN < 10
+#define        S2B_10(i)       S2B_9(i) S2B_9(i)
+#endif
+#if LG_TINY_MIN < 11
+#define        S2B_11(i)       S2B_10(i) S2B_10(i)
+#endif
+#define        S2B_no(i)
+#define        SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+       S2B_##lg_delta_lookup(index)
+       SIZE_CLASSES
+#undef S2B_3
+#undef S2B_4
+#undef S2B_5
+#undef S2B_6
+#undef S2B_7
+#undef S2B_8
+#undef S2B_9
+#undef S2B_10
+#undef S2B_11
+#undef S2B_no
+#undef SC
+};
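/*
 * Lookup sketch (assumes the common LG_TINY_MIN == 3 configuration): the
 * table above stores one entry per (1 << LG_TINY_MIN)-byte granule, and the
 * inline lookup helper indexes it roughly as
 * size2index_tab[(size - 1) >> LG_TINY_MIN], so with 8-byte tiny classes a
 * 24-byte request probes slot (24 - 1) >> 3 == 2.
 */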
 
 #ifdef JEMALLOC_THREADED_INIT
 /* Used to let the initializing thread recursively allocate. */
@@ -57,14 +192,28 @@ static bool                        malloc_initializer = NO_INITIALIZER;
 
 /* Used to avoid initialization races. */
 #ifdef _WIN32
+#if _WIN32_WINNT >= 0x0600
+static malloc_mutex_t  init_lock = SRWLOCK_INIT;
+#else
 static malloc_mutex_t  init_lock;
+static bool init_lock_initialized = false;
 
 JEMALLOC_ATTR(constructor)
 static void WINAPI
 _init_init_lock(void)
 {
 
-       malloc_mutex_init(&init_lock);
+       /* If another constructor in the same binary is using mallctl to
+        * e.g. set up chunk hooks, it may end up running before this one,
+        * and malloc_init_hard will crash trying to lock the uninitialized
+        * lock.  So we force an initialization of the lock in
+        * malloc_init_hard as well.  We don't worry about atomicity of the
+        * accesses to the init_lock_initialized boolean, since it really
+        * only matters early in process creation, before any separate
+        * thread normally starts doing anything. */
+       if (!init_lock_initialized)
+               malloc_mutex_init(&init_lock);
+       init_lock_initialized = true;
 }
 
 #ifdef _MSC_VER
@@ -72,7 +221,7 @@ _init_init_lock(void)
 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
 #endif
-
+#endif
 #else
 static malloc_mutex_t  init_lock = MALLOC_MUTEX_INITIALIZER;
 #endif
@@ -105,6 +254,7 @@ typedef struct {
  * definition.
  */
 
+static bool    malloc_init_hard_a0(void);
 static bool    malloc_init_hard(void);
 
 /******************************************************************************/
@@ -112,35 +262,307 @@ static bool      malloc_init_hard(void);
  * Begin miscellaneous support functions.
  */
 
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_initialized(void)
+{
+
+       return (malloc_init_state == malloc_init_initialized);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void
+malloc_thread_init(void)
+{
+
+       /*
+        * TSD initialization can't be safely done as a side effect of
+        * deallocation, because it is possible for a thread to do nothing but
+        * deallocate its TLS data via free(), in which case writing to TLS
+        * would cause write-after-free memory corruption.  The quarantine
+        * facility *only* gets used as a side effect of deallocation, so make
+        * a best effort attempt at initializing its TSD by hooking all
+        * allocation events.
+        */
+       if (config_fill && unlikely(opt_quarantine))
+               quarantine_alloc_hook();
+}
+
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_init_a0(void)
+{
+
+       if (unlikely(malloc_init_state == malloc_init_uninitialized))
+               return (malloc_init_hard_a0());
+       return (false);
+}
+
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_init(void)
+{
+
+       if (unlikely(!malloc_initialized()) && malloc_init_hard())
+               return (true);
+       malloc_thread_init();
+
+       return (false);
+}
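/*
 * Rough state progression implemented by the init functions below:
 * malloc_init_uninitialized -> malloc_init_a0_initialized (just enough
 * bootstrapping for a0malloc()) -> malloc_init_recursible (init_lock
 * dropped so TSD setup and malloc_ncpus() may allocate) ->
 * malloc_init_initialized (full arenas array, malloc_slow_flags computed).
 */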
+
+/*
+ * The a0*() functions are used instead of i[mcd]alloc() in situations that
+ * cannot tolerate TLS variable access.
+ */
+
+static void *
+a0ialloc(size_t size, bool zero, bool is_metadata)
+{
+
+       if (unlikely(malloc_init_a0()))
+               return (NULL);
+
+       return (iallocztm(NULL, size, size2index(size), zero, false,
+           is_metadata, arena_get(0, false), true));
+}
+
+static void
+a0idalloc(void *ptr, bool is_metadata)
+{
+
+       idalloctm(NULL, ptr, false, is_metadata, true);
+}
+
+void *
+a0malloc(size_t size)
+{
+
+       return (a0ialloc(size, false, true));
+}
+
+void
+a0dalloc(void *ptr)
+{
+
+       a0idalloc(ptr, true);
+}
+
+/*
+ * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
+ * situations that cannot tolerate TLS variable access (TLS allocation and very
+ * early internal data structure initialization).
+ */
+
+void *
+bootstrap_malloc(size_t size)
+{
+
+       if (unlikely(size == 0))
+               size = 1;
+
+       return (a0ialloc(size, false, false));
+}
+
+void *
+bootstrap_calloc(size_t num, size_t size)
+{
+       size_t num_size;
+
+       num_size = num * size;
+       if (unlikely(num_size == 0)) {
+               assert(num == 0 || size == 0);
+               num_size = 1;
+       }
+
+       return (a0ialloc(num_size, true, false));
+}
+
+void
+bootstrap_free(void *ptr)
+{
+
+       if (unlikely(ptr == NULL))
+               return;
+
+       a0idalloc(ptr, false);
+}
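/*
 * Design note (interpretation, not stated in the patch): a0malloc() and
 * a0dalloc() pass is_metadata == true so the memory is attributed to
 * jemalloc's internal metadata statistics, whereas the bootstrap_*()
 * wrappers pass false because those allocations belong to the embedding
 * libc rather than to the allocator's own data structures.
 */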
+
+static void
+arena_set(unsigned ind, arena_t *arena)
+{
+
+       atomic_write_p((void **)&arenas[ind], arena);
+}
+
+static void
+narenas_total_set(unsigned narenas)
+{
+
+       atomic_write_u(&narenas_total, narenas);
+}
+
+static void
+narenas_total_inc(void)
+{
+
+       atomic_add_u(&narenas_total, 1);
+}
+
+unsigned
+narenas_total_get(void)
+{
+
+       return (atomic_read_u(&narenas_total));
+}
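/*
 * Design note: narenas_total and the arenas slots written via arena_set()
 * are updated under arenas_lock but read on allocation fast paths without
 * it, which is why these helpers use the atomic_*() accessors instead of
 * plain loads and stores.
 */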
+
 /* Create a new arena and insert it into the arenas array at index ind. */
+static arena_t *
+arena_init_locked(unsigned ind)
+{
+       arena_t *arena;
+
+       assert(ind <= narenas_total_get());
+       if (ind > MALLOCX_ARENA_MAX)
+               return (NULL);
+       if (ind == narenas_total_get())
+               narenas_total_inc();
+
+       /*
+        * Another thread may have already initialized arenas[ind] if it's an
+        * auto arena.
+        */
+       arena = arena_get(ind, false);
+       if (arena != NULL) {
+               assert(ind < narenas_auto);
+               return (arena);
+       }
+
+       /* Actually initialize the arena. */
+       arena = arena_new(ind);
+       arena_set(ind, arena);
+       return (arena);
+}
+
 arena_t *
-arenas_extend(unsigned ind)
+arena_init(unsigned ind)
 {
-       arena_t *ret;
+       arena_t *arena;
+
+       malloc_mutex_lock(&arenas_lock);
+       arena = arena_init_locked(ind);
+       malloc_mutex_unlock(&arenas_lock);
+       return (arena);
+}
+
+static void
+arena_bind(tsd_t *tsd, unsigned ind)
+{
+       arena_t *arena;
+
+       arena = arena_get(ind, false);
+       arena_nthreads_inc(arena);
+
+       if (tsd_nominal(tsd))
+               tsd_arena_set(tsd, arena);
+}
+
+void
+arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
+{
+       arena_t *oldarena, *newarena;
+
+       oldarena = arena_get(oldind, false);
+       newarena = arena_get(newind, false);
+       arena_nthreads_dec(oldarena);
+       arena_nthreads_inc(newarena);
+       tsd_arena_set(tsd, newarena);
+}
+
+static void
+arena_unbind(tsd_t *tsd, unsigned ind)
+{
+       arena_t *arena;
 
-       ret = (arena_t *)base_alloc(sizeof(arena_t));
-       if (ret != NULL && !arena_new(ret, ind)) {
-               arenas[ind] = ret;
-               return (ret);
+       arena = arena_get(ind, false);
+       arena_nthreads_dec(arena);
+       tsd_arena_set(tsd, NULL);
+}
+
+arena_tdata_t *
+arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
+{
+       arena_tdata_t *tdata, *arenas_tdata_old;
+       arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
+       unsigned narenas_tdata_old, i;
+       unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
+       unsigned narenas_actual = narenas_total_get();
+
+       /*
+        * Dissociate old tdata array (and set up for deallocation upon return)
+        * if it's too small.
+        */
+       if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
+               arenas_tdata_old = arenas_tdata;
+               narenas_tdata_old = narenas_tdata;
+               arenas_tdata = NULL;
+               narenas_tdata = 0;
+               tsd_arenas_tdata_set(tsd, arenas_tdata);
+               tsd_narenas_tdata_set(tsd, narenas_tdata);
+       } else {
+               arenas_tdata_old = NULL;
+               narenas_tdata_old = 0;
+       }
+
+       /* Allocate tdata array if it's missing. */
+       if (arenas_tdata == NULL) {
+               bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
+               narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
+
+               if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
+                       *arenas_tdata_bypassp = true;
+                       arenas_tdata = (arena_tdata_t *)a0malloc(
+                           sizeof(arena_tdata_t) * narenas_tdata);
+                       *arenas_tdata_bypassp = false;
+               }
+               if (arenas_tdata == NULL) {
+                       tdata = NULL;
+                       goto label_return;
+               }
+               assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
+               tsd_arenas_tdata_set(tsd, arenas_tdata);
+               tsd_narenas_tdata_set(tsd, narenas_tdata);
        }
-       /* Only reached if there is an OOM error. */
 
        /*
-        * OOM here is quite inconvenient to propagate, since dealing with it
-        * would require a check for failure in the fast path.  Instead, punt
-        * by using arenas[0].  In practice, this is an extremely unlikely
-        * failure.
+        * Copy to tdata array.  It's possible that the actual number of arenas
+        * has increased since narenas_total_get() was called above, but that
+        * causes no correctness issues unless two threads concurrently execute
+        * the arenas.extend mallctl, which we trust mallctl synchronization to
+        * prevent.
         */
-       malloc_write("<jemalloc>: Error initializing arena\n");
-       if (opt_abort)
-               abort();
 
-       return (arenas[0]);
+       /* Copy/initialize tickers. */
+       for (i = 0; i < narenas_actual; i++) {
+               if (i < narenas_tdata_old) {
+                       ticker_copy(&arenas_tdata[i].decay_ticker,
+                           &arenas_tdata_old[i].decay_ticker);
+               } else {
+                       ticker_init(&arenas_tdata[i].decay_ticker,
+                           DECAY_NTICKS_PER_UPDATE);
+               }
+       }
+       if (narenas_tdata > narenas_actual) {
+               memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
+                   * (narenas_tdata - narenas_actual));
+       }
+
+       /* Read the refreshed tdata array. */
+       tdata = &arenas_tdata[ind];
+label_return:
+       if (arenas_tdata_old != NULL)
+               a0dalloc(arenas_tdata_old);
+       return (tdata);
 }
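/*
 * Note (assumed intent): *arenas_tdata_bypassp is toggled around the
 * a0malloc() call above so that allocating the tdata array cannot itself
 * recurse into arena_tdata_get_hard() and attempt to create the array
 * again.
 */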
 
-/* Slow path, called only by choose_arena(). */
+/* Slow path, called only by arena_choose(). */
 arena_t *
-choose_arena_hard(tsd_t *tsd)
+arena_choose_hard(tsd_t *tsd)
 {
        arena_t *ret;
 
@@ -150,15 +572,16 @@ choose_arena_hard(tsd_t *tsd)
                choose = 0;
                first_null = narenas_auto;
                malloc_mutex_lock(&arenas_lock);
-               assert(arenas[0] != NULL);
+               assert(arena_get(0, false) != NULL);
                for (i = 1; i < narenas_auto; i++) {
-                       if (arenas[i] != NULL) {
+                       if (arena_get(i, false) != NULL) {
                                /*
                                 * Choose the first arena that has the lowest
                                 * number of threads assigned to it.
                                 */
-                               if (arenas[i]->nthreads <
-                                   arenas[choose]->nthreads)
+                               if (arena_nthreads_get(arena_get(i, false)) <
+                                   arena_nthreads_get(arena_get(choose,
+                                   false)))
                                        choose = i;
                        } else if (first_null == narenas_auto) {
                                /*
@@ -174,29 +597,29 @@ choose_arena_hard(tsd_t *tsd)
                        }
                }
 
-               if (arenas[choose]->nthreads == 0
+               if (arena_nthreads_get(arena_get(choose, false)) == 0
                    || first_null == narenas_auto) {
                        /*
                         * Use an unloaded arena, or the least loaded arena if
                         * all arenas are already initialized.
                         */
-                       ret = arenas[choose];
+                       ret = arena_get(choose, false);
                } else {
                        /* Initialize a new arena. */
-                       ret = arenas_extend(first_null);
+                       choose = first_null;
+                       ret = arena_init_locked(choose);
+                       if (ret == NULL) {
+                               malloc_mutex_unlock(&arenas_lock);
+                               return (NULL);
+                       }
                }
-               ret->nthreads++;
+               arena_bind(tsd, choose);
                malloc_mutex_unlock(&arenas_lock);
        } else {
-               ret = arenas[0];
-               malloc_mutex_lock(&arenas_lock);
-               ret->nthreads++;
-               malloc_mutex_unlock(&arenas_lock);
+               ret = arena_get(0, false);
+               arena_bind(tsd, 0);
        }
 
-       if (tsd_nominal(tsd))
-               tsd_arena_set(tsd, ret);
-
        return (ret);
 }
 
@@ -216,6 +639,38 @@ thread_deallocated_cleanup(tsd_t *tsd)
 
 void
 arena_cleanup(tsd_t *tsd)
+{
+       arena_t *arena;
+
+       arena = tsd_arena_get(tsd);
+       if (arena != NULL)
+               arena_unbind(tsd, arena->ind);
+}
+
+void
+arenas_tdata_cleanup(tsd_t *tsd)
+{
+       arena_tdata_t *arenas_tdata;
+
+       /* Prevent tsd->arenas_tdata from being (re)created. */
+       *tsd_arenas_tdata_bypassp_get(tsd) = true;
+
+       arenas_tdata = tsd_arenas_tdata_get(tsd);
+       if (arenas_tdata != NULL) {
+               tsd_arenas_tdata_set(tsd, NULL);
+               a0dalloc(arenas_tdata);
+       }
+}
+
+void
+narenas_tdata_cleanup(tsd_t *tsd)
+{
+
+       /* Do nothing. */
+}
+
+void
+arenas_tdata_bypass_cleanup(tsd_t *tsd)
 {
 
        /* Do nothing. */
@@ -236,7 +691,7 @@ stats_print_atexit(void)
                 * continue to allocate.
                 */
                for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
-                       arena_t *arena = arenas[i];
+                       arena_t *arena = arena_get(i, false);
                        if (arena != NULL) {
                                tcache_t *tcache;
 
@@ -265,6 +720,19 @@ stats_print_atexit(void)
  * Begin initialization functions.
  */
 
+#ifndef JEMALLOC_HAVE_SECURE_GETENV
+static char *
+secure_getenv(const char *name)
+{
+
+#  ifdef JEMALLOC_HAVE_ISSETUGID
+       if (issetugid() != 0)
+               return (NULL);
+#  endif
+       return (getenv(name));
+}
+#endif
+
 static unsigned
 malloc_ncpus(void)
 {
@@ -280,44 +748,6 @@ malloc_ncpus(void)
        return ((result == -1) ? 1 : (unsigned)result);
 }
 
-void
-arenas_cleanup(void *arg)
-{
-       arena_t *arena = *(arena_t **)arg;
-
-       malloc_mutex_lock(&arenas_lock);
-       arena->nthreads--;
-       malloc_mutex_unlock(&arenas_lock);
-}
-
-JEMALLOC_ALWAYS_INLINE_C void
-malloc_thread_init(void)
-{
-
-       /*
-        * TSD initialization can't be safely done as a side effect of
-        * deallocation, because it is possible for a thread to do nothing but
-        * deallocate its TLS data via free(), in which case writing to TLS
-        * would cause write-after-free memory corruption.  The quarantine
-        * facility *only* gets used as a side effect of deallocation, so make
-        * a best effort attempt at initializing its TSD by hooking all
-        * allocation events.
-        */
-       if (config_fill && unlikely(opt_quarantine))
-               quarantine_alloc_hook();
-}
-
-JEMALLOC_ALWAYS_INLINE_C bool
-malloc_init(void)
-{
-
-       if (unlikely(!malloc_initialized) && malloc_init_hard())
-               return (true);
-       malloc_thread_init();
-
-       return (false);
-}
-
 static bool
 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
     char const **v_p, size_t *vlen_p)
@@ -403,6 +833,26 @@ malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
            (int)vlen, v);
 }
 
+static void
+malloc_slow_flag_init(void)
+{
+       /*
+        * Combine the runtime options into malloc_slow for the fast path.
+        * Called after processing all the options.
+        */
+       malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
+           | (opt_junk_free ? flag_opt_junk_free : 0)
+           | (opt_quarantine ? flag_opt_quarantine : 0)
+           | (opt_zero ? flag_opt_zero : 0)
+           | (opt_utrace ? flag_opt_utrace : 0)
+           | (opt_xmalloc ? flag_opt_xmalloc : 0);
+
+       if (config_valgrind)
+               malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
+
+       malloc_slow = (malloc_slow_flags != 0);
+}
+
 static void
 malloc_conf_init(void)
 {
@@ -418,7 +868,9 @@ malloc_conf_init(void)
        if (config_valgrind) {
                in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
                if (config_fill && unlikely(in_valgrind)) {
-                       opt_junk = false;
+                       opt_junk = "false";
+                       opt_junk_alloc = false;
+                       opt_junk_free = false;
                        assert(!opt_zero);
                        opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
                        opt_redzone = true;
@@ -427,10 +879,13 @@ malloc_conf_init(void)
                        opt_tcache = false;
        }
 
-       for (i = 0; i < 3; i++) {
+       for (i = 0; i < 4; i++) {
                /* Get runtime configuration. */
                switch (i) {
                case 0:
+                       opts = config_malloc_conf;
+                       break;
+               case 1:
                        if (je_malloc_conf != NULL) {
                                /*
                                 * Use options that were compiled into the
@@ -443,8 +898,8 @@ malloc_conf_init(void)
                                opts = buf;
                        }
                        break;
-               case 1: {
-                       int linklen = 0;
+               case 2: {
+                       ssize_t linklen = 0;
 #ifndef _WIN32
                        int saved_errno = errno;
                        const char *linkname =
@@ -463,14 +918,14 @@ malloc_conf_init(void)
                        if (linklen == -1) {
                                /* No configuration specified. */
                                linklen = 0;
-                               /* restore errno */
+                               /* Restore errno. */
                                set_errno(saved_errno);
                        }
 #endif
                        buf[linklen] = '\0';
                        opts = buf;
                        break;
-               } case 2: {
+               } case 3: {
                        const char *envname =
 #ifdef JEMALLOC_PREFIX
                            JEMALLOC_CPREFIX"MALLOC_CONF"
@@ -479,7 +934,7 @@ malloc_conf_init(void)
 #endif
                            ;
 
-                       if ((opts = getenv(envname)) != NULL) {
+                       if ((opts = secure_getenv(envname)) != NULL) {
                                /*
                                 * Do nothing; opts is already initialized to
                                 * the value of the MALLOC_CONF environment
@@ -501,13 +956,13 @@ malloc_conf_init(void)
                    &vlen)) {
 #define        CONF_MATCH(n)                                                   \
        (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
+#define        CONF_MATCH_VALUE(n)                                             \
+       (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
 #define        CONF_HANDLE_BOOL(o, n, cont)                                    \
                        if (CONF_MATCH(n)) {                            \
-                               if (strncmp("true", v, vlen) == 0 &&    \
-                                   vlen == sizeof("true")-1)           \
+                               if (CONF_MATCH_VALUE("true"))           \
                                        o = true;                       \
-                               else if (strncmp("false", v, vlen) ==   \
-                                   0 && vlen == sizeof("false")-1)     \
+                               else if (CONF_MATCH_VALUE("false"))     \
                                        o = false;                      \
                                else {                                  \
                                        malloc_conf_error(              \
@@ -517,7 +972,7 @@ malloc_conf_init(void)
                                if (cont)                               \
                                        continue;                       \
                        }
-#define        CONF_HANDLE_SIZE_T(o, n, min, max, clip)                        \
+#define        CONF_HANDLE_T_U(t, o, n, min, max, clip)                        \
                        if (CONF_MATCH(n)) {                            \
                                uintmax_t um;                           \
                                char *end;                              \
@@ -530,24 +985,28 @@ malloc_conf_init(void)
                                            "Invalid conf value",       \
                                            k, klen, v, vlen);          \
                                } else if (clip) {                      \
-                                       if (min != 0 && um < min)       \
-                                               o = min;                \
-                                       else if (um > max)              \
-                                               o = max;                \
+                                       if ((min) != 0 && um < (min))   \
+                                               o = (t)(min);           \
+                                       else if (um > (max))            \
+                                               o = (t)(max);           \
                                        else                            \
-                                               o = um;                 \
+                                               o = (t)um;              \
                                } else {                                \
-                                       if ((min != 0 && um < min) ||   \
-                                           um > max) {                 \
+                                       if (((min) != 0 && um < (min))  \
+                                           || um > (max)) {            \
                                                malloc_conf_error(      \
                                                    "Out-of-range "     \
                                                    "conf value",       \
                                                    k, klen, v, vlen);  \
                                        } else                          \
-                                               o = um;                 \
+                                               o = (t)um;              \
                                }                                       \
                                continue;                               \
                        }
+#define        CONF_HANDLE_UNSIGNED(o, n, min, max, clip)                      \
+                       CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
+#define        CONF_HANDLE_SIZE_T(o, n, min, max, clip)                        \
+                       CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
 #define        CONF_HANDLE_SSIZE_T(o, n, min, max)                             \
                        if (CONF_MATCH(n)) {                            \
                                long l;                                 \
@@ -560,8 +1019,8 @@ malloc_conf_init(void)
                                        malloc_conf_error(              \
                                            "Invalid conf value",       \
                                            k, klen, v, vlen);          \
-                               } else if (l < (ssize_t)min || l >      \
-                                   (ssize_t)max) {                     \
+                               } else if (l < (ssize_t)(min) || l >    \
+                                   (ssize_t)(max)) {                   \
                                        malloc_conf_error(              \
                                            "Out-of-range conf value",  \
                                            k, klen, v, vlen);          \
@@ -581,15 +1040,16 @@ malloc_conf_init(void)
 
                        CONF_HANDLE_BOOL(opt_abort, "abort", true)
                        /*
-                        * Chunks always require at least one header page, plus
-                        * one data page in the absence of redzones, or three
-                        * pages in the presence of redzones.  In order to
-                        * simplify options processing, fix the limit based on
-                        * config_fill.
+                        * Chunks always require at least one header page,
+                        * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
+                        * possibly an additional page in the presence of
+                        * redzones.  In order to simplify options processing,
+                        * use a conservative bound that accommodates all these
+                        * constraints.
                         */
                        CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
-                           (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
-                           true)
+                           LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
+                           (sizeof(size_t) << 3) - 1, true)
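                        /*
                         * Worked bound (assumed typical values): with 4 KiB
                         * pages (LG_PAGE == 12) and the default
                         * LG_SIZE_CLASS_GROUP == 2, a config_fill build
                         * accepts lg_chunk no smaller than 12 + 2 + 2 == 16,
                         * i.e. a 64 KiB minimum chunk size.
                         */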
                        if (strncmp("dss", k, klen) == 0) {
                                int i;
                                bool match = false;
@@ -614,13 +1074,55 @@ malloc_conf_init(void)
                                }
                                continue;
                        }
-                       CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
-                           SIZE_T_MAX, false)
+                       CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
+                           UINT_MAX, false)
+                       if (strncmp("purge", k, klen) == 0) {
+                               int i;
+                               bool match = false;
+                               for (i = 0; i < purge_mode_limit; i++) {
+                                       if (strncmp(purge_mode_names[i], v,
+                                           vlen) == 0) {
+                                               opt_purge = (purge_mode_t)i;
+                                               match = true;
+                                               break;
+                                       }
+                               }
+                               if (!match) {
+                                       malloc_conf_error("Invalid conf value",
+                                           k, klen, v, vlen);
+                               }
+                               continue;
+                       }
                        CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
                            -1, (sizeof(size_t) << 3) - 1)
+                       CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
+                           NSTIME_SEC_MAX);
                        CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
                        if (config_fill) {
-                               CONF_HANDLE_BOOL(opt_junk, "junk", true)
+                               if (CONF_MATCH("junk")) {
+                                       if (CONF_MATCH_VALUE("true")) {
+                                               opt_junk = "true";
+                                               opt_junk_alloc = opt_junk_free =
+                                                   true;
+                                       } else if (CONF_MATCH_VALUE("false")) {
+                                               opt_junk = "false";
+                                               opt_junk_alloc = opt_junk_free =
+                                                   false;
+                                       } else if (CONF_MATCH_VALUE("alloc")) {
+                                               opt_junk = "alloc";
+                                               opt_junk_alloc = true;
+                                               opt_junk_free = false;
+                                       } else if (CONF_MATCH_VALUE("free")) {
+                                               opt_junk = "free";
+                                               opt_junk_alloc = false;
+                                               opt_junk_free = true;
+                                       } else {
+                                               malloc_conf_error(
+                                                   "Invalid conf value", k,
+                                                   klen, v, vlen);
+                                       }
+                                       continue;
+                               }
                                CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
                                    0, SIZE_T_MAX, false)
                                CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
@@ -684,19 +1186,18 @@ malloc_conf_init(void)
        }
 }
 
+/* init_lock must be held. */
 static bool
-malloc_init_hard(void)
+malloc_init_hard_needed(void)
 {
-       arena_t *init_arenas[1];
 
-       malloc_mutex_lock(&init_lock);
-       if (malloc_initialized || IS_INITIALIZER) {
+       if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
+           malloc_init_recursible)) {
                /*
                 * Another thread initialized the allocator before this one
                 * acquired init_lock, or this thread is the initializing
                 * thread, and it is recursively allocating.
                 */
-               malloc_mutex_unlock(&init_lock);
                return (false);
        }
 #ifdef JEMALLOC_THREADED_INIT
@@ -706,23 +1207,23 @@ malloc_init_hard(void)
                        malloc_mutex_unlock(&init_lock);
                        CPU_SPINWAIT;
                        malloc_mutex_lock(&init_lock);
-               } while (!malloc_initialized);
-               malloc_mutex_unlock(&init_lock);
+               } while (!malloc_initialized());
                return (false);
        }
 #endif
-       malloc_initializer = INITIALIZER;
+       return (true);
+}
 
-       if (malloc_tsd_boot()) {
-               malloc_mutex_unlock(&init_lock);
-               return (true);
-       }
+/* init_lock must be held. */
+static bool
+malloc_init_hard_a0_locked(void)
+{
+
+       malloc_initializer = INITIALIZER;
 
        if (config_prof)
                prof_boot0();
-
        malloc_conf_init();
-
        if (opt_stats_print) {
                /* Print statistics at exit. */
                if (atexit(stats_print_atexit) != 0) {
@@ -731,90 +1232,94 @@ malloc_init_hard(void)
                                abort();
                }
        }
-
-       if (base_boot()) {
-               malloc_mutex_unlock(&init_lock);
+       if (base_boot())
                return (true);
-       }
-
-       if (chunk_boot()) {
-               malloc_mutex_unlock(&init_lock);
+       if (chunk_boot())
                return (true);
-       }
-
-       if (ctl_boot()) {
-               malloc_mutex_unlock(&init_lock);
+       if (ctl_boot())
                return (true);
-       }
-
        if (config_prof)
                prof_boot1();
-
-       arena_boot();
-
-       if (config_tcache && tcache_boot()) {
-               malloc_mutex_unlock(&init_lock);
+       if (arena_boot())
                return (true);
-       }
-
-       if (huge_boot()) {
-               malloc_mutex_unlock(&init_lock);
+       if (config_tcache && tcache_boot())
                return (true);
-       }
-
-       if (malloc_mutex_init(&arenas_lock)) {
-               malloc_mutex_unlock(&init_lock);
+       if (malloc_mutex_init(&arenas_lock))
                return (true);
-       }
-
        /*
         * Create enough scaffolding to allow recursive allocation in
         * malloc_ncpus().
         */
-       narenas_total = narenas_auto = 1;
-       arenas = init_arenas;
+       narenas_auto = 1;
+       narenas_total_set(narenas_auto);
+       arenas = &a0;
        memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
-
        /*
         * Initialize one arena here.  The rest are lazily created in
-        * choose_arena_hard().
+        * arena_choose_hard().
         */
-       arenas_extend(0);
-       if (arenas[0] == NULL) {
-               malloc_mutex_unlock(&init_lock);
+       if (arena_init(0) == NULL)
                return (true);
-       }
+       malloc_init_state = malloc_init_a0_initialized;
+       return (false);
+}
 
-       if (config_prof && prof_boot2()) {
-               malloc_mutex_unlock(&init_lock);
-               return (true);
-       }
+static bool
+malloc_init_hard_a0(void)
+{
+       bool ret;
 
+       malloc_mutex_lock(&init_lock);
+       ret = malloc_init_hard_a0_locked();
        malloc_mutex_unlock(&init_lock);
-       /**********************************************************************/
-       /* Recursive allocation may follow. */
+       return (ret);
+}
+
+/*
+ * Initialize data structures which may trigger recursive allocation.
+ *
+ * init_lock must be held.
+ */
+static bool
+malloc_init_hard_recursible(void)
+{
+       bool ret = false;
+
+       malloc_init_state = malloc_init_recursible;
+       malloc_mutex_unlock(&init_lock);
+
+       /* LinuxThreads' pthread_setspecific() allocates. */
+       if (malloc_tsd_boot0()) {
+               ret = true;
+               goto label_return;
+       }
 
        ncpus = malloc_ncpus();
 
 #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
     && !defined(_WIN32) && !defined(__native_client__))
-       /* LinuxThreads's pthread_atfork() allocates. */
+       /* LinuxThreads' pthread_atfork() allocates. */
        if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
            jemalloc_postfork_child) != 0) {
+               ret = true;
                malloc_write("<jemalloc>: Error in pthread_atfork()\n");
                if (opt_abort)
                        abort();
        }
 #endif
 
-       /* Done recursively allocating. */
-       /**********************************************************************/
+label_return:
        malloc_mutex_lock(&init_lock);
+       return (ret);
+}
+
+/* init_lock must be held. */
+static bool
+malloc_init_hard_finish(void)
+{
 
-       if (mutex_boot()) {
-               malloc_mutex_unlock(&init_lock);
+       if (mutex_boot())
                return (true);
-       }
 
        if (opt_narenas == 0) {
                /*
@@ -828,34 +1333,65 @@ malloc_init_hard(void)
        }
        narenas_auto = opt_narenas;
        /*
-        * Make sure that the arenas array can be allocated.  In practice, this
-        * limit is enough to allow the allocator to function, but the ctl
-        * machinery will fail to allocate memory at far lower limits.
+        * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
         */
-       if (narenas_auto > chunksize / sizeof(arena_t *)) {
-               narenas_auto = chunksize / sizeof(arena_t *);
+       if (narenas_auto > MALLOCX_ARENA_MAX) {
+               narenas_auto = MALLOCX_ARENA_MAX;
                malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
                    narenas_auto);
        }
-       narenas_total = narenas_auto;
+       narenas_total_set(narenas_auto);
 
        /* Allocate and initialize arenas. */
-       arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
-       if (arenas == NULL) {
+       arenas = (arena_t **)base_alloc(sizeof(arena_t *) *
+           (MALLOCX_ARENA_MAX+1));
+       if (arenas == NULL)
+               return (true);
+       /* Copy the pointer to the one arena that was already initialized. */
+       arena_set(0, a0);
+
+       malloc_init_state = malloc_init_initialized;
+       malloc_slow_flag_init();
+
+       return (false);
+}
+
+static bool
+malloc_init_hard(void)
+{
+
+#if defined(_WIN32) && _WIN32_WINNT < 0x0600
+       _init_init_lock();
+#endif
+       malloc_mutex_lock(&init_lock);
+       if (!malloc_init_hard_needed()) {
+               malloc_mutex_unlock(&init_lock);
+               return (false);
+       }
+
+       if (malloc_init_state != malloc_init_a0_initialized &&
+           malloc_init_hard_a0_locked()) {
                malloc_mutex_unlock(&init_lock);
                return (true);
        }
-       /*
-        * Zero the array.  In practice, this should always be pre-zeroed,
-        * since it was just mmap()ed, but let's be sure.
-        */
-       memset(arenas, 0, sizeof(arena_t *) * narenas_total);
-       /* Copy the pointer to the one arena that was already initialized. */
-       arenas[0] = init_arenas[0];
 
-       malloc_initialized = true;
-       malloc_mutex_unlock(&init_lock);
+       if (malloc_init_hard_recursible()) {
+               malloc_mutex_unlock(&init_lock);
+               return (true);
+       }
+
+       if (config_prof && prof_boot2()) {
+               malloc_mutex_unlock(&init_lock);
+               return (true);
+       }
+
+       if (malloc_init_hard_finish()) {
+               malloc_mutex_unlock(&init_lock);
+               return (true);
+       }
 
+       malloc_mutex_unlock(&init_lock);
+       malloc_tsd_boot1();
        return (false);
 }
 
@@ -868,35 +1404,37 @@ malloc_init_hard(void)
  */
 
 static void *
-imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
+imalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind,
+    prof_tctx_t *tctx, bool slow_path)
 {
        void *p;
 
        if (tctx == NULL)
                return (NULL);
        if (usize <= SMALL_MAXCLASS) {
-               p = imalloc(tsd, LARGE_MINCLASS);
+               szind_t ind_large = size2index(LARGE_MINCLASS);
+               p = imalloc(tsd, LARGE_MINCLASS, ind_large, slow_path);
                if (p == NULL)
                        return (NULL);
                arena_prof_promoted(p, usize);
        } else
-               p = imalloc(tsd, usize);
+               p = imalloc(tsd, usize, ind, slow_path);
 
        return (p);
 }
 
 JEMALLOC_ALWAYS_INLINE_C void *
-imalloc_prof(tsd_t *tsd, size_t usize)
+imalloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool slow_path)
 {
        void *p;
        prof_tctx_t *tctx;
 
-       tctx = prof_alloc_prep(tsd, usize, true);
+       tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
        if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
-               p = imalloc_prof_sample(tsd, usize, tctx);
+               p = imalloc_prof_sample(tsd, usize, ind, tctx, slow_path);
        else
-               p = imalloc(tsd, usize);
-       if (p == NULL) {
+               p = imalloc(tsd, usize, ind, slow_path);
+       if (unlikely(p == NULL)) {
                prof_alloc_rollback(tsd, tctx, true);
                return (NULL);
        }
@@ -906,36 +1444,34 @@ imalloc_prof(tsd_t *tsd, size_t usize)
 }
 
 JEMALLOC_ALWAYS_INLINE_C void *
-imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
+imalloc_body(size_t size, tsd_t **tsd, size_t *usize, bool slow_path)
 {
+       szind_t ind;
 
-       if (unlikely(malloc_init()))
+       if (slow_path && unlikely(malloc_init()))
                return (NULL);
        *tsd = tsd_fetch();
+       ind = size2index(size);
+       if (unlikely(ind >= NSIZES))
+               return (NULL);
 
-       if (config_prof && opt_prof) {
-               *usize = s2u(size);
-               return (imalloc_prof(*tsd, *usize));
+       if (config_stats || (config_prof && opt_prof) || (slow_path &&
+           config_valgrind && unlikely(in_valgrind))) {
+               *usize = index2size(ind);
+               assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
        }
 
-       if (config_stats || (config_valgrind && unlikely(in_valgrind)))
-               *usize = s2u(size);
-       return (imalloc(*tsd, size));
+       if (config_prof && opt_prof)
+               return (imalloc_prof(*tsd, *usize, ind, slow_path));
+
+       return (imalloc(*tsd, size, ind, slow_path));
 }
 
-void *
-je_malloc(size_t size)
+JEMALLOC_ALWAYS_INLINE_C void
+imalloc_post_check(void *ret, tsd_t *tsd, size_t usize, bool slow_path)
 {
-       void *ret;
-       tsd_t *tsd;
-       size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-
-       if (size == 0)
-               size = 1;
-
-       ret = imalloc_body(size, &tsd, &usize);
        if (unlikely(ret == NULL)) {
-               if (config_xmalloc && unlikely(opt_xmalloc)) {
+               if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
                        malloc_write("<jemalloc>: Error in malloc(): "
                            "out of memory\n");
                        abort();
@@ -946,8 +1482,34 @@ je_malloc(size_t size)
                assert(usize == isalloc(ret, config_prof));
                *tsd_thread_allocatedp_get(tsd) += usize;
        }
-       UTRACE(0, size, ret);
-       JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
+je_malloc(size_t size)
+{
+       void *ret;
+       tsd_t *tsd;
+       size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+
+       if (size == 0)
+               size = 1;
+
+       if (likely(!malloc_slow)) {
+               /*
+                * imalloc_body() is inlined so that fast and slow paths are
+                * generated separately with statically known slow_path.
+                */
+               ret = imalloc_body(size, &tsd, &usize, false);
+               imalloc_post_check(ret, tsd, usize, false);
+       } else {
+               ret = imalloc_body(size, &tsd, &usize, true);
+               imalloc_post_check(ret, tsd, usize, true);
+               UTRACE(0, size, ret);
+               JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
+       }
+
        return (ret);
 }
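/*
 * Minimal standalone sketch (hypothetical names, not jemalloc code) of the
 * specialization trick je_malloc() relies on above: an always-inline body
 * takes a compile-time-constant slow_path flag, so the compiler emits
 * separate fast and slow instantiations and the entry point selects one
 * with a single predictable branch.
 */
#include <stdbool.h>
#include <stdio.h>

static bool demo_slow = false;          /* stands in for malloc_slow */

static inline int
demo_body(int x, bool slow_path)
{

        if (slow_path) {
                /* Bookkeeping that only the slow instantiation pays for. */
                fprintf(stderr, "slow path: x=%d\n", x);
        }
        return (x * 2);
}

int
main(void)
{
        /* Constant arguments let each call site inline a specialized body. */
        int r = demo_slow ? demo_body(21, true) : demo_body(21, false);

        printf("%d\n", r);
        return (0);
}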
 
@@ -961,7 +1523,7 @@ imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
                return (NULL);
        if (usize <= SMALL_MAXCLASS) {
                assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
-               p = imalloc(tsd, LARGE_MINCLASS);
+               p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
                if (p == NULL)
                        return (NULL);
                arena_prof_promoted(p, usize);
@@ -977,12 +1539,12 @@ imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
        void *p;
        prof_tctx_t *tctx;
 
-       tctx = prof_alloc_prep(tsd, usize, true);
+       tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
        if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
                p = imemalign_prof_sample(tsd, alignment, usize, tctx);
        else
                p = ipalloc(tsd, usize, alignment, false);
-       if (p == NULL) {
+       if (unlikely(p == NULL)) {
                prof_alloc_rollback(tsd, tctx, true);
                return (NULL);
        }
@@ -1005,38 +1567,38 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
        if (unlikely(malloc_init())) {
                result = NULL;
                goto label_oom;
-       } else {
-               tsd = tsd_fetch();
-               if (size == 0)
-                       size = 1;
-
-               /* Make sure that alignment is a large enough power of 2. */
-               if (unlikely(((alignment - 1) & alignment) != 0
-                   || (alignment < min_alignment))) {
-                       if (config_xmalloc && unlikely(opt_xmalloc)) {
-                               malloc_write("<jemalloc>: Error allocating "
-                                   "aligned memory: invalid alignment\n");
-                               abort();
-                       }
-                       result = NULL;
-                       ret = EINVAL;
-                       goto label_return;
-               }
+       }
+       tsd = tsd_fetch();
+       if (size == 0)
+               size = 1;
 
-               usize = sa2u(size, alignment);
-               if (unlikely(usize == 0)) {
-                       result = NULL;
-                       goto label_oom;
+       /* Make sure that alignment is a large enough power of 2. */
+       if (unlikely(((alignment - 1) & alignment) != 0
+           || (alignment < min_alignment))) {
+               if (config_xmalloc && unlikely(opt_xmalloc)) {
+                       malloc_write("<jemalloc>: Error allocating "
+                           "aligned memory: invalid alignment\n");
+                       abort();
                }
+               result = NULL;
+               ret = EINVAL;
+               goto label_return;
+       }
 
-               if (config_prof && opt_prof)
-                       result = imemalign_prof(tsd, alignment, usize);
-               else
-                       result = ipalloc(tsd, usize, alignment, false);
-               if (unlikely(result == NULL))
-                       goto label_oom;
+       usize = sa2u(size, alignment);
+       if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+               result = NULL;
+               goto label_oom;
        }
 
+       if (config_prof && opt_prof)
+               result = imemalign_prof(tsd, alignment, usize);
+       else
+               result = ipalloc(tsd, usize, alignment, false);
+       if (unlikely(result == NULL))
+               goto label_oom;
+       assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
+
        *memptr = result;
        ret = 0;
 label_return:
@@ -1057,7 +1619,8 @@ label_oom:
        goto label_return;
 }
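
The alignment validation above relies on the usual power-of-two bit trick: a nonzero alignment is a power of two exactly when ((alignment - 1) & alignment) == 0, and result & (alignment - 1) is then result's offset from the previous aligned boundary, which the new assert requires to be zero. A small stand-alone check with made-up values:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    int
    main(void)
    {
            size_t alignment = 64;                  /* 0b1000000: a power of two */
            size_t bad = 48;                        /* 0b0110000: not a power of two */
            uintptr_t result = 0x1000 + 3 * 64;     /* a 64-byte-aligned address */

            assert(((alignment - 1) & alignment) == 0);
            assert(((bad - 1) & bad) != 0);
            assert((result & (alignment - 1)) == 0);
            return (0);
    }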
 
-int
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+JEMALLOC_ATTR(nonnull(1))
 je_posix_memalign(void **memptr, size_t alignment, size_t size)
 {
        int ret = imemalign(memptr, alignment, size, sizeof(void *));
@@ -1066,7 +1629,9 @@ je_posix_memalign(void **memptr, size_t alignment, size_t size)
        return (ret);
 }
 
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
 je_aligned_alloc(size_t alignment, size_t size)
 {
        void *ret;
@@ -1082,35 +1647,36 @@ je_aligned_alloc(size_t alignment, size_t size)
 }
 
 static void *
-icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
+icalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, prof_tctx_t *tctx)
 {
        void *p;
 
        if (tctx == NULL)
                return (NULL);
        if (usize <= SMALL_MAXCLASS) {
-               p = icalloc(tsd, LARGE_MINCLASS);
+               szind_t ind_large = size2index(LARGE_MINCLASS);
+               p = icalloc(tsd, LARGE_MINCLASS, ind_large);
                if (p == NULL)
                        return (NULL);
                arena_prof_promoted(p, usize);
        } else
-               p = icalloc(tsd, usize);
+               p = icalloc(tsd, usize, ind);
 
        return (p);
 }
 
 JEMALLOC_ALWAYS_INLINE_C void *
-icalloc_prof(tsd_t *tsd, size_t usize)
+icalloc_prof(tsd_t *tsd, size_t usize, szind_t ind)
 {
        void *p;
        prof_tctx_t *tctx;
 
-       tctx = prof_alloc_prep(tsd, usize, true);
+       tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
        if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
-               p = icalloc_prof_sample(tsd, usize, tctx);
+               p = icalloc_prof_sample(tsd, usize, ind, tctx);
        else
-               p = icalloc(tsd, usize);
-       if (p == NULL) {
+               p = icalloc(tsd, usize, ind);
+       if (unlikely(p == NULL)) {
                prof_alloc_rollback(tsd, tctx, true);
                return (NULL);
        }
@@ -1119,12 +1685,15 @@ icalloc_prof(tsd_t *tsd, size_t usize)
        return (p);
 }
 
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
 je_calloc(size_t num, size_t size)
 {
        void *ret;
        tsd_t *tsd;
        size_t num_size;
+       szind_t ind;
        size_t usize JEMALLOC_CC_SILENCE_INIT(0);
 
        if (unlikely(malloc_init())) {
@@ -1154,13 +1723,18 @@ je_calloc(size_t num, size_t size)
                goto label_return;
        }
 
+       ind = size2index(num_size);
+       if (unlikely(ind >= NSIZES)) {
+               ret = NULL;
+               goto label_return;
+       }
        if (config_prof && opt_prof) {
-               usize = s2u(num_size);
-               ret = icalloc_prof(tsd, usize);
+               usize = index2size(ind);
+               ret = icalloc_prof(tsd, usize, ind);
        } else {
                if (config_stats || (config_valgrind && unlikely(in_valgrind)))
-                       usize = s2u(num_size);
-               ret = icalloc(tsd, num_size);
+                       usize = index2size(ind);
+               ret = icalloc(tsd, num_size, ind);
        }
 
 label_return:
@@ -1182,50 +1756,56 @@ label_return:
 }
 
 static void *
-irealloc_prof_sample(tsd_t *tsd, void *oldptr, size_t usize, prof_tctx_t *tctx)
+irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
+    prof_tctx_t *tctx)
 {
        void *p;
 
        if (tctx == NULL)
                return (NULL);
        if (usize <= SMALL_MAXCLASS) {
-               p = iralloc(tsd, oldptr, LARGE_MINCLASS, 0, false);
+               p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
                if (p == NULL)
                        return (NULL);
                arena_prof_promoted(p, usize);
        } else
-               p = iralloc(tsd, oldptr, usize, 0, false);
+               p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
 
        return (p);
 }
 
 JEMALLOC_ALWAYS_INLINE_C void *
-irealloc_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize)
+irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
 {
        void *p;
+       bool prof_active;
        prof_tctx_t *old_tctx, *tctx;
 
-       old_tctx = prof_tctx_get(oldptr);
-       tctx = prof_alloc_prep(tsd, usize, true);
+       prof_active = prof_active_get_unlocked();
+       old_tctx = prof_tctx_get(old_ptr);
+       tctx = prof_alloc_prep(tsd, usize, prof_active, true);
        if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
-               p = irealloc_prof_sample(tsd, oldptr, usize, tctx);
+               p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
        else
-               p = iralloc(tsd, oldptr, usize, 0, false);
-       if (p == NULL)
+               p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
+       if (unlikely(p == NULL)) {
+               prof_alloc_rollback(tsd, tctx, true);
                return (NULL);
-       prof_realloc(tsd, p, usize, tctx, true, old_usize, old_tctx);
+       }
+       prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
+           old_tctx);
 
        return (p);
 }
 
 JEMALLOC_INLINE_C void
-ifree(tsd_t *tsd, void *ptr, bool try_tcache)
+ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
 {
        size_t usize;
        UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
        assert(ptr != NULL);
-       assert(malloc_initialized || IS_INITIALIZER);
+       assert(malloc_initialized() || IS_INITIALIZER);
 
        if (config_prof && opt_prof) {
                usize = isalloc(ptr, config_prof);
@@ -1234,19 +1814,24 @@ ifree(tsd_t *tsd, void *ptr, bool try_tcache)
                usize = isalloc(ptr, config_prof);
        if (config_stats)
                *tsd_thread_deallocatedp_get(tsd) += usize;
-       if (config_valgrind && unlikely(in_valgrind))
-               rzsize = p2rz(ptr);
-       iqalloc(tsd, ptr, try_tcache);
-       JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+
+       if (likely(!slow_path))
+               iqalloc(tsd, ptr, tcache, false);
+       else {
+               if (config_valgrind && unlikely(in_valgrind))
+                       rzsize = p2rz(ptr);
+               iqalloc(tsd, ptr, tcache, true);
+               JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+       }
 }
 
 JEMALLOC_INLINE_C void
-isfree(tsd_t *tsd, void *ptr, size_t usize, bool try_tcache)
+isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
 {
        UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
        assert(ptr != NULL);
-       assert(malloc_initialized || IS_INITIALIZER);
+       assert(malloc_initialized() || IS_INITIALIZER);
 
        if (config_prof && opt_prof)
                prof_free(tsd, ptr, usize);
@@ -1254,11 +1839,13 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, bool try_tcache)
                *tsd_thread_deallocatedp_get(tsd) += usize;
        if (config_valgrind && unlikely(in_valgrind))
                rzsize = p2rz(ptr);
-       isqalloc(tsd, ptr, usize, try_tcache);
+       isqalloc(tsd, ptr, usize, tcache);
        JEMALLOC_VALGRIND_FREE(ptr, rzsize);
 }
 
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
 je_realloc(void *ptr, size_t size)
 {
        void *ret;
@@ -1272,35 +1859,37 @@ je_realloc(void *ptr, size_t size)
                        /* realloc(ptr, 0) is equivalent to free(ptr). */
                        UTRACE(ptr, 0, 0);
                        tsd = tsd_fetch();
-                       ifree(tsd, ptr, true);
+                       ifree(tsd, ptr, tcache_get(tsd, false), true);
                        return (NULL);
                }
                size = 1;
        }
 
        if (likely(ptr != NULL)) {
-               assert(malloc_initialized || IS_INITIALIZER);
+               assert(malloc_initialized() || IS_INITIALIZER);
                malloc_thread_init();
                tsd = tsd_fetch();
 
-               if ((config_prof && opt_prof) || config_stats ||
-                   (config_valgrind && unlikely(in_valgrind)))
-                       old_usize = isalloc(ptr, config_prof);
+               old_usize = isalloc(ptr, config_prof);
                if (config_valgrind && unlikely(in_valgrind))
                        old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
 
                if (config_prof && opt_prof) {
                        usize = s2u(size);
-                       ret = irealloc_prof(tsd, ptr, old_usize, usize);
+                       ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
+                           NULL : irealloc_prof(tsd, ptr, old_usize, usize);
                } else {
                        if (config_stats || (config_valgrind &&
                            unlikely(in_valgrind)))
                                usize = s2u(size);
-                       ret = iralloc(tsd, ptr, size, 0, false);
+                       ret = iralloc(tsd, ptr, old_usize, size, 0, false);
                }
        } else {
                /* realloc(NULL, size) is equivalent to malloc(size). */
-               ret = imalloc_body(size, &tsd, &usize);
+               if (likely(!malloc_slow))
+                       ret = imalloc_body(size, &tsd, &usize, false);
+               else
+                       ret = imalloc_body(size, &tsd, &usize, true);
        }
 
        if (unlikely(ret == NULL)) {
@@ -1322,13 +1911,18 @@ je_realloc(void *ptr, size_t size)
        return (ret);
 }
 
-void
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
 je_free(void *ptr)
 {
 
        UTRACE(ptr, 0, 0);
-       if (likely(ptr != NULL))
-               ifree(tsd_fetch(), ptr, true);
+       if (likely(ptr != NULL)) {
+               tsd_t *tsd = tsd_fetch();
+               if (likely(!malloc_slow))
+                       ifree(tsd, ptr, tcache_get(tsd, false), false);
+               else
+                       ifree(tsd, ptr, tcache_get(tsd, false), true);
+       }
 }
 
 /*
@@ -1340,22 +1934,28 @@ je_free(void *ptr)
  */
 
 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc)
 je_memalign(size_t alignment, size_t size)
 {
        void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
-       imemalign(&ret, alignment, size, 1);
+       if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
+               ret = NULL;
        JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
        return (ret);
 }
 #endif
 
 #ifdef JEMALLOC_OVERRIDE_VALLOC
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc)
 je_valloc(size_t size)
 {
        void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
-       imemalign(&ret, PAGE, size, 1);
+       if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
+               ret = NULL;
        JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
        return (ret);
 }
@@ -1396,9 +1996,9 @@ JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
  * Begin non-standard functions.
  */
 
-JEMALLOC_ALWAYS_INLINE_C void
-imallocx_flags_decode_hard(size_t size, int flags, size_t *usize,
-    size_t *alignment, bool *zero, bool *try_tcache, arena_t **arena)
+JEMALLOC_ALWAYS_INLINE_C bool
+imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
+    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
 {
 
        if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
@@ -1408,76 +2008,77 @@ imallocx_flags_decode_hard(size_t size, int flags, size_t *usize,
                *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
                *usize = sa2u(size, *alignment);
        }
+       if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
+               return (true);
        *zero = MALLOCX_ZERO_GET(flags);
+       if ((flags & MALLOCX_TCACHE_MASK) != 0) {
+               if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+                       *tcache = NULL;
+               else
+                       *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+       } else
+               *tcache = tcache_get(tsd, true);
        if ((flags & MALLOCX_ARENA_MASK) != 0) {
                unsigned arena_ind = MALLOCX_ARENA_GET(flags);
-               *try_tcache = false;
-               *arena = arenas[arena_ind];
-       } else {
-               *try_tcache = true;
+               *arena = arena_get(arena_ind, true);
+               if (unlikely(*arena == NULL))
+                       return (true);
+       } else
                *arena = NULL;
-       }
+       return (false);
 }
 
-JEMALLOC_ALWAYS_INLINE_C void
-imallocx_flags_decode(size_t size, int flags, size_t *usize, size_t *alignment,
-    bool *zero, bool *try_tcache, arena_t **arena)
+JEMALLOC_ALWAYS_INLINE_C bool
+imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
+    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
 {
 
        if (likely(flags == 0)) {
                *usize = s2u(size);
-               assert(usize != 0);
+               if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
+                       return (true);
                *alignment = 0;
                *zero = false;
-               *try_tcache = true;
+               *tcache = tcache_get(tsd, true);
                *arena = NULL;
+               return (false);
        } else {
-               imallocx_flags_decode_hard(size, flags, usize, alignment, zero,
-                   try_tcache, arena);
+               return (imallocx_flags_decode_hard(tsd, size, flags, usize,
+                   alignment, zero, tcache, arena));
        }
 }
 
 JEMALLOC_ALWAYS_INLINE_C void *
 imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
-    bool try_tcache, arena_t *arena)
-{
-
-       if (alignment != 0) {
-               return (ipalloct(tsd, usize, alignment, zero, try_tcache,
-                   arena));
-       }
-       if (zero)
-               return (icalloct(tsd, usize, try_tcache, arena));
-       return (imalloct(tsd, usize, try_tcache, arena));
-}
-
-JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_maybe_flags(tsd_t *tsd, size_t size, int flags, size_t usize,
-    size_t alignment, bool zero, bool try_tcache, arena_t *arena)
+    tcache_t *tcache, arena_t *arena)
 {
+       szind_t ind;
 
-       if (likely(flags == 0))
-               return (imalloc(tsd, size));
-       return (imallocx_flags(tsd, usize, alignment, zero, try_tcache, arena));
+       if (unlikely(alignment != 0))
+               return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
+       ind = size2index(usize);
+       assert(ind < NSIZES);
+       if (unlikely(zero))
+               return (icalloct(tsd, usize, ind, tcache, arena));
+       return (imalloct(tsd, usize, ind, tcache, arena));
 }
 
 static void *
-imallocx_prof_sample(tsd_t *tsd, size_t size, int flags, size_t usize,
-    size_t alignment, bool zero, bool try_tcache, arena_t *arena)
+imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+    tcache_t *tcache, arena_t *arena)
 {
        void *p;
 
        if (usize <= SMALL_MAXCLASS) {
                assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
                    sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
-               p = imalloct(tsd, LARGE_MINCLASS, try_tcache, arena);
+               p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
+                   arena);
                if (p == NULL)
                        return (NULL);
                arena_prof_promoted(p, usize);
-       } else {
-               p = imallocx_maybe_flags(tsd, size, flags, usize, alignment,
-                   zero, try_tcache, arena);
-       }
+       } else
+               p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);
 
        return (p);
 }
@@ -1488,19 +2089,19 @@ imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
        void *p;
        size_t alignment;
        bool zero;
-       bool try_tcache;
+       tcache_t *tcache;
        arena_t *arena;
        prof_tctx_t *tctx;
 
-       imallocx_flags_decode(size, flags, usize, &alignment, &zero,
-           &try_tcache, &arena);
-       tctx = prof_alloc_prep(tsd, *usize, true);
-       if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
-               p = imallocx_maybe_flags(tsd, size, flags, *usize, alignment,
-                   zero, try_tcache, arena);
-       } else if ((uintptr_t)tctx > (uintptr_t)1U) {
-               p = imallocx_prof_sample(tsd, size, flags, *usize, alignment,
-                   zero, try_tcache, arena);
+       if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
+           &zero, &tcache, &arena)))
+               return (NULL);
+       tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
+       if (likely((uintptr_t)tctx == (uintptr_t)1U))
+               p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
+       else if ((uintptr_t)tctx > (uintptr_t)1U) {
+               p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
+                   arena);
        } else
                p = NULL;
        if (unlikely(p == NULL)) {
@@ -1509,30 +2110,42 @@ imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
        }
        prof_malloc(p, *usize, tctx);
 
+       assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
        return (p);
 }
 
 JEMALLOC_ALWAYS_INLINE_C void *
 imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
 {
+       void *p;
        size_t alignment;
        bool zero;
-       bool try_tcache;
+       tcache_t *tcache;
        arena_t *arena;
 
        if (likely(flags == 0)) {
-               if (config_stats || (config_valgrind && unlikely(in_valgrind)))
-                       *usize = s2u(size);
-               return (imalloc(tsd, size));
+               szind_t ind = size2index(size);
+               if (unlikely(ind >= NSIZES))
+                       return (NULL);
+               if (config_stats || (config_valgrind &&
+                   unlikely(in_valgrind))) {
+                       *usize = index2size(ind);
+                       assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
+               }
+               return (imalloc(tsd, size, ind, true));
        }
 
-       imallocx_flags_decode_hard(size, flags, usize, &alignment, &zero,
-           &try_tcache, &arena);
-       return (imallocx_flags(tsd, *usize, alignment, zero, try_tcache,
-           arena));
+       if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
+           &alignment, &zero, &tcache, &arena)))
+               return (NULL);
+       p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
+       assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
+       return (p);
 }
 
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
 je_mallocx(size_t size, int flags)
 {
        tsd_t *tsd;
@@ -1569,51 +2182,53 @@ label_oom:
 }
 
 static void *
-irallocx_prof_sample(tsd_t *tsd, void *oldptr, size_t size, size_t alignment,
-    size_t usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
-    arena_t *arena, prof_tctx_t *tctx)
+irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
+    size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
+    prof_tctx_t *tctx)
 {
        void *p;
 
        if (tctx == NULL)
                return (NULL);
        if (usize <= SMALL_MAXCLASS) {
-               p = iralloct(tsd, oldptr, LARGE_MINCLASS, alignment, zero,
-                   try_tcache_alloc, try_tcache_dalloc, arena);
+               p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
+                   zero, tcache, arena);
                if (p == NULL)
                        return (NULL);
                arena_prof_promoted(p, usize);
        } else {
-               p = iralloct(tsd, oldptr, size, alignment, zero,
-                   try_tcache_alloc, try_tcache_dalloc, arena);
+               p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
+                   tcache, arena);
        }
 
        return (p);
 }
 
 JEMALLOC_ALWAYS_INLINE_C void *
-irallocx_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
-    size_t alignment, size_t *usize, bool zero, bool try_tcache_alloc,
-    bool try_tcache_dalloc, arena_t *arena)
+irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
+    size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
+    arena_t *arena)
 {
        void *p;
+       bool prof_active;
        prof_tctx_t *old_tctx, *tctx;
 
-       old_tctx = prof_tctx_get(oldptr);
-       tctx = prof_alloc_prep(tsd, *usize, false);
+       prof_active = prof_active_get_unlocked();
+       old_tctx = prof_tctx_get(old_ptr);
+       tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
        if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
-               p = irallocx_prof_sample(tsd, oldptr, size, alignment, *usize,
-                   zero, try_tcache_alloc, try_tcache_dalloc, arena, tctx);
+               p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
+                   alignment, zero, tcache, arena, tctx);
        } else {
-               p = iralloct(tsd, oldptr, size, alignment, zero,
-                   try_tcache_alloc, try_tcache_dalloc, arena);
+               p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
+                   tcache, arena);
        }
        if (unlikely(p == NULL)) {
-               prof_alloc_rollback(tsd, tctx, false);
+               prof_alloc_rollback(tsd, tctx, true);
                return (NULL);
        }
 
-       if (p == oldptr && alignment != 0) {
+       if (p == old_ptr && alignment != 0) {
                /*
                 * The allocation did not move, so it is possible that the size
                 * class is smaller than would guarantee the requested
@@ -1624,65 +2239,70 @@ irallocx_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
                 */
                *usize = isalloc(p, config_prof);
        }
-       prof_realloc(tsd, p, *usize, tctx, false, old_usize, old_tctx);
+       prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
+           old_usize, old_tctx);
 
        return (p);
 }
 
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
 je_rallocx(void *ptr, size_t size, int flags)
 {
        void *p;
        tsd_t *tsd;
        size_t usize;
-       UNUSED size_t old_usize JEMALLOC_CC_SILENCE_INIT(0);
+       size_t old_usize;
        UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
        size_t alignment = MALLOCX_ALIGN_GET(flags);
        bool zero = flags & MALLOCX_ZERO;
-       bool try_tcache_alloc, try_tcache_dalloc;
        arena_t *arena;
+       tcache_t *tcache;
 
        assert(ptr != NULL);
        assert(size != 0);
-       assert(malloc_initialized || IS_INITIALIZER);
+       assert(malloc_initialized() || IS_INITIALIZER);
        malloc_thread_init();
        tsd = tsd_fetch();
 
        if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
                unsigned arena_ind = MALLOCX_ARENA_GET(flags);
-               arena_chunk_t *chunk;
-               try_tcache_alloc = false;
-               chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-               try_tcache_dalloc = (chunk == ptr || chunk->arena !=
-                   arenas[arena_ind]);
-               arena = arenas[arena_ind];
-       } else {
-               try_tcache_alloc = true;
-               try_tcache_dalloc = true;
+               arena = arena_get(arena_ind, true);
+               if (unlikely(arena == NULL))
+                       goto label_oom;
+       } else
                arena = NULL;
-       }
 
-       if ((config_prof && opt_prof) || config_stats ||
-           ((config_valgrind && unlikely(in_valgrind))))
-               old_usize = isalloc(ptr, config_prof);
+       if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+               if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+                       tcache = NULL;
+               else
+                       tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+       } else
+               tcache = tcache_get(tsd, true);
+
+       old_usize = isalloc(ptr, config_prof);
        if (config_valgrind && unlikely(in_valgrind))
                old_rzsize = u2rz(old_usize);
 
        if (config_prof && opt_prof) {
                usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
-               assert(usize != 0);
+               if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+                       goto label_oom;
                p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
-                   zero, try_tcache_alloc, try_tcache_dalloc, arena);
+                   zero, tcache, arena);
                if (unlikely(p == NULL))
                        goto label_oom;
        } else {
-               p = iralloct(tsd, ptr, size, alignment, zero, try_tcache_alloc,
-                   try_tcache_dalloc, arena);
+               p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
+                    tcache, arena);
                if (unlikely(p == NULL))
                        goto label_oom;
                if (config_stats || (config_valgrind && unlikely(in_valgrind)))
                        usize = isalloc(p, config_prof);
        }
+       assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
 
        if (config_stats) {
                *tsd_thread_allocatedp_get(tsd) += usize;
@@ -1702,12 +2322,12 @@ label_oom:
 }
 
 JEMALLOC_ALWAYS_INLINE_C size_t
-ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
-    size_t alignment, bool zero, arena_t *arena)
+ixallocx_helper(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
+    size_t extra, size_t alignment, bool zero)
 {
        size_t usize;
 
-       if (ixalloc(ptr, size, extra, alignment, zero))
+       if (ixalloc(tsd, ptr, old_usize, size, extra, alignment, zero))
                return (old_usize);
        usize = isalloc(ptr, config_prof);
 
@@ -1715,39 +2335,28 @@ ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
 }
 
 static size_t
-ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
-    size_t alignment, size_t max_usize, bool zero, arena_t *arena,
-    prof_tctx_t *tctx)
+ixallocx_prof_sample(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
+    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
 {
        size_t usize;
 
        if (tctx == NULL)
                return (old_usize);
-       /* Use minimum usize to determine whether promotion may happen. */
-       if (((alignment == 0) ? s2u(size) : sa2u(size, alignment)) <=
-           SMALL_MAXCLASS) {
-               if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
-                   size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
-                   alignment, zero))
-                       return (old_usize);
-               usize = isalloc(ptr, config_prof);
-               if (max_usize < PAGE)
-                       arena_prof_promoted(ptr, usize);
-       } else {
-               usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
-                   zero, arena);
-       }
+       usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, alignment,
+           zero);
 
        return (usize);
 }
 
 JEMALLOC_ALWAYS_INLINE_C size_t
 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
-    size_t extra, size_t alignment, bool zero, arena_t *arena)
+    size_t extra, size_t alignment, bool zero)
 {
-       size_t max_usize, usize;
+       size_t usize_max, usize;
+       bool prof_active;
        prof_tctx_t *old_tctx, *tctx;
 
+       prof_active = prof_active_get_unlocked();
        old_tctx = prof_tctx_get(ptr);
        /*
         * usize isn't knowable before ixalloc() returns when extra is non-zero.
@@ -1755,26 +2364,41 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
         * prof_alloc_prep() to decide whether to capture a backtrace.
         * prof_realloc() will use the actual usize to decide whether to sample.
         */
-       max_usize = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
-           alignment);
-       tctx = prof_alloc_prep(tsd, max_usize, false);
+       if (alignment == 0) {
+               usize_max = s2u(size+extra);
+               assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
+       } else {
+               usize_max = sa2u(size+extra, alignment);
+               if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
+                       /*
+                        * usize_max is out of range, and chances are that
+                        * allocation will fail, but use the maximum possible
+                        * value and carry on with prof_alloc_prep(), just in
+                        * case allocation succeeds.
+                        */
+                       usize_max = HUGE_MAXCLASS;
+               }
+       }
+       tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
+
        if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
-               usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
-                   alignment, zero, max_usize, arena, tctx);
+               usize = ixallocx_prof_sample(tsd, ptr, old_usize, size, extra,
+                   alignment, zero, tctx);
        } else {
-               usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
-                   zero, arena);
+               usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
+                   alignment, zero);
        }
-       if (unlikely(usize == old_usize)) {
+       if (usize == old_usize) {
                prof_alloc_rollback(tsd, tctx, false);
                return (usize);
        }
-       prof_realloc(tsd, ptr, usize, tctx, false, old_usize, old_tctx);
+       prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
+           old_tctx);
 
        return (usize);
 }
 
-size_t
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
 je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 {
        tsd_t *tsd;
@@ -1782,31 +2406,41 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
        UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
        size_t alignment = MALLOCX_ALIGN_GET(flags);
        bool zero = flags & MALLOCX_ZERO;
-       arena_t *arena;
 
        assert(ptr != NULL);
        assert(size != 0);
        assert(SIZE_T_MAX - size >= extra);
-       assert(malloc_initialized || IS_INITIALIZER);
+       assert(malloc_initialized() || IS_INITIALIZER);
        malloc_thread_init();
        tsd = tsd_fetch();
 
-       if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
-               unsigned arena_ind = MALLOCX_ARENA_GET(flags);
-               arena = arenas[arena_ind];
-       } else
-               arena = NULL;
-
        old_usize = isalloc(ptr, config_prof);
+
+       /*
+        * The API explicitly absolves itself of protecting against (size +
+        * extra) numerical overflow, but we may need to clamp extra to avoid
+        * exceeding HUGE_MAXCLASS.
+        *
+        * Ordinarily, size limit checking is handled deeper down, but here we
+        * have to check as part of (size + extra) clamping, since we need the
+        * clamped value in the above helper functions.
+        */
+       if (unlikely(size > HUGE_MAXCLASS)) {
+               usize = old_usize;
+               goto label_not_resized;
+       }
+       if (unlikely(HUGE_MAXCLASS - size < extra))
+               extra = HUGE_MAXCLASS - size;
+
        if (config_valgrind && unlikely(in_valgrind))
                old_rzsize = u2rz(old_usize);
 
        if (config_prof && opt_prof) {
                usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
-                   alignment, zero, arena);
+                   alignment, zero);
        } else {
-               usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
-                   zero, arena);
+               usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
+                   alignment, zero);
        }
        if (unlikely(usize == old_usize))
                goto label_not_resized;
@@ -1822,42 +2456,43 @@ label_not_resized:
        return (usize);
 }
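
A toy illustration of the (size + extra) clamping described in the comment above, using an invented FAKE_HUGE_MAXCLASS in place of the real size-class limit:

    #include <assert.h>
    #include <stddef.h>

    /* Invented limit; the real HUGE_MAXCLASS depends on the size-class configuration. */
    #define FAKE_HUGE_MAXCLASS      ((size_t)1 << 30)

    static size_t
    clamp_extra(size_t size, size_t extra)
    {
            /* Callers are expected to have rejected size > FAKE_HUGE_MAXCLASS already. */
            assert(size <= FAKE_HUGE_MAXCLASS);
            if (FAKE_HUGE_MAXCLASS - size < extra)
                    extra = FAKE_HUGE_MAXCLASS - size;
            return (extra);
    }

    int
    main(void)
    {
            /* Plenty of headroom: extra passes through unchanged. */
            assert(clamp_extra(4096, 4096) == 4096);
            /* Near the limit: extra is clamped so size + extra == FAKE_HUGE_MAXCLASS. */
            assert(clamp_extra(FAKE_HUGE_MAXCLASS - 100, 4096) == 100);
            return (0);
    }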
 
-size_t
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+JEMALLOC_ATTR(pure)
 je_sallocx(const void *ptr, int flags)
 {
        size_t usize;
 
-       assert(malloc_initialized || IS_INITIALIZER);
+       assert(malloc_initialized() || IS_INITIALIZER);
        malloc_thread_init();
 
        if (config_ivsalloc)
                usize = ivsalloc(ptr, config_prof);
-       else {
-               assert(ptr != NULL);
+       else
                usize = isalloc(ptr, config_prof);
-       }
 
        return (usize);
 }
 
-void
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
 je_dallocx(void *ptr, int flags)
 {
-       bool try_tcache;
+       tsd_t *tsd;
+       tcache_t *tcache;
 
        assert(ptr != NULL);
-       assert(malloc_initialized || IS_INITIALIZER);
+       assert(malloc_initialized() || IS_INITIALIZER);
 
-       if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
-               unsigned arena_ind = MALLOCX_ARENA_GET(flags);
-               arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-               try_tcache = (chunk == ptr || chunk->arena !=
-                   arenas[arena_ind]);
+       tsd = tsd_fetch();
+       if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+               if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+                       tcache = NULL;
+               else
+                       tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
        } else
-               try_tcache = true;
+               tcache = tcache_get(tsd, false);
 
        UTRACE(ptr, 0, 0);
-       ifree(tsd_fetch(), ptr, try_tcache);
+       ifree(tsd_fetch(), ptr, tcache, true);
 }
 
 JEMALLOC_ALWAYS_INLINE_C size_t
@@ -1869,46 +2504,53 @@ inallocx(size_t size, int flags)
                usize = s2u(size);
        else
                usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
-       assert(usize != 0);
        return (usize);
 }
 
-void
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
 je_sdallocx(void *ptr, size_t size, int flags)
 {
-       bool try_tcache;
+       tsd_t *tsd;
+       tcache_t *tcache;
        size_t usize;
 
        assert(ptr != NULL);
-       assert(malloc_initialized || IS_INITIALIZER);
+       assert(malloc_initialized() || IS_INITIALIZER);
        usize = inallocx(size, flags);
        assert(usize == isalloc(ptr, config_prof));
 
-       if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
-               unsigned arena_ind = MALLOCX_ARENA_GET(flags);
-               arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-               try_tcache = (chunk == ptr || chunk->arena !=
-                   arenas[arena_ind]);
+       tsd = tsd_fetch();
+       if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+               if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+                       tcache = NULL;
+               else
+                       tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
        } else
-               try_tcache = true;
+               tcache = tcache_get(tsd, false);
 
        UTRACE(ptr, 0, 0);
-       isfree(tsd_fetch(), ptr, usize, try_tcache);
+       isfree(tsd, ptr, usize, tcache);
 }
 
-size_t
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+JEMALLOC_ATTR(pure)
 je_nallocx(size_t size, int flags)
 {
+       size_t usize;
 
        assert(size != 0);
 
        if (unlikely(malloc_init()))
                return (0);
 
-       return (inallocx(size, flags));
+       usize = inallocx(size, flags);
+       if (unlikely(usize > HUGE_MAXCLASS))
+               return (0);
+
+       return (usize);
 }
 
-int
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
     size_t newlen)
 {
@@ -1919,7 +2561,7 @@ je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
        return (ctl_byname(name, oldp, oldlenp, newp, newlen));
 }
 
-int
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
 {
 
@@ -1929,7 +2571,7 @@ je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
        return (ctl_nametomib(name, mibp, miblenp));
 }
 
-int
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
   void *newp, size_t newlen)
 {
@@ -1940,7 +2582,7 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
        return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
 }
 
-void
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
     const char *opts)
 {
@@ -1948,18 +2590,18 @@ je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
        stats_print(write_cb, cbopaque, opts);
 }
 
-size_t
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
 {
        size_t ret;
 
-       assert(malloc_initialized || IS_INITIALIZER);
+       assert(malloc_initialized() || IS_INITIALIZER);
        malloc_thread_init();
 
        if (config_ivsalloc)
                ret = ivsalloc(ptr, config_prof);
        else
-               ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
+               ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
 
        return (ret);
 }
@@ -1982,9 +2624,9 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
  * fork/malloc races via the following functions it registers during
  * initialization using pthread_atfork(), but of course that does no good if
  * the allocator isn't fully initialized at fork time.  The following library
- * constructor is a partial solution to this problem.  It may still possible to
- * trigger the deadlock described above, but doing so would involve forking via
- * a library constructor that runs before jemalloc's runs.
+ * constructor is a partial solution to this problem.  It may still be possible
+ * to trigger the deadlock described above, but doing so would involve forking
+ * via a library constructor that runs before jemalloc's runs.
  */
 JEMALLOC_ATTR(constructor)
 static void
@@ -2002,25 +2644,26 @@ JEMALLOC_EXPORT void
 _malloc_prefork(void)
 #endif
 {
-       unsigned i;
+       unsigned i, narenas;
 
 #ifdef JEMALLOC_MUTEX_INIT_CB
-       if (!malloc_initialized)
+       if (!malloc_initialized())
                return;
 #endif
-       assert(malloc_initialized);
+       assert(malloc_initialized());
 
        /* Acquire all mutexes in a safe order. */
        ctl_prefork();
        prof_prefork();
        malloc_mutex_prefork(&arenas_lock);
-       for (i = 0; i < narenas_total; i++) {
-               if (arenas[i] != NULL)
-                       arena_prefork(arenas[i]);
+       for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+               arena_t *arena;
+
+               if ((arena = arena_get(i, false)) != NULL)
+                       arena_prefork(arena);
        }
        chunk_prefork();
        base_prefork();
-       huge_prefork();
 }
 
 #ifndef JEMALLOC_MUTEX_INIT_CB
@@ -2031,21 +2674,22 @@ JEMALLOC_EXPORT void
 _malloc_postfork(void)
 #endif
 {
-       unsigned i;
+       unsigned i, narenas;
 
 #ifdef JEMALLOC_MUTEX_INIT_CB
-       if (!malloc_initialized)
+       if (!malloc_initialized())
                return;
 #endif
-       assert(malloc_initialized);
+       assert(malloc_initialized());
 
        /* Release all mutexes, now that fork() has completed. */
-       huge_postfork_parent();
        base_postfork_parent();
        chunk_postfork_parent();
-       for (i = 0; i < narenas_total; i++) {
-               if (arenas[i] != NULL)
-                       arena_postfork_parent(arenas[i]);
+       for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+               arena_t *arena;
+
+               if ((arena = arena_get(i, false)) != NULL)
+                       arena_postfork_parent(arena);
        }
        malloc_mutex_postfork_parent(&arenas_lock);
        prof_postfork_parent();
@@ -2055,17 +2699,18 @@ _malloc_postfork(void)
 void
 jemalloc_postfork_child(void)
 {
-       unsigned i;
+       unsigned i, narenas;
 
-       assert(malloc_initialized);
+       assert(malloc_initialized());
 
        /* Release all mutexes, now that fork() has completed. */
-       huge_postfork_child();
        base_postfork_child();
        chunk_postfork_child();
-       for (i = 0; i < narenas_total; i++) {
-               if (arenas[i] != NULL)
-                       arena_postfork_child(arenas[i]);
+       for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+               arena_t *arena;
+
+               if ((arena = arena_get(i, false)) != NULL)
+                       arena_postfork_child(arena);
        }
        malloc_mutex_postfork_child(&arenas_lock);
        prof_postfork_child();
@@ -2073,55 +2718,3 @@ jemalloc_postfork_child(void)
 }
 
 /******************************************************************************/
-/*
- * The following functions are used for TLS allocation/deallocation in static
- * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
- * is that these avoid accessing TLS variables.
- */
-
-static void *
-a0alloc(size_t size, bool zero)
-{
-
-       if (unlikely(malloc_init()))
-               return (NULL);
-
-       if (size == 0)
-               size = 1;
-
-       if (size <= arena_maxclass)
-               return (arena_malloc(NULL, arenas[0], size, zero, false));
-       else
-               return (huge_malloc(NULL, arenas[0], size, zero));
-}
-
-void *
-a0malloc(size_t size)
-{
-
-       return (a0alloc(size, false));
-}
-
-void *
-a0calloc(size_t num, size_t size)
-{
-
-       return (a0alloc(num * size, true));
-}
-
-void
-a0free(void *ptr)
-{
-       arena_chunk_t *chunk;
-
-       if (ptr == NULL)
-               return;
-
-       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-       if (chunk != ptr)
-               arena_dalloc(NULL, chunk, ptr, false);
-       else
-               huge_dalloc(ptr);
-}
-
-/******************************************************************************/
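
For context on the prefork/postfork handlers above: they are meant to be registered with pthread_atfork(), roughly as in the generic sketch below (the lock and function names here are placeholders, not jemalloc's).

    #include <pthread.h>

    /* One lock standing in for the allocator's many mutexes. */
    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

    static void prefork(void)         { pthread_mutex_lock(&state_lock); }
    static void postfork_parent(void) { pthread_mutex_unlock(&state_lock); }
    static void postfork_child(void)  { pthread_mutex_unlock(&state_lock); }

    /*
     * Registering the handlers (e.g. from a library constructor) guarantees the
     * lock is not held across fork(), so a child never inherits a lock whose
     * owning thread does not exist in its address space.
     */
    void
    register_fork_handlers(void)
    {
            pthread_atfork(prefork, postfork_parent, postfork_child);
    }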
index 788eca387032fcd55e81caa56846bf290800f256..2d47af976c58b9208f0356e329150c6c0f09a7fa 100644 (file)
@@ -73,9 +73,13 @@ malloc_mutex_init(malloc_mutex_t *mutex)
 {
 
 #ifdef _WIN32
+#  if _WIN32_WINNT >= 0x0600
+       InitializeSRWLock(&mutex->lock);
+#  else
        if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
            _CRT_SPINCOUNT))
                return (true);
+#  endif
 #elif (defined(JEMALLOC_OSSPIN))
        mutex->lock = 0;
 #elif (defined(JEMALLOC_MUTEX_INIT_CB))
@@ -83,8 +87,8 @@ malloc_mutex_init(malloc_mutex_t *mutex)
                mutex->postponed_next = postponed_mutexes;
                postponed_mutexes = mutex;
        } else {
-               if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) !=
-                   0)
+               if (_pthread_mutex_init_calloc_cb(&mutex->lock,
+                   bootstrap_calloc) != 0)
                        return (true);
        }
 #else
@@ -140,7 +144,7 @@ mutex_boot(void)
        postpone_init = false;
        while (postponed_mutexes != NULL) {
                if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
-                   base_calloc) != 0)
+                   bootstrap_calloc) != 0)
                        return (true);
                postponed_mutexes = postponed_mutexes->postponed_next;
        }
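
A stripped-down sketch of the postponed-initialization pattern visible in mutex_boot() above: mutexes created before the init callback can safely run are queued on an intrusive list and initialized later, once bootstrapping allows it (all names below are illustrative, not jemalloc's):

    #include <stdbool.h>
    #include <stddef.h>
    #include <pthread.h>

    typedef struct my_mutex_s my_mutex_t;
    struct my_mutex_s {
            pthread_mutex_t lock;
            my_mutex_t      *postponed_next;
    };

    static bool       postpone_init = true;
    static my_mutex_t *postponed_head = NULL;

    bool
    my_mutex_init(my_mutex_t *m)    /* returns true on failure, as above */
    {
            if (postpone_init) {
                    /* Too early to initialize: queue it and report success. */
                    m->postponed_next = postponed_head;
                    postponed_head = m;
                    return (false);
            }
            return (pthread_mutex_init(&m->lock, NULL) != 0);
    }

    bool
    my_mutex_boot(void)
    {
            postpone_init = false;
            while (postponed_head != NULL) {
                    if (pthread_mutex_init(&postponed_head->lock, NULL) != 0)
                            return (true);
                    postponed_head = postponed_head->postponed_next;
            }
            return (false);
    }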
diff --git a/src/jemalloc/src/nstime.c b/src/jemalloc/src/nstime.c
new file mode 100644 (file)
index 0000000..4cf90b5
--- /dev/null
@@ -0,0 +1,148 @@
+#include "jemalloc/internal/jemalloc_internal.h"
+
+#define        BILLION UINT64_C(1000000000)
+
+void
+nstime_init(nstime_t *time, uint64_t ns)
+{
+
+       time->ns = ns;
+}
+
+void
+nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec)
+{
+
+       time->ns = sec * BILLION + nsec;
+}
+
+uint64_t
+nstime_ns(const nstime_t *time)
+{
+
+       return (time->ns);
+}
+
+uint64_t
+nstime_sec(const nstime_t *time)
+{
+
+       return (time->ns / BILLION);
+}
+
+uint64_t
+nstime_nsec(const nstime_t *time)
+{
+
+       return (time->ns % BILLION);
+}
+
+void
+nstime_copy(nstime_t *time, const nstime_t *source)
+{
+
+       *time = *source;
+}
+
+int
+nstime_compare(const nstime_t *a, const nstime_t *b)
+{
+
+       return ((a->ns > b->ns) - (a->ns < b->ns));
+}
+
+void
+nstime_add(nstime_t *time, const nstime_t *addend)
+{
+
+       assert(UINT64_MAX - time->ns >= addend->ns);
+
+       time->ns += addend->ns;
+}
+
+void
+nstime_subtract(nstime_t *time, const nstime_t *subtrahend)
+{
+
+       assert(nstime_compare(time, subtrahend) >= 0);
+
+       time->ns -= subtrahend->ns;
+}
+
+void
+nstime_imultiply(nstime_t *time, uint64_t multiplier)
+{
+
+       assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
+           2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
+
+       time->ns *= multiplier;
+}
+
+void
+nstime_idivide(nstime_t *time, uint64_t divisor)
+{
+
+       assert(divisor != 0);
+
+       time->ns /= divisor;
+}
+
+uint64_t
+nstime_divide(const nstime_t *time, const nstime_t *divisor)
+{
+
+       assert(divisor->ns != 0);
+
+       return (time->ns / divisor->ns);
+}
+
+#ifdef JEMALLOC_JET
+#undef nstime_update
+#define        nstime_update JEMALLOC_N(nstime_update_impl)
+#endif
+bool
+nstime_update(nstime_t *time)
+{
+       nstime_t old_time;
+
+       nstime_copy(&old_time, time);
+
+#ifdef _WIN32
+       {
+               FILETIME ft;
+               uint64_t ticks;
+               GetSystemTimeAsFileTime(&ft);
+               ticks = (((uint64_t)ft.dwHighDateTime) << 32) |
+                   ft.dwLowDateTime;
+               time->ns = ticks * 100;
+       }
+#elif JEMALLOC_CLOCK_GETTIME
+       {
+               struct timespec ts;
+
+               if (sysconf(_SC_MONOTONIC_CLOCK) > 0)
+                       clock_gettime(CLOCK_MONOTONIC, &ts);
+               else
+                       clock_gettime(CLOCK_REALTIME, &ts);
+               time->ns = ts.tv_sec * BILLION + ts.tv_nsec;
+       }
+#else
+       struct timeval tv;
+       gettimeofday(&tv, NULL);
+       time->ns = tv.tv_sec * BILLION + tv.tv_usec * 1000;
+#endif
+
+       /* Handle non-monotonic clocks. */
+       if (unlikely(nstime_compare(&old_time, time) > 0)) {
+               nstime_copy(time, &old_time);
+               return (true);
+       }
+
+       return (false);
+}
+#ifdef JEMALLOC_JET
+#undef nstime_update
+#define        nstime_update JEMALLOC_N(nstime_update)
+nstime_update_t *nstime_update = JEMALLOC_N(nstime_update_impl);
+#endif
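
Assuming the internal header that declares nstime_t and the prototypes above, a short sketch of how this API composes to time an interval from inside the tree (time_call() is illustrative, not part of jemalloc):

    #include "jemalloc/internal/jemalloc_internal.h"

    /* Roughly how long does fn() take, in nanoseconds? */
    static uint64_t
    time_call(void (*fn)(void))
    {
            nstime_t start, end;

            nstime_init(&start, 0);
            nstime_update(&start);          /* read the clock */

            fn();

            nstime_copy(&end, &start);
            nstime_update(&end);            /* returns true if the clock went backwards */
            nstime_subtract(&end, &start);
            return (nstime_ns(&end));
    }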
diff --git a/src/jemalloc/src/pages.c b/src/jemalloc/src/pages.c
new file mode 100644 (file)
index 0000000..83a167f
--- /dev/null
@@ -0,0 +1,173 @@
+#define        JEMALLOC_PAGES_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+
+void *
+pages_map(void *addr, size_t size)
+{
+       void *ret;
+
+       assert(size != 0);
+
+#ifdef _WIN32
+       /*
+        * If VirtualAlloc can't allocate at the given address when one is
+        * given, it fails and returns NULL.
+        */
+       ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
+           PAGE_READWRITE);
+#else
+       /*
+        * We don't use MAP_FIXED here, because it can cause the *replacement*
+        * of existing mappings, and we only want to create new mappings.
+        */
+       ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
+           -1, 0);
+       assert(ret != NULL);
+
+       if (ret == MAP_FAILED)
+               ret = NULL;
+       else if (addr != NULL && ret != addr) {
+               /*
+                * We succeeded in mapping memory, but not in the right place.
+                */
+               pages_unmap(ret, size);
+               ret = NULL;
+       }
+#endif
+       assert(ret == NULL || (addr == NULL && ret != addr)
+           || (addr != NULL && ret == addr));
+       return (ret);
+}
+
+void
+pages_unmap(void *addr, size_t size)
+{
+
+#ifdef _WIN32
+       if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
+#else
+       if (munmap(addr, size) == -1)
+#endif
+       {
+               char buf[BUFERROR_BUF];
+
+               buferror(get_errno(), buf, sizeof(buf));
+               malloc_printf("<jemalloc>: Error in "
+#ifdef _WIN32
+                             "VirtualFree"
+#else
+                             "munmap"
+#endif
+                             "(): %s\n", buf);
+               if (opt_abort)
+                       abort();
+       }
+}
+
+void *
+pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
+{
+       void *ret = (void *)((uintptr_t)addr + leadsize);
+
+       assert(alloc_size >= leadsize + size);
+#ifdef _WIN32
+       {
+               void *new_addr;
+
+               pages_unmap(addr, alloc_size);
+               new_addr = pages_map(ret, size);
+               if (new_addr == ret)
+                       return (ret);
+               if (new_addr)
+                       pages_unmap(new_addr, size);
+               return (NULL);
+       }
+#else
+       {
+               size_t trailsize = alloc_size - leadsize - size;
+
+               if (leadsize != 0)
+                       pages_unmap(addr, leadsize);
+               if (trailsize != 0)
+                       pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+               return (ret);
+       }
+#endif
+}
+
+static bool
+pages_commit_impl(void *addr, size_t size, bool commit)
+{
+
+#ifndef _WIN32
+       /*
+        * The following decommit/commit implementation is functional, but
+        * always disabled because it doesn't add value beyond improved
+        * debugging (at the cost of extra system calls) on systems that
+        * overcommit.
+        */
+       if (false) {
+               int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
+               void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
+                   MAP_FIXED, -1, 0);
+               if (result == MAP_FAILED)
+                       return (true);
+               if (result != addr) {
+                       /*
+                        * We succeeded in mapping memory, but not in the right
+                        * place.
+                        */
+                       pages_unmap(result, size);
+                       return (true);
+               }
+               return (false);
+       }
+#endif
+       return (true);
+}
+
+bool
+pages_commit(void *addr, size_t size)
+{
+
+       return (pages_commit_impl(addr, size, true));
+}
+
+bool
+pages_decommit(void *addr, size_t size)
+{
+
+       return (pages_commit_impl(addr, size, false));
+}
+
+bool
+pages_purge(void *addr, size_t size)
+{
+       bool unzeroed;
+
+#ifdef _WIN32
+       VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
+       unzeroed = true;
+#elif defined(JEMALLOC_HAVE_MADVISE)
+#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
+#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
+#    define JEMALLOC_MADV_ZEROS true
+#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+#    define JEMALLOC_MADV_PURGE MADV_FREE
+#    define JEMALLOC_MADV_ZEROS false
+#  else
+#    error "No madvise(2) flag defined for purging unused dirty pages."
+#  endif
+       int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
+       unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
+#  undef JEMALLOC_MADV_PURGE
+#  undef JEMALLOC_MADV_ZEROS
+#else
+       /* Last resort no-op. */
+       unzeroed = true;
+#endif
+       return (unzeroed);
+}
+
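
pages_trim() above supports the usual over-allocate-then-trim idiom for obtaining aligned mappings; a sketch of that idiom, assuming the caller handles overflow of size + alignment (map_aligned() is illustrative, not a jemalloc function):

    #include <stddef.h>
    #include <stdint.h>

    /* Prototypes matching the definitions above. */
    void    *pages_map(void *addr, size_t size);
    void    *pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size);

    /* Map size bytes at an address aligned to alignment (a power of two). */
    static void *
    map_aligned(size_t size, size_t alignment)
    {
            size_t alloc_size = size + alignment;   /* worst-case padding */
            void *addr = pages_map(NULL, alloc_size);
            size_t leadsize;

            if (addr == NULL)
                    return (NULL);
            /* Distance from addr up to the next alignment boundary. */
            leadsize = (alignment - ((uintptr_t)addr & (alignment - 1))) &
                (alignment - 1);
            return (pages_trim(addr, alloc_size, leadsize, size));
    }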
diff --git a/src/jemalloc/src/prng.c b/src/jemalloc/src/prng.c
new file mode 100644 (file)
index 0000000..76646a2
--- /dev/null
@@ -0,0 +1,2 @@
+#define        JEMALLOC_PRNG_C_
+#include "jemalloc/internal/jemalloc_internal.h"
index a6cea92fd833ee20e43545b558500da3d55b522c..b38722770d9a565a6466b1d131ea8f279ea721fa 100644 (file)
@@ -20,7 +20,7 @@ bool          opt_prof_thread_active_init = true;
 size_t         opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
 ssize_t                opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
 bool           opt_prof_gdump = false;
-bool           opt_prof_final = true;
+bool           opt_prof_final = false;
 bool           opt_prof_leak = false;
 bool           opt_prof_accum = false;
 char           opt_prof_prefix[
@@ -44,6 +44,13 @@ static malloc_mutex_t        prof_active_mtx;
 static bool            prof_thread_active_init;
 static malloc_mutex_t  prof_thread_active_init_mtx;
 
+/*
+ * Initialized as opt_prof_gdump, and accessed via
+ * prof_gdump_[gs]et{_unlocked,}().
+ */
+bool                   prof_gdump_val;
+static malloc_mutex_t  prof_gdump_mtx;
+
 uint64_t       prof_interval = 0;
 
 size_t         lg_prof_sample;
@@ -102,7 +109,7 @@ static char         prof_dump_buf[
     1
 #endif
 ];
-static unsigned                prof_dump_buf_end;
+static size_t          prof_dump_buf_end;
 static int             prof_dump_fd;
 
 /* Do not dump any profiles until bootstrapping is complete. */
@@ -128,10 +135,22 @@ static char       *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
 JEMALLOC_INLINE_C int
 prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
 {
-       uint64_t a_uid = a->tdata->thr_uid;
-       uint64_t b_uid = b->tdata->thr_uid;
-
-       return ((a_uid > b_uid) - (a_uid < b_uid));
+       uint64_t a_thr_uid = a->thr_uid;
+       uint64_t b_thr_uid = b->thr_uid;
+       int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
+       if (ret == 0) {
+               uint64_t a_thr_discrim = a->thr_discrim;
+               uint64_t b_thr_discrim = b->thr_discrim;
+               ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
+                   b_thr_discrim);
+               if (ret == 0) {
+                       uint64_t a_tctx_uid = a->tctx_uid;
+                       uint64_t b_tctx_uid = b->tctx_uid;
+                       ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
+                           b_tctx_uid);
+               }
+       }
+       return (ret);
 }
 
 rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
@@ -190,7 +209,7 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
                 */
                tdata = prof_tdata_get(tsd, true);
                if (tdata != NULL)
-                       prof_sample_threshold_update(tctx->tdata);
+                       prof_sample_threshold_update(tdata);
        }
 
        if ((uintptr_t)tctx > (uintptr_t)1U) {
@@ -204,8 +223,10 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
 }
 
 void
-prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx) {
-       prof_tctx_set(ptr, tctx);
+prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
+{
+
+       prof_tctx_set(ptr, usize, tctx);
 
        malloc_mutex_lock(tctx->tdata->lock);
        tctx->cnts.curobjs++;
@@ -244,38 +265,45 @@ bt_init(prof_bt_t *bt, void **vec)
        bt->len = 0;
 }
 
-static inline void
-prof_enter(prof_tdata_t *tdata)
+JEMALLOC_INLINE_C void
+prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
 {
 
        cassert(config_prof);
+       assert(tdata == prof_tdata_get(tsd, false));
 
-       assert(!tdata->enq);
-       tdata->enq = true;
+       if (tdata != NULL) {
+               assert(!tdata->enq);
+               tdata->enq = true;
+       }
 
        malloc_mutex_lock(&bt2gctx_mtx);
 }
 
-static inline void
-prof_leave(prof_tdata_t *tdata)
+JEMALLOC_INLINE_C void
+prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
 {
-       bool idump, gdump;
 
        cassert(config_prof);
+       assert(tdata == prof_tdata_get(tsd, false));
 
        malloc_mutex_unlock(&bt2gctx_mtx);
 
-       assert(tdata->enq);
-       tdata->enq = false;
-       idump = tdata->enq_idump;
-       tdata->enq_idump = false;
-       gdump = tdata->enq_gdump;
-       tdata->enq_gdump = false;
+       if (tdata != NULL) {
+               bool idump, gdump;
+
+               assert(tdata->enq);
+               tdata->enq = false;
+               idump = tdata->enq_idump;
+               tdata->enq_idump = false;
+               gdump = tdata->enq_gdump;
+               tdata->enq_gdump = false;
 
-       if (idump)
-               prof_idump();
-       if (gdump)
-               prof_gdump();
+               if (idump)
+                       prof_idump();
+               if (gdump)
+                       prof_gdump();
+       }
 }
 
 #ifdef JEMALLOC_PROF_LIBUNWIND
@@ -523,8 +551,9 @@ prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
        /*
         * Create a single allocation that has space for vec of length bt->len.
         */
-       prof_gctx_t *gctx = (prof_gctx_t *)imalloc(tsd, offsetof(prof_gctx_t,
-           vec) + (bt->len * sizeof(void *)));
+       size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
+       prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, size,
+           size2index(size), false, tcache_get(tsd, true), true, NULL, true);
        if (gctx == NULL)
                return (NULL);
        gctx->lock = prof_gctx_mutex_choose();
@@ -542,7 +571,8 @@ prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
 }
 
 static void
-prof_gctx_try_destroy(tsd_t *tsd, prof_gctx_t *gctx, prof_tdata_t *tdata)
+prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
+    prof_tdata_t *tdata)
 {
 
        cassert(config_prof);
@@ -554,17 +584,17 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_gctx_t *gctx, prof_tdata_t *tdata)
         * avoid a race between the main body of prof_tctx_destroy() and entry
         * into this function.
         */
-       prof_enter(tdata);
+       prof_enter(tsd, tdata_self);
        malloc_mutex_lock(gctx->lock);
        assert(gctx->nlimbo != 0);
        if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
                /* Remove gctx from bt2gctx. */
                if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
                        not_reached();
-               prof_leave(tdata);
+               prof_leave(tsd, tdata_self);
                /* Destroy gctx. */
                malloc_mutex_unlock(gctx->lock);
-               idalloc(tsd, gctx);
+               idalloctm(tsd, gctx, tcache_get(tsd, false), true, true);
        } else {
                /*
                 * Compensate for increment in prof_tctx_destroy() or
@@ -572,7 +602,7 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_gctx_t *gctx, prof_tdata_t *tdata)
                 */
                gctx->nlimbo--;
                malloc_mutex_unlock(gctx->lock);
-               prof_leave(tdata);
+               prof_leave(tsd, tdata_self);
        }
 }
 
@@ -609,7 +639,7 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
 {
        prof_tdata_t *tdata = tctx->tdata;
        prof_gctx_t *gctx = tctx->gctx;
-       bool destroy_tdata, destroy_gctx;
+       bool destroy_tdata, destroy_tctx, destroy_gctx;
 
        assert(tctx->cnts.curobjs == 0);
        assert(tctx->cnts.curbytes == 0);
@@ -622,33 +652,56 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
        malloc_mutex_unlock(tdata->lock);
 
        malloc_mutex_lock(gctx->lock);
-       tctx_tree_remove(&gctx->tctxs, tctx);
-       if (prof_gctx_should_destroy(gctx)) {
+       switch (tctx->state) {
+       case prof_tctx_state_nominal:
+               tctx_tree_remove(&gctx->tctxs, tctx);
+               destroy_tctx = true;
+               if (prof_gctx_should_destroy(gctx)) {
+                       /*
+                        * Increment gctx->nlimbo in order to keep another
+                        * thread from winning the race to destroy gctx while
+                        * this one has gctx->lock dropped.  Without this, it
+                        * would be possible for another thread to:
+                        *
+                        * 1) Sample an allocation associated with gctx.
+                        * 2) Deallocate the sampled object.
+                        * 3) Successfully prof_gctx_try_destroy(gctx).
+                        *
+                        * The result would be that gctx no longer exists by the
+                        * time this thread accesses it in
+                        * prof_gctx_try_destroy().
+                        */
+                       gctx->nlimbo++;
+                       destroy_gctx = true;
+               } else
+                       destroy_gctx = false;
+               break;
+       case prof_tctx_state_dumping:
                /*
-                * Increment gctx->nlimbo in order to keep another thread from
-                * winning the race to destroy gctx while this one has
-                * gctx->lock dropped.  Without this, it would be possible for
-                * another thread to:
-                *
-                * 1) Sample an allocation associated with gctx.
-                * 2) Deallocate the sampled object.
-                * 3) Successfully prof_gctx_try_destroy(gctx).
-                *
-                * The result would be that gctx no longer exists by the time
-                * this thread accesses it in prof_gctx_try_destroy().
+                * A dumping thread needs tctx to remain valid until dumping
+                * has finished.  Change state such that the dumping thread will
+                * complete destruction during a late dump iteration phase.
                 */
-               gctx->nlimbo++;
-               destroy_gctx = true;
-       } else
+               tctx->state = prof_tctx_state_purgatory;
+               destroy_tctx = false;
                destroy_gctx = false;
+               break;
+       default:
+               not_reached();
+               destroy_tctx = false;
+               destroy_gctx = false;
+       }
        malloc_mutex_unlock(gctx->lock);
-       if (destroy_gctx)
-               prof_gctx_try_destroy(tsd, gctx, tdata);
+       if (destroy_gctx) {
+               prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
+                   tdata);
+       }
 
        if (destroy_tdata)
                prof_tdata_destroy(tsd, tdata, false);
 
-       idalloc(tsd, tctx);
+       if (destroy_tctx)
+               idalloctm(tsd, tctx, tcache_get(tsd, false), true, true);
 }
 
 static bool
@@ -665,19 +718,20 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
        } btkey;
        bool new_gctx;
 
-       prof_enter(tdata);
+       prof_enter(tsd, tdata);
        if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
                /* bt has never been seen before.  Insert it. */
                gctx.p = prof_gctx_create(tsd, bt);
                if (gctx.v == NULL) {
-                       prof_leave(tdata);
+                       prof_leave(tsd, tdata);
                        return (true);
                }
                btkey.p = &gctx.p->bt;
                if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
                        /* OOM. */
-                       prof_leave(tdata);
-                       idalloc(tsd, gctx.v);
+                       prof_leave(tsd, tdata);
+                       idalloctm(tsd, gctx.v, tcache_get(tsd, false), true,
+                           true);
                        return (true);
                }
                new_gctx = true;
@@ -691,7 +745,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
                malloc_mutex_unlock(gctx.p->lock);
                new_gctx = false;
        }
-       prof_leave(tdata);
+       prof_leave(tsd, tdata);
 
        *p_btkey = btkey.v;
        *p_gctx = gctx.p;
@@ -721,6 +775,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
                ret.p->prepared = true;
        malloc_mutex_unlock(tdata->lock);
        if (not_found) {
+               tcache_t *tcache;
                void *btkey;
                prof_gctx_t *gctx;
                bool new_gctx, error;
@@ -734,15 +789,21 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
                        return (NULL);
 
                /* Link a prof_tctx_t into gctx for this thread. */
-               ret.v = imalloc(tsd, sizeof(prof_tctx_t));
+               tcache = tcache_get(tsd, true);
+               ret.v = iallocztm(tsd, sizeof(prof_tctx_t),
+                   size2index(sizeof(prof_tctx_t)), false, tcache, true, NULL,
+                   true);
                if (ret.p == NULL) {
                        if (new_gctx)
-                               prof_gctx_try_destroy(tsd, gctx, tdata);
+                               prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
                        return (NULL);
                }
                ret.p->tdata = tdata;
+               ret.p->thr_uid = tdata->thr_uid;
+               ret.p->thr_discrim = tdata->thr_discrim;
                memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
                ret.p->gctx = gctx;
+               ret.p->tctx_uid = tdata->tctx_uid_next++;
                ret.p->prepared = true;
                ret.p->state = prof_tctx_state_initializing;
                malloc_mutex_lock(tdata->lock);
@@ -750,8 +811,8 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
                malloc_mutex_unlock(tdata->lock);
                if (error) {
                        if (new_gctx)
-                               prof_gctx_try_destroy(tsd, gctx, tdata);
-                       idalloc(tsd, ret.v);
+                               prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+                       idalloctm(tsd, ret.v, tcache, true, true);
                        return (NULL);
                }
                malloc_mutex_lock(gctx->lock);
@@ -810,8 +871,7 @@ prof_sample_threshold_update(prof_tdata_t *tdata)
         *   pp 500
         *   (http://luc.devroye.org/rnbookindex.html)
         */
-       prng64(r, 53, tdata->prng_state, UINT64_C(6364136223846793005),
-           UINT64_C(1442695040888963407));
+       r = prng_lg_range(&tdata->prng_state, 53);
        u = (double)r * (1.0/9007199254740992.0L);
        tdata->bytes_until_sample = (uint64_t)(log(u) /
            log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
@@ -857,9 +917,9 @@ prof_bt_count(void)
        if (tdata == NULL)
                return (0);
 
-       prof_enter(tdata);
+       malloc_mutex_lock(&bt2gctx_mtx);
        bt_count = ckh_count(&bt2gctx);
-       prof_leave(tdata);
+       malloc_mutex_unlock(&bt2gctx_mtx);
 
        return (bt_count);
 }
@@ -929,7 +989,7 @@ prof_dump_close(bool propagate_err)
 static bool
 prof_dump_write(bool propagate_err, const char *s)
 {
-       unsigned i, slen, n;
+       size_t i, slen, n;
 
        cassert(config_prof);
 
@@ -956,7 +1016,7 @@ prof_dump_write(bool propagate_err, const char *s)
        return (false);
 }
 
-JEMALLOC_ATTR(format(printf, 2, 3))
+JEMALLOC_FORMAT_PRINTF(2, 3)
 static bool
 prof_dump_printf(bool propagate_err, const char *format, ...)
 {
@@ -978,21 +1038,29 @@ prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata)
 {
 
        malloc_mutex_lock(tctx->gctx->lock);
-       if (tctx->state == prof_tctx_state_initializing) {
+
+       switch (tctx->state) {
+       case prof_tctx_state_initializing:
                malloc_mutex_unlock(tctx->gctx->lock);
                return;
-       }
-       assert(tctx->state == prof_tctx_state_nominal);
-       tctx->state = prof_tctx_state_dumping;
-       malloc_mutex_unlock(tctx->gctx->lock);
+       case prof_tctx_state_nominal:
+               tctx->state = prof_tctx_state_dumping;
+               malloc_mutex_unlock(tctx->gctx->lock);
 
-       memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
+               memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
 
-       tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
-       tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
-       if (opt_prof_accum) {
-               tdata->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
-               tdata->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
+               tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+               tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
+               if (opt_prof_accum) {
+                       tdata->cnt_summed.accumobjs +=
+                           tctx->dump_cnts.accumobjs;
+                       tdata->cnt_summed.accumbytes +=
+                           tctx->dump_cnts.accumbytes;
+               }
+               break;
+       case prof_tctx_state_dumping:
+       case prof_tctx_state_purgatory:
+               not_reached();
        }
 }
 
@@ -1035,12 +1103,23 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
 {
        bool propagate_err = *(bool *)arg;
 
-       if (prof_dump_printf(propagate_err,
-           "  t%"PRIu64": %"PRIu64": %"PRIu64" [%"PRIu64": %"PRIu64"]\n",
-           tctx->tdata->thr_uid, tctx->dump_cnts.curobjs,
-           tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
-           tctx->dump_cnts.accumbytes))
-               return (tctx);
+       switch (tctx->state) {
+       case prof_tctx_state_initializing:
+       case prof_tctx_state_nominal:
+               /* Not captured by this dump. */
+               break;
+       case prof_tctx_state_dumping:
+       case prof_tctx_state_purgatory:
+               if (prof_dump_printf(propagate_err,
+                   "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
+                   "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
+                   tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
+                   tctx->dump_cnts.accumbytes))
+                       return (tctx);
+               break;
+       default:
+               not_reached();
+       }
        return (NULL);
 }
 
@@ -1132,7 +1211,8 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
                                            to_destroy);
                                        tctx_tree_remove(&gctx->tctxs,
                                            to_destroy);
-                                       idalloc(tsd, to_destroy);
+                                       idalloctm(tsd, to_destroy,
+                                           tcache_get(tsd, false), true, true);
                                } else
                                        next = NULL;
                        } while (next != NULL);
@@ -1141,7 +1221,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
                if (prof_gctx_should_destroy(gctx)) {
                        gctx->nlimbo++;
                        malloc_mutex_unlock(gctx->lock);
-                       prof_gctx_try_destroy(tsd, gctx, tdata);
+                       prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
                } else
                        malloc_mutex_unlock(gctx->lock);
        }
@@ -1188,7 +1268,7 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
                return (NULL);
 
        if (prof_dump_printf(propagate_err,
-           "  t%"PRIu64": %"PRIu64": %"PRIu64" [%"PRIu64": %"PRIu64"]%s%s\n",
+           "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
            tdata->thr_uid, tdata->cnt_summed.curobjs,
            tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
            tdata->cnt_summed.accumbytes,
@@ -1208,8 +1288,8 @@ prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
        bool ret;
 
        if (prof_dump_printf(propagate_err,
-           "heap_v2/%"PRIu64"\n"
-           "  t*: %"PRIu64": %"PRIu64" [%"PRIu64": %"PRIu64"]\n",
+           "heap_v2/%"FMTu64"\n"
+           "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
            ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
            cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
                return (true);
@@ -1252,7 +1332,7 @@ prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
                goto label_return;
        }
        for (i = 0; i < bt->len; i++) {
-               if (prof_dump_printf(propagate_err, " %#"PRIxPTR,
+               if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
                    (uintptr_t)bt->vec[i])) {
                        ret = true;
                        goto label_return;
@@ -1261,7 +1341,7 @@ prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
 
        if (prof_dump_printf(propagate_err,
            "\n"
-           "  t*: %"PRIu64": %"PRIu64" [%"PRIu64": %"PRIu64"]\n",
+           "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
            gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
            gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
                ret = true;
@@ -1279,21 +1359,55 @@ label_return:
        return (ret);
 }
 
+#ifndef _WIN32
+JEMALLOC_FORMAT_PRINTF(1, 2)
+static int
+prof_open_maps(const char *format, ...)
+{
+       int mfd;
+       va_list ap;
+       char filename[PATH_MAX + 1];
+
+       va_start(ap, format);
+       malloc_vsnprintf(filename, sizeof(filename), format, ap);
+       va_end(ap);
+       mfd = open(filename, O_RDONLY);
+
+       return (mfd);
+}
+#endif
+
+static int
+prof_getpid(void)
+{
+
+#ifdef _WIN32
+       return (GetCurrentProcessId());
+#else
+       return (getpid());
+#endif
+}
+
 static bool
 prof_dump_maps(bool propagate_err)
 {
        bool ret;
        int mfd;
-       char filename[PATH_MAX + 1];
 
        cassert(config_prof);
 #ifdef __FreeBSD__
-       malloc_snprintf(filename, sizeof(filename), "/proc/curproc/map");
+       mfd = prof_open_maps("/proc/curproc/map");
+#elif defined(_WIN32)
+       mfd = -1; // Not implemented
 #else
-       malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
-           (int)getpid());
+       {
+               int pid = prof_getpid();
+
+               mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
+               if (mfd == -1)
+                       mfd = prof_open_maps("/proc/%d/maps", pid);
+       }
 #endif
-       mfd = open(filename, O_RDONLY);
        if (mfd != -1) {
                ssize_t nread;
 
@@ -1334,13 +1448,13 @@ prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
 {
 
        if (cnt_all->curbytes != 0) {
-               malloc_printf("<jemalloc>: Leak summary: %"PRIu64" byte%s, %"
-                   PRIu64" object%s, %zu context%s\n",
+               malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %"
+                   FMTu64" object%s, %zu context%s\n",
                    cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
                    cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
                    leak_ngctx, (leak_ngctx != 1) ? "s" : "");
                malloc_printf(
-                   "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
+                   "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
                    filename);
        }
 }
@@ -1384,7 +1498,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
                return (true);
 
        malloc_mutex_lock(&prof_dump_mtx);
-       prof_enter(tdata);
+       prof_enter(tsd, tdata);
 
        /*
         * Put gctx's in limbo and clear their counters in preparation for
@@ -1407,7 +1521,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
        leak_ngctx = 0;
        gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx);
 
-       prof_leave(tdata);
+       prof_leave(tsd, tdata);
 
        /* Create dump file. */
        if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
@@ -1455,13 +1569,13 @@ prof_dump_filename(char *filename, char v, uint64_t vseq)
        if (vseq != VSEQ_INVALID) {
                /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
                malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
-                   "%s.%d.%"PRIu64".%c%"PRIu64".heap",
-                   opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
+                   "%s.%d.%"FMTu64".%c%"FMTu64".heap",
+                   opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
        } else {
                /* "<prefix>.<pid>.<seq>.<v>.heap" */
                malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
-                   "%s.%d.%"PRIu64".%c.heap",
-                   opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
+                   "%s.%d.%"FMTu64".%c.heap",
+                   opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
        }
        prof_dump_seq++;
 }
@@ -1473,17 +1587,17 @@ prof_fdump(void)
        char filename[DUMP_FILENAME_BUFSIZE];
 
        cassert(config_prof);
+       assert(opt_prof_final);
+       assert(opt_prof_prefix[0] != '\0');
 
        if (!prof_booted)
                return;
        tsd = tsd_fetch();
 
-       if (opt_prof_final && opt_prof_prefix[0] != '\0') {
-               malloc_mutex_lock(&prof_dump_seq_mtx);
-               prof_dump_filename(filename, 'f', VSEQ_INVALID);
-               malloc_mutex_unlock(&prof_dump_seq_mtx);
-               prof_dump(tsd, false, filename, opt_prof_leak);
-       }
+       malloc_mutex_lock(&prof_dump_seq_mtx);
+       prof_dump_filename(filename, 'f', VSEQ_INVALID);
+       malloc_mutex_unlock(&prof_dump_seq_mtx);
+       prof_dump(tsd, false, filename, opt_prof_leak);
 }
 
 void
@@ -1491,7 +1605,6 @@ prof_idump(void)
 {
        tsd_t *tsd;
        prof_tdata_t *tdata;
-       char filename[PATH_MAX + 1];
 
        cassert(config_prof);
 
@@ -1507,6 +1620,7 @@ prof_idump(void)
        }
 
        if (opt_prof_prefix[0] != '\0') {
+               char filename[PATH_MAX + 1];
                malloc_mutex_lock(&prof_dump_seq_mtx);
                prof_dump_filename(filename, 'i', prof_dump_iseq);
                prof_dump_iseq++;
@@ -1545,7 +1659,6 @@ prof_gdump(void)
 {
        tsd_t *tsd;
        prof_tdata_t *tdata;
-       char filename[DUMP_FILENAME_BUFSIZE];
 
        cassert(config_prof);
 
@@ -1561,6 +1674,7 @@ prof_gdump(void)
        }
 
        if (opt_prof_prefix[0] != '\0') {
+               char filename[DUMP_FILENAME_BUFSIZE];
                malloc_mutex_lock(&prof_dump_seq_mtx);
                prof_dump_filename(filename, 'u', prof_dump_useq);
                prof_dump_useq++;
@@ -1610,11 +1724,14 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
     char *thread_name, bool active)
 {
        prof_tdata_t *tdata;
+       tcache_t *tcache;
 
        cassert(config_prof);
 
        /* Initialize an empty cache for this thread. */
-       tdata = (prof_tdata_t *)imalloc(tsd, sizeof(prof_tdata_t));
+       tcache = tcache_get(tsd, true);
+       tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t),
+           size2index(sizeof(prof_tdata_t)), false, tcache, true, NULL, true);
        if (tdata == NULL)
                return (NULL);
 
@@ -1624,10 +1741,11 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
        tdata->thread_name = thread_name;
        tdata->attached = true;
        tdata->expired = false;
+       tdata->tctx_uid_next = 0;
 
        if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
            prof_bt_hash, prof_bt_keycomp)) {
-               idalloc(tsd, tdata);
+               idalloctm(tsd, tdata, tcache, true, true);
                return (NULL);
        }
 
@@ -1673,16 +1791,18 @@ static void
 prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
     bool even_if_attached)
 {
+       tcache_t *tcache;
 
        assert(prof_tdata_should_destroy(tdata, even_if_attached));
        assert(tsd_prof_tdata_get(tsd) != tdata);
 
        tdata_tree_remove(&tdatas, tdata);
 
+       tcache = tcache_get(tsd, false);
        if (tdata->thread_name != NULL)
-               idalloc(tsd, tdata->thread_name);
+               idalloctm(tsd, tdata->thread_name, tcache, true, true);
        ckh_delete(tsd, &tdata->bt2tctx);
-       idalloc(tsd, tdata);
+       idalloctm(tsd, tdata, tcache, true, true);
 }
 
 static void
@@ -1843,7 +1963,8 @@ prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
        if (size == 1)
                return ("");
 
-       ret = imalloc(tsd, size);
+       ret = iallocztm(tsd, size, size2index(size), false, tcache_get(tsd,
+           true), true, NULL, true);
        if (ret == NULL)
                return (NULL);
        memcpy(ret, thread_name, size);
@@ -1875,7 +1996,8 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
                return (EAGAIN);
 
        if (tdata->thread_name != NULL) {
-               idalloc(tsd, tdata->thread_name);
+               idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
+                   true, true);
                tdata->thread_name = NULL;
        }
        if (strlen(s) > 0)
@@ -1933,6 +2055,29 @@ prof_thread_active_init_set(bool active_init)
        return (active_init_old);
 }
 
+bool
+prof_gdump_get(void)
+{
+       bool prof_gdump_current;
+
+       malloc_mutex_lock(&prof_gdump_mtx);
+       prof_gdump_current = prof_gdump_val;
+       malloc_mutex_unlock(&prof_gdump_mtx);
+       return (prof_gdump_current);
+}
+
+bool
+prof_gdump_set(bool gdump)
+{
+       bool prof_gdump_old;
+
+       malloc_mutex_lock(&prof_gdump_mtx);
+       prof_gdump_old = prof_gdump_val;
+       prof_gdump_val = gdump;
+       malloc_mutex_unlock(&prof_gdump_mtx);
+       return (prof_gdump_old);
+}
+
 void
 prof_boot0(void)
 {
@@ -1985,6 +2130,10 @@ prof_boot2(void)
                if (malloc_mutex_init(&prof_active_mtx))
                        return (true);
 
+               prof_gdump_val = opt_prof_gdump;
+               if (malloc_mutex_init(&prof_gdump_mtx))
+                       return (true);
+
                prof_thread_active_init = opt_prof_thread_active_init;
                if (malloc_mutex_init(&prof_thread_active_init_mtx))
                        return (true);
@@ -2009,7 +2158,8 @@ prof_boot2(void)
                if (malloc_mutex_init(&prof_dump_mtx))
                        return (true);
 
-               if (atexit(prof_fdump) != 0) {
+               if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
+                   atexit(prof_fdump) != 0) {
                        malloc_write("<jemalloc>: Error in atexit()\n");
                        if (opt_abort)
                                abort();
diff --git a/src/jemalloc/src/quarantine.c b/src/jemalloc/src/quarantine.c
index 1301b4793aee1291d63e6852451ed0431cb20531..ff8801cb7c7cc1ef63fededfb081030bbbc12ff2 100644 (file)
--- a/src/jemalloc/src/quarantine.c
+++ b/src/jemalloc/src/quarantine.c
@@ -2,7 +2,7 @@
 #include "jemalloc/internal/jemalloc_internal.h"
 
 /*
- * quarantine pointers close to NULL are used to encode state information that
+ * Quarantine pointers close to NULL are used to encode state information that
  * is used for cleaning up during thread shutdown.
  */
 #define        QUARANTINE_STATE_REINCARNATED   ((quarantine_t *)(uintptr_t)1)
@@ -19,13 +19,18 @@ static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine,
 
 /******************************************************************************/
 
-quarantine_t *
+static quarantine_t *
 quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
 {
        quarantine_t *quarantine;
+       size_t size;
+
+       assert(tsd_nominal(tsd));
 
-       quarantine = (quarantine_t *)imalloc(tsd, offsetof(quarantine_t, objs) +
-           ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
+       size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) *
+           sizeof(quarantine_obj_t));
+       quarantine = (quarantine_t *)iallocztm(tsd, size, size2index(size),
+           false, tcache_get(tsd, true), true, NULL, true);
        if (quarantine == NULL)
                return (NULL);
        quarantine->curbytes = 0;
@@ -36,6 +41,25 @@ quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
        return (quarantine);
 }
 
+void
+quarantine_alloc_hook_work(tsd_t *tsd)
+{
+       quarantine_t *quarantine;
+
+       if (!tsd_nominal(tsd))
+               return;
+
+       quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT);
+       /*
+        * Check again whether quarantine has been initialized, because
+        * quarantine_init() may have triggered recursive initialization.
+        */
+       if (tsd_quarantine_get(tsd) == NULL)
+               tsd_quarantine_set(tsd, quarantine);
+       else
+               idalloctm(tsd, quarantine, tcache_get(tsd, false), true, true);
+}
+
 static quarantine_t *
 quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
 {
@@ -65,8 +89,9 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
                memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
                    sizeof(quarantine_obj_t));
        }
-       idalloc(tsd, quarantine);
+       idalloctm(tsd, quarantine, tcache_get(tsd, false), true, true);
 
+       tsd_quarantine_set(tsd, ret);
        return (ret);
 }
 
@@ -75,7 +100,7 @@ quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
 {
        quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
        assert(obj->usize == isalloc(obj->ptr, config_prof));
-       idalloc(tsd, obj->ptr);
+       idalloctm(tsd, obj->ptr, NULL, false, true);
        quarantine->curbytes -= obj->usize;
        quarantine->curobjs--;
        quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
@@ -100,7 +125,7 @@ quarantine(tsd_t *tsd, void *ptr)
        assert(opt_quarantine);
 
        if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
-               idalloc(tsd, ptr);
+               idalloctm(tsd, ptr, NULL, false, true);
                return;
        }
        /*
@@ -126,7 +151,7 @@ quarantine(tsd_t *tsd, void *ptr)
                obj->usize = usize;
                quarantine->curbytes += usize;
                quarantine->curobjs++;
-               if (config_fill && unlikely(opt_junk)) {
+               if (config_fill && unlikely(opt_junk_free)) {
                        /*
                         * Only do redzone validation if Valgrind isn't in
                         * operation.
@@ -139,7 +164,7 @@ quarantine(tsd_t *tsd, void *ptr)
                }
        } else {
                assert(quarantine->curbytes == 0);
-               idalloc(tsd, ptr);
+               idalloctm(tsd, ptr, NULL, false, true);
        }
 }
 
@@ -154,7 +179,7 @@ quarantine_cleanup(tsd_t *tsd)
        quarantine = tsd_quarantine_get(tsd);
        if (quarantine != NULL) {
                quarantine_drain(tsd, quarantine, 0);
-               idalloc(tsd, quarantine);
+               idalloctm(tsd, quarantine, tcache_get(tsd, false), true, true);
                tsd_quarantine_set(tsd, NULL);
        }
 }
diff --git a/src/jemalloc/src/rtree.c b/src/jemalloc/src/rtree.c
index 2ff93dbe75ce12bf1e1f2543ce07b7ac22e9b479..af0d97e753018d2da0dba40a73bc06e4ffee4499 100644 (file)
--- a/src/jemalloc/src/rtree.c
+++ b/src/jemalloc/src/rtree.c
@@ -1,75 +1,74 @@
 #define        JEMALLOC_RTREE_C_
 #include "jemalloc/internal/jemalloc_internal.h"
 
-rtree_t *
-rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc)
+static unsigned
+hmin(unsigned ha, unsigned hb)
 {
-       rtree_t *ret;
-       unsigned bits_per_level, bits_in_leaf, height, i;
+
+       return (ha < hb ? ha : hb);
+}
+
+/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */
+bool
+rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
+    rtree_node_dalloc_t *dalloc)
+{
+       unsigned bits_in_leaf, height, i;
 
        assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
 
-       bits_per_level = jemalloc_ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void
-           *)))) - 1;
-       bits_in_leaf = jemalloc_ffs(pow2_ceil((RTREE_NODESIZE /
-           sizeof(uint8_t)))) - 1;
+       bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
+           : (bits % RTREE_BITS_PER_LEVEL);
        if (bits > bits_in_leaf) {
-               height = 1 + (bits - bits_in_leaf) / bits_per_level;
-               if ((height-1) * bits_per_level + bits_in_leaf != bits)
+               height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL;
+               if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits)
                        height++;
-       } else {
+       } else
                height = 1;
+       assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits);
+
+       rtree->alloc = alloc;
+       rtree->dalloc = dalloc;
+       rtree->height = height;
+
+       /* Root level. */
+       rtree->levels[0].subtree = NULL;
+       rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL :
+           bits_in_leaf;
+       rtree->levels[0].cumbits = rtree->levels[0].bits;
+       /* Interior levels. */
+       for (i = 1; i < height-1; i++) {
+               rtree->levels[i].subtree = NULL;
+               rtree->levels[i].bits = RTREE_BITS_PER_LEVEL;
+               rtree->levels[i].cumbits = rtree->levels[i-1].cumbits +
+                   RTREE_BITS_PER_LEVEL;
        }
-       assert((height-1) * bits_per_level + bits_in_leaf >= bits);
-
-       ret = (rtree_t*)alloc(offsetof(rtree_t, level2bits) +
-           (sizeof(unsigned) * height));
-       if (ret == NULL)
-               return (NULL);
-       memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
-           height));
-
-       ret->alloc = alloc;
-       ret->dalloc = dalloc;
-       if (malloc_mutex_init(&ret->mutex)) {
-               if (dalloc != NULL)
-                       dalloc(ret);
-               return (NULL);
-       }
-       ret->height = height;
+       /* Leaf level. */
        if (height > 1) {
-               if ((height-1) * bits_per_level + bits_in_leaf > bits) {
-                       ret->level2bits[0] = (bits - bits_in_leaf) %
-                           bits_per_level;
-               } else
-                       ret->level2bits[0] = bits_per_level;
-               for (i = 1; i < height-1; i++)
-                       ret->level2bits[i] = bits_per_level;
-               ret->level2bits[height-1] = bits_in_leaf;
-       } else
-               ret->level2bits[0] = bits;
+               rtree->levels[height-1].subtree = NULL;
+               rtree->levels[height-1].bits = bits_in_leaf;
+               rtree->levels[height-1].cumbits = bits;
+       }
 
-       ret->root = (void**)alloc(sizeof(void *) << ret->level2bits[0]);
-       if (ret->root == NULL) {
-               if (dalloc != NULL)
-                       dalloc(ret);
-               return (NULL);
+       /* Compute lookup table to be used by rtree_start_level(). */
+       for (i = 0; i < RTREE_HEIGHT_MAX; i++) {
+               rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height -
+                   1);
        }
-       memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
 
-       return (ret);
+       return (false);
 }
 
 static void
-rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
+rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level)
 {
 
-       if (level < rtree->height - 1) {
+       if (level + 1 < rtree->height) {
                size_t nchildren, i;
 
-               nchildren = ZU(1) << rtree->level2bits[level];
+               nchildren = ZU(1) << rtree->levels[level].bits;
                for (i = 0; i < nchildren; i++) {
-                       void **child = (void **)node[i];
+                       rtree_node_elm_t *child = node[i].child;
                        if (child != NULL)
                                rtree_delete_subtree(rtree, child, level + 1);
                }
@@ -80,28 +79,49 @@ rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
 void
 rtree_delete(rtree_t *rtree)
 {
+       unsigned i;
 
-       rtree_delete_subtree(rtree, rtree->root, 0);
-       rtree->dalloc(rtree);
+       for (i = 0; i < rtree->height; i++) {
+               rtree_node_elm_t *subtree = rtree->levels[i].subtree;
+               if (subtree != NULL)
+                       rtree_delete_subtree(rtree, subtree, i);
+       }
 }
 
-void
-rtree_prefork(rtree_t *rtree)
+static rtree_node_elm_t *
+rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
 {
+       rtree_node_elm_t *node;
+
+       if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
+               /*
+                * Another thread is already in the process of initializing.
+                * Spin-wait until initialization is complete.
+                */
+               do {
+                       CPU_SPINWAIT;
+                       node = atomic_read_p((void **)elmp);
+               } while (node == RTREE_NODE_INITIALIZING);
+       } else {
+               node = rtree->alloc(ZU(1) << rtree->levels[level].bits);
+               if (node == NULL)
+                       return (NULL);
+               atomic_write_p((void **)elmp, node);
+       }
 
-       malloc_mutex_prefork(&rtree->mutex);
+       return (node);
 }
 
-void
-rtree_postfork_parent(rtree_t *rtree)
+rtree_node_elm_t *
+rtree_subtree_read_hard(rtree_t *rtree, unsigned level)
 {
 
-       malloc_mutex_postfork_parent(&rtree->mutex);
+       return (rtree_node_init(rtree, level, &rtree->levels[level].subtree));
 }
 
-void
-rtree_postfork_child(rtree_t *rtree)
+rtree_node_elm_t *
+rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
 {
 
-       malloc_mutex_postfork_child(&rtree->mutex);
+       return (rtree_node_init(rtree, level, &elm->child));
 }
diff --git a/src/jemalloc/src/stats.c b/src/jemalloc/src/stats.c
index 5c3d701753519c753013bcaa26ab9541a37336b7..a724947938c0599f2393a91bcc7322df33790d05 100644 (file)
--- a/src/jemalloc/src/stats.c
+++ b/src/jemalloc/src/stats.c
@@ -6,31 +6,22 @@
        xmallctl(n, v, &sz, NULL, 0);                                   \
 } while (0)
 
-#define        CTL_I_GET(n, v, t) do {                                         \
+#define        CTL_M2_GET(n, i, v, t) do {                                     \
        size_t mib[6];                                                  \
        size_t miblen = sizeof(mib) / sizeof(size_t);                   \
        size_t sz = sizeof(t);                                          \
        xmallctlnametomib(n, mib, &miblen);                             \
-       mib[2] = i;                                                     \
+       mib[2] = (i);                                                   \
        xmallctlbymib(mib, miblen, v, &sz, NULL, 0);                    \
 } while (0)
 
-#define        CTL_J_GET(n, v, t) do {                                         \
+#define        CTL_M2_M4_GET(n, i, j, v, t) do {                               \
        size_t mib[6];                                                  \
        size_t miblen = sizeof(mib) / sizeof(size_t);                   \
        size_t sz = sizeof(t);                                          \
        xmallctlnametomib(n, mib, &miblen);                             \
-       mib[2] = j;                                                     \
-       xmallctlbymib(mib, miblen, v, &sz, NULL, 0);                    \
-} while (0)
-
-#define        CTL_IJ_GET(n, v, t) do {                                        \
-       size_t mib[6];                                                  \
-       size_t miblen = sizeof(mib) / sizeof(size_t);                   \
-       size_t sz = sizeof(t);                                          \
-       xmallctlnametomib(n, mib, &miblen);                             \
-       mib[2] = i;                                                     \
-       mib[4] = j;                                                     \
+       mib[2] = (i);                                                   \
+       mib[4] = (j);                                                   \
        xmallctlbymib(mib, miblen, v, &sz, NULL, 0);                    \
 } while (0)
 
@@ -48,8 +39,10 @@ static void  stats_arena_bins_print(void (*write_cb)(void *, const char *),
     void *cbopaque, unsigned i);
 static void    stats_arena_lruns_print(void (*write_cb)(void *, const char *),
     void *cbopaque, unsigned i);
+static void    stats_arena_hchunks_print(
+    void (*write_cb)(void *, const char *), void *cbopaque, unsigned i);
 static void    stats_arena_print(void (*write_cb)(void *, const char *),
-    void *cbopaque, unsigned i, bool bins, bool large);
+    void *cbopaque, unsigned i, bool bins, bool large, bool huge);
 
 /******************************************************************************/
 
@@ -58,100 +51,109 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
     unsigned i)
 {
        size_t page;
-       bool config_tcache;
-       unsigned nbins, j, gap_start;
+       bool config_tcache, in_gap;
+       unsigned nbins, j;
 
        CTL_GET("arenas.page", &page, size_t);
 
        CTL_GET("config.tcache", &config_tcache, bool);
        if (config_tcache) {
                malloc_cprintf(write_cb, cbopaque,
-                   "bins:     bin  size regs pgs    allocated      nmalloc"
-                   "      ndalloc    nrequests       nfills     nflushes"
-                   "      newruns       reruns      curruns\n");
+                   "bins:           size ind    allocated      nmalloc"
+                   "      ndalloc    nrequests      curregs      curruns regs"
+                   " pgs  util       nfills     nflushes      newruns"
+                   "       reruns\n");
        } else {
                malloc_cprintf(write_cb, cbopaque,
-                   "bins:     bin  size regs pgs    allocated      nmalloc"
-                   "      ndalloc      newruns       reruns      curruns\n");
+                   "bins:           size ind    allocated      nmalloc"
+                   "      ndalloc    nrequests      curregs      curruns regs"
+                   " pgs  util      newruns       reruns\n");
        }
        CTL_GET("arenas.nbins", &nbins, unsigned);
-       for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
+       for (j = 0, in_gap = false; j < nbins; j++) {
                uint64_t nruns;
 
-               CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
-               if (nruns == 0) {
-                       if (gap_start == UINT_MAX)
-                               gap_start = j;
-               } else {
-                       size_t reg_size, run_size, allocated;
+               CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns,
+                   uint64_t);
+               if (nruns == 0)
+                       in_gap = true;
+               else {
+                       size_t reg_size, run_size, curregs, availregs, milli;
+                       size_t curruns;
                        uint32_t nregs;
                        uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
                        uint64_t reruns;
-                       size_t curruns;
+                       char util[6]; /* "x.yyy". */
 
-                       if (gap_start != UINT_MAX) {
-                               if (j > gap_start + 1) {
-                                       /* Gap of more than one size class. */
-                                       malloc_cprintf(write_cb, cbopaque,
-                                           "[%u..%u]\n", gap_start,
-                                           j - 1);
-                               } else {
-                                       /* Gap of one size class. */
-                                       malloc_cprintf(write_cb, cbopaque,
-                                           "[%u]\n", gap_start);
-                               }
-                               gap_start = UINT_MAX;
+                       if (in_gap) {
+                               malloc_cprintf(write_cb, cbopaque,
+                                   "                     ---\n");
+                               in_gap = false;
                        }
-                       CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
-                       CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
-                       CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
-                       CTL_IJ_GET("stats.arenas.0.bins.0.allocated",
-                           &allocated, size_t);
-                       CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc",
+                       CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
+                       CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
+                       CTL_M2_GET("arenas.bin.0.run_size", j, &run_size,
+                           size_t);
+                       CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j,
                            &nmalloc, uint64_t);
-                       CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc",
+                       CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j,
                            &ndalloc, uint64_t);
+                       CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j,
+                           &curregs, size_t);
+                       CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
+                           &nrequests, uint64_t);
                        if (config_tcache) {
-                               CTL_IJ_GET("stats.arenas.0.bins.0.nrequests",
-                                   &nrequests, uint64_t);
-                               CTL_IJ_GET("stats.arenas.0.bins.0.nfills",
-                                   &nfills, uint64_t);
-                               CTL_IJ_GET("stats.arenas.0.bins.0.nflushes",
-                                   &nflushes, uint64_t);
+                               CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i,
+                                   j, &nfills, uint64_t);
+                               CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes",
+                                   i, j, &nflushes, uint64_t);
                        }
-                       CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
-                           uint64_t);
-                       CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
-                           size_t);
+                       CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j,
+                           &reruns, uint64_t);
+                       CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j,
+                           &curruns, size_t);
+
+                       availregs = nregs * curruns;
+                       milli = (availregs != 0) ? (1000 * curregs) / availregs
+                           : 1000;
+                       assert(milli <= 1000);
+                       if (milli < 10) {
+                               malloc_snprintf(util, sizeof(util),
+                                   "0.00%zu", milli);
+                       } else if (milli < 100) {
+                               malloc_snprintf(util, sizeof(util), "0.0%zu",
+                                   milli);
+                       } else if (milli < 1000) {
+                               malloc_snprintf(util, sizeof(util), "0.%zu",
+                                   milli);
+                       } else
+                               malloc_snprintf(util, sizeof(util), "1");
+
                        if (config_tcache) {
                                malloc_cprintf(write_cb, cbopaque,
-                                   "%13u %5zu %4u %3zu %12zu %12"PRIu64
-                                   " %12"PRIu64" %12"PRIu64" %12"PRIu64
-                                   " %12"PRIu64" %12"PRIu64" %12"PRIu64
-                                   " %12zu\n",
-                                   j, reg_size, nregs, run_size / page,
-                                   allocated, nmalloc, ndalloc, nrequests,
-                                   nfills, nflushes, nruns, reruns, curruns);
+                                   "%20zu %3u %12zu %12"FMTu64
+                                   " %12"FMTu64" %12"FMTu64" %12zu"
+                                   " %12zu %4u %3zu %-5s %12"FMTu64
+                                   " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n",
+                                   reg_size, j, curregs * reg_size, nmalloc,
+                                   ndalloc, nrequests, curregs, curruns, nregs,
+                                   run_size / page, util, nfills, nflushes,
+                                   nruns, reruns);
                        } else {
                                malloc_cprintf(write_cb, cbopaque,
-                                   "%13u %5zu %4u %3zu %12zu %12"PRIu64
-                                   " %12"PRIu64" %12"PRIu64" %12"PRIu64
-                                   " %12zu\n",
-                                   j, reg_size, nregs, run_size / page,
-                                   allocated, nmalloc, ndalloc, nruns, reruns,
-                                   curruns);
+                                   "%20zu %3u %12zu %12"FMTu64
+                                   " %12"FMTu64" %12"FMTu64" %12zu"
+                                   " %12zu %4u %3zu %-5s %12"FMTu64
+                                   " %12"FMTu64"\n",
+                                   reg_size, j, curregs * reg_size, nmalloc,
+                                   ndalloc, nrequests, curregs, curruns, nregs,
+                                   run_size / page, util, nruns, reruns);
                        }
                }
        }
-       if (gap_start != UINT_MAX) {
-               if (j > gap_start + 1) {
-                       /* Gap of more than one size class. */
-                       malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
-                           gap_start, j - 1);
-               } else {
-                       /* Gap of one size class. */
-                       malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
-               }
+       if (in_gap) {
+               malloc_cprintf(write_cb, cbopaque,
+                   "                     ---\n");
        }
 }
 
@@ -159,55 +161,106 @@ static void
 stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
     unsigned i)
 {
-       size_t page, nlruns, j;
-       ssize_t gap_start;
-
-       CTL_GET("arenas.page", &page, size_t);
+       unsigned nbins, nlruns, j;
+       bool in_gap;
 
        malloc_cprintf(write_cb, cbopaque,
-           "large:   size pages      nmalloc      ndalloc    nrequests"
-           "      curruns\n");
-       CTL_GET("arenas.nlruns", &nlruns, size_t);
-       for (j = 0, gap_start = -1; j < nlruns; j++) {
+           "large:          size ind    allocated      nmalloc      ndalloc"
+           "    nrequests      curruns\n");
+       CTL_GET("arenas.nbins", &nbins, unsigned);
+       CTL_GET("arenas.nlruns", &nlruns, unsigned);
+       for (j = 0, in_gap = false; j < nlruns; j++) {
                uint64_t nmalloc, ndalloc, nrequests;
                size_t run_size, curruns;
 
-               CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc,
-                   uint64_t);
-               CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc,
+               CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc,
                    uint64_t);
-               CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests,
+               CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc,
                    uint64_t);
-               if (nrequests == 0) {
-                       if (gap_start == -1)
-                               gap_start = j;
-               } else {
-                       CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
-                       CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
+               CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j,
+                   &nrequests, uint64_t);
+               if (nrequests == 0)
+                       in_gap = true;
+               else {
+                       CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
+                       CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j,
+                           &curruns, size_t);
+                       if (in_gap) {
+                               malloc_cprintf(write_cb, cbopaque,
+                                   "                     ---\n");
+                               in_gap = false;
+                       }
+                       malloc_cprintf(write_cb, cbopaque,
+                           "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
+                           " %12"FMTu64" %12zu\n",
+                           run_size, nbins + j, curruns * run_size, nmalloc,
+                           ndalloc, nrequests, curruns);
+               }
+       }
+       if (in_gap) {
+               malloc_cprintf(write_cb, cbopaque,
+                   "                     ---\n");
+       }
+}
+
+static void
+stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
+    void *cbopaque, unsigned i)
+{
+       unsigned nbins, nlruns, nhchunks, j;
+       bool in_gap;
+
+       malloc_cprintf(write_cb, cbopaque,
+           "huge:           size ind    allocated      nmalloc      ndalloc"
+           "    nrequests   curhchunks\n");
+       CTL_GET("arenas.nbins", &nbins, unsigned);
+       CTL_GET("arenas.nlruns", &nlruns, unsigned);
+       CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
+       for (j = 0, in_gap = false; j < nhchunks; j++) {
+               uint64_t nmalloc, ndalloc, nrequests;
+               size_t hchunk_size, curhchunks;
+
+               CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j,
+                   &nmalloc, uint64_t);
+               CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j,
+                   &ndalloc, uint64_t);
+               CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j,
+                   &nrequests, uint64_t);
+               if (nrequests == 0)
+                       in_gap = true;
+               else {
+                       CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size,
                            size_t);
-                       if (gap_start != -1) {
-                               malloc_cprintf(write_cb, cbopaque, "[%zu]\n",
-                                   j - gap_start);
-                               gap_start = -1;
+                       CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i,
+                           j, &curhchunks, size_t);
+                       if (in_gap) {
+                               malloc_cprintf(write_cb, cbopaque,
+                                   "                     ---\n");
+                               in_gap = false;
                        }
                        malloc_cprintf(write_cb, cbopaque,
-                           "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
-                           " %12zu\n",
-                           run_size, run_size / page, nmalloc, ndalloc,
-                           nrequests, curruns);
+                           "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
+                           " %12"FMTu64" %12zu\n",
+                           hchunk_size, nbins + nlruns + j,
+                           curhchunks * hchunk_size, nmalloc, ndalloc,
+                           nrequests, curhchunks);
                }
        }
-       if (gap_start != -1)
-               malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start);
+       if (in_gap) {
+               malloc_cprintf(write_cb, cbopaque,
+                   "                     ---\n");
+       }
 }
 
 static void
 stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
-    unsigned i, bool bins, bool large)
+    unsigned i, bool bins, bool large, bool huge)
 {
        unsigned nthreads;
        const char *dss;
+       ssize_t lg_dirty_mult, decay_time;
        size_t page, pactive, pdirty, mapped;
+       size_t metadata_mapped, metadata_allocated;
        uint64_t npurge, nmadvise, purged;
        size_t small_allocated;
        uint64_t small_nmalloc, small_ndalloc, small_nrequests;
@@ -218,60 +271,98 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 
        CTL_GET("arenas.page", &page, size_t);
 
-       CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
+       CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
        malloc_cprintf(write_cb, cbopaque,
            "assigned threads: %u\n", nthreads);
-       CTL_I_GET("stats.arenas.0.dss", &dss, const char *);
+       CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
        malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
            dss);
-       CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
-       CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
-       CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
-       CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t);
-       CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t);
+       CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
+       if (opt_purge == purge_mode_ratio) {
+               if (lg_dirty_mult >= 0) {
+                       malloc_cprintf(write_cb, cbopaque,
+                           "min active:dirty page ratio: %u:1\n",
+                           (1U << lg_dirty_mult));
+               } else {
+                       malloc_cprintf(write_cb, cbopaque,
+                           "min active:dirty page ratio: N/A\n");
+               }
+       }
+       CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t);
+       if (opt_purge == purge_mode_decay) {
+               if (decay_time >= 0) {
+                       malloc_cprintf(write_cb, cbopaque, "decay time: %zd\n",
+                           decay_time);
+               } else
+                       malloc_cprintf(write_cb, cbopaque, "decay time: N/A\n");
+       }
+       CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
+       CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
+       CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t);
+       CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
+       CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
        malloc_cprintf(write_cb, cbopaque,
-           "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
-           " %"PRIu64" madvise%s, %"PRIu64" purged\n",
-           pactive, pdirty, npurge, npurge == 1 ? "" : "s",
-           nmadvise, nmadvise == 1 ? "" : "s", purged);
+           "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64", "
+           "purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged);
 
        malloc_cprintf(write_cb, cbopaque,
-           "            allocated      nmalloc      ndalloc    nrequests\n");
-       CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t);
-       CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
-       CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
-       CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
+           "                            allocated      nmalloc      ndalloc"
+           "    nrequests\n");
+       CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
+           size_t);
+       CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
+       CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t);
+       CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests,
+           uint64_t);
        malloc_cprintf(write_cb, cbopaque,
-           "small:   %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+           "small:                   %12zu %12"FMTu64" %12"FMTu64
+           " %12"FMTu64"\n",
            small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
-       CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t);
-       CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
-       CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
-       CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
+       CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
+           size_t);
+       CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
+       CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
+       CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
+           uint64_t);
        malloc_cprintf(write_cb, cbopaque,
-           "large:   %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+           "large:                   %12zu %12"FMTu64" %12"FMTu64
+           " %12"FMTu64"\n",
            large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
-       CTL_I_GET("stats.arenas.0.huge.allocated", &huge_allocated, size_t);
-       CTL_I_GET("stats.arenas.0.huge.nmalloc", &huge_nmalloc, uint64_t);
-       CTL_I_GET("stats.arenas.0.huge.ndalloc", &huge_ndalloc, uint64_t);
-       CTL_I_GET("stats.arenas.0.huge.nrequests", &huge_nrequests, uint64_t);
+       CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
+       CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
+       CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
+       CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests,
+           uint64_t);
        malloc_cprintf(write_cb, cbopaque,
-           "huge:    %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+           "huge:                    %12zu %12"FMTu64" %12"FMTu64
+           " %12"FMTu64"\n",
            huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
        malloc_cprintf(write_cb, cbopaque,
-           "total:   %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+           "total:                   %12zu %12"FMTu64" %12"FMTu64
+           " %12"FMTu64"\n",
            small_allocated + large_allocated + huge_allocated,
            small_nmalloc + large_nmalloc + huge_nmalloc,
            small_ndalloc + large_ndalloc + huge_ndalloc,
            small_nrequests + large_nrequests + huge_nrequests);
-       malloc_cprintf(write_cb, cbopaque, "active:  %12zu\n", pactive * page);
-       CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
-       malloc_cprintf(write_cb, cbopaque, "mapped:  %12zu\n", mapped);
+       malloc_cprintf(write_cb, cbopaque,
+           "active:                  %12zu\n", pactive * page);
+       CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
+       malloc_cprintf(write_cb, cbopaque,
+           "mapped:                  %12zu\n", mapped);
+       CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped,
+           size_t);
+       CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated,
+           size_t);
+       malloc_cprintf(write_cb, cbopaque,
+           "metadata: mapped: %zu, allocated: %zu\n",
+           metadata_mapped, metadata_allocated);
 
        if (bins)
                stats_arena_bins_print(write_cb, cbopaque, i);
        if (large)
                stats_arena_lruns_print(write_cb, cbopaque, i);
+       if (huge)
+               stats_arena_hchunks_print(write_cb, cbopaque, i);
 }
 
 void
@@ -286,6 +377,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
        bool unmerged = true;
        bool bins = true;
        bool large = true;
+       bool huge = true;
 
        /*
         * Refresh stats, in case mallctl() was called by the application.
@@ -328,6 +420,9 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
                        case 'l':
                                large = false;
                                break;
+                       case 'h':
+                               huge = false;
+                               break;
                        default:;
                        }
                }
@@ -340,9 +435,10 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
                bool bv;
                unsigned uv;
                ssize_t ssv;
-               size_t sv, bsz, ssz, sssz, cpsz;
+               size_t sv, bsz, usz, ssz, sssz, cpsz;
 
                bsz = sizeof(bool);
+               usz = sizeof(unsigned);
                ssz = sizeof(size_t);
                sssz = sizeof(ssize_t);
                cpsz = sizeof(const char *);
@@ -352,6 +448,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
                CTL_GET("config.debug", &bv, bool);
                malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
                    bv ? "enabled" : "disabled");
+               malloc_cprintf(write_cb, cbopaque,
+                   "config.malloc_conf: \"%s\"\n", config_malloc_conf);
 
 #define        OPT_WRITE_BOOL(n)                                               \
                if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) {    \
@@ -367,6 +465,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
                            : "false", bv2 ? "true" : "false");         \
                }                                                       \
 }
+#define        OPT_WRITE_UNSIGNED(n)                                           \
+               if (je_mallctl("opt."#n, &uv, &usz, NULL, 0) == 0) {    \
+                       malloc_cprintf(write_cb, cbopaque,              \
+                       "  opt."#n": %u\n", uv);                        \
+               }
 #define        OPT_WRITE_SIZE_T(n)                                             \
                if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) {    \
                        malloc_cprintf(write_cb, cbopaque,              \
@@ -377,6 +480,15 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
                        malloc_cprintf(write_cb, cbopaque,              \
                            "  opt."#n": %zd\n", ssv);                  \
                }
+#define        OPT_WRITE_SSIZE_T_MUTABLE(n, m) {                               \
+               ssize_t ssv2;                                           \
+               if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0 &&  \
+                   je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) {       \
+                       malloc_cprintf(write_cb, cbopaque,              \
+                           "  opt."#n": %zd ("#m": %zd)\n",            \
+                           ssv, ssv2);                                 \
+               }                                                       \
+}
 #define        OPT_WRITE_CHAR_P(n)                                             \
                if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) {  \
                        malloc_cprintf(write_cb, cbopaque,              \
@@ -388,10 +500,16 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
                OPT_WRITE_BOOL(abort)
                OPT_WRITE_SIZE_T(lg_chunk)
                OPT_WRITE_CHAR_P(dss)
-               OPT_WRITE_SIZE_T(narenas)
-               OPT_WRITE_SSIZE_T(lg_dirty_mult)
+               OPT_WRITE_UNSIGNED(narenas)
+               OPT_WRITE_CHAR_P(purge)
+               if (opt_purge == purge_mode_ratio) {
+                       OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult,
+                           arenas.lg_dirty_mult)
+               }
+               if (opt_purge == purge_mode_decay)
+                       OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time)
                OPT_WRITE_BOOL(stats_print)
-               OPT_WRITE_BOOL(junk)
+               OPT_WRITE_CHAR_P(junk)
                OPT_WRITE_SIZE_T(quarantine)
                OPT_WRITE_BOOL(redzone)
                OPT_WRITE_BOOL(zero)
@@ -427,19 +545,29 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
                    sizeof(void *));
 
                CTL_GET("arenas.quantum", &sv, size_t);
-               malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
+               malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n",
+                   sv);
 
                CTL_GET("arenas.page", &sv, size_t);
                malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
 
-               CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
-               if (ssv >= 0) {
-                       malloc_cprintf(write_cb, cbopaque,
-                           "Min active:dirty page ratio per arena: %u:1\n",
-                           (1U << ssv));
-               } else {
+               CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
+               if (opt_purge == purge_mode_ratio) {
+                       if (ssv >= 0) {
+                               malloc_cprintf(write_cb, cbopaque,
+                                   "Min active:dirty page ratio per arena: "
+                                   "%u:1\n", (1U << ssv));
+                       } else {
+                               malloc_cprintf(write_cb, cbopaque,
+                                   "Min active:dirty page ratio per arena: "
+                                   "N/A\n");
+                       }
+               }
+               CTL_GET("arenas.decay_time", &ssv, ssize_t);
+               if (opt_purge == purge_mode_decay) {
                        malloc_cprintf(write_cb, cbopaque,
-                           "Min active:dirty page ratio per arena: N/A\n");
+                           "Unused dirty page decay time: %zd%s\n",
+                           ssv, (ssv < 0) ? " (no decay)" : "");
                }
                if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) {
                        malloc_cprintf(write_cb, cbopaque,
@@ -448,13 +576,13 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
                if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) {
                        CTL_GET("prof.lg_sample", &sv, size_t);
                        malloc_cprintf(write_cb, cbopaque,
-                           "Average profile sample interval: %"PRIu64
+                           "Average profile sample interval: %"FMTu64
                            " (2^%zu)\n", (((uint64_t)1U) << sv), sv);
 
                        CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
                        if (ssv >= 0) {
                                malloc_cprintf(write_cb, cbopaque,
-                                   "Average profile dump interval: %"PRIu64
+                                   "Average profile dump interval: %"FMTu64
                                    " (2^%zd)\n",
                                    (((uint64_t)1U) << ssv), ssv);
                        } else {
@@ -463,35 +591,27 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
                        }
                }
                CTL_GET("opt.lg_chunk", &sv, size_t);
-               malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
-                   (ZU(1) << sv), sv);
+               malloc_cprintf(write_cb, cbopaque,
+                   "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv);
        }
 
        if (config_stats) {
                size_t *cactive;
-               size_t allocated, active, mapped;
-               size_t chunks_current, chunks_high;
-               uint64_t chunks_total;
+               size_t allocated, active, metadata, resident, mapped;
 
                CTL_GET("stats.cactive", &cactive, size_t *);
                CTL_GET("stats.allocated", &allocated, size_t);
                CTL_GET("stats.active", &active, size_t);
+               CTL_GET("stats.metadata", &metadata, size_t);
+               CTL_GET("stats.resident", &resident, size_t);
                CTL_GET("stats.mapped", &mapped, size_t);
                malloc_cprintf(write_cb, cbopaque,
-                   "Allocated: %zu, active: %zu, mapped: %zu\n",
-                   allocated, active, mapped);
-               malloc_cprintf(write_cb, cbopaque,
-                   "Current active ceiling: %zu\n", atomic_read_z(cactive));
-
-               /* Print chunk stats. */
-               CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
-               CTL_GET("stats.chunks.high", &chunks_high, size_t);
-               CTL_GET("stats.chunks.current", &chunks_current, size_t);
-               malloc_cprintf(write_cb, cbopaque, "chunks: nchunks   "
-                   "highchunks    curchunks\n");
+                   "Allocated: %zu, active: %zu, metadata: %zu,"
+                   " resident: %zu, mapped: %zu\n",
+                   allocated, active, metadata, resident, mapped);
                malloc_cprintf(write_cb, cbopaque,
-                   "  %13"PRIu64" %12zu %12zu\n",
-                   chunks_total, chunks_high, chunks_current);
+                   "Current active ceiling: %zu\n",
+                   atomic_read_z(cactive));
 
                if (merged) {
                        unsigned narenas;
@@ -515,7 +635,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
                                        malloc_cprintf(write_cb, cbopaque,
                                            "\nMerged arenas stats:\n");
                                        stats_arena_print(write_cb, cbopaque,
-                                           narenas, bins, large);
+                                           narenas, bins, large, huge);
                                }
                        }
                }
@@ -541,7 +661,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
                                                    cbopaque,
                                                    "\narenas[%u]:\n", i);
                                                stats_arena_print(write_cb,
-                                                   cbopaque, i, bins, large);
+                                                   cbopaque, i, bins, large,
+                                                   huge);
                                        }
                                }
                        }
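
The stats.c hunks above replace the single-index CTL_I_GET helper with CTL_M2_GET/CTL_M2_M4_GET, which patch one or two indices into a pre-translated mallctl MIB before issuing the query. A minimal sketch of the same pattern through the public API, assuming an unprefixed jemalloc build and the standard mallctlnametomib()/mallctlbymib() entry points (the helper name arena_pdirty is illustrative only, not part of the patch):

#include <stdint.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Read "stats.arenas.<arena_ind>.pdirty", mirroring what CTL_M2_GET does. */
static size_t
arena_pdirty(unsigned arena_ind)
{
	uint64_t epoch = 1;
	size_t mib[4], miblen = 4, pdirty, sz;

	/* Stats are cached; bump the epoch so the numbers are fresh. */
	sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	/* Translate the dotted name once; element 2 is the arena index. */
	if (mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen) != 0)
		return (0);
	mib[2] = (size_t)arena_ind;

	sz = sizeof(pdirty);
	if (mallctlbymib(mib, miblen, &pdirty, &sz, NULL, 0) != 0)
		return (0);
	return (pdirty);
}
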
index 07167b6d3c318b92676e5b3d5f4b7f7d47b65c3c..6e32f40471afed4a3669ee13ce6f880f45b65d17 100644 (file)
@@ -10,9 +10,17 @@ ssize_t      opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
 tcache_bin_info_t      *tcache_bin_info;
 static unsigned                stack_nelms; /* Total stack elms per tcache. */
 
-size_t                 nhbins;
+unsigned               nhbins;
 size_t                 tcache_maxclass;
 
+tcaches_t              *tcaches;
+
+/* Index of first element within tcaches that has never been used. */
+static unsigned                tcaches_past;
+
+/* Head of singly linked list tracking available tcaches elements. */
+static tcaches_t       *tcaches_avail;
+
 /******************************************************************************/
 
 size_t tcache_salloc(const void *ptr)
@@ -22,9 +30,9 @@ size_t        tcache_salloc(const void *ptr)
 }
 
 void
-tcache_event_hard(tcache_t *tcache)
+tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
 {
-       size_t binind = tcache->next_gc_bin;
+       szind_t binind = tcache->next_gc_bin;
        tcache_bin_t *tbin = &tcache->tbins[binind];
        tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
 
@@ -33,11 +41,12 @@ tcache_event_hard(tcache_t *tcache)
                 * Flush (ceiling) 3/4 of the objects below the low water mark.
                 */
                if (binind < NBINS) {
-                       tcache_bin_flush_small(tbin, binind, tbin->ncached -
-                           tbin->low_water + (tbin->low_water >> 2), tcache);
+                       tcache_bin_flush_small(tsd, tcache, tbin, binind,
+                           tbin->ncached - tbin->low_water + (tbin->low_water
+                           >> 2));
                } else {
-                       tcache_bin_flush_large(tbin, binind, tbin->ncached -
-                           tbin->low_water + (tbin->low_water >> 2), tcache);
+                       tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
+                           - tbin->low_water + (tbin->low_water >> 2), tcache);
                }
                /*
                 * Reduce fill count by 2X.  Limit lg_fill_div such that the
@@ -58,27 +67,28 @@ tcache_event_hard(tcache_t *tcache)
        tcache->next_gc_bin++;
        if (tcache->next_gc_bin == nhbins)
                tcache->next_gc_bin = 0;
-       tcache->ev_cnt = 0;
 }
 
 void *
-tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
+tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
 {
        void *ret;
 
-       arena_tcache_fill_small(tcache->arena, tbin, binind,
-           config_prof ? tcache->prof_accumbytes : 0);
+       arena_tcache_fill_small(tsd, arena, tbin, binind, config_prof ?
+           tcache->prof_accumbytes : 0);
        if (config_prof)
                tcache->prof_accumbytes = 0;
-       ret = tcache_alloc_easy(tbin);
+       ret = tcache_alloc_easy(tbin, tcache_success);
 
        return (ret);
 }
 
 void
-tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
-    tcache_t *tcache)
+tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+    szind_t binind, unsigned rem)
 {
+       arena_t *arena;
        void *ptr;
        unsigned i, nflush, ndeferred;
        bool merged_stats = false;
@@ -86,21 +96,23 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
        assert(binind < NBINS);
        assert(rem <= tbin->ncached);
 
+       arena = arena_choose(tsd, NULL);
+       assert(arena != NULL);
        for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
                /* Lock the arena bin associated with the first object. */
                arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
-                   tbin->avail[0]);
-               arena_t *arena = chunk->arena;
-               arena_bin_t *bin = &arena->bins[binind];
+                   *(tbin->avail - 1));
+               arena_t *bin_arena = extent_node_arena_get(&chunk->node);
+               arena_bin_t *bin = &bin_arena->bins[binind];
 
-               if (config_prof && arena == tcache->arena) {
+               if (config_prof && bin_arena == arena) {
                        if (arena_prof_accum(arena, tcache->prof_accumbytes))
                                prof_idump();
                        tcache->prof_accumbytes = 0;
                }
 
                malloc_mutex_lock(&bin->lock);
-               if (config_stats && arena == tcache->arena) {
+               if (config_stats && bin_arena == arena) {
                        assert(!merged_stats);
                        merged_stats = true;
                        bin->stats.nflushes++;
@@ -109,16 +121,16 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
                }
                ndeferred = 0;
                for (i = 0; i < nflush; i++) {
-                       ptr = tbin->avail[i];
+                       ptr = *(tbin->avail - 1 - i);
                        assert(ptr != NULL);
                        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-                       if (chunk->arena == arena) {
+                       if (extent_node_arena_get(&chunk->node) == bin_arena) {
                                size_t pageind = ((uintptr_t)ptr -
                                    (uintptr_t)chunk) >> LG_PAGE;
                                arena_chunk_map_bits_t *bitselm =
                                    arena_bitselm_get(chunk, pageind);
-                               arena_dalloc_bin_locked(arena, chunk, ptr,
-                                   bitselm);
+                               arena_dalloc_bin_junked_locked(bin_arena, chunk,
+                                   ptr, bitselm);
                        } else {
                                /*
                                 * This object was allocated via a different
@@ -126,18 +138,19 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
                                 * locked.  Stash the object, so that it can be
                                 * handled in a future pass.
                                 */
-                               tbin->avail[ndeferred] = ptr;
+                               *(tbin->avail - 1 - ndeferred) = ptr;
                                ndeferred++;
                        }
                }
                malloc_mutex_unlock(&bin->lock);
+               arena_decay_ticks(tsd, bin_arena, nflush - ndeferred);
        }
        if (config_stats && !merged_stats) {
                /*
                 * The flush loop didn't happen to flush to this thread's
                 * arena, so the stats didn't get merged.  Manually do so now.
                 */
-               arena_bin_t *bin = &tcache->arena->bins[binind];
+               arena_bin_t *bin = &arena->bins[binind];
                malloc_mutex_lock(&bin->lock);
                bin->stats.nflushes++;
                bin->stats.nrequests += tbin->tstats.nrequests;
@@ -145,17 +158,18 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
                malloc_mutex_unlock(&bin->lock);
        }
 
-       memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
-           rem * sizeof(void *));
+       memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+           sizeof(void *));
        tbin->ncached = rem;
        if ((int)tbin->ncached < tbin->low_water)
                tbin->low_water = tbin->ncached;
 }
 
 void
-tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
-    tcache_t *tcache)
+tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+    unsigned rem, tcache_t *tcache)
 {
+       arena_t *arena;
        void *ptr;
        unsigned i, nflush, ndeferred;
        bool merged_stats = false;
@@ -163,17 +177,19 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
        assert(binind < nhbins);
        assert(rem <= tbin->ncached);
 
+       arena = arena_choose(tsd, NULL);
+       assert(arena != NULL);
        for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
                /* Lock the arena associated with the first object. */
                arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
-                   tbin->avail[0]);
-               arena_t *arena = chunk->arena;
+                   *(tbin->avail - 1));
+               arena_t *locked_arena = extent_node_arena_get(&chunk->node);
                UNUSED bool idump;
 
                if (config_prof)
                        idump = false;
-               malloc_mutex_lock(&arena->lock);
-               if ((config_prof || config_stats) && arena == tcache->arena) {
+               malloc_mutex_lock(&locked_arena->lock);
+               if ((config_prof || config_stats) && locked_arena == arena) {
                        if (config_prof) {
                                idump = arena_prof_accum_locked(arena,
                                    tcache->prof_accumbytes);
@@ -190,32 +206,34 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
                }
                ndeferred = 0;
                for (i = 0; i < nflush; i++) {
-                       ptr = tbin->avail[i];
+                       ptr = *(tbin->avail - 1 - i);
                        assert(ptr != NULL);
                        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-                       if (chunk->arena == arena)
-                               arena_dalloc_large_locked(arena, chunk, ptr);
-                       else {
+                       if (extent_node_arena_get(&chunk->node) ==
+                           locked_arena) {
+                               arena_dalloc_large_junked_locked(locked_arena,
+                                   chunk, ptr);
+                       } else {
                                /*
                                 * This object was allocated via a different
                                 * arena than the one that is currently locked.
                                 * Stash the object, so that it can be handled
                                 * in a future pass.
                                 */
-                               tbin->avail[ndeferred] = ptr;
+                               *(tbin->avail - 1 - ndeferred) = ptr;
                                ndeferred++;
                        }
                }
-               malloc_mutex_unlock(&arena->lock);
+               malloc_mutex_unlock(&locked_arena->lock);
                if (config_prof && idump)
                        prof_idump();
+               arena_decay_ticks(tsd, locked_arena, nflush - ndeferred);
        }
        if (config_stats && !merged_stats) {
                /*
                 * The flush loop didn't happen to flush to this thread's
                 * arena, so the stats didn't get merged.  Manually do so now.
                 */
-               arena_t *arena = tcache->arena;
                malloc_mutex_lock(&arena->lock);
                arena->stats.nrequests_large += tbin->tstats.nrequests;
                arena->stats.lstats[binind - NBINS].nrequests +=
@@ -224,8 +242,8 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
                malloc_mutex_unlock(&arena->lock);
        }
 
-       memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
-           rem * sizeof(void *));
+       memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+           sizeof(void *));
        tbin->ncached = rem;
        if ((int)tbin->ncached < tbin->low_water)
                tbin->low_water = tbin->ncached;
@@ -242,36 +260,58 @@ tcache_arena_associate(tcache_t *tcache, arena_t *arena)
                ql_tail_insert(&arena->tcache_ql, tcache, link);
                malloc_mutex_unlock(&arena->lock);
        }
-       tcache->arena = arena;
 }
 
 void
-tcache_arena_dissociate(tcache_t *tcache)
+tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
+{
+
+       tcache_arena_dissociate(tcache, oldarena);
+       tcache_arena_associate(tcache, newarena);
+}
+
+void
+tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
 {
 
        if (config_stats) {
                /* Unlink from list of extant tcaches. */
-               malloc_mutex_lock(&tcache->arena->lock);
-               ql_remove(&tcache->arena->tcache_ql, tcache, link);
-               tcache_stats_merge(tcache, tcache->arena);
-               malloc_mutex_unlock(&tcache->arena->lock);
+               malloc_mutex_lock(&arena->lock);
+               if (config_debug) {
+                       bool in_ql = false;
+                       tcache_t *iter;
+                       ql_foreach(iter, &arena->tcache_ql, link) {
+                               if (iter == tcache) {
+                                       in_ql = true;
+                                       break;
+                               }
+                       }
+                       assert(in_ql);
+               }
+               ql_remove(&arena->tcache_ql, tcache, link);
+               tcache_stats_merge(tcache, arena);
+               malloc_mutex_unlock(&arena->lock);
        }
 }
 
 tcache_t *
 tcache_get_hard(tsd_t *tsd)
 {
+       arena_t *arena;
 
        if (!tcache_enabled_get()) {
                if (tsd_nominal(tsd))
                        tcache_enabled_set(false); /* Memoize. */
                return (NULL);
        }
-       return (tcache_create(choose_arena(tsd, NULL)));
+       arena = arena_choose(tsd, NULL);
+       if (unlikely(arena == NULL))
+               return (NULL);
+       return (tcache_create(tsd, arena));
 }
 
 tcache_t *
-tcache_create(arena_t *arena)
+tcache_create(tsd_t *tsd, arena_t *arena)
 {
        tcache_t *tcache;
        size_t size, stack_offset;
@@ -282,34 +322,29 @@ tcache_create(arena_t *arena)
        size = PTR_CEILING(size);
        stack_offset = size;
        size += stack_nelms * sizeof(void *);
-       /*
-        * Round up to the nearest multiple of the cacheline size, in order to
-        * avoid the possibility of false cacheline sharing.
-        *
-        * That this works relies on the same logic as in ipalloc(), but we
-        * cannot directly call ipalloc() here due to tcache bootstrapping
-        * issues.
-        */
-       size = (size + CACHELINE_MASK) & (-CACHELINE);
-
-       if (size <= SMALL_MAXCLASS)
-               tcache = (tcache_t *)arena_malloc_small(arena, size, true);
-       else if (size <= tcache_maxclass)
-               tcache = (tcache_t *)arena_malloc_large(arena, size, true);
-       else
-               tcache = (tcache_t *)icalloct(NULL, size, false, arena);
+       /* Avoid false cacheline sharing. */
+       size = sa2u(size, CACHELINE);
 
+       tcache = ipallocztm(tsd, size, CACHELINE, true, false, true,
+           arena_get(0, false));
        if (tcache == NULL)
                return (NULL);
 
        tcache_arena_associate(tcache, arena);
 
+       ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
+
        assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
        for (i = 0; i < nhbins; i++) {
                tcache->tbins[i].lg_fill_div = 1;
+               stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
+               /*
+                * avail points past the available space.  Allocations will
+                * access the slots toward higher addresses (for the benefit of
+                * prefetch).
+                */
                tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
                    (uintptr_t)stack_offset);
-               stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
        }
 
        return (tcache);
@@ -318,17 +353,17 @@ tcache_create(arena_t *arena)
 static void
 tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 {
+       arena_t *arena;
        unsigned i;
-       size_t tcache_size;
 
-       tcache_arena_dissociate(tcache);
+       arena = arena_choose(tsd, NULL);
+       tcache_arena_dissociate(tcache, arena);
 
        for (i = 0; i < NBINS; i++) {
                tcache_bin_t *tbin = &tcache->tbins[i];
-               tcache_bin_flush_small(tbin, i, 0, tcache);
+               tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
 
                if (config_stats && tbin->tstats.nrequests != 0) {
-                       arena_t *arena = tcache->arena;
                        arena_bin_t *bin = &arena->bins[i];
                        malloc_mutex_lock(&bin->lock);
                        bin->stats.nrequests += tbin->tstats.nrequests;
@@ -338,10 +373,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 
        for (; i < nhbins; i++) {
                tcache_bin_t *tbin = &tcache->tbins[i];
-               tcache_bin_flush_large(tbin, i, 0, tcache);
+               tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
 
                if (config_stats && tbin->tstats.nrequests != 0) {
-                       arena_t *arena = tcache->arena;
                        malloc_mutex_lock(&arena->lock);
                        arena->stats.nrequests_large += tbin->tstats.nrequests;
                        arena->stats.lstats[i - NBINS].nrequests +=
@@ -351,26 +385,10 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
        }
 
        if (config_prof && tcache->prof_accumbytes > 0 &&
-           arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
+           arena_prof_accum(arena, tcache->prof_accumbytes))
                prof_idump();
 
-       tcache_size = arena_salloc(tcache, false);
-       if (tcache_size <= SMALL_MAXCLASS) {
-               arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
-               arena_t *arena = chunk->arena;
-               size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
-                   LG_PAGE;
-               arena_chunk_map_bits_t *bitselm = arena_bitselm_get(chunk,
-                   pageind);
-
-               arena_dalloc_bin(arena, chunk, tcache, pageind, bitselm);
-       } else if (tcache_size <= tcache_maxclass) {
-               arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
-               arena_t *arena = chunk->arena;
-
-               arena_dalloc_large(arena, chunk, tcache);
-       } else
-               idalloct(tsd, tcache, false);
+       idalloctm(tsd, tcache, false, true, true);
 }
 
 void
@@ -421,23 +439,83 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
        }
 }
 
+bool
+tcaches_create(tsd_t *tsd, unsigned *r_ind)
+{
+       tcache_t *tcache;
+       tcaches_t *elm;
+
+       if (tcaches == NULL) {
+               tcaches = base_alloc(sizeof(tcache_t *) *
+                   (MALLOCX_TCACHE_MAX+1));
+               if (tcaches == NULL)
+                       return (true);
+       }
+
+       if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
+               return (true);
+       tcache = tcache_create(tsd, arena_get(0, false));
+       if (tcache == NULL)
+               return (true);
+
+       if (tcaches_avail != NULL) {
+               elm = tcaches_avail;
+               tcaches_avail = tcaches_avail->next;
+               elm->tcache = tcache;
+               *r_ind = (unsigned)(elm - tcaches);
+       } else {
+               elm = &tcaches[tcaches_past];
+               elm->tcache = tcache;
+               *r_ind = tcaches_past;
+               tcaches_past++;
+       }
+
+       return (false);
+}
+
+static void
+tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
+{
+
+       if (elm->tcache == NULL)
+               return;
+       tcache_destroy(tsd, elm->tcache);
+       elm->tcache = NULL;
+}
+
+void
+tcaches_flush(tsd_t *tsd, unsigned ind)
+{
+
+       tcaches_elm_flush(tsd, &tcaches[ind]);
+}
+
+void
+tcaches_destroy(tsd_t *tsd, unsigned ind)
+{
+       tcaches_t *elm = &tcaches[ind];
+       tcaches_elm_flush(tsd, elm);
+       elm->next = tcaches_avail;
+       tcaches_avail = elm;
+}
+
 bool
 tcache_boot(void)
 {
        unsigned i;
 
        /*
-        * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
+        * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
         * known.
         */
        if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
                tcache_maxclass = SMALL_MAXCLASS;
-       else if ((1U << opt_lg_tcache_max) > arena_maxclass)
-               tcache_maxclass = arena_maxclass;
+       else if ((1U << opt_lg_tcache_max) > large_maxclass)
+               tcache_maxclass = large_maxclass;
        else
                tcache_maxclass = (1U << opt_lg_tcache_max);
 
-       nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
+       nhbins = size2index(tcache_maxclass) + 1;
 
        /* Initialize tcache_bin_info. */
        tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
@@ -446,7 +524,11 @@ tcache_boot(void)
                return (true);
        stack_nelms = 0;
        for (i = 0; i < NBINS; i++) {
-               if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
+               if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
+                       tcache_bin_info[i].ncached_max =
+                           TCACHE_NSLOTS_SMALL_MIN;
+               } else if ((arena_bin_info[i].nregs << 1) <=
+                   TCACHE_NSLOTS_SMALL_MAX) {
                        tcache_bin_info[i].ncached_max =
                            (arena_bin_info[i].nregs << 1);
                } else {
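
The tcache.c changes above add a process-wide tcaches[] registry (tcaches_create()/tcaches_flush()/tcaches_destroy()) behind the explicit-tcache mallctls, and thread the tsd and arena through the flush paths. A minimal usage sketch against the public interface, assuming an unprefixed jemalloc 4.x build:

#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned tci;
	size_t sz = sizeof(tci);

	/* Ask jemalloc for an explicit, manually managed thread cache. */
	if (mallctl("tcache.create", &tci, &sz, NULL, 0) != 0)
		return (1);

	/* Route an allocation and its free through that cache. */
	void *p = mallocx(64, MALLOCX_TCACHE(tci));
	if (p != NULL)
		dallocx(p, MALLOCX_TCACHE(tci));

	/* Flush cached objects back to the arenas, then retire the cache. */
	mallctl("tcache.flush", NULL, NULL, &tci, sizeof(tci));
	mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
	return (0);
}
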
diff --git a/src/jemalloc/src/ticker.c b/src/jemalloc/src/ticker.c
new file mode 100644 (file)
index 0000000..db09024
--- /dev/null
@@ -0,0 +1,2 @@
+#define        JEMALLOC_TICKER_C_
+#include "jemalloc/internal/jemalloc_internal.h"
index cbc64e44eaba030325a74b5d1f8023789225a978..34c1573cdd5a41b97e102d206e5697405f067244 100644 (file)
@@ -15,16 +15,14 @@ void *
 malloc_tsd_malloc(size_t size)
 {
 
-       /* Avoid choose_arena() in order to dodge bootstrapping issues. */
-       return (arena_malloc(NULL, arenas[0], CACHELINE_CEILING(size), false,
-           false));
+       return (a0malloc(CACHELINE_CEILING(size)));
 }
 
 void
 malloc_tsd_dalloc(void *wrapper)
 {
 
-       idalloct(NULL, wrapper, false);
+       a0dalloc(wrapper);
 }
 
 void
@@ -75,6 +73,9 @@ tsd_cleanup(void *arg)
        tsd_t *tsd = (tsd_t *)arg;
 
        switch (tsd->state) {
+       case tsd_state_uninitialized:
+               /* Do nothing. */
+               break;
        case tsd_state_nominal:
 #define O(n, t)                                                                \
                n##_cleanup(tsd);
@@ -106,15 +107,24 @@ MALLOC_TSD
 }
 
 bool
-malloc_tsd_boot(void)
+malloc_tsd_boot0(void)
 {
 
        ncleanups = 0;
-       if (tsd_boot())
+       if (tsd_boot0())
                return (true);
+       *tsd_arenas_tdata_bypassp_get(tsd_fetch()) = true;
        return (false);
 }
 
+void
+malloc_tsd_boot1(void)
+{
+
+       tsd_boot1();
+       *tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false;
+}
+
 #ifdef _WIN32
 static BOOL WINAPI
 _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
@@ -138,13 +148,15 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
 #ifdef _MSC_VER
 #  ifdef _M_IX86
 #    pragma comment(linker, "/INCLUDE:__tls_used")
+#    pragma comment(linker, "/INCLUDE:_tls_callback")
 #  else
 #    pragma comment(linker, "/INCLUDE:_tls_used")
+#    pragma comment(linker, "/INCLUDE:tls_callback")
 #  endif
 #  pragma section(".CRT$XLY",long,read)
 #endif
 JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
-static const BOOL      (WINAPI *tls_callback)(HINSTANCE hinstDLL,
+BOOL   (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
     DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
 #endif
 
index bfd86af8d8ce6ef601869693e45de41df9c16791..02673c70d9cb0c08db70aa3f2b367a7dd1e28e37 100644 (file)
@@ -1,3 +1,7 @@
+/*
+ * Define simple versions of assertion macros that won't recurse in case
+ * of assertion failures in malloc_*printf().
+ */
 #define        assert(e) do {                                                  \
        if (config_debug && !(e)) {                                     \
                malloc_write("<jemalloc>: Failed assertion\n");         \
@@ -49,10 +53,14 @@ wrtmessage(void *cbopaque, const char *s)
         * Use syscall(2) rather than write(2) when possible in order to avoid
         * the possibility of memory allocation within libc.  This is necessary
         * on FreeBSD; most operating systems do not have this problem though.
+        *
+        * syscall() returns long or int, depending on platform, so capture the
+        * unused result in the widest plausible type to avoid compiler
+        * warnings.
         */
-       UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
+       UNUSED long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
 #else
-       UNUSED int result = write(STDERR_FILENO, s, strlen(s));
+       UNUSED ssize_t result = write(STDERR_FILENO, s, strlen(s));
 #endif
 }
 
@@ -81,10 +89,10 @@ buferror(int err, char *buf, size_t buflen)
 {
 
 #ifdef _WIN32
-       FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0,
-           (LPSTR)buf, buflen, NULL);
+       FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
+           (LPSTR)buf, (DWORD)buflen, NULL);
        return (0);
-#elif defined(_GNU_SOURCE)
+#elif defined(__GLIBC__) && defined(_GNU_SOURCE)
        char *b = strerror_r(err, buf, buflen);
        if (b != buf) {
                strncpy(buf, b, buflen);
@@ -577,7 +585,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
                str[i] = '\0';
        else
                str[size - 1] = '\0';
-       ret = i;
+       assert(i < INT_MAX);
+       ret = (int)i;
 
 #undef APPEND_C
 #undef APPEND_S
@@ -586,7 +595,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
        return (ret);
 }
 
-JEMALLOC_ATTR(format(printf, 3, 4))
+JEMALLOC_FORMAT_PRINTF(3, 4)
 int
 malloc_snprintf(char *str, size_t size, const char *format, ...)
 {
@@ -625,7 +634,7 @@ malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
  * Print to a callback function in such a way as to (hopefully) avoid memory
  * allocation.
  */
-JEMALLOC_ATTR(format(printf, 3, 4))
+JEMALLOC_FORMAT_PRINTF(3, 4)
 void
 malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
     const char *format, ...)
@@ -638,7 +647,7 @@ malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
 }
 
 /* Print to stderr in such a way as to avoid memory allocation. */
-JEMALLOC_ATTR(format(printf, 1, 2))
+JEMALLOC_FORMAT_PRINTF(1, 2)
 void
 malloc_printf(const char *format, ...)
 {
@@ -648,3 +657,12 @@ malloc_printf(const char *format, ...)
        malloc_vcprintf(NULL, NULL, format, ap);
        va_end(ap);
 }
+
+/*
+ * Restore normal assertion macros, in order to make it possible to compile all
+ * C files as a single concatenation.
+ */
+#undef assert
+#undef not_reached
+#undef not_implemented
+#include "jemalloc/internal/assert.h"
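
The util.c hunks widen the types that capture the otherwise unused write(2)/syscall(2) results and, at the end of the file, restore the normal assertion macros so all C files can still be compiled as one concatenation. The allocation-free error path that wrtmessage() relies on, reduced to a standalone sketch:

#include <string.h>
#include <unistd.h>

/* Write a message to stderr without going through stdio, so that
 * reporting a failure inside the allocator never re-enters malloc. */
static void
write_stderr(const char *s)
{
	ssize_t result = write(STDERR_FILENO, s, strlen(s));
	(void)result;	/* Silence unused-result warnings, as in the patch. */
}
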
index c6bd533fcc2fd4da0f3f995c6c3b9bd82f2e8756..6859b3feeae0c69e26f546e1c2b1a7997fc2a0c9 100644 (file)
@@ -121,9 +121,11 @@ zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
 static void
 zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
 {
+       size_t alloc_size;
 
-       if (ivsalloc(ptr, config_prof) != 0) {
-               assert(ivsalloc(ptr, config_prof) == size);
+       alloc_size = ivsalloc(ptr, config_prof);
+       if (alloc_size != 0) {
+               assert(alloc_size == size);
                je_free(ptr);
                return;
        }
@@ -263,7 +265,7 @@ register_zone(void)
                 * after the default zone.  On OSX < 10.6, there is no purgeable
                 * zone, so this does nothing.  On OSX >= 10.6, unregistering
                 * replaces the purgeable zone with the last registered zone
-                * above, i.e the default zone.  Registering it again then puts
+                * above, i.e. the default zone.  Registering it again then puts
                 * it at the end, obviously after the default zone.
                 */
                if (purgeable_zone) {
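
The zone.c comment corrected above describes the macOS re-registration trick: unregistering a zone promotes the most recently registered zone into its slot, and registering it again appends it at the end of the list, which is how the ordering relative to the default zone is controlled. The same trick in isolation, using the public zone API from <malloc/malloc.h> (macOS only; the helper name is illustrative):

#include <malloc/malloc.h>

/* Move a zone to the end of the system's malloc zone list. */
static void
move_zone_to_end(malloc_zone_t *zone)
{
	malloc_zone_unregister(zone);
	malloc_zone_register(zone);
}
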
index 6018e58ac12dac4112a95f1f772b91def0e85b34..0a3dbeac160a17a57d4edae804caac90f3a2098a 100644 (file)
@@ -1,14 +1,20 @@
+#include <limits.h>
+#ifndef SIZE_T_MAX
+#  define SIZE_T_MAX   SIZE_MAX
+#endif
 #include <stdlib.h>
 #include <stdarg.h>
 #include <stdbool.h>
 #include <errno.h>
-#include <inttypes.h>
 #include <math.h>
 #include <string.h>
-#include <sys/time.h>
+#ifdef _WIN32
+#  include "msvc_compat/strings.h"
+#endif
 
 #ifdef _WIN32
 #  include <windows.h>
+#  include "msvc_compat/windows_extra.h"
 #else
 #  include <pthread.h>
 #endif
@@ -87,6 +93,7 @@
 #  define JEMALLOC_H_STRUCTS
 #  define JEMALLOC_H_EXTERNS
 #  define JEMALLOC_H_INLINES
+#  include "jemalloc/internal/nstime.h"
 #  include "jemalloc/internal/util.h"
 #  include "jemalloc/internal/qr.h"
 #  include "jemalloc/internal/ql.h"
index aaaaec14b2cb890736d56f0c01664e7b378fd73d..5cc8532a3488f01091096a7855196558f55002a4 100644 (file)
@@ -1,6 +1,9 @@
 #include "jemalloc/internal/jemalloc_internal_defs.h"
 #include "jemalloc/internal/jemalloc_internal_decls.h"
 
-/* For use by SFMT. */
+/*
+ * For use by SFMT.  configure.ac doesn't actually define HAVE_SSE2 because its
+ * dependencies are notoriously unportable in practice.
+ */
 #undef HAVE_SSE2
 #undef HAVE_ALTIVEC
index a862ed7db24adc218edeaf3c69d7ecb8a0d4b43d..b057b29a1d2a1d280924f66a9eec11f1d00a3679 100644 (file)
@@ -299,7 +299,7 @@ pt_chi2(double p, double df, double ln_gamma_df_2)
 
 /*
  * Given a value p in [0..1] and Gamma distribution shape and scale parameters,
- * compute the upper limit on the definite integeral from [0..z] that satisfies
+ * compute the upper limit on the definite integral from [0..z] that satisfies
  * p.
  */
 JEMALLOC_INLINE double
index 11188653c655f79a259a4b835f558dc81b289406..7c4df493188050602f152e3f5d4f8b814aa8b00a 100644 (file)
@@ -1,3 +1,5 @@
+void   mq_nanosleep(unsigned ns);
+
 /*
  * Simple templated message queue implementation that relies on only mutexes for
  * synchronization (which reduces portability issues).  Given the following
@@ -75,26 +77,23 @@ a_attr a_mq_msg_type *                                                      \
 a_prefix##get(a_mq_type *mq)                                           \
 {                                                                      \
        a_mq_msg_type *msg;                                             \
-       struct timespec timeout;                                        \
+       unsigned ns;                                                    \
                                                                        \
        msg = a_prefix##tryget(mq);                                     \
        if (msg != NULL)                                                \
                return (msg);                                           \
                                                                        \
-       timeout.tv_sec = 0;                                             \
-       timeout.tv_nsec = 1;                                            \
+       ns = 1;                                                         \
        while (true) {                                                  \
-               nanosleep(&timeout, NULL);                              \
+               mq_nanosleep(ns);                                       \
                msg = a_prefix##tryget(mq);                             \
                if (msg != NULL)                                        \
                        return (msg);                                   \
-               if (timeout.tv_sec == 0) {                              \
+               if (ns < 1000*1000*1000) {                              \
                        /* Double sleep time, up to max 1 second. */    \
-                       timeout.tv_nsec <<= 1;                          \
-                       if (timeout.tv_nsec >= 1000*1000*1000) {        \
-                               timeout.tv_sec = 1;                     \
-                               timeout.tv_nsec = 0;                    \
-                       }                                               \
+                       ns <<= 1;                                       \
+                       if (ns > 1000*1000*1000)                        \
+                               ns = 1000*1000*1000;                    \
                }                                                       \
        }                                                               \
 }                                                                      \
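
The mq.h hunk above swaps the open-coded struct timespec handling for an mq_nanosleep() helper while keeping the capped exponential backoff. The same backoff loop written out as a standalone sketch (poll_once() is a hypothetical stand-in for the a_prefix##tryget() probe):

#include <stdbool.h>
#include <time.h>

static bool poll_once(void);	/* hypothetical predicate */

static void
wait_with_backoff(void)
{
	unsigned ns = 1;

	while (!poll_once()) {
		struct timespec ts;
		ts.tv_sec = ns / (1000*1000*1000);
		ts.tv_nsec = ns % (1000*1000*1000);
		nanosleep(&ts, NULL);
		/* Double the sleep time, up to a maximum of one second. */
		if (ns < 1000*1000*1000) {
			ns <<= 1;
			if (ns > 1000*1000*1000)
				ns = 1000*1000*1000;
		}
	}
}
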
index f55bafce677a8cd49fdcd1fa764f30a2f6f6353d..3cf901fc46f0012cfcd21c6a904ba0058c22082d 100644 (file)
     <=, "zu", __VA_ARGS__)
 
 #define        assert_d32_eq(a, b, ...)        assert_cmp(int32_t, a, b, ==,   \
-    !=, PRId32, __VA_ARGS__)
+    !=, FMTd32, __VA_ARGS__)
 #define        assert_d32_ne(a, b, ...)        assert_cmp(int32_t, a, b, !=,   \
-    ==, PRId32, __VA_ARGS__)
+    ==, FMTd32, __VA_ARGS__)
 #define        assert_d32_lt(a, b, ...)        assert_cmp(int32_t, a, b, <,    \
-    >=, PRId32, __VA_ARGS__)
+    >=, FMTd32, __VA_ARGS__)
 #define        assert_d32_le(a, b, ...)        assert_cmp(int32_t, a, b, <=,   \
-    >, PRId32, __VA_ARGS__)
+    >, FMTd32, __VA_ARGS__)
 #define        assert_d32_ge(a, b, ...)        assert_cmp(int32_t, a, b, >=,   \
-    <, PRId32, __VA_ARGS__)
+    <, FMTd32, __VA_ARGS__)
 #define        assert_d32_gt(a, b, ...)        assert_cmp(int32_t, a, b, >,    \
-    <=, PRId32, __VA_ARGS__)
+    <=, FMTd32, __VA_ARGS__)
 
 #define        assert_u32_eq(a, b, ...)        assert_cmp(uint32_t, a, b, ==,  \
-    !=, PRIu32, __VA_ARGS__)
+    !=, FMTu32, __VA_ARGS__)
 #define        assert_u32_ne(a, b, ...)        assert_cmp(uint32_t, a, b, !=,  \
-    ==, PRIu32, __VA_ARGS__)
+    ==, FMTu32, __VA_ARGS__)
 #define        assert_u32_lt(a, b, ...)        assert_cmp(uint32_t, a, b, <,   \
-    >=, PRIu32, __VA_ARGS__)
+    >=, FMTu32, __VA_ARGS__)
 #define        assert_u32_le(a, b, ...)        assert_cmp(uint32_t, a, b, <=,  \
-    >, PRIu32, __VA_ARGS__)
+    >, FMTu32, __VA_ARGS__)
 #define        assert_u32_ge(a, b, ...)        assert_cmp(uint32_t, a, b, >=,  \
-    <, PRIu32, __VA_ARGS__)
+    <, FMTu32, __VA_ARGS__)
 #define        assert_u32_gt(a, b, ...)        assert_cmp(uint32_t, a, b, >,   \
-    <=, PRIu32, __VA_ARGS__)
+    <=, FMTu32, __VA_ARGS__)
 
 #define        assert_d64_eq(a, b, ...)        assert_cmp(int64_t, a, b, ==,   \
-    !=, PRId64, __VA_ARGS__)
+    !=, FMTd64, __VA_ARGS__)
 #define        assert_d64_ne(a, b, ...)        assert_cmp(int64_t, a, b, !=,   \
-    ==, PRId64, __VA_ARGS__)
+    ==, FMTd64, __VA_ARGS__)
 #define        assert_d64_lt(a, b, ...)        assert_cmp(int64_t, a, b, <,    \
-    >=, PRId64, __VA_ARGS__)
+    >=, FMTd64, __VA_ARGS__)
 #define        assert_d64_le(a, b, ...)        assert_cmp(int64_t, a, b, <=,   \
-    >, PRId64, __VA_ARGS__)
+    >, FMTd64, __VA_ARGS__)
 #define        assert_d64_ge(a, b, ...)        assert_cmp(int64_t, a, b, >=,   \
-    <, PRId64, __VA_ARGS__)
+    <, FMTd64, __VA_ARGS__)
 #define        assert_d64_gt(a, b, ...)        assert_cmp(int64_t, a, b, >,    \
-    <=, PRId64, __VA_ARGS__)
+    <=, FMTd64, __VA_ARGS__)
 
 #define        assert_u64_eq(a, b, ...)        assert_cmp(uint64_t, a, b, ==,  \
-    !=, PRIu64, __VA_ARGS__)
+    !=, FMTu64, __VA_ARGS__)
 #define        assert_u64_ne(a, b, ...)        assert_cmp(uint64_t, a, b, !=,  \
-    ==, PRIu64, __VA_ARGS__)
+    ==, FMTu64, __VA_ARGS__)
 #define        assert_u64_lt(a, b, ...)        assert_cmp(uint64_t, a, b, <,   \
-    >=, PRIu64, __VA_ARGS__)
+    >=, FMTu64, __VA_ARGS__)
 #define        assert_u64_le(a, b, ...)        assert_cmp(uint64_t, a, b, <=,  \
-    >, PRIu64, __VA_ARGS__)
+    >, FMTu64, __VA_ARGS__)
 #define        assert_u64_ge(a, b, ...)        assert_cmp(uint64_t, a, b, >=,  \
-    <, PRIu64, __VA_ARGS__)
+    <, FMTu64, __VA_ARGS__)
 #define        assert_u64_gt(a, b, ...)        assert_cmp(uint64_t, a, b, >,   \
-    <=, PRIu64, __VA_ARGS__)
+    <=, FMTu64, __VA_ARGS__)
 
 #define        assert_b_eq(a, b, ...) do {                                     \
        bool a_ = (a);                                                  \
@@ -319,8 +319,8 @@ label_test_end:                                                             \
        }                                                               \
 } while (0)
 
-void   test_skip(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2));
-void   test_fail(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2));
+void   test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
+void   test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
 
 /* For private use by macros. */
 test_status_t  p_test(test_t *t, ...);
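
The test-harness assertions above switch from the C99 PRI* format strings to jemalloc's own FMT* macros, which the library defines for portability to runtimes whose printf does not understand the C99 strings. Both kinds of macro are string literals spliced into the format by concatenation; the standard <inttypes.h> equivalent looks like this:

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	uint64_t nmalloc = 42;

	/* PRIu64 plays the same role here as jemalloc's FMTu64. */
	printf("nmalloc: %12" PRIu64 "\n", nmalloc);
	return (0);
}
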
index f941d7a752fed19b9b0855df406a7a8b9b732342..47a51262e990000795fdf183f64fe2816b863b85 100644 (file)
@@ -1,4 +1,4 @@
-/* Abstraction layer for threading in tests */
+/* Abstraction layer for threading in tests. */
 #ifdef _WIN32
 typedef HANDLE thd_t;
 #else
index 6877e4ac9f293323cb0923b819136505c369b390..ace6191b85af53a1254f00427ff54f7d0b9b073d 100644 (file)
@@ -1,12 +1,8 @@
-/*
- * Simple timer, for use in benchmark reporting.
- */
-
-#include <sys/time.h>
+/* Simple timer, for use in benchmark reporting. */
 
 typedef struct {
-       struct timeval tv0;
-       struct timeval tv1;
+       nstime_t t0;
+       nstime_t t1;
 } timedelta_t;
 
 void   timer_start(timedelta_t *timer);
index 89938504eed50c8a6243283386e8a6ef850aad53..af1c9a53e44d2341166fc4bb23083b5d2ee4038a 100644 (file)
 #include "test/jemalloc_test.h"
 
-chunk_alloc_t *old_alloc;
-chunk_dalloc_t *old_dalloc;
+#ifdef JEMALLOC_FILL
+const char *malloc_conf = "junk:false";
+#endif
+
+static chunk_hooks_t orig_hooks;
+static chunk_hooks_t old_hooks;
+
+static bool do_dalloc = true;
+static bool do_decommit;
+
+static bool did_alloc;
+static bool did_dalloc;
+static bool did_commit;
+static bool did_decommit;
+static bool did_purge;
+static bool did_split;
+static bool did_merge;
+
+#if 0
+#  define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__)
+#else
+#  define TRACE_HOOK(fmt, ...)
+#endif
+
+void *
+chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool *commit, unsigned arena_ind)
+{
+
+       TRACE_HOOK("%s(new_addr=%p, size=%zu, alignment=%zu, *zero=%s, "
+           "*commit=%s, arena_ind=%u)\n", __func__, new_addr, size, alignment,
+           *zero ?  "true" : "false", *commit ? "true" : "false", arena_ind);
+       did_alloc = true;
+       return (old_hooks.alloc(new_addr, size, alignment, zero, commit,
+           arena_ind));
+}
 
 bool
-chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
+chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind)
 {
 
-       return (old_dalloc(chunk, size, arena_ind));
+       TRACE_HOOK("%s(chunk=%p, size=%zu, committed=%s, arena_ind=%u)\n",
+           __func__, chunk, size, committed ? "true" : "false", arena_ind);
+       did_dalloc = true;
+       if (!do_dalloc)
+               return (true);
+       return (old_hooks.dalloc(chunk, size, committed, arena_ind));
 }
 
-void *
-chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
+bool
+chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
+    unsigned arena_ind)
+{
+       bool err;
+
+       TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, "
+           "arena_ind=%u)\n", __func__, chunk, size, offset, length,
+           arena_ind);
+       err = old_hooks.commit(chunk, size, offset, length, arena_ind);
+       did_commit = !err;
+       return (err);
+}
+
+bool
+chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
+    unsigned arena_ind)
+{
+       bool err;
+
+       TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, "
+           "arena_ind=%u)\n", __func__, chunk, size, offset, length,
+           arena_ind);
+       if (!do_decommit)
+               return (true);
+       err = old_hooks.decommit(chunk, size, offset, length, arena_ind);
+       did_decommit = !err;
+       return (err);
+}
+
+bool
+chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
     unsigned arena_ind)
 {
 
-       return (old_alloc(new_addr, size, alignment, zero, arena_ind));
+       TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu "
+           "arena_ind=%u)\n", __func__, chunk, size, offset, length,
+           arena_ind);
+       did_purge = true;
+       return (old_hooks.purge(chunk, size, offset, length, arena_ind));
+}
+
+bool
+chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
+    bool committed, unsigned arena_ind)
+{
+
+       TRACE_HOOK("%s(chunk=%p, size=%zu, size_a=%zu, size_b=%zu, "
+           "committed=%s, arena_ind=%u)\n", __func__, chunk, size, size_a,
+           size_b, committed ? "true" : "false", arena_ind);
+       did_split = true;
+       return (old_hooks.split(chunk, size, size_a, size_b, committed,
+           arena_ind));
+}
+
+bool
+chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+    bool committed, unsigned arena_ind)
+{
+
+       TRACE_HOOK("%s(chunk_a=%p, size_a=%zu, chunk_b=%p size_b=%zu, "
+           "committed=%s, arena_ind=%u)\n", __func__, chunk_a, size_a, chunk_b,
+           size_b, committed ? "true" : "false", arena_ind);
+       did_merge = true;
+       return (old_hooks.merge(chunk_a, size_a, chunk_b, size_b,
+           committed, arena_ind));
 }
 
 TEST_BEGIN(test_chunk)
 {
        void *p;
-       chunk_alloc_t *new_alloc;
-       chunk_dalloc_t *new_dalloc;
-       size_t old_size, new_size;
+       size_t old_size, new_size, large0, large1, huge0, huge1, huge2, sz;
+       chunk_hooks_t new_hooks = {
+               chunk_alloc,
+               chunk_dalloc,
+               chunk_commit,
+               chunk_decommit,
+               chunk_purge,
+               chunk_split,
+               chunk_merge
+       };
+       bool xallocx_success_a, xallocx_success_b, xallocx_success_c;
 
-       new_alloc = chunk_alloc;
-       new_dalloc = chunk_dalloc;
-       old_size = sizeof(chunk_alloc_t *);
-       new_size = sizeof(chunk_alloc_t *);
+       /* Install custom chunk hooks. */
+       old_size = sizeof(chunk_hooks_t);
+       new_size = sizeof(chunk_hooks_t);
+       assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size,
+           &new_hooks, new_size), 0, "Unexpected chunk_hooks error");
+       orig_hooks = old_hooks;
+       assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error");
+       assert_ptr_ne(old_hooks.dalloc, chunk_dalloc,
+           "Unexpected dalloc error");
+       assert_ptr_ne(old_hooks.commit, chunk_commit,
+           "Unexpected commit error");
+       assert_ptr_ne(old_hooks.decommit, chunk_decommit,
+           "Unexpected decommit error");
+       assert_ptr_ne(old_hooks.purge, chunk_purge, "Unexpected purge error");
+       assert_ptr_ne(old_hooks.split, chunk_split, "Unexpected split error");
+       assert_ptr_ne(old_hooks.merge, chunk_merge, "Unexpected merge error");
 
-       assert_d_eq(mallctl("arena.0.chunk.alloc", &old_alloc,
-           &old_size, &new_alloc, new_size), 0,
-           "Unexpected alloc error");
-       assert_ptr_ne(old_alloc, new_alloc,
-           "Unexpected alloc error");
-       assert_d_eq(mallctl("arena.0.chunk.dalloc", &old_dalloc, &old_size,
-           &new_dalloc, new_size), 0, "Unexpected dalloc error");
-       assert_ptr_ne(old_dalloc, new_dalloc, "Unexpected dalloc error");
+       /* Get large size classes. */
+       sz = sizeof(size_t);
+       assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
+           "Unexpected arenas.lrun.0.size failure");
+       assert_d_eq(mallctl("arenas.lrun.1.size", &large1, &sz, NULL, 0), 0,
+           "Unexpected arenas.lrun.1.size failure");
+
+       /* Get huge size classes. */
+       assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
+           "Unexpected arenas.hchunk.0.size failure");
+       assert_d_eq(mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0), 0,
+           "Unexpected arenas.hchunk.1.size failure");
+       assert_d_eq(mallctl("arenas.hchunk.2.size", &huge2, &sz, NULL, 0), 0,
+           "Unexpected arenas.hchunk.2.size failure");
+
+       /* Test dalloc/decommit/purge cascade. */
+       do_dalloc = false;
+       do_decommit = false;
+       p = mallocx(huge0 * 2, 0);
+       assert_ptr_not_null(p, "Unexpected mallocx() error");
+       did_dalloc = false;
+       did_decommit = false;
+       did_purge = false;
+       did_split = false;
+       xallocx_success_a = (xallocx(p, huge0, 0, 0) == huge0);
+       assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+           "Unexpected arena.0.purge error");
+       if (xallocx_success_a) {
+               assert_true(did_dalloc, "Expected dalloc");
+               assert_false(did_decommit, "Unexpected decommit");
+               assert_true(did_purge, "Expected purge");
+       }
+       assert_true(did_split, "Expected split");
+       dallocx(p, 0);
+       do_dalloc = true;
+
+       /* Test decommit/commit and observe split/merge. */
+       do_dalloc = false;
+       do_decommit = true;
+       p = mallocx(huge0 * 2, 0);
+       assert_ptr_not_null(p, "Unexpected mallocx() error");
+       did_decommit = false;
+       did_commit = false;
+       did_split = false;
+       did_merge = false;
+       xallocx_success_b = (xallocx(p, huge0, 0, 0) == huge0);
+       assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+           "Unexpected arena.0.purge error");
+       if (xallocx_success_b)
+               assert_true(did_split, "Expected split");
+       xallocx_success_c = (xallocx(p, huge0 * 2, 0, 0) == huge0 * 2);
+       assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
+       if (xallocx_success_b && xallocx_success_c)
+               assert_true(did_merge, "Expected merge");
+       dallocx(p, 0);
+       do_dalloc = true;
+       do_decommit = false;
+
+       /* Test purge for partial-chunk huge allocations. */
+       if (huge0 * 2 > huge2) {
+               /*
+                * There are at least four size classes per doubling, so a
+                * successful xallocx() from size=huge2 to size=huge1 is
+                * guaranteed to leave trailing purgeable memory.
+                */
+               p = mallocx(huge2, 0);
+               assert_ptr_not_null(p, "Unexpected mallocx() error");
+               did_purge = false;
+               assert_zu_eq(xallocx(p, huge1, 0, 0), huge1,
+                   "Unexpected xallocx() failure");
+               assert_true(did_purge, "Expected purge");
+               dallocx(p, 0);
+       }
+
+       /* Test decommit for large allocations. */
+       do_decommit = true;
+       p = mallocx(large1, 0);
+       assert_ptr_not_null(p, "Unexpected mallocx() error");
+       assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+           "Unexpected arena.0.purge error");
+       did_decommit = false;
+       assert_zu_eq(xallocx(p, large0, 0, 0), large0,
+           "Unexpected xallocx() failure");
+       assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+           "Unexpected arena.0.purge error");
+       did_commit = false;
+       assert_zu_eq(xallocx(p, large1, 0, 0), large1,
+           "Unexpected xallocx() failure");
+       assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
+       dallocx(p, 0);
+       do_decommit = false;
 
+       /* Make sure non-huge allocation succeeds. */
        p = mallocx(42, 0);
-       assert_ptr_ne(p, NULL, "Unexpected alloc error");
-       free(p);
+       assert_ptr_not_null(p, "Unexpected mallocx() error");
+       dallocx(p, 0);
 
-       assert_d_eq(mallctl("arena.0.chunk.alloc", NULL,
-           NULL, &old_alloc, old_size), 0,
+       /* Restore chunk hooks. */
+       assert_d_eq(mallctl("arena.0.chunk_hooks", NULL, NULL, &old_hooks,
+           new_size), 0, "Unexpected chunk_hooks error");
+       assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size,
+           NULL, 0), 0, "Unexpected chunk_hooks error");
+       assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc,
            "Unexpected alloc error");
-       assert_d_eq(mallctl("arena.0.chunk.dalloc", NULL, NULL, &old_dalloc,
-           old_size), 0, "Unexpected dalloc error");
+       assert_ptr_eq(old_hooks.dalloc, orig_hooks.dalloc,
+           "Unexpected dalloc error");
+       assert_ptr_eq(old_hooks.commit, orig_hooks.commit,
+           "Unexpected commit error");
+       assert_ptr_eq(old_hooks.decommit, orig_hooks.decommit,
+           "Unexpected decommit error");
+       assert_ptr_eq(old_hooks.purge, orig_hooks.purge,
+           "Unexpected purge error");
+       assert_ptr_eq(old_hooks.split, orig_hooks.split,
+           "Unexpected split error");
+       assert_ptr_eq(old_hooks.merge, orig_hooks.merge,
+           "Unexpected merge error");
 }
 TEST_END
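
The rewritten test drives the new arena.<i>.chunk_hooks mallctl, which swaps an entire chunk_hooks_t table in one call instead of the old per-function arena.0.chunk.alloc/dalloc controls. A condensed sketch of the install/restore pattern used above; the helper names are illustrative and the hook implementations are assumed to be defined elsewhere:

#include "test/jemalloc_test.h"

static chunk_hooks_t saved_hooks;

/* Sketch: read the current hook table and install a replacement in one call. */
static void
install_chunk_hooks(chunk_hooks_t *new_hooks)
{
        size_t old_size = sizeof(chunk_hooks_t);
        size_t new_size = sizeof(chunk_hooks_t);

        assert_d_eq(mallctl("arena.0.chunk_hooks", &saved_hooks, &old_size,
            new_hooks, new_size), 0, "Unexpected chunk_hooks error");
}

/* Sketch: put the original hooks back once the test is done with them. */
static void
restore_chunk_hooks(void)
{

        assert_d_eq(mallctl("arena.0.chunk_hooks", NULL, NULL, &saved_hooks,
            sizeof(chunk_hooks_t)), 0, "Unexpected chunk_hooks error");
}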
 
index 123e041fa33f09cee5875c8445a0cd172dba47cb..42eee105f257ede41e8dd1055963e4cef72063a0 100644 (file)
 #include "test/jemalloc_test.h"
 
-#define        CHUNK 0x400000
-#define        MAXALIGN (((size_t)1) << 25)
-#define        NITER 4
+static unsigned
+get_nsizes_impl(const char *cmd)
+{
+       unsigned ret;
+       size_t z;
+
+       z = sizeof(unsigned);
+       assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+           "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+       return (ret);
+}
+
+static unsigned
+get_nhuge(void)
+{
+
+       return (get_nsizes_impl("arenas.nhchunks"));
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind)
+{
+       size_t ret;
+       size_t z;
+       size_t mib[4];
+       size_t miblen = 4;
+
+       z = sizeof(size_t);
+       assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+           0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+       mib[2] = ind;
+       z = sizeof(size_t);
+       assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+           0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+       return (ret);
+}
+
+static size_t
+get_huge_size(size_t ind)
+{
+
+       return (get_size_impl("arenas.hchunk.0.size", ind));
+}
+
+TEST_BEGIN(test_overflow)
+{
+       size_t hugemax;
+
+       hugemax = get_huge_size(get_nhuge()-1);
+
+       assert_ptr_null(mallocx(hugemax+1, 0),
+           "Expected OOM for mallocx(size=%#zx, 0)", hugemax+1);
+
+       assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
+           "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+       assert_ptr_null(mallocx(SIZE_T_MAX, 0),
+           "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX);
+
+       assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
+           "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))",
+           ZU(PTRDIFF_MAX)+1);
+}
+TEST_END
+
+TEST_BEGIN(test_oom)
+{
+       size_t hugemax, size, alignment;
+
+       hugemax = get_huge_size(get_nhuge()-1);
+
+       /*
+        * It should be impossible to allocate two objects that each consume
+        * more than half the virtual address space.
+        */
+       {
+               void *p;
+
+               p = mallocx(hugemax, 0);
+               if (p != NULL) {
+                       assert_ptr_null(mallocx(hugemax, 0),
+                           "Expected OOM for mallocx(size=%#zx, 0)", hugemax);
+                       dallocx(p, 0);
+               }
+       }
+
+#if LG_SIZEOF_PTR == 3
+       size      = ZU(0x8000000000000000);
+       alignment = ZU(0x8000000000000000);
+#else
+       size      = ZU(0x80000000);
+       alignment = ZU(0x80000000);
+#endif
+       assert_ptr_null(mallocx(size, MALLOCX_ALIGN(alignment)),
+           "Expected OOM for mallocx(size=%#zx, MALLOCX_ALIGN(%#zx)", size,
+           alignment);
+}
+TEST_END
 
 TEST_BEGIN(test_basic)
 {
-       size_t nsz, rsz, sz;
-       void *p;
-
-       sz = 42;
-       nsz = nallocx(sz, 0);
-       assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
-       p = mallocx(sz, 0);
-       assert_ptr_not_null(p, "Unexpected mallocx() error");
-       rsz = sallocx(p, 0);
-       assert_zu_ge(rsz, sz, "Real size smaller than expected");
-       assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
-       dallocx(p, 0);
-
-       p = mallocx(sz, 0);
-       assert_ptr_not_null(p, "Unexpected mallocx() error");
-       dallocx(p, 0);
-
-       nsz = nallocx(sz, MALLOCX_ZERO);
-       assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
-       p = mallocx(sz, MALLOCX_ZERO);
-       assert_ptr_not_null(p, "Unexpected mallocx() error");
-       rsz = sallocx(p, 0);
-       assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
-       dallocx(p, 0);
+#define        MAXSZ (((size_t)1) << 26)
+       size_t sz;
+
+       for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
+               size_t nsz, rsz;
+               void *p;
+               nsz = nallocx(sz, 0);
+               assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+               p = mallocx(sz, 0);
+               assert_ptr_not_null(p, "Unexpected mallocx() error");
+               rsz = sallocx(p, 0);
+               assert_zu_ge(rsz, sz, "Real size smaller than expected");
+               assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
+               dallocx(p, 0);
+
+               p = mallocx(sz, 0);
+               assert_ptr_not_null(p, "Unexpected mallocx() error");
+               dallocx(p, 0);
+
+               nsz = nallocx(sz, MALLOCX_ZERO);
+               assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+               p = mallocx(sz, MALLOCX_ZERO);
+               assert_ptr_not_null(p, "Unexpected mallocx() error");
+               rsz = sallocx(p, 0);
+               assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
+               dallocx(p, 0);
+       }
+#undef MAXSZ
 }
 TEST_END
 
 TEST_BEGIN(test_alignment_and_size)
 {
+#define        MAXALIGN (((size_t)1) << 25)
+#define        NITER 4
        size_t nsz, rsz, sz, alignment, total;
        unsigned i;
        void *ps[NITER];
@@ -84,6 +187,8 @@ TEST_BEGIN(test_alignment_and_size)
                        }
                }
        }
+#undef MAXALIGN
+#undef NITER
 }
 TEST_END
 
@@ -92,6 +197,8 @@ main(void)
 {
 
        return (test(
+           test_overflow,
+           test_oom,
            test_basic,
            test_alignment_and_size));
 }
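
The reworked test_basic() walks every size class up to MAXSZ by stepping the request size with nallocx(). The same idiom in isolation, as a sketch; the function name and bound are illustrative:

#include "test/jemalloc_test.h"

/* Sketch: visit each size class boundary by jumping to nallocx(sz, 0) + 1. */
static void
walk_size_classes(size_t max)
{
        size_t sz;

        for (sz = 1; sz < max; sz = nallocx(sz, 0) + 1) {
                void *p = mallocx(sz, 0);

                assert_ptr_not_null(p, "Unexpected mallocx() error");
                assert_zu_eq(sallocx(p, 0), nallocx(sz, 0),
                    "nallocx()/sallocx() size mismatch");
                dallocx(p, 0);
        }
}
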
diff --git a/src/jemalloc/test/integration/overflow.c b/src/jemalloc/test/integration/overflow.c
new file mode 100644 (file)
index 0000000..303d9b2
--- /dev/null
@@ -0,0 +1,49 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_overflow)
+{
+       unsigned nhchunks;
+       size_t mib[4];
+       size_t sz, miblen, max_size_class;
+       void *p;
+
+       sz = sizeof(unsigned);
+       assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
+           "Unexpected mallctl() error");
+
+       miblen = sizeof(mib) / sizeof(size_t);
+       assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
+           "Unexpected mallctlnametomib() error");
+       mib[2] = nhchunks - 1;
+
+       sz = sizeof(size_t);
+       assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0,
+           "Unexpected mallctlbymib() error");
+
+       assert_ptr_null(malloc(max_size_class + 1),
+           "Expected OOM due to over-sized allocation request");
+       assert_ptr_null(malloc(SIZE_T_MAX),
+           "Expected OOM due to over-sized allocation request");
+
+       assert_ptr_null(calloc(1, max_size_class + 1),
+           "Expected OOM due to over-sized allocation request");
+       assert_ptr_null(calloc(1, SIZE_T_MAX),
+           "Expected OOM due to over-sized allocation request");
+
+       p = malloc(1);
+       assert_ptr_not_null(p, "Unexpected malloc() OOM");
+       assert_ptr_null(realloc(p, max_size_class + 1),
+           "Expected OOM due to over-sized allocation request");
+       assert_ptr_null(realloc(p, SIZE_T_MAX),
+           "Expected OOM due to over-sized allocation request");
+       free(p);
+}
+TEST_END
+
+int
+main(void)
+{
+
+       return (test(
+           test_overflow));
+}
index b69807298c0703205dcb21d260672c0d24371c42..66ad8660a46be173b8b33883677b7a4bf6ae2986 100644 (file)
@@ -1,5 +1,51 @@
 #include "test/jemalloc_test.h"
 
+static unsigned
+get_nsizes_impl(const char *cmd)
+{
+       unsigned ret;
+       size_t z;
+
+       z = sizeof(unsigned);
+       assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+           "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+       return (ret);
+}
+
+static unsigned
+get_nhuge(void)
+{
+
+       return (get_nsizes_impl("arenas.nhchunks"));
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind)
+{
+       size_t ret;
+       size_t z;
+       size_t mib[4];
+       size_t miblen = 4;
+
+       z = sizeof(size_t);
+       assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+           0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+       mib[2] = ind;
+       z = sizeof(size_t);
+       assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+           0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+       return (ret);
+}
+
+static size_t
+get_huge_size(size_t ind)
+{
+
+       return (get_size_impl("arenas.hchunk.0.size", ind));
+}
+
 TEST_BEGIN(test_grow_and_shrink)
 {
        void *p, *q;
@@ -22,7 +68,7 @@ TEST_BEGIN(test_grow_and_shrink)
                            szs[j-1], szs[j-1]+1);
                        szs[j] = sallocx(q, 0);
                        assert_zu_ne(szs[j], szs[j-1]+1,
-                           "Expected size to at least: %zu", szs[j-1]+1);
+                           "Expected size to be at least: %zu", szs[j-1]+1);
                        p = q;
                }
 
@@ -55,8 +101,9 @@ validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
        for (i = 0; i < len; i++) {
                uint8_t b = buf[offset+i];
                if (b != c) {
-                       test_fail("Allocation at %p contains %#x rather than "
-                           "%#x at offset %zu", p, b, c, offset+i);
+                       test_fail("Allocation at %p (len=%zu) contains %#x "
+                           "rather than %#x at offset %zu", p, len, b, c,
+                           offset+i);
                        ret = true;
                }
        }
@@ -137,22 +184,22 @@ TEST_END
 TEST_BEGIN(test_lg_align_and_zero)
 {
        void *p, *q;
-       size_t lg_align, sz;
+       unsigned lg_align;
+       size_t sz;
 #define        MAX_LG_ALIGN 25
 #define        MAX_VALIDATE (ZU(1) << 22)
 
-       lg_align = ZU(0);
+       lg_align = 0;
        p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
        assert_ptr_not_null(p, "Unexpected mallocx() error");
 
        for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
                q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
                assert_ptr_not_null(q,
-                   "Unexpected rallocx() error for lg_align=%zu", lg_align);
+                   "Unexpected rallocx() error for lg_align=%u", lg_align);
                assert_ptr_null(
                    (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
-                   "%p inadequately aligned for lg_align=%zu",
-                   q, lg_align);
+                   "%p inadequately aligned for lg_align=%u", q, lg_align);
                sz = sallocx(q, 0);
                if ((sz << 1) <= MAX_VALIDATE) {
                        assert_false(validate_fill(q, 0, 0, sz),
@@ -172,6 +219,33 @@ TEST_BEGIN(test_lg_align_and_zero)
 }
 TEST_END
 
+TEST_BEGIN(test_overflow)
+{
+       size_t hugemax;
+       void *p;
+
+       hugemax = get_huge_size(get_nhuge()-1);
+
+       p = mallocx(1, 0);
+       assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+       assert_ptr_null(rallocx(p, hugemax+1, 0),
+           "Expected OOM for rallocx(p, size=%#zx, 0)", hugemax+1);
+
+       assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0),
+           "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+       assert_ptr_null(rallocx(p, SIZE_T_MAX, 0),
+           "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX);
+
+       assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
+           "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))",
+           ZU(PTRDIFF_MAX)+1);
+
+       dallocx(p, 0);
+}
+TEST_END
+
 int
 main(void)
 {
@@ -180,5 +254,6 @@ main(void)
            test_grow_and_shrink,
            test_zero,
            test_align,
-           test_lg_align_and_zero));
+           test_lg_align_and_zero,
+           test_overflow));
 }
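
test_lg_align_and_zero() above combines MALLOCX_LG_ALIGN() with MALLOCX_ZERO on every rallocx() call and then checks the low address bits. The core call pattern as a standalone sketch; the helper name is illustrative:

#include "test/jemalloc_test.h"

/* Sketch: reallocate to a 2^lg_align boundary with zero fill requested. */
static void *
realign_zeroed(void *p, unsigned lg_align)
{
        void *q;

        q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
        assert_ptr_not_null(q, "Unexpected rallocx() error for lg_align=%u",
            lg_align);
        assert_ptr_null((void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
            "%p inadequately aligned for lg_align=%u", q, lg_align);
        return (q);
}
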
index ab4cf945e54e5915db225ff4167a75eb9f19c323..5c4998b68bd9bc7dc0e006ecdf55237ca77aa0b4 100644 (file)
@@ -1,5 +1,24 @@
 #include "test/jemalloc_test.h"
 
+/*
+ * Use a separate arena for xallocx() extension/contraction tests so that
+ * internal allocation, e.g. by heap profiling, can't interpose allocations
+ * where xallocx() would ordinarily be able to extend.
+ */
+static unsigned
+arena_ind(void)
+{
+       static unsigned ind = 0;
+
+       if (ind == 0) {
+               size_t sz = sizeof(ind);
+               assert_d_eq(mallctl("arenas.extend", &ind, &sz, NULL, 0), 0,
+                   "Unexpected mallctl failure creating arena");
+       }
+
+       return (ind);
+}
+
 TEST_BEGIN(test_same_size)
 {
        void *p;
@@ -48,6 +67,414 @@ TEST_BEGIN(test_no_move_fail)
 }
 TEST_END
 
+static unsigned
+get_nsizes_impl(const char *cmd)
+{
+       unsigned ret;
+       size_t z;
+
+       z = sizeof(unsigned);
+       assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+           "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+       return (ret);
+}
+
+static unsigned
+get_nsmall(void)
+{
+
+       return (get_nsizes_impl("arenas.nbins"));
+}
+
+static unsigned
+get_nlarge(void)
+{
+
+       return (get_nsizes_impl("arenas.nlruns"));
+}
+
+static unsigned
+get_nhuge(void)
+{
+
+       return (get_nsizes_impl("arenas.nhchunks"));
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind)
+{
+       size_t ret;
+       size_t z;
+       size_t mib[4];
+       size_t miblen = 4;
+
+       z = sizeof(size_t);
+       assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+           0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+       mib[2] = ind;
+       z = sizeof(size_t);
+       assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+           0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+       return (ret);
+}
+
+static size_t
+get_small_size(size_t ind)
+{
+
+       return (get_size_impl("arenas.bin.0.size", ind));
+}
+
+static size_t
+get_large_size(size_t ind)
+{
+
+       return (get_size_impl("arenas.lrun.0.size", ind));
+}
+
+static size_t
+get_huge_size(size_t ind)
+{
+
+       return (get_size_impl("arenas.hchunk.0.size", ind));
+}
+
+TEST_BEGIN(test_size)
+{
+       size_t small0, hugemax;
+       void *p;
+
+       /* Get size classes. */
+       small0 = get_small_size(0);
+       hugemax = get_huge_size(get_nhuge()-1);
+
+       p = mallocx(small0, 0);
+       assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+       /* Test smallest supported size. */
+       assert_zu_eq(xallocx(p, 1, 0, 0), small0,
+           "Unexpected xallocx() behavior");
+
+       /* Test largest supported size. */
+       assert_zu_le(xallocx(p, hugemax, 0, 0), hugemax,
+           "Unexpected xallocx() behavior");
+
+       /* Test size overflow. */
+       assert_zu_le(xallocx(p, hugemax+1, 0, 0), hugemax,
+           "Unexpected xallocx() behavior");
+       assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), hugemax,
+           "Unexpected xallocx() behavior");
+
+       dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_size_extra_overflow)
+{
+       size_t small0, hugemax;
+       void *p;
+
+       /* Get size classes. */
+       small0 = get_small_size(0);
+       hugemax = get_huge_size(get_nhuge()-1);
+
+       p = mallocx(small0, 0);
+       assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+       /* Test overflows that can be resolved by clamping extra. */
+       assert_zu_le(xallocx(p, hugemax-1, 2, 0), hugemax,
+           "Unexpected xallocx() behavior");
+       assert_zu_le(xallocx(p, hugemax, 1, 0), hugemax,
+           "Unexpected xallocx() behavior");
+
+       /* Test overflow such that hugemax-size underflows. */
+       assert_zu_le(xallocx(p, hugemax+1, 2, 0), hugemax,
+           "Unexpected xallocx() behavior");
+       assert_zu_le(xallocx(p, hugemax+2, 3, 0), hugemax,
+           "Unexpected xallocx() behavior");
+       assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), hugemax,
+           "Unexpected xallocx() behavior");
+       assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), hugemax,
+           "Unexpected xallocx() behavior");
+
+       dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_small)
+{
+       size_t small0, small1, hugemax;
+       void *p;
+
+       /* Get size classes. */
+       small0 = get_small_size(0);
+       small1 = get_small_size(1);
+       hugemax = get_huge_size(get_nhuge()-1);
+
+       p = mallocx(small0, 0);
+       assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+       assert_zu_eq(xallocx(p, small1, 0, 0), small0,
+           "Unexpected xallocx() behavior");
+
+       assert_zu_eq(xallocx(p, small1, 0, 0), small0,
+           "Unexpected xallocx() behavior");
+
+       assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0,
+           "Unexpected xallocx() behavior");
+
+       /* Test size+extra overflow. */
+       assert_zu_eq(xallocx(p, small0, hugemax - small0 + 1, 0), small0,
+           "Unexpected xallocx() behavior");
+       assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0,
+           "Unexpected xallocx() behavior");
+
+       dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_large)
+{
+       int flags = MALLOCX_ARENA(arena_ind());
+       size_t smallmax, large0, large1, large2, huge0, hugemax;
+       void *p;
+
+       /* Get size classes. */
+       smallmax = get_small_size(get_nsmall()-1);
+       large0 = get_large_size(0);
+       large1 = get_large_size(1);
+       large2 = get_large_size(2);
+       huge0 = get_huge_size(0);
+       hugemax = get_huge_size(get_nhuge()-1);
+
+       p = mallocx(large2, flags);
+       assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+       assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+           "Unexpected xallocx() behavior");
+       /* Test size decrease with zero extra. */
+       assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+           "Unexpected xallocx() behavior");
+       assert_zu_eq(xallocx(p, smallmax, 0, flags), large0,
+           "Unexpected xallocx() behavior");
+
+       assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+           "Unexpected xallocx() behavior");
+       /* Test size decrease with non-zero extra. */
+       assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2,
+           "Unexpected xallocx() behavior");
+       assert_zu_eq(xallocx(p, large1, large2 - large1, flags), large2,
+           "Unexpected xallocx() behavior");
+       assert_zu_eq(xallocx(p, large0, large1 - large0, flags), large1,
+           "Unexpected xallocx() behavior");
+       assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, flags), large0,
+           "Unexpected xallocx() behavior");
+
+       assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+           "Unexpected xallocx() behavior");
+       /* Test size increase with zero extra. */
+       assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+           "Unexpected xallocx() behavior");
+       assert_zu_eq(xallocx(p, huge0, 0, flags), large2,
+           "Unexpected xallocx() behavior");
+
+       assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+           "Unexpected xallocx() behavior");
+       /* Test size increase with non-zero extra. */
+       assert_zu_lt(xallocx(p, large0, huge0 - large0, flags), huge0,
+           "Unexpected xallocx() behavior");
+
+       assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+           "Unexpected xallocx() behavior");
+       /* Test size increase with non-zero extra. */
+       assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2,
+           "Unexpected xallocx() behavior");
+
+       assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+           "Unexpected xallocx() behavior");
+       /* Test size+extra overflow. */
+       assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, flags), huge0,
+           "Unexpected xallocx() behavior");
+
+       dallocx(p, flags);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_huge)
+{
+       int flags = MALLOCX_ARENA(arena_ind());
+       size_t largemax, huge1, huge2, huge3, hugemax;
+       void *p;
+
+       /* Get size classes. */
+       largemax = get_large_size(get_nlarge()-1);
+       huge1 = get_huge_size(1);
+       huge2 = get_huge_size(2);
+       huge3 = get_huge_size(3);
+       hugemax = get_huge_size(get_nhuge()-1);
+
+       p = mallocx(huge3, flags);
+       assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+       assert_zu_eq(xallocx(p, huge3, 0, flags), huge3,
+           "Unexpected xallocx() behavior");
+       /* Test size decrease with zero extra. */
+       assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+           "Unexpected xallocx() behavior");
+       assert_zu_ge(xallocx(p, largemax, 0, flags), huge1,
+           "Unexpected xallocx() behavior");
+
+       assert_zu_eq(xallocx(p, huge3, 0, flags), huge3,
+           "Unexpected xallocx() behavior");
+       /* Test size decrease with non-zero extra. */
+       assert_zu_eq(xallocx(p, huge1, huge3 - huge1, flags), huge3,
+           "Unexpected xallocx() behavior");
+       assert_zu_eq(xallocx(p, huge2, huge3 - huge2, flags), huge3,
+           "Unexpected xallocx() behavior");
+       assert_zu_eq(xallocx(p, huge1, huge2 - huge1, flags), huge2,
+           "Unexpected xallocx() behavior");
+       assert_zu_ge(xallocx(p, largemax, huge1 - largemax, flags), huge1,
+           "Unexpected xallocx() behavior");
+
+       assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+           "Unexpected xallocx() behavior");
+       /* Test size increase with zero extra. */
+       assert_zu_le(xallocx(p, huge3, 0, flags), huge3,
+           "Unexpected xallocx() behavior");
+       assert_zu_le(xallocx(p, hugemax+1, 0, flags), huge3,
+           "Unexpected xallocx() behavior");
+
+       assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+           "Unexpected xallocx() behavior");
+       /* Test size increase with non-zero extra. */
+       assert_zu_le(xallocx(p, huge1, SIZE_T_MAX - huge1, flags), hugemax,
+           "Unexpected xallocx() behavior");
+
+       assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+           "Unexpected xallocx() behavior");
+       /* Test size increase with non-zero extra. */
+       assert_zu_le(xallocx(p, huge1, huge3 - huge1, flags), huge3,
+           "Unexpected xallocx() behavior");
+
+       assert_zu_eq(xallocx(p, huge3, 0, flags), huge3,
+           "Unexpected xallocx() behavior");
+       /* Test size+extra overflow. */
+       assert_zu_le(xallocx(p, huge3, hugemax - huge3 + 1, flags), hugemax,
+           "Unexpected xallocx() behavior");
+
+       dallocx(p, flags);
+}
+TEST_END
+
+static void
+print_filled_extents(const void *p, uint8_t c, size_t len)
+{
+       const uint8_t *pc = (const uint8_t *)p;
+       size_t i, range0;
+       uint8_t c0;
+
+       malloc_printf("  p=%p, c=%#x, len=%zu:", p, c, len);
+       range0 = 0;
+       c0 = pc[0];
+       for (i = 0; i < len; i++) {
+               if (pc[i] != c0) {
+                       malloc_printf(" %#x[%zu..%zu)", c0, range0, i);
+                       range0 = i;
+                       c0 = pc[i];
+               }
+       }
+       malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i);
+}
+
+static bool
+validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
+{
+       const uint8_t *pc = (const uint8_t *)p;
+       bool err;
+       size_t i;
+
+       for (i = offset, err = false; i < offset+len; i++) {
+               if (pc[i] != c)
+                       err = true;
+       }
+
+       if (err)
+               print_filled_extents(p, c, offset + len);
+
+       return (err);
+}
+
+static void
+test_zero(size_t szmin, size_t szmax)
+{
+       int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO;
+       size_t sz, nsz;
+       void *p;
+#define        FILL_BYTE 0x7aU
+
+       sz = szmax;
+       p = mallocx(sz, flags);
+       assert_ptr_not_null(p, "Unexpected mallocx() error");
+       assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu",
+           sz);
+
+       /*
+        * Fill with non-zero so that non-debug builds are more likely to detect
+        * errors.
+        */
+       memset(p, FILL_BYTE, sz);
+       assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+           "Memory not filled: sz=%zu", sz);
+
+       /* Shrink in place so that we can expect growing in place to succeed. */
+       sz = szmin;
+       assert_zu_eq(xallocx(p, sz, 0, flags), sz,
+           "Unexpected xallocx() error");
+       assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+           "Memory not filled: sz=%zu", sz);
+
+       for (sz = szmin; sz < szmax; sz = nsz) {
+               nsz = nallocx(sz+1, flags);
+               assert_zu_eq(xallocx(p, sz+1, 0, flags), nsz,
+                   "Unexpected xallocx() failure");
+               assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+                   "Memory not filled: sz=%zu", sz);
+               assert_false(validate_fill(p, 0x00, sz, nsz-sz),
+                   "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz);
+               memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz);
+               assert_false(validate_fill(p, FILL_BYTE, 0, nsz),
+                   "Memory not filled: nsz=%zu", nsz);
+       }
+
+       dallocx(p, flags);
+}
+
+TEST_BEGIN(test_zero_large)
+{
+       size_t large0, largemax;
+
+       /* Get size classes. */
+       large0 = get_large_size(0);
+       largemax = get_large_size(get_nlarge()-1);
+
+       test_zero(large0, largemax);
+}
+TEST_END
+
+TEST_BEGIN(test_zero_huge)
+{
+       size_t huge0, huge1;
+
+       /* Get size classes. */
+       huge0 = get_huge_size(0);
+       huge1 = get_huge_size(1);
+
+       test_zero(huge1, huge0 * 2);
+}
+TEST_END
+
 int
 main(void)
 {
@@ -55,5 +482,12 @@ main(void)
        return (test(
            test_same_size,
            test_extra_no_move,
-           test_no_move_fail));
+           test_no_move_fail,
+           test_size,
+           test_size_extra_overflow,
+           test_extra_small,
+           test_extra_large,
+           test_extra_huge,
+           test_zero_large,
+           test_zero_huge));
 }
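
The added tests pin their allocations to a fresh arena (arena_ind() above, via the arenas.extend mallctl) and resize them with xallocx(), which never moves the allocation and returns the resulting usable size. A condensed sketch of that setup; the helper name and sizes are illustrative, and whether the grow succeeds depends on what is adjacent in the arena:

#include "test/jemalloc_test.h"

/* Sketch: create a private arena, then try to grow an allocation in place. */
static void
try_grow_in_place(size_t from, size_t to)
{
        unsigned ind;
        size_t sz = sizeof(ind);
        size_t usize;
        int flags;
        void *p;

        assert_d_eq(mallctl("arenas.extend", &ind, &sz, NULL, 0), 0,
            "Unexpected mallctl failure creating arena");
        flags = MALLOCX_ARENA(ind);

        p = mallocx(from, flags);
        assert_ptr_not_null(p, "Unexpected mallocx() error");
        /* On success usize >= to; otherwise the old usable size comes back. */
        usize = xallocx(p, to, 0, flags);
        malloc_printf("xallocx: requested %zu, got %zu\n", to, usize);
        dallocx(p, flags);
}
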
diff --git a/src/jemalloc/test/src/mq.c b/src/jemalloc/test/src/mq.c
new file mode 100644 (file)
index 0000000..40b31c1
--- /dev/null
@@ -0,0 +1,29 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Sleep for approximately ns nanoseconds.  Neither a lower nor an upper
+ * bound on sleep time is guaranteed.
+ */
+void
+mq_nanosleep(unsigned ns)
+{
+
+       assert(ns <= 1000*1000*1000);
+
+#ifdef _WIN32
+       Sleep(ns / 1000);
+#else
+       {
+               struct timespec timeout;
+
+               if (ns < 1000*1000*1000) {
+                       timeout.tv_sec = 0;
+                       timeout.tv_nsec = ns;
+               } else {
+                       timeout.tv_sec = 1;
+                       timeout.tv_nsec = 0;
+               }
+               nanosleep(&timeout, NULL);
+       }
+#endif
+}
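
mq_nanosleep() gives the message-queue tests a portable, best-effort short sleep. A sketch of the kind of polling loop it is meant for; the flag is illustrative and not part of this file:

#include "test/jemalloc_test.h"

/* Sketch: poll about once per millisecond until another thread sets *done. */
static void
wait_until_done(volatile bool *done)
{

        while (!*done)
                mq_nanosleep(1000*1000);
}
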
index 0f8bd4947105a2f99c21a4a478d845f53533ca6d..8173614cf638060c4a5d3fafdcda632cfdf8c82c 100644 (file)
@@ -5,7 +5,7 @@ static test_status_t    test_counts[test_status_count] = {0, 0, 0};
 static test_status_t   test_status = test_status_pass;
 static const char *    test_name = "";
 
-JEMALLOC_ATTR(format(printf, 1, 2))
+JEMALLOC_FORMAT_PRINTF(1, 2)
 void
 test_skip(const char *format, ...)
 {
@@ -18,7 +18,7 @@ test_skip(const char *format, ...)
        test_status = test_status_skip;
 }
 
-JEMALLOC_ATTR(format(printf, 1, 2))
+JEMALLOC_FORMAT_PRINTF(1, 2)
 void
 test_fail(const char *format, ...)
 {
index 36fbedd48952e02e0088b22a9ca1d7101d36f2e7..e91b3cf21f0d8aef8f70f6d3e9b12e208bf1b65d 100644 (file)
@@ -4,22 +4,26 @@ void
 timer_start(timedelta_t *timer)
 {
 
-       gettimeofday(&timer->tv0, NULL);
+       nstime_init(&timer->t0, 0);
+       nstime_update(&timer->t0);
 }
 
 void
 timer_stop(timedelta_t *timer)
 {
 
-       gettimeofday(&timer->tv1, NULL);
+       nstime_copy(&timer->t1, &timer->t0);
+       nstime_update(&timer->t1);
 }
 
 uint64_t
 timer_usec(const timedelta_t *timer)
 {
+       nstime_t delta;
 
-       return (((timer->tv1.tv_sec - timer->tv0.tv_sec) * 1000000) +
-           timer->tv1.tv_usec - timer->tv0.tv_usec);
+       nstime_copy(&delta, &timer->t1);
+       nstime_subtract(&delta, &timer->t0);
+       return (nstime_ns(&delta) / 1000);
 }
 
 void
@@ -33,7 +37,7 @@ timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen)
        int n;
 
        /* Whole. */
-       n = malloc_snprintf(&buf[i], buflen-i, "%"PRIu64, t0 / t1);
+       n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1);
        i += n;
        if (i >= buflen)
                return;
@@ -50,7 +54,7 @@ timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen)
                uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10
                    >= 5)) ? 1 : 0;
                n = malloc_snprintf(&buf[i], buflen-i,
-                   "%"PRIu64, (t0 * mult / t1) % 10 + round);
+                   "%"FMTu64, (t0 * mult / t1) % 10 + round);
                i += n;
                mult *= 10;
        }
index 980eca410820d2be9c4b32caf4bcfddfcfc20cb2..ee39fea7f1b4d163ecf6f019bcf17fbcc8d9629f 100644 (file)
@@ -31,8 +31,8 @@ compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a,
        time_func(&timer_b, nwarmup, niter, func_b);
 
        timer_ratio(&timer_a, &timer_b, ratio_buf, sizeof(ratio_buf));
-       malloc_printf("%"PRIu64" iterations, %s=%"PRIu64"us, "
-           "%s=%"PRIu64"us, ratio=1:%s\n",
+       malloc_printf("%"FMTu64" iterations, %s=%"FMTu64"us, "
+           "%s=%"FMTu64"us, ratio=1:%s\n",
            niter, name_a, timer_usec(&timer_a), name_b, timer_usec(&timer_b),
            ratio_buf);
 
@@ -114,6 +114,10 @@ malloc_mus_free(void)
        void *p;
 
        p = malloc(1);
+       if (p == NULL) {
+               test_fail("Unexpected malloc() failure");
+               return;
+       }
        malloc_usable_size(p);
        free(p);
 }
@@ -124,6 +128,10 @@ malloc_sallocx_free(void)
        void *p;
 
        p = malloc(1);
+       if (p == NULL) {
+               test_fail("Unexpected malloc() failure");
+               return;
+       }
        if (sallocx(p, 0) < 1)
                test_fail("Unexpected sallocx() failure");
        free(p);
@@ -143,6 +151,10 @@ malloc_nallocx_free(void)
        void *p;
 
        p = malloc(1);
+       if (p == NULL) {
+               test_fail("Unexpected malloc() failure");
+               return;
+       }
        if (nallocx(1, 0) < 1)
                test_fail("Unexpected nallocx() failure");
        free(p);
index 88b31f6efdb8f70fd24a7d6ba736a66debb693e2..ba4be8702edac3bbb14618f59b7ffd5d1369e266 100644 (file)
@@ -1543,13 +1543,13 @@ TEST_BEGIN(test_gen_rand_64)
                }
                r = gen_rand64(ctx);
                assert_u64_eq(r, array64[i],
-                   "Mismatch at array64[%d]=%"PRIx64", gen=%"PRIx64, i,
+                   "Mismatch at array64[%d]=%"FMTx64", gen=%"FMTx64, i,
                    array64[i], r);
        }
        for (i = 0; i < COUNT_2; i++) {
                r = gen_rand64(ctx);
                assert_u64_eq(r, array64_2[i],
-                   "Mismatch at array64_2[%d]=%"PRIx64" gen=%"PRIx64"", i,
+                   "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i,
                    array64_2[i], r);
        }
        fini_gen_rand(ctx);
@@ -1580,13 +1580,13 @@ TEST_BEGIN(test_by_array_64)
                }
                r = gen_rand64(ctx);
                assert_u64_eq(r, array64[i],
-                   "Mismatch at array64[%d]=%"PRIx64" gen=%"PRIx64, i,
+                   "Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i,
                    array64[i], r);
        }
        for (i = 0; i < COUNT_2; i++) {
                r = gen_rand64(ctx);
                assert_u64_eq(r, array64_2[i],
-                   "Mismatch at array64_2[%d]=%"PRIx64" gen=%"PRIx64, i,
+                   "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i,
                    array64_2[i], r);
        }
        fini_gen_rand(ctx);
index eb6136c79403937b61ecd41b17bcc7abb68191d7..bdd74f659cf31f95e69704ae21011cecdbb8f827 100644 (file)
@@ -4,48 +4,64 @@
 struct p##_test_s {                                                    \
        t       accum0;                                                 \
        t       x;                                                      \
+       t       s;                                                      \
 };                                                                     \
 typedef struct p##_test_s p##_test_t;
 
-#define        TEST_BODY(p, t, PRI) do {                                       \
+#define        TEST_BODY(p, t, tc, ta, FMT) do {                               \
        const p##_test_t tests[] = {                                    \
-               {-1, -1},                                               \
-               {-1,  0},                                               \
-               {-1,  1},                                               \
+               {(t)-1, (t)-1, (t)-2},                                  \
+               {(t)-1, (t) 0, (t)-2},                                  \
+               {(t)-1, (t) 1, (t)-2},                                  \
                                                                        \
-               { 0, -1},                                               \
-               { 0,  0},                                               \
-               { 0,  1},                                               \
+               {(t) 0, (t)-1, (t)-2},                                  \
+               {(t) 0, (t) 0, (t)-2},                                  \
+               {(t) 0, (t) 1, (t)-2},                                  \
                                                                        \
-               { 1, -1},                                               \
-               { 1,  0},                                               \
-               { 1,  1},                                               \
+               {(t) 1, (t)-1, (t)-2},                                  \
+               {(t) 1, (t) 0, (t)-2},                                  \
+               {(t) 1, (t) 1, (t)-2},                                  \
                                                                        \
-               {0, -(1 << 22)},                                        \
-               {0, (1 << 22)},                                         \
-               {(1 << 22), -(1 << 22)},                                \
-               {(1 << 22), (1 << 22)}                                  \
+               {(t)0, (t)-(1 << 22), (t)-2},                           \
+               {(t)0, (t)(1 << 22), (t)-2},                            \
+               {(t)(1 << 22), (t)-(1 << 22), (t)-2},                   \
+               {(t)(1 << 22), (t)(1 << 22), (t)-2}                     \
        };                                                              \
        unsigned i;                                                     \
                                                                        \
        for (i = 0; i < sizeof(tests)/sizeof(p##_test_t); i++) {        \
+               bool err;                                               \
                t accum = tests[i].accum0;                              \
-               assert_u64_eq(atomic_read_##p(&accum), tests[i].accum0, \
-                   "i=%u", i);                                         \
-               assert_u64_eq(atomic_add_##p(&accum, tests[i].x),       \
-                   tests[i].accum0 + tests[i].x,                       \
-                   "i=%u, accum=%#"PRI", x=%#"PRI,                     \
+               assert_##ta##_eq(atomic_read_##p(&accum),               \
+                   tests[i].accum0,                                    \
+                   "Erroneous read, i=%u", i);                         \
+                                                                       \
+               assert_##ta##_eq(atomic_add_##p(&accum, tests[i].x),    \
+                   (t)((tc)tests[i].accum0 + (tc)tests[i].x),          \
+                   "i=%u, accum=%"FMT", x=%"FMT,                       \
                    i, tests[i].accum0, tests[i].x);                    \
-               assert_u64_eq(atomic_read_##p(&accum), accum,           \
-                   "i=%u", i);                                         \
+               assert_##ta##_eq(atomic_read_##p(&accum), accum,        \
+                   "Erroneous add, i=%u", i);                          \
                                                                        \
                accum = tests[i].accum0;                                \
-               assert_u64_eq(atomic_sub_##p(&accum, tests[i].x),       \
-                   tests[i].accum0 - tests[i].x,                       \
-                   "i=%u, accum=%#"PRI", x=%#"PRI,                     \
+               assert_##ta##_eq(atomic_sub_##p(&accum, tests[i].x),    \
+                   (t)((tc)tests[i].accum0 - (tc)tests[i].x),          \
+                   "i=%u, accum=%"FMT", x=%"FMT,                       \
                    i, tests[i].accum0, tests[i].x);                    \
-               assert_u64_eq(atomic_read_##p(&accum), accum,           \
-                   "i=%u", i);                                         \
+               assert_##ta##_eq(atomic_read_##p(&accum), accum,        \
+                   "Erroneous sub, i=%u", i);                          \
+                                                                       \
+               accum = tests[i].accum0;                                \
+               err = atomic_cas_##p(&accum, tests[i].x, tests[i].s);   \
+               assert_b_eq(err, tests[i].accum0 != tests[i].x,         \
+                   "Erroneous cas success/failure result");            \
+               assert_##ta##_eq(accum, err ? tests[i].accum0 :         \
+                   tests[i].s, "Erroneous cas effect, i=%u", i);       \
+                                                                       \
+               accum = tests[i].accum0;                                \
+               atomic_write_##p(&accum, tests[i].s);                   \
+               assert_##ta##_eq(accum, tests[i].s,                     \
+                   "Erroneous write, i=%u", i);                        \
        }                                                               \
 } while (0)
 
@@ -56,7 +72,7 @@ TEST_BEGIN(test_atomic_uint64)
 #if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
        test_skip("64-bit atomic operations not supported");
 #else
-       TEST_BODY(uint64, uint64_t, PRIx64);
+       TEST_BODY(uint64, uint64_t, uint64_t, u64, FMTx64);
 #endif
 }
 TEST_END
@@ -65,7 +81,15 @@ TEST_STRUCT(uint32, uint32_t)
 TEST_BEGIN(test_atomic_uint32)
 {
 
-       TEST_BODY(uint32, uint32_t, PRIx32);
+       TEST_BODY(uint32, uint32_t, uint32_t, u32, "#"FMTx32);
+}
+TEST_END
+
+TEST_STRUCT(p, void *)
+TEST_BEGIN(test_atomic_p)
+{
+
+       TEST_BODY(p, void *, uintptr_t, ptr, "p");
 }
 TEST_END
 
@@ -73,7 +97,7 @@ TEST_STRUCT(z, size_t)
 TEST_BEGIN(test_atomic_z)
 {
 
-       TEST_BODY(z, size_t, "zx");
+       TEST_BODY(z, size_t, size_t, zu, "#zx");
 }
 TEST_END
 
@@ -81,7 +105,7 @@ TEST_STRUCT(u, unsigned)
 TEST_BEGIN(test_atomic_u)
 {
 
-       TEST_BODY(u, unsigned, "x");
+       TEST_BODY(u, unsigned, unsigned, u, "#x");
 }
 TEST_END
 
@@ -92,6 +116,7 @@ main(void)
        return (test(
            test_atomic_uint64,
            test_atomic_uint32,
+           test_atomic_p,
            test_atomic_z,
            test_atomic_u));
 }
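
The expanded TEST_BODY now also covers atomic_cas_*() and atomic_write_*(). As the assertions above encode, atomic_cas_<p>(obj, cmp, swap) stores swap and returns false only when *obj equals cmp; otherwise it returns true and leaves *obj unchanged. A sketch for the uint64 variant (only meaningful where 64-bit atomics are supported, as test_atomic_uint64 itself notes):

#include "test/jemalloc_test.h"

/* Sketch: success and failure cases of the CAS semantics tested above. */
static void
cas_example(void)
{
        uint64_t v = 1;

        assert_false(atomic_cas_uint64(&v, 1, 2), "Expected CAS success");
        assert_u64_eq(atomic_read_uint64(&v), 2, "Erroneous cas effect");

        assert_true(atomic_cas_uint64(&v, 1, 3), "Expected CAS failure");
        assert_u64_eq(atomic_read_uint64(&v), 2, "Erroneous cas effect");
}
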
index 4ea94f8575215b76844591c5804bb62ecff08417..1ab0bb8ebce785a91dbe9b552bc973c51cf70949 100644 (file)
@@ -6,7 +6,11 @@ TEST_BEGIN(test_bitmap_size)
 
        prev_size = 0;
        for (i = 1; i <= BITMAP_MAXBITS; i++) {
-               size_t size = bitmap_size(i);
+               bitmap_info_t binfo;
+               size_t size;
+
+               bitmap_info_init(&binfo, i);
+               size = bitmap_size(&binfo);
                assert_true(size >= prev_size,
                    "Bitmap size is smaller than expected");
                prev_size = size;
@@ -23,8 +27,8 @@ TEST_BEGIN(test_bitmap_init)
                bitmap_info_init(&binfo, i);
                {
                        size_t j;
-                       bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
-                               bitmap_info_ngroups(&binfo));
+                       bitmap_t *bitmap = (bitmap_t *)malloc(
+                           bitmap_size(&binfo));
                        bitmap_init(bitmap, &binfo);
 
                        for (j = 0; j < i; j++) {
@@ -46,8 +50,8 @@ TEST_BEGIN(test_bitmap_set)
                bitmap_info_init(&binfo, i);
                {
                        size_t j;
-                       bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
-                               bitmap_info_ngroups(&binfo));
+                       bitmap_t *bitmap = (bitmap_t *)malloc(
+                           bitmap_size(&binfo));
                        bitmap_init(bitmap, &binfo);
 
                        for (j = 0; j < i; j++)
@@ -69,8 +73,8 @@ TEST_BEGIN(test_bitmap_unset)
                bitmap_info_init(&binfo, i);
                {
                        size_t j;
-                       bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
-                               bitmap_info_ngroups(&binfo));
+                       bitmap_t *bitmap = (bitmap_t *)malloc(
+                           bitmap_size(&binfo));
                        bitmap_init(bitmap, &binfo);
 
                        for (j = 0; j < i; j++)
@@ -98,8 +102,8 @@ TEST_BEGIN(test_bitmap_sfu)
                bitmap_info_init(&binfo, i);
                {
                        ssize_t j;
-                       bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
-                               bitmap_info_ngroups(&binfo));
+                       bitmap_t *bitmap = (bitmap_t *)malloc(
+                           bitmap_size(&binfo));
                        bitmap_init(bitmap, &binfo);
 
                        /* Iteratively set bits starting at the beginning. */
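
The bitmap tests now size their allocations from a bitmap_info_t rather than calling bitmap_size() with a raw bit count. The shared setup as a sketch; the helper name is illustrative:

#include "test/jemalloc_test.h"

/* Sketch: allocate and initialize a bitmap large enough for nbits bits. */
static bitmap_t *
bitmap_create(bitmap_info_t *binfo, size_t nbits)
{
        bitmap_t *bitmap;

        bitmap_info_init(binfo, nbits);
        bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
        assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
        bitmap_init(bitmap, binfo);
        return (bitmap);
}
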
index c2126487257c774aa8c3ee05c1255972a2e11eb0..b117595994abb394cd4db46ac9e13bb11d8eb34a 100644 (file)
@@ -64,10 +64,10 @@ TEST_BEGIN(test_count_insert_search_remove)
 
                ks = (i & 1) ? strs[i] : (const char *)NULL;
                vs = (i & 2) ? strs[i] : (const char *)NULL;
-               assert_ptr_eq((void *)ks, (void *)k.s,
-                   "Key mismatch, i=%zu", i);
-               assert_ptr_eq((void *)vs, (void *)v.s,
-                   "Value mismatch, i=%zu", i);
+               assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
+                   i);
+               assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
+                   i);
        }
        assert_true(ckh_search(&ckh, missing, NULL, NULL),
            "Unexpected ckh_search() success");
@@ -90,14 +90,14 @@ TEST_BEGIN(test_count_insert_search_remove)
 
                ks = (i & 1) ? strs[i] : (const char *)NULL;
                vs = (i & 2) ? strs[i] : (const char *)NULL;
-               assert_ptr_eq((void *)ks, (void *)k.s,
-                   "Key mismatch, i=%zu", i);
-               assert_ptr_eq((void *)vs, (void *)v.s,
-                   "Value mismatch, i=%zu", i);
+               assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
+                   i);
+               assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
+                   i);
                assert_zu_eq(ckh_count(&ckh),
                    sizeof(strs)/sizeof(const char *) - i - 1,
                    "ckh_count() should return %zu, but it returned %zu",
-                   sizeof(strs)/sizeof(const char *) - i - 1,
+                       sizeof(strs)/sizeof(const char *) - i - 1,
                    ckh_count(&ckh));
        }
 
@@ -196,8 +196,8 @@ TEST_BEGIN(test_insert_iter_remove)
        }
 
        assert_zu_eq(ckh_count(&ckh), 0,
-           "ckh_count() should return %zu, but it returned %zu", ZU(0),
-           ckh_count(&ckh));
+           "ckh_count() should return %zu, but it returned %zu",
+           ZU(0), ckh_count(&ckh));
        ckh_delete(tsd, &ckh);
 #undef NITEMS
 }
diff --git a/src/jemalloc/test/unit/decay.c b/src/jemalloc/test/unit/decay.c
new file mode 100644 (file)
index 0000000..70a2e67
--- /dev/null
@@ -0,0 +1,360 @@
+#include "test/jemalloc_test.h"
+
+const char *malloc_conf = "purge:decay,decay_time:1";
+
+static nstime_update_t *nstime_update_orig;
+
+static unsigned nupdates_mock;
+static nstime_t time_mock;
+static bool nonmonotonic_mock;
+
+static bool
+nstime_update_mock(nstime_t *time)
+{
+
+       nupdates_mock++;
+       if (!nonmonotonic_mock)
+               nstime_copy(time, &time_mock);
+       return (nonmonotonic_mock);
+}
+
+TEST_BEGIN(test_decay_ticks)
+{
+       ticker_t *decay_ticker;
+       unsigned tick0, tick1;
+       size_t sz, huge0, large0;
+       void *p;
+
+       test_skip_if(opt_purge != purge_mode_decay);
+
+       decay_ticker = decay_ticker_get(tsd_fetch(), 0);
+       assert_ptr_not_null(decay_ticker,
+           "Unexpected failure getting decay ticker");
+
+       sz = sizeof(size_t);
+       assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
+           "Unexpected mallctl failure");
+       assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
+           "Unexpected mallctl failure");
+
+       /*
+        * Test the standard APIs using a huge size class, since we can't
+        * control tcache interactions (except by completely disabling tcache
+        * for the entire test program).
+        */
+
+       /* malloc(). */
+       tick0 = ticker_read(decay_ticker);
+       p = malloc(huge0);
+       assert_ptr_not_null(p, "Unexpected malloc() failure");
+       tick1 = ticker_read(decay_ticker);
+       assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
+       /* free(). */
+       tick0 = ticker_read(decay_ticker);
+       free(p);
+       tick1 = ticker_read(decay_ticker);
+       assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");
+
+       /* calloc(). */
+       tick0 = ticker_read(decay_ticker);
+       p = calloc(1, huge0);
+       assert_ptr_not_null(p, "Unexpected calloc() failure");
+       tick1 = ticker_read(decay_ticker);
+       assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
+       free(p);
+
+       /* posix_memalign(). */
+       tick0 = ticker_read(decay_ticker);
+       assert_d_eq(posix_memalign(&p, sizeof(size_t), huge0), 0,
+           "Unexpected posix_memalign() failure");
+       tick1 = ticker_read(decay_ticker);
+       assert_u32_ne(tick1, tick0,
+           "Expected ticker to tick during posix_memalign()");
+       free(p);
+
+       /* aligned_alloc(). */
+       tick0 = ticker_read(decay_ticker);
+       p = aligned_alloc(sizeof(size_t), huge0);
+       assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
+       tick1 = ticker_read(decay_ticker);
+       assert_u32_ne(tick1, tick0,
+           "Expected ticker to tick during aligned_alloc()");
+       free(p);
+
+       /* realloc(). */
+       /* Allocate. */
+       tick0 = ticker_read(decay_ticker);
+       p = realloc(NULL, huge0);
+       assert_ptr_not_null(p, "Unexpected realloc() failure");
+       tick1 = ticker_read(decay_ticker);
+       assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+       /* Reallocate. */
+       tick0 = ticker_read(decay_ticker);
+       p = realloc(p, huge0);
+       assert_ptr_not_null(p, "Unexpected realloc() failure");
+       tick1 = ticker_read(decay_ticker);
+       assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+       /* Deallocate. */
+       tick0 = ticker_read(decay_ticker);
+       realloc(p, 0);
+       tick1 = ticker_read(decay_ticker);
+       assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+
+       /*
+        * Test the *allocx() APIs using huge, large, and small size classes,
+        * with tcache explicitly disabled.
+        */
+       {
+               unsigned i;
+               size_t allocx_sizes[3];
+               allocx_sizes[0] = huge0;
+               allocx_sizes[1] = large0;
+               allocx_sizes[2] = 1;
+
+               for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
+                       sz = allocx_sizes[i];
+
+                       /* mallocx(). */
+                       tick0 = ticker_read(decay_ticker);
+                       p = mallocx(sz, MALLOCX_TCACHE_NONE);
+                       assert_ptr_not_null(p, "Unexpected mallocx() failure");
+                       tick1 = ticker_read(decay_ticker);
+                       assert_u32_ne(tick1, tick0,
+                           "Expected ticker to tick during mallocx() (sz=%zu)",
+                           sz);
+                       /* rallocx(). */
+                       tick0 = ticker_read(decay_ticker);
+                       p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
+                       assert_ptr_not_null(p, "Unexpected rallocx() failure");
+                       tick1 = ticker_read(decay_ticker);
+                       assert_u32_ne(tick1, tick0,
+                           "Expected ticker to tick during rallocx() (sz=%zu)",
+                           sz);
+                       /* xallocx(). */
+                       tick0 = ticker_read(decay_ticker);
+                       xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
+                       tick1 = ticker_read(decay_ticker);
+                       assert_u32_ne(tick1, tick0,
+                           "Expected ticker to tick during xallocx() (sz=%zu)",
+                           sz);
+                       /* dallocx(). */
+                       tick0 = ticker_read(decay_ticker);
+                       dallocx(p, MALLOCX_TCACHE_NONE);
+                       tick1 = ticker_read(decay_ticker);
+                       assert_u32_ne(tick1, tick0,
+                           "Expected ticker to tick during dallocx() (sz=%zu)",
+                           sz);
+                       /* sdallocx(). */
+                       p = mallocx(sz, MALLOCX_TCACHE_NONE);
+                       assert_ptr_not_null(p, "Unexpected mallocx() failure");
+                       tick0 = ticker_read(decay_ticker);
+                       sdallocx(p, sz, MALLOCX_TCACHE_NONE);
+                       tick1 = ticker_read(decay_ticker);
+                       assert_u32_ne(tick1, tick0,
+                           "Expected ticker to tick during sdallocx() "
+                           "(sz=%zu)", sz);
+               }
+       }
+
+       /*
+        * Test tcache fill/flush interactions for large and small size classes,
+        * using an explicit tcache.
+        */
+       if (config_tcache) {
+               unsigned tcache_ind, i;
+               size_t tcache_sizes[2];
+               tcache_sizes[0] = large0;
+               tcache_sizes[1] = 1;
+
+               sz = sizeof(unsigned);
+               assert_d_eq(mallctl("tcache.create", &tcache_ind, &sz, NULL, 0),
+                   0, "Unexpected mallctl failure");
+
+               for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
+                       sz = tcache_sizes[i];
+
+                       /* tcache fill. */
+                       tick0 = ticker_read(decay_ticker);
+                       p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
+                       assert_ptr_not_null(p, "Unexpected mallocx() failure");
+                       tick1 = ticker_read(decay_ticker);
+                       assert_u32_ne(tick1, tick0,
+                           "Expected ticker to tick during tcache fill "
+                           "(sz=%zu)", sz);
+                       /* tcache flush. */
+                       dallocx(p, MALLOCX_TCACHE(tcache_ind));
+                       tick0 = ticker_read(decay_ticker);
+                       assert_d_eq(mallctl("tcache.flush", NULL, NULL,
+                           &tcache_ind, sizeof(unsigned)), 0,
+                           "Unexpected mallctl failure");
+                       tick1 = ticker_read(decay_ticker);
+                       assert_u32_ne(tick1, tick0,
+                           "Expected ticker to tick during tcache flush "
+                           "(sz=%zu)", sz);
+               }
+       }
+}
+TEST_END
+
+TEST_BEGIN(test_decay_ticker)
+{
+#define        NPS 1024
+       int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
+       void *ps[NPS];
+       uint64_t epoch;
+       uint64_t npurge0 = 0;
+       uint64_t npurge1 = 0;
+       size_t sz, large;
+       unsigned i, nupdates0;
+       nstime_t time, decay_time, deadline;
+
+       test_skip_if(opt_purge != purge_mode_decay);
+
+       /*
+        * Allocate a bunch of large objects, pause the clock, deallocate the
+        * objects, restore the clock, then [md]allocx() in a tight loop to
+        * verify the ticker triggers purging.
+        */
+
+       if (config_tcache) {
+               size_t tcache_max;
+
+               sz = sizeof(size_t);
+               assert_d_eq(mallctl("arenas.tcache_max", &tcache_max, &sz, NULL,
+                   0), 0, "Unexpected mallctl failure");
+               large = nallocx(tcache_max + 1, flags);
+       }  else {
+               sz = sizeof(size_t);
+               assert_d_eq(mallctl("arenas.lrun.0.size", &large, &sz, NULL, 0),
+                   0, "Unexpected mallctl failure");
+       }
+
+       assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+           "Unexpected mallctl failure");
+       assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
+           "Unexpected mallctl failure");
+       sz = sizeof(uint64_t);
+       assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0),
+           config_stats ? 0 : ENOENT, "Unexpected mallctl result");
+
+       for (i = 0; i < NPS; i++) {
+               ps[i] = mallocx(large, flags);
+               assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
+       }
+
+       nupdates_mock = 0;
+       nstime_init(&time_mock, 0);
+       nstime_update(&time_mock);
+       nonmonotonic_mock = false;
+
+       nstime_update_orig = nstime_update;
+       nstime_update = nstime_update_mock;
+
+       for (i = 0; i < NPS; i++) {
+               dallocx(ps[i], flags);
+               nupdates0 = nupdates_mock;
+               assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+                   "Unexpected arena.0.decay failure");
+               assert_u_gt(nupdates_mock, nupdates0,
+                   "Expected nstime_update() to be called");
+       }
+
+       nstime_update = nstime_update_orig;
+
+       nstime_init(&time, 0);
+       nstime_update(&time);
+       nstime_init2(&decay_time, opt_decay_time, 0);
+       nstime_copy(&deadline, &time);
+       nstime_add(&deadline, &decay_time);
+       do {
+               for (i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) {
+                       void *p = mallocx(1, flags);
+                       assert_ptr_not_null(p, "Unexpected mallocx() failure");
+                       dallocx(p, flags);
+               }
+               assert_d_eq(mallctl("epoch", NULL, NULL, &epoch,
+                   sizeof(uint64_t)), 0, "Unexpected mallctl failure");
+               sz = sizeof(uint64_t);
+               assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz,
+                   NULL, 0), config_stats ? 0 : ENOENT,
+                   "Unexpected mallctl result");
+
+               nstime_update(&time);
+       } while (nstime_compare(&time, &deadline) <= 0 && npurge1 == npurge0);
+
+       if (config_stats)
+               assert_u64_gt(npurge1, npurge0, "Expected purging to occur");
+#undef NPS
+}
+TEST_END
+
+TEST_BEGIN(test_decay_nonmonotonic)
+{
+#define        NPS (SMOOTHSTEP_NSTEPS + 1)
+       int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
+       void *ps[NPS];
+       uint64_t epoch;
+       uint64_t npurge0 = 0;
+       uint64_t npurge1 = 0;
+       size_t sz, large0;
+       unsigned i, nupdates0;
+
+       test_skip_if(opt_purge != purge_mode_decay);
+
+       sz = sizeof(size_t);
+       assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
+           "Unexpected mallctl failure");
+
+       assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+           "Unexpected mallctl failure");
+       assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
+           "Unexpected mallctl failure");
+       sz = sizeof(uint64_t);
+       assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0),
+           config_stats ? 0 : ENOENT, "Unexpected mallctl result");
+
+       nupdates_mock = 0;
+       nstime_init(&time_mock, 0);
+       nstime_update(&time_mock);
+       nonmonotonic_mock = true;
+
+       nstime_update_orig = nstime_update;
+       nstime_update = nstime_update_mock;
+
+       for (i = 0; i < NPS; i++) {
+               ps[i] = mallocx(large0, flags);
+               assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
+       }
+
+       for (i = 0; i < NPS; i++) {
+               dallocx(ps[i], flags);
+               nupdates0 = nupdates_mock;
+               assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+                   "Unexpected arena.0.decay failure");
+               assert_u_gt(nupdates_mock, nupdates0,
+                   "Expected nstime_update() to be called");
+       }
+
+       assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
+           "Unexpected mallctl failure");
+       sz = sizeof(uint64_t);
+       assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz, NULL, 0),
+           config_stats ? 0 : ENOENT, "Unexpected mallctl result");
+
+       if (config_stats)
+               assert_u64_gt(npurge1, npurge0, "Expected purging to occur");
+
+       nstime_update = nstime_update_orig;
+#undef NPS
+}
+TEST_END
+
+int
+main(void)
+{
+
+       return (test(
+           test_decay_ticks,
+           test_decay_ticker,
+           test_decay_nonmonotonic));
+}
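
Note (not part of the commit): the new decay.c unit test exercises jemalloc's time-based "decay" purge mode by watching the internal decay ticker and the "stats.arenas.0.npurge" counter. A minimal standalone sketch of the same observation, using only the documented public mallctl() interface, might look like the following; it assumes a jemalloc build with statistics enabled and is an illustration, not code from this patch.

#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t epoch = 1, npurge;
    size_t sz;
    void *p;

    /* Touch the allocator so arena 0 exists. */
    p = mallocx(4096, 0);
    dallocx(p, 0);

    /* Force a purge of arena 0's dirty pages. */
    if (mallctl("arena.0.purge", NULL, NULL, NULL, 0) != 0)
        return (1);

    /* Refresh the stats epoch, then read the purge counter. */
    mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
    sz = sizeof(npurge);
    if (mallctl("stats.arenas.0.npurge", &npurge, &sz, NULL, 0) == 0)
        printf("npurge = %llu\n", (unsigned long long)npurge);
    return (0);
}
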
index 77a8cede92ad8567d56c62edeac089e7a0df741f..010c9d76fd9301537870db5167c0a2e024f916d2 100644 (file)
@@ -35,7 +35,7 @@ typedef enum {
        hash_variant_x64_128
 } hash_variant_t;
 
-static size_t
+static int
 hash_variant_bits(hash_variant_t variant)
 {
 
@@ -59,19 +59,20 @@ hash_variant_string(hash_variant_t variant)
        }
 }
 
+#define        KEY_SIZE        256
 static void
-hash_variant_verify(hash_variant_t variant)
+hash_variant_verify_key(hash_variant_t variant, uint8_t *key)
 {
-       const size_t hashbytes = hash_variant_bits(variant) / 8;
-       uint8_t key[256];
-       VARIABLE_ARRAY(uint8_t, hashes, hashbytes * 256);
+       const int hashbytes = hash_variant_bits(variant) / 8;
+       const int hashes_size = hashbytes * 256;
+       VARIABLE_ARRAY(uint8_t, hashes, hashes_size);
        VARIABLE_ARRAY(uint8_t, final, hashbytes);
        unsigned i;
        uint32_t computed, expected;
 
-       memset(key, 0, sizeof(key));
-       memset(hashes, 0, sizeof(hashes));
-       memset(final, 0, sizeof(final));
+       memset(key, 0, KEY_SIZE);
+       memset(hashes, 0, hashes_size);
+       memset(final, 0, hashbytes);
 
        /*
         * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the
@@ -102,17 +103,17 @@ hash_variant_verify(hash_variant_t variant)
        /* Hash the result array. */
        switch (variant) {
        case hash_variant_x86_32: {
-               uint32_t out = hash_x86_32(hashes, hashbytes*256, 0);
+               uint32_t out = hash_x86_32(hashes, hashes_size, 0);
                memcpy(final, &out, sizeof(out));
                break;
        } case hash_variant_x86_128: {
                uint64_t out[2];
-               hash_x86_128(hashes, hashbytes*256, 0, out);
+               hash_x86_128(hashes, hashes_size, 0, out);
                memcpy(final, out, sizeof(out));
                break;
        } case hash_variant_x64_128: {
                uint64_t out[2];
-               hash_x64_128(hashes, hashbytes*256, 0, out);
+               hash_x64_128(hashes, hashes_size, 0, out);
                memcpy(final, out, sizeof(out));
                break;
        } default: not_reached();
@@ -139,6 +140,19 @@ hash_variant_verify(hash_variant_t variant)
            hash_variant_string(variant), expected, computed);
 }
 
+static void
+hash_variant_verify(hash_variant_t variant)
+{
+#define        MAX_ALIGN       16
+       uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)];
+       unsigned i;
+
+       for (i = 0; i < MAX_ALIGN; i++)
+               hash_variant_verify_key(variant, &key[i]);
+#undef MAX_ALIGN
+}
+#undef KEY_SIZE
+
 TEST_BEGIN(test_hash_x86_32)
 {
 
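
Note (not part of the commit): the hash.c change above over-allocates the key buffer by MAX_ALIGN - 1 bytes and reruns the verification once per byte offset, so misaligned-load bugs in the hash implementations cannot hide behind a conveniently aligned stack buffer. A self-contained sketch of that alignment-sweep pattern, with a stand-in checksum instead of the real hash functions, is shown below.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KEY_SIZE   256
#define MAX_ALIGN  16

/* Stand-in for the routine under test. */
static uint32_t
sum_bytes(const uint8_t *buf, size_t len)
{
    uint32_t acc = 0;
    size_t i;

    for (i = 0; i < len; i++)
        acc += buf[i];
    return (acc);
}

int
main(void)
{
    /* Over-allocate so &key[i] covers every offset mod MAX_ALIGN. */
    uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)];
    unsigned i;

    memset(key, 0, sizeof(key));
    for (i = 0; i < MAX_ALIGN; i++)
        printf("offset %u: %u\n", i, sum_bytes(&key[i], KEY_SIZE));
    return (0);
}
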
index 301428f2cb119eacb6d1bc0c134572a0199a520c..b23dd1e9592a0294509ed27c00c9a10911aa0c64 100644 (file)
@@ -1,14 +1,26 @@
 #include "test/jemalloc_test.h"
 
 #ifdef JEMALLOC_FILL
+#  ifndef JEMALLOC_TEST_JUNK_OPT
+#    define JEMALLOC_TEST_JUNK_OPT "junk:true"
+#  endif
 const char *malloc_conf =
-    "abort:false,junk:true,zero:false,redzone:true,quarantine:0";
+    "abort:false,zero:false,redzone:true,quarantine:0," JEMALLOC_TEST_JUNK_OPT;
 #endif
 
 static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
 static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig;
 static huge_dalloc_junk_t *huge_dalloc_junk_orig;
-static void *most_recently_junked;
+static void *watch_for_junking;
+static bool saw_junking;
+
+static void
+watch_junking(void *p)
+{
+
+       watch_for_junking = p;
+       saw_junking = false;
+}
 
 static void
 arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info)
@@ -21,7 +33,8 @@ arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info)
                    "Missing junk fill for byte %zu/%zu of deallocated region",
                    i, bin_info->reg_size);
        }
-       most_recently_junked = ptr;
+       if (ptr == watch_for_junking)
+               saw_junking = true;
 }
 
 static void
@@ -35,7 +48,8 @@ arena_dalloc_junk_large_intercept(void *ptr, size_t usize)
                    "Missing junk fill for byte %zu/%zu of deallocated region",
                    i, usize);
        }
-       most_recently_junked = ptr;
+       if (ptr == watch_for_junking)
+               saw_junking = true;
 }
 
 static void
@@ -48,7 +62,8 @@ huge_dalloc_junk_intercept(void *ptr, size_t usize)
         * enough that it doesn't make sense to duplicate the decision logic in
         * test code, so don't actually check that the region is junk-filled.
         */
-       most_recently_junked = ptr;
+       if (ptr == watch_for_junking)
+               saw_junking = true;
 }
 
 static void
@@ -57,12 +72,14 @@ test_junk(size_t sz_min, size_t sz_max)
        char *s;
        size_t sz_prev, sz, i;
 
-       arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
-       arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
-       arena_dalloc_junk_large_orig = arena_dalloc_junk_large;
-       arena_dalloc_junk_large = arena_dalloc_junk_large_intercept;
-       huge_dalloc_junk_orig = huge_dalloc_junk;
-       huge_dalloc_junk = huge_dalloc_junk_intercept;
+       if (opt_junk_free) {
+               arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
+               arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
+               arena_dalloc_junk_large_orig = arena_dalloc_junk_large;
+               arena_dalloc_junk_large = arena_dalloc_junk_large_intercept;
+               huge_dalloc_junk_orig = huge_dalloc_junk;
+               huge_dalloc_junk = huge_dalloc_junk_intercept;
+       }
 
        sz_prev = 0;
        s = (char *)mallocx(sz_min, 0);
@@ -80,31 +97,35 @@ test_junk(size_t sz_min, size_t sz_max)
                }
 
                for (i = sz_prev; i < sz; i++) {
-                       assert_c_eq(s[i], 0xa5,
-                           "Newly allocated byte %zu/%zu isn't junk-filled",
-                           i, sz);
+                       if (opt_junk_alloc) {
+                               assert_c_eq(s[i], 0xa5,
+                                   "Newly allocated byte %zu/%zu isn't "
+                                   "junk-filled", i, sz);
+                       }
                        s[i] = 'a';
                }
 
                if (xallocx(s, sz+1, 0, 0) == sz) {
-                       void *junked = (void *)s;
-
+                       watch_junking(s);
                        s = (char *)rallocx(s, sz+1, 0);
                        assert_ptr_not_null((void *)s,
                            "Unexpected rallocx() failure");
-                       assert_ptr_eq(most_recently_junked, junked,
+                       assert_true(!opt_junk_free || saw_junking,
                            "Expected region of size %zu to be junk-filled",
                            sz);
                }
        }
 
+       watch_junking(s);
        dallocx(s, 0);
-       assert_ptr_eq(most_recently_junked, (void *)s,
+       assert_true(!opt_junk_free || saw_junking,
            "Expected region of size %zu to be junk-filled", sz);
 
-       arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
-       arena_dalloc_junk_large = arena_dalloc_junk_large_orig;
-       huge_dalloc_junk = huge_dalloc_junk_orig;
+       if (opt_junk_free) {
+               arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
+               arena_dalloc_junk_large = arena_dalloc_junk_large_orig;
+               huge_dalloc_junk = huge_dalloc_junk_orig;
+       }
 }
 
 TEST_BEGIN(test_junk_small)
@@ -119,7 +140,7 @@ TEST_BEGIN(test_junk_large)
 {
 
        test_skip_if(!config_fill);
-       test_junk(SMALL_MAXCLASS+1, arena_maxclass);
+       test_junk(SMALL_MAXCLASS+1, large_maxclass);
 }
 TEST_END
 
@@ -127,20 +148,32 @@ TEST_BEGIN(test_junk_huge)
 {
 
        test_skip_if(!config_fill);
-       test_junk(arena_maxclass+1, chunksize*2);
+       test_junk(large_maxclass+1, chunksize*2);
 }
 TEST_END
 
 arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig;
 static void *most_recently_trimmed;
 
+static size_t
+shrink_size(size_t size)
+{
+       size_t shrink_size;
+
+       for (shrink_size = size - 1; nallocx(shrink_size, 0) == size;
+           shrink_size--)
+               ; /* Do nothing. */
+
+       return (shrink_size);
+}
+
 static void
 arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize)
 {
 
        arena_ralloc_junk_large_orig(ptr, old_usize, usize);
-       assert_zu_eq(old_usize, arena_maxclass, "Unexpected old_usize");
-       assert_zu_eq(usize, arena_maxclass-PAGE, "Unexpected usize");
+       assert_zu_eq(old_usize, large_maxclass, "Unexpected old_usize");
+       assert_zu_eq(usize, shrink_size(large_maxclass), "Unexpected usize");
        most_recently_trimmed = ptr;
 }
 
@@ -148,13 +181,13 @@ TEST_BEGIN(test_junk_large_ralloc_shrink)
 {
        void *p1, *p2;
 
-       p1 = mallocx(arena_maxclass, 0);
+       p1 = mallocx(large_maxclass, 0);
        assert_ptr_not_null(p1, "Unexpected mallocx() failure");
 
        arena_ralloc_junk_large_orig = arena_ralloc_junk_large;
        arena_ralloc_junk_large = arena_ralloc_junk_large_intercept;
 
-       p2 = rallocx(p1, arena_maxclass-PAGE, 0);
+       p2 = rallocx(p1, shrink_size(large_maxclass), 0);
        assert_ptr_eq(p1, p2, "Unexpected move during shrink");
 
        arena_ralloc_junk_large = arena_ralloc_junk_large_orig;
@@ -180,6 +213,7 @@ TEST_BEGIN(test_junk_redzone)
        arena_redzone_corruption_t *arena_redzone_corruption_orig;
 
        test_skip_if(!config_fill);
+       test_skip_if(!opt_junk_alloc || !opt_junk_free);
 
        arena_redzone_corruption_orig = arena_redzone_corruption;
        arena_redzone_corruption = arena_redzone_corruption_replacement;
@@ -210,6 +244,7 @@ int
 main(void)
 {
 
+       assert(!config_fill || opt_junk_alloc || opt_junk_free);
        return (test(
            test_junk_small,
            test_junk_large,
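
Note (not part of the commit): with junk filling now split into separate allocation-time and deallocation-time options, the test stops comparing against the most recently junked pointer and instead arms a watch on the specific region it is about to free, setting a flag only when the hook sees that pointer. A tiny standalone sketch of that watch/flag pattern (hypothetical hook, not jemalloc code) follows; it stays correct even when other regions get junked in between.

#include <stdbool.h>
#include <stddef.h>

static void *watch_for = NULL;
static bool saw_it = false;

static void
watch(void *p)
{
    watch_for = p;
    saw_it = false;
}

/* Called from the (hypothetical) deallocation hook. */
static void
on_junked(void *p)
{
    if (p == watch_for)
        saw_it = true;
}

int
main(void)
{
    int x;

    watch(&x);
    on_junked(&x);
    return (saw_it ? 0 : 1);
}
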
diff --git a/src/jemalloc/test/unit/junk_alloc.c b/src/jemalloc/test/unit/junk_alloc.c
new file mode 100644 (file)
index 0000000..8db3331
--- /dev/null
@@ -0,0 +1,3 @@
+#define JEMALLOC_TEST_JUNK_OPT "junk:alloc"
+#include "junk.c"
+#undef JEMALLOC_TEST_JUNK_OPT
diff --git a/src/jemalloc/test/unit/junk_free.c b/src/jemalloc/test/unit/junk_free.c
new file mode 100644 (file)
index 0000000..482a61d
--- /dev/null
@@ -0,0 +1,3 @@
+#define JEMALLOC_TEST_JUNK_OPT "junk:free"
+#include "junk.c"
+#undef JEMALLOC_TEST_JUNK_OPT
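
Note (not part of the commit): junk_alloc.c and junk_free.c are three-line wrappers that pin JEMALLOC_TEST_JUNK_OPT and then textually include junk.c, which falls back to "junk:true" when the macro is absent. A hypothetical two-file sketch of the same include-with-override pattern (file and macro names invented for illustration; compile variant.c):

/* --- body.c: shared test body with a default --- */
#ifndef GREETING
#  define GREETING "default"
#endif
#include <stdio.h>

int
main(void)
{
    puts(GREETING);
    return (0);
}

/* --- variant.c: pins the option, then reuses the body --- */
#define GREETING "variant"
#include "body.c"
#undef GREETING
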
diff --git a/src/jemalloc/test/unit/lg_chunk.c b/src/jemalloc/test/unit/lg_chunk.c
new file mode 100644 (file)
index 0000000..7e5df38
--- /dev/null
@@ -0,0 +1,26 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Make sure that opt.lg_chunk clamping is sufficient.  In practice, this test
+ * program will fail a debug assertion during initialization and abort (rather
+ * than the test soft-failing) if clamping is insufficient.
+ */
+const char *malloc_conf = "lg_chunk:0";
+
+TEST_BEGIN(test_lg_chunk_clamp)
+{
+       void *p;
+
+       p = mallocx(1, 0);
+       assert_ptr_not_null(p, "Unexpected mallocx() failure");
+       dallocx(p, 0);
+}
+TEST_END
+
+int
+main(void)
+{
+
+       return (test(
+           test_lg_chunk_clamp));
+}
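
Note (not part of the commit): lg_chunk.c relies on the compile-time malloc_conf hook to inject an out-of-range option before the allocator initializes. The same hook is available to any program linking against jemalloc, as in the sketch below; depending on how jemalloc was configured the symbol may carry a je_ prefix, and MALLOC_CONF in the environment is the run-time equivalent.

#include <jemalloc/jemalloc.h>
#include <stdio.h>

/* Request a deliberately too-small chunk size; jemalloc clamps it. */
const char *malloc_conf = "lg_chunk:0";

int
main(void)
{
    void *p = mallocx(1, 0);

    if (p == NULL)
        return (1);
    dallocx(p, 0);
    puts("allocation succeeded despite lg_chunk:0");
    return (0);
}
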
index c70473cc92a666fd8ceba087baa2ed148973acb0..69f8c20c1716588b5d7ba724d3714032c2dca266 100644 (file)
@@ -117,8 +117,8 @@ TEST_END
 TEST_BEGIN(test_mallctl_config)
 {
 
-#define        TEST_MALLCTL_CONFIG(config) do {                                \
-       bool oldval;                                                    \
+#define        TEST_MALLCTL_CONFIG(config, t) do {                             \
+       t oldval;                                                       \
        size_t sz = sizeof(oldval);                                     \
        assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0),   \
            0, "Unexpected mallctl() failure");                         \
@@ -126,19 +126,21 @@ TEST_BEGIN(test_mallctl_config)
        assert_zu_eq(sz, sizeof(oldval), "Unexpected output size");     \
 } while (0)
 
-       TEST_MALLCTL_CONFIG(debug);
-       TEST_MALLCTL_CONFIG(fill);
-       TEST_MALLCTL_CONFIG(lazy_lock);
-       TEST_MALLCTL_CONFIG(munmap);
-       TEST_MALLCTL_CONFIG(prof);
-       TEST_MALLCTL_CONFIG(prof_libgcc);
-       TEST_MALLCTL_CONFIG(prof_libunwind);
-       TEST_MALLCTL_CONFIG(stats);
-       TEST_MALLCTL_CONFIG(tcache);
-       TEST_MALLCTL_CONFIG(tls);
-       TEST_MALLCTL_CONFIG(utrace);
-       TEST_MALLCTL_CONFIG(valgrind);
-       TEST_MALLCTL_CONFIG(xmalloc);
+       TEST_MALLCTL_CONFIG(cache_oblivious, bool);
+       TEST_MALLCTL_CONFIG(debug, bool);
+       TEST_MALLCTL_CONFIG(fill, bool);
+       TEST_MALLCTL_CONFIG(lazy_lock, bool);
+       TEST_MALLCTL_CONFIG(malloc_conf, const char *);
+       TEST_MALLCTL_CONFIG(munmap, bool);
+       TEST_MALLCTL_CONFIG(prof, bool);
+       TEST_MALLCTL_CONFIG(prof_libgcc, bool);
+       TEST_MALLCTL_CONFIG(prof_libunwind, bool);
+       TEST_MALLCTL_CONFIG(stats, bool);
+       TEST_MALLCTL_CONFIG(tcache, bool);
+       TEST_MALLCTL_CONFIG(tls, bool);
+       TEST_MALLCTL_CONFIG(utrace, bool);
+       TEST_MALLCTL_CONFIG(valgrind, bool);
+       TEST_MALLCTL_CONFIG(xmalloc, bool);
 
 #undef TEST_MALLCTL_CONFIG
 }
@@ -161,10 +163,12 @@ TEST_BEGIN(test_mallctl_opt)
        TEST_MALLCTL_OPT(bool, abort, always);
        TEST_MALLCTL_OPT(size_t, lg_chunk, always);
        TEST_MALLCTL_OPT(const char *, dss, always);
-       TEST_MALLCTL_OPT(size_t, narenas, always);
+       TEST_MALLCTL_OPT(unsigned, narenas, always);
+       TEST_MALLCTL_OPT(const char *, purge, always);
        TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always);
+       TEST_MALLCTL_OPT(ssize_t, decay_time, always);
        TEST_MALLCTL_OPT(bool, stats_print, always);
-       TEST_MALLCTL_OPT(bool, junk, fill);
+       TEST_MALLCTL_OPT(const char *, junk, fill);
        TEST_MALLCTL_OPT(size_t, quarantine, fill);
        TEST_MALLCTL_OPT(bool, redzone, fill);
        TEST_MALLCTL_OPT(bool, zero, fill);
@@ -211,6 +215,126 @@ TEST_BEGIN(test_manpage_example)
 }
 TEST_END
 
+TEST_BEGIN(test_tcache_none)
+{
+       void *p0, *q, *p1;
+
+       test_skip_if(!config_tcache);
+
+       /* Allocate p and q. */
+       p0 = mallocx(42, 0);
+       assert_ptr_not_null(p0, "Unexpected mallocx() failure");
+       q = mallocx(42, 0);
+       assert_ptr_not_null(q, "Unexpected mallocx() failure");
+
+       /* Deallocate p and q, but bypass the tcache for q. */
+       dallocx(p0, 0);
+       dallocx(q, MALLOCX_TCACHE_NONE);
+
+       /* Make sure that tcache-based allocation returns p, not q. */
+       p1 = mallocx(42, 0);
+       assert_ptr_not_null(p1, "Unexpected mallocx() failure");
+       assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region");
+
+       /* Clean up. */
+       dallocx(p1, MALLOCX_TCACHE_NONE);
+}
+TEST_END
+
+TEST_BEGIN(test_tcache)
+{
+#define        NTCACHES        10
+       unsigned tis[NTCACHES];
+       void *ps[NTCACHES];
+       void *qs[NTCACHES];
+       unsigned i;
+       size_t sz, psz, qsz;
+
+       test_skip_if(!config_tcache);
+
+       psz = 42;
+       qsz = nallocx(psz, 0) + 1;
+
+       /* Create tcaches. */
+       for (i = 0; i < NTCACHES; i++) {
+               sz = sizeof(unsigned);
+               assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0,
+                   "Unexpected mallctl() failure, i=%u", i);
+       }
+
+       /* Exercise tcache ID recycling. */
+       for (i = 0; i < NTCACHES; i++) {
+               assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i],
+                   sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+                   i);
+       }
+       for (i = 0; i < NTCACHES; i++) {
+               sz = sizeof(unsigned);
+               assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0,
+                   "Unexpected mallctl() failure, i=%u", i);
+       }
+
+       /* Flush empty tcaches. */
+       for (i = 0; i < NTCACHES; i++) {
+               assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i],
+                   sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+                   i);
+       }
+
+       /* Cache some allocations. */
+       for (i = 0; i < NTCACHES; i++) {
+               ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
+               assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
+                   i);
+               dallocx(ps[i], MALLOCX_TCACHE(tis[i]));
+
+               qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i]));
+               assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u",
+                   i);
+               dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
+       }
+
+       /* Verify that tcaches allocate cached regions. */
+       for (i = 0; i < NTCACHES; i++) {
+               void *p0 = ps[i];
+               ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
+               assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
+                   i);
+               assert_ptr_eq(ps[i], p0,
+                   "Expected mallocx() to allocate cached region, i=%u", i);
+       }
+
+       /* Verify that reallocation uses cached regions. */
+       for (i = 0; i < NTCACHES; i++) {
+               void *q0 = qs[i];
+               qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i]));
+               assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u",
+                   i);
+               assert_ptr_eq(qs[i], q0,
+                   "Expected rallocx() to allocate cached region, i=%u", i);
+               /* Avoid undefined behavior in case of test failure. */
+               if (qs[i] == NULL)
+                       qs[i] = ps[i];
+       }
+       for (i = 0; i < NTCACHES; i++)
+               dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
+
+       /* Flush some non-empty tcaches. */
+       for (i = 0; i < NTCACHES/2; i++) {
+               assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i],
+                   sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+                   i);
+       }
+
+       /* Destroy tcaches. */
+       for (i = 0; i < NTCACHES; i++) {
+               assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i],
+                   sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+                   i);
+       }
+}
+TEST_END
+
 TEST_BEGIN(test_thread_arena)
 {
        unsigned arena_old, arena_new, narenas;
@@ -228,6 +352,73 @@ TEST_BEGIN(test_thread_arena)
 }
 TEST_END
 
+TEST_BEGIN(test_arena_i_lg_dirty_mult)
+{
+       ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
+       size_t sz = sizeof(ssize_t);
+
+       test_skip_if(opt_purge != purge_mode_ratio);
+
+       assert_d_eq(mallctl("arena.0.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
+           NULL, 0), 0, "Unexpected mallctl() failure");
+
+       lg_dirty_mult = -2;
+       assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL,
+           &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+           "Unexpected mallctl() success");
+
+       lg_dirty_mult = (sizeof(size_t) << 3);
+       assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL,
+           &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+           "Unexpected mallctl() success");
+
+       for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1;
+           lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult
+           = lg_dirty_mult, lg_dirty_mult++) {
+               ssize_t old_lg_dirty_mult;
+
+               assert_d_eq(mallctl("arena.0.lg_dirty_mult", &old_lg_dirty_mult,
+                   &sz, &lg_dirty_mult, sizeof(ssize_t)), 0,
+                   "Unexpected mallctl() failure");
+               assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult,
+                   "Unexpected old arena.0.lg_dirty_mult");
+       }
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_decay_time)
+{
+       ssize_t decay_time, orig_decay_time, prev_decay_time;
+       size_t sz = sizeof(ssize_t);
+
+       test_skip_if(opt_purge != purge_mode_decay);
+
+       assert_d_eq(mallctl("arena.0.decay_time", &orig_decay_time, &sz,
+           NULL, 0), 0, "Unexpected mallctl() failure");
+
+       decay_time = -2;
+       assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
+           &decay_time, sizeof(ssize_t)), EFAULT,
+           "Unexpected mallctl() success");
+
+       decay_time = 0x7fffffff;
+       assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
+           &decay_time, sizeof(ssize_t)), 0,
+           "Unexpected mallctl() failure");
+
+       for (prev_decay_time = decay_time, decay_time = -1;
+           decay_time < 20; prev_decay_time = decay_time, decay_time++) {
+               ssize_t old_decay_time;
+
+               assert_d_eq(mallctl("arena.0.decay_time", &old_decay_time,
+                   &sz, &decay_time, sizeof(ssize_t)), 0,
+                   "Unexpected mallctl() failure");
+               assert_zd_eq(old_decay_time, prev_decay_time,
+                   "Unexpected old arena.0.decay_time");
+       }
+}
+TEST_END
+
 TEST_BEGIN(test_arena_i_purge)
 {
        unsigned narenas;
@@ -248,6 +439,26 @@ TEST_BEGIN(test_arena_i_purge)
 }
 TEST_END
 
+TEST_BEGIN(test_arena_i_decay)
+{
+       unsigned narenas;
+       size_t sz = sizeof(unsigned);
+       size_t mib[3];
+       size_t miblen = 3;
+
+       assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+           "Unexpected mallctl() failure");
+
+       assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
+           "Unexpected mallctl() failure");
+       assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
+           "Unexpected mallctlnametomib() failure");
+       mib[1] = narenas;
+       assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+           "Unexpected mallctlbymib() failure");
+}
+TEST_END
+
 TEST_BEGIN(test_arena_i_dss)
 {
        const char *dss_prec_old, *dss_prec_new;
@@ -307,6 +518,73 @@ TEST_BEGIN(test_arenas_initialized)
 }
 TEST_END
 
+TEST_BEGIN(test_arenas_lg_dirty_mult)
+{
+       ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
+       size_t sz = sizeof(ssize_t);
+
+       test_skip_if(opt_purge != purge_mode_ratio);
+
+       assert_d_eq(mallctl("arenas.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
+           NULL, 0), 0, "Unexpected mallctl() failure");
+
+       lg_dirty_mult = -2;
+       assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL,
+           &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+           "Unexpected mallctl() success");
+
+       lg_dirty_mult = (sizeof(size_t) << 3);
+       assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL,
+           &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+           "Unexpected mallctl() success");
+
+       for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1;
+           lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult =
+           lg_dirty_mult, lg_dirty_mult++) {
+               ssize_t old_lg_dirty_mult;
+
+               assert_d_eq(mallctl("arenas.lg_dirty_mult", &old_lg_dirty_mult,
+                   &sz, &lg_dirty_mult, sizeof(ssize_t)), 0,
+                   "Unexpected mallctl() failure");
+               assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult,
+                   "Unexpected old arenas.lg_dirty_mult");
+       }
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_decay_time)
+{
+       ssize_t decay_time, orig_decay_time, prev_decay_time;
+       size_t sz = sizeof(ssize_t);
+
+       test_skip_if(opt_purge != purge_mode_decay);
+
+       assert_d_eq(mallctl("arenas.decay_time", &orig_decay_time, &sz,
+           NULL, 0), 0, "Unexpected mallctl() failure");
+
+       decay_time = -2;
+       assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
+           &decay_time, sizeof(ssize_t)), EFAULT,
+           "Unexpected mallctl() success");
+
+       decay_time = 0x7fffffff;
+       assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
+           &decay_time, sizeof(ssize_t)), 0,
+           "Expected mallctl() failure");
+
+       for (prev_decay_time = decay_time, decay_time = -1;
+           decay_time < 20; prev_decay_time = decay_time, decay_time++) {
+               ssize_t old_decay_time;
+
+               assert_d_eq(mallctl("arenas.decay_time", &old_decay_time,
+                   &sz, &decay_time, sizeof(ssize_t)), 0,
+                   "Unexpected mallctl() failure");
+               assert_zd_eq(old_decay_time, prev_decay_time,
+                   "Unexpected old arenas.decay_time");
+       }
+}
+TEST_END
+
 TEST_BEGIN(test_arenas_constants)
 {
 
@@ -321,7 +599,8 @@ TEST_BEGIN(test_arenas_constants)
        TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
        TEST_ARENAS_CONSTANT(size_t, page, PAGE);
        TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS);
-       TEST_ARENAS_CONSTANT(size_t, nlruns, nlclasses);
+       TEST_ARENAS_CONSTANT(unsigned, nlruns, nlclasses);
+       TEST_ARENAS_CONSTANT(unsigned, nhchunks, nhclasses);
 
 #undef TEST_ARENAS_CONSTANT
 }
@@ -357,12 +636,29 @@ TEST_BEGIN(test_arenas_lrun_constants)
        assert_zu_eq(name, expected, "Incorrect "#name" size");         \
 } while (0)
 
-       TEST_ARENAS_LRUN_CONSTANT(size_t, size, (1 << LG_PAGE));
+       TEST_ARENAS_LRUN_CONSTANT(size_t, size, LARGE_MINCLASS);
 
 #undef TEST_ARENAS_LRUN_CONSTANT
 }
 TEST_END
 
+TEST_BEGIN(test_arenas_hchunk_constants)
+{
+
+#define        TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do {             \
+       t name;                                                         \
+       size_t sz = sizeof(t);                                          \
+       assert_d_eq(mallctl("arenas.hchunk.0."#name, &name, &sz, NULL,  \
+           0), 0, "Unexpected mallctl() failure");                     \
+       assert_zu_eq(name, expected, "Incorrect "#name" size");         \
+} while (0)
+
+       TEST_ARENAS_HCHUNK_CONSTANT(size_t, size, chunksize);
+
+#undef TEST_ARENAS_HCHUNK_CONSTANT
+}
+TEST_END
+
 TEST_BEGIN(test_arenas_extend)
 {
        unsigned narenas_before, arena, narenas_after;
@@ -391,8 +687,10 @@ TEST_BEGIN(test_stats_arenas)
            0), 0, "Unexpected mallctl() failure");                     \
 } while (0)
 
-       TEST_STATS_ARENAS(const char *, dss);
        TEST_STATS_ARENAS(unsigned, nthreads);
+       TEST_STATS_ARENAS(const char *, dss);
+       TEST_STATS_ARENAS(ssize_t, lg_dirty_mult);
+       TEST_STATS_ARENAS(ssize_t, decay_time);
        TEST_STATS_ARENAS(size_t, pactive);
        TEST_STATS_ARENAS(size_t, pdirty);
 
@@ -413,13 +711,21 @@ main(void)
            test_mallctl_config,
            test_mallctl_opt,
            test_manpage_example,
+           test_tcache_none,
+           test_tcache,
            test_thread_arena,
+           test_arena_i_lg_dirty_mult,
+           test_arena_i_decay_time,
            test_arena_i_purge,
+           test_arena_i_decay,
            test_arena_i_dss,
            test_arenas_initialized,
+           test_arenas_lg_dirty_mult,
+           test_arenas_decay_time,
            test_arenas_constants,
            test_arenas_bin_constants,
            test_arenas_lrun_constants,
+           test_arenas_hchunk_constants,
            test_arenas_extend,
            test_stats_arenas));
 }
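
Note (not part of the commit): the new test_tcache case walks an explicit thread cache through its full lifecycle. A minimal application-level sketch of that lifecycle, using only the documented mallctl names and MALLOCX flags exercised above (requires a tcache-enabled build):

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void)
{
    unsigned tc;
    size_t sz = sizeof(tc);
    void *p;

    if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
        return (1);

    /* Allocate and free through the private cache. */
    p = mallocx(42, MALLOCX_TCACHE(tc));
    if (p != NULL)
        dallocx(p, MALLOCX_TCACHE(tc));

    /* Return cached regions to the arena, then retire the cache. */
    mallctl("tcache.flush", NULL, NULL, &tc, sizeof(tc));
    mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
    printf("tcache id %u created and destroyed\n", tc);
    return (0);
}
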
index bd289c54d6b86800580c0ce5e67edec3a9c0faa0..bde2a480b6bfda2f02ec9d84b440e5afe354d0b2 100644 (file)
@@ -85,6 +85,7 @@ TEST_END
 int
 main(void)
 {
+
        return (test(
            test_mq_basic,
            test_mq_threaded));
diff --git a/src/jemalloc/test/unit/nstime.c b/src/jemalloc/test/unit/nstime.c
new file mode 100644 (file)
index 0000000..cd7d9a6
--- /dev/null
@@ -0,0 +1,220 @@
+#include "test/jemalloc_test.h"
+
+#define        BILLION UINT64_C(1000000000)
+
+TEST_BEGIN(test_nstime_init)
+{
+       nstime_t nst;
+
+       nstime_init(&nst, 42000000043);
+       assert_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read");
+       assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
+       assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_init2)
+{
+       nstime_t nst;
+
+       nstime_init2(&nst, 42, 43);
+       assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
+       assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_copy)
+{
+       nstime_t nsta, nstb;
+
+       nstime_init2(&nsta, 42, 43);
+       nstime_init(&nstb, 0);
+       nstime_copy(&nstb, &nsta);
+       assert_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied");
+       assert_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_compare)
+{
+       nstime_t nsta, nstb;
+
+       nstime_init2(&nsta, 42, 43);
+       nstime_copy(&nstb, &nsta);
+       assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal");
+       assert_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal");
+
+       nstime_init2(&nstb, 42, 42);
+       assert_d_eq(nstime_compare(&nsta, &nstb), 1,
+           "nsta should be greater than nstb");
+       assert_d_eq(nstime_compare(&nstb, &nsta), -1,
+           "nstb should be less than nsta");
+
+       nstime_init2(&nstb, 42, 44);
+       assert_d_eq(nstime_compare(&nsta, &nstb), -1,
+           "nsta should be less than nstb");
+       assert_d_eq(nstime_compare(&nstb, &nsta), 1,
+           "nstb should be greater than nsta");
+
+       nstime_init2(&nstb, 41, BILLION - 1);
+       assert_d_eq(nstime_compare(&nsta, &nstb), 1,
+           "nsta should be greater than nstb");
+       assert_d_eq(nstime_compare(&nstb, &nsta), -1,
+           "nstb should be less than nsta");
+
+       nstime_init2(&nstb, 43, 0);
+       assert_d_eq(nstime_compare(&nsta, &nstb), -1,
+           "nsta should be less than nstb");
+       assert_d_eq(nstime_compare(&nstb, &nsta), 1,
+           "nstb should be greater than nsta");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_add)
+{
+       nstime_t nsta, nstb;
+
+       nstime_init2(&nsta, 42, 43);
+       nstime_copy(&nstb, &nsta);
+       nstime_add(&nsta, &nstb);
+       nstime_init2(&nstb, 84, 86);
+       assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+           "Incorrect addition result");
+
+       nstime_init2(&nsta, 42, BILLION - 1);
+       nstime_copy(&nstb, &nsta);
+       nstime_add(&nsta, &nstb);
+       nstime_init2(&nstb, 85, BILLION - 2);
+       assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+           "Incorrect addition result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_subtract)
+{
+       nstime_t nsta, nstb;
+
+       nstime_init2(&nsta, 42, 43);
+       nstime_copy(&nstb, &nsta);
+       nstime_subtract(&nsta, &nstb);
+       nstime_init(&nstb, 0);
+       assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+           "Incorrect subtraction result");
+
+       nstime_init2(&nsta, 42, 43);
+       nstime_init2(&nstb, 41, 44);
+       nstime_subtract(&nsta, &nstb);
+       nstime_init2(&nstb, 0, BILLION - 1);
+       assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+           "Incorrect subtraction result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_imultiply)
+{
+       nstime_t nsta, nstb;
+
+       nstime_init2(&nsta, 42, 43);
+       nstime_imultiply(&nsta, 10);
+       nstime_init2(&nstb, 420, 430);
+       assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+           "Incorrect multiplication result");
+
+       nstime_init2(&nsta, 42, 666666666);
+       nstime_imultiply(&nsta, 3);
+       nstime_init2(&nstb, 127, 999999998);
+       assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+           "Incorrect multiplication result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_idivide)
+{
+       nstime_t nsta, nstb;
+
+       nstime_init2(&nsta, 42, 43);
+       nstime_copy(&nstb, &nsta);
+       nstime_imultiply(&nsta, 10);
+       nstime_idivide(&nsta, 10);
+       assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+           "Incorrect division result");
+
+       nstime_init2(&nsta, 42, 666666666);
+       nstime_copy(&nstb, &nsta);
+       nstime_imultiply(&nsta, 3);
+       nstime_idivide(&nsta, 3);
+       assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+           "Incorrect division result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_divide)
+{
+       nstime_t nsta, nstb, nstc;
+
+       nstime_init2(&nsta, 42, 43);
+       nstime_copy(&nstb, &nsta);
+       nstime_imultiply(&nsta, 10);
+       assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
+           "Incorrect division result");
+
+       nstime_init2(&nsta, 42, 43);
+       nstime_copy(&nstb, &nsta);
+       nstime_imultiply(&nsta, 10);
+       nstime_init(&nstc, 1);
+       nstime_add(&nsta, &nstc);
+       assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
+           "Incorrect division result");
+
+       nstime_init2(&nsta, 42, 43);
+       nstime_copy(&nstb, &nsta);
+       nstime_imultiply(&nsta, 10);
+       nstime_init(&nstc, 1);
+       nstime_subtract(&nsta, &nstc);
+       assert_u64_eq(nstime_divide(&nsta, &nstb), 9,
+           "Incorrect division result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_update)
+{
+       nstime_t nst;
+
+       nstime_init(&nst, 0);
+
+       assert_false(nstime_update(&nst), "Basic time update failed.");
+
+       /* Only Rip Van Winkle sleeps this long. */
+       {
+               nstime_t addend;
+               nstime_init2(&addend, 631152000, 0);
+               nstime_add(&nst, &addend);
+       }
+       {
+               nstime_t nst0;
+               nstime_copy(&nst0, &nst);
+               assert_true(nstime_update(&nst),
+                   "Update should detect time roll-back.");
+               assert_d_eq(nstime_compare(&nst, &nst0), 0,
+                   "Time should not have been modified");
+       }
+
+}
+TEST_END
+
+int
+main(void)
+{
+
+       return (test(
+           test_nstime_init,
+           test_nstime_init2,
+           test_nstime_copy,
+           test_nstime_compare,
+           test_nstime_add,
+           test_nstime_subtract,
+           test_nstime_imultiply,
+           test_nstime_idivide,
+           test_nstime_divide,
+           test_nstime_update));
+}
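
Note (not part of the commit): nstime_t is jemalloc-internal, so the arithmetic the new unit test asserts (seconds/nanoseconds packed into one counter, with add/compare semantics) can be mirrored by a small standalone mock. The sketch below reproduces the test_nstime_add check; it only illustrates the semantics being tested and is not the real implementation.

#include <stdint.h>
#include <stdio.h>

#define BILLION UINT64_C(1000000000)

typedef struct { uint64_t ns; } mock_nstime_t;

static void
mock_init2(mock_nstime_t *t, uint64_t sec, uint64_t nsec)
{
    t->ns = sec * BILLION + nsec;
}

static void
mock_add(mock_nstime_t *a, const mock_nstime_t *b)
{
    a->ns += b->ns;
}

static int
mock_compare(const mock_nstime_t *a, const mock_nstime_t *b)
{
    return ((a->ns > b->ns) - (a->ns < b->ns));
}

int
main(void)
{
    mock_nstime_t a, b;

    /* Mirrors test_nstime_add: (42,43) + (42,43) == (84,86). */
    mock_init2(&a, 42, 43);
    mock_init2(&b, 42, 43);
    mock_add(&a, &b);
    mock_init2(&b, 84, 86);
    printf("compare = %d\n", mock_compare(&a, &b));
    return (0);
}
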
diff --git a/src/jemalloc/test/unit/prng.c b/src/jemalloc/test/unit/prng.c
new file mode 100644 (file)
index 0000000..b22bd2f
--- /dev/null
@@ -0,0 +1,68 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_prng_lg_range)
+{
+       uint64_t sa, sb, ra, rb;
+       unsigned lg_range;
+
+       sa = 42;
+       ra = prng_lg_range(&sa, 64);
+       sa = 42;
+       rb = prng_lg_range(&sa, 64);
+       assert_u64_eq(ra, rb,
+           "Repeated generation should produce repeated results");
+
+       sb = 42;
+       rb = prng_lg_range(&sb, 64);
+       assert_u64_eq(ra, rb,
+           "Equivalent generation should produce equivalent results");
+
+       sa = 42;
+       ra = prng_lg_range(&sa, 64);
+       rb = prng_lg_range(&sa, 64);
+       assert_u64_ne(ra, rb,
+           "Full-width results must not immediately repeat");
+
+       sa = 42;
+       ra = prng_lg_range(&sa, 64);
+       for (lg_range = 63; lg_range > 0; lg_range--) {
+               sb = 42;
+               rb = prng_lg_range(&sb, lg_range);
+               assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
+                   0, "High order bits should be 0, lg_range=%u", lg_range);
+               assert_u64_eq(rb, (ra >> (64 - lg_range)),
+                   "Expected high order bits of full-width result, "
+                   "lg_range=%u", lg_range);
+       }
+}
+TEST_END
+
+TEST_BEGIN(test_prng_range)
+{
+       uint64_t range;
+#define        MAX_RANGE       10000000
+#define        RANGE_STEP      97
+#define        NREPS           10
+
+       for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
+               uint64_t s;
+               unsigned rep;
+
+               s = range;
+               for (rep = 0; rep < NREPS; rep++) {
+                       uint64_t r = prng_range(&s, range);
+
+                       assert_u64_lt(r, range, "Out of range");
+               }
+       }
+}
+TEST_END
+
+int
+main(void)
+{
+
+       return (test(
+           test_prng_lg_range,
+           test_prng_range));
+}
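
Note (not part of the commit): the assertion "rb == ra >> (64 - lg_range)" pins down that narrow results come from the high-order bits of the generator state, which are the best-mixed bits of a linear congruential generator. A standalone sketch of that idea is below; the LCG constants are Knuth's MMIX values, not necessarily the ones jemalloc's prng uses.

#include <stdint.h>
#include <stdio.h>

static uint64_t
lcg_step(uint64_t *state)
{
    *state = (*state * UINT64_C(6364136223846793005)) +
        UINT64_C(1442695040888963407);
    return (*state);
}

/* Narrow results take the high bits, so they nest inside wider ones. */
static uint64_t
bounded_bits(uint64_t *state, unsigned lg_range)
{
    return (lcg_step(state) >> (64 - lg_range));
}

int
main(void)
{
    unsigned bits;

    for (bits = 1; bits <= 8; bits++) {
        uint64_t s = 42; /* Same seed each time: results nest. */
        printf("bits=%u value=%llu\n", bits,
            (unsigned long long)bounded_bits(&s, bits));
    }
    return (0);
}
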
index d4bab8d013465e48912d6444de2543cb8740ab72..81490957292538b1291fc5670f1eb3d547377dcb 100644 (file)
@@ -2,7 +2,7 @@
 
 #ifdef JEMALLOC_PROF
 const char *malloc_conf =
-    "prof:true,prof_thread_active_init:false,lg_prof_sample:0,prof_final:false";
+    "prof:true,prof_thread_active_init:false,lg_prof_sample:0";
 #endif
 
 static void
index a00b1054f1d6f638067275a4f64190ec481711ce..a0e6ee921178cf7226e2fe23afd74da844d56134 100644 (file)
@@ -21,8 +21,9 @@ prof_dump_open_intercept(bool propagate_err, const char *filename)
 
 TEST_BEGIN(test_gdump)
 {
-       bool active;
-       void *p, *q;
+       bool active, gdump, gdump_old;
+       void *p, *q, *r, *s;
+       size_t sz;
 
        test_skip_if(!config_prof);
 
@@ -42,8 +43,32 @@ TEST_BEGIN(test_gdump)
        assert_ptr_not_null(q, "Unexpected mallocx() failure");
        assert_true(did_prof_dump_open, "Expected a profile dump");
 
+       gdump = false;
+       sz = sizeof(gdump_old);
+       assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
+           sizeof(gdump)), 0,
+           "Unexpected mallctl failure while disabling prof.gdump");
+       assert(gdump_old);
+       did_prof_dump_open = false;
+       r = mallocx(chunksize, 0);
+       assert_ptr_not_null(q, "Unexpected mallocx() failure");
+       assert_false(did_prof_dump_open, "Unexpected profile dump");
+
+       gdump = true;
+       sz = sizeof(gdump_old);
+       assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
+           sizeof(gdump)), 0,
+           "Unexpected mallctl failure while enabling prof.gdump");
+       assert(!gdump_old);
+       did_prof_dump_open = false;
+       s = mallocx(chunksize, 0);
+       assert_ptr_not_null(q, "Unexpected mallocx() failure");
+       assert_true(did_prof_dump_open, "Expected a profile dump");
+
        dallocx(p, 0);
        dallocx(q, 0);
+       dallocx(r, 0);
+       dallocx(s, 0);
 }
 TEST_END
 
index 3af1964294a9acb3910c1b89dd323044a5dedd24..69983e5e53037acd62839cb129e8953476a21c40 100644 (file)
@@ -16,6 +16,35 @@ prof_dump_open_intercept(bool propagate_err, const char *filename)
        return (fd);
 }
 
+static void
+set_prof_active(bool active)
+{
+
+       assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+           0, "Unexpected mallctl failure");
+}
+
+static size_t
+get_lg_prof_sample(void)
+{
+       size_t lg_prof_sample;
+       size_t sz = sizeof(size_t);
+
+       assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
+           "Unexpected mallctl failure while reading profiling sample rate");
+       return (lg_prof_sample);
+}
+
+static void
+do_prof_reset(size_t lg_prof_sample)
+{
+       assert_d_eq(mallctl("prof.reset", NULL, NULL,
+           &lg_prof_sample, sizeof(size_t)), 0,
+           "Unexpected mallctl failure while resetting profile data");
+       assert_zu_eq(lg_prof_sample, get_lg_prof_sample(),
+           "Expected profile sample rate change");
+}
+
 TEST_BEGIN(test_prof_reset_basic)
 {
        size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next;
@@ -30,9 +59,7 @@ TEST_BEGIN(test_prof_reset_basic)
            "Unexpected mallctl failure while reading profiling sample rate");
        assert_zu_eq(lg_prof_sample_orig, 0,
            "Unexpected profiling sample rate");
-       sz = sizeof(size_t);
-       assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
-           "Unexpected mallctl failure while reading profiling sample rate");
+       lg_prof_sample = get_lg_prof_sample();
        assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
            "Unexpected disagreement between \"opt.lg_prof_sample\" and "
            "\"prof.lg_sample\"");
@@ -41,10 +68,7 @@ TEST_BEGIN(test_prof_reset_basic)
        for (i = 0; i < 2; i++) {
                assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
                    "Unexpected mallctl failure while resetting profile data");
-               sz = sizeof(size_t);
-               assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz,
-                   NULL, 0), 0, "Unexpected mallctl failure while reading "
-                   "profiling sample rate");
+               lg_prof_sample = get_lg_prof_sample();
                assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
                    "Unexpected profile sample rate change");
        }
@@ -52,22 +76,15 @@ TEST_BEGIN(test_prof_reset_basic)
        /* Test resets with prof.lg_sample changes. */
        lg_prof_sample_next = 1;
        for (i = 0; i < 2; i++) {
-               assert_d_eq(mallctl("prof.reset", NULL, NULL,
-                   &lg_prof_sample_next, sizeof(size_t)), 0,
-                   "Unexpected mallctl failure while resetting profile data");
-               sz = sizeof(size_t);
-               assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz,
-                   NULL, 0), 0, "Unexpected mallctl failure while reading "
-                   "profiling sample rate");
+               do_prof_reset(lg_prof_sample_next);
+               lg_prof_sample = get_lg_prof_sample();
                assert_zu_eq(lg_prof_sample, lg_prof_sample_next,
                    "Expected profile sample rate change");
                lg_prof_sample_next = lg_prof_sample_orig;
        }
 
        /* Make sure the test code restored prof.lg_sample. */
-       sz = sizeof(size_t);
-       assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
-           "Unexpected mallctl failure while reading profiling sample rate");
+       lg_prof_sample = get_lg_prof_sample();
        assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
            "Unexpected disagreement between \"opt.lg_prof_sample\" and "
            "\"prof.lg_sample\"");
@@ -88,15 +105,12 @@ prof_dump_header_intercept(bool propagate_err, const prof_cnt_t *cnt_all)
 
 TEST_BEGIN(test_prof_reset_cleanup)
 {
-       bool active;
        void *p;
        prof_dump_header_t *prof_dump_header_orig;
 
        test_skip_if(!config_prof);
 
-       active = true;
-       assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-           0, "Unexpected mallctl failure while activating profiling");
+       set_prof_active(true);
 
        assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
        p = mallocx(1, 0);
@@ -124,9 +138,7 @@ TEST_BEGIN(test_prof_reset_cleanup)
        dallocx(p, 0);
        assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
 
-       active = false;
-       assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-           0, "Unexpected mallctl failure while deactivating profiling");
+       set_prof_active(false);
 }
 TEST_END
 
@@ -182,7 +194,7 @@ thd_start(void *varg)
 
 TEST_BEGIN(test_prof_reset)
 {
-       bool active;
+       size_t lg_prof_sample_orig;
        thd_t thds[NTHREADS];
        unsigned thd_args[NTHREADS];
        unsigned i;
@@ -195,9 +207,10 @@ TEST_BEGIN(test_prof_reset)
            "Unexpected pre-existing tdata structures");
        tdata_count = prof_tdata_count();
 
-       active = true;
-       assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-           0, "Unexpected mallctl failure while activating profiling");
+       lg_prof_sample_orig = get_lg_prof_sample();
+       do_prof_reset(5);
+
+       set_prof_active(true);
 
        for (i = 0; i < NTHREADS; i++) {
                thd_args[i] = i;
@@ -211,9 +224,9 @@ TEST_BEGIN(test_prof_reset)
        assert_zu_eq(prof_tdata_count(), tdata_count,
            "Unexpected remaining tdata structures");
 
-       active = false;
-       assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-           0, "Unexpected mallctl failure while deactivating profiling");
+       set_prof_active(false);
+
+       do_prof_reset(lg_prof_sample_orig);
 }
 TEST_END
 #undef NTHREADS
@@ -222,6 +235,58 @@ TEST_END
 #undef RESET_INTERVAL
 #undef DUMP_INTERVAL
 
+/* Test sampling at the same allocation site across resets. */
+#define        NITER 10
+TEST_BEGIN(test_xallocx)
+{
+       size_t lg_prof_sample_orig;
+       unsigned i;
+       void *ptrs[NITER];
+
+       test_skip_if(!config_prof);
+
+       lg_prof_sample_orig = get_lg_prof_sample();
+       set_prof_active(true);
+
+       /* Reset profiling. */
+       do_prof_reset(0);
+
+       for (i = 0; i < NITER; i++) {
+               void *p;
+               size_t sz, nsz;
+
+               /* Reset profiling. */
+               do_prof_reset(0);
+
+               /* Allocate small object (which will be promoted). */
+               p = ptrs[i] = mallocx(1, 0);
+               assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+               /* Reset profiling. */
+               do_prof_reset(0);
+
+               /* Perform successful xallocx(). */
+               sz = sallocx(p, 0);
+               assert_zu_eq(xallocx(p, sz, 0, 0), sz,
+                   "Unexpected xallocx() failure");
+
+               /* Perform unsuccessful xallocx(). */
+               nsz = nallocx(sz+1, 0);
+               assert_zu_eq(xallocx(p, nsz, 0, 0), sz,
+                   "Unexpected xallocx() success");
+       }
+
+       for (i = 0; i < NITER; i++) {
+               /* dallocx. */
+               dallocx(ptrs[i], 0);
+       }
+
+       set_prof_active(false);
+       do_prof_reset(lg_prof_sample_orig);
+}
+TEST_END
+#undef NITER
+
 int
 main(void)
 {
@@ -232,5 +297,6 @@ main(void)
        return (test(
            test_prof_reset_basic,
            test_prof_reset_cleanup,
-           test_prof_reset));
+           test_prof_reset,
+           test_xallocx));
 }
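
Most of the churn above replaces repeated mallctl() boilerplate with small helpers. Their definitions sit earlier in prof_reset.c and are not part of the hunks shown; a minimal sketch of what they plausibly look like, assuming the test-harness macros from test/jemalloc_test.h and the "prof.reset" control name (only "prof.lg_sample" and "prof.active" appear verbatim in the removed lines), is:

#include "test/jemalloc_test.h"

static size_t
get_lg_prof_sample(void)
{
	size_t lg_prof_sample;
	size_t sz = sizeof(size_t);

	/* Read the current profiling sample rate (lg base 2). */
	assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0),
	    0, "Unexpected mallctl failure while reading profiling sample rate");
	return (lg_prof_sample);
}

static void
do_prof_reset(size_t lg_prof_sample)
{
	/* Discard profile data and (optionally) switch the sample rate. */
	assert_d_eq(mallctl("prof.reset", NULL, NULL, &lg_prof_sample,
	    sizeof(size_t)), 0,
	    "Unexpected mallctl failure while resetting profile data");
}

static void
set_prof_active(bool active)
{
	/* Toggle "prof.active" on or off. */
	assert_d_eq(mallctl("prof.active", NULL, NULL, &active,
	    sizeof(active)), 0,
	    "Unexpected mallctl failure while toggling prof.active");
}
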
index 6066dba7adb4a001471de92c3c1492062922c8ec..f501158d7de072e5c56dc1d0d9609a01d6cb1142 100644 (file)
@@ -1,8 +1,7 @@
 #include "test/jemalloc_test.h"
 
 #ifdef JEMALLOC_PROF
-const char *malloc_conf =
-    "prof:true,prof_active:false,prof_final:false";
+const char *malloc_conf = "prof:true,prof_active:false";
 #endif
 
 static void
index b38eb0e33f3b8b08a43d37356b96c0d9b91b7e6f..cf3d3a783587107dadb463719e2559dfdcdac2fc 100644 (file)
@@ -3,7 +3,7 @@
 #define        rbtn_black_height(a_type, a_field, a_rbt, r_height) do {        \
     a_type *rbp_bh_t;                                                  \
     for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0;                 \
-      rbp_bh_t != &(a_rbt)->rbt_nil;                                   \
+        rbp_bh_t != NULL;                                              \
       rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) {           \
        if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) {                 \
            (r_height)++;                                               \
@@ -21,7 +21,7 @@ struct node_s {
 };
 
 static int
-node_cmp(node_t *a, node_t *b) {
+node_cmp(const node_t *a, const node_t *b) {
        int ret;
 
        assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
@@ -68,38 +68,43 @@ TEST_BEGIN(test_rb_empty)
 TEST_END
 
 static unsigned
-tree_recurse(node_t *node, unsigned black_height, unsigned black_depth,
-    node_t *nil)
+tree_recurse(node_t *node, unsigned black_height, unsigned black_depth)
 {
        unsigned ret = 0;
-       node_t *left_node = rbtn_left_get(node_t, link, node);
-       node_t *right_node = rbtn_right_get(node_t, link, node);
+       node_t *left_node;
+       node_t *right_node;
+
+       if (node == NULL)
+               return (ret);
+
+       left_node = rbtn_left_get(node_t, link, node);
+       right_node = rbtn_right_get(node_t, link, node);
 
        if (!rbtn_red_get(node_t, link, node))
                black_depth++;
 
        /* Red nodes must be interleaved with black nodes. */
        if (rbtn_red_get(node_t, link, node)) {
-               assert_false(rbtn_red_get(node_t, link, left_node),
-                   "Node should be black");
-               assert_false(rbtn_red_get(node_t, link, right_node),
-                   "Node should be black");
+               if (left_node != NULL)
+                       assert_false(rbtn_red_get(node_t, link, left_node),
+                               "Node should be black");
+               if (right_node != NULL)
+                       assert_false(rbtn_red_get(node_t, link, right_node),
+                           "Node should be black");
        }
 
-       if (node == nil)
-               return (ret);
        /* Self. */
        assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
 
        /* Left subtree. */
-       if (left_node != nil)
-               ret += tree_recurse(left_node, black_height, black_depth, nil);
+       if (left_node != NULL)
+               ret += tree_recurse(left_node, black_height, black_depth);
        else
                ret += (black_depth != black_height);
 
        /* Right subtree. */
-       if (right_node != nil)
-               ret += tree_recurse(right_node, black_height, black_depth, nil);
+       if (right_node != NULL)
+               ret += tree_recurse(right_node, black_height, black_depth);
        else
                ret += (black_depth != black_height);
 
@@ -181,8 +186,7 @@ node_remove(tree_t *tree, node_t *node, unsigned nnodes)
        node->magic = 0;
 
        rbtn_black_height(node_t, link, tree, black_height);
-       imbalances = tree_recurse(tree->rbt_root, black_height, 0,
-           &(tree->rbt_nil));
+       imbalances = tree_recurse(tree->rbt_root, black_height, 0);
        assert_u_eq(imbalances, 0, "Tree is unbalanced");
        assert_u_eq(tree_iterate(tree), nnodes-1,
            "Unexpected node iteration count");
@@ -212,6 +216,15 @@ remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data)
        return (ret);
 }
 
+static void
+destroy_cb(node_t *node, void *data)
+{
+       unsigned *nnodes = (unsigned *)data;
+
+       assert_u_gt(*nnodes, 0, "Destruction removed too many nodes");
+       (*nnodes)--;
+}
+
 TEST_BEGIN(test_rb_random)
 {
 #define        NNODES 25
@@ -244,7 +257,6 @@ TEST_BEGIN(test_rb_random)
                for (j = 1; j <= NNODES; j++) {
                        /* Initialize tree and nodes. */
                        tree_new(&tree);
-                       tree.rbt_nil.magic = 0;
                        for (k = 0; k < j; k++) {
                                nodes[k].magic = NODE_MAGIC;
                                nodes[k].key = bag[k];
@@ -257,7 +269,7 @@ TEST_BEGIN(test_rb_random)
                                rbtn_black_height(node_t, link, &tree,
                                    black_height);
                                imbalances = tree_recurse(tree.rbt_root,
-                                   black_height, 0, &(tree.rbt_nil));
+                                   black_height, 0);
                                assert_u_eq(imbalances, 0,
                                    "Tree is unbalanced");
 
@@ -278,7 +290,7 @@ TEST_BEGIN(test_rb_random)
                        }
 
                        /* Remove nodes. */
-                       switch (i % 4) {
+                       switch (i % 5) {
                        case 0:
                                for (k = 0; k < j; k++)
                                        node_remove(&tree, &nodes[k], j - k);
@@ -314,6 +326,12 @@ TEST_BEGIN(test_rb_random)
                                assert_u_eq(nnodes, 0,
                                    "Removal terminated early");
                                break;
+                       } case 4: {
+                               unsigned nnodes = j;
+                               tree_destroy(&tree, destroy_cb, &nnodes);
+                               assert_u_eq(nnodes, 0,
+                                   "Destruction terminated early");
+                               break;
                        } default:
                                not_reached();
                        }
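
The rb.c changes above drop the rbt_nil sentinel in favor of plain NULL child pointers and add a tree_destroy() path with a per-node callback. As an aside for readers unfamiliar with what tree_recurse() verifies, here is a self-contained illustration (a hypothetical node struct, not jemalloc's rb.h macros) of the two invariants involved: every root-to-NULL path crosses the same number of black nodes, and red nodes never have red children.

#include <stdbool.h>
#include <stddef.h>

typedef struct rb_example_node_s {
	struct rb_example_node_s *left, *right;
	bool red;
} rb_example_node_t;

/* Return the black height, or -1 if the red-black invariants are violated. */
static int
black_height(const rb_example_node_t *node)
{
	int lh, rh;

	if (node == NULL)
		return (0);	/* NULL links count as black leaves. */
	lh = black_height(node->left);
	rh = black_height(node->right);
	if (lh < 0 || rh < 0 || lh != rh)
		return (-1);	/* Unequal black depth somewhere below. */
	if (node->red && ((node->left != NULL && node->left->red) ||
	    (node->right != NULL && node->right->red)))
		return (-1);	/* Red nodes must have black children. */
	return (lh + (node->red ? 0 : 1));
}
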
index 77a947d60fdff61805d7a4f6ccbd23d1179ba846..b54b3e86f574b3a3fc3bf42b8697b8fd38bdb1d1 100644 (file)
@@ -1,14 +1,30 @@
 #include "test/jemalloc_test.h"
 
+static rtree_node_elm_t *
+node_alloc(size_t nelms)
+{
+
+       return ((rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t)));
+}
+
+static void
+node_dalloc(rtree_node_elm_t *node)
+{
+
+       free(node);
+}
+
 TEST_BEGIN(test_rtree_get_empty)
 {
        unsigned i;
 
        for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
-               rtree_t *rtree = rtree_new(i, malloc, free);
-               assert_u_eq(rtree_get(rtree, 0), 0,
+               rtree_t rtree;
+               assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+                   "Unexpected rtree_new() failure");
+               assert_ptr_null(rtree_get(&rtree, 0, false),
                    "rtree_get() should return NULL for empty tree");
-               rtree_delete(rtree);
+               rtree_delete(&rtree);
        }
 }
 TEST_END
@@ -16,19 +32,24 @@ TEST_END
 TEST_BEGIN(test_rtree_extrema)
 {
        unsigned i;
+       extent_node_t node_a, node_b;
 
        for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
-               rtree_t *rtree = rtree_new(i, malloc, free);
+               rtree_t rtree;
+               assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+                   "Unexpected rtree_new() failure");
 
-               rtree_set(rtree, 0, 1);
-               assert_u_eq(rtree_get(rtree, 0), 1,
+               assert_false(rtree_set(&rtree, 0, &node_a),
+                   "Unexpected rtree_set() failure");
+               assert_ptr_eq(rtree_get(&rtree, 0, true), &node_a,
                    "rtree_get() should return previously set value");
 
-               rtree_set(rtree, ~((uintptr_t)0), 1);
-               assert_u_eq(rtree_get(rtree, ~((uintptr_t)0)), 1,
+               assert_false(rtree_set(&rtree, ~((uintptr_t)0), &node_b),
+                   "Unexpected rtree_set() failure");
+               assert_ptr_eq(rtree_get(&rtree, ~((uintptr_t)0), true), &node_b,
                    "rtree_get() should return previously set value");
 
-               rtree_delete(rtree);
+               rtree_delete(&rtree);
        }
 }
 TEST_END
@@ -40,26 +61,32 @@ TEST_BEGIN(test_rtree_bits)
        for (i = 1; i < (sizeof(uintptr_t) << 3); i++) {
                uintptr_t keys[] = {0, 1,
                    (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1};
-               rtree_t *rtree = rtree_new(i, malloc, free);
+               extent_node_t node;
+               rtree_t rtree;
+
+               assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+                   "Unexpected rtree_new() failure");
 
                for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
-                       rtree_set(rtree, keys[j], 1);
+                       assert_false(rtree_set(&rtree, keys[j], &node),
+                           "Unexpected rtree_set() failure");
                        for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) {
-                               assert_u_eq(rtree_get(rtree, keys[k]), 1,
-                                   "rtree_get() should return previously set "
-                                   "value and ignore insignificant key bits; "
-                                   "i=%u, j=%u, k=%u, set key=%#"PRIxPTR", "
-                                   "get key=%#"PRIxPTR, i, j, k, keys[j],
-                                   keys[k]);
+                               assert_ptr_eq(rtree_get(&rtree, keys[k], true),
+                                   &node, "rtree_get() should return "
+                                   "previously set value and ignore "
+                                   "insignificant key bits; i=%u, j=%u, k=%u, "
+                                   "set key=%#"FMTxPTR", get key=%#"FMTxPTR, i,
+                                   j, k, keys[j], keys[k]);
                        }
-                       assert_u_eq(rtree_get(rtree,
-                           (((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), 0,
+                       assert_ptr_null(rtree_get(&rtree,
+                           (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)), false),
                            "Only leftmost rtree leaf should be set; "
                            "i=%u, j=%u", i, j);
-                       rtree_set(rtree, keys[j], 0);
+                       assert_false(rtree_set(&rtree, keys[j], NULL),
+                           "Unexpected rtree_set() failure");
                }
 
-               rtree_delete(rtree);
+               rtree_delete(&rtree);
        }
 }
 TEST_END
@@ -68,37 +95,43 @@ TEST_BEGIN(test_rtree_random)
 {
        unsigned i;
        sfmt_t *sfmt;
-#define        NSET 100
+#define        NSET 16
 #define        SEED 42
 
        sfmt = init_gen_rand(SEED);
        for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
-               rtree_t *rtree = rtree_new(i, malloc, free);
                uintptr_t keys[NSET];
+               extent_node_t node;
                unsigned j;
+               rtree_t rtree;
+
+               assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+                   "Unexpected rtree_new() failure");
 
                for (j = 0; j < NSET; j++) {
                        keys[j] = (uintptr_t)gen_rand64(sfmt);
-                       rtree_set(rtree, keys[j], 1);
-                       assert_u_eq(rtree_get(rtree, keys[j]), 1,
+                       assert_false(rtree_set(&rtree, keys[j], &node),
+                           "Unexpected rtree_set() failure");
+                       assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node,
                            "rtree_get() should return previously set value");
                }
                for (j = 0; j < NSET; j++) {
-                       assert_u_eq(rtree_get(rtree, keys[j]), 1,
+                       assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node,
                            "rtree_get() should return previously set value");
                }
 
                for (j = 0; j < NSET; j++) {
-                       rtree_set(rtree, keys[j], 0);
-                       assert_u_eq(rtree_get(rtree, keys[j]), 0,
+                       assert_false(rtree_set(&rtree, keys[j], NULL),
+                           "Unexpected rtree_set() failure");
+                       assert_ptr_null(rtree_get(&rtree, keys[j], true),
                            "rtree_get() should return previously set value");
                }
                for (j = 0; j < NSET; j++) {
-                       assert_u_eq(rtree_get(rtree, keys[j]), 0,
+                       assert_ptr_null(rtree_get(&rtree, keys[j], true),
                            "rtree_get() should return previously set value");
                }
 
-               rtree_delete(rtree);
+               rtree_delete(&rtree);
        }
        fini_gen_rand(sfmt);
 #undef NSET
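
The rtree interface changes shape in this commit: the tree is embedded in caller-owned storage, node allocation is delegated to callbacks, stored values are extent_node_t pointers rather than small integers, and rtree_get() takes an extra bool saying whether the key is known to be present. A hedged usage sketch in the style of the tests above (node_alloc/node_dalloc are the callbacks defined at the top of this file's diff; the bit count and key are arbitrary):

static void
example_rtree_usage(void)
{
	rtree_t rtree;
	extent_node_t node;

	assert_false(rtree_new(&rtree, 16, node_alloc, node_dalloc),
	    "Unexpected rtree_new() failure");

	/* Unset slots now read back as NULL rather than 0. */
	assert_ptr_null(rtree_get(&rtree, (uintptr_t)0x1234, false),
	    "Expected empty slot");

	assert_false(rtree_set(&rtree, (uintptr_t)0x1234, &node),
	    "Unexpected rtree_set() failure");
	/* Passing true asserts the key is known to have been set. */
	assert_ptr_eq(rtree_get(&rtree, (uintptr_t)0x1234, true), &node,
	    "Expected previously set node");

	rtree_delete(&rtree);
}
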
diff --git a/src/jemalloc/test/unit/run_quantize.c b/src/jemalloc/test/unit/run_quantize.c
new file mode 100644 (file)
index 0000000..f6a2f74
--- /dev/null
@@ -0,0 +1,149 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_small_run_size)
+{
+       unsigned nbins, i;
+       size_t sz, run_size;
+       size_t mib[4];
+       size_t miblen = sizeof(mib) / sizeof(size_t);
+
+       /*
+        * Iterate over all small size classes, get their run sizes, and verify
+        * that the quantized size is the same as the run size.
+        */
+
+       sz = sizeof(unsigned);
+       assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
+           "Unexpected mallctl failure");
+
+       assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
+           "Unexpected mallctlnametomib failure");
+       for (i = 0; i < nbins; i++) {
+               mib[2] = i;
+               sz = sizeof(size_t);
+               assert_d_eq(mallctlbymib(mib, miblen, &run_size, &sz, NULL, 0),
+                   0, "Unexpected mallctlbymib failure");
+               assert_zu_eq(run_size, run_quantize_floor(run_size),
+                   "Small run quantization should be a no-op (run_size=%zu)",
+                   run_size);
+               assert_zu_eq(run_size, run_quantize_ceil(run_size),
+                   "Small run quantization should be a no-op (run_size=%zu)",
+                   run_size);
+       }
+}
+TEST_END
+
+TEST_BEGIN(test_large_run_size)
+{
+       bool cache_oblivious;
+       unsigned nlruns, i;
+       size_t sz, run_size_prev, ceil_prev;
+       size_t mib[4];
+       size_t miblen = sizeof(mib) / sizeof(size_t);
+
+       /*
+        * Iterate over all large size classes, get their run sizes, and verify
+        * that the quantized size is the same as the run size.
+        */
+
+       sz = sizeof(bool);
+       assert_d_eq(mallctl("config.cache_oblivious", &cache_oblivious, &sz,
+           NULL, 0), 0, "Unexpected mallctl failure");
+
+       sz = sizeof(unsigned);
+       assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
+           "Unexpected mallctl failure");
+
+       assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0,
+           "Unexpected mallctlnametomib failure");
+       for (i = 0; i < nlruns; i++) {
+               size_t lrun_size, run_size, floor, ceil;
+
+               mib[2] = i;
+               sz = sizeof(size_t);
+               assert_d_eq(mallctlbymib(mib, miblen, &lrun_size, &sz, NULL, 0),
+                   0, "Unexpected mallctlbymib failure");
+               run_size = cache_oblivious ? lrun_size + PAGE : lrun_size;
+               floor = run_quantize_floor(run_size);
+               ceil = run_quantize_ceil(run_size);
+
+               assert_zu_eq(run_size, floor,
+                   "Large run quantization should be a no-op for precise "
+                   "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);
+               assert_zu_eq(run_size, ceil,
+                   "Large run quantization should be a no-op for precise "
+                   "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);
+
+               if (i > 0) {
+                       assert_zu_eq(run_size_prev, run_quantize_floor(run_size
+                           - PAGE), "Floor should be a precise size");
+                       if (run_size_prev < ceil_prev) {
+                               assert_zu_eq(ceil_prev, run_size,
+                                   "Ceiling should be a precise size "
+                                   "(run_size_prev=%zu, ceil_prev=%zu, "
+                                   "run_size=%zu)", run_size_prev, ceil_prev,
+                                   run_size);
+                       }
+               }
+               run_size_prev = floor;
+               ceil_prev = run_quantize_ceil(run_size + PAGE);
+       }
+}
+TEST_END
+
+TEST_BEGIN(test_monotonic)
+{
+       unsigned nbins, nlruns, i;
+       size_t sz, floor_prev, ceil_prev;
+
+       /*
+        * Iterate over all run sizes and verify that
+        * run_quantize_{floor,ceil}() are monotonic.
+        */
+
+       sz = sizeof(unsigned);
+       assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
+           "Unexpected mallctl failure");
+
+       sz = sizeof(unsigned);
+       assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
+           "Unexpected mallctl failure");
+
+       floor_prev = 0;
+       ceil_prev = 0;
+       for (i = 1; i < run_quantize_max >> LG_PAGE; i++) {
+               size_t run_size, floor, ceil;
+
+               run_size = i << LG_PAGE;
+               floor = run_quantize_floor(run_size);
+               ceil = run_quantize_ceil(run_size);
+
+               assert_zu_le(floor, run_size,
+                   "Floor should be <= (floor=%zu, run_size=%zu, ceil=%zu)",
+                   floor, run_size, ceil);
+               assert_zu_ge(ceil, run_size,
+                   "Ceiling should be >= (floor=%zu, run_size=%zu, ceil=%zu)",
+                   floor, run_size, ceil);
+
+               assert_zu_le(floor_prev, floor, "Floor should be monotonic "
+                   "(floor_prev=%zu, floor=%zu, run_size=%zu, ceil=%zu)",
+                   floor_prev, floor, run_size, ceil);
+               assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
+                   "(floor=%zu, run_size=%zu, ceil_prev=%zu, ceil=%zu)",
+                   floor, run_size, ceil_prev, ceil);
+
+               floor_prev = floor;
+               ceil_prev = ceil;
+       }
+}
+TEST_END
+
+int
+main(void)
+{
+
+       return (test(
+           test_small_run_size,
+           test_large_run_size,
+           test_monotonic));
+}
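
run_quantize_floor()/run_quantize_ceil() are internal, so this new test only runs in-tree, but the invariants it asserts are easy to state on their own. The stand-alone stand-in below (page-granularity rounding only; jemalloc's real quantization also accounts for small-bin run sizes and cache-oblivious padding) satisfies the same properties the test checks: floor(s) <= s <= ceil(s), both functions are monotonic, and both are no-ops on already-quantized sizes.

#include <assert.h>
#include <stddef.h>

#define	EXAMPLE_PAGE	((size_t)4096)

static size_t
quantize_floor_example(size_t size)
{
	return (size & ~(EXAMPLE_PAGE - 1));
}

static size_t
quantize_ceil_example(size_t size)
{
	return ((size + EXAMPLE_PAGE - 1) & ~(EXAMPLE_PAGE - 1));
}

int
main(void)
{
	size_t s;

	for (s = EXAMPLE_PAGE; s < 64 * EXAMPLE_PAGE; s += 123) {
		size_t floor = quantize_floor_example(s);
		size_t ceil = quantize_ceil_example(s);

		assert(floor <= s && s <= ceil);
		assert(quantize_floor_example(floor) == floor);	/* no-op */
		assert(quantize_ceil_example(ceil) == ceil);	/* no-op */
	}
	return (0);
}
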
diff --git a/src/jemalloc/test/unit/size_classes.c b/src/jemalloc/test/unit/size_classes.c
new file mode 100644 (file)
index 0000000..2e2caaf
--- /dev/null
@@ -0,0 +1,112 @@
+#include "test/jemalloc_test.h"
+
+static size_t
+get_max_size_class(void)
+{
+       unsigned nhchunks;
+       size_t mib[4];
+       size_t sz, miblen, max_size_class;
+
+       sz = sizeof(unsigned);
+       assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
+           "Unexpected mallctl() error");
+
+       miblen = sizeof(mib) / sizeof(size_t);
+       assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
+           "Unexpected mallctlnametomib() error");
+       mib[2] = nhchunks - 1;
+
+       sz = sizeof(size_t);
+       assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0,
+           "Unexpected mallctlbymib() error");
+
+       return (max_size_class);
+}
+
+TEST_BEGIN(test_size_classes)
+{
+       size_t size_class, max_size_class;
+       szind_t index, max_index;
+
+       max_size_class = get_max_size_class();
+       max_index = size2index(max_size_class);
+
+       for (index = 0, size_class = index2size(index); index < max_index ||
+           size_class < max_size_class; index++, size_class =
+           index2size(index)) {
+               assert_true(index < max_index,
+                   "Loop conditionals should be equivalent; index=%u, "
+                   "size_class=%zu (%#zx)", index, size_class, size_class);
+               assert_true(size_class < max_size_class,
+                   "Loop conditionals should be equivalent; index=%u, "
+                   "size_class=%zu (%#zx)", index, size_class, size_class);
+
+               assert_u_eq(index, size2index(size_class),
+                   "size2index() does not reverse index2size(): index=%u -->"
+                   " size_class=%zu --> index=%u --> size_class=%zu", index,
+                   size_class, size2index(size_class),
+                   index2size(size2index(size_class)));
+               assert_zu_eq(size_class, index2size(size2index(size_class)),
+                   "index2size() does not reverse size2index(): index=%u -->"
+                   " size_class=%zu --> index=%u --> size_class=%zu", index,
+                   size_class, size2index(size_class),
+                   index2size(size2index(size_class)));
+
+               assert_u_eq(index+1, size2index(size_class+1),
+                   "Next size_class does not round up properly");
+
+               assert_zu_eq(size_class, (index > 0) ?
+                   s2u(index2size(index-1)+1) : s2u(1),
+                   "s2u() does not round up to size class");
+               assert_zu_eq(size_class, s2u(size_class-1),
+                   "s2u() does not round up to size class");
+               assert_zu_eq(size_class, s2u(size_class),
+                   "s2u() does not compute same size class");
+               assert_zu_eq(s2u(size_class+1), index2size(index+1),
+                   "s2u() does not round up to next size class");
+       }
+
+       assert_u_eq(index, size2index(index2size(index)),
+           "size2index() does not reverse index2size()");
+       assert_zu_eq(max_size_class, index2size(size2index(max_size_class)),
+           "index2size() does not reverse size2index()");
+
+       assert_zu_eq(size_class, s2u(index2size(index-1)+1),
+           "s2u() does not round up to size class");
+       assert_zu_eq(size_class, s2u(size_class-1),
+           "s2u() does not round up to size class");
+       assert_zu_eq(size_class, s2u(size_class),
+           "s2u() does not compute same size class");
+}
+TEST_END
+
+TEST_BEGIN(test_overflow)
+{
+       size_t max_size_class;
+
+       max_size_class = get_max_size_class();
+
+       assert_u_ge(size2index(max_size_class+1), NSIZES,
+           "size2index() should return >= NSIZES on overflow");
+       assert_u_ge(size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
+           "size2index() should return >= NSIZES on overflow");
+       assert_u_ge(size2index(SIZE_T_MAX), NSIZES,
+           "size2index() should return >= NSIZES on overflow");
+
+       assert_zu_gt(s2u(max_size_class+1), HUGE_MAXCLASS,
+           "s2u() should return > HUGE_MAXCLASS for unsupported size");
+       assert_zu_gt(s2u(ZU(PTRDIFF_MAX)+1), HUGE_MAXCLASS,
+           "s2u() should return > HUGE_MAXCLASS for unsupported size");
+       assert_zu_eq(s2u(SIZE_T_MAX), 0,
+           "s2u() should return 0 on overflow");
+}
+TEST_END
+
+int
+main(void)
+{
+
+       return (test(
+           test_size_classes,
+           test_overflow));
+}
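
size2index()/index2size()/s2u() are internal, but the round-trip behavior this test pins down has a public-API counterpart: nallocx() reports the size class a request would be rounded up to, so it rounds up, is monotone in the request, and is idempotent. A small hedged check using only the public interface:

#include <assert.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t sz, prev = 0;

	for (sz = 1; sz <= 4096; sz++) {
		size_t usize = nallocx(sz, 0);

		assert(usize >= sz);			/* rounds up */
		assert(usize >= prev);			/* monotone */
		assert(nallocx(usize, 0) == usize);	/* idempotent */
		prev = usize;
	}
	return (0);
}
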
diff --git a/src/jemalloc/test/unit/smoothstep.c b/src/jemalloc/test/unit/smoothstep.c
new file mode 100644 (file)
index 0000000..4cfb213
--- /dev/null
@@ -0,0 +1,106 @@
+#include "test/jemalloc_test.h"
+
+static const uint64_t smoothstep_tab[] = {
+#define        STEP(step, h, x, y) \
+       h,
+       SMOOTHSTEP
+#undef STEP
+};
+
+TEST_BEGIN(test_smoothstep_integral)
+{
+       uint64_t sum, min, max;
+       unsigned i;
+
+       /*
+        * The integral of smoothstep in the [0..1] range equals 1/2.  Verify
+        * that the fixed point representation's integral is no more than
+        * rounding error distant from 1/2.  Regarding rounding, each table
+        * element is rounded down to the nearest fixed point value, so the
+        * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps.
+        */
+       sum = 0;
+       for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
+               sum += smoothstep_tab[i];
+
+       max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1);
+       min = max - SMOOTHSTEP_NSTEPS;
+
+       assert_u64_ge(sum, min,
+           "Integral too small, even accounting for truncation");
+       assert_u64_le(sum, max, "Integral exceeds 1/2");
+       if (false) {
+               malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n",
+                   max - sum, SMOOTHSTEP_NSTEPS);
+       }
+}
+TEST_END
+
+TEST_BEGIN(test_smoothstep_monotonic)
+{
+       uint64_t prev_h;
+       unsigned i;
+
+       /*
+        * The smoothstep function is monotonic in [0..1], i.e. its slope is
+        * non-negative.  In practice we want to parametrize table generation
+        * such that piecewise slope is greater than zero, but do not require
+        * that here.
+        */
+       prev_h = 0;
+       for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+               uint64_t h = smoothstep_tab[i];
+               assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
+               prev_h = h;
+       }
+       assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
+           (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1");
+}
+TEST_END
+
+TEST_BEGIN(test_smoothstep_slope)
+{
+       uint64_t prev_h, prev_delta;
+       unsigned i;
+
+       /*
+        * The smoothstep slope strictly increases until x=0.5, and then
+        * strictly decreases until x=1.0.  Verify the slightly weaker
+        * requirement of monotonicity, so that inadequate table precision does
+        * not cause false test failures.
+        */
+       prev_h = 0;
+       prev_delta = 0;
+       for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) {
+               uint64_t h = smoothstep_tab[i];
+               uint64_t delta = h - prev_h;
+               assert_u64_ge(delta, prev_delta,
+                   "Slope must monotonically increase in 0.0 <= x <= 0.5, "
+                   "i=%u", i);
+               prev_h = h;
+               prev_delta = delta;
+       }
+
+       prev_h = KQU(1) << SMOOTHSTEP_BFP;
+       prev_delta = 0;
+       for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) {
+               uint64_t h = smoothstep_tab[i];
+               uint64_t delta = prev_h - h;
+               assert_u64_ge(delta, prev_delta,
+                   "Slope must monotonically decrease in 0.5 <= x <= 1.0, "
+                   "i=%u", i);
+               prev_h = h;
+               prev_delta = delta;
+       }
+}
+TEST_END
+
+int
+main(void)
+{
+
+       return (test(
+           test_smoothstep_integral,
+           test_smoothstep_monotonic,
+           test_smoothstep_slope));
+}
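
A short worked check of the bound used in test_smoothstep_integral, assuming the classic cubic smoothstep s(x) = 3x^2 - 2x^3 (the argument only needs the symmetry s(x) + s(1-x) = 1, which the higher-order variants share):

    \int_0^1 (3x^2 - 2x^3)\,dx = \left[ x^3 - \tfrac{x^4}{2} \right]_0^1 = \tfrac{1}{2}

Pairing table entries s(i/n) with s((n-i)/n) and writing n = SMOOTHSTEP_NSTEPS gives the exact (untruncated) sum

    \sum_{i=1}^{n} s(i/n) = \frac{n-1}{2} + s(1) = \frac{n+1}{2},

which in fixed point is (SMOOTHSTEP_NSTEPS+1) * 2^(SMOOTHSTEP_BFP-1), i.e. the test's max; each tabulated entry is rounded down by less than one ulp, which yields min = max - SMOOTHSTEP_NSTEPS.
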
index 78c78cd501a8bc0a2d6592126b32dd2f4003851d..6e803160405e0d406304a0411f7a1ba3ca79cada 100644 (file)
@@ -3,7 +3,7 @@
 TEST_BEGIN(test_stats_summary)
 {
        size_t *cactive;
-       size_t sz, allocated, active, mapped;
+       size_t sz, allocated, active, resident, mapped;
        int expected = config_stats ? 0 : ENOENT;
 
        sz = sizeof(cactive);
@@ -15,6 +15,8 @@ TEST_BEGIN(test_stats_summary)
            expected, "Unexpected mallctl() result");
        assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected,
            "Unexpected mallctl() result");
+       assert_d_eq(mallctl("stats.resident", &resident, &sz, NULL, 0),
+           expected, "Unexpected mallctl() result");
        assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected,
            "Unexpected mallctl() result");
 
@@ -23,34 +25,10 @@ TEST_BEGIN(test_stats_summary)
                    "active should be no larger than cactive");
                assert_zu_le(allocated, active,
                    "allocated should be no larger than active");
-               assert_zu_le(active, mapped,
-                   "active should be no larger than mapped");
-       }
-}
-TEST_END
-
-TEST_BEGIN(test_stats_chunks)
-{
-       size_t current, high;
-       uint64_t total;
-       size_t sz;
-       int expected = config_stats ? 0 : ENOENT;
-
-       sz = sizeof(size_t);
-       assert_d_eq(mallctl("stats.chunks.current", &current, &sz, NULL, 0),
-           expected, "Unexpected mallctl() result");
-       sz = sizeof(uint64_t);
-       assert_d_eq(mallctl("stats.chunks.total", &total, &sz, NULL, 0),
-           expected, "Unexpected mallctl() result");
-       sz = sizeof(size_t);
-       assert_d_eq(mallctl("stats.chunks.high", &high, &sz, NULL, 0), expected,
-           "Unexpected mallctl() result");
-
-       if (config_stats) {
-               assert_zu_le(current, high,
-                   "current should be no larger than high");
-               assert_u64_le((uint64_t)high, total,
-                   "high should be no larger than total");
+               assert_zu_lt(active, resident,
+                   "active should be less than resident");
+               assert_zu_lt(active, mapped,
+                   "active should be less than mapped");
        }
 }
 TEST_END
@@ -64,7 +42,7 @@ TEST_BEGIN(test_stats_huge)
        size_t sz;
        int expected = config_stats ? 0 : ENOENT;
 
-       p = mallocx(arena_maxclass+1, 0);
+       p = mallocx(large_maxclass+1, 0);
        assert_ptr_not_null(p, "Unexpected mallocx() failure");
 
        assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
@@ -97,7 +75,7 @@ TEST_END
 TEST_BEGIN(test_stats_arenas_summary)
 {
        unsigned arena;
-       void *little, *large;
+       void *little, *large, *huge;
        uint64_t epoch;
        size_t sz;
        int expected = config_stats ? 0 : ENOENT;
@@ -110,8 +88,14 @@ TEST_BEGIN(test_stats_arenas_summary)
 
        little = mallocx(SMALL_MAXCLASS, 0);
        assert_ptr_not_null(little, "Unexpected mallocx() failure");
-       large = mallocx(arena_maxclass, 0);
+       large = mallocx(large_maxclass, 0);
        assert_ptr_not_null(large, "Unexpected mallocx() failure");
+       huge = mallocx(chunksize, 0);
+       assert_ptr_not_null(huge, "Unexpected mallocx() failure");
+
+       dallocx(little, 0);
+       dallocx(large, 0);
+       dallocx(huge, 0);
 
        assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
            "Unexpected mallctl() failure");
@@ -136,9 +120,6 @@ TEST_BEGIN(test_stats_arenas_summary)
                assert_u64_le(nmadvise, purged,
                    "nmadvise should be no greater than purged");
        }
-
-       dallocx(little, 0);
-       dallocx(large, 0);
 }
 TEST_END
 
@@ -219,7 +200,7 @@ TEST_BEGIN(test_stats_arenas_large)
        assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
            0, "Unexpected mallctl() failure");
 
-       p = mallocx(arena_maxclass, 0);
+       p = mallocx(large_maxclass, 0);
        assert_ptr_not_null(p, "Unexpected mallocx() failure");
 
        assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
@@ -251,11 +232,51 @@ TEST_BEGIN(test_stats_arenas_large)
 }
 TEST_END
 
+TEST_BEGIN(test_stats_arenas_huge)
+{
+       unsigned arena;
+       void *p;
+       size_t sz, allocated;
+       uint64_t epoch, nmalloc, ndalloc;
+       int expected = config_stats ? 0 : ENOENT;
+
+       arena = 0;
+       assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+           0, "Unexpected mallctl() failure");
+
+       p = mallocx(chunksize, 0);
+       assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+       assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+           "Unexpected mallctl() failure");
+
+       sz = sizeof(size_t);
+       assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
+           NULL, 0), expected, "Unexpected mallctl() result");
+       sz = sizeof(uint64_t);
+       assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz,
+           NULL, 0), expected, "Unexpected mallctl() result");
+       assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz,
+           NULL, 0), expected, "Unexpected mallctl() result");
+
+       if (config_stats) {
+               assert_zu_gt(allocated, 0,
+                   "allocated should be greater than zero");
+               assert_zu_gt(nmalloc, 0,
+                   "nmalloc should be greater than zero");
+               assert_zu_ge(nmalloc, ndalloc,
+                   "nmalloc should be at least as large as ndalloc");
+       }
+
+       dallocx(p, 0);
+}
+TEST_END
+
 TEST_BEGIN(test_stats_arenas_bins)
 {
        unsigned arena;
        void *p;
-       size_t sz, allocated, curruns;
+       size_t sz, curruns, curregs;
        uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
        uint64_t nruns, nreruns;
        int expected = config_stats ? 0 : ENOENT;
@@ -273,9 +294,6 @@ TEST_BEGIN(test_stats_arenas_bins)
        assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
            "Unexpected mallctl() failure");
 
-       sz = sizeof(size_t);
-       assert_d_eq(mallctl("stats.arenas.0.bins.0.allocated", &allocated, &sz,
-           NULL, 0), expected, "Unexpected mallctl() result");
        sz = sizeof(uint64_t);
        assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz,
            NULL, 0), expected, "Unexpected mallctl() result");
@@ -283,7 +301,11 @@ TEST_BEGIN(test_stats_arenas_bins)
            NULL, 0), expected, "Unexpected mallctl() result");
        assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz,
            NULL, 0), expected, "Unexpected mallctl() result");
+       sz = sizeof(size_t);
+       assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", &curregs, &sz,
+           NULL, 0), expected, "Unexpected mallctl() result");
 
+       sz = sizeof(uint64_t);
        assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz,
            NULL, 0), config_tcache ? expected : ENOENT,
            "Unexpected mallctl() result");
@@ -300,14 +322,14 @@ TEST_BEGIN(test_stats_arenas_bins)
            NULL, 0), expected, "Unexpected mallctl() result");
 
        if (config_stats) {
-               assert_zu_gt(allocated, 0,
-                   "allocated should be greater than zero");
                assert_u64_gt(nmalloc, 0,
                    "nmalloc should be greater than zero");
                assert_u64_ge(nmalloc, ndalloc,
                    "nmalloc should be at least as large as ndalloc");
                assert_u64_gt(nrequests, 0,
                    "nrequests should be greater than zero");
+               assert_zu_gt(curregs, 0,
+                   "allocated should be greater than zero");
                if (config_tcache) {
                        assert_u64_gt(nfills, 0,
                            "At least one fill should have occurred");
@@ -336,7 +358,7 @@ TEST_BEGIN(test_stats_arenas_lruns)
        assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
            0, "Unexpected mallctl() failure");
 
-       p = mallocx(SMALL_MAXCLASS+1, 0);
+       p = mallocx(LARGE_MINCLASS, 0);
        assert_ptr_not_null(p, "Unexpected mallocx() failure");
 
        assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
@@ -368,17 +390,58 @@ TEST_BEGIN(test_stats_arenas_lruns)
 }
 TEST_END
 
+TEST_BEGIN(test_stats_arenas_hchunks)
+{
+       unsigned arena;
+       void *p;
+       uint64_t epoch, nmalloc, ndalloc;
+       size_t curhchunks, sz;
+       int expected = config_stats ? 0 : ENOENT;
+
+       arena = 0;
+       assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+           0, "Unexpected mallctl() failure");
+
+       p = mallocx(chunksize, 0);
+       assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+       assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+           "Unexpected mallctl() failure");
+
+       sz = sizeof(uint64_t);
+       assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", &nmalloc, &sz,
+           NULL, 0), expected, "Unexpected mallctl() result");
+       assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", &ndalloc, &sz,
+           NULL, 0), expected, "Unexpected mallctl() result");
+       sz = sizeof(size_t);
+       assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", &curhchunks,
+           &sz, NULL, 0), expected, "Unexpected mallctl() result");
+
+       if (config_stats) {
+               assert_u64_gt(nmalloc, 0,
+                   "nmalloc should be greater than zero");
+               assert_u64_ge(nmalloc, ndalloc,
+                   "nmalloc should be at least as large as ndalloc");
+               assert_u64_gt(curhchunks, 0,
+                   "At least one chunk should be currently allocated");
+       }
+
+       dallocx(p, 0);
+}
+TEST_END
+
 int
 main(void)
 {
 
        return (test(
            test_stats_summary,
-           test_stats_chunks,
            test_stats_huge,
            test_stats_arenas_summary,
            test_stats_arenas_small,
            test_stats_arenas_large,
+           test_stats_arenas_huge,
            test_stats_arenas_bins,
-           test_stats_arenas_lruns));
+           test_stats_arenas_lruns,
+           test_stats_arenas_hchunks));
 }
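
The stats tests all follow the same pattern: bump the "epoch" control so jemalloc refreshes its statistics snapshot, then read the counters of interest. A minimal public-API sketch of that pattern, including the newly asserted "stats.resident" (these controls return ENOENT when jemalloc is built without statistics support, which the tests' expected variable accounts for):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	uint64_t epoch = 1;
	size_t allocated, active, resident, mapped;
	size_t sz = sizeof(size_t);

	/* Refresh the statistics snapshot. */
	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));

	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
	mallctl("stats.active", &active, &sz, NULL, 0);
	mallctl("stats.resident", &resident, &sz, NULL, 0);
	mallctl("stats.mapped", &mapped, &sz, NULL, 0);

	/* Per the asserts above: allocated <= active < resident, mapped. */
	printf("allocated=%zu active=%zu resident=%zu mapped=%zu\n",
	    allocated, active, resident, mapped);
	return (0);
}
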
diff --git a/src/jemalloc/test/unit/ticker.c b/src/jemalloc/test/unit/ticker.c
new file mode 100644 (file)
index 0000000..e737020
--- /dev/null
@@ -0,0 +1,76 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_ticker_tick)
+{
+#define        NREPS 2
+#define        NTICKS 3
+       ticker_t ticker;
+       int32_t i, j;
+
+       ticker_init(&ticker, NTICKS);
+       for (i = 0; i < NREPS; i++) {
+               for (j = 0; j < NTICKS; j++) {
+                       assert_u_eq(ticker_read(&ticker), NTICKS - j,
+                           "Unexpected ticker value (i=%d, j=%d)", i, j);
+                       assert_false(ticker_tick(&ticker),
+                           "Unexpected ticker fire (i=%d, j=%d)", i, j);
+               }
+               assert_u32_eq(ticker_read(&ticker), 0,
+                   "Expected ticker depletion");
+               assert_true(ticker_tick(&ticker),
+                   "Expected ticker fire (i=%d)", i);
+               assert_u32_eq(ticker_read(&ticker), NTICKS,
+                   "Expected ticker reset");
+       }
+#undef NTICKS
+}
+TEST_END
+
+TEST_BEGIN(test_ticker_ticks)
+{
+#define        NTICKS 3
+       ticker_t ticker;
+
+       ticker_init(&ticker, NTICKS);
+
+       assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
+       assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire");
+       assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value");
+       assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire");
+       assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
+
+       assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire");
+       assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
+#undef NTICKS
+}
+TEST_END
+
+TEST_BEGIN(test_ticker_copy)
+{
+#define        NTICKS 3
+       ticker_t ta, tb;
+
+       ticker_init(&ta, NTICKS);
+       ticker_copy(&tb, &ta);
+       assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
+       assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire");
+       assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
+
+       ticker_tick(&ta);
+       ticker_copy(&tb, &ta);
+       assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value");
+       assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire");
+       assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
+#undef NTICKS
+}
+TEST_END
+
+int
+main(void)
+{
+
+       return (test(
+           test_ticker_tick,
+           test_ticker_ticks,
+           test_ticker_copy));
+}
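
ticker_t is internal, so the behavior this new test pins down is easiest to read off a hypothetical re-implementation written only to match those asserts: the counter starts at nticks, each tick spends budget, and once the budget is exhausted the ticker "fires" (returns true) and reloads.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
	int32_t tick;	/* remaining budget */
	int32_t nticks;	/* reload value */
} ticker_example_t;

static void
ticker_example_init(ticker_example_t *t, int32_t nticks)
{
	t->tick = nticks;
	t->nticks = nticks;
}

static int32_t
ticker_example_read(const ticker_example_t *t)
{
	return (t->tick);
}

static bool
ticker_example_ticks(ticker_example_t *t, int32_t nticks)
{
	if (t->tick < nticks) {
		t->tick = t->nticks;	/* fire and reload */
		return (true);
	}
	t->tick -= nticks;
	return (false);
}

static bool
ticker_example_tick(ticker_example_t *t)
{
	return (ticker_example_ticks(t, 1));
}

int
main(void)
{
	ticker_example_t t;

	ticker_example_init(&t, 3);
	assert(!ticker_example_tick(&t) && ticker_example_read(&t) == 2);
	assert(!ticker_example_ticks(&t, 2) && ticker_example_read(&t) == 0);
	assert(ticker_example_tick(&t) && ticker_example_read(&t) == 3);
	return (0);
}
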
index eb1c597699003a26484819189ecdedb477806b91..8be787fda935cc8e88e220f122b99c03eeeda423 100644 (file)
@@ -6,6 +6,7 @@ typedef unsigned int data_t;
 
 static bool data_cleanup_executed;
 
+malloc_tsd_types(data_, data_t)
 malloc_tsd_protos(, data_, data_t)
 
 void
@@ -55,9 +56,14 @@ static void *
 thd_start(void *arg)
 {
        data_t d = (data_t)(uintptr_t)arg;
+       void *p;
+
        assert_x_eq(*data_tsd_get(), DATA_INIT,
            "Initial tsd get should return initialization value");
 
+       p = malloc(1);
+       assert_ptr_not_null(p, "Unexpected malloc() failure");
+
        data_tsd_set(&d);
        assert_x_eq(*data_tsd_get(), d,
            "After tsd set, tsd get should return value that was set");
@@ -66,6 +72,7 @@ thd_start(void *arg)
        assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg,
            "Resetting local data should have no effect on tsd");
 
+       free(p);
        return (NULL);
 }
 
index 8ab39a4581935ba391ade45059dbb50d2cf48673..2f65aad25b71c9eba4c901f38404741768888a40 100644 (file)
@@ -1,33 +1,54 @@
 #include "test/jemalloc_test.h"
 
-TEST_BEGIN(test_pow2_ceil)
+#define        TEST_POW2_CEIL(t, suf, pri) do {                                \
+       unsigned i, pow2;                                               \
+       t x;                                                            \
+                                                                       \
+       assert_zu_eq(pow2_ceil_##suf(0), 0, "Unexpected result");       \
+                                                                       \
+       for (i = 0; i < sizeof(t) * 8; i++) {                           \
+               assert_zu_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) << i, \
+                   "Unexpected result");                               \
+       }                                                               \
+                                                                       \
+       for (i = 2; i < sizeof(t) * 8; i++) {                           \
+               assert_zu_eq(pow2_ceil_##suf((((t)1) << i) - 1),        \
+                   ((t)1) << i, "Unexpected result");                  \
+       }                                                               \
+                                                                       \
+       for (i = 0; i < sizeof(t) * 8 - 1; i++) {                       \
+               assert_zu_eq(pow2_ceil_##suf((((t)1) << i) + 1),        \
+                   ((t)1) << (i+1), "Unexpected result");              \
+       }                                                               \
+                                                                       \
+       for (pow2 = 1; pow2 < 25; pow2++) {                             \
+               for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \
+                   x++) {                                              \
+                       assert_zu_eq(pow2_ceil_##suf(x),                \
+                           ((t)1) << pow2,                             \
+                           "Unexpected result, x=%"pri, x);            \
+               }                                                       \
+       }                                                               \
+} while (0)
+
+TEST_BEGIN(test_pow2_ceil_u64)
 {
-       unsigned i, pow2;
-       size_t x;
 
-       assert_zu_eq(pow2_ceil(0), 0, "Unexpected result");
+       TEST_POW2_CEIL(uint64_t, u64, FMTu64);
+}
+TEST_END
 
-       for (i = 0; i < sizeof(size_t) * 8; i++) {
-               assert_zu_eq(pow2_ceil(ZU(1) << i), ZU(1) << i,
-                   "Unexpected result");
-       }
+TEST_BEGIN(test_pow2_ceil_u32)
+{
 
-       for (i = 2; i < sizeof(size_t) * 8; i++) {
-               assert_zu_eq(pow2_ceil((ZU(1) << i) - 1), ZU(1) << i,
-                   "Unexpected result");
-       }
+       TEST_POW2_CEIL(uint32_t, u32, FMTu32);
+}
+TEST_END
 
-       for (i = 0; i < sizeof(size_t) * 8 - 1; i++) {
-               assert_zu_eq(pow2_ceil((ZU(1) << i) + 1), ZU(1) << (i+1),
-                   "Unexpected result");
-       }
+TEST_BEGIN(test_pow2_ceil_zu)
+{
 
-       for (pow2 = 1; pow2 < 25; pow2++) {
-               for (x = (ZU(1) << (pow2-1)) + 1; x <= ZU(1) << pow2; x++) {
-                       assert_zu_eq(pow2_ceil(x), ZU(1) << pow2,
-                           "Unexpected result, x=%zu", x);
-               }
-       }
+       TEST_POW2_CEIL(size_t, zu, "zu");
 }
 TEST_END
 
@@ -286,7 +307,9 @@ main(void)
 {
 
        return (test(
-           test_pow2_ceil,
+           test_pow2_ceil_u64,
+           test_pow2_ceil_u32,
+           test_pow2_ceil_zu,
            test_malloc_strtoumax_no_endptr,
            test_malloc_strtoumax,
            test_malloc_snprintf_truncated,
index 65a8f0c9c3217cc4dcc624fdd2fd961f2274afb3..93afc2b871020354ea066d52d0c29fe13fc8622e 100644 (file)
@@ -55,7 +55,7 @@ TEST_BEGIN(test_zero_large)
 {
 
        test_skip_if(!config_fill);
-       test_zero(SMALL_MAXCLASS+1, arena_maxclass);
+       test_zero(SMALL_MAXCLASS+1, large_maxclass);
 }
 TEST_END
 
@@ -63,7 +63,7 @@ TEST_BEGIN(test_zero_huge)
 {
 
        test_skip_if(!config_fill);
-       test_zero(arena_maxclass+1, chunksize*2);
+       test_zero(large_maxclass+1, chunksize*2);
 }
 TEST_END
 
index 74325afaebd48ddd18ada9c630d7dd3d9169bee7..4aba567fa1c202215b361bba0db7ea1b6f913124 100644 (file)
@@ -107,7 +107,7 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize;
 /// use std::thread;
 ///
 /// fn main() {
-///     let numbers: Vec<_> = (0..100u32).collect();
+///     let numbers: Vec<_> = (0..100).collect();
 ///     let shared_numbers = Arc::new(numbers);
 ///
 ///     for _ in 0..10 {
@@ -124,9 +124,7 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize;
 #[unsafe_no_drop_flag]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Arc<T: ?Sized> {
-    // FIXME #12808: strange name to try to avoid interfering with
-    // field accesses of the contained type via Deref
-    _ptr: Shared<ArcInner<T>>,
+    ptr: Shared<ArcInner<T>>,
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -144,9 +142,7 @@ impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
 #[unsafe_no_drop_flag]
 #[stable(feature = "arc_weak", since = "1.4.0")]
 pub struct Weak<T: ?Sized> {
-    // FIXME #12808: strange name to try to avoid interfering with
-    // field accesses of the contained type via Deref
-    _ptr: Shared<ArcInner<T>>,
+    ptr: Shared<ArcInner<T>>,
 }
 
 #[stable(feature = "arc_weak", since = "1.4.0")]
@@ -198,14 +194,15 @@ impl<T> Arc<T> {
             weak: atomic::AtomicUsize::new(1),
             data: data,
         };
-        Arc { _ptr: unsafe { Shared::new(Box::into_raw(x)) } }
+        Arc { ptr: unsafe { Shared::new(Box::into_raw(x)) } }
     }
 
-    /// Unwraps the contained value if the `Arc<T>` has only one strong reference.
-    /// This will succeed even if there are outstanding weak references.
+    /// Unwraps the contained value if the `Arc<T>` has exactly one strong reference.
     ///
     /// Otherwise, an `Err` is returned with the same `Arc<T>`.
     ///
+    /// This will succeed even if there are outstanding weak references.
+    ///
     /// # Examples
     ///
     /// ```
@@ -222,18 +219,18 @@ impl<T> Arc<T> {
     #[stable(feature = "arc_unique", since = "1.4.0")]
     pub fn try_unwrap(this: Self) -> Result<T, Self> {
         // See `drop` for why all these atomics are like this
-        if this.inner().strong.compare_and_swap(1, 0, Release) != 1 {
+        if this.inner().strong.compare_exchange(1, 0, Release, Relaxed).is_err() {
             return Err(this);
         }
 
         atomic::fence(Acquire);
 
         unsafe {
-            let ptr = *this._ptr;
+            let ptr = *this.ptr;
             let elem = ptr::read(&(*ptr).data);
 
             // Make a weak pointer to clean up the implicit strong-weak reference
-            let _weak = Weak { _ptr: this._ptr };
+            let _weak = Weak { ptr: this.ptr };
             mem::forget(this);
 
             Ok(elem)
@@ -255,13 +252,14 @@ impl<T: ?Sized> Arc<T> {
     /// ```
     #[stable(feature = "arc_weak", since = "1.4.0")]
     pub fn downgrade(this: &Self) -> Weak<T> {
-        loop {
-            // This Relaxed is OK because we're checking the value in the CAS
-            // below.
-            let cur = this.inner().weak.load(Relaxed);
+        // This Relaxed is OK because we're checking the value in the CAS
+        // below.
+        let mut cur = this.inner().weak.load(Relaxed);
 
+        loop {
             // check if the weak counter is currently "locked"; if so, spin.
             if cur == usize::MAX {
+                cur = this.inner().weak.load(Relaxed);
                 continue;
             }
 
@@ -272,8 +270,9 @@ impl<T: ?Sized> Arc<T> {
             // Unlike with Clone(), we need this to be an Acquire read to
             // synchronize with the write coming from `is_unique`, so that the
             // events prior to that write happen before this read.
-            if this.inner().weak.compare_and_swap(cur, cur + 1, Acquire) == cur {
-                return Weak { _ptr: this._ptr };
+            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
+                Ok(_) => return Weak { ptr: this.ptr },
+                Err(old) => cur = old,
             }
         }
     }
@@ -301,13 +300,13 @@ impl<T: ?Sized> Arc<T> {
         // `ArcInner` structure itself is `Sync` because the inner data is
         // `Sync` as well, so we're ok loaning out an immutable pointer to these
         // contents.
-        unsafe { &**self._ptr }
+        unsafe { &**self.ptr }
     }
 
     // Non-inlined part of `drop`.
     #[inline(never)]
     unsafe fn drop_slow(&mut self) {
-        let ptr = *self._ptr;
+        let ptr = *self.ptr;
 
         // Destroy the data at this time, even though we may not free the box
         // allocation itself (there may still be weak pointers lying around).
@@ -365,7 +364,7 @@ impl<T: ?Sized> Clone for Arc<T> {
             }
         }
 
-        Arc { _ptr: self._ptr }
+        Arc { ptr: self.ptr }
     }
 }
 
@@ -415,7 +414,7 @@ impl<T: Clone> Arc<T> {
         // before release writes (i.e., decrements) to `strong`. Since we hold a
         // weak count, there's no chance the ArcInner itself could be
         // deallocated.
-        if this.inner().strong.compare_and_swap(1, 0, Acquire) != 1 {
+        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
             // Another strong pointer exists; clone
             *this = Arc::new((**this).clone());
         } else if this.inner().weak.load(Relaxed) != 1 {
@@ -433,7 +432,7 @@ impl<T: Clone> Arc<T> {
 
             // Materialize our own implicit weak pointer, so that it can clean
             // up the ArcInner as needed.
-            let weak = Weak { _ptr: this._ptr };
+            let weak = Weak { ptr: this.ptr };
 
             // mark the data itself as already deallocated
             unsafe {
@@ -441,7 +440,7 @@ impl<T: Clone> Arc<T> {
                 // here (due to zeroing) because data is no longer accessed by
                 // other threads (due to there being no more strong refs at this
                 // point).
-                let mut swap = Arc::new(ptr::read(&(**weak._ptr).data));
+                let mut swap = Arc::new(ptr::read(&(**weak.ptr).data));
                 mem::swap(this, &mut swap);
                 mem::forget(swap);
             }
@@ -454,7 +453,7 @@ impl<T: Clone> Arc<T> {
         // As with `get_mut()`, the unsafety is ok because our reference was
         // either unique to begin with, or became one upon cloning the contents.
         unsafe {
-            let inner = &mut **this._ptr;
+            let inner = &mut **this.ptr;
             &mut inner.data
         }
     }
@@ -486,7 +485,7 @@ impl<T: ?Sized> Arc<T> {
             // the Arc itself to be `mut`, so we're returning the only possible
             // reference to the inner data.
             unsafe {
-                let inner = &mut **this._ptr;
+                let inner = &mut **this.ptr;
                 Some(&mut inner.data)
             }
         } else {
@@ -505,7 +504,7 @@ impl<T: ?Sized> Arc<T> {
         // The acquire label here ensures a happens-before relationship with any
         // writes to `strong` prior to decrements of the `weak` count (via drop,
         // which uses Release).
-        if self.inner().weak.compare_and_swap(1, usize::MAX, Acquire) == 1 {
+        if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
             // Due to the previous acquire read, this will observe any writes to
             // `strong` that were due to upgrading weak pointers; only strong
             // clones remain, which require that the strong count is > 1 anyway.
@@ -555,7 +554,7 @@ impl<T: ?Sized> Drop for Arc<T> {
         // This structure has #[unsafe_no_drop_flag], so this drop glue may run
         // more than once (but it is guaranteed to be zeroed after the first if
         // it's run more than once)
-        let thin = *self._ptr as *const ();
+        let thin = *self.ptr as *const ();
 
         if thin as usize == mem::POST_DROP_USIZE {
             return;
@@ -617,12 +616,14 @@ impl<T: ?Sized> Weak<T> {
         // We use a CAS loop to increment the strong count instead of a
         // fetch_add because once the count hits 0 it must never be above 0.
         let inner = self.inner();
+
+        // Relaxed load because any write of 0 that we can observe
+        // leaves the field in a permanently zero state (so a
+        // "stale" read of 0 is fine), and any other value is
+        // confirmed via the CAS below.
+        let mut n = inner.strong.load(Relaxed);
+
         loop {
-            // Relaxed load because any write of 0 that we can observe
-            // leaves the field in a permanently zero state (so a
-            // "stale" read of 0 is fine), and any other value is
-            // confirmed via the CAS below.
-            let n = inner.strong.load(Relaxed);
             if n == 0 {
                 return None;
             }
@@ -633,9 +634,9 @@ impl<T: ?Sized> Weak<T> {
             }
 
             // Relaxed is valid for the same reason it is on Arc's Clone impl
-            let old = inner.strong.compare_and_swap(n, n + 1, Relaxed);
-            if old == n {
-                return Some(Arc { _ptr: self._ptr });
+            match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) {
+                Ok(_) => return Some(Arc { ptr: self.ptr }),
+                Err(old) => n = old,
             }
         }
     }
@@ -643,7 +644,7 @@ impl<T: ?Sized> Weak<T> {
     #[inline]
     fn inner(&self) -> &ArcInner<T> {
         // See comments above for why this is "safe"
-        unsafe { &**self._ptr }
+        unsafe { &**self.ptr }
     }
 }
 
@@ -677,7 +678,7 @@ impl<T: ?Sized> Clone for Weak<T> {
             }
         }
 
-        return Weak { _ptr: self._ptr };
+        return Weak { ptr: self.ptr };
     }
 }
 
@@ -709,7 +710,7 @@ impl<T: ?Sized> Drop for Weak<T> {
     /// } // implicit drop
     /// ```
     fn drop(&mut self) {
-        let ptr = *self._ptr;
+        let ptr = *self.ptr;
         let thin = ptr as *const ();
 
         // see comments above for why this check is here
@@ -881,7 +882,7 @@ impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> fmt::Pointer for Arc<T> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Pointer::fmt(&*self._ptr, f)
+        fmt::Pointer::fmt(&*self.ptr, f)
     }
 }
 
@@ -926,7 +927,7 @@ impl<T> Weak<T> {
                issue = "30425")]
     pub fn new() -> Weak<T> {
         unsafe {
-            Weak { _ptr: Shared::new(Box::into_raw(box ArcInner {
+            Weak { ptr: Shared::new(Box::into_raw(box ArcInner {
                 strong: atomic::AtomicUsize::new(0),
                 weak: atomic::AtomicUsize::new(1),
                 data: uninitialized(),
@@ -1117,7 +1118,7 @@ mod tests {
 
     #[test]
     fn test_strong_count() {
-        let a = Arc::new(0u32);
+        let a = Arc::new(0);
         assert!(Arc::strong_count(&a) == 1);
         let w = Arc::downgrade(&a);
         assert!(Arc::strong_count(&a) == 1);
@@ -1134,7 +1135,7 @@ mod tests {
 
     #[test]
     fn test_weak_count() {
-        let a = Arc::new(0u32);
+        let a = Arc::new(0);
         assert!(Arc::strong_count(&a) == 1);
         assert!(Arc::weak_count(&a) == 0);
         let w = Arc::downgrade(&a);
@@ -1160,7 +1161,7 @@ mod tests {
 
     #[test]
     fn show_arc() {
-        let a = Arc::new(5u32);
+        let a = Arc::new(5);
         assert_eq!(format!("{:?}", a), "5");
     }
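A minimal standalone sketch of the retry pattern `upgrade()` now uses; the helper name `try_increment_nonzero` is illustrative only, and within this tree `compare_exchange_weak` still sits behind the `extended_compare_and_swap` feature gate enabled further down (it is stable in later releases):

```
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::Relaxed;

// compare_exchange_weak returns the observed value on failure, so the loop
// can reuse it instead of reloading; spurious failures simply retry.
fn try_increment_nonzero(count: &AtomicUsize) -> bool {
    let mut n = count.load(Relaxed);
    loop {
        if n == 0 {
            return false; // once the count reaches zero it must stay zero
        }
        match count.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) {
            Ok(_) => return true,
            Err(old) => n = old,
        }
    }
}
```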
 
index a7cd59d4dfe3c7313c24b5b8432edb56ffbecb52..7bdf9eaccc3239df362284f6970f35301b605282 100644 (file)
@@ -525,14 +525,14 @@ impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {}
 /// }
 /// ```
 #[rustc_paren_sugar]
-#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "0")]
+#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "28796")]
 pub trait FnBox<A> {
     type Output;
 
     fn call_box(self: Box<Self>, args: A) -> Self::Output;
 }
 
-#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "0")]
+#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "28796")]
 impl<A, F> FnBox<A> for F where F: FnOnce<A>
 {
     type Output = F::Output;
@@ -542,7 +542,7 @@ impl<A, F> FnBox<A> for F where F: FnOnce<A>
     }
 }
 
-#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "0")]
+#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "28796")]
 impl<'a, A, R> FnOnce<A> for Box<FnBox<A, Output = R> + 'a> {
     type Output = R;
 
@@ -551,7 +551,7 @@ impl<'a, A, R> FnOnce<A> for Box<FnBox<A, Output = R> + 'a> {
     }
 }
 
-#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "0")]
+#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "28796")]
 impl<'a, A, R> FnOnce<A> for Box<FnBox<A, Output = R> + Send + 'a> {
     type Output = R;
 
index 300142d5ec18e79ef8d2ef909b47fa8ff9f3de07..c2dad9a1ae4f95b53b40673fc020aa457735d12a 100644 (file)
 #![feature(unique)]
 #![feature(unsafe_no_drop_flag, filling_drop)]
 #![feature(unsize)]
+#![feature(extended_compare_and_swap)]
 
 #![cfg_attr(not(test), feature(raw, fn_traits, placement_new_protocol))]
-#![cfg_attr(test, feature(test, rustc_private, box_heap))]
+#![cfg_attr(test, feature(test, box_heap))]
 
 // Allow testing this library
 
 #[cfg(test)]
 #[macro_use]
 extern crate std;
-#[cfg(test)]
-#[macro_use]
-extern crate log;
 
 // Heaps provided for low-level allocation strategies
 
index 162312e2457da8a1885286a6a94d9cb1de586951..c2f0a96132733221514a09f067ad59e8b4aca1a8 100644 (file)
@@ -184,9 +184,7 @@ struct RcBox<T: ?Sized> {
 #[unsafe_no_drop_flag]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Rc<T: ?Sized> {
-    // FIXME #12808: strange names to try to avoid interfering with field
-    // accesses of the contained type via Deref
-    _ptr: Shared<RcBox<T>>,
+    ptr: Shared<RcBox<T>>,
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -215,7 +213,7 @@ impl<T> Rc<T> {
                 // pointers, which ensures that the weak destructor never frees
                 // the allocation while the strong destructor is running, even
                 // if the weak pointer is stored inside the strong one.
-                _ptr: Shared::new(Box::into_raw(box RcBox {
+                ptr: Shared::new(Box::into_raw(box RcBox {
                     strong: Cell::new(1),
                     weak: Cell::new(1),
                     value: value,
@@ -224,11 +222,12 @@ impl<T> Rc<T> {
         }
     }
 
-    /// Unwraps the contained value if the `Rc<T>` has only one strong reference.
-    /// This will succeed even if there are outstanding weak references.
+    /// Unwraps the contained value if the `Rc<T>` has exactly one strong reference.
     ///
     /// Otherwise, an `Err` is returned with the same `Rc<T>`.
     ///
+    /// This will succeed even if there are outstanding weak references.
+    ///
     /// # Examples
     ///
     /// ```
@@ -253,7 +252,7 @@ impl<T> Rc<T> {
                 // pointer while also handling drop logic by just crafting a
                 // fake Weak.
                 this.dec_strong();
-                let _weak = Weak { _ptr: this._ptr };
+                let _weak = Weak { ptr: this.ptr };
                 forget(this);
                 Ok(val)
             }
@@ -286,7 +285,7 @@ impl<T: ?Sized> Rc<T> {
     #[stable(feature = "rc_weak", since = "1.4.0")]
     pub fn downgrade(this: &Self) -> Weak<T> {
         this.inc_weak();
-        Weak { _ptr: this._ptr }
+        Weak { ptr: this.ptr }
     }
 
     /// Get the number of weak references to this value.
@@ -347,7 +346,7 @@ impl<T: ?Sized> Rc<T> {
     #[stable(feature = "rc_unique", since = "1.4.0")]
     pub fn get_mut(this: &mut Self) -> Option<&mut T> {
         if Rc::is_unique(this) {
-            let inner = unsafe { &mut **this._ptr };
+            let inner = unsafe { &mut **this.ptr };
             Some(&mut inner.value)
         } else {
             None
@@ -389,7 +388,7 @@ impl<T: Clone> Rc<T> {
         } else if Rc::weak_count(this) != 0 {
             // Can just steal the data, all that's left is Weaks
             unsafe {
-                let mut swap = Rc::new(ptr::read(&(**this._ptr).value));
+                let mut swap = Rc::new(ptr::read(&(**this.ptr).value));
                 mem::swap(this, &mut swap);
                 swap.dec_strong();
                 // Remove implicit strong-weak ref (no need to craft a fake
@@ -403,7 +402,7 @@ impl<T: Clone> Rc<T> {
         // reference count is guaranteed to be 1 at this point, and we required
         // the `Rc<T>` itself to be `mut`, so we're returning the only possible
         // reference to the inner value.
-        let inner = unsafe { &mut **this._ptr };
+        let inner = unsafe { &mut **this.ptr };
         &mut inner.value
     }
 }
@@ -448,7 +447,7 @@ impl<T: ?Sized> Drop for Rc<T> {
     #[unsafe_destructor_blind_to_params]
     fn drop(&mut self) {
         unsafe {
-            let ptr = *self._ptr;
+            let ptr = *self.ptr;
             let thin = ptr as *const ();
 
             if thin as usize != mem::POST_DROP_USIZE {
@@ -489,7 +488,7 @@ impl<T: ?Sized> Clone for Rc<T> {
     #[inline]
     fn clone(&self) -> Rc<T> {
         self.inc_strong();
-        Rc { _ptr: self._ptr }
+        Rc { ptr: self.ptr }
     }
 }
 
@@ -690,7 +689,7 @@ impl<T: ?Sized + fmt::Debug> fmt::Debug for Rc<T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> fmt::Pointer for Rc<T> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Pointer::fmt(&*self._ptr, f)
+        fmt::Pointer::fmt(&*self.ptr, f)
     }
 }
 
@@ -710,9 +709,7 @@ impl<T> From<T> for Rc<T> {
 #[unsafe_no_drop_flag]
 #[stable(feature = "rc_weak", since = "1.4.0")]
 pub struct Weak<T: ?Sized> {
-    // FIXME #12808: strange names to try to avoid interfering with
-    // field accesses of the contained type via Deref
-    _ptr: Shared<RcBox<T>>,
+    ptr: Shared<RcBox<T>>,
 }
 
 #[stable(feature = "rc_weak", since = "1.4.0")]
@@ -748,7 +745,7 @@ impl<T: ?Sized> Weak<T> {
             None
         } else {
             self.inc_strong();
-            Some(Rc { _ptr: self._ptr })
+            Some(Rc { ptr: self.ptr })
         }
     }
 }
@@ -782,7 +779,7 @@ impl<T: ?Sized> Drop for Weak<T> {
     /// ```
     fn drop(&mut self) {
         unsafe {
-            let ptr = *self._ptr;
+            let ptr = *self.ptr;
             let thin = ptr as *const ();
 
             if thin as usize != mem::POST_DROP_USIZE {
@@ -815,7 +812,7 @@ impl<T: ?Sized> Clone for Weak<T> {
     #[inline]
     fn clone(&self) -> Weak<T> {
         self.inc_weak();
-        Weak { _ptr: self._ptr }
+        Weak { ptr: self.ptr }
     }
 }
 
@@ -847,7 +844,7 @@ impl<T> Weak<T> {
     pub fn new() -> Weak<T> {
         unsafe {
             Weak {
-                _ptr: Shared::new(Box::into_raw(box RcBox {
+                ptr: Shared::new(Box::into_raw(box RcBox {
                     strong: Cell::new(0),
                     weak: Cell::new(1),
                     value: uninitialized(),
@@ -909,8 +906,8 @@ impl<T: ?Sized> RcBoxPtr<T> for Rc<T> {
             // the contract anyway.
             // This allows the null check to be elided in the destructor if we
             // manipulated the reference count in the same function.
-            assume(!(*(&self._ptr as *const _ as *const *const ())).is_null());
-            &(**self._ptr)
+            assume(!(*(&self.ptr as *const _ as *const *const ())).is_null());
+            &(**self.ptr)
         }
     }
 }
@@ -923,8 +920,8 @@ impl<T: ?Sized> RcBoxPtr<T> for Weak<T> {
             // the contract anyway.
             // This allows the null check to be elided in the destructor if we
             // manipulated the reference count in the same function.
-            assume(!(*(&self._ptr as *const _ as *const *const ())).is_null());
-            &(**self._ptr)
+            assume(!(*(&self.ptr as *const _ as *const *const ())).is_null());
+            &(**self.ptr)
         }
     }
 }
@@ -1013,7 +1010,7 @@ mod tests {
 
     #[test]
     fn test_strong_count() {
-        let a = Rc::new(0u32);
+        let a = Rc::new(0);
         assert!(Rc::strong_count(&a) == 1);
         let w = Rc::downgrade(&a);
         assert!(Rc::strong_count(&a) == 1);
@@ -1030,7 +1027,7 @@ mod tests {
 
     #[test]
     fn test_weak_count() {
-        let a = Rc::new(0u32);
+        let a = Rc::new(0);
         assert!(Rc::strong_count(&a) == 1);
         assert!(Rc::weak_count(&a) == 0);
         let w = Rc::downgrade(&a);
index c9508322a31a7d7292d14b25426b562b31d08519..5d521913b48f3b1d30ce2689d90a784ee949ef87 100644 (file)
@@ -86,6 +86,8 @@ fn main() {
         // should be good to go!
         cmd.arg("--with-jemalloc-prefix=je_");
         cmd.arg("--disable-tls");
+    } else if target.contains("dragonfly") {
+        cmd.arg("--with-jemalloc-prefix=je_");
     }
 
     if cfg!(feature = "debug-jemalloc") {
@@ -111,7 +113,7 @@ fn main() {
     println!("cargo:rustc-link-search=native={}/lib", build_dir.display());
     if target.contains("android") {
         println!("cargo:rustc-link-lib=gcc");
-    } else if !target.contains("windows") {
+    } else if !target.contains("windows") && !target.contains("musl") {
         println!("cargo:rustc-link-lib=pthread");
     }
 }
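For context, build scripts talk to Cargo entirely through `cargo:` lines on stdout; a hypothetical minimal script using the same directives as the hunk above (the `TARGET` environment variable is set by Cargo for every build-script run):

```
use std::env;

fn main() {
    let target = env::var("TARGET").unwrap();

    // Mirror the exclusion above: Windows and musl targets do not need an
    // explicit pthread link request.
    if !target.contains("windows") && !target.contains("musl") {
        println!("cargo:rustc-link-lib=pthread");
    }
}
```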
index c96d303e6bb64cffed87e8b11e463744806a24eb..3a30bebec547889c4f1772b1b33d43849cb9b7d7 100644 (file)
@@ -42,22 +42,27 @@ use libc::{c_int, c_void, size_t};
 extern {}
 
 // Note that the symbols here are prefixed by default on OSX (we don't
-// explicitly request it), and on Android we explicitly request it as
-// unprefixing cause segfaults (mismatches in allocators).
+// explicitly request it), and on Android and DragonFly we explicitly request
+// it as unprefixing causes segfaults (mismatches in allocators).
 extern {
-    #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios"),
+    #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
+                   target_os = "dragonfly"),
                link_name = "je_mallocx")]
     fn mallocx(size: size_t, flags: c_int) -> *mut c_void;
-    #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios"),
+    #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
+                   target_os = "dragonfly"),
                link_name = "je_rallocx")]
     fn rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void;
-    #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios"),
+    #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
+                   target_os = "dragonfly"),
                link_name = "je_xallocx")]
     fn xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t;
-    #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios"),
+    #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
+                   target_os = "dragonfly"),
                link_name = "je_sdallocx")]
     fn sdallocx(ptr: *mut c_void, size: size_t, flags: c_int);
-    #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios"),
+    #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
+                   target_os = "dragonfly"),
                link_name = "je_nallocx")]
     fn nallocx(size: size_t, flags: c_int) -> size_t;
 }
index cb1107fb650e17f27d75fd15c166d2410b35bc7d..b299b786b35a8057e8703ce7bb216fa248c0551a 100644 (file)
@@ -32,7 +32,6 @@
 #![feature(alloc)]
 #![feature(core_intrinsics)]
 #![feature(heap_api)]
-#![feature(raw)]
 #![feature(heap_api)]
 #![feature(staged_api)]
 #![feature(dropck_parametricity)]
@@ -48,326 +47,10 @@ use std::intrinsics;
 use std::marker::{PhantomData, Send};
 use std::mem;
 use std::ptr;
-use std::slice;
 
 use alloc::heap;
 use alloc::raw_vec::RawVec;
 
-struct Chunk {
-    data: RawVec<u8>,
-    /// Index of the first unused byte.
-    fill: Cell<usize>,
-    /// Indicates whether objects with destructors are stored in this chunk.
-    is_copy: Cell<bool>,
-}
-
-impl Chunk {
-    fn new(size: usize, is_copy: bool) -> Chunk {
-        Chunk {
-            data: RawVec::with_capacity(size),
-            fill: Cell::new(0),
-            is_copy: Cell::new(is_copy),
-        }
-    }
-
-    fn capacity(&self) -> usize {
-        self.data.cap()
-    }
-
-    unsafe fn as_ptr(&self) -> *const u8 {
-        self.data.ptr()
-    }
-
-    // Walk down a chunk, running the destructors for any objects stored
-    // in it.
-    unsafe fn destroy(&self) {
-        let mut idx = 0;
-        let buf = self.as_ptr();
-        let fill = self.fill.get();
-
-        while idx < fill {
-            let tydesc_data = buf.offset(idx as isize) as *const usize;
-            let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
-            let (size, align) = ((*tydesc).size, (*tydesc).align);
-
-            let after_tydesc = idx + mem::size_of::<*const TyDesc>();
-
-            let start = round_up(after_tydesc, align);
-
-            if is_done {
-                ((*tydesc).drop_glue)(buf.offset(start as isize) as *const i8);
-            }
-
-            // Find where the next tydesc lives
-            idx = round_up(start + size, mem::align_of::<*const TyDesc>());
-        }
-    }
-}
-
-/// A slower reflection-based arena that can allocate objects of any type.
-///
-/// This arena uses `RawVec<u8>` as a backing store to allocate objects from.
-/// For each allocated object, the arena stores a pointer to the type descriptor
-/// followed by the object (potentially with alignment padding after each
-/// element). When the arena is destroyed, it iterates through all of its
-/// chunks, and uses the tydesc information to trace through the objects,
-/// calling the destructors on them. One subtle point that needs to be
-/// addressed is how to handle panics while running the user provided
-/// initializer function. It is important to not run the destructor on
-/// uninitialized objects, but how to detect them is somewhat subtle. Since
-/// `alloc()` can be invoked recursively, it is not sufficient to simply exclude
-/// the most recent object. To solve this without requiring extra space, we
-/// use the low order bit of the tydesc pointer to encode whether the object
-/// it describes has been fully initialized.
-///
-/// As an optimization, objects with destructors are stored in different chunks
-/// than objects without destructors. This reduces overhead when initializing
-/// plain-old-data (`Copy` types) and means we don't need to waste time running
-/// their destructors.
-#[unstable(feature = "rustc_private",
-           reason = "Private to rustc", issue = "0")]
-#[rustc_deprecated(since = "1.6.0-dev", reason =
-"The reflection-based arena is superseded by the any-arena crate")]
-pub struct Arena<'longer_than_self> {
-    // The heads are separated out from the list as a unbenchmarked
-    // microoptimization, to avoid needing to case on the list to access a head.
-    head: RefCell<Chunk>,
-    copy_head: RefCell<Chunk>,
-    chunks: RefCell<Vec<Chunk>>,
-    _marker: PhantomData<*mut &'longer_than_self ()>,
-}
-
-impl<'a> Arena<'a> {
-    /// Allocates a new Arena with 32 bytes preallocated.
-    pub fn new() -> Arena<'a> {
-        Arena::new_with_size(32)
-    }
-
-    /// Allocates a new Arena with `initial_size` bytes preallocated.
-    pub fn new_with_size(initial_size: usize) -> Arena<'a> {
-        Arena {
-            head: RefCell::new(Chunk::new(initial_size, false)),
-            copy_head: RefCell::new(Chunk::new(initial_size, true)),
-            chunks: RefCell::new(Vec::new()),
-            _marker: PhantomData,
-        }
-    }
-}
-
-impl<'longer_than_self> Drop for Arena<'longer_than_self> {
-    fn drop(&mut self) {
-        unsafe {
-            self.head.borrow().destroy();
-            for chunk in self.chunks.borrow().iter() {
-                if !chunk.is_copy.get() {
-                    chunk.destroy();
-                }
-            }
-        }
-    }
-}
-
-#[inline]
-fn round_up(base: usize, align: usize) -> usize {
-    (base.checked_add(align - 1)).unwrap() & !(align - 1)
-}
-
-// We encode whether the object a tydesc describes has been
-// initialized in the arena in the low bit of the tydesc pointer. This
-// is necessary in order to properly do cleanup if a panic occurs
-// during an initializer.
-#[inline]
-fn bitpack_tydesc_ptr(p: *const TyDesc, is_done: bool) -> usize {
-    p as usize | (is_done as usize)
-}
-#[inline]
-fn un_bitpack_tydesc_ptr(p: usize) -> (*const TyDesc, bool) {
-    ((p & !1) as *const TyDesc, p & 1 == 1)
-}
-
-// HACK(eddyb) TyDesc replacement using a trait object vtable.
-// This could be replaced in the future with a custom DST layout,
-// or `&'static (drop_glue, size, align)` created by a `const fn`.
-// Requirements:
-// * rvalue promotion (issue #1056)
-// * mem::{size_of, align_of} must be const fns
-struct TyDesc {
-    drop_glue: fn(*const i8),
-    size: usize,
-    align: usize,
-}
-
-trait AllTypes {
-    fn dummy(&self) {}
-}
-
-impl<T: ?Sized> AllTypes for T {}
-
-unsafe fn get_tydesc<T>() -> *const TyDesc {
-    use std::raw::TraitObject;
-
-    let ptr = &*(heap::EMPTY as *const T);
-
-    // Can use any trait that is implemented for all types.
-    let obj = mem::transmute::<&AllTypes, TraitObject>(ptr);
-    obj.vtable as *const TyDesc
-}
-
-impl<'longer_than_self> Arena<'longer_than_self> {
-    // Grows a given chunk and returns `false`, or replaces it with a bigger
-    // chunk and returns `true`.
-    // This method is shared by both parts of the arena.
-    #[cold]
-    fn alloc_grow(&self, head: &mut Chunk, used_cap: usize, n_bytes: usize) -> bool {
-        if head.data.reserve_in_place(used_cap, n_bytes) {
-            // In-place reallocation succeeded.
-            false
-        } else {
-            // Allocate a new chunk.
-            let new_min_chunk_size = cmp::max(n_bytes, head.capacity());
-            let new_chunk = Chunk::new((new_min_chunk_size + 1).next_power_of_two(), false);
-            let old_chunk = mem::replace(head, new_chunk);
-            if old_chunk.fill.get() != 0 {
-                self.chunks.borrow_mut().push(old_chunk);
-            }
-            true
-        }
-    }
-
-    // Functions for the copyable part of the arena.
-
-    #[inline]
-    fn alloc_copy_inner(&self, n_bytes: usize, align: usize) -> *const u8 {
-        let mut copy_head = self.copy_head.borrow_mut();
-        let fill = copy_head.fill.get();
-        let mut start = round_up(fill, align);
-        let mut end = start + n_bytes;
-
-        if end > copy_head.capacity() {
-            if self.alloc_grow(&mut *copy_head, fill, end - fill) {
-                // Continuing with a newly allocated chunk
-                start = 0;
-                end = n_bytes;
-                copy_head.is_copy.set(true);
-            }
-        }
-
-        copy_head.fill.set(end);
-
-        unsafe { copy_head.as_ptr().offset(start as isize) }
-    }
-
-    #[inline]
-    fn alloc_copy<T, F>(&self, op: F) -> &mut T
-        where F: FnOnce() -> T
-    {
-        unsafe {
-            let ptr = self.alloc_copy_inner(mem::size_of::<T>(), mem::align_of::<T>());
-            let ptr = ptr as *mut T;
-            ptr::write(&mut (*ptr), op());
-            &mut *ptr
-        }
-    }
-
-    // Functions for the non-copyable part of the arena.
-
-    #[inline]
-    fn alloc_noncopy_inner(&self, n_bytes: usize, align: usize) -> (*const u8, *const u8) {
-        let mut head = self.head.borrow_mut();
-        let fill = head.fill.get();
-
-        let mut tydesc_start = fill;
-        let after_tydesc = fill + mem::size_of::<*const TyDesc>();
-        let mut start = round_up(after_tydesc, align);
-        let mut end = round_up(start + n_bytes, mem::align_of::<*const TyDesc>());
-
-        if end > head.capacity() {
-            if self.alloc_grow(&mut *head, tydesc_start, end - tydesc_start) {
-                // Continuing with a newly allocated chunk
-                tydesc_start = 0;
-                start = round_up(mem::size_of::<*const TyDesc>(), align);
-                end = round_up(start + n_bytes, mem::align_of::<*const TyDesc>());
-            }
-        }
-
-        head.fill.set(end);
-
-        unsafe {
-            let buf = head.as_ptr();
-            (buf.offset(tydesc_start as isize),
-             buf.offset(start as isize))
-        }
-    }
-
-    #[inline]
-    fn alloc_noncopy<T, F>(&self, op: F) -> &mut T
-        where F: FnOnce() -> T
-    {
-        unsafe {
-            let tydesc = get_tydesc::<T>();
-            let (ty_ptr, ptr) = self.alloc_noncopy_inner(mem::size_of::<T>(), mem::align_of::<T>());
-            let ty_ptr = ty_ptr as *mut usize;
-            let ptr = ptr as *mut T;
-            // Write in our tydesc along with a bit indicating that it
-            // has *not* been initialized yet.
-            *ty_ptr = bitpack_tydesc_ptr(tydesc, false);
-            // Actually initialize it
-            ptr::write(&mut (*ptr), op());
-            // Now that we are done, update the tydesc to indicate that
-            // the object is there.
-            *ty_ptr = bitpack_tydesc_ptr(tydesc, true);
-
-            &mut *ptr
-        }
-    }
-
-    /// Allocates a new item in the arena, using `op` to initialize the value,
-    /// and returns a reference to it.
-    #[inline]
-    pub fn alloc<T: 'longer_than_self, F>(&self, op: F) -> &mut T
-        where F: FnOnce() -> T
-    {
-        unsafe {
-            if intrinsics::needs_drop::<T>() {
-                self.alloc_noncopy(op)
-            } else {
-                self.alloc_copy(op)
-            }
-        }
-    }
-
-    /// Allocates a slice of bytes of requested length. The bytes are not guaranteed to be zero
-    /// if the arena has previously been cleared.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the requested length is too large and causes overflow.
-    pub fn alloc_bytes(&self, len: usize) -> &mut [u8] {
-        unsafe {
-            // Check for overflow.
-            self.copy_head.borrow().fill.get().checked_add(len).expect("length overflow");
-            let ptr = self.alloc_copy_inner(len, 1);
-            intrinsics::assume(!ptr.is_null());
-            slice::from_raw_parts_mut(ptr as *mut _, len)
-        }
-    }
-
-    /// Clears the arena. Deallocates all but the longest chunk which may be reused.
-    pub fn clear(&mut self) {
-        unsafe {
-            self.head.borrow().destroy();
-            self.head.borrow().fill.set(0);
-            self.copy_head.borrow().fill.set(0);
-            for chunk in self.chunks.borrow().iter() {
-                if !chunk.is_copy.get() {
-                    chunk.destroy();
-                }
-            }
-            self.chunks.borrow_mut().clear();
-        }
-    }
-}
-
 /// A faster arena that can hold objects of only one type.
 pub struct TypedArena<T> {
     /// A pointer to the next object to be allocated.
@@ -566,9 +249,8 @@ unsafe impl<T: Send> Send for TypedArena<T> {}
 mod tests {
     extern crate test;
     use self::test::Bencher;
-    use super::{Arena, TypedArena};
+    use super::TypedArena;
     use std::cell::Cell;
-    use std::rc::Rc;
 
     #[allow(dead_code)]
     #[derive(Debug, Eq, PartialEq)]
@@ -642,12 +324,6 @@ mod tests {
         })
     }
 
-    #[bench]
-    pub fn bench_copy_old_arena(b: &mut Bencher) {
-        let arena = Arena::new();
-        b.iter(|| arena.alloc(|| Point { x: 1, y: 2, z: 3 }))
-    }
-
     #[allow(dead_code)]
     struct Noncopy {
         string: String,
@@ -673,22 +349,6 @@ mod tests {
         }
     }
 
-    #[test]
-    pub fn test_arena_zero_sized() {
-        let arena = Arena::new();
-        let mut points = vec![];
-        for _ in 0..1000 {
-            for _ in 0..100 {
-                arena.alloc(|| ());
-            }
-            let point = arena.alloc(|| Point { x: 1, y: 2, z: 3 });
-            points.push(point);
-        }
-        for point in &points {
-            assert_eq!(**point, Point { x: 1, y: 2, z: 3 });
-        }
-    }
-
     #[test]
     pub fn test_typed_arena_clear() {
         let mut arena = TypedArena::new();
@@ -700,66 +360,6 @@ mod tests {
         }
     }
 
-    #[test]
-    pub fn test_arena_clear() {
-        let mut arena = Arena::new();
-        for _ in 0..10 {
-            arena.clear();
-            for _ in 0..10000 {
-                arena.alloc(|| Point { x: 1, y: 2, z: 3 });
-                arena.alloc(|| {
-                    Noncopy {
-                        string: "hello world".to_string(),
-                        array: vec![],
-                    }
-                });
-            }
-        }
-    }
-
-    #[test]
-    pub fn test_arena_alloc_bytes() {
-        let arena = Arena::new();
-        for i in 0..10000 {
-            arena.alloc(|| Point { x: 1, y: 2, z: 3 });
-            for byte in arena.alloc_bytes(i % 42).iter_mut() {
-                *byte = i as u8;
-            }
-        }
-    }
-
-    #[test]
-    fn test_arena_destructors() {
-        let arena = Arena::new();
-        for i in 0..10 {
-            // Arena allocate something with drop glue to make sure it
-            // doesn't leak.
-            arena.alloc(|| Rc::new(i));
-            // Allocate something with funny size and alignment, to keep
-            // things interesting.
-            arena.alloc(|| [0u8, 1u8, 2u8]);
-        }
-    }
-
-    #[test]
-    #[should_panic]
-    fn test_arena_destructors_fail() {
-        let arena = Arena::new();
-        // Put some stuff in the arena.
-        for i in 0..10 {
-            // Arena allocate something with drop glue to make sure it
-            // doesn't leak.
-            arena.alloc(|| Rc::new(i));
-            // Allocate something with funny size and alignment, to keep
-            // things interesting.
-            arena.alloc(|| [0u8, 1, 2]);
-        }
-        // Now, panic while allocating
-        arena.alloc::<Rc<i32>, _>(|| {
-            panic!();
-        });
-    }
-
     // Drop tests
 
     struct DropCounter<'a> {
@@ -772,40 +372,6 @@ mod tests {
         }
     }
 
-    #[test]
-    fn test_arena_drop_count() {
-        let counter = Cell::new(0);
-        {
-            let arena = Arena::new();
-            for _ in 0..100 {
-                // Allocate something with drop glue to make sure it doesn't leak.
-                arena.alloc(|| DropCounter { count: &counter });
-                // Allocate something with funny size and alignment, to keep
-                // things interesting.
-                arena.alloc(|| [0u8, 1u8, 2u8]);
-            }
-            // dropping
-        };
-        assert_eq!(counter.get(), 100);
-    }
-
-    #[test]
-    fn test_arena_drop_on_clear() {
-        let counter = Cell::new(0);
-        for i in 0..10 {
-            let mut arena = Arena::new();
-            for _ in 0..100 {
-                // Allocate something with drop glue to make sure it doesn't leak.
-                arena.alloc(|| DropCounter { count: &counter });
-                // Allocate something with funny size and alignment, to keep
-                // things interesting.
-                arena.alloc(|| [0u8, 1u8, 2u8]);
-            }
-            arena.clear();
-            assert_eq!(counter.get(), i * 100 + 100);
-        }
-    }
-
     #[test]
     fn test_typed_arena_drop_count() {
         let counter = Cell::new(0);
@@ -845,25 +411,6 @@ mod tests {
         }
     }
 
-    #[test]
-    fn test_arena_drop_small_count() {
-        DROP_COUNTER.with(|c| c.set(0));
-        {
-            let arena = Arena::new();
-            for _ in 0..10 {
-                for _ in 0..10 {
-                    // Allocate something with drop glue to make sure it doesn't leak.
-                    arena.alloc(|| SmallDroppable);
-                }
-                // Allocate something with funny size and alignment, to keep
-                // things interesting.
-                arena.alloc(|| [0u8, 1u8, 2u8]);
-            }
-            // dropping
-        };
-        assert_eq!(DROP_COUNTER.with(|c| c.get()), 100);
-    }
-
     #[test]
     fn test_typed_arena_drop_small_count() {
         DROP_COUNTER.with(|c| c.set(0));
@@ -898,17 +445,4 @@ mod tests {
             });
         })
     }
-
-    #[bench]
-    pub fn bench_noncopy_old_arena(b: &mut Bencher) {
-        let arena = Arena::new();
-        b.iter(|| {
-            arena.alloc(|| {
-                Noncopy {
-                    string: "hello world".to_string(),
-                    array: vec![1, 2, 3, 4, 5],
-                }
-            })
-        })
-    }
 }
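With the reflection-based `Arena` removed, `TypedArena` is what remains in this crate; a rough usage sketch, assuming nightly access to the rustc-internal `arena` crate via `rustc_private`:

```
#![feature(rustc_private)]
extern crate arena;

use arena::TypedArena;

fn main() {
    let arena: TypedArena<[u8; 3]> = TypedArena::new();
    // alloc() moves each value into the arena and returns a reference that
    // lives as long as the arena itself.
    let a = arena.alloc([0, 1, 2]);
    let b = arena.alloc([3, 4, 5]);
    assert_eq!(a[2] + b[0], 5);
}
```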
index bd329949618e5f4376860f8b3af77db4a041be75..c9dd1efb37435d3b3b8e3282cdcb2d8a6c005eff 100644 (file)
@@ -167,6 +167,49 @@ use vec::{self, Vec};
 /// item's ordering relative to any other item, as determined by the `Ord`
 /// trait, changes while it is in the heap. This is normally only possible
 /// through `Cell`, `RefCell`, global state, I/O, or unsafe code.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::BinaryHeap;
+///
+/// // Type inference lets us omit an explicit type signature (which
+/// // would be `BinaryHeap<i32>` in this example).
+/// let mut heap = BinaryHeap::new();
+///
+/// // We can use peek to look at the next item in the heap. In this case,
+/// // there are no items in there yet, so we get None.
+/// assert_eq!(heap.peek(), None);
+///
+/// // Let's add some scores...
+/// heap.push(1);
+/// heap.push(5);
+/// heap.push(2);
+///
+/// // Now peek shows the most important item in the heap.
+/// assert_eq!(heap.peek(), Some(&5));
+///
+/// // We can check the length of a heap.
+/// assert_eq!(heap.len(), 3);
+///
+/// // We can iterate over the items in the heap, although they are returned in
+/// // an arbitrary order.
+/// for x in &heap {
+///     println!("{}", x);
+/// }
+///
+/// // If we instead pop these scores, they should come back in order.
+/// assert_eq!(heap.pop(), Some(5));
+/// assert_eq!(heap.pop(), Some(2));
+/// assert_eq!(heap.pop(), Some(1));
+/// assert_eq!(heap.pop(), None);
+///
+/// // We can clear the heap of any remaining items.
+/// heap.clear();
+///
+/// // The heap should now be empty.
+/// assert!(heap.is_empty())
+/// ```
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct BinaryHeap<T> {
     data: Vec<T>,
@@ -203,6 +246,8 @@ impl<T: Ord> BinaryHeap<T> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BinaryHeap;
     /// let mut heap = BinaryHeap::new();
@@ -220,6 +265,8 @@ impl<T: Ord> BinaryHeap<T> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BinaryHeap;
     /// let mut heap = BinaryHeap::with_capacity(10);
@@ -235,6 +282,8 @@ impl<T: Ord> BinaryHeap<T> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BinaryHeap;
     /// let heap = BinaryHeap::from(vec![1, 2, 3, 4]);
@@ -253,6 +302,8 @@ impl<T: Ord> BinaryHeap<T> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BinaryHeap;
     /// let mut heap = BinaryHeap::new();
@@ -273,6 +324,8 @@ impl<T: Ord> BinaryHeap<T> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BinaryHeap;
     /// let mut heap = BinaryHeap::with_capacity(100);
@@ -297,6 +350,8 @@ impl<T: Ord> BinaryHeap<T> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BinaryHeap;
     /// let mut heap = BinaryHeap::new();
@@ -318,6 +373,8 @@ impl<T: Ord> BinaryHeap<T> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BinaryHeap;
     /// let mut heap = BinaryHeap::new();
@@ -331,6 +388,19 @@ impl<T: Ord> BinaryHeap<T> {
     }
 
     /// Discards as much additional capacity as possible.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// use std::collections::BinaryHeap;
+    /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
+    ///
+    /// assert!(heap.capacity() >= 100);
+    /// heap.shrink_to_fit();
+    /// assert!(heap.capacity() == 0);
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn shrink_to_fit(&mut self) {
         self.data.shrink_to_fit();
@@ -341,6 +411,8 @@ impl<T: Ord> BinaryHeap<T> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BinaryHeap;
     /// let mut heap = BinaryHeap::from(vec![1, 3]);
@@ -364,6 +436,8 @@ impl<T: Ord> BinaryHeap<T> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BinaryHeap;
     /// let mut heap = BinaryHeap::new();
@@ -386,6 +460,8 @@ impl<T: Ord> BinaryHeap<T> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// #![feature(binary_heap_extras)]
     ///
@@ -424,6 +500,8 @@ impl<T: Ord> BinaryHeap<T> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// #![feature(binary_heap_extras)]
     ///
@@ -454,6 +532,8 @@ impl<T: Ord> BinaryHeap<T> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BinaryHeap;
     /// let heap = BinaryHeap::from(vec![1, 2, 3, 4, 5, 6, 7]);
@@ -474,6 +554,8 @@ impl<T: Ord> BinaryHeap<T> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BinaryHeap;
     ///
@@ -571,12 +653,40 @@ impl<T: Ord> BinaryHeap<T> {
     }
 
     /// Returns the length of the binary heap.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// use std::collections::BinaryHeap;
+    /// let heap = BinaryHeap::from(vec![1, 3]);
+    ///
+    /// assert_eq!(heap.len(), 2);
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn len(&self) -> usize {
         self.data.len()
     }
 
     /// Checks if the binary heap is empty.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// use std::collections::BinaryHeap;
+    /// let mut heap = BinaryHeap::new();
+    ///
+    /// assert!(heap.is_empty());
+    ///
+    /// heap.push(3);
+    /// heap.push(5);
+    /// heap.push(1);
+    ///
+    /// assert!(!heap.is_empty());
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn is_empty(&self) -> bool {
         self.len() == 0
@@ -585,6 +695,23 @@ impl<T: Ord> BinaryHeap<T> {
     /// Clears the binary heap, returning an iterator over the removed elements.
     ///
     /// The elements are removed in arbitrary order.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// use std::collections::BinaryHeap;
+    /// let mut heap = BinaryHeap::from(vec![1, 3]);
+    ///
+    /// assert!(!heap.is_empty());
+    ///
+    /// for x in heap.drain() {
+    ///     println!("{}", x);
+    /// }
+    ///
+    /// assert!(heap.is_empty());
+    /// ```
     #[inline]
     #[stable(feature = "drain", since = "1.6.0")]
     pub fn drain(&mut self) -> Drain<T> {
@@ -592,6 +719,21 @@ impl<T: Ord> BinaryHeap<T> {
     }
 
     /// Drops all items from the binary heap.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// use std::collections::BinaryHeap;
+    /// let mut heap = BinaryHeap::from(vec![1, 3]);
+    ///
+    /// assert!(!heap.is_empty());
+    ///
+    /// heap.clear();
+    ///
+    /// assert!(heap.is_empty());
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn clear(&mut self) {
         self.drain();
@@ -809,6 +951,8 @@ impl<T: Ord> IntoIterator for BinaryHeap<T> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BinaryHeap;
     /// let heap = BinaryHeap::from(vec![1, 2, 3, 4]);
@@ -836,13 +980,13 @@ impl<'a, T> IntoIterator for &'a BinaryHeap<T> where T: Ord {
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: Ord> Extend<T> for BinaryHeap<T> {
-    fn extend<I: IntoIterator<Item = T>>(&mut self, iterable: I) {
-        let iter = iterable.into_iter();
-        let (lower, _) = iter.size_hint();
+    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+        let iterator = iter.into_iter();
+        let (lower, _) = iterator.size_hint();
 
         self.reserve(lower);
 
-        for elem in iter {
+        for elem in iterator {
             self.push(elem);
         }
     }
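The `Extend` impl above reserves the iterator's `size_hint` lower bound before pushing, so extending from a sized source grows the heap's buffer up front; a small usage sketch:

```
use std::collections::BinaryHeap;

fn main() {
    let mut heap = BinaryHeap::new();
    heap.extend(vec![1, 5, 2]); // reserves three slots, then pushes each element
    assert_eq!(heap.peek(), Some(&5));
}
```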
index ce20a25d13af38ac38f8402669e6f4d5582f2ef4..6ca0db68a88ce42a1c87330c47fe1b8a34b38dc5 100644 (file)
@@ -49,6 +49,18 @@ pub trait ToOwned {
     type Owned: Borrow<Self>;
 
     /// Creates owned data from borrowed data, usually by cloning.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let s = "a"; // &str
+    /// let ss = s.to_owned(); // String
+    ///
+    /// let v: &[i32] = &[1, 2]; // slice
+    /// let vv = v.to_owned(); // Vec<i32>
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     fn to_owned(&self) -> Self::Owned;
 }
@@ -244,24 +256,6 @@ impl<'a, B: ?Sized> Hash for Cow<'a, B> where B: Hash + ToOwned {
     }
 }
 
-/// Trait for moving into a `Cow`.
-#[unstable(feature = "into_cow", reason = "may be replaced by `convert::Into`",
-           issue = "27735")]
-#[rustc_deprecated(since = "1.7.0",
-                   reason = "conflicts with Into, may return with specialization")]
-pub trait IntoCow<'a, B: ?Sized> where B: ToOwned {
-    /// Moves `self` into `Cow`
-    fn into_cow(self) -> Cow<'a, B>;
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[allow(deprecated)]
-impl<'a, B: ?Sized> IntoCow<'a, B> for Cow<'a, B> where B: ToOwned {
-    fn into_cow(self) -> Cow<'a, B> {
-        self
-    }
-}
-
 #[stable(feature = "rust1", since = "1.0.0")]
 #[allow(deprecated)]
 impl<'a, T: ?Sized + ToOwned> AsRef<T> for Cow<'a, T> {
index 2a950ce0ab72d58404aa0981513ceb1fa9effbee..de40568fd6704d97f0cb4399efca81113993055b 100644 (file)
@@ -58,6 +58,74 @@ use self::Entry::*;
 /// It is a logic error for a key to be modified in such a way that the key's ordering relative to
 /// any other key, as determined by the `Ord` trait, changes while it is in the map. This is
 /// normally only possible through `Cell`, `RefCell`, global state, I/O, or unsafe code.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::BTreeMap;
+///
+/// // type inference lets us omit an explicit type signature (which
+/// // would be `BTreeMap<&str, &str>` in this example).
+/// let mut movie_reviews = BTreeMap::new();
+///
+/// // review some movies.
+/// movie_reviews.insert("Office Space",       "Deals with real issues in the workplace.");
+/// movie_reviews.insert("Pulp Fiction",       "Masterpiece.");
+/// movie_reviews.insert("The Godfather",      "Very enjoyable.");
+/// movie_reviews.insert("The Blues Brothers", "Eye lyked it alot.");
+///
+/// // check for a specific one.
+/// if !movie_reviews.contains_key("Les Misérables") {
+///     println!("We've got {} reviews, but Les Misérables ain't one.",
+///              movie_reviews.len());
+/// }
+///
+/// // oops, this review has a lot of spelling mistakes, let's delete it.
+/// movie_reviews.remove("The Blues Brothers");
+///
+/// // look up the values associated with some keys.
+/// let to_find = ["Up!", "Office Space"];
+/// for movie in &to_find {
+///     match movie_reviews.get(movie) {
+///        Some(review) => println!("{}: {}", movie, review),
+///        None => println!("{} is unreviewed.", movie)
+///     }
+/// }
+///
+/// // iterate over everything.
+/// for (movie, review) in &movie_reviews {
+///     println!("{}: \"{}\"", movie, review);
+/// }
+/// ```
+///
+/// `BTreeMap` also implements an [`Entry API`](#method.entry), which allows
+/// for more complex methods of getting, setting, updating and removing keys and
+/// their values:
+///
+/// ```
+/// use std::collections::BTreeMap;
+///
+/// // type inference lets us omit an explicit type signature (which
+/// // would be `BTreeMap<&str, u8>` in this example).
+/// let mut player_stats = BTreeMap::new();
+///
+/// fn random_stat_buff() -> u8 {
+///     // could actually return some random value here - let's just return
+///     // some fixed value for now
+///     42
+/// }
+///
+/// // insert a key only if it doesn't already exist
+/// player_stats.entry("health").or_insert(100);
+///
+/// // insert a key using a function that provides a new value only if it
+/// // doesn't already exist
+/// player_stats.entry("defence").or_insert_with(random_stat_buff);
+///
+/// // update a key, guarding against the key possibly not being set
+/// let stat = player_stats.entry("attack").or_insert(100);
+/// *stat += random_stat_buff();
+/// ```
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct BTreeMap<K, V> {
     root: node::Root<K, V>,
@@ -217,6 +285,12 @@ pub struct Values<'a, K: 'a, V: 'a> {
     inner: Iter<'a, K, V>,
 }
 
+/// A mutable iterator over a BTreeMap's values.
+#[unstable(feature = "map_values_mut", reason = "recently added", issue = "32551")]
+pub struct ValuesMut<'a, K: 'a, V: 'a> {
+    inner: IterMut<'a, K, V>,
+}
+
 /// An iterator over a sub-range of BTreeMap's entries.
 pub struct Range<'a, K: 'a, V: 'a> {
     front: Handle<NodeRef<marker::Immut<'a>, K, V, marker::Leaf>, marker::Edge>,
@@ -276,6 +350,19 @@ pub struct OccupiedEntry<'a, K: 'a, V: 'a> {
 
 impl<K: Ord, V> BTreeMap<K, V> {
     /// Makes a new empty BTreeMap with a reasonable choice for B.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// use std::collections::BTreeMap;
+    ///
+    /// let mut map = BTreeMap::new();
+    ///
+    /// // entries can now be inserted into the empty map
+    /// map.insert(1, "a");
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn new() -> BTreeMap<K, V> {
         BTreeMap {
@@ -288,6 +375,8 @@ impl<K: Ord, V> BTreeMap<K, V> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BTreeMap;
     ///
@@ -309,6 +398,8 @@ impl<K: Ord, V> BTreeMap<K, V> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BTreeMap;
     ///
@@ -332,6 +423,8 @@ impl<K: Ord, V> BTreeMap<K, V> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BTreeMap;
     ///
@@ -352,6 +445,8 @@ impl<K: Ord, V> BTreeMap<K, V> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BTreeMap;
     ///
@@ -384,6 +479,8 @@ impl<K: Ord, V> BTreeMap<K, V> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BTreeMap;
     ///
@@ -414,6 +511,8 @@ impl<K: Ord, V> BTreeMap<K, V> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BTreeMap;
     ///
@@ -443,6 +542,8 @@ impl<K: Ord, V> BTreeMap<K, V> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// #![feature(btree_range, collections_bound)]
     ///
@@ -516,6 +617,8 @@ impl<K: Ord, V> BTreeMap<K, V> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// #![feature(btree_range, collections_bound)]
     ///
@@ -591,6 +694,8 @@ impl<K: Ord, V> BTreeMap<K, V> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BTreeMap;
     ///
@@ -907,6 +1012,33 @@ impl<'a, K, V> Iterator for Range<'a, K, V> {
     }
 }
 
+#[unstable(feature = "map_values_mut", reason = "recently added", issue = "32551")]
+impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
+    type Item = &'a mut V;
+
+    fn next(&mut self) -> Option<&'a mut V> {
+        self.inner.next().map(|(_, v)| v)
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+
+#[unstable(feature = "map_values_mut", reason = "recently added", issue = "32551")]
+impl<'a, K, V> DoubleEndedIterator for ValuesMut<'a, K, V> {
+    fn next_back(&mut self) -> Option<&'a mut V> {
+        self.inner.next_back().map(|(_, v)| v)
+    }
+}
+
+#[unstable(feature = "map_values_mut", reason = "recently added", issue = "32551")]
+impl<'a, K, V> ExactSizeIterator for ValuesMut<'a, K, V> {
+    fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+
 impl<'a, K, V> Range<'a, K, V> {
     unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) {
         let handle = self.front;
@@ -1199,6 +1331,8 @@ impl<K, V> BTreeMap<K, V> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BTreeMap;
     ///
@@ -1229,6 +1363,8 @@ impl<K, V> BTreeMap<K, V> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BTreeMap;
     ///
@@ -1262,6 +1398,8 @@ impl<K, V> BTreeMap<K, V> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BTreeMap;
     ///
@@ -1281,6 +1419,8 @@ impl<K, V> BTreeMap<K, V> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BTreeMap;
     ///
@@ -1296,10 +1436,39 @@ impl<K, V> BTreeMap<K, V> {
         Values { inner: self.iter() }
     }
 
+    /// Gets a mutable iterator over the values of the map, in order by key.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// # #![feature(map_values_mut)]
+    /// use std::collections::BTreeMap;
+    ///
+    /// let mut a = BTreeMap::new();
+    /// a.insert(1, String::from("hello"));
+    /// a.insert(2, String::from("goodbye"));
+    ///
+    /// for value in a.values_mut() {
+    ///     value.push_str("!");
+    /// }
+    ///
+    /// let values: Vec<String> = a.values().cloned().collect();
+    /// assert_eq!(values, [String::from("hello!"),
+    ///                     String::from("goodbye!")]);
+    /// ```
+    #[unstable(feature = "map_values_mut", reason = "recently added", issue = "32551")]
+    pub fn values_mut<'a>(&'a mut self) -> ValuesMut<'a, K, V> {
+        ValuesMut { inner: self.iter_mut() }
+    }
+
     /// Returns the number of elements in the map.
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BTreeMap;
     ///
@@ -1317,6 +1486,8 @@ impl<K, V> BTreeMap<K, V> {
     ///
     /// # Examples
     ///
+    /// Basic usage:
+    ///
     /// ```
     /// use std::collections::BTreeMap;
     ///
@@ -1354,6 +1525,13 @@ impl<'a, K: Ord, V> Entry<'a, K, V> {
 }
 
 impl<'a, K: Ord, V> VacantEntry<'a, K, V> {
+    /// Gets a reference to the key that would be used when inserting a value
+    /// through the VacantEntry.
+    #[unstable(feature = "map_entry_keys", issue = "32281")]
+    pub fn key(&self) -> &K {
+        &self.key
+    }
+
     /// Sets the value of the entry with the VacantEntry's key,
     /// and returns a mutable reference to it.
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -1398,6 +1576,12 @@ impl<'a, K: Ord, V> VacantEntry<'a, K, V> {
 }
 
 impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> {
+    /// Gets a reference to the key in the entry.
+    #[unstable(feature = "map_entry_keys", issue = "32281")]
+    pub fn key(&self) -> &K {
+        self.handle.reborrow().into_kv().0
+    }
+
     /// Gets a reference to the value in the entry.
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn get(&self) -> &V {
index 1cd50c2dcbe9e81e7ba74feba7197bc1e0d9cadd..e679381f223f8a97624c21efefbc1f49b9d28c90 100644 (file)
@@ -34,10 +34,40 @@ use Bound;
 /// to any other item, as determined by the [`Ord`] trait, changes while it is in the set. This is
 /// normally only possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code.
 ///
-/// [`BTreeMap`]: ../struct.BTreeMap.html
-/// [`Ord`]: ../../core/cmp/trait.Ord.html
+/// [`BTreeMap`]: struct.BTreeMap.html
+/// [`Ord`]: ../../std/cmp/trait.Ord.html
 /// [`Cell`]: ../../std/cell/struct.Cell.html
 /// [`RefCell`]: ../../std/cell/struct.RefCell.html
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::BTreeSet;
+///
+/// // Type inference lets us omit an explicit type signature (which
+/// // would be `BTreeSet<&str>` in this example).
+/// let mut books = BTreeSet::new();
+///
+/// // Add some books.
+/// books.insert("A Dance With Dragons");
+/// books.insert("To Kill a Mockingbird");
+/// books.insert("The Odyssey");
+/// books.insert("The Great Gatsby");
+///
+/// // Check for a specific one.
+/// if !books.contains("The Winds of Winter") {
+///     println!("We have {} books, but The Winds of Winter ain't one.",
+///              books.len());
+/// }
+///
+/// // Remove a book.
+/// books.remove("The Odyssey");
+///
+/// // Iterate over everything.
+/// for book in &books {
+///     println!("{}", book);
+/// }
+/// ```
 #[derive(Clone, Hash, PartialEq, Eq, Ord, PartialOrd)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct BTreeSet<T> {
@@ -349,7 +379,7 @@ impl<T: Ord> BTreeSet<T> {
     /// The value may be any borrowed form of the set's value type,
     /// but the ordering on the borrowed form *must* match the
     /// ordering on the value type.
-    #[unstable(feature = "set_recovery", issue = "28050")]
+    #[stable(feature = "set_recovery", since = "1.9.0")]
     pub fn get<Q: ?Sized>(&self, value: &Q) -> Option<&T>
         where T: Borrow<Q>,
               Q: Ord
@@ -472,7 +502,7 @@ impl<T: Ord> BTreeSet<T> {
 
     /// Adds a value to the set, replacing the existing value, if any, that is equal to the given
     /// one. Returns the replaced value.
-    #[unstable(feature = "set_recovery", issue = "28050")]
+    #[stable(feature = "set_recovery", since = "1.9.0")]
     pub fn replace(&mut self, value: T) -> Option<T> {
         Recover::replace(&mut self.map, value)
     }
@@ -508,7 +538,7 @@ impl<T: Ord> BTreeSet<T> {
     /// The value may be any borrowed form of the set's value type,
     /// but the ordering on the borrowed form *must* match the
     /// ordering on the value type.
-    #[unstable(feature = "set_recovery", issue = "28050")]
+    #[stable(feature = "set_recovery", since = "1.9.0")]
     pub fn take<Q: ?Sized>(&mut self, value: &Q) -> Option<T>
         where T: Borrow<Q>,
               Q: Ord
index 8b8ccd526c90f854c7fd0c8b360eda4ebeb7bf14..0c66c0564c3ea2ca879d6eaefee4442ca641f830 100644 (file)
@@ -81,13 +81,13 @@ pub trait CLike {
     fn from_usize(usize) -> Self;
 }
 
-#[allow(deprecated)]
 fn bit<E: CLike>(e: &E) -> usize {
-    use core::usize;
+    use core::mem;
     let value = e.to_usize();
-    assert!(value < usize::BITS,
+    let bits = mem::size_of::<usize>() * 8;
+    assert!(value < bits,
             "EnumSet only supports up to {} variants.",
-            usize::BITS - 1);
+            bits - 1);
     1 << value
 }
 
index 97b01a607f5e602a06ea3d30f9b373f3c9c8fec3..e30e0b213afa161eb3da80c251349bd90af865f7 100644 (file)
 //! For example, these:
 //!
 //! ```
-//! // Hello {arg 0 (x)} is {arg 1 (0.01} with precision specified inline (5)}
+//! // Hello {arg 0 (x)} is {arg 1 (0.01) with precision specified inline (5)}
 //! println!("Hello {0} is {1:.5}", "x", 0.01);
 //!
-//! // Hello {arg 1 (x)} is {arg 2 (0.01} with precision specified in arg 0 (5)}
+//! // Hello {arg 1 (x)} is {arg 2 (0.01) with precision specified in arg 0 (5)}
 //! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
 //!
-//! // Hello {arg 0 (x)} is {arg 2 (0.01} with precision specified in arg 1 (5)}
+//! // Hello {arg 0 (x)} is {arg 2 (0.01) with precision specified in arg 1 (5)}
 //! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
 //!
-//! // Hello {next arg (x)} is {second of next two args (0.01} with precision
+//! // Hello {next arg (x)} is {second of next two args (0.01) with precision
 //! //                          specified in first of next two args (5)}
 //! println!("Hello {} is {:.*}",    "x", 5, 0.01);
 //!
-//! // Hello {next arg (x)} is {arg 2 (0.01} with precision
+//! // Hello {next arg (x)} is {arg 2 (0.01) with precision
 //! //                          specified in its predecessor (5)}
 //! println!("Hello {} is {2:.*}",   "x", 5, 0.01);
 //! ```
@@ -491,10 +491,6 @@ pub use core::fmt::{LowerExp, UpperExp};
 pub use core::fmt::Error;
 #[stable(feature = "rust1", since = "1.0.0")]
 pub use core::fmt::{ArgumentV1, Arguments, write};
-#[unstable(feature = "fmt_radix", issue = "27728")]
-#[rustc_deprecated(since = "1.7.0", reason = "not used enough to stabilize")]
-#[allow(deprecated)]
-pub use core::fmt::{radix, Radix, RadixFmt};
 #[stable(feature = "rust1", since = "1.0.0")]
 pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
 
index 373fe7dc4c139be9baa143d6b7fd57054955f931..7540c51e236d226b18343352e9bce5649d6d6d1b 100644 (file)
        test(no_crate_inject, attr(allow(unused_variables), deny(warnings))))]
 
 #![cfg_attr(test, allow(deprecated))] // rand
-#![cfg_attr(not(test), feature(copy_from_slice))] // impl [T]
 #![cfg_attr(not(stage0), deny(warnings))]
 
 #![feature(alloc)]
+#![feature(allow_internal_unstable)]
 #![feature(box_patterns)]
 #![feature(box_syntax)]
 #![feature(core_intrinsics)]
-#![feature(decode_utf16)]
 #![feature(dropck_parametricity)]
 #![feature(fmt_internals)]
-#![feature(fmt_radix)]
 #![feature(heap_api)]
-#![feature(iter_arith)]
+#![feature(inclusive_range)]
 #![feature(iter_arith)]
 #![feature(lang_items)]
 #![feature(nonzero)]
-#![feature(num_bits_bytes)]
 #![feature(pattern)]
 #![feature(placement_in)]
 #![feature(placement_new_protocol)]
 #![feature(shared)]
-#![feature(slice_bytes)]
 #![feature(slice_patterns)]
+#![feature(specialization)]
 #![feature(staged_api)]
 #![feature(step_by)]
 #![feature(str_char)]
@@ -70,13 +67,21 @@ extern crate std;
 #[cfg(test)]
 extern crate test;
 
+#[doc(no_inline)]
 pub use binary_heap::BinaryHeap;
+#[doc(no_inline)]
 pub use btree_map::BTreeMap;
+#[doc(no_inline)]
 pub use btree_set::BTreeSet;
+#[doc(no_inline)]
 pub use linked_list::LinkedList;
+#[doc(no_inline)]
 pub use enum_set::EnumSet;
+#[doc(no_inline)]
 pub use vec_deque::VecDeque;
+#[doc(no_inline)]
 pub use string::String;
+#[doc(no_inline)]
 pub use vec::Vec;
 
 // Needed for the vec! macro
index cb669a9bf9ef8863c5ba5a6d714e0bbf5aadbf3d..85a4fa82e2a34e82171d58833de5b90a42414e93 100644 (file)
@@ -1232,7 +1232,7 @@ mod tests {
         m.append(&mut n);
         check_links(&m);
         let mut sum = v;
-        sum.push_all(&u);
+        sum.extend_from_slice(&u);
         assert_eq!(sum.len(), m.len());
         for elt in sum {
             assert_eq!(m.pop_front(), Some(elt))
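The test above switches from the deprecated `push_all` to `extend_from_slice`. A minimal sketch of the replacement on its own, not part of the patch:

    fn main() {
        // extend_from_slice clones each element of the slice onto the Vec;
        // it is the stable replacement for the deprecated push_all.
        let mut sum = vec![1, 2, 3];
        sum.extend_from_slice(&[4, 5]);
        assert_eq!(sum, [1, 2, 3, 4, 5]);
    }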
@@ -1305,10 +1305,10 @@ mod tests {
         //
         // https://github.com/rust-lang/rust/issues/26021
         let mut v1 = LinkedList::new();
-        v1.push_front(1u8);
-        v1.push_front(1u8);
-        v1.push_front(1u8);
-        v1.push_front(1u8);
+        v1.push_front(1);
+        v1.push_front(1);
+        v1.push_front(1);
+        v1.push_front(1);
         let _ = v1.split_off(3); // Dropping this now should not cause laundry consumption
         assert_eq!(v1.len(), 3);
 
@@ -1319,10 +1319,10 @@ mod tests {
     #[test]
     fn test_split_off() {
         let mut v1 = LinkedList::new();
-        v1.push_front(1u8);
-        v1.push_front(1u8);
-        v1.push_front(1u8);
-        v1.push_front(1u8);
+        v1.push_front(1);
+        v1.push_front(1);
+        v1.push_front(1);
+        v1.push_front(1);
 
         // test all splits
         for ix in 0..1 + v1.len() {
index 6a683e65c9eb302237d41743bc6f0a84737cbca7..d6a8362d58182597527fe1b14449f46c9ef8e86a 100644 (file)
 #[cfg(not(test))]
 #[macro_export]
 #[stable(feature = "rust1", since = "1.0.0")]
+#[allow_internal_unstable]
 macro_rules! vec {
     ($elem:expr; $n:expr) => (
         $crate::vec::from_elem($elem, $n)
     );
     ($($x:expr),*) => (
-        <[_]>::into_vec($crate::boxed::Box::new([$($x),*]))
+        <[_]>::into_vec(box [$($x),*])
     );
     ($($x:expr,)*) => (vec![$($x),*])
 }
@@ -61,7 +62,7 @@ macro_rules! vec {
         $crate::vec::from_elem($elem, $n)
     );
     ($($x:expr),*) => (
-        $crate::slice::into_vec($crate::boxed::Box::new([$($x),*]))
+        $crate::slice::into_vec(box [$($x),*])
     );
     ($($x:expr,)*) => (vec![$($x),*])
 }
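The macro above changes only its internal expansion (using `box` under `allow_internal_unstable`); the caller-side forms are unchanged. A short sketch of those forms, not part of the patch:

    fn main() {
        let a = vec![0u8; 4];    // repeat-element form: from_elem
        let b = vec![1, 2, 3];   // list form: boxed array converted into a Vec
        let c = vec![1, 2, 3,];  // trailing comma forwards to the list form
        assert_eq!(a.len(), 4);
        assert_eq!(b, c);
    }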
index afcd779ddf19f92db8c6b575b2619905675893a3..4e39191b472ee8f6a85ec7ea67a21fbc0745c6d7 100644 (file)
@@ -35,6 +35,7 @@ pub trait RangeArgument<T> {
     }
 }
 
+// FIXME add inclusive ranges to RangeArgument
 
 impl<T> RangeArgument<T> for RangeFull {}
 
index 1446d00b9ea6b8bb29badae94b76dc368233d64e..db91d911c7355d0a68b07bb2b70ac073a406953c 100644 (file)
@@ -78,7 +78,7 @@
 //! * Further methods that return iterators are `.split()`, `.splitn()`,
 //!   `.chunks()`, `.windows()` and more.
 //!
-//! *[See also the slice primitive type](../primitive.slice.html).*
+//! *[See also the slice primitive type](../../std/primitive.slice.html).*
 #![stable(feature = "rust1", since = "1.0.0")]
 
 // Many of the usings in this module are only used in the test configuration.
@@ -104,9 +104,6 @@ pub use core::slice::{Iter, IterMut};
 pub use core::slice::{SplitMut, ChunksMut, Split};
 #[stable(feature = "rust1", since = "1.0.0")]
 pub use core::slice::{SplitN, RSplitN, SplitNMut, RSplitNMut};
-#[unstable(feature = "slice_bytes", issue = "27740")]
-#[allow(deprecated)]
-pub use core::slice::bytes;
 #[stable(feature = "rust1", since = "1.0.0")]
 pub use core::slice::{from_raw_parts, from_raw_parts_mut};
 
@@ -848,14 +845,13 @@ impl<T> [T] {
     /// # Example
     ///
     /// ```rust
-    /// #![feature(copy_from_slice)]
     /// let mut dst = [0, 0, 0];
     /// let src = [1, 2, 3];
     ///
     /// dst.copy_from_slice(&src);
     /// assert_eq!(src, dst);
     /// ```
-    #[unstable(feature = "copy_from_slice", issue = "31755")]
+    #[stable(feature = "copy_from_slice", since = "1.9.0")]
     pub fn copy_from_slice(&mut self, src: &[T]) where T: Copy {
         core_slice::SliceExt::copy_from_slice(self, src)
     }
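`copy_from_slice` is stabilized above, so the doc example no longer needs the feature gate. A standalone sketch of the stabilized call, not part of the patch; both slices must have the same length and the element type must be `Copy`:

    fn main() {
        let src = [1, 2, 3];
        let mut dst = [0, 0, 0];
        // memcpy-style copy; panics if the lengths differ.
        dst.copy_from_slice(&src);
        assert_eq!(src, dst);
    }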
index a30ec452e3c15e3c7dfbfcca8c7c1c6e527d8e0f..b2b1e019a1b899f1887c2e436ac87993b177154d 100644 (file)
@@ -8,9 +8,9 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! Unicode string slices
+//! Unicode string slices.
 //!
-//! *[See also the `str` primitive type](../primitive.str.html).*
+//! *[See also the `str` primitive type](../../std/primitive.str.html).*
 
 
 #![stable(feature = "rust1", since = "1.0.0")]
@@ -228,8 +228,6 @@ impl str {
     /// # Examples
     ///
     /// ```
-    /// #![feature(str_char)]
-    ///
     /// let s = "Löwe 老虎 Léopard";
     /// assert!(s.is_char_boundary(0));
     /// // start of `老`
@@ -242,12 +240,7 @@ impl str {
     /// // third byte of `老`
     /// assert!(!s.is_char_boundary(8));
     /// ```
-    #[unstable(feature = "str_char",
-               reason = "it is unclear whether this method pulls its weight \
-                         with the existence of the char_indices iterator or \
-                         this method may want to be replaced with checked \
-                         slicing",
-               issue = "27754")]
+    #[stable(feature = "is_char_boundary", since = "1.9.0")]
     #[inline]
     pub fn is_char_boundary(&self, index: usize) -> bool {
         core_str::StrExt::is_char_boundary(self, index)
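`is_char_boundary` is likewise stabilized above. A small sketch of what it reports on a mixed-width string, not part of the patch:

    fn main() {
        let s = "Löwe 老虎 Léopard";
        assert!(s.is_char_boundary(0));   // start of the string
        assert!(s.is_char_boundary(6));   // start of `老`
        assert!(!s.is_char_boundary(2));  // second byte of `ö`
    }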
@@ -374,6 +367,7 @@ impl str {
     ///
     /// ```
     /// #![feature(str_char)]
+    /// #![allow(deprecated)]
     ///
     /// use std::str::CharRange;
     ///
@@ -408,6 +402,9 @@ impl str {
                          removed altogether",
                issue = "27754")]
     #[inline]
+    #[rustc_deprecated(reason = "use slicing plus chars() plus len_utf8",
+                       since = "1.9.0")]
+    #[allow(deprecated)]
     pub fn char_range_at(&self, start: usize) -> CharRange {
         core_str::StrExt::char_range_at(self, start)
     }
@@ -432,6 +429,7 @@ impl str {
     ///
     /// ```
     /// #![feature(str_char)]
+    /// #![allow(deprecated)]
     ///
     /// use std::str::CharRange;
     ///
@@ -466,6 +464,9 @@ impl str {
                          eventually removed altogether",
                issue = "27754")]
     #[inline]
+    #[rustc_deprecated(reason = "use slicing plus chars().rev() plus len_utf8",
+                       since = "1.9.0")]
+    #[allow(deprecated)]
     pub fn char_range_at_reverse(&self, start: usize) -> CharRange {
         core_str::StrExt::char_range_at_reverse(self, start)
     }
@@ -481,6 +482,7 @@ impl str {
     ///
     /// ```
     /// #![feature(str_char)]
+    /// #![allow(deprecated)]
     ///
     /// let s = "abπc";
     /// assert_eq!(s.char_at(1), 'b');
@@ -495,6 +497,9 @@ impl str {
                          subslice",
                issue = "27754")]
     #[inline]
+    #[allow(deprecated)]
+    #[rustc_deprecated(reason = "use slicing plus chars()",
+                       since = "1.9.0")]
     pub fn char_at(&self, i: usize) -> char {
         core_str::StrExt::char_at(self, i)
     }
@@ -511,6 +516,7 @@ impl str {
     ///
     /// ```
     /// #![feature(str_char)]
+    /// #![allow(deprecated)]
     ///
     /// let s = "abπc";
     /// assert_eq!(s.char_at_reverse(1), 'a');
@@ -523,6 +529,9 @@ impl str {
                          cases generate panics",
                issue = "27754")]
     #[inline]
+    #[rustc_deprecated(reason = "use slicing plus chars().rev()",
+                       since = "1.9.0")]
+    #[allow(deprecated)]
     pub fn char_at_reverse(&self, i: usize) -> char {
         core_str::StrExt::char_at_reverse(self, i)
     }
@@ -541,6 +550,7 @@ impl str {
     ///
     /// ```
     /// #![feature(str_char)]
+    /// #![allow(deprecated)]
     ///
     /// let s = "Łódź"; // \u{141}o\u{301}dz\u{301}
     /// let (c, s1) = s.slice_shift_char().unwrap();
@@ -559,6 +569,9 @@ impl str {
                          and/or char_indices iterators",
                issue = "27754")]
     #[inline]
+    #[rustc_deprecated(reason = "use chars() plus Chars::as_str",
+                       since = "1.9.0")]
+    #[allow(deprecated)]
     pub fn slice_shift_char(&self) -> Option<(char, &str)> {
         core_str::StrExt::slice_shift_char(self)
     }
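The deprecation notes above point at `chars()` plus `Chars::as_str` as the replacement for `slice_shift_char`. A minimal sketch of that pattern, not part of the patch:

    fn main() {
        let s = "Łódź";
        let mut chars = s.chars();
        // Equivalent of the deprecated slice_shift_char: first char plus the rest.
        let first = chars.next().unwrap();
        let rest = chars.as_str();
        assert_eq!(first, 'Ł');
        assert_eq!(rest, "ódź");
    }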
index 62ae7938e15e389b3133d5b20b5e09ce05c5768f..2226116585fcb072550d4fa6ce90faf43d8a5180 100644 (file)
@@ -59,15 +59,13 @@ use core::fmt;
 use core::hash;
 use core::iter::FromIterator;
 use core::mem;
-use core::ops::{self, Add};
+use core::ops::{self, Add, Index, IndexMut};
 use core::ptr;
-use core::slice;
 use core::str::pattern::Pattern;
 use rustc_unicode::char::{decode_utf16, REPLACEMENT_CHARACTER};
 use rustc_unicode::str as unicode_str;
 
-#[allow(deprecated)]
-use borrow::{Cow, IntoCow};
+use borrow::{Cow, ToOwned};
 use range::RangeArgument;
 use str::{self, FromStr, Utf8Error, Chars};
 use vec::Vec;
@@ -79,7 +77,7 @@ use boxed::Box;
 /// contents of the string. It has a close relationship with its borrowed
 /// counterpart, the primitive [`str`].
 ///
-/// [`str`]: ../primitive.str.html
+/// [`str`]: ../../std/primitive.str.html
 ///
 /// # Examples
 ///
@@ -99,7 +97,7 @@ use boxed::Box;
 /// hello.push_str("orld!");
 /// ```
 ///
-/// [`char`]: ../primitive.char.html
+/// [`char`]: ../../std/primitive.char.html
 /// [`push()`]: #method.push
 /// [`push_str()`]: #method.push_str
 ///
@@ -131,7 +129,7 @@ use boxed::Box;
 /// println!("The first letter of s is {}", s[0]); // ERROR!!!
 /// ```
 ///
-/// [`OsString`]: ../ffi/struct.OsString.html
+/// [`OsString`]: ../../std/ffi/struct.OsString.html
 ///
 /// Indexing is intended to be a constant-time operation, but UTF-8 encoding
 /// does not allow us to do this. Furthermore, it's not clear what sort of
@@ -156,8 +154,8 @@ use boxed::Box;
 /// takes_str(&s);
 /// ```
 ///
-/// [`&str`]: ../primitive.str.html
-/// [`Deref`]: ../ops/trait.Deref.html
+/// [`&str`]: ../../std/primitive.str.html
+/// [`Deref`]: ../../std/ops/trait.Deref.html
 ///
 /// This will create a [`&str`] from the `String` and pass it in. This
 /// conversion is very inexpensive, and so generally, functions will accept
@@ -280,10 +278,10 @@ pub struct String {
 /// an analogue to `FromUtf8Error`, and you can get one from a `FromUtf8Error`
 /// through the [`utf8_error()`] method.
 ///
-/// [`Utf8Error`]: ../str/struct.Utf8Error.html
-/// [`std::str`]: ../str/index.html
-/// [`u8`]: ../primitive.u8.html
-/// [`&str`]: ../primitive.str.html
+/// [`Utf8Error`]: ../../std/str/struct.Utf8Error.html
+/// [`std::str`]: ../../std/str/index.html
+/// [`u8`]: ../../std/primitive.u8.html
+/// [`&str`]: ../../std/primitive.str.html
 /// [`utf8_error()`]: #method.utf8_error
 ///
 /// # Examples
@@ -414,9 +412,9 @@ impl String {
     /// requires that it is valid UTF-8. `from_utf8()` checks to ensure that
     /// the bytes are valid UTF-8, and then does the conversion.
     ///
-    /// [`&str`]: ../primitive.str.html
-    /// [`u8`]: ../primitive.u8.html
-    /// [`Vec<u8>`]: ../vec/struct.Vec.html
+    /// [`&str`]: ../../std/primitive.str.html
+    /// [`u8`]: ../../std/primitive.u8.html
+    /// [`Vec<u8>`]: ../../std/vec/struct.Vec.html
     ///
     /// If you are sure that the byte slice is valid UTF-8, and you don't want
     /// to incur the overhead of the validity check, there is an unsafe version
@@ -431,7 +429,7 @@ impl String {
     /// If you need a `&str` instead of a `String`, consider
     /// [`str::from_utf8()`].
     ///
-    /// [`str::from_utf8()`]: ../str/fn.from_utf8.html
+    /// [`str::from_utf8()`]: ../../std/str/fn.from_utf8.html
     ///
     /// # Errors
     ///
@@ -488,8 +486,8 @@ impl String {
     /// `from_utf8_lossy()` will replace any invalid UTF-8 sequences with
     /// `U+FFFD REPLACEMENT CHARACTER`, which looks like this: �
     ///
-    /// [`u8`]: ../primitive.u8.html
-    /// [byteslice]: ../primitive.slice.html
+    /// [`u8`]: ../../std/primitive.u8.html
+    /// [byteslice]: ../../std/primitive.slice.html
     ///
     /// If you are sure that the byte slice is valid UTF-8, and you don't want
     /// to incur the overhead of the conversion, there is an unsafe version
@@ -504,7 +502,7 @@ impl String {
     /// it's already valid UTF-8, we don't need a new allocation. This return
     /// type allows us to handle both cases.
     ///
-    /// [`Cow<'a, str>`]: ../borrow/enum.Cow.html
+    /// [`Cow<'a, str>`]: ../../std/borrow/enum.Cow.html
     ///
     /// # Examples
     ///
@@ -971,22 +969,7 @@ impl String {
     pub fn push(&mut self, ch: char) {
         match ch.len_utf8() {
             1 => self.vec.push(ch as u8),
-            ch_len => {
-                let cur_len = self.len();
-                // This may use up to 4 bytes.
-                self.vec.reserve(ch_len);
-
-                unsafe {
-                    // Attempt to not use an intermediate buffer by just pushing bytes
-                    // directly onto this string.
-                    let slice = slice::from_raw_parts_mut(self.vec
-                                                              .as_mut_ptr()
-                                                              .offset(cur_len as isize),
-                                                          ch_len);
-                    let used = ch.encode_utf8(slice).unwrap_or(0);
-                    self.vec.set_len(cur_len + used);
-                }
-            }
+            _ => self.vec.extend_from_slice(ch.encode_utf8().as_slice()),
         }
     }
 
@@ -1014,7 +997,7 @@ impl String {
     /// Panics if `new_len` > current length, or if `new_len` does not lie on a
     /// [`char`] boundary.
     ///
-    /// [`char`]: ../primitive.char.html
+    /// [`char`]: ../../std/primitive.char.html
     ///
     /// # Examples
     ///
@@ -1054,14 +1037,13 @@ impl String {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn pop(&mut self) -> Option<char> {
-        let len = self.len();
-        if len == 0 {
-            return None;
-        }
-
-        let ch = self.char_at_reverse(len);
+        let ch = match self.chars().rev().next() {
+            Some(ch) => ch,
+            None => return None,
+        };
+        let newlen = self.len() - ch.len_utf8();
         unsafe {
-            self.vec.set_len(len - ch.len_utf8());
+            self.vec.set_len(newlen);
         }
         Some(ch)
     }
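The rewritten `pop` above walks back by the last char's UTF-8 length instead of calling the deprecated `char_at_reverse`. A short usage sketch, not part of the patch:

    fn main() {
        let mut s = String::from("föo");
        assert_eq!(s.pop(), Some('o'));  // 1-byte char
        assert_eq!(s.pop(), Some('ö'));  // 2-byte char, length shrinks by 2
        assert_eq!(s.pop(), Some('f'));
        assert_eq!(s.pop(), None);
    }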
@@ -1076,7 +1058,7 @@ impl String {
     /// Panics if `idx` is larger than or equal to the `String`'s length,
     /// or if it does not lie on a [`char`] boundary.
     ///
-    /// [`char`]: ../primitive.char.html
+    /// [`char`]: ../../std/primitive.char.html
     ///
     /// # Examples
     ///
@@ -1092,11 +1074,13 @@ impl String {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn remove(&mut self, idx: usize) -> char {
-        let len = self.len();
-        assert!(idx < len);
+        let ch = match self[idx..].chars().next() {
+            Some(ch) => ch,
+            None => panic!("cannot remove a char from the end of a string"),
+        };
 
-        let ch = self.char_at(idx);
         let next = idx + ch.len_utf8();
+        let len = self.len();
         unsafe {
             ptr::copy(self.vec.as_ptr().offset(next as isize),
                       self.vec.as_mut_ptr().offset(idx as isize),
@@ -1116,7 +1100,7 @@ impl String {
     /// Panics if `idx` is larger than the `String`'s length, or if it does not
     /// lie on a [`char`] boundary.
     ///
-    /// [`char`]: ../primitive.char.html
+    /// [`char`]: ../../std/primitive.char.html
     ///
     /// # Examples
     ///
@@ -1137,9 +1121,10 @@ impl String {
         let len = self.len();
         assert!(idx <= len);
         assert!(self.is_char_boundary(idx));
-        self.vec.reserve(4);
-        let mut bits = [0; 4];
-        let amt = ch.encode_utf8(&mut bits).unwrap();
+        let bits = ch.encode_utf8();
+        let bits = bits.as_slice();
+        let amt = bits.len();
+        self.vec.reserve(amt);
 
         unsafe {
             ptr::copy(self.vec.as_ptr().offset(idx as isize),
@@ -1255,7 +1240,7 @@ impl String {
     /// Panics if the starting point or end point do not lie on a [`char`]
     /// boundary, or if they're out of bounds.
     ///
-    /// [`char`]: ../primitive.char.html
+    /// [`char`]: ../../std/primitive.char.html
     ///
     /// # Examples
     ///
@@ -1353,10 +1338,10 @@ impl FromUtf8Error {
     /// an analogue to `FromUtf8Error`. See its documentation for more details
     /// on using it.
     ///
-    /// [`Utf8Error`]: ../str/struct.Utf8Error.html
-    /// [`std::str`]: ../str/index.html
-    /// [`u8`]: ../primitive.u8.html
-    /// [`&str`]: ../primitive.str.html
+    /// [`Utf8Error`]: ../../std/str/struct.Utf8Error.html
+    /// [`std::str`]: ../../std/str/index.html
+    /// [`u8`]: ../../std/primitive.u8.html
+    /// [`&str`]: ../../std/primitive.str.html
     ///
     /// # Examples
     ///
@@ -1404,35 +1389,35 @@ impl Clone for String {
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl FromIterator<char> for String {
-    fn from_iter<I: IntoIterator<Item = char>>(iterable: I) -> String {
+    fn from_iter<I: IntoIterator<Item = char>>(iter: I) -> String {
         let mut buf = String::new();
-        buf.extend(iterable);
+        buf.extend(iter);
         buf
     }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<'a> FromIterator<&'a str> for String {
-    fn from_iter<I: IntoIterator<Item = &'a str>>(iterable: I) -> String {
+    fn from_iter<I: IntoIterator<Item = &'a str>>(iter: I) -> String {
         let mut buf = String::new();
-        buf.extend(iterable);
+        buf.extend(iter);
         buf
     }
 }
 
 #[stable(feature = "extend_string", since = "1.4.0")]
 impl FromIterator<String> for String {
-    fn from_iter<I: IntoIterator<Item = String>>(iterable: I) -> String {
+    fn from_iter<I: IntoIterator<Item = String>>(iter: I) -> String {
         let mut buf = String::new();
-        buf.extend(iterable);
+        buf.extend(iter);
         buf
     }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl Extend<char> for String {
-    fn extend<I: IntoIterator<Item = char>>(&mut self, iterable: I) {
-        let iterator = iterable.into_iter();
+    fn extend<I: IntoIterator<Item = char>>(&mut self, iter: I) {
+        let iterator = iter.into_iter();
         let (lower_bound, _) = iterator.size_hint();
         self.reserve(lower_bound);
         for ch in iterator {
@@ -1443,15 +1428,15 @@ impl Extend<char> for String {
 
 #[stable(feature = "extend_ref", since = "1.2.0")]
 impl<'a> Extend<&'a char> for String {
-    fn extend<I: IntoIterator<Item = &'a char>>(&mut self, iterable: I) {
-        self.extend(iterable.into_iter().cloned());
+    fn extend<I: IntoIterator<Item = &'a char>>(&mut self, iter: I) {
+        self.extend(iter.into_iter().cloned());
     }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<'a> Extend<&'a str> for String {
-    fn extend<I: IntoIterator<Item = &'a str>>(&mut self, iterable: I) {
-        for s in iterable {
+    fn extend<I: IntoIterator<Item = &'a str>>(&mut self, iter: I) {
+        for s in iter {
             self.push_str(s)
         }
     }
@@ -1459,8 +1444,8 @@ impl<'a> Extend<&'a str> for String {
 
 #[stable(feature = "extend_string", since = "1.4.0")]
 impl Extend<String> for String {
-    fn extend<I: IntoIterator<Item = String>>(&mut self, iterable: I) {
-        for s in iterable {
+    fn extend<I: IntoIterator<Item = String>>(&mut self, iter: I) {
+        for s in iter {
             self.push_str(&s)
         }
     }
@@ -1606,6 +1591,24 @@ impl ops::Index<ops::RangeFull> for String {
         unsafe { str::from_utf8_unchecked(&self.vec) }
     }
 }
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl ops::Index<ops::RangeInclusive<usize>> for String {
+    type Output = str;
+
+    #[inline]
+    fn index(&self, index: ops::RangeInclusive<usize>) -> &str {
+        Index::index(&**self, index)
+    }
+}
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl ops::Index<ops::RangeToInclusive<usize>> for String {
+    type Output = str;
+
+    #[inline]
+    fn index(&self, index: ops::RangeToInclusive<usize>) -> &str {
+        Index::index(&**self, index)
+    }
+}
 
 #[stable(feature = "derefmut_for_string", since = "1.2.0")]
 impl ops::IndexMut<ops::Range<usize>> for String {
@@ -1635,6 +1638,20 @@ impl ops::IndexMut<ops::RangeFull> for String {
         unsafe { mem::transmute(&mut *self.vec) }
     }
 }
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl ops::IndexMut<ops::RangeInclusive<usize>> for String {
+    #[inline]
+    fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut str {
+        IndexMut::index_mut(&mut **self, index)
+    }
+}
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl ops::IndexMut<ops::RangeToInclusive<usize>> for String {
+    #[inline]
+    fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut str {
+        IndexMut::index_mut(&mut **self, index)
+    }
+}
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl ops::Deref for String {
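The new impls above allow a `String` to be indexed by inclusive ranges, still behind the `inclusive_range` feature at this point. A sketch of the call sites using the later-stabilized `..=` syntax, not part of the patch (the unstable syntax of the day spelled the ranges differently):

    fn main() {
        let s = String::from("hello");
        assert_eq!(&s[1..=3], "ell");  // RangeInclusive
        assert_eq!(&s[..=1], "he");    // RangeToInclusive
    }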
@@ -1663,9 +1680,9 @@ impl ops::DerefMut for String {
 /// [`String`] without error, this type will never actually be returned. As
 /// such, it is only here to satisfy said signature, and is useless otherwise.
 ///
-/// [`FromStr`]: ../str/trait.FromStr.html
+/// [`FromStr`]: ../../std/str/trait.FromStr.html
 /// [`String`]: struct.String.html
-/// [`from_str()`]: ../str/trait.FromStr.html#tymethod.from_str
+/// [`from_str()`]: ../../std/str/trait.FromStr.html#tymethod.from_str
 #[stable(feature = "str_parse_error", since = "1.5.0")]
 #[derive(Copy)]
 pub enum ParseError {}
@@ -1717,7 +1734,7 @@ impl Eq for ParseError {}
 /// [`Display`] should be implemented instead, and you get the `ToString`
 /// implementation for free.
 ///
-/// [`Display`]: ../fmt/trait.Display.html
+/// [`Display`]: ../../std/fmt/trait.Display.html
 #[stable(feature = "rust1", since = "1.0.0")]
 pub trait ToString {
     /// Converts the given value to a `String`.
@@ -1739,7 +1756,7 @@ pub trait ToString {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: fmt::Display + ?Sized> ToString for T {
     #[inline]
-    fn to_string(&self) -> String {
+    default fn to_string(&self) -> String {
         use core::fmt::Write;
         let mut buf = String::new();
         let _ = buf.write_fmt(format_args!("{}", self));
@@ -1748,6 +1765,14 @@ impl<T: fmt::Display + ?Sized> ToString for T {
     }
 }
 
+#[stable(feature = "str_to_string_specialization", since = "1.9.0")]
+impl ToString for str {
+    #[inline]
+    fn to_string(&self) -> String {
+        String::from(self)
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl AsRef<str> for String {
     #[inline]
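With the `default fn` above and the new `impl ToString for str`, `str::to_string` bypasses the formatting machinery and simply calls `String::from`. Observable behaviour is unchanged; a trivial sketch, not part of the patch:

    fn main() {
        let s = "hello".to_string();  // now routed through String::from(self)
        assert_eq!(s, String::from("hello"));
    }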
@@ -1766,20 +1791,8 @@ impl AsRef<[u8]> for String {
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<'a> From<&'a str> for String {
-    #[cfg(not(test))]
-    #[inline]
     fn from(s: &'a str) -> String {
-        String { vec: <[_]>::to_vec(s.as_bytes()) }
-    }
-
-    // HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
-    // required for this method definition, is not available. Since we don't
-    // require this method for testing purposes, I'll just stub it
-    // NB see the slice::hack module in slice.rs for more information
-    #[inline]
-    #[cfg(test)]
-    fn from(_: &str) -> String {
-        panic!("not available with cfg(test)");
+        s.to_owned()
     }
 }
 
@@ -1806,26 +1819,6 @@ impl Into<Vec<u8>> for String {
     }
 }
 
-#[unstable(feature = "into_cow", reason = "may be replaced by `convert::Into`",
-           issue= "27735")]
-#[allow(deprecated)]
-impl IntoCow<'static, str> for String {
-    #[inline]
-    fn into_cow(self) -> Cow<'static, str> {
-        Cow::Owned(self)
-    }
-}
-
-#[unstable(feature = "into_cow", reason = "may be replaced by `convert::Into`",
-           issue = "27735")]
-#[allow(deprecated)]
-impl<'a> IntoCow<'a, str> for &'a str {
-    #[inline]
-    fn into_cow(self) -> Cow<'a, str> {
-        Cow::Borrowed(self)
-    }
-}
-
 #[stable(feature = "rust1", since = "1.0.0")]
 impl fmt::Write for String {
     #[inline]
index ae442e155c0d07b923eca9bbc5d008374ac3b1f4..dde5cbb508e1bab661e1337371c9d9b8d00e8dd9 100644 (file)
@@ -63,10 +63,11 @@ use alloc::boxed::Box;
 use alloc::heap::EMPTY;
 use alloc::raw_vec::RawVec;
 use borrow::ToOwned;
+use borrow::Cow;
 use core::cmp::Ordering;
 use core::fmt;
 use core::hash::{self, Hash};
-use core::intrinsics::{arith_offset, assume, needs_drop};
+use core::intrinsics::{arith_offset, assume};
 use core::iter::FromIterator;
 use core::mem;
 use core::ops::{Index, IndexMut};
@@ -74,9 +75,6 @@ use core::ops;
 use core::ptr;
 use core::slice;
 
-#[allow(deprecated)]
-use borrow::{Cow, IntoCow};
-
 use super::range::RangeArgument;
 
 /// A contiguous growable array type, written `Vec<T>` but pronounced 'vector.'
@@ -497,10 +495,11 @@ impl<T> Vec<T> {
         unsafe {
             // drop any extra elements
             while len < self.len {
-                // decrement len before the read(), so a panic on Drop doesn't
-                // re-drop the just-failed value.
+                // decrement len before the drop_in_place(), so a panic on Drop
+                // doesn't re-drop the just-failed value.
                 self.len -= 1;
-                ptr::read(self.get_unchecked(self.len));
+                let len = self.len;
+                ptr::drop_in_place(self.get_unchecked_mut(len));
             }
         }
     }
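The hunk above makes `truncate` drop excess elements in place via `drop_in_place` rather than `ptr::read`. From the caller's side the contract is unchanged: the tail is dropped, the length shrinks, and the capacity is untouched. A small sketch, not part of the patch:

    fn main() {
        let mut v = vec![String::from("a"), String::from("b"), String::from("c")];
        let cap = v.capacity();
        v.truncate(1);                 // "b" and "c" are dropped here
        assert_eq!(v, ["a"]);
        assert_eq!(v.capacity(), cap); // capacity is left as-is
    }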
@@ -966,17 +965,6 @@ impl<T: Clone> Vec<T> {
         }
     }
 
-    #[allow(missing_docs)]
-    #[inline]
-    #[unstable(feature = "vec_push_all",
-               reason = "likely to be replaced by a more optimized extend",
-               issue = "27744")]
-    #[rustc_deprecated(reason = "renamed to extend_from_slice",
-                       since = "1.6.0")]
-    pub fn push_all(&mut self, other: &[T]) {
-        self.extend_from_slice(other)
-    }
-
     /// Appends all elements in a slice to the `Vec`.
     ///
     /// Iterates over the slice `other`, clones each element, and then appends
@@ -1225,6 +1213,24 @@ impl<T> ops::Index<ops::RangeFull> for Vec<T> {
         self
     }
 }
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl<T> ops::Index<ops::RangeInclusive<usize>> for Vec<T> {
+    type Output = [T];
+
+    #[inline]
+    fn index(&self, index: ops::RangeInclusive<usize>) -> &[T] {
+        Index::index(&**self, index)
+    }
+}
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl<T> ops::Index<ops::RangeToInclusive<usize>> for Vec<T> {
+    type Output = [T];
+
+    #[inline]
+    fn index(&self, index: ops::RangeToInclusive<usize>) -> &[T] {
+        Index::index(&**self, index)
+    }
+}
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T> ops::IndexMut<ops::Range<usize>> for Vec<T> {
@@ -1254,6 +1260,20 @@ impl<T> ops::IndexMut<ops::RangeFull> for Vec<T> {
         self
     }
 }
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl<T> ops::IndexMut<ops::RangeInclusive<usize>> for Vec<T> {
+    #[inline]
+    fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut [T] {
+        IndexMut::index_mut(&mut **self, index)
+    }
+}
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl<T> ops::IndexMut<ops::RangeToInclusive<usize>> for Vec<T> {
+    #[inline]
+    fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut [T] {
+        IndexMut::index_mut(&mut **self, index)
+    }
+}
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T> ops::Deref for Vec<T> {
@@ -1282,13 +1302,13 @@ impl<T> ops::DerefMut for Vec<T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T> FromIterator<T> for Vec<T> {
     #[inline]
-    fn from_iter<I: IntoIterator<Item = T>>(iterable: I) -> Vec<T> {
+    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Vec<T> {
         // Unroll the first iteration, as the vector is going to be
         // expanded on this iteration in every case when the iterable is not
         // empty, but the loop in extend_desugared() is not going to see the
         // vector being full in the few subsequent loop iterations.
         // So we get better branch prediction.
-        let mut iterator = iterable.into_iter();
+        let mut iterator = iter.into_iter();
         let mut vector = match iterator.next() {
             None => return Vec::new(),
             Some(element) => {
@@ -1369,8 +1389,8 @@ impl<'a, T> IntoIterator for &'a mut Vec<T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T> Extend<T> for Vec<T> {
     #[inline]
-    fn extend<I: IntoIterator<Item = T>>(&mut self, iterable: I) {
-        self.extend_desugared(iterable.into_iter())
+    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+        self.extend_desugared(iter.into_iter())
     }
 }
 
@@ -1471,13 +1491,8 @@ impl<T> Drop for Vec<T> {
     fn drop(&mut self) {
         if self.buf.unsafe_no_drop_flag_needs_drop() {
             unsafe {
-                // The branch on needs_drop() is an -O1 performance optimization.
-                // Without the branch, dropping Vec<u8> takes linear time.
-                if needs_drop::<T>() {
-                    for x in self.iter_mut() {
-                        ptr::drop_in_place(x);
-                    }
-                }
+                // use drop for [T]
+                ptr::drop_in_place(&mut self[..]);
             }
         }
         // RawVec handles deallocation
@@ -1570,22 +1585,6 @@ impl<'a, T> FromIterator<T> for Cow<'a, [T]> where T: Clone {
     }
 }
 
-#[stable(feature = "rust1", since = "1.0.0")]
-#[allow(deprecated)]
-impl<'a, T: 'a> IntoCow<'a, [T]> for Vec<T> where T: Clone {
-    fn into_cow(self) -> Cow<'a, [T]> {
-        Cow::Owned(self)
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[allow(deprecated)]
-impl<'a, T> IntoCow<'a, [T]> for &'a [T] where T: Clone {
-    fn into_cow(self) -> Cow<'a, [T]> {
-        Cow::Borrowed(self)
-    }
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // Iterators
 ////////////////////////////////////////////////////////////////////////////////
index f34fe2da7e84b21541fab615fb79b96a25ba1d71..9e2b25d178fb930619ee4ecb867ec32a93542394 100644 (file)
@@ -70,7 +70,12 @@ impl<T: Clone> Clone for VecDeque<T> {
 impl<T> Drop for VecDeque<T> {
     #[unsafe_destructor_blind_to_params]
     fn drop(&mut self) {
-        self.clear();
+        let (front, back) = self.as_mut_slices();
+        unsafe {
+            // use drop for [T]
+            ptr::drop_in_place(front);
+            ptr::drop_in_place(back);
+        }
         // RawVec handles deallocation
     }
 }
@@ -2052,8 +2057,8 @@ impl<A> IndexMut<usize> for VecDeque<A> {
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<A> FromIterator<A> for VecDeque<A> {
-    fn from_iter<T: IntoIterator<Item = A>>(iterable: T) -> VecDeque<A> {
-        let iterator = iterable.into_iter();
+    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> VecDeque<A> {
+        let iterator = iter.into_iter();
         let (lower, _) = iterator.size_hint();
         let mut deq = VecDeque::with_capacity(lower);
         deq.extend(iterator);
index 7f368f0205b922f1b45c56e3a2e627f3cbacd77b..619bc189e6c09855d4dd24a7099d889f56a0acb9 100644 (file)
@@ -114,6 +114,21 @@ fn test_iter_rev() {
     test(size, map.into_iter().rev());
 }
 
+#[test]
+fn test_values_mut() {
+    let mut a = BTreeMap::new();
+    a.insert(1, String::from("hello"));
+    a.insert(2, String::from("goodbye"));
+
+    for value in a.values_mut() {
+        value.push_str("!");
+    }
+
+    let values: Vec<String> = a.values().cloned().collect();
+    assert_eq!(values, [String::from("hello!"),
+                        String::from("goodbye!")]);
+}
+
 #[test]
 fn test_iter_mixed() {
     let size = 10000;
@@ -395,6 +410,42 @@ fn test_variance() {
     fn vals<'a, 'new>(v: Values<'a, (), &'static str>) -> Values<'a, (), &'new str> { v }
 }
 
+#[test]
+fn test_occupied_entry_key() {
+    let mut a = BTreeMap::new();
+    let key = "hello there";
+    let value = "value goes here";
+    assert!(a.is_empty());
+    a.insert(key.clone(), value.clone());
+    assert_eq!(a.len(), 1);
+    assert_eq!(a[key], value);
+
+    match a.entry(key.clone()) {
+        Vacant(_) => panic!(),
+        Occupied(e) => assert_eq!(key, *e.key()),
+    }
+    assert_eq!(a.len(), 1);
+    assert_eq!(a[key], value);
+}
+
+#[test]
+fn test_vacant_entry_key() {
+    let mut a = BTreeMap::new();
+    let key = "hello there";
+    let value = "value goes here";
+
+    assert!(a.is_empty());
+    match a.entry(key.clone()) {
+        Occupied(_) => panic!(),
+        Vacant(e) => {
+            assert_eq!(key, *e.key());
+            e.insert(value.clone());
+        },
+    }
+    assert_eq!(a.len(), 1);
+    assert_eq!(a[key], value);
+}
+
 mod bench {
     use std::collections::BTreeMap;
     use std::__rand::{Rng, thread_rng};
index eed530d8b61cd607d3a5745f843a6c49b40ff43c..211942f2294ecd165246ecb95e43dd7782cdbaca 100644 (file)
 
 #![deny(warnings)]
 
-#![feature(ascii)]
 #![feature(binary_heap_extras)]
 #![feature(box_syntax)]
 #![feature(btree_range)]
 #![feature(collections)]
 #![feature(collections_bound)]
-#![feature(copy_from_slice)]
 #![feature(const_fn)]
 #![feature(fn_traits)]
 #![feature(enumset)]
 #![feature(iter_arith)]
+#![feature(map_entry_keys)]
+#![feature(map_values_mut)]
 #![feature(pattern)]
 #![feature(rand)]
-#![feature(rustc_private)]
-#![feature(set_recovery)]
-#![feature(slice_bytes)]
 #![feature(step_by)]
 #![feature(str_char)]
 #![feature(str_escape)]
@@ -33,8 +30,6 @@
 #![feature(unboxed_closures)]
 #![feature(unicode)]
 
-#[macro_use] extern crate log;
-
 extern crate collections;
 extern crate test;
 extern crate rustc_unicode;
index f3bb58ba45b7effd4d3331634a180702bd0ff3fb..236c151891d11880d0d2da424ac29bc8247c7531 100644 (file)
@@ -267,9 +267,9 @@ fn test_swap_remove_fail() {
 fn test_swap_remove_noncopyable() {
     // Tests that we don't accidentally run destructors twice.
     let mut v: Vec<Box<_>> = Vec::new();
-    v.push(box 0u8);
-    v.push(box 0u8);
-    v.push(box 0u8);
+    v.push(box 0);
+    v.push(box 0);
+    v.push(box 0);
     let mut _e = v.swap_remove(0);
     assert_eq!(v.len(), 2);
     _e = v.swap_remove(1);
@@ -574,18 +574,48 @@ fn test_slice_2() {
     assert_eq!(v[1], 3);
 }
 
+macro_rules! assert_order {
+    (Greater, $a:expr, $b:expr) => {
+        assert_eq!($a.cmp($b), Greater);
+        assert!($a > $b);
+    };
+    (Less, $a:expr, $b:expr) => {
+        assert_eq!($a.cmp($b), Less);
+        assert!($a < $b);
+    };
+    (Equal, $a:expr, $b:expr) => {
+        assert_eq!($a.cmp($b), Equal);
+        assert_eq!($a, $b);
+    }
+}
+
+#[test]
+fn test_total_ord_u8() {
+    let c = &[1u8, 2, 3];
+    assert_order!(Greater, &[1u8, 2, 3, 4][..], &c[..]);
+    let c = &[1u8, 2, 3, 4];
+    assert_order!(Less, &[1u8, 2, 3][..], &c[..]);
+    let c = &[1u8, 2, 3, 6];
+    assert_order!(Equal, &[1u8, 2, 3, 6][..], &c[..]);
+    let c = &[1u8, 2, 3, 4, 5, 6];
+    assert_order!(Less, &[1u8, 2, 3, 4, 5, 5, 5, 5][..], &c[..]);
+    let c = &[1u8, 2, 3, 4];
+    assert_order!(Greater, &[2u8, 2][..], &c[..]);
+}
+
+
 #[test]
-fn test_total_ord() {
+fn test_total_ord_i32() {
     let c = &[1, 2, 3];
-    [1, 2, 3, 4][..].cmp(c) == Greater;
+    assert_order!(Greater, &[1, 2, 3, 4][..], &c[..]);
     let c = &[1, 2, 3, 4];
-    [1, 2, 3][..].cmp(c) == Less;
+    assert_order!(Less, &[1, 2, 3][..], &c[..]);
     let c = &[1, 2, 3, 6];
-    [1, 2, 3, 4][..].cmp(c) == Equal;
+    assert_order!(Equal, &[1, 2, 3, 6][..], &c[..]);
     let c = &[1, 2, 3, 4, 5, 6];
-    [1, 2, 3, 4, 5, 5, 5, 5][..].cmp(c) == Less;
+    assert_order!(Less, &[1, 2, 3, 4, 5, 5, 5, 5][..], &c[..]);
     let c = &[1, 2, 3, 4];
-    [2, 2][..].cmp(c) == Greater;
+    assert_order!(Greater, &[2, 2][..], &c[..]);
 }
 
 #[test]
@@ -865,18 +895,6 @@ fn test_vec_default() {
     t!(Vec<i32>);
 }
 
-#[test]
-#[allow(deprecated)]
-fn test_bytes_set_memory() {
-    use std::slice::bytes::MutableByteVector;
-
-    let mut values = [1,2,3,4,5];
-    values[0..5].set_memory(0xAB);
-    assert!(values == [0xAB, 0xAB, 0xAB, 0xAB, 0xAB]);
-    values[2..4].set_memory(0xFF);
-    assert!(values == [0xAB, 0xAB, 0xFF, 0xFF, 0xAB]);
-}
-
 #[test]
 #[should_panic]
 fn test_overflow_does_not_cause_segfault() {
@@ -896,7 +914,7 @@ fn test_overflow_does_not_cause_segfault_managed() {
 
 #[test]
 fn test_mut_split_at() {
-    let mut values = [1u8,2,3,4,5];
+    let mut values = [1,2,3,4,5];
     {
         let (left, right) = values.split_at_mut(2);
         {
index 25457043a9df45dd5a5274ae83e9c184b351a240..929ac7a52ab2786a3a0945a7237c37db9c7767d5 100644 (file)
@@ -346,6 +346,26 @@ fn test_slice_fail() {
     &"中华Việt Nam"[0..2];
 }
 
+const LOREM_PARAGRAPH: &'static str = "\
+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem sit amet dolor \
+ultricies condimentum. Praesent iaculis purus elit, ac malesuada quam malesuada in. Duis sed orci \
+eros. Suspendisse sit amet magna mollis, mollis nunc luctus, imperdiet mi. Integer fringilla non \
+sem ut lacinia. Fusce varius tortor a risus porttitor hendrerit. Morbi mauris dui, ultricies nec \
+tempus vel, gravida nec quam.";
+
+// check the panic includes the prefix of the sliced string
+#[test]
+#[should_panic(expected="Lorem ipsum dolor sit amet")]
+fn test_slice_fail_truncated_1() {
+    &LOREM_PARAGRAPH[..1024];
+}
+// check the truncation in the panic message
+#[test]
+#[should_panic(expected="luctus, im`[...] do not lie on character boundary")]
+fn test_slice_fail_truncated_2() {
+    &LOREM_PARAGRAPH[..1024];
+}
+
 #[test]
 fn test_slice_from() {
     assert_eq!(&"abcd"[0..], "abcd");
@@ -444,12 +464,14 @@ fn test_is_whitespace() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_slice_shift_char() {
     let data = "ประเทศไทย中";
     assert_eq!(data.slice_shift_char(), Some(('ป', "ระเทศไทย中")));
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_slice_shift_char_2() {
     let empty = "";
     assert_eq!(empty.slice_shift_char(), None);
@@ -606,8 +628,6 @@ fn vec_str_conversions() {
     while i < n1 {
         let a: u8 = s1.as_bytes()[i];
         let b: u8 = s2.as_bytes()[i];
-        debug!("{}", a);
-        debug!("{}", b);
         assert_eq!(a, b);
         i += 1;
     }
@@ -639,6 +659,7 @@ fn test_contains_char() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_char_at() {
     let s = "ศไทย中华Việt Nam";
     let v = vec!['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m'];
@@ -650,6 +671,7 @@ fn test_char_at() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_char_at_reverse() {
     let s = "ศไทย中华Việt Nam";
     let v = vec!['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m'];
@@ -727,6 +749,7 @@ fn test_total_ord() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_char_range_at() {
     let data = "b¢€𤭢𤭢€¢b";
     assert_eq!('b', data.char_range_at(0).ch);
@@ -740,6 +763,7 @@ fn test_char_range_at() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_char_range_at_reverse_underflow() {
     assert_eq!("abc".char_range_at_reverse(0).next, 0);
 }
@@ -776,10 +800,9 @@ fn test_rev_iterator() {
 
 #[test]
 fn test_chars_decoding() {
-    let mut bytes = [0; 4];
     for c in (0..0x110000).filter_map(::std::char::from_u32) {
-        let len = c.encode_utf8(&mut bytes).unwrap_or(0);
-        let s = ::std::str::from_utf8(&bytes[..len]).unwrap();
+        let bytes = c.encode_utf8();
+        let s = ::std::str::from_utf8(bytes.as_slice()).unwrap();
         if Some(c) != s.chars().next() {
             panic!("character {:x}={} does not decode correctly", c as u32, c);
         }
@@ -788,10 +811,9 @@ fn test_chars_decoding() {
 
 #[test]
 fn test_chars_rev_decoding() {
-    let mut bytes = [0; 4];
     for c in (0..0x110000).filter_map(::std::char::from_u32) {
-        let len = c.encode_utf8(&mut bytes).unwrap_or(0);
-        let s = ::std::str::from_utf8(&bytes[..len]).unwrap();
+        let bytes = c.encode_utf8();
+        let s = ::std::str::from_utf8(bytes.as_slice()).unwrap();
         if Some(c) != s.chars().rev().next() {
             panic!("character {:x}={} does not decode correctly", c as u32, c);
         }
@@ -1508,6 +1530,19 @@ generate_iterator_test! {
     with str::rsplitn;
 }
 
+#[test]
+fn different_str_pattern_forwarding_lifetimes() {
+    use std::str::pattern::Pattern;
+
+    fn foo<'a, P>(p: P) where for<'b> &'b P: Pattern<'a> {
+        for _ in 0..3 {
+            "asdf".find(&p);
+        }
+    }
+
+    foo::<&str>("x");
+}
+
 mod bench {
     use test::{Bencher, black_box};
 
index 742205df8d794c3ad4d2ac3509c2c20e425e6851..95675a2423e19d51aad34e3a68e83c58e464321c 100644 (file)
@@ -45,10 +45,6 @@ fn test_simple() {
     assert_eq!(d.len(), 3);
     d.push_front(1);
     assert_eq!(d.len(), 4);
-    debug!("{}", d[0]);
-    debug!("{}", d[1]);
-    debug!("{}", d[2]);
-    debug!("{}", d[3]);
     assert_eq!(d[0], 1);
     assert_eq!(d[1], 2);
     assert_eq!(d[2], 3);
index 24455a1d841bd9be4c2009ece6d16f5c074166de..98f941f0057a36ce0e67eacf3361b3530b1c2fad 100644 (file)
@@ -2,6 +2,7 @@
 authors = ["The Rust Project Developers"]
 name = "core"
 version = "0.0.0"
+build = "build.rs"
 
 [lib]
 name = "core"
index cb9bf935cdb58b9608cce14250419de9524fb58e..dfd2ba9154d53eceb71ec17fbb6ddf46c84a55f6 100644 (file)
@@ -22,7 +22,7 @@
 //! Note that &Any is limited to testing whether a value is of a specified
 //! concrete type, and cannot be used to test whether a type implements a trait.
 //!
-//! [`Box`]: ../boxed/struct.Box.html
+//! [`Box`]: ../../std/boxed/struct.Box.html
 //!
 //! # Examples
 //!
index 0c5eada21655c001a8878d79b9492b784fa1e134..45fc5ff80093a99176599d4f760e2a4e1ce45610 100644 (file)
@@ -12,7 +12,7 @@
 //! up to a certain length. Eventually we should be able to generalize
 //! to all lengths.
 //!
-//! *[See also the array primitive type](../primitive.array.html).*
+//! *[See also the array primitive type](../../std/primitive.array.html).*
 
 #![unstable(feature = "fixed_size_array",
             reason = "traits and impls are better expressed through generic \
diff --git a/src/libcore/build.rs b/src/libcore/build.rs
new file mode 100644 (file)
index 0000000..a991ac0
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+    // Remove this whenever snapshots and rustbuild nightlies are synced.
+    println!("cargo:rustc-cfg=cargobuild");
+}
index 255c846244bdcfa1391524d58dfa0a7cc9ec1778..a1c7a293af0b3d759a8b828c1a74b0e0a3ee81c7 100644 (file)
 use clone::Clone;
 use cmp::{PartialEq, Eq};
 use default::Default;
-use marker::{Copy, Send, Sync, Sized};
-use ops::{Deref, DerefMut, Drop, FnOnce};
+use marker::{Copy, Send, Sync, Sized, Unsize};
+use ops::{Deref, DerefMut, Drop, FnOnce, CoerceUnsized};
 use option::Option;
 use option::Option::{None, Some};
 
@@ -216,10 +216,6 @@ impl<T:Copy> Cell<T> {
 
     /// Returns a reference to the underlying `UnsafeCell`.
     ///
-    /// # Safety
-    ///
-    /// This function is `unsafe` because `UnsafeCell`'s field is public.
-    ///
     /// # Examples
     ///
     /// ```
@@ -229,11 +225,11 @@ impl<T:Copy> Cell<T> {
     ///
     /// let c = Cell::new(5);
     ///
-    /// let uc = unsafe { c.as_unsafe_cell() };
+    /// let uc = c.as_unsafe_cell();
     /// ```
     #[inline]
     #[unstable(feature = "as_unsafe_cell", issue = "27708")]
-    pub unsafe fn as_unsafe_cell(&self) -> &UnsafeCell<T> {
+    pub fn as_unsafe_cell(&self) -> &UnsafeCell<T> {
         &self.value
     }
 }
@@ -241,6 +237,9 @@ impl<T:Copy> Cell<T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 unsafe impl<T> Send for Cell<T> where T: Send {}
 
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> !Sync for Cell<T> {}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T:Copy> Clone for Cell<T> {
     #[inline]
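The explicit `!Sync` impls above document that `Cell` and `RefCell` provide interior mutability only within a single thread, and `as_unsafe_cell` loses its `unsafe`. A minimal sketch of the shared-reference mutation that `Cell` permits, not part of the patch:

    use std::cell::Cell;

    fn main() {
        let c = Cell::new(5);
        let r = &c;       // a shared reference is enough to mutate
        r.set(10);
        assert_eq!(c.get(), 10);
    }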
@@ -391,8 +390,8 @@ impl<T: ?Sized> RefCell<T> {
     pub fn borrow(&self) -> Ref<T> {
         match BorrowRef::new(&self.borrow) {
             Some(b) => Ref {
-                _value: unsafe { &*self.value.get() },
-                _borrow: b,
+                value: unsafe { &*self.value.get() },
+                borrow: b,
             },
             None => panic!("RefCell<T> already mutably borrowed"),
         }
@@ -439,8 +438,8 @@ impl<T: ?Sized> RefCell<T> {
     pub fn borrow_mut(&self) -> RefMut<T> {
         match BorrowRefMut::new(&self.borrow) {
             Some(b) => RefMut {
-                _value: unsafe { &mut *self.value.get() },
-                _borrow: b,
+                value: unsafe { &mut *self.value.get() },
+                borrow: b,
             },
             None => panic!("RefCell<T> already borrowed"),
         }
@@ -461,6 +460,9 @@ impl<T: ?Sized> RefCell<T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 unsafe impl<T: ?Sized> Send for RefCell<T> where T: Send {}
 
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !Sync for RefCell<T> {}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: Clone> Clone for RefCell<T> {
     #[inline]
@@ -489,7 +491,7 @@ impl<T: ?Sized + PartialEq> PartialEq for RefCell<T> {
 impl<T: ?Sized + Eq> Eq for RefCell<T> {}
 
 struct BorrowRef<'b> {
-    _borrow: &'b Cell<BorrowFlag>,
+    borrow: &'b Cell<BorrowFlag>,
 }
 
 impl<'b> BorrowRef<'b> {
@@ -499,7 +501,7 @@ impl<'b> BorrowRef<'b> {
             WRITING => None,
             b => {
                 borrow.set(b + 1);
-                Some(BorrowRef { _borrow: borrow })
+                Some(BorrowRef { borrow: borrow })
             },
         }
     }
@@ -508,9 +510,9 @@ impl<'b> BorrowRef<'b> {
 impl<'b> Drop for BorrowRef<'b> {
     #[inline]
     fn drop(&mut self) {
-        let borrow = self._borrow.get();
+        let borrow = self.borrow.get();
         debug_assert!(borrow != WRITING && borrow != UNUSED);
-        self._borrow.set(borrow - 1);
+        self.borrow.set(borrow - 1);
     }
 }
 
@@ -519,10 +521,10 @@ impl<'b> Clone for BorrowRef<'b> {
     fn clone(&self) -> BorrowRef<'b> {
         // Since this Ref exists, we know the borrow flag
         // is not set to WRITING.
-        let borrow = self._borrow.get();
+        let borrow = self.borrow.get();
         debug_assert!(borrow != WRITING && borrow != UNUSED);
-        self._borrow.set(borrow + 1);
-        BorrowRef { _borrow: self._borrow }
+        self.borrow.set(borrow + 1);
+        BorrowRef { borrow: self.borrow }
     }
 }
 
@@ -532,10 +534,8 @@ impl<'b> Clone for BorrowRef<'b> {
 /// See the [module-level documentation](index.html) for more.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Ref<'b, T: ?Sized + 'b> {
-    // FIXME #12808: strange name to try to avoid interfering with
-    // field accesses of the contained type via Deref
-    _value: &'b T,
-    _borrow: BorrowRef<'b>,
+    value: &'b T,
+    borrow: BorrowRef<'b>,
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -544,7 +544,7 @@ impl<'b, T: ?Sized> Deref for Ref<'b, T> {
 
     #[inline]
     fn deref(&self) -> &T {
-        self._value
+        self.value
     }
 }
 
@@ -563,8 +563,8 @@ impl<'b, T: ?Sized> Ref<'b, T> {
     #[inline]
     pub fn clone(orig: &Ref<'b, T>) -> Ref<'b, T> {
         Ref {
-            _value: orig._value,
-            _borrow: orig._borrow.clone(),
+            value: orig.value,
+            borrow: orig.borrow.clone(),
         }
     }
 
@@ -592,8 +592,8 @@ impl<'b, T: ?Sized> Ref<'b, T> {
         where F: FnOnce(&T) -> &U
     {
         Ref {
-            _value: f(orig._value),
-            _borrow: orig._borrow,
+            value: f(orig.value),
+            borrow: orig.borrow,
         }
     }
 
@@ -619,18 +619,22 @@ impl<'b, T: ?Sized> Ref<'b, T> {
     /// ```
     #[unstable(feature = "cell_extras", reason = "recently added",
                issue = "27746")]
-    #[rustc_deprecated(since = "1.8.0", reason = "can be built on Ref::map")]
+    #[rustc_deprecated(since = "1.8.0", reason = "can be built on `Ref::map`: \
+        https://crates.io/crates/ref_filter_map")]
     #[inline]
     pub fn filter_map<U: ?Sized, F>(orig: Ref<'b, T>, f: F) -> Option<Ref<'b, U>>
         where F: FnOnce(&T) -> Option<&U>
     {
-        f(orig._value).map(move |new| Ref {
-            _value: new,
-            _borrow: orig._borrow,
+        f(orig.value).map(move |new| Ref {
+            value: new,
+            borrow: orig.borrow,
         })
     }
 }
 
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'b, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Ref<'b, U>> for Ref<'b, T> {}
+
 impl<'b, T: ?Sized> RefMut<'b, T> {
     /// Make a new `RefMut` for a component of the borrowed data, e.g. an enum
     /// variant.
@@ -661,8 +665,8 @@ impl<'b, T: ?Sized> RefMut<'b, T> {
         where F: FnOnce(&mut T) -> &mut U
     {
         RefMut {
-            _value: f(orig._value),
-            _borrow: orig._borrow,
+            value: f(orig.value),
+            borrow: orig.borrow,
         }
     }
 
@@ -694,29 +698,30 @@ impl<'b, T: ?Sized> RefMut<'b, T> {
     /// ```
     #[unstable(feature = "cell_extras", reason = "recently added",
                issue = "27746")]
-    #[rustc_deprecated(since = "1.8.0", reason = "can be built on RefMut::map")]
+    #[rustc_deprecated(since = "1.8.0", reason = "can be built on `RefMut::map`: \
+        https://crates.io/crates/ref_filter_map")]
     #[inline]
     pub fn filter_map<U: ?Sized, F>(orig: RefMut<'b, T>, f: F) -> Option<RefMut<'b, U>>
         where F: FnOnce(&mut T) -> Option<&mut U>
     {
-        let RefMut { _value, _borrow } = orig;
-        f(_value).map(move |new| RefMut {
-            _value: new,
-            _borrow: _borrow,
+        let RefMut { value, borrow } = orig;
+        f(value).map(move |new| RefMut {
+            value: new,
+            borrow: borrow,
         })
     }
 }
 
 struct BorrowRefMut<'b> {
-    _borrow: &'b Cell<BorrowFlag>,
+    borrow: &'b Cell<BorrowFlag>,
 }
 
 impl<'b> Drop for BorrowRefMut<'b> {
     #[inline]
     fn drop(&mut self) {
-        let borrow = self._borrow.get();
+        let borrow = self.borrow.get();
         debug_assert!(borrow == WRITING);
-        self._borrow.set(UNUSED);
+        self.borrow.set(UNUSED);
     }
 }
 
@@ -726,7 +731,7 @@ impl<'b> BorrowRefMut<'b> {
         match borrow.get() {
             UNUSED => {
                 borrow.set(WRITING);
-                Some(BorrowRefMut { _borrow: borrow })
+                Some(BorrowRefMut { borrow: borrow })
             },
             _ => None,
         }
@@ -738,10 +743,8 @@ impl<'b> BorrowRefMut<'b> {
 /// See the [module-level documentation](index.html) for more.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct RefMut<'b, T: ?Sized + 'b> {
-    // FIXME #12808: strange name to try to avoid interfering with
-    // field accesses of the contained type via Deref
-    _value: &'b mut T,
-    _borrow: BorrowRefMut<'b>,
+    value: &'b mut T,
+    borrow: BorrowRefMut<'b>,
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -750,7 +753,7 @@ impl<'b, T: ?Sized> Deref for RefMut<'b, T> {
 
     #[inline]
     fn deref(&self) -> &T {
-        self._value
+        self.value
     }
 }
 
@@ -758,10 +761,13 @@ impl<'b, T: ?Sized> Deref for RefMut<'b, T> {
 impl<'b, T: ?Sized> DerefMut for RefMut<'b, T> {
     #[inline]
     fn deref_mut(&mut self) -> &mut T {
-        self._value
+        self.value
     }
 }
 
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'b, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<RefMut<'b, U>> for RefMut<'b, T> {}
+
 /// The core primitive for interior mutability in Rust.
 ///
 /// `UnsafeCell<T>` is a type that wraps some `T` and indicates unsafe interior operations on the
index 0c3807d8ca0b5420389cd838edd6fe959c575bf1..b2b1dc5178e2e1aa0b18cf014e72a8f3a44e0686 100644 (file)
@@ -69,7 +69,7 @@ const MAX_THREE_B: u32 =  0x10000;
 /// Point], but only ones within a certain range. `MAX` is the highest valid
 /// code point that's a valid [Unicode Scalar Value].
 ///
-/// [`char`]: ../primitive.char.html
+/// [`char`]: ../../std/primitive.char.html
 /// [Unicode Scalar Value]: http://www.unicode.org/glossary/#unicode_scalar_value
 /// [Code Point]: http://www.unicode.org/glossary/#code_point
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -91,8 +91,8 @@ pub const MAX: char = '\u{10ffff}';
 /// [`char`]s. `from_u32()` will return `None` if the input is not a valid value
 /// for a [`char`].
 ///
-/// [`char`]: ../primitive.char.html
-/// [`u32`]: ../primitive.u32.html
+/// [`char`]: ../../std/primitive.char.html
+/// [`u32`]: ../../std/primitive.u32.html
 /// [`as`]: ../../book/casting-between-types.html#as
 ///
 /// For an unsafe version of this function which ignores these checks, see
@@ -148,8 +148,8 @@ pub fn from_u32(i: u32) -> Option<char> {
 /// [`char`]s. `from_u32_unchecked()` will ignore this, and blindly cast to
 /// [`char`], possibly creating an invalid one.
 ///
-/// [`char`]: ../primitive.char.html
-/// [`u32`]: ../primitive.u32.html
+/// [`char`]: ../../std/primitive.char.html
+/// [`u32`]: ../../std/primitive.u32.html
 /// [`as`]: ../../book/casting-between-types.html#as
 ///
 /// # Safety
@@ -255,7 +255,7 @@ pub fn from_digit(num: u32, radix: u32) -> Option<char> {
 #[doc(hidden)]
 #[unstable(feature = "core_char_ext",
            reason = "the stable interface is `impl char` in later crate",
-           issue = "27701")]
+           issue = "32110")]
 pub trait CharExt {
     #[stable(feature = "core", since = "1.6.0")]
     fn is_digit(self, radix: u32) -> bool;
@@ -269,10 +269,10 @@ pub trait CharExt {
     fn len_utf8(self) -> usize;
     #[stable(feature = "core", since = "1.6.0")]
     fn len_utf16(self) -> usize;
-    #[stable(feature = "core", since = "1.6.0")]
-    fn encode_utf8(self, dst: &mut [u8]) -> Option<usize>;
-    #[stable(feature = "core", since = "1.6.0")]
-    fn encode_utf16(self, dst: &mut [u16]) -> Option<usize>;
+    #[unstable(feature = "unicode", issue = "27784")]
+    fn encode_utf8(self) -> EncodeUtf8;
+    #[unstable(feature = "unicode", issue = "27784")]
+    fn encode_utf16(self) -> EncodeUtf16;
 }
 
 #[stable(feature = "core", since = "1.6.0")]
@@ -336,75 +336,47 @@ impl CharExt for char {
     }
 
     #[inline]
-    fn encode_utf8(self, dst: &mut [u8]) -> Option<usize> {
-        encode_utf8_raw(self as u32, dst)
+    fn encode_utf8(self) -> EncodeUtf8 {
+        let code = self as u32;
+        let mut buf = [0; 4];
+        let pos = if code < MAX_ONE_B {
+            buf[3] = code as u8;
+            3
+        } else if code < MAX_TWO_B {
+            buf[2] = (code >> 6 & 0x1F) as u8 | TAG_TWO_B;
+            buf[3] = (code & 0x3F) as u8 | TAG_CONT;
+            2
+        } else if code < MAX_THREE_B {
+            buf[1] = (code >> 12 & 0x0F) as u8 | TAG_THREE_B;
+            buf[2] = (code >>  6 & 0x3F) as u8 | TAG_CONT;
+            buf[3] = (code & 0x3F) as u8 | TAG_CONT;
+            1
+        } else {
+            buf[0] = (code >> 18 & 0x07) as u8 | TAG_FOUR_B;
+            buf[1] = (code >> 12 & 0x3F) as u8 | TAG_CONT;
+            buf[2] = (code >>  6 & 0x3F) as u8 | TAG_CONT;
+            buf[3] = (code & 0x3F) as u8 | TAG_CONT;
+            0
+        };
+        EncodeUtf8 { buf: buf, pos: pos }
     }
 
     #[inline]
-    fn encode_utf16(self, dst: &mut [u16]) -> Option<usize> {
-        encode_utf16_raw(self as u32, dst)
-    }
-}
-
-/// Encodes a raw u32 value as UTF-8 into the provided byte buffer,
-/// and then returns the number of bytes written.
-///
-/// If the buffer is not large enough, nothing will be written into it
-/// and a `None` will be returned.
-#[inline]
-#[unstable(feature = "char_internals",
-           reason = "this function should not be exposed publicly",
-           issue = "0")]
-#[doc(hidden)]
-pub fn encode_utf8_raw(code: u32, dst: &mut [u8]) -> Option<usize> {
-    // Marked #[inline] to allow llvm optimizing it away
-    if code < MAX_ONE_B && !dst.is_empty() {
-        dst[0] = code as u8;
-        Some(1)
-    } else if code < MAX_TWO_B && dst.len() >= 2 {
-        dst[0] = (code >> 6 & 0x1F) as u8 | TAG_TWO_B;
-        dst[1] = (code & 0x3F) as u8 | TAG_CONT;
-        Some(2)
-    } else if code < MAX_THREE_B && dst.len() >= 3  {
-        dst[0] = (code >> 12 & 0x0F) as u8 | TAG_THREE_B;
-        dst[1] = (code >>  6 & 0x3F) as u8 | TAG_CONT;
-        dst[2] = (code & 0x3F) as u8 | TAG_CONT;
-        Some(3)
-    } else if dst.len() >= 4 {
-        dst[0] = (code >> 18 & 0x07) as u8 | TAG_FOUR_B;
-        dst[1] = (code >> 12 & 0x3F) as u8 | TAG_CONT;
-        dst[2] = (code >>  6 & 0x3F) as u8 | TAG_CONT;
-        dst[3] = (code & 0x3F) as u8 | TAG_CONT;
-        Some(4)
-    } else {
-        None
-    }
-}
-
-/// Encodes a raw u32 value as UTF-16 into the provided `u16` buffer,
-/// and then returns the number of `u16`s written.
-///
-/// If the buffer is not large enough, nothing will be written into it
-/// and a `None` will be returned.
-#[inline]
-#[unstable(feature = "char_internals",
-           reason = "this function should not be exposed publicly",
-           issue = "0")]
-#[doc(hidden)]
-pub fn encode_utf16_raw(mut ch: u32, dst: &mut [u16]) -> Option<usize> {
-    // Marked #[inline] to allow llvm optimizing it away
-    if (ch & 0xFFFF) == ch && !dst.is_empty() {
-        // The BMP falls through (assuming non-surrogate, as it should)
-        dst[0] = ch as u16;
-        Some(1)
-    } else if dst.len() >= 2 {
-        // Supplementary planes break into surrogates.
-        ch -= 0x1_0000;
-        dst[0] = 0xD800 | ((ch >> 10) as u16);
-        dst[1] = 0xDC00 | ((ch as u16) & 0x3FF);
-        Some(2)
-    } else {
-        None
+    fn encode_utf16(self) -> EncodeUtf16 {
+        let mut buf = [0; 2];
+        let mut code = self as u32;
+        let pos = if (code & 0xFFFF) == code {
+            // The BMP falls through (assuming non-surrogate, as it should)
+            buf[1] = code as u16;
+            1
+        } else {
+            // Supplementary planes break into surrogates.
+            code -= 0x1_0000;
+            buf[0] = 0xD800 | ((code >> 10) as u16);
+            buf[1] = 0xDC00 | ((code as u16) & 0x3FF);
+            0
+        };
+        EncodeUtf16 { buf: buf, pos: pos }
     }
 }
 
@@ -414,16 +386,16 @@ pub fn encode_utf16_raw(mut ch: u32, dst: &mut [u16]) -> Option<usize> {
 /// This `struct` is created by the [`escape_unicode()`] method on [`char`]. See
 /// its documentation for more.
 ///
-/// [`escape_unicode()`]: ../primitive.char.html#method.escape_unicode
-/// [`char`]: ../primitive.char.html
-#[derive(Clone)]
+/// [`escape_unicode()`]: ../../std/primitive.char.html#method.escape_unicode
+/// [`char`]: ../../std/primitive.char.html
+#[derive(Clone, Debug)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct EscapeUnicode {
     c: char,
     state: EscapeUnicodeState
 }
 
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 enum EscapeUnicodeState {
     Backslash,
     Type,
@@ -494,15 +466,15 @@ impl Iterator for EscapeUnicode {
 /// This `struct` is created by the [`escape_default()`] method on [`char`]. See
 /// its documentation for more.
 ///
-/// [`escape_default()`]: ../primitive.char.html#method.escape_default
-/// [`char`]: ../primitive.char.html
-#[derive(Clone)]
+/// [`escape_default()`]: ../../std/primitive.char.html#method.escape_default
+/// [`char`]: ../../std/primitive.char.html
+#[derive(Clone, Debug)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct EscapeDefault {
     state: EscapeDefaultState
 }
 
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 enum EscapeDefaultState {
     Backslash(char),
     Char(char),
@@ -583,3 +555,80 @@ impl Iterator for EscapeDefault {
         }
     }
 }
+
+/// An iterator over `u8` entries representing the UTF-8 encoding of a `char`
+/// value.
+///
+/// Constructed via the `.encode_utf8()` method on `char`.
+#[unstable(feature = "unicode", issue = "27784")]
+#[derive(Debug)]
+pub struct EncodeUtf8 {
+    buf: [u8; 4],
+    pos: usize,
+}
+
+impl EncodeUtf8 {
+    /// Returns the remaining bytes of this iterator as a slice.
+    #[unstable(feature = "unicode", issue = "27784")]
+    pub fn as_slice(&self) -> &[u8] {
+        &self.buf[self.pos..]
+    }
+}
+
+#[unstable(feature = "unicode", issue = "27784")]
+impl Iterator for EncodeUtf8 {
+    type Item = u8;
+
+    fn next(&mut self) -> Option<u8> {
+        if self.pos == self.buf.len() {
+            None
+        } else {
+            let ret = Some(self.buf[self.pos]);
+            self.pos += 1;
+            ret
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.as_slice().iter().size_hint()
+    }
+}
+
+/// An iterator over `u16` entries representing the UTF-16 encoding of a `char`
+/// value.
+///
+/// Constructed via the `.encode_utf16()` method on `char`.
+#[unstable(feature = "unicode", issue = "27784")]
+#[derive(Debug)]
+pub struct EncodeUtf16 {
+    buf: [u16; 2],
+    pos: usize,
+}
+
+impl EncodeUtf16 {
+    /// Returns the remaining `u16` entries of this iterator as a slice.
+    #[unstable(feature = "unicode", issue = "27784")]
+    pub fn as_slice(&self) -> &[u16] {
+        &self.buf[self.pos..]
+    }
+}
+
+
+#[unstable(feature = "unicode", issue = "27784")]
+impl Iterator for EncodeUtf16 {
+    type Item = u16;
+
+    fn next(&mut self) -> Option<u16> {
+        if self.pos == self.buf.len() {
+            None
+        } else {
+            let ret = Some(self.buf[self.pos]);
+            self.pos += 1;
+            ret
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.as_slice().iter().size_hint()
+    }
+}
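
The replacement `encode_utf8`/`encode_utf16` above buffer the encoded units up front and then iterate over them. As an editorial aside, here is a minimal standalone sketch (not part of the patch, and not the unstable `core` API) mirroring the UTF-8 bit manipulation that fills that buffer; the tag and limit constants are redefined locally and assumed to carry the usual UTF-8 values they have in `char.rs`.

```rust
// Standalone illustration of the UTF-8 encoding logic buffered by EncodeUtf8.
const TAG_CONT: u8 = 0b1000_0000;
const TAG_TWO_B: u8 = 0b1100_0000;
const TAG_THREE_B: u8 = 0b1110_0000;
const TAG_FOUR_B: u8 = 0b1111_0000;
const MAX_ONE_B: u32 = 0x80;
const MAX_TWO_B: u32 = 0x800;
const MAX_THREE_B: u32 = 0x10000;

/// Encodes `c` as UTF-8 into `buf` and returns the filled prefix.
fn utf8_bytes(c: char, buf: &mut [u8; 4]) -> &[u8] {
    let code = c as u32;
    let len = if code < MAX_ONE_B {
        buf[0] = code as u8;
        1
    } else if code < MAX_TWO_B {
        buf[0] = (code >> 6 & 0x1F) as u8 | TAG_TWO_B;
        buf[1] = (code & 0x3F) as u8 | TAG_CONT;
        2
    } else if code < MAX_THREE_B {
        buf[0] = (code >> 12 & 0x0F) as u8 | TAG_THREE_B;
        buf[1] = (code >> 6 & 0x3F) as u8 | TAG_CONT;
        buf[2] = (code & 0x3F) as u8 | TAG_CONT;
        3
    } else {
        buf[0] = (code >> 18 & 0x07) as u8 | TAG_FOUR_B;
        buf[1] = (code >> 12 & 0x3F) as u8 | TAG_CONT;
        buf[2] = (code >> 6 & 0x3F) as u8 | TAG_CONT;
        buf[3] = (code & 0x3F) as u8 | TAG_CONT;
        4
    };
    &buf[..len]
}

fn main() {
    let mut buf = [0u8; 4];
    assert_eq!(utf8_bytes('ß', &mut buf), &[0xC3, 0x9F]);
    assert_eq!(utf8_bytes('𝄞', &mut buf), &[0xF0, 0x9D, 0x84, 0x9E]);
}
```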
index 769faedf46e8ebf4b57b6677add6c55a8ba00abb..a793502e58d371e440d4743c1f54dced7f3d6331 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! The `Clone` trait for types that cannot be 'implicitly copied'
+//! The `Clone` trait for types that cannot be 'implicitly copied'.
 //!
 //! In Rust, some simple types are "implicitly copyable" and when you
 //! assign them or pass them as arguments, the receiver will get a copy,
 //! them cheap and safe to copy. For other types copies must be made
 //! explicitly, by convention implementing the `Clone` trait and calling
 //! the `clone` method.
+//!
+//! Basic usage example:
+//!
+//! ```
+//! let s = String::new(); // String type implements Clone
+//! let copy = s.clone(); // so we can clone it
+//! ```
+//!
+//! To easily implement the Clone trait, you can also use
+//! `#[derive(Clone)]`. Example:
+//!
+//! ```
+//! #[derive(Clone)] // we add the Clone trait to the `Morpheus` struct
+//! struct Morpheus {
+//!    blue_pill: f32,
+//!    red_pill: i64,
+//! }
+//!
+//! fn main() {
+//!    let f = Morpheus { blue_pill: 0.0, red_pill: 0 };
+//!    let copy = f.clone(); // and now we can clone it!
+//! }
+//! ```
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
index 042cbea64bd2662e86a73b52781ef5cce2a28851..49aa0238a996a5f236f68749992ecf5236340056 100644 (file)
 //! by the compiler to implement comparison operators. Rust programs may
 //! implement `PartialOrd` to overload the `<`, `<=`, `>`, and `>=` operators,
 //! and may implement `PartialEq` to overload the `==` and `!=` operators.
+//!
+//! # Examples
+//!
+//! ```
+//! let x: u32 = 0;
+//! let y: u32 = 1;
+//!
+//! // these two lines are equivalent
+//! assert_eq!(x < y, true);
+//! assert_eq!(x.lt(&y), true);
+//!
+//! // these two lines are also equivalent
+//! assert_eq!(x == y, false);
+//! assert_eq!(x.eq(&y), false);
+//! ```
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
@@ -44,6 +59,16 @@ use option::Option::{self, Some};
 /// only if `a != b`.
 ///
 /// This trait can be used with `#[derive]`.
+///
+/// # Examples
+///
+/// ```
+/// let x: u32 = 0;
+/// let y: u32 = 1;
+///
+/// assert_eq!(x == y, false);
+/// assert_eq!(x.eq(&y), false);
+/// ```
 #[lang = "eq"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub trait PartialEq<Rhs: ?Sized = Self> {
@@ -226,6 +251,16 @@ impl PartialOrd for Ordering {
 ///
 /// This trait can be used with `#[derive]`. When `derive`d, it will produce an ordering
 /// based on the top-to-bottom declaration order of the struct's members.
+///
+/// # Examples
+///
+/// ```
+/// let x: u32 = 0;
+/// let y: u32 = 1;
+///
+/// assert_eq!(x < y, true);
+/// assert_eq!(x.lt(&y), true);
+/// ```
 #[lang = "ord"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub trait PartialOrd<Rhs: ?Sized = Self>: PartialEq<Rhs> {
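
Since the new examples above lean on `#[derive]`, a small editorial sketch (a hypothetical `Version` struct, not from the patch) shows the field-order behaviour the derived comparison traits provide:

```rust
// Derived PartialEq/PartialOrd compare fields in declaration order.
#[derive(Debug, PartialEq, PartialOrd)]
struct Version {
    major: u32,
    minor: u32,
}

fn main() {
    let a = Version { major: 1, minor: 8 };
    let b = Version { major: 1, minor: 9 };
    assert!(a < b);             // equal major, then 8 < 9
    assert!(a != b);
    assert_eq!(a.lt(&b), true); // operator and method forms agree
}
```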
index b4ac020795c3804b01453c70baf82b253ebfa466..2d999868f71ec5995c3a667279eefc921a4be7ba 100644 (file)
 //!
 //! - Impl the `As*` traits for reference-to-reference conversions
 //! - Impl the `Into` trait when you want to consume the value in the conversion
-//! - The `From` trait is the most flexible, useful for values _and_ references conversions
+//! - The `From` trait is the most flexible, useful for value _and_ reference conversions
 //!
-//! As a library writer, you should prefer implementing `From<T>` rather than
-//! `Into<U>`, as `From` provides greater flexibility and offer the equivalent `Into`
+//! As a library author, you should prefer implementing `From<T>` rather than
+//! `Into<U>`, as `From` provides greater flexibility and offers an equivalent `Into`
 //! implementation for free, thanks to a blanket implementation in the standard library.
 //!
 //! **Note: these traits must not fail**. If the conversion can fail, you must use a dedicated
-//! method which return an `Option<T>` or a `Result<T, E>`.
+//! method which returns an `Option<T>` or a `Result<T, E>`.
 //!
 //! # Generic impl
 //!
@@ -49,7 +49,7 @@ use marker::Sized;
 /// [book]: ../../book/borrow-and-asref.html
 ///
 /// **Note: this trait must not fail**. If the conversion can fail, use a dedicated method which
-/// return an `Option<T>` or a `Result<T, E>`.
+/// returns an `Option<T>` or a `Result<T, E>`.
 ///
 /// # Examples
 ///
@@ -82,7 +82,7 @@ pub trait AsRef<T: ?Sized> {
 /// A cheap, mutable reference-to-mutable reference conversion.
 ///
 /// **Note: this trait must not fail**. If the conversion can fail, use a dedicated method which
-/// return an `Option<T>` or a `Result<T, E>`.
+/// returns an `Option<T>` or a `Result<T, E>`.
 ///
 /// # Generic Impls
 ///
@@ -99,10 +99,10 @@ pub trait AsMut<T: ?Sized> {
 /// A conversion that consumes `self`, which may or may not be expensive.
 ///
 /// **Note: this trait must not fail**. If the conversion can fail, use a dedicated method which
-/// return an `Option<T>` or a `Result<T, E>`.
+/// returns an `Option<T>` or a `Result<T, E>`.
 ///
-/// Library writer should not implement directly this trait, but should prefer the implementation
-/// of the `From` trait, which offer greater flexibility and provide the equivalent `Into`
+/// Library authors should not directly implement this trait, but should prefer implementing
+/// the `From` trait, which offers greater flexibility and provides an equivalent `Into`
 /// implementation for free, thanks to a blanket implementation in the standard library.
 ///
 /// # Examples
@@ -134,7 +134,7 @@ pub trait Into<T>: Sized {
 /// Construct `Self` via a conversion.
 ///
 /// **Note: this trait must not fail**. If the conversion can fail, use a dedicated method which
-/// return an `Option<T>` or a `Result<T, E>`.
+/// returns an `Option<T>` or a `Result<T, E>`.
 ///
 /// # Examples
 ///
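
To illustrate the blanket-implementation point the rewritten docs above make, a short editorial sketch (hypothetical `Celsius`/`Fahrenheit` types, not from the patch): implementing `From` by hand also makes `.into()` available for free.

```rust
struct Celsius(f64);
struct Fahrenheit(f64);

impl From<Celsius> for Fahrenheit {
    fn from(c: Celsius) -> Fahrenheit {
        Fahrenheit(c.0 * 9.0 / 5.0 + 32.0)
    }
}

fn main() {
    // Both forms work; only From was implemented by hand.
    let f1 = Fahrenheit::from(Celsius(100.0));
    let f2: Fahrenheit = Celsius(0.0).into();
    assert_eq!(f1.0, 212.0);
    assert_eq!(f2.0, 32.0);
}
```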
index 7c986131a52858dd357c48b0f6ec02208635878e..d33746389a0a0a6081fdf791b5b4265f5940cb27 100644 (file)
@@ -29,7 +29,7 @@ impl<'a, 'b: 'a> fmt::Write for PadAdapter<'a, 'b> {
     fn write_str(&mut self, mut s: &str) -> fmt::Result {
         while !s.is_empty() {
             if self.on_newline {
-                try!(self.fmt.write_str("    "));
+                self.fmt.write_str("    ")?;
             }
 
             let split = match s.find('\n') {
@@ -42,7 +42,7 @@ impl<'a, 'b: 'a> fmt::Write for PadAdapter<'a, 'b> {
                     s.len()
                 }
             };
-            try!(self.fmt.write_str(&s[..split]));
+            self.fmt.write_str(&s[..split])?;
             s = &s[split..];
         }
 
@@ -54,6 +54,7 @@ impl<'a, 'b: 'a> fmt::Write for PadAdapter<'a, 'b> {
 ///
 /// Constructed by the `Formatter::debug_struct` method.
 #[must_use]
+#[allow(missing_debug_implementations)]
 #[stable(feature = "debug_builders", since = "1.2.0")]
 pub struct DebugStruct<'a, 'b: 'a> {
     fmt: &'a mut fmt::Formatter<'b>,
@@ -120,11 +121,13 @@ impl<'a, 'b: 'a> DebugStruct<'a, 'b> {
 ///
 /// Constructed by the `Formatter::debug_tuple` method.
 #[must_use]
+#[allow(missing_debug_implementations)]
 #[stable(feature = "debug_builders", since = "1.2.0")]
 pub struct DebugTuple<'a, 'b: 'a> {
     fmt: &'a mut fmt::Formatter<'b>,
     result: fmt::Result,
-    has_fields: bool,
+    fields: usize,
+    empty_name: bool,
 }
 
 pub fn debug_tuple_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>, name: &str) -> DebugTuple<'a, 'b> {
@@ -132,7 +135,8 @@ pub fn debug_tuple_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>, name: &str) -> D
     DebugTuple {
         fmt: fmt,
         result: result,
-        has_fields: false,
+        fields: 0,
+        empty_name: name.is_empty(),
     }
 }
 
@@ -141,7 +145,7 @@ impl<'a, 'b: 'a> DebugTuple<'a, 'b> {
     #[stable(feature = "debug_builders", since = "1.2.0")]
     pub fn field(&mut self, value: &fmt::Debug) -> &mut DebugTuple<'a, 'b> {
         self.result = self.result.and_then(|_| {
-            let (prefix, space) = if self.has_fields {
+            let (prefix, space) = if self.fields > 0 {
                 (",", " ")
             } else {
                 ("(", "")
@@ -155,20 +159,22 @@ impl<'a, 'b: 'a> DebugTuple<'a, 'b> {
             }
         });
 
-        self.has_fields = true;
+        self.fields += 1;
         self
     }
 
     /// Finishes output and returns any error encountered.
     #[stable(feature = "debug_builders", since = "1.2.0")]
     pub fn finish(&mut self) -> fmt::Result {
-        if self.has_fields {
+        if self.fields > 0 {
             self.result = self.result.and_then(|_| {
                 if self.is_pretty() {
-                    self.fmt.write_str("\n)")
-                } else {
-                    self.fmt.write_str(")")
+                    self.fmt.write_str("\n")?;
+                }
+                if self.fields == 1 && self.empty_name {
+                    self.fmt.write_str(",")?;
                 }
+                self.fmt.write_str(")")
             });
         }
         self.result
@@ -177,14 +183,6 @@ impl<'a, 'b: 'a> DebugTuple<'a, 'b> {
     fn is_pretty(&self) -> bool {
         self.fmt.flags() & (1 << (FlagV1::Alternate as usize)) != 0
     }
-
-    /// Returns the wrapped `Formatter`.
-    #[unstable(feature = "debug_builder_formatter", reason = "recently added",
-               issue = "27782")]
-    #[rustc_deprecated(since = "1.7.0", reason = "will be removed")]
-    pub fn formatter(&mut self) -> &mut fmt::Formatter<'b> {
-        &mut self.fmt
-    }
 }
 
 struct DebugInner<'a, 'b: 'a> {
@@ -235,6 +233,7 @@ impl<'a, 'b: 'a> DebugInner<'a, 'b> {
 ///
 /// Constructed by the `Formatter::debug_set` method.
 #[must_use]
+#[allow(missing_debug_implementations)]
 #[stable(feature = "debug_builders", since = "1.2.0")]
 pub struct DebugSet<'a, 'b: 'a> {
     inner: DebugInner<'a, 'b>,
@@ -283,6 +282,7 @@ impl<'a, 'b: 'a> DebugSet<'a, 'b> {
 ///
 /// Constructed by the `Formatter::debug_list` method.
 #[must_use]
+#[allow(missing_debug_implementations)]
 #[stable(feature = "debug_builders", since = "1.2.0")]
 pub struct DebugList<'a, 'b: 'a> {
     inner: DebugInner<'a, 'b>,
@@ -331,6 +331,7 @@ impl<'a, 'b: 'a> DebugList<'a, 'b> {
 ///
 /// Constructed by the `Formatter::debug_map` method.
 #[must_use]
+#[allow(missing_debug_implementations)]
 #[stable(feature = "debug_builders", since = "1.2.0")]
 pub struct DebugMap<'a, 'b: 'a> {
     fmt: &'a mut fmt::Formatter<'b>,
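
The `fields`/`empty_name` bookkeeping introduced above lets a one-element tuple keep its trailing comma without the now-removed `formatter()` escape hatch. A quick user-level check of that behaviour (an editorial sketch, assuming the derived tuple `Debug` goes through this builder):

```rust
fn main() {
    // One-element tuples keep the trailing comma; larger tuples do not.
    assert_eq!(format!("{:?}", (5,)), "(5,)");
    assert_eq!(format!("{:?}", (5, 6)), "(5, 6)");
}
```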
index a3b09e9db42d137a9aa3e96d02ab52fbd0424eba..2f02f5c21f51b8768c837f04fe07d07fe9b2d12e 100644 (file)
@@ -8,13 +8,13 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! Utilities for formatting and printing strings
+//! Utilities for formatting and printing strings.
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
 use prelude::v1::*;
 
-use cell::{Cell, RefCell, Ref, RefMut, BorrowState};
+use cell::{UnsafeCell, Cell, RefCell, Ref, RefMut, BorrowState};
 use marker::PhantomData;
 use mem;
 use num::flt2dec;
@@ -22,20 +22,21 @@ use ops::Deref;
 use result;
 use slice;
 use str;
-use self::rt::v1::Alignment;
-
-#[unstable(feature = "fmt_radix", issue = "27728")]
-#[rustc_deprecated(since = "1.7.0", reason = "not used enough to stabilize")]
-#[allow(deprecated)]
-pub use self::num::radix;
-#[unstable(feature = "fmt_radix", issue = "27728")]
-#[rustc_deprecated(since = "1.7.0", reason = "not used enough to stabilize")]
-#[allow(deprecated)]
-pub use self::num::Radix;
-#[unstable(feature = "fmt_radix", issue = "27728")]
-#[rustc_deprecated(since = "1.7.0", reason = "not used enough to stabilize")]
-#[allow(deprecated)]
-pub use self::num::RadixFmt;
+
+#[unstable(feature = "fmt_flags_align", issue = "27726")]
+/// Possible alignments returned by `Formatter::align`
+#[derive(Debug)]
+pub enum Alignment {
+    /// Indication that contents should be left-aligned.
+    Left,
+    /// Indication that contents should be right-aligned.
+    Right,
+    /// Indication that contents should be center-aligned.
+    Center,
+    /// No alignment was requested.
+    Unknown,
+}
+
 #[stable(feature = "debug_builders", since = "1.2.0")]
 pub use self::builders::{DebugStruct, DebugTuple, DebugSet, DebugList, DebugMap};
 
@@ -98,9 +99,9 @@ pub trait Write {
     /// This function will return an instance of `Error` on error.
     #[stable(feature = "fmt_write_char", since = "1.1.0")]
     fn write_char(&mut self, c: char) -> Result {
-        let mut utf_8 = [0u8; 4];
-        let bytes_written = c.encode_utf8(&mut utf_8).unwrap_or(0);
-        self.write_str(unsafe { str::from_utf8_unchecked(&utf_8[..bytes_written]) })
+        self.write_str(unsafe {
+            str::from_utf8_unchecked(c.encode_utf8().as_slice())
+        })
     }
 
     /// Glue for usage of the `write!` macro with implementors of this trait.
@@ -152,6 +153,7 @@ impl<'a, W: Write + ?Sized> Write for &'a mut W {
 /// A struct to represent both where to emit formatting strings to and how they
 /// should be formatted. A mutable version of this is passed to all formatting
 /// traits.
+#[allow(missing_debug_implementations)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Formatter<'a> {
     flags: u32,
@@ -175,6 +177,7 @@ enum Void {}
 /// compile time it is ensured that the function and the value have the correct
 /// types, and then this struct is used to canonicalize arguments to one type.
 #[derive(Copy)]
+#[allow(missing_debug_implementations)]
 #[unstable(feature = "fmt_internals", reason = "internal to format_args!",
            issue = "0")]
 #[doc(hidden)]
@@ -780,7 +783,7 @@ pub fn write(output: &mut Write, args: Arguments) -> Result {
         width: None,
         precision: None,
         buf: output,
-        align: Alignment::Unknown,
+        align: rt::v1::Alignment::Unknown,
         fill: ' ',
         args: args.args,
         curarg: args.args.iter(),
@@ -792,16 +795,16 @@ pub fn write(output: &mut Write, args: Arguments) -> Result {
         None => {
             // We can use default formatting parameters for all arguments.
             for (arg, piece) in args.args.iter().zip(pieces.by_ref()) {
-                try!(formatter.buf.write_str(*piece));
-                try!((arg.formatter)(arg.value, &mut formatter));
+                formatter.buf.write_str(*piece)?;
+                (arg.formatter)(arg.value, &mut formatter)?;
             }
         }
         Some(fmt) => {
             // Every spec has a corresponding argument that is preceded by
             // a string piece.
             for (arg, piece) in fmt.iter().zip(pieces.by_ref()) {
-                try!(formatter.buf.write_str(*piece));
-                try!(formatter.run(arg));
+                formatter.buf.write_str(*piece)?;
+                formatter.run(arg)?;
             }
         }
     }
@@ -809,7 +812,7 @@ pub fn write(output: &mut Write, args: Arguments) -> Result {
     // There can be only one trailing string piece left.
     match pieces.next() {
         Some(piece) => {
-            try!(formatter.buf.write_str(*piece));
+            formatter.buf.write_str(*piece)?;
         }
         None => {}
     }
@@ -894,10 +897,9 @@ impl<'a> Formatter<'a> {
         // Writes the sign if it exists, and then the prefix if it was requested
         let write_prefix = |f: &mut Formatter| {
             if let Some(c) = sign {
-                let mut b = [0; 4];
-                let n = c.encode_utf8(&mut b).unwrap_or(0);
-                let b = unsafe { str::from_utf8_unchecked(&b[..n]) };
-                try!(f.buf.write_str(b));
+                f.buf.write_str(unsafe {
+                    str::from_utf8_unchecked(c.encode_utf8().as_slice())
+                })?;
             }
             if prefixed { f.buf.write_str(prefix) }
             else { Ok(()) }
@@ -908,26 +910,26 @@ impl<'a> Formatter<'a> {
             // If there's no minimum length requirements then we can just
             // write the bytes.
             None => {
-                try!(write_prefix(self)); self.buf.write_str(buf)
+                write_prefix(self)?; self.buf.write_str(buf)
             }
             // Check if we're over the minimum width, if so then we can also
             // just write the bytes.
             Some(min) if width >= min => {
-                try!(write_prefix(self)); self.buf.write_str(buf)
+                write_prefix(self)?; self.buf.write_str(buf)
             }
             // The sign and prefix goes before the padding if the fill character
             // is zero
             Some(min) if self.sign_aware_zero_pad() => {
                 self.fill = '0';
-                try!(write_prefix(self));
-                self.with_padding(min - width, Alignment::Right, |f| {
+                write_prefix(self)?;
+                self.with_padding(min - width, rt::v1::Alignment::Right, |f| {
                     f.buf.write_str(buf)
                 })
             }
             // Otherwise, the sign and prefix goes after the padding
             Some(min) => {
-                self.with_padding(min - width, Alignment::Right, |f| {
-                    try!(write_prefix(f)); f.buf.write_str(buf)
+                self.with_padding(min - width, rt::v1::Alignment::Right, |f| {
+                    write_prefix(f)?; f.buf.write_str(buf)
                 })
             }
         }
@@ -973,7 +975,8 @@ impl<'a> Formatter<'a> {
             // If we're under both the maximum and the minimum width, then fill
             // up the minimum width with the specified string + some alignment.
             Some(width) => {
-                self.with_padding(width - s.chars().count(), Alignment::Left, |me| {
+                let align = rt::v1::Alignment::Left;
+                self.with_padding(width - s.chars().count(), align, |me| {
                     me.buf.write_str(s)
                 })
             }
@@ -982,34 +985,36 @@ impl<'a> Formatter<'a> {
 
     /// Runs a callback, emitting the correct padding either before or
     /// afterwards depending on whether right or left alignment is requested.
-    fn with_padding<F>(&mut self, padding: usize, default: Alignment,
+    fn with_padding<F>(&mut self, padding: usize, default: rt::v1::Alignment,
                        f: F) -> Result
         where F: FnOnce(&mut Formatter) -> Result,
     {
         use char::CharExt;
         let align = match self.align {
-            Alignment::Unknown => default,
+            rt::v1::Alignment::Unknown => default,
             _ => self.align
         };
 
         let (pre_pad, post_pad) = match align {
-            Alignment::Left => (0, padding),
-            Alignment::Right | Alignment::Unknown => (padding, 0),
-            Alignment::Center => (padding / 2, (padding + 1) / 2),
+            rt::v1::Alignment::Left => (0, padding),
+            rt::v1::Alignment::Right |
+            rt::v1::Alignment::Unknown => (padding, 0),
+            rt::v1::Alignment::Center => (padding / 2, (padding + 1) / 2),
         };
 
-        let mut fill = [0; 4];
-        let len = self.fill.encode_utf8(&mut fill).unwrap_or(0);
-        let fill = unsafe { str::from_utf8_unchecked(&fill[..len]) };
+        let fill = self.fill.encode_utf8();
+        let fill = unsafe {
+            str::from_utf8_unchecked(fill.as_slice())
+        };
 
         for _ in 0..pre_pad {
-            try!(self.buf.write_str(fill));
+            self.buf.write_str(fill)?;
         }
 
-        try!(f(self));
+        f(self)?;
 
         for _ in 0..post_pad {
-            try!(self.buf.write_str(fill));
+            self.buf.write_str(fill)?;
         }
 
         Ok(())
@@ -1028,12 +1033,12 @@ impl<'a> Formatter<'a> {
             if self.sign_aware_zero_pad() {
                 // a sign always goes first
                 let sign = unsafe { str::from_utf8_unchecked(formatted.sign) };
-                try!(self.buf.write_str(sign));
+                self.buf.write_str(sign)?;
 
                 // remove the sign from the formatted parts
                 formatted.sign = b"";
                 width = if width < sign.len() { 0 } else { width - sign.len() };
-                align = Alignment::Right;
+                align = rt::v1::Alignment::Right;
                 self.fill = '0';
             }
 
@@ -1060,7 +1065,7 @@ impl<'a> Formatter<'a> {
         }
 
         if !formatted.sign.is_empty() {
-            try!(write_bytes(self.buf, formatted.sign));
+            write_bytes(self.buf, formatted.sign)?;
         }
         for part in formatted.parts {
             match *part {
@@ -1068,11 +1073,11 @@ impl<'a> Formatter<'a> {
                     const ZEROES: &'static str = // 64 zeroes
                         "0000000000000000000000000000000000000000000000000000000000000000";
                     while nzeroes > ZEROES.len() {
-                        try!(self.buf.write_str(ZEROES));
+                        self.buf.write_str(ZEROES)?;
                         nzeroes -= ZEROES.len();
                     }
                     if nzeroes > 0 {
-                        try!(self.buf.write_str(&ZEROES[..nzeroes]));
+                        self.buf.write_str(&ZEROES[..nzeroes])?;
                     }
                 }
                 flt2dec::Part::Num(mut v) => {
@@ -1082,10 +1087,10 @@ impl<'a> Formatter<'a> {
                         *c = b'0' + (v % 10) as u8;
                         v /= 10;
                     }
-                    try!(write_bytes(self.buf, &s[..len]));
+                    write_bytes(self.buf, &s[..len])?;
                 }
                 flt2dec::Part::Copy(buf) => {
-                    try!(write_bytes(self.buf, buf));
+                    write_bytes(self.buf, buf)?;
                 }
             }
         }
@@ -1116,7 +1121,14 @@ impl<'a> Formatter<'a> {
     /// Flag indicating what form of alignment was requested
     #[unstable(feature = "fmt_flags_align", reason = "method was just created",
                issue = "27726")]
-    pub fn align(&self) -> Alignment { self.align }
+    pub fn align(&self) -> Alignment {
+        match self.align {
+            rt::v1::Alignment::Left => Alignment::Left,
+            rt::v1::Alignment::Right => Alignment::Right,
+            rt::v1::Alignment::Center => Alignment::Center,
+            rt::v1::Alignment::Unknown => Alignment::Unknown,
+        }
+    }
 
     /// Optionally specified integer width that the output should be
     #[stable(feature = "fmt_flags", since = "1.5.0")]
@@ -1337,20 +1349,20 @@ impl Display for bool {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl Debug for str {
     fn fmt(&self, f: &mut Formatter) -> Result {
-        try!(f.write_char('"'));
+        f.write_char('"')?;
         let mut from = 0;
         for (i, c) in self.char_indices() {
             let esc = c.escape_default();
             // If char needs escaping, flush backlog so far and write, else skip
             if esc.size_hint() != (1, Some(1)) {
-                try!(f.write_str(&self[from..i]));
+                f.write_str(&self[from..i])?;
                 for c in esc {
-                    try!(f.write_char(c));
+                    f.write_char(c)?;
                 }
                 from = i + c.len_utf8();
             }
         }
-        try!(f.write_str(&self[from..]));
+        f.write_str(&self[from..])?;
         f.write_char('"')
     }
 }
@@ -1365,9 +1377,9 @@ impl Display for str {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl Debug for char {
     fn fmt(&self, f: &mut Formatter) -> Result {
-        try!(f.write_char('\''));
+        f.write_char('\'')?;
         for c in self.escape_default() {
-            try!(f.write_char(c))
+            f.write_char(c)?
         }
         f.write_char('\'')
     }
@@ -1379,10 +1391,9 @@ impl Display for char {
         if f.width.is_none() && f.precision.is_none() {
             f.write_char(*self)
         } else {
-            let mut utf8 = [0; 4];
-            let amt = self.encode_utf8(&mut utf8).unwrap_or(0);
-            let s: &str = unsafe { str::from_utf8_unchecked(&utf8[..amt]) };
-            f.pad(s)
+            f.pad(unsafe {
+                str::from_utf8_unchecked(self.encode_utf8().as_slice())
+            })
         }
     }
 }
@@ -1540,16 +1551,10 @@ macro_rules! tuple {
             fn fmt(&self, f: &mut Formatter) -> Result {
                 let mut builder = f.debug_tuple("");
                 let ($(ref $name,)*) = *self;
-                let mut n = 0;
                 $(
                     builder.field($name);
-                    n += 1;
                 )*
 
-                if n == 1 {
-                    try!(write!(builder.formatter(), ","));
-                }
-
                 builder.finish()
             }
         }
@@ -1582,7 +1587,9 @@ impl<T: ?Sized> Debug for PhantomData<T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: Copy + Debug> Debug for Cell<T> {
     fn fmt(&self, f: &mut Formatter) -> Result {
-        write!(f, "Cell {{ value: {:?} }}", self.get())
+        f.debug_struct("Cell")
+            .field("value", &self.get())
+            .finish()
     }
 }
 
@@ -1591,9 +1598,15 @@ impl<T: ?Sized + Debug> Debug for RefCell<T> {
     fn fmt(&self, f: &mut Formatter) -> Result {
         match self.borrow_state() {
             BorrowState::Unused | BorrowState::Reading => {
-                write!(f, "RefCell {{ value: {:?} }}", self.borrow())
+                f.debug_struct("RefCell")
+                    .field("value", &self.borrow())
+                    .finish()
+            }
+            BorrowState::Writing => {
+                f.debug_struct("RefCell")
+                    .field("value", &"<borrowed>")
+                    .finish()
             }
-            BorrowState::Writing => write!(f, "RefCell {{ <borrowed> }}"),
         }
     }
 }
@@ -1612,5 +1625,12 @@ impl<'b, T: ?Sized + Debug> Debug for RefMut<'b, T> {
     }
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: ?Sized + Debug> Debug for UnsafeCell<T> {
+    fn fmt(&self, f: &mut Formatter) -> Result {
+        f.pad("UnsafeCell")
+    }
+}
+
 // If you expected tests to be here, look instead at the run-pass/ifmt.rs test,
 // it's a lot easier than creating all of the rt::Piece structures here.
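
Much of this file's churn is the mechanical replacement of `try!(expr)` with the `?` operator; both forms early-return the error for these `fmt::Result` values. An editorial sketch of the equivalence, written against a later stable toolchain where `?` is available outside the compiler (it was still feature-gated at the time of this patch):

```rust
use std::fmt::{self, Write};

// `?` behaves exactly like the try! calls it replaces: on Err it returns
// early, on Ok it yields the value.
fn pad_line(out: &mut String, s: &str) -> fmt::Result {
    out.write_str("    ")?; // was: try!(out.write_str("    "));
    out.write_str(s)?;      // was: try!(out.write_str(s));
    out.write_char('\n')
}

fn main() {
    let mut buf = String::new();
    pad_line(&mut buf, "hello").unwrap();
    assert_eq!(buf, "    hello\n");
}
```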
index 263e03dcc78393a87b428d59a0d029cfd32e40b1..a944c996c1a1e7532e0aeebde83bbbfc05c182e8 100644 (file)
@@ -140,81 +140,6 @@ radix! { LowerHex, 16, "0x", x @  0 ...  9 => b'0' + x,
 radix! { UpperHex, 16, "0x", x @  0 ...  9 => b'0' + x,
                              x @ 10 ... 15 => b'A' + (x - 10) }
 
-/// A radix with in the range of `2..36`.
-#[derive(Clone, Copy, PartialEq)]
-#[unstable(feature = "fmt_radix",
-           reason = "may be renamed or move to a different module",
-           issue = "27728")]
-#[rustc_deprecated(since = "1.7.0", reason = "not used enough to stabilize")]
-pub struct Radix {
-    base: u8,
-}
-
-impl Radix {
-    fn new(base: u8) -> Radix {
-        assert!(2 <= base && base <= 36,
-                "the base must be in the range of 2..36: {}",
-                base);
-        Radix { base: base }
-    }
-}
-
-impl GenericRadix for Radix {
-    fn base(&self) -> u8 {
-        self.base
-    }
-    fn digit(&self, x: u8) -> u8 {
-        match x {
-            x @  0 ... 9 => b'0' + x,
-            x if x < self.base() => b'a' + (x - 10),
-            x => panic!("number not in the range 0..{}: {}", self.base() - 1, x),
-        }
-    }
-}
-
-/// A helper type for formatting radixes.
-#[unstable(feature = "fmt_radix",
-           reason = "may be renamed or move to a different module",
-           issue = "27728")]
-#[rustc_deprecated(since = "1.7.0", reason = "not used enough to stabilize")]
-#[derive(Copy, Clone)]
-pub struct RadixFmt<T, R>(T, R);
-
-/// Constructs a radix formatter in the range of `2..36`.
-///
-/// # Examples
-///
-/// ```
-/// #![feature(fmt_radix)]
-///
-/// use std::fmt::radix;
-/// assert_eq!(format!("{}", radix(55, 36)), "1j".to_string());
-/// ```
-#[unstable(feature = "fmt_radix",
-           reason = "may be renamed or move to a different module",
-           issue = "27728")]
-#[rustc_deprecated(since = "1.7.0", reason = "not used enough to stabilize")]
-pub fn radix<T>(x: T, base: u8) -> RadixFmt<T, Radix> {
-    RadixFmt(x, Radix::new(base))
-}
-
-macro_rules! radix_fmt {
-    ($T:ty as $U:ty, $fmt:ident) => {
-        #[stable(feature = "rust1", since = "1.0.0")]
-        impl fmt::Debug for RadixFmt<$T, Radix> {
-            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-                fmt::Display::fmt(self, f)
-            }
-        }
-        #[stable(feature = "rust1", since = "1.0.0")]
-        impl fmt::Display for RadixFmt<$T, Radix> {
-            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-                match *self { RadixFmt(ref x, radix) => radix.$fmt(*x as $U, f) }
-            }
-        }
-    }
-}
-
 macro_rules! int_base {
     ($Trait:ident for $T:ident as $U:ident -> $Radix:ident) => {
         #[stable(feature = "rust1", since = "1.0.0")]
@@ -243,14 +168,12 @@ macro_rules! integer {
         int_base! { Octal    for $Int as $Uint  -> Octal }
         int_base! { LowerHex for $Int as $Uint  -> LowerHex }
         int_base! { UpperHex for $Int as $Uint  -> UpperHex }
-        radix_fmt! { $Int as $Int, fmt_int }
         debug! { $Int }
 
         int_base! { Binary   for $Uint as $Uint -> Binary }
         int_base! { Octal    for $Uint as $Uint -> Octal }
         int_base! { LowerHex for $Uint as $Uint -> LowerHex }
         int_base! { UpperHex for $Uint as $Uint -> UpperHex }
-        radix_fmt! { $Uint as $Uint, fmt_int }
         debug! { $Uint }
     }
 }
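
With the unstable `radix`/`RadixFmt` helpers deleted above, the fixed-base formatters generated by `int_base!` remain the supported way to print integers in other bases, for example:

```rust
fn main() {
    // The {:b}, {:o}, {:x} and {:X} formatters kept by this file.
    assert_eq!(format!("{:b}", 10), "1010");
    assert_eq!(format!("{:o}", 64), "100");
    assert_eq!(format!("{:x}", 255), "ff");
    assert_eq!(format!("{:X}", 255), "FF");
}
```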
index f889045a3f59568b14eeca4689a0bf21c11975d1..6b31e040622cd083943d7396d80c52207264554b 100644 (file)
@@ -13,6 +13,7 @@
 //!
 //! These definitions are similar to their `ct` equivalents, but differ in that
 //! these can be statically allocated and are slightly optimized for the runtime
+#![allow(missing_debug_implementations)]
 
 #[derive(Copy, Clone)]
 pub struct Argument {
index b8709e8649a69051ec893a444a03f33db75ccf5c..4d0fed983343611f32a57a5591275b3e2a606304 100644 (file)
@@ -73,6 +73,7 @@
 
 use prelude::v1::*;
 
+use fmt;
 use marker;
 use mem;
 
@@ -215,6 +216,13 @@ pub trait BuildHasher {
 #[stable(since = "1.7.0", feature = "build_hasher")]
 pub struct BuildHasherDefault<H>(marker::PhantomData<H>);
 
+#[stable(since = "1.9.0", feature = "core_impl_debug")]
+impl<H> fmt::Debug for BuildHasherDefault<H> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.pad("BuildHasherDefault")
+    }
+}
+
 #[stable(since = "1.7.0", feature = "build_hasher")]
 impl<H: Default + Hasher> BuildHasher for BuildHasherDefault<H> {
     type Hasher = H;
@@ -238,30 +246,6 @@ impl<H> Default for BuildHasherDefault<H> {
     }
 }
 
-// The HashState trait is super deprecated, but it's here to have the blanket
-// impl that goes from HashState -> BuildHasher
-
-/// Deprecated, renamed to `BuildHasher`
-#[unstable(feature = "hashmap_hasher", reason = "hasher stuff is unclear",
-           issue = "27713")]
-#[rustc_deprecated(since = "1.7.0", reason = "support moved to std::hash and \
-                                              renamed to BuildHasher")]
-pub trait HashState {
-    /// Type of the hasher that will be created.
-    type Hasher: Hasher;
-
-    /// Creates a new hasher based on the given state of this object.
-    fn hasher(&self) -> Self::Hasher;
-}
-
-#[unstable(feature = "hashmap_hasher", reason = "hasher stuff is unclear",
-           issue = "27713")]
-#[allow(deprecated)]
-impl<T: HashState> BuildHasher for T {
-    type Hasher = T::Hasher;
-    fn build_hasher(&self) -> T::Hasher { self.hasher() }
-}
-
 //////////////////////////////////////////////////////////////////////////////
 
 mod impls {
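
The new `Debug` impl for `BuildHasherDefault` simply pads the type name, which is easy to check from user code. An editorial sketch (it uses `std::collections::hash_map::DefaultHasher`, which only became available in a later stable release; at the time of this patch `SipHasher` filled that role):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::BuildHasherDefault;

fn main() {
    let b: BuildHasherDefault<DefaultHasher> = Default::default();
    // The Debug impl added above prints just the type name.
    assert_eq!(format!("{:?}", b), "BuildHasherDefault");
}
```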
index 722d77a8a11efeccdc26f0eecfdf8eb905a1f999..fd1dab7a1f043b141cf6367ed980c1a3b2cf4a4b 100644 (file)
@@ -17,17 +17,20 @@ use super::Hasher;
 
 /// An implementation of SipHash 2-4.
 ///
-/// See: http://131002.net/siphash/
+/// See: https://131002.net/siphash/
 ///
-/// Consider this as a main "general-purpose" hash for all hashtables: it
-/// runs at good speed (competitive with spooky and city) and permits
-/// strong _keyed_ hashing. Key your hashtables from a strong RNG,
-/// such as `rand::Rng`.
+/// This is currently the default hashing function used by the standard library
+/// (e.g. `collections::HashMap` uses it by default).
 ///
-/// Although the SipHash algorithm is considered to be cryptographically
-/// strong, this implementation has not been reviewed for such purposes.
-/// As such, all cryptographic uses of this implementation are strongly
-/// discouraged.
+/// SipHash is a general-purpose hashing function: it runs at a good
+/// speed (competitive with Spooky and City) and permits strong _keyed_
+/// hashing. This lets you key your hashtables from a strong RNG, such as
+/// [`rand::os::OsRng`](https://doc.rust-lang.org/rand/rand/os/struct.OsRng.html).
+///
+/// Although the SipHash algorithm is considered to be generally strong,
+/// it is not intended for cryptographic purposes. As such, all
+/// cryptographic uses of this implementation are _strongly discouraged_.
+#[derive(Debug)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct SipHasher {
     k0: u64,
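
As the rewritten `SipHasher` docs above stress keyed hashing, a short editorial sketch of driving the hasher directly (note that `SipHasher` was deprecated in later releases in favour of `DefaultHasher`, so this is illustrative only):

```rust
#![allow(deprecated)] // SipHasher was deprecated in later releases
use std::hash::{Hash, Hasher, SipHasher};

fn main() {
    // Hashing is deterministic for a fixed key: same key and same input
    // yield the same 64-bit output.
    let mut a = SipHasher::new_with_keys(0xdead, 0xbeef);
    let mut b = SipHasher::new_with_keys(0xdead, 0xbeef);
    "hello".hash(&mut a);
    "hello".hash(&mut b);
    assert_eq!(a.finish(), b.finish());
}
```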
index 0417ef84163ab30bccc64d6857b3f55de5af4dcf..03bcf9caeea0cab30ad450440deb2ffc271acc27 100644 (file)
@@ -53,37 +53,44 @@ extern "rust-intrinsic" {
     // NB: These intrinsics take raw pointers because they mutate aliased
     // memory, which is not valid for either `&` or `&mut`.
 
+    #[cfg(all(stage0, not(cargobuild)))]
     pub fn atomic_cxchg<T>(dst: *mut T, old: T, src: T) -> T;
+    #[cfg(all(stage0, not(cargobuild)))]
     pub fn atomic_cxchg_acq<T>(dst: *mut T, old: T, src: T) -> T;
+    #[cfg(all(stage0, not(cargobuild)))]
     pub fn atomic_cxchg_rel<T>(dst: *mut T, old: T, src: T) -> T;
+    #[cfg(all(stage0, not(cargobuild)))]
     pub fn atomic_cxchg_acqrel<T>(dst: *mut T, old: T, src: T) -> T;
+    #[cfg(all(stage0, not(cargobuild)))]
     pub fn atomic_cxchg_relaxed<T>(dst: *mut T, old: T, src: T) -> T;
-    #[cfg(not(stage0))]
-    pub fn atomic_cxchg_failrelaxed<T>(dst: *mut T, old: T, src: T) -> T;
-    #[cfg(not(stage0))]
-    pub fn atomic_cxchg_failacq<T>(dst: *mut T, old: T, src: T) -> T;
-    #[cfg(not(stage0))]
-    pub fn atomic_cxchg_acq_failrelaxed<T>(dst: *mut T, old: T, src: T) -> T;
-    #[cfg(not(stage0))]
-    pub fn atomic_cxchg_acqrel_failrelaxed<T>(dst: *mut T, old: T, src: T) -> T;
 
-    #[cfg(not(stage0))]
+    #[cfg(any(not(stage0), cargobuild))]
+    pub fn atomic_cxchg<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+    #[cfg(any(not(stage0), cargobuild))]
+    pub fn atomic_cxchg_acq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+    #[cfg(any(not(stage0), cargobuild))]
+    pub fn atomic_cxchg_rel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+    #[cfg(any(not(stage0), cargobuild))]
+    pub fn atomic_cxchg_acqrel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+    #[cfg(any(not(stage0), cargobuild))]
+    pub fn atomic_cxchg_relaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+    #[cfg(any(not(stage0), cargobuild))]
+    pub fn atomic_cxchg_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+    #[cfg(any(not(stage0), cargobuild))]
+    pub fn atomic_cxchg_failacq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+    #[cfg(any(not(stage0), cargobuild))]
+    pub fn atomic_cxchg_acq_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+    #[cfg(any(not(stage0), cargobuild))]
+    pub fn atomic_cxchg_acqrel_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+
     pub fn atomic_cxchgweak<T>(dst: *mut T, old: T, src: T) -> (T, bool);
-    #[cfg(not(stage0))]
     pub fn atomic_cxchgweak_acq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
-    #[cfg(not(stage0))]
     pub fn atomic_cxchgweak_rel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
-    #[cfg(not(stage0))]
     pub fn atomic_cxchgweak_acqrel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
-    #[cfg(not(stage0))]
     pub fn atomic_cxchgweak_relaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
-    #[cfg(not(stage0))]
     pub fn atomic_cxchgweak_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
-    #[cfg(not(stage0))]
     pub fn atomic_cxchgweak_failacq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
-    #[cfg(not(stage0))]
     pub fn atomic_cxchgweak_acq_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
-    #[cfg(not(stage0))]
     pub fn atomic_cxchgweak_acqrel_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
 
     pub fn atomic_load<T>(src: *const T) -> T;
@@ -539,6 +546,32 @@ extern "rust-intrinsic" {
     /// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero.
     pub fn roundf64(x: f64) -> f64;
 
+    /// Float addition that allows optimizations based on algebraic rules.
+    /// May assume inputs are finite.
+    #[cfg(not(stage0))]
+    pub fn fadd_fast<T>(a: T, b: T) -> T;
+
+    /// Float subtraction that allows optimizations based on algebraic rules.
+    /// May assume inputs are finite.
+    #[cfg(not(stage0))]
+    pub fn fsub_fast<T>(a: T, b: T) -> T;
+
+    /// Float multiplication that allows optimizations based on algebraic rules.
+    /// May assume inputs are finite.
+    #[cfg(not(stage0))]
+    pub fn fmul_fast<T>(a: T, b: T) -> T;
+
+    /// Float division that allows optimizations based on algebraic rules.
+    /// May assume inputs are finite.
+    #[cfg(not(stage0))]
+    pub fn fdiv_fast<T>(a: T, b: T) -> T;
+
+    /// Float remainder that allows optimizations based on algebraic rules.
+    /// May assume inputs are finite.
+    #[cfg(not(stage0))]
+    pub fn frem_fast<T>(a: T, b: T) -> T;
+
+
     /// Returns the number of bits set in an integer type `T`
     pub fn ctpop<T>(x: T) -> T;
 
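
The compare-and-exchange intrinsics above now return a `(value, success)` pair instead of only the old value. At the library level this surfaced, in the release that followed, as `compare_exchange` on the atomic types returning a `Result`; an editorial sketch assuming that later stable API:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let x = AtomicUsize::new(5);
    // Success: the previous value comes back in Ok.
    assert_eq!(x.compare_exchange(5, 10, Ordering::SeqCst, Ordering::SeqCst), Ok(5));
    // Failure: the value actually observed comes back in Err.
    assert_eq!(x.compare_exchange(5, 99, Ordering::SeqCst, Ordering::SeqCst), Err(10));
}
```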
index d6bd9dbf4bde222938fdcefead9918e7f30925db..b4378a5fec58d5a8a5bd0fb0272a6bebfed51c14 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! Composable external iteration
+//! Composable external iteration.
 //!
 //! If you've found yourself with a collection of some kind, and needed to
 //! perform an operation on the elements of said collection, you'll quickly run
@@ -65,7 +65,7 @@
 //!
 //! [`Iterator`]: trait.Iterator.html
 //! [`next()`]: trait.Iterator.html#tymethod.next
-//! [`Option`]: ../option/enum.Option.html
+//! [`Option`]: ../../std/option/enum.Option.html
 //!
 //! # The three forms of iteration
 //!
@@ -303,10 +303,11 @@ use clone::Clone;
 use cmp;
 use cmp::{Ord, PartialOrd, PartialEq, Ordering};
 use default::Default;
+use fmt;
 use marker;
 use mem;
 use num::{Zero, One};
-use ops::{self, Add, Sub, FnMut, Mul, RangeFrom};
+use ops::{self, Add, Sub, FnMut, Mul};
 use option::Option::{self, Some, None};
 use marker::Sized;
 use usize;
@@ -433,7 +434,7 @@ pub trait Iterator {
     /// `None`. Once `None` is encountered, `count()` returns the number of
     /// times it called [`next()`].
     ///
-    /// [`next()`]: #method.next
+    /// [`next()`]: #tymethod.next
     ///
     /// # Overflow Behavior
     ///
@@ -496,7 +497,7 @@ pub trait Iterator {
     /// This method will evaluate the iterator `n` times, discarding those elements.
     /// After it does so, it will call [`next()`] and return its value.
     ///
-    /// [`next()`]: #method.next
+    /// [`next()`]: #tymethod.next
     ///
     /// Like most indexing operations, the count starts from zero, so `nth(0)`
     /// returns the first value, `nth(1)` the second, and so on.
@@ -804,7 +805,7 @@ pub trait Iterator {
     /// closure returns `None`, it will try again, and call the closure on the
     /// next element, seeing if it will return `Some`.
     ///
-    /// [`Option<T>`]: ../option/enum.Option.html
+    /// [`Option<T>`]: ../../std/option/enum.Option.html
     ///
     /// Why `filter_map()` and not just [`filter()`].[`map()`]? The key is in this
     /// part:
@@ -866,7 +867,7 @@ pub trait Iterator {
     /// different sized integer, the [`zip()`] function provides similar
     /// functionality.
     ///
-    /// [`usize`]: ../primitive.usize.html
+    /// [`usize`]: ../../std/primitive.usize.html
     /// [`zip()`]: #method.zip
     ///
     /// # Overflow Behavior
@@ -875,7 +876,7 @@ pub trait Iterator {
     /// [`usize::MAX`] elements either produces the wrong result or panics. If
     /// debug assertions are enabled, a panic is guaranteed.
     ///
-    /// [`usize::MAX`]: ../usize/constant.MAX.html
+    /// [`usize::MAX`]: ../../std/usize/constant.MAX.html
     ///
     /// # Panics
     ///
@@ -1151,7 +1152,7 @@ pub trait Iterator {
     /// iterator and the return value from the closure, an [`Option`], is
     /// yielded by the iterator.
     ///
-    /// [`Option`]: ../option/enum.Option.html
+    /// [`Option`]: ../../std/option/enum.Option.html
     ///
     /// # Examples
     ///
@@ -1385,9 +1386,9 @@ pub trait Iterator {
     /// be thought of as single `Result<Collection<T>, E>`. See the examples
     /// below for more.
     ///
-    /// [`String`]: ../string/struct.String.html
-    /// [`Result<T, E>`]: ../result/enum.Result.html
-    /// [`char`]: ../primitive.char.html
+    /// [`String`]: ../../std/string/struct.String.html
+    /// [`Result<T, E>`]: ../../std/result/enum.Result.html
+    /// [`char`]: ../../std/primitive.char.html
     ///
     /// Because `collect()` is so general, it can cause problems with type
     /// inference. As such, `collect()` is one of the few times you'll see
@@ -1412,7 +1413,7 @@ pub trait Iterator {
     /// Note that we needed the `: Vec<i32>` on the left-hand side. This is because
     /// we could collect into, for example, a [`VecDeque<T>`] instead:
     ///
-    /// [`VecDeque<T>`]: ../collections/struct.VecDeque.html
+    /// [`VecDeque<T>`]: ../../std/collections/struct.VecDeque.html
     ///
     /// ```
     /// use std::collections::VecDeque;
@@ -1532,7 +1533,7 @@ pub trait Iterator {
     /// An iterator adaptor that applies a function, producing a single, final value.
     ///
     /// `fold()` takes two arguments: an initial value, and a closure with two
-    /// arguments: an 'accumulator', and an element. It returns the value that
+    /// arguments: an 'accumulator', and an element. The closure returns the value that
     /// the accumulator should have for the next iteration.
     ///
     /// The initial value is the value the accumulator will have on the first
@@ -1922,19 +1923,6 @@ pub trait Iterator {
             .map(|(_, x)| x)
     }
 
-    #[allow(missing_docs)]
-    #[inline]
-    #[unstable(feature = "iter_cmp",
-               reason = "may want to produce an Ordering directly; see #15311",
-               issue = "27724")]
-    #[rustc_deprecated(reason = "renamed to max_by_key", since = "1.6.0")]
-    fn max_by<B: Ord, F>(self, f: F) -> Option<Self::Item> where
-        Self: Sized,
-        F: FnMut(&Self::Item) -> B,
-    {
-        self.max_by_key(f)
-    }
-
     /// Returns the element that gives the maximum value from the
     /// specified function.
     ///
@@ -1960,19 +1948,6 @@ pub trait Iterator {
             .map(|(_, x)| x)
     }
 
-    #[inline]
-    #[allow(missing_docs)]
-    #[unstable(feature = "iter_cmp",
-               reason = "may want to produce an Ordering directly; see #15311",
-               issue = "27724")]
-    #[rustc_deprecated(reason = "renamed to min_by_key", since = "1.6.0")]
-    fn min_by<B: Ord, F>(self, f: F) -> Option<Self::Item> where
-        Self: Sized,
-        F: FnMut(&Self::Item) -> B,
-    {
-        self.min_by_key(f)
-    }
-
     /// Returns the element that gives the minimum value from the
     /// specified function.
     ///
@@ -2483,10 +2458,10 @@ impl<'a, I: Iterator + ?Sized> Iterator for &'a mut I {
 ///
 /// // and we'll implement FromIterator
 /// impl FromIterator<i32> for MyCollection {
-///     fn from_iter<I: IntoIterator<Item=i32>>(iterator: I) -> Self {
+///     fn from_iter<I: IntoIterator<Item=i32>>(iter: I) -> Self {
 ///         let mut c = MyCollection::new();
 ///
-///         for i in iterator {
+///         for i in iter {
 ///             c.add(i);
 ///         }
 ///
@@ -2533,7 +2508,7 @@ pub trait FromIterator<A>: Sized {
     /// assert_eq!(v, vec![5, 5, 5, 5, 5]);
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
-    fn from_iter<T: IntoIterator<Item=A>>(iterator: T) -> Self;
+    fn from_iter<T: IntoIterator<Item=A>>(iter: T) -> Self;
 }
 
 /// Conversion into an `Iterator`.
@@ -2708,11 +2683,11 @@ impl<I: Iterator> IntoIterator for I {
 ///     // This is a bit simpler with the concrete type signature: we can call
 ///     // extend on anything which can be turned into an Iterator which gives
 ///     // us i32s. Because we need i32s to put into MyCollection.
-///     fn extend<T: IntoIterator<Item=i32>>(&mut self, iterable: T) {
+///     fn extend<T: IntoIterator<Item=i32>>(&mut self, iter: T) {
 ///
 ///         // The implementation is very straightforward: loop through the
 ///         // iterator, and add() each element to ourselves.
-///         for elem in iterable {
+///         for elem in iter {
 ///             self.add(elem);
 ///         }
 ///     }
@@ -2752,7 +2727,7 @@ pub trait Extend<A> {
     /// assert_eq!("abcdef", &message);
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
-    fn extend<T: IntoIterator<Item=A>>(&mut self, iterable: T);
+    fn extend<T: IntoIterator<Item=A>>(&mut self, iter: T);
 }
 
 /// An iterator able to yield elements from both ends.
@@ -2955,7 +2930,7 @@ impl<A, B> ExactSizeIterator for Zip<A, B>
 ///
 /// [`rev()`]: trait.Iterator.html#method.rev
 /// [`Iterator`]: trait.Iterator.html
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Rev<T> {
@@ -2987,7 +2962,7 @@ impl<I> DoubleEndedIterator for Rev<I> where I: DoubleEndedIterator {
 /// [`Iterator`]: trait.Iterator.html
 #[stable(feature = "iter_cloned", since = "1.1.0")]
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct Cloned<I> {
     it: I,
 }
@@ -3028,7 +3003,7 @@ impl<'a, I, T: 'a> ExactSizeIterator for Cloned<I>
 ///
 /// [`cycle()`]: trait.Iterator.html#method.cycle
 /// [`Iterator`]: trait.Iterator.html
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Cycle<I> {
@@ -3066,7 +3041,7 @@ impl<I> Iterator for Cycle<I> where I: Clone + Iterator {
 ///
 /// [`chain()`]: trait.Iterator.html#method.chain
 /// [`Iterator`]: trait.Iterator.html
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Chain<A, B> {
@@ -3088,7 +3063,7 @@ pub struct Chain<A, B> {
 //
 //  The fourth state (neither iterator is remaining) only occurs after Chain has
 //  returned None once, so we don't need to store this state.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 enum ChainState {
     // both front and back iterator are remaining
     Both,
@@ -3210,7 +3185,7 @@ impl<A, B> DoubleEndedIterator for Chain<A, B> where
 ///
 /// [`zip()`]: trait.Iterator.html#method.zip
 /// [`Iterator`]: trait.Iterator.html
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Zip<A, B> {
@@ -3333,6 +3308,15 @@ pub struct Map<I, F> {
     f: F,
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, F> fmt::Debug for Map<I, F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Map")
+            .field("iter", &self.iter)
+            .finish()
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<B, I: Iterator, F> Iterator for Map<I, F> where F: FnMut(I::Item) -> B {
     type Item = B;
@@ -3373,6 +3357,15 @@ pub struct Filter<I, P> {
     predicate: P,
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, P> fmt::Debug for Filter<I, P> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Filter")
+            .field("iter", &self.iter)
+            .finish()
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<I: Iterator, P> Iterator for Filter<I, P> where P: FnMut(&I::Item) -> bool {
     type Item = I::Item;
@@ -3424,6 +3417,15 @@ pub struct FilterMap<I, F> {
     f: F,
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, F> fmt::Debug for FilterMap<I, F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("FilterMap")
+            .field("iter", &self.iter)
+            .finish()
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<B, I: Iterator, F> Iterator for FilterMap<I, F>
     where F: FnMut(I::Item) -> Option<B>,
@@ -3469,7 +3471,7 @@ impl<B, I: DoubleEndedIterator, F> DoubleEndedIterator for FilterMap<I, F>
 ///
 /// [`enumerate()`]: trait.Iterator.html#method.enumerate
 /// [`Iterator`]: trait.Iterator.html
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Enumerate<I> {
@@ -3543,7 +3545,7 @@ impl<I> DoubleEndedIterator for Enumerate<I> where
 ///
 /// [`peekable()`]: trait.Iterator.html#method.peekable
 /// [`Iterator`]: trait.Iterator.html
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Peekable<I: Iterator> {
@@ -3679,7 +3681,7 @@ impl<I: Iterator> Peekable<I> {
     ///
     /// assert_eq!(iter.is_empty(), true);
     /// ```
-    #[unstable(feature = "peekable_is_empty", issue = "27701")]
+    #[unstable(feature = "peekable_is_empty", issue = "32111")]
     #[inline]
     pub fn is_empty(&mut self) -> bool {
         self.peek().is_none()
@@ -3702,6 +3704,16 @@ pub struct SkipWhile<I, P> {
     predicate: P,
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, P> fmt::Debug for SkipWhile<I, P> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("SkipWhile")
+            .field("iter", &self.iter)
+            .field("flag", &self.flag)
+            .finish()
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<I: Iterator, P> Iterator for SkipWhile<I, P>
     where P: FnMut(&I::Item) -> bool
@@ -3742,6 +3754,16 @@ pub struct TakeWhile<I, P> {
     predicate: P,
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, P> fmt::Debug for TakeWhile<I, P> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("TakeWhile")
+            .field("iter", &self.iter)
+            .field("flag", &self.flag)
+            .finish()
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<I: Iterator, P> Iterator for TakeWhile<I, P>
     where P: FnMut(&I::Item) -> bool
@@ -3778,7 +3800,7 @@ impl<I: Iterator, P> Iterator for TakeWhile<I, P>
 ///
 /// [`skip()`]: trait.Iterator.html#method.skip
 /// [`Iterator`]: trait.Iterator.html
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Skip<I> {
@@ -3851,6 +3873,17 @@ impl<I> Iterator for Skip<I> where I: Iterator {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<I> ExactSizeIterator for Skip<I> where I: ExactSizeIterator {}
 
+#[stable(feature = "double_ended_skip_iterator", since = "1.8.0")]
+impl<I> DoubleEndedIterator for Skip<I> where I: DoubleEndedIterator + ExactSizeIterator {
+    fn next_back(&mut self) -> Option<Self::Item> {
+        if self.len() > 0 {
+            self.iter.next_back()
+        } else {
+            None
+        }
+    }
+}
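With the `DoubleEndedIterator` impl for `Skip` added above (which requires the inner iterator to be double-ended and exact-size), skipped iterators can be walked from the back as well. A small usage sketch:

```rust
fn main() {
    // Skip<I> is double-ended when I: DoubleEndedIterator + ExactSizeIterator,
    // so the skipped prefix is never yielded even when iterating in reverse.
    let v: Vec<_> = (0..10).skip(7).rev().collect();
    assert_eq!(v, [9, 8, 7]);
}
```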
+
 /// An iterator that only iterates over the first `n` iterations of `iter`.
 ///
 /// This `struct` is created by the [`take()`] method on [`Iterator`]. See its
@@ -3858,7 +3891,7 @@ impl<I> ExactSizeIterator for Skip<I> where I: ExactSizeIterator {}
 ///
 /// [`take()`]: trait.Iterator.html#method.take
 /// [`Iterator`]: trait.Iterator.html
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Take<I> {
@@ -3929,6 +3962,16 @@ pub struct Scan<I, St, F> {
     state: St,
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, St: fmt::Debug, F> fmt::Debug for Scan<I, St, F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Scan")
+            .field("iter", &self.iter)
+            .field("state", &self.state)
+            .finish()
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<B, I, St, F> Iterator for Scan<I, St, F> where
     I: Iterator,
@@ -3966,6 +4009,19 @@ pub struct FlatMap<I, U: IntoIterator, F> {
     backiter: Option<U::IntoIter>,
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, U: IntoIterator, F> fmt::Debug for FlatMap<I, U, F>
+    where U::IntoIter: fmt::Debug
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("FlatMap")
+            .field("iter", &self.iter)
+            .field("frontiter", &self.frontiter)
+            .field("backiter", &self.backiter)
+            .finish()
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<I: Iterator, U: IntoIterator, F> Iterator for FlatMap<I, U, F>
     where F: FnMut(I::Item) -> U,
@@ -4029,7 +4085,7 @@ impl<I: DoubleEndedIterator, U, F> DoubleEndedIterator for FlatMap<I, U, F> wher
 ///
 /// [`fuse()`]: trait.Iterator.html#method.fuse
 /// [`Iterator`]: trait.Iterator.html
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Fuse<I> {
@@ -4124,6 +4180,15 @@ pub struct Inspect<I, F> {
     f: F,
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, F> fmt::Debug for Inspect<I, F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Inspect")
+            .field("iter", &self.iter)
+            .finish()
+    }
+}
+
 impl<I: Iterator, F> Inspect<I, F> where F: FnMut(&I::Item) {
     #[inline]
     fn do_inspect(&mut self, elt: Option<I::Item>) -> Option<I::Item> {
@@ -4286,8 +4351,8 @@ step_impl_no_between!(u64 i64);
 ///
 /// The resulting iterator handles overflow by stopping. The `A`
 /// parameter is the type being iterated over, while `R` is the range
-/// type (usually one of `std::ops::{Range, RangeFrom}`.
-#[derive(Clone)]
+/// type (usually one of `std::ops::{Range, RangeFrom, RangeInclusive}`).
+#[derive(Clone, Debug)]
 #[unstable(feature = "step_by", reason = "recent addition",
            issue = "27741")]
 pub struct StepBy<A, R> {
@@ -4295,7 +4360,7 @@ pub struct StepBy<A, R> {
     range: R,
 }
 
-impl<A: Step> RangeFrom<A> {
+impl<A: Step> ops::RangeFrom<A> {
     /// Creates an iterator starting at the same point, but stepping by
     /// the given amount at each iteration.
     ///
@@ -4355,8 +4420,44 @@ impl<A: Step> ops::Range<A> {
     }
 }
 
+impl<A: Step> ops::RangeInclusive<A> {
+    /// Creates an iterator with the same range, but stepping by the
+    /// given amount at each iteration.
+    ///
+    /// The resulting iterator handles overflow by stopping.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(step_by, inclusive_range_syntax)]
+    ///
+    /// for i in (0...10).step_by(2) {
+    ///     println!("{}", i);
+    /// }
+    /// ```
+    ///
+    /// This prints:
+    ///
+    /// ```text
+    /// 0
+    /// 2
+    /// 4
+    /// 6
+    /// 8
+    /// 10
+    /// ```
+    #[unstable(feature = "step_by", reason = "recent addition",
+               issue = "27741")]
+    pub fn step_by(self, by: A) -> StepBy<A, Self> {
+        StepBy {
+            step_by: by,
+            range: self
+        }
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<A> Iterator for StepBy<A, RangeFrom<A>> where
+impl<A> Iterator for StepBy<A, ops::RangeFrom<A>> where
     A: Clone,
     for<'a> &'a A: Add<&'a A, Output = A>
 {
@@ -4375,95 +4476,6 @@ impl<A> Iterator for StepBy<A, RangeFrom<A>> where
     }
 }
 
-/// An iterator over the range [start, stop]
-#[derive(Clone)]
-#[unstable(feature = "range_inclusive",
-           reason = "likely to be replaced by range notation and adapters",
-           issue = "27777")]
-#[rustc_deprecated(since = "1.5.0", reason = "replaced with ... syntax")]
-#[allow(deprecated)]
-pub struct RangeInclusive<A> {
-    range: ops::Range<A>,
-    done: bool,
-}
-
-/// Returns an iterator over the range [start, stop].
-#[inline]
-#[unstable(feature = "range_inclusive",
-           reason = "likely to be replaced by range notation and adapters",
-           issue = "27777")]
-#[rustc_deprecated(since = "1.5.0", reason = "replaced with ... syntax")]
-#[allow(deprecated)]
-pub fn range_inclusive<A>(start: A, stop: A) -> RangeInclusive<A>
-    where A: Step + One + Clone
-{
-    RangeInclusive {
-        range: start..stop,
-        done: false,
-    }
-}
-
-#[unstable(feature = "range_inclusive",
-           reason = "likely to be replaced by range notation and adapters",
-           issue = "27777")]
-#[rustc_deprecated(since = "1.5.0", reason = "replaced with ... syntax")]
-#[allow(deprecated)]
-impl<A> Iterator for RangeInclusive<A> where
-    A: PartialEq + Step + One + Clone,
-    for<'a> &'a A: Add<&'a A, Output = A>
-{
-    type Item = A;
-
-    #[inline]
-    fn next(&mut self) -> Option<A> {
-        self.range.next().or_else(|| {
-            if !self.done && self.range.start == self.range.end {
-                self.done = true;
-                Some(self.range.end.clone())
-            } else {
-                None
-            }
-        })
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        let (lo, hi) = self.range.size_hint();
-        if self.done {
-            (lo, hi)
-        } else {
-            let lo = lo.saturating_add(1);
-            let hi = hi.and_then(|x| x.checked_add(1));
-            (lo, hi)
-        }
-    }
-}
-
-#[unstable(feature = "range_inclusive",
-           reason = "likely to be replaced by range notation and adapters",
-           issue = "27777")]
-#[rustc_deprecated(since = "1.5.0", reason = "replaced with ... syntax")]
-#[allow(deprecated)]
-impl<A> DoubleEndedIterator for RangeInclusive<A> where
-    A: PartialEq + Step + One + Clone,
-    for<'a> &'a A: Add<&'a A, Output = A>,
-    for<'a> &'a A: Sub<Output=A>
-{
-    #[inline]
-    fn next_back(&mut self) -> Option<A> {
-        if self.range.end > self.range.start {
-            let result = self.range.end.clone();
-            self.range.end = &self.range.end - &A::one();
-            Some(result)
-        } else if !self.done && self.range.start == self.range.end {
-            self.done = true;
-            Some(self.range.end.clone())
-        } else {
-            None
-        }
-    }
-}
-
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<A: Step + Zero + Clone> Iterator for StepBy<A, ops::Range<A>> {
     type Item = A;
@@ -4501,10 +4513,83 @@ impl<A: Step + Zero + Clone> Iterator for StepBy<A, ops::Range<A>> {
     }
 }
 
+#[unstable(feature = "inclusive_range",
+           reason = "recently added, follows RFC",
+           issue = "28237")]
+impl<A: Step + Zero + Clone> Iterator for StepBy<A, ops::RangeInclusive<A>> {
+    type Item = A;
+
+    #[inline]
+    fn next(&mut self) -> Option<A> {
+        use ops::RangeInclusive::*;
+
+        // this function has a sort of odd structure due to borrowck issues
+        // we may need to replace self.range, so borrows of start and end need to end early
+
+        let (finishing, n) = match self.range {
+            Empty { .. } => return None, // empty iterators yield no values
+
+            NonEmpty { ref mut start, ref mut end } => {
+                let zero = A::zero();
+                let rev = self.step_by < zero;
+
+                // march start towards (maybe past!) end and yield the old value
+                if (rev && start >= end) ||
+                   (!rev && start <= end)
+                {
+                    match start.step(&self.step_by) {
+                        Some(mut n) => {
+                            mem::swap(start, &mut n);
+                            (None, Some(n)) // yield old value, remain non-empty
+                        },
+                        None => {
+                            let mut n = end.clone();
+                            mem::swap(start, &mut n);
+                            (None, Some(n)) // yield old value, remain non-empty
+                        }
+                    }
+                } else {
+                    // found range in inconsistent state (start at or past end), so become empty
+                    (Some(mem::replace(end, zero)), None)
+                }
+            }
+        };
+
+        // turn into an empty iterator if we've reached the end
+        if let Some(end) = finishing {
+            self.range = Empty { at: end };
+        }
+
+        n
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        use ops::RangeInclusive::*;
+
+        match self.range {
+            Empty { .. } => (0, Some(0)),
+
+            NonEmpty { ref start, ref end } =>
+                match Step::steps_between(start,
+                                          end,
+                                          &self.step_by) {
+                    Some(hint) => (hint.saturating_add(1), hint.checked_add(1)),
+                    None       => (0, None)
+                }
+        }
+    }
+}
+
 macro_rules! range_exact_iter_impl {
     ($($t:ty)*) => ($(
         #[stable(feature = "rust1", since = "1.0.0")]
         impl ExactSizeIterator for ops::Range<$t> { }
+
+        #[unstable(feature = "inclusive_range",
+                   reason = "recently added, follows RFC",
+                   issue = "28237")]
+        impl ExactSizeIterator for ops::RangeInclusive<$t> { }
     )*)
 }
 
@@ -4568,12 +4653,113 @@ impl<A: Step + One> Iterator for ops::RangeFrom<A> where
     }
 }
 
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl<A: Step + One> Iterator for ops::RangeInclusive<A> where
+    for<'a> &'a A: Add<&'a A, Output = A>
+{
+    type Item = A;
+
+    #[inline]
+    fn next(&mut self) -> Option<A> {
+        use ops::RangeInclusive::*;
+
+        // this function has a sort of odd structure due to borrowck issues
+        // we may need to replace self, so borrows of self.start and self.end need to end early
+
+        let (finishing, n) = match *self {
+            Empty { .. } => (None, None), // empty iterators yield no values
+
+            NonEmpty { ref mut start, ref mut end } => {
+                if start == end {
+                    (Some(mem::replace(end, A::one())), Some(mem::replace(start, A::one())))
+                } else if start < end {
+                    let one = A::one();
+                    let mut n = &*start + &one;
+                    mem::swap(&mut n, start);
+
+                    // if the iterator is done iterating, it will change from NonEmpty to Empty
+                    // to avoid unnecessary drops or clones, we'll reuse either start or end
+                    // (they are equal now, so it doesn't matter which)
+                    // to pull out end, we need to swap something back in -- use the previously
+                    // created A::one() as a dummy value
+
+                    (if n == *end { Some(mem::replace(end, one)) } else { None },
+                    // ^ are we done yet?
+                    Some(n)) // < the value to output
+                } else {
+                    (Some(mem::replace(start, A::one())), None)
+                }
+            }
+        };
+
+        // turn into an empty iterator if this is the last value
+        if let Some(end) = finishing {
+            *self = Empty { at: end };
+        }
+
+        n
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        use ops::RangeInclusive::*;
+
+        match *self {
+            Empty { .. } => (0, Some(0)),
+
+            NonEmpty { ref start, ref end } =>
+                match Step::steps_between(start, end, &A::one()) {
+                    Some(hint) => (hint.saturating_add(1), hint.checked_add(1)),
+                    None => (0, None),
+                }
+        }
+    }
+}
+
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl<A: Step + One> DoubleEndedIterator for ops::RangeInclusive<A> where
+    for<'a> &'a A: Add<&'a A, Output = A>,
+    for<'a> &'a A: Sub<&'a A, Output = A>
+{
+    #[inline]
+    fn next_back(&mut self) -> Option<A> {
+        use ops::RangeInclusive::*;
+
+        // see Iterator::next for comments
+
+        let (finishing, n) = match *self {
+            Empty { .. } => return None,
+
+            NonEmpty { ref mut start, ref mut end } => {
+                if start == end {
+                    (Some(mem::replace(start, A::one())), Some(mem::replace(end, A::one())))
+                } else if start < end {
+                    let one = A::one();
+                    let mut n = &*end - &one;
+                    mem::swap(&mut n, end);
+
+                    (if n == *start { Some(mem::replace(start, one)) } else { None },
+                     Some(n))
+                } else {
+                    (Some(mem::replace(end, A::one())), None)
+                }
+            }
+        };
+
+        if let Some(start) = finishing {
+            *self = Empty { at: start };
+        }
+
+        n
+    }
+}
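Taken together, the two impls above let `a...b` ranges be iterated from either end. A sketch against the nightly feature gates of this era (the `...` syntax was later replaced by `..=`):

```rust
#![feature(inclusive_range_syntax)]

fn main() {
    // Forward iteration yields both endpoints.
    let forward: Vec<_> = (1...3).collect();
    assert_eq!(forward, [1, 2, 3]);

    // The DoubleEndedIterator impl above makes reverse iteration work too.
    let backward: Vec<_> = (1...3).rev().collect();
    assert_eq!(backward, [3, 2, 1]);
}
```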
+
 /// An iterator that repeats an element endlessly.
 ///
 /// This `struct` is created by the [`repeat()`] function. See its documentation for more.
 ///
 /// [`repeat()`]: fn.repeat.html
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Repeat<A> {
     element: A
@@ -4655,6 +4841,13 @@ pub fn repeat<T: Clone>(elt: T) -> Repeat<T> {
 #[stable(feature = "iter_empty", since = "1.2.0")]
 pub struct Empty<T>(marker::PhantomData<T>);
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T> fmt::Debug for Empty<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.pad("Empty")
+    }
+}
+
 #[stable(feature = "iter_empty", since = "1.2.0")]
 impl<T> Iterator for Empty<T> {
     type Item = T;
@@ -4724,7 +4917,7 @@ pub fn empty<T>() -> Empty<T> {
 /// This `struct` is created by the [`once()`] function. See its documentation for more.
 ///
 /// [`once()`]: fn.once.html
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 #[stable(feature = "iter_once", since = "1.2.0")]
 pub struct Once<T> {
     inner: ::option::IntoIter<T>
index f76b8655ad1ed1389f555999bee3c4d63b95f395..fa5e90562d80e1b28721b0b3baea998f569262ca 100644 (file)
@@ -56,6 +56,7 @@
 
 #![no_core]
 #![deny(missing_docs)]
+#![deny(missing_debug_implementations)]
 #![cfg_attr(not(stage0), deny(warnings))]
 
 #![feature(allow_internal_unstable)]
@@ -64,6 +65,7 @@
 #![feature(const_fn)]
 #![feature(custom_attribute)]
 #![feature(fundamental)]
+#![feature(inclusive_range_syntax)]
 #![feature(intrinsics)]
 #![feature(lang_items)]
 #![feature(no_core)]
 #![feature(reflect)]
 #![feature(unwind_attributes)]
 #![feature(repr_simd, platform_intrinsics)]
+#![feature(rustc_attrs)]
+#![feature(specialization)]
 #![feature(staged_api)]
 #![feature(unboxed_closures)]
+#![feature(question_mark)]
 
 #[macro_use]
 mod macros;
index 154ca30c62dd148398744744912fed670a34ada3..f923668688b8c2ef18b3266318d45bb8f14b76eb 100644 (file)
@@ -206,8 +206,8 @@ macro_rules! try {
 ///
 /// See [`std::fmt`][fmt] for more information on format syntax.
 ///
-/// [fmt]: fmt/index.html
-/// [write]: io/trait.Write.html
+/// [fmt]: ../std/fmt/index.html
+/// [write]: ../std/io/trait.Write.html
 ///
 /// # Examples
 ///
@@ -232,8 +232,8 @@ macro_rules! write {
 ///
 /// See [`std::fmt`][fmt] for more information on format syntax.
 ///
-/// [fmt]: fmt/index.html
-/// [write]: io/trait.Write.html
+/// [fmt]: ../std/fmt/index.html
+/// [write]: ../std/io/trait.Write.html
 ///
 /// # Examples
 ///
index c36ad592ad3b54500858bc8149a1a6e031cda2b3..2c648d1516bffc30d01aa7e382413544427ff61c 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! Basic functions for dealing with memory
+//! Basic functions for dealing with memory.
 //!
 //! This module contains functions for querying the size and alignment of
 //! types, initializing and manipulating memory.
index 9e946dc65c25388d75305e687456ccdd59ffeed6..022bd84f4c8417e3ef352a0f493f618b4de0749b 100644 (file)
@@ -154,8 +154,8 @@ from_str_float_impl!(f64);
 /// for [`f32`] and [`f64`].
 ///
 /// [`FromStr`]: ../str/trait.FromStr.html
-/// [`f32`]: ../primitive.f32.html
-/// [`f64`]: ../primitive.f64.html
+/// [`f32`]: ../../std/primitive.f32.html
+/// [`f64`]: ../../std/primitive.f64.html
 #[derive(Debug, Clone, PartialEq)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct ParseFloatError {
@@ -214,7 +214,7 @@ fn dec2flt<T: RawFloat>(s: &str) -> Result<T, ParseFloatError> {
     }
     let (sign, s) = extract_sign(s);
     let flt = match parse_decimal(s) {
-        ParseResult::Valid(decimal) => try!(convert(decimal)),
+        ParseResult::Valid(decimal) => convert(decimal)?,
         ParseResult::ShortcutToInf => T::infinity(),
         ParseResult::ShortcutToZero => T::zero(),
         ParseResult::Invalid => match s {
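The change above swaps `try!(convert(decimal))` for `convert(decimal)?`; the `?` operator (gated by the `question_mark` feature enabled in lib.rs at this point) early-returns the error just like `try!`. A hypothetical sketch of the equivalence, not taken from the diff:

```rust
use std::num::ParseIntError;

// `a.parse()?` behaves like the old `try!(a.parse())`: on Err it returns
// early from the enclosing function with the error converted via From.
fn parse_two(a: &str, b: &str) -> Result<(i32, i32), ParseIntError> {
    Ok((a.parse()?, b.parse()?))
}

fn main() {
    assert_eq!(parse_two("1", "2"), Ok((1, 2)));
    assert!(parse_two("1", "oops").is_err());
}
```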
index 8af1022acdf24bdb7150653dffb213fe59501500..c24eaa3eabc754956e1ef4027328b813632b941e 100644 (file)
@@ -141,7 +141,7 @@ pub mod consts {
 
 #[unstable(feature = "core_float",
            reason = "stable interface is via `impl f{32,64}` in later crates",
-           issue = "27702")]
+           issue = "32110")]
 impl Float for f32 {
     #[inline]
     fn nan() -> f32 { NAN }
index 9486e4337bf58f56dcbd38b6527c68c73174ec5f..beeee80902525f16c0684bda05e2debc1832d99f 100644 (file)
@@ -141,7 +141,7 @@ pub mod consts {
 
 #[unstable(feature = "core_float",
            reason = "stable interface is via `impl f{32,64}` in later crates",
-           issue = "27702")]
+           issue = "32110")]
 impl Float for f64 {
     #[inline]
     fn nan() -> f64 { NAN }
index b9a7afc400d5e7a8e854ff5fd1ed5e4a78971284..b549f33424264d3ddcf741b19cc8408486aabd74 100644 (file)
@@ -222,6 +222,7 @@ impl<'a> Part<'a> {
 
 /// Formatted result containing one or more parts.
 /// This can be written to the byte buffer or converted to the allocated string.
+#[allow(missing_debug_implementations)]
 #[derive(Clone)]
 pub struct Formatted<'a> {
     /// A byte slice representing a sign, either `""`, `"-"` or `"+"`.
index 4054497941797140ccc4fa1c9d7c8496e8efe446..1dd820980f496c534ff3e9ceda4a4da9c90b1b5e 100644 (file)
@@ -10,7 +10,7 @@
 
 //! The 16-bit signed integer type.
 //!
-//! *[See also the `i16` primitive type](../primitive.i16.html).*
+//! *[See also the `i16` primitive type](../../std/primitive.i16.html).*
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
index 5d2ade8d8e0e93b3dcd0e0adb539e48a9391a8e3..8a2168933dc66c5956a3f41d674d410c3243211b 100644 (file)
@@ -10,7 +10,7 @@
 
 //! The 32-bit signed integer type.
 //!
-//! *[See also the `i32` primitive type](../primitive.i32.html).*
+//! *[See also the `i32` primitive type](../../std/primitive.i32.html).*
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
index b1d43a3b83872a79b2dd55de5c2ca2e0276674c9..2ce9eb11936bc981dcaa2e8c8140182c4d905430 100644 (file)
@@ -10,7 +10,7 @@
 
 //! The 64-bit signed integer type.
 //!
-//! *[See also the `i64` primitive type](../primitive.i64.html).*
+//! *[See also the `i64` primitive type](../../std/primitive.i64.html).*
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
index ee003d92b28235c47e366624cd6410722faa0548..8b5a7f1910e2055f467befc7e44c64f51528709c 100644 (file)
@@ -10,7 +10,7 @@
 
 //! The 8-bit signed integer type.
 //!
-//! *[See also the `i8` primitive type](../primitive.i8.html).*
+//! *[See also the `i8` primitive type](../../std/primitive.i8.html).*
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
index 77f662723c86dab6f3ca7868a12960aa09efbca3..42349257ab71c23415eb2edcfb49bbc13c9d6aca 100644 (file)
 
 macro_rules! int_module { ($T:ty, $bits:expr) => (
 
-// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of
-// calling the `mem::size_of` function.
-#[unstable(feature = "num_bits_bytes",
-           reason = "may want to be an associated function",
-           issue = "27753")]
-#[rustc_deprecated(since = "1.7.0",
-                   reason = "will be replaced via const fn or associated constants")]
-#[allow(missing_docs)]
-pub const BITS : usize = $bits;
-// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of
-// calling the `mem::size_of` function.
-#[unstable(feature = "num_bits_bytes",
-           reason = "may want to be an associated function",
-           issue = "27753")]
-#[rustc_deprecated(since = "1.7.0",
-                   reason = "will be replaced via const fn or associated constants")]
-#[allow(missing_docs)]
-pub const BYTES : usize = ($bits / 8);
-
 // FIXME(#11621): Should be deprecated once CTFE is implemented in favour of
 // calling the `Bounded::min_value` function.
 #[stable(feature = "rust1", since = "1.0.0")]
index 034a5c0eb89d4388c16be66883a5baff71bf84c8..de5b1777f936f6b86fd7beec86472f026b5f6d8c 100644 (file)
@@ -10,7 +10,7 @@
 
 //! The pointer-sized signed integer type.
 //!
-//! *[See also the `isize` primitive type](../primitive.isize.html).*
+//! *[See also the `isize` primitive type](../../std/primitive.isize.html).*
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
index ed370bb91648fc457a9359bdae4d4667a8701783..229a864d712c5cf4f07e17439c420ed879d58f78 100644 (file)
@@ -41,7 +41,7 @@ use slice::SliceExt;
 #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug, Default)]
 pub struct Wrapping<T>(#[stable(feature = "rust1", since = "1.0.0")] pub T);
 
-pub mod wrapping;
+mod wrapping;
 
 // All these modules are technically private and only exposed for libcoretest:
 pub mod flt2dec;
@@ -1008,6 +1008,7 @@ macro_rules! int_impl {
         /// ```
         #[stable(feature = "rust1", since = "1.0.0")]
         #[inline]
+        #[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
         pub fn pow(self, mut exp: u32) -> Self {
             let mut base = self;
             let mut acc = Self::one();
@@ -1049,6 +1050,7 @@ macro_rules! int_impl {
         /// ```
         #[stable(feature = "rust1", since = "1.0.0")]
         #[inline]
+        #[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
         pub fn abs(self) -> Self {
             if self.is_negative() {
                 // Note that the #[inline] above means that the overflow
@@ -2013,6 +2015,7 @@ macro_rules! uint_impl {
         /// ```
         #[stable(feature = "rust1", since = "1.0.0")]
         #[inline]
+        #[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
         pub fn pow(self, mut exp: u32) -> Self {
             let mut base = self;
             let mut acc = Self::one();
@@ -2179,8 +2182,8 @@ impl usize {
 /// This `enum` is used as the return type for [`f32::classify()`] and [`f64::classify()`]. See
 /// their documentation for more.
 ///
-/// [`f32::classify()`]: ../primitive.f32.html#method.classify
-/// [`f64::classify()`]: ../primitive.f64.html#method.classify
+/// [`f32::classify()`]: ../../std/primitive.f32.html#method.classify
+/// [`f64::classify()`]: ../../std/primitive.f64.html#method.classify
 #[derive(Copy, Clone, PartialEq, Debug)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub enum FpCategory {
@@ -2209,7 +2212,7 @@ pub enum FpCategory {
 #[doc(hidden)]
 #[unstable(feature = "core_float",
            reason = "stable interface is via `impl f{32,64}` in later crates",
-           issue = "27702")]
+           issue = "32110")]
 pub trait Float: Sized {
     /// Returns the NaN value.
     #[unstable(feature = "float_extras", reason = "needs removal",
@@ -2411,7 +2414,7 @@ fn from_str_radix<T: FromStrRadixHelper>(src: &str, radix: u32)
 /// This error is used as the error type for the `from_str_radix()` functions
 /// on the primitive integer types, such as [`i8::from_str_radix()`].
 ///
-/// [`i8::from_str_radix()`]: ../std/primitive.i8.html#method.from_str_radix
+/// [`i8::from_str_radix()`]: ../../std/primitive.i8.html#method.from_str_radix
 #[derive(Debug, Clone, PartialEq)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct ParseIntError { kind: IntErrorKind }
index d7e320267c60777c8dafa3a304a36db74cf0d771..d34d87caa55fa630b5a21800229adcb162ac4542 100644 (file)
@@ -10,7 +10,7 @@
 
 //! The 16-bit unsigned integer type.
 //!
-//! *[See also the `u16` primitive type](../primitive.u16.html).*
+//! *[See also the `u16` primitive type](../../std/primitive.u16.html).*
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
index 9a26a39d5b36546b2f22c2ab1998dcbf346bfdcf..f9c9099e47f18b9694a9f30d2738b89720fd95eb 100644 (file)
@@ -10,7 +10,7 @@
 
 //! The 32-bit unsigned integer type.
 //!
-//! *[See also the `u32` primitive type](../primitive.u32.html).*
+//! *[See also the `u32` primitive type](../../std/primitive.u32.html).*
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
index e12c2c265420bcef49127a67cbbb10da64d7aa78..8dfe4335a3d722bfadae71e487b93e7032582002 100644 (file)
@@ -10,7 +10,7 @@
 
 //! The 64-bit unsigned integer type.
 //!
-//! *[See also the `u64` primitive type](../primitive.u64.html).*
+//! *[See also the `u64` primitive type](../../std/primitive.u64.html).*
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
index 0eb63bd9f3a9d863b74a7951958915e2bdab935a..0106ee8e401c9cb4f64cd8e2572f9fdab29e8733 100644 (file)
@@ -10,7 +10,7 @@
 
 //! The 8-bit unsigned integer type.
 //!
-//! *[See also the `u8` primitive type](../primitive.u8.html).*
+//! *[See also the `u8` primitive type](../../std/primitive.u8.html).*
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
index 9539a311390f6407e96c7456c61839adf232df46..6479836cbe1179361a8c468d100fde73ed91c2d5 100644 (file)
 
 macro_rules! uint_module { ($T:ty, $bits:expr) => (
 
-#[unstable(feature = "num_bits_bytes",
-           reason = "may want to be an associated function",
-           issue = "27753")]
-#[rustc_deprecated(since = "1.7.0",
-                   reason = "will be replaced via const fn or associated constants")]
-#[allow(missing_docs)]
-pub const BITS : usize = $bits;
-#[unstable(feature = "num_bits_bytes",
-           reason = "may want to be an associated function",
-           issue = "27753")]
-#[rustc_deprecated(since = "1.7.0",
-                   reason = "will be replaced via const fn or associated constants")]
-#[allow(missing_docs)]
-pub const BYTES : usize = ($bits / 8);
-
 #[stable(feature = "rust1", since = "1.0.0")]
 #[allow(missing_docs)]
 pub const MIN: $T = 0 as $T;
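The deprecated `BITS`/`BYTES` constants removed above (here and in the signed `int_module` earlier) have no direct replacement in this diff; as the removed FIXME notes, `mem::size_of` covers the same information. A sketch, taking `u32` as the example type:

```rust
use std::mem;

fn main() {
    // What the removed u32::BYTES / u32::BITS constants used to provide.
    assert_eq!(mem::size_of::<u32>(), 4);
    assert_eq!(mem::size_of::<u32>() * 8, 32);
}
```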
index 875ab32fa8c572efdd9d5339128b55d27926b320..0c7d16a41bc7e8968b6501b6c57378097ee939e0 100644 (file)
@@ -10,7 +10,7 @@
 
 //! The pointer-sized unsigned integer type.
 //!
-//! *[See also the `usize` primitive type](../primitive.usize.html).*
+//! *[See also the `usize` primitive type](../../std/primitive.usize.html).*
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
index 7ccc15c21121b35194ec664050e78124f9f5e720..e28a36af2f32202cca3b8a9e9c34bbe95611f651 100644 (file)
@@ -8,34 +8,10 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![allow(missing_docs)]
-#![unstable(feature = "old_wrapping", reason = "may be removed or relocated",
-            issue = "27755")]
-
-use intrinsics::{add_with_overflow, sub_with_overflow, mul_with_overflow};
-
 use super::Wrapping;
 
 use ops::*;
 
-use ::{i8, i16, i32, i64, isize};
-
-#[unstable(feature = "old_wrapping", reason = "may be removed or relocated",
-           issue = "27755")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to inherent methods")]
-pub trait OverflowingOps {
-    fn overflowing_add(self, rhs: Self) -> (Self, bool);
-    fn overflowing_sub(self, rhs: Self) -> (Self, bool);
-    fn overflowing_mul(self, rhs: Self) -> (Self, bool);
-
-    fn overflowing_div(self, rhs: Self) -> (Self, bool);
-    fn overflowing_rem(self, rhs: Self) -> (Self, bool);
-    fn overflowing_neg(self) -> (Self, bool);
-
-    fn overflowing_shl(self, rhs: u32) -> (Self, bool);
-    fn overflowing_shr(self, rhs: u32) -> (Self, bool);
-}
-
 macro_rules! sh_impl_signed {
     ($t:ident, $f:ident) => (
         #[stable(feature = "rust1", since = "1.0.0")]
@@ -52,7 +28,7 @@ macro_rules! sh_impl_signed {
             }
         }
 
-        #[unstable(feature = "wrapping_impls", reason = "recently added", issue = "30524")]
+        #[stable(feature = "wrapping_impls", since = "1.7.0")]
         impl ShlAssign<$f> for Wrapping<$t> {
             #[inline(always)]
             fn shl_assign(&mut self, other: $f) {
@@ -74,7 +50,7 @@ macro_rules! sh_impl_signed {
             }
         }
 
-        #[unstable(feature = "wrapping_impls", reason = "recently added", issue = "30524")]
+        #[stable(feature = "wrapping_impls", since = "1.7.0")]
         impl ShrAssign<$f> for Wrapping<$t> {
             #[inline(always)]
             fn shr_assign(&mut self, other: $f) {
@@ -96,7 +72,7 @@ macro_rules! sh_impl_unsigned {
             }
         }
 
-        #[unstable(feature = "wrapping_impls", reason = "recently added", issue = "30524")]
+        #[stable(feature = "wrapping_impls", since = "1.7.0")]
         impl ShlAssign<$f> for Wrapping<$t> {
             #[inline(always)]
             fn shl_assign(&mut self, other: $f) {
@@ -114,7 +90,7 @@ macro_rules! sh_impl_unsigned {
             }
         }
 
-        #[unstable(feature = "wrapping_impls", reason = "recently added", issue = "30524")]
+        #[stable(feature = "wrapping_impls", since = "1.7.0")]
         impl ShrAssign<$f> for Wrapping<$t> {
             #[inline(always)]
             fn shr_assign(&mut self, other: $f) {
@@ -218,7 +194,7 @@ macro_rules! wrapping_impl {
             }
         }
 
-        #[unstable(feature = "wrapping_impls", reason = "recently added", issue = "30524")]
+        #[stable(feature = "wrapping_impls", since = "1.7.0")]
         impl Rem for Wrapping<$t> {
             type Output = Wrapping<$t>;
 
@@ -331,120 +307,3 @@ mod shift_max {
     pub const u64: u32 = i64;
     pub use self::platform::usize;
 }
-
-macro_rules! signed_overflowing_impl {
-    ($($t:ident)*) => ($(
-        #[allow(deprecated)]
-        impl OverflowingOps for $t {
-            #[inline(always)]
-            fn overflowing_add(self, rhs: $t) -> ($t, bool) {
-                unsafe {
-                    add_with_overflow(self, rhs)
-                }
-            }
-            #[inline(always)]
-            fn overflowing_sub(self, rhs: $t) -> ($t, bool) {
-                unsafe {
-                    sub_with_overflow(self, rhs)
-                }
-            }
-            #[inline(always)]
-            fn overflowing_mul(self, rhs: $t) -> ($t, bool) {
-                unsafe {
-                    mul_with_overflow(self, rhs)
-                }
-            }
-
-            #[inline(always)]
-            fn overflowing_div(self, rhs: $t) -> ($t, bool) {
-                if self == $t::MIN && rhs == -1 {
-                    (self, true)
-                } else {
-                    (self/rhs, false)
-                }
-            }
-            #[inline(always)]
-            fn overflowing_rem(self, rhs: $t) -> ($t, bool) {
-                if self == $t::MIN && rhs == -1 {
-                    (0, true)
-                } else {
-                    (self % rhs, false)
-                }
-            }
-
-            #[inline(always)]
-            fn overflowing_shl(self, rhs: u32) -> ($t, bool) {
-                (self << (rhs & self::shift_max::$t),
-                 (rhs > self::shift_max::$t))
-            }
-            #[inline(always)]
-            fn overflowing_shr(self, rhs: u32) -> ($t, bool) {
-                (self >> (rhs & self::shift_max::$t),
-                 (rhs > self::shift_max::$t))
-            }
-
-            #[inline(always)]
-            fn overflowing_neg(self) -> ($t, bool) {
-                if self == $t::MIN {
-                    ($t::MIN, true)
-                } else {
-                    (-self, false)
-                }
-            }
-        }
-    )*)
-}
-
-macro_rules! unsigned_overflowing_impl {
-    ($($t:ident)*) => ($(
-        #[allow(deprecated)]
-        impl OverflowingOps for $t {
-            #[inline(always)]
-            fn overflowing_add(self, rhs: $t) -> ($t, bool) {
-                unsafe {
-                    add_with_overflow(self, rhs)
-                }
-            }
-            #[inline(always)]
-            fn overflowing_sub(self, rhs: $t) -> ($t, bool) {
-                unsafe {
-                    sub_with_overflow(self, rhs)
-                }
-            }
-            #[inline(always)]
-            fn overflowing_mul(self, rhs: $t) -> ($t, bool) {
-                unsafe {
-                    mul_with_overflow(self, rhs)
-                }
-            }
-
-            #[inline(always)]
-            fn overflowing_div(self, rhs: $t) -> ($t, bool) {
-                (self/rhs, false)
-            }
-            #[inline(always)]
-            fn overflowing_rem(self, rhs: $t) -> ($t, bool) {
-                (self % rhs, false)
-            }
-
-            #[inline(always)]
-            fn overflowing_shl(self, rhs: u32) -> ($t, bool) {
-                (self << (rhs & self::shift_max::$t),
-                 (rhs > self::shift_max::$t))
-            }
-            #[inline(always)]
-            fn overflowing_shr(self, rhs: u32) -> ($t, bool) {
-                (self >> (rhs & self::shift_max::$t),
-                 (rhs > self::shift_max::$t))
-            }
-
-            #[inline(always)]
-            fn overflowing_neg(self) -> ($t, bool) {
-                ((!self).wrapping_add(1), true)
-            }
-        }
-    )*)
-}
-
-signed_overflowing_impl! { i8 i16 i32 i64 isize }
-unsigned_overflowing_impl! { u8 u16 u32 u64 usize }
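The removed `OverflowingOps` trait was deprecated in 1.7 in favour of the inherent `overflowing_*` methods, which cover the same ground. A brief usage sketch of the inherent methods:

```rust
fn main() {
    // (wrapped result, did-it-overflow flag), as the removed trait returned.
    assert_eq!(250u8.overflowing_add(10), (4, true));
    assert_eq!(5i32.overflowing_sub(2), (3, false));
}
```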
index 0f5584a952f547f76627d3bf64506e2c5ff5a3d5..44c498ef6d4e0836fb88b5c3c39aa6a0835473f4 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! Overloadable operators
+//! Overloadable operators.
 //!
 //! Implementing these traits allows you to get an effect similar to
 //! overloading operators.
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
-use marker::{Sized, Unsize};
+use cmp::PartialOrd;
 use fmt;
+use convert::From;
+use marker::{Sized, Unsize};
+use num::One;
 
 /// The `Drop` trait is used to run some code when a value goes out of scope.
 /// This is sometimes called a 'destructor'.
@@ -1443,9 +1446,25 @@ pub trait IndexMut<Idx: ?Sized>: Index<Idx> {
     fn index_mut(&mut self, index: Idx) -> &mut Self::Output;
 }
 
-/// An unbounded range.
+/// An unbounded range. Use `..` (two dots) for its shorthand.
+///
+/// Its primary use case is as a slicing index. It cannot serve as an iterator
+/// because it doesn't have a starting point.
+///
+/// # Examples
+///
+/// ```
+/// fn main() {
+///     assert_eq!((..), std::ops::RangeFull);
+///
+///     let arr = [0, 1, 2, 3];
+///     assert_eq!(arr[ .. ], [0,1,2,3]);  // RangeFull
+///     assert_eq!(arr[ ..3], [0,1,2  ]);
+///     assert_eq!(arr[1.. ], [  1,2,3]);
+///     assert_eq!(arr[1..3], [  1,2  ]);
+/// }
+/// ```
 #[derive(Copy, Clone, PartialEq, Eq)]
-#[lang = "range_full"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct RangeFull;
 
@@ -1456,9 +1475,27 @@ impl fmt::Debug for RangeFull {
     }
 }
 
-/// A (half-open) range which is bounded at both ends.
+/// A (half-open) range which is bounded at both ends: { x | start <= x < end }.
+/// Use `start..end` (two dots) for its shorthand.
+///
+/// See the [`contains()`](#method.contains) method for its characterization.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(iter_arith)]
+/// fn main() {
+///     assert_eq!((3..5), std::ops::Range{ start: 3, end: 5 });
+///     assert_eq!(3+4+5, (3..6).sum());
+///
+///     let arr = [0, 1, 2, 3];
+///     assert_eq!(arr[ .. ], [0,1,2,3]);
+///     assert_eq!(arr[ ..3], [0,1,2  ]);
+///     assert_eq!(arr[1.. ], [  1,2,3]);
+///     assert_eq!(arr[1..3], [  1,2  ]);  // Range
+/// }
+/// ```
 #[derive(Clone, PartialEq, Eq)]
-#[lang = "range"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Range<Idx> {
     /// The lower bound of the range (inclusive).
@@ -1476,9 +1513,48 @@ impl<Idx: fmt::Debug> fmt::Debug for Range<Idx> {
     }
 }
 
-/// A range which is only bounded below.
+#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")]
+impl<Idx: PartialOrd<Idx>> Range<Idx> {
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(range_contains)]
+    /// fn main() {
+    ///     assert!( ! (3..5).contains(2));
+    ///     assert!(   (3..5).contains(3));
+    ///     assert!(   (3..5).contains(4));
+    ///     assert!( ! (3..5).contains(5));
+    ///
+    ///     assert!( ! (3..3).contains(3));
+    ///     assert!( ! (3..2).contains(3));
+    /// }
+    /// ```
+    pub fn contains(&self, item: Idx) -> bool {
+        (self.start <= item) && (item < self.end)
+    }
+}
+
+/// A range which is only bounded below: { x | start <= x }.
+/// Use `start..` for its shorthand.
+///
+/// See the [`contains()`](#method.contains) method for its characterization.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(iter_arith)]
+/// fn main() {
+///     assert_eq!((2..), std::ops::RangeFrom{ start: 2 });
+///     assert_eq!(2+3+4, (2..).take(3).sum());
+///
+///     let arr = [0, 1, 2, 3];
+///     assert_eq!(arr[ .. ], [0,1,2,3]);
+///     assert_eq!(arr[ ..3], [0,1,2  ]);
+///     assert_eq!(arr[1.. ], [  1,2,3]);  // RangeFrom
+///     assert_eq!(arr[1..3], [  1,2  ]);
+/// }
+/// ```
 #[derive(Clone, PartialEq, Eq)]
-#[lang = "range_from"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct RangeFrom<Idx> {
     /// The lower bound of the range (inclusive).
@@ -1493,9 +1569,41 @@ impl<Idx: fmt::Debug> fmt::Debug for RangeFrom<Idx> {
     }
 }
 
-/// A range which is only bounded above.
+#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")]
+impl<Idx: PartialOrd<Idx>> RangeFrom<Idx> {
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(range_contains)]
+    /// fn main() {
+    ///     assert!( ! (3..).contains(2));
+    ///     assert!(   (3..).contains(3));
+    ///     assert!(   (3..).contains(1_000_000_000));
+    /// }
+    /// ```
+    pub fn contains(&self, item: Idx) -> bool {
+        (self.start <= item)
+    }
+}
+
+/// A range which is only bounded above: { x | x < end }.
+/// Use `..end` (two dots) for its shorthand.
+///
+/// See the [`contains()`](#method.contains) method for its characterization.
+///
+/// It cannot serve as an iterator because it doesn't have a starting point.
+///
+/// # Examples
+///
+/// ```
+/// fn main() {
+///     assert_eq!((..5), std::ops::RangeTo{ end: 5 });
+///
+///     let arr = [0, 1, 2, 3];
+///     assert_eq!(arr[ .. ], [0,1,2,3]);
+///     assert_eq!(arr[ ..3], [0,1,2  ]);  // RangeTo
+///     assert_eq!(arr[1.. ], [  1,2,3]);
+///     assert_eq!(arr[1..3], [  1,2  ]);
+/// }
+/// ```
 #[derive(Copy, Clone, PartialEq, Eq)]
-#[lang = "range_to"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct RangeTo<Idx> {
     /// The upper bound of the range (exclusive).
@@ -1510,6 +1618,183 @@ impl<Idx: fmt::Debug> fmt::Debug for RangeTo<Idx> {
     }
 }
 
+#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")]
+impl<Idx: PartialOrd<Idx>> RangeTo<Idx> {
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(range_contains)]
+    /// fn main() {
+    ///     assert!(   (..5).contains(-1_000_000_000));
+    ///     assert!(   (..5).contains(4));
+    ///     assert!( ! (..5).contains(5));
+    /// }
+    /// ```
+    pub fn contains(&self, item: Idx) -> bool {
+        (item < self.end)
+    }
+}
+
+/// An inclusive range which is bounded at both ends: { x | start <= x <= end }.
+/// Use `start...end` (three dots) for its shorthand.
+///
+/// See the [`contains()`](#method.contains) method for its characterization.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(inclusive_range,inclusive_range_syntax,iter_arith)]
+/// fn main() {
+///     assert_eq!((3...5), std::ops::RangeInclusive::NonEmpty{ start: 3, end: 5 });
+///     assert_eq!(3+4+5, (3...5).sum());
+///
+///     let arr = [0, 1, 2, 3];
+///     assert_eq!(arr[ ...2], [0,1,2  ]);
+///     assert_eq!(arr[1...2], [  1,2  ]);  // RangeInclusive
+/// }
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq)]
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+pub enum RangeInclusive<Idx> {
+    /// Empty range (iteration has finished)
+    #[unstable(feature = "inclusive_range",
+               reason = "recently added, follows RFC",
+               issue = "28237")]
+    Empty {
+        /// The point at which iteration finished
+        #[unstable(feature = "inclusive_range",
+                   reason = "recently added, follows RFC",
+                   issue = "28237")]
+        at: Idx
+    },
+    /// Non-empty range (iteration will yield value(s))
+    #[unstable(feature = "inclusive_range",
+               reason = "recently added, follows RFC",
+               issue = "28237")]
+    NonEmpty {
+        /// The lower bound of the range (inclusive).
+        #[unstable(feature = "inclusive_range",
+                   reason = "recently added, follows RFC",
+                   issue = "28237")]
+        start: Idx,
+        /// The upper bound of the range (inclusive).
+        #[unstable(feature = "inclusive_range",
+                   reason = "recently added, follows RFC",
+                   issue = "28237")]
+        end: Idx,
+    },
+}
+
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl<Idx: fmt::Debug> fmt::Debug for RangeInclusive<Idx> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        use self::RangeInclusive::*;
+
+        match *self {
+            Empty { ref at } => write!(fmt, "[empty range @ {:?}]", at),
+            NonEmpty { ref start, ref end } => write!(fmt, "{:?}...{:?}", start, end),
+        }
+    }
+}
+
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl<Idx: PartialOrd + One + Sub<Output=Idx>> From<Range<Idx>> for RangeInclusive<Idx> {
+    fn from(range: Range<Idx>) -> RangeInclusive<Idx> {
+        use self::RangeInclusive::*;
+
+        if range.start < range.end {
+            NonEmpty {
+                start: range.start,
+                end: range.end - Idx::one() // can't underflow because end > start >= MIN
+            }
+        } else {
+            Empty {
+                at: range.start
+            }
+        }
+    }
+}
+
+#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")]
+impl<Idx: PartialOrd<Idx>> RangeInclusive<Idx> {
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(range_contains,inclusive_range_syntax)]
+    /// fn main() {
+    ///     assert!( ! (3...5).contains(2));
+    ///     assert!(   (3...5).contains(3));
+    ///     assert!(   (3...5).contains(4));
+    ///     assert!(   (3...5).contains(5));
+    ///     assert!( ! (3...5).contains(6));
+    ///
+    ///     assert!(   (3...3).contains(3));
+    ///     assert!( ! (3...2).contains(3));
+    /// }
+    /// ```
+    pub fn contains(&self, item: Idx) -> bool {
+        if let &RangeInclusive::NonEmpty{ref start, ref end} = self {
+            (*start <= item) && (item <= *end)
+        } else { false }
+    }
+}
+
+/// An inclusive range which is only bounded above: { x | x <= end }.
+/// Use `...end` (three dots) for its shorthand.
+///
+/// See the [`contains()`](#method.contains) method for its characterization.
+///
+/// It cannot serve as an iterator because it doesn't have a starting point.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(inclusive_range,inclusive_range_syntax)]
+/// fn main() {
+///     assert_eq!((...5), std::ops::RangeToInclusive{ end: 5 });
+///
+///     let arr = [0, 1, 2, 3];
+///     assert_eq!(arr[ ...2], [0,1,2  ]);  // RangeToInclusive
+///     assert_eq!(arr[1...2], [  1,2  ]);
+/// }
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq)]
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+pub struct RangeToInclusive<Idx> {
+    /// The upper bound of the range (inclusive)
+    #[unstable(feature = "inclusive_range",
+               reason = "recently added, follows RFC",
+               issue = "28237")]
+    pub end: Idx,
+}
+
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl<Idx: fmt::Debug> fmt::Debug for RangeToInclusive<Idx> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "...{:?}", self.end)
+    }
+}
+
+#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")]
+impl<Idx: PartialOrd<Idx>> RangeToInclusive<Idx> {
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(range_contains,inclusive_range_syntax)]
+    /// fn main() {
+    ///     assert!(   (...5).contains(-1_000_000_000));
+    ///     assert!(   (...5).contains(5));
+    ///     assert!( ! (...5).contains(6));
+    /// }
+    /// ```
+    pub fn contains(&self, item: Idx) -> bool {
+        (item <= self.end)
+    }
+}
+
+// RangeToInclusive<Idx> cannot impl From<RangeTo<Idx>>
+// because underflow would be possible with (..0).into()
+
 /// The `Deref` trait is used to specify the functionality of dereferencing
 /// operations, like `*v`.
 ///
index e38cf9af010b9f9953fff8c59a0def8c0c36283e..beed2075d049427d2daa39365d0e1730ee901f58 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! Optional values
+//! Optional values.
 //!
 //! Type `Option` represents an optional value: every `Option`
 //! is either `Some` and contains a value, or `None`, and
 //! let msg = Some("howdy");
 //!
 //! // Take a reference to the contained string
-//! match msg {
-//!     Some(ref m) => println!("{}", *m),
-//!     None => (),
+//! if let Some(ref m) = msg {
+//!     println!("{}", *m);
 //! }
 //!
 //! // Remove the contained string, destroying the Option
-//! let unwrapped_msg = match msg {
-//!     Some(m) => m,
-//!     None => "default message",
-//! };
+//! let unwrapped_msg = msg.unwrap_or("default message");
 //! ```
 //!
 //! Initialize a result to `None` before a loop:
@@ -763,7 +759,7 @@ impl<'a, T> IntoIterator for &'a mut Option<T> {
 // The Option Iterators
 /////////////////////////////////////////////////////////////////////////////
 
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 struct Item<A> {
     opt: Option<A>
 }
@@ -796,6 +792,7 @@ impl<A> ExactSizeIterator for Item<A> {}
 
 /// An iterator over a reference of the contained item in an Option.
 #[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
 pub struct Iter<'a, A: 'a> { inner: Item<&'a A> }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -826,6 +823,7 @@ impl<'a, A> Clone for Iter<'a, A> {
 
 /// An iterator over a mutable reference of the contained item in an Option.
 #[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
 pub struct IterMut<'a, A: 'a> { inner: Item<&'a mut A> }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -848,7 +846,7 @@ impl<'a, A> DoubleEndedIterator for IterMut<'a, A> {
 impl<'a, A> ExactSizeIterator for IterMut<'a, A> {}
 
 /// An iterator over the item contained inside an Option.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct IntoIter<A> { inner: Item<A> }
 
index cb109c010c7694ebdbaeb375f65ae98480171cfa..a6b5355d947860be655fd50118510c222b1b4ed8 100644 (file)
@@ -10,9 +10,9 @@
 
 // FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory
 
-//! Raw, unsafe pointers, `*const T`, and `*mut T`
+//! Raw, unsafe pointers, `*const T`, and `*mut T`.
 //!
-//! *[See also the pointer primitive types](../primitive.pointer.html).*
+//! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
@@ -166,9 +166,16 @@ pub unsafe fn write<T>(dst: *mut T, src: T) {
 ///
 /// Volatile operations are intended to act on I/O memory, and are guaranteed
 /// to not be elided or reordered by the compiler across other volatile
-/// operations. See the LLVM documentation on [[volatile]].
+/// operations.
 ///
-/// [volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses
+/// # Notes
+///
+/// Rust does not currently have a rigorously and formally defined memory model,
+/// so the precise semantics of what "volatile" means here are subject to change
+/// over time. That said, the semantics are expected to remain very close to
+/// [C11's definition of volatile][c11].
+///
+/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
 ///
 /// # Safety
 ///
@@ -179,7 +186,7 @@ pub unsafe fn write<T>(dst: *mut T, src: T) {
 /// `zero_memory`, or `copy_memory`). Note that `*src = foo` counts as a use
 /// because it will attempt to drop the value previously at `*src`.
 #[inline]
-#[unstable(feature = "volatile", reason = "recently added", issue = "31756")]
+#[stable(feature = "volatile", since = "1.9.0")]
 pub unsafe fn read_volatile<T>(src: *const T) -> T {
     intrinsics::volatile_load(src)
 }
@@ -189,9 +196,16 @@ pub unsafe fn read_volatile<T>(src: *const T) -> T {
 ///
 /// Volatile operations are intended to act on I/O memory, and are guaranteed
 /// to not be elided or reordered by the compiler across other volatile
-/// operations. See the LLVM documentation on [[volatile]].
+/// operations.
+///
+/// # Notes
+///
+/// Rust does not currently have a rigorously and formally defined memory model,
+/// so the precise semantics of what "volatile" means here are subject to change
+/// over time. That said, the semantics are expected to remain very close to
+/// [C11's definition of volatile][c11].
 ///
-/// [volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses
+/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
 ///
 /// # Safety
 ///
@@ -204,7 +218,7 @@ pub unsafe fn read_volatile<T>(src: *const T) -> T {
 /// This is appropriate for initializing uninitialized memory, or overwriting
 /// memory that has previously been `read` from.
 #[inline]
-#[unstable(feature = "volatile", reason = "recently added", issue = "31756")]
+#[stable(feature = "volatile", since = "1.9.0")]
 pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
     intrinsics::volatile_store(dst, src);
 }
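`read_volatile` and `write_volatile` are stabilised above for 1.9. A minimal usage sketch; the pointee and values are arbitrary:

```rust
use std::ptr;

fn main() {
    let mut word = 0u32;
    unsafe {
        // Volatile accesses are never elided or reordered across other
        // volatile operations by the compiler.
        ptr::write_volatile(&mut word, 42);
        assert_eq!(ptr::read_volatile(&word), 42);
    }
}
```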
@@ -212,6 +226,16 @@ pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
 #[lang = "const_ptr"]
 impl<T: ?Sized> *const T {
     /// Returns true if the pointer is null.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let s: &str = "Follow the rabbit";
+    /// let ptr: *const u8 = s.as_ptr();
+    /// assert!(!ptr.is_null());
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
     pub fn is_null(self) -> bool where T: Sized {
@@ -227,17 +251,30 @@ impl<T: ?Sized> *const T {
     /// null-safety, it is important to note that this is still an unsafe
     /// operation because the returned value could be pointing to invalid
     /// memory.
-    #[unstable(feature = "ptr_as_ref",
-               reason = "Option is not clearly the right return type, and we \
-                         may want to tie the return lifetime to a borrow of \
-                         the raw pointer",
-               issue = "27780")]
+    ///
+    /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
+    /// not necessarily reflect the actual lifetime of the data.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```ignore
+    /// let val: *const u8 = &10u8 as *const u8;
+    ///
+    /// unsafe {
+    ///     if let Some(val_back) = val.as_ref() {
+    ///         println!("We got back the value: {}!", val_back);
+    ///     }
+    /// }
+    /// ```
+    #[stable(feature = "ptr_as_ref", since = "1.9.0")]
     #[inline]
-    pub unsafe fn as_ref<'a>(&self) -> Option<&'a T> where T: Sized {
+    pub unsafe fn as_ref<'a>(self) -> Option<&'a T> where T: Sized {
         if self.is_null() {
             None
         } else {
-            Some(&**self)
+            Some(&*self)
         }
     }
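The stabilised `as_ref` above now takes the raw pointer by value and returns `None` for null. A runnable counterpart to the `ignore`d doc example, using a named local instead of a temporary:

```rust
fn main() {
    let value = 10u8;
    let ptr: *const u8 = &value;
    unsafe {
        // Some(&T) for a non-null pointer, None for a null one.
        if let Some(back) = ptr.as_ref() {
            assert_eq!(*back, 10);
        }
    }

    let null: *const u8 = std::ptr::null();
    unsafe {
        assert!(null.as_ref().is_none());
    }
}
```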
 
@@ -250,6 +287,20 @@ impl<T: ?Sized> *const T {
     /// byte past the end of an allocated object. If either pointer is out of
     /// bounds or arithmetic overflow occurs then
     /// any further use of the returned value will result in undefined behavior.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let s: &str = "123";
+    /// let ptr: *const u8 = s.as_ptr();
+    ///
+    /// unsafe {
+    ///     println!("{}", *ptr.offset(1) as char);
+    ///     println!("{}", *ptr.offset(2) as char);
+    /// }
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
     pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
@@ -260,6 +311,16 @@ impl<T: ?Sized> *const T {
 #[lang = "mut_ptr"]
 impl<T: ?Sized> *mut T {
     /// Returns true if the pointer is null.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let mut s = [1, 2, 3];
+    /// let ptr: *mut u32 = s.as_mut_ptr();
+    /// assert!(!ptr.is_null());
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
     pub fn is_null(self) -> bool where T: Sized {
@@ -275,17 +336,30 @@ impl<T: ?Sized> *mut T {
     /// null-safety, it is important to note that this is still an unsafe
     /// operation because the returned value could be pointing to invalid
     /// memory.
-    #[unstable(feature = "ptr_as_ref",
-               reason = "Option is not clearly the right return type, and we \
-                         may want to tie the return lifetime to a borrow of \
-                         the raw pointer",
-               issue = "27780")]
+    ///
+    /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
+    /// not necessarily reflect the actual lifetime of the data.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```ignore
+    /// let val: *mut u8 = &mut 10u8 as *mut u8;
+    ///
+    /// unsafe {
+    ///     if let Some(val_back) = val.as_ref() {
+    ///         println!("We got back the value: {}!", val_back);
+    ///     }
+    /// }
+    /// ```
+    #[stable(feature = "ptr_as_ref", since = "1.9.0")]
     #[inline]
-    pub unsafe fn as_ref<'a>(&self) -> Option<&'a T> where T: Sized {
+    pub unsafe fn as_ref<'a>(self) -> Option<&'a T> where T: Sized {
         if self.is_null() {
             None
         } else {
-            Some(&**self)
+            Some(&*self)
         }
     }
 
@@ -297,6 +371,20 @@ impl<T: ?Sized> *mut T {
     /// The offset must be in-bounds of the object, or one-byte-past-the-end.
     /// Otherwise `offset` invokes Undefined Behavior, regardless of whether
     /// the pointer is used.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let mut s = [1, 2, 3];
+    /// let ptr: *mut u32 = s.as_mut_ptr();
+    ///
+    /// unsafe {
+    ///     println!("{}", *ptr.offset(1));
+    ///     println!("{}", *ptr.offset(2));
+    /// }
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
     pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
@@ -309,17 +397,24 @@ impl<T: ?Sized> *mut T {
     /// # Safety
     ///
     /// As with `as_ref`, this is unsafe because it cannot verify the validity
-    /// of the returned pointer.
-    #[unstable(feature = "ptr_as_ref",
-               reason = "return value does not necessarily convey all possible \
-                         information",
-               issue = "27780")]
+    /// of the returned pointer, nor can it ensure that the lifetime `'a`
+    /// returned is indeed a valid lifetime for the contained data.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let mut s = [1, 2, 3];
+    /// let ptr: *mut u32 = s.as_mut_ptr();
+    /// let first_value = unsafe { ptr.as_mut().unwrap() };
+    /// *first_value = 4;
+    /// ```
+    #[stable(feature = "ptr_as_ref", since = "1.9.0")]
     #[inline]
-    pub unsafe fn as_mut<'a>(&self) -> Option<&'a mut T> where T: Sized {
+    pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> where T: Sized {
         if self.is_null() {
             None
         } else {
-            Some(&mut **self)
+            Some(&mut *self)
         }
     }
 }
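
A minimal sketch of how the now-stable `as_ref`/`as_mut` read after this change, including the `None` case for a null pointer (fragment only; assumes a 1.9 toolchain):

```
let mut x = 5u32;
let p: *mut u32 = &mut x;
let null: *const u32 = std::ptr::null();

unsafe {
    if let Some(r) = p.as_mut() {
        *r += 1;                      // mutate through the checked reference
    }
    assert_eq!(p.as_ref(), Some(&6)); // non-null pointer yields Some(&T)
    assert!(null.as_ref().is_none()); // null pointer yields None
}
```
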
@@ -520,6 +615,7 @@ impl<T: ?Sized> PartialOrd for *mut T {
 /// modified without a unique path to the `Unique` reference. Useful
 /// for building abstractions like `Vec<T>` or `Box<T>`, which
 /// internally use raw pointers to manage the memory that they own.
+#[allow(missing_debug_implementations)]
 #[unstable(feature = "unique", reason = "needs an RFC to flesh out design",
            issue = "27730")]
 pub struct Unique<T: ?Sized> {
@@ -592,6 +688,7 @@ impl<T> fmt::Pointer for Unique<T> {
 /// of this wrapper has shared ownership of the referent. Useful for
 /// building abstractions like `Rc<T>` or `Arc<T>`, which internally
 /// use raw pointers to manage the memory that they own.
+#[allow(missing_debug_implementations)]
 #[unstable(feature = "shared", reason = "needs an RFC to flesh out design",
            issue = "27730")]
 pub struct Shared<T: ?Sized> {
index 84467be6eca5641d30e15a0a4240fc827b761ec1..19226d81f16882000966fcd514a046d1f16abe86 100644 (file)
@@ -59,12 +59,18 @@ use mem;
 /// println!("data pointer = {:?}, length = {}", repr.data, repr.len);
 /// ```
 #[repr(C)]
+#[allow(missing_debug_implementations)]
+#[rustc_deprecated(reason = "use raw accessors/constructors in `slice` module",
+                   since = "1.9.0")]
+#[unstable(feature = "raw", issue = "27751")]
 pub struct Slice<T> {
     pub data: *const T,
     pub len: usize,
 }
 
+#[allow(deprecated)]
 impl<T> Copy for Slice<T> {}
+#[allow(deprecated)]
 impl<T> Clone for Slice<T> {
     fn clone(&self) -> Slice<T> { *self }
 }
@@ -143,6 +149,7 @@ impl<T> Clone for Slice<T> {
 /// ```
 #[repr(C)]
 #[derive(Copy, Clone)]
+#[allow(missing_debug_implementations)]
 pub struct TraitObject {
     pub data: *mut (),
     pub vtable: *mut (),
@@ -150,6 +157,9 @@ pub struct TraitObject {
 
 /// This trait is meant to map equivalences between raw structs and their
 /// corresponding rust values.
+#[rustc_deprecated(reason = "use raw accessors/constructors in `slice` module",
+                   since = "1.9.0")]
+#[unstable(feature = "raw", issue = "27751")]
 pub unsafe trait Repr<T> {
     /// This function "unwraps" a rust value (without consuming it) into its raw
     /// struct representation. This can be used to read/write different values
@@ -159,5 +169,7 @@ pub unsafe trait Repr<T> {
     fn repr(&self) -> T { unsafe { mem::transmute_copy(&self) } }
 }
 
+#[allow(deprecated)]
 unsafe impl<T> Repr<Slice<T>> for [T] {}
+#[allow(deprecated)]
 unsafe impl Repr<Slice<u8>> for str {}
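
Since `raw::Slice` and `Repr` are deprecated in favour of the raw accessors/constructors in the `slice` module, the replacement pattern looks roughly like this sketch (std paths used for illustration):

```
use std::slice;

let v = [1u8, 2, 3];
// Rebuild a slice from its raw parts instead of transmuting a raw::Slice.
let s: &[u8] = unsafe { slice::from_raw_parts(v.as_ptr(), v.len()) };
assert_eq!(s, &v[..]);
```
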
index f6703d16ad997e988e347bb45720356985b5c572..7f8cf531d26176bc7a8e52739892bef13cb8f129 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! Error handling with the `Result` type
+//! Error handling with the `Result` type.
 //!
 //! `Result<T, E>` is the type used for returning and propagating
 //! errors. It is an enum with the variants, `Ok(T)`, representing
@@ -799,6 +799,7 @@ impl<'a, T, E> IntoIterator for &'a mut Result<T, E> {
 /////////////////////////////////////////////////////////////////////////////
 
 /// An iterator over a reference to the `Ok` variant of a `Result`.
+#[derive(Debug)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Iter<'a, T: 'a> { inner: Option<&'a T> }
 
@@ -830,6 +831,7 @@ impl<'a, T> Clone for Iter<'a, T> {
 }
 
 /// An iterator over a mutable reference to the `Ok` variant of a `Result`.
+#[derive(Debug)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct IterMut<'a, T: 'a> { inner: Option<&'a mut T> }
 
@@ -856,6 +858,7 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
 impl<'a, T> ExactSizeIterator for IterMut<'a, T> {}
 
 /// An iterator over the value in a `Ok` variant of a `Result`.
+#[derive(Debug)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct IntoIter<T> { inner: Option<T> }
 
index afda70f4fcc0af67b8d44853f7002ce071fe1339..ca1abb4fe0bddf46ef756fc3cfed76b365ed6dd5 100644 (file)
@@ -38,6 +38,7 @@ use cmp::{Ordering, PartialEq, PartialOrd, Eq, Ord};
 use cmp::Ordering::{Less, Equal, Greater};
 use cmp;
 use default::Default;
+use fmt;
 use intrinsics::assume;
 use iter::*;
 use ops::{FnMut, self, Index};
@@ -49,10 +50,12 @@ use result::Result::{Ok, Err};
 use ptr;
 use mem;
 use marker::{Copy, Send, Sync, self};
-use raw::Repr;
-// Avoid conflicts with *both* the Slice trait (buggy) and the `slice::raw` module.
-use raw::Slice as RawSlice;
 
+#[repr(C)]
+struct Repr<T> {
+    pub data: *const T,
+    pub len: usize,
+}
 
 //
 // Extension traits
@@ -61,7 +64,7 @@ use raw::Slice as RawSlice;
 /// Extension methods for slices.
 #[unstable(feature = "core_slice_ext",
            reason = "stable interface provided by `impl [T]` in later crates",
-           issue = "27701")]
+           issue = "32110")]
 #[allow(missing_docs)] // documented elsewhere
 pub trait SliceExt {
     type Item;
@@ -151,8 +154,8 @@ pub trait SliceExt {
     fn ends_with(&self, needle: &[Self::Item]) -> bool where Self::Item: PartialEq;
 
     #[stable(feature = "clone_from_slice", since = "1.7.0")]
-    fn clone_from_slice(&mut self, &[Self::Item]) where Self::Item: Clone;
-    #[unstable(feature = "copy_from_slice", issue = "31755")]
+    fn clone_from_slice(&mut self, src: &[Self::Item]) where Self::Item: Clone;
+    #[stable(feature = "copy_from_slice", since = "1.9.0")]
     fn copy_from_slice(&mut self, src: &[Self::Item]) where Self::Item: Copy;
 }
 
@@ -182,7 +185,7 @@ macro_rules! slice_ref {
 
 #[unstable(feature = "core_slice_ext",
            reason = "stable interface provided by `impl [T]` in later crates",
-           issue = "27701")]
+           issue = "32110")]
 impl<T> SliceExt for [T] {
     type Item = T;
 
@@ -285,12 +288,12 @@ impl<T> SliceExt for [T] {
 
     #[inline]
     unsafe fn get_unchecked(&self, index: usize) -> &T {
-        &*(self.repr().data.offset(index as isize))
+        &*(self.as_ptr().offset(index as isize))
     }
 
     #[inline]
     fn as_ptr(&self) -> *const T {
-        self.repr().data
+        self as *const [T] as *const T
     }
 
     fn binary_search_by<F>(&self, mut f: F) -> Result<usize, usize> where
@@ -316,7 +319,11 @@ impl<T> SliceExt for [T] {
     }
 
     #[inline]
-    fn len(&self) -> usize { self.repr().len }
+    fn len(&self) -> usize {
+        unsafe {
+            mem::transmute::<&[T], Repr<T>>(self).len
+        }
+    }
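
The private `Repr` struct above works because `&[T]` is a fat pointer laid out as a data pointer plus a length; a safe illustration of the two halves it extracts (not part of the diff):

```
let v = [1u32, 2, 3];
let s: &[u32] = &v;
// len() and as_ptr() expose exactly the two fields of the fat pointer.
assert_eq!(s.len(), 3);
assert_eq!(s.as_ptr(), v.as_ptr());
```
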
 
     #[inline]
     fn get_mut(&mut self, index: usize) -> Option<&mut T> {
@@ -448,12 +455,12 @@ impl<T> SliceExt for [T] {
 
     #[inline]
     unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut T {
-        &mut *(self.repr().data as *mut T).offset(index as isize)
+        &mut *self.as_mut_ptr().offset(index as isize)
     }
 
     #[inline]
     fn as_mut_ptr(&mut self) -> *mut T {
-        self.repr().data as *mut T
+        self as *mut [T] as *mut T
     }
 
     #[inline]
@@ -533,6 +540,18 @@ fn slice_index_order_fail(index: usize, end: usize) -> ! {
     panic!("slice index starts at {} but ends at {}", index, end);
 }
 
+// FIXME implement indexing with inclusive ranges
+
+/// Implements slicing with syntax `&self[begin .. end]`.
+///
+/// Returns a slice of self for the index range [`begin`..`end`).
+///
+/// This operation is `O(1)`.
+///
+/// # Panics
+///
+/// Requires that `begin <= end` and `end <= self.len()`,
+/// otherwise slicing will panic.
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T> ops::Index<ops::Range<usize>> for [T] {
     type Output = [T];
@@ -552,24 +571,43 @@ impl<T> ops::Index<ops::Range<usize>> for [T] {
         }
     }
 }
+
+/// Implements slicing with syntax `&self[.. end]`.
+///
+/// Returns a slice of self from the beginning until but not including
+/// the index `end`.
+///
+/// Equivalent to `&self[0 .. end]`
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T> ops::Index<ops::RangeTo<usize>> for [T] {
     type Output = [T];
 
     #[inline]
     fn index(&self, index: ops::RangeTo<usize>) -> &[T] {
-        self.index(ops::Range{ start: 0, end: index.end })
+        self.index(0 .. index.end)
     }
 }
+
+/// Implements slicing with syntax `&self[begin ..]`.
+///
+/// Returns a slice of self from and including the index `begin` until the end.
+///
+/// Equivalent to `&self[begin .. self.len()]`
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T> ops::Index<ops::RangeFrom<usize>> for [T] {
     type Output = [T];
 
     #[inline]
     fn index(&self, index: ops::RangeFrom<usize>) -> &[T] {
-        self.index(ops::Range{ start: index.start, end: self.len() })
+        self.index(index.start .. self.len())
     }
 }
+
+/// Implements slicing with syntax `&self[..]`.
+///
+/// Returns a slice of the whole slice. This operation cannot panic.
+///
+/// Equivalent to `&self[0 .. self.len()]`
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T> ops::Index<RangeFull> for [T] {
     type Output = [T];
@@ -580,6 +618,41 @@ impl<T> ops::Index<RangeFull> for [T] {
     }
 }
 
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl<T> ops::Index<ops::RangeInclusive<usize>> for [T] {
+    type Output = [T];
+
+    #[inline]
+    fn index(&self, index: ops::RangeInclusive<usize>) -> &[T] {
+        match index {
+            ops::RangeInclusive::Empty { .. } => &[],
+            ops::RangeInclusive::NonEmpty { end, .. } if end == usize::max_value() =>
+                panic!("attempted to index slice up to maximum usize"),
+            ops::RangeInclusive::NonEmpty { start, end } =>
+                self.index(start .. end+1)
+        }
+    }
+}
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl<T> ops::Index<ops::RangeToInclusive<usize>> for [T] {
+    type Output = [T];
+
+    #[inline]
+    fn index(&self, index: ops::RangeToInclusive<usize>) -> &[T] {
+        self.index(0...index.end)
+    }
+}
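
A hypothetical nightly-only usage sketch for the new inclusive-range indexing, assuming the `inclusive_range_syntax` and `inclusive_range` feature gates of this era:

```
#![feature(inclusive_range_syntax, inclusive_range)]

fn main() {
    let v = [10, 20, 30, 40];
    // Both endpoints are included, so this is elements 1 and 2.
    assert_eq!(&v[1...2], &[20, 30][..]);
}
```
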
+
+/// Implements mutable slicing with syntax `&mut self[begin .. end]`.
+///
+/// Returns a slice of self for the index range [`begin`..`end`).
+///
+/// This operation is `O(1)`.
+///
+/// # Panics
+///
+/// Requires that `begin <= end` and `end <= self.len()`,
+/// otherwise slicing will panic.
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T> ops::IndexMut<ops::Range<usize>> for [T] {
     #[inline]
@@ -597,21 +670,40 @@ impl<T> ops::IndexMut<ops::Range<usize>> for [T] {
         }
     }
 }
+
+/// Implements mutable slicing with syntax `&mut self[.. end]`.
+///
+/// Returns a slice of self from the beginning until but not including
+/// the index `end`.
+///
+/// Equivalent to `&mut self[0 .. end]`
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T> ops::IndexMut<ops::RangeTo<usize>> for [T] {
     #[inline]
     fn index_mut(&mut self, index: ops::RangeTo<usize>) -> &mut [T] {
-        self.index_mut(ops::Range{ start: 0, end: index.end })
+        self.index_mut(0 .. index.end)
     }
 }
+
+/// Implements mutable slicing with syntax `&mut self[begin ..]`.
+///
+/// Returns a slice of self from and including the index `begin` until the end.
+///
+/// Equivalent to `&mut self[begin .. self.len()]`
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T> ops::IndexMut<ops::RangeFrom<usize>> for [T] {
     #[inline]
     fn index_mut(&mut self, index: ops::RangeFrom<usize>) -> &mut [T] {
         let len = self.len();
-        self.index_mut(ops::Range{ start: index.start, end: len })
+        self.index_mut(index.start .. len)
     }
 }
+
+/// Implements mutable slicing with syntax `&mut self[..]`.
+///
+/// Returns a slice of the whole slice. This operation cannot panic.
+///
+/// Equivalent to `&mut self[0 .. self.len()]`
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T> ops::IndexMut<RangeFull> for [T] {
     #[inline]
@@ -620,6 +712,26 @@ impl<T> ops::IndexMut<RangeFull> for [T] {
     }
 }
 
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl<T> ops::IndexMut<ops::RangeInclusive<usize>> for [T] {
+    #[inline]
+    fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut [T] {
+        match index {
+            ops::RangeInclusive::Empty { .. } => &mut [],
+            ops::RangeInclusive::NonEmpty { end, .. } if end == usize::max_value() =>
+                panic!("attempted to index slice up to maximum usize"),
+            ops::RangeInclusive::NonEmpty { start, end } =>
+                self.index_mut(start .. end+1)
+        }
+    }
+}
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+impl<T> ops::IndexMut<ops::RangeToInclusive<usize>> for [T] {
+    #[inline]
+    fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut [T] {
+        self.index_mut(0...index.end)
+    }
+}
 
 ////////////////////////////////////////////////////////////////////////////////
 // Common traits
@@ -772,6 +884,15 @@ pub struct Iter<'a, T: 'a> {
     _marker: marker::PhantomData<&'a T>,
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_tuple("Iter")
+            .field(&self.as_slice())
+            .finish()
+    }
+}
+
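
With the new `Debug` impl, a slice iterator can now be formatted directly; a small fragment assuming the `debug_tuple` output shown above:

```
let v = [1, 2, 3];
let mut it = v.iter();
it.next();
// The Debug impl prints the iterator's remaining slice.
assert_eq!(format!("{:?}", it), "Iter([2, 3])");
```
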
 #[stable(feature = "rust1", since = "1.0.0")]
 unsafe impl<'a, T: Sync> Sync for Iter<'a, T> {}
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -820,6 +941,15 @@ pub struct IterMut<'a, T: 'a> {
     _marker: marker::PhantomData<&'a mut T>,
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<'a, T: 'a + fmt::Debug> fmt::Debug for IterMut<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_tuple("IterMut")
+            .field(&make_slice!(self.ptr, self.end))
+            .finish()
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 unsafe impl<'a, T: Sync> Sync for IterMut<'a, T> {}
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -859,6 +989,7 @@ impl<'a, T> ExactSizeIterator for IterMut<'a, T> {}
 
 /// An internal abstraction over the splitting iterators, so that
 /// splitn, splitn_mut etc can be implemented once.
+#[doc(hidden)]
 trait SplitIter: DoubleEndedIterator {
     /// Mark the underlying iterator as complete, extracting the remaining
     /// portion of the slice.
@@ -874,6 +1005,16 @@ pub struct Split<'a, T:'a, P> where P: FnMut(&T) -> bool {
     finished: bool
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for Split<'a, T, P> where P: FnMut(&T) -> bool {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Split")
+            .field("v", &self.v)
+            .field("finished", &self.finished)
+            .finish()
+    }
+}
+
 // FIXME(#19839) Remove in favor of `#[derive(Clone)]`
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<'a, T, P> Clone for Split<'a, T, P> where P: Clone + FnMut(&T) -> bool {
@@ -947,6 +1088,16 @@ pub struct SplitMut<'a, T:'a, P> where P: FnMut(&T) -> bool {
     finished: bool
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for SplitMut<'a, T, P> where P: FnMut(&T) -> bool {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("SplitMut")
+            .field("v", &self.v)
+            .field("finished", &self.finished)
+            .finish()
+    }
+}
+
 impl<'a, T, P> SplitIter for SplitMut<'a, T, P> where P: FnMut(&T) -> bool {
     #[inline]
     fn finish(&mut self) -> Option<&'a mut [T]> {
@@ -1021,6 +1172,7 @@ impl<'a, T, P> DoubleEndedIterator for SplitMut<'a, T, P> where
 /// A private iterator over subslices separated by elements that
 /// match a predicate function, splitting at most a fixed number of
 /// times.
+#[derive(Debug)]
 struct GenericSplitN<I> {
     iter: I,
     count: usize,
@@ -1056,6 +1208,15 @@ pub struct SplitN<'a, T: 'a, P> where P: FnMut(&T) -> bool {
     inner: GenericSplitN<Split<'a, T, P>>
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for SplitN<'a, T, P> where P: FnMut(&T) -> bool {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("SplitN")
+            .field("inner", &self.inner)
+            .finish()
+    }
+}
+
 /// An iterator over subslices separated by elements that match a
 /// predicate function, limited to a given number of splits, starting
 /// from the end of the slice.
@@ -1064,6 +1225,15 @@ pub struct RSplitN<'a, T: 'a, P> where P: FnMut(&T) -> bool {
     inner: GenericSplitN<Split<'a, T, P>>
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for RSplitN<'a, T, P> where P: FnMut(&T) -> bool {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("RSplitN")
+            .field("inner", &self.inner)
+            .finish()
+    }
+}
+
 /// An iterator over subslices separated by elements that match a predicate
 /// function, limited to a given number of splits.
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1071,6 +1241,15 @@ pub struct SplitNMut<'a, T: 'a, P> where P: FnMut(&T) -> bool {
     inner: GenericSplitN<SplitMut<'a, T, P>>
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for SplitNMut<'a, T, P> where P: FnMut(&T) -> bool {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("SplitNMut")
+            .field("inner", &self.inner)
+            .finish()
+    }
+}
+
 /// An iterator over subslices separated by elements that match a
 /// predicate function, limited to a given number of splits, starting
 /// from the end of the slice.
@@ -1079,6 +1258,15 @@ pub struct RSplitNMut<'a, T: 'a, P> where P: FnMut(&T) -> bool {
     inner: GenericSplitN<SplitMut<'a, T, P>>
 }
 
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for RSplitNMut<'a, T, P> where P: FnMut(&T) -> bool {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("RSplitNMut")
+            .field("inner", &self.inner)
+            .finish()
+    }
+}
+
 macro_rules! forward_iterator {
     ($name:ident: $elem:ident, $iter_of:ty) => {
         #[stable(feature = "rust1", since = "1.0.0")]
@@ -1106,6 +1294,7 @@ forward_iterator! { SplitNMut: T, &'a mut [T] }
 forward_iterator! { RSplitNMut: T, &'a mut [T] }
 
 /// An iterator over overlapping subslices of length `size`.
+#[derive(Debug)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Windows<'a, T:'a> {
     v: &'a [T],
@@ -1199,6 +1388,7 @@ impl<'a, T> ExactSizeIterator for Windows<'a, T> {}
 ///
 /// When the slice len is not evenly divided by the chunk size, the last slice
 /// of the iteration will be the remainder.
+#[derive(Debug)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Chunks<'a, T:'a> {
     v: &'a [T],
@@ -1299,6 +1489,7 @@ impl<'a, T> ExactSizeIterator for Chunks<'a, T> {}
 /// An iterator over a slice in (non-overlapping) mutable chunks (`size`
 /// elements at a time). When the slice len is not evenly divided by the chunk
 /// size, the last slice of the iteration will be the remainder.
+#[derive(Debug)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct ChunksMut<'a, T:'a> {
     v: &'a mut [T],
@@ -1429,7 +1620,7 @@ impl<'a, T> ExactSizeIterator for ChunksMut<'a, T> {}
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub unsafe fn from_raw_parts<'a, T>(p: *const T, len: usize) -> &'a [T] {
-    mem::transmute(RawSlice { data: p, len: len })
+    mem::transmute(Repr { data: p, len: len })
 }
 
 /// Performs the same functionality as `from_raw_parts`, except that a mutable
@@ -1441,62 +1632,64 @@ pub unsafe fn from_raw_parts<'a, T>(p: *const T, len: usize) -> &'a [T] {
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub unsafe fn from_raw_parts_mut<'a, T>(p: *mut T, len: usize) -> &'a mut [T] {
-    mem::transmute(RawSlice { data: p, len: len })
+    mem::transmute(Repr { data: p, len: len })
 }
 
 //
-// Submodules
+// Comparison traits
 //
 
-/// Operations on `[u8]`.
-#[unstable(feature = "slice_bytes", reason = "needs review",
-           issue = "27740")]
-#[rustc_deprecated(reason = "unidiomatic functions not pulling their weight",
-                   since = "1.6.0")]
-#[allow(deprecated)]
-pub mod bytes {
-    use ptr;
-    use slice::SliceExt;
+extern {
+    /// Calls the implementation-provided memcmp.
+    ///
+    /// Interprets the data as u8.
+    ///
+    /// Returns 0 for equal, < 0 for less than and > 0 for greater
+    /// than.
+    // FIXME(#32610): Return type should be c_int
+    fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32;
+}
 
-    /// A trait for operations on mutable `[u8]`s.
-    pub trait MutableByteVector {
-        /// Sets all bytes of the receiver to the given value.
-        fn set_memory(&mut self, value: u8);
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B> PartialEq<[B]> for [A] where A: PartialEq<B> {
+    fn eq(&self, other: &[B]) -> bool {
+        SlicePartialEq::equal(self, other)
     }
 
-    impl MutableByteVector for [u8] {
-        #[inline]
-        fn set_memory(&mut self, value: u8) {
-            unsafe { ptr::write_bytes(self.as_mut_ptr(), value, self.len()) };
-        }
+    fn ne(&self, other: &[B]) -> bool {
+        SlicePartialEq::not_equal(self, other)
     }
+}
 
-    /// Copies data from `src` to `dst`
-    ///
-    /// Panics if the length of `dst` is less than the length of `src`.
-    #[inline]
-    pub fn copy_memory(src: &[u8], dst: &mut [u8]) {
-        let len_src = src.len();
-        assert!(dst.len() >= len_src);
-        // `dst` is unaliasable, so we know statically it doesn't overlap
-        // with `src`.
-        unsafe {
-            ptr::copy_nonoverlapping(src.as_ptr(),
-                                     dst.as_mut_ptr(),
-                                     len_src);
-        }
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Eq> Eq for [T] {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> Ord for [T] {
+    fn cmp(&self, other: &[T]) -> Ordering {
+        SliceOrd::compare(self, other)
     }
 }
 
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialOrd> PartialOrd for [T] {
+    fn partial_cmp(&self, other: &[T]) -> Option<Ordering> {
+        SlicePartialOrd::partial_compare(self, other)
+    }
+}
 
+#[doc(hidden)]
+// intermediate trait for specialization of slice's PartialEq
+trait SlicePartialEq<B> {
+    fn equal(&self, other: &[B]) -> bool;
+    fn not_equal(&self, other: &[B]) -> bool;
+}
 
-//
-// Boilerplate traits
-//
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<A, B> PartialEq<[B]> for [A] where A: PartialEq<B> {
-    fn eq(&self, other: &[B]) -> bool {
+// Generic slice equality
+impl<A, B> SlicePartialEq<B> for [A]
+    where A: PartialEq<B>
+{
+    default fn equal(&self, other: &[B]) -> bool {
         if self.len() != other.len() {
             return false;
         }
@@ -1509,7 +1702,8 @@ impl<A, B> PartialEq<[B]> for [A] where A: PartialEq<B> {
 
         true
     }
-    fn ne(&self, other: &[B]) -> bool {
+
+    default fn not_equal(&self, other: &[B]) -> bool {
         if self.len() != other.len() {
             return true;
         }
@@ -1524,12 +1718,36 @@ impl<A, B> PartialEq<[B]> for [A] where A: PartialEq<B> {
     }
 }
 
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Eq> Eq for [T] {}
+// Use memcmp for bytewise equality when the types allow
+impl<A> SlicePartialEq<A> for [A]
+    where A: PartialEq<A> + BytewiseEquality
+{
+    fn equal(&self, other: &[A]) -> bool {
+        if self.len() != other.len() {
+            return false;
+        }
+        unsafe {
+            let size = mem::size_of_val(self);
+            memcmp(self.as_ptr() as *const u8,
+                   other.as_ptr() as *const u8, size) == 0
+        }
+    }
 
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Ord> Ord for [T] {
-    fn cmp(&self, other: &[T]) -> Ordering {
+    fn not_equal(&self, other: &[A]) -> bool {
+        !self.equal(other)
+    }
+}
+
+#[doc(hidden)]
+// intermediate trait for specialization of slice's PartialOrd
+trait SlicePartialOrd<B> {
+    fn partial_compare(&self, other: &[B]) -> Option<Ordering>;
+}
+
+impl<A> SlicePartialOrd<A> for [A]
+    where A: PartialOrd
+{
+    default fn partial_compare(&self, other: &[A]) -> Option<Ordering> {
         let l = cmp::min(self.len(), other.len());
 
         // Slice to the loop iteration range to enable bound check
@@ -1538,19 +1756,33 @@ impl<T: Ord> Ord for [T] {
         let rhs = &other[..l];
 
         for i in 0..l {
-            match lhs[i].cmp(&rhs[i]) {
-                Ordering::Equal => (),
+            match lhs[i].partial_cmp(&rhs[i]) {
+                Some(Ordering::Equal) => (),
                 non_eq => return non_eq,
             }
         }
 
-        self.len().cmp(&other.len())
+        self.len().partial_cmp(&other.len())
     }
 }
 
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: PartialOrd> PartialOrd for [T] {
-    fn partial_cmp(&self, other: &[T]) -> Option<Ordering> {
+impl SlicePartialOrd<u8> for [u8] {
+    #[inline]
+    fn partial_compare(&self, other: &[u8]) -> Option<Ordering> {
+        Some(SliceOrd::compare(self, other))
+    }
+}
+
+#[doc(hidden)]
+// intermediate trait for specialization of slice's Ord
+trait SliceOrd<B> {
+    fn compare(&self, other: &[B]) -> Ordering;
+}
+
+impl<A> SliceOrd<A> for [A]
+    where A: Ord
+{
+    default fn compare(&self, other: &[A]) -> Ordering {
         let l = cmp::min(self.len(), other.len());
 
         // Slice to the loop iteration range to enable bound check
@@ -1559,12 +1791,48 @@ impl<T: PartialOrd> PartialOrd for [T] {
         let rhs = &other[..l];
 
         for i in 0..l {
-            match lhs[i].partial_cmp(&rhs[i]) {
-                Some(Ordering::Equal) => (),
+            match lhs[i].cmp(&rhs[i]) {
+                Ordering::Equal => (),
                 non_eq => return non_eq,
             }
         }
 
-        self.len().partial_cmp(&other.len())
+        self.len().cmp(&other.len())
+    }
+}
+
+// memcmp compares a sequence of unsigned bytes lexicographically.
+// this matches the order we want for [u8], but no others (not even [i8]).
+impl SliceOrd<u8> for [u8] {
+    #[inline]
+    fn compare(&self, other: &[u8]) -> Ordering {
+        let order = unsafe {
+            memcmp(self.as_ptr(), other.as_ptr(),
+                   cmp::min(self.len(), other.len()))
+        };
+        if order == 0 {
+            self.len().cmp(&other.len())
+        } else if order < 0 {
+            Less
+        } else {
+            Greater
+        }
+    }
+}
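
The comment above is the whole reason the fast path stops at `[u8]`: memcmp's unsigned byte order does not match signed byte order. A quick illustration:

```
// As unsigned bytes, 0xff sorts after 0x00 ...
assert!([0xffu8] > [0x00u8]);
// ... but the same bit pattern as a signed byte (-1) sorts before 0.
assert!([-1i8] < [0i8]);
```
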
+
+#[doc(hidden)]
+/// Trait implemented for types that can be compared for equality using
+/// their bytewise representation.
+trait BytewiseEquality { }
+
+macro_rules! impl_marker_for {
+    ($traitname:ident, $($ty:ty)*) => {
+        $(
+            impl $traitname for $ty { }
+        )*
     }
 }
+
+impl_marker_for!(BytewiseEquality,
+                 u8 i8 u16 i16 u32 i32 u64 i64 usize isize char bool);
+
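
The generic/specialized split above relies on the then-new nightly `specialization` feature. A standalone, hypothetical sketch of the same pattern (names like `FastEq` are illustrative and not part of the diff):

```
#![feature(specialization)]

trait FastEq {
    fn fast_eq(&self, other: &Self) -> bool;
}

impl<T: PartialEq> FastEq for [T] {
    // Generic fallback: element-by-element comparison.
    default fn fast_eq(&self, other: &Self) -> bool {
        self.len() == other.len() && self.iter().zip(other).all(|(a, b)| a == b)
    }
}

impl FastEq for [u8] {
    // Specialized byte-slice path, analogous to the memcmp case above.
    fn fast_eq(&self, other: &Self) -> bool {
        self == other
    }
}

fn main() {
    assert!([1u8, 2, 3].fast_eq(&[1, 2, 3]));
    assert!(!["a", "b"].fast_eq(&["a", "c"]));
}
```
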
index 4d367cfd432f94a2f5333ec3bbbcdc94b33b493c..ef8670df9121e111c768b047795c3f44da8a6909 100644 (file)
@@ -29,7 +29,6 @@ use marker::Sized;
 use mem;
 use ops::{Fn, FnMut, FnOnce};
 use option::Option::{self, None, Some};
-use raw::{Repr, Slice};
 use result::Result::{self, Ok, Err};
 use slice::{self, SliceExt};
 
@@ -42,8 +41,8 @@ pub mod pattern;
 /// [`str`]'s [`parse()`] method. See [`parse()`]'s documentation for examples.
 ///
 /// [`from_str()`]: #tymethod.from_str
-/// [`str`]: ../primitive.str.html
-/// [`parse()`]: ../primitive.str.html#method.parse
+/// [`str`]: ../../std/primitive.str.html
+/// [`parse()`]: ../../std/primitive.str.html#method.parse
 #[stable(feature = "rust1", since = "1.0.0")]
 pub trait FromStr: Sized {
     /// The associated error which can be returned from parsing.
@@ -60,7 +59,7 @@ pub trait FromStr: Sized {
     ///
     /// Basic usage with [`i32`][ithirtytwo], a type that implements `FromStr`:
     ///
-    /// [ithirtytwo]: ../primitive.i32.html
+    /// [ithirtytwo]: ../../std/primitive.i32.html
     ///
     /// ```
     /// use std::str::FromStr;
@@ -182,7 +181,7 @@ impl Utf8Error {
 /// If you need a `String` instead of a `&str`, consider
 /// [`String::from_utf8()`][string].
 ///
-/// [string]: ../string/struct.String.html#method.from_utf8
+/// [string]: ../../std/string/struct.String.html#method.from_utf8
 ///
 /// Because you can stack-allocate a `[u8; N]`, and you can take a `&[u8]` of
 /// it, this function is one way to have a stack-allocated string. There is
@@ -240,7 +239,7 @@ impl Utf8Error {
 /// ```
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
-    try!(run_utf8_validation(v));
+    run_utf8_validation(v)?;
     Ok(unsafe { from_utf8_unchecked(v) })
 }
 
@@ -322,8 +321,8 @@ Section: Iterators
 ///
 /// Created with the method [`chars()`].
 ///
-/// [`chars()`]: ../primitive.str.html#method.chars
-#[derive(Clone)]
+/// [`chars()`]: ../../std/primitive.str.html#method.chars
+#[derive(Clone, Debug)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Chars<'a> {
     iter: slice::Iter<'a, u8>
@@ -468,7 +467,7 @@ impl<'a> Chars<'a> {
 }
 
 /// Iterator for a string's characters and their byte offsets.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct CharIndices<'a> {
     front_offset: usize,
@@ -531,9 +530,9 @@ impl<'a> CharIndices<'a> {
 ///
 /// Created with the method [`bytes()`].
 ///
-/// [`bytes()`]: ../primitive.str.html#method.bytes
+/// [`bytes()`]: ../../std/primitive.str.html#method.bytes
 #[stable(feature = "rust1", since = "1.0.0")]
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct Bytes<'a>(Cloned<slice::Iter<'a, u8>>);
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -662,6 +661,17 @@ macro_rules! generate_pattern_iterators {
         $(#[$common_stability_attribute])*
         pub struct $forward_iterator<'a, P: Pattern<'a>>($internal_iterator<'a, P>);
 
+        $(#[$common_stability_attribute])*
+        impl<'a, P: Pattern<'a>> fmt::Debug for $forward_iterator<'a, P>
+            where P::Searcher: fmt::Debug
+        {
+            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+                f.debug_tuple(stringify!($forward_iterator))
+                    .field(&self.0)
+                    .finish()
+            }
+        }
+
         $(#[$common_stability_attribute])*
         impl<'a, P: Pattern<'a>> Iterator for $forward_iterator<'a, P> {
             type Item = $iterty;
@@ -685,6 +695,17 @@ macro_rules! generate_pattern_iterators {
         $(#[$common_stability_attribute])*
         pub struct $reverse_iterator<'a, P: Pattern<'a>>($internal_iterator<'a, P>);
 
+        $(#[$common_stability_attribute])*
+        impl<'a, P: Pattern<'a>> fmt::Debug for $reverse_iterator<'a, P>
+            where P::Searcher: fmt::Debug
+        {
+            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+                f.debug_tuple(stringify!($reverse_iterator))
+                    .field(&self.0)
+                    .finish()
+            }
+        }
+
         $(#[$common_stability_attribute])*
         impl<'a, P: Pattern<'a>> Iterator for $reverse_iterator<'a, P>
             where P::Searcher: ReverseSearcher<'a>
@@ -746,6 +767,7 @@ derive_pattern_clone!{
     clone SplitInternal
     with |s| SplitInternal { matcher: s.matcher.clone(), ..*s }
 }
+
 struct SplitInternal<'a, P: Pattern<'a>> {
     start: usize,
     end: usize,
@@ -754,6 +776,18 @@ struct SplitInternal<'a, P: Pattern<'a>> {
     finished: bool,
 }
 
+impl<'a, P: Pattern<'a>> fmt::Debug for SplitInternal<'a, P> where P::Searcher: fmt::Debug {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("SplitInternal")
+            .field("start", &self.start)
+            .field("end", &self.end)
+            .field("matcher", &self.matcher)
+            .field("allow_trailing_empty", &self.allow_trailing_empty)
+            .field("finished", &self.finished)
+            .finish()
+    }
+}
+
 impl<'a, P: Pattern<'a>> SplitInternal<'a, P> {
     #[inline]
     fn get_end(&mut self) -> Option<&'a str> {
@@ -816,12 +850,12 @@ generate_pattern_iterators! {
     forward:
         /// Created with the method [`split()`].
         ///
-        /// [`split()`]: ../primitive.str.html#method.split
+        /// [`split()`]: ../../std/primitive.str.html#method.split
         struct Split;
     reverse:
         /// Created with the method [`rsplit()`].
         ///
-        /// [`rsplit()`]: ../primitive.str.html#method.rsplit
+        /// [`rsplit()`]: ../../std/primitive.str.html#method.rsplit
         struct RSplit;
     stability:
         #[stable(feature = "rust1", since = "1.0.0")]
@@ -834,12 +868,12 @@ generate_pattern_iterators! {
     forward:
         /// Created with the method [`split_terminator()`].
         ///
-        /// [`split_terminator()`]: ../primitive.str.html#method.split_terminator
+        /// [`split_terminator()`]: ../../std/primitive.str.html#method.split_terminator
         struct SplitTerminator;
     reverse:
         /// Created with the method [`rsplit_terminator()`].
         ///
-        /// [`rsplit_terminator()`]: ../primitive.str.html#method.rsplit_terminator
+        /// [`rsplit_terminator()`]: ../../std/primitive.str.html#method.rsplit_terminator
         struct RSplitTerminator;
     stability:
         #[stable(feature = "rust1", since = "1.0.0")]
@@ -852,12 +886,22 @@ derive_pattern_clone!{
     clone SplitNInternal
     with |s| SplitNInternal { iter: s.iter.clone(), ..*s }
 }
+
 struct SplitNInternal<'a, P: Pattern<'a>> {
     iter: SplitInternal<'a, P>,
     /// The number of splits remaining
     count: usize,
 }
 
+impl<'a, P: Pattern<'a>> fmt::Debug for SplitNInternal<'a, P> where P::Searcher: fmt::Debug {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("SplitNInternal")
+            .field("iter", &self.iter)
+            .field("count", &self.count)
+            .finish()
+    }
+}
+
 impl<'a, P: Pattern<'a>> SplitNInternal<'a, P> {
     #[inline]
     fn next(&mut self) -> Option<&'a str> {
@@ -884,12 +928,12 @@ generate_pattern_iterators! {
     forward:
         /// Created with the method [`splitn()`].
         ///
-        /// [`splitn()`]: ../primitive.str.html#method.splitn
+        /// [`splitn()`]: ../../std/primitive.str.html#method.splitn
         struct SplitN;
     reverse:
         /// Created with the method [`rsplitn()`].
         ///
-        /// [`rsplitn()`]: ../primitive.str.html#method.rsplitn
+        /// [`rsplitn()`]: ../../std/primitive.str.html#method.rsplitn
         struct RSplitN;
     stability:
         #[stable(feature = "rust1", since = "1.0.0")]
@@ -902,8 +946,17 @@ derive_pattern_clone!{
     clone MatchIndicesInternal
     with |s| MatchIndicesInternal(s.0.clone())
 }
+
 struct MatchIndicesInternal<'a, P: Pattern<'a>>(P::Searcher);
 
+impl<'a, P: Pattern<'a>> fmt::Debug for MatchIndicesInternal<'a, P> where P::Searcher: fmt::Debug {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_tuple("MatchIndicesInternal")
+            .field(&self.0)
+            .finish()
+    }
+}
+
 impl<'a, P: Pattern<'a>> MatchIndicesInternal<'a, P> {
     #[inline]
     fn next(&mut self) -> Option<(usize, &'a str)> {
@@ -926,12 +979,12 @@ generate_pattern_iterators! {
     forward:
         /// Created with the method [`match_indices()`].
         ///
-        /// [`match_indices()`]: ../primitive.str.html#method.match_indices
+        /// [`match_indices()`]: ../../std/primitive.str.html#method.match_indices
         struct MatchIndices;
     reverse:
         /// Created with the method [`rmatch_indices()`].
         ///
-        /// [`rmatch_indices()`]: ../primitive.str.html#method.rmatch_indices
+        /// [`rmatch_indices()`]: ../../std/primitive.str.html#method.rmatch_indices
         struct RMatchIndices;
     stability:
         #[stable(feature = "str_match_indices", since = "1.5.0")]
@@ -944,8 +997,17 @@ derive_pattern_clone!{
     clone MatchesInternal
     with |s| MatchesInternal(s.0.clone())
 }
+
 struct MatchesInternal<'a, P: Pattern<'a>>(P::Searcher);
 
+impl<'a, P: Pattern<'a>> fmt::Debug for MatchesInternal<'a, P> where P::Searcher: fmt::Debug {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_tuple("MatchesInternal")
+            .field(&self.0)
+            .finish()
+    }
+}
+
 impl<'a, P: Pattern<'a>> MatchesInternal<'a, P> {
     #[inline]
     fn next(&mut self) -> Option<&'a str> {
@@ -970,12 +1032,12 @@ generate_pattern_iterators! {
     forward:
         /// Created with the method [`matches()`].
         ///
-        /// [`matches()`]: ../primitive.str.html#method.matches
+        /// [`matches()`]: ../../std/primitive.str.html#method.matches
         struct Matches;
     reverse:
         /// Created with the method [`rmatches()`].
         ///
-        /// [`rmatches()`]: ../primitive.str.html#method.rmatches
+        /// [`rmatches()`]: ../../std/primitive.str.html#method.rmatches
         struct RMatches;
     stability:
         #[stable(feature = "str_matches", since = "1.2.0")]
@@ -986,9 +1048,9 @@ generate_pattern_iterators! {
 
 /// Created with the method [`lines()`].
 ///
-/// [`lines()`]: ../primitive.str.html#method.lines
+/// [`lines()`]: ../../std/primitive.str.html#method.lines
 #[stable(feature = "rust1", since = "1.0.0")]
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct Lines<'a>(Map<SplitTerminator<'a, char>, LinesAnyMap>);
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1016,10 +1078,10 @@ impl<'a> DoubleEndedIterator for Lines<'a> {
 
 /// Created with the method [`lines_any()`].
 ///
-/// [`lines_any()`]: ../primitive.str.html#method.lines_any
+/// [`lines_any()`]: ../../std/primitive.str.html#method.lines_any
 #[stable(feature = "rust1", since = "1.0.0")]
 #[rustc_deprecated(since = "1.4.0", reason = "use lines()/Lines instead now")]
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 #[allow(deprecated)]
 pub struct LinesAny<'a>(Lines<'a>);
 
@@ -1087,18 +1149,7 @@ Section: Comparing strings
 #[lang = "str_eq"]
 #[inline]
 fn eq_slice(a: &str, b: &str) -> bool {
-    a.len() == b.len() && unsafe { cmp_slice(a, b, a.len()) == 0 }
-}
-
-/// Bytewise slice comparison.
-/// NOTE: This uses the system's memcmp, which is currently dramatically
-/// faster than comparing each byte in a loop.
-#[inline]
-unsafe fn cmp_slice(a: &str, b: &str, len: usize) -> i32 {
-    // NOTE: In theory n should be libc::size_t and not usize, but libc is not available here
-    #[allow(improper_ctypes)]
-    extern { fn memcmp(s1: *const i8, s2: *const i8, n: usize) -> i32; }
-    memcmp(a.as_ptr() as *const i8, b.as_ptr() as *const i8, len)
+    a.as_bytes() == b.as_bytes()
 }
 
 /*
@@ -1244,7 +1295,7 @@ static UTF8_CHAR_WIDTH: [u8; 256] = [
 /// Struct that contains a `char` and the index of the first byte of
 /// the next `char` in a string.  This can be used as a data structure
 /// for iterating over the UTF-8 bytes of a string.
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug)]
 #[unstable(feature = "str_char",
            reason = "existence of this struct is uncertain as it is frequently \
                      able to be replaced with char.len_utf8() and/or \
@@ -1267,8 +1318,7 @@ Section: Trait implementations
 */
 
 mod traits {
-    use cmp::{self, Ordering, Ord, PartialEq, PartialOrd, Eq};
-    use cmp::Ordering::{Less, Greater};
+    use cmp::{Ord, Ordering, PartialEq, PartialOrd, Eq};
     use iter::Iterator;
     use option::Option;
     use option::Option::Some;
@@ -1279,16 +1329,7 @@ mod traits {
     impl Ord for str {
         #[inline]
         fn cmp(&self, other: &str) -> Ordering {
-            let cmp = unsafe {
-                super::cmp_slice(self, other, cmp::min(self.len(), other.len()))
-            };
-            if cmp == 0 {
-                self.len().cmp(&other.len())
-            } else if cmp < 0 {
-                Less
-            } else {
-                Greater
-            }
+            self.as_bytes().cmp(other.as_bytes())
         }
     }
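
Delegating `str` ordering to `as_bytes()` is sound because bytewise UTF-8 order matches code-point order; a small fragment illustrating that property:

```
assert!("abc" < "abd");
assert!("a" < "ab");
// 'z' (U+007A) sorts before 'λ' (U+03BB); their UTF-8 bytes (7A vs CE BB) agree.
assert!("z" < "λ");
```
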
 
@@ -1313,13 +1354,19 @@ mod traits {
         }
     }
 
+    /// Implements substring slicing with syntax `&self[begin .. end]`.
+    ///
     /// Returns a slice of the given string from the byte range
     /// [`begin`..`end`).
     ///
     /// This operation is `O(1)`.
     ///
-    /// Panics when `begin` and `end` do not point to valid characters
-    /// or point beyond the last character of the string.
+    /// # Panics
+    ///
+    /// Panics if `begin` or `end` does not point to the starting
+    /// byte offset of a character (as defined by `is_char_boundary`).
+    /// Requires that `begin <= end` and `end <= len` where `len` is the
+    /// length of the string.
     ///
     /// # Examples
     ///
@@ -1355,8 +1402,20 @@ mod traits {
         }
     }
 
+    /// Implements mutable substring slicing with syntax
+    /// `&mut self[begin .. end]`.
+    ///
     /// Returns a mutable slice of the given string from the byte range
     /// [`begin`..`end`).
+    ///
+    /// This operation is `O(1)`.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `begin` or `end` does not point to the starting
+    /// byte offset of a character (as defined by `is_char_boundary`).
+    /// Requires that `begin <= end` and `end <= len` where `len` is the
+    /// length of the string.
     #[stable(feature = "derefmut_for_string", since = "1.2.0")]
     impl ops::IndexMut<ops::Range<usize>> for str {
         #[inline]
@@ -1372,13 +1431,12 @@ mod traits {
         }
     }
 
-    /// Returns a slice of the string from the beginning to byte
-    /// `end`.
+    /// Implements substring slicing with syntax `&self[.. end]`.
     ///
-    /// Equivalent to `self[0 .. end]`.
+    /// Returns a slice of the string from the beginning to byte offset
+    /// `end`.
     ///
-    /// Panics when `end` does not point to a valid character, or is
-    /// out of bounds.
+    /// Equivalent to `&self[0 .. end]`.
     #[stable(feature = "rust1", since = "1.0.0")]
     impl ops::Index<ops::RangeTo<usize>> for str {
         type Output = str;
@@ -1394,8 +1452,12 @@ mod traits {
         }
     }
 
-    /// Returns a mutable slice of the string from the beginning to byte
+    /// Implements mutable substring slicing with syntax `&mut self[.. end]`.
+    ///
+    /// Returns a mutable slice of the string from the beginning to byte offset
     /// `end`.
+    ///
+    /// Equivalent to `&mut self[0 .. end]`.
     #[stable(feature = "derefmut_for_string", since = "1.2.0")]
     impl ops::IndexMut<ops::RangeTo<usize>> for str {
         #[inline]
@@ -1409,12 +1471,12 @@ mod traits {
         }
     }
 
-    /// Returns a slice of the string from `begin` to its end.
+    /// Implements substring slicing with syntax `&self[begin ..]`.
     ///
-    /// Equivalent to `self[begin .. self.len()]`.
+    /// Returns a slice of the string from byte offset `begin`
+    /// to the end of the string.
     ///
-    /// Panics when `begin` does not point to a valid character, or is
-    /// out of bounds.
+    /// Equivalent to `&self[begin .. len]`.
     #[stable(feature = "rust1", since = "1.0.0")]
     impl ops::Index<ops::RangeFrom<usize>> for str {
         type Output = str;
@@ -1430,7 +1492,12 @@ mod traits {
         }
     }
 
-    /// Returns a slice of the string from `begin` to its end.
+    /// Implements mutable substring slicing with syntax `&mut self[begin ..]`.
+    ///
+    /// Returns a mutable slice of the string from byte offset `begin`
+    /// to the end of the string.
+    ///
+    /// Equivalent to `&mut self[begin .. len]`.
     #[stable(feature = "derefmut_for_string", since = "1.2.0")]
     impl ops::IndexMut<ops::RangeFrom<usize>> for str {
         #[inline]
@@ -1445,6 +1512,12 @@ mod traits {
         }
     }
 
+    /// Implements substring slicing with syntax `&self[..]`.
+    ///
+    /// Returns a slice of the whole string. This operation can
+    /// never panic.
+    ///
+    /// Equivalent to `&self[0 .. len]`.
     #[stable(feature = "rust1", since = "1.0.0")]
     impl ops::Index<ops::RangeFull> for str {
         type Output = str;
@@ -1455,6 +1528,12 @@ mod traits {
         }
     }
 
+    /// Implements mutable substring slicing with syntax `&mut self[..]`.
+    ///
+    /// Returns a mutable slice of the whole string. This operation can
+    /// never panic.
+    ///
+    /// Equivalent to `&mut self[0 .. len]`.
     #[stable(feature = "derefmut_for_string", since = "1.2.0")]
     impl ops::IndexMut<ops::RangeFull> for str {
         #[inline]
@@ -1462,6 +1541,60 @@ mod traits {
             self
         }
     }
+
+    #[unstable(feature = "inclusive_range",
+               reason = "recently added, follows RFC",
+               issue = "28237")]
+    impl ops::Index<ops::RangeInclusive<usize>> for str {
+        type Output = str;
+
+        #[inline]
+        fn index(&self, index: ops::RangeInclusive<usize>) -> &str {
+            match index {
+                ops::RangeInclusive::Empty { .. } => "",
+                ops::RangeInclusive::NonEmpty { end, .. } if end == usize::max_value() =>
+                    panic!("attempted to index slice up to maximum usize"),
+                ops::RangeInclusive::NonEmpty { start, end } =>
+                    self.index(start .. end+1)
+            }
+        }
+    }
+    #[unstable(feature = "inclusive_range",
+               reason = "recently added, follows RFC",
+               issue = "28237")]
+    impl ops::Index<ops::RangeToInclusive<usize>> for str {
+        type Output = str;
+
+        #[inline]
+        fn index(&self, index: ops::RangeToInclusive<usize>) -> &str {
+            self.index(0...index.end)
+        }
+    }
+
+    #[unstable(feature = "inclusive_range",
+               reason = "recently added, follows RFC",
+               issue = "28237")]
+    impl ops::IndexMut<ops::RangeInclusive<usize>> for str {
+        #[inline]
+        fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut str {
+            match index {
+                ops::RangeInclusive::Empty { .. } => &mut self[0..0], // `&mut ""` doesn't work
+                ops::RangeInclusive::NonEmpty { end, .. } if end == usize::max_value() =>
+                    panic!("attempted to index str up to maximum usize"),
+                ops::RangeInclusive::NonEmpty { start, end } =>
+                    self.index_mut(start .. end+1)
+            }
+        }
+    }
+    #[unstable(feature = "inclusive_range",
+               reason = "recently added, follows RFC",
+               issue = "28237")]
+    impl ops::IndexMut<ops::RangeToInclusive<usize>> for str {
+        #[inline]
+        fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut str {
+            self.index_mut(0...index.end)
+        }
+    }
 }
 
 /// Methods for string slices
@@ -1469,7 +1602,7 @@ mod traits {
 #[doc(hidden)]
 #[unstable(feature = "core_str_ext",
            reason = "stable interface provided by `impl str` in later crates",
-           issue = "27701")]
+           issue = "32110")]
 pub trait StrExt {
     // NB there are no docs here as they're all located on the StrExt trait in
     // libcollections, not here.
@@ -1530,24 +1663,23 @@ pub trait StrExt {
     #[stable(feature = "core", since = "1.6.0")]
     fn trim_right_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str
         where P::Searcher: ReverseSearcher<'a>;
-    #[unstable(feature = "str_char",
-               reason = "it is unclear whether this method pulls its weight \
-                         with the existence of the char_indices iterator or \
-                         this method may want to be replaced with checked \
-                         slicing",
-               issue = "27754")]
+    #[stable(feature = "is_char_boundary", since = "1.9.0")]
     fn is_char_boundary(&self, index: usize) -> bool;
     #[unstable(feature = "str_char",
                reason = "often replaced by char_indices, this method may \
                          be removed in favor of just char_at() or eventually \
                          removed altogether",
                issue = "27754")]
+    #[rustc_deprecated(reason = "use slicing plus chars() plus len_utf8",
+                       since = "1.9.0")]
     fn char_range_at(&self, start: usize) -> CharRange;
     #[unstable(feature = "str_char",
                reason = "often replaced by char_indices, this method may \
                          be removed in favor of just char_at_reverse() or \
                          eventually removed altogether",
                issue = "27754")]
+    #[rustc_deprecated(reason = "use slicing plus chars().rev() plus len_utf8",
+                       since = "1.9.0")]
     fn char_range_at_reverse(&self, start: usize) -> CharRange;
     #[unstable(feature = "str_char",
                reason = "frequently replaced by the chars() iterator, this \
@@ -1556,12 +1688,16 @@ pub trait StrExt {
                          iterators or by getting the first char from a \
                          subslice",
                issue = "27754")]
+    #[rustc_deprecated(reason = "use slicing plus chars()",
+                       since = "1.9.0")]
     fn char_at(&self, i: usize) -> char;
     #[unstable(feature = "str_char",
                reason = "see char_at for more details, but reverse semantics \
                          are also somewhat unclear, especially with which \
                          cases generate panics",
                issue = "27754")]
+    #[rustc_deprecated(reason = "use slicing plus chars().rev()",
+                       since = "1.9.0")]
     fn char_at_reverse(&self, i: usize) -> char;
     #[stable(feature = "core", since = "1.6.0")]
     fn as_bytes(&self) -> &[u8];
@@ -1580,6 +1716,8 @@ pub trait StrExt {
                          may not be warranted with the existence of the chars \
                          and/or char_indices iterators",
                issue = "27754")]
+    #[rustc_deprecated(reason = "use chars() plus Chars::as_str",
+                       since = "1.9.0")]
     fn slice_shift_char(&self) -> Option<(char, &str)>;
     #[stable(feature = "core", since = "1.6.0")]
     fn as_ptr(&self) -> *const u8;
@@ -1591,12 +1729,30 @@ pub trait StrExt {
     fn parse<T: FromStr>(&self) -> Result<T, T::Err>;
 }
 
+// Truncates `&str` to a length at most equal to `max`.
+// Returns whether it was truncated, and the new str.
+fn truncate_to_char_boundary(s: &str, mut max: usize) -> (bool, &str) {
+    if max >= s.len() {
+        (false, s)
+    } else {
+        while !s.is_char_boundary(max) {
+            max -= 1;
+        }
+        (true, &s[..max])
+    }
+}
+
 #[inline(never)]
 #[cold]
 fn slice_error_fail(s: &str, begin: usize, end: usize) -> ! {
-    assert!(begin <= end);
-    panic!("index {} and/or {} in `{}` do not lie on character boundary",
-          begin, end, s);
+    const MAX_DISPLAY_LENGTH: usize = 256;
+    let (truncated, s) = truncate_to_char_boundary(s, MAX_DISPLAY_LENGTH);
+    let ellipsis = if truncated { "[...]" } else { "" };
+
+    assert!(begin <= end, "begin <= end ({} <= {}) when slicing `{}`{}",
+            begin, end, s, ellipsis);
+    panic!("index {} and/or {} in `{}`{} do not lie on character boundary",
+          begin, end, s, ellipsis);
 }
 
 #[stable(feature = "core", since = "1.6.0")]
@@ -1705,18 +1861,16 @@ impl StrExt for str {
 
     #[inline]
     unsafe fn slice_unchecked(&self, begin: usize, end: usize) -> &str {
-        mem::transmute(Slice {
-            data: self.as_ptr().offset(begin as isize),
-            len: end - begin,
-        })
+        let ptr = self.as_ptr().offset(begin as isize);
+        let len = end - begin;
+        from_utf8_unchecked(slice::from_raw_parts(ptr, len))
     }
 
     #[inline]
     unsafe fn slice_mut_unchecked(&mut self, begin: usize, end: usize) -> &mut str {
-        mem::transmute(Slice {
-            data: self.as_ptr().offset(begin as isize),
-            len: end - begin,
-        })
+        let ptr = self.as_ptr().offset(begin as isize);
+        let len = end - begin;
+        mem::transmute(slice::from_raw_parts_mut(ptr as *mut u8, len))
     }
 
     #[inline]
@@ -1782,7 +1936,10 @@ impl StrExt for str {
 
     #[inline]
     fn is_char_boundary(&self, index: usize) -> bool {
-        if index == self.len() { return true; }
+        // 0 and len are always ok.
+        // Test for 0 explicitly so that it can optimize out the check
+        // easily and skip reading string data for that case.
+        if index == 0 || index == self.len() { return true; }
         match self.as_bytes().get(index) {
             None => false,
             Some(&b) => b < 128 || b >= 192,
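
The boundary test relies on UTF-8 structure: continuation bytes are `0b10xxxxxx` (128..192), so any other byte value starts a character. A short fragment exercising it:

```
let s = "λx";                     // 'λ' is U+03BB, encoded as CE BB
assert!(s.is_char_boundary(0));   // always a boundary (the new fast path)
assert!(!s.is_char_boundary(1));  // 0xBB is a continuation byte
assert!(s.is_char_boundary(2));   // 'x' starts here
```
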
@@ -1827,11 +1984,13 @@ impl StrExt for str {
     }
 
     #[inline]
+    #[allow(deprecated)]
     fn char_at(&self, i: usize) -> char {
         self.char_range_at(i).ch
     }
 
     #[inline]
+    #[allow(deprecated)]
     fn char_at_reverse(&self, i: usize) -> char {
         self.char_range_at_reverse(i).ch
     }
@@ -1855,6 +2014,7 @@ impl StrExt for str {
         self.find(pat)
     }
 
+    #[inline]
     fn split_at(&self, mid: usize) -> (&str, &str) {
         // is_char_boundary checks that the index is in [0, .len()]
         if self.is_char_boundary(mid) {
@@ -1882,6 +2042,7 @@ impl StrExt for str {
     }
 
     #[inline]
+    #[allow(deprecated)]
     fn slice_shift_char(&self) -> Option<(char, &str)> {
         if self.is_empty() {
             None
@@ -1894,11 +2055,13 @@ impl StrExt for str {
 
     #[inline]
     fn as_ptr(&self) -> *const u8 {
-        self.repr().data
+        self as *const str as *const u8
     }
 
     #[inline]
-    fn len(&self) -> usize { self.repr().len }
+    fn len(&self) -> usize {
+        self.as_bytes().len()
+    }
 
     #[inline]
     fn is_empty(&self) -> bool { self.len() == 0 }
index 29130100e996fe5268c28c173de8aff97e74ae7f..b803539e12b1a2fd6d91a26d56fb854851168340 100644 (file)
@@ -20,6 +20,7 @@
 use prelude::v1::*;
 
 use cmp;
+use fmt;
 use usize;
 
 // Pattern
@@ -275,7 +276,7 @@ impl<'a> CharEq for &'a [char] {
 
 struct CharEqPattern<C: CharEq>(C);
 
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 struct CharEqSearcher<'a, C: CharEq> {
     char_eq: C,
     haystack: &'a str,
@@ -415,7 +416,7 @@ macro_rules! searcher_methods {
 /////////////////////////////////////////////////////////////////////////////
 
 /// Associated type for `<char as Pattern<'a>>::Searcher`.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct CharSearcher<'a>(<CharEqPattern<char> as Pattern<'a>>::Searcher);
 
 unsafe impl<'a> Searcher<'a> for CharSearcher<'a> {
@@ -440,7 +441,7 @@ impl<'a> Pattern<'a> for char {
 // Todo: Change / Remove due to ambiguity in meaning.
 
 /// Associated type for `<&[char] as Pattern<'a>>::Searcher`.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct CharSliceSearcher<'a, 'b>(<CharEqPattern<&'b [char]> as Pattern<'a>>::Searcher);
 
 unsafe impl<'a, 'b> Searcher<'a> for CharSliceSearcher<'a, 'b> {
@@ -467,6 +468,17 @@ impl<'a, 'b> Pattern<'a> for &'b [char] {
 pub struct CharPredicateSearcher<'a, F>(<CharEqPattern<F> as Pattern<'a>>::Searcher)
     where F: FnMut(char) -> bool;
 
+impl<'a, F> fmt::Debug for CharPredicateSearcher<'a, F>
+    where F: FnMut(char) -> bool
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("CharPredicateSearcher")
+            .field("haystack", &self.0.haystack)
+            .field("char_indices", &self.0.char_indices)
+            .field("ascii_only", &self.0.ascii_only)
+            .finish()
+    }
+}
 unsafe impl<'a, F> Searcher<'a> for CharPredicateSearcher<'a, F>
     where F: FnMut(char) -> bool
 {
@@ -492,7 +504,7 @@ impl<'a, F> Pattern<'a> for F where F: FnMut(char) -> bool {
 /////////////////////////////////////////////////////////////////////////////
 
 /// Delegates to the `&str` impl.
-impl<'a, 'b> Pattern<'a> for &'b &'b str {
+impl<'a, 'b, 'c> Pattern<'a> for &'c &'b str {
     pattern_methods!(StrSearcher<'a, 'b>, |&s| s, |s| s);
 }
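The relaxed lifetimes on the `&&str` impl above let a borrowed string slice be passed directly to pattern-taking APIs. A small illustrative use, assuming only the standard `str::find`:

    fn main() {
        let haystack = "hello world";
        let needle = "world";
        // `&needle` is a `&&str`; the impl above delegates to the plain `&str` searcher.
        assert_eq!(haystack.find(&needle), Some(6));
    }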
 
index 0c831bff763d0dc22ccb3ea1ecacb9a379ae9dbd..483c3822df6ca345532f352ff99e358897dd8c6a 100644 (file)
@@ -77,6 +77,8 @@ use marker::{Send, Sync};
 use intrinsics;
 use cell::UnsafeCell;
 
+use result::Result::{self, Ok, Err};
+
 use default::Default;
 use fmt;
 
@@ -160,7 +162,7 @@ unsafe impl<T> Sync for AtomicPtr<T> {}
 /// Rust's memory orderings are [the same as
 /// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
 #[stable(feature = "rust1", since = "1.0.0")]
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug)]
 pub enum Ordering {
     /// No ordering constraints, only atomic operations. Corresponds to LLVM's
     /// `Monotonic` ordering.
@@ -311,18 +313,21 @@ impl AtomicBool {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
-        self.compare_exchange(current, new, order, strongest_failure_ordering(order))
+        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
+            Ok(x) => x,
+            Err(x) => x,
+        }
     }
 
     /// Stores a value into the `bool` if the current value is the same as the `current` value.
     ///
-    /// The return value is always the previous value. If it is equal to `current`, then the value
-    /// was updated.
+    /// The return value is a result indicating whether the new value was written and containing
+    /// the previous value. On success this value is guaranteed to be equal to `current`.
     ///
     /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
     /// operation. The first describes the required ordering if the operation succeeds while the
     /// second describes the required ordering when the operation fails. The failure ordering can't
-    /// be `Acquire` or `AcqRel` and must be equivalent or weaker than the success ordering.
+    /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering.
     ///
     /// # Examples
     ///
@@ -336,13 +341,13 @@ impl AtomicBool {
     ///                                       false,
     ///                                       Ordering::Acquire,
     ///                                       Ordering::Relaxed),
-    ///            true);
+    ///            Ok(true));
     /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
     ///
     /// assert_eq!(some_bool.compare_exchange(true, true,
     ///                                       Ordering::SeqCst,
     ///                                       Ordering::Acquire),
-    ///            false);
+    ///            Err(false));
     /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
     /// ```
     #[inline]
@@ -351,24 +356,27 @@ impl AtomicBool {
                             current: bool,
                             new: bool,
                             success: Ordering,
-                            failure: Ordering) -> bool {
+                            failure: Ordering) -> Result<bool, bool> {
         let current = if current { UINT_TRUE } else { 0 };
         let new = if new { UINT_TRUE } else { 0 };
 
-        unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) > 0 }
+        match unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) } {
+            Ok(x) => Ok(x > 0),
+            Err(x) => Err(x > 0),
+        }
     }
 
     /// Stores a value into the `bool` if the current value is the same as the `current` value.
     ///
     /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
     /// comparison succeeds, which can result in more efficient code on some platforms. The
-    /// returned value is a tuple of the existing value and a flag indicating whether the
-    /// new value was written.
+    /// return value is a result indicating whether the new value was written and containing the
+    /// previous value.
     ///
     /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
     /// ordering of this operation. The first describes the required ordering if the operation
     /// succeeds while the second describes the required ordering when the operation fails. The
-    /// failure ordering can't be `Acquire` or `AcqRel` and must be equivalent or weaker than the
+    /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the
     /// success ordering.
     ///
     /// # Examples
@@ -382,13 +390,9 @@ impl AtomicBool {
     /// let new = true;
     /// let mut old = val.load(Ordering::Relaxed);
     /// loop {
-    ///     let result = val.compare_exchange_weak(old, new,
-    ///                                            Ordering::SeqCst,
-    ///                                            Ordering::Relaxed);
-    ///     if result.1 {
-    ///         break;
-    ///     } else {
-    ///         old = result.0;
+    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
+    ///         Ok(_) => break,
+    ///         Err(x) => old = x,
     ///     }
     /// }
     /// ```
@@ -398,14 +402,15 @@ impl AtomicBool {
                                  current: bool,
                                  new: bool,
                                  success: Ordering,
-                                 failure: Ordering) -> (bool, bool) {
+                                 failure: Ordering) -> Result<bool, bool> {
         let current = if current { UINT_TRUE } else { 0 };
         let new = if new { UINT_TRUE } else { 0 };
 
-        let result = unsafe {
-            atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
-        };
-        (result.0 > 0, result.1)
+        match unsafe { atomic_compare_exchange_weak(self.v.get(), current, new,
+                                                    success, failure) } {
+            Ok(x) => Ok(x > 0),
+            Err(x) => Err(x > 0),
+        }
     }
 
     /// Logical "and" with a boolean value.
@@ -644,18 +649,21 @@ impl AtomicIsize {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn compare_and_swap(&self, current: isize, new: isize, order: Ordering) -> isize {
-        self.compare_exchange(current, new, order, strongest_failure_ordering(order))
+        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
+            Ok(x) => x,
+            Err(x) => x,
+        }
     }
 
     /// Stores a value into the `isize` if the current value is the same as the `current` value.
     ///
-    /// The return value is always the previous value. If it is equal to `current`, then the value
-    /// was updated.
+    /// The return value is a result indicating whether the new value was written and containing
+    /// the previous value. On success this value is guaranteed to be equal to `current`.
     ///
     /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
     /// operation. The first describes the required ordering if the operation succeeds while the
     /// second describes the required ordering when the operation fails. The failure ordering can't
-    /// be `Acquire` or `AcqRel` and must be equivalent or weaker than the success ordering.
+    /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering.
     ///
     /// # Examples
     ///
@@ -668,13 +676,13 @@ impl AtomicIsize {
     /// assert_eq!(some_isize.compare_exchange(5, 10,
     ///                                        Ordering::Acquire,
     ///                                        Ordering::Relaxed),
-    ///            5);
+    ///            Ok(5));
     /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
     ///
     /// assert_eq!(some_isize.compare_exchange(6, 12,
     ///                                        Ordering::SeqCst,
     ///                                        Ordering::Acquire),
-    ///            10);
+    ///            Err(10));
     /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
     /// ```
     #[inline]
@@ -683,21 +691,21 @@ impl AtomicIsize {
                             current: isize,
                             new: isize,
                             success: Ordering,
-                            failure: Ordering) -> isize {
+                            failure: Ordering) -> Result<isize, isize> {
         unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
     }
 
-    /// Stores a value into the `isize if the current value is the same as the `current` value.
+    /// Stores a value into the `isize` if the current value is the same as the `current` value.
     ///
     /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
     /// comparison succeeds, which can result in more efficient code on some platforms. The
-    /// returned value is a tuple of the existing value and a flag indicating whether the
-    /// new value was written.
+    /// return value is a result indicating whether the new value was written and containing the
+    /// previous value.
     ///
     /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
     /// ordering of this operation. The first describes the required ordering if the operation
     /// succeeds while the second describes the required ordering when the operation fails. The
-    /// failure ordering can't be `Acquire` or `AcqRel` and must be equivalent or weaker than the
+    /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the
     /// success ordering.
     ///
     /// # Examples
@@ -711,13 +719,9 @@ impl AtomicIsize {
     /// let mut old = val.load(Ordering::Relaxed);
     /// loop {
     ///     let new = old * 2;
-    ///     let result = val.compare_exchange_weak(old, new,
-    ///                                            Ordering::SeqCst,
-    ///                                            Ordering::Relaxed);
-    ///     if result.1 {
-    ///         break;
-    ///     } else {
-    ///         old = result.0;
+    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
+    ///         Ok(_) => break,
+    ///         Err(x) => old = x,
     ///     }
     /// }
     /// ```
@@ -727,7 +731,7 @@ impl AtomicIsize {
                                  current: isize,
                                  new: isize,
                                  success: Ordering,
-                                 failure: Ordering) -> (isize, bool) {
+                                 failure: Ordering) -> Result<isize, isize> {
         unsafe { atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) }
     }
 
@@ -921,18 +925,21 @@ impl AtomicUsize {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn compare_and_swap(&self, current: usize, new: usize, order: Ordering) -> usize {
-        self.compare_exchange(current, new, order, strongest_failure_ordering(order))
+        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
+            Ok(x) => x,
+            Err(x) => x,
+        }
     }
 
     /// Stores a value into the `usize` if the current value is the same as the `current` value.
     ///
-    /// The return value is always the previous value. If it is equal to `current`, then the value
-    /// was updated.
+    /// The return value is a result indicating whether the new value was written and containing
+    /// the previous value. On success this value is guaranteed to be equal to `current`.
     ///
     /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
     /// operation. The first describes the required ordering if the operation succeeds while the
     /// second describes the required ordering when the operation fails. The failure ordering can't
-    /// be `Acquire` or `AcqRel` and must be equivalent or weaker than the success ordering.
+    /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering.
     ///
     /// # Examples
     ///
@@ -945,13 +952,13 @@ impl AtomicUsize {
     /// assert_eq!(some_isize.compare_exchange(5, 10,
     ///                                        Ordering::Acquire,
     ///                                        Ordering::Relaxed),
-    ///            5);
+    ///            Ok(5));
     /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
     ///
     /// assert_eq!(some_isize.compare_exchange(6, 12,
     ///                                        Ordering::SeqCst,
     ///                                        Ordering::Acquire),
-    ///            10);
+    ///            Err(10));
     /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
     /// ```
     #[inline]
@@ -960,7 +967,7 @@ impl AtomicUsize {
                             current: usize,
                             new: usize,
                             success: Ordering,
-                            failure: Ordering) -> usize {
+                            failure: Ordering) -> Result<usize, usize> {
         unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
     }
 
@@ -968,13 +975,13 @@ impl AtomicUsize {
     ///
     /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
     /// comparison succeeds, which can result in more efficient code on some platforms. The
-    /// returned value is a tuple of the existing value and a flag indicating whether the
-    /// new value was written.
+    /// return value is a result indicating whether the new value was written and containing the
+    /// previous value.
     ///
     /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
     /// ordering of this operation. The first describes the required ordering if the operation
     /// succeeds while the second describes the required ordering when the operation fails. The
-    /// failure ordering can't be `Acquire` or `AcqRel` and must be equivalent or weaker than the
+    /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the
     /// success ordering.
     ///
     /// # Examples
@@ -988,13 +995,9 @@ impl AtomicUsize {
     /// let mut old = val.load(Ordering::Relaxed);
     /// loop {
     ///     let new = old * 2;
-    ///     let result = val.compare_exchange_weak(old, new,
-    ///                                            Ordering::SeqCst,
-    ///                                            Ordering::Relaxed);
-    ///     if result.1 {
-    ///         break;
-    ///     } else {
-    ///         old = result.0;
+    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
+    ///         Ok(_) => break,
+    ///         Err(x) => old = x,
     ///     }
     /// }
     /// ```
@@ -1004,7 +1007,7 @@ impl AtomicUsize {
                                  current: usize,
                                  new: usize,
                                  success: Ordering,
-                                 failure: Ordering) -> (usize, bool) {
+                                 failure: Ordering) -> Result<usize, usize> {
         unsafe { atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) }
     }
 
@@ -1206,18 +1209,21 @@ impl<T> AtomicPtr<T> {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
-        self.compare_exchange(current, new, order, strongest_failure_ordering(order))
+        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
+            Ok(x) => x,
+            Err(x) => x,
+        }
     }
 
     /// Stores a value into the pointer if the current value is the same as the `current` value.
     ///
-    /// The return value is always the previous value. If it is equal to `current`, then the value
-    /// was updated.
+    /// The return value is a result indicating whether the new value was written and containing
+    /// the previous value. On success this value is guaranteed to be equal to `current`.
     ///
     /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
     /// operation. The first describes the required ordering if the operation succeeds while the
     /// second describes the required ordering when the operation fails. The failure ordering can't
-    /// be `Acquire` or `AcqRel` and must be equivalent or weaker than the success ordering.
+    /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering.
     ///
     /// # Examples
     ///
@@ -1240,10 +1246,17 @@ impl<T> AtomicPtr<T> {
                             current: *mut T,
                             new: *mut T,
                             success: Ordering,
-                            failure: Ordering) -> *mut T {
+                            failure: Ordering) -> Result<*mut T, *mut T> {
         unsafe {
-            atomic_compare_exchange(self.p.get() as *mut usize, current as usize,
-                                    new as usize, success, failure) as *mut T
+            let res = atomic_compare_exchange(self.p.get() as *mut usize,
+                                              current as usize,
+                                              new as usize,
+                                              success,
+                                              failure);
+            match res {
+                Ok(x) => Ok(x as *mut T),
+                Err(x) => Err(x as *mut T),
+            }
         }
     }
 
@@ -1251,13 +1264,13 @@ impl<T> AtomicPtr<T> {
     ///
     /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
     /// comparison succeeds, which can result in more efficient code on some platforms. The
-    /// returned value is a tuple of the existing value and a flag indicating whether the
-    /// new value was written.
+    /// return value is a result indicating whether the new value was written and containing the
+    /// previous value.
     ///
     /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
     /// ordering of this operation. The first describes the required ordering if the operation
     /// succeeds while the second describes the required ordering when the operation fails. The
-    /// failure ordering can't be `Acquire` or `AcqRel` and must be equivalent or weaker than the
+    /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the
     /// success ordering.
     ///
     /// # Examples
@@ -1271,13 +1284,9 @@ impl<T> AtomicPtr<T> {
     /// let new = &mut 10;
     /// let mut old = some_ptr.load(Ordering::Relaxed);
     /// loop {
-    ///     let result = some_ptr.compare_exchange_weak(old, new,
-    ///                                                 Ordering::SeqCst,
-    ///                                                 Ordering::Relaxed);
-    ///     if result.1 {
-    ///         break;
-    ///     } else {
-    ///         old = result.0;
+    ///     match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
+    ///         Ok(_) => break,
+    ///         Err(x) => old = x,
     ///     }
     /// }
     /// ```
@@ -1287,12 +1296,18 @@ impl<T> AtomicPtr<T> {
                                  current: *mut T,
                                  new: *mut T,
                                  success: Ordering,
-                                 failure: Ordering) -> (*mut T, bool) {
-        let result = unsafe {
-            atomic_compare_exchange_weak(self.p.get() as *mut usize, current as usize,
-                                         new as usize, success, failure)
-        };
-        (result.0 as *mut T, result.1)
+                                 failure: Ordering) -> Result<*mut T, *mut T> {
+        unsafe {
+            let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
+                                                   current as usize,
+                                                   new as usize,
+                                                   success,
+                                                   failure);
+            match res {
+                Ok(x) => Ok(x as *mut T),
+                Err(x) => Err(x as *mut T),
+            }
+        }
     }
 }
 
@@ -1365,13 +1380,13 @@ unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
 }
 
 #[inline]
-#[cfg(not(stage0))]
+#[cfg(any(not(stage0), cargobuild))]
 unsafe fn atomic_compare_exchange<T>(dst: *mut T,
                                      old: T,
                                      new: T,
                                      success: Ordering,
-                                     failure: Ordering) -> T {
-    match (success, failure) {
+                                     failure: Ordering) -> Result<T, T> {
+    let (val, ok) = match (success, failure) {
         (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
         (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
         (AcqRel, Acquire)  => intrinsics::atomic_cxchg_acqrel(dst, old, new),
@@ -1381,36 +1396,47 @@ unsafe fn atomic_compare_exchange<T>(dst: *mut T,
         (AcqRel, Relaxed)  => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
         (SeqCst, Relaxed)  => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
         (SeqCst, Acquire)  => intrinsics::atomic_cxchg_failacq(dst, old, new),
-        (_, Release) => panic!("there is no such thing as an acquire/release failure ordering"),
-        (_, AcqRel) => panic!("there is no such thing as a release failure ordering"),
+        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
+        (_, Release) => panic!("there is no such thing as a release failure ordering"),
         _ => panic!("a failure ordering can't be stronger than a success ordering"),
+    };
+    if ok {
+        Ok(val)
+    } else {
+        Err(val)
     }
 }
 
 #[inline]
-#[cfg(stage0)]
+#[cfg(all(stage0, not(cargobuild)))]
 unsafe fn atomic_compare_exchange<T>(dst: *mut T,
                                      old: T,
                                      new: T,
                                      success: Ordering,
-                                     _: Ordering) -> T {
-    match success {
+                                     _: Ordering) -> Result<T, T>
+    where T: ::cmp::Eq + ::marker::Copy
+{
+    let val = match success {
         Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
         Release => intrinsics::atomic_cxchg_rel(dst, old, new),
         AcqRel  => intrinsics::atomic_cxchg_acqrel(dst, old, new),
         Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
         SeqCst  => intrinsics::atomic_cxchg(dst, old, new),
+    };
+    if val == old {
+        Ok(val)
+    } else {
+        Err(val)
     }
 }
 
 #[inline]
-#[cfg(not(stage0))]
 unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
                                           old: T,
                                           new: T,
                                           success: Ordering,
-                                          failure: Ordering) -> (T, bool) {
-    match (success, failure) {
+                                          failure: Ordering) -> Result<T, T> {
+    let (val, ok) = match (success, failure) {
         (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
         (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
         (AcqRel, Acquire)  => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
@@ -1420,25 +1446,17 @@ unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
         (AcqRel, Relaxed)  => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
         (SeqCst, Relaxed)  => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
         (SeqCst, Acquire)  => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
-        (_, Release) => panic!("there is no such thing as an acquire/release failure ordering"),
-        (_, AcqRel) => panic!("there is no such thing as a release failure ordering"),
+        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
+        (_, Release) => panic!("there is no such thing as a release failure ordering"),
         _ => panic!("a failure ordering can't be stronger than a success ordering"),
+    };
+    if ok {
+        Ok(val)
+    } else {
+        Err(val)
     }
 }
 
-#[inline]
-#[cfg(stage0)]
-unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
-                                          old: T,
-                                          new: T,
-                                          success: Ordering,
-                                          failure: Ordering) -> (T, bool)
-    where T: ::cmp::Eq + ::marker::Copy
-{
-    let result = atomic_compare_exchange(dst, old, new, success, failure);
-    (result, result == old)
-}
-
 #[inline]
 unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
     match order {
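With this change the compare-exchange family reports success or failure through `Result<T, T>` instead of a bare value or a `(value, ok)` tuple. A minimal sketch of the resulting retry-loop idiom for an atomic counter, mirroring the doc examples in the hunks above (assumes a toolchain on which `compare_exchange_weak` is available without a feature gate; the names here are illustrative):

    use std::sync::atomic::{AtomicUsize, Ordering};

    fn increment(counter: &AtomicUsize) -> usize {
        let mut old = counter.load(Ordering::Relaxed);
        loop {
            match counter.compare_exchange_weak(old, old + 1, Ordering::SeqCst, Ordering::Relaxed) {
                // On success the previous value (equal to `old`) is returned.
                Ok(prev) => return prev,
                // On failure the actual current value is returned; retry with it.
                Err(actual) => old = actual,
            }
        }
    }

    fn main() {
        let counter = AtomicUsize::new(0);
        assert_eq!(increment(&counter), 0);
        assert_eq!(counter.load(Ordering::Relaxed), 1);
    }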
index cafffb5266f911f8e6a210b7d2221376b2bbf8f2..c0b22274ee9d628f09c36b4b7081629844d102f5 100644 (file)
@@ -261,3 +261,23 @@ fn refcell_unsized() {
     let comp: &mut [i32] = &mut [4, 2, 5];
     assert_eq!(&*cell.borrow(), comp);
 }
+
+#[test]
+fn refcell_ref_coercion() {
+    let cell: RefCell<[i32; 3]> = RefCell::new([1, 2, 3]);
+    {
+        let mut cellref: RefMut<[i32; 3]> = cell.borrow_mut();
+        cellref[0] = 4;
+        let mut coerced: RefMut<[i32]> = cellref;
+        coerced[2] = 5;
+    }
+    {
+        let comp: &mut [i32] = &mut [4, 2, 5];
+        let cellref: Ref<[i32; 3]> = cell.borrow();
+        assert_eq!(&*cellref, comp);
+        let coerced: Ref<[i32]> = cellref;
+        assert_eq!(&*coerced, comp);
+    }
+}
+
+
index c1f3ea42ef4f768d9b21e968e7fec2181f9eefd5..41fd742c9e011f48094edd8b250b1857ebc3ed33 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use std::char;
+
 #[test]
 fn test_is_lowercase() {
     assert!('a'.is_lowercase());
@@ -175,9 +177,10 @@ fn test_escape_unicode() {
 #[test]
 fn test_encode_utf8() {
     fn check(input: char, expect: &[u8]) {
-        let mut buf = [0; 4];
-        let n = input.encode_utf8(&mut buf).unwrap_or(0);
-        assert_eq!(&buf[..n], expect);
+        assert_eq!(input.encode_utf8().as_slice(), expect);
+        for (a, b) in input.encode_utf8().zip(expect) {
+            assert_eq!(a, *b);
+        }
     }
 
     check('x', &[0x78]);
@@ -189,9 +192,10 @@ fn test_encode_utf8() {
 #[test]
 fn test_encode_utf16() {
     fn check(input: char, expect: &[u16]) {
-        let mut buf = [0; 2];
-        let n = input.encode_utf16(&mut buf).unwrap_or(0);
-        assert_eq!(&buf[..n], expect);
+        assert_eq!(input.encode_utf16().as_slice(), expect);
+        for (a, b) in input.encode_utf16().zip(expect) {
+            assert_eq!(a, *b);
+        }
     }
 
     check('x', &[0x0078]);
@@ -211,7 +215,10 @@ fn test_len_utf16() {
 #[test]
 fn test_decode_utf16() {
     fn check(s: &[u16], expected: &[Result<char, u16>]) {
-        assert_eq!(::std::char::decode_utf16(s.iter().cloned()).collect::<Vec<_>>(), expected);
+        let v = char::decode_utf16(s.iter().cloned())
+                     .map(|r| r.map_err(|e| e.unpaired_surrogate()))
+                     .collect::<Vec<_>>();
+        assert_eq!(v, expected);
     }
     check(&[0xD800, 0x41, 0x42], &[Err(0xD800), Ok('A'), Ok('B')]);
     check(&[0xD800, 0], &[Err(0xD800), Ok('\0')]);
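The updated test above uses `char::decode_utf16` together with `DecodeUtf16Error::unpaired_surrogate()` to recover the raw code unit on error. A standalone sketch of that pattern, reusing the first test input:

    use std::char;

    fn main() {
        let units = [0xD800u16, 0x0041, 0x0042]; // lone surrogate, then 'A', 'B'
        let decoded: Vec<Result<char, u16>> = char::decode_utf16(units.iter().cloned())
            .map(|r| r.map_err(|e| e.unpaired_surrogate()))
            .collect();
        assert_eq!(decoded, vec![Err(0xD800), Ok('A'), Ok('B')]);
    }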
index 885ee3f9c3be2e19ba39c658719e0a899b578f2b..e71e61bda5efdec258bf7a5d81f302d17b66ba38 100644 (file)
@@ -53,7 +53,7 @@ mod debug_struct {
             fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
                 fmt.debug_struct("Foo")
                     .field("bar", &true)
-                    .field("baz", &format_args!("{}/{}", 10i32, 20i32))
+                    .field("baz", &format_args!("{}/{}", 10, 20))
                     .finish()
             }
         }
@@ -75,7 +75,7 @@ mod debug_struct {
             fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
                 fmt.debug_struct("Foo")
                     .field("bar", &true)
-                    .field("baz", &format_args!("{}/{}", 10i32, 20i32))
+                    .field("baz", &format_args!("{}/{}", 10, 20))
                     .finish()
             }
         }
@@ -150,7 +150,7 @@ mod debug_tuple {
             fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
                 fmt.debug_tuple("Foo")
                     .field(&true)
-                    .field(&format_args!("{}/{}", 10i32, 20i32))
+                    .field(&format_args!("{}/{}", 10, 20))
                     .finish()
             }
         }
@@ -172,7 +172,7 @@ mod debug_tuple {
             fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
                 fmt.debug_tuple("Foo")
                     .field(&true)
-                    .field(&format_args!("{}/{}", 10i32, 20i32))
+                    .field(&format_args!("{}/{}", 10, 20))
                     .finish()
             }
         }
@@ -247,7 +247,7 @@ mod debug_map {
             fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
                 fmt.debug_map()
                     .entry(&"bar", &true)
-                    .entry(&10i32, &format_args!("{}/{}", 10i32, 20i32))
+                    .entry(&10, &format_args!("{}/{}", 10, 20))
                     .finish()
             }
         }
@@ -269,7 +269,7 @@ mod debug_map {
             fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
                 fmt.debug_map()
                     .entry(&"bar", &true)
-                    .entry(&10i32, &format_args!("{}/{}", 10i32, 20i32))
+                    .entry(&10, &format_args!("{}/{}", 10, 20))
                     .finish()
             }
         }
@@ -348,7 +348,7 @@ mod debug_set {
             fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
                 fmt.debug_set()
                     .entry(&true)
-                    .entry(&format_args!("{}/{}", 10i32, 20i32))
+                    .entry(&format_args!("{}/{}", 10, 20))
                     .finish()
             }
         }
@@ -370,7 +370,7 @@ mod debug_set {
             fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
                 fmt.debug_set()
                     .entry(&true)
-                    .entry(&format_args!("{}/{}", 10i32, 20i32))
+                    .entry(&format_args!("{}/{}", 10, 20))
                     .finish()
             }
         }
@@ -445,7 +445,7 @@ mod debug_list {
             fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
                 fmt.debug_list()
                     .entry(&true)
-                    .entry(&format_args!("{}/{}", 10i32, 20i32))
+                    .entry(&format_args!("{}/{}", 10, 20))
                     .finish()
             }
         }
@@ -467,7 +467,7 @@ mod debug_list {
             fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
                 fmt.debug_list()
                     .entry(&true)
-                    .entry(&format_args!("{}/{}", 10i32, 20i32))
+                    .entry(&format_args!("{}/{}", 10, 20))
                     .finish()
             }
         }
index 2d3c05ea4ab14232d327e5597eddc022a75793b2..4ddedd9100486ea8640a712a390310f89b7305a2 100644 (file)
@@ -150,107 +150,3 @@ fn test_format_int_twos_complement() {
     assert!(format!("{}", i32::MIN) == "-2147483648");
     assert!(format!("{}", i64::MIN) == "-9223372036854775808");
 }
-
-#[test]
-#[allow(deprecated)]
-fn test_format_radix() {
-    use core::fmt::radix;
-    assert!(format!("{:04}", radix(3, 2)) == "0011");
-    assert!(format!("{}", radix(55, 36)) == "1j");
-}
-
-#[test]
-#[should_panic]
-#[allow(deprecated)]
-fn test_radix_base_too_large() {
-    use core::fmt::radix;
-    let _ = radix(55, 37);
-}
-
-#[allow(deprecated)]
-mod u32 {
-    use test::Bencher;
-    use core::fmt::radix;
-    use std::__rand::{thread_rng, Rng};
-    use std::io::{Write, sink};
-
-    #[bench]
-    fn format_bin(b: &mut Bencher) {
-        let mut rng = thread_rng();
-        b.iter(|| { write!(&mut sink(), "{:b}", rng.gen::<u32>()) })
-    }
-
-    #[bench]
-    fn format_oct(b: &mut Bencher) {
-        let mut rng = thread_rng();
-        b.iter(|| { write!(&mut sink(), "{:o}", rng.gen::<u32>()) })
-    }
-
-    #[bench]
-    fn format_dec(b: &mut Bencher) {
-        let mut rng = thread_rng();
-        b.iter(|| { write!(&mut sink(), "{}", rng.gen::<u32>()) })
-    }
-
-    #[bench]
-    fn format_hex(b: &mut Bencher) {
-        let mut rng = thread_rng();
-        b.iter(|| { write!(&mut sink(), "{:x}", rng.gen::<u32>()) })
-    }
-
-    #[bench]
-    fn format_show(b: &mut Bencher) {
-        let mut rng = thread_rng();
-        b.iter(|| { write!(&mut sink(), "{:?}", rng.gen::<u32>()) })
-    }
-
-    #[bench]
-    fn format_base_36(b: &mut Bencher) {
-        let mut rng = thread_rng();
-        b.iter(|| { write!(&mut sink(), "{}", radix(rng.gen::<u32>(), 36)) })
-    }
-}
-
-#[allow(deprecated)]
-mod i32 {
-    use test::Bencher;
-    use core::fmt::radix;
-    use std::__rand::{thread_rng, Rng};
-    use std::io::{Write, sink};
-
-    #[bench]
-    fn format_bin(b: &mut Bencher) {
-        let mut rng = thread_rng();
-        b.iter(|| { write!(&mut sink(), "{:b}", rng.gen::<i32>()) })
-    }
-
-    #[bench]
-    fn format_oct(b: &mut Bencher) {
-        let mut rng = thread_rng();
-        b.iter(|| { write!(&mut sink(), "{:o}", rng.gen::<i32>()) })
-    }
-
-    #[bench]
-    fn format_dec(b: &mut Bencher) {
-        let mut rng = thread_rng();
-        b.iter(|| { write!(&mut sink(), "{}", rng.gen::<i32>()) })
-    }
-
-    #[bench]
-    fn format_hex(b: &mut Bencher) {
-        let mut rng = thread_rng();
-        b.iter(|| { write!(&mut sink(), "{:x}", rng.gen::<i32>()) })
-    }
-
-    #[bench]
-    fn format_show(b: &mut Bencher) {
-        let mut rng = thread_rng();
-        b.iter(|| { write!(&mut sink(), "{:?}", rng.gen::<i32>()) })
-    }
-
-    #[bench]
-    fn format_base_36(b: &mut Bencher) {
-        let mut rng = thread_rng();
-        b.iter(|| { write!(&mut sink(), "{}", radix(rng.gen::<i32>(), 36)) })
-    }
-}
index da9062b8a92ec307e5dc06a38888037cb903327a..6c0cb03b5f775db269b4de142ab681b201ce7f5b 100644 (file)
@@ -303,6 +303,44 @@ fn test_iterator_skip() {
     assert_eq!(it.len(), 0);
 }
 
+#[test]
+fn test_iterator_skip_doubleended() {
+    let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30];
+    let mut it = xs.iter().rev().skip(5);
+    assert_eq!(it.next(), Some(&15));
+    assert_eq!(it.by_ref().rev().next(), Some(&0));
+    assert_eq!(it.next(), Some(&13));
+    assert_eq!(it.by_ref().rev().next(), Some(&1));
+    assert_eq!(it.next(), Some(&5));
+    assert_eq!(it.by_ref().rev().next(), Some(&2));
+    assert_eq!(it.next(), Some(&3));
+    assert_eq!(it.next(), None);
+    let mut it = xs.iter().rev().skip(5).rev();
+    assert_eq!(it.next(), Some(&0));
+    assert_eq!(it.rev().next(), Some(&15));
+    let mut it_base = xs.iter();
+    {
+        let mut it = it_base.by_ref().skip(5).rev();
+        assert_eq!(it.next(), Some(&30));
+        assert_eq!(it.next(), Some(&20));
+        assert_eq!(it.next(), Some(&19));
+        assert_eq!(it.next(), Some(&17));
+        assert_eq!(it.next(), Some(&16));
+        assert_eq!(it.next(), Some(&15));
+        assert_eq!(it.next(), Some(&13));
+        assert_eq!(it.next(), None);
+    }
+    // make sure the skipped parts have not been consumed
+    assert_eq!(it_base.next(), Some(&0));
+    assert_eq!(it_base.next(), Some(&1));
+    assert_eq!(it_base.next(), Some(&2));
+    assert_eq!(it_base.next(), Some(&3));
+    assert_eq!(it_base.next(), Some(&5));
+    assert_eq!(it_base.next(), None);
+    let it = xs.iter().skip(5).rev();
+    assert_eq!(it.last(), Some(&13));
+}
+
 #[test]
 fn test_iterator_skip_nth() {
     let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30];
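The new `test_iterator_skip_doubleended` above exercises `Skip` as a double-ended iterator; a compact sketch of the behavior it checks, with smaller illustrative data (skipped elements are only dropped from the front, never consumed from the back):

    fn main() {
        let xs = [1, 2, 3, 4, 5];
        let mut it = xs.iter().skip(2);
        assert_eq!(it.next_back(), Some(&5)); // from the back
        assert_eq!(it.next(), Some(&3));      // front still starts after the skip
        assert_eq!(it.next_back(), Some(&4));
        assert_eq!(it.next(), None);
    }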
@@ -639,7 +677,7 @@ fn test_rev() {
 
 #[test]
 fn test_cloned() {
-    let xs = [2u8, 4, 6, 8];
+    let xs = [2, 4, 6, 8];
 
     let mut it = xs.iter().cloned();
     assert_eq!(it.len(), 4);
@@ -823,8 +861,8 @@ fn test_range() {
     assert_eq!((-10..-1).size_hint(), (9, Some(9)));
     assert_eq!((-1..-10).size_hint(), (0, Some(0)));
 
-    assert_eq!((-70..58i8).size_hint(), (128, Some(128)));
-    assert_eq!((-128..127i8).size_hint(), (255, Some(255)));
+    assert_eq!((-70..58).size_hint(), (128, Some(128)));
+    assert_eq!((-128..127).size_hint(), (255, Some(255)));
     assert_eq!((-2..isize::MAX).size_hint(),
                (isize::MAX as usize + 2, Some(isize::MAX as usize + 2)));
 }
@@ -975,7 +1013,7 @@ fn bench_max_by_key2(b: &mut Bencher) {
         array.iter().enumerate().max_by_key(|&(_, item)| item).unwrap().0
     }
 
-    let mut data = vec![0i32; 1638];
+    let mut data = vec![0; 1638];
     data[514] = 9999;
 
     b.iter(|| max_index_iter(&data));
index e4df99002c8c23116f58249929565ca6c5b3c76e..aa7ab4b4e3f85c82eee639a02bae62b213baf687 100644 (file)
 #![feature(box_syntax)]
 #![feature(cell_extras)]
 #![feature(const_fn)]
-#![feature(copy_from_slice)]
 #![feature(core_float)]
 #![feature(core_private_bignum)]
 #![feature(core_private_diy_float)]
 #![feature(dec2flt)]
-#![feature(decode_utf16)]
 #![feature(fixed_size_array)]
 #![feature(float_extras)]
 #![feature(flt2dec)]
-#![feature(fmt_radix)]
-#![feature(iter_arith)]
 #![feature(iter_arith)]
 #![feature(libc)]
 #![feature(nonzero)]
 #![feature(peekable_is_empty)]
-#![feature(ptr_as_ref)]
 #![feature(rand)]
 #![feature(raw)]
 #![feature(slice_patterns)]
index afcf836ad10f520c7b1cef4d3e0886334db18c2d..8d791283ab87ebd22846667445f7414dc53a60b9 100644 (file)
@@ -208,11 +208,11 @@ mod tests {
     fn test_pow() {
         let mut r = 2 as $T;
 
-        assert_eq!(r.pow(2u32), 4 as $T);
-        assert_eq!(r.pow(0u32), 1 as $T);
+        assert_eq!(r.pow(2), 4 as $T);
+        assert_eq!(r.pow(0), 1 as $T);
         r = -2 as $T;
-        assert_eq!(r.pow(2u32), 4 as $T);
-        assert_eq!(r.pow(3u32), -8 as $T);
+        assert_eq!(r.pow(2), 4 as $T);
+        assert_eq!(r.pow(3), -8 as $T);
     }
 }
 
index fba56db32bb4c65376be1a666cdfe03a2f56bd72..11c1bd667fb366d2da3709d99414becfcdadb4b0 100644 (file)
@@ -99,8 +99,8 @@ mod tests {
 
     #[test]
     fn test_leading_plus() {
-        assert_eq!("+127".parse::<u8>().ok(), Some(127u8));
-        assert_eq!("+9223372036854775807".parse::<i64>().ok(), Some(9223372036854775807i64));
+        assert_eq!("+127".parse::<u8>().ok(), Some(127));
+        assert_eq!("+9223372036854775807".parse::<i64>().ok(), Some(9223372036854775807));
     }
 
     #[test]
index 3e564cf197061fc7cebdda72e2d2aea034aeb708..51b0655f680f6186857c7e07cdedaa640326ab16 100644 (file)
@@ -251,7 +251,7 @@ fn test_collect() {
 
 #[test]
 fn test_cloned() {
-    let val = 1u32;
+    let val = 1;
     let val_ref = &val;
     let opt_none: Option<&'static u32> = None;
     let opt_ref = Some(&val);
@@ -263,10 +263,10 @@ fn test_cloned() {
 
     // Immutable ref works
     assert_eq!(opt_ref.clone(), Some(&val));
-    assert_eq!(opt_ref.cloned(), Some(1u32));
+    assert_eq!(opt_ref.cloned(), Some(1));
 
     // Double Immutable ref works
     assert_eq!(opt_ref_ref.clone(), Some(&val_ref));
     assert_eq!(opt_ref_ref.clone().cloned(), Some(&val));
-    assert_eq!(opt_ref_ref.cloned().cloned(), Some(1u32));
+    assert_eq!(opt_ref_ref.cloned().cloned(), Some(1));
 }
index a6bf735e459de53f4945f381c0d96bf9778843c5..1cc008c5ee9cebd69c5120d7f2bd6bab5e8b13dd 100644 (file)
@@ -27,7 +27,7 @@
 #![feature(libc)]
 #![feature(staged_api)]
 #![feature(unique)]
-#![cfg_attr(test, feature(rustc_private, rand, vec_push_all))]
+#![cfg_attr(test, feature(rustc_private, rand))]
 
 #[cfg(test)]
 #[macro_use]
@@ -173,7 +173,7 @@ mod tests {
         for _ in 0..20 {
             let mut input = vec![];
             for _ in 0..2000 {
-                input.push_all(r.choose(&words).unwrap());
+                input.extend_from_slice(r.choose(&words).unwrap());
             }
             debug!("de/inflate of {} bytes of random word-sequences",
                    input.len());
index fe059076926ee8161ebb736e5d8fa5ad9e260d3f..eda20699755a18e89d9749627074c66a50547def 100644 (file)
 
 #![deny(missing_docs)]
 #![feature(staged_api)]
-#![feature(str_char)]
-#![cfg_attr(test, feature(rustc_private))]
-
-#[cfg(test)]
-#[macro_use]
-extern crate log;
 
 use self::Name::*;
 use self::HasArg::*;
@@ -228,7 +222,7 @@ pub type Result = result::Result<Matches, Fail>;
 impl Name {
     fn from_str(nm: &str) -> Name {
         if nm.len() == 1 {
-            Short(nm.char_at(0))
+            Short(nm.chars().next().unwrap())
         } else {
             Long(nm.to_owned())
         }
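The getopts hunks here replace the deprecated `str::char_at` with the iterator-based equivalent. A small sketch of the two idioms used in these hunks (first character of a string, and the character starting at a byte offset), with illustrative inputs:

    fn main() {
        // Replacement for `nm.char_at(0)`.
        let nm = "v";
        let first = nm.chars().next().unwrap();
        assert_eq!(first, 'v');

        // Replacement for `cur.char_at(j)`: slice from the byte offset, then take one char.
        let cur = "-abc";
        let j = 1;
        let ch = cur[j..].chars().next().unwrap();
        assert_eq!(ch, 'a');
    }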
@@ -266,7 +260,7 @@ impl OptGroup {
             }
             (1, 0) => {
                 Opt {
-                    name: Short(short_name.char_at(0)),
+                    name: Short(short_name.chars().next().unwrap()),
                     hasarg: hasarg,
                     occur: occur,
                     aliases: Vec::new(),
@@ -278,7 +272,7 @@ impl OptGroup {
                     hasarg: hasarg,
                     occur: occur,
                     aliases: vec![Opt {
-                                      name: Short(short_name.char_at(0)),
+                                      name: Short(short_name.chars().next().unwrap()),
                                       hasarg: hasarg,
                                       occur: occur,
                                       aliases: Vec::new(),
@@ -604,7 +598,7 @@ pub fn getopts(args: &[String], optgrps: &[OptGroup]) -> Result {
                 let mut j = 1;
                 names = Vec::new();
                 while j < curlen {
-                    let ch = cur.char_at(j);
+                    let ch = cur[j..].chars().next().unwrap();
                     let opt = Short(ch);
 
                     // In a series of potential options (eg. -aheJ), if we
@@ -1544,8 +1538,6 @@ Options:
 
         let generated_usage = usage("Usage: fruits", &optgroups);
 
-        debug!("expected: <<{}>>", expected);
-        debug!("generated: <<{}>>", generated_usage);
         assert_eq!(generated_usage, expected);
     }
 
@@ -1573,8 +1565,6 @@ Options:
 
         let usage = usage("Usage: fruits", &optgroups);
 
-        debug!("expected: <<{}>>", expected);
-        debug!("generated: <<{}>>", usage);
         assert!(usage == expected)
     }
 
@@ -1601,8 +1591,6 @@ Options:
 
         let usage = usage("Usage: fruits", &optgroups);
 
-        debug!("expected: <<{}>>", expected);
-        debug!("generated: <<{}>>", usage);
         assert!(usage == expected)
     }
 
@@ -1617,8 +1605,6 @@ Options:
         let expected = "Usage: fruits -b VAL [-a VAL] [-k] [-p [VAL]] [-l VAL]..".to_string();
         let generated_usage = short_usage("fruits", &optgroups);
 
-        debug!("expected: <<{}>>", expected);
-        debug!("generated: <<{}>>", generated_usage);
         assert_eq!(generated_usage, expected);
     }
 
index 2e26cc1b2660ad951b1dec5e4c5cf4a3395a8ffd..74cc498a7df23478effa5f7644a736caad606470 100644 (file)
@@ -62,7 +62,9 @@
 //!     dot::render(&edges, output).unwrap()
 //! }
 //!
-//! impl<'a> dot::Labeller<'a, Nd, Ed> for Edges {
+//! impl<'a> dot::Labeller<'a> for Edges {
+//!     type Node = Nd;
+//!     type Edge = Ed;
 //!     fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example1").unwrap() }
 //!
 //!     fn node_id(&'a self, n: &Nd) -> dot::Id<'a> {
@@ -70,7 +72,9 @@
 //!     }
 //! }
 //!
-//! impl<'a> dot::GraphWalk<'a, Nd, Ed> for Edges {
+//! impl<'a> dot::GraphWalk<'a> for Edges {
+//!     type Node = Nd;
+//!     type Edge = Ed;
 //!     fn nodes(&self) -> dot::Nodes<'a,Nd> {
 //!         // (assumes that |N| \approxeq |E|)
 //!         let &Edges(ref v) = self;
 //!     dot::render(&graph, output).unwrap()
 //! }
 //!
-//! impl<'a> dot::Labeller<'a, Nd, Ed<'a>> for Graph {
+//! impl<'a> dot::Labeller<'a> for Graph {
+//!     type Node = Nd;
+//!     type Edge = Ed<'a>;
 //!     fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example2").unwrap() }
 //!     fn node_id(&'a self, n: &Nd) -> dot::Id<'a> {
 //!         dot::Id::new(format!("N{}", n)).unwrap()
 //!     }
 //! }
 //!
-//! impl<'a> dot::GraphWalk<'a, Nd, Ed<'a>> for Graph {
+//! impl<'a> dot::GraphWalk<'a> for Graph {
+//!     type Node = Nd;
+//!     type Edge = Ed<'a>;
 //!     fn nodes(&self) -> dot::Nodes<'a,Nd> { (0..self.nodes.len()).collect() }
 //!     fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> { self.edges.iter().collect() }
 //!     fn source(&self, e: &Ed) -> Nd { let & &(s,_) = e; s }
 //!     dot::render(&graph, output).unwrap()
 //! }
 //!
-//! impl<'a> dot::Labeller<'a, Nd<'a>, Ed<'a>> for Graph {
+//! impl<'a> dot::Labeller<'a> for Graph {
+//!     type Node = Nd<'a>;
+//!     type Edge = Ed<'a>;
 //!     fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example3").unwrap() }
 //!     fn node_id(&'a self, n: &Nd<'a>) -> dot::Id<'a> {
 //!         dot::Id::new(format!("N{}", n.0)).unwrap()
 //!     }
 //! }
 //!
-//! impl<'a> dot::GraphWalk<'a, Nd<'a>, Ed<'a>> for Graph {
+//! impl<'a> dot::GraphWalk<'a> for Graph {
+//!     type Node = Nd<'a>;
+//!     type Edge = Ed<'a>;
 //!     fn nodes(&'a self) -> dot::Nodes<'a,Nd<'a>> {
 //!         self.nodes.iter().map(|s| &s[..]).enumerate().collect()
 //!     }
 #![cfg_attr(not(stage0), deny(warnings))]
 
 #![feature(str_escape)]
+#![feature(question_mark)]
 
 use self::LabelText::*;
 
@@ -447,45 +460,48 @@ impl<'a> Id<'a> {
 /// The graph instance is responsible for providing the DOT compatible
 /// identifiers for the nodes and (optionally) rendered labels for the nodes and
 /// edges, as well as an identifier for the graph itself.
-pub trait Labeller<'a,N,E> {
+pub trait Labeller<'a> {
+    type Node;
+    type Edge;
+
     /// Must return a DOT compatible identifier naming the graph.
     fn graph_id(&'a self) -> Id<'a>;
 
     /// Maps `n` to a unique identifier with respect to `self`. The
     /// implementor is responsible for ensuring that the returned name
     /// is a valid DOT identifier.
-    fn node_id(&'a self, n: &N) -> Id<'a>;
+    fn node_id(&'a self, n: &Self::Node) -> Id<'a>;
 
     /// Maps `n` to one of the [graphviz `shape` names][1]. If `None`
     /// is returned, no `shape` attribute is specified.
     ///
     /// [1]: http://www.graphviz.org/content/node-shapes
-    fn node_shape(&'a self, _node: &N) -> Option<LabelText<'a>> {
+    fn node_shape(&'a self, _node: &Self::Node) -> Option<LabelText<'a>> {
         None
     }
 
     /// Maps `n` to a label that will be used in the rendered output.
     /// The label need not be unique, and may be the empty string; the
     /// default is just the output from `node_id`.
-    fn node_label(&'a self, n: &N) -> LabelText<'a> {
+    fn node_label(&'a self, n: &Self::Node) -> LabelText<'a> {
         LabelStr(self.node_id(n).name)
     }
 
     /// Maps `e` to a label that will be used in the rendered output.
     /// The label need not be unique, and may be the empty string; the
     /// default is in fact the empty string.
-    fn edge_label(&'a self, e: &E) -> LabelText<'a> {
+    fn edge_label(&'a self, e: &Self::Edge) -> LabelText<'a> {
         let _ignored = e;
         LabelStr("".into_cow())
     }
 
     /// Maps `n` to a style that will be used in the rendered output.
-    fn node_style(&'a self, _n: &N) -> Style {
+    fn node_style(&'a self, _n: &Self::Node) -> Style {
         Style::None
     }
 
     /// Maps `e` to a style that will be used in the rendered output.
-    fn edge_style(&'a self, _e: &E) -> Style {
+    fn edge_style(&'a self, _e: &Self::Edge) -> Style {
         Style::None
     }
 }
@@ -596,15 +612,18 @@ pub type Edges<'a,E> = Cow<'a,[E]>;
 /// `Cow<[T]>` to leave implementors the freedom to create
 /// entirely new vectors or to pass back slices into internally owned
 /// vectors.
-pub trait GraphWalk<'a, N: Clone, E: Clone> {
+pub trait GraphWalk<'a> {
+    type Node: Clone;
+    type Edge: Clone;
+
     /// Returns all the nodes in this graph.
-    fn nodes(&'a self) -> Nodes<'a, N>;
+    fn nodes(&'a self) -> Nodes<'a, Self::Node>;
     /// Returns all of the edges in this graph.
-    fn edges(&'a self) -> Edges<'a, E>;
+    fn edges(&'a self) -> Edges<'a, Self::Edge>;
     /// The source node for `edge`.
-    fn source(&'a self, edge: &E) -> N;
+    fn source(&'a self, edge: &Self::Edge) -> Self::Node;
     /// The target node for `edge`.
-    fn target(&'a self, edge: &E) -> N;
+    fn target(&'a self, edge: &Self::Edge) -> Self::Node;
 }
 
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
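After this refactor the graph traits carry their node and edge types as associated types, so generic code constrains them with equality bounds (as in `render_opts` below) rather than extra type parameters. A minimal, self-contained sketch of that bound style using stand-in types; the trait here is a simplified stand-in, not the library's actual `Labeller`:

    // Miniature stand-in for the refactored trait, to show the bound style.
    trait Labeller<'a> {
        type Node;
        fn node_name(&'a self, n: &Self::Node) -> String;
    }

    struct Graph;

    impl<'a> Labeller<'a> for Graph {
        type Node = usize;
        fn node_name(&'a self, n: &usize) -> String {
            format!("N{}", n)
        }
    }

    // Generic code names the associated type instead of taking `N` as a parameter.
    fn first_name<'a, G>(g: &'a G, n: &G::Node) -> String
        where G: Labeller<'a>
    {
        g.node_name(n)
    }

    fn main() {
        let g = Graph;
        assert_eq!(first_name(&g, &3), "N3");
    }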
@@ -622,31 +641,29 @@ pub fn default_options() -> Vec<RenderOption> {
 
 /// Renders directed graph `g` into the writer `w` in DOT syntax.
 /// (Simple wrapper around `render_opts` that passes a default set of options.)
-pub fn render<'a,
-              N: Clone + 'a,
-              E: Clone + 'a,
-              G: Labeller<'a, N, E> + GraphWalk<'a, N, E>,
-              W: Write>
-    (g: &'a G,
-     w: &mut W)
-     -> io::Result<()> {
+pub fn render<'a,N,E,G,W>(g: &'a G, w: &mut W) -> io::Result<()>
+    where N: Clone + 'a,
+          E: Clone + 'a,
+          G: Labeller<'a, Node=N, Edge=E> + GraphWalk<'a, Node=N, Edge=E>,
+          W: Write
+{
     render_opts(g, w, &[])
 }
 
 /// Renders directed graph `g` into the writer `w` in DOT syntax.
 /// (Main entry point for the library.)
-pub fn render_opts<'a,
-                   N: Clone + 'a,
-                   E: Clone + 'a,
-                   G: Labeller<'a, N, E> + GraphWalk<'a, N, E>,
-                   W: Write>
-    (g: &'a G,
-     w: &mut W,
-     options: &[RenderOption])
-     -> io::Result<()> {
+pub fn render_opts<'a, N, E, G, W>(g: &'a G,
+                                   w: &mut W,
+                                   options: &[RenderOption])
+                                   -> io::Result<()>
+    where N: Clone + 'a,
+          E: Clone + 'a,
+          G: Labeller<'a, Node=N, Edge=E> + GraphWalk<'a, Node=N, Edge=E>,
+          W: Write
+{
     fn writeln<W: Write>(w: &mut W, arg: &[&str]) -> io::Result<()> {
         for &s in arg {
-            try!(w.write_all(s.as_bytes()));
+            w.write_all(s.as_bytes())?;
         }
         write!(w, "\n")
     }
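This file also moves from the `try!` macro to the `?` operator (enabled by the `#![feature(question_mark)]` attribute added above); both are an early return of the `Err` case. A minimal, self-contained sketch of the same pattern as the `writeln` helper, with illustrative names:

    use std::io::{self, Write};

    fn write_all_strs<W: Write>(w: &mut W, parts: &[&str]) -> io::Result<()> {
        for s in parts {
            // Equivalent to `try!(w.write_all(s.as_bytes()))`:
            // on Err, return the error from the enclosing function.
            w.write_all(s.as_bytes())?;
        }
        Ok(())
    }

    fn main() {
        let mut buf = Vec::new();
        write_all_strs(&mut buf, &["digraph ", "g", " {"]).unwrap();
        assert_eq!(buf, b"digraph g {".to_vec());
    }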
@@ -655,9 +672,9 @@ pub fn render_opts<'a,
         w.write_all(b"    ")
     }
 
-    try!(writeln(w, &["digraph ", g.graph_id().as_slice(), " {"]));
+    writeln(w, &["digraph ", g.graph_id().as_slice(), " {"])?;
     for n in g.nodes().iter() {
-        try!(indent(w));
+        indent(w)?;
         let id = g.node_id(n);
 
         let escaped = &g.node_label(n).to_dot_string();
@@ -686,12 +703,12 @@ pub fn render_opts<'a,
         }
 
         text.push(";");
-        try!(writeln(w, &text));
+        writeln(w, &text)?;
     }
 
     for e in g.edges().iter() {
         let escaped_label = &g.edge_label(e).to_dot_string();
-        try!(indent(w));
+        indent(w)?;
         let source = g.source(e);
         let target = g.target(e);
         let source_id = g.node_id(&source);
@@ -713,7 +730,7 @@ pub fn render_opts<'a,
         }
 
         text.push(";");
-        try!(writeln(w, &text));
+        writeln(w, &text)?;
     }
 
     writeln(w, &["}"])
@@ -858,7 +875,9 @@ mod tests {
         Id::new(format!("N{}", *n)).unwrap()
     }
 
-    impl<'a> Labeller<'a, Node, &'a Edge> for LabelledGraph {
+    impl<'a> Labeller<'a> for LabelledGraph {
+        type Node = Node;
+        type Edge = &'a Edge;
         fn graph_id(&'a self) -> Id<'a> {
             Id::new(&self.name[..]).unwrap()
         }
@@ -882,7 +901,9 @@ mod tests {
         }
     }
 
-    impl<'a> Labeller<'a, Node, &'a Edge> for LabelledGraphWithEscStrs {
+    impl<'a> Labeller<'a> for LabelledGraphWithEscStrs {
+        type Node = Node;
+        type Edge = &'a Edge;
         fn graph_id(&'a self) -> Id<'a> {
             self.graph.graph_id()
         }
@@ -901,7 +922,9 @@ mod tests {
         }
     }
 
-    impl<'a> GraphWalk<'a, Node, &'a Edge> for LabelledGraph {
+    impl<'a> GraphWalk<'a> for LabelledGraph {
+        type Node = Node;
+        type Edge = &'a Edge;
         fn nodes(&'a self) -> Nodes<'a, Node> {
             (0..self.node_labels.len()).collect()
         }
@@ -916,7 +939,9 @@ mod tests {
         }
     }
 
-    impl<'a> GraphWalk<'a, Node, &'a Edge> for LabelledGraphWithEscStrs {
+    impl<'a> GraphWalk<'a> for LabelledGraphWithEscStrs {
+        type Node = Node;
+        type Edge = &'a Edge;
         fn nodes(&'a self) -> Nodes<'a, Node> {
             self.graph.nodes()
         }
@@ -935,7 +960,7 @@ mod tests {
         let mut writer = Vec::new();
         render(&g, &mut writer).unwrap();
         let mut s = String::new();
-        try!(Read::read_to_string(&mut &*writer, &mut s));
+        Read::read_to_string(&mut &*writer, &mut s)?;
         Ok(s)
     }
 
index d6d3ab581d95dfdb520d75e4e23bf978377799ae..11a830e41fcff8c0bb0b29ee240a9cb010d8a5f3 100644 (file)
@@ -16,6 +16,7 @@ script:
     else
       cargo build;
       cargo build --no-default-features;
+      rustc ci/style.rs && ./style src;
     fi
 os:
   - linux
@@ -51,7 +52,7 @@ matrix:
       rust: nightly
     - os: linux
       env: TARGET=x86_64-rumprun-netbsd DOCKER=alexcrichton/rust-libc-rumprun:2015-11-27
-      rust: nightly-2015-09-27
+      rust: nightly
     - os: linux
       env: TARGET=x86_64-unknown-freebsd QEMU=freebsd.qcow2
       rust: nightly
@@ -61,6 +62,4 @@ matrix:
 notifications:
   email:
     on_success: never
-branches:
-  only:
-    - master
+  webhooks: http://buildbot.rust-lang.org/homu/travis
index 28e5b3359e8dc5a97bd61f348bb502ba25be8339..36c6d199d3c057c1189c0f21b929266c42e03898 100644 (file)
@@ -1,7 +1,7 @@
 [package]
 
 name = "libc"
-version = "0.2.7"
+version = "0.2.9"
 authors = ["The Rust Project Developers"]
 license = "MIT/Apache-2.0"
 readme = "README.md"
index 74811ed105ecac2a87d2c86d8dd584666d4dd1e3..93bd0f93623c472d6fce2c1e72a1eeb23d57bdba 100644 (file)
@@ -19,7 +19,3 @@ build: false
 test_script:
   - cargo test
   - cargo run --manifest-path libc-test/Cargo.toml
-
-branches:
-  only:
-    - master
index fc69fa88eb5ce6fc8724ac195827278df5cdcdbd..941cc8d2b40307cc0a5911984505cf6b48cd82aa 100644 (file)
@@ -1,7 +1,3 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-  </head>
-  <body>
-    <ul>
+    </ul>
+  </body>
+</html>
index de3c63c01f3103b223fa934ff808d5609022824c..fc69fa88eb5ce6fc8724ac195827278df5cdcdbd 100644 (file)
@@ -1,4 +1,7 @@
-    </ul>
-  </body>
-</html>
-
+<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <meta charset="utf-8">
+  </head>
+  <body>
+    <ul>
index e6d6c81562c9dba12b0b8d50e8092e00f9df50e3..d9f708aa12a91ebe352fbc810943b14c03757dbc 100644 (file)
@@ -19,7 +19,10 @@ fi
 
 MAIN_TARGETS=https://static.rust-lang.org/dist
 DATE=$(echo $TRAVIS_RUST_VERSION | sed s/nightly-//)
-EXTRA_TARGETS=https://people.mozilla.org/~acrichton/libc-test/$DATE
+if [ "$DATE" != "nightly" ]; then
+    MAIN_TARGETS=$MAIN_TARGETS/$DATE
+    TRAVIS_RUST_VERSION=nightly
+fi
 
 install() {
   if [ "$TRAVIS" = "true" ]; then
@@ -104,30 +107,11 @@ mkdir -p .cargo
 cp ci/cargo-config .cargo/config
 
 # Next up we need to install the standard library for the version of Rust that
-# we're testing. Get fancy targets from the EXTRA_TARGETS URL and otherwise get
-# all others from the official distribution.
+# we're testing.
 if [ "$TRAVIS" = "true" ]; then
-  case "$TARGET" in
-    *-rumprun-*)
-      curl -s $EXTRA_TARGETS/$TARGET.tar.gz | \
-       tar xzf - -C `rustc --print sysroot`/lib/rustlib
-      ;;
-
-    *)
-      # Download the rustlib folder from the relevant portion of main
-      # distribution's tarballs.
-      dir=rust-std-$TARGET
-      pkg=rust-std
-      if [ "$TRAVIS_RUST_VERSION" = "1.0.0" ]; then
-        pkg=rust
-        dir=rustc
-      fi
-      curl -s $MAIN_TARGETS/$pkg-$TRAVIS_RUST_VERSION-$TARGET.tar.gz | \
-        tar xzf - -C $HOME/rust/lib/rustlib --strip-components=4 \
-          $pkg-$TRAVIS_RUST_VERSION-$TARGET/$dir/lib/rustlib/$TARGET
-      ;;
-
-  esac
+  curl -s $MAIN_TARGETS/rust-std-$TRAVIS_RUST_VERSION-$TARGET.tar.gz | \
+    tar xzf - -C $HOME/rust/lib/rustlib --strip-components=4 \
+      rust-std-$TRAVIS_RUST_VERSION-$TARGET/rust-std-$TARGET/lib/rustlib/$TARGET
 fi
 
 # If we're testing with a docker image, then run tests entirely within that
diff --git a/src/liblibc/ci/style.rs b/src/liblibc/ci/style.rs
new file mode 100644 (file)
index 0000000..32e4ba7
--- /dev/null
@@ -0,0 +1,204 @@
+//! Simple script to verify the coding style of this library
+//!
+//! ## How to run
+//!
+//! The first argument to this script is the directory to run on, so running
+//! this script should be as simple as:
+//!
+//! ```notrust
+//! rustc ci/style.rs
+//! ./style src
+//! ```
+//!
+//! ## Guidelines
+//!
+//! The current style is:
+//!
+//! * No trailing whitespace
+//! * No tabs
+//! * 80-character lines
+//! * `extern` instead of `extern "C"`
+//! * Specific module layout:
+//!     1. use directives
+//!     2. typedefs
+//!     3. structs
+//!     4. constants
+//!     5. f! { ... } functions
+//!     6. extern functions
+//!     7. modules + pub use
+//!
+//! Things not verified:
+//!
+//! * alignment
+//! * 4-space tabs
+//! * leading colons on paths
+
+use std::env;
+use std::fs;
+use std::io::prelude::*;
+use std::path::Path;
+
+macro_rules! t {
+    ($e:expr) => (match $e {
+        Ok(e) => e,
+        Err(e) => panic!("{} failed with {}", stringify!($e), e),
+    })
+}
+
+fn main() {
+    let arg = env::args().skip(1).next().unwrap_or(".".to_string());
+
+    let mut errors = Errors { errs: false };
+    walk(Path::new(&arg), &mut errors);
+
+    if errors.errs {
+        panic!("found some lint errors");
+    } else {
+        println!("good style!");
+    }
+}
+
+fn walk(path: &Path, err: &mut Errors) {
+    for entry in t!(path.read_dir()).map(|e| t!(e)) {
+        let path = entry.path();
+        if t!(entry.file_type()).is_dir() {
+            walk(&path, err);
+            continue
+        }
+
+        let name = entry.file_name().into_string().unwrap();
+        match &name[..] {
+            n if !n.ends_with(".rs") => continue,
+
+            "dox.rs" |
+            "lib.rs" |
+            "macros.rs" => continue,
+
+            _ => {}
+        }
+
+        let mut contents = String::new();
+        t!(t!(fs::File::open(&path)).read_to_string(&mut contents));
+
+        check_style(&contents, &path, err);
+    }
+}
+
+struct Errors {
+    errs: bool,
+}
+
+#[derive(Clone, Copy, PartialEq)]
+enum State {
+    Start,
+    Imports,
+    Typedefs,
+    Structs,
+    Constants,
+    FunctionDefinitions,
+    Functions,
+    Modules,
+}
+
+fn check_style(file: &str, path: &Path, err: &mut Errors) {
+    let mut state = State::Start;
+    let mut s_macros = 0;
+    let mut f_macros = 0;
+    let mut prev_blank = false;
+
+    for (i, line) in file.lines().enumerate() {
+        if line == "" {
+            if prev_blank {
+                err.error(path, i, "double blank line");
+            }
+            prev_blank = true;
+        } else {
+            prev_blank = false;
+        }
+        if line != line.trim_right() {
+            err.error(path, i, "trailing whitespace");
+        }
+        if line.contains("\t") {
+            err.error(path, i, "tab character");
+        }
+        if line.len() > 80 {
+            err.error(path, i, "line longer than 80 chars");
+        }
+        if line.contains("extern \"C\"") {
+            err.error(path, i, "use `extern` instead of `extern \"C\"`");
+        }
+        if line.contains("#[cfg(") && !line.contains(" if ") {
+            if state != State::Structs {
+                err.error(path, i, "use cfg_if! and submodules \
+                                    instead of #[cfg]");
+            }
+        }
+
+        let line = line.trim_left();
+        let is_pub = line.starts_with("pub ");
+        let line = if is_pub {&line[4..]} else {line};
+
+        let line_state = if line.starts_with("use ") {
+            if is_pub {
+                State::Modules
+            } else {
+                State::Imports
+            }
+        } else if line.starts_with("const ") {
+            State::Constants
+        } else if line.starts_with("type ") {
+            State::Typedefs
+        } else if line.starts_with("s! {") {
+            s_macros += 1;
+            State::Structs
+        } else if line.starts_with("f! {") {
+            f_macros += 1;
+            State::FunctionDefinitions
+        } else if line.starts_with("extern ") {
+            State::Functions
+        } else if line.starts_with("mod ") {
+            State::Modules
+        } else {
+            continue
+        };
+
+        if state as usize > line_state as usize {
+            err.error(path, i, &format!("{} found after {} when \
+                                         it belongs before",
+                                        line_state.desc(), state.desc()));
+        }
+
+        if f_macros == 2 {
+            f_macros += 1;
+            err.error(path, i, "multiple f! macros in one module");
+        }
+        if s_macros == 2 {
+            s_macros += 1;
+            err.error(path, i, "multiple s! macros in one module");
+        }
+
+        state = line_state;
+    }
+}
+
+impl State {
+    fn desc(&self) -> &str {
+        match *self {
+            State::Start => "start",
+            State::Imports => "import",
+            State::Typedefs => "typedef",
+            State::Structs => "struct",
+            State::Constants => "constant",
+            State::FunctionDefinitions => "function definition",
+            State::Functions => "extern function",
+            State::Modules => "module",
+        }
+    }
+}
+
+impl Errors {
+    fn error(&mut self, path: &Path, line: usize, msg: &str) {
+        self.errs = true;
+        println!("{}:{} - {}", path.display(), line + 1, msg);
+    }
+}
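The checker above classifies each top-level item into one of the `State` sections and reports any item that appears after a later section. The comparison works because the variants are declared in the required order, so casting to `usize` yields a rank. A small stand-alone sketch of that ordering trick (illustrative only):

```rust
// The variants are declared in the order the sections must appear,
// so `as usize` gives each section a rank.
#[derive(Clone, Copy, PartialEq)]
enum State {
    Start,
    Imports,
    Typedefs,
    Structs,
    Constants,
}

fn main() {
    let last_seen = State::Constants;
    let this_line = State::Typedefs;
    if last_seen as usize > this_line as usize {
        println!("typedef found after constant when it belongs before");
    }
}
```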
index e5564c5643015150dc3240d986e6781541feb439..007ba034ec22969ba184198a910f029e086a2ff0 100644 (file)
@@ -3,16 +3,7 @@ name = "libc-test"
 version = "0.1.0"
 dependencies = [
  "ctest 0.1.0 (git+https://github.com/alexcrichton/ctest)",
- "libc 0.2.7",
-]
-
-[[package]]
-name = "advapi32-sys"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.9",
 ]
 
 [[package]]
@@ -23,27 +14,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 [[package]]
 name = "ctest"
 version = "0.1.0"
-source = "git+https://github.com/alexcrichton/ctest#7703b51086cce2d9a703b103d0695b36653b8cab"
+source = "git+https://github.com/alexcrichton/ctest#50ac771acb7bb45cf0c182a5a9c8188a15c89efc"
 dependencies = [
- "gcc 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)",
  "syntex_syntax 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "gcc"
-version = "0.3.21"
+version = "0.3.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "advapi32-sys 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
-]
 
 [[package]]
 name = "kernel32-sys"
 version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -54,24 +41,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "libc"
-version = "0.2.5"
+version = "0.2.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "libc"
-version = "0.2.7"
+version = "0.2.9"
 
 [[package]]
 name = "log"
 version = "0.3.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "rustc-serialize"
-version = "0.3.16"
+version = "0.3.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -82,7 +69,7 @@ dependencies = [
  "bitflags 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-serialize 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "term 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "unicode-xid 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -93,7 +80,7 @@ version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -103,7 +90,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "winapi"
-version = "0.2.5"
+version = "0.2.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
index 9c3b592690d6bbcfd66ffc23ba78798904a1f1d8..984a63ac8fb3194357fa295dcdabe309160b7e46 100644 (file)
@@ -14,11 +14,12 @@ fn main() {
     let apple = target.contains("apple");
     let musl = target.contains("musl");
     let freebsd = target.contains("freebsd");
+    let dragonfly = target.contains("dragonfly");
     let mips = target.contains("mips");
     let netbsd = target.contains("netbsd");
     let openbsd = target.contains("openbsd");
     let rumprun = target.contains("rumprun");
-    let bsdlike = freebsd || apple || netbsd || openbsd;
+    let bsdlike = freebsd || apple || netbsd || openbsd || dragonfly;
     let mut cfg = ctest::TestGenerator::new();
 
     // Pull in extra goodies on linux/mingw
@@ -37,6 +38,7 @@ fn main() {
     cfg.header("errno.h")
        .header("fcntl.h")
        .header("limits.h")
+       .header("locale.h")
        .header("stddef.h")
        .header("stdint.h")
        .header("stdio.h")
@@ -103,7 +105,7 @@ fn main() {
         cfg.header("ifaddrs.h");
         cfg.header("sys/statvfs.h");
 
-        if !openbsd && !freebsd {
+        if !openbsd && !freebsd && !dragonfly {
             cfg.header("sys/quota.h");
         }
 
@@ -120,6 +122,7 @@ fn main() {
         cfg.header("mach-o/dyld.h");
         cfg.header("mach/mach_time.h");
         cfg.header("malloc/malloc.h");
+        cfg.header("util.h");
         if target.starts_with("x86") {
             cfg.header("crt_externs.h");
         }
@@ -127,14 +130,22 @@ fn main() {
 
     if bsdlike {
         cfg.header("sys/event.h");
+
+        if freebsd {
+            cfg.header("libutil.h");
+        } else {
+            cfg.header("util.h");
+        }
     }
 
     if linux {
         cfg.header("mqueue.h");
+        cfg.header("ucontext.h");
         cfg.header("sys/signalfd.h");
         cfg.header("sys/xattr.h");
         cfg.header("sys/ipc.h");
         cfg.header("sys/shm.h");
+        cfg.header("pty.h");
     }
 
     if linux || android {
@@ -177,6 +188,12 @@ fn main() {
         cfg.header("sys/syscall.h");
     }
 
+    if dragonfly {
+        cfg.header("ufs/ufs/quota.h");
+        cfg.header("pthread_np.h");
+        cfg.header("sys/ioctl_compat.h");
+    }
+
     cfg.type_name(move |ty, is_struct| {
         match ty {
             // Just pass all these through, no need for a "struct" prefix
@@ -254,12 +271,14 @@ fn main() {
         }
     });
 
-    cfg.skip_signededness(|c| {
+    cfg.skip_signededness(move |c| {
         match c {
             "LARGE_INTEGER" |
             "mach_timebase_info_data_t" |
             "float" |
             "double" => true,
+            // uuid_t is a struct, not an integer.
+            "uuid_t" if dragonfly => true,
             n if n.starts_with("pthread") => true,
 
             // windows-isms
@@ -293,6 +312,7 @@ fn main() {
 
             // weird signed extension or something like that?
             "MS_NOUSER" => true,
+            "MS_RMT_MASK" => true, // updated in glibc 2.22 and musl 1.1.13
 
             // These OSX constants are flagged as deprecated
             "NOTE_EXIT_REPARENTED" |
@@ -321,7 +341,7 @@ fn main() {
             "strerror_r" if linux => true,   // actually xpg-something-or-other
 
             // typed 2nd arg on linux and android
-            "gettimeofday" if linux || android || freebsd || openbsd => true,
+            "gettimeofday" if linux || android || freebsd || openbsd || dragonfly => true,
 
             // not declared in newer android toolchains
             "getdtablesize" if android => true,
@@ -341,10 +361,6 @@ fn main() {
             // they're implemented on rumprun yet, just let them slide for now.
             // Some of them look like they have headers but then don't have
             // corresponding actual definitions either...
-            "backtrace" |
-            "pthread_main_np" |
-            "pthread_set_name_np" |
-            "pthread_stackseg_np" |
             "shm_open" |
             "shm_unlink" |
             "syscall" |
index 8a599cff1bd9651929b7cd3b4b4ee4fa5c94e685..7b23e556c64c745597f1edca3e3df455cade934b 100644 (file)
@@ -10,7 +10,7 @@ name = "advapi32-sys"
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -34,7 +34,7 @@ version = "0.3.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "advapi32-sys 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -42,7 +42,7 @@ name = "kernel32-sys"
 version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -88,7 +88,7 @@ version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -98,7 +98,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "winapi"
-version = "0.2.5"
+version = "0.2.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
index c9d7701aa652525e20e8c410bc4f629367d12706..2593e3ba6bc00e85cdeedbb1e16b39b8477dd8aa 100644 (file)
@@ -74,7 +74,7 @@
 ))]
 
 // Attributes needed when building as part of the standard library
-#![cfg_attr(stdbuild, feature(no_std, core, core_slice_ext, staged_api, custom_attribute))]
+#![cfg_attr(stdbuild, feature(no_std, core, core_slice_ext, staged_api, custom_attribute, cfg_target_vendor))]
 #![cfg_attr(stdbuild, no_std)]
 #![cfg_attr(stdbuild, staged_api)]
 #![cfg_attr(stdbuild, allow(warnings))]
@@ -258,8 +258,10 @@ cfg_if! {
     if #[cfg(windows)] {
         mod windows;
         pub use windows::*;
-    } else {
+    } else if #[cfg(unix)] {
         mod unix;
         pub use unix::*;
+    } else {
+        // Unknown target_family
     }
 }
index 3c2978ea206cca8e17b57c10dbad8330ce512974..5811c84c3aaa541010b7e363b52ffb311d1cd5f2 100644 (file)
@@ -23,7 +23,7 @@ macro_rules! cfg_if {
 macro_rules! __cfg_if_items {
     (($($not:meta,)*) ; ) => {};
     (($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => {
-        __cfg_if_apply! { cfg(all($($m,)* not(any($($not),*)))), $($it)* }
+        __cfg_if_apply! { cfg(all(not(any($($not),*)), $($m,)*)), $($it)* }
         __cfg_if_items! { ($($not,)* $($m,)*) ; $($rest)* }
     }
 }
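The change above moves the accumulated `not(any(...))` guard ahead of each branch's own predicates inside the generated `cfg(all(...))`. A hand-expanded sketch of what a two-branch `cfg_if!` now produces (illustrative only; the function is made up):

```rust
// Each branch is guarded by all(not(any(<earlier branches>)), <own cfgs>),
// so at most one of these definitions survives cfg stripping.
#[cfg(all(not(any()), windows))]
fn platform() -> &'static str { "windows" }

#[cfg(all(not(any(windows)), unix))]
fn platform() -> &'static str { "unix" }

#[cfg(all(not(any(windows, unix))))]
fn platform() -> &'static str { "neither windows nor unix" }

fn main() {
    println!("compiled for {}", platform());
}
```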
index 9a46ed05598eab24debfb1740edc6727b44cbbba..80916b3b7840de7ce16139fa50e89677b778431d 100644 (file)
@@ -3,13 +3,16 @@
 pub type c_long = i32;
 pub type c_ulong = u32;
 
-pub const __PTHREAD_MUTEX_SIZE__: usize = 40;
-pub const __PTHREAD_COND_SIZE__: usize = 24;
-pub const __PTHREAD_RWLOCK_SIZE__: usize = 124;
-
 s! {
     pub struct pthread_attr_t {
         __sig: c_long,
         __opaque: [::c_char; 36]
     }
 }
+
+pub const __PTHREAD_MUTEX_SIZE__: usize = 40;
+pub const __PTHREAD_COND_SIZE__: usize = 24;
+pub const __PTHREAD_RWLOCK_SIZE__: usize = 124;
+
+pub const TIOCTIMESTAMP: ::c_ulong = 0x40087459;
+pub const TIOCDCDTIMESTAMP: ::c_ulong = 0x40087458;
index 344582e35474fdd9e7b9d0a4cc248f544cd89d9b..92962280777aab34a60f09f6403a104c958ba734 100644 (file)
@@ -3,13 +3,16 @@
 pub type c_long = i64;
 pub type c_ulong = u64;
 
-pub const __PTHREAD_MUTEX_SIZE__: usize = 56;
-pub const __PTHREAD_COND_SIZE__: usize = 40;
-pub const __PTHREAD_RWLOCK_SIZE__: usize = 192;
-
 s! {
     pub struct pthread_attr_t {
         __sig: c_long,
         __opaque: [::c_char; 56]
     }
 }
+
+pub const __PTHREAD_MUTEX_SIZE__: usize = 56;
+pub const __PTHREAD_COND_SIZE__: usize = 40;
+pub const __PTHREAD_RWLOCK_SIZE__: usize = 192;
+
+pub const TIOCTIMESTAMP: ::c_ulong = 0x40107459;
+pub const TIOCDCDTIMESTAMP: ::c_ulong = 0x40107458;
index 3d96ed9308545a355f1a5b7797199610f87757da..f4c3e197d1b542d93d8ccf8aca7c8d873671d9d1 100644 (file)
@@ -257,6 +257,33 @@ s! {
         pub trailers: *mut ::iovec,
         pub trl_cnt: ::c_int,
     }
+
+    pub struct lconv {
+        pub decimal_point: *mut ::c_char,
+        pub thousands_sep: *mut ::c_char,
+        pub grouping: *mut ::c_char,
+        pub int_curr_symbol: *mut ::c_char,
+        pub currency_symbol: *mut ::c_char,
+        pub mon_decimal_point: *mut ::c_char,
+        pub mon_thousands_sep: *mut ::c_char,
+        pub mon_grouping: *mut ::c_char,
+        pub positive_sign: *mut ::c_char,
+        pub negative_sign: *mut ::c_char,
+        pub int_frac_digits: ::c_char,
+        pub frac_digits: ::c_char,
+        pub p_cs_precedes: ::c_char,
+        pub p_sep_by_space: ::c_char,
+        pub n_cs_precedes: ::c_char,
+        pub n_sep_by_space: ::c_char,
+        pub p_sign_posn: ::c_char,
+        pub n_sign_posn: ::c_char,
+        pub int_p_cs_precedes: ::c_char,
+        pub int_n_cs_precedes: ::c_char,
+        pub int_p_sep_by_space: ::c_char,
+        pub int_n_sep_by_space: ::c_char,
+        pub int_p_sign_posn: ::c_char,
+        pub int_n_sign_posn: ::c_char,
+    }
 }
 
 pub const EXIT_FAILURE: ::c_int = 1;
@@ -492,6 +519,70 @@ pub const F_VOLPOSMODE: ::c_int = 4;
 
 pub const O_ACCMODE: ::c_int = 3;
 
+pub const TIOCMODG: ::c_ulong = 0x40047403;
+pub const TIOCMODS: ::c_ulong = 0x80047404;
+pub const TIOCM_LE: ::c_int = 0x1;
+pub const TIOCM_DTR: ::c_int = 0x2;
+pub const TIOCM_RTS: ::c_int = 0x4;
+pub const TIOCM_ST: ::c_int = 0x8;
+pub const TIOCM_SR: ::c_int = 0x10;
+pub const TIOCM_CTS: ::c_int = 0x20;
+pub const TIOCM_CAR: ::c_int = 0x40;
+pub const TIOCM_CD: ::c_int = 0x40;
+pub const TIOCM_RNG: ::c_int = 0x80;
+pub const TIOCM_RI: ::c_int = 0x80;
+pub const TIOCM_DSR: ::c_int = 0x100;
+pub const TIOCEXCL: ::c_int = 0x2000740d;
+pub const TIOCNXCL: ::c_int = 0x2000740e;
+pub const TIOCFLUSH: ::c_ulong = 0x80047410;
+pub const TIOCGETD: ::c_ulong = 0x4004741a;
+pub const TIOCSETD: ::c_ulong = 0x8004741b;
+pub const TIOCIXON: ::c_uint = 0x20007481;
+pub const TIOCIXOFF: ::c_uint = 0x20007480;
+pub const TIOCSBRK: ::c_uint = 0x2000747b;
+pub const TIOCCBRK: ::c_uint = 0x2000747a;
+pub const TIOCSDTR: ::c_uint = 0x20007479;
+pub const TIOCCDTR: ::c_uint = 0x20007478;
+pub const TIOCGPGRP: ::c_ulong = 0x40047477;
+pub const TIOCSPGRP: ::c_ulong = 0x80047476;
+pub const TIOCOUTQ: ::c_ulong = 0x40047473;
+pub const TIOCSTI: ::c_ulong = 0x80017472;
+pub const TIOCNOTTY: ::c_uint = 0x20007471;
+pub const TIOCPKT: ::c_ulong = 0x80047470;
+pub const TIOCPKT_DATA: ::c_int = 0x0;
+pub const TIOCPKT_FLUSHREAD: ::c_int = 0x1;
+pub const TIOCPKT_FLUSHWRITE: ::c_int = 0x2;
+pub const TIOCPKT_STOP: ::c_int = 0x4;
+pub const TIOCPKT_START: ::c_int = 0x8;
+pub const TIOCPKT_NOSTOP: ::c_int = 0x10;
+pub const TIOCPKT_DOSTOP: ::c_int = 0x20;
+pub const TIOCPKT_IOCTL: ::c_int = 0x40;
+pub const TIOCSTOP: ::c_uint = 0x2000746f;
+pub const TIOCSTART: ::c_uint = 0x2000746e;
+pub const TIOCMSET: ::c_ulong = 0x8004746d;
+pub const TIOCMBIS: ::c_ulong = 0x8004746c;
+pub const TIOCMBIC: ::c_ulong = 0x8004746b;
+pub const TIOCMGET: ::c_ulong = 0x4004746a;
+pub const TIOCREMOTE: ::c_ulong = 0x80047469;
+pub const TIOCGWINSZ: ::c_ulong = 0x40087468;
+pub const TIOCSWINSZ: ::c_ulong = 0x80087467;
+pub const TIOCUCNTL: ::c_ulong = 0x80047466;
+pub const TIOCSTAT: ::c_uint = 0x20007465;
+pub const TIOCSCONS: ::c_uint = 0x20007463;
+pub const TIOCCONS: ::c_ulong = 0x80047462;
+pub const TIOCSCTTY: ::c_uint = 0x20007461;
+pub const TIOCEXT: ::c_ulong = 0x80047460;
+pub const TIOCSIG: ::c_uint = 0x2000745f;
+pub const TIOCDRAIN: ::c_uint = 0x2000745e;
+pub const TIOCMSDTRWAIT: ::c_ulong = 0x8004745b;
+pub const TIOCMGDTRWAIT: ::c_ulong = 0x4004745a;
+pub const TIOCSDRAINWAIT: ::c_ulong = 0x80047457;
+pub const TIOCGDRAINWAIT: ::c_ulong = 0x40047456;
+pub const TIOCDSIMICROCODE: ::c_uint = 0x20007455;
+pub const TIOCPTYGRANT: ::c_uint = 0x20007454;
+pub const TIOCPTYGNAME: ::c_uint = 0x40807453;
+pub const TIOCPTYUNLK: ::c_uint = 0x20007452;
+
 pub const SIGTRAP: ::c_int = 5;
 
 pub const GLOB_APPEND  : ::c_int = 0x0001;
@@ -927,6 +1018,15 @@ extern {
                     len: *mut ::off_t,
                     hdtr: *mut ::sf_hdtr,
                     flags: ::c_int) -> ::c_int;
+    pub fn openpty(amaster: *mut ::c_int,
+                   aslave: *mut ::c_int,
+                   name: *mut ::c_char,
+                   termp: *mut termios,
+                   winp: *mut ::winsize) -> ::c_int;
+    pub fn forkpty(amaster: *mut ::c_int,
+                   name: *mut ::c_char,
+                   termp: *mut termios,
+                   winp: *mut ::winsize) -> ::pid_t;
 }
 
 cfg_if! {
@@ -937,6 +1037,6 @@ cfg_if! {
         mod b64;
         pub use self::b64::*;
     } else {
-        // unknown arch...
+        // Unknown target_arch
     }
 }
diff --git a/src/liblibc/src/unix/bsd/freebsdlike/dragonfly.rs b/src/liblibc/src/unix/bsd/freebsdlike/dragonfly.rs
deleted file mode 100644 (file)
index a06d232..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-pub type fsblkcnt_t = ::c_uint;
-pub type fsfilcnt_t = ::c_uint;
-
-pub const PTHREAD_STACK_MIN: ::size_t = 1024;
-pub const KERN_PROC_PATHNAME: ::c_int = 9;
-pub const SIGSTKSZ: ::size_t = 40960;
-pub const MADV_INVAL: ::c_int = 10;
-
-extern {
-    pub fn __dfly_error() -> *const ::c_int;
-}
diff --git a/src/liblibc/src/unix/bsd/freebsdlike/dragonfly/mod.rs b/src/liblibc/src/unix/bsd/freebsdlike/dragonfly/mod.rs
new file mode 100644 (file)
index 0000000..141b2ad
--- /dev/null
@@ -0,0 +1,120 @@
+pub type clock_t = u64;
+pub type ino_t = u64;
+pub type nlink_t = u32;
+pub type blksize_t = i64;
+pub type clockid_t = ::c_ulong;
+
+pub type c_long = i64;
+pub type c_ulong = u64;
+pub type time_t = i64;
+pub type suseconds_t = i64;
+
+pub type uuid_t = ::uuid;
+
+pub type fsblkcnt_t = u64;
+pub type fsfilcnt_t = u64;
+
+s! {
+    pub struct dirent {
+        pub d_fileno: ::ino_t,
+        pub d_namlen: u16,
+        pub d_type: u8,
+        __unused1: u8,
+        __unused2: u32,
+        pub d_name: [::c_char; 256],
+    }
+
+    pub struct uuid {
+        pub time_low: u32,
+        pub time_mid: u16,
+        pub time_hi_and_version: u16,
+        pub clock_seq_hi_and_reserved: u8,
+        pub clock_seq_low: u8,
+        pub node: [u8; 6],
+    }
+
+    pub struct statvfs {
+        pub f_bsize: ::c_ulong,
+        pub f_frsize: ::c_ulong,
+        pub f_blocks: ::fsblkcnt_t,
+        pub f_bfree: ::fsblkcnt_t,
+        pub f_bavail: ::fsblkcnt_t,
+        pub f_files: ::fsfilcnt_t,
+        pub f_ffree: ::fsfilcnt_t,
+        pub f_favail: ::fsfilcnt_t,
+        pub f_fsid: ::c_ulong,
+        pub f_flag: ::c_ulong,
+        pub f_namemax: ::c_ulong,
+        pub f_owner: ::uid_t,
+        pub f_type: ::c_uint,
+        pub f_syncreads: u64,
+        pub f_syncwrites: u64,
+        pub f_asyncreads: u64,
+        pub f_asyncwrites: u64,
+        pub f_fsid_uuid: ::uuid_t,
+        pub f_uid_uuid: ::uuid_t,
+    }
+
+    pub struct stat {
+        pub st_ino: ::ino_t,
+        pub st_nlink: ::nlink_t,
+        pub st_dev: ::dev_t,
+        pub st_mode: ::mode_t,
+        pub st_padding1: ::uint16_t,
+        pub st_uid: ::uid_t,
+        pub st_gid: ::gid_t,
+        pub st_rdev: ::dev_t,
+        pub st_atime: ::time_t,
+        pub st_atime_nsec: ::c_long,
+        pub st_mtime: ::time_t,
+        pub st_mtime_nsec: ::c_long,
+        pub st_ctime: ::time_t,
+        pub st_ctime_nsec: ::c_long,
+        pub st_size: ::off_t,
+        pub st_blocks: ::int64_t,
+        pub st_blksize: ::uint32_t,
+        pub st_flags: ::uint32_t,
+        pub st_gen: ::uint32_t,
+        pub st_lspare: ::int32_t,
+        pub st_qspare1: ::int64_t,
+        pub st_qspare2: ::int64_t,
+    }
+}
+
+pub const RAND_MAX: ::c_int = 0x7fff_ffff;
+pub const PTHREAD_STACK_MIN: ::size_t = 1024;
+pub const KERN_PROC_PATHNAME: ::c_int = 9;
+pub const SIGSTKSZ: ::size_t = 40960;
+pub const MADV_INVAL: ::c_int = 10;
+pub const O_CLOEXEC: ::c_int = 0x00020000;
+pub const F_GETLK: ::c_int = 7;
+pub const F_SETLK: ::c_int = 8;
+pub const F_SETLKW: ::c_int = 9;
+pub const ELAST: ::c_int = 99;
+pub const RLIMIT_POSIXLOCKS: ::c_int = 11;
+pub const RLIM_NLIMITS: ::rlim_t = 12;
+
+pub const Q_GETQUOTA: ::c_int = 0x300;
+pub const Q_SETQUOTA: ::c_int = 0x400;
+
+pub const CLOCK_REALTIME: clockid_t = 0;
+pub const CLOCK_VIRTUAL: clockid_t = 1;
+pub const CLOCK_PROF: clockid_t = 2;
+pub const CLOCK_MONOTONIC: clockid_t = 4;
+pub const CLOCK_UPTIME: clockid_t = 5;
+pub const CLOCK_UPTIME_PRECISE: clockid_t = 7;
+pub const CLOCK_UPTIME_FAST: clockid_t = 8;
+pub const CLOCK_REALTIME_PRECISE: clockid_t = 9;
+pub const CLOCK_REALTIME_FAST: clockid_t = 10;
+pub const CLOCK_MONOTONIC_PRECISE: clockid_t = 11;
+pub const CLOCK_MONOTONIC_FAST: clockid_t = 12;
+pub const CLOCK_SECOND: clockid_t = 13;
+pub const CLOCK_THREAD_CPUTIME_ID: clockid_t = 14;
+pub const CLOCK_PROCESS_CPUTIME_ID: clockid_t = 15;
+
+extern {
+    pub fn mprotect(addr: *mut ::c_void, len: ::size_t, prot: ::c_int)
+                    -> ::c_int;
+    pub fn clock_getres(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int;
+    pub fn clock_gettime(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int;
+}
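For illustration only (not part of the patch), a hedged sketch of calling the `clock_gettime` binding declared above through the libc crate, on targets where the crate exposes it:

```rust
extern crate libc;

use std::mem;

fn monotonic_now() -> libc::timespec {
    unsafe {
        let mut ts: libc::timespec = mem::zeroed();
        // clock_gettime returns 0 on success and -1 on failure.
        let rc = libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut ts);
        assert_eq!(rc, 0);
        ts
    }
}

fn main() {
    let ts = monotonic_now();
    println!("{} s, {} ns since an arbitrary epoch", ts.tv_sec, ts.tv_nsec);
}
```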
diff --git a/src/liblibc/src/unix/bsd/freebsdlike/freebsd.rs b/src/liblibc/src/unix/bsd/freebsdlike/freebsd.rs
deleted file mode 100644 (file)
index d6fa96f..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-pub type fsblkcnt_t = ::uint64_t;
-pub type fsfilcnt_t = ::uint64_t;
-
-pub const PTHREAD_STACK_MIN: ::size_t = 2048;
-pub const KERN_PROC_PATHNAME: ::c_int = 12;
-pub const SIGSTKSZ: ::size_t = 34816;
-pub const SF_NODISKIO: ::c_int = 0x00000001;
-pub const SF_MNOWAIT: ::c_int = 0x00000002;
-pub const SF_SYNC: ::c_int = 0x00000004;
-
-extern {
-    pub fn __error() -> *mut ::c_int;
-}
diff --git a/src/liblibc/src/unix/bsd/freebsdlike/freebsd/mod.rs b/src/liblibc/src/unix/bsd/freebsdlike/freebsd/mod.rs
new file mode 100644 (file)
index 0000000..ae521f2
--- /dev/null
@@ -0,0 +1,108 @@
+pub type fflags_t = u32;
+pub type clock_t = i32;
+pub type ino_t = u32;
+pub type nlink_t = u16;
+pub type blksize_t = u32;
+pub type clockid_t = ::c_int;
+
+pub type fsblkcnt_t = ::uint64_t;
+pub type fsfilcnt_t = ::uint64_t;
+
+s! {
+    pub struct dirent {
+        pub d_fileno: u32,
+        pub d_reclen: u16,
+        pub d_type: u8,
+        pub d_namlen: u8,
+        pub d_name: [::c_char; 256],
+    }
+
+    pub struct statvfs {
+        pub f_bavail: ::fsblkcnt_t,
+        pub f_bfree: ::fsblkcnt_t,
+        pub f_blocks: ::fsblkcnt_t,
+        pub f_favail: ::fsfilcnt_t,
+        pub f_ffree: ::fsfilcnt_t,
+        pub f_files: ::fsfilcnt_t,
+        pub f_bsize: ::c_ulong,
+        pub f_flag: ::c_ulong,
+        pub f_frsize: ::c_ulong,
+        pub f_fsid: ::c_ulong,
+        pub f_namemax: ::c_ulong,
+    }
+}
+
+pub const RAND_MAX: ::c_int = 0x7fff_fffd;
+pub const PTHREAD_STACK_MIN: ::size_t = 2048;
+pub const KERN_PROC_PATHNAME: ::c_int = 12;
+pub const SIGSTKSZ: ::size_t = 34816;
+pub const SF_NODISKIO: ::c_int = 0x00000001;
+pub const SF_MNOWAIT: ::c_int = 0x00000002;
+pub const SF_SYNC: ::c_int = 0x00000004;
+pub const O_CLOEXEC: ::c_int = 0x00100000;
+pub const F_GETLK: ::c_int = 11;
+pub const F_SETLK: ::c_int = 12;
+pub const F_SETLKW: ::c_int = 13;
+pub const ELAST: ::c_int = 96;
+pub const RLIMIT_NPTS: ::c_int = 11;
+pub const RLIMIT_SWAP: ::c_int = 12;
+pub const RLIM_NLIMITS: ::rlim_t = 13;
+
+pub const Q_GETQUOTA: ::c_int = 0x700;
+pub const Q_SETQUOTA: ::c_int = 0x800;
+
+pub const POSIX_FADV_NORMAL: ::c_int = 0;
+pub const POSIX_FADV_RANDOM: ::c_int = 1;
+pub const POSIX_FADV_SEQUENTIAL: ::c_int = 2;
+pub const POSIX_FADV_WILLNEED: ::c_int = 3;
+pub const POSIX_FADV_DONTNEED: ::c_int = 4;
+pub const POSIX_FADV_NOREUSE: ::c_int = 5;
+
+pub const MADV_PROTECT: ::c_int = 10;
+pub const RUSAGE_THREAD: ::c_int = 1;
+
+pub const CLOCK_REALTIME: clockid_t = 0;
+pub const CLOCK_VIRTUAL: clockid_t = 1;
+pub const CLOCK_PROF: clockid_t = 2;
+pub const CLOCK_MONOTONIC: clockid_t = 4;
+pub const CLOCK_UPTIME: clockid_t = 5;
+pub const CLOCK_UPTIME_PRECISE: clockid_t = 7;
+pub const CLOCK_UPTIME_FAST: clockid_t = 8;
+pub const CLOCK_REALTIME_PRECISE: clockid_t = 9;
+pub const CLOCK_REALTIME_FAST: clockid_t = 10;
+pub const CLOCK_MONOTONIC_PRECISE: clockid_t = 11;
+pub const CLOCK_MONOTONIC_FAST: clockid_t = 12;
+pub const CLOCK_SECOND: clockid_t = 13;
+pub const CLOCK_THREAD_CPUTIME_ID: clockid_t = 14;
+pub const CLOCK_PROCESS_CPUTIME_ID: clockid_t = 15;
+
+extern {
+    pub fn __error() -> *mut ::c_int;
+
+    pub fn mprotect(addr: *const ::c_void, len: ::size_t, prot: ::c_int)
+                    -> ::c_int;
+
+    pub fn clock_getres(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int;
+    pub fn clock_gettime(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int;
+
+    pub fn posix_fallocate(fd: ::c_int, offset: ::off_t,
+                           len: ::off_t) -> ::c_int;
+    pub fn posix_fadvise(fd: ::c_int, offset: ::off_t, len: ::off_t,
+                         advise: ::c_int) -> ::c_int;
+    pub fn mkostemp(template: *mut ::c_char, flags: ::c_int) -> ::c_int;
+    pub fn mkostemps(template: *mut ::c_char,
+                     suffixlen: ::c_int,
+                     flags: ::c_int) -> ::c_int;
+}
+
+cfg_if! {
+    if #[cfg(target_arch = "x86")] {
+        mod x86;
+        pub use self::x86::*;
+    } else if #[cfg(target_arch = "x86_64")] {
+        mod x86_64;
+        pub use self::x86_64::*;
+    } else {
+        // Unknown target_arch
+    }
+}
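Among the bindings declared above, `mkostemp` creates and opens a unique temporary file from a template ending in `XXXXXX`. An illustrative sketch (not part of the patch), assuming a target where the crate declares `mkostemp`:

```rust
extern crate libc;

fn temp_fd() -> libc::c_int {
    // The template is modified in place, so keep it in a mutable buffer
    // with a trailing NUL.
    let mut template = *b"/tmp/libc-demo.XXXXXX\0";
    unsafe {
        libc::mkostemp(template.as_mut_ptr() as *mut libc::c_char,
                       libc::O_CLOEXEC)
    }
}

fn main() {
    let fd = temp_fd();
    println!("mkostemp returned fd {}", fd);
}
```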
diff --git a/src/liblibc/src/unix/bsd/freebsdlike/freebsd/x86.rs b/src/liblibc/src/unix/bsd/freebsdlike/freebsd/x86.rs
new file mode 100644 (file)
index 0000000..8a5e5f9
--- /dev/null
@@ -0,0 +1,31 @@
+pub type c_long = i32;
+pub type c_ulong = u32;
+pub type time_t = i32;
+pub type suseconds_t = i32;
+
+s! {
+    pub struct stat {
+        pub st_dev: ::dev_t,
+        pub st_ino: ::ino_t,
+        pub st_mode: ::mode_t,
+        pub st_nlink: ::nlink_t,
+        pub st_uid: ::uid_t,
+        pub st_gid: ::gid_t,
+        pub st_rdev: ::dev_t,
+        pub st_atime: ::time_t,
+        pub st_atime_nsec: ::c_long,
+        pub st_mtime: ::time_t,
+        pub st_mtime_nsec: ::c_long,
+        pub st_ctime: ::time_t,
+        pub st_ctime_nsec: ::c_long,
+        pub st_size: ::off_t,
+        pub st_blocks: ::blkcnt_t,
+        pub st_blksize: ::blksize_t,
+        pub st_flags: ::fflags_t,
+        pub st_gen: ::uint32_t,
+        pub st_lspare: ::int32_t,
+        pub st_birthtime: ::time_t,
+        pub st_birthtime_nsec: ::c_long,
+        __unused: [u8; 8],
+    }
+}
diff --git a/src/liblibc/src/unix/bsd/freebsdlike/freebsd/x86_64.rs b/src/liblibc/src/unix/bsd/freebsdlike/freebsd/x86_64.rs
new file mode 100644 (file)
index 0000000..a2da845
--- /dev/null
@@ -0,0 +1,30 @@
+pub type c_long = i64;
+pub type c_ulong = u64;
+pub type time_t = i64;
+pub type suseconds_t = i64;
+
+s! {
+    pub struct stat {
+        pub st_dev: ::dev_t,
+        pub st_ino: ::ino_t,
+        pub st_mode: ::mode_t,
+        pub st_nlink: ::nlink_t,
+        pub st_uid: ::uid_t,
+        pub st_gid: ::gid_t,
+        pub st_rdev: ::dev_t,
+        pub st_atime: ::time_t,
+        pub st_atime_nsec: ::c_long,
+        pub st_mtime: ::time_t,
+        pub st_mtime_nsec: ::c_long,
+        pub st_ctime: ::time_t,
+        pub st_ctime_nsec: ::c_long,
+        pub st_size: ::off_t,
+        pub st_blocks: ::blkcnt_t,
+        pub st_blksize: ::blksize_t,
+        pub st_flags: ::fflags_t,
+        pub st_gen: ::uint32_t,
+        pub st_lspare: ::int32_t,
+        pub st_birthtime: ::time_t,
+        pub st_birthtime_nsec: ::c_long,
+    }
+}
index 011cf867626622cade3328c2047c76115e5d3320..8828401e4b5a3d49e1b084b1c497d0cbced0f632 100644 (file)
@@ -1,10 +1,5 @@
-pub type clock_t = i32;
 pub type dev_t = u32;
-pub type ino_t = u32;
 pub type mode_t = u16;
-pub type nlink_t = u16;
-pub type blksize_t = u32;
-pub type fflags_t = u32;
 pub type pthread_attr_t = *mut ::c_void;
 pub type rlim_t = i64;
 pub type pthread_mutex_t = *mut ::c_void;
@@ -18,14 +13,6 @@ pub type speed_t = ::c_uint;
 pub enum timezone {}
 
 s! {
-    pub struct dirent {
-        pub d_fileno: u32,
-        pub d_reclen: u16,
-        pub d_type: u8,
-        pub d_namlen: u8,
-        pub d_name: [::c_char; 256],
-    }
-
     pub struct glob_t {
         pub gl_pathc: ::size_t,
         __unused1: ::size_t,
@@ -88,20 +75,6 @@ s! {
         pub ss_flags: ::c_int,
     }
 
-    pub struct statvfs {
-        pub f_bavail: ::fsblkcnt_t,
-        pub f_bfree: ::fsblkcnt_t,
-        pub f_blocks: ::fsblkcnt_t,
-        pub f_favail: ::fsfilcnt_t,
-        pub f_ffree: ::fsfilcnt_t,
-        pub f_files: ::fsfilcnt_t,
-        pub f_bsize: ::c_ulong,
-        pub f_flag: ::c_ulong,
-        pub f_frsize: ::c_ulong,
-        pub f_fsid: ::c_ulong,
-        pub f_namemax: ::c_ulong,
-    }
-
     pub struct sched_param {
         pub sched_priority: ::c_int,
     }
@@ -137,6 +110,7 @@ s! {
         pub l_pid: ::pid_t,
         pub l_type: ::c_short,
         pub l_whence: ::c_short,
+        #[cfg(not(target_os = "dragonfly"))]
         pub l_sysid: ::c_int,
     }
 
@@ -146,11 +120,37 @@ s! {
         pub trailers: *mut ::iovec,
         pub trl_cnt: ::c_int,
     }
+
+    pub struct lconv {
+        pub decimal_point: *mut ::c_char,
+        pub thousands_sep: *mut ::c_char,
+        pub grouping: *mut ::c_char,
+        pub int_curr_symbol: *mut ::c_char,
+        pub currency_symbol: *mut ::c_char,
+        pub mon_decimal_point: *mut ::c_char,
+        pub mon_thousands_sep: *mut ::c_char,
+        pub mon_grouping: *mut ::c_char,
+        pub positive_sign: *mut ::c_char,
+        pub negative_sign: *mut ::c_char,
+        pub int_frac_digits: ::c_char,
+        pub frac_digits: ::c_char,
+        pub p_cs_precedes: ::c_char,
+        pub p_sep_by_space: ::c_char,
+        pub n_cs_precedes: ::c_char,
+        pub n_sep_by_space: ::c_char,
+        pub p_sign_posn: ::c_char,
+        pub n_sign_posn: ::c_char,
+        pub int_p_cs_precedes: ::c_char,
+        pub int_n_cs_precedes: ::c_char,
+        pub int_p_sep_by_space: ::c_char,
+        pub int_n_sep_by_space: ::c_char,
+        pub int_p_sign_posn: ::c_char,
+        pub int_n_sign_posn: ::c_char,
+    }
 }
 
 pub const EXIT_FAILURE: ::c_int = 1;
 pub const EXIT_SUCCESS: ::c_int = 0;
-pub const RAND_MAX: ::c_int = 0x7fff_fffd;
 pub const EOF: ::c_int = -1;
 pub const SEEK_SET: ::c_int = 0;
 pub const SEEK_CUR: ::c_int = 1;
@@ -173,7 +173,6 @@ pub const O_CREAT: ::c_int = 512;
 pub const O_EXCL: ::c_int = 2048;
 pub const O_NOCTTY: ::c_int = 32768;
 pub const O_TRUNC: ::c_int = 1024;
-pub const O_CLOEXEC: ::c_int = 0x00100000;
 pub const S_IFIFO: mode_t = 4096;
 pub const S_IFCHR: mode_t = 8192;
 pub const S_IFBLK: mode_t = 24576;
@@ -209,9 +208,6 @@ pub const F_TEST: ::c_int = 3;
 pub const F_TLOCK: ::c_int = 2;
 pub const F_ULOCK: ::c_int = 0;
 pub const F_DUPFD_CLOEXEC: ::c_int = 17;
-pub const F_GETLK: ::c_int = 11;
-pub const F_SETLK: ::c_int = 12;
-pub const F_SETLKW: ::c_int = 13;
 pub const SIGHUP: ::c_int = 1;
 pub const SIGINT: ::c_int = 2;
 pub const SIGQUIT: ::c_int = 3;
@@ -337,7 +333,6 @@ pub const EBADMSG: ::c_int = 89;
 pub const EMULTIHOP: ::c_int = 90;
 pub const ENOLINK: ::c_int = 91;
 pub const EPROTO: ::c_int = 92;
-pub const ELAST: ::c_int = 96;
 
 pub const F_DUPFD: ::c_int = 0;
 pub const F_GETFD: ::c_int = 1;
@@ -365,13 +360,6 @@ pub const POSIX_MADV_SEQUENTIAL: ::c_int = 2;
 pub const POSIX_MADV_WILLNEED: ::c_int = 3;
 pub const POSIX_MADV_DONTNEED: ::c_int = 4;
 
-pub const POSIX_FADV_NORMAL: ::c_int = 0;
-pub const POSIX_FADV_RANDOM: ::c_int = 1;
-pub const POSIX_FADV_SEQUENTIAL: ::c_int = 2;
-pub const POSIX_FADV_WILLNEED: ::c_int = 3;
-pub const POSIX_FADV_DONTNEED: ::c_int = 4;
-pub const POSIX_FADV_NOREUSE: ::c_int = 5;
-
 pub const _SC_IOV_MAX: ::c_int = 56;
 pub const _SC_GETGR_R_SIZE_MAX: ::c_int = 70;
 pub const _SC_GETPW_R_SIZE_MAX: ::c_int = 71;
@@ -405,9 +393,6 @@ pub const _SC_XOPEN_XCU_VERSION: ::c_int = 117;
 pub const PTHREAD_CREATE_JOINABLE: ::c_int = 0;
 pub const PTHREAD_CREATE_DETACHED: ::c_int = 1;
 
-pub const CLOCK_REALTIME: ::c_int = 0;
-pub const CLOCK_MONOTONIC: ::c_int = 4;
-
 pub const RLIMIT_CPU: ::c_int = 0;
 pub const RLIMIT_FSIZE: ::c_int = 1;
 pub const RLIMIT_DATA: ::c_int = 2;
@@ -420,15 +405,10 @@ pub const RLIMIT_NOFILE: ::c_int = 8;
 pub const RLIMIT_SBSIZE: ::c_int = 9;
 pub const RLIMIT_VMEM: ::c_int = 10;
 pub const RLIMIT_AS: ::c_int = RLIMIT_VMEM;
-pub const RLIMIT_NPTS: ::c_int = 11;
-pub const RLIMIT_SWAP: ::c_int = 12;
-
-pub const RLIM_NLIMITS: rlim_t = 13;
 pub const RLIM_INFINITY: rlim_t = 0x7fff_ffff_ffff_ffff;
 
 pub const RUSAGE_SELF: ::c_int = 0;
 pub const RUSAGE_CHILDREN: ::c_int = -1;
-pub const RUSAGE_THREAD: ::c_int = 1;
 
 pub const MADV_NORMAL: ::c_int = 0;
 pub const MADV_RANDOM: ::c_int = 1;
@@ -440,7 +420,6 @@ pub const MADV_NOSYNC: ::c_int = 6;
 pub const MADV_AUTOSYNC: ::c_int = 7;
 pub const MADV_NOCORE: ::c_int = 8;
 pub const MADV_CORE: ::c_int = 9;
-pub const MADV_PROTECT: ::c_int = 10;
 
 pub const MINCORE_INCORE: ::c_int =  0x1;
 pub const MINCORE_REFERENCED: ::c_int = 0x2;
@@ -584,14 +563,12 @@ pub const ST_NOSUID: ::c_ulong = 2;
 
 pub const NI_MAXHOST: ::size_t = 1025;
 
-pub const Q_GETQUOTA: ::c_int = 0x700;
-pub const Q_SETQUOTA: ::c_int = 0x800;
-
 pub const RTLD_LOCAL: ::c_int = 0;
 pub const RTLD_NODELETE: ::c_int = 0x1000;
 pub const RTLD_NOLOAD: ::c_int = 0x2000;
 pub const RTLD_GLOBAL: ::c_int = 0x100;
 
+#[link(name = "util")]
 extern {
     pub fn getnameinfo(sa: *const ::sockaddr,
                        salen: ::socklen_t,
@@ -606,8 +583,6 @@ extern {
                            mibp: *mut ::c_int,
                            sizep: *mut ::size_t)
                            -> ::c_int;
-    pub fn mprotect(addr: *const ::c_void, len: ::size_t, prot: ::c_int)
-                    -> ::c_int;
     pub fn shm_open(name: *const ::c_char, oflag: ::c_int, mode: ::mode_t)
                     -> ::c_int;
     pub fn sysctl(name: *const ::c_int,
@@ -623,13 +598,13 @@ extern {
                         newp: *const ::c_void,
                         newlen: ::size_t)
                         -> ::c_int;
-    pub fn clock_gettime(clk_id: ::c_int, tp: *mut ::timespec) -> ::c_int;
-    pub fn pthread_set_name_np(tid: ::pthread_t, name: *const ::c_char);
-    pub fn posix_fallocate(fd: ::c_int, offset: ::off_t,
-                           len: ::off_t) -> ::c_int;
-    pub fn sched_setscheduler(pid: ::pid_t, policy: ::c_int, param: *const sched_param) -> ::c_int;
+    pub fn sched_setscheduler(pid: ::pid_t,
+                              policy: ::c_int,
+                              param: *const sched_param) -> ::c_int;
     pub fn sched_getscheduler(pid: ::pid_t) -> ::c_int;
-    pub fn memrchr(cx: *const ::c_void, c: ::c_int, n: ::size_t) -> *mut ::c_void;
+    pub fn memrchr(cx: *const ::c_void,
+                   c: ::c_int,
+                   n: ::size_t) -> *mut ::c_void;
     pub fn sendfile(fd: ::c_int,
                     s: ::c_int,
                     offset: ::off_t,
@@ -637,23 +612,29 @@ extern {
                     hdtr: *mut ::sf_hdtr,
                     sbytes: *mut ::off_t,
                     flags: ::c_int) -> ::c_int;
+    pub fn sigtimedwait(set: *const sigset_t,
+                        info: *mut siginfo_t,
+                        timeout: *const ::timespec) -> ::c_int;
+    pub fn sigwaitinfo(set: *const sigset_t,
+                       info: *mut siginfo_t) -> ::c_int;
+    pub fn openpty(amaster: *mut ::c_int,
+                   aslave: *mut ::c_int,
+                   name: *mut ::c_char,
+                   termp: *mut termios,
+                   winp: *mut ::winsize) -> ::c_int;
+    pub fn forkpty(amaster: *mut ::c_int,
+                   name: *mut ::c_char,
+                   termp: *mut termios,
+                   winp: *mut ::winsize) -> ::pid_t;
 
-    pub fn posix_fadvise(fd: ::c_int, offset: ::off_t, len: ::off_t, 
-                         advise: ::c_int) -> ::c_int;
-    pub fn mkostemp(template: *mut ::c_char, flags: ::c_int) -> ::c_int;
-    pub fn mkostemps(template: *mut ::c_char, suffixlen: ::c_int, flags: ::c_int) -> ::c_int;
-}
-
-cfg_if! {
-    if #[cfg(target_arch = "x86")] {
-        mod x86;
-        pub use self::x86::*;
-    } else if #[cfg(target_arch = "x86_64")] {
-        mod x86_64;
-        pub use self::x86_64::*;
-    } else {
-        // ...
-    }
+    pub fn pthread_set_name_np(tid: ::pthread_t, name: *const ::c_char);
+    pub fn pthread_attr_get_np(tid: ::pthread_t,
+                               attr: *mut ::pthread_attr_t) -> ::c_int;
+    pub fn pthread_attr_getguardsize(attr: *const ::pthread_attr_t,
+                                     guardsize: *mut ::size_t) -> ::c_int;
+    pub fn pthread_attr_getstack(attr: *const ::pthread_attr_t,
+                                 stackaddr: *mut *mut ::c_void,
+                                 stacksize: *mut ::size_t) -> ::c_int;
 }
 
 cfg_if! {
diff --git a/src/liblibc/src/unix/bsd/freebsdlike/x86.rs b/src/liblibc/src/unix/bsd/freebsdlike/x86.rs
deleted file mode 100644 (file)
index 8a5e5f9..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-pub type c_long = i32;
-pub type c_ulong = u32;
-pub type time_t = i32;
-pub type suseconds_t = i32;
-
-s! {
-    pub struct stat {
-        pub st_dev: ::dev_t,
-        pub st_ino: ::ino_t,
-        pub st_mode: ::mode_t,
-        pub st_nlink: ::nlink_t,
-        pub st_uid: ::uid_t,
-        pub st_gid: ::gid_t,
-        pub st_rdev: ::dev_t,
-        pub st_atime: ::time_t,
-        pub st_atime_nsec: ::c_long,
-        pub st_mtime: ::time_t,
-        pub st_mtime_nsec: ::c_long,
-        pub st_ctime: ::time_t,
-        pub st_ctime_nsec: ::c_long,
-        pub st_size: ::off_t,
-        pub st_blocks: ::blkcnt_t,
-        pub st_blksize: ::blksize_t,
-        pub st_flags: ::fflags_t,
-        pub st_gen: ::uint32_t,
-        pub st_lspare: ::int32_t,
-        pub st_birthtime: ::time_t,
-        pub st_birthtime_nsec: ::c_long,
-        __unused: [u8; 8],
-    }
-}
diff --git a/src/liblibc/src/unix/bsd/freebsdlike/x86_64.rs b/src/liblibc/src/unix/bsd/freebsdlike/x86_64.rs
deleted file mode 100644 (file)
index a2da845..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-pub type c_long = i64;
-pub type c_ulong = u64;
-pub type time_t = i64;
-pub type suseconds_t = i64;
-
-s! {
-    pub struct stat {
-        pub st_dev: ::dev_t,
-        pub st_ino: ::ino_t,
-        pub st_mode: ::mode_t,
-        pub st_nlink: ::nlink_t,
-        pub st_uid: ::uid_t,
-        pub st_gid: ::gid_t,
-        pub st_rdev: ::dev_t,
-        pub st_atime: ::time_t,
-        pub st_atime_nsec: ::c_long,
-        pub st_mtime: ::time_t,
-        pub st_mtime_nsec: ::c_long,
-        pub st_ctime: ::time_t,
-        pub st_ctime_nsec: ::c_long,
-        pub st_size: ::off_t,
-        pub st_blocks: ::blkcnt_t,
-        pub st_blksize: ::blksize_t,
-        pub st_flags: ::fflags_t,
-        pub st_gen: ::uint32_t,
-        pub st_lspare: ::int32_t,
-        pub st_birthtime: ::time_t,
-        pub st_birthtime_nsec: ::c_long,
-    }
-}
index d6752a5f9f697a8a82b46ca625eba5ec98408b40..a280fc92631445dddf855aa45c206b0dc42ddaac 100644 (file)
@@ -63,10 +63,10 @@ s! {
 
     pub struct fd_set {
         #[cfg(all(target_pointer_width = "64",
-                  target_os = "freebsd"))]
+                  any(target_os = "freebsd", target_os = "dragonfly")))]
         fds_bits: [i64; FD_SETSIZE / 64],
         #[cfg(not(all(target_pointer_width = "64",
-                      target_os = "freebsd")))]
+                      any(target_os = "freebsd", target_os = "dragonfly"))))]
         fds_bits: [i32; FD_SETSIZE / 32],
     }
 
@@ -85,11 +85,26 @@ s! {
     }
 
     pub struct utsname {
+        #[cfg(not(target_os = "dragonfly"))]
         pub sysname: [::c_char; 256],
+        #[cfg(target_os = "dragonfly")]
+        pub sysname: [::c_char; 32],
+        #[cfg(not(target_os = "dragonfly"))]
         pub nodename: [::c_char; 256],
+        #[cfg(target_os = "dragonfly")]
+        pub nodename: [::c_char; 32],
+        #[cfg(not(target_os = "dragonfly"))]
         pub release: [::c_char; 256],
+        #[cfg(target_os = "dragonfly")]
+        pub release: [::c_char; 32],
+        #[cfg(not(target_os = "dragonfly"))]
         pub version: [::c_char; 256],
+        #[cfg(target_os = "dragonfly")]
+        pub version: [::c_char; 32],
+        #[cfg(not(target_os = "dragonfly"))]
         pub machine: [::c_char; 256],
+        #[cfg(target_os = "dragonfly")]
+        pub machine: [::c_char; 32],
     }
 
     pub struct msghdr {
@@ -107,6 +122,14 @@ s! {
     }
 }
 
+pub const LC_ALL: ::c_int = 0;
+pub const LC_COLLATE: ::c_int = 1;
+pub const LC_CTYPE: ::c_int = 2;
+pub const LC_MONETARY: ::c_int = 3;
+pub const LC_NUMERIC: ::c_int = 4;
+pub const LC_TIME: ::c_int = 5;
+pub const LC_MESSAGES: ::c_int = 6;
+
 pub const FIOCLEX: ::c_ulong = 0x20006601;
 pub const FIONBIO: ::c_ulong = 0x8004667e;
 
@@ -267,6 +290,7 @@ pub const NOFLSH: ::tcflag_t = 0x80000000;
 pub const WNOHANG: ::c_int = 1;
 
 pub const RTLD_NOW: ::c_int = 0x2;
+pub const RTLD_DEFAULT: *mut ::c_void = -2isize as *mut ::c_void;
 
 f! {
     pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () {
@@ -335,6 +359,6 @@ cfg_if! {
         mod freebsdlike;
         pub use self::freebsdlike::*;
     } else {
-        // ...
+        // Unknown target_os
     }
 }
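`RTLD_DEFAULT`, added above, is the pseudo-handle that asks `dlsym` to resolve a symbol in the default search scope rather than in a handle returned by `dlopen`. A hedged sketch, assuming a target where the crate defines both `dlsym` and `RTLD_DEFAULT`:

```rust
extern crate libc;

use std::ffi::CString;

fn symbol_is_visible(name: &str) -> bool {
    let c_name = CString::new(name).unwrap();
    // A null result means the symbol is not visible in the default scope.
    let sym = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c_name.as_ptr()) };
    !sym.is_null()
}

fn main() {
    println!("malloc visible: {}", symbol_is_visible("malloc"));
}
```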
index ab9cc9f0cc5f45b32c3a0d18ac7200e367eb88a2..aeba9fcfec798203f1ec886fcace53975a6058c9 100644 (file)
@@ -106,6 +106,33 @@ s! {
         pub dli_sname: *const ::c_char,
         pub dli_saddr: *mut ::c_void,
     }
+
+    pub struct lconv {
+        pub decimal_point: *mut ::c_char,
+        pub thousands_sep: *mut ::c_char,
+        pub grouping: *mut ::c_char,
+        pub int_curr_symbol: *mut ::c_char,
+        pub currency_symbol: *mut ::c_char,
+        pub mon_decimal_point: *mut ::c_char,
+        pub mon_thousands_sep: *mut ::c_char,
+        pub mon_grouping: *mut ::c_char,
+        pub positive_sign: *mut ::c_char,
+        pub negative_sign: *mut ::c_char,
+        pub int_frac_digits: ::c_char,
+        pub frac_digits: ::c_char,
+        pub p_cs_precedes: ::c_char,
+        pub p_sep_by_space: ::c_char,
+        pub n_cs_precedes: ::c_char,
+        pub n_sep_by_space: ::c_char,
+        pub p_sign_posn: ::c_char,
+        pub n_sign_posn: ::c_char,
+        pub int_p_cs_precedes: ::c_char,
+        pub int_n_cs_precedes: ::c_char,
+        pub int_p_sep_by_space: ::c_char,
+        pub int_n_sep_by_space: ::c_char,
+        pub int_p_sign_posn: ::c_char,
+        pub int_n_sign_posn: ::c_char,
+    }
 }
 
 pub const O_CLOEXEC: ::c_int = 0x10000;
@@ -227,6 +254,10 @@ extern {
                        flags: ::c_int) -> ::c_int;
     pub fn mprotect(addr: *const ::c_void, len: ::size_t, prot: ::c_int)
                     -> ::c_int;
+    pub fn pthread_main_np() -> ::c_int;
+    pub fn pthread_set_name_np(tid: ::pthread_t, name: *const ::c_char);
+    pub fn pthread_stackseg_np(thread: ::pthread_t,
+                               sinfo: *mut ::stack_t) -> ::c_int;
     pub fn sysctl(name: *mut ::c_int,
                   namelen: ::c_uint,
                   oldp: *mut ::c_void,
index 490e8056c2759e2558988c381465c3e3d2f4b585..c772e2e9c72bc798559ec884a240a72527424367 100644 (file)
@@ -8,6 +8,7 @@ pub type pthread_key_t = ::c_int;
 pub type rlim_t = u64;
 pub type speed_t = ::c_uint;
 pub type tcflag_t = ::c_uint;
+pub type clockid_t = ::c_int;
 
 pub enum timezone {}
 
@@ -258,8 +259,16 @@ pub const _SC_XOPEN_SHM : ::c_int = 30;
 pub const PTHREAD_CREATE_JOINABLE : ::c_int = 0;
 pub const PTHREAD_CREATE_DETACHED : ::c_int = 1;
 
-pub const CLOCK_REALTIME : ::c_int = 0;
-pub const CLOCK_MONOTONIC : ::c_int = 3;
+// http://man.openbsd.org/OpenBSD-current/man2/clock_getres.2
+// The man page says clock_gettime(3) can accept various values as clockid_t but
+// http://fxr.watson.org/fxr/source/kern/kern_time.c?v=OPENBSD;im=excerpts#L161
+// the implementation rejects anything other than the below two
+//
+// http://netbsd.gw.com/cgi-bin/man-cgi?clock_gettime
+// https://github.com/jsonn/src/blob/HEAD/sys/kern/subr_time.c#L222
+// Basically the same goes for NetBSD
+pub const CLOCK_REALTIME: clockid_t = 0;
+pub const CLOCK_MONOTONIC: clockid_t = 3;
 
 pub const RLIMIT_CPU: ::c_int = 0;
 pub const RLIMIT_FSIZE: ::c_int = 1;
@@ -376,21 +385,35 @@ pub const Q_SETQUOTA: ::c_int = 0x400;
 
 pub const RTLD_GLOBAL: ::c_int = 0x100;
 
+#[link(name = "util")]
 extern {
     pub fn mincore(addr: *mut ::c_void, len: ::size_t,
                    vec: *mut ::c_char) -> ::c_int;
+    #[cfg_attr(target_os = "netbsd", link_name = "__clock_getres50")]
+    pub fn clock_getres(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int;
     #[cfg_attr(target_os = "netbsd", link_name = "__clock_gettime50")]
-    pub fn clock_gettime(clk_id: ::c_int, tp: *mut ::timespec) -> ::c_int;
+    pub fn clock_gettime(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int;
     pub fn __errno() -> *mut ::c_int;
     pub fn shm_open(name: *const ::c_char, oflag: ::c_int, mode: ::mode_t)
                     -> ::c_int;
-    pub fn pthread_main_np() -> ::c_int;
-    pub fn pthread_set_name_np(tid: ::pthread_t, name: *const ::c_char);
-    pub fn pthread_stackseg_np(thread: ::pthread_t,
-                               sinfo: *mut ::stack_t) -> ::c_int;
-    pub fn memrchr(cx: *const ::c_void, c: ::c_int, n: ::size_t) -> *mut ::c_void;
+    pub fn memrchr(cx: *const ::c_void,
+                   c: ::c_int,
+                   n: ::size_t) -> *mut ::c_void;
     pub fn mkostemp(template: *mut ::c_char, flags: ::c_int) -> ::c_int;
-    pub fn mkostemps(template: *mut ::c_char, suffixlen: ::c_int, flags: ::c_int) -> ::c_int;
+    pub fn mkostemps(template: *mut ::c_char,
+                     suffixlen: ::c_int,
+                     flags: ::c_int) -> ::c_int;
+    pub fn futimens(fd: ::c_int, times: *const ::timespec) -> ::c_int;
+    pub fn fdatasync(fd: ::c_int) -> ::c_int;
+    pub fn openpty(amaster: *mut ::c_int,
+                   aslave: *mut ::c_int,
+                   name: *mut ::c_char,
+                   termp: *mut termios,
+                   winp: *mut ::winsize) -> ::c_int;
+    pub fn forkpty(amaster: *mut ::c_int,
+                   name: *mut ::c_char,
+                   termp: *mut termios,
+                   winp: *mut ::winsize) -> ::pid_t;
 }
 
 cfg_if! {
@@ -400,8 +423,10 @@ cfg_if! {
     } else if #[cfg(target_os = "netbsd")] {
         mod netbsd;
         pub use self::netbsd::*;
-    } else {
+    } else if #[cfg(target_os = "openbsd")] {
         mod openbsd;
         pub use self::openbsd::*;
+    } else {
+        // Unknown target_os
     }
 }
index b350f1d72c5249eedcbf2df212962c34b4713af3..2d1def9c74448e35caf32b570d912e88e8eac52f 100644 (file)
@@ -190,6 +190,33 @@ s! {
         pub dli_sname: *const ::c_char,
         pub dli_saddr: *const ::c_void,
     }
+
+    pub struct lconv {
+        pub decimal_point: *mut ::c_char,
+        pub thousands_sep: *mut ::c_char,
+        pub grouping: *mut ::c_char,
+        pub int_curr_symbol: *mut ::c_char,
+        pub currency_symbol: *mut ::c_char,
+        pub mon_decimal_point: *mut ::c_char,
+        pub mon_thousands_sep: *mut ::c_char,
+        pub mon_grouping: *mut ::c_char,
+        pub positive_sign: *mut ::c_char,
+        pub negative_sign: *mut ::c_char,
+        pub int_frac_digits: ::c_char,
+        pub frac_digits: ::c_char,
+        pub p_cs_precedes: ::c_char,
+        pub p_sep_by_space: ::c_char,
+        pub n_cs_precedes: ::c_char,
+        pub n_sep_by_space: ::c_char,
+        pub p_sign_posn: ::c_char,
+        pub n_sign_posn: ::c_char,
+        pub int_p_cs_precedes: ::c_char,
+        pub int_n_cs_precedes: ::c_char,
+        pub int_p_sep_by_space: ::c_char,
+        pub int_n_sep_by_space: ::c_char,
+        pub int_p_sign_posn: ::c_char,
+        pub int_n_sign_posn: ::c_char,
+    }
 }
 
 pub const O_CLOEXEC: ::c_int = 0x400000;
@@ -378,4 +405,10 @@ extern {
     pub fn pthread_attr_getstack(attr: *const ::pthread_attr_t,
                                  stackaddr: *mut *mut ::c_void,
                                  stacksize: *mut ::size_t) -> ::c_int;
+    #[link_name = "__sigtimedwait50"]
+    pub fn sigtimedwait(set: *const sigset_t,
+                        info: *mut siginfo_t,
+                        timeout: *const ::timespec) -> ::c_int;
+    pub fn sigwaitinfo(set: *const sigset_t,
+                       info: *mut siginfo_t) -> ::c_int;
 }
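
As a hedged sketch of how the sigtimedwait/sigwaitinfo declarations added above are typically consumed (the helper name and the one-second timeout are illustrative, not from this commit): the signal must first be blocked on the calling thread, then claimed synchronously.

    use std::mem;

    // Illustrative only: wait up to one second for SIGUSR1 on this thread.
    unsafe fn wait_for_sigusr1() -> bool {
        let mut set: libc::sigset_t = mem::zeroed();
        libc::sigemptyset(&mut set);
        libc::sigaddset(&mut set, libc::SIGUSR1);
        // The signal has to be blocked before sigtimedwait may claim it.
        libc::pthread_sigmask(libc::SIG_BLOCK, &set, std::ptr::null_mut());

        let mut timeout: libc::timespec = mem::zeroed();
        timeout.tv_sec = 1;
        let mut info: libc::siginfo_t = mem::zeroed();
        libc::sigtimedwait(&set, &mut info, &timeout) == libc::SIGUSR1
    }
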
index fc38478fd14f5f13e4097bfce91b2c98edd79691..027714556939ea02f03dacb31bdd2543254bea71 100644 (file)
@@ -109,6 +109,33 @@ s! {
         pub dli_sname: *const ::c_char,
         pub dli_saddr: *mut ::c_void,
     }
+
+    pub struct lconv {
+        pub decimal_point: *mut ::c_char,
+        pub thousands_sep: *mut ::c_char,
+        pub grouping: *mut ::c_char,
+        pub int_curr_symbol: *mut ::c_char,
+        pub currency_symbol: *mut ::c_char,
+        pub mon_decimal_point: *mut ::c_char,
+        pub mon_thousands_sep: *mut ::c_char,
+        pub mon_grouping: *mut ::c_char,
+        pub positive_sign: *mut ::c_char,
+        pub negative_sign: *mut ::c_char,
+        pub int_frac_digits: ::c_char,
+        pub frac_digits: ::c_char,
+        pub p_cs_precedes: ::c_char,
+        pub p_sep_by_space: ::c_char,
+        pub n_cs_precedes: ::c_char,
+        pub n_sep_by_space: ::c_char,
+        pub p_sign_posn: ::c_char,
+        pub n_sign_posn: ::c_char,
+        pub int_p_cs_precedes: ::c_char,
+        pub int_p_sep_by_space: ::c_char,
+        pub int_n_cs_precedes: ::c_char,
+        pub int_n_sep_by_space: ::c_char,
+        pub int_p_sign_posn: ::c_char,
+        pub int_n_sign_posn: ::c_char,
+    }
 }
 
 pub const O_CLOEXEC: ::c_int = 0x10000;
@@ -232,6 +259,10 @@ extern {
                        flags: ::c_int) -> ::c_int;
     pub fn mprotect(addr: *mut ::c_void, len: ::size_t, prot: ::c_int)
                     -> ::c_int;
+    pub fn pthread_main_np() -> ::c_int;
+    pub fn pthread_set_name_np(tid: ::pthread_t, name: *const ::c_char);
+    pub fn pthread_stackseg_np(thread: ::pthread_t,
+                               sinfo: *mut ::stack_t) -> ::c_int;
     pub fn sysctl(name: *const ::c_int,
                   namelen: ::c_uint,
                   oldp: *mut ::c_void,
index 374985dd56b11bef087d2fa0cdd9173df8c22d60..3170363ccfe39036bd0b0324611562343786d3eb 100644 (file)
@@ -3,6 +3,8 @@
 //! More functions and definitions can be found in the more specific modules
 //! according to the platform in question.
 
+use dox::Option;
+
 pub type pid_t = i32;
 pub type uid_t = u32;
 pub type gid_t = u32;
@@ -97,6 +99,13 @@ s! {
         pub events: ::c_short,
         pub revents: ::c_short,
     }
+
+    pub struct winsize {
+        pub ws_row: ::c_ushort,
+        pub ws_col: ::c_ushort,
+        pub ws_xpixel: ::c_ushort,
+        pub ws_ypixel: ::c_ushort,
+    }
 }
 
 pub const SIG_DFL: sighandler_t = 0 as sighandler_t;
@@ -134,15 +143,24 @@ pub const IF_NAMESIZE: ::size_t = 16;
 pub const RTLD_LAZY: ::c_int = 0x1;
 
 cfg_if! {
-    if #[cfg(not(stdbuild))] {
+    if #[cfg(dox)] {
+        // on dox builds don't pull in anything
+    } else if #[cfg(all(not(stdbuild), feature = "use_std"))] {
         // cargo build, don't pull in anything extra as the libstd  dep
         // already pulls in all libs.
-    } else if #[cfg(all(target_env = "musl", not(target_arch = "mips")))] {
+    } else if #[cfg(all(target_env = "musl", not(any(target_arch = "mips",
+                                                     target_arch = "arm"))))] {
         #[link(name = "c", kind = "static")]
         extern {}
     } else if #[cfg(target_os = "emscripten")] {
         #[link(name = "c")]
         extern {}
+    } else if #[cfg(all(target_vendor = "rumprun", target_os = "netbsd"))] {
+        // Since we don't use -nodefaultlibs on Rumprun, libc is always pulled
+        // in automatically by the linker. We avoid passing it explicitly, as it
+        // causes some versions of binutils to crash with an assertion failure.
+        #[link(name = "m")]
+        extern {}
     } else if #[cfg(any(target_os = "macos",
                         target_os = "ios",
                         target_os = "android",
@@ -367,7 +385,8 @@ extern {
     pub fn munmap(addr: *mut ::c_void, len: ::size_t) -> ::c_int;
 
     pub fn if_nametoindex(ifname: *const c_char) -> ::c_uint;
-    pub fn if_indextoname(ifindex: ::c_uint, ifname: *mut ::c_char) -> *mut ::c_char;
+    pub fn if_indextoname(ifindex: ::c_uint,
+                          ifname: *mut ::c_char) -> *mut ::c_char;
 
     #[cfg_attr(target_os = "macos", link_name = "lstat$INODE64")]
     #[cfg_attr(target_os = "netbsd", link_name = "__lstat50")]
@@ -434,7 +453,7 @@ extern {
     #[cfg_attr(target_os = "netbsd", link_name = "__libc_thr_yield")]
     pub fn sched_yield() -> ::c_int;
     pub fn pthread_key_create(key: *mut pthread_key_t,
-                              dtor: ::dox::Option<unsafe extern fn(*mut ::c_void)>)
+                              dtor: Option<unsafe extern fn(*mut ::c_void)>)
                               -> ::c_int;
     pub fn pthread_key_delete(key: pthread_key_t) -> ::c_int;
     pub fn pthread_getspecific(key: pthread_key_t) -> *mut ::c_void;
@@ -489,8 +508,8 @@ extern {
     pub fn pthread_sigmask(how: ::c_int, set: *const sigset_t,
                            oldset: *mut sigset_t) -> ::c_int;
     pub fn pthread_kill(thread: ::pthread_t, sig: ::c_int) -> ::c_int;
-
-    // #[cfg_attr(target_os = "linux", link_name = "__xpg_strerror_r")]
+    #[cfg_attr(all(target_os = "linux", not(target_env = "musl")),
+               link_name = "__xpg_strerror_r")]
     pub fn strerror_r(errnum: ::c_int, buf: *mut c_char,
                       buflen: ::size_t) -> ::c_int;
 
@@ -500,13 +519,19 @@ extern {
                       optval: *mut ::c_void,
                       optlen: *mut ::socklen_t) -> ::c_int;
     pub fn raise(signum: ::c_int) -> ::c_int;
+    #[cfg_attr(target_os = "netbsd", link_name = "__sigaction14")]
     pub fn sigaction(signum: ::c_int,
                      act: *const sigaction,
                      oldact: *mut sigaction) -> ::c_int;
     #[cfg_attr(all(target_os = "macos", target_arch = "x86"),
                link_name = "sigaltstack$UNIX2003")]
+    #[cfg_attr(target_os = "netbsd", link_name = "__sigaltstack14")]
     pub fn sigaltstack(ss: *const stack_t,
                        oss: *mut stack_t) -> ::c_int;
+    #[cfg_attr(all(target_os = "macos", target_arch ="x86"),
+               link_name = "sigwait$UNIX2003")]
+    pub fn sigwait(set: *const sigset_t,
+                   sig: *mut ::c_int) -> ::c_int;
 
     #[cfg_attr(target_os = "netbsd", link_name = "__utimes50")]
     pub fn utimes(filename: *const ::c_char,
@@ -540,10 +565,14 @@ extern {
                  dev: ::dev_t) -> ::c_int;
     #[cfg_attr(all(target_os = "macos", target_arch = "x86"),
                link_name = "writev$UNIX2003")]
-    pub fn writev(fd: ::c_int, iov: *const ::iovec, iovcnt: ::c_int) -> ::ssize_t;
+    pub fn writev(fd: ::c_int,
+                  iov: *const ::iovec,
+                  iovcnt: ::c_int) -> ::ssize_t;
     #[cfg_attr(all(target_os = "macos", target_arch = "x86"),
                link_name = "readv$UNIX2003")]
-    pub fn readv(fd: ::c_int, iov: *const ::iovec, iovcnt: ::c_int) -> ::ssize_t;
+    pub fn readv(fd: ::c_int,
+                 iov: *const ::iovec,
+                 iovcnt: ::c_int) -> ::ssize_t;
     pub fn uname(buf: *mut ::utsname) -> ::c_int;
     pub fn daemon(nochdir: ::c_int, noclose: ::c_int) -> ::c_int;
     pub fn gethostname(name: *mut ::c_char, len: ::size_t) -> ::c_int;
@@ -565,17 +594,33 @@ extern {
     pub fn putenv(string: *mut c_char) -> ::c_int;
     #[cfg_attr(all(target_os = "macos", target_arch = "x86"),
                link_name = "sendmsg$UNIX2003")]
-    pub fn sendmsg(fd: ::c_int, msg: *const msghdr, flags: ::c_int) -> ::ssize_t;
+    pub fn sendmsg(fd: ::c_int,
+                   msg: *const msghdr,
+                   flags: ::c_int) -> ::ssize_t;
     #[cfg_attr(all(target_os = "macos", target_arch = "x86"),
                link_name = "recvmsg$UNIX2003")]
     pub fn recvmsg(fd: ::c_int, msg: *mut msghdr, flags: ::c_int) -> ::ssize_t;
     #[cfg_attr(all(target_os = "macos", target_arch = "x86"),
                link_name = "poll$UNIX2003")]
     pub fn poll(fds: *mut pollfd, nfds: nfds_t, timeout: ::c_int) -> ::c_int;
+    #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"),
+               link_name = "select$1050")]
+    #[cfg_attr(all(target_os = "macos", target_arch = "x86"),
+               link_name = "select$UNIX2003")]
+    #[cfg_attr(target_os = "netbsd", link_name = "__select50")]
+    pub fn select(nfds: ::c_int,
+                  readfs: *mut fd_set,
+                  writefds: *mut fd_set,
+                  errorfds: *mut fd_set,
+                  timeout: *mut timeval) -> ::c_int;
+    #[cfg_attr(target_os = "netbsd", link_name = "__setlocale50")]
+    pub fn setlocale(category: ::c_int,
+                     locale: *const ::c_char) -> *mut ::c_char;
+    pub fn localeconv() -> *mut lconv;
 }
 
-// TODO: get rid of this #[cfg(not(...))]
-#[cfg(not(target_os = "android"))]
+// TODO: get rid of this cfg(not(...))
+#[cfg(not(target_os = "android"))] // " if " -- appease style checker
 extern {
     pub fn getifaddrs(ifap: *mut *mut ifaddrs) -> ::c_int;
     pub fn freeifaddrs(ifa: *mut ifaddrs);
@@ -583,8 +628,8 @@ extern {
     #[cfg_attr(target_os = "netbsd", link_name = "__glob30")]
     pub fn glob(pattern: *const c_char,
                 flags: ::c_int,
-                errfunc: ::dox::Option<extern "C" fn(epath: *const c_char,
-                                                     errno: ::c_int) -> ::c_int>,
+                errfunc: Option<extern fn(epath: *const c_char,
+                                          errno: ::c_int) -> ::c_int>,
                 pglob: *mut glob_t) -> ::c_int;
     #[cfg_attr(target_os = "netbsd", link_name = "__globfree30")]
     pub fn globfree(pglob: *mut glob_t);
@@ -636,16 +681,6 @@ extern {
     pub fn sigdelset(set: *mut sigset_t, signum: ::c_int) -> ::c_int;
     #[cfg_attr(target_os = "netbsd", link_name = "__sigismember14")]
     pub fn sigismember(set: *const sigset_t, signum: ::c_int) -> ::c_int;
-    #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"),
-               link_name = "select$1050")]
-    #[cfg_attr(all(target_os = "macos", target_arch = "x86"),
-               link_name = "select$UNIX2003")]
-    #[cfg_attr(target_os = "netbsd", link_name = "__select50")]
-    pub fn select(nfds: ::c_int,
-                  readfs: *mut fd_set,
-                  writefds: *mut fd_set,
-                  errorfds: *mut fd_set,
-                  timeout: *mut timeval) -> ::c_int;
     #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"),
                link_name = "pselect$1050")]
     #[cfg_attr(all(target_os = "macos", target_arch = "x86"),
@@ -682,6 +717,7 @@ extern {
     pub fn mkstemp(template: *mut ::c_char) -> ::c_int;
     pub fn mkstemps(template: *mut ::c_char, suffixlen: ::c_int) -> ::c_int;
     pub fn mkdtemp(template: *mut ::c_char) -> *mut ::c_char;
+    pub fn futimes(fd: ::c_int, times: *const ::timeval) -> ::c_int;
 }
 
 cfg_if! {
@@ -703,6 +739,6 @@ cfg_if! {
         mod solaris;
         pub use self::solaris::*;
     } else {
-        // ...
+        // Unknown target_os
     }
 }
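
A usage sketch for the setlocale/localeconv declarations and the lconv struct introduced in this commit (illustrative only; the helper name is invented): query the decimal separator of the locale selected by the environment.

    use std::ffi::CStr;

    // Illustrative only: read the active locale's decimal separator.
    unsafe fn decimal_point() -> String {
        // An empty locale string selects the locale from the environment.
        libc::setlocale(libc::LC_ALL, b"\0".as_ptr() as *const libc::c_char);
        let lc = libc::localeconv();
        CStr::from_ptr((*lc).decimal_point).to_string_lossy().into_owned()
    }
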
index 9cc469f11405be7ba703ae57073ccf2144cf41db..267060d422fed04841aa3ada481048ace9bf8393 100644 (file)
@@ -8,3 +8,5 @@ s! {
         pub sa_restorer: ::dox::Option<extern fn()>,
     }
 }
+
+pub const SYS_gettid: ::c_long = 224;
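
The per-architecture SYS_gettid numbers added in this and the following files are meant to be passed to the raw syscall entry point, since no gettid wrapper is exposed. A minimal sketch (helper name illustrative):

    // Illustrative only: fetch the kernel thread id via the raw syscall number.
    fn gettid() -> libc::pid_t {
        unsafe { libc::syscall(libc::SYS_gettid) as libc::pid_t }
    }
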
index 45fd961581a4dd99700e5846a92dabf2cda21cc8..5cf4f9d0c83202a10db2696645c5d8a62e7505a9 100644 (file)
@@ -8,3 +8,5 @@ s! {
         _restorer: *mut ::c_void,
     }
 }
+
+pub const SYS_gettid: ::c_long = 178;
index c081ecfaf028f11d92a230346aaeecd71b8f6d98..8963ddf9146720f255281bc4688909aded150e1a 100644 (file)
@@ -302,6 +302,13 @@ pub const SIG_UNBLOCK: ::c_int = 0x01;
 
 pub const RUSAGE_CHILDREN: ::c_int = -1;
 
+pub const LC_PAPER: ::c_int = 7;
+pub const LC_NAME: ::c_int = 8;
+pub const LC_ADDRESS: ::c_int = 9;
+pub const LC_TELEPHONE: ::c_int = 10;
+pub const LC_MEASUREMENT: ::c_int = 11;
+pub const LC_IDENTIFICATION: ::c_int = 12;
+
 pub const MAP_ANON: ::c_int = 0x0020;
 pub const MAP_ANONYMOUS: ::c_int = 0x0020;
 pub const MAP_GROWSDOWN: ::c_int = 0x0100;
@@ -543,6 +550,7 @@ pub const TIOCCONS: ::c_int = 0x541D;
 pub const RTLD_GLOBAL: ::c_int = 0x2;
 pub const RTLD_NOLOAD: ::c_int = 0x4;
 pub const RTLD_NOW: ::c_int = 0;
+pub const RTLD_DEFAULT: *mut ::c_void = -1isize as *mut ::c_void;
 
 f! {
     pub fn sigemptyset(set: *mut sigset_t) -> ::c_int {
@@ -571,11 +579,13 @@ f! {
         (*termios).c_cflag & ::CBAUD
     }
     pub fn cfsetispeed(termios: *mut ::termios, speed: ::speed_t) -> ::c_int {
-        (*termios).c_cflag = ((*termios).c_cflag & !::CBAUD) | (speed & ::CBAUD);
+        let cbaud = ::CBAUD;
+        (*termios).c_cflag = ((*termios).c_cflag & !cbaud) | (speed & cbaud);
         return 0
     }
     pub fn cfsetospeed(termios: *mut ::termios, speed: ::speed_t) -> ::c_int {
-        (*termios).c_cflag = ((*termios).c_cflag & !::CBAUD) | (speed & ::CBAUD);
+        let cbaud = ::CBAUD;
+        (*termios).c_cflag = ((*termios).c_cflag & !cbaud) | (speed & cbaud);
         return 0
     }
     pub fn tcgetattr(fd: ::c_int, termios: *mut ::termios) -> ::c_int {
@@ -655,8 +665,6 @@ cfg_if! {
         mod b64;
         pub use self::b64::*;
     } else {
-        // ...
+        // Unknown target_pointer_width
     }
 }
-
-
index d9baf722d1c8a8c83720bdd5371fa68c9c9400c2..728c1a38da805f20173be68e16a9fac5a679e3c5 100644 (file)
@@ -294,6 +294,13 @@ pub const EOWNERDEAD: ::c_int = 165;
 pub const ENOTRECOVERABLE: ::c_int = 166;
 pub const ERFKILL: ::c_int = 167;
 
+pub const LC_PAPER: ::c_int = 7;
+pub const LC_NAME: ::c_int = 8;
+pub const LC_ADDRESS: ::c_int = 9;
+pub const LC_TELEPHONE: ::c_int = 10;
+pub const LC_MEASUREMENT: ::c_int = 11;
+pub const LC_IDENTIFICATION: ::c_int = 12;
+
 pub const MAP_NORESERVE: ::c_int = 0x400;
 pub const MAP_ANON: ::c_int = 0x800;
 pub const MAP_ANONYMOUS: ::c_int = 0x800;
@@ -303,6 +310,7 @@ pub const MAP_EXECUTABLE: ::c_int = 0x4000;
 pub const MAP_LOCKED: ::c_int = 0x8000;
 pub const MAP_POPULATE: ::c_int = 0x10000;
 pub const MAP_NONBLOCK: ::c_int = 0x20000;
+pub const MAP_STACK: ::c_int = 0x40000;
 
 pub const SOCK_STREAM: ::c_int = 2;
 pub const SOCK_DGRAM: ::c_int = 1;
@@ -426,6 +434,8 @@ pub const PTRACE_SETFPXREGS: ::c_uint = 19;
 pub const PTRACE_GETREGS: ::c_uint = 12;
 pub const PTRACE_SETREGS: ::c_uint = 13;
 
+pub const MAP_HUGETLB: ::c_int = 0x080000;
+
 pub const EFD_NONBLOCK: ::c_int = 0x80;
 
 pub const F_GETLK: ::c_int = 14;
@@ -472,6 +482,8 @@ pub const RTLD_DEEPBIND: ::c_int = 0x10;
 pub const RTLD_GLOBAL: ::c_int = 0x4;
 pub const RTLD_NOLOAD: ::c_int = 0x8;
 
+pub const SYS_gettid: ::c_long = 4222;   // Valid for O32
+
 extern {
     pub fn sysctl(name: *mut ::c_int,
                   namelen: ::c_int,
@@ -485,8 +497,9 @@ extern {
                      sz: ::c_int) -> ::c_int;
     pub fn glob64(pattern: *const ::c_char,
                   flags: ::c_int,
-                  errfunc: ::dox::Option<extern "C" fn(epath: *const ::c_char,
-                                                       errno: ::c_int) -> ::c_int>,
+                  errfunc: ::dox::Option<extern fn(epath: *const ::c_char,
+                                                   errno: ::c_int)
+                                                   -> ::c_int>,
                   pglob: *mut glob64_t) -> ::c_int;
     pub fn globfree64(pglob: *mut glob64_t);
     pub fn getnameinfo(sa: *const ::sockaddr,
index 510a33d0bafd4d99e6efe5d7fb209fb6c495f61b..374e0019d7774f0148389f61b02ae97574557b61 100644 (file)
@@ -126,13 +126,6 @@ s! {
         __f_spare: [::c_int; 6],
     }
 
-    pub struct sockaddr_nl {
-        pub nl_family: ::sa_family_t,
-        nl_pad: ::c_ushort,
-        pub nl_pid: u32,
-        pub nl_groups: u32
-    }
-
     pub struct dqblk {
         pub dqb_bhardlimit: ::uint64_t,
         pub dqb_bsoftlimit: ::uint64_t,
@@ -185,31 +178,6 @@ s! {
     }
 }
 
-f! {
-    pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () {
-        for slot in cpuset.bits.iter_mut() {
-            *slot = 0;
-        }
-    }
-
-    pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () {
-        let size = mem::size_of_val(&cpuset.bits[0]);
-        let (idx, offset) = (cpu / size, cpu % size);
-        cpuset.bits[idx] |= 1 << offset;
-        ()
-    }
-
-    pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool {
-        let size = mem::size_of_val(&cpuset.bits[0]);
-        let (idx, offset) = (cpu / size, cpu % size);
-        0 != (cpuset.bits[idx] & (1 << offset))
-    }
-
-    pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool {
-        set1.bits == set2.bits
-    }
-}
-
 pub const FILENAME_MAX: ::c_uint = 4096;
 pub const L_tmpnam: ::c_uint = 20;
 pub const _PC_NAME_MAX: ::c_int = 3;
@@ -345,13 +313,8 @@ pub const RTLD_DEFAULT: *mut ::c_void = 0i64 as *mut ::c_void;
 pub const RTLD_NODELETE: ::c_int = 0x1000;
 pub const RTLD_NOW: ::c_int = 0x2;
 
-#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-pub const MAP_32BIT: ::c_int = 0x0040;
-
 pub const TCP_MD5SIG: ::c_int = 14;
 
-pub const F_DUPFD_CLOEXEC: ::c_int = 1030;
-
 pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
     __align: [],
     size: [0; __SIZEOF_PTHREAD_MUTEX_T],
@@ -396,11 +359,6 @@ pub const SHM_UNLOCK: ::c_int = 12;
 pub const SHM_HUGETLB: ::c_int = 0o4000;
 pub const SHM_NORESERVE: ::c_int = 0o10000;
 
-pub const MS_RELATIME: ::c_ulong = 0x200000;
-pub const MS_KERNMOUNT: ::c_ulong = 0x400000;
-pub const MS_I_VERSION: ::c_ulong = 0x800000;
-pub const MS_STRICTATIME: ::c_ulong = 0x01000000;
-
 pub const EPOLLRDHUP: ::c_int = 0x2000;
 pub const EPOLLONESHOT: ::c_int = 0x40000000;
 
@@ -413,20 +371,45 @@ pub const EFD_SEMAPHORE: ::c_int = 0x1;
 
 pub const NCCS: usize = 32;
 
-pub const CLONE_NEWUTS: ::c_int = 0x04000000;
-pub const CLONE_NEWIPC: ::c_int = 0x08000000;
-pub const CLONE_NEWUSER: ::c_int = 0x10000000;
-pub const CLONE_NEWPID: ::c_int = 0x20000000;
-pub const CLONE_NEWNET: ::c_int = 0x40000000;
-pub const CLONE_IO: ::c_int = 0x80000000;
+pub const AF_NETLINK: ::c_int = 16;
+
+f! {
+    pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () {
+        for slot in cpuset.bits.iter_mut() {
+            *slot = 0;
+        }
+    }
+
+    pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () {
+        let size = mem::size_of_val(&cpuset.bits[0]);
+        let (idx, offset) = (cpu / size, cpu % size);
+        cpuset.bits[idx] |= 1 << offset;
+        ()
+    }
 
+    pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool {
+        let size = mem::size_of_val(&cpuset.bits[0]);
+        let (idx, offset) = (cpu / size, cpu % size);
+        0 != (cpuset.bits[idx] & (1 << offset))
+    }
+
+    pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool {
+        set1.bits == set2.bits
+    }
+}
+
+#[link(name = "util")]
 extern {
     pub fn shm_open(name: *const c_char, oflag: ::c_int,
                     mode: mode_t) -> ::c_int;
     pub fn shmget(key: ::key_t, size: ::size_t, shmflg: ::c_int) -> ::c_int;
-    pub fn shmat(shmid: ::c_int, shmaddr: *const ::c_void, shmflg: ::c_int) -> *mut ::c_void;
+    pub fn shmat(shmid: ::c_int,
+                 shmaddr: *const ::c_void,
+                 shmflg: ::c_int) -> *mut ::c_void;
     pub fn shmdt(shmaddr: *const ::c_void) -> ::c_int;
-    pub fn shmctl(shmid: ::c_int, cmd: ::c_int, buf: *mut ::shmid_ds) -> ::c_int;
+    pub fn shmctl(shmid: ::c_int,
+                  cmd: ::c_int,
+                  buf: *mut ::shmid_ds) -> ::c_int;
     pub fn mprotect(addr: *mut ::c_void, len: ::size_t, prot: ::c_int)
                     -> ::c_int;
     pub fn __errno_location() -> *mut ::c_int;
@@ -541,7 +524,23 @@ extern {
     pub fn sethostname(name: *const ::c_char, len: ::size_t) -> ::c_int;
     pub fn setns(fd: ::c_int, nstype: ::c_int) -> ::c_int;
     pub fn mkostemp(template: *mut ::c_char, flags: ::c_int) -> ::c_int;
-    pub fn mkostemps(template: *mut ::c_char, suffixlen: ::c_int, flags: ::c_int) -> ::c_int;
+    pub fn mkostemps(template: *mut ::c_char,
+                     suffixlen: ::c_int,
+                     flags: ::c_int) -> ::c_int;
+    pub fn sigtimedwait(set: *const sigset_t,
+                        info: *mut siginfo_t,
+                        timeout: *const ::timespec) -> ::c_int;
+    pub fn sigwaitinfo(set: *const sigset_t,
+                       info: *mut siginfo_t) -> ::c_int;
+    pub fn openpty(amaster: *mut ::c_int,
+                   aslave: *mut ::c_int,
+                   name: *mut ::c_char,
+                   termp: *const termios,
+                   winp: *const ::winsize) -> ::c_int;
+    pub fn forkpty(amaster: *mut ::c_int,
+                   name: *mut ::c_char,
+                   termp: *const termios,
+                   winp: *const ::winsize) -> ::pid_t;
 }
 
 cfg_if! {
@@ -557,5 +556,3 @@ cfg_if! {
         pub use self::other::*;
     }
 }
-
-pub const AF_NETLINK: ::c_int = 16;
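
The CPU_ZERO/CPU_SET/CPU_ISSET helpers relocated in the hunk above are normally combined with sched_setaffinity. A hedged sketch, not part of the commit (Linux-only; the helper name is invented):

    use std::mem;

    // Illustrative only: pin the calling thread to CPU 0.
    unsafe fn pin_to_cpu0() -> bool {
        let mut set: libc::cpu_set_t = mem::zeroed();
        libc::CPU_ZERO(&mut set);
        libc::CPU_SET(0, &mut set);
        // pid 0 means "the calling thread".
        libc::sched_setaffinity(0, mem::size_of::<libc::cpu_set_t>(), &set) == 0
    }
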
index b59647f94df2275215ad2dc71e4acd10b6931f05..b87913f3692dc1d3002a335a5ecda17fce2c662c 100644 (file)
@@ -1,6 +1,89 @@
 pub type c_char = u8;
 pub type wchar_t = u32;
 
+s! {
+    pub struct stat {
+        pub st_dev: ::dev_t,
+        __st_dev_padding: ::c_int,
+        __st_ino_truncated: ::c_long,
+        pub st_mode: ::mode_t,
+        pub st_nlink: ::nlink_t,
+        pub st_uid: ::uid_t,
+        pub st_gid: ::gid_t,
+        pub st_rdev: ::dev_t,
+        __st_rdev_padding: ::c_int,
+        pub st_size: ::off_t,
+        pub st_blksize: ::blksize_t,
+        pub st_blocks: ::blkcnt_t,
+        pub st_atime: ::time_t,
+        pub st_atime_nsec: ::c_long,
+        pub st_mtime: ::time_t,
+        pub st_mtime_nsec: ::c_long,
+        pub st_ctime: ::time_t,
+        pub st_ctime_nsec: ::c_long,
+        pub st_ino: ::ino_t,
+    }
+
+    pub struct stat64 {
+        pub st_dev: ::dev_t,
+        __st_dev_padding: ::c_int,
+        __st_ino_truncated: ::c_long,
+        pub st_mode: ::mode_t,
+        pub st_nlink: ::nlink_t,
+        pub st_uid: ::uid_t,
+        pub st_gid: ::gid_t,
+        pub st_rdev: ::dev_t,
+        __st_rdev_padding: ::c_int,
+        pub st_size: ::off_t,
+        pub st_blksize: ::blksize_t,
+        pub st_blocks: ::blkcnt_t,
+        pub st_atime: ::time_t,
+        pub st_atime_nsec: ::c_long,
+        pub st_mtime: ::time_t,
+        pub st_mtime_nsec: ::c_long,
+        pub st_ctime: ::time_t,
+        pub st_ctime_nsec: ::c_long,
+        pub st_ino: ::ino_t,
+    }
+
+    pub struct stack_t {
+        pub ss_sp: *mut ::c_void,
+        pub ss_flags: ::c_int,
+        pub ss_size: ::size_t
+    }
+
+    pub struct shmid_ds {
+        pub shm_perm: ::ipc_perm,
+        pub shm_segsz: ::size_t,
+        pub shm_atime: ::time_t,
+        __unused1: ::c_int,
+        pub shm_dtime: ::time_t,
+        __unused2: ::c_int,
+        pub shm_ctime: ::time_t,
+        __unused3: ::c_int,
+        pub shm_cpid: ::pid_t,
+        pub shm_lpid: ::pid_t,
+        pub shm_nattch: ::c_ulong,
+        __pad1: ::c_ulong,
+        __pad2: ::c_ulong,
+    }
+
+    pub struct statfs {
+        pub f_type: ::c_ulong,
+        pub f_bsize: ::c_ulong,
+        pub f_blocks: ::fsblkcnt_t,
+        pub f_bfree: ::fsblkcnt_t,
+        pub f_bavail: ::fsblkcnt_t,
+        pub f_files: ::fsfilcnt_t,
+        pub f_ffree: ::fsfilcnt_t,
+        pub f_fsid: ::fsid_t,
+        pub f_namelen: ::c_ulong,
+        pub f_frsize: ::c_ulong,
+        pub f_flags: ::c_ulong,
+        pub f_spare: [::c_ulong; 4],
+    }
+}
+
 pub const O_DIRECT: ::c_int = 0x4000;
 pub const O_DIRECTORY: ::c_int = 0x10000;
 pub const O_NOFOLLOW: ::c_int = 0x20000;
@@ -220,79 +303,5 @@ pub const TIOCMSET: ::c_ulong = 0x5418;
 pub const FIONREAD: ::c_ulong = 0x541B;
 pub const TIOCCONS: ::c_ulong = 0x541D;
 
-s! {
-    pub struct stat {
-        pub st_dev: ::dev_t,
-        __st_dev_padding: ::c_int,
-        __st_ino_truncated: ::c_long,
-        pub st_mode: ::mode_t,
-        pub st_nlink: ::nlink_t,
-        pub st_uid: ::uid_t,
-        pub st_gid: ::gid_t,
-        pub st_rdev: ::dev_t,
-        __st_rdev_padding: ::c_int,
-        pub st_size: ::off_t,
-        pub st_blksize: ::blksize_t,
-        pub st_blocks: ::blkcnt_t,
-        pub st_atim: ::timespec,
-        pub st_mtim: ::timespec,
-        pub st_ctim: ::timespec,
-        pub st_ino: ::ino_t,
-    }
-
-    pub struct stat64 {
-        pub st_dev: ::dev_t,
-        __st_dev_padding: ::c_int,
-        __st_ino_truncated: ::c_long,
-        pub st_mode: ::mode_t,
-        pub st_nlink: ::nlink_t,
-        pub st_uid: ::uid_t,
-        pub st_gid: ::gid_t,
-        pub st_rdev: ::dev_t,
-        __st_rdev_padding: ::c_int,
-        pub st_size: ::off_t,
-        pub st_blksize: ::blksize_t,
-        pub st_blocks: ::blkcnt_t,
-        pub st_atim: ::timespec,
-        pub st_mtim: ::timespec,
-        pub st_ctim: ::timespec,
-        pub st_ino: ::ino_t,
-    }
-
-    pub struct stack_t {
-        pub ss_sp: *mut ::c_void,
-        pub ss_flags: ::c_int,
-        pub ss_size: ::size_t
-    }
-
-    pub struct shmid_ds {
-        pub shm_perm: ::ipc_perm,
-        pub shm_segsz: ::size_t,
-        pub shm_atime: ::time_t,
-        __unused1: ::c_int,
-        pub shm_dtime: ::time_t,
-        __unused2: ::c_int,
-        pub shm_ctime: ::time_t,
-        __unused3: ::c_int,
-        pub shm_cpid: ::pid_t,
-        pub shm_lpid: ::pid_t,
-        pub shm_nattch: ::c_ulong,
-        __pad1: ::c_ulong,
-        __pad2: ::c_ulong,
-    }
-
-    pub struct statfs {
-        pub f_type: ::c_ulong,
-        pub f_bsize: ::c_ulong,
-        pub f_blocks: ::fsblkcnt_t,
-        pub f_bfree: ::fsblkcnt_t,
-        pub f_bavail: ::fsblkcnt_t,
-        pub f_files: ::fsfilcnt_t,
-        pub f_ffree: ::fsfilcnt_t,
-        pub f_fsid: ::fsid_t,
-        pub f_namelen: ::c_ulong,
-        pub f_frsize: ::c_ulong,
-        pub f_flags: ::c_ulong,
-        pub f_spare: [::c_ulong; 4],
-    }
-}
+pub const SYS_gettid: ::c_long = 224;
+pub const SYS_perf_event_open: ::c_long = 364;
index b59647f94df2275215ad2dc71e4acd10b6931f05..13790ef4dfcefa2b3a15e49121e537900a98d1ca 100644 (file)
@@ -1,6 +1,89 @@
 pub type c_char = u8;
 pub type wchar_t = u32;
 
+s! {
+    pub struct stat {
+        pub st_dev: ::dev_t,
+        __st_dev_padding: ::c_int,
+        __st_ino_truncated: ::c_long,
+        pub st_mode: ::mode_t,
+        pub st_nlink: ::nlink_t,
+        pub st_uid: ::uid_t,
+        pub st_gid: ::gid_t,
+        pub st_rdev: ::dev_t,
+        __st_rdev_padding: ::c_int,
+        pub st_size: ::off_t,
+        pub st_blksize: ::blksize_t,
+        pub st_blocks: ::blkcnt_t,
+        pub st_atime: ::time_t,
+        pub st_atime_nsec: ::c_long,
+        pub st_mtime: ::time_t,
+        pub st_mtime_nsec: ::c_long,
+        pub st_ctime: ::time_t,
+        pub st_ctime_nsec: ::c_long,
+        pub st_ino: ::ino_t,
+    }
+
+    pub struct stat64 {
+        pub st_dev: ::dev_t,
+        __st_dev_padding: ::c_int,
+        __st_ino_truncated: ::c_long,
+        pub st_mode: ::mode_t,
+        pub st_nlink: ::nlink_t,
+        pub st_uid: ::uid_t,
+        pub st_gid: ::gid_t,
+        pub st_rdev: ::dev_t,
+        __st_rdev_padding: ::c_int,
+        pub st_size: ::off_t,
+        pub st_blksize: ::blksize_t,
+        pub st_blocks: ::blkcnt_t,
+        pub st_atime: ::time_t,
+        pub st_atime_nsec: ::c_long,
+        pub st_mtime: ::time_t,
+        pub st_mtime_nsec: ::c_long,
+        pub st_ctime: ::time_t,
+        pub st_ctime_nsec: ::c_long,
+        pub st_ino: ::ino_t,
+    }
+
+    pub struct stack_t {
+        pub ss_sp: *mut ::c_void,
+        pub ss_flags: ::c_int,
+        pub ss_size: ::size_t
+    }
+
+    pub struct shmid_ds {
+        pub shm_perm: ::ipc_perm,
+        pub shm_segsz: ::size_t,
+        pub shm_atime: ::time_t,
+        __unused1: ::c_int,
+        pub shm_dtime: ::time_t,
+        __unused2: ::c_int,
+        pub shm_ctime: ::time_t,
+        __unused3: ::c_int,
+        pub shm_cpid: ::pid_t,
+        pub shm_lpid: ::pid_t,
+        pub shm_nattch: ::c_ulong,
+        __pad1: ::c_ulong,
+        __pad2: ::c_ulong,
+    }
+
+    pub struct statfs {
+        pub f_type: ::c_ulong,
+        pub f_bsize: ::c_ulong,
+        pub f_blocks: ::fsblkcnt_t,
+        pub f_bfree: ::fsblkcnt_t,
+        pub f_bavail: ::fsblkcnt_t,
+        pub f_files: ::fsfilcnt_t,
+        pub f_ffree: ::fsfilcnt_t,
+        pub f_fsid: ::fsid_t,
+        pub f_namelen: ::c_ulong,
+        pub f_frsize: ::c_ulong,
+        pub f_flags: ::c_ulong,
+        pub f_spare: [::c_ulong; 4],
+    }
+}
+
 pub const O_DIRECT: ::c_int = 0x4000;
 pub const O_DIRECTORY: ::c_int = 0x10000;
 pub const O_NOFOLLOW: ::c_int = 0x20000;
@@ -220,79 +303,4 @@ pub const TIOCMSET: ::c_ulong = 0x5418;
 pub const FIONREAD: ::c_ulong = 0x541B;
 pub const TIOCCONS: ::c_ulong = 0x541D;
 
-s! {
-    pub struct stat {
-        pub st_dev: ::dev_t,
-        __st_dev_padding: ::c_int,
-        __st_ino_truncated: ::c_long,
-        pub st_mode: ::mode_t,
-        pub st_nlink: ::nlink_t,
-        pub st_uid: ::uid_t,
-        pub st_gid: ::gid_t,
-        pub st_rdev: ::dev_t,
-        __st_rdev_padding: ::c_int,
-        pub st_size: ::off_t,
-        pub st_blksize: ::blksize_t,
-        pub st_blocks: ::blkcnt_t,
-        pub st_atim: ::timespec,
-        pub st_mtim: ::timespec,
-        pub st_ctim: ::timespec,
-        pub st_ino: ::ino_t,
-    }
-
-    pub struct stat64 {
-        pub st_dev: ::dev_t,
-        __st_dev_padding: ::c_int,
-        __st_ino_truncated: ::c_long,
-        pub st_mode: ::mode_t,
-        pub st_nlink: ::nlink_t,
-        pub st_uid: ::uid_t,
-        pub st_gid: ::gid_t,
-        pub st_rdev: ::dev_t,
-        __st_rdev_padding: ::c_int,
-        pub st_size: ::off_t,
-        pub st_blksize: ::blksize_t,
-        pub st_blocks: ::blkcnt_t,
-        pub st_atim: ::timespec,
-        pub st_mtim: ::timespec,
-        pub st_ctim: ::timespec,
-        pub st_ino: ::ino_t,
-    }
-
-    pub struct stack_t {
-        pub ss_sp: *mut ::c_void,
-        pub ss_flags: ::c_int,
-        pub ss_size: ::size_t
-    }
-
-    pub struct shmid_ds {
-        pub shm_perm: ::ipc_perm,
-        pub shm_segsz: ::size_t,
-        pub shm_atime: ::time_t,
-        __unused1: ::c_int,
-        pub shm_dtime: ::time_t,
-        __unused2: ::c_int,
-        pub shm_ctime: ::time_t,
-        __unused3: ::c_int,
-        pub shm_cpid: ::pid_t,
-        pub shm_lpid: ::pid_t,
-        pub shm_nattch: ::c_ulong,
-        __pad1: ::c_ulong,
-        __pad2: ::c_ulong,
-    }
-
-    pub struct statfs {
-        pub f_type: ::c_ulong,
-        pub f_bsize: ::c_ulong,
-        pub f_blocks: ::fsblkcnt_t,
-        pub f_bfree: ::fsblkcnt_t,
-        pub f_bavail: ::fsblkcnt_t,
-        pub f_files: ::fsfilcnt_t,
-        pub f_ffree: ::fsfilcnt_t,
-        pub f_fsid: ::fsid_t,
-        pub f_namelen: ::c_ulong,
-        pub f_frsize: ::c_ulong,
-        pub f_flags: ::c_ulong,
-        pub f_spare: [::c_ulong; 4],
-    }
-}
+pub const SYS_gettid: ::c_long = 224; // Valid for arm (32-bit) and x86 (32-bit)
index 806bde31beeb8e95d4fba8ac32bdf34279622c2b..afe806acec6fefd1a8a3810456c8785ff1b1af63 100644 (file)
@@ -301,3 +301,6 @@ pub const TIOCMBIC: ::c_ulong = 0x741C;
 pub const TIOCMSET: ::c_ulong = 0x741D;
 pub const FIONREAD: ::c_ulong = 0x467F;
 pub const TIOCCONS: ::c_ulong = 0x80047478;
+
+pub const SYS_gettid: ::c_long = 4222;   // Valid for O32
+pub const SYS_perf_event_open: ::c_long = 4333;  // Valid for O32
index dfbc2b54811be4a18f400b7407ae505103506365..ad74e8816d17b7f4547cc40d011117b4fde5bf2d 100644 (file)
@@ -2,9 +2,6 @@ pub type c_long = i32;
 pub type c_ulong = u32;
 pub type nlink_t = u32;
 
-pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32;
-pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24;
-
 s! {
     pub struct pthread_attr_t {
         __size: [u32; 9]
@@ -25,6 +22,9 @@ s! {
     }
 }
 
+pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32;
+pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24;
+
 cfg_if! {
     if #[cfg(any(target_arch = "x86"))] {
         mod x86;
@@ -38,5 +38,7 @@ cfg_if! {
     } else if #[cfg(any(target_arch = "asmjs"))] {
         mod asmjs;
         pub use self::asmjs::*;
-    } else { }
+    } else {
+        // Unknown target_arch
+    }
 }
index e34997a47233b1d903bf1ca5c6126a0ac1e3cc8e..2785547820c93fddc134f9805b04802cf37857e7 100644 (file)
@@ -1,6 +1,89 @@
 pub type c_char = i8;
 pub type wchar_t = i32;
 
+s! {
+    pub struct stat {
+        pub st_dev: ::dev_t,
+        __st_dev_padding: ::c_int,
+        __st_ino_truncated: ::c_long,
+        pub st_mode: ::mode_t,
+        pub st_nlink: ::nlink_t,
+        pub st_uid: ::uid_t,
+        pub st_gid: ::gid_t,
+        pub st_rdev: ::dev_t,
+        __st_rdev_padding: ::c_int,
+        pub st_size: ::off_t,
+        pub st_blksize: ::blksize_t,
+        pub st_blocks: ::blkcnt_t,
+        pub st_atime: ::time_t,
+        pub st_atime_nsec: ::c_long,
+        pub st_mtime: ::time_t,
+        pub st_mtime_nsec: ::c_long,
+        pub st_ctime: ::time_t,
+        pub st_ctime_nsec: ::c_long,
+        pub st_ino: ::ino_t,
+    }
+
+    pub struct stat64 {
+        pub st_dev: ::dev_t,
+        __st_dev_padding: ::c_int,
+        __st_ino_truncated: ::c_long,
+        pub st_mode: ::mode_t,
+        pub st_nlink: ::nlink_t,
+        pub st_uid: ::uid_t,
+        pub st_gid: ::gid_t,
+        pub st_rdev: ::dev_t,
+        __st_rdev_padding: ::c_int,
+        pub st_size: ::off_t,
+        pub st_blksize: ::blksize_t,
+        pub st_blocks: ::blkcnt_t,
+        pub st_atime: ::time_t,
+        pub st_atime_nsec: ::c_long,
+        pub st_mtime: ::time_t,
+        pub st_mtime_nsec: ::c_long,
+        pub st_ctime: ::time_t,
+        pub st_ctime_nsec: ::c_long,
+        pub st_ino: ::ino_t,
+    }
+
+    pub struct stack_t {
+        pub ss_sp: *mut ::c_void,
+        pub ss_flags: ::c_int,
+        pub ss_size: ::size_t
+    }
+
+    pub struct shmid_ds {
+        pub shm_perm: ::ipc_perm,
+        pub shm_segsz: ::size_t,
+        pub shm_atime: ::time_t,
+        __unused1: ::c_int,
+        pub shm_dtime: ::time_t,
+        __unused2: ::c_int,
+        pub shm_ctime: ::time_t,
+        __unused3: ::c_int,
+        pub shm_cpid: ::pid_t,
+        pub shm_lpid: ::pid_t,
+        pub shm_nattch: ::c_ulong,
+        __pad1: ::c_ulong,
+        __pad2: ::c_ulong,
+    }
+
+    pub struct statfs {
+        pub f_type: ::c_ulong,
+        pub f_bsize: ::c_ulong,
+        pub f_blocks: ::fsblkcnt_t,
+        pub f_bfree: ::fsblkcnt_t,
+        pub f_bavail: ::fsblkcnt_t,
+        pub f_files: ::fsfilcnt_t,
+        pub f_ffree: ::fsfilcnt_t,
+        pub f_fsid: ::fsid_t,
+        pub f_namelen: ::c_ulong,
+        pub f_frsize: ::c_ulong,
+        pub f_flags: ::c_ulong,
+        pub f_spare: [::c_ulong; 4],
+    }
+}
+
 pub const O_DIRECT: ::c_int = 0x4000;
 pub const O_DIRECTORY: ::c_int = 0x10000;
 pub const O_NOFOLLOW: ::c_int = 0x20000;
@@ -174,6 +257,7 @@ pub const SIG_UNBLOCK: ::c_int = 0x01;
 pub const EXTPROC: ::tcflag_t = 0x00010000;
 
 pub const MAP_HUGETLB: ::c_int = 0x040000;
+pub const MAP_32BIT: ::c_int = 0x0040;
 
 pub const F_GETLK: ::c_int = 12;
 pub const F_GETOWN: ::c_int = 9;
@@ -220,79 +304,5 @@ pub const TIOCMSET: ::c_ulong = 0x5418;
 pub const FIONREAD: ::c_ulong = 0x541B;
 pub const TIOCCONS: ::c_ulong = 0x541D;
 
-s! {
-    pub struct stat {
-        pub st_dev: ::dev_t,
-        __st_dev_padding: ::c_int,
-        __st_ino_truncated: ::c_long,
-        pub st_mode: ::mode_t,
-        pub st_nlink: ::nlink_t,
-        pub st_uid: ::uid_t,
-        pub st_gid: ::gid_t,
-        pub st_rdev: ::dev_t,
-        __st_rdev_padding: ::c_int,
-        pub st_size: ::off_t,
-        pub st_blksize: ::blksize_t,
-        pub st_blocks: ::blkcnt_t,
-        pub st_atim: ::timespec,
-        pub st_mtim: ::timespec,
-        pub st_ctim: ::timespec,
-        pub st_ino: ::ino_t,
-    }
-
-    pub struct stat64 {
-        pub st_dev: ::dev_t,
-        __st_dev_padding: ::c_int,
-        __st_ino_truncated: ::c_long,
-        pub st_mode: ::mode_t,
-        pub st_nlink: ::nlink_t,
-        pub st_uid: ::uid_t,
-        pub st_gid: ::gid_t,
-        pub st_rdev: ::dev_t,
-        __st_rdev_padding: ::c_int,
-        pub st_size: ::off_t,
-        pub st_blksize: ::blksize_t,
-        pub st_blocks: ::blkcnt_t,
-        pub st_atim: ::timespec,
-        pub st_mtim: ::timespec,
-        pub st_ctim: ::timespec,
-        pub st_ino: ::ino_t,
-    }
-
-    pub struct stack_t {
-        pub ss_sp: *mut ::c_void,
-        pub ss_flags: ::c_int,
-        pub ss_size: ::size_t
-    }
-
-    pub struct shmid_ds {
-        pub shm_perm: ::ipc_perm,
-        pub shm_segsz: ::size_t,
-        pub shm_atime: ::time_t,
-        __unused1: ::c_int,
-        pub shm_dtime: ::time_t,
-        __unused2: ::c_int,
-        pub shm_ctime: ::time_t,
-        __unused3: ::c_int,
-        pub shm_cpid: ::pid_t,
-        pub shm_lpid: ::pid_t,
-        pub shm_nattch: ::c_ulong,
-        __pad1: ::c_ulong,
-        __pad2: ::c_ulong,
-    }
-
-    pub struct statfs {
-        pub f_type: ::c_ulong,
-        pub f_bsize: ::c_ulong,
-        pub f_blocks: ::fsblkcnt_t,
-        pub f_bfree: ::fsblkcnt_t,
-        pub f_bavail: ::fsblkcnt_t,
-        pub f_files: ::fsfilcnt_t,
-        pub f_ffree: ::fsfilcnt_t,
-        pub f_fsid: ::fsid_t,
-        pub f_namelen: ::c_ulong,
-        pub f_frsize: ::c_ulong,
-        pub f_flags: ::c_ulong,
-        pub f_spare: [::c_ulong; 4],
-    }
-}
+pub const SYS_gettid: ::c_long = 224;
+pub const SYS_perf_event_open: ::c_long = 336;
diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b64/aarch64.rs b/src/liblibc/src/unix/notbsd/linux/musl/b64/aarch64.rs
new file mode 100644 (file)
index 0000000..51db30f
--- /dev/null
@@ -0,0 +1 @@
+pub const SYS_perf_event_open: ::c_long = 241;
index 0251e83b05c5ecb3b6693c9257a87aba18288137..6946b66eaa6981f7d786d9c2ef7e2f2a28ab9c20 100644 (file)
@@ -4,6 +4,104 @@ pub type c_long = i64;
 pub type c_ulong = u64;
 pub type nlink_t = u64;
 
+s! {
+    pub struct stat {
+        pub st_dev: ::dev_t,
+        pub st_ino: ::ino_t,
+        pub st_nlink: ::nlink_t,
+        pub st_mode: ::mode_t,
+        pub st_uid: ::uid_t,
+        pub st_gid: ::gid_t,
+        __pad0: ::c_int,
+        pub st_rdev: ::dev_t,
+        pub st_size: ::off_t,
+        pub st_blksize: ::blksize_t,
+        pub st_blocks: ::blkcnt_t,
+        pub st_atime: ::time_t,
+        pub st_atime_nsec: ::c_long,
+        pub st_mtime: ::time_t,
+        pub st_mtime_nsec: ::c_long,
+        pub st_ctime: ::time_t,
+        pub st_ctime_nsec: ::c_long,
+        __unused: [::c_long; 3],
+    }
+
+    pub struct stat64 {
+        pub st_dev: ::dev_t,
+        pub st_ino: ::ino64_t,
+        pub st_nlink: ::nlink_t,
+        pub st_mode: ::mode_t,
+        pub st_uid: ::uid_t,
+        pub st_gid: ::gid_t,
+        __pad0: ::c_int,
+        pub st_rdev: ::dev_t,
+        pub st_size: ::off_t,
+        pub st_blksize: ::blksize_t,
+        pub st_blocks: ::blkcnt64_t,
+        pub st_atime: ::time_t,
+        pub st_atime_nsec: ::c_long,
+        pub st_mtime: ::time_t,
+        pub st_mtime_nsec: ::c_long,
+        pub st_ctime: ::time_t,
+        pub st_ctime_nsec: ::c_long,
+        __reserved: [::c_long; 3],
+    }
+
+    pub struct stack_t {
+        pub ss_sp: *mut ::c_void,
+        pub ss_flags: ::c_int,
+        pub ss_size: ::size_t
+    }
+
+    pub struct pthread_attr_t {
+        __size: [u64; 7]
+    }
+
+    pub struct sigset_t {
+        __val: [::c_ulong; 16],
+    }
+
+    pub struct shmid_ds {
+        pub shm_perm: ::ipc_perm,
+        pub shm_segsz: ::size_t,
+        pub shm_atime: ::time_t,
+        pub shm_dtime: ::time_t,
+        pub shm_ctime: ::time_t,
+        pub shm_cpid: ::pid_t,
+        pub shm_lpid: ::pid_t,
+        pub shm_nattch: ::c_ulong,
+        __pad1: ::c_ulong,
+        __pad2: ::c_ulong,
+    }
+
+    pub struct statfs {
+        pub f_type: ::c_ulong,
+        pub f_bsize: ::c_ulong,
+        pub f_blocks: ::fsblkcnt_t,
+        pub f_bfree: ::fsblkcnt_t,
+        pub f_bavail: ::fsblkcnt_t,
+        pub f_files: ::fsfilcnt_t,
+        pub f_ffree: ::fsfilcnt_t,
+        pub f_fsid: ::fsid_t,
+        pub f_namelen: ::c_ulong,
+        pub f_frsize: ::c_ulong,
+        pub f_flags: ::c_ulong,
+        pub f_spare: [::c_ulong; 4],
+    }
+
+    pub struct msghdr {
+        pub msg_name: *mut ::c_void,
+        pub msg_namelen: ::socklen_t,
+        pub msg_iov: *mut ::iovec,
+        pub msg_iovlen: ::c_int,
+        __pad1: ::c_int,
+        pub msg_control: *mut ::c_void,
+        pub msg_controllen: ::socklen_t,
+        __pad2: ::socklen_t,
+        pub msg_flags: ::c_int,
+    }
+}
+
 pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56;
 pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40;
 
@@ -41,6 +139,7 @@ pub const MAP_NORESERVE: ::c_int = 0x04000;
 pub const MAP_POPULATE: ::c_int = 0x08000;
 pub const MAP_NONBLOCK: ::c_int = 0x010000;
 pub const MAP_STACK: ::c_int = 0x020000;
+pub const MAP_32BIT: ::c_int = 0x0040;
 
 pub const SOCK_STREAM: ::c_int = 1;
 pub const SOCK_DGRAM: ::c_int = 2;
@@ -226,100 +325,17 @@ pub const TIOCMSET: ::c_ulong = 0x5418;
 pub const FIONREAD: ::c_ulong = 0x541B;
 pub const TIOCCONS: ::c_ulong = 0x541D;
 
-s! {
-    pub struct stat {
-        pub st_dev: ::dev_t,
-        pub st_ino: ::ino_t,
-        pub st_nlink: ::nlink_t,
-        pub st_mode: ::mode_t,
-        pub st_uid: ::uid_t,
-        pub st_gid: ::gid_t,
-        __pad0: ::c_int,
-        pub st_rdev: ::dev_t,
-        pub st_size: ::off_t,
-        pub st_blksize: ::blksize_t,
-        pub st_blocks: ::blkcnt_t,
-        pub st_atime: ::time_t,
-        pub st_atime_nsec: ::c_long,
-        pub st_mtime: ::time_t,
-        pub st_mtime_nsec: ::c_long,
-        pub st_ctime: ::time_t,
-        pub st_ctime_nsec: ::c_long,
-        __unused: [::c_long; 3],
-    }
-
-    pub struct stat64 {
-        pub st_dev: ::dev_t,
-        pub st_ino: ::ino64_t,
-        pub st_nlink: ::nlink_t,
-        pub st_mode: ::mode_t,
-        pub st_uid: ::uid_t,
-        pub st_gid: ::gid_t,
-        __pad0: ::c_int,
-        pub st_rdev: ::dev_t,
-        pub st_size: ::off_t,
-        pub st_blksize: ::blksize_t,
-        pub st_blocks: ::blkcnt64_t,
-        pub st_atime: ::time_t,
-        pub st_atime_nsec: ::c_long,
-        pub st_mtime: ::time_t,
-        pub st_mtime_nsec: ::c_long,
-        pub st_ctime: ::time_t,
-        pub st_ctime_nsec: ::c_long,
-        __reserved: [::c_long; 3],
-    }
-
-    pub struct stack_t {
-        pub ss_sp: *mut ::c_void,
-        pub ss_flags: ::c_int,
-        pub ss_size: ::size_t
-    }
-
-    pub struct pthread_attr_t {
-        __size: [u64; 7]
-    }
-
-    pub struct sigset_t {
-        __val: [::c_ulong; 16],
-    }
-
-    pub struct shmid_ds {
-        pub shm_perm: ::ipc_perm,
-        pub shm_segsz: ::size_t,
-        pub shm_atime: ::time_t,
-        pub shm_dtime: ::time_t,
-        pub shm_ctime: ::time_t,
-        pub shm_cpid: ::pid_t,
-        pub shm_lpid: ::pid_t,
-        pub shm_nattch: ::c_ulong,
-        __pad1: ::c_ulong,
-        __pad2: ::c_ulong,
-    }
-
-    pub struct statfs {
-        pub f_type: ::c_ulong,
-        pub f_bsize: ::c_ulong,
-        pub f_blocks: ::fsblkcnt_t,
-        pub f_bfree: ::fsblkcnt_t,
-        pub f_bavail: ::fsblkcnt_t,
-        pub f_files: ::fsfilcnt_t,
-        pub f_ffree: ::fsfilcnt_t,
-        pub f_fsid: ::fsid_t,
-        pub f_namelen: ::c_ulong,
-        pub f_frsize: ::c_ulong,
-        pub f_flags: ::c_ulong,
-        pub f_spare: [::c_ulong; 4],
-    }
-
-    pub struct msghdr {
-        pub msg_name: *mut ::c_void,
-        pub msg_namelen: ::socklen_t,
-        pub msg_iov: *mut ::iovec,
-        pub msg_iovlen: ::c_int,
-        __pad1: ::c_int,
-        pub msg_control: *mut ::c_void,
-        pub msg_controllen: ::socklen_t,
-        __pad2: ::socklen_t,
-        pub msg_flags: ::c_int,
+cfg_if! {
+    if #[cfg(target_arch = "aarch64")] {
+        mod aarch64;
+        pub use self::aarch64::*;
+    } else if #[cfg(any(target_arch = "powerpc64"))] {
+        mod powerpc64;
+        pub use self::powerpc64::*;
+    } else if #[cfg(any(target_arch = "x86_64"))] {
+        mod x86_64;
+        pub use self::x86_64::*;
+    } else {
+        // Unknown target_arch
     }
 }
diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b64/powerpc64.rs b/src/liblibc/src/unix/notbsd/linux/musl/b64/powerpc64.rs
new file mode 100644 (file)
index 0000000..bb81863
--- /dev/null
@@ -0,0 +1 @@
+pub const SYS_perf_event_open: ::c_long = 319;
diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b64/x86_64.rs b/src/liblibc/src/unix/notbsd/linux/musl/b64/x86_64.rs
new file mode 100644 (file)
index 0000000..168e242
--- /dev/null
@@ -0,0 +1,3 @@
+pub const SYS_gettid: ::c_long = 186;
+
+pub const SYS_perf_event_open: ::c_long = 298;
index 1cbfd8f25e57a1e0d03b7e743d8be9318c24e74d..cd04d851a5462cb720d9a71cbf36f57169a762c6 100644 (file)
@@ -148,6 +148,11 @@ pub const TIOCINQ: ::c_ulong = ::FIONREAD;
 pub const RTLD_GLOBAL: ::c_int = 0x100;
 pub const RTLD_NOLOAD: ::c_int = 0x4;
 
+// TODO(#247) Temporarily musl-specific (available since musl 0.9.12 / Linux
+// kernel 3.10).  See also notbsd/mod.rs
+pub const CLOCK_SGI_CYCLE: ::clockid_t = 10;
+pub const CLOCK_TAI: ::clockid_t = 11;
+
 extern {
     pub fn getnameinfo(sa: *const ::sockaddr,
                        salen: ::socklen_t,
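
The CLOCK_SGI_CYCLE/CLOCK_TAI ids added above feed directly into clock_gettime, whose clk_id parameter is retyped to clockid_t earlier in this commit. A minimal sketch, assuming a kernel new enough to provide CLOCK_TAI (>= 3.10, per the comment):

    use std::mem;

    // Illustrative only: read the TAI clock.
    unsafe fn now_tai() -> Option<libc::timespec> {
        let mut ts: libc::timespec = mem::zeroed();
        if libc::clock_gettime(libc::CLOCK_TAI, &mut ts) == 0 { Some(ts) } else { None }
    }
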
index aabe8edbe76cb6f246a0c67bfcc87d1afc896e67..7de4b8b990ea152591014b4460ccb662ca1eb837 100644 (file)
@@ -18,3 +18,6 @@ pub const SO_SNDTIMEO: ::c_int = 21;
 
 pub const FIOCLEX: ::c_ulong = 0x5451;
 pub const FIONBIO: ::c_ulong = 0x5421;
+
+pub const SYS_gettid: ::c_long = 224;
+pub const SYS_perf_event_open: ::c_long = 364;
index 12d46698125875708b7a248018d5afc88904a07b..b39ff683f60e6a7626183017d21731882b043e22 100644 (file)
@@ -13,17 +13,6 @@ pub type __fsword_t = i32;
 pub type blksize_t = i32;
 pub type nlink_t = u32;
 
-pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24;
-pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32;
-pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4;
-
-pub const PTRACE_GETFPREGS: ::c_uint = 14;
-pub const PTRACE_SETFPREGS: ::c_uint = 15;
-pub const PTRACE_GETFPXREGS: ::c_uint = 18;
-pub const PTRACE_SETFPXREGS: ::c_uint = 19;
-pub const PTRACE_GETREGS: ::c_uint = 12;
-pub const PTRACE_SETREGS: ::c_uint = 13;
-
 s! {
     pub struct stat {
         pub st_dev: ::dev_t,
@@ -79,6 +68,17 @@ s! {
     }
 }
 
+pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24;
+pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32;
+pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4;
+
+pub const PTRACE_GETFPREGS: ::c_uint = 14;
+pub const PTRACE_SETFPREGS: ::c_uint = 15;
+pub const PTRACE_GETFPXREGS: ::c_uint = 18;
+pub const PTRACE_SETFPXREGS: ::c_uint = 19;
+pub const PTRACE_GETREGS: ::c_uint = 12;
+pub const PTRACE_SETREGS: ::c_uint = 13;
+
 cfg_if! {
     if #[cfg(target_arch = "x86")] {
         mod x86;
@@ -90,6 +90,6 @@ cfg_if! {
         mod powerpc;
         pub use self::powerpc::*;
     } else {
-        // ...
+        // Unknown target_arch
     }
 }
index 4ebfeda40c186dc201ebba60d2cadf13077c9d4d..2334c1f8cd66485373e174c3d1d7c6225ea715bf 100644 (file)
@@ -18,3 +18,6 @@ pub const SO_SNDTIMEO: ::c_int = 19;
 
 pub const FIOCLEX: ::c_ulong = 0x20006601;
 pub const FIONBIO: ::c_ulong = 0x8004667e;
+
+pub const SYS_gettid: ::c_long = 207;
+pub const SYS_perf_event_open: ::c_long = 319;
index 45fa03f90dea0259a931b325f38bfcf2577b1de6..1b215a977dec4676aeac11434db066aa055cf987 100644 (file)
@@ -1,12 +1,28 @@
 pub type c_char = i8;
 pub type wchar_t = i32;
 
+s! {
+    pub struct mcontext_t {
+        __private: [u32; 22]
+    }
+
+    pub struct ucontext_t {
+        pub uc_flags: ::c_ulong,
+        pub uc_link: *mut ucontext_t,
+        pub uc_stack: ::stack_t,
+        pub uc_mcontext: mcontext_t,
+        pub uc_sigmask: ::sigset_t,
+        __private: [u8; 112],
+    }
+}
+
 pub const O_DIRECT: ::c_int = 0x4000;
 pub const O_DIRECTORY: ::c_int = 0x10000;
 pub const O_NOFOLLOW: ::c_int = 0x20000;
 
 pub const MAP_LOCKED: ::c_int = 0x02000;
 pub const MAP_NORESERVE: ::c_int = 0x04000;
+pub const MAP_32BIT: ::c_int = 0x0040;
 
 pub const EDEADLOCK: ::c_int = 35;
 
@@ -18,3 +34,16 @@ pub const SO_SNDTIMEO: ::c_int = 21;
 
 pub const FIOCLEX: ::c_ulong = 0x5451;
 pub const FIONBIO: ::c_ulong = 0x5421;
+
+pub const SYS_gettid: ::c_long = 224;
+pub const SYS_perf_event_open: ::c_long = 336;
+
+extern {
+    pub fn getcontext(ucp: *mut ucontext_t) -> ::c_int;
+    pub fn setcontext(ucp: *const ucontext_t) -> ::c_int;
+    pub fn makecontext(ucp: *mut ucontext_t,
+                       func:  extern fn (),
+                       argc: ::c_int, ...);
+    pub fn swapcontext(uocp: *mut ucontext_t,
+                       ucp: *const ucontext_t) -> ::c_int;
+}
index b56cb48b5dc6d6a701e2321ea23a232a27eb2eeb..d227c466cb4260cd7d61c89bb5955661879a1a6c 100644 (file)
@@ -5,27 +5,6 @@ pub type wchar_t = u32;
 pub type nlink_t = u32;
 pub type blksize_t = i32;
 
-pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 48;
-pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 8;
-
-pub const O_DIRECT: ::c_int = 0x10000;
-pub const O_DIRECTORY: ::c_int = 0x4000;
-pub const O_NOFOLLOW: ::c_int = 0x8000;
-
-pub const MAP_LOCKED: ::c_int = 0x02000;
-pub const MAP_NORESERVE: ::c_int = 0x04000;
-
-pub const EDEADLOCK: ::c_int = 35;
-
-pub const SO_PEERCRED: ::c_int = 17;
-pub const SO_RCVLOWAT: ::c_int = 18;
-pub const SO_SNDLOWAT: ::c_int = 19;
-pub const SO_RCVTIMEO: ::c_int = 20;
-pub const SO_SNDTIMEO: ::c_int = 21;
-
-pub const FIOCLEX: ::c_ulong = 0x5451;
-pub const FIONBIO: ::c_ulong = 0x5421;
-
 s! {
     pub struct stat {
         pub st_dev: ::dev_t,
@@ -75,3 +54,27 @@ s! {
         __size: [u64; 8]
     }
 }
+
+pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 48;
+pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 8;
+
+pub const O_DIRECT: ::c_int = 0x10000;
+pub const O_DIRECTORY: ::c_int = 0x4000;
+pub const O_NOFOLLOW: ::c_int = 0x8000;
+
+pub const MAP_LOCKED: ::c_int = 0x02000;
+pub const MAP_NORESERVE: ::c_int = 0x04000;
+
+pub const EDEADLOCK: ::c_int = 35;
+
+pub const SO_PEERCRED: ::c_int = 17;
+pub const SO_RCVLOWAT: ::c_int = 18;
+pub const SO_SNDLOWAT: ::c_int = 19;
+pub const SO_RCVTIMEO: ::c_int = 20;
+pub const SO_SNDTIMEO: ::c_int = 21;
+
+pub const FIOCLEX: ::c_ulong = 0x5451;
+pub const FIONBIO: ::c_ulong = 0x5421;
+
+pub const SYS_gettid: ::c_long = 178;
+pub const SYS_perf_event_open: ::c_long = 241;
index 28d9e4d00f85f85668b8b2ce52b97d6d9c099909..663b09333cdf7cf32bcfb7135cfe214c44f3eeb7 100644 (file)
@@ -25,8 +25,10 @@ cfg_if! {
     } else if #[cfg(any(target_arch = "powerpc64"))] {
         mod powerpc64;
         pub use self::powerpc64::*;
-    } else {
+    } else if #[cfg(any(target_arch = "x86_64"))] {
         mod x86_64;
         pub use self::x86_64::*;
+    } else {
+        // Unknown target_arch
     }
 }
index 742fb579ba9b3aabc045966043826d12ed9675b8..72db09e9115bb4a32688c4251d058d3601ecd5d1 100644 (file)
@@ -5,27 +5,6 @@ pub type wchar_t = i32;
 pub type nlink_t = u64;
 pub type blksize_t = i64;
 
-pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40;
-pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4;
-
-pub const O_DIRECTORY: ::c_int = 0x4000;
-pub const O_NOFOLLOW: ::c_int = 0x8000;
-pub const O_DIRECT: ::c_int = 0x20000;
-
-pub const MAP_LOCKED: ::c_int = 0x00080;
-pub const MAP_NORESERVE: ::c_int = 0x00040;
-
-pub const EDEADLOCK: ::c_int = 58;
-
-pub const SO_PEERCRED: ::c_int = 21;
-pub const SO_RCVLOWAT: ::c_int = 16;
-pub const SO_SNDLOWAT: ::c_int = 17;
-pub const SO_RCVTIMEO: ::c_int = 18;
-pub const SO_SNDTIMEO: ::c_int = 19;
-
-pub const FIOCLEX: ::c_ulong = 0x20006601;
-pub const FIONBIO: ::c_ulong = 0x8004667e;
-
 s! {
     pub struct stat {
         pub st_dev: ::dev_t,
@@ -73,3 +52,27 @@ s! {
         __size: [u64; 7]
     }
 }
+
+pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40;
+pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4;
+
+pub const O_DIRECTORY: ::c_int = 0x4000;
+pub const O_NOFOLLOW: ::c_int = 0x8000;
+pub const O_DIRECT: ::c_int = 0x20000;
+
+pub const MAP_LOCKED: ::c_int = 0x00080;
+pub const MAP_NORESERVE: ::c_int = 0x00040;
+
+pub const EDEADLOCK: ::c_int = 58;
+
+pub const SO_PEERCRED: ::c_int = 21;
+pub const SO_RCVLOWAT: ::c_int = 16;
+pub const SO_SNDLOWAT: ::c_int = 17;
+pub const SO_RCVTIMEO: ::c_int = 18;
+pub const SO_SNDTIMEO: ::c_int = 19;
+
+pub const FIOCLEX: ::c_ulong = 0x20006601;
+pub const FIONBIO: ::c_ulong = 0x8004667e;
+
+pub const SYS_gettid: ::c_long = 207;
+pub const SYS_perf_event_open: ::c_long = 319;
index fc82c71e2d8773f1ce3839a5f9faa8d238169ccb..6903c62751fceb5db0156c2138a2fc193bb2f4fa 100644 (file)
@@ -5,34 +5,6 @@ pub type wchar_t = i32;
 pub type nlink_t = u64;
 pub type blksize_t = i64;
 
-pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40;
-pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4;
-
-pub const O_DIRECT: ::c_int = 0x4000;
-pub const O_DIRECTORY: ::c_int = 0x10000;
-pub const O_NOFOLLOW: ::c_int = 0x20000;
-
-pub const MAP_LOCKED: ::c_int = 0x02000;
-pub const MAP_NORESERVE: ::c_int = 0x04000;
-
-pub const EDEADLOCK: ::c_int = 35;
-
-pub const SO_PEERCRED: ::c_int = 17;
-pub const SO_RCVLOWAT: ::c_int = 18;
-pub const SO_SNDLOWAT: ::c_int = 19;
-pub const SO_RCVTIMEO: ::c_int = 20;
-pub const SO_SNDTIMEO: ::c_int = 21;
-
-pub const FIOCLEX: ::c_ulong = 0x5451;
-pub const FIONBIO: ::c_ulong = 0x5421;
-
-pub const PTRACE_GETFPREGS: ::c_uint = 14;
-pub const PTRACE_SETFPREGS: ::c_uint = 15;
-pub const PTRACE_GETFPXREGS: ::c_uint = 18;
-pub const PTRACE_SETFPXREGS: ::c_uint = 19;
-pub const PTRACE_GETREGS: ::c_uint = 12;
-pub const PTRACE_SETREGS: ::c_uint = 13;
-
 s! {
     pub struct stat {
         pub st_dev: ::dev_t,
@@ -79,4 +51,59 @@ s! {
     pub struct pthread_attr_t {
         __size: [u64; 7]
     }
+
+    pub struct mcontext_t {
+        __private: [u64; 32],
+    }
+
+    pub struct ucontext_t {
+        pub uc_flags: ::c_ulong,
+        pub uc_link: *mut ucontext_t,
+        pub uc_stack: ::stack_t,
+        pub uc_mcontext: mcontext_t,
+        pub uc_sigmask: ::sigset_t,
+        __private: [u8; 512],
+    }
+}
+
+pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40;
+pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4;
+
+pub const O_DIRECT: ::c_int = 0x4000;
+pub const O_DIRECTORY: ::c_int = 0x10000;
+pub const O_NOFOLLOW: ::c_int = 0x20000;
+
+pub const MAP_LOCKED: ::c_int = 0x02000;
+pub const MAP_NORESERVE: ::c_int = 0x04000;
+pub const MAP_32BIT: ::c_int = 0x0040;
+
+pub const EDEADLOCK: ::c_int = 35;
+
+pub const SO_PEERCRED: ::c_int = 17;
+pub const SO_RCVLOWAT: ::c_int = 18;
+pub const SO_SNDLOWAT: ::c_int = 19;
+pub const SO_RCVTIMEO: ::c_int = 20;
+pub const SO_SNDTIMEO: ::c_int = 21;
+
+pub const FIOCLEX: ::c_ulong = 0x5451;
+pub const FIONBIO: ::c_ulong = 0x5421;
+
+pub const PTRACE_GETFPREGS: ::c_uint = 14;
+pub const PTRACE_SETFPREGS: ::c_uint = 15;
+pub const PTRACE_GETFPXREGS: ::c_uint = 18;
+pub const PTRACE_SETFPXREGS: ::c_uint = 19;
+pub const PTRACE_GETREGS: ::c_uint = 12;
+pub const PTRACE_SETREGS: ::c_uint = 13;
+
+pub const SYS_gettid: ::c_long = 186;
+pub const SYS_perf_event_open: ::c_long = 298;
+
+extern {
+    pub fn getcontext(ucp: *mut ucontext_t) -> ::c_int;
+    pub fn setcontext(ucp: *const ucontext_t) -> ::c_int;
+    pub fn makecontext(ucp: *mut ucontext_t,
+                       func:  extern fn (),
+                       argc: ::c_int, ...);
+    pub fn swapcontext(uocp: *mut ucontext_t,
+                       ucp: *const ucontext_t) -> ::c_int;
 }
index d043f224493824d1dbfaefa8db040723b55305ae..337b625936733481a753000e49ef221aec5ee8c4 100644 (file)
@@ -37,7 +37,6 @@ s! {
         __unused5: *mut ::c_void,
     }
 
-
     pub struct ucred {
         pub pid: ::pid_t,
         pub uid: ::uid_t,
@@ -88,6 +87,39 @@ s! {
         pub l_len: ::off_t,
         pub l_pid: ::pid_t,
     }
+
+    pub struct ipc_perm {
+        pub __key: ::key_t,
+        pub uid: ::uid_t,
+        pub gid: ::gid_t,
+        pub cuid: ::uid_t,
+        pub cgid: ::gid_t,
+        pub mode: ::c_ushort,
+        __pad1: ::c_ushort,
+        pub __seq: ::c_ushort,
+        __pad2: ::c_ushort,
+        __unused1: ::c_ulong,
+        __unused2: ::c_ulong
+    }
+
+    pub struct shmid_ds {
+        pub shm_perm: ::ipc_perm,
+        pub shm_segsz: ::size_t,
+        pub shm_atime: ::time_t,
+        #[cfg(target_pointer_width = "32")]
+        __unused1: ::c_ulong,
+        pub shm_dtime: ::time_t,
+        #[cfg(target_pointer_width = "32")]
+        __unused2: ::c_ulong,
+        pub shm_ctime: ::time_t,
+        #[cfg(target_pointer_width = "32")]
+        __unused3: ::c_ulong,
+        pub shm_cpid: ::pid_t,
+        pub shm_lpid: ::pid_t,
+        pub shm_nattch: ::shmatt_t,
+        __unused4: ::c_ulong,
+        __unused5: ::c_ulong
+    }
 }
 
 pub const RLIMIT_RSS: ::c_int = 5;
@@ -111,6 +143,13 @@ pub const O_FSYNC: ::c_int = 0x101000;
 
 pub const SOCK_NONBLOCK: ::c_int = O_NONBLOCK;
 
+pub const LC_PAPER: ::c_int = 7;
+pub const LC_NAME: ::c_int = 8;
+pub const LC_ADDRESS: ::c_int = 9;
+pub const LC_TELEPHONE: ::c_int = 10;
+pub const LC_MEASUREMENT: ::c_int = 11;
+pub const LC_IDENTIFICATION: ::c_int = 12;
+
 pub const MAP_ANON: ::c_int = 0x0020;
 pub const MAP_ANONYMOUS: ::c_int = 0x0020;
 pub const MAP_GROWSDOWN: ::c_int = 0x0100;
@@ -425,8 +464,9 @@ extern {
                      sz: ::c_int) -> ::c_int;
     pub fn glob64(pattern: *const ::c_char,
                   flags: ::c_int,
-                  errfunc: ::dox::Option<extern "C" fn(epath: *const ::c_char,
-                                                       errno: ::c_int) -> ::c_int>,
+                  errfunc: ::dox::Option<extern fn(epath: *const ::c_char,
+                                                   errno: ::c_int)
+                                                   -> ::c_int>,
                   pglob: *mut glob64_t) -> ::c_int;
     pub fn globfree64(pglob: *mut glob64_t);
     pub fn getnameinfo(sa: *const ::sockaddr,
@@ -458,41 +498,6 @@ cfg_if! {
         mod b64;
         pub use self::b64::*;
     } else {
-        // ...
-    }
-}
-
-s! {
-    pub struct ipc_perm {
-        pub __key: ::key_t,
-        pub uid: ::uid_t,
-        pub gid: ::gid_t,
-        pub cuid: ::uid_t,
-        pub cgid: ::gid_t,
-        pub mode: ::c_ushort,
-        __pad1: ::c_ushort,
-        pub __seq: ::c_ushort,
-        __pad2: ::c_ushort,
-        __unused1: ::c_ulong,
-        __unused2: ::c_ulong
-    }
-
-    pub struct shmid_ds {
-        pub shm_perm: ::ipc_perm,
-        pub shm_segsz: ::size_t,
-        pub shm_atime: ::time_t,
-        #[cfg(target_pointer_width = "32")]
-        __unused1: ::c_ulong,
-        pub shm_dtime: ::time_t,
-        #[cfg(target_pointer_width = "32")]
-        __unused2: ::c_ulong,
-        pub shm_ctime: ::time_t,
-        #[cfg(target_pointer_width = "32")]
-        __unused3: ::c_ulong,
-        pub shm_cpid: ::pid_t,
-        pub shm_lpid: ::pid_t,
-        pub shm_nattch: ::shmatt_t,
-        __unused4: ::c_ulong,
-        __unused5: ::c_ulong
+        // Unknown target_arch
     }
 }
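
This file gains the relocated ipc_perm/shmid_ds structs and the glibc-specific locale categories LC_PAPER through LC_IDENTIFICATION. Below is a small sketch of querying one of the new categories; setlocale is declared locally for illustration rather than assumed to be exported by libc at this revision.

    // Sketch only: setlocale is declared here for the example; LC_MEASUREMENT
    // (11) is one of the glibc-specific categories added in this hunk.
    extern crate libc;

    use std::ffi::CStr;
    use libc::{c_char, c_int};

    extern "C" {
        fn setlocale(category: c_int, locale: *const c_char) -> *mut c_char;
    }

    fn main() {
        unsafe {
            // Passing a null locale just queries the current setting.
            let cur = setlocale(libc::LC_MEASUREMENT, std::ptr::null());
            if !cur.is_null() {
                println!("LC_MEASUREMENT = {}", CStr::from_ptr(cur).to_string_lossy());
            }
        }
    }
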
index 12cff8352c8f30e76dd4d9b0f457f417933a39ba..c2405e3d1b7b62e8340ea39ee37d8ace427fe6ec 100644 (file)
@@ -5,6 +5,7 @@ pub type pthread_key_t = ::c_uint;
 pub type speed_t = ::c_uint;
 pub type tcflag_t = ::c_uint;
 pub type loff_t = ::c_longlong;
+pub type clockid_t = ::c_int;
 
 pub enum timezone {}
 
@@ -61,6 +62,13 @@ s! {
         pub ai_next: *mut addrinfo,
     }
 
+    pub struct sockaddr_nl {
+        pub nl_family: ::sa_family_t,
+        nl_pad: ::c_ushort,
+        pub nl_pid: u32,
+        pub nl_groups: u32
+    }
+
     pub struct sockaddr_ll {
         pub sll_family: ::c_ushort,
         pub sll_protocol: ::c_ushort,
@@ -124,13 +132,45 @@ s! {
         pub machine: [::c_char; 65],
         pub domainname: [::c_char; 65]
     }
+
+    pub struct lconv {
+        pub decimal_point: *mut ::c_char,
+        pub thousands_sep: *mut ::c_char,
+        pub grouping: *mut ::c_char,
+        pub int_curr_symbol: *mut ::c_char,
+        pub currency_symbol: *mut ::c_char,
+        pub mon_decimal_point: *mut ::c_char,
+        pub mon_thousands_sep: *mut ::c_char,
+        pub mon_grouping: *mut ::c_char,
+        pub positive_sign: *mut ::c_char,
+        pub negative_sign: *mut ::c_char,
+        pub int_frac_digits: ::c_char,
+        pub frac_digits: ::c_char,
+        pub p_cs_precedes: ::c_char,
+        pub p_sep_by_space: ::c_char,
+        pub n_cs_precedes: ::c_char,
+        pub n_sep_by_space: ::c_char,
+        pub p_sign_posn: ::c_char,
+        pub n_sign_posn: ::c_char,
+        pub int_p_cs_precedes: ::c_char,
+        pub int_p_sep_by_space: ::c_char,
+        pub int_n_cs_precedes: ::c_char,
+        pub int_n_sep_by_space: ::c_char,
+        pub int_p_sign_posn: ::c_char,
+        pub int_n_sign_posn: ::c_char,
+    }
 }
 
 // intentionally not public, only used for fd_set
-#[cfg(target_pointer_width = "32")]
-const ULONG_SIZE: usize = 32;
-#[cfg(target_pointer_width = "64")]
-const ULONG_SIZE: usize = 64;
+cfg_if! {
+    if #[cfg(target_pointer_width = "32")] {
+        const ULONG_SIZE: usize = 32;
+    } else if #[cfg(target_pointer_width = "64")] {
+        const ULONG_SIZE: usize = 64;
+    } else {
+        // Unknown target_pointer_width
+    }
+}
 
 pub const EXIT_FAILURE: ::c_int = 1;
 pub const EXIT_SUCCESS: ::c_int = 0;
@@ -149,13 +189,35 @@ pub const F_SETFD: ::c_int = 2;
 pub const F_GETFL: ::c_int = 3;
 pub const F_SETFL: ::c_int = 4;
 
+// Linux-specific fcntls
+pub const F_SETLEASE: ::c_int = 1024;
+pub const F_GETLEASE: ::c_int = 1025;
+pub const F_NOTIFY: ::c_int = 1026;
+pub const F_DUPFD_CLOEXEC: ::c_int = 1030;
+pub const F_SETPIPE_SZ: ::c_int = 1031;
+pub const F_GETPIPE_SZ: ::c_int = 1032;
+
+// TODO(#235): Include file sealing fcntls once we have a way to verify them.
+
 pub const SIGTRAP: ::c_int = 5;
 
 pub const PTHREAD_CREATE_JOINABLE: ::c_int = 0;
 pub const PTHREAD_CREATE_DETACHED: ::c_int = 1;
 
-pub const CLOCK_REALTIME: ::c_int = 0;
-pub const CLOCK_MONOTONIC: ::c_int = 1;
+pub const CLOCK_REALTIME: clockid_t = 0;
+pub const CLOCK_MONOTONIC: clockid_t = 1;
+pub const CLOCK_PROCESS_CPUTIME_ID: clockid_t = 2;
+pub const CLOCK_THREAD_CPUTIME_ID: clockid_t = 3;
+pub const CLOCK_MONOTONIC_RAW: clockid_t = 4;
+pub const CLOCK_REALTIME_COARSE: clockid_t = 5;
+pub const CLOCK_MONOTONIC_COARSE: clockid_t = 6;
+pub const CLOCK_BOOTTIME: clockid_t = 7;
+pub const CLOCK_REALTIME_ALARM: clockid_t = 8;
+pub const CLOCK_BOOTTIME_ALARM: clockid_t = 9;
+// TODO(#247) Someday our Travis shall have glibc 2.21 (released in February
+// 2015). See also musl/mod.rs
+// pub const CLOCK_SGI_CYCLE: clockid_t = 10;
+// pub const CLOCK_TAI: clockid_t = 11;
 
 pub const RLIMIT_CPU: ::c_int = 0;
 pub const RLIMIT_FSIZE: ::c_int = 1;
@@ -222,6 +284,14 @@ pub const PROT_READ: ::c_int = 1;
 pub const PROT_WRITE: ::c_int = 2;
 pub const PROT_EXEC: ::c_int = 4;
 
+pub const LC_CTYPE: ::c_int = 0;
+pub const LC_NUMERIC: ::c_int = 1;
+pub const LC_TIME: ::c_int = 2;
+pub const LC_COLLATE: ::c_int = 3;
+pub const LC_MONETARY: ::c_int = 4;
+pub const LC_MESSAGES: ::c_int = 5;
+pub const LC_ALL: ::c_int = 6;
+
 pub const MAP_FILE: ::c_int = 0x0000;
 pub const MAP_SHARED: ::c_int = 0x0001;
 pub const MAP_PRIVATE: ::c_int = 0x0002;
@@ -232,9 +302,12 @@ pub const MAP_FAILED: *mut ::c_void = !0 as *mut ::c_void;
 pub const MCL_CURRENT: ::c_int = 0x0001;
 pub const MCL_FUTURE: ::c_int = 0x0002;
 
+// MS_ flags for msync(2)
 pub const MS_ASYNC: ::c_int = 0x0001;
 pub const MS_INVALIDATE: ::c_int = 0x0002;
 pub const MS_SYNC: ::c_int = 0x0004;
+
+// MS_ flags for mount(2)
 pub const MS_RDONLY: ::c_ulong = 0x01;
 pub const MS_NOSUID: ::c_ulong = 0x02;
 pub const MS_NODEV: ::c_ulong = 0x04;
@@ -254,6 +327,10 @@ pub const MS_UNBINDABLE: ::c_ulong = 0x020000;
 pub const MS_PRIVATE: ::c_ulong = 0x040000;
 pub const MS_SLAVE: ::c_ulong = 0x080000;
 pub const MS_SHARED: ::c_ulong = 0x100000;
+pub const MS_RELATIME: ::c_ulong = 0x200000;
+pub const MS_KERNMOUNT: ::c_ulong = 0x400000;
+pub const MS_I_VERSION: ::c_ulong = 0x800000;
+pub const MS_STRICTATIME: ::c_ulong = 0x1000000;
 pub const MS_ACTIVE: ::c_ulong = 0x40000000;
 pub const MS_NOUSER: ::c_ulong = 0x80000000;
 pub const MS_MGC_VAL: ::c_ulong = 0xc0ed0000;
@@ -527,6 +604,12 @@ pub const CLONE_CHILD_CLEARTID: ::c_int = 0x200000;
 pub const CLONE_DETACHED: ::c_int = 0x400000;
 pub const CLONE_UNTRACED: ::c_int = 0x800000;
 pub const CLONE_CHILD_SETTID: ::c_int = 0x01000000;
+pub const CLONE_NEWUTS: ::c_int = 0x04000000;
+pub const CLONE_NEWIPC: ::c_int = 0x08000000;
+pub const CLONE_NEWUSER: ::c_int = 0x10000000;
+pub const CLONE_NEWPID: ::c_int = 0x20000000;
+pub const CLONE_NEWNET: ::c_int = 0x40000000;
+pub const CLONE_IO: ::c_int = 0x80000000;
 
 pub const WNOHANG: ::c_int = 1;
 
@@ -544,6 +627,9 @@ pub const POSIX_FADV_WILLNEED: ::c_int = 3;
 pub const POSIX_FADV_DONTNEED: ::c_int = 4;
 pub const POSIX_FADV_NOREUSE: ::c_int = 5;
 
+pub const AT_FDCWD: ::c_int = -100;
+pub const AT_SYMLINK_NOFOLLOW: ::c_int = 0x100;
+
 f! {
     pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () {
         let fd = fd as usize;
@@ -593,7 +679,8 @@ extern {
     pub fn fdatasync(fd: ::c_int) -> ::c_int;
     pub fn mincore(addr: *mut ::c_void, len: ::size_t,
                    vec: *mut ::c_uchar) -> ::c_int;
-    pub fn clock_gettime(clk_id: ::c_int, tp: *mut ::timespec) -> ::c_int;
+    pub fn clock_getres(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int;
+    pub fn clock_gettime(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int;
     pub fn prctl(option: ::c_int, ...) -> ::c_int;
     pub fn pthread_getattr_np(native: ::pthread_t,
                               attr: *mut ::pthread_attr_t) -> ::c_int;
@@ -605,7 +692,9 @@ extern {
     pub fn memalign(align: ::size_t, size: ::size_t) -> *mut ::c_void;
     pub fn setgroups(ngroups: ::size_t,
                      ptr: *const ::gid_t) -> ::c_int;
-    pub fn sched_setscheduler(pid: ::pid_t, policy: ::c_int, param: *const sched_param) -> ::c_int;
+    pub fn sched_setscheduler(pid: ::pid_t,
+                              policy: ::c_int,
+                              param: *const sched_param) -> ::c_int;
     pub fn sched_getscheduler(pid: ::pid_t) -> ::c_int;
     pub fn sched_get_priority_max(policy: ::c_int) -> ::c_int;
     pub fn sched_get_priority_min(policy: ::c_int) -> ::c_int;
@@ -632,7 +721,9 @@ extern {
                  arg: *mut ::c_void, ...) -> ::c_int;
     pub fn statfs(path: *const ::c_char, buf: *mut statfs) -> ::c_int;
     pub fn fstatfs(fd: ::c_int, buf: *mut statfs) -> ::c_int;
-    pub fn memrchr(cx: *const ::c_void, c: ::c_int, n: ::size_t) -> *mut ::c_void;
+    pub fn memrchr(cx: *const ::c_void,
+                   c: ::c_int,
+                   n: ::size_t) -> *mut ::c_void;
     pub fn syscall(num: ::c_long, ...) -> ::c_long;
     pub fn sendfile(out_fd: ::c_int,
                     in_fd: ::c_int,
@@ -653,8 +744,11 @@ extern {
                     nr_segs: ::size_t,
                     flags: ::c_uint) -> ::ssize_t;
 
-    pub fn posix_fadvise(fd: ::c_int, offset: ::off_t, len: ::off_t, 
+    pub fn posix_fadvise(fd: ::c_int, offset: ::off_t, len: ::off_t,
                          advise: ::c_int) -> ::c_int;
+    pub fn futimens(fd: ::c_int, times: *const ::timespec) -> ::c_int;
+    pub fn utimensat(dirfd: ::c_int, path: *const ::c_char,
+                     times: *const ::timespec, flag: ::c_int) -> ::c_int;
 }
 
 cfg_if! {
@@ -666,6 +760,6 @@ cfg_if! {
         mod android;
         pub use self::android::*;
     } else {
-        // ...
+        // Unknown target_os
     }
 }
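
This file introduces clockid_t, the extended CLOCK_* set, clock_getres, and futimens/utimensat. A short sketch of reading the monotonic clock through the new bindings, assuming a Linux or Android target built from this tree:

    // Sketch only: read CLOCK_MONOTONIC and report its resolution.
    extern crate libc;

    fn main() {
        let mut res = libc::timespec { tv_sec: 0, tv_nsec: 0 };
        let mut now = libc::timespec { tv_sec: 0, tv_nsec: 0 };
        unsafe {
            // clock_getres reports the clock granularity; clock_gettime reads it.
            if libc::clock_getres(libc::CLOCK_MONOTONIC, &mut res) == 0
                && libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now) == 0 {
                println!("monotonic: {}.{:09}s (res {}ns)", now.tv_sec, now.tv_nsec, res.tv_nsec);
            }
        }
    }
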
index 4587a89ee1eaa1d8d94ded30aa5fe35adb638f0e..a47d1baaaab1be81fc0d6c20c207d4bbadfc2a34 100644 (file)
@@ -1,6 +1,7 @@
 pub type c_char = i8;
 pub type c_long = i64;
 pub type c_ulong = u64;
+pub type clockid_t = ::c_int;
 
 pub type blkcnt_t = i64;
 pub type clock_t = i64;
@@ -540,8 +541,18 @@ pub const PTHREAD_STACK_MIN: ::size_t = 4096;
 
 pub const SIGSTKSZ: ::size_t = 8192;
 
-pub const CLOCK_REALTIME: ::c_int = 3;
-pub const CLOCK_MONOTONIC: ::c_int = 4;
+// https://illumos.org/man/3c/clock_gettime
+// https://github.com/illumos/illumos-gate/
+//   blob/HEAD/usr/src/lib/libc/amd64/sys/__clock_gettime.s
+// clock_gettime(3c) doesn't seem to accept anything other than CLOCK_REALTIME
+// or __CLOCK_REALTIME0
+//
+// https://github.com/illumos/illumos-gate/
+//   blob/HEAD/usr/src/uts/common/sys/time_impl.h
+// Confusing! CLOCK_HIGHRES==CLOCK_MONOTONIC==4
+// __CLOCK_REALTIME0==0 is an obsoleted version of CLOCK_REALTIME==3
+pub const CLOCK_REALTIME: clockid_t = 3;
+pub const CLOCK_MONOTONIC: clockid_t = 4;
 
 pub const RLIMIT_CPU: ::c_int = 0;
 pub const RLIMIT_FSIZE: ::c_int = 1;
@@ -752,7 +763,8 @@ extern {
     pub fn ioctl(fildes: ::c_int, request: ::c_int, ...) -> ::c_int;
     pub fn mprotect(addr: *const ::c_void, len: ::size_t, prot: ::c_int)
                     -> ::c_int;
-    pub fn clock_gettime(clk_id: ::c_int, tp: *mut ::timespec) -> ::c_int;
+    pub fn clock_getres(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int;
+    pub fn clock_gettime(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int;
     pub fn getnameinfo(sa: *const ::sockaddr,
                        salen: ::socklen_t,
                        host: *mut ::c_char,
@@ -765,5 +777,6 @@ extern {
                       buf: *mut ::c_char,
                       buflen: ::size_t) -> *const passwd;
     pub fn readdir(dirp: *mut ::DIR) -> *const ::dirent;
+    pub fn fdatasync(fd: ::c_int) -> ::c_int;
 }
 
index a4783cb0810a7f36b13853df6567d7e570a0d222..21b6e13b747ec42145d3395b3fe21710078c3736 100644 (file)
@@ -72,9 +72,11 @@ cfg_if! {
     if #[cfg(all(target_env = "gnu"))] {
         pub const L_tmpnam: ::c_uint = 14;
         pub const TMP_MAX: ::c_uint = 0x7fff;
-    } else {
+    } else if #[cfg(all(target_env = "msvc"))] {
         pub const L_tmpnam: ::c_uint = 260;
         pub const TMP_MAX: ::c_uint = 0x7fff_ffff;
+    } else {
+        // Unknown target_env
     }
 }
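
The else branch now names the unhandled case instead of silently falling through to the MSVC values, so an unknown target_env defines nothing and fails to compile at the point of use rather than picking wrong constants. A standalone sketch of the same pattern, using the external cfg-if crate rather than libc's internal cfg_if! macro (an assumption made for the example):

    // Sketch only: requires the cfg-if crate as a dependency.
    #[macro_use]
    extern crate cfg_if;

    cfg_if! {
        if #[cfg(target_env = "gnu")] {
            pub const TMP_NAME_LEN: u32 = 14;
        } else if #[cfg(target_env = "msvc")] {
            pub const TMP_NAME_LEN: u32 = 260;
        } else {
            // Unknown target_env: nothing is defined, so any use is a compile error.
        }
    }

    fn main() {
        show();
    }

    #[cfg(any(target_env = "gnu", target_env = "msvc"))]
    fn show() {
        println!("TMP_NAME_LEN = {}", TMP_NAME_LEN);
    }

    #[cfg(not(any(target_env = "gnu", target_env = "msvc")))]
    fn show() {}
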
 
@@ -96,7 +98,7 @@ pub const S_IEXEC: ::c_int = 64;
 pub const S_IWRITE: ::c_int = 128;
 pub const S_IREAD: ::c_int = 256;
 
-#[cfg(target_env = "msvc")]
+#[cfg(target_env = "msvc")] // " if " -- appease style checker
 #[link(name = "msvcrt")]
 extern {}
 
@@ -160,7 +162,9 @@ extern {
     #[link_name = "_lseek"]
     pub fn lseek(fd: ::c_int, offset: c_long, origin: ::c_int) -> c_long;
     #[link_name = "_pipe"]
-    pub fn pipe(fds: *mut ::c_int, psize: ::c_uint, textmode: ::c_int) -> ::c_int;
+    pub fn pipe(fds: *mut ::c_int,
+                psize: ::c_uint,
+                textmode: ::c_int) -> ::c_int;
     #[link_name = "_read"]
     pub fn read(fd: ::c_int, buf: *mut ::c_void, count: ::c_uint) -> ::c_int;
     #[link_name = "_rmdir"]
index 531be63b7bb73468615480b4f3b75a5bb12a0bed..e651f5bc1829be40319d6f8626e8decaf9dc448d 100644 (file)
             issue = "27703")]
 #![feature(core_float)]
 #![feature(core_intrinsics)]
-#![feature(num_bits_bytes)]
 #![feature(staged_api)]
 #![feature(step_by)]
 #![feature(custom_attribute)]
 #![allow(unused_attributes)]
 
-#![cfg_attr(test, feature(test, rand, rustc_private))]
+#![cfg_attr(test, feature(test, rand))]
 
 #![allow(deprecated)]
 
 #[cfg(test)]
 #[macro_use]
 extern crate std;
-#[cfg(test)]
-#[macro_use]
-extern crate log;
 
 use core::f64;
 use core::intrinsics;
@@ -70,6 +66,7 @@ mod rand_impls;
 // needed by librand; this is necessary because librand doesn't
 // depend on libstd.  This will go away when librand is integrated
 // into libstd.
+#[doc(hidden)]
 trait FloatMath : Sized {
     fn exp(self) -> Self;
     fn ln(self) -> Self;
index 34b7f37a6788f98f2023edc20898809d454ed2c0..1185ad25485bd32ca3efbe67dd32871ed1c7025d 100644 (file)
 //! The implementations of `Rand` for the built-in types.
 
 use core::char;
-use core::isize;
-use core::usize;
+use core::mem;
 
 use {Rand, Rng};
 
 impl Rand for isize {
     #[inline]
     fn rand<R: Rng>(rng: &mut R) -> isize {
-        if isize::BITS == 32 {
+        if mem::size_of::<isize>() == 4 {
             rng.gen::<i32>() as isize
         } else {
             rng.gen::<i64>() as isize
@@ -58,7 +57,7 @@ impl Rand for i64 {
 impl Rand for usize {
     #[inline]
     fn rand<R: Rng>(rng: &mut R) -> usize {
-        if usize::BITS == 32 {
+        if mem::size_of::<usize>() == 4 {
             rng.gen::<u32>() as usize
         } else {
             rng.gen::<u64>() as usize
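
The unstable usize::BITS / isize::BITS constants are replaced with a mem::size_of check, matching the removal of the num_bits_bytes feature earlier in this commit. The same dispatch in isolation, as a hedged sketch:

    use std::mem;

    // Widen a u64 into usize with the diff's dispatch: branch on the byte size
    // of usize instead of the unstable `usize::BITS` associated constant.
    fn word_from_u64(x: u64) -> usize {
        if mem::size_of::<usize>() == 4 {
            (x as u32) as usize
        } else {
            x as usize
        }
    }

    fn main() {
        println!("{}", word_from_u64(0x1_0000_0001));
    }
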
index 533f2ee3b3e614c8be1429297815c598544a411a..4edbeab5dfb11049cbedf1255d30608c16abcc6f 100644 (file)
        test(attr(deny(warnings))))]
 #![cfg_attr(not(stage0), deny(warnings))]
 
-#![feature(copy_from_slice)]
 #![feature(rustc_private)]
 #![feature(staged_api)]
+#![feature(question_mark)]
 
 #![cfg_attr(test, feature(test))]
 
@@ -165,7 +165,7 @@ impl<'doc> Doc<'doc> {
         }
     }
 
-    pub fn get<'a>(&'a self, tag: usize) -> Doc<'a> {
+    pub fn get(&self, tag: usize) -> Doc<'doc> {
         reader::get_doc(*self, tag)
     }
 
@@ -173,7 +173,7 @@ impl<'doc> Doc<'doc> {
         self.start == self.end
     }
 
-    pub fn as_str_slice<'a>(&'a self) -> &'a str {
+    pub fn as_str_slice(&self) -> &'doc str {
         str::from_utf8(&self.data[self.start..self.end]).unwrap()
     }
 
@@ -398,8 +398,8 @@ pub mod reader {
     }
 
     pub fn doc_at<'a>(data: &'a [u8], start: usize) -> DecodeResult<TaggedDoc<'a>> {
-        let elt_tag = try!(tag_at(data, start));
-        let elt_size = try!(tag_len_at(data, elt_tag));
+        let elt_tag = tag_at(data, start)?;
+        let elt_size = tag_len_at(data, elt_tag)?;
         let end = elt_size.next + elt_size.val;
         Ok(TaggedDoc {
             tag: elt_tag.val,
@@ -581,7 +581,7 @@ pub mod reader {
             if self.pos >= self.parent.end {
                 return Err(Expected(format!("no more documents in current node!")));
             }
-            let TaggedDoc { tag: r_tag, doc: r_doc } = try!(doc_at(self.parent.data, self.pos));
+            let TaggedDoc { tag: r_tag, doc: r_doc } = doc_at(self.parent.data, self.pos)?;
             debug!("self.parent={:?}-{:?} self.pos={:?} r_tag={:?} r_doc={:?}-{:?}",
                    self.parent.start,
                    self.parent.end,
@@ -607,12 +607,12 @@ pub mod reader {
         fn push_doc<T, F>(&mut self, exp_tag: EbmlEncoderTag, f: F) -> DecodeResult<T>
             where F: FnOnce(&mut Decoder<'doc>) -> DecodeResult<T>
         {
-            let d = try!(self.next_doc(exp_tag));
+            let d = self.next_doc(exp_tag)?;
             let old_parent = self.parent;
             let old_pos = self.pos;
             self.parent = d;
             self.pos = d.start;
-            let r = try!(f(self));
+            let r = f(self)?;
             self.parent = old_parent;
             self.pos = old_pos;
             Ok(r)
@@ -624,7 +624,7 @@ pub mod reader {
                 return Ok(0);
             }
 
-            let TaggedDoc { tag: r_tag, doc: r_doc } = try!(doc_at(self.parent.data, self.pos));
+            let TaggedDoc { tag: r_tag, doc: r_doc } = doc_at(self.parent.data, self.pos)?;
             let r = if r_tag == (EsSub8 as usize) {
                 doc_as_u8(r_doc) as usize
             } else if r_tag == (EsSub32 as usize) {
@@ -659,7 +659,7 @@ pub mod reader {
                 return Err(Expected(format!("no more documents in current node!")));
             }
 
-            let TaggedDoc { tag: r_tag, doc: r_doc } = try!(doc_at(self.parent.data, self.pos));
+            let TaggedDoc { tag: r_tag, doc: r_doc } = doc_at(self.parent.data, self.pos)?;
             let r = if first_tag as usize <= r_tag && r_tag <= last_tag as usize {
                 match r_tag - first_tag as usize {
                     0 => doc_as_u8(r_doc) as u64,
@@ -689,11 +689,11 @@ pub mod reader {
         pub fn read_opaque<R, F>(&mut self, op: F) -> DecodeResult<R>
             where F: FnOnce(&mut opaque::Decoder, Doc) -> DecodeResult<R>
         {
-            let doc = try!(self.next_doc(EsOpaque));
+            let doc = self.next_doc(EsOpaque)?;
 
             let result = {
                 let mut opaque_decoder = opaque::Decoder::new(doc.data, doc.start);
-                try!(op(&mut opaque_decoder, doc))
+                op(&mut opaque_decoder, doc)?
             };
 
             Ok(result)
@@ -718,16 +718,16 @@ pub mod reader {
             self._next_int(EsU8, EsU64)
         }
         fn read_u32(&mut self) -> DecodeResult<u32> {
-            Ok(try!(self._next_int(EsU8, EsU32)) as u32)
+            Ok(self._next_int(EsU8, EsU32)? as u32)
         }
         fn read_u16(&mut self) -> DecodeResult<u16> {
-            Ok(try!(self._next_int(EsU8, EsU16)) as u16)
+            Ok(self._next_int(EsU8, EsU16)? as u16)
         }
         fn read_u8(&mut self) -> DecodeResult<u8> {
-            Ok(doc_as_u8(try!(self.next_doc(EsU8))))
+            Ok(doc_as_u8(self.next_doc(EsU8)?))
         }
         fn read_uint(&mut self) -> DecodeResult<usize> {
-            let v = try!(self._next_int(EsU8, EsU64));
+            let v = self._next_int(EsU8, EsU64)?;
             if v > (::std::usize::MAX as u64) {
                 Err(IntTooBig(v as usize))
             } else {
@@ -736,19 +736,19 @@ pub mod reader {
         }
 
         fn read_i64(&mut self) -> DecodeResult<i64> {
-            Ok(try!(self._next_int(EsI8, EsI64)) as i64)
+            Ok(self._next_int(EsI8, EsI64)? as i64)
         }
         fn read_i32(&mut self) -> DecodeResult<i32> {
-            Ok(try!(self._next_int(EsI8, EsI32)) as i32)
+            Ok(self._next_int(EsI8, EsI32)? as i32)
         }
         fn read_i16(&mut self) -> DecodeResult<i16> {
-            Ok(try!(self._next_int(EsI8, EsI16)) as i16)
+            Ok(self._next_int(EsI8, EsI16)? as i16)
         }
         fn read_i8(&mut self) -> DecodeResult<i8> {
-            Ok(doc_as_u8(try!(self.next_doc(EsI8))) as i8)
+            Ok(doc_as_u8(self.next_doc(EsI8)?) as i8)
         }
         fn read_int(&mut self) -> DecodeResult<isize> {
-            let v = try!(self._next_int(EsI8, EsI64)) as i64;
+            let v = self._next_int(EsI8, EsI64)? as i64;
             if v > (isize::MAX as i64) || v < (isize::MIN as i64) {
                 debug!("FIXME \\#6122: Removing this makes this function miscompile");
                 Err(IntTooBig(v as usize))
@@ -758,22 +758,22 @@ pub mod reader {
         }
 
         fn read_bool(&mut self) -> DecodeResult<bool> {
-            Ok(doc_as_u8(try!(self.next_doc(EsBool))) != 0)
+            Ok(doc_as_u8(self.next_doc(EsBool)?) != 0)
         }
 
         fn read_f64(&mut self) -> DecodeResult<f64> {
-            let bits = doc_as_u64(try!(self.next_doc(EsF64)));
+            let bits = doc_as_u64(self.next_doc(EsF64)?);
             Ok(unsafe { transmute(bits) })
         }
         fn read_f32(&mut self) -> DecodeResult<f32> {
-            let bits = doc_as_u32(try!(self.next_doc(EsF32)));
+            let bits = doc_as_u32(self.next_doc(EsF32)?);
             Ok(unsafe { transmute(bits) })
         }
         fn read_char(&mut self) -> DecodeResult<char> {
-            Ok(char::from_u32(doc_as_u32(try!(self.next_doc(EsChar)))).unwrap())
+            Ok(char::from_u32(doc_as_u32(self.next_doc(EsChar)?)).unwrap())
         }
         fn read_str(&mut self) -> DecodeResult<String> {
-            Ok(try!(self.next_doc(EsStr)).as_str())
+            Ok(self.next_doc(EsStr)?.as_str())
         }
 
         // Compound types:
@@ -782,13 +782,13 @@ pub mod reader {
         {
             debug!("read_enum({})", name);
 
-            let doc = try!(self.next_doc(EsEnum));
+            let doc = self.next_doc(EsEnum)?;
 
             let (old_parent, old_pos) = (self.parent, self.pos);
             self.parent = doc;
             self.pos = self.parent.start;
 
-            let result = try!(f(self));
+            let result = f(self)?;
 
             self.parent = old_parent;
             self.pos = old_pos;
@@ -799,7 +799,7 @@ pub mod reader {
             where F: FnMut(&mut Decoder<'doc>, usize) -> DecodeResult<T>
         {
             debug!("read_enum_variant()");
-            let idx = try!(self._next_sub());
+            let idx = self._next_sub()?;
             debug!("  idx={}", idx);
 
             f(self, idx)
@@ -816,7 +816,7 @@ pub mod reader {
             where F: FnMut(&mut Decoder<'doc>, usize) -> DecodeResult<T>
         {
             debug!("read_enum_struct_variant()");
-            let idx = try!(self._next_sub());
+            let idx = self._next_sub()?;
             debug!("  idx={}", idx);
 
             f(self, idx)
@@ -904,7 +904,7 @@ pub mod reader {
         {
             debug!("read_seq()");
             self.push_doc(EsVec, move |d| {
-                let len = try!(d._next_sub());
+                let len = d._next_sub()?;
                 debug!("  len={}", len);
                 f(d, len)
             })
@@ -922,7 +922,7 @@ pub mod reader {
         {
             debug!("read_map()");
             self.push_doc(EsMap, move |d| {
-                let len = try!(d._next_sub());
+                let len = d._next_sub()?;
                 debug!("  len={}", len);
                 f(d, len)
             })
@@ -1020,10 +1020,10 @@ pub mod writer {
             assert!(tag_id >= NUM_IMPLICIT_TAGS);
 
             // Write the enum ID:
-            try!(write_tag(self.writer, tag_id));
+            write_tag(self.writer, tag_id)?;
 
             // Write a placeholder four-byte size.
-            let cur_pos = try!(self.writer.seek(SeekFrom::Current(0)));
+            let cur_pos = self.writer.seek(SeekFrom::Current(0))?;
             self.size_positions.push(cur_pos);
             let zeroes: &[u8] = &[0, 0, 0, 0];
             self.writer.write_all(zeroes)
@@ -1031,8 +1031,8 @@ pub mod writer {
 
         pub fn end_tag(&mut self) -> EncodeResult {
             let last_size_pos = self.size_positions.pop().unwrap();
-            let cur_pos = try!(self.writer.seek(SeekFrom::Current(0)));
-            try!(self.writer.seek(SeekFrom::Start(last_size_pos)));
+            let cur_pos = self.writer.seek(SeekFrom::Current(0))?;
+            self.writer.seek(SeekFrom::Start(last_size_pos))?;
             let size = (cur_pos - last_size_pos - 4) as usize;
 
             // relax the size encoding for small tags (bigger tags are costly to move).
@@ -1048,12 +1048,12 @@ pub mod writer {
                 }
 
                 // overwrite the size and data and continue
-                try!(write_vuint(self.writer, size));
-                try!(self.writer.write_all(&buf[..size]));
+                write_vuint(self.writer, size)?;
+                self.writer.write_all(&buf[..size])?;
             } else {
                 // overwrite the size with an overlong encoding and skip past the data
-                try!(write_sized_vuint(self.writer, size, 4));
-                try!(self.writer.seek(SeekFrom::Start(cur_pos)));
+                write_sized_vuint(self.writer, size, 4)?;
+                self.writer.seek(SeekFrom::Start(cur_pos))?;
             }
 
             debug!("End tag (size = {:?})", size);
@@ -1063,15 +1063,15 @@ pub mod writer {
         pub fn wr_tag<F>(&mut self, tag_id: usize, blk: F) -> EncodeResult
             where F: FnOnce() -> EncodeResult
         {
-            try!(self.start_tag(tag_id));
-            try!(blk());
+            self.start_tag(tag_id)?;
+            blk()?;
             self.end_tag()
         }
 
         pub fn wr_tagged_bytes(&mut self, tag_id: usize, b: &[u8]) -> EncodeResult {
             assert!(tag_id >= NUM_IMPLICIT_TAGS);
-            try!(write_tag(self.writer, tag_id));
-            try!(write_vuint(self.writer, b.len()));
+            write_tag(self.writer, tag_id)?;
+            write_vuint(self.writer, b.len())?;
             self.writer.write_all(b)
         }
 
@@ -1124,7 +1124,7 @@ pub mod writer {
 
         // for auto-serialization
         fn wr_tagged_raw_bytes(&mut self, tag_id: usize, b: &[u8]) -> EncodeResult {
-            try!(write_tag(self.writer, tag_id));
+            write_tag(self.writer, tag_id)?;
             self.writer.write_all(b)
         }
 
@@ -1200,11 +1200,11 @@ pub mod writer {
         pub fn emit_opaque<F>(&mut self, f: F) -> EncodeResult
             where F: FnOnce(&mut opaque::Encoder) -> EncodeResult
         {
-            try!(self.start_tag(EsOpaque as usize));
+            self.start_tag(EsOpaque as usize)?;
 
             {
                 let mut opaque_encoder = opaque::Encoder::new(self.writer);
-                try!(f(&mut opaque_encoder));
+                f(&mut opaque_encoder)?;
             }
 
             self.mark_stable_position();
@@ -1298,15 +1298,15 @@ pub mod writer {
         fn emit_enum<F>(&mut self, _name: &str, f: F) -> EncodeResult
             where F: FnOnce(&mut Encoder<'a>) -> EncodeResult
         {
-            try!(self.start_tag(EsEnum as usize));
-            try!(f(self));
+            self.start_tag(EsEnum as usize)?;
+            f(self)?;
             self.end_tag()
         }
 
         fn emit_enum_variant<F>(&mut self, _: &str, v_id: usize, _: usize, f: F) -> EncodeResult
             where F: FnOnce(&mut Encoder<'a>) -> EncodeResult
         {
-            try!(self._emit_tagged_sub(v_id));
+            self._emit_tagged_sub(v_id)?;
             f(self)
         }
 
@@ -1390,9 +1390,9 @@ pub mod writer {
                 return self.wr_tagged_bytes(EsVec as usize, &[]);
             }
 
-            try!(self.start_tag(EsVec as usize));
-            try!(self._emit_tagged_sub(len));
-            try!(f(self));
+            self.start_tag(EsVec as usize)?;
+            self._emit_tagged_sub(len)?;
+            f(self)?;
             self.end_tag()
         }
 
@@ -1400,8 +1400,8 @@ pub mod writer {
             where F: FnOnce(&mut Encoder<'a>) -> EncodeResult
         {
 
-            try!(self.start_tag(EsVecElt as usize));
-            try!(f(self));
+            self.start_tag(EsVecElt as usize)?;
+            f(self)?;
             self.end_tag()
         }
 
@@ -1413,9 +1413,9 @@ pub mod writer {
                 return self.wr_tagged_bytes(EsMap as usize, &[]);
             }
 
-            try!(self.start_tag(EsMap as usize));
-            try!(self._emit_tagged_sub(len));
-            try!(f(self));
+            self.start_tag(EsMap as usize)?;
+            self._emit_tagged_sub(len)?;
+            f(self)?;
             self.end_tag()
         }
 
@@ -1423,16 +1423,16 @@ pub mod writer {
             where F: FnOnce(&mut Encoder<'a>) -> EncodeResult
         {
 
-            try!(self.start_tag(EsMapKey as usize));
-            try!(f(self));
+            self.start_tag(EsMapKey as usize)?;
+            f(self)?;
             self.end_tag()
         }
 
         fn emit_map_elt_val<F>(&mut self, _idx: usize, f: F) -> EncodeResult
             where F: FnOnce(&mut Encoder<'a>) -> EncodeResult
         {
-            try!(self.start_tag(EsMapVal as usize));
-            try!(f(self));
+            self.start_tag(EsMapVal as usize)?;
+            f(self)?;
             self.end_tag()
         }
     }
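
The librbml changes above are a mechanical try!(expr) to expr? migration, enabled by the #![feature(question_mark)] attribute added to the crate root earlier in this diff; the ? operator was still feature-gated on this toolchain. A self-contained sketch of the equivalence, written against a current compiler where ? is stable:

    use std::num::ParseIntError;

    // Both forms early-return the Err variant; `a.parse()?` is the operator
    // equivalent of the older `try!(a.parse())` macro.
    fn parse_sum(a: &str, b: &str) -> Result<i64, ParseIntError> {
        let x: i64 = a.parse()?;
        let y: i64 = b.parse()?;
        Ok(x + y)
    }

    fn main() {
        assert_eq!(parse_sum("2", "40"), Ok(42));
        assert!(parse_sum("2", "forty").is_err());
    }
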
index 531a25dc1b766d8047e0e4ee47bd8a044e8825b1..10f419d169181da14639fa1eee06bf9d1d6cb1ee 100644 (file)
@@ -120,7 +120,7 @@ impl<'a> serialize::Encoder for Encoder<'a> {
     }
 
     fn emit_str(&mut self, v: &str) -> EncodeResult {
-        try!(self.emit_uint(v.len()));
+        self.emit_uint(v.len())?;
         let _ = self.cursor.write_all(v.as_bytes());
         Ok(())
     }
@@ -139,7 +139,7 @@ impl<'a> serialize::Encoder for Encoder<'a> {
                             -> EncodeResult
         where F: FnOnce(&mut Self) -> EncodeResult
     {
-        try!(self.emit_uint(v_id));
+        self.emit_uint(v_id)?;
         f(self)
     }
 
@@ -221,7 +221,7 @@ impl<'a> serialize::Encoder for Encoder<'a> {
     fn emit_seq<F>(&mut self, len: usize, f: F) -> EncodeResult
         where F: FnOnce(&mut Encoder<'a>) -> EncodeResult
     {
-        try!(self.emit_uint(len));
+        self.emit_uint(len)?;
         f(self)
     }
 
@@ -234,7 +234,7 @@ impl<'a> serialize::Encoder for Encoder<'a> {
     fn emit_map<F>(&mut self, len: usize, f: F) -> EncodeResult
         where F: FnOnce(&mut Encoder<'a>) -> EncodeResult
     {
-        try!(self.emit_uint(len));
+        self.emit_uint(len)?;
         f(self)
     }
 
@@ -356,27 +356,27 @@ impl<'a> serialize::Decoder for Decoder<'a> {
     }
 
     fn read_bool(&mut self) -> Result<bool, Self::Error> {
-        let value = try!(self.read_u8());
+        let value = self.read_u8()?;
         Ok(value != 0)
     }
 
     fn read_f64(&mut self) -> Result<f64, Self::Error> {
-        let bits = try!(self.read_u64());
+        let bits = self.read_u64()?;
         Ok(unsafe { ::std::mem::transmute(bits) })
     }
 
     fn read_f32(&mut self) -> Result<f32, Self::Error> {
-        let bits = try!(self.read_u32());
+        let bits = self.read_u32()?;
         Ok(unsafe { ::std::mem::transmute(bits) })
     }
 
     fn read_char(&mut self) -> Result<char, Self::Error> {
-        let bits = try!(self.read_u32());
+        let bits = self.read_u32()?;
         Ok(::std::char::from_u32(bits).unwrap())
     }
 
     fn read_str(&mut self) -> Result<String, Self::Error> {
-        let len = try!(self.read_uint());
+        let len = self.read_uint()?;
         let s = ::std::str::from_utf8(&self.data[self.position..self.position + len]).unwrap();
         self.position += len;
         Ok(s.to_string())
@@ -391,7 +391,7 @@ impl<'a> serialize::Decoder for Decoder<'a> {
     fn read_enum_variant<T, F>(&mut self, _: &[&str], mut f: F) -> Result<T, Self::Error>
         where F: FnMut(&mut Decoder<'a>, usize) -> Result<T, Self::Error>
     {
-        let disr = try!(self.read_uint());
+        let disr = self.read_uint()?;
         f(self, disr)
     }
 
@@ -404,7 +404,7 @@ impl<'a> serialize::Decoder for Decoder<'a> {
     fn read_enum_struct_variant<T, F>(&mut self, _: &[&str], mut f: F) -> Result<T, Self::Error>
         where F: FnMut(&mut Decoder<'a>, usize) -> Result<T, Self::Error>
     {
-        let disr = try!(self.read_uint());
+        let disr = self.read_uint()?;
         f(self, disr)
     }
 
@@ -483,7 +483,7 @@ impl<'a> serialize::Decoder for Decoder<'a> {
     fn read_seq<T, F>(&mut self, f: F) -> Result<T, Self::Error>
         where F: FnOnce(&mut Decoder<'a>, usize) -> Result<T, Self::Error>
     {
-        let len = try!(self.read_uint());
+        let len = self.read_uint()?;
         f(self, len)
     }
 
@@ -496,7 +496,7 @@ impl<'a> serialize::Decoder for Decoder<'a> {
     fn read_map<T, F>(&mut self, f: F) -> Result<T, Self::Error>
         where F: FnOnce(&mut Decoder<'a>, usize) -> Result<T, Self::Error>
     {
-        let len = try!(self.read_uint());
+        let len = self.read_uint()?;
         f(self, len)
     }
 
index e65b4355e9d371c934edcaf2c1a39a9c95d8cd5e..d77268219fcecb76e1fcf7730527da81085da824 100644 (file)
@@ -12,14 +12,12 @@ crate-type = ["dylib"]
 arena = { path = "../libarena" }
 flate = { path = "../libflate" }
 fmt_macros = { path = "../libfmt_macros" }
-getopts = { path = "../libgetopts" }
 graphviz = { path = "../libgraphviz" }
 log = { path = "../liblog" }
 rbml = { path = "../librbml" }
 rustc_back = { path = "../librustc_back" }
 rustc_bitflags = { path = "../librustc_bitflags" }
+rustc_const_math = { path = "../librustc_const_math" }
 rustc_data_structures = { path = "../librustc_data_structures" }
-rustc_front = { path = "../librustc_front" }
-rustc_llvm = { path = "../librustc_llvm" }
 serialize = { path = "../libserialize" }
 syntax = { path = "../libsyntax" }
diff --git a/src/librustc/cfg/construct.rs b/src/librustc/cfg/construct.rs
new file mode 100644 (file)
index 0000000..7049749
--- /dev/null
@@ -0,0 +1,594 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc_data_structures::graph;
+use cfg::*;
+use hir::def::Def;
+use hir::pat_util;
+use ty::{self, TyCtxt};
+use syntax::ast;
+use syntax::ptr::P;
+
+use hir::{self, PatKind};
+
+struct CFGBuilder<'a, 'tcx: 'a> {
+    tcx: &'a TyCtxt<'tcx>,
+    graph: CFGGraph,
+    fn_exit: CFGIndex,
+    loop_scopes: Vec<LoopScope>,
+}
+
+#[derive(Copy, Clone)]
+struct LoopScope {
+    loop_id: ast::NodeId,     // id of loop/while node
+    continue_index: CFGIndex, // where to go on a `continue`
+    break_index: CFGIndex,    // where to go on a `break`
+}
+
+pub fn construct(tcx: &TyCtxt,
+                 blk: &hir::Block) -> CFG {
+    let mut graph = graph::Graph::new();
+    let entry = graph.add_node(CFGNodeData::Entry);
+
+    // `fn_exit` is target of return exprs, which lies somewhere
+    // outside input `blk`. (Distinguishing `fn_exit` and `block_exit`
+    // also resolves chicken-and-egg problem that arises if you try to
+    // have return exprs jump to `block_exit` during construction.)
+    let fn_exit = graph.add_node(CFGNodeData::Exit);
+    let block_exit;
+
+    let mut cfg_builder = CFGBuilder {
+        graph: graph,
+        fn_exit: fn_exit,
+        tcx: tcx,
+        loop_scopes: Vec::new()
+    };
+    block_exit = cfg_builder.block(blk, entry);
+    cfg_builder.add_contained_edge(block_exit, fn_exit);
+    let CFGBuilder {graph, ..} = cfg_builder;
+    CFG {graph: graph,
+         entry: entry,
+         exit: fn_exit}
+}
+
+impl<'a, 'tcx> CFGBuilder<'a, 'tcx> {
+    fn block(&mut self, blk: &hir::Block, pred: CFGIndex) -> CFGIndex {
+        let mut stmts_exit = pred;
+        for stmt in &blk.stmts {
+            stmts_exit = self.stmt(stmt, stmts_exit);
+        }
+
+        let expr_exit = self.opt_expr(&blk.expr, stmts_exit);
+
+        self.add_ast_node(blk.id, &[expr_exit])
+    }
+
+    fn stmt(&mut self, stmt: &hir::Stmt, pred: CFGIndex) -> CFGIndex {
+        match stmt.node {
+            hir::StmtDecl(ref decl, id) => {
+                let exit = self.decl(&decl, pred);
+                self.add_ast_node(id, &[exit])
+            }
+
+            hir::StmtExpr(ref expr, id) | hir::StmtSemi(ref expr, id) => {
+                let exit = self.expr(&expr, pred);
+                self.add_ast_node(id, &[exit])
+            }
+        }
+    }
+
+    fn decl(&mut self, decl: &hir::Decl, pred: CFGIndex) -> CFGIndex {
+        match decl.node {
+            hir::DeclLocal(ref local) => {
+                let init_exit = self.opt_expr(&local.init, pred);
+                self.pat(&local.pat, init_exit)
+            }
+
+            hir::DeclItem(_) => {
+                pred
+            }
+        }
+    }
+
+    fn pat(&mut self, pat: &hir::Pat, pred: CFGIndex) -> CFGIndex {
+        match pat.node {
+            PatKind::Ident(_, _, None) |
+            PatKind::TupleStruct(_, None) |
+            PatKind::Path(..) |
+            PatKind::QPath(..) |
+            PatKind::Lit(..) |
+            PatKind::Range(..) |
+            PatKind::Wild => {
+                self.add_ast_node(pat.id, &[pred])
+            }
+
+            PatKind::Box(ref subpat) |
+            PatKind::Ref(ref subpat, _) |
+            PatKind::Ident(_, _, Some(ref subpat)) => {
+                let subpat_exit = self.pat(&subpat, pred);
+                self.add_ast_node(pat.id, &[subpat_exit])
+            }
+
+            PatKind::TupleStruct(_, Some(ref subpats)) |
+            PatKind::Tup(ref subpats) => {
+                let pats_exit = self.pats_all(subpats.iter(), pred);
+                self.add_ast_node(pat.id, &[pats_exit])
+            }
+
+            PatKind::Struct(_, ref subpats, _) => {
+                let pats_exit =
+                    self.pats_all(subpats.iter().map(|f| &f.node.pat), pred);
+                self.add_ast_node(pat.id, &[pats_exit])
+            }
+
+            PatKind::Vec(ref pre, ref vec, ref post) => {
+                let pre_exit = self.pats_all(pre.iter(), pred);
+                let vec_exit = self.pats_all(vec.iter(), pre_exit);
+                let post_exit = self.pats_all(post.iter(), vec_exit);
+                self.add_ast_node(pat.id, &[post_exit])
+            }
+        }
+    }
+
+    fn pats_all<'b, I: Iterator<Item=&'b P<hir::Pat>>>(&mut self,
+                                          pats: I,
+                                          pred: CFGIndex) -> CFGIndex {
+        //! Handles case where all of the patterns must match.
+        pats.fold(pred, |pred, pat| self.pat(&pat, pred))
+    }
+
+    fn expr(&mut self, expr: &hir::Expr, pred: CFGIndex) -> CFGIndex {
+        match expr.node {
+            hir::ExprBlock(ref blk) => {
+                let blk_exit = self.block(&blk, pred);
+                self.add_ast_node(expr.id, &[blk_exit])
+            }
+
+            hir::ExprIf(ref cond, ref then, None) => {
+                //
+                //     [pred]
+                //       |
+                //       v 1
+                //     [cond]
+                //       |
+                //      / \
+                //     /   \
+                //    v 2   *
+                //  [then]  |
+                //    |     |
+                //    v 3   v 4
+                //   [..expr..]
+                //
+                let cond_exit = self.expr(&cond, pred);                // 1
+                let then_exit = self.block(&then, cond_exit);          // 2
+                self.add_ast_node(expr.id, &[cond_exit, then_exit])      // 3,4
+            }
+
+            hir::ExprIf(ref cond, ref then, Some(ref otherwise)) => {
+                //
+                //     [pred]
+                //       |
+                //       v 1
+                //     [cond]
+                //       |
+                //      / \
+                //     /   \
+                //    v 2   v 3
+                //  [then][otherwise]
+                //    |     |
+                //    v 4   v 5
+                //   [..expr..]
+                //
+                let cond_exit = self.expr(&cond, pred);                // 1
+                let then_exit = self.block(&then, cond_exit);          // 2
+                let else_exit = self.expr(&otherwise, cond_exit);      // 3
+                self.add_ast_node(expr.id, &[then_exit, else_exit])      // 4, 5
+            }
+
+            hir::ExprWhile(ref cond, ref body, _) => {
+                //
+                //         [pred]
+                //           |
+                //           v 1
+                //       [loopback] <--+ 5
+                //           |         |
+                //           v 2       |
+                //   +-----[cond]      |
+                //   |       |         |
+                //   |       v 4       |
+                //   |     [body] -----+
+                //   v 3
+                // [expr]
+                //
+                // Note that `break` and `continue` statements
+                // may cause additional edges.
+
+                // Is the condition considered part of the loop?
+                let loopback = self.add_dummy_node(&[pred]);              // 1
+                let cond_exit = self.expr(&cond, loopback);             // 2
+                let expr_exit = self.add_ast_node(expr.id, &[cond_exit]); // 3
+                self.loop_scopes.push(LoopScope {
+                    loop_id: expr.id,
+                    continue_index: loopback,
+                    break_index: expr_exit
+                });
+                let body_exit = self.block(&body, cond_exit);          // 4
+                self.add_contained_edge(body_exit, loopback);            // 5
+                self.loop_scopes.pop();
+                expr_exit
+            }
+
+            hir::ExprLoop(ref body, _) => {
+                //
+                //     [pred]
+                //       |
+                //       v 1
+                //   [loopback] <---+
+                //       |      4   |
+                //       v 3        |
+                //     [body] ------+
+                //
+                //     [expr] 2
+                //
+                // Note that `break` and `continue` statements
+                // may cause additional edges.
+
+                let loopback = self.add_dummy_node(&[pred]);              // 1
+                let expr_exit = self.add_ast_node(expr.id, &[]);          // 2
+                self.loop_scopes.push(LoopScope {
+                    loop_id: expr.id,
+                    continue_index: loopback,
+                    break_index: expr_exit,
+                });
+                let body_exit = self.block(&body, loopback);           // 3
+                self.add_contained_edge(body_exit, loopback);            // 4
+                self.loop_scopes.pop();
+                expr_exit
+            }
+
+            hir::ExprMatch(ref discr, ref arms, _) => {
+                self.match_(expr.id, &discr, &arms, pred)
+            }
+
+            hir::ExprBinary(op, ref l, ref r) if op.node.is_lazy() => {
+                //
+                //     [pred]
+                //       |
+                //       v 1
+                //      [l]
+                //       |
+                //      / \
+                //     /   \
+                //    v 2  *
+                //   [r]   |
+                //    |    |
+                //    v 3  v 4
+                //   [..exit..]
+                //
+                let l_exit = self.expr(&l, pred);                      // 1
+                let r_exit = self.expr(&r, l_exit);                    // 2
+                self.add_ast_node(expr.id, &[l_exit, r_exit])            // 3,4
+            }
+
+            hir::ExprRet(ref v) => {
+                let v_exit = self.opt_expr(v, pred);
+                let b = self.add_ast_node(expr.id, &[v_exit]);
+                self.add_returning_edge(expr, b);
+                self.add_unreachable_node()
+            }
+
+            hir::ExprBreak(label) => {
+                let loop_scope = self.find_scope(expr, label.map(|l| l.node.name));
+                let b = self.add_ast_node(expr.id, &[pred]);
+                self.add_exiting_edge(expr, b,
+                                      loop_scope, loop_scope.break_index);
+                self.add_unreachable_node()
+            }
+
+            hir::ExprAgain(label) => {
+                let loop_scope = self.find_scope(expr, label.map(|l| l.node.name));
+                let a = self.add_ast_node(expr.id, &[pred]);
+                self.add_exiting_edge(expr, a,
+                                      loop_scope, loop_scope.continue_index);
+                self.add_unreachable_node()
+            }
+
+            hir::ExprVec(ref elems) => {
+                self.straightline(expr, pred, elems.iter().map(|e| &**e))
+            }
+
+            hir::ExprCall(ref func, ref args) => {
+                self.call(expr, pred, &func, args.iter().map(|e| &**e))
+            }
+
+            hir::ExprMethodCall(_, _, ref args) => {
+                self.call(expr, pred, &args[0], args[1..].iter().map(|e| &**e))
+            }
+
+            hir::ExprIndex(ref l, ref r) |
+            hir::ExprBinary(_, ref l, ref r) if self.tcx.is_method_call(expr.id) => {
+                self.call(expr, pred, &l, Some(&**r).into_iter())
+            }
+
+            hir::ExprUnary(_, ref e) if self.tcx.is_method_call(expr.id) => {
+                self.call(expr, pred, &e, None::<hir::Expr>.iter())
+            }
+
+            hir::ExprTup(ref exprs) => {
+                self.straightline(expr, pred, exprs.iter().map(|e| &**e))
+            }
+
+            hir::ExprStruct(_, ref fields, ref base) => {
+                let field_cfg = self.straightline(expr, pred, fields.iter().map(|f| &*f.expr));
+                self.opt_expr(base, field_cfg)
+            }
+
+            hir::ExprRepeat(ref elem, ref count) => {
+                self.straightline(expr, pred, [elem, count].iter().map(|&e| &**e))
+            }
+
+            hir::ExprAssign(ref l, ref r) |
+            hir::ExprAssignOp(_, ref l, ref r) => {
+                self.straightline(expr, pred, [r, l].iter().map(|&e| &**e))
+            }
+
+            hir::ExprIndex(ref l, ref r) |
+            hir::ExprBinary(_, ref l, ref r) => { // NB: && and || handled earlier
+                self.straightline(expr, pred, [l, r].iter().map(|&e| &**e))
+            }
+
+            hir::ExprBox(ref e) |
+            hir::ExprAddrOf(_, ref e) |
+            hir::ExprCast(ref e, _) |
+            hir::ExprType(ref e, _) |
+            hir::ExprUnary(_, ref e) |
+            hir::ExprField(ref e, _) |
+            hir::ExprTupField(ref e, _) => {
+                self.straightline(expr, pred, Some(&**e).into_iter())
+            }
+
+            hir::ExprInlineAsm(_, ref outputs, ref inputs) => {
+                let post_outputs = self.exprs(outputs.iter().map(|e| &**e), pred);
+                let post_inputs = self.exprs(inputs.iter().map(|e| &**e), post_outputs);
+                self.add_ast_node(expr.id, &[post_inputs])
+            }
+
+            hir::ExprClosure(..) |
+            hir::ExprLit(..) |
+            hir::ExprPath(..) => {
+                self.straightline(expr, pred, None::<hir::Expr>.iter())
+            }
+        }
+    }
+
+    fn call<'b, I: Iterator<Item=&'b hir::Expr>>(&mut self,
+            call_expr: &hir::Expr,
+            pred: CFGIndex,
+            func_or_rcvr: &hir::Expr,
+            args: I) -> CFGIndex {
+        let method_call = ty::MethodCall::expr(call_expr.id);
+        let fn_ty = match self.tcx.tables.borrow().method_map.get(&method_call) {
+            Some(method) => method.ty,
+            None => self.tcx.expr_ty_adjusted(func_or_rcvr)
+        };
+
+        let func_or_rcvr_exit = self.expr(func_or_rcvr, pred);
+        let ret = self.straightline(call_expr, func_or_rcvr_exit, args);
+        if fn_ty.fn_ret().diverges() {
+            self.add_unreachable_node()
+        } else {
+            ret
+        }
+    }
+
+    fn exprs<'b, I: Iterator<Item=&'b hir::Expr>>(&mut self,
+                                             exprs: I,
+                                             pred: CFGIndex) -> CFGIndex {
+        //! Constructs graph for `exprs` evaluated in order
+        exprs.fold(pred, |p, e| self.expr(e, p))
+    }
+
+    fn opt_expr(&mut self,
+                opt_expr: &Option<P<hir::Expr>>,
+                pred: CFGIndex) -> CFGIndex {
+        //! Constructs graph for `opt_expr` evaluated, if Some
+        opt_expr.iter().fold(pred, |p, e| self.expr(&e, p))
+    }
+
+    fn straightline<'b, I: Iterator<Item=&'b hir::Expr>>(&mut self,
+                    expr: &hir::Expr,
+                    pred: CFGIndex,
+                    subexprs: I) -> CFGIndex {
+        //! Handles case of an expression that evaluates `subexprs` in order
+
+        let subexprs_exit = self.exprs(subexprs, pred);
+        self.add_ast_node(expr.id, &[subexprs_exit])
+    }
+
+    fn match_(&mut self, id: ast::NodeId, discr: &hir::Expr,
+              arms: &[hir::Arm], pred: CFGIndex) -> CFGIndex {
+        // The CFG for match expression is quite complex, so no ASCII
+        // art for it (yet).
+        //
+        // The CFG generated below matches roughly what trans puts
+        // out. Each pattern and guard is visited in parallel, with
+        // arms containing multiple patterns generating multiple nodes
+        // for the same guard expression. The guard expressions chain
+        // into each other from top to bottom, with a specific
+        // exception to allow some additional valid programs
+        // (explained below). Trans differs slightly in that the
+        // pattern matching may continue after a guard but the visible
+        // behaviour should be the same.
+        //
+        // What is going on is explained in further comments.
+
+        // Visit the discriminant expression
+        let discr_exit = self.expr(discr, pred);
+
+        // Add a node for the exit of the match expression as a whole.
+        let expr_exit = self.add_ast_node(id, &[]);
+
+        // Keep track of the previous guard expressions
+        let mut prev_guards = Vec::new();
+        // Track if the previous pattern contained bindings or wildcards
+        let mut prev_has_bindings = false;
+
+        for arm in arms {
+            // Add an exit node for when we've visited all the
+            // patterns and the guard (if there is one) in the arm.
+            let arm_exit = self.add_dummy_node(&[]);
+
+            for pat in &arm.pats {
+                // Visit the pattern, coming from the discriminant exit
+                let mut pat_exit = self.pat(&pat, discr_exit);
+
+                // If there is a guard expression, handle it here
+                if let Some(ref guard) = arm.guard {
+                    // Add a dummy node for the previous guard
+                    // expression to target
+                    let guard_start = self.add_dummy_node(&[pat_exit]);
+                    // Visit the guard expression
+                    let guard_exit = self.expr(&guard, guard_start);
+
+                    let this_has_bindings = pat_util::pat_contains_bindings_or_wild(
+                        &self.tcx.def_map.borrow(), &pat);
+
+                    // If both this pattern and the previous pattern
+                    // were free of bindings, they must consist only
+                    // of "constant" patterns. Note we cannot match an
+                    // all-constant pattern, fail the guard, and then
+                    // match *another* all-constant pattern. This is
+                    // because if the previous pattern matches, then
+                    // we *cannot* match this one, unless all the
+                    // constants are the same (which is rejected by
+                    // `check_match`).
+                    //
+                    // We can use this to be smarter about the flow
+                    // along guards. If the previous pattern matched,
+                    // then we know we will not visit the guard in
+                    // this one (whether or not the guard succeeded),
+                    // if the previous pattern failed, then we know
+                    // the guard for that pattern will not have been
+                    // visited. Thus, it is not possible to visit both
+                    // the previous guard and the current one when
+                    // both patterns consist only of constant
+                    // sub-patterns.
+                    //
+                    // However, if the above does not hold, then all
+                    // previous guards need to be wired to visit the
+                    // current guard pattern.
+                    if prev_has_bindings || this_has_bindings {
+                        while let Some(prev) = prev_guards.pop() {
+                            self.add_contained_edge(prev, guard_start);
+                        }
+                    }
+
+                    prev_has_bindings = this_has_bindings;
+
+                    // Push the guard onto the list of previous guards
+                    prev_guards.push(guard_exit);
+
+                    // Update the exit node for the pattern
+                    pat_exit = guard_exit;
+                }
+
+                // Add an edge from the exit of this pattern to the
+                // exit of the arm
+                self.add_contained_edge(pat_exit, arm_exit);
+            }
+
+            // Visit the body of this arm
+            let body_exit = self.expr(&arm.body, arm_exit);
+
+            // Link the body to the exit of the expression
+            self.add_contained_edge(body_exit, expr_exit);
+        }
+
+        expr_exit
+    }
+
+    fn add_dummy_node(&mut self, preds: &[CFGIndex]) -> CFGIndex {
+        self.add_node(CFGNodeData::Dummy, preds)
+    }
+
+    fn add_ast_node(&mut self, id: ast::NodeId, preds: &[CFGIndex]) -> CFGIndex {
+        assert!(id != ast::DUMMY_NODE_ID);
+        self.add_node(CFGNodeData::AST(id), preds)
+    }
+
+    fn add_unreachable_node(&mut self) -> CFGIndex {
+        self.add_node(CFGNodeData::Unreachable, &[])
+    }
+
+    fn add_node(&mut self, data: CFGNodeData, preds: &[CFGIndex]) -> CFGIndex {
+        let node = self.graph.add_node(data);
+        for &pred in preds {
+            self.add_contained_edge(pred, node);
+        }
+        node
+    }
+
+    fn add_contained_edge(&mut self,
+                          source: CFGIndex,
+                          target: CFGIndex) {
+        let data = CFGEdgeData {exiting_scopes: vec!() };
+        self.graph.add_edge(source, target, data);
+    }
+
+    fn add_exiting_edge(&mut self,
+                        from_expr: &hir::Expr,
+                        from_index: CFGIndex,
+                        to_loop: LoopScope,
+                        to_index: CFGIndex) {
+        let mut data = CFGEdgeData {exiting_scopes: vec!() };
+        let mut scope = self.tcx.region_maps.node_extent(from_expr.id);
+        let target_scope = self.tcx.region_maps.node_extent(to_loop.loop_id);
+        while scope != target_scope {
+            data.exiting_scopes.push(scope.node_id(&self.tcx.region_maps));
+            scope = self.tcx.region_maps.encl_scope(scope);
+        }
+        self.graph.add_edge(from_index, to_index, data);
+    }
+
+    fn add_returning_edge(&mut self,
+                          _from_expr: &hir::Expr,
+                          from_index: CFGIndex) {
+        let mut data = CFGEdgeData {
+            exiting_scopes: vec!(),
+        };
+        for &LoopScope { loop_id: id, .. } in self.loop_scopes.iter().rev() {
+            data.exiting_scopes.push(id);
+        }
+        self.graph.add_edge(from_index, self.fn_exit, data);
+    }
+
+    fn find_scope(&self,
+                  expr: &hir::Expr,
+                  label: Option<ast::Name>) -> LoopScope {
+        if label.is_none() {
+            return *self.loop_scopes.last().unwrap();
+        }
+
+        match self.tcx.def_map.borrow().get(&expr.id).map(|d| d.full_def()) {
+            Some(Def::Label(loop_id)) => {
+                for l in &self.loop_scopes {
+                    if l.loop_id == loop_id {
+                        return *l;
+                    }
+                }
+                span_bug!(expr.span, "no loop scope for id {}", loop_id);
+            }
+
+            r => {
+                span_bug!(expr.span, "bad entry `{:?}` in def_map for label", r);
+            }
+        }
+    }
+}
diff --git a/src/librustc/cfg/graphviz.rs b/src/librustc/cfg/graphviz.rs
new file mode 100644 (file)
index 0000000..c651baa
--- /dev/null
@@ -0,0 +1,131 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! This module provides linkage between rustc::middle::graph and
+//! libgraphviz traits.
+
+// For clarity, rename the graphviz crate locally to dot.
+use graphviz as dot;
+use graphviz::IntoCow;
+
+use syntax::ast;
+
+use hir::map as ast_map;
+use cfg;
+
+pub type Node<'a> = (cfg::CFGIndex, &'a cfg::CFGNode);
+pub type Edge<'a> = &'a cfg::CFGEdge;
+
+pub struct LabelledCFG<'a, 'ast: 'a> {
+    pub ast_map: &'a ast_map::Map<'ast>,
+    pub cfg: &'a cfg::CFG,
+    pub name: String,
+    /// `labelled_edges` controls whether we emit labels on the edges
+    pub labelled_edges: bool,
+}
+
+fn replace_newline_with_backslash_l(s: String) -> String {
+    // Replacing newlines with \\l causes each line to be left-aligned,
+    // improving presentation of (long) pretty-printed expressions.
+    if s.contains("\n") {
+        let mut s = s.replace("\n", "\\l");
+        // Apparently left-alignment applies to the line that precedes
+        // \l, not the line that follows; so, add \l at end of string
+        // if not already present, ensuring last line gets left-aligned
+        // as well.
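+        // (Illustrative example: the label "a\nb" ends up as "a\lb\l".)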
+        let mut last_two: Vec<_> =
+            s.chars().rev().take(2).collect();
+        last_two.reverse();
+        if last_two != ['\\', 'l'] {
+            s.push_str("\\l");
+        }
+        s
+    } else {
+        s
+    }
+}
+
+impl<'a, 'ast> dot::Labeller<'a> for LabelledCFG<'a, 'ast> {
+    type Node = Node<'a>;
+    type Edge = Edge<'a>;
+    fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new(&self.name[..]).unwrap() }
+
+    fn node_id(&'a self, &(i,_): &Node<'a>) -> dot::Id<'a> {
+        dot::Id::new(format!("N{}", i.node_id())).unwrap()
+    }
+
+    fn node_label(&'a self, &(i, n): &Node<'a>) -> dot::LabelText<'a> {
+        if i == self.cfg.entry {
+            dot::LabelText::LabelStr("entry".into_cow())
+        } else if i == self.cfg.exit {
+            dot::LabelText::LabelStr("exit".into_cow())
+        } else if n.data.id() == ast::DUMMY_NODE_ID {
+            dot::LabelText::LabelStr("(dummy_node)".into_cow())
+        } else {
+            let s = self.ast_map.node_to_string(n.data.id());
+            // left-aligns the lines
+            let s = replace_newline_with_backslash_l(s);
+            dot::LabelText::EscStr(s.into_cow())
+        }
+    }
+
+    fn edge_label(&self, e: &Edge<'a>) -> dot::LabelText<'a> {
+        let mut label = String::new();
+        if !self.labelled_edges {
+            return dot::LabelText::EscStr(label.into_cow());
+        }
+        let mut put_one = false;
+        for (i, &node_id) in e.data.exiting_scopes.iter().enumerate() {
+            if put_one {
+                label.push_str(",\\l");
+            } else {
+                put_one = true;
+            }
+            let s = self.ast_map.node_to_string(node_id);
+            // left-aligns the lines
+            let s = replace_newline_with_backslash_l(s);
+            label.push_str(&format!("exiting scope_{} {}",
+                                   i,
+                                   &s[..]));
+        }
+        dot::LabelText::EscStr(label.into_cow())
+    }
+}
+
+impl<'a> dot::GraphWalk<'a> for &'a cfg::CFG {
+    type Node = Node<'a>;
+    type Edge = Edge<'a>;
+    fn nodes(&'a self) -> dot::Nodes<'a, Node<'a>> {
+        let mut v = Vec::new();
+        self.graph.each_node(|i, nd| { v.push((i, nd)); true });
+        v.into_cow()
+    }
+    fn edges(&'a self) -> dot::Edges<'a, Edge<'a>> {
+        self.graph.all_edges().iter().collect()
+    }
+    fn source(&'a self, edge: &Edge<'a>) -> Node<'a> {
+        let i = edge.source();
+        (i, self.graph.node(i))
+    }
+    fn target(&'a self, edge: &Edge<'a>) -> Node<'a> {
+        let i = edge.target();
+        (i, self.graph.node(i))
+    }
+}
+
+impl<'a, 'ast> dot::GraphWalk<'a> for LabelledCFG<'a, 'ast>
+{
+    type Node = Node<'a>;
+    type Edge = Edge<'a>;
+    fn nodes(&'a self) -> dot::Nodes<'a, Node<'a>> { self.cfg.nodes() }
+    fn edges(&'a self) -> dot::Edges<'a, Edge<'a>> { self.cfg.edges() }
+    fn source(&'a self, edge: &Edge<'a>) -> Node<'a> { self.cfg.source(edge) }
+    fn target(&'a self, edge: &Edge<'a>) -> Node<'a> { self.cfg.target(edge) }
+}
diff --git a/src/librustc/cfg/mod.rs b/src/librustc/cfg/mod.rs
new file mode 100644 (file)
index 0000000..e86bf6e
--- /dev/null
@@ -0,0 +1,70 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Module that constructs a control-flow graph representing an item.
+//! Uses `Graph` as the underlying representation.
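+//!
+//! A sketch of intended usage (the identifiers `tcx`, `body_block` and
+//! `node_id` are illustrative, not defined here):
+//!
+//! ```ignore
+//! let cfg = cfg::CFG::new(tcx, body_block);
+//! let reachable = cfg.node_is_reachable(node_id);
+//! ```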
+
+use rustc_data_structures::graph;
+use ty::TyCtxt;
+use syntax::ast;
+use hir;
+
+mod construct;
+pub mod graphviz;
+
+pub struct CFG {
+    pub graph: CFGGraph,
+    pub entry: CFGIndex,
+    pub exit: CFGIndex,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum CFGNodeData {
+    AST(ast::NodeId),
+    Entry,
+    Exit,
+    Dummy,
+    Unreachable,
+}
+
+impl CFGNodeData {
+    pub fn id(&self) -> ast::NodeId {
+        if let CFGNodeData::AST(id) = *self {
+            id
+        } else {
+            ast::DUMMY_NODE_ID
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct CFGEdgeData {
+    pub exiting_scopes: Vec<ast::NodeId>
+}
+
+pub type CFGIndex = graph::NodeIndex;
+
+pub type CFGGraph = graph::Graph<CFGNodeData, CFGEdgeData>;
+
+pub type CFGNode = graph::Node<CFGNodeData>;
+
+pub type CFGEdge = graph::Edge<CFGEdgeData>;
+
+impl CFG {
+    pub fn new(tcx: &TyCtxt,
+               blk: &hir::Block) -> CFG {
+        construct::construct(tcx, blk)
+    }
+
+    pub fn node_is_reachable(&self, id: ast::NodeId) -> bool {
+        self.graph.depth_traverse(self.entry)
+                  .any(|idx| self.graph.node_data(idx).id() == id)
+    }
+}
index 21742d9935dc2d8b3ff2f937db958b46fd247218..ece5819829baa67b05adbca4f30a3375dfeda7a7 100644 (file)
@@ -51,7 +51,7 @@ could invalidate work done for other items. So, for example:
    not shared state, because if it changes it does not itself
    invalidate other functions (though it may be that it causes new
    monomorphizations to occur, but that's handled independently).
-   
+
 Put another way: if the HIR for an item changes, we are going to
 recompile that item for sure. But we need the dep tracking map to tell
 us what *else* we have to recompile. Shared state is anything that is
@@ -177,7 +177,7 @@ reads from `item`, there would be missing edges in the graph:
       |                                 ^
       |                                 |
       +---------------------------------+ // added by `visit_all_items_in_krate`
-    
+
 In particular, the edge from `Hir(X)` to `ItemSignature(X)` is only
 present because we called `read` ourselves when entering the `ItemSignature(X)`
 task.
@@ -273,8 +273,8 @@ should not exist.  In contrast, using the memoized helper, you get:
     ... -> MapVariant(key) -> A
                  |
                  +----------> B
-                 
-which is much cleaner.                 
+
+which is much cleaner.
 
 **Be aware though that the closure is executed with `MapVariant(key)`
 pushed onto the stack as the current task!** That means that you must
@@ -387,4 +387,3 @@ RUST_DEP_GRAPH_FILTER='Hir&foo -> TypeckItemBody & bar'
 This will dump out all the nodes that lead from `Hir(foo)` to
 `TypeckItemBody(bar)`, from which you can (hopefully) see the source
 of the erroneous edge.
-
diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs
new file mode 100644 (file)
index 0000000..536c739
--- /dev/null
@@ -0,0 +1,205 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::fmt::Debug;
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
+pub enum DepNode<D: Clone + Debug> {
+    // The `D` type is "how definitions are identified".
+    // During compilation, it is always `DefId`, but when serializing
+    // it is mapped to `DefPath`.
+
+    // Represents the `Krate` as a whole (the `hir::Krate` value) (as
+    // distinct from the krate module). This is basically a hash of
+    // the entire krate, so if you read from `Krate` (e.g., by calling
+    // `tcx.map.krate()`), we will have to assume that any change
+    // means that you need to be recompiled. This is because the
+    // `Krate` value gives you access to all other items. To avoid
+    // this fate, do not call `tcx.map.krate()`; instead, prefer
+    // wrappers like `tcx.visit_all_items_in_krate()`.  If there is no
+    // suitable wrapper, you can use `tcx.dep_graph.ignore()` to gain
+    // access to the krate, but you must remember to add suitable
+    // edges yourself for the individual items that you read.
+    Krate,
+
+    // Represents the HIR node with the given node-id
+    Hir(D),
+
+    // Represents different phases in the compiler.
+    CrateReader,
+    CollectLanguageItems,
+    CheckStaticRecursion,
+    ResolveLifetimes,
+    RegionResolveCrate,
+    CheckLoops,
+    PluginRegistrar,
+    StabilityIndex,
+    CollectItem(D),
+    Coherence,
+    EffectCheck,
+    Liveness,
+    Resolve,
+    EntryPoint,
+    CheckEntryFn,
+    CoherenceCheckImpl(D),
+    CoherenceOverlapCheck(D),
+    CoherenceOverlapCheckSpecial(D),
+    CoherenceOverlapInherentCheck(D),
+    CoherenceOrphanCheck(D),
+    Variance,
+    WfCheck(D),
+    TypeckItemType(D),
+    TypeckItemBody(D),
+    Dropck,
+    DropckImpl(D),
+    CheckConst(D),
+    Privacy,
+    IntrinsicCheck(D),
+    MatchCheck(D),
+    MirMapConstruction(D),
+    MirTypeck(D),
+    BorrowCheck(D),
+    RvalueCheck(D),
+    Reachability,
+    DeadCheck,
+    StabilityCheck,
+    LateLintCheck,
+    TransCrate,
+    TransCrateItem(D),
+    TransInlinedItem(D),
+    TransWriteMetadata,
+
+    // Nodes representing bits of computed IR in the tcx. Each shared
+    // table in the tcx (or elsewhere) maps to one of these
+    // nodes. Often we map multiple tables to the same node if there
+    // is no point in distinguishing them (e.g., both the type and
+    // predicates for an item wind up in `ItemSignature`). Other
+    // times, such as `ImplItems` vs `TraitItemDefIds`, tables which
+    // might be mergable are kept distinct because the sets of def-ids
+    // to which they apply are disjoint, and hence we might as well
+    // have distinct labels for easier debugging.
+    ImplOrTraitItems(D),
+    ItemSignature(D),
+    FieldTy(D),
+    TraitItemDefIds(D),
+    InherentImpls(D),
+    ImplItems(D),
+
+    // The set of impls for a given trait. Ultimately, it would be
+    // nice to get more fine-grained here (e.g., to include a
+    // simplified type), but we can't do that until we restructure the
+    // HIR to distinguish the *header* of an impl from its body.  This
+    // is because changes to the header may change the self-type of
+    // the impl and hence would require us to be more conservative
+    // than changes in the impl body.
+    TraitImpls(D),
+
+    // Nodes representing caches. To properly handle a true cache, we
+    // don't use a DepTrackingMap, but rather we push a task node.
+    // Otherwise the write into the map would be incorrectly
+    // attributed to the first task that happened to fill the cache,
+    // which would yield an overly conservative dep-graph.
+    TraitItems(D),
+    ReprHints(D),
+    TraitSelect(D),
+}
+
+impl<D: Clone + Debug> DepNode<D> {
+    /// Used in testing
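+    /// (e.g., `DepNode::from_label_string("BorrowCheck", def_id)` yields
+    /// `Ok(DepNode::BorrowCheck(def_id))`, while an unrecognized label
+    /// yields `Err(())`; `def_id` here is illustrative).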
+    pub fn from_label_string(label: &str, data: D) -> Result<DepNode<D>, ()> {
+        macro_rules! check {
+            ($($name:ident,)*) => {
+                match label {
+                    $(stringify!($name) => Ok(DepNode::$name(data)),)*
+                    _ => Err(())
+                }
+            }
+        }
+
+        check! {
+            CollectItem,
+            BorrowCheck,
+            TransCrateItem,
+            TypeckItemType,
+            TypeckItemBody,
+            ImplOrTraitItems,
+            ItemSignature,
+            FieldTy,
+            TraitItemDefIds,
+            InherentImpls,
+            ImplItems,
+            TraitImpls,
+            ReprHints,
+        }
+    }
+
+    pub fn map_def<E, OP>(&self, mut op: OP) -> Option<DepNode<E>>
+        where OP: FnMut(&D) -> Option<E>, E: Clone + Debug
+    {
+        use self::DepNode::*;
+
+        match *self {
+            Krate => Some(Krate),
+            CrateReader => Some(CrateReader),
+            CollectLanguageItems => Some(CollectLanguageItems),
+            CheckStaticRecursion => Some(CheckStaticRecursion),
+            ResolveLifetimes => Some(ResolveLifetimes),
+            RegionResolveCrate => Some(RegionResolveCrate),
+            CheckLoops => Some(CheckLoops),
+            PluginRegistrar => Some(PluginRegistrar),
+            StabilityIndex => Some(StabilityIndex),
+            Coherence => Some(Coherence),
+            EffectCheck => Some(EffectCheck),
+            Liveness => Some(Liveness),
+            Resolve => Some(Resolve),
+            EntryPoint => Some(EntryPoint),
+            CheckEntryFn => Some(CheckEntryFn),
+            Variance => Some(Variance),
+            Dropck => Some(Dropck),
+            Privacy => Some(Privacy),
+            Reachability => Some(Reachability),
+            DeadCheck => Some(DeadCheck),
+            StabilityCheck => Some(StabilityCheck),
+            LateLintCheck => Some(LateLintCheck),
+            TransCrate => Some(TransCrate),
+            TransWriteMetadata => Some(TransWriteMetadata),
+            Hir(ref d) => op(d).map(Hir),
+            CollectItem(ref d) => op(d).map(CollectItem),
+            CoherenceCheckImpl(ref d) => op(d).map(CoherenceCheckImpl),
+            CoherenceOverlapCheck(ref d) => op(d).map(CoherenceOverlapCheck),
+            CoherenceOverlapCheckSpecial(ref d) => op(d).map(CoherenceOverlapCheckSpecial),
+            CoherenceOverlapInherentCheck(ref d) => op(d).map(CoherenceOverlapInherentCheck),
+            CoherenceOrphanCheck(ref d) => op(d).map(CoherenceOrphanCheck),
+            WfCheck(ref d) => op(d).map(WfCheck),
+            TypeckItemType(ref d) => op(d).map(TypeckItemType),
+            TypeckItemBody(ref d) => op(d).map(TypeckItemBody),
+            DropckImpl(ref d) => op(d).map(DropckImpl),
+            CheckConst(ref d) => op(d).map(CheckConst),
+            IntrinsicCheck(ref d) => op(d).map(IntrinsicCheck),
+            MatchCheck(ref d) => op(d).map(MatchCheck),
+            MirMapConstruction(ref d) => op(d).map(MirMapConstruction),
+            MirTypeck(ref d) => op(d).map(MirTypeck),
+            BorrowCheck(ref d) => op(d).map(BorrowCheck),
+            RvalueCheck(ref d) => op(d).map(RvalueCheck),
+            TransCrateItem(ref d) => op(d).map(TransCrateItem),
+            TransInlinedItem(ref d) => op(d).map(TransInlinedItem),
+            ImplOrTraitItems(ref d) => op(d).map(ImplOrTraitItems),
+            ItemSignature(ref d) => op(d).map(ItemSignature),
+            FieldTy(ref d) => op(d).map(FieldTy),
+            TraitItemDefIds(ref d) => op(d).map(TraitItemDefIds),
+            InherentImpls(ref d) => op(d).map(InherentImpls),
+            ImplItems(ref d) => op(d).map(ImplItems),
+            TraitImpls(ref d) => op(d).map(TraitImpls),
+            TraitItems(ref d) => op(d).map(TraitItems),
+            ReprHints(ref d) => op(d).map(ReprHints),
+            TraitSelect(ref d) => op(d).map(TraitSelect),
+        }
+    }
+}
index c49e64f0f543b593ec551ae181cecb5dae395653..922d32a3067969f626a299f4a19d01ddbc83f69b 100644 (file)
@@ -8,6 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use hir::def_id::DefId;
 use rustc_data_structures::fnv::FnvHashMap;
 use std::cell::RefCell;
 use std::ops::Index;
@@ -29,7 +30,7 @@ pub struct DepTrackingMap<M: DepTrackingMapConfig> {
 pub trait DepTrackingMapConfig {
     type Key: Eq + Hash + Clone;
     type Value: Clone;
-    fn to_dep_node(key: &Self::Key) -> DepNode;
+    fn to_dep_node(key: &Self::Key) -> DepNode<DefId>;
 }
 
 impl<M: DepTrackingMapConfig> DepTrackingMap<M> {
index 4b25285c476c463e9c9de8e38418570e181e74f5..10f3d21f2af6d37eeb265f293278785543b99dfb 100644 (file)
@@ -9,11 +9,13 @@
 // except according to those terms.
 
 use rustc_data_structures::fnv::{FnvHashMap, FnvHashSet};
+use std::fmt::Debug;
+use std::hash::Hash;
 use super::{DepGraphQuery, DepNode};
 
-pub struct DepGraphEdges {
-    nodes: Vec<DepNode>,
-    indices: FnvHashMap<DepNode, IdIndex>,
+pub struct DepGraphEdges<D: Clone + Debug + Eq + Hash> {
+    nodes: Vec<DepNode<D>>,
+    indices: FnvHashMap<DepNode<D>, IdIndex>,
     edges: FnvHashSet<(IdIndex, IdIndex)>,
     open_nodes: Vec<OpenNode>,
 }
@@ -40,8 +42,8 @@ enum OpenNode {
     Ignore,
 }
 
-impl DepGraphEdges {
-    pub fn new() -> DepGraphEdges {
+impl<D: Clone + Debug + Eq + Hash> DepGraphEdges<D> {
+    pub fn new() -> DepGraphEdges<D> {
         DepGraphEdges {
             nodes: vec![],
             indices: FnvHashMap(),
@@ -50,12 +52,12 @@ impl DepGraphEdges {
         }
     }
 
-    fn id(&self, index: IdIndex) -> DepNode {
-        self.nodes[index.index()]
+    fn id(&self, index: IdIndex) -> DepNode<D> {
+        self.nodes[index.index()].clone()
     }
 
     /// Creates a node for `id` in the graph.
-    fn make_node(&mut self, id: DepNode) -> IdIndex {
+    fn make_node(&mut self, id: DepNode<D>) -> IdIndex {
         if let Some(&i) = self.indices.get(&id) {
             return i;
         }
@@ -80,7 +82,7 @@ impl DepGraphEdges {
         assert_eq!(popped_node, OpenNode::Ignore);
     }
 
-    pub fn push_task(&mut self, key: DepNode) {
+    pub fn push_task(&mut self, key: DepNode<D>) {
         let top_node = self.current_node();
 
         let new_node = self.make_node(key);
@@ -93,7 +95,7 @@ impl DepGraphEdges {
         }
     }
 
-    pub fn pop_task(&mut self, key: DepNode) {
+    pub fn pop_task(&mut self, key: DepNode<D>) {
         let popped_node = self.open_nodes.pop().unwrap();
         assert_eq!(OpenNode::Node(self.indices[&key]), popped_node);
     }
@@ -101,7 +103,7 @@ impl DepGraphEdges {
     /// Indicates that the current task `C` reads `v` by adding an
     /// edge from `v` to `C`. If there is no current task, panics. If
     /// you want to suppress this edge, use `ignore`.
-    pub fn read(&mut self, v: DepNode) {
+    pub fn read(&mut self, v: DepNode<D>) {
         let source = self.make_node(v);
         self.add_edge_from_current_node(|current| (source, current))
     }
@@ -109,7 +111,7 @@ impl DepGraphEdges {
     /// Indicates that the current task `C` writes `v` by adding an
     /// edge from `C` to `v`. If there is no current task, panics. If
     /// you want to suppress this edge, use `ignore`.
-    pub fn write(&mut self, v: DepNode) {
+    pub fn write(&mut self, v: DepNode<D>) {
         let target = self.make_node(v);
         self.add_edge_from_current_node(|current| (current, target))
     }
@@ -122,7 +124,7 @@ impl DepGraphEdges {
     {
         match self.current_node() {
             Some(open_node) => self.add_edge_from_open_node(open_node, op),
-            None => panic!("no current node, cannot add edge into dependency graph")
+            None => bug!("no current node, cannot add edge into dependency graph")
         }
     }
 
@@ -153,7 +155,7 @@ impl DepGraphEdges {
         }
     }
 
-    pub fn query(&self) -> DepGraphQuery {
+    pub fn query(&self) -> DepGraphQuery<D> {
         let edges: Vec<_> = self.edges.iter()
                                       .map(|&(i, j)| (self.id(i), self.id(j)))
                                       .collect();
diff --git a/src/librustc/dep_graph/graph.rs b/src/librustc/dep_graph/graph.rs
new file mode 100644 (file)
index 0000000..741ad65
--- /dev/null
@@ -0,0 +1,71 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use hir::def_id::DefId;
+use std::rc::Rc;
+
+use super::dep_node::DepNode;
+use super::query::DepGraphQuery;
+use super::raii;
+use super::thread::{DepGraphThreadData, DepMessage};
+
+#[derive(Clone)]
+pub struct DepGraph {
+    data: Rc<DepGraphThreadData>
+}
+
+impl DepGraph {
+    pub fn new(enabled: bool) -> DepGraph {
+        DepGraph {
+            data: Rc::new(DepGraphThreadData::new(enabled))
+        }
+    }
+
+    /// True if we are actually building a dep-graph. If this returns false,
+    /// then the other methods on this `DepGraph` will have no net effect.
+    #[inline]
+    pub fn enabled(&self) -> bool {
+        self.data.enabled()
+    }
+
+    pub fn query(&self) -> DepGraphQuery<DefId> {
+        self.data.query()
+    }
+
+    pub fn in_ignore<'graph>(&'graph self) -> raii::IgnoreTask<'graph> {
+        raii::IgnoreTask::new(&self.data)
+    }
+
+    pub fn in_task<'graph>(&'graph self, key: DepNode<DefId>) -> raii::DepTask<'graph> {
+        raii::DepTask::new(&self.data, key)
+    }
+
+    pub fn with_ignore<OP,R>(&self, op: OP) -> R
+        where OP: FnOnce() -> R
+    {
+        let _task = self.in_ignore();
+        op()
+    }
+
+    pub fn with_task<OP,R>(&self, key: DepNode<DefId>, op: OP) -> R
+        where OP: FnOnce() -> R
+    {
+        let _task = self.in_task(key);
+        op()
+    }
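+
+    // A sketch of typical use (the identifiers below are illustrative, not
+    // defined here): wrap a pass in a task so that reads are attributed
+    // to that task.
+    //
+    //     tcx.dep_graph.with_task(DepNode::BorrowCheck(def_id), || {
+    //         tcx.dep_graph.read(DepNode::Hir(def_id));
+    //         // ... run the pass for `def_id` ...
+    //     });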
+
+    pub fn read(&self, v: DepNode<DefId>) {
+        self.data.enqueue(DepMessage::Read(v));
+    }
+
+    pub fn write(&self, v: DepNode<DefId>) {
+        self.data.enqueue(DepMessage::Write(v));
+    }
+}
index faf97f5808e36b1b490431dfda73f633976e3e88..49481dcb79672faecec8a671dc2f05c515b8b5c3 100644 (file)
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use self::thread::{DepGraphThreadData, DepMessage};
-use middle::def_id::DefId;
-use middle::ty;
-use rustc_front::hir;
-use rustc_front::intravisit::Visitor;
-use std::rc::Rc;
-
+mod dep_node;
 mod dep_tracking_map;
 mod edges;
+mod graph;
 mod query;
 mod raii;
 mod thread;
-
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
-pub enum DepNode {
-    // Represents the `Krate` as a whole (the `hir::Krate` value) (as
-    // distinct from the krate module). This is basically a hash of
-    // the entire krate, so if you read from `Krate` (e.g., by calling
-    // `tcx.map.krate()`), we will have to assume that any change
-    // means that you need to be recompiled. This is because the
-    // `Krate` value gives you access to all other items. To avoid
-    // this fate, do not call `tcx.map.krate()`; instead, prefer
-    // wrappers like `tcx.visit_all_items_in_krate()`.  If there is no
-    // suitable wrapper, you can use `tcx.dep_graph.ignore()` to gain
-    // access to the krate, but you must remember to add suitable
-    // edges yourself for the individual items that you read.
-    Krate,
-
-    // Represents the HIR node with the given node-id
-    Hir(DefId),
-
-    // Represents different phases in the compiler.
-    CrateReader,
-    CollectLanguageItems,
-    CheckStaticRecursion,
-    ResolveLifetimes,
-    RegionResolveCrate,
-    CheckLoops,
-    PluginRegistrar,
-    StabilityIndex,
-    CollectItem(DefId),
-    Coherence,
-    EffectCheck,
-    Liveness,
-    Resolve,
-    EntryPoint,
-    CheckEntryFn,
-    CoherenceCheckImpl(DefId),
-    CoherenceOverlapCheck(DefId),
-    CoherenceOverlapCheckSpecial(DefId),
-    CoherenceOrphanCheck(DefId),
-    Variance,
-    WfCheck(DefId),
-    TypeckItemType(DefId),
-    TypeckItemBody(DefId),
-    Dropck,
-    DropckImpl(DefId),
-    CheckConst(DefId),
-    Privacy,
-    IntrinsicCheck(DefId),
-    MatchCheck(DefId),
-    MirMapConstruction(DefId),
-    BorrowCheck(DefId),
-    RvalueCheck(DefId),
-    Reachability,
-    DeadCheck,
-    StabilityCheck,
-    LateLintCheck,
-    IntrinsicUseCheck,
-    TransCrate,
-    TransCrateItem(DefId),
-    TransInlinedItem(DefId),
-    TransWriteMetadata,
-
-    // Nodes representing bits of computed IR in the tcx. Each shared
-    // table in the tcx (or elsewhere) maps to one of these
-    // nodes. Often we map multiple tables to the same node if there
-    // is no point in distinguishing them (e.g., both the type and
-    // predicates for an item wind up in `ItemSignature`). Other
-    // times, such as `ImplItems` vs `TraitItemDefIds`, tables which
-    // might be mergable are kept distinct because the sets of def-ids
-    // to which they apply are disjoint, and hence we might as well
-    // have distinct labels for easier debugging.
-    ImplOrTraitItems(DefId),
-    ItemSignature(DefId),
-    FieldTy(DefId),
-    TraitItemDefIds(DefId),
-    InherentImpls(DefId),
-    ImplItems(DefId),
-
-    // The set of impls for a given trait. Ultimately, it would be
-    // nice to get more fine-grained here (e.g., to include a
-    // simplified type), but we can't do that until we restructure the
-    // HIR to distinguish the *header* of an impl from its body.  This
-    // is because changes to the header may change the self-type of
-    // the impl and hence would require us to be more conservative
-    // than changes in the impl body.
-    TraitImpls(DefId),
-
-    // Nodes representing caches. To properly handle a true cache, we
-    // don't use a DepTrackingMap, but rather we push a task node.
-    // Otherwise the write into the map would be incorrectly
-    // attributed to the first task that happened to fill the cache,
-    // which would yield an overly conservative dep-graph.
-    TraitItems(DefId),
-    ReprHints(DefId),
-    TraitSelect(DefId),
-}
-
-#[derive(Clone)]
-pub struct DepGraph {
-    data: Rc<DepGraphThreadData>
-}
-
-impl DepGraph {
-    pub fn new(enabled: bool) -> DepGraph {
-        DepGraph {
-            data: Rc::new(DepGraphThreadData::new(enabled))
-        }
-    }
-
-    /// True if we are actually building a dep-graph. If this returns false,
-    /// then the other methods on this `DepGraph` will have no net effect.
-    #[inline]
-    pub fn enabled(&self) -> bool {
-        self.data.enabled()
-    }
-
-    pub fn query(&self) -> DepGraphQuery {
-        self.data.query()
-    }
-
-    pub fn in_ignore<'graph>(&'graph self) -> raii::IgnoreTask<'graph> {
-        raii::IgnoreTask::new(&self.data)
-    }
-
-    pub fn in_task<'graph>(&'graph self, key: DepNode) -> raii::DepTask<'graph> {
-        raii::DepTask::new(&self.data, key)
-    }
-
-    pub fn with_ignore<OP,R>(&self, op: OP) -> R
-        where OP: FnOnce() -> R
-    {
-        let _task = self.in_ignore();
-        op()
-    }
-
-    pub fn with_task<OP,R>(&self, key: DepNode, op: OP) -> R
-        where OP: FnOnce() -> R
-    {
-        let _task = self.in_task(key);
-        op()
-    }
-
-    pub fn read(&self, v: DepNode) {
-        self.data.enqueue(DepMessage::Read(v));
-    }
-
-    pub fn write(&self, v: DepNode) {
-        self.data.enqueue(DepMessage::Write(v));
-    }
-}
+mod visit;
 
 pub use self::dep_tracking_map::{DepTrackingMap, DepTrackingMapConfig};
-
+pub use self::dep_node::DepNode;
+pub use self::graph::DepGraph;
 pub use self::query::DepGraphQuery;
-
-/// Visit all the items in the krate in some order. When visiting a
-/// particular item, first create a dep-node by calling `dep_node_fn`
-/// and push that onto the dep-graph stack of tasks, and also create a
-/// read edge from the corresponding AST node. This is used in
-/// compiler passes to automatically record the item that they are
-/// working on.
-pub fn visit_all_items_in_krate<'tcx,V,F>(tcx: &ty::ctxt<'tcx>,
-                                          mut dep_node_fn: F,
-                                          visitor: &mut V)
-    where F: FnMut(DefId) -> DepNode, V: Visitor<'tcx>
-{
-    struct TrackingVisitor<'visit, 'tcx: 'visit, F: 'visit, V: 'visit> {
-        tcx: &'visit ty::ctxt<'tcx>,
-        dep_node_fn: &'visit mut F,
-        visitor: &'visit mut V
-    }
-
-    impl<'visit, 'tcx, F, V> Visitor<'tcx> for TrackingVisitor<'visit, 'tcx, F, V>
-        where F: FnMut(DefId) -> DepNode, V: Visitor<'tcx>
-    {
-        fn visit_item(&mut self, i: &'tcx hir::Item) {
-            let item_def_id = self.tcx.map.local_def_id(i.id);
-            let task_id = (self.dep_node_fn)(item_def_id);
-            debug!("About to start task {:?}", task_id);
-            let _task = self.tcx.dep_graph.in_task(task_id);
-            self.tcx.dep_graph.read(DepNode::Hir(item_def_id));
-            self.visitor.visit_item(i)
-        }
-    }
-
-    let krate = tcx.dep_graph.with_ignore(|| tcx.map.krate());
-    let mut tracking_visitor = TrackingVisitor {
-        tcx: tcx,
-        dep_node_fn: &mut dep_node_fn,
-        visitor: visitor
-    };
-    krate.visit_all_items(&mut tracking_visitor)
-}
+pub use self::visit::visit_all_items_in_krate;
index 74a054acb4fa05c98aabf3c5a4afab035626ce3e..acc6660da6e8934ae144aa78e6d4456490681d48 100644 (file)
 
 use rustc_data_structures::fnv::FnvHashMap;
 use rustc_data_structures::graph::{Graph, NodeIndex};
+use std::fmt::Debug;
+use std::hash::Hash;
 
 use super::DepNode;
 
-pub struct DepGraphQuery {
-    pub graph: Graph<DepNode, ()>,
-    pub indices: FnvHashMap<DepNode, NodeIndex>,
+pub struct DepGraphQuery<D: Clone + Debug + Hash + Eq> {
+    pub graph: Graph<DepNode<D>, ()>,
+    pub indices: FnvHashMap<DepNode<D>, NodeIndex>,
 }
 
-impl DepGraphQuery {
-    pub fn new(nodes: &[DepNode], edges: &[(DepNode, DepNode)]) -> DepGraphQuery {
+impl<D: Clone + Debug + Hash + Eq> DepGraphQuery<D> {
+    pub fn new(nodes: &[DepNode<D>],
+               edges: &[(DepNode<D>, DepNode<D>)])
+               -> DepGraphQuery<D> {
         let mut graph = Graph::new();
         let mut indices = FnvHashMap();
         for node in nodes {
@@ -39,27 +43,43 @@ impl DepGraphQuery {
         }
     }
 
-    pub fn nodes(&self) -> Vec<DepNode> {
+    pub fn contains_node(&self, node: &DepNode<D>) -> bool {
+        self.indices.contains_key(&node)
+    }
+
+    pub fn nodes(&self) -> Vec<DepNode<D>> {
         self.graph.all_nodes()
                   .iter()
                   .map(|n| n.data.clone())
                   .collect()
     }
 
-    pub fn edges(&self) -> Vec<(DepNode,DepNode)> {
+    pub fn edges(&self) -> Vec<(DepNode<D>,DepNode<D>)> {
         self.graph.all_edges()
                   .iter()
                   .map(|edge| (edge.source(), edge.target()))
-                  .map(|(s, t)| (self.graph.node_data(s).clone(), self.graph.node_data(t).clone()))
+                  .map(|(s, t)| (self.graph.node_data(s).clone(),
+                                 self.graph.node_data(t).clone()))
                   .collect()
     }
 
     /// All nodes reachable from `node`. In other words, things that
     /// will have to be recomputed if `node` changes.
-    pub fn dependents(&self, node: DepNode) -> Vec<DepNode> {
+    pub fn transitive_dependents(&self, node: DepNode<D>) -> Vec<DepNode<D>> {
         if let Some(&index) = self.indices.get(&node) {
             self.graph.depth_traverse(index)
-                      .map(|dependent_node| self.graph.node_data(dependent_node).clone())
+                      .map(|s| self.graph.node_data(s).clone())
+                      .collect()
+        } else {
+            vec![]
+        }
+    }
+
+    /// Just the outgoing edges from `node`.
+    pub fn immediate_dependents(&self, node: DepNode<D>) -> Vec<DepNode<D>> {
+        if let Some(&index) = self.indices.get(&node) {
+            self.graph.successor_nodes(index)
+                      .map(|s| self.graph.node_data(s).clone())
                       .collect()
         } else {
             vec![]
index dd7ff92f9c360d2b9d2d878a0bc272258c81b9ed..13151d169fc3f78843405d5a1829ec9d2823d985 100644 (file)
@@ -8,16 +8,18 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use hir::def_id::DefId;
 use super::DepNode;
 use super::thread::{DepGraphThreadData, DepMessage};
 
 pub struct DepTask<'graph> {
     data: &'graph DepGraphThreadData,
-    key: DepNode,
+    key: DepNode<DefId>,
 }
 
 impl<'graph> DepTask<'graph> {
-    pub fn new(data: &'graph DepGraphThreadData, key: DepNode) -> DepTask<'graph> {
+    pub fn new(data: &'graph DepGraphThreadData, key: DepNode<DefId>)
+               -> DepTask<'graph> {
         data.enqueue(DepMessage::PushTask(key));
         DepTask { data: data, key: key }
     }
index c43b4b15b763b24fffdc322af8c0574d3428ef29..5b0e4a909c8d3d7aeeade5edc26361eb2da149ae 100644 (file)
@@ -18,6 +18,7 @@
 //! to accumulate more messages. This way we only ever have two vectors
 //! allocated (and both have a fairly large capacity).
 
+use hir::def_id::DefId;
 use rustc_data_structures::veccell::VecCell;
 use std::cell::Cell;
 use std::sync::mpsc::{self, Sender, Receiver};
@@ -28,10 +29,10 @@ use super::DepNode;
 use super::edges::DepGraphEdges;
 
 pub enum DepMessage {
-    Read(DepNode),
-    Write(DepNode),
-    PushTask(DepNode),
-    PopTask(DepNode),
+    Read(DepNode<DefId>),
+    Write(DepNode<DefId>),
+    PushTask(DepNode<DefId>),
+    PopTask(DepNode<DefId>),
     PushIgnore,
     PopIgnore,
     Query,
@@ -57,7 +58,7 @@ pub struct DepGraphThreadData {
     swap_out: Sender<Vec<DepMessage>>,
 
     // where to receive query results
-    query_in: Receiver<DepGraphQuery>,
+    query_in: Receiver<DepGraphQuery<DefId>>,
 }
 
 const INITIAL_CAPACITY: usize = 2048;
@@ -105,7 +106,7 @@ impl DepGraphThreadData {
         self.swap_out.send(old_messages).unwrap();
     }
 
-    pub fn query(&self) -> DepGraphQuery {
+    pub fn query(&self) -> DepGraphQuery<DefId> {
         assert!(self.enabled, "cannot query if dep graph construction not enabled");
         self.enqueue(DepMessage::Query);
         self.swap();
@@ -148,14 +149,14 @@ impl DepGraphThreadData {
 
     // Outline this too.
     fn invalid_message(&self, string: &str) {
-        panic!("{}; see src/librustc/dep_graph/README.md for more information", string)
+        bug!("{}; see src/librustc/dep_graph/README.md for more information", string)
     }
 }
 
 /// Definition of the depgraph thread.
 pub fn main(swap_in: Receiver<Vec<DepMessage>>,
             swap_out: Sender<Vec<DepMessage>>,
-            query_out: Sender<DepGraphQuery>) {
+            query_out: Sender<DepGraphQuery<DefId>>) {
     let mut edges = DepGraphEdges::new();
 
     // the compiler thread always expects a fresh buffer to be
diff --git a/src/librustc/dep_graph/visit.rs b/src/librustc/dep_graph/visit.rs
new file mode 100644 (file)
index 0000000..cdc1621
--- /dev/null
@@ -0,0 +1,56 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use hir;
+use hir::def_id::DefId;
+use hir::intravisit::Visitor;
+use ty::TyCtxt;
+
+use super::dep_node::DepNode;
+
+
+/// Visit all the items in the krate in some order. When visiting a
+/// particular item, first create a dep-node by calling `dep_node_fn`
+/// and push that onto the dep-graph stack of tasks, and also create a
+/// read edge from the corresponding AST node. This is used in
+/// compiler passes to automatically record the item that they are
+/// working on.
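+///
+/// A sketch of intended usage (`my_visitor` is illustrative):
+///
+/// ```ignore
+/// visit_all_items_in_krate(tcx, DepNode::TypeckItemBody, &mut my_visitor);
+/// ```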
+pub fn visit_all_items_in_krate<'tcx,V,F>(tcx: &TyCtxt<'tcx>,
+                                          mut dep_node_fn: F,
+                                          visitor: &mut V)
+    where F: FnMut(DefId) -> DepNode<DefId>, V: Visitor<'tcx>
+{
+    struct TrackingVisitor<'visit, 'tcx: 'visit, F: 'visit, V: 'visit> {
+        tcx: &'visit TyCtxt<'tcx>,
+        dep_node_fn: &'visit mut F,
+        visitor: &'visit mut V
+    }
+
+    impl<'visit, 'tcx, F, V> Visitor<'tcx> for TrackingVisitor<'visit, 'tcx, F, V>
+        where F: FnMut(DefId) -> DepNode<DefId>, V: Visitor<'tcx>
+    {
+        fn visit_item(&mut self, i: &'tcx hir::Item) {
+            let item_def_id = self.tcx.map.local_def_id(i.id);
+            let task_id = (self.dep_node_fn)(item_def_id);
+            let _task = self.tcx.dep_graph.in_task(task_id);
+            debug!("Started task {:?}", task_id);
+            self.tcx.dep_graph.read(DepNode::Hir(item_def_id));
+            self.visitor.visit_item(i)
+        }
+    }
+
+    let krate = tcx.dep_graph.with_ignore(|| tcx.map.krate());
+    let mut tracking_visitor = TrackingVisitor {
+        tcx: tcx,
+        dep_node_fn: &mut dep_node_fn,
+        visitor: visitor
+    };
+    krate.visit_all_items(&mut tracking_visitor)
+}
index 92db527ef9803ac3b0e1f8b52a994f113a0de5c1..0ef130127a4164e078647c381ecebc6d05a64b82 100644 (file)
 // Each message should start and end with a new line, and be wrapped to 80 characters.
 // In vim you can `:set tw=80` and use `gq` to wrap paragraphs. Use `:set tw=0` to disable.
 register_long_diagnostics! {
-
-E0001: r##"
-This error suggests that the expression arm corresponding to the noted pattern
-will never be reached as for all possible values of the expression being
-matched, one of the preceding patterns will match.
-
-This means that perhaps some of the preceding patterns are too general, this
-one is too specific or the ordering is incorrect.
-
-For example, the following `match` block has too many arms:
-
-```compile_fail
-match foo {
-    Some(bar) => {/* ... */}
-    None => {/* ... */}
-    _ => {/* ... */} // All possible cases have already been handled
-}
-```
-
-`match` blocks have their patterns matched in order, so, for example, putting
-a wildcard arm above a more specific arm will make the latter arm irrelevant.
-
-Ensure the ordering of the match arm is correct and remove any superfluous
-arms.
-"##,
-
-E0002: r##"
-This error indicates that an empty match expression is invalid because the type
-it is matching on is non-empty (there exist values of this type). In safe code
-it is impossible to create an instance of an empty type, so empty match
-expressions are almost never desired. This error is typically fixed by adding
-one or more cases to the match expression.
-
-An example of an empty type is `enum Empty { }`. So, the following will work:
-
-```
-enum Empty {}
-
-fn foo(x: Empty) {
-    match x {
-        // empty
-    }
-}
-```
-
-However, this won't:
-
-```compile_fail
-enum Empty {}
-
-fn foo(x: Option<String>) {
-    match x {
-        // empty
-    }
-}
-```
-"##,
-
-E0003: r##"
-Not-a-Number (NaN) values cannot be compared for equality and hence can never
-match the input to a match expression. So, the following will not compile:
-
-```compile_fail
-const NAN: f32 = 0.0 / 0.0;
-
-let number = 0.1f32;
-
-match number {
-    NAN => { /* ... */ },
-    _ => {}
-}
-```
-
-To match against NaN values, you should instead use the `is_nan()` method in a
-guard, like so:
-
-```
-let number = 0.1f32;
-
-match number {
-    x if x.is_nan() => { /* ... */ }
-    _ => {}
-}
-```
-"##,
-
-E0004: r##"
-This error indicates that the compiler cannot guarantee a matching pattern for
-one or more possible inputs to a match expression. Guaranteed matches are
-required in order to assign values to match expressions, or alternatively,
-determine the flow of execution. Erroneous code example:
-
-```compile_fail
-enum Terminator {
-    HastaLaVistaBaby,
-    TalkToMyHand,
-}
-
-let x = Terminator::HastaLaVistaBaby;
-
-match x { // error: non-exhaustive patterns: `HastaLaVistaBaby` not covered
-    Terminator::TalkToMyHand => {}
-}
-```
-
-If you encounter this error you must alter your patterns so that every possible
-value of the input type is matched. For types with a small number of variants
-(like enums) you should probably cover all cases explicitly. Alternatively, the
-underscore `_` wildcard pattern can be added after all other patterns to match
-"anything else". Example:
-
-```
-enum Terminator {
-    HastaLaVistaBaby,
-    TalkToMyHand,
-}
-
-let x = Terminator::HastaLaVistaBaby;
-
-match x {
-    Terminator::TalkToMyHand => {}
-    Terminator::HastaLaVistaBaby => {}
-}
-
-// or:
-
-match x {
-    Terminator::TalkToMyHand => {}
-    _ => {}
-}
-```
-"##,
-
-E0005: r##"
-Patterns used to bind names must be irrefutable, that is, they must guarantee
-that a name will be extracted in all cases. Erroneous code example:
-
-```compile_fail
-let x = Some(1);
-let Some(y) = x;
-// error: refutable pattern in local binding: `None` not covered
-```
-
-If you encounter this error you probably need to use a `match` or `if let` to
-deal with the possibility of failure. Example:
-
-```compile_fail
-let x = Some(1);
-
-match x {
-    Some(y) => {
-        // do something
-    },
-    None => {}
-}
-
-// or:
-
-if let Some(y) = x {
-    // do something
-}
-```
-"##,
-
-E0007: r##"
-This error indicates that the bindings in a match arm would require a value to
-be moved into more than one location, thus violating unique ownership. Code
-like the following is invalid as it requires the entire `Option<String>` to be
-moved into a variable called `op_string` while simultaneously requiring the
-inner `String` to be moved into a variable called `s`.
-
-```compile_fail
-let x = Some("s".to_string());
-
-match x {
-    op_string @ Some(s) => {},
-    None => {},
-}
-```
-
-See also the error E0303.
-"##,
-
-E0008: r##"
-Names bound in match arms retain their type in pattern guards. As such, if a
-name is bound by move in a pattern, it should also be moved to wherever it is
-referenced in the pattern guard code. Doing so however would prevent the name
-from being available in the body of the match arm. Consider the following:
-
-```compile_fail
-match Some("hi".to_string()) {
-    Some(s) if s.len() == 0 => {}, // use s.
-    _ => {},
-}
-```
-
-The variable `s` has type `String`, and its use in the guard is as a variable of
-type `String`. The guard code effectively executes in a separate scope to the
-body of the arm, so the value would be moved into this anonymous scope and
-therefore become unavailable in the body of the arm. Although this example seems
-innocuous, the problem is most clear when considering functions that take their
-argument by value.
-
-```compile_fail
-match Some("hi".to_string()) {
-    Some(s) if { drop(s); false } => (),
-    Some(s) => {}, // use s.
-    _ => {},
-}
-```
-
-The value would be dropped in the guard then become unavailable not only in the
-body of that arm but also in all subsequent arms! The solution is to bind by
-reference when using guards or refactor the entire expression, perhaps by
-putting the condition inside the body of the arm.
-"##,
-
-E0009: r##"
-In a pattern, all values that don't implement the `Copy` trait have to be bound
-the same way. The goal here is to avoid binding simultaneously by-move and
-by-ref.
-
-This limitation may be removed in a future version of Rust.
-
-Erroneous code example:
-
-```compile_fail
-struct X { x: (), }
-
-let x = Some((X { x: () }, X { x: () }));
-match x {
-    Some((y, ref z)) => {},
-    None => panic!()
-}
-```
-
-You have two solutions:
-
-Solution #1: Bind the pattern's values the same way.
-
-```
-struct X { x: (), }
-
-let x = Some((X { x: () }, X { x: () }));
-match x {
-    Some((ref y, ref z)) => {},
-    // or Some((y, z)) => {}
-    None => panic!()
-}
-```
-
-Solution #2: Implement the `Copy` trait for the `X` structure.
-
-However, please keep in mind that the first solution should be preferred.
-
-```
-#[derive(Clone, Copy)]
-struct X { x: (), }
-
-let x = Some((X { x: () }, X { x: () }));
-match x {
-    Some((y, ref z)) => {},
-    None => panic!()
-}
-```
-"##,
-
 E0020: r##"
 This error indicates that an attempt was made to divide by zero (or take the
 remainder of a zero divisor) in a static or constant expression. Erroneous
@@ -762,128 +495,6 @@ attributes:
 See also https://doc.rust-lang.org/book/no-stdlib.html
 "##,
 
-E0158: r##"
-`const` and `static` mean different things. A `const` is a compile-time
-constant, an alias for a literal value. This property means you can match it
-directly within a pattern.
-
-The `static` keyword, on the other hand, guarantees a fixed location in memory.
-This does not always mean that the value is constant. For example, a global
-mutex can be declared `static` as well.
-
-If you want to match against a `static`, consider using a guard instead:
-
-```
-static FORTY_TWO: i32 = 42;
-
-match Some(42) {
-    Some(x) if x == FORTY_TWO => {}
-    _ => {}
-}
-```
-"##,
-
-E0162: r##"
-An if-let pattern attempts to match the pattern, and enters the body if the
-match was successful. If the match is irrefutable (when it cannot fail to
-match), use a regular `let`-binding instead. For instance:
-
-```compile_fail
-struct Irrefutable(i32);
-let irr = Irrefutable(0);
-
-// This fails to compile because the match is irrefutable.
-if let Irrefutable(x) = irr {
-    // This body will always be executed.
-    foo(x);
-}
-```
-
-Try this instead:
-
-```ignore
-struct Irrefutable(i32);
-let irr = Irrefutable(0);
-
-let Irrefutable(x) = irr;
-foo(x);
-```
-"##,
-
-E0165: r##"
-A while-let pattern attempts to match the pattern, and enters the body if the
-match was successful. If the match is irrefutable (when it cannot fail to
-match), use a regular `let`-binding inside a `loop` instead. For instance:
-
-```compile_fail
-struct Irrefutable(i32);
-let irr = Irrefutable(0);
-
-// This fails to compile because the match is irrefutable.
-while let Irrefutable(x) = irr {
-    ...
-}
-
-Try this instead:
-
-```
-struct Irrefutable(i32);
-let irr = Irrefutable(0);
-
-loop {
-    let Irrefutable(x) = irr;
-    ...
-}
-```
-"##,
-
-E0170: r##"
-Enum variants are qualified by default. For example, given this type:
-
-```
-enum Method {
-    GET,
-    POST,
-}
-```
-
-You would match it using:
-
-```
-enum Method {
-    GET,
-    POST,
-}
-
-let m = Method::GET;
-
-match m {
-    Method::GET => {},
-    Method::POST => {},
-}
-```
-
-If you don't qualify the names, the code will bind new variables named "GET" and
-"POST" instead. This behavior is likely not what you want, so `rustc` warns when
-that happens.
-
-Qualified names are good practice, and most code works well with them. But if
-you prefer them unqualified, you can import the variants into scope:
-
-```ignore
-use Method::*;
-enum Method { GET, POST }
-```
-
-If you want others to be able to import variants from your module directly, use
-`pub use`:
-
-```ignore
-pub use Method::*;
-enum Method { GET, POST }
-```
-"##,
-
 E0229: r##"
 An associated type binding was done outside of the type parameter declaration
 and `where` clause. Erroneous code example:
@@ -1261,7 +872,7 @@ compiled:
 fn foo<T: Index<u8>>(x: T){}
 
 #[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"]
-trait Index<Idx> { ... }
+trait Index<Idx> { /* ... */ }
 
 foo(true); // `bool` does not implement `Index<u8>`
 ```
@@ -1291,7 +902,7 @@ compiled:
 fn foo<T: Index<u8>>(x: T){}
 
 #[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"]
-trait Index<Idx> { ... }
+trait Index<Idx> { /* ... */ }
 
 foo(true); // `bool` does not implement `Index<u8>`
 ```
@@ -1319,7 +930,7 @@ compiled:
 fn foo<T: Index<u8>>(x: T){}
 
 #[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"]
-trait Index<Idx> { ... }
+trait Index<Idx> { /* ... */ }
 
 foo(true); // `bool` does not implement `Index<u8>`
 ```
@@ -1395,8 +1006,7 @@ fn some_func<T: Foo>(foo: T) {
 fn main() {
     // we now call the method with the i32 type, which doesn't implement
     // the Foo trait
-    some_func(5i32); // error: the trait `Foo` is not implemented for the
-                     //        type `i32`
+    some_func(5i32); // error: the trait bound `i32 : Foo` is not satisfied
 }
 ```
 
@@ -1422,6 +1032,47 @@ fn main() {
     some_func(5i32); // ok!
 }
 ```
+
+Or in a generic context, an erroneous code example would look like:
+
+```compile_fail
+fn some_func<T>(foo: T) {
+    println!("{:?}", foo); // error: the trait `core::fmt::Debug` is not
+                           //        implemented for the type `T`
+}
+
+fn main() {
+    // We now call the method with the i32 type,
+    // which *does* implement the Debug trait.
+    some_func(5i32);
+}
+```
+
+Note that the error here is in the definition of the generic function:
+although we only call it with a parameter that does implement `Debug`, the
+compiler still rejects the function because it must work with all possible
+input types. In order to make this example compile, we need to restrict the
+generic type we're accepting:
+
+```
+use std::fmt;
+
+// Restrict the input type to types that implement Debug.
+fn some_func<T: fmt::Debug>(foo: T) {
+    println!("{:?}", foo);
+}
+
+fn main() {
+    // Calling the method is still fine, as i32 implements Debug.
+    some_func(5i32);
+
+    // This would fail to compile now:
+    // struct WithoutDebug;
+    // some_func(WithoutDebug);
+}
+```
+
+Rust only looks at the signature of the called function; as such, it must
+already specify all requirements that will be used for every type parameter.
+
 "##,
 
 E0281: r##"
@@ -1573,135 +1224,6 @@ that the value provided is a positive integer between quotes, like so:
 ```
 "##,
 
-E0297: r##"
-Patterns used to bind names must be irrefutable. That is, they must guarantee
-that a name will be extracted in all cases. Instead of pattern matching the
-loop variable, consider using a `match` or `if let` inside the loop body. For
-instance:
-
-```compile_fail
-let xs : Vec<Option<i32>> = vec!(Some(1), None);
-
-// This fails because `None` is not covered.
-for Some(x) in xs {
-    // ...
-}
-```
-
-Match inside the loop instead:
-
-```
-let xs : Vec<Option<i32>> = vec!(Some(1), None);
-
-for item in xs {
-    match item {
-        Some(x) => {},
-        None => {},
-    }
-}
-```
-
-Or use `if let`:
-
-```
-let xs : Vec<Option<i32>> = vec!(Some(1), None);
-
-for item in xs {
-    if let Some(x) = item {
-        // ...
-    }
-}
-```
-"##,
-
-E0301: r##"
-Mutable borrows are not allowed in pattern guards, because matching cannot have
-side effects. Side effects could alter the matched object or the environment
-on which the match depends in such a way, that the match would not be
-exhaustive. For instance, the following would not match any arm if mutable
-borrows were allowed:
-
-```compile_fail
-match Some(()) {
-    None => { },
-    option if option.take().is_none() => {
-        /* impossible, option is `Some` */
-    },
-    Some(_) => { } // When the previous match failed, the option became `None`.
-}
-```
-"##,
-
-E0302: r##"
-Assignments are not allowed in pattern guards, because matching cannot have
-side effects. Side effects could alter the matched object or the environment
-on which the match depends in such a way, that the match would not be
-exhaustive. For instance, the following would not match any arm if assignments
-were allowed:
-
-```compile_fail
-match Some(()) {
-    None => { },
-    option if { option = None; false } { },
-    Some(_) => { } // When the previous match failed, the option became `None`.
-}
-```
-"##,
-
-E0303: r##"
-In certain cases it is possible for sub-bindings to violate memory safety.
-Updates to the borrow checker in a future version of Rust may remove this
-restriction, but for now patterns must be rewritten without sub-bindings.
-
-```ignore
-// Before.
-match Some("hi".to_string()) {
-    ref op_string_ref @ Some(s) => {},
-    None => {},
-}
-
-// After.
-match Some("hi".to_string()) {
-    Some(ref s) => {
-        let op_string_ref = &Some(s);
-        // ...
-    },
-    None => {},
-}
-```
-
-The `op_string_ref` binding has type `&Option<&String>` in both cases.
-
-See also https://github.com/rust-lang/rust/issues/14587
-"##,
-
-E0306: r##"
-In an array literal `[x; N]`, `N` is the number of elements in the array. This
-must be an unsigned integer. Erroneous code example:
-
-```compile_fail
-let x = [0i32; true]; // error: expected positive integer for repeat count,
-                      //        found boolean
-```
-
-Working example:
-
-```
-let x = [0i32; 2];
-```
-"##,
-
-E0307: r##"
-The length of an array is part of its type. For this reason, this length must
-be a compile-time constant. Erroneous code example:
-
-```compile_fail
-    let len = 10;
-    let x = [0i32; len]; // error: expected constant integer for repeat count,
-                         //        found variable
-```
-"##,
-
 E0308: r##"
 This error occurs when the compiler was unable to infer the concrete type of a
 variable. It can occur for several cases, the most common of which is a
@@ -1888,6 +1410,32 @@ It is not possible to use stability attributes outside of the standard library.
 Also, for now, it is not possible to write deprecation messages either.
 "##,
 
+E0512: r##"
+Transmute with two differently sized types was attempted. Erroneous code
+example:
+
+```compile_fail
+fn takes_u8(_: u8) {}
+
+fn main() {
+    unsafe { takes_u8(::std::mem::transmute(0u16)); }
+    // error: transmute called with differently sized types
+}
+```
+
+Please use types with the same size or use the expected type directly.
+Example:
+
+```
+fn takes_u8(_: u8) {}
+
+fn main() {
+    unsafe { takes_u8(::std::mem::transmute(0i8)); } // ok!
+    // or:
+    unsafe { takes_u8(0u8); } // ok!
+}
+```
+"##,
+
 E0517: r##"
 This error indicates that a `#[repr(..)]` attribute was placed on an
 unsupported item.
@@ -1962,6 +1510,23 @@ each method; it is not possible to annotate the entire impl with an `#[inline]`
 attribute.
 "##,
 
+E0522: r##"
+The lang attribute is intended for marking special items that are built into
+Rust itself. This includes special traits (like `Copy` and `Sized`) that affect
+how the compiler behaves, as well as special functions that may be automatically
+invoked (such as the handler for out-of-bounds accesses when indexing a slice).
+Erroneous code example:
+
+```compile_fail
+#![feature(lang_items)]
+
+#[lang = "cookie"]
+fn cookie() -> ! { // error: definition of an unknown language item: `cookie`
+    loop {}
+}
+```
+"##,
+
 }
 
 
@@ -1974,8 +1539,6 @@ register_diagnostics! {
     E0280, // requirement is not satisfied
     E0284, // cannot resolve type
 //  E0285, // overflow evaluation builtin bounds
-    E0298, // mismatched types between arms
-    E0299, // mismatched types between arms
 //  E0300, // unexpanded macro
 //  E0304, // expected signed integer constant
 //  E0305, // expected constant
@@ -1986,7 +1549,6 @@ register_diagnostics! {
     E0315, // cannot invoke closure outside of its lifetime
     E0316, // nested quantification of lifetimes
     E0453, // overruled by outer forbid
-    E0471, // constant evaluation error: ..
     E0473, // dereference of reference outside its lifetime
     E0474, // captured variable `..` does not outlive the enclosing closure
     E0475, // index of slice outside its lifetime
diff --git a/src/librustc/front/check_attr.rs b/src/librustc/front/check_attr.rs
deleted file mode 100644 (file)
index 085acc1..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use session::Session;
-
-use syntax::ast;
-use syntax::attr::AttrMetaMethods;
-use syntax::visit;
-use syntax::visit::Visitor;
-
-#[derive(Copy, Clone, PartialEq)]
-enum Target {
-    Fn,
-    Struct,
-    Enum,
-    Other,
-}
-
-impl Target {
-    fn from_item(item: &ast::Item) -> Target {
-        match item.node {
-            ast::ItemKind::Fn(..) => Target::Fn,
-            ast::ItemKind::Struct(..) => Target::Struct,
-            ast::ItemKind::Enum(..) => Target::Enum,
-            _ => Target::Other,
-        }
-    }
-}
-
-struct CheckAttrVisitor<'a> {
-    sess: &'a Session,
-}
-
-impl<'a> CheckAttrVisitor<'a> {
-    fn check_inline(&self, attr: &ast::Attribute, target: Target) {
-        if target != Target::Fn {
-            span_err!(self.sess, attr.span, E0518, "attribute should be applied to function");
-        }
-    }
-
-    fn check_repr(&self, attr: &ast::Attribute, target: Target) {
-        let words = match attr.meta_item_list() {
-            Some(words) => words,
-            None => {
-                return;
-            }
-        };
-        for word in words {
-            let word: &str = &word.name();
-            let message = match word {
-                "C" => {
-                    if target != Target::Struct && target != Target::Enum {
-                            "attribute should be applied to struct or enum"
-                    } else {
-                        continue
-                    }
-                }
-                "packed" |
-                "simd" => {
-                    if target != Target::Struct {
-                        "attribute should be applied to struct"
-                    } else {
-                        continue
-                    }
-                }
-                "i8" | "u8" | "i16" | "u16" |
-                "i32" | "u32" | "i64" | "u64" |
-                "isize" | "usize" => {
-                    if target != Target::Enum {
-                            "attribute should be applied to enum"
-                    } else {
-                        continue
-                    }
-                }
-                _ => continue,
-            };
-            span_err!(self.sess, attr.span, E0517, "{}", message);
-        }
-    }
-
-    fn check_attribute(&self, attr: &ast::Attribute, target: Target) {
-        let name: &str = &attr.name();
-        match name {
-            "inline" => self.check_inline(attr, target),
-            "repr" => self.check_repr(attr, target),
-            _ => (),
-        }
-    }
-}
-
-impl<'a, 'v> Visitor<'v> for CheckAttrVisitor<'a> {
-    fn visit_item(&mut self, item: &ast::Item) {
-        let target = Target::from_item(item);
-        for attr in &item.attrs {
-            self.check_attribute(attr, target);
-        }
-        visit::walk_item(self, item);
-    }
-}
-
-pub fn check_crate(sess: &Session, krate: &ast::Crate) {
-    visit::walk_crate(&mut CheckAttrVisitor { sess: sess }, krate);
-}
diff --git a/src/librustc/front/map/blocks.rs b/src/librustc/front/map/blocks.rs
deleted file mode 100644 (file)
index 752b625..0000000
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! This module provides a simplified abstraction for working with
-//! code blocks identified by their integer node-id.  In particular,
-//! it captures a common set of attributes that all "function-like
-//! things" (represented by `FnLike` instances) share.  For example,
-//! all `FnLike` instances have a type signature (be it explicit or
-//! inferred).  And all `FnLike` instances have a body, i.e. the code
-//! that is run when the function-like thing it represents is invoked.
-//!
-//! With the above abstraction in place, one can treat the program
-//! text as a collection of blocks of code (and most such blocks are
-//! nested within a uniquely determined `FnLike`), and users can ask
-//! for the `Code` associated with a particular NodeId.
-
-pub use self::Code::*;
-
-use front::map::{self, Node};
-use syntax::abi;
-use rustc_front::hir::{Block, FnDecl};
-use syntax::ast::{Name, NodeId};
-use rustc_front::hir as ast;
-use syntax::codemap::Span;
-use rustc_front::intravisit::FnKind;
-
-/// An FnLikeNode is a Node that is like a fn, in that it has a decl
-/// and a body (as well as a NodeId, a span, etc).
-///
-/// More specifically, it is one of either:
-///   - A function item,
-///   - A closure expr (i.e. an ExprClosure), or
-///   - The default implementation for a trait method.
-///
-/// To construct one, use the `Code::from_node` function.
-#[derive(Copy, Clone)]
-pub struct FnLikeNode<'a> { node: map::Node<'a> }
-
-/// MaybeFnLike wraps a method that indicates if an object
-/// corresponds to some FnLikeNode.
-pub trait MaybeFnLike { fn is_fn_like(&self) -> bool; }
-
-/// Components shared by fn-like things (fn items, methods, closures).
-pub struct FnParts<'a> {
-    pub decl: &'a FnDecl,
-    pub body: &'a Block,
-    pub kind: FnKind<'a>,
-    pub span: Span,
-    pub id:   NodeId,
-}
-
-impl MaybeFnLike for ast::Item {
-    fn is_fn_like(&self) -> bool {
-        match self.node { ast::ItemFn(..) => true, _ => false, }
-    }
-}
-
-impl MaybeFnLike for ast::TraitItem {
-    fn is_fn_like(&self) -> bool {
-        match self.node { ast::MethodTraitItem(_, Some(_)) => true, _ => false, }
-    }
-}
-
-impl MaybeFnLike for ast::Expr {
-    fn is_fn_like(&self) -> bool {
-        match self.node {
-            ast::ExprClosure(..) => true,
-            _ => false,
-        }
-    }
-}
-
-/// Carries either an FnLikeNode or a Block, as these are the two
-/// constructs that correspond to "code" (as in, something from which
-/// we can construct a control-flow graph).
-#[derive(Copy, Clone)]
-pub enum Code<'a> {
-    FnLikeCode(FnLikeNode<'a>),
-    BlockCode(&'a Block),
-}
-
-impl<'a> Code<'a> {
-    pub fn id(&self) -> NodeId {
-        match *self {
-            FnLikeCode(node) => node.id(),
-            BlockCode(block) => block.id,
-        }
-    }
-
-    /// Attempts to construct a Code from presumed FnLike or Block node input.
-    pub fn from_node(node: Node) -> Option<Code> {
-        if let map::NodeBlock(block) = node {
-            Some(BlockCode(block))
-        } else {
-            FnLikeNode::from_node(node).map(|fn_like| FnLikeCode(fn_like))
-        }
-    }
-}
-
-/// These are all the components one can extract from a fn item for
-/// use when implementing FnLikeNode operations.
-struct ItemFnParts<'a> {
-    name:     Name,
-    decl:     &'a ast::FnDecl,
-    unsafety: ast::Unsafety,
-    constness: ast::Constness,
-    abi:      abi::Abi,
-    vis:      ast::Visibility,
-    generics: &'a ast::Generics,
-    body:     &'a Block,
-    id:       NodeId,
-    span:     Span
-}
-
-/// These are all the components one can extract from a closure expr
-/// for use when implementing FnLikeNode operations.
-struct ClosureParts<'a> {
-    decl: &'a FnDecl,
-    body: &'a Block,
-    id: NodeId,
-    span: Span
-}
-
-impl<'a> ClosureParts<'a> {
-    fn new(d: &'a FnDecl, b: &'a Block, id: NodeId, s: Span) -> ClosureParts<'a> {
-        ClosureParts { decl: d, body: b, id: id, span: s }
-    }
-}
-
-impl<'a> FnLikeNode<'a> {
-    /// Attempts to construct a FnLikeNode from presumed FnLike node input.
-    pub fn from_node(node: Node) -> Option<FnLikeNode> {
-        let fn_like = match node {
-            map::NodeItem(item) => item.is_fn_like(),
-            map::NodeTraitItem(tm) => tm.is_fn_like(),
-            map::NodeImplItem(_) => true,
-            map::NodeExpr(e) => e.is_fn_like(),
-            _ => false
-        };
-        if fn_like {
-            Some(FnLikeNode {
-                node: node
-            })
-        } else {
-            None
-        }
-    }
-
-    pub fn to_fn_parts(self) -> FnParts<'a> {
-        FnParts {
-            decl: self.decl(),
-            body: self.body(),
-            kind: self.kind(),
-            span: self.span(),
-            id:   self.id(),
-        }
-    }
-
-    pub fn body(self) -> &'a Block {
-        self.handle(|i: ItemFnParts<'a>|  &*i.body,
-                    |_, _, _: &'a ast::MethodSig, _, body: &'a ast::Block, _|  body,
-                    |c: ClosureParts<'a>| c.body)
-    }
-
-    pub fn decl(self) -> &'a FnDecl {
-        self.handle(|i: ItemFnParts<'a>|  &*i.decl,
-                    |_, _, sig: &'a ast::MethodSig, _, _, _|  &sig.decl,
-                    |c: ClosureParts<'a>| c.decl)
-    }
-
-    pub fn span(self) -> Span {
-        self.handle(|i: ItemFnParts|     i.span,
-                    |_, _, _: &'a ast::MethodSig, _, _, span| span,
-                    |c: ClosureParts|    c.span)
-    }
-
-    pub fn id(self) -> NodeId {
-        self.handle(|i: ItemFnParts|     i.id,
-                    |id, _, _: &'a ast::MethodSig, _, _, _| id,
-                    |c: ClosureParts|    c.id)
-    }
-
-    pub fn kind(self) -> FnKind<'a> {
-        let item = |p: ItemFnParts<'a>| -> FnKind<'a> {
-            FnKind::ItemFn(p.name, p.generics, p.unsafety, p.constness, p.abi, p.vis)
-        };
-        let closure = |_: ClosureParts| {
-            FnKind::Closure
-        };
-        let method = |_, name: Name, sig: &'a ast::MethodSig, vis, _, _| {
-            FnKind::Method(name, sig, vis)
-        };
-        self.handle(item, method, closure)
-    }
-
-    fn handle<A, I, M, C>(self, item_fn: I, method: M, closure: C) -> A where
-        I: FnOnce(ItemFnParts<'a>) -> A,
-        M: FnOnce(NodeId,
-                  Name,
-                  &'a ast::MethodSig,
-                  Option<ast::Visibility>,
-                  &'a ast::Block,
-                  Span)
-                  -> A,
-        C: FnOnce(ClosureParts<'a>) -> A,
-    {
-        match self.node {
-            map::NodeItem(i) => match i.node {
-                ast::ItemFn(ref decl, unsafety, constness, abi, ref generics, ref block) =>
-                    item_fn(ItemFnParts {
-                        id: i.id,
-                        name: i.name,
-                        decl: &decl,
-                        unsafety: unsafety,
-                        body: &block,
-                        generics: generics,
-                        abi: abi,
-                        vis: i.vis,
-                        constness: constness,
-                        span: i.span
-                    }),
-                _ => panic!("item FnLikeNode that is not fn-like"),
-            },
-            map::NodeTraitItem(ti) => match ti.node {
-                ast::MethodTraitItem(ref sig, Some(ref body)) => {
-                    method(ti.id, ti.name, sig, None, body, ti.span)
-                }
-                _ => panic!("trait method FnLikeNode that is not fn-like"),
-            },
-            map::NodeImplItem(ii) => {
-                match ii.node {
-                    ast::ImplItemKind::Method(ref sig, ref body) => {
-                        method(ii.id, ii.name, sig, Some(ii.vis), body, ii.span)
-                    }
-                    _ => {
-                        panic!("impl method FnLikeNode that is not fn-like")
-                    }
-                }
-            }
-            map::NodeExpr(e) => match e.node {
-                ast::ExprClosure(_, ref decl, ref block) =>
-                    closure(ClosureParts::new(&decl, &block, e.id, e.span)),
-                _ => panic!("expr FnLikeNode that is not fn-like"),
-            },
-            _ => panic!("other FnLikeNode that is not fn-like"),
-        }
-    }
-}
diff --git a/src/librustc/front/map/collector.rs b/src/librustc/front/map/collector.rs
deleted file mode 100644 (file)
index 3c4f682..0000000
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::*;
-use super::MapEntry::*;
-
-use rustc_front::hir::*;
-use rustc_front::util;
-use rustc_front::intravisit::{self, Visitor};
-use middle::def_id::{CRATE_DEF_INDEX, DefIndex};
-use std::iter::repeat;
-use syntax::ast::{NodeId, CRATE_NODE_ID, DUMMY_NODE_ID};
-use syntax::codemap::Span;
-
-/// A Visitor that walks over an AST and collects Node's into an AST
-/// Map.
-pub struct NodeCollector<'ast> {
-    pub krate: &'ast Crate,
-    pub map: Vec<MapEntry<'ast>>,
-    pub definitions: Definitions,
-    pub parent_node: NodeId,
-}
-
-impl<'ast> NodeCollector<'ast> {
-    pub fn root(krate: &'ast Crate) -> NodeCollector<'ast> {
-        let mut collector = NodeCollector {
-            krate: krate,
-            map: vec![],
-            definitions: Definitions::new(),
-            parent_node: CRATE_NODE_ID,
-        };
-        collector.insert_entry(CRATE_NODE_ID, RootCrate);
-
-        let result = collector.create_def_with_parent(None, CRATE_NODE_ID, DefPathData::CrateRoot);
-        assert_eq!(result, CRATE_DEF_INDEX);
-
-        collector.create_def_with_parent(Some(CRATE_DEF_INDEX), DUMMY_NODE_ID, DefPathData::Misc);
-
-        collector
-    }
-
-    pub fn extend(krate: &'ast Crate,
-                  parent: &'ast InlinedParent,
-                  parent_node: NodeId,
-                  parent_def_path: DefPath,
-                  map: Vec<MapEntry<'ast>>,
-                  definitions: Definitions)
-                  -> NodeCollector<'ast> {
-        let mut collector = NodeCollector {
-            krate: krate,
-            map: map,
-            parent_node: parent_node,
-            definitions: definitions,
-        };
-
-        collector.insert_entry(parent_node, RootInlinedParent(parent));
-        collector.create_def(parent_node, DefPathData::InlinedRoot(parent_def_path));
-
-        collector
-    }
-
-    fn parent_def(&self) -> Option<DefIndex> {
-        let mut parent_node = Some(self.parent_node);
-        while let Some(p) = parent_node {
-            if let Some(q) = self.definitions.opt_def_index(p) {
-                return Some(q);
-            }
-            parent_node = self.map[p as usize].parent_node();
-        }
-        None
-    }
-
-    fn create_def(&mut self, node_id: NodeId, data: DefPathData) -> DefIndex {
-        let parent_def = self.parent_def();
-        debug!("create_def(node_id={:?}, data={:?}, parent_def={:?})", node_id, data, parent_def);
-        self.definitions.create_def_with_parent(parent_def, node_id, data)
-    }
-
-    fn create_def_with_parent(&mut self,
-                              parent: Option<DefIndex>,
-                              node_id: NodeId,
-                              data: DefPathData)
-                              -> DefIndex {
-        self.definitions.create_def_with_parent(parent, node_id, data)
-    }
-
-    fn insert_entry(&mut self, id: NodeId, entry: MapEntry<'ast>) {
-        debug!("ast_map: {:?} => {:?}", id, entry);
-        let len = self.map.len();
-        if id as usize >= len {
-            self.map.extend(repeat(NotPresent).take(id as usize - len + 1));
-        }
-        self.map[id as usize] = entry;
-    }
-
-    fn insert_def(&mut self, id: NodeId, node: Node<'ast>, data: DefPathData) -> DefIndex {
-        self.insert(id, node);
-        self.create_def(id, data)
-    }
-
-    fn insert(&mut self, id: NodeId, node: Node<'ast>) {
-        let entry = MapEntry::from_node(self.parent_node, node);
-        self.insert_entry(id, entry);
-    }
-}
-
-impl<'ast> Visitor<'ast> for NodeCollector<'ast> {
-    /// Because we want to track parent items and so forth, enable
-    /// deep walking so that we walk nested items in the context of
-    /// their outer items.
-    fn visit_nested_item(&mut self, item: ItemId) {
-        debug!("visit_nested_item: {:?}", item);
-        self.visit_item(self.krate.item(item.id))
-    }
-
-    fn visit_item(&mut self, i: &'ast Item) {
-        debug!("visit_item: {:?}", i);
-
-        // Pick the def data. This need not be unique, but the more
-        // information we encapsulate into
-        let def_data = match i.node {
-            ItemDefaultImpl(..) | ItemImpl(..) => DefPathData::Impl(i.name),
-            ItemEnum(..) | ItemStruct(..) | ItemTrait(..) => DefPathData::Type(i.name),
-            ItemExternCrate(..) | ItemMod(..) => DefPathData::Mod(i.name),
-            ItemStatic(..) | ItemConst(..) | ItemFn(..) => DefPathData::Value(i.name),
-            _ => DefPathData::Misc,
-        };
-
-        self.insert_def(i.id, NodeItem(i), def_data);
-
-        let parent_node = self.parent_node;
-        self.parent_node = i.id;
-
-        match i.node {
-            ItemImpl(..) => {}
-            ItemEnum(ref enum_definition, _) => {
-                for v in &enum_definition.variants {
-                    let variant_def_index =
-                        self.insert_def(v.node.data.id(),
-                                        NodeVariant(v),
-                                        DefPathData::EnumVariant(v.node.name));
-
-                    for field in v.node.data.fields() {
-                        self.create_def_with_parent(
-                            Some(variant_def_index),
-                            field.node.id,
-                            DefPathData::Field(field.node.kind));
-                    }
-                }
-            }
-            ItemForeignMod(..) => {
-            }
-            ItemStruct(ref struct_def, _) => {
-                // If this is a tuple-like struct, register the constructor.
-                if !struct_def.is_struct() {
-                    self.insert_def(struct_def.id(),
-                                    NodeStructCtor(struct_def),
-                                    DefPathData::StructCtor);
-                }
-
-                for field in struct_def.fields() {
-                    self.create_def(field.node.id, DefPathData::Field(field.node.kind));
-                }
-            }
-            ItemTrait(_, _, ref bounds, _) => {
-                for b in bounds.iter() {
-                    if let TraitTyParamBound(ref t, TraitBoundModifier::None) = *b {
-                        self.insert(t.trait_ref.ref_id, NodeItem(i));
-                    }
-                }
-            }
-            ItemUse(ref view_path) => {
-                match view_path.node {
-                    ViewPathList(_, ref paths) => {
-                        for path in paths {
-                            self.insert(path.node.id(), NodeItem(i));
-                        }
-                    }
-                    _ => ()
-                }
-            }
-            _ => {}
-        }
-        intravisit::walk_item(self, i);
-        self.parent_node = parent_node;
-    }
-
-    fn visit_foreign_item(&mut self, foreign_item: &'ast ForeignItem) {
-        self.insert_def(foreign_item.id,
-                        NodeForeignItem(foreign_item),
-                        DefPathData::Value(foreign_item.name));
-
-        let parent_node = self.parent_node;
-        self.parent_node = foreign_item.id;
-        intravisit::walk_foreign_item(self, foreign_item);
-        self.parent_node = parent_node;
-    }
-
-    fn visit_generics(&mut self, generics: &'ast Generics) {
-        for ty_param in generics.ty_params.iter() {
-            self.insert_def(ty_param.id,
-                            NodeTyParam(ty_param),
-                            DefPathData::TypeParam(ty_param.name));
-        }
-
-        intravisit::walk_generics(self, generics);
-    }
-
-    fn visit_trait_item(&mut self, ti: &'ast TraitItem) {
-        let def_data = match ti.node {
-            MethodTraitItem(..) | ConstTraitItem(..) => DefPathData::Value(ti.name),
-            TypeTraitItem(..) => DefPathData::Type(ti.name),
-        };
-
-        self.insert(ti.id, NodeTraitItem(ti));
-        self.create_def(ti.id, def_data);
-
-        let parent_node = self.parent_node;
-        self.parent_node = ti.id;
-
-        match ti.node {
-            ConstTraitItem(_, Some(ref expr)) => {
-                self.create_def(expr.id, DefPathData::Initializer);
-            }
-            _ => { }
-        }
-
-        intravisit::walk_trait_item(self, ti);
-
-        self.parent_node = parent_node;
-    }
-
-    fn visit_impl_item(&mut self, ii: &'ast ImplItem) {
-        let def_data = match ii.node {
-            ImplItemKind::Method(..) | ImplItemKind::Const(..) => DefPathData::Value(ii.name),
-            ImplItemKind::Type(..) => DefPathData::Type(ii.name),
-        };
-
-        self.insert_def(ii.id, NodeImplItem(ii), def_data);
-
-        let parent_node = self.parent_node;
-        self.parent_node = ii.id;
-
-        match ii.node {
-            ImplItemKind::Const(_, ref expr) => {
-                self.create_def(expr.id, DefPathData::Initializer);
-            }
-            _ => { }
-        }
-
-        intravisit::walk_impl_item(self, ii);
-
-        self.parent_node = parent_node;
-    }
-
-    fn visit_pat(&mut self, pat: &'ast Pat) {
-        let maybe_binding = match pat.node {
-            PatKind::Ident(_, id, _) => Some(id.node),
-            _ => None
-        };
-
-        if let Some(id) = maybe_binding {
-            self.insert_def(pat.id, NodeLocal(pat), DefPathData::Binding(id.name));
-        } else {
-            self.insert(pat.id, NodePat(pat));
-        }
-
-        let parent_node = self.parent_node;
-        self.parent_node = pat.id;
-        intravisit::walk_pat(self, pat);
-        self.parent_node = parent_node;
-    }
-
-    fn visit_expr(&mut self, expr: &'ast Expr) {
-        self.insert(expr.id, NodeExpr(expr));
-
-        match expr.node {
-            ExprClosure(..) => { self.create_def(expr.id, DefPathData::ClosureExpr); }
-            _ => { }
-        }
-
-        let parent_node = self.parent_node;
-        self.parent_node = expr.id;
-        intravisit::walk_expr(self, expr);
-        self.parent_node = parent_node;
-    }
-
-    fn visit_stmt(&mut self, stmt: &'ast Stmt) {
-        let id = util::stmt_id(stmt);
-        self.insert(id, NodeStmt(stmt));
-        let parent_node = self.parent_node;
-        self.parent_node = id;
-        intravisit::walk_stmt(self, stmt);
-        self.parent_node = parent_node;
-    }
-
-    fn visit_fn(&mut self, fk: intravisit::FnKind<'ast>, fd: &'ast FnDecl,
-                b: &'ast Block, s: Span, id: NodeId) {
-        assert_eq!(self.parent_node, id);
-        intravisit::walk_fn(self, fk, fd, b, s);
-    }
-
-    fn visit_block(&mut self, block: &'ast Block) {
-        self.insert(block.id, NodeBlock(block));
-        let parent_node = self.parent_node;
-        self.parent_node = block.id;
-        intravisit::walk_block(self, block);
-        self.parent_node = parent_node;
-    }
-
-    fn visit_lifetime(&mut self, lifetime: &'ast Lifetime) {
-        self.insert(lifetime.id, NodeLifetime(lifetime));
-    }
-
-    fn visit_lifetime_def(&mut self, def: &'ast LifetimeDef) {
-        self.create_def(def.lifetime.id, DefPathData::LifetimeDef(def.lifetime.name));
-        self.visit_lifetime(&def.lifetime);
-    }
-
-    fn visit_macro_def(&mut self, macro_def: &'ast MacroDef) {
-        self.create_def(macro_def.id, DefPathData::MacroDef(macro_def.name));
-    }
-}
diff --git a/src/librustc/front/map/definitions.rs b/src/librustc/front/map/definitions.rs
deleted file mode 100644 (file)
index 10c1372..0000000
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use middle::cstore::LOCAL_CRATE;
-use middle::def_id::{DefId, DefIndex};
-use rustc_data_structures::fnv::FnvHashMap;
-use rustc_front::hir;
-use syntax::ast;
-use syntax::parse::token::InternedString;
-use util::nodemap::NodeMap;
-
-#[derive(Clone)]
-pub struct Definitions {
-    data: Vec<DefData>,
-    key_map: FnvHashMap<DefKey, DefIndex>,
-    node_map: NodeMap<DefIndex>,
-}
-
-/// A unique identifier that we can use to look up a definition
-/// precisely. It combines the index of the definition's parent (if
-/// any) with a `DisambiguatedDefPathData`.
-#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
-pub struct DefKey {
-    /// Parent path.
-    pub parent: Option<DefIndex>,
-
-    /// Identifier of this node.
-    pub disambiguated_data: DisambiguatedDefPathData,
-}
-
-/// Pair of `DefPathData` and an integer disambiguator. The integer is
-/// normally 0, but in the event that there are multiple defs with the
-/// same `parent` and `data`, we use this field to disambiguate
-/// between them. This introduces some artificial ordering dependency
-/// but means that if you have (e.g.) two impls for the same type in
-/// the same module, they do get distinct def-ids.
-#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
-pub struct DisambiguatedDefPathData {
-    pub data: DefPathData,
-    pub disambiguator: u32
-}
-
-/// For each definition, we track the following data.  A definition
-/// here is defined somewhat circularly as "something with a def-id",
-/// but it generally corresponds to things like structs, enums, etc.
-/// There are also some rather random cases (like const initializer
-/// expressions) that are mostly just leftovers.
-#[derive(Clone, Debug)]
-pub struct DefData {
-    pub key: DefKey,
-
-    /// Local ID within the HIR.
-    pub node_id: ast::NodeId,
-}
-
-pub type DefPath = Vec<DisambiguatedDefPathData>;
-
-#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
-pub enum DefPathData {
-    // Root: these should only be used for the root nodes, because
-    // they are treated specially by the `def_path` function.
-    CrateRoot,
-    InlinedRoot(DefPath),
-
-    // Catch-all for random DefId things like DUMMY_NODE_ID
-    Misc,
-
-    // Different kinds of items and item-like things:
-    Impl(ast::Name),
-    Type(ast::Name),
-    Mod(ast::Name),
-    Value(ast::Name),
-    MacroDef(ast::Name),
-    ClosureExpr,
-
-    // Subportions of items
-    TypeParam(ast::Name),
-    LifetimeDef(ast::Name),
-    EnumVariant(ast::Name),
-    PositionalField,
-    Field(hir::StructFieldKind),
-    StructCtor, // implicit ctor for a tuple-like struct
-    Initializer, // initializer for a const
-    Binding(ast::Name), // pattern binding
-
-    // An external crate that does not have an `extern crate` in this
-    // crate.
-    DetachedCrate(ast::Name),
-}
-
-impl Definitions {
-    pub fn new() -> Definitions {
-        Definitions {
-            data: vec![],
-            key_map: FnvHashMap(),
-            node_map: NodeMap(),
-        }
-    }
-
-    pub fn len(&self) -> usize {
-        self.data.len()
-    }
-
-    pub fn def_key(&self, index: DefIndex) -> DefKey {
-        self.data[index.as_usize()].key.clone()
-    }
-
-    /// Returns the path from the crate root to `index`. The root
-    /// nodes are not included in the path (i.e., this will be an
-    /// empty vector for the crate root). For an inlined item, this
-    /// will be the path of the item in the external crate (but the
-    /// path will begin with the path to the external crate).
-    pub fn def_path(&self, index: DefIndex) -> DefPath {
-        make_def_path(index, |p| self.def_key(p))
-    }
-
-    pub fn opt_def_index(&self, node: ast::NodeId) -> Option<DefIndex> {
-        self.node_map.get(&node).cloned()
-    }
-
-    pub fn opt_local_def_id(&self, node: ast::NodeId) -> Option<DefId> {
-        self.opt_def_index(node).map(DefId::local)
-    }
-
-    pub fn as_local_node_id(&self, def_id: DefId) -> Option<ast::NodeId> {
-        if def_id.krate == LOCAL_CRATE {
-            assert!(def_id.index.as_usize() < self.data.len());
-            Some(self.data[def_id.index.as_usize()].node_id)
-        } else {
-            None
-        }
-    }
-
-    pub fn create_def_with_parent(&mut self,
-                                  parent: Option<DefIndex>,
-                                  node_id: ast::NodeId,
-                                  data: DefPathData)
-                                  -> DefIndex {
-        assert!(!self.node_map.contains_key(&node_id),
-                "adding a def'n for node-id {:?} and data {:?} but a previous def'n exists: {:?}",
-                node_id,
-                data,
-                self.data[self.node_map[&node_id].as_usize()]);
-
-        // Find a unique DefKey. This basically means incrementing the disambiguator
-        // until we get no match.
-        let mut key = DefKey {
-            parent: parent,
-            disambiguated_data: DisambiguatedDefPathData {
-                data: data,
-                disambiguator: 0
-            }
-        };
-
-        while self.key_map.contains_key(&key) {
-            key.disambiguated_data.disambiguator += 1;
-        }
-
-        // Create the definition.
-        let index = DefIndex::new(self.data.len());
-        self.data.push(DefData { key: key.clone(), node_id: node_id });
-        self.node_map.insert(node_id, index);
-        self.key_map.insert(key, index);
-
-        index
-    }
-}
-
-impl DefPathData {
-    pub fn as_interned_str(&self) -> InternedString {
-        use self::DefPathData::*;
-        match *self {
-            Impl(name) |
-            Type(name) |
-            Mod(name) |
-            Value(name) |
-            MacroDef(name) |
-            TypeParam(name) |
-            LifetimeDef(name) |
-            EnumVariant(name) |
-            DetachedCrate(name) |
-            Binding(name) => {
-                name.as_str()
-            }
-
-            Field(hir::StructFieldKind::NamedField(name, _)) => {
-                name.as_str()
-            }
-
-            PositionalField |
-            Field(hir::StructFieldKind::UnnamedField(_)) => {
-                InternedString::new("{{field}}")
-            }
-
-            // note that this does not show up in user printouts
-            CrateRoot => {
-                InternedString::new("{{root}}")
-            }
-
-            // note that this does not show up in user printouts
-            InlinedRoot(_) => {
-                InternedString::new("{{inlined-root}}")
-            }
-
-            Misc => {
-                InternedString::new("{{?}}")
-            }
-
-            ClosureExpr => {
-                InternedString::new("{{closure}}")
-            }
-
-            StructCtor => {
-                InternedString::new("{{constructor}}")
-            }
-
-            Initializer => {
-                InternedString::new("{{initializer}}")
-            }
-        }
-    }
-
-    pub fn to_string(&self) -> String {
-        self.as_interned_str().to_string()
-    }
-}
-
-pub fn make_def_path<FN>(start_index: DefIndex, mut get_key: FN) -> DefPath
-    where FN: FnMut(DefIndex) -> DefKey
-{
-    let mut result = vec![];
-    let mut index = Some(start_index);
-    while let Some(p) = index {
-        let key = get_key(p);
-        match key.disambiguated_data.data {
-            DefPathData::CrateRoot => {
-                assert!(key.parent.is_none());
-                break;
-            }
-            DefPathData::InlinedRoot(ref p) => {
-                assert!(key.parent.is_none());
-                result.extend(p.iter().cloned().rev());
-                break;
-            }
-            _ => {
-                result.push(key.disambiguated_data);
-                index = key.parent;
-            }
-        }
-    }
-    result.reverse();
-    result
-}
diff --git a/src/librustc/front/map/mod.rs b/src/librustc/front/map/mod.rs
deleted file mode 100644 (file)
index adf14d0..0000000
+++ /dev/null
@@ -1,1096 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-pub use self::Node::*;
-pub use self::PathElem::*;
-use self::MapEntry::*;
-use self::collector::NodeCollector;
-pub use self::definitions::{Definitions, DefKey, DefPath, DefPathData, DisambiguatedDefPathData};
-
-use dep_graph::{DepGraph, DepNode};
-
-use middle::cstore::InlinedItem;
-use middle::cstore::InlinedItem as II;
-use middle::def_id::DefId;
-
-use syntax::abi::Abi;
-use syntax::ast::{self, Name, NodeId, DUMMY_NODE_ID};
-use syntax::codemap::{Span, Spanned};
-use syntax::parse::token;
-
-use rustc_front::hir::*;
-use rustc_front::fold::Folder;
-use rustc_front::intravisit;
-use rustc_front::print::pprust;
-
-use arena::TypedArena;
-use std::cell::RefCell;
-use std::fmt;
-use std::io;
-use std::iter;
-use std::mem;
-use std::slice;
-
-pub mod blocks;
-mod collector;
-pub mod definitions;
-
-#[derive(Clone, Copy, PartialEq, Debug)]
-pub enum PathElem {
-    PathMod(Name),
-    PathName(Name)
-}
-
-impl PathElem {
-    pub fn name(&self) -> Name {
-        match *self {
-            PathMod(name) | PathName(name) => name
-        }
-    }
-}
-
-impl fmt::Display for PathElem {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self.name())
-    }
-}
-
-#[derive(Clone)]
-pub struct LinkedPathNode<'a> {
-    node: PathElem,
-    next: LinkedPath<'a>,
-}
-
-#[derive(Copy, Clone)]
-pub struct LinkedPath<'a>(Option<&'a LinkedPathNode<'a>>);
-
-impl<'a> LinkedPath<'a> {
-    pub fn empty() -> LinkedPath<'a> {
-        LinkedPath(None)
-    }
-
-    pub fn from(node: &'a LinkedPathNode) -> LinkedPath<'a> {
-        LinkedPath(Some(node))
-    }
-}
-
-impl<'a> Iterator for LinkedPath<'a> {
-    type Item = PathElem;
-
-    fn next(&mut self) -> Option<PathElem> {
-        match self.0 {
-            Some(node) => {
-                *self = node.next;
-                Some(node.node)
-            }
-            None => None
-        }
-    }
-}
-
-/// The type of the iterator used by with_path.
-pub type PathElems<'a, 'b> = iter::Chain<iter::Cloned<slice::Iter<'a, PathElem>>, LinkedPath<'b>>;
-
-pub fn path_to_string<PI: Iterator<Item=PathElem>>(path: PI) -> String {
-    let itr = token::get_ident_interner();
-
-    path.fold(String::new(), |mut s, e| {
-        let e = itr.get(e.name());
-        if !s.is_empty() {
-            s.push_str("::");
-        }
-        s.push_str(&e[..]);
-        s
-    })
-}
-
-#[derive(Copy, Clone, Debug)]
-pub enum Node<'ast> {
-    NodeItem(&'ast Item),
-    NodeForeignItem(&'ast ForeignItem),
-    NodeTraitItem(&'ast TraitItem),
-    NodeImplItem(&'ast ImplItem),
-    NodeVariant(&'ast Variant),
-    NodeExpr(&'ast Expr),
-    NodeStmt(&'ast Stmt),
-    NodeLocal(&'ast Pat),
-    NodePat(&'ast Pat),
-    NodeBlock(&'ast Block),
-
-    /// NodeStructCtor represents a tuple struct.
-    NodeStructCtor(&'ast VariantData),
-
-    NodeLifetime(&'ast Lifetime),
-    NodeTyParam(&'ast TyParam)
-}
-
-/// Represents an entry and its parent NodeID.
-/// The odd layout is to bring down the total size.
-#[derive(Copy, Debug)]
-pub enum MapEntry<'ast> {
-    /// Placeholder for holes in the map.
-    NotPresent,
-
-    /// All the node types, with a parent ID.
-    EntryItem(NodeId, &'ast Item),
-    EntryForeignItem(NodeId, &'ast ForeignItem),
-    EntryTraitItem(NodeId, &'ast TraitItem),
-    EntryImplItem(NodeId, &'ast ImplItem),
-    EntryVariant(NodeId, &'ast Variant),
-    EntryExpr(NodeId, &'ast Expr),
-    EntryStmt(NodeId, &'ast Stmt),
-    EntryLocal(NodeId, &'ast Pat),
-    EntryPat(NodeId, &'ast Pat),
-    EntryBlock(NodeId, &'ast Block),
-    EntryStructCtor(NodeId, &'ast VariantData),
-    EntryLifetime(NodeId, &'ast Lifetime),
-    EntryTyParam(NodeId, &'ast TyParam),
-
-    /// Roots for node trees.
-    RootCrate,
-    RootInlinedParent(&'ast InlinedParent)
-}
-
-impl<'ast> Clone for MapEntry<'ast> {
-    fn clone(&self) -> MapEntry<'ast> {
-        *self
-    }
-}
-
-#[derive(Debug)]
-pub struct InlinedParent {
-    path: Vec<PathElem>,
-    ii: InlinedItem
-}
-
-impl<'ast> MapEntry<'ast> {
-    fn from_node(p: NodeId, node: Node<'ast>) -> MapEntry<'ast> {
-        match node {
-            NodeItem(n) => EntryItem(p, n),
-            NodeForeignItem(n) => EntryForeignItem(p, n),
-            NodeTraitItem(n) => EntryTraitItem(p, n),
-            NodeImplItem(n) => EntryImplItem(p, n),
-            NodeVariant(n) => EntryVariant(p, n),
-            NodeExpr(n) => EntryExpr(p, n),
-            NodeStmt(n) => EntryStmt(p, n),
-            NodeLocal(n) => EntryLocal(p, n),
-            NodePat(n) => EntryPat(p, n),
-            NodeBlock(n) => EntryBlock(p, n),
-            NodeStructCtor(n) => EntryStructCtor(p, n),
-            NodeLifetime(n) => EntryLifetime(p, n),
-            NodeTyParam(n) => EntryTyParam(p, n),
-        }
-    }
-
-    fn parent_node(self) -> Option<NodeId> {
-        Some(match self {
-            EntryItem(id, _) => id,
-            EntryForeignItem(id, _) => id,
-            EntryTraitItem(id, _) => id,
-            EntryImplItem(id, _) => id,
-            EntryVariant(id, _) => id,
-            EntryExpr(id, _) => id,
-            EntryStmt(id, _) => id,
-            EntryLocal(id, _) => id,
-            EntryPat(id, _) => id,
-            EntryBlock(id, _) => id,
-            EntryStructCtor(id, _) => id,
-            EntryLifetime(id, _) => id,
-            EntryTyParam(id, _) => id,
-            _ => return None
-        })
-    }
-
-    fn to_node(self) -> Option<Node<'ast>> {
-        Some(match self {
-            EntryItem(_, n) => NodeItem(n),
-            EntryForeignItem(_, n) => NodeForeignItem(n),
-            EntryTraitItem(_, n) => NodeTraitItem(n),
-            EntryImplItem(_, n) => NodeImplItem(n),
-            EntryVariant(_, n) => NodeVariant(n),
-            EntryExpr(_, n) => NodeExpr(n),
-            EntryStmt(_, n) => NodeStmt(n),
-            EntryLocal(_, n) => NodeLocal(n),
-            EntryPat(_, n) => NodePat(n),
-            EntryBlock(_, n) => NodeBlock(n),
-            EntryStructCtor(_, n) => NodeStructCtor(n),
-            EntryLifetime(_, n) => NodeLifetime(n),
-            EntryTyParam(_, n) => NodeTyParam(n),
-            _ => return None
-        })
-    }
-}
-
-/// Stores a crate and any number of inlined items from other crates.
-pub struct Forest {
-    krate: Crate,
-    pub dep_graph: DepGraph,
-    inlined_items: TypedArena<InlinedParent>
-}
-
-impl Forest {
-    pub fn new(krate: Crate, dep_graph: DepGraph) -> Forest {
-        Forest {
-            krate: krate,
-            dep_graph: dep_graph,
-            inlined_items: TypedArena::new()
-        }
-    }
-
-    pub fn krate<'ast>(&'ast self) -> &'ast Crate {
-        self.dep_graph.read(DepNode::Krate);
-        &self.krate
-    }
-}
-
-/// Represents a mapping from Node IDs to AST elements and their parent
-/// Node IDs
-#[derive(Clone)]
-pub struct Map<'ast> {
-    /// The backing storage for all the AST nodes.
-    pub forest: &'ast Forest,
-
-    /// Same as the dep_graph in forest, just available with one fewer
-    /// deref. This is a gratuitous micro-optimization.
-    pub dep_graph: DepGraph,
-
-    /// NodeIds are sequential integers from 0, so we can be
-    /// super-compact by storing them in a vector. Not everything with
-    /// a NodeId is in the map, but empirically the occupancy is about
-    /// 75-80%, so there's not too much overhead (certainly less than
-    /// a hashmap, since they (at the time of writing) have a maximum
-    /// of 75% occupancy).
-    ///
-    /// Also, indexing is pretty quick when you've got a vector and
-    /// plain old integers.
-    map: RefCell<Vec<MapEntry<'ast>>>,
-
-    definitions: RefCell<Definitions>,
-}
-
-impl<'ast> Map<'ast> {
-    /// Registers a read in the dependency graph of the AST node with
-    /// the given `id`. This needs to be called each time a public
-    /// function returns the HIR for a node -- in other words, when it
-    /// "reveals" the content of a node to the caller (who might not
-    /// otherwise have had access to those contents, and hence needs a
-    /// read recorded). If the function just returns a DefId or
-    /// NodeId, no actual content was returned, so no read is needed.
-    fn read(&self, id: NodeId) {
-        self.dep_graph.read(self.dep_node(id));
-    }
-
-    fn dep_node(&self, id0: NodeId) -> DepNode {
-        let map = self.map.borrow();
-        let mut id = id0;
-        loop {
-            match map[id as usize] {
-                EntryItem(_, item) => {
-                    let def_id = self.local_def_id(item.id);
-                    // NB                          ^~~~~~~
-                    //
-                    // You would expect that `item.id == id`, but this
-                    // is not always the case. In particular, for a
-                    // ViewPath item like `use self::{mem, foo}`, we
-                    // map the ids for `mem` and `foo` to the
-                    // enclosing view path item. This seems mega super
-                    // ultra wrong, but then who am I to judge?
-                    // -nmatsakis
-                    return DepNode::Hir(def_id);
-                }
-
-                EntryForeignItem(p, _) |
-                EntryTraitItem(p, _) |
-                EntryImplItem(p, _) |
-                EntryVariant(p, _) |
-                EntryExpr(p, _) |
-                EntryStmt(p, _) |
-                EntryLocal(p, _) |
-                EntryPat(p, _) |
-                EntryBlock(p, _) |
-                EntryStructCtor(p, _) |
-                EntryLifetime(p, _) |
-                EntryTyParam(p, _) =>
-                    id = p,
-
-                RootCrate |
-                RootInlinedParent(_) => // FIXME(#2369) clarify story about cross-crate dep tracking
-                    return DepNode::Krate,
-
-                NotPresent =>
-                    // Some nodes, notably struct fields, are not
-                    // present in the map for whatever reason, but
-                    // they *do* have def-ids. So if we encounter an
-                    // empty hole, check for that case.
-                    return self.opt_local_def_id(id)
-                               .map(|def_id| DepNode::Hir(def_id))
-                               .unwrap_or_else(|| {
-                                   panic!("Walking parents from `{}` \
-                                           led to `NotPresent` at `{}`",
-                                          id0, id)
-                               }),
-            }
-        }
-    }
-
-    pub fn num_local_def_ids(&self) -> usize {
-        self.definitions.borrow().len()
-    }
-
-    pub fn def_key(&self, def_id: DefId) -> DefKey {
-        assert!(def_id.is_local());
-        self.definitions.borrow().def_key(def_id.index)
-    }
-
-    pub fn def_path_from_id(&self, id: NodeId) -> DefPath {
-        self.def_path(self.local_def_id(id))
-    }
-
-    pub fn def_path(&self, def_id: DefId) -> DefPath {
-        assert!(def_id.is_local());
-        self.definitions.borrow().def_path(def_id.index)
-    }
-
-    pub fn local_def_id(&self, node: NodeId) -> DefId {
-        self.opt_local_def_id(node).unwrap_or_else(|| {
-            panic!("local_def_id: no entry for `{}`, which has a map of `{:?}`",
-                   node, self.find_entry(node))
-        })
-    }
-
-    pub fn opt_local_def_id(&self, node: NodeId) -> Option<DefId> {
-        self.definitions.borrow().opt_local_def_id(node)
-    }
-
-    pub fn as_local_node_id(&self, def_id: DefId) -> Option<NodeId> {
-        self.definitions.borrow().as_local_node_id(def_id)
-    }
-
-    fn entry_count(&self) -> usize {
-        self.map.borrow().len()
-    }
-
-    fn find_entry(&self, id: NodeId) -> Option<MapEntry<'ast>> {
-        self.map.borrow().get(id as usize).cloned()
-    }
-
-    pub fn krate(&self) -> &'ast Crate {
-        self.forest.krate()
-    }
-
-    /// Retrieve the Node corresponding to `id`, panicking if it cannot
-    /// be found.
-    pub fn get(&self, id: NodeId) -> Node<'ast> {
-        match self.find(id) {
-            Some(node) => node, // read recorded by `find`
-            None => panic!("couldn't find node id {} in the AST map", id)
-        }
-    }
-
-    pub fn get_if_local(&self, id: DefId) -> Option<Node<'ast>> {
-        self.as_local_node_id(id).map(|id| self.get(id)) // read recorded by `get`
-    }
-
-    /// Retrieve the Node corresponding to `id`, returning None if
-    /// it cannot be found.
-    pub fn find(&self, id: NodeId) -> Option<Node<'ast>> {
-        let result = self.find_entry(id).and_then(|x| x.to_node());
-        if result.is_some() {
-            self.read(id);
-        }
-        result
-    }
-
-    /// Similar to get_parent, returns the parent node id or id if there is no
-    /// parent.
-    /// This function returns the immediate parent in the AST, whereas get_parent
-    /// returns the enclosing item. Note that this might not be the actual parent
-    /// node in the AST - some kinds of nodes are not in the map and these will
-    /// never appear as the parent_node. So you can always walk the parent_nodes
-    /// from a node to the root of the AST (unless you get the same id back here,
-    /// which can happen if the id is not in the map itself or is just weird).
-    pub fn get_parent_node(&self, id: NodeId) -> NodeId {
-        self.find_entry(id).and_then(|x| x.parent_node()).unwrap_or(id)
-    }
-
-    /// Check if the node is an argument. An argument is a local variable whose
-    /// immediate parent is an item or a closure.
-    pub fn is_argument(&self, id: NodeId) -> bool {
-        match self.find(id) {
-            Some(NodeLocal(_)) => (),
-            _ => return false,
-        }
-        match self.find(self.get_parent_node(id)) {
-            Some(NodeItem(_)) |
-            Some(NodeTraitItem(_)) |
-            Some(NodeImplItem(_)) => true,
-            Some(NodeExpr(e)) => {
-                match e.node {
-                    ExprClosure(..) => true,
-                    _ => false,
-                }
-            }
-            _ => false,
-        }
-    }
-
-    /// If there is some error when walking the parents (e.g., a node does not
-    /// have a parent in the map or a node can't be found), then we return the
-    /// last good node id we found. Note that reaching the crate root (id == 0)
-    /// is not an error, since items in the crate module have the crate root as
-    /// parent.
-    fn walk_parent_nodes<F>(&self, start_id: NodeId, found: F) -> Result<NodeId, NodeId>
-        where F: Fn(&Node<'ast>) -> bool
-    {
-        let mut id = start_id;
-        loop {
-            let parent_node = self.get_parent_node(id);
-            if parent_node == 0 {
-                return Ok(0);
-            }
-            if parent_node == id {
-                return Err(id);
-            }
-
-            let node = self.find_entry(parent_node);
-            if node.is_none() {
-                return Err(id);
-            }
-            let node = node.unwrap().to_node();
-            match node {
-                Some(ref node) => {
-                    if found(node) {
-                        return Ok(parent_node);
-                    }
-                }
-                None => {
-                    return Err(parent_node);
-                }
-            }
-            id = parent_node;
-        }
-    }
-
-    /// Retrieve the NodeId for `id`'s parent item, or `id` itself if no
-    /// parent item is in this map. The "parent item" is the closest parent node
-    /// in the AST which is recorded by the map and is an item, either an item
-    /// in a module, trait, or impl.
-    pub fn get_parent(&self, id: NodeId) -> NodeId {
-        match self.walk_parent_nodes(id, |node| match *node {
-            NodeItem(_) |
-            NodeForeignItem(_) |
-            NodeTraitItem(_) |
-            NodeImplItem(_) => true,
-            _ => false,
-        }) {
-            Ok(id) => id,
-            Err(id) => id,
-        }
-    }
-
-    /// Returns the nearest enclosing scope. A scope is an item or block.
-    /// FIXME it is not clear to me that all items qualify as scopes - statics
-    /// and associated types probably shouldn't, for example. Behaviour in this
-    /// regard should be expected to be highly unstable.
-    pub fn get_enclosing_scope(&self, id: NodeId) -> Option<NodeId> {
-        match self.walk_parent_nodes(id, |node| match *node {
-            NodeItem(_) |
-            NodeForeignItem(_) |
-            NodeTraitItem(_) |
-            NodeImplItem(_) |
-            NodeBlock(_) => true,
-            _ => false,
-        }) {
-            Ok(id) => Some(id),
-            Err(_) => None,
-        }
-    }
-
-    pub fn get_parent_did(&self, id: NodeId) -> DefId {
-        let parent = self.get_parent(id);
-        match self.find_entry(parent) {
-            Some(RootInlinedParent(&InlinedParent {ii: II::TraitItem(did, _), ..})) => did,
-            Some(RootInlinedParent(&InlinedParent {ii: II::ImplItem(did, _), ..})) => did,
-            _ => self.local_def_id(parent)
-        }
-    }
-
-    pub fn get_foreign_abi(&self, id: NodeId) -> Abi {
-        let parent = self.get_parent(id);
-        let abi = match self.find_entry(parent) {
-            Some(EntryItem(_, i)) => {
-                match i.node {
-                    ItemForeignMod(ref nm) => Some(nm.abi),
-                    _ => None
-                }
-            }
-            /// Wrong but OK, because the only inlined foreign items are intrinsics.
-            Some(RootInlinedParent(_)) => Some(Abi::RustIntrinsic),
-            _ => None
-        };
-        match abi {
-            Some(abi) => {
-                self.read(id); // reveals some of the content of a node
-                abi
-            }
-            None => panic!("expected foreign mod or inlined parent, found {}",
-                          self.node_to_string(parent))
-        }
-    }
-
-    pub fn get_foreign_vis(&self, id: NodeId) -> Visibility {
-        let vis = self.expect_foreign_item(id).vis; // read recorded by `expect_foreign_item`
-        match self.find(self.get_parent(id)) { // read recorded by `find`
-            Some(NodeItem(i)) => vis.inherit_from(i.vis),
-            _ => vis
-        }
-    }
-
-    pub fn expect_item(&self, id: NodeId) -> &'ast Item {
-        match self.find(id) { // read recorded by `find`
-            Some(NodeItem(item)) => item,
-            _ => panic!("expected item, found {}", self.node_to_string(id))
-        }
-    }
-
-    pub fn expect_trait_item(&self, id: NodeId) -> &'ast TraitItem {
-        match self.find(id) {
-            Some(NodeTraitItem(item)) => item,
-            _ => panic!("expected trait item, found {}", self.node_to_string(id))
-        }
-    }
-
-    pub fn expect_struct(&self, id: NodeId) -> &'ast VariantData {
-        match self.find(id) {
-            Some(NodeItem(i)) => {
-                match i.node {
-                    ItemStruct(ref struct_def, _) => struct_def,
-                    _ => panic!("struct ID bound to non-struct")
-                }
-            }
-            Some(NodeVariant(variant)) => {
-                if variant.node.data.is_struct() {
-                    &variant.node.data
-                } else {
-                    panic!("struct ID bound to enum variant that isn't struct-like")
-                }
-            }
-            _ => panic!(format!("expected struct, found {}", self.node_to_string(id))),
-        }
-    }
-
-    pub fn expect_variant(&self, id: NodeId) -> &'ast Variant {
-        match self.find(id) {
-            Some(NodeVariant(variant)) => variant,
-            _ => panic!(format!("expected variant, found {}", self.node_to_string(id))),
-        }
-    }
-
-    pub fn expect_foreign_item(&self, id: NodeId) -> &'ast ForeignItem {
-        match self.find(id) {
-            Some(NodeForeignItem(item)) => item,
-            _ => panic!("expected foreign item, found {}", self.node_to_string(id))
-        }
-    }
-
-    pub fn expect_expr(&self, id: NodeId) -> &'ast Expr {
-        match self.find(id) { // read recorded by find
-            Some(NodeExpr(expr)) => expr,
-            _ => panic!("expected expr, found {}", self.node_to_string(id))
-        }
-    }
-
-    /// returns the name associated with the given NodeId's AST
-    pub fn get_path_elem(&self, id: NodeId) -> PathElem {
-        let node = self.get(id);
-        match node {
-            NodeItem(item) => {
-                match item.node {
-                    ItemMod(_) | ItemForeignMod(_) => {
-                        PathMod(item.name)
-                    }
-                    _ => PathName(item.name)
-                }
-            }
-            NodeForeignItem(i) => PathName(i.name),
-            NodeImplItem(ii) => PathName(ii.name),
-            NodeTraitItem(ti) => PathName(ti.name),
-            NodeVariant(v) => PathName(v.node.name),
-            NodeLifetime(lt) => PathName(lt.name),
-            NodeTyParam(tp) => PathName(tp.name),
-            NodeLocal(&Pat { node: PatKind::Ident(_,l,_), .. }) => {
-                PathName(l.node.name)
-            },
-            _ => panic!("no path elem for {:?}", node)
-        }
-    }
-
-    pub fn with_path<T, F>(&self, id: NodeId, f: F) -> T where
-        F: FnOnce(PathElems) -> T,
-    {
-        self.with_path_next(id, LinkedPath::empty(), f)
-    }
-
-    pub fn path_to_string(&self, id: NodeId) -> String {
-        self.with_path(id, |path| path_to_string(path))
-    }
-
-    fn path_to_str_with_name(&self, id: NodeId, name: Name) -> String {
-        self.with_path(id, |path| {
-            path_to_string(path.chain(Some(PathName(name))))
-        })
-    }
-
-    fn with_path_next<T, F>(&self, id: NodeId, next: LinkedPath, f: F) -> T where
-        F: FnOnce(PathElems) -> T,
-    {
-        // This function reveals the name of the item and hence is a
-        // kind of read. This is inefficient, since it walks ancestors
-        // and we are walking them anyhow, but whatever.
-        self.read(id);
-
-        let parent = self.get_parent(id);
-        let parent = match self.find_entry(id) {
-            Some(EntryForeignItem(..)) => {
-                // Anonymous extern items go in the parent scope.
-                self.get_parent(parent)
-            }
-            // But a tuple struct ctor doesn't have a name of its own, so use
-            // the path of its parent, the struct item. Similarly with closure
-            // expressions.
-            Some(EntryStructCtor(..)) | Some(EntryExpr(..)) => {
-                return self.with_path_next(parent, next, f);
-            }
-            _ => parent
-        };
-        if parent == id {
-            match self.find_entry(id) {
-                Some(RootInlinedParent(data)) => {
-                    f(data.path.iter().cloned().chain(next))
-                }
-                _ => f([].iter().cloned().chain(next))
-            }
-        } else {
-            self.with_path_next(parent, LinkedPath::from(&LinkedPathNode {
-                node: self.get_path_elem(id),
-                next: next
-            }), f)
-        }
-    }
-
-    /// Given a node ID, returns the list of attributes associated with the
-    /// AST node corresponding to that node ID.
-    pub fn attrs(&self, id: NodeId) -> &'ast [ast::Attribute] {
-        self.read(id); // reveals attributes on the node
-        let attrs = match self.find(id) {
-            Some(NodeItem(i)) => Some(&i.attrs[..]),
-            Some(NodeForeignItem(fi)) => Some(&fi.attrs[..]),
-            Some(NodeTraitItem(ref ti)) => Some(&ti.attrs[..]),
-            Some(NodeImplItem(ref ii)) => Some(&ii.attrs[..]),
-            Some(NodeVariant(ref v)) => Some(&v.node.attrs[..]),
-            // unit/tuple structs take the attributes straight from
-            // the struct definition.
-            Some(NodeStructCtor(_)) => {
-                return self.attrs(self.get_parent(id));
-            }
-            _ => None
-        };
-        attrs.unwrap_or(&[])
-    }
-
-    /// Returns an iterator that yields the node ids whose paths
-    /// match `parts`. (Requires `parts` to be non-empty.)
-    ///
-    /// For example, if given `parts` equal to `["bar", "quux"]`, then
-    /// the iterator will produce node ids for items with paths
-    /// such as `foo::bar::quux`, `bar::quux`, `other::bar::quux`, and
-    /// any other such items it can find in the map.
-    pub fn nodes_matching_suffix<'a>(&'a self, parts: &'a [String])
-                                 -> NodesMatchingSuffix<'a, 'ast> {
-        NodesMatchingSuffix {
-            map: self,
-            item_name: parts.last().unwrap(),
-            in_which: &parts[..parts.len() - 1],
-            idx: 0,
-        }
-    }
-
-    pub fn opt_span(&self, id: NodeId) -> Option<Span> {
-        let sp = match self.find(id) {
-            Some(NodeItem(item)) => item.span,
-            Some(NodeForeignItem(foreign_item)) => foreign_item.span,
-            Some(NodeTraitItem(trait_method)) => trait_method.span,
-            Some(NodeImplItem(ref impl_item)) => impl_item.span,
-            Some(NodeVariant(variant)) => variant.span,
-            Some(NodeExpr(expr)) => expr.span,
-            Some(NodeStmt(stmt)) => stmt.span,
-            Some(NodeLocal(pat)) => pat.span,
-            Some(NodePat(pat)) => pat.span,
-            Some(NodeBlock(block)) => block.span,
-            Some(NodeStructCtor(_)) => self.expect_item(self.get_parent(id)).span,
-            Some(NodeTyParam(ty_param)) => ty_param.span,
-            _ => return None,
-        };
-        Some(sp)
-    }
-
-    pub fn span(&self, id: NodeId) -> Span {
-        self.read(id); // reveals span from node
-        self.opt_span(id)
-            .unwrap_or_else(|| panic!("AstMap.span: could not find span for id {:?}", id))
-    }
-
-    pub fn span_if_local(&self, id: DefId) -> Option<Span> {
-        self.as_local_node_id(id).map(|id| self.span(id))
-    }
-
-    pub fn def_id_span(&self, def_id: DefId, fallback: Span) -> Span {
-        if let Some(node_id) = self.as_local_node_id(def_id) {
-            self.opt_span(node_id).unwrap_or(fallback)
-        } else {
-            fallback
-        }
-    }
-
-    pub fn node_to_string(&self, id: NodeId) -> String {
-        node_id_to_string(self, id, true)
-    }
-
-    pub fn node_to_user_string(&self, id: NodeId) -> String {
-        node_id_to_string(self, id, false)
-    }
-}
-
-pub struct NodesMatchingSuffix<'a, 'ast:'a> {
-    map: &'a Map<'ast>,
-    item_name: &'a String,
-    in_which: &'a [String],
-    idx: NodeId,
-}
-
-impl<'a, 'ast> NodesMatchingSuffix<'a, 'ast> {
-    /// Returns true only if some suffix of the module path for parent
-    /// matches `self.in_which`.
-    ///
-    /// In other words: let `[x_0,x_1,...,x_k]` be `self.in_which`;
-    /// returns true if parent's path ends with the suffix
-    /// `x_0::x_1::...::x_k`.
-    fn suffix_matches(&self, parent: NodeId) -> bool {
-        let mut cursor = parent;
-        for part in self.in_which.iter().rev() {
-            let (mod_id, mod_name) = match find_first_mod_parent(self.map, cursor) {
-                None => return false,
-                Some((node_id, name)) => (node_id, name),
-            };
-            if &part[..] != mod_name.as_str() {
-                return false;
-            }
-            cursor = self.map.get_parent(mod_id);
-        }
-        return true;
-
-        // Finds the first mod in the parent chain for `id`, along with
-        // that mod's name.
-        //
-        // If `id` itself is a mod named `m`, then returns `Some((id, m))`;
-        // if `id` has no mod in its parent chain, then returns `None`.
-        fn find_first_mod_parent<'a>(map: &'a Map, mut id: NodeId) -> Option<(NodeId, Name)> {
-            loop {
-                match map.find(id) {
-                    None => return None,
-                    Some(NodeItem(item)) if item_is_mod(&item) =>
-                        return Some((id, item.name)),
-                    _ => {}
-                }
-                let parent = map.get_parent(id);
-                if parent == id { return None }
-                id = parent;
-            }
-
-            fn item_is_mod(item: &Item) -> bool {
-                match item.node {
-                    ItemMod(_) => true,
-                    _ => false,
-                }
-            }
-        }
-    }
-
-    // We are looking at some node `n` with a given name and parent
-    // id; does the name match `item_name`, and does the parent's path
-    // end with the suffix `in_which`?
-    fn matches_names(&self, parent_of_n: NodeId, name: Name) -> bool {
-        name.as_str() == &self.item_name[..] &&
-            self.suffix_matches(parent_of_n)
-    }
-}
-
-impl<'a, 'ast> Iterator for NodesMatchingSuffix<'a, 'ast> {
-    type Item = NodeId;
-
-    fn next(&mut self) -> Option<NodeId> {
-        loop {
-            let idx = self.idx;
-            if idx as usize >= self.map.entry_count() {
-                return None;
-            }
-            self.idx += 1;
-            let name = match self.map.find_entry(idx) {
-                Some(EntryItem(_, n))       => n.name(),
-                Some(EntryForeignItem(_, n))=> n.name(),
-                Some(EntryTraitItem(_, n))  => n.name(),
-                Some(EntryImplItem(_, n))   => n.name(),
-                Some(EntryVariant(_, n))    => n.name(),
-                _ => continue,
-            };
-            if self.matches_names(self.map.get_parent(idx), name) {
-                return Some(idx)
-            }
-        }
-    }
-}
-
-trait Named {
-    fn name(&self) -> Name;
-}
-
-impl<T:Named> Named for Spanned<T> { fn name(&self) -> Name { self.node.name() } }
-
-impl Named for Item { fn name(&self) -> Name { self.name } }
-impl Named for ForeignItem { fn name(&self) -> Name { self.name } }
-impl Named for Variant_ { fn name(&self) -> Name { self.name } }
-impl Named for TraitItem { fn name(&self) -> Name { self.name } }
-impl Named for ImplItem { fn name(&self) -> Name { self.name } }
-
-pub trait FoldOps {
-    fn new_id(&self, id: NodeId) -> NodeId {
-        id
-    }
-    fn new_def_id(&self, def_id: DefId) -> DefId {
-        def_id
-    }
-    fn new_span(&self, span: Span) -> Span {
-        span
-    }
-}
-
-/// A Folder that updates IDs and Spans according to fold_ops.
-struct IdAndSpanUpdater<F> {
-    fold_ops: F
-}
-
-impl<F: FoldOps> Folder for IdAndSpanUpdater<F> {
-    fn new_id(&mut self, id: NodeId) -> NodeId {
-        self.fold_ops.new_id(id)
-    }
-
-    fn new_span(&mut self, span: Span) -> Span {
-        self.fold_ops.new_span(span)
-    }
-}
-
-pub fn map_crate<'ast>(forest: &'ast mut Forest) -> Map<'ast> {
-    let (map, definitions) = {
-        let mut collector = NodeCollector::root(&forest.krate);
-        intravisit::walk_crate(&mut collector, &forest.krate);
-        (collector.map, collector.definitions)
-    };
-
-    if log_enabled!(::log::DEBUG) {
-        // This only makes sense for ordered stores; note the
-        // enumerate to count the number of entries.
-        let (entries_less_1, _) = map.iter().filter(|&x| {
-            match *x {
-                NotPresent => false,
-                _ => true
-            }
-        }).enumerate().last().expect("AST map was empty after folding?");
-
-        let entries = entries_less_1 + 1;
-        let vector_length = map.len();
-        debug!("The AST map has {} entries with a maximum of {}: occupancy {:.1}%",
-              entries, vector_length, (entries as f64 / vector_length as f64) * 100.);
-    }
-
-    Map {
-        forest: forest,
-        dep_graph: forest.dep_graph.clone(),
-        map: RefCell::new(map),
-        definitions: RefCell::new(definitions),
-    }
-}
-
-/// Used for items loaded from an external crate that are being inlined into
-/// this crate.
-pub fn map_decoded_item<'ast, F: FoldOps>(map: &Map<'ast>,
-                                          parent_path: Vec<PathElem>,
-                                          parent_def_path: DefPath,
-                                          ii: InlinedItem,
-                                          fold_ops: F)
-                                          -> &'ast InlinedItem {
-    let mut fld = IdAndSpanUpdater { fold_ops: fold_ops };
-    let ii = match ii {
-        II::Item(i) => II::Item(i.map(|i| fld.fold_item(i))),
-        II::TraitItem(d, ti) => {
-            II::TraitItem(fld.fold_ops.new_def_id(d),
-                          ti.map(|ti| fld.fold_trait_item(ti)))
-        }
-        II::ImplItem(d, ii) => {
-            II::ImplItem(fld.fold_ops.new_def_id(d),
-                         ii.map(|ii| fld.fold_impl_item(ii)))
-        }
-        II::Foreign(i) => II::Foreign(i.map(|i| fld.fold_foreign_item(i)))
-    };
-
-    let ii_parent = map.forest.inlined_items.alloc(InlinedParent {
-        path: parent_path,
-        ii: ii
-    });
-
-    let ii_parent_id = fld.new_id(DUMMY_NODE_ID);
-    let mut collector =
-        NodeCollector::extend(
-            map.krate(),
-            ii_parent,
-            ii_parent_id,
-            parent_def_path,
-            mem::replace(&mut *map.map.borrow_mut(), vec![]),
-            mem::replace(&mut *map.definitions.borrow_mut(), Definitions::new()));
-    ii_parent.ii.visit(&mut collector);
-
-    *map.map.borrow_mut() = collector.map;
-    *map.definitions.borrow_mut() = collector.definitions;
-
-    &ii_parent.ii
-}
-
-pub trait NodePrinter {
-    fn print_node(&mut self, node: &Node) -> io::Result<()>;
-}
-
-impl<'a> NodePrinter for pprust::State<'a> {
-    fn print_node(&mut self, node: &Node) -> io::Result<()> {
-        match *node {
-            NodeItem(a)        => self.print_item(&a),
-            NodeForeignItem(a) => self.print_foreign_item(&a),
-            NodeTraitItem(a)   => self.print_trait_item(a),
-            NodeImplItem(a)    => self.print_impl_item(a),
-            NodeVariant(a)     => self.print_variant(&a),
-            NodeExpr(a)        => self.print_expr(&a),
-            NodeStmt(a)        => self.print_stmt(&a),
-            NodePat(a)         => self.print_pat(&a),
-            NodeBlock(a)       => self.print_block(&a),
-            NodeLifetime(a)    => self.print_lifetime(&a),
-            NodeTyParam(_)     => panic!("cannot print TyParam"),
-            // these cases do not carry enough information in the
-            // ast_map to reconstruct their full structure for pretty
-            // printing.
-            NodeLocal(_)       => panic!("cannot print isolated Local"),
-            NodeStructCtor(_)  => panic!("cannot print isolated StructCtor"),
-        }
-    }
-}
-
-fn node_id_to_string(map: &Map, id: NodeId, include_id: bool) -> String {
-    let id_str = format!(" (id={})", id);
-    let id_str = if include_id { &id_str[..] } else { "" };
-
-    match map.find(id) {
-        Some(NodeItem(item)) => {
-            let path_str = map.path_to_str_with_name(id, item.name);
-            let item_str = match item.node {
-                ItemExternCrate(..) => "extern crate",
-                ItemUse(..) => "use",
-                ItemStatic(..) => "static",
-                ItemConst(..) => "const",
-                ItemFn(..) => "fn",
-                ItemMod(..) => "mod",
-                ItemForeignMod(..) => "foreign mod",
-                ItemTy(..) => "ty",
-                ItemEnum(..) => "enum",
-                ItemStruct(..) => "struct",
-                ItemTrait(..) => "trait",
-                ItemImpl(..) => "impl",
-                ItemDefaultImpl(..) => "default impl",
-            };
-            format!("{} {}{}", item_str, path_str, id_str)
-        }
-        Some(NodeForeignItem(item)) => {
-            let path_str = map.path_to_str_with_name(id, item.name);
-            format!("foreign item {}{}", path_str, id_str)
-        }
-        Some(NodeImplItem(ii)) => {
-            match ii.node {
-                ImplItemKind::Const(..) => {
-                    format!("assoc const {} in {}{}",
-                            ii.name,
-                            map.path_to_string(id),
-                            id_str)
-                }
-                ImplItemKind::Method(..) => {
-                    format!("method {} in {}{}",
-                            ii.name,
-                            map.path_to_string(id), id_str)
-                }
-                ImplItemKind::Type(_) => {
-                    format!("assoc type {} in {}{}",
-                            ii.name,
-                            map.path_to_string(id),
-                            id_str)
-                }
-            }
-        }
-        Some(NodeTraitItem(ti)) => {
-            let kind = match ti.node {
-                ConstTraitItem(..) => "assoc constant",
-                MethodTraitItem(..) => "trait method",
-                TypeTraitItem(..) => "assoc type",
-            };
-
-            format!("{} {} in {}{}",
-                    kind,
-                    ti.name,
-                    map.path_to_string(id),
-                    id_str)
-        }
-        Some(NodeVariant(ref variant)) => {
-            format!("variant {} in {}{}",
-                    variant.node.name,
-                    map.path_to_string(id), id_str)
-        }
-        Some(NodeExpr(ref expr)) => {
-            format!("expr {}{}", pprust::expr_to_string(&expr), id_str)
-        }
-        Some(NodeStmt(ref stmt)) => {
-            format!("stmt {}{}", pprust::stmt_to_string(&stmt), id_str)
-        }
-        Some(NodeLocal(ref pat)) => {
-            format!("local {}{}", pprust::pat_to_string(&pat), id_str)
-        }
-        Some(NodePat(ref pat)) => {
-            format!("pat {}{}", pprust::pat_to_string(&pat), id_str)
-        }
-        Some(NodeBlock(ref block)) => {
-            format!("block {}{}", pprust::block_to_string(&block), id_str)
-        }
-        Some(NodeStructCtor(_)) => {
-            format!("struct_ctor {}{}", map.path_to_string(id), id_str)
-        }
-        Some(NodeLifetime(ref l)) => {
-            format!("lifetime {}{}",
-                    pprust::lifetime_to_string(&l), id_str)
-        }
-        Some(NodeTyParam(ref ty_param)) => {
-            format!("typaram {:?}{}", ty_param, id_str)
-        }
-        None => {
-            format!("unknown node{}", id_str)
-        }
-    }
-}
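A quick, hedged sketch of how the suffix-matching API removed above was meant to be driven (hypothetical caller; `map` is assumed to be an already-populated `Map`):

    // With parts = ["bar", "quux"], the iterator yields the ids of items
    // whose path ends in `bar::quux` (e.g. `foo::bar::quux`, `other::bar::quux`).
    let parts = vec!["bar".to_string(), "quux".to_string()];
    for id in map.nodes_matching_suffix(&parts) {
        println!("{}", map.path_to_string(id));
    }
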
diff --git a/src/librustc/hir/check_attr.rs b/src/librustc/hir/check_attr.rs
new file mode 100644 (file)
index 0000000..085acc1
--- /dev/null
@@ -0,0 +1,110 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use session::Session;
+
+use syntax::ast;
+use syntax::attr::AttrMetaMethods;
+use syntax::visit;
+use syntax::visit::Visitor;
+
+#[derive(Copy, Clone, PartialEq)]
+enum Target {
+    Fn,
+    Struct,
+    Enum,
+    Other,
+}
+
+impl Target {
+    fn from_item(item: &ast::Item) -> Target {
+        match item.node {
+            ast::ItemKind::Fn(..) => Target::Fn,
+            ast::ItemKind::Struct(..) => Target::Struct,
+            ast::ItemKind::Enum(..) => Target::Enum,
+            _ => Target::Other,
+        }
+    }
+}
+
+struct CheckAttrVisitor<'a> {
+    sess: &'a Session,
+}
+
+impl<'a> CheckAttrVisitor<'a> {
+    fn check_inline(&self, attr: &ast::Attribute, target: Target) {
+        if target != Target::Fn {
+            span_err!(self.sess, attr.span, E0518, "attribute should be applied to function");
+        }
+    }
+
+    fn check_repr(&self, attr: &ast::Attribute, target: Target) {
+        let words = match attr.meta_item_list() {
+            Some(words) => words,
+            None => {
+                return;
+            }
+        };
+        for word in words {
+            let word: &str = &word.name();
+            let message = match word {
+                "C" => {
+                    if target != Target::Struct && target != Target::Enum {
+                        "attribute should be applied to struct or enum"
+                    } else {
+                        continue
+                    }
+                }
+                "packed" |
+                "simd" => {
+                    if target != Target::Struct {
+                        "attribute should be applied to struct"
+                    } else {
+                        continue
+                    }
+                }
+                "i8" | "u8" | "i16" | "u16" |
+                "i32" | "u32" | "i64" | "u64" |
+                "isize" | "usize" => {
+                    if target != Target::Enum {
+                        "attribute should be applied to enum"
+                    } else {
+                        continue
+                    }
+                }
+                _ => continue,
+            };
+            span_err!(self.sess, attr.span, E0517, "{}", message);
+        }
+    }
+
+    fn check_attribute(&self, attr: &ast::Attribute, target: Target) {
+        let name: &str = &attr.name();
+        match name {
+            "inline" => self.check_inline(attr, target),
+            "repr" => self.check_repr(attr, target),
+            _ => (),
+        }
+    }
+}
+
+impl<'a, 'v> Visitor<'v> for CheckAttrVisitor<'a> {
+    fn visit_item(&mut self, item: &ast::Item) {
+        let target = Target::from_item(item);
+        for attr in &item.attrs {
+            self.check_attribute(attr, target);
+        }
+        visit::walk_item(self, item);
+    }
+}
+
+pub fn check_crate(sess: &Session, krate: &ast::Crate) {
+    visit::walk_crate(&mut CheckAttrVisitor { sess: sess }, krate);
+}
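A hedged illustration of what this new pass accepts and rejects (targets and error codes taken from the checks above; the item names are made up):

    #[inline]          // accepted: the target is a function
    fn fast() {}

    #[inline]          // rejected with E0518: not a function
    struct S;

    #[repr(C)]         // rejected with E0517: repr(C) needs a struct or enum
    fn wrong() {}
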
diff --git a/src/librustc/hir/def.rs b/src/librustc/hir/def.rs
new file mode 100644 (file)
index 0000000..c270a44
--- /dev/null
@@ -0,0 +1,176 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use hir::def_id::DefId;
+use ty::subst::ParamSpace;
+use util::nodemap::NodeMap;
+use syntax::ast;
+use hir;
+
+#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum Def {
+    Fn(DefId),
+    SelfTy(Option<DefId>,                    // trait id
+              Option<(ast::NodeId, ast::NodeId)>),   // (impl id, self type id)
+    Mod(DefId),
+    ForeignMod(DefId),
+    Static(DefId, bool /* is_mutbl */),
+    Const(DefId),
+    AssociatedConst(DefId),
+    Local(DefId, // def id of variable
+             ast::NodeId), // node id of variable
+    Variant(DefId /* enum */, DefId /* variant */),
+    Enum(DefId),
+    TyAlias(DefId),
+    AssociatedTy(DefId /* trait */, DefId),
+    Trait(DefId),
+    PrimTy(hir::PrimTy),
+    TyParam(ParamSpace, u32, DefId, ast::Name),
+    Upvar(DefId,        // def id of closed over local
+             ast::NodeId,  // node id of closed over local
+             usize,        // index in the freevars list of the closure
+             ast::NodeId), // expr node that creates the closure
+
+    // If Def::Struct lives in the type namespace, it denotes a struct item and its DefId
+    // refers to the NodeId of the struct itself.
+    // If Def::Struct lives in the value namespace (e.g. tuple struct or unit struct
+    // expressions), it denotes a constructor and its DefId refers to the NodeId of the
+    // struct's constructor.
+    Struct(DefId),
+    Label(ast::NodeId),
+    Method(DefId),
+    Err,
+}
+
+/// The result of resolving a path.
+/// Before type checking completes, `depth` represents the number of
+/// trailing segments which are yet unresolved. Afterwards, if there
+/// were no errors, all paths should be fully resolved, with `depth`
+/// set to `0` and `base_def` representing the final resolution.
+///
+///     module::Type::AssocX::AssocY::MethodOrAssocType
+///     ^~~~~~~~~~~~  ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+///     base_def      depth = 3
+///
+///     <T as Trait>::AssocX::AssocY::MethodOrAssocType
+///           ^~~~~~~~~~~~~~  ^~~~~~~~~~~~~~~~~~~~~~~~~
+///           base_def        depth = 2
+#[derive(Copy, Clone, Debug)]
+pub struct PathResolution {
+    pub base_def: Def,
+    pub depth: usize
+}
+
+impl PathResolution {
+    /// Get the definition, if fully resolved, otherwise panic.
+    pub fn full_def(&self) -> Def {
+        if self.depth != 0 {
+            bug!("path not fully resolved: {:?}", self);
+        }
+        self.base_def
+    }
+
+    /// Get the DefId, if fully resolved, otherwise panic.
+    pub fn def_id(&self) -> DefId {
+        self.full_def().def_id()
+    }
+
+    pub fn new(base_def: Def,
+               depth: usize)
+               -> PathResolution {
+        PathResolution {
+            base_def: base_def,
+            depth: depth,
+        }
+    }
+}
+
+// Definition mapping
+pub type DefMap = NodeMap<PathResolution>;
+// This is the replacement export map. It maps a module to all of the exports
+// within.
+pub type ExportMap = NodeMap<Vec<Export>>;
+
+#[derive(Copy, Clone)]
+pub struct Export {
+    pub name: ast::Name,    // The name of the target.
+    pub def_id: DefId, // The definition of the target.
+}
+
+impl Def {
+    pub fn var_id(&self) -> ast::NodeId {
+        match *self {
+            Def::Local(_, id) |
+            Def::Upvar(_, id, _, _) => {
+                id
+            }
+
+            Def::Fn(..) | Def::Mod(..) | Def::ForeignMod(..) | Def::Static(..) |
+            Def::Variant(..) | Def::Enum(..) | Def::TyAlias(..) | Def::AssociatedTy(..) |
+            Def::TyParam(..) | Def::Struct(..) | Def::Trait(..) |
+            Def::Method(..) | Def::Const(..) | Def::AssociatedConst(..) |
+            Def::PrimTy(..) | Def::Label(..) | Def::SelfTy(..) | Def::Err => {
+                bug!("attempted .var_id() on invalid {:?}", self)
+            }
+        }
+    }
+
+    pub fn def_id(&self) -> DefId {
+        match *self {
+            Def::Fn(id) | Def::Mod(id) | Def::ForeignMod(id) | Def::Static(id, _) |
+            Def::Variant(_, id) | Def::Enum(id) | Def::TyAlias(id) | Def::AssociatedTy(_, id) |
+            Def::TyParam(_, _, id, _) | Def::Struct(id) | Def::Trait(id) |
+            Def::Method(id) | Def::Const(id) | Def::AssociatedConst(id) |
+            Def::Local(id, _) | Def::Upvar(id, _, _, _) => {
+                id
+            }
+
+            Def::Label(..)  |
+            Def::PrimTy(..) |
+            Def::SelfTy(..) |
+            Def::Err => {
+                bug!("attempted .def_id() on invalid def: {:?}", self)
+            }
+        }
+    }
+
+    pub fn variant_def_ids(&self) -> Option<(DefId, DefId)> {
+        match *self {
+            Def::Variant(enum_id, var_id) => {
+                Some((enum_id, var_id))
+            }
+            _ => None
+        }
+    }
+
+    pub fn kind_name(&self) -> &'static str {
+        match *self {
+            Def::Fn(..) => "function",
+            Def::Mod(..) => "module",
+            Def::ForeignMod(..) => "foreign module",
+            Def::Static(..) => "static",
+            Def::Variant(..) => "variant",
+            Def::Enum(..) => "enum",
+            Def::TyAlias(..) => "type",
+            Def::AssociatedTy(..) => "associated type",
+            Def::Struct(..) => "struct",
+            Def::Trait(..) => "trait",
+            Def::Method(..) => "method",
+            Def::Const(..) => "const",
+            Def::AssociatedConst(..) => "associated const",
+            Def::TyParam(..) => "type parameter",
+            Def::PrimTy(..) => "builtin type",
+            Def::Local(..) => "local variable",
+            Def::Upvar(..) => "closure capture",
+            Def::Label(..) => "label",
+            Def::SelfTy(..) => "self type",
+            Def::Err => "unresolved item",
+        }
+    }
+}
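A small sketch of how PathResolution is read (hypothetical values; `ty_alias_def_id` stands in for the DefId of `module::Type`):

    // Before type checking, `module::Type::AssocX::AssocY::MethodOrAssocType`
    // can be recorded with `module::Type` resolved and three segments pending:
    let res = PathResolution::new(Def::TyAlias(ty_alias_def_id), 3);
    assert_eq!(res.base_def.kind_name(), "type");
    // Calling res.full_def() or res.def_id() here would hit bug!(), because
    // depth != 0; they are only safe once resolution has finished.
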
diff --git a/src/librustc/hir/def_id.rs b/src/librustc/hir/def_id.rs
new file mode 100644 (file)
index 0000000..a3b83ec
--- /dev/null
@@ -0,0 +1,88 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use middle::cstore::LOCAL_CRATE;
+use ty;
+use syntax::ast::CrateNum;
+use std::fmt;
+use std::u32;
+
+/// A DefIndex is an index into the hir-map for a crate, identifying a
+/// particular definition. It should really be considered an interned
+/// shorthand for a particular DefPath.
+#[derive(Clone, Debug, Eq, Ord, PartialOrd, PartialEq, RustcEncodable,
+           RustcDecodable, Hash, Copy)]
+pub struct DefIndex(u32);
+
+impl DefIndex {
+    pub fn new(x: usize) -> DefIndex {
+        assert!(x < (u32::MAX as usize));
+        DefIndex(x as u32)
+    }
+
+    pub fn from_u32(x: u32) -> DefIndex {
+        DefIndex(x)
+    }
+
+    pub fn as_usize(&self) -> usize {
+        self.0 as usize
+    }
+
+    pub fn as_u32(&self) -> u32 {
+        self.0
+    }
+}
+
+/// The crate root is always assigned index 0 by the AST Map code,
+/// thanks to `NodeCollector::new`.
+pub const CRATE_DEF_INDEX: DefIndex = DefIndex(0);
+
+/// A DefId identifies a particular *definition*, by combining a crate
+/// index and a def index.
+#[derive(Clone, Eq, Ord, PartialOrd, PartialEq, RustcEncodable,
+           RustcDecodable, Hash, Copy)]
+pub struct DefId {
+    pub krate: CrateNum,
+    pub index: DefIndex,
+}
+
+impl fmt::Debug for DefId {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "DefId {{ krate: {:?}, node: {:?}",
+               self.krate, self.index)?;
+
+        // Unfortunately, there seems to be no way to attempt to print
+        // a path for a def-id, so I'll just make a best effort for now
+        // and otherwise fall back to just printing the crate/node pair
+        if self.is_local() { // (1)
+            // (1) side-step fact that not all external things have paths at
+            // the moment, such as type parameters
+            ty::tls::with_opt(|opt_tcx| {
+                if let Some(tcx) = opt_tcx {
+                    write!(f, " => {}", tcx.item_path_str(*self))?;
+                }
+                Ok(())
+            })?;
+        }
+
+        write!(f, " }}")
+    }
+}
+
+
+impl DefId {
+    pub fn local(index: DefIndex) -> DefId {
+        DefId { krate: LOCAL_CRATE, index: index }
+    }
+
+    pub fn is_local(&self) -> bool {
+        self.krate == LOCAL_CRATE
+    }
+}
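A minimal sketch of the local-crate case, using only the constructors defined above:

    // DefIndex::new asserts that the index fits in a u32.
    let idx = DefIndex::new(42);
    assert_eq!(idx.as_u32(), 42);

    // A local DefId pairs LOCAL_CRATE with that index.
    let def_id = DefId::local(idx);
    assert!(def_id.is_local());
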
diff --git a/src/librustc/hir/fold.rs b/src/librustc/hir/fold.rs
new file mode 100644 (file)
index 0000000..a6ff716
--- /dev/null
@@ -0,0 +1,1143 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A Folder represents an HIR->HIR fold; it accepts an HIR piece
+//! and returns a piece of the same type.
+
+use hir::*;
+use syntax::ast::{Name, NodeId, DUMMY_NODE_ID, Attribute, Attribute_, MetaItem};
+use syntax::ast::MetaItemKind;
+use syntax::attr::ThinAttributesExt;
+use hir;
+use syntax::codemap::{respan, Span, Spanned};
+use syntax::ptr::P;
+use syntax::parse::token;
+use syntax::util::move_map::MoveMap;
+
+pub trait Folder : Sized {
+    // Any additions to this trait should happen in the form
+    // of a call to a public `noop_*` function that only calls
+    // out to the folder again, not other `noop_*` functions.
+    //
+    // This is a necessary API workaround for the problem of not
+    // being able to call out to the super default method
+    // in an overridden default method.
+
+    fn fold_crate(&mut self, c: Crate) -> Crate {
+        noop_fold_crate(c, self)
+    }
+
+    fn fold_meta_items(&mut self, meta_items: HirVec<P<MetaItem>>) -> HirVec<P<MetaItem>> {
+        noop_fold_meta_items(meta_items, self)
+    }
+
+    fn fold_meta_item(&mut self, meta_item: P<MetaItem>) -> P<MetaItem> {
+        noop_fold_meta_item(meta_item, self)
+    }
+
+    fn fold_view_path(&mut self, view_path: P<ViewPath>) -> P<ViewPath> {
+        noop_fold_view_path(view_path, self)
+    }
+
+    fn fold_foreign_item(&mut self, ni: ForeignItem) -> ForeignItem {
+        noop_fold_foreign_item(ni, self)
+    }
+
+    fn fold_item(&mut self, i: Item) -> Item {
+        noop_fold_item(i, self)
+    }
+
+    fn fold_item_id(&mut self, i: ItemId) -> ItemId {
+        noop_fold_item_id(i, self)
+    }
+
+    fn fold_struct_field(&mut self, sf: StructField) -> StructField {
+        noop_fold_struct_field(sf, self)
+    }
+
+    fn fold_item_underscore(&mut self, i: Item_) -> Item_ {
+        noop_fold_item_underscore(i, self)
+    }
+
+    fn fold_trait_item(&mut self, i: TraitItem) -> TraitItem {
+        noop_fold_trait_item(i, self)
+    }
+
+    fn fold_impl_item(&mut self, i: ImplItem) -> ImplItem {
+        noop_fold_impl_item(i, self)
+    }
+
+    fn fold_fn_decl(&mut self, d: P<FnDecl>) -> P<FnDecl> {
+        noop_fold_fn_decl(d, self)
+    }
+
+    fn fold_block(&mut self, b: P<Block>) -> P<Block> {
+        noop_fold_block(b, self)
+    }
+
+    fn fold_stmt(&mut self, s: Stmt) -> Stmt {
+        noop_fold_stmt(s, self)
+    }
+
+    fn fold_arm(&mut self, a: Arm) -> Arm {
+        noop_fold_arm(a, self)
+    }
+
+    fn fold_pat(&mut self, p: P<Pat>) -> P<Pat> {
+        noop_fold_pat(p, self)
+    }
+
+    fn fold_decl(&mut self, d: P<Decl>) -> P<Decl> {
+        noop_fold_decl(d, self)
+    }
+
+    fn fold_expr(&mut self, e: P<Expr>) -> P<Expr> {
+        e.map(|e| noop_fold_expr(e, self))
+    }
+
+    fn fold_ty(&mut self, t: P<Ty>) -> P<Ty> {
+        noop_fold_ty(t, self)
+    }
+
+    fn fold_ty_binding(&mut self, t: TypeBinding) -> TypeBinding {
+        noop_fold_ty_binding(t, self)
+    }
+
+    fn fold_mod(&mut self, m: Mod) -> Mod {
+        noop_fold_mod(m, self)
+    }
+
+    fn fold_foreign_mod(&mut self, nm: ForeignMod) -> ForeignMod {
+        noop_fold_foreign_mod(nm, self)
+    }
+
+    fn fold_variant(&mut self, v: Variant) -> Variant {
+        noop_fold_variant(v, self)
+    }
+
+    fn fold_name(&mut self, n: Name) -> Name {
+        noop_fold_name(n, self)
+    }
+
+    fn fold_ident(&mut self, i: Ident) -> Ident {
+        noop_fold_ident(i, self)
+    }
+
+    fn fold_usize(&mut self, i: usize) -> usize {
+        noop_fold_usize(i, self)
+    }
+
+    fn fold_path(&mut self, p: Path) -> Path {
+        noop_fold_path(p, self)
+    }
+
+    fn fold_path_parameters(&mut self, p: PathParameters) -> PathParameters {
+        noop_fold_path_parameters(p, self)
+    }
+
+    fn fold_angle_bracketed_parameter_data(&mut self,
+                                           p: AngleBracketedParameterData)
+                                           -> AngleBracketedParameterData {
+        noop_fold_angle_bracketed_parameter_data(p, self)
+    }
+
+    fn fold_parenthesized_parameter_data(&mut self,
+                                         p: ParenthesizedParameterData)
+                                         -> ParenthesizedParameterData {
+        noop_fold_parenthesized_parameter_data(p, self)
+    }
+
+    fn fold_local(&mut self, l: P<Local>) -> P<Local> {
+        noop_fold_local(l, self)
+    }
+
+    fn fold_explicit_self(&mut self, es: ExplicitSelf) -> ExplicitSelf {
+        noop_fold_explicit_self(es, self)
+    }
+
+    fn fold_explicit_self_underscore(&mut self, es: ExplicitSelf_) -> ExplicitSelf_ {
+        noop_fold_explicit_self_underscore(es, self)
+    }
+
+    fn fold_lifetime(&mut self, l: Lifetime) -> Lifetime {
+        noop_fold_lifetime(l, self)
+    }
+
+    fn fold_lifetime_def(&mut self, l: LifetimeDef) -> LifetimeDef {
+        noop_fold_lifetime_def(l, self)
+    }
+
+    fn fold_attribute(&mut self, at: Attribute) -> Option<Attribute> {
+        noop_fold_attribute(at, self)
+    }
+
+    fn fold_arg(&mut self, a: Arg) -> Arg {
+        noop_fold_arg(a, self)
+    }
+
+    fn fold_generics(&mut self, generics: Generics) -> Generics {
+        noop_fold_generics(generics, self)
+    }
+
+    fn fold_trait_ref(&mut self, p: TraitRef) -> TraitRef {
+        noop_fold_trait_ref(p, self)
+    }
+
+    fn fold_poly_trait_ref(&mut self, p: PolyTraitRef) -> PolyTraitRef {
+        noop_fold_poly_trait_ref(p, self)
+    }
+
+    fn fold_variant_data(&mut self, vdata: VariantData) -> VariantData {
+        noop_fold_variant_data(vdata, self)
+    }
+
+    fn fold_lifetimes(&mut self, lts: HirVec<Lifetime>) -> HirVec<Lifetime> {
+        noop_fold_lifetimes(lts, self)
+    }
+
+    fn fold_lifetime_defs(&mut self, lts: HirVec<LifetimeDef>) -> HirVec<LifetimeDef> {
+        noop_fold_lifetime_defs(lts, self)
+    }
+
+    fn fold_ty_param(&mut self, tp: TyParam) -> TyParam {
+        noop_fold_ty_param(tp, self)
+    }
+
+    fn fold_ty_params(&mut self, tps: HirVec<TyParam>) -> HirVec<TyParam> {
+        noop_fold_ty_params(tps, self)
+    }
+
+    fn fold_opt_lifetime(&mut self, o_lt: Option<Lifetime>) -> Option<Lifetime> {
+        noop_fold_opt_lifetime(o_lt, self)
+    }
+
+    fn fold_opt_bounds(&mut self,
+                       b: Option<TyParamBounds>)
+                       -> Option<TyParamBounds> {
+        noop_fold_opt_bounds(b, self)
+    }
+
+    fn fold_bounds(&mut self, b: TyParamBounds) -> TyParamBounds {
+        noop_fold_bounds(b, self)
+    }
+
+    fn fold_ty_param_bound(&mut self, tpb: TyParamBound) -> TyParamBound {
+        noop_fold_ty_param_bound(tpb, self)
+    }
+
+    fn fold_mt(&mut self, mt: MutTy) -> MutTy {
+        noop_fold_mt(mt, self)
+    }
+
+    fn fold_field(&mut self, field: Field) -> Field {
+        noop_fold_field(field, self)
+    }
+
+    fn fold_where_clause(&mut self, where_clause: WhereClause) -> WhereClause {
+        noop_fold_where_clause(where_clause, self)
+    }
+
+    fn fold_where_predicate(&mut self, where_predicate: WherePredicate) -> WherePredicate {
+        noop_fold_where_predicate(where_predicate, self)
+    }
+
+    /// Called for the `id` on each declaration.
+    fn new_id(&mut self, i: NodeId) -> NodeId {
+        i
+    }
+
+    /// Called for ids that are references (e.g., ItemDef).
+    fn map_id(&mut self, i: NodeId) -> NodeId {
+        i
+    }
+
+    fn new_span(&mut self, sp: Span) -> Span {
+        sp
+    }
+}
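For orientation, a minimal sketch of a custom Folder in the spirit of the IdAndSpanUpdater removed above: override only the hooks you need and let every default method delegate to the noop_* functions (`remap` is an assumed helper, not part of this file):

    struct Renumberer;

    impl Folder for Renumberer {
        // Renumber every declaration id; all structural folding still comes
        // from the trait's default noop_* methods.
        fn new_id(&mut self, id: NodeId) -> NodeId {
            remap(id) // hypothetical renumbering helper
        }
    }
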
+
+pub fn noop_fold_meta_items<T: Folder>(meta_items: HirVec<P<MetaItem>>,
+                                       fld: &mut T)
+                                       -> HirVec<P<MetaItem>> {
+    meta_items.move_map(|x| fld.fold_meta_item(x))
+}
+
+pub fn noop_fold_view_path<T: Folder>(view_path: P<ViewPath>, fld: &mut T) -> P<ViewPath> {
+    view_path.map(|Spanned { node, span }| {
+        Spanned {
+            node: match node {
+                ViewPathSimple(name, path) => {
+                    ViewPathSimple(name, fld.fold_path(path))
+                }
+                ViewPathGlob(path) => {
+                    ViewPathGlob(fld.fold_path(path))
+                }
+                ViewPathList(path, path_list_idents) => {
+                    ViewPathList(fld.fold_path(path),
+                                 path_list_idents.move_map(|path_list_ident| {
+                                     Spanned {
+                                         node: match path_list_ident.node {
+                                             PathListIdent { id, name, rename } => PathListIdent {
+                                                 id: fld.new_id(id),
+                                                 name: name,
+                                                 rename: rename,
+                                             },
+                                             PathListMod { id, rename } => PathListMod {
+                                                 id: fld.new_id(id),
+                                                 rename: rename,
+                                             },
+                                         },
+                                         span: fld.new_span(path_list_ident.span),
+                                     }
+                                 }))
+                }
+            },
+            span: fld.new_span(span),
+        }
+    })
+}
+
+pub fn fold_attrs<T: Folder>(attrs: HirVec<Attribute>, fld: &mut T) -> HirVec<Attribute> {
+    attrs.move_flat_map(|x| fld.fold_attribute(x))
+}
+
+pub fn noop_fold_arm<T: Folder>(Arm { attrs, pats, guard, body }: Arm, fld: &mut T) -> Arm {
+    Arm {
+        attrs: fold_attrs(attrs, fld),
+        pats: pats.move_map(|x| fld.fold_pat(x)),
+        guard: guard.map(|x| fld.fold_expr(x)),
+        body: fld.fold_expr(body),
+    }
+}
+
+pub fn noop_fold_decl<T: Folder>(d: P<Decl>, fld: &mut T) -> P<Decl> {
+    d.map(|Spanned { node, span }| {
+        match node {
+            DeclLocal(l) => Spanned {
+                node: DeclLocal(fld.fold_local(l)),
+                span: fld.new_span(span),
+            },
+            DeclItem(it) => Spanned {
+                node: DeclItem(fld.fold_item_id(it)),
+                span: fld.new_span(span),
+            },
+        }
+    })
+}
+
+pub fn noop_fold_ty_binding<T: Folder>(b: TypeBinding, fld: &mut T) -> TypeBinding {
+    TypeBinding {
+        id: fld.new_id(b.id),
+        name: b.name,
+        ty: fld.fold_ty(b.ty),
+        span: fld.new_span(b.span),
+    }
+}
+
+pub fn noop_fold_ty<T: Folder>(t: P<Ty>, fld: &mut T) -> P<Ty> {
+    t.map(|Ty { id, node, span }| {
+        Ty {
+            id: fld.new_id(id),
+            node: match node {
+                TyInfer => node,
+                TyVec(ty) => TyVec(fld.fold_ty(ty)),
+                TyPtr(mt) => TyPtr(fld.fold_mt(mt)),
+                TyRptr(region, mt) => {
+                    TyRptr(fld.fold_opt_lifetime(region), fld.fold_mt(mt))
+                }
+                TyBareFn(f) => {
+                    TyBareFn(f.map(|BareFnTy { lifetimes, unsafety, abi, decl }| {
+                        BareFnTy {
+                            lifetimes: fld.fold_lifetime_defs(lifetimes),
+                            unsafety: unsafety,
+                            abi: abi,
+                            decl: fld.fold_fn_decl(decl),
+                        }
+                    }))
+                }
+                TyTup(tys) => TyTup(tys.move_map(|ty| fld.fold_ty(ty))),
+                TyPath(qself, path) => {
+                    let qself = qself.map(|QSelf { ty, position }| {
+                        QSelf {
+                            ty: fld.fold_ty(ty),
+                            position: position,
+                        }
+                    });
+                    TyPath(qself, fld.fold_path(path))
+                }
+                TyObjectSum(ty, bounds) => {
+                    TyObjectSum(fld.fold_ty(ty), fld.fold_bounds(bounds))
+                }
+                TyFixedLengthVec(ty, e) => {
+                    TyFixedLengthVec(fld.fold_ty(ty), fld.fold_expr(e))
+                }
+                TyTypeof(expr) => {
+                    TyTypeof(fld.fold_expr(expr))
+                }
+                TyPolyTraitRef(bounds) => {
+                    TyPolyTraitRef(bounds.move_map(|b| fld.fold_ty_param_bound(b)))
+                }
+            },
+            span: fld.new_span(span),
+        }
+    })
+}
+
+pub fn noop_fold_foreign_mod<T: Folder>(ForeignMod { abi, items }: ForeignMod,
+                                        fld: &mut T)
+                                        -> ForeignMod {
+    ForeignMod {
+        abi: abi,
+        items: items.move_map(|x| fld.fold_foreign_item(x)),
+    }
+}
+
+pub fn noop_fold_variant<T: Folder>(v: Variant, fld: &mut T) -> Variant {
+    Spanned {
+        node: Variant_ {
+            name: v.node.name,
+            attrs: fold_attrs(v.node.attrs, fld),
+            data: fld.fold_variant_data(v.node.data),
+            disr_expr: v.node.disr_expr.map(|e| fld.fold_expr(e)),
+        },
+        span: fld.new_span(v.span),
+    }
+}
+
+pub fn noop_fold_name<T: Folder>(n: Name, _: &mut T) -> Name {
+    n
+}
+
+pub fn noop_fold_ident<T: Folder>(i: Ident, _: &mut T) -> Ident {
+    i
+}
+
+pub fn noop_fold_usize<T: Folder>(i: usize, _: &mut T) -> usize {
+    i
+}
+
+pub fn noop_fold_path<T: Folder>(Path { global, segments, span }: Path, fld: &mut T) -> Path {
+    Path {
+        global: global,
+        segments: segments.move_map(|PathSegment { identifier, parameters }| {
+            PathSegment {
+                identifier: fld.fold_ident(identifier),
+                parameters: fld.fold_path_parameters(parameters),
+            }
+        }),
+        span: fld.new_span(span),
+    }
+}
+
+pub fn noop_fold_path_parameters<T: Folder>(path_parameters: PathParameters,
+                                            fld: &mut T)
+                                            -> PathParameters {
+    match path_parameters {
+        AngleBracketedParameters(data) =>
+            AngleBracketedParameters(fld.fold_angle_bracketed_parameter_data(data)),
+        ParenthesizedParameters(data) =>
+            ParenthesizedParameters(fld.fold_parenthesized_parameter_data(data)),
+    }
+}
+
+pub fn noop_fold_angle_bracketed_parameter_data<T: Folder>(data: AngleBracketedParameterData,
+                                                           fld: &mut T)
+                                                           -> AngleBracketedParameterData {
+    let AngleBracketedParameterData { lifetimes, types, bindings } = data;
+    AngleBracketedParameterData {
+        lifetimes: fld.fold_lifetimes(lifetimes),
+        types: types.move_map(|ty| fld.fold_ty(ty)),
+        bindings: bindings.move_map(|b| fld.fold_ty_binding(b)),
+    }
+}
+
+pub fn noop_fold_parenthesized_parameter_data<T: Folder>(data: ParenthesizedParameterData,
+                                                         fld: &mut T)
+                                                         -> ParenthesizedParameterData {
+    let ParenthesizedParameterData { inputs, output, span } = data;
+    ParenthesizedParameterData {
+        inputs: inputs.move_map(|ty| fld.fold_ty(ty)),
+        output: output.map(|ty| fld.fold_ty(ty)),
+        span: fld.new_span(span),
+    }
+}
+
+pub fn noop_fold_local<T: Folder>(l: P<Local>, fld: &mut T) -> P<Local> {
+    l.map(|Local { id, pat, ty, init, span, attrs }| {
+        Local {
+            id: fld.new_id(id),
+            ty: ty.map(|t| fld.fold_ty(t)),
+            pat: fld.fold_pat(pat),
+            init: init.map(|e| fld.fold_expr(e)),
+            span: fld.new_span(span),
+            attrs: attrs.map_thin_attrs(|attrs| fold_attrs(attrs.into(), fld).into()),
+        }
+    })
+}
+
+pub fn noop_fold_attribute<T: Folder>(at: Attribute, fld: &mut T) -> Option<Attribute> {
+    let Spanned {node: Attribute_ {id, style, value, is_sugared_doc}, span} = at;
+    Some(Spanned {
+        node: Attribute_ {
+            id: id,
+            style: style,
+            value: fld.fold_meta_item(value),
+            is_sugared_doc: is_sugared_doc,
+        },
+        span: fld.new_span(span),
+    })
+}
+
+pub fn noop_fold_explicit_self_underscore<T: Folder>(es: ExplicitSelf_,
+                                                     fld: &mut T)
+                                                     -> ExplicitSelf_ {
+    match es {
+        SelfStatic | SelfValue(_) => es,
+        SelfRegion(lifetime, m, name) => {
+            SelfRegion(fld.fold_opt_lifetime(lifetime), m, name)
+        }
+        SelfExplicit(typ, name) => {
+            SelfExplicit(fld.fold_ty(typ), name)
+        }
+    }
+}
+
+pub fn noop_fold_explicit_self<T: Folder>(Spanned { span, node }: ExplicitSelf,
+                                          fld: &mut T)
+                                          -> ExplicitSelf {
+    Spanned {
+        node: fld.fold_explicit_self_underscore(node),
+        span: fld.new_span(span),
+    }
+}
+
+pub fn noop_fold_meta_item<T: Folder>(mi: P<MetaItem>, fld: &mut T) -> P<MetaItem> {
+    mi.map(|Spanned { node, span }| {
+        Spanned {
+            node: match node {
+                MetaItemKind::Word(id) => MetaItemKind::Word(id),
+                MetaItemKind::List(id, mis) => {
+                    MetaItemKind::List(id, mis.move_map(|e| fld.fold_meta_item(e)))
+                }
+                MetaItemKind::NameValue(id, s) => MetaItemKind::NameValue(id, s),
+            },
+            span: fld.new_span(span),
+        }
+    })
+}
+
+pub fn noop_fold_arg<T: Folder>(Arg { id, pat, ty }: Arg, fld: &mut T) -> Arg {
+    Arg {
+        id: fld.new_id(id),
+        pat: fld.fold_pat(pat),
+        ty: fld.fold_ty(ty),
+    }
+}
+
+pub fn noop_fold_fn_decl<T: Folder>(decl: P<FnDecl>, fld: &mut T) -> P<FnDecl> {
+    decl.map(|FnDecl { inputs, output, variadic }| {
+        FnDecl {
+            inputs: inputs.move_map(|x| fld.fold_arg(x)),
+            output: match output {
+                Return(ty) => Return(fld.fold_ty(ty)),
+                DefaultReturn(span) => DefaultReturn(span),
+                NoReturn(span) => NoReturn(span),
+            },
+            variadic: variadic,
+        }
+    })
+}
+
+pub fn noop_fold_ty_param_bound<T>(tpb: TyParamBound, fld: &mut T) -> TyParamBound
+    where T: Folder
+{
+    match tpb {
+        TraitTyParamBound(ty, modifier) => TraitTyParamBound(fld.fold_poly_trait_ref(ty), modifier),
+        RegionTyParamBound(lifetime) => RegionTyParamBound(fld.fold_lifetime(lifetime)),
+    }
+}
+
+pub fn noop_fold_ty_param<T: Folder>(tp: TyParam, fld: &mut T) -> TyParam {
+    let TyParam {id, name, bounds, default, span} = tp;
+    TyParam {
+        id: fld.new_id(id),
+        name: name,
+        bounds: fld.fold_bounds(bounds),
+        default: default.map(|x| fld.fold_ty(x)),
+        span: span,
+    }
+}
+
+pub fn noop_fold_ty_params<T: Folder>(tps: HirVec<TyParam>,
+                                      fld: &mut T)
+                                      -> HirVec<TyParam> {
+    tps.move_map(|tp| fld.fold_ty_param(tp))
+}
+
+pub fn noop_fold_lifetime<T: Folder>(l: Lifetime, fld: &mut T) -> Lifetime {
+    Lifetime {
+        id: fld.new_id(l.id),
+        name: l.name,
+        span: fld.new_span(l.span),
+    }
+}
+
+pub fn noop_fold_lifetime_def<T: Folder>(l: LifetimeDef, fld: &mut T) -> LifetimeDef {
+    LifetimeDef {
+        lifetime: fld.fold_lifetime(l.lifetime),
+        bounds: fld.fold_lifetimes(l.bounds),
+    }
+}
+
+pub fn noop_fold_lifetimes<T: Folder>(lts: HirVec<Lifetime>, fld: &mut T) -> HirVec<Lifetime> {
+    lts.move_map(|l| fld.fold_lifetime(l))
+}
+
+pub fn noop_fold_lifetime_defs<T: Folder>(lts: HirVec<LifetimeDef>,
+                                          fld: &mut T)
+                                          -> HirVec<LifetimeDef> {
+    lts.move_map(|l| fld.fold_lifetime_def(l))
+}
+
+pub fn noop_fold_opt_lifetime<T: Folder>(o_lt: Option<Lifetime>, fld: &mut T) -> Option<Lifetime> {
+    o_lt.map(|lt| fld.fold_lifetime(lt))
+}
+
+pub fn noop_fold_generics<T: Folder>(Generics { ty_params, lifetimes, where_clause }: Generics,
+                                     fld: &mut T)
+                                     -> Generics {
+    Generics {
+        ty_params: fld.fold_ty_params(ty_params),
+        lifetimes: fld.fold_lifetime_defs(lifetimes),
+        where_clause: fld.fold_where_clause(where_clause),
+    }
+}
+
+pub fn noop_fold_where_clause<T: Folder>(WhereClause { id, predicates }: WhereClause,
+                                         fld: &mut T)
+                                         -> WhereClause {
+    WhereClause {
+        id: fld.new_id(id),
+        predicates: predicates.move_map(|predicate| fld.fold_where_predicate(predicate)),
+    }
+}
+
+pub fn noop_fold_where_predicate<T: Folder>(pred: WherePredicate, fld: &mut T) -> WherePredicate {
+    match pred {
+        hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate{bound_lifetimes,
+                                                                     bounded_ty,
+                                                                     bounds,
+                                                                     span}) => {
+            hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
+                bound_lifetimes: fld.fold_lifetime_defs(bound_lifetimes),
+                bounded_ty: fld.fold_ty(bounded_ty),
+                bounds: bounds.move_map(|x| fld.fold_ty_param_bound(x)),
+                span: fld.new_span(span),
+            })
+        }
+        hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate{lifetime,
+                                                                       bounds,
+                                                                       span}) => {
+            hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
+                span: fld.new_span(span),
+                lifetime: fld.fold_lifetime(lifetime),
+                bounds: bounds.move_map(|bound| fld.fold_lifetime(bound)),
+            })
+        }
+        hir::WherePredicate::EqPredicate(hir::WhereEqPredicate{id,
+                                                               path,
+                                                               ty,
+                                                               span}) => {
+            hir::WherePredicate::EqPredicate(hir::WhereEqPredicate {
+                id: fld.new_id(id),
+                path: fld.fold_path(path),
+                ty: fld.fold_ty(ty),
+                span: fld.new_span(span),
+            })
+        }
+    }
+}
+
+pub fn noop_fold_variant_data<T: Folder>(vdata: VariantData, fld: &mut T) -> VariantData {
+    match vdata {
+        VariantData::Struct(fields, id) => {
+            VariantData::Struct(fields.move_map(|f| fld.fold_struct_field(f)),
+                                fld.new_id(id))
+        }
+        VariantData::Tuple(fields, id) => {
+            VariantData::Tuple(fields.move_map(|f| fld.fold_struct_field(f)),
+                               fld.new_id(id))
+        }
+        VariantData::Unit(id) => VariantData::Unit(fld.new_id(id)),
+    }
+}
+
+pub fn noop_fold_trait_ref<T: Folder>(p: TraitRef, fld: &mut T) -> TraitRef {
+    let id = fld.new_id(p.ref_id);
+    let TraitRef {
+        path,
+        ref_id: _,
+    } = p;
+    hir::TraitRef {
+        path: fld.fold_path(path),
+        ref_id: id,
+    }
+}
+
+pub fn noop_fold_poly_trait_ref<T: Folder>(p: PolyTraitRef, fld: &mut T) -> PolyTraitRef {
+    hir::PolyTraitRef {
+        bound_lifetimes: fld.fold_lifetime_defs(p.bound_lifetimes),
+        trait_ref: fld.fold_trait_ref(p.trait_ref),
+        span: fld.new_span(p.span),
+    }
+}
+
+pub fn noop_fold_struct_field<T: Folder>(f: StructField, fld: &mut T) -> StructField {
+    StructField {
+        span: fld.new_span(f.span),
+        id: fld.new_id(f.id),
+        name: f.name,
+        vis: f.vis,
+        ty: fld.fold_ty(f.ty),
+        attrs: fold_attrs(f.attrs, fld),
+    }
+}
+
+pub fn noop_fold_field<T: Folder>(Field { name, expr, span }: Field, folder: &mut T) -> Field {
+    Field {
+        name: respan(folder.new_span(name.span), folder.fold_name(name.node)),
+        expr: folder.fold_expr(expr),
+        span: folder.new_span(span),
+    }
+}
+
+pub fn noop_fold_mt<T: Folder>(MutTy { ty, mutbl }: MutTy, folder: &mut T) -> MutTy {
+    MutTy {
+        ty: folder.fold_ty(ty),
+        mutbl: mutbl,
+    }
+}
+
+pub fn noop_fold_opt_bounds<T: Folder>(b: Option<TyParamBounds>,
+                                       folder: &mut T)
+                                       -> Option<TyParamBounds> {
+    b.map(|bounds| folder.fold_bounds(bounds))
+}
+
+fn noop_fold_bounds<T: Folder>(bounds: TyParamBounds, folder: &mut T) -> TyParamBounds {
+    bounds.move_map(|bound| folder.fold_ty_param_bound(bound))
+}
+
+pub fn noop_fold_block<T: Folder>(b: P<Block>, folder: &mut T) -> P<Block> {
+    b.map(|Block { id, stmts, expr, rules, span }| {
+        Block {
+            id: folder.new_id(id),
+            stmts: stmts.move_map(|s| folder.fold_stmt(s)),
+            expr: expr.map(|x| folder.fold_expr(x)),
+            rules: rules,
+            span: folder.new_span(span),
+        }
+    })
+}
+
+pub fn noop_fold_item_underscore<T: Folder>(i: Item_, folder: &mut T) -> Item_ {
+    match i {
+        ItemExternCrate(string) => ItemExternCrate(string),
+        ItemUse(view_path) => {
+            ItemUse(folder.fold_view_path(view_path))
+        }
+        ItemStatic(t, m, e) => {
+            ItemStatic(folder.fold_ty(t), m, folder.fold_expr(e))
+        }
+        ItemConst(t, e) => {
+            ItemConst(folder.fold_ty(t), folder.fold_expr(e))
+        }
+        ItemFn(decl, unsafety, constness, abi, generics, body) => {
+            ItemFn(folder.fold_fn_decl(decl),
+                   unsafety,
+                   constness,
+                   abi,
+                   folder.fold_generics(generics),
+                   folder.fold_block(body))
+        }
+        ItemMod(m) => ItemMod(folder.fold_mod(m)),
+        ItemForeignMod(nm) => ItemForeignMod(folder.fold_foreign_mod(nm)),
+        ItemTy(t, generics) => {
+            ItemTy(folder.fold_ty(t), folder.fold_generics(generics))
+        }
+        ItemEnum(enum_definition, generics) => {
+            ItemEnum(hir::EnumDef {
+                         variants: enum_definition.variants.move_map(|x| folder.fold_variant(x)),
+                     },
+                     folder.fold_generics(generics))
+        }
+        ItemStruct(struct_def, generics) => {
+            let struct_def = folder.fold_variant_data(struct_def);
+            ItemStruct(struct_def, folder.fold_generics(generics))
+        }
+        ItemDefaultImpl(unsafety, ref trait_ref) => {
+            ItemDefaultImpl(unsafety, folder.fold_trait_ref((*trait_ref).clone()))
+        }
+        ItemImpl(unsafety, polarity, generics, ifce, ty, impl_items) => {
+            let new_impl_items = impl_items
+                .move_map(|item| folder.fold_impl_item(item));
+            let ifce = match ifce {
+                None => None,
+                Some(ref trait_ref) => {
+                    Some(folder.fold_trait_ref((*trait_ref).clone()))
+                }
+            };
+            ItemImpl(unsafety,
+                     polarity,
+                     folder.fold_generics(generics),
+                     ifce,
+                     folder.fold_ty(ty),
+                     new_impl_items)
+        }
+        ItemTrait(unsafety, generics, bounds, items) => {
+            let bounds = folder.fold_bounds(bounds);
+            let items = items.move_map(|item| folder.fold_trait_item(item));
+            ItemTrait(unsafety, folder.fold_generics(generics), bounds, items)
+        }
+    }
+}
+
+pub fn noop_fold_trait_item<T: Folder>(i: TraitItem,
+                                       folder: &mut T)
+                                       -> TraitItem {
+    TraitItem {
+        id: folder.new_id(i.id),
+        name: folder.fold_name(i.name),
+        attrs: fold_attrs(i.attrs, folder),
+        node: match i.node {
+            ConstTraitItem(ty, default) => {
+                ConstTraitItem(folder.fold_ty(ty), default.map(|x| folder.fold_expr(x)))
+            }
+            MethodTraitItem(sig, body) => {
+                MethodTraitItem(noop_fold_method_sig(sig, folder),
+                                body.map(|x| folder.fold_block(x)))
+            }
+            TypeTraitItem(bounds, default) => {
+                TypeTraitItem(folder.fold_bounds(bounds),
+                              default.map(|x| folder.fold_ty(x)))
+            }
+        },
+        span: folder.new_span(i.span),
+    }
+}
+
+pub fn noop_fold_impl_item<T: Folder>(i: ImplItem, folder: &mut T) -> ImplItem {
+    ImplItem {
+        id: folder.new_id(i.id),
+        name: folder.fold_name(i.name),
+        attrs: fold_attrs(i.attrs, folder),
+        vis: i.vis,
+        defaultness: i.defaultness,
+        node: match i.node {
+            ImplItemKind::Const(ty, expr) => {
+                ImplItemKind::Const(folder.fold_ty(ty), folder.fold_expr(expr))
+            }
+            ImplItemKind::Method(sig, body) => {
+                ImplItemKind::Method(noop_fold_method_sig(sig, folder), folder.fold_block(body))
+            }
+            ImplItemKind::Type(ty) => ImplItemKind::Type(folder.fold_ty(ty)),
+        },
+        span: folder.new_span(i.span),
+    }
+}
+
+pub fn noop_fold_mod<T: Folder>(Mod { inner, item_ids }: Mod, folder: &mut T) -> Mod {
+    Mod {
+        inner: folder.new_span(inner),
+        item_ids: item_ids.move_map(|x| folder.fold_item_id(x)),
+    }
+}
+
+pub fn noop_fold_crate<T: Folder>(Crate { module, attrs, config, span,
+                                          exported_macros, items }: Crate,
+                                  folder: &mut T)
+                                  -> Crate {
+    let config = folder.fold_meta_items(config);
+
+    let crate_mod = folder.fold_item(hir::Item {
+        name: token::special_idents::invalid.name,
+        attrs: attrs,
+        id: DUMMY_NODE_ID,
+        vis: hir::Public,
+        span: span,
+        node: hir::ItemMod(module),
+    });
+
+    let (module, attrs, span) = match crate_mod {
+        hir::Item { attrs, span, node, .. } => {
+            match node {
+                hir::ItemMod(m) => (m, attrs, span),
+                _ => panic!("fold converted a module to not a module"),
+            }
+        }
+    };
+
+    let items = items.into_iter()
+                     .map(|(id, item)| (id, folder.fold_item(item)))
+                     .collect();
+
+    Crate {
+        module: module,
+        attrs: attrs,
+        config: config,
+        span: span,
+        exported_macros: exported_macros,
+        items: items,
+    }
+}
+
+pub fn noop_fold_item_id<T: Folder>(i: ItemId, folder: &mut T) -> ItemId {
+    let id = folder.map_id(i.id);
+    ItemId { id: id }
+}
+
+// fold one item into one item
+pub fn noop_fold_item<T: Folder>(item: Item, folder: &mut T) -> Item {
+    let Item { id, name, attrs, node, vis, span } = item;
+    let id = folder.new_id(id);
+    let node = folder.fold_item_underscore(node);
+
+    Item {
+        id: id,
+        name: folder.fold_name(name),
+        attrs: fold_attrs(attrs, folder),
+        node: node,
+        vis: vis,
+        span: folder.new_span(span),
+    }
+}
+
+pub fn noop_fold_foreign_item<T: Folder>(ni: ForeignItem, folder: &mut T) -> ForeignItem {
+    ForeignItem {
+        id: folder.new_id(ni.id),
+        name: folder.fold_name(ni.name),
+        attrs: fold_attrs(ni.attrs, folder),
+        node: match ni.node {
+            ForeignItemFn(fdec, generics) => {
+                ForeignItemFn(folder.fold_fn_decl(fdec), folder.fold_generics(generics))
+            }
+            ForeignItemStatic(t, m) => {
+                ForeignItemStatic(folder.fold_ty(t), m)
+            }
+        },
+        vis: ni.vis,
+        span: folder.new_span(ni.span),
+    }
+}
+
+pub fn noop_fold_method_sig<T: Folder>(sig: MethodSig, folder: &mut T) -> MethodSig {
+    MethodSig {
+        generics: folder.fold_generics(sig.generics),
+        abi: sig.abi,
+        explicit_self: folder.fold_explicit_self(sig.explicit_self),
+        unsafety: sig.unsafety,
+        constness: sig.constness,
+        decl: folder.fold_fn_decl(sig.decl),
+    }
+}
+
+pub fn noop_fold_pat<T: Folder>(p: P<Pat>, folder: &mut T) -> P<Pat> {
+    p.map(|Pat { id, node, span }| {
+        Pat {
+            id: folder.new_id(id),
+            node: match node {
+                PatKind::Wild => PatKind::Wild,
+                PatKind::Ident(binding_mode, pth1, sub) => {
+                    PatKind::Ident(binding_mode,
+                             Spanned {
+                                 span: folder.new_span(pth1.span),
+                                 node: folder.fold_ident(pth1.node),
+                             },
+                             sub.map(|x| folder.fold_pat(x)))
+                }
+                PatKind::Lit(e) => PatKind::Lit(folder.fold_expr(e)),
+                PatKind::TupleStruct(pth, pats) => {
+                    PatKind::TupleStruct(folder.fold_path(pth),
+                            pats.map(|pats| pats.move_map(|x| folder.fold_pat(x))))
+                }
+                PatKind::Path(pth) => {
+                    PatKind::Path(folder.fold_path(pth))
+                }
+                PatKind::QPath(qself, pth) => {
+                    let qself = QSelf { ty: folder.fold_ty(qself.ty), ..qself };
+                    PatKind::QPath(qself, folder.fold_path(pth))
+                }
+                PatKind::Struct(pth, fields, etc) => {
+                    let pth = folder.fold_path(pth);
+                    let fs = fields.move_map(|f| {
+                        Spanned {
+                            span: folder.new_span(f.span),
+                            node: hir::FieldPat {
+                                name: f.node.name,
+                                pat: folder.fold_pat(f.node.pat),
+                                is_shorthand: f.node.is_shorthand,
+                            },
+                        }
+                    });
+                    PatKind::Struct(pth, fs, etc)
+                }
+                PatKind::Tup(elts) => PatKind::Tup(elts.move_map(|x| folder.fold_pat(x))),
+                PatKind::Box(inner) => PatKind::Box(folder.fold_pat(inner)),
+                PatKind::Ref(inner, mutbl) => PatKind::Ref(folder.fold_pat(inner), mutbl),
+                PatKind::Range(e1, e2) => {
+                    PatKind::Range(folder.fold_expr(e1), folder.fold_expr(e2))
+                }
+                PatKind::Vec(before, slice, after) => {
+                    PatKind::Vec(before.move_map(|x| folder.fold_pat(x)),
+                           slice.map(|x| folder.fold_pat(x)),
+                           after.move_map(|x| folder.fold_pat(x)))
+                }
+            },
+            span: folder.new_span(span),
+        }
+    })
+}
+
+pub fn noop_fold_expr<T: Folder>(Expr { id, node, span, attrs }: Expr, folder: &mut T) -> Expr {
+    Expr {
+        id: folder.new_id(id),
+        node: match node {
+            ExprBox(e) => {
+                ExprBox(folder.fold_expr(e))
+            }
+            ExprVec(exprs) => {
+                ExprVec(exprs.move_map(|x| folder.fold_expr(x)))
+            }
+            ExprRepeat(expr, count) => {
+                ExprRepeat(folder.fold_expr(expr), folder.fold_expr(count))
+            }
+            ExprTup(elts) => ExprTup(elts.move_map(|x| folder.fold_expr(x))),
+            ExprCall(f, args) => {
+                ExprCall(folder.fold_expr(f), args.move_map(|x| folder.fold_expr(x)))
+            }
+            ExprMethodCall(name, tps, args) => {
+                ExprMethodCall(respan(folder.new_span(name.span), folder.fold_name(name.node)),
+                               tps.move_map(|x| folder.fold_ty(x)),
+                               args.move_map(|x| folder.fold_expr(x)))
+            }
+            ExprBinary(binop, lhs, rhs) => {
+                ExprBinary(binop, folder.fold_expr(lhs), folder.fold_expr(rhs))
+            }
+            ExprUnary(binop, ohs) => {
+                ExprUnary(binop, folder.fold_expr(ohs))
+            }
+            ExprLit(l) => ExprLit(l),
+            ExprCast(expr, ty) => {
+                ExprCast(folder.fold_expr(expr), folder.fold_ty(ty))
+            }
+            ExprType(expr, ty) => {
+                ExprType(folder.fold_expr(expr), folder.fold_ty(ty))
+            }
+            ExprAddrOf(m, ohs) => ExprAddrOf(m, folder.fold_expr(ohs)),
+            ExprIf(cond, tr, fl) => {
+                ExprIf(folder.fold_expr(cond),
+                       folder.fold_block(tr),
+                       fl.map(|x| folder.fold_expr(x)))
+            }
+            ExprWhile(cond, body, opt_ident) => {
+                ExprWhile(folder.fold_expr(cond),
+                          folder.fold_block(body),
+                          opt_ident.map(|i| folder.fold_ident(i)))
+            }
+            ExprLoop(body, opt_ident) => {
+                ExprLoop(folder.fold_block(body),
+                         opt_ident.map(|i| folder.fold_ident(i)))
+            }
+            ExprMatch(expr, arms, source) => {
+                ExprMatch(folder.fold_expr(expr),
+                          arms.move_map(|x| folder.fold_arm(x)),
+                          source)
+            }
+            ExprClosure(capture_clause, decl, body) => {
+                ExprClosure(capture_clause,
+                            folder.fold_fn_decl(decl),
+                            folder.fold_block(body))
+            }
+            ExprBlock(blk) => ExprBlock(folder.fold_block(blk)),
+            ExprAssign(el, er) => {
+                ExprAssign(folder.fold_expr(el), folder.fold_expr(er))
+            }
+            ExprAssignOp(op, el, er) => {
+                ExprAssignOp(op, folder.fold_expr(el), folder.fold_expr(er))
+            }
+            ExprField(el, name) => {
+                ExprField(folder.fold_expr(el),
+                          respan(folder.new_span(name.span), folder.fold_name(name.node)))
+            }
+            ExprTupField(el, index) => {
+                ExprTupField(folder.fold_expr(el),
+                             respan(folder.new_span(index.span), folder.fold_usize(index.node)))
+            }
+            ExprIndex(el, er) => {
+                ExprIndex(folder.fold_expr(el), folder.fold_expr(er))
+            }
+            ExprPath(qself, path) => {
+                let qself = qself.map(|QSelf { ty, position }| {
+                    QSelf {
+                        ty: folder.fold_ty(ty),
+                        position: position,
+                    }
+                });
+                ExprPath(qself, folder.fold_path(path))
+            }
+            ExprBreak(opt_ident) => ExprBreak(opt_ident.map(|label| {
+                respan(folder.new_span(label.span), folder.fold_ident(label.node))
+            })),
+            ExprAgain(opt_ident) => ExprAgain(opt_ident.map(|label| {
+                respan(folder.new_span(label.span), folder.fold_ident(label.node))
+            })),
+            ExprRet(e) => ExprRet(e.map(|x| folder.fold_expr(x))),
+            ExprInlineAsm(asm, outputs, inputs) => {
+                ExprInlineAsm(asm,
+                              outputs.move_map(|x| folder.fold_expr(x)),
+                              inputs.move_map(|x| folder.fold_expr(x)))
+            }
+            ExprStruct(path, fields, maybe_expr) => {
+                ExprStruct(folder.fold_path(path),
+                           fields.move_map(|x| folder.fold_field(x)),
+                           maybe_expr.map(|x| folder.fold_expr(x)))
+            }
+        },
+        span: folder.new_span(span),
+        attrs: attrs.map_thin_attrs(|attrs| fold_attrs(attrs.into(), folder).into()),
+    }
+}
+
+pub fn noop_fold_stmt<T: Folder>(stmt: Stmt, folder: &mut T) -> Stmt {
+    let span = folder.new_span(stmt.span);
+    match stmt.node {
+        StmtDecl(d, id) => {
+            let id = folder.new_id(id);
+            Spanned {
+                node: StmtDecl(folder.fold_decl(d), id),
+                span: span
+            }
+        }
+        StmtExpr(e, id) => {
+            let id = folder.new_id(id);
+            Spanned {
+                node: StmtExpr(folder.fold_expr(e), id),
+                span: span,
+            }
+        }
+        StmtSemi(e, id) => {
+            let id = folder.new_id(id);
+            Spanned {
+                node: StmtSemi(folder.fold_expr(e), id),
+                span: span,
+            }
+        }
+    }
+}
diff --git a/src/librustc/hir/intravisit.rs b/src/librustc/hir/intravisit.rs
new file mode 100644 (file)
index 0000000..55fd58d
--- /dev/null
@@ -0,0 +1,1070 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! HIR walker. Each overridden visit method has full control over what
+//! happens with its node: it can do its own traversal of the node's children,
+//! call `intravisit::walk_*` to apply the default traversal algorithm, or prevent
+//! deeper traversal by doing nothing.
+//!
+//! When visiting the HIR, the contents of nested items are NOT visited
+//! by default. This is different from the AST visitor, which does a deep walk.
+//! Hence this module is called `intravisit`; see the method `visit_nested_item`
+//! for more details.
+//!
+//! Note: it is an important invariant that the default visitor walks
+//! the body of a function in "execution order" (more concretely,
+//! reverse post-order with respect to the CFG implied by the AST),
+//! meaning that if AST node A may execute before AST node B, then A
+//! is visited first.  The borrow checker in particular relies on this
+//! property.
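+//!
+//! As a minimal sketch (the `ExprCounter` type below is hypothetical, named
+//! here only for illustration), a visitor overrides the methods it cares
+//! about and delegates to the matching `walk_*` function so the default
+//! traversal of the node's children is kept:
+//!
+//! ```ignore
+//! struct ExprCounter { count: usize }
+//!
+//! impl<'v> Visitor<'v> for ExprCounter {
+//!     fn visit_expr(&mut self, ex: &'v Expr) {
+//!         self.count += 1;
+//!         // Keep the default traversal so nested expressions are counted too.
+//!         walk_expr(self, ex);
+//!     }
+//! }
+//! ```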
+
+use syntax::abi::Abi;
+use syntax::ast::{NodeId, CRATE_NODE_ID, Name, Attribute};
+use syntax::attr::ThinAttributesExt;
+use syntax::codemap::Span;
+use hir::*;
+
+use std::cmp;
+use std::u32;
+
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum FnKind<'a> {
+    /// fn foo() or extern "Abi" fn foo()
+    ItemFn(Name, &'a Generics, Unsafety, Constness, Abi, &'a Visibility, &'a [Attribute]),
+
+    /// fn foo(&self)
+    Method(Name, &'a MethodSig, Option<&'a Visibility>, &'a [Attribute]),
+
+    /// |x, y| {}
+    Closure(&'a [Attribute]),
+}
+
+impl<'a> FnKind<'a> {
+    pub fn attrs(&self) -> &'a [Attribute] {
+        match *self {
+            FnKind::ItemFn(_, _, _, _, _, _, attrs) => attrs,
+            FnKind::Method(_, _, _, attrs) => attrs,
+            FnKind::Closure(attrs) => attrs,
+        }
+    }
+}
+
+/// Each method of the Visitor trait is a hook to be potentially
+/// overridden.  Each method's default implementation recursively visits
+/// the substructure of the input via the corresponding `walk` method;
+/// e.g. the `visit_mod` method by default calls `intravisit::walk_mod`.
+///
+/// Note that this visitor does NOT visit nested items by default
+/// (this is why the module is called `intravisit`, to distinguish it
+/// from the AST's `visit` module, which acts differently). If you
+/// simply want to visit all items in the crate in some order, you
+/// should call `Crate::visit_all_items`. Otherwise, see the comment
+/// on `visit_nested_item` for details on how to visit nested items.
+///
+/// If you want to ensure that your code handles every variant
+/// explicitly, you need to override each method.  (And you also need
+/// to monitor future changes to `Visitor` in case a new method with a
+/// new default implementation gets introduced.)
+pub trait Visitor<'v> : Sized {
+    ///////////////////////////////////////////////////////////////////////////
+    // Nested items.
+
+    /// Invoked when a nested item is encountered. By default, does
+    /// nothing. If you want a deep walk, you need to override to
+    /// fetch the item contents. But most of the time, it is easier
+    /// (and better) to invoke `Crate::visit_all_items`, which visits
+    /// all items in the crate in some order (but doesn't respect
+    /// nesting).
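+    ///
+    /// A deep-walking override might look like the following sketch, assuming
+    /// the visitor keeps a reference to the enclosing `Crate` in a
+    /// (hypothetical) `krate` field with the right lifetime, and that the
+    /// crate's `items` map can be indexed by the item's node id:
+    ///
+    /// ```ignore
+    /// fn visit_nested_item(&mut self, id: ItemId) {
+    ///     // Fetch the item's contents and continue the walk into them.
+    ///     let item = &self.krate.items[&id.id];
+    ///     self.visit_item(item);
+    /// }
+    /// ```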
+    #[allow(unused_variables)]
+    fn visit_nested_item(&mut self, id: ItemId) {
+    }
+
+    /// Visit the top-level item and (optionally) nested items. See
+    /// `visit_nested_item` for details.
+    fn visit_item(&mut self, i: &'v Item) {
+        walk_item(self, i)
+    }
+
+    ///////////////////////////////////////////////////////////////////////////
+
+    fn visit_name(&mut self, _span: Span, _name: Name) {
+        // Nothing to do.
+    }
+    fn visit_ident(&mut self, span: Span, ident: Ident) {
+        walk_ident(self, span, ident);
+    }
+    fn visit_mod(&mut self, m: &'v Mod, _s: Span, _n: NodeId) {
+        walk_mod(self, m)
+    }
+    fn visit_foreign_item(&mut self, i: &'v ForeignItem) {
+        walk_foreign_item(self, i)
+    }
+    fn visit_local(&mut self, l: &'v Local) {
+        walk_local(self, l)
+    }
+    fn visit_block(&mut self, b: &'v Block) {
+        walk_block(self, b)
+    }
+    fn visit_stmt(&mut self, s: &'v Stmt) {
+        walk_stmt(self, s)
+    }
+    fn visit_arm(&mut self, a: &'v Arm) {
+        walk_arm(self, a)
+    }
+    fn visit_pat(&mut self, p: &'v Pat) {
+        walk_pat(self, p)
+    }
+    fn visit_decl(&mut self, d: &'v Decl) {
+        walk_decl(self, d)
+    }
+    fn visit_expr(&mut self, ex: &'v Expr) {
+        walk_expr(self, ex)
+    }
+    fn visit_expr_post(&mut self, _ex: &'v Expr) {
+    }
+    fn visit_ty(&mut self, t: &'v Ty) {
+        walk_ty(self, t)
+    }
+    fn visit_generics(&mut self, g: &'v Generics) {
+        walk_generics(self, g)
+    }
+    fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl, b: &'v Block, s: Span, _: NodeId) {
+        walk_fn(self, fk, fd, b, s)
+    }
+    fn visit_trait_item(&mut self, ti: &'v TraitItem) {
+        walk_trait_item(self, ti)
+    }
+    fn visit_impl_item(&mut self, ii: &'v ImplItem) {
+        walk_impl_item(self, ii)
+    }
+    fn visit_trait_ref(&mut self, t: &'v TraitRef) {
+        walk_trait_ref(self, t)
+    }
+    fn visit_ty_param_bound(&mut self, bounds: &'v TyParamBound) {
+        walk_ty_param_bound(self, bounds)
+    }
+    fn visit_poly_trait_ref(&mut self, t: &'v PolyTraitRef, m: &'v TraitBoundModifier) {
+        walk_poly_trait_ref(self, t, m)
+    }
+    fn visit_variant_data(&mut self,
+                          s: &'v VariantData,
+                          _: Name,
+                          _: &'v Generics,
+                          _: NodeId,
+                          _: Span) {
+        walk_struct_def(self, s)
+    }
+    fn visit_struct_field(&mut self, s: &'v StructField) {
+        walk_struct_field(self, s)
+    }
+    fn visit_enum_def(&mut self,
+                      enum_definition: &'v EnumDef,
+                      generics: &'v Generics,
+                      item_id: NodeId,
+                      _: Span) {
+        walk_enum_def(self, enum_definition, generics, item_id)
+    }
+    fn visit_variant(&mut self, v: &'v Variant, g: &'v Generics, item_id: NodeId) {
+        walk_variant(self, v, g, item_id)
+    }
+    fn visit_lifetime(&mut self, lifetime: &'v Lifetime) {
+        walk_lifetime(self, lifetime)
+    }
+    fn visit_lifetime_def(&mut self, lifetime: &'v LifetimeDef) {
+        walk_lifetime_def(self, lifetime)
+    }
+    fn visit_explicit_self(&mut self, es: &'v ExplicitSelf) {
+        walk_explicit_self(self, es)
+    }
+    fn visit_path(&mut self, path: &'v Path, _id: NodeId) {
+        walk_path(self, path)
+    }
+    fn visit_path_list_item(&mut self, prefix: &'v Path, item: &'v PathListItem) {
+        walk_path_list_item(self, prefix, item)
+    }
+    fn visit_path_segment(&mut self, path_span: Span, path_segment: &'v PathSegment) {
+        walk_path_segment(self, path_span, path_segment)
+    }
+    fn visit_path_parameters(&mut self, path_span: Span, path_parameters: &'v PathParameters) {
+        walk_path_parameters(self, path_span, path_parameters)
+    }
+    fn visit_assoc_type_binding(&mut self, type_binding: &'v TypeBinding) {
+        walk_assoc_type_binding(self, type_binding)
+    }
+    fn visit_attribute(&mut self, _attr: &'v Attribute) {
+    }
+    fn visit_macro_def(&mut self, macro_def: &'v MacroDef) {
+        walk_macro_def(self, macro_def)
+    }
+}
+
+pub fn walk_opt_name<'v, V: Visitor<'v>>(visitor: &mut V, span: Span, opt_name: Option<Name>) {
+    for name in opt_name {
+        visitor.visit_name(span, name);
+    }
+}
+
+pub fn walk_opt_ident<'v, V: Visitor<'v>>(visitor: &mut V, span: Span, opt_ident: Option<Ident>) {
+    for ident in opt_ident {
+        visitor.visit_ident(span, ident);
+    }
+}
+
+pub fn walk_ident<'v, V: Visitor<'v>>(visitor: &mut V, span: Span, ident: Ident) {
+    visitor.visit_name(span, ident.name);
+}
+
+/// Walks the contents of a crate. See also `Crate::visit_all_items`.
+pub fn walk_crate<'v, V: Visitor<'v>>(visitor: &mut V, krate: &'v Crate) {
+    visitor.visit_mod(&krate.module, krate.span, CRATE_NODE_ID);
+    walk_list!(visitor, visit_attribute, &krate.attrs);
+    walk_list!(visitor, visit_macro_def, &krate.exported_macros);
+}
+
+pub fn walk_macro_def<'v, V: Visitor<'v>>(visitor: &mut V, macro_def: &'v MacroDef) {
+    visitor.visit_name(macro_def.span, macro_def.name);
+    walk_opt_name(visitor, macro_def.span, macro_def.imported_from);
+    walk_list!(visitor, visit_attribute, &macro_def.attrs);
+}
+
+pub fn walk_mod<'v, V: Visitor<'v>>(visitor: &mut V, module: &'v Mod) {
+    for &item_id in &module.item_ids {
+        visitor.visit_nested_item(item_id);
+    }
+}
+
+pub fn walk_local<'v, V: Visitor<'v>>(visitor: &mut V, local: &'v Local) {
+    visitor.visit_pat(&local.pat);
+    walk_list!(visitor, visit_ty, &local.ty);
+    walk_list!(visitor, visit_expr, &local.init);
+}
+
+pub fn walk_lifetime<'v, V: Visitor<'v>>(visitor: &mut V, lifetime: &'v Lifetime) {
+    visitor.visit_name(lifetime.span, lifetime.name);
+}
+
+pub fn walk_lifetime_def<'v, V: Visitor<'v>>(visitor: &mut V, lifetime_def: &'v LifetimeDef) {
+    visitor.visit_lifetime(&lifetime_def.lifetime);
+    walk_list!(visitor, visit_lifetime, &lifetime_def.bounds);
+}
+
+pub fn walk_explicit_self<'v, V: Visitor<'v>>(visitor: &mut V, explicit_self: &'v ExplicitSelf) {
+    match explicit_self.node {
+        SelfStatic => {}
+        SelfValue(name) => {
+            visitor.visit_name(explicit_self.span, name)
+        }
+        SelfRegion(ref opt_lifetime, _, name) => {
+            visitor.visit_name(explicit_self.span, name);
+            walk_list!(visitor, visit_lifetime, opt_lifetime);
+        }
+        SelfExplicit(ref typ, name) => {
+            visitor.visit_name(explicit_self.span, name);
+            visitor.visit_ty(typ)
+        }
+    }
+}
+
+pub fn walk_poly_trait_ref<'v, V>(visitor: &mut V,
+                                  trait_ref: &'v PolyTraitRef,
+                                  _modifier: &'v TraitBoundModifier)
+    where V: Visitor<'v>
+{
+    walk_list!(visitor, visit_lifetime_def, &trait_ref.bound_lifetimes);
+    visitor.visit_trait_ref(&trait_ref.trait_ref);
+}
+
+pub fn walk_trait_ref<'v, V>(visitor: &mut V, trait_ref: &'v TraitRef)
+    where V: Visitor<'v>
+{
+    visitor.visit_path(&trait_ref.path, trait_ref.ref_id)
+}
+
+pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item) {
+    visitor.visit_name(item.span, item.name);
+    match item.node {
+        ItemExternCrate(opt_name) => {
+            walk_opt_name(visitor, item.span, opt_name)
+        }
+        ItemUse(ref vp) => {
+            match vp.node {
+                ViewPathSimple(name, ref path) => {
+                    visitor.visit_name(vp.span, name);
+                    visitor.visit_path(path, item.id);
+                }
+                ViewPathGlob(ref path) => {
+                    visitor.visit_path(path, item.id);
+                }
+                ViewPathList(ref prefix, ref list) => {
+                    if !list.is_empty() {
+                        for item in list {
+                            visitor.visit_path_list_item(prefix, item)
+                        }
+                    } else {
+                        visitor.visit_path(prefix, item.id);
+                    }
+                }
+            }
+        }
+        ItemStatic(ref typ, _, ref expr) |
+        ItemConst(ref typ, ref expr) => {
+            visitor.visit_ty(typ);
+            visitor.visit_expr(expr);
+        }
+        ItemFn(ref declaration, unsafety, constness, abi, ref generics, ref body) => {
+            visitor.visit_fn(FnKind::ItemFn(item.name,
+                                            generics,
+                                            unsafety,
+                                            constness,
+                                            abi,
+                                            &item.vis,
+                                            &item.attrs),
+                             declaration,
+                             body,
+                             item.span,
+                             item.id)
+        }
+        ItemMod(ref module) => {
+            visitor.visit_mod(module, item.span, item.id)
+        }
+        ItemForeignMod(ref foreign_module) => {
+            walk_list!(visitor, visit_foreign_item, &foreign_module.items);
+        }
+        ItemTy(ref typ, ref type_parameters) => {
+            visitor.visit_ty(typ);
+            visitor.visit_generics(type_parameters)
+        }
+        ItemEnum(ref enum_definition, ref type_parameters) => {
+            visitor.visit_generics(type_parameters);
+            visitor.visit_enum_def(enum_definition, type_parameters, item.id, item.span)
+        }
+        ItemDefaultImpl(_, ref trait_ref) => {
+            visitor.visit_trait_ref(trait_ref)
+        }
+        ItemImpl(_, _, ref type_parameters, ref opt_trait_reference, ref typ, ref impl_items) => {
+            visitor.visit_generics(type_parameters);
+            walk_list!(visitor, visit_trait_ref, opt_trait_reference);
+            visitor.visit_ty(typ);
+            walk_list!(visitor, visit_impl_item, impl_items);
+        }
+        ItemStruct(ref struct_definition, ref generics) => {
+            visitor.visit_generics(generics);
+            visitor.visit_variant_data(struct_definition, item.name, generics, item.id, item.span);
+        }
+        ItemTrait(_, ref generics, ref bounds, ref methods) => {
+            visitor.visit_generics(generics);
+            walk_list!(visitor, visit_ty_param_bound, bounds);
+            walk_list!(visitor, visit_trait_item, methods);
+        }
+    }
+    walk_list!(visitor, visit_attribute, &item.attrs);
+}
+
+pub fn walk_enum_def<'v, V: Visitor<'v>>(visitor: &mut V,
+                                         enum_definition: &'v EnumDef,
+                                         generics: &'v Generics,
+                                         item_id: NodeId) {
+    walk_list!(visitor,
+               visit_variant,
+               &enum_definition.variants,
+               generics,
+               item_id);
+}
+
+pub fn walk_variant<'v, V: Visitor<'v>>(visitor: &mut V,
+                                        variant: &'v Variant,
+                                        generics: &'v Generics,
+                                        item_id: NodeId) {
+    visitor.visit_name(variant.span, variant.node.name);
+    visitor.visit_variant_data(&variant.node.data,
+                               variant.node.name,
+                               generics,
+                               item_id,
+                               variant.span);
+    walk_list!(visitor, visit_expr, &variant.node.disr_expr);
+    walk_list!(visitor, visit_attribute, &variant.node.attrs);
+}
+
+pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty) {
+    match typ.node {
+        TyVec(ref ty) => {
+            visitor.visit_ty(ty)
+        }
+        TyPtr(ref mutable_type) => {
+            visitor.visit_ty(&mutable_type.ty)
+        }
+        TyRptr(ref opt_lifetime, ref mutable_type) => {
+            walk_list!(visitor, visit_lifetime, opt_lifetime);
+            visitor.visit_ty(&mutable_type.ty)
+        }
+        TyTup(ref tuple_element_types) => {
+            walk_list!(visitor, visit_ty, tuple_element_types);
+        }
+        TyBareFn(ref function_declaration) => {
+            walk_fn_decl(visitor, &function_declaration.decl);
+            walk_list!(visitor, visit_lifetime_def, &function_declaration.lifetimes);
+        }
+        TyPath(ref maybe_qself, ref path) => {
+            if let Some(ref qself) = *maybe_qself {
+                visitor.visit_ty(&qself.ty);
+            }
+            visitor.visit_path(path, typ.id);
+        }
+        TyObjectSum(ref ty, ref bounds) => {
+            visitor.visit_ty(ty);
+            walk_list!(visitor, visit_ty_param_bound, bounds);
+        }
+        TyFixedLengthVec(ref ty, ref expression) => {
+            visitor.visit_ty(ty);
+            visitor.visit_expr(expression)
+        }
+        TyPolyTraitRef(ref bounds) => {
+            walk_list!(visitor, visit_ty_param_bound, bounds);
+        }
+        TyTypeof(ref expression) => {
+            visitor.visit_expr(expression)
+        }
+        TyInfer => {}
+    }
+}
+
+pub fn walk_path<'v, V: Visitor<'v>>(visitor: &mut V, path: &'v Path) {
+    for segment in &path.segments {
+        visitor.visit_path_segment(path.span, segment);
+    }
+}
+
+pub fn walk_path_list_item<'v, V: Visitor<'v>>(visitor: &mut V,
+                                               prefix: &'v Path,
+                                               item: &'v PathListItem) {
+    for segment in &prefix.segments {
+        visitor.visit_path_segment(prefix.span, segment);
+    }
+
+    walk_opt_name(visitor, item.span, item.node.name());
+    walk_opt_name(visitor, item.span, item.node.rename());
+}
+
+pub fn walk_path_segment<'v, V: Visitor<'v>>(visitor: &mut V,
+                                             path_span: Span,
+                                             segment: &'v PathSegment) {
+    visitor.visit_ident(path_span, segment.identifier);
+    visitor.visit_path_parameters(path_span, &segment.parameters);
+}
+
+pub fn walk_path_parameters<'v, V: Visitor<'v>>(visitor: &mut V,
+                                                _path_span: Span,
+                                                path_parameters: &'v PathParameters) {
+    match *path_parameters {
+        AngleBracketedParameters(ref data) => {
+            walk_list!(visitor, visit_ty, &data.types);
+            walk_list!(visitor, visit_lifetime, &data.lifetimes);
+            walk_list!(visitor, visit_assoc_type_binding, &data.bindings);
+        }
+        ParenthesizedParameters(ref data) => {
+            walk_list!(visitor, visit_ty, &data.inputs);
+            walk_list!(visitor, visit_ty, &data.output);
+        }
+    }
+}
+
+pub fn walk_assoc_type_binding<'v, V: Visitor<'v>>(visitor: &mut V,
+                                                   type_binding: &'v TypeBinding) {
+    visitor.visit_name(type_binding.span, type_binding.name);
+    visitor.visit_ty(&type_binding.ty);
+}
+
+pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat) {
+    match pattern.node {
+        PatKind::TupleStruct(ref path, ref opt_children) => {
+            visitor.visit_path(path, pattern.id);
+            if let Some(ref children) = *opt_children {
+                walk_list!(visitor, visit_pat, children);
+            }
+        }
+        PatKind::Path(ref path) => {
+            visitor.visit_path(path, pattern.id);
+        }
+        PatKind::QPath(ref qself, ref path) => {
+            visitor.visit_ty(&qself.ty);
+            visitor.visit_path(path, pattern.id)
+        }
+        PatKind::Struct(ref path, ref fields, _) => {
+            visitor.visit_path(path, pattern.id);
+            for field in fields {
+                visitor.visit_name(field.span, field.node.name);
+                visitor.visit_pat(&field.node.pat)
+            }
+        }
+        PatKind::Tup(ref tuple_elements) => {
+            walk_list!(visitor, visit_pat, tuple_elements);
+        }
+        PatKind::Box(ref subpattern) |
+        PatKind::Ref(ref subpattern, _) => {
+            visitor.visit_pat(subpattern)
+        }
+        PatKind::Ident(_, ref pth1, ref optional_subpattern) => {
+            visitor.visit_ident(pth1.span, pth1.node);
+            walk_list!(visitor, visit_pat, optional_subpattern);
+        }
+        PatKind::Lit(ref expression) => visitor.visit_expr(expression),
+        PatKind::Range(ref lower_bound, ref upper_bound) => {
+            visitor.visit_expr(lower_bound);
+            visitor.visit_expr(upper_bound)
+        }
+        PatKind::Wild => (),
+        PatKind::Vec(ref prepatterns, ref slice_pattern, ref postpatterns) => {
+            walk_list!(visitor, visit_pat, prepatterns);
+            walk_list!(visitor, visit_pat, slice_pattern);
+            walk_list!(visitor, visit_pat, postpatterns);
+        }
+    }
+}
+
+pub fn walk_foreign_item<'v, V: Visitor<'v>>(visitor: &mut V, foreign_item: &'v ForeignItem) {
+    visitor.visit_name(foreign_item.span, foreign_item.name);
+
+    match foreign_item.node {
+        ForeignItemFn(ref function_declaration, ref generics) => {
+            walk_fn_decl(visitor, function_declaration);
+            visitor.visit_generics(generics)
+        }
+        ForeignItemStatic(ref typ, _) => visitor.visit_ty(typ),
+    }
+
+    walk_list!(visitor, visit_attribute, &foreign_item.attrs);
+}
+
+pub fn walk_ty_param_bound<'v, V: Visitor<'v>>(visitor: &mut V, bound: &'v TyParamBound) {
+    match *bound {
+        TraitTyParamBound(ref typ, ref modifier) => {
+            visitor.visit_poly_trait_ref(typ, modifier);
+        }
+        RegionTyParamBound(ref lifetime) => {
+            visitor.visit_lifetime(lifetime);
+        }
+    }
+}
+
+pub fn walk_generics<'v, V: Visitor<'v>>(visitor: &mut V, generics: &'v Generics) {
+    for param in &generics.ty_params {
+        visitor.visit_name(param.span, param.name);
+        walk_list!(visitor, visit_ty_param_bound, &param.bounds);
+        walk_list!(visitor, visit_ty, &param.default);
+    }
+    walk_list!(visitor, visit_lifetime_def, &generics.lifetimes);
+    for predicate in &generics.where_clause.predicates {
+        match predicate {
+            &WherePredicate::BoundPredicate(WhereBoundPredicate{ref bounded_ty,
+                                                                          ref bounds,
+                                                                          ref bound_lifetimes,
+                                                                          ..}) => {
+                visitor.visit_ty(bounded_ty);
+                walk_list!(visitor, visit_ty_param_bound, bounds);
+                walk_list!(visitor, visit_lifetime_def, bound_lifetimes);
+            }
+            &WherePredicate::RegionPredicate(WhereRegionPredicate{ref lifetime,
+                                                                            ref bounds,
+                                                                            ..}) => {
+                visitor.visit_lifetime(lifetime);
+                walk_list!(visitor, visit_lifetime, bounds);
+            }
+            &WherePredicate::EqPredicate(WhereEqPredicate{id,
+                                                                    ref path,
+                                                                    ref ty,
+                                                                    ..}) => {
+                visitor.visit_path(path, id);
+                visitor.visit_ty(ty);
+            }
+        }
+    }
+}
+
+pub fn walk_fn_ret_ty<'v, V: Visitor<'v>>(visitor: &mut V, ret_ty: &'v FunctionRetTy) {
+    if let Return(ref output_ty) = *ret_ty {
+        visitor.visit_ty(output_ty)
+    }
+}
+
+pub fn walk_fn_decl<'v, V: Visitor<'v>>(visitor: &mut V, function_declaration: &'v FnDecl) {
+    for argument in &function_declaration.inputs {
+        visitor.visit_pat(&argument.pat);
+        visitor.visit_ty(&argument.ty)
+    }
+    walk_fn_ret_ty(visitor, &function_declaration.output)
+}
+
+pub fn walk_fn_decl_nopat<'v, V: Visitor<'v>>(visitor: &mut V, function_declaration: &'v FnDecl) {
+    for argument in &function_declaration.inputs {
+        visitor.visit_ty(&argument.ty)
+    }
+    walk_fn_ret_ty(visitor, &function_declaration.output)
+}
+
+pub fn walk_fn_kind<'v, V: Visitor<'v>>(visitor: &mut V, function_kind: FnKind<'v>) {
+    match function_kind {
+        FnKind::ItemFn(_, generics, _, _, _, _, _) => {
+            visitor.visit_generics(generics);
+        }
+        FnKind::Method(_, sig, _, _) => {
+            visitor.visit_generics(&sig.generics);
+            visitor.visit_explicit_self(&sig.explicit_self);
+        }
+        FnKind::Closure(_) => {}
+    }
+}
+
+pub fn walk_fn<'v, V: Visitor<'v>>(visitor: &mut V,
+                                   function_kind: FnKind<'v>,
+                                   function_declaration: &'v FnDecl,
+                                   function_body: &'v Block,
+                                   _span: Span) {
+    walk_fn_decl(visitor, function_declaration);
+    walk_fn_kind(visitor, function_kind);
+    visitor.visit_block(function_body)
+}
+
+pub fn walk_trait_item<'v, V: Visitor<'v>>(visitor: &mut V, trait_item: &'v TraitItem) {
+    visitor.visit_name(trait_item.span, trait_item.name);
+    walk_list!(visitor, visit_attribute, &trait_item.attrs);
+    match trait_item.node {
+        ConstTraitItem(ref ty, ref default) => {
+            visitor.visit_ty(ty);
+            walk_list!(visitor, visit_expr, default);
+        }
+        MethodTraitItem(ref sig, None) => {
+            visitor.visit_explicit_self(&sig.explicit_self);
+            visitor.visit_generics(&sig.generics);
+            walk_fn_decl(visitor, &sig.decl);
+        }
+        MethodTraitItem(ref sig, Some(ref body)) => {
+            visitor.visit_fn(FnKind::Method(trait_item.name,
+                                            sig,
+                                            None,
+                                            &trait_item.attrs),
+                             &sig.decl,
+                             body,
+                             trait_item.span,
+                             trait_item.id);
+        }
+        TypeTraitItem(ref bounds, ref default) => {
+            walk_list!(visitor, visit_ty_param_bound, bounds);
+            walk_list!(visitor, visit_ty, default);
+        }
+    }
+}
+
+pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplItem) {
+    visitor.visit_name(impl_item.span, impl_item.name);
+    walk_list!(visitor, visit_attribute, &impl_item.attrs);
+    match impl_item.node {
+        ImplItemKind::Const(ref ty, ref expr) => {
+            visitor.visit_ty(ty);
+            visitor.visit_expr(expr);
+        }
+        ImplItemKind::Method(ref sig, ref body) => {
+            visitor.visit_fn(FnKind::Method(impl_item.name,
+                                            sig,
+                                            Some(&impl_item.vis),
+                                            &impl_item.attrs),
+                             &sig.decl,
+                             body,
+                             impl_item.span,
+                             impl_item.id);
+        }
+        ImplItemKind::Type(ref ty) => {
+            visitor.visit_ty(ty);
+        }
+    }
+}
+
+pub fn walk_struct_def<'v, V: Visitor<'v>>(visitor: &mut V, struct_definition: &'v VariantData) {
+    walk_list!(visitor, visit_struct_field, struct_definition.fields());
+}
+
+pub fn walk_struct_field<'v, V: Visitor<'v>>(visitor: &mut V, struct_field: &'v StructField) {
+    visitor.visit_name(struct_field.span, struct_field.name);
+    visitor.visit_ty(&struct_field.ty);
+    walk_list!(visitor, visit_attribute, &struct_field.attrs);
+}
+
+pub fn walk_block<'v, V: Visitor<'v>>(visitor: &mut V, block: &'v Block) {
+    walk_list!(visitor, visit_stmt, &block.stmts);
+    walk_list!(visitor, visit_expr, &block.expr);
+}
+
+pub fn walk_stmt<'v, V: Visitor<'v>>(visitor: &mut V, statement: &'v Stmt) {
+    match statement.node {
+        StmtDecl(ref declaration, _) => visitor.visit_decl(declaration),
+        StmtExpr(ref expression, _) | StmtSemi(ref expression, _) => {
+            visitor.visit_expr(expression)
+        }
+    }
+}
+
+pub fn walk_decl<'v, V: Visitor<'v>>(visitor: &mut V, declaration: &'v Decl) {
+    match declaration.node {
+        DeclLocal(ref local) => visitor.visit_local(local),
+        DeclItem(item) => visitor.visit_nested_item(item),
+    }
+}
+
+pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) {
+    match expression.node {
+        ExprBox(ref subexpression) => {
+            visitor.visit_expr(subexpression)
+        }
+        ExprVec(ref subexpressions) => {
+            walk_list!(visitor, visit_expr, subexpressions);
+        }
+        ExprRepeat(ref element, ref count) => {
+            visitor.visit_expr(element);
+            visitor.visit_expr(count)
+        }
+        ExprStruct(ref path, ref fields, ref optional_base) => {
+            visitor.visit_path(path, expression.id);
+            for field in fields {
+                visitor.visit_name(field.name.span, field.name.node);
+                visitor.visit_expr(&field.expr)
+            }
+            walk_list!(visitor, visit_expr, optional_base);
+        }
+        ExprTup(ref subexpressions) => {
+            walk_list!(visitor, visit_expr, subexpressions);
+        }
+        ExprCall(ref callee_expression, ref arguments) => {
+            walk_list!(visitor, visit_expr, arguments);
+            visitor.visit_expr(callee_expression)
+        }
+        ExprMethodCall(ref name, ref types, ref arguments) => {
+            visitor.visit_name(name.span, name.node);
+            walk_list!(visitor, visit_expr, arguments);
+            walk_list!(visitor, visit_ty, types);
+        }
+        ExprBinary(_, ref left_expression, ref right_expression) => {
+            visitor.visit_expr(left_expression);
+            visitor.visit_expr(right_expression)
+        }
+        ExprAddrOf(_, ref subexpression) | ExprUnary(_, ref subexpression) => {
+            visitor.visit_expr(subexpression)
+        }
+        ExprLit(_) => {}
+        ExprCast(ref subexpression, ref typ) | ExprType(ref subexpression, ref typ) => {
+            visitor.visit_expr(subexpression);
+            visitor.visit_ty(typ)
+        }
+        ExprIf(ref head_expression, ref if_block, ref optional_else) => {
+            visitor.visit_expr(head_expression);
+            visitor.visit_block(if_block);
+            walk_list!(visitor, visit_expr, optional_else);
+        }
+        ExprWhile(ref subexpression, ref block, opt_ident) => {
+            visitor.visit_expr(subexpression);
+            visitor.visit_block(block);
+            walk_opt_ident(visitor, expression.span, opt_ident)
+        }
+        ExprLoop(ref block, opt_ident) => {
+            visitor.visit_block(block);
+            walk_opt_ident(visitor, expression.span, opt_ident)
+        }
+        ExprMatch(ref subexpression, ref arms, _) => {
+            visitor.visit_expr(subexpression);
+            walk_list!(visitor, visit_arm, arms);
+        }
+        ExprClosure(_, ref function_declaration, ref body) => {
+            visitor.visit_fn(FnKind::Closure(expression.attrs.as_attr_slice()),
+                             function_declaration,
+                             body,
+                             expression.span,
+                             expression.id)
+        }
+        ExprBlock(ref block) => visitor.visit_block(block),
+        ExprAssign(ref left_hand_expression, ref right_hand_expression) => {
+            visitor.visit_expr(right_hand_expression);
+            visitor.visit_expr(left_hand_expression)
+        }
+        ExprAssignOp(_, ref left_expression, ref right_expression) => {
+            visitor.visit_expr(right_expression);
+            visitor.visit_expr(left_expression)
+        }
+        ExprField(ref subexpression, ref name) => {
+            visitor.visit_expr(subexpression);
+            visitor.visit_name(name.span, name.node);
+        }
+        ExprTupField(ref subexpression, _) => {
+            visitor.visit_expr(subexpression);
+        }
+        ExprIndex(ref main_expression, ref index_expression) => {
+            visitor.visit_expr(main_expression);
+            visitor.visit_expr(index_expression)
+        }
+        ExprPath(ref maybe_qself, ref path) => {
+            if let Some(ref qself) = *maybe_qself {
+                visitor.visit_ty(&qself.ty);
+            }
+            visitor.visit_path(path, expression.id)
+        }
+        ExprBreak(ref opt_sp_ident) | ExprAgain(ref opt_sp_ident) => {
+            for sp_ident in opt_sp_ident {
+                visitor.visit_ident(sp_ident.span, sp_ident.node);
+            }
+        }
+        ExprRet(ref optional_expression) => {
+            walk_list!(visitor, visit_expr, optional_expression);
+        }
+        ExprInlineAsm(_, ref outputs, ref inputs) => {
+            for output in outputs {
+                visitor.visit_expr(output)
+            }
+            for input in inputs {
+                visitor.visit_expr(input)
+            }
+        }
+    }
+
+    visitor.visit_expr_post(expression)
+}
+
+pub fn walk_arm<'v, V: Visitor<'v>>(visitor: &mut V, arm: &'v Arm) {
+    walk_list!(visitor, visit_pat, &arm.pats);
+    walk_list!(visitor, visit_expr, &arm.guard);
+    visitor.visit_expr(&arm.body);
+    walk_list!(visitor, visit_attribute, &arm.attrs);
+}
+
+#[derive(Copy, Clone, RustcEncodable, RustcDecodable, Debug)]
+pub struct IdRange {
+    pub min: NodeId,
+    pub max: NodeId,
+}
+
+impl IdRange {
+    pub fn max() -> IdRange {
+        // Starts out empty (min > max); `add` grows the range as ids are seen.
+        IdRange {
+            min: u32::MAX,
+            max: u32::MIN,
+        }
+    }
+
+    pub fn empty(&self) -> bool {
+        self.min >= self.max
+    }
+
+    pub fn add(&mut self, id: NodeId) {
+        self.min = cmp::min(self.min, id);
+        self.max = cmp::max(self.max, id + 1);
+    }
+}
+
+pub trait IdVisitingOperation {
+    fn visit_id(&mut self, node_id: NodeId);
+}
+
+pub struct IdRangeComputingVisitor {
+    pub result: IdRange,
+}
+
+impl IdRangeComputingVisitor {
+    pub fn new() -> IdRangeComputingVisitor {
+        IdRangeComputingVisitor { result: IdRange::max() }
+    }
+
+    pub fn result(&self) -> IdRange {
+        self.result
+    }
+}
+
+impl IdVisitingOperation for IdRangeComputingVisitor {
+    fn visit_id(&mut self, id: NodeId) {
+        self.result.add(id);
+    }
+}
+
+pub struct IdVisitor<'a, O: 'a> {
+    operation: &'a mut O,
+
+    // In general, the id visitor visits the contents of an item, but
+    // not nested trait/impl items or other nested items.
+    // The base visitor itself always skips nested items, but not
+    // trait/impl items. This means in particular that if you start by
+    // visiting a trait or an impl, you should not visit the
+    // trait/impl items respectively.  This is handled by setting
+    // `skip_members` to true when `visit_item` is on the stack. This
+    // way, if the user begins by calling `visit_trait_item`, we will
+    // visit the trait item, but if they begin with `visit_item`, we
+    // won't visit the (nested) trait items.
+    skip_members: bool,
+}
+
+impl<'a, O: IdVisitingOperation> IdVisitor<'a, O> {
+    pub fn new(operation: &'a mut O) -> IdVisitor<'a, O> {
+        IdVisitor { operation: operation, skip_members: false }
+    }
+
+    fn visit_generics_helper(&mut self, generics: &Generics) {
+        for type_parameter in generics.ty_params.iter() {
+            self.operation.visit_id(type_parameter.id)
+        }
+        for lifetime in &generics.lifetimes {
+            self.operation.visit_id(lifetime.lifetime.id)
+        }
+    }
+}
+
+impl<'a, 'v, O: IdVisitingOperation> Visitor<'v> for IdVisitor<'a, O> {
+    fn visit_mod(&mut self, module: &Mod, _: Span, node_id: NodeId) {
+        self.operation.visit_id(node_id);
+        walk_mod(self, module)
+    }
+
+    fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) {
+        self.operation.visit_id(foreign_item.id);
+        walk_foreign_item(self, foreign_item)
+    }
+
+    fn visit_item(&mut self, item: &Item) {
+        assert!(!self.skip_members);
+        self.skip_members = true;
+
+        self.operation.visit_id(item.id);
+        match item.node {
+            ItemUse(ref view_path) => {
+                match view_path.node {
+                    ViewPathSimple(_, _) |
+                    ViewPathGlob(_) => {}
+                    ViewPathList(_, ref paths) => {
+                        for path in paths {
+                            self.operation.visit_id(path.node.id())
+                        }
+                    }
+                }
+            }
+            _ => {}
+        }
+        walk_item(self, item);
+
+        self.skip_members = false;
+    }
+
+    fn visit_local(&mut self, local: &Local) {
+        self.operation.visit_id(local.id);
+        walk_local(self, local)
+    }
+
+    fn visit_block(&mut self, block: &Block) {
+        self.operation.visit_id(block.id);
+        walk_block(self, block)
+    }
+
+    fn visit_stmt(&mut self, statement: &Stmt) {
+        self.operation.visit_id(statement.node.id());
+        walk_stmt(self, statement)
+    }
+
+    fn visit_pat(&mut self, pattern: &Pat) {
+        self.operation.visit_id(pattern.id);
+        walk_pat(self, pattern)
+    }
+
+    fn visit_expr(&mut self, expression: &Expr) {
+        self.operation.visit_id(expression.id);
+        walk_expr(self, expression)
+    }
+
+    fn visit_ty(&mut self, typ: &Ty) {
+        self.operation.visit_id(typ.id);
+        walk_ty(self, typ)
+    }
+
+    fn visit_generics(&mut self, generics: &Generics) {
+        self.visit_generics_helper(generics);
+        walk_generics(self, generics)
+    }
+
+    fn visit_fn(&mut self,
+                function_kind: FnKind<'v>,
+                function_declaration: &'v FnDecl,
+                block: &'v Block,
+                span: Span,
+                node_id: NodeId) {
+        self.operation.visit_id(node_id);
+
+        match function_kind {
+            FnKind::ItemFn(_, generics, _, _, _, _, _) => {
+                self.visit_generics_helper(generics)
+            }
+            FnKind::Method(_, sig, _, _) => {
+                self.visit_generics_helper(&sig.generics)
+            }
+            FnKind::Closure(_) => {}
+        }
+
+        for argument in &function_declaration.inputs {
+            self.operation.visit_id(argument.id)
+        }
+
+        walk_fn(self, function_kind, function_declaration, block, span);
+    }
+
+    fn visit_struct_field(&mut self, struct_field: &StructField) {
+        self.operation.visit_id(struct_field.id);
+        walk_struct_field(self, struct_field)
+    }
+
+    fn visit_variant_data(&mut self,
+                          struct_def: &VariantData,
+                          _: Name,
+                          _: &Generics,
+                          _: NodeId,
+                          _: Span) {
+        self.operation.visit_id(struct_def.id());
+        walk_struct_def(self, struct_def);
+    }
+
+    fn visit_trait_item(&mut self, ti: &TraitItem) {
+        if !self.skip_members {
+            self.operation.visit_id(ti.id);
+            walk_trait_item(self, ti);
+        }
+    }
+
+    fn visit_impl_item(&mut self, ii: &ImplItem) {
+        if !self.skip_members {
+            self.operation.visit_id(ii.id);
+            walk_impl_item(self, ii);
+        }
+    }
+
+    fn visit_lifetime(&mut self, lifetime: &Lifetime) {
+        self.operation.visit_id(lifetime.id);
+    }
+
+    fn visit_lifetime_def(&mut self, def: &LifetimeDef) {
+        self.visit_lifetime(&def.lifetime);
+    }
+
+    fn visit_trait_ref(&mut self, trait_ref: &TraitRef) {
+        self.operation.visit_id(trait_ref.ref_id);
+        walk_trait_ref(self, trait_ref);
+    }
+}
+
+/// Computes the id range for a single fn body, ignoring nested items.
+pub fn compute_id_range_for_fn_body(fk: FnKind,
+                                    decl: &FnDecl,
+                                    body: &Block,
+                                    sp: Span,
+                                    id: NodeId)
+                                    -> IdRange {
+    let mut visitor = IdRangeComputingVisitor { result: IdRange::max() };
+    let mut id_visitor = IdVisitor::new(&mut visitor);
+    id_visitor.visit_fn(fk, decl, body, sp, id);
+    id_visitor.operation.result
+}
diff --git a/src/librustc/hir/lowering.rs b/src/librustc/hir/lowering.rs
new file mode 100644 (file)
index 0000000..738a04d
--- /dev/null
@@ -0,0 +1,2148 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Lowers the AST to the HIR.
+//
+// Since the AST and HIR are fairly similar, this is mostly a simple procedure,
+// much like a fold. Where lowering involves a bit more work things get more
+// interesting and there are some invariants you should know about. These mostly
+// concern spans and ids.
+//
+// Spans are assigned to AST nodes during parsing and then are modified during
+// expansion to indicate the origin of a node and the process it went through
+// while being expanded. Ids are assigned to AST nodes just before lowering.
+//
+// For the simpler lowering steps, ids and spans should be preserved. Unlike
+// expansion, we do not record the process of lowering in the spans, so spans
+// should not be modified here. When creating a new node (as opposed to
+// 'folding' an existing one), create a new id using `next_id()`.
+//
+// You must ensure that ids are unique. That means that you should only use the
+// id from an AST node in a single HIR node (you can assume that AST node ids
+// are unique). Every new node must have a unique id. Avoid cloning HIR nodes.
+// If you do, you must then set the new node's id to a fresh one.
+//
+// Lowering must be reproducible (the compiler only lowers once, but tools and
+// custom lints may lower an AST node to a HIR node to interact with the
+// compiler). The most interesting bit of this is ids: if you lower an AST node
+// and create new HIR nodes with fresh ids, then when re-lowering the same node
+// you must ensure you get the same ids! To do this, we keep track of the next
+// id when we translate a node which requires new ids. By checking this cache
+// and using node ids starting with the cached id, we ensure ids are
+// reproducible. To use this system, wrap the lowering of any node that needs
+// fresh ids in the `cache_ids` helper below, which takes care of setting and
+// restoring the cached id around the lowering of that node.
+//
+// This whole system relies on node ids being incremented one at a time and
+// all increments being for lowering. This means that you should not call any
+// non-lowering function which will use new node ids.
+//
+// We must also cache gensym'ed Idents to ensure that we get the same Ident
+// every time we lower a node with gensym'ed names. One consequence of this is
+// that you can only gensym a name once in a lowering (you don't need to worry
+// about nested lowering though). That's because we cache based on the name and
+// the currently cached node id, which is unique per lowered node.
+//
+// Spans are used for error messages and for tools to map semantics back to
+// source code. It is therefore not as important to be strict about how spans
+// are used as it is for ids (you can't break the compiler by screwing up a
+// span). Obviously, a
+// HIR node can only have a single span. But multiple nodes can have the same
+// span and spans don't need to be kept in order, etc. Where code is preserved
+// by lowering, it should have the same span as in the AST. Where HIR nodes are
+// new it is probably best to give a span for the whole AST node being lowered.
+// All nodes should have real spans; don't use dummy spans. Tools are likely to
+// get confused if the spans from leaf AST nodes occur in multiple places
+// in the HIR, especially for multiple identifiers.
+
+use hir;
+
+use std::collections::BTreeMap;
+use std::collections::HashMap;
+use std::iter;
+use syntax::ast::*;
+use syntax::attr::{ThinAttributes, ThinAttributesExt};
+use syntax::errors::Handler;
+use syntax::ext::mtwt;
+use syntax::ptr::P;
+use syntax::codemap::{respan, Spanned, Span};
+use syntax::parse::token;
+use syntax::std_inject;
+use syntax::visit::{self, Visitor};
+
+use std::cell::{Cell, RefCell};
+
+pub struct LoweringContext<'a> {
+    crate_root: Option<&'static str>,
+    // Map AST ids to ids used for expanded nodes.
+    id_cache: RefCell<HashMap<NodeId, NodeId>>,
+    // Used if there are no cached ids for the current node.
+    id_assigner: &'a NodeIdAssigner,
+    // 0 == no cached id. Otherwise, this is the next id to hand out; it must
+    // be incremented in step with the ids handed out on the first lowering.
+    cached_id: Cell<u32>,
+    // Keep track of gensym'ed idents.
+    gensym_cache: RefCell<HashMap<(NodeId, &'static str), hir::Ident>>,
+    // A copy of cached_id, except that it is also set to an id while a node is
+    // lowered for the first time.
+    gensym_key: Cell<u32>,
+}
+
+impl<'a, 'hir> LoweringContext<'a> {
+    pub fn new(id_assigner: &'a NodeIdAssigner, c: Option<&Crate>) -> LoweringContext<'a> {
+        let crate_root = c.and_then(|c| {
+            if std_inject::no_core(c) {
+                None
+            } else if std_inject::no_std(c) {
+                Some("core")
+            } else {
+                Some("std")
+            }
+        });
+
+        LoweringContext {
+            crate_root: crate_root,
+            id_cache: RefCell::new(HashMap::new()),
+            id_assigner: id_assigner,
+            cached_id: Cell::new(0),
+            gensym_cache: RefCell::new(HashMap::new()),
+            gensym_key: Cell::new(0),
+        }
+    }
+
+    fn next_id(&self) -> NodeId {
+        let cached_id = self.cached_id.get();
+        if cached_id == 0 {
+            return self.id_assigner.next_node_id();
+        }
+
+        self.cached_id.set(cached_id + 1);
+        cached_id
+    }
+
+    fn str_to_ident(&self, s: &'static str) -> hir::Ident {
+        let gensym_key = self.gensym_key.get();
+        if gensym_key == 0 {
+            return hir::Ident::from_name(token::gensym(s));
+        }
+
+        let cached = self.gensym_cache.borrow().contains_key(&(gensym_key, s));
+        if cached {
+            self.gensym_cache.borrow()[&(gensym_key, s)]
+        } else {
+            let result = hir::Ident::from_name(token::gensym(s));
+            self.gensym_cache.borrow_mut().insert((gensym_key, s), result);
+            result
+        }
+    }
+
+    // Panics if this LoweringContext's NodeIdAssigner is not able to emit diagnostics.
+    fn diagnostic(&self) -> &Handler {
+        self.id_assigner.diagnostic()
+    }
+}
+
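+// A rough end-to-end sketch (illustrative only; `assigner` and `krate` are
+// assumed to be supplied by the driver):
+//
+//     let lctx = LoweringContext::new(&assigner, Some(&krate));
+//     let hir_crate = lower_crate(&lctx, &krate);
+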
+// Utility fn for setting and unsetting the cached id.
+fn cache_ids<'a, OP, R>(lctx: &LoweringContext, expr_id: NodeId, op: OP) -> R
+    where OP: FnOnce(&LoweringContext) -> R
+{
+    // Only reset the id if it was previously 0, i.e., was not cached.
+    // If it was cached, we are in a nested node, but our id count will
+    // still count towards the parent's count.
+    let reset_cached_id = lctx.cached_id.get() == 0;
+    // We always reset gensym_key so that if we use the same name in a nested
+    // node and after that node, they get different values.
+    let old_gensym_key = lctx.gensym_key.get();
+
+    {
+        let id_cache: &mut HashMap<_, _> = &mut lctx.id_cache.borrow_mut();
+
+        if id_cache.contains_key(&expr_id) {
+            let cached_id = lctx.cached_id.get();
+            if cached_id == 0 {
+                // We're entering a node where we need to track ids, but are not
+                // yet tracking.
+                lctx.cached_id.set(id_cache[&expr_id]);
+            } else {
+                // We're already tracking - check that the tracked id is the same
+                // as the expected id.
+                assert!(cached_id == id_cache[&expr_id], "id mismatch");
+            }
+            lctx.gensym_key.set(id_cache[&expr_id]);
+        } else {
+            // We've never lowered this node before, remember it for next time.
+            let next_id = lctx.id_assigner.peek_node_id();
+            id_cache.insert(expr_id, next_id);
+            lctx.gensym_key.set(next_id);
+            // lctx.cached_id is not set when we lower a node for the first time,
+            // only on re-lowering.
+        }
+    }
+
+    let result = op(lctx);
+
+    if reset_cached_id {
+        lctx.cached_id.set(0);
+    }
+    lctx.gensym_key.set(old_gensym_key);
+
+    result
+}
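+
+// Typical use of `cache_ids` (see the `in PLACE EXPR`, range and `if let`
+// desugarings further down): a desugaring that mints fresh ids wraps its body
+// so that re-lowering the same AST node reproduces them, e.g.
+//
+//     return cache_ids(lctx, e.id, |lctx| {
+//         // ... build the desugared HIR, calling lctx.next_id() as needed ...
+//     });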
+
+pub fn lower_ident(_lctx: &LoweringContext, ident: Ident) -> hir::Ident {
+    hir::Ident {
+        name: mtwt::resolve(ident),
+        unhygienic_name: ident.name,
+    }
+}
+
+pub fn lower_attrs(_lctx: &LoweringContext, attrs: &Vec<Attribute>) -> hir::HirVec<Attribute> {
+    attrs.clone().into()
+}
+
+pub fn lower_view_path(lctx: &LoweringContext, view_path: &ViewPath) -> P<hir::ViewPath> {
+    P(Spanned {
+        node: match view_path.node {
+            ViewPathSimple(ident, ref path) => {
+                hir::ViewPathSimple(ident.name, lower_path(lctx, path))
+            }
+            ViewPathGlob(ref path) => {
+                hir::ViewPathGlob(lower_path(lctx, path))
+            }
+            ViewPathList(ref path, ref path_list_idents) => {
+                hir::ViewPathList(lower_path(lctx, path),
+                                  path_list_idents.iter()
+                                                  .map(lower_path_list_item)
+                                                  .collect())
+            }
+        },
+        span: view_path.span,
+    })
+}
+
+fn lower_path_list_item(path_list_ident: &PathListItem) -> hir::PathListItem {
+    Spanned {
+        node: match path_list_ident.node {
+            PathListItemKind::Ident { id, name, rename } => hir::PathListIdent {
+                id: id,
+                name: name.name,
+                rename: rename.map(|x| x.name),
+            },
+            PathListItemKind::Mod { id, rename } => hir::PathListMod {
+                id: id,
+                rename: rename.map(|x| x.name),
+            },
+        },
+        span: path_list_ident.span,
+    }
+}
+
+pub fn lower_arm(lctx: &LoweringContext, arm: &Arm) -> hir::Arm {
+    hir::Arm {
+        attrs: lower_attrs(lctx, &arm.attrs),
+        pats: arm.pats.iter().map(|x| lower_pat(lctx, x)).collect(),
+        guard: arm.guard.as_ref().map(|ref x| lower_expr(lctx, x)),
+        body: lower_expr(lctx, &arm.body),
+    }
+}
+
+pub fn lower_decl(lctx: &LoweringContext, d: &Decl) -> P<hir::Decl> {
+    match d.node {
+        DeclKind::Local(ref l) => P(Spanned {
+            node: hir::DeclLocal(lower_local(lctx, l)),
+            span: d.span,
+        }),
+        DeclKind::Item(ref it) => P(Spanned {
+            node: hir::DeclItem(lower_item_id(lctx, it)),
+            span: d.span,
+        }),
+    }
+}
+
+pub fn lower_ty_binding(lctx: &LoweringContext, b: &TypeBinding) -> hir::TypeBinding {
+    hir::TypeBinding {
+        id: b.id,
+        name: b.ident.name,
+        ty: lower_ty(lctx, &b.ty),
+        span: b.span,
+    }
+}
+
+pub fn lower_ty(lctx: &LoweringContext, t: &Ty) -> P<hir::Ty> {
+    use syntax::ast::TyKind::*;
+    P(hir::Ty {
+        id: t.id,
+        node: match t.node {
+            Infer => hir::TyInfer,
+            Vec(ref ty) => hir::TyVec(lower_ty(lctx, ty)),
+            Ptr(ref mt) => hir::TyPtr(lower_mt(lctx, mt)),
+            Rptr(ref region, ref mt) => {
+                hir::TyRptr(lower_opt_lifetime(lctx, region), lower_mt(lctx, mt))
+            }
+            BareFn(ref f) => {
+                hir::TyBareFn(P(hir::BareFnTy {
+                    lifetimes: lower_lifetime_defs(lctx, &f.lifetimes),
+                    unsafety: lower_unsafety(lctx, f.unsafety),
+                    abi: f.abi,
+                    decl: lower_fn_decl(lctx, &f.decl),
+                }))
+            }
+            Tup(ref tys) => hir::TyTup(tys.iter().map(|ty| lower_ty(lctx, ty)).collect()),
+            Paren(ref ty) => {
+                return lower_ty(lctx, ty);
+            }
+            Path(ref qself, ref path) => {
+                let qself = qself.as_ref().map(|&QSelf { ref ty, position }| {
+                    hir::QSelf {
+                        ty: lower_ty(lctx, ty),
+                        position: position,
+                    }
+                });
+                hir::TyPath(qself, lower_path(lctx, path))
+            }
+            ObjectSum(ref ty, ref bounds) => {
+                hir::TyObjectSum(lower_ty(lctx, ty), lower_bounds(lctx, bounds))
+            }
+            FixedLengthVec(ref ty, ref e) => {
+                hir::TyFixedLengthVec(lower_ty(lctx, ty), lower_expr(lctx, e))
+            }
+            Typeof(ref expr) => {
+                hir::TyTypeof(lower_expr(lctx, expr))
+            }
+            PolyTraitRef(ref bounds) => {
+                hir::TyPolyTraitRef(bounds.iter().map(|b| lower_ty_param_bound(lctx, b)).collect())
+            }
+            Mac(_) => panic!("TyMac should have been expanded by now."),
+        },
+        span: t.span,
+    })
+}
+
+pub fn lower_foreign_mod(lctx: &LoweringContext, fm: &ForeignMod) -> hir::ForeignMod {
+    hir::ForeignMod {
+        abi: fm.abi,
+        items: fm.items.iter().map(|x| lower_foreign_item(lctx, x)).collect(),
+    }
+}
+
+pub fn lower_variant(lctx: &LoweringContext, v: &Variant) -> hir::Variant {
+    Spanned {
+        node: hir::Variant_ {
+            name: v.node.name.name,
+            attrs: lower_attrs(lctx, &v.node.attrs),
+            data: lower_variant_data(lctx, &v.node.data),
+            disr_expr: v.node.disr_expr.as_ref().map(|e| lower_expr(lctx, e)),
+        },
+        span: v.span,
+    }
+}
+
+// Path segments are usually unhygienic; hygienic path segments can occur only
+// in identifier-like paths originating from `ExprPath`.
+// Make life simpler for rustc_resolve by renaming only such segments.
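+// For example (illustrative): a multi-segment path like `foo::bar` keeps its
+// plain names, while a lone `x` in expression position may be renamed through
+// `mtwt::resolve` so that macro-introduced bindings don't collide.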
+pub fn lower_path_full(lctx: &LoweringContext, p: &Path, maybe_hygienic: bool) -> hir::Path {
+    let maybe_hygienic = maybe_hygienic && !p.global && p.segments.len() == 1;
+    hir::Path {
+        global: p.global,
+        segments: p.segments
+                   .iter()
+                   .map(|&PathSegment { identifier, ref parameters }| {
+                       hir::PathSegment {
+                           identifier: if maybe_hygienic {
+                               lower_ident(lctx, identifier)
+                           } else {
+                               hir::Ident::from_name(identifier.name)
+                           },
+                           parameters: lower_path_parameters(lctx, parameters),
+                       }
+                   })
+                   .collect(),
+        span: p.span,
+    }
+}
+
+pub fn lower_path(lctx: &LoweringContext, p: &Path) -> hir::Path {
+    lower_path_full(lctx, p, false)
+}
+
+pub fn lower_path_parameters(lctx: &LoweringContext,
+                             path_parameters: &PathParameters)
+                             -> hir::PathParameters {
+    match *path_parameters {
+        PathParameters::AngleBracketed(ref data) =>
+            hir::AngleBracketedParameters(lower_angle_bracketed_parameter_data(lctx, data)),
+        PathParameters::Parenthesized(ref data) =>
+            hir::ParenthesizedParameters(lower_parenthesized_parameter_data(lctx, data)),
+    }
+}
+
+pub fn lower_angle_bracketed_parameter_data(lctx: &LoweringContext,
+                                            data: &AngleBracketedParameterData)
+                                            -> hir::AngleBracketedParameterData {
+    let &AngleBracketedParameterData { ref lifetimes, ref types, ref bindings } = data;
+    hir::AngleBracketedParameterData {
+        lifetimes: lower_lifetimes(lctx, lifetimes),
+        types: types.iter().map(|ty| lower_ty(lctx, ty)).collect(),
+        bindings: bindings.iter().map(|b| lower_ty_binding(lctx, b)).collect(),
+    }
+}
+
+pub fn lower_parenthesized_parameter_data(lctx: &LoweringContext,
+                                          data: &ParenthesizedParameterData)
+                                          -> hir::ParenthesizedParameterData {
+    let &ParenthesizedParameterData { ref inputs, ref output, span } = data;
+    hir::ParenthesizedParameterData {
+        inputs: inputs.iter().map(|ty| lower_ty(lctx, ty)).collect(),
+        output: output.as_ref().map(|ty| lower_ty(lctx, ty)),
+        span: span,
+    }
+}
+
+pub fn lower_local(lctx: &LoweringContext, l: &Local) -> P<hir::Local> {
+    P(hir::Local {
+        id: l.id,
+        ty: l.ty.as_ref().map(|t| lower_ty(lctx, t)),
+        pat: lower_pat(lctx, &l.pat),
+        init: l.init.as_ref().map(|e| lower_expr(lctx, e)),
+        span: l.span,
+        attrs: l.attrs.clone(),
+    })
+}
+
+pub fn lower_explicit_self_underscore(lctx: &LoweringContext,
+                                      es: &SelfKind)
+                                      -> hir::ExplicitSelf_ {
+    match *es {
+        SelfKind::Static => hir::SelfStatic,
+        SelfKind::Value(v) => hir::SelfValue(v.name),
+        SelfKind::Region(ref lifetime, m, ident) => {
+            hir::SelfRegion(lower_opt_lifetime(lctx, lifetime),
+                            lower_mutability(lctx, m),
+                            ident.name)
+        }
+        SelfKind::Explicit(ref typ, ident) => {
+            hir::SelfExplicit(lower_ty(lctx, typ), ident.name)
+        }
+    }
+}
+
+pub fn lower_mutability(_lctx: &LoweringContext, m: Mutability) -> hir::Mutability {
+    match m {
+        Mutability::Mutable => hir::MutMutable,
+        Mutability::Immutable => hir::MutImmutable,
+    }
+}
+
+pub fn lower_explicit_self(lctx: &LoweringContext, s: &ExplicitSelf) -> hir::ExplicitSelf {
+    Spanned {
+        node: lower_explicit_self_underscore(lctx, &s.node),
+        span: s.span,
+    }
+}
+
+pub fn lower_arg(lctx: &LoweringContext, arg: &Arg) -> hir::Arg {
+    hir::Arg {
+        id: arg.id,
+        pat: lower_pat(lctx, &arg.pat),
+        ty: lower_ty(lctx, &arg.ty),
+    }
+}
+
+pub fn lower_fn_decl(lctx: &LoweringContext, decl: &FnDecl) -> P<hir::FnDecl> {
+    P(hir::FnDecl {
+        inputs: decl.inputs.iter().map(|x| lower_arg(lctx, x)).collect(),
+        output: match decl.output {
+            FunctionRetTy::Ty(ref ty) => hir::Return(lower_ty(lctx, ty)),
+            FunctionRetTy::Default(span) => hir::DefaultReturn(span),
+            FunctionRetTy::None(span) => hir::NoReturn(span),
+        },
+        variadic: decl.variadic,
+    })
+}
+
+pub fn lower_ty_param_bound(lctx: &LoweringContext, tpb: &TyParamBound) -> hir::TyParamBound {
+    match *tpb {
+        TraitTyParamBound(ref ty, modifier) => {
+            hir::TraitTyParamBound(lower_poly_trait_ref(lctx, ty),
+                                   lower_trait_bound_modifier(lctx, modifier))
+        }
+        RegionTyParamBound(ref lifetime) => {
+            hir::RegionTyParamBound(lower_lifetime(lctx, lifetime))
+        }
+    }
+}
+
+pub fn lower_ty_param(lctx: &LoweringContext, tp: &TyParam) -> hir::TyParam {
+    hir::TyParam {
+        id: tp.id,
+        name: tp.ident.name,
+        bounds: lower_bounds(lctx, &tp.bounds),
+        default: tp.default.as_ref().map(|x| lower_ty(lctx, x)),
+        span: tp.span,
+    }
+}
+
+pub fn lower_ty_params(lctx: &LoweringContext,
+                       tps: &P<[TyParam]>)
+                       -> hir::HirVec<hir::TyParam> {
+    tps.iter().map(|tp| lower_ty_param(lctx, tp)).collect()
+}
+
+pub fn lower_lifetime(_lctx: &LoweringContext, l: &Lifetime) -> hir::Lifetime {
+    hir::Lifetime {
+        id: l.id,
+        name: l.name,
+        span: l.span,
+    }
+}
+
+pub fn lower_lifetime_def(lctx: &LoweringContext, l: &LifetimeDef) -> hir::LifetimeDef {
+    hir::LifetimeDef {
+        lifetime: lower_lifetime(lctx, &l.lifetime),
+        bounds: lower_lifetimes(lctx, &l.bounds),
+    }
+}
+
+pub fn lower_lifetimes(lctx: &LoweringContext, lts: &Vec<Lifetime>) -> hir::HirVec<hir::Lifetime> {
+    lts.iter().map(|l| lower_lifetime(lctx, l)).collect()
+}
+
+pub fn lower_lifetime_defs(lctx: &LoweringContext,
+                           lts: &Vec<LifetimeDef>)
+                           -> hir::HirVec<hir::LifetimeDef> {
+    lts.iter().map(|l| lower_lifetime_def(lctx, l)).collect()
+}
+
+pub fn lower_opt_lifetime(lctx: &LoweringContext,
+                          o_lt: &Option<Lifetime>)
+                          -> Option<hir::Lifetime> {
+    o_lt.as_ref().map(|lt| lower_lifetime(lctx, lt))
+}
+
+pub fn lower_generics(lctx: &LoweringContext, g: &Generics) -> hir::Generics {
+    hir::Generics {
+        ty_params: lower_ty_params(lctx, &g.ty_params),
+        lifetimes: lower_lifetime_defs(lctx, &g.lifetimes),
+        where_clause: lower_where_clause(lctx, &g.where_clause),
+    }
+}
+
+pub fn lower_where_clause(lctx: &LoweringContext, wc: &WhereClause) -> hir::WhereClause {
+    hir::WhereClause {
+        id: wc.id,
+        predicates: wc.predicates
+                      .iter()
+                      .map(|predicate| lower_where_predicate(lctx, predicate))
+                      .collect(),
+    }
+}
+
+pub fn lower_where_predicate(lctx: &LoweringContext,
+                             pred: &WherePredicate)
+                             -> hir::WherePredicate {
+    match *pred {
+        WherePredicate::BoundPredicate(WhereBoundPredicate{ ref bound_lifetimes,
+                                                            ref bounded_ty,
+                                                            ref bounds,
+                                                            span}) => {
+            hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
+                bound_lifetimes: lower_lifetime_defs(lctx, bound_lifetimes),
+                bounded_ty: lower_ty(lctx, bounded_ty),
+                bounds: bounds.iter().map(|x| lower_ty_param_bound(lctx, x)).collect(),
+                span: span,
+            })
+        }
+        WherePredicate::RegionPredicate(WhereRegionPredicate{ ref lifetime,
+                                                              ref bounds,
+                                                              span}) => {
+            hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
+                span: span,
+                lifetime: lower_lifetime(lctx, lifetime),
+                bounds: bounds.iter().map(|bound| lower_lifetime(lctx, bound)).collect(),
+            })
+        }
+        WherePredicate::EqPredicate(WhereEqPredicate{ id,
+                                                      ref path,
+                                                      ref ty,
+                                                      span}) => {
+            hir::WherePredicate::EqPredicate(hir::WhereEqPredicate {
+                id: id,
+                path: lower_path(lctx, path),
+                ty: lower_ty(lctx, ty),
+                span: span,
+            })
+        }
+    }
+}
+
+pub fn lower_variant_data(lctx: &LoweringContext, vdata: &VariantData) -> hir::VariantData {
+    match *vdata {
+        VariantData::Struct(ref fields, id) => {
+            hir::VariantData::Struct(fields.iter()
+                                           .enumerate()
+                                           .map(|f| lower_struct_field(lctx, f))
+                                           .collect(),
+                                     id)
+        }
+        VariantData::Tuple(ref fields, id) => {
+            hir::VariantData::Tuple(fields.iter()
+                                          .enumerate()
+                                          .map(|f| lower_struct_field(lctx, f))
+                                          .collect(),
+                                    id)
+        }
+        VariantData::Unit(id) => hir::VariantData::Unit(id),
+    }
+}
+
+pub fn lower_trait_ref(lctx: &LoweringContext, p: &TraitRef) -> hir::TraitRef {
+    hir::TraitRef {
+        path: lower_path(lctx, &p.path),
+        ref_id: p.ref_id,
+    }
+}
+
+pub fn lower_poly_trait_ref(lctx: &LoweringContext, p: &PolyTraitRef) -> hir::PolyTraitRef {
+    hir::PolyTraitRef {
+        bound_lifetimes: lower_lifetime_defs(lctx, &p.bound_lifetimes),
+        trait_ref: lower_trait_ref(lctx, &p.trait_ref),
+        span: p.span,
+    }
+}
+
+pub fn lower_struct_field(lctx: &LoweringContext,
+                          (index, f): (usize, &StructField))
+                          -> hir::StructField {
+    hir::StructField {
+        span: f.span,
+        id: f.id,
+        name: f.ident.map(|ident| ident.name).unwrap_or(token::intern(&index.to_string())),
+        vis: lower_visibility(lctx, &f.vis),
+        ty: lower_ty(lctx, &f.ty),
+        attrs: lower_attrs(lctx, &f.attrs),
+    }
+}
+
+pub fn lower_field(lctx: &LoweringContext, f: &Field) -> hir::Field {
+    hir::Field {
+        name: respan(f.ident.span, f.ident.node.name),
+        expr: lower_expr(lctx, &f.expr),
+        span: f.span,
+    }
+}
+
+pub fn lower_mt(lctx: &LoweringContext, mt: &MutTy) -> hir::MutTy {
+    hir::MutTy {
+        ty: lower_ty(lctx, &mt.ty),
+        mutbl: lower_mutability(lctx, mt.mutbl),
+    }
+}
+
+pub fn lower_opt_bounds(lctx: &LoweringContext,
+                        b: &Option<TyParamBounds>)
+                        -> Option<hir::TyParamBounds> {
+    b.as_ref().map(|ref bounds| lower_bounds(lctx, bounds))
+}
+
+fn lower_bounds(lctx: &LoweringContext, bounds: &TyParamBounds) -> hir::TyParamBounds {
+    bounds.iter().map(|bound| lower_ty_param_bound(lctx, bound)).collect()
+}
+
+pub fn lower_block(lctx: &LoweringContext, b: &Block) -> P<hir::Block> {
+    P(hir::Block {
+        id: b.id,
+        stmts: b.stmts.iter().map(|s| lower_stmt(lctx, s)).collect(),
+        expr: b.expr.as_ref().map(|ref x| lower_expr(lctx, x)),
+        rules: lower_block_check_mode(lctx, &b.rules),
+        span: b.span,
+    })
+}
+
+pub fn lower_item_kind(lctx: &LoweringContext, i: &ItemKind) -> hir::Item_ {
+    match *i {
+        ItemKind::ExternCrate(string) => hir::ItemExternCrate(string),
+        ItemKind::Use(ref view_path) => {
+            hir::ItemUse(lower_view_path(lctx, view_path))
+        }
+        ItemKind::Static(ref t, m, ref e) => {
+            hir::ItemStatic(lower_ty(lctx, t),
+                            lower_mutability(lctx, m),
+                            lower_expr(lctx, e))
+        }
+        ItemKind::Const(ref t, ref e) => {
+            hir::ItemConst(lower_ty(lctx, t), lower_expr(lctx, e))
+        }
+        ItemKind::Fn(ref decl, unsafety, constness, abi, ref generics, ref body) => {
+            hir::ItemFn(lower_fn_decl(lctx, decl),
+                        lower_unsafety(lctx, unsafety),
+                        lower_constness(lctx, constness),
+                        abi,
+                        lower_generics(lctx, generics),
+                        lower_block(lctx, body))
+        }
+        ItemKind::Mod(ref m) => hir::ItemMod(lower_mod(lctx, m)),
+        ItemKind::ForeignMod(ref nm) => hir::ItemForeignMod(lower_foreign_mod(lctx, nm)),
+        ItemKind::Ty(ref t, ref generics) => {
+            hir::ItemTy(lower_ty(lctx, t), lower_generics(lctx, generics))
+        }
+        ItemKind::Enum(ref enum_definition, ref generics) => {
+            hir::ItemEnum(hir::EnumDef {
+                              variants: enum_definition.variants
+                                                       .iter()
+                                                       .map(|x| lower_variant(lctx, x))
+                                                       .collect(),
+                          },
+                          lower_generics(lctx, generics))
+        }
+        ItemKind::Struct(ref struct_def, ref generics) => {
+            let struct_def = lower_variant_data(lctx, struct_def);
+            hir::ItemStruct(struct_def, lower_generics(lctx, generics))
+        }
+        ItemKind::DefaultImpl(unsafety, ref trait_ref) => {
+            hir::ItemDefaultImpl(lower_unsafety(lctx, unsafety),
+                                 lower_trait_ref(lctx, trait_ref))
+        }
+        ItemKind::Impl(unsafety, polarity, ref generics, ref ifce, ref ty, ref impl_items) => {
+            let new_impl_items = impl_items.iter()
+                                           .map(|item| lower_impl_item(lctx, item))
+                                           .collect();
+            let ifce = ifce.as_ref().map(|trait_ref| lower_trait_ref(lctx, trait_ref));
+            hir::ItemImpl(lower_unsafety(lctx, unsafety),
+                          lower_impl_polarity(lctx, polarity),
+                          lower_generics(lctx, generics),
+                          ifce,
+                          lower_ty(lctx, ty),
+                          new_impl_items)
+        }
+        ItemKind::Trait(unsafety, ref generics, ref bounds, ref items) => {
+            let bounds = lower_bounds(lctx, bounds);
+            let items = items.iter().map(|item| lower_trait_item(lctx, item)).collect();
+            hir::ItemTrait(lower_unsafety(lctx, unsafety),
+                           lower_generics(lctx, generics),
+                           bounds,
+                           items)
+        }
+        ItemKind::Mac(_) => panic!("Shouldn't still be around"),
+    }
+}
+
+pub fn lower_trait_item(lctx: &LoweringContext, i: &TraitItem) -> hir::TraitItem {
+    hir::TraitItem {
+        id: i.id,
+        name: i.ident.name,
+        attrs: lower_attrs(lctx, &i.attrs),
+        node: match i.node {
+            TraitItemKind::Const(ref ty, ref default) => {
+                hir::ConstTraitItem(lower_ty(lctx, ty),
+                                    default.as_ref().map(|x| lower_expr(lctx, x)))
+            }
+            TraitItemKind::Method(ref sig, ref body) => {
+                hir::MethodTraitItem(lower_method_sig(lctx, sig),
+                                     body.as_ref().map(|x| lower_block(lctx, x)))
+            }
+            TraitItemKind::Type(ref bounds, ref default) => {
+                hir::TypeTraitItem(lower_bounds(lctx, bounds),
+                                   default.as_ref().map(|x| lower_ty(lctx, x)))
+            }
+        },
+        span: i.span,
+    }
+}
+
+pub fn lower_impl_item(lctx: &LoweringContext, i: &ImplItem) -> hir::ImplItem {
+    hir::ImplItem {
+        id: i.id,
+        name: i.ident.name,
+        attrs: lower_attrs(lctx, &i.attrs),
+        vis: lower_visibility(lctx, &i.vis),
+        defaultness: lower_defaultness(lctx, i.defaultness),
+        node: match i.node {
+            ImplItemKind::Const(ref ty, ref expr) => {
+                hir::ImplItemKind::Const(lower_ty(lctx, ty), lower_expr(lctx, expr))
+            }
+            ImplItemKind::Method(ref sig, ref body) => {
+                hir::ImplItemKind::Method(lower_method_sig(lctx, sig), lower_block(lctx, body))
+            }
+            ImplItemKind::Type(ref ty) => hir::ImplItemKind::Type(lower_ty(lctx, ty)),
+            ImplItemKind::Macro(..) => panic!("Shouldn't exist any more"),
+        },
+        span: i.span,
+    }
+}
+
+pub fn lower_mod(lctx: &LoweringContext, m: &Mod) -> hir::Mod {
+    hir::Mod {
+        inner: m.inner,
+        item_ids: m.items.iter().map(|x| lower_item_id(lctx, x)).collect(),
+    }
+}
+
+struct ItemLowerer<'lcx, 'interner: 'lcx> {
+    items: BTreeMap<NodeId, hir::Item>,
+    lctx: &'lcx LoweringContext<'interner>,
+}
+
+impl<'lcx, 'interner> Visitor<'lcx> for ItemLowerer<'lcx, 'interner> {
+    fn visit_item(&mut self, item: &'lcx Item) {
+        self.items.insert(item.id, lower_item(self.lctx, item));
+        visit::walk_item(self, item);
+    }
+}
+
+pub fn lower_crate(lctx: &LoweringContext, c: &Crate) -> hir::Crate {
+    let items = {
+        let mut item_lowerer = ItemLowerer { items: BTreeMap::new(), lctx: lctx };
+        visit::walk_crate(&mut item_lowerer, c);
+        item_lowerer.items
+    };
+
+    hir::Crate {
+        module: lower_mod(lctx, &c.module),
+        attrs: lower_attrs(lctx, &c.attrs),
+        config: c.config.clone().into(),
+        span: c.span,
+        exported_macros: c.exported_macros.iter().map(|m| lower_macro_def(lctx, m)).collect(),
+        items: items,
+    }
+}
+
+pub fn lower_macro_def(lctx: &LoweringContext, m: &MacroDef) -> hir::MacroDef {
+    hir::MacroDef {
+        name: m.ident.name,
+        attrs: lower_attrs(lctx, &m.attrs),
+        id: m.id,
+        span: m.span,
+        imported_from: m.imported_from.map(|x| x.name),
+        export: m.export,
+        use_locally: m.use_locally,
+        allow_internal_unstable: m.allow_internal_unstable,
+        body: m.body.clone().into(),
+    }
+}
+
+pub fn lower_item_id(_lctx: &LoweringContext, i: &Item) -> hir::ItemId {
+    hir::ItemId { id: i.id }
+}
+
+pub fn lower_item(lctx: &LoweringContext, i: &Item) -> hir::Item {
+    let node = lower_item_kind(lctx, &i.node);
+
+    hir::Item {
+        id: i.id,
+        name: i.ident.name,
+        attrs: lower_attrs(lctx, &i.attrs),
+        node: node,
+        vis: lower_visibility(lctx, &i.vis),
+        span: i.span,
+    }
+}
+
+pub fn lower_foreign_item(lctx: &LoweringContext, i: &ForeignItem) -> hir::ForeignItem {
+    hir::ForeignItem {
+        id: i.id,
+        name: i.ident.name,
+        attrs: lower_attrs(lctx, &i.attrs),
+        node: match i.node {
+            ForeignItemKind::Fn(ref fdec, ref generics) => {
+                hir::ForeignItemFn(lower_fn_decl(lctx, fdec), lower_generics(lctx, generics))
+            }
+            ForeignItemKind::Static(ref t, m) => {
+                hir::ForeignItemStatic(lower_ty(lctx, t), m)
+            }
+        },
+        vis: lower_visibility(lctx, &i.vis),
+        span: i.span,
+    }
+}
+
+pub fn lower_method_sig(lctx: &LoweringContext, sig: &MethodSig) -> hir::MethodSig {
+    hir::MethodSig {
+        generics: lower_generics(lctx, &sig.generics),
+        abi: sig.abi,
+        explicit_self: lower_explicit_self(lctx, &sig.explicit_self),
+        unsafety: lower_unsafety(lctx, sig.unsafety),
+        constness: lower_constness(lctx, sig.constness),
+        decl: lower_fn_decl(lctx, &sig.decl),
+    }
+}
+
+pub fn lower_unsafety(_lctx: &LoweringContext, u: Unsafety) -> hir::Unsafety {
+    match u {
+        Unsafety::Unsafe => hir::Unsafety::Unsafe,
+        Unsafety::Normal => hir::Unsafety::Normal,
+    }
+}
+
+pub fn lower_constness(_lctx: &LoweringContext, c: Constness) -> hir::Constness {
+    match c {
+        Constness::Const => hir::Constness::Const,
+        Constness::NotConst => hir::Constness::NotConst,
+    }
+}
+
+pub fn lower_unop(_lctx: &LoweringContext, u: UnOp) -> hir::UnOp {
+    match u {
+        UnOp::Deref => hir::UnDeref,
+        UnOp::Not => hir::UnNot,
+        UnOp::Neg => hir::UnNeg,
+    }
+}
+
+pub fn lower_binop(_lctx: &LoweringContext, b: BinOp) -> hir::BinOp {
+    Spanned {
+        node: match b.node {
+            BinOpKind::Add => hir::BiAdd,
+            BinOpKind::Sub => hir::BiSub,
+            BinOpKind::Mul => hir::BiMul,
+            BinOpKind::Div => hir::BiDiv,
+            BinOpKind::Rem => hir::BiRem,
+            BinOpKind::And => hir::BiAnd,
+            BinOpKind::Or => hir::BiOr,
+            BinOpKind::BitXor => hir::BiBitXor,
+            BinOpKind::BitAnd => hir::BiBitAnd,
+            BinOpKind::BitOr => hir::BiBitOr,
+            BinOpKind::Shl => hir::BiShl,
+            BinOpKind::Shr => hir::BiShr,
+            BinOpKind::Eq => hir::BiEq,
+            BinOpKind::Lt => hir::BiLt,
+            BinOpKind::Le => hir::BiLe,
+            BinOpKind::Ne => hir::BiNe,
+            BinOpKind::Ge => hir::BiGe,
+            BinOpKind::Gt => hir::BiGt,
+        },
+        span: b.span,
+    }
+}
+
+pub fn lower_pat(lctx: &LoweringContext, p: &Pat) -> P<hir::Pat> {
+    P(hir::Pat {
+        id: p.id,
+        node: match p.node {
+            PatKind::Wild => hir::PatKind::Wild,
+            PatKind::Ident(ref binding_mode, pth1, ref sub) => {
+                hir::PatKind::Ident(lower_binding_mode(lctx, binding_mode),
+                              respan(pth1.span, lower_ident(lctx, pth1.node)),
+                              sub.as_ref().map(|x| lower_pat(lctx, x)))
+            }
+            PatKind::Lit(ref e) => hir::PatKind::Lit(lower_expr(lctx, e)),
+            PatKind::TupleStruct(ref pth, ref pats) => {
+                hir::PatKind::TupleStruct(lower_path(lctx, pth),
+                             pats.as_ref()
+                                 .map(|pats| pats.iter().map(|x| lower_pat(lctx, x)).collect()))
+            }
+            PatKind::Path(ref pth) => {
+                hir::PatKind::Path(lower_path(lctx, pth))
+            }
+            PatKind::QPath(ref qself, ref pth) => {
+                let qself = hir::QSelf {
+                    ty: lower_ty(lctx, &qself.ty),
+                    position: qself.position,
+                };
+                hir::PatKind::QPath(qself, lower_path(lctx, pth))
+            }
+            PatKind::Struct(ref pth, ref fields, etc) => {
+                let pth = lower_path(lctx, pth);
+                let fs = fields.iter()
+                               .map(|f| {
+                                   Spanned {
+                                       span: f.span,
+                                       node: hir::FieldPat {
+                                           name: f.node.ident.name,
+                                           pat: lower_pat(lctx, &f.node.pat),
+                                           is_shorthand: f.node.is_shorthand,
+                                       },
+                                   }
+                               })
+                               .collect();
+                hir::PatKind::Struct(pth, fs, etc)
+            }
+            PatKind::Tup(ref elts) => {
+                hir::PatKind::Tup(elts.iter().map(|x| lower_pat(lctx, x)).collect())
+            }
+            PatKind::Box(ref inner) => hir::PatKind::Box(lower_pat(lctx, inner)),
+            PatKind::Ref(ref inner, mutbl) => {
+                hir::PatKind::Ref(lower_pat(lctx, inner), lower_mutability(lctx, mutbl))
+            }
+            PatKind::Range(ref e1, ref e2) => {
+                hir::PatKind::Range(lower_expr(lctx, e1), lower_expr(lctx, e2))
+            }
+            PatKind::Vec(ref before, ref slice, ref after) => {
+                hir::PatKind::Vec(before.iter().map(|x| lower_pat(lctx, x)).collect(),
+                            slice.as_ref().map(|x| lower_pat(lctx, x)),
+                            after.iter().map(|x| lower_pat(lctx, x)).collect())
+            }
+            PatKind::Mac(_) => panic!("Shouldn't exist here"),
+        },
+        span: p.span,
+    })
+}
+
+pub fn lower_expr(lctx: &LoweringContext, e: &Expr) -> P<hir::Expr> {
+    P(hir::Expr {
+        id: e.id,
+        node: match e.node {
+            // Issue #22181:
+            // Eventually a desugaring for `box EXPR`
+            // (similar to the desugaring below for `in (PLACE) EXPR`)
+            // should go here, desugaring
+            //
+            // to:
+            //
+            // let mut place = BoxPlace::make_place();
+            // let raw_place = Place::pointer(&mut place);
+            // let value = $value;
+            // unsafe {
+            //     ::std::ptr::write(raw_place, value);
+            //     Boxed::finalize(place)
+            // }
+            //
+            // But for now there are type-inference issues doing that.
+            ExprKind::Box(ref e) => {
+                hir::ExprBox(lower_expr(lctx, e))
+            }
+
+            // Desugar ExprKind::InPlace: `in (PLACE) EXPR`
+            ExprKind::InPlace(ref placer, ref value_expr) => {
+                // to:
+                //
+                // let p = PLACE;
+                // let mut place = Placer::make_place(p);
+                // let raw_place = Place::pointer(&mut place);
+                // push_unsafe!({
+                //     std::intrinsics::move_val_init(raw_place, pop_unsafe!( EXPR ));
+                //     InPlace::finalize(place)
+                // })
+                return cache_ids(lctx, e.id, |lctx| {
+                    let placer_expr = lower_expr(lctx, placer);
+                    let value_expr = lower_expr(lctx, value_expr);
+
+                    let placer_ident = lctx.str_to_ident("placer");
+                    let place_ident = lctx.str_to_ident("place");
+                    let p_ptr_ident = lctx.str_to_ident("p_ptr");
+
+                    let make_place = ["ops", "Placer", "make_place"];
+                    let place_pointer = ["ops", "Place", "pointer"];
+                    let move_val_init = ["intrinsics", "move_val_init"];
+                    let inplace_finalize = ["ops", "InPlace", "finalize"];
+
+                    let make_call = |lctx: &LoweringContext, p, args| {
+                        let path = core_path(lctx, e.span, p);
+                        let path = expr_path(lctx, path, None);
+                        expr_call(lctx, e.span, path, args, None)
+                    };
+
+                    let mk_stmt_let = |lctx: &LoweringContext, bind, expr| {
+                        stmt_let(lctx, e.span, false, bind, expr, None)
+                    };
+
+                    let mk_stmt_let_mut = |lctx: &LoweringContext, bind, expr| {
+                        stmt_let(lctx, e.span, true, bind, expr, None)
+                    };
+
+                    // let placer = <placer_expr> ;
+                    let s1 = {
+                        let placer_expr = signal_block_expr(lctx,
+                                                            hir_vec![],
+                                                            placer_expr,
+                                                            e.span,
+                                                            hir::PopUnstableBlock,
+                                                            None);
+                        mk_stmt_let(lctx, placer_ident, placer_expr)
+                    };
+
+                    // let mut place = Placer::make_place(placer);
+                    let s2 = {
+                        let placer = expr_ident(lctx, e.span, placer_ident, None);
+                        let call = make_call(lctx, &make_place, hir_vec![placer]);
+                        mk_stmt_let_mut(lctx, place_ident, call)
+                    };
+
+                    // let p_ptr = Place::pointer(&mut place);
+                    let s3 = {
+                        let agent = expr_ident(lctx, e.span, place_ident, None);
+                        let args = hir_vec![expr_mut_addr_of(lctx, e.span, agent, None)];
+                        let call = make_call(lctx, &place_pointer, args);
+                        mk_stmt_let(lctx, p_ptr_ident, call)
+                    };
+
+                    // pop_unsafe!( EXPR )
+                    let pop_unsafe_expr = {
+                        let value_expr = signal_block_expr(lctx,
+                                                           hir_vec![],
+                                                           value_expr,
+                                                           e.span,
+                                                           hir::PopUnstableBlock,
+                                                           None);
+                        signal_block_expr(lctx,
+                                          hir_vec![],
+                                          value_expr,
+                                          e.span,
+                                          hir::PopUnsafeBlock(hir::CompilerGenerated), None)
+                    };
+
+                    // push_unsafe!({
+                    //     std::intrinsics::move_val_init(raw_place, pop_unsafe!( EXPR ));
+                    //     InPlace::finalize(place)
+                    // })
+                    let expr = {
+                        let ptr = expr_ident(lctx, e.span, p_ptr_ident, None);
+                        let call_move_val_init =
+                            hir::StmtSemi(
+                                make_call(lctx, &move_val_init, hir_vec![ptr, pop_unsafe_expr]),
+                                lctx.next_id());
+                        let call_move_val_init = respan(e.span, call_move_val_init);
+
+                        let place = expr_ident(lctx, e.span, place_ident, None);
+                        let call = make_call(lctx, &inplace_finalize, hir_vec![place]);
+                        signal_block_expr(lctx,
+                                          hir_vec![call_move_val_init],
+                                          call,
+                                          e.span,
+                                          hir::PushUnsafeBlock(hir::CompilerGenerated), None)
+                    };
+
+                    signal_block_expr(lctx,
+                                      hir_vec![s1, s2, s3],
+                                      expr,
+                                      e.span,
+                                      hir::PushUnstableBlock,
+                                      e.attrs.clone())
+                });
+            }
+
+            ExprKind::Vec(ref exprs) => {
+                hir::ExprVec(exprs.iter().map(|x| lower_expr(lctx, x)).collect())
+            }
+            ExprKind::Repeat(ref expr, ref count) => {
+                let expr = lower_expr(lctx, expr);
+                let count = lower_expr(lctx, count);
+                hir::ExprRepeat(expr, count)
+            }
+            ExprKind::Tup(ref elts) => {
+                hir::ExprTup(elts.iter().map(|x| lower_expr(lctx, x)).collect())
+            }
+            ExprKind::Call(ref f, ref args) => {
+                let f = lower_expr(lctx, f);
+                hir::ExprCall(f, args.iter().map(|x| lower_expr(lctx, x)).collect())
+            }
+            ExprKind::MethodCall(i, ref tps, ref args) => {
+                let tps = tps.iter().map(|x| lower_ty(lctx, x)).collect();
+                let args = args.iter().map(|x| lower_expr(lctx, x)).collect();
+                hir::ExprMethodCall(respan(i.span, i.node.name), tps, args)
+            }
+            ExprKind::Binary(binop, ref lhs, ref rhs) => {
+                let binop = lower_binop(lctx, binop);
+                let lhs = lower_expr(lctx, lhs);
+                let rhs = lower_expr(lctx, rhs);
+                hir::ExprBinary(binop, lhs, rhs)
+            }
+            ExprKind::Unary(op, ref ohs) => {
+                let op = lower_unop(lctx, op);
+                let ohs = lower_expr(lctx, ohs);
+                hir::ExprUnary(op, ohs)
+            }
+            ExprKind::Lit(ref l) => hir::ExprLit(P((**l).clone())),
+            ExprKind::Cast(ref expr, ref ty) => {
+                let expr = lower_expr(lctx, expr);
+                hir::ExprCast(expr, lower_ty(lctx, ty))
+            }
+            ExprKind::Type(ref expr, ref ty) => {
+                let expr = lower_expr(lctx, expr);
+                hir::ExprType(expr, lower_ty(lctx, ty))
+            }
+            ExprKind::AddrOf(m, ref ohs) => {
+                let m = lower_mutability(lctx, m);
+                let ohs = lower_expr(lctx, ohs);
+                hir::ExprAddrOf(m, ohs)
+            }
+            // More complicated than you might expect because the else branch
+            // might be `if let`.
+            ExprKind::If(ref cond, ref blk, ref else_opt) => {
+                let else_opt = else_opt.as_ref().map(|els| {
+                    match els.node {
+                        ExprKind::IfLet(..) => {
+                            cache_ids(lctx, e.id, |lctx| {
+                                // wrap the if-let expr in a block
+                                let span = els.span;
+                                let els = lower_expr(lctx, els);
+                                let id = lctx.next_id();
+                                let blk = P(hir::Block {
+                                    stmts: hir_vec![],
+                                    expr: Some(els),
+                                    id: id,
+                                    rules: hir::DefaultBlock,
+                                    span: span,
+                                });
+                                expr_block(lctx, blk, None)
+                            })
+                        }
+                        _ => lower_expr(lctx, els),
+                    }
+                });
+
+                hir::ExprIf(lower_expr(lctx, cond), lower_block(lctx, blk), else_opt)
+            }
+            ExprKind::While(ref cond, ref body, opt_ident) => {
+                hir::ExprWhile(lower_expr(lctx, cond), lower_block(lctx, body),
+                               opt_ident.map(|ident| lower_ident(lctx, ident)))
+            }
+            ExprKind::Loop(ref body, opt_ident) => {
+                hir::ExprLoop(lower_block(lctx, body),
+                              opt_ident.map(|ident| lower_ident(lctx, ident)))
+            }
+            ExprKind::Match(ref expr, ref arms) => {
+                hir::ExprMatch(lower_expr(lctx, expr),
+                               arms.iter().map(|x| lower_arm(lctx, x)).collect(),
+                               hir::MatchSource::Normal)
+            }
+            ExprKind::Closure(capture_clause, ref decl, ref body) => {
+                hir::ExprClosure(lower_capture_clause(lctx, capture_clause),
+                                 lower_fn_decl(lctx, decl),
+                                 lower_block(lctx, body))
+            }
+            ExprKind::Block(ref blk) => hir::ExprBlock(lower_block(lctx, blk)),
+            ExprKind::Assign(ref el, ref er) => {
+                hir::ExprAssign(lower_expr(lctx, el), lower_expr(lctx, er))
+            }
+            ExprKind::AssignOp(op, ref el, ref er) => {
+                hir::ExprAssignOp(lower_binop(lctx, op),
+                                  lower_expr(lctx, el),
+                                  lower_expr(lctx, er))
+            }
+            ExprKind::Field(ref el, ident) => {
+                hir::ExprField(lower_expr(lctx, el), respan(ident.span, ident.node.name))
+            }
+            ExprKind::TupField(ref el, ident) => {
+                hir::ExprTupField(lower_expr(lctx, el), ident)
+            }
+            ExprKind::Index(ref el, ref er) => {
+                hir::ExprIndex(lower_expr(lctx, el), lower_expr(lctx, er))
+            }
+            ExprKind::Range(ref e1, ref e2, lims) => {
+                fn make_struct(lctx: &LoweringContext,
+                               ast_expr: &Expr,
+                               path: &[&str],
+                               fields: &[(&str, &P<Expr>)]) -> P<hir::Expr> {
+                    let strs = std_path(lctx, &iter::once(&"ops")
+                                                    .chain(path)
+                                                    .map(|s| *s)
+                                                    .collect::<Vec<_>>());
+
+                    let structpath = path_global(ast_expr.span, strs);
+
+                    let hir_expr = if fields.len() == 0 {
+                        expr_path(lctx,
+                                  structpath,
+                                  ast_expr.attrs.clone())
+                    } else {
+                        expr_struct(lctx,
+                                    ast_expr.span,
+                                    structpath,
+                                    fields.into_iter().map(|&(s, e)| {
+                                        field(token::intern(s),
+                                              signal_block_expr(lctx,
+                                                                hir_vec![],
+                                                                lower_expr(lctx, &**e),
+                                                                e.span,
+                                                                hir::PopUnstableBlock,
+                                                                None),
+                                              ast_expr.span)
+                                    }).collect(),
+                                    None,
+                                    ast_expr.attrs.clone())
+                    };
+
+                    signal_block_expr(lctx,
+                                      hir_vec![],
+                                      hir_expr,
+                                      ast_expr.span,
+                                      hir::PushUnstableBlock,
+                                      None)
+                }
+
+                return cache_ids(lctx, e.id, |lctx| {
+                    use syntax::ast::RangeLimits::*;
+
+                    match (e1, e2, lims) {
+                        (&None,         &None,         HalfOpen) =>
+                            make_struct(lctx, e, &["RangeFull"],
+                                                 &[]),
+
+                        (&Some(ref e1), &None,         HalfOpen) =>
+                            make_struct(lctx, e, &["RangeFrom"],
+                                                 &[("start", e1)]),
+
+                        (&None,         &Some(ref e2), HalfOpen) =>
+                            make_struct(lctx, e, &["RangeTo"],
+                                                 &[("end", e2)]),
+
+                        (&Some(ref e1), &Some(ref e2), HalfOpen) =>
+                            make_struct(lctx, e, &["Range"],
+                                                 &[("start", e1), ("end", e2)]),
+
+                        (&None,         &Some(ref e2), Closed)   =>
+                            make_struct(lctx, e, &["RangeToInclusive"],
+                                                 &[("end", e2)]),
+
+                        (&Some(ref e1), &Some(ref e2), Closed)   =>
+                            make_struct(lctx, e, &["RangeInclusive", "NonEmpty"],
+                                                 &[("start", e1), ("end", e2)]),
+
+                        _ => panic!(lctx.diagnostic().span_fatal(e.span,
+                                                                 "inclusive range with no end"))
+                    }
+                });
+            }
+            ExprKind::Path(ref qself, ref path) => {
+                let hir_qself = qself.as_ref().map(|&QSelf { ref ty, position }| {
+                    hir::QSelf {
+                        ty: lower_ty(lctx, ty),
+                        position: position,
+                    }
+                });
+                hir::ExprPath(hir_qself, lower_path_full(lctx, path, qself.is_none()))
+            }
+            ExprKind::Break(opt_ident) => hir::ExprBreak(opt_ident.map(|sp_ident| {
+                respan(sp_ident.span, lower_ident(lctx, sp_ident.node))
+            })),
+            ExprKind::Again(opt_ident) => hir::ExprAgain(opt_ident.map(|sp_ident| {
+                respan(sp_ident.span, lower_ident(lctx, sp_ident.node))
+            })),
+            ExprKind::Ret(ref e) => hir::ExprRet(e.as_ref().map(|x| lower_expr(lctx, x))),
+            ExprKind::InlineAsm(InlineAsm {
+                    ref inputs,
+                    ref outputs,
+                    ref asm,
+                    asm_str_style,
+                    ref clobbers,
+                    volatile,
+                    alignstack,
+                    dialect,
+                    expn_id,
+                }) => hir::ExprInlineAsm(hir::InlineAsm {
+                inputs: inputs.iter().map(|&(ref c, _)| c.clone()).collect(),
+                outputs: outputs.iter()
+                                .map(|out| {
+                                    hir::InlineAsmOutput {
+                                        constraint: out.constraint.clone(),
+                                        is_rw: out.is_rw,
+                                        is_indirect: out.is_indirect,
+                                    }
+                                })
+                                .collect(),
+                asm: asm.clone(),
+                asm_str_style: asm_str_style,
+                clobbers: clobbers.clone().into(),
+                volatile: volatile,
+                alignstack: alignstack,
+                dialect: dialect,
+                expn_id: expn_id,
+            }, outputs.iter().map(|out| lower_expr(lctx, &out.expr)).collect(),
+               inputs.iter().map(|&(_, ref input)| lower_expr(lctx, input)).collect()),
+            ExprKind::Struct(ref path, ref fields, ref maybe_expr) => {
+                hir::ExprStruct(lower_path(lctx, path),
+                                fields.iter().map(|x| lower_field(lctx, x)).collect(),
+                                maybe_expr.as_ref().map(|x| lower_expr(lctx, x)))
+            }
+            ExprKind::Paren(ref ex) => {
+                // merge attributes into the inner expression.
+                return lower_expr(lctx, ex).map(|mut ex| {
+                    ex.attrs.update(|attrs| {
+                        attrs.prepend(e.attrs.clone())
+                    });
+                    ex
+                });
+            }
+
+            // Desugar ExprIfLet
+            // From: `if let <pat> = <sub_expr> <body> [<else_opt>]`
+            ExprKind::IfLet(ref pat, ref sub_expr, ref body, ref else_opt) => {
+                // to:
+                //
+                //   match <sub_expr> {
+                //     <pat> => <body>,
+                //     [_ if <else_opt_if_cond> => <else_opt_if_body>,]
+                //     _ => [<else_opt> | ()]
+                //   }
+
+                return cache_ids(lctx, e.id, |lctx| {
+                    // `<pat> => <body>`
+                    let pat_arm = {
+                        let body = lower_block(lctx, body);
+                        let body_expr = expr_block(lctx, body, None);
+                        arm(hir_vec![lower_pat(lctx, pat)], body_expr)
+                    };
+
+                    // `[_ if <else_opt_if_cond> => <else_opt_if_body>,]`
+                    let mut else_opt = else_opt.as_ref().map(|e| lower_expr(lctx, e));
+                    let else_if_arms = {
+                        let mut arms = vec![];
+                        loop {
+                            let else_opt_continue = else_opt.and_then(|els| {
+                                els.and_then(|els| {
+                                    match els.node {
+                                        // else if
+                                        hir::ExprIf(cond, then, else_opt) => {
+                                            let pat_under = pat_wild(lctx, e.span);
+                                            arms.push(hir::Arm {
+                                                attrs: hir_vec![],
+                                                pats: hir_vec![pat_under],
+                                                guard: Some(cond),
+                                                body: expr_block(lctx, then, None),
+                                            });
+                                            else_opt.map(|else_opt| (else_opt, true))
+                                        }
+                                        _ => Some((P(els), false)),
+                                    }
+                                })
+                            });
+                            match else_opt_continue {
+                                Some((e, true)) => {
+                                    else_opt = Some(e);
+                                }
+                                Some((e, false)) => {
+                                    else_opt = Some(e);
+                                    break;
+                                }
+                                None => {
+                                    else_opt = None;
+                                    break;
+                                }
+                            }
+                        }
+                        arms
+                    };
+
+                    let contains_else_clause = else_opt.is_some();
+
+                    // `_ => [<else_opt> | ()]`
+                    let else_arm = {
+                        let pat_under = pat_wild(lctx, e.span);
+                        let else_expr =
+                            else_opt.unwrap_or_else(
+                                || expr_tuple(lctx, e.span, hir_vec![], None));
+                        arm(hir_vec![pat_under], else_expr)
+                    };
+
+                    let mut arms = Vec::with_capacity(else_if_arms.len() + 2);
+                    arms.push(pat_arm);
+                    arms.extend(else_if_arms);
+                    arms.push(else_arm);
+
+                    let sub_expr = lower_expr(lctx, sub_expr);
+                    // add attributes to the outer returned expr node
+                    expr(lctx,
+                         e.span,
+                         hir::ExprMatch(sub_expr,
+                                        arms.into(),
+                                        hir::MatchSource::IfLetDesugar {
+                                            contains_else_clause: contains_else_clause,
+                                        }),
+                         e.attrs.clone())
+                });
+            }
+
+            // Desugar ExprWhileLet
+            // From: `[opt_ident]: while let <pat> = <sub_expr> <body>`
+            ExprKind::WhileLet(ref pat, ref sub_expr, ref body, opt_ident) => {
+                // to:
+                //
+                //   [opt_ident]: loop {
+                //     match <sub_expr> {
+                //       <pat> => <body>,
+                //       _ => break
+                //     }
+                //   }
+
+                return cache_ids(lctx, e.id, |lctx| {
+                    // `<pat> => <body>`
+                    let pat_arm = {
+                        let body = lower_block(lctx, body);
+                        let body_expr = expr_block(lctx, body, None);
+                        arm(hir_vec![lower_pat(lctx, pat)], body_expr)
+                    };
+
+                    // `_ => break`
+                    let break_arm = {
+                        let pat_under = pat_wild(lctx, e.span);
+                        let break_expr = expr_break(lctx, e.span, None);
+                        arm(hir_vec![pat_under], break_expr)
+                    };
+
+                    // `match <sub_expr> { ... }`
+                    let arms = hir_vec![pat_arm, break_arm];
+                    let sub_expr = lower_expr(lctx, sub_expr);
+                    let match_expr = expr(lctx,
+                                          e.span,
+                                          hir::ExprMatch(sub_expr,
+                                                         arms,
+                                                         hir::MatchSource::WhileLetDesugar),
+                                          None);
+
+                    // `[opt_ident]: loop { ... }`
+                    let loop_block = block_expr(lctx, match_expr);
+                    let loop_expr = hir::ExprLoop(loop_block,
+                                                  opt_ident.map(|ident| lower_ident(lctx, ident)));
+                    // add attributes to the outer returned expr node
+                    expr(lctx, e.span, loop_expr, e.attrs.clone())
+                });
+            }
+
+            // Desugar ExprForLoop
+            // From: `[opt_ident]: for <pat> in <head> <body>`
+            ExprKind::ForLoop(ref pat, ref head, ref body, opt_ident) => {
+                // to:
+                //
+                //   {
+                //     let result = match ::std::iter::IntoIterator::into_iter(<head>) {
+                //       mut iter => {
+                //         [opt_ident]: loop {
+                //           match ::std::iter::Iterator::next(&mut iter) {
+                //             ::std::option::Option::Some(<pat>) => <body>,
+                //             ::std::option::Option::None => break
+                //           }
+                //         }
+                //       }
+                //     };
+                //     result
+                //   }
+
+                return cache_ids(lctx, e.id, |lctx| {
+                    // expand <head>
+                    let head = lower_expr(lctx, head);
+
+                    let iter = lctx.str_to_ident("iter");
+
+                    // `::std::option::Option::Some(<pat>) => <body>`
+                    let pat_arm = {
+                        let body_block = lower_block(lctx, body);
+                        let body_span = body_block.span;
+                        let body_expr = P(hir::Expr {
+                            id: lctx.next_id(),
+                            node: hir::ExprBlock(body_block),
+                            span: body_span,
+                            attrs: None,
+                        });
+                        let pat = lower_pat(lctx, pat);
+                        let some_pat = pat_some(lctx, e.span, pat);
+
+                        arm(hir_vec![some_pat], body_expr)
+                    };
+
+                    // `::std::option::Option::None => break`
+                    let break_arm = {
+                        let break_expr = expr_break(lctx, e.span, None);
+
+                        arm(hir_vec![pat_none(lctx, e.span)], break_expr)
+                    };
+
+                    // `match ::std::iter::Iterator::next(&mut iter) { ... }`
+                    let match_expr = {
+                        let next_path = {
+                            let strs = std_path(lctx, &["iter", "Iterator", "next"]);
+
+                            path_global(e.span, strs)
+                        };
+                        let iter = expr_ident(lctx, e.span, iter, None);
+                        let ref_mut_iter = expr_mut_addr_of(lctx, e.span, iter, None);
+                        let next_path = expr_path(lctx, next_path, None);
+                        let next_expr = expr_call(lctx,
+                                                  e.span,
+                                                  next_path,
+                                                  hir_vec![ref_mut_iter],
+                                                  None);
+                        let arms = hir_vec![pat_arm, break_arm];
+
+                        expr(lctx,
+                             e.span,
+                             hir::ExprMatch(next_expr, arms, hir::MatchSource::ForLoopDesugar),
+                             None)
+                    };
+
+                    // `[opt_ident]: loop { ... }`
+                    let loop_block = block_expr(lctx, match_expr);
+                    let loop_expr = hir::ExprLoop(loop_block,
+                                                  opt_ident.map(|ident| lower_ident(lctx, ident)));
+                    let loop_expr = expr(lctx, e.span, loop_expr, None);
+
+                    // `mut iter => { ... }`
+                    let iter_arm = {
+                        let iter_pat = pat_ident_binding_mode(lctx,
+                                                              e.span,
+                                                              iter,
+                                                              hir::BindByValue(hir::MutMutable));
+                        arm(hir_vec![iter_pat], loop_expr)
+                    };
+
+                    // `match ::std::iter::IntoIterator::into_iter(<head>) { ... }`
+                    let into_iter_expr = {
+                        let into_iter_path = {
+                            let strs = std_path(lctx, &["iter", "IntoIterator", "into_iter"]);
+
+                            path_global(e.span, strs)
+                        };
+
+                        let into_iter = expr_path(lctx, into_iter_path, None);
+                        expr_call(lctx, e.span, into_iter, hir_vec![head], None)
+                    };
+
+                    let match_expr = expr_match(lctx,
+                                                e.span,
+                                                into_iter_expr,
+                                                hir_vec![iter_arm],
+                                                hir::MatchSource::ForLoopDesugar,
+                                                None);
+
+                    // `{ let _result = ...; _result }`
+                    // underscore prevents an unused_variables lint if the head diverges
+                    let result_ident = lctx.str_to_ident("_result");
+                    let let_stmt = stmt_let(lctx, e.span, false, result_ident, match_expr, None);
+                    let result = expr_ident(lctx, e.span, result_ident, None);
+                    let block = block_all(lctx, e.span, hir_vec![let_stmt], Some(result));
+                    // add the attributes to the outer returned expr node
+                    expr_block(lctx, block, e.attrs.clone())
+                });
+            }
+
+            // Desugar ExprKind::Try
+            // From: `<expr>?`
+            ExprKind::Try(ref sub_expr) => {
+                // to:
+                //
+                // {
+                //     match <expr> {
+                //         Ok(val) => val,
+                //         Err(err) => {
+                //             return Err(From::from(err))
+                //         }
+                //     }
+                // }
+
+                return cache_ids(lctx, e.id, |lctx| {
+                    // expand <expr>
+                    let sub_expr = lower_expr(lctx, sub_expr);
+
+                    // Ok(val) => val
+                    let ok_arm = {
+                        let val_ident = lctx.str_to_ident("val");
+                        let val_pat = pat_ident(lctx, e.span, val_ident);
+                        let val_expr = expr_ident(lctx, e.span, val_ident, None);
+                        let ok_pat = pat_ok(lctx, e.span, val_pat);
+
+                        arm(hir_vec![ok_pat], val_expr)
+                    };
+
+                    // Err(err) => return Err(From::from(err))
+                    let err_arm = {
+                        let err_ident = lctx.str_to_ident("err");
+                        let from_expr = {
+                            let path = std_path(lctx, &["convert", "From", "from"]);
+                            let path = path_global(e.span, path);
+                            let from = expr_path(lctx, path, None);
+                            let err_expr = expr_ident(lctx, e.span, err_ident, None);
+
+                            expr_call(lctx, e.span, from, hir_vec![err_expr], None)
+                        };
+                        let err_expr = {
+                            let path = std_path(lctx, &["result", "Result", "Err"]);
+                            let path = path_global(e.span, path);
+                            let err_ctor = expr_path(lctx, path, None);
+                            expr_call(lctx, e.span, err_ctor, hir_vec![from_expr], None)
+                        };
+                        let err_pat = pat_err(lctx, e.span, pat_ident(lctx, e.span, err_ident));
+                        let ret_expr = expr(lctx, e.span,
+                                            hir::Expr_::ExprRet(Some(err_expr)), None);
+
+                        arm(hir_vec![err_pat], ret_expr)
+                    };
+
+                    expr_match(lctx, e.span, sub_expr, hir_vec![err_arm, ok_arm],
+                               hir::MatchSource::TryDesugar, None)
+                })
+            }
+
+            ExprKind::Mac(_) => panic!("Shouldn't exist here"),
+        },
+        span: e.span,
+        attrs: e.attrs.clone(),
+    })
+}
+
+pub fn lower_stmt(lctx: &LoweringContext, s: &Stmt) -> hir::Stmt {
+    match s.node {
+        StmtKind::Decl(ref d, id) => {
+            Spanned {
+                node: hir::StmtDecl(lower_decl(lctx, d), id),
+                span: s.span,
+            }
+        }
+        StmtKind::Expr(ref e, id) => {
+            Spanned {
+                node: hir::StmtExpr(lower_expr(lctx, e), id),
+                span: s.span,
+            }
+        }
+        StmtKind::Semi(ref e, id) => {
+            Spanned {
+                node: hir::StmtSemi(lower_expr(lctx, e), id),
+                span: s.span,
+            }
+        }
+        StmtKind::Mac(..) => panic!("Shouldn't exist here"),
+    }
+}
+
+pub fn lower_capture_clause(_lctx: &LoweringContext, c: CaptureBy) -> hir::CaptureClause {
+    match c {
+        CaptureBy::Value => hir::CaptureByValue,
+        CaptureBy::Ref => hir::CaptureByRef,
+    }
+}
+
+pub fn lower_visibility(lctx: &LoweringContext, v: &Visibility) -> hir::Visibility {
+    match *v {
+        Visibility::Public => hir::Public,
+        Visibility::Inherited => hir::Inherited,
+        _ => panic!(lctx.diagnostic().fatal("pub(restricted) is not implemented yet!"))
+    }
+}
+
+pub fn lower_defaultness(_lctx: &LoweringContext, d: Defaultness) -> hir::Defaultness {
+    match d {
+        Defaultness::Default => hir::Defaultness::Default,
+        Defaultness::Final => hir::Defaultness::Final,
+    }
+}
+
+pub fn lower_block_check_mode(lctx: &LoweringContext, b: &BlockCheckMode) -> hir::BlockCheckMode {
+    match *b {
+        BlockCheckMode::Default => hir::DefaultBlock,
+        BlockCheckMode::Unsafe(u) => hir::UnsafeBlock(lower_unsafe_source(lctx, u)),
+    }
+}
+
+pub fn lower_binding_mode(lctx: &LoweringContext, b: &BindingMode) -> hir::BindingMode {
+    match *b {
+        BindingMode::ByRef(m) => hir::BindByRef(lower_mutability(lctx, m)),
+        BindingMode::ByValue(m) => hir::BindByValue(lower_mutability(lctx, m)),
+    }
+}
+
+pub fn lower_unsafe_source(_lctx: &LoweringContext, u: UnsafeSource) -> hir::UnsafeSource {
+    match u {
+        CompilerGenerated => hir::CompilerGenerated,
+        UserProvided => hir::UserProvided,
+    }
+}
+
+pub fn lower_impl_polarity(_lctx: &LoweringContext, i: ImplPolarity) -> hir::ImplPolarity {
+    match i {
+        ImplPolarity::Positive => hir::ImplPolarity::Positive,
+        ImplPolarity::Negative => hir::ImplPolarity::Negative,
+    }
+}
+
+pub fn lower_trait_bound_modifier(_lctx: &LoweringContext,
+                                  f: TraitBoundModifier)
+                                  -> hir::TraitBoundModifier {
+    match f {
+        TraitBoundModifier::None => hir::TraitBoundModifier::None,
+        TraitBoundModifier::Maybe => hir::TraitBoundModifier::Maybe,
+    }
+}
+
+// Helper functions for building HIR.
+
+fn arm(pats: hir::HirVec<P<hir::Pat>>, expr: P<hir::Expr>) -> hir::Arm {
+    hir::Arm {
+        attrs: hir_vec![],
+        pats: pats,
+        guard: None,
+        body: expr,
+    }
+}
+
+fn field(name: Name, expr: P<hir::Expr>, span: Span) -> hir::Field {
+    hir::Field {
+        name: Spanned {
+            node: name,
+            span: span,
+        },
+        span: span,
+        expr: expr,
+    }
+}
+
+fn expr_break(lctx: &LoweringContext, span: Span,
+              attrs: ThinAttributes) -> P<hir::Expr> {
+    expr(lctx, span, hir::ExprBreak(None), attrs)
+}
+
+fn expr_call(lctx: &LoweringContext,
+             span: Span,
+             e: P<hir::Expr>,
+             args: hir::HirVec<P<hir::Expr>>,
+             attrs: ThinAttributes)
+             -> P<hir::Expr> {
+    expr(lctx, span, hir::ExprCall(e, args), attrs)
+}
+
+fn expr_ident(lctx: &LoweringContext, span: Span, id: hir::Ident,
+              attrs: ThinAttributes) -> P<hir::Expr> {
+    expr_path(lctx, path_ident(span, id), attrs)
+}
+
+fn expr_mut_addr_of(lctx: &LoweringContext, span: Span, e: P<hir::Expr>,
+                    attrs: ThinAttributes) -> P<hir::Expr> {
+    expr(lctx, span, hir::ExprAddrOf(hir::MutMutable, e), attrs)
+}
+
+fn expr_path(lctx: &LoweringContext, path: hir::Path,
+             attrs: ThinAttributes) -> P<hir::Expr> {
+    expr(lctx, path.span, hir::ExprPath(None, path), attrs)
+}
+
+fn expr_match(lctx: &LoweringContext,
+              span: Span,
+              arg: P<hir::Expr>,
+              arms: hir::HirVec<hir::Arm>,
+              source: hir::MatchSource,
+              attrs: ThinAttributes)
+              -> P<hir::Expr> {
+    expr(lctx, span, hir::ExprMatch(arg, arms, source), attrs)
+}
+
+fn expr_block(lctx: &LoweringContext, b: P<hir::Block>,
+              attrs: ThinAttributes) -> P<hir::Expr> {
+    expr(lctx, b.span, hir::ExprBlock(b), attrs)
+}
+
+fn expr_tuple(lctx: &LoweringContext, sp: Span, exprs: hir::HirVec<P<hir::Expr>>,
+              attrs: ThinAttributes) -> P<hir::Expr> {
+    expr(lctx, sp, hir::ExprTup(exprs), attrs)
+}
+
+fn expr_struct(lctx: &LoweringContext,
+               sp: Span,
+               path: hir::Path,
+               fields: hir::HirVec<hir::Field>,
+               e: Option<P<hir::Expr>>,
+               attrs: ThinAttributes) -> P<hir::Expr> {
+    expr(lctx, sp, hir::ExprStruct(path, fields, e), attrs)
+}
+
+fn expr(lctx: &LoweringContext, span: Span, node: hir::Expr_,
+        attrs: ThinAttributes) -> P<hir::Expr> {
+    P(hir::Expr {
+        id: lctx.next_id(),
+        node: node,
+        span: span,
+        attrs: attrs,
+    })
+}
+
+fn stmt_let(lctx: &LoweringContext,
+            sp: Span,
+            mutbl: bool,
+            ident: hir::Ident,
+            ex: P<hir::Expr>,
+            attrs: ThinAttributes)
+            -> hir::Stmt {
+    let pat = if mutbl {
+        pat_ident_binding_mode(lctx, sp, ident, hir::BindByValue(hir::MutMutable))
+    } else {
+        pat_ident(lctx, sp, ident)
+    };
+    let local = P(hir::Local {
+        pat: pat,
+        ty: None,
+        init: Some(ex),
+        id: lctx.next_id(),
+        span: sp,
+        attrs: attrs,
+    });
+    let decl = respan(sp, hir::DeclLocal(local));
+    respan(sp, hir::StmtDecl(P(decl), lctx.next_id()))
+}
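+
+// Illustrative note (not part of the original source): `stmt_let(lctx, sp, false,
+// ident, ex, None)` builds the HIR equivalent of `let ident = ex;`, while passing
+// `true` for `mutbl` binds the pattern by value and mutably, i.e. `let mut ident = ex;`.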
+
+fn block_expr(lctx: &LoweringContext, expr: P<hir::Expr>) -> P<hir::Block> {
+    block_all(lctx, expr.span, hir::HirVec::new(), Some(expr))
+}
+
+fn block_all(lctx: &LoweringContext,
+             span: Span,
+             stmts: hir::HirVec<hir::Stmt>,
+             expr: Option<P<hir::Expr>>)
+             -> P<hir::Block> {
+    P(hir::Block {
+        stmts: stmts,
+        expr: expr,
+        id: lctx.next_id(),
+        rules: hir::DefaultBlock,
+        span: span,
+    })
+}
+
+fn pat_ok(lctx: &LoweringContext, span: Span, pat: P<hir::Pat>) -> P<hir::Pat> {
+    let ok = std_path(lctx, &["result", "Result", "Ok"]);
+    let path = path_global(span, ok);
+    pat_enum(lctx, span, path, hir_vec![pat])
+}
+
+fn pat_err(lctx: &LoweringContext, span: Span, pat: P<hir::Pat>) -> P<hir::Pat> {
+    let err = std_path(lctx, &["result", "Result", "Err"]);
+    let path = path_global(span, err);
+    pat_enum(lctx, span, path, hir_vec![pat])
+}
+
+fn pat_some(lctx: &LoweringContext, span: Span, pat: P<hir::Pat>) -> P<hir::Pat> {
+    let some = std_path(lctx, &["option", "Option", "Some"]);
+    let path = path_global(span, some);
+    pat_enum(lctx, span, path, hir_vec![pat])
+}
+
+fn pat_none(lctx: &LoweringContext, span: Span) -> P<hir::Pat> {
+    let none = std_path(lctx, &["option", "Option", "None"]);
+    let path = path_global(span, none);
+    pat_enum(lctx, span, path, hir_vec![])
+}
+
+fn pat_enum(lctx: &LoweringContext,
+            span: Span,
+            path: hir::Path,
+            subpats: hir::HirVec<P<hir::Pat>>)
+            -> P<hir::Pat> {
+    let pt = if subpats.is_empty() {
+        hir::PatKind::Path(path)
+    } else {
+        hir::PatKind::TupleStruct(path, Some(subpats))
+    };
+    pat(lctx, span, pt)
+}
+
+fn pat_ident(lctx: &LoweringContext, span: Span, ident: hir::Ident) -> P<hir::Pat> {
+    pat_ident_binding_mode(lctx, span, ident, hir::BindByValue(hir::MutImmutable))
+}
+
+fn pat_ident_binding_mode(lctx: &LoweringContext,
+                          span: Span,
+                          ident: hir::Ident,
+                          bm: hir::BindingMode)
+                          -> P<hir::Pat> {
+    let pat_ident = hir::PatKind::Ident(bm,
+                                  Spanned {
+                                      span: span,
+                                      node: ident,
+                                  },
+                                  None);
+    pat(lctx, span, pat_ident)
+}
+
+fn pat_wild(lctx: &LoweringContext, span: Span) -> P<hir::Pat> {
+    pat(lctx, span, hir::PatKind::Wild)
+}
+
+fn pat(lctx: &LoweringContext, span: Span, pat: hir::PatKind) -> P<hir::Pat> {
+    P(hir::Pat {
+        id: lctx.next_id(),
+        node: pat,
+        span: span,
+    })
+}
+
+fn path_ident(span: Span, id: hir::Ident) -> hir::Path {
+    path(span, vec![id])
+}
+
+fn path(span: Span, strs: Vec<hir::Ident>) -> hir::Path {
+    path_all(span, false, strs, hir::HirVec::new(), hir::HirVec::new(), hir::HirVec::new())
+}
+
+fn path_global(span: Span, strs: Vec<hir::Ident>) -> hir::Path {
+    path_all(span, true, strs, hir::HirVec::new(), hir::HirVec::new(), hir::HirVec::new())
+}
+
+fn path_all(sp: Span,
+            global: bool,
+            mut idents: Vec<hir::Ident>,
+            lifetimes: hir::HirVec<hir::Lifetime>,
+            types: hir::HirVec<P<hir::Ty>>,
+            bindings: hir::HirVec<hir::TypeBinding>)
+            -> hir::Path {
+    let last_identifier = idents.pop().unwrap();
+    let mut segments: Vec<hir::PathSegment> = idents.into_iter()
+                                                    .map(|ident| {
+                                                        hir::PathSegment {
+                                                            identifier: ident,
+                                                            parameters: hir::PathParameters::none(),
+                                                        }
+                                                    })
+                                                    .collect();
+    segments.push(hir::PathSegment {
+        identifier: last_identifier,
+        parameters: hir::AngleBracketedParameters(hir::AngleBracketedParameterData {
+            lifetimes: lifetimes,
+            types: types,
+            bindings: bindings,
+        }),
+    });
+    hir::Path {
+        span: sp,
+        global: global,
+        segments: segments.into(),
+    }
+}
+
+fn std_path(lctx: &LoweringContext, components: &[&str]) -> Vec<hir::Ident> {
+    let mut v = Vec::new();
+    if let Some(s) = lctx.crate_root {
+        v.push(hir::Ident::from_name(token::intern(s)));
+    }
+    v.extend(components.iter().map(|s| hir::Ident::from_name(token::intern(s))));
+    return v;
+}
+
+// Given a suffix ["b","c","d"], returns the global path `::std::b::c::d` when
+// `lctx.crate_root` is `Some("std")`, and `::core::b::c::d` when it is `Some("core")`.
+fn core_path(lctx: &LoweringContext, span: Span, components: &[&str]) -> hir::Path {
+    let idents = std_path(lctx, components);
+    path_global(span, idents)
+}
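+
+// Illustrative sketch (not part of the original source): given some `lctx` and
+// `span` in scope, and assuming `lctx.crate_root` is `Some("std")`, the two
+// helpers above compose as follows:
+//
+//     let idents = std_path(lctx, &["option", "Option", "Some"]);
+//     // idents are the identifiers `std`, `option`, `Option`, `Some`
+//     let p = core_path(lctx, span, &["option", "Option", "Some"]);
+//     // `p` is the global path `::std::option::Option::Some`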
+
+fn signal_block_expr(lctx: &LoweringContext,
+                     stmts: hir::HirVec<hir::Stmt>,
+                     expr: P<hir::Expr>,
+                     span: Span,
+                     rule: hir::BlockCheckMode,
+                     attrs: ThinAttributes)
+                     -> P<hir::Expr> {
+    let id = lctx.next_id();
+    expr_block(lctx,
+               P(hir::Block {
+                   rules: rule,
+                   span: span,
+                   id: id,
+                   stmts: stmts,
+                   expr: Some(expr),
+               }),
+               attrs)
+}
+
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use syntax::ast::{self, NodeId, NodeIdAssigner};
+    use syntax::{parse, codemap};
+    use syntax::fold::Folder;
+    use std::cell::Cell;
+
+    struct MockAssigner {
+        next_id: Cell<NodeId>,
+    }
+
+    impl MockAssigner {
+        fn new() -> MockAssigner {
+            MockAssigner { next_id: Cell::new(0) }
+        }
+    }
+
+    trait FakeExtCtxt {
+        fn call_site(&self) -> codemap::Span;
+        fn cfg(&self) -> ast::CrateConfig;
+        fn ident_of(&self, st: &str) -> ast::Ident;
+        fn name_of(&self, st: &str) -> ast::Name;
+        fn parse_sess(&self) -> &parse::ParseSess;
+    }
+
+    impl FakeExtCtxt for parse::ParseSess {
+        fn call_site(&self) -> codemap::Span {
+            codemap::Span {
+                lo: codemap::BytePos(0),
+                hi: codemap::BytePos(0),
+                expn_id: codemap::NO_EXPANSION,
+            }
+        }
+        fn cfg(&self) -> ast::CrateConfig {
+            Vec::new()
+        }
+        fn ident_of(&self, st: &str) -> ast::Ident {
+            parse::token::str_to_ident(st)
+        }
+        fn name_of(&self, st: &str) -> ast::Name {
+            parse::token::intern(st)
+        }
+        fn parse_sess(&self) -> &parse::ParseSess {
+            self
+        }
+    }
+
+    impl NodeIdAssigner for MockAssigner {
+        fn next_node_id(&self) -> NodeId {
+            let result = self.next_id.get();
+            self.next_id.set(result + 1);
+            result
+        }
+
+        fn peek_node_id(&self) -> NodeId {
+            self.next_id.get()
+        }
+    }
+
+    impl Folder for MockAssigner {
+        fn new_id(&mut self, old_id: NodeId) -> NodeId {
+            assert_eq!(old_id, ast::DUMMY_NODE_ID);
+            self.next_node_id()
+        }
+    }
+
+    #[test]
+    fn test_preserves_ids() {
+        let cx = parse::ParseSess::new();
+        let mut assigner = MockAssigner::new();
+
+        let ast_if_let = quote_expr!(&cx,
+                                     if let Some(foo) = baz {
+                                         bar(foo);
+                                     });
+        let ast_if_let = assigner.fold_expr(ast_if_let);
+        let ast_while_let = quote_expr!(&cx,
+                                        while let Some(foo) = baz {
+                                            bar(foo);
+                                        });
+        let ast_while_let = assigner.fold_expr(ast_while_let);
+        let ast_for = quote_expr!(&cx,
+                                  for i in 0..10 {
+                                      for j in 0..10 {
+                                          foo(i, j);
+                                      }
+                                  });
+        let ast_for = assigner.fold_expr(ast_for);
+        let ast_in = quote_expr!(&cx, in HEAP { foo() });
+        let ast_in = assigner.fold_expr(ast_in);
+
+        let lctx = LoweringContext::new(&assigner, None);
+        let hir1 = lower_expr(&lctx, &ast_if_let);
+        let hir2 = lower_expr(&lctx, &ast_if_let);
+        assert!(hir1 == hir2);
+
+        let hir1 = lower_expr(&lctx, &ast_while_let);
+        let hir2 = lower_expr(&lctx, &ast_while_let);
+        assert!(hir1 == hir2);
+
+        let hir1 = lower_expr(&lctx, &ast_for);
+        let hir2 = lower_expr(&lctx, &ast_for);
+        assert!(hir1 == hir2);
+
+        let hir1 = lower_expr(&lctx, &ast_in);
+        let hir2 = lower_expr(&lctx, &ast_in);
+        assert!(hir1 == hir2);
+    }
+}
diff --git a/src/librustc/hir/map/blocks.rs b/src/librustc/hir/map/blocks.rs
new file mode 100644 (file)
index 0000000..8c62622
--- /dev/null
@@ -0,0 +1,264 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! This module provides a simplified abstraction for working with
+//! code blocks identified by their integer node-id.  In particular,
+//! it captures a common set of attributes that all "function-like
+//! things" (represented by `FnLike` instances) share.  For example,
+//! all `FnLike` instances have a type signature (be it explicit or
+//! inferred).  And all `FnLike` instances have a body, i.e. the code
+//! that is run when the function-like thing it represents is invoked.
+//!
+//! With the above abstraction in place, one can treat the program
+//! text as a collection of blocks of code (and most such blocks are
+//! nested within a uniquely determined `FnLike`), and users can ask
+//! for the `Code` associated with a particular NodeId.
+
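+// Illustrative sketch (not part of the original source): a typical consumer is
+// expected to look a node up in the HIR map and ask for its `Code`, roughly:
+//
+//     // `map` is assumed to be an `hir::map::Map`, `id` a `NodeId`.
+//     if let Some(code) = Code::from_node(map.get(id)) {
+//         let _block_or_fn_id = code.id();
+//     }
+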
+pub use self::Code::*;
+
+use hir::map::{self, Node};
+use syntax::abi;
+use hir::{Block, FnDecl};
+use syntax::ast::{Attribute, Name, NodeId};
+use syntax::attr::ThinAttributesExt;
+use hir as ast;
+use syntax::codemap::Span;
+use hir::intravisit::FnKind;
+
+/// An FnLikeNode is a Node that is like a fn, in that it has a decl
+/// and a body (as well as a NodeId, a span, etc).
+///
+/// More specifically, it is one of:
+///   - A function item,
+///   - A closure expr (i.e. an ExprClosure), or
+///   - The default implementation for a trait method.
+///
+/// To construct one, use the `Code::from_node` function.
+#[derive(Copy, Clone)]
+pub struct FnLikeNode<'a> { node: map::Node<'a> }
+
+/// MaybeFnLike provides a method that indicates whether an object
+/// corresponds to some FnLikeNode.
+pub trait MaybeFnLike { fn is_fn_like(&self) -> bool; }
+
+/// Components shared by fn-like things (fn items, methods, closures).
+pub struct FnParts<'a> {
+    pub decl: &'a FnDecl,
+    pub body: &'a Block,
+    pub kind: FnKind<'a>,
+    pub span: Span,
+    pub id:   NodeId,
+}
+
+impl MaybeFnLike for ast::Item {
+    fn is_fn_like(&self) -> bool {
+        match self.node { ast::ItemFn(..) => true, _ => false, }
+    }
+}
+
+impl MaybeFnLike for ast::TraitItem {
+    fn is_fn_like(&self) -> bool {
+        match self.node { ast::MethodTraitItem(_, Some(_)) => true, _ => false, }
+    }
+}
+
+impl MaybeFnLike for ast::Expr {
+    fn is_fn_like(&self) -> bool {
+        match self.node {
+            ast::ExprClosure(..) => true,
+            _ => false,
+        }
+    }
+}
+
+/// Carries either an FnLikeNode or a Block, as these are the two
+/// constructs that correspond to "code" (as in, something from which
+/// we can construct a control-flow graph).
+#[derive(Copy, Clone)]
+pub enum Code<'a> {
+    FnLikeCode(FnLikeNode<'a>),
+    BlockCode(&'a Block),
+}
+
+impl<'a> Code<'a> {
+    pub fn id(&self) -> NodeId {
+        match *self {
+            FnLikeCode(node) => node.id(),
+            BlockCode(block) => block.id,
+        }
+    }
+
+    /// Attempts to construct a Code from presumed FnLike or Block node input.
+    pub fn from_node(node: Node) -> Option<Code> {
+        if let map::NodeBlock(block) = node {
+            Some(BlockCode(block))
+        } else {
+            FnLikeNode::from_node(node).map(|fn_like| FnLikeCode(fn_like))
+        }
+    }
+}
+
+/// These are all the components one can extract from a fn item for
+/// use when implementing FnLikeNode operations.
+struct ItemFnParts<'a> {
+    name:     Name,
+    decl:     &'a ast::FnDecl,
+    unsafety: ast::Unsafety,
+    constness: ast::Constness,
+    abi:      abi::Abi,
+    vis:      &'a ast::Visibility,
+    generics: &'a ast::Generics,
+    body:     &'a Block,
+    id:       NodeId,
+    span:     Span,
+    attrs:    &'a [Attribute],
+}
+
+/// These are all the components one can extract from a closure expr
+/// for use when implementing FnLikeNode operations.
+struct ClosureParts<'a> {
+    decl: &'a FnDecl,
+    body: &'a Block,
+    id: NodeId,
+    span: Span,
+    attrs: &'a [Attribute],
+}
+
+impl<'a> ClosureParts<'a> {
+    fn new(d: &'a FnDecl, b: &'a Block, id: NodeId, s: Span, attrs: &'a [Attribute]) -> Self {
+        ClosureParts { decl: d, body: b, id: id, span: s, attrs: attrs }
+    }
+}
+
+impl<'a> FnLikeNode<'a> {
+    /// Attempts to construct a FnLikeNode from presumed FnLike node input.
+    pub fn from_node(node: Node) -> Option<FnLikeNode> {
+        let fn_like = match node {
+            map::NodeItem(item) => item.is_fn_like(),
+            map::NodeTraitItem(tm) => tm.is_fn_like(),
+            map::NodeImplItem(_) => true,
+            map::NodeExpr(e) => e.is_fn_like(),
+            _ => false
+        };
+        if fn_like {
+            Some(FnLikeNode {
+                node: node
+            })
+        } else {
+            None
+        }
+    }
+
+    pub fn to_fn_parts(self) -> FnParts<'a> {
+        FnParts {
+            decl: self.decl(),
+            body: self.body(),
+            kind: self.kind(),
+            span: self.span(),
+            id:   self.id(),
+        }
+    }
+
+    pub fn body(self) -> &'a Block {
+        self.handle(|i: ItemFnParts<'a>|  &*i.body,
+                    |_, _, _: &'a ast::MethodSig, _, body: &'a ast::Block, _, _|  body,
+                    |c: ClosureParts<'a>| c.body)
+    }
+
+    pub fn decl(self) -> &'a FnDecl {
+        self.handle(|i: ItemFnParts<'a>|  &*i.decl,
+                    |_, _, sig: &'a ast::MethodSig, _, _, _, _|  &sig.decl,
+                    |c: ClosureParts<'a>| c.decl)
+    }
+
+    pub fn span(self) -> Span {
+        self.handle(|i: ItemFnParts|     i.span,
+                    |_, _, _: &'a ast::MethodSig, _, _, span, _| span,
+                    |c: ClosureParts|    c.span)
+    }
+
+    pub fn id(self) -> NodeId {
+        self.handle(|i: ItemFnParts|     i.id,
+                    |id, _, _: &'a ast::MethodSig, _, _, _, _| id,
+                    |c: ClosureParts|    c.id)
+    }
+
+    pub fn kind(self) -> FnKind<'a> {
+        let item = |p: ItemFnParts<'a>| -> FnKind<'a> {
+            FnKind::ItemFn(p.name, p.generics, p.unsafety, p.constness, p.abi, p.vis, p.attrs)
+        };
+        let closure = |c: ClosureParts<'a>| {
+            FnKind::Closure(c.attrs)
+        };
+        let method = |_, name: Name, sig: &'a ast::MethodSig, vis, _, _, attrs| {
+            FnKind::Method(name, sig, vis, attrs)
+        };
+        self.handle(item, method, closure)
+    }
+
+    fn handle<A, I, M, C>(self, item_fn: I, method: M, closure: C) -> A where
+        I: FnOnce(ItemFnParts<'a>) -> A,
+        M: FnOnce(NodeId,
+                  Name,
+                  &'a ast::MethodSig,
+                  Option<&'a ast::Visibility>,
+                  &'a ast::Block,
+                  Span,
+                  &'a [Attribute])
+                  -> A,
+        C: FnOnce(ClosureParts<'a>) -> A,
+    {
+        match self.node {
+            map::NodeItem(i) => match i.node {
+                ast::ItemFn(ref decl, unsafety, constness, abi, ref generics, ref block) =>
+                    item_fn(ItemFnParts {
+                        id: i.id,
+                        name: i.name,
+                        decl: &decl,
+                        unsafety: unsafety,
+                        body: &block,
+                        generics: generics,
+                        abi: abi,
+                        vis: &i.vis,
+                        constness: constness,
+                        span: i.span,
+                        attrs: &i.attrs,
+                    }),
+                _ => bug!("item FnLikeNode that is not fn-like"),
+            },
+            map::NodeTraitItem(ti) => match ti.node {
+                ast::MethodTraitItem(ref sig, Some(ref body)) => {
+                    method(ti.id, ti.name, sig, None, body, ti.span, &ti.attrs)
+                }
+                _ => bug!("trait method FnLikeNode that is not fn-like"),
+            },
+            map::NodeImplItem(ii) => {
+                match ii.node {
+                    ast::ImplItemKind::Method(ref sig, ref body) => {
+                        method(ii.id, ii.name, sig, Some(&ii.vis), body, ii.span, &ii.attrs)
+                    }
+                    _ => {
+                        bug!("impl method FnLikeNode that is not fn-like")
+                    }
+                }
+            }
+            map::NodeExpr(e) => match e.node {
+                ast::ExprClosure(_, ref decl, ref block) =>
+                    closure(ClosureParts::new(&decl,
+                                              &block,
+                                              e.id,
+                                              e.span,
+                                              e.attrs.as_attr_slice())),
+                _ => bug!("expr FnLikeNode that is not fn-like"),
+            },
+            _ => bug!("other FnLikeNode that is not fn-like"),
+        }
+    }
+}
diff --git a/src/librustc/hir/map/collector.rs b/src/librustc/hir/map/collector.rs
new file mode 100644 (file)
index 0000000..94fa393
--- /dev/null
@@ -0,0 +1,342 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::*;
+use super::MapEntry::*;
+
+use hir::*;
+use hir::intravisit::Visitor;
+use hir::def_id::{CRATE_DEF_INDEX, DefId, DefIndex};
+use middle::cstore::InlinedItem;
+use std::iter::repeat;
+use syntax::ast::{NodeId, CRATE_NODE_ID, DUMMY_NODE_ID};
+use syntax::codemap::Span;
+
+/// A Visitor that walks over an AST and collects Nodes into an AST
+/// Map.
+pub struct NodeCollector<'ast> {
+    pub krate: &'ast Crate,
+    pub map: Vec<MapEntry<'ast>>,
+    pub definitions: Definitions,
+    pub parent_node: NodeId,
+}
+
+impl<'ast> NodeCollector<'ast> {
+    pub fn root(krate: &'ast Crate) -> NodeCollector<'ast> {
+        let mut collector = NodeCollector {
+            krate: krate,
+            map: vec![],
+            definitions: Definitions::new(),
+            parent_node: CRATE_NODE_ID,
+        };
+        collector.insert_entry(CRATE_NODE_ID, RootCrate);
+
+        let result = collector.create_def_with_parent(None, CRATE_NODE_ID, DefPathData::CrateRoot);
+        assert_eq!(result, CRATE_DEF_INDEX);
+
+        collector.create_def_with_parent(Some(CRATE_DEF_INDEX), DUMMY_NODE_ID, DefPathData::Misc);
+
+        collector
+    }
+
+    pub fn extend(krate: &'ast Crate,
+                  parent: &'ast InlinedItem,
+                  parent_node: NodeId,
+                  parent_def_path: DefPath,
+                  parent_def_id: DefId,
+                  map: Vec<MapEntry<'ast>>,
+                  definitions: Definitions)
+                  -> NodeCollector<'ast> {
+        let mut collector = NodeCollector {
+            krate: krate,
+            map: map,
+            parent_node: parent_node,
+            definitions: definitions,
+        };
+
+        assert_eq!(parent_def_path.krate, parent_def_id.krate);
+        let root_path = Box::new(InlinedRootPath {
+            data: parent_def_path.data,
+            def_id: parent_def_id,
+        });
+
+        collector.insert_entry(parent_node, RootInlinedParent(parent));
+        collector.create_def(parent_node, DefPathData::InlinedRoot(root_path));
+
+        collector
+    }
+
+    fn parent_def(&self) -> Option<DefIndex> {
+        let mut parent_node = Some(self.parent_node);
+        while let Some(p) = parent_node {
+            if let Some(q) = self.definitions.opt_def_index(p) {
+                return Some(q);
+            }
+            parent_node = self.map[p as usize].parent_node();
+        }
+        None
+    }
+
+    fn create_def(&mut self, node_id: NodeId, data: DefPathData) -> DefIndex {
+        let parent_def = self.parent_def();
+        debug!("create_def(node_id={:?}, data={:?}, parent_def={:?})", node_id, data, parent_def);
+        self.definitions.create_def_with_parent(parent_def, node_id, data)
+    }
+
+    fn create_def_with_parent(&mut self,
+                              parent: Option<DefIndex>,
+                              node_id: NodeId,
+                              data: DefPathData)
+                              -> DefIndex {
+        self.definitions.create_def_with_parent(parent, node_id, data)
+    }
+
+    fn insert_entry(&mut self, id: NodeId, entry: MapEntry<'ast>) {
+        debug!("ast_map: {:?} => {:?}", id, entry);
+        let len = self.map.len();
+        if id as usize >= len {
+            self.map.extend(repeat(NotPresent).take(id as usize - len + 1));
+        }
+        self.map[id as usize] = entry;
+    }
+
+    fn insert_def(&mut self, id: NodeId, node: Node<'ast>, data: DefPathData) -> DefIndex {
+        self.insert(id, node);
+        self.create_def(id, data)
+    }
+
+    fn insert(&mut self, id: NodeId, node: Node<'ast>) {
+        let entry = MapEntry::from_node(self.parent_node, node);
+        self.insert_entry(id, entry);
+    }
+}
+
+impl<'ast> Visitor<'ast> for NodeCollector<'ast> {
+    /// Because we want to track parent items and so forth, enable
+    /// deep walking so that we walk nested items in the context of
+    /// their outer items.
+    fn visit_nested_item(&mut self, item: ItemId) {
+        debug!("visit_nested_item: {:?}", item);
+        self.visit_item(self.krate.item(item.id))
+    }
+
+    fn visit_item(&mut self, i: &'ast Item) {
+        debug!("visit_item: {:?}", i);
+
+        // Pick the def data. This need not be unique, but the more
+        // information we encapsulate into it, the better.
+        let def_data = match i.node {
+            ItemDefaultImpl(..) | ItemImpl(..) =>
+                DefPathData::Impl,
+            ItemEnum(..) | ItemStruct(..) | ItemTrait(..) |
+            ItemExternCrate(..) | ItemMod(..) | ItemForeignMod(..) |
+            ItemTy(..) =>
+                DefPathData::TypeNs(i.name),
+            ItemStatic(..) | ItemConst(..) | ItemFn(..) =>
+                DefPathData::ValueNs(i.name),
+            ItemUse(..) =>
+                DefPathData::Misc,
+        };
+
+        self.insert_def(i.id, NodeItem(i), def_data);
+
+        let parent_node = self.parent_node;
+        self.parent_node = i.id;
+
+        match i.node {
+            ItemImpl(..) => {}
+            ItemEnum(ref enum_definition, _) => {
+                for v in &enum_definition.variants {
+                    let variant_def_index =
+                        self.insert_def(v.node.data.id(),
+                                        NodeVariant(v),
+                                        DefPathData::EnumVariant(v.node.name));
+
+                    for field in v.node.data.fields() {
+                        self.create_def_with_parent(
+                            Some(variant_def_index),
+                            field.id,
+                            DefPathData::Field(field.name));
+                    }
+                }
+            }
+            ItemForeignMod(..) => {
+            }
+            ItemStruct(ref struct_def, _) => {
+                // If this is a tuple-like struct, register the constructor.
+                if !struct_def.is_struct() {
+                    self.insert_def(struct_def.id(),
+                                    NodeStructCtor(struct_def),
+                                    DefPathData::StructCtor);
+                }
+
+                for field in struct_def.fields() {
+                    self.create_def(field.id, DefPathData::Field(field.name));
+                }
+            }
+            ItemTrait(_, _, ref bounds, _) => {
+                for b in bounds.iter() {
+                    if let TraitTyParamBound(ref t, TraitBoundModifier::None) = *b {
+                        self.insert(t.trait_ref.ref_id, NodeItem(i));
+                    }
+                }
+            }
+            ItemUse(ref view_path) => {
+                match view_path.node {
+                    ViewPathList(_, ref paths) => {
+                        for path in paths {
+                            self.insert(path.node.id(), NodeItem(i));
+                        }
+                    }
+                    _ => ()
+                }
+            }
+            _ => {}
+        }
+        intravisit::walk_item(self, i);
+        self.parent_node = parent_node;
+    }
+
+    fn visit_foreign_item(&mut self, foreign_item: &'ast ForeignItem) {
+        self.insert_def(foreign_item.id,
+                        NodeForeignItem(foreign_item),
+                        DefPathData::ValueNs(foreign_item.name));
+
+        let parent_node = self.parent_node;
+        self.parent_node = foreign_item.id;
+        intravisit::walk_foreign_item(self, foreign_item);
+        self.parent_node = parent_node;
+    }
+
+    fn visit_generics(&mut self, generics: &'ast Generics) {
+        for ty_param in generics.ty_params.iter() {
+            self.insert_def(ty_param.id,
+                            NodeTyParam(ty_param),
+                            DefPathData::TypeParam(ty_param.name));
+        }
+
+        intravisit::walk_generics(self, generics);
+    }
+
+    fn visit_trait_item(&mut self, ti: &'ast TraitItem) {
+        let def_data = match ti.node {
+            MethodTraitItem(..) | ConstTraitItem(..) => DefPathData::ValueNs(ti.name),
+            TypeTraitItem(..) => DefPathData::TypeNs(ti.name),
+        };
+
+        self.insert(ti.id, NodeTraitItem(ti));
+        self.create_def(ti.id, def_data);
+
+        let parent_node = self.parent_node;
+        self.parent_node = ti.id;
+
+        match ti.node {
+            ConstTraitItem(_, Some(ref expr)) => {
+                self.create_def(expr.id, DefPathData::Initializer);
+            }
+            _ => { }
+        }
+
+        intravisit::walk_trait_item(self, ti);
+
+        self.parent_node = parent_node;
+    }
+
+    fn visit_impl_item(&mut self, ii: &'ast ImplItem) {
+        let def_data = match ii.node {
+            ImplItemKind::Method(..) | ImplItemKind::Const(..) => DefPathData::ValueNs(ii.name),
+            ImplItemKind::Type(..) => DefPathData::TypeNs(ii.name),
+        };
+
+        self.insert_def(ii.id, NodeImplItem(ii), def_data);
+
+        let parent_node = self.parent_node;
+        self.parent_node = ii.id;
+
+        match ii.node {
+            ImplItemKind::Const(_, ref expr) => {
+                self.create_def(expr.id, DefPathData::Initializer);
+            }
+            _ => { }
+        }
+
+        intravisit::walk_impl_item(self, ii);
+
+        self.parent_node = parent_node;
+    }
+
+    fn visit_pat(&mut self, pat: &'ast Pat) {
+        let maybe_binding = match pat.node {
+            PatKind::Ident(_, id, _) => Some(id.node),
+            _ => None
+        };
+
+        if let Some(id) = maybe_binding {
+            self.insert_def(pat.id, NodeLocal(pat), DefPathData::Binding(id.name));
+        } else {
+            self.insert(pat.id, NodePat(pat));
+        }
+
+        let parent_node = self.parent_node;
+        self.parent_node = pat.id;
+        intravisit::walk_pat(self, pat);
+        self.parent_node = parent_node;
+    }
+
+    fn visit_expr(&mut self, expr: &'ast Expr) {
+        self.insert(expr.id, NodeExpr(expr));
+
+        match expr.node {
+            ExprClosure(..) => { self.create_def(expr.id, DefPathData::ClosureExpr); }
+            _ => { }
+        }
+
+        let parent_node = self.parent_node;
+        self.parent_node = expr.id;
+        intravisit::walk_expr(self, expr);
+        self.parent_node = parent_node;
+    }
+
+    fn visit_stmt(&mut self, stmt: &'ast Stmt) {
+        let id = stmt.node.id();
+        self.insert(id, NodeStmt(stmt));
+        let parent_node = self.parent_node;
+        self.parent_node = id;
+        intravisit::walk_stmt(self, stmt);
+        self.parent_node = parent_node;
+    }
+
+    fn visit_fn(&mut self, fk: intravisit::FnKind<'ast>, fd: &'ast FnDecl,
+                b: &'ast Block, s: Span, id: NodeId) {
+        assert_eq!(self.parent_node, id);
+        intravisit::walk_fn(self, fk, fd, b, s);
+    }
+
+    fn visit_block(&mut self, block: &'ast Block) {
+        self.insert(block.id, NodeBlock(block));
+        let parent_node = self.parent_node;
+        self.parent_node = block.id;
+        intravisit::walk_block(self, block);
+        self.parent_node = parent_node;
+    }
+
+    fn visit_lifetime(&mut self, lifetime: &'ast Lifetime) {
+        self.insert(lifetime.id, NodeLifetime(lifetime));
+    }
+
+    fn visit_lifetime_def(&mut self, def: &'ast LifetimeDef) {
+        self.create_def(def.lifetime.id, DefPathData::LifetimeDef(def.lifetime.name));
+        self.visit_lifetime(&def.lifetime);
+    }
+
+    fn visit_macro_def(&mut self, macro_def: &'ast MacroDef) {
+        self.create_def(macro_def.id, DefPathData::MacroDef(macro_def.name));
+    }
+}
diff --git a/src/librustc/hir/map/definitions.rs b/src/librustc/hir/map/definitions.rs
new file mode 100644 (file)
index 0000000..2e26fe5
--- /dev/null
@@ -0,0 +1,336 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use middle::cstore::LOCAL_CRATE;
+use hir::def_id::{DefId, DefIndex};
+use rustc_data_structures::fnv::FnvHashMap;
+use syntax::ast;
+use syntax::parse::token::InternedString;
+use util::nodemap::NodeMap;
+
+#[derive(Clone)]
+pub struct Definitions {
+    data: Vec<DefData>,
+    key_map: FnvHashMap<DefKey, DefIndex>,
+    node_map: NodeMap<DefIndex>,
+}
+
+/// A unique identifier that we can use to look up a definition
+/// precisely. It combines the index of the definition's parent (if
+/// any) with a `DisambiguatedDefPathData`.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
+pub struct DefKey {
+    /// Parent path.
+    pub parent: Option<DefIndex>,
+
+    /// Identifier of this node.
+    pub disambiguated_data: DisambiguatedDefPathData,
+}
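+
+// Illustrative example (not part of the original source): for a local crate
+// containing `mod a { fn f() {} }`, the `DefKey` created for `f` has `parent`
+// pointing at the `DefIndex` of `a`, and `disambiguated_data` holding
+// `DefPathData::ValueNs` for the name `f` with a disambiguator of 0 (the
+// typical case), matching how `NodeCollector::visit_item` picks def data.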
+
+/// Pair of `DefPathData` and an integer disambiguator. The integer is
+/// normally 0, but in the event that there are multiple defs with the
+/// same `parent` and `data`, we use this field to disambiguate
+/// between them. This introduces some artificial ordering dependency
+/// but means that if you have (e.g.) two impls for the same type in
+/// the same module, they do get distinct def-ids.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
+pub struct DisambiguatedDefPathData {
+    pub data: DefPathData,
+    pub disambiguator: u32
+}
+
+/// For each definition, we track the following data.  A definition
+/// here is defined somewhat circularly as "something with a def-id",
+/// but it generally corresponds to things like structs, enums, etc.
+/// There are also some rather random cases (like const initializer
+/// expressions) that are mostly just leftovers.
+#[derive(Clone, Debug)]
+pub struct DefData {
+    pub key: DefKey,
+
+    /// Local ID within the HIR.
+    pub node_id: ast::NodeId,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
+pub struct DefPath {
+    /// The path leading from the crate root to the item.
+    pub data: Vec<DisambiguatedDefPathData>,
+
+    /// What crate root is this path relative to?
+    pub krate: ast::CrateNum,
+}
+
+impl DefPath {
+    pub fn is_local(&self) -> bool {
+        self.krate == LOCAL_CRATE
+    }
+
+    pub fn make<FN>(start_krate: ast::CrateNum,
+                    start_index: DefIndex,
+                    mut get_key: FN) -> DefPath
+        where FN: FnMut(DefIndex) -> DefKey
+    {
+        let mut krate = start_krate;
+        let mut data = vec![];
+        let mut index = Some(start_index);
+        loop {
+            let p = index.unwrap();
+            let key = get_key(p);
+            match key.disambiguated_data.data {
+                DefPathData::CrateRoot => {
+                    assert!(key.parent.is_none());
+                    break;
+                }
+                DefPathData::InlinedRoot(ref p) => {
+                    assert!(key.parent.is_none());
+                    assert!(!p.def_id.is_local());
+                    data.extend(p.data.iter().cloned().rev());
+                    krate = p.def_id.krate;
+                    break;
+                }
+                _ => {
+                    data.push(key.disambiguated_data);
+                    index = key.parent;
+                }
+            }
+        }
+        data.reverse();
+        DefPath { data: data, krate: krate }
+    }
+}
+
+/// Root of an inlined item. We track the `DefPath` of the item within
+/// the original crate but also its def-id. This is kind of an
+/// augmented version of a `DefPath` that includes a `DefId`. This is
+/// all sort of ugly but the hope is that inlined items will be going
+/// away soon anyway.
+///
+/// Some of the constraints that led to the current approach:
+///
+/// - I don't want to have a `DefId` in the main `DefPath` because
+///   that gets serialized for incr. comp., and when reloaded the
+///   `DefId` is no longer valid. I'd rather maintain the invariant
+///   that every `DefId` is valid, and a potentially outdated `DefId` is
+///   represented as a `DefPath`.
+///   - (We don't serialize def-paths from inlined items, so it's ok to have one here.)
+/// - We need to be able to extract the def-id from inline items to
+///   make the symbol name. In theory we could retrace it from the
+///   data, but the metadata doesn't have the required indices, and I
+///   don't want to write the code to create one just for this.
+/// - It may be that we don't actually need `data` at all. We'll have
+///   to see about that.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
+pub struct InlinedRootPath {
+    pub data: Vec<DisambiguatedDefPathData>,
+    pub def_id: DefId,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
+pub enum DefPathData {
+    // Root: these should only be used for the root nodes, because
+    // they are treated specially by the `def_path` function.
+    CrateRoot,
+    InlinedRoot(Box<InlinedRootPath>),
+
+    // Catch-all for random DefId things like DUMMY_NODE_ID
+    Misc,
+
+    // Different kinds of items and item-like things:
+    Impl,
+    TypeNs(ast::Name), // something in the type NS
+    ValueNs(ast::Name), // something in the value NS
+    MacroDef(ast::Name),
+    ClosureExpr,
+
+    // Subportions of items
+    TypeParam(ast::Name),
+    LifetimeDef(ast::Name),
+    EnumVariant(ast::Name),
+    Field(ast::Name),
+    StructCtor, // implicit ctor for a tuple-like struct
+    Initializer, // initializer for a const
+    Binding(ast::Name), // pattern binding
+}
+
+impl Definitions {
+    pub fn new() -> Definitions {
+        Definitions {
+            data: vec![],
+            key_map: FnvHashMap(),
+            node_map: NodeMap(),
+        }
+    }
+
+    pub fn len(&self) -> usize {
+        self.data.len()
+    }
+
+    pub fn def_key(&self, index: DefIndex) -> DefKey {
+        self.data[index.as_usize()].key.clone()
+    }
+
+    /// Returns the path from the crate root to `index`. The root
+    /// nodes are not included in the path (i.e., this will be an
+    /// empty vector for the crate root). For an inlined item, this
+    /// will be the path of the item in the external crate (but the
+    /// path will begin with the path to the external crate).
+    pub fn def_path(&self, index: DefIndex) -> DefPath {
+        DefPath::make(LOCAL_CRATE, index, |p| self.def_key(p))
+    }
+
+    pub fn opt_def_index(&self, node: ast::NodeId) -> Option<DefIndex> {
+        self.node_map.get(&node).cloned()
+    }
+
+    pub fn opt_local_def_id(&self, node: ast::NodeId) -> Option<DefId> {
+        self.opt_def_index(node).map(DefId::local)
+    }
+
+    pub fn as_local_node_id(&self, def_id: DefId) -> Option<ast::NodeId> {
+        if def_id.krate == LOCAL_CRATE {
+            assert!(def_id.index.as_usize() < self.data.len());
+            Some(self.data[def_id.index.as_usize()].node_id)
+        } else {
+            None
+        }
+    }
+
+    pub fn retrace_path(&self, path: &DefPath) -> Option<DefIndex> {
+        debug!("retrace_path(path={:?})", path);
+
+        // we assume that we only want to retrace paths relative to
+        // the crate root
+        assert!(path.is_local());
+
+        let root_key = DefKey {
+            parent: None,
+            disambiguated_data: DisambiguatedDefPathData {
+                data: DefPathData::CrateRoot,
+                disambiguator: 0,
+            },
+        };
+        let root_id = self.key_map[&root_key];
+
+        debug!("retrace_path: root_id={:?}", root_id);
+
+        let mut id = root_id;
+        for data in &path.data {
+            let key = DefKey { parent: Some(id), disambiguated_data: data.clone() };
+            debug!("key = {:?}", key);
+            id = match self.key_map.get(&key) {
+                Some(&id) => id,
+                None => return None
+            };
+        }
+
+        Some(id)
+    }
+
+    pub fn create_def_with_parent(&mut self,
+                                  parent: Option<DefIndex>,
+                                  node_id: ast::NodeId,
+                                  data: DefPathData)
+                                  -> DefIndex {
+        debug!("create_def_with_parent(parent={:?}, node_id={:?}, data={:?})",
+               parent, node_id, data);
+
+        assert!(!self.node_map.contains_key(&node_id),
+                "adding a def'n for node-id {:?} and data {:?} but a previous def'n exists: {:?}",
+                node_id,
+                data,
+                self.data[self.node_map[&node_id].as_usize()]);
+
+        assert!(parent.is_some() ^ match data {
+            DefPathData::CrateRoot | DefPathData::InlinedRoot(_) => true,
+            _ => false,
+        });
+
+        // Find a unique DefKey. This basically means incrementing the disambiguator
+        // until we get no match.
+        let mut key = DefKey {
+            parent: parent,
+            disambiguated_data: DisambiguatedDefPathData {
+                data: data,
+                disambiguator: 0
+            }
+        };
+
+        while self.key_map.contains_key(&key) {
+            key.disambiguated_data.disambiguator += 1;
+        }
+
+        debug!("create_def_with_parent: after disambiguation, key = {:?}", key);
+
+        // Create the definition.
+        let index = DefIndex::new(self.data.len());
+        self.data.push(DefData { key: key.clone(), node_id: node_id });
+        debug!("create_def_with_parent: node_map[{:?}] = {:?}", node_id, index);
+        self.node_map.insert(node_id, index);
+        debug!("create_def_with_parent: key_map[{:?}] = {:?}", key, index);
+        self.key_map.insert(key, index);
+
+        index
+    }
+}
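+
+// A stand-alone sketch of the disambiguation loop in `create_def_with_parent`,
+// using a plain HashMap keyed on (parent, name, disambiguator). The types and
+// names here are simplified stand-ins, not the real `DefKey`/`Definitions`.
+#[cfg(test)]
+mod disambiguation_sketch {
+    use std::collections::HashMap;
+
+    type Key = (Option<usize>, String, u32);
+
+    fn create_def(key_map: &mut HashMap<Key, usize>,
+                  parent: Option<usize>,
+                  name: &str) -> usize {
+        // Bump the disambiguator until the (parent, name, disambiguator)
+        // triple is unused, mirroring the loop above.
+        let mut disambiguator = 0;
+        while key_map.contains_key(&(parent, name.to_string(), disambiguator)) {
+            disambiguator += 1;
+        }
+        let index = key_map.len();
+        key_map.insert((parent, name.to_string(), disambiguator), index);
+        index
+    }
+
+    #[test]
+    fn two_impls_under_one_parent_get_distinct_keys() {
+        let mut key_map = HashMap::new();
+        let root = create_def(&mut key_map, None, "{{root}}");
+        // Both impls would print as `{{impl}}`; the second one ends up
+        // with disambiguator 1, so the keys (and indices) stay distinct.
+        let a = create_def(&mut key_map, Some(root), "{{impl}}");
+        let b = create_def(&mut key_map, Some(root), "{{impl}}");
+        assert!(a != b);
+    }
+}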
+
+impl DefPathData {
+    pub fn as_interned_str(&self) -> InternedString {
+        use self::DefPathData::*;
+        match *self {
+            TypeNs(name) |
+            ValueNs(name) |
+            MacroDef(name) |
+            TypeParam(name) |
+            LifetimeDef(name) |
+            EnumVariant(name) |
+            Binding(name) |
+            Field(name) => {
+                name.as_str()
+            }
+
+            Impl => {
+                InternedString::new("{{impl}}")
+            }
+
+            // note that this does not show up in user printouts
+            CrateRoot => {
+                InternedString::new("{{root}}")
+            }
+
+            // note that this does not show up in user printouts
+            InlinedRoot(_) => {
+                InternedString::new("{{inlined-root}}")
+            }
+
+            Misc => {
+                InternedString::new("{{?}}")
+            }
+
+            ClosureExpr => {
+                InternedString::new("{{closure}}")
+            }
+
+            StructCtor => {
+                InternedString::new("{{constructor}}")
+            }
+
+            Initializer => {
+                InternedString::new("{{initializer}}")
+            }
+        }
+    }
+
+    pub fn to_string(&self) -> String {
+        self.as_interned_str().to_string()
+    }
+}
+
diff --git a/src/librustc/hir/map/mod.rs b/src/librustc/hir/map/mod.rs
new file mode 100644 (file)
index 0000000..7b8ddee
--- /dev/null
@@ -0,0 +1,981 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub use self::Node::*;
+use self::MapEntry::*;
+use self::collector::NodeCollector;
+pub use self::definitions::{Definitions, DefKey, DefPath, DefPathData,
+                            DisambiguatedDefPathData, InlinedRootPath};
+
+use dep_graph::{DepGraph, DepNode};
+
+use middle::cstore::InlinedItem;
+use middle::cstore::InlinedItem as II;
+use hir::def_id::{CRATE_DEF_INDEX, DefId};
+
+use syntax::abi::Abi;
+use syntax::ast::{self, Name, NodeId, DUMMY_NODE_ID};
+use syntax::attr::ThinAttributesExt;
+use syntax::codemap::{Span, Spanned};
+
+use hir::*;
+use hir::fold::Folder;
+use hir::print as pprust;
+
+use arena::TypedArena;
+use std::cell::RefCell;
+use std::io;
+use std::mem;
+
+pub mod blocks;
+mod collector;
+pub mod definitions;
+
+#[derive(Copy, Clone, Debug)]
+pub enum Node<'ast> {
+    NodeItem(&'ast Item),
+    NodeForeignItem(&'ast ForeignItem),
+    NodeTraitItem(&'ast TraitItem),
+    NodeImplItem(&'ast ImplItem),
+    NodeVariant(&'ast Variant),
+    NodeExpr(&'ast Expr),
+    NodeStmt(&'ast Stmt),
+    NodeLocal(&'ast Pat),
+    NodePat(&'ast Pat),
+    NodeBlock(&'ast Block),
+
+    /// NodeStructCtor represents a tuple struct.
+    NodeStructCtor(&'ast VariantData),
+
+    NodeLifetime(&'ast Lifetime),
+    NodeTyParam(&'ast TyParam)
+}
+
+/// Represents an entry and its parent NodeID.
+/// The odd layout is to bring down the total size.
+#[derive(Copy, Debug)]
+pub enum MapEntry<'ast> {
+    /// Placeholder for holes in the map.
+    NotPresent,
+
+    /// All the node types, with a parent ID.
+    EntryItem(NodeId, &'ast Item),
+    EntryForeignItem(NodeId, &'ast ForeignItem),
+    EntryTraitItem(NodeId, &'ast TraitItem),
+    EntryImplItem(NodeId, &'ast ImplItem),
+    EntryVariant(NodeId, &'ast Variant),
+    EntryExpr(NodeId, &'ast Expr),
+    EntryStmt(NodeId, &'ast Stmt),
+    EntryLocal(NodeId, &'ast Pat),
+    EntryPat(NodeId, &'ast Pat),
+    EntryBlock(NodeId, &'ast Block),
+    EntryStructCtor(NodeId, &'ast VariantData),
+    EntryLifetime(NodeId, &'ast Lifetime),
+    EntryTyParam(NodeId, &'ast TyParam),
+
+    /// Roots for node trees.
+    RootCrate,
+    RootInlinedParent(&'ast InlinedItem)
+}
+
+impl<'ast> Clone for MapEntry<'ast> {
+    fn clone(&self) -> MapEntry<'ast> {
+        *self
+    }
+}
+
+impl<'ast> MapEntry<'ast> {
+    fn from_node(p: NodeId, node: Node<'ast>) -> MapEntry<'ast> {
+        match node {
+            NodeItem(n) => EntryItem(p, n),
+            NodeForeignItem(n) => EntryForeignItem(p, n),
+            NodeTraitItem(n) => EntryTraitItem(p, n),
+            NodeImplItem(n) => EntryImplItem(p, n),
+            NodeVariant(n) => EntryVariant(p, n),
+            NodeExpr(n) => EntryExpr(p, n),
+            NodeStmt(n) => EntryStmt(p, n),
+            NodeLocal(n) => EntryLocal(p, n),
+            NodePat(n) => EntryPat(p, n),
+            NodeBlock(n) => EntryBlock(p, n),
+            NodeStructCtor(n) => EntryStructCtor(p, n),
+            NodeLifetime(n) => EntryLifetime(p, n),
+            NodeTyParam(n) => EntryTyParam(p, n),
+        }
+    }
+
+    fn parent_node(self) -> Option<NodeId> {
+        Some(match self {
+            EntryItem(id, _) => id,
+            EntryForeignItem(id, _) => id,
+            EntryTraitItem(id, _) => id,
+            EntryImplItem(id, _) => id,
+            EntryVariant(id, _) => id,
+            EntryExpr(id, _) => id,
+            EntryStmt(id, _) => id,
+            EntryLocal(id, _) => id,
+            EntryPat(id, _) => id,
+            EntryBlock(id, _) => id,
+            EntryStructCtor(id, _) => id,
+            EntryLifetime(id, _) => id,
+            EntryTyParam(id, _) => id,
+            _ => return None
+        })
+    }
+
+    fn to_node(self) -> Option<Node<'ast>> {
+        Some(match self {
+            EntryItem(_, n) => NodeItem(n),
+            EntryForeignItem(_, n) => NodeForeignItem(n),
+            EntryTraitItem(_, n) => NodeTraitItem(n),
+            EntryImplItem(_, n) => NodeImplItem(n),
+            EntryVariant(_, n) => NodeVariant(n),
+            EntryExpr(_, n) => NodeExpr(n),
+            EntryStmt(_, n) => NodeStmt(n),
+            EntryLocal(_, n) => NodeLocal(n),
+            EntryPat(_, n) => NodePat(n),
+            EntryBlock(_, n) => NodeBlock(n),
+            EntryStructCtor(_, n) => NodeStructCtor(n),
+            EntryLifetime(_, n) => NodeLifetime(n),
+            EntryTyParam(_, n) => NodeTyParam(n),
+            _ => return None
+        })
+    }
+}
+
+/// Stores a crate and any number of inlined items from other crates.
+pub struct Forest {
+    krate: Crate,
+    pub dep_graph: DepGraph,
+    inlined_items: TypedArena<InlinedItem>
+}
+
+impl Forest {
+    pub fn new(krate: Crate, dep_graph: DepGraph) -> Forest {
+        Forest {
+            krate: krate,
+            dep_graph: dep_graph,
+            inlined_items: TypedArena::new()
+        }
+    }
+
+    pub fn krate<'ast>(&'ast self) -> &'ast Crate {
+        self.dep_graph.read(DepNode::Krate);
+        &self.krate
+    }
+}
+
+/// Represents a mapping from Node IDs to AST elements and their parent
+/// Node IDs
+#[derive(Clone)]
+pub struct Map<'ast> {
+    /// The backing storage for all the AST nodes.
+    pub forest: &'ast Forest,
+
+    /// Same as the dep_graph in forest, just available with one fewer
+    /// deref. This is a gratuitous micro-optimization.
+    pub dep_graph: DepGraph,
+
+    /// NodeIds are sequential integers from 0, so we can be
+    /// super-compact by storing them in a vector. Not everything with
+    /// a NodeId is in the map, but empirically the occupancy is about
+    /// 75-80%, so there's not too much overhead (certainly less than
+    /// a hashmap, since they (at the time of writing) have a maximum
+    /// of 75% occupancy).
+    ///
+    /// Also, indexing is pretty quick when you've got a vector and
+    /// plain old integers.
+    map: RefCell<Vec<MapEntry<'ast>>>,
+
+    definitions: RefCell<Definitions>,
+}
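+
+// A small sketch of the dense-vector layout described above: because NodeIds
+// are small sequential integers, a Vec with a `NotPresent` placeholder can act
+// as the map, and lookup is a plain index. `Entry` is a simplified stand-in
+// for `MapEntry`, not rustc code.
+#[cfg(test)]
+mod dense_map_sketch {
+    #[derive(Clone, Copy, PartialEq, Debug)]
+    enum Entry {
+        NotPresent,
+        Present(u32), // e.g. the parent NodeId
+    }
+
+    #[test]
+    fn vector_indexed_by_node_id() {
+        let mut map = vec![Entry::NotPresent; 8];
+        map[0] = Entry::Present(0); // crate root is its own parent
+        map[3] = Entry::Present(0); // node 3 hangs off the root
+        map[5] = Entry::Present(3); // node 5 hangs off node 3
+        // Lookup is a plain index, no hashing involved.
+        assert_eq!(map[5], Entry::Present(3));
+        // Occupancy here is 3/8; the comment above reports ~75-80% in practice.
+        let occupied = map.iter().filter(|e| **e != Entry::NotPresent).count();
+        assert_eq!(occupied, 3);
+    }
+}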
+
+impl<'ast> Map<'ast> {
+    /// Registers a read in the dependency graph of the AST node with
+    /// the given `id`. This needs to be called each time a public
+    /// function returns the HIR for a node -- in other words, when it
+    /// "reveals" the content of a node to the caller (who might not
+    /// otherwise have had access to those contents, and hence needs a
+    /// read recorded). If the function just returns a DefId or
+    /// NodeId, no actual content was returned, so no read is needed.
+    fn read(&self, id: NodeId) {
+        self.dep_graph.read(self.dep_node(id));
+    }
+
+    fn dep_node(&self, id0: NodeId) -> DepNode<DefId> {
+        let map = self.map.borrow();
+        let mut id = id0;
+        loop {
+            match map[id as usize] {
+                EntryItem(_, item) => {
+                    let def_id = self.local_def_id(item.id);
+                    // NB                          ^~~~~~~
+                    //
+                    // You would expect that `item.id == id`, but this
+                    // is not always the case. In particular, for a
+                    // ViewPath item like `use self::{mem, foo}`, we
+                    // map the ids for `mem` and `foo` to the
+                    // enclosing view path item. This seems mega super
+                    // ultra wrong, but then who am I to judge?
+                    // -nmatsakis
+                    return DepNode::Hir(def_id);
+                }
+
+                EntryForeignItem(p, _) |
+                EntryTraitItem(p, _) |
+                EntryImplItem(p, _) |
+                EntryVariant(p, _) |
+                EntryExpr(p, _) |
+                EntryStmt(p, _) |
+                EntryLocal(p, _) |
+                EntryPat(p, _) |
+                EntryBlock(p, _) |
+                EntryStructCtor(p, _) |
+                EntryLifetime(p, _) |
+                EntryTyParam(p, _) =>
+                    id = p,
+
+                RootCrate |
+                RootInlinedParent(_) =>
+                    // FIXME(#32015) clarify story about cross-crate dep tracking
+                    return DepNode::Krate,
+
+                NotPresent =>
+                    // Some nodes, notably struct fields, are not
+                    // present in the map for whatever reason, but
+                    // they *do* have def-ids. So if we encounter an
+                    // empty hole, check for that case.
+                    return self.opt_local_def_id(id)
+                               .map(|def_id| DepNode::Hir(def_id))
+                               .unwrap_or_else(|| {
+                                   bug!("Walking parents from `{}` \
+                                         led to `NotPresent` at `{}`",
+                                        id0, id)
+                               }),
+            }
+        }
+    }
+
+    pub fn num_local_def_ids(&self) -> usize {
+        self.definitions.borrow().len()
+    }
+
+    pub fn def_key(&self, def_id: DefId) -> DefKey {
+        assert!(def_id.is_local());
+        self.definitions.borrow().def_key(def_id.index)
+    }
+
+    pub fn def_path_from_id(&self, id: NodeId) -> Option<DefPath> {
+        self.opt_local_def_id(id).map(|def_id| {
+            self.def_path(def_id)
+        })
+    }
+
+    pub fn def_path(&self, def_id: DefId) -> DefPath {
+        assert!(def_id.is_local());
+        self.definitions.borrow().def_path(def_id.index)
+    }
+
+    pub fn retrace_path(&self, path: &DefPath) -> Option<DefId> {
+        self.definitions.borrow().retrace_path(path)
+                                 .map(DefId::local)
+    }
+
+    pub fn local_def_id(&self, node: NodeId) -> DefId {
+        self.opt_local_def_id(node).unwrap_or_else(|| {
+            bug!("local_def_id: no entry for `{}`, which has a map of `{:?}`",
+                 node, self.find_entry(node))
+        })
+    }
+
+    pub fn opt_local_def_id(&self, node: NodeId) -> Option<DefId> {
+        self.definitions.borrow().opt_local_def_id(node)
+    }
+
+    pub fn as_local_node_id(&self, def_id: DefId) -> Option<NodeId> {
+        self.definitions.borrow().as_local_node_id(def_id)
+    }
+
+    fn entry_count(&self) -> usize {
+        self.map.borrow().len()
+    }
+
+    fn find_entry(&self, id: NodeId) -> Option<MapEntry<'ast>> {
+        self.map.borrow().get(id as usize).cloned()
+    }
+
+    pub fn krate(&self) -> &'ast Crate {
+        self.forest.krate()
+    }
+
+    /// Get the attributes on the krate. This is preferable to
+    /// invoking `krate.attrs` because it registers a tighter
+    /// dep-graph access.
+    pub fn krate_attrs(&self) -> &'ast [ast::Attribute] {
+        let crate_root_def_id = DefId::local(CRATE_DEF_INDEX);
+        self.dep_graph.read(DepNode::Hir(crate_root_def_id));
+        &self.forest.krate.attrs
+    }
+
+    /// Retrieve the Node corresponding to `id`, panicking if it cannot
+    /// be found.
+    pub fn get(&self, id: NodeId) -> Node<'ast> {
+        match self.find(id) {
+            Some(node) => node, // read recorded by `find`
+            None => bug!("couldn't find node id {} in the AST map", id)
+        }
+    }
+
+    pub fn get_if_local(&self, id: DefId) -> Option<Node<'ast>> {
+        self.as_local_node_id(id).map(|id| self.get(id)) // read recorded by `get`
+    }
+
+    /// Retrieve the Node corresponding to `id`, returning None if
+    /// it cannot be found.
+    pub fn find(&self, id: NodeId) -> Option<Node<'ast>> {
+        let result = self.find_entry(id).and_then(|x| x.to_node());
+        if result.is_some() {
+            self.read(id);
+        }
+        result
+    }
+
+    /// Similar to `get_parent`; returns the parent node id, or `id` itself
+    /// if there is no parent.
+    /// This function returns the immediate parent in the AST, whereas `get_parent`
+    /// returns the enclosing item. Note that this might not be the actual parent
+    /// node in the AST - some kinds of nodes are not in the map, so they will
+    /// never appear as the parent_node. This means you can always walk the
+    /// parent_nodes from a node to the root of the AST, unless you get the same
+    /// id back here (which can happen if the id is not in the map or is just weird).
+    pub fn get_parent_node(&self, id: NodeId) -> NodeId {
+        self.find_entry(id).and_then(|x| x.parent_node()).unwrap_or(id)
+    }
+
+    /// Check if the node is an argument. An argument is a local variable whose
+    /// immediate parent is an item or a closure.
+    pub fn is_argument(&self, id: NodeId) -> bool {
+        match self.find(id) {
+            Some(NodeLocal(_)) => (),
+            _ => return false,
+        }
+        match self.find(self.get_parent_node(id)) {
+            Some(NodeItem(_)) |
+            Some(NodeTraitItem(_)) |
+            Some(NodeImplItem(_)) => true,
+            Some(NodeExpr(e)) => {
+                match e.node {
+                    ExprClosure(..) => true,
+                    _ => false,
+                }
+            }
+            _ => false,
+        }
+    }
+
+    /// If there is some error when walking the parents (e.g., a node does not
+    /// have a parent in the map or a node can't be found), then we return the
+    /// last good node id we found. Note that reaching the crate root (id == 0)
+    /// is not an error, since items in the crate module have the crate root as
+    /// parent.
+    fn walk_parent_nodes<F>(&self, start_id: NodeId, found: F) -> Result<NodeId, NodeId>
+        where F: Fn(&Node<'ast>) -> bool
+    {
+        let mut id = start_id;
+        loop {
+            let parent_node = self.get_parent_node(id);
+            if parent_node == 0 {
+                return Ok(0);
+            }
+            if parent_node == id {
+                return Err(id);
+            }
+
+            let node = self.find_entry(parent_node);
+            if node.is_none() {
+                return Err(id);
+            }
+            let node = node.unwrap().to_node();
+            match node {
+                Some(ref node) => {
+                    if found(node) {
+                        return Ok(parent_node);
+                    }
+                }
+                None => {
+                    return Err(parent_node);
+                }
+            }
+            id = parent_node;
+        }
+    }
+
+    /// Retrieve the NodeId for `id`'s parent item, or `id` itself if no
+    /// parent item is in this map. The "parent item" is the closest parent node
+    /// in the AST which is recorded by the map and is an item, either an item
+    /// in a module, trait, or impl.
+    pub fn get_parent(&self, id: NodeId) -> NodeId {
+        match self.walk_parent_nodes(id, |node| match *node {
+            NodeItem(_) |
+            NodeForeignItem(_) |
+            NodeTraitItem(_) |
+            NodeImplItem(_) => true,
+            _ => false,
+        }) {
+            Ok(id) => id,
+            Err(id) => id,
+        }
+    }
+
+    /// Returns the NodeId of `id`'s nearest module parent, or `id` itself if no
+    /// module parent is in this map.
+    pub fn get_module_parent(&self, id: NodeId) -> NodeId {
+        match self.walk_parent_nodes(id, |node| match *node {
+            NodeItem(&Item { node: Item_::ItemMod(_), .. }) => true,
+            _ => false,
+        }) {
+            Ok(id) => id,
+            Err(id) => id,
+        }
+    }
+
+    /// Returns the nearest enclosing scope. A scope is an item or block.
+    /// FIXME it is not clear to me that all items qualify as scopes - statics
+    /// and associated types probably shouldn't, for example. Behaviour in this
+    /// regard should be expected to be highly unstable.
+    pub fn get_enclosing_scope(&self, id: NodeId) -> Option<NodeId> {
+        match self.walk_parent_nodes(id, |node| match *node {
+            NodeItem(_) |
+            NodeForeignItem(_) |
+            NodeTraitItem(_) |
+            NodeImplItem(_) |
+            NodeBlock(_) => true,
+            _ => false,
+        }) {
+            Ok(id) => Some(id),
+            Err(_) => None,
+        }
+    }
+
+    pub fn get_parent_did(&self, id: NodeId) -> DefId {
+        let parent = self.get_parent(id);
+        match self.find_entry(parent) {
+            Some(RootInlinedParent(&II::TraitItem(did, _))) |
+            Some(RootInlinedParent(&II::ImplItem(did, _))) => did,
+            _ => self.local_def_id(parent)
+        }
+    }
+
+    pub fn get_foreign_abi(&self, id: NodeId) -> Abi {
+        let parent = self.get_parent(id);
+        let abi = match self.find_entry(parent) {
+            Some(EntryItem(_, i)) => {
+                match i.node {
+                    ItemForeignMod(ref nm) => Some(nm.abi),
+                    _ => None
+                }
+            }
+            // Wrong but OK, because the only inlined foreign items are intrinsics.
+            Some(RootInlinedParent(_)) => Some(Abi::RustIntrinsic),
+            _ => None
+        };
+        match abi {
+            Some(abi) => {
+                self.read(id); // reveals some of the content of a node
+                abi
+            }
+            None => bug!("expected foreign mod or inlined parent, found {}",
+                          self.node_to_string(parent))
+        }
+    }
+
+    pub fn expect_item(&self, id: NodeId) -> &'ast Item {
+        match self.find(id) { // read recorded by `find`
+            Some(NodeItem(item)) => item,
+            _ => bug!("expected item, found {}", self.node_to_string(id))
+        }
+    }
+
+    pub fn expect_trait_item(&self, id: NodeId) -> &'ast TraitItem {
+        match self.find(id) {
+            Some(NodeTraitItem(item)) => item,
+            _ => bug!("expected trait item, found {}", self.node_to_string(id))
+        }
+    }
+
+    pub fn expect_struct(&self, id: NodeId) -> &'ast VariantData {
+        match self.find(id) {
+            Some(NodeItem(i)) => {
+                match i.node {
+                    ItemStruct(ref struct_def, _) => struct_def,
+                    _ => bug!("struct ID bound to non-struct")
+                }
+            }
+            Some(NodeVariant(variant)) => {
+                if variant.node.data.is_struct() {
+                    &variant.node.data
+                } else {
+                    bug!("struct ID bound to enum variant that isn't struct-like")
+                }
+            }
+            _ => bug!("expected struct, found {}", self.node_to_string(id)),
+        }
+    }
+
+    pub fn expect_variant(&self, id: NodeId) -> &'ast Variant {
+        match self.find(id) {
+            Some(NodeVariant(variant)) => variant,
+            _ => bug!("expected variant, found {}", self.node_to_string(id)),
+        }
+    }
+
+    pub fn expect_foreign_item(&self, id: NodeId) -> &'ast ForeignItem {
+        match self.find(id) {
+            Some(NodeForeignItem(item)) => item,
+            _ => bug!("expected foreign item, found {}", self.node_to_string(id))
+        }
+    }
+
+    pub fn expect_expr(&self, id: NodeId) -> &'ast Expr {
+        match self.find(id) { // read recorded by find
+            Some(NodeExpr(expr)) => expr,
+            _ => bug!("expected expr, found {}", self.node_to_string(id))
+        }
+    }
+
+    /// Returns the name associated with the given NodeId's AST.
+    pub fn name(&self, id: NodeId) -> Name {
+        match self.get(id) {
+            NodeItem(i) => i.name,
+            NodeForeignItem(i) => i.name,
+            NodeImplItem(ii) => ii.name,
+            NodeTraitItem(ti) => ti.name,
+            NodeVariant(v) => v.node.name,
+            NodeLifetime(lt) => lt.name,
+            NodeTyParam(tp) => tp.name,
+            NodeLocal(&Pat { node: PatKind::Ident(_,l,_), .. }) => {
+                l.node.name
+            },
+            NodeStructCtor(_) => self.name(self.get_parent(id)),
+            _ => bug!("no name for {}", self.node_to_string(id))
+        }
+    }
+
+    /// Given a node ID, get a list of attributes associated with the AST
+    /// corresponding to the Node ID
+    pub fn attrs(&self, id: NodeId) -> &'ast [ast::Attribute] {
+        self.read(id); // reveals attributes on the node
+        let attrs = match self.find(id) {
+            Some(NodeItem(i)) => Some(&i.attrs[..]),
+            Some(NodeForeignItem(fi)) => Some(&fi.attrs[..]),
+            Some(NodeTraitItem(ref ti)) => Some(&ti.attrs[..]),
+            Some(NodeImplItem(ref ii)) => Some(&ii.attrs[..]),
+            Some(NodeVariant(ref v)) => Some(&v.node.attrs[..]),
+            Some(NodeExpr(ref e)) => Some(e.attrs.as_attr_slice()),
+            Some(NodeStmt(ref s)) => Some(s.node.attrs()),
+            // unit/tuple structs take the attributes straight from
+            // the struct definition.
+            Some(NodeStructCtor(_)) => {
+                return self.attrs(self.get_parent(id));
+            }
+            _ => None
+        };
+        attrs.unwrap_or(&[])
+    }
+
+    /// Returns an iterator that yields the node ids with paths that
+    /// match `parts`.  (Requires `parts` to be non-empty.)
+    ///
+    /// For example, if given `parts` equal to `["bar", "quux"]`, then
+    /// the iterator will produce node ids for items with paths
+    /// such as `foo::bar::quux`, `bar::quux`, `other::bar::quux`, and
+    /// any other such items it can find in the map.
+    pub fn nodes_matching_suffix<'a>(&'a self, parts: &'a [String])
+                                 -> NodesMatchingSuffix<'a, 'ast> {
+        NodesMatchingSuffix {
+            map: self,
+            item_name: parts.last().unwrap(),
+            in_which: &parts[..parts.len() - 1],
+            idx: 0,
+        }
+    }
+
+    pub fn opt_span(&self, id: NodeId) -> Option<Span> {
+        let sp = match self.find(id) {
+            Some(NodeItem(item)) => item.span,
+            Some(NodeForeignItem(foreign_item)) => foreign_item.span,
+            Some(NodeTraitItem(trait_method)) => trait_method.span,
+            Some(NodeImplItem(ref impl_item)) => impl_item.span,
+            Some(NodeVariant(variant)) => variant.span,
+            Some(NodeExpr(expr)) => expr.span,
+            Some(NodeStmt(stmt)) => stmt.span,
+            Some(NodeLocal(pat)) => pat.span,
+            Some(NodePat(pat)) => pat.span,
+            Some(NodeBlock(block)) => block.span,
+            Some(NodeStructCtor(_)) => self.expect_item(self.get_parent(id)).span,
+            Some(NodeTyParam(ty_param)) => ty_param.span,
+            _ => return None,
+        };
+        Some(sp)
+    }
+
+    pub fn span(&self, id: NodeId) -> Span {
+        self.read(id); // reveals span from node
+        self.opt_span(id)
+            .unwrap_or_else(|| bug!("AstMap.span: could not find span for id {:?}", id))
+    }
+
+    pub fn span_if_local(&self, id: DefId) -> Option<Span> {
+        self.as_local_node_id(id).map(|id| self.span(id))
+    }
+
+    pub fn def_id_span(&self, def_id: DefId, fallback: Span) -> Span {
+        if let Some(node_id) = self.as_local_node_id(def_id) {
+            self.opt_span(node_id).unwrap_or(fallback)
+        } else {
+            fallback
+        }
+    }
+
+    pub fn node_to_string(&self, id: NodeId) -> String {
+        node_id_to_string(self, id, true)
+    }
+
+    pub fn node_to_user_string(&self, id: NodeId) -> String {
+        node_id_to_string(self, id, false)
+    }
+}
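+
+// A stand-alone sketch of the parent-walking strategy behind
+// `walk_parent_nodes`/`get_parent`: follow parent links until a predicate
+// matches, treating "parent == self" as a dead end and id 0 as the crate
+// root. Plain `usize` ids stand in for NodeId; the names are illustrative.
+#[cfg(test)]
+mod walk_parents_sketch {
+    fn walk_parents<F>(parents: &[usize], start: usize, found: F) -> Result<usize, usize>
+        where F: Fn(usize) -> bool
+    {
+        let mut id = start;
+        loop {
+            let parent = parents[id];
+            if parent == 0 {
+                return Ok(0); // reached the crate root without a match
+            }
+            if parent == id {
+                return Err(id); // no progress: `id` has no real parent
+            }
+            if found(parent) {
+                return Ok(parent);
+            }
+            id = parent;
+        }
+    }
+
+    #[test]
+    fn finds_the_enclosing_item() {
+        // parents[i] is the parent of node i; node 0 is the crate root.
+        let parents = [0, 0, 1, 2];
+        // Pretend node 1 is the only "item": walking up from 3 finds it.
+        assert_eq!(walk_parents(&parents, 3, |id| id == 1), Ok(1));
+    }
+}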
+
+pub struct NodesMatchingSuffix<'a, 'ast:'a> {
+    map: &'a Map<'ast>,
+    item_name: &'a String,
+    in_which: &'a [String],
+    idx: NodeId,
+}
+
+impl<'a, 'ast> NodesMatchingSuffix<'a, 'ast> {
+    /// Returns true only if some suffix of the module path for parent
+    /// matches `self.in_which`.
+    ///
+    /// In other words: let `[x_0,x_1,...,x_k]` be `self.in_which`;
+    /// returns true if parent's path ends with the suffix
+    /// `x_0::x_1::...::x_k`.
+    fn suffix_matches(&self, parent: NodeId) -> bool {
+        let mut cursor = parent;
+        for part in self.in_which.iter().rev() {
+            let (mod_id, mod_name) = match find_first_mod_parent(self.map, cursor) {
+                None => return false,
+                Some((node_id, name)) => (node_id, name),
+            };
+            if &part[..] != mod_name.as_str() {
+                return false;
+            }
+            cursor = self.map.get_parent(mod_id);
+        }
+        return true;
+
+        // Finds the first mod in parent chain for `id`, along with
+        // that mod's name.
+        //
+        // If `id` itself is a mod named `m`, then returns
+        // `Some((id, m))`.  If `id` has no mod in its parent
+        // chain, then returns `None`.
+        fn find_first_mod_parent<'a>(map: &'a Map, mut id: NodeId) -> Option<(NodeId, Name)> {
+            loop {
+                match map.find(id) {
+                    None => return None,
+                    Some(NodeItem(item)) if item_is_mod(&item) =>
+                        return Some((id, item.name)),
+                    _ => {}
+                }
+                let parent = map.get_parent(id);
+                if parent == id { return None }
+                id = parent;
+            }
+
+            fn item_is_mod(item: &Item) -> bool {
+                match item.node {
+                    ItemMod(_) => true,
+                    _ => false,
+                }
+            }
+        }
+    }
+
+    // We are looking at some node `n` with a given name and parent
+    // id; do their names match what I am seeking?
+    fn matches_names(&self, parent_of_n: NodeId, name: Name) -> bool {
+        name.as_str() == &self.item_name[..] &&
+            self.suffix_matches(parent_of_n)
+    }
+}
+
+impl<'a, 'ast> Iterator for NodesMatchingSuffix<'a, 'ast> {
+    type Item = NodeId;
+
+    fn next(&mut self) -> Option<NodeId> {
+        loop {
+            let idx = self.idx;
+            if idx as usize >= self.map.entry_count() {
+                return None;
+            }
+            self.idx += 1;
+            let name = match self.map.find_entry(idx) {
+                Some(EntryItem(_, n))       => n.name(),
+                Some(EntryForeignItem(_, n))=> n.name(),
+                Some(EntryTraitItem(_, n))  => n.name(),
+                Some(EntryImplItem(_, n))   => n.name(),
+                Some(EntryVariant(_, n))    => n.name(),
+                _ => continue,
+            };
+            if self.matches_names(self.map.get_parent(idx), name) {
+                return Some(idx)
+            }
+        }
+    }
+}
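+
+// A small sketch of the suffix check behind `nodes_matching_suffix`: an item
+// path matches if the query is a suffix of it, compared segment by segment
+// from the right. Plain string slices stand in for interned names; the
+// function name is illustrative only.
+#[cfg(test)]
+mod suffix_match_sketch {
+    fn suffix_matches(path: &[&str], query: &[&str]) -> bool {
+        query.len() <= path.len() &&
+            path.iter().rev().zip(query.iter().rev()).all(|(a, b)| a == b)
+    }
+
+    #[test]
+    fn matches_from_the_right() {
+        let path = ["foo", "bar", "quux"];
+        assert!(suffix_matches(&path, &["bar", "quux"]));
+        assert!(suffix_matches(&path, &["quux"]));
+        assert!(!suffix_matches(&path, &["foo", "quux"]));
+    }
+}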
+
+trait Named {
+    fn name(&self) -> Name;
+}
+
+impl<T:Named> Named for Spanned<T> { fn name(&self) -> Name { self.node.name() } }
+
+impl Named for Item { fn name(&self) -> Name { self.name } }
+impl Named for ForeignItem { fn name(&self) -> Name { self.name } }
+impl Named for Variant_ { fn name(&self) -> Name { self.name } }
+impl Named for TraitItem { fn name(&self) -> Name { self.name } }
+impl Named for ImplItem { fn name(&self) -> Name { self.name } }
+
+pub trait FoldOps {
+    fn new_id(&self, id: NodeId) -> NodeId {
+        id
+    }
+    fn new_def_id(&self, def_id: DefId) -> DefId {
+        def_id
+    }
+    fn new_span(&self, span: Span) -> Span {
+        span
+    }
+}
+
+/// A Folder that updates IDs and Spans according to `fold_ops`.
+struct IdAndSpanUpdater<F> {
+    fold_ops: F
+}
+
+impl<F: FoldOps> Folder for IdAndSpanUpdater<F> {
+    fn new_id(&mut self, id: NodeId) -> NodeId {
+        self.fold_ops.new_id(id)
+    }
+
+    fn new_span(&mut self, span: Span) -> Span {
+        self.fold_ops.new_span(span)
+    }
+}
+
+pub fn map_crate<'ast>(forest: &'ast mut Forest) -> Map<'ast> {
+    let (map, definitions) = {
+        let mut collector = NodeCollector::root(&forest.krate);
+        intravisit::walk_crate(&mut collector, &forest.krate);
+        (collector.map, collector.definitions)
+    };
+
+    if log_enabled!(::log::DEBUG) {
+        // This only makes sense for ordered stores; note the
+        // enumerate to count the number of entries.
+        let (entries_less_1, _) = map.iter().filter(|&x| {
+            match *x {
+                NotPresent => false,
+                _ => true
+            }
+        }).enumerate().last().expect("AST map was empty after folding?");
+
+        let entries = entries_less_1 + 1;
+        let vector_length = map.len();
+        debug!("The AST map has {} entries with a maximum of {}: occupancy {:.1}%",
+              entries, vector_length, (entries as f64 / vector_length as f64) * 100.);
+    }
+
+    Map {
+        forest: forest,
+        dep_graph: forest.dep_graph.clone(),
+        map: RefCell::new(map),
+        definitions: RefCell::new(definitions),
+    }
+}
+
+/// Used for items loaded from an external crate that are being inlined into this
+/// crate.
+pub fn map_decoded_item<'ast, F: FoldOps>(map: &Map<'ast>,
+                                          parent_def_path: DefPath,
+                                          parent_def_id: DefId,
+                                          ii: InlinedItem,
+                                          fold_ops: F)
+                                          -> &'ast InlinedItem {
+    let mut fld = IdAndSpanUpdater { fold_ops: fold_ops };
+    let ii = match ii {
+        II::Item(i) => II::Item(i.map(|i| fld.fold_item(i))),
+        II::TraitItem(d, ti) => {
+            II::TraitItem(fld.fold_ops.new_def_id(d),
+                          ti.map(|ti| fld.fold_trait_item(ti)))
+        }
+        II::ImplItem(d, ii) => {
+            II::ImplItem(fld.fold_ops.new_def_id(d),
+                         ii.map(|ii| fld.fold_impl_item(ii)))
+        }
+        II::Foreign(i) => II::Foreign(i.map(|i| fld.fold_foreign_item(i)))
+    };
+
+    let ii = map.forest.inlined_items.alloc(ii);
+
+    let ii_parent_id = fld.new_id(DUMMY_NODE_ID);
+    let mut collector =
+        NodeCollector::extend(
+            map.krate(),
+            ii,
+            ii_parent_id,
+            parent_def_path,
+            parent_def_id,
+            mem::replace(&mut *map.map.borrow_mut(), vec![]),
+            mem::replace(&mut *map.definitions.borrow_mut(), Definitions::new()));
+    ii.visit(&mut collector);
+
+    *map.map.borrow_mut() = collector.map;
+    *map.definitions.borrow_mut() = collector.definitions;
+
+    ii
+}
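+
+// A small sketch of the `mem::replace` dance used above: temporarily move a
+// value out of a RefCell (leaving a cheap placeholder behind), hand it to code
+// that wants ownership, then move the extended value back in. Stand-alone
+// example with a plain Vec, not rustc code.
+#[cfg(test)]
+mod mem_replace_sketch {
+    use std::cell::RefCell;
+    use std::mem;
+
+    #[test]
+    fn take_extend_and_put_back() {
+        let map: RefCell<Vec<u32>> = RefCell::new(vec![1, 2, 3]);
+        // Move the vector out, leaving an empty one behind, so it can be
+        // passed by value (as the collector above requires).
+        let mut taken = mem::replace(&mut *map.borrow_mut(), vec![]);
+        taken.push(4);
+        // Put the extended vector back.
+        *map.borrow_mut() = taken;
+        assert_eq!(*map.borrow(), vec![1, 2, 3, 4]);
+    }
+}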
+
+pub trait NodePrinter {
+    fn print_node(&mut self, node: &Node) -> io::Result<()>;
+}
+
+impl<'a> NodePrinter for pprust::State<'a> {
+    fn print_node(&mut self, node: &Node) -> io::Result<()> {
+        match *node {
+            NodeItem(a)        => self.print_item(&a),
+            NodeForeignItem(a) => self.print_foreign_item(&a),
+            NodeTraitItem(a)   => self.print_trait_item(a),
+            NodeImplItem(a)    => self.print_impl_item(a),
+            NodeVariant(a)     => self.print_variant(&a),
+            NodeExpr(a)        => self.print_expr(&a),
+            NodeStmt(a)        => self.print_stmt(&a),
+            NodePat(a)         => self.print_pat(&a),
+            NodeBlock(a)       => self.print_block(&a),
+            NodeLifetime(a)    => self.print_lifetime(&a),
+            NodeTyParam(_)     => bug!("cannot print TyParam"),
+            // these cases do not carry enough information in the
+            // ast_map to reconstruct their full structure for pretty
+            // printing.
+            NodeLocal(_)       => bug!("cannot print isolated Local"),
+            NodeStructCtor(_)  => bug!("cannot print isolated StructCtor"),
+        }
+    }
+}
+
+fn node_id_to_string(map: &Map, id: NodeId, include_id: bool) -> String {
+    let id_str = format!(" (id={})", id);
+    let id_str = if include_id { &id_str[..] } else { "" };
+
+    let path_str = || {
+        // This functionality is used for debugging, try to use TyCtxt to get
+        // the user-friendly path, otherwise fall back to stringifying DefPath.
+        ::ty::tls::with_opt(|tcx| {
+            if let Some(tcx) = tcx {
+                tcx.node_path_str(id)
+            } else if let Some(path) = map.def_path_from_id(id) {
+                path.data.into_iter().map(|elem| {
+                    elem.data.to_string()
+                }).collect::<Vec<_>>().join("::")
+            } else {
+                String::from("<missing path>")
+            }
+        })
+    };
+
+    match map.find(id) {
+        Some(NodeItem(item)) => {
+            let item_str = match item.node {
+                ItemExternCrate(..) => "extern crate",
+                ItemUse(..) => "use",
+                ItemStatic(..) => "static",
+                ItemConst(..) => "const",
+                ItemFn(..) => "fn",
+                ItemMod(..) => "mod",
+                ItemForeignMod(..) => "foreign mod",
+                ItemTy(..) => "ty",
+                ItemEnum(..) => "enum",
+                ItemStruct(..) => "struct",
+                ItemTrait(..) => "trait",
+                ItemImpl(..) => "impl",
+                ItemDefaultImpl(..) => "default impl",
+            };
+            format!("{} {}{}", item_str, path_str(), id_str)
+        }
+        Some(NodeForeignItem(_)) => {
+            format!("foreign item {}{}", path_str(), id_str)
+        }
+        Some(NodeImplItem(ii)) => {
+            match ii.node {
+                ImplItemKind::Const(..) => {
+                    format!("assoc const {} in {}{}", ii.name, path_str(), id_str)
+                }
+                ImplItemKind::Method(..) => {
+                    format!("method {} in {}{}", ii.name, path_str(), id_str)
+                }
+                ImplItemKind::Type(_) => {
+                    format!("assoc type {} in {}{}", ii.name, path_str(), id_str)
+                }
+            }
+        }
+        Some(NodeTraitItem(ti)) => {
+            let kind = match ti.node {
+                ConstTraitItem(..) => "assoc constant",
+                MethodTraitItem(..) => "trait method",
+                TypeTraitItem(..) => "assoc type",
+            };
+
+            format!("{} {} in {}{}", kind, ti.name, path_str(), id_str)
+        }
+        Some(NodeVariant(ref variant)) => {
+            format!("variant {} in {}{}",
+                    variant.node.name,
+                    path_str(), id_str)
+        }
+        Some(NodeExpr(ref expr)) => {
+            format!("expr {}{}", pprust::expr_to_string(&expr), id_str)
+        }
+        Some(NodeStmt(ref stmt)) => {
+            format!("stmt {}{}", pprust::stmt_to_string(&stmt), id_str)
+        }
+        Some(NodeLocal(ref pat)) => {
+            format!("local {}{}", pprust::pat_to_string(&pat), id_str)
+        }
+        Some(NodePat(ref pat)) => {
+            format!("pat {}{}", pprust::pat_to_string(&pat), id_str)
+        }
+        Some(NodeBlock(ref block)) => {
+            format!("block {}{}", pprust::block_to_string(&block), id_str)
+        }
+        Some(NodeStructCtor(_)) => {
+            format!("struct_ctor {}{}", path_str(), id_str)
+        }
+        Some(NodeLifetime(ref l)) => {
+            format!("lifetime {}{}",
+                    pprust::lifetime_to_string(&l), id_str)
+        }
+        Some(NodeTyParam(ref ty_param)) => {
+            format!("typaram {:?}{}", ty_param, id_str)
+        }
+        None => {
+            format!("unknown node{}", id_str)
+        }
+    }
+}
diff --git a/src/librustc/hir/mod.rs b/src/librustc/hir/mod.rs
new file mode 100644 (file)
index 0000000..8e74887
--- /dev/null
@@ -0,0 +1,1643 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// The Rust HIR.
+
+pub use self::BindingMode::*;
+pub use self::BinOp_::*;
+pub use self::BlockCheckMode::*;
+pub use self::CaptureClause::*;
+pub use self::Decl_::*;
+pub use self::ExplicitSelf_::*;
+pub use self::Expr_::*;
+pub use self::FunctionRetTy::*;
+pub use self::ForeignItem_::*;
+pub use self::Item_::*;
+pub use self::Mutability::*;
+pub use self::PathListItem_::*;
+pub use self::PrimTy::*;
+pub use self::Stmt_::*;
+pub use self::TraitItem_::*;
+pub use self::Ty_::*;
+pub use self::TyParamBound::*;
+pub use self::UnOp::*;
+pub use self::UnsafeSource::*;
+pub use self::ViewPath_::*;
+pub use self::Visibility::*;
+pub use self::PathParameters::*;
+
+use hir::def::Def;
+use hir::def_id::DefId;
+use util::nodemap::{NodeMap, FnvHashSet};
+
+use syntax::codemap::{self, Span, Spanned, DUMMY_SP, ExpnId};
+use syntax::abi::Abi;
+use syntax::ast::{Name, NodeId, DUMMY_NODE_ID, TokenTree, AsmDialect};
+use syntax::ast::{Attribute, Lit, StrStyle, FloatTy, IntTy, UintTy, MetaItem};
+use syntax::attr::{ThinAttributes, ThinAttributesExt};
+use syntax::parse::token::InternedString;
+use syntax::ptr::P;
+
+use std::collections::BTreeMap;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use serialize::{Encodable, Decodable, Encoder, Decoder};
+
+/// HIR doesn't commit to a concrete storage type and has its own alias for a vector.
+/// It can be `Vec`, `P<[T]>` or potentially `Box<[T]>`, or some other container with similar
+/// behavior. Unlike AST, HIR is mostly a static structure, so we can use an owned slice instead
+/// of `Vec` to avoid keeping extra capacity.
+pub type HirVec<T> = P<[T]>;
+
+macro_rules! hir_vec {
+    ($elem:expr; $n:expr) => (
+        $crate::hir::HirVec::from(vec![$elem; $n])
+    );
+    ($($x:expr),*) => (
+        $crate::hir::HirVec::from(vec![$($x),*])
+    );
+    ($($x:expr,)*) => (hir_vec![$($x),*])
+}
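+
+// A stand-alone sketch of the same macro shape, targeting a hypothetical
+// `MyVec` wrapper instead of `P<[T]>`, to show how the three arms delegate to
+// `vec!` and then convert via `From`. None of these names are part of the HIR.
+#[cfg(test)]
+mod hir_vec_sketch {
+    struct MyVec<T>(Vec<T>);
+
+    impl<T> From<Vec<T>> for MyVec<T> {
+        fn from(v: Vec<T>) -> MyVec<T> { MyVec(v) }
+    }
+
+    macro_rules! my_vec {
+        ($elem:expr; $n:expr) => (MyVec::from(vec![$elem; $n]));
+        ($($x:expr),*) => (MyVec::from(vec![$($x),*]));
+        ($($x:expr,)*) => (my_vec![$($x),*]);
+    }
+
+    #[test]
+    fn all_three_arms_build_the_wrapper() {
+        let a: MyVec<u32> = my_vec![1, 2, 3];
+        let b: MyVec<u32> = my_vec![0; 4];
+        let c: MyVec<u32> = my_vec![1, 2, 3,]; // trailing comma arm
+        assert_eq!(a.0.len(), 3);
+        assert_eq!(b.0.len(), 4);
+        assert_eq!(c.0.len(), 3);
+    }
+}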
+
+pub mod check_attr;
+pub mod def;
+pub mod def_id;
+pub mod fold;
+pub mod intravisit;
+pub mod lowering;
+pub mod map;
+pub mod pat_util;
+pub mod print;
+pub mod svh;
+
+/// Identifier in HIR
+#[derive(Clone, Copy, Eq)]
+pub struct Ident {
+    /// Hygienic name (renamed), should be used by default
+    pub name: Name,
+    /// Unhygienic name (original, not renamed), needed in a few places in name resolution
+    pub unhygienic_name: Name,
+}
+
+impl Ident {
+    /// Creates a HIR identifier with both `name` and `unhygienic_name` initialized with
+    /// the argument. Hygiene properties of the created identifier depend entirely on this
+    /// argument. If the argument is a plain interned string `intern("iter")`, then the result
+    /// is unhygienic and can interfere with other entities named "iter". If the argument is
+    /// a "fresh" name created with `gensym("iter")`, then the result is hygienic and can't
+    /// interfere with other entities having the same string as a name.
+    pub fn from_name(name: Name) -> Ident {
+        Ident { name: name, unhygienic_name: name }
+    }
+}
+
+impl PartialEq for Ident {
+    fn eq(&self, other: &Ident) -> bool {
+        self.name == other.name
+    }
+}
+
+impl Hash for Ident {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.name.hash(state)
+    }
+}
+
+impl fmt::Debug for Ident {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&self.name, f)
+    }
+}
+
+impl fmt::Display for Ident {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&self.name, f)
+    }
+}
+
+impl Encodable for Ident {
+    fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
+        self.name.encode(s)
+    }
+}
+
+impl Decodable for Ident {
+    fn decode<D: Decoder>(d: &mut D) -> Result<Ident, D::Error> {
+        Ok(Ident::from_name(Name::decode(d)?))
+    }
+}
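+
+// A small sketch of the equality/hashing choice above: only the hygienic
+// `name` participates, so the original (unhygienic) spelling never affects
+// comparisons. `Id` with plain `u32` symbols is a stand-in for `Ident`.
+#[cfg(test)]
+mod hygienic_eq_sketch {
+    #[derive(Clone, Copy, Eq, Debug)]
+    struct Id { name: u32, unhygienic_name: u32 }
+
+    impl PartialEq for Id {
+        fn eq(&self, other: &Id) -> bool { self.name == other.name }
+    }
+
+    #[test]
+    fn only_the_hygienic_name_matters() {
+        let a = Id { name: 1, unhygienic_name: 7 };
+        let b = Id { name: 1, unhygienic_name: 9 };
+        // Same hygienic name, different original spellings: still equal.
+        assert_eq!(a, b);
+    }
+}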
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)]
+pub struct Lifetime {
+    pub id: NodeId,
+    pub span: Span,
+    pub name: Name,
+}
+
+impl fmt::Debug for Lifetime {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f,
+               "lifetime({}: {})",
+               self.id,
+               print::lifetime_to_string(self))
+    }
+}
+
+/// A lifetime definition, e.g. `'a: 'b+'c+'d`
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct LifetimeDef {
+    pub lifetime: Lifetime,
+    pub bounds: HirVec<Lifetime>,
+}
+
+/// A "Path" is essentially Rust's notion of a name; for instance:
+/// `std::cmp::PartialEq`. It's represented as a sequence of identifiers,
+/// along with a bunch of supporting information.
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)]
+pub struct Path {
+    pub span: Span,
+    /// A `::foo` path is relative to the crate root rather than the current
+    /// module (like paths in an import).
+    pub global: bool,
+    /// The segments in the path: the things separated by `::`.
+    pub segments: HirVec<PathSegment>,
+}
+
+impl fmt::Debug for Path {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "path({})", print::path_to_string(self))
+    }
+}
+
+impl fmt::Display for Path {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", print::path_to_string(self))
+    }
+}
+
+impl Path {
+    /// Convert a span and an identifier to the corresponding
+    /// 1-segment path.
+    pub fn from_ident(s: Span, ident: Ident) -> Path {
+        Path {
+            span: s,
+            global: false,
+            segments: hir_vec![PathSegment {
+                identifier: ident,
+                parameters: PathParameters::none()
+            }],
+        }
+    }
+}
+
+/// A segment of a path: an identifier, an optional lifetime, and a set of
+/// types.
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct PathSegment {
+    /// The identifier portion of this path segment.
+    ///
+    /// Hygiene properties of this identifier are worth noting.
+    /// Most path segments are not hygienic and they are not renamed during
+    /// lowering from AST to HIR (see comments to `fn lower_path`). However segments from
+    /// unqualified paths with one segment originating from `ExprPath` (local-variable-like paths)
+    /// can be hygienic, so they are renamed. You should not normally care about this peculiarity
+    /// and just use `identifier.name` unless you modify identifier resolution code
+    /// (`fn resolve_identifier` and other functions called by it in `rustc_resolve`).
+    pub identifier: Ident,
+
+    /// Type/lifetime parameters attached to this path. They come in
+    /// two flavors: `Path<A,B,C>` and `Path(A,B) -> C`. Note that
+    /// this is more than just simple syntactic sugar; the use of
+    /// parens affects the region binding rules, so we preserve the
+    /// distinction.
+    pub parameters: PathParameters,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum PathParameters {
+    /// The `<'a, A,B,C>` in `foo::bar::baz::<'a, A,B,C>`
+    AngleBracketedParameters(AngleBracketedParameterData),
+    /// The `(A,B)` and `C` in `Foo(A,B) -> C`
+    ParenthesizedParameters(ParenthesizedParameterData),
+}
+
+impl PathParameters {
+    pub fn none() -> PathParameters {
+        AngleBracketedParameters(AngleBracketedParameterData {
+            lifetimes: HirVec::new(),
+            types: HirVec::new(),
+            bindings: HirVec::new(),
+        })
+    }
+
+    pub fn is_empty(&self) -> bool {
+        match *self {
+            AngleBracketedParameters(ref data) => data.is_empty(),
+
+            // Even if the user supplied no types, something like
+            // `X()` is equivalent to `X<(),()>`.
+            ParenthesizedParameters(..) => false,
+        }
+    }
+
+    pub fn has_lifetimes(&self) -> bool {
+        match *self {
+            AngleBracketedParameters(ref data) => !data.lifetimes.is_empty(),
+            ParenthesizedParameters(_) => false,
+        }
+    }
+
+    pub fn has_types(&self) -> bool {
+        match *self {
+            AngleBracketedParameters(ref data) => !data.types.is_empty(),
+            ParenthesizedParameters(..) => true,
+        }
+    }
+
+    /// Returns the types that the user wrote. Note that these do not necessarily map to the type
+    /// parameters in the parenthesized case.
+    pub fn types(&self) -> HirVec<&P<Ty>> {
+        match *self {
+            AngleBracketedParameters(ref data) => {
+                data.types.iter().collect()
+            }
+            ParenthesizedParameters(ref data) => {
+                data.inputs
+                    .iter()
+                    .chain(data.output.iter())
+                    .collect()
+            }
+        }
+    }
+
+    pub fn lifetimes(&self) -> HirVec<&Lifetime> {
+        match *self {
+            AngleBracketedParameters(ref data) => {
+                data.lifetimes.iter().collect()
+            }
+            ParenthesizedParameters(_) => {
+                HirVec::new()
+            }
+        }
+    }
+
+    pub fn bindings(&self) -> HirVec<&TypeBinding> {
+        match *self {
+            AngleBracketedParameters(ref data) => {
+                data.bindings.iter().collect()
+            }
+            ParenthesizedParameters(_) => {
+                HirVec::new()
+            }
+        }
+    }
+}
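+
+// A small sketch of the two surface syntaxes the variants above correspond
+// to. In ordinary code the parenthesized form is only accepted for the `Fn`
+// family of traits; the function names here are illustrative only.
+#[cfg(test)]
+mod path_parameters_sketch {
+    use std::collections::HashMap;
+
+    // `HashMap<String, u32>` uses the angle-bracketed form.
+    fn angle_bracketed(m: HashMap<String, u32>) -> usize {
+        m.len()
+    }
+
+    // `Fn(u32, u32) -> u32` uses the parenthesized form.
+    fn parenthesized<F>(f: F) -> u32
+        where F: Fn(u32, u32) -> u32
+    {
+        f(1, 2)
+    }
+
+    #[test]
+    fn both_forms_in_use() {
+        assert_eq!(angle_bracketed(HashMap::new()), 0);
+        assert_eq!(parenthesized(|a, b| a + b), 3);
+    }
+}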
+
+/// A path like `Foo<'a, T>`
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct AngleBracketedParameterData {
+    /// The lifetime parameters for this path segment.
+    pub lifetimes: HirVec<Lifetime>,
+    /// The type parameters for this path segment, if present.
+    pub types: HirVec<P<Ty>>,
+    /// Bindings (equality constraints) on associated types, if present.
+    /// E.g., `Foo<A=Bar>`.
+    pub bindings: HirVec<TypeBinding>,
+}
+
+impl AngleBracketedParameterData {
+    fn is_empty(&self) -> bool {
+        self.lifetimes.is_empty() && self.types.is_empty() && self.bindings.is_empty()
+    }
+}
+
+/// A path like `Foo(A,B) -> C`
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct ParenthesizedParameterData {
+    /// Overall span
+    pub span: Span,
+
+    /// `(A,B)`
+    pub inputs: HirVec<P<Ty>>,
+
+    /// `C`
+    pub output: Option<P<Ty>>,
+}
+
+/// The AST represents all type param bounds as types.
+/// typeck::collect::compute_bounds matches these against
+/// the "special" built-in traits (see middle::lang_items) and
+/// detects Copy, Send and Sync.
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum TyParamBound {
+    TraitTyParamBound(PolyTraitRef, TraitBoundModifier),
+    RegionTyParamBound(Lifetime),
+}
+
+/// A modifier on a bound; currently this is only used for `?Sized`, where the
+/// modifier is `Maybe`. Negative bounds should also be handled here.
+#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum TraitBoundModifier {
+    None,
+    Maybe,
+}
+
+pub type TyParamBounds = HirVec<TyParamBound>;
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct TyParam {
+    pub name: Name,
+    pub id: NodeId,
+    pub bounds: TyParamBounds,
+    pub default: Option<P<Ty>>,
+    pub span: Span,
+}
+
+/// Represents lifetimes and type parameters attached to a declaration
+/// of a function, enum, trait, etc.
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct Generics {
+    pub lifetimes: HirVec<LifetimeDef>,
+    pub ty_params: HirVec<TyParam>,
+    pub where_clause: WhereClause,
+}
+
+impl Generics {
+    pub fn empty() -> Generics {
+        Generics {
+            lifetimes: HirVec::new(),
+            ty_params: HirVec::new(),
+            where_clause: WhereClause {
+                id: DUMMY_NODE_ID,
+                predicates: HirVec::new(),
+            },
+        }
+    }
+
+    pub fn is_lt_parameterized(&self) -> bool {
+        !self.lifetimes.is_empty()
+    }
+
+    pub fn is_type_parameterized(&self) -> bool {
+        !self.ty_params.is_empty()
+    }
+
+    pub fn is_parameterized(&self) -> bool {
+        self.is_lt_parameterized() || self.is_type_parameterized()
+    }
+}
+
+/// A `where` clause in a definition
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct WhereClause {
+    pub id: NodeId,
+    pub predicates: HirVec<WherePredicate>,
+}
+
+/// A single predicate in a `where` clause
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum WherePredicate {
+    /// A type bound, e.g. `for<'c> Foo: Send+Clone+'c`
+    BoundPredicate(WhereBoundPredicate),
+    /// A lifetime predicate, e.g. `'a: 'b+'c`
+    RegionPredicate(WhereRegionPredicate),
+    /// An equality predicate (unsupported)
+    EqPredicate(WhereEqPredicate),
+}
+
+/// A type bound, e.g. `for<'c> Foo: Send+Clone+'c`
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct WhereBoundPredicate {
+    pub span: Span,
+    /// Any lifetimes from a `for` binding
+    pub bound_lifetimes: HirVec<LifetimeDef>,
+    /// The type being bounded
+    pub bounded_ty: P<Ty>,
+    /// Trait and lifetime bounds (`Clone+Send+'static`)
+    pub bounds: TyParamBounds,
+}
+
+/// A lifetime predicate, e.g. `'a: 'b+'c`
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct WhereRegionPredicate {
+    pub span: Span,
+    pub lifetime: Lifetime,
+    pub bounds: HirVec<Lifetime>,
+}
+
+/// An equality predicate (unsupported), e.g. `T=int`
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct WhereEqPredicate {
+    pub id: NodeId,
+    pub span: Span,
+    pub path: Path,
+    pub ty: P<Ty>,
+}
+
+pub type CrateConfig = HirVec<P<MetaItem>>;
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Debug)]
+pub struct Crate {
+    pub module: Mod,
+    pub attrs: HirVec<Attribute>,
+    pub config: CrateConfig,
+    pub span: Span,
+    pub exported_macros: HirVec<MacroDef>,
+
+    // NB: We use a BTreeMap here so that `visit_all_items` iterates
+    // over the ids in increasing order. In principle it should not
+    // matter what order we visit things in, but in *practice* it
+    // does, because it can affect the order in which errors are
+    // detected, which in turn can make compile-fail tests yield
+    // slightly different results.
+    pub items: BTreeMap<NodeId, Item>,
+}
+
+impl Crate {
+    pub fn item(&self, id: NodeId) -> &Item {
+        &self.items[&id]
+    }
+
+    /// Visits all items in the crate in some deterministic (but
+    /// unspecified) order. If you just need to process every item,
+    /// but don't care about nesting, this method is the best choice.
+    ///
+    /// If you do care about nesting -- usually because your algorithm
+    /// follows lexical scoping rules -- then you want a different
+    /// approach. You should override `visit_nested_item` in your
+    /// visitor and then call `intravisit::walk_crate` instead.
+    pub fn visit_all_items<'hir, V>(&'hir self, visitor: &mut V)
+        where V: intravisit::Visitor<'hir>
+    {
+        for (_, item) in &self.items {
+            visitor.visit_item(item);
+        }
+    }
+}
+
+/// A macro definition, in this crate or imported from another.
+///
+/// Not parsed directly, but created on macro import or `macro_rules!` expansion.
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct MacroDef {
+    pub name: Name,
+    pub attrs: HirVec<Attribute>,
+    pub id: NodeId,
+    pub span: Span,
+    pub imported_from: Option<Name>,
+    pub export: bool,
+    pub use_locally: bool,
+    pub allow_internal_unstable: bool,
+    pub body: HirVec<TokenTree>,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct Block {
+    /// Statements in a block
+    pub stmts: HirVec<Stmt>,
+    /// An expression at the end of the block
+    /// without a semicolon, if any
+    pub expr: Option<P<Expr>>,
+    pub id: NodeId,
+    /// Distinguishes between `unsafe { ... }` and `{ ... }`
+    pub rules: BlockCheckMode,
+    pub span: Span,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)]
+pub struct Pat {
+    pub id: NodeId,
+    pub node: PatKind,
+    pub span: Span,
+}
+
+impl fmt::Debug for Pat {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "pat({}: {})", self.id, print::pat_to_string(self))
+    }
+}
+
+impl Pat {
+    // FIXME(#19596) this is a workaround, but there should be a better way
+    fn walk_<G>(&self, it: &mut G) -> bool
+        where G: FnMut(&Pat) -> bool
+    {
+        if !it(self) {
+            return false;
+        }
+
+        match self.node {
+            PatKind::Ident(_, _, Some(ref p)) => p.walk_(it),
+            PatKind::Struct(_, ref fields, _) => {
+                fields.iter().all(|field| field.node.pat.walk_(it))
+            }
+            PatKind::TupleStruct(_, Some(ref s)) | PatKind::Tup(ref s) => {
+                s.iter().all(|p| p.walk_(it))
+            }
+            PatKind::Box(ref s) | PatKind::Ref(ref s, _) => {
+                s.walk_(it)
+            }
+            PatKind::Vec(ref before, ref slice, ref after) => {
+                before.iter().all(|p| p.walk_(it)) &&
+                slice.iter().all(|p| p.walk_(it)) &&
+                after.iter().all(|p| p.walk_(it))
+            }
+            PatKind::Wild |
+            PatKind::Lit(_) |
+            PatKind::Range(_, _) |
+            PatKind::Ident(_, _, _) |
+            PatKind::TupleStruct(..) |
+            PatKind::Path(..) |
+            PatKind::QPath(_, _) => {
+                true
+            }
+        }
+    }
+
+    pub fn walk<F>(&self, mut it: F) -> bool
+        where F: FnMut(&Pat) -> bool
+    {
+        self.walk_(&mut it)
+    }
+}
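+
+// Illustrative sketch: `walk` visits the pattern itself and then recurses as
+// long as the closure keeps returning `true`, so a hypothetical caller could
+// count sub-patterns of some `pat: &Pat` like this:
+//
+//     let mut count = 0;
+//     pat.walk(|_| { count += 1; true });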
+
+/// A single field in a struct pattern
+///
+/// Patterns like the fields of `Foo { x, ref y, ref mut z }`
+/// are treated the same as `x: x, y: ref y, z: ref mut z`,
+/// except `is_shorthand` is true
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct FieldPat {
+    /// The identifier for the field
+    pub name: Name,
+    /// The pattern the field is destructured to
+    pub pat: P<Pat>,
+    pub is_shorthand: bool,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
+pub enum BindingMode {
+    BindByRef(Mutability),
+    BindByValue(Mutability),
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum PatKind {
+    /// Represents a wildcard pattern (`_`)
+    Wild,
+
+    /// A `PatKind::Ident` may either be a new bound variable,
+    /// or a unit struct/variant pattern, or a const pattern (in the last two cases
+    /// the third field must be `None`).
+    ///
+    /// In the unit or const pattern case, the parser can't determine
+    /// which it is. The resolver determines this, and
+    /// records this pattern's `NodeId` in an auxiliary
+    /// set (of "PatIdents that refer to unit patterns or constants").
+    Ident(BindingMode, Spanned<Ident>, Option<P<Pat>>),
+
+    /// A struct or struct variant pattern, e.g. `Variant {x, y, ..}`.
+    /// The `bool` is `true` in the presence of a `..`.
+    Struct(Path, HirVec<Spanned<FieldPat>>, bool),
+
+    /// A tuple struct/variant pattern `Variant(x, y, z)`.
+    /// "None" means a `Variant(..)` pattern where we don't bind the fields to names.
+    TupleStruct(Path, Option<HirVec<P<Pat>>>),
+
+    /// A path pattern.
+    /// Such a pattern can be resolved to a unit struct/variant or a constant.
+    Path(Path),
+
+    /// An associated const named using the qualified path `<T>::CONST` or
+    /// `<T as Trait>::CONST`. Associated consts from inherent impls can be
+    /// referred to as simply `T::CONST`, in which case they will end up as
+    /// PatKind::Path, and the resolver will have to sort that out.
+    QPath(QSelf, Path),
+
+    /// A tuple pattern `(a, b)`
+    Tup(HirVec<P<Pat>>),
+    /// A `box` pattern
+    Box(P<Pat>),
+    /// A reference pattern, e.g. `&mut (a, b)`
+    Ref(P<Pat>, Mutability),
+    /// A literal
+    Lit(P<Expr>),
+    /// A range pattern, e.g. `1...2`
+    Range(P<Expr>, P<Expr>),
+    /// `[a, b, ..i, y, z]` is represented as:
+    ///     `PatKind::Vec(box [a, b], Some(i), box [y, z])`
+    Vec(HirVec<P<Pat>>, Option<P<Pat>>, HirVec<P<Pat>>),
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
+pub enum Mutability {
+    MutMutable,
+    MutImmutable,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
+pub enum BinOp_ {
+    /// The `+` operator (addition)
+    BiAdd,
+    /// The `-` operator (subtraction)
+    BiSub,
+    /// The `*` operator (multiplication)
+    BiMul,
+    /// The `/` operator (division)
+    BiDiv,
+    /// The `%` operator (modulus)
+    BiRem,
+    /// The `&&` operator (logical and)
+    BiAnd,
+    /// The `||` operator (logical or)
+    BiOr,
+    /// The `^` operator (bitwise xor)
+    BiBitXor,
+    /// The `&` operator (bitwise and)
+    BiBitAnd,
+    /// The `|` operator (bitwise or)
+    BiBitOr,
+    /// The `<<` operator (shift left)
+    BiShl,
+    /// The `>>` operator (shift right)
+    BiShr,
+    /// The `==` operator (equality)
+    BiEq,
+    /// The `<` operator (less than)
+    BiLt,
+    /// The `<=` operator (less than or equal to)
+    BiLe,
+    /// The `!=` operator (not equal to)
+    BiNe,
+    /// The `>=` operator (greater than or equal to)
+    BiGe,
+    /// The `>` operator (greater than)
+    BiGt,
+}
+
+impl BinOp_ {
+    pub fn as_str(self) -> &'static str {
+        match self {
+            BiAdd => "+",
+            BiSub => "-",
+            BiMul => "*",
+            BiDiv => "/",
+            BiRem => "%",
+            BiAnd => "&&",
+            BiOr => "||",
+            BiBitXor => "^",
+            BiBitAnd => "&",
+            BiBitOr => "|",
+            BiShl => "<<",
+            BiShr => ">>",
+            BiEq => "==",
+            BiLt => "<",
+            BiLe => "<=",
+            BiNe => "!=",
+            BiGe => ">=",
+            BiGt => ">",
+        }
+    }
+
+    pub fn is_lazy(self) -> bool {
+        match self {
+            BiAnd | BiOr => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_shift(self) -> bool {
+        match self {
+            BiShl | BiShr => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_comparison(self) -> bool {
+        match self {
+            BiEq | BiLt | BiLe | BiNe | BiGt | BiGe => true,
+            BiAnd |
+            BiOr |
+            BiAdd |
+            BiSub |
+            BiMul |
+            BiDiv |
+            BiRem |
+            BiBitXor |
+            BiBitAnd |
+            BiBitOr |
+            BiShl |
+            BiShr => false,
+        }
+    }
+
+    /// Returns `true` if the binary operator takes its arguments by value
+    pub fn is_by_value(self) -> bool {
+        !self.is_comparison()
+    }
+}
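+
+// Illustrative sketch of the classification helpers above (assumes the
+// variants are imported):
+//
+//     assert_eq!(BiAdd.as_str(), "+");
+//     assert!(BiAnd.is_lazy() && !BiAnd.is_comparison());
+//     assert!(BiLt.is_comparison() && !BiLt.is_by_value());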
+
+pub type BinOp = Spanned<BinOp_>;
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
+pub enum UnOp {
+    /// The `*` operator for dereferencing
+    UnDeref,
+    /// The `!` operator for logical inversion
+    UnNot,
+    /// The `-` operator for negation
+    UnNeg,
+}
+
+impl UnOp {
+    pub fn as_str(self) -> &'static str {
+        match self {
+            UnDeref => "*",
+            UnNot => "!",
+            UnNeg => "-",
+        }
+    }
+
+    /// Returns `true` if the unary operator takes its argument by value
+    pub fn is_by_value(self) -> bool {
+        match self {
+            UnNeg | UnNot => true,
+            _ => false,
+        }
+    }
+}
+
+/// A statement
+pub type Stmt = Spanned<Stmt_>;
+
+impl fmt::Debug for Stmt_ {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        // Sadness.
+        let spanned = codemap::dummy_spanned(self.clone());
+        write!(f,
+               "stmt({}: {})",
+               spanned.node.id(),
+               print::stmt_to_string(&spanned))
+    }
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)]
+pub enum Stmt_ {
+    /// Could be an item or a local (let) binding:
+    StmtDecl(P<Decl>, NodeId),
+
+    /// Expr without trailing semi-colon (must have unit type):
+    StmtExpr(P<Expr>, NodeId),
+
+    /// Expr with trailing semi-colon (may have any type):
+    StmtSemi(P<Expr>, NodeId),
+}
+
+impl Stmt_ {
+    pub fn attrs(&self) -> &[Attribute] {
+        match *self {
+            StmtDecl(ref d, _) => d.node.attrs(),
+            StmtExpr(ref e, _) |
+            StmtSemi(ref e, _) => e.attrs.as_attr_slice(),
+        }
+    }
+
+    pub fn id(&self) -> NodeId {
+        match *self {
+            StmtDecl(_, id) => id,
+            StmtExpr(_, id) => id,
+            StmtSemi(_, id) => id,
+        }
+    }
+}
+
+// FIXME (pending discussion of #1697, #2178...): local should really be
+// a refinement on pat.
+/// Local represents a `let` statement, e.g., `let <pat>:<ty> = <expr>;`
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct Local {
+    pub pat: P<Pat>,
+    pub ty: Option<P<Ty>>,
+    /// Initializer expression to set the value, if any
+    pub init: Option<P<Expr>>,
+    pub id: NodeId,
+    pub span: Span,
+    pub attrs: ThinAttributes,
+}
+
+pub type Decl = Spanned<Decl_>;
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum Decl_ {
+    /// A local (let) binding:
+    DeclLocal(P<Local>),
+    /// An item binding:
+    DeclItem(ItemId),
+}
+
+impl Decl_ {
+    pub fn attrs(&self) -> &[Attribute] {
+        match *self {
+            DeclLocal(ref l) => l.attrs.as_attr_slice(),
+            DeclItem(_) => &[]
+        }
+    }
+}
+
+/// Represents one arm of a `match` expression
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct Arm {
+    pub attrs: HirVec<Attribute>,
+    pub pats: HirVec<P<Pat>>,
+    pub guard: Option<P<Expr>>,
+    pub body: P<Expr>,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct Field {
+    pub name: Spanned<Name>,
+    pub expr: P<Expr>,
+    pub span: Span,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
+pub enum BlockCheckMode {
+    DefaultBlock,
+    UnsafeBlock(UnsafeSource),
+    PushUnsafeBlock(UnsafeSource),
+    PopUnsafeBlock(UnsafeSource),
+    // Within this block (but outside a PopUnstableBlock), we suspend checking of stability.
+    PushUnstableBlock,
+    PopUnstableBlock,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
+pub enum UnsafeSource {
+    CompilerGenerated,
+    UserProvided,
+}
+
+/// An expression
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)]
+pub struct Expr {
+    pub id: NodeId,
+    pub node: Expr_,
+    pub span: Span,
+    pub attrs: ThinAttributes,
+}
+
+impl fmt::Debug for Expr {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "expr({}: {})", self.id, print::expr_to_string(self))
+    }
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum Expr_ {
+    /// A `box x` expression.
+    ExprBox(P<Expr>),
+    /// An array (`[a, b, c, d]`)
+    ExprVec(HirVec<P<Expr>>),
+    /// A function call
+    ///
+    /// The first field resolves to the function itself,
+    /// and the second field is the list of arguments
+    ExprCall(P<Expr>, HirVec<P<Expr>>),
+    /// A method call (`x.foo::<Bar, Baz>(a, b, c, d)`)
+    ///
+    /// The `Spanned<Name>` is the identifier for the method name.
+    /// The vector of `Ty`s is the list of ascribed type parameters for the method
+    /// (within the angle brackets).
+    ///
+    /// The first element of the vector of `Expr`s is the expression that evaluates
+    /// to the object on which the method is being called (the receiver),
+    /// and the remaining elements are the rest of the arguments.
+    ///
+    /// Thus, `x.foo::<Bar, Baz>(a, b, c, d)` is represented as
+    /// `ExprMethodCall(foo, [Bar, Baz], [x, a, b, c, d])`.
+    ExprMethodCall(Spanned<Name>, HirVec<P<Ty>>, HirVec<P<Expr>>),
+    /// A tuple (`(a, b, c, d)`)
+    ExprTup(HirVec<P<Expr>>),
+    /// A binary operation (For example: `a + b`, `a * b`)
+    ExprBinary(BinOp, P<Expr>, P<Expr>),
+    /// A unary operation (For example: `!x`, `*x`)
+    ExprUnary(UnOp, P<Expr>),
+    /// A literal (For example: `1`, `"foo"`)
+    ExprLit(P<Lit>),
+    /// A cast (`foo as f64`)
+    ExprCast(P<Expr>, P<Ty>),
+    ExprType(P<Expr>, P<Ty>),
+    /// An `if` block, with an optional else block
+    ///
+    /// `if expr { block } else { expr }`
+    ExprIf(P<Expr>, P<Block>, Option<P<Expr>>),
+    /// A while loop, with an optional label
+    ///
+    /// `'label: while expr { block }`
+    ExprWhile(P<Expr>, P<Block>, Option<Ident>),
+    /// Conditionless loop (can be exited with `break` or `return`)
+    ///
+    /// `'label: loop { block }`
+    ExprLoop(P<Block>, Option<Ident>),
+    /// A `match` block, with a source that indicates whether or not it is
+    /// the result of a desugaring, and if so, which kind.
+    ExprMatch(P<Expr>, HirVec<Arm>, MatchSource),
+    /// A closure (for example, `move |a, b, c| {a + b + c}`)
+    ExprClosure(CaptureClause, P<FnDecl>, P<Block>),
+    /// A block (`{ ... }`)
+    ExprBlock(P<Block>),
+
+    /// An assignment (`a = foo()`)
+    ExprAssign(P<Expr>, P<Expr>),
+    /// An assignment with an operator
+    ///
+    /// For example, `a += 1`.
+    ExprAssignOp(BinOp, P<Expr>, P<Expr>),
+    /// Access of a named struct field (`obj.foo`)
+    ExprField(P<Expr>, Spanned<Name>),
+    /// Access of an unnamed field of a struct or tuple-struct
+    ///
+    /// For example, `foo.0`.
+    ExprTupField(P<Expr>, Spanned<usize>),
+    /// An indexing operation (`foo[2]`)
+    ExprIndex(P<Expr>, P<Expr>),
+
+    /// Variable reference, possibly containing `::` and/or type
+    /// parameters, e.g. `foo::bar::<baz>`.
+    ///
+    /// Optionally "qualified",
+    /// e.g. `<HirVec<T> as SomeTrait>::SomeType`.
+    ExprPath(Option<QSelf>, Path),
+
+    /// A referencing operation (`&a` or `&mut a`)
+    ExprAddrOf(Mutability, P<Expr>),
+    /// A `break`, with an optional label to break
+    ExprBreak(Option<Spanned<Ident>>),
+    /// A `continue`, with an optional label
+    ExprAgain(Option<Spanned<Ident>>),
+    /// A `return`, with an optional value to be returned
+    ExprRet(Option<P<Expr>>),
+
+    /// Inline assembly (from `asm!`), with its outputs and inputs.
+    ExprInlineAsm(InlineAsm, Vec<P<Expr>>, Vec<P<Expr>>),
+
+    /// A struct literal expression.
+    ///
+    /// For example, `Foo {x: 1, y: 2}`, or
+    /// `Foo {x: 1, .. base}`, where `base` is the `Option<Expr>`.
+    ExprStruct(Path, HirVec<Field>, Option<P<Expr>>),
+
+    /// A vector literal constructed from one repeated element.
+    ///
+    /// For example, `[1; 5]`. The first expression is the element
+    /// to be repeated; the second is the number of times to repeat it.
+    ExprRepeat(P<Expr>, P<Expr>),
+}
+
+/// The explicit Self type in a "qualified path". The actual
+/// path, including the trait and the associated item, is stored
+/// separately. `position` represents the index of the associated
+/// item qualified with this Self type.
+///
+///     <HirVec<T> as a::b::Trait>::AssociatedItem
+///      ^~~~~     ~~~~~~~~~~~~~~^
+///      ty        position = 3
+///
+///     <HirVec<T>>::AssociatedItem
+///      ^~~~~    ^
+///      ty       position = 0
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct QSelf {
+    pub ty: P<Ty>,
+    pub position: usize,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
+pub enum MatchSource {
+    Normal,
+    IfLetDesugar {
+        contains_else_clause: bool,
+    },
+    WhileLetDesugar,
+    ForLoopDesugar,
+    TryDesugar,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
+pub enum CaptureClause {
+    CaptureByValue,
+    CaptureByRef,
+}
+
+// NB: If you change this, you'll probably want to change the corresponding
+// type structure in middle/ty.rs as well.
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct MutTy {
+    pub ty: P<Ty>,
+    pub mutbl: Mutability,
+}
+
+/// Represents a method's signature in a trait declaration,
+/// or in an implementation.
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct MethodSig {
+    pub unsafety: Unsafety,
+    pub constness: Constness,
+    pub abi: Abi,
+    pub decl: P<FnDecl>,
+    pub generics: Generics,
+    pub explicit_self: ExplicitSelf,
+}
+
+/// Represents an item declaration within a trait declaration,
+/// possibly including a default implementation. A trait item is
+/// either required (meaning it doesn't have an implementation, just a
+/// signature) or provided (meaning it has a default implementation).
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct TraitItem {
+    pub id: NodeId,
+    pub name: Name,
+    pub attrs: HirVec<Attribute>,
+    pub node: TraitItem_,
+    pub span: Span,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum TraitItem_ {
+    ConstTraitItem(P<Ty>, Option<P<Expr>>),
+    MethodTraitItem(MethodSig, Option<P<Block>>),
+    TypeTraitItem(TyParamBounds, Option<P<Ty>>),
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct ImplItem {
+    pub id: NodeId,
+    pub name: Name,
+    pub vis: Visibility,
+    pub defaultness: Defaultness,
+    pub attrs: HirVec<Attribute>,
+    pub node: ImplItemKind,
+    pub span: Span,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum ImplItemKind {
+    Const(P<Ty>, P<Expr>),
+    Method(MethodSig, P<Block>),
+    Type(P<Ty>),
+}
+
+// Bind a type to an associated type: `A=Foo`.
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct TypeBinding {
+    pub id: NodeId,
+    pub name: Name,
+    pub ty: P<Ty>,
+    pub span: Span,
+}
+
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)]
+pub struct Ty {
+    pub id: NodeId,
+    pub node: Ty_,
+    pub span: Span,
+}
+
+impl fmt::Debug for Ty {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "type({})", print::ty_to_string(self))
+    }
+}
+
+/// Not represented directly in the AST, referred to by name through a ty_path.
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
+pub enum PrimTy {
+    TyInt(IntTy),
+    TyUint(UintTy),
+    TyFloat(FloatTy),
+    TyStr,
+    TyBool,
+    TyChar,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct BareFnTy {
+    pub unsafety: Unsafety,
+    pub abi: Abi,
+    pub lifetimes: HirVec<LifetimeDef>,
+    pub decl: P<FnDecl>,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+/// The different kinds of types recognized by the compiler
+pub enum Ty_ {
+    TyVec(P<Ty>),
+    /// A fixed length array (`[T; n]`)
+    TyFixedLengthVec(P<Ty>, P<Expr>),
+    /// A raw pointer (`*const T` or `*mut T`)
+    TyPtr(MutTy),
+    /// A reference (`&'a T` or `&'a mut T`)
+    TyRptr(Option<Lifetime>, MutTy),
+    /// A bare function (e.g. `fn(usize) -> bool`)
+    TyBareFn(P<BareFnTy>),
+    /// A tuple (`(A, B, C, D,...)`)
+    TyTup(HirVec<P<Ty>>),
+    /// A path (`module::module::...::Type`), optionally
+    /// "qualified", e.g. `<HirVec<T> as SomeTrait>::SomeType`.
+    ///
+    /// Type parameters are stored in the Path itself
+    TyPath(Option<QSelf>, Path),
+    /// Something like `A+B`. Note that `B` must always be a path.
+    TyObjectSum(P<Ty>, TyParamBounds),
+    /// A type like `for<'a> Foo<&'a Bar>`
+    TyPolyTraitRef(TyParamBounds),
+    /// Unused for now
+    TyTypeof(P<Expr>),
+    /// TyInfer means the type should be inferred instead of it having been
+    /// specified. This can appear anywhere in a type.
+    TyInfer,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct InlineAsmOutput {
+    pub constraint: InternedString,
+    pub is_rw: bool,
+    pub is_indirect: bool,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct InlineAsm {
+    pub asm: InternedString,
+    pub asm_str_style: StrStyle,
+    pub outputs: HirVec<InlineAsmOutput>,
+    pub inputs: HirVec<InternedString>,
+    pub clobbers: HirVec<InternedString>,
+    pub volatile: bool,
+    pub alignstack: bool,
+    pub dialect: AsmDialect,
+    pub expn_id: ExpnId,
+}
+
+/// Represents an argument in a function header
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct Arg {
+    pub ty: P<Ty>,
+    pub pat: P<Pat>,
+    pub id: NodeId,
+}
+
+impl Arg {
+    pub fn new_self(span: Span, mutability: Mutability, self_ident: Ident) -> Arg {
+        let path = Spanned {
+            span: span,
+            node: self_ident,
+        };
+        Arg {
+            // HACK(eddyb) fake type for the self argument.
+            ty: P(Ty {
+                id: DUMMY_NODE_ID,
+                node: TyInfer,
+                span: DUMMY_SP,
+            }),
+            pat: P(Pat {
+                id: DUMMY_NODE_ID,
+                node: PatKind::Ident(BindByValue(mutability), path, None),
+                span: span,
+            }),
+            id: DUMMY_NODE_ID,
+        }
+    }
+}
+
+/// Represents the header (not the body) of a function declaration
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct FnDecl {
+    pub inputs: HirVec<Arg>,
+    pub output: FunctionRetTy,
+    pub variadic: bool,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum Unsafety {
+    Unsafe,
+    Normal,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum Constness {
+    Const,
+    NotConst,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum Defaultness {
+    Default,
+    Final,
+}
+
+impl Defaultness {
+    pub fn is_final(&self) -> bool {
+        *self == Defaultness::Final
+    }
+
+    pub fn is_default(&self) -> bool {
+        *self == Defaultness::Default
+    }
+}
+
+impl fmt::Display for Unsafety {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(match *self {
+                              Unsafety::Normal => "normal",
+                              Unsafety::Unsafe => "unsafe",
+                          },
+                          f)
+    }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)]
+pub enum ImplPolarity {
+    /// `impl Trait for Type`
+    Positive,
+    /// `impl !Trait for Type`
+    Negative,
+}
+
+impl fmt::Debug for ImplPolarity {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            ImplPolarity::Positive => "positive".fmt(f),
+            ImplPolarity::Negative => "negative".fmt(f),
+        }
+    }
+}
+
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum FunctionRetTy {
+    /// Functions with return type `!` that always
+    /// raise an error or exit (i.e. never return to the caller)
+    NoReturn(Span),
+    /// Return type is not specified.
+    ///
+    /// Functions default to `()` and
+    /// closures default to inference. Span points to where return
+    /// type would be inserted.
+    DefaultReturn(Span),
+    /// Everything else
+    Return(P<Ty>),
+}
+
+impl FunctionRetTy {
+    pub fn span(&self) -> Span {
+        match *self {
+            NoReturn(span) => span,
+            DefaultReturn(span) => span,
+            Return(ref ty) => ty.span,
+        }
+    }
+}
+
+/// Represents the kind of 'self' associated with a method
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum ExplicitSelf_ {
+    /// No self
+    SelfStatic,
+    /// `self`
+    SelfValue(Name),
+    /// `&'lt self`, `&'lt mut self`
+    SelfRegion(Option<Lifetime>, Mutability, Name),
+    /// `self: TYPE`
+    SelfExplicit(P<Ty>, Name),
+}
+
+pub type ExplicitSelf = Spanned<ExplicitSelf_>;
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct Mod {
+    /// A span from the first token past `{` to the last token before `}`.
+    /// For `mod foo;`, the inner span ranges from the first token
+    /// to the last token in the external file.
+    pub inner: Span,
+    pub item_ids: HirVec<ItemId>,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct ForeignMod {
+    pub abi: Abi,
+    pub items: HirVec<ForeignItem>,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct EnumDef {
+    pub variants: HirVec<Variant>,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct Variant_ {
+    pub name: Name,
+    pub attrs: HirVec<Attribute>,
+    pub data: VariantData,
+    /// Explicit discriminant, e.g. `Foo = 1`
+    pub disr_expr: Option<P<Expr>>,
+}
+
+pub type Variant = Spanned<Variant_>;
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
+pub enum PathListItem_ {
+    PathListIdent {
+        name: Name,
+        /// renamed in the list, e.g. `use foo::{bar as baz};`
+        rename: Option<Name>,
+        id: NodeId,
+    },
+    PathListMod {
+        /// renamed in the list, e.g. `use foo::{self as baz};`
+        rename: Option<Name>,
+        id: NodeId,
+    },
+}
+
+impl PathListItem_ {
+    pub fn id(&self) -> NodeId {
+        match *self {
+            PathListIdent { id, .. } | PathListMod { id, .. } => id,
+        }
+    }
+
+    pub fn name(&self) -> Option<Name> {
+        match *self {
+            PathListIdent { name, .. } => Some(name),
+            PathListMod { .. } => None,
+        }
+    }
+
+    pub fn rename(&self) -> Option<Name> {
+        match *self {
+            PathListIdent { rename, .. } | PathListMod { rename, .. } => rename,
+        }
+    }
+}
+
+pub type PathListItem = Spanned<PathListItem_>;
+
+pub type ViewPath = Spanned<ViewPath_>;
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum ViewPath_ {
+    /// `foo::bar::baz as quux`
+    ///
+    /// or just
+    ///
+    /// `foo::bar::baz` (with `as baz` implicitly on the right)
+    ViewPathSimple(Name, Path),
+
+    /// `foo::bar::*`
+    ViewPathGlob(Path),
+
+    /// `foo::bar::{a,b,c}`
+    ViewPathList(Path, HirVec<PathListItem>),
+}
+
+/// `TraitRef`s appear in impls.
+///
+/// resolve maps each TraitRef's ref_id to its defining trait; that's all
+/// that the ref_id is for. The impl_id maps to the "self type" of this impl.
+/// If this impl is an ItemImpl, the impl_id is redundant (it could be the
+/// same as the impl's node id).
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct TraitRef {
+    pub path: Path,
+    pub ref_id: NodeId,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct PolyTraitRef {
+    /// The `'a` in `<'a> Foo<&'a T>`
+    pub bound_lifetimes: HirVec<LifetimeDef>,
+
+    /// The `Foo<&'a T>` in `<'a> Foo<&'a T>`
+    pub trait_ref: TraitRef,
+
+    pub span: Span,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum Visibility {
+    Public,
+    Inherited,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct StructField {
+    pub span: Span,
+    pub name: Name,
+    pub vis: Visibility,
+    pub id: NodeId,
+    pub ty: P<Ty>,
+    pub attrs: HirVec<Attribute>,
+}
+
+impl StructField {
+    // Still necessary in a couple of places
+    pub fn is_positional(&self) -> bool {
+        let first = self.name.as_str().as_bytes()[0];
+        first >= b'0' && first <= b'9'
+    }
+}
+
+/// Fields and Ids of enum variants and structs
+///
+/// For enum variants: `NodeId` represents both an Id of the variant itself (relevant for all
+/// variant kinds) and an Id of the variant's constructor (not relevant for `Struct`-variants).
+/// One shared Id can be successfully used for these two purposes.
+/// Id of the whole enum lives in `Item`.
+///
+/// For structs: `NodeId` represents an Id of the structure's constructor, so it is not actually
+/// used for `Struct`-structs (but is still present). Structures don't have an analogue of "Id of
+/// the variant itself" from enum variants.
+/// Id of the whole struct lives in `Item`.
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum VariantData {
+    Struct(HirVec<StructField>, NodeId),
+    Tuple(HirVec<StructField>, NodeId),
+    Unit(NodeId),
+}
+
+impl VariantData {
+    pub fn fields(&self) -> &[StructField] {
+        match *self {
+            VariantData::Struct(ref fields, _) | VariantData::Tuple(ref fields, _) => fields,
+            _ => &[],
+        }
+    }
+    pub fn id(&self) -> NodeId {
+        match *self {
+            VariantData::Struct(_, id) | VariantData::Tuple(_, id) | VariantData::Unit(id) => id,
+        }
+    }
+    pub fn is_struct(&self) -> bool {
+        if let VariantData::Struct(..) = *self {
+            true
+        } else {
+            false
+        }
+    }
+    pub fn is_tuple(&self) -> bool {
+        if let VariantData::Tuple(..) = *self {
+            true
+        } else {
+            false
+        }
+    }
+    pub fn is_unit(&self) -> bool {
+        if let VariantData::Unit(..) = *self {
+            true
+        } else {
+            false
+        }
+    }
+}
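+
+// Illustrative sketch: the accessors above let callers treat all three
+// layouts uniformly, e.g. a hypothetical helper counting fields:
+//
+//     fn field_count(data: &VariantData) -> usize {
+//         data.fields().len() // 0 for unit variants and unit structs
+//     }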
+
+// The bodies for items are stored "out of line", in a separate
+// map in the `Crate`. Here we just record the node-id of the item
+// so it can be fetched later.
+#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct ItemId {
+    pub id: NodeId,
+}
+
+//  FIXME (#3300): Should allow items to be anonymous. Right now
+//  we just use dummy names for anon items.
+/// An item
+///
+/// The name might be a dummy name in case of anonymous items
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct Item {
+    pub name: Name,
+    pub attrs: HirVec<Attribute>,
+    pub id: NodeId,
+    pub node: Item_,
+    pub vis: Visibility,
+    pub span: Span,
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum Item_ {
+    /// An `extern crate` item, with optional original crate name,
+    ///
+    /// e.g. `extern crate foo` or `extern crate foo_bar as foo`
+    ItemExternCrate(Option<Name>),
+    /// A `use` or `pub use` item
+    ItemUse(P<ViewPath>),
+
+    /// A `static` item
+    ItemStatic(P<Ty>, Mutability, P<Expr>),
+    /// A `const` item
+    ItemConst(P<Ty>, P<Expr>),
+    /// A function declaration
+    ItemFn(P<FnDecl>, Unsafety, Constness, Abi, Generics, P<Block>),
+    /// A module
+    ItemMod(Mod),
+    /// An external module
+    ItemForeignMod(ForeignMod),
+    /// A type alias, e.g. `type Foo = Bar<u8>`
+    ItemTy(P<Ty>, Generics),
+    /// An enum definition, e.g. `enum Foo<A, B> {C<A>, D<B>}`
+    ItemEnum(EnumDef, Generics),
+    /// A struct definition, e.g. `struct Foo<A> {x: A}`
+    ItemStruct(VariantData, Generics),
+    /// Represents a Trait Declaration
+    ItemTrait(Unsafety, Generics, TyParamBounds, HirVec<TraitItem>),
+
+    /// A default trait implementation
+    ///
+    /// e.g. `impl Trait for .. {}`
+    ItemDefaultImpl(Unsafety, TraitRef),
+    /// An implementation, e.g. `impl<A> Trait for Foo { .. }`
+    ItemImpl(Unsafety,
+             ImplPolarity,
+             Generics,
+             Option<TraitRef>, // (optional) trait this impl implements
+             P<Ty>, // self
+             HirVec<ImplItem>),
+}
+
+impl Item_ {
+    pub fn descriptive_variant(&self) -> &str {
+        match *self {
+            ItemExternCrate(..) => "extern crate",
+            ItemUse(..) => "use",
+            ItemStatic(..) => "static item",
+            ItemConst(..) => "constant item",
+            ItemFn(..) => "function",
+            ItemMod(..) => "module",
+            ItemForeignMod(..) => "foreign module",
+            ItemTy(..) => "type alias",
+            ItemEnum(..) => "enum",
+            ItemStruct(..) => "struct",
+            ItemTrait(..) => "trait",
+            ItemImpl(..) |
+            ItemDefaultImpl(..) => "item",
+        }
+    }
+}
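+
+// Illustrative sketch: `descriptive_variant` is meant for diagnostics, e.g.
+// given some `item: &Item`,
+//
+//     // yields "function" for an `ItemFn(..)`, "struct" for an `ItemStruct(..)`, ...
+//     let what = item.node.descriptive_variant();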
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct ForeignItem {
+    pub name: Name,
+    pub attrs: HirVec<Attribute>,
+    pub node: ForeignItem_,
+    pub id: NodeId,
+    pub span: Span,
+    pub vis: Visibility,
+}
+
+/// An item within an `extern` block
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum ForeignItem_ {
+    /// A foreign function
+    ForeignItemFn(P<FnDecl>, Generics),
+    /// A foreign static item (`static ext: u8`), with optional mutability
+    /// (the boolean is true when mutable)
+    ForeignItemStatic(P<Ty>, bool),
+}
+
+impl ForeignItem_ {
+    pub fn descriptive_variant(&self) -> &str {
+        match *self {
+            ForeignItemFn(..) => "foreign function",
+            ForeignItemStatic(..) => "foreign static item",
+        }
+    }
+}
+
+/// A free variable referred to in a function.
+#[derive(Copy, Clone, RustcEncodable, RustcDecodable)]
+pub struct Freevar {
+    /// The variable being accessed free.
+    pub def: Def,
+
+    // First span where it is accessed (there can be multiple).
+    pub span: Span
+}
+
+pub type FreevarMap = NodeMap<Vec<Freevar>>;
+
+pub type CaptureModeMap = NodeMap<CaptureClause>;
+
+// Trait method resolution
+pub type TraitMap = NodeMap<Vec<DefId>>;
+
+// Map from the NodeId of a glob import to a list of items which are actually
+// imported.
+pub type GlobMap = NodeMap<FnvHashSet<Name>>;
diff --git a/src/librustc/hir/pat_util.rs b/src/librustc/hir/pat_util.rs
new file mode 100644 (file)
index 0000000..6cc5a29
--- /dev/null
@@ -0,0 +1,240 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use hir::def::*;
+use hir::def_id::DefId;
+use ty::TyCtxt;
+use util::nodemap::FnvHashMap;
+
+use syntax::ast;
+use hir::{self, PatKind};
+use syntax::codemap::{respan, Span, Spanned, DUMMY_SP};
+
+use std::cell::RefCell;
+
+pub type PatIdMap = FnvHashMap<ast::Name, ast::NodeId>;
+
+// This is used because same-named variables in alternative patterns need to
+// use the NodeId of their namesake in the first pattern.
+pub fn pat_id_map(dm: &RefCell<DefMap>, pat: &hir::Pat) -> PatIdMap {
+    let mut map = FnvHashMap();
+    pat_bindings(dm, pat, |_bm, p_id, _s, path1| {
+        map.insert(path1.node, p_id);
+    });
+    map
+}
+
+pub fn pat_is_refutable(dm: &DefMap, pat: &hir::Pat) -> bool {
+    match pat.node {
+        PatKind::Lit(_) | PatKind::Range(_, _) | PatKind::QPath(..) => true,
+        PatKind::TupleStruct(..) |
+        PatKind::Path(..) |
+        PatKind::Ident(_, _, None) |
+        PatKind::Struct(..) => {
+            match dm.get(&pat.id).map(|d| d.full_def()) {
+                Some(Def::Variant(..)) => true,
+                _ => false
+            }
+        }
+        PatKind::Vec(_, _, _) => true,
+        _ => false
+    }
+}
+
+pub fn pat_is_variant_or_struct(dm: &DefMap, pat: &hir::Pat) -> bool {
+    match pat.node {
+        PatKind::TupleStruct(..) |
+        PatKind::Path(..) |
+        PatKind::Ident(_, _, None) |
+        PatKind::Struct(..) => {
+            match dm.get(&pat.id).map(|d| d.full_def()) {
+                Some(Def::Variant(..)) | Some(Def::Struct(..)) | Some(Def::TyAlias(..)) => true,
+                _ => false
+            }
+        }
+        _ => false
+    }
+}
+
+pub fn pat_is_const(dm: &DefMap, pat: &hir::Pat) -> bool {
+    match pat.node {
+        PatKind::Ident(_, _, None) | PatKind::Path(..) | PatKind::QPath(..) => {
+            match dm.get(&pat.id).map(|d| d.full_def()) {
+                Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => true,
+                _ => false
+            }
+        }
+        _ => false
+    }
+}
+
+// Same as above, except that partially-resolved defs cause `false` to be
+// returned instead of a panic.
+pub fn pat_is_resolved_const(dm: &DefMap, pat: &hir::Pat) -> bool {
+    match pat.node {
+        PatKind::Ident(_, _, None) | PatKind::Path(..) | PatKind::QPath(..) => {
+            match dm.get(&pat.id)
+                    .and_then(|d| if d.depth == 0 { Some(d.base_def) }
+                                  else { None } ) {
+                Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => true,
+                _ => false
+            }
+        }
+        _ => false
+    }
+}
+
+pub fn pat_is_binding(dm: &DefMap, pat: &hir::Pat) -> bool {
+    match pat.node {
+        PatKind::Ident(..) => {
+            !pat_is_variant_or_struct(dm, pat) &&
+            !pat_is_const(dm, pat)
+        }
+        _ => false
+    }
+}
+
+pub fn pat_is_binding_or_wild(dm: &DefMap, pat: &hir::Pat) -> bool {
+    match pat.node {
+        PatKind::Ident(..) => pat_is_binding(dm, pat),
+        PatKind::Wild => true,
+        _ => false
+    }
+}
+
+/// Call `it` on every "binding" in a pattern, e.g., on `a` in
+/// `match foo() { Some(a) => (), None => () }`
+pub fn pat_bindings<I>(dm: &RefCell<DefMap>, pat: &hir::Pat, mut it: I) where
+    I: FnMut(hir::BindingMode, ast::NodeId, Span, &Spanned<ast::Name>),
+{
+    pat.walk(|p| {
+        match p.node {
+          PatKind::Ident(binding_mode, ref pth, _) if pat_is_binding(&dm.borrow(), p) => {
+            it(binding_mode, p.id, p.span, &respan(pth.span, pth.node.name));
+          }
+          _ => {}
+        }
+        true
+    });
+}
+pub fn pat_bindings_ident<I>(dm: &RefCell<DefMap>, pat: &hir::Pat, mut it: I) where
+    I: FnMut(hir::BindingMode, ast::NodeId, Span, &Spanned<hir::Ident>),
+{
+    pat.walk(|p| {
+        match p.node {
+          PatKind::Ident(binding_mode, ref pth, _) if pat_is_binding(&dm.borrow(), p) => {
+            it(binding_mode, p.id, p.span, &respan(pth.span, pth.node));
+          }
+          _ => {}
+        }
+        true
+    });
+}
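+
+// Illustrative sketch (hypothetical `dm: &RefCell<DefMap>` and `pat: &hir::Pat`):
+// collecting every name bound by a pattern with `pat_bindings`.
+//
+//     let mut names = Vec::new();
+//     pat_bindings(dm, pat, |_mode, _id, _span, path1| {
+//         names.push(path1.node);
+//     });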
+
+/// Checks if the pattern contains any patterns that bind something to
+/// an ident, e.g. `foo`, `Foo(foo)`, or `foo @ Bar(..)`.
+pub fn pat_contains_bindings(dm: &DefMap, pat: &hir::Pat) -> bool {
+    let mut contains_bindings = false;
+    pat.walk(|p| {
+        if pat_is_binding(dm, p) {
+            contains_bindings = true;
+            false // there's at least one binding, can short circuit now.
+        } else {
+            true
+        }
+    });
+    contains_bindings
+}
+
+/// Checks if the pattern contains any `ref` or `ref mut` bindings,
+/// and if so, whether it contains mutable ones or only immutable ones.
+pub fn pat_contains_ref_binding(dm: &RefCell<DefMap>, pat: &hir::Pat) -> Option<hir::Mutability> {
+    let mut result = None;
+    pat_bindings(dm, pat, |mode, _, _, _| {
+        match mode {
+            hir::BindingMode::BindByRef(m) => {
+                // Pick Mutable as maximum
+                match result {
+                    None | Some(hir::MutImmutable) => result = Some(m),
+                    _ => (),
+                }
+            }
+            hir::BindingMode::BindByValue(_) => { }
+        }
+    });
+    result
+}
+
+/// Checks if the patterns for this arm contain any `ref` or `ref mut`
+/// bindings, and if so, whether they contain mutable ones or only immutable ones.
+pub fn arm_contains_ref_binding(dm: &RefCell<DefMap>, arm: &hir::Arm) -> Option<hir::Mutability> {
+    arm.pats.iter()
+            .filter_map(|pat| pat_contains_ref_binding(dm, pat))
+            .max_by_key(|m| match *m {
+                hir::MutMutable => 1,
+                hir::MutImmutable => 0,
+            })
+}
+
+/// Checks if the pattern contains any patterns that bind something to
+/// an ident or wildcard, e.g. `foo`, `Foo(_)`, or `foo @ Bar(..)`.
+pub fn pat_contains_bindings_or_wild(dm: &DefMap, pat: &hir::Pat) -> bool {
+    let mut contains_bindings = false;
+    pat.walk(|p| {
+        if pat_is_binding_or_wild(dm, p) {
+            contains_bindings = true;
+            false // there's at least one binding/wildcard, can short circuit now.
+        } else {
+            true
+        }
+    });
+    contains_bindings
+}
+
+pub fn simple_name<'a>(pat: &'a hir::Pat) -> Option<ast::Name> {
+    match pat.node {
+        PatKind::Ident(hir::BindByValue(_), ref path1, None) => {
+            Some(path1.node.name)
+        }
+        _ => {
+            None
+        }
+    }
+}
+
+pub fn def_to_path(tcx: &TyCtxt, id: DefId) -> hir::Path {
+    let name = tcx.item_name(id);
+    hir::Path::from_ident(DUMMY_SP, hir::Ident::from_name(name))
+}
+
+/// Returns the variants that must exist for the pattern to match.
+pub fn necessary_variants(dm: &DefMap, pat: &hir::Pat) -> Vec<DefId> {
+    let mut variants = vec![];
+    pat.walk(|p| {
+        match p.node {
+            PatKind::TupleStruct(..) |
+            PatKind::Path(..) |
+            PatKind::Ident(_, _, None) |
+            PatKind::Struct(..) => {
+                match dm.get(&p.id) {
+                    Some(&PathResolution { base_def: Def::Variant(_, id), .. }) => {
+                        variants.push(id);
+                    }
+                    _ => ()
+                }
+            }
+            _ => ()
+        }
+        true
+    });
+    variants.sort();
+    variants.dedup();
+    variants
+}
diff --git a/src/librustc/hir/print.rs b/src/librustc/hir/print.rs
new file mode 100644 (file)
index 0000000..e9ed0ed
--- /dev/null
@@ -0,0 +1,2416 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub use self::AnnNode::*;
+
+use syntax::abi::Abi;
+use syntax::ast;
+use syntax::codemap::{self, CodeMap, BytePos, Spanned};
+use syntax::errors;
+use syntax::parse::token::{self, BinOpToken};
+use syntax::parse::lexer::comments;
+use syntax::parse;
+use syntax::print::pp::{self, break_offset, word, space, hardbreak};
+use syntax::print::pp::{Breaks, eof};
+use syntax::print::pp::Breaks::{Consistent, Inconsistent};
+use syntax::print::pprust::{self as ast_pp, PrintState};
+use syntax::ptr::P;
+
+use hir;
+use hir::{Crate, PatKind, RegionTyParamBound, TraitTyParamBound, TraitBoundModifier};
+
+use std::io::{self, Write, Read};
+
+pub enum AnnNode<'a> {
+    NodeName(&'a ast::Name),
+    NodeBlock(&'a hir::Block),
+    NodeItem(&'a hir::Item),
+    NodeSubItem(ast::NodeId),
+    NodeExpr(&'a hir::Expr),
+    NodePat(&'a hir::Pat),
+}
+
+pub trait PpAnn {
+    fn pre(&self, _state: &mut State, _node: AnnNode) -> io::Result<()> {
+        Ok(())
+    }
+    fn post(&self, _state: &mut State, _node: AnnNode) -> io::Result<()> {
+        Ok(())
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct NoAnn;
+
+impl PpAnn for NoAnn {}
+
+
+pub struct State<'a> {
+    krate: Option<&'a Crate>,
+    pub s: pp::Printer<'a>,
+    cm: Option<&'a CodeMap>,
+    comments: Option<Vec<comments::Comment>>,
+    literals: Option<Vec<comments::Literal>>,
+    cur_cmnt_and_lit: ast_pp::CurrentCommentAndLiteral,
+    boxes: Vec<pp::Breaks>,
+    ann: &'a (PpAnn + 'a),
+}
+
+impl<'a> PrintState<'a> for State<'a> {
+    fn writer(&mut self) -> &mut pp::Printer<'a> {
+        &mut self.s
+    }
+
+    fn boxes(&mut self) -> &mut Vec<pp::Breaks> {
+        &mut self.boxes
+    }
+
+    fn comments(&mut self) -> &mut Option<Vec<comments::Comment>> {
+        &mut self.comments
+    }
+
+    fn cur_cmnt_and_lit(&mut self) -> &mut ast_pp::CurrentCommentAndLiteral {
+        &mut self.cur_cmnt_and_lit
+    }
+
+    fn literals(&self) -> &Option<Vec<comments::Literal>> {
+        &self.literals
+    }
+}
+
+pub fn rust_printer<'a>(writer: Box<Write + 'a>, krate: Option<&'a Crate>) -> State<'a> {
+    static NO_ANN: NoAnn = NoAnn;
+    rust_printer_annotated(writer, &NO_ANN, krate)
+}
+
+pub fn rust_printer_annotated<'a>(writer: Box<Write + 'a>,
+                                  ann: &'a PpAnn,
+                                  krate: Option<&'a Crate>)
+                                  -> State<'a> {
+    State {
+        krate: krate,
+        s: pp::mk_printer(writer, default_columns),
+        cm: None,
+        comments: None,
+        literals: None,
+        cur_cmnt_and_lit: ast_pp::CurrentCommentAndLiteral {
+            cur_cmnt: 0,
+            cur_lit: 0,
+        },
+        boxes: Vec::new(),
+        ann: ann,
+    }
+}
+
+#[allow(non_upper_case_globals)]
+pub const indent_unit: usize = 4;
+
+#[allow(non_upper_case_globals)]
+pub const default_columns: usize = 78;
+
+
+/// Requires you to pass an input filename and reader so that
+/// it can scan the input text for comments and literals to
+/// copy forward.
+pub fn print_crate<'a>(cm: &'a CodeMap,
+                       span_diagnostic: &errors::Handler,
+                       krate: &hir::Crate,
+                       filename: String,
+                       input: &mut Read,
+                       out: Box<Write + 'a>,
+                       ann: &'a PpAnn,
+                       is_expanded: bool)
+                       -> io::Result<()> {
+    let mut s = State::new_from_input(cm, span_diagnostic, filename, input,
+                                      out, ann, is_expanded, Some(krate));
+
+    // When printing the AST, we sometimes need to inject `#[no_std]` here.
+    // Since you can't compile the HIR, it's not necessary.
+
+    s.print_mod(&krate.module, &krate.attrs)?;
+    s.print_remaining_comments()?;
+    eof(&mut s.s)
+}
+
+impl<'a> State<'a> {
+    pub fn new_from_input(cm: &'a CodeMap,
+                          span_diagnostic: &errors::Handler,
+                          filename: String,
+                          input: &mut Read,
+                          out: Box<Write + 'a>,
+                          ann: &'a PpAnn,
+                          is_expanded: bool,
+                          krate: Option<&'a Crate>)
+                          -> State<'a> {
+        let (cmnts, lits) = comments::gather_comments_and_literals(span_diagnostic,
+                                                                   filename,
+                                                                   input);
+
+        State::new(cm,
+                   out,
+                   ann,
+                   Some(cmnts),
+                   // If the code is post expansion, don't use the table of
+                   // literals, since it doesn't correspond with the literals
+                   // in the AST anymore.
+                   if is_expanded {
+                       None
+                   } else {
+                       Some(lits)
+                   },
+                   krate)
+    }
+
+    pub fn new(cm: &'a CodeMap,
+               out: Box<Write + 'a>,
+               ann: &'a PpAnn,
+               comments: Option<Vec<comments::Comment>>,
+               literals: Option<Vec<comments::Literal>>,
+               krate: Option<&'a Crate>)
+               -> State<'a> {
+        State {
+            krate: krate,
+            s: pp::mk_printer(out, default_columns),
+            cm: Some(cm),
+            comments: comments.clone(),
+            literals: literals.clone(),
+            cur_cmnt_and_lit: ast_pp::CurrentCommentAndLiteral {
+                cur_cmnt: 0,
+                cur_lit: 0,
+            },
+            boxes: Vec::new(),
+            ann: ann,
+        }
+    }
+}
+
+pub fn to_string<F>(f: F) -> String
+    where F: FnOnce(&mut State) -> io::Result<()>
+{
+    let mut wr = Vec::new();
+    {
+        let mut printer = rust_printer(Box::new(&mut wr), None);
+        f(&mut printer).unwrap();
+        eof(&mut printer.s).unwrap();
+    }
+    String::from_utf8(wr).unwrap()
+}
+
+pub fn binop_to_string(op: BinOpToken) -> &'static str {
+    match op {
+        token::Plus => "+",
+        token::Minus => "-",
+        token::Star => "*",
+        token::Slash => "/",
+        token::Percent => "%",
+        token::Caret => "^",
+        token::And => "&",
+        token::Or => "|",
+        token::Shl => "<<",
+        token::Shr => ">>",
+    }
+}
+
+pub fn ty_to_string(ty: &hir::Ty) -> String {
+    to_string(|s| s.print_type(ty))
+}
+
+pub fn bounds_to_string(bounds: &[hir::TyParamBound]) -> String {
+    to_string(|s| s.print_bounds("", bounds))
+}
+
+pub fn pat_to_string(pat: &hir::Pat) -> String {
+    to_string(|s| s.print_pat(pat))
+}
+
+pub fn arm_to_string(arm: &hir::Arm) -> String {
+    to_string(|s| s.print_arm(arm))
+}
+
+pub fn expr_to_string(e: &hir::Expr) -> String {
+    to_string(|s| s.print_expr(e))
+}
+
+pub fn lifetime_to_string(e: &hir::Lifetime) -> String {
+    to_string(|s| s.print_lifetime(e))
+}
+
+pub fn stmt_to_string(stmt: &hir::Stmt) -> String {
+    to_string(|s| s.print_stmt(stmt))
+}
+
+pub fn item_to_string(i: &hir::Item) -> String {
+    to_string(|s| s.print_item(i))
+}
+
+pub fn impl_item_to_string(i: &hir::ImplItem) -> String {
+    to_string(|s| s.print_impl_item(i))
+}
+
+pub fn trait_item_to_string(i: &hir::TraitItem) -> String {
+    to_string(|s| s.print_trait_item(i))
+}
+
+pub fn generics_to_string(generics: &hir::Generics) -> String {
+    to_string(|s| s.print_generics(generics))
+}
+
+pub fn where_clause_to_string(i: &hir::WhereClause) -> String {
+    to_string(|s| s.print_where_clause(i))
+}
+
+pub fn fn_block_to_string(p: &hir::FnDecl) -> String {
+    to_string(|s| s.print_fn_block_args(p))
+}
+
+pub fn path_to_string(p: &hir::Path) -> String {
+    to_string(|s| s.print_path(p, false, 0))
+}
+
+pub fn name_to_string(name: ast::Name) -> String {
+    to_string(|s| s.print_name(name))
+}
+
+pub fn fun_to_string(decl: &hir::FnDecl,
+                     unsafety: hir::Unsafety,
+                     constness: hir::Constness,
+                     name: ast::Name,
+                     opt_explicit_self: Option<&hir::ExplicitSelf_>,
+                     generics: &hir::Generics)
+                     -> String {
+    to_string(|s| {
+        s.head("")?;
+        s.print_fn(decl,
+                   unsafety,
+                   constness,
+                   Abi::Rust,
+                   Some(name),
+                   generics,
+                   opt_explicit_self,
+                   &hir::Inherited)?;
+        s.end()?; // Close the head box
+        s.end() // Close the outer box
+    })
+}
+
+pub fn block_to_string(blk: &hir::Block) -> String {
+    to_string(|s| {
+        // containing cbox, will be closed by print-block at }
+        s.cbox(indent_unit)?;
+        // head-ibox, will be closed by print-block after {
+        s.ibox(0)?;
+        s.print_block(blk)
+    })
+}
+
+pub fn explicit_self_to_string(explicit_self: &hir::ExplicitSelf_) -> String {
+    to_string(|s| s.print_explicit_self(explicit_self, hir::MutImmutable).map(|_| {}))
+}
+
+pub fn variant_to_string(var: &hir::Variant) -> String {
+    to_string(|s| s.print_variant(var))
+}
+
+pub fn arg_to_string(arg: &hir::Arg) -> String {
+    to_string(|s| s.print_arg(arg, false))
+}
+
+pub fn visibility_qualified(vis: &hir::Visibility, s: &str) -> String {
+    match *vis {
+        hir::Public => format!("pub {}", s),
+        hir::Inherited => s.to_string(),
+    }
+}
+
+fn needs_parentheses(expr: &hir::Expr) -> bool {
+    match expr.node {
+        hir::ExprAssign(..) |
+        hir::ExprBinary(..) |
+        hir::ExprClosure(..) |
+        hir::ExprAssignOp(..) |
+        hir::ExprCast(..) |
+        hir::ExprType(..) => true,
+        _ => false,
+    }
+}
+
+impl<'a> State<'a> {
+    pub fn cbox(&mut self, u: usize) -> io::Result<()> {
+        self.boxes.push(pp::Breaks::Consistent);
+        pp::cbox(&mut self.s, u)
+    }
+
+    pub fn nbsp(&mut self) -> io::Result<()> {
+        word(&mut self.s, " ")
+    }
+
+    pub fn word_nbsp(&mut self, w: &str) -> io::Result<()> {
+        word(&mut self.s, w)?;
+        self.nbsp()
+    }
+
+    pub fn head(&mut self, w: &str) -> io::Result<()> {
+        // outer-box is consistent
+        self.cbox(indent_unit)?;
+        // head-box is inconsistent
+        self.ibox(w.len() + 1)?;
+        // keyword that starts the head
+        if !w.is_empty() {
+            self.word_nbsp(w)?;
+        }
+        Ok(())
+    }
+
+    pub fn bopen(&mut self) -> io::Result<()> {
+        word(&mut self.s, "{")?;
+        self.end() // close the head-box
+    }
+
+    pub fn bclose_(&mut self, span: codemap::Span, indented: usize) -> io::Result<()> {
+        self.bclose_maybe_open(span, indented, true)
+    }
+    pub fn bclose_maybe_open(&mut self,
+                             span: codemap::Span,
+                             indented: usize,
+                             close_box: bool)
+                             -> io::Result<()> {
+        self.maybe_print_comment(span.hi)?;
+        self.break_offset_if_not_bol(1, -(indented as isize))?;
+        word(&mut self.s, "}")?;
+        if close_box {
+            self.end()?; // close the outer-box
+        }
+        Ok(())
+    }
+    pub fn bclose(&mut self, span: codemap::Span) -> io::Result<()> {
+        self.bclose_(span, indent_unit)
+    }
+
+    pub fn in_cbox(&self) -> bool {
+        match self.boxes.last() {
+            Some(&last_box) => last_box == pp::Breaks::Consistent,
+            None => false,
+        }
+    }
+    pub fn space_if_not_bol(&mut self) -> io::Result<()> {
+        if !self.is_bol() {
+            space(&mut self.s)?;
+        }
+        Ok(())
+    }
+    pub fn break_offset_if_not_bol(&mut self, n: usize, off: isize) -> io::Result<()> {
+        if !self.is_bol() {
+            break_offset(&mut self.s, n, off)
+        } else {
+            if off != 0 && self.s.last_token().is_hardbreak_tok() {
+                // We do something pretty sketchy here: tuck the nonzero
+                // offset-adjustment we were going to deposit along with the
+                // break into the previous hardbreak.
+                self.s.replace_last_token(pp::hardbreak_tok_offset(off));
+            }
+            Ok(())
+        }
+    }
+
+    // Synthesizes a comment that was not textually present in the original source
+    // file.
+    pub fn synth_comment(&mut self, text: String) -> io::Result<()> {
+        word(&mut self.s, "/*")?;
+        space(&mut self.s)?;
+        word(&mut self.s, &text[..])?;
+        space(&mut self.s)?;
+        word(&mut self.s, "*/")
+    }
+
+
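+    // Prints `elts` as a comma-separated list, interleaving any source comments that
+    // fall near each element's span (as reported by `get_span`).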
+    pub fn commasep_cmnt<T, F, G>(&mut self,
+                                  b: Breaks,
+                                  elts: &[T],
+                                  mut op: F,
+                                  mut get_span: G)
+                                  -> io::Result<()>
+        where F: FnMut(&mut State, &T) -> io::Result<()>,
+              G: FnMut(&T) -> codemap::Span
+    {
+        self.rbox(0, b)?;
+        let len = elts.len();
+        let mut i = 0;
+        for elt in elts {
+            self.maybe_print_comment(get_span(elt).hi)?;
+            op(self, elt)?;
+            i += 1;
+            if i < len {
+                word(&mut self.s, ",")?;
+                self.maybe_print_trailing_comment(get_span(elt), Some(get_span(&elts[i]).hi))?;
+                self.space_if_not_bol()?;
+            }
+        }
+        self.end()
+    }
+
+    pub fn commasep_exprs(&mut self, b: Breaks, exprs: &[P<hir::Expr>]) -> io::Result<()> {
+        self.commasep_cmnt(b, exprs, |s, e| s.print_expr(&e), |e| e.span)
+    }
+
+    pub fn print_mod(&mut self, _mod: &hir::Mod, attrs: &[ast::Attribute]) -> io::Result<()> {
+        self.print_inner_attributes(attrs)?;
+        for item_id in &_mod.item_ids {
+            self.print_item_id(item_id)?;
+        }
+        Ok(())
+    }
+
+    pub fn print_foreign_mod(&mut self,
+                             nmod: &hir::ForeignMod,
+                             attrs: &[ast::Attribute])
+                             -> io::Result<()> {
+        self.print_inner_attributes(attrs)?;
+        for item in &nmod.items {
+            self.print_foreign_item(item)?;
+        }
+        Ok(())
+    }
+
+    pub fn print_opt_lifetime(&mut self, lifetime: &Option<hir::Lifetime>) -> io::Result<()> {
+        if let Some(l) = *lifetime {
+            self.print_lifetime(&l)?;
+            self.nbsp()?;
+        }
+        Ok(())
+    }
+
+    pub fn print_type(&mut self, ty: &hir::Ty) -> io::Result<()> {
+        self.maybe_print_comment(ty.span.lo)?;
+        self.ibox(0)?;
+        match ty.node {
+            hir::TyVec(ref ty) => {
+                word(&mut self.s, "[")?;
+                self.print_type(&ty)?;
+                word(&mut self.s, "]")?;
+            }
+            hir::TyPtr(ref mt) => {
+                word(&mut self.s, "*")?;
+                match mt.mutbl {
+                    hir::MutMutable => self.word_nbsp("mut")?,
+                    hir::MutImmutable => self.word_nbsp("const")?,
+                }
+                self.print_type(&mt.ty)?;
+            }
+            hir::TyRptr(ref lifetime, ref mt) => {
+                word(&mut self.s, "&")?;
+                self.print_opt_lifetime(lifetime)?;
+                self.print_mt(mt)?;
+            }
+            hir::TyTup(ref elts) => {
+                self.popen()?;
+                self.commasep(Inconsistent, &elts[..], |s, ty| s.print_type(&ty))?;
+                if elts.len() == 1 {
+                    word(&mut self.s, ",")?;
+                }
+                self.pclose()?;
+            }
+            hir::TyBareFn(ref f) => {
+                let generics = hir::Generics {
+                    lifetimes: f.lifetimes.clone(),
+                    ty_params: hir::HirVec::new(),
+                    where_clause: hir::WhereClause {
+                        id: ast::DUMMY_NODE_ID,
+                        predicates: hir::HirVec::new(),
+                    },
+                };
+                self.print_ty_fn(f.abi, f.unsafety, &f.decl, None, &generics, None)?;
+            }
+            hir::TyPath(None, ref path) => {
+                self.print_path(path, false, 0)?;
+            }
+            hir::TyPath(Some(ref qself), ref path) => {
+                self.print_qpath(path, qself, false)?
+            }
+            hir::TyObjectSum(ref ty, ref bounds) => {
+                self.print_type(&ty)?;
+                self.print_bounds("+", &bounds[..])?;
+            }
+            hir::TyPolyTraitRef(ref bounds) => {
+                self.print_bounds("", &bounds[..])?;
+            }
+            hir::TyFixedLengthVec(ref ty, ref v) => {
+                word(&mut self.s, "[")?;
+                self.print_type(&ty)?;
+                word(&mut self.s, "; ")?;
+                self.print_expr(&v)?;
+                word(&mut self.s, "]")?;
+            }
+            hir::TyTypeof(ref e) => {
+                word(&mut self.s, "typeof(")?;
+                self.print_expr(&e)?;
+                word(&mut self.s, ")")?;
+            }
+            hir::TyInfer => {
+                word(&mut self.s, "_")?;
+            }
+        }
+        self.end()
+    }
+
+    pub fn print_foreign_item(&mut self, item: &hir::ForeignItem) -> io::Result<()> {
+        self.hardbreak_if_not_bol()?;
+        self.maybe_print_comment(item.span.lo)?;
+        self.print_outer_attributes(&item.attrs)?;
+        match item.node {
+            hir::ForeignItemFn(ref decl, ref generics) => {
+                self.head("")?;
+                self.print_fn(decl,
+                              hir::Unsafety::Normal,
+                              hir::Constness::NotConst,
+                              Abi::Rust,
+                              Some(item.name),
+                              generics,
+                              None,
+                              &item.vis)?;
+                self.end()?; // end head-ibox
+                word(&mut self.s, ";")?;
+                self.end() // end the outer fn box
+            }
+            hir::ForeignItemStatic(ref t, m) => {
+                self.head(&visibility_qualified(&item.vis, "static"))?;
+                if m {
+                    self.word_space("mut")?;
+                }
+                self.print_name(item.name)?;
+                self.word_space(":")?;
+                self.print_type(&t)?;
+                word(&mut self.s, ";")?;
+                self.end()?; // end the head-ibox
+                self.end() // end the outer cbox
+            }
+        }
+    }
+
+    fn print_associated_const(&mut self,
+                              name: ast::Name,
+                              ty: &hir::Ty,
+                              default: Option<&hir::Expr>,
+                              vis: &hir::Visibility)
+                              -> io::Result<()> {
+        word(&mut self.s, &visibility_qualified(vis, ""))?;
+        self.word_space("const")?;
+        self.print_name(name)?;
+        self.word_space(":")?;
+        self.print_type(ty)?;
+        if let Some(expr) = default {
+            space(&mut self.s)?;
+            self.word_space("=")?;
+            self.print_expr(expr)?;
+        }
+        word(&mut self.s, ";")
+    }
+
+    fn print_associated_type(&mut self,
+                             name: ast::Name,
+                             bounds: Option<&hir::TyParamBounds>,
+                             ty: Option<&hir::Ty>)
+                             -> io::Result<()> {
+        self.word_space("type")?;
+        self.print_name(name)?;
+        if let Some(bounds) = bounds {
+            self.print_bounds(":", bounds)?;
+        }
+        if let Some(ty) = ty {
+            space(&mut self.s)?;
+            self.word_space("=")?;
+            self.print_type(ty)?;
+        }
+        word(&mut self.s, ";")
+    }
+
+    pub fn print_item_id(&mut self, item_id: &hir::ItemId) -> io::Result<()> {
+        if let Some(krate) = self.krate {
+            let item = &krate.items[&item_id.id];
+            self.print_item(item)
+        } else {
+            // skip nested items if the krate context was not provided
+            Ok(())
+        }
+    }
+
+    /// Pretty-print an item
+    pub fn print_item(&mut self, item: &hir::Item) -> io::Result<()> {
+        self.hardbreak_if_not_bol()?;
+        self.maybe_print_comment(item.span.lo)?;
+        self.print_outer_attributes(&item.attrs)?;
+        self.ann.pre(self, NodeItem(item))?;
+        match item.node {
+            hir::ItemExternCrate(ref optional_path) => {
+                self.head(&visibility_qualified(&item.vis, "extern crate"))?;
+                if let Some(p) = *optional_path {
+                    let val = p.as_str();
+                    if val.contains("-") {
+                        self.print_string(&val, ast::StrStyle::Cooked)?;
+                    } else {
+                        self.print_name(p)?;
+                    }
+                    space(&mut self.s)?;
+                    word(&mut self.s, "as")?;
+                    space(&mut self.s)?;
+                }
+                self.print_name(item.name)?;
+                word(&mut self.s, ";")?;
+                self.end()?; // end inner head-block
+                self.end()?; // end outer head-block
+            }
+            hir::ItemUse(ref vp) => {
+                self.head(&visibility_qualified(&item.vis, "use"))?;
+                self.print_view_path(&vp)?;
+                word(&mut self.s, ";")?;
+                self.end()?; // end inner head-block
+                self.end()?; // end outer head-block
+            }
+            hir::ItemStatic(ref ty, m, ref expr) => {
+                self.head(&visibility_qualified(&item.vis, "static"))?;
+                if m == hir::MutMutable {
+                    self.word_space("mut")?;
+                }
+                self.print_name(item.name)?;
+                self.word_space(":")?;
+                self.print_type(&ty)?;
+                space(&mut self.s)?;
+                self.end()?; // end the head-ibox
+
+                self.word_space("=")?;
+                self.print_expr(&expr)?;
+                word(&mut self.s, ";")?;
+                self.end()?; // end the outer cbox
+            }
+            hir::ItemConst(ref ty, ref expr) => {
+                self.head(&visibility_qualified(&item.vis, "const"))?;
+                self.print_name(item.name)?;
+                self.word_space(":")?;
+                self.print_type(&ty)?;
+                space(&mut self.s)?;
+                self.end()?; // end the head-ibox
+
+                self.word_space("=")?;
+                self.print_expr(&expr)?;
+                word(&mut self.s, ";")?;
+                self.end()?; // end the outer cbox
+            }
+            hir::ItemFn(ref decl, unsafety, constness, abi, ref typarams, ref body) => {
+                self.head("")?;
+                self.print_fn(decl,
+                              unsafety,
+                              constness,
+                              abi,
+                              Some(item.name),
+                              typarams,
+                              None,
+                              &item.vis)?;
+                word(&mut self.s, " ")?;
+                self.print_block_with_attrs(&body, &item.attrs)?;
+            }
+            hir::ItemMod(ref _mod) => {
+                self.head(&visibility_qualified(&item.vis, "mod"))?;
+                self.print_name(item.name)?;
+                self.nbsp()?;
+                self.bopen()?;
+                self.print_mod(_mod, &item.attrs)?;
+                self.bclose(item.span)?;
+            }
+            hir::ItemForeignMod(ref nmod) => {
+                self.head("extern")?;
+                self.word_nbsp(&nmod.abi.to_string())?;
+                self.bopen()?;
+                self.print_foreign_mod(nmod, &item.attrs)?;
+                self.bclose(item.span)?;
+            }
+            hir::ItemTy(ref ty, ref params) => {
+                self.ibox(indent_unit)?;
+                self.ibox(0)?;
+                self.word_nbsp(&visibility_qualified(&item.vis, "type"))?;
+                self.print_name(item.name)?;
+                self.print_generics(params)?;
+                self.end()?; // end the inner ibox
+
+                self.print_where_clause(&params.where_clause)?;
+                space(&mut self.s)?;
+                self.word_space("=")?;
+                self.print_type(&ty)?;
+                word(&mut self.s, ";")?;
+                self.end()?; // end the outer ibox
+            }
+            hir::ItemEnum(ref enum_definition, ref params) => {
+                self.print_enum_def(enum_definition, params, item.name, item.span, &item.vis)?;
+            }
+            hir::ItemStruct(ref struct_def, ref generics) => {
+                self.head(&visibility_qualified(&item.vis, "struct"))?;
+                self.print_struct(struct_def, generics, item.name, item.span, true)?;
+            }
+
+            hir::ItemDefaultImpl(unsafety, ref trait_ref) => {
+                self.head("")?;
+                self.print_visibility(&item.vis)?;
+                self.print_unsafety(unsafety)?;
+                self.word_nbsp("impl")?;
+                self.print_trait_ref(trait_ref)?;
+                space(&mut self.s)?;
+                self.word_space("for")?;
+                self.word_space("..")?;
+                self.bopen()?;
+                self.bclose(item.span)?;
+            }
+            hir::ItemImpl(unsafety,
+                          polarity,
+                          ref generics,
+                          ref opt_trait,
+                          ref ty,
+                          ref impl_items) => {
+                self.head("")?;
+                self.print_visibility(&item.vis)?;
+                self.print_unsafety(unsafety)?;
+                self.word_nbsp("impl")?;
+
+                if generics.is_parameterized() {
+                    self.print_generics(generics)?;
+                    space(&mut self.s)?;
+                }
+
+                match polarity {
+                    hir::ImplPolarity::Negative => {
+                        word(&mut self.s, "!")?;
+                    }
+                    _ => {}
+                }
+
+                match opt_trait {
+                    &Some(ref t) => {
+                        self.print_trait_ref(t)?;
+                        space(&mut self.s)?;
+                        self.word_space("for")?;
+                    }
+                    &None => {}
+                }
+
+                self.print_type(&ty)?;
+                self.print_where_clause(&generics.where_clause)?;
+
+                space(&mut self.s)?;
+                self.bopen()?;
+                self.print_inner_attributes(&item.attrs)?;
+                for impl_item in impl_items {
+                    self.print_impl_item(impl_item)?;
+                }
+                self.bclose(item.span)?;
+            }
+            hir::ItemTrait(unsafety, ref generics, ref bounds, ref trait_items) => {
+                self.head("")?;
+                self.print_visibility(&item.vis)?;
+                self.print_unsafety(unsafety)?;
+                self.word_nbsp("trait")?;
+                self.print_name(item.name)?;
+                self.print_generics(generics)?;
+                let mut real_bounds = Vec::with_capacity(bounds.len());
+                for b in bounds.iter() {
+                    if let TraitTyParamBound(ref ptr, hir::TraitBoundModifier::Maybe) = *b {
+                        space(&mut self.s)?;
+                        self.word_space("for ?")?;
+                        self.print_trait_ref(&ptr.trait_ref)?;
+                    } else {
+                        real_bounds.push(b.clone());
+                    }
+                }
+                self.print_bounds(":", &real_bounds[..])?;
+                self.print_where_clause(&generics.where_clause)?;
+                word(&mut self.s, " ")?;
+                self.bopen()?;
+                for trait_item in trait_items {
+                    self.print_trait_item(trait_item)?;
+                }
+                self.bclose(item.span)?;
+            }
+        }
+        self.ann.post(self, NodeItem(item))
+    }
+
+    fn print_trait_ref(&mut self, t: &hir::TraitRef) -> io::Result<()> {
+        self.print_path(&t.path, false, 0)
+    }
+
+    fn print_formal_lifetime_list(&mut self, lifetimes: &[hir::LifetimeDef]) -> io::Result<()> {
+        if !lifetimes.is_empty() {
+            word(&mut self.s, "for<")?;
+            let mut comma = false;
+            for lifetime_def in lifetimes {
+                if comma {
+                    self.word_space(",")?
+                }
+                self.print_lifetime_def(lifetime_def)?;
+                comma = true;
+            }
+            word(&mut self.s, ">")?;
+        }
+        Ok(())
+    }
+
+    fn print_poly_trait_ref(&mut self, t: &hir::PolyTraitRef) -> io::Result<()> {
+        self.print_formal_lifetime_list(&t.bound_lifetimes)?;
+        self.print_trait_ref(&t.trait_ref)
+    }
+
+    pub fn print_enum_def(&mut self,
+                          enum_definition: &hir::EnumDef,
+                          generics: &hir::Generics,
+                          name: ast::Name,
+                          span: codemap::Span,
+                          visibility: &hir::Visibility)
+                          -> io::Result<()> {
+        self.head(&visibility_qualified(visibility, "enum"))?;
+        self.print_name(name)?;
+        self.print_generics(generics)?;
+        self.print_where_clause(&generics.where_clause)?;
+        space(&mut self.s)?;
+        self.print_variants(&enum_definition.variants, span)
+    }
+
+    pub fn print_variants(&mut self,
+                          variants: &[hir::Variant],
+                          span: codemap::Span)
+                          -> io::Result<()> {
+        self.bopen()?;
+        for v in variants {
+            self.space_if_not_bol()?;
+            self.maybe_print_comment(v.span.lo)?;
+            self.print_outer_attributes(&v.node.attrs)?;
+            self.ibox(indent_unit)?;
+            self.print_variant(v)?;
+            word(&mut self.s, ",")?;
+            self.end()?;
+            self.maybe_print_trailing_comment(v.span, None)?;
+        }
+        self.bclose(span)
+    }
+
+    pub fn print_visibility(&mut self, vis: &hir::Visibility) -> io::Result<()> {
+        match *vis {
+            hir::Public => self.word_nbsp("pub"),
+            hir::Inherited => Ok(()),
+        }
+    }
+
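+    // Prints a struct or enum-variant body: unit and tuple forms get an optional
+    // trailing ";" when `print_finalizer` is set; brace forms print each field on
+    // its own line.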
+    pub fn print_struct(&mut self,
+                        struct_def: &hir::VariantData,
+                        generics: &hir::Generics,
+                        name: ast::Name,
+                        span: codemap::Span,
+                        print_finalizer: bool)
+                        -> io::Result<()> {
+        self.print_name(name)?;
+        self.print_generics(generics)?;
+        if !struct_def.is_struct() {
+            if struct_def.is_tuple() {
+                self.popen()?;
+                self.commasep(Inconsistent, struct_def.fields(), |s, field| {
+                    s.print_visibility(&field.vis)?;
+                    s.maybe_print_comment(field.span.lo)?;
+                    s.print_type(&field.ty)
+                })?;
+                self.pclose()?;
+            }
+            self.print_where_clause(&generics.where_clause)?;
+            if print_finalizer {
+                word(&mut self.s, ";")?;
+            }
+            self.end()?;
+            self.end() // close the outer-box
+        } else {
+            self.print_where_clause(&generics.where_clause)?;
+            self.nbsp()?;
+            self.bopen()?;
+            self.hardbreak_if_not_bol()?;
+
+            for field in struct_def.fields() {
+                self.hardbreak_if_not_bol()?;
+                self.maybe_print_comment(field.span.lo)?;
+                self.print_outer_attributes(&field.attrs)?;
+                self.print_visibility(&field.vis)?;
+                self.print_name(field.name)?;
+                self.word_nbsp(":")?;
+                self.print_type(&field.ty)?;
+                word(&mut self.s, ",")?;
+            }
+
+            self.bclose(span)
+        }
+    }
+
+    pub fn print_variant(&mut self, v: &hir::Variant) -> io::Result<()> {
+        self.head("")?;
+        let generics = hir::Generics::empty();
+        self.print_struct(&v.node.data, &generics, v.node.name, v.span, false)?;
+        match v.node.disr_expr {
+            Some(ref d) => {
+                space(&mut self.s)?;
+                self.word_space("=")?;
+                self.print_expr(&d)
+            }
+            _ => Ok(()),
+        }
+    }
+    pub fn print_method_sig(&mut self,
+                            name: ast::Name,
+                            m: &hir::MethodSig,
+                            vis: &hir::Visibility)
+                            -> io::Result<()> {
+        self.print_fn(&m.decl,
+                      m.unsafety,
+                      m.constness,
+                      m.abi,
+                      Some(name),
+                      &m.generics,
+                      Some(&m.explicit_self.node),
+                      vis)
+    }
+
+    pub fn print_trait_item(&mut self, ti: &hir::TraitItem) -> io::Result<()> {
+        self.ann.pre(self, NodeSubItem(ti.id))?;
+        self.hardbreak_if_not_bol()?;
+        self.maybe_print_comment(ti.span.lo)?;
+        self.print_outer_attributes(&ti.attrs)?;
+        match ti.node {
+            hir::ConstTraitItem(ref ty, ref default) => {
+                self.print_associated_const(ti.name,
+                                            &ty,
+                                            default.as_ref().map(|expr| &**expr),
+                                            &hir::Inherited)?;
+            }
+            hir::MethodTraitItem(ref sig, ref body) => {
+                if body.is_some() {
+                    self.head("")?;
+                }
+                self.print_method_sig(ti.name, sig, &hir::Inherited)?;
+                if let Some(ref body) = *body {
+                    self.nbsp()?;
+                    self.print_block_with_attrs(body, &ti.attrs)?;
+                } else {
+                    word(&mut self.s, ";")?;
+                }
+            }
+            hir::TypeTraitItem(ref bounds, ref default) => {
+                self.print_associated_type(ti.name,
+                                           Some(bounds),
+                                           default.as_ref().map(|ty| &**ty))?;
+            }
+        }
+        self.ann.post(self, NodeSubItem(ti.id))
+    }
+
+    pub fn print_impl_item(&mut self, ii: &hir::ImplItem) -> io::Result<()> {
+        self.ann.pre(self, NodeSubItem(ii.id))?;
+        self.hardbreak_if_not_bol()?;
+        self.maybe_print_comment(ii.span.lo)?;
+        self.print_outer_attributes(&ii.attrs)?;
+
+        if let hir::Defaultness::Default = ii.defaultness {
+            self.word_nbsp("default")?;
+        }
+
+        match ii.node {
+            hir::ImplItemKind::Const(ref ty, ref expr) => {
+                self.print_associated_const(ii.name, &ty, Some(&expr), &ii.vis)?;
+            }
+            hir::ImplItemKind::Method(ref sig, ref body) => {
+                self.head("")?;
+                self.print_method_sig(ii.name, sig, &ii.vis)?;
+                self.nbsp()?;
+                self.print_block_with_attrs(body, &ii.attrs)?;
+            }
+            hir::ImplItemKind::Type(ref ty) => {
+                self.print_associated_type(ii.name, None, Some(ty))?;
+            }
+        }
+        self.ann.post(self, NodeSubItem(ii.id))
+    }
+
+    pub fn print_stmt(&mut self, st: &hir::Stmt) -> io::Result<()> {
+        self.maybe_print_comment(st.span.lo)?;
+        match st.node {
+            hir::StmtDecl(ref decl, _) => {
+                self.print_decl(&decl)?;
+            }
+            hir::StmtExpr(ref expr, _) => {
+                self.space_if_not_bol()?;
+                self.print_expr(&expr)?;
+            }
+            hir::StmtSemi(ref expr, _) => {
+                self.space_if_not_bol()?;
+                self.print_expr(&expr)?;
+                word(&mut self.s, ";")?;
+            }
+        }
+        if stmt_ends_with_semi(&st.node) {
+            word(&mut self.s, ";")?;
+        }
+        self.maybe_print_trailing_comment(st.span, None)
+    }
+
+    pub fn print_block(&mut self, blk: &hir::Block) -> io::Result<()> {
+        self.print_block_with_attrs(blk, &[])
+    }
+
+    pub fn print_block_unclosed(&mut self, blk: &hir::Block) -> io::Result<()> {
+        self.print_block_unclosed_indent(blk, indent_unit)
+    }
+
+    pub fn print_block_unclosed_indent(&mut self,
+                                       blk: &hir::Block,
+                                       indented: usize)
+                                       -> io::Result<()> {
+        self.print_block_maybe_unclosed(blk, indented, &[], false)
+    }
+
+    pub fn print_block_with_attrs(&mut self,
+                                  blk: &hir::Block,
+                                  attrs: &[ast::Attribute])
+                                  -> io::Result<()> {
+        self.print_block_maybe_unclosed(blk, indent_unit, attrs, true)
+    }
+
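+    // The most general block printer: prints the block's unsafety qualifier (if any),
+    // its inner attributes, each statement, and the optional trailing expression;
+    // `close_box` controls whether the enclosing box is closed along with the final "}".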
+    pub fn print_block_maybe_unclosed(&mut self,
+                                      blk: &hir::Block,
+                                      indented: usize,
+                                      attrs: &[ast::Attribute],
+                                      close_box: bool)
+                                      -> io::Result<()> {
+        match blk.rules {
+            hir::UnsafeBlock(..) => self.word_space("unsafe")?,
+            hir::PushUnsafeBlock(..) => self.word_space("push_unsafe")?,
+            hir::PopUnsafeBlock(..) => self.word_space("pop_unsafe")?,
+            hir::PushUnstableBlock => self.word_space("push_unstable")?,
+            hir::PopUnstableBlock => self.word_space("pop_unstable")?,
+            hir::DefaultBlock => (),
+        }
+        self.maybe_print_comment(blk.span.lo)?;
+        self.ann.pre(self, NodeBlock(blk))?;
+        self.bopen()?;
+
+        self.print_inner_attributes(attrs)?;
+
+        for st in &blk.stmts {
+            self.print_stmt(st)?;
+        }
+        match blk.expr {
+            Some(ref expr) => {
+                self.space_if_not_bol()?;
+                self.print_expr(&expr)?;
+                self.maybe_print_trailing_comment(expr.span, Some(blk.span.hi))?;
+            }
+            _ => (),
+        }
+        self.bclose_maybe_open(blk.span, indented, close_box)?;
+        self.ann.post(self, NodeBlock(blk))
+    }
+
+    fn print_else(&mut self, els: Option<&hir::Expr>) -> io::Result<()> {
+        match els {
+            Some(_else) => {
+                match _else.node {
+                    // "another else-if"
+                    hir::ExprIf(ref i, ref then, ref e) => {
+                        self.cbox(indent_unit - 1)?;
+                        self.ibox(0)?;
+                        word(&mut self.s, " else if ")?;
+                        self.print_expr(&i)?;
+                        space(&mut self.s)?;
+                        self.print_block(&then)?;
+                        self.print_else(e.as_ref().map(|e| &**e))
+                    }
+                    // "final else"
+                    hir::ExprBlock(ref b) => {
+                        self.cbox(indent_unit - 1)?;
+                        self.ibox(0)?;
+                        word(&mut self.s, " else ")?;
+                        self.print_block(&b)
+                    }
+                    // BLEAH, constraints would be great here
+                    _ => {
+                        panic!("print_if saw if with weird alternative");
+                    }
+                }
+            }
+            _ => Ok(()),
+        }
+    }
+
+    pub fn print_if(&mut self,
+                    test: &hir::Expr,
+                    blk: &hir::Block,
+                    elseopt: Option<&hir::Expr>)
+                    -> io::Result<()> {
+        self.head("if")?;
+        self.print_expr(test)?;
+        space(&mut self.s)?;
+        self.print_block(blk)?;
+        self.print_else(elseopt)
+    }
+
+    pub fn print_if_let(&mut self,
+                        pat: &hir::Pat,
+                        expr: &hir::Expr,
+                        blk: &hir::Block,
+                        elseopt: Option<&hir::Expr>)
+                        -> io::Result<()> {
+        self.head("if let")?;
+        self.print_pat(pat)?;
+        space(&mut self.s)?;
+        self.word_space("=")?;
+        self.print_expr(expr)?;
+        space(&mut self.s)?;
+        self.print_block(blk)?;
+        self.print_else(elseopt)
+    }
+
+    fn print_call_post(&mut self, args: &[P<hir::Expr>]) -> io::Result<()> {
+        self.popen()?;
+        self.commasep_exprs(Inconsistent, args)?;
+        self.pclose()
+    }
+
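+    // Prints `expr`, parenthesizing it when `needs_parentheses` says its printed form
+    // would otherwise be ambiguous in this position.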
+    pub fn print_expr_maybe_paren(&mut self, expr: &hir::Expr) -> io::Result<()> {
+        let needs_par = needs_parentheses(expr);
+        if needs_par {
+            self.popen()?;
+        }
+        self.print_expr(expr)?;
+        if needs_par {
+            self.pclose()?;
+        }
+        Ok(())
+    }
+
+    fn print_expr_vec(&mut self, exprs: &[P<hir::Expr>]) -> io::Result<()> {
+        self.ibox(indent_unit)?;
+        word(&mut self.s, "[")?;
+        self.commasep_exprs(Inconsistent, &exprs[..])?;
+        word(&mut self.s, "]")?;
+        self.end()
+    }
+
+    fn print_expr_repeat(&mut self, element: &hir::Expr, count: &hir::Expr) -> io::Result<()> {
+        self.ibox(indent_unit)?;
+        word(&mut self.s, "[")?;
+        self.print_expr(element)?;
+        self.word_space(";")?;
+        self.print_expr(count)?;
+        word(&mut self.s, "]")?;
+        self.end()
+    }
+
+    fn print_expr_struct(&mut self,
+                         path: &hir::Path,
+                         fields: &[hir::Field],
+                         wth: &Option<P<hir::Expr>>)
+                         -> io::Result<()> {
+        self.print_path(path, true, 0)?;
+        word(&mut self.s, "{")?;
+        self.commasep_cmnt(Consistent,
+                           &fields[..],
+                           |s, field| {
+                               s.ibox(indent_unit)?;
+                               s.print_name(field.name.node)?;
+                               s.word_space(":")?;
+                               s.print_expr(&field.expr)?;
+                               s.end()
+                           },
+                           |f| f.span)?;
+        match *wth {
+            Some(ref expr) => {
+                self.ibox(indent_unit)?;
+                if !fields.is_empty() {
+                    word(&mut self.s, ",")?;
+                    space(&mut self.s)?;
+                }
+                word(&mut self.s, "..")?;
+                self.print_expr(&expr)?;
+                self.end()?;
+            }
+            _ => if !fields.is_empty() {
+                word(&mut self.s, ",")?
+            },
+        }
+        word(&mut self.s, "}")?;
+        Ok(())
+    }
+
+    fn print_expr_tup(&mut self, exprs: &[P<hir::Expr>]) -> io::Result<()> {
+        self.popen()?;
+        self.commasep_exprs(Inconsistent, &exprs[..])?;
+        if exprs.len() == 1 {
+            word(&mut self.s, ",")?;
+        }
+        self.pclose()
+    }
+
+    fn print_expr_call(&mut self, func: &hir::Expr, args: &[P<hir::Expr>]) -> io::Result<()> {
+        self.print_expr_maybe_paren(func)?;
+        self.print_call_post(args)
+    }
+
+    fn print_expr_method_call(&mut self,
+                              name: Spanned<ast::Name>,
+                              tys: &[P<hir::Ty>],
+                              args: &[P<hir::Expr>])
+                              -> io::Result<()> {
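+        // In a method call the receiver is args[0]: it is printed before the ".",
+        // and only the remaining arguments go inside the parentheses.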
+        let base_args = &args[1..];
+        self.print_expr(&args[0])?;
+        word(&mut self.s, ".")?;
+        self.print_name(name.node)?;
+        if !tys.is_empty() {
+            word(&mut self.s, "::<")?;
+            self.commasep(Inconsistent, tys, |s, ty| s.print_type(&ty))?;
+            word(&mut self.s, ">")?;
+        }
+        self.print_call_post(base_args)
+    }
+
+    fn print_expr_binary(&mut self,
+                         op: hir::BinOp,
+                         lhs: &hir::Expr,
+                         rhs: &hir::Expr)
+                         -> io::Result<()> {
+        self.print_expr(lhs)?;
+        space(&mut self.s)?;
+        self.word_space(op.node.as_str())?;
+        self.print_expr(rhs)
+    }
+
+    fn print_expr_unary(&mut self, op: hir::UnOp, expr: &hir::Expr) -> io::Result<()> {
+        word(&mut self.s, op.as_str())?;
+        self.print_expr_maybe_paren(expr)
+    }
+
+    fn print_expr_addr_of(&mut self,
+                          mutability: hir::Mutability,
+                          expr: &hir::Expr)
+                          -> io::Result<()> {
+        word(&mut self.s, "&")?;
+        self.print_mutability(mutability)?;
+        self.print_expr_maybe_paren(expr)
+    }
+
+    pub fn print_expr(&mut self, expr: &hir::Expr) -> io::Result<()> {
+        self.maybe_print_comment(expr.span.lo)?;
+        self.ibox(indent_unit)?;
+        self.ann.pre(self, NodeExpr(expr))?;
+        match expr.node {
+            hir::ExprBox(ref expr) => {
+                self.word_space("box")?;
+                self.print_expr(expr)?;
+            }
+            hir::ExprVec(ref exprs) => {
+                self.print_expr_vec(&exprs[..])?;
+            }
+            hir::ExprRepeat(ref element, ref count) => {
+                self.print_expr_repeat(&element, &count)?;
+            }
+            hir::ExprStruct(ref path, ref fields, ref wth) => {
+                self.print_expr_struct(path, &fields[..], wth)?;
+            }
+            hir::ExprTup(ref exprs) => {
+                self.print_expr_tup(&exprs[..])?;
+            }
+            hir::ExprCall(ref func, ref args) => {
+                self.print_expr_call(&func, &args[..])?;
+            }
+            hir::ExprMethodCall(name, ref tys, ref args) => {
+                self.print_expr_method_call(name, &tys[..], &args[..])?;
+            }
+            hir::ExprBinary(op, ref lhs, ref rhs) => {
+                self.print_expr_binary(op, &lhs, &rhs)?;
+            }
+            hir::ExprUnary(op, ref expr) => {
+                self.print_expr_unary(op, &expr)?;
+            }
+            hir::ExprAddrOf(m, ref expr) => {
+                self.print_expr_addr_of(m, &expr)?;
+            }
+            hir::ExprLit(ref lit) => {
+                self.print_literal(&lit)?;
+            }
+            hir::ExprCast(ref expr, ref ty) => {
+                self.print_expr(&expr)?;
+                space(&mut self.s)?;
+                self.word_space("as")?;
+                self.print_type(&ty)?;
+            }
+            hir::ExprType(ref expr, ref ty) => {
+                self.print_expr(&expr)?;
+                self.word_space(":")?;
+                self.print_type(&ty)?;
+            }
+            hir::ExprIf(ref test, ref blk, ref elseopt) => {
+                self.print_if(&test, &blk, elseopt.as_ref().map(|e| &**e))?;
+            }
+            hir::ExprWhile(ref test, ref blk, opt_ident) => {
+                if let Some(ident) = opt_ident {
+                    self.print_name(ident.name)?;
+                    self.word_space(":")?;
+                }
+                self.head("while")?;
+                self.print_expr(&test)?;
+                space(&mut self.s)?;
+                self.print_block(&blk)?;
+            }
+            hir::ExprLoop(ref blk, opt_ident) => {
+                if let Some(ident) = opt_ident {
+                    self.print_name(ident.name)?;
+                    self.word_space(":")?;
+                }
+                self.head("loop")?;
+                space(&mut self.s)?;
+                self.print_block(&blk)?;
+            }
+            hir::ExprMatch(ref expr, ref arms, _) => {
+                self.cbox(indent_unit)?;
+                self.ibox(4)?;
+                self.word_nbsp("match")?;
+                self.print_expr(&expr)?;
+                space(&mut self.s)?;
+                self.bopen()?;
+                for arm in arms {
+                    self.print_arm(arm)?;
+                }
+                self.bclose_(expr.span, indent_unit)?;
+            }
+            hir::ExprClosure(capture_clause, ref decl, ref body) => {
+                self.print_capture_clause(capture_clause)?;
+
+                self.print_fn_block_args(&decl)?;
+                space(&mut self.s)?;
+
+                let default_return = match decl.output {
+                    hir::DefaultReturn(..) => true,
+                    _ => false,
+                };
+
+                if !default_return || !body.stmts.is_empty() || body.expr.is_none() {
+                    self.print_block_unclosed(&body)?;
+                } else {
+                    // we extract the block, so as not to create another set of boxes
+                    match body.expr.as_ref().unwrap().node {
+                        hir::ExprBlock(ref blk) => {
+                            self.print_block_unclosed(&blk)?;
+                        }
+                        _ => {
+                            // this is a bare expression
+                            self.print_expr(body.expr.as_ref().map(|e| &**e).unwrap())?;
+                            self.end()?; // need to close a box
+                        }
+                    }
+                }
+                // A box will be closed by the enclosing print_expr, but we didn't want
+                // an overall wrapper here, so the corresponding opening was already
+                // closed above. Create an empty box to satisfy that final close.
+                self.ibox(0)?;
+            }
+            hir::ExprBlock(ref blk) => {
+                // containing cbox, will be closed by print-block at }
+                self.cbox(indent_unit)?;
+                // head-box, will be closed by print-block after {
+                self.ibox(0)?;
+                self.print_block(&blk)?;
+            }
+            hir::ExprAssign(ref lhs, ref rhs) => {
+                self.print_expr(&lhs)?;
+                space(&mut self.s)?;
+                self.word_space("=")?;
+                self.print_expr(&rhs)?;
+            }
+            hir::ExprAssignOp(op, ref lhs, ref rhs) => {
+                self.print_expr(&lhs)?;
+                space(&mut self.s)?;
+                word(&mut self.s, op.node.as_str())?;
+                self.word_space("=")?;
+                self.print_expr(&rhs)?;
+            }
+            hir::ExprField(ref expr, name) => {
+                self.print_expr(&expr)?;
+                word(&mut self.s, ".")?;
+                self.print_name(name.node)?;
+            }
+            hir::ExprTupField(ref expr, id) => {
+                self.print_expr(&expr)?;
+                word(&mut self.s, ".")?;
+                self.print_usize(id.node)?;
+            }
+            hir::ExprIndex(ref expr, ref index) => {
+                self.print_expr(&expr)?;
+                word(&mut self.s, "[")?;
+                self.print_expr(&index)?;
+                word(&mut self.s, "]")?;
+            }
+            hir::ExprPath(None, ref path) => {
+                self.print_path(path, true, 0)?
+            }
+            hir::ExprPath(Some(ref qself), ref path) => {
+                self.print_qpath(path, qself, true)?
+            }
+            hir::ExprBreak(opt_ident) => {
+                word(&mut self.s, "break")?;
+                space(&mut self.s)?;
+                if let Some(ident) = opt_ident {
+                    self.print_name(ident.node.name)?;
+                    space(&mut self.s)?;
+                }
+            }
+            hir::ExprAgain(opt_ident) => {
+                word(&mut self.s, "continue")?;
+                space(&mut self.s)?;
+                if let Some(ident) = opt_ident {
+                    self.print_name(ident.node.name)?;
+                    space(&mut self.s)?
+                }
+            }
+            hir::ExprRet(ref result) => {
+                word(&mut self.s, "return")?;
+                match *result {
+                    Some(ref expr) => {
+                        word(&mut self.s, " ")?;
+                        self.print_expr(&expr)?;
+                    }
+                    _ => (),
+                }
+            }
+            hir::ExprInlineAsm(ref a, ref outputs, ref inputs) => {
+                word(&mut self.s, "asm!")?;
+                self.popen()?;
+                self.print_string(&a.asm, a.asm_str_style)?;
+                self.word_space(":")?;
+
+                let mut out_idx = 0;
+                self.commasep(Inconsistent, &a.outputs, |s, out| {
+                    let mut ch = out.constraint.chars();
+                    match ch.next() {
+                        Some('=') if out.is_rw => {
+                            s.print_string(&format!("+{}", ch.as_str()),
+                                           ast::StrStyle::Cooked)?
+                        }
+                        _ => s.print_string(&out.constraint,
+                                            ast::StrStyle::Cooked)?,
+                    }
+                    s.popen()?;
+                    s.print_expr(&outputs[out_idx])?;
+                    s.pclose()?;
+                    out_idx += 1;
+                    Ok(())
+                })?;
+                space(&mut self.s)?;
+                self.word_space(":")?;
+
+                let mut in_idx = 0;
+                self.commasep(Inconsistent, &a.inputs, |s, co| {
+                    s.print_string(&co, ast::StrStyle::Cooked)?;
+                    s.popen()?;
+                    s.print_expr(&inputs[in_idx])?;
+                    s.pclose()?;
+                    in_idx += 1;
+                    Ok(())
+                })?;
+                space(&mut self.s)?;
+                self.word_space(":")?;
+
+                self.commasep(Inconsistent, &a.clobbers, |s, co| {
+                    s.print_string(&co, ast::StrStyle::Cooked)?;
+                    Ok(())
+                })?;
+
+                let mut options = vec![];
+                if a.volatile {
+                    options.push("volatile");
+                }
+                if a.alignstack {
+                    options.push("alignstack");
+                }
+                if a.dialect == ast::AsmDialect::Intel {
+                    options.push("intel");
+                }
+
+                if !options.is_empty() {
+                    space(&mut self.s)?;
+                    self.word_space(":")?;
+                    self.commasep(Inconsistent, &options, |s, &co| {
+                        s.print_string(co, ast::StrStyle::Cooked)?;
+                        Ok(())
+                    })?;
+                }
+
+                self.pclose()?;
+            }
+        }
+        self.ann.post(self, NodeExpr(expr))?;
+        self.end()
+    }
+
+    pub fn print_local_decl(&mut self, loc: &hir::Local) -> io::Result<()> {
+        self.print_pat(&loc.pat)?;
+        if let Some(ref ty) = loc.ty {
+            self.word_space(":")?;
+            self.print_type(&ty)?;
+        }
+        Ok(())
+    }
+
+    pub fn print_decl(&mut self, decl: &hir::Decl) -> io::Result<()> {
+        self.maybe_print_comment(decl.span.lo)?;
+        match decl.node {
+            hir::DeclLocal(ref loc) => {
+                self.space_if_not_bol()?;
+                self.ibox(indent_unit)?;
+                self.word_nbsp("let")?;
+
+                self.ibox(indent_unit)?;
+                self.print_local_decl(&loc)?;
+                self.end()?;
+                if let Some(ref init) = loc.init {
+                    self.nbsp()?;
+                    self.word_space("=")?;
+                    self.print_expr(&init)?;
+                }
+                self.end()
+            }
+            hir::DeclItem(ref item) => {
+                self.print_item_id(item)
+            }
+        }
+    }
+
+    pub fn print_usize(&mut self, i: usize) -> io::Result<()> {
+        word(&mut self.s, &i.to_string())
+    }
+
+    pub fn print_name(&mut self, name: ast::Name) -> io::Result<()> {
+        word(&mut self.s, &name.as_str())?;
+        self.ann.post(self, NodeName(&name))
+    }
+
+    pub fn print_for_decl(&mut self, loc: &hir::Local, coll: &hir::Expr) -> io::Result<()> {
+        self.print_local_decl(loc)?;
+        space(&mut self.s)?;
+        self.word_space("in")?;
+        self.print_expr(coll)
+    }
+
+    fn print_path(&mut self,
+                  path: &hir::Path,
+                  colons_before_params: bool,
+                  depth: usize)
+                  -> io::Result<()> {
+        self.maybe_print_comment(path.span.lo)?;
+
+        let mut first = !path.global;
+        for segment in &path.segments[..path.segments.len() - depth] {
+            if first {
+                first = false
+            } else {
+                word(&mut self.s, "::")?
+            }
+
+            self.print_name(segment.identifier.name)?;
+
+            self.print_path_parameters(&segment.parameters, colons_before_params)?;
+        }
+
+        Ok(())
+    }
+
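+    // Prints a qualified path such as `<T as Trait>::Item`. `qself.position` is the
+    // number of leading path segments that form the trait path inside the angle
+    // brackets; the final segment names the associated item.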
+    fn print_qpath(&mut self,
+                   path: &hir::Path,
+                   qself: &hir::QSelf,
+                   colons_before_params: bool)
+                   -> io::Result<()> {
+        word(&mut self.s, "<")?;
+        self.print_type(&qself.ty)?;
+        if qself.position > 0 {
+            space(&mut self.s)?;
+            self.word_space("as")?;
+            let depth = path.segments.len() - qself.position;
+            self.print_path(&path, false, depth)?;
+        }
+        word(&mut self.s, ">")?;
+        word(&mut self.s, "::")?;
+        let item_segment = path.segments.last().unwrap();
+        self.print_name(item_segment.identifier.name)?;
+        self.print_path_parameters(&item_segment.parameters, colons_before_params)
+    }
+
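+    // Prints either angle-bracketed parameters (`<'a, T, Item = U>`) or the
+    // parenthesized sugar used by the Fn traits (`(A, B) -> C`).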
+    fn print_path_parameters(&mut self,
+                             parameters: &hir::PathParameters,
+                             colons_before_params: bool)
+                             -> io::Result<()> {
+        if parameters.is_empty() {
+            return Ok(());
+        }
+
+        if colons_before_params {
+            word(&mut self.s, "::")?
+        }
+
+        match *parameters {
+            hir::AngleBracketedParameters(ref data) => {
+                word(&mut self.s, "<")?;
+
+                let mut comma = false;
+                for lifetime in &data.lifetimes {
+                    if comma {
+                        self.word_space(",")?
+                    }
+                    self.print_lifetime(lifetime)?;
+                    comma = true;
+                }
+
+                if !data.types.is_empty() {
+                    if comma {
+                        self.word_space(",")?
+                    }
+                    self.commasep(Inconsistent, &data.types, |s, ty| s.print_type(&ty))?;
+                    comma = true;
+                }
+
+                for binding in data.bindings.iter() {
+                    if comma {
+                        self.word_space(",")?
+                    }
+                    self.print_name(binding.name)?;
+                    space(&mut self.s)?;
+                    self.word_space("=")?;
+                    self.print_type(&binding.ty)?;
+                    comma = true;
+                }
+
+                word(&mut self.s, ">")?
+            }
+
+            hir::ParenthesizedParameters(ref data) => {
+                word(&mut self.s, "(")?;
+                self.commasep(Inconsistent, &data.inputs, |s, ty| s.print_type(&ty))?;
+                word(&mut self.s, ")")?;
+
+                match data.output {
+                    None => {}
+                    Some(ref ty) => {
+                        self.space_if_not_bol()?;
+                        self.word_space("->")?;
+                        self.print_type(&ty)?;
+                    }
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    pub fn print_pat(&mut self, pat: &hir::Pat) -> io::Result<()> {
+        self.maybe_print_comment(pat.span.lo)?;
+        self.ann.pre(self, NodePat(pat))?;
+        // Pat isn't normalized, but the beauty of it
+        // is that it doesn't matter
+        match pat.node {
+            PatKind::Wild => word(&mut self.s, "_")?,
+            PatKind::Ident(binding_mode, ref path1, ref sub) => {
+                match binding_mode {
+                    hir::BindByRef(mutbl) => {
+                        self.word_nbsp("ref")?;
+                        self.print_mutability(mutbl)?;
+                    }
+                    hir::BindByValue(hir::MutImmutable) => {}
+                    hir::BindByValue(hir::MutMutable) => {
+                        self.word_nbsp("mut")?;
+                    }
+                }
+                self.print_name(path1.node.name)?;
+                match *sub {
+                    Some(ref p) => {
+                        word(&mut self.s, "@")?;
+                        self.print_pat(&p)?;
+                    }
+                    None => (),
+                }
+            }
+            PatKind::TupleStruct(ref path, ref args_) => {
+                self.print_path(path, true, 0)?;
+                match *args_ {
+                    None => word(&mut self.s, "(..)")?,
+                    Some(ref args) => {
+                        self.popen()?;
+                        self.commasep(Inconsistent, &args[..], |s, p| s.print_pat(&p))?;
+                        self.pclose()?;
+                    }
+                }
+            }
+            PatKind::Path(ref path) => {
+                self.print_path(path, true, 0)?;
+            }
+            PatKind::QPath(ref qself, ref path) => {
+                self.print_qpath(path, qself, false)?;
+            }
+            PatKind::Struct(ref path, ref fields, etc) => {
+                self.print_path(path, true, 0)?;
+                self.nbsp()?;
+                self.word_space("{")?;
+                self.commasep_cmnt(Consistent,
+                                   &fields[..],
+                                   |s, f| {
+                                       s.cbox(indent_unit)?;
+                                       if !f.node.is_shorthand {
+                                           s.print_name(f.node.name)?;
+                                           s.word_nbsp(":")?;
+                                       }
+                                       s.print_pat(&f.node.pat)?;
+                                       s.end()
+                                   },
+                                   |f| f.node.pat.span)?;
+                if etc {
+                    if !fields.is_empty() {
+                        self.word_space(",")?;
+                    }
+                    word(&mut self.s, "..")?;
+                }
+                space(&mut self.s)?;
+                word(&mut self.s, "}")?;
+            }
+            PatKind::Tup(ref elts) => {
+                self.popen()?;
+                self.commasep(Inconsistent, &elts[..], |s, p| s.print_pat(&p))?;
+                if elts.len() == 1 {
+                    word(&mut self.s, ",")?;
+                }
+                self.pclose()?;
+            }
+            PatKind::Box(ref inner) => {
+                word(&mut self.s, "box ")?;
+                self.print_pat(&inner)?;
+            }
+            PatKind::Ref(ref inner, mutbl) => {
+                word(&mut self.s, "&")?;
+                if mutbl == hir::MutMutable {
+                    word(&mut self.s, "mut ")?;
+                }
+                self.print_pat(&inner)?;
+            }
+            PatKind::Lit(ref e) => self.print_expr(&e)?,
+            PatKind::Range(ref begin, ref end) => {
+                self.print_expr(&begin)?;
+                space(&mut self.s)?;
+                word(&mut self.s, "...")?;
+                self.print_expr(&end)?;
+            }
+            PatKind::Vec(ref before, ref slice, ref after) => {
+                word(&mut self.s, "[")?;
+                self.commasep(Inconsistent, &before[..], |s, p| s.print_pat(&p))?;
+                if let Some(ref p) = *slice {
+                    if !before.is_empty() {
+                        self.word_space(",")?;
+                    }
+                    if p.node != PatKind::Wild {
+                        self.print_pat(&p)?;
+                    }
+                    word(&mut self.s, "..")?;
+                    if !after.is_empty() {
+                        self.word_space(",")?;
+                    }
+                }
+                self.commasep(Inconsistent, &after[..], |s, p| s.print_pat(&p))?;
+                word(&mut self.s, "]")?;
+            }
+        }
+        self.ann.post(self, NodePat(pat))
+    }
+
+    fn print_arm(&mut self, arm: &hir::Arm) -> io::Result<()> {
+        // I have no idea why this check is necessary, but here it
+        // is :(
+        if arm.attrs.is_empty() {
+            space(&mut self.s)?;
+        }
+        self.cbox(indent_unit)?;
+        self.ibox(0)?;
+        self.print_outer_attributes(&arm.attrs)?;
+        let mut first = true;
+        for p in &arm.pats {
+            if first {
+                first = false;
+            } else {
+                space(&mut self.s)?;
+                self.word_space("|")?;
+            }
+            self.print_pat(&p)?;
+        }
+        space(&mut self.s)?;
+        if let Some(ref e) = arm.guard {
+            self.word_space("if")?;
+            self.print_expr(&e)?;
+            space(&mut self.s)?;
+        }
+        self.word_space("=>")?;
+
+        match arm.body.node {
+            hir::ExprBlock(ref blk) => {
+                // the block will close the pattern's ibox
+                self.print_block_unclosed_indent(&blk, indent_unit)?;
+
+                // If it is a user-provided unsafe block, print a comma after it
+                if let hir::UnsafeBlock(hir::UserProvided) = blk.rules {
+                    word(&mut self.s, ",")?;
+                }
+            }
+            _ => {
+                self.end()?; // close the ibox for the pattern
+                self.print_expr(&arm.body)?;
+                word(&mut self.s, ",")?;
+            }
+        }
+        self.end() // close enclosing cbox
+    }
+
+    // Returns whether it printed anything
+    fn print_explicit_self(&mut self,
+                           explicit_self: &hir::ExplicitSelf_,
+                           mutbl: hir::Mutability)
+                           -> io::Result<bool> {
+        self.print_mutability(mutbl)?;
+        match *explicit_self {
+            hir::SelfStatic => {
+                return Ok(false);
+            }
+            hir::SelfValue(_) => {
+                word(&mut self.s, "self")?;
+            }
+            hir::SelfRegion(ref lt, m, _) => {
+                word(&mut self.s, "&")?;
+                self.print_opt_lifetime(lt)?;
+                self.print_mutability(m)?;
+                word(&mut self.s, "self")?;
+            }
+            hir::SelfExplicit(ref typ, _) => {
+                word(&mut self.s, "self")?;
+                self.word_space(":")?;
+                self.print_type(&typ)?;
+            }
+        }
+        return Ok(true);
+    }
+
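+    // Prints a full fn signature: the header qualifiers (visibility, constness,
+    // unsafety, ABI), the name if any, generics, the argument list and return type,
+    // and the where-clause. Any body is printed separately by the caller.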
+    pub fn print_fn(&mut self,
+                    decl: &hir::FnDecl,
+                    unsafety: hir::Unsafety,
+                    constness: hir::Constness,
+                    abi: Abi,
+                    name: Option<ast::Name>,
+                    generics: &hir::Generics,
+                    opt_explicit_self: Option<&hir::ExplicitSelf_>,
+                    vis: &hir::Visibility)
+                    -> io::Result<()> {
+        self.print_fn_header_info(unsafety, constness, abi, vis)?;
+
+        if let Some(name) = name {
+            self.nbsp()?;
+            self.print_name(name)?;
+        }
+        self.print_generics(generics)?;
+        self.print_fn_args_and_ret(decl, opt_explicit_self)?;
+        self.print_where_clause(&generics.where_clause)
+    }
+
+    pub fn print_fn_args(&mut self,
+                         decl: &hir::FnDecl,
+                         opt_explicit_self: Option<&hir::ExplicitSelf_>,
+                         is_closure: bool)
+                         -> io::Result<()> {
+        // It is unfortunate to duplicate the commasep logic, but we want the
+        // self type and the args all in the same box.
+        self.rbox(0, Inconsistent)?;
+        let mut first = true;
+        if let Some(explicit_self) = opt_explicit_self {
+            let m = match explicit_self {
+                &hir::SelfStatic => hir::MutImmutable,
+                _ => match decl.inputs[0].pat.node {
+                    PatKind::Ident(hir::BindByValue(m), _, _) => m,
+                    _ => hir::MutImmutable,
+                },
+            };
+            first = !self.print_explicit_self(explicit_self, m)?;
+        }
+
+        // HACK(eddyb) ignore the separately printed self argument.
+        let args = if first {
+            &decl.inputs[..]
+        } else {
+            &decl.inputs[1..]
+        };
+
+        for arg in args {
+            if first {
+                first = false;
+            } else {
+                self.word_space(",")?;
+            }
+            self.print_arg(arg, is_closure)?;
+        }
+
+        self.end()
+    }
+
+    pub fn print_fn_args_and_ret(&mut self,
+                                 decl: &hir::FnDecl,
+                                 opt_explicit_self: Option<&hir::ExplicitSelf_>)
+                                 -> io::Result<()> {
+        self.popen()?;
+        self.print_fn_args(decl, opt_explicit_self, false)?;
+        if decl.variadic {
+            word(&mut self.s, ", ...")?;
+        }
+        self.pclose()?;
+
+        self.print_fn_output(decl)
+    }
+
+    pub fn print_fn_block_args(&mut self, decl: &hir::FnDecl) -> io::Result<()> {
+        word(&mut self.s, "|")?;
+        self.print_fn_args(decl, None, true)?;
+        word(&mut self.s, "|")?;
+
+        if let hir::DefaultReturn(..) = decl.output {
+            return Ok(());
+        }
+
+        self.space_if_not_bol()?;
+        self.word_space("->")?;
+        match decl.output {
+            hir::Return(ref ty) => {
+                self.print_type(&ty)?;
+                self.maybe_print_comment(ty.span.lo)
+            }
+            hir::DefaultReturn(..) => unreachable!(),
+            hir::NoReturn(span) => {
+                self.word_nbsp("!")?;
+                self.maybe_print_comment(span.lo)
+            }
+        }
+    }
+
+    pub fn print_capture_clause(&mut self, capture_clause: hir::CaptureClause) -> io::Result<()> {
+        match capture_clause {
+            hir::CaptureByValue => self.word_space("move"),
+            hir::CaptureByRef => Ok(()),
+        }
+    }
+
+    pub fn print_bounds(&mut self, prefix: &str, bounds: &[hir::TyParamBound]) -> io::Result<()> {
+        if !bounds.is_empty() {
+            word(&mut self.s, prefix)?;
+            let mut first = true;
+            for bound in bounds {
+                self.nbsp()?;
+                if first {
+                    first = false;
+                } else {
+                    self.word_space("+")?;
+                }
+
+                match *bound {
+                    TraitTyParamBound(ref tref, TraitBoundModifier::None) => {
+                        self.print_poly_trait_ref(tref)
+                    }
+                    TraitTyParamBound(ref tref, TraitBoundModifier::Maybe) => {
+                        word(&mut self.s, "?")?;
+                        self.print_poly_trait_ref(tref)
+                    }
+                    RegionTyParamBound(ref lt) => {
+                        self.print_lifetime(lt)
+                    }
+                }?
+            }
+            Ok(())
+        } else {
+            Ok(())
+        }
+    }
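+
+    // Illustrative expectation for `print_bounds` (an assumed example, not a
+    // doc test): with prefix ":" and the bounds `?Sized`, `Clone`, `'a`, the
+    // output is roughly `: ?Sized + Clone + 'a`; each bound is preceded by a
+    // non-breaking space and later bounds are prefixed with `+`.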
+
+    pub fn print_lifetime(&mut self, lifetime: &hir::Lifetime) -> io::Result<()> {
+        self.print_name(lifetime.name)
+    }
+
+    pub fn print_lifetime_def(&mut self, lifetime: &hir::LifetimeDef) -> io::Result<()> {
+        self.print_lifetime(&lifetime.lifetime)?;
+        let mut sep = ":";
+        for v in &lifetime.bounds {
+            word(&mut self.s, sep)?;
+            self.print_lifetime(v)?;
+            sep = "+";
+        }
+        Ok(())
+    }
+
+    pub fn print_generics(&mut self, generics: &hir::Generics) -> io::Result<()> {
+        let total = generics.lifetimes.len() + generics.ty_params.len();
+        if total == 0 {
+            return Ok(());
+        }
+
+        word(&mut self.s, "<")?;
+
+        let mut ints = Vec::new();
+        for i in 0..total {
+            ints.push(i);
+        }
+
+        self.commasep(Inconsistent, &ints[..], |s, &idx| {
+            if idx < generics.lifetimes.len() {
+                let lifetime = &generics.lifetimes[idx];
+                s.print_lifetime_def(lifetime)
+            } else {
+                let idx = idx - generics.lifetimes.len();
+                let param = &generics.ty_params[idx];
+                s.print_ty_param(param)
+            }
+        })?;
+
+        word(&mut self.s, ">")?;
+        Ok(())
+    }
+
+    pub fn print_ty_param(&mut self, param: &hir::TyParam) -> io::Result<()> {
+        self.print_name(param.name)?;
+        self.print_bounds(":", &param.bounds)?;
+        match param.default {
+            Some(ref default) => {
+                space(&mut self.s)?;
+                self.word_space("=")?;
+                self.print_type(&default)
+            }
+            _ => Ok(()),
+        }
+    }
+
+    pub fn print_where_clause(&mut self, where_clause: &hir::WhereClause) -> io::Result<()> {
+        if where_clause.predicates.is_empty() {
+            return Ok(());
+        }
+
+        space(&mut self.s)?;
+        self.word_space("where")?;
+
+        for (i, predicate) in where_clause.predicates.iter().enumerate() {
+            if i != 0 {
+                self.word_space(",")?;
+            }
+
+            match predicate {
+                &hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate{ref bound_lifetimes,
+                                                                              ref bounded_ty,
+                                                                              ref bounds,
+                                                                              ..}) => {
+                    self.print_formal_lifetime_list(bound_lifetimes)?;
+                    self.print_type(&bounded_ty)?;
+                    self.print_bounds(":", bounds)?;
+                }
+                &hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate{ref lifetime,
+                                                                                ref bounds,
+                                                                                ..}) => {
+                    self.print_lifetime(lifetime)?;
+                    word(&mut self.s, ":")?;
+
+                    for (i, bound) in bounds.iter().enumerate() {
+                        // Print the separator before every bound except the
+                        // first, so that `'a: 'b + 'c` comes out in order.
+                        if i != 0 {
+                            word(&mut self.s, "+")?;
+                        }
+
+                        self.print_lifetime(bound)?;
+                    }
+                }
+                &hir::WherePredicate::EqPredicate(hir::WhereEqPredicate{ref path, ref ty, ..}) => {
+                    self.print_path(path, false, 0)?;
+                    space(&mut self.s)?;
+                    self.word_space("=")?;
+                    self.print_type(&ty)?;
+                }
+            }
+        }
+
+        Ok(())
+    }
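+
+    // Illustrative expectation for `print_where_clause` (an assumed example,
+    // not a doc test): one bound predicate and one region predicate print as
+    // roughly ` where T: Clone, 'a: 'b`, with predicates separated by `, `.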
+
+    pub fn print_view_path(&mut self, vp: &hir::ViewPath) -> io::Result<()> {
+        match vp.node {
+            hir::ViewPathSimple(name, ref path) => {
+                self.print_path(path, false, 0)?;
+
+                if path.segments.last().unwrap().identifier.name != name {
+                    space(&mut self.s)?;
+                    self.word_space("as")?;
+                    self.print_name(name)?;
+                }
+
+                Ok(())
+            }
+
+            hir::ViewPathGlob(ref path) => {
+                self.print_path(path, false, 0)?;
+                word(&mut self.s, "::*")
+            }
+
+            hir::ViewPathList(ref path, ref segments) => {
+                if path.segments.is_empty() {
+                    word(&mut self.s, "{")?;
+                } else {
+                    self.print_path(path, false, 0)?;
+                    word(&mut self.s, "::{")?;
+                }
+                self.commasep(Inconsistent, &segments[..], |s, w| {
+                    match w.node {
+                        hir::PathListIdent { name, .. } => {
+                            s.print_name(name)
+                        }
+                        hir::PathListMod { .. } => {
+                            word(&mut s.s, "self")
+                        }
+                    }
+                })?;
+                word(&mut self.s, "}")
+            }
+        }
+    }
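+
+    // Illustrative mapping for `print_view_path` (assumed examples, not doc
+    // tests): `ViewPathSimple` prints `foo::bar as baz` when the binding is
+    // renamed, `ViewPathGlob` prints `foo::*`, and `ViewPathList` prints
+    // forms like `foo::{self, bar}`.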
+
+    pub fn print_mutability(&mut self, mutbl: hir::Mutability) -> io::Result<()> {
+        match mutbl {
+            hir::MutMutable => self.word_nbsp("mut"),
+            hir::MutImmutable => Ok(()),
+        }
+    }
+
+    pub fn print_mt(&mut self, mt: &hir::MutTy) -> io::Result<()> {
+        self.print_mutability(mt.mutbl)?;
+        self.print_type(&mt.ty)
+    }
+
+    pub fn print_arg(&mut self, input: &hir::Arg, is_closure: bool) -> io::Result<()> {
+        self.ibox(indent_unit)?;
+        match input.ty.node {
+            hir::TyInfer if is_closure => self.print_pat(&input.pat)?,
+            _ => {
+                match input.pat.node {
+                    PatKind::Ident(_, ref path1, _) if
+                        path1.node.name ==
+                            parse::token::special_idents::invalid.name => {
+                        // Do nothing.
+                    }
+                    _ => {
+                        self.print_pat(&input.pat)?;
+                        word(&mut self.s, ":")?;
+                        space(&mut self.s)?;
+                    }
+                }
+                self.print_type(&input.ty)?;
+            }
+        }
+        self.end()
+    }
+
+    pub fn print_fn_output(&mut self, decl: &hir::FnDecl) -> io::Result<()> {
+        if let hir::DefaultReturn(..) = decl.output {
+            return Ok(());
+        }
+
+        self.space_if_not_bol()?;
+        self.ibox(indent_unit)?;
+        self.word_space("->")?;
+        match decl.output {
+            hir::NoReturn(_) => self.word_nbsp("!")?,
+            hir::DefaultReturn(..) => unreachable!(),
+            hir::Return(ref ty) => self.print_type(&ty)?,
+        }
+        self.end()?;
+
+        match decl.output {
+            hir::Return(ref output) => self.maybe_print_comment(output.span.lo),
+            _ => Ok(()),
+        }
+    }
+
+    pub fn print_ty_fn(&mut self,
+                       abi: Abi,
+                       unsafety: hir::Unsafety,
+                       decl: &hir::FnDecl,
+                       name: Option<ast::Name>,
+                       generics: &hir::Generics,
+                       opt_explicit_self: Option<&hir::ExplicitSelf_>)
+                       -> io::Result<()> {
+        self.ibox(indent_unit)?;
+        if !generics.lifetimes.is_empty() || !generics.ty_params.is_empty() {
+            word(&mut self.s, "for")?;
+            self.print_generics(generics)?;
+        }
+        let generics = hir::Generics {
+            lifetimes: hir::HirVec::new(),
+            ty_params: hir::HirVec::new(),
+            where_clause: hir::WhereClause {
+                id: ast::DUMMY_NODE_ID,
+                predicates: hir::HirVec::new(),
+            },
+        };
+        self.print_fn(decl,
+                      unsafety,
+                      hir::Constness::NotConst,
+                      abi,
+                      name,
+                      &generics,
+                      opt_explicit_self,
+                      &hir::Inherited)?;
+        self.end()
+    }
+
+    pub fn maybe_print_trailing_comment(&mut self,
+                                        span: codemap::Span,
+                                        next_pos: Option<BytePos>)
+                                        -> io::Result<()> {
+        let cm = match self.cm {
+            Some(cm) => cm,
+            _ => return Ok(()),
+        };
+        match self.next_comment() {
+            Some(ref cmnt) => {
+                if (*cmnt).style != comments::Trailing {
+                    return Ok(());
+                }
+                let span_line = cm.lookup_char_pos(span.hi);
+                let comment_line = cm.lookup_char_pos((*cmnt).pos);
+                let mut next = (*cmnt).pos + BytePos(1);
+                match next_pos {
+                    None => (),
+                    Some(p) => next = p,
+                }
+                if span.hi < (*cmnt).pos && (*cmnt).pos < next &&
+                   span_line.line == comment_line.line {
+                    self.print_comment(cmnt)?;
+                    self.cur_cmnt_and_lit.cur_cmnt += 1;
+                }
+            }
+            _ => (),
+        }
+        Ok(())
+    }
+
+    pub fn print_remaining_comments(&mut self) -> io::Result<()> {
+        // If there aren't any remaining comments, then we need to manually
+        // make sure there is a line break at the end.
+        if self.next_comment().is_none() {
+            hardbreak(&mut self.s)?;
+        }
+        loop {
+            match self.next_comment() {
+                Some(ref cmnt) => {
+                    self.print_comment(cmnt)?;
+                    self.cur_cmnt_and_lit.cur_cmnt += 1;
+                }
+                _ => break,
+            }
+        }
+        Ok(())
+    }
+
+    pub fn print_opt_abi_and_extern_if_nondefault(&mut self,
+                                                  opt_abi: Option<Abi>)
+                                                  -> io::Result<()> {
+        match opt_abi {
+            Some(Abi::Rust) => Ok(()),
+            Some(abi) => {
+                self.word_nbsp("extern")?;
+                self.word_nbsp(&abi.to_string())
+            }
+            None => Ok(()),
+        }
+    }
+
+    pub fn print_extern_opt_abi(&mut self, opt_abi: Option<Abi>) -> io::Result<()> {
+        match opt_abi {
+            Some(abi) => {
+                self.word_nbsp("extern")?;
+                self.word_nbsp(&abi.to_string())
+            }
+            None => Ok(()),
+        }
+    }
+
+    pub fn print_fn_header_info(&mut self,
+                                unsafety: hir::Unsafety,
+                                constness: hir::Constness,
+                                abi: Abi,
+                                vis: &hir::Visibility)
+                                -> io::Result<()> {
+        word(&mut self.s, &visibility_qualified(vis, ""))?;
+        self.print_unsafety(unsafety)?;
+
+        match constness {
+            hir::Constness::NotConst => {}
+            hir::Constness::Const => self.word_nbsp("const")?,
+        }
+
+        if abi != Abi::Rust {
+            self.word_nbsp("extern")?;
+            self.word_nbsp(&abi.to_string())?;
+        }
+
+        word(&mut self.s, "fn")
+    }
+
+    pub fn print_unsafety(&mut self, s: hir::Unsafety) -> io::Result<()> {
+        match s {
+            hir::Unsafety::Normal => Ok(()),
+            hir::Unsafety::Unsafe => self.word_nbsp("unsafe"),
+        }
+    }
+}
+
+// Dup'ed from parse::classify, but adapted for the HIR.
+/// Does this expression require a semicolon to be treated
+/// as a statement? Its negation ("can this expression be used as a
+/// statement without a semicolon?") is used as an early bail-out in
+/// the parser so that, for instance,
+///     if true {...} else {...}
+///      |x| 5
+/// isn't parsed as (if true {...} else {...} | x) | 5
+fn expr_requires_semi_to_be_stmt(e: &hir::Expr) -> bool {
+    match e.node {
+        hir::ExprIf(..) |
+        hir::ExprMatch(..) |
+        hir::ExprBlock(_) |
+        hir::ExprWhile(..) |
+        hir::ExprLoop(..) => false,
+        _ => true,
+    }
+}
+
+/// Does this statement require a semicolon after it?
+/// Note that in one case (`StmtSemi`), we've already
+/// seen the semicolon, and thus don't need another.
+fn stmt_ends_with_semi(stmt: &hir::Stmt_) -> bool {
+    match *stmt {
+        hir::StmtDecl(ref d, _) => {
+            match d.node {
+                hir::DeclLocal(_) => true,
+                hir::DeclItem(_) => false,
+            }
+        }
+        hir::StmtExpr(ref e, _) => {
+            expr_requires_semi_to_be_stmt(&e)
+        }
+        hir::StmtSemi(..) => {
+            false
+        }
+    }
+}
diff --git a/src/librustc/hir/svh.rs b/src/librustc/hir/svh.rs
new file mode 100644 (file)
index 0000000..08c3d70
--- /dev/null
@@ -0,0 +1,87 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Calculation and management of a Strict Version Hash for crates
+//!
+//! # Today's ABI problem
+//!
+//! In today's implementation of rustc, it is incredibly difficult to achieve
+//! forward binary compatibility without resorting to C-like interfaces. Within
+//! Rust code itself, ABI details such as symbol names are affected by a
+//! variety of factors unrelated to the actual code changes, such as the
+//! "def id drift" problem. This ends up yielding confusing error messages
+//! about metadata mismatches and the like.
+//!
+//! The core of the problem arises when an upstream dependency changes and
+//! downstream dependents are not recompiled. This causes compile errors because
+//! the upstream crate's metadata has changed but the downstream crates are
+//! still referencing the older crate's metadata.
+//!
+//! This problem exists for many reasons, the primary one being that Rust does
+//! not currently support forward ABI compatibility (in-place upgrades of a
+//! crate).
+//!
+//! # SVH and how it alleviates the problem
+//!
+//! With all of this knowledge on hand, this module contains the implementation
+//! of a notion of a "Strict Version Hash" for a crate. This is essentially a
+//! hash of all contents of a crate which can somehow be exposed to downstream
+//! crates.
+//!
+//! This hash is currently calculated by just hashing the AST, but this is
+//! obviously wrong (doc changes should not result in an incompatible ABI).
+//! For the moment, however, that is what the implementation requires.
+//!
+//! By encoding this strict version hash into every crate's metadata, stale
+//! crates can be detected immediately and reported as errors by rustc itself.
+//!
+//! # Relevant links
+//!
+//! Original issue: https://github.com/rust-lang/rust/issues/10207
+
+use std::fmt;
+
+#[derive(Clone, PartialEq, Debug)]
+pub struct Svh {
+    hash: String,
+}
+
+impl Svh {
+    /// Create a new `Svh` given the hash. If you actually want to
+    /// compute the SVH from some HIR, you want the `calculate_svh`
+    /// function found in `librustc_trans`.
+    pub fn new(hash: String) -> Svh {
+        assert!(hash.len() == 16);
+        Svh { hash: hash }
+    }
+
+    pub fn from_hash(hash: u64) -> Svh {
+        return Svh::new((0..64).step_by(4).map(|i| hex(hash >> i)).collect());
+
+        fn hex(b: u64) -> char {
+            let b = (b & 0xf) as u8;
+            let b = match b {
+                0 ... 9 => '0' as u8 + b,
+                _ => 'a' as u8 + b - 10,
+            };
+            b as char
+        }
+    }
+
+    pub fn as_str<'a>(&'a self) -> &'a str {
+        &self.hash
+    }
+}
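+
+// An illustrative sketch of the encoding (assumed example, not a doc test):
+// `from_hash` emits one hex digit per 4-bit nibble, least-significant nibble
+// first, so the result is always the 16-character string that `Svh::new`
+// asserts on:
+//
+//     let svh = Svh::from_hash(0x0123_4567_89ab_cdef);
+//     assert_eq!(svh.as_str(), "fedcba9876543210");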
+
+impl fmt::Display for Svh {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.pad(self.as_str())
+    }
+}
diff --git a/src/librustc/infer/README.md b/src/librustc/infer/README.md
new file mode 100644 (file)
index 0000000..c835189
--- /dev/null
@@ -0,0 +1,237 @@
+# Type inference engine
+
+This is loosely based on standard HM-type inference, but with an
+extension to try and accommodate subtyping.  There is nothing
+principled about this extension; it's sound---I hope!---but it's a
+heuristic, ultimately, and does not guarantee that it finds a valid
+typing even if one exists (in fact, there are known scenarios where it
+fails, some of which may eventually become problematic).
+
+## Key idea
+
+The main change is that each type variable T is associated with a
+lower-bound L and an upper-bound U.  L and U begin as bottom and top,
+respectively, but gradually narrow in response to new constraints
+being introduced.  When a variable is finally resolved to a concrete
+type, it can (theoretically) select any type that is a supertype of L
+and a subtype of U.
+
+There are several critical invariants which we maintain:
+
+- the upper-bound of a variable only becomes lower and the lower-bound
+  only becomes higher over time;
+- the lower-bound L is always a subtype of the upper bound U;
+- the lower-bound L and upper-bound U never refer to other type variables,
+  but only to types (though those types may contain type variables).
+
+> An aside: if the terms upper- and lower-bound confuse you, think of
+> "supertype" and "subtype".  The upper-bound is a "supertype"
+> (super=upper in Latin, or something like that anyway) and the lower-bound
+> is a "subtype" (sub=lower in Latin).  I find it helps to visualize
+> a simple class hierarchy, like Java minus interfaces and
+> primitive types.  The class Object is at the root (top) and other
+> types lie in between.  The bottom type is then the Null type.
+> So the tree looks like:
+>
+> ```text
+>         Object
+>         /    \
+>     String   Other
+>         \    /
+>         (null)
+> ```
+>
+> So the upper bound type is the "supertype" and the lower bound is the
+> "subtype".
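+
+To make the narrowing concrete, here is a small worked trace using the same
+toy hierarchy (purely illustrative):
+
+```text
+fresh variable T:            L = (null),  U = Object
+add constraint String <: T:  L = String,  U = Object   (lower bound raised)
+add constraint T <: Object:  L = String,  U = Object   (upper bound unchanged)
+resolve T:                   any type with String <: T <: Object, e.g. String
+```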
+
+## Satisfying constraints
+
+At a primitive level, there is only one form of constraint that the
+inference understands: a subtype relation.  So the outside world can
+say "make type A a subtype of type B".  If there are variables
+involved, the inferencer will adjust their upper- and lower-bounds as
+needed to ensure that this relation is satisfied. (We also allow "make
+type A equal to type B", but this is translated into "A <: B" and "B
+<: A")
+
+As stated above, we always maintain the invariant that type bounds
+never refer to other variables.  This keeps the inference relatively
+simple, avoiding the scenario of having a kind of graph where we have
+to pump constraints along and reach a fixed point, but it does impose
+some heuristics in the case where the user is relating two type
+variables A <: B.
+
+Combining two variables such that variable A will forever be a subtype
+of variable B is the trickiest part of the algorithm because there is
+often no right choice---that is, the right choice will depend on
+future constraints which we do not yet know. The problem comes about
+because both A and B have bounds that can be adjusted in the future.
+Let's look at some of the cases that can come up.
+
+Imagine, to start, the best case, where both A and B have an upper and
+lower bound (that is, the bounds are neither top nor bot, respectively). In
+that case, if we're lucky, A.ub <: B.lb, and so we know that whatever
+A and B should become, they will forever have the desired subtyping
+relation.  We can just leave things as they are.
+
+### Option 1: Unify
+
+However, suppose that A.ub is *not* a subtype of B.lb.  In
+that case, we must make a decision.  One option is to unify A
+and B so that they are one variable whose bounds are:
+
+    UB = GLB(A.ub, B.ub)
+    LB = LUB(A.lb, B.lb)
+
+(Note that we will have to verify that LB <: UB; if that does not hold, the
+types do not intersect and there is an error.) In that case, A <: B
+holds trivially because A==B.  However, we have now lost some
+flexibility, because perhaps the user intended for A and B to end up
+as different types and not the same type.
+
+Pictorially, what this does is to take two distinct variables with
+(hopefully not completely) distinct type ranges and produce one with
+the intersection.
+
+```text
+                  B.ub                  B.ub
+                   /\                    /
+           A.ub   /  \           A.ub   /
+           /   \ /    \              \ /
+          /     X      \              UB
+         /     / \      \            / \
+        /     /   /      \          /   /
+        \     \  /       /          \  /
+         \      X       /             LB
+          \    / \     /             / \
+           \  /   \   /             /   \
+           A.lb    B.lb          A.lb    B.lb
+```
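+
+For instance, with the toy hierarchy from the aside (purely illustrative):
+if `A = [String, Object]` and `B = [(null), String]` (written as `[lb, ub]`),
+then `A.ub = Object` is not a subtype of `B.lb = (null)`, so we cannot simply
+leave things alone. Unifying gives `LB = LUB(String, (null)) = String` and
+`UB = GLB(Object, String) = String`; `LB <: UB` still holds, so the merge
+succeeds and both A and B are forced to become `String`.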
+
+
+### Option 2: Relate UB/LB
+
+Another option is to keep A and B as distinct variables but set their
+bounds in such a way that, whatever happens, we know that A <: B will hold.
+This can be achieved by ensuring that A.ub <: B.lb.  In practice there
+are two ways to do that, depicted pictorially here:
+
+```text
+    Before                Option #1            Option #2
+
+             B.ub                B.ub                B.ub
+              /\                 /  \                /  \
+      A.ub   /  \        A.ub   /(B')\       A.ub   /(B')\
+      /   \ /    \           \ /     /           \ /     /
+     /     X      \         __UB____/             UB    /
+    /     / \      \       /  |                   |    /
+   /     /   /      \     /   |                   |   /
+   \     \  /       /    /(A')|                   |  /
+    \      X       /    /     LB            ______LB/
+     \    / \     /    /     / \           / (A')/ \
+      \  /   \   /     \    /   \          \    /   \
+      A.lb    B.lb       A.lb    B.lb        A.lb    B.lb
+```
+
+In these diagrams, UB and LB are defined as before.  As you can see,
+the new ranges `A'` and `B'` are quite different from the range that
+would be produced by unifying the variables.
+
+### What we do now
+
+Our current technique is to *try* (transactionally) to relate the
+existing bounds of A and B, if there are any (i.e., if `UB(A) != top
+&& LB(B) != bot`).  If that succeeds, we're done.  If it fails, then
+we merge A and B into the same variable.
+
+This is not clearly the correct course.  For example, if `UB(A) !=
+top` but `LB(B) == bot`, we could conceivably set `LB(B)` to `UB(A)`
+and leave the variables unmerged.  This is sometimes the better
+course; it depends on the program.
+
+The main case which fails today that I would like to support is:
+
+```text
+fn foo<T>(x: T, y: T) { ... }
+
+fn bar() {
+    let x: @mut int = @mut 3;
+    let y: @int = @3;
+    foo(x, y);
+}
+```
+
+In principle, the inferencer ought to find that the parameter `T` to
+`foo(x, y)` is `@const int`.  Today, however, it does not; this is
+because the type variable `T` is merged with the type variable for
+`X`, and thus inherits its UB/LB of `@mut int`.  This leaves no
+flexibility for `T` to later adjust to accommodate `@int`.
+
+### What to do when not all bounds are present
+
+In the prior discussion we assumed that A.ub was not top and B.lb was
+not bot.  Unfortunately this is rarely the case.  Often type variables
+have "lopsided" bounds.  For example, if a variable in the program has
+been initialized but has not been used, then its corresponding type
+variable will have a lower bound but no upper bound.  When that
+variable is then used, we would like to know its upper bound---but we
+don't have one!  In this case we'll do different things depending on
+how the variable is being used.
+
+## Transactional support
+
+Whenever we merge variables or adjust their bounds, we always
+keep a record of the old value.  This allows the changes to be undone.
+
+## Regions
+
+I've only talked about type variables here, but region variables
+follow the same principle.  They have upper- and lower-bounds.  A
+region A is a subregion of a region B if A being valid implies that B
+is valid.  This basically corresponds to the block nesting structure:
+the regions for outer block scopes are superregions of those for inner
+block scopes.
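+
+For example (illustrative):
+
+```text
+{               // region B (outer block)
+    {           // region A (inner block)
+        ...
+    }
+}
+```
+
+Here A is a subregion of B: whenever A is valid the enclosing block is still
+live, so B is valid too, making B a superregion of A.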
+
+## Integral and floating-point type variables
+
+There is a third variety of type variable that we use only for
+inferring the types of unsuffixed integer literals.  Integral type
+variables differ from general-purpose type variables in that there's
+no subtyping relationship among the various integral types, so instead
+of associating each variable with an upper and lower bound, we just
+use simple unification.  Each integer variable is associated with at
+most one integer type.  Floating point types are handled similarly to
+integral types.
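+
+A small illustrative sketch of what this means in practice:
+
+```text
+let x = 1;       // `1` gets a fresh integral type variable
+let y: u8 = x;   // unification resolves that variable to `u8`
+```
+
+A later conflicting use such as `let z: u16 = x;` would then be a plain type
+error rather than a pair of bounds to reconcile.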
+
+## GLB/LUB
+
+Computing the greatest-lower-bound and least-upper-bound of two
+types/regions is generally straightforward except when type variables
+are involved. In that case, we follow a similar "try to use the bounds
+when possible but otherwise merge the variables" strategy.  In other
+words, `GLB(A, B)` where `A` and `B` are variables will often result
+in `A` and `B` being merged and the result being `A`.
+
+## Type coercion
+
+We have a notion of assignability which differs somewhat from
+subtyping; in particular it may cause region borrowing to occur.  See
+the big comment later in this file on Type Coercion for specifics.
+
+### In conclusion
+
+I showed you three ways to relate `A` and `B`.  There are also more,
+of course, though I'm not sure if there are any more sensible options.
+The main point is that there are various options, each of which
+produce a distinct range of types for `A` and `B`.  Depending on what
+the correct values for A and B are, one of these options will be the
+right choice: but of course we don't know the right values for A and B
+yet, that's what we're trying to find!  In our code, we opt to unify
+(Option #1).
+
+# Implementation details
+
+We make use of a trait-like implementation strategy to consolidate
+duplicated code between subtypes, GLB, and LUB computations.  See the
+section on "Type Combining" below for details.
diff --git a/src/librustc/infer/bivariate.rs b/src/librustc/infer/bivariate.rs
new file mode 100644 (file)
index 0000000..a9ea395
--- /dev/null
@@ -0,0 +1,117 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Applies the "bivariance relationship" to two types and/or regions.
+//! If (A,B) are bivariant then either A <: B or B <: A. It occurs
+//! when type/lifetime parameters are unconstrained. Usually this is
+//! an error, but we permit it in the specific case where a type
+//! parameter is constrained in a where-clause via an associated type.
+//!
+//! There are several ways one could implement bivariance. You could
+//! just do nothing at all, for example, or you could fully verify
+//! that one of the two subtyping relationships holds. We choose to
+//! steer a middle course: we relate types up to regions, but ignore
+//! all region relationships.
+//!
+//! At one point, handling bivariance in this fashion was necessary
+//! for inference, but I'm actually not sure if that is true anymore.
+//! In particular, it might be enough to say (A,B) are bivariant for
+//! all (A,B).
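+//!
+//! A sketch of the where-clause case mentioned above (hypothetical names,
+//! simplified):
+//!
+//!     trait Mirror { type Image; }
+//!     trait Trivial {}
+//!     // `U` appears neither in the self type nor the trait ref, but it is
+//!     // constrained through the binding `Image = U`, so the impl is
+//!     // accepted; relating such an otherwise-unconstrained parameter is
+//!     // where a bivariant relation can show up during inference.
+//!     impl<T, U> Trivial for T where T: Mirror<Image = U> {}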
+
+use super::combine::{self, CombineFields};
+use super::type_variable::{BiTo};
+
+use ty::{self, Ty, TyCtxt};
+use ty::TyVar;
+use ty::relate::{Relate, RelateResult, TypeRelation};
+
+pub struct Bivariate<'a, 'tcx: 'a> {
+    fields: CombineFields<'a, 'tcx>
+}
+
+impl<'a, 'tcx> Bivariate<'a, 'tcx> {
+    pub fn new(fields: CombineFields<'a, 'tcx>) -> Bivariate<'a, 'tcx> {
+        Bivariate { fields: fields }
+    }
+}
+
+impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Bivariate<'a, 'tcx> {
+    fn tag(&self) -> &'static str { "Bivariate" }
+
+    fn tcx(&self) -> &'a TyCtxt<'tcx> { self.fields.tcx() }
+
+    fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
+
+    fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
+                                               variance: ty::Variance,
+                                               a: &T,
+                                               b: &T)
+                                               -> RelateResult<'tcx, T>
+    {
+        match variance {
+            // If we have Foo<A> and Foo is invariant w/r/t A,
+            // and we want to assert that
+            //
+            //     Foo<A> <: Foo<B> ||
+            //     Foo<B> <: Foo<A>
+            //
+            // then still A must equal B.
+            ty::Invariant => self.relate(a, b),
+
+            ty::Covariant => self.relate(a, b),
+            ty::Bivariant => self.relate(a, b),
+            ty::Contravariant => self.relate(a, b),
+        }
+    }
+
+    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+        debug!("{}.tys({:?}, {:?})", self.tag(),
+               a, b);
+        if a == b { return Ok(a); }
+
+        let infcx = self.fields.infcx;
+        let a = infcx.type_variables.borrow_mut().replace_if_possible(a);
+        let b = infcx.type_variables.borrow_mut().replace_if_possible(b);
+        match (&a.sty, &b.sty) {
+            (&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => {
+                infcx.type_variables.borrow_mut().relate_vars(a_id, BiTo, b_id);
+                Ok(a)
+            }
+
+            (&ty::TyInfer(TyVar(a_id)), _) => {
+                self.fields.instantiate(b, BiTo, a_id)?;
+                Ok(a)
+            }
+
+            (_, &ty::TyInfer(TyVar(b_id))) => {
+                self.fields.instantiate(a, BiTo, b_id)?;
+                Ok(a)
+            }
+
+            _ => {
+                combine::super_combine_tys(self.fields.infcx, self, a, b)
+            }
+        }
+    }
+
+    fn regions(&mut self, a: ty::Region, _: ty::Region) -> RelateResult<'tcx, ty::Region> {
+        Ok(a)
+    }
+
+    fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
+                  -> RelateResult<'tcx, ty::Binder<T>>
+        where T: Relate<'a,'tcx>
+    {
+        let a1 = self.tcx().erase_late_bound_regions(a);
+        let b1 = self.tcx().erase_late_bound_regions(b);
+        let c = self.relate(&a1, &b1)?;
+        Ok(ty::Binder(c))
+    }
+}
diff --git a/src/librustc/infer/combine.rs b/src/librustc/infer/combine.rs
new file mode 100644 (file)
index 0000000..67669c5
--- /dev/null
@@ -0,0 +1,405 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+///////////////////////////////////////////////////////////////////////////
+// # Type combining
+//
+// There are four type combiners: equate, sub, lub, and glb.  Each
+// implements the trait `Combine` and contains methods for combining
+// two instances of various things and yielding a new instance.  These
+// combiner methods always yield a `Result<T>`.  There is a lot of
+// common code for these operations, implemented as default methods on
+// the `Combine` trait.
+//
+// Each operation may have side-effects on the inference context,
+// though these can be unrolled using snapshots. On success, the
+// LUB/GLB operations return the appropriate bound. The Eq and Sub
+// operations generally return the first operand.
+//
+// ## Contravariance
+//
+// When you are relating two things which have a contravariant
+// relationship, you should use `contratys()` or `contraregions()`,
+// rather than inverting the order of arguments!  This is necessary
+// because the order of arguments is not relevant for LUB and GLB.  It
+// is also useful to track which value is the "expected" value in
+// terms of error reporting.
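+//
+// Illustrative example (assumed, not quoted from elsewhere in this file):
+// when relating `fn(A1) -> R1` with `fn(B1) -> R2`, the argument types are
+// contravariant, so a combiner should call `contratys(a, b)` (which flips the
+// required direction to `B1 <: A1`) rather than swapping its own arguments;
+// return types are related in the usual covariant direction. Swapping the
+// arguments by hand would confuse LUB/GLB and flip the "expected"/"found"
+// labels in diagnostics.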
+
+use super::bivariate::Bivariate;
+use super::equate::Equate;
+use super::glb::Glb;
+use super::lub::Lub;
+use super::sub::Sub;
+use super::InferCtxt;
+use super::{MiscVariable, TypeTrace};
+use super::type_variable::{RelationDir, BiTo, EqTo, SubtypeOf, SupertypeOf};
+
+use ty::{IntType, UintType};
+use ty::{self, Ty, TyCtxt};
+use ty::error::TypeError;
+use ty::fold::{TypeFolder, TypeFoldable};
+use ty::relate::{Relate, RelateResult, TypeRelation};
+use traits::PredicateObligations;
+
+use syntax::ast;
+use syntax::codemap::Span;
+
+#[derive(Clone)]
+pub struct CombineFields<'a, 'tcx: 'a> {
+    pub infcx: &'a InferCtxt<'a, 'tcx>,
+    pub a_is_expected: bool,
+    pub trace: TypeTrace<'tcx>,
+    pub cause: Option<ty::relate::Cause>,
+    pub obligations: PredicateObligations<'tcx>,
+}
+
+pub fn super_combine_tys<'a,'tcx:'a,R>(infcx: &InferCtxt<'a, 'tcx>,
+                                       relation: &mut R,
+                                       a: Ty<'tcx>,
+                                       b: Ty<'tcx>)
+                                       -> RelateResult<'tcx, Ty<'tcx>>
+    where R: TypeRelation<'a,'tcx>
+{
+    let a_is_expected = relation.a_is_expected();
+
+    match (&a.sty, &b.sty) {
+        // Relate integral variables to other types
+        (&ty::TyInfer(ty::IntVar(a_id)), &ty::TyInfer(ty::IntVar(b_id))) => {
+            infcx.int_unification_table
+                 .borrow_mut()
+                 .unify_var_var(a_id, b_id)
+                 .map_err(|e| int_unification_error(a_is_expected, e))?;
+            Ok(a)
+        }
+        (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyInt(v)) => {
+            unify_integral_variable(infcx, a_is_expected, v_id, IntType(v))
+        }
+        (&ty::TyInt(v), &ty::TyInfer(ty::IntVar(v_id))) => {
+            unify_integral_variable(infcx, !a_is_expected, v_id, IntType(v))
+        }
+        (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyUint(v)) => {
+            unify_integral_variable(infcx, a_is_expected, v_id, UintType(v))
+        }
+        (&ty::TyUint(v), &ty::TyInfer(ty::IntVar(v_id))) => {
+            unify_integral_variable(infcx, !a_is_expected, v_id, UintType(v))
+        }
+
+        // Relate floating-point variables to other types
+        (&ty::TyInfer(ty::FloatVar(a_id)), &ty::TyInfer(ty::FloatVar(b_id))) => {
+            infcx.float_unification_table
+                 .borrow_mut()
+                 .unify_var_var(a_id, b_id)
+                 .map_err(|e| float_unification_error(relation.a_is_expected(), e))?;
+            Ok(a)
+        }
+        (&ty::TyInfer(ty::FloatVar(v_id)), &ty::TyFloat(v)) => {
+            unify_float_variable(infcx, a_is_expected, v_id, v)
+        }
+        (&ty::TyFloat(v), &ty::TyInfer(ty::FloatVar(v_id))) => {
+            unify_float_variable(infcx, !a_is_expected, v_id, v)
+        }
+
+        // All other cases of inference are errors
+        (&ty::TyInfer(_), _) |
+        (_, &ty::TyInfer(_)) => {
+            Err(TypeError::Sorts(ty::relate::expected_found(relation, &a, &b)))
+        }
+
+
+        _ => {
+            ty::relate::super_relate_tys(relation, a, b)
+        }
+    }
+}
+
+fn unify_integral_variable<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
+                                    vid_is_expected: bool,
+                                    vid: ty::IntVid,
+                                    val: ty::IntVarValue)
+                                    -> RelateResult<'tcx, Ty<'tcx>>
+{
+    infcx.int_unification_table
+         .borrow_mut()
+         .unify_var_value(vid, val)
+         .map_err(|e| int_unification_error(vid_is_expected, e))?;
+    match val {
+        IntType(v) => Ok(infcx.tcx.mk_mach_int(v)),
+        UintType(v) => Ok(infcx.tcx.mk_mach_uint(v)),
+    }
+}
+
+fn unify_float_variable<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
+                                 vid_is_expected: bool,
+                                 vid: ty::FloatVid,
+                                 val: ast::FloatTy)
+                                 -> RelateResult<'tcx, Ty<'tcx>>
+{
+    infcx.float_unification_table
+         .borrow_mut()
+         .unify_var_value(vid, val)
+         .map_err(|e| float_unification_error(vid_is_expected, e))?;
+    Ok(infcx.tcx.mk_mach_float(val))
+}
+
+impl<'a, 'tcx> CombineFields<'a, 'tcx> {
+    pub fn tcx(&self) -> &'a TyCtxt<'tcx> {
+        self.infcx.tcx
+    }
+
+    pub fn switch_expected(&self) -> CombineFields<'a, 'tcx> {
+        CombineFields {
+            a_is_expected: !self.a_is_expected,
+            ..(*self).clone()
+        }
+    }
+
+    pub fn equate(&self) -> Equate<'a, 'tcx> {
+        Equate::new(self.clone())
+    }
+
+    pub fn bivariate(&self) -> Bivariate<'a, 'tcx> {
+        Bivariate::new(self.clone())
+    }
+
+    pub fn sub(&self) -> Sub<'a, 'tcx> {
+        Sub::new(self.clone())
+    }
+
+    pub fn lub(&self) -> Lub<'a, 'tcx> {
+        Lub::new(self.clone())
+    }
+
+    pub fn glb(&self) -> Glb<'a, 'tcx> {
+        Glb::new(self.clone())
+    }
+
+    pub fn instantiate(&self,
+                       a_ty: Ty<'tcx>,
+                       dir: RelationDir,
+                       b_vid: ty::TyVid)
+                       -> RelateResult<'tcx, ()>
+    {
+        let mut stack = Vec::new();
+        stack.push((a_ty, dir, b_vid));
+        loop {
+            // For each turn of the loop, we extract a tuple
+            //
+            //     (a_ty, dir, b_vid)
+            //
+            // to relate. Here dir is either SubtypeOf or
+            // SupertypeOf. The idea is that we should ensure that
+            // the type `a_ty` is a subtype or supertype (respectively) of the
+            // type to which `b_vid` is bound.
+            //
+            // If `b_vid` has not yet been instantiated with a type
+            // (which is always true on the first iteration, but not
+            // necessarily true on later iterations), we will first
+            // instantiate `b_vid` with a *generalized* version of
+            // `a_ty`. Generalization introduces other inference
+            // variables wherever subtyping could occur (at time of
+            // this writing, this means replacing free regions with
+            // region variables).
+            let (a_ty, dir, b_vid) = match stack.pop() {
+                None => break,
+                Some(e) => e,
+            };
+            // Get the actual variable that b_vid has been inferred to
+            let (b_vid, b_ty) = {
+                let mut variables = self.infcx.type_variables.borrow_mut();
+                let b_vid = variables.root_var(b_vid);
+                (b_vid, variables.probe_root(b_vid))
+            };
+
+            debug!("instantiate(a_ty={:?} dir={:?} b_vid={:?})",
+                   a_ty,
+                   dir,
+                   b_vid);
+
+            // Check whether `vid` has been instantiated yet.  If not,
+            // make a generalized form of `ty` and instantiate with
+            // that.
+            let b_ty = match b_ty {
+                Some(t) => t, // ...already instantiated.
+                None => {     // ...not yet instantiated:
+                    // Generalize type if necessary.
+                    let generalized_ty = match dir {
+                        EqTo => self.generalize(a_ty, b_vid, false),
+                        BiTo | SupertypeOf | SubtypeOf => self.generalize(a_ty, b_vid, true),
+                    }?;
+                    debug!("instantiate(a_ty={:?}, dir={:?}, \
+                                        b_vid={:?}, generalized_ty={:?})",
+                           a_ty, dir, b_vid,
+                           generalized_ty);
+                    self.infcx.type_variables
+                        .borrow_mut()
+                        .instantiate_and_push(
+                            b_vid, generalized_ty, &mut stack);
+                    generalized_ty
+                }
+            };
+
+            // The original triple was `(a_ty, dir, b_vid)` -- now we have
+            // resolved `b_vid` to `b_ty`, so apply `(a_ty, dir, b_ty)`:
+            //
+            // FIXME(#16847): This code is non-ideal because all these subtype
+            // relations wind up attributed to the same spans. We need
+            // to associate causes/spans with each of the relations in
+            // the stack to get this right.
+            match dir {
+                BiTo => self.bivariate().relate(&a_ty, &b_ty),
+                EqTo => self.equate().relate(&a_ty, &b_ty),
+                SubtypeOf => self.sub().relate(&a_ty, &b_ty),
+                SupertypeOf => self.sub().relate_with_variance(ty::Contravariant, &a_ty, &b_ty),
+            }?;
+        }
+
+        Ok(())
+    }
+
+    /// Attempts to generalize `ty` for the type variable `for_vid`.  This checks for a cycle -- that
+    /// is, whether the type `ty` references `for_vid`. If `make_region_vars` is true, it will also
+    /// replace all regions with fresh variables. Returns `TyError` in the case of a cycle, `Ok`
+    /// otherwise.
+    fn generalize(&self,
+                  ty: Ty<'tcx>,
+                  for_vid: ty::TyVid,
+                  make_region_vars: bool)
+                  -> RelateResult<'tcx, Ty<'tcx>>
+    {
+        let mut generalize = Generalizer {
+            infcx: self.infcx,
+            span: self.trace.origin.span(),
+            for_vid: for_vid,
+            make_region_vars: make_region_vars,
+            cycle_detected: false
+        };
+        let u = ty.fold_with(&mut generalize);
+        if generalize.cycle_detected {
+            Err(TypeError::CyclicTy)
+        } else {
+            Ok(u)
+        }
+    }
+}
+
+struct Generalizer<'cx, 'tcx:'cx> {
+    infcx: &'cx InferCtxt<'cx, 'tcx>,
+    span: Span,
+    for_vid: ty::TyVid,
+    make_region_vars: bool,
+    cycle_detected: bool,
+}
+
+impl<'cx, 'tcx> ty::fold::TypeFolder<'tcx> for Generalizer<'cx, 'tcx> {
+    fn tcx(&self) -> &TyCtxt<'tcx> {
+        self.infcx.tcx
+    }
+
+    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+        // Check to see whether the type we are generalizing references
+        // `vid`. At the same time, also update any type variables to
+        // the values that they are bound to. This is needed to truly
+        // check for cycles, but also just makes things readable.
+        //
+        // (In particular, you could have something like `$0 = Box<$1>`
+        //  where `$1` has already been instantiated with `Box<$0>`)
+        match t.sty {
+            ty::TyInfer(ty::TyVar(vid)) => {
+                let mut variables = self.infcx.type_variables.borrow_mut();
+                let vid = variables.root_var(vid);
+                if vid == self.for_vid {
+                    self.cycle_detected = true;
+                    self.tcx().types.err
+                } else {
+                    match variables.probe_root(vid) {
+                        Some(u) => {
+                            drop(variables);
+                            self.fold_ty(u)
+                        }
+                        None => t,
+                    }
+                }
+            }
+            _ => {
+                t.super_fold_with(self)
+            }
+        }
+    }
+
+    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+        match r {
+            // Never make variables for regions bound within the type itself.
+            ty::ReLateBound(..) => { return r; }
+
+            // Early-bound regions should really have been substituted away before
+            // we get to this point.
+            ty::ReEarlyBound(..) => {
+                span_bug!(
+                    self.span,
+                    "Encountered early bound region when generalizing: {:?}",
+                    r);
+            }
+
+            // Always make a fresh region variable for skolemized regions;
+            // the higher-ranked decision procedures rely on this.
+            ty::ReSkolemized(..) => { }
+
+            // For anything else, we make a region variable, unless we
+            // are *equating*, in which case it's just wasteful.
+            ty::ReEmpty |
+            ty::ReStatic |
+            ty::ReScope(..) |
+            ty::ReVar(..) |
+            ty::ReFree(..) => {
+                if !self.make_region_vars {
+                    return r;
+                }
+            }
+        }
+
+        // FIXME: This is non-ideal because we don't give a
+        // very descriptive origin for this region variable.
+        self.infcx.next_region_var(MiscVariable(self.span))
+    }
+}
+
+pub trait RelateResultCompare<'tcx, T> {
+    fn compare<F>(&self, t: T, f: F) -> RelateResult<'tcx, T> where
+        F: FnOnce() -> TypeError<'tcx>;
+}
+
+impl<'tcx, T:Clone + PartialEq> RelateResultCompare<'tcx, T> for RelateResult<'tcx, T> {
+    fn compare<F>(&self, t: T, f: F) -> RelateResult<'tcx, T> where
+        F: FnOnce() -> TypeError<'tcx>,
+    {
+        self.clone().and_then(|s| {
+            if s == t {
+                self.clone()
+            } else {
+                Err(f())
+            }
+        })
+    }
+}
+
+fn int_unification_error<'tcx>(a_is_expected: bool, v: (ty::IntVarValue, ty::IntVarValue))
+                               -> TypeError<'tcx>
+{
+    let (a, b) = v;
+    TypeError::IntMismatch(ty::relate::expected_found_bool(a_is_expected, &a, &b))
+}
+
+fn float_unification_error<'tcx>(a_is_expected: bool,
+                                 v: (ast::FloatTy, ast::FloatTy))
+                                 -> TypeError<'tcx>
+{
+    let (a, b) = v;
+    TypeError::FloatMismatch(ty::relate::expected_found_bool(a_is_expected, &a, &b))
+}
diff --git a/src/librustc/infer/equate.rs b/src/librustc/infer/equate.rs
new file mode 100644 (file)
index 0000000..5540046
--- /dev/null
@@ -0,0 +1,100 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::combine::{self, CombineFields};
+use super::higher_ranked::HigherRankedRelations;
+use super::{Subtype};
+use super::type_variable::{EqTo};
+
+use ty::{self, Ty, TyCtxt};
+use ty::TyVar;
+use ty::relate::{Relate, RelateResult, TypeRelation};
+use traits::PredicateObligations;
+
+/// Ensures `a` is made equal to `b`. Returns `a` on success.
+pub struct Equate<'a, 'tcx: 'a> {
+    fields: CombineFields<'a, 'tcx>
+}
+
+impl<'a, 'tcx> Equate<'a, 'tcx> {
+    pub fn new(fields: CombineFields<'a, 'tcx>) -> Equate<'a, 'tcx> {
+        Equate { fields: fields }
+    }
+
+    pub fn obligations(self) -> PredicateObligations<'tcx> {
+        self.fields.obligations
+    }
+}
+
+impl<'a, 'tcx> TypeRelation<'a,'tcx> for Equate<'a, 'tcx> {
+    fn tag(&self) -> &'static str { "Equate" }
+
+    fn tcx(&self) -> &'a TyCtxt<'tcx> { self.fields.tcx() }
+
+    fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
+
+    fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
+                                               _: ty::Variance,
+                                               a: &T,
+                                               b: &T)
+                                               -> RelateResult<'tcx, T>
+    {
+        self.relate(a, b)
+    }
+
+    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+        debug!("{}.tys({:?}, {:?})", self.tag(),
+               a, b);
+        if a == b { return Ok(a); }
+
+        let infcx = self.fields.infcx;
+        let a = infcx.type_variables.borrow_mut().replace_if_possible(a);
+        let b = infcx.type_variables.borrow_mut().replace_if_possible(b);
+        match (&a.sty, &b.sty) {
+            (&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => {
+                infcx.type_variables.borrow_mut().relate_vars(a_id, EqTo, b_id);
+                Ok(a)
+            }
+
+            (&ty::TyInfer(TyVar(a_id)), _) => {
+                self.fields.instantiate(b, EqTo, a_id)?;
+                Ok(a)
+            }
+
+            (_, &ty::TyInfer(TyVar(b_id))) => {
+                self.fields.instantiate(a, EqTo, b_id)?;
+                Ok(a)
+            }
+
+            _ => {
+                combine::super_combine_tys(self.fields.infcx, self, a, b)?;
+                Ok(a)
+            }
+        }
+    }
+
+    fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
+        debug!("{}.regions({:?}, {:?})",
+               self.tag(),
+               a,
+               b);
+        let origin = Subtype(self.fields.trace.clone());
+        self.fields.infcx.region_vars.make_eqregion(origin, a, b);
+        Ok(a)
+    }
+
+    fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
+                  -> RelateResult<'tcx, ty::Binder<T>>
+        where T: Relate<'a, 'tcx>
+    {
+        self.fields.higher_ranked_sub(a, b)?;
+        self.fields.higher_ranked_sub(b, a)
+    }
+}
diff --git a/src/librustc/infer/error_reporting.rs b/src/librustc/infer/error_reporting.rs
new file mode 100644 (file)
index 0000000..a7553f4
--- /dev/null
@@ -0,0 +1,2046 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Error Reporting Code for the inference engine
+//!
+//! Because of the way inference, and in particular region inference,
+//! works, it often happens that errors are not detected until far after
+//! the relevant line of code has been type-checked. Therefore, there is
+//! an elaborate system to track why a particular constraint in the
+//! inference graph arose so that we can explain to the user what gave
+//! rise to a particular error.
+//!
+//! The system is built around the "origin" types. An "origin" is the
+//! reason that a constraint or inference variable arose. There are
+//! different "origin" enums for different kinds of constraints/variables
+//! (e.g., `TypeOrigin`, `RegionVariableOrigin`). An origin always has
+//! a span, but also more information so that we can generate a meaningful
+//! error message.
+//!
+//! Having a catalogue of all the different reasons an error can arise is
+//! also useful for other purposes, such as cross-referencing FAQs, though
+//! we are not really taking advantage of this yet.
+//!
+//! # Region Inference
+//!
+//! Region inference is particularly tricky because it always succeeds "in
+//! the moment" and simply registers a constraint. Then, at the end, we
+//! can compute the full graph and report errors, so we need to be able to
+//! store and later report what gave rise to the conflicting constraints.
+//!
+//! # Subtype Trace
+//!
+//! Determining whether `T1 <: T2` often involves a number of subtypes and
+//! subconstraints along the way. A "TypeTrace" is an extended version
+//! of an origin that traces the types and other values that were being
+//! compared. It is not necessarily comprehensive (in fact, at the time of
+//! this writing it only tracks the root values being compared) but I'd
+//! like to extend it to include significant "waypoints". For example, if
+//! you are comparing `(T1, T2) <: (T3, T4)`, and the problem is that `T2
+//! <: T4` fails, I'd like the trace to include enough information to say
+//! "in the 2nd element of the tuple". Similarly, failures when comparing
+//! arguments or return types in fn types should be able to cite the
+//! specific position, etc.
+//!
+//! # Reality vs plan
+//!
+//! Of course, there is still a LOT of code in typeck that has yet to be
+//! ported to this system, and which relies on string concatenation at the
+//! time of error detection.
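+//!
+//! As a purely illustrative sketch (a hypothetical example, not code used by
+//! this module), the kind of source that exercises this machinery is a fn
+//! whose region constraints cannot all be satisfied, for instance:
+//!
+//! ```rust,ignore
+//! fn select<'a>(x: &'a i32, y: &i32) -> &'a i32 {
+//!     // Returning `y` requires the anonymous region of `y` to outlive `'a`,
+//!     // which region inference cannot prove; the conflicting constraints and
+//!     // their origins are what the reporting code in this file explains.
+//!     if *x < *y { x } else { y }
+//! }
+//! ```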
+
+use self::FreshOrKept::*;
+
+use super::InferCtxt;
+use super::TypeTrace;
+use super::SubregionOrigin;
+use super::RegionVariableOrigin;
+use super::ValuePairs;
+use super::region_inference::RegionResolutionError;
+use super::region_inference::ConcreteFailure;
+use super::region_inference::SubSupConflict;
+use super::region_inference::GenericBoundFailure;
+use super::region_inference::GenericKind;
+use super::region_inference::ProcessedErrors;
+use super::region_inference::ProcessedErrorOrigin;
+use super::region_inference::SameRegions;
+
+use std::collections::HashSet;
+
+use hir::map as ast_map;
+use hir;
+use hir::print as pprust;
+
+use middle::cstore::CrateStore;
+use hir::def::Def;
+use hir::def_id::DefId;
+use infer::{self, TypeOrigin};
+use middle::region;
+use ty::subst;
+use ty::{self, Ty, TyCtxt, TypeFoldable};
+use ty::{Region, ReFree};
+use ty::error::TypeError;
+
+use std::cell::{Cell, RefCell};
+use std::char::from_u32;
+use std::fmt;
+use syntax::ast;
+use syntax::errors::DiagnosticBuilder;
+use syntax::codemap::{self, Pos, Span};
+use syntax::parse::token;
+use syntax::ptr::P;
+
+impl<'tcx> TyCtxt<'tcx> {
+    pub fn note_and_explain_region(&self,
+                                   err: &mut DiagnosticBuilder,
+                                   prefix: &str,
+                                   region: ty::Region,
+                                   suffix: &str) {
+        fn item_scope_tag(item: &hir::Item) -> &'static str {
+            match item.node {
+                hir::ItemImpl(..) => "impl",
+                hir::ItemStruct(..) => "struct",
+                hir::ItemEnum(..) => "enum",
+                hir::ItemTrait(..) => "trait",
+                hir::ItemFn(..) => "function body",
+                _ => "item"
+            }
+        }
+
+        fn explain_span(tcx: &TyCtxt, heading: &str, span: Span)
+                        -> (String, Option<Span>) {
+            let lo = tcx.sess.codemap().lookup_char_pos_adj(span.lo);
+            (format!("the {} at {}:{}", heading, lo.line, lo.col.to_usize()),
+             Some(span))
+        }
+
+        let (description, span) = match region {
+            ty::ReScope(scope) => {
+                let new_string;
+                let unknown_scope = || {
+                    format!("{}unknown scope: {:?}{}.  Please report a bug.",
+                            prefix, scope, suffix)
+                };
+                let span = match scope.span(&self.region_maps, &self.map) {
+                    Some(s) => s,
+                    None => {
+                        err.note(&unknown_scope());
+                        return;
+                    }
+                };
+                let tag = match self.map.find(scope.node_id(&self.region_maps)) {
+                    Some(ast_map::NodeBlock(_)) => "block",
+                    Some(ast_map::NodeExpr(expr)) => match expr.node {
+                        hir::ExprCall(..) => "call",
+                        hir::ExprMethodCall(..) => "method call",
+                        hir::ExprMatch(_, _, hir::MatchSource::IfLetDesugar { .. }) => "if let",
+                        hir::ExprMatch(_, _, hir::MatchSource::WhileLetDesugar) =>  "while let",
+                        hir::ExprMatch(_, _, hir::MatchSource::ForLoopDesugar) =>  "for",
+                        hir::ExprMatch(..) => "match",
+                        _ => "expression",
+                    },
+                    Some(ast_map::NodeStmt(_)) => "statement",
+                    Some(ast_map::NodeItem(it)) => item_scope_tag(&it),
+                    Some(_) | None => {
+                        err.span_note(span, &unknown_scope());
+                        return;
+                    }
+                };
+                let scope_decorated_tag = match self.region_maps.code_extent_data(scope) {
+                    region::CodeExtentData::Misc(_) => tag,
+                    region::CodeExtentData::CallSiteScope { .. } => {
+                        "scope of call-site for function"
+                    }
+                    region::CodeExtentData::ParameterScope { .. } => {
+                        "scope of parameters for function"
+                    }
+                    region::CodeExtentData::DestructionScope(_) => {
+                        new_string = format!("destruction scope surrounding {}", tag);
+                        &new_string[..]
+                    }
+                    region::CodeExtentData::Remainder(r) => {
+                        new_string = format!("block suffix following statement {}",
+                                             r.first_statement_index);
+                        &new_string[..]
+                    }
+                };
+                explain_span(self, scope_decorated_tag, span)
+            }
+
+            ty::ReFree(ref fr) => {
+                let prefix = match fr.bound_region {
+                    ty::BrAnon(idx) => {
+                        format!("the anonymous lifetime #{} defined on", idx + 1)
+                    }
+                    ty::BrFresh(_) => "an anonymous lifetime defined on".to_owned(),
+                    _ => {
+                        format!("the lifetime {} as defined on",
+                                fr.bound_region)
+                    }
+                };
+
+                match self.map.find(fr.scope.node_id(&self.region_maps)) {
+                    Some(ast_map::NodeBlock(ref blk)) => {
+                        let (msg, opt_span) = explain_span(self, "block", blk.span);
+                        (format!("{} {}", prefix, msg), opt_span)
+                    }
+                    Some(ast_map::NodeItem(it)) => {
+                        let tag = item_scope_tag(&it);
+                        let (msg, opt_span) = explain_span(self, tag, it.span);
+                        (format!("{} {}", prefix, msg), opt_span)
+                    }
+                    Some(_) | None => {
+                        // this really should not happen, but it does:
+                        // FIXME(#27942)
+                        (format!("{} unknown free region bounded by scope {:?}",
+                                 prefix, fr.scope), None)
+                    }
+                }
+            }
+
+            ty::ReStatic => ("the static lifetime".to_owned(), None),
+
+            ty::ReEmpty => ("the empty lifetime".to_owned(), None),
+
+            ty::ReEarlyBound(ref data) => (data.name.to_string(), None),
+
+            // FIXME(#13998) ReSkolemized should probably print like
+            // ReFree rather than dumping Debug output on the user.
+            //
+            // We shouldn't really be having unification failures with ReVar
+            // and ReLateBound though.
+            ty::ReSkolemized(..) | ty::ReVar(_) | ty::ReLateBound(..) => {
+                (format!("lifetime {:?}", region), None)
+            }
+        };
+        let message = format!("{}{}{}", prefix, description, suffix);
+        if let Some(span) = span {
+            err.span_note(span, &message);
+        } else {
+            err.note(&message);
+        }
+    }
+}
+
+pub trait ErrorReporting<'tcx> {
+    fn report_region_errors(&self,
+                            errors: &Vec<RegionResolutionError<'tcx>>);
+
+    fn process_errors(&self, errors: &Vec<RegionResolutionError<'tcx>>)
+                      -> Option<Vec<RegionResolutionError<'tcx>>>;
+
+    fn report_type_error(&self,
+                         trace: TypeTrace<'tcx>,
+                         terr: &TypeError<'tcx>)
+                         -> DiagnosticBuilder<'tcx>;
+
+    fn check_and_note_conflicting_crates(&self,
+                                         err: &mut DiagnosticBuilder,
+                                         terr: &TypeError<'tcx>,
+                                         sp: Span);
+
+    fn report_and_explain_type_error(&self,
+                                     trace: TypeTrace<'tcx>,
+                                     terr: &TypeError<'tcx>)
+                                     -> DiagnosticBuilder<'tcx>;
+
+    fn values_str(&self, values: &ValuePairs<'tcx>) -> Option<String>;
+
+    fn expected_found_str<T: fmt::Display + Resolvable<'tcx> + TypeFoldable<'tcx>>(
+        &self,
+        exp_found: &ty::error::ExpectedFound<T>)
+        -> Option<String>;
+
+    fn report_concrete_failure(&self,
+                               origin: SubregionOrigin<'tcx>,
+                               sub: Region,
+                               sup: Region)
+                                -> DiagnosticBuilder<'tcx>;
+
+    fn report_generic_bound_failure(&self,
+                                    origin: SubregionOrigin<'tcx>,
+                                    kind: GenericKind<'tcx>,
+                                    sub: Region);
+
+    fn report_sub_sup_conflict(&self,
+                               var_origin: RegionVariableOrigin,
+                               sub_origin: SubregionOrigin<'tcx>,
+                               sub_region: Region,
+                               sup_origin: SubregionOrigin<'tcx>,
+                               sup_region: Region);
+
+    fn report_processed_errors(&self,
+                               origins: &[ProcessedErrorOrigin<'tcx>],
+                               same_regions: &[SameRegions]);
+
+    fn give_suggestion(&self, err: &mut DiagnosticBuilder, same_regions: &[SameRegions]);
+}
+
+trait ErrorReportingHelpers<'tcx> {
+    fn report_inference_failure(&self,
+                                var_origin: RegionVariableOrigin)
+                                -> DiagnosticBuilder<'tcx>;
+
+    fn note_region_origin(&self,
+                          err: &mut DiagnosticBuilder,
+                          origin: &SubregionOrigin<'tcx>);
+
+    fn give_expl_lifetime_param(&self,
+                                err: &mut DiagnosticBuilder,
+                                decl: &hir::FnDecl,
+                                unsafety: hir::Unsafety,
+                                constness: hir::Constness,
+                                name: ast::Name,
+                                opt_explicit_self: Option<&hir::ExplicitSelf_>,
+                                generics: &hir::Generics,
+                                span: Span);
+}
+
+impl<'a, 'tcx> ErrorReporting<'tcx> for InferCtxt<'a, 'tcx> {
+    fn report_region_errors(&self,
+                            errors: &Vec<RegionResolutionError<'tcx>>) {
+        debug!("report_region_errors(): {} errors to start", errors.len());
+
+        // try to pre-process the errors, which will group some of them
+        // together into a `ProcessedErrors` group:
+        let processed_errors = self.process_errors(errors);
+        let errors = processed_errors.as_ref().unwrap_or(errors);
+
+        debug!("report_region_errors: {} errors after preprocessing", errors.len());
+
+        for error in errors {
+            match error.clone() {
+                ConcreteFailure(origin, sub, sup) => {
+                    self.report_concrete_failure(origin, sub, sup).emit();
+                }
+
+                GenericBoundFailure(kind, param_ty, sub) => {
+                    self.report_generic_bound_failure(kind, param_ty, sub);
+                }
+
+                SubSupConflict(var_origin,
+                               sub_origin, sub_r,
+                               sup_origin, sup_r) => {
+                    self.report_sub_sup_conflict(var_origin,
+                                                 sub_origin, sub_r,
+                                                 sup_origin, sup_r);
+                }
+
+                ProcessedErrors(ref origins,
+                                ref same_regions) => {
+                    if !same_regions.is_empty() {
+                        self.report_processed_errors(origins, same_regions);
+                    }
+                }
+            }
+        }
+    }
+
+    // This method goes through all the errors and tries to group certain types
+    // of errors together, for the purpose of suggesting explicit lifetime
+    // parameters to the user. This is done so that we can have a more
+    // complete view of which lifetimes should be the same.
+    // If the return value is `None`, it means that processing failed
+    // (so the caller should fall back to the unprocessed errors).
+    //
+    // The method also attempts to weed out messages that seem like
+    // duplicates and would be unhelpful to the end-user, though it
+    // never weeds out all such errors.
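+    //
+    // As a purely illustrative sketch (hypothetical example code, not input
+    // this method consumes directly), consider a declaration like
+    //
+    //     fn cross<'a, 'b>(x: &'a i32, y: &'b i32) -> (&'a i32, &'b i32) {
+    //         (y, x)
+    //     }
+    //
+    // Both resulting region failures relate free regions (`'a`, `'b`) bound
+    // on the same fn header, so they are folded into a single
+    // `ProcessedErrors` entry whose `SameRegions` groups `'a` and `'b`,
+    // which later lets `give_suggestion` propose one shared lifetime.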
+    fn process_errors(&self, errors: &Vec<RegionResolutionError<'tcx>>)
+                      -> Option<Vec<RegionResolutionError<'tcx>>> {
+        debug!("process_errors()");
+        let mut origins = Vec::new();
+
+        // we collect up ConcreteFailures and SubSupConflicts that are
+        // relating free-regions bound on the fn-header and group them
+        // together into this vector
+        let mut same_regions = Vec::new();
+
+        // here we put errors that we will not be able to process nicely
+        let mut other_errors = Vec::new();
+
+        // we collect up GenericBoundFailures in here.
+        let mut bound_failures = Vec::new();
+
+        for error in errors {
+            match *error {
+                ConcreteFailure(ref origin, sub, sup) => {
+                    debug!("processing ConcreteFailure");
+                    match free_regions_from_same_fn(self.tcx, sub, sup) {
+                        Some(ref same_frs) => {
+                            origins.push(
+                                ProcessedErrorOrigin::ConcreteFailure(
+                                    origin.clone(),
+                                    sub,
+                                    sup));
+                            append_to_same_regions(&mut same_regions, same_frs);
+                        }
+                        _ => {
+                            other_errors.push(error.clone());
+                        }
+                    }
+                }
+                SubSupConflict(ref var_origin, _, sub_r, _, sup_r) => {
+                    debug!("processing SubSupConflict sub: {:?} sup: {:?}", sub_r, sup_r);
+                    match free_regions_from_same_fn(self.tcx, sub_r, sup_r) {
+                        Some(ref same_frs) => {
+                            origins.push(
+                                ProcessedErrorOrigin::VariableFailure(
+                                    var_origin.clone()));
+                            append_to_same_regions(&mut same_regions, same_frs);
+                        }
+                        None => {
+                            other_errors.push(error.clone());
+                        }
+                    }
+                }
+                GenericBoundFailure(ref origin, ref kind, region) => {
+                    bound_failures.push((origin.clone(), kind.clone(), region));
+                }
+                ProcessedErrors(..) => {
+                    bug!("should not encounter a `ProcessedErrors` yet: {:?}", error)
+                }
+            }
+        }
+
+        // ok, let's pull together the errors, sorted in an order that
+        // we think will help the user the most
+        let mut processed_errors = vec![];
+
+        // first, put the processed errors, if any
+        if !same_regions.is_empty() {
+            let common_scope_id = same_regions[0].scope_id;
+            for sr in &same_regions {
+                // Since ProcessedErrors is used to reconstruct the function
+                // declaration, we want to make sure that they are, in fact,
+                // from the same scope
+                if sr.scope_id != common_scope_id {
+                    debug!("returning empty result from process_errors because
+                            {} != {}", sr.scope_id, common_scope_id);
+                    return None;
+                }
+            }
+            assert!(origins.len() > 0);
+            let pe = ProcessedErrors(origins, same_regions);
+            debug!("errors processed: {:?}", pe);
+            processed_errors.push(pe);
+        }
+
+        // next, put the other misc errors
+        processed_errors.extend(other_errors);
+
+        // finally, put the `T: 'a` errors, but only if there were no
+        // other errors. otherwise, these have a very high rate of
+        // being unhelpful in practice. This is because they are
+        // basically secondary checks that test the state of the
+        // region graph after the rest of inference is done, and the
+        // other kinds of errors indicate that the region constraint
+        // graph is internally inconsistent, so these test results are
+        // likely to be meaningless.
+        if processed_errors.is_empty() {
+            for (origin, kind, region) in bound_failures {
+                processed_errors.push(GenericBoundFailure(origin, kind, region));
+            }
+        }
+
+        // we should always wind up with SOME errors, unless there were no
+        // errors to start
+        assert!(errors.is_empty() || !processed_errors.is_empty());
+
+        return Some(processed_errors);
+
+        #[derive(Debug)]
+        struct FreeRegionsFromSameFn {
+            sub_fr: ty::FreeRegion,
+            sup_fr: ty::FreeRegion,
+            scope_id: ast::NodeId
+        }
+
+        impl FreeRegionsFromSameFn {
+            fn new(sub_fr: ty::FreeRegion,
+                   sup_fr: ty::FreeRegion,
+                   scope_id: ast::NodeId)
+                   -> FreeRegionsFromSameFn {
+                FreeRegionsFromSameFn {
+                    sub_fr: sub_fr,
+                    sup_fr: sup_fr,
+                    scope_id: scope_id
+                }
+            }
+        }
+
+        fn free_regions_from_same_fn(tcx: &TyCtxt,
+                                     sub: Region,
+                                     sup: Region)
+                                     -> Option<FreeRegionsFromSameFn> {
+            debug!("free_regions_from_same_fn(sub={:?}, sup={:?})", sub, sup);
+            let (scope_id, fr1, fr2) = match (sub, sup) {
+                (ReFree(fr1), ReFree(fr2)) => {
+                    if fr1.scope != fr2.scope {
+                        return None
+                    }
+                    assert!(fr1.scope == fr2.scope);
+                    (fr1.scope.node_id(&tcx.region_maps), fr1, fr2)
+                },
+                _ => return None
+            };
+            let parent = tcx.map.get_parent(scope_id);
+            let parent_node = tcx.map.find(parent);
+            match parent_node {
+                Some(node) => match node {
+                    ast_map::NodeItem(item) => match item.node {
+                        hir::ItemFn(..) => {
+                            Some(FreeRegionsFromSameFn::new(fr1, fr2, scope_id))
+                        },
+                        _ => None
+                    },
+                    ast_map::NodeImplItem(..) |
+                    ast_map::NodeTraitItem(..) => {
+                        Some(FreeRegionsFromSameFn::new(fr1, fr2, scope_id))
+                    },
+                    _ => None
+                },
+                None => {
+                    debug!("no parent node of scope_id {}", scope_id);
+                    None
+                }
+            }
+        }
+
+        fn append_to_same_regions(same_regions: &mut Vec<SameRegions>,
+                                  same_frs: &FreeRegionsFromSameFn) {
+            debug!("append_to_same_regions(same_regions={:?}, same_frs={:?})",
+                   same_regions, same_frs);
+            let scope_id = same_frs.scope_id;
+            let (sub_fr, sup_fr) = (same_frs.sub_fr, same_frs.sup_fr);
+            for sr in same_regions.iter_mut() {
+                if sr.contains(&sup_fr.bound_region) && scope_id == sr.scope_id {
+                    sr.push(sub_fr.bound_region);
+                    return
+                }
+            }
+            same_regions.push(SameRegions {
+                scope_id: scope_id,
+                regions: vec!(sub_fr.bound_region, sup_fr.bound_region)
+            })
+        }
+    }
+
+    fn report_type_error(&self,
+                         trace: TypeTrace<'tcx>,
+                         terr: &TypeError<'tcx>)
+                         -> DiagnosticBuilder<'tcx> {
+        let expected_found_str = match self.values_str(&trace.values) {
+            Some(v) => v,
+            None => {
+                return self.tcx.sess.diagnostic().struct_dummy(); /* derived error */
+            }
+        };
+
+        let is_simple_error = if let &TypeError::Sorts(ref values) = terr {
+            values.expected.is_primitive() && values.found.is_primitive()
+        } else {
+            false
+        };
+
+        let expected_found_str = if is_simple_error {
+            expected_found_str
+        } else {
+            format!("{} ({})", expected_found_str, terr)
+        };
+
+        let mut err = struct_span_err!(self.tcx.sess,
+                                       trace.origin.span(),
+                                       E0308,
+                                       "{}: {}",
+                                       trace.origin,
+                                       expected_found_str);
+
+        self.check_and_note_conflicting_crates(&mut err, terr, trace.origin.span());
+
+        match trace.origin {
+            TypeOrigin::MatchExpressionArm(_, arm_span, source) => match source {
+                hir::MatchSource::IfLetDesugar{..} => {
+                    err.span_note(arm_span, "`if let` arm with an incompatible type");
+                }
+                _ => {
+                    err.span_note(arm_span, "match arm with an incompatible type");
+                }
+            },
+            _ => ()
+        }
+        err
+    }
+
+    /// Adds a note if the types come from similarly named crates
+    fn check_and_note_conflicting_crates(&self,
+                                         err: &mut DiagnosticBuilder,
+                                         terr: &TypeError<'tcx>,
+                                         sp: Span) {
+        let report_path_match = |err: &mut DiagnosticBuilder, did1: DefId, did2: DefId| {
+            // Only external crates, if either is from a local
+            // module we could have false positives
+            if !(did1.is_local() || did2.is_local()) && did1.krate != did2.krate {
+                let exp_path = self.tcx.item_path_str(did1);
+                let found_path = self.tcx.item_path_str(did2);
+                // We compare strings because DefPath can be different
+                // for imported and non-imported crates
+                if exp_path == found_path {
+                    let crate_name = self.tcx.sess.cstore.crate_name(did1.krate);
+                    err.span_note(sp, &format!("Perhaps two different versions \
+                                                of crate `{}` are being used?",
+                                               crate_name));
+                }
+            }
+        };
+        match *terr {
+            TypeError::Sorts(ref exp_found) => {
+                // if they are both "path types", there's a chance of ambiguity
+                // due to different versions of the same crate
+                match (&exp_found.expected.sty, &exp_found.found.sty) {
+                    (&ty::TyEnum(ref exp_adt, _), &ty::TyEnum(ref found_adt, _)) |
+                    (&ty::TyStruct(ref exp_adt, _), &ty::TyStruct(ref found_adt, _)) |
+                    (&ty::TyEnum(ref exp_adt, _), &ty::TyStruct(ref found_adt, _)) |
+                    (&ty::TyStruct(ref exp_adt, _), &ty::TyEnum(ref found_adt, _)) => {
+                        report_path_match(err, exp_adt.did, found_adt.did);
+                    },
+                    _ => ()
+                }
+            },
+            TypeError::Traits(ref exp_found) => {
+                report_path_match(err, exp_found.expected, exp_found.found);
+            },
+            _ => () // FIXME(#22750) handle traits and stuff
+        }
+    }
+
+    fn report_and_explain_type_error(&self,
+                                     trace: TypeTrace<'tcx>,
+                                     terr: &TypeError<'tcx>)
+                                     -> DiagnosticBuilder<'tcx> {
+        let span = trace.origin.span();
+        let mut err = self.report_type_error(trace, terr);
+        self.tcx.note_and_explain_type_err(&mut err, terr, span);
+        err
+    }
+
+    /// Returns a string of the form "expected `{}`, found `{}`", or None if this is a derived
+    /// error.
+    fn values_str(&self, values: &ValuePairs<'tcx>) -> Option<String> {
+        match *values {
+            infer::Types(ref exp_found) => self.expected_found_str(exp_found),
+            infer::TraitRefs(ref exp_found) => self.expected_found_str(exp_found),
+            infer::PolyTraitRefs(ref exp_found) => self.expected_found_str(exp_found)
+        }
+    }
+
+    fn expected_found_str<T: fmt::Display + Resolvable<'tcx> + TypeFoldable<'tcx>>(
+        &self,
+        exp_found: &ty::error::ExpectedFound<T>)
+        -> Option<String>
+    {
+        let expected = exp_found.expected.resolve(self);
+        if expected.references_error() {
+            return None;
+        }
+
+        let found = exp_found.found.resolve(self);
+        if found.references_error() {
+            return None;
+        }
+
+        Some(format!("expected `{}`, found `{}`",
+                     expected,
+                     found))
+    }
+
+    fn report_generic_bound_failure(&self,
+                                    origin: SubregionOrigin<'tcx>,
+                                    bound_kind: GenericKind<'tcx>,
+                                    sub: Region)
+    {
+        // FIXME: it would be better to report the first error message
+        // with the span of the parameter itself, rather than the span
+        // where the error was detected. But that span is not readily
+        // accessible.
+
+        let labeled_user_string = match bound_kind {
+            GenericKind::Param(ref p) =>
+                format!("the parameter type `{}`", p),
+            GenericKind::Projection(ref p) =>
+                format!("the associated type `{}`", p),
+        };
+
+        let mut err = match sub {
+            ty::ReFree(ty::FreeRegion {bound_region: ty::BrNamed(..), ..}) => {
+                // Does the required lifetime have a nice name we can print?
+                let mut err = struct_span_err!(self.tcx.sess,
+                                               origin.span(),
+                                               E0309,
+                                               "{} may not live long enough",
+                                               labeled_user_string);
+                err.fileline_help(origin.span(),
+                                  &format!("consider adding an explicit lifetime bound `{}: {}`...",
+                                           bound_kind,
+                                           sub));
+                err
+            }
+
+            ty::ReStatic => {
+                // Does the required lifetime have a nice name we can print?
+                let mut err = struct_span_err!(self.tcx.sess,
+                                               origin.span(),
+                                               E0310,
+                                               "{} may not live long enough",
+                                               labeled_user_string);
+                err.fileline_help(origin.span(),
+                                  &format!("consider adding an explicit lifetime \
+                                            bound `{}: 'static`...",
+                                           bound_kind));
+                err
+            }
+
+            _ => {
+                // If not, be less specific.
+                let mut err = struct_span_err!(self.tcx.sess,
+                                               origin.span(),
+                                               E0311,
+                                               "{} may not live long enough",
+                                               labeled_user_string);
+                err.fileline_help(origin.span(),
+                                  &format!("consider adding an explicit lifetime bound for `{}`",
+                                           bound_kind));
+                self.tcx.note_and_explain_region(
+                    &mut err,
+                    &format!("{} must be valid for ", labeled_user_string),
+                    sub,
+                    "...");
+                err
+            }
+        };
+
+        self.note_region_origin(&mut err, &origin);
+        err.emit();
+    }
+
+    fn report_concrete_failure(&self,
+                               origin: SubregionOrigin<'tcx>,
+                               sub: Region,
+                               sup: Region)
+                                -> DiagnosticBuilder<'tcx> {
+        match origin {
+            infer::Subtype(trace) => {
+                let terr = TypeError::RegionsDoesNotOutlive(sup, sub);
+                self.report_and_explain_type_error(trace, &terr)
+            }
+            infer::Reborrow(span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0312,
+                    "lifetime of reference outlines \
+                     lifetime of borrowed content...");
+                self.tcx.note_and_explain_region(&mut err,
+                    "...the reference is valid for ",
+                    sub,
+                    "...");
+                self.tcx.note_and_explain_region(&mut err,
+                    "...but the borrowed content is only valid for ",
+                    sup,
+                    "");
+                err
+            }
+            infer::ReborrowUpvar(span, ref upvar_id) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0313,
+                    "lifetime of borrowed pointer outlives \
+                            lifetime of captured variable `{}`...",
+                            self.tcx.local_var_name_str(upvar_id.var_id));
+                self.tcx.note_and_explain_region(&mut err,
+                    "...the borrowed pointer is valid for ",
+                    sub,
+                    "...");
+                self.tcx.note_and_explain_region(&mut err,
+                    &format!("...but `{}` is only valid for ",
+                             self.tcx.local_var_name_str(upvar_id.var_id)),
+                    sup,
+                    "");
+                err
+            }
+            infer::InfStackClosure(span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0314,
+                    "closure outlives stack frame");
+                self.tcx.note_and_explain_region(&mut err,
+                    "...the closure must be valid for ",
+                    sub,
+                    "...");
+                self.tcx.note_and_explain_region(&mut err,
+                    "...but the closure's stack frame is only valid for ",
+                    sup,
+                    "");
+                err
+            }
+            infer::InvokeClosure(span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0315,
+                    "cannot invoke closure outside of its lifetime");
+                self.tcx.note_and_explain_region(&mut err,
+                    "the closure is only valid for ",
+                    sup,
+                    "");
+                err
+            }
+            infer::DerefPointer(span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0473,
+                          "dereference of reference outside its lifetime");
+                self.tcx.note_and_explain_region(&mut err,
+                    "the reference is only valid for ",
+                    sup,
+                    "");
+                err
+            }
+            infer::FreeVariable(span, id) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0474,
+                          "captured variable `{}` does not outlive the enclosing closure",
+                          self.tcx.local_var_name_str(id));
+                self.tcx.note_and_explain_region(&mut err,
+                    "captured variable is valid for ",
+                    sup,
+                    "");
+                self.tcx.note_and_explain_region(&mut err,
+                    "closure is valid for ",
+                    sub,
+                    "");
+                err
+            }
+            infer::IndexSlice(span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0475,
+                          "index of slice outside its lifetime");
+                self.tcx.note_and_explain_region(&mut err,
+                    "the slice is only valid for ",
+                    sup,
+                    "");
+                err
+            }
+            infer::RelateObjectBound(span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0476,
+                          "lifetime of the source pointer does not outlive \
+                           lifetime bound of the object type");
+                self.tcx.note_and_explain_region(&mut err,
+                    "object type is valid for ",
+                    sub,
+                    "");
+                self.tcx.note_and_explain_region(&mut err,
+                    "source pointer is only valid for ",
+                    sup,
+                    "");
+                err
+            }
+            infer::RelateParamBound(span, ty) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0477,
+                          "the type `{}` does not fulfill the required lifetime",
+                          self.ty_to_string(ty));
+                self.tcx.note_and_explain_region(&mut err,
+                                        "type must outlive ",
+                                        sub,
+                                        "");
+                err
+            }
+            infer::RelateRegionParamBound(span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0478,
+                          "lifetime bound not satisfied");
+                self.tcx.note_and_explain_region(&mut err,
+                    "lifetime parameter instantiated with ",
+                    sup,
+                    "");
+                self.tcx.note_and_explain_region(&mut err,
+                    "but lifetime parameter must outlive ",
+                    sub,
+                    "");
+                err
+            }
+            infer::RelateDefaultParamBound(span, ty) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0479,
+                          "the type `{}` (provided as the value of \
+                           a type parameter) is not valid at this point",
+                          self.ty_to_string(ty));
+                self.tcx.note_and_explain_region(&mut err,
+                                        "type must outlive ",
+                                        sub,
+                                        "");
+                err
+            }
+            infer::CallRcvr(span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0480,
+                          "lifetime of method receiver does not outlive \
+                           the method call");
+                self.tcx.note_and_explain_region(&mut err,
+                    "the receiver is only valid for ",
+                    sup,
+                    "");
+                err
+            }
+            infer::CallArg(span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0481,
+                          "lifetime of function argument does not outlive \
+                           the function call");
+                self.tcx.note_and_explain_region(&mut err,
+                    "the function argument is only valid for ",
+                    sup,
+                    "");
+                err
+            }
+            infer::CallReturn(span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0482,
+                          "lifetime of return value does not outlive \
+                           the function call");
+                self.tcx.note_and_explain_region(&mut err,
+                    "the return value is only valid for ",
+                    sup,
+                    "");
+                err
+            }
+            infer::Operand(span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0483,
+                          "lifetime of operand does not outlive \
+                           the operation");
+                self.tcx.note_and_explain_region(&mut err,
+                    "the operand is only valid for ",
+                    sup,
+                    "");
+                err
+            }
+            infer::AddrOf(span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0484,
+                          "reference is not valid at the time of borrow");
+                self.tcx.note_and_explain_region(&mut err,
+                    "the borrow is only valid for ",
+                    sup,
+                    "");
+                err
+            }
+            infer::AutoBorrow(span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0485,
+                          "automatically reference is not valid \
+                           at the time of borrow");
+                self.tcx.note_and_explain_region(&mut err,
+                    "the automatic borrow is only valid for ",
+                    sup,
+                    "");
+                err
+            }
+            infer::ExprTypeIsNotInScope(t, span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0486,
+                          "type of expression contains references \
+                           that are not valid during the expression: `{}`",
+                          self.ty_to_string(t));
+                self.tcx.note_and_explain_region(&mut err,
+                    "type is only valid for ",
+                    sup,
+                    "");
+                err
+            }
+            infer::SafeDestructor(span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0487,
+                          "unsafe use of destructor: destructor might be called \
+                           while references are dead");
+                // FIXME (22171): terms "super/subregion" are suboptimal
+                self.tcx.note_and_explain_region(&mut err,
+                    "superregion: ",
+                    sup,
+                    "");
+                self.tcx.note_and_explain_region(&mut err,
+                    "subregion: ",
+                    sub,
+                    "");
+                err
+            }
+            infer::BindingTypeIsNotValidAtDecl(span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0488,
+                          "lifetime of variable does not enclose its declaration");
+                self.tcx.note_and_explain_region(&mut err,
+                    "the variable is only valid for ",
+                    sup,
+                    "");
+                err
+            }
+            infer::ParameterInScope(_, span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0489,
+                          "type/lifetime parameter not in scope here");
+                self.tcx.note_and_explain_region(&mut err,
+                    "the parameter is only valid for ",
+                    sub,
+                    "");
+                err
+            }
+            infer::DataBorrowed(ty, span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0490,
+                          "a value of type `{}` is borrowed for too long",
+                          self.ty_to_string(ty));
+                self.tcx.note_and_explain_region(&mut err, "the type is valid for ", sub, "");
+                self.tcx.note_and_explain_region(&mut err, "but the borrow lasts for ", sup, "");
+                err
+            }
+            infer::ReferenceOutlivesReferent(ty, span) => {
+                let mut err = struct_span_err!(self.tcx.sess, span, E0491,
+                          "in type `{}`, reference has a longer lifetime \
+                           than the data it references",
+                          self.ty_to_string(ty));
+                self.tcx.note_and_explain_region(&mut err,
+                    "the pointer is valid for ",
+                    sub,
+                    "");
+                self.tcx.note_and_explain_region(&mut err,
+                    "but the referenced data is only valid for ",
+                    sup,
+                    "");
+                err
+            }
+        }
+    }
+
+    fn report_sub_sup_conflict(&self,
+                               var_origin: RegionVariableOrigin,
+                               sub_origin: SubregionOrigin<'tcx>,
+                               sub_region: Region,
+                               sup_origin: SubregionOrigin<'tcx>,
+                               sup_region: Region) {
+        let mut err = self.report_inference_failure(var_origin);
+
+        self.tcx.note_and_explain_region(&mut err,
+            "first, the lifetime cannot outlive ",
+            sup_region,
+            "...");
+
+        self.note_region_origin(&mut err, &sup_origin);
+
+        self.tcx.note_and_explain_region(&mut err,
+            "but, the lifetime must be valid for ",
+            sub_region,
+            "...");
+
+        self.note_region_origin(&mut err, &sub_origin);
+        err.emit();
+    }
+
+    fn report_processed_errors(&self,
+                               origins: &[ProcessedErrorOrigin<'tcx>],
+                               same_regions: &[SameRegions]) {
+        for (i, origin) in origins.iter().enumerate() {
+            let mut err = match *origin {
+                ProcessedErrorOrigin::VariableFailure(ref var_origin) =>
+                    self.report_inference_failure(var_origin.clone()),
+                ProcessedErrorOrigin::ConcreteFailure(ref sr_origin, sub, sup) =>
+                    self.report_concrete_failure(sr_origin.clone(), sub, sup),
+            };
+
+            // attach the suggestion to the last such error
+            if i == origins.len() - 1 {
+                self.give_suggestion(&mut err, same_regions);
+            }
+
+            err.emit();
+        }
+    }
+
+    fn give_suggestion(&self, err: &mut DiagnosticBuilder, same_regions: &[SameRegions]) {
+        let scope_id = same_regions[0].scope_id;
+        let parent = self.tcx.map.get_parent(scope_id);
+        let parent_node = self.tcx.map.find(parent);
+        let taken = lifetimes_in_scope(self.tcx, scope_id);
+        let life_giver = LifeGiver::with_taken(&taken[..]);
+        let node_inner = match parent_node {
+            Some(ref node) => match *node {
+                ast_map::NodeItem(ref item) => {
+                    match item.node {
+                        hir::ItemFn(ref fn_decl, unsafety, constness, _, ref gen, _) => {
+                            Some((fn_decl, gen, unsafety, constness,
+                                  item.name, None, item.span))
+                        },
+                        _ => None
+                    }
+                }
+                ast_map::NodeImplItem(item) => {
+                    match item.node {
+                        hir::ImplItemKind::Method(ref sig, _) => {
+                            Some((&sig.decl,
+                                  &sig.generics,
+                                  sig.unsafety,
+                                  sig.constness,
+                                  item.name,
+                                  Some(&sig.explicit_self.node),
+                                  item.span))
+                        }
+                        _ => None,
+                    }
+                },
+                ast_map::NodeTraitItem(item) => {
+                    match item.node {
+                        hir::MethodTraitItem(ref sig, Some(_)) => {
+                            Some((&sig.decl,
+                                  &sig.generics,
+                                  sig.unsafety,
+                                  sig.constness,
+                                  item.name,
+                                  Some(&sig.explicit_self.node),
+                                  item.span))
+                        }
+                        _ => None
+                    }
+                }
+                _ => None
+            },
+            None => None
+        };
+        let (fn_decl, generics, unsafety, constness, name, expl_self, span)
+                                    = node_inner.expect("expect item fn");
+        let rebuilder = Rebuilder::new(self.tcx, fn_decl, expl_self,
+                                       generics, same_regions, &life_giver);
+        let (fn_decl, expl_self, generics) = rebuilder.rebuild();
+        self.give_expl_lifetime_param(err, &fn_decl, unsafety, constness, name,
+                                      expl_self.as_ref(), &generics, span);
+    }
+}
+
+struct RebuildPathInfo<'a> {
+    path: &'a hir::Path,
+    // indexes in path.lifetimes at which to insert the lifetime
+    indexes: Vec<u32>,
+    // number of lifetimes we expect to see on the type referred to by `path`
+    // (e.g., expected=1 for struct Foo<'a>)
+    expected: u32,
+    anon_nums: &'a HashSet<u32>,
+    region_names: &'a HashSet<ast::Name>
+}
+
+struct Rebuilder<'a, 'tcx: 'a> {
+    tcx: &'a TyCtxt<'tcx>,
+    fn_decl: &'a hir::FnDecl,
+    expl_self_opt: Option<&'a hir::ExplicitSelf_>,
+    generics: &'a hir::Generics,
+    same_regions: &'a [SameRegions],
+    life_giver: &'a LifeGiver,
+    cur_anon: Cell<u32>,
+    inserted_anons: RefCell<HashSet<u32>>,
+}
+
+enum FreshOrKept {
+    Fresh,
+    Kept
+}
+
+impl<'a, 'tcx> Rebuilder<'a, 'tcx> {
+    fn new(tcx: &'a TyCtxt<'tcx>,
+           fn_decl: &'a hir::FnDecl,
+           expl_self_opt: Option<&'a hir::ExplicitSelf_>,
+           generics: &'a hir::Generics,
+           same_regions: &'a [SameRegions],
+           life_giver: &'a LifeGiver)
+           -> Rebuilder<'a, 'tcx> {
+        Rebuilder {
+            tcx: tcx,
+            fn_decl: fn_decl,
+            expl_self_opt: expl_self_opt,
+            generics: generics,
+            same_regions: same_regions,
+            life_giver: life_giver,
+            cur_anon: Cell::new(0),
+            inserted_anons: RefCell::new(HashSet::new()),
+        }
+    }
+
+    fn rebuild(&self)
+               -> (hir::FnDecl, Option<hir::ExplicitSelf_>, hir::Generics) {
+        let mut expl_self_opt = self.expl_self_opt.cloned();
+        let mut inputs = self.fn_decl.inputs.clone();
+        let mut output = self.fn_decl.output.clone();
+        let mut ty_params = self.generics.ty_params.clone();
+        let where_clause = self.generics.where_clause.clone();
+        let mut kept_lifetimes = HashSet::new();
+        for sr in self.same_regions {
+            self.cur_anon.set(0);
+            self.offset_cur_anon();
+            let (anon_nums, region_names) =
+                                self.extract_anon_nums_and_names(sr);
+            let (lifetime, fresh_or_kept) = self.pick_lifetime(&region_names);
+            match fresh_or_kept {
+                Kept => { kept_lifetimes.insert(lifetime.name); }
+                _ => ()
+            }
+            expl_self_opt = self.rebuild_expl_self(expl_self_opt, lifetime,
+                                                   &anon_nums, &region_names);
+            inputs = self.rebuild_args_ty(&inputs[..], lifetime,
+                                          &anon_nums, &region_names);
+            output = self.rebuild_output(&output, lifetime, &anon_nums, &region_names);
+            ty_params = self.rebuild_ty_params(ty_params, lifetime,
+                                               &region_names);
+        }
+        let fresh_lifetimes = self.life_giver.get_generated_lifetimes();
+        let all_region_names = self.extract_all_region_names();
+        let generics = self.rebuild_generics(self.generics,
+                                             &fresh_lifetimes,
+                                             &kept_lifetimes,
+                                             &all_region_names,
+                                             ty_params,
+                                             where_clause);
+        let new_fn_decl = hir::FnDecl {
+            inputs: inputs,
+            output: output,
+            variadic: self.fn_decl.variadic
+        };
+        (new_fn_decl, expl_self_opt, generics)
+    }
+
+    fn pick_lifetime(&self,
+                     region_names: &HashSet<ast::Name>)
+                     -> (hir::Lifetime, FreshOrKept) {
+        if !region_names.is_empty() {
+            // It's not necessary to convert the set of region names to a
+            // vector of strings and then sort them. However, it makes the
+            // choice of lifetime name deterministic and thus easier to test.
+            let mut names = Vec::new();
+            for rn in region_names {
+                let lt_name = rn.to_string();
+                names.push(lt_name);
+            }
+            names.sort();
+            let name = token::intern(&names[0]);
+            return (name_to_dummy_lifetime(name), Kept);
+        }
+        return (self.life_giver.give_lifetime(), Fresh);
+    }
+
+    fn extract_anon_nums_and_names(&self, same_regions: &SameRegions)
+                                   -> (HashSet<u32>, HashSet<ast::Name>) {
+        let mut anon_nums = HashSet::new();
+        let mut region_names = HashSet::new();
+        for br in &same_regions.regions {
+            match *br {
+                ty::BrAnon(i) => {
+                    anon_nums.insert(i);
+                }
+                ty::BrNamed(_, name) => {
+                    region_names.insert(name);
+                }
+                _ => ()
+            }
+        }
+        (anon_nums, region_names)
+    }
+
+    fn extract_all_region_names(&self) -> HashSet<ast::Name> {
+        let mut all_region_names = HashSet::new();
+        for sr in self.same_regions {
+            for br in &sr.regions {
+                match *br {
+                    ty::BrNamed(_, name) => {
+                        all_region_names.insert(name);
+                    }
+                    _ => ()
+                }
+            }
+        }
+        all_region_names
+    }
+
+    fn inc_cur_anon(&self, n: u32) {
+        let anon = self.cur_anon.get();
+        self.cur_anon.set(anon+n);
+    }
+
+    fn offset_cur_anon(&self) {
+        let mut anon = self.cur_anon.get();
+        while self.inserted_anons.borrow().contains(&anon) {
+            anon += 1;
+        }
+        self.cur_anon.set(anon);
+    }
+
+    fn inc_and_offset_cur_anon(&self, n: u32) {
+        self.inc_cur_anon(n);
+        self.offset_cur_anon();
+    }
+
+    fn track_anon(&self, anon: u32) {
+        self.inserted_anons.borrow_mut().insert(anon);
+    }
+
+    fn rebuild_ty_params(&self,
+                         ty_params: hir::HirVec<hir::TyParam>,
+                         lifetime: hir::Lifetime,
+                         region_names: &HashSet<ast::Name>)
+                         -> hir::HirVec<hir::TyParam> {
+        ty_params.iter().map(|ty_param| {
+            let bounds = self.rebuild_ty_param_bounds(ty_param.bounds.clone(),
+                                                      lifetime,
+                                                      region_names);
+            hir::TyParam {
+                name: ty_param.name,
+                id: ty_param.id,
+                bounds: bounds,
+                default: ty_param.default.clone(),
+                span: ty_param.span,
+            }
+        }).collect()
+    }
+
+    fn rebuild_ty_param_bounds(&self,
+                               ty_param_bounds: hir::TyParamBounds,
+                               lifetime: hir::Lifetime,
+                               region_names: &HashSet<ast::Name>)
+                               -> hir::TyParamBounds {
+        ty_param_bounds.iter().map(|tpb| {
+            match tpb {
+                &hir::RegionTyParamBound(lt) => {
+                    // FIXME -- it's unclear whether I'm supposed to
+                    // substitute lifetime here. I suspect we need to
+                    // be passing down a map.
+                    hir::RegionTyParamBound(lt)
+                }
+                &hir::TraitTyParamBound(ref poly_tr, modifier) => {
+                    let tr = &poly_tr.trait_ref;
+                    let last_seg = tr.path.segments.last().unwrap();
+                    let mut insert = Vec::new();
+                    let lifetimes = last_seg.parameters.lifetimes();
+                    for (i, lt) in lifetimes.iter().enumerate() {
+                        if region_names.contains(&lt.name) {
+                            insert.push(i as u32);
+                        }
+                    }
+                    let rebuild_info = RebuildPathInfo {
+                        path: &tr.path,
+                        indexes: insert,
+                        expected: lifetimes.len() as u32,
+                        anon_nums: &HashSet::new(),
+                        region_names: region_names
+                    };
+                    let new_path = self.rebuild_path(rebuild_info, lifetime);
+                    hir::TraitTyParamBound(hir::PolyTraitRef {
+                        bound_lifetimes: poly_tr.bound_lifetimes.clone(),
+                        trait_ref: hir::TraitRef {
+                            path: new_path,
+                            ref_id: tr.ref_id,
+                        },
+                        span: poly_tr.span,
+                    }, modifier)
+                }
+            }
+        }).collect()
+    }
+
+    fn rebuild_expl_self(&self,
+                         expl_self_opt: Option<hir::ExplicitSelf_>,
+                         lifetime: hir::Lifetime,
+                         anon_nums: &HashSet<u32>,
+                         region_names: &HashSet<ast::Name>)
+                         -> Option<hir::ExplicitSelf_> {
+        match expl_self_opt {
+            Some(ref expl_self) => match *expl_self {
+                hir::SelfRegion(lt_opt, muta, id) => match lt_opt {
+                    Some(lt) => if region_names.contains(&lt.name) {
+                        return Some(hir::SelfRegion(Some(lifetime), muta, id));
+                    },
+                    None => {
+                        let anon = self.cur_anon.get();
+                        self.inc_and_offset_cur_anon(1);
+                        if anon_nums.contains(&anon) {
+                            self.track_anon(anon);
+                            return Some(hir::SelfRegion(Some(lifetime), muta, id));
+                        }
+                    }
+                },
+                _ => ()
+            },
+            None => ()
+        }
+        expl_self_opt
+    }
+
+    fn rebuild_generics(&self,
+                        generics: &hir::Generics,
+                        add: &Vec<hir::Lifetime>,
+                        keep: &HashSet<ast::Name>,
+                        remove: &HashSet<ast::Name>,
+                        ty_params: hir::HirVec<hir::TyParam>,
+                        where_clause: hir::WhereClause)
+                        -> hir::Generics {
+        let mut lifetimes = Vec::new();
+        for lt in add {
+            lifetimes.push(hir::LifetimeDef { lifetime: *lt,
+                                              bounds: hir::HirVec::new() });
+        }
+        for lt in &generics.lifetimes {
+            if keep.contains(&lt.lifetime.name) ||
+                !remove.contains(&lt.lifetime.name) {
+                lifetimes.push((*lt).clone());
+            }
+        }
+        hir::Generics {
+            lifetimes: lifetimes.into(),
+            ty_params: ty_params,
+            where_clause: where_clause,
+        }
+    }
+
+    fn rebuild_args_ty(&self,
+                       inputs: &[hir::Arg],
+                       lifetime: hir::Lifetime,
+                       anon_nums: &HashSet<u32>,
+                       region_names: &HashSet<ast::Name>)
+                       -> hir::HirVec<hir::Arg> {
+        let mut new_inputs = Vec::new();
+        for arg in inputs {
+            let new_ty = self.rebuild_arg_ty_or_output(&arg.ty, lifetime,
+                                                       anon_nums, region_names);
+            let possibly_new_arg = hir::Arg {
+                ty: new_ty,
+                pat: arg.pat.clone(),
+                id: arg.id
+            };
+            new_inputs.push(possibly_new_arg);
+        }
+        new_inputs.into()
+    }
+
+    fn rebuild_output(&self, ty: &hir::FunctionRetTy,
+                      lifetime: hir::Lifetime,
+                      anon_nums: &HashSet<u32>,
+                      region_names: &HashSet<ast::Name>) -> hir::FunctionRetTy {
+        match *ty {
+            hir::Return(ref ret_ty) => hir::Return(
+                self.rebuild_arg_ty_or_output(&ret_ty, lifetime, anon_nums, region_names)
+            ),
+            hir::DefaultReturn(span) => hir::DefaultReturn(span),
+            hir::NoReturn(span) => hir::NoReturn(span)
+        }
+    }
+
+    fn rebuild_arg_ty_or_output(&self,
+                                ty: &hir::Ty,
+                                lifetime: hir::Lifetime,
+                                anon_nums: &HashSet<u32>,
+                                region_names: &HashSet<ast::Name>)
+                                -> P<hir::Ty> {
+        let mut new_ty = P(ty.clone());
+        let mut ty_queue = vec!(ty);
+        while !ty_queue.is_empty() {
+            let cur_ty = ty_queue.remove(0);
+            match cur_ty.node {
+                hir::TyRptr(lt_opt, ref mut_ty) => {
+                    let rebuild = match lt_opt {
+                        Some(lt) => region_names.contains(&lt.name),
+                        None => {
+                            let anon = self.cur_anon.get();
+                            let rebuild = anon_nums.contains(&anon);
+                            if rebuild {
+                                self.track_anon(anon);
+                            }
+                            self.inc_and_offset_cur_anon(1);
+                            rebuild
+                        }
+                    };
+                    if rebuild {
+                        let to = hir::Ty {
+                            id: cur_ty.id,
+                            node: hir::TyRptr(Some(lifetime), mut_ty.clone()),
+                            span: cur_ty.span
+                        };
+                        new_ty = self.rebuild_ty(new_ty, P(to));
+                    }
+                    ty_queue.push(&mut_ty.ty);
+                }
+                hir::TyPath(ref maybe_qself, ref path) => {
+                    let a_def = match self.tcx.def_map.borrow().get(&cur_ty.id) {
+                        None => {
+                            self.tcx
+                                .sess
+                                .fatal(&format!(
+                                        "unbound path {}",
+                                        pprust::path_to_string(path)))
+                        }
+                        Some(d) => d.full_def()
+                    };
+                    match a_def {
+                        Def::Enum(did) | Def::TyAlias(did) | Def::Struct(did) => {
+                            let generics = self.tcx.lookup_item_type(did).generics;
+
+                            let expected =
+                                generics.regions.len(subst::TypeSpace) as u32;
+                            let lifetimes =
+                                path.segments.last().unwrap().parameters.lifetimes();
+                            let mut insert = Vec::new();
+                            if lifetimes.is_empty() {
+                                let anon = self.cur_anon.get();
+                                for (i, a) in (anon..anon+expected).enumerate() {
+                                    if anon_nums.contains(&a) {
+                                        insert.push(i as u32);
+                                    }
+                                    self.track_anon(a);
+                                }
+                                self.inc_and_offset_cur_anon(expected);
+                            } else {
+                                for (i, lt) in lifetimes.iter().enumerate() {
+                                    if region_names.contains(&lt.name) {
+                                        insert.push(i as u32);
+                                    }
+                                }
+                            }
+                            let rebuild_info = RebuildPathInfo {
+                                path: path,
+                                indexes: insert,
+                                expected: expected,
+                                anon_nums: anon_nums,
+                                region_names: region_names
+                            };
+                            let new_path = self.rebuild_path(rebuild_info, lifetime);
+                            let qself = maybe_qself.as_ref().map(|qself| {
+                                hir::QSelf {
+                                    ty: self.rebuild_arg_ty_or_output(&qself.ty, lifetime,
+                                                                      anon_nums, region_names),
+                                    position: qself.position
+                                }
+                            });
+                            let to = hir::Ty {
+                                id: cur_ty.id,
+                                node: hir::TyPath(qself, new_path),
+                                span: cur_ty.span
+                            };
+                            new_ty = self.rebuild_ty(new_ty, P(to));
+                        }
+                        _ => ()
+                    }
+                }
+
+                hir::TyPtr(ref mut_ty) => {
+                    ty_queue.push(&mut_ty.ty);
+                }
+                hir::TyVec(ref ty) |
+                hir::TyFixedLengthVec(ref ty, _) => {
+                    ty_queue.push(&ty);
+                }
+                hir::TyTup(ref tys) => ty_queue.extend(tys.iter().map(|ty| &**ty)),
+                _ => {}
+            }
+        }
+        new_ty
+    }
+
+    fn rebuild_ty(&self,
+                  from: P<hir::Ty>,
+                  to: P<hir::Ty>)
+                  -> P<hir::Ty> {
+
+        fn build_to(from: P<hir::Ty>,
+                    to: &mut Option<P<hir::Ty>>)
+                    -> P<hir::Ty> {
+            if Some(from.id) == to.as_ref().map(|ty| ty.id) {
+                return to.take().expect("`to` type found more than once during rebuild");
+            }
+            from.map(|hir::Ty {id, node, span}| {
+                let new_node = match node {
+                    hir::TyRptr(lifetime, mut_ty) => {
+                        hir::TyRptr(lifetime, hir::MutTy {
+                            mutbl: mut_ty.mutbl,
+                            ty: build_to(mut_ty.ty, to),
+                        })
+                    }
+                    hir::TyPtr(mut_ty) => {
+                        hir::TyPtr(hir::MutTy {
+                            mutbl: mut_ty.mutbl,
+                            ty: build_to(mut_ty.ty, to),
+                        })
+                    }
+                    hir::TyVec(ty) => hir::TyVec(build_to(ty, to)),
+                    hir::TyFixedLengthVec(ty, e) => {
+                        hir::TyFixedLengthVec(build_to(ty, to), e)
+                    }
+                    hir::TyTup(tys) => {
+                        hir::TyTup(tys.into_iter().map(|ty| build_to(ty, to)).collect())
+                    }
+                    other => other
+                };
+                hir::Ty { id: id, node: new_node, span: span }
+            })
+        }
+
+        build_to(from, &mut Some(to))
+    }
+
+    fn rebuild_path(&self,
+                    rebuild_info: RebuildPathInfo,
+                    lifetime: hir::Lifetime)
+                    -> hir::Path
+    {
+        let RebuildPathInfo {
+            path,
+            indexes,
+            expected,
+            anon_nums,
+            region_names,
+        } = rebuild_info;
+
+        let last_seg = path.segments.last().unwrap();
+        let new_parameters = match last_seg.parameters {
+            hir::ParenthesizedParameters(..) => {
+                last_seg.parameters.clone()
+            }
+
+            hir::AngleBracketedParameters(ref data) => {
+                let mut new_lts = Vec::new();
+                if data.lifetimes.is_empty() {
+                    // traverse once to see if there's a need to insert lifetime
+                    let need_insert = (0..expected).any(|i| {
+                        indexes.contains(&i)
+                    });
+                    if need_insert {
+                        for i in 0..expected {
+                            if indexes.contains(&i) {
+                                new_lts.push(lifetime);
+                            } else {
+                                new_lts.push(self.life_giver.give_lifetime());
+                            }
+                        }
+                    }
+                } else {
+                    for (i, lt) in data.lifetimes.iter().enumerate() {
+                        if indexes.contains(&(i as u32)) {
+                            new_lts.push(lifetime);
+                        } else {
+                            new_lts.push(*lt);
+                        }
+                    }
+                }
+                let new_types = data.types.iter().map(|t| {
+                    self.rebuild_arg_ty_or_output(&t, lifetime, anon_nums, region_names)
+                }).collect();
+                let new_bindings = data.bindings.iter().map(|b| {
+                    hir::TypeBinding {
+                        id: b.id,
+                        name: b.name,
+                        ty: self.rebuild_arg_ty_or_output(&b.ty,
+                                                          lifetime,
+                                                          anon_nums,
+                                                          region_names),
+                        span: b.span
+                    }
+                }).collect();
+                hir::AngleBracketedParameters(hir::AngleBracketedParameterData {
+                    lifetimes: new_lts.into(),
+                    types: new_types,
+                    bindings: new_bindings,
+               })
+            }
+        };
+        let new_seg = hir::PathSegment {
+            identifier: last_seg.identifier,
+            parameters: new_parameters
+        };
+        let mut new_segs = Vec::new();
+        new_segs.extend_from_slice(path.segments.split_last().unwrap().1);
+        new_segs.push(new_seg);
+        hir::Path {
+            span: path.span,
+            global: path.global,
+            segments: new_segs.into()
+        }
+    }
+}
+
+impl<'a, 'tcx> ErrorReportingHelpers<'tcx> for InferCtxt<'a, 'tcx> {
+    fn give_expl_lifetime_param(&self,
+                                err: &mut DiagnosticBuilder,
+                                decl: &hir::FnDecl,
+                                unsafety: hir::Unsafety,
+                                constness: hir::Constness,
+                                name: ast::Name,
+                                opt_explicit_self: Option<&hir::ExplicitSelf_>,
+                                generics: &hir::Generics,
+                                span: Span) {
+        let suggested_fn = pprust::fun_to_string(decl, unsafety, constness, name,
+                                                 opt_explicit_self, generics);
+        let msg = format!("consider using an explicit lifetime \
+                           parameter as shown: {}", suggested_fn);
+        err.span_help(span, &msg[..]);
+    }
+
+    fn report_inference_failure(&self,
+                                var_origin: RegionVariableOrigin)
+                                -> DiagnosticBuilder<'tcx> {
+        let br_string = |br: ty::BoundRegion| {
+            let mut s = br.to_string();
+            if !s.is_empty() {
+                s.push_str(" ");
+            }
+            s
+        };
+        let var_description = match var_origin {
+            infer::MiscVariable(_) => "".to_string(),
+            infer::PatternRegion(_) => " for pattern".to_string(),
+            infer::AddrOfRegion(_) => " for borrow expression".to_string(),
+            infer::Autoref(_) => " for autoref".to_string(),
+            infer::Coercion(_) => " for automatic coercion".to_string(),
+            infer::LateBoundRegion(_, br, infer::FnCall) => {
+                format!(" for lifetime parameter {}in function call",
+                        br_string(br))
+            }
+            infer::LateBoundRegion(_, br, infer::HigherRankedType) => {
+                format!(" for lifetime parameter {}in generic type", br_string(br))
+            }
+            infer::LateBoundRegion(_, br, infer::AssocTypeProjection(type_name)) => {
+                format!(" for lifetime parameter {}in trait containing associated type `{}`",
+                        br_string(br), type_name)
+            }
+            infer::EarlyBoundRegion(_, name) => {
+                format!(" for lifetime parameter `{}`",
+                        name)
+            }
+            infer::BoundRegionInCoherence(name) => {
+                format!(" for lifetime parameter `{}` in coherence check",
+                        name)
+            }
+            infer::UpvarRegion(ref upvar_id, _) => {
+                format!(" for capture of `{}` by closure",
+                        self.tcx.local_var_name_str(upvar_id.var_id).to_string())
+            }
+        };
+
+        struct_span_err!(self.tcx.sess, var_origin.span(), E0495,
+                  "cannot infer an appropriate lifetime{} \
+                   due to conflicting requirements",
+                  var_description)
+    }
+
+    fn note_region_origin(&self, err: &mut DiagnosticBuilder, origin: &SubregionOrigin<'tcx>) {
+        match *origin {
+            infer::Subtype(ref trace) => {
+                let desc = match trace.origin {
+                    TypeOrigin::Misc(_) => {
+                        "types are compatible"
+                    }
+                    TypeOrigin::MethodCompatCheck(_) => {
+                        "method type is compatible with trait"
+                    }
+                    TypeOrigin::ExprAssignable(_) => {
+                        "expression is assignable"
+                    }
+                    TypeOrigin::RelateTraitRefs(_) => {
+                        "traits are compatible"
+                    }
+                    TypeOrigin::RelateSelfType(_) => {
+                        "self type matches impl self type"
+                    }
+                    TypeOrigin::RelateOutputImplTypes(_) => {
+                        "trait type parameters match those \
+                                 specified on the impl"
+                    }
+                    TypeOrigin::MatchExpressionArm(_, _, _) => {
+                        "match arms have compatible types"
+                    }
+                    TypeOrigin::IfExpression(_) => {
+                        "if and else have compatible types"
+                    }
+                    TypeOrigin::IfExpressionWithNoElse(_) => {
+                        "if may be missing an else clause"
+                    }
+                    TypeOrigin::RangeExpression(_) => {
+                        "start and end of range have compatible types"
+                    }
+                    TypeOrigin::EquatePredicate(_) => {
+                        "equality where clause is satisfied"
+                    }
+                };
+
+                match self.values_str(&trace.values) {
+                    Some(values_str) => {
+                        err.span_note(
+                            trace.origin.span(),
+                            &format!("...so that {} ({})",
+                                    desc, values_str));
+                    }
+                    None => {
+                        // Really should avoid printing this error at
+                        // all, since it is derived, but that would
+                        // require more refactoring than I feel like
+                        // doing right now. - nmatsakis
+                        err.span_note(
+                            trace.origin.span(),
+                            &format!("...so that {}", desc));
+                    }
+                }
+            }
+            infer::Reborrow(span) => {
+                err.span_note(
+                    span,
+                    "...so that reference does not outlive \
+                    borrowed content");
+            }
+            infer::ReborrowUpvar(span, ref upvar_id) => {
+                err.span_note(
+                    span,
+                    &format!(
+                        "...so that closure can access `{}`",
+                        self.tcx.local_var_name_str(upvar_id.var_id)
+                            .to_string()));
+            }
+            infer::InfStackClosure(span) => {
+                err.span_note(
+                    span,
+                    "...so that closure does not outlive its stack frame");
+            }
+            infer::InvokeClosure(span) => {
+                err.span_note(
+                    span,
+                    "...so that closure is not invoked outside its lifetime");
+            }
+            infer::DerefPointer(span) => {
+                err.span_note(
+                    span,
+                    "...so that pointer is not dereferenced \
+                    outside its lifetime");
+            }
+            infer::FreeVariable(span, id) => {
+                err.span_note(
+                    span,
+                    &format!("...so that captured variable `{}` \
+                            does not outlive the enclosing closure",
+                            self.tcx.local_var_name_str(id)));
+            }
+            infer::IndexSlice(span) => {
+                err.span_note(
+                    span,
+                    "...so that slice is not indexed outside the lifetime");
+            }
+            infer::RelateObjectBound(span) => {
+                err.span_note(
+                    span,
+                    "...so that it can be closed over into an object");
+            }
+            infer::CallRcvr(span) => {
+                err.span_note(
+                    span,
+                    "...so that method receiver is valid for the method call");
+            }
+            infer::CallArg(span) => {
+                err.span_note(
+                    span,
+                    "...so that argument is valid for the call");
+            }
+            infer::CallReturn(span) => {
+                err.span_note(
+                    span,
+                    "...so that return value is valid for the call");
+            }
+            infer::Operand(span) => {
+                err.span_note(
+                    span,
+                    "...so that operand is valid for operation");
+            }
+            infer::AddrOf(span) => {
+                err.span_note(
+                    span,
+                    "...so that reference is valid \
+                     at the time of borrow");
+            }
+            infer::AutoBorrow(span) => {
+                err.span_note(
+                    span,
+                    "...so that auto-reference is valid \
+                     at the time of borrow");
+            }
+            infer::ExprTypeIsNotInScope(t, span) => {
+                err.span_note(
+                    span,
+                    &format!("...so type `{}` of expression is valid during the \
+                             expression",
+                            self.ty_to_string(t)));
+            }
+            infer::BindingTypeIsNotValidAtDecl(span) => {
+                err.span_note(
+                    span,
+                    "...so that variable is valid at time of its declaration");
+            }
+            infer::ParameterInScope(_, span) => {
+                err.span_note(
+                    span,
+                    "...so that a type/lifetime parameter is in scope here");
+            }
+            infer::DataBorrowed(ty, span) => {
+                err.span_note(
+                    span,
+                    &format!("...so that the type `{}` is not borrowed for too long",
+                             self.ty_to_string(ty)));
+            }
+            infer::ReferenceOutlivesReferent(ty, span) => {
+                err.span_note(
+                    span,
+                    &format!("...so that the reference type `{}` \
+                             does not outlive the data it points at",
+                            self.ty_to_string(ty)));
+            }
+            infer::RelateParamBound(span, t) => {
+                err.span_note(
+                    span,
+                    &format!("...so that the type `{}` \
+                             will meet its required lifetime bounds",
+                            self.ty_to_string(t)));
+            }
+            infer::RelateDefaultParamBound(span, t) => {
+                err.span_note(
+                    span,
+                    &format!("...so that type parameter \
+                             instantiated with `{}`, \
+                             will meet its declared lifetime bounds",
+                            self.ty_to_string(t)));
+            }
+            infer::RelateRegionParamBound(span) => {
+                err.span_note(
+                    span,
+                    "...so that the declared lifetime parameter bounds \
+                                are satisfied");
+            }
+            infer::SafeDestructor(span) => {
+                err.span_note(
+                    span,
+                    "...so that references are valid when the destructor \
+                     runs");
+            }
+        }
+    }
+}
+
+pub trait Resolvable<'tcx> {
+    fn resolve<'a>(&self, infcx: &InferCtxt<'a, 'tcx>) -> Self;
+}
+
+impl<'tcx> Resolvable<'tcx> for Ty<'tcx> {
+    fn resolve<'a>(&self, infcx: &InferCtxt<'a, 'tcx>) -> Ty<'tcx> {
+        infcx.resolve_type_vars_if_possible(self)
+    }
+}
+
+impl<'tcx> Resolvable<'tcx> for ty::TraitRef<'tcx> {
+    fn resolve<'a>(&self, infcx: &InferCtxt<'a, 'tcx>)
+                   -> ty::TraitRef<'tcx> {
+        infcx.resolve_type_vars_if_possible(self)
+    }
+}
+
+impl<'tcx> Resolvable<'tcx> for ty::PolyTraitRef<'tcx> {
+    fn resolve<'a>(&self,
+                   infcx: &InferCtxt<'a, 'tcx>)
+                   -> ty::PolyTraitRef<'tcx>
+    {
+        infcx.resolve_type_vars_if_possible(self)
+    }
+}
+
+fn lifetimes_in_scope(tcx: &TyCtxt,
+                      scope_id: ast::NodeId)
+                      -> Vec<hir::LifetimeDef> {
+    let mut taken = Vec::new();
+    let parent = tcx.map.get_parent(scope_id);
+    let method_id_opt = match tcx.map.find(parent) {
+        Some(node) => match node {
+            ast_map::NodeItem(item) => match item.node {
+                hir::ItemFn(_, _, _, _, ref gen, _) => {
+                    taken.extend_from_slice(&gen.lifetimes);
+                    None
+                },
+                _ => None
+            },
+            ast_map::NodeImplItem(ii) => {
+                match ii.node {
+                    hir::ImplItemKind::Method(ref sig, _) => {
+                        taken.extend_from_slice(&sig.generics.lifetimes);
+                        Some(ii.id)
+                    }
+                    _ => None,
+                }
+            }
+            _ => None
+        },
+        None => None
+    };
+    if method_id_opt.is_some() {
+        let method_id = method_id_opt.unwrap();
+        let parent = tcx.map.get_parent(method_id);
+        match tcx.map.find(parent) {
+            Some(node) => match node {
+                ast_map::NodeItem(item) => match item.node {
+                    hir::ItemImpl(_, _, ref gen, _, _, _) => {
+                        taken.extend_from_slice(&gen.lifetimes);
+                    }
+                    _ => ()
+                },
+                _ => ()
+            },
+            None => ()
+        }
+    }
+    return taken;
+}
+
+// LifeGiver is responsible for generating fresh lifetime names
+struct LifeGiver {
+    taken: HashSet<String>,
+    counter: Cell<usize>,
+    generated: RefCell<Vec<hir::Lifetime>>,
+}
+
+impl LifeGiver {
+    fn with_taken(taken: &[hir::LifetimeDef]) -> LifeGiver {
+        let mut taken_ = HashSet::new();
+        for lt in taken {
+            let lt_name = lt.lifetime.name.to_string();
+            taken_.insert(lt_name);
+        }
+        LifeGiver {
+            taken: taken_,
+            counter: Cell::new(0),
+            generated: RefCell::new(Vec::new()),
+        }
+    }
+
+    fn inc_counter(&self) {
+        let c = self.counter.get();
+        self.counter.set(c+1);
+    }
+
+    fn give_lifetime(&self) -> hir::Lifetime {
+        let lifetime;
+        loop {
+            let mut s = String::from("'");
+            s.push_str(&num_to_string(self.counter.get()));
+            if !self.taken.contains(&s) {
+                lifetime = name_to_dummy_lifetime(token::intern(&s[..]));
+                self.generated.borrow_mut().push(lifetime);
+                break;
+            }
+            self.inc_counter();
+        }
+        self.inc_counter();
+        return lifetime;
+
+        // 0 .. 25 generates a .. z, 26 .. 51 generates aa .. zz, and so on
+        fn num_to_string(counter: usize) -> String {
+            let mut s = String::new();
+            let (n, r) = (counter/26 + 1, counter % 26);
+            let letter: char = from_u32((r+97) as u32).unwrap();
+            for _ in 0..n {
+                s.push(letter);
+            }
+            s
+        }
+    }
+
+    fn get_generated_lifetimes(&self) -> Vec<hir::Lifetime> {
+        self.generated.borrow().clone()
+    }
+}
+
+fn name_to_dummy_lifetime(name: ast::Name) -> hir::Lifetime {
+    hir::Lifetime { id: ast::DUMMY_NODE_ID,
+                    span: codemap::DUMMY_SP,
+                    name: name }
+}
diff --git a/src/librustc/infer/freshen.rs b/src/librustc/infer/freshen.rs
new file mode 100644 (file)
index 0000000..b0f1e9d
--- /dev/null
@@ -0,0 +1,176 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Freshening is the process of replacing unknown variables with fresh types. The idea is that
+//! the type, after freshening, contains no inference variables but instead contains either a
+//! value for each variable or fresh "arbitrary" types wherever a variable would have been.
+//!
+//! Freshening is used primarily to get a good type for inserting into a cache. The result
+//! summarizes what the type inferencer knows "so far". The primary place it is used right now is
+//! in the trait matching algorithm, which needs to be able to cache whether an `impl` self type
+//! matches some other type X -- *without* affecting `X`. That means that if the type `X` is in
+//! fact an unbound type variable, we want the match to be regarded as ambiguous, because depending
+//! on what type that type variable is ultimately assigned, the match may or may not succeed.
+//!
+//! Note that you should be careful not to allow the output of freshening to leak to the user in
+//! error messages or in any other form. Freshening is only really useful as an internal detail.
+//!
+//! __An important detail concerning regions.__ The freshener also replaces *all* regions with
+//! 'static. The reason behind this is that, in general, we do not take region relationships into
+//! account when making type-overloaded decisions. This is important because of the design of the
+//! region inferencer, which is not based on unification but rather on accumulating and then
+//! solving a set of constraints. In contrast, the type inferencer assigns a value to each type
+//! variable only once, and it does so as soon as it can, so it is reasonable to ask what the type
+//! inferencer knows "so far".
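+//!
+//! For example (an informal sketch): suppose one type variable has already
+//! been resolved to `i32` while a second is still unknown. Freshening the
+//! tuple type made of those two variables plus an `&'a u32` yields roughly
+//! `(i32, FreshTy(0), &'static u32)`: resolved variables are replaced by
+//! their values, unresolved ones by numbered "fresh" placeholder types, and
+//! all free regions by `'static`.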
+
+use ty::{self, Ty, TyCtxt, TypeFoldable};
+use ty::fold::TypeFolder;
+use std::collections::hash_map::{self, Entry};
+
+use super::InferCtxt;
+use super::unify_key::ToType;
+
+pub struct TypeFreshener<'a, 'tcx:'a> {
+    infcx: &'a InferCtxt<'a, 'tcx>,
+    freshen_count: u32,
+    freshen_map: hash_map::HashMap<ty::InferTy, Ty<'tcx>>,
+}
+
+impl<'a, 'tcx> TypeFreshener<'a, 'tcx> {
+    pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> TypeFreshener<'a, 'tcx> {
+        TypeFreshener {
+            infcx: infcx,
+            freshen_count: 0,
+            freshen_map: hash_map::HashMap::new(),
+        }
+    }
+
+    fn freshen<F>(&mut self,
+                  opt_ty: Option<Ty<'tcx>>,
+                  key: ty::InferTy,
+                  freshener: F)
+                  -> Ty<'tcx> where
+        F: FnOnce(u32) -> ty::InferTy,
+    {
+        match opt_ty {
+            Some(ty) => { return ty.fold_with(self); }
+            None => { }
+        }
+
+        match self.freshen_map.entry(key) {
+            Entry::Occupied(entry) => *entry.get(),
+            Entry::Vacant(entry) => {
+                let index = self.freshen_count;
+                self.freshen_count += 1;
+                let t = self.infcx.tcx.mk_infer(freshener(index));
+                entry.insert(t);
+                t
+            }
+        }
+    }
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for TypeFreshener<'a, 'tcx> {
+    fn tcx<'b>(&'b self) -> &'b TyCtxt<'tcx> {
+        self.infcx.tcx
+    }
+
+    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+        match r {
+            ty::ReEarlyBound(..) |
+            ty::ReLateBound(..) => {
+                // leave bound regions alone
+                r
+            }
+
+            ty::ReStatic |
+            ty::ReFree(_) |
+            ty::ReScope(_) |
+            ty::ReVar(_) |
+            ty::ReSkolemized(..) |
+            ty::ReEmpty => {
+                // replace all free regions with 'static
+                ty::ReStatic
+            }
+        }
+    }
+
+    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+        if !t.needs_infer() && !t.has_erasable_regions() {
+            return t;
+        }
+
+        let tcx = self.infcx.tcx;
+
+        match t.sty {
+            ty::TyInfer(ty::TyVar(v)) => {
+                let opt_ty = self.infcx.type_variables.borrow_mut().probe(v);
+                self.freshen(
+                    opt_ty,
+                    ty::TyVar(v),
+                    ty::FreshTy)
+            }
+
+            ty::TyInfer(ty::IntVar(v)) => {
+                self.freshen(
+                    self.infcx.int_unification_table.borrow_mut()
+                                                    .probe(v)
+                                                    .map(|v| v.to_type(tcx)),
+                    ty::IntVar(v),
+                    ty::FreshIntTy)
+            }
+
+            ty::TyInfer(ty::FloatVar(v)) => {
+                self.freshen(
+                    self.infcx.float_unification_table.borrow_mut()
+                                                      .probe(v)
+                                                      .map(|v| v.to_type(tcx)),
+                    ty::FloatVar(v),
+                    ty::FreshFloatTy)
+            }
+
+            ty::TyInfer(ty::FreshTy(c)) |
+            ty::TyInfer(ty::FreshIntTy(c)) |
+            ty::TyInfer(ty::FreshFloatTy(c)) => {
+                if c >= self.freshen_count {
+                    bug!("Encountered a freshened type with id {} \
+                          but our counter is only at {}",
+                         c,
+                         self.freshen_count);
+                }
+                t
+            }
+
+            ty::TyBool |
+            ty::TyChar |
+            ty::TyInt(..) |
+            ty::TyUint(..) |
+            ty::TyFloat(..) |
+            ty::TyEnum(..) |
+            ty::TyBox(..) |
+            ty::TyStr |
+            ty::TyError |
+            ty::TyArray(..) |
+            ty::TySlice(..) |
+            ty::TyRawPtr(..) |
+            ty::TyRef(..) |
+            ty::TyFnDef(..) |
+            ty::TyFnPtr(_) |
+            ty::TyTrait(..) |
+            ty::TyStruct(..) |
+            ty::TyClosure(..) |
+            ty::TyTuple(..) |
+            ty::TyProjection(..) |
+            ty::TyParam(..) => {
+                t.super_fold_with(self)
+            }
+        }
+    }
+}
diff --git a/src/librustc/infer/glb.rs b/src/librustc/infer/glb.rs
new file mode 100644 (file)
index 0000000..37717c2
--- /dev/null
@@ -0,0 +1,90 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::combine::CombineFields;
+use super::higher_ranked::HigherRankedRelations;
+use super::InferCtxt;
+use super::lattice::{self, LatticeDir};
+use super::Subtype;
+
+use ty::{self, Ty, TyCtxt};
+use ty::relate::{Relate, RelateResult, TypeRelation};
+use traits::PredicateObligations;
+
+/// "Greatest lower bound" (common subtype)
+pub struct Glb<'a, 'tcx: 'a> {
+    fields: CombineFields<'a, 'tcx>
+}
+
+impl<'a, 'tcx> Glb<'a, 'tcx> {
+    pub fn new(fields: CombineFields<'a, 'tcx>) -> Glb<'a, 'tcx> {
+        Glb { fields: fields }
+    }
+
+    pub fn obligations(self) -> PredicateObligations<'tcx> {
+        self.fields.obligations
+    }
+}
+
+impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Glb<'a, 'tcx> {
+    fn tag(&self) -> &'static str { "Glb" }
+
+    fn tcx(&self) -> &'a TyCtxt<'tcx> { self.fields.tcx() }
+
+    fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
+
+    fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
+                                               variance: ty::Variance,
+                                               a: &T,
+                                               b: &T)
+                                               -> RelateResult<'tcx, T>
+    {
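+        // Dispatch on variance: contravariant positions flip to the least
+        // upper bound, covariant positions recurse with the GLB itself, and
+        // invariant positions require the two sides to be equal.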
+        match variance {
+            ty::Invariant => self.fields.equate().relate(a, b),
+            ty::Covariant => self.relate(a, b),
+            ty::Bivariant => self.fields.bivariate().relate(a, b),
+            ty::Contravariant => self.fields.lub().relate(a, b),
+        }
+    }
+
+    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+        lattice::super_lattice_tys(self, a, b)
+    }
+
+    fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
+        debug!("{}.regions({:?}, {:?})",
+               self.tag(),
+               a,
+               b);
+
+        let origin = Subtype(self.fields.trace.clone());
+        Ok(self.fields.infcx.region_vars.glb_regions(origin, a, b))
+    }
+
+    fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
+                  -> RelateResult<'tcx, ty::Binder<T>>
+        where T: Relate<'a, 'tcx>
+    {
+        self.fields.higher_ranked_glb(a, b)
+    }
+}
+
+impl<'a, 'tcx> LatticeDir<'a,'tcx> for Glb<'a, 'tcx> {
+    fn infcx(&self) -> &'a InferCtxt<'a,'tcx> {
+        self.fields.infcx
+    }
+
+    fn relate_bound(&self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()> {
+        let mut sub = self.fields.sub();
+        sub.relate(&v, &a)?;
+        sub.relate(&v, &b)?;
+        Ok(())
+    }
+}
diff --git a/src/librustc/infer/higher_ranked/README.md b/src/librustc/infer/higher_ranked/README.md
new file mode 100644 (file)
index 0000000..57665b6
--- /dev/null
@@ -0,0 +1,403 @@
+# Skolemization and functions
+
+One of the trickiest and most subtle aspects of regions is dealing
+with higher-ranked things which include bound region variables, such
+as function types. I strongly suggest that if you want to understand
+the situation, you read this paper (which is, admittedly, very long,
+but you don't have to read the whole thing):
+
+http://research.microsoft.com/en-us/um/people/simonpj/papers/higher-rank/
+
+Although my explanation will never compete with SPJ's (for one thing,
+his is approximately 100 pages), I will attempt to explain the basic
+problem and also how we solve it. Note that the paper only discusses
+subtyping, not the computation of LUB/GLB.
+
+The problem we are addressing is that there is a kind of subtyping
+between functions with bound region parameters. Consider, for
+example, whether the following relation holds:
+
+    for<'a> fn(&'a isize) <: for<'b> fn(&'b isize)? (Yes, a => b)
+
+The answer is that of course it does. These two types are basically
+the same, except that in one we used the name `a` and in the other we used
+the name `b`.
+
+In the examples that follow, it becomes very important to know whether
+a lifetime is bound in a function type (that is, is a lifetime
+parameter) or appears free (is defined in some outer scope).
+Therefore, from now on I will always write the bindings explicitly,
+using the Rust syntax `for<'a> fn(&'a isize)` to indicate that `a` is a
+lifetime parameter.
+
+Now let's consider two more function types. Here, we assume that the
+`'b` lifetime is defined somewhere outside and hence is not a lifetime
+parameter bound by the function type (it "appears free"):
+
+    for<'a> fn(&'a isize) <: fn(&'b isize)? (Yes, a => b)
+
+This subtyping relation does in fact hold. To see why, you have to
+consider what subtyping means. One way to look at `T1 <: T2` is to
+say that it means that it is always ok to treat an instance of `T1` as
+if it had the type `T2`. So, with our functions, it is always ok to
+treat a function that can take pointers with any lifetime as if it
+were a function that can only take a pointer with the specific
+lifetime `'b`. After all, `'b` is a lifetime, and the function can
+take values of any lifetime.
+
+You can also look at subtyping as the *is a* relationship. This amounts
+to the same thing: a function that accepts pointers with any lifetime
+*is a* function that accepts pointers with some specific lifetime.
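+
+To make this direction concrete, here is a small stand-alone sketch (the
+function names are invented for illustration; this is ordinary Rust, not
+part of the algorithm below):
+
+```
+// `takes_any` accepts a reference with *any* lifetime...
+fn takes_any<'a>(x: &'a isize) -> isize { *x }
+
+// ...so it may be passed where a function accepting only the specific
+// lifetime `'b` is required.
+fn use_with_specific<'b>(x: &'b isize, f: fn(&'b isize) -> isize) -> isize {
+    f(x)
+}
+
+fn main() {
+    let v = 5;
+    assert_eq!(use_with_specific(&v, takes_any), 5);
+}
+```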
+
+So, what if we reverse the order of the two function types, like this:
+
+    fn(&'b isize) <: for<'a> fn(&'a isize)? (No)
+
+Does the subtyping relationship still hold?  The answer of course is
+no. In this case, the function accepts *only the lifetime `'b`*,
+so it is not reasonable to treat it as if it were a function that
+accepted any lifetime.
+
+What about these two examples:
+
+    for<'a,'b> fn(&'a isize, &'b isize) <: for<'a>    fn(&'a isize, &'a isize)? (Yes)
+    for<'a>    fn(&'a isize, &'a isize) <: for<'a,'b> fn(&'a isize, &'b isize)? (No)
+
+Here, it is true that functions which take two pointers with any two
+lifetimes can be treated as if they only accepted two pointers with
+the same lifetime, but not the reverse.
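+
+This asymmetry can be checked directly against the compiler; the following
+is an illustrative sketch (the names are made up), not part of the
+algorithm described next:
+
+```
+fn two_lifetimes<'a, 'b>(_: &'a isize, _: &'b isize) {}
+
+fn main() {
+    // Collapsing two independent lifetimes into one shared lifetime is
+    // accepted: the more general function "is a" less general one.
+    let _f: for<'a> fn(&'a isize, &'a isize) = two_lifetimes;
+
+    // The reverse direction is rejected; uncommenting this fails to compile:
+    // fn one_lifetime<'a>(_: &'a isize, _: &'a isize) {}
+    // let _g: for<'a, 'b> fn(&'a isize, &'b isize) = one_lifetime;
+}
+```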
+
+## The algorithm
+
+Here is the algorithm we use to perform the subtyping check:
+
+1. Replace all bound regions in the subtype with new variables
+2. Replace all bound regions in the supertype with skolemized
+   equivalents. A "skolemized" region is just a new fresh region
+   name.
+3. Check that the parameter and return types match as normal
+4. Ensure that no skolemized regions 'leak' into region variables
+   visible from "the outside"
+
+Let's walk through some examples and see how this algorithm plays out.
+
+#### First example
+
+We'll start with the first example, which was:
+
+    1. for<'a> fn(&'a T) <: for<'b> fn(&'b T)?        Yes: a -> b
+
+After steps 1 and 2 of the algorithm we will have replaced the types
+like so:
+
+    1. fn(&'A T) <: fn(&'x T)?
+
+Here the upper case `&A` indicates a *region variable*, that is, a
+region whose value is being inferred by the system. I also replaced
+`&b` with `&x`---I'll use letters late in the alphabet (`x`, `y`, `z`)
+to indicate skolemized region names. We can assume they don't appear
+elsewhere. Note that neither the sub- nor the supertype binds any
+region names anymore (as indicated by the absence of `<` and `>`).
+
+The next step is to check that the parameter types match. Because
+parameters are contravariant, this means that we check whether:
+
+    &'x T <: &'A T
+
+Region pointers are contravariant so this implies that
+
+    &A <= &x
+
+must hold, where `<=` is the subregion relationship. Processing
+*this* constraint simply adds a constraint into our graph that `&A <=
+&x` and is considered successful (it can, for example, be satisfied by
+choosing the value `&x` for `&A`).
+
+So far we have encountered no error, so the subtype check succeeds.
+
+#### The third example
+
+Now let's look first at the third example, which was:
+
+    3. fn(&'a T)    <: for<'b> fn(&'b T)?        No!
+
+After steps 1 and 2 of the algorithm we will have replaced the types
+like so:
+
+    3. fn(&'a T) <: fn(&'x T)?
+
+This looks pretty much the same as before, except that on the LHS
+`'a` was not bound, and hence was left as-is and not replaced with
+a variable. The next step is again to check that the parameter types
+match. This will ultimately require (as before) that `'a` <= `&x`
+must hold: but this does not hold. `'a` and `'x` are both distinct
+free regions. So the subtype check fails.
+
+#### Checking for skolemization leaks
+
+You may be wondering about that mysterious last step in the algorithm.
+So far it has not been relevant. The purpose of that last step is to
+catch something like *this*:
+
+    for<'a> fn() -> fn(&'a T) <: fn() -> for<'b> fn(&'b T)?   No.
+
+Here the function types are the same but for where the binding occurs.
+The subtype returns a function that expects a value in precisely one
+region. The supertype returns a function that expects a value in any
+region. If we allow an instance of the subtype to be used where the
+supertype is expected, then, someone could call the fn and think that
+the return value has type `fn<b>(&'b T)` when it really has type
+`fn(&'a T)` (this is case #3, above). Bad.
+
+So let's step through what happens when we perform this subtype check.
+We first replace the bound regions in the subtype (the supertype has
+no bound regions). This gives us:
+
+    fn() -> fn(&'A T) <: fn() -> for<'b> fn(&'b T)?
+
+Now we compare the return types, which are covariant, and hence we have:
+
+    fn(&'A T) <: for<'b> fn(&'b T)?
+
+Here we skolemize the bound region in the supertype to yield:
+
+    fn(&'A T) <: fn(&'x T)?
+
+And then proceed to compare the argument types:
+
+    &'x T <: &'A T
+    'A <= 'x
+
+Finally, this is where it gets interesting!  This is where an error
+*should* be reported. But in fact this will not happen. The reason why
+is that `A` is a variable: we will infer that its value is the fresh
+region `x` and think that everything is happy. In fact, this behavior
+is *necessary*, it was key to the first example we walked through.
+
+The difference between this example and the first one is that the variable
+`A` already existed at the point where the skolemization occurred. In
+the first example, you had two functions:
+
+    for<'a> fn(&'a T) <: for<'b> fn(&'b T)
+
+and hence `&A` and `&x` were created "together". In general, the
+intention of the skolemized names is that they are supposed to be
+fresh names that could never be equal to anything from the outside.
+But when inference comes into play, we might not be respecting this
+rule.
+
+So the way we solve this is to add a fourth step that examines the
+constraints that refer to skolemized names. Basically, consider a
+non-directed version of the constraint graph. Let `Tainted(x)` be the
+set of all things reachable from a skolemized variable `x`.
+`Tainted(x)` should not contain any regions that existed before the
+step at which the skolemization was performed. So this case here
+would fail because `&x` was created alone, but is relatable to `&A`.
+
+## Computing the LUB and GLB
+
+The paper I pointed you at is written for Haskell. It therefore does
+not consider subtyping, and in particular does not consider
+LUB or GLB computation. We have to consider this. Here is the
+algorithm I implemented.
+
+First though, let's discuss what we are trying to compute in more
+detail. The LUB is basically the "common supertype" and the GLB is
+"common subtype"; one catch is that the LUB should be the
+*most-specific* common supertype and the GLB should be *most general*
+common subtype (as opposed to any common supertype or any common
+subtype).
+
+Anyway, to help clarify, here is a table containing some function
+pairs and their LUB/GLB (for conciseness, in this table, I'm just
+including the lifetimes here, not the rest of the types, and I'm
+writing `fn<>` instead of `for<> fn`):
+
+```
+Type 1                Type 2                LUB                    GLB
+fn<'a>('a)            fn('X)                fn('X)                 fn<'a>('a)
+fn('a)                fn('X)                --                     fn<'a>('a)
+fn<'a,'b>('a, 'b)     fn<'x>('x, 'x)        fn<'a>('a, 'a)         fn<'a,'b>('a, 'b)
+fn<'a,'b>('a, 'b, 'a) fn<'x,'y>('x, 'y, 'y) fn<'a>('a, 'a, 'a)     fn<'a,'b,'c>('a,'b,'c)
+```
+
+### Conventions
+
+I use lower-case letters (e.g., `&a`) for bound regions and upper-case
+letters for free regions (`&A`).  Region variables written with a
+dollar-sign (e.g., `$a`).  I will try to remember to enumerate the
+bound-regions on the fn type as well (e.g., `for<'a> fn(&a)`).
+
+### High-level summary
+
+Both the LUB and the GLB algorithms work in a similar fashion.  They
+begin by replacing all bound regions (on both sides) with fresh region
+inference variables.  Therefore, both functions are converted to types
+that contain only free regions.  We can then compute the LUB/GLB in a
+straightforward way, as described in `combine.rs`.  This results in an
+interim type T.  The algorithms then examine the regions that appear
+in T and try to, in some cases, replace them with bound regions to
+yield the final result.
+
+To decide whether to replace a region `R` that appears in `T` with
+a bound region, the algorithms make use of two bits of
+information.  First is a set `V` that contains all region
+variables created as part of the LUB/GLB computation (roughly; see
+`region_vars_confined_to_snapshot()` for full details). `V` will
+contain the region variables created to replace the bound regions
+in the input types, but it also contains 'intermediate' variables
+created to represent the LUB/GLB of individual regions.
+Basically, when asked to compute the LUB/GLB of a region variable
+with another region, the inferencer cannot oblige immediately
+since the values of those variables are not known.  Therefore, it
+creates a new variable that is related to the two regions.  For
+example, the LUB of two variables `$x` and `$y` is a fresh
+variable `$z` that is constrained such that `$x <= $z` and `$y <=
+$z`.  So `V` will contain these intermediate variables as well.
+
+The other important factor in deciding how to replace a region in T is
+the function `Tainted($r)` which, for a region variable, identifies
+all regions that the region variable is related to in some way
+(`Tainted()` made an appearance in the subtype computation as well).
+
+### LUB
+
+The LUB algorithm proceeds in three steps:
+
+1. Replace all bound regions (on both sides) with fresh region
+   inference variables.
+2. Compute the LUB "as normal", meaning compute the GLB of each
+   pair of argument types and the LUB of the return types and
+   so forth.  Combine those to a new function type `F`.
+3. Replace each region `R` that appears in `F` as follows:
+   - Let `V` be the set of variables created during the LUB
+     computational steps 1 and 2, as described in the previous section.
+   - If `R` is not in `V`, replace `R` with itself.
+   - If `Tainted(R)` contains a region that is not in `V`,
+     replace `R` with itself.
+   - Otherwise, select the earliest variable in `Tainted(R)` that originates
+     from the left-hand side and replace `R` with the bound region that
+     this variable was a replacement for.
+
+So, let's work through the simplest example: `fn(&A)` and `for<'a> fn(&a)`.
+In this case, `&a` will be replaced with `$a` and the interim LUB type
+`fn($b)` will be computed, where `$b=GLB(&A,$a)`.  Therefore, `V =
+{$a, $b}` and `Tainted($b) = { $b, $a, &A }`.  When we go to replace
+`$b`, we find that since `&A \in Tainted($b)` is not a member of `V`,
+we leave `$b` as is.  When region inference happens, `$b` will be
+resolved to `&A`, as we wanted.
+
+Let's look at a more complex one: `fn(&a, &b)` and `fn(&x, &x)`.  In
+this case, we'll end up with a (pre-replacement) LUB type of `fn(&g,
+&h)` and a graph that looks like:
+
+```
+     $a        $b     *--$x
+       \        \    /  /
+        \        $h-*  /
+         $g-----------*
+```
+
+Here `$g` and `$h` are fresh variables that are created to represent
+the LUB/GLB of things requiring inference.  This means that `V` and
+`Tainted` will look like:
+
+```
+V = {$a, $b, $g, $h, $x}
+Tainted($g) = Tainted($h) = { $a, $b, $h, $g, $x }
+```
+
+Therefore we replace both `$g` and `$h` with `$a`, and end up
+with the type `fn(&a, &a)`.
+
+### GLB
+
+The procedure for computing the GLB is similar.  The difference lies
+in computing the replacements for the various variables. For each
+region `R` that appears in the type `F`, we again compute `Tainted(R)`
+and examine the results:
+
+1. If `R` is not in `V`, it is not replaced.
+2. Else, if `Tainted(R)` contains only variables in `V`, and it
+   contains exactly one variable from the LHS and one variable from
+   the RHS, then `R` can be mapped to the bound version of the
+   variable from the LHS.
+3. Else, if `Tainted(R)` contains no variable from the LHS and no
+   variable from the RHS, then `R` can be mapped to itself.
+4. Else, `R` is mapped to a fresh bound variable.
+
+These rules are pretty complex.  Let's look at some examples to see
+how they play out.
+
+Our first example was `fn(&a)` and `fn(&X)`.  In this case, `&a` will
+be replaced with `$a` and we will ultimately compute a
+(pre-replacement) GLB type of `fn($g)` where `$g=LUB($a,&X)`.
+Therefore, `V={$a,$g}` and `Tainted($g)={$g,$a,&X}`.  To find the
+replacement for `$g` we consult the rules above:
+- Rule (1) does not apply because `$g \in V`
+- Rule (2) does not apply because `&X \in Tainted($g)`
+- Rule (3) does not apply because `$a \in Tainted($g)`
+- Hence, by rule (4), we replace `$g` with a fresh bound variable `&z`.
+So our final result is `fn(&z)`, which is correct.
+
+The next example is `fn(&A)` and `fn(&Z)`. In this case, we will again
+have a (pre-replacement) GLB of `fn(&g)`, where `$g = LUB(&A,&Z)`.
+Therefore, `V={$g}` and `Tainted($g) = {$g, &A, &Z}`.  In this case,
+by rule (3), `$g` is mapped to itself, and hence the result is
+`fn($g)`.  This result is correct (in this case, at least), but it is
+indicative of a case that *can* lead us into concluding that there is
+no GLB when in fact a GLB does exist.  See the section "Questionable
+Results" below for more details.
+
+The next example is `fn(&a, &b)` and `fn(&c, &c)`. In this case, as
+before, we'll end up with `F=fn($g, $h)` where `Tainted($g) =
+Tainted($h) = {$g, $h, $a, $b, $c}`.  Only rule (4) applies and hence
+we'll select fresh bound variables `y` and `z` and wind up with
+`fn(&y, &z)`.
+
+For the last example, let's consider what may seem trivial, but is
+not: `fn(&a, &a)` and `fn(&b, &b)`.  In this case, we'll get `F=fn($g,
+$h)` where `Tainted($g) = {$g, $a, $b}` and `Tainted($h) = {$h, $a,
+$b}`.  Both of these sets contain exactly one bound variable from each
+side, so we'll map them both to `&a`, resulting in `fn(&a, &a)`, which
+is the desired result.
+
+### Shortcomings and correctness
+
+You may be wondering whether this algorithm is correct.  The answer is
+"sort of".  There are definitely cases where they fail to compute a
+result even though a correct result exists.  I believe, though, that
+if they succeed, then the result is valid, and I will attempt to
+convince you.  The basic argument is that the "pre-replacement" step
+computes a set of constraints.  The replacements, then, attempt to
+satisfy those constraints, using bound identifiers where needed.
+
+For now I will briefly go over the cases for LUB/GLB and identify
+their intent:
+
+- LUB:
+  - The region variables that are substituted in place of bound regions
+    are intended to collect constraints on those bound regions.
+  - If Tainted(R) contains only values in V, then this region is unconstrained
+    and can therefore be generalized, otherwise it cannot.
+- GLB:
+  - The region variables that are substituted in place of bound regions
+    are intended to collect constraints on those bound regions.
+  - If Tainted(R) contains exactly one variable from each side, and
+    only variables in V, that indicates that those two bound regions
+    must be equated.
+  - Otherwise, if Tainted(R) references any variables from left or right
+    side, then it is trying to combine a bound region with a free one or
+    multiple bound regions, so we need to select fresh bound regions.
+
+Sorry this is more of a shorthand to myself.  I will try to write up something
+more convincing in the future.
+
+#### Where are the algorithms wrong?
+
+- The pre-replacement computation can fail even though using a
+  bound-region would have succeeded.
+- We will compute GLB(fn(fn($a)), fn(fn($b))) as fn($c) where $c is the
+  GLB of $a and $b.  But if inference finds that $a and $b must be mapped
+  to regions without a GLB, then this is effectively a failure to compute
+  the GLB.  However, the result `fn<$c>(fn($c))` is a valid GLB.
diff --git a/src/librustc/infer/higher_ranked/mod.rs b/src/librustc/infer/higher_ranked/mod.rs
new file mode 100644 (file)
index 0000000..d89ef8e
--- /dev/null
@@ -0,0 +1,614 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Helper routines for higher-ranked things. See `README.md` for more
+//! details.
+
+use super::{CombinedSnapshot, InferCtxt, HigherRankedType, SkolemizationMap};
+use super::combine::CombineFields;
+
+use ty::{self, TyCtxt, Binder, TypeFoldable};
+use ty::error::TypeError;
+use ty::relate::{Relate, RelateResult, TypeRelation};
+use syntax::codemap::Span;
+use util::nodemap::{FnvHashMap, FnvHashSet};
+
+pub trait HigherRankedRelations<'a,'tcx> {
+    fn higher_ranked_sub<T>(&self, a: &Binder<T>, b: &Binder<T>) -> RelateResult<'tcx, Binder<T>>
+        where T: Relate<'a,'tcx>;
+
+    fn higher_ranked_lub<T>(&self, a: &Binder<T>, b: &Binder<T>) -> RelateResult<'tcx, Binder<T>>
+        where T: Relate<'a,'tcx>;
+
+    fn higher_ranked_glb<T>(&self, a: &Binder<T>, b: &Binder<T>) -> RelateResult<'tcx, Binder<T>>
+        where T: Relate<'a,'tcx>;
+}
+
+trait InferCtxtExt {
+    fn tainted_regions(&self, snapshot: &CombinedSnapshot, r: ty::Region) -> Vec<ty::Region>;
+
+    fn region_vars_confined_to_snapshot(&self,
+                                        snapshot: &CombinedSnapshot)
+                                        -> Vec<ty::RegionVid>;
+}
+
+impl<'a,'tcx> HigherRankedRelations<'a,'tcx> for CombineFields<'a,'tcx> {
+    fn higher_ranked_sub<T>(&self, a: &Binder<T>, b: &Binder<T>)
+                            -> RelateResult<'tcx, Binder<T>>
+        where T: Relate<'a,'tcx>
+    {
+        debug!("higher_ranked_sub(a={:?}, b={:?})",
+               a, b);
+
+        // Rather than checking the subtype relationship between `a` and `b`
+        // as-is, we need to do some extra work here in order to make sure
+        // that function subtyping works correctly with respect to regions
+        //
+        // Note: this is a subtle algorithm.  For a full explanation,
+        // please see `README.md`.
+
+        // Start a snapshot so we can examine "all bindings that were
+        // created as part of this type comparison".
+        return self.infcx.commit_if_ok(|snapshot| {
+            // First, we instantiate each bound region in the subtype with a fresh
+            // region variable.
+            let (a_prime, _) =
+                self.infcx.replace_late_bound_regions_with_fresh_var(
+                    self.trace.origin.span(),
+                    HigherRankedType,
+                    a);
+
+            // Second, we instantiate each bound region in the supertype with a
+            // fresh concrete region.
+            let (b_prime, skol_map) =
+                self.infcx.skolemize_late_bound_regions(b, snapshot);
+
+            debug!("a_prime={:?}", a_prime);
+            debug!("b_prime={:?}", b_prime);
+
+            // Compare types now that bound regions have been replaced.
+            let result = self.sub().relate(&a_prime, &b_prime)?;
+
+            // Presuming type comparison succeeds, we need to check
+            // that the skolemized regions do not "leak".
+            match leak_check(self.infcx, &skol_map, snapshot) {
+                Ok(()) => { }
+                Err((skol_br, tainted_region)) => {
+                    if self.a_is_expected {
+                        debug!("Not as polymorphic!");
+                        return Err(TypeError::RegionsInsufficientlyPolymorphic(skol_br,
+                                                                               tainted_region));
+                    } else {
+                        debug!("Overly polymorphic!");
+                        return Err(TypeError::RegionsOverlyPolymorphic(skol_br,
+                                                                       tainted_region));
+                    }
+                }
+            }
+
+            debug!("higher_ranked_sub: OK result={:?}",
+                   result);
+
+            Ok(ty::Binder(result))
+        });
+    }
+
+    fn higher_ranked_lub<T>(&self, a: &Binder<T>, b: &Binder<T>) -> RelateResult<'tcx, Binder<T>>
+        where T: Relate<'a,'tcx>
+    {
+        // Start a snapshot so we can examine "all bindings that were
+        // created as part of this type comparison".
+        return self.infcx.commit_if_ok(|snapshot| {
+            // Instantiate each bound region with a fresh region variable.
+            let span = self.trace.origin.span();
+            let (a_with_fresh, a_map) =
+                self.infcx.replace_late_bound_regions_with_fresh_var(
+                    span, HigherRankedType, a);
+            let (b_with_fresh, _) =
+                self.infcx.replace_late_bound_regions_with_fresh_var(
+                    span, HigherRankedType, b);
+
+            // Collect constraints.
+            let result0 =
+                self.lub().relate(&a_with_fresh, &b_with_fresh)?;
+            let result0 =
+                self.infcx.resolve_type_vars_if_possible(&result0);
+            debug!("lub result0 = {:?}", result0);
+
+            // Generalize the regions appearing in result0 if possible
+            let new_vars = self.infcx.region_vars_confined_to_snapshot(snapshot);
+            let span = self.trace.origin.span();
+            let result1 =
+                fold_regions_in(
+                    self.tcx(),
+                    &result0,
+                    |r, debruijn| generalize_region(self.infcx, span, snapshot, debruijn,
+                                                    &new_vars, &a_map, r));
+
+            debug!("lub({:?},{:?}) = {:?}",
+                   a,
+                   b,
+                   result1);
+
+            Ok(ty::Binder(result1))
+        });
+
+        fn generalize_region(infcx: &InferCtxt,
+                             span: Span,
+                             snapshot: &CombinedSnapshot,
+                             debruijn: ty::DebruijnIndex,
+                             new_vars: &[ty::RegionVid],
+                             a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
+                             r0: ty::Region)
+                             -> ty::Region {
+            // Regions that pre-dated the LUB computation stay as they are.
+            if !is_var_in_set(new_vars, r0) {
+                assert!(!r0.is_bound());
+                debug!("generalize_region(r0={:?}): not new variable", r0);
+                return r0;
+            }
+
+            let tainted = infcx.tainted_regions(snapshot, r0);
+
+            // Variables created during LUB computation which are
+            // *related* to regions that pre-date the LUB computation
+            // stay as they are.
+            if !tainted.iter().all(|r| is_var_in_set(new_vars, *r)) {
+                debug!("generalize_region(r0={:?}): \
+                        non-new-variables found in {:?}",
+                       r0, tainted);
+                assert!(!r0.is_bound());
+                return r0;
+            }
+
+            // Otherwise, the variable must be associated with at
+            // least one of the variables representing bound regions
+            // in both A and B.  Replace the variable with the "first"
+            // bound region from A that we find it to be associated
+            // with.
+            for (a_br, a_r) in a_map {
+                if tainted.iter().any(|x| x == a_r) {
+                    debug!("generalize_region(r0={:?}): \
+                            replacing with {:?}, tainted={:?}",
+                           r0, *a_br, tainted);
+                    return ty::ReLateBound(debruijn, *a_br);
+                }
+            }
+
+            span_bug!(
+                span,
+                "region {:?} is not associated with any bound region from A!",
+                r0)
+        }
+    }
+
+    fn higher_ranked_glb<T>(&self, a: &Binder<T>, b: &Binder<T>) -> RelateResult<'tcx, Binder<T>>
+        where T: Relate<'a,'tcx>
+    {
+        debug!("higher_ranked_glb({:?}, {:?})",
+               a, b);
+
+        // Make a snapshot so we can examine "all bindings that were
+        // created as part of this type comparison".
+        return self.infcx.commit_if_ok(|snapshot| {
+            // Instantiate each bound region with a fresh region variable.
+            let (a_with_fresh, a_map) =
+                self.infcx.replace_late_bound_regions_with_fresh_var(
+                    self.trace.origin.span(), HigherRankedType, a);
+            let (b_with_fresh, b_map) =
+                self.infcx.replace_late_bound_regions_with_fresh_var(
+                    self.trace.origin.span(), HigherRankedType, b);
+            let a_vars = var_ids(self, &a_map);
+            let b_vars = var_ids(self, &b_map);
+
+            // Collect constraints.
+            let result0 =
+                self.glb().relate(&a_with_fresh, &b_with_fresh)?;
+            let result0 =
+                self.infcx.resolve_type_vars_if_possible(&result0);
+            debug!("glb result0 = {:?}", result0);
+
+            // Generalize the regions appearing in result0 if possible
+            let new_vars = self.infcx.region_vars_confined_to_snapshot(snapshot);
+            let span = self.trace.origin.span();
+            let result1 =
+                fold_regions_in(
+                    self.tcx(),
+                    &result0,
+                    |r, debruijn| generalize_region(self.infcx, span, snapshot, debruijn,
+                                                    &new_vars,
+                                                    &a_map, &a_vars, &b_vars,
+                                                    r));
+
+            debug!("glb({:?},{:?}) = {:?}",
+                   a,
+                   b,
+                   result1);
+
+            Ok(ty::Binder(result1))
+        });
+
+        fn generalize_region(infcx: &InferCtxt,
+                             span: Span,
+                             snapshot: &CombinedSnapshot,
+                             debruijn: ty::DebruijnIndex,
+                             new_vars: &[ty::RegionVid],
+                             a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
+                             a_vars: &[ty::RegionVid],
+                             b_vars: &[ty::RegionVid],
+                             r0: ty::Region) -> ty::Region {
+            if !is_var_in_set(new_vars, r0) {
+                assert!(!r0.is_bound());
+                return r0;
+            }
+
+            let tainted = infcx.tainted_regions(snapshot, r0);
+
+            let mut a_r = None;
+            let mut b_r = None;
+            let mut only_new_vars = true;
+            for r in &tainted {
+                if is_var_in_set(a_vars, *r) {
+                    if a_r.is_some() {
+                        return fresh_bound_variable(infcx, debruijn);
+                    } else {
+                        a_r = Some(*r);
+                    }
+                } else if is_var_in_set(b_vars, *r) {
+                    if b_r.is_some() {
+                        return fresh_bound_variable(infcx, debruijn);
+                    } else {
+                        b_r = Some(*r);
+                    }
+                } else if !is_var_in_set(new_vars, *r) {
+                    only_new_vars = false;
+                }
+            }
+
+            // NB---I do not believe this algorithm computes
+            // (necessarily) the GLB.  As written it can
+            // spuriously fail. In particular, if there is a case
+            // like fn(fn(&a)) and fn(fn(&b)), where a and b are
+            // free, it will return fn(fn(&c)) where c = GLB(a,b).  If
+            // however this GLB is not defined, then the result is
+            // an error, even though something like
+            // "fn<X>(fn(&X))" where X is bound would be a
+            // subtype of both of those.
+            //
+            // The problem is that if we were to return a bound
+            // variable, we'd be computing a lower-bound, but not
+            // necessarily the *greatest* lower-bound.
+            //
+            // Unfortunately, this problem is non-trivial to solve,
+            // because we do not know at the time of computing the GLB
+            // whether a GLB(a,b) exists or not, because we haven't
+            // run region inference (or indeed, even fully computed
+            // the region hierarchy!). The current algorithm seems to
+            // work OK in practice.
+
+            if a_r.is_some() && b_r.is_some() && only_new_vars {
+                // Related to exactly one bound variable from each fn:
+                return rev_lookup(span, a_map, a_r.unwrap());
+            } else if a_r.is_none() && b_r.is_none() {
+                // Not related to bound variables from either fn:
+                assert!(!r0.is_bound());
+                return r0;
+            } else {
+                // Other:
+                return fresh_bound_variable(infcx, debruijn);
+            }
+        }
+
+        fn rev_lookup(span: Span,
+                      a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
+                      r: ty::Region) -> ty::Region
+        {
+            for (a_br, a_r) in a_map {
+                if *a_r == r {
+                    return ty::ReLateBound(ty::DebruijnIndex::new(1), *a_br);
+                }
+            }
+            span_bug!(
+                span,
+                "could not find original bound region for {:?}",
+                r);
+        }
+
+        fn fresh_bound_variable(infcx: &InferCtxt, debruijn: ty::DebruijnIndex) -> ty::Region {
+            infcx.region_vars.new_bound(debruijn)
+        }
+    }
+}
+
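+// Extracts the `RegionVid` for each fresh variable in a map produced by
+// `replace_late_bound_regions_with_fresh_var`; bugs out if any mapped
+// region is not a region inference variable.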
+fn var_ids<'a, 'tcx>(fields: &CombineFields<'a, 'tcx>,
+                      map: &FnvHashMap<ty::BoundRegion, ty::Region>)
+                     -> Vec<ty::RegionVid> {
+    map.iter()
+       .map(|(_, r)| match *r {
+           ty::ReVar(r) => { r }
+           r => {
+               span_bug!(
+                   fields.trace.origin.span(),
+                   "found non-region-vid: {:?}",
+                   r);
+           }
+       })
+       .collect()
+}
+
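+// True if `r` is a region inference variable listed in `new_vars`.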
+fn is_var_in_set(new_vars: &[ty::RegionVid], r: ty::Region) -> bool {
+    match r {
+        ty::ReVar(ref v) => new_vars.iter().any(|x| x == v),
+        _ => false
+    }
+}
+
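+// Applies `fldr` to the regions in `unbound_value`, passing along the
+// current binder depth; asserts that none of them are late-bound, since
+// bound regions at the outer level should already have been replaced
+// with fresh variables.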
+fn fold_regions_in<'tcx, T, F>(tcx: &TyCtxt<'tcx>,
+                               unbound_value: &T,
+                               mut fldr: F)
+                               -> T
+    where T: TypeFoldable<'tcx>,
+          F: FnMut(ty::Region, ty::DebruijnIndex) -> ty::Region,
+{
+    tcx.fold_regions(unbound_value, &mut false, |region, current_depth| {
+        // we should only be encountering "escaping" late-bound regions here,
+        // because the ones at the current level should have been replaced
+        // with fresh variables
+        assert!(match region {
+            ty::ReLateBound(..) => false,
+            _ => true
+        });
+
+        fldr(region, ty::DebruijnIndex::new(current_depth))
+    })
+}
+
+impl<'a,'tcx> InferCtxtExt for InferCtxt<'a,'tcx> {
+    fn tainted_regions(&self, snapshot: &CombinedSnapshot, r: ty::Region) -> Vec<ty::Region> {
+        self.region_vars.tainted(&snapshot.region_vars_snapshot, r)
+    }
+
+    fn region_vars_confined_to_snapshot(&self,
+                                        snapshot: &CombinedSnapshot)
+                                        -> Vec<ty::RegionVid>
+    {
+        /*!
+         * Returns the set of region variables that do not affect any
+         * types/regions which existed before `snapshot` was
+         * started. This is used in the sub/lub/glb computations. The
+         * idea here is that when we are computing lub/glb of two
+         * regions, we sometimes create intermediate region variables.
+         * Those region variables may touch some of the skolemized or
+         * other "forbidden" regions we created to replace bound
+         * regions, but they don't really represent an "external"
+         * constraint.
+         *
+         * However, sometimes fresh variables are created for other
+         * purposes too, and those *may* represent an external
+         * constraint. In particular, when a type variable is
+         * instantiated, we create region variables for all the
+         * regions that appear within, and if that type variable
+         * pre-existed the snapshot, then those region variables
+         * represent external constraints.
+         *
+         * An example appears in the unit test
+         * `sub_free_bound_false_infer`.  In this test, we want to
+         * know whether
+         *
+         * ```rust
+         * fn(_#0t) <: for<'a> fn(&'a int)
+         * ```
+         *
+         * Note that the subtype has a type variable. Because the type
+         * variable can't be instantiated with a region that is bound
+         * in the fn signature, this comparison ought to fail. But if
+         * we're not careful, it will succeed.
+         *
+         * The reason is that when we walk through the subtyping
+         * algorithm, we begin by replacing `'a` with a skolemized
+         * variable `'1`. We then have `fn(_#0t) <: fn(&'1 int)`. This
+         * can be made true by unifying `_#0t` with `&'1 int`. In the
+         * process, we create a fresh variable for the skolemized
+         * region, `'$2`, and hence we have that `_#0t == &'$2
+         * int`. However, because `'$2` was created during the sub
+         * computation, if we're not careful we will erroneously
+         * assume it is one of the transient region variables
+         * representing a lub/glb internally. Not good.
+         *
+         * To prevent this, we check for type variables which were
+         * unified during the snapshot, and say that any region
+         * variable created during the snapshot but which finds its
+         * way into a type variable is considered to "escape" the
+         * snapshot.
+         */
+
+        let mut region_vars =
+            self.region_vars.vars_created_since_snapshot(&snapshot.region_vars_snapshot);
+
+        let escaping_types =
+            self.type_variables.borrow_mut().types_escaping_snapshot(&snapshot.type_snapshot);
+
+        let mut escaping_region_vars = FnvHashSet();
+        for ty in &escaping_types {
+            self.tcx.collect_regions(ty, &mut escaping_region_vars);
+        }
+
+        region_vars.retain(|&region_vid| {
+            let r = ty::ReVar(region_vid);
+            !escaping_region_vars.contains(&r)
+        });
+
+        debug!("region_vars_confined_to_snapshot: region_vars={:?} escaping_types={:?}",
+               region_vars,
+               escaping_types);
+
+        region_vars
+    }
+}
+
+pub fn skolemize_late_bound_regions<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
+                                               binder: &ty::Binder<T>,
+                                               snapshot: &CombinedSnapshot)
+                                               -> (T, SkolemizationMap)
+    where T : TypeFoldable<'tcx>
+{
+    /*!
+     * Replace all regions bound by `binder` with skolemized regions and
+     * return a map indicating which bound-region was replaced with what
+     * skolemized region. This is the first step of checking subtyping
+     * when higher-ranked things are involved. See `README.md` for more
+     * details.
+     */
+
+    let (result, map) = infcx.tcx.replace_late_bound_regions(binder, |br| {
+        infcx.region_vars.new_skolemized(br, &snapshot.region_vars_snapshot)
+    });
+
+    debug!("skolemize_bound_regions(binder={:?}, result={:?}, map={:?})",
+           binder,
+           result,
+           map);
+
+    (result, map)
+}
+
+pub fn leak_check<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
+                           skol_map: &SkolemizationMap,
+                           snapshot: &CombinedSnapshot)
+                           -> Result<(),(ty::BoundRegion,ty::Region)>
+{
+    /*!
+     * Searches the region constraints created since `snapshot` was started
+     * and checks to determine whether any of the skolemized regions created
+     * in `skol_map` would "escape" -- meaning that they are related to
+     * other regions in some way. If so, the higher-ranked subtyping doesn't
+     * hold. See `README.md` for more details.
+     */
+
+    debug!("leak_check: skol_map={:?}",
+           skol_map);
+
+    let new_vars = infcx.region_vars_confined_to_snapshot(snapshot);
+    for (&skol_br, &skol) in skol_map {
+        let tainted = infcx.tainted_regions(snapshot, skol);
+        for &tainted_region in &tainted {
+            // Each skolemized region should only be relatable to itself
+            // or new variables:
+            match tainted_region {
+                ty::ReVar(vid) => {
+                    if new_vars.iter().any(|&x| x == vid) { continue; }
+                }
+                _ => {
+                    if tainted_region == skol { continue; }
+                }
+            };
+
+            debug!("{:?} (which replaced {:?}) is tainted by {:?}",
+                   skol,
+                   skol_br,
+                   tainted_region);
+
+            // A is not as polymorphic as B:
+            return Err((skol_br, tainted_region));
+        }
+    }
+    Ok(())
+}
+
+/// This code converts from skolemized regions back to late-bound
+/// regions. It works by replacing each region in the taint set of a
+/// skolemized region with a bound-region. The bound region will be bound
+/// by the outer-most binder in `value`; the caller must ensure that there is
+/// such a binder and it is the right place.
+///
+/// This routine is only intended to be used when the leak-check has
+/// passed; currently, it's used in the trait matching code to create
+/// a set of nested obligations from an impl that matches against
+/// something higher-ranked.  More details can be found in
+/// `librustc/middle/traits/README.md`.
+///
+/// As a brief example, consider the obligation `for<'a> Fn(&'a int)
+/// -> &'a int`, and the impl:
+///
+///     impl<A,R> Fn<A,R> for SomethingOrOther
+///         where A : Clone
+///     { ... }
+///
+/// Here we will have replaced `'a` with a skolemized region
+/// `'0`. This means that our substitution will be `{A=>&'0
+/// int, R=>&'0 int}`.
+///
+/// When we apply the substitution to the bounds, we will wind up with
+/// `&'0 int : Clone` as a predicate. As a last step, we then go and
+/// replace `'0` with a late-bound region `'a`.  The depth is matched
+/// to the depth of the predicate, in this case 1, so that the final
+/// predicate is `for<'a> &'a int : Clone`.
+pub fn plug_leaks<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
+                             skol_map: SkolemizationMap,
+                             snapshot: &CombinedSnapshot,
+                             value: &T)
+                             -> T
+    where T : TypeFoldable<'tcx>
+{
+    debug_assert!(leak_check(infcx, &skol_map, snapshot).is_ok());
+
+    debug!("plug_leaks(skol_map={:?}, value={:?})",
+           skol_map,
+           value);
+
+    // Compute a mapping from the "taint set" of each skolemized
+    // region back to the `ty::BoundRegion` that it originally
+    // represented. Because `leak_check` passed, we know that
+    // these taint sets are mutually disjoint.
+    let inv_skol_map: FnvHashMap<ty::Region, ty::BoundRegion> =
+        skol_map
+        .into_iter()
+        .flat_map(|(skol_br, skol)| {
+            infcx.tainted_regions(snapshot, skol)
+                .into_iter()
+                .map(move |tainted_region| (tainted_region, skol_br))
+        })
+        .collect();
+
+    debug!("plug_leaks: inv_skol_map={:?}",
+           inv_skol_map);
+
+    // Remove any instantiated type variables from `value`; those can hide
+    // references to regions from the `fold_regions` code below.
+    let value = infcx.resolve_type_vars_if_possible(value);
+
+    // Map any skolemization byproducts back to a late-bound
+    // region. Put that late-bound region at whatever the outermost
+    // binder is that we encountered in `value`. The caller is
+    // responsible for ensuring that (a) `value` contains at least one
+    // binder and (b) that binder is the one we want to use.
+    let result = infcx.tcx.fold_regions(&value, &mut false, |r, current_depth| {
+        match inv_skol_map.get(&r) {
+            None => r,
+            Some(br) => {
+                // It is the responsibility of the caller to ensure
+                // that each skolemized region appears within a
+                // binder. In practice, this routine is only used by
+                // trait checking, and all of the skolemized regions
+                // appear inside predicates, which always have
+                // binders, so this assert is satisfied.
+                assert!(current_depth > 1);
+
+                ty::ReLateBound(ty::DebruijnIndex::new(current_depth - 1), br.clone())
+            }
+        }
+    });
+
+    debug!("plug_leaks: result={:?}",
+           result);
+
+    result
+}
diff --git a/src/librustc/infer/lattice.rs b/src/librustc/infer/lattice.rs
new file mode 100644 (file)
index 0000000..17ad32e
--- /dev/null
@@ -0,0 +1,84 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! # Lattice Variables
+//!
+//! This file contains generic code for operating on inference variables
+//! that are characterized by an upper- and lower-bound.  The logic and
+//! reasoning is explained in detail in the large comment in `infer.rs`.
+//!
+//! The code in here is defined quite generically so that it can be
+//! applied both to type variables, which represent types being inferred,
+//! and fn variables, which represent function types being inferred.
+//! It may eventually be applied to their types as well, who knows.
+//! In some cases, the functions are also generic with respect to the
+//! operation on the lattice (GLB vs LUB).
+//!
+//! Although all the functions are generic, we generally write the
+//! comments in a way that is specific to type variables and the LUB
+//! operation.  It's just easier that way.
+//!
+//! In general all of the functions are defined parametrically
+//! over a `LatticeValue`, which is a value defined with respect to
+//! a lattice.
+
+use super::combine;
+use super::InferCtxt;
+
+use ty::TyVar;
+use ty::{self, Ty};
+use ty::relate::{RelateResult, TypeRelation};
+
+pub trait LatticeDir<'f,'tcx> : TypeRelation<'f,'tcx> {
+    fn infcx(&self) -> &'f InferCtxt<'f, 'tcx>;
+
+    // Relates the type `v` to `a` and `b` such that `v` represents
+    // the LUB/GLB of `a` and `b` as appropriate.
+    fn relate_bound(&self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()>;
+}
+
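+/// Common LUB/GLB logic for types: identical types are returned unchanged;
+/// if either side is an unresolved type variable, a fresh (possibly
+/// diverging) variable is created and related to both sides via
+/// `relate_bound`; otherwise the structural `super_combine_tys` path is taken.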
+pub fn super_lattice_tys<'a,'tcx,L:LatticeDir<'a,'tcx>>(this: &mut L,
+                                                        a: Ty<'tcx>,
+                                                        b: Ty<'tcx>)
+                                                        -> RelateResult<'tcx, Ty<'tcx>>
+    where 'tcx: 'a
+{
+    debug!("{}.lattice_tys({:?}, {:?})",
+           this.tag(),
+           a,
+           b);
+
+    if a == b {
+        return Ok(a);
+    }
+
+    let infcx = this.infcx();
+    let a = infcx.type_variables.borrow_mut().replace_if_possible(a);
+    let b = infcx.type_variables.borrow_mut().replace_if_possible(b);
+    match (&a.sty, &b.sty) {
+        (&ty::TyInfer(TyVar(..)), &ty::TyInfer(TyVar(..)))
+            if infcx.type_var_diverges(a) && infcx.type_var_diverges(b) => {
+            let v = infcx.next_diverging_ty_var();
+            this.relate_bound(v, a, b)?;
+            Ok(v)
+        }
+
+        (&ty::TyInfer(TyVar(..)), _) |
+        (_, &ty::TyInfer(TyVar(..))) => {
+            let v = infcx.next_ty_var();
+            this.relate_bound(v, a, b)?;
+            Ok(v)
+        }
+
+        _ => {
+            combine::super_combine_tys(this.infcx(), this, a, b)
+        }
+    }
+}
diff --git a/src/librustc/infer/lub.rs b/src/librustc/infer/lub.rs
new file mode 100644 (file)
index 0000000..32b2fe9
--- /dev/null
@@ -0,0 +1,90 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::combine::CombineFields;
+use super::higher_ranked::HigherRankedRelations;
+use super::InferCtxt;
+use super::lattice::{self, LatticeDir};
+use super::Subtype;
+
+use ty::{self, Ty, TyCtxt};
+use ty::relate::{Relate, RelateResult, TypeRelation};
+use traits::PredicateObligations;
+
+/// "Least upper bound" (common supertype)
+pub struct Lub<'a, 'tcx: 'a> {
+    fields: CombineFields<'a, 'tcx>
+}
+
+impl<'a, 'tcx> Lub<'a, 'tcx> {
+    pub fn new(fields: CombineFields<'a, 'tcx>) -> Lub<'a, 'tcx> {
+        Lub { fields: fields }
+    }
+
+    pub fn obligations(self) -> PredicateObligations<'tcx> {
+        self.fields.obligations
+    }
+}
+
+impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Lub<'a, 'tcx> {
+    fn tag(&self) -> &'static str { "Lub" }
+
+    fn tcx(&self) -> &'a TyCtxt<'tcx> { self.fields.tcx() }
+
+    fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
+
+    fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
+                                               variance: ty::Variance,
+                                               a: &T,
+                                               b: &T)
+                                               -> RelateResult<'tcx, T>
+    {
+        match variance {
+            ty::Invariant => self.fields.equate().relate(a, b),
+            ty::Covariant => self.relate(a, b),
+            ty::Bivariant => self.fields.bivariate().relate(a, b),
+            ty::Contravariant => self.fields.glb().relate(a, b),
+        }
+    }
+
+    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+        lattice::super_lattice_tys(self, a, b)
+    }
+
+    fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
+        debug!("{}.regions({:?}, {:?})",
+               self.tag(),
+               a,
+               b);
+
+        let origin = Subtype(self.fields.trace.clone());
+        Ok(self.fields.infcx.region_vars.lub_regions(origin, a, b))
+    }
+
+    fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
+                  -> RelateResult<'tcx, ty::Binder<T>>
+        where T: Relate<'a, 'tcx>
+    {
+        self.fields.higher_ranked_lub(a, b)
+    }
+}
+
+impl<'a, 'tcx> LatticeDir<'a,'tcx> for Lub<'a, 'tcx> {
+    fn infcx(&self) -> &'a InferCtxt<'a,'tcx> {
+        self.fields.infcx
+    }
+
+    fn relate_bound(&self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()> {
+        let mut sub = self.fields.sub();
+        sub.relate(&a, &v)?;
+        sub.relate(&b, &v)?;
+        Ok(())
+    }
+}
diff --git a/src/librustc/infer/mod.rs b/src/librustc/infer/mod.rs
new file mode 100644 (file)
index 0000000..4c1216a
--- /dev/null
@@ -0,0 +1,1669 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! See the Book for more information.
+
+pub use self::LateBoundRegionConversionTime::*;
+pub use self::RegionVariableOrigin::*;
+pub use self::SubregionOrigin::*;
+pub use self::ValuePairs::*;
+pub use ty::IntVarValue;
+pub use self::freshen::TypeFreshener;
+pub use self::region_inference::{GenericKind, VerifyBound};
+
+use hir::def_id::DefId;
+use hir;
+use middle::free_region::FreeRegionMap;
+use middle::mem_categorization as mc;
+use middle::mem_categorization::McResult;
+use middle::region::CodeExtent;
+use ty::subst;
+use ty::subst::Substs;
+use ty::subst::Subst;
+use ty::adjustment;
+use ty::{TyVid, IntVid, FloatVid};
+use ty::{self, Ty, TyCtxt};
+use ty::error::{ExpectedFound, TypeError, UnconstrainedNumeric};
+use ty::fold::{TypeFolder, TypeFoldable};
+use ty::relate::{Relate, RelateResult, TypeRelation};
+use traits::{self, PredicateObligations, ProjectionMode};
+use rustc_data_structures::unify::{self, UnificationTable};
+use std::cell::{RefCell, Ref};
+use std::fmt;
+use syntax::ast;
+use syntax::codemap;
+use syntax::codemap::{Span, DUMMY_SP};
+use syntax::errors::DiagnosticBuilder;
+use util::nodemap::{FnvHashMap, FnvHashSet, NodeMap};
+
+use self::combine::CombineFields;
+use self::region_inference::{RegionVarBindings, RegionSnapshot};
+use self::error_reporting::ErrorReporting;
+use self::unify_key::ToType;
+
+pub mod bivariate;
+pub mod combine;
+pub mod equate;
+pub mod error_reporting;
+pub mod glb;
+mod higher_ranked;
+pub mod lattice;
+pub mod lub;
+pub mod region_inference;
+pub mod resolve;
+mod freshen;
+pub mod sub;
+pub mod type_variable;
+pub mod unify_key;
+
+pub struct InferOk<'tcx, T> {
+    pub value: T,
+    pub obligations: PredicateObligations<'tcx>,
+}
+pub type InferResult<'tcx, T> = Result<InferOk<'tcx, T>, TypeError<'tcx>>;
+
+pub type Bound<T> = Option<T>;
+pub type UnitResult<'tcx> = RelateResult<'tcx, ()>; // "unify result"
+pub type FixupResult<T> = Result<T, FixupError>; // "fixup result"
+
+pub struct InferCtxt<'a, 'tcx: 'a> {
+    pub tcx: &'a TyCtxt<'tcx>,
+
+    pub tables: &'a RefCell<ty::Tables<'tcx>>,
+
+    // We instantiate UnificationTable with bounds<Ty> because the
+    // types that might instantiate a general type variable have an
+    // order, represented by its upper and lower bounds.
+    type_variables: RefCell<type_variable::TypeVariableTable<'tcx>>,
+
+    // Map from integral variable to the kind of integer it represents
+    int_unification_table: RefCell<UnificationTable<ty::IntVid>>,
+
+    // Map from floating variable to the kind of float it represents
+    float_unification_table: RefCell<UnificationTable<ty::FloatVid>>,
+
+    // For region variables.
+    region_vars: RegionVarBindings<'a, 'tcx>,
+
+    pub parameter_environment: ty::ParameterEnvironment<'a, 'tcx>,
+
+    // the set of predicates on which errors have been reported, to
+    // avoid reporting the same error twice.
+    pub reported_trait_errors: RefCell<FnvHashSet<traits::TraitErrorKey<'tcx>>>,
+
+    // This is a temporary field used for toggling on normalization in the inference context,
+    // as we move towards the approach described here:
+    // https://internals.rust-lang.org/t/flattening-the-contexts-for-fun-and-profit/2293
+    // At a point sometime in the future normalization will be done by the typing context
+    // directly.
+    normalize: bool,
+
+    // Sadly, the behavior of projection varies a bit depending on the
+    // stage of compilation. The specifics are given in the
+    // documentation for `ProjectionMode`.
+    projection_mode: ProjectionMode,
+
+    err_count_on_creation: usize,
+}
+
+/// A map returned by `skolemize_late_bound_regions()` indicating the skolemized
+/// region that each late-bound region was replaced with.
+pub type SkolemizationMap = FnvHashMap<ty::BoundRegion,ty::Region>;
+
+/// Why did we require that the two types be related?
+///
+/// See `error_reporting.rs` for more details
+#[derive(Clone, Copy, Debug)]
+pub enum TypeOrigin {
+    // Not yet categorized in a better way
+    Misc(Span),
+
+    // Checking that method of impl is compatible with trait
+    MethodCompatCheck(Span),
+
+    // Checking that this expression can be assigned where it needs to be
+    // FIXME(eddyb) #11161 is the original Expr required?
+    ExprAssignable(Span),
+
+    // Relating trait refs when resolving vtables
+    RelateTraitRefs(Span),
+
+    // Relating self types when resolving vtables
+    RelateSelfType(Span),
+
+    // Relating trait type parameters to those found in impl etc
+    RelateOutputImplTypes(Span),
+
+    // Computing common supertype in the arms of a match expression
+    MatchExpressionArm(Span, Span, hir::MatchSource),
+
+    // Computing common supertype in an if expression
+    IfExpression(Span),
+
+    // Computing common supertype of an if expression with no else counter-part
+    IfExpressionWithNoElse(Span),
+
+    // Computing common supertype in a range expression
+    RangeExpression(Span),
+
+    // `where a == b`
+    EquatePredicate(Span),
+}
+
+impl TypeOrigin {
+    fn as_str(&self) -> &'static str {
+        match self {
+            &TypeOrigin::Misc(_) |
+            &TypeOrigin::RelateSelfType(_) |
+            &TypeOrigin::RelateOutputImplTypes(_) |
+            &TypeOrigin::ExprAssignable(_) => "mismatched types",
+            &TypeOrigin::RelateTraitRefs(_) => "mismatched traits",
+            &TypeOrigin::MethodCompatCheck(_) => "method not compatible with trait",
+            &TypeOrigin::MatchExpressionArm(_, _, source) => match source {
+                hir::MatchSource::IfLetDesugar{..} => "`if let` arms have incompatible types",
+                _ => "match arms have incompatible types",
+            },
+            &TypeOrigin::IfExpression(_) => "if and else have incompatible types",
+            &TypeOrigin::IfExpressionWithNoElse(_) => "if may be missing an else clause",
+            &TypeOrigin::RangeExpression(_) => "start and end of range have incompatible types",
+            &TypeOrigin::EquatePredicate(_) => "equality predicate not satisfied",
+        }
+    }
+}
+
+impl fmt::Display for TypeOrigin {
+    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(),fmt::Error> {
+        fmt::Display::fmt(self.as_str(), f)
+    }
+}
+
+/// See `error_reporting.rs` for more details
+#[derive(Clone, Debug)]
+pub enum ValuePairs<'tcx> {
+    Types(ExpectedFound<Ty<'tcx>>),
+    TraitRefs(ExpectedFound<ty::TraitRef<'tcx>>),
+    PolyTraitRefs(ExpectedFound<ty::PolyTraitRef<'tcx>>),
+}
+
+/// The trace designates the path through inference that we took to
+/// encounter an error or subtyping constraint.
+///
+/// See `error_reporting.rs` for more details.
+#[derive(Clone)]
+pub struct TypeTrace<'tcx> {
+    origin: TypeOrigin,
+    values: ValuePairs<'tcx>,
+}
+
+/// The origin of a `r1 <= r2` constraint.
+///
+/// See `error_reporting.rs` for more details
+#[derive(Clone, Debug)]
+pub enum SubregionOrigin<'tcx> {
+    // Arose from a subtyping relation
+    Subtype(TypeTrace<'tcx>),
+
+    // Stack-allocated closures cannot outlive innermost loop
+    // or function so as to ensure we only require finite stack
+    InfStackClosure(Span),
+
+    // Invocation of closure must be within its lifetime
+    InvokeClosure(Span),
+
+    // Dereference of reference must be within its lifetime
+    DerefPointer(Span),
+
+    // Closure bound must not outlive captured free variables
+    FreeVariable(Span, ast::NodeId),
+
+    // Index into slice must be within its lifetime
+    IndexSlice(Span),
+
+    // When casting `&'a T` to an `&'b Trait` object,
+    // relating `'a` to `'b`
+    RelateObjectBound(Span),
+
+    // Some type parameter was instantiated with the given type,
+    // and that type must outlive some region.
+    RelateParamBound(Span, Ty<'tcx>),
+
+    // The given region parameter was instantiated with a region
+    // that must outlive some other region.
+    RelateRegionParamBound(Span),
+
+    // A bound placed on type parameters that states that they must outlive
+    // the moment of their instantiation.
+    RelateDefaultParamBound(Span, Ty<'tcx>),
+
+    // Creating a pointer `b` to contents of another reference
+    Reborrow(Span),
+
+    // Creating a pointer `b` to contents of an upvar
+    ReborrowUpvar(Span, ty::UpvarId),
+
+    // Data with type `Ty<'tcx>` was borrowed
+    DataBorrowed(Ty<'tcx>, Span),
+
+    // (&'a &'b T) where a >= b
+    ReferenceOutlivesReferent(Ty<'tcx>, Span),
+
+    // Type or region parameters must be in scope.
+    ParameterInScope(ParameterOrigin, Span),
+
+    // The type T of an expression E must outlive the lifetime for E.
+    ExprTypeIsNotInScope(Ty<'tcx>, Span),
+
+    // A `ref b` whose region does not enclose the decl site
+    BindingTypeIsNotValidAtDecl(Span),
+
+    // Regions appearing in a method receiver must outlive method call
+    CallRcvr(Span),
+
+    // Regions appearing in a function argument must outlive func call
+    CallArg(Span),
+
+    // Region in return type of invoked fn must enclose call
+    CallReturn(Span),
+
+    // Operands must be in scope
+    Operand(Span),
+
+    // Region resulting from a `&` expr must enclose the `&` expr
+    AddrOf(Span),
+
+    // An auto-borrow that does not enclose the expr where it occurs
+    AutoBorrow(Span),
+
+    // Region constraint arriving from destructor safety
+    SafeDestructor(Span),
+}
+
+/// Places that type/region parameters can appear.
+#[derive(Clone, Copy, Debug)]
+pub enum ParameterOrigin {
+    Path, // foo::bar
+    MethodCall, // foo.bar() <-- parameters on impl providing bar()
+    OverloadedOperator, // a + b when overloaded
+    OverloadedDeref, // *a when overloaded
+}
+
+/// Times when we replace late-bound regions with variables:
+#[derive(Clone, Copy, Debug)]
+pub enum LateBoundRegionConversionTime {
+    /// when a fn is called
+    FnCall,
+
+    /// when two higher-ranked types are compared
+    HigherRankedType,
+
+    /// when projecting an associated type
+    AssocTypeProjection(ast::Name),
+}
+
+/// Reasons to create a region inference variable
+///
+/// See `error_reporting.rs` for more details
+#[derive(Clone, Debug)]
+pub enum RegionVariableOrigin {
+    // Region variables created for ill-categorized reasons,
+    // mostly indicates places in need of refactoring
+    MiscVariable(Span),
+
+    // Regions created by a `&P` or `[...]` pattern
+    PatternRegion(Span),
+
+    // Regions created by `&` operator
+    AddrOfRegion(Span),
+
+    // Regions created as part of an autoref of a method receiver
+    Autoref(Span),
+
+    // Regions created as part of an automatic coercion
+    Coercion(Span),
+
+    // Region variables created as the values for early-bound regions
+    EarlyBoundRegion(Span, ast::Name),
+
+    // Region variables created for bound regions
+    // in a function or method that is called
+    LateBoundRegion(Span, ty::BoundRegion, LateBoundRegionConversionTime),
+
+    UpvarRegion(ty::UpvarId, Span),
+
+    BoundRegionInCoherence(ast::Name),
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum FixupError {
+    UnresolvedIntTy(IntVid),
+    UnresolvedFloatTy(FloatVid),
+    UnresolvedTy(TyVid)
+}
+
+pub fn fixup_err_to_string(f: FixupError) -> String {
+    use self::FixupError::*;
+
+    match f {
+      UnresolvedIntTy(_) => {
+          "cannot determine the type of this integer; add a suffix to \
+           specify the type explicitly".to_string()
+      }
+      UnresolvedFloatTy(_) => {
+          "cannot determine the type of this number; add a suffix to specify \
+           the type explicitly".to_string()
+      }
+      UnresolvedTy(_) => "unconstrained type".to_string(),
+    }
+}
+
+pub fn new_infer_ctxt<'a, 'tcx>(tcx: &'a TyCtxt<'tcx>,
+                                tables: &'a RefCell<ty::Tables<'tcx>>,
+                                param_env: Option<ty::ParameterEnvironment<'a, 'tcx>>,
+                                projection_mode: ProjectionMode)
+                                -> InferCtxt<'a, 'tcx> {
+    InferCtxt {
+        tcx: tcx,
+        tables: tables,
+        type_variables: RefCell::new(type_variable::TypeVariableTable::new()),
+        int_unification_table: RefCell::new(UnificationTable::new()),
+        float_unification_table: RefCell::new(UnificationTable::new()),
+        region_vars: RegionVarBindings::new(tcx),
+        parameter_environment: param_env.unwrap_or(tcx.empty_parameter_environment()),
+        reported_trait_errors: RefCell::new(FnvHashSet()),
+        normalize: false,
+        projection_mode: projection_mode,
+        err_count_on_creation: tcx.sess.err_count()
+    }
+}
+
+pub fn normalizing_infer_ctxt<'a, 'tcx>(tcx: &'a TyCtxt<'tcx>,
+                                        tables: &'a RefCell<ty::Tables<'tcx>>,
+                                        projection_mode: ProjectionMode)
+                                        -> InferCtxt<'a, 'tcx> {
+    let mut infcx = new_infer_ctxt(tcx, tables, None, projection_mode);
+    infcx.normalize = true;
+    infcx
+}
+
+pub fn mk_subty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
+                          a_is_expected: bool,
+                          origin: TypeOrigin,
+                          a: Ty<'tcx>,
+                          b: Ty<'tcx>)
+    -> InferResult<'tcx, ()>
+{
+    debug!("mk_subty({:?} <: {:?})", a, b);
+    cx.sub_types(a_is_expected, origin, a, b)
+}
+
+pub fn can_mk_subty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>, a: Ty<'tcx>, b: Ty<'tcx>)
+    -> UnitResult<'tcx>
+{
+    debug!("can_mk_subty({:?} <: {:?})", a, b);
+    cx.probe(|_| {
+        let trace = TypeTrace {
+            origin: TypeOrigin::Misc(codemap::DUMMY_SP),
+            values: Types(expected_found(true, a, b))
+        };
+        cx.sub(true, trace, &a, &b).map(|_| ())
+    })
+}
+
+pub fn can_mk_eqty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>, a: Ty<'tcx>, b: Ty<'tcx>)
+    -> UnitResult<'tcx>
+{
+    cx.can_equate(&a, &b)
+}
+
+pub fn mk_subr<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
+                         origin: SubregionOrigin<'tcx>,
+                         a: ty::Region,
+                         b: ty::Region) {
+    debug!("mk_subr({:?} <: {:?})", a, b);
+    let snapshot = cx.region_vars.start_snapshot();
+    cx.region_vars.make_subregion(origin, a, b);
+    cx.region_vars.commit(snapshot);
+}
+
+pub fn mk_eqty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
+                         a_is_expected: bool,
+                         origin: TypeOrigin,
+                         a: Ty<'tcx>,
+                         b: Ty<'tcx>)
+    -> InferResult<'tcx, ()>
+{
+    debug!("mk_eqty({:?} <: {:?})", a, b);
+    cx.eq_types(a_is_expected, origin, a, b)
+}
+
+pub fn mk_eq_trait_refs<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
+                                  a_is_expected: bool,
+                                  origin: TypeOrigin,
+                                  a: ty::TraitRef<'tcx>,
+                                  b: ty::TraitRef<'tcx>)
+    -> InferResult<'tcx, ()>
+{
+    debug!("mk_eq_trait_refs({:?} = {:?})", a, b);
+    cx.eq_trait_refs(a_is_expected, origin, a, b)
+}
+
+pub fn mk_sub_poly_trait_refs<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
+                                        a_is_expected: bool,
+                                        origin: TypeOrigin,
+                                        a: ty::PolyTraitRef<'tcx>,
+                                        b: ty::PolyTraitRef<'tcx>)
+    -> InferResult<'tcx, ()>
+{
+    debug!("mk_sub_poly_trait_refs({:?} <: {:?})", a, b);
+    cx.sub_poly_trait_refs(a_is_expected, origin, a, b)
+}
+
+pub fn mk_eq_impl_headers<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
+                                    a_is_expected: bool,
+                                    origin: TypeOrigin,
+                                    a: &ty::ImplHeader<'tcx>,
+                                    b: &ty::ImplHeader<'tcx>)
+    -> InferResult<'tcx, ()>
+{
+    debug!("mk_eq_impl_header({:?} = {:?})", a, b);
+    match (a.trait_ref, b.trait_ref) {
+        (Some(a_ref), Some(b_ref)) => mk_eq_trait_refs(cx, a_is_expected, origin, a_ref, b_ref),
+        (None, None) => mk_eqty(cx, a_is_expected, origin, a.self_ty, b.self_ty),
+        _ => bug!("mk_eq_impl_headers given mismatched impl kinds"),
+    }
+}
+
+fn expected_found<T>(a_is_expected: bool,
+                     a: T,
+                     b: T)
+                     -> ExpectedFound<T>
+{
+    if a_is_expected {
+        ExpectedFound {expected: a, found: b}
+    } else {
+        ExpectedFound {expected: b, found: a}
+    }
+}
+
+#[must_use = "once you start a snapshot, you should always consume it"]
+pub struct CombinedSnapshot {
+    type_snapshot: type_variable::Snapshot,
+    int_snapshot: unify::Snapshot<ty::IntVid>,
+    float_snapshot: unify::Snapshot<ty::FloatVid>,
+    region_vars_snapshot: RegionSnapshot,
+}
+
+// NOTE: Callable from trans only!
+pub fn normalize_associated_type<'tcx,T>(tcx: &TyCtxt<'tcx>, value: &T) -> T
+    where T : TypeFoldable<'tcx>
+{
+    debug!("normalize_associated_type(t={:?})", value);
+
+    let value = tcx.erase_regions(value);
+
+    if !value.has_projection_types() {
+        return value;
+    }
+
+    let infcx = new_infer_ctxt(tcx, &tcx.tables, None, ProjectionMode::Any);
+    let mut selcx = traits::SelectionContext::new(&infcx);
+    let cause = traits::ObligationCause::dummy();
+    let traits::Normalized { value: result, obligations } =
+        traits::normalize(&mut selcx, cause, &value);
+
+    debug!("normalize_associated_type: result={:?} obligations={:?}",
+           result,
+           obligations);
+
+    let mut fulfill_cx = traits::FulfillmentContext::new();
+
+    for obligation in obligations {
+        fulfill_cx.register_predicate_obligation(&infcx, obligation);
+    }
+
+    drain_fulfillment_cx_or_panic(DUMMY_SP, &infcx, &mut fulfill_cx, &result)
+}
+
+pub fn drain_fulfillment_cx_or_panic<'a,'tcx,T>(span: Span,
+                                                infcx: &InferCtxt<'a,'tcx>,
+                                                fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
+                                                result: &T)
+                                                -> T
+    where T : TypeFoldable<'tcx>
+{
+    match drain_fulfillment_cx(infcx, fulfill_cx, result) {
+        Ok(v) => v,
+        Err(errors) => {
+            span_bug!(
+                span,
+                "Encountered errors `{:?}` fulfilling during trans",
+                errors);
+        }
+    }
+}
+
+/// Finishes processing any obligations that remain in the fulfillment
+/// context, and then "freshens" and returns `result`. This is
+/// primarily used during normalization and other cases where
+/// processing the obligations in `fulfill_cx` may cause type
+/// inference variables that appear in `result` to be unified, and
+/// hence we need to process those obligations to get the complete
+/// picture of the type.
+pub fn drain_fulfillment_cx<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
+                                       fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
+                                       result: &T)
+                                       -> Result<T,Vec<traits::FulfillmentError<'tcx>>>
+    where T : TypeFoldable<'tcx>
+{
+    debug!("drain_fulfillment_cx(result={:?})",
+           result);
+
+    // In principle, we only need to do this so long as `result`
+    // contains unbound type parameters. It could be a slight
+    // optimization to stop iterating early.
+    match fulfill_cx.select_all_or_error(infcx) {
+        Ok(()) => { }
+        Err(errors) => {
+            return Err(errors);
+        }
+    }
+
+    let result = infcx.resolve_type_vars_if_possible(result);
+    Ok(infcx.tcx.erase_regions(&result))
+}
+
+impl<'tcx, T> InferOk<'tcx, T> {
+    fn unit(self) -> InferOk<'tcx, ()> {
+        InferOk { value: (), obligations: self.obligations }
+    }
+}
+
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+    pub fn projection_mode(&self) -> ProjectionMode {
+        self.projection_mode
+    }
+
+    pub fn freshen<T:TypeFoldable<'tcx>>(&self, t: T) -> T {
+        t.fold_with(&mut self.freshener())
+    }
+
+    pub fn type_var_diverges(&'a self, ty: Ty) -> bool {
+        match ty.sty {
+            ty::TyInfer(ty::TyVar(vid)) => self.type_variables.borrow().var_diverges(vid),
+            _ => false
+        }
+    }
+
+    pub fn freshener<'b>(&'b self) -> TypeFreshener<'b, 'tcx> {
+        freshen::TypeFreshener::new(self)
+    }
+
+    pub fn type_is_unconstrained_numeric(&'a self, ty: Ty) -> UnconstrainedNumeric {
+        use ty::error::UnconstrainedNumeric::Neither;
+        use ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
+        match ty.sty {
+            ty::TyInfer(ty::IntVar(vid)) => {
+                if self.int_unification_table.borrow_mut().has_value(vid) {
+                    Neither
+                } else {
+                    UnconstrainedInt
+                }
+            },
+            ty::TyInfer(ty::FloatVar(vid)) => {
+                if self.float_unification_table.borrow_mut().has_value(vid) {
+                    Neither
+                } else {
+                    UnconstrainedFloat
+                }
+            },
+            _ => Neither,
+        }
+    }
+
+    /// Returns a type variable's default fallback if any exists. A default
+    /// must be attached to the variable when it is created; if it was created
+    /// without a default, this will return `None`.
+    ///
+    /// This code does not apply to integral or floating point variables,
+    /// only to type variables with user-declared defaults.
+    ///
+    /// See `new_ty_var_with_default` to create a type variable with a default.
+    /// See `type_variable::Default` for details about what a default entails.
+    pub fn default(&self, ty: Ty<'tcx>) -> Option<type_variable::Default<'tcx>> {
+        match ty.sty {
+            ty::TyInfer(ty::TyVar(vid)) => self.type_variables.borrow().default(vid),
+            _ => None
+        }
+    }
+
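+    /// Returns all type, integer, and float inference variables that are
+    /// still unresolved, each wrapped as a `Ty`.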
+    pub fn unsolved_variables(&self) -> Vec<ty::Ty<'tcx>> {
+        let mut variables = Vec::new();
+
+        let unbound_ty_vars = self.type_variables
+                                  .borrow_mut()
+                                  .unsolved_variables()
+                                  .into_iter()
+                                  .map(|t| self.tcx.mk_var(t));
+
+        let unbound_int_vars = self.int_unification_table
+                                   .borrow_mut()
+                                   .unsolved_variables()
+                                   .into_iter()
+                                   .map(|v| self.tcx.mk_int_var(v));
+
+        let unbound_float_vars = self.float_unification_table
+                                     .borrow_mut()
+                                     .unsolved_variables()
+                                     .into_iter()
+                                     .map(|v| self.tcx.mk_float_var(v));
+
+        variables.extend(unbound_ty_vars);
+        variables.extend(unbound_int_vars);
+        variables.extend(unbound_float_vars);
+
+        return variables;
+    }
+
+    fn combine_fields(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>)
+        -> CombineFields<'a, 'tcx>
+    {
+        CombineFields {
+            infcx: self,
+            a_is_expected: a_is_expected,
+            trace: trace,
+            cause: None,
+            obligations: PredicateObligations::new(),
+        }
+    }
+
+    pub fn equate<T>(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>, a: &T, b: &T)
+        -> InferResult<'tcx, T>
+        where T: Relate<'a, 'tcx>
+    {
+        let mut equate = self.combine_fields(a_is_expected, trace).equate();
+        let result = equate.relate(a, b);
+        result.map(|t| InferOk { value: t, obligations: equate.obligations() })
+    }
+
+    pub fn sub<T>(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>, a: &T, b: &T)
+        -> InferResult<'tcx, T>
+        where T: Relate<'a, 'tcx>
+    {
+        let mut sub = self.combine_fields(a_is_expected, trace).sub();
+        let result = sub.relate(a, b);
+        result.map(|t| InferOk { value: t, obligations: sub.obligations() })
+    }
+
+    pub fn lub<T>(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>, a: &T, b: &T)
+        -> InferResult<'tcx, T>
+        where T: Relate<'a, 'tcx>
+    {
+        let mut lub = self.combine_fields(a_is_expected, trace).lub();
+        let result = lub.relate(a, b);
+        result.map(|t| InferOk { value: t, obligations: lub.obligations() })
+    }
+
+    pub fn glb<T>(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>, a: &T, b: &T)
+        -> InferResult<'tcx, T>
+        where T: Relate<'a, 'tcx>
+    {
+        let mut glb = self.combine_fields(a_is_expected, trace).glb();
+        let result = glb.relate(a, b);
+        result.map(|t| InferOk { value: t, obligations: glb.obligations() })
+    }
+
+    fn start_snapshot(&self) -> CombinedSnapshot {
+        CombinedSnapshot {
+            type_snapshot: self.type_variables.borrow_mut().snapshot(),
+            int_snapshot: self.int_unification_table.borrow_mut().snapshot(),
+            float_snapshot: self.float_unification_table.borrow_mut().snapshot(),
+            region_vars_snapshot: self.region_vars.start_snapshot(),
+        }
+    }
+
+    fn rollback_to(&self, cause: &str, snapshot: CombinedSnapshot) {
+        debug!("rollback_to(cause={})", cause);
+        let CombinedSnapshot { type_snapshot,
+                               int_snapshot,
+                               float_snapshot,
+                               region_vars_snapshot } = snapshot;
+
+        self.type_variables
+            .borrow_mut()
+            .rollback_to(type_snapshot);
+        self.int_unification_table
+            .borrow_mut()
+            .rollback_to(int_snapshot);
+        self.float_unification_table
+            .borrow_mut()
+            .rollback_to(float_snapshot);
+        self.region_vars
+            .rollback_to(region_vars_snapshot);
+    }
+
+    fn commit_from(&self, snapshot: CombinedSnapshot) {
+        debug!("commit_from!");
+        let CombinedSnapshot { type_snapshot,
+                               int_snapshot,
+                               float_snapshot,
+                               region_vars_snapshot } = snapshot;
+
+        self.type_variables
+            .borrow_mut()
+            .commit(type_snapshot);
+        self.int_unification_table
+            .borrow_mut()
+            .commit(int_snapshot);
+        self.float_unification_table
+            .borrow_mut()
+            .commit(float_snapshot);
+        self.region_vars
+            .commit(region_vars_snapshot);
+    }
+
+    /// Execute `f` and commit the bindings
+    pub fn commit_unconditionally<R, F>(&self, f: F) -> R where
+        F: FnOnce() -> R,
+    {
+        debug!("commit()");
+        let snapshot = self.start_snapshot();
+        let r = f();
+        self.commit_from(snapshot);
+        r
+    }
+
+    /// Execute `f` and commit the bindings if closure `f` returns `Ok(_)`
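+    ///
+    /// A hedged usage sketch, modeled on `eq_types` later in this file
+    /// (`infcx`, `origin`, `a`, and `b` are assumed bindings):
+    ///
+    /// ```ignore
+    /// infcx.commit_if_ok(|_| {
+    ///     let trace = TypeTrace::types(origin, true, a, b);
+    ///     infcx.equate(true, trace, &a, &b).map(|ok| ok.unit())
+    /// })
+    /// ```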
+    pub fn commit_if_ok<T, E, F>(&self, f: F) -> Result<T, E> where
+        F: FnOnce(&CombinedSnapshot) -> Result<T, E>
+    {
+        debug!("commit_if_ok()");
+        let snapshot = self.start_snapshot();
+        let r = f(&snapshot);
+        debug!("commit_if_ok() -- r.is_ok() = {}", r.is_ok());
+        match r {
+            Ok(_) => { self.commit_from(snapshot); }
+            Err(_) => { self.rollback_to("commit_if_ok -- error", snapshot); }
+        }
+        r
+    }
+
+    /// Execute `f` and commit only the region bindings if successful.
+    /// The function f must be very careful not to leak any non-region
+    /// variables that get created.
+    pub fn commit_regions_if_ok<T, E, F>(&self, f: F) -> Result<T, E> where
+        F: FnOnce() -> Result<T, E>
+    {
+        debug!("commit_regions_if_ok()");
+        let CombinedSnapshot { type_snapshot,
+                               int_snapshot,
+                               float_snapshot,
+                               region_vars_snapshot } = self.start_snapshot();
+
+        let r = self.commit_if_ok(|_| f());
+
+        debug!("commit_regions_if_ok: rolling back everything but regions");
+
+        // Roll back any non-region bindings - they should be resolved
+        // inside `f`, with, e.g. `resolve_type_vars_if_possible`.
+        self.type_variables
+            .borrow_mut()
+            .rollback_to(type_snapshot);
+        self.int_unification_table
+            .borrow_mut()
+            .rollback_to(int_snapshot);
+        self.float_unification_table
+            .borrow_mut()
+            .rollback_to(float_snapshot);
+
+        // Commit region vars that may escape through resolved types.
+        self.region_vars
+            .commit(region_vars_snapshot);
+
+        r
+    }
+
+    /// Execute `f`, then roll back any bindings it creates
+    pub fn probe<R, F>(&self, f: F) -> R where
+        F: FnOnce(&CombinedSnapshot) -> R,
+    {
+        debug!("probe()");
+        let snapshot = self.start_snapshot();
+        let r = f(&snapshot);
+        self.rollback_to("probe", snapshot);
+        r
+    }
+
+    pub fn add_given(&self,
+                     sub: ty::FreeRegion,
+                     sup: ty::RegionVid)
+    {
+        self.region_vars.add_given(sub, sup);
+    }
+
+    pub fn sub_types(&self,
+                     a_is_expected: bool,
+                     origin: TypeOrigin,
+                     a: Ty<'tcx>,
+                     b: Ty<'tcx>)
+        -> InferResult<'tcx, ()>
+    {
+        debug!("sub_types({:?} <: {:?})", a, b);
+        self.commit_if_ok(|_| {
+            let trace = TypeTrace::types(origin, a_is_expected, a, b);
+            self.sub(a_is_expected, trace, &a, &b).map(|ok| ok.unit())
+        })
+    }
+
+    pub fn eq_types(&self,
+                    a_is_expected: bool,
+                    origin: TypeOrigin,
+                    a: Ty<'tcx>,
+                    b: Ty<'tcx>)
+        -> InferResult<'tcx, ()>
+    {
+        self.commit_if_ok(|_| {
+            let trace = TypeTrace::types(origin, a_is_expected, a, b);
+            self.equate(a_is_expected, trace, &a, &b).map(|ok| ok.unit())
+        })
+    }
+
+    pub fn eq_trait_refs(&self,
+                          a_is_expected: bool,
+                          origin: TypeOrigin,
+                          a: ty::TraitRef<'tcx>,
+                          b: ty::TraitRef<'tcx>)
+        -> InferResult<'tcx, ()>
+    {
+        debug!("eq_trait_refs({:?} <: {:?})",
+               a,
+               b);
+        self.commit_if_ok(|_| {
+            let trace = TypeTrace {
+                origin: origin,
+                values: TraitRefs(expected_found(a_is_expected, a.clone(), b.clone()))
+            };
+            self.equate(a_is_expected, trace, &a, &b).map(|ok| ok.unit())
+        })
+    }
+
+    pub fn sub_poly_trait_refs(&self,
+                               a_is_expected: bool,
+                               origin: TypeOrigin,
+                               a: ty::PolyTraitRef<'tcx>,
+                               b: ty::PolyTraitRef<'tcx>)
+        -> InferResult<'tcx, ()>
+    {
+        debug!("sub_poly_trait_refs({:?} <: {:?})",
+               a,
+               b);
+        self.commit_if_ok(|_| {
+            let trace = TypeTrace {
+                origin: origin,
+                values: PolyTraitRefs(expected_found(a_is_expected, a.clone(), b.clone()))
+            };
+            self.sub(a_is_expected, trace, &a, &b).map(|ok| ok.unit())
+        })
+    }
+
+    pub fn skolemize_late_bound_regions<T>(&self,
+                                           value: &ty::Binder<T>,
+                                           snapshot: &CombinedSnapshot)
+                                           -> (T, SkolemizationMap)
+        where T : TypeFoldable<'tcx>
+    {
+        /*! See `higher_ranked::skolemize_late_bound_regions` */
+
+        higher_ranked::skolemize_late_bound_regions(self, value, snapshot)
+    }
+
+    pub fn leak_check(&self,
+                      skol_map: &SkolemizationMap,
+                      snapshot: &CombinedSnapshot)
+                      -> UnitResult<'tcx>
+    {
+        /*! See `higher_ranked::leak_check` */
+
+        match higher_ranked::leak_check(self, skol_map, snapshot) {
+            Ok(()) => Ok(()),
+            Err((br, r)) => Err(TypeError::RegionsInsufficientlyPolymorphic(br, r))
+        }
+    }
+
+    pub fn plug_leaks<T>(&self,
+                         skol_map: SkolemizationMap,
+                         snapshot: &CombinedSnapshot,
+                         value: &T)
+                         -> T
+        where T : TypeFoldable<'tcx>
+    {
+        /*! See `higher_ranked::plug_leaks` */
+
+        higher_ranked::plug_leaks(self, skol_map, snapshot, value)
+    }
+
+    pub fn equality_predicate(&self,
+                              span: Span,
+                              predicate: &ty::PolyEquatePredicate<'tcx>)
+        -> InferResult<'tcx, ()>
+    {
+        self.commit_if_ok(|snapshot| {
+            let (ty::EquatePredicate(a, b), skol_map) =
+                self.skolemize_late_bound_regions(predicate, snapshot);
+            let origin = TypeOrigin::EquatePredicate(span);
+            let eqty_ok = mk_eqty(self, false, origin, a, b)?;
+            self.leak_check(&skol_map, snapshot).map(|_| eqty_ok.unit())
+        })
+    }
+
+    pub fn region_outlives_predicate(&self,
+                                     span: Span,
+                                     predicate: &ty::PolyRegionOutlivesPredicate)
+        -> UnitResult<'tcx>
+    {
+        self.commit_if_ok(|snapshot| {
+            let (ty::OutlivesPredicate(r_a, r_b), skol_map) =
+                self.skolemize_late_bound_regions(predicate, snapshot);
+            let origin = RelateRegionParamBound(span);
+            let () = mk_subr(self, origin, r_b, r_a); // `b : a` ==> `a <= b`
+            self.leak_check(&skol_map, snapshot)
+        })
+    }
+
+    pub fn next_ty_var_id(&self, diverging: bool) -> TyVid {
+        self.type_variables
+            .borrow_mut()
+            .new_var(diverging, None)
+    }
+
+    pub fn next_ty_var(&self) -> Ty<'tcx> {
+        self.tcx.mk_var(self.next_ty_var_id(false))
+    }
+
+    pub fn next_ty_var_with_default(&self,
+                                    default: Option<type_variable::Default<'tcx>>) -> Ty<'tcx> {
+        let ty_var_id = self.type_variables
+                            .borrow_mut()
+                            .new_var(false, default);
+
+        self.tcx.mk_var(ty_var_id)
+    }
+
+    pub fn next_diverging_ty_var(&self) -> Ty<'tcx> {
+        self.tcx.mk_var(self.next_ty_var_id(true))
+    }
+
+    pub fn next_ty_vars(&self, n: usize) -> Vec<Ty<'tcx>> {
+        (0..n).map(|_i| self.next_ty_var()).collect()
+    }
+
+    pub fn next_int_var_id(&self) -> IntVid {
+        self.int_unification_table
+            .borrow_mut()
+            .new_key(None)
+    }
+
+    pub fn next_float_var_id(&self) -> FloatVid {
+        self.float_unification_table
+            .borrow_mut()
+            .new_key(None)
+    }
+
+    pub fn next_region_var(&self, origin: RegionVariableOrigin) -> ty::Region {
+        ty::ReVar(self.region_vars.new_region_var(origin))
+    }
+
+    pub fn region_vars_for_defs(&self,
+                                span: Span,
+                                defs: &[ty::RegionParameterDef])
+                                -> Vec<ty::Region> {
+        defs.iter()
+            .map(|d| self.next_region_var(EarlyBoundRegion(span, d.name)))
+            .collect()
+    }
+
+    // We have to take `&mut Substs` in order to provide the correct substitutions for defaults
+    // along the way; for this reason we don't return them.
+    pub fn type_vars_for_defs(&self,
+                              span: Span,
+                              space: subst::ParamSpace,
+                              substs: &mut Substs<'tcx>,
+                              defs: &[ty::TypeParameterDef<'tcx>]) {
+
+        for def in defs.iter() {
+            let default = def.default.map(|default| {
+                type_variable::Default {
+                    ty: default.subst_spanned(self.tcx, substs, Some(span)),
+                    origin_span: span,
+                    def_id: def.default_def_id
+                }
+            });
+
+            let ty_var = self.next_ty_var_with_default(default);
+            substs.types.push(space, ty_var);
+        }
+    }
+
+    /// Given a set of generics defined on a type or impl, returns a substitution mapping each
+    /// type/region parameter to a fresh inference variable.
+    pub fn fresh_substs_for_generics(&self,
+                                     span: Span,
+                                     generics: &ty::Generics<'tcx>)
+                                     -> subst::Substs<'tcx>
+    {
+        let type_params = subst::VecPerParamSpace::empty();
+
+        let region_params =
+            generics.regions.map(
+                |d| self.next_region_var(EarlyBoundRegion(span, d.name)));
+
+        let mut substs = subst::Substs::new(type_params, region_params);
+
+        for space in subst::ParamSpace::all().iter() {
+            self.type_vars_for_defs(
+                span,
+                *space,
+                &mut substs,
+                generics.types.get_slice(*space));
+        }
+
+        return substs;
+    }
+
+    /// Given a set of generics defined on a trait, returns a substitution mapping each output
+    /// type/region parameter to a fresh inference variable, and mapping the self type to
+    /// `self_ty`.
+    pub fn fresh_substs_for_trait(&self,
+                                  span: Span,
+                                  generics: &ty::Generics<'tcx>,
+                                  self_ty: Ty<'tcx>)
+                                  -> subst::Substs<'tcx>
+    {
+
+        assert!(generics.types.len(subst::SelfSpace) == 1);
+        assert!(generics.types.len(subst::FnSpace) == 0);
+        assert!(generics.regions.len(subst::SelfSpace) == 0);
+        assert!(generics.regions.len(subst::FnSpace) == 0);
+
+        let type_params = Vec::new();
+
+        let region_param_defs = generics.regions.get_slice(subst::TypeSpace);
+        let regions = self.region_vars_for_defs(span, region_param_defs);
+
+        let mut substs = subst::Substs::new_trait(type_params, regions, self_ty);
+
+        let type_parameter_defs = generics.types.get_slice(subst::TypeSpace);
+        self.type_vars_for_defs(span, subst::TypeSpace, &mut substs, type_parameter_defs);
+
+        return substs;
+    }
+
+    pub fn fresh_bound_region(&self, debruijn: ty::DebruijnIndex) -> ty::Region {
+        self.region_vars.new_bound(debruijn)
+    }
+
+    /// Apply `adjustment` to the type of `expr`
+    pub fn adjust_expr_ty(&self,
+                          expr: &hir::Expr,
+                          adjustment: Option<&adjustment::AutoAdjustment<'tcx>>)
+                          -> Ty<'tcx>
+    {
+        let raw_ty = self.expr_ty(expr);
+        let raw_ty = self.shallow_resolve(raw_ty);
+        let resolve_ty = |ty: Ty<'tcx>| self.resolve_type_vars_if_possible(&ty);
+        raw_ty.adjust(self.tcx,
+                      expr.span,
+                      expr.id,
+                      adjustment,
+                      |method_call| self.tables
+                                        .borrow()
+                                        .method_map
+                                        .get(&method_call)
+                                        .map(|method| resolve_ty(method.ty)))
+    }
+
+    pub fn errors_since_creation(&self) -> bool {
+        self.tcx.sess.err_count() - self.err_count_on_creation != 0
+    }
+
+    pub fn node_type(&self, id: ast::NodeId) -> Ty<'tcx> {
+        match self.tables.borrow().node_types.get(&id) {
+            Some(&t) => t,
+            // FIXME
+            None if self.errors_since_creation() =>
+                self.tcx.types.err,
+            None => {
+                bug!("no type for node {}: {} in fcx",
+                     id, self.tcx.map.node_to_string(id));
+            }
+        }
+    }
+
+    pub fn expr_ty(&self, ex: &hir::Expr) -> Ty<'tcx> {
+        match self.tables.borrow().node_types.get(&ex.id) {
+            Some(&t) => t,
+            None => {
+                bug!("no type for expr in fcx");
+            }
+        }
+    }
+
+    pub fn resolve_regions_and_report_errors(&self,
+                                             free_regions: &FreeRegionMap,
+                                             subject_node_id: ast::NodeId) {
+        let errors = self.region_vars.resolve_regions(free_regions, subject_node_id);
+        if !self.errors_since_creation() {
+            // As a heuristic, just skip reporting region errors
+            // altogether if other errors have been reported while
+            // this infcx was in use.  This is totally hokey but
+            // otherwise we have a hard time separating legit region
+            // errors from silly ones.
+            self.report_region_errors(&errors); // see error_reporting.rs
+        }
+    }
+
+    pub fn ty_to_string(&self, t: Ty<'tcx>) -> String {
+        self.resolve_type_vars_if_possible(&t).to_string()
+    }
+
+    pub fn tys_to_string(&self, ts: &[Ty<'tcx>]) -> String {
+        let tstrs: Vec<String> = ts.iter().map(|t| self.ty_to_string(*t)).collect();
+        format!("({})", tstrs.join(", "))
+    }
+
+    pub fn trait_ref_to_string(&self, t: &ty::TraitRef<'tcx>) -> String {
+        self.resolve_type_vars_if_possible(t).to_string()
+    }
+
+    pub fn shallow_resolve(&self, typ: Ty<'tcx>) -> Ty<'tcx> {
+        match typ.sty {
+            ty::TyInfer(ty::TyVar(v)) => {
+                // Not entirely obvious: if `typ` is a type variable,
+                // it can be resolved to an int/float variable, which
+                // can then be recursively resolved, hence the
+                // recursion. Note though that we prevent type
+                // variables from unifying to other type variables
+                // directly (though they may be embedded
+                // structurally), and we prevent cycles in any case,
+                // so this recursion should always be of very limited
+                // depth.
+                self.type_variables.borrow_mut()
+                    .probe(v)
+                    .map(|t| self.shallow_resolve(t))
+                    .unwrap_or(typ)
+            }
+
+            ty::TyInfer(ty::IntVar(v)) => {
+                self.int_unification_table
+                    .borrow_mut()
+                    .probe(v)
+                    .map(|v| v.to_type(self.tcx))
+                    .unwrap_or(typ)
+            }
+
+            ty::TyInfer(ty::FloatVar(v)) => {
+                self.float_unification_table
+                    .borrow_mut()
+                    .probe(v)
+                    .map(|v| v.to_type(self.tcx))
+                    .unwrap_or(typ)
+            }
+
+            _ => {
+                typ
+            }
+        }
+    }
+
+    pub fn resolve_type_vars_if_possible<T>(&self, value: &T) -> T
+        where T: TypeFoldable<'tcx>
+    {
+        /*!
+         * Where possible, replaces type/int/float variables in
+         * `value` with their final value. Note that region variables
+         * are unaffected. If a type variable has not been unified, it
+         * is left as is.  This is an idempotent operation that does
+         * not affect inference state in any way and so you can do it
+         * at will.
+         */
+
+        if !value.needs_infer() {
+            return value.clone(); // avoid duplicated subst-folding
+        }
+        let mut r = resolve::OpportunisticTypeResolver::new(self);
+        value.fold_with(&mut r)
+    }
+
+    pub fn resolve_type_and_region_vars_if_possible<T>(&self, value: &T) -> T
+        where T: TypeFoldable<'tcx>
+    {
+        let mut r = resolve::OpportunisticTypeAndRegionResolver::new(self);
+        value.fold_with(&mut r)
+    }
+
+    /// Resolves all type variables in `t` and then, if any were left
+    /// unresolved, substitutes an error type. This is used after the
+    /// main checking when doing a second pass before writeback. The
+    /// justification is that writeback will produce an error for
+    /// these unconstrained type variables.
+    fn resolve_type_vars_or_error(&self, t: &Ty<'tcx>) -> mc::McResult<Ty<'tcx>> {
+        let ty = self.resolve_type_vars_if_possible(t);
+        if ty.references_error() || ty.is_ty_var() {
+            debug!("resolve_type_vars_or_error: error from {:?}", ty);
+            Err(())
+        } else {
+            Ok(ty)
+        }
+    }
+
+    pub fn fully_resolve<T:TypeFoldable<'tcx>>(&self, value: &T) -> FixupResult<T> {
+        /*!
+         * Attempts to resolve all type/region variables in
+         * `value`. Region inference must have been run already (e.g.,
+         * by calling `resolve_regions_and_report_errors`).  If some
+         * variable was never unified, an `Err` results.
+         *
+         * This method is idempotent, but it is not typically invoked
+         * except during the writeback phase.
+         */
+
+        resolve::fully_resolve(self, value)
+    }
+
+    // [Note-Type-error-reporting]
+    // An invariant is that anytime the expected or actual type is TyError (the special
+    // error type, meaning that an error occurred when typechecking this expression),
+    // this is a derived error. The error cascaded from another error (that was already
+    // reported), so it's not useful to display it to the user.
+    // The following four methods -- type_error_message_str, type_error_message_str_with_expected,
+    // type_error_message, and report_mismatched_types -- implement this logic.
+    // They check if either the actual or expected type is TyError, and don't print the error
+    // in this case. The typechecker should only ever report type errors involving mismatched
+    // types using one of these four methods, and should not call span_err directly for such
+    // errors.
+    pub fn type_error_message_str<M>(&self,
+                                     sp: Span,
+                                     mk_msg: M,
+                                     actual_ty: String,
+                                     err: Option<&TypeError<'tcx>>)
+        where M: FnOnce(Option<String>, String) -> String,
+    {
+        self.type_error_message_str_with_expected(sp, mk_msg, None, actual_ty, err)
+    }
+
+    pub fn type_error_struct_str<M>(&self,
+                                    sp: Span,
+                                    mk_msg: M,
+                                    actual_ty: String,
+                                    err: Option<&TypeError<'tcx>>)
+                                    -> DiagnosticBuilder<'tcx>
+        where M: FnOnce(Option<String>, String) -> String,
+    {
+        self.type_error_struct_str_with_expected(sp, mk_msg, None, actual_ty, err)
+    }
+
+    pub fn type_error_message_str_with_expected<M>(&self,
+                                                   sp: Span,
+                                                   mk_msg: M,
+                                                   expected_ty: Option<Ty<'tcx>>,
+                                                   actual_ty: String,
+                                                   err: Option<&TypeError<'tcx>>)
+        where M: FnOnce(Option<String>, String) -> String,
+    {
+        self.type_error_struct_str_with_expected(sp, mk_msg, expected_ty, actual_ty, err)
+            .emit();
+    }
+
+    pub fn type_error_struct_str_with_expected<M>(&self,
+                                                  sp: Span,
+                                                  mk_msg: M,
+                                                  expected_ty: Option<Ty<'tcx>>,
+                                                  actual_ty: String,
+                                                  err: Option<&TypeError<'tcx>>)
+                                                  -> DiagnosticBuilder<'tcx>
+        where M: FnOnce(Option<String>, String) -> String,
+    {
+        debug!("hi! expected_ty = {:?}, actual_ty = {}", expected_ty, actual_ty);
+
+        let resolved_expected = expected_ty.map(|e_ty| self.resolve_type_vars_if_possible(&e_ty));
+
+        if !resolved_expected.references_error() {
+            let error_str = err.map_or("".to_string(), |t_err| {
+                format!(" ({})", t_err)
+            });
+
+            let mut db = self.tcx.sess.struct_span_err(sp, &format!("{}{}",
+                mk_msg(resolved_expected.map(|t| self.ty_to_string(t)), actual_ty),
+                error_str));
+
+            if let Some(err) = err {
+                self.tcx.note_and_explain_type_err(&mut db, err, sp);
+            }
+            db
+        } else {
+            self.tcx.sess.diagnostic().struct_dummy()
+        }
+    }
+
+    pub fn type_error_message<M>(&self,
+                                 sp: Span,
+                                 mk_msg: M,
+                                 actual_ty: Ty<'tcx>,
+                                 err: Option<&TypeError<'tcx>>)
+        where M: FnOnce(String) -> String,
+    {
+        self.type_error_struct(sp, mk_msg, actual_ty, err).emit();
+    }
+
+    pub fn type_error_struct<M>(&self,
+                                sp: Span,
+                                mk_msg: M,
+                                actual_ty: Ty<'tcx>,
+                                err: Option<&TypeError<'tcx>>)
+                                -> DiagnosticBuilder<'tcx>
+        where M: FnOnce(String) -> String,
+    {
+        let actual_ty = self.resolve_type_vars_if_possible(&actual_ty);
+
+        // Don't report an error if actual type is TyError.
+        if actual_ty.references_error() {
+            return self.tcx.sess.diagnostic().struct_dummy();
+        }
+
+        self.type_error_struct_str(sp,
+            move |_e, a| { mk_msg(a) },
+            self.ty_to_string(actual_ty), err)
+    }
+
+    pub fn report_mismatched_types(&self,
+                                   origin: TypeOrigin,
+                                   expected: Ty<'tcx>,
+                                   actual: Ty<'tcx>,
+                                   err: TypeError<'tcx>) {
+        let trace = TypeTrace {
+            origin: origin,
+            values: Types(ExpectedFound {
+                expected: expected,
+                found: actual
+            })
+        };
+        self.report_and_explain_type_error(trace, &err).emit();
+    }
+
+    pub fn report_conflicting_default_types(&self,
+                                            span: Span,
+                                            expected: type_variable::Default<'tcx>,
+                                            actual: type_variable::Default<'tcx>) {
+        let trace = TypeTrace {
+            origin: TypeOrigin::Misc(span),
+            values: Types(ExpectedFound {
+                expected: expected.ty,
+                found: actual.ty
+            })
+        };
+
+        self.report_and_explain_type_error(
+            trace,
+            &TypeError::TyParamDefaultMismatch(ExpectedFound {
+                expected: expected,
+                found: actual
+            }))
+            .emit();
+    }
+
+    pub fn replace_late_bound_regions_with_fresh_var<T>(
+        &self,
+        span: Span,
+        lbrct: LateBoundRegionConversionTime,
+        value: &ty::Binder<T>)
+        -> (T, FnvHashMap<ty::BoundRegion,ty::Region>)
+        where T : TypeFoldable<'tcx>
+    {
+        self.tcx.replace_late_bound_regions(
+            value,
+            |br| self.next_region_var(LateBoundRegion(span, br, lbrct)))
+    }
+
+    /// See `verify_generic_bound` method in `region_inference`
+    pub fn verify_generic_bound(&self,
+                                origin: SubregionOrigin<'tcx>,
+                                kind: GenericKind<'tcx>,
+                                a: ty::Region,
+                                bound: VerifyBound) {
+        debug!("verify_generic_bound({:?}, {:?} <: {:?})",
+               kind,
+               a,
+               bound);
+
+        self.region_vars.verify_generic_bound(origin, kind, a, bound);
+    }
+
+    pub fn can_equate<'b,T>(&'b self, a: &T, b: &T) -> UnitResult<'tcx>
+        where T: Relate<'b,'tcx> + fmt::Debug
+    {
+        debug!("can_equate({:?}, {:?})", a, b);
+        self.probe(|_| {
+            // Gin up a dummy trace, since this won't be committed
+            // anyhow. We should make this typetrace stuff more
+            // generic so we don't have to do anything quite this
+            // terrible.
+            let e = self.tcx.types.err;
+            let trace = TypeTrace {
+                origin: TypeOrigin::Misc(codemap::DUMMY_SP),
+                values: Types(expected_found(true, e, e))
+            };
+            self.equate(true, trace, a, b)
+        }).map(|_| ())
+    }
+
+    pub fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>> {
+        let ty = self.node_type(id);
+        self.resolve_type_vars_or_error(&ty)
+    }
+
+    pub fn expr_ty_adjusted(&self, expr: &hir::Expr) -> McResult<Ty<'tcx>> {
+        let ty = self.adjust_expr_ty(expr, self.tables.borrow().adjustments.get(&expr.id));
+        self.resolve_type_vars_or_error(&ty)
+    }
+
+    pub fn tables_are_tcx_tables(&self) -> bool {
+        let tables: &RefCell<ty::Tables> = &self.tables;
+        let tcx_tables: &RefCell<ty::Tables> = &self.tcx.tables;
+        tables as *const _ == tcx_tables as *const _
+    }
+
+    pub fn type_moves_by_default(&self, ty: Ty<'tcx>, span: Span) -> bool {
+        let ty = self.resolve_type_vars_if_possible(&ty);
+        if ty.needs_infer() ||
+            (ty.has_closure_types() && !self.tables_are_tcx_tables()) {
+            // this can get called from typeck (by euv), and moves_by_default
+            // rightly refuses to work with inference variables, but
+            // moves_by_default has a cache, which we want to use in other
+            // cases.
+            !traits::type_known_to_meet_builtin_bound(self, ty, ty::BoundCopy, span)
+        } else {
+            ty.moves_by_default(&self.parameter_environment, span)
+        }
+    }
+
+    pub fn node_method_ty(&self, method_call: ty::MethodCall)
+                          -> Option<Ty<'tcx>> {
+        self.tables
+            .borrow()
+            .method_map
+            .get(&method_call)
+            .map(|method| method.ty)
+            .map(|ty| self.resolve_type_vars_if_possible(&ty))
+    }
+
+    pub fn node_method_id(&self, method_call: ty::MethodCall)
+                          -> Option<DefId> {
+        self.tables
+            .borrow()
+            .method_map
+            .get(&method_call)
+            .map(|method| method.def_id)
+    }
+
+    pub fn adjustments(&self) -> Ref<NodeMap<adjustment::AutoAdjustment<'tcx>>> {
+        fn project_adjustments<'a, 'tcx>(tables: &'a ty::Tables<'tcx>)
+                                        -> &'a NodeMap<adjustment::AutoAdjustment<'tcx>> {
+            &tables.adjustments
+        }
+
+        Ref::map(self.tables.borrow(), project_adjustments)
+    }
+
+    pub fn is_method_call(&self, id: ast::NodeId) -> bool {
+        self.tables.borrow().method_map.contains_key(&ty::MethodCall::expr(id))
+    }
+
+    pub fn temporary_scope(&self, rvalue_id: ast::NodeId) -> Option<CodeExtent> {
+        self.tcx.region_maps.temporary_scope(rvalue_id)
+    }
+
+    pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture> {
+        self.tables.borrow().upvar_capture_map.get(&upvar_id).cloned()
+    }
+
+    pub fn param_env<'b>(&'b self) -> &'b ty::ParameterEnvironment<'b,'tcx> {
+        &self.parameter_environment
+    }
+
+    pub fn closure_kind(&self,
+                        def_id: DefId)
+                        -> Option<ty::ClosureKind>
+    {
+        if def_id.is_local() {
+            self.tables.borrow().closure_kinds.get(&def_id).cloned()
+        } else {
+            // During typeck, ALL closures are local. But afterwards,
+            // during trans, we see closure ids from other crates.
+            // That may require loading the closure data out of the
+            // cstore.
+            Some(ty::Tables::closure_kind(&self.tables, self.tcx, def_id))
+        }
+    }
+
+    pub fn closure_type(&self,
+                        def_id: DefId,
+                        substs: &ty::ClosureSubsts<'tcx>)
+                        -> ty::ClosureTy<'tcx>
+    {
+        let closure_ty =
+            ty::Tables::closure_type(self.tables,
+                                     self.tcx,
+                                     def_id,
+                                     substs);
+
+        if self.normalize {
+            normalize_associated_type(&self.tcx, &closure_ty)
+        } else {
+            closure_ty
+        }
+    }
+}
+
+impl<'tcx> TypeTrace<'tcx> {
+    pub fn span(&self) -> Span {
+        self.origin.span()
+    }
+
+    pub fn types(origin: TypeOrigin,
+                 a_is_expected: bool,
+                 a: Ty<'tcx>,
+                 b: Ty<'tcx>)
+                 -> TypeTrace<'tcx> {
+        TypeTrace {
+            origin: origin,
+            values: Types(expected_found(a_is_expected, a, b))
+        }
+    }
+
+    pub fn dummy(tcx: &TyCtxt<'tcx>) -> TypeTrace<'tcx> {
+        TypeTrace {
+            origin: TypeOrigin::Misc(codemap::DUMMY_SP),
+            values: Types(ExpectedFound {
+                expected: tcx.types.err,
+                found: tcx.types.err,
+            })
+        }
+    }
+}
+
+impl<'tcx> fmt::Debug for TypeTrace<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "TypeTrace({:?})", self.origin)
+    }
+}
+
+impl TypeOrigin {
+    pub fn span(&self) -> Span {
+        match *self {
+            TypeOrigin::MethodCompatCheck(span) => span,
+            TypeOrigin::ExprAssignable(span) => span,
+            TypeOrigin::Misc(span) => span,
+            TypeOrigin::RelateTraitRefs(span) => span,
+            TypeOrigin::RelateSelfType(span) => span,
+            TypeOrigin::RelateOutputImplTypes(span) => span,
+            TypeOrigin::MatchExpressionArm(match_span, _, _) => match_span,
+            TypeOrigin::IfExpression(span) => span,
+            TypeOrigin::IfExpressionWithNoElse(span) => span,
+            TypeOrigin::RangeExpression(span) => span,
+            TypeOrigin::EquatePredicate(span) => span,
+        }
+    }
+}
+
+impl<'tcx> SubregionOrigin<'tcx> {
+    pub fn span(&self) -> Span {
+        match *self {
+            Subtype(ref a) => a.span(),
+            InfStackClosure(a) => a,
+            InvokeClosure(a) => a,
+            DerefPointer(a) => a,
+            FreeVariable(a, _) => a,
+            IndexSlice(a) => a,
+            RelateObjectBound(a) => a,
+            RelateParamBound(a, _) => a,
+            RelateRegionParamBound(a) => a,
+            RelateDefaultParamBound(a, _) => a,
+            Reborrow(a) => a,
+            ReborrowUpvar(a, _) => a,
+            DataBorrowed(_, a) => a,
+            ReferenceOutlivesReferent(_, a) => a,
+            ParameterInScope(_, a) => a,
+            ExprTypeIsNotInScope(_, a) => a,
+            BindingTypeIsNotValidAtDecl(a) => a,
+            CallRcvr(a) => a,
+            CallArg(a) => a,
+            CallReturn(a) => a,
+            Operand(a) => a,
+            AddrOf(a) => a,
+            AutoBorrow(a) => a,
+            SafeDestructor(a) => a,
+        }
+    }
+}
+
+impl RegionVariableOrigin {
+    pub fn span(&self) -> Span {
+        match *self {
+            MiscVariable(a) => a,
+            PatternRegion(a) => a,
+            AddrOfRegion(a) => a,
+            Autoref(a) => a,
+            Coercion(a) => a,
+            EarlyBoundRegion(a, _) => a,
+            LateBoundRegion(a, _, _) => a,
+            BoundRegionInCoherence(_) => codemap::DUMMY_SP,
+            UpvarRegion(_, a) => a
+        }
+    }
+}
diff --git a/src/librustc/infer/region_inference/README.md b/src/librustc/infer/region_inference/README.md
new file mode 100644 (file)
index 0000000..80da861
--- /dev/null
@@ -0,0 +1,311 @@
+Region inference
+
+# Terminology
+
+Note that we use the terms region and lifetime interchangeably.
+
+# Introduction
+
+Region inference uses a somewhat more involved algorithm than type
+inference. It is not the most efficient thing ever written though it
+seems to work well enough in practice (famous last words).  The reason
+that we use a different algorithm is because, unlike with types, it is
+impractical to hand-annotate with regions (in some cases, there aren't
+even the requisite syntactic forms).  So we have to get it right, and
+it's worth spending more time on a more involved analysis.  Moreover,
+regions are a simpler case than types: they don't have aggregate
+structure, for example.
+
+Unlike normal type inference, which is similar in spirit to H-M and thus
+works progressively, region inference works by accumulating constraints
+over the course of a function. Then, at the end of processing the
+function, we solve all of the constraints at once.
+
+The constraints always take one of three possible forms (sketched below):
+
+- `ConstrainVarSubVar(Ri, Rj)` states that region variable Ri must be
+  a subregion of Rj
+- `ConstrainRegSubVar(R, Ri)` states that the concrete region R (which
+  must not be a variable) must be a subregion of the variable Ri
+- `ConstrainVarSubReg(Ri, R)` states that the variable Ri should be less
+  than the concrete region R. This is kind of deprecated and ought to
+  be replaced with a verify (they essentially play the same role).
+
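+A sketch with simplified stand-in types (the variant names are the ones used
+in this document; the payload types are placeholders for illustration):
+
+    #[derive(Clone, Copy, Debug)]
+    struct RegionVid(u32);              // an inference variable Ri
+    #[derive(Clone, Copy, Debug)]
+    enum Region { Static, Scope(u32) }  // a concrete region R
+
+    enum Constraint {
+        ConstrainVarSubVar(RegionVid, RegionVid), // Ri must be a subregion of Rj
+        ConstrainRegSubVar(Region, RegionVid),    // concrete R must be a subregion of Ri
+        ConstrainVarSubReg(RegionVid, Region),    // Ri must be a subregion of concrete R
+    }
+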
+In addition to constraints, we also gather up a set of "verifys"
+(what, you don't think Verify is a noun? Get used to it my
+friend!). These represent relations that must hold but which don't
+influence inference proper. These take the form of:
+
+- `VerifyRegSubReg(Ri, Rj)` indicates that Ri <= Rj must hold,
+  where Rj is not an inference variable (and Ri may or may not contain
+  one). This doesn't influence inference because we will already have
+  inferred Ri to be as small as possible, so then we just test whether
+  that result was less than Rj or not.
+- `VerifyGenericBound(R, Vb)` is a more complex expression which tests
+  that the region R must satisfy the bound `Vb`. The bounds themselves
+  may have structure like "must outlive one of the following regions"
+  or "must outlive ALL of the following regions. These bounds arise
+  from constraints like `T: 'a` -- if we know that `T: 'b` and `T: 'c`
+  (say, from where clauses), then we can conclude that `T: 'a` if `'b:
+  'a` *or* `'c: 'a`.
+
+# Building up the constraints
+
+Variables and constraints are created using the following methods:
+
+- `new_region_var()` creates a new, unconstrained region variable;
+- `make_subregion(Ri, Rj)` states that Ri is a subregion of Rj
+- `lub_regions(Ri, Rj) -> Rk` returns a region Rk which is
+  the smallest region that is greater than both Ri and Rj
+- `glb_regions(Ri, Rj) -> Rk` returns a region Rk which is
+  the greatest region that is smaller than both Ri and Rj
+
+The actual region resolution algorithm is not entirely
+obvious, though it is also not overly complex.
+
+## Snapshotting
+
+It is also permitted to try (and rollback) changes to the graph.  This
+is done by invoking `start_snapshot()`, which returns a value.  Then
+later you can call `rollback_to()` which undoes the work.
+Alternatively, you can call `commit()` which ends all snapshots.
+Snapshots can be recursive---so you can start a snapshot when another
+is in progress, but only the root snapshot can "commit".
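+
+A minimal, self-contained sketch of this undo-log style of snapshotting (the
+`Table` type and its fields are hypothetical stand-ins, not the real region
+graph):
+
+    struct Snapshot { undo_len: usize }
+
+    struct Table {
+        values: Vec<u32>,
+        undo_log: Vec<(usize, u32)>, // (index, previous value)
+    }
+
+    impl Table {
+        fn start_snapshot(&self) -> Snapshot {
+            Snapshot { undo_len: self.undo_log.len() }
+        }
+        fn set(&mut self, i: usize, v: u32) {
+            self.undo_log.push((i, self.values[i])); // remember the old value
+            self.values[i] = v;
+        }
+        fn rollback_to(&mut self, snap: Snapshot) {
+            while self.undo_log.len() > snap.undo_len {
+                let (i, old) = self.undo_log.pop().unwrap();
+                self.values[i] = old; // undo in reverse order
+            }
+        }
+        fn commit(&mut self, _snap: Snapshot) {
+            self.undo_log.clear(); // changes become permanent; ends all snapshots
+        }
+    }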
+
+## The problem
+
+Basically our input is a directed graph where nodes can be divided
+into two categories: region variables and concrete regions.  Each edge
+`R -> S` in the graph represents a constraint that the region `R` is a
+subregion of the region `S`.
+
+Region variable nodes can have arbitrary degree.  There is one region
+variable node per region variable.
+
+Each concrete region node is associated with some, well, concrete
+region: e.g., a free lifetime, or the region for a particular scope.
+Note that there may be more than one concrete region node for a
+particular region value.  Moreover, because of how the graph is built,
+we know that all concrete region nodes have either in-degree 1 or
+out-degree 1.
+
+Before resolution begins, we build up the constraints in a hashmap
+that maps `Constraint` keys to spans.  During resolution, we construct
+the actual `Graph` structure that we describe here.
+
+## Computing the values for region variables
+
+The algorithm is a simple dataflow algorithm. Each region variable
+begins as empty. We iterate over the constraints, and for each constraint
+we grow the relevant region variable to be as big as it must be to meet all the
+constraints. This means the region variables can grow to be `'static` if
+necessary.
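+
+A minimal sketch of this expansion loop, using hypothetical stand-ins for the
+region values and their least upper bound (`lub`); the real value lattice is
+richer than this:
+
+    #[derive(Clone, Copy, PartialEq, Debug)]
+    enum Value { Empty, Scope(u32), Static }
+
+    fn lub(a: Value, b: Value) -> Value {
+        match (a, b) {
+            (Value::Empty, x) | (x, Value::Empty) => x,
+            (Value::Static, _) | (_, Value::Static) => Value::Static,
+            (Value::Scope(x), Value::Scope(y)) =>
+                if x == y { Value::Scope(x) } else { Value::Static },
+        }
+    }
+
+    // Each constraint says: variable `target` must grow to at least `lower`.
+    enum Lower { Var(usize), Concrete(Value) }
+
+    fn expand(values: &mut [Value], constraints: &[(usize, Lower)]) {
+        let mut changed = true;
+        while changed {
+            changed = false;
+            for &(target, ref lower) in constraints {
+                let lo = match *lower {
+                    Lower::Var(v) => values[v],
+                    Lower::Concrete(c) => c,
+                };
+                let grown = lub(values[target], lo);
+                if grown != values[target] {
+                    values[target] = grown; // grow until a fixed point is reached
+                    changed = true;
+                }
+            }
+        }
+    }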
+
+## Verification
+
+After all constraints are fully propagated, we do a "verification"
+step where we walk over the verify bounds and check that they are
+satisfied. These bounds represent the "maximal" values that a region
+variable can take on, basically.
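+
+Continuing the sketch above (same stand-in types), a verify that records the
+maximal value allowed for a variable could be checked like this:
+
+    // Each verify pairs a variable index with the largest value it may have.
+    fn verify(values: &[Value], verifys: &[(usize, Value)]) -> Result<(), usize> {
+        for (i, &(var, max)) in verifys.iter().enumerate() {
+            // `values[var] <= max` holds exactly when lub(values[var], max) == max
+            if lub(values[var], max) != max {
+                return Err(i); // report which verify failed
+            }
+        }
+        Ok(())
+    }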
+
+# The Region Hierarchy
+
+## Without closures
+
+Let's first consider the region hierarchy without thinking about
+closures, because they add a lot of complications. The region
+hierarchy *basically* mirrors the lexical structure of the code.
+There is a region for every piece of 'evaluation' that occurs, meaning
+every expression, block, and pattern (patterns are considered to
+"execute" by testing the value they are applied to and creating any
+relevant bindings).  So, for example:
+
+    fn foo(x: isize, y: isize) { // -+
+    //  +------------+       //  |
+    //  |      +-----+       //  |
+    //  |  +-+ +-+ +-+       //  |
+    //  |  | | | | | |       //  |
+    //  v  v v v v v v       //  |
+        let z = x + y;       //  |
+        ...                  //  |
+    }                        // -+
+
+    fn bar() { ... }
+
+In this example, there is a region for the fn body block as a whole,
+and then a subregion for the declaration of the local variable.
+Within that, there are sublifetimes for the assignment pattern and
+also the expression `x + y`. The expression itself has sublifetimes
+for evaluating `x` and `y`.
+
+## Function calls
+
+Function calls are a bit tricky. I will describe how we handle them
+*now* and then a bit about how we can improve them (Issue #6268).
+
+Consider a function call like `func(expr1, expr2)`, where `func`,
+`expr1`, and `expr2` are all arbitrary expressions. Currently,
+we construct a region hierarchy like:
+
+    +----------------+
+    |                |
+    +--+ +---+  +---+|
+    v  v v   v  v   vv
+    func(expr1, expr2)
+
+Here you can see that the call as a whole has a region and the
+function plus arguments are subregions of that. As a side-effect of
+this, we get a lot of spurious errors around nested calls, in
+particular when combined with `&mut` functions. For example, a call
+like this one
+
+    self.foo(self.bar())
+
+where both `foo` and `bar` are `&mut self` functions will always yield
+an error.
+
+Here is a more involved example (which is safe) so we can see what's
+going on:
+
+    struct Foo { f: usize, g: usize }
+    ...
+    fn add(p: &mut usize, v: usize) {
+        *p += v;
+    }
+    ...
+    fn inc(p: &mut usize) -> usize {
+        *p += 1; *p
+    }
+    fn weird() {
+        let mut x: Box<Foo> = box Foo { ... };
+        'a: add(&mut (*x).f,
+                'b: inc(&mut (*x).f)) // (..)
+    }
+
+The important part is the line marked `(..)` which contains a call to
+`add()`. The first argument is a mutable borrow of the field `f`.  The
+second argument also borrows the field `f`. Now, in the current borrow
+checker, the first borrow is given the lifetime of the call to
+`add()`, `'a`.  The second borrow is given the lifetime `'b` of the
+call to `inc()`. Because `'b` is considered to be a sublifetime of
+`'a`, an error is reported since there are two co-existing mutable
+borrows of the same data.
+
+However, if we were to examine the lifetimes a bit more carefully, we
+can see that this error is unnecessary. Let's examine the lifetimes
+involved with `'a` in detail. We'll break apart all the steps involved
+in a call expression:
+
+    'a: {
+        'a_arg1: let a_temp1: ... = add;
+        'a_arg2: let a_temp2: &'a mut usize = &'a mut (*x).f;
+        'a_arg3: let a_temp3: usize = {
+            let b_temp1: ... = inc;
+            let b_temp2: &'b mut usize = &'b mut (*x).f;
+            'b_call: b_temp1(b_temp2)
+        };
+        'a_call: a_temp1(a_temp2, a_temp3) // (**)
+    }
+
+Here we see that the lifetime `'a` includes a number of substatements.
+In particular, there is this lifetime I've called `'a_call` that
+corresponds to the *actual execution of the function `add()`*, after
+all arguments have been evaluated. There is a corresponding lifetime
+`'b_call` for the execution of `inc()`. If we wanted to be precise
+about it, the lifetime of the two borrows should be `'a_call` and
+`'b_call` respectively, since the references that were created
+will not be dereferenced except during the execution itself.
+
+However, this model by itself is not sound. The reason is that
+while the two references that are created will never be used
+simultaneously, it is still true that the first reference is
+*created* before the second argument is evaluated, and so even though
+it will not be *dereferenced* during the evaluation of the second
+argument, it can still be *invalidated* by that evaluation. Consider
+this similar but unsound example:
+
+    struct Foo { f: usize, g: usize }
+    ...
+    fn add(p: &mut usize, v: usize) {
+        *p += v;
+    }
+    ...
+    fn consume(x: Box<Foo>) -> usize {
+        x.f + x.g
+    }
+    fn weird() {
+        let mut x: Box<Foo> = box Foo { ... };
+        'a: add(&mut (*x).f, consume(x)) // (..)
+    }
+
+In this case, the second argument to `add` actually consumes `x`, thus
+invalidating the first argument.
+
+So, for now, we exclude the `call` lifetimes from our model.
+Eventually I would like to include them, but we will have to make the
+borrow checker handle this situation correctly. In particular, if
+there is a reference created whose lifetime does not enclose
+the borrow expression, we must issue sufficient restrictions to ensure
+that the pointee remains valid.
+
+## Modeling closures
+
+Integrating closures properly into the model is still a work in
+progress. In an ideal world, we would model closures as
+closely as possible after their desugared equivalents. That is, a
+closure type would be modeled as a struct, and the region hierarchy of
+different closure bodies would be completely distinct from all other
+fns. We are generally moving in that direction but there are
+complications in terms of the implementation.
+
+In practice what we currently do is somewhat different. The basis for
+the current approach is the observation that the only time that
+regions from distinct fn bodies interact with one another is through
+an upvar or the type of a fn parameter (since closures live in the fn
+body namespace, they can in fact have fn parameters whose types
+include regions from the surrounding fn body). For these cases, there
+are separate mechanisms which ensure that the regions that appear in
+upvars/parameters outlive the dynamic extent of each call to the
+closure:
+
+1. Types must outlive the region of any expression where they are used.
+   For a closure type `C` to outlive a region `'r`, that implies that the
+   types of all its upvars must outlive `'r`.
+2. Parameters must outlive the region of any fn that they are passed to.
+
+Therefore, we can -- sort of -- assume that any region from an
+enclosing fn is larger than any region from one of its enclosed
+fns. And that is precisely what we do: when building the region
+hierarchy, each region lives in its own distinct subtree, but if we
+are asked to compute the `LUB(r1, r2)` of two regions, and those
+regions are in disjoint subtrees, we compare the lexical nesting of
+the two regions.
+
+*Ideas for improving the situation:* (FIXME #3696) The correctness
+argument here is subtle and a bit hand-wavy. The ideal, as stated
+earlier, would be to model things in such a way that it corresponds
+more closely to the desugared code. The best approach for doing this
+is a bit unclear: it may in fact be possible to *actually* desugar
+before we start, but I don't think so. The main option that I've been
+thinking through is imposing a "view shift" as we enter the fn body,
+so that regions appearing in the types of fn parameters and upvars are
+translated from being regions in the outer fn into free region
+parameters, just as they would be if we applied the desugaring. The
+challenge here is that type inference may not have fully run, so the
+types may not be fully known: we could probably do this translation
+lazily, as type variables are instantiated. We would also have to
+apply a kind of inverse translation to the return value. This would be
+a good idea anyway, as right now it is possible for free regions
+instantiated within the closure to leak into the parent: this
+currently leads to type errors, since those regions cannot outlive any
+expressions within the parent hierarchy. Much like the current
+handling of closures, there are no known cases where this leads to
+the type checker accepting incorrect code (though it sometimes rejects
+what might be considered correct code; see rust-lang/rust#22557), but
+it still doesn't feel like the right approach.
+
+### Skolemization
+
+For a discussion on skolemization and higher-ranked subtyping, please
+see the module `middle::infer::higher_ranked::doc`.
diff --git a/src/librustc/infer/region_inference/graphviz.rs b/src/librustc/infer/region_inference/graphviz.rs
new file mode 100644 (file)
index 0000000..e611c00
--- /dev/null
@@ -0,0 +1,273 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! This module provides linkage between libgraphviz traits and
+//! `rustc::middle::typeck::infer::region_inference`, generating a
+//! rendering of the graph represented by the list of `Constraint`
+//! instances (which make up the edges of the graph), as well as the
+//! origin for each constraint (which are attached to the labels on
+//! each edge).
+
+/// For clarity, rename the graphviz crate locally to dot.
+use graphviz as dot;
+
+use ty::{self, TyCtxt};
+use middle::region::CodeExtent;
+use super::Constraint;
+use infer::SubregionOrigin;
+use infer::region_inference::RegionVarBindings;
+use util::nodemap::{FnvHashMap, FnvHashSet};
+
+use std::borrow::Cow;
+use std::collections::hash_map::Entry::Vacant;
+use std::env;
+use std::fs::File;
+use std::io;
+use std::io::prelude::*;
+use std::sync::atomic::{AtomicBool, Ordering};
+use syntax::ast;
+
+fn print_help_message() {
+    println!("\
+-Z print-region-graph by default prints a region constraint graph for every \n\
+function body, to the path `/tmp/constraints.nodeXXX.dot`, where the XXX is \n\
+replaced with the node id of the function under analysis.                   \n\
+                                                                            \n\
+To select one particular function body, set `RUST_REGION_GRAPH_NODE=XXX`,   \n\
+where XXX is the node id desired.                                           \n\
+                                                                            \n\
+To generate output to some path other than the default                      \n\
+`/tmp/constraints.nodeXXX.dot`, set `RUST_REGION_GRAPH=/path/desired.dot`;  \n\
+occurrences of the character `%` in the requested path will be replaced with\n\
+the node id of the function under analysis.                                 \n\
+                                                                            \n\
+(Since you requested help via RUST_REGION_GRAPH=help, no region constraint  \n\
+graphs will be printed.)                                                    \n\
+");
+}
+
+pub fn maybe_print_constraints_for<'a, 'tcx>(region_vars: &RegionVarBindings<'a, 'tcx>,
+                                             subject_node: ast::NodeId) {
+    let tcx = region_vars.tcx;
+
+    if !region_vars.tcx.sess.opts.debugging_opts.print_region_graph {
+        return;
+    }
+
+    let requested_node: Option<ast::NodeId> = env::var("RUST_REGION_GRAPH_NODE")
+                                                  .ok()
+                                                  .and_then(|s| s.parse().ok());
+
+    if requested_node.is_some() && requested_node != Some(subject_node) {
+        return;
+    }
+
+    let requested_output = env::var("RUST_REGION_GRAPH");
+    debug!("requested_output: {:?} requested_node: {:?}",
+           requested_output,
+           requested_node);
+
+    let output_path = {
+        let output_template = match requested_output {
+            Ok(ref s) if s == "help" => {
+                static PRINTED_YET: AtomicBool = AtomicBool::new(false);
+                if !PRINTED_YET.load(Ordering::SeqCst) {
+                    print_help_message();
+                    PRINTED_YET.store(true, Ordering::SeqCst);
+                }
+                return;
+            }
+
+            Ok(other_path) => other_path,
+            Err(_) => "/tmp/constraints.node%.dot".to_string(),
+        };
+
+        if output_template.is_empty() {
+            bug!("empty string provided as RUST_REGION_GRAPH");
+        }
+
+        if output_template.contains('%') {
+            let mut new_str = String::new();
+            for c in output_template.chars() {
+                if c == '%' {
+                    new_str.push_str(&subject_node.to_string());
+                } else {
+                    new_str.push(c);
+                }
+            }
+            new_str
+        } else {
+            output_template
+        }
+    };
+
+    let constraints = &*region_vars.constraints.borrow();
+    match dump_region_constraints_to(tcx, constraints, &output_path) {
+        Ok(()) => {}
+        Err(e) => {
+            let msg = format!("io error dumping region constraints: {}", e);
+            region_vars.tcx.sess.err(&msg)
+        }
+    }
+}
+
+struct ConstraintGraph<'a, 'tcx: 'a> {
+    tcx: &'a TyCtxt<'tcx>,
+    graph_name: String,
+    map: &'a FnvHashMap<Constraint, SubregionOrigin<'tcx>>,
+    node_ids: FnvHashMap<Node, usize>,
+}
+
+#[derive(Clone, Hash, PartialEq, Eq, Debug, Copy)]
+enum Node {
+    RegionVid(ty::RegionVid),
+    Region(ty::Region),
+}
+
+// type Edge = Constraint;
+#[derive(Clone, PartialEq, Eq, Debug, Copy)]
+enum Edge {
+    Constraint(Constraint),
+    EnclScope(CodeExtent, CodeExtent),
+}
+
+impl<'a, 'tcx> ConstraintGraph<'a, 'tcx> {
+    fn new(tcx: &'a TyCtxt<'tcx>,
+           name: String,
+           map: &'a ConstraintMap<'tcx>)
+           -> ConstraintGraph<'a, 'tcx> {
+        let mut i = 0;
+        let mut node_ids = FnvHashMap();
+        {
+            let mut add_node = |node| {
+                if let Vacant(e) = node_ids.entry(node) {
+                    e.insert(i);
+                    i += 1;
+                }
+            };
+
+            for (n1, n2) in map.keys().map(|c| constraint_to_nodes(c)) {
+                add_node(n1);
+                add_node(n2);
+            }
+
+            tcx.region_maps.each_encl_scope(|sub, sup| {
+                add_node(Node::Region(ty::ReScope(*sub)));
+                add_node(Node::Region(ty::ReScope(*sup)));
+            });
+        }
+
+        ConstraintGraph {
+            tcx: tcx,
+            graph_name: name,
+            map: map,
+            node_ids: node_ids,
+        }
+    }
+}
+
+impl<'a, 'tcx> dot::Labeller<'a> for ConstraintGraph<'a, 'tcx> {
+    type Node = Node;
+    type Edge = Edge;
+    fn graph_id(&self) -> dot::Id {
+        dot::Id::new(&*self.graph_name).unwrap()
+    }
+    fn node_id(&self, n: &Node) -> dot::Id {
+        let node_id = match self.node_ids.get(n) {
+            Some(node_id) => node_id,
+            None => bug!("no node_id found for node: {:?}", n),
+        };
+        let name = || format!("node_{}", node_id);
+        match dot::Id::new(name()) {
+            Ok(id) => id,
+            Err(_) => {
+                bug!("failed to create graphviz node identified by {}", name());
+            }
+        }
+    }
+    fn node_label(&self, n: &Node) -> dot::LabelText {
+        match *n {
+            Node::RegionVid(n_vid) => dot::LabelText::label(format!("{:?}", n_vid)),
+            Node::Region(n_rgn) => dot::LabelText::label(format!("{:?}", n_rgn)),
+        }
+    }
+    fn edge_label(&self, e: &Edge) -> dot::LabelText {
+        match *e {
+            Edge::Constraint(ref c) =>
+                dot::LabelText::label(format!("{:?}", self.map.get(c).unwrap())),
+            Edge::EnclScope(..) => dot::LabelText::label(format!("(enclosed)")),
+        }
+    }
+}
+
+fn constraint_to_nodes(c: &Constraint) -> (Node, Node) {
+    match *c {
+        Constraint::ConstrainVarSubVar(rv_1, rv_2) =>
+            (Node::RegionVid(rv_1), Node::RegionVid(rv_2)),
+        Constraint::ConstrainRegSubVar(r_1, rv_2) => (Node::Region(r_1), Node::RegionVid(rv_2)),
+        Constraint::ConstrainVarSubReg(rv_1, r_2) => (Node::RegionVid(rv_1), Node::Region(r_2)),
+    }
+}
+
+fn edge_to_nodes(e: &Edge) -> (Node, Node) {
+    match *e {
+        Edge::Constraint(ref c) => constraint_to_nodes(c),
+        Edge::EnclScope(sub, sup) => {
+            (Node::Region(ty::ReScope(sub)),
+             Node::Region(ty::ReScope(sup)))
+        }
+    }
+}
+
+impl<'a, 'tcx> dot::GraphWalk<'a> for ConstraintGraph<'a, 'tcx> {
+    type Node = Node;
+    type Edge = Edge;
+    fn nodes(&self) -> dot::Nodes<Node> {
+        let mut set = FnvHashSet();
+        for node in self.node_ids.keys() {
+            set.insert(*node);
+        }
+        debug!("constraint graph has {} nodes", set.len());
+        set.into_iter().collect()
+    }
+    fn edges(&self) -> dot::Edges<Edge> {
+        debug!("constraint graph has {} edges", self.map.len());
+        let mut v: Vec<_> = self.map.keys().map(|e| Edge::Constraint(*e)).collect();
+        self.tcx.region_maps.each_encl_scope(|sub, sup| v.push(Edge::EnclScope(*sub, *sup)));
+        debug!("region graph has {} edges", v.len());
+        Cow::Owned(v)
+    }
+    fn source(&self, edge: &Edge) -> Node {
+        let (n1, _) = edge_to_nodes(edge);
+        debug!("edge {:?} has source {:?}", edge, n1);
+        n1
+    }
+    fn target(&self, edge: &Edge) -> Node {
+        let (_, n2) = edge_to_nodes(edge);
+        debug!("edge {:?} has target {:?}", edge, n2);
+        n2
+    }
+}
+
+pub type ConstraintMap<'tcx> = FnvHashMap<Constraint, SubregionOrigin<'tcx>>;
+
+fn dump_region_constraints_to<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>,
+                                            map: &ConstraintMap<'tcx>,
+                                            path: &str)
+                                            -> io::Result<()> {
+    debug!("dump_region_constraints map (len: {}) path: {}",
+           map.len(),
+           path);
+    let g = ConstraintGraph::new(tcx, format!("region_constraints"), map);
+    debug!("dump_region_constraints calling render");
+    let mut v = Vec::new();
+    dot::render(&g, &mut v).unwrap();
+    File::create(path).and_then(|mut f| f.write_all(&v))
+}
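+
+// A sketch of how this dump is driven (illustrative, not part of the upstream
+// source): assuming the output template is read from the RUST_REGION_GRAPH
+// environment variable, as the bug! message above suggests, a run such as
+//
+//     RUST_REGION_GRAPH=/tmp/constraints.node%.dot rustc ...
+//
+// writes one graphviz file per subject node, with `%` replaced by the subject
+// node id as done in `maybe_print_constraints_for` above.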
diff --git a/src/librustc/infer/region_inference/mod.rs b/src/librustc/infer/region_inference/mod.rs
new file mode 100644 (file)
index 0000000..2f610bf
--- /dev/null
@@ -0,0 +1,1453 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! See README.md
+
+pub use self::Constraint::*;
+pub use self::Verify::*;
+pub use self::UndoLogEntry::*;
+pub use self::CombineMapType::*;
+pub use self::RegionResolutionError::*;
+pub use self::VarValue::*;
+
+use super::{RegionVariableOrigin, SubregionOrigin, MiscVariable};
+use super::unify_key;
+
+use rustc_data_structures::graph::{self, Direction, NodeIndex};
+use rustc_data_structures::unify::{self, UnificationTable};
+use middle::free_region::FreeRegionMap;
+use ty::{self, Ty, TyCtxt};
+use ty::{BoundRegion, Region, RegionVid};
+use ty::{ReEmpty, ReStatic, ReFree, ReEarlyBound};
+use ty::{ReLateBound, ReScope, ReVar, ReSkolemized, BrFresh};
+use util::common::indenter;
+use util::nodemap::{FnvHashMap, FnvHashSet};
+
+use std::cell::{Cell, RefCell};
+use std::cmp::Ordering::{self, Less, Greater, Equal};
+use std::fmt;
+use std::u32;
+use syntax::ast;
+
+mod graphviz;
+
+// A constraint that influences the inference process.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+pub enum Constraint {
+    // One region variable is subregion of another
+    ConstrainVarSubVar(RegionVid, RegionVid),
+
+    // Concrete region is subregion of region variable
+    ConstrainRegSubVar(Region, RegionVid),
+
+    // Region variable is subregion of concrete region
+    //
+    // FIXME(#29436) -- should be removed in favor of a Verify
+    ConstrainVarSubReg(RegionVid, Region),
+}
+
+// Something we have to verify after region inference is done, but
+// which does not directly influence the inference process
+pub enum Verify<'tcx> {
+    // VerifyRegSubReg(a, b): Verify that `a <= b`. Neither `a` nor
+    // `b` is an inference variable.
+    VerifyRegSubReg(SubregionOrigin<'tcx>, Region, Region),
+
+    // VerifyGenericBound(T, _, R, RS): The parameter type `T` (or
+    // associated type) must outlive the region `R`. `T` is known to
+    // outlive `RS`. Therefore verify that `R <= RS[i]` for some
+    // `i`. Inference variables may be involved (but this verification
+    // step doesn't influence inference).
+    VerifyGenericBound(GenericKind<'tcx>, SubregionOrigin<'tcx>, Region, VerifyBound),
+}
+
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum GenericKind<'tcx> {
+    Param(ty::ParamTy),
+    Projection(ty::ProjectionTy<'tcx>),
+}
+
+// When we introduce a verification step, we wish to test that a
+// particular region (let's call it `'min`) meets some bound.
+// The bound is described by the following grammar:
+#[derive(Debug)]
+pub enum VerifyBound {
+    // B = exists {R} --> some 'r in {R} must outlive 'min
+    //
+    // Put another way, the subject value is known to outlive all
+    // regions in {R}, so if any of those outlives 'min, then the
+    // bound is met.
+    AnyRegion(Vec<Region>),
+
+    // B = forall {R} --> all 'r in {R} must outlive 'min
+    //
+    // Put another way, the subject value is known to outlive some
+    // region in {R}, so if all of those outlive 'min, then the bound
+    // is met.
+    AllRegions(Vec<Region>),
+
+    // B = exists {B} --> 'min must meet some bound b in {B}
+    AnyBound(Vec<VerifyBound>),
+
+    // B = forall {B} --> 'min must meet all bounds b in {B}
+    AllBounds(Vec<VerifyBound>),
+}
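+
+// Illustrative example (not from the upstream source) of how these bounds
+// compose under the grammar above: the bound
+//
+//     VerifyBound::AnyBound(vec![VerifyBound::AnyRegion(vec![ty::ReStatic]),
+//                                VerifyBound::AllRegions(vec![])])
+//
+// is met unconditionally, since its first alternative contains 'static (see
+// `must_hold` below) and its second alternative quantifies over an empty set
+// of regions.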
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub struct TwoRegions {
+    a: Region,
+    b: Region,
+}
+
+#[derive(Copy, Clone, PartialEq)]
+pub enum UndoLogEntry {
+    OpenSnapshot,
+    CommitedSnapshot,
+    AddVar(RegionVid),
+    AddConstraint(Constraint),
+    AddVerify(usize),
+    AddGiven(ty::FreeRegion, ty::RegionVid),
+    AddCombination(CombineMapType, TwoRegions),
+}
+
+#[derive(Copy, Clone, PartialEq)]
+pub enum CombineMapType {
+    Lub,
+    Glb,
+}
+
+#[derive(Clone, Debug)]
+pub enum RegionResolutionError<'tcx> {
+    /// `ConcreteFailure(o, a, b)`:
+    ///
+    /// `o` requires that `a <= b`, but this does not hold
+    ConcreteFailure(SubregionOrigin<'tcx>, Region, Region),
+
+    /// `GenericBoundFailure(p, s, a)`
+    ///
+    /// The parameter/associated-type `p` must be known to outlive the lifetime
+    /// `a` (but none of the known bounds are sufficient).
+    GenericBoundFailure(SubregionOrigin<'tcx>, GenericKind<'tcx>, Region),
+
+    /// `SubSupConflict(v, sub_origin, sub_r, sup_origin, sup_r)`:
+    ///
+    /// Could not infer a value for `v` because `sub_r <= v` (due to
+    /// `sub_origin`) but `v <= sup_r` (due to `sup_origin`) and
+    /// `sub_r <= sup_r` does not hold.
+    SubSupConflict(RegionVariableOrigin,
+                   SubregionOrigin<'tcx>,
+                   Region,
+                   SubregionOrigin<'tcx>,
+                   Region),
+
+    /// For subsets of `ConcreteFailure` and `SubSupConflict`, we can derive
+    /// more specific error messages by suggesting to the user where they
+    /// should put a lifetime. In those cases we process and put those errors
+    /// into `ProcessedErrors` before we do any reporting.
+    ProcessedErrors(Vec<ProcessedErrorOrigin<'tcx>>,
+                    Vec<SameRegions>),
+}
+
+#[derive(Clone, Debug)]
+pub enum ProcessedErrorOrigin<'tcx> {
+    ConcreteFailure(SubregionOrigin<'tcx>, Region, Region),
+    VariableFailure(RegionVariableOrigin),
+}
+
+/// SameRegions is used to group regions that we think are the same and would
+/// like to indicate so to the user.
+/// For example, the following function
+/// ```
+/// struct Foo { bar: i32 }
+/// fn foo2<'a, 'b>(x: &'a Foo) -> &'b i32 {
+///    &x.bar
+/// }
+/// ```
+/// would report an error because we expect 'a and 'b to match, and so we group
+/// 'a and 'b together inside a SameRegions struct
+#[derive(Clone, Debug)]
+pub struct SameRegions {
+    pub scope_id: ast::NodeId,
+    pub regions: Vec<BoundRegion>,
+}
+
+impl SameRegions {
+    pub fn contains(&self, other: &BoundRegion) -> bool {
+        self.regions.contains(other)
+    }
+
+    pub fn push(&mut self, other: BoundRegion) {
+        self.regions.push(other);
+    }
+}
+
+pub type CombineMap = FnvHashMap<TwoRegions, RegionVid>;
+
+pub struct RegionVarBindings<'a, 'tcx: 'a> {
+    tcx: &'a TyCtxt<'tcx>,
+    var_origins: RefCell<Vec<RegionVariableOrigin>>,
+
+    // Constraints of the form `A <= B` introduced by the region
+    // checker.  Here at least one of `A` and `B` must be a region
+    // variable.
+    constraints: RefCell<FnvHashMap<Constraint, SubregionOrigin<'tcx>>>,
+
+    // A "verify" is something that we need to verify after inference is
+    // done, but which does not directly affect inference in any way.
+    //
+    // An example is `A <= B` where neither `A` nor `B` is an
+    // inference variable.
+    verifys: RefCell<Vec<Verify<'tcx>>>,
+
+    // A "given" is a relationship that is known to hold. In particular,
+    // we often know from closure fn signatures that a particular free
+    // region must be a subregion of a region variable:
+    //
+    //    foo.iter().filter(<'a> |x: &'a &'b T| ...)
+    //
+    // In situations like this, `'b` is in fact a region variable
+    // introduced by the call to `iter()`, and `'a` is a bound region
+    // on the closure (as indicated by the `<'a>` prefix). If we are
+    // naive, we wind up inferring that `'b` must be `'static`,
+    // because we require that it be greater than `'a` and we do not
+    // know what `'a` is precisely.
+    //
+    // This set is used to avoid that naive scenario. Basically we
+    // record the fact that `'a <= 'b` is implied by the fn signature,
+    // and then ignore the constraint when solving equations. This is
+    // a bit of a hack but seems to work.
+    givens: RefCell<FnvHashSet<(ty::FreeRegion, ty::RegionVid)>>,
+
+    lubs: RefCell<CombineMap>,
+    glbs: RefCell<CombineMap>,
+    skolemization_count: Cell<u32>,
+    bound_count: Cell<u32>,
+
+    // The undo log records actions that might later be undone.
+    //
+    // Note: when the undo_log is empty, we are not actively
+    // snapshotting. When the `start_snapshot()` method is called, we
+    // push an OpenSnapshot entry onto the list to indicate that we
+    // are now actively snapshotting. The reason for this is that
+    // otherwise we end up adding entries for things like the lower
+    // bound on a variable and so forth, which can never be rolled
+    // back.
+    undo_log: RefCell<Vec<UndoLogEntry>>,
+    unification_table: RefCell<UnificationTable<ty::RegionVid>>,
+
+    // This contains the results of inference.  It begins as an empty
+    // option and only acquires a value after inference is complete.
+    values: RefCell<Option<Vec<VarValue>>>,
+}
+
+pub struct RegionSnapshot {
+    length: usize,
+    region_snapshot: unify::Snapshot<ty::RegionVid>,
+    skolemization_count: u32,
+}
+
+impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
+    pub fn new(tcx: &'a TyCtxt<'tcx>) -> RegionVarBindings<'a, 'tcx> {
+        RegionVarBindings {
+            tcx: tcx,
+            var_origins: RefCell::new(Vec::new()),
+            values: RefCell::new(None),
+            constraints: RefCell::new(FnvHashMap()),
+            verifys: RefCell::new(Vec::new()),
+            givens: RefCell::new(FnvHashSet()),
+            lubs: RefCell::new(FnvHashMap()),
+            glbs: RefCell::new(FnvHashMap()),
+            skolemization_count: Cell::new(0),
+            bound_count: Cell::new(0),
+            undo_log: RefCell::new(Vec::new()),
+            unification_table: RefCell::new(UnificationTable::new()),
+        }
+    }
+
+    fn in_snapshot(&self) -> bool {
+        !self.undo_log.borrow().is_empty()
+    }
+
+    pub fn start_snapshot(&self) -> RegionSnapshot {
+        let length = self.undo_log.borrow().len();
+        debug!("RegionVarBindings: start_snapshot({})", length);
+        self.undo_log.borrow_mut().push(OpenSnapshot);
+        RegionSnapshot {
+            length: length,
+            region_snapshot: self.unification_table.borrow_mut().snapshot(),
+            skolemization_count: self.skolemization_count.get(),
+        }
+    }
+
+    pub fn commit(&self, snapshot: RegionSnapshot) {
+        debug!("RegionVarBindings: commit({})", snapshot.length);
+        assert!(self.undo_log.borrow().len() > snapshot.length);
+        assert!((*self.undo_log.borrow())[snapshot.length] == OpenSnapshot);
+
+        let mut undo_log = self.undo_log.borrow_mut();
+        if snapshot.length == 0 {
+            undo_log.truncate(0);
+        } else {
+            (*undo_log)[snapshot.length] = CommitedSnapshot;
+        }
+        self.skolemization_count.set(snapshot.skolemization_count);
+        self.unification_table.borrow_mut().commit(snapshot.region_snapshot);
+    }
+
+    pub fn rollback_to(&self, snapshot: RegionSnapshot) {
+        debug!("RegionVarBindings: rollback_to({:?})", snapshot);
+        let mut undo_log = self.undo_log.borrow_mut();
+        assert!(undo_log.len() > snapshot.length);
+        assert!((*undo_log)[snapshot.length] == OpenSnapshot);
+        while undo_log.len() > snapshot.length + 1 {
+            match undo_log.pop().unwrap() {
+                OpenSnapshot => {
+                    bug!("Failure to observe stack discipline");
+                }
+                CommitedSnapshot => {}
+                AddVar(vid) => {
+                    let mut var_origins = self.var_origins.borrow_mut();
+                    var_origins.pop().unwrap();
+                    assert_eq!(var_origins.len(), vid.index as usize);
+                }
+                AddConstraint(ref constraint) => {
+                    self.constraints.borrow_mut().remove(constraint);
+                }
+                AddVerify(index) => {
+                    self.verifys.borrow_mut().pop();
+                    assert_eq!(self.verifys.borrow().len(), index);
+                }
+                AddGiven(sub, sup) => {
+                    self.givens.borrow_mut().remove(&(sub, sup));
+                }
+                AddCombination(Glb, ref regions) => {
+                    self.glbs.borrow_mut().remove(regions);
+                }
+                AddCombination(Lub, ref regions) => {
+                    self.lubs.borrow_mut().remove(regions);
+                }
+            }
+        }
+        let c = undo_log.pop().unwrap();
+        assert!(c == OpenSnapshot);
+        self.skolemization_count.set(snapshot.skolemization_count);
+        self.unification_table.borrow_mut()
+            .rollback_to(snapshot.region_snapshot);
+    }
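+
+    // Illustrative usage of the snapshot discipline (a sketch, not part of
+    // the upstream source): speculative work is bracketed by `start_snapshot`
+    // and exactly one of `commit` / `rollback_to`, e.g.
+    //
+    //     let snapshot = region_vars.start_snapshot();
+    //     // ... create variables, add constraints ...
+    //     if ok {
+    //         region_vars.commit(snapshot);
+    //     } else {
+    //         region_vars.rollback_to(snapshot);
+    //     }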
+
+    pub fn num_vars(&self) -> u32 {
+        let len = self.var_origins.borrow().len();
+        // enforce no overflow
+        assert!(len as u32 as usize == len);
+        len as u32
+    }
+
+    pub fn new_region_var(&self, origin: RegionVariableOrigin) -> RegionVid {
+        let vid = RegionVid { index: self.num_vars() };
+        self.var_origins.borrow_mut().push(origin.clone());
+
+        let u_vid = self.unification_table.borrow_mut().new_key(
+            unify_key::RegionVidKey { min_vid: vid }
+            );
+        assert_eq!(vid, u_vid);
+        if self.in_snapshot() {
+            self.undo_log.borrow_mut().push(AddVar(vid));
+        }
+        debug!("created new region variable {:?} with origin {:?}",
+               vid,
+               origin);
+        return vid;
+    }
+
+    /// Creates a new skolemized region. Skolemized regions are fresh
+    /// regions used when performing higher-ranked computations. They
+    /// must be used in a very particular way and are never supposed
+    /// to "escape" out into error messages or the code at large.
+    ///
+    /// The idea is to always create a snapshot. Skolemized regions
+    /// can be created in the context of this snapshot, but once the
+    /// snapshot is committed or rolled back, their numbers will be
+    /// recycled, so you must be finished with them. See the extensive
+    /// comments in `higher_ranked.rs` to see how it works (in
+    /// particular, the subtyping comparison).
+    ///
+    /// The `snapshot` argument to this function is not really used;
+    /// it's just there to make it explicit which snapshot bounds the
+    /// skolemized region that results.
+    pub fn new_skolemized(&self, br: ty::BoundRegion, snapshot: &RegionSnapshot) -> Region {
+        assert!(self.in_snapshot());
+        assert!(self.undo_log.borrow()[snapshot.length] == OpenSnapshot);
+
+        let sc = self.skolemization_count.get();
+        self.skolemization_count.set(sc + 1);
+        ReSkolemized(ty::SkolemizedRegionVid { index: sc }, br)
+    }
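+
+    // For example (illustrative only): higher-ranked comparisons start a
+    // snapshot, call `new_skolemized` for each bound region being replaced,
+    // perform the subtyping check, inspect `tainted` to catch illegal
+    // relations, and then roll the snapshot back so the skolemization counter
+    // can be reused.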
+
+    pub fn new_bound(&self, debruijn: ty::DebruijnIndex) -> Region {
+        // Creates a fresh bound variable for use in GLB computations.
+        // See discussion of GLB computation in the large comment at
+        // the top of this file for more details.
+        //
+        // This computation is potentially wrong in the face of
+        // rollover.  It's conceivable, if unlikely, that one might
+        // wind up with accidental capture for nested functions in
+        // that case, if the outer function had bound regions created
+        // a very long time before and the inner function somehow
+        // wound up rolling over such that supposedly fresh
+        // identifiers were in fact shadowed. For now, we just assert
+        // that there is no rollover -- eventually we should try to be
+        // robust against this possibility, either by checking the set
+        // of bound identifiers that appear in a given expression and
+        // ensure that we generate one that is distinct, or by
+        // changing the representation of bound regions in a fn
+        // declaration
+
+        let sc = self.bound_count.get();
+        self.bound_count.set(sc + 1);
+
+        if sc >= self.bound_count.get() {
+            bug!("rollover in RegionInference new_bound()");
+        }
+
+        ReLateBound(debruijn, BrFresh(sc))
+    }
+
+    fn values_are_none(&self) -> bool {
+        self.values.borrow().is_none()
+    }
+
+    fn add_constraint(&self, constraint: Constraint, origin: SubregionOrigin<'tcx>) {
+        // cannot add constraints once regions are resolved
+        assert!(self.values_are_none());
+
+        debug!("RegionVarBindings: add_constraint({:?})", constraint);
+
+        if self.constraints.borrow_mut().insert(constraint, origin).is_none() {
+            if self.in_snapshot() {
+                self.undo_log.borrow_mut().push(AddConstraint(constraint));
+            }
+        }
+    }
+
+    fn add_verify(&self, verify: Verify<'tcx>) {
+        // cannot add verifys once regions are resolved
+        assert!(self.values_are_none());
+
+        debug!("RegionVarBindings: add_verify({:?})", verify);
+
+        // skip no-op cases known to be satisfied
+        match verify {
+            VerifyGenericBound(_, _, _, VerifyBound::AllBounds(ref bs)) if bs.len() == 0 => {
+                return;
+            }
+            _ => {}
+        }
+
+        let mut verifys = self.verifys.borrow_mut();
+        let index = verifys.len();
+        verifys.push(verify);
+        if self.in_snapshot() {
+            self.undo_log.borrow_mut().push(AddVerify(index));
+        }
+    }
+
+    pub fn add_given(&self, sub: ty::FreeRegion, sup: ty::RegionVid) {
+        // cannot add givens once regions are resolved
+        assert!(self.values_are_none());
+
+        let mut givens = self.givens.borrow_mut();
+        if givens.insert((sub, sup)) {
+            debug!("add_given({:?} <= {:?})", sub, sup);
+
+            self.undo_log.borrow_mut().push(AddGiven(sub, sup));
+        }
+    }
+
+    pub fn make_eqregion(&self, origin: SubregionOrigin<'tcx>, sub: Region, sup: Region) {
+        if sub != sup {
+            // Eventually, it would be nice to add direct support for
+            // equating regions.
+            self.make_subregion(origin.clone(), sub, sup);
+            self.make_subregion(origin, sup, sub);
+
+            if let (ty::ReVar(sub), ty::ReVar(sup)) = (sub, sup) {
+                self.unification_table.borrow_mut().union(sub, sup);
+            }
+        }
+    }
+
+    pub fn make_subregion(&self, origin: SubregionOrigin<'tcx>, sub: Region, sup: Region) {
+        // cannot add constraints once regions are resolved
+        assert!(self.values_are_none());
+
+        debug!("RegionVarBindings: make_subregion({:?}, {:?}) due to {:?}",
+               sub,
+               sup,
+               origin);
+
+        match (sub, sup) {
+            (ReEarlyBound(..), _) |
+            (ReLateBound(..), _) |
+            (_, ReEarlyBound(..)) |
+            (_, ReLateBound(..)) => {
+                span_bug!(origin.span(),
+                          "cannot relate bound region: {:?} <= {:?}",
+                          sub,
+                          sup);
+            }
+            (_, ReStatic) => {
+                // all regions are subregions of static, so we can ignore this
+            }
+            (ReVar(sub_id), ReVar(sup_id)) => {
+                self.add_constraint(ConstrainVarSubVar(sub_id, sup_id), origin);
+            }
+            (r, ReVar(sup_id)) => {
+                self.add_constraint(ConstrainRegSubVar(r, sup_id), origin);
+            }
+            (ReVar(sub_id), r) => {
+                self.add_constraint(ConstrainVarSubReg(sub_id, r), origin);
+            }
+            _ => {
+                self.add_verify(VerifyRegSubReg(origin, sub, sup));
+            }
+        }
+    }
+
+    /// See `Verify::VerifyGenericBound`
+    pub fn verify_generic_bound(&self,
+                                origin: SubregionOrigin<'tcx>,
+                                kind: GenericKind<'tcx>,
+                                sub: Region,
+                                bound: VerifyBound) {
+        self.add_verify(VerifyGenericBound(kind, origin, sub, bound));
+    }
+
+    pub fn lub_regions(&self, origin: SubregionOrigin<'tcx>, a: Region, b: Region) -> Region {
+        // cannot add constraints once regions are resolved
+        assert!(self.values_are_none());
+
+        debug!("RegionVarBindings: lub_regions({:?}, {:?})", a, b);
+        if a == ty::ReStatic || b == ty::ReStatic {
+            ReStatic // nothing lives longer than static
+        } else if a == b {
+            a // LUB(a,a) = a
+        } else {
+            self.combine_vars(Lub, a, b, origin.clone(), |this, old_r, new_r| {
+                this.make_subregion(origin.clone(), old_r, new_r)
+            })
+        }
+    }
+
+    pub fn glb_regions(&self, origin: SubregionOrigin<'tcx>, a: Region, b: Region) -> Region {
+        // cannot add constraints once regions are resolved
+        assert!(self.values_are_none());
+
+        debug!("RegionVarBindings: glb_regions({:?}, {:?})", a, b);
+        match (a, b) {
+            (ReStatic, r) | (r, ReStatic) => {
+                r // static lives longer than everything else
+            }
+
+            _ if a == b => {
+                a // GLB(a,a) = a
+            }
+
+            _ => {
+                self.combine_vars(Glb, a, b, origin.clone(), |this, old_r, new_r| {
+                    this.make_subregion(origin.clone(), new_r, old_r)
+                })
+            }
+        }
+    }
+
+    pub fn resolve_var(&self, rid: RegionVid) -> ty::Region {
+        match *self.values.borrow() {
+            None => {
+                span_bug!((*self.var_origins.borrow())[rid.index as usize].span(),
+                          "attempt to resolve region variable before values have \
+                           been computed!")
+            }
+            Some(ref values) => {
+                let r = lookup(values, rid);
+                debug!("resolve_var({:?}) = {:?}", rid, r);
+                r
+            }
+        }
+    }
+
+    pub fn opportunistic_resolve_var(&self, rid: RegionVid) -> ty::Region {
+        ty::ReVar(self.unification_table.borrow_mut().find_value(rid).min_vid)
+    }
+
+    fn combine_map(&self, t: CombineMapType) -> &RefCell<CombineMap> {
+        match t {
+            Glb => &self.glbs,
+            Lub => &self.lubs,
+        }
+    }
+
+    pub fn combine_vars<F>(&self,
+                           t: CombineMapType,
+                           a: Region,
+                           b: Region,
+                           origin: SubregionOrigin<'tcx>,
+                           mut relate: F)
+                           -> Region
+        where F: FnMut(&RegionVarBindings<'a, 'tcx>, Region, Region)
+    {
+        let vars = TwoRegions { a: a, b: b };
+        match self.combine_map(t).borrow().get(&vars) {
+            Some(&c) => {
+                return ReVar(c);
+            }
+            None => {}
+        }
+        let c = self.new_region_var(MiscVariable(origin.span()));
+        self.combine_map(t).borrow_mut().insert(vars, c);
+        if self.in_snapshot() {
+            self.undo_log.borrow_mut().push(AddCombination(t, vars));
+        }
+        relate(self, a, ReVar(c));
+        relate(self, b, ReVar(c));
+        debug!("combine_vars() c={:?}", c);
+        ReVar(c)
+    }
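+
+    // Illustrative example (not from the upstream source): for
+    // `lub_regions(origin, 'a, 'b)` with distinct non-static regions, this
+    // creates (or reuses) a fresh variable 'c, and the `relate` closure adds
+    // 'a <= 'c and 'b <= 'c; `glb_regions` is symmetric, adding 'c <= 'a and
+    // 'c <= 'b.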
+
+    pub fn vars_created_since_snapshot(&self, mark: &RegionSnapshot) -> Vec<RegionVid> {
+        self.undo_log.borrow()[mark.length..]
+            .iter()
+            .filter_map(|&elt| {
+                match elt {
+                    AddVar(vid) => Some(vid),
+                    _ => None,
+                }
+            })
+            .collect()
+    }
+
+    /// Computes all regions that have been related to `r0` in any way since the mark `mark` was
+    /// made---`r0` itself will be the first entry. This is used when checking whether skolemized
+    /// regions are being improperly related to other regions.
+    pub fn tainted(&self, mark: &RegionSnapshot, r0: Region) -> Vec<Region> {
+        debug!("tainted(mark={:?}, r0={:?})", mark, r0);
+        let _indenter = indenter();
+
+        // `result_set` acts as a worklist: we explore all outgoing
+        // edges and add any new regions we find to result_set.  This
+        // is not a terribly efficient implementation.
+        let mut result_set = vec![r0];
+        let mut result_index = 0;
+        while result_index < result_set.len() {
+            // nb: can't use usize::range() here because result_set grows
+            let r = result_set[result_index];
+            debug!("result_index={}, r={:?}", result_index, r);
+
+            for undo_entry in self.undo_log.borrow()[mark.length..].iter() {
+                match undo_entry {
+                    &AddConstraint(ConstrainVarSubVar(a, b)) => {
+                        consider_adding_bidirectional_edges(&mut result_set, r, ReVar(a), ReVar(b));
+                    }
+                    &AddConstraint(ConstrainRegSubVar(a, b)) => {
+                        consider_adding_bidirectional_edges(&mut result_set, r, a, ReVar(b));
+                    }
+                    &AddConstraint(ConstrainVarSubReg(a, b)) => {
+                        consider_adding_bidirectional_edges(&mut result_set, r, ReVar(a), b);
+                    }
+                    &AddGiven(a, b) => {
+                        consider_adding_bidirectional_edges(&mut result_set,
+                                                            r,
+                                                            ReFree(a),
+                                                            ReVar(b));
+                    }
+                    &AddVerify(i) => {
+                        match (*self.verifys.borrow())[i] {
+                            VerifyRegSubReg(_, a, b) => {
+                                consider_adding_bidirectional_edges(&mut result_set, r, a, b);
+                            }
+                            VerifyGenericBound(_, _, a, ref bound) => {
+                                bound.for_each_region(&mut |b| {
+                                    consider_adding_bidirectional_edges(&mut result_set, r, a, b)
+                                });
+                            }
+                        }
+                    }
+                    &AddCombination(..) |
+                    &AddVar(..) |
+                    &OpenSnapshot |
+                    &CommitedSnapshot => {}
+                }
+            }
+
+            result_index += 1;
+        }
+
+        return result_set;
+
+        fn consider_adding_bidirectional_edges(result_set: &mut Vec<Region>,
+                                               r: Region,
+                                               r1: Region,
+                                               r2: Region) {
+            consider_adding_directed_edge(result_set, r, r1, r2);
+            consider_adding_directed_edge(result_set, r, r2, r1);
+        }
+
+        fn consider_adding_directed_edge(result_set: &mut Vec<Region>,
+                                         r: Region,
+                                         r1: Region,
+                                         r2: Region) {
+            if r == r1 {
+                // Clearly, this is potentially inefficient.
+                if !result_set.iter().any(|x| *x == r2) {
+                    result_set.push(r2);
+                }
+            }
+        }
+    }
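+
+    // For example (illustrative only): if the undo log since `mark` records
+    // the constraints 'a <= '0 and '0 <= 'b, then `tainted(mark, '0)` returns
+    // '0 together with 'a and 'b, because edges are followed in both
+    // directions from each region already in the result set.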
+
+    /// This function performs the actual region resolution.  It must be
+    /// called after all constraints have been added.  It performs a
+    /// fixed-point iteration to find region values which satisfy all
+    /// constraints, assuming such values can be found; if they cannot,
+    /// errors are reported.
+    pub fn resolve_regions(&self,
+                           free_regions: &FreeRegionMap,
+                           subject_node: ast::NodeId)
+                           -> Vec<RegionResolutionError<'tcx>> {
+        debug!("RegionVarBindings: resolve_regions()");
+        let mut errors = vec![];
+        let v = self.infer_variable_values(free_regions, &mut errors, subject_node);
+        *self.values.borrow_mut() = Some(v);
+        errors
+    }
+
+    fn lub_concrete_regions(&self, free_regions: &FreeRegionMap, a: Region, b: Region) -> Region {
+        match (a, b) {
+            (ReLateBound(..), _) |
+            (_, ReLateBound(..)) |
+            (ReEarlyBound(..), _) |
+            (_, ReEarlyBound(..)) => {
+                bug!("cannot relate bound region: LUB({:?}, {:?})", a, b);
+            }
+
+            (ReStatic, _) | (_, ReStatic) => {
+                ReStatic // nothing lives longer than static
+            }
+
+            (ReEmpty, r) | (r, ReEmpty) => {
+                r // everything lives longer than empty
+            }
+
+            (ReVar(v_id), _) | (_, ReVar(v_id)) => {
+                span_bug!((*self.var_origins.borrow())[v_id.index as usize].span(),
+                          "lub_concrete_regions invoked with non-concrete \
+                           regions: {:?}, {:?}",
+                          a,
+                          b);
+            }
+
+            (ReFree(ref fr), ReScope(s_id)) |
+            (ReScope(s_id), ReFree(ref fr)) => {
+                let f = ReFree(*fr);
+                // A "free" region can be interpreted as "some region
+                // at least as big as the block fr.scope".  So, we can
+                // reasonably compare free regions and scopes:
+                let r_id = self.tcx.region_maps.nearest_common_ancestor(fr.scope, s_id);
+
+                if r_id == fr.scope {
+                    // if the free region's scope `fr.scope` is bigger than
+                    // the scope region `s_id`, then the LUB is the free
+                    // region itself:
+                    f
+                } else {
+                    // otherwise, we don't know what the free region is,
+                    // so we must conservatively say the LUB is static:
+                    ReStatic
+                }
+            }
+
+            (ReScope(a_id), ReScope(b_id)) => {
+                // The region corresponding to an outer block is a
+                // subtype of the region corresponding to an inner
+                // block.
+                ReScope(self.tcx.region_maps.nearest_common_ancestor(a_id, b_id))
+            }
+
+            (ReFree(a_fr), ReFree(b_fr)) => {
+                free_regions.lub_free_regions(a_fr, b_fr)
+            }
+
+            // For these types, we cannot define any additional
+            // relationship:
+            (ReSkolemized(..), _) |
+            (_, ReSkolemized(..)) => {
+                if a == b {
+                    a
+                } else {
+                    ReStatic
+                }
+            }
+        }
+    }
+}
+
+// ______________________________________________________________________
+
+#[derive(Copy, Clone, Debug)]
+pub enum VarValue {
+    Value(Region),
+    ErrorValue,
+}
+
+struct VarData {
+    value: VarValue,
+}
+
+struct RegionAndOrigin<'tcx> {
+    region: Region,
+    origin: SubregionOrigin<'tcx>,
+}
+
+type RegionGraph = graph::Graph<(), Constraint>;
+
+impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
+    fn infer_variable_values(&self,
+                             free_regions: &FreeRegionMap,
+                             errors: &mut Vec<RegionResolutionError<'tcx>>,
+                             subject: ast::NodeId)
+                             -> Vec<VarValue> {
+        let mut var_data = self.construct_var_data();
+
+        // Dorky hack to cause `dump_constraints` to only get called
+        // if debug mode is enabled:
+        debug!("----() End constraint listing (subject={}) {:?}---",
+               subject,
+               self.dump_constraints(subject));
+        graphviz::maybe_print_constraints_for(self, subject);
+
+        let graph = self.construct_graph();
+        self.expand_givens(&graph);
+        self.expansion(free_regions, &mut var_data);
+        self.contraction(free_regions, &mut var_data);
+        let values = self.extract_values_and_collect_conflicts(free_regions,
+                                                               &var_data,
+                                                               &graph,
+                                                               errors);
+        self.collect_concrete_region_errors(free_regions, &values, errors);
+        values
+    }
+
+    fn construct_var_data(&self) -> Vec<VarData> {
+        (0..self.num_vars() as usize)
+            .map(|_| VarData { value: Value(ty::ReEmpty) })
+            .collect()
+    }
+
+    fn dump_constraints(&self, subject: ast::NodeId) {
+        debug!("----() Start constraint listing (subject={}) ()----",
+               subject);
+        for (idx, (constraint, _)) in self.constraints.borrow().iter().enumerate() {
+            debug!("Constraint {} => {:?}", idx, constraint);
+        }
+    }
+
+    fn expand_givens(&self, graph: &RegionGraph) {
+        // Givens are a kind of horrible hack to account for
+        // constraints like 'c <= '0 that are known to hold due to
+        // closure signatures (see the comment above on the `givens`
+        // field). They should go away. But until they do, the role
+        // of this fn is to account for the transitive nature:
+        //
+        //     Given 'c <= '0
+        //     and   '0 <= '1
+        //     then  'c <= '1
+
+        let mut givens = self.givens.borrow_mut();
+        let seeds: Vec<_> = givens.iter().cloned().collect();
+        for (fr, vid) in seeds {
+            let seed_index = NodeIndex(vid.index as usize);
+            for succ_index in graph.depth_traverse(seed_index) {
+                let succ_index = succ_index.0 as u32;
+                if succ_index < self.num_vars() {
+                    let succ_vid = RegionVid { index: succ_index };
+                    givens.insert((fr, succ_vid));
+                }
+            }
+        }
+    }
+
+    fn expansion(&self, free_regions: &FreeRegionMap, var_data: &mut [VarData]) {
+        self.iterate_until_fixed_point("Expansion", |constraint| {
+            debug!("expansion: constraint={:?} origin={:?}",
+                   constraint,
+                   self.constraints
+                       .borrow()
+                       .get(constraint)
+                       .unwrap());
+            match *constraint {
+                ConstrainRegSubVar(a_region, b_vid) => {
+                    let b_data = &mut var_data[b_vid.index as usize];
+                    self.expand_node(free_regions, a_region, b_vid, b_data)
+                }
+                ConstrainVarSubVar(a_vid, b_vid) => {
+                    match var_data[a_vid.index as usize].value {
+                        ErrorValue => false,
+                        Value(a_region) => {
+                            let b_node = &mut var_data[b_vid.index as usize];
+                            self.expand_node(free_regions, a_region, b_vid, b_node)
+                        }
+                    }
+                }
+                ConstrainVarSubReg(..) => {
+                    // This is a contraction constraint.  Ignore it.
+                    false
+                }
+            }
+        })
+    }
+
+    fn expand_node(&self,
+                   free_regions: &FreeRegionMap,
+                   a_region: Region,
+                   b_vid: RegionVid,
+                   b_data: &mut VarData)
+                   -> bool {
+        debug!("expand_node({:?}, {:?} == {:?})",
+               a_region,
+               b_vid,
+               b_data.value);
+
+        // Check if this relationship is implied by a given.
+        match a_region {
+            ty::ReFree(fr) => {
+                if self.givens.borrow().contains(&(fr, b_vid)) {
+                    debug!("given");
+                    return false;
+                }
+            }
+            _ => {}
+        }
+
+        match b_data.value {
+            Value(cur_region) => {
+                let lub = self.lub_concrete_regions(free_regions, a_region, cur_region);
+                if lub == cur_region {
+                    return false;
+                }
+
+                debug!("Expanding value of {:?} from {:?} to {:?}",
+                       b_vid,
+                       cur_region,
+                       lub);
+
+                b_data.value = Value(lub);
+                return true;
+            }
+
+            ErrorValue => {
+                return false;
+            }
+        }
+    }
+
+    // FIXME(#29436) -- this fn would just go away if we removed ConstrainVarSubReg
+    fn contraction(&self, free_regions: &FreeRegionMap, var_data: &mut [VarData]) {
+        self.iterate_until_fixed_point("Contraction", |constraint| {
+            debug!("contraction: constraint={:?} origin={:?}",
+                   constraint,
+                   self.constraints
+                       .borrow()
+                       .get(constraint)
+                       .unwrap());
+            match *constraint {
+                ConstrainRegSubVar(..) |
+                ConstrainVarSubVar(..) => {
+                    // Expansion will ensure that these constraints hold. Ignore.
+                }
+                ConstrainVarSubReg(a_vid, b_region) => {
+                    let a_data = &mut var_data[a_vid.index as usize];
+                    debug!("contraction: {:?} == {:?}, {:?}",
+                           a_vid,
+                           a_data.value,
+                           b_region);
+
+                    let a_region = match a_data.value {
+                        ErrorValue => return false,
+                        Value(a_region) => a_region,
+                    };
+
+                    if !free_regions.is_subregion_of(self.tcx, a_region, b_region) {
+                        debug!("Setting {:?} to ErrorValue: {:?} not subregion of {:?}",
+                               a_vid,
+                               a_region,
+                               b_region);
+                        a_data.value = ErrorValue;
+                    }
+                }
+            }
+
+            false
+        })
+    }
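+
+    // Taken together (an explanatory note, not upstream text): `expansion`
+    // grows each variable upward from ReEmpty by repeatedly taking the LUB
+    // along `_ <= var` constraints until nothing changes, while `contraction`
+    // no longer shrinks anything; it only checks the `var <= concrete`
+    // constraints and marks variables that ended up too big with ErrorValue.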
+
+    fn collect_concrete_region_errors(&self,
+                                      free_regions: &FreeRegionMap,
+                                      values: &Vec<VarValue>,
+                                      errors: &mut Vec<RegionResolutionError<'tcx>>) {
+        let mut reg_reg_dups = FnvHashSet();
+        for verify in self.verifys.borrow().iter() {
+            match *verify {
+                VerifyRegSubReg(ref origin, sub, sup) => {
+                    if free_regions.is_subregion_of(self.tcx, sub, sup) {
+                        continue;
+                    }
+
+                    if !reg_reg_dups.insert((sub, sup)) {
+                        continue;
+                    }
+
+                    debug!("region inference error at {:?}: {:?} <= {:?} is not true",
+                           origin,
+                           sub,
+                           sup);
+
+                    errors.push(ConcreteFailure((*origin).clone(), sub, sup));
+                }
+
+                VerifyGenericBound(ref kind, ref origin, sub, ref bound) => {
+                    let sub = normalize(values, sub);
+                    if bound.is_met(self.tcx, free_regions, values, sub) {
+                        continue;
+                    }
+
+                    debug!("region inference error at {:?}: verifying {:?} <= {:?}",
+                           origin,
+                           sub,
+                           bound);
+
+                    errors.push(GenericBoundFailure((*origin).clone(), kind.clone(), sub));
+                }
+            }
+        }
+    }
+
+    fn extract_values_and_collect_conflicts(&self,
+                                            free_regions: &FreeRegionMap,
+                                            var_data: &[VarData],
+                                            graph: &RegionGraph,
+                                            errors: &mut Vec<RegionResolutionError<'tcx>>)
+                                            -> Vec<VarValue> {
+        debug!("extract_values_and_collect_conflicts()");
+
+        // This is the best way that I have found to suppress
+        // duplicate and related errors. Basically we keep a set of
+        // flags for every node. Whenever an error occurs, we will
+        // walk some portion of the graph looking to find pairs of
+        // conflicting regions to report to the user. As we walk, we
+        // trip the flags from false to true, and if we find that
+        // we've already reported an error involving any particular
+        // node we just stop and don't report the current error.  The
+        // idea is to report errors that derive from independent
+        // regions of the graph, but not those that derive from
+        // overlapping locations.
+        let mut dup_vec = vec![u32::MAX; self.num_vars() as usize];
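+        // (Explanatory note, not upstream text: an entry of u32::MAX means
+        // "not yet visited"; otherwise it records the index of the error
+        // variable whose walk first reached that node, which is how
+        // `collect_concrete_regions` detects overlapping walks below.)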
+
+        for idx in 0..self.num_vars() as usize {
+            match var_data[idx].value {
+                Value(_) => {
+                    /* Inference successful */
+                }
+                ErrorValue => {
+                    /* Inference impossible, this value contains
+                       inconsistent constraints.
+
+                       I think that in this case we should report an
+                       error now---unlike the case above, we can't
+                       wait to see whether the user needs the result
+                       of this variable.  The reason is that the mere
+                       existence of this variable implies that the
+                       region graph is inconsistent, whether or not it
+                       is used.
+
+                       For example, we may have created a region
+                       variable that is the GLB of two other regions
+                       which do not have a GLB.  Even if that variable
+                       is not used, it implies that those two regions
+                       *should* have a GLB.
+
+                       At least I think this is true. It may be that
+                       the mere existence of a conflict in a region variable
+                       that is not used is not a problem, so if this rule
+                       starts to create problems we'll have to revisit
+                       this portion of the code and think hard about it. =) */
+
+                    let node_vid = RegionVid { index: idx as u32 };
+                    self.collect_error_for_expanding_node(free_regions,
+                                                          graph,
+                                                          &mut dup_vec,
+                                                          node_vid,
+                                                          errors);
+                }
+            }
+        }
+
+        (0..self.num_vars() as usize).map(|idx| var_data[idx].value).collect()
+    }
+
+    fn construct_graph(&self) -> RegionGraph {
+        let num_vars = self.num_vars();
+
+        let constraints = self.constraints.borrow();
+
+        let mut graph = graph::Graph::new();
+
+        for _ in 0..num_vars {
+            graph.add_node(());
+        }
+
+        // Issue #30438: two distinct dummy nodes, one for incoming
+        // edges (dummy_source) and another for outgoing edges
+        // (dummy_sink). In `dummy -> a -> b -> dummy`, using one
+        // dummy node leads one to think (erroneously) there exists a
+        // path from `b` to `a`. Two dummy nodes sidesteps the issue.
+        let dummy_source = graph.add_node(());
+        let dummy_sink = graph.add_node(());
+
+        for (constraint, _) in constraints.iter() {
+            match *constraint {
+                ConstrainVarSubVar(a_id, b_id) => {
+                    graph.add_edge(NodeIndex(a_id.index as usize),
+                                   NodeIndex(b_id.index as usize),
+                                   *constraint);
+                }
+                ConstrainRegSubVar(_, b_id) => {
+                    graph.add_edge(dummy_source, NodeIndex(b_id.index as usize), *constraint);
+                }
+                ConstrainVarSubReg(a_id, _) => {
+                    graph.add_edge(NodeIndex(a_id.index as usize), dummy_sink, *constraint);
+                }
+            }
+        }
+
+        return graph;
+    }
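+
+    // Concretely (an explanatory note, not upstream text): `ConstrainRegSubVar`
+    // becomes an edge dummy_source -> var and `ConstrainVarSubReg` an edge
+    // var -> dummy_sink, so with a single shared dummy node the chain
+    // `R1 <= a`, `a <= b`, `b <= R2` would look like dummy -> a -> b -> dummy
+    // and falsely suggest a path from `b` back to `a`.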
+
+    fn collect_error_for_expanding_node(&self,
+                                        free_regions: &FreeRegionMap,
+                                        graph: &RegionGraph,
+                                        dup_vec: &mut [u32],
+                                        node_idx: RegionVid,
+                                        errors: &mut Vec<RegionResolutionError<'tcx>>) {
+        // Errors in expanding nodes result from a lower-bound that is
+        // not contained by an upper-bound.
+        let (mut lower_bounds, lower_dup) = self.collect_concrete_regions(graph,
+                                                                          node_idx,
+                                                                          graph::INCOMING,
+                                                                          dup_vec);
+        let (mut upper_bounds, upper_dup) = self.collect_concrete_regions(graph,
+                                                                          node_idx,
+                                                                          graph::OUTGOING,
+                                                                          dup_vec);
+
+        if lower_dup || upper_dup {
+            return;
+        }
+
+        // We place free regions first because we are special casing
+        // SubSupConflict(ReFree, ReFree) when reporting error, and so
+        // the user will more likely get a specific suggestion.
+        fn free_regions_first(a: &RegionAndOrigin, b: &RegionAndOrigin) -> Ordering {
+            match (a.region, b.region) {
+                (ReFree(..), ReFree(..)) => Equal,
+                (ReFree(..), _) => Less,
+                (_, ReFree(..)) => Greater,
+                (_, _) => Equal,
+            }
+        }
+        lower_bounds.sort_by(|a, b| free_regions_first(a, b));
+        upper_bounds.sort_by(|a, b| free_regions_first(a, b));
+
+        for lower_bound in &lower_bounds {
+            for upper_bound in &upper_bounds {
+                if !free_regions.is_subregion_of(self.tcx, lower_bound.region, upper_bound.region) {
+                    let origin = (*self.var_origins.borrow())[node_idx.index as usize].clone();
+                    debug!("region inference error at {:?} for {:?}: SubSupConflict sub: {:?} \
+                            sup: {:?}",
+                           origin,
+                           node_idx,
+                           lower_bound.region,
+                           upper_bound.region);
+                    errors.push(SubSupConflict(origin,
+                                               lower_bound.origin.clone(),
+                                               lower_bound.region,
+                                               upper_bound.origin.clone(),
+                                               upper_bound.region));
+                    return;
+                }
+            }
+        }
+
+        span_bug!((*self.var_origins.borrow())[node_idx.index as usize].span(),
+                  "collect_error_for_expanding_node() could not find \
+                   error for var {:?}, lower_bounds={:?}, \
+                   upper_bounds={:?}",
+                  node_idx,
+                  lower_bounds,
+                  upper_bounds);
+    }
+
+    fn collect_concrete_regions(&self,
+                                graph: &RegionGraph,
+                                orig_node_idx: RegionVid,
+                                dir: Direction,
+                                dup_vec: &mut [u32])
+                                -> (Vec<RegionAndOrigin<'tcx>>, bool) {
+        struct WalkState<'tcx> {
+            set: FnvHashSet<RegionVid>,
+            stack: Vec<RegionVid>,
+            result: Vec<RegionAndOrigin<'tcx>>,
+            dup_found: bool,
+        }
+        let mut state = WalkState {
+            set: FnvHashSet(),
+            stack: vec![orig_node_idx],
+            result: Vec::new(),
+            dup_found: false,
+        };
+        state.set.insert(orig_node_idx);
+
+        // to start off the process, walk the source node in the
+        // direction specified
+        process_edges(self, &mut state, graph, orig_node_idx, dir);
+
+        while !state.stack.is_empty() {
+            let node_idx = state.stack.pop().unwrap();
+
+            // check whether we've visited this node on some previous walk
+            if dup_vec[node_idx.index as usize] == u32::MAX {
+                dup_vec[node_idx.index as usize] = orig_node_idx.index;
+            } else if dup_vec[node_idx.index as usize] != orig_node_idx.index {
+                state.dup_found = true;
+            }
+
+            debug!("collect_concrete_regions(orig_node_idx={:?}, node_idx={:?})",
+                   orig_node_idx,
+                   node_idx);
+
+            // figure out the direction from which this node takes its
+            // values, and search for concrete regions etc in that direction
+            let dir = graph::INCOMING;
+            process_edges(self, &mut state, graph, node_idx, dir);
+        }
+
+        let WalkState {result, dup_found, ..} = state;
+        return (result, dup_found);
+
+        fn process_edges<'a, 'tcx>(this: &RegionVarBindings<'a, 'tcx>,
+                                   state: &mut WalkState<'tcx>,
+                                   graph: &RegionGraph,
+                                   source_vid: RegionVid,
+                                   dir: Direction) {
+            debug!("process_edges(source_vid={:?}, dir={:?})", source_vid, dir);
+
+            let source_node_index = NodeIndex(source_vid.index as usize);
+            for (_, edge) in graph.adjacent_edges(source_node_index, dir) {
+                match edge.data {
+                    ConstrainVarSubVar(from_vid, to_vid) => {
+                        let opp_vid = if from_vid == source_vid {
+                            to_vid
+                        } else {
+                            from_vid
+                        };
+                        if state.set.insert(opp_vid) {
+                            state.stack.push(opp_vid);
+                        }
+                    }
+
+                    ConstrainRegSubVar(region, _) |
+                    ConstrainVarSubReg(_, region) => {
+                        state.result.push(RegionAndOrigin {
+                            region: region,
+                            origin: this.constraints.borrow().get(&edge.data).unwrap().clone(),
+                        });
+                    }
+                }
+            }
+        }
+    }
+
+    fn iterate_until_fixed_point<F>(&self, tag: &str, mut body: F)
+        where F: FnMut(&Constraint) -> bool
+    {
+        let mut iteration = 0;
+        let mut changed = true;
+        while changed {
+            changed = false;
+            iteration += 1;
+            debug!("---- {} Iteration {}{}", "#", tag, iteration);
+            for (constraint, _) in self.constraints.borrow().iter() {
+                let edge_changed = body(constraint);
+                if edge_changed {
+                    debug!("Updated due to constraint {:?}", constraint);
+                    changed = true;
+                }
+            }
+        }
+        debug!("---- {} Complete after {} iteration(s)", tag, iteration);
+    }
+
+}
+
+impl<'tcx> fmt::Debug for Verify<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            VerifyRegSubReg(_, ref a, ref b) => {
+                write!(f, "VerifyRegSubReg({:?}, {:?})", a, b)
+            }
+            VerifyGenericBound(_, ref p, ref a, ref bs) => {
+                write!(f, "VerifyGenericBound({:?}, {:?}, {:?})", p, a, bs)
+            }
+        }
+    }
+}
+
+fn normalize(values: &Vec<VarValue>, r: ty::Region) -> ty::Region {
+    match r {
+        ty::ReVar(rid) => lookup(values, rid),
+        _ => r,
+    }
+}
+
+fn lookup(values: &Vec<VarValue>, rid: ty::RegionVid) -> ty::Region {
+    match values[rid.index as usize] {
+        Value(r) => r,
+        ErrorValue => ReStatic, // Previously reported error.
+    }
+}
+
+impl<'tcx> fmt::Debug for RegionAndOrigin<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "RegionAndOrigin({:?},{:?})", self.region, self.origin)
+    }
+}
+
+impl fmt::Debug for RegionSnapshot {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "RegionSnapshot(length={},skolemization={})",
+               self.length, self.skolemization_count)
+    }
+}
+
+impl<'tcx> fmt::Debug for GenericKind<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            GenericKind::Param(ref p) => write!(f, "{:?}", p),
+            GenericKind::Projection(ref p) => write!(f, "{:?}", p),
+        }
+    }
+}
+
+impl<'tcx> fmt::Display for GenericKind<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            GenericKind::Param(ref p) => write!(f, "{}", p),
+            GenericKind::Projection(ref p) => write!(f, "{}", p),
+        }
+    }
+}
+
+impl<'tcx> GenericKind<'tcx> {
+    pub fn to_ty(&self, tcx: &TyCtxt<'tcx>) -> Ty<'tcx> {
+        match *self {
+            GenericKind::Param(ref p) => p.to_ty(tcx),
+            GenericKind::Projection(ref p) => tcx.mk_projection(p.trait_ref.clone(), p.item_name),
+        }
+    }
+}
+
+impl VerifyBound {
+    fn for_each_region(&self, f: &mut FnMut(ty::Region)) {
+        match self {
+            &VerifyBound::AnyRegion(ref rs) |
+            &VerifyBound::AllRegions(ref rs) => for &r in rs {
+                f(r);
+            },
+
+            &VerifyBound::AnyBound(ref bs) |
+            &VerifyBound::AllBounds(ref bs) => for b in bs {
+                b.for_each_region(f);
+            },
+        }
+    }
+
+    pub fn must_hold(&self) -> bool {
+        match self {
+            &VerifyBound::AnyRegion(ref bs) => bs.contains(&ty::ReStatic),
+            &VerifyBound::AllRegions(ref bs) => bs.is_empty(),
+            &VerifyBound::AnyBound(ref bs) => bs.iter().any(|b| b.must_hold()),
+            &VerifyBound::AllBounds(ref bs) => bs.iter().all(|b| b.must_hold()),
+        }
+    }
+
+    pub fn cannot_hold(&self) -> bool {
+        match self {
+            &VerifyBound::AnyRegion(ref bs) => bs.is_empty(),
+            &VerifyBound::AllRegions(ref bs) => bs.contains(&ty::ReEmpty),
+            &VerifyBound::AnyBound(ref bs) => bs.iter().all(|b| b.cannot_hold()),
+            &VerifyBound::AllBounds(ref bs) => bs.iter().any(|b| b.cannot_hold()),
+        }
+    }
+
+    pub fn or(self, vb: VerifyBound) -> VerifyBound {
+        if self.must_hold() || vb.cannot_hold() {
+            self
+        } else if self.cannot_hold() || vb.must_hold() {
+            vb
+        } else {
+            VerifyBound::AnyBound(vec![self, vb])
+        }
+    }
+
+    pub fn and(self, vb: VerifyBound) -> VerifyBound {
+        if self.must_hold() && vb.must_hold() {
+            self
+        } else if self.cannot_hold() && vb.cannot_hold() {
+            self
+        } else {
+            VerifyBound::AllBounds(vec![self, vb])
+        }
+    }
+
+    fn is_met<'tcx>(&self,
+                    tcx: &TyCtxt<'tcx>,
+                    free_regions: &FreeRegionMap,
+                    var_values: &Vec<VarValue>,
+                    min: ty::Region)
+                    -> bool {
+        match self {
+            &VerifyBound::AnyRegion(ref rs) =>
+                rs.iter()
+                  .map(|&r| normalize(var_values, r))
+                  .any(|r| free_regions.is_subregion_of(tcx, min, r)),
+
+            &VerifyBound::AllRegions(ref rs) =>
+                rs.iter()
+                  .map(|&r| normalize(var_values, r))
+                  .all(|r| free_regions.is_subregion_of(tcx, min, r)),
+
+            &VerifyBound::AnyBound(ref bs) =>
+                bs.iter()
+                  .any(|b| b.is_met(tcx, free_regions, var_values, min)),
+
+            &VerifyBound::AllBounds(ref bs) =>
+                bs.iter()
+                  .all(|b| b.is_met(tcx, free_regions, var_values, min)),
+        }
+    }
+}
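
For intuition only, the must_hold / cannot_hold queries above decide a bound without consulting the constraint graph: 'static satisfies any AnyRegion bound, an empty AllRegions bound is vacuously true, and the recursive cases fold over the sub-bounds. A simplified standalone sketch with toy Region and Bound types (nothing here is the patch's actual API):

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Region { Static, Empty, Named(u32) }

    #[derive(Debug)]
    enum Bound {
        AnyRegion(Vec<Region>),
        AllRegions(Vec<Region>),
        AnyBound(Vec<Bound>),
        AllBounds(Vec<Bound>),
    }

    impl Bound {
        // A bound trivially holds if 'static is an acceptable choice, or if
        // there is nothing left to satisfy.
        fn must_hold(&self) -> bool {
            match *self {
                Bound::AnyRegion(ref rs) => rs.contains(&Region::Static),
                Bound::AllRegions(ref rs) => rs.is_empty(),
                Bound::AnyBound(ref bs) => bs.iter().any(|b| b.must_hold()),
                Bound::AllBounds(ref bs) => bs.iter().all(|b| b.must_hold()),
            }
        }

        // A bound trivially fails if there is no region to pick at all, or if
        // the empty region is among those that all must be outlived.
        fn cannot_hold(&self) -> bool {
            match *self {
                Bound::AnyRegion(ref rs) => rs.is_empty(),
                Bound::AllRegions(ref rs) => rs.contains(&Region::Empty),
                Bound::AnyBound(ref bs) => bs.iter().all(|b| b.cannot_hold()),
                Bound::AllBounds(ref bs) => bs.iter().any(|b| b.cannot_hold()),
            }
        }

        // `or` avoids building a nested bound when one side already decides it.
        fn or(self, other: Bound) -> Bound {
            if self.must_hold() || other.cannot_hold() {
                self
            } else if self.cannot_hold() || other.must_hold() {
                other
            } else {
                Bound::AnyBound(vec![self, other])
            }
        }
    }

    fn main() {
        let sure = Bound::AnyRegion(vec![Region::Static]);
        let hopeless = Bound::AllRegions(vec![Region::Empty]);
        assert!(sure.must_hold());
        assert!(hopeless.cannot_hold());
        let combined = Bound::AllRegions(vec![Region::Named(0)]).or(sure);
        assert!(combined.must_hold());
        let both = Bound::AllBounds(vec![combined, hopeless]);
        assert!(!both.must_hold());
    }
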
diff --git a/src/librustc/infer/resolve.rs b/src/librustc/infer/resolve.rs
new file mode 100644 (file)
index 0000000..8651b52
--- /dev/null
@@ -0,0 +1,145 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::{InferCtxt, FixupError, FixupResult};
+use ty::{self, Ty, TyCtxt, TypeFoldable};
+
+///////////////////////////////////////////////////////////////////////////
+// OPPORTUNISTIC TYPE RESOLVER
+
+/// The opportunistic type resolver can be used at any time. It simply replaces
+/// type variables that have been unified with the things they have
+/// been unified with (similar to `shallow_resolve`, but deep). This is
+/// useful for printing messages, etc., but also required at various
+/// points for correctness.
+pub struct OpportunisticTypeResolver<'a, 'tcx:'a> {
+    infcx: &'a InferCtxt<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> OpportunisticTypeResolver<'a, 'tcx> {
+    pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> OpportunisticTypeResolver<'a, 'tcx> {
+        OpportunisticTypeResolver { infcx: infcx }
+    }
+}
+
+impl<'a, 'tcx> ty::fold::TypeFolder<'tcx> for OpportunisticTypeResolver<'a, 'tcx> {
+    fn tcx(&self) -> &TyCtxt<'tcx> {
+        self.infcx.tcx
+    }
+
+    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+        if !t.has_infer_types() {
+            t // micro-optimize -- if there is nothing in this type that this fold affects...
+        } else {
+            let t0 = self.infcx.shallow_resolve(t);
+            t0.super_fold_with(self)
+        }
+    }
+}
+
+/// The opportunistic type and region resolver is similar to the
+/// opportunistic type resolver, but also opportunistically resolves
+/// regions. It is useful for canonicalization.
+pub struct OpportunisticTypeAndRegionResolver<'a, 'tcx:'a> {
+    infcx: &'a InferCtxt<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> OpportunisticTypeAndRegionResolver<'a, 'tcx> {
+    pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> Self {
+        OpportunisticTypeAndRegionResolver { infcx: infcx }
+    }
+}
+
+impl<'a, 'tcx> ty::fold::TypeFolder<'tcx> for OpportunisticTypeAndRegionResolver<'a, 'tcx> {
+    fn tcx(&self) -> &TyCtxt<'tcx> {
+        self.infcx.tcx
+    }
+
+    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+        if !t.needs_infer() {
+            t // micro-optimize -- if there is nothing in this type that this fold affects...
+        } else {
+            let t0 = self.infcx.shallow_resolve(t);
+            t0.super_fold_with(self)
+        }
+    }
+
+    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+        match r {
+          ty::ReVar(rid) => self.infcx.region_vars.opportunistic_resolve_var(rid),
+          _ => r,
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// FULL TYPE RESOLUTION
+
+/// Full type resolution replaces all type and region variables with
+/// their concrete results. If any variable cannot be replaced (never unified, etc)
+/// then an `Err` result is returned.
+pub fn fully_resolve<'a, 'tcx, T>(infcx: &InferCtxt<'a,'tcx>, value: &T) -> FixupResult<T>
+    where T : TypeFoldable<'tcx>
+{
+    let mut full_resolver = FullTypeResolver { infcx: infcx, err: None };
+    let result = value.fold_with(&mut full_resolver);
+    match full_resolver.err {
+        None => Ok(result),
+        Some(e) => Err(e),
+    }
+}
+
+// N.B. This type is not public because the protocol around checking the
+// `err` field is not enforceable otherwise.
+struct FullTypeResolver<'a, 'tcx:'a> {
+    infcx: &'a InferCtxt<'a, 'tcx>,
+    err: Option<FixupError>,
+}
+
+impl<'a, 'tcx> ty::fold::TypeFolder<'tcx> for FullTypeResolver<'a, 'tcx> {
+    fn tcx(&self) -> &TyCtxt<'tcx> {
+        self.infcx.tcx
+    }
+
+    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+        if !t.needs_infer() {
+            t // micro-optimize -- if there is nothing in this type that this fold affects...
+        } else {
+            let t = self.infcx.shallow_resolve(t);
+            match t.sty {
+                ty::TyInfer(ty::TyVar(vid)) => {
+                    self.err = Some(FixupError::UnresolvedTy(vid));
+                    self.tcx().types.err
+                }
+                ty::TyInfer(ty::IntVar(vid)) => {
+                    self.err = Some(FixupError::UnresolvedIntTy(vid));
+                    self.tcx().types.err
+                }
+                ty::TyInfer(ty::FloatVar(vid)) => {
+                    self.err = Some(FixupError::UnresolvedFloatTy(vid));
+                    self.tcx().types.err
+                }
+                ty::TyInfer(_) => {
+                    bug!("Unexpected type in full type resolver: {:?}", t);
+                }
+                _ => {
+                    t.super_fold_with(self)
+                }
+            }
+        }
+    }
+
+    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+        match r {
+          ty::ReVar(rid) => self.infcx.region_vars.resolve_var(rid),
+          _ => r,
+        }
+    }
+}
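
As a rough standalone analogy (toy Ty and variable table, not rustc's types), the difference between shallow_resolve and the opportunistic folders above is just how far known variables are chased; unknown variables are left in place rather than reported as errors:

    use std::collections::HashMap;

    #[derive(Clone, Debug, PartialEq)]
    enum Ty {
        Int,
        Var(u32),       // an unresolved inference variable
        Ref(Box<Ty>),   // some compound type to recurse into
    }

    // Replace a variable only if the table already knows its value.
    fn shallow_resolve(t: &Ty, table: &HashMap<u32, Ty>) -> Ty {
        match *t {
            Ty::Var(v) => table.get(&v).cloned().unwrap_or(Ty::Var(v)),
            ref other => other.clone(),
        }
    }

    // "Opportunistic" deep resolution: substitute every known variable,
    // recurse into compound types, never report an error.
    fn deep_resolve(t: &Ty, table: &HashMap<u32, Ty>) -> Ty {
        match shallow_resolve(t, table) {
            Ty::Ref(inner) => Ty::Ref(Box::new(deep_resolve(&inner, table))),
            other => other,
        }
    }

    fn main() {
        let mut table = HashMap::new();
        table.insert(0, Ty::Int);
        let t = Ty::Ref(Box::new(Ty::Var(0)));
        assert_eq!(deep_resolve(&t, &table), Ty::Ref(Box::new(Ty::Int)));
        // A variable with no known value survives resolution unchanged.
        assert_eq!(deep_resolve(&Ty::Var(1), &table), Ty::Var(1));
    }
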
diff --git a/src/librustc/infer/sub.rs b/src/librustc/infer/sub.rs
new file mode 100644 (file)
index 0000000..ece8c0c
--- /dev/null
@@ -0,0 +1,121 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::combine::{self, CombineFields};
+use super::higher_ranked::HigherRankedRelations;
+use super::SubregionOrigin;
+use super::type_variable::{SubtypeOf, SupertypeOf};
+
+use ty::{self, Ty, TyCtxt};
+use ty::TyVar;
+use ty::relate::{Cause, Relate, RelateResult, TypeRelation};
+use traits::PredicateObligations;
+use std::mem;
+
+/// Ensures `a` is made a subtype of `b`. Returns `a` on success.
+pub struct Sub<'a, 'tcx: 'a> {
+    fields: CombineFields<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> Sub<'a, 'tcx> {
+    pub fn new(f: CombineFields<'a, 'tcx>) -> Sub<'a, 'tcx> {
+        Sub { fields: f }
+    }
+
+    pub fn obligations(self) -> PredicateObligations<'tcx> {
+        self.fields.obligations
+    }
+}
+
+impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Sub<'a, 'tcx> {
+    fn tag(&self) -> &'static str { "Sub" }
+    fn tcx(&self) -> &'a TyCtxt<'tcx> { self.fields.infcx.tcx }
+    fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
+
+    fn with_cause<F,R>(&mut self, cause: Cause, f: F) -> R
+        where F: FnOnce(&mut Self) -> R
+    {
+        debug!("sub with_cause={:?}", cause);
+        let old_cause = mem::replace(&mut self.fields.cause, Some(cause));
+        let r = f(self);
+        debug!("sub old_cause={:?}", old_cause);
+        self.fields.cause = old_cause;
+        r
+    }
+
+    fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
+                                               variance: ty::Variance,
+                                               a: &T,
+                                               b: &T)
+                                               -> RelateResult<'tcx, T>
+    {
+        match variance {
+            ty::Invariant => self.fields.equate().relate(a, b),
+            ty::Covariant => self.relate(a, b),
+            ty::Bivariant => self.fields.bivariate().relate(a, b),
+            ty::Contravariant => self.fields.switch_expected().sub().relate(b, a),
+        }
+    }
+
+    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+        debug!("{}.tys({:?}, {:?})", self.tag(), a, b);
+
+        if a == b { return Ok(a); }
+
+        let infcx = self.fields.infcx;
+        let a = infcx.type_variables.borrow_mut().replace_if_possible(a);
+        let b = infcx.type_variables.borrow_mut().replace_if_possible(b);
+        match (&a.sty, &b.sty) {
+            (&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => {
+                infcx.type_variables
+                    .borrow_mut()
+                    .relate_vars(a_id, SubtypeOf, b_id);
+                Ok(a)
+            }
+            (&ty::TyInfer(TyVar(a_id)), _) => {
+                self.fields
+                    .switch_expected()
+                    .instantiate(b, SupertypeOf, a_id)?;
+                Ok(a)
+            }
+            (_, &ty::TyInfer(TyVar(b_id))) => {
+                self.fields.instantiate(a, SubtypeOf, b_id)?;
+                Ok(a)
+            }
+
+            (&ty::TyError, _) | (_, &ty::TyError) => {
+                Ok(self.tcx().types.err)
+            }
+
+            _ => {
+                combine::super_combine_tys(self.fields.infcx, self, a, b)?;
+                Ok(a)
+            }
+        }
+    }
+
+    fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
+        debug!("{}.regions({:?}, {:?}) self.cause={:?}",
+               self.tag(), a, b, self.fields.cause);
+        // FIXME -- we have more fine-grained information available
+        // from the "cause" field, we could perhaps give more tailored
+        // error messages.
+        let origin = SubregionOrigin::Subtype(self.fields.trace.clone());
+        self.fields.infcx.region_vars.make_subregion(origin, a, b);
+        Ok(a)
+    }
+
+    fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
+                  -> RelateResult<'tcx, ty::Binder<T>>
+        where T: Relate<'a,'tcx>
+    {
+        self.fields.higher_ranked_sub(a, b)
+    }
+}
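
For intuition, relate_with_variance above changes or flips the check depending on variance: covariant positions recurse with the same orientation, contravariant positions swap the operands, invariant positions demand equality, and bivariant positions impose nothing. A minimal standalone sketch, with an integer ordering standing in for subtyping (nothing here comes from the patch):

    #[derive(Clone, Copy, Debug)]
    enum Variance { Covariant, Contravariant, Invariant, Bivariant }

    // A toy "is subtype of" check over integers ordered by <=; equality
    // stands in for the invariant case.
    fn relate(variance: Variance, a: i32, b: i32) -> bool {
        match variance {
            Variance::Covariant => a <= b,      // same orientation as the outer check
            Variance::Contravariant => b <= a,  // operands flipped
            Variance::Invariant => a == b,      // must match exactly
            Variance::Bivariant => true,        // no constraint at all
        }
    }

    fn main() {
        assert!(relate(Variance::Covariant, 1, 2));
        assert!(relate(Variance::Contravariant, 2, 1));
        assert!(!relate(Variance::Invariant, 1, 2));
        assert!(relate(Variance::Bivariant, 5, 0));
    }
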
diff --git a/src/librustc/infer/type_variable.rs b/src/librustc/infer/type_variable.rs
new file mode 100644 (file)
index 0000000..c2a8d04
--- /dev/null
@@ -0,0 +1,324 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub use self::RelationDir::*;
+use self::TypeVariableValue::*;
+use self::UndoEntry::*;
+use hir::def_id::{DefId};
+use ty::{self, Ty};
+use syntax::codemap::Span;
+
+use std::cmp::min;
+use std::marker::PhantomData;
+use std::mem;
+use std::u32;
+use rustc_data_structures::snapshot_vec as sv;
+use rustc_data_structures::unify as ut;
+
+pub struct TypeVariableTable<'tcx> {
+    values: sv::SnapshotVec<Delegate<'tcx>>,
+    eq_relations: ut::UnificationTable<ty::TyVid>,
+}
+
+struct TypeVariableData<'tcx> {
+    value: TypeVariableValue<'tcx>,
+    diverging: bool
+}
+
+enum TypeVariableValue<'tcx> {
+    Known(Ty<'tcx>),
+    Bounded {
+        relations: Vec<Relation>,
+        default: Option<Default<'tcx>>
+    }
+}
+
+// We will use this to store the required information to recapitulate what happened when
+// an error occurs.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct Default<'tcx> {
+    pub ty: Ty<'tcx>,
+    /// The span where the default was incurred
+    pub origin_span: Span,
+    /// The definition that the default originates from
+    pub def_id: DefId
+}
+
+pub struct Snapshot {
+    snapshot: sv::Snapshot,
+    eq_snapshot: ut::Snapshot<ty::TyVid>,
+}
+
+enum UndoEntry<'tcx> {
+    // The type of the var was specified.
+    SpecifyVar(ty::TyVid, Vec<Relation>, Option<Default<'tcx>>),
+    Relate(ty::TyVid, ty::TyVid),
+    RelateRange(ty::TyVid, usize),
+}
+
+struct Delegate<'tcx>(PhantomData<&'tcx ()>);
+
+type Relation = (RelationDir, ty::TyVid);
+
+#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
+pub enum RelationDir {
+    SubtypeOf, SupertypeOf, EqTo, BiTo
+}
+
+impl RelationDir {
+    fn opposite(self) -> RelationDir {
+        match self {
+            SubtypeOf => SupertypeOf,
+            SupertypeOf => SubtypeOf,
+            EqTo => EqTo,
+            BiTo => BiTo,
+        }
+    }
+}
+
+impl<'tcx> TypeVariableTable<'tcx> {
+    pub fn new() -> TypeVariableTable<'tcx> {
+        TypeVariableTable {
+            values: sv::SnapshotVec::new(),
+            eq_relations: ut::UnificationTable::new(),
+        }
+    }
+
+    fn relations<'a>(&'a mut self, a: ty::TyVid) -> &'a mut Vec<Relation> {
+        relations(self.values.get_mut(a.index as usize))
+    }
+
+    pub fn default(&self, vid: ty::TyVid) -> Option<Default<'tcx>> {
+        match &self.values.get(vid.index as usize).value {
+            &Known(_) => None,
+            &Bounded { ref default, .. } => default.clone()
+        }
+    }
+
+    pub fn var_diverges<'a>(&'a self, vid: ty::TyVid) -> bool {
+        self.values.get(vid.index as usize).diverging
+    }
+
+    /// Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
+    ///
+    /// Precondition: neither `a` nor `b` are known.
+    pub fn relate_vars(&mut self, a: ty::TyVid, dir: RelationDir, b: ty::TyVid) {
+        let a = self.root_var(a);
+        let b = self.root_var(b);
+        if a != b {
+            if dir == EqTo {
+                // a and b must be equal which we mark in the unification table
+                let root = self.eq_relations.union(a, b);
+                // In addition to being equal, all relations from the variable that is no
+                // longer the root must be added to the root, so they are not forgotten; the
+                // other variable should no longer be referenced (other than to get the root).
+                let other = if a == root { b } else { a };
+                let count = {
+                    let (relations, root_relations) = if other.index < root.index {
+                        let (pre, post) = self.values.split_at_mut(root.index as usize);
+                        (relations(&mut pre[other.index as usize]), relations(&mut post[0]))
+                    } else {
+                        let (pre, post) = self.values.split_at_mut(other.index as usize);
+                        (relations(&mut post[0]), relations(&mut pre[root.index as usize]))
+                    };
+                    root_relations.extend_from_slice(relations);
+                    relations.len()
+                };
+                self.values.record(RelateRange(root, count));
+            } else {
+                self.relations(a).push((dir, b));
+                self.relations(b).push((dir.opposite(), a));
+                self.values.record(Relate(a, b));
+            }
+        }
+    }
+
+    /// Instantiates `vid` with the type `ty` and then pushes an entry onto `stack` for each of the
+    /// relations of `vid` to other variables. The relations will have the form `(ty, dir, vid1)`
+    /// where `vid1` is some other variable id.
+    ///
+    /// Precondition: `vid` must be a root in the unification table
+    pub fn instantiate_and_push(
+        &mut self,
+        vid: ty::TyVid,
+        ty: Ty<'tcx>,
+        stack: &mut Vec<(Ty<'tcx>, RelationDir, ty::TyVid)>)
+    {
+        debug_assert!(self.root_var(vid) == vid);
+        let old_value = {
+            let value_ptr = &mut self.values.get_mut(vid.index as usize).value;
+            mem::replace(value_ptr, Known(ty))
+        };
+
+        let (relations, default) = match old_value {
+            Bounded { relations, default } => (relations, default),
+            Known(_) => bug!("Asked to instantiate variable that is \
+                              already instantiated")
+        };
+
+        for &(dir, vid) in &relations {
+            stack.push((ty, dir, vid));
+        }
+
+        self.values.record(SpecifyVar(vid, relations, default));
+    }
+
+    pub fn new_var(&mut self,
+                   diverging: bool,
+                   default: Option<Default<'tcx>>) -> ty::TyVid {
+        self.eq_relations.new_key(());
+        let index = self.values.push(TypeVariableData {
+            value: Bounded { relations: vec![], default: default },
+            diverging: diverging
+        });
+        ty::TyVid { index: index as u32 }
+    }
+
+    pub fn root_var(&mut self, vid: ty::TyVid) -> ty::TyVid {
+        self.eq_relations.find(vid)
+    }
+
+    pub fn probe(&mut self, vid: ty::TyVid) -> Option<Ty<'tcx>> {
+        let vid = self.root_var(vid);
+        self.probe_root(vid)
+    }
+
+    /// Retrieves the type of `vid` given that it is currently a root in the unification table
+    pub fn probe_root(&mut self, vid: ty::TyVid) -> Option<Ty<'tcx>> {
+        debug_assert!(self.root_var(vid) == vid);
+        match self.values.get(vid.index as usize).value {
+            Bounded { .. } => None,
+            Known(t) => Some(t)
+        }
+    }
+
+    pub fn replace_if_possible(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+        match t.sty {
+            ty::TyInfer(ty::TyVar(v)) => {
+                match self.probe(v) {
+                    None => t,
+                    Some(u) => u
+                }
+            }
+            _ => t,
+        }
+    }
+
+    pub fn snapshot(&mut self) -> Snapshot {
+        Snapshot {
+            snapshot: self.values.start_snapshot(),
+            eq_snapshot: self.eq_relations.snapshot(),
+        }
+    }
+
+    pub fn rollback_to(&mut self, s: Snapshot) {
+        self.values.rollback_to(s.snapshot);
+        self.eq_relations.rollback_to(s.eq_snapshot);
+    }
+
+    pub fn commit(&mut self, s: Snapshot) {
+        self.values.commit(s.snapshot);
+        self.eq_relations.commit(s.eq_snapshot);
+    }
+
+    pub fn types_escaping_snapshot(&mut self, s: &Snapshot) -> Vec<Ty<'tcx>> {
+        /*!
+         * Find the set of type variables that existed *before* `s`
+         * but which have only been unified since `s` started, and
+         * return the types with which they were unified. So if we had
+         * a type variable `V0`, then we started the snapshot, then we
+         * created a type variable `V1`, unified `V0` with `T0`, and
+         * unified `V1` with `T1`, this function would return `{T0}`.
+         */
+
+        let mut new_elem_threshold = u32::MAX;
+        let mut escaping_types = Vec::new();
+        let actions_since_snapshot = self.values.actions_since_snapshot(&s.snapshot);
+        debug!("actions_since_snapshot.len() = {}", actions_since_snapshot.len());
+        for action in actions_since_snapshot {
+            match *action {
+                sv::UndoLog::NewElem(index) => {
+                    // if any new variables were created during the
+                    // snapshot, remember the lower index (which will
+                    // always be the first one we see). Note that this
+                    // action must precede those variables being
+                    // specified.
+                    new_elem_threshold = min(new_elem_threshold, index as u32);
+                    debug!("NewElem({}) new_elem_threshold={}", index, new_elem_threshold);
+                }
+
+                sv::UndoLog::Other(SpecifyVar(vid, _, _)) => {
+                    if vid.index < new_elem_threshold {
+                        // quick check to see if this variable was
+                        // created since the snapshot started or not.
+                        let escaping_type = match self.values.get(vid.index as usize).value {
+                            Bounded { .. } => bug!(),
+                            Known(ty) => ty,
+                        };
+                        escaping_types.push(escaping_type);
+                    }
+                    debug!("SpecifyVar({:?}) new_elem_threshold={}", vid, new_elem_threshold);
+                }
+
+                _ => { }
+            }
+        }
+
+        escaping_types
+    }
+
+    pub fn unsolved_variables(&mut self) -> Vec<ty::TyVid> {
+        (0..self.values.len())
+            .filter_map(|i| {
+                let vid = ty::TyVid { index: i as u32 };
+                if self.probe(vid).is_some() {
+                    None
+                } else {
+                    Some(vid)
+                }
+            })
+            .collect()
+    }
+}
+
+impl<'tcx> sv::SnapshotVecDelegate for Delegate<'tcx> {
+    type Value = TypeVariableData<'tcx>;
+    type Undo = UndoEntry<'tcx>;
+
+    fn reverse(values: &mut Vec<TypeVariableData<'tcx>>, action: UndoEntry<'tcx>) {
+        match action {
+            SpecifyVar(vid, relations, default) => {
+                values[vid.index as usize].value = Bounded {
+                    relations: relations,
+                    default: default
+                };
+            }
+
+            Relate(a, b) => {
+                relations(&mut (*values)[a.index as usize]).pop();
+                relations(&mut (*values)[b.index as usize]).pop();
+            }
+
+            RelateRange(i, n) => {
+                let relations = relations(&mut (*values)[i.index as usize]);
+                for _ in 0..n {
+                    relations.pop();
+                }
+            }
+        }
+    }
+}
+
+fn relations<'a>(v: &'a mut TypeVariableData) -> &'a mut Vec<Relation> {
+    match v.value {
+        Known(_) => bug!("var_sub_var: variable is known"),
+        Bounded { ref mut relations, .. } => relations
+    }
+}
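
The snapshot / rollback_to / commit machinery above amounts to an undo log kept alongside the value vector: every mutation records how to reverse itself, and rolling back replays the log in reverse down to the snapshot point. A self-contained sketch of the same pattern with a toy table (not rustc_data_structures::snapshot_vec):

    enum Undo {
        Push,                          // a new slot was created
        Overwrite(usize, Option<i32>), // a slot was given a value; remember the old one
    }

    struct Table {
        values: Vec<Option<i32>>,
        undo_log: Vec<Undo>,
    }

    // A snapshot is just the length of the undo log when it was taken.
    struct Snapshot(usize);

    impl Table {
        fn new() -> Table {
            Table { values: vec![], undo_log: vec![] }
        }

        fn new_var(&mut self) -> usize {
            self.values.push(None);
            self.undo_log.push(Undo::Push);
            self.values.len() - 1
        }

        fn instantiate(&mut self, v: usize, value: i32) {
            let old = std::mem::replace(&mut self.values[v], Some(value));
            self.undo_log.push(Undo::Overwrite(v, old));
        }

        fn snapshot(&self) -> Snapshot {
            Snapshot(self.undo_log.len())
        }

        fn rollback_to(&mut self, s: Snapshot) {
            // Undo every action recorded since the snapshot, newest first.
            while self.undo_log.len() > s.0 {
                match self.undo_log.pop().unwrap() {
                    Undo::Push => { self.values.pop(); }
                    Undo::Overwrite(v, old) => { self.values[v] = old; }
                }
            }
        }
    }

    fn main() {
        let mut t = Table::new();
        let v0 = t.new_var();
        let snap = t.snapshot();
        t.instantiate(v0, 7);
        let _v1 = t.new_var();
        t.rollback_to(snap);
        assert_eq!(t.values.len(), 1);  // the variable made after the snapshot is gone
        assert_eq!(t.values[v0], None); // v0 is unbound again
    }
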
diff --git a/src/librustc/infer/unify_key.rs b/src/librustc/infer/unify_key.rs
new file mode 100644 (file)
index 0000000..a9eb20b
--- /dev/null
@@ -0,0 +1,82 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use syntax::ast;
+use ty::{self, IntVarValue, Ty, TyCtxt};
+use rustc_data_structures::unify::{Combine, UnifyKey};
+
+pub trait ToType<'tcx> {
+    fn to_type(&self, tcx: &TyCtxt<'tcx>) -> Ty<'tcx>;
+}
+
+impl UnifyKey for ty::IntVid {
+    type Value = Option<IntVarValue>;
+    fn index(&self) -> u32 { self.index }
+    fn from_index(i: u32) -> ty::IntVid { ty::IntVid { index: i } }
+    fn tag(_: Option<ty::IntVid>) -> &'static str { "IntVid" }
+}
+
+#[derive(PartialEq, Copy, Clone, Debug)]
+pub struct RegionVidKey {
+    /// The minimum region vid in the unification set. This is needed
+    /// to have a canonical name for a type to prevent infinite
+    /// recursion.
+    pub min_vid: ty::RegionVid
+}
+
+impl Combine for RegionVidKey {
+    fn combine(&self, other: &RegionVidKey) -> RegionVidKey {
+        let min_vid = if self.min_vid.index < other.min_vid.index {
+            self.min_vid
+        } else {
+            other.min_vid
+        };
+
+        RegionVidKey { min_vid: min_vid }
+    }
+}
+
+impl UnifyKey for ty::RegionVid {
+    type Value = RegionVidKey;
+    fn index(&self) -> u32 { self.index }
+    fn from_index(i: u32) -> ty::RegionVid { ty::RegionVid { index: i } }
+    fn tag(_: Option<ty::RegionVid>) -> &'static str { "RegionVid" }
+}
+
+impl<'tcx> ToType<'tcx> for IntVarValue {
+    fn to_type(&self, tcx: &TyCtxt<'tcx>) -> Ty<'tcx> {
+        match *self {
+            ty::IntType(i) => tcx.mk_mach_int(i),
+            ty::UintType(i) => tcx.mk_mach_uint(i),
+        }
+    }
+}
+
+// Floating point type keys
+
+impl UnifyKey for ty::FloatVid {
+    type Value = Option<ast::FloatTy>;
+    fn index(&self) -> u32 { self.index }
+    fn from_index(i: u32) -> ty::FloatVid { ty::FloatVid { index: i } }
+    fn tag(_: Option<ty::FloatVid>) -> &'static str { "FloatVid" }
+}
+
+impl<'tcx> ToType<'tcx> for ast::FloatTy {
+    fn to_type(&self, tcx: &TyCtxt<'tcx>) -> Ty<'tcx> {
+        tcx.mk_mach_float(*self)
+    }
+}
+
+impl UnifyKey for ty::TyVid {
+    type Value = ();
+    fn index(&self) -> u32 { self.index }
+    fn from_index(i: u32) -> ty::TyVid { ty::TyVid { index: i } }
+    fn tag(_: Option<ty::TyVid>) -> &'static str { "TyVid" }
+}
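
These UnifyKey impls plug the various inference ids into a union-find whose per-set value is merged on union; RegionVidKey::combine, for example, keeps the smallest vid as the canonical name. A standalone sketch of that shape (a toy union-find, not rustc_data_structures::unify):

    struct UnionFind {
        parent: Vec<usize>,
        min_id: Vec<usize>, // value carried by each root, combined on union
    }

    impl UnionFind {
        fn new(n: usize) -> UnionFind {
            UnionFind { parent: (0..n).collect(), min_id: (0..n).collect() }
        }

        fn find(&mut self, x: usize) -> usize {
            if self.parent[x] != x {
                let root = self.find(self.parent[x]);
                self.parent[x] = root; // path compression
            }
            self.parent[x]
        }

        fn union(&mut self, a: usize, b: usize) -> usize {
            let ra = self.find(a);
            let rb = self.find(b);
            if ra == rb { return ra; }
            // Combine the values, as RegionVidKey::combine keeps the smaller vid.
            let min = self.min_id[ra].min(self.min_id[rb]);
            self.parent[rb] = ra;
            self.min_id[ra] = min;
            ra
        }
    }

    fn main() {
        let mut uf = UnionFind::new(4);
        uf.union(3, 1);
        let root = uf.find(3);
        assert_eq!(uf.min_id[root], 1);
    }
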
index 4ff3b21dc83f4901bacaabf9f6c15767a719ae0f..d1bbbf08ac230f5a261a619c7a74aaf433bb563f 100644 (file)
@@ -28,7 +28,6 @@
 #![feature(box_syntax)]
 #![feature(collections)]
 #![feature(const_fn)]
-#![feature(copy_from_slice)]
 #![feature(enumset)]
 #![feature(iter_arith)]
 #![feature(libc)]
@@ -38,7 +37,8 @@
 #![feature(rustc_private)]
 #![feature(slice_patterns)]
 #![feature(staged_api)]
-#![feature(str_char)]
+#![feature(step_by)]
+#![feature(question_mark)]
 #![cfg_attr(test, feature(test))]
 
 extern crate arena;
@@ -49,12 +49,11 @@ extern crate getopts;
 extern crate graphviz;
 extern crate libc;
 extern crate rbml;
-pub extern crate rustc_llvm as llvm;
 extern crate rustc_back;
-extern crate rustc_front;
 extern crate rustc_data_structures;
 extern crate serialize;
 extern crate collections;
+extern crate rustc_const_math;
 #[macro_use] extern crate log;
 #[macro_use] extern crate syntax;
 #[macro_use] #[no_link] extern crate rustc_bitflags;
@@ -71,50 +70,34 @@ mod macros;
 // registered before they are used.
 pub mod diagnostics;
 
-pub mod back {
-    pub use rustc_back::abi;
-    pub use rustc_back::rpath;
-    pub use rustc_back::svh;
-}
-
+pub mod cfg;
 pub mod dep_graph;
-
-pub mod front {
-    pub mod check_attr;
-    pub mod map;
-}
+pub mod hir;
+pub mod infer;
+pub mod lint;
 
 pub mod middle {
     pub mod astconv_util;
     pub mod expr_use_visitor; // STAGE0: increase glitch immunity
-    pub mod cfg;
-    pub mod check_match;
-    pub mod const_eval;
+    pub mod const_val;
     pub mod const_qualif;
     pub mod cstore;
     pub mod dataflow;
     pub mod dead;
-    pub mod def;
-    pub mod def_id;
     pub mod dependency_format;
     pub mod effect;
     pub mod entry;
     pub mod free_region;
     pub mod intrinsicck;
-    pub mod infer;
     pub mod lang_items;
     pub mod liveness;
     pub mod mem_categorization;
-    pub mod pat_util;
     pub mod privacy;
     pub mod reachable;
     pub mod region;
     pub mod recursion_limit;
     pub mod resolve_lifetime;
     pub mod stability;
-    pub mod subst;
-    pub mod traits;
-    pub mod ty;
     pub mod weak_lang_items;
 }
 
@@ -127,8 +110,8 @@ pub mod mir {
 }
 
 pub mod session;
-
-pub mod lint;
+pub mod traits;
+pub mod ty;
 
 pub mod util {
     pub use rustc_back::sha2;
@@ -140,10 +123,6 @@ pub mod util {
     pub mod fs;
 }
 
-pub mod lib {
-    pub use llvm;
-}
-
 // A private module so that macro-expanded idents like
 // `::rustc::lint::Lint` will also work in `rustc` itself.
 //
index 4bb69a2688a41ded0c12a090929b61d422f093e5..2564838c67d6567bf67acb703d851c53c534476b 100644 (file)
@@ -136,6 +136,19 @@ declare_lint! {
     "type parameter default erroneously allowed in invalid location"
 }
 
+declare_lint! {
+    pub ILLEGAL_FLOATING_POINT_CONSTANT_PATTERN,
+    Warn,
+    "floating-point constants cannot be used in patterns"
+}
+
+declare_lint! {
+    pub ILLEGAL_STRUCT_OR_ENUM_CONSTANT_PATTERN,
+    Warn,
+    "constants of struct or enum type can only be used in a pattern if \
+     the struct or enum has `#[derive(PartialEq, Eq)]`"
+}
+
 declare_lint! {
     pub MATCH_OF_UNIT_VARIANT_VIA_PAREN_DOTDOT,
     Deny,
@@ -148,6 +161,30 @@ declare_lint! {
     "uses of #[derive] with raw pointers are rarely correct"
 }
 
+declare_lint! {
+    pub TRANSMUTE_FROM_FN_ITEM_TYPES,
+    Warn,
+    "transmute from function item type to pointer-sized type erroneously allowed"
+}
+
+declare_lint! {
+    pub OVERLAPPING_INHERENT_IMPLS,
+    Warn,
+    "two overlapping inherent impls define an item with the same name were erroneously allowed"
+}
+
+declare_lint! {
+    pub RENAMED_AND_REMOVED_LINTS,
+    Warn,
+    "lints that have been renamed or removed"
+}
+
+declare_lint! {
+    pub SUPER_OR_SELF_IN_GLOBAL_PATH,
+    Warn,
+    "detects super or self keywords at the beginning of global path"
+}
+
 /// Does nothing as a lint pass, but registers some `Lint`s
 /// which are used by other parts of the compiler.
 #[derive(Copy, Clone)]
@@ -175,9 +212,15 @@ impl LintPass for HardwiredLints {
             PRIVATE_IN_PUBLIC,
             INACCESSIBLE_EXTERN_CRATE,
             INVALID_TYPE_PARAM_DEFAULT,
+            ILLEGAL_FLOATING_POINT_CONSTANT_PATTERN,
+            ILLEGAL_STRUCT_OR_ENUM_CONSTANT_PATTERN,
             MATCH_OF_UNIT_VARIANT_VIA_PAREN_DOTDOT,
             CONST_ERR,
-            RAW_POINTER_DERIVE
+            RAW_POINTER_DERIVE,
+            TRANSMUTE_FROM_FN_ITEM_TYPES,
+            OVERLAPPING_INHERENT_IMPLS,
+            RENAMED_AND_REMOVED_LINTS,
+            SUPER_OR_SELF_IN_GLOBAL_PATH
         )
     }
 }
index d138ab101b524c81f1daa80f0cd8d547ffcc2253..1c6dd2658897a60302f4ba124bf0cddc0e77d2aa 100644 (file)
@@ -27,7 +27,7 @@ use self::TargetLint::*;
 
 use dep_graph::DepNode;
 use middle::privacy::AccessLevels;
-use middle::ty;
+use ty::TyCtxt;
 use session::{config, early_error, Session};
 use lint::{Level, LevelSource, Lint, LintId, LintArray, LintPass};
 use lint::{EarlyLintPass, EarlyLintPassObject, LateLintPass, LateLintPassObject};
@@ -39,16 +39,15 @@ use std::cell::RefCell;
 use std::cmp;
 use std::default::Default as StdDefault;
 use std::mem;
-use syntax::ast_util::{self, IdVisitingOperation};
 use syntax::attr::{self, AttrMetaMethods};
 use syntax::codemap::Span;
 use syntax::errors::DiagnosticBuilder;
 use syntax::parse::token::InternedString;
 use syntax::ast;
 use syntax::attr::ThinAttributesExt;
-use rustc_front::hir;
-use rustc_front::util;
-use rustc_front::intravisit as hir_visit;
+use hir;
+use hir::intravisit as hir_visit;
+use hir::intravisit::{IdVisitor, IdVisitingOperation};
 use syntax::visit as ast_visit;
 
 /// Information about the registered lints.
@@ -183,7 +182,7 @@ impl LintStore {
                     // We load builtin lints first, so a duplicate is a compiler bug.
                     // Use early_error when handling -W help with no crate.
                     (None, _) => early_error(config::ErrorOutputType::default(), &msg[..]),
-                    (Some(sess), false) => sess.bug(&msg[..]),
+                    (Some(_), false) => bug!("{}", msg),
 
                     // A duplicate name from a plugin is a user error.
                     (Some(sess), true)  => sess.err(&msg[..]),
@@ -221,7 +220,7 @@ impl LintStore {
                 // We load builtin lints first, so a duplicate is a compiler bug.
                 // Use early_error when handling -W help with no crate.
                 (None, _) => early_error(config::ErrorOutputType::default(), &msg[..]),
-                (Some(sess), false) => sess.bug(&msg[..]),
+                (Some(_), false) => bug!("{}", msg),
 
                 // A duplicate name from a plugin is a user error.
                 (Some(sess), true)  => sess.err(&msg[..]),
@@ -232,7 +231,7 @@ impl LintStore {
     pub fn register_renamed(&mut self, old_name: &str, new_name: &str) {
         let target = match self.by_name.get(new_name) {
             Some(&Id(lint_id)) => lint_id.clone(),
-            _ => panic!("invalid lint renaming of {} to {}", old_name, new_name)
+            _ => bug!("invalid lint renaming of {} to {}", old_name, new_name)
         };
         self.by_name.insert(old_name.to_string(), Renamed(new_name.to_string(), target));
     }
@@ -298,7 +297,7 @@ impl LintStore {
 /// Context for lint checking after type checking.
 pub struct LateContext<'a, 'tcx: 'a> {
     /// Type context we're checking in.
-    pub tcx: &'a ty::ctxt<'tcx>,
+    pub tcx: &'a TyCtxt<'tcx>,
 
     /// The crate being checked.
     pub krate: &'a hir::Crate,
@@ -430,7 +429,7 @@ pub fn raw_struct_lint<'a>(sess: &'a Session,
             format!("{} [-{} {}]", msg,
                     match level {
                         Warn => 'W', Deny => 'D', Forbid => 'F',
-                        Allow => panic!()
+                        Allow => bug!()
                     }, name.replace("_", "-"))
         },
         Node(src) => {
@@ -447,7 +446,7 @@ pub fn raw_struct_lint<'a>(sess: &'a Session,
         (Warn, None)     => sess.struct_warn(&msg[..]),
         (Deny, Some(sp)) => sess.struct_span_err(sp, &msg[..]),
         (Deny, None)     => sess.struct_err(&msg[..]),
-        _ => sess.bug("impossible level in raw_emit_lint"),
+        _ => bug!("impossible level in raw_emit_lint"),
     };
 
     // Check for future incompatibility lints and issue a stronger warning.
@@ -489,9 +488,14 @@ pub trait LintContext: Sized {
 
     fn level_src(&self, lint: &'static Lint) -> Option<LevelSource> {
         self.lints().levels.get(&LintId::of(lint)).map(|ls| match ls {
-            &(Warn, src) => {
+            &(Warn, _) => {
                 let lint_id = LintId::of(builtin::WARNINGS);
-                (self.lints().get_level_source(lint_id).0, src)
+                let warn_src = self.lints().get_level_source(lint_id);
+                if warn_src.0 != Warn {
+                    warn_src
+                } else {
+                    *ls
+                }
             }
             _ => *ls
         })
@@ -649,20 +653,10 @@ impl<'a> EarlyContext<'a> {
             level_stack: vec![],
         }
     }
-
-    fn visit_ids<F>(&mut self, f: F)
-        where F: FnOnce(&mut ast_util::IdVisitor<EarlyContext>)
-    {
-        let mut v = ast_util::IdVisitor {
-            operation: self,
-            visited_outermost: false,
-        };
-        f(&mut v);
-    }
 }
 
 impl<'a, 'tcx> LateContext<'a, 'tcx> {
-    fn new(tcx: &'a ty::ctxt<'tcx>,
+    fn new(tcx: &'a TyCtxt<'tcx>,
            krate: &'a hir::Crate,
            access_levels: &'a AccessLevels) -> LateContext<'a, 'tcx> {
         // We want to own the lint store, so move it out of the session.
@@ -680,9 +674,9 @@ impl<'a, 'tcx> LateContext<'a, 'tcx> {
     }
 
     fn visit_ids<F>(&mut self, f: F)
-        where F: FnOnce(&mut util::IdVisitor<LateContext>)
+        where F: FnOnce(&mut IdVisitor<LateContext>)
     {
-        let mut v = util::IdVisitor::new(self);
+        let mut v = IdVisitor::new(self);
         f(&mut v);
     }
 }
@@ -811,7 +805,7 @@ impl<'a, 'tcx, 'v> hir_visit::Visitor<'v> for LateContext<'a, 'tcx> {
     }
 
     fn visit_struct_field(&mut self, s: &hir::StructField) {
-        self.with_lint_attrs(&s.node.attrs, |cx| {
+        self.with_lint_attrs(&s.attrs, |cx| {
             run_lints!(cx, check_struct_field, late_passes, s);
             hir_visit::walk_struct_field(cx, s);
         })
@@ -923,7 +917,6 @@ impl<'a, 'v> ast_visit::Visitor<'v> for EarlyContext<'a> {
     fn visit_item(&mut self, it: &ast::Item) {
         self.with_lint_attrs(&it.attrs, |cx| {
             run_lints!(cx, check_item, early_passes, it);
-            cx.visit_ids(|v| v.visit_item(it));
             ast_visit::walk_item(cx, it);
             run_lints!(cx, check_item_post, early_passes, it);
         })
@@ -973,7 +966,7 @@ impl<'a, 'v> ast_visit::Visitor<'v> for EarlyContext<'a> {
     }
 
     fn visit_struct_field(&mut self, s: &ast::StructField) {
-        self.with_lint_attrs(&s.node.attrs, |cx| {
+        self.with_lint_attrs(&s.attrs, |cx| {
             run_lints!(cx, check_struct_field, early_passes, s);
             ast_visit::walk_struct_field(cx, s);
         })
@@ -1037,7 +1030,6 @@ impl<'a, 'v> ast_visit::Visitor<'v> for EarlyContext<'a> {
     fn visit_trait_item(&mut self, trait_item: &ast::TraitItem) {
         self.with_lint_attrs(&trait_item.attrs, |cx| {
             run_lints!(cx, check_trait_item, early_passes, trait_item);
-            cx.visit_ids(|v| v.visit_trait_item(trait_item));
             ast_visit::walk_trait_item(cx, trait_item);
             run_lints!(cx, check_trait_item_post, early_passes, trait_item);
         });
@@ -1046,7 +1038,6 @@ impl<'a, 'v> ast_visit::Visitor<'v> for EarlyContext<'a> {
     fn visit_impl_item(&mut self, impl_item: &ast::ImplItem) {
         self.with_lint_attrs(&impl_item.attrs, |cx| {
             run_lints!(cx, check_impl_item, early_passes, impl_item);
-            cx.visit_ids(|v| v.visit_impl_item(impl_item));
             ast_visit::walk_impl_item(cx, impl_item);
             run_lints!(cx, check_impl_item_post, early_passes, impl_item);
         });
@@ -1094,18 +1085,6 @@ impl<'a, 'tcx> IdVisitingOperation for LateContext<'a, 'tcx> {
         }
     }
 }
-impl<'a> IdVisitingOperation for EarlyContext<'a> {
-    fn visit_id(&mut self, id: ast::NodeId) {
-        match self.sess.lints.borrow_mut().remove(&id) {
-            None => {}
-            Some(lints) => {
-                for (lint_id, span, msg) in lints {
-                    self.span_lint(lint_id.lint, span, &msg[..])
-                }
-            }
-        }
-    }
-}
 
 // This lint pass is defined here because it touches parts of the `LateContext`
 // that we don't want to expose. It records the lint level at certain AST
@@ -1139,13 +1118,13 @@ impl LateLintPass for GatherNodeLevels {
     }
 }
 
-enum CheckLintNameResult<'a> {
+enum CheckLintNameResult {
     Ok,
     // Lint doesn't exist
     NoLint,
-    // The lint is either renamed or removed and a warning was
-    // generated in the DiagnosticBuilder
-    Mentioned(DiagnosticBuilder<'a>)
+    // The lint is either renamed or removed. This is the warning
+    // message.
+    Warning(String)
 }
 
 /// Checks the name of a lint for its existence, and whether it was
@@ -1155,27 +1134,18 @@ enum CheckLintNameResult<'a> {
 /// it emits non-fatal warnings and there are *two* lint passes that
 /// inspect attributes, this is only run from the late pass to avoid
 /// printing duplicate warnings.
-fn check_lint_name<'a>(sess: &'a Session,
-                       lint_cx: &LintStore,
-                       lint_name: &str,
-                       span: Option<Span>) -> CheckLintNameResult<'a> {
+fn check_lint_name(lint_cx: &LintStore,
+                   lint_name: &str) -> CheckLintNameResult {
     match lint_cx.by_name.get(lint_name) {
         Some(&Renamed(ref new_name, _)) => {
-            let warning = format!("lint {} has been renamed to {}",
-                                  lint_name, new_name);
-            let db = match span {
-                Some(span) => sess.struct_span_warn(span, &warning[..]),
-                None => sess.struct_warn(&warning[..]),
-            };
-            CheckLintNameResult::Mentioned(db)
+            CheckLintNameResult::Warning(
+                format!("lint {} has been renamed to {}", lint_name, new_name)
+            )
         },
         Some(&Removed(ref reason)) => {
-            let warning = format!("lint {} has been removed: {}", lint_name, reason);
-            let db = match span {
-                Some(span) => sess.struct_span_warn(span, &warning[..]),
-                None => sess.struct_warn(&warning[..])
-            };
-            CheckLintNameResult::Mentioned(db)
+            CheckLintNameResult::Warning(
+                format!("lint {} has been removed: {}", lint_name, reason)
+            )
         },
         None => {
             match lint_cx.lint_groups.get(lint_name) {
@@ -1204,10 +1174,12 @@ fn check_lint_name_attribute(cx: &LateContext, attr: &ast::Attribute) {
                 continue;
             }
             Ok((lint_name, _, span)) => {
-                match check_lint_name(&cx.tcx.sess, &cx.lints, &lint_name[..], Some(span)) {
+                match check_lint_name(&cx.lints,
+                                      &lint_name[..]) {
                     CheckLintNameResult::Ok => (),
-                    CheckLintNameResult::Mentioned(mut db) => {
-                        db.emit();
+                    CheckLintNameResult::Warning(ref msg) => {
+                        cx.span_lint(builtin::RENAMED_AND_REMOVED_LINTS,
+                                     span, msg);
                     }
                     CheckLintNameResult::NoLint => {
                         cx.span_lint(builtin::UNKNOWN_LINTS, span,
@@ -1223,9 +1195,11 @@ fn check_lint_name_attribute(cx: &LateContext, attr: &ast::Attribute) {
 // Checks the validity of lint names derived from the command line
 fn check_lint_name_cmdline(sess: &Session, lint_cx: &LintStore,
                            lint_name: &str, level: Level) {
-    let db = match check_lint_name(sess, lint_cx, lint_name, None) {
+    let db = match check_lint_name(lint_cx, lint_name) {
         CheckLintNameResult::Ok => None,
-        CheckLintNameResult::Mentioned(db) => Some(db),
+        CheckLintNameResult::Warning(ref msg) => {
+            Some(sess.struct_warn(msg))
+        },
         CheckLintNameResult::NoLint => {
             Some(sess.struct_err(&format!("unknown lint: `{}`", lint_name)))
         }
@@ -1249,7 +1223,7 @@ fn check_lint_name_cmdline(sess: &Session, lint_cx: &LintStore,
 /// Perform lint checking on a crate.
 ///
 /// Consumes the `lint_store` field of the `Session`.
-pub fn check_crate(tcx: &ty::ctxt, access_levels: &AccessLevels) {
+pub fn check_crate(tcx: &TyCtxt, access_levels: &AccessLevels) {
     let _task = tcx.dep_graph.in_task(DepNode::LateLintCheck);
 
     let krate = tcx.map.krate();
@@ -1275,13 +1249,16 @@ pub fn check_crate(tcx: &ty::ctxt, access_levels: &AccessLevels) {
     // in the iteration code.
     for (id, v) in tcx.sess.lints.borrow().iter() {
         for &(lint, span, ref msg) in v {
-            tcx.sess.span_bug(span,
-                              &format!("unprocessed lint {} at {}: {}",
-                                       lint.as_str(), tcx.map.node_to_string(*id), *msg))
+            span_bug!(span,
+                      "unprocessed lint {} at {}: {}",
+                      lint.as_str(), tcx.map.node_to_string(*id), *msg)
         }
     }
 
     *tcx.node_lint_levels.borrow_mut() = cx.node_levels.into_inner();
+
+    // Put the lint store back in the session.
+    mem::replace(&mut *tcx.sess.lint_store.borrow_mut(), cx.lints);
 }
 
 pub fn check_ast_crate(sess: &Session, krate: &ast::Crate) {
@@ -1289,11 +1266,12 @@ pub fn check_ast_crate(sess: &Session, krate: &ast::Crate) {
 
     // Visit the whole crate.
     cx.with_lint_attrs(&krate.attrs, |cx| {
-        cx.visit_id(ast::CRATE_NODE_ID);
-        cx.visit_ids(|v| {
-            v.visited_outermost = true;
-            ast_visit::walk_crate(v, krate);
-        });
+        // Lints may be assigned to the whole crate.
+        if let Some(lints) = cx.sess.lints.borrow_mut().remove(&ast::CRATE_NODE_ID) {
+            for (lint_id, span, msg) in lints {
+                cx.span_lint(lint_id.lint, span, &msg[..])
+            }
+        }
 
         // since the root module isn't visited as an item (because it isn't an
         // item), warn for it here.
@@ -1311,9 +1289,7 @@ pub fn check_ast_crate(sess: &Session, krate: &ast::Crate) {
     // in the iteration code.
     for (_, v) in sess.lints.borrow().iter() {
         for &(lint, span, ref msg) in v {
-            sess.span_bug(span,
-                          &format!("unprocessed lint {}: {}",
-                                   lint.as_str(), *msg))
+            span_bug!(span, "unprocessed lint {}: {}", lint.as_str(), *msg)
         }
     }
 }
index 133d0163a8c9b9d568d716359152d7e3af188ac0..28994e1a7c48d50d82f6c807784a0b3bee4c5989 100644 (file)
@@ -34,10 +34,10 @@ pub use self::LintSource::*;
 use std::hash;
 use std::ascii::AsciiExt;
 use syntax::codemap::Span;
-use rustc_front::intravisit::FnKind;
+use hir::intravisit::FnKind;
 use syntax::visit as ast_visit;
 use syntax::ast;
-use rustc_front::hir;
+use hir;
 
 pub use lint::context::{LateContext, EarlyContext, LintContext, LintStore,
                         raw_emit_lint, check_crate, check_ast_crate, gather_attrs,
index ed764ebd9f95dd4e27b579342fc0f4c66e77a486..76dca1bb5b64941b61d989de99b18515c84c47dd 100644 (file)
@@ -44,3 +44,18 @@ macro_rules! enum_from_u32 {
         }
     }
 }
+
+#[macro_export]
+macro_rules! bug {
+    () => ( bug!("impossible case reached") );
+    ($($message:tt)*) => ({
+        $crate::session::bug_fmt(file!(), line!(), format_args!($($message)*))
+    })
+}
+
+#[macro_export]
+macro_rules! span_bug {
+    ($span:expr, $($message:tt)*) => ({
+        $crate::session::span_bug_fmt(file!(), line!(), $span, format_args!($($message)*))
+    })
+}
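
Both macros just bundle file!() and line!() with a format_args!() payload and hand everything to an ordinary reporting function, so call sites stay terse while the heavy lifting lives in one place. The same pattern in miniature, with hypothetical names rather than the patch's session API:

    use std::fmt;

    // Ordinary function that does the real work; the macro only forwards to it.
    fn report_bug(file: &str, line: u32, args: fmt::Arguments) -> ! {
        panic!("internal error at {}:{}: {}", file, line, args);
    }

    macro_rules! my_bug {
        () => ( my_bug!("impossible case reached") );
        ($($message:tt)*) => ({
            report_bug(file!(), line!(), format_args!($($message)*))
        })
    }

    fn main() {
        let x: i32 = 2;
        if x == 3 {
            my_bug!("x should never be {}", x);
        }
        println!("ok");
    }
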
index 8b1bdc31beb01bf916aed85ab623a9907edd9cc5..2a27732b9532cdb83014ab3d6faac6e44ce859cf 100644 (file)
  * Almost certainly this could (and should) be refactored out of existence.
  */
 
-use middle::def::Def;
-use middle::ty::{self, Ty};
+use hir::def::Def;
+use ty::{Ty, TyCtxt};
 
 use syntax::codemap::Span;
-use rustc_front::hir as ast;
+use hir as ast;
 
-pub fn prohibit_type_params(tcx: &ty::ctxt, segments: &[ast::PathSegment]) {
+pub fn prohibit_type_params(tcx: &TyCtxt, segments: &[ast::PathSegment]) {
     for segment in segments {
         for typ in segment.parameters.types() {
             span_err!(tcx.sess, typ.span, E0109,
@@ -39,13 +39,13 @@ pub fn prohibit_type_params(tcx: &ty::ctxt, segments: &[ast::PathSegment]) {
     }
 }
 
-pub fn prohibit_projection(tcx: &ty::ctxt, span: Span)
+pub fn prohibit_projection(tcx: &TyCtxt, span: Span)
 {
     span_err!(tcx.sess, span, E0229,
               "associated type bindings are not allowed here");
 }
 
-pub fn prim_ty_to_ty<'tcx>(tcx: &ty::ctxt<'tcx>,
+pub fn prim_ty_to_ty<'tcx>(tcx: &TyCtxt<'tcx>,
                            segments: &[ast::PathSegment],
                            nty: ast::PrimTy)
                            -> Ty<'tcx> {
@@ -62,13 +62,12 @@ pub fn prim_ty_to_ty<'tcx>(tcx: &ty::ctxt<'tcx>,
 
 /// If a type in the AST is a primitive type, return the ty::Ty corresponding
 /// to it.
-pub fn ast_ty_to_prim_ty<'tcx>(tcx: &ty::ctxt<'tcx>, ast_ty: &ast::Ty)
+pub fn ast_ty_to_prim_ty<'tcx>(tcx: &TyCtxt<'tcx>, ast_ty: &ast::Ty)
                                -> Option<Ty<'tcx>> {
     if let ast::TyPath(None, ref path) = ast_ty.node {
         let def = match tcx.def_map.borrow().get(&ast_ty.id) {
             None => {
-                tcx.sess.span_bug(ast_ty.span,
-                                  &format!("unbound path {:?}", path))
+                span_bug!(ast_ty.span, "unbound path {:?}", path)
             }
             Some(d) => d.full_def()
         };
diff --git a/src/librustc/middle/cfg/construct.rs b/src/librustc/middle/cfg/construct.rs
deleted file mode 100644 (file)
index 701a459..0000000
+++ /dev/null
@@ -1,611 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use rustc_data_structures::graph;
-use middle::cfg::*;
-use middle::def::Def;
-use middle::pat_util;
-use middle::ty;
-use syntax::ast;
-use syntax::ptr::P;
-
-use rustc_front::hir::{self, PatKind};
-
-struct CFGBuilder<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
-    graph: CFGGraph,
-    fn_exit: CFGIndex,
-    loop_scopes: Vec<LoopScope>,
-}
-
-#[derive(Copy, Clone)]
-struct LoopScope {
-    loop_id: ast::NodeId,     // id of loop/while node
-    continue_index: CFGIndex, // where to go on a `loop`
-    break_index: CFGIndex,    // where to go on a `break
-}
-
-pub fn construct(tcx: &ty::ctxt,
-                 blk: &hir::Block) -> CFG {
-    let mut graph = graph::Graph::new();
-    let entry = graph.add_node(CFGNodeData::Entry);
-
-    // `fn_exit` is target of return exprs, which lies somewhere
-    // outside input `blk`. (Distinguishing `fn_exit` and `block_exit`
-    // also resolves chicken-and-egg problem that arises if you try to
-    // have return exprs jump to `block_exit` during construction.)
-    let fn_exit = graph.add_node(CFGNodeData::Exit);
-    let block_exit;
-
-    let mut cfg_builder = CFGBuilder {
-        graph: graph,
-        fn_exit: fn_exit,
-        tcx: tcx,
-        loop_scopes: Vec::new()
-    };
-    block_exit = cfg_builder.block(blk, entry);
-    cfg_builder.add_contained_edge(block_exit, fn_exit);
-    let CFGBuilder {graph, ..} = cfg_builder;
-    CFG {graph: graph,
-         entry: entry,
-         exit: fn_exit}
-}
-
-impl<'a, 'tcx> CFGBuilder<'a, 'tcx> {
-    fn block(&mut self, blk: &hir::Block, pred: CFGIndex) -> CFGIndex {
-        let mut stmts_exit = pred;
-        for stmt in &blk.stmts {
-            stmts_exit = self.stmt(stmt, stmts_exit);
-        }
-
-        let expr_exit = self.opt_expr(&blk.expr, stmts_exit);
-
-        self.add_ast_node(blk.id, &[expr_exit])
-    }
-
-    fn stmt(&mut self, stmt: &hir::Stmt, pred: CFGIndex) -> CFGIndex {
-        match stmt.node {
-            hir::StmtDecl(ref decl, id) => {
-                let exit = self.decl(&decl, pred);
-                self.add_ast_node(id, &[exit])
-            }
-
-            hir::StmtExpr(ref expr, id) | hir::StmtSemi(ref expr, id) => {
-                let exit = self.expr(&expr, pred);
-                self.add_ast_node(id, &[exit])
-            }
-        }
-    }
-
-    fn decl(&mut self, decl: &hir::Decl, pred: CFGIndex) -> CFGIndex {
-        match decl.node {
-            hir::DeclLocal(ref local) => {
-                let init_exit = self.opt_expr(&local.init, pred);
-                self.pat(&local.pat, init_exit)
-            }
-
-            hir::DeclItem(_) => {
-                pred
-            }
-        }
-    }
-
-    fn pat(&mut self, pat: &hir::Pat, pred: CFGIndex) -> CFGIndex {
-        match pat.node {
-            PatKind::Ident(_, _, None) |
-            PatKind::TupleStruct(_, None) |
-            PatKind::Path(..) |
-            PatKind::QPath(..) |
-            PatKind::Lit(..) |
-            PatKind::Range(..) |
-            PatKind::Wild => {
-                self.add_ast_node(pat.id, &[pred])
-            }
-
-            PatKind::Box(ref subpat) |
-            PatKind::Ref(ref subpat, _) |
-            PatKind::Ident(_, _, Some(ref subpat)) => {
-                let subpat_exit = self.pat(&subpat, pred);
-                self.add_ast_node(pat.id, &[subpat_exit])
-            }
-
-            PatKind::TupleStruct(_, Some(ref subpats)) |
-            PatKind::Tup(ref subpats) => {
-                let pats_exit = self.pats_all(subpats.iter(), pred);
-                self.add_ast_node(pat.id, &[pats_exit])
-            }
-
-            PatKind::Struct(_, ref subpats, _) => {
-                let pats_exit =
-                    self.pats_all(subpats.iter().map(|f| &f.node.pat), pred);
-                self.add_ast_node(pat.id, &[pats_exit])
-            }
-
-            PatKind::Vec(ref pre, ref vec, ref post) => {
-                let pre_exit = self.pats_all(pre.iter(), pred);
-                let vec_exit = self.pats_all(vec.iter(), pre_exit);
-                let post_exit = self.pats_all(post.iter(), vec_exit);
-                self.add_ast_node(pat.id, &[post_exit])
-            }
-        }
-    }
-
-    fn pats_all<'b, I: Iterator<Item=&'b P<hir::Pat>>>(&mut self,
-                                          pats: I,
-                                          pred: CFGIndex) -> CFGIndex {
-        //! Handles case where all of the patterns must match.
-        pats.fold(pred, |pred, pat| self.pat(&pat, pred))
-    }
-
-    fn expr(&mut self, expr: &hir::Expr, pred: CFGIndex) -> CFGIndex {
-        match expr.node {
-            hir::ExprBlock(ref blk) => {
-                let blk_exit = self.block(&blk, pred);
-                self.add_ast_node(expr.id, &[blk_exit])
-            }
-
-            hir::ExprIf(ref cond, ref then, None) => {
-                //
-                //     [pred]
-                //       |
-                //       v 1
-                //     [cond]
-                //       |
-                //      / \
-                //     /   \
-                //    v 2   *
-                //  [then]  |
-                //    |     |
-                //    v 3   v 4
-                //   [..expr..]
-                //
-                let cond_exit = self.expr(&cond, pred);                // 1
-                let then_exit = self.block(&then, cond_exit);          // 2
-                self.add_ast_node(expr.id, &[cond_exit, then_exit])      // 3,4
-            }
-
-            hir::ExprIf(ref cond, ref then, Some(ref otherwise)) => {
-                //
-                //     [pred]
-                //       |
-                //       v 1
-                //     [cond]
-                //       |
-                //      / \
-                //     /   \
-                //    v 2   v 3
-                //  [then][otherwise]
-                //    |     |
-                //    v 4   v 5
-                //   [..expr..]
-                //
-                let cond_exit = self.expr(&cond, pred);                // 1
-                let then_exit = self.block(&then, cond_exit);          // 2
-                let else_exit = self.expr(&otherwise, cond_exit);      // 3
-                self.add_ast_node(expr.id, &[then_exit, else_exit])      // 4, 5
-            }
-
-            hir::ExprWhile(ref cond, ref body, _) => {
-                //
-                //         [pred]
-                //           |
-                //           v 1
-                //       [loopback] <--+ 5
-                //           |         |
-                //           v 2       |
-                //   +-----[cond]      |
-                //   |       |         |
-                //   |       v 4       |
-                //   |     [body] -----+
-                //   v 3
-                // [expr]
-                //
-                // Note that `break` and `continue` statements
-                // may cause additional edges.
-
-                // Is the condition considered part of the loop?
-                let loopback = self.add_dummy_node(&[pred]);              // 1
-                let cond_exit = self.expr(&cond, loopback);             // 2
-                let expr_exit = self.add_ast_node(expr.id, &[cond_exit]); // 3
-                self.loop_scopes.push(LoopScope {
-                    loop_id: expr.id,
-                    continue_index: loopback,
-                    break_index: expr_exit
-                });
-                let body_exit = self.block(&body, cond_exit);          // 4
-                self.add_contained_edge(body_exit, loopback);            // 5
-                self.loop_scopes.pop();
-                expr_exit
-            }
-
-            hir::ExprLoop(ref body, _) => {
-                //
-                //     [pred]
-                //       |
-                //       v 1
-                //   [loopback] <---+
-                //       |      4   |
-                //       v 3        |
-                //     [body] ------+
-                //
-                //     [expr] 2
-                //
-                // Note that `break` and `continue` statements
-                // may cause additional edges.
-
-                let loopback = self.add_dummy_node(&[pred]);              // 1
-                let expr_exit = self.add_ast_node(expr.id, &[]);          // 2
-                self.loop_scopes.push(LoopScope {
-                    loop_id: expr.id,
-                    continue_index: loopback,
-                    break_index: expr_exit,
-                });
-                let body_exit = self.block(&body, loopback);           // 3
-                self.add_contained_edge(body_exit, loopback);            // 4
-                self.loop_scopes.pop();
-                expr_exit
-            }
-
-            hir::ExprMatch(ref discr, ref arms, _) => {
-                self.match_(expr.id, &discr, &arms, pred)
-            }
-
-            hir::ExprBinary(op, ref l, ref r) if ::rustc_front::util::lazy_binop(op.node) => {
-                //
-                //     [pred]
-                //       |
-                //       v 1
-                //      [l]
-                //       |
-                //      / \
-                //     /   \
-                //    v 2  *
-                //   [r]   |
-                //    |    |
-                //    v 3  v 4
-                //   [..exit..]
-                //
-                let l_exit = self.expr(&l, pred);                      // 1
-                let r_exit = self.expr(&r, l_exit);                    // 2
-                self.add_ast_node(expr.id, &[l_exit, r_exit])            // 3,4
-            }
-
-            hir::ExprRet(ref v) => {
-                let v_exit = self.opt_expr(v, pred);
-                let b = self.add_ast_node(expr.id, &[v_exit]);
-                self.add_returning_edge(expr, b);
-                self.add_unreachable_node()
-            }
-
-            hir::ExprBreak(label) => {
-                let loop_scope = self.find_scope(expr, label.map(|l| l.node.name));
-                let b = self.add_ast_node(expr.id, &[pred]);
-                self.add_exiting_edge(expr, b,
-                                      loop_scope, loop_scope.break_index);
-                self.add_unreachable_node()
-            }
-
-            hir::ExprAgain(label) => {
-                let loop_scope = self.find_scope(expr, label.map(|l| l.node.name));
-                let a = self.add_ast_node(expr.id, &[pred]);
-                self.add_exiting_edge(expr, a,
-                                      loop_scope, loop_scope.continue_index);
-                self.add_unreachable_node()
-            }
-
-            hir::ExprVec(ref elems) => {
-                self.straightline(expr, pred, elems.iter().map(|e| &**e))
-            }
-
-            hir::ExprCall(ref func, ref args) => {
-                self.call(expr, pred, &func, args.iter().map(|e| &**e))
-            }
-
-            hir::ExprMethodCall(_, _, ref args) => {
-                self.call(expr, pred, &args[0], args[1..].iter().map(|e| &**e))
-            }
-
-            hir::ExprIndex(ref l, ref r) |
-            hir::ExprBinary(_, ref l, ref r) if self.tcx.is_method_call(expr.id) => {
-                self.call(expr, pred, &l, Some(&**r).into_iter())
-            }
-
-            hir::ExprRange(ref start, ref end) => {
-                let fields = start.as_ref().map(|e| &**e).into_iter()
-                    .chain(end.as_ref().map(|e| &**e));
-                self.straightline(expr, pred, fields)
-            }
-
-            hir::ExprUnary(_, ref e) if self.tcx.is_method_call(expr.id) => {
-                self.call(expr, pred, &e, None::<hir::Expr>.iter())
-            }
-
-            hir::ExprTup(ref exprs) => {
-                self.straightline(expr, pred, exprs.iter().map(|e| &**e))
-            }
-
-            hir::ExprStruct(_, ref fields, ref base) => {
-                let field_cfg = self.straightline(expr, pred, fields.iter().map(|f| &*f.expr));
-                self.opt_expr(base, field_cfg)
-            }
-
-            hir::ExprRepeat(ref elem, ref count) => {
-                self.straightline(expr, pred, [elem, count].iter().map(|&e| &**e))
-            }
-
-            hir::ExprAssign(ref l, ref r) |
-            hir::ExprAssignOp(_, ref l, ref r) => {
-                self.straightline(expr, pred, [r, l].iter().map(|&e| &**e))
-            }
-
-            hir::ExprIndex(ref l, ref r) |
-            hir::ExprBinary(_, ref l, ref r) => { // NB: && and || handled earlier
-                self.straightline(expr, pred, [l, r].iter().map(|&e| &**e))
-            }
-
-            hir::ExprBox(ref e) |
-            hir::ExprAddrOf(_, ref e) |
-            hir::ExprCast(ref e, _) |
-            hir::ExprType(ref e, _) |
-            hir::ExprUnary(_, ref e) |
-            hir::ExprField(ref e, _) |
-            hir::ExprTupField(ref e, _) => {
-                self.straightline(expr, pred, Some(&**e).into_iter())
-            }
-
-            hir::ExprInlineAsm(ref inline_asm) => {
-                let inputs = inline_asm.inputs.iter();
-                let outputs = inline_asm.outputs.iter();
-                let post_inputs = self.exprs(inputs.map(|a| {
-                    debug!("cfg::construct InlineAsm id:{} input:{:?}", expr.id, a);
-                    let &(_, ref expr) = a;
-                    &**expr
-                }), pred);
-                let post_outputs = self.exprs(outputs.map(|a| {
-                    debug!("cfg::construct InlineAsm id:{} output:{:?}", expr.id, a);
-                    &*a.expr
-                }), post_inputs);
-                self.add_ast_node(expr.id, &[post_outputs])
-            }
-
-            hir::ExprClosure(..) |
-            hir::ExprLit(..) |
-            hir::ExprPath(..) => {
-                self.straightline(expr, pred, None::<hir::Expr>.iter())
-            }
-        }
-    }
-
-    fn call<'b, I: Iterator<Item=&'b hir::Expr>>(&mut self,
-            call_expr: &hir::Expr,
-            pred: CFGIndex,
-            func_or_rcvr: &hir::Expr,
-            args: I) -> CFGIndex {
-        let method_call = ty::MethodCall::expr(call_expr.id);
-        let fn_ty = match self.tcx.tables.borrow().method_map.get(&method_call) {
-            Some(method) => method.ty,
-            None => self.tcx.expr_ty_adjusted(func_or_rcvr)
-        };
-
-        let func_or_rcvr_exit = self.expr(func_or_rcvr, pred);
-        let ret = self.straightline(call_expr, func_or_rcvr_exit, args);
-        if fn_ty.fn_ret().diverges() {
-            self.add_unreachable_node()
-        } else {
-            ret
-        }
-    }
-
-    fn exprs<'b, I: Iterator<Item=&'b hir::Expr>>(&mut self,
-                                             exprs: I,
-                                             pred: CFGIndex) -> CFGIndex {
-        //! Constructs graph for `exprs` evaluated in order
-        exprs.fold(pred, |p, e| self.expr(e, p))
-    }
-
-    fn opt_expr(&mut self,
-                opt_expr: &Option<P<hir::Expr>>,
-                pred: CFGIndex) -> CFGIndex {
-        //! Constructs graph for `opt_expr` evaluated, if Some
-        opt_expr.iter().fold(pred, |p, e| self.expr(&e, p))
-    }
-
-    fn straightline<'b, I: Iterator<Item=&'b hir::Expr>>(&mut self,
-                    expr: &hir::Expr,
-                    pred: CFGIndex,
-                    subexprs: I) -> CFGIndex {
-        //! Handles case of an expression that evaluates `subexprs` in order
-
-        let subexprs_exit = self.exprs(subexprs, pred);
-        self.add_ast_node(expr.id, &[subexprs_exit])
-    }
-
-    fn match_(&mut self, id: ast::NodeId, discr: &hir::Expr,
-              arms: &[hir::Arm], pred: CFGIndex) -> CFGIndex {
-        // The CFG for a match expression is quite complex, so there is no
-        // ASCII art for it (yet).
-        //
-        // The CFG generated below matches roughly what trans puts
-        // out. Each pattern and guard is visited in parallel, with
-        // arms containing multiple patterns generating multiple nodes
-        // for the same guard expression. The guard expressions chain
-        // into each other from top to bottom, with a specific
-        // exception to allow some additional valid programs
-        // (explained below). Trans differs slightly in that the
-        // pattern matching may continue after a guard but the visible
-        // behaviour should be the same.
-        //
-        // What is going on is explained in further comments.
-
-        // Visit the discriminant expression
-        let discr_exit = self.expr(discr, pred);
-
-        // Add a node for the exit of the match expression as a whole.
-        let expr_exit = self.add_ast_node(id, &[]);
-
-        // Keep track of the previous guard expressions
-        let mut prev_guards = Vec::new();
-        // Track if the previous pattern contained bindings or wildcards
-        let mut prev_has_bindings = false;
-
-        for arm in arms {
-            // Add an exit node for when we've visited all the
-            // patterns and the guard (if there is one) in the arm.
-            let arm_exit = self.add_dummy_node(&[]);
-
-            for pat in &arm.pats {
-                // Visit the pattern, coming from the discriminant exit
-                let mut pat_exit = self.pat(&pat, discr_exit);
-
-                // If there is a guard expression, handle it here
-                if let Some(ref guard) = arm.guard {
-                    // Add a dummy node for the previous guard
-                    // expression to target
-                    let guard_start = self.add_dummy_node(&[pat_exit]);
-                    // Visit the guard expression
-                    let guard_exit = self.expr(&guard, guard_start);
-
-                    let this_has_bindings = pat_util::pat_contains_bindings_or_wild(
-                        &self.tcx.def_map.borrow(), &pat);
-
-                    // If both this pattern and the previous pattern
-                    // were free of bindings, they must consist only
-                    // of "constant" patterns. Note we cannot match an
-                    // all-constant pattern, fail the guard, and then
-                    // match *another* all-constant pattern. This is
-                    // because if the previous pattern matches, then
-                    // we *cannot* match this one, unless all the
-                    // constants are the same (which is rejected by
-                    // `check_match`).
-                    //
-                    // We can use this to be smarter about the flow
-                    // along guards. If the previous pattern matched,
-                    // then we know we will not visit the guard in
-                    // this one (whether or not the guard succeeded);
-                    // if the previous pattern failed, then we know
-                    // the guard for that pattern will not have been
-                    // visited. Thus, it is not possible to visit both
-                    // the previous guard and the current one when
-                    // both patterns consist only of constant
-                    // sub-patterns.
-                    //
-                    // However, if the above does not hold, then all
-                    // previous guards need to be wired to visit the
-                    // current guard pattern.
-                    if prev_has_bindings || this_has_bindings {
-                        while let Some(prev) = prev_guards.pop() {
-                            self.add_contained_edge(prev, guard_start);
-                        }
-                    }
-
-                    prev_has_bindings = this_has_bindings;
-
-                    // Push the guard onto the list of previous guards
-                    prev_guards.push(guard_exit);
-
-                    // Update the exit node for the pattern
-                    pat_exit = guard_exit;
-                }
-
-                // Add an edge from the exit of this pattern to the
-                // exit of the arm
-                self.add_contained_edge(pat_exit, arm_exit);
-            }
-
-            // Visit the body of this arm
-            let body_exit = self.expr(&arm.body, arm_exit);
-
-            // Link the body to the exit of the expression
-            self.add_contained_edge(body_exit, expr_exit);
-        }
-
-        expr_exit
-    }
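A hedged sketch of the constant-pattern shortcut described in the comments above; the function, values, and guards are invented for illustration:

    fn pick(x: i32, a: bool, b: bool) -> i32 {
        match x {
            // Both patterns are all-constant and binding-free, so if `1`
            // matched then `2` cannot; at most one guard ever runs, and the
            // CFG needs no edge from the first guard's exit to the second
            // guard's entry.
            1 if a => 10,
            2 if b => 20,
            _ => 0,
        }
    }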
-
-    fn add_dummy_node(&mut self, preds: &[CFGIndex]) -> CFGIndex {
-        self.add_node(CFGNodeData::Dummy, preds)
-    }
-
-    fn add_ast_node(&mut self, id: ast::NodeId, preds: &[CFGIndex]) -> CFGIndex {
-        assert!(id != ast::DUMMY_NODE_ID);
-        self.add_node(CFGNodeData::AST(id), preds)
-    }
-
-    fn add_unreachable_node(&mut self) -> CFGIndex {
-        self.add_node(CFGNodeData::Unreachable, &[])
-    }
-
-    fn add_node(&mut self, data: CFGNodeData, preds: &[CFGIndex]) -> CFGIndex {
-        let node = self.graph.add_node(data);
-        for &pred in preds {
-            self.add_contained_edge(pred, node);
-        }
-        node
-    }
-
-    fn add_contained_edge(&mut self,
-                          source: CFGIndex,
-                          target: CFGIndex) {
-        let data = CFGEdgeData {exiting_scopes: vec!() };
-        self.graph.add_edge(source, target, data);
-    }
-
-    fn add_exiting_edge(&mut self,
-                        from_expr: &hir::Expr,
-                        from_index: CFGIndex,
-                        to_loop: LoopScope,
-                        to_index: CFGIndex) {
-        let mut data = CFGEdgeData {exiting_scopes: vec!() };
-        let mut scope = self.tcx.region_maps.node_extent(from_expr.id);
-        let target_scope = self.tcx.region_maps.node_extent(to_loop.loop_id);
-        while scope != target_scope {
-            data.exiting_scopes.push(scope.node_id(&self.tcx.region_maps));
-            scope = self.tcx.region_maps.encl_scope(scope);
-        }
-        self.graph.add_edge(from_index, to_index, data);
-    }
-
-    fn add_returning_edge(&mut self,
-                          _from_expr: &hir::Expr,
-                          from_index: CFGIndex) {
-        let mut data = CFGEdgeData {
-            exiting_scopes: vec!(),
-        };
-        for &LoopScope { loop_id: id, .. } in self.loop_scopes.iter().rev() {
-            data.exiting_scopes.push(id);
-        }
-        self.graph.add_edge(from_index, self.fn_exit, data);
-    }
-
-    fn find_scope(&self,
-                  expr: &hir::Expr,
-                  label: Option<ast::Name>) -> LoopScope {
-        if label.is_none() {
-            return *self.loop_scopes.last().unwrap();
-        }
-
-        match self.tcx.def_map.borrow().get(&expr.id).map(|d| d.full_def()) {
-            Some(Def::Label(loop_id)) => {
-                for l in &self.loop_scopes {
-                    if l.loop_id == loop_id {
-                        return *l;
-                    }
-                }
-                self.tcx.sess.span_bug(expr.span,
-                    &format!("no loop scope for id {}", loop_id));
-            }
-
-            r => {
-                self.tcx.sess.span_bug(expr.span,
-                    &format!("bad entry `{:?}` in def_map for label", r));
-            }
-        }
-    }
-}
diff --git a/src/librustc/middle/cfg/graphviz.rs b/src/librustc/middle/cfg/graphviz.rs
deleted file mode 100644 (file)
index e807092..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! This module provides linkage between rustc::middle::graph and
-//! libgraphviz traits.
-
-// For clarity, rename the graphviz crate locally to dot.
-use graphviz as dot;
-use graphviz::IntoCow;
-
-use syntax::ast;
-
-use front::map as ast_map;
-use middle::cfg;
-
-pub type Node<'a> = (cfg::CFGIndex, &'a cfg::CFGNode);
-pub type Edge<'a> = &'a cfg::CFGEdge;
-
-pub struct LabelledCFG<'a, 'ast: 'a> {
-    pub ast_map: &'a ast_map::Map<'ast>,
-    pub cfg: &'a cfg::CFG,
-    pub name: String,
-    /// `labelled_edges` controls whether we emit labels on the edges
-    pub labelled_edges: bool,
-}
-
-fn replace_newline_with_backslash_l(s: String) -> String {
-    // Replacing newlines with \\l causes each line to be left-aligned,
-    // improving presentation of (long) pretty-printed expressions.
-    if s.contains("\n") {
-        let mut s = s.replace("\n", "\\l");
-        // Apparently left-alignment applies to the line that precedes
-        // \l, not the line that follows; so, add \l at end of string
-        // if not already present, ensuring last line gets left-aligned
-        // as well.
-        let mut last_two: Vec<_> =
-            s.chars().rev().take(2).collect();
-        last_two.reverse();
-        if last_two != ['\\', 'l'] {
-            s.push_str("\\l");
-        }
-        s
-    } else {
-        s
-    }
-}
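A hedged usage sketch of the helper above, with expected values worked out from the code rather than taken from the original tests:

    #[test]
    fn replaces_newlines_with_backslash_l() {
        // assumes `replace_newline_with_backslash_l` from above is in scope
        assert_eq!(replace_newline_with_backslash_l("a\nb".to_string()), "a\\lb\\l");
        assert_eq!(replace_newline_with_backslash_l("ab".to_string()), "ab");
    }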
-
-impl<'a, 'ast> dot::Labeller<'a, Node<'a>, Edge<'a>> for LabelledCFG<'a, 'ast> {
-    fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new(&self.name[..]).unwrap() }
-
-    fn node_id(&'a self, &(i,_): &Node<'a>) -> dot::Id<'a> {
-        dot::Id::new(format!("N{}", i.node_id())).unwrap()
-    }
-
-    fn node_label(&'a self, &(i, n): &Node<'a>) -> dot::LabelText<'a> {
-        if i == self.cfg.entry {
-            dot::LabelText::LabelStr("entry".into_cow())
-        } else if i == self.cfg.exit {
-            dot::LabelText::LabelStr("exit".into_cow())
-        } else if n.data.id() == ast::DUMMY_NODE_ID {
-            dot::LabelText::LabelStr("(dummy_node)".into_cow())
-        } else {
-            let s = self.ast_map.node_to_string(n.data.id());
-            // left-aligns the lines
-            let s = replace_newline_with_backslash_l(s);
-            dot::LabelText::EscStr(s.into_cow())
-        }
-    }
-
-    fn edge_label(&self, e: &Edge<'a>) -> dot::LabelText<'a> {
-        let mut label = String::new();
-        if !self.labelled_edges {
-            return dot::LabelText::EscStr(label.into_cow());
-        }
-        let mut put_one = false;
-        for (i, &node_id) in e.data.exiting_scopes.iter().enumerate() {
-            if put_one {
-                label.push_str(",\\l");
-            } else {
-                put_one = true;
-            }
-            let s = self.ast_map.node_to_string(node_id);
-            // left-aligns the lines
-            let s = replace_newline_with_backslash_l(s);
-            label.push_str(&format!("exiting scope_{} {}",
-                                   i,
-                                   &s[..]));
-        }
-        dot::LabelText::EscStr(label.into_cow())
-    }
-}
-
-impl<'a> dot::GraphWalk<'a, Node<'a>, Edge<'a>> for &'a cfg::CFG {
-    fn nodes(&'a self) -> dot::Nodes<'a, Node<'a>> {
-        let mut v = Vec::new();
-        self.graph.each_node(|i, nd| { v.push((i, nd)); true });
-        v.into_cow()
-    }
-    fn edges(&'a self) -> dot::Edges<'a, Edge<'a>> {
-        self.graph.all_edges().iter().collect()
-    }
-    fn source(&'a self, edge: &Edge<'a>) -> Node<'a> {
-        let i = edge.source();
-        (i, self.graph.node(i))
-    }
-    fn target(&'a self, edge: &Edge<'a>) -> Node<'a> {
-        let i = edge.target();
-        (i, self.graph.node(i))
-    }
-}
-
-impl<'a, 'ast> dot::GraphWalk<'a, Node<'a>, Edge<'a>> for LabelledCFG<'a, 'ast>
-{
-    fn nodes(&'a self) -> dot::Nodes<'a, Node<'a>> { self.cfg.nodes() }
-    fn edges(&'a self) -> dot::Edges<'a, Edge<'a>> { self.cfg.edges() }
-    fn source(&'a self, edge: &Edge<'a>) -> Node<'a> { self.cfg.source(edge) }
-    fn target(&'a self, edge: &Edge<'a>) -> Node<'a> { self.cfg.target(edge) }
-}
diff --git a/src/librustc/middle/cfg/mod.rs b/src/librustc/middle/cfg/mod.rs
deleted file mode 100644 (file)
index ac84d3d..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Module that constructs a control-flow graph representing an item.
-//! Uses `Graph` as the underlying representation.
-
-use rustc_data_structures::graph;
-use middle::ty;
-use syntax::ast;
-use rustc_front::hir;
-
-mod construct;
-pub mod graphviz;
-
-pub struct CFG {
-    pub graph: CFGGraph,
-    pub entry: CFGIndex,
-    pub exit: CFGIndex,
-}
-
-#[derive(Copy, Clone, Debug, PartialEq)]
-pub enum CFGNodeData {
-    AST(ast::NodeId),
-    Entry,
-    Exit,
-    Dummy,
-    Unreachable,
-}
-
-impl CFGNodeData {
-    pub fn id(&self) -> ast::NodeId {
-        if let CFGNodeData::AST(id) = *self {
-            id
-        } else {
-            ast::DUMMY_NODE_ID
-        }
-    }
-}
-
-#[derive(Debug)]
-pub struct CFGEdgeData {
-    pub exiting_scopes: Vec<ast::NodeId>
-}
-
-pub type CFGIndex = graph::NodeIndex;
-
-pub type CFGGraph = graph::Graph<CFGNodeData, CFGEdgeData>;
-
-pub type CFGNode = graph::Node<CFGNodeData>;
-
-pub type CFGEdge = graph::Edge<CFGEdgeData>;
-
-impl CFG {
-    pub fn new(tcx: &ty::ctxt,
-               blk: &hir::Block) -> CFG {
-        construct::construct(tcx, blk)
-    }
-
-    pub fn node_is_reachable(&self, id: ast::NodeId) -> bool {
-        self.graph.depth_traverse(self.entry)
-                  .any(|idx| self.graph.node_data(idx).id() == id)
-    }
-}
diff --git a/src/librustc/middle/check_match.rs b/src/librustc/middle/check_match.rs
deleted file mode 100644 (file)
index 7f0e9ca..0000000
+++ /dev/null
@@ -1,1202 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-pub use self::Constructor::*;
-use self::Usefulness::*;
-use self::WitnessPreference::*;
-
-use dep_graph::DepNode;
-use middle::const_eval::{compare_const_vals, ConstVal};
-use middle::const_eval::{eval_const_expr, eval_const_expr_partial};
-use middle::const_eval::{const_expr_to_pat, lookup_const_by_id};
-use middle::const_eval::EvalHint::ExprTypeChecked;
-use middle::def::*;
-use middle::def_id::{DefId};
-use middle::expr_use_visitor::{ConsumeMode, Delegate, ExprUseVisitor};
-use middle::expr_use_visitor::{LoanCause, MutateMode};
-use middle::expr_use_visitor as euv;
-use middle::infer;
-use middle::mem_categorization::{cmt};
-use middle::pat_util::*;
-use middle::ty::*;
-use middle::ty;
-use std::cmp::Ordering;
-use std::fmt;
-use std::iter::{FromIterator, IntoIterator, repeat};
-
-use rustc_front::hir;
-use rustc_front::hir::{Pat, PatKind};
-use rustc_front::intravisit::{self, Visitor, FnKind};
-use rustc_front::util as front_util;
-use rustc_back::slice;
-
-use syntax::ast::{self, DUMMY_NODE_ID, NodeId};
-use syntax::ast_util;
-use syntax::codemap::{Span, Spanned, DUMMY_SP};
-use rustc_front::fold::{Folder, noop_fold_pat};
-use rustc_front::print::pprust::pat_to_string;
-use syntax::ptr::P;
-use util::nodemap::FnvHashMap;
-
-pub const DUMMY_WILD_PAT: &'static Pat = &Pat {
-    id: DUMMY_NODE_ID,
-    node: PatKind::Wild,
-    span: DUMMY_SP
-};
-
-struct Matrix<'a>(Vec<Vec<&'a Pat>>);
-
-/// Pretty-printer for matrices of patterns, example:
-/// ++++++++++++++++++++++++++
-/// + _     + []             +
-/// ++++++++++++++++++++++++++
-/// + true  + [First]        +
-/// ++++++++++++++++++++++++++
-/// + true  + [Second(true)] +
-/// ++++++++++++++++++++++++++
-/// + false + [_]            +
-/// ++++++++++++++++++++++++++
-/// + _     + [_, _, ..tail] +
-/// ++++++++++++++++++++++++++
-impl<'a> fmt::Debug for Matrix<'a> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        try!(write!(f, "\n"));
-
-        let &Matrix(ref m) = self;
-        let pretty_printed_matrix: Vec<Vec<String>> = m.iter().map(|row| {
-            row.iter()
-               .map(|&pat| pat_to_string(&pat))
-               .collect::<Vec<String>>()
-        }).collect();
-
-        let column_count = m.iter().map(|row| row.len()).max().unwrap_or(0);
-        assert!(m.iter().all(|row| row.len() == column_count));
-        let column_widths: Vec<usize> = (0..column_count).map(|col| {
-            pretty_printed_matrix.iter().map(|row| row[col].len()).max().unwrap_or(0)
-        }).collect();
-
-        let total_width = column_widths.iter().cloned().sum::<usize>() + column_count * 3 + 1;
-        let br = repeat('+').take(total_width).collect::<String>();
-        try!(write!(f, "{}\n", br));
-        for row in pretty_printed_matrix {
-            try!(write!(f, "+"));
-            for (column, pat_str) in row.into_iter().enumerate() {
-                try!(write!(f, " "));
-                try!(write!(f, "{:1$}", pat_str, column_widths[column]));
-                try!(write!(f, " +"));
-            }
-            try!(write!(f, "\n"));
-            try!(write!(f, "{}\n", br));
-        }
-        Ok(())
-    }
-}
-
-impl<'a> FromIterator<Vec<&'a Pat>> for Matrix<'a> {
-    fn from_iter<T: IntoIterator<Item=Vec<&'a Pat>>>(iter: T) -> Matrix<'a> {
-        Matrix(iter.into_iter().collect())
-    }
-}
-
-// NOTE: appears to be the only place other than InferCtxt to contain a ParamEnv
-pub struct MatchCheckCtxt<'a, 'tcx: 'a> {
-    pub tcx: &'a ty::ctxt<'tcx>,
-    pub param_env: ParameterEnvironment<'a, 'tcx>,
-}
-
-#[derive(Clone, PartialEq)]
-pub enum Constructor {
-    /// The constructor of all patterns that don't vary by constructor,
-    /// e.g. struct patterns and fixed-length arrays.
-    Single,
-    /// Enum variants.
-    Variant(DefId),
-    /// Literal values.
-    ConstantValue(ConstVal),
-    /// Ranges of literal values (2..5).
-    ConstantRange(ConstVal, ConstVal),
-    /// Array patterns of length n.
-    Slice(usize),
-    /// Array patterns with a subslice.
-    SliceWithSubslice(usize, usize)
-}
-
-#[derive(Clone, PartialEq)]
-enum Usefulness {
-    Useful,
-    UsefulWithWitness(Vec<P<Pat>>),
-    NotUseful
-}
-
-#[derive(Copy, Clone)]
-enum WitnessPreference {
-    ConstructWitness,
-    LeaveOutWitness
-}
-
-impl<'a, 'tcx, 'v> Visitor<'v> for MatchCheckCtxt<'a, 'tcx> {
-    fn visit_expr(&mut self, ex: &hir::Expr) {
-        check_expr(self, ex);
-    }
-    fn visit_local(&mut self, l: &hir::Local) {
-        check_local(self, l);
-    }
-    fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v hir::FnDecl,
-                b: &'v hir::Block, s: Span, n: NodeId) {
-        check_fn(self, fk, fd, b, s, n);
-    }
-}
-
-pub fn check_crate(tcx: &ty::ctxt) {
-    tcx.visit_all_items_in_krate(DepNode::MatchCheck, &mut MatchCheckCtxt {
-        tcx: tcx,
-        param_env: tcx.empty_parameter_environment(),
-    });
-    tcx.sess.abort_if_errors();
-}
-
-fn check_expr(cx: &mut MatchCheckCtxt, ex: &hir::Expr) {
-    intravisit::walk_expr(cx, ex);
-    match ex.node {
-        hir::ExprMatch(ref scrut, ref arms, source) => {
-            for arm in arms {
-                // First, check legality of move bindings.
-                check_legality_of_move_bindings(cx,
-                                                arm.guard.is_some(),
-                                                &arm.pats);
-
-                // Second, if the arm has a guard, make sure it isn't
-                // assigning or borrowing anything mutably.
-                match arm.guard {
-                    Some(ref guard) => check_for_mutation_in_guard(cx, &guard),
-                    None => {}
-                }
-            }
-
-            let mut static_inliner = StaticInliner::new(cx.tcx, None);
-            let inlined_arms = arms.iter().map(|arm| {
-                (arm.pats.iter().map(|pat| {
-                    static_inliner.fold_pat((*pat).clone())
-                }).collect(), arm.guard.as_ref().map(|e| &**e))
-            }).collect::<Vec<(Vec<P<Pat>>, Option<&hir::Expr>)>>();
-
-            // Bail out early if inlining failed.
-            if static_inliner.failed {
-                return;
-            }
-
-            for pat in inlined_arms
-                .iter()
-                .flat_map(|&(ref pats, _)| pats) {
-                // Third, check legality of bindings in `@` patterns.
-                check_legality_of_bindings_in_at_patterns(cx, &pat);
-
-                // Fourth, check if there are any references to NaN that we should warn about.
-                check_for_static_nan(cx, &pat);
-
-                // Fifth, check whether any of the patterns that match an enumerated type
-                // contain bindings with the same name as one of the variants of said type.
-                check_for_bindings_named_the_same_as_variants(cx, &pat);
-            }
-
-            // Sixth, check for unreachable arms.
-            check_arms(cx, &inlined_arms[..], source);
-
-            // Finally, check if the whole match expression is exhaustive.
-            // Check for empty enum, because is_useful only works on inhabited types.
-            let pat_ty = cx.tcx.node_id_to_type(scrut.id);
-            if inlined_arms.is_empty() {
-                if !pat_ty.is_empty(cx.tcx) {
-                    // We know the type is inhabited, so this must be wrong
-                    let mut err = struct_span_err!(cx.tcx.sess, ex.span, E0002,
-                                                   "non-exhaustive patterns: type {} is non-empty",
-                                                   pat_ty);
-                    span_help!(&mut err, ex.span,
-                        "Please ensure that all possible cases are being handled; \
-                         possibly adding wildcards or more match arms.");
-                    err.emit();
-                }
-                // If the type *is* empty, it's vacuously exhaustive
-                return;
-            }
-
-            let matrix: Matrix = inlined_arms
-                .iter()
-                .filter(|&&(_, guard)| guard.is_none())
-                .flat_map(|arm| &arm.0)
-                .map(|pat| vec![&**pat])
-                .collect();
-            check_exhaustive(cx, ex.span, &matrix, source);
-        },
-        _ => ()
-    }
-}
-
-fn check_for_bindings_named_the_same_as_variants(cx: &MatchCheckCtxt, pat: &Pat) {
-    front_util::walk_pat(pat, |p| {
-        match p.node {
-            PatKind::Ident(hir::BindByValue(hir::MutImmutable), ident, None) => {
-                let pat_ty = cx.tcx.pat_ty(p);
-                if let ty::TyEnum(edef, _) = pat_ty.sty {
-                    let def = cx.tcx.def_map.borrow().get(&p.id).map(|d| d.full_def());
-                    if let Some(Def::Local(..)) = def {
-                        if edef.variants.iter().any(|variant|
-                            variant.name == ident.node.unhygienic_name
-                                && variant.kind() == VariantKind::Unit
-                        ) {
-                            let ty_path = cx.tcx.item_path_str(edef.did);
-                            let mut err = struct_span_warn!(cx.tcx.sess, p.span, E0170,
-                                "pattern binding `{}` is named the same as one \
-                                 of the variants of the type `{}`",
-                                ident.node, ty_path);
-                            fileline_help!(err, p.span,
-                                "if you meant to match on a variant, \
-                                 consider making the path in the pattern qualified: `{}::{}`",
-                                ty_path, ident.node);
-                            err.emit();
-                        }
-                    }
-                }
-            }
-            _ => ()
-        }
-        true
-    });
-}
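For orientation, a hedged sketch of the pattern shape this E0170 warning targets; the enum and function are invented:

    enum Method { Get, Post }

    fn tag(m: Method) -> u32 {
        match m {
            // Warned about: `Get` is a fresh binding that merely shares its name
            // with the `Method::Get` variant, so it matches *any* value; the
            // qualified `Method::Get => ...` is what was probably intended.
            Get => 1,
        }
    }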
-
-// Check that we do not match against a static NaN (#6804)
-fn check_for_static_nan(cx: &MatchCheckCtxt, pat: &Pat) {
-    front_util::walk_pat(pat, |p| {
-        if let PatKind::Lit(ref expr) = p.node {
-            match eval_const_expr_partial(cx.tcx, &expr, ExprTypeChecked, None) {
-                Ok(ConstVal::Float(f)) if f.is_nan() => {
-                    span_warn!(cx.tcx.sess, p.span, E0003,
-                               "unmatchable NaN in pattern, \
-                                use the is_nan method in a guard instead");
-                }
-                Ok(_) => {}
-
-                Err(err) => {
-                    let mut diag = struct_span_err!(cx.tcx.sess, err.span, E0471,
-                                                    "constant evaluation error: {}",
-                                                    err.description());
-                    if !p.span.contains(err.span) {
-                        diag.span_note(p.span, "in pattern here");
-                    }
-                    diag.emit();
-                }
-            }
-        }
-        true
-    });
-}
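A hedged sketch of the guard-based rewrite that the NaN warning above recommends; the function is invented:

    fn classify(x: f64) -> &'static str {
        match x {
            // Using a NaN constant as a pattern would draw the E0003 warning,
            // because NaN never compares equal to anything (itself included);
            // the guard below is the form the message suggests instead.
            n if n.is_nan() => "not a number",
            _ => "a number",
        }
    }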
-
-// Check for unreachable patterns
-fn check_arms(cx: &MatchCheckCtxt,
-              arms: &[(Vec<P<Pat>>, Option<&hir::Expr>)],
-              source: hir::MatchSource) {
-    let mut seen = Matrix(vec![]);
-    let mut printed_if_let_err = false;
-    for &(ref pats, guard) in arms {
-        for pat in pats {
-            let v = vec![&**pat];
-
-            match is_useful(cx, &seen, &v[..], LeaveOutWitness) {
-                NotUseful => {
-                    match source {
-                        hir::MatchSource::IfLetDesugar { .. } => {
-                            if printed_if_let_err {
-                                // we already printed an irrefutable if-let pattern error.
-                                // We don't want two, that's just confusing.
-                            } else {
-                                // find the first arm pattern so we can use its span
-                                let &(ref first_arm_pats, _) = &arms[0];
-                                let first_pat = &first_arm_pats[0];
-                                let span = first_pat.span;
-                                span_err!(cx.tcx.sess, span, E0162, "irrefutable if-let pattern");
-                                printed_if_let_err = true;
-                            }
-                        },
-
-                        hir::MatchSource::WhileLetDesugar => {
-                            // find the first arm pattern so we can use its span
-                            let &(ref first_arm_pats, _) = &arms[0];
-                            let first_pat = &first_arm_pats[0];
-                            let span = first_pat.span;
-                            span_err!(cx.tcx.sess, span, E0165, "irrefutable while-let pattern");
-                        },
-
-                        hir::MatchSource::ForLoopDesugar => {
-                            // this is a bug, because on `match iter.next()` we cover
-                            // `Some(<head>)` and `None`. It's impossible to have an unreachable
-                            // pattern
-                            // (see libsyntax/ext/expand.rs for the full expansion of a for loop)
-                            cx.tcx.sess.span_bug(pat.span, "unreachable for-loop pattern")
-                        },
-
-                        hir::MatchSource::Normal => {
-                            span_err!(cx.tcx.sess, pat.span, E0001, "unreachable pattern")
-                        },
-                    }
-                }
-                Useful => (),
-                UsefulWithWitness(_) => unreachable!()
-            }
-            if guard.is_none() {
-                let Matrix(mut rows) = seen;
-                rows.push(v);
-                seen = Matrix(rows);
-            }
-        }
-    }
-}
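A hedged sketch of an arm that this reachability pass reports; the function is invented:

    fn describe(x: Option<i32>) -> u32 {
        match x {
            Some(_) => 1,
            // E0001 (unreachable pattern): every `Some` value is already covered
            // by the `Some(_)` row above, so this arm can never match.
            Some(7) => 7,
            None => 0,
        }
    }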
-
-fn raw_pat<'a>(p: &'a Pat) -> &'a Pat {
-    match p.node {
-        PatKind::Ident(_, _, Some(ref s)) => raw_pat(&s),
-        _ => p
-    }
-}
-
-fn check_exhaustive(cx: &MatchCheckCtxt, sp: Span, matrix: &Matrix, source: hir::MatchSource) {
-    match is_useful(cx, matrix, &[DUMMY_WILD_PAT], ConstructWitness) {
-        UsefulWithWitness(pats) => {
-            let witnesses = if pats.is_empty() {
-                vec![DUMMY_WILD_PAT]
-            } else {
-                pats.iter().map(|w| &**w ).collect()
-            };
-            match source {
-                hir::MatchSource::ForLoopDesugar => {
-                    // `witnesses[0]` has the form `Some(<head>)`, peel off the `Some`
-                    let witness = match witnesses[0].node {
-                        PatKind::TupleStruct(_, Some(ref pats)) => match &pats[..] {
-                            [ref pat] => &**pat,
-                            _ => unreachable!(),
-                        },
-                        _ => unreachable!(),
-                    };
-                    span_err!(cx.tcx.sess, sp, E0297,
-                        "refutable pattern in `for` loop binding: \
-                                `{}` not covered",
-                                pat_to_string(witness));
-                },
-                _ => {
-                    let pattern_strings: Vec<_> = witnesses.iter().map(|w| {
-                        pat_to_string(w)
-                    }).collect();
-                    const LIMIT: usize = 3;
-                    let joined_patterns = match pattern_strings.len() {
-                        0 => unreachable!(),
-                        1 => format!("`{}`", pattern_strings[0]),
-                        2...LIMIT => {
-                            let (tail, head) = pattern_strings.split_last().unwrap();
-                            format!("`{}`", head.join("`, `") + "` and `" + tail)
-                        },
-                        _ => {
-                            let (head, tail) = pattern_strings.split_at(LIMIT);
-                            format!("`{}` and {} more", head.join("`, `"), tail.len())
-                        }
-                    };
-                    span_err!(cx.tcx.sess, sp, E0004,
-                        "non-exhaustive patterns: {} not covered",
-                        joined_patterns
-                    );
-                },
-            }
-        }
-        NotUseful => {
-            // This is good, wildcard pattern isn't reachable
-        },
-        _ => unreachable!()
-    }
-}
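A hedged sketch of a non-exhaustive match of the kind this path rejects with E0004; the enum is invented:

    enum Direction { North, East, South, West }

    fn letter(d: Direction) -> char {
        match d {
            // E0004 (non-exhaustive patterns): the two remaining variants are
            // reported as uncovered witnesses.
            Direction::North => 'N',
            Direction::East => 'E',
        }
    }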
-
-fn const_val_to_expr(value: &ConstVal) -> P<hir::Expr> {
-    let node = match value {
-        &ConstVal::Bool(b) => ast::LitKind::Bool(b),
-        _ => unreachable!()
-    };
-    P(hir::Expr {
-        id: 0,
-        node: hir::ExprLit(P(Spanned { node: node, span: DUMMY_SP })),
-        span: DUMMY_SP,
-        attrs: None,
-    })
-}
-
-pub struct StaticInliner<'a, 'tcx: 'a> {
-    pub tcx: &'a ty::ctxt<'tcx>,
-    pub failed: bool,
-    pub renaming_map: Option<&'a mut FnvHashMap<(NodeId, Span), NodeId>>,
-}
-
-impl<'a, 'tcx> StaticInliner<'a, 'tcx> {
-    pub fn new<'b>(tcx: &'b ty::ctxt<'tcx>,
-                   renaming_map: Option<&'b mut FnvHashMap<(NodeId, Span), NodeId>>)
-                   -> StaticInliner<'b, 'tcx> {
-        StaticInliner {
-            tcx: tcx,
-            failed: false,
-            renaming_map: renaming_map
-        }
-    }
-}
-
-struct RenamingRecorder<'map> {
-    substituted_node_id: NodeId,
-    origin_span: Span,
-    renaming_map: &'map mut FnvHashMap<(NodeId, Span), NodeId>
-}
-
-impl<'map> ast_util::IdVisitingOperation for RenamingRecorder<'map> {
-    fn visit_id(&mut self, node_id: NodeId) {
-        let key = (node_id, self.origin_span);
-        self.renaming_map.insert(key, self.substituted_node_id);
-    }
-}
-
-impl<'a, 'tcx> Folder for StaticInliner<'a, 'tcx> {
-    fn fold_pat(&mut self, pat: P<Pat>) -> P<Pat> {
-        return match pat.node {
-            PatKind::Ident(..) | PatKind::Path(..) | PatKind::QPath(..) => {
-                let def = self.tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def());
-                match def {
-                    Some(Def::AssociatedConst(did)) |
-                    Some(Def::Const(did)) => match lookup_const_by_id(self.tcx, did,
-                                                                    Some(pat.id), None) {
-                        Some(const_expr) => {
-                            const_expr_to_pat(self.tcx, const_expr, pat.span).map(|new_pat| {
-
-                                if let Some(ref mut renaming_map) = self.renaming_map {
-                                    // Record any renamings we do here
-                                    record_renamings(const_expr, &pat, renaming_map);
-                                }
-
-                                new_pat
-                            })
-                        }
-                        None => {
-                            self.failed = true;
-                            span_err!(self.tcx.sess, pat.span, E0158,
-                                "statics cannot be referenced in patterns");
-                            pat
-                        }
-                    },
-                    _ => noop_fold_pat(pat, self)
-                }
-            }
-            _ => noop_fold_pat(pat, self)
-        };
-
-        fn record_renamings(const_expr: &hir::Expr,
-                            substituted_pat: &hir::Pat,
-                            renaming_map: &mut FnvHashMap<(NodeId, Span), NodeId>) {
-            let mut renaming_recorder = RenamingRecorder {
-                substituted_node_id: substituted_pat.id,
-                origin_span: substituted_pat.span,
-                renaming_map: renaming_map,
-            };
-
-            let mut id_visitor = front_util::IdVisitor::new(&mut renaming_recorder);
-
-            id_visitor.visit_expr(const_expr);
-        }
-    }
-}
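A hedged sketch contrasting what the inliner above rejects with what it folds into the pattern; the names are invented:

    static MAX_STATIC: i32 = 10;
    const MAX_CONST: i32 = 10;

    fn at_max(x: i32) -> bool {
        match x {
            // `MAX_STATIC => true` here would be rejected with E0158, because
            // statics cannot be referenced in patterns; a `const`, by contrast,
            // is looked up and its expression folded back into the pattern:
            MAX_CONST => true,
            _ => false,
        }
    }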
-
-/// Constructs a partial witness for a pattern given a list of
-/// patterns expanded by the specialization step.
-///
-/// When a pattern P is discovered to be useful, this function is used bottom-up
-/// to reconstruct a complete witness, e.g. a pattern P' that covers a subset
-/// of values, V, where each value in that set is not covered by any previously
-/// used patterns and is covered by the pattern P'. Examples:
-///
-/// left_ty: tuple of 3 elements
-/// pats: [10, 20, _]           => (10, 20, _)
-///
-/// left_ty: struct X { a: (bool, &'static str), b: usize}
-/// pats: [(false, "foo"), 42]  => X { a: (false, "foo"), b: 42 }
-fn construct_witness<'a,'tcx>(cx: &MatchCheckCtxt<'a,'tcx>, ctor: &Constructor,
-                              pats: Vec<&Pat>, left_ty: Ty<'tcx>) -> P<Pat> {
-    let pats_len = pats.len();
-    let mut pats = pats.into_iter().map(|p| P((*p).clone()));
-    let pat = match left_ty.sty {
-        ty::TyTuple(_) => PatKind::Tup(pats.collect()),
-
-        ty::TyEnum(adt, _) | ty::TyStruct(adt, _)  => {
-            let v = adt.variant_of_ctor(ctor);
-            match v.kind() {
-                VariantKind::Struct => {
-                    let field_pats: hir::HirVec<_> = v.fields.iter()
-                        .zip(pats)
-                        .filter(|&(_, ref pat)| pat.node != PatKind::Wild)
-                        .map(|(field, pat)| Spanned {
-                            span: DUMMY_SP,
-                            node: hir::FieldPat {
-                                name: field.name,
-                                pat: pat,
-                                is_shorthand: false,
-                            }
-                        }).collect();
-                    let has_more_fields = field_pats.len() < pats_len;
-                    PatKind::Struct(def_to_path(cx.tcx, v.did), field_pats, has_more_fields)
-                }
-                VariantKind::Tuple => {
-                    PatKind::TupleStruct(def_to_path(cx.tcx, v.did), Some(pats.collect()))
-                }
-                VariantKind::Unit => {
-                    PatKind::Path(def_to_path(cx.tcx, v.did))
-                }
-            }
-        }
-
-        ty::TyRef(_, ty::TypeAndMut { ty, mutbl }) => {
-            match ty.sty {
-               ty::TyArray(_, n) => match ctor {
-                    &Single => {
-                        assert_eq!(pats_len, n);
-                        PatKind::Vec(pats.collect(), None, hir::HirVec::new())
-                    },
-                    _ => unreachable!()
-                },
-                ty::TySlice(_) => match ctor {
-                    &Slice(n) => {
-                        assert_eq!(pats_len, n);
-                        PatKind::Vec(pats.collect(), None, hir::HirVec::new())
-                    },
-                    _ => unreachable!()
-                },
-                ty::TyStr => PatKind::Wild,
-
-                _ => {
-                    assert_eq!(pats_len, 1);
-                    PatKind::Ref(pats.nth(0).unwrap(), mutbl)
-                }
-            }
-        }
-
-        ty::TyArray(_, len) => {
-            assert_eq!(pats_len, len);
-            PatKind::Vec(pats.collect(), None, hir::HirVec::new())
-        }
-
-        _ => {
-            match *ctor {
-                ConstantValue(ref v) => PatKind::Lit(const_val_to_expr(v)),
-                _ => PatKind::Wild,
-            }
-        }
-    };
-
-    P(hir::Pat {
-        id: 0,
-        node: pat,
-        span: DUMMY_SP
-    })
-}
-
-impl<'tcx, 'container> ty::AdtDefData<'tcx, 'container> {
-    fn variant_of_ctor(&self,
-                       ctor: &Constructor)
-                       -> &VariantDefData<'tcx, 'container> {
-        match ctor {
-            &Variant(vid) => self.variant_with_id(vid),
-            _ => self.struct_variant()
-        }
-    }
-}
-
-fn missing_constructors(cx: &MatchCheckCtxt, &Matrix(ref rows): &Matrix,
-                       left_ty: Ty, max_slice_length: usize) -> Vec<Constructor> {
-    let used_constructors: Vec<Constructor> = rows.iter()
-        .flat_map(|row| pat_constructors(cx, row[0], left_ty, max_slice_length))
-        .collect();
-    all_constructors(cx, left_ty, max_slice_length)
-        .into_iter()
-        .filter(|c| !used_constructors.contains(c))
-        .collect()
-}
-
-/// This determines the set of all possible constructors for a pattern matching
-/// values of type `left_ty`. For vectors, this would normally be an infinite set
-/// but is instead bounded by the maximum fixed length of slice patterns in
-/// the column of patterns being analyzed.
-fn all_constructors(_cx: &MatchCheckCtxt, left_ty: Ty,
-                    max_slice_length: usize) -> Vec<Constructor> {
-    match left_ty.sty {
-        ty::TyBool =>
-            [true, false].iter().map(|b| ConstantValue(ConstVal::Bool(*b))).collect(),
-
-        ty::TyRef(_, ty::TypeAndMut { ty, .. }) => match ty.sty {
-            ty::TySlice(_) =>
-                (0..max_slice_length+1).map(|length| Slice(length)).collect(),
-            _ => vec![Single]
-        },
-
-        ty::TyEnum(def, _) => def.variants.iter().map(|v| Variant(v.did)).collect(),
-        _ => vec![Single]
-    }
-}
-
-// Algorithm from http://moscova.inria.fr/~maranget/papers/warn/index.html
-//
-// Whether a vector `v` of patterns is 'useful' in relation to a set of such
-// vectors `m` is defined as there being a set of inputs that will match `v`
-// but not any of the sets in `m`.
-//
-// This is used both for reachability checking (if a pattern isn't useful in
-// relation to preceding patterns, it is not reachable) and exhaustiveness
-// checking (if a wildcard pattern is useful in relation to a matrix, the
-// matrix isn't exhaustive).
-
-// Note: is_useful doesn't work on empty types, as the paper notes.
-// So it assumes that v is non-empty.
-fn is_useful(cx: &MatchCheckCtxt,
-             matrix: &Matrix,
-             v: &[&Pat],
-             witness: WitnessPreference)
-             -> Usefulness {
-    let &Matrix(ref rows) = matrix;
-    debug!("{:?}", matrix);
-    if rows.is_empty() {
-        return match witness {
-            ConstructWitness => UsefulWithWitness(vec!()),
-            LeaveOutWitness => Useful
-        };
-    }
-    if rows[0].is_empty() {
-        return NotUseful;
-    }
-    assert!(rows.iter().all(|r| r.len() == v.len()));
-    let real_pat = match rows.iter().find(|r| (*r)[0].id != DUMMY_NODE_ID) {
-        Some(r) => raw_pat(r[0]),
-        None if v.is_empty() => return NotUseful,
-        None => v[0]
-    };
-    let left_ty = if real_pat.id == DUMMY_NODE_ID {
-        cx.tcx.mk_nil()
-    } else {
-        let left_ty = cx.tcx.pat_ty(&real_pat);
-
-        match real_pat.node {
-            PatKind::Ident(hir::BindByRef(..), _, _) => {
-                left_ty.builtin_deref(false, NoPreference).unwrap().ty
-            }
-            _ => left_ty,
-        }
-    };
-
-    let max_slice_length = rows.iter().filter_map(|row| match row[0].node {
-        PatKind::Vec(ref before, _, ref after) => Some(before.len() + after.len()),
-        _ => None
-    }).max().map_or(0, |v| v + 1);
-
-    let constructors = pat_constructors(cx, v[0], left_ty, max_slice_length);
-    if constructors.is_empty() {
-        let constructors = missing_constructors(cx, matrix, left_ty, max_slice_length);
-        if constructors.is_empty() {
-            all_constructors(cx, left_ty, max_slice_length).into_iter().map(|c| {
-                match is_useful_specialized(cx, matrix, v, c.clone(), left_ty, witness) {
-                    UsefulWithWitness(pats) => UsefulWithWitness({
-                        let arity = constructor_arity(cx, &c, left_ty);
-                        let mut result = {
-                            let pat_slice = &pats[..];
-                            let subpats: Vec<_> = (0..arity).map(|i| {
-                                pat_slice.get(i).map_or(DUMMY_WILD_PAT, |p| &**p)
-                            }).collect();
-                            vec![construct_witness(cx, &c, subpats, left_ty)]
-                        };
-                        result.extend(pats.into_iter().skip(arity));
-                        result
-                    }),
-                    result => result
-                }
-            }).find(|result| result != &NotUseful).unwrap_or(NotUseful)
-        } else {
-            let matrix = rows.iter().filter_map(|r| {
-                if pat_is_binding_or_wild(&cx.tcx.def_map.borrow(), raw_pat(r[0])) {
-                    Some(r[1..].to_vec())
-                } else {
-                    None
-                }
-            }).collect();
-            match is_useful(cx, &matrix, &v[1..], witness) {
-                UsefulWithWitness(pats) => {
-                    let mut new_pats: Vec<_> = constructors.into_iter().map(|constructor| {
-                        let arity = constructor_arity(cx, &constructor, left_ty);
-                        let wild_pats = vec![DUMMY_WILD_PAT; arity];
-                        construct_witness(cx, &constructor, wild_pats, left_ty)
-                    }).collect();
-                    new_pats.extend(pats);
-                    UsefulWithWitness(new_pats)
-                },
-                result => result
-            }
-        }
-    } else {
-        constructors.into_iter().map(|c|
-            is_useful_specialized(cx, matrix, v, c.clone(), left_ty, witness)
-        ).find(|result| result != &NotUseful).unwrap_or(NotUseful)
-    }
-}
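A hedged reading of the usefulness definition above on the simplest type; the function is invented:

    fn bit(b: bool) -> u8 {
        match b {
            true => 1,
            false => 0,
            // An extra `_ => 2` arm would not be useful relative to the two rows
            // above (no value of `bool` reaches it), so it would be reported as
            // unreachable; drop `false => 0` instead and the wildcard row becomes
            // useful again, i.e. the match stops being exhaustive without it.
        }
    }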
-
-fn is_useful_specialized(cx: &MatchCheckCtxt, &Matrix(ref m): &Matrix,
-                         v: &[&Pat], ctor: Constructor, lty: Ty,
-                         witness: WitnessPreference) -> Usefulness {
-    let arity = constructor_arity(cx, &ctor, lty);
-    let matrix = Matrix(m.iter().filter_map(|r| {
-        specialize(cx, &r[..], &ctor, 0, arity)
-    }).collect());
-    match specialize(cx, v, &ctor, 0, arity) {
-        Some(v) => is_useful(cx, &matrix, &v[..], witness),
-        None => NotUseful
-    }
-}
-
-/// Determines the constructors that the given pattern can be specialized to.
-///
-/// In most cases, there's only one constructor that a specific pattern
-/// represents, such as a specific enum variant or a specific literal value.
-/// Slice patterns, however, can match slices of different lengths. For instance,
-/// `[a, b, ..tail]` can match a slice of length 2, 3, 4 and so on.
-///
-/// On the other hand, a wild pattern and an identifier pattern cannot be
-/// specialized in any way.
-fn pat_constructors(cx: &MatchCheckCtxt, p: &Pat,
-                    left_ty: Ty, max_slice_length: usize) -> Vec<Constructor> {
-    let pat = raw_pat(p);
-    match pat.node {
-        PatKind::Struct(..) | PatKind::TupleStruct(..) | PatKind::Path(..) | PatKind::Ident(..) =>
-            match cx.tcx.def_map.borrow().get(&pat.id).unwrap().full_def() {
-                Def::Const(..) | Def::AssociatedConst(..) =>
-                    cx.tcx.sess.span_bug(pat.span, "const pattern should've \
-                                                    been rewritten"),
-                Def::Struct(..) | Def::TyAlias(..) => vec![Single],
-                Def::Variant(_, id) => vec![Variant(id)],
-                Def::Local(..) => vec![],
-                def => cx.tcx.sess.span_bug(pat.span, &format!("pat_constructors: unexpected \
-                                                                definition {:?}", def)),
-            },
-        PatKind::QPath(..) =>
-            cx.tcx.sess.span_bug(pat.span, "const pattern should've \
-                                            been rewritten"),
-        PatKind::Lit(ref expr) =>
-            vec!(ConstantValue(eval_const_expr(cx.tcx, &expr))),
-        PatKind::Range(ref lo, ref hi) =>
-            vec!(ConstantRange(eval_const_expr(cx.tcx, &lo), eval_const_expr(cx.tcx, &hi))),
-        PatKind::Vec(ref before, ref slice, ref after) =>
-            match left_ty.sty {
-                ty::TyArray(_, _) => vec!(Single),
-                _                      => if slice.is_some() {
-                    (before.len() + after.len()..max_slice_length+1)
-                        .map(|length| Slice(length))
-                        .collect()
-                } else {
-                    vec!(Slice(before.len() + after.len()))
-                }
-            },
-        PatKind::Box(_) | PatKind::Tup(_) | PatKind::Ref(..) =>
-            vec!(Single),
-        PatKind::Wild =>
-            vec!(),
-    }
-}
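// Minimal standalone sketch, not part of this file (modern slice-pattern
// syntax `[_, _, ..]` stands in for this era's `[a, b, ..tail]`): a pattern
// with a subslice matches lengths 2, 3, 4, ..., so pat_constructors returns
// one Slice(length) constructor per possible length, while a fixed-length
// pattern yields exactly one.
fn describe(xs: &[u8]) -> &'static str {
    match xs {
        [_, _] => "exactly two elements", // single constructor: Slice(2)
        [_, _, ..] => "two or more",      // Slice(2), Slice(3), ... (2 is shadowed above)
        _ => "fewer than two",
    }
}

fn main() {
    assert_eq!(describe(&[1, 2, 3]), "two or more");
}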
-
-/// This computes the arity of a constructor. The arity of a constructor
-/// is the number of subpatterns that a pattern of that constructor expands into.
-///
-/// For instance, the tuple pattern (_, 42, Some([])) has an arity of 3.
-/// A struct pattern's arity is the number of fields it contains, etc.
-pub fn constructor_arity(_cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> usize {
-    match ty.sty {
-        ty::TyTuple(ref fs) => fs.len(),
-        ty::TyBox(_) => 1,
-        ty::TyRef(_, ty::TypeAndMut { ty, .. }) => match ty.sty {
-            ty::TySlice(_) => match *ctor {
-                Slice(length) => length,
-                ConstantValue(_) => 0,
-                _ => unreachable!()
-            },
-            ty::TyStr => 0,
-            _ => 1
-        },
-        ty::TyEnum(adt, _) | ty::TyStruct(adt, _) => {
-            adt.variant_of_ctor(ctor).fields.len()
-        }
-        ty::TyArray(_, n) => n,
-        _ => 0
-    }
-}
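// Minimal standalone sketch, not part of this file (the `matches!` macro is
// used purely for illustration): the arity is the number of subpatterns a
// constructor expands into during specialization.
struct Point { x: i32, y: i32 }

fn arity_examples(t: (i32, i32, Option<i32>), p: Point) -> bool {
    // Tuple constructor of arity 3: subpatterns `_`, `42`, `Some(_)`.
    let tuple_hit = matches!(t, (_, 42, Some(_)));
    // Struct constructor of arity 2: one subpattern per field.
    let struct_hit = matches!(p, Point { x: 0, y: _ });
    tuple_hit && struct_hit
}

fn main() {
    assert!(arity_examples((1, 42, Some(7)), Point { x: 0, y: 9 }));
}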
-
-fn range_covered_by_constructor(ctor: &Constructor,
-                                from: &ConstVal, to: &ConstVal) -> Option<bool> {
-    let (c_from, c_to) = match *ctor {
-        ConstantValue(ref value)        => (value, value),
-        ConstantRange(ref from, ref to) => (from, to),
-        Single                          => return Some(true),
-        _                               => unreachable!()
-    };
-    let cmp_from = compare_const_vals(c_from, from);
-    let cmp_to = compare_const_vals(c_to, to);
-    match (cmp_from, cmp_to) {
-        (Some(cmp_from), Some(cmp_to)) => {
-            Some(cmp_from != Ordering::Less && cmp_to != Ordering::Greater)
-        }
-        _ => None
-    }
-}
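// Minimal standalone sketch, not part of this file: a constant constructor
// `c` is covered by the range pattern `from...to` exactly when
// `from <= c && c <= to`, which is what the two Ordering comparisons encode.
use std::cmp::Ordering;

fn covered(c: i64, from: i64, to: i64) -> bool {
    c.cmp(&from) != Ordering::Less && c.cmp(&to) != Ordering::Greater
}

fn main() {
    assert!(covered(5, 1, 10));
    assert!(!covered(0, 1, 10));
}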
-
-/// This is the main specialization step. It expands the first pattern in the given row
-/// into `arity` patterns based on the constructor. For most patterns the step is trivial:
-/// for instance, tuple patterns are flattened and box patterns expand into their inner pattern.
-///
-/// On the other hand, slice patterns with a subslice pattern (..tail) can be
-/// expanded into multiple different patterns.
-/// Structure patterns with a partial wild pattern (Foo { a: 42, .. }) have their missing
-/// fields filled with wild patterns.
-pub fn specialize<'a>(cx: &MatchCheckCtxt, r: &[&'a Pat],
-                      constructor: &Constructor, col: usize, arity: usize) -> Option<Vec<&'a Pat>> {
-    let &Pat {
-        id: pat_id, ref node, span: pat_span
-    } = raw_pat(r[col]);
-    let head: Option<Vec<&Pat>> = match *node {
-        PatKind::Wild =>
-            Some(vec![DUMMY_WILD_PAT; arity]),
-
-        PatKind::Path(..) | PatKind::Ident(..) => {
-            let def = cx.tcx.def_map.borrow().get(&pat_id).unwrap().full_def();
-            match def {
-                Def::Const(..) | Def::AssociatedConst(..) =>
-                    cx.tcx.sess.span_bug(pat_span, "const pattern should've \
-                                                    been rewritten"),
-                Def::Variant(_, id) if *constructor != Variant(id) => None,
-                Def::Variant(..) | Def::Struct(..) => Some(Vec::new()),
-                Def::Local(..) => Some(vec![DUMMY_WILD_PAT; arity]),
-                _ => cx.tcx.sess.span_bug(pat_span, &format!("specialize: unexpected \
-                                                              definition {:?}", def)),
-            }
-        }
-
-        PatKind::TupleStruct(_, ref args) => {
-            let def = cx.tcx.def_map.borrow().get(&pat_id).unwrap().full_def();
-            match def {
-                Def::Const(..) | Def::AssociatedConst(..) =>
-                    cx.tcx.sess.span_bug(pat_span, "const pattern should've \
-                                                    been rewritten"),
-                Def::Variant(_, id) if *constructor != Variant(id) => None,
-                Def::Variant(..) | Def::Struct(..) => {
-                    Some(match args {
-                        &Some(ref args) => args.iter().map(|p| &**p).collect(),
-                        &None => vec![DUMMY_WILD_PAT; arity],
-                    })
-                }
-                _ => None
-            }
-        }
-
-        PatKind::QPath(_, _) => {
-            cx.tcx.sess.span_bug(pat_span, "const pattern should've \
-                                            been rewritten")
-        }
-
-        PatKind::Struct(_, ref pattern_fields, _) => {
-            let def = cx.tcx.def_map.borrow().get(&pat_id).unwrap().full_def();
-            let adt = cx.tcx.node_id_to_type(pat_id).ty_adt_def().unwrap();
-            let variant = adt.variant_of_ctor(constructor);
-            let def_variant = adt.variant_of_def(def);
-            if variant.did == def_variant.did {
-                Some(variant.fields.iter().map(|sf| {
-                    match pattern_fields.iter().find(|f| f.node.name == sf.name) {
-                        Some(ref f) => &*f.node.pat,
-                        _ => DUMMY_WILD_PAT
-                    }
-                }).collect())
-            } else {
-                None
-            }
-        }
-
-        PatKind::Tup(ref args) =>
-            Some(args.iter().map(|p| &**p).collect()),
-
-        PatKind::Box(ref inner) | PatKind::Ref(ref inner, _) =>
-            Some(vec![&**inner]),
-
-        PatKind::Lit(ref expr) => {
-            let expr_value = eval_const_expr(cx.tcx, &expr);
-            match range_covered_by_constructor(constructor, &expr_value, &expr_value) {
-                Some(true) => Some(vec![]),
-                Some(false) => None,
-                None => {
-                    span_err!(cx.tcx.sess, pat_span, E0298, "mismatched types between arms");
-                    None
-                }
-            }
-        }
-
-        PatKind::Range(ref from, ref to) => {
-            let from_value = eval_const_expr(cx.tcx, &from);
-            let to_value = eval_const_expr(cx.tcx, &to);
-            match range_covered_by_constructor(constructor, &from_value, &to_value) {
-                Some(true) => Some(vec![]),
-                Some(false) => None,
-                None => {
-                    span_err!(cx.tcx.sess, pat_span, E0299, "mismatched types between arms");
-                    None
-                }
-            }
-        }
-
-        PatKind::Vec(ref before, ref slice, ref after) => {
-            match *constructor {
-                // Fixed-length vectors.
-                Single => {
-                    let mut pats: Vec<&Pat> = before.iter().map(|p| &**p).collect();
-                    pats.extend(repeat(DUMMY_WILD_PAT).take(arity - before.len() - after.len()));
-                    pats.extend(after.iter().map(|p| &**p));
-                    Some(pats)
-                },
-                Slice(length) if before.len() + after.len() <= length && slice.is_some() => {
-                    let mut pats: Vec<&Pat> = before.iter().map(|p| &**p).collect();
-                    pats.extend(repeat(DUMMY_WILD_PAT).take(arity - before.len() - after.len()));
-                    pats.extend(after.iter().map(|p| &**p));
-                    Some(pats)
-                },
-                Slice(length) if before.len() + after.len() == length => {
-                    let mut pats: Vec<&Pat> = before.iter().map(|p| &**p).collect();
-                    pats.extend(after.iter().map(|p| &**p));
-                    Some(pats)
-                },
-                SliceWithSubslice(prefix, suffix)
-                    if before.len() == prefix
-                        && after.len() == suffix
-                        && slice.is_some() => {
-                    let mut pats: Vec<&Pat> = before.iter().map(|p| &**p).collect();
-                    pats.extend(after.iter().map(|p| &**p));
-                    Some(pats)
-                }
-                _ => None
-            }
-        }
-    };
-    head.map(|mut head| {
-        head.extend_from_slice(&r[..col]);
-        head.extend_from_slice(&r[col + 1..]);
-        head
-    })
-}
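// Minimal standalone sketch, not part of this file (`matches!` is for
// illustration only): when a struct pattern uses `..`, specialization fills
// the missing fields with wildcards, so `Foo { a: 42, .. }` is treated like
// `Foo { a: 42, b: _, c: _ }`.
struct Foo { a: i32, b: i32, c: i32 }

fn is_answer(f: &Foo) -> bool {
    matches!(f, Foo { a: 42, .. })
}

fn main() {
    assert!(is_answer(&Foo { a: 42, b: 1, c: 2 }));
}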
-
-fn check_local(cx: &mut MatchCheckCtxt, loc: &hir::Local) {
-    intravisit::walk_local(cx, loc);
-
-    let pat = StaticInliner::new(cx.tcx, None).fold_pat(loc.pat.clone());
-    check_irrefutable(cx, &pat, false);
-
-    // Check legality of move bindings and `@` patterns.
-    check_legality_of_move_bindings(cx, false, slice::ref_slice(&loc.pat));
-    check_legality_of_bindings_in_at_patterns(cx, &loc.pat);
-}
-
-fn check_fn(cx: &mut MatchCheckCtxt,
-            kind: FnKind,
-            decl: &hir::FnDecl,
-            body: &hir::Block,
-            sp: Span,
-            fn_id: NodeId) {
-    match kind {
-        FnKind::Closure => {}
-        _ => cx.param_env = ParameterEnvironment::for_item(cx.tcx, fn_id),
-    }
-
-    intravisit::walk_fn(cx, kind, decl, body, sp);
-
-    for input in &decl.inputs {
-        check_irrefutable(cx, &input.pat, true);
-        check_legality_of_move_bindings(cx, false, slice::ref_slice(&input.pat));
-        check_legality_of_bindings_in_at_patterns(cx, &input.pat);
-    }
-}
-
-fn check_irrefutable(cx: &MatchCheckCtxt, pat: &Pat, is_fn_arg: bool) {
-    let origin = if is_fn_arg {
-        "function argument"
-    } else {
-        "local binding"
-    };
-
-    is_refutable(cx, pat, |uncovered_pat| {
-        span_err!(cx.tcx.sess, pat.span, E0005,
-            "refutable pattern in {}: `{}` not covered",
-            origin,
-            pat_to_string(uncovered_pat),
-        );
-    });
-}
-
-fn is_refutable<A, F>(cx: &MatchCheckCtxt, pat: &Pat, refutable: F) -> Option<A> where
-    F: FnOnce(&Pat) -> A,
-{
-    let pats = Matrix(vec!(vec!(pat)));
-    match is_useful(cx, &pats, &[DUMMY_WILD_PAT], ConstructWitness) {
-        UsefulWithWitness(pats) => Some(refutable(&pats[0])),
-        NotUseful => None,
-        Useful => unreachable!()
-    }
-}
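// Minimal standalone sketch, not part of this file: E0005 fires when a `let`
// binding or function argument uses a refutable pattern, i.e. one that does
// not cover every possible value of its type.
fn demo(opt: Option<i32>) -> i32 {
    let x = opt;              // irrefutable: a plain binding covers everything
    // let Some(y) = opt;     // refutable: `None` is not covered (E0005)
    if let Some(y) = x {      // the refutable form needs `if let` or `match`
        y
    } else {
        0
    }
}

fn main() {
    assert_eq!(demo(Some(3)), 3);
    assert_eq!(demo(None), 0);
}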
-
-// Legality of move bindings checking
-fn check_legality_of_move_bindings(cx: &MatchCheckCtxt,
-                                   has_guard: bool,
-                                   pats: &[P<Pat>]) {
-    let tcx = cx.tcx;
-    let def_map = &tcx.def_map;
-    let mut by_ref_span = None;
-    for pat in pats {
-        pat_bindings(def_map, &pat, |bm, _, span, _path| {
-            match bm {
-                hir::BindByRef(_) => {
-                    by_ref_span = Some(span);
-                }
-                hir::BindByValue(_) => {
-                }
-            }
-        })
-    }
-
-    let check_move = |p: &Pat, sub: Option<&Pat>| {
-        // check legality of moving out of the enum
-
-        // x @ Foo(..) is legal, but x @ Foo(y) isn't.
-        if sub.map_or(false, |p| pat_contains_bindings(&def_map.borrow(), &p)) {
-            span_err!(cx.tcx.sess, p.span, E0007, "cannot bind by-move with sub-bindings");
-        } else if has_guard {
-            span_err!(cx.tcx.sess, p.span, E0008, "cannot bind by-move into a pattern guard");
-        } else if by_ref_span.is_some() {
-            let mut err = struct_span_err!(cx.tcx.sess, p.span, E0009,
-                                           "cannot bind by-move and by-ref in the same pattern");
-            span_note!(&mut err, by_ref_span.unwrap(), "by-ref binding occurs here");
-            err.emit();
-        }
-    };
-
-    for pat in pats {
-        front_util::walk_pat(&pat, |p| {
-            if pat_is_binding(&def_map.borrow(), &p) {
-                match p.node {
-                    PatKind::Ident(hir::BindByValue(_), _, ref sub) => {
-                        let pat_ty = tcx.node_id_to_type(p.id);
-                        // FIXME: (@jroesch) this code should be floated up as well
-                        let infcx = infer::new_infer_ctxt(cx.tcx,
-                                                          &cx.tcx.tables,
-                                                          Some(cx.param_env.clone()));
-                        if infcx.type_moves_by_default(pat_ty, pat.span) {
-                            check_move(p, sub.as_ref().map(|p| &**p));
-                        }
-                    }
-                    PatKind::Ident(hir::BindByRef(_), _, _) => {
-                    }
-                    _ => {
-                        cx.tcx.sess.span_bug(
-                            p.span,
-                            &format!("binding pattern {} is not an \
-                                     identifier: {:?}",
-                                    p.id,
-                                    p.node));
-                    }
-                }
-            }
-            true
-        });
-    }
-}
-
-/// Ensures that a pattern guard doesn't borrow by mutable reference or
-/// assign.
-fn check_for_mutation_in_guard<'a, 'tcx>(cx: &'a MatchCheckCtxt<'a, 'tcx>,
-                                         guard: &hir::Expr) {
-    let mut checker = MutationChecker {
-        cx: cx,
-    };
-
-    let infcx = infer::new_infer_ctxt(cx.tcx,
-                                      &cx.tcx.tables,
-                                      Some(checker.cx.param_env.clone()));
-
-    let mut visitor = ExprUseVisitor::new(&mut checker, &infcx);
-    visitor.walk_expr(guard);
-}
-
-struct MutationChecker<'a, 'tcx: 'a> {
-    cx: &'a MatchCheckCtxt<'a, 'tcx>,
-}
-
-impl<'a, 'tcx> Delegate<'tcx> for MutationChecker<'a, 'tcx> {
-    fn matched_pat(&mut self, _: &Pat, _: cmt, _: euv::MatchMode) {}
-    fn consume(&mut self, _: NodeId, _: Span, _: cmt, _: ConsumeMode) {}
-    fn consume_pat(&mut self, _: &Pat, _: cmt, _: ConsumeMode) {}
-    fn borrow(&mut self,
-              _: NodeId,
-              span: Span,
-              _: cmt,
-              _: Region,
-              kind: BorrowKind,
-              _: LoanCause) {
-        match kind {
-            MutBorrow => {
-                span_err!(self.cx.tcx.sess, span, E0301,
-                          "cannot mutably borrow in a pattern guard")
-            }
-            ImmBorrow | UniqueImmBorrow => {}
-        }
-    }
-    fn decl_without_init(&mut self, _: NodeId, _: Span) {}
-    fn mutate(&mut self, _: NodeId, span: Span, _: cmt, mode: MutateMode) {
-        match mode {
-            MutateMode::JustWrite | MutateMode::WriteAndRead => {
-                span_err!(self.cx.tcx.sess, span, E0302, "cannot assign in a pattern guard")
-            }
-            MutateMode::Init => {}
-        }
-    }
-}
-
-/// Forbids bindings in `@` patterns. This is necessary for memory safety,
-/// because of the way rvalues are handled in the borrow check. (See issue
-/// #14587.)
-fn check_legality_of_bindings_in_at_patterns(cx: &MatchCheckCtxt, pat: &Pat) {
-    AtBindingPatternVisitor { cx: cx, bindings_allowed: true }.visit_pat(pat);
-}
-
-struct AtBindingPatternVisitor<'a, 'b:'a, 'tcx:'b> {
-    cx: &'a MatchCheckCtxt<'b, 'tcx>,
-    bindings_allowed: bool
-}
-
-impl<'a, 'b, 'tcx, 'v> Visitor<'v> for AtBindingPatternVisitor<'a, 'b, 'tcx> {
-    fn visit_pat(&mut self, pat: &Pat) {
-        if !self.bindings_allowed && pat_is_binding(&self.cx.tcx.def_map.borrow(), pat) {
-            span_err!(self.cx.tcx.sess, pat.span, E0303,
-                                      "pattern bindings are not allowed \
-                                       after an `@`");
-        }
-
-        match pat.node {
-            PatKind::Ident(_, _, Some(_)) => {
-                let bindings_were_allowed = self.bindings_allowed;
-                self.bindings_allowed = false;
-                intravisit::walk_pat(self, pat);
-                self.bindings_allowed = bindings_were_allowed;
-            }
-            _ => intravisit::walk_pat(self, pat),
-        }
-    }
-}
diff --git a/src/librustc/middle/const_eval.rs b/src/librustc/middle/const_eval.rs
deleted file mode 100644 (file)
index 3d28f3a..0000000
+++ /dev/null
@@ -1,1406 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//#![allow(non_camel_case_types)]
-
-use self::ConstVal::*;
-use self::ErrKind::*;
-use self::EvalHint::*;
-
-use front::map as ast_map;
-use front::map::blocks::FnLikeNode;
-use middle::cstore::{self, CrateStore, InlinedItem};
-use middle::{infer, subst, traits};
-use middle::def::Def;
-use middle::subst::Subst;
-use middle::def_id::DefId;
-use middle::pat_util::def_to_path;
-use middle::ty::{self, Ty};
-use middle::astconv_util::ast_ty_to_prim_ty;
-use util::num::ToPrimitive;
-use util::nodemap::NodeMap;
-use session::Session;
-
-use graphviz::IntoCow;
-use syntax::ast;
-use rustc_front::hir::{Expr, PatKind};
-use rustc_front::hir;
-use rustc_front::intravisit::FnKind;
-use syntax::codemap::Span;
-use syntax::parse::token::InternedString;
-use syntax::ptr::P;
-use syntax::codemap;
-
-use std::borrow::Cow;
-use std::cmp::Ordering;
-use std::collections::hash_map::Entry::Vacant;
-use std::hash;
-use std::mem::transmute;
-use std::{i8, i16, i32, i64, u8, u16, u32, u64};
-use std::rc::Rc;
-
-fn lookup_variant_by_id<'a>(tcx: &'a ty::ctxt,
-                            enum_def: DefId,
-                            variant_def: DefId)
-                            -> Option<&'a Expr> {
-    fn variant_expr<'a>(variants: &'a [hir::Variant], id: ast::NodeId)
-                        -> Option<&'a Expr> {
-        for variant in variants {
-            if variant.node.data.id() == id {
-                return variant.node.disr_expr.as_ref().map(|e| &**e);
-            }
-        }
-        None
-    }
-
-    if let Some(enum_node_id) = tcx.map.as_local_node_id(enum_def) {
-        let variant_node_id = tcx.map.as_local_node_id(variant_def).unwrap();
-        match tcx.map.find(enum_node_id) {
-            None => None,
-            Some(ast_map::NodeItem(it)) => match it.node {
-                hir::ItemEnum(hir::EnumDef { ref variants }, _) => {
-                    variant_expr(variants, variant_node_id)
-                }
-                _ => None
-            },
-            Some(_) => None
-        }
-    } else {
-        None
-    }
-}
-
-/// * `def_id` is the id of the constant.
-/// * `maybe_ref_id` is the id of the expr referencing the constant.
-/// * `param_substs` is the monomorphization substitution for the expression.
-///
-/// `maybe_ref_id` and `param_substs` are optional and are used for
-/// finding substitutions in associated constants. This generally
-/// happens in late/trans const evaluation.
-pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: &'a ty::ctxt<'tcx>,
-                                        def_id: DefId,
-                                        maybe_ref_id: Option<ast::NodeId>,
-                                        param_substs: Option<&'tcx subst::Substs<'tcx>>)
-                                        -> Option<&'tcx Expr> {
-    if let Some(node_id) = tcx.map.as_local_node_id(def_id) {
-        match tcx.map.find(node_id) {
-            None => None,
-            Some(ast_map::NodeItem(it)) => match it.node {
-                hir::ItemConst(_, ref const_expr) => {
-                    Some(&const_expr)
-                }
-                _ => None
-            },
-            Some(ast_map::NodeTraitItem(ti)) => match ti.node {
-                hir::ConstTraitItem(_, _) => {
-                    match maybe_ref_id {
-                        // If we have a trait item, and we know the expression
-                        // that's the source of the obligation to resolve it,
-                        // `resolve_trait_associated_const` will select an impl
-                        // or the default.
-                        Some(ref_id) => {
-                            let trait_id = tcx.trait_of_item(def_id)
-                                              .unwrap();
-                            let mut substs = tcx.node_id_item_substs(ref_id)
-                                                .substs;
-                            if let Some(param_substs) = param_substs {
-                                substs = substs.subst(tcx, param_substs);
-                            }
-                            resolve_trait_associated_const(tcx, ti, trait_id,
-                                                           substs)
-                        }
-                        // Technically, without knowing anything about the
-                        // expression that generates the obligation, we could
-                        // still return the default if there is one. However,
-                        // it's safer to return `None` than to return some value
-                        // that may differ from what you would get from
-                        // correctly selecting an impl.
-                        None => None
-                    }
-                }
-                _ => None
-            },
-            Some(ast_map::NodeImplItem(ii)) => match ii.node {
-                hir::ImplItemKind::Const(_, ref expr) => {
-                    Some(&expr)
-                }
-                _ => None
-            },
-            Some(_) => None
-        }
-    } else {
-        match tcx.extern_const_statics.borrow().get(&def_id) {
-            Some(&ast::DUMMY_NODE_ID) => return None,
-            Some(&expr_id) => {
-                return Some(tcx.map.expect_expr(expr_id));
-            }
-            None => {}
-        }
-        let mut used_ref_id = false;
-        let expr_id = match tcx.sess.cstore.maybe_get_item_ast(tcx, def_id) {
-            cstore::FoundAst::Found(&InlinedItem::Item(ref item)) => match item.node {
-                hir::ItemConst(_, ref const_expr) => Some(const_expr.id),
-                _ => None
-            },
-            cstore::FoundAst::Found(&InlinedItem::TraitItem(trait_id, ref ti)) => match ti.node {
-                hir::ConstTraitItem(_, _) => {
-                    used_ref_id = true;
-                    match maybe_ref_id {
-                        // As mentioned in the comments above for in-crate
-                        // constants, we only try to find the expression for
-                        // a trait-associated const if the caller gives us
-                        // the expression that refers to it.
-                        Some(ref_id) => {
-                            let mut substs = tcx.node_id_item_substs(ref_id)
-                                                .substs;
-                            if let Some(param_substs) = param_substs {
-                                substs = substs.subst(tcx, param_substs);
-                            }
-                            resolve_trait_associated_const(tcx, ti, trait_id,
-                                                           substs).map(|e| e.id)
-                        }
-                        None => None
-                    }
-                }
-                _ => None
-            },
-            cstore::FoundAst::Found(&InlinedItem::ImplItem(_, ref ii)) => match ii.node {
-                hir::ImplItemKind::Const(_, ref expr) => Some(expr.id),
-                _ => None
-            },
-            _ => None
-        };
-        // If we used the reference expression, particularly to choose an impl
-        // of a trait-associated const, don't cache that, because the next
-        // lookup with the same def_id may yield a different result.
-        if !used_ref_id {
-            tcx.extern_const_statics
-               .borrow_mut().insert(def_id,
-                                    expr_id.unwrap_or(ast::DUMMY_NODE_ID));
-        }
-        expr_id.map(|id| tcx.map.expect_expr(id))
-    }
-}
-
-fn inline_const_fn_from_external_crate(tcx: &ty::ctxt, def_id: DefId)
-                                       -> Option<ast::NodeId> {
-    match tcx.extern_const_fns.borrow().get(&def_id) {
-        Some(&ast::DUMMY_NODE_ID) => return None,
-        Some(&fn_id) => return Some(fn_id),
-        None => {}
-    }
-
-    if !tcx.sess.cstore.is_const_fn(def_id) {
-        tcx.extern_const_fns.borrow_mut().insert(def_id, ast::DUMMY_NODE_ID);
-        return None;
-    }
-
-    let fn_id = match tcx.sess.cstore.maybe_get_item_ast(tcx, def_id) {
-        cstore::FoundAst::Found(&InlinedItem::Item(ref item)) => Some(item.id),
-        cstore::FoundAst::Found(&InlinedItem::ImplItem(_, ref item)) => Some(item.id),
-        _ => None
-    };
-    tcx.extern_const_fns.borrow_mut().insert(def_id,
-                                             fn_id.unwrap_or(ast::DUMMY_NODE_ID));
-    fn_id
-}
-
-pub fn lookup_const_fn_by_id<'tcx>(tcx: &ty::ctxt<'tcx>, def_id: DefId)
-                                   -> Option<FnLikeNode<'tcx>>
-{
-    let fn_id = if let Some(node_id) = tcx.map.as_local_node_id(def_id) {
-        node_id
-    } else {
-        if let Some(fn_id) = inline_const_fn_from_external_crate(tcx, def_id) {
-            fn_id
-        } else {
-            return None;
-        }
-    };
-
-    let fn_like = match FnLikeNode::from_node(tcx.map.get(fn_id)) {
-        Some(fn_like) => fn_like,
-        None => return None
-    };
-
-    match fn_like.kind() {
-        FnKind::ItemFn(_, _, _, hir::Constness::Const, _, _) => {
-            Some(fn_like)
-        }
-        FnKind::Method(_, m, _) => {
-            if m.constness == hir::Constness::Const {
-                Some(fn_like)
-            } else {
-                None
-            }
-        }
-        _ => None
-    }
-}
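// Minimal standalone sketch, not part of this file (written with today's
// stable `const fn`; in this era the feature was still gated): only a
// function declared `const fn` may be called while evaluating a constant
// expression, which is the property lookup_const_fn_by_id checks for.
const fn double(x: u32) -> u32 { x * 2 }

const EIGHT: u32 = double(4); // allowed because `double` is a const fn

fn main() {
    assert_eq!(EIGHT, 8);
}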
-
-#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
-pub enum ConstVal {
-    Float(f64),
-    Int(i64),
-    Uint(u64),
-    Str(InternedString),
-    ByteStr(Rc<Vec<u8>>),
-    Bool(bool),
-    Struct(ast::NodeId),
-    Tuple(ast::NodeId),
-    Function(DefId),
-    Array(ast::NodeId, u64),
-    Repeat(ast::NodeId, u64),
-}
-
-impl hash::Hash for ConstVal {
-    fn hash<H: hash::Hasher>(&self, state: &mut H) {
-        match *self {
-            Float(a) => unsafe { transmute::<_,u64>(a) }.hash(state),
-            Int(a) => a.hash(state),
-            Uint(a) => a.hash(state),
-            Str(ref a) => a.hash(state),
-            ByteStr(ref a) => a.hash(state),
-            Bool(a) => a.hash(state),
-            Struct(a) => a.hash(state),
-            Tuple(a) => a.hash(state),
-            Function(a) => a.hash(state),
-            Array(a, n) => { a.hash(state); n.hash(state) },
-            Repeat(a, n) => { a.hash(state); n.hash(state) },
-        }
-    }
-}
-
-/// Note that equality for `ConstVal` means that it is the same
-/// constant, not that the Rust values are equal. In particular, `NaN
-/// == NaN` (at least if it's the same NaN; distinct encodings for NaN
-/// are considered unequal).
-impl PartialEq for ConstVal {
-    fn eq(&self, other: &ConstVal) -> bool {
-        match (self, other) {
-            (&Float(a), &Float(b)) => unsafe{transmute::<_,u64>(a) == transmute::<_,u64>(b)},
-            (&Int(a), &Int(b)) => a == b,
-            (&Uint(a), &Uint(b)) => a == b,
-            (&Str(ref a), &Str(ref b)) => a == b,
-            (&ByteStr(ref a), &ByteStr(ref b)) => a == b,
-            (&Bool(a), &Bool(b)) => a == b,
-            (&Struct(a), &Struct(b)) => a == b,
-            (&Tuple(a), &Tuple(b)) => a == b,
-            (&Function(a), &Function(b)) => a == b,
-            (&Array(a, an), &Array(b, bn)) => (a == b) && (an == bn),
-            (&Repeat(a, an), &Repeat(b, bn)) => (a == b) && (an == bn),
-            _ => false,
-        }
-    }
-}
-
-impl Eq for ConstVal { }
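// Minimal standalone sketch, not part of this file (`to_bits` plays the role
// of the `transmute` above): ConstVal compares floats by bit pattern, so a
// NaN constant is equal to itself even though the IEEE comparison `NaN == NaN`
// is false.
fn main() {
    let a = f64::NAN;
    assert!(a != a);                      // IEEE comparison
    assert_eq!(a.to_bits(), a.to_bits()); // bit-pattern comparison
}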
-
-impl ConstVal {
-    pub fn description(&self) -> &'static str {
-        match *self {
-            Float(_) => "float",
-            Int(i) if i < 0 => "negative integer",
-            Int(_) => "positive integer",
-            Uint(_) => "unsigned integer",
-            Str(_) => "string literal",
-            ByteStr(_) => "byte string literal",
-            Bool(_) => "boolean",
-            Struct(_) => "struct",
-            Tuple(_) => "tuple",
-            Function(_) => "function definition",
-            Array(..) => "array",
-            Repeat(..) => "repeat",
-        }
-    }
-}
-
-pub fn const_expr_to_pat(tcx: &ty::ctxt, expr: &Expr, span: Span) -> P<hir::Pat> {
-    let pat = match expr.node {
-        hir::ExprTup(ref exprs) =>
-            PatKind::Tup(exprs.iter().map(|expr| const_expr_to_pat(tcx, &expr, span)).collect()),
-
-        hir::ExprCall(ref callee, ref args) => {
-            let def = *tcx.def_map.borrow().get(&callee.id).unwrap();
-            if let Vacant(entry) = tcx.def_map.borrow_mut().entry(expr.id) {
-               entry.insert(def);
-            }
-            let path = match def.full_def() {
-                Def::Struct(def_id) => def_to_path(tcx, def_id),
-                Def::Variant(_, variant_did) => def_to_path(tcx, variant_did),
-                Def::Fn(..) => return P(hir::Pat {
-                    id: expr.id,
-                    node: PatKind::Lit(P(expr.clone())),
-                    span: span,
-                }),
-                _ => unreachable!()
-            };
-            let pats = args.iter().map(|expr| const_expr_to_pat(tcx, &expr, span)).collect();
-            PatKind::TupleStruct(path, Some(pats))
-        }
-
-        hir::ExprStruct(ref path, ref fields, None) => {
-            let field_pats = fields.iter().map(|field| codemap::Spanned {
-                span: codemap::DUMMY_SP,
-                node: hir::FieldPat {
-                    name: field.name.node,
-                    pat: const_expr_to_pat(tcx, &field.expr, span),
-                    is_shorthand: false,
-                },
-            }).collect();
-            PatKind::Struct(path.clone(), field_pats, false)
-        }
-
-        hir::ExprVec(ref exprs) => {
-            let pats = exprs.iter().map(|expr| const_expr_to_pat(tcx, &expr, span)).collect();
-            PatKind::Vec(pats, None, hir::HirVec::new())
-        }
-
-        hir::ExprPath(_, ref path) => {
-            let opt_def = tcx.def_map.borrow().get(&expr.id).map(|d| d.full_def());
-            match opt_def {
-                Some(Def::Struct(..)) | Some(Def::Variant(..)) =>
-                    PatKind::Path(path.clone()),
-                Some(Def::Const(def_id)) |
-                Some(Def::AssociatedConst(def_id)) => {
-                    let expr = lookup_const_by_id(tcx, def_id, Some(expr.id), None).unwrap();
-                    return const_expr_to_pat(tcx, expr, span);
-                },
-                _ => unreachable!(),
-            }
-        }
-
-        _ => PatKind::Lit(P(expr.clone()))
-    };
-    P(hir::Pat { id: expr.id, node: pat, span: span })
-}
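// Minimal standalone sketch, not part of this file: when a constant appears
// in a pattern, the checker conceptually inlines its value, so matching on
// `ORIGIN` behaves like matching on the literal pattern `Point { x: 0, y: 0 }`.
#[derive(PartialEq, Eq)]
struct Point { x: i32, y: i32 }

const ORIGIN: Point = Point { x: 0, y: 0 };

fn is_origin(p: Point) -> bool {
    match p {
        ORIGIN => true, // expanded like `Point { x: 0, y: 0 }`
        _ => false,
    }
}

fn main() {
    assert!(is_origin(Point { x: 0, y: 0 }));
    assert!(!is_origin(Point { x: 1, y: 0 }));
}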
-
-pub fn eval_const_expr(tcx: &ty::ctxt, e: &Expr) -> ConstVal {
-    match eval_const_expr_partial(tcx, e, ExprTypeChecked, None) {
-        Ok(r) => r,
-        Err(s) => tcx.sess.span_fatal(s.span, &s.description())
-    }
-}
-
-pub type FnArgMap<'a> = Option<&'a NodeMap<ConstVal>>;
-
-#[derive(Clone)]
-pub struct ConstEvalErr {
-    pub span: Span,
-    pub kind: ErrKind,
-}
-
-#[derive(Clone)]
-pub enum ErrKind {
-    CannotCast,
-    CannotCastTo(&'static str),
-    InvalidOpForInts(hir::BinOp_),
-    InvalidOpForUInts(hir::BinOp_),
-    InvalidOpForBools(hir::BinOp_),
-    InvalidOpForFloats(hir::BinOp_),
-    InvalidOpForIntUint(hir::BinOp_),
-    InvalidOpForUintInt(hir::BinOp_),
-    NegateOn(ConstVal),
-    NotOn(ConstVal),
-    CallOn(ConstVal),
-
-    NegateWithOverflow(i64),
-    AddiWithOverflow(i64, i64),
-    SubiWithOverflow(i64, i64),
-    MuliWithOverflow(i64, i64),
-    AdduWithOverflow(u64, u64),
-    SubuWithOverflow(u64, u64),
-    MuluWithOverflow(u64, u64),
-    DivideByZero,
-    DivideWithOverflow,
-    ModuloByZero,
-    ModuloWithOverflow,
-    ShiftLeftWithOverflow,
-    ShiftRightWithOverflow,
-    MissingStructField,
-    NonConstPath,
-    UnimplementedConstVal(&'static str),
-    UnresolvedPath,
-    ExpectedConstTuple,
-    ExpectedConstStruct,
-    TupleIndexOutOfBounds,
-    IndexedNonVec,
-    IndexNegative,
-    IndexNotInt,
-    IndexOutOfBounds,
-    RepeatCountNotNatural,
-    RepeatCountNotInt,
-
-    MiscBinaryOp,
-    MiscCatchAll,
-
-    IndexOpFeatureGated,
-}
-
-impl ConstEvalErr {
-    pub fn description(&self) -> Cow<str> {
-        use self::ErrKind::*;
-
-        match self.kind {
-            CannotCast => "can't cast this type".into_cow(),
-            CannotCastTo(s) => format!("can't cast this type to {}", s).into_cow(),
-            InvalidOpForInts(_) =>  "can't do this op on signed integrals".into_cow(),
-            InvalidOpForUInts(_) =>  "can't do this op on unsigned integrals".into_cow(),
-            InvalidOpForBools(_) =>  "can't do this op on bools".into_cow(),
-            InvalidOpForFloats(_) => "can't do this op on floats".into_cow(),
-            InvalidOpForIntUint(..) => "can't do this op on an isize and usize".into_cow(),
-            InvalidOpForUintInt(..) => "can't do this op on a usize and isize".into_cow(),
-            NegateOn(ref const_val) => format!("negate on {}", const_val.description()).into_cow(),
-            NotOn(ref const_val) => format!("not on {}", const_val.description()).into_cow(),
-            CallOn(ref const_val) => format!("call on {}", const_val.description()).into_cow(),
-
-            NegateWithOverflow(..) => "attempted to negate with overflow".into_cow(),
-            AddiWithOverflow(..) => "attempted to add with overflow".into_cow(),
-            SubiWithOverflow(..) => "attempted to sub with overflow".into_cow(),
-            MuliWithOverflow(..) => "attempted to mul with overflow".into_cow(),
-            AdduWithOverflow(..) => "attempted to add with overflow".into_cow(),
-            SubuWithOverflow(..) => "attempted to sub with overflow".into_cow(),
-            MuluWithOverflow(..) => "attempted to mul with overflow".into_cow(),
-            DivideByZero         => "attempted to divide by zero".into_cow(),
-            DivideWithOverflow   => "attempted to divide with overflow".into_cow(),
-            ModuloByZero         => "attempted remainder with a divisor of zero".into_cow(),
-            ModuloWithOverflow   => "attempted remainder with overflow".into_cow(),
-            ShiftLeftWithOverflow => "attempted left shift with overflow".into_cow(),
-            ShiftRightWithOverflow => "attempted right shift with overflow".into_cow(),
-            MissingStructField  => "nonexistent struct field".into_cow(),
-            NonConstPath        => "non-constant path in constant expression".into_cow(),
-            UnimplementedConstVal(what) =>
-                format!("unimplemented constant expression: {}", what).into_cow(),
-            UnresolvedPath => "unresolved path in constant expression".into_cow(),
-            ExpectedConstTuple => "expected constant tuple".into_cow(),
-            ExpectedConstStruct => "expected constant struct".into_cow(),
-            TupleIndexOutOfBounds => "tuple index out of bounds".into_cow(),
-            IndexedNonVec => "indexing is only supported for arrays".into_cow(),
-            IndexNegative => "indices must be non-negative integers".into_cow(),
-            IndexNotInt => "indices must be integers".into_cow(),
-            IndexOutOfBounds => "array index out of bounds".into_cow(),
-            RepeatCountNotNatural => "repeat count must be a natural number".into_cow(),
-            RepeatCountNotInt => "repeat count must be integers".into_cow(),
-
-            MiscBinaryOp => "bad operands for binary".into_cow(),
-            MiscCatchAll => "unsupported constant expr".into_cow(),
-            IndexOpFeatureGated => "the index operation on const values is unstable".into_cow(),
-        }
-    }
-}
-
-pub type EvalResult = Result<ConstVal, ConstEvalErr>;
-pub type CastResult = Result<ConstVal, ErrKind>;
-
-// FIXME: Long-term, this enum should go away: trying to evaluate
-// an expression which hasn't been type-checked is a recipe for
-// disaster.  That said, it's not clear how to fix ast_ty_to_ty
-// to avoid the ordering issue.
-
-/// Hint to determine how to evaluate constant expressions which
-/// might not be type-checked.
-#[derive(Copy, Clone, Debug)]
-pub enum EvalHint<'tcx> {
-    /// We have a type-checked expression.
-    ExprTypeChecked,
-    /// We have an expression which hasn't been type-checked, but we have
-    /// an idea of what the type will be because of the context. For example,
-    /// the length of an array is always `usize`. (This is referred to as
-    /// a hint because it isn't guaranteed to be consistent with what
-    /// type-checking would compute.)
-    UncheckedExprHint(Ty<'tcx>),
-    /// We have an expression which has not yet been type-checked, and
-    /// we have no clue what the type will be.
-    UncheckedExprNoHint,
-}
-
-impl<'tcx> EvalHint<'tcx> {
-    fn erase_hint(&self) -> EvalHint<'tcx> {
-        match *self {
-            ExprTypeChecked => ExprTypeChecked,
-            UncheckedExprHint(_) | UncheckedExprNoHint => UncheckedExprNoHint,
-        }
-    }
-    fn checked_or(&self, ty: Ty<'tcx>) -> EvalHint<'tcx> {
-        match *self {
-            ExprTypeChecked => ExprTypeChecked,
-            _ => UncheckedExprHint(ty),
-        }
-    }
-}
-
-#[derive(Copy, Clone, PartialEq, Debug)]
-pub enum IntTy { I8, I16, I32, I64 }
-#[derive(Copy, Clone, PartialEq, Debug)]
-pub enum UintTy { U8, U16, U32, U64 }
-
-impl IntTy {
-    pub fn from(tcx: &ty::ctxt, t: ast::IntTy) -> IntTy {
-        let t = if let ast::IntTy::Is = t {
-            tcx.sess.target.int_type
-        } else {
-            t
-        };
-        match t {
-            ast::IntTy::Is => unreachable!(),
-            ast::IntTy::I8  => IntTy::I8,
-            ast::IntTy::I16 => IntTy::I16,
-            ast::IntTy::I32 => IntTy::I32,
-            ast::IntTy::I64 => IntTy::I64,
-        }
-    }
-}
-
-impl UintTy {
-    pub fn from(tcx: &ty::ctxt, t: ast::UintTy) -> UintTy {
-        let t = if let ast::UintTy::Us = t {
-            tcx.sess.target.uint_type
-        } else {
-            t
-        };
-        match t {
-            ast::UintTy::Us => unreachable!(),
-            ast::UintTy::U8  => UintTy::U8,
-            ast::UintTy::U16 => UintTy::U16,
-            ast::UintTy::U32 => UintTy::U32,
-            ast::UintTy::U64 => UintTy::U64,
-        }
-    }
-}
-
-macro_rules! signal {
-    ($e:expr, $exn:expr) => {
-        return Err(ConstEvalErr { span: $e.span, kind: $exn })
-    }
-}
-
-// The const_{int,uint}_checked_{neg,add,sub,mul,div,shl,shr} family
-// of functions catches and signals overflow errors during constant
-// evaluation.
-//
-// They all take the operator's arguments (`a` and `b` if binary), the
-// overall expression (`e`) and, if available, the whole expression's
-// concrete type (`opt_ety`).
-//
-// If the whole expression's concrete type is None, then this is a
-// constant evaluation happening before type check (e.g. in the check
-// to confirm that a pattern range's left-side is not greater than its
-// right-side). We do not do arithmetic modulo the type's bitwidth in
-// such a case; we just do 64-bit arithmetic and assume that later
-// passes will do it again with the type information, and thus do the
-// overflow checks then.
-
-pub fn const_int_checked_neg<'a>(
-    a: i64, e: &'a Expr, opt_ety: Option<IntTy>) -> EvalResult {
-
-    let (min,max) = match opt_ety {
-        // (-i8::MIN is itself not an i8, etc, but this is an easy way
-        // to allow literals to pass the check. Of course that does
-        // not work for i64::MIN.)
-        Some(IntTy::I8) =>  (-(i8::MAX as i64), -(i8::MIN as i64)),
-        Some(IntTy::I16) => (-(i16::MAX as i64), -(i16::MIN as i64)),
-        Some(IntTy::I32) => (-(i32::MAX as i64), -(i32::MIN as i64)),
-        None | Some(IntTy::I64) => (-i64::MAX, -(i64::MIN+1)),
-    };
-
-    let oflo = a < min || a > max;
-    if oflo {
-        signal!(e, NegateWithOverflow(a));
-    } else {
-        Ok(Int(-a))
-    }
-}
-
-pub fn const_uint_checked_neg<'a>(
-    a: u64, _e: &'a Expr, _opt_ety: Option<UintTy>) -> EvalResult {
-    // This always succeeds, and by definition, returns `(!a)+1`.
-    Ok(Uint((!a).wrapping_add(1)))
-}
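// Minimal standalone sketch, not part of this file: two's-complement negation
// of an unsigned value is `(!a) + 1` with wraparound, i.e. exactly
// `a.wrapping_neg()`.
fn main() {
    for a in [0u64, 1, 42, u64::MAX] {
        assert_eq!((!a).wrapping_add(1), a.wrapping_neg());
    }
}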
-
-fn const_uint_not(a: u64, opt_ety: Option<UintTy>) -> ConstVal {
-    let mask = match opt_ety {
-        Some(UintTy::U8) => u8::MAX as u64,
-        Some(UintTy::U16) => u16::MAX as u64,
-        Some(UintTy::U32) => u32::MAX as u64,
-        None | Some(UintTy::U64) => u64::MAX,
-    };
-    Uint(!a & mask)
-}
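// Minimal standalone sketch, not part of this file: the evaluator stores
// every unsigned value in a u64, so bitwise NOT has to be masked back down to
// the expression's real width; otherwise `!0u8` would come out as
// 0xFFFF_FFFF_FFFF_FFFF instead of 0xFF.
fn const_u8_not(a: u64) -> u64 {
    (!a) & (u8::MAX as u64)
}

fn main() {
    assert_eq!(const_u8_not(0), 0xFF);
    assert_eq!(const_u8_not(0b1010_1010), 0b0101_0101);
}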
-
-macro_rules! overflow_checking_body {
-    ($a:ident, $b:ident, $ety:ident, $overflowing_op:ident,
-     lhs: $to_8_lhs:ident $to_16_lhs:ident $to_32_lhs:ident,
-     rhs: $to_8_rhs:ident $to_16_rhs:ident $to_32_rhs:ident $to_64_rhs:ident,
-     $EnumTy:ident $T8: ident $T16: ident $T32: ident $T64: ident,
-     $result_type: ident) => { {
-        let (a,b,opt_ety) = ($a,$b,$ety);
-        match opt_ety {
-            Some($EnumTy::$T8) => match (a.$to_8_lhs(), b.$to_8_rhs()) {
-                (Some(a), Some(b)) => {
-                    let (a, oflo) = a.$overflowing_op(b);
-                    (a as $result_type, oflo)
-                }
-                (None, _) | (_, None) => (0, true)
-            },
-            Some($EnumTy::$T16) => match (a.$to_16_lhs(), b.$to_16_rhs()) {
-                (Some(a), Some(b)) => {
-                    let (a, oflo) = a.$overflowing_op(b);
-                    (a as $result_type, oflo)
-                }
-                (None, _) | (_, None) => (0, true)
-            },
-            Some($EnumTy::$T32) => match (a.$to_32_lhs(), b.$to_32_rhs()) {
-                (Some(a), Some(b)) => {
-                    let (a, oflo) = a.$overflowing_op(b);
-                    (a as $result_type, oflo)
-                }
-                (None, _) | (_, None) => (0, true)
-            },
-            None | Some($EnumTy::$T64) => match b.$to_64_rhs() {
-                Some(b) => a.$overflowing_op(b),
-                None => (0, true),
-            }
-        }
-    } }
-}
-
-macro_rules! int_arith_body {
-    ($a:ident, $b:ident, $ety:ident, $overflowing_op:ident) => {
-        overflow_checking_body!(
-            $a, $b, $ety, $overflowing_op,
-            lhs: to_i8 to_i16 to_i32,
-            rhs: to_i8 to_i16 to_i32 to_i64, IntTy I8 I16 I32 I64, i64)
-    }
-}
-
-macro_rules! uint_arith_body {
-    ($a:ident, $b:ident, $ety:ident, $overflowing_op:ident) => {
-        overflow_checking_body!(
-            $a, $b, $ety, $overflowing_op,
-            lhs: to_u8 to_u16 to_u32,
-            rhs: to_u8 to_u16 to_u32 to_u64, UintTy U8 U16 U32 U64, u64)
-    }
-}
-
-macro_rules! int_shift_body {
-    ($a:ident, $b:ident, $ety:ident, $overflowing_op:ident) => {
-        overflow_checking_body!(
-            $a, $b, $ety, $overflowing_op,
-            lhs: to_i8 to_i16 to_i32,
-            rhs: to_u32 to_u32 to_u32 to_u32, IntTy I8 I16 I32 I64, i64)
-    }
-}
-
-macro_rules! uint_shift_body {
-    ($a:ident, $b:ident, $ety:ident, $overflowing_op:ident) => {
-        overflow_checking_body!(
-            $a, $b, $ety, $overflowing_op,
-            lhs: to_u8 to_u16 to_u32,
-            rhs: to_u32 to_u32 to_u32 to_u32, UintTy U8 U16 U32 U64, u64)
-    }
-}
-
-macro_rules! pub_fn_checked_op {
-    {$fn_name:ident ($a:ident : $a_ty:ty, $b:ident : $b_ty:ty,.. $WhichTy:ident) {
-        $ret_oflo_body:ident $overflowing_op:ident
-            $const_ty:ident $signal_exn:expr
-    }} => {
-        pub fn $fn_name<'a>($a: $a_ty,
-                            $b: $b_ty,
-                            e: &'a Expr,
-                            opt_ety: Option<$WhichTy>) -> EvalResult {
-            let (ret, oflo) = $ret_oflo_body!($a, $b, opt_ety, $overflowing_op);
-            if !oflo { Ok($const_ty(ret)) } else { signal!(e, $signal_exn) }
-        }
-    }
-}
-
-pub_fn_checked_op!{ const_int_checked_add(a: i64, b: i64,.. IntTy) {
-           int_arith_body overflowing_add Int AddiWithOverflow(a, b)
-}}
-
-pub_fn_checked_op!{ const_int_checked_sub(a: i64, b: i64,.. IntTy) {
-           int_arith_body overflowing_sub Int SubiWithOverflow(a, b)
-}}
-
-pub_fn_checked_op!{ const_int_checked_mul(a: i64, b: i64,.. IntTy) {
-           int_arith_body overflowing_mul Int MuliWithOverflow(a, b)
-}}
-
-pub fn const_int_checked_div<'a>(
-    a: i64, b: i64, e: &'a Expr, opt_ety: Option<IntTy>) -> EvalResult {
-    if b == 0 { signal!(e, DivideByZero); }
-    let (ret, oflo) = int_arith_body!(a, b, opt_ety, overflowing_div);
-    if !oflo { Ok(Int(ret)) } else { signal!(e, DivideWithOverflow) }
-}
-
-pub fn const_int_checked_rem<'a>(
-    a: i64, b: i64, e: &'a Expr, opt_ety: Option<IntTy>) -> EvalResult {
-    if b == 0 { signal!(e, ModuloByZero); }
-    let (ret, oflo) = int_arith_body!(a, b, opt_ety, overflowing_rem);
-    if !oflo { Ok(Int(ret)) } else { signal!(e, ModuloWithOverflow) }
-}
-
-pub_fn_checked_op!{ const_int_checked_shl(a: i64, b: i64,.. IntTy) {
-           int_shift_body overflowing_shl Int ShiftLeftWithOverflow
-}}
-
-pub_fn_checked_op!{ const_int_checked_shl_via_uint(a: i64, b: u64,.. IntTy) {
-           int_shift_body overflowing_shl Int ShiftLeftWithOverflow
-}}
-
-pub_fn_checked_op!{ const_int_checked_shr(a: i64, b: i64,.. IntTy) {
-           int_shift_body overflowing_shr Int ShiftRightWithOverflow
-}}
-
-pub_fn_checked_op!{ const_int_checked_shr_via_uint(a: i64, b: u64,.. IntTy) {
-           int_shift_body overflowing_shr Int ShiftRightWithOverflow
-}}
-
-pub_fn_checked_op!{ const_uint_checked_add(a: u64, b: u64,.. UintTy) {
-           uint_arith_body overflowing_add Uint AdduWithOverflow(a, b)
-}}
-
-pub_fn_checked_op!{ const_uint_checked_sub(a: u64, b: u64,.. UintTy) {
-           uint_arith_body overflowing_sub Uint SubuWithOverflow(a, b)
-}}
-
-pub_fn_checked_op!{ const_uint_checked_mul(a: u64, b: u64,.. UintTy) {
-           uint_arith_body overflowing_mul Uint MuluWithOverflow(a, b)
-}}
-
-pub fn const_uint_checked_div<'a>(
-    a: u64, b: u64, e: &'a Expr, opt_ety: Option<UintTy>) -> EvalResult {
-    if b == 0 { signal!(e, DivideByZero); }
-    let (ret, oflo) = uint_arith_body!(a, b, opt_ety, overflowing_div);
-    if !oflo { Ok(Uint(ret)) } else { signal!(e, DivideWithOverflow) }
-}
-
-pub fn const_uint_checked_rem<'a>(
-    a: u64, b: u64, e: &'a Expr, opt_ety: Option<UintTy>) -> EvalResult {
-    if b == 0 { signal!(e, ModuloByZero); }
-    let (ret, oflo) = uint_arith_body!(a, b, opt_ety, overflowing_rem);
-    if !oflo { Ok(Uint(ret)) } else { signal!(e, ModuloWithOverflow) }
-}
-
-pub_fn_checked_op!{ const_uint_checked_shl(a: u64, b: u64,.. UintTy) {
-           uint_shift_body overflowing_shl Uint ShiftLeftWithOverflow
-}}
-
-pub_fn_checked_op!{ const_uint_checked_shl_via_int(a: u64, b: i64,.. UintTy) {
-           uint_shift_body overflowing_shl Uint ShiftLeftWithOverflow
-}}
-
-pub_fn_checked_op!{ const_uint_checked_shr(a: u64, b: u64,.. UintTy) {
-           uint_shift_body overflowing_shr Uint ShiftRightWithOverflow
-}}
-
-pub_fn_checked_op!{ const_uint_checked_shr_via_int(a: u64, b: i64,.. UintTy) {
-           uint_shift_body overflowing_shr Uint ShiftRightWithOverflow
-}}
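// Minimal standalone sketch, not part of this file: each const_*_checked_*
// helper narrows both operands to the expression's concrete width, performs
// the overflowing operation, and signals an error instead of silently
// wrapping.
fn main() {
    let (a, b) = (i8::MAX as i64, 1i64);
    let (wrapped, overflowed) = (a as i8).overflowing_add(b as i8);
    assert!(overflowed);          // i8::MAX + 1 overflows at width 8
    assert_eq!(wrapped, i8::MIN); // the wrapped value the checked op rejects
}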
-
-/// Evaluate a constant expression in a context where the expression isn't
-/// guaranteed to be evaluatable. `ty_hint` is usually ExprTypeChecked,
-/// but a few places need to evaluate constants during type-checking, like
-/// computing the length of an array. (See also the FIXME above EvalHint.)
-pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                     e: &Expr,
-                                     ty_hint: EvalHint<'tcx>,
-                                     fn_args: FnArgMap) -> EvalResult {
-    // Try to compute the type of the expression based on the EvalHint.
-    // (See also the definition of EvalHint, and the FIXME above EvalHint.)
-    let ety = match ty_hint {
-        ExprTypeChecked => {
-            // After type-checking, expr_ty is guaranteed to succeed.
-            Some(tcx.expr_ty(e))
-        }
-        UncheckedExprHint(ty) => {
-            // Use the type hint; it's not guaranteed to be right, but it's
-            // usually good enough.
-            Some(ty)
-        }
-        UncheckedExprNoHint => {
-            // This expression might not be type-checked, and we have no hint.
-            // Try to query the context for a type anyway; we might get lucky
-            // (for example, if the expression was imported from another crate).
-            tcx.expr_ty_opt(e)
-        }
-    };
-
-    // If the type of the expression itself is int or uint, normalize in these
-    // bindings so that isize/usize is mapped to a type with an
-    // inherently known bitwidth.
-    let expr_int_type = ety.and_then(|ty| {
-        if let ty::TyInt(t) = ty.sty {
-            Some(IntTy::from(tcx, t)) } else { None }
-    });
-    let expr_uint_type = ety.and_then(|ty| {
-        if let ty::TyUint(t) = ty.sty {
-            Some(UintTy::from(tcx, t)) } else { None }
-    });
-
-    let result = match e.node {
-      hir::ExprUnary(hir::UnNeg, ref inner) => {
-        match try!(eval_const_expr_partial(tcx, &inner, ty_hint, fn_args)) {
-          Float(f) => Float(-f),
-          Int(n) =>  try!(const_int_checked_neg(n, e, expr_int_type)),
-          Uint(i) => {
-              try!(const_uint_checked_neg(i, e, expr_uint_type))
-          }
-          const_val => signal!(e, NegateOn(const_val)),
-        }
-      }
-      hir::ExprUnary(hir::UnNot, ref inner) => {
-        match try!(eval_const_expr_partial(tcx, &inner, ty_hint, fn_args)) {
-          Int(i) => Int(!i),
-          Uint(i) => const_uint_not(i, expr_uint_type),
-          Bool(b) => Bool(!b),
-          const_val => signal!(e, NotOn(const_val)),
-        }
-      }
-      hir::ExprBinary(op, ref a, ref b) => {
-        let b_ty = match op.node {
-            hir::BiShl | hir::BiShr => ty_hint.checked_or(tcx.types.usize),
-            _ => ty_hint
-        };
-        match (try!(eval_const_expr_partial(tcx, &a, ty_hint, fn_args)),
-               try!(eval_const_expr_partial(tcx, &b, b_ty, fn_args))) {
-          (Float(a), Float(b)) => {
-            match op.node {
-              hir::BiAdd => Float(a + b),
-              hir::BiSub => Float(a - b),
-              hir::BiMul => Float(a * b),
-              hir::BiDiv => Float(a / b),
-              hir::BiRem => Float(a % b),
-              hir::BiEq => Bool(a == b),
-              hir::BiLt => Bool(a < b),
-              hir::BiLe => Bool(a <= b),
-              hir::BiNe => Bool(a != b),
-              hir::BiGe => Bool(a >= b),
-              hir::BiGt => Bool(a > b),
-              _ => signal!(e, InvalidOpForFloats(op.node)),
-            }
-          }
-          (Int(a), Int(b)) => {
-            match op.node {
-              hir::BiAdd => try!(const_int_checked_add(a,b,e,expr_int_type)),
-              hir::BiSub => try!(const_int_checked_sub(a,b,e,expr_int_type)),
-              hir::BiMul => try!(const_int_checked_mul(a,b,e,expr_int_type)),
-              hir::BiDiv => try!(const_int_checked_div(a,b,e,expr_int_type)),
-              hir::BiRem => try!(const_int_checked_rem(a,b,e,expr_int_type)),
-              hir::BiBitAnd => Int(a & b),
-              hir::BiBitOr => Int(a | b),
-              hir::BiBitXor => Int(a ^ b),
-              hir::BiShl => try!(const_int_checked_shl(a,b,e,expr_int_type)),
-              hir::BiShr => try!(const_int_checked_shr(a,b,e,expr_int_type)),
-              hir::BiEq => Bool(a == b),
-              hir::BiLt => Bool(a < b),
-              hir::BiLe => Bool(a <= b),
-              hir::BiNe => Bool(a != b),
-              hir::BiGe => Bool(a >= b),
-              hir::BiGt => Bool(a > b),
-              _ => signal!(e, InvalidOpForInts(op.node)),
-            }
-          }
-          (Uint(a), Uint(b)) => {
-            match op.node {
-              hir::BiAdd => try!(const_uint_checked_add(a,b,e,expr_uint_type)),
-              hir::BiSub => try!(const_uint_checked_sub(a,b,e,expr_uint_type)),
-              hir::BiMul => try!(const_uint_checked_mul(a,b,e,expr_uint_type)),
-              hir::BiDiv => try!(const_uint_checked_div(a,b,e,expr_uint_type)),
-              hir::BiRem => try!(const_uint_checked_rem(a,b,e,expr_uint_type)),
-              hir::BiBitAnd => Uint(a & b),
-              hir::BiBitOr => Uint(a | b),
-              hir::BiBitXor => Uint(a ^ b),
-              hir::BiShl => try!(const_uint_checked_shl(a,b,e,expr_uint_type)),
-              hir::BiShr => try!(const_uint_checked_shr(a,b,e,expr_uint_type)),
-              hir::BiEq => Bool(a == b),
-              hir::BiLt => Bool(a < b),
-              hir::BiLe => Bool(a <= b),
-              hir::BiNe => Bool(a != b),
-              hir::BiGe => Bool(a >= b),
-              hir::BiGt => Bool(a > b),
-              _ => signal!(e, InvalidOpForUInts(op.node)),
-            }
-          }
-          // shifts can have any integral type as their rhs
-          (Int(a), Uint(b)) => {
-            match op.node {
-              hir::BiShl => try!(const_int_checked_shl_via_uint(a,b,e,expr_int_type)),
-              hir::BiShr => try!(const_int_checked_shr_via_uint(a,b,e,expr_int_type)),
-              _ => signal!(e, InvalidOpForIntUint(op.node)),
-            }
-          }
-          (Uint(a), Int(b)) => {
-            match op.node {
-              hir::BiShl => try!(const_uint_checked_shl_via_int(a,b,e,expr_uint_type)),
-              hir::BiShr => try!(const_uint_checked_shr_via_int(a,b,e,expr_uint_type)),
-              _ => signal!(e, InvalidOpForUintInt(op.node)),
-            }
-          }
-          (Bool(a), Bool(b)) => {
-            Bool(match op.node {
-              hir::BiAnd => a && b,
-              hir::BiOr => a || b,
-              hir::BiBitXor => a ^ b,
-              hir::BiBitAnd => a & b,
-              hir::BiBitOr => a | b,
-              hir::BiEq => a == b,
-              hir::BiNe => a != b,
-              _ => signal!(e, InvalidOpForBools(op.node)),
-             })
-          }
-
-          _ => signal!(e, MiscBinaryOp),
-        }
-      }
-      hir::ExprCast(ref base, ref target_ty) => {
-        let ety = ety.or_else(|| ast_ty_to_prim_ty(tcx, &target_ty))
-                .unwrap_or_else(|| {
-                    tcx.sess.span_fatal(target_ty.span,
-                                        "target type not found for const cast")
-                });
-
-        let base_hint = if let ExprTypeChecked = ty_hint {
-            ExprTypeChecked
-        } else {
-            // FIXME (#23833): the type-hint can cause problems,
-            // e.g. `(i8::MAX + 1_i8) as u32` feeds in `u32` as result
-            // type to the sum, and thus no overflow is signaled.
-            match tcx.expr_ty_opt(&base) {
-                Some(t) => UncheckedExprHint(t),
-                None => ty_hint
-            }
-        };
-
-        let val = try!(eval_const_expr_partial(tcx, &base, base_hint, fn_args));
-        match cast_const(tcx, val, ety) {
-            Ok(val) => val,
-            Err(kind) => return Err(ConstEvalErr { span: e.span, kind: kind }),
-        }
-      }
-      hir::ExprPath(..) => {
-          let opt_def = if let Some(def) = tcx.def_map.borrow().get(&e.id) {
-              // After type-checking, def_map contains the definition of the
-              // item referred to by the path. During type-checking, it
-              // can contain the raw output of path resolution, which
-              // might be a partially resolved path.
-              // FIXME: There's probably a better way to make sure we don't
-              // panic here.
-              if def.depth != 0 {
-                  signal!(e, UnresolvedPath);
-              }
-              Some(def.full_def())
-          } else {
-              None
-          };
-          let (const_expr, const_ty) = match opt_def {
-              Some(Def::Const(def_id)) => {
-                  if let Some(node_id) = tcx.map.as_local_node_id(def_id) {
-                      match tcx.map.find(node_id) {
-                          Some(ast_map::NodeItem(it)) => match it.node {
-                              hir::ItemConst(ref ty, ref expr) => {
-                                  (Some(&**expr), Some(&**ty))
-                              }
-                              _ => (None, None)
-                          },
-                          _ => (None, None)
-                      }
-                  } else {
-                      (lookup_const_by_id(tcx, def_id, Some(e.id), None), None)
-                  }
-              }
-              Some(Def::AssociatedConst(def_id)) => {
-                  if let Some(node_id) = tcx.map.as_local_node_id(def_id) {
-                      match impl_or_trait_container(tcx, def_id) {
-                          ty::TraitContainer(trait_id) => match tcx.map.find(node_id) {
-                              Some(ast_map::NodeTraitItem(ti)) => match ti.node {
-                                  hir::ConstTraitItem(ref ty, _) => {
-                                      if let ExprTypeChecked = ty_hint {
-                                          let substs = tcx.node_id_item_substs(e.id).substs;
-                                          (resolve_trait_associated_const(tcx,
-                                                                          ti,
-                                                                          trait_id,
-                                                                          substs),
-                                           Some(&**ty))
-                                       } else {
-                                           (None, None)
-                                       }
-                                  }
-                                  _ => (None, None)
-                              },
-                              _ => (None, None)
-                          },
-                          ty::ImplContainer(_) => match tcx.map.find(node_id) {
-                              Some(ast_map::NodeImplItem(ii)) => match ii.node {
-                                  hir::ImplItemKind::Const(ref ty, ref expr) => {
-                                      (Some(&**expr), Some(&**ty))
-                                  }
-                                  _ => (None, None)
-                              },
-                              _ => (None, None)
-                          },
-                      }
-                  } else {
-                      (lookup_const_by_id(tcx, def_id, Some(e.id), None), None)
-                  }
-              }
-              Some(Def::Variant(enum_def, variant_def)) => {
-                  (lookup_variant_by_id(tcx, enum_def, variant_def), None)
-              }
-              Some(Def::Struct(..)) => {
-                  return Ok(ConstVal::Struct(e.id))
-              }
-              Some(Def::Local(_, id)) => {
-                  debug!("Def::Local({:?}): {:?}", id, fn_args);
-                  if let Some(val) = fn_args.and_then(|args| args.get(&id)) {
-                      return Ok(val.clone());
-                  } else {
-                      (None, None)
-                  }
-              },
-              Some(Def::Method(id)) | Some(Def::Fn(id)) => return Ok(Function(id)),
-              _ => (None, None)
-          };
-          let const_expr = match const_expr {
-              Some(actual_e) => actual_e,
-              None => signal!(e, NonConstPath)
-          };
-          let item_hint = if let UncheckedExprNoHint = ty_hint {
-              match const_ty {
-                  Some(ty) => match ast_ty_to_prim_ty(tcx, ty) {
-                      Some(ty) => UncheckedExprHint(ty),
-                      None => UncheckedExprNoHint
-                  },
-                  None => UncheckedExprNoHint
-              }
-          } else {
-              ty_hint
-          };
-          try!(eval_const_expr_partial(tcx, const_expr, item_hint, fn_args))
-      }
-      hir::ExprCall(ref callee, ref args) => {
-          let sub_ty_hint = ty_hint.erase_hint();
-          let callee_val = try!(eval_const_expr_partial(tcx, callee, sub_ty_hint, fn_args));
-          let did = match callee_val {
-              Function(did) => did,
-              callee => signal!(e, CallOn(callee)),
-          };
-          let (decl, result) = if let Some(fn_like) = lookup_const_fn_by_id(tcx, did) {
-              (fn_like.decl(), &fn_like.body().expr)
-          } else {
-              signal!(e, NonConstPath)
-          };
-          let result = result.as_ref().expect("const fn has no result expression");
-          assert_eq!(decl.inputs.len(), args.len());
-
-          let mut call_args = NodeMap();
-          for (arg, arg_expr) in decl.inputs.iter().zip(args.iter()) {
-              let arg_val = try!(eval_const_expr_partial(
-                  tcx,
-                  arg_expr,
-                  sub_ty_hint,
-                  fn_args
-              ));
-              debug!("const call arg: {:?}", arg);
-              let old = call_args.insert(arg.pat.id, arg_val);
-              assert!(old.is_none());
-          }
-          debug!("const call({:?})", call_args);
-          try!(eval_const_expr_partial(tcx, &result, ty_hint, Some(&call_args)))
-      },
-      hir::ExprLit(ref lit) => lit_to_const(tcx.sess, e.span, &lit, ety),
-      hir::ExprBlock(ref block) => {
-        match block.expr {
-            Some(ref expr) => try!(eval_const_expr_partial(tcx, &expr, ty_hint, fn_args)),
-            None => unreachable!(),
-        }
-      }
-      hir::ExprType(ref e, _) => try!(eval_const_expr_partial(tcx, &e, ty_hint, fn_args)),
-      hir::ExprTup(_) => Tuple(e.id),
-      hir::ExprStruct(..) => Struct(e.id),
-      hir::ExprIndex(ref arr, ref idx) => {
-        if !tcx.sess.features.borrow().const_indexing {
-            signal!(e, IndexOpFeatureGated);
-        }
-        let arr_hint = ty_hint.erase_hint();
-        let arr = try!(eval_const_expr_partial(tcx, arr, arr_hint, fn_args));
-        let idx_hint = ty_hint.checked_or(tcx.types.usize);
-        let idx = match try!(eval_const_expr_partial(tcx, idx, idx_hint, fn_args)) {
-            Int(i) if i >= 0 => i as u64,
-            Int(_) => signal!(idx, IndexNegative),
-            Uint(i) => i,
-            _ => signal!(idx, IndexNotInt),
-        };
-        match arr {
-            Array(_, n) if idx >= n => signal!(e, IndexOutOfBounds),
-            Array(v, _) => if let hir::ExprVec(ref v) = tcx.map.expect_expr(v).node {
-                try!(eval_const_expr_partial(tcx, &v[idx as usize], ty_hint, fn_args))
-            } else {
-                unreachable!()
-            },
-
-            Repeat(_, n) if idx >= n => signal!(e, IndexOutOfBounds),
-            Repeat(elem, _) => try!(eval_const_expr_partial(
-                tcx,
-                &tcx.map.expect_expr(elem),
-                ty_hint,
-                fn_args,
-            )),
-
-            ByteStr(ref data) if idx as usize >= data.len()
-                => signal!(e, IndexOutOfBounds),
-            ByteStr(data) => Uint(data[idx as usize] as u64),
-
-            Str(ref s) if idx as usize >= s.len()
-                => signal!(e, IndexOutOfBounds),
-            Str(_) => unimplemented!(), // there's no const_char type
-            _ => signal!(e, IndexedNonVec),
-        }
-      }
-      hir::ExprVec(ref v) => Array(e.id, v.len() as u64),
-      hir::ExprRepeat(_, ref n) => {
-          let len_hint = ty_hint.checked_or(tcx.types.usize);
-          Repeat(
-              e.id,
-              match try!(eval_const_expr_partial(tcx, &n, len_hint, fn_args)) {
-                  Int(i) if i >= 0 => i as u64,
-                  Int(_) => signal!(e, RepeatCountNotNatural),
-                  Uint(i) => i,
-                  _ => signal!(e, RepeatCountNotInt),
-              },
-          )
-      },
-      hir::ExprTupField(ref base, index) => {
-        let base_hint = ty_hint.erase_hint();
-        let c = try!(eval_const_expr_partial(tcx, base, base_hint, fn_args));
-        if let Tuple(tup_id) = c {
-            if let hir::ExprTup(ref fields) = tcx.map.expect_expr(tup_id).node {
-                if index.node < fields.len() {
-                    return eval_const_expr_partial(tcx, &fields[index.node], base_hint, fn_args)
-                } else {
-                    signal!(e, TupleIndexOutOfBounds);
-                }
-            } else {
-                unreachable!()
-            }
-        } else {
-            signal!(base, ExpectedConstTuple);
-        }
-      }
-      hir::ExprField(ref base, field_name) => {
-        let base_hint = ty_hint.erase_hint();
-        // Get the base expression if it is a struct and it is constant
-        let c = try!(eval_const_expr_partial(tcx, base, base_hint, fn_args));
-        if let Struct(struct_id) = c {
-            if let hir::ExprStruct(_, ref fields, _) = tcx.map.expect_expr(struct_id).node {
-                // Check that the given field exists and evaluate it
-                // if the idents are compared run-pass/issue-19244 fails
-                if let Some(f) = fields.iter().find(|f| f.name.node
-                                                     == field_name.node) {
-                    return eval_const_expr_partial(tcx, &f.expr, base_hint, fn_args)
-                } else {
-                    signal!(e, MissingStructField);
-                }
-            } else {
-                unreachable!()
-            }
-        } else {
-            signal!(base, ExpectedConstStruct);
-        }
-      }
-      _ => signal!(e, MiscCatchAll)
-    };
-
-    Ok(result)
-}
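
The FIXME (#23833) in the ExprCast arm above notes that propagating the cast's target type into the operand can hide overflow. A minimal illustration of the arithmetic involved, in plain Rust rather than compiler code (the checked_add calls are only a stand-in for the evaluator's overflow checks):

    fn main() {
        // Checked at the operand's own type (i8), `i8::MAX + 1` overflows...
        assert_eq!(i8::MAX.checked_add(1), None);
        // ...but evaluated at the cast's target type (u32), as the old
        // type-hint propagation could arrange, the same sum succeeds and
        // no overflow is ever signaled.
        assert_eq!((i8::MAX as u32).checked_add(1), Some(128));
    }
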
-
-fn impl_or_trait_container(tcx: &ty::ctxt, def_id: DefId) -> ty::ImplOrTraitItemContainer {
-    // This is intended to be equivalent to tcx.impl_or_trait_item(def_id).container()
-    // for local def_id, but it can be called before tcx.impl_or_trait_items is complete.
-    if let Some(node_id) = tcx.map.as_local_node_id(def_id) {
-        if let Some(ast_map::NodeItem(item)) = tcx.map.find(tcx.map.get_parent_node(node_id)) {
-            let container_id = tcx.map.local_def_id(item.id);
-            match item.node {
-                hir::ItemImpl(..) => return ty::ImplContainer(container_id),
-                hir::ItemTrait(..) => return ty::TraitContainer(container_id),
-                _ => ()
-            }
-        }
-        panic!("No impl or trait container for {:?}", def_id);
-    }
-    panic!("{:?} is not local", def_id);
-}
-
-fn resolve_trait_associated_const<'a, 'tcx: 'a>(tcx: &'a ty::ctxt<'tcx>,
-                                                ti: &'tcx hir::TraitItem,
-                                                trait_id: DefId,
-                                                rcvr_substs: subst::Substs<'tcx>)
-                                                -> Option<&'tcx Expr>
-{
-    let trait_ref = ty::Binder(
-        rcvr_substs.erase_regions().to_trait_ref(tcx, trait_id)
-    );
-    debug!("resolve_trait_associated_const: trait_ref={:?}",
-           trait_ref);
-
-    tcx.populate_implementations_for_trait_if_necessary(trait_ref.def_id());
-    let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None);
-
-    let mut selcx = traits::SelectionContext::new(&infcx);
-    let obligation = traits::Obligation::new(traits::ObligationCause::dummy(),
-                                             trait_ref.to_poly_trait_predicate());
-    let selection = match selcx.select(&obligation) {
-        Ok(Some(vtable)) => vtable,
-        // Still ambiguous, so give up and let the caller decide whether this
-        // expression is really needed yet. Some associated constant values
-        // can't be evaluated until monomorphization is done in trans.
-        Ok(None) => {
-            return None
-        }
-        Err(_) => {
-            return None
-        }
-    };
-
-    match selection {
-        traits::VtableImpl(ref impl_data) => {
-            match tcx.associated_consts(impl_data.impl_def_id)
-                     .iter().find(|ic| ic.name == ti.name) {
-                Some(ic) => lookup_const_by_id(tcx, ic.def_id, None, None),
-                None => match ti.node {
-                    hir::ConstTraitItem(_, Some(ref expr)) => Some(&*expr),
-                    _ => None,
-                },
-            }
-        }
-        _ => {
-            tcx.sess.span_bug(
-                ti.span,
-                "resolve_trait_associated_const: unexpected vtable type")
-        }
-    }
-}
-
-fn cast_const<'tcx>(tcx: &ty::ctxt<'tcx>, val: ConstVal, ty: Ty) -> CastResult {
-    macro_rules! convert_val {
-        ($intermediate_ty:ty, $const_type:ident, $target_ty:ty) => {
-            match val {
-                Bool(b) => Ok($const_type(b as u64 as $intermediate_ty as $target_ty)),
-                Uint(u) => Ok($const_type(u as $intermediate_ty as $target_ty)),
-                Int(i) => Ok($const_type(i as $intermediate_ty as $target_ty)),
-                Float(f) => Ok($const_type(f as $intermediate_ty as $target_ty)),
-                _ => Err(ErrKind::CannotCastTo(stringify!($const_type))),
-            }
-        }
-    }
-
-    // Issue #23890: If isize/usize, then dispatch to appropriate target representation type
-    match (&ty.sty, tcx.sess.target.int_type, tcx.sess.target.uint_type) {
-        (&ty::TyInt(ast::IntTy::Is), ast::IntTy::I32, _) => return convert_val!(i32, Int, i64),
-        (&ty::TyInt(ast::IntTy::Is), ast::IntTy::I64, _) => return convert_val!(i64, Int, i64),
-        (&ty::TyInt(ast::IntTy::Is), _, _) => panic!("unexpected target.int_type"),
-
-        (&ty::TyUint(ast::UintTy::Us), _, ast::UintTy::U32) => return convert_val!(u32, Uint, u64),
-        (&ty::TyUint(ast::UintTy::Us), _, ast::UintTy::U64) => return convert_val!(u64, Uint, u64),
-        (&ty::TyUint(ast::UintTy::Us), _, _) => panic!("unexpected target.uint_type"),
-
-        _ => {}
-    }
-
-    match ty.sty {
-        ty::TyInt(ast::IntTy::Is) => unreachable!(),
-        ty::TyUint(ast::UintTy::Us) => unreachable!(),
-
-        ty::TyInt(ast::IntTy::I8) => convert_val!(i8, Int, i64),
-        ty::TyInt(ast::IntTy::I16) => convert_val!(i16, Int, i64),
-        ty::TyInt(ast::IntTy::I32) => convert_val!(i32, Int, i64),
-        ty::TyInt(ast::IntTy::I64) => convert_val!(i64, Int, i64),
-
-        ty::TyUint(ast::UintTy::U8) => convert_val!(u8, Uint, u64),
-        ty::TyUint(ast::UintTy::U16) => convert_val!(u16, Uint, u64),
-        ty::TyUint(ast::UintTy::U32) => convert_val!(u32, Uint, u64),
-        ty::TyUint(ast::UintTy::U64) => convert_val!(u64, Uint, u64),
-
-        ty::TyFloat(ast::FloatTy::F32) => convert_val!(f32, Float, f64),
-        ty::TyFloat(ast::FloatTy::F64) => convert_val!(f64, Float, f64),
-        _ => Err(ErrKind::CannotCast),
-    }
-}
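
The Issue #23890 comment in cast_const above dispatches isize/usize casts through the target's concrete width. A small sketch of why the width matters, assuming nothing beyond ordinary integer casts (values chosen only for illustration):

    fn main() {
        let v: u64 = 0x1_0000_0001;
        // Truncating through a 32-bit representation (a 32-bit usize target)
        // yields a different constant than keeping the full 64 bits.
        assert_eq!(v as u32 as u64, 1);
        assert_eq!(v as u64, 0x1_0000_0001);
    }
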
-
-fn lit_to_const(sess: &Session, span: Span, lit: &ast::Lit, ty_hint: Option<Ty>) -> ConstVal {
-    match lit.node {
-        ast::LitKind::Str(ref s, _) => Str((*s).clone()),
-        ast::LitKind::ByteStr(ref data) => {
-            ByteStr(data.clone())
-        }
-        ast::LitKind::Byte(n) => Uint(n as u64),
-        ast::LitKind::Char(n) => Uint(n as u64),
-        ast::LitKind::Int(n, ast::LitIntType::Signed(_)) => Int(n as i64),
-        ast::LitKind::Int(n, ast::LitIntType::Unsuffixed) => {
-            match ty_hint.map(|ty| &ty.sty) {
-                Some(&ty::TyUint(_)) => Uint(n),
-                _ => Int(n as i64)
-            }
-        }
-        ast::LitKind::Int(n, ast::LitIntType::Unsigned(_)) => Uint(n),
-        ast::LitKind::Float(ref n, _) |
-        ast::LitKind::FloatUnsuffixed(ref n) => {
-            if let Ok(x) = n.parse::<f64>() {
-                Float(x)
-            } else {
-                // FIXME(#31407) this is only necessary because float parsing is buggy
-                sess.span_bug(span, "could not evaluate float literal (see issue #31407)");
-            }
-        }
-        ast::LitKind::Bool(b) => Bool(b)
-    }
-}
-
-pub fn compare_const_vals(a: &ConstVal, b: &ConstVal) -> Option<Ordering> {
-    Some(match (a, b) {
-        (&Int(a), &Int(b)) => a.cmp(&b),
-        (&Uint(a), &Uint(b)) => a.cmp(&b),
-        (&Float(a), &Float(b)) => {
-            // This is pretty bad but it is the existing behavior.
-            if a == b {
-                Ordering::Equal
-            } else if a < b {
-                Ordering::Less
-            } else {
-                Ordering::Greater
-            }
-        }
-        (&Str(ref a), &Str(ref b)) => a.cmp(b),
-        (&Bool(a), &Bool(b)) => a.cmp(&b),
-        (&ByteStr(ref a), &ByteStr(ref b)) => a.cmp(b),
-        _ => return None
-    })
-}
-
-pub fn compare_lit_exprs<'tcx>(tcx: &ty::ctxt<'tcx>,
-                               a: &Expr,
-                               b: &Expr) -> Option<Ordering> {
-    let a = match eval_const_expr_partial(tcx, a, ExprTypeChecked, None) {
-        Ok(a) => a,
-        Err(e) => {
-            tcx.sess.span_err(a.span, &e.description());
-            return None;
-        }
-    };
-    let b = match eval_const_expr_partial(tcx, b, ExprTypeChecked, None) {
-        Ok(b) => b,
-        Err(e) => {
-            tcx.sess.span_err(b.span, &e.description());
-            return None;
-        }
-    };
-    compare_const_vals(&a, &b)
-}
diff --git a/src/librustc/middle/const_val.rs b/src/librustc/middle/const_val.rs
new file mode 100644 (file)
index 0000000..3621cb2
--- /dev/null
@@ -0,0 +1,101 @@
+// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use syntax::parse::token::InternedString;
+use syntax::ast;
+use std::rc::Rc;
+use hir::def_id::DefId;
+use std::hash;
+use std::mem::transmute;
+use rustc_const_math::*;
+use self::ConstVal::*;
+
+#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
+pub enum ConstVal {
+    Float(f64),
+    Integral(ConstInt),
+    Str(InternedString),
+    ByteStr(Rc<Vec<u8>>),
+    Bool(bool),
+    Struct(ast::NodeId),
+    Tuple(ast::NodeId),
+    Function(DefId),
+    Array(ast::NodeId, u64),
+    Repeat(ast::NodeId, u64),
+    Char(char),
+    /// A value that only occurs in case `eval_const_expr` reported an error. You should never
+    /// handle this case. Its sole purpose is to allow more errors to be reported instead of
+    /// causing a fatal error.
+    Dummy,
+}
+
+impl hash::Hash for ConstVal {
+    fn hash<H: hash::Hasher>(&self, state: &mut H) {
+        match *self {
+            Float(a) => unsafe { transmute::<_,u64>(a) }.hash(state),
+            Integral(a) => a.hash(state),
+            Str(ref a) => a.hash(state),
+            ByteStr(ref a) => a.hash(state),
+            Bool(a) => a.hash(state),
+            Struct(a) => a.hash(state),
+            Tuple(a) => a.hash(state),
+            Function(a) => a.hash(state),
+            Array(a, n) => { a.hash(state); n.hash(state) },
+            Repeat(a, n) => { a.hash(state); n.hash(state) },
+            Char(c) => c.hash(state),
+            Dummy => ().hash(state),
+        }
+    }
+}
+
+/// Note that equality for `ConstVal` means that it is the same
+/// constant, not that the Rust values are equal. In particular, `NaN
+/// == NaN` (at least if it's the same NaN; distinct encodings for NaN
+/// are considered unequal).
+impl PartialEq for ConstVal {
+    fn eq(&self, other: &ConstVal) -> bool {
+        match (self, other) {
+            (&Float(a), &Float(b)) => unsafe{transmute::<_,u64>(a) == transmute::<_,u64>(b)},
+            (&Integral(a), &Integral(b)) => a == b,
+            (&Str(ref a), &Str(ref b)) => a == b,
+            (&ByteStr(ref a), &ByteStr(ref b)) => a == b,
+            (&Bool(a), &Bool(b)) => a == b,
+            (&Struct(a), &Struct(b)) => a == b,
+            (&Tuple(a), &Tuple(b)) => a == b,
+            (&Function(a), &Function(b)) => a == b,
+            (&Array(a, an), &Array(b, bn)) => (a == b) && (an == bn),
+            (&Repeat(a, an), &Repeat(b, bn)) => (a == b) && (an == bn),
+            (&Char(a), &Char(b)) => a == b,
+            (&Dummy, &Dummy) => true, // FIXME: should this be false?
+            _ => false,
+        }
+    }
+}
+
+impl Eq for ConstVal { }
+
+impl ConstVal {
+    pub fn description(&self) -> &'static str {
+        match *self {
+            Float(_) => "float",
+            Integral(i) => i.description(),
+            Str(_) => "string literal",
+            ByteStr(_) => "byte string literal",
+            Bool(_) => "boolean",
+            Struct(_) => "struct",
+            Tuple(_) => "tuple",
+            Function(_) => "function definition",
+            Array(..) => "array",
+            Repeat(..) => "repeat",
+            Char(..) => "char",
+            Dummy => "dummy value",
+        }
+    }
+}
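
The PartialEq impl added above compares floats by bit pattern, so ConstVal equality means "the same constant" rather than IEEE equality. A standalone sketch of the distinction, using plain Rust and to_bits in place of the transmute used in the impl:

    fn main() {
        let a = f64::NAN;
        let b = f64::NAN;
        // IEEE 754 comparison: NaN is never equal to anything, itself included.
        assert!(a != b);
        // Comparing bit patterns, as ConstVal::eq effectively does, treats two
        // NaNs with identical encodings as the same constant.
        assert_eq!(a.to_bits(), b.to_bits());
    }
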
index 3b72685eca310fd66749ec61d3793527792ef85b..d3db0804c24229abf30cdc23bbe44d66529518ac 100644 (file)
 // are *mostly* used as a part of that interface, but these should
 // probably get a better home if someone can find one.
 
-use back::svh::Svh;
-use front::map as hir_map;
-use middle::def::{self, Def};
+use hir::svh::Svh;
+use hir::map as hir_map;
+use hir::def::{self, Def};
 use middle::lang_items;
-use middle::ty::{self, Ty, VariantKind};
-use middle::def_id::{DefId, DefIndex};
+use ty::{self, Ty, TyCtxt, VariantKind};
+use hir::def_id::{DefId, DefIndex};
 use mir::repr::Mir;
 use mir::mir_map::MirMap;
 use session::Session;
 use session::search_paths::PathKind;
-use util::nodemap::{FnvHashMap, NodeMap, NodeSet};
+use util::nodemap::{FnvHashMap, NodeMap, NodeSet, DefIdMap};
 use std::any::Any;
 use std::cell::RefCell;
 use std::rc::Rc;
 use std::path::PathBuf;
 use syntax::ast;
-use syntax::ast_util::{IdVisitingOperation};
 use syntax::attr;
 use syntax::codemap::Span;
 use syntax::ptr::P;
+use syntax::parse::token::InternedString;
 use rustc_back::target::Target;
-use rustc_front::hir;
-use rustc_front::intravisit::Visitor;
-use rustc_front::util::IdVisitor;
+use hir;
+use hir::intravisit::{IdVisitor, IdVisitingOperation, Visitor};
 
 pub use self::DefLike::{DlDef, DlField, DlImpl};
 pub use self::NativeLibraryKind::{NativeStatic, NativeFramework, NativeUnknown};
@@ -117,15 +116,36 @@ pub const LOCAL_CRATE: ast::CrateNum = 0;
 pub struct ChildItem {
     pub def: DefLike,
     pub name: ast::Name,
-    pub vis: hir::Visibility
+    pub vis: ty::Visibility,
 }
 
 pub enum FoundAst<'ast> {
     Found(&'ast InlinedItem),
-    FoundParent(DefId, &'ast InlinedItem),
+    FoundParent(DefId, &'ast hir::Item),
     NotFound,
 }
 
+#[derive(Copy, Clone, Debug)]
+pub struct ExternCrate {
+    /// def_id of an `extern crate` in the current crate that caused
+    /// this crate to be loaded; note that there could be multiple
+    /// such ids
+    pub def_id: DefId,
+
+    /// span of the extern crate that caused this to be loaded
+    pub span: Span,
+
+    /// If true, then this crate is the crate named by the extern
+    /// crate referenced above. If false, then this crate is a
+    /// dependency of that crate.
+    pub direct: bool,
+
+    /// Number of links to reach the extern crate `def_id`
+    /// declaration; used to select the extern crate with the shortest
+    /// path
+    pub path_len: usize,
+}
+
 /// A store of Rust crates, through which their metadata
 /// can be accessed.
 ///
@@ -137,57 +157,58 @@ pub trait CrateStore<'tcx> : Any {
     // item info
     fn stability(&self, def: DefId) -> Option<attr::Stability>;
     fn deprecation(&self, def: DefId) -> Option<attr::Deprecation>;
-    fn closure_kind(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId)
+    fn visibility(&self, def: DefId) -> ty::Visibility;
+    fn closure_kind(&self, tcx: &TyCtxt<'tcx>, def_id: DefId)
                     -> ty::ClosureKind;
-    fn closure_ty(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId)
+    fn closure_ty(&self, tcx: &TyCtxt<'tcx>, def_id: DefId)
                   -> ty::ClosureTy<'tcx>;
     fn item_variances(&self, def: DefId) -> ty::ItemVariances;
     fn repr_attrs(&self, def: DefId) -> Vec<attr::ReprAttr>;
-    fn item_type(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn item_type(&self, tcx: &TyCtxt<'tcx>, def: DefId)
                  -> ty::TypeScheme<'tcx>;
-    fn item_path(&self, def: DefId) -> Vec<hir_map::PathElem>;
-    fn extern_item_path(&self, def: DefId) -> Vec<hir_map::PathElem>;
+    fn visible_parent_map<'a>(&'a self) -> ::std::cell::RefMut<'a, DefIdMap<DefId>>;
     fn item_name(&self, def: DefId) -> ast::Name;
-    fn item_predicates(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn item_predicates(&self, tcx: &TyCtxt<'tcx>, def: DefId)
                        -> ty::GenericPredicates<'tcx>;
-    fn item_super_predicates(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn item_super_predicates(&self, tcx: &TyCtxt<'tcx>, def: DefId)
                              -> ty::GenericPredicates<'tcx>;
     fn item_attrs(&self, def_id: DefId) -> Vec<ast::Attribute>;
     fn item_symbol(&self, def: DefId) -> String;
-    fn trait_def(&self, tcx: &ty::ctxt<'tcx>, def: DefId)-> ty::TraitDef<'tcx>;
-    fn adt_def(&self, tcx: &ty::ctxt<'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx>;
+    fn trait_def(&self, tcx: &TyCtxt<'tcx>, def: DefId)-> ty::TraitDef<'tcx>;
+    fn adt_def(&self, tcx: &TyCtxt<'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx>;
     fn method_arg_names(&self, did: DefId) -> Vec<String>;
     fn inherent_implementations_for_type(&self, def_id: DefId) -> Vec<DefId>;
 
     // trait info
     fn implementations_of_trait(&self, def_id: DefId) -> Vec<DefId>;
-    fn provided_trait_methods(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn provided_trait_methods(&self, tcx: &TyCtxt<'tcx>, def: DefId)
                               -> Vec<Rc<ty::Method<'tcx>>>;
     fn trait_item_def_ids(&self, def: DefId)
                           -> Vec<ty::ImplOrTraitItemId>;
 
     // impl info
     fn impl_items(&self, impl_def_id: DefId) -> Vec<ty::ImplOrTraitItemId>;
-    fn impl_trait_ref(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn impl_trait_ref(&self, tcx: &TyCtxt<'tcx>, def: DefId)
                       -> Option<ty::TraitRef<'tcx>>;
     fn impl_polarity(&self, def: DefId) -> Option<hir::ImplPolarity>;
     fn custom_coerce_unsized_kind(&self, def: DefId)
                                   -> Option<ty::adjustment::CustomCoerceUnsized>;
-    fn associated_consts(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn associated_consts(&self, tcx: &TyCtxt<'tcx>, def: DefId)
                          -> Vec<Rc<ty::AssociatedConst<'tcx>>>;
+    fn impl_parent(&self, impl_def_id: DefId) -> Option<DefId>;
 
     // trait/impl-item info
-    fn trait_of_item(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId)
+    fn trait_of_item(&self, tcx: &TyCtxt<'tcx>, def_id: DefId)
                      -> Option<DefId>;
-    fn impl_or_trait_item(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
-                          -> ty::ImplOrTraitItem<'tcx>;
+    fn impl_or_trait_item(&self, tcx: &TyCtxt<'tcx>, def: DefId)
+                          -> Option<ty::ImplOrTraitItem<'tcx>>;
 
     // flags
     fn is_const_fn(&self, did: DefId) -> bool;
     fn is_defaulted_trait(&self, did: DefId) -> bool;
     fn is_impl(&self, did: DefId) -> bool;
     fn is_default_impl(&self, impl_did: DefId) -> bool;
-    fn is_extern_item(&self, tcx: &ty::ctxt<'tcx>, did: DefId) -> bool;
+    fn is_extern_item(&self, tcx: &TyCtxt<'tcx>, did: DefId) -> bool;
     fn is_static_method(&self, did: DefId) -> bool;
     fn is_statically_included_foreign_item(&self, id: ast::NodeId) -> bool;
     fn is_typedef(&self, did: DefId) -> bool;
@@ -200,9 +221,15 @@ pub trait CrateStore<'tcx> : Any {
     fn is_staged_api(&self, cnum: ast::CrateNum) -> bool;
     fn is_explicitly_linked(&self, cnum: ast::CrateNum) -> bool;
     fn is_allocator(&self, cnum: ast::CrateNum) -> bool;
+    fn extern_crate(&self, cnum: ast::CrateNum) -> Option<ExternCrate>;
     fn crate_attrs(&self, cnum: ast::CrateNum) -> Vec<ast::Attribute>;
-    fn crate_name(&self, cnum: ast::CrateNum) -> String;
+    /// The name of the crate as it is referred to in source code of the current
+    /// crate.
+    fn crate_name(&self, cnum: ast::CrateNum) -> InternedString;
+    /// The name of the crate as it is stored in the crate's metadata.
+    fn original_crate_name(&self, cnum: ast::CrateNum) -> InternedString;
     fn crate_hash(&self, cnum: ast::CrateNum) -> Svh;
+    fn crate_disambiguator(&self, cnum: ast::CrateNum) -> InternedString;
     fn crate_struct_field_attrs(&self, cnum: ast::CrateNum)
                                 -> FnvHashMap<DefId, Vec<ast::Attribute>>;
     fn plugin_registrar_fn(&self, cnum: ast::CrateNum) -> Option<DefId>;
@@ -210,7 +237,8 @@ pub trait CrateStore<'tcx> : Any {
     fn reachable_ids(&self, cnum: ast::CrateNum) -> Vec<DefId>;
 
     // resolve
-    fn def_path(&self, def: DefId) -> hir_map::DefPath;
+    fn def_key(&self, def: DefId) -> hir_map::DefKey;
+    fn relative_def_path(&self, def: DefId) -> hir_map::DefPath;
     fn variant_kind(&self, def_id: DefId) -> Option<VariantKind>;
     fn struct_ctor_def_id(&self, struct_def_id: DefId) -> Option<DefId>;
     fn tuple_struct_definition_if_ctor(&self, did: DefId) -> Option<DefId>;
@@ -219,9 +247,9 @@ pub trait CrateStore<'tcx> : Any {
     fn crate_top_level_items(&self, cnum: ast::CrateNum) -> Vec<ChildItem>;
 
     // misc. metadata
-    fn maybe_get_item_ast(&'tcx self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn maybe_get_item_ast(&'tcx self, tcx: &TyCtxt<'tcx>, def: DefId)
                           -> FoundAst<'tcx>;
-    fn maybe_get_item_mir(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn maybe_get_item_mir(&self, tcx: &TyCtxt<'tcx>, def: DefId)
                           -> Option<Mir<'tcx>>;
     fn is_item_mir_available(&self, def: DefId) -> bool;
 
@@ -234,12 +262,16 @@ pub trait CrateStore<'tcx> : Any {
     // utility functions
     fn metadata_filename(&self) -> &str;
     fn metadata_section_name(&self, target: &Target) -> &str;
-    fn encode_type(&self, tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> Vec<u8>;
+    fn encode_type(&self,
+                   tcx: &TyCtxt<'tcx>,
+                   ty: Ty<'tcx>,
+                   def_id_to_string: fn(&TyCtxt<'tcx>, DefId) -> String)
+                   -> Vec<u8>;
     fn used_crates(&self, prefer: LinkagePreference) -> Vec<(ast::CrateNum, Option<PathBuf>)>;
     fn used_crate_source(&self, cnum: ast::CrateNum) -> CrateSource;
     fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option<ast::CrateNum>;
     fn encode_metadata(&self,
-                       tcx: &ty::ctxt<'tcx>,
+                       tcx: &TyCtxt<'tcx>,
                        reexports: &def::ExportMap,
                        item_symbols: &RefCell<NodeMap<String>>,
                        link_meta: &LinkMeta,
@@ -273,7 +305,7 @@ pub fn validate_crate_name(sess: Option<&Session>, s: &str, sp: Option<Span>) {
     {
         let mut say = |s: &str| {
             match (sp, sess) {
-                (_, None) => panic!("{}", s),
+                (_, None) => bug!("{}", s),
                 (Some(sp), Some(sess)) => sess.span_err(sp, s),
                 (None, Some(sess)) => sess.err(s),
             }
@@ -300,110 +332,120 @@ pub struct DummyCrateStore;
 #[allow(unused_variables)]
 impl<'tcx> CrateStore<'tcx> for DummyCrateStore {
     // item info
-    fn stability(&self, def: DefId) -> Option<attr::Stability> { unimplemented!() }
-    fn deprecation(&self, def: DefId) -> Option<attr::Deprecation> { unimplemented!() }
-    fn closure_kind(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId)
-                    -> ty::ClosureKind  { unimplemented!() }
-    fn closure_ty(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId)
-                  -> ty::ClosureTy<'tcx>  { unimplemented!() }
-    fn item_variances(&self, def: DefId) -> ty::ItemVariances { unimplemented!() }
-    fn repr_attrs(&self, def: DefId) -> Vec<attr::ReprAttr> { unimplemented!() }
-    fn item_type(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
-                 -> ty::TypeScheme<'tcx> { unimplemented!() }
-    fn item_path(&self, def: DefId) -> Vec<hir_map::PathElem> { unimplemented!() }
-    fn extern_item_path(&self, def: DefId) -> Vec<hir_map::PathElem> { unimplemented!() }
-    fn item_name(&self, def: DefId) -> ast::Name { unimplemented!() }
-    fn item_predicates(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
-                       -> ty::GenericPredicates<'tcx> { unimplemented!() }
-    fn item_super_predicates(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
-                             -> ty::GenericPredicates<'tcx> { unimplemented!() }
-    fn item_attrs(&self, def_id: DefId) -> Vec<ast::Attribute> { unimplemented!() }
-    fn item_symbol(&self, def: DefId) -> String { unimplemented!() }
-    fn trait_def(&self, tcx: &ty::ctxt<'tcx>, def: DefId)-> ty::TraitDef<'tcx>
-        { unimplemented!() }
-    fn adt_def(&self, tcx: &ty::ctxt<'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx>
-        { unimplemented!() }
-    fn method_arg_names(&self, did: DefId) -> Vec<String> { unimplemented!() }
+    fn stability(&self, def: DefId) -> Option<attr::Stability> { bug!("stability") }
+    fn deprecation(&self, def: DefId) -> Option<attr::Deprecation> { bug!("deprecation") }
+    fn visibility(&self, def: DefId) -> ty::Visibility { bug!("visibility") }
+    fn closure_kind(&self, tcx: &TyCtxt<'tcx>, def_id: DefId)
+                    -> ty::ClosureKind  { bug!("closure_kind") }
+    fn closure_ty(&self, tcx: &TyCtxt<'tcx>, def_id: DefId)
+                  -> ty::ClosureTy<'tcx>  { bug!("closure_ty") }
+    fn item_variances(&self, def: DefId) -> ty::ItemVariances { bug!("item_variances") }
+    fn repr_attrs(&self, def: DefId) -> Vec<attr::ReprAttr> { bug!("repr_attrs") }
+    fn item_type(&self, tcx: &TyCtxt<'tcx>, def: DefId)
+                 -> ty::TypeScheme<'tcx> { bug!("item_type") }
+    fn visible_parent_map<'a>(&'a self) -> ::std::cell::RefMut<'a, DefIdMap<DefId>> {
+        bug!("visible_parent_map")
+    }
+    fn item_name(&self, def: DefId) -> ast::Name { bug!("item_name") }
+    fn item_predicates(&self, tcx: &TyCtxt<'tcx>, def: DefId)
+                       -> ty::GenericPredicates<'tcx> { bug!("item_predicates") }
+    fn item_super_predicates(&self, tcx: &TyCtxt<'tcx>, def: DefId)
+                             -> ty::GenericPredicates<'tcx> { bug!("item_super_predicates") }
+    fn item_attrs(&self, def_id: DefId) -> Vec<ast::Attribute> { bug!("item_attrs") }
+    fn item_symbol(&self, def: DefId) -> String { bug!("item_symbol") }
+    fn trait_def(&self, tcx: &TyCtxt<'tcx>, def: DefId)-> ty::TraitDef<'tcx>
+        { bug!("trait_def") }
+    fn adt_def(&self, tcx: &TyCtxt<'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx>
+        { bug!("adt_def") }
+    fn method_arg_names(&self, did: DefId) -> Vec<String> { bug!("method_arg_names") }
     fn inherent_implementations_for_type(&self, def_id: DefId) -> Vec<DefId> { vec![] }
 
     // trait info
     fn implementations_of_trait(&self, def_id: DefId) -> Vec<DefId> { vec![] }
-    fn provided_trait_methods(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
-                              -> Vec<Rc<ty::Method<'tcx>>> { unimplemented!() }
+    fn provided_trait_methods(&self, tcx: &TyCtxt<'tcx>, def: DefId)
+                              -> Vec<Rc<ty::Method<'tcx>>> { bug!("provided_trait_methods") }
     fn trait_item_def_ids(&self, def: DefId)
-                          -> Vec<ty::ImplOrTraitItemId> { unimplemented!() }
+                          -> Vec<ty::ImplOrTraitItemId> { bug!("trait_item_def_ids") }
 
     // impl info
     fn impl_items(&self, impl_def_id: DefId) -> Vec<ty::ImplOrTraitItemId>
-        { unimplemented!() }
-    fn impl_trait_ref(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
-                      -> Option<ty::TraitRef<'tcx>> { unimplemented!() }
-    fn impl_polarity(&self, def: DefId) -> Option<hir::ImplPolarity> { unimplemented!() }
+        { bug!("impl_items") }
+    fn impl_trait_ref(&self, tcx: &TyCtxt<'tcx>, def: DefId)
+                      -> Option<ty::TraitRef<'tcx>> { bug!("impl_trait_ref") }
+    fn impl_polarity(&self, def: DefId) -> Option<hir::ImplPolarity> { bug!("impl_polarity") }
     fn custom_coerce_unsized_kind(&self, def: DefId)
                                   -> Option<ty::adjustment::CustomCoerceUnsized>
-        { unimplemented!() }
-    fn associated_consts(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
-                         -> Vec<Rc<ty::AssociatedConst<'tcx>>> { unimplemented!() }
+        { bug!("custom_coerce_unsized_kind") }
+    fn associated_consts(&self, tcx: &TyCtxt<'tcx>, def: DefId)
+                         -> Vec<Rc<ty::AssociatedConst<'tcx>>> { bug!("associated_consts") }
+    fn impl_parent(&self, def: DefId) -> Option<DefId> { bug!("impl_parent") }
 
     // trait/impl-item info
-    fn trait_of_item(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId)
-                     -> Option<DefId> { unimplemented!() }
-    fn impl_or_trait_item(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
-                          -> ty::ImplOrTraitItem<'tcx> { unimplemented!() }
+    fn trait_of_item(&self, tcx: &TyCtxt<'tcx>, def_id: DefId)
+                     -> Option<DefId> { bug!("trait_of_item") }
+    fn impl_or_trait_item(&self, tcx: &TyCtxt<'tcx>, def: DefId)
+                          -> Option<ty::ImplOrTraitItem<'tcx>> { bug!("impl_or_trait_item") }
 
     // flags
-    fn is_const_fn(&self, did: DefId) -> bool { unimplemented!() }
-    fn is_defaulted_trait(&self, did: DefId) -> bool { unimplemented!() }
-    fn is_impl(&self, did: DefId) -> bool { unimplemented!() }
-    fn is_default_impl(&self, impl_did: DefId) -> bool { unimplemented!() }
-    fn is_extern_item(&self, tcx: &ty::ctxt<'tcx>, did: DefId) -> bool { unimplemented!() }
-    fn is_static_method(&self, did: DefId) -> bool { unimplemented!() }
+    fn is_const_fn(&self, did: DefId) -> bool { bug!("is_const_fn") }
+    fn is_defaulted_trait(&self, did: DefId) -> bool { bug!("is_defaulted_trait") }
+    fn is_impl(&self, did: DefId) -> bool { bug!("is_impl") }
+    fn is_default_impl(&self, impl_did: DefId) -> bool { bug!("is_default_impl") }
+    fn is_extern_item(&self, tcx: &TyCtxt<'tcx>, did: DefId) -> bool { bug!("is_extern_item") }
+    fn is_static_method(&self, did: DefId) -> bool { bug!("is_static_method") }
     fn is_statically_included_foreign_item(&self, id: ast::NodeId) -> bool { false }
-    fn is_typedef(&self, did: DefId) -> bool { unimplemented!() }
+    fn is_typedef(&self, did: DefId) -> bool { bug!("is_typedef") }
 
     // crate metadata
     fn dylib_dependency_formats(&self, cnum: ast::CrateNum)
                                     -> Vec<(ast::CrateNum, LinkagePreference)>
-        { unimplemented!() }
+        { bug!("dylib_dependency_formats") }
     fn lang_items(&self, cnum: ast::CrateNum) -> Vec<(DefIndex, usize)>
-        { unimplemented!() }
+        { bug!("lang_items") }
     fn missing_lang_items(&self, cnum: ast::CrateNum) -> Vec<lang_items::LangItem>
-        { unimplemented!() }
-    fn is_staged_api(&self, cnum: ast::CrateNum) -> bool { unimplemented!() }
-    fn is_explicitly_linked(&self, cnum: ast::CrateNum) -> bool { unimplemented!() }
-    fn is_allocator(&self, cnum: ast::CrateNum) -> bool { unimplemented!() }
+        { bug!("missing_lang_items") }
+    fn is_staged_api(&self, cnum: ast::CrateNum) -> bool { bug!("is_staged_api") }
+    fn is_explicitly_linked(&self, cnum: ast::CrateNum) -> bool { bug!("is_explicitly_linked") }
+    fn is_allocator(&self, cnum: ast::CrateNum) -> bool { bug!("is_allocator") }
+    fn extern_crate(&self, cnum: ast::CrateNum) -> Option<ExternCrate> { bug!("extern_crate") }
     fn crate_attrs(&self, cnum: ast::CrateNum) -> Vec<ast::Attribute>
-        { unimplemented!() }
-    fn crate_name(&self, cnum: ast::CrateNum) -> String { unimplemented!() }
-    fn crate_hash(&self, cnum: ast::CrateNum) -> Svh { unimplemented!() }
+        { bug!("crate_attrs") }
+    fn crate_name(&self, cnum: ast::CrateNum) -> InternedString { bug!("crate_name") }
+    fn original_crate_name(&self, cnum: ast::CrateNum) -> InternedString {
+        bug!("original_crate_name")
+    }
+    fn crate_hash(&self, cnum: ast::CrateNum) -> Svh { bug!("crate_hash") }
+    fn crate_disambiguator(&self, cnum: ast::CrateNum)
+                           -> InternedString { bug!("crate_disambiguator") }
     fn crate_struct_field_attrs(&self, cnum: ast::CrateNum)
                                 -> FnvHashMap<DefId, Vec<ast::Attribute>>
-        { unimplemented!() }
+        { bug!("crate_struct_field_attrs") }
     fn plugin_registrar_fn(&self, cnum: ast::CrateNum) -> Option<DefId>
-        { unimplemented!() }
+        { bug!("plugin_registrar_fn") }
     fn native_libraries(&self, cnum: ast::CrateNum) -> Vec<(NativeLibraryKind, String)>
-        { unimplemented!() }
-    fn reachable_ids(&self, cnum: ast::CrateNum) -> Vec<DefId> { unimplemented!() }
+        { bug!("native_libraries") }
+    fn reachable_ids(&self, cnum: ast::CrateNum) -> Vec<DefId> { bug!("reachable_ids") }
 
     // resolve
-    fn def_path(&self, def: DefId) -> hir_map::DefPath { unimplemented!() }
-    fn variant_kind(&self, def_id: DefId) -> Option<VariantKind> { unimplemented!() }
+    fn def_key(&self, def: DefId) -> hir_map::DefKey { bug!("def_key") }
+    fn relative_def_path(&self, def: DefId) -> hir_map::DefPath { bug!("relative_def_path") }
+    fn variant_kind(&self, def_id: DefId) -> Option<VariantKind> { bug!("variant_kind") }
     fn struct_ctor_def_id(&self, struct_def_id: DefId) -> Option<DefId>
-        { unimplemented!() }
+        { bug!("struct_ctor_def_id") }
     fn tuple_struct_definition_if_ctor(&self, did: DefId) -> Option<DefId>
-        { unimplemented!() }
-    fn struct_field_names(&self, def: DefId) -> Vec<ast::Name> { unimplemented!() }
-    fn item_children(&self, did: DefId) -> Vec<ChildItem> { unimplemented!() }
+        { bug!("tuple_struct_definition_if_ctor") }
+    fn struct_field_names(&self, def: DefId) -> Vec<ast::Name> { bug!("struct_field_names") }
+    fn item_children(&self, did: DefId) -> Vec<ChildItem> { bug!("item_children") }
     fn crate_top_level_items(&self, cnum: ast::CrateNum) -> Vec<ChildItem>
-        { unimplemented!() }
+        { bug!("crate_top_level_items") }
 
     // misc. metadata
-    fn maybe_get_item_ast(&'tcx self, tcx: &ty::ctxt<'tcx>, def: DefId)
-                          -> FoundAst<'tcx> { unimplemented!() }
-    fn maybe_get_item_mir(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
-                          -> Option<Mir<'tcx>> { unimplemented!() }
+    fn maybe_get_item_ast(&'tcx self, tcx: &TyCtxt<'tcx>, def: DefId)
+                          -> FoundAst<'tcx> { bug!("maybe_get_item_ast") }
+    fn maybe_get_item_mir(&self, tcx: &TyCtxt<'tcx>, def: DefId)
+                          -> Option<Mir<'tcx>> { bug!("maybe_get_item_mir") }
     fn is_item_mir_available(&self, def: DefId) -> bool {
-        unimplemented!()
+        bug!("is_item_mir_available")
     }
 
     // This is basically a 1-based range of ints, which is a little
@@ -413,23 +455,28 @@ impl<'tcx> CrateStore<'tcx> for DummyCrateStore {
     fn used_link_args(&self) -> Vec<String> { vec![] }
 
     // utility functions
-    fn metadata_filename(&self) -> &str { unimplemented!() }
-    fn metadata_section_name(&self, target: &Target) -> &str { unimplemented!() }
-    fn encode_type(&self, tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> Vec<u8>
-        { unimplemented!() }
+    fn metadata_filename(&self) -> &str { bug!("metadata_filename") }
+    fn metadata_section_name(&self, target: &Target) -> &str { bug!("metadata_section_name") }
+    fn encode_type(&self,
+                   tcx: &TyCtxt<'tcx>,
+                   ty: Ty<'tcx>,
+                   def_id_to_string: fn(&TyCtxt<'tcx>, DefId) -> String)
+                   -> Vec<u8> {
+        bug!("encode_type")
+    }
     fn used_crates(&self, prefer: LinkagePreference) -> Vec<(ast::CrateNum, Option<PathBuf>)>
         { vec![] }
-    fn used_crate_source(&self, cnum: ast::CrateNum) -> CrateSource { unimplemented!() }
+    fn used_crate_source(&self, cnum: ast::CrateNum) -> CrateSource { bug!("used_crate_source") }
     fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option<ast::CrateNum> { None }
     fn encode_metadata(&self,
-                       tcx: &ty::ctxt<'tcx>,
+                       tcx: &TyCtxt<'tcx>,
                        reexports: &def::ExportMap,
                        item_symbols: &RefCell<NodeMap<String>>,
                        link_meta: &LinkMeta,
                        reachable: &NodeSet,
                        mir_map: &MirMap<'tcx>,
                        krate: &hir::Crate) -> Vec<u8> { vec![] }
-    fn metadata_encoding_version(&self) -> &[u8] { unimplemented!() }
+    fn metadata_encoding_version(&self) -> &[u8] { bug!("metadata_encoding_version") }
 }
 
 
@@ -439,7 +486,7 @@ impl<'tcx> CrateStore<'tcx> for DummyCrateStore {
 /// be available to them. For example, we can automatically translate def-id and
 /// span information during decoding because the decoding context knows which
 /// crate the data is decoded from. Or it allows making ty::Ty decodable
-/// because the context has access to the ty::ctxt that is needed for creating
+/// because the context has access to the TyCtxt that is needed for creating
 /// ty::Ty instances.
 ///
 /// Note, however, that this only works for RBML-based encoding and decoding at
@@ -450,12 +497,12 @@ pub mod tls {
     use serialize;
     use std::cell::Cell;
     use std::mem;
-    use middle::ty::{self, Ty};
-    use middle::subst::Substs;
-    use middle::def_id::DefId;
+    use ty::{self, Ty, TyCtxt};
+    use ty::subst::Substs;
+    use hir::def_id::DefId;
 
     pub trait EncodingContext<'tcx> {
-        fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>;
+        fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx>;
         fn encode_ty(&self, encoder: &mut OpaqueEncoder, t: Ty<'tcx>);
         fn encode_substs(&self, encoder: &mut OpaqueEncoder, substs: &Substs<'tcx>);
     }
@@ -522,7 +569,7 @@ pub mod tls {
     }
 
     pub trait DecodingContext<'tcx> {
-        fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>;
+        fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx>;
         fn decode_ty(&self, decoder: &mut OpaqueDecoder) -> ty::Ty<'tcx>;
         fn decode_substs(&self, decoder: &mut OpaqueDecoder) -> Substs<'tcx>;
         fn translate_def_id(&self, def_id: DefId) -> DefId;
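
The DummyCrateStore hunks above replace each unimplemented!() stub with bug!("method_name"), so an unexpected call reports which method was reached. The following is only a hedged stand-in for that pattern; it is not rustc's actual bug! macro:

    // Minimal stand-in: every stub panics with its own name rather than
    // an anonymous `unimplemented!()`.
    macro_rules! bug {
        ($($arg:tt)*) => {
            panic!("internal compiler error: {}", format_args!($($arg)*))
        };
    }

    struct DummyStore;

    impl DummyStore {
        fn crate_hash(&self) -> u64 {
            bug!("crate_hash")
        }
    }

    fn main() {
        // Calling a stub now names the method that was hit.
        let result = std::panic::catch_unwind(|| DummyStore.crate_hash());
        assert!(result.is_err());
    }
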
index 3fc45c575f0454682edf081448a1dd9499a9319d..1aaaa4bcd77f6afc0e4ec114c5cd3e16ba979ea6 100644 (file)
 //! and thus uses bitvectors. Your job is simply to specify the so-called
 //! GEN and KILL bits for each expression.
 
-use middle::cfg;
-use middle::cfg::CFGIndex;
-use middle::ty;
+use cfg;
+use cfg::CFGIndex;
+use ty::TyCtxt;
 use std::io;
 use std::mem;
 use std::usize;
 use syntax::ast;
-use syntax::ast_util::IdRange;
 use syntax::print::pp;
 use syntax::print::pprust::PrintState;
 use util::nodemap::NodeMap;
-use rustc_front::hir;
-use rustc_front::intravisit;
-use rustc_front::print::pprust;
+use hir;
+use hir::intravisit::{self, IdRange};
+use hir::print as pprust;
 
 
 #[derive(Copy, Clone, Debug)]
@@ -38,7 +37,7 @@ pub enum EntryOrExit {
 
 #[derive(Clone)]
 pub struct DataFlowContext<'a, 'tcx: 'a, O> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
 
     /// a name for the analysis using this dataflow instance
     analysis_name: &'static str,
@@ -152,10 +151,10 @@ impl<'a, 'tcx, O:DataFlowOperator> pprust::PpAnn for DataFlowContext<'a, 'tcx, O
                 "".to_string()
             };
 
-            try!(ps.synth_comment(
+            ps.synth_comment(
                 format!("id {}: {}{}{}{}", id, entry_str,
-                        gens_str, action_kills_str, scope_kills_str)));
-            try!(pp::space(&mut ps.s));
+                        gens_str, action_kills_str, scope_kills_str))?;
+            pp::space(&mut ps.s)?;
         }
         Ok(())
     }
@@ -223,7 +222,7 @@ pub enum KillFrom {
 }
 
 impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
-    pub fn new(tcx: &'a ty::ctxt<'tcx>,
+    pub fn new(tcx: &'a TyCtxt<'tcx>,
                analysis_name: &'static str,
                decl: Option<&hir::FnDecl>,
                cfg: &cfg::CFG,
@@ -534,9 +533,9 @@ impl<'a, 'tcx, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, 'tcx, O> {
     fn pretty_print_to<'b>(&self, wr: Box<io::Write + 'b>,
                            blk: &hir::Block) -> io::Result<()> {
         let mut ps = pprust::rust_printer_annotated(wr, self, None);
-        try!(ps.cbox(pprust::indent_unit));
-        try!(ps.ibox(0));
-        try!(ps.print_block(blk));
+        ps.cbox(pprust::indent_unit)?;
+        ps.ibox(0)?;
+        ps.print_block(blk)?;
         pp::eof(&mut ps.s)
     }
 }
@@ -653,7 +652,7 @@ fn set_bit(words: &mut [usize], bit: usize) -> bool {
     let word = bit / usize_bits;
     let bit_in_word = bit % usize_bits;
     let bit_mask = 1 << bit_in_word;
-    debug!("word={} bit_in_word={} bit_mask={}", word, bit_in_word, word);
+    debug!("word={} bit_in_word={} bit_mask={}", word, bit_in_word, bit_mask);
     let oldv = words[word];
     let newv = oldv | bit_mask;
     words[word] = newv;
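
The last hunk above also fixes set_bit's debug! call, which logged `word` where it meant `bit_mask`. For reference, a self-contained sketch of the same word/bit arithmetic, outside the dataflow framework:

    /// Sets `bit` in a word-packed bitvector; returns whether the word changed.
    fn set_bit(words: &mut [usize], bit: usize) -> bool {
        let usize_bits = std::mem::size_of::<usize>() * 8;
        let word = bit / usize_bits;        // which word holds the bit
        let bit_in_word = bit % usize_bits; // position inside that word
        let bit_mask = 1 << bit_in_word;
        let oldv = words[word];
        let newv = oldv | bit_mask;
        words[word] = newv;
        oldv != newv
    }

    fn main() {
        let mut words = vec![0usize; 1];
        assert!(set_bit(&mut words, 5));  // first set flips the bit
        assert!(!set_bit(&mut words, 5)); // second set is a no-op
        assert_eq!(words[0], 1 << 5);
    }
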
index 92372870cd16e0d2f8ae7b4df59b4f6ae3355eb7..dcdc02c9df6097d62f26c935da6f4552fd7ad586 100644 (file)
 // from live codes are live, and everything else is dead.
 
 use dep_graph::DepNode;
-use front::map as ast_map;
-use rustc_front::hir::{self, PatKind};
-use rustc_front::intravisit::{self, Visitor};
-
-use middle::{pat_util, privacy, ty};
-use middle::def::Def;
-use middle::def_id::{DefId};
+use hir::map as ast_map;
+use hir::{self, pat_util, PatKind};
+use hir::intravisit::{self, Visitor};
+
+use middle::privacy;
+use ty::{self, TyCtxt};
+use hir::def::Def;
+use hir::def_id::{DefId};
 use lint;
 
 use std::collections::HashSet;
@@ -30,7 +31,7 @@ use syntax::attr::{self, AttrMetaMethods};
 // explored. For example, if it's a live NodeItem that is a
 // function, then we should explore its block to check for codes that
 // may need to be marked as live.
-fn should_explore(tcx: &ty::ctxt, node_id: ast::NodeId) -> bool {
+fn should_explore(tcx: &TyCtxt, node_id: ast::NodeId) -> bool {
     match tcx.map.find(node_id) {
         Some(ast_map::NodeItem(..)) |
         Some(ast_map::NodeImplItem(..)) |
@@ -44,7 +45,7 @@ fn should_explore(tcx: &ty::ctxt, node_id: ast::NodeId) -> bool {
 
 struct MarkSymbolVisitor<'a, 'tcx: 'a> {
     worklist: Vec<ast::NodeId>,
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     live_symbols: Box<HashSet<ast::NodeId>>,
     struct_has_extern_repr: bool,
     ignore_non_const_paths: bool,
@@ -53,7 +54,7 @@ struct MarkSymbolVisitor<'a, 'tcx: 'a> {
 }
 
 impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> {
-    fn new(tcx: &'a ty::ctxt<'tcx>,
+    fn new(tcx: &'a TyCtxt<'tcx>,
            worklist: Vec<ast::NodeId>) -> MarkSymbolVisitor<'a, 'tcx> {
         MarkSymbolVisitor {
             worklist: worklist,
@@ -83,7 +84,7 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> {
     }
 
     fn lookup_and_handle_definition(&mut self, id: &ast::NodeId) {
-        use middle::ty::TypeVariants::{TyEnum, TyStruct};
+        use ty::TypeVariants::{TyEnum, TyStruct};
 
         // If `bar` is a trait item, make sure to mark Foo as alive in `Foo::bar`
         self.tcx.tables.borrow().item_substs.get(id)
@@ -124,7 +125,7 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> {
         if let ty::TyStruct(def, _) = self.tcx.expr_ty_adjusted(lhs).sty {
             self.insert_def_id(def.struct_variant().field_named(name).did);
         } else {
-            self.tcx.sess.span_bug(lhs.span, "named field access on non-struct")
+            span_bug!(lhs.span, "named field access on non-struct")
         }
     }
 
@@ -140,7 +141,7 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> {
         let pat_ty = self.tcx.node_id_to_type(lhs.id);
         let variant = match pat_ty.sty {
             ty::TyStruct(adt, _) | ty::TyEnum(adt, _) => adt.variant_of_def(def),
-            _ => self.tcx.sess.span_bug(lhs.span, "non-ADT in struct pattern")
+            _ => span_bug!(lhs.span, "non-ADT in struct pattern")
         };
         for pat in pats {
             if let PatKind::Wild = pat.node.pat.node {
@@ -221,12 +222,9 @@ impl<'a, 'tcx, 'v> Visitor<'v> for MarkSymbolVisitor<'a, 'tcx> {
         let has_extern_repr = self.struct_has_extern_repr;
         let inherited_pub_visibility = self.inherited_pub_visibility;
         let live_fields = def.fields().iter().filter(|f| {
-            has_extern_repr || inherited_pub_visibility || match f.node.kind {
-                hir::NamedField(_, hir::Public) => true,
-                _ => false
-            }
+            has_extern_repr || inherited_pub_visibility || f.vis == hir::Public
         });
-        self.live_symbols.extend(live_fields.map(|f| f.node.id));
+        self.live_symbols.extend(live_fields.map(|f| f.id));
 
         intravisit::walk_struct_def(self, def);
     }
@@ -353,15 +351,9 @@ impl<'v> Visitor<'v> for LifeSeeder {
             }
             hir::ItemImpl(_, _, _, ref opt_trait, _, ref impl_items) => {
                 for impl_item in impl_items {
-                    match impl_item.node {
-                        hir::ImplItemKind::Const(..) |
-                        hir::ImplItemKind::Method(..) => {
-                            if opt_trait.is_some() ||
-                                    has_allow_dead_code_or_lang_attr(&impl_item.attrs) {
-                                self.worklist.push(impl_item.id);
-                            }
-                        }
-                        hir::ImplItemKind::Type(_) => {}
+                    if opt_trait.is_some() ||
+                            has_allow_dead_code_or_lang_attr(&impl_item.attrs) {
+                        self.worklist.push(impl_item.id);
                     }
                 }
             }
@@ -370,7 +362,7 @@ impl<'v> Visitor<'v> for LifeSeeder {
     }
 }
 
-fn create_and_seed_worklist(tcx: &ty::ctxt,
+fn create_and_seed_worklist(tcx: &TyCtxt,
                             access_levels: &privacy::AccessLevels,
                             krate: &hir::Crate) -> Vec<ast::NodeId> {
     let mut worklist = Vec::new();
@@ -393,7 +385,7 @@ fn create_and_seed_worklist(tcx: &ty::ctxt,
     return life_seeder.worklist;
 }
 
-fn find_live(tcx: &ty::ctxt,
+fn find_live(tcx: &TyCtxt,
              access_levels: &privacy::AccessLevels,
              krate: &hir::Crate)
              -> Box<HashSet<ast::NodeId>> {
@@ -413,7 +405,7 @@ fn get_struct_ctor_id(item: &hir::Item) -> Option<ast::NodeId> {
 }
 
 struct DeadVisitor<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     live_symbols: Box<HashSet<ast::NodeId>>,
 }
 
@@ -431,17 +423,16 @@ impl<'a, 'tcx> DeadVisitor<'a, 'tcx> {
         should_warn && !self.symbol_is_live(item.id, ctor_id)
     }
 
-    fn should_warn_about_field(&mut self, node: &hir::StructField_) -> bool {
-        let is_named = node.name().is_some();
-        let field_type = self.tcx.node_id_to_type(node.id);
+    fn should_warn_about_field(&mut self, field: &hir::StructField) -> bool {
+        let field_type = self.tcx.node_id_to_type(field.id);
         let is_marker_field = match field_type.ty_to_def_id() {
             Some(def_id) => self.tcx.lang_items.items().iter().any(|item| *item == Some(def_id)),
             _ => false
         };
-        is_named
-            && !self.symbol_is_live(node.id, None)
+        !field.is_positional()
+            && !self.symbol_is_live(field.id, None)
             && !is_marker_field
-            && !has_allow_dead_code_or_lang_attr(&node.attrs)
+            && !has_allow_dead_code_or_lang_attr(&field.attrs)
     }
 
     fn should_warn_about_variant(&mut self, variant: &hir::Variant_) -> bool {
@@ -547,9 +538,9 @@ impl<'a, 'tcx, 'v> Visitor<'v> for DeadVisitor<'a, 'tcx> {
     }
 
     fn visit_struct_field(&mut self, field: &hir::StructField) {
-        if self.should_warn_about_field(&field.node) {
-            self.warn_dead_code(field.node.id, field.span,
-                                field.node.name().unwrap(), "struct field");
+        if self.should_warn_about_field(&field) {
+            self.warn_dead_code(field.id, field.span,
+                                field.name, "struct field");
         }
 
         intravisit::walk_struct_field(self, field);
@@ -591,7 +582,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for DeadVisitor<'a, 'tcx> {
     }
 }
 
-pub fn check_crate(tcx: &ty::ctxt, access_levels: &privacy::AccessLevels) {
+pub fn check_crate(tcx: &TyCtxt, access_levels: &privacy::AccessLevels) {
     let _task = tcx.dep_graph.in_task(DepNode::DeadCheck);
     let krate = tcx.map.krate();
     let live_symbols = find_live(tcx, access_levels, krate);
diff --git a/src/librustc/middle/def.rs b/src/librustc/middle/def.rs
deleted file mode 100644 (file)
index aee8fb1..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use middle::def_id::DefId;
-use middle::privacy::LastPrivate;
-use middle::subst::ParamSpace;
-use util::nodemap::NodeMap;
-use syntax::ast;
-use rustc_front::hir;
-
-#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum Def {
-    Fn(DefId),
-    SelfTy(Option<DefId>,                    // trait id
-              Option<(ast::NodeId, ast::NodeId)>),   // (impl id, self type id)
-    Mod(DefId),
-    ForeignMod(DefId),
-    Static(DefId, bool /* is_mutbl */),
-    Const(DefId),
-    AssociatedConst(DefId),
-    Local(DefId, // def id of variable
-             ast::NodeId), // node id of variable
-    Variant(DefId /* enum */, DefId /* variant */),
-    Enum(DefId),
-    TyAlias(DefId),
-    AssociatedTy(DefId /* trait */, DefId),
-    Trait(DefId),
-    PrimTy(hir::PrimTy),
-    TyParam(ParamSpace, u32, DefId, ast::Name),
-    Upvar(DefId,        // def id of closed over local
-             ast::NodeId,  // node id of closed over local
-             usize,        // index in the freevars list of the closure
-             ast::NodeId), // expr node that creates the closure
-
-    // If Def::Struct lives in type namespace it denotes a struct item and its DefId refers
-    // to NodeId of the struct itself.
-    // If Def::Struct lives in value namespace (e.g. tuple struct, unit struct expressions)
-    // it denotes a constructor and its DefId refers to NodeId of the struct's constructor.
-    Struct(DefId),
-    Label(ast::NodeId),
-    Method(DefId),
-    Err,
-}
-
-/// The result of resolving a path.
-/// Before type checking completes, `depth` represents the number of
-/// trailing segments which are yet unresolved. Afterwards, if there
-/// were no errors, all paths should be fully resolved, with `depth`
-/// set to `0` and `base_def` representing the final resolution.
-///
-///     module::Type::AssocX::AssocY::MethodOrAssocType
-///     ^~~~~~~~~~~~  ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-///     base_def      depth = 3
-///
-///     <T as Trait>::AssocX::AssocY::MethodOrAssocType
-///           ^~~~~~~~~~~~~~  ^~~~~~~~~~~~~~~~~~~~~~~~~
-///           base_def        depth = 2
-#[derive(Copy, Clone, Debug)]
-pub struct PathResolution {
-    pub base_def: Def,
-    pub last_private: LastPrivate,
-    pub depth: usize
-}
-
-impl PathResolution {
-    /// Get the definition, if fully resolved, otherwise panic.
-    pub fn full_def(&self) -> Def {
-        if self.depth != 0 {
-            panic!("path not fully resolved: {:?}", self);
-        }
-        self.base_def
-    }
-
-    /// Get the DefId, if fully resolved, otherwise panic.
-    pub fn def_id(&self) -> DefId {
-        self.full_def().def_id()
-    }
-
-    pub fn new(base_def: Def,
-               last_private: LastPrivate,
-               depth: usize)
-               -> PathResolution {
-        PathResolution {
-            base_def: base_def,
-            last_private: last_private,
-            depth: depth,
-        }
-    }
-}
-
-// Definition mapping
-pub type DefMap = NodeMap<PathResolution>;
-// This is the replacement export map. It maps a module to all of the exports
-// within.
-pub type ExportMap = NodeMap<Vec<Export>>;
-
-#[derive(Copy, Clone)]
-pub struct Export {
-    pub name: ast::Name,    // The name of the target.
-    pub def_id: DefId, // The definition of the target.
-}
-
-impl Def {
-    pub fn var_id(&self) -> ast::NodeId {
-        match *self {
-            Def::Local(_, id) |
-            Def::Upvar(_, id, _, _) => {
-                id
-            }
-
-            Def::Fn(..) | Def::Mod(..) | Def::ForeignMod(..) | Def::Static(..) |
-            Def::Variant(..) | Def::Enum(..) | Def::TyAlias(..) | Def::AssociatedTy(..) |
-            Def::TyParam(..) | Def::Struct(..) | Def::Trait(..) |
-            Def::Method(..) | Def::Const(..) | Def::AssociatedConst(..) |
-            Def::PrimTy(..) | Def::Label(..) | Def::SelfTy(..) | Def::Err => {
-                panic!("attempted .var_id() on invalid {:?}", self)
-            }
-        }
-    }
-
-    pub fn def_id(&self) -> DefId {
-        match *self {
-            Def::Fn(id) | Def::Mod(id) | Def::ForeignMod(id) | Def::Static(id, _) |
-            Def::Variant(_, id) | Def::Enum(id) | Def::TyAlias(id) | Def::AssociatedTy(_, id) |
-            Def::TyParam(_, _, id, _) | Def::Struct(id) | Def::Trait(id) |
-            Def::Method(id) | Def::Const(id) | Def::AssociatedConst(id) |
-            Def::Local(id, _) | Def::Upvar(id, _, _, _) => {
-                id
-            }
-
-            Def::Label(..)  |
-            Def::PrimTy(..) |
-            Def::SelfTy(..) |
-            Def::Err => {
-                panic!("attempted .def_id() on invalid def: {:?}", self)
-            }
-        }
-    }
-
-    pub fn variant_def_ids(&self) -> Option<(DefId, DefId)> {
-        match *self {
-            Def::Variant(enum_id, var_id) => {
-                Some((enum_id, var_id))
-            }
-            _ => None
-        }
-    }
-}
diff --git a/src/librustc/middle/def_id.rs b/src/librustc/middle/def_id.rs
deleted file mode 100644 (file)
index 4d0005f..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use middle::cstore::LOCAL_CRATE;
-use middle::ty;
-use syntax::ast::CrateNum;
-use std::fmt;
-use std::u32;
-
-/// A DefIndex is an index into the hir-map for a crate, identifying a
-/// particular definition. It should really be considered an interned
-/// shorthand for a particular DefPath.
-#[derive(Clone, Debug, Eq, Ord, PartialOrd, PartialEq, RustcEncodable,
-           RustcDecodable, Hash, Copy)]
-pub struct DefIndex(u32);
-
-impl DefIndex {
-    pub fn new(x: usize) -> DefIndex {
-        assert!(x < (u32::MAX as usize));
-        DefIndex(x as u32)
-    }
-
-    pub fn from_u32(x: u32) -> DefIndex {
-        DefIndex(x)
-    }
-
-    pub fn as_usize(&self) -> usize {
-        self.0 as usize
-    }
-
-    pub fn as_u32(&self) -> u32 {
-        self.0
-    }
-}
-
-/// The crate root is always assigned index 0 by the AST Map code,
-/// thanks to `NodeCollector::new`.
-pub const CRATE_DEF_INDEX: DefIndex = DefIndex(0);
-
-/// A DefId identifies a particular *definition*, by combining a crate
-/// index and a def index.
-#[derive(Clone, Eq, Ord, PartialOrd, PartialEq, RustcEncodable,
-           RustcDecodable, Hash, Copy)]
-pub struct DefId {
-    pub krate: CrateNum,
-    pub index: DefIndex,
-}
-
-impl fmt::Debug for DefId {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        try!(write!(f, "DefId {{ krate: {:?}, node: {:?}",
-                    self.krate, self.index));
-
-        // Unfortunately, there seems to be no way to attempt to print
-        // a path for a def-id, so I'll just make a best effort for now
-        // and otherwise fallback to just printing the crate/node pair
-        if self.is_local() { // (1)
-            // (1) side-step fact that not all external things have paths at
-            // the moment, such as type parameters
-            try!(ty::tls::with_opt(|opt_tcx| {
-                if let Some(tcx) = opt_tcx {
-                    try!(write!(f, " => {}", tcx.item_path_str(*self)));
-                }
-                Ok(())
-            }));
-        }
-
-        write!(f, " }}")
-    }
-}
-
-
-impl DefId {
-    pub fn local(index: DefIndex) -> DefId {
-        DefId { krate: LOCAL_CRATE, index: index }
-    }
-
-    pub fn is_local(&self) -> bool {
-        self.krate == LOCAL_CRATE
-    }
-}
index c27d029374affc8ebb07a04ad0824c57537dcaba..ac7a1b8aa0de5bae1abdc4ceec40a891161efd99 100644 (file)
 use self::RootUnsafeContext::*;
 
 use dep_graph::DepNode;
-use middle::def::Def;
-use middle::ty::{self, Ty};
-use middle::ty::MethodCall;
+use hir::def::Def;
+use ty::{self, Ty, TyCtxt};
+use ty::MethodCall;
 
 use syntax::ast;
 use syntax::codemap::Span;
-use rustc_front::hir;
-use rustc_front::intravisit;
-use rustc_front::intravisit::{FnKind, Visitor};
+use hir;
+use hir::intravisit;
+use hir::intravisit::{FnKind, Visitor};
 
 #[derive(Copy, Clone)]
 struct UnsafeContext {
@@ -44,13 +44,14 @@ enum RootUnsafeContext {
 
 fn type_is_unsafe_function(ty: Ty) -> bool {
     match ty.sty {
-        ty::TyBareFn(_, ref f) => f.unsafety == hir::Unsafety::Unsafe,
+        ty::TyFnDef(_, _, ref f) |
+        ty::TyFnPtr(ref f) => f.unsafety == hir::Unsafety::Unsafe,
         _ => false,
     }
 }
 
 struct EffectCheckVisitor<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
 
     /// Whether we're in an unsafe context.
     unsafe_context: UnsafeContext,
@@ -81,9 +82,9 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> {
                 block: &'v hir::Block, span: Span, _: ast::NodeId) {
 
         let (is_item_fn, is_unsafe_fn) = match fn_kind {
-            FnKind::ItemFn(_, _, unsafety, _, _, _) =>
+            FnKind::ItemFn(_, _, unsafety, _, _, _, _) =>
                 (true, unsafety == hir::Unsafety::Unsafe),
-            FnKind::Method(_, sig, _) =>
+            FnKind::Method(_, sig, _, _) =>
                 (true, sig.unsafety == hir::Unsafety::Unsafe),
             _ => (false, false),
         };
@@ -182,7 +183,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> {
     }
 }
 
-pub fn check_crate(tcx: &ty::ctxt) {
+pub fn check_crate(tcx: &TyCtxt) {
     let _task = tcx.dep_graph.in_task(DepNode::EffectCheck);
 
     let mut visitor = EffectCheckVisitor {
index 67e96816abf96c49db7ed10f38de12a0c8298d02..602889375e970250cb6756d1f751d1769e829416 100644 (file)
 
 
 use dep_graph::DepNode;
-use front::map as ast_map;
-use middle::def_id::{CRATE_DEF_INDEX};
+use hir::map as ast_map;
+use hir::def_id::{CRATE_DEF_INDEX};
 use session::{config, Session};
 use syntax::ast::NodeId;
 use syntax::attr;
 use syntax::codemap::Span;
 use syntax::entry::EntryPointType;
-use rustc_front::hir::{Item, ItemFn};
-use rustc_front::intravisit::Visitor;
+use hir::{Item, ItemFn};
+use hir::intravisit::Visitor;
 
 struct EntryContext<'a, 'tcx: 'a> {
     session: &'a Session,
index 93cc158cd12f2d6c58858b84a5244d0904dee7d3..c4d6f100671b003514707e4a7a524b20dab545c6 100644 (file)
@@ -19,15 +19,14 @@ pub use self::MatchMode::*;
 use self::TrackMatchMode::*;
 use self::OverloadedCallType::*;
 
-use middle::pat_util;
-use middle::def::Def;
-use middle::def_id::{DefId};
-use middle::infer;
+use hir::pat_util;
+use hir::def::Def;
+use hir::def_id::{DefId};
+use infer;
 use middle::mem_categorization as mc;
-use middle::ty;
-use middle::ty::adjustment;
+use ty::{self, TyCtxt, adjustment};
 
-use rustc_front::hir::{self, PatKind};
+use hir::{self, PatKind};
 
 use syntax::ast;
 use syntax::ptr::P;
@@ -210,7 +209,7 @@ enum OverloadedCallType {
 }
 
 impl OverloadedCallType {
-    fn from_trait_id(tcx: &ty::ctxt, trait_id: DefId)
+    fn from_trait_id(tcx: &TyCtxt, trait_id: DefId)
                      -> OverloadedCallType {
         for &(maybe_function_trait, overloaded_call_type) in &[
             (tcx.lang_items.fn_once_trait(), FnOnceOverloadedCall),
@@ -225,10 +224,10 @@ impl OverloadedCallType {
             }
         }
 
-        tcx.sess.bug("overloaded call didn't map to known function trait")
+        bug!("overloaded call didn't map to known function trait")
     }
 
-    fn from_method_id(tcx: &ty::ctxt, method_id: DefId)
+    fn from_method_id(tcx: &TyCtxt, method_id: DefId)
                       -> OverloadedCallType {
         let method = tcx.impl_or_trait_item(method_id);
         OverloadedCallType::from_trait_id(tcx, method.container().id())
@@ -307,7 +306,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
         }
     }
 
-    fn tcx(&self) -> &'t ty::ctxt<'tcx> {
+    fn tcx(&self) -> &'t TyCtxt<'tcx> {
         self.typer.tcx
     }
 
@@ -399,11 +398,6 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
                 }
             }
 
-            hir::ExprRange(ref start, ref end) => {
-                start.as_ref().map(|e| self.consume_expr(&e));
-                end.as_ref().map(|e| self.consume_expr(&e));
-            }
-
             hir::ExprCall(ref callee, ref args) => {    // callee(args)
                 self.walk_callee(expr, &callee);
                 self.consume_exprs(args);
@@ -455,23 +449,20 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
                 }
             }
 
-            hir::ExprInlineAsm(ref ia) => {
-                for &(_, ref input) in &ia.inputs {
-                    self.consume_expr(&input);
-                }
-
-                for output in &ia.outputs {
-                    if output.is_indirect {
-                        self.consume_expr(&output.expr);
+            hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => {
+                for (o, output) in ia.outputs.iter().zip(outputs) {
+                    if o.is_indirect {
+                        self.consume_expr(output);
                     } else {
-                        self.mutate_expr(expr, &output.expr,
-                                         if output.is_rw {
+                        self.mutate_expr(expr, output,
+                                         if o.is_rw {
                                              MutateMode::WriteAndRead
                                          } else {
                                              MutateMode::JustWrite
                                          });
                     }
                 }
+                self.consume_exprs(inputs);
             }
 
             hir::ExprBreak(..) |
@@ -488,7 +479,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
             }
 
             hir::ExprUnary(op, ref lhs) => {
-                let pass_args = if ::rustc_front::util::is_by_value_unop(op) {
+                let pass_args = if op.is_by_value() {
                     PassArgs::ByValue
                 } else {
                     PassArgs::ByRef
@@ -500,7 +491,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
             }
 
             hir::ExprBinary(op, ref lhs, ref rhs) => {
-                let pass_args = if ::rustc_front::util::is_by_value_binop(op.node) {
+                let pass_args = if op.node.is_by_value() {
                     PassArgs::ByValue
                 } else {
                     PassArgs::ByRef
@@ -533,7 +524,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
 
             hir::ExprAssignOp(op, ref lhs, ref rhs) => {
                 // NB All our assignment operations take the RHS by value
-                assert!(::rustc_front::util::is_by_value_binop(op.node));
+                assert!(op.node.is_by_value());
 
                 if !self.walk_overloaded_operator(expr, lhs, vec![rhs], PassArgs::ByValue) {
                     self.mutate_expr(expr, &lhs, MutateMode::WriteAndRead);
@@ -562,7 +553,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
                callee, callee_ty);
         let call_scope = self.tcx().region_maps.node_extent(call.id);
         match callee_ty.sty {
-            ty::TyBareFn(..) => {
+            ty::TyFnDef(..) | ty::TyFnPtr(_) => {
                 self.consume_expr(callee);
             }
             ty::TyError => { }
@@ -573,9 +564,10 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
                             OverloadedCallType::from_method_id(self.tcx(), method_id)
                         }
                         None => {
-                            self.tcx().sess.span_bug(
+                            span_bug!(
                                 callee.span,
-                                &format!("unexpected callee type {}", callee_ty))
+                                "unexpected callee type {}",
+                                callee_ty)
                         }
                     };
                 match overloaded_call_type {
@@ -692,7 +684,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
             // may not. This will generate an error earlier in typeck,
             // so we can just ignore it.
             if !self.tcx().sess.has_errors() {
-                self.tcx().sess.span_bug(
+                span_bug!(
                     with_expr.span,
                     "with expression doesn't evaluate to a struct");
             }
@@ -759,9 +751,9 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
 
                     let (m, r) = match self_ty.sty {
                         ty::TyRef(r, ref m) => (m.mutbl, r),
-                        _ => self.tcx().sess.span_bug(expr.span,
-                                &format!("bad overloaded deref type {:?}",
-                                    method_ty))
+                        _ => span_bug!(expr.span,
+                                "bad overloaded deref type {:?}",
+                                method_ty)
                     };
                     let bk = ty::BorrowKind::from_mutbl(m);
                     self.delegate.borrow(expr.id, expr.span, cmt,
@@ -943,7 +935,6 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
         debug!("determine_pat_move_mode cmt_discr={:?} pat={:?}", cmt_discr,
                pat);
         return_if_err!(self.mc.cat_pattern(cmt_discr, pat, |_mc, cmt_pat, pat| {
-            let tcx = self.tcx();
             let def_map = &self.tcx().def_map;
             if pat_util::pat_is_binding(&def_map.borrow(), pat) {
                 match pat.node {
@@ -956,7 +947,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
                         }
                     }
                     _ => {
-                        tcx.sess.span_bug(
+                        span_bug!(
                             pat.span,
                             "binding pattern not an identifier");
                     }
@@ -981,8 +972,6 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
         let delegate = &mut self.delegate;
         return_if_err!(mc.cat_pattern(cmt_discr.clone(), pat, |mc, cmt_pat, pat| {
             if pat_util::pat_is_binding(&def_map.borrow(), pat) {
-                let tcx = typer.tcx;
-
                 debug!("binding cmt_pat={:?} pat={:?} match_mode={:?}",
                        cmt_pat,
                        pat,
@@ -1016,7 +1005,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
                         delegate.consume_pat(pat, cmt_pat, mode);
                     }
                     _ => {
-                        tcx.sess.span_bug(
+                        span_bug!(
                             pat.span,
                             "binding pattern not an identifier");
                     }
@@ -1126,10 +1115,10 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
                             // reported.
 
                             if !tcx.sess.has_errors() {
-                                let msg = format!("Pattern has unexpected def: {:?} and type {:?}",
-                                                  def,
-                                                  cmt_pat.ty);
-                                tcx.sess.span_bug(pat.span, &msg[..])
+                                span_bug!(pat.span,
+                                          "Pattern has unexpected def: {:?} and type {:?}",
+                                          def,
+                                          cmt_pat.ty);
                             }
                         }
                     }
@@ -1191,7 +1180,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
         // Create the cmt for the variable being borrowed, from the
         // caller's perspective
         let var_id = upvar_def.var_id();
-        let var_ty = try!(self.typer.node_ty(var_id));
+        let var_ty = self.typer.node_ty(var_id)?;
         self.mc.cat_def(closure_id, closure_span, var_ty, upvar_def)
     }
 }
index face6d629340db1aaf6aef013984b1f38828c870..ae0540696c5728db21ac8b44fdff74f83e2fe7ea 100644 (file)
@@ -15,8 +15,8 @@
 //! `TransitiveRelation` type and use that to decide when one free
 //! region outlives another and so forth.
 
-use middle::ty::{self, FreeRegion, Region};
-use middle::ty::wf::ImpliedBound;
+use ty::{self, TyCtxt, FreeRegion, Region};
+use ty::wf::ImpliedBound;
 use rustc_data_structures::transitive_relation::TransitiveRelation;
 
 #[derive(Clone)]
@@ -49,7 +49,7 @@ impl FreeRegionMap {
     }
 
     pub fn relate_free_regions_from_predicates<'tcx>(&mut self,
-                                                     tcx: &ty::ctxt<'tcx>,
+                                                     _tcx: &TyCtxt<'tcx>,
                                                      predicates: &[ty::Predicate<'tcx>]) {
         debug!("relate_free_regions_from_predicates(predicates={:?})", predicates);
         for predicate in predicates {
@@ -72,10 +72,9 @@ impl FreeRegionMap {
                         }
                         _ => {
                             // All named regions are instantiated with free regions.
-                            tcx.sess.bug(
-                                &format!("record_region_bounds: non free region: {:?} / {:?}",
-                                         r_a,
-                                         r_b));
+                            bug!("record_region_bounds: non free region: {:?} / {:?}",
+                                 r_a,
+                                 r_b);
                         }
                     }
                 }
@@ -121,7 +120,7 @@ impl FreeRegionMap {
     /// Determines whether one region is a subregion of another.  This is intended to run *after
     /// inference* and sadly the logic is somewhat duplicated with the code in infer.rs.
     pub fn is_subregion_of(&self,
-                           tcx: &ty::ctxt,
+                           tcx: &TyCtxt,
                            sub_region: ty::Region,
                            super_region: ty::Region)
                            -> bool {
diff --git a/src/librustc/middle/infer/README.md b/src/librustc/middle/infer/README.md
deleted file mode 100644 (file)
index c835189..0000000
+++ /dev/null
@@ -1,237 +0,0 @@
-# Type inference engine
-
-This is loosely based on standard HM-type inference, but with an
-extension to try and accommodate subtyping.  There is nothing
-principled about this extension; it's sound---I hope!---but it's a
-heuristic, ultimately, and does not guarantee that it finds a valid
-typing even if one exists (in fact, there are known scenarios where it
-fails, some of which may eventually become problematic).
-
-## Key idea
-
-The main change is that each type variable T is associated with a
-lower-bound L and an upper-bound U.  L and U begin as bottom and top,
-respectively, but gradually narrow in response to new constraints
-being introduced.  When a variable is finally resolved to a concrete
-type, it can (theoretically) select any type that is a supertype of L
-and a subtype of U.
-
-There are several critical invariants which we maintain:
-
-- the upper-bound of a variable only becomes lower and the lower-bound
-  only becomes higher over time;
-- the lower-bound L is always a subtype of the upper bound U;
-- the lower-bound L and upper-bound U never refer to other type variables,
-  but only to types (though those types may contain type variables).
-
-> An aside: if the terms upper- and lower-bound confuse you, think of
-> "supertype" and "subtype".  The upper-bound is a "supertype"
-> (super=upper in Latin, or something like that anyway) and the lower-bound
-> is a "subtype" (sub=lower in Latin).  I find it helps to visualize
-> a simple class hierarchy, like Java minus interfaces and
-> primitive types.  The class Object is at the root (top) and other
-> types lie in between.  The bottom type is then the Null type.
-> So the tree looks like:
->
-> ```text
->         Object
->         /    \
->     String   Other
->         \    /
->         (null)
-> ```
->
-> So the upper bound type is the "supertype" and the lower bound is the
-> "subtype" (also, super and sub mean upper and lower in Latin, or something
-> like that anyway).
-
-## Satisfying constraints
-
-At a primitive level, there is only one form of constraint that the
-inference understands: a subtype relation.  So the outside world can
-say "make type A a subtype of type B".  If there are variables
-involved, the inferencer will adjust their upper- and lower-bounds as
-needed to ensure that this relation is satisfied. (We also allow "make
-type A equal to type B", but this is translated into "A <: B" and "B
-<: A")
-
-As stated above, we always maintain the invariant that type bounds
-never refer to other variables.  This keeps the inference relatively
-simple, avoiding the scenario of having a kind of graph where we have
-to pump constraints along and reach a fixed point, but it does impose
-some heuristics in the case where the user is relating two type
-variables A <: B.
-
-Combining two variables such that variable A will forever be a subtype
-of variable B is the trickiest part of the algorithm because there is
-often no right choice---that is, the right choice will depend on
-future constraints which we do not yet know. The problem comes about
-because both A and B have bounds that can be adjusted in the future.
-Let's look at some of the cases that can come up.
-
-Imagine, to start, the best case, where both A and B have an upper and
-lower bound (that is, the bounds are not top nor bot respectively). In
-that case, if we're lucky, A.ub <: B.lb, and so we know that whatever
-A and B should become, they will forever have the desired subtyping
-relation.  We can just leave things as they are.
-
-### Option 1: Unify
-
-However, suppose that A.ub is *not* a subtype of B.lb.  In
-that case, we must make a decision.  One option is to unify A
-and B so that they are one variable whose bounds are:
-
-    UB = GLB(A.ub, B.ub)
-    LB = LUB(A.lb, B.lb)
-
-(Note that we will have to verify that LB <: UB; if it does not, the
-types are not intersecting and there is an error) In that case, A <: B
-holds trivially because A==B.  However, we have now lost some
-flexibility, because perhaps the user intended for A and B to end up
-as different types and not the same type.
-
-Pictorally, what this does is to take two distinct variables with
-(hopefully not completely) distinct type ranges and produce one with
-the intersection.
-
-```text
-                  B.ub                  B.ub
-                   /\                    /
-           A.ub   /  \           A.ub   /
-           /   \ /    \              \ /
-          /     X      \              UB
-         /     / \      \            / \
-        /     /   /      \          /   /
-        \     \  /       /          \  /
-         \      X       /             LB
-          \    / \     /             / \
-           \  /   \   /             /   \
-           A.lb    B.lb          A.lb    B.lb
-```
-
-
-### Option 2: Relate UB/LB
-
-Another option is to keep A and B as distinct variables but set their
-bounds in such a way that, whatever happens, we know that A <: B will hold.
-This can be achieved by ensuring that A.ub <: B.lb.  In practice there
-are two ways to do that, depicted pictorially here:
-
-```text
-    Before                Option #1            Option #2
-
-             B.ub                B.ub                B.ub
-              /\                 /  \                /  \
-      A.ub   /  \        A.ub   /(B')\       A.ub   /(B')\
-      /   \ /    \           \ /     /           \ /     /
-     /     X      \         __UB____/             UB    /
-    /     / \      \       /  |                   |    /
-   /     /   /      \     /   |                   |   /
-   \     \  /       /    /(A')|                   |  /
-    \      X       /    /     LB            ______LB/
-     \    / \     /    /     / \           / (A')/ \
-      \  /   \   /     \    /   \          \    /   \
-      A.lb    B.lb       A.lb    B.lb        A.lb    B.lb
-```
-
-In these diagrams, UB and LB are defined as before.  As you can see,
-the new ranges `A'` and `B'` are quite different from the range that
-would be produced by unifying the variables.
-
-### What we do now
-
-Our current technique is to *try* (transactionally) to relate the
-existing bounds of A and B, if there are any (i.e., if `UB(A) != top
-&& LB(B) != bot`).  If that succeeds, we're done.  If it fails, then
-we merge A and B into same variable.
-
-This is not clearly the correct course.  For example, if `UB(A) !=
-top` but `LB(B) == bot`, we could conceivably set `LB(B)` to `UB(A)`
-and leave the variables unmerged.  This is sometimes the better
-course, it depends on the program.
-
-The main case which fails today that I would like to support is:
-
-```text
-fn foo<T>(x: T, y: T) { ... }
-
-fn bar() {
-    let x: @mut int = @mut 3;
-    let y: @int = @3;
-    foo(x, y);
-}
-```
-
-In principle, the inferencer ought to find that the parameter `T` to
-`foo(x, y)` is `@const int`.  Today, however, it does not; this is
-because the type variable `T` is merged with the type variable for
-`X`, and thus inherits its UB/LB of `@mut int`.  This leaves no
-flexibility for `T` to later adjust to accommodate `@int`.
-
-### What to do when not all bounds are present
-
-In the prior discussion we assumed that A.ub was not top and B.lb was
-not bot.  Unfortunately this is rarely the case.  Often type variables
-have "lopsided" bounds.  For example, if a variable in the program has
-been initialized but has not been used, then its corresponding type
-variable will have a lower bound but no upper bound.  When that
-variable is then used, we would like to know its upper bound---but we
-don't have one!  In this case we'll do different things depending on
-how the variable is being used.
-
-## Transactional support
-
-Whenever we adjust merge variables or adjust their bounds, we always
-keep a record of the old value.  This allows the changes to be undone.
-
-## Regions
-
-I've only talked about type variables here, but region variables
-follow the same principle.  They have upper- and lower-bounds.  A
-region A is a subregion of a region B if A being valid implies that B
-is valid.  This basically corresponds to the block nesting structure:
-the regions for outer block scopes are superregions of those for inner
-block scopes.
-
-## Integral and floating-point type variables
-
-There is a third variety of type variable that we use only for
-inferring the types of unsuffixed integer literals.  Integral type
-variables differ from general-purpose type variables in that there's
-no subtyping relationship among the various integral types, so instead
-of associating each variable with an upper and lower bound, we just
-use simple unification.  Each integer variable is associated with at
-most one integer type.  Floating point types are handled similarly to
-integral types.
-
-## GLB/LUB
-
-Computing the greatest-lower-bound and least-upper-bound of two
-types/regions is generally straightforward except when type variables
-are involved. In that case, we follow a similar "try to use the bounds
-when possible but otherwise merge the variables" strategy.  In other
-words, `GLB(A, B)` where `A` and `B` are variables will often result
-in `A` and `B` being merged and the result being `A`.
-
-## Type coercion
-
-We have a notion of assignability which differs somewhat from
-subtyping; in particular it may cause region borrowing to occur.  See
-the big comment later in this file on Type Coercion for specifics.
-
-### In conclusion
-
-I showed you three ways to relate `A` and `B`.  There are also more,
-of course, though I'm not sure if there are any more sensible options.
-The main point is that there are various options, each of which
-produce a distinct range of types for `A` and `B`.  Depending on what
-the correct values for A and B are, one of these options will be the
-right choice: but of course we don't know the right values for A and B
-yet, that's what we're trying to find!  In our code, we opt to unify
-(Option #1).
-
-# Implementation details
-
-We make use of a trait-like implementation strategy to consolidate
-duplicated code between subtypes, GLB, and LUB computations.  See the
-section on "Type Combining" below for details.
diff --git a/src/librustc/middle/infer/bivariate.rs b/src/librustc/middle/infer/bivariate.rs
deleted file mode 100644 (file)
index 2d9432b..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Applies the "bivariance relationship" to two types and/or regions.
-//! If (A,B) are bivariant then either A <: B or B <: A. It occurs
-//! when type/lifetime parameters are unconstrained. Usually this is
-//! an error, but we permit it in the specific case where a type
-//! parameter is constrained in a where-clause via an associated type.
-//!
-//! There are several ways one could implement bivariance. You could
-//! just do nothing at all, for example, or you could fully verify
-//! that one of the two subtyping relationships hold. We choose to
-//! thread a middle line: we relate types up to regions, but ignore
-//! all region relationships.
-//!
-//! At one point, handling bivariance in this fashion was necessary
-//! for inference, but I'm actually not sure if that is true anymore.
-//! In particular, it might be enough to say (A,B) are bivariant for
-//! all (A,B).
-
-use super::combine::{self, CombineFields};
-use super::type_variable::{BiTo};
-
-use middle::ty::{self, Ty};
-use middle::ty::TyVar;
-use middle::ty::relate::{Relate, RelateResult, TypeRelation};
-
-pub struct Bivariate<'a, 'tcx: 'a> {
-    fields: CombineFields<'a, 'tcx>
-}
-
-impl<'a, 'tcx> Bivariate<'a, 'tcx> {
-    pub fn new(fields: CombineFields<'a, 'tcx>) -> Bivariate<'a, 'tcx> {
-        Bivariate { fields: fields }
-    }
-}
-
-impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Bivariate<'a, 'tcx> {
-    fn tag(&self) -> &'static str { "Bivariate" }
-
-    fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.fields.tcx() }
-
-    fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
-
-    fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
-                                               variance: ty::Variance,
-                                               a: &T,
-                                               b: &T)
-                                               -> RelateResult<'tcx, T>
-    {
-        match variance {
-            // If we have Foo<A> and Foo is invariant w/r/t A,
-            // and we want to assert that
-            //
-            //     Foo<A> <: Foo<B> ||
-            //     Foo<B> <: Foo<A>
-            //
-            // then still A must equal B.
-            ty::Invariant => self.relate(a, b),
-
-            ty::Covariant => self.relate(a, b),
-            ty::Bivariant => self.relate(a, b),
-            ty::Contravariant => self.relate(a, b),
-        }
-    }
-
-    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
-        debug!("{}.tys({:?}, {:?})", self.tag(),
-               a, b);
-        if a == b { return Ok(a); }
-
-        let infcx = self.fields.infcx;
-        let a = infcx.type_variables.borrow().replace_if_possible(a);
-        let b = infcx.type_variables.borrow().replace_if_possible(b);
-        match (&a.sty, &b.sty) {
-            (&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => {
-                infcx.type_variables.borrow_mut().relate_vars(a_id, BiTo, b_id);
-                Ok(a)
-            }
-
-            (&ty::TyInfer(TyVar(a_id)), _) => {
-                try!(self.fields.instantiate(b, BiTo, a_id));
-                Ok(a)
-            }
-
-            (_, &ty::TyInfer(TyVar(b_id))) => {
-                try!(self.fields.instantiate(a, BiTo, b_id));
-                Ok(a)
-            }
-
-            _ => {
-                combine::super_combine_tys(self.fields.infcx, self, a, b)
-            }
-        }
-    }
-
-    fn regions(&mut self, a: ty::Region, _: ty::Region) -> RelateResult<'tcx, ty::Region> {
-        Ok(a)
-    }
-
-    fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-                  -> RelateResult<'tcx, ty::Binder<T>>
-        where T: Relate<'a,'tcx>
-    {
-        let a1 = self.tcx().erase_late_bound_regions(a);
-        let b1 = self.tcx().erase_late_bound_regions(b);
-        let c = try!(self.relate(&a1, &b1));
-        Ok(ty::Binder(c))
-    }
-}
diff --git a/src/librustc/middle/infer/combine.rs b/src/librustc/middle/infer/combine.rs
deleted file mode 100644 (file)
index faf1bdb..0000000
+++ /dev/null
@@ -1,395 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-///////////////////////////////////////////////////////////////////////////
-// # Type combining
-//
-// There are four type combiners: equate, sub, lub, and glb.  Each
-// implements the trait `Combine` and contains methods for combining
-// two instances of various things and yielding a new instance.  These
-// combiner methods always yield a `Result<T>`.  There is a lot of
-// common code for these operations, implemented as default methods on
-// the `Combine` trait.
-//
-// Each operation may have side-effects on the inference context,
-// though these can be unrolled using snapshots. On success, the
-// LUB/GLB operations return the appropriate bound. The Eq and Sub
-// operations generally return the first operand.
-//
-// ## Contravariance
-//
-// When you are relating two things which have a contravariant
-// relationship, you should use `contratys()` or `contraregions()`,
-// rather than inversing the order of arguments!  This is necessary
-// because the order of arguments is not relevant for LUB and GLB.  It
-// is also useful to track which value is the "expected" value in
-// terms of error reporting.
-
-use super::bivariate::Bivariate;
-use super::equate::Equate;
-use super::glb::Glb;
-use super::lub::Lub;
-use super::sub::Sub;
-use super::{InferCtxt};
-use super::{MiscVariable, TypeTrace};
-use super::type_variable::{RelationDir, BiTo, EqTo, SubtypeOf, SupertypeOf};
-
-use middle::ty::{IntType, UintType};
-use middle::ty::{self, Ty};
-use middle::ty::error::TypeError;
-use middle::ty::fold::{TypeFolder, TypeFoldable};
-use middle::ty::relate::{Relate, RelateResult, TypeRelation};
-
-use syntax::ast;
-use syntax::codemap::Span;
-
-#[derive(Clone)]
-pub struct CombineFields<'a, 'tcx: 'a> {
-    pub infcx: &'a InferCtxt<'a, 'tcx>,
-    pub a_is_expected: bool,
-    pub trace: TypeTrace<'tcx>,
-    pub cause: Option<ty::relate::Cause>,
-}
-
-pub fn super_combine_tys<'a,'tcx:'a,R>(infcx: &InferCtxt<'a, 'tcx>,
-                                       relation: &mut R,
-                                       a: Ty<'tcx>,
-                                       b: Ty<'tcx>)
-                                       -> RelateResult<'tcx, Ty<'tcx>>
-    where R: TypeRelation<'a,'tcx>
-{
-    let a_is_expected = relation.a_is_expected();
-
-    match (&a.sty, &b.sty) {
-        // Relate integral variables to other types
-        (&ty::TyInfer(ty::IntVar(a_id)), &ty::TyInfer(ty::IntVar(b_id))) => {
-            try!(infcx.int_unification_table
-                      .borrow_mut()
-                      .unify_var_var(a_id, b_id)
-                      .map_err(|e| int_unification_error(a_is_expected, e)));
-            Ok(a)
-        }
-        (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyInt(v)) => {
-            unify_integral_variable(infcx, a_is_expected, v_id, IntType(v))
-        }
-        (&ty::TyInt(v), &ty::TyInfer(ty::IntVar(v_id))) => {
-            unify_integral_variable(infcx, !a_is_expected, v_id, IntType(v))
-        }
-        (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyUint(v)) => {
-            unify_integral_variable(infcx, a_is_expected, v_id, UintType(v))
-        }
-        (&ty::TyUint(v), &ty::TyInfer(ty::IntVar(v_id))) => {
-            unify_integral_variable(infcx, !a_is_expected, v_id, UintType(v))
-        }
-
-        // Relate floating-point variables to other types
-        (&ty::TyInfer(ty::FloatVar(a_id)), &ty::TyInfer(ty::FloatVar(b_id))) => {
-            try!(infcx.float_unification_table
-                      .borrow_mut()
-                      .unify_var_var(a_id, b_id)
-                      .map_err(|e| float_unification_error(relation.a_is_expected(), e)));
-            Ok(a)
-        }
-        (&ty::TyInfer(ty::FloatVar(v_id)), &ty::TyFloat(v)) => {
-            unify_float_variable(infcx, a_is_expected, v_id, v)
-        }
-        (&ty::TyFloat(v), &ty::TyInfer(ty::FloatVar(v_id))) => {
-            unify_float_variable(infcx, !a_is_expected, v_id, v)
-        }
-
-        // All other cases of inference are errors
-        (&ty::TyInfer(_), _) |
-        (_, &ty::TyInfer(_)) => {
-            Err(TypeError::Sorts(ty::relate::expected_found(relation, &a, &b)))
-        }
-
-
-        _ => {
-            ty::relate::super_relate_tys(relation, a, b)
-        }
-    }
-}
-
-fn unify_integral_variable<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
-                                    vid_is_expected: bool,
-                                    vid: ty::IntVid,
-                                    val: ty::IntVarValue)
-                                    -> RelateResult<'tcx, Ty<'tcx>>
-{
-    try!(infcx
-         .int_unification_table
-         .borrow_mut()
-         .unify_var_value(vid, val)
-         .map_err(|e| int_unification_error(vid_is_expected, e)));
-    match val {
-        IntType(v) => Ok(infcx.tcx.mk_mach_int(v)),
-        UintType(v) => Ok(infcx.tcx.mk_mach_uint(v)),
-    }
-}
-
-fn unify_float_variable<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
-                                 vid_is_expected: bool,
-                                 vid: ty::FloatVid,
-                                 val: ast::FloatTy)
-                                 -> RelateResult<'tcx, Ty<'tcx>>
-{
-    try!(infcx
-         .float_unification_table
-         .borrow_mut()
-         .unify_var_value(vid, val)
-         .map_err(|e| float_unification_error(vid_is_expected, e)));
-    Ok(infcx.tcx.mk_mach_float(val))
-}
-
-impl<'a, 'tcx> CombineFields<'a, 'tcx> {
-    pub fn tcx(&self) -> &'a ty::ctxt<'tcx> {
-        self.infcx.tcx
-    }
-
-    pub fn switch_expected(&self) -> CombineFields<'a, 'tcx> {
-        CombineFields {
-            a_is_expected: !self.a_is_expected,
-            ..(*self).clone()
-        }
-    }
-
-    pub fn equate(&self) -> Equate<'a, 'tcx> {
-        Equate::new(self.clone())
-    }
-
-    pub fn bivariate(&self) -> Bivariate<'a, 'tcx> {
-        Bivariate::new(self.clone())
-    }
-
-    pub fn sub(&self) -> Sub<'a, 'tcx> {
-        Sub::new(self.clone())
-    }
-
-    pub fn lub(&self) -> Lub<'a, 'tcx> {
-        Lub::new(self.clone())
-    }
-
-    pub fn glb(&self) -> Glb<'a, 'tcx> {
-        Glb::new(self.clone())
-    }
-
-    pub fn instantiate(&self,
-                       a_ty: Ty<'tcx>,
-                       dir: RelationDir,
-                       b_vid: ty::TyVid)
-                       -> RelateResult<'tcx, ()>
-    {
-        let mut stack = Vec::new();
-        stack.push((a_ty, dir, b_vid));
-        loop {
-            // For each turn of the loop, we extract a tuple
-            //
-            //     (a_ty, dir, b_vid)
-            //
-            // to relate. Here dir is either SubtypeOf or
-            // SupertypeOf. The idea is that we should ensure that
-            // the type `a_ty` is a subtype or supertype (respectively) of the
-            // type to which `b_vid` is bound.
-            //
-            // If `b_vid` has not yet been instantiated with a type
-            // (which is always true on the first iteration, but not
-            // necessarily true on later iterations), we will first
-            // instantiate `b_vid` with a *generalized* version of
-            // `a_ty`. Generalization introduces other inference
-            // variables wherever subtyping could occur (at time of
-            // this writing, this means replacing free regions with
-            // region variables).
-            let (a_ty, dir, b_vid) = match stack.pop() {
-                None => break,
-                Some(e) => e,
-            };
-
-            debug!("instantiate(a_ty={:?} dir={:?} b_vid={:?})",
-                   a_ty,
-                   dir,
-                   b_vid);
-
-            // Check whether `vid` has been instantiated yet.  If not,
-            // make a generalized form of `ty` and instantiate with
-            // that.
-            let b_ty = self.infcx.type_variables.borrow().probe(b_vid);
-            let b_ty = match b_ty {
-                Some(t) => t, // ...already instantiated.
-                None => {     // ...not yet instantiated:
-                    // Generalize type if necessary.
-                    let generalized_ty = try!(match dir {
-                        EqTo => self.generalize(a_ty, b_vid, false),
-                        BiTo | SupertypeOf | SubtypeOf => self.generalize(a_ty, b_vid, true),
-                    });
-                    debug!("instantiate(a_ty={:?}, dir={:?}, \
-                                        b_vid={:?}, generalized_ty={:?})",
-                           a_ty, dir, b_vid,
-                           generalized_ty);
-                    self.infcx.type_variables
-                        .borrow_mut()
-                        .instantiate_and_push(
-                            b_vid, generalized_ty, &mut stack);
-                    generalized_ty
-                }
-            };
-
-            // The original triple was `(a_ty, dir, b_vid)` -- now we have
-            // resolved `b_vid` to `b_ty`, so apply `(a_ty, dir, b_ty)`:
-            //
-            // FIXME(#16847): This code is non-ideal because all these subtype
-            // relations wind up attributed to the same spans. We need
-            // to associate causes/spans with each of the relations in
-            // the stack to get this right.
-            try!(match dir {
-                BiTo => self.bivariate().relate(&a_ty, &b_ty),
-                EqTo => self.equate().relate(&a_ty, &b_ty),
-                SubtypeOf => self.sub().relate(&a_ty, &b_ty),
-                SupertypeOf => self.sub().relate_with_variance(ty::Contravariant, &a_ty, &b_ty),
-            });
-        }
-
-        Ok(())
-    }
-
-    /// Attempts to generalize `ty` for the type variable `for_vid`.  This checks for cycle -- that
-    /// is, whether the type `ty` references `for_vid`. If `make_region_vars` is true, it will also
-    /// replace all regions with fresh variables. Returns `TyError` in the case of a cycle, `Ok`
-    /// otherwise.
-    fn generalize(&self,
-                  ty: Ty<'tcx>,
-                  for_vid: ty::TyVid,
-                  make_region_vars: bool)
-                  -> RelateResult<'tcx, Ty<'tcx>>
-    {
-        let mut generalize = Generalizer {
-            infcx: self.infcx,
-            span: self.trace.origin.span(),
-            for_vid: for_vid,
-            make_region_vars: make_region_vars,
-            cycle_detected: false
-        };
-        let u = ty.fold_with(&mut generalize);
-        if generalize.cycle_detected {
-            Err(TypeError::CyclicTy)
-        } else {
-            Ok(u)
-        }
-    }
-}
-
-struct Generalizer<'cx, 'tcx:'cx> {
-    infcx: &'cx InferCtxt<'cx, 'tcx>,
-    span: Span,
-    for_vid: ty::TyVid,
-    make_region_vars: bool,
-    cycle_detected: bool,
-}
-
-impl<'cx, 'tcx> ty::fold::TypeFolder<'tcx> for Generalizer<'cx, 'tcx> {
-    fn tcx(&self) -> &ty::ctxt<'tcx> {
-        self.infcx.tcx
-    }
-
-    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
-        // Check to see whether the type we are genealizing references
-        // `vid`. At the same time, also update any type variables to
-        // the values that they are bound to. This is needed to truly
-        // check for cycles, but also just makes things readable.
-        //
-        // (In particular, you could have something like `$0 = Box<$1>`
-        //  where `$1` has already been instantiated with `Box<$0>`)
-        match t.sty {
-            ty::TyInfer(ty::TyVar(vid)) => {
-                if vid == self.for_vid {
-                    self.cycle_detected = true;
-                    self.tcx().types.err
-                } else {
-                    match self.infcx.type_variables.borrow().probe(vid) {
-                        Some(u) => self.fold_ty(u),
-                        None => t,
-                    }
-                }
-            }
-            _ => {
-                t.super_fold_with(self)
-            }
-        }
-    }
-
-    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
-        match r {
-            // Never make variables for regions bound within the type itself.
-            ty::ReLateBound(..) => { return r; }
-
-            // Early-bound regions should really have been substituted away before
-            // we get to this point.
-            ty::ReEarlyBound(..) => {
-                self.tcx().sess.span_bug(
-                    self.span,
-                    &format!("Encountered early bound region when generalizing: {:?}",
-                            r));
-            }
-
-            // Always make a fresh region variable for skolemized regions;
-            // the higher-ranked decision procedures rely on this.
-            ty::ReSkolemized(..) => { }
-
-            // For anything else, we make a region variable, unless we
-            // are *equating*, in which case it's just wasteful.
-            ty::ReEmpty |
-            ty::ReStatic |
-            ty::ReScope(..) |
-            ty::ReVar(..) |
-            ty::ReFree(..) => {
-                if !self.make_region_vars {
-                    return r;
-                }
-            }
-        }
-
-        // FIXME: This is non-ideal because we don't give a
-        // very descriptive origin for this region variable.
-        self.infcx.next_region_var(MiscVariable(self.span))
-    }
-}
-
-pub trait RelateResultCompare<'tcx, T> {
-    fn compare<F>(&self, t: T, f: F) -> RelateResult<'tcx, T> where
-        F: FnOnce() -> TypeError<'tcx>;
-}
-
-impl<'tcx, T:Clone + PartialEq> RelateResultCompare<'tcx, T> for RelateResult<'tcx, T> {
-    fn compare<F>(&self, t: T, f: F) -> RelateResult<'tcx, T> where
-        F: FnOnce() -> TypeError<'tcx>,
-    {
-        self.clone().and_then(|s| {
-            if s == t {
-                self.clone()
-            } else {
-                Err(f())
-            }
-        })
-    }
-}
-
-fn int_unification_error<'tcx>(a_is_expected: bool, v: (ty::IntVarValue, ty::IntVarValue))
-                               -> TypeError<'tcx>
-{
-    let (a, b) = v;
-    TypeError::IntMismatch(ty::relate::expected_found_bool(a_is_expected, &a, &b))
-}
-
-fn float_unification_error<'tcx>(a_is_expected: bool,
-                                 v: (ast::FloatTy, ast::FloatTy))
-                                 -> TypeError<'tcx>
-{
-    let (a, b) = v;
-    TypeError::FloatMismatch(ty::relate::expected_found_bool(a_is_expected, &a, &b))
-}
diff --git a/src/librustc/middle/infer/equate.rs b/src/librustc/middle/infer/equate.rs
deleted file mode 100644 (file)
index d1dad49..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::combine::{self, CombineFields};
-use super::higher_ranked::HigherRankedRelations;
-use super::{Subtype};
-use super::type_variable::{EqTo};
-
-use middle::ty::{self, Ty};
-use middle::ty::TyVar;
-use middle::ty::relate::{Relate, RelateResult, TypeRelation};
-
-/// Ensures `a` is made equal to `b`. Returns `a` on success.
-pub struct Equate<'a, 'tcx: 'a> {
-    fields: CombineFields<'a, 'tcx>
-}
-
-impl<'a, 'tcx> Equate<'a, 'tcx> {
-    pub fn new(fields: CombineFields<'a, 'tcx>) -> Equate<'a, 'tcx> {
-        Equate { fields: fields }
-    }
-}
-
-impl<'a, 'tcx> TypeRelation<'a,'tcx> for Equate<'a, 'tcx> {
-    fn tag(&self) -> &'static str { "Equate" }
-
-    fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.fields.tcx() }
-
-    fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
-
-    fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
-                                               _: ty::Variance,
-                                               a: &T,
-                                               b: &T)
-                                               -> RelateResult<'tcx, T>
-    {
-        self.relate(a, b)
-    }
-
-    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
-        debug!("{}.tys({:?}, {:?})", self.tag(),
-               a, b);
-        if a == b { return Ok(a); }
-
-        let infcx = self.fields.infcx;
-        let a = infcx.type_variables.borrow().replace_if_possible(a);
-        let b = infcx.type_variables.borrow().replace_if_possible(b);
-        match (&a.sty, &b.sty) {
-            (&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => {
-                infcx.type_variables.borrow_mut().relate_vars(a_id, EqTo, b_id);
-                Ok(a)
-            }
-
-            (&ty::TyInfer(TyVar(a_id)), _) => {
-                try!(self.fields.instantiate(b, EqTo, a_id));
-                Ok(a)
-            }
-
-            (_, &ty::TyInfer(TyVar(b_id))) => {
-                try!(self.fields.instantiate(a, EqTo, b_id));
-                Ok(a)
-            }
-
-            _ => {
-                try!(combine::super_combine_tys(self.fields.infcx, self, a, b));
-                Ok(a)
-            }
-        }
-    }
-
-    fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
-        debug!("{}.regions({:?}, {:?})",
-               self.tag(),
-               a,
-               b);
-        let origin = Subtype(self.fields.trace.clone());
-        self.fields.infcx.region_vars.make_eqregion(origin, a, b);
-        Ok(a)
-    }
-
-    fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-                  -> RelateResult<'tcx, ty::Binder<T>>
-        where T: Relate<'a, 'tcx>
-    {
-        try!(self.fields.higher_ranked_sub(a, b));
-        self.fields.higher_ranked_sub(b, a)
-    }
-}
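
The `Equate` relation removed above makes `a` equal to `b` by relating inference variables with `EqTo`, instantiating a variable when only one side is a variable, and otherwise deferring to `super_combine_tys` for structural comparison; `relate_with_variance` deliberately ignores the requested variance because equality is invariant in every position. Below is a minimal, self-contained sketch of that unify-or-compare shape, using toy types invented for illustration rather than the rustc internals:

use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
enum Ty {
    Int,
    Bool,
    Var(u32),                          // an inference variable
    Tuple(Box<Ty>, Box<Ty>),
}

#[derive(Default)]
struct TypeTable {
    bindings: HashMap<u32, Ty>,        // inference variable -> resolved type
}

impl TypeTable {
    // Shallow resolution, standing in for `replace_if_possible` above.
    fn resolve(&self, ty: &Ty) -> Ty {
        match ty {
            Ty::Var(id) => self.bindings.get(id).cloned().unwrap_or_else(|| ty.clone()),
            _ => ty.clone(),
        }
    }

    // Roughly the shape of `Equate::tys`: bind a variable if either side is
    // one, otherwise require the two types to agree structurally.
    fn equate(&mut self, a: &Ty, b: &Ty) -> Result<(), String> {
        let a = self.resolve(a);
        let b = self.resolve(b);
        if a == b {
            return Ok(());
        }
        match (a, b) {
            (Ty::Var(id), other) | (other, Ty::Var(id)) => {
                self.bindings.insert(id, other);
                Ok(())
            }
            (Ty::Tuple(a1, a2), Ty::Tuple(b1, b2)) => {
                self.equate(&a1, &b1)?;
                self.equate(&a2, &b2)
            }
            (a, b) => Err(format!("expected `{:?}`, found `{:?}`", a, b)),
        }
    }
}

fn main() {
    let mut table = TypeTable::default();
    // Equating (?0, bool) with (int, bool) binds ?0 to int.
    let a = Ty::Tuple(Box::new(Ty::Var(0)), Box::new(Ty::Bool));
    let b = Ty::Tuple(Box::new(Ty::Int), Box::new(Ty::Bool));
    assert!(table.equate(&a, &b).is_ok());
    assert_eq!(table.resolve(&Ty::Var(0)), Ty::Int);
}

In the compiler the bindings live in the shared `type_variables` table and the relation also equates regions and binders, which this sketch leaves out.
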
diff --git a/src/librustc/middle/infer/error_reporting.rs b/src/librustc/middle/infer/error_reporting.rs
deleted file mode 100644 (file)
index 8c578bc..0000000
+++ /dev/null
@@ -1,1988 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Error Reporting Code for the inference engine
-//!
-//! Because of the way inference, and in particular region inference,
-//! works, it often happens that errors are not detected until far after
-//! the relevant line of code has been type-checked. Therefore, there is
-//! an elaborate system to track why a particular constraint in the
-//! inference graph arose so that we can explain to the user what gave
-//! rise to a particular error.
-//!
-//! The basis of the system is the "origin" types. An "origin" is the
-//! reason that a constraint or inference variable arose. There are
-//! different "origin" enums for different kinds of constraints/variables
-//! (e.g., `TypeOrigin`, `RegionVariableOrigin`). An origin always has
-//! a span, but also more information so that we can generate a meaningful
-//! error message.
-//!
-//! Having a catalogue of all the different reasons an error can arise is
-//! also useful for other reasons, like cross-referencing FAQs, etc., though
-//! we are not really taking advantage of this yet.
-//!
-//! # Region Inference
-//!
-//! Region inference is particularly tricky because it always succeeds "in
-//! the moment" and simply registers a constraint. Then, at the end, we
-//! can compute the full graph and report errors, so we need to be able to
-//! store and later report what gave rise to the conflicting constraints.
-//!
-//! # Subtype Trace
-//!
-//! Determining whether `T1 <: T2` often involves a number of subtypes and
-//! subconstraints along the way. A "TypeTrace" is an extended version
-//! of an origin that traces the types and other values that were being
-//! compared. It is not necessarily comprehensive (in fact, at the time of
-//! this writing it only tracks the root values being compared) but I'd
-//! like to extend it to include significant "waypoints". For example, if
-//! you are comparing `(T1, T2) <: (T3, T4)`, and the problem is that `T2
-//! <: T4` fails, I'd like the trace to include enough information to say
-//! "in the 2nd element of the tuple". Similarly, failures when comparing
-//! arguments or return types in fn types should be able to cite the
-//! specific position, etc.
-//!
-//! # Reality vs plan
-//!
-//! Of course, there is still a LOT of code in typeck that has yet to be
-//! ported to this system, and which relies on string concatenation at the
-//! time of error detection.
-
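// An illustrative sketch of the "origin"/"trace" idea described in the module
// docs above: each recorded constraint carries the reason it arose (with a
// span) plus the root values being compared, so a later failure can be
// reported in terms of the user's source. Every definition below is an
// invented stand-in for illustration, not the actual rustc type.
#[derive(Clone, Debug)]
struct Span { lo: usize, hi: usize }

#[allow(dead_code)]
#[derive(Clone, Debug)]
enum TypeOrigin {
    MatchExpressionArm(Span),   // the types of two match arms must agree
    IfExpression(Span),         // the types of the two branches must agree
    Misc(Span),
}

#[derive(Clone, Debug)]
struct TypeTrace {
    origin: TypeOrigin,
    expected: String,           // root values being compared
    found: String,
}

// Turn a failed constraint back into a message anchored at the origin's span.
fn report(trace: &TypeTrace) -> String {
    let (what, span) = match &trace.origin {
        TypeOrigin::MatchExpressionArm(sp) => ("match arms have incompatible types", sp),
        TypeOrigin::IfExpression(sp) => ("if and else have incompatible types", sp),
        TypeOrigin::Misc(sp) => ("mismatched types", sp),
    };
    format!("{} at {}..{}: expected `{}`, found `{}`",
            what, span.lo, span.hi, trace.expected, trace.found)
}

fn main() {
    let trace = TypeTrace {
        origin: TypeOrigin::MatchExpressionArm(Span { lo: 10, hi: 42 }),
        expected: "u32".to_string(),
        found: "bool".to_string(),
    };
    println!("{}", report(&trace));
}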
-use self::FreshOrKept::*;
-
-use super::InferCtxt;
-use super::TypeTrace;
-use super::SubregionOrigin;
-use super::RegionVariableOrigin;
-use super::ValuePairs;
-use super::region_inference::RegionResolutionError;
-use super::region_inference::ConcreteFailure;
-use super::region_inference::SubSupConflict;
-use super::region_inference::GenericBoundFailure;
-use super::region_inference::GenericKind;
-use super::region_inference::ProcessedErrors;
-use super::region_inference::SameRegions;
-
-use std::collections::HashSet;
-
-use front::map as ast_map;
-use rustc_front::hir;
-use rustc_front::print::pprust;
-
-use middle::cstore::CrateStore;
-use middle::def::Def;
-use middle::def_id::DefId;
-use middle::infer::{self, TypeOrigin};
-use middle::region;
-use middle::subst;
-use middle::ty::{self, Ty, TypeFoldable};
-use middle::ty::{Region, ReFree};
-use middle::ty::error::TypeError;
-
-use std::cell::{Cell, RefCell};
-use std::char::from_u32;
-use std::fmt;
-use syntax::ast;
-use syntax::errors::DiagnosticBuilder;
-use syntax::codemap::{self, Pos, Span};
-use syntax::parse::token;
-use syntax::ptr::P;
-
-impl<'tcx> ty::ctxt<'tcx> {
-    pub fn note_and_explain_region(&self,
-                                   err: &mut DiagnosticBuilder,
-                                   prefix: &str,
-                                   region: ty::Region,
-                                   suffix: &str) {
-        fn item_scope_tag(item: &hir::Item) -> &'static str {
-            match item.node {
-                hir::ItemImpl(..) => "impl",
-                hir::ItemStruct(..) => "struct",
-                hir::ItemEnum(..) => "enum",
-                hir::ItemTrait(..) => "trait",
-                hir::ItemFn(..) => "function body",
-                _ => "item"
-            }
-        }
-
-        fn explain_span(tcx: &ty::ctxt, heading: &str, span: Span)
-                        -> (String, Option<Span>) {
-            let lo = tcx.sess.codemap().lookup_char_pos_adj(span.lo);
-            (format!("the {} at {}:{}", heading, lo.line, lo.col.to_usize()),
-             Some(span))
-        }
-
-        let (description, span) = match region {
-            ty::ReScope(scope) => {
-                let new_string;
-                let unknown_scope = || {
-                    format!("{}unknown scope: {:?}{}.  Please report a bug.",
-                            prefix, scope, suffix)
-                };
-                let span = match scope.span(&self.region_maps, &self.map) {
-                    Some(s) => s,
-                    None => {
-                        err.note(&unknown_scope());
-                        return;
-                    }
-                };
-                let tag = match self.map.find(scope.node_id(&self.region_maps)) {
-                    Some(ast_map::NodeBlock(_)) => "block",
-                    Some(ast_map::NodeExpr(expr)) => match expr.node {
-                        hir::ExprCall(..) => "call",
-                        hir::ExprMethodCall(..) => "method call",
-                        hir::ExprMatch(_, _, hir::MatchSource::IfLetDesugar { .. }) => "if let",
-                        hir::ExprMatch(_, _, hir::MatchSource::WhileLetDesugar) =>  "while let",
-                        hir::ExprMatch(_, _, hir::MatchSource::ForLoopDesugar) =>  "for",
-                        hir::ExprMatch(..) => "match",
-                        _ => "expression",
-                    },
-                    Some(ast_map::NodeStmt(_)) => "statement",
-                    Some(ast_map::NodeItem(it)) => item_scope_tag(&it),
-                    Some(_) | None => {
-                        err.span_note(span, &unknown_scope());
-                        return;
-                    }
-                };
-                let scope_decorated_tag = match self.region_maps.code_extent_data(scope) {
-                    region::CodeExtentData::Misc(_) => tag,
-                    region::CodeExtentData::CallSiteScope { .. } => {
-                        "scope of call-site for function"
-                    }
-                    region::CodeExtentData::ParameterScope { .. } => {
-                        "scope of parameters for function"
-                    }
-                    region::CodeExtentData::DestructionScope(_) => {
-                        new_string = format!("destruction scope surrounding {}", tag);
-                        &new_string[..]
-                    }
-                    region::CodeExtentData::Remainder(r) => {
-                        new_string = format!("block suffix following statement {}",
-                                             r.first_statement_index);
-                        &new_string[..]
-                    }
-                };
-                explain_span(self, scope_decorated_tag, span)
-            }
-
-            ty::ReFree(ref fr) => {
-                let prefix = match fr.bound_region {
-                    ty::BrAnon(idx) => {
-                        format!("the anonymous lifetime #{} defined on", idx + 1)
-                    }
-                    ty::BrFresh(_) => "an anonymous lifetime defined on".to_owned(),
-                    _ => {
-                        format!("the lifetime {} as defined on",
-                                fr.bound_region)
-                    }
-                };
-
-                match self.map.find(fr.scope.node_id(&self.region_maps)) {
-                    Some(ast_map::NodeBlock(ref blk)) => {
-                        let (msg, opt_span) = explain_span(self, "block", blk.span);
-                        (format!("{} {}", prefix, msg), opt_span)
-                    }
-                    Some(ast_map::NodeItem(it)) => {
-                        let tag = item_scope_tag(&it);
-                        let (msg, opt_span) = explain_span(self, tag, it.span);
-                        (format!("{} {}", prefix, msg), opt_span)
-                    }
-                    Some(_) | None => {
-                        // this really should not happen, but it does:
-                        // FIXME(#27942)
-                        (format!("{} unknown free region bounded by scope {:?}",
-                                 prefix, fr.scope), None)
-                    }
-                }
-            }
-
-            ty::ReStatic => ("the static lifetime".to_owned(), None),
-
-            ty::ReEmpty => ("the empty lifetime".to_owned(), None),
-
-            ty::ReEarlyBound(ref data) => (data.name.to_string(), None),
-
-            // FIXME(#13998) ReSkolemized should probably print like
-            // ReFree rather than dumping Debug output on the user.
-            //
-            // We shouldn't really be having unification failures with ReVar
-            // and ReLateBound though.
-            ty::ReSkolemized(..) | ty::ReVar(_) | ty::ReLateBound(..) => {
-                (format!("lifetime {:?}", region), None)
-            }
-        };
-        let message = format!("{}{}{}", prefix, description, suffix);
-        if let Some(span) = span {
-            err.span_note(span, &message);
-        } else {
-            err.note(&message);
-        }
-    }
-}
-
-pub trait ErrorReporting<'tcx> {
-    fn report_region_errors(&self,
-                            errors: &Vec<RegionResolutionError<'tcx>>);
-
-    fn process_errors(&self, errors: &Vec<RegionResolutionError<'tcx>>)
-                      -> Vec<RegionResolutionError<'tcx>>;
-
-    fn report_type_error(&self,
-                         trace: TypeTrace<'tcx>,
-                         terr: &TypeError<'tcx>)
-                         -> DiagnosticBuilder<'tcx>;
-
-    fn check_and_note_conflicting_crates(&self,
-                                         err: &mut DiagnosticBuilder,
-                                         terr: &TypeError<'tcx>,
-                                         sp: Span);
-
-    fn report_and_explain_type_error(&self,
-                                     trace: TypeTrace<'tcx>,
-                                     terr: &TypeError<'tcx>);
-
-    fn values_str(&self, values: &ValuePairs<'tcx>) -> Option<String>;
-
-    fn expected_found_str<T: fmt::Display + Resolvable<'tcx> + TypeFoldable<'tcx>>(
-        &self,
-        exp_found: &ty::error::ExpectedFound<T>)
-        -> Option<String>;
-
-    fn report_concrete_failure(&self,
-                               origin: SubregionOrigin<'tcx>,
-                               sub: Region,
-                               sup: Region);
-
-    fn report_generic_bound_failure(&self,
-                                    origin: SubregionOrigin<'tcx>,
-                                    kind: GenericKind<'tcx>,
-                                    sub: Region);
-
-    fn report_sub_sup_conflict(&self,
-                               var_origin: RegionVariableOrigin,
-                               sub_origin: SubregionOrigin<'tcx>,
-                               sub_region: Region,
-                               sup_origin: SubregionOrigin<'tcx>,
-                               sup_region: Region);
-
-    fn report_processed_errors(&self,
-                               var_origin: &[RegionVariableOrigin],
-                               trace_origin: &[(TypeTrace<'tcx>, TypeError<'tcx>)],
-                               same_regions: &[SameRegions]);
-
-    fn give_suggestion(&self, err: &mut DiagnosticBuilder, same_regions: &[SameRegions]);
-}
-
-trait ErrorReportingHelpers<'tcx> {
-    fn report_inference_failure(&self,
-                                var_origin: RegionVariableOrigin)
-                                -> DiagnosticBuilder<'tcx>;
-
-    fn note_region_origin(&self,
-                          err: &mut DiagnosticBuilder,
-                          origin: &SubregionOrigin<'tcx>);
-
-    fn give_expl_lifetime_param(&self,
-                                err: &mut DiagnosticBuilder,
-                                decl: &hir::FnDecl,
-                                unsafety: hir::Unsafety,
-                                constness: hir::Constness,
-                                name: ast::Name,
-                                opt_explicit_self: Option<&hir::ExplicitSelf_>,
-                                generics: &hir::Generics,
-                                span: Span);
-}
-
-impl<'a, 'tcx> ErrorReporting<'tcx> for InferCtxt<'a, 'tcx> {
-    fn report_region_errors(&self,
-                            errors: &Vec<RegionResolutionError<'tcx>>) {
-        let p_errors = self.process_errors(errors);
-        let errors = if p_errors.is_empty() { errors } else { &p_errors };
-        for error in errors {
-            match error.clone() {
-                ConcreteFailure(origin, sub, sup) => {
-                    self.report_concrete_failure(origin, sub, sup);
-                }
-
-                GenericBoundFailure(kind, param_ty, sub) => {
-                    self.report_generic_bound_failure(kind, param_ty, sub);
-                }
-
-                SubSupConflict(var_origin,
-                               sub_origin, sub_r,
-                               sup_origin, sup_r) => {
-                    self.report_sub_sup_conflict(var_origin,
-                                                 sub_origin, sub_r,
-                                                 sup_origin, sup_r);
-                }
-
-                ProcessedErrors(ref var_origins,
-                                ref trace_origins,
-                                ref same_regions) => {
-                    if !same_regions.is_empty() {
-                        self.report_processed_errors(&var_origins[..],
-                                                     &trace_origins[..],
-                                                     &same_regions[..]);
-                    }
-                }
-            }
-        }
-    }
-
-    // This method goes through all the errors and tries to group certain types
-    // of error together, for the purpose of suggesting explicit lifetime
-    // parameters to the user. This is done so that we can have a more
-    // complete view of what lifetimes should be the same.
-    // If the return value is an empty vector, it means that processing
-    // failed (so the return value of this method should not be used)
-    fn process_errors(&self, errors: &Vec<RegionResolutionError<'tcx>>)
-                      -> Vec<RegionResolutionError<'tcx>> {
-        debug!("process_errors()");
-        let mut var_origins = Vec::new();
-        let mut trace_origins = Vec::new();
-        let mut same_regions = Vec::new();
-        let mut processed_errors = Vec::new();
-        for error in errors {
-            match error.clone() {
-                ConcreteFailure(origin, sub, sup) => {
-                    debug!("processing ConcreteFailure");
-                    let trace = match origin {
-                        infer::Subtype(trace) => Some(trace),
-                        _ => None,
-                    };
-                    match free_regions_from_same_fn(self.tcx, sub, sup) {
-                        Some(ref same_frs) if trace.is_some() => {
-                            let trace = trace.unwrap();
-                            let terr = TypeError::RegionsDoesNotOutlive(sup,
-                                                                        sub);
-                            trace_origins.push((trace, terr));
-                            append_to_same_regions(&mut same_regions, same_frs);
-                        }
-                        _ => processed_errors.push((*error).clone()),
-                    }
-                }
-                SubSupConflict(var_origin, _, sub_r, _, sup_r) => {
-                    debug!("processing SubSupConflict sub: {:?} sup: {:?}", sub_r, sup_r);
-                    match free_regions_from_same_fn(self.tcx, sub_r, sup_r) {
-                        Some(ref same_frs) => {
-                            var_origins.push(var_origin);
-                            append_to_same_regions(&mut same_regions, same_frs);
-                        }
-                        None => processed_errors.push((*error).clone()),
-                    }
-                }
-                _ => ()  // This shouldn't happen
-            }
-        }
-        if !same_regions.is_empty() {
-            let common_scope_id = same_regions[0].scope_id;
-            for sr in &same_regions {
-                // Since ProcessedErrors is used to reconstruct the function
-                // declaration, we want to make sure that they are, in fact,
-                // from the same scope
-                if sr.scope_id != common_scope_id {
-                    debug!("returning empty result from process_errors because
-                            {} != {}", sr.scope_id, common_scope_id);
-                    return vec!();
-                }
-            }
-            let pe = ProcessedErrors(var_origins, trace_origins, same_regions);
-            debug!("errors processed: {:?}", pe);
-            processed_errors.push(pe);
-        }
-        return processed_errors;
-
-
-        struct FreeRegionsFromSameFn {
-            sub_fr: ty::FreeRegion,
-            sup_fr: ty::FreeRegion,
-            scope_id: ast::NodeId
-        }
-
-        impl FreeRegionsFromSameFn {
-            fn new(sub_fr: ty::FreeRegion,
-                   sup_fr: ty::FreeRegion,
-                   scope_id: ast::NodeId)
-                   -> FreeRegionsFromSameFn {
-                FreeRegionsFromSameFn {
-                    sub_fr: sub_fr,
-                    sup_fr: sup_fr,
-                    scope_id: scope_id
-                }
-            }
-        }
-
-        fn free_regions_from_same_fn(tcx: &ty::ctxt,
-                                     sub: Region,
-                                     sup: Region)
-                                     -> Option<FreeRegionsFromSameFn> {
-            debug!("free_regions_from_same_fn(sub={:?}, sup={:?})", sub, sup);
-            let (scope_id, fr1, fr2) = match (sub, sup) {
-                (ReFree(fr1), ReFree(fr2)) => {
-                    if fr1.scope != fr2.scope {
-                        return None
-                    }
-                    assert!(fr1.scope == fr2.scope);
-                    (fr1.scope.node_id(&tcx.region_maps), fr1, fr2)
-                },
-                _ => return None
-            };
-            let parent = tcx.map.get_parent(scope_id);
-            let parent_node = tcx.map.find(parent);
-            match parent_node {
-                Some(node) => match node {
-                    ast_map::NodeItem(item) => match item.node {
-                        hir::ItemFn(..) => {
-                            Some(FreeRegionsFromSameFn::new(fr1, fr2, scope_id))
-                        },
-                        _ => None
-                    },
-                    ast_map::NodeImplItem(..) |
-                    ast_map::NodeTraitItem(..) => {
-                        Some(FreeRegionsFromSameFn::new(fr1, fr2, scope_id))
-                    },
-                    _ => None
-                },
-                None => {
-                    debug!("no parent node of scope_id {}", scope_id);
-                    None
-                }
-            }
-        }
-
-        fn append_to_same_regions(same_regions: &mut Vec<SameRegions>,
-                                  same_frs: &FreeRegionsFromSameFn) {
-            let scope_id = same_frs.scope_id;
-            let (sub_fr, sup_fr) = (same_frs.sub_fr, same_frs.sup_fr);
-            for sr in &mut *same_regions {
-                if sr.contains(&sup_fr.bound_region)
-                   && scope_id == sr.scope_id {
-                    sr.push(sub_fr.bound_region);
-                    return
-                }
-            }
-            same_regions.push(SameRegions {
-                scope_id: scope_id,
-                regions: vec!(sub_fr.bound_region, sup_fr.bound_region)
-            })
-        }
-    }
-
-    fn report_type_error(&self,
-                         trace: TypeTrace<'tcx>,
-                         terr: &TypeError<'tcx>)
-                         -> DiagnosticBuilder<'tcx> {
-        let expected_found_str = match self.values_str(&trace.values) {
-            Some(v) => v,
-            None => {
-                return self.tcx.sess.diagnostic().struct_dummy(); /* derived error */
-            }
-        };
-
-        let is_simple_error = if let &TypeError::Sorts(ref values) = terr {
-            values.expected.is_primitive() && values.found.is_primitive()
-        } else {
-            false
-        };
-
-        let expected_found_str = if is_simple_error {
-            expected_found_str
-        } else {
-            format!("{} ({})", expected_found_str, terr)
-        };
-
-        let mut err = struct_span_err!(self.tcx.sess,
-                                       trace.origin.span(),
-                                       E0308,
-                                       "{}: {}",
-                                       trace.origin,
-                                       expected_found_str);
-
-        self.check_and_note_conflicting_crates(&mut err, terr, trace.origin.span());
-
-        match trace.origin {
-            TypeOrigin::MatchExpressionArm(_, arm_span, source) => match source {
-                hir::MatchSource::IfLetDesugar{..} => {
-                    err.span_note(arm_span, "`if let` arm with an incompatible type");
-                }
-                _ => {
-                    err.span_note(arm_span, "match arm with an incompatible type");
-                }
-            },
-            _ => ()
-        }
-        err
-    }
-
-    /// Adds a note if the types come from similarly named crates
-    fn check_and_note_conflicting_crates(&self,
-                                         err: &mut DiagnosticBuilder,
-                                         terr: &TypeError<'tcx>,
-                                         sp: Span) {
-        let report_path_match = |err: &mut DiagnosticBuilder, did1: DefId, did2: DefId| {
-            // Only external crates, if either is from a local
-            // module we could have false positives
-            if !(did1.is_local() || did2.is_local()) && did1.krate != did2.krate {
-                let exp_path = self.tcx.with_path(did1,
-                                                  |p| p.map(|x| x.to_string())
-                                                       .collect::<Vec<_>>());
-                let found_path = self.tcx.with_path(did2,
-                                                    |p| p.map(|x| x.to_string())
-                                                         .collect::<Vec<_>>());
-                // We compare strings because PathMod and PathName can be different
-                // for imported and non-imported crates
-                if exp_path == found_path {
-                    let crate_name = self.tcx.sess.cstore.crate_name(did1.krate);
-                    err.span_note(sp, &format!("Perhaps two different versions \
-                                                of crate `{}` are being used?",
-                                               crate_name));
-                }
-            }
-        };
-        match *terr {
-            TypeError::Sorts(ref exp_found) => {
-                // if they are both "path types", there's a chance of ambiguity
-                // due to different versions of the same crate
-                match (&exp_found.expected.sty, &exp_found.found.sty) {
-                    (&ty::TyEnum(ref exp_adt, _), &ty::TyEnum(ref found_adt, _)) |
-                    (&ty::TyStruct(ref exp_adt, _), &ty::TyStruct(ref found_adt, _)) |
-                    (&ty::TyEnum(ref exp_adt, _), &ty::TyStruct(ref found_adt, _)) |
-                    (&ty::TyStruct(ref exp_adt, _), &ty::TyEnum(ref found_adt, _)) => {
-                        report_path_match(err, exp_adt.did, found_adt.did);
-                    },
-                    _ => ()
-                }
-            },
-            TypeError::Traits(ref exp_found) => {
-                report_path_match(err, exp_found.expected, exp_found.found);
-            },
-            _ => () // FIXME(#22750) handle traits and stuff
-        }
-    }
-
-    fn report_and_explain_type_error(&self,
-                                     trace: TypeTrace<'tcx>,
-                                     terr: &TypeError<'tcx>) {
-        let span = trace.origin.span();
-        let mut err = self.report_type_error(trace, terr);
-        self.tcx.note_and_explain_type_err(&mut err, terr, span);
-        err.emit();
-    }
-
-    /// Returns a string of the form "expected `{}`, found `{}`", or None if this is a derived
-    /// error.
-    fn values_str(&self, values: &ValuePairs<'tcx>) -> Option<String> {
-        match *values {
-            infer::Types(ref exp_found) => self.expected_found_str(exp_found),
-            infer::TraitRefs(ref exp_found) => self.expected_found_str(exp_found),
-            infer::PolyTraitRefs(ref exp_found) => self.expected_found_str(exp_found)
-        }
-    }
-
-    fn expected_found_str<T: fmt::Display + Resolvable<'tcx> + TypeFoldable<'tcx>>(
-        &self,
-        exp_found: &ty::error::ExpectedFound<T>)
-        -> Option<String>
-    {
-        let expected = exp_found.expected.resolve(self);
-        if expected.references_error() {
-            return None;
-        }
-
-        let found = exp_found.found.resolve(self);
-        if found.references_error() {
-            return None;
-        }
-
-        Some(format!("expected `{}`, found `{}`",
-                     expected,
-                     found))
-    }
-
-    fn report_generic_bound_failure(&self,
-                                    origin: SubregionOrigin<'tcx>,
-                                    bound_kind: GenericKind<'tcx>,
-                                    sub: Region)
-    {
-        // FIXME: it would be better to report the first error message
-        // with the span of the parameter itself, rather than the span
-        // where the error was detected. But that span is not readily
-        // accessible.
-
-        let labeled_user_string = match bound_kind {
-            GenericKind::Param(ref p) =>
-                format!("the parameter type `{}`", p),
-            GenericKind::Projection(ref p) =>
-                format!("the associated type `{}`", p),
-        };
-
-        let mut err = match sub {
-            ty::ReFree(ty::FreeRegion {bound_region: ty::BrNamed(..), ..}) => {
-                // Does the required lifetime have a nice name we can print?
-                let mut err = struct_span_err!(self.tcx.sess,
-                                               origin.span(),
-                                               E0309,
-                                               "{} may not live long enough",
-                                               labeled_user_string);
-                err.fileline_help(origin.span(),
-                                  &format!("consider adding an explicit lifetime bound `{}: {}`...",
-                                           bound_kind,
-                                           sub));
-                err
-            }
-
-            ty::ReStatic => {
-                // Does the required lifetime have a nice name we can print?
-                let mut err = struct_span_err!(self.tcx.sess,
-                                               origin.span(),
-                                               E0310,
-                                               "{} may not live long enough",
-                                               labeled_user_string);
-                err.fileline_help(origin.span(),
-                                  &format!("consider adding an explicit lifetime \
-                                            bound `{}: 'static`...",
-                                           bound_kind));
-                err
-            }
-
-            _ => {
-                // If not, be less specific.
-                let mut err = struct_span_err!(self.tcx.sess,
-                                               origin.span(),
-                                               E0311,
-                                               "{} may not live long enough",
-                                               labeled_user_string);
-                err.fileline_help(origin.span(),
-                                  &format!("consider adding an explicit lifetime bound for `{}`",
-                                           bound_kind));
-                self.tcx.note_and_explain_region(
-                    &mut err,
-                    &format!("{} must be valid for ", labeled_user_string),
-                    sub,
-                    "...");
-                err
-            }
-        };
-
-        self.note_region_origin(&mut err, &origin);
-        err.emit();
-    }
-
-    fn report_concrete_failure(&self,
-                               origin: SubregionOrigin<'tcx>,
-                               sub: Region,
-                               sup: Region) {
-        match origin {
-            infer::Subtype(trace) => {
-                let terr = TypeError::RegionsDoesNotOutlive(sup, sub);
-                self.report_and_explain_type_error(trace, &terr);
-            }
-            infer::Reborrow(span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0312,
-                    "lifetime of reference outlives \
-                     lifetime of borrowed content...");
-                self.tcx.note_and_explain_region(&mut err,
-                    "...the reference is valid for ",
-                    sub,
-                    "...");
-                self.tcx.note_and_explain_region(&mut err,
-                    "...but the borrowed content is only valid for ",
-                    sup,
-                    "");
-                err.emit();
-            }
-            infer::ReborrowUpvar(span, ref upvar_id) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0313,
-                    "lifetime of borrowed pointer outlives \
-                            lifetime of captured variable `{}`...",
-                            self.tcx.local_var_name_str(upvar_id.var_id));
-                self.tcx.note_and_explain_region(&mut err,
-                    "...the borrowed pointer is valid for ",
-                    sub,
-                    "...");
-                self.tcx.note_and_explain_region(&mut err,
-                    &format!("...but `{}` is only valid for ",
-                             self.tcx.local_var_name_str(upvar_id.var_id)),
-                    sup,
-                    "");
-                err.emit();
-            }
-            infer::InfStackClosure(span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0314,
-                    "closure outlives stack frame");
-                self.tcx.note_and_explain_region(&mut err,
-                    "...the closure must be valid for ",
-                    sub,
-                    "...");
-                self.tcx.note_and_explain_region(&mut err,
-                    "...but the closure's stack frame is only valid for ",
-                    sup,
-                    "");
-                err.emit();
-            }
-            infer::InvokeClosure(span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0315,
-                    "cannot invoke closure outside of its lifetime");
-                self.tcx.note_and_explain_region(&mut err,
-                    "the closure is only valid for ",
-                    sup,
-                    "");
-                err.emit();
-            }
-            infer::DerefPointer(span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0473,
-                          "dereference of reference outside its lifetime");
-                self.tcx.note_and_explain_region(&mut err,
-                    "the reference is only valid for ",
-                    sup,
-                    "");
-                err.emit();
-            }
-            infer::FreeVariable(span, id) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0474,
-                          "captured variable `{}` does not outlive the enclosing closure",
-                          self.tcx.local_var_name_str(id));
-                self.tcx.note_and_explain_region(&mut err,
-                    "captured variable is valid for ",
-                    sup,
-                    "");
-                self.tcx.note_and_explain_region(&mut err,
-                    "closure is valid for ",
-                    sub,
-                    "");
-                err.emit();
-            }
-            infer::IndexSlice(span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0475,
-                          "index of slice outside its lifetime");
-                self.tcx.note_and_explain_region(&mut err,
-                    "the slice is only valid for ",
-                    sup,
-                    "");
-                err.emit();
-            }
-            infer::RelateObjectBound(span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0476,
-                          "lifetime of the source pointer does not outlive \
-                           lifetime bound of the object type");
-                self.tcx.note_and_explain_region(&mut err,
-                    "object type is valid for ",
-                    sub,
-                    "");
-                self.tcx.note_and_explain_region(&mut err,
-                    "source pointer is only valid for ",
-                    sup,
-                    "");
-                err.emit();
-            }
-            infer::RelateParamBound(span, ty) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0477,
-                          "the type `{}` does not fulfill the required lifetime",
-                          self.ty_to_string(ty));
-                self.tcx.note_and_explain_region(&mut err,
-                                        "type must outlive ",
-                                        sub,
-                                        "");
-                err.emit();
-            }
-            infer::RelateRegionParamBound(span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0478,
-                          "lifetime bound not satisfied");
-                self.tcx.note_and_explain_region(&mut err,
-                    "lifetime parameter instantiated with ",
-                    sup,
-                    "");
-                self.tcx.note_and_explain_region(&mut err,
-                    "but lifetime parameter must outlive ",
-                    sub,
-                    "");
-                err.emit();
-            }
-            infer::RelateDefaultParamBound(span, ty) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0479,
-                          "the type `{}` (provided as the value of \
-                           a type parameter) is not valid at this point",
-                          self.ty_to_string(ty));
-                self.tcx.note_and_explain_region(&mut err,
-                                        "type must outlive ",
-                                        sub,
-                                        "");
-                err.emit();
-            }
-            infer::CallRcvr(span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0480,
-                          "lifetime of method receiver does not outlive \
-                           the method call");
-                self.tcx.note_and_explain_region(&mut err,
-                    "the receiver is only valid for ",
-                    sup,
-                    "");
-                err.emit();
-            }
-            infer::CallArg(span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0481,
-                          "lifetime of function argument does not outlive \
-                           the function call");
-                self.tcx.note_and_explain_region(&mut err,
-                    "the function argument is only valid for ",
-                    sup,
-                    "");
-                err.emit();
-            }
-            infer::CallReturn(span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0482,
-                          "lifetime of return value does not outlive \
-                           the function call");
-                self.tcx.note_and_explain_region(&mut err,
-                    "the return value is only valid for ",
-                    sup,
-                    "");
-                err.emit();
-            }
-            infer::Operand(span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0483,
-                          "lifetime of operand does not outlive \
-                           the operation");
-                self.tcx.note_and_explain_region(&mut err,
-                    "the operand is only valid for ",
-                    sup,
-                    "");
-                err.emit();
-            }
-            infer::AddrOf(span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0484,
-                          "reference is not valid at the time of borrow");
-                self.tcx.note_and_explain_region(&mut err,
-                    "the borrow is only valid for ",
-                    sup,
-                    "");
-                err.emit();
-            }
-            infer::AutoBorrow(span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0485,
-                          "automatic reference is not valid \
-                           at the time of borrow");
-                self.tcx.note_and_explain_region(&mut err,
-                    "the automatic borrow is only valid for ",
-                    sup,
-                    "");
-                err.emit();
-            }
-            infer::ExprTypeIsNotInScope(t, span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0486,
-                          "type of expression contains references \
-                           that are not valid during the expression: `{}`",
-                          self.ty_to_string(t));
-                self.tcx.note_and_explain_region(&mut err,
-                    "type is only valid for ",
-                    sup,
-                    "");
-                err.emit();
-            }
-            infer::SafeDestructor(span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0487,
-                          "unsafe use of destructor: destructor might be called \
-                           while references are dead");
-                // FIXME (22171): terms "super/subregion" are suboptimal
-                self.tcx.note_and_explain_region(&mut err,
-                    "superregion: ",
-                    sup,
-                    "");
-                self.tcx.note_and_explain_region(&mut err,
-                    "subregion: ",
-                    sub,
-                    "");
-                err.emit();
-            }
-            infer::BindingTypeIsNotValidAtDecl(span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0488,
-                          "lifetime of variable does not enclose its declaration");
-                self.tcx.note_and_explain_region(&mut err,
-                    "the variable is only valid for ",
-                    sup,
-                    "");
-                err.emit();
-            }
-            infer::ParameterInScope(_, span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0489,
-                          "type/lifetime parameter not in scope here");
-                self.tcx.note_and_explain_region(&mut err,
-                    "the parameter is only valid for ",
-                    sub,
-                    "");
-                err.emit();
-            }
-            infer::DataBorrowed(ty, span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0490,
-                          "a value of type `{}` is borrowed for too long",
-                          self.ty_to_string(ty));
-                self.tcx.note_and_explain_region(&mut err, "the type is valid for ", sub, "");
-                self.tcx.note_and_explain_region(&mut err, "but the borrow lasts for ", sup, "");
-                err.emit();
-            }
-            infer::ReferenceOutlivesReferent(ty, span) => {
-                let mut err = struct_span_err!(self.tcx.sess, span, E0491,
-                          "in type `{}`, reference has a longer lifetime \
-                           than the data it references",
-                          self.ty_to_string(ty));
-                self.tcx.note_and_explain_region(&mut err,
-                    "the pointer is valid for ",
-                    sub,
-                    "");
-                self.tcx.note_and_explain_region(&mut err,
-                    "but the referenced data is only valid for ",
-                    sup,
-                    "");
-                err.emit();
-            }
-        }
-    }
-
-    fn report_sub_sup_conflict(&self,
-                               var_origin: RegionVariableOrigin,
-                               sub_origin: SubregionOrigin<'tcx>,
-                               sub_region: Region,
-                               sup_origin: SubregionOrigin<'tcx>,
-                               sup_region: Region) {
-        let mut err = self.report_inference_failure(var_origin);
-
-        self.tcx.note_and_explain_region(&mut err,
-            "first, the lifetime cannot outlive ",
-            sup_region,
-            "...");
-
-        self.note_region_origin(&mut err, &sup_origin);
-
-        self.tcx.note_and_explain_region(&mut err,
-            "but, the lifetime must be valid for ",
-            sub_region,
-            "...");
-
-        self.note_region_origin(&mut err, &sub_origin);
-        err.emit();
-    }
-
-    fn report_processed_errors(&self,
-                               var_origins: &[RegionVariableOrigin],
-                               trace_origins: &[(TypeTrace<'tcx>, TypeError<'tcx>)],
-                               same_regions: &[SameRegions]) {
-        for (i, vo) in var_origins.iter().enumerate() {
-            let mut err = self.report_inference_failure(vo.clone());
-            if i == var_origins.len() - 1 {
-                self.give_suggestion(&mut err, same_regions);
-            }
-            err.emit();
-        }
-
-        for &(ref trace, ref terr) in trace_origins {
-            self.report_and_explain_type_error(trace.clone(), terr);
-        }
-    }
-
-    fn give_suggestion(&self, err: &mut DiagnosticBuilder, same_regions: &[SameRegions]) {
-        let scope_id = same_regions[0].scope_id;
-        let parent = self.tcx.map.get_parent(scope_id);
-        let parent_node = self.tcx.map.find(parent);
-        let taken = lifetimes_in_scope(self.tcx, scope_id);
-        let life_giver = LifeGiver::with_taken(&taken[..]);
-        let node_inner = match parent_node {
-            Some(ref node) => match *node {
-                ast_map::NodeItem(ref item) => {
-                    match item.node {
-                        hir::ItemFn(ref fn_decl, unsafety, constness, _, ref gen, _) => {
-                            Some((fn_decl, gen, unsafety, constness,
-                                  item.name, None, item.span))
-                        },
-                        _ => None
-                    }
-                }
-                ast_map::NodeImplItem(item) => {
-                    match item.node {
-                        hir::ImplItemKind::Method(ref sig, _) => {
-                            Some((&sig.decl,
-                                  &sig.generics,
-                                  sig.unsafety,
-                                  sig.constness,
-                                  item.name,
-                                  Some(&sig.explicit_self.node),
-                                  item.span))
-                        }
-                        _ => None,
-                    }
-                },
-                ast_map::NodeTraitItem(item) => {
-                    match item.node {
-                        hir::MethodTraitItem(ref sig, Some(_)) => {
-                            Some((&sig.decl,
-                                  &sig.generics,
-                                  sig.unsafety,
-                                  sig.constness,
-                                  item.name,
-                                  Some(&sig.explicit_self.node),
-                                  item.span))
-                        }
-                        _ => None
-                    }
-                }
-                _ => None
-            },
-            None => None
-        };
-        let (fn_decl, generics, unsafety, constness, name, expl_self, span)
-                                    = node_inner.expect("expect item fn");
-        let rebuilder = Rebuilder::new(self.tcx, fn_decl, expl_self,
-                                       generics, same_regions, &life_giver);
-        let (fn_decl, expl_self, generics) = rebuilder.rebuild();
-        self.give_expl_lifetime_param(err, &fn_decl, unsafety, constness, name,
-                                      expl_self.as_ref(), &generics, span);
-    }
-}
-
-struct RebuildPathInfo<'a> {
-    path: &'a hir::Path,
-    // indexes to insert lifetime on path.lifetimes
-    indexes: Vec<u32>,
-    // number of lifetimes we expect to see on the type referred by `path`
-    // (e.g., expected=1 for struct Foo<'a>)
-    expected: u32,
-    anon_nums: &'a HashSet<u32>,
-    region_names: &'a HashSet<ast::Name>
-}
-
-struct Rebuilder<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
-    fn_decl: &'a hir::FnDecl,
-    expl_self_opt: Option<&'a hir::ExplicitSelf_>,
-    generics: &'a hir::Generics,
-    same_regions: &'a [SameRegions],
-    life_giver: &'a LifeGiver,
-    cur_anon: Cell<u32>,
-    inserted_anons: RefCell<HashSet<u32>>,
-}
-
-enum FreshOrKept {
-    Fresh,
-    Kept
-}
-
-impl<'a, 'tcx> Rebuilder<'a, 'tcx> {
-    fn new(tcx: &'a ty::ctxt<'tcx>,
-           fn_decl: &'a hir::FnDecl,
-           expl_self_opt: Option<&'a hir::ExplicitSelf_>,
-           generics: &'a hir::Generics,
-           same_regions: &'a [SameRegions],
-           life_giver: &'a LifeGiver)
-           -> Rebuilder<'a, 'tcx> {
-        Rebuilder {
-            tcx: tcx,
-            fn_decl: fn_decl,
-            expl_self_opt: expl_self_opt,
-            generics: generics,
-            same_regions: same_regions,
-            life_giver: life_giver,
-            cur_anon: Cell::new(0),
-            inserted_anons: RefCell::new(HashSet::new()),
-        }
-    }
-
-    fn rebuild(&self)
-               -> (hir::FnDecl, Option<hir::ExplicitSelf_>, hir::Generics) {
-        let mut expl_self_opt = self.expl_self_opt.cloned();
-        let mut inputs = self.fn_decl.inputs.clone();
-        let mut output = self.fn_decl.output.clone();
-        let mut ty_params = self.generics.ty_params.clone();
-        let where_clause = self.generics.where_clause.clone();
-        let mut kept_lifetimes = HashSet::new();
-        for sr in self.same_regions {
-            self.cur_anon.set(0);
-            self.offset_cur_anon();
-            let (anon_nums, region_names) =
-                                self.extract_anon_nums_and_names(sr);
-            let (lifetime, fresh_or_kept) = self.pick_lifetime(&region_names);
-            match fresh_or_kept {
-                Kept => { kept_lifetimes.insert(lifetime.name); }
-                _ => ()
-            }
-            expl_self_opt = self.rebuild_expl_self(expl_self_opt, lifetime,
-                                                   &anon_nums, &region_names);
-            inputs = self.rebuild_args_ty(&inputs[..], lifetime,
-                                          &anon_nums, &region_names);
-            output = self.rebuild_output(&output, lifetime, &anon_nums, &region_names);
-            ty_params = self.rebuild_ty_params(ty_params, lifetime,
-                                               &region_names);
-        }
-        let fresh_lifetimes = self.life_giver.get_generated_lifetimes();
-        let all_region_names = self.extract_all_region_names();
-        let generics = self.rebuild_generics(self.generics,
-                                             &fresh_lifetimes,
-                                             &kept_lifetimes,
-                                             &all_region_names,
-                                             ty_params,
-                                             where_clause);
-        let new_fn_decl = hir::FnDecl {
-            inputs: inputs,
-            output: output,
-            variadic: self.fn_decl.variadic
-        };
-        (new_fn_decl, expl_self_opt, generics)
-    }
-
-    fn pick_lifetime(&self,
-                     region_names: &HashSet<ast::Name>)
-                     -> (hir::Lifetime, FreshOrKept) {
-        if !region_names.is_empty() {
-            // It's not necessary to convert the set of region names to a
-            // vector of strings and then sort them. However, it makes the
-            // choice of lifetime name deterministic and thus easier to test.
-            let mut names = Vec::new();
-            for rn in region_names {
-                let lt_name = rn.to_string();
-                names.push(lt_name);
-            }
-            names.sort();
-            let name = token::intern(&names[0]);
-            return (name_to_dummy_lifetime(name), Kept);
-        }
-        return (self.life_giver.give_lifetime(), Fresh);
-    }
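    // A standalone sketch (hypothetical helper, not in this file) of the
    // determinism trick used by `pick_lifetime` above: HashSet iteration order
    // is unstable, so the names are sorted before one is picked.
    fn pick_deterministic(region_names: &std::collections::HashSet<String>) -> Option<String> {
        let mut names: Vec<String> = region_names.iter().cloned().collect();
        names.sort();                  // deterministic, lexicographic order
        names.into_iter().next()       // always the lexicographically smallest name
    }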
-
-    fn extract_anon_nums_and_names(&self, same_regions: &SameRegions)
-                                   -> (HashSet<u32>, HashSet<ast::Name>) {
-        let mut anon_nums = HashSet::new();
-        let mut region_names = HashSet::new();
-        for br in &same_regions.regions {
-            match *br {
-                ty::BrAnon(i) => {
-                    anon_nums.insert(i);
-                }
-                ty::BrNamed(_, name) => {
-                    region_names.insert(name);
-                }
-                _ => ()
-            }
-        }
-        (anon_nums, region_names)
-    }
-
-    fn extract_all_region_names(&self) -> HashSet<ast::Name> {
-        let mut all_region_names = HashSet::new();
-        for sr in self.same_regions {
-            for br in &sr.regions {
-                match *br {
-                    ty::BrNamed(_, name) => {
-                        all_region_names.insert(name);
-                    }
-                    _ => ()
-                }
-            }
-        }
-        all_region_names
-    }
-
-    fn inc_cur_anon(&self, n: u32) {
-        let anon = self.cur_anon.get();
-        self.cur_anon.set(anon+n);
-    }
-
-    fn offset_cur_anon(&self) {
-        let mut anon = self.cur_anon.get();
-        while self.inserted_anons.borrow().contains(&anon) {
-            anon += 1;
-        }
-        self.cur_anon.set(anon);
-    }
-
-    fn inc_and_offset_cur_anon(&self, n: u32) {
-        self.inc_cur_anon(n);
-        self.offset_cur_anon();
-    }
-
-    fn track_anon(&self, anon: u32) {
-        self.inserted_anons.borrow_mut().insert(anon);
-    }
-
-    fn rebuild_ty_params(&self,
-                         ty_params: hir::HirVec<hir::TyParam>,
-                         lifetime: hir::Lifetime,
-                         region_names: &HashSet<ast::Name>)
-                         -> hir::HirVec<hir::TyParam> {
-        ty_params.iter().map(|ty_param| {
-            let bounds = self.rebuild_ty_param_bounds(ty_param.bounds.clone(),
-                                                      lifetime,
-                                                      region_names);
-            hir::TyParam {
-                name: ty_param.name,
-                id: ty_param.id,
-                bounds: bounds,
-                default: ty_param.default.clone(),
-                span: ty_param.span,
-            }
-        }).collect()
-    }
-
-    fn rebuild_ty_param_bounds(&self,
-                               ty_param_bounds: hir::TyParamBounds,
-                               lifetime: hir::Lifetime,
-                               region_names: &HashSet<ast::Name>)
-                               -> hir::TyParamBounds {
-        ty_param_bounds.iter().map(|tpb| {
-            match tpb {
-                &hir::RegionTyParamBound(lt) => {
-                    // FIXME -- it's unclear whether I'm supposed to
-                    // substitute lifetime here. I suspect we need to
-                    // be passing down a map.
-                    hir::RegionTyParamBound(lt)
-                }
-                &hir::TraitTyParamBound(ref poly_tr, modifier) => {
-                    let tr = &poly_tr.trait_ref;
-                    let last_seg = tr.path.segments.last().unwrap();
-                    let mut insert = Vec::new();
-                    let lifetimes = last_seg.parameters.lifetimes();
-                    for (i, lt) in lifetimes.iter().enumerate() {
-                        if region_names.contains(&lt.name) {
-                            insert.push(i as u32);
-                        }
-                    }
-                    let rebuild_info = RebuildPathInfo {
-                        path: &tr.path,
-                        indexes: insert,
-                        expected: lifetimes.len() as u32,
-                        anon_nums: &HashSet::new(),
-                        region_names: region_names
-                    };
-                    let new_path = self.rebuild_path(rebuild_info, lifetime);
-                    hir::TraitTyParamBound(hir::PolyTraitRef {
-                        bound_lifetimes: poly_tr.bound_lifetimes.clone(),
-                        trait_ref: hir::TraitRef {
-                            path: new_path,
-                            ref_id: tr.ref_id,
-                        },
-                        span: poly_tr.span,
-                    }, modifier)
-                }
-            }
-        }).collect()
-    }
-
-    fn rebuild_expl_self(&self,
-                         expl_self_opt: Option<hir::ExplicitSelf_>,
-                         lifetime: hir::Lifetime,
-                         anon_nums: &HashSet<u32>,
-                         region_names: &HashSet<ast::Name>)
-                         -> Option<hir::ExplicitSelf_> {
-        match expl_self_opt {
-            Some(ref expl_self) => match *expl_self {
-                hir::SelfRegion(lt_opt, muta, id) => match lt_opt {
-                    Some(lt) => if region_names.contains(&lt.name) {
-                        return Some(hir::SelfRegion(Some(lifetime), muta, id));
-                    },
-                    None => {
-                        let anon = self.cur_anon.get();
-                        self.inc_and_offset_cur_anon(1);
-                        if anon_nums.contains(&anon) {
-                            self.track_anon(anon);
-                            return Some(hir::SelfRegion(Some(lifetime), muta, id));
-                        }
-                    }
-                },
-                _ => ()
-            },
-            None => ()
-        }
-        expl_self_opt
-    }
-
-    fn rebuild_generics(&self,
-                        generics: &hir::Generics,
-                        add: &Vec<hir::Lifetime>,
-                        keep: &HashSet<ast::Name>,
-                        remove: &HashSet<ast::Name>,
-                        ty_params: hir::HirVec<hir::TyParam>,
-                        where_clause: hir::WhereClause)
-                        -> hir::Generics {
-        let mut lifetimes = Vec::new();
-        for lt in add {
-            lifetimes.push(hir::LifetimeDef { lifetime: *lt,
-                                              bounds: hir::HirVec::new() });
-        }
-        for lt in &generics.lifetimes {
-            if keep.contains(&lt.lifetime.name) ||
-                !remove.contains(&lt.lifetime.name) {
-                lifetimes.push((*lt).clone());
-            }
-        }
-        hir::Generics {
-            lifetimes: lifetimes.into(),
-            ty_params: ty_params,
-            where_clause: where_clause,
-        }
-    }
-
-    fn rebuild_args_ty(&self,
-                       inputs: &[hir::Arg],
-                       lifetime: hir::Lifetime,
-                       anon_nums: &HashSet<u32>,
-                       region_names: &HashSet<ast::Name>)
-                       -> hir::HirVec<hir::Arg> {
-        let mut new_inputs = Vec::new();
-        for arg in inputs {
-            let new_ty = self.rebuild_arg_ty_or_output(&arg.ty, lifetime,
-                                                       anon_nums, region_names);
-            let possibly_new_arg = hir::Arg {
-                ty: new_ty,
-                pat: arg.pat.clone(),
-                id: arg.id
-            };
-            new_inputs.push(possibly_new_arg);
-        }
-        new_inputs.into()
-    }
-
-    fn rebuild_output(&self, ty: &hir::FunctionRetTy,
-                      lifetime: hir::Lifetime,
-                      anon_nums: &HashSet<u32>,
-                      region_names: &HashSet<ast::Name>) -> hir::FunctionRetTy {
-        match *ty {
-            hir::Return(ref ret_ty) => hir::Return(
-                self.rebuild_arg_ty_or_output(&ret_ty, lifetime, anon_nums, region_names)
-            ),
-            hir::DefaultReturn(span) => hir::DefaultReturn(span),
-            hir::NoReturn(span) => hir::NoReturn(span)
-        }
-    }
-
-    fn rebuild_arg_ty_or_output(&self,
-                                ty: &hir::Ty,
-                                lifetime: hir::Lifetime,
-                                anon_nums: &HashSet<u32>,
-                                region_names: &HashSet<ast::Name>)
-                                -> P<hir::Ty> {
-        let mut new_ty = P(ty.clone());
-        let mut ty_queue = vec!(ty);
-        while !ty_queue.is_empty() {
-            let cur_ty = ty_queue.remove(0);
-            match cur_ty.node {
-                hir::TyRptr(lt_opt, ref mut_ty) => {
-                    let rebuild = match lt_opt {
-                        Some(lt) => region_names.contains(&lt.name),
-                        None => {
-                            let anon = self.cur_anon.get();
-                            let rebuild = anon_nums.contains(&anon);
-                            if rebuild {
-                                self.track_anon(anon);
-                            }
-                            self.inc_and_offset_cur_anon(1);
-                            rebuild
-                        }
-                    };
-                    if rebuild {
-                        let to = hir::Ty {
-                            id: cur_ty.id,
-                            node: hir::TyRptr(Some(lifetime), mut_ty.clone()),
-                            span: cur_ty.span
-                        };
-                        new_ty = self.rebuild_ty(new_ty, P(to));
-                    }
-                    ty_queue.push(&mut_ty.ty);
-                }
-                hir::TyPath(ref maybe_qself, ref path) => {
-                    let a_def = match self.tcx.def_map.borrow().get(&cur_ty.id) {
-                        None => {
-                            self.tcx
-                                .sess
-                                .fatal(&format!(
-                                        "unbound path {}",
-                                        pprust::path_to_string(path)))
-                        }
-                        Some(d) => d.full_def()
-                    };
-                    match a_def {
-                        Def::Enum(did) | Def::TyAlias(did) | Def::Struct(did) => {
-                            let generics = self.tcx.lookup_item_type(did).generics;
-
-                            let expected =
-                                generics.regions.len(subst::TypeSpace) as u32;
-                            let lifetimes =
-                                path.segments.last().unwrap().parameters.lifetimes();
-                            let mut insert = Vec::new();
-                            if lifetimes.is_empty() {
-                                let anon = self.cur_anon.get();
-                                for (i, a) in (anon..anon+expected).enumerate() {
-                                    if anon_nums.contains(&a) {
-                                        insert.push(i as u32);
-                                    }
-                                    self.track_anon(a);
-                                }
-                                self.inc_and_offset_cur_anon(expected);
-                            } else {
-                                for (i, lt) in lifetimes.iter().enumerate() {
-                                    if region_names.contains(&lt.name) {
-                                        insert.push(i as u32);
-                                    }
-                                }
-                            }
-                            let rebuild_info = RebuildPathInfo {
-                                path: path,
-                                indexes: insert,
-                                expected: expected,
-                                anon_nums: anon_nums,
-                                region_names: region_names
-                            };
-                            let new_path = self.rebuild_path(rebuild_info, lifetime);
-                            let qself = maybe_qself.as_ref().map(|qself| {
-                                hir::QSelf {
-                                    ty: self.rebuild_arg_ty_or_output(&qself.ty, lifetime,
-                                                                      anon_nums, region_names),
-                                    position: qself.position
-                                }
-                            });
-                            let to = hir::Ty {
-                                id: cur_ty.id,
-                                node: hir::TyPath(qself, new_path),
-                                span: cur_ty.span
-                            };
-                            new_ty = self.rebuild_ty(new_ty, P(to));
-                        }
-                        _ => ()
-                    }
-                }
-
-                hir::TyPtr(ref mut_ty) => {
-                    ty_queue.push(&mut_ty.ty);
-                }
-                hir::TyVec(ref ty) |
-                hir::TyFixedLengthVec(ref ty, _) => {
-                    ty_queue.push(&ty);
-                }
-                hir::TyTup(ref tys) => ty_queue.extend(tys.iter().map(|ty| &**ty)),
-                _ => {}
-            }
-        }
-        new_ty
-    }
-
-    fn rebuild_ty(&self,
-                  from: P<hir::Ty>,
-                  to: P<hir::Ty>)
-                  -> P<hir::Ty> {
-
-        fn build_to(from: P<hir::Ty>,
-                    to: &mut Option<P<hir::Ty>>)
-                    -> P<hir::Ty> {
-            if Some(from.id) == to.as_ref().map(|ty| ty.id) {
-                return to.take().expect("`to` type found more than once during rebuild");
-            }
-            from.map(|hir::Ty {id, node, span}| {
-                let new_node = match node {
-                    hir::TyRptr(lifetime, mut_ty) => {
-                        hir::TyRptr(lifetime, hir::MutTy {
-                            mutbl: mut_ty.mutbl,
-                            ty: build_to(mut_ty.ty, to),
-                        })
-                    }
-                    hir::TyPtr(mut_ty) => {
-                        hir::TyPtr(hir::MutTy {
-                            mutbl: mut_ty.mutbl,
-                            ty: build_to(mut_ty.ty, to),
-                        })
-                    }
-                    hir::TyVec(ty) => hir::TyVec(build_to(ty, to)),
-                    hir::TyFixedLengthVec(ty, e) => {
-                        hir::TyFixedLengthVec(build_to(ty, to), e)
-                    }
-                    hir::TyTup(tys) => {
-                        hir::TyTup(tys.into_iter().map(|ty| build_to(ty, to)).collect())
-                    }
-                    other => other
-                };
-                hir::Ty { id: id, node: new_node, span: span }
-            })
-        }
-
-        build_to(from, &mut Some(to))
-    }
-
-    fn rebuild_path(&self,
-                    rebuild_info: RebuildPathInfo,
-                    lifetime: hir::Lifetime)
-                    -> hir::Path
-    {
-        let RebuildPathInfo {
-            path,
-            indexes,
-            expected,
-            anon_nums,
-            region_names,
-        } = rebuild_info;
-
-        let last_seg = path.segments.last().unwrap();
-        let new_parameters = match last_seg.parameters {
-            hir::ParenthesizedParameters(..) => {
-                last_seg.parameters.clone()
-            }
-
-            hir::AngleBracketedParameters(ref data) => {
-                let mut new_lts = Vec::new();
-                if data.lifetimes.is_empty() {
-                    // traverse once to see if there's a need to insert lifetime
-                    let need_insert = (0..expected).any(|i| {
-                        indexes.contains(&i)
-                    });
-                    if need_insert {
-                        for i in 0..expected {
-                            if indexes.contains(&i) {
-                                new_lts.push(lifetime);
-                            } else {
-                                new_lts.push(self.life_giver.give_lifetime());
-                            }
-                        }
-                    }
-                } else {
-                    for (i, lt) in data.lifetimes.iter().enumerate() {
-                        if indexes.contains(&(i as u32)) {
-                            new_lts.push(lifetime);
-                        } else {
-                            new_lts.push(*lt);
-                        }
-                    }
-                }
-                let new_types = data.types.iter().map(|t| {
-                    self.rebuild_arg_ty_or_output(&t, lifetime, anon_nums, region_names)
-                }).collect();
-                let new_bindings = data.bindings.iter().map(|b| {
-                    hir::TypeBinding {
-                        id: b.id,
-                        name: b.name,
-                        ty: self.rebuild_arg_ty_or_output(&b.ty,
-                                                          lifetime,
-                                                          anon_nums,
-                                                          region_names),
-                        span: b.span
-                    }
-                }).collect();
-                hir::AngleBracketedParameters(hir::AngleBracketedParameterData {
-                    lifetimes: new_lts.into(),
-                    types: new_types,
-                    bindings: new_bindings,
-               })
-            }
-        };
-        let new_seg = hir::PathSegment {
-            identifier: last_seg.identifier,
-            parameters: new_parameters
-        };
-        let mut new_segs = Vec::new();
-        new_segs.extend_from_slice(path.segments.split_last().unwrap().1);
-        new_segs.push(new_seg);
-        hir::Path {
-            span: path.span,
-            global: path.global,
-            segments: new_segs.into()
-        }
-    }
-}
-
-impl<'a, 'tcx> ErrorReportingHelpers<'tcx> for InferCtxt<'a, 'tcx> {
-    fn give_expl_lifetime_param(&self,
-                                err: &mut DiagnosticBuilder,
-                                decl: &hir::FnDecl,
-                                unsafety: hir::Unsafety,
-                                constness: hir::Constness,
-                                name: ast::Name,
-                                opt_explicit_self: Option<&hir::ExplicitSelf_>,
-                                generics: &hir::Generics,
-                                span: Span) {
-        let suggested_fn = pprust::fun_to_string(decl, unsafety, constness, name,
-                                                 opt_explicit_self, generics);
-        let msg = format!("consider using an explicit lifetime \
-                           parameter as shown: {}", suggested_fn);
-        err.span_help(span, &msg[..]);
-    }
-
-    fn report_inference_failure(&self,
-                                var_origin: RegionVariableOrigin)
-                                -> DiagnosticBuilder<'tcx> {
-        let br_string = |br: ty::BoundRegion| {
-            let mut s = br.to_string();
-            if !s.is_empty() {
-                s.push_str(" ");
-            }
-            s
-        };
-        let var_description = match var_origin {
-            infer::MiscVariable(_) => "".to_string(),
-            infer::PatternRegion(_) => " for pattern".to_string(),
-            infer::AddrOfRegion(_) => " for borrow expression".to_string(),
-            infer::Autoref(_) => " for autoref".to_string(),
-            infer::Coercion(_) => " for automatic coercion".to_string(),
-            infer::LateBoundRegion(_, br, infer::FnCall) => {
-                format!(" for lifetime parameter {}in function call",
-                        br_string(br))
-            }
-            infer::LateBoundRegion(_, br, infer::HigherRankedType) => {
-                format!(" for lifetime parameter {}in generic type", br_string(br))
-            }
-            infer::LateBoundRegion(_, br, infer::AssocTypeProjection(type_name)) => {
-                format!(" for lifetime parameter {}in trait containing associated type `{}`",
-                        br_string(br), type_name)
-            }
-            infer::EarlyBoundRegion(_, name) => {
-                format!(" for lifetime parameter `{}`",
-                        name)
-            }
-            infer::BoundRegionInCoherence(name) => {
-                format!(" for lifetime parameter `{}` in coherence check",
-                        name)
-            }
-            infer::UpvarRegion(ref upvar_id, _) => {
-                format!(" for capture of `{}` by closure",
-                        self.tcx.local_var_name_str(upvar_id.var_id).to_string())
-            }
-        };
-
-        struct_span_err!(self.tcx.sess, var_origin.span(), E0495,
-                  "cannot infer an appropriate lifetime{} \
-                   due to conflicting requirements",
-                  var_description)
-    }
-
-    fn note_region_origin(&self, err: &mut DiagnosticBuilder, origin: &SubregionOrigin<'tcx>) {
-        match *origin {
-            infer::Subtype(ref trace) => {
-                let desc = match trace.origin {
-                    TypeOrigin::Misc(_) => {
-                        "types are compatible"
-                    }
-                    TypeOrigin::MethodCompatCheck(_) => {
-                        "method type is compatible with trait"
-                    }
-                    TypeOrigin::ExprAssignable(_) => {
-                        "expression is assignable"
-                    }
-                    TypeOrigin::RelateTraitRefs(_) => {
-                        "traits are compatible"
-                    }
-                    TypeOrigin::RelateSelfType(_) => {
-                        "self type matches impl self type"
-                    }
-                    TypeOrigin::RelateOutputImplTypes(_) => {
-                        "trait type parameters matches those \
-                                 specified on the impl"
-                    }
-                    TypeOrigin::MatchExpressionArm(_, _, _) => {
-                        "match arms have compatible types"
-                    }
-                    TypeOrigin::IfExpression(_) => {
-                        "if and else have compatible types"
-                    }
-                    TypeOrigin::IfExpressionWithNoElse(_) => {
-                        "if may be missing an else clause"
-                    }
-                    TypeOrigin::RangeExpression(_) => {
-                        "start and end of range have compatible types"
-                    }
-                    TypeOrigin::EquatePredicate(_) => {
-                        "equality where clause is satisfied"
-                    }
-                };
-
-                match self.values_str(&trace.values) {
-                    Some(values_str) => {
-                        err.span_note(
-                            trace.origin.span(),
-                            &format!("...so that {} ({})",
-                                    desc, values_str));
-                    }
-                    None => {
-                        // Really should avoid printing this error at
-                        // all, since it is derived, but that would
-                        // require more refactoring than I feel like
-                        // doing right now. - nmatsakis
-                        err.span_note(
-                            trace.origin.span(),
-                            &format!("...so that {}", desc));
-                    }
-                }
-            }
-            infer::Reborrow(span) => {
-                err.span_note(
-                    span,
-                    "...so that reference does not outlive \
-                    borrowed content");
-            }
-            infer::ReborrowUpvar(span, ref upvar_id) => {
-                err.span_note(
-                    span,
-                    &format!(
-                        "...so that closure can access `{}`",
-                        self.tcx.local_var_name_str(upvar_id.var_id)
-                            .to_string()));
-            }
-            infer::InfStackClosure(span) => {
-                err.span_note(
-                    span,
-                    "...so that closure does not outlive its stack frame");
-            }
-            infer::InvokeClosure(span) => {
-                err.span_note(
-                    span,
-                    "...so that closure is not invoked outside its lifetime");
-            }
-            infer::DerefPointer(span) => {
-                err.span_note(
-                    span,
-                    "...so that pointer is not dereferenced \
-                    outside its lifetime");
-            }
-            infer::FreeVariable(span, id) => {
-                err.span_note(
-                    span,
-                    &format!("...so that captured variable `{}` \
-                            does not outlive the enclosing closure",
-                            self.tcx.local_var_name_str(id)));
-            }
-            infer::IndexSlice(span) => {
-                err.span_note(
-                    span,
-                    "...so that slice is not indexed outside the lifetime");
-            }
-            infer::RelateObjectBound(span) => {
-                err.span_note(
-                    span,
-                    "...so that it can be closed over into an object");
-            }
-            infer::CallRcvr(span) => {
-                err.span_note(
-                    span,
-                    "...so that method receiver is valid for the method call");
-            }
-            infer::CallArg(span) => {
-                err.span_note(
-                    span,
-                    "...so that argument is valid for the call");
-            }
-            infer::CallReturn(span) => {
-                err.span_note(
-                    span,
-                    "...so that return value is valid for the call");
-            }
-            infer::Operand(span) => {
-                err.span_note(
-                    span,
-                    "...so that operand is valid for operation");
-            }
-            infer::AddrOf(span) => {
-                err.span_note(
-                    span,
-                    "...so that reference is valid \
-                     at the time of borrow");
-            }
-            infer::AutoBorrow(span) => {
-                err.span_note(
-                    span,
-                    "...so that auto-reference is valid \
-                     at the time of borrow");
-            }
-            infer::ExprTypeIsNotInScope(t, span) => {
-                err.span_note(
-                    span,
-                    &format!("...so type `{}` of expression is valid during the \
-                             expression",
-                            self.ty_to_string(t)));
-            }
-            infer::BindingTypeIsNotValidAtDecl(span) => {
-                err.span_note(
-                    span,
-                    "...so that variable is valid at time of its declaration");
-            }
-            infer::ParameterInScope(_, span) => {
-                err.span_note(
-                    span,
-                    "...so that a type/lifetime parameter is in scope here");
-            }
-            infer::DataBorrowed(ty, span) => {
-                err.span_note(
-                    span,
-                    &format!("...so that the type `{}` is not borrowed for too long",
-                             self.ty_to_string(ty)));
-            }
-            infer::ReferenceOutlivesReferent(ty, span) => {
-                err.span_note(
-                    span,
-                    &format!("...so that the reference type `{}` \
-                             does not outlive the data it points at",
-                            self.ty_to_string(ty)));
-            }
-            infer::RelateParamBound(span, t) => {
-                err.span_note(
-                    span,
-                    &format!("...so that the type `{}` \
-                             will meet its required lifetime bounds",
-                            self.ty_to_string(t)));
-            }
-            infer::RelateDefaultParamBound(span, t) => {
-                err.span_note(
-                    span,
-                    &format!("...so that type parameter \
-                             instantiated with `{}`, \
-                             will meet its declared lifetime bounds",
-                            self.ty_to_string(t)));
-            }
-            infer::RelateRegionParamBound(span) => {
-                err.span_note(
-                    span,
-                    "...so that the declared lifetime parameter bounds \
-                                are satisfied");
-            }
-            infer::SafeDestructor(span) => {
-                err.span_note(
-                    span,
-                    "...so that references are valid when the destructor \
-                     runs");
-            }
-        }
-    }
-}
-
-pub trait Resolvable<'tcx> {
-    fn resolve<'a>(&self, infcx: &InferCtxt<'a, 'tcx>) -> Self;
-}
-
-impl<'tcx> Resolvable<'tcx> for Ty<'tcx> {
-    fn resolve<'a>(&self, infcx: &InferCtxt<'a, 'tcx>) -> Ty<'tcx> {
-        infcx.resolve_type_vars_if_possible(self)
-    }
-}
-
-impl<'tcx> Resolvable<'tcx> for ty::TraitRef<'tcx> {
-    fn resolve<'a>(&self, infcx: &InferCtxt<'a, 'tcx>)
-                   -> ty::TraitRef<'tcx> {
-        infcx.resolve_type_vars_if_possible(self)
-    }
-}
-
-impl<'tcx> Resolvable<'tcx> for ty::PolyTraitRef<'tcx> {
-    fn resolve<'a>(&self,
-                   infcx: &InferCtxt<'a, 'tcx>)
-                   -> ty::PolyTraitRef<'tcx>
-    {
-        infcx.resolve_type_vars_if_possible(self)
-    }
-}
-
-fn lifetimes_in_scope(tcx: &ty::ctxt,
-                      scope_id: ast::NodeId)
-                      -> Vec<hir::LifetimeDef> {
-    let mut taken = Vec::new();
-    let parent = tcx.map.get_parent(scope_id);
-    let method_id_opt = match tcx.map.find(parent) {
-        Some(node) => match node {
-            ast_map::NodeItem(item) => match item.node {
-                hir::ItemFn(_, _, _, _, ref gen, _) => {
-                    taken.extend_from_slice(&gen.lifetimes);
-                    None
-                },
-                _ => None
-            },
-            ast_map::NodeImplItem(ii) => {
-                match ii.node {
-                    hir::ImplItemKind::Method(ref sig, _) => {
-                        taken.extend_from_slice(&sig.generics.lifetimes);
-                        Some(ii.id)
-                    }
-                    _ => None,
-                }
-            }
-            _ => None
-        },
-        None => None
-    };
-    if method_id_opt.is_some() {
-        let method_id = method_id_opt.unwrap();
-        let parent = tcx.map.get_parent(method_id);
-        match tcx.map.find(parent) {
-            Some(node) => match node {
-                ast_map::NodeItem(item) => match item.node {
-                    hir::ItemImpl(_, _, ref gen, _, _, _) => {
-                        taken.extend_from_slice(&gen.lifetimes);
-                    }
-                    _ => ()
-                },
-                _ => ()
-            },
-            None => ()
-        }
-    }
-    return taken;
-}
-
-// LifeGiver is responsible for generating fresh lifetime names
-struct LifeGiver {
-    taken: HashSet<String>,
-    counter: Cell<usize>,
-    generated: RefCell<Vec<hir::Lifetime>>,
-}
-
-impl LifeGiver {
-    fn with_taken(taken: &[hir::LifetimeDef]) -> LifeGiver {
-        let mut taken_ = HashSet::new();
-        for lt in taken {
-            let lt_name = lt.lifetime.name.to_string();
-            taken_.insert(lt_name);
-        }
-        LifeGiver {
-            taken: taken_,
-            counter: Cell::new(0),
-            generated: RefCell::new(Vec::new()),
-        }
-    }
-
-    fn inc_counter(&self) {
-        let c = self.counter.get();
-        self.counter.set(c+1);
-    }
-
-    fn give_lifetime(&self) -> hir::Lifetime {
-        let lifetime;
-        loop {
-            let mut s = String::from("'");
-            s.push_str(&num_to_string(self.counter.get()));
-            if !self.taken.contains(&s) {
-                lifetime = name_to_dummy_lifetime(token::intern(&s[..]));
-                self.generated.borrow_mut().push(lifetime);
-                break;
-            }
-            self.inc_counter();
-        }
-        self.inc_counter();
-        return lifetime;
-
-        // 0 .. 25 generates a .. z, 26 .. 51 generates aa .. zz, and so on
-        fn num_to_string(counter: usize) -> String {
-            let mut s = String::new();
-            let (n, r) = (counter/26 + 1, counter % 26);
-            let letter: char = from_u32((r+97) as u32).unwrap();
-            for _ in 0..n {
-                s.push(letter);
-            }
-            s
-        }
-    }
-
-    fn get_generated_lifetimes(&self) -> Vec<hir::Lifetime> {
-        self.generated.borrow().clone()
-    }
-}
-
-fn name_to_dummy_lifetime(name: ast::Name) -> hir::Lifetime {
-    hir::Lifetime { id: ast::DUMMY_NODE_ID,
-                    span: codemap::DUMMY_SP,
-                    name: name }
-}
diff --git a/src/librustc/middle/infer/freshen.rs b/src/librustc/middle/infer/freshen.rs
deleted file mode 100644 (file)
index 76dd623..0000000
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Freshening is the process of replacing unknown variables with fresh types. The idea is that
-//! the type, after freshening, contains no inference variables but instead contains either a
-//! value for each variable or fresh "arbitrary" types wherever a variable would have been.
-//!
-//! Freshening is used primarily to get a good type for inserting into a cache. The result
-//! summarizes what the type inferencer knows "so far". The primary place it is used right now is
-//! in the trait matching algorithm, which needs to be able to cache whether an `impl` self type
-//! matches some other type X -- *without* affecting `X`. That means that if the type `X` is in
-//! fact an unbound type variable, we want the match to be regarded as ambiguous, because depending
-//! on what type that type variable is ultimately assigned, the match may or may not succeed.
-//!
-//! Note that you should be careful not to allow the output of freshening to leak to the user in
-//! error messages or in any other form. Freshening is only really useful as an internal detail.
-//!
-//! __An important detail concerning regions.__ The freshener also replaces *all* regions with
-//! 'static. The reason behind this is that, in general, we do not take region relationships into
-//! account when making type-overloaded decisions. This is important because of the design of the
-//! region inferencer, which is not based on unification but rather on accumulating and then
-//! solving a set of constraints. In contrast, the type inferencer assigns a value to each type
-//! variable only once, and it does so as soon as it can, so it is reasonable to ask what the type
-//! inferencer knows "so far".
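
As a rough illustration of the idea sketched in the comment above, here is a minimal, hypothetical freshener over a toy type grammar. It is not part of the deleted file; `ToyTy`, `freshen`, and the numbering scheme are invented for illustration, and regions are ignored entirely.

```rust
use std::collections::HashMap;

// A toy type grammar: a concrete type, an unresolved inference variable,
// a freshened placeholder, or a reference to another type.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
enum ToyTy {
    Int,
    Var(u32),   // unresolved inference variable
    Fresh(u32), // freshened placeholder
    Ref(Box<ToyTy>),
}

// Replace every inference variable with a numbered placeholder, reusing the
// same placeholder when the same variable occurs twice, so that structurally
// equal partially-inferred types yield equal cache keys.
fn freshen(ty: &ToyTy, map: &mut HashMap<u32, u32>, counter: &mut u32) -> ToyTy {
    match ty {
        ToyTy::Var(v) => {
            let idx = *map.entry(*v).or_insert_with(|| {
                let i = *counter;
                *counter += 1;
                i
            });
            ToyTy::Fresh(idx)
        }
        ToyTy::Ref(inner) => ToyTy::Ref(Box::new(freshen(inner, map, counter))),
        other => other.clone(),
    }
}

fn main() {
    let ty = ToyTy::Ref(Box::new(ToyTy::Var(42)));
    let mut map = HashMap::new();
    let mut counter = 0;
    // Prints `Ref(Fresh(0))`: the unknown variable 42 became placeholder 0.
    println!("{:?}", freshen(&ty, &mut map, &mut counter));
}
```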
-
-use middle::ty::{self, Ty, TypeFoldable};
-use middle::ty::fold::TypeFolder;
-use std::collections::hash_map::{self, Entry};
-
-use super::InferCtxt;
-use super::unify_key::ToType;
-
-pub struct TypeFreshener<'a, 'tcx:'a> {
-    infcx: &'a InferCtxt<'a, 'tcx>,
-    freshen_count: u32,
-    freshen_map: hash_map::HashMap<ty::InferTy, Ty<'tcx>>,
-}
-
-impl<'a, 'tcx> TypeFreshener<'a, 'tcx> {
-    pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> TypeFreshener<'a, 'tcx> {
-        TypeFreshener {
-            infcx: infcx,
-            freshen_count: 0,
-            freshen_map: hash_map::HashMap::new(),
-        }
-    }
-
-    fn freshen<F>(&mut self,
-                  opt_ty: Option<Ty<'tcx>>,
-                  key: ty::InferTy,
-                  freshener: F)
-                  -> Ty<'tcx> where
-        F: FnOnce(u32) -> ty::InferTy,
-    {
-        match opt_ty {
-            Some(ty) => { return ty.fold_with(self); }
-            None => { }
-        }
-
-        match self.freshen_map.entry(key) {
-            Entry::Occupied(entry) => *entry.get(),
-            Entry::Vacant(entry) => {
-                let index = self.freshen_count;
-                self.freshen_count += 1;
-                let t = self.infcx.tcx.mk_infer(freshener(index));
-                entry.insert(t);
-                t
-            }
-        }
-    }
-}
-
-impl<'a, 'tcx> TypeFolder<'tcx> for TypeFreshener<'a, 'tcx> {
-    fn tcx<'b>(&'b self) -> &'b ty::ctxt<'tcx> {
-        self.infcx.tcx
-    }
-
-    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
-        match r {
-            ty::ReEarlyBound(..) |
-            ty::ReLateBound(..) => {
-                // leave bound regions alone
-                r
-            }
-
-            ty::ReStatic |
-            ty::ReFree(_) |
-            ty::ReScope(_) |
-            ty::ReVar(_) |
-            ty::ReSkolemized(..) |
-            ty::ReEmpty => {
-                // replace all free regions with 'static
-                ty::ReStatic
-            }
-        }
-    }
-
-    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
-        if !t.needs_infer() && !t.has_erasable_regions() {
-            return t;
-        }
-
-        let tcx = self.infcx.tcx;
-
-        match t.sty {
-            ty::TyInfer(ty::TyVar(v)) => {
-                self.freshen(
-                    self.infcx.type_variables.borrow().probe(v),
-                    ty::TyVar(v),
-                    ty::FreshTy)
-            }
-
-            ty::TyInfer(ty::IntVar(v)) => {
-                self.freshen(
-                    self.infcx.int_unification_table.borrow_mut()
-                                                    .probe(v)
-                                                    .map(|v| v.to_type(tcx)),
-                    ty::IntVar(v),
-                    ty::FreshIntTy)
-            }
-
-            ty::TyInfer(ty::FloatVar(v)) => {
-                self.freshen(
-                    self.infcx.float_unification_table.borrow_mut()
-                                                      .probe(v)
-                                                      .map(|v| v.to_type(tcx)),
-                    ty::FloatVar(v),
-                    ty::FreshFloatTy)
-            }
-
-            ty::TyInfer(ty::FreshTy(c)) |
-            ty::TyInfer(ty::FreshIntTy(c)) |
-            ty::TyInfer(ty::FreshFloatTy(c)) => {
-                if c >= self.freshen_count {
-                    tcx.sess.bug(
-                        &format!("Encountered a freshend type with id {} \
-                                  but our counter is only at {}",
-                                 c,
-                                 self.freshen_count));
-                }
-                t
-            }
-
-            ty::TyBool |
-            ty::TyChar |
-            ty::TyInt(..) |
-            ty::TyUint(..) |
-            ty::TyFloat(..) |
-            ty::TyEnum(..) |
-            ty::TyBox(..) |
-            ty::TyStr |
-            ty::TyError |
-            ty::TyArray(..) |
-            ty::TySlice(..) |
-            ty::TyRawPtr(..) |
-            ty::TyRef(..) |
-            ty::TyBareFn(..) |
-            ty::TyTrait(..) |
-            ty::TyStruct(..) |
-            ty::TyClosure(..) |
-            ty::TyTuple(..) |
-            ty::TyProjection(..) |
-            ty::TyParam(..) => {
-                t.super_fold_with(self)
-            }
-        }
-    }
-}
diff --git a/src/librustc/middle/infer/glb.rs b/src/librustc/middle/infer/glb.rs
deleted file mode 100644 (file)
index 0035f31..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::combine::CombineFields;
-use super::higher_ranked::HigherRankedRelations;
-use super::InferCtxt;
-use super::lattice::{self, LatticeDir};
-use super::Subtype;
-
-use middle::ty::{self, Ty};
-use middle::ty::relate::{Relate, RelateResult, TypeRelation};
-
-/// "Greatest lower bound" (common subtype)
-pub struct Glb<'a, 'tcx: 'a> {
-    fields: CombineFields<'a, 'tcx>
-}
-
-impl<'a, 'tcx> Glb<'a, 'tcx> {
-    pub fn new(fields: CombineFields<'a, 'tcx>) -> Glb<'a, 'tcx> {
-        Glb { fields: fields }
-    }
-}
-
-impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Glb<'a, 'tcx> {
-    fn tag(&self) -> &'static str { "Glb" }
-
-    fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.fields.tcx() }
-
-    fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
-
-    fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
-                                               variance: ty::Variance,
-                                               a: &T,
-                                               b: &T)
-                                               -> RelateResult<'tcx, T>
-    {
-        match variance {
-            ty::Invariant => self.fields.equate().relate(a, b),
-            ty::Covariant => self.relate(a, b),
-            ty::Bivariant => self.fields.bivariate().relate(a, b),
-            ty::Contravariant => self.fields.lub().relate(a, b),
-        }
-    }
-
-    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
-        lattice::super_lattice_tys(self, a, b)
-    }
-
-    fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
-        debug!("{}.regions({:?}, {:?})",
-               self.tag(),
-               a,
-               b);
-
-        let origin = Subtype(self.fields.trace.clone());
-        Ok(self.fields.infcx.region_vars.glb_regions(origin, a, b))
-    }
-
-    fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-                  -> RelateResult<'tcx, ty::Binder<T>>
-        where T: Relate<'a, 'tcx>
-    {
-        self.fields.higher_ranked_glb(a, b)
-    }
-}
-
-impl<'a, 'tcx> LatticeDir<'a,'tcx> for Glb<'a, 'tcx> {
-    fn infcx(&self) -> &'a InferCtxt<'a,'tcx> {
-        self.fields.infcx
-    }
-
-    fn relate_bound(&self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()> {
-        let mut sub = self.fields.sub();
-        try!(sub.relate(&v, &a));
-        try!(sub.relate(&v, &b));
-        Ok(())
-    }
-}
diff --git a/src/librustc/middle/infer/higher_ranked/README.md b/src/librustc/middle/infer/higher_ranked/README.md
deleted file mode 100644 (file)
index 57665b6..0000000
+++ /dev/null
@@ -1,403 +0,0 @@
-# Skolemization and functions
-
-One of the trickiest and most subtle aspects of regions is dealing
-with higher-ranked things which include bound region variables, such
-as function types. I strongly suggest that if you want to understand
-the situation, you read this paper (which is, admittedly, very long,
-but you don't have to read the whole thing):
-
-http://research.microsoft.com/en-us/um/people/simonpj/papers/higher-rank/
-
-Although my explanation will never compete with SPJ's (for one thing,
-his is approximately 100 pages), I will attempt to explain the basic
-problem and also how we solve it. Note that the paper only discusses
-subtyping, not the computation of LUB/GLB.
-
-The problem we are addressing is that there is a kind of subtyping
-between functions with bound region parameters. Consider, for
-example, whether the following relation holds:
-
-    for<'a> fn(&'a isize) <: for<'b> fn(&'b isize)? (Yes, a => b)
-
-The answer is that of course it does. These two types are basically
-the same, except that in one we used the name `a` and one we used
-the name `b`.
-
-In the examples that follow, it becomes very important to know whether
-a lifetime is bound in a function type (that is, is a lifetime
-parameter) or appears free (is defined in some outer scope).
-Therefore, from now on I will always write the bindings explicitly,
-using the Rust syntax `for<'a> fn(&'a isize)` to indicate that `a` is a
-lifetime parameter.
-
-Now let's consider two more function types. Here, we assume that the
-`'b` lifetime is defined somewhere outside and hence is not a lifetime
-parameter bound by the function type (it "appears free"):
-
-    for<'a> fn(&'a isize) <: fn(&'b isize)? (Yes, a => b)
-
-This subtyping relation does in fact hold. To see why, you have to
-consider what subtyping means. One way to look at `T1 <: T2` is to
-say that it is always ok to treat an instance of `T1` as if it had the
-type `T2`. So, with our functions, it is always ok to treat a function
-that can take pointers with any lifetime as if it were a function that
-can only take a pointer with the specific lifetime `'b`. After all,
-`'b` is a lifetime, and the function can take values of any lifetime.
-
-You can also look at subtyping as the *is a* relationship. This amounts
-to the same thing: a function that accepts pointers with any lifetime
-*is a* function that accepts pointers with some specific lifetime.
-
-So, what if we reverse the order of the two function types, like this:
-
-    fn(&'b isize) <: for<'a> fn(&'a isize)? (No)
-
-Does the subtyping relationship still hold?  The answer of course is
-no. In this case, the function accepts *only the lifetime `'b`*,
-so it is not reasonable to treat it as if it were a function that
-accepted any lifetime.
-
-What about these two examples:
-
-    for<'a,'b> fn(&'a isize, &'b isize) <: for<'a>    fn(&'a isize, &'a isize)? (Yes)
-    for<'a>    fn(&'a isize, &'a isize) <: for<'a,'b> fn(&'a isize, &'b isize)? (No)
-
-Here, it is true that functions which take two pointers with any two
-lifetimes can be treated as if they only accepted two pointers with
-the same lifetime, but not the reverse.
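
The same direction can be seen with plain function pointers in today's Rust. The following is a standalone sketch (the function names are invented for illustration, not taken from any existing code): a higher-ranked `for<'a> fn(&'a i32)` can be passed both where a callee must accept *any* lifetime and where only one specific lifetime `'b` is required, which is exactly the `<:` direction argued above.

```rust
// A function item like this coerces to the higher-ranked pointer type
// `for<'a> fn(&'a i32)`: it works for *any* lifetime.
fn print_it(x: &i32) {
    println!("{}", x);
}

// Requires a callee that accepts a reference of any lifetime.
fn wants_higher_ranked(f: for<'a> fn(&'a i32)) {
    let local = 10;
    f(&local); // must work even for the short lifetime of `local`
}

// Only requires a callee that accepts the one specific lifetime `'b`.
fn wants_specific<'b>(f: fn(&'b i32), arg: &'b i32) {
    f(arg);
}

fn main() {
    wants_higher_ranked(print_it);
    let v = 1;
    wants_specific(print_it, &v);
}
```

Both calls compile; the reverse coercion, from a pointer known to work for only one specific lifetime to a higher-ranked one, is the direction that must be rejected.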
-
-## The algorithm
-
-Here is the algorithm we use to perform the subtyping check:
-
-1. Replace all bound regions in the subtype with new variables
-2. Replace all bound regions in the supertype with skolemized
-   equivalents. A "skolemized" region is just a new fresh region
-   name.
-3. Check that the parameter and return types match as normal
-4. Ensure that no skolemized regions 'leak' into region variables
-   visible from "the outside"
-
-Let's walk through some examples and see how this algorithm plays out.
-
-#### First example
-
-We'll start with the first example, which was:
-
-    1. for<'a> fn(&'a T) <: for<'b> fn(&'b T)?        Yes: a -> b
-
-After steps 1 and 2 of the algorithm we will have replaced the types
-like so:
-
-    1. fn(&'A T) <: fn(&'x T)?
-
-Here the upper case `&A` indicates a *region variable*, that is, a
-region whose value is being inferred by the system. I also replaced
-`&b` with `&x`---I'll use letters late in the alphabet (`x`, `y`, `z`)
-to indicate skolemized region names. We can assume they don't appear
-elsewhere. Note that neither the sub- nor the supertype binds any
-region names anymore (as indicated by the absence of `<` and `>`).
-
-The next step is to check that the parameter types match. Because
-parameters are contravariant, this means that we check whether:
-
-    &'x T <: &'A T
-
-Region pointers are contravariant so this implies that
-
-    &A <= &x
-
-must hold, where `<=` is the subregion relationship. Processing
-*this* constraint simply adds the constraint `&A <= &x` to our graph
-and is considered successful (it can, for example, be satisfied by
-choosing the value `&x` for `&A`).
-
-So far we have encountered no error, so the subtype check succeeds.
-
-#### The third example
-
-Now let's look first at the third example, which was:
-
-    3. fn(&'a T)    <: for<'b> fn(&'b T)?        No!
-
-After steps 1 and 2 of the algorithm we will have replaced the types
-like so:
-
-    3. fn(&'a T) <: fn(&'x T)?
-
-This looks pretty much the same as before, except that on the LHS
-`'a` was not bound, and hence was left as-is and not replaced with
-a variable. The next step is again to check that the parameter types
-match. This will ultimately require (as before) that `'a` <= `&x`
-must hold: but this does not hold. `'a` and `&x` are both distinct
-free regions. So the subtype check fails.
-
-#### Checking for skolemization leaks
-
-You may be wondering about that mysterious last step in the algorithm.
-So far it has not been relevant. The purpose of that last step is to
-catch something like *this*:
-
-    for<'a> fn() -> fn(&'a T) <: fn() -> for<'b> fn(&'b T)?   No.
-
-Here the function types are the same but for where the binding occurs.
-The subtype returns a function that expects a value in precisely one
-region. The supertype returns a function that expects a value in any
-region. If we allow an instance of the subtype to be used where the
-supertype is expected, then, someone could call the fn and think that
-the return value has type `for<'b> fn(&'b T)` when it really has type
-`fn(&'a T)` (this is case #3, above). Bad.
-
-So let's step through what happens when we perform this subtype check.
-We first replace the bound regions in the subtype (the supertype has
-no bound regions). This gives us:
-
-    fn() -> fn(&'A T) <: fn() -> for<'b> fn(&'b T)?
-
-Now we compare the return types, which are covariant, and hence we have:
-
-    fn(&'A T) <: for<'b> fn(&'b T)?
-
-Here we skolemize the bound region in the supertype to yield:
-
-    fn(&'A T) <: fn(&'x T)?
-
-And then proceed to compare the argument types:
-
-    &'x T <: &'A T
-    'A <= 'x
-
-Finally, this is where it gets interesting!  This is where an error
-*should* be reported. But in fact this will not happen. The reason is
-that `A` is a variable: we will infer that its value is the fresh
-region `x` and think that everything is happy. In fact, this behavior
-is *necessary*; it was key to the first example we walked through.
-
-The difference between this example and the first one is that the variable
-`A` already existed at the point where the skolemization occurred. In
-the first example, you had two functions:
-
-    for<'a> fn(&'a T) <: for<'b> fn(&'b T)
-
-and hence `&A` and `&x` were created "together". In general, the
-intention of the skolemized names is that they are supposed to be
-fresh names that could never be equal to anything from the outside.
-But when inference comes into play, we might not be respecting this
-rule.
-
-So the way we solve this is to add a fourth step that examines the
-constraints that refer to skolemized names. Basically, consider a
-non-directed version of the constraint graph. Let `Tainted(x)` be the
-set of all things reachable from a skolemized variable `x`.
-`Tainted(x)` should not contain any regions that existed before the
-step at which the skolemization was performed. So this case here
-would fail because `&x` was created alone, but is relatable to `&A`.
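
To make the fourth step concrete, here is a small, hypothetical sketch (not taken from the compiler; `tainted`, the region names, and the graph representation are all invented for illustration) that computes `Tainted(x)` over an undirected constraint graph and flags the failing case above, where the pre-existing variable `&A` is reachable from the skolemized `&x`.

```rust
use std::collections::{HashMap, HashSet};

// Compute Tainted(skol): everything reachable from `skol` when the
// constraint edges are treated as undirected.
fn tainted<'a>(edges: &[(&'a str, &'a str)], skol: &'a str) -> HashSet<&'a str> {
    let mut adj: HashMap<&str, Vec<&str>> = HashMap::new();
    for &(a, b) in edges {
        adj.entry(a).or_insert_with(Vec::new).push(b);
        adj.entry(b).or_insert_with(Vec::new).push(a);
    }
    let mut seen = HashSet::new();
    let mut stack = vec![skol];
    while let Some(r) = stack.pop() {
        if seen.insert(r) {
            if let Some(ns) = adj.get(r) {
                stack.extend(ns.iter().cloned());
            }
        }
    }
    seen
}

fn main() {
    // The failing case from the text: the constraint `&A <= &x` relates the
    // pre-existing variable A to the skolemized region x.
    let edges = [("A", "x")];
    let pre_existing: HashSet<&str> = vec!["A"].into_iter().collect();
    let leak = tainted(&edges, "x").iter().any(|r| pre_existing.contains(r));
    assert!(leak); // Tainted(x) contains a pre-existing region, so reject
    println!("leak detected: {}", leak);
}
```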
-
-## Computing the LUB and GLB
-
-The paper I pointed you at is written for Haskell. It therefore does
-not consider subtyping, and in particular does not consider LUB or GLB
-computation. We have to consider this. Here is the
-algorithm I implemented.
-
-First though, let's discuss what we are trying to compute in more
-detail. The LUB is basically the "common supertype" and the GLB is the
-"common subtype"; one catch is that the LUB should be the
-*most-specific* common supertype and the GLB should be the
-*most-general* common subtype (as opposed to any common supertype or
-any common subtype).
-
-Anyway, to help clarify, here is a table containing some function
-pairs and their LUB/GLB (for conciseness, in this table, I'm just
-including the lifetimes here, not the rest of the types, and I'm
-writing `fn<>` instead of `for<> fn`):
-
-```
-Type 1                Type 2                LUB                    GLB
-fn<'a>('a)            fn('X)                fn('X)                 fn<'a>('a)
-fn('a)                fn('X)                --                     fn<'a>('a)
-fn<'a,'b>('a, 'b)     fn<'x>('x, 'x)        fn<'a>('a, 'a)         fn<'a,'b>('a, 'b)
-fn<'a,'b>('a, 'b, 'a) fn<'x,'y>('x, 'y, 'y) fn<'a>('a, 'a, 'a)     fn<'a,'b,'c>('a,'b,'c)
-```
-
-### Conventions
-
-I use lower-case letters (e.g., `&a`) for bound regions and upper-case
-letters for free regions (`&A`).  Region variables are written with a
-dollar sign (e.g., `$a`).  I will try to remember to enumerate the
-bound regions on the fn type as well (e.g., `for<'a> fn(&a)`).
-
-### High-level summary
-
-Both the LUB and the GLB algorithms work in a similar fashion.  They
-begin by replacing all bound regions (on both sides) with fresh region
-inference variables.  Therefore, both functions are converted to types
-that contain only free regions.  We can then compute the LUB/GLB in a
-straightforward way, as described in `combine.rs`.  This results in an
-interim type T.  The algorithms then examine the regions that appear
-in T and try to, in some cases, replace them with bound regions to
-yield the final result.
-
-To decide whether to replace a region `R` that appears in `T` with
-a bound region, the algorithms make use of two bits of
-information.  First is a set `V` that contains all region
-variables created as part of the LUB/GLB computation (roughly; see
-`region_vars_confined_to_snapshot()` for full details). `V` will
-contain the region variables created to replace the bound regions
-in the input types, but it also contains 'intermediate' variables
-created to represent the LUB/GLB of individual regions.
-Basically, when asked to compute the LUB/GLB of a region variable
-with another region, the inferencer cannot oblige immediately
-since the value of that variable is not yet known.  Therefore, it
-creates a new variable that is related to the two regions.  For
-example, the LUB of two variables `$x` and `$y` is a fresh
-variable `$z` that is constrained such that `$x <= $z` and `$y <=
-$z`.  So `V` will contain these intermediate variables as well.
-
-The other important factor in deciding how to replace a region in T is
-the function `Tainted($r)` which, for a region variable, identifies
-all regions that the region variable is related to in some way
-(`Tainted()` made an appearance in the subtype computation as well).
-
-### LUB
-
-The LUB algorithm proceeds in three steps:
-
-1. Replace all bound regions (on both sides) with fresh region
-   inference variables.
-2. Compute the LUB "as normal", meaning compute the GLB of each
-   pair of argument types and the LUB of the return types and
-   so forth.  Combine those to a new function type `F`.
-3. Replace each region `R` that appears in `F` as follows:
-   - Let `V` be the set of variables created during steps 1 and 2 of
-     the LUB computation, as described in the previous section.
-   - If `R` is not in `V`, replace `R` with itself.
-   - If `Tainted(R)` contains a region that is not in `V`,
-     replace `R` with itself.
-   - Otherwise, select the earliest variable in `Tainted(R)` that originates
-     from the left-hand side and replace `R` with the bound region that
-     this variable was a replacement for.
-
-So, let's work through the simplest example: `fn(&A)` and `for<'a> fn(&a)`.
-In this case, `&a` will be replaced with `$a` and the interim LUB type
-`fn($b)` will be computed, where `$b=GLB(&A,$a)`.  Therefore, `V =
-{$a, $b}` and `Tainted($b) = { $b, $a, &A }`.  When we go to replace
-`$b`, we find that `&A \in Tainted($b)` is not a member of `V`, so
-we leave `$b` as is.  When region inference happens, `$b` will be
-resolved to `&A`, as we wanted.
-
-Let's look at a more complex one: `fn(&a, &b)` and `fn(&x, &x)`.  In
-this case, we'll end up with a (pre-replacement) LUB type of `fn($g,
-$h)` and a graph that looks like:
-
-```
-     $a        $b     *--$x
-       \        \    /  /
-        \        $h-*  /
-         $g-----------*
-```
-
-Here `$g` and `$h` are fresh variables that are created to represent
-the LUB/GLB of things requiring inference.  This means that `V` and
-`Tainted` will look like:
-
-```
-V = {$a, $b, $g, $h, $x}
-Tainted($g) = Tainted($h) = { $a, $b, $h, $g, $x }
-```
-
-Therefore we replace both `$g` and `$h` with `$a`, and end up
-with the type `fn(&a, &a)`.
-
-### GLB
-
-The procedure for computing the GLB is similar.  The difference lies
-in computing the replacements for the various variables. For each
-region `R` that appears in the type `F`, we again compute `Tainted(R)`
-and examine the results:
-
-1. If `R` is not in `V`, it is not replaced.
-2. Else, if `Tainted(R)` contains only variables in `V`, and it
-   contains exactly one variable from the LHS and one variable from
-   the RHS, then `R` can be mapped to the bound version of the
-   variable from the LHS.
-3. Else, if `Tainted(R)` contains no variable from the LHS and no
-   variable from the RHS, then `R` can be mapped to itself.
-4. Else, `R` is mapped to a fresh bound variable.
-
-These rules are pretty complex.  Let's look at some examples to see
-how they play out.
-
-Our first example was `fn(&a)` and `fn(&X)`.  In this case, `&a` will
-be replaced with `$a` and we will ultimately compute a
-(pre-replacement) GLB type of `fn($g)` where `$g=LUB($a,&X)`.
-Therefore, `V={$a,$g}` and `Tainted($g)={$g,$a,&X}`.  To find the
-replacement for `$g` we consult the rules above:
-- Rule (1) does not apply because `$g \in V`
-- Rule (2) does not apply because `&X \in Tainted($g)`
-- Rule (3) does not apply because `$a \in Tainted($g)`
-- Hence, by rule (4), we replace `$g` with a fresh bound variable `&z`.
-So our final result is `fn(&z)`, which is correct.
-
-The next example is `fn(&A)` and `fn(&Z)`. In this case, we will again
-have a (pre-replacement) GLB of `fn($g)`, where `$g = LUB(&A,&Z)`.
-Therefore, `V={$g}` and `Tainted($g) = {$g, &A, &Z}`.  In this case,
-by rule (3), `$g` is mapped to itself, and hence the result is
-`fn($g)`.  This result is correct (in this case, at least), but it is
-indicative of a case that *can* lead us into concluding that there is
-no GLB when in fact a GLB does exist.  See the section "Questionable
-Results" below for more details.
-
-The next example is `fn(&a, &b)` and `fn(&c, &c)`. In this case, as
-before, we'll end up with `F=fn($g, $h)` where `Tainted($g) =
-Tainted($h) = {$g, $h, $a, $b, $c}`.  Only rule (4) applies and hence
-we'll select fresh bound variables `y` and `z` and wind up with
-`fn(&y, &z)`.
-
-For the last example, let's consider what may seem trivial, but is
-not: `fn(&a, &a)` and `fn(&b, &b)`.  In this case, we'll get `F=fn($g,
-$h)` where `Tainted($g) = {$g, $a, $b}` and `Tainted($h) = {$h, $a,
-$b}`.  Both of these sets contain exactly one bound variable from each
-side, so we'll map them both to `&a`, resulting in `fn(&a, &a)`, which
-is the desired result.
-
-### Shortcomings and correctness
-
-You may be wondering whether this algorithm is correct.  The answer is
-"sort of".  There are definitely cases where they fail to compute a
-result even though a correct result exists.  I believe, though, that
-if they succeed, then the result is valid, and I will attempt to
-convince you.  The basic argument is that the "pre-replacement" step
-computes a set of constraints.  The replacements, then, attempt to
-satisfy those constraints, using bound identifiers where needed.
-
-For now I will briefly go over the cases for LUB/GLB and identify
-their intent:
-
-- LUB:
-  - The region variables that are substituted in place of bound regions
-    are intended to collect constraints on those bound regions.
-  - If Tainted(R) contains only values in V, then this region is unconstrained
-    and can therefore be generalized, otherwise it cannot.
-- GLB:
-  - The region variables that are substituted in place of bound regions
-    are intended to collect constraints on those bound regions.
-  - If Tainted(R) contains exactly one variable from each side, and
-    only variables in V, that indicates that those two bound regions
-    must be equated.
-  - Otherwise, if Tainted(R) references any variables from left or right
-    side, then it is trying to combine a bound region with a free one or
-    multiple bound regions, so we need to select fresh bound regions.
-
-Sorry this is more of a shorthand to myself.  I will try to write up something
-more convincing in the future.
-
-#### Where are the algorithms wrong?
-
-- The pre-replacement computation can fail even though using a
-  bound-region would have succeeded.
-- We will compute GLB(fn(fn($a)), fn(fn($b))) as fn($c) where $c is the
-  GLB of $a and $b.  But if inference finds that $a and $b must be mapped
-  to regions without a GLB, then this is effectively a failure to compute
-  the GLB.  However, the result `fn<$c>(fn($c))` is a valid GLB.
diff --git a/src/librustc/middle/infer/higher_ranked/mod.rs b/src/librustc/middle/infer/higher_ranked/mod.rs
deleted file mode 100644 (file)
index e8f542d..0000000
+++ /dev/null
@@ -1,614 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Helper routines for higher-ranked things. See the `doc` module at
-//! the end of the file for details.
-
-use super::{CombinedSnapshot, InferCtxt, HigherRankedType, SkolemizationMap};
-use super::combine::CombineFields;
-
-use middle::ty::{self, Binder, TypeFoldable};
-use middle::ty::error::TypeError;
-use middle::ty::relate::{Relate, RelateResult, TypeRelation};
-use syntax::codemap::Span;
-use util::nodemap::{FnvHashMap, FnvHashSet};
-
-pub trait HigherRankedRelations<'a,'tcx> {
-    fn higher_ranked_sub<T>(&self, a: &Binder<T>, b: &Binder<T>) -> RelateResult<'tcx, Binder<T>>
-        where T: Relate<'a,'tcx>;
-
-    fn higher_ranked_lub<T>(&self, a: &Binder<T>, b: &Binder<T>) -> RelateResult<'tcx, Binder<T>>
-        where T: Relate<'a,'tcx>;
-
-    fn higher_ranked_glb<T>(&self, a: &Binder<T>, b: &Binder<T>) -> RelateResult<'tcx, Binder<T>>
-        where T: Relate<'a,'tcx>;
-}
-
-trait InferCtxtExt {
-    fn tainted_regions(&self, snapshot: &CombinedSnapshot, r: ty::Region) -> Vec<ty::Region>;
-
-    fn region_vars_confined_to_snapshot(&self,
-                                        snapshot: &CombinedSnapshot)
-                                        -> Vec<ty::RegionVid>;
-}
-
-impl<'a,'tcx> HigherRankedRelations<'a,'tcx> for CombineFields<'a,'tcx> {
-    fn higher_ranked_sub<T>(&self, a: &Binder<T>, b: &Binder<T>)
-                            -> RelateResult<'tcx, Binder<T>>
-        where T: Relate<'a,'tcx>
-    {
-        debug!("higher_ranked_sub(a={:?}, b={:?})",
-               a, b);
-
-        // Rather than checking the subtype relationship between `a` and `b`
-        // as-is, we need to do some extra work here in order to make sure
-        // that function subtyping works correctly with respect to regions
-        //
-        // Note: this is a subtle algorithm.  For a full explanation,
-        // please see the large comment at the end of the file in the (inlined) module
-        // `doc`.
-
-        // Start a snapshot so we can examine "all bindings that were
-        // created as part of this type comparison".
-        return self.infcx.commit_if_ok(|snapshot| {
-            // First, we instantiate each bound region in the subtype with a fresh
-            // region variable.
-            let (a_prime, _) =
-                self.infcx.replace_late_bound_regions_with_fresh_var(
-                    self.trace.origin.span(),
-                    HigherRankedType,
-                    a);
-
-            // Second, we instantiate each bound region in the supertype with a
-            // fresh concrete region.
-            let (b_prime, skol_map) =
-                self.infcx.skolemize_late_bound_regions(b, snapshot);
-
-            debug!("a_prime={:?}", a_prime);
-            debug!("b_prime={:?}", b_prime);
-
-            // Compare types now that bound regions have been replaced.
-            let result = try!(self.sub().relate(&a_prime, &b_prime));
-
-            // Presuming type comparison succeeds, we need to check
-            // that the skolemized regions do not "leak".
-            match leak_check(self.infcx, &skol_map, snapshot) {
-                Ok(()) => { }
-                Err((skol_br, tainted_region)) => {
-                    if self.a_is_expected {
-                        debug!("Not as polymorphic!");
-                        return Err(TypeError::RegionsInsufficientlyPolymorphic(skol_br,
-                                                                               tainted_region));
-                    } else {
-                        debug!("Overly polymorphic!");
-                        return Err(TypeError::RegionsOverlyPolymorphic(skol_br,
-                                                                       tainted_region));
-                    }
-                }
-            }
-
-            debug!("higher_ranked_sub: OK result={:?}",
-                   result);
-
-            Ok(ty::Binder(result))
-        });
-    }
-
-    fn higher_ranked_lub<T>(&self, a: &Binder<T>, b: &Binder<T>) -> RelateResult<'tcx, Binder<T>>
-        where T: Relate<'a,'tcx>
-    {
-        // Start a snapshot so we can examine "all bindings that were
-        // created as part of this type comparison".
-        return self.infcx.commit_if_ok(|snapshot| {
-            // Instantiate each bound region with a fresh region variable.
-            let span = self.trace.origin.span();
-            let (a_with_fresh, a_map) =
-                self.infcx.replace_late_bound_regions_with_fresh_var(
-                    span, HigherRankedType, a);
-            let (b_with_fresh, _) =
-                self.infcx.replace_late_bound_regions_with_fresh_var(
-                    span, HigherRankedType, b);
-
-            // Collect constraints.
-            let result0 =
-                try!(self.lub().relate(&a_with_fresh, &b_with_fresh));
-            let result0 =
-                self.infcx.resolve_type_vars_if_possible(&result0);
-            debug!("lub result0 = {:?}", result0);
-
-            // Generalize the regions appearing in result0 if possible
-            let new_vars = self.infcx.region_vars_confined_to_snapshot(snapshot);
-            let span = self.trace.origin.span();
-            let result1 =
-                fold_regions_in(
-                    self.tcx(),
-                    &result0,
-                    |r, debruijn| generalize_region(self.infcx, span, snapshot, debruijn,
-                                                    &new_vars, &a_map, r));
-
-            debug!("lub({:?},{:?}) = {:?}",
-                   a,
-                   b,
-                   result1);
-
-            Ok(ty::Binder(result1))
-        });
-
-        fn generalize_region(infcx: &InferCtxt,
-                             span: Span,
-                             snapshot: &CombinedSnapshot,
-                             debruijn: ty::DebruijnIndex,
-                             new_vars: &[ty::RegionVid],
-                             a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
-                             r0: ty::Region)
-                             -> ty::Region {
-            // Regions that pre-dated the LUB computation stay as they are.
-            if !is_var_in_set(new_vars, r0) {
-                assert!(!r0.is_bound());
-                debug!("generalize_region(r0={:?}): not new variable", r0);
-                return r0;
-            }
-
-            let tainted = infcx.tainted_regions(snapshot, r0);
-
-            // Variables created during LUB computation which are
-            // *related* to regions that pre-date the LUB computation
-            // stay as they are.
-            if !tainted.iter().all(|r| is_var_in_set(new_vars, *r)) {
-                debug!("generalize_region(r0={:?}): \
-                        non-new-variables found in {:?}",
-                       r0, tainted);
-                assert!(!r0.is_bound());
-                return r0;
-            }
-
-            // Otherwise, the variable must be associated with at
-            // least one of the variables representing bound regions
-            // in both A and B.  Replace the variable with the "first"
-            // bound region from A that we find it to be associated
-            // with.
-            for (a_br, a_r) in a_map {
-                if tainted.iter().any(|x| x == a_r) {
-                    debug!("generalize_region(r0={:?}): \
-                            replacing with {:?}, tainted={:?}",
-                           r0, *a_br, tainted);
-                    return ty::ReLateBound(debruijn, *a_br);
-                }
-            }
-
-            infcx.tcx.sess.span_bug(
-                span,
-                &format!("region {:?} is not associated with \
-                         any bound region from A!",
-                        r0))
-        }
-    }
-
-    fn higher_ranked_glb<T>(&self, a: &Binder<T>, b: &Binder<T>) -> RelateResult<'tcx, Binder<T>>
-        where T: Relate<'a,'tcx>
-    {
-        debug!("higher_ranked_glb({:?}, {:?})",
-               a, b);
-
-        // Make a snapshot so we can examine "all bindings that were
-        // created as part of this type comparison".
-        return self.infcx.commit_if_ok(|snapshot| {
-            // Instantiate each bound region with a fresh region variable.
-            let (a_with_fresh, a_map) =
-                self.infcx.replace_late_bound_regions_with_fresh_var(
-                    self.trace.origin.span(), HigherRankedType, a);
-            let (b_with_fresh, b_map) =
-                self.infcx.replace_late_bound_regions_with_fresh_var(
-                    self.trace.origin.span(), HigherRankedType, b);
-            let a_vars = var_ids(self, &a_map);
-            let b_vars = var_ids(self, &b_map);
-
-            // Collect constraints.
-            let result0 =
-                try!(self.glb().relate(&a_with_fresh, &b_with_fresh));
-            let result0 =
-                self.infcx.resolve_type_vars_if_possible(&result0);
-            debug!("glb result0 = {:?}", result0);
-
-            // Generalize the regions appearing in result0 if possible
-            let new_vars = self.infcx.region_vars_confined_to_snapshot(snapshot);
-            let span = self.trace.origin.span();
-            let result1 =
-                fold_regions_in(
-                    self.tcx(),
-                    &result0,
-                    |r, debruijn| generalize_region(self.infcx, span, snapshot, debruijn,
-                                                    &new_vars,
-                                                    &a_map, &a_vars, &b_vars,
-                                                    r));
-
-            debug!("glb({:?},{:?}) = {:?}",
-                   a,
-                   b,
-                   result1);
-
-            Ok(ty::Binder(result1))
-        });
-
-        fn generalize_region(infcx: &InferCtxt,
-                             span: Span,
-                             snapshot: &CombinedSnapshot,
-                             debruijn: ty::DebruijnIndex,
-                             new_vars: &[ty::RegionVid],
-                             a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
-                             a_vars: &[ty::RegionVid],
-                             b_vars: &[ty::RegionVid],
-                             r0: ty::Region) -> ty::Region {
-            if !is_var_in_set(new_vars, r0) {
-                assert!(!r0.is_bound());
-                return r0;
-            }
-
-            let tainted = infcx.tainted_regions(snapshot, r0);
-
-            let mut a_r = None;
-            let mut b_r = None;
-            let mut only_new_vars = true;
-            for r in &tainted {
-                if is_var_in_set(a_vars, *r) {
-                    if a_r.is_some() {
-                        return fresh_bound_variable(infcx, debruijn);
-                    } else {
-                        a_r = Some(*r);
-                    }
-                } else if is_var_in_set(b_vars, *r) {
-                    if b_r.is_some() {
-                        return fresh_bound_variable(infcx, debruijn);
-                    } else {
-                        b_r = Some(*r);
-                    }
-                } else if !is_var_in_set(new_vars, *r) {
-                    only_new_vars = false;
-                }
-            }
-
-            // NB---I do not believe this algorithm computes
-            // (necessarily) the GLB.  As written it can
-            // spuriously fail. In particular, if there is a case
-            // like: |fn(&a)| and fn(fn(&b)), where a and b are
-            // free, it will return fn(&c) where c = GLB(a,b).  If
-            // however this GLB is not defined, then the result is
-            // an error, even though something like
-            // "fn<X>(fn(&X))" where X is bound would be a
-            // subtype of both of those.
-            //
-            // The problem is that if we were to return a bound
-            // variable, we'd be computing a lower-bound, but not
-            // necessarily the *greatest* lower-bound.
-            //
-            // Unfortunately, this problem is non-trivial to solve,
-            // because we do not know at the time of computing the GLB
-            // whether a GLB(a,b) exists or not, because we haven't
-            // run region inference (or indeed, even fully computed
-            // the region hierarchy!). The current algorithm seems to
-            // work ok in practice.
-
-            if a_r.is_some() && b_r.is_some() && only_new_vars {
-                // Related to exactly one bound variable from each fn:
-                return rev_lookup(infcx, span, a_map, a_r.unwrap());
-            } else if a_r.is_none() && b_r.is_none() {
-                // Not related to bound variables from either fn:
-                assert!(!r0.is_bound());
-                return r0;
-            } else {
-                // Other:
-                return fresh_bound_variable(infcx, debruijn);
-            }
-        }
-
-        fn rev_lookup(infcx: &InferCtxt,
-                      span: Span,
-                      a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
-                      r: ty::Region) -> ty::Region
-        {
-            for (a_br, a_r) in a_map {
-                if *a_r == r {
-                    return ty::ReLateBound(ty::DebruijnIndex::new(1), *a_br);
-                }
-            }
-            infcx.tcx.sess.span_bug(
-                span,
-                &format!("could not find original bound region for {:?}", r));
-        }
-
-        fn fresh_bound_variable(infcx: &InferCtxt, debruijn: ty::DebruijnIndex) -> ty::Region {
-            infcx.region_vars.new_bound(debruijn)
-        }
-    }
-}
-
-fn var_ids<'a, 'tcx>(fields: &CombineFields<'a, 'tcx>,
-                      map: &FnvHashMap<ty::BoundRegion, ty::Region>)
-                     -> Vec<ty::RegionVid> {
-    map.iter()
-       .map(|(_, r)| match *r {
-           ty::ReVar(r) => { r }
-           r => {
-               fields.tcx().sess.span_bug(
-                   fields.trace.origin.span(),
-                   &format!("found non-region-vid: {:?}", r));
-           }
-       })
-       .collect()
-}
-
-fn is_var_in_set(new_vars: &[ty::RegionVid], r: ty::Region) -> bool {
-    match r {
-        ty::ReVar(ref v) => new_vars.iter().any(|x| x == v),
-        _ => false
-    }
-}
-
-fn fold_regions_in<'tcx, T, F>(tcx: &ty::ctxt<'tcx>,
-                               unbound_value: &T,
-                               mut fldr: F)
-                               -> T
-    where T: TypeFoldable<'tcx>,
-          F: FnMut(ty::Region, ty::DebruijnIndex) -> ty::Region,
-{
-    tcx.fold_regions(unbound_value, &mut false, |region, current_depth| {
-        // we should only be encountering "escaping" late-bound regions here,
-        // because the ones at the current level should have been replaced
-        // with fresh variables
-        assert!(match region {
-            ty::ReLateBound(..) => false,
-            _ => true
-        });
-
-        fldr(region, ty::DebruijnIndex::new(current_depth))
-    })
-}
-
-impl<'a,'tcx> InferCtxtExt for InferCtxt<'a,'tcx> {
-    fn tainted_regions(&self, snapshot: &CombinedSnapshot, r: ty::Region) -> Vec<ty::Region> {
-        self.region_vars.tainted(&snapshot.region_vars_snapshot, r)
-    }
-
-    fn region_vars_confined_to_snapshot(&self,
-                                        snapshot: &CombinedSnapshot)
-                                        -> Vec<ty::RegionVid>
-    {
-        /*!
-         * Returns the set of region variables that do not affect any
-         * types/regions which existed before `snapshot` was
-         * started. This is used in the sub/lub/glb computations. The
-         * idea here is that when we are computing lub/glb of two
-         * regions, we sometimes create intermediate region variables.
-         * Those region variables may touch some of the skolemized or
-         * other "forbidden" regions we created to replace bound
-         * regions, but they don't really represent an "external"
-         * constraint.
-         *
-         * However, sometimes fresh variables are created for other
-         * purposes too, and those *may* represent an external
-         * constraint. In particular, when a type variable is
-         * instantiated, we create region variables for all the
-         * regions that appear within, and if that type variable
-         * pre-existed the snapshot, then those region variables
-         * represent external constraints.
-         *
-         * An example appears in the unit test
-         * `sub_free_bound_false_infer`.  In this test, we want to
-         * know whether
-         *
-         * ```rust
-         * fn(_#0t) <: for<'a> fn(&'a int)
-         * ```
-         *
-         * Note that the subtype has a type variable. Because the type
-         * variable can't be instantiated with a region that is bound
-         * in the fn signature, this comparison ought to fail. But if
-         * we're not careful, it will succeed.
-         *
-         * The reason is that when we walk through the subtyping
-         * algorithm, we begin by replacing `'a` with a skolemized
-         * variable `'1`. We then have `fn(_#0t) <: fn(&'1 int)`. This
-         * can be made true by unifying `_#0t` with `&'1 int`. In the
-         * process, we create a fresh variable for the skolemized
-         * region, `'$2`, and hence we have that `_#0t == &'$2
-         * int`. However, because `'$2` was created during the sub
-         * computation, if we're not careful we will erroneously
-         * assume it is one of the transient region variables
-         * representing a lub/glb internally. Not good.
-         *
-         * To prevent this, we check for type variables which were
-         * unified during the snapshot, and say that any region
-         * variable created during the snapshot but which finds its
-         * way into a type variable is considered to "escape" the
-         * snapshot.
-         */
-
-        let mut region_vars =
-            self.region_vars.vars_created_since_snapshot(&snapshot.region_vars_snapshot);
-
-        let escaping_types =
-            self.type_variables.borrow().types_escaping_snapshot(&snapshot.type_snapshot);
-
-        let mut escaping_region_vars = FnvHashSet();
-        for ty in &escaping_types {
-            self.tcx.collect_regions(ty, &mut escaping_region_vars);
-        }
-
-        region_vars.retain(|&region_vid| {
-            let r = ty::ReVar(region_vid);
-            !escaping_region_vars.contains(&r)
-        });
-
-        debug!("region_vars_confined_to_snapshot: region_vars={:?} escaping_types={:?}",
-               region_vars,
-               escaping_types);
-
-        region_vars
-    }
-}
-
-pub fn skolemize_late_bound_regions<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
-                                               binder: &ty::Binder<T>,
-                                               snapshot: &CombinedSnapshot)
-                                               -> (T, SkolemizationMap)
-    where T : TypeFoldable<'tcx>
-{
-    /*!
-     * Replace all regions bound by `binder` with skolemized regions and
-     * return a map indicating which bound-region was replaced with what
-     * skolemized region. This is the first step of checking subtyping
-     * when higher-ranked things are involved. See `README.md` for more
-     * details.
-     */
-
-    let (result, map) = infcx.tcx.replace_late_bound_regions(binder, |br| {
-        infcx.region_vars.new_skolemized(br, &snapshot.region_vars_snapshot)
-    });
-
-    debug!("skolemize_bound_regions(binder={:?}, result={:?}, map={:?})",
-           binder,
-           result,
-           map);
-
-    (result, map)
-}
-
-pub fn leak_check<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
-                           skol_map: &SkolemizationMap,
-                           snapshot: &CombinedSnapshot)
-                           -> Result<(),(ty::BoundRegion,ty::Region)>
-{
-    /*!
-     * Searches the region constraints created since `snapshot` was started
-     * and checks to determine whether any of the skolemized regions created
-     * in `skol_map` would "escape" -- meaning that they are related to
-     * other regions in some way. If so, the higher-ranked subtyping doesn't
-     * hold. See `README.md` for more details.
-     */
-
-    debug!("leak_check: skol_map={:?}",
-           skol_map);
-
-    let new_vars = infcx.region_vars_confined_to_snapshot(snapshot);
-    for (&skol_br, &skol) in skol_map {
-        let tainted = infcx.tainted_regions(snapshot, skol);
-        for &tainted_region in &tainted {
-            // Each skolemized region should only be relatable to itself
-            // or new variables:
-            match tainted_region {
-                ty::ReVar(vid) => {
-                    if new_vars.iter().any(|&x| x == vid) { continue; }
-                }
-                _ => {
-                    if tainted_region == skol { continue; }
-                }
-            };
-
-            debug!("{:?} (which replaced {:?}) is tainted by {:?}",
-                   skol,
-                   skol_br,
-                   tainted_region);
-
-            // A is not as polymorphic as B:
-            return Err((skol_br, tainted_region));
-        }
-    }
-    Ok(())
-}
-
-/// This code converts from skolemized regions back to late-bound
-/// regions. It works by replacing each region in the taint set of a
-/// skolemized region with a bound-region. The bound region will be bound
-/// by the outer-most binder in `value`; the caller must ensure that there is
-/// such a binder and it is the right place.
-///
-/// This routine is only intended to be used when the leak-check has
-/// passed; currently, it's used in the trait matching code to create
-/// a set of nested obligations from an impl that matches against
-/// something higher-ranked.  More details can be found in
-/// `librustc/middle/traits/README.md`.
-///
-/// As a brief example, consider the obligation `for<'a> Fn(&'a int)
-/// -> &'a int`, and the impl:
-///
-///     impl<A,R> Fn<A,R> for SomethingOrOther
-///         where A : Clone
-///     { ... }
-///
-/// Here we will have replaced `'a` with a skolemized region
-/// `'0`. This means that our substitution will be `{A=>&'0
-/// int, R=>&'0 int}`.
-///
-/// When we apply the substitution to the bounds, we will wind up with
-/// `&'0 int : Clone` as a predicate. As a last step, we then go and
-/// replace `'0` with a late-bound region `'a`.  The depth is matched
-/// to the depth of the predicate, in this case 1, so that the final
-/// predicate is `for<'a> &'a int : Clone`.
-pub fn plug_leaks<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
-                             skol_map: SkolemizationMap,
-                             snapshot: &CombinedSnapshot,
-                             value: &T)
-                             -> T
-    where T : TypeFoldable<'tcx>
-{
-    debug_assert!(leak_check(infcx, &skol_map, snapshot).is_ok());
-
-    debug!("plug_leaks(skol_map={:?}, value={:?})",
-           skol_map,
-           value);
-
-    // Compute a mapping from the "taint set" of each skolemized
-    // region back to the `ty::BoundRegion` that it originally
-    // represented. Because `leak_check` passed, we know that
-    // these taint sets are mutually disjoint.
-    let inv_skol_map: FnvHashMap<ty::Region, ty::BoundRegion> =
-        skol_map
-        .into_iter()
-        .flat_map(|(skol_br, skol)| {
-            infcx.tainted_regions(snapshot, skol)
-                .into_iter()
-                .map(move |tainted_region| (tainted_region, skol_br))
-        })
-        .collect();
-
-    debug!("plug_leaks: inv_skol_map={:?}",
-           inv_skol_map);
-
-    // Remove any instantiated type variables from `value`; those can hide
-    // references to regions from the `fold_regions` code below.
-    let value = infcx.resolve_type_vars_if_possible(value);
-
-    // Map any skolemization byproducts back to a late-bound
-    // region. Put that late-bound region at whatever the outermost
-    // binder is that we encountered in `value`. The caller is
-    // responsible for ensuring that (a) `value` contains at least one
-    // binder and (b) that binder is the one we want to use.
-    let result = infcx.tcx.fold_regions(&value, &mut false, |r, current_depth| {
-        match inv_skol_map.get(&r) {
-            None => r,
-            Some(br) => {
-                // It is the responsibility of the caller to ensure
-                // that each skolemized region appears within a
-                // binder. In practice, this routine is only used by
-                // trait checking, and all of the skolemized regions
-                // appear inside predicates, which always have
-                // binders, so this assert is satisfied.
-                assert!(current_depth > 1);
-
-                ty::ReLateBound(ty::DebruijnIndex::new(current_depth - 1), br.clone())
-            }
-        }
-    });
-
-    debug!("plug_leaks: result={:?}",
-           result);
-
-    result
-}
diff --git a/src/librustc/middle/infer/lattice.rs b/src/librustc/middle/infer/lattice.rs
deleted file mode 100644 (file)
index 2a560ec..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! # Lattice Variables
-//!
-//! This file contains generic code for operating on inference variables
-//! that are characterized by an upper- and lower-bound.  The logic and
-//! reasoning are explained in detail in the large comment in `infer.rs`.
-//!
-//! The code in here is defined quite generically so that it can be
-//! applied both to type variables, which represent types being inferred,
-//! and fn variables, which represent function types being inferred.
-//! It may eventually be applied to their types as well, who knows.
-//! In some cases, the functions are also generic with respect to the
-//! operation on the lattice (GLB vs LUB).
-//!
-//! Although all the functions are generic, we generally write the
-//! comments in a way that is specific to type variables and the LUB
-//! operation.  It's just easier that way.
-//!
-//! In general all of the functions are defined parametrically
-//! over a `LatticeValue`, which is a value defined with respect to
-//! a lattice.
-
-use super::combine;
-use super::InferCtxt;
-
-use middle::ty::TyVar;
-use middle::ty::{self, Ty};
-use middle::ty::relate::{RelateResult, TypeRelation};
-
-pub trait LatticeDir<'f,'tcx> : TypeRelation<'f,'tcx> {
-    fn infcx(&self) -> &'f InferCtxt<'f, 'tcx>;
-
-    // Relates the type `v` to `a` and `b` such that `v` represents
-    // the LUB/GLB of `a` and `b` as appropriate.
-    fn relate_bound(&self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()>;
-}
-
-pub fn super_lattice_tys<'a,'tcx,L:LatticeDir<'a,'tcx>>(this: &mut L,
-                                                        a: Ty<'tcx>,
-                                                        b: Ty<'tcx>)
-                                                        -> RelateResult<'tcx, Ty<'tcx>>
-    where 'tcx: 'a
-{
-    debug!("{}.lattice_tys({:?}, {:?})",
-           this.tag(),
-           a,
-           b);
-
-    if a == b {
-        return Ok(a);
-    }
-
-    let infcx = this.infcx();
-    let a = infcx.type_variables.borrow().replace_if_possible(a);
-    let b = infcx.type_variables.borrow().replace_if_possible(b);
-    match (&a.sty, &b.sty) {
-        (&ty::TyInfer(TyVar(..)), &ty::TyInfer(TyVar(..)))
-            if infcx.type_var_diverges(a) && infcx.type_var_diverges(b) => {
-            let v = infcx.next_diverging_ty_var();
-            try!(this.relate_bound(v, a, b));
-            Ok(v)
-        }
-
-        (&ty::TyInfer(TyVar(..)), _) |
-        (_, &ty::TyInfer(TyVar(..))) => {
-            let v = infcx.next_ty_var();
-            try!(this.relate_bound(v, a, b));
-            Ok(v)
-        }
-
-        _ => {
-            combine::super_combine_tys(this.infcx(), this, a, b)
-        }
-    }
-}
diff --git a/src/librustc/middle/infer/lub.rs b/src/librustc/middle/infer/lub.rs
deleted file mode 100644 (file)
index 238dad6..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::combine::CombineFields;
-use super::higher_ranked::HigherRankedRelations;
-use super::InferCtxt;
-use super::lattice::{self, LatticeDir};
-use super::Subtype;
-
-use middle::ty::{self, Ty};
-use middle::ty::relate::{Relate, RelateResult, TypeRelation};
-
-/// "Least upper bound" (common supertype)
-pub struct Lub<'a, 'tcx: 'a> {
-    fields: CombineFields<'a, 'tcx>
-}
-
-impl<'a, 'tcx> Lub<'a, 'tcx> {
-    pub fn new(fields: CombineFields<'a, 'tcx>) -> Lub<'a, 'tcx> {
-        Lub { fields: fields }
-    }
-}
-
-impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Lub<'a, 'tcx> {
-    fn tag(&self) -> &'static str { "Lub" }
-
-    fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.fields.tcx() }
-
-    fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
-
-    fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
-                                               variance: ty::Variance,
-                                               a: &T,
-                                               b: &T)
-                                               -> RelateResult<'tcx, T>
-    {
-        match variance {
-            ty::Invariant => self.fields.equate().relate(a, b),
-            ty::Covariant => self.relate(a, b),
-            ty::Bivariant => self.fields.bivariate().relate(a, b),
-            ty::Contravariant => self.fields.glb().relate(a, b),
-        }
-    }
-
-    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
-        lattice::super_lattice_tys(self, a, b)
-    }
-
-    fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
-        debug!("{}.regions({:?}, {:?})",
-               self.tag(),
-               a,
-               b);
-
-        let origin = Subtype(self.fields.trace.clone());
-        Ok(self.fields.infcx.region_vars.lub_regions(origin, a, b))
-    }
-
-    fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-                  -> RelateResult<'tcx, ty::Binder<T>>
-        where T: Relate<'a, 'tcx>
-    {
-        self.fields.higher_ranked_lub(a, b)
-    }
-}
-
-impl<'a, 'tcx> LatticeDir<'a,'tcx> for Lub<'a, 'tcx> {
-    fn infcx(&self) -> &'a InferCtxt<'a,'tcx> {
-        self.fields.infcx
-    }
-
-    fn relate_bound(&self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()> {
-        let mut sub = self.fields.sub();
-        try!(sub.relate(&a, &v));
-        try!(sub.relate(&b, &v));
-        Ok(())
-    }
-}
diff --git a/src/librustc/middle/infer/mod.rs b/src/librustc/middle/infer/mod.rs
deleted file mode 100644 (file)
index 250cb10..0000000
+++ /dev/null
@@ -1,1647 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! See the Book for more information.
-
-pub use self::LateBoundRegionConversionTime::*;
-pub use self::RegionVariableOrigin::*;
-pub use self::SubregionOrigin::*;
-pub use self::ValuePairs::*;
-pub use middle::ty::IntVarValue;
-pub use self::freshen::TypeFreshener;
-pub use self::region_inference::{GenericKind, VerifyBound};
-
-use middle::def_id::DefId;
-use rustc_front::hir;
-use middle::free_region::FreeRegionMap;
-use middle::mem_categorization as mc;
-use middle::mem_categorization::McResult;
-use middle::region::CodeExtent;
-use middle::subst;
-use middle::subst::Substs;
-use middle::subst::Subst;
-use middle::traits;
-use middle::ty::adjustment;
-use middle::ty::{TyVid, IntVid, FloatVid};
-use middle::ty::{self, Ty};
-use middle::ty::error::{ExpectedFound, TypeError, UnconstrainedNumeric};
-use middle::ty::fold::{TypeFolder, TypeFoldable};
-use middle::ty::relate::{Relate, RelateResult, TypeRelation};
-use rustc_data_structures::unify::{self, UnificationTable};
-use std::cell::{RefCell, Ref};
-use std::fmt;
-use syntax::ast;
-use syntax::codemap;
-use syntax::codemap::{Span, DUMMY_SP};
-use syntax::errors::DiagnosticBuilder;
-use util::nodemap::{FnvHashMap, FnvHashSet, NodeMap};
-
-use self::combine::CombineFields;
-use self::region_inference::{RegionVarBindings, RegionSnapshot};
-use self::error_reporting::ErrorReporting;
-use self::unify_key::ToType;
-
-pub mod bivariate;
-pub mod combine;
-pub mod equate;
-pub mod error_reporting;
-pub mod glb;
-mod higher_ranked;
-pub mod lattice;
-pub mod lub;
-pub mod region_inference;
-pub mod resolve;
-mod freshen;
-pub mod sub;
-pub mod type_variable;
-pub mod unify_key;
-
-pub type Bound<T> = Option<T>;
-pub type UnitResult<'tcx> = RelateResult<'tcx, ()>; // "unify result"
-pub type FixupResult<T> = Result<T, FixupError>; // "fixup result"
-
-pub struct InferCtxt<'a, 'tcx: 'a> {
-    pub tcx: &'a ty::ctxt<'tcx>,
-
-    pub tables: &'a RefCell<ty::Tables<'tcx>>,
-
-    // We instantiate UnificationTable with bounds<Ty> because the
-    // types that might instantiate a general type variable have an
-    // order, represented by their upper and lower bounds.
-    type_variables: RefCell<type_variable::TypeVariableTable<'tcx>>,
-
-    // Map from integral variable to the kind of integer it represents
-    int_unification_table: RefCell<UnificationTable<ty::IntVid>>,
-
-    // Map from floating variable to the kind of float it represents
-    float_unification_table: RefCell<UnificationTable<ty::FloatVid>>,
-
-    // For region variables.
-    region_vars: RegionVarBindings<'a, 'tcx>,
-
-    pub parameter_environment: ty::ParameterEnvironment<'a, 'tcx>,
-
-    // the set of predicates on which errors have been reported, to
-    // avoid reporting the same error twice.
-    pub reported_trait_errors: RefCell<FnvHashSet<traits::TraitErrorKey<'tcx>>>,
-
-    // This is a temporary field used for toggling on normalization in the inference context,
-    // as we move towards the approach described here:
-    // https://internals.rust-lang.org/t/flattening-the-contexts-for-fun-and-profit/2293
-    // At a point sometime in the future normalization will be done by the typing context
-    // directly.
-    normalize: bool,
-
-    err_count_on_creation: usize,
-}
-
-/// A map returned by `skolemize_late_bound_regions()` indicating the skolemized
-/// region that each late-bound region was replaced with.
-pub type SkolemizationMap = FnvHashMap<ty::BoundRegion,ty::Region>;
-
-/// Why did we require that the two types be related?
-///
-/// See `error_reporting.rs` for more details
-#[derive(Clone, Copy, Debug)]
-pub enum TypeOrigin {
-    // Not yet categorized in a better way
-    Misc(Span),
-
-    // Checking that method of impl is compatible with trait
-    MethodCompatCheck(Span),
-
-    // Checking that this expression can be assigned where it needs to be
-    // FIXME(eddyb) #11161 is the original Expr required?
-    ExprAssignable(Span),
-
-    // Relating trait refs when resolving vtables
-    RelateTraitRefs(Span),
-
-    // Relating self types when resolving vtables
-    RelateSelfType(Span),
-
-    // Relating trait type parameters to those found in impl etc
-    RelateOutputImplTypes(Span),
-
-    // Computing common supertype in the arms of a match expression
-    MatchExpressionArm(Span, Span, hir::MatchSource),
-
-    // Computing common supertype in an if expression
-    IfExpression(Span),
-
-    // Computing common supertype of an if expression with no else counterpart
-    IfExpressionWithNoElse(Span),
-
-    // Computing common supertype in a range expression
-    RangeExpression(Span),
-
-    // `where a == b`
-    EquatePredicate(Span),
-}
-
-impl TypeOrigin {
-    fn as_str(&self) -> &'static str {
-        match self {
-            &TypeOrigin::Misc(_) |
-            &TypeOrigin::RelateSelfType(_) |
-            &TypeOrigin::RelateOutputImplTypes(_) |
-            &TypeOrigin::ExprAssignable(_) => "mismatched types",
-            &TypeOrigin::RelateTraitRefs(_) => "mismatched traits",
-            &TypeOrigin::MethodCompatCheck(_) => "method not compatible with trait",
-            &TypeOrigin::MatchExpressionArm(_, _, source) => match source {
-                hir::MatchSource::IfLetDesugar{..} => "`if let` arms have incompatible types",
-                _ => "match arms have incompatible types",
-            },
-            &TypeOrigin::IfExpression(_) => "if and else have incompatible types",
-            &TypeOrigin::IfExpressionWithNoElse(_) => "if may be missing an else clause",
-            &TypeOrigin::RangeExpression(_) => "start and end of range have incompatible types",
-            &TypeOrigin::EquatePredicate(_) => "equality predicate not satisfied",
-        }
-    }
-}
-
-impl fmt::Display for TypeOrigin {
-    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(),fmt::Error> {
-        fmt::Display::fmt(self.as_str(), f)
-    }
-}
-
-/// See `error_reporting.rs` for more details
-#[derive(Clone, Debug)]
-pub enum ValuePairs<'tcx> {
-    Types(ExpectedFound<Ty<'tcx>>),
-    TraitRefs(ExpectedFound<ty::TraitRef<'tcx>>),
-    PolyTraitRefs(ExpectedFound<ty::PolyTraitRef<'tcx>>),
-}
-
-/// The trace designates the path through inference that we took to
-/// encounter an error or subtyping constraint.
-///
-/// See `error_reporting.rs` for more details.
-#[derive(Clone)]
-pub struct TypeTrace<'tcx> {
-    origin: TypeOrigin,
-    values: ValuePairs<'tcx>,
-}
-
-/// The origin of a `r1 <= r2` constraint.
-///
-/// See `error_reporting.rs` for more details
-#[derive(Clone, Debug)]
-pub enum SubregionOrigin<'tcx> {
-    // Arose from a subtyping relation
-    Subtype(TypeTrace<'tcx>),
-
-    // Stack-allocated closures cannot outlive innermost loop
-    // or function so as to ensure we only require finite stack
-    InfStackClosure(Span),
-
-    // Invocation of closure must be within its lifetime
-    InvokeClosure(Span),
-
-    // Dereference of reference must be within its lifetime
-    DerefPointer(Span),
-
-    // Closure bound must not outlive captured free variables
-    FreeVariable(Span, ast::NodeId),
-
-    // Index into slice must be within its lifetime
-    IndexSlice(Span),
-
-    // When casting `&'a T` to an `&'b Trait` object,
-    // relating `'a` to `'b`
-    RelateObjectBound(Span),
-
-    // Some type parameter was instantiated with the given type,
-    // and that type must outlive some region.
-    RelateParamBound(Span, Ty<'tcx>),
-
-    // The given region parameter was instantiated with a region
-    // that must outlive some other region.
-    RelateRegionParamBound(Span),
-
-    // A bound placed on type parameters that states that they must outlive
-    // the moment of their instantiation.
-    RelateDefaultParamBound(Span, Ty<'tcx>),
-
-    // Creating a pointer `b` to contents of another reference
-    Reborrow(Span),
-
-    // Creating a pointer `b` to contents of an upvar
-    ReborrowUpvar(Span, ty::UpvarId),
-
-    // Data with type `Ty<'tcx>` was borrowed
-    DataBorrowed(Ty<'tcx>, Span),
-
-    // (&'a &'b T) where a >= b
-    ReferenceOutlivesReferent(Ty<'tcx>, Span),
-
-    // Type or region parameters must be in scope.
-    ParameterInScope(ParameterOrigin, Span),
-
-    // The type T of an expression E must outlive the lifetime for E.
-    ExprTypeIsNotInScope(Ty<'tcx>, Span),
-
-    // A `ref b` whose region does not enclose the decl site
-    BindingTypeIsNotValidAtDecl(Span),
-
-    // Regions appearing in a method receiver must outlive method call
-    CallRcvr(Span),
-
-    // Regions appearing in a function argument must outlive func call
-    CallArg(Span),
-
-    // Region in return type of invoked fn must enclose call
-    CallReturn(Span),
-
-    // Operands must be in scope
-    Operand(Span),
-
-    // Region resulting from a `&` expr must enclose the `&` expr
-    AddrOf(Span),
-
-    // An auto-borrow that does not enclose the expr where it occurs
-    AutoBorrow(Span),
-
-    // Region constraint arriving from destructor safety
-    SafeDestructor(Span),
-}
-
-/// Places that type/region parameters can appear.
-#[derive(Clone, Copy, Debug)]
-pub enum ParameterOrigin {
-    Path, // foo::bar
-    MethodCall, // foo.bar() <-- parameters on impl providing bar()
-    OverloadedOperator, // a + b when overloaded
-    OverloadedDeref, // *a when overloaded
-}
-
-/// Times when we replace late-bound regions with variables:
-#[derive(Clone, Copy, Debug)]
-pub enum LateBoundRegionConversionTime {
-    /// when a fn is called
-    FnCall,
-
-    /// when two higher-ranked types are compared
-    HigherRankedType,
-
-    /// when projecting an associated type
-    AssocTypeProjection(ast::Name),
-}
-
-/// Reasons to create a region inference variable
-///
-/// See `error_reporting.rs` for more details
-#[derive(Clone, Debug)]
-pub enum RegionVariableOrigin {
-    // Region variables created for ill-categorized reasons,
-    // mostly indicates places in need of refactoring
-    MiscVariable(Span),
-
-    // Regions created by a `&P` or `[...]` pattern
-    PatternRegion(Span),
-
-    // Regions created by `&` operator
-    AddrOfRegion(Span),
-
-    // Regions created as part of an autoref of a method receiver
-    Autoref(Span),
-
-    // Regions created as part of an automatic coercion
-    Coercion(Span),
-
-    // Region variables created as the values for early-bound regions
-    EarlyBoundRegion(Span, ast::Name),
-
-    // Region variables created for bound regions
-    // in a function or method that is called
-    LateBoundRegion(Span, ty::BoundRegion, LateBoundRegionConversionTime),
-
-    UpvarRegion(ty::UpvarId, Span),
-
-    BoundRegionInCoherence(ast::Name),
-}
-
-#[derive(Copy, Clone, Debug)]
-pub enum FixupError {
-    UnresolvedIntTy(IntVid),
-    UnresolvedFloatTy(FloatVid),
-    UnresolvedTy(TyVid)
-}
-
-pub fn fixup_err_to_string(f: FixupError) -> String {
-    use self::FixupError::*;
-
-    match f {
-      UnresolvedIntTy(_) => {
-          "cannot determine the type of this integer; add a suffix to \
-           specify the type explicitly".to_string()
-      }
-      UnresolvedFloatTy(_) => {
-          "cannot determine the type of this number; add a suffix to specify \
-           the type explicitly".to_string()
-      }
-      UnresolvedTy(_) => "unconstrained type".to_string(),
-    }
-}
-
-pub fn new_infer_ctxt<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>,
-                                tables: &'a RefCell<ty::Tables<'tcx>>,
-                                param_env: Option<ty::ParameterEnvironment<'a, 'tcx>>)
-                                -> InferCtxt<'a, 'tcx> {
-    InferCtxt {
-        tcx: tcx,
-        tables: tables,
-        type_variables: RefCell::new(type_variable::TypeVariableTable::new()),
-        int_unification_table: RefCell::new(UnificationTable::new()),
-        float_unification_table: RefCell::new(UnificationTable::new()),
-        region_vars: RegionVarBindings::new(tcx),
-        parameter_environment: param_env.unwrap_or(tcx.empty_parameter_environment()),
-        reported_trait_errors: RefCell::new(FnvHashSet()),
-        normalize: false,
-        err_count_on_creation: tcx.sess.err_count()
-    }
-}
-
-pub fn normalizing_infer_ctxt<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>,
-                                        tables: &'a RefCell<ty::Tables<'tcx>>)
-                                        -> InferCtxt<'a, 'tcx> {
-    let mut infcx = new_infer_ctxt(tcx, tables, None);
-    infcx.normalize = true;
-    infcx
-}
-
-/// Computes the least upper-bound of `a` and `b`. If this is not possible, reports an error and
-/// returns ty::err.
-pub fn common_supertype<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
-                                  origin: TypeOrigin,
-                                  a_is_expected: bool,
-                                  a: Ty<'tcx>,
-                                  b: Ty<'tcx>)
-                                  -> Ty<'tcx>
-{
-    debug!("common_supertype({:?}, {:?})",
-           a, b);
-
-    let trace = TypeTrace {
-        origin: origin,
-        values: Types(expected_found(a_is_expected, a, b))
-    };
-
-    let result = cx.commit_if_ok(|_| cx.lub(a_is_expected, trace.clone()).relate(&a, &b));
-    match result {
-        Ok(t) => t,
-        Err(ref err) => {
-            cx.report_and_explain_type_error(trace, err);
-            cx.tcx.types.err
-        }
-    }
-}
-
-pub fn mk_subty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
-                          a_is_expected: bool,
-                          origin: TypeOrigin,
-                          a: Ty<'tcx>,
-                          b: Ty<'tcx>)
-                          -> UnitResult<'tcx>
-{
-    debug!("mk_subty({:?} <: {:?})", a, b);
-    cx.sub_types(a_is_expected, origin, a, b)
-}
-
-pub fn can_mk_subty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
-                              a: Ty<'tcx>,
-                              b: Ty<'tcx>)
-                              -> UnitResult<'tcx> {
-    debug!("can_mk_subty({:?} <: {:?})", a, b);
-    cx.probe(|_| {
-        let trace = TypeTrace {
-            origin: TypeOrigin::Misc(codemap::DUMMY_SP),
-            values: Types(expected_found(true, a, b))
-        };
-        cx.sub(true, trace).relate(&a, &b).map(|_| ())
-    })
-}
-
-pub fn can_mk_eqty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>, a: Ty<'tcx>, b: Ty<'tcx>)
-                             -> UnitResult<'tcx>
-{
-    cx.can_equate(&a, &b)
-}
-
-pub fn mk_subr<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
-                         origin: SubregionOrigin<'tcx>,
-                         a: ty::Region,
-                         b: ty::Region) {
-    debug!("mk_subr({:?} <: {:?})", a, b);
-    let snapshot = cx.region_vars.start_snapshot();
-    cx.region_vars.make_subregion(origin, a, b);
-    cx.region_vars.commit(snapshot);
-}
-
-pub fn mk_eqty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
-                         a_is_expected: bool,
-                         origin: TypeOrigin,
-                         a: Ty<'tcx>,
-                         b: Ty<'tcx>)
-                         -> UnitResult<'tcx>
-{
-    debug!("mk_eqty({:?} <: {:?})", a, b);
-    cx.commit_if_ok(|_| cx.eq_types(a_is_expected, origin, a, b))
-}
-
-pub fn mk_eq_trait_refs<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
-                                   a_is_expected: bool,
-                                   origin: TypeOrigin,
-                                   a: ty::TraitRef<'tcx>,
-                                   b: ty::TraitRef<'tcx>)
-                                   -> UnitResult<'tcx>
-{
-    debug!("mk_eq_trait_refs({:?} <: {:?})",
-           a, b);
-    cx.commit_if_ok(|_| cx.eq_trait_refs(a_is_expected, origin, a.clone(), b.clone()))
-}
-
-pub fn mk_sub_poly_trait_refs<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
-                                        a_is_expected: bool,
-                                        origin: TypeOrigin,
-                                        a: ty::PolyTraitRef<'tcx>,
-                                        b: ty::PolyTraitRef<'tcx>)
-                                        -> UnitResult<'tcx>
-{
-    debug!("mk_sub_poly_trait_refs({:?} <: {:?})",
-           a, b);
-    cx.commit_if_ok(|_| cx.sub_poly_trait_refs(a_is_expected, origin, a.clone(), b.clone()))
-}
-
-fn expected_found<T>(a_is_expected: bool,
-                     a: T,
-                     b: T)
-                     -> ExpectedFound<T>
-{
-    if a_is_expected {
-        ExpectedFound {expected: a, found: b}
-    } else {
-        ExpectedFound {expected: b, found: a}
-    }
-}
-
-#[must_use = "once you start a snapshot, you should always consume it"]
-pub struct CombinedSnapshot {
-    type_snapshot: type_variable::Snapshot,
-    int_snapshot: unify::Snapshot<ty::IntVid>,
-    float_snapshot: unify::Snapshot<ty::FloatVid>,
-    region_vars_snapshot: RegionSnapshot,
-}
-
-pub fn normalize_associated_type<'tcx,T>(tcx: &ty::ctxt<'tcx>, value: &T) -> T
-    where T : TypeFoldable<'tcx>
-{
-    debug!("normalize_associated_type(t={:?})", value);
-
-    let value = tcx.erase_regions(value);
-
-    if !value.has_projection_types() {
-        return value;
-    }
-
-    let infcx = new_infer_ctxt(tcx, &tcx.tables, None);
-    let mut selcx = traits::SelectionContext::new(&infcx);
-    let cause = traits::ObligationCause::dummy();
-    let traits::Normalized { value: result, obligations } =
-        traits::normalize(&mut selcx, cause, &value);
-
-    debug!("normalize_associated_type: result={:?} obligations={:?}",
-           result,
-           obligations);
-
-    let mut fulfill_cx = traits::FulfillmentContext::new();
-
-    for obligation in obligations {
-        fulfill_cx.register_predicate_obligation(&infcx, obligation);
-    }
-
-    drain_fulfillment_cx_or_panic(DUMMY_SP, &infcx, &mut fulfill_cx, &result)
-}
-
-pub fn drain_fulfillment_cx_or_panic<'a,'tcx,T>(span: Span,
-                                                infcx: &InferCtxt<'a,'tcx>,
-                                                fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
-                                                result: &T)
-                                                -> T
-    where T : TypeFoldable<'tcx>
-{
-    match drain_fulfillment_cx(infcx, fulfill_cx, result) {
-        Ok(v) => v,
-        Err(errors) => {
-            infcx.tcx.sess.span_bug(
-                span,
-                &format!("Encountered errors `{:?}` fulfilling during trans",
-                         errors));
-        }
-    }
-}
-
-/// Finishes processing any obligations that remain in the fulfillment
-/// context, and then "freshens" and returns `result`. This is
-/// primarily used during normalization and other cases where
-/// processing the obligations in `fulfill_cx` may cause type
-/// inference variables that appear in `result` to be unified, and
-/// hence we need to process those obligations to get the complete
-/// picture of the type.
-pub fn drain_fulfillment_cx<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
-                                       fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
-                                       result: &T)
-                                       -> Result<T,Vec<traits::FulfillmentError<'tcx>>>
-    where T : TypeFoldable<'tcx>
-{
-    debug!("drain_fulfillment_cx(result={:?})",
-           result);
-
-    // In principle, we only need to do this so long as `result`
-    // contains unbound type parameters. It could be a slight
-    // optimization to stop iterating early.
-    match fulfill_cx.select_all_or_error(infcx) {
-        Ok(()) => { }
-        Err(errors) => {
-            return Err(errors);
-        }
-    }
-
-    let result = infcx.resolve_type_vars_if_possible(result);
-    Ok(infcx.tcx.erase_regions(&result))
-}
-
-impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
-    pub fn freshen<T:TypeFoldable<'tcx>>(&self, t: T) -> T {
-        t.fold_with(&mut self.freshener())
-    }
-
-    pub fn type_var_diverges(&'a self, ty: Ty) -> bool {
-        match ty.sty {
-            ty::TyInfer(ty::TyVar(vid)) => self.type_variables.borrow().var_diverges(vid),
-            _ => false
-        }
-    }
-
-    pub fn freshener<'b>(&'b self) -> TypeFreshener<'b, 'tcx> {
-        freshen::TypeFreshener::new(self)
-    }
-
-    pub fn type_is_unconstrained_numeric(&'a self, ty: Ty) -> UnconstrainedNumeric {
-        use middle::ty::error::UnconstrainedNumeric::Neither;
-        use middle::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
-        match ty.sty {
-            ty::TyInfer(ty::IntVar(vid)) => {
-                if self.int_unification_table.borrow_mut().has_value(vid) {
-                    Neither
-                } else {
-                    UnconstrainedInt
-                }
-            },
-            ty::TyInfer(ty::FloatVar(vid)) => {
-                if self.float_unification_table.borrow_mut().has_value(vid) {
-                    Neither
-                } else {
-                    UnconstrainedFloat
-                }
-            },
-            _ => Neither,
-        }
-    }
-
-    /// Returns a type variable's default fallback if any exists. A default
-    /// must be attached to the variable when created; if it is created
-    /// without a default, this will return None.
-    ///
-    /// This code does not apply to integral or floating point variables,
-    /// only to user-declared defaults.
-    ///
-    /// See `new_ty_var_with_default` to create a type variable with a default.
-    /// See `type_variable::Default` for details about what a default entails.
-    pub fn default(&self, ty: Ty<'tcx>) -> Option<type_variable::Default<'tcx>> {
-        match ty.sty {
-            ty::TyInfer(ty::TyVar(vid)) => self.type_variables.borrow().default(vid),
-            _ => None
-        }
-    }
-
-    pub fn unsolved_variables(&self) -> Vec<ty::Ty<'tcx>> {
-        let mut variables = Vec::new();
-
-        let unbound_ty_vars = self.type_variables
-                                  .borrow()
-                                  .unsolved_variables()
-                                  .into_iter()
-                                  .map(|t| self.tcx.mk_var(t));
-
-        let unbound_int_vars = self.int_unification_table
-                                   .borrow_mut()
-                                   .unsolved_variables()
-                                   .into_iter()
-                                   .map(|v| self.tcx.mk_int_var(v));
-
-        let unbound_float_vars = self.float_unification_table
-                                     .borrow_mut()
-                                     .unsolved_variables()
-                                     .into_iter()
-                                     .map(|v| self.tcx.mk_float_var(v));
-
-        variables.extend(unbound_ty_vars);
-        variables.extend(unbound_int_vars);
-        variables.extend(unbound_float_vars);
-
-        return variables;
-    }
-
-    fn combine_fields(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>)
-                      -> CombineFields<'a, 'tcx> {
-        CombineFields {infcx: self,
-                       a_is_expected: a_is_expected,
-                       trace: trace,
-                       cause: None}
-    }
-
-    // public so that it can be used from the rustc_driver unit tests
-    pub fn equate(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>)
-              -> equate::Equate<'a, 'tcx>
-    {
-        self.combine_fields(a_is_expected, trace).equate()
-    }
-
-    // public so that it can be used from the rustc_driver unit tests
-    pub fn sub(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>)
-               -> sub::Sub<'a, 'tcx>
-    {
-        self.combine_fields(a_is_expected, trace).sub()
-    }
-
-    // public so that it can be used from the rustc_driver unit tests
-    pub fn lub(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>)
-               -> lub::Lub<'a, 'tcx>
-    {
-        self.combine_fields(a_is_expected, trace).lub()
-    }
-
-    // public so that it can be used from the rustc_driver unit tests
-    pub fn glb(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>)
-               -> glb::Glb<'a, 'tcx>
-    {
-        self.combine_fields(a_is_expected, trace).glb()
-    }
-
-    fn start_snapshot(&self) -> CombinedSnapshot {
-        CombinedSnapshot {
-            type_snapshot: self.type_variables.borrow_mut().snapshot(),
-            int_snapshot: self.int_unification_table.borrow_mut().snapshot(),
-            float_snapshot: self.float_unification_table.borrow_mut().snapshot(),
-            region_vars_snapshot: self.region_vars.start_snapshot(),
-        }
-    }
-
-    fn rollback_to(&self, cause: &str, snapshot: CombinedSnapshot) {
-        debug!("rollback_to(cause={})", cause);
-        let CombinedSnapshot { type_snapshot,
-                               int_snapshot,
-                               float_snapshot,
-                               region_vars_snapshot } = snapshot;
-
-        self.type_variables
-            .borrow_mut()
-            .rollback_to(type_snapshot);
-        self.int_unification_table
-            .borrow_mut()
-            .rollback_to(int_snapshot);
-        self.float_unification_table
-            .borrow_mut()
-            .rollback_to(float_snapshot);
-        self.region_vars
-            .rollback_to(region_vars_snapshot);
-    }
-
-    fn commit_from(&self, snapshot: CombinedSnapshot) {
-        debug!("commit_from!");
-        let CombinedSnapshot { type_snapshot,
-                               int_snapshot,
-                               float_snapshot,
-                               region_vars_snapshot } = snapshot;
-
-        self.type_variables
-            .borrow_mut()
-            .commit(type_snapshot);
-        self.int_unification_table
-            .borrow_mut()
-            .commit(int_snapshot);
-        self.float_unification_table
-            .borrow_mut()
-            .commit(float_snapshot);
-        self.region_vars
-            .commit(region_vars_snapshot);
-    }
-
-    /// Execute `f` and commit the bindings
-    pub fn commit_unconditionally<R, F>(&self, f: F) -> R where
-        F: FnOnce() -> R,
-    {
-        debug!("commit()");
-        let snapshot = self.start_snapshot();
-        let r = f();
-        self.commit_from(snapshot);
-        r
-    }
-
-    /// Execute `f` and commit the bindings if closure `f` returns `Ok(_)`
-    pub fn commit_if_ok<T, E, F>(&self, f: F) -> Result<T, E> where
-        F: FnOnce(&CombinedSnapshot) -> Result<T, E>
-    {
-        debug!("commit_if_ok()");
-        let snapshot = self.start_snapshot();
-        let r = f(&snapshot);
-        debug!("commit_if_ok() -- r.is_ok() = {}", r.is_ok());
-        match r {
-            Ok(_) => { self.commit_from(snapshot); }
-            Err(_) => { self.rollback_to("commit_if_ok -- error", snapshot); }
-        }
-        r
-    }
-
-    /// Execute `f` and commit only the region bindings if successful.
-    /// The function f must be very careful not to leak any non-region
-    /// variables that get created.
-    pub fn commit_regions_if_ok<T, E, F>(&self, f: F) -> Result<T, E> where
-        F: FnOnce() -> Result<T, E>
-    {
-        debug!("commit_regions_if_ok()");
-        let CombinedSnapshot { type_snapshot,
-                               int_snapshot,
-                               float_snapshot,
-                               region_vars_snapshot } = self.start_snapshot();
-
-        let r = self.commit_if_ok(|_| f());
-
-        debug!("commit_regions_if_ok: rolling back everything but regions");
-
-        // Roll back any non-region bindings - they should be resolved
-        // inside `f`, with, e.g. `resolve_type_vars_if_possible`.
-        self.type_variables
-            .borrow_mut()
-            .rollback_to(type_snapshot);
-        self.int_unification_table
-            .borrow_mut()
-            .rollback_to(int_snapshot);
-        self.float_unification_table
-            .borrow_mut()
-            .rollback_to(float_snapshot);
-
-        // Commit region vars that may escape through resolved types.
-        self.region_vars
-            .commit(region_vars_snapshot);
-
-        r
-    }
-
-    /// Execute `f` then unroll any bindings it creates
-    pub fn probe<R, F>(&self, f: F) -> R where
-        F: FnOnce(&CombinedSnapshot) -> R,
-    {
-        debug!("probe()");
-        let snapshot = self.start_snapshot();
-        let r = f(&snapshot);
-        self.rollback_to("probe", snapshot);
-        r
-    }
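    // Illustrative usage sketch (not part of the original source): `probe` and
    // `commit_if_ok` are the snapshot-based entry points most callers use.
    // Assuming an `infcx: &InferCtxt`, two types `a` and `b`, and a `span`
    // are in scope, the pattern looks roughly like:
    //
    //     // Ask whether `a <: b` could hold, without committing any bindings.
    //     let could_sub = infcx.probe(|_| {
    //         infcx.sub_types(false, TypeOrigin::Misc(span), a, b).is_ok()
    //     });
    //
    //     // Enforce `a <: b`, keeping the inference bindings only on success.
    //     let enforced = infcx.commit_if_ok(|_| {
    //         infcx.sub_types(false, TypeOrigin::Misc(span), a, b)
    //     });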
-
-    pub fn add_given(&self,
-                     sub: ty::FreeRegion,
-                     sup: ty::RegionVid)
-    {
-        self.region_vars.add_given(sub, sup);
-    }
-
-    pub fn sub_types(&self,
-                     a_is_expected: bool,
-                     origin: TypeOrigin,
-                     a: Ty<'tcx>,
-                     b: Ty<'tcx>)
-                     -> UnitResult<'tcx>
-    {
-        debug!("sub_types({:?} <: {:?})", a, b);
-        self.commit_if_ok(|_| {
-            let trace = TypeTrace::types(origin, a_is_expected, a, b);
-            self.sub(a_is_expected, trace).relate(&a, &b).map(|_| ())
-        })
-    }
-
-    pub fn eq_types(&self,
-                    a_is_expected: bool,
-                    origin: TypeOrigin,
-                    a: Ty<'tcx>,
-                    b: Ty<'tcx>)
-                    -> UnitResult<'tcx>
-    {
-        self.commit_if_ok(|_| {
-            let trace = TypeTrace::types(origin, a_is_expected, a, b);
-            self.equate(a_is_expected, trace).relate(&a, &b).map(|_| ())
-        })
-    }
-
-    pub fn eq_trait_refs(&self,
-                          a_is_expected: bool,
-                          origin: TypeOrigin,
-                          a: ty::TraitRef<'tcx>,
-                          b: ty::TraitRef<'tcx>)
-                          -> UnitResult<'tcx>
-    {
-        debug!("eq_trait_refs({:?} <: {:?})",
-               a,
-               b);
-        self.commit_if_ok(|_| {
-            let trace = TypeTrace {
-                origin: origin,
-                values: TraitRefs(expected_found(a_is_expected, a.clone(), b.clone()))
-            };
-            self.equate(a_is_expected, trace).relate(&a, &b).map(|_| ())
-        })
-    }
-
-    pub fn sub_poly_trait_refs(&self,
-                               a_is_expected: bool,
-                               origin: TypeOrigin,
-                               a: ty::PolyTraitRef<'tcx>,
-                               b: ty::PolyTraitRef<'tcx>)
-                               -> UnitResult<'tcx>
-    {
-        debug!("sub_poly_trait_refs({:?} <: {:?})",
-               a,
-               b);
-        self.commit_if_ok(|_| {
-            let trace = TypeTrace {
-                origin: origin,
-                values: PolyTraitRefs(expected_found(a_is_expected, a.clone(), b.clone()))
-            };
-            self.sub(a_is_expected, trace).relate(&a, &b).map(|_| ())
-        })
-    }
-
-    pub fn skolemize_late_bound_regions<T>(&self,
-                                           value: &ty::Binder<T>,
-                                           snapshot: &CombinedSnapshot)
-                                           -> (T, SkolemizationMap)
-        where T : TypeFoldable<'tcx>
-    {
-        /*! See `higher_ranked::skolemize_late_bound_regions` */
-
-        higher_ranked::skolemize_late_bound_regions(self, value, snapshot)
-    }
-
-    pub fn leak_check(&self,
-                      skol_map: &SkolemizationMap,
-                      snapshot: &CombinedSnapshot)
-                      -> UnitResult<'tcx>
-    {
-        /*! See `higher_ranked::leak_check` */
-
-        match higher_ranked::leak_check(self, skol_map, snapshot) {
-            Ok(()) => Ok(()),
-            Err((br, r)) => Err(TypeError::RegionsInsufficientlyPolymorphic(br, r))
-        }
-    }
-
-    pub fn plug_leaks<T>(&self,
-                         skol_map: SkolemizationMap,
-                         snapshot: &CombinedSnapshot,
-                         value: &T)
-                         -> T
-        where T : TypeFoldable<'tcx>
-    {
-        /*! See `higher_ranked::plug_leaks` */
-
-        higher_ranked::plug_leaks(self, skol_map, snapshot, value)
-    }
-
-    pub fn equality_predicate(&self,
-                              span: Span,
-                              predicate: &ty::PolyEquatePredicate<'tcx>)
-                              -> UnitResult<'tcx> {
-        self.commit_if_ok(|snapshot| {
-            let (ty::EquatePredicate(a, b), skol_map) =
-                self.skolemize_late_bound_regions(predicate, snapshot);
-            let origin = TypeOrigin::EquatePredicate(span);
-            let () = try!(mk_eqty(self, false, origin, a, b));
-            self.leak_check(&skol_map, snapshot)
-        })
-    }
-
-    pub fn region_outlives_predicate(&self,
-                                     span: Span,
-                                     predicate: &ty::PolyRegionOutlivesPredicate)
-                                     -> UnitResult<'tcx> {
-        self.commit_if_ok(|snapshot| {
-            let (ty::OutlivesPredicate(r_a, r_b), skol_map) =
-                self.skolemize_late_bound_regions(predicate, snapshot);
-            let origin = RelateRegionParamBound(span);
-            let () = mk_subr(self, origin, r_b, r_a); // `b : a` ==> `a <= b`
-            self.leak_check(&skol_map, snapshot)
-        })
-    }
-
-    pub fn next_ty_var_id(&self, diverging: bool) -> TyVid {
-        self.type_variables
-            .borrow_mut()
-            .new_var(diverging, None)
-    }
-
-    pub fn next_ty_var(&self) -> Ty<'tcx> {
-        self.tcx.mk_var(self.next_ty_var_id(false))
-    }
-
-    pub fn next_ty_var_with_default(&self,
-                                    default: Option<type_variable::Default<'tcx>>) -> Ty<'tcx> {
-        let ty_var_id = self.type_variables
-                            .borrow_mut()
-                            .new_var(false, default);
-
-        self.tcx.mk_var(ty_var_id)
-    }
-
-    pub fn next_diverging_ty_var(&self) -> Ty<'tcx> {
-        self.tcx.mk_var(self.next_ty_var_id(true))
-    }
-
-    pub fn next_ty_vars(&self, n: usize) -> Vec<Ty<'tcx>> {
-        (0..n).map(|_i| self.next_ty_var()).collect()
-    }
-
-    pub fn next_int_var_id(&self) -> IntVid {
-        self.int_unification_table
-            .borrow_mut()
-            .new_key(None)
-    }
-
-    pub fn next_float_var_id(&self) -> FloatVid {
-        self.float_unification_table
-            .borrow_mut()
-            .new_key(None)
-    }
-
-    pub fn next_region_var(&self, origin: RegionVariableOrigin) -> ty::Region {
-        ty::ReVar(self.region_vars.new_region_var(origin))
-    }
-
-    pub fn region_vars_for_defs(&self,
-                                span: Span,
-                                defs: &[ty::RegionParameterDef])
-                                -> Vec<ty::Region> {
-        defs.iter()
-            .map(|d| self.next_region_var(EarlyBoundRegion(span, d.name)))
-            .collect()
-    }
-
-    // We have to take `&mut Substs` in order to provide the correct substitutions for defaults
-    // along the way; for this reason we don't return them.
-    pub fn type_vars_for_defs(&self,
-                              span: Span,
-                              space: subst::ParamSpace,
-                              substs: &mut Substs<'tcx>,
-                              defs: &[ty::TypeParameterDef<'tcx>]) {
-
-        let mut vars = Vec::with_capacity(defs.len());
-
-        for def in defs.iter() {
-            let default = def.default.map(|default| {
-                type_variable::Default {
-                    ty: default.subst_spanned(self.tcx, substs, Some(span)),
-                    origin_span: span,
-                    def_id: def.default_def_id
-                }
-            });
-
-            let ty_var = self.next_ty_var_with_default(default);
-            substs.types.push(space, ty_var);
-            vars.push(ty_var)
-        }
-    }
-
-    /// Given a set of generics defined on a type or impl, returns a substitution mapping each
-    /// type/region parameter to a fresh inference variable.
-    pub fn fresh_substs_for_generics(&self,
-                                     span: Span,
-                                     generics: &ty::Generics<'tcx>)
-                                     -> subst::Substs<'tcx>
-    {
-        let type_params = subst::VecPerParamSpace::empty();
-
-        let region_params =
-            generics.regions.map(
-                |d| self.next_region_var(EarlyBoundRegion(span, d.name)));
-
-        let mut substs = subst::Substs::new(type_params, region_params);
-
-        for space in subst::ParamSpace::all().iter() {
-            self.type_vars_for_defs(
-                span,
-                *space,
-                &mut substs,
-                generics.types.get_slice(*space));
-        }
-
-        return substs;
-    }
-
-    /// Given a set of generics defined on a trait, returns a substitution mapping each output
-    /// type/region parameter to a fresh inference variable, and mapping the self type to
-    /// `self_ty`.
-    pub fn fresh_substs_for_trait(&self,
-                                  span: Span,
-                                  generics: &ty::Generics<'tcx>,
-                                  self_ty: Ty<'tcx>)
-                                  -> subst::Substs<'tcx>
-    {
-
-        assert!(generics.types.len(subst::SelfSpace) == 1);
-        assert!(generics.types.len(subst::FnSpace) == 0);
-        assert!(generics.regions.len(subst::SelfSpace) == 0);
-        assert!(generics.regions.len(subst::FnSpace) == 0);
-
-        let type_params = Vec::new();
-
-        let region_param_defs = generics.regions.get_slice(subst::TypeSpace);
-        let regions = self.region_vars_for_defs(span, region_param_defs);
-
-        let mut substs = subst::Substs::new_trait(type_params, regions, self_ty);
-
-        let type_parameter_defs = generics.types.get_slice(subst::TypeSpace);
-        self.type_vars_for_defs(span, subst::TypeSpace, &mut substs, type_parameter_defs);
-
-        return substs;
-    }
-
-    pub fn fresh_bound_region(&self, debruijn: ty::DebruijnIndex) -> ty::Region {
-        self.region_vars.new_bound(debruijn)
-    }
-
-    /// Apply `adjustment` to the type of `expr`
-    pub fn adjust_expr_ty(&self,
-                          expr: &hir::Expr,
-                          adjustment: Option<&adjustment::AutoAdjustment<'tcx>>)
-                          -> Ty<'tcx>
-    {
-        let raw_ty = self.expr_ty(expr);
-        let raw_ty = self.shallow_resolve(raw_ty);
-        let resolve_ty = |ty: Ty<'tcx>| self.resolve_type_vars_if_possible(&ty);
-        raw_ty.adjust(self.tcx,
-                      expr.span,
-                      expr.id,
-                      adjustment,
-                      |method_call| self.tables
-                                        .borrow()
-                                        .method_map
-                                        .get(&method_call)
-                                        .map(|method| resolve_ty(method.ty)))
-    }
-
-    pub fn errors_since_creation(&self) -> bool {
-        self.tcx.sess.err_count() - self.err_count_on_creation != 0
-    }
-
-    pub fn node_type(&self, id: ast::NodeId) -> Ty<'tcx> {
-        match self.tables.borrow().node_types.get(&id) {
-            Some(&t) => t,
-            // FIXME
-            None if self.errors_since_creation() =>
-                self.tcx.types.err,
-            None => {
-                self.tcx.sess.bug(
-                    &format!("no type for node {}: {} in fcx",
-                            id, self.tcx.map.node_to_string(id)));
-            }
-        }
-    }
-
-    pub fn expr_ty(&self, ex: &hir::Expr) -> Ty<'tcx> {
-        match self.tables.borrow().node_types.get(&ex.id) {
-            Some(&t) => t,
-            None => {
-                self.tcx.sess.bug("no type for expr in fcx");
-            }
-        }
-    }
-
-    pub fn resolve_regions_and_report_errors(&self,
-                                             free_regions: &FreeRegionMap,
-                                             subject_node_id: ast::NodeId) {
-        let errors = self.region_vars.resolve_regions(free_regions, subject_node_id);
-        if !self.errors_since_creation() {
-            // As a heuristic, just skip reporting region errors
-            // altogether if other errors have been reported while
-            // this infcx was in use.  This is totally hokey but
-            // otherwise we have a hard time separating legit region
-            // errors from silly ones.
-            self.report_region_errors(&errors); // see error_reporting.rs
-        }
-    }
-
-    pub fn ty_to_string(&self, t: Ty<'tcx>) -> String {
-        self.resolve_type_vars_if_possible(&t).to_string()
-    }
-
-    pub fn tys_to_string(&self, ts: &[Ty<'tcx>]) -> String {
-        let tstrs: Vec<String> = ts.iter().map(|t| self.ty_to_string(*t)).collect();
-        format!("({})", tstrs.join(", "))
-    }
-
-    pub fn trait_ref_to_string(&self, t: &ty::TraitRef<'tcx>) -> String {
-        self.resolve_type_vars_if_possible(t).to_string()
-    }
-
-    pub fn shallow_resolve(&self, typ: Ty<'tcx>) -> Ty<'tcx> {
-        match typ.sty {
-            ty::TyInfer(ty::TyVar(v)) => {
-                // Not entirely obvious: if `typ` is a type variable,
-                // it can be resolved to an int/float variable, which
-                // can then be recursively resolved, hence the
-                // recursion. Note though that we prevent type
-                // variables from unifying to other type variables
-                // directly (though they may be embedded
-                // structurally), and we prevent cycles in any case,
-                // so this recursion should always be of very limited
-                // depth.
-                self.type_variables.borrow()
-                    .probe(v)
-                    .map(|t| self.shallow_resolve(t))
-                    .unwrap_or(typ)
-            }
-
-            ty::TyInfer(ty::IntVar(v)) => {
-                self.int_unification_table
-                    .borrow_mut()
-                    .probe(v)
-                    .map(|v| v.to_type(self.tcx))
-                    .unwrap_or(typ)
-            }
-
-            ty::TyInfer(ty::FloatVar(v)) => {
-                self.float_unification_table
-                    .borrow_mut()
-                    .probe(v)
-                    .map(|v| v.to_type(self.tcx))
-                    .unwrap_or(typ)
-            }
-
-            _ => {
-                typ
-            }
-        }
-    }
-
-    pub fn resolve_type_vars_if_possible<T>(&self, value: &T) -> T
-        where T: TypeFoldable<'tcx>
-    {
-        /*!
-         * Where possible, replaces type/int/float variables in
-         * `value` with their final value. Note that region variables
-         * are unaffected. If a type variable has not been unified, it
-         * is left as is.  This is an idempotent operation that does
-         * not affect inference state in any way and so you can do it
-         * at will.
-         */
-
-        if !value.needs_infer() {
-            return value.clone(); // avoid duplicated subst-folding
-        }
-        let mut r = resolve::OpportunisticTypeResolver::new(self);
-        value.fold_with(&mut r)
-    }
-
-    pub fn resolve_type_and_region_vars_if_possible<T>(&self, value: &T) -> T
-        where T: TypeFoldable<'tcx>
-    {
-        let mut r = resolve::OpportunisticTypeAndRegionResolver::new(self);
-        value.fold_with(&mut r)
-    }
-
-    /// Resolves all type variables in `t` and then, if any were left
-    /// unresolved, substitutes an error type. This is used after the
-    /// main checking when doing a second pass before writeback. The
-    /// justification is that writeback will produce an error for
-    /// these unconstrained type variables.
-    fn resolve_type_vars_or_error(&self, t: &Ty<'tcx>) -> mc::McResult<Ty<'tcx>> {
-        let ty = self.resolve_type_vars_if_possible(t);
-        if ty.references_error() || ty.is_ty_var() {
-            debug!("resolve_type_vars_or_error: error from {:?}", ty);
-            Err(())
-        } else {
-            Ok(ty)
-        }
-    }
-
-    pub fn fully_resolve<T:TypeFoldable<'tcx>>(&self, value: &T) -> FixupResult<T> {
-        /*!
-         * Attempts to resolve all type/region variables in
-         * `value`. Region inference must have been run already (e.g.,
-         * by calling `resolve_regions_and_report_errors`).  If some
-         * variable was never unified, an `Err` results.
-         *
-         * This method is idempotent, but it is not typically invoked
-         * except during the writeback phase.
-         */
-
-        resolve::fully_resolve(self, value)
-    }
-
-    // [Note-Type-error-reporting]
-    // An invariant is that anytime the expected or actual type is TyError (the special
-    // error type, meaning that an error occurred when typechecking this expression),
-    // this is a derived error. The error cascaded from another error (that was already
-    // reported), so it's not useful to display it to the user.
-    // The following four methods -- type_error_message_str, type_error_message_str_with_expected,
-    // type_error_message, and report_mismatched_types -- implement this logic.
-    // They check if either the actual or expected type is TyError, and don't print the error
-    // in this case. The typechecker should only ever report type errors involving mismatched
-    // types using one of these four methods, and should not call span_err directly for such
-    // errors.
-    pub fn type_error_message_str<M>(&self,
-                                     sp: Span,
-                                     mk_msg: M,
-                                     actual_ty: String,
-                                     err: Option<&TypeError<'tcx>>)
-        where M: FnOnce(Option<String>, String) -> String,
-    {
-        self.type_error_message_str_with_expected(sp, mk_msg, None, actual_ty, err)
-    }
-
-    pub fn type_error_struct_str<M>(&self,
-                                    sp: Span,
-                                    mk_msg: M,
-                                    actual_ty: String,
-                                    err: Option<&TypeError<'tcx>>)
-                                    -> DiagnosticBuilder<'tcx>
-        where M: FnOnce(Option<String>, String) -> String,
-    {
-        self.type_error_struct_str_with_expected(sp, mk_msg, None, actual_ty, err)
-    }
-
-    pub fn type_error_message_str_with_expected<M>(&self,
-                                                   sp: Span,
-                                                   mk_msg: M,
-                                                   expected_ty: Option<Ty<'tcx>>,
-                                                   actual_ty: String,
-                                                   err: Option<&TypeError<'tcx>>)
-        where M: FnOnce(Option<String>, String) -> String,
-    {
-        self.type_error_struct_str_with_expected(sp, mk_msg, expected_ty, actual_ty, err)
-            .emit();
-    }
-
-    pub fn type_error_struct_str_with_expected<M>(&self,
-                                                  sp: Span,
-                                                  mk_msg: M,
-                                                  expected_ty: Option<Ty<'tcx>>,
-                                                  actual_ty: String,
-                                                  err: Option<&TypeError<'tcx>>)
-                                                  -> DiagnosticBuilder<'tcx>
-        where M: FnOnce(Option<String>, String) -> String,
-    {
-        debug!("hi! expected_ty = {:?}, actual_ty = {}", expected_ty, actual_ty);
-
-        let resolved_expected = expected_ty.map(|e_ty| self.resolve_type_vars_if_possible(&e_ty));
-
-        if !resolved_expected.references_error() {
-            let error_str = err.map_or("".to_string(), |t_err| {
-                format!(" ({})", t_err)
-            });
-
-            let mut db = self.tcx.sess.struct_span_err(sp, &format!("{}{}",
-                mk_msg(resolved_expected.map(|t| self.ty_to_string(t)), actual_ty),
-                error_str));
-
-            if let Some(err) = err {
-                self.tcx.note_and_explain_type_err(&mut db, err, sp);
-            }
-            db
-        } else {
-            self.tcx.sess.diagnostic().struct_dummy()
-        }
-    }
-
-    pub fn type_error_message<M>(&self,
-                                 sp: Span,
-                                 mk_msg: M,
-                                 actual_ty: Ty<'tcx>,
-                                 err: Option<&TypeError<'tcx>>)
-        where M: FnOnce(String) -> String,
-    {
-        self.type_error_struct(sp, mk_msg, actual_ty, err).emit();
-    }
-
-    pub fn type_error_struct<M>(&self,
-                                sp: Span,
-                                mk_msg: M,
-                                actual_ty: Ty<'tcx>,
-                                err: Option<&TypeError<'tcx>>)
-                                -> DiagnosticBuilder<'tcx>
-        where M: FnOnce(String) -> String,
-    {
-        let actual_ty = self.resolve_type_vars_if_possible(&actual_ty);
-
-        // Don't report an error if actual type is TyError.
-        if actual_ty.references_error() {
-            return self.tcx.sess.diagnostic().struct_dummy();
-        }
-
-        self.type_error_struct_str(sp,
-            move |_e, a| { mk_msg(a) },
-            self.ty_to_string(actual_ty), err)
-    }
-
-    pub fn report_mismatched_types(&self,
-                                   span: Span,
-                                   expected: Ty<'tcx>,
-                                   actual: Ty<'tcx>,
-                                   err: &TypeError<'tcx>) {
-        let trace = TypeTrace {
-            origin: TypeOrigin::Misc(span),
-            values: Types(ExpectedFound {
-                expected: expected,
-                found: actual
-            })
-        };
-        self.report_and_explain_type_error(trace, err);
-    }
-
-    pub fn report_conflicting_default_types(&self,
-                                            span: Span,
-                                            expected: type_variable::Default<'tcx>,
-                                            actual: type_variable::Default<'tcx>) {
-        let trace = TypeTrace {
-            origin: TypeOrigin::Misc(span),
-            values: Types(ExpectedFound {
-                expected: expected.ty,
-                found: actual.ty
-            })
-        };
-
-        self.report_and_explain_type_error(trace,
-            &TypeError::TyParamDefaultMismatch(ExpectedFound {
-                expected: expected,
-                found: actual
-        }));
-    }
-
-    pub fn replace_late_bound_regions_with_fresh_var<T>(
-        &self,
-        span: Span,
-        lbrct: LateBoundRegionConversionTime,
-        value: &ty::Binder<T>)
-        -> (T, FnvHashMap<ty::BoundRegion,ty::Region>)
-        where T : TypeFoldable<'tcx>
-    {
-        self.tcx.replace_late_bound_regions(
-            value,
-            |br| self.next_region_var(LateBoundRegion(span, br, lbrct)))
-    }
-
-    /// See `verify_generic_bound` method in `region_inference`
-    pub fn verify_generic_bound(&self,
-                                origin: SubregionOrigin<'tcx>,
-                                kind: GenericKind<'tcx>,
-                                a: ty::Region,
-                                bound: VerifyBound) {
-        debug!("verify_generic_bound({:?}, {:?} <: {:?})",
-               kind,
-               a,
-               bound);
-
-        self.region_vars.verify_generic_bound(origin, kind, a, bound);
-    }
-
-    pub fn can_equate<'b,T>(&'b self, a: &T, b: &T) -> UnitResult<'tcx>
-        where T: Relate<'b,'tcx> + fmt::Debug
-    {
-        debug!("can_equate({:?}, {:?})", a, b);
-        self.probe(|_| {
-            // Gin up a dummy trace, since this won't be committed
-            // anyhow. We should make this typetrace stuff more
-            // generic so we don't have to do anything quite this
-            // terrible.
-            let e = self.tcx.types.err;
-            let trace = TypeTrace {
-                origin: TypeOrigin::Misc(codemap::DUMMY_SP),
-                values: Types(expected_found(true, e, e))
-            };
-            self.equate(true, trace).relate(a, b)
-        }).map(|_| ())
-    }
-
-    pub fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>> {
-        let ty = self.node_type(id);
-        self.resolve_type_vars_or_error(&ty)
-    }
-
-    pub fn expr_ty_adjusted(&self, expr: &hir::Expr) -> McResult<Ty<'tcx>> {
-        let ty = self.adjust_expr_ty(expr, self.tables.borrow().adjustments.get(&expr.id));
-        self.resolve_type_vars_or_error(&ty)
-    }
-
-    pub fn tables_are_tcx_tables(&self) -> bool {
-        let tables: &RefCell<ty::Tables> = &self.tables;
-        let tcx_tables: &RefCell<ty::Tables> = &self.tcx.tables;
-        tables as *const _ == tcx_tables as *const _
-    }
-
-    pub fn type_moves_by_default(&self, ty: Ty<'tcx>, span: Span) -> bool {
-        let ty = self.resolve_type_vars_if_possible(&ty);
-        if ty.needs_infer() ||
-            (ty.has_closure_types() && !self.tables_are_tcx_tables()) {
-            // this can get called from typeck (by euv), and moves_by_default
-            // rightly refuses to work with inference variables, but
-            // moves_by_default has a cache, which we want to use in other
-            // cases.
-            !traits::type_known_to_meet_builtin_bound(self, ty, ty::BoundCopy, span)
-        } else {
-            ty.moves_by_default(&self.parameter_environment, span)
-        }
-    }
-
-    pub fn node_method_ty(&self, method_call: ty::MethodCall)
-                          -> Option<Ty<'tcx>> {
-        self.tables
-            .borrow()
-            .method_map
-            .get(&method_call)
-            .map(|method| method.ty)
-            .map(|ty| self.resolve_type_vars_if_possible(&ty))
-    }
-
-    pub fn node_method_id(&self, method_call: ty::MethodCall)
-                          -> Option<DefId> {
-        self.tables
-            .borrow()
-            .method_map
-            .get(&method_call)
-            .map(|method| method.def_id)
-    }
-
-    pub fn adjustments(&self) -> Ref<NodeMap<adjustment::AutoAdjustment<'tcx>>> {
-        fn project_adjustments<'a, 'tcx>(tables: &'a ty::Tables<'tcx>)
-                                        -> &'a NodeMap<adjustment::AutoAdjustment<'tcx>> {
-            &tables.adjustments
-        }
-
-        Ref::map(self.tables.borrow(), project_adjustments)
-    }
-
-    pub fn is_method_call(&self, id: ast::NodeId) -> bool {
-        self.tables.borrow().method_map.contains_key(&ty::MethodCall::expr(id))
-    }
-
-    pub fn temporary_scope(&self, rvalue_id: ast::NodeId) -> Option<CodeExtent> {
-        self.tcx.region_maps.temporary_scope(rvalue_id)
-    }
-
-    pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture> {
-        self.tables.borrow().upvar_capture_map.get(&upvar_id).cloned()
-    }
-
-    pub fn param_env<'b>(&'b self) -> &'b ty::ParameterEnvironment<'b,'tcx> {
-        &self.parameter_environment
-    }
-
-    pub fn closure_kind(&self,
-                        def_id: DefId)
-                        -> Option<ty::ClosureKind>
-    {
-        if def_id.is_local() {
-            self.tables.borrow().closure_kinds.get(&def_id).cloned()
-        } else {
-            // During typeck, ALL closures are local. But afterwards,
-            // during trans, we see closure ids from other crates.
-            // That may require loading the closure data out of the
-            // cstore.
-            Some(ty::Tables::closure_kind(&self.tables, self.tcx, def_id))
-        }
-    }
-
-    pub fn closure_type(&self,
-                        def_id: DefId,
-                        substs: &ty::ClosureSubsts<'tcx>)
-                        -> ty::ClosureTy<'tcx>
-    {
-        let closure_ty =
-            ty::Tables::closure_type(self.tables,
-                                     self.tcx,
-                                     def_id,
-                                     substs);
-
-        if self.normalize {
-            normalize_associated_type(&self.tcx, &closure_ty)
-        } else {
-            closure_ty
-        }
-    }
-}
-
-impl<'tcx> TypeTrace<'tcx> {
-    pub fn span(&self) -> Span {
-        self.origin.span()
-    }
-
-    pub fn types(origin: TypeOrigin,
-                 a_is_expected: bool,
-                 a: Ty<'tcx>,
-                 b: Ty<'tcx>)
-                 -> TypeTrace<'tcx> {
-        TypeTrace {
-            origin: origin,
-            values: Types(expected_found(a_is_expected, a, b))
-        }
-    }
-
-    pub fn dummy(tcx: &ty::ctxt<'tcx>) -> TypeTrace<'tcx> {
-        TypeTrace {
-            origin: TypeOrigin::Misc(codemap::DUMMY_SP),
-            values: Types(ExpectedFound {
-                expected: tcx.types.err,
-                found: tcx.types.err,
-            })
-        }
-    }
-}
-
-impl<'tcx> fmt::Debug for TypeTrace<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "TypeTrace({:?})", self.origin)
-    }
-}
-
-impl TypeOrigin {
-    pub fn span(&self) -> Span {
-        match *self {
-            TypeOrigin::MethodCompatCheck(span) => span,
-            TypeOrigin::ExprAssignable(span) => span,
-            TypeOrigin::Misc(span) => span,
-            TypeOrigin::RelateTraitRefs(span) => span,
-            TypeOrigin::RelateSelfType(span) => span,
-            TypeOrigin::RelateOutputImplTypes(span) => span,
-            TypeOrigin::MatchExpressionArm(match_span, _, _) => match_span,
-            TypeOrigin::IfExpression(span) => span,
-            TypeOrigin::IfExpressionWithNoElse(span) => span,
-            TypeOrigin::RangeExpression(span) => span,
-            TypeOrigin::EquatePredicate(span) => span,
-        }
-    }
-}
-
-impl<'tcx> SubregionOrigin<'tcx> {
-    pub fn span(&self) -> Span {
-        match *self {
-            Subtype(ref a) => a.span(),
-            InfStackClosure(a) => a,
-            InvokeClosure(a) => a,
-            DerefPointer(a) => a,
-            FreeVariable(a, _) => a,
-            IndexSlice(a) => a,
-            RelateObjectBound(a) => a,
-            RelateParamBound(a, _) => a,
-            RelateRegionParamBound(a) => a,
-            RelateDefaultParamBound(a, _) => a,
-            Reborrow(a) => a,
-            ReborrowUpvar(a, _) => a,
-            DataBorrowed(_, a) => a,
-            ReferenceOutlivesReferent(_, a) => a,
-            ParameterInScope(_, a) => a,
-            ExprTypeIsNotInScope(_, a) => a,
-            BindingTypeIsNotValidAtDecl(a) => a,
-            CallRcvr(a) => a,
-            CallArg(a) => a,
-            CallReturn(a) => a,
-            Operand(a) => a,
-            AddrOf(a) => a,
-            AutoBorrow(a) => a,
-            SafeDestructor(a) => a,
-        }
-    }
-}
-
-impl RegionVariableOrigin {
-    pub fn span(&self) -> Span {
-        match *self {
-            MiscVariable(a) => a,
-            PatternRegion(a) => a,
-            AddrOfRegion(a) => a,
-            Autoref(a) => a,
-            Coercion(a) => a,
-            EarlyBoundRegion(a, _) => a,
-            LateBoundRegion(a, _, _) => a,
-            BoundRegionInCoherence(_) => codemap::DUMMY_SP,
-            UpvarRegion(_, a) => a
-        }
-    }
-}
diff --git a/src/librustc/middle/infer/region_inference/README.md b/src/librustc/middle/infer/region_inference/README.md
deleted file mode 100644 (file)
index 80da861..0000000
+++ /dev/null
@@ -1,311 +0,0 @@
-# Region inference
-
-# Terminology
-
-Note that we use the terms region and lifetime interchangeably.
-
-# Introduction
-
-Region inference uses a somewhat more involved algorithm than type
-inference. It is not the most efficient thing ever written, though it
-seems to work well enough in practice (famous last words).  The reason
-we use a different algorithm is that, unlike with types, it is
-impractical to hand-annotate with regions (in some cases, there aren't
-even the requisite syntactic forms).  So we have to get it right, and
-it's worth spending more time on a more involved analysis.  Moreover,
-regions are a simpler case than types: they don't have aggregate
-structure, for example.
-
-Unlike normal type inference, which is similar in spirit to H-M and thus
-works progressively, region inference works by accumulating
-constraints over the course of a function.  Then, once the whole
-function has been processed, we solve all the constraints at
-once.
-
-The constraints always take one of three possible forms (a code sketch
-of these, and of the "verify" forms below, follows the two lists):
-
-- `ConstrainVarSubVar(Ri, Rj)` states that region variable Ri must be
-  a subregion of Rj
-- `ConstrainRegSubVar(R, Ri)` states that the concrete region R (which
-  must not be a variable) must be a subregion of the variable Ri
-- `ConstrainVarSubReg(Ri, R)` states that the variable Ri should be less
-  than the concrete region R. This is kind of deprecated and ought to
-  be replaced with a verify (they essentially play the same role).
-
-In addition to constraints, we also gather up a set of "verifys"
-(what, you don't think Verify is a noun? Get used to it my
-friend!). These represent relations that must hold but which don't
-influence inference proper. These take the form of:
-
-- `VerifyRegSubReg(Ri, Rj)` indicates that Ri <= Rj must hold,
-  where Rj is not an inference variable (and Ri may or may not contain
-  one). This doesn't influence inference because we will already have
-  inferred Ri to be as small as possible, so then we just test whether
-  that result was less than Rj or not.
-- `VerifyGenericBound(R, Vb)` is a more complex expression which tests
-  that the region R must satisfy the bound `Vb`. The bounds themselves
-  may have structure like "must outlive one of the following regions"
-  or "must outlive ALL of the following regions. These bounds arise
-  from constraints like `T: 'a` -- if we know that `T: 'b` and `T: 'c`
-  (say, from where clauses), then we can conclude that `T: 'a` if `'b:
-  'a` *or* `'c: 'a`.
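Shape-wise, eliding the origin and span information that the real
declarations in this module carry, the two kinds of obligations can be
sketched as follows (the stub types here are illustrative stand-ins, not
the compiler's definitions):

    // Toy stand-ins so the sketch is self-contained; the real types are richer.
    struct RegionVid(u32);  // an inference variable for a region
    struct Region;          // a concrete region: free lifetime, scope, 'static, ...
    struct VerifyBound;     // "must outlive one/all of the following regions"

    enum Constraint {
        ConstrainVarSubVar(RegionVid, RegionVid), // Ri must be a subregion of Rj
        ConstrainRegSubVar(Region, RegionVid),    // concrete R must be a subregion of Ri
        ConstrainVarSubReg(RegionVid, Region),    // Ri must be a subregion of concrete R
    }

    enum Verify {
        VerifyRegSubReg(Region, Region),          // after solving, simply test Ri <= Rj
        VerifyGenericBound(Region, VerifyBound),  // after solving, test R against Vb
    }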
-
-# Building up the constraints
-
-Variables and constraints are created using the following methods:
-
-- `new_region_var()` creates a new, unconstrained region variable;
-- `make_subregion(Ri, Rj)` states that Ri is a subregion of Rj
-- `lub_regions(Ri, Rj) -> Rk` returns a region Rk which is
-  the smallest region that is greater than both Ri and Rj
-- `glb_regions(Ri, Rj) -> Rk` returns a region Rk which is
-  the greatest region that is smaller than both Ri and Rj
-
-The actual region resolution algorithm is not entirely
-obvious, though it is also not overly complex.
-
-## Snapshotting
-
-It is also permitted to try (and roll back) changes to the graph.  This
-is done by invoking `start_snapshot()`, which returns a snapshot value.
-Later you can call `rollback_to()`, which undoes the work, or
-alternatively `commit()`, which ends all snapshots.  Snapshots can be
-nested: you can start a snapshot when another is in progress, but only
-the root snapshot can "commit".
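The mechanism behind this is essentially an undo log. A minimal
self-contained sketch (a toy, not the real `RegionVarBindings`
implementation):

    // Toy snapshotting: a snapshot records the undo-log length, rollback
    // truncates back to it, and commit simply keeps what was recorded.
    struct UndoLog<T> {
        entries: Vec<T>,
    }

    impl<T> UndoLog<T> {
        fn start_snapshot(&self) -> usize {
            self.entries.len()
        }
        fn rollback_to(&mut self, snapshot: usize) {
            self.entries.truncate(snapshot);
        }
        fn commit(&mut self, _snapshot: usize) {
            // nothing to undo; everything recorded so far is kept
        }
    }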
-
-## The problem
-
-Basically our input is a directed graph where nodes can be divided
-into two categories: region variables and concrete regions.  Each edge
-`R -> S` in the graph represents a constraint that the region `R` is a
-subregion of the region `S`.
-
-Region variable nodes can have arbitrary degree.  There is one region
-variable node per region variable.
-
-Each concrete region node is associated with some, well, concrete
-region: e.g., a free lifetime, or the region for a particular scope.
-Note that there may be more than one concrete region node for a
-particular region value.  Moreover, because of how the graph is built,
-we know that all concrete region nodes have either in-degree 1 or
-out-degree 1.
-
-Before resolution begins, we build up the constraints in a hashmap
-that maps `Constraint` keys to spans.  During resolution, we construct
-the actual `Graph` structure that we describe here.
-
-## Computing the values for region variables
-
-The algorithm is a simple dataflow algorithm. Each region variable
-begins as empty. We iterate over the constraints, and for each constraint
-we grow the relevant region variable to be as big as it must be to meet all the
-constraints. This means the region variables can grow to be `'static` if
-necessary.
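As a toy illustration of this growth step, completely divorced from the
compiler's real types (here a region is just a set of scope ids and
"subregion" means "subset"), the fixed-point loop looks roughly like this:

    use std::collections::BTreeSet;

    type Region = BTreeSet<u32>;  // toy: a region is a set of scope ids

    enum Constraint {
        RegSubVar(Region, usize), // a concrete region must fit inside variable i
        VarSubVar(usize, usize),  // variable i must fit inside variable j
    }

    // Grow every variable, starting from empty, until all constraints hold.
    fn solve(n_vars: usize, constraints: &[Constraint]) -> Vec<Region> {
        let mut values = vec![Region::new(); n_vars];
        let mut changed = true;
        while changed {
            changed = false;
            for c in constraints {
                match *c {
                    Constraint::RegSubVar(ref r, i) => {
                        let before = values[i].len();
                        values[i].extend(r.iter().cloned());
                        changed |= values[i].len() != before;
                    }
                    Constraint::VarSubVar(i, j) => {
                        let sub = values[i].clone();
                        let before = values[j].len();
                        values[j].extend(sub);
                        changed |= values[j].len() != before;
                    }
                }
            }
        }
        values
    }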
-
-## Verification
-
-After all constraints are fully propagated, we do a "verification"
-step where we walk over the verify bounds and check that they are
-satisfied. These bounds essentially represent the "maximal" values
-that a region variable can take on.
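In the toy terms used above, where a region is a `BTreeSet<u32>` of scope
ids, the `VerifyRegSubReg` flavor reduces to a plain subset test once the
fixed point has been reached:

    use std::collections::BTreeSet;

    // Toy verification: Ri <= Rj holds iff Ri's scopes are contained in Rj's.
    fn verify_reg_sub_reg(sub: &BTreeSet<u32>, sup: &BTreeSet<u32>) -> bool {
        sub.is_subset(sup)
    }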
-
-# The Region Hierarchy
-
-## Without closures
-
-Let's first consider the region hierarchy without thinking about
-closures, because they add a lot of complications. The region
-hierarchy *basically* mirrors the lexical structure of the code.
-There is a region for every piece of 'evaluation' that occurs, meaning
-every expression, block, and pattern (patterns are considered to
-"execute" by testing the value they are applied to and creating any
-relevant bindings).  So, for example:
-
-    fn foo(x: isize, y: isize) { // -+
-    //  +------------+       //  |
-    //  |      +-----+       //  |
-    //  |  +-+ +-+ +-+       //  |
-    //  |  | | | | | |       //  |
-    //  v  v v v v v v       //  |
-        let z = x + y;       //  |
-        ...                  //  |
-    }                        // -+
-
-    fn bar() { ... }
-
-In this example, there is a region for the fn body block as a whole,
-and then a subregion for the declaration of the local variable.
-Within that, there are sublifetimes for the assignment pattern and
-also the expression `x + y`. The expression itself has sublifetimes
-for evaluating `x` and `y`.
-
-## Function calls
-
-Function calls are a bit tricky. I will describe how we handle them
-*now* and then a bit about how we can improve them (Issue #6268).
-
-Consider a function call like `func(expr1, expr2)`, where `func`,
-`expr1`, and `expr2` are all arbitrary expressions. Currently,
-we construct a region hierarchy like:
-
-    +----------------+
-    |                |
-    +--+ +---+  +---+|
-    v  v v   v  v   vv
-    func(expr1, expr2)
-
-Here you can see that the call as a whole has a region and the
-function plus arguments are subregions of that. As a side-effect of
-this, we get a lot of spurious errors around nested calls, in
-particular when combined with `&mut` functions. For example, a call
-like this one
-
-    self.foo(self.bar())
-
-where both `foo` and `bar` are `&mut self` functions will always yield
-an error.
-
-Here is a more involved example (which is safe) so we can see what's
-going on:
-
-    struct Foo { f: usize, g: usize }
-    ...
-    fn add(p: &mut usize, v: usize) {
-        *p += v;
-    }
-    ...
-    fn inc(p: &mut usize) -> usize {
-        *p += 1; *p
-    }
-    fn weird() {
-        let mut x: Box<Foo> = box Foo { ... };
-        'a: add(&mut (*x).f,
-                'b: inc(&mut (*x).f)) // (..)
-    }
-
-The important part is the line marked `(..)` which contains a call to
-`add()`. The first argument is a mutable borrow of the field `f`.  The
-second argument also borrows the field `f`. Now, in the current borrow
-checker, the first borrow is given the lifetime of the call to
-`add()`, `'a`.  The second borrow is given the lifetime of `'b` of the
-call to `inc()`. Because `'b` is considered to be a sublifetime of
-`'a`, an error is reported since there are two co-existing mutable
-borrows of the same data.
-
-However, if we were to examine the lifetimes a bit more carefully, we
-can see that this error is unnecessary. Let's examine the lifetimes
-involved with `'a` in detail. We'll break apart all the steps involved
-in a call expression:
-
-    'a: {
-        'a_arg1: let a_temp1: ... = add;
-        'a_arg2: let a_temp2: &'a mut usize = &'a mut (*x).f;
-        'a_arg3: let a_temp3: usize = {
-            let b_temp1: ... = inc;
-            let b_temp2: &'b mut usize = &'b mut (*x).f;
-            'b_call: b_temp1(b_temp2)
-        };
-        'a_call: a_temp1(a_temp2, a_temp3) // (**)
-    }
-
-Here we see that the lifetime `'a` includes a number of substatements.
-In particular, there is this lifetime I've called `'a_call` that
-corresponds to the *actual execution of the function `add()`*, after
-all arguments have been evaluated. There is a corresponding lifetime
-`'b_call` for the execution of `inc()`. If we wanted to be precise
-about it, the lifetime of the two borrows should be `'a_call` and
-`'b_call` respectively, since the references that were created
-will not be dereferenced except during the execution itself.
-
-However, this model by itself is not sound. The reason is that
-while the two references that are created will never be used
-simultaneously, it is still true that the first reference is
-*created* before the second argument is evaluated, and so even though
-it will not be *dereferenced* during the evaluation of the second
-argument, it can still be *invalidated* by that evaluation. Consider
-this similar but unsound example:
-
-    struct Foo { f: usize, g: usize }
-    ...
-    fn add(p: &mut usize, v: usize) {
-        *p += v;
-    }
-    ...
-    fn consume(x: Box<Foo>) -> usize {
-        x.f + x.g
-    }
-    fn weird() {
-        let mut x: Box<Foo> = box Foo { ... };
-        'a: add(&mut (*x).f, consume(x)) // (..)
-    }
-
-In this case, the second argument to `add` actually consumes `x`, thus
-invalidating the first argument.
-
-So, for now, we exclude the `call` lifetimes from our model.
-Eventually I would like to include them, but we will have to make the
-borrow checker handle this situation correctly. In particular, if
-there is a reference created whose lifetime does not enclose
-the borrow expression, we must issue sufficient restrictions to ensure
-that the pointee remains valid.
-
-## Modeling closures
-
-Integrating closures properly into the model is a bit of
-work-in-progress. In an ideal world, we would model closures as
-closely as possible after their desugared equivalents. That is, a
-closure type would be modeled as a struct, and the region hierarchy of
-different closure bodies would be completely distinct from all other
-fns. We are generally moving in that direction but there are
-complications in terms of the implementation.
-
-In practice what we currently do is somewhat different. The basis for
-the current approach is the observation that the only time that
-regions from distinct fn bodies interact with one another is through
-an upvar or the type of a fn parameter (since closures live in the fn
-body namespace, they can in fact have fn parameters whose types
-include regions from the surrounding fn body). For these cases, there
-are separate mechanisms which ensure that the regions that appear in
-upvars/parameters outlive the dynamic extent of each call to the
-closure:
-
-1. Types must outlive the region of any expression where they are used.
-   For a closure type `C` to outlive a region `'r`, that implies that the
-   types of all its upvars must outlive `'r`.
-2. Parameters must outlive the region of any fn that they are passed to.
-
-Therefore, we can -- sort of -- assume that any region from an
-enclosing fn is larger than any region from one of its enclosed
-fns. And that is precisely what we do: when building the region
-hierarchy, each region lives in its own distinct subtree, but if we
-are asked to compute the `LUB(r1, r2)` of two regions, and those
-regions are in disjoint subtrees, we compare the lexical nesting of
-the two regions.
-
-*Ideas for improving the situation:* (FIXME #3696) The correctness
-argument here is subtle and a bit hand-wavy. The ideal, as stated
-earlier, would be to model things in such a way that it corresponds
-more closely to the desugared code. The best approach for doing this
-is a bit unclear: it may in fact be possible to *actually* desugar
-before we start, but I don't think so. The main option that I've been
-thinking through is imposing a "view shift" as we enter the fn body,
-so that regions appearing in the types of fn parameters and upvars are
-translated from being regions in the outer fn into free region
-parameters, just as they would be if we applied the desugaring. The
-challenge here is that type inference may not have fully run, so the
-types may not be fully known: we could probably do this translation
-lazily, as type variables are instantiated. We would also have to
-apply a kind of inverse translation to the return value. This would be
-a good idea anyway, as right now it is possible for free regions
-instantiated within the closure to leak into the parent: this
-currently leads to type errors, since those regions cannot outlive any
-expressions within the parent hierarchy. Much like the current
-handling of closures, there are no known cases where this leads to
-type-checking accepting incorrect code (though it sometimes rejects
-what might be considered correct code; see rust-lang/rust#22557), but
-it still doesn't feel like the right approach.
-
-### Skolemization
-
-For a discussion on skolemization and higher-ranked subtyping, please
-see the module `middle::infer::higher_ranked::doc`.
diff --git a/src/librustc/middle/infer/region_inference/graphviz.rs b/src/librustc/middle/infer/region_inference/graphviz.rs
deleted file mode 100644 (file)
index dafa65c..0000000
+++ /dev/null
@@ -1,269 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! This module provides linkage between libgraphviz traits and
-//! `rustc::middle::typeck::infer::region_inference`, generating a
-//! rendering of the graph represented by the list of `Constraint`
-//! instances (which make up the edges of the graph), as well as the
-//! origin for each constraint (which are attached to the labels on
-//! each edge).
-
-/// For clarity, rename the graphviz crate locally to dot.
-use graphviz as dot;
-
-use middle::ty;
-use middle::region::CodeExtent;
-use super::Constraint;
-use middle::infer::SubregionOrigin;
-use middle::infer::region_inference::RegionVarBindings;
-use util::nodemap::{FnvHashMap, FnvHashSet};
-
-use std::borrow::Cow;
-use std::collections::hash_map::Entry::Vacant;
-use std::env;
-use std::fs::File;
-use std::io;
-use std::io::prelude::*;
-use std::sync::atomic::{AtomicBool, Ordering};
-use syntax::ast;
-
-fn print_help_message() {
-    println!("\
--Z print-region-graph by default prints a region constraint graph for every \n\
-function body, to the path `/tmp/constraints.nodeXXX.dot`, where the XXX is \n\
-replaced with the node id of the function under analysis.                   \n\
-                                                                            \n\
-To select one particular function body, set `RUST_REGION_GRAPH_NODE=XXX`,   \n\
-where XXX is the node id desired.                                           \n\
-                                                                            \n\
-To generate output to some path other than the default                      \n\
-`/tmp/constraints.nodeXXX.dot`, set `RUST_REGION_GRAPH=/path/desired.dot`;  \n\
-occurrences of the character `%` in the requested path will be replaced with\n\
-the node id of the function under analysis.                                 \n\
-                                                                            \n\
-(Since you requested help via RUST_REGION_GRAPH=help, no region constraint  \n\
-graphs will be printed.)                                                    \n\
-");
-}
-
-pub fn maybe_print_constraints_for<'a, 'tcx>(region_vars: &RegionVarBindings<'a, 'tcx>,
-                                             subject_node: ast::NodeId) {
-    let tcx = region_vars.tcx;
-
-    if !region_vars.tcx.sess.opts.debugging_opts.print_region_graph {
-        return;
-    }
-
-    let requested_node: Option<ast::NodeId> = env::var("RUST_REGION_GRAPH_NODE")
-                                                  .ok()
-                                                  .and_then(|s| s.parse().ok());
-
-    if requested_node.is_some() && requested_node != Some(subject_node) {
-        return;
-    }
-
-    let requested_output = env::var("RUST_REGION_GRAPH");
-    debug!("requested_output: {:?} requested_node: {:?}",
-           requested_output,
-           requested_node);
-
-    let output_path = {
-        let output_template = match requested_output {
-            Ok(ref s) if s == "help" => {
-                static PRINTED_YET: AtomicBool = AtomicBool::new(false);
-                if !PRINTED_YET.load(Ordering::SeqCst) {
-                    print_help_message();
-                    PRINTED_YET.store(true, Ordering::SeqCst);
-                }
-                return;
-            }
-
-            Ok(other_path) => other_path,
-            Err(_) => "/tmp/constraints.node%.dot".to_string(),
-        };
-
-        if output_template.is_empty() {
-            tcx.sess.bug("empty string provided as RUST_REGION_GRAPH");
-        }
-
-        if output_template.contains('%') {
-            let mut new_str = String::new();
-            for c in output_template.chars() {
-                if c == '%' {
-                    new_str.push_str(&subject_node.to_string());
-                } else {
-                    new_str.push(c);
-                }
-            }
-            new_str
-        } else {
-            output_template
-        }
-    };
-
-    let constraints = &*region_vars.constraints.borrow();
-    match dump_region_constraints_to(tcx, constraints, &output_path) {
-        Ok(()) => {}
-        Err(e) => {
-            let msg = format!("io error dumping region constraints: {}", e);
-            region_vars.tcx.sess.err(&msg)
-        }
-    }
-}
-
-struct ConstraintGraph<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
-    graph_name: String,
-    map: &'a FnvHashMap<Constraint, SubregionOrigin<'tcx>>,
-    node_ids: FnvHashMap<Node, usize>,
-}
-
-#[derive(Clone, Hash, PartialEq, Eq, Debug, Copy)]
-enum Node {
-    RegionVid(ty::RegionVid),
-    Region(ty::Region),
-}
-
-// type Edge = Constraint;
-#[derive(Clone, PartialEq, Eq, Debug, Copy)]
-enum Edge {
-    Constraint(Constraint),
-    EnclScope(CodeExtent, CodeExtent),
-}
-
-impl<'a, 'tcx> ConstraintGraph<'a, 'tcx> {
-    fn new(tcx: &'a ty::ctxt<'tcx>,
-           name: String,
-           map: &'a ConstraintMap<'tcx>)
-           -> ConstraintGraph<'a, 'tcx> {
-        let mut i = 0;
-        let mut node_ids = FnvHashMap();
-        {
-            let mut add_node = |node| {
-                if let Vacant(e) = node_ids.entry(node) {
-                    e.insert(i);
-                    i += 1;
-                }
-            };
-
-            for (n1, n2) in map.keys().map(|c| constraint_to_nodes(c)) {
-                add_node(n1);
-                add_node(n2);
-            }
-
-            tcx.region_maps.each_encl_scope(|sub, sup| {
-                add_node(Node::Region(ty::ReScope(*sub)));
-                add_node(Node::Region(ty::ReScope(*sup)));
-            });
-        }
-
-        ConstraintGraph {
-            tcx: tcx,
-            graph_name: name,
-            map: map,
-            node_ids: node_ids,
-        }
-    }
-}
-
-impl<'a, 'tcx> dot::Labeller<'a, Node, Edge> for ConstraintGraph<'a, 'tcx> {
-    fn graph_id(&self) -> dot::Id {
-        dot::Id::new(&*self.graph_name).unwrap()
-    }
-    fn node_id(&self, n: &Node) -> dot::Id {
-        let node_id = match self.node_ids.get(n) {
-            Some(node_id) => node_id,
-            None => panic!("no node_id found for node: {:?}", n),
-        };
-        let name = || format!("node_{}", node_id);
-        match dot::Id::new(name()) {
-            Ok(id) => id,
-            Err(_) => {
-                panic!("failed to create graphviz node identified by {}", name());
-            }
-        }
-    }
-    fn node_label(&self, n: &Node) -> dot::LabelText {
-        match *n {
-            Node::RegionVid(n_vid) => dot::LabelText::label(format!("{:?}", n_vid)),
-            Node::Region(n_rgn) => dot::LabelText::label(format!("{:?}", n_rgn)),
-        }
-    }
-    fn edge_label(&self, e: &Edge) -> dot::LabelText {
-        match *e {
-            Edge::Constraint(ref c) =>
-                dot::LabelText::label(format!("{:?}", self.map.get(c).unwrap())),
-            Edge::EnclScope(..) => dot::LabelText::label(format!("(enclosed)")),
-        }
-    }
-}
-
-fn constraint_to_nodes(c: &Constraint) -> (Node, Node) {
-    match *c {
-        Constraint::ConstrainVarSubVar(rv_1, rv_2) =>
-            (Node::RegionVid(rv_1), Node::RegionVid(rv_2)),
-        Constraint::ConstrainRegSubVar(r_1, rv_2) => (Node::Region(r_1), Node::RegionVid(rv_2)),
-        Constraint::ConstrainVarSubReg(rv_1, r_2) => (Node::RegionVid(rv_1), Node::Region(r_2)),
-    }
-}
-
-fn edge_to_nodes(e: &Edge) -> (Node, Node) {
-    match *e {
-        Edge::Constraint(ref c) => constraint_to_nodes(c),
-        Edge::EnclScope(sub, sup) => {
-            (Node::Region(ty::ReScope(sub)),
-             Node::Region(ty::ReScope(sup)))
-        }
-    }
-}
-
-impl<'a, 'tcx> dot::GraphWalk<'a, Node, Edge> for ConstraintGraph<'a, 'tcx> {
-    fn nodes(&self) -> dot::Nodes<Node> {
-        let mut set = FnvHashSet();
-        for node in self.node_ids.keys() {
-            set.insert(*node);
-        }
-        debug!("constraint graph has {} nodes", set.len());
-        set.into_iter().collect()
-    }
-    fn edges(&self) -> dot::Edges<Edge> {
-        debug!("constraint graph has {} edges", self.map.len());
-        let mut v: Vec<_> = self.map.keys().map(|e| Edge::Constraint(*e)).collect();
-        self.tcx.region_maps.each_encl_scope(|sub, sup| v.push(Edge::EnclScope(*sub, *sup)));
-        debug!("region graph has {} edges", v.len());
-        Cow::Owned(v)
-    }
-    fn source(&self, edge: &Edge) -> Node {
-        let (n1, _) = edge_to_nodes(edge);
-        debug!("edge {:?} has source {:?}", edge, n1);
-        n1
-    }
-    fn target(&self, edge: &Edge) -> Node {
-        let (_, n2) = edge_to_nodes(edge);
-        debug!("edge {:?} has target {:?}", edge, n2);
-        n2
-    }
-}
-
-pub type ConstraintMap<'tcx> = FnvHashMap<Constraint, SubregionOrigin<'tcx>>;
-
-fn dump_region_constraints_to<'a, 'tcx: 'a>(tcx: &'a ty::ctxt<'tcx>,
-                                            map: &ConstraintMap<'tcx>,
-                                            path: &str)
-                                            -> io::Result<()> {
-    debug!("dump_region_constraints map (len: {}) path: {}",
-           map.len(),
-           path);
-    let g = ConstraintGraph::new(tcx, format!("region_constraints"), map);
-    debug!("dump_region_constraints calling render");
-    let mut v = Vec::new();
-    dot::render(&g, &mut v).unwrap();
-    File::create(path).and_then(|mut f| f.write_all(&v))
-}
diff --git a/src/librustc/middle/infer/region_inference/mod.rs b/src/librustc/middle/infer/region_inference/mod.rs
deleted file mode 100644 (file)
index bfc770d..0000000
+++ /dev/null
@@ -1,1448 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! See README.md
-
-pub use self::Constraint::*;
-pub use self::Verify::*;
-pub use self::UndoLogEntry::*;
-pub use self::CombineMapType::*;
-pub use self::RegionResolutionError::*;
-pub use self::VarValue::*;
-
-use super::{RegionVariableOrigin, SubregionOrigin, TypeTrace, MiscVariable};
-use super::unify_key;
-
-use rustc_data_structures::graph::{self, Direction, NodeIndex};
-use rustc_data_structures::unify::{self, UnificationTable};
-use middle::free_region::FreeRegionMap;
-use middle::ty::{self, Ty};
-use middle::ty::{BoundRegion, Region, RegionVid};
-use middle::ty::{ReEmpty, ReStatic, ReFree, ReEarlyBound};
-use middle::ty::{ReLateBound, ReScope, ReVar, ReSkolemized, BrFresh};
-use middle::ty::error::TypeError;
-use util::common::indenter;
-use util::nodemap::{FnvHashMap, FnvHashSet};
-
-use std::cell::{Cell, RefCell};
-use std::cmp::Ordering::{self, Less, Greater, Equal};
-use std::fmt;
-use std::u32;
-use syntax::ast;
-
-mod graphviz;
-
-// A constraint that influences the inference process.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-pub enum Constraint {
-    // One region variable is subregion of another
-    ConstrainVarSubVar(RegionVid, RegionVid),
-
-    // Concrete region is subregion of region variable
-    ConstrainRegSubVar(Region, RegionVid),
-
-    // Region variable is subregion of concrete region
-    //
-    // FIXME(#29436) -- should be removed in favor of a Verify
-    ConstrainVarSubReg(RegionVid, Region),
-}
-
-// Something we have to verify after region inference is done, but
-// which does not directly influence the inference process
-pub enum Verify<'tcx> {
-    // VerifyRegSubReg(a, b): Verify that `a <= b`. Neither `a` nor
-    // `b` are inference variables.
-    VerifyRegSubReg(SubregionOrigin<'tcx>, Region, Region),
-
-    // VerifyGenericBound(T, _, R, RS): The parameter type `T` (or
-    // associated type) must outlive the region `R`. `T` is known to
-    // outlive `RS`. Therefore verify that `R <= RS[i]` for some
-    // `i`. Inference variables may be involved (but this verification
-    // step doesn't influence inference).
-    VerifyGenericBound(GenericKind<'tcx>, SubregionOrigin<'tcx>, Region, VerifyBound),
-}
-
-#[derive(Copy, Clone, PartialEq, Eq)]
-pub enum GenericKind<'tcx> {
-    Param(ty::ParamTy),
-    Projection(ty::ProjectionTy<'tcx>),
-}
-
-// When we introduce a verification step, we wish to test that a
-// particular region (let's call it `'min`) meets some bound.
-// The bound is described by the following grammar:
-#[derive(Debug)]
-pub enum VerifyBound {
-    // B = exists {R} --> some 'r in {R} must outlive 'min
-    //
-    // Put another way, the subject value is known to outlive all
-    // regions in {R}, so if any of those outlives 'min, then the
-    // bound is met.
-    AnyRegion(Vec<Region>),
-
-    // B = forall {R} --> all 'r in {R} must outlive 'min
-    //
-    // Put another way, the subject value is known to outlive some
-    // region in {R}, so if all of those outlive 'min, then the bound
-    // is met.
-    AllRegions(Vec<Region>),
-
-    // B = exists {B} --> 'min must meet some bound b in {B}
-    AnyBound(Vec<VerifyBound>),
-
-    // B = forall {B} --> 'min must meet all bounds b in {B}
-    AllBounds(Vec<VerifyBound>),
-}
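// A simplified, self-contained sketch of how the four bound forms above are read:
// `AnyRegion`/`AnyBound` are existential (some element must outlive 'min), while
// `AllRegions`/`AllBounds` are universal. Regions are modeled as plain u32 lifetimes
// and "outlives" as `>=`; this is an illustration of the grammar only, not the
// compiler's actual evaluation code (see `VerifyBound::is_met` later in this file).
enum SimpleBound {
    AnyRegion(Vec<u32>),
    AllRegions(Vec<u32>),
    AnyBound(Vec<SimpleBound>),
    AllBounds(Vec<SimpleBound>),
}

fn is_met(bound: &SimpleBound, min: u32) -> bool {
    let outlives = |r: u32, min: u32| r >= min;
    match bound {
        SimpleBound::AnyRegion(rs) => rs.iter().any(|&r| outlives(r, min)),
        SimpleBound::AllRegions(rs) => rs.iter().all(|&r| outlives(r, min)),
        SimpleBound::AnyBound(bs) => bs.iter().any(|b| is_met(b, min)),
        SimpleBound::AllBounds(bs) => bs.iter().all(|b| is_met(b, min)),
    }
}

fn main() {
    let b = SimpleBound::AllBounds(vec![
        SimpleBound::AnyRegion(vec![1, 5]),
        SimpleBound::AnyBound(vec![SimpleBound::AllRegions(vec![4, 6])]),
    ]);
    assert!(is_met(&b, 3));  // 5 outlives 3, and both 4 and 6 outlive 3
    assert!(!is_met(&b, 5)); // 4 does not outlive 5, so the AllRegions part fails
}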
-
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-pub struct TwoRegions {
-    a: Region,
-    b: Region,
-}
-
-#[derive(Copy, Clone, PartialEq)]
-pub enum UndoLogEntry {
-    OpenSnapshot,
-    CommitedSnapshot,
-    AddVar(RegionVid),
-    AddConstraint(Constraint),
-    AddVerify(usize),
-    AddGiven(ty::FreeRegion, ty::RegionVid),
-    AddCombination(CombineMapType, TwoRegions),
-}
-
-#[derive(Copy, Clone, PartialEq)]
-pub enum CombineMapType {
-    Lub,
-    Glb,
-}
-
-#[derive(Clone, Debug)]
-pub enum RegionResolutionError<'tcx> {
-    /// `ConcreteFailure(o, a, b)`:
-    ///
-    /// `o` requires that `a <= b`, but this does not hold
-    ConcreteFailure(SubregionOrigin<'tcx>, Region, Region),
-
-    /// `GenericBoundFailure(p, s, a)`
-    ///
-    /// The parameter/associated-type `p` must be known to outlive the lifetime
-    /// `a` (but none of the known bounds are sufficient).
-    GenericBoundFailure(SubregionOrigin<'tcx>, GenericKind<'tcx>, Region),
-
-    /// `SubSupConflict(v, sub_origin, sub_r, sup_origin, sup_r)`:
-    ///
-    /// Could not infer a value for `v` because `sub_r <= v` (due to
-    /// `sub_origin`) but `v <= sup_r` (due to `sup_origin`) and
-    /// `sub_r <= sup_r` does not hold.
-    SubSupConflict(RegionVariableOrigin,
-                   SubregionOrigin<'tcx>,
-                   Region,
-                   SubregionOrigin<'tcx>,
-                   Region),
-
-    /// For subsets of `ConcreteFailure` and `SubSupConflict`, we can derive
-    /// more specific error messages by suggesting to the user where they
-    /// should put a lifetime. In those cases we process and put those errors
-    /// into `ProcessedErrors` before we do any reporting.
-    ProcessedErrors(Vec<RegionVariableOrigin>,
-                    Vec<(TypeTrace<'tcx>, TypeError<'tcx>)>,
-                    Vec<SameRegions>),
-}
-
-/// SameRegions is used to group regions that we think are the same and would
-/// like to indicate so to the user.
-/// For example, the following function
-/// ```
-/// struct Foo { bar: i32 }
-/// fn foo2<'a, 'b>(x: &'a Foo) -> &'b i32 {
-///    &x.bar
-/// }
-/// ```
-/// would report an error because we expect 'a and 'b to match, and so we group
-/// 'a and 'b together inside a SameRegions struct
-#[derive(Clone, Debug)]
-pub struct SameRegions {
-    pub scope_id: ast::NodeId,
-    pub regions: Vec<BoundRegion>,
-}
-
-impl SameRegions {
-    pub fn contains(&self, other: &BoundRegion) -> bool {
-        self.regions.contains(other)
-    }
-
-    pub fn push(&mut self, other: BoundRegion) {
-        self.regions.push(other);
-    }
-}
-
-pub type CombineMap = FnvHashMap<TwoRegions, RegionVid>;
-
-pub struct RegionVarBindings<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
-    var_origins: RefCell<Vec<RegionVariableOrigin>>,
-
-    // Constraints of the form `A <= B` introduced by the region
-    // checker.  Here at least one of `A` and `B` must be a region
-    // variable.
-    constraints: RefCell<FnvHashMap<Constraint, SubregionOrigin<'tcx>>>,
-
-    // A "verify" is something that we need to verify after inference is
-    // done, but which does not directly affect inference in any way.
-    //
-    // An example is a `A <= B` where neither `A` nor `B` are
-    // inference variables.
-    verifys: RefCell<Vec<Verify<'tcx>>>,
-
-    // A "given" is a relationship that is known to hold. In particular,
-    // we often know from closure fn signatures that a particular free
-    // region must be a subregion of a region variable:
-    //
-    //    foo.iter().filter(<'a> |x: &'a &'b T| ...)
-    //
-    // In situations like this, `'b` is in fact a region variable
-    // introduced by the call to `iter()`, and `'a` is a bound region
-    // on the closure (as indicated by the `<'a>` prefix). If we are
-    // naive, we wind up inferring that `'b` must be `'static`,
-    // because we require that it be greater than `'a` and we do not
-    // know what `'a` is precisely.
-    //
-    // This hashmap is used to avoid that naive scenario. Basically we
-    // record the fact that `'a <= 'b` is implied by the fn signature,
-    // and then ignore the constraint when solving equations. This is
-    // a bit of a hack but seems to work.
-    givens: RefCell<FnvHashSet<(ty::FreeRegion, ty::RegionVid)>>,
-
-    lubs: RefCell<CombineMap>,
-    glbs: RefCell<CombineMap>,
-    skolemization_count: Cell<u32>,
-    bound_count: Cell<u32>,
-
-    // The undo log records actions that might later be undone.
-    //
-    // Note: when the undo_log is empty, we are not actively
-    // snapshotting. When the `start_snapshot()` method is called, we
-    // push an OpenSnapshot entry onto the list to indicate that we
-    // are now actively snapshotting. The reason for this is that
-    // otherwise we end up adding entries for things like the lower
-    // bound on a variable and so forth, which can never be rolled
-    // back.
-    undo_log: RefCell<Vec<UndoLogEntry>>,
-    unification_table: RefCell<UnificationTable<ty::RegionVid>>,
-
-    // This contains the results of inference.  It begins as an empty
-    // option and only acquires a value after inference is complete.
-    values: RefCell<Option<Vec<VarValue>>>,
-}
-
-pub struct RegionSnapshot {
-    length: usize,
-    region_snapshot: unify::Snapshot<ty::RegionVid>,
-    skolemization_count: u32,
-}
-
-impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
-    pub fn new(tcx: &'a ty::ctxt<'tcx>) -> RegionVarBindings<'a, 'tcx> {
-        RegionVarBindings {
-            tcx: tcx,
-            var_origins: RefCell::new(Vec::new()),
-            values: RefCell::new(None),
-            constraints: RefCell::new(FnvHashMap()),
-            verifys: RefCell::new(Vec::new()),
-            givens: RefCell::new(FnvHashSet()),
-            lubs: RefCell::new(FnvHashMap()),
-            glbs: RefCell::new(FnvHashMap()),
-            skolemization_count: Cell::new(0),
-            bound_count: Cell::new(0),
-            undo_log: RefCell::new(Vec::new()),
-            unification_table: RefCell::new(UnificationTable::new()),
-        }
-    }
-
-    fn in_snapshot(&self) -> bool {
-        !self.undo_log.borrow().is_empty()
-    }
-
-    pub fn start_snapshot(&self) -> RegionSnapshot {
-        let length = self.undo_log.borrow().len();
-        debug!("RegionVarBindings: start_snapshot({})", length);
-        self.undo_log.borrow_mut().push(OpenSnapshot);
-        RegionSnapshot {
-            length: length,
-            region_snapshot: self.unification_table.borrow_mut().snapshot(),
-            skolemization_count: self.skolemization_count.get(),
-        }
-    }
-
-    pub fn commit(&self, snapshot: RegionSnapshot) {
-        debug!("RegionVarBindings: commit({})", snapshot.length);
-        assert!(self.undo_log.borrow().len() > snapshot.length);
-        assert!((*self.undo_log.borrow())[snapshot.length] == OpenSnapshot);
-
-        let mut undo_log = self.undo_log.borrow_mut();
-        if snapshot.length == 0 {
-            undo_log.truncate(0);
-        } else {
-            (*undo_log)[snapshot.length] = CommitedSnapshot;
-        }
-        self.skolemization_count.set(snapshot.skolemization_count);
-        self.unification_table.borrow_mut().commit(snapshot.region_snapshot);
-    }
-
-    pub fn rollback_to(&self, snapshot: RegionSnapshot) {
-        debug!("RegionVarBindings: rollback_to({:?})", snapshot);
-        let mut undo_log = self.undo_log.borrow_mut();
-        assert!(undo_log.len() > snapshot.length);
-        assert!((*undo_log)[snapshot.length] == OpenSnapshot);
-        while undo_log.len() > snapshot.length + 1 {
-            match undo_log.pop().unwrap() {
-                OpenSnapshot => {
-                    panic!("Failure to observe stack discipline");
-                }
-                CommitedSnapshot => {}
-                AddVar(vid) => {
-                    let mut var_origins = self.var_origins.borrow_mut();
-                    var_origins.pop().unwrap();
-                    assert_eq!(var_origins.len(), vid.index as usize);
-                }
-                AddConstraint(ref constraint) => {
-                    self.constraints.borrow_mut().remove(constraint);
-                }
-                AddVerify(index) => {
-                    self.verifys.borrow_mut().pop();
-                    assert_eq!(self.verifys.borrow().len(), index);
-                }
-                AddGiven(sub, sup) => {
-                    self.givens.borrow_mut().remove(&(sub, sup));
-                }
-                AddCombination(Glb, ref regions) => {
-                    self.glbs.borrow_mut().remove(regions);
-                }
-                AddCombination(Lub, ref regions) => {
-                    self.lubs.borrow_mut().remove(regions);
-                }
-            }
-        }
-        let c = undo_log.pop().unwrap();
-        assert!(c == OpenSnapshot);
-        self.skolemization_count.set(snapshot.skolemization_count);
-        self.unification_table.borrow_mut()
-            .rollback_to(snapshot.region_snapshot);
-    }
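// The commit/rollback pair above is driven by an undo log: a snapshot records the
// current log length (plus an OpenSnapshot marker), and rollback pops entries above
// that length, reversing each one. A minimal sketch of the same discipline for a
// plain HashSet; names are illustrative, and the real code additionally handles
// nested snapshots, the unification table and the skolemization counter.
use std::collections::HashSet;

enum Undo {
    OpenSnapshot,
    AddItem(u32),
}

struct Snapshot {
    length: usize,
}

struct Table {
    items: HashSet<u32>,
    undo_log: Vec<Undo>,
}

impl Table {
    fn start_snapshot(&mut self) -> Snapshot {
        let length = self.undo_log.len();
        self.undo_log.push(Undo::OpenSnapshot);
        Snapshot { length: length }
    }

    fn add(&mut self, item: u32) {
        // only record an undo entry while a snapshot is active, as in `in_snapshot()`
        if self.items.insert(item) && !self.undo_log.is_empty() {
            self.undo_log.push(Undo::AddItem(item));
        }
    }

    fn rollback_to(&mut self, snapshot: Snapshot) {
        while self.undo_log.len() > snapshot.length {
            match self.undo_log.pop().unwrap() {
                Undo::OpenSnapshot => {}
                Undo::AddItem(item) => {
                    self.items.remove(&item);
                }
            }
        }
    }
}

fn main() {
    let mut t = Table { items: HashSet::new(), undo_log: Vec::new() };
    let snap = t.start_snapshot();
    t.add(1);
    t.add(2);
    t.rollback_to(snap);
    assert!(t.items.is_empty());
}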
-
-    pub fn num_vars(&self) -> u32 {
-        let len = self.var_origins.borrow().len();
-        // enforce no overflow
-        assert!(len as u32 as usize == len);
-        len as u32
-    }
-
-    pub fn new_region_var(&self, origin: RegionVariableOrigin) -> RegionVid {
-        let vid = RegionVid { index: self.num_vars() };
-        self.var_origins.borrow_mut().push(origin.clone());
-
-        let u_vid = self.unification_table.borrow_mut().new_key(
-            unify_key::RegionVidKey { min_vid: vid }
-            );
-        assert_eq!(vid, u_vid);
-        if self.in_snapshot() {
-            self.undo_log.borrow_mut().push(AddVar(vid));
-        }
-        debug!("created new region variable {:?} with origin {:?}",
-               vid,
-               origin);
-        return vid;
-    }
-
-    /// Creates a new skolemized region. Skolemized regions are fresh
-    /// regions used when performing higher-ranked computations. They
-    /// must be used in a very particular way and are never supposed
-    /// to "escape" out into error messages or the code at large.
-    ///
-    /// The idea is to always create a snapshot. Skolemized regions
-    /// can be created in the context of this snapshot, but once the
-    /// snapshot is committed or rolled back, their numbers will be
-    /// recycled, so you must be finished with them. See the extensive
-    /// comments in `higher_ranked.rs` to see how it works (in
-    /// particular, the subtyping comparison).
-    ///
-    /// The `snapshot` argument to this function is not really used;
-    /// it's just there to make it explicit which snapshot bounds the
-    /// skolemized region that results.
-    pub fn new_skolemized(&self, br: ty::BoundRegion, snapshot: &RegionSnapshot) -> Region {
-        assert!(self.in_snapshot());
-        assert!(self.undo_log.borrow()[snapshot.length] == OpenSnapshot);
-
-        let sc = self.skolemization_count.get();
-        self.skolemization_count.set(sc + 1);
-        ReSkolemized(ty::SkolemizedRegionVid { index: sc }, br)
-    }
-
-    pub fn new_bound(&self, debruijn: ty::DebruijnIndex) -> Region {
-        // Creates a fresh bound variable for use in GLB computations.
-        // See discussion of GLB computation in the large comment at
-        // the top of this file for more details.
-        //
-        // This computation is potentially wrong in the face of
-        // rollover.  It's conceivable, if unlikely, that one might
-        // wind up with accidental capture for nested functions in
-        // that case, if the outer function had bound regions created
-        // a very long time before and the inner function somehow
-        // wound up rolling over such that supposedly fresh
-        // identifiers were in fact shadowed. For now, we just assert
-        // that there is no rollover -- eventually we should try to be
-        // robust against this possibility, either by checking the set
-        // of bound identifiers that appear in a given expression and
-        // ensure that we generate one that is distinct, or by
-        // changing the representation of bound regions in a fn
-        // declaration
-
-        let sc = self.bound_count.get();
-        self.bound_count.set(sc + 1);
-
-        if sc >= self.bound_count.get() {
-            self.tcx.sess.bug("rollover in RegionInference new_bound()");
-        }
-
-        ReLateBound(debruijn, BrFresh(sc))
-    }
-
-    fn values_are_none(&self) -> bool {
-        self.values.borrow().is_none()
-    }
-
-    fn add_constraint(&self, constraint: Constraint, origin: SubregionOrigin<'tcx>) {
-        // cannot add constraints once regions are resolved
-        assert!(self.values_are_none());
-
-        debug!("RegionVarBindings: add_constraint({:?})", constraint);
-
-        if self.constraints.borrow_mut().insert(constraint, origin).is_none() {
-            if self.in_snapshot() {
-                self.undo_log.borrow_mut().push(AddConstraint(constraint));
-            }
-        }
-    }
-
-    fn add_verify(&self, verify: Verify<'tcx>) {
-        // cannot add verifys once regions are resolved
-        assert!(self.values_are_none());
-
-        debug!("RegionVarBindings: add_verify({:?})", verify);
-
-        // skip no-op cases known to be satisfied
-        match verify {
-            VerifyGenericBound(_, _, _, VerifyBound::AllBounds(ref bs)) if bs.len() == 0 => {
-                return;
-            }
-            _ => {}
-        }
-
-        let mut verifys = self.verifys.borrow_mut();
-        let index = verifys.len();
-        verifys.push(verify);
-        if self.in_snapshot() {
-            self.undo_log.borrow_mut().push(AddVerify(index));
-        }
-    }
-
-    pub fn add_given(&self, sub: ty::FreeRegion, sup: ty::RegionVid) {
-        // cannot add givens once regions are resolved
-        assert!(self.values_are_none());
-
-        let mut givens = self.givens.borrow_mut();
-        if givens.insert((sub, sup)) {
-            debug!("add_given({:?} <= {:?})", sub, sup);
-
-            self.undo_log.borrow_mut().push(AddGiven(sub, sup));
-        }
-    }
-
-    pub fn make_eqregion(&self, origin: SubregionOrigin<'tcx>, sub: Region, sup: Region) {
-        if sub != sup {
-            // Eventually, it would be nice to add direct support for
-            // equating regions.
-            self.make_subregion(origin.clone(), sub, sup);
-            self.make_subregion(origin, sup, sub);
-
-            if let (ty::ReVar(sub), ty::ReVar(sup)) = (sub, sup) {
-                self.unification_table.borrow_mut().union(sub, sup);
-            }
-        }
-    }
-
-    pub fn make_subregion(&self, origin: SubregionOrigin<'tcx>, sub: Region, sup: Region) {
-        // cannot add constraints once regions are resolved
-        assert!(self.values_are_none());
-
-        debug!("RegionVarBindings: make_subregion({:?}, {:?}) due to {:?}",
-               sub,
-               sup,
-               origin);
-
-        match (sub, sup) {
-            (ReEarlyBound(..), _) |
-            (ReLateBound(..), _) |
-            (_, ReEarlyBound(..)) |
-            (_, ReLateBound(..)) => {
-                self.tcx.sess.span_bug(origin.span(),
-                                       &format!("cannot relate bound region: {:?} <= {:?}",
-                                                sub,
-                                                sup));
-            }
-            (_, ReStatic) => {
-                // all regions are subregions of static, so we can ignore this
-            }
-            (ReVar(sub_id), ReVar(sup_id)) => {
-                self.add_constraint(ConstrainVarSubVar(sub_id, sup_id), origin);
-            }
-            (r, ReVar(sup_id)) => {
-                self.add_constraint(ConstrainRegSubVar(r, sup_id), origin);
-            }
-            (ReVar(sub_id), r) => {
-                self.add_constraint(ConstrainVarSubReg(sub_id, r), origin);
-            }
-            _ => {
-                self.add_verify(VerifyRegSubReg(origin, sub, sup));
-            }
-        }
-    }
-
-    /// See `Verify::VerifyGenericBound`
-    pub fn verify_generic_bound(&self,
-                                origin: SubregionOrigin<'tcx>,
-                                kind: GenericKind<'tcx>,
-                                sub: Region,
-                                bound: VerifyBound) {
-        self.add_verify(VerifyGenericBound(kind, origin, sub, bound));
-    }
-
-    pub fn lub_regions(&self, origin: SubregionOrigin<'tcx>, a: Region, b: Region) -> Region {
-        // cannot add constraints once regions are resolved
-        assert!(self.values_are_none());
-
-        debug!("RegionVarBindings: lub_regions({:?}, {:?})", a, b);
-        match (a, b) {
-            (ReStatic, _) | (_, ReStatic) => {
-                ReStatic // nothing lives longer than static
-            }
-
-            _ => {
-                self.combine_vars(Lub, a, b, origin.clone(), |this, old_r, new_r| {
-                    this.make_subregion(origin.clone(), old_r, new_r)
-                })
-            }
-        }
-    }
-
-    pub fn glb_regions(&self, origin: SubregionOrigin<'tcx>, a: Region, b: Region) -> Region {
-        // cannot add constraints once regions are resolved
-        assert!(self.values_are_none());
-
-        debug!("RegionVarBindings: glb_regions({:?}, {:?})", a, b);
-        match (a, b) {
-            (ReStatic, r) | (r, ReStatic) => {
-                // static lives longer than everything else
-                r
-            }
-
-            _ => {
-                self.combine_vars(Glb, a, b, origin.clone(), |this, old_r, new_r| {
-                    this.make_subregion(origin.clone(), new_r, old_r)
-                })
-            }
-        }
-    }
-
-    pub fn resolve_var(&self, rid: RegionVid) -> ty::Region {
-        match *self.values.borrow() {
-            None => {
-                self.tcx.sess.span_bug((*self.var_origins.borrow())[rid.index as usize].span(),
-                                       "attempt to resolve region variable before values have \
-                                        been computed!")
-            }
-            Some(ref values) => {
-                let r = lookup(values, rid);
-                debug!("resolve_var({:?}) = {:?}", rid, r);
-                r
-            }
-        }
-    }
-
-    pub fn opportunistic_resolve_var(&self, rid: RegionVid) -> ty::Region {
-        ty::ReVar(self.unification_table.borrow_mut().find_value(rid).min_vid)
-    }
-
-    fn combine_map(&self, t: CombineMapType) -> &RefCell<CombineMap> {
-        match t {
-            Glb => &self.glbs,
-            Lub => &self.lubs,
-        }
-    }
-
-    pub fn combine_vars<F>(&self,
-                           t: CombineMapType,
-                           a: Region,
-                           b: Region,
-                           origin: SubregionOrigin<'tcx>,
-                           mut relate: F)
-                           -> Region
-        where F: FnMut(&RegionVarBindings<'a, 'tcx>, Region, Region)
-    {
-        let vars = TwoRegions { a: a, b: b };
-        match self.combine_map(t).borrow().get(&vars) {
-            Some(&c) => {
-                return ReVar(c);
-            }
-            None => {}
-        }
-        let c = self.new_region_var(MiscVariable(origin.span()));
-        self.combine_map(t).borrow_mut().insert(vars, c);
-        if self.in_snapshot() {
-            self.undo_log.borrow_mut().push(AddCombination(t, vars));
-        }
-        relate(self, a, ReVar(c));
-        relate(self, b, ReVar(c));
-        debug!("combine_vars() c={:?}", c);
-        ReVar(c)
-    }
-
-    pub fn vars_created_since_snapshot(&self, mark: &RegionSnapshot) -> Vec<RegionVid> {
-        self.undo_log.borrow()[mark.length..]
-            .iter()
-            .filter_map(|&elt| {
-                match elt {
-                    AddVar(vid) => Some(vid),
-                    _ => None,
-                }
-            })
-            .collect()
-    }
-
-    /// Computes all regions that have been related to `r0` in any way since the mark `mark` was
-    /// made---`r0` itself will be the first entry. This is used when checking whether skolemized
-    /// regions are being improperly related to other regions.
-    pub fn tainted(&self, mark: &RegionSnapshot, r0: Region) -> Vec<Region> {
-        debug!("tainted(mark={:?}, r0={:?})", mark, r0);
-        let _indenter = indenter();
-
-        // `result_set` acts as a worklist: we explore all outgoing
-        // edges and add any new regions we find to result_set.  This
-        // is not a terribly efficient implementation.
-        let mut result_set = vec![r0];
-        let mut result_index = 0;
-        while result_index < result_set.len() {
-            // nb: can't use usize::range() here because result_set grows
-            let r = result_set[result_index];
-            debug!("result_index={}, r={:?}", result_index, r);
-
-            for undo_entry in self.undo_log.borrow()[mark.length..].iter() {
-                match undo_entry {
-                    &AddConstraint(ConstrainVarSubVar(a, b)) => {
-                        consider_adding_bidirectional_edges(&mut result_set, r, ReVar(a), ReVar(b));
-                    }
-                    &AddConstraint(ConstrainRegSubVar(a, b)) => {
-                        consider_adding_bidirectional_edges(&mut result_set, r, a, ReVar(b));
-                    }
-                    &AddConstraint(ConstrainVarSubReg(a, b)) => {
-                        consider_adding_bidirectional_edges(&mut result_set, r, ReVar(a), b);
-                    }
-                    &AddGiven(a, b) => {
-                        consider_adding_bidirectional_edges(&mut result_set,
-                                                            r,
-                                                            ReFree(a),
-                                                            ReVar(b));
-                    }
-                    &AddVerify(i) => {
-                        match (*self.verifys.borrow())[i] {
-                            VerifyRegSubReg(_, a, b) => {
-                                consider_adding_bidirectional_edges(&mut result_set, r, a, b);
-                            }
-                            VerifyGenericBound(_, _, a, ref bound) => {
-                                bound.for_each_region(&mut |b| {
-                                    consider_adding_bidirectional_edges(&mut result_set, r, a, b)
-                                });
-                            }
-                        }
-                    }
-                    &AddCombination(..) |
-                    &AddVar(..) |
-                    &OpenSnapshot |
-                    &CommitedSnapshot => {}
-                }
-            }
-
-            result_index += 1;
-        }
-
-        return result_set;
-
-        fn consider_adding_bidirectional_edges(result_set: &mut Vec<Region>,
-                                               r: Region,
-                                               r1: Region,
-                                               r2: Region) {
-            consider_adding_directed_edge(result_set, r, r1, r2);
-            consider_adding_directed_edge(result_set, r, r2, r1);
-        }
-
-        fn consider_adding_directed_edge(result_set: &mut Vec<Region>,
-                                         r: Region,
-                                         r1: Region,
-                                         r2: Region) {
-            if r == r1 {
-                // Clearly, this is potentially inefficient.
-                if !result_set.iter().any(|x| *x == r2) {
-                    result_set.push(r2);
-                }
-            }
-        }
-    }
-
-    /// This function performs the actual region resolution.  It must be
-    /// called after all constraints have been added.  It performs a
-    /// fixed-point iteration to find region values which satisfy all
-    /// constraints, assuming such values can be found; if they cannot,
-    /// errors are reported.
-    pub fn resolve_regions(&self,
-                           free_regions: &FreeRegionMap,
-                           subject_node: ast::NodeId)
-                           -> Vec<RegionResolutionError<'tcx>> {
-        debug!("RegionVarBindings: resolve_regions()");
-        let mut errors = vec![];
-        let v = self.infer_variable_values(free_regions, &mut errors, subject_node);
-        *self.values.borrow_mut() = Some(v);
-        errors
-    }
-
-    fn lub_concrete_regions(&self, free_regions: &FreeRegionMap, a: Region, b: Region) -> Region {
-        match (a, b) {
-            (ReLateBound(..), _) |
-            (_, ReLateBound(..)) |
-            (ReEarlyBound(..), _) |
-            (_, ReEarlyBound(..)) => {
-                self.tcx.sess.bug(&format!("cannot relate bound region: LUB({:?}, {:?})", a, b));
-            }
-
-            (ReStatic, _) | (_, ReStatic) => {
-                ReStatic // nothing lives longer than static
-            }
-
-            (ReEmpty, r) | (r, ReEmpty) => {
-                r // everything lives longer than empty
-            }
-
-            (ReVar(v_id), _) | (_, ReVar(v_id)) => {
-                self.tcx.sess.span_bug((*self.var_origins.borrow())[v_id.index as usize].span(),
-                                       &format!("lub_concrete_regions invoked with non-concrete \
-                                                 regions: {:?}, {:?}",
-                                                a,
-                                                b));
-            }
-
-            (ReFree(ref fr), ReScope(s_id)) |
-            (ReScope(s_id), ReFree(ref fr)) => {
-                let f = ReFree(*fr);
-                // A "free" region can be interpreted as "some region
-                // at least as big as the block fr.scope".  So, we can
-                // reasonably compare free regions and scopes:
-                let r_id = self.tcx.region_maps.nearest_common_ancestor(fr.scope, s_id);
-
-                if r_id == fr.scope {
-                    // if the free region's scope `fr.scope` is bigger than
-                    // the scope region `s_id`, then the LUB is the free
-                    // region itself:
-                    f
-                } else {
-                    // otherwise, we don't know what the free region is,
-                    // so we must conservatively say the LUB is static:
-                    ReStatic
-                }
-            }
-
-            (ReScope(a_id), ReScope(b_id)) => {
-                // The region corresponding to an outer block is a
-                // subtype of the region corresponding to an inner
-                // block.
-                ReScope(self.tcx.region_maps.nearest_common_ancestor(a_id, b_id))
-            }
-
-            (ReFree(a_fr), ReFree(b_fr)) => {
-                free_regions.lub_free_regions(a_fr, b_fr)
-            }
-
-            // For these types, we cannot define any additional
-            // relationship:
-            (ReSkolemized(..), _) |
-            (_, ReSkolemized(..)) => {
-                if a == b {
-                    a
-                } else {
-                    ReStatic
-                }
-            }
-        }
-    }
-}
-
-// ______________________________________________________________________
-
-#[derive(Copy, Clone, Debug)]
-pub enum VarValue {
-    Value(Region),
-    ErrorValue,
-}
-
-struct VarData {
-    value: VarValue,
-}
-
-struct RegionAndOrigin<'tcx> {
-    region: Region,
-    origin: SubregionOrigin<'tcx>,
-}
-
-type RegionGraph = graph::Graph<(), Constraint>;
-
-impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
-    fn infer_variable_values(&self,
-                             free_regions: &FreeRegionMap,
-                             errors: &mut Vec<RegionResolutionError<'tcx>>,
-                             subject: ast::NodeId)
-                             -> Vec<VarValue> {
-        let mut var_data = self.construct_var_data();
-
-        // Dorky hack to cause `dump_constraints` to only get called
-        // if debug mode is enabled:
-        debug!("----() End constraint listing (subject={}) {:?}---",
-               subject,
-               self.dump_constraints(subject));
-        graphviz::maybe_print_constraints_for(self, subject);
-
-        let graph = self.construct_graph();
-        self.expand_givens(&graph);
-        self.expansion(free_regions, &mut var_data);
-        self.contraction(free_regions, &mut var_data);
-        let values = self.extract_values_and_collect_conflicts(free_regions,
-                                                               &var_data,
-                                                               &graph,
-                                                               errors);
-        self.collect_concrete_region_errors(free_regions, &values, errors);
-        values
-    }
-
-    fn construct_var_data(&self) -> Vec<VarData> {
-        (0..self.num_vars() as usize)
-            .map(|_| VarData { value: Value(ty::ReEmpty) })
-            .collect()
-    }
-
-    fn dump_constraints(&self, subject: ast::NodeId) {
-        debug!("----() Start constraint listing (subject={}) ()----",
-               subject);
-        for (idx, (constraint, _)) in self.constraints.borrow().iter().enumerate() {
-            debug!("Constraint {} => {:?}", idx, constraint);
-        }
-    }
-
-    fn expand_givens(&self, graph: &RegionGraph) {
-        // Givens are a kind of horrible hack to account for
-        // constraints like 'c <= '0 that are known to hold due to
-        // closure signatures (see the comment above on the `givens`
-        // field). They should go away. But until they do, the role
-        // of this fn is to account for the transitive nature:
-        //
-        //     Given 'c <= '0
-        //     and   '0 <= '1
-        //     then  'c <= '1
-
-        let mut givens = self.givens.borrow_mut();
-        let seeds: Vec<_> = givens.iter().cloned().collect();
-        for (fr, vid) in seeds {
-            let seed_index = NodeIndex(vid.index as usize);
-            for succ_index in graph.depth_traverse(seed_index) {
-                let succ_index = succ_index.0 as u32;
-                if succ_index < self.num_vars() {
-                    let succ_vid = RegionVid { index: succ_index };
-                    givens.insert((fr, succ_vid));
-                }
-            }
-        }
-    }
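// `expand_givens` closes the given relation over the constraint graph: if 'c <= '0 is
// given and the constraints force '0 <= '1, then 'c <= '1 is recorded as well so the
// later phases can ignore it. A standalone sketch of that transitive step, with region
// variables as plain usize indices and a simple depth-first walk (illustration only;
// the real code traverses the graph built by `construct_graph` below):
use std::collections::HashSet;

fn expand_givens(givens: &mut HashSet<(char, usize)>,
                 edges: &[(usize, usize)],
                 num_vars: usize) {
    let mut succ = vec![Vec::new(); num_vars];
    for &(a, b) in edges {
        succ[a].push(b);
    }
    let seeds: Vec<_> = givens.iter().cloned().collect();
    for (fr, vid) in seeds {
        // depth-first walk from the seed variable, adding a given for every successor
        let mut stack = vec![vid];
        let mut seen = HashSet::new();
        while let Some(v) = stack.pop() {
            if seen.insert(v) {
                givens.insert((fr, v));
                stack.extend(&succ[v]);
            }
        }
    }
}

fn main() {
    // given 'c <= '0, with constraints '0 <= '1 and '1 <= '2
    let mut givens: HashSet<(char, usize)> = vec![('c', 0)].into_iter().collect();
    expand_givens(&mut givens, &[(0, 1), (1, 2)], 3);
    assert!(givens.contains(&('c', 1)));
    assert!(givens.contains(&('c', 2)));
}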
-
-    fn expansion(&self, free_regions: &FreeRegionMap, var_data: &mut [VarData]) {
-        self.iterate_until_fixed_point("Expansion", |constraint| {
-            debug!("expansion: constraint={:?} origin={:?}",
-                   constraint,
-                   self.constraints
-                       .borrow()
-                       .get(constraint)
-                       .unwrap());
-            match *constraint {
-                ConstrainRegSubVar(a_region, b_vid) => {
-                    let b_data = &mut var_data[b_vid.index as usize];
-                    self.expand_node(free_regions, a_region, b_vid, b_data)
-                }
-                ConstrainVarSubVar(a_vid, b_vid) => {
-                    match var_data[a_vid.index as usize].value {
-                        ErrorValue => false,
-                        Value(a_region) => {
-                            let b_node = &mut var_data[b_vid.index as usize];
-                            self.expand_node(free_regions, a_region, b_vid, b_node)
-                        }
-                    }
-                }
-                ConstrainVarSubReg(..) => {
-                    // This is a contraction constraint.  Ignore it.
-                    false
-                }
-            }
-        })
-    }
-
-    fn expand_node(&self,
-                   free_regions: &FreeRegionMap,
-                   a_region: Region,
-                   b_vid: RegionVid,
-                   b_data: &mut VarData)
-                   -> bool {
-        debug!("expand_node({:?}, {:?} == {:?})",
-               a_region,
-               b_vid,
-               b_data.value);
-
-        // Check if this relationship is implied by a given.
-        match a_region {
-            ty::ReFree(fr) => {
-                if self.givens.borrow().contains(&(fr, b_vid)) {
-                    debug!("given");
-                    return false;
-                }
-            }
-            _ => {}
-        }
-
-        match b_data.value {
-            Value(cur_region) => {
-                let lub = self.lub_concrete_regions(free_regions, a_region, cur_region);
-                if lub == cur_region {
-                    return false;
-                }
-
-                debug!("Expanding value of {:?} from {:?} to {:?}",
-                       b_vid,
-                       cur_region,
-                       lub);
-
-                b_data.value = Value(lub);
-                return true;
-            }
-
-            ErrorValue => {
-                return false;
-            }
-        }
-    }
-
-    // FIXME(#29436) -- this fn would just go away if we removed ConstrainVarSubReg
-    fn contraction(&self, free_regions: &FreeRegionMap, var_data: &mut [VarData]) {
-        self.iterate_until_fixed_point("Contraction", |constraint| {
-            debug!("contraction: constraint={:?} origin={:?}",
-                   constraint,
-                   self.constraints
-                       .borrow()
-                       .get(constraint)
-                       .unwrap());
-            match *constraint {
-                ConstrainRegSubVar(..) |
-                ConstrainVarSubVar(..) => {
-                    // Expansion will ensure that these constraints hold. Ignore.
-                }
-                ConstrainVarSubReg(a_vid, b_region) => {
-                    let a_data = &mut var_data[a_vid.index as usize];
-                    debug!("contraction: {:?} == {:?}, {:?}",
-                           a_vid,
-                           a_data.value,
-                           b_region);
-
-                    let a_region = match a_data.value {
-                        ErrorValue => return false,
-                        Value(a_region) => a_region,
-                    };
-
-                    if !free_regions.is_subregion_of(self.tcx, a_region, b_region) {
-                        debug!("Setting {:?} to ErrorValue: {:?} not subregion of {:?}",
-                               a_vid,
-                               a_region,
-                               b_region);
-                        a_data.value = ErrorValue;
-                    }
-                }
-            }
-
-            false
-        })
-    }
-
-    fn collect_concrete_region_errors(&self,
-                                      free_regions: &FreeRegionMap,
-                                      values: &Vec<VarValue>,
-                                      errors: &mut Vec<RegionResolutionError<'tcx>>) {
-        let mut reg_reg_dups = FnvHashSet();
-        for verify in self.verifys.borrow().iter() {
-            match *verify {
-                VerifyRegSubReg(ref origin, sub, sup) => {
-                    if free_regions.is_subregion_of(self.tcx, sub, sup) {
-                        continue;
-                    }
-
-                    if !reg_reg_dups.insert((sub, sup)) {
-                        continue;
-                    }
-
-                    debug!("region inference error at {:?}: {:?} <= {:?} is not true",
-                           origin,
-                           sub,
-                           sup);
-
-                    errors.push(ConcreteFailure((*origin).clone(), sub, sup));
-                }
-
-                VerifyGenericBound(ref kind, ref origin, sub, ref bound) => {
-                    let sub = normalize(values, sub);
-                    if bound.is_met(self.tcx, free_regions, values, sub) {
-                        continue;
-                    }
-
-                    debug!("region inference error at {:?}: verifying {:?} <= {:?}",
-                           origin,
-                           sub,
-                           bound);
-
-                    errors.push(GenericBoundFailure((*origin).clone(), kind.clone(), sub));
-                }
-            }
-        }
-    }
-
-    fn extract_values_and_collect_conflicts(&self,
-                                            free_regions: &FreeRegionMap,
-                                            var_data: &[VarData],
-                                            graph: &RegionGraph,
-                                            errors: &mut Vec<RegionResolutionError<'tcx>>)
-                                            -> Vec<VarValue> {
-        debug!("extract_values_and_collect_conflicts()");
-
-        // This is the best way that I have found to suppress
-        // duplicate and related errors. Basically we keep a set of
-        // flags for every node. Whenever an error occurs, we will
-        // walk some portion of the graph looking to find pairs of
-        // conflicting regions to report to the user. As we walk, we
-        // trip the flags from false to true, and if we find that
-        // we've already reported an error involving any particular
-        // node we just stop and don't report the current error.  The
-        // idea is to report errors that derive from independent
-        // regions of the graph, but not those that derive from
-        // overlapping locations.
-        let mut dup_vec = vec![u32::MAX; self.num_vars() as usize];
-
-        for idx in 0..self.num_vars() as usize {
-            match var_data[idx].value {
-                Value(_) => {
-                    /* Inference successful */
-                }
-                ErrorValue => {
-                    /* Inference impossible, this value contains
-                       inconsistent constraints.
-
-                       I think that in this case we should report an
-                       error now---unlike the case above, we can't
-                       wait to see whether the user needs the result
-                       of this variable.  The reason is that the mere
-                       existence of this variable implies that the
-                       region graph is inconsistent, whether or not it
-                       is used.
-
-                       For example, we may have created a region
-                       variable that is the GLB of two other regions
-                       which do not have a GLB.  Even if that variable
-                       is not used, it implies that those two regions
-                       *should* have a GLB.
-
-                       At least I think this is true. It may be that
-                       the mere existence of a conflict in a region variable
-                       that is not used is not a problem, so if this rule
-                       starts to create problems we'll have to revisit
-                       this portion of the code and think hard about it. =) */
-
-                    let node_vid = RegionVid { index: idx as u32 };
-                    self.collect_error_for_expanding_node(free_regions,
-                                                          graph,
-                                                          &mut dup_vec,
-                                                          node_vid,
-                                                          errors);
-                }
-            }
-        }
-
-        (0..self.num_vars() as usize).map(|idx| var_data[idx].value).collect()
-    }
-
-    fn construct_graph(&self) -> RegionGraph {
-        let num_vars = self.num_vars();
-
-        let constraints = self.constraints.borrow();
-
-        let mut graph = graph::Graph::new();
-
-        for _ in 0..num_vars {
-            graph.add_node(());
-        }
-
-        // Issue #30438: two distinct dummy nodes, one for incoming
-        // edges (dummy_source) and another for outgoing edges
-        // (dummy_sink). In `dummy -> a -> b -> dummy`, using one
-        // dummy node leads one to think (erroneously) there exists a
-        // path from `b` to `a`. Using two dummy nodes sidesteps the issue.
-        let dummy_source = graph.add_node(());
-        let dummy_sink = graph.add_node(());
-
-        for (constraint, _) in constraints.iter() {
-            match *constraint {
-                ConstrainVarSubVar(a_id, b_id) => {
-                    graph.add_edge(NodeIndex(a_id.index as usize),
-                                   NodeIndex(b_id.index as usize),
-                                   *constraint);
-                }
-                ConstrainRegSubVar(_, b_id) => {
-                    graph.add_edge(dummy_source, NodeIndex(b_id.index as usize), *constraint);
-                }
-                ConstrainVarSubReg(a_id, _) => {
-                    graph.add_edge(NodeIndex(a_id.index as usize), dummy_sink, *constraint);
-                }
-            }
-        }
-
-        return graph;
-    }
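// Issue #30438, described above: with a single dummy node, the edge added for a
// `ConstrainRegSubVar`-style constraint (dummy -> b) and the edge added for a
// `ConstrainVarSubReg`-style constraint (a -> dummy) chain into a bogus path from
// `a` to `b`. A tiny reachability check over hand-built adjacency lists illustrates
// why two dummies are used; the node numbering below is made up for the example.
fn reachable(succ: &[Vec<usize>], from: usize, to: usize) -> bool {
    let mut stack = vec![from];
    let mut seen = vec![false; succ.len()];
    while let Some(n) = stack.pop() {
        if n == to {
            return true;
        }
        if !seen[n] {
            seen[n] = true;
            stack.extend(succ[n].iter().cloned());
        }
    }
    false
}

fn main() {
    // nodes: 0 = a, 1 = b, 2 = single shared dummy
    let one_dummy = vec![vec![2], vec![], vec![1]]; // a -> dummy, dummy -> b
    assert!(reachable(&one_dummy, 0, 1)); // spurious path a -> b

    // nodes: 0 = a, 1 = b, 2 = dummy_source, 3 = dummy_sink
    let two_dummies = vec![vec![3], vec![], vec![1], vec![]]; // a -> sink, source -> b
    assert!(!reachable(&two_dummies, 0, 1)); // no path a -> b
}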
-
-    fn collect_error_for_expanding_node(&self,
-                                        free_regions: &FreeRegionMap,
-                                        graph: &RegionGraph,
-                                        dup_vec: &mut [u32],
-                                        node_idx: RegionVid,
-                                        errors: &mut Vec<RegionResolutionError<'tcx>>) {
-        // Errors in expanding nodes result from a lower-bound that is
-        // not contained by an upper-bound.
-        let (mut lower_bounds, lower_dup) = self.collect_concrete_regions(graph,
-                                                                          node_idx,
-                                                                          graph::INCOMING,
-                                                                          dup_vec);
-        let (mut upper_bounds, upper_dup) = self.collect_concrete_regions(graph,
-                                                                          node_idx,
-                                                                          graph::OUTGOING,
-                                                                          dup_vec);
-
-        if lower_dup || upper_dup {
-            return;
-        }
-
-        // We place free regions first because we are special casing
-        // SubSupConflict(ReFree, ReFree) when reporting errors, and so
-        // the user is more likely to get a specific suggestion.
-        fn free_regions_first(a: &RegionAndOrigin, b: &RegionAndOrigin) -> Ordering {
-            match (a.region, b.region) {
-                (ReFree(..), ReFree(..)) => Equal,
-                (ReFree(..), _) => Less,
-                (_, ReFree(..)) => Greater,
-                (_, _) => Equal,
-            }
-        }
-        lower_bounds.sort_by(|a, b| free_regions_first(a, b));
-        upper_bounds.sort_by(|a, b| free_regions_first(a, b));
-
-        for lower_bound in &lower_bounds {
-            for upper_bound in &upper_bounds {
-                if !free_regions.is_subregion_of(self.tcx, lower_bound.region, upper_bound.region) {
-                    let origin = (*self.var_origins.borrow())[node_idx.index as usize].clone();
-                    debug!("region inference error at {:?} for {:?}: SubSupConflict sub: {:?} \
-                            sup: {:?}",
-                           origin,
-                           node_idx,
-                           lower_bound.region,
-                           upper_bound.region);
-                    errors.push(SubSupConflict(origin,
-                                               lower_bound.origin.clone(),
-                                               lower_bound.region,
-                                               upper_bound.origin.clone(),
-                                               upper_bound.region));
-                    return;
-                }
-            }
-        }
-
-        self.tcx.sess.span_bug((*self.var_origins.borrow())[node_idx.index as usize].span(),
-                               &format!("collect_error_for_expanding_node() could not find \
-                                         error for var {:?}, lower_bounds={:?}, \
-                                         upper_bounds={:?}",
-                                        node_idx,
-                                        lower_bounds,
-                                        upper_bounds));
-    }
-
-    fn collect_concrete_regions(&self,
-                                graph: &RegionGraph,
-                                orig_node_idx: RegionVid,
-                                dir: Direction,
-                                dup_vec: &mut [u32])
-                                -> (Vec<RegionAndOrigin<'tcx>>, bool) {
-        struct WalkState<'tcx> {
-            set: FnvHashSet<RegionVid>,
-            stack: Vec<RegionVid>,
-            result: Vec<RegionAndOrigin<'tcx>>,
-            dup_found: bool,
-        }
-        let mut state = WalkState {
-            set: FnvHashSet(),
-            stack: vec![orig_node_idx],
-            result: Vec::new(),
-            dup_found: false,
-        };
-        state.set.insert(orig_node_idx);
-
-        // to start off the process, walk the source node in the
-        // direction specified
-        process_edges(self, &mut state, graph, orig_node_idx, dir);
-
-        while !state.stack.is_empty() {
-            let node_idx = state.stack.pop().unwrap();
-
-            // check whether we've visited this node on some previous walk
-            if dup_vec[node_idx.index as usize] == u32::MAX {
-                dup_vec[node_idx.index as usize] = orig_node_idx.index;
-            } else if dup_vec[node_idx.index as usize] != orig_node_idx.index {
-                state.dup_found = true;
-            }
-
-            debug!("collect_concrete_regions(orig_node_idx={:?}, node_idx={:?})",
-                   orig_node_idx,
-                   node_idx);
-
-            // figure out the direction from which this node takes its
-            // values, and search for concrete regions etc in that direction
-            let dir = graph::INCOMING;
-            process_edges(self, &mut state, graph, node_idx, dir);
-        }
-
-        let WalkState {result, dup_found, ..} = state;
-        return (result, dup_found);
-
-        fn process_edges<'a, 'tcx>(this: &RegionVarBindings<'a, 'tcx>,
-                                   state: &mut WalkState<'tcx>,
-                                   graph: &RegionGraph,
-                                   source_vid: RegionVid,
-                                   dir: Direction) {
-            debug!("process_edges(source_vid={:?}, dir={:?})", source_vid, dir);
-
-            let source_node_index = NodeIndex(source_vid.index as usize);
-            for (_, edge) in graph.adjacent_edges(source_node_index, dir) {
-                match edge.data {
-                    ConstrainVarSubVar(from_vid, to_vid) => {
-                        let opp_vid = if from_vid == source_vid {
-                            to_vid
-                        } else {
-                            from_vid
-                        };
-                        if state.set.insert(opp_vid) {
-                            state.stack.push(opp_vid);
-                        }
-                    }
-
-                    ConstrainRegSubVar(region, _) |
-                    ConstrainVarSubReg(_, region) => {
-                        state.result.push(RegionAndOrigin {
-                            region: region,
-                            origin: this.constraints.borrow().get(&edge.data).unwrap().clone(),
-                        });
-                    }
-                }
-            }
-        }
-    }
-
-    fn iterate_until_fixed_point<F>(&self, tag: &str, mut body: F)
-        where F: FnMut(&Constraint) -> bool
-    {
-        let mut iteration = 0;
-        let mut changed = true;
-        while changed {
-            changed = false;
-            iteration += 1;
-            debug!("---- {} Iteration {}{}", "#", tag, iteration);
-            for (constraint, _) in self.constraints.borrow().iter() {
-                let edge_changed = body(constraint);
-                if edge_changed {
-                    debug!("Updated due to constraint {:?}", constraint);
-                    changed = true;
-                }
-            }
-        }
-        debug!("---- {} Complete after {} iteration(s)", tag, iteration);
-    }
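// `iterate_until_fixed_point` simply re-runs `body` over every constraint until a full
// pass makes no change; both `expansion` and `contraction` above rely on it. A generic
// standalone sketch of the same loop, where the closure returns true exactly when it
// changed something (as `body` does above); the reachability example is made up:
fn iterate_until_fixed_point<T, F>(items: &[T], mut body: F)
    where F: FnMut(&T) -> bool
{
    let mut iteration = 0;
    let mut changed = true;
    while changed {
        changed = false;
        iteration += 1;
        for item in items {
            if body(item) {
                changed = true;
            }
        }
    }
    println!("complete after {} iteration(s)", iteration);
}

fn main() {
    // propagate "reachable from node 0" over the edges until nothing new is marked
    let edges = [(0usize, 1usize), (1, 2), (2, 3)];
    let mut reachable = [true, false, false, false];
    iterate_until_fixed_point(&edges, |&(a, b)| {
        if reachable[a] && !reachable[b] {
            reachable[b] = true;
            true
        } else {
            false
        }
    });
    assert!(reachable.iter().all(|&r| r));
}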
-
-}
-
-impl<'tcx> fmt::Debug for Verify<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match *self {
-            VerifyRegSubReg(_, ref a, ref b) => {
-                write!(f, "VerifyRegSubReg({:?}, {:?})", a, b)
-            }
-            VerifyGenericBound(_, ref p, ref a, ref bs) => {
-                write!(f, "VerifyGenericBound({:?}, {:?}, {:?})", p, a, bs)
-            }
-        }
-    }
-}
-
-fn normalize(values: &Vec<VarValue>, r: ty::Region) -> ty::Region {
-    match r {
-        ty::ReVar(rid) => lookup(values, rid),
-        _ => r,
-    }
-}
-
-fn lookup(values: &Vec<VarValue>, rid: ty::RegionVid) -> ty::Region {
-    match values[rid.index as usize] {
-        Value(r) => r,
-        ErrorValue => ReStatic, // Previously reported error.
-    }
-}
-
-impl<'tcx> fmt::Debug for RegionAndOrigin<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "RegionAndOrigin({:?},{:?})", self.region, self.origin)
-    }
-}
-
-impl fmt::Debug for RegionSnapshot {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "RegionSnapshot(length={},skolemization={})",
-               self.length, self.skolemization_count)
-    }
-}
-
-impl<'tcx> fmt::Debug for GenericKind<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match *self {
-            GenericKind::Param(ref p) => write!(f, "{:?}", p),
-            GenericKind::Projection(ref p) => write!(f, "{:?}", p),
-        }
-    }
-}
-
-impl<'tcx> fmt::Display for GenericKind<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match *self {
-            GenericKind::Param(ref p) => write!(f, "{}", p),
-            GenericKind::Projection(ref p) => write!(f, "{}", p),
-        }
-    }
-}
-
-impl<'tcx> GenericKind<'tcx> {
-    pub fn to_ty(&self, tcx: &ty::ctxt<'tcx>) -> Ty<'tcx> {
-        match *self {
-            GenericKind::Param(ref p) => p.to_ty(tcx),
-            GenericKind::Projection(ref p) => tcx.mk_projection(p.trait_ref.clone(), p.item_name),
-        }
-    }
-}
-
-impl VerifyBound {
-    fn for_each_region(&self, f: &mut FnMut(ty::Region)) {
-        match self {
-            &VerifyBound::AnyRegion(ref rs) |
-            &VerifyBound::AllRegions(ref rs) => for &r in rs {
-                f(r);
-            },
-
-            &VerifyBound::AnyBound(ref bs) |
-            &VerifyBound::AllBounds(ref bs) => for b in bs {
-                b.for_each_region(f);
-            },
-        }
-    }
-
-    pub fn must_hold(&self) -> bool {
-        match self {
-            &VerifyBound::AnyRegion(ref bs) => bs.contains(&ty::ReStatic),
-            &VerifyBound::AllRegions(ref bs) => bs.is_empty(),
-            &VerifyBound::AnyBound(ref bs) => bs.iter().any(|b| b.must_hold()),
-            &VerifyBound::AllBounds(ref bs) => bs.iter().all(|b| b.must_hold()),
-        }
-    }
-
-    pub fn cannot_hold(&self) -> bool {
-        match self {
-            &VerifyBound::AnyRegion(ref bs) => bs.is_empty(),
-            &VerifyBound::AllRegions(ref bs) => bs.contains(&ty::ReEmpty),
-            &VerifyBound::AnyBound(ref bs) => bs.iter().all(|b| b.cannot_hold()),
-            &VerifyBound::AllBounds(ref bs) => bs.iter().any(|b| b.cannot_hold()),
-        }
-    }
-
-    pub fn or(self, vb: VerifyBound) -> VerifyBound {
-        if self.must_hold() || vb.cannot_hold() {
-            self
-        } else if self.cannot_hold() || vb.must_hold() {
-            vb
-        } else {
-            VerifyBound::AnyBound(vec![self, vb])
-        }
-    }
-
-    pub fn and(self, vb: VerifyBound) -> VerifyBound {
-        if self.must_hold() && vb.must_hold() {
-            self
-        } else if self.cannot_hold() && vb.cannot_hold() {
-            self
-        } else {
-            VerifyBound::AllBounds(vec![self, vb])
-        }
-    }
-
-    fn is_met<'tcx>(&self,
-                    tcx: &ty::ctxt<'tcx>,
-                    free_regions: &FreeRegionMap,
-                    var_values: &Vec<VarValue>,
-                    min: ty::Region)
-                    -> bool {
-        match self {
-            &VerifyBound::AnyRegion(ref rs) =>
-                rs.iter()
-                  .map(|&r| normalize(var_values, r))
-                  .any(|r| free_regions.is_subregion_of(tcx, min, r)),
-
-            &VerifyBound::AllRegions(ref rs) =>
-                rs.iter()
-                  .map(|&r| normalize(var_values, r))
-                  .all(|r| free_regions.is_subregion_of(tcx, min, r)),
-
-            &VerifyBound::AnyBound(ref bs) =>
-                bs.iter()
-                  .any(|b| b.is_met(tcx, free_regions, var_values, min)),
-
-            &VerifyBound::AllBounds(ref bs) =>
-                bs.iter()
-                  .all(|b| b.is_met(tcx, free_regions, var_values, min)),
-        }
-    }
-}
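
The `VerifyBound` combinators removed above (`must_hold`, `cannot_hold`, `or`, `and`) behave like a small three-valued logic over region bounds: a bound can be known to hold, known to fail, or stay undecided until region values are computed. The following is a minimal standalone sketch of that idea, not part of this patch; the `Bound` enum and its variants are illustrative stand-ins for the real `VerifyBound` variants.

// Illustrative sketch only: a simplified analogue of VerifyBound's combinators.
#[derive(Debug)]
enum Bound {
    True,            // like a bound that contains 'static: must_hold()
    False,           // like an empty AnyRegion: cannot_hold()
    Any(Vec<Bound>), // holds if any child holds
    All(Vec<Bound>), // holds only if every child holds
}

impl Bound {
    fn must_hold(&self) -> bool {
        match *self {
            Bound::True => true,
            Bound::False => false,
            Bound::Any(ref bs) => bs.iter().any(|b| b.must_hold()),
            Bound::All(ref bs) => bs.iter().all(|b| b.must_hold()),
        }
    }

    fn cannot_hold(&self) -> bool {
        match *self {
            Bound::True => false,
            Bound::False => true,
            Bound::Any(ref bs) => bs.iter().all(|b| b.cannot_hold()),
            Bound::All(ref bs) => bs.iter().any(|b| b.cannot_hold()),
        }
    }

    // Mirrors `VerifyBound::or`: short-circuit when one operand already decides the result.
    fn or(self, other: Bound) -> Bound {
        if self.must_hold() || other.cannot_hold() {
            self
        } else if self.cannot_hold() || other.must_hold() {
            other
        } else {
            Bound::Any(vec![self, other])
        }
    }
}

fn main() {
    let b = Bound::False.or(Bound::All(vec![Bound::True, Bound::True]));
    assert!(b.must_hold());
    println!("{:?}", b);
}

The short-circuiting in `or` (and in the original `and`) is what keeps the bound trees small instead of nesting every combination.
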
diff --git a/src/librustc/middle/infer/resolve.rs b/src/librustc/middle/infer/resolve.rs
deleted file mode 100644 (file)
index c68d0a9..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::{InferCtxt, FixupError, FixupResult};
-use middle::ty::{self, Ty, TypeFoldable};
-
-///////////////////////////////////////////////////////////////////////////
-// OPPORTUNISTIC TYPE RESOLVER
-
-/// The opportunistic type resolver can be used at any time. It simply replaces
-/// type variables that have been unified with the things they have
-/// been unified with (similar to `shallow_resolve`, but deep). This is
-/// useful for printing messages etc but also required at various
-/// points for correctness.
-pub struct OpportunisticTypeResolver<'a, 'tcx:'a> {
-    infcx: &'a InferCtxt<'a, 'tcx>,
-}
-
-impl<'a, 'tcx> OpportunisticTypeResolver<'a, 'tcx> {
-    pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> OpportunisticTypeResolver<'a, 'tcx> {
-        OpportunisticTypeResolver { infcx: infcx }
-    }
-}
-
-impl<'a, 'tcx> ty::fold::TypeFolder<'tcx> for OpportunisticTypeResolver<'a, 'tcx> {
-    fn tcx(&self) -> &ty::ctxt<'tcx> {
-        self.infcx.tcx
-    }
-
-    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
-        if !t.has_infer_types() {
-            t // micro-optimize -- if there is nothing in this type that this fold affects...
-        } else {
-            let t0 = self.infcx.shallow_resolve(t);
-            t0.super_fold_with(self)
-        }
-    }
-}
-
-/// The opportunistic type and region resolver is similar to the
-/// opportunistic type resolver, but also opportunistically resolves
-/// regions. It is useful for canonicalization.
-pub struct OpportunisticTypeAndRegionResolver<'a, 'tcx:'a> {
-    infcx: &'a InferCtxt<'a, 'tcx>,
-}
-
-impl<'a, 'tcx> OpportunisticTypeAndRegionResolver<'a, 'tcx> {
-    pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> Self {
-        OpportunisticTypeAndRegionResolver { infcx: infcx }
-    }
-}
-
-impl<'a, 'tcx> ty::fold::TypeFolder<'tcx> for OpportunisticTypeAndRegionResolver<'a, 'tcx> {
-    fn tcx(&self) -> &ty::ctxt<'tcx> {
-        self.infcx.tcx
-    }
-
-    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
-        if !t.needs_infer() {
-            t // micro-optimize -- if there is nothing in this type that this fold affects...
-        } else {
-            let t0 = self.infcx.shallow_resolve(t);
-            t0.super_fold_with(self)
-        }
-    }
-
-    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
-        match r {
-          ty::ReVar(rid) => self.infcx.region_vars.opportunistic_resolve_var(rid),
-          _ => r,
-        }
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// FULL TYPE RESOLUTION
-
-/// Full type resolution replaces all type and region variables with
-/// their concrete results. If any variable cannot be replaced (never unified, etc)
-/// then an `Err` result is returned.
-pub fn fully_resolve<'a, 'tcx, T>(infcx: &InferCtxt<'a,'tcx>, value: &T) -> FixupResult<T>
-    where T : TypeFoldable<'tcx>
-{
-    let mut full_resolver = FullTypeResolver { infcx: infcx, err: None };
-    let result = value.fold_with(&mut full_resolver);
-    match full_resolver.err {
-        None => Ok(result),
-        Some(e) => Err(e),
-    }
-}
-
-// N.B. This type is not public because the protocol around checking the
-// `err` field is not enforceable otherwise.
-struct FullTypeResolver<'a, 'tcx:'a> {
-    infcx: &'a InferCtxt<'a, 'tcx>,
-    err: Option<FixupError>,
-}
-
-impl<'a, 'tcx> ty::fold::TypeFolder<'tcx> for FullTypeResolver<'a, 'tcx> {
-    fn tcx(&self) -> &ty::ctxt<'tcx> {
-        self.infcx.tcx
-    }
-
-    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
-        if !t.needs_infer() {
-            t // micro-optimize -- if there is nothing in this type that this fold affects...
-        } else {
-            let t = self.infcx.shallow_resolve(t);
-            match t.sty {
-                ty::TyInfer(ty::TyVar(vid)) => {
-                    self.err = Some(FixupError::UnresolvedTy(vid));
-                    self.tcx().types.err
-                }
-                ty::TyInfer(ty::IntVar(vid)) => {
-                    self.err = Some(FixupError::UnresolvedIntTy(vid));
-                    self.tcx().types.err
-                }
-                ty::TyInfer(ty::FloatVar(vid)) => {
-                    self.err = Some(FixupError::UnresolvedFloatTy(vid));
-                    self.tcx().types.err
-                }
-                ty::TyInfer(_) => {
-                    self.infcx.tcx.sess.bug(
-                        &format!("Unexpected type in full type resolver: {:?}",
-                                t));
-                }
-                _ => {
-                    t.super_fold_with(self)
-                }
-            }
-        }
-    }
-
-    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
-        match r {
-          ty::ReVar(rid) => self.infcx.region_vars.resolve_var(rid),
-          _ => r,
-        }
-    }
-}
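
The three resolvers deleted above share one shape: walk a type, replace inference variables that already have a known value, and, for full resolution, record the first variable that is still unknown in the `err` field. Below is a standalone sketch of that pattern using a toy type representation; `ToyTy` and `Resolver` are invented for illustration and are not rustc types.

// Standalone sketch of the "full resolution" pattern, with a toy type
// representation instead of rustc's `Ty`/`TypeFolder` machinery.
#[derive(Clone, Debug)]
enum ToyTy {
    Int,
    Var(u32),            // an inference variable
    Ref(Box<ToyTy>),
}

struct Resolver {
    // values[v] == Some(t) means variable v has been unified with t.
    values: Vec<Option<ToyTy>>,
    err: Option<u32>,    // first unresolved variable, like FullTypeResolver::err
}

impl Resolver {
    fn fold(&mut self, t: &ToyTy) -> ToyTy {
        match *t {
            ToyTy::Int => ToyTy::Int,
            ToyTy::Ref(ref inner) => ToyTy::Ref(Box::new(self.fold(inner))),
            ToyTy::Var(v) => match self.values[v as usize].clone() {
                Some(known) => self.fold(&known),   // deep resolution, like super_fold_with
                None => {
                    if self.err.is_none() {
                        self.err = Some(v);         // remember the first failure
                    }
                    ToyTy::Int                      // placeholder, like tcx.types.err
                }
            },
        }
    }
}

fn main() {
    let mut r = Resolver { values: vec![Some(ToyTy::Int), None], err: None };
    let t = ToyTy::Ref(Box::new(ToyTy::Var(0)));
    println!("{:?} err={:?}", r.fold(&t), r.err);   // fully resolved, err = None
    let u = r.fold(&ToyTy::Var(1));
    println!("{:?} err={:?}", u, r.err);            // unresolved: err = Some(1)
}
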
diff --git a/src/librustc/middle/infer/sub.rs b/src/librustc/middle/infer/sub.rs
deleted file mode 100644 (file)
index 2cd686f..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::combine::{self, CombineFields};
-use super::higher_ranked::HigherRankedRelations;
-use super::SubregionOrigin;
-use super::type_variable::{SubtypeOf, SupertypeOf};
-
-use middle::ty::{self, Ty};
-use middle::ty::TyVar;
-use middle::ty::relate::{Cause, Relate, RelateResult, TypeRelation};
-use std::mem;
-
-/// Ensures `a` is made a subtype of `b`. Returns `a` on success.
-pub struct Sub<'a, 'tcx: 'a> {
-    fields: CombineFields<'a, 'tcx>,
-}
-
-impl<'a, 'tcx> Sub<'a, 'tcx> {
-    pub fn new(f: CombineFields<'a, 'tcx>) -> Sub<'a, 'tcx> {
-        Sub { fields: f }
-    }
-}
-
-impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Sub<'a, 'tcx> {
-    fn tag(&self) -> &'static str { "Sub" }
-    fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.fields.infcx.tcx }
-    fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
-
-    fn with_cause<F,R>(&mut self, cause: Cause, f: F) -> R
-        where F: FnOnce(&mut Self) -> R
-    {
-        debug!("sub with_cause={:?}", cause);
-        let old_cause = mem::replace(&mut self.fields.cause, Some(cause));
-        let r = f(self);
-        debug!("sub old_cause={:?}", old_cause);
-        self.fields.cause = old_cause;
-        r
-    }
-
-    fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
-                                               variance: ty::Variance,
-                                               a: &T,
-                                               b: &T)
-                                               -> RelateResult<'tcx, T>
-    {
-        match variance {
-            ty::Invariant => self.fields.equate().relate(a, b),
-            ty::Covariant => self.relate(a, b),
-            ty::Bivariant => self.fields.bivariate().relate(a, b),
-            ty::Contravariant => self.fields.switch_expected().sub().relate(b, a),
-        }
-    }
-
-    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
-        debug!("{}.tys({:?}, {:?})", self.tag(), a, b);
-
-        if a == b { return Ok(a); }
-
-        let infcx = self.fields.infcx;
-        let a = infcx.type_variables.borrow().replace_if_possible(a);
-        let b = infcx.type_variables.borrow().replace_if_possible(b);
-        match (&a.sty, &b.sty) {
-            (&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => {
-                infcx.type_variables
-                    .borrow_mut()
-                    .relate_vars(a_id, SubtypeOf, b_id);
-                Ok(a)
-            }
-            (&ty::TyInfer(TyVar(a_id)), _) => {
-                try!(self.fields
-                         .switch_expected()
-                         .instantiate(b, SupertypeOf, a_id));
-                Ok(a)
-            }
-            (_, &ty::TyInfer(TyVar(b_id))) => {
-                try!(self.fields.instantiate(a, SubtypeOf, b_id));
-                Ok(a)
-            }
-
-            (&ty::TyError, _) | (_, &ty::TyError) => {
-                Ok(self.tcx().types.err)
-            }
-
-            _ => {
-                try!(combine::super_combine_tys(self.fields.infcx, self, a, b));
-                Ok(a)
-            }
-        }
-    }
-
-    fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
-        debug!("{}.regions({:?}, {:?}) self.cause={:?}",
-               self.tag(), a, b, self.fields.cause);
-        // FIXME -- we have more fine-grained information available
-        // from the "cause" field, we could perhaps give more tailored
-        // error messages.
-        let origin = SubregionOrigin::Subtype(self.fields.trace.clone());
-        self.fields.infcx.region_vars.make_subregion(origin, a, b);
-        Ok(a)
-    }
-
-    fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-                  -> RelateResult<'tcx, ty::Binder<T>>
-        where T: Relate<'a,'tcx>
-    {
-        self.fields.higher_ranked_sub(a, b)
-    }
-}
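
In the deleted `relate_with_variance` above, the subtype check dispatches on variance: invariant positions require equality, covariant positions keep the orientation of the check, contravariant positions flip the operands, and bivariant positions impose roughly no subtyping constraint. A toy sketch of that dispatch, using plain integers and `<=` as a stand-in for the real type relation (all names here are illustrative only):

// Sketch of the variance dispatch, on a toy domain of integers.
#[derive(Copy, Clone, Debug)]
enum Variance { Covariant, Contravariant, Invariant, Bivariant }

// "Sub" relation on the toy domain: does a <= b hold?
fn sub(a: i32, b: i32) -> bool { a <= b }

fn relate_with_variance(variance: Variance, a: i32, b: i32) -> bool {
    match variance {
        Variance::Invariant => a == b,          // like equate()
        Variance::Covariant => sub(a, b),       // keep orientation
        Variance::Contravariant => sub(b, a),   // flip the operands
        Variance::Bivariant => true,            // no constraint either way
    }
}

fn main() {
    assert!(relate_with_variance(Variance::Covariant, 1, 2));
    assert!(relate_with_variance(Variance::Contravariant, 2, 1));
    assert!(!relate_with_variance(Variance::Invariant, 1, 2));
    println!("variance dispatch ok");
}
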
diff --git a/src/librustc/middle/infer/type_variable.rs b/src/librustc/middle/infer/type_variable.rs
deleted file mode 100644 (file)
index e4af098..0000000
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-pub use self::RelationDir::*;
-use self::TypeVariableValue::*;
-use self::UndoEntry::*;
-use middle::def_id::{DefId};
-use middle::ty::{self, Ty};
-use syntax::codemap::Span;
-
-use std::cmp::min;
-use std::marker::PhantomData;
-use std::mem;
-use std::u32;
-use rustc_data_structures::snapshot_vec as sv;
-
-pub struct TypeVariableTable<'tcx> {
-    values: sv::SnapshotVec<Delegate<'tcx>>,
-}
-
-struct TypeVariableData<'tcx> {
-    value: TypeVariableValue<'tcx>,
-    diverging: bool
-}
-
-enum TypeVariableValue<'tcx> {
-    Known(Ty<'tcx>),
-    Bounded {
-        relations: Vec<Relation>,
-        default: Option<Default<'tcx>>
-    }
-}
-
-// We will use this to store the required information to recapitulate what happened when
-// an error occurs.
-#[derive(Clone, Debug, PartialEq, Eq, Hash)]
-pub struct Default<'tcx> {
-    pub ty: Ty<'tcx>,
-    /// The span where the default was incurred
-    pub origin_span: Span,
-    /// The definition that the default originates from
-    pub def_id: DefId
-}
-
-pub struct Snapshot {
-    snapshot: sv::Snapshot
-}
-
-enum UndoEntry<'tcx> {
-    // The type of the var was specified.
-    SpecifyVar(ty::TyVid, Vec<Relation>, Option<Default<'tcx>>),
-    Relate(ty::TyVid, ty::TyVid),
-}
-
-struct Delegate<'tcx>(PhantomData<&'tcx ()>);
-
-type Relation = (RelationDir, ty::TyVid);
-
-#[derive(Copy, Clone, PartialEq, Debug)]
-pub enum RelationDir {
-    SubtypeOf, SupertypeOf, EqTo, BiTo
-}
-
-impl RelationDir {
-    fn opposite(self) -> RelationDir {
-        match self {
-            SubtypeOf => SupertypeOf,
-            SupertypeOf => SubtypeOf,
-            EqTo => EqTo,
-            BiTo => BiTo,
-        }
-    }
-}
-
-impl<'tcx> TypeVariableTable<'tcx> {
-    pub fn new() -> TypeVariableTable<'tcx> {
-        TypeVariableTable { values: sv::SnapshotVec::new() }
-    }
-
-    fn relations<'a>(&'a mut self, a: ty::TyVid) -> &'a mut Vec<Relation> {
-        relations(self.values.get_mut(a.index as usize))
-    }
-
-    pub fn default(&self, vid: ty::TyVid) -> Option<Default<'tcx>> {
-        match &self.values.get(vid.index as usize).value {
-            &Known(_) => None,
-            &Bounded { ref default, .. } => default.clone()
-        }
-    }
-
-    pub fn var_diverges<'a>(&'a self, vid: ty::TyVid) -> bool {
-        self.values.get(vid.index as usize).diverging
-    }
-
-    /// Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
-    ///
-    /// Precondition: neither `a` nor `b` are known.
-    pub fn relate_vars(&mut self, a: ty::TyVid, dir: RelationDir, b: ty::TyVid) {
-        if a != b {
-            self.relations(a).push((dir, b));
-            self.relations(b).push((dir.opposite(), a));
-            self.values.record(Relate(a, b));
-        }
-    }
-
-    /// Instantiates `vid` with the type `ty` and then pushes an entry onto `stack` for each of the
-    /// relations of `vid` to other variables. The relations will have the form `(ty, dir, vid1)`
-    /// where `vid1` is some other variable id.
-    pub fn instantiate_and_push(
-        &mut self,
-        vid: ty::TyVid,
-        ty: Ty<'tcx>,
-        stack: &mut Vec<(Ty<'tcx>, RelationDir, ty::TyVid)>)
-    {
-        let old_value = {
-            let value_ptr = &mut self.values.get_mut(vid.index as usize).value;
-            mem::replace(value_ptr, Known(ty))
-        };
-
-        let (relations, default) = match old_value {
-            Bounded { relations, default } => (relations, default),
-            Known(_) => panic!("Asked to instantiate variable that is \
-                               already instantiated")
-        };
-
-        for &(dir, vid) in &relations {
-            stack.push((ty, dir, vid));
-        }
-
-        self.values.record(SpecifyVar(vid, relations, default));
-    }
-
-    pub fn new_var(&mut self,
-                   diverging: bool,
-                   default: Option<Default<'tcx>>) -> ty::TyVid {
-        let index = self.values.push(TypeVariableData {
-            value: Bounded { relations: vec![], default: default },
-            diverging: diverging
-        });
-        ty::TyVid { index: index as u32 }
-    }
-
-    pub fn probe(&self, vid: ty::TyVid) -> Option<Ty<'tcx>> {
-        match self.values.get(vid.index as usize).value {
-            Bounded { .. } => None,
-            Known(t) => Some(t)
-        }
-    }
-
-    pub fn replace_if_possible(&self, t: Ty<'tcx>) -> Ty<'tcx> {
-        match t.sty {
-            ty::TyInfer(ty::TyVar(v)) => {
-                match self.probe(v) {
-                    None => t,
-                    Some(u) => u
-                }
-            }
-            _ => t,
-        }
-    }
-
-    pub fn snapshot(&mut self) -> Snapshot {
-        Snapshot { snapshot: self.values.start_snapshot() }
-    }
-
-    pub fn rollback_to(&mut self, s: Snapshot) {
-        self.values.rollback_to(s.snapshot);
-    }
-
-    pub fn commit(&mut self, s: Snapshot) {
-        self.values.commit(s.snapshot);
-    }
-
-    pub fn types_escaping_snapshot(&self, s: &Snapshot) -> Vec<Ty<'tcx>> {
-        /*!
-         * Find the set of type variables that existed *before* `s`
-         * but which have only been unified since `s` started, and
-         * return the types with which they were unified. So if we had
-         * a type variable `V0`, then we started the snapshot, then we
-         * created a type variable `V1`, unified `V0` with `T0`, and
-         * unified `V1` with `T1`, this function would return `{T0}`.
-         */
-
-        let mut new_elem_threshold = u32::MAX;
-        let mut escaping_types = Vec::new();
-        let actions_since_snapshot = self.values.actions_since_snapshot(&s.snapshot);
-        debug!("actions_since_snapshot.len() = {}", actions_since_snapshot.len());
-        for action in actions_since_snapshot {
-            match *action {
-                sv::UndoLog::NewElem(index) => {
-                    // if any new variables were created during the
-                    // snapshot, remember the lower index (which will
-                    // always be the first one we see). Note that this
-                    // action must precede those variables being
-                    // specified.
-                    new_elem_threshold = min(new_elem_threshold, index as u32);
-                    debug!("NewElem({}) new_elem_threshold={}", index, new_elem_threshold);
-                }
-
-                sv::UndoLog::Other(SpecifyVar(vid, _, _)) => {
-                    if vid.index < new_elem_threshold {
-                        // quick check to see if this variable was
-                        // created since the snapshot started or not.
-                        let escaping_type = self.probe(vid).unwrap();
-                        escaping_types.push(escaping_type);
-                    }
-                    debug!("SpecifyVar({:?}) new_elem_threshold={}", vid, new_elem_threshold);
-                }
-
-                _ => { }
-            }
-        }
-
-        escaping_types
-    }
-
-    pub fn unsolved_variables(&self) -> Vec<ty::TyVid> {
-        self.values
-            .iter()
-            .enumerate()
-            .filter_map(|(i, value)| match &value.value {
-                &TypeVariableValue::Known(_) => None,
-                &TypeVariableValue::Bounded { .. } => Some(ty::TyVid { index: i as u32 })
-            })
-            .collect()
-    }
-}
-
-impl<'tcx> sv::SnapshotVecDelegate for Delegate<'tcx> {
-    type Value = TypeVariableData<'tcx>;
-    type Undo = UndoEntry<'tcx>;
-
-    fn reverse(values: &mut Vec<TypeVariableData<'tcx>>, action: UndoEntry<'tcx>) {
-        match action {
-            SpecifyVar(vid, relations, default) => {
-                values[vid.index as usize].value = Bounded {
-                    relations: relations,
-                    default: default
-                };
-            }
-
-            Relate(a, b) => {
-                relations(&mut (*values)[a.index as usize]).pop();
-                relations(&mut (*values)[b.index as usize]).pop();
-            }
-        }
-    }
-}
-
-fn relations<'a>(v: &'a mut TypeVariableData) -> &'a mut Vec<Relation> {
-    match v.value {
-        Known(_) => panic!("var_sub_var: variable is known"),
-        Bounded { ref mut relations, .. } => relations
-    }
-}
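
The `TypeVariableTable` removed above is built on a snapshottable vector: every instantiation is recorded in an undo log, and rolling back a snapshot replays those entries in reverse, which is what makes speculative unification cheap to abandon. A simplified standalone sketch of that mechanism follows; the `Table`, `Value` and `Undo` names are invented, and variable creation is not undone here, unlike the real `SnapshotVec`.

// Sketch of the snapshot/undo-log idea: mutations push undo entries,
// rollback pops and reverses them.
#[derive(Clone, Debug)]
enum Value { Unknown, Known(i32) }

enum Undo { Specify(usize) }            // variable `i` went from Unknown to Known

struct Table {
    values: Vec<Value>,
    log: Vec<Undo>,                     // undo log, like snapshot_vec's action log
}

impl Table {
    fn new_var(&mut self) -> usize {
        self.values.push(Value::Unknown);
        self.values.len() - 1
    }

    fn instantiate(&mut self, v: usize, t: i32) {
        if let Value::Known(_) = self.values[v] {
            panic!("asked to instantiate variable that is already instantiated");
        }
        self.values[v] = Value::Known(t);
        self.log.push(Undo::Specify(v));
    }

    fn snapshot(&self) -> usize {
        self.log.len()
    }

    fn rollback_to(&mut self, snap: usize) {
        while self.log.len() > snap {
            match self.log.pop().unwrap() {
                Undo::Specify(v) => self.values[v] = Value::Unknown,
            }
        }
    }
}

fn main() {
    let mut table = Table { values: vec![], log: vec![] };
    let v0 = table.new_var();
    let snap = table.snapshot();
    table.instantiate(v0, 42);
    table.rollback_to(snap);            // undoes the instantiation
    println!("{:?}", table.values[v0]); // Unknown again
}
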
diff --git a/src/librustc/middle/infer/unify_key.rs b/src/librustc/middle/infer/unify_key.rs
deleted file mode 100644 (file)
index c832319..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use syntax::ast;
-use middle::ty::{self, IntVarValue, Ty};
-use rustc_data_structures::unify::{Combine, UnifyKey};
-
-pub trait ToType<'tcx> {
-    fn to_type(&self, tcx: &ty::ctxt<'tcx>) -> Ty<'tcx>;
-}
-
-impl UnifyKey for ty::IntVid {
-    type Value = Option<IntVarValue>;
-    fn index(&self) -> u32 { self.index }
-    fn from_index(i: u32) -> ty::IntVid { ty::IntVid { index: i } }
-    fn tag(_: Option<ty::IntVid>) -> &'static str { "IntVid" }
-}
-
-#[derive(PartialEq, Copy, Clone, Debug)]
-pub struct RegionVidKey {
-    /// The minimum region vid in the unification set. This is needed
-    /// to have a canonical name for a type to prevent infinite
-    /// recursion.
-    pub min_vid: ty::RegionVid
-}
-
-impl Combine for RegionVidKey {
-    fn combine(&self, other: &RegionVidKey) -> RegionVidKey {
-        let min_vid = if self.min_vid.index < other.min_vid.index {
-            self.min_vid
-        } else {
-            other.min_vid
-        };
-
-        RegionVidKey { min_vid: min_vid }
-    }
-}
-
-impl UnifyKey for ty::RegionVid {
-    type Value = RegionVidKey;
-    fn index(&self) -> u32 { self.index }
-    fn from_index(i: u32) -> ty::RegionVid { ty::RegionVid { index: i } }
-    fn tag(_: Option<ty::RegionVid>) -> &'static str { "RegionVid" }
-}
-
-impl<'tcx> ToType<'tcx> for IntVarValue {
-    fn to_type(&self, tcx: &ty::ctxt<'tcx>) -> Ty<'tcx> {
-        match *self {
-            ty::IntType(i) => tcx.mk_mach_int(i),
-            ty::UintType(i) => tcx.mk_mach_uint(i),
-        }
-    }
-}
-
-// Floating point type keys
-
-impl UnifyKey for ty::FloatVid {
-    type Value = Option<ast::FloatTy>;
-    fn index(&self) -> u32 { self.index }
-    fn from_index(i: u32) -> ty::FloatVid { ty::FloatVid { index: i } }
-    fn tag(_: Option<ty::FloatVid>) -> &'static str { "FloatVid" }
-}
-
-impl<'tcx> ToType<'tcx> for ast::FloatTy {
-    fn to_type(&self, tcx: &ty::ctxt<'tcx>) -> Ty<'tcx> {
-        tcx.mk_mach_float(*self)
-    }
-}
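
`RegionVidKey::combine` above names a unified set after the smallest region vid it contains, so each set keeps a stable canonical representative. The same idea in a tiny standalone union-find over plain indices; the `UnionFind` type here is illustrative and is not the `rustc_data_structures::unify` API.

// Sketch: union-find where the unified set is named after the minimum index.
struct UnionFind { parent: Vec<usize> }

impl UnionFind {
    fn new(n: usize) -> UnionFind {
        UnionFind { parent: (0..n).collect() }
    }

    fn find(&mut self, x: usize) -> usize {
        let p = self.parent[x];
        if p != x {
            let root = self.find(p);
            self.parent[x] = root;          // path compression
        }
        self.parent[x]
    }

    // Like `combine`: the merged set is named after the smaller of the two roots.
    fn union(&mut self, a: usize, b: usize) {
        let ra = self.find(a);
        let rb = self.find(b);
        let min = if ra < rb { ra } else { rb };
        let max = if ra < rb { rb } else { ra };
        self.parent[max] = min;
    }
}

fn main() {
    let mut uf = UnionFind::new(4);
    uf.union(3, 1);
    uf.union(1, 2);
    assert_eq!(uf.find(3), 1);              // the minimum vid wins
    println!("root of 3 = {}", uf.find(3));
}
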
index a763677db066109b203bc4a0b7a4224d1d564139..e84be7e45606b0c77f0f7a55a2d4be4e713e619d 100644 (file)
 // except according to those terms.
 
 use dep_graph::DepNode;
-use middle::def::Def;
-use middle::def_id::DefId;
-use middle::subst::{Subst, Substs, EnumeratedItems};
-use middle::ty::{TransmuteRestriction, ctxt, TyBareFn};
-use middle::ty::{self, Ty, TypeFoldable};
-
-use std::fmt;
+use hir::def::Def;
+use hir::def_id::DefId;
+use infer::{InferCtxt, new_infer_ctxt};
+use traits::ProjectionMode;
+use ty::{self, Ty, TyCtxt};
+use ty::layout::{LayoutError, Pointer, SizeSkeleton};
 
 use syntax::abi::Abi::RustIntrinsic;
 use syntax::ast;
 use syntax::codemap::Span;
-use rustc_front::intravisit::{self, Visitor, FnKind};
-use rustc_front::hir;
+use hir::intravisit::{self, Visitor, FnKind};
+use hir;
 
-pub fn check_crate(tcx: &ctxt) {
-    let mut visitor = IntrinsicCheckingVisitor {
-        tcx: tcx,
-        param_envs: Vec::new(),
-        dummy_sized_ty: tcx.types.isize,
-        dummy_unsized_ty: tcx.mk_slice(tcx.types.isize),
+pub fn check_crate(tcx: &TyCtxt) {
+    let mut visitor = ItemVisitor {
+        tcx: tcx
     };
     tcx.visit_all_items_in_krate(DepNode::IntrinsicCheck, &mut visitor);
 }
 
-struct IntrinsicCheckingVisitor<'a, 'tcx: 'a> {
-    tcx: &'a ctxt<'tcx>,
+struct ItemVisitor<'a, 'tcx: 'a> {
+    tcx: &'a TyCtxt<'tcx>
+}
 
-    // As we traverse the AST, we keep a stack of the parameter
-    // environments for each function we encounter. When we find a
-    // call to `transmute`, we can check it in the context of the top
-    // of the stack (which ought not to be empty).
-    param_envs: Vec<ty::ParameterEnvironment<'a,'tcx>>,
+impl<'a, 'tcx> ItemVisitor<'a, 'tcx> {
+    fn visit_const(&mut self, item_id: ast::NodeId, expr: &hir::Expr) {
+        let param_env = ty::ParameterEnvironment::for_item(self.tcx, item_id);
+        let infcx = new_infer_ctxt(self.tcx, &self.tcx.tables,
+                                   Some(param_env),
+                                   ProjectionMode::Any);
+        let mut visitor = ExprVisitor {
+            infcx: &infcx
+        };
+        visitor.visit_expr(expr);
+    }
+}
 
-    // Dummy sized/unsized types that use to substitute for type
-    // parameters in order to estimate how big a type will be for any
-    // possible instantiation of the type parameters in scope.  See
-    // `check_transmute` for more details.
-    dummy_sized_ty: Ty<'tcx>,
-    dummy_unsized_ty: Ty<'tcx>,
+struct ExprVisitor<'a, 'tcx: 'a> {
+    infcx: &'a InferCtxt<'a, 'tcx>
 }
 
-impl<'a, 'tcx> IntrinsicCheckingVisitor<'a, 'tcx> {
+impl<'a, 'tcx> ExprVisitor<'a, 'tcx> {
     fn def_id_is_transmute(&self, def_id: DefId) -> bool {
-        let intrinsic = match self.tcx.lookup_item_type(def_id).ty.sty {
-            ty::TyBareFn(_, ref bfty) => bfty.abi == RustIntrinsic,
+        let intrinsic = match self.infcx.tcx.lookup_item_type(def_id).ty.sty {
+            ty::TyFnDef(_, _, ref bfty) => bfty.abi == RustIntrinsic,
             _ => return false
         };
-        intrinsic && self.tcx.item_name(def_id).as_str() == "transmute"
+        intrinsic && self.infcx.tcx.item_name(def_id).as_str() == "transmute"
     }
 
     fn check_transmute(&self, span: Span, from: Ty<'tcx>, to: Ty<'tcx>, id: ast::NodeId) {
-        // Find the parameter environment for the most recent function that
-        // we entered.
+        let sk_from = SizeSkeleton::compute(from, self.infcx);
+        let sk_to = SizeSkeleton::compute(to, self.infcx);
 
-        let param_env = match self.param_envs.last() {
-            Some(p) => p,
-            None => {
-                self.tcx.sess.span_bug(
-                    span,
-                    "transmute encountered outside of any fn");
+        // Check for same size using the skeletons.
+        if let (Ok(sk_from), Ok(sk_to)) = (sk_from, sk_to) {
+            if sk_from.same_size(sk_to) {
+                return;
             }
-        };
-
-        // Simple case: no type parameters involved.
-        if
-            !from.has_param_types() && !from.has_self_ty() &&
-            !to.has_param_types() && !to.has_self_ty()
-        {
-            let restriction = TransmuteRestriction {
-                span: span,
-                original_from: from,
-                original_to: to,
-                substituted_from: from,
-                substituted_to: to,
-                id: id,
-            };
-            self.push_transmute_restriction(restriction);
-            return;
-        }
 
-        // The rules around type parameters are a bit subtle. We are
-        // checking these rules before monomorphization, so there may
-        // be unsubstituted type parameters present in the
-        // types. Obviously we cannot create LLVM types for those.
-        // However, if a type parameter appears only indirectly (i.e.,
-        // through a pointer), it does not necessarily affect the
-        // size, so that should be allowed. The only catch is that we
-        // DO want to be careful around unsized type parameters, since
-        // fat pointers have a different size than a thin pointer, and
-        // hence `&T` and `&U` have different sizes if `T : Sized` but
-        // `U : Sized` does not hold.
-        //
-        // However, it's not as simple as checking whether `T :
-        // Sized`, because even if `T : Sized` does not hold, that
-        // just means that `T` *may* not be sized.  After all, even a
-        // type parameter `T: ?Sized` could be bound to a sized
-        // type. (Issue #20116)
-        //
-        // To handle this, we first check for "interior" type
-        // parameters, which are always illegal. If there are none of
-        // those, then we know that the only way that all type
-        // parameters `T` are referenced indirectly, e.g. via a
-        // pointer type like `&T`. In that case, we only care whether
-        // `T` is sized or not, because that influences whether `&T`
-        // is a thin or fat pointer.
-        //
-        // One could imagine establishing a sophisticated constraint
-        // system to ensure that the transmute is legal, but instead
-        // we do something brutally dumb. We just substitute dummy
-        // sized or unsized types for every type parameter in scope,
-        // exhaustively checking all possible combinations. Here are some examples:
-        //
-        // ```
-        // fn foo<T, U>() {
-        //     // T=int, U=int
-        // }
-        //
-        // fn bar<T: ?Sized, U>() {
-        //     // T=int, U=int
-        //     // T=[int], U=int
-        // }
-        //
-        // fn baz<T: ?Sized, U: ?Sized>() {
-        //     // T=int, U=int
-        //     // T=[int], U=int
-        //     // T=int, U=[int]
-        //     // T=[int], U=[int]
-        // }
-        // ```
-        //
-        // In all cases, we keep the original unsubstituted types
-        // around for error reporting.
-
-        let from_tc = from.type_contents(self.tcx);
-        let to_tc = to.type_contents(self.tcx);
-        if from_tc.interior_param() || to_tc.interior_param() {
-            span_err!(self.tcx.sess, span, E0139,
-                      "cannot transmute to or from a type that contains \
-                       unsubstituted type parameters");
-            return;
+            match (&from.sty, sk_to) {
+                (&ty::TyFnDef(..), SizeSkeleton::Known(size_to))
+                        if size_to == Pointer.size(&self.infcx.tcx.data_layout) => {
+                    // FIXME #19925 Remove this warning after a release cycle.
+                    let msg = format!("`{}` is now zero-sized and has to be cast \
+                                       to a pointer before transmuting to `{}`",
+                                      from, to);
+                    self.infcx.tcx.sess.add_lint(
+                        ::lint::builtin::TRANSMUTE_FROM_FN_ITEM_TYPES, id, span, msg);
+                    return;
+                }
+                _ => {}
+            }
         }
 
-        let mut substs = param_env.free_substs.clone();
-        self.with_each_combination(
-            span,
-            param_env,
-            param_env.free_substs.types.iter_enumerated(),
-            &mut substs,
-            &mut |substs| {
-                let restriction = TransmuteRestriction {
-                    span: span,
-                    original_from: from,
-                    original_to: to,
-                    substituted_from: from.subst(self.tcx, substs),
-                    substituted_to: to.subst(self.tcx, substs),
-                    id: id,
-                };
-                self.push_transmute_restriction(restriction);
-            });
-    }
-
-    fn with_each_combination(&self,
-                             span: Span,
-                             param_env: &ty::ParameterEnvironment<'a,'tcx>,
-                             mut types_in_scope: EnumeratedItems<Ty<'tcx>>,
-                             substs: &mut Substs<'tcx>,
-                             callback: &mut FnMut(&Substs<'tcx>))
-    {
-        // This function invokes `callback` many times with different
-        // substitutions that replace all the parameters in scope with
-        // either `int` or `[int]`, depending on whether the type
-        // parameter is known to be sized. See big comment above for
-        // an explanation of why this is a reasonable thing to do.
-
-        match types_in_scope.next() {
-            None => {
-                debug!("with_each_combination(substs={:?})",
-                       substs);
-
-                callback(substs);
+        // Try to display a sensible error with as much information as possible.
+        let skeleton_string = |ty: Ty<'tcx>, sk| {
+            match sk {
+                Ok(SizeSkeleton::Known(size)) => {
+                    format!("{} bits", size.bits())
+                }
+                Ok(SizeSkeleton::Pointer { tail, .. }) => {
+                    format!("pointer to {}", tail)
+                }
+                Err(LayoutError::Unknown(bad)) => {
+                    if bad == ty {
+                        format!("size can vary")
+                    } else {
+                        format!("size can vary because of {}", bad)
+                    }
+                }
+                Err(err) => err.to_string()
             }
+        };
 
-            Some((space, index, &param_ty)) => {
-                debug!("with_each_combination: space={:?}, index={}, param_ty={:?}",
-                       space, index, param_ty);
-
-                if !param_ty.is_sized(param_env, span) {
-                    debug!("with_each_combination: param_ty is not known to be sized");
+        span_err!(self.infcx.tcx.sess, span, E0512,
+                  "transmute called with differently sized types: \
+                   {} ({}) to {} ({})",
+                  from, skeleton_string(from, sk_from),
+                  to, skeleton_string(to, sk_to));
+    }
+}
 
-                    substs.types.get_mut_slice(space)[index] = self.dummy_unsized_ty;
-                    self.with_each_combination(span, param_env, types_in_scope.clone(),
-                                               substs, callback);
-                }
+impl<'a, 'tcx, 'v> Visitor<'v> for ItemVisitor<'a, 'tcx> {
+    // const, static and N in [T; N].
+    fn visit_expr(&mut self, expr: &hir::Expr) {
+        let infcx = new_infer_ctxt(self.tcx, &self.tcx.tables,
+                                   None, ProjectionMode::Any);
+        let mut visitor = ExprVisitor {
+            infcx: &infcx
+        };
+        visitor.visit_expr(expr);
+    }
 
-                substs.types.get_mut_slice(space)[index] = self.dummy_sized_ty;
-                self.with_each_combination(span, param_env, types_in_scope,
-                                           substs, callback);
-            }
+    fn visit_trait_item(&mut self, item: &hir::TraitItem) {
+        if let hir::ConstTraitItem(_, Some(ref expr)) = item.node {
+            self.visit_const(item.id, expr);
+        } else {
+            intravisit::walk_trait_item(self, item);
         }
     }
 
-    fn push_transmute_restriction(&self, restriction: TransmuteRestriction<'tcx>) {
-        debug!("Pushing transmute restriction: {:?}", restriction);
-        self.tcx.transmute_restrictions.borrow_mut().push(restriction);
+    fn visit_impl_item(&mut self, item: &hir::ImplItem) {
+        if let hir::ImplItemKind::Const(_, ref expr) = item.node {
+            self.visit_const(item.id, expr);
+        } else {
+            intravisit::walk_impl_item(self, item);
+        }
     }
-}
 
-impl<'a, 'tcx, 'v> Visitor<'v> for IntrinsicCheckingVisitor<'a, 'tcx> {
     fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v hir::FnDecl,
                 b: &'v hir::Block, s: Span, id: ast::NodeId) {
         match fk {
             FnKind::ItemFn(..) | FnKind::Method(..) => {
                 let param_env = ty::ParameterEnvironment::for_item(self.tcx, id);
-                self.param_envs.push(param_env);
-                intravisit::walk_fn(self, fk, fd, b, s);
-                self.param_envs.pop();
+                let infcx = new_infer_ctxt(self.tcx, &self.tcx.tables,
+                                           Some(param_env),
+                                           ProjectionMode::Any);
+                let mut visitor = ExprVisitor {
+                    infcx: &infcx
+                };
+                visitor.visit_fn(fk, fd, b, s, id);
             }
-            FnKind::Closure => {
-                intravisit::walk_fn(self, fk, fd, b, s);
+            FnKind::Closure(..) => {
+                span_bug!(s, "intrinsicck: closure outside of function")
             }
         }
     }
+}
 
+impl<'a, 'tcx, 'v> Visitor<'v> for ExprVisitor<'a, 'tcx> {
     fn visit_expr(&mut self, expr: &hir::Expr) {
         if let hir::ExprPath(..) = expr.node {
-            match self.tcx.resolve_expr(expr) {
+            match self.infcx.tcx.resolve_expr(expr) {
                 Def::Fn(did) if self.def_id_is_transmute(did) => {
-                    let typ = self.tcx.node_id_to_type(expr.id);
+                    let typ = self.infcx.tcx.node_id_to_type(expr.id);
                     match typ.sty {
-                        TyBareFn(_, ref bare_fn_ty) if bare_fn_ty.abi == RustIntrinsic => {
+                        ty::TyFnDef(_, _, ref bare_fn_ty) if bare_fn_ty.abi == RustIntrinsic => {
                             if let ty::FnConverging(to) = bare_fn_ty.sig.0.output {
                                 let from = bare_fn_ty.sig.0.inputs[0];
                                 self.check_transmute(expr.span, from, to, expr.id);
                             }
                         }
                         _ => {
-                            self.tcx
-                                .sess
-                                .span_bug(expr.span, "transmute wasn't a bare fn?!");
+                            span_bug!(expr.span, "transmute wasn't a bare fn?!");
                         }
                     }
                 }
@@ -258,14 +184,3 @@ impl<'a, 'tcx, 'v> Visitor<'v> for IntrinsicCheckingVisitor<'a, 'tcx> {
         intravisit::walk_expr(self, expr);
     }
 }
-
-impl<'tcx> fmt::Debug for TransmuteRestriction<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "TransmuteRestriction(id={}, original=({:?},{:?}), substituted=({:?},{:?}))",
-               self.id,
-               self.original_from,
-               self.original_to,
-               self.substituted_from,
-               self.substituted_to)
-    }
-}
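
The rewritten check above accepts a `transmute` only when source and target are known to have the same size (comparing `SizeSkeleton`s and reporting E0512 otherwise). As a standalone illustration of that rule, and not the compiler's implementation, the same size comparison can be expressed at runtime with `std::mem::size_of`:

// Illustration only: transmute requires equal sizes, which the compiler
// checks statically; here the comparison is done with size_of at runtime.
use std::mem;

fn same_size<T, U>() -> bool {
    mem::size_of::<T>() == mem::size_of::<U>()
}

fn main() {
    // u32 -> [u8; 4]: both are 4 bytes, so a transmute is accepted.
    assert!(same_size::<u32, [u8; 4]>());
    let bytes: [u8; 4] = unsafe { mem::transmute(0xdeadbeefu32) };
    println!("{:?}", bytes);

    // u32 -> u64: different sizes, rejected by the compiler with E0512.
    assert!(!same_size::<u32, u64>());
}
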
index 6cbb90627eab5ee12dc6e153d24d292f51dffcab..86531ced8dfae41e1f174074ef3682c9e97d7bc2 100644 (file)
 pub use self::LangItem::*;
 
 use dep_graph::DepNode;
-use front::map as hir_map;
+use hir::map as hir_map;
 use session::Session;
 use middle::cstore::CrateStore;
-use middle::def_id::DefId;
-use middle::ty;
+use hir::def_id::DefId;
+use ty;
 use middle::weak_lang_items;
 use util::nodemap::FnvHashMap;
 
 use syntax::ast;
 use syntax::attr::AttrMetaMethods;
-use syntax::codemap::{DUMMY_SP, Span};
 use syntax::parse::token::InternedString;
-use rustc_front::intravisit::Visitor;
-use rustc_front::hir;
+use hir::intravisit::Visitor;
+use hir;
 
 // The actual lang items defined come at the end of this file in one handy table.
 // So you probably just want to nip down to the end.
@@ -120,9 +119,9 @@ impl LanguageItems {
 
     pub fn fn_trait_kind(&self, id: DefId) -> Option<ty::ClosureKind> {
         let def_id_kinds = [
-            (self.fn_trait(), ty::FnClosureKind),
-            (self.fn_mut_trait(), ty::FnMutClosureKind),
-            (self.fn_once_trait(), ty::FnOnceClosureKind),
+            (self.fn_trait(), ty::ClosureKind::Fn),
+            (self.fn_mut_trait(), ty::ClosureKind::FnMut),
+            (self.fn_once_trait(), ty::ClosureKind::FnOnce),
             ];
 
         for &(opt_def_id, kind) in &def_id_kinds {
@@ -158,7 +157,12 @@ impl<'a, 'v, 'tcx> Visitor<'v> for LanguageItemCollector<'a, 'tcx> {
             let item_index = self.item_refs.get(&value[..]).cloned();
 
             if let Some(item_index) = item_index {
-                self.collect_item(item_index, self.ast_map.local_def_id(item.id), item.span)
+                self.collect_item(item_index, self.ast_map.local_def_id(item.id))
+            } else {
+                let span = self.ast_map.span(item.id);
+                span_err!(self.session, span, E0522,
+                          "definition of an unknown language item: `{}`.",
+                          &value[..]);
             }
         }
     }
@@ -180,15 +184,26 @@ impl<'a, 'tcx> LanguageItemCollector<'a, 'tcx> {
     }
 
     pub fn collect_item(&mut self, item_index: usize,
-                        item_def_id: DefId, span: Span) {
+                        item_def_id: DefId) {
         // Check for duplicates.
         match self.items.items[item_index] {
             Some(original_def_id) if original_def_id != item_def_id => {
                 let cstore = &self.session.cstore;
-                span_err!(self.session, span, E0152,
-                          "duplicate entry for `{}`, first definition found in `{}`",
-                          LanguageItems::item_name(item_index),
-                          cstore.crate_name(item_def_id.krate));
+                let span = self.ast_map.span_if_local(item_def_id)
+                                       .expect("we should have found local duplicate earlier");
+                let mut err = struct_span_err!(self.session,
+                                               span,
+                                               E0152,
+                                               "duplicate lang item found: `{}`.",
+                                               LanguageItems::item_name(item_index));
+                if let Some(span) = self.ast_map.span_if_local(original_def_id) {
+                    span_note!(&mut err, span,
+                               "first defined here.");
+                } else {
+                    err.note(&format!("first defined in crate `{}`.",
+                                      cstore.crate_name(original_def_id.krate)));
+                }
+                err.emit();
             }
             _ => {
                 // OK.
@@ -205,17 +220,18 @@ impl<'a, 'tcx> LanguageItemCollector<'a, 'tcx> {
 
     pub fn collect_external_language_items(&mut self) {
         let cstore = &self.session.cstore;
+
         for cnum in cstore.crates() {
             for (index, item_index) in cstore.lang_items(cnum) {
                 let def_id = DefId { krate: cnum, index: index };
-                self.collect_item(item_index, def_id, DUMMY_SP);
+                self.collect_item(item_index, def_id);
             }
         }
     }
 
     pub fn collect(&mut self, krate: &hir::Crate) {
-        self.collect_local_language_items(krate);
         self.collect_external_language_items();
+        self.collect_local_language_items(krate);
     }
 }
 
@@ -302,10 +318,6 @@ lets_do_this! {
     ShrAssignTraitLangItem,          "shr_assign",              shr_assign_trait;
     IndexTraitLangItem,              "index",                   index_trait;
     IndexMutTraitLangItem,           "index_mut",               index_mut_trait;
-    RangeStructLangItem,             "range",                   range_struct;
-    RangeFromStructLangItem,         "range_from",              range_from_struct;
-    RangeToStructLangItem,           "range_to",                range_to_struct;
-    RangeFullStructLangItem,         "range_full",              range_full_struct;
 
     UnsafeCellTypeLangItem,          "unsafe_cell",             unsafe_cell_type;
 
index a487ddbc2b1c258aa5a3489e158f256a774c1ca2..35991ae56c8a1477097bd30f065c7aa99acf23d9 100644 (file)
@@ -110,9 +110,12 @@ use self::LiveNodeKind::*;
 use self::VarKind::*;
 
 use dep_graph::DepNode;
-use middle::def::*;
-use middle::pat_util;
-use middle::ty;
+use hir::def::*;
+use hir::pat_util;
+use ty::{self, TyCtxt, ParameterEnvironment};
+use traits::{self, ProjectionMode};
+use infer;
+use ty::subst::Subst;
 use lint;
 use util::nodemap::NodeMap;
 
@@ -125,10 +128,10 @@ use syntax::codemap::{BytePos, original_sp, Span};
 use syntax::parse::token::special_idents;
 use syntax::ptr::P;
 
-use rustc_front::hir::Expr;
-use rustc_front::hir;
-use rustc_front::print::pprust::{expr_to_string, block_to_string};
-use rustc_front::intravisit::{self, Visitor, FnKind};
+use hir::Expr;
+use hir;
+use hir::print::{expr_to_string, block_to_string};
+use hir::intravisit::{self, Visitor, FnKind};
 
 /// For use with `propagate_through_loop`.
 enum LoopKind<'a> {
@@ -166,7 +169,7 @@ enum LiveNodeKind {
     ExitNode
 }
 
-fn live_node_kind_to_string(lnk: LiveNodeKind, cx: &ty::ctxt) -> String {
+fn live_node_kind_to_string(lnk: LiveNodeKind, cx: &TyCtxt) -> String {
     let cm = cx.sess.codemap();
     match lnk {
         FreeVarNode(s) => {
@@ -192,7 +195,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for IrMaps<'a, 'tcx> {
     fn visit_arm(&mut self, a: &hir::Arm) { visit_arm(self, a); }
 }
 
-pub fn check_crate(tcx: &ty::ctxt) {
+pub fn check_crate(tcx: &TyCtxt) {
     let _task = tcx.dep_graph.in_task(DepNode::Liveness);
     tcx.map.krate().visit_all_items(&mut IrMaps::new(tcx));
     tcx.sess.abort_if_errors();
@@ -260,7 +263,7 @@ enum VarKind {
 }
 
 struct IrMaps<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
 
     num_live_nodes: usize,
     num_vars: usize,
@@ -272,7 +275,7 @@ struct IrMaps<'a, 'tcx: 'a> {
 }
 
 impl<'a, 'tcx> IrMaps<'a, 'tcx> {
-    fn new(tcx: &'a ty::ctxt<'tcx>) -> IrMaps<'a, 'tcx> {
+    fn new(tcx: &'a TyCtxt<'tcx>) -> IrMaps<'a, 'tcx> {
         IrMaps {
             tcx: tcx,
             num_live_nodes: 0,
@@ -322,13 +325,10 @@ impl<'a, 'tcx> IrMaps<'a, 'tcx> {
 
     fn variable(&self, node_id: NodeId, span: Span) -> Variable {
         match self.variable_map.get(&node_id) {
-          Some(&var) => var,
-          None => {
-            self.tcx
-                .sess
-                .span_bug(span, &format!("no variable registered for id {}",
-                                        node_id));
-          }
+            Some(&var) => var,
+            None => {
+                span_bug!(span, "no variable registered for id {}", node_id);
+            }
         }
     }
 
@@ -484,7 +484,7 @@ fn visit_expr(ir: &mut IrMaps, expr: &Expr) {
         ir.add_live_node_for_node(expr.id, ExprNode(expr.span));
         intravisit::walk_expr(ir, expr);
       }
-      hir::ExprBinary(op, _, _) if ::rustc_front::util::lazy_binop(op.node) => {
+      hir::ExprBinary(op, _, _) if op.node.is_lazy() => {
         ir.add_live_node_for_node(expr.id, ExprNode(expr.span));
         intravisit::walk_expr(ir, expr);
       }
@@ -498,7 +498,7 @@ fn visit_expr(ir: &mut IrMaps, expr: &Expr) {
       hir::ExprBlock(..) | hir::ExprAssign(..) | hir::ExprAssignOp(..) |
       hir::ExprStruct(..) | hir::ExprRepeat(..) |
       hir::ExprInlineAsm(..) | hir::ExprBox(..) |
-      hir::ExprRange(..) | hir::ExprType(..) => {
+      hir::ExprType(..) => {
           intravisit::walk_expr(ir, expr);
       }
     }
@@ -575,10 +575,10 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
             // above and the propagation code below; the two sets of
             // code have to agree about which AST nodes are worth
             // creating liveness nodes for.
-            self.ir.tcx.sess.span_bug(
+            span_bug!(
                 span,
-                &format!("no live node registered for node {}",
-                        node_id));
+                "no live node registered for node {}",
+                node_id);
           }
         }
     }
@@ -683,7 +683,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
         for var_idx in 0..self.ir.num_vars {
             let idx = node_base_idx + var_idx;
             if test(idx).is_valid() {
-                try!(write!(wr, " {:?}", Variable(var_idx)));
+                write!(wr, " {:?}", Variable(var_idx))?;
             }
         }
         Ok(())
@@ -700,15 +700,15 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
                 // to find with one
                 match self.ir.tcx.def_map.borrow().get(&id).map(|d| d.full_def()) {
                     Some(Def::Label(loop_id)) => loop_id,
-                    _ => self.ir.tcx.sess.span_bug(sp, "label on break/loop \
-                                                        doesn't refer to a loop")
+                    _ => span_bug!(sp, "label on break/loop \
+                                        doesn't refer to a loop")
                 }
             }
             None => {
                 // Vanilla 'break' or 'loop', so use the enclosing
                 // loop scope
                 if self.loop_scope.is_empty() {
-                    self.ir.tcx.sess.span_bug(sp, "break outside loop");
+                    span_bug!(sp, "break outside loop");
                 } else {
                     *self.loop_scope.last().unwrap()
                 }
@@ -964,7 +964,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
                  let caps = match this.ir.capture_info_map.get(&expr.id) {
                     Some(caps) => caps.clone(),
                     None => {
-                        this.ir.tcx.sess.span_bug(expr.span, "no registered caps");
+                        span_bug!(expr.span, "no registered caps");
                      }
                  };
                  caps.iter().rev().fold(succ, |succ, cap| {
@@ -1058,8 +1058,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
 
               match self.break_ln.get(&sc) {
                   Some(&b) => b,
-                  None => self.ir.tcx.sess.span_bug(expr.span,
-                                                    "break to unknown label")
+                  None => span_bug!(expr.span, "break to unknown label")
               }
           }
 
@@ -1072,8 +1071,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
 
               match self.cont_ln.get(&sc) {
                   Some(&b) => b,
-                  None => self.ir.tcx.sess.span_bug(expr.span,
-                                                    "loop to unknown label")
+                  None => span_bug!(expr.span, "loop to unknown label")
               }
           }
 
@@ -1086,11 +1084,17 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
           }
 
           hir::ExprAssignOp(_, ref l, ref r) => {
-            // see comment on lvalues in
-            // propagate_through_lvalue_components()
-            let succ = self.write_lvalue(&l, succ, ACC_WRITE|ACC_READ);
-            let succ = self.propagate_through_expr(&r, succ);
-            self.propagate_through_lvalue_components(&l, succ)
+            // an overloaded assign op is like a method call
+            if self.ir.tcx.is_method_call(expr.id) {
+                let succ = self.propagate_through_expr(&l, succ);
+                self.propagate_through_expr(&r, succ)
+            } else {
+                // see comment on lvalues in
+                // propagate_through_lvalue_components()
+                let succ = self.write_lvalue(&l, succ, ACC_WRITE|ACC_READ);
+                let succ = self.propagate_through_expr(&r, succ);
+                self.propagate_through_lvalue_components(&l, succ)
+            }
           }
 
           // Uninteresting cases: just propagate in rev exec order
@@ -1138,7 +1142,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
             self.propagate_through_exprs(&exprs[..], succ)
           }
 
-          hir::ExprBinary(op, ref l, ref r) if ::rustc_front::util::lazy_binop(op.node) => {
+          hir::ExprBinary(op, ref l, ref r) if op.node.is_lazy() => {
             let r_succ = self.propagate_through_expr(&r, succ);
 
             let ln = self.live_node(expr.id, expr.span);
@@ -1154,11 +1158,6 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
             self.propagate_through_expr(&l, r_succ)
           }
 
-          hir::ExprRange(ref e1, ref e2) => {
-            let succ = e2.as_ref().map_or(succ, |e| self.propagate_through_expr(&e, succ));
-            e1.as_ref().map_or(succ, |e| self.propagate_through_expr(&e, succ))
-          }
-
           hir::ExprBox(ref e) |
           hir::ExprAddrOf(_, ref e) |
           hir::ExprCast(ref e, _) |
@@ -1167,25 +1166,21 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
             self.propagate_through_expr(&e, succ)
           }
 
-          hir::ExprInlineAsm(ref ia) => {
-
-            let succ = ia.outputs.iter().rev().fold(succ,
-                |succ, out| {
-                    // see comment on lvalues
-                    // in propagate_through_lvalue_components()
-                    if out.is_indirect {
-                        self.propagate_through_expr(&out.expr, succ)
-                    } else {
-                        let acc = if out.is_rw { ACC_WRITE|ACC_READ } else { ACC_WRITE };
-                        let succ = self.write_lvalue(&out.expr, succ, acc);
-                        self.propagate_through_lvalue_components(&out.expr, succ)
-                    }
+          hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => {
+            let succ = ia.outputs.iter().zip(outputs).rev().fold(succ, |succ, (o, output)| {
+                // see comment on lvalues
+                // in propagate_through_lvalue_components()
+                if o.is_indirect {
+                    self.propagate_through_expr(output, succ)
+                } else {
+                    let acc = if o.is_rw { ACC_WRITE|ACC_READ } else { ACC_WRITE };
+                    let succ = self.write_lvalue(output, succ, acc);
+                    self.propagate_through_lvalue_components(output, succ)
                 }
-            );
+            });
+
             // Inputs are executed first. Propagate last because of rev order
-            ia.inputs.iter().rev().fold(succ, |succ, &(_, ref expr)| {
-                self.propagate_through_expr(&expr, succ)
-            })
+            self.propagate_through_exprs(inputs, succ)
           }
 
           hir::ExprLit(..) => {
@@ -1415,22 +1410,24 @@ fn check_expr(this: &mut Liveness, expr: &Expr) {
       }
 
       hir::ExprAssignOp(_, ref l, _) => {
-        this.check_lvalue(&l);
+        if !this.ir.tcx.is_method_call(expr.id) {
+            this.check_lvalue(&l);
+        }
 
         intravisit::walk_expr(this, expr);
       }
 
-      hir::ExprInlineAsm(ref ia) => {
-        for &(_, ref input) in &ia.inputs {
-          this.visit_expr(&input);
+      hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => {
+        for input in inputs {
+          this.visit_expr(input);
         }
 
         // Output operands must be lvalues
-        for out in &ia.outputs {
-          if !out.is_indirect {
-            this.check_lvalue(&out.expr);
+        for (o, output) in ia.outputs.iter().zip(outputs) {
+          if !o.is_indirect {
+            this.check_lvalue(output);
           }
-          this.visit_expr(&out.expr);
+          this.visit_expr(output);
         }
 
         intravisit::walk_expr(this, expr);
@@ -1446,7 +1443,7 @@ fn check_expr(this: &mut Liveness, expr: &Expr) {
       hir::ExprBlock(..) | hir::ExprAddrOf(..) |
       hir::ExprStruct(..) | hir::ExprRepeat(..) |
       hir::ExprClosure(..) | hir::ExprPath(..) | hir::ExprBox(..) |
-      hir::ExprRange(..) | hir::ExprType(..) => {
+      hir::ExprType(..) => {
         intravisit::walk_expr(this, expr);
       }
     }
@@ -1487,14 +1484,25 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
 
         match fn_ret {
             ty::FnConverging(t_ret)
-                if self.live_on_entry(entry_ln, self.s.no_ret_var).is_some() => {
-
-                if t_ret.is_nil() {
+                    if self.live_on_entry(entry_ln, self.s.no_ret_var).is_some() => {
+
+                let param_env = ParameterEnvironment::for_item(&self.ir.tcx, id);
+                let t_ret_subst = t_ret.subst(&self.ir.tcx, &param_env.free_substs);
+                let infcx = infer::new_infer_ctxt(&self.ir.tcx,
+                                                  &self.ir.tcx.tables,
+                                                  Some(param_env),
+                                                  ProjectionMode::Any);
+                let cause = traits::ObligationCause::dummy();
+                let norm = traits::fully_normalize(&infcx,
+                                                   cause,
+                                                   &t_ret_subst);
+
+                if norm.unwrap().is_nil() {
                     // for nil return types, it is ok to not return a value expl.
                 } else {
                     let ends_with_stmt = match body.expr {
                         None if !body.stmts.is_empty() =>
-                            match body.stmts.first().unwrap().node {
+                            match body.stmts.last().unwrap().node {
                                 hir::StmtSemi(ref e, _) => {
                                     self.ir.tcx.expr_ty(&e) == t_ret
                                 },
@@ -1507,7 +1515,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
                                                    E0269,
                                                    "not all control paths return a value");
                     if ends_with_stmt {
-                        let last_stmt = body.stmts.first().unwrap();
+                        let last_stmt = body.stmts.last().unwrap();
                         let original_span = original_sp(self.ir.tcx.sess.codemap(),
                                                         last_stmt.span, sp);
                         let span_semicolon = Span {
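
The `is_lazy()` guard in the first hunk above distinguishes `&&`/`||`, which short-circuit, from strict operators: the right-hand operand may never execute, so liveness has to treat reads in it as conditional. A minimal standalone sketch of the short-circuit behaviour (plain Rust, not rustc code; the `rhs` helper is invented for illustration):

fn rhs(reads: &mut u32) -> bool {
    *reads += 1;
    true
}

fn main() {
    let mut reads = 0u32;
    // `&&` short-circuits on `false`, so the right operand never runs.
    let _ = false && rhs(&mut reads);
    assert_eq!(reads, 0);
    // With a `true` left operand the right operand is evaluated exactly once.
    let _ = true && rhs(&mut reads);
    assert_eq!(reads, 1);
}
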
index fef35764e1cc13d216b817df350747d9ce631f4d..2f77552c389ad4e710dccd924512bb5532d045f0 100644 (file)
@@ -71,16 +71,16 @@ pub use self::deref_kind::*;
 
 use self::Aliasability::*;
 
-use middle::def_id::DefId;
-use front::map as ast_map;
-use middle::infer;
+use hir::def_id::DefId;
+use hir::map as ast_map;
+use infer;
 use middle::const_qualif::ConstQualif;
-use middle::def::Def;
-use middle::ty::adjustment;
-use middle::ty::{self, Ty};
+use hir::def::Def;
+use ty::adjustment;
+use ty::{self, Ty, TyCtxt};
 
-use rustc_front::hir::{MutImmutable, MutMutable, PatKind};
-use rustc_front::hir;
+use hir::{MutImmutable, MutMutable, PatKind};
+use hir;
 use syntax::ast;
 use syntax::codemap::Span;
 
@@ -302,7 +302,7 @@ impl MutabilityCategory {
         ret
     }
 
-    fn from_local(tcx: &ty::ctxt, id: ast::NodeId) -> MutabilityCategory {
+    fn from_local(tcx: &TyCtxt, id: ast::NodeId) -> MutabilityCategory {
         let ret = match tcx.map.get(id) {
             ast_map::NodeLocal(p) => match p.node {
                 PatKind::Ident(bind_mode, _, _) => {
@@ -312,9 +312,9 @@ impl MutabilityCategory {
                         McImmutable
                     }
                 }
-                _ => tcx.sess.span_bug(p.span, "expected identifier pattern")
+                _ => span_bug!(p.span, "expected identifier pattern")
             },
-            _ => tcx.sess.span_bug(tcx.map.span(id), "expected identifier pattern")
+            _ => span_bug!(tcx.map.span(id), "expected identifier pattern")
         };
         debug!("MutabilityCategory::{}(tcx, id={:?}) => {:?}",
                "from_local", id, ret);
@@ -363,7 +363,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
         MemCategorizationContext { typer: typer }
     }
 
-    fn tcx(&self) -> &'a ty::ctxt<'tcx> {
+    fn tcx(&self) -> &'a TyCtxt<'tcx> {
         self.typer.tcx
     }
 
@@ -378,7 +378,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
     }
 
     fn expr_ty_adjusted(&self, expr: &hir::Expr) -> McResult<Ty<'tcx>> {
-        let unadjusted_ty = try!(self.expr_ty(expr));
+        let unadjusted_ty = self.expr_ty(expr)?;
         Ok(unadjusted_ty.adjust(
             self.tcx(), expr.span, expr.id,
             self.typer.adjustments().get(&expr.id),
@@ -390,7 +390,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
     }
 
     fn pat_ty(&self, pat: &hir::Pat) -> McResult<Ty<'tcx>> {
-        let base_ty = try!(self.typer.node_ty(pat.id));
+        let base_ty = self.typer.node_ty(pat.id)?;
         // FIXME (Issue #18207): This code detects whether we are
         // looking at a `ref x`, and if so, figures out what the type
         // *being borrowed* is.  But ideally we would put in a more
@@ -436,7 +436,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
                                adjustment,
                                expr);
                         // Result is an rvalue.
-                        let expr_ty = try!(self.expr_ty_adjusted(expr));
+                        let expr_ty = self.expr_ty_adjusted(expr)?;
                         Ok(self.cat_rvalue_node(expr.id(), expr.span(), expr_ty))
                     }
                 }
@@ -448,12 +448,12 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
                                expr: &hir::Expr,
                                autoderefs: usize)
                                -> McResult<cmt<'tcx>> {
-        let mut cmt = try!(self.cat_expr_unadjusted(expr));
+        let mut cmt = self.cat_expr_unadjusted(expr)?;
         debug!("cat_expr_autoderefd: autoderefs={}, cmt={:?}",
                autoderefs,
                cmt);
         for deref in 1..autoderefs + 1 {
-            cmt = try!(self.cat_deref(expr, cmt, deref, None));
+            cmt = self.cat_deref(expr, cmt, deref, None)?;
         }
         return Ok(cmt);
     }
@@ -461,15 +461,15 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
     pub fn cat_expr_unadjusted(&self, expr: &hir::Expr) -> McResult<cmt<'tcx>> {
         debug!("cat_expr: id={} expr={:?}", expr.id, expr);
 
-        let expr_ty = try!(self.expr_ty(expr));
+        let expr_ty = self.expr_ty(expr)?;
         match expr.node {
           hir::ExprUnary(hir::UnDeref, ref e_base) => {
-            let base_cmt = try!(self.cat_expr(&e_base));
+            let base_cmt = self.cat_expr(&e_base)?;
             self.cat_deref(expr, base_cmt, 0, None)
           }
 
           hir::ExprField(ref base, f_name) => {
-            let base_cmt = try!(self.cat_expr(&base));
+            let base_cmt = self.cat_expr(&base)?;
             debug!("cat_expr(cat_field): id={} expr={:?} base={:?}",
                    expr.id,
                    expr,
@@ -478,7 +478,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
           }
 
           hir::ExprTupField(ref base, idx) => {
-            let base_cmt = try!(self.cat_expr(&base));
+            let base_cmt = self.cat_expr(&base)?;
             Ok(self.cat_tup_field(expr, base_cmt, idx.node, expr_ty))
           }
 
@@ -509,7 +509,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
                     self.cat_deref_common(expr, base_cmt, 1, elem_ty, Some(context), true)
                 }
                 None => {
-                    self.cat_index(expr, try!(self.cat_expr(&base)), context)
+                    self.cat_index(expr, self.cat_expr(&base)?, context)
                 }
             }
           }
@@ -526,7 +526,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
           hir::ExprAddrOf(..) | hir::ExprCall(..) |
           hir::ExprAssign(..) | hir::ExprAssignOp(..) |
           hir::ExprClosure(..) | hir::ExprRet(..) |
-          hir::ExprUnary(..) | hir::ExprRange(..) |
+          hir::ExprUnary(..) |
           hir::ExprMethodCall(..) | hir::ExprCast(..) |
           hir::ExprVec(..) | hir::ExprTup(..) | hir::ExprIf(..) |
           hir::ExprBinary(..) | hir::ExprWhile(..) |
@@ -559,8 +559,8 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
           Def::TyParam(..) |
           Def::Label(_) | Def::SelfTy(..) |
           Def::AssociatedTy(..) => {
-              self.tcx().sess.span_bug(span, &format!("Unexpected definition in \
-                                                       memory categorization: {:?}", def));
+              span_bug!(span, "Unexpected definition in \
+                               memory categorization: {:?}", def);
           }
 
           Def::Static(_, mutbl) => {
@@ -575,7 +575,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
           }
 
           Def::Upvar(_, var_id, _, fn_node_id) => {
-              let ty = try!(self.node_ty(fn_node_id));
+              let ty = self.node_ty(fn_node_id)?;
               match ty.sty {
                   ty::TyClosure(closure_id, _) => {
                       match self.typer.closure_kind(closure_id) {
@@ -583,18 +583,19 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
                               self.cat_upvar(id, span, var_id, fn_node_id, kind)
                           }
                           None => {
-                              self.tcx().sess.span_bug(
+                              span_bug!(
                                   span,
-                                  &format!("No closure kind for {:?}", closure_id));
+                                  "No closure kind for {:?}",
+                                  closure_id);
                           }
                       }
                   }
                   _ => {
-                      self.tcx().sess.span_bug(
+                      span_bug!(
                           span,
-                          &format!("Upvar of non-closure {} - {:?}",
-                                  fn_node_id,
-                                  ty));
+                          "Upvar of non-closure {} - {:?}",
+                          fn_node_id,
+                          ty);
                   }
               }
           }
@@ -610,7 +611,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
             }))
           }
 
-          Def::Err => panic!("Def::Err in memory categorization")
+          Def::Err => bug!("Def::Err in memory categorization")
         }
     }
 
@@ -649,7 +650,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
 
         let upvar_id = ty::UpvarId { var_id: var_id,
                                      closure_expr_id: fn_node_id };
-        let var_ty = try!(self.node_ty(var_id));
+        let var_ty = self.node_ty(var_id)?;
 
         // Mutability of original variable itself
         let var_mutbl = MutabilityCategory::from_local(self.tcx(), var_id);
@@ -670,13 +671,13 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
         // conceptually a `&mut` or `&` reference, so we have to add a
         // deref.
         let cmt_result = match kind {
-            ty::FnOnceClosureKind => {
+            ty::ClosureKind::FnOnce => {
                 cmt_result
             }
-            ty::FnMutClosureKind => {
+            ty::ClosureKind::FnMut => {
                 self.env_deref(id, span, upvar_id, var_mutbl, ty::MutBorrow, cmt_result)
             }
-            ty::FnClosureKind => {
+            ty::ClosureKind::Fn => {
                 self.env_deref(id, span, upvar_id, var_mutbl, ty::ImmBorrow, cmt_result)
             }
         };
@@ -723,12 +724,12 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
         let fn_body_id = {
             let fn_expr = match self.tcx().map.find(upvar_id.closure_expr_id) {
                 Some(ast_map::NodeExpr(e)) => e,
-                _ => unreachable!()
+                _ => bug!()
             };
 
             match fn_expr.node {
                 hir::ExprClosure(_, _, ref body) => body.id,
-                _ => unreachable!()
+                _ => bug!()
             }
         };
 
@@ -921,12 +922,12 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
                                     implicit: bool)
                                     -> McResult<cmt<'tcx>>
     {
-        let (m, cat) = match try!(deref_kind(base_cmt.ty, deref_context)) {
+        let (m, cat) = match deref_kind(base_cmt.ty, deref_context)? {
             deref_ptr(ptr) => {
                 let ptr = if implicit {
                     match ptr {
                         BorrowedPtr(bk, r) => Implicit(bk, r),
-                        _ => self.tcx().sess.span_bug(node.span(),
+                        _ => span_bug!(node.span(),
                             "Implicit deref of non-borrowed pointer")
                     }
                 } else {
@@ -1030,7 +1031,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
                              context: InteriorOffsetKind)
                              -> McResult<cmt<'tcx>>
     {
-        let ret = match try!(deref_kind(base_cmt.ty, Some(context))) {
+        let ret = match deref_kind(base_cmt.ty, Some(context))? {
             deref_ptr(ptr) => {
                 // for unique ptrs, we inherit mutability from the
                 // owning reference.
@@ -1044,7 +1045,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
                     mutbl:m,
                     ty: match base_cmt.ty.builtin_deref(false, ty::NoPreference) {
                         Some(mt) => mt.ty,
-                        None => self.tcx().sess.bug("Found non-derefable type")
+                        None => bug!("Found non-derefable type")
                     },
                     note: NoteNone
                 })
@@ -1069,19 +1070,19 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
                              vec_cmt: cmt<'tcx>,
                              slice_pat: &hir::Pat)
                              -> McResult<(cmt<'tcx>, hir::Mutability, ty::Region)> {
-        let slice_ty = try!(self.node_ty(slice_pat.id));
+        let slice_ty = self.node_ty(slice_pat.id)?;
         let (slice_mutbl, slice_r) = vec_slice_info(self.tcx(),
                                                     slice_pat,
                                                     slice_ty);
         let context = InteriorOffsetKind::Pattern;
-        let cmt_vec = try!(self.deref_vec(slice_pat, vec_cmt, context));
-        let cmt_slice = try!(self.cat_index(slice_pat, cmt_vec, context));
+        let cmt_vec = self.deref_vec(slice_pat, vec_cmt, context)?;
+        let cmt_slice = self.cat_index(slice_pat, cmt_vec, context)?;
         return Ok((cmt_slice, slice_mutbl, slice_r));
 
         /// In a pattern like [a, b, ..c], normally `c` has slice type, but if you have [a, b,
         /// ..ref c], then the type of `ref c` will be `&&[]`, so to extract the slice details we
         /// have to recurse through rptrs.
-        fn vec_slice_info(tcx: &ty::ctxt,
+        fn vec_slice_info(tcx: &TyCtxt,
                           pat: &hir::Pat,
                           slice_ty: Ty)
                           -> (hir::Mutability, ty::Region) {
@@ -1092,8 +1093,8 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
                 },
 
                 _ => {
-                    tcx.sess.span_bug(pat.span,
-                                      "type of slice pattern is not a slice");
+                    span_bug!(pat.span,
+                              "type of slice pattern is not a slice");
                 }
             }
         }
@@ -1235,35 +1236,36 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
                 Some(Def::Variant(..)) => {
                     // variant(x, y, z)
                     for (i, subpat) in subpats.iter().enumerate() {
-                        let subpat_ty = try!(self.pat_ty(&subpat)); // see (*2)
+                        let subpat_ty = self.pat_ty(&subpat)?; // see (*2)
 
                         let subcmt =
                             self.cat_imm_interior(
                                 pat, cmt.clone(), subpat_ty,
                                 InteriorField(PositionalField(i)));
 
-                        try!(self.cat_pattern_(subcmt, &subpat, op));
+                        self.cat_pattern_(subcmt, &subpat, op)?;
                     }
                 }
                 Some(Def::Struct(..)) => {
                     for (i, subpat) in subpats.iter().enumerate() {
-                        let subpat_ty = try!(self.pat_ty(&subpat)); // see (*2)
+                        let subpat_ty = self.pat_ty(&subpat)?; // see (*2)
                         let cmt_field =
                             self.cat_imm_interior(
                                 pat, cmt.clone(), subpat_ty,
                                 InteriorField(PositionalField(i)));
-                        try!(self.cat_pattern_(cmt_field, &subpat, op));
+                        self.cat_pattern_(cmt_field, &subpat, op)?;
                     }
                 }
                 Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => {
                     for subpat in subpats {
-                        try!(self.cat_pattern_(cmt.clone(), &subpat, op));
+                        self.cat_pattern_(cmt.clone(), &subpat, op)?;
                     }
                 }
                 _ => {
-                    self.tcx().sess.span_bug(
+                    span_bug!(
                         pat.span,
-                        &format!("enum pattern didn't resolve to enum or struct {:?}", opt_def));
+                        "enum pattern didn't resolve to enum or struct {:?}",
+                        opt_def);
                 }
             }
           }
@@ -1273,27 +1275,27 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
           }
 
           PatKind::Ident(_, _, Some(ref subpat)) => {
-              try!(self.cat_pattern_(cmt, &subpat, op));
+              self.cat_pattern_(cmt, &subpat, op)?;
           }
 
           PatKind::Struct(_, ref field_pats, _) => {
             // {f1: p1, ..., fN: pN}
             for fp in field_pats {
-                let field_ty = try!(self.pat_ty(&fp.node.pat)); // see (*2)
+                let field_ty = self.pat_ty(&fp.node.pat)?; // see (*2)
                 let cmt_field = self.cat_field(pat, cmt.clone(), fp.node.name, field_ty);
-                try!(self.cat_pattern_(cmt_field, &fp.node.pat, op));
+                self.cat_pattern_(cmt_field, &fp.node.pat, op)?;
             }
           }
 
           PatKind::Tup(ref subpats) => {
             // (p1, ..., pN)
             for (i, subpat) in subpats.iter().enumerate() {
-                let subpat_ty = try!(self.pat_ty(&subpat)); // see (*2)
+                let subpat_ty = self.pat_ty(&subpat)?; // see (*2)
                 let subcmt =
                     self.cat_imm_interior(
                         pat, cmt.clone(), subpat_ty,
                         InteriorField(PositionalField(i)));
-                try!(self.cat_pattern_(subcmt, &subpat, op));
+                self.cat_pattern_(subcmt, &subpat, op)?;
             }
           }
 
@@ -1301,24 +1303,24 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
             // box p1, &p1, &mut p1.  we can ignore the mutability of
             // PatKind::Ref since that information is already contained
             // in the type.
-            let subcmt = try!(self.cat_deref(pat, cmt, 0, None));
-              try!(self.cat_pattern_(subcmt, &subpat, op));
+            let subcmt = self.cat_deref(pat, cmt, 0, None)?;
+              self.cat_pattern_(subcmt, &subpat, op)?;
           }
 
           PatKind::Vec(ref before, ref slice, ref after) => {
               let context = InteriorOffsetKind::Pattern;
-              let vec_cmt = try!(self.deref_vec(pat, cmt, context));
-              let elt_cmt = try!(self.cat_index(pat, vec_cmt, context));
+              let vec_cmt = self.deref_vec(pat, cmt, context)?;
+              let elt_cmt = self.cat_index(pat, vec_cmt, context)?;
               for before_pat in before {
-                  try!(self.cat_pattern_(elt_cmt.clone(), &before_pat, op));
+                  self.cat_pattern_(elt_cmt.clone(), &before_pat, op)?;
               }
               if let Some(ref slice_pat) = *slice {
-                  let slice_ty = try!(self.pat_ty(&slice_pat));
+                  let slice_ty = self.pat_ty(&slice_pat)?;
                   let slice_cmt = self.cat_rvalue_node(pat.id(), pat.span(), slice_ty);
-                  try!(self.cat_pattern_(slice_cmt, &slice_pat, op));
+                  self.cat_pattern_(slice_cmt, &slice_pat, op)?;
               }
               for after_pat in after {
-                  try!(self.cat_pattern_(elt_cmt.clone(), &after_pat, op));
+                  self.cat_pattern_(elt_cmt.clone(), &after_pat, op)?;
               }
           }
 
@@ -1387,7 +1389,7 @@ impl<'tcx> cmt_<'tcx> {
     }
 
     /// Returns `FreelyAliasable(_)` if this lvalue represents a freely aliasable pointer type.
-    pub fn freely_aliasable(&self, ctxt: &ty::ctxt<'tcx>)
+    pub fn freely_aliasable(&self, ctxt: &TyCtxt<'tcx>)
                             -> Aliasability {
         // Maybe non-obvious: copied upvars can only be considered
         // non-aliasable in once closures, since any other kind can be
@@ -1451,10 +1453,10 @@ impl<'tcx> cmt_<'tcx> {
                         match inner.cat {
                             Categorization::Deref(ref inner, _, _) => inner.clone(),
                             Categorization::Upvar(..) => inner.clone(),
-                            _ => unreachable!()
+                            _ => bug!()
                         }
                     }
-                    _ => unreachable!()
+                    _ => bug!()
                 })
             }
             NoteNone => None
@@ -1462,7 +1464,7 @@ impl<'tcx> cmt_<'tcx> {
     }
 
 
-    pub fn descriptive_string(&self, tcx: &ty::ctxt) -> String {
+    pub fn descriptive_string(&self, tcx: &TyCtxt) -> String {
         match self.cat {
             Categorization::StaticItem => {
                 "static item".to_string()
@@ -1483,7 +1485,7 @@ impl<'tcx> cmt_<'tcx> {
                     Some(&Categorization::Upvar(ref var)) => {
                         var.to_string()
                     }
-                    Some(_) => unreachable!(),
+                    Some(_) => bug!(),
                     None => {
                         match pk {
                             Implicit(..) => {
@@ -1630,9 +1632,9 @@ impl fmt::Debug for Upvar {
 impl fmt::Display for Upvar {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         let kind = match self.kind {
-            ty::FnClosureKind => "Fn",
-            ty::FnMutClosureKind => "FnMut",
-            ty::FnOnceClosureKind => "FnOnce",
+            ty::ClosureKind::Fn => "Fn",
+            ty::ClosureKind::FnMut => "FnMut",
+            ty::ClosureKind::FnOnce => "FnOnce",
         };
         write!(f, "captured outer variable in an `{}` closure", kind)
     }
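
Most of the mechanical churn in the file above is `try!(expr)` becoming `expr?`; both early-return the error and unwrap the success value. A minimal standalone sketch of the equivalence (not part of this commit; the parse functions are invented for illustration):

use std::num::ParseIntError;

// Roughly what `try!(s.parse::<i32>())` expands to.
fn parse_with_try(s: &str) -> Result<i32, ParseIntError> {
    let n = match s.parse::<i32>() {
        Ok(v) => v,
        Err(e) => return Err(e),
    };
    Ok(n + 1)
}

// The `?` form used throughout the hunks above: same early return, less noise.
fn parse_with_question_mark(s: &str) -> Result<i32, ParseIntError> {
    let n = s.parse::<i32>()?;
    Ok(n + 1)
}

fn main() {
    assert_eq!(parse_with_try("41").unwrap(), 42);
    assert_eq!(parse_with_question_mark("41").unwrap(), 42);
    assert!(parse_with_question_mark("nope").is_err());
}
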
diff --git a/src/librustc/middle/pat_util.rs b/src/librustc/middle/pat_util.rs
deleted file mode 100644 (file)
index a1a3c19..0000000
--- a/src/librustc/middle/pat_util.rs
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use middle::def::*;
-use middle::def_id::DefId;
-use middle::ty;
-use util::nodemap::FnvHashMap;
-
-use syntax::ast;
-use rustc_front::hir::{self, PatKind};
-use rustc_front::util::walk_pat;
-use syntax::codemap::{respan, Span, Spanned, DUMMY_SP};
-
-use std::cell::RefCell;
-
-pub type PatIdMap = FnvHashMap<ast::Name, ast::NodeId>;
-
-// This is used because same-named variables in alternative patterns need to
-// use the NodeId of their namesake in the first pattern.
-pub fn pat_id_map(dm: &RefCell<DefMap>, pat: &hir::Pat) -> PatIdMap {
-    let mut map = FnvHashMap();
-    pat_bindings(dm, pat, |_bm, p_id, _s, path1| {
-        map.insert(path1.node, p_id);
-    });
-    map
-}
-
-pub fn pat_is_refutable(dm: &DefMap, pat: &hir::Pat) -> bool {
-    match pat.node {
-        PatKind::Lit(_) | PatKind::Range(_, _) | PatKind::QPath(..) => true,
-        PatKind::TupleStruct(..) |
-        PatKind::Path(..) |
-        PatKind::Ident(_, _, None) |
-        PatKind::Struct(..) => {
-            match dm.get(&pat.id).map(|d| d.full_def()) {
-                Some(Def::Variant(..)) => true,
-                _ => false
-            }
-        }
-        PatKind::Vec(_, _, _) => true,
-        _ => false
-    }
-}
-
-pub fn pat_is_variant_or_struct(dm: &DefMap, pat: &hir::Pat) -> bool {
-    match pat.node {
-        PatKind::TupleStruct(..) |
-        PatKind::Path(..) |
-        PatKind::Ident(_, _, None) |
-        PatKind::Struct(..) => {
-            match dm.get(&pat.id).map(|d| d.full_def()) {
-                Some(Def::Variant(..)) | Some(Def::Struct(..)) | Some(Def::TyAlias(..)) => true,
-                _ => false
-            }
-        }
-        _ => false
-    }
-}
-
-pub fn pat_is_const(dm: &DefMap, pat: &hir::Pat) -> bool {
-    match pat.node {
-        PatKind::Ident(_, _, None) | PatKind::Path(..) | PatKind::QPath(..) => {
-            match dm.get(&pat.id).map(|d| d.full_def()) {
-                Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => true,
-                _ => false
-            }
-        }
-        _ => false
-    }
-}
-
-// Same as above, except that partially-resolved defs cause `false` to be
-// returned instead of a panic.
-pub fn pat_is_resolved_const(dm: &DefMap, pat: &hir::Pat) -> bool {
-    match pat.node {
-        PatKind::Ident(_, _, None) | PatKind::Path(..) | PatKind::QPath(..) => {
-            match dm.get(&pat.id)
-                    .and_then(|d| if d.depth == 0 { Some(d.base_def) }
-                                  else { None } ) {
-                Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => true,
-                _ => false
-            }
-        }
-        _ => false
-    }
-}
-
-pub fn pat_is_binding(dm: &DefMap, pat: &hir::Pat) -> bool {
-    match pat.node {
-        PatKind::Ident(..) => {
-            !pat_is_variant_or_struct(dm, pat) &&
-            !pat_is_const(dm, pat)
-        }
-        _ => false
-    }
-}
-
-pub fn pat_is_binding_or_wild(dm: &DefMap, pat: &hir::Pat) -> bool {
-    match pat.node {
-        PatKind::Ident(..) => pat_is_binding(dm, pat),
-        PatKind::Wild => true,
-        _ => false
-    }
-}
-
-/// Call `it` on every "binding" in a pattern, e.g., on `a` in
-/// `match foo() { Some(a) => (), None => () }`
-pub fn pat_bindings<I>(dm: &RefCell<DefMap>, pat: &hir::Pat, mut it: I) where
-    I: FnMut(hir::BindingMode, ast::NodeId, Span, &Spanned<ast::Name>),
-{
-    walk_pat(pat, |p| {
-        match p.node {
-          PatKind::Ident(binding_mode, ref pth, _) if pat_is_binding(&dm.borrow(), p) => {
-            it(binding_mode, p.id, p.span, &respan(pth.span, pth.node.name));
-          }
-          _ => {}
-        }
-        true
-    });
-}
-pub fn pat_bindings_ident<I>(dm: &RefCell<DefMap>, pat: &hir::Pat, mut it: I) where
-    I: FnMut(hir::BindingMode, ast::NodeId, Span, &Spanned<hir::Ident>),
-{
-    walk_pat(pat, |p| {
-        match p.node {
-          PatKind::Ident(binding_mode, ref pth, _) if pat_is_binding(&dm.borrow(), p) => {
-            it(binding_mode, p.id, p.span, &respan(pth.span, pth.node));
-          }
-          _ => {}
-        }
-        true
-    });
-}
-
-/// Checks if the pattern contains any patterns that bind something to
-/// an ident, e.g. `foo`, or `Foo(foo)` or `foo @ Bar(..)`.
-pub fn pat_contains_bindings(dm: &DefMap, pat: &hir::Pat) -> bool {
-    let mut contains_bindings = false;
-    walk_pat(pat, |p| {
-        if pat_is_binding(dm, p) {
-            contains_bindings = true;
-            false // there's at least one binding, can short circuit now.
-        } else {
-            true
-        }
-    });
-    contains_bindings
-}
-
-/// Checks if the pattern contains any `ref` or `ref mut` bindings,
-/// and if yes whether its containing mutable ones or just immutables ones.
-pub fn pat_contains_ref_binding(dm: &RefCell<DefMap>, pat: &hir::Pat) -> Option<hir::Mutability> {
-    let mut result = None;
-    pat_bindings(dm, pat, |mode, _, _, _| {
-        match mode {
-            hir::BindingMode::BindByRef(m) => {
-                // Pick Mutable as maximum
-                match result {
-                    None | Some(hir::MutImmutable) => result = Some(m),
-                    _ => (),
-                }
-            }
-            hir::BindingMode::BindByValue(_) => { }
-        }
-    });
-    result
-}
-
-/// Checks if the patterns for this arm contain any `ref` or `ref mut`
-/// bindings, and if yes whether its containing mutable ones or just immutables ones.
-pub fn arm_contains_ref_binding(dm: &RefCell<DefMap>, arm: &hir::Arm) -> Option<hir::Mutability> {
-    arm.pats.iter()
-            .filter_map(|pat| pat_contains_ref_binding(dm, pat))
-            .max_by_key(|m| match *m {
-                hir::MutMutable => 1,
-                hir::MutImmutable => 0,
-            })
-}
-
-/// Checks if the pattern contains any patterns that bind something to
-/// an ident or wildcard, e.g. `foo`, or `Foo(_)`, `foo @ Bar(..)`,
-pub fn pat_contains_bindings_or_wild(dm: &DefMap, pat: &hir::Pat) -> bool {
-    let mut contains_bindings = false;
-    walk_pat(pat, |p| {
-        if pat_is_binding_or_wild(dm, p) {
-            contains_bindings = true;
-            false // there's at least one binding/wildcard, can short circuit now.
-        } else {
-            true
-        }
-    });
-    contains_bindings
-}
-
-pub fn simple_name<'a>(pat: &'a hir::Pat) -> Option<ast::Name> {
-    match pat.node {
-        PatKind::Ident(hir::BindByValue(_), ref path1, None) => {
-            Some(path1.node.name)
-        }
-        _ => {
-            None
-        }
-    }
-}
-
-pub fn def_to_path(tcx: &ty::ctxt, id: DefId) -> hir::Path {
-    tcx.with_path(id, |path| hir::Path {
-        global: false,
-        segments: path.last().map(|elem| hir::PathSegment {
-            identifier: hir::Ident::from_name(elem.name()),
-            parameters: hir::PathParameters::none(),
-        }).into_iter().collect(),
-        span: DUMMY_SP,
-    })
-}
-
-/// Return variants that are necessary to exist for the pattern to match.
-pub fn necessary_variants(dm: &DefMap, pat: &hir::Pat) -> Vec<DefId> {
-    let mut variants = vec![];
-    walk_pat(pat, |p| {
-        match p.node {
-            PatKind::TupleStruct(..) |
-            PatKind::Path(..) |
-            PatKind::Ident(_, _, None) |
-            PatKind::Struct(..) => {
-                match dm.get(&p.id) {
-                    Some(&PathResolution { base_def: Def::Variant(_, id), .. }) => {
-                        variants.push(id);
-                    }
-                    _ => ()
-                }
-            }
-            _ => ()
-        }
-        true
-    });
-    variants.sort();
-    variants.dedup();
-    variants
-}
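
The deleted `pat_contains_ref_binding`/`arm_contains_ref_binding` helpers above pick the most permissive binding mode by ranking `Mutable` above `Immutable`. A standalone sketch of that `max_by_key` idea (simplified types invented for illustration, not the rustc API):

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Mutability {
    Immutable,
    Mutable,
}

// Take the "strongest" ref binding seen across a set of patterns, if any.
fn strongest_ref_binding(bindings: &[Option<Mutability>]) -> Option<Mutability> {
    bindings
        .iter()
        .filter_map(|m| *m)
        .max_by_key(|m| match *m {
            Mutability::Mutable => 1,
            Mutability::Immutable => 0,
        })
}

fn main() {
    let pats = [None, Some(Mutability::Immutable), Some(Mutability::Mutable)];
    assert_eq!(strongest_ref_binding(&pats), Some(Mutability::Mutable));
    assert_eq!(strongest_ref_binding(&[None, None]), None);
}
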
index f464ea58c2d196f76a103d5e493791b2dc9f6ec7..c1dc727449ac0ca681a240a02f0071d2ed5f80d0 100644 (file)
 //! outside their scopes. This pass will also generate a set of exported items
 //! which are available for use externally when compiled as a library.
 
-pub use self::PrivateDep::*;
-pub use self::ImportUse::*;
-pub use self::LastPrivate::*;
-
-use middle::def_id::DefId;
 use util::nodemap::{DefIdSet, FnvHashMap};
 
 use std::hash::Hash;
@@ -64,39 +59,3 @@ impl<Id: Hash + Eq> Default for AccessLevels<Id> {
 /// A set containing all exported definitions from external crates.
 /// The set does not contain any entries from local crates.
 pub type ExternalExports = DefIdSet;
-
-#[derive(Copy, Clone, Debug)]
-pub enum LastPrivate {
-    LastMod(PrivateDep),
-    // `use` directives (imports) can refer to two separate definitions in the
-    // type and value namespaces. We record here the last private node for each
-    // and whether the import is in fact used for each.
-    // If the Option<PrivateDep> fields are None, it means there is no definition
-    // in that namespace.
-    LastImport{value_priv: Option<PrivateDep>,
-               value_used: ImportUse,
-               type_priv: Option<PrivateDep>,
-               type_used: ImportUse},
-}
-
-#[derive(Copy, Clone, Debug)]
-pub enum PrivateDep {
-    AllPublic,
-    DependsOn(DefId),
-}
-
-// How an import is used.
-#[derive(Copy, Clone, PartialEq, Debug)]
-pub enum ImportUse {
-    Unused,       // The import is not used.
-    Used,         // The import is used.
-}
-
-impl LastPrivate {
-    pub fn or(self, other: LastPrivate) -> LastPrivate {
-        match (self, other) {
-            (me, LastMod(AllPublic)) => me,
-            (_, other) => other,
-        }
-    }
-}
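
The removed `LastPrivate::or` combinator above keeps the right-hand value unless it carries no privacy constraint (`LastMod(AllPublic)`), in which case the left-hand value survives. A standalone sketch of those semantics with a simplified, invented enum:

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Privacy {
    AllPublic,
    DependsOn(u32), // stands in for DependsOn(DefId)
}

// Right-hand side wins, except when it imposes no constraint at all.
fn or(me: Privacy, other: Privacy) -> Privacy {
    match (me, other) {
        (me, Privacy::AllPublic) => me,
        (_, other) => other,
    }
}

fn main() {
    assert_eq!(or(Privacy::DependsOn(7), Privacy::AllPublic), Privacy::DependsOn(7));
    assert_eq!(or(Privacy::DependsOn(7), Privacy::DependsOn(9)), Privacy::DependsOn(9));
}
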
index 614d9be147bd874d27ad4628a87fba03a361eda6..63bccc2d02eb68dbf42ba12cc293a968b785f418 100644 (file)
 // reachable as well.
 
 use dep_graph::DepNode;
-use front::map as ast_map;
-use middle::def::Def;
-use middle::def_id::DefId;
-use middle::ty;
+use hir::map as ast_map;
+use hir::def::Def;
+use hir::def_id::DefId;
+use ty::{self, TyCtxt};
 use middle::privacy;
 use session::config;
 use util::nodemap::NodeSet;
@@ -28,9 +28,9 @@ use std::collections::HashSet;
 use syntax::abi::Abi;
 use syntax::ast;
 use syntax::attr;
-use rustc_front::hir;
-use rustc_front::intravisit::Visitor;
-use rustc_front::intravisit;
+use hir;
+use hir::intravisit::Visitor;
+use hir::intravisit;
 
 // Returns true if the given set of generics implies that the item it's
 // associated with must be inlined.
@@ -55,7 +55,7 @@ fn item_might_be_inlined(item: &hir::Item) -> bool {
     }
 }
 
-fn method_might_be_inlined(tcx: &ty::ctxt, sig: &hir::MethodSig,
+fn method_might_be_inlined(tcx: &TyCtxt, sig: &hir::MethodSig,
                            impl_item: &hir::ImplItem,
                            impl_src: DefId) -> bool {
     if attr::requests_inline(&impl_item.attrs) ||
@@ -67,17 +67,17 @@ fn method_might_be_inlined(tcx: &ty::ctxt, sig: &hir::MethodSig,
             Some(ast_map::NodeItem(item)) =>
                 item_might_be_inlined(&item),
             Some(..) | None =>
-                tcx.sess.span_bug(impl_item.span, "impl did is not an item")
+                span_bug!(impl_item.span, "impl did is not an item")
         }
     } else {
-        tcx.sess.span_bug(impl_item.span, "found a foreign impl as a parent of a local method")
+        span_bug!(impl_item.span, "found a foreign impl as a parent of a local method")
     }
 }
 
 // Information needed while computing reachability.
 struct ReachableContext<'a, 'tcx: 'a> {
     // The type context.
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     // The set of items which must be exported in the linkage sense.
     reachable_symbols: NodeSet,
     // A worklist of item IDs. Each item ID in this worklist will be inlined
@@ -94,8 +94,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ReachableContext<'a, 'tcx> {
                 let def = match self.tcx.def_map.borrow().get(&expr.id) {
                     Some(d) => d.full_def(),
                     None => {
-                        self.tcx.sess.span_bug(expr.span,
-                                               "def ID not in def map?!")
+                        span_bug!(expr.span, "def ID not in def map?!")
                     }
                 };
 
@@ -143,7 +142,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ReachableContext<'a, 'tcx> {
 
 impl<'a, 'tcx> ReachableContext<'a, 'tcx> {
     // Creates a new reachability computation context.
-    fn new(tcx: &'a ty::ctxt<'tcx>) -> ReachableContext<'a, 'tcx> {
+    fn new(tcx: &'a TyCtxt<'tcx>) -> ReachableContext<'a, 'tcx> {
         let any_library = tcx.sess.crate_types.borrow().iter().any(|ty| {
             *ty != config::CrateTypeExecutable
         });
@@ -312,12 +311,8 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> {
             ast_map::NodeVariant(_) |
             ast_map::NodeStructCtor(_) => {}
             _ => {
-                self.tcx
-                    .sess
-                    .bug(&format!("found unexpected thingy in worklist: {}",
-                                 self.tcx
-                                     .map
-                                     .node_to_string(search_item)))
+                bug!("found unexpected thingy in worklist: {}",
+                     self.tcx.map.node_to_string(search_item))
             }
         }
     }
@@ -349,7 +344,7 @@ impl<'a, 'v> Visitor<'v> for CollectPrivateImplItemsVisitor<'a> {
     }
 }
 
-pub fn find_reachable(tcx: &ty::ctxt,
+pub fn find_reachable(tcx: &TyCtxt,
                       access_levels: &privacy::AccessLevels)
                       -> NodeSet {
     let _task = tcx.dep_graph.in_task(DepNode::Reachability);
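
The reachability pass above is a worklist computation: start from the exported items and keep adding whatever they reference (including inlinable generics) until a fixed point. A standalone sketch of the worklist shape, with an invented call graph standing in for the HIR:

use std::collections::{HashMap, HashSet, VecDeque};

fn reachable(roots: &[&str], calls: &HashMap<&str, Vec<&str>>) -> HashSet<String> {
    let mut seen: HashSet<String> = HashSet::new();
    let mut worklist: VecDeque<&str> = roots.iter().cloned().collect();
    while let Some(item) = worklist.pop_front() {
        // Only expand an item the first time we see it.
        if seen.insert(item.to_string()) {
            for &callee in calls.get(item).into_iter().flatten() {
                worklist.push_back(callee);
            }
        }
    }
    seen
}

fn main() {
    let mut calls: HashMap<&str, Vec<&str>> = HashMap::new();
    calls.insert("pub_fn", vec!["inline_helper"]);
    calls.insert("inline_helper", vec!["private_leaf"]);
    let set = reachable(&["pub_fn"], &calls);
    assert!(set.contains("private_leaf"));
    assert!(!set.contains("dead_code"));
}
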
index b39964e2861e33a60bef8c985da8efe405de0bd5..de349917258815ba538a176f83f3e48794f5d78a 100644 (file)
 //! `middle/infer/region_inference/README.md`
 
 use dep_graph::DepNode;
-use front::map as ast_map;
+use hir::map as ast_map;
 use session::Session;
 use util::nodemap::{FnvHashMap, NodeMap, NodeSet};
 use middle::cstore::InlinedItem;
-use middle::ty;
+use ty;
 
 use std::cell::RefCell;
 use std::collections::hash_map::Entry;
@@ -30,10 +30,9 @@ use std::mem;
 use syntax::codemap::{self, Span};
 use syntax::ast::{self, NodeId};
 
-use rustc_front::hir;
-use rustc_front::intravisit::{self, Visitor, FnKind};
-use rustc_front::hir::{Block, Item, FnDecl, Arm, Pat, PatKind, Stmt, Expr, Local};
-use rustc_front::util::stmt_id;
+use hir;
+use hir::intravisit::{self, Visitor, FnKind};
+use hir::{Block, Item, FnDecl, Arm, Pat, PatKind, Stmt, Expr, Local};
 
 #[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, RustcEncodable,
            RustcDecodable, Copy)]
@@ -41,15 +40,16 @@ pub struct CodeExtent(u32);
 
 impl fmt::Debug for CodeExtent {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        try!(write!(f, "CodeExtent({:?}", self.0));
+        write!(f, "CodeExtent({:?}", self.0)?;
 
-        try!(ty::tls::with_opt(|opt_tcx| {
+        ty::tls::with_opt(|opt_tcx| {
             if let Some(tcx) = opt_tcx {
-                let data = tcx.region_maps.code_extents.borrow()[self.0 as usize];
-                try!(write!(f, "/{:?}", data));
+                if let Some(data) = tcx.region_maps.code_extents.borrow().get(self.0 as usize) {
+                    write!(f, "/{:?}", data)?;
+                }
             }
             Ok(())
-        }));
+        })?;
 
         write!(f, ")")
     }
@@ -280,7 +280,7 @@ pub struct RegionMaps {
     /// hierarchy based on their lexical mapping. This is used to
     /// handle the relationships between regions in a fn and in a
     /// closure defined by that fn. See the "Modeling closures"
-    /// section of the README in middle::infer::region_inference for
+    /// section of the README in infer::region_inference for
     /// more details.
     fn_tree: RefCell<NodeMap<ast::NodeId>>,
 }
@@ -291,7 +291,7 @@ pub struct Context {
     /// of the innermost fn body. Each fn forms its own disjoint tree
     /// in the region hierarchy. These fn bodies are themselves
     /// arranged into a tree. See the "Modeling closures" section of
-    /// the README in middle::infer::region_inference for more
+    /// the README in infer::region_inference for more
     /// details.
     root_id: Option<ast::NodeId>,
 
@@ -343,7 +343,7 @@ impl RegionMaps {
     pub fn lookup_code_extent(&self, e: CodeExtentData) -> CodeExtent {
         match self.code_extent_interner.borrow().get(&e) {
             Some(&d) => d,
-            None => panic!("unknown code extent {:?}", e)
+            None => bug!("unknown code extent {:?}", e)
         }
     }
     pub fn node_extent(&self, n: ast::NodeId) -> CodeExtent {
@@ -385,8 +385,8 @@ impl RegionMaps {
             }
             Entry::Vacant(v) => {
                 if self.code_extents.borrow().len() > 0xffffffffusize {
-                    unreachable!() // should pass a sess,
-                                   // but this isn't the only place
+                    bug!() // should pass a sess,
+                           // but this isn't the only place
                 }
                 let idx = CodeExtent(self.code_extents.borrow().len() as u32);
                 info!("CodeExtent({}) = {:?} [parent={}]", idx.0, e, parent.0);
@@ -460,7 +460,7 @@ impl RegionMaps {
         self.scope_map.borrow()[id.0 as usize].into_option()
     }
 
-    #[allow(dead_code)] // used in middle::cfg
+    #[allow(dead_code)] // used in cfg
     pub fn encl_scope(&self, id: CodeExtent) -> CodeExtent {
         //! Returns the narrowest scope that encloses `id`, if any.
         self.opt_encl_scope(id).unwrap()
@@ -470,7 +470,7 @@ impl RegionMaps {
     pub fn var_scope(&self, var_id: ast::NodeId) -> CodeExtent {
         match self.var_map.borrow().get(&var_id) {
             Some(&r) => r,
-            None => { panic!("no enclosing scope for id {:?}", var_id); }
+            None => { bug!("no enclosing scope for id {:?}", var_id); }
         }
     }
 
@@ -587,7 +587,7 @@ impl RegionMaps {
             // different functions.  Compare those fn for lexical
             // nesting. The reasoning behind this is subtle.  See the
             // "Modeling closures" section of the README in
-            // middle::infer::region_inference for more details.
+            // infer::region_inference for more details.
             let a_root_scope = self.code_extent_data(a_ancestors[a_index]);
             let b_root_scope = self.code_extent_data(a_ancestors[a_index]);
             return match (a_root_scope, b_root_scope) {
@@ -601,12 +601,12 @@ impl RegionMaps {
                         scope_a
                     } else {
                         // neither fn encloses the other
-                        unreachable!()
+                        bug!()
                     }
                 }
                 _ => {
                     // root ids are always Misc right now
-                    unreachable!()
+                    bug!()
                 }
             };
         }
@@ -765,7 +765,7 @@ fn resolve_pat(visitor: &mut RegionResolutionVisitor, pat: &hir::Pat) {
 }
 
 fn resolve_stmt(visitor: &mut RegionResolutionVisitor, stmt: &hir::Stmt) {
-    let stmt_id = stmt_id(stmt);
+    let stmt_id = stmt.node.id();
     debug!("resolve_stmt(stmt.id={:?})", stmt_id);
 
     // Every statement will clean up the temporaries created during
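
The change to `CodeExtent`'s `Debug` impl above swaps a panicking index for `.get()`, so an out-of-range extent id simply omits its payload instead of aborting the formatter. A standalone sketch of the difference, with invented extent data:

fn describe(extents: &[&str], idx: usize) -> String {
    let mut s = format!("CodeExtent({:?}", idx);
    // `extents[idx]` would panic on a bad id; `.get(idx)` lets us skip the detail.
    if let Some(data) = extents.get(idx) {
        s.push_str(&format!("/{:?}", data));
    }
    s.push(')');
    s
}

fn main() {
    let extents = ["Misc", "CallSiteScope"];
    assert_eq!(describe(&extents, 1), "CodeExtent(1/\"CallSiteScope\")");
    // An out-of-range id no longer panics; it just omits the payload.
    assert_eq!(describe(&extents, 9), "CodeExtent(9)");
}
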
index f29e14f67d73fa7b3407ecd70d2e08b53be9dd80..585b65b9f5e42e7613a1e5803213d9711bab5b13 100644 (file)
@@ -19,12 +19,12 @@ pub use self::DefRegion::*;
 use self::ScopeChain::*;
 
 use dep_graph::DepNode;
-use front::map::Map;
+use hir::map::Map;
 use session::Session;
-use middle::def::{Def, DefMap};
+use hir::def::{Def, DefMap};
 use middle::region;
-use middle::subst;
-use middle::ty;
+use ty::subst;
+use ty;
 use std::fmt;
 use std::mem::replace;
 use syntax::ast;
@@ -32,9 +32,9 @@ use syntax::codemap::Span;
 use syntax::parse::token::special_idents;
 use util::nodemap::NodeMap;
 
-use rustc_front::hir;
-use rustc_front::print::pprust::lifetime_to_string;
-use rustc_front::intravisit::{self, Visitor, FnKind};
+use hir;
+use hir::print::lifetime_to_string;
+use hir::intravisit::{self, Visitor, FnKind};
 
 #[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)]
 pub enum DefRegion {
@@ -102,7 +102,7 @@ pub fn krate(sess: &Session,
     let _task = hir_map.dep_graph.in_task(DepNode::ResolveLifetimes);
     let krate = hir_map.krate();
     let mut named_region_map = NodeMap();
-    try!(sess.track_errors(|| {
+    sess.track_errors(|| {
         krate.visit_all_items(&mut LifetimeContext {
             sess: sess,
             named_region_map: &mut named_region_map,
@@ -111,7 +111,7 @@ pub fn krate(sess: &Session,
             trait_ref_hack: false,
             labels_in_fn: vec![],
         });
-    }));
+    })?;
     Ok(named_region_map)
 }
 
@@ -182,17 +182,17 @@ impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> {
     fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v hir::FnDecl,
                 b: &'v hir::Block, s: Span, fn_id: ast::NodeId) {
         match fk {
-            FnKind::ItemFn(_, generics, _, _, _, _) => {
+            FnKind::ItemFn(_, generics, _, _, _, _, _) => {
                 self.visit_early_late(subst::FnSpace, generics, |this| {
                     this.add_scope_and_walk_fn(fk, fd, b, s, fn_id)
                 })
             }
-            FnKind::Method(_, sig, _) => {
+            FnKind::Method(_, sig, _, _) => {
                 self.visit_early_late(subst::FnSpace, &sig.generics, |this| {
                     this.add_scope_and_walk_fn(fk, fd, b, s, fn_id)
                 })
             }
-            FnKind::Closure => {
+            FnKind::Closure(_) => {
                 self.add_scope_and_walk_fn(fk, fd, b, s, fn_id)
             }
         }
@@ -471,16 +471,16 @@ impl<'a> LifetimeContext<'a> {
                                  fn_id: ast::NodeId) {
 
         match fk {
-            FnKind::ItemFn(_, generics, _, _, _, _) => {
+            FnKind::ItemFn(_, generics, _, _, _, _, _) => {
                 intravisit::walk_fn_decl(self, fd);
                 self.visit_generics(generics);
             }
-            FnKind::Method(_, sig, _) => {
+            FnKind::Method(_, sig, _, _) => {
                 intravisit::walk_fn_decl(self, fd);
                 self.visit_generics(&sig.generics);
                 self.visit_explicit_self(&sig.explicit_self);
             }
-            FnKind::Closure => {
+            FnKind::Closure(_) => {
                 intravisit::walk_fn_decl(self, fd);
             }
         }
@@ -747,9 +747,9 @@ impl<'a> LifetimeContext<'a> {
                        lifetime_ref: &hir::Lifetime,
                        def: DefRegion) {
         if lifetime_ref.id == ast::DUMMY_NODE_ID {
-            self.sess.span_bug(lifetime_ref.span,
-                               "lifetime reference not renumbered, \
-                               probably a bug in syntax::fold");
+            span_bug!(lifetime_ref.span,
+                      "lifetime reference not renumbered, \
+                       probably a bug in syntax::fold");
         }
 
         debug!("lifetime_ref={:?} id={:?} resolved to {:?}",
@@ -822,7 +822,7 @@ fn early_bound_lifetime_names(generics: &hir::Generics) -> Vec<ast::Name> {
                         collector.visit_lifetime(bound);
                     }
                 }
-                &hir::WherePredicate::EqPredicate(_) => unimplemented!()
+                &hir::WherePredicate::EqPredicate(_) => bug!("unimplemented")
             }
         }
     }
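
The repeated `FnKind::Closure` → `FnKind::Closure(_)` edits above follow from the variant gaining a payload: exhaustive matches must acknowledge the new field. A standalone sketch with a simplified, invented `FnKind`:

enum FnKind<'a> {
    ItemFn(&'a str),
    Method(&'a str),
    Closure(&'a [&'a str]), // newly added payload, e.g. attributes
}

fn describe(fk: &FnKind) -> &'static str {
    match *fk {
        FnKind::ItemFn(_) => "free function",
        FnKind::Method(_) => "method",
        // Writing plain `FnKind::Closure =>` would no longer compile.
        FnKind::Closure(_) => "closure",
    }
}

fn main() {
    assert_eq!(describe(&FnKind::ItemFn("f")), "free function");
    assert_eq!(describe(&FnKind::Method("m")), "method");
    assert_eq!(describe(&FnKind::Closure(&[])), "closure");
}
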
index fbb84c3cd7ec8858c2a2dcf0842836a11812d03b..6c7c7f160f4e5b5ea9d254c57dcb3fdcdd0ce7de 100644 (file)
 pub use self::StabilityLevel::*;
 
 use dep_graph::DepNode;
-use front::map as hir_map;
+use hir::map as hir_map;
 use session::Session;
 use lint;
 use middle::cstore::{CrateStore, LOCAL_CRATE};
-use middle::def::Def;
-use middle::def_id::{CRATE_DEF_INDEX, DefId};
-use middle::ty;
+use hir::def::Def;
+use hir::def_id::{CRATE_DEF_INDEX, DefId};
+use ty::{self, TyCtxt};
 use middle::privacy::AccessLevels;
 use syntax::parse::token::InternedString;
 use syntax::codemap::{Span, DUMMY_SP};
@@ -30,9 +30,9 @@ use syntax::feature_gate::{GateIssue, emit_feature_err};
 use syntax::attr::{self, Stability, Deprecation, AttrMetaMethods};
 use util::nodemap::{DefIdMap, FnvHashSet, FnvHashMap};
 
-use rustc_front::hir;
-use rustc_front::hir::{Item, Generics, StructField, Variant, PatKind};
-use rustc_front::intravisit::{self, Visitor};
+use hir;
+use hir::{Item, Generics, StructField, Variant, PatKind};
+use hir::intravisit::{self, Visitor};
 
 use std::mem::replace;
 use std::cmp::Ordering;
@@ -72,7 +72,7 @@ pub struct Index<'tcx> {
 
 // A private tree-walker for producing an Index.
 struct Annotator<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     index: &'a mut Index<'tcx>,
     parent_stab: Option<&'tcx Stability>,
     parent_depr: Option<Deprecation>,
@@ -259,7 +259,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for Annotator<'a, 'tcx> {
     }
 
     fn visit_struct_field(&mut self, s: &StructField) {
-        self.annotate(s.node.id, &s.node.attrs, s.span, AnnotationKind::Required, |v| {
+        self.annotate(s.id, &s.attrs, s.span, AnnotationKind::Required, |v| {
             intravisit::walk_struct_field(v, s);
         });
     }
@@ -279,7 +279,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for Annotator<'a, 'tcx> {
 
 impl<'tcx> Index<'tcx> {
     /// Construct the stability index for a crate being compiled.
-    pub fn build(&mut self, tcx: &ty::ctxt<'tcx>, access_levels: &AccessLevels) {
+    pub fn build(&mut self, tcx: &TyCtxt<'tcx>, access_levels: &AccessLevels) {
         let _task = tcx.dep_graph.in_task(DepNode::StabilityIndex);
         let krate = tcx.map.krate();
         let mut annotator = Annotator {
@@ -319,7 +319,7 @@ impl<'tcx> Index<'tcx> {
 /// Cross-references the feature names of unstable APIs with enabled
 /// features and possibly prints errors. Returns a list of all
 /// features used.
-pub fn check_unstable_api_usage(tcx: &ty::ctxt)
+pub fn check_unstable_api_usage(tcx: &TyCtxt)
                                 -> FnvHashMap<InternedString, StabilityLevel> {
     let _task = tcx.dep_graph.in_task(DepNode::StabilityCheck);
     let ref active_lib_features = tcx.sess.features.borrow().declared_lib_features;
@@ -339,7 +339,7 @@ pub fn check_unstable_api_usage(tcx: &ty::ctxt)
 }
 
 struct Checker<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     active_features: FnvHashSet<InternedString>,
     used_features: FnvHashMap<InternedString, StabilityLevel>,
     // Within a block where feature gate checking can be skipped.
@@ -466,7 +466,7 @@ impl<'a, 'v, 'tcx> Visitor<'v> for Checker<'a, 'tcx> {
 }
 
 /// Helper for discovering nodes to check for stability
-pub fn check_item(tcx: &ty::ctxt, item: &hir::Item, warn_about_defns: bool,
+pub fn check_item(tcx: &TyCtxt, item: &hir::Item, warn_about_defns: bool,
                   cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option<Deprecation>)) {
     match item.node {
         hir::ItemExternCrate(_) => {
@@ -503,7 +503,7 @@ pub fn check_item(tcx: &ty::ctxt, item: &hir::Item, warn_about_defns: bool,
 }
 
 /// Helper for discovering nodes to check for stability
-pub fn check_expr(tcx: &ty::ctxt, e: &hir::Expr,
+pub fn check_expr(tcx: &TyCtxt, e: &hir::Expr,
                   cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option<Deprecation>)) {
     let span;
     let id = match e.node {
@@ -516,8 +516,8 @@ pub fn check_expr(tcx: &ty::ctxt, e: &hir::Expr,
             span = field.span;
             match tcx.expr_ty_adjusted(base_e).sty {
                 ty::TyStruct(def, _) => def.struct_variant().field_named(field.node).did,
-                _ => tcx.sess.span_bug(e.span,
-                                       "stability::check_expr: named field access on non-struct")
+                _ => span_bug!(e.span,
+                               "stability::check_expr: named field access on non-struct")
             }
         }
         hir::ExprTupField(ref base_e, ref field) => {
@@ -525,9 +525,9 @@ pub fn check_expr(tcx: &ty::ctxt, e: &hir::Expr,
             match tcx.expr_ty_adjusted(base_e).sty {
                 ty::TyStruct(def, _) => def.struct_variant().fields[field.node].did,
                 ty::TyTuple(..) => return,
-                _ => tcx.sess.span_bug(e.span,
-                                       "stability::check_expr: unnamed field access on \
-                                        something other than a tuple or struct")
+                _ => span_bug!(e.span,
+                               "stability::check_expr: unnamed field access on \
+                                something other than a tuple or struct")
             }
         }
         hir::ExprStruct(_, ref expr_fields, _) => {
@@ -551,10 +551,10 @@ pub fn check_expr(tcx: &ty::ctxt, e: &hir::Expr,
                 // a bug to have construct one.
                 ty::TyEnum(..) => return,
                 _ => {
-                    tcx.sess.span_bug(e.span,
-                                      &format!("stability::check_expr: struct construction \
-                                                of non-struct, type {:?}",
-                                               type_));
+                    span_bug!(e.span,
+                              "stability::check_expr: struct construction \
+                               of non-struct, type {:?}",
+                              type_);
                 }
             }
         }
@@ -564,7 +564,7 @@ pub fn check_expr(tcx: &ty::ctxt, e: &hir::Expr,
     maybe_do_stability_check(tcx, id, span, cb);
 }
 
-pub fn check_path(tcx: &ty::ctxt, path: &hir::Path, id: ast::NodeId,
+pub fn check_path(tcx: &TyCtxt, path: &hir::Path, id: ast::NodeId,
                   cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option<Deprecation>)) {
     match tcx.def_map.borrow().get(&id).map(|d| d.full_def()) {
         Some(Def::PrimTy(..)) => {}
@@ -576,7 +576,7 @@ pub fn check_path(tcx: &ty::ctxt, path: &hir::Path, id: ast::NodeId,
     }
 }
 
-pub fn check_path_list_item(tcx: &ty::ctxt, item: &hir::PathListItem,
+pub fn check_path_list_item(tcx: &TyCtxt, item: &hir::PathListItem,
                   cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option<Deprecation>)) {
     match tcx.def_map.borrow().get(&item.node.id()).map(|d| d.full_def()) {
         Some(Def::PrimTy(..)) => {}
@@ -587,7 +587,7 @@ pub fn check_path_list_item(tcx: &ty::ctxt, item: &hir::PathListItem,
     }
 }
 
-pub fn check_pat(tcx: &ty::ctxt, pat: &hir::Pat,
+pub fn check_pat(tcx: &TyCtxt, pat: &hir::Pat,
                  cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option<Deprecation>)) {
     debug!("check_pat(pat = {:?})", pat);
     if is_internal(tcx, pat.span) { return; }
@@ -616,7 +616,7 @@ pub fn check_pat(tcx: &ty::ctxt, pat: &hir::Pat,
     }
 }
 
-fn maybe_do_stability_check(tcx: &ty::ctxt, id: DefId, span: Span,
+fn maybe_do_stability_check(tcx: &TyCtxt, id: DefId, span: Span,
                             cb: &mut FnMut(DefId, Span,
                                            &Option<&Stability>, &Option<Deprecation>)) {
     if is_internal(tcx, span) {
@@ -634,11 +634,11 @@ fn maybe_do_stability_check(tcx: &ty::ctxt, id: DefId, span: Span,
     cb(id, span, &stability, &deprecation);
 }
 
-fn is_internal(tcx: &ty::ctxt, span: Span) -> bool {
+fn is_internal(tcx: &TyCtxt, span: Span) -> bool {
     tcx.sess.codemap().span_allows_unstable(span)
 }
 
-fn is_staged_api(tcx: &ty::ctxt, id: DefId) -> bool {
+fn is_staged_api(tcx: &TyCtxt, id: DefId) -> bool {
     match tcx.trait_item_of_item(id) {
         Some(ty::MethodTraitItemId(trait_method_id))
             if trait_method_id != id => {
@@ -653,7 +653,7 @@ fn is_staged_api(tcx: &ty::ctxt, id: DefId) -> bool {
 
 /// Lookup the stability for a node, loading external crate
 /// metadata as necessary.
-pub fn lookup_stability<'tcx>(tcx: &ty::ctxt<'tcx>, id: DefId) -> Option<&'tcx Stability> {
+pub fn lookup_stability<'tcx>(tcx: &TyCtxt<'tcx>, id: DefId) -> Option<&'tcx Stability> {
     if let Some(st) = tcx.stability.borrow().stab_map.get(&id) {
         return *st;
     }
@@ -663,7 +663,7 @@ pub fn lookup_stability<'tcx>(tcx: &ty::ctxt<'tcx>, id: DefId) -> Option<&'tcx S
     st
 }
 
-pub fn lookup_deprecation<'tcx>(tcx: &ty::ctxt<'tcx>, id: DefId) -> Option<Deprecation> {
+pub fn lookup_deprecation<'tcx>(tcx: &TyCtxt<'tcx>, id: DefId) -> Option<Deprecation> {
     if let Some(depr) = tcx.stability.borrow().depr_map.get(&id) {
         return depr.clone();
     }
@@ -673,7 +673,7 @@ pub fn lookup_deprecation<'tcx>(tcx: &ty::ctxt<'tcx>, id: DefId) -> Option<Depre
     depr
 }
 
-fn lookup_stability_uncached<'tcx>(tcx: &ty::ctxt<'tcx>, id: DefId) -> Option<&'tcx Stability> {
+fn lookup_stability_uncached<'tcx>(tcx: &TyCtxt<'tcx>, id: DefId) -> Option<&'tcx Stability> {
     debug!("lookup(id={:?})", id);
     if id.is_local() {
         None // The stability cache is filled partially lazily
@@ -682,7 +682,7 @@ fn lookup_stability_uncached<'tcx>(tcx: &ty::ctxt<'tcx>, id: DefId) -> Option<&'
     }
 }
 
-fn lookup_deprecation_uncached<'tcx>(tcx: &ty::ctxt<'tcx>, id: DefId) -> Option<Deprecation> {
+fn lookup_deprecation_uncached<'tcx>(tcx: &TyCtxt<'tcx>, id: DefId) -> Option<Deprecation> {
     debug!("lookup(id={:?})", id);
     if id.is_local() {
         None // The stability cache is filled partially lazily
diff --git a/src/librustc/middle/subst.rs b/src/librustc/middle/subst.rs
deleted file mode 100644 (file)
index f8c6d3d..0000000
+++ /dev/null
@@ -1,798 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Type substitutions.
-
-pub use self::ParamSpace::*;
-pub use self::RegionSubsts::*;
-
-use middle::cstore;
-use middle::def_id::DefId;
-use middle::ty::{self, Ty};
-use middle::ty::fold::{TypeFoldable, TypeFolder};
-
-use serialize::{Encodable, Encoder, Decodable, Decoder};
-use std::fmt;
-use std::iter::IntoIterator;
-use std::slice::Iter;
-use std::vec::{Vec, IntoIter};
-use syntax::codemap::{Span, DUMMY_SP};
-
-///////////////////////////////////////////////////////////////////////////
-
-/// A substitution mapping type/region parameters to new values. We
-/// identify each in-scope parameter by an *index* and a *parameter
-/// space* (which indicates where the parameter is defined; see
-/// `ParamSpace`).
-#[derive(Clone, PartialEq, Eq, Hash)]
-pub struct Substs<'tcx> {
-    pub types: VecPerParamSpace<Ty<'tcx>>,
-    pub regions: RegionSubsts,
-}
-
-/// Represents the values to use when substituting lifetime parameters.
-/// If the value is `ErasedRegions`, then this subst is occurring during
-/// trans, and all region parameters will be replaced with `ty::ReStatic`.
-#[derive(Clone, PartialEq, Eq, Hash)]
-pub enum RegionSubsts {
-    ErasedRegions,
-    NonerasedRegions(VecPerParamSpace<ty::Region>)
-}
-
-impl<'tcx> Substs<'tcx> {
-    pub fn new(t: VecPerParamSpace<Ty<'tcx>>,
-               r: VecPerParamSpace<ty::Region>)
-               -> Substs<'tcx>
-    {
-        Substs { types: t, regions: NonerasedRegions(r) }
-    }
-
-    pub fn new_type(t: Vec<Ty<'tcx>>,
-                    r: Vec<ty::Region>)
-                    -> Substs<'tcx>
-    {
-        Substs::new(VecPerParamSpace::new(t, Vec::new(), Vec::new()),
-                    VecPerParamSpace::new(r, Vec::new(), Vec::new()))
-    }
-
-    pub fn new_trait(t: Vec<Ty<'tcx>>,
-                     r: Vec<ty::Region>,
-                     s: Ty<'tcx>)
-                    -> Substs<'tcx>
-    {
-        Substs::new(VecPerParamSpace::new(t, vec!(s), Vec::new()),
-                    VecPerParamSpace::new(r, Vec::new(), Vec::new()))
-    }
-
-    pub fn erased(t: VecPerParamSpace<Ty<'tcx>>) -> Substs<'tcx>
-    {
-        Substs { types: t, regions: ErasedRegions }
-    }
-
-    pub fn empty() -> Substs<'tcx> {
-        Substs {
-            types: VecPerParamSpace::empty(),
-            regions: NonerasedRegions(VecPerParamSpace::empty()),
-        }
-    }
-
-    pub fn trans_empty() -> Substs<'tcx> {
-        Substs {
-            types: VecPerParamSpace::empty(),
-            regions: ErasedRegions
-        }
-    }
-
-    pub fn is_noop(&self) -> bool {
-        let regions_is_noop = match self.regions {
-            ErasedRegions => false, // may be used to canonicalize
-            NonerasedRegions(ref regions) => regions.is_empty(),
-        };
-
-        regions_is_noop && self.types.is_empty()
-    }
-
-    pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> {
-        *self.types.get(ty_param_def.space, ty_param_def.index as usize)
-    }
-
-    pub fn self_ty(&self) -> Option<Ty<'tcx>> {
-        self.types.get_self().cloned()
-    }
-
-    pub fn with_self_ty(&self, self_ty: Ty<'tcx>) -> Substs<'tcx> {
-        assert!(self.self_ty().is_none());
-        let mut s = (*self).clone();
-        s.types.push(SelfSpace, self_ty);
-        s
-    }
-
-    pub fn erase_regions(self) -> Substs<'tcx> {
-        let Substs { types, regions: _ } = self;
-        Substs { types: types, regions: ErasedRegions }
-    }
-
-    /// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
-    /// to easily access the set of region substitutions.
-    pub fn regions<'a>(&'a self) -> &'a VecPerParamSpace<ty::Region> {
-        match self.regions {
-            ErasedRegions => panic!("Erased regions only expected in trans"),
-            NonerasedRegions(ref r) => r
-        }
-    }
-
-    /// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
-    /// to easily access the set of region substitutions.
-    pub fn mut_regions<'a>(&'a mut self) -> &'a mut VecPerParamSpace<ty::Region> {
-        match self.regions {
-            ErasedRegions => panic!("Erased regions only expected in trans"),
-            NonerasedRegions(ref mut r) => r
-        }
-    }
-
-    pub fn with_method(self,
-                       m_types: Vec<Ty<'tcx>>,
-                       m_regions: Vec<ty::Region>)
-                       -> Substs<'tcx>
-    {
-        let Substs { types, regions } = self;
-        let types = types.with_slice(FnSpace, &m_types);
-        let regions = regions.map(|r| r.with_slice(FnSpace, &m_regions));
-        Substs { types: types, regions: regions }
-    }
-
-    pub fn with_method_from(self,
-                            meth_substs: &Substs<'tcx>)
-                            -> Substs<'tcx>
-    {
-        let Substs { types, regions } = self;
-        let types = types.with_slice(FnSpace, meth_substs.types.get_slice(FnSpace));
-        let regions = regions.map(|r| {
-            r.with_slice(FnSpace, meth_substs.regions().get_slice(FnSpace))
-        });
-        Substs { types: types, regions: regions }
-    }
-
-    /// Creates a trait-ref out of this substs, ignoring the FnSpace substs
-    pub fn to_trait_ref(&self, tcx: &ty::ctxt<'tcx>, trait_id: DefId)
-                        -> ty::TraitRef<'tcx> {
-        let Substs { mut types, regions } = self.clone();
-        types.truncate(FnSpace, 0);
-        let regions = regions.map(|mut r| { r.truncate(FnSpace, 0); r });
-
-        ty::TraitRef {
-            def_id: trait_id,
-            substs: tcx.mk_substs(Substs { types: types, regions: regions })
-        }
-    }
-}
-
-impl<'tcx> Encodable for Substs<'tcx> {
-
-    fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
-        cstore::tls::with_encoding_context(s, |ecx, rbml_w| {
-            ecx.encode_substs(rbml_w, self);
-            Ok(())
-        })
-    }
-}
-
-impl<'tcx> Decodable for Substs<'tcx> {
-    fn decode<D: Decoder>(d: &mut D) -> Result<Substs<'tcx>, D::Error> {
-        cstore::tls::with_decoding_context(d, |dcx, rbml_r| {
-            Ok(dcx.decode_substs(rbml_r))
-        })
-    }
-}
-
-impl<'tcx> Decodable for &'tcx Substs<'tcx> {
-    fn decode<D: Decoder>(d: &mut D) -> Result<&'tcx Substs<'tcx>, D::Error> {
-        let substs = cstore::tls::with_decoding_context(d, |dcx, rbml_r| {
-            let substs = dcx.decode_substs(rbml_r);
-            dcx.tcx().mk_substs(substs)
-        });
-
-        Ok(substs)
-    }
-}
-
-impl RegionSubsts {
-    pub fn map<F>(self, op: F) -> RegionSubsts where
-        F: FnOnce(VecPerParamSpace<ty::Region>) -> VecPerParamSpace<ty::Region>,
-    {
-        match self {
-            ErasedRegions => ErasedRegions,
-            NonerasedRegions(r) => NonerasedRegions(op(r))
-        }
-    }
-
-    pub fn is_erased(&self) -> bool {
-        match *self {
-            ErasedRegions => true,
-            NonerasedRegions(_) => false,
-        }
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// ParamSpace
-
-#[derive(PartialOrd, Ord, PartialEq, Eq, Copy,
-           Clone, Hash, RustcEncodable, RustcDecodable, Debug)]
-pub enum ParamSpace {
-    TypeSpace,  // Type parameters attached to a type definition, trait, or impl
-    SelfSpace,  // Self parameter on a trait
-    FnSpace,    // Type parameters attached to a method or fn
-}
-
-impl ParamSpace {
-    pub fn all() -> [ParamSpace; 3] {
-        [TypeSpace, SelfSpace, FnSpace]
-    }
-
-    pub fn to_uint(self) -> usize {
-        match self {
-            TypeSpace => 0,
-            SelfSpace => 1,
-            FnSpace => 2,
-        }
-    }
-
-    pub fn from_uint(u: usize) -> ParamSpace {
-        match u {
-            0 => TypeSpace,
-            1 => SelfSpace,
-            2 => FnSpace,
-            _ => panic!("Invalid ParamSpace: {}", u)
-        }
-    }
-}
-
-/// Vector of things sorted by param space. Used to keep
-/// the set of things declared on the type, self, or method
-/// distinct.
-#[derive(PartialEq, Eq, Clone, Hash, RustcEncodable, RustcDecodable)]
-pub struct VecPerParamSpace<T> {
-    // This was originally represented as a tuple with one Vec<T> for
-    // each variant of ParamSpace, and that remains the abstraction
-    // that it provides to its clients.
-    //
-    // Here is how the representation corresponds to the abstraction
-    // i.e. the "abstraction function" AF:
-    //
-    // AF(self) = (self.content[..self.type_limit],
-    //             self.content[self.type_limit..self.self_limit],
-    //             self.content[self.self_limit..])
-    type_limit: usize,
-    self_limit: usize,
-    content: Vec<T>,
-}
-
-/// The `split` function converts one `VecPerParamSpace` into this
-/// `SeparateVecsPerParamSpace` structure.
-pub struct SeparateVecsPerParamSpace<T> {
-    pub types: Vec<T>,
-    pub selfs: Vec<T>,
-    pub fns: Vec<T>,
-}
-
-impl<T: fmt::Debug> fmt::Debug for VecPerParamSpace<T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "[{:?};{:?};{:?}]",
-               self.get_slice(TypeSpace),
-               self.get_slice(SelfSpace),
-               self.get_slice(FnSpace))
-    }
-}
-
-impl<T> VecPerParamSpace<T> {
-    fn limits(&self, space: ParamSpace) -> (usize, usize) {
-        match space {
-            TypeSpace => (0, self.type_limit),
-            SelfSpace => (self.type_limit, self.self_limit),
-            FnSpace => (self.self_limit, self.content.len()),
-        }
-    }
-
-    pub fn empty() -> VecPerParamSpace<T> {
-        VecPerParamSpace {
-            type_limit: 0,
-            self_limit: 0,
-            content: Vec::new()
-        }
-    }
-
-    /// `t` is the type space.
-    /// `s` is the self space.
-    /// `f` is the fn space.
-    pub fn new(t: Vec<T>, s: Vec<T>, f: Vec<T>) -> VecPerParamSpace<T> {
-        let type_limit = t.len();
-        let self_limit = type_limit + s.len();
-
-        let mut content = t;
-        content.extend(s);
-        content.extend(f);
-
-        VecPerParamSpace {
-            type_limit: type_limit,
-            self_limit: self_limit,
-            content: content,
-        }
-    }
-
-    fn new_internal(content: Vec<T>, type_limit: usize, self_limit: usize)
-                    -> VecPerParamSpace<T>
-    {
-        VecPerParamSpace {
-            type_limit: type_limit,
-            self_limit: self_limit,
-            content: content,
-        }
-    }
-
-    /// Appends `value` to the vector associated with `space`.
-    ///
-    /// Unlike the `push` method in `Vec`, this should not be assumed
-    /// to be a cheap operation (even when amortized over many calls).
-    pub fn push(&mut self, space: ParamSpace, value: T) {
-        let (_, limit) = self.limits(space);
-        match space {
-            TypeSpace => { self.type_limit += 1; self.self_limit += 1; }
-            SelfSpace => { self.self_limit += 1; }
-            FnSpace => { }
-        }
-        self.content.insert(limit, value);
-    }
-
-    /// Appends `values` to the vector associated with `space`.
-    ///
-    /// Unlike the `extend` method in `Vec`, this should not be assumed
-    /// to be a cheap operation (even when amortized over many calls).
-    pub fn extend<I:Iterator<Item=T>>(&mut self, space: ParamSpace, values: I) {
-        // This could be made more efficient, obviously.
-        for item in values {
-            self.push(space, item);
-        }
-    }
-
-    pub fn pop(&mut self, space: ParamSpace) -> Option<T> {
-        let (start, limit) = self.limits(space);
-        if start == limit {
-            None
-        } else {
-            match space {
-                TypeSpace => { self.type_limit -= 1; self.self_limit -= 1; }
-                SelfSpace => { self.self_limit -= 1; }
-                FnSpace => {}
-            }
-            if self.content.is_empty() {
-                None
-            } else {
-                Some(self.content.remove(limit - 1))
-            }
-        }
-    }
-
-    pub fn truncate(&mut self, space: ParamSpace, len: usize) {
-        // FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
-        while self.len(space) > len {
-            self.pop(space);
-        }
-    }
-
-    pub fn replace(&mut self, space: ParamSpace, elems: Vec<T>) {
-        // FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
-        self.truncate(space, 0);
-        for t in elems {
-            self.push(space, t);
-        }
-    }
-
-    pub fn get_self<'a>(&'a self) -> Option<&'a T> {
-        let v = self.get_slice(SelfSpace);
-        assert!(v.len() <= 1);
-        if v.is_empty() { None } else { Some(&v[0]) }
-    }
-
-    pub fn len(&self, space: ParamSpace) -> usize {
-        self.get_slice(space).len()
-    }
-
-    pub fn is_empty_in(&self, space: ParamSpace) -> bool {
-        self.len(space) == 0
-    }
-
-    pub fn get_slice<'a>(&'a self, space: ParamSpace) -> &'a [T] {
-        let (start, limit) = self.limits(space);
-        &self.content[start.. limit]
-    }
-
-    pub fn get_mut_slice<'a>(&'a mut self, space: ParamSpace) -> &'a mut [T] {
-        let (start, limit) = self.limits(space);
-        &mut self.content[start.. limit]
-    }
-
-    pub fn opt_get<'a>(&'a self,
-                       space: ParamSpace,
-                       index: usize)
-                       -> Option<&'a T> {
-        let v = self.get_slice(space);
-        if index < v.len() { Some(&v[index]) } else { None }
-    }
-
-    pub fn get<'a>(&'a self, space: ParamSpace, index: usize) -> &'a T {
-        &self.get_slice(space)[index]
-    }
-
-    pub fn iter<'a>(&'a self) -> Iter<'a,T> {
-        self.content.iter()
-    }
-
-    pub fn into_iter(self) -> IntoIter<T> {
-        self.content.into_iter()
-    }
-
-    pub fn iter_enumerated<'a>(&'a self) -> EnumeratedItems<'a,T> {
-        EnumeratedItems::new(self)
-    }
-
-    pub fn as_slice(&self) -> &[T] {
-        &self.content
-    }
-
-    pub fn into_vec(self) -> Vec<T> {
-        self.content
-    }
-
-    pub fn all_vecs<P>(&self, mut pred: P) -> bool where
-        P: FnMut(&[T]) -> bool,
-    {
-        let spaces = [TypeSpace, SelfSpace, FnSpace];
-        spaces.iter().all(|&space| { pred(self.get_slice(space)) })
-    }
-
-    pub fn all<P>(&self, pred: P) -> bool where P: FnMut(&T) -> bool {
-        self.iter().all(pred)
-    }
-
-    pub fn any<P>(&self, pred: P) -> bool where P: FnMut(&T) -> bool {
-        self.iter().any(pred)
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.all_vecs(|v| v.is_empty())
-    }
-
-    pub fn map<U, P>(&self, pred: P) -> VecPerParamSpace<U> where P: FnMut(&T) -> U {
-        let result = self.iter().map(pred).collect();
-        VecPerParamSpace::new_internal(result,
-                                       self.type_limit,
-                                       self.self_limit)
-    }
-
-    pub fn map_enumerated<U, P>(&self, pred: P) -> VecPerParamSpace<U> where
-        P: FnMut((ParamSpace, usize, &T)) -> U,
-    {
-        let result = self.iter_enumerated().map(pred).collect();
-        VecPerParamSpace::new_internal(result,
-                                       self.type_limit,
-                                       self.self_limit)
-    }
-
-    pub fn split(self) -> SeparateVecsPerParamSpace<T> {
-        let VecPerParamSpace { type_limit, self_limit, content } = self;
-
-        let mut content_iter = content.into_iter();
-
-        SeparateVecsPerParamSpace {
-            types: content_iter.by_ref().take(type_limit).collect(),
-            selfs: content_iter.by_ref().take(self_limit - type_limit).collect(),
-            fns: content_iter.collect()
-        }
-    }
-
-    pub fn with_slice(mut self, space: ParamSpace, slice: &[T])
-                    -> VecPerParamSpace<T>
-        where T: Clone
-    {
-        assert!(self.is_empty_in(space));
-        for t in slice {
-            self.push(space, t.clone());
-        }
-
-        self
-    }
-}
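The layout above (one backing vector split into three spaces by two cut points) can be illustrated with a small, self-contained sketch; the names below are stand-ins for illustration, not part of the compiler's API:

```rust
// Minimal stand-in for the flat "one Vec, two limits" representation.
struct PerSpace<T> {
    type_limit: usize, // end of the type space
    self_limit: usize, // end of the self space
    content: Vec<T>,   // type space, then self space, then fn space
}

impl<T> PerSpace<T> {
    fn new(types: Vec<T>, selfs: Vec<T>, fns: Vec<T>) -> PerSpace<T> {
        let type_limit = types.len();
        let self_limit = type_limit + selfs.len();
        let mut content = types;
        content.extend(selfs);
        content.extend(fns);
        PerSpace { type_limit: type_limit, self_limit: self_limit, content: content }
    }

    fn types(&self) -> &[T] { &self.content[..self.type_limit] }
    fn selfs(&self) -> &[T] { &self.content[self.type_limit..self.self_limit] }
    fn fns(&self) -> &[T] { &self.content[self.self_limit..] }
}

fn main() {
    let v = PerSpace::new(vec!["T", "U"], vec!["Self"], vec!["F"]);
    assert_eq!(v.types(), &["T", "U"][..]);
    assert_eq!(v.selfs(), &["Self"][..]);
    assert_eq!(v.fns(), &["F"][..]);
}
```

Keeping a single allocation makes per-space slicing cheap, at the cost of `push` having to insert into the middle of the vector, which is why the doc comment on `push` above warns that it is not a cheap operation.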
-
-#[derive(Clone)]
-pub struct EnumeratedItems<'a,T:'a> {
-    vec: &'a VecPerParamSpace<T>,
-    space_index: usize,
-    elem_index: usize
-}
-
-impl<'a,T> EnumeratedItems<'a,T> {
-    fn new(v: &'a VecPerParamSpace<T>) -> EnumeratedItems<'a,T> {
-        let mut result = EnumeratedItems { vec: v, space_index: 0, elem_index: 0 };
-        result.adjust_space();
-        result
-    }
-
-    fn adjust_space(&mut self) {
-        let spaces = ParamSpace::all();
-        while
-            self.space_index < spaces.len() &&
-            self.elem_index >= self.vec.len(spaces[self.space_index])
-        {
-            self.space_index += 1;
-            self.elem_index = 0;
-        }
-    }
-}
-
-impl<'a,T> Iterator for EnumeratedItems<'a,T> {
-    type Item = (ParamSpace, usize, &'a T);
-
-    fn next(&mut self) -> Option<(ParamSpace, usize, &'a T)> {
-        let spaces = ParamSpace::all();
-        if self.space_index < spaces.len() {
-            let space = spaces[self.space_index];
-            let index = self.elem_index;
-            let item = self.vec.get(space, index);
-
-            self.elem_index += 1;
-            self.adjust_space();
-
-            Some((space, index, item))
-        } else {
-            None
-        }
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        let size = self.vec.as_slice().len();
-        (size, Some(size))
-    }
-}
-
-impl<T> IntoIterator for VecPerParamSpace<T> {
-    type Item = T;
-    type IntoIter = IntoIter<T>;
-
-    fn into_iter(self) -> IntoIter<T> {
-        self.into_vec().into_iter()
-    }
-}
-
-impl<'a,T> IntoIterator for &'a VecPerParamSpace<T> {
-    type Item = &'a T;
-    type IntoIter = Iter<'a, T>;
-
-    fn into_iter(self) -> Iter<'a, T> {
-        self.as_slice().into_iter()
-    }
-}
-
-
-///////////////////////////////////////////////////////////////////////////
-// Public trait `Subst`
-//
-// Just call `foo.subst(tcx, substs)` to perform a substitution across
-// `foo`. Or use `foo.subst_spanned(tcx, substs, Some(span))` when
-// there is more information available (for better errors).
-
-pub trait Subst<'tcx> : Sized {
-    fn subst(&self, tcx: &ty::ctxt<'tcx>, substs: &Substs<'tcx>) -> Self {
-        self.subst_spanned(tcx, substs, None)
-    }
-
-    fn subst_spanned(&self, tcx: &ty::ctxt<'tcx>,
-                     substs: &Substs<'tcx>,
-                     span: Option<Span>)
-                     -> Self;
-}
-
-impl<'tcx, T:TypeFoldable<'tcx>> Subst<'tcx> for T {
-    fn subst_spanned(&self,
-                     tcx: &ty::ctxt<'tcx>,
-                     substs: &Substs<'tcx>,
-                     span: Option<Span>)
-                     -> T
-    {
-        let mut folder = SubstFolder { tcx: tcx,
-                                       substs: substs,
-                                       span: span,
-                                       root_ty: None,
-                                       ty_stack_depth: 0,
-                                       region_binders_passed: 0 };
-        (*self).fold_with(&mut folder)
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// The actual substitution engine itself is a type folder.
-
-struct SubstFolder<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
-    substs: &'a Substs<'tcx>,
-
-    // The location for which the substitution is performed, if available.
-    span: Option<Span>,
-
-    // The root type that is being substituted, if available.
-    root_ty: Option<Ty<'tcx>>,
-
-    // Depth of type stack
-    ty_stack_depth: usize,
-
-    // Number of region binders we have passed through while doing the substitution
-    region_binders_passed: u32,
-}
-
-impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
-    fn tcx(&self) -> &ty::ctxt<'tcx> { self.tcx }
-
-    fn enter_region_binder(&mut self) {
-        self.region_binders_passed += 1;
-    }
-
-    fn exit_region_binder(&mut self) {
-        self.region_binders_passed -= 1;
-    }
-
-    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
-        // Note: This routine only handles regions that are bound on
-        // type declarations and other outer declarations, not those
-        // bound in *fn types*. Region substitution of the bound
-        // regions that appear in a function signature is done using
-        // the specialized routine `ty::replace_late_regions()`.
-        match r {
-            ty::ReEarlyBound(data) => {
-                match self.substs.regions {
-                    ErasedRegions => ty::ReStatic,
-                    NonerasedRegions(ref regions) =>
-                        match regions.opt_get(data.space, data.index as usize) {
-                            Some(&r) => {
-                                self.shift_region_through_binders(r)
-                            }
-                            None => {
-                                let span = self.span.unwrap_or(DUMMY_SP);
-                                self.tcx().sess.span_bug(
-                                    span,
-                                    &format!("Type parameter out of range \
-                                              when substituting in region {} (root type={:?}) \
-                                              (space={:?}, index={})",
-                                             data.name,
-                                             self.root_ty,
-                                             data.space,
-                                             data.index));
-                            }
-                        }
-                }
-            }
-            _ => r
-        }
-    }
-
-    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
-        if !t.needs_subst() {
-            return t;
-        }
-
-        // track the root type we were asked to substitute
-        let depth = self.ty_stack_depth;
-        if depth == 0 {
-            self.root_ty = Some(t);
-        }
-        self.ty_stack_depth += 1;
-
-        let t1 = match t.sty {
-            ty::TyParam(p) => {
-                self.ty_for_param(p, t)
-            }
-            _ => {
-                t.super_fold_with(self)
-            }
-        };
-
-        assert_eq!(depth + 1, self.ty_stack_depth);
-        self.ty_stack_depth -= 1;
-        if depth == 0 {
-            self.root_ty = None;
-        }
-
-        return t1;
-    }
-}
-
-impl<'a,'tcx> SubstFolder<'a,'tcx> {
-    fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> {
-        // Look up the type in the substitutions. It really should be in there.
-        let opt_ty = self.substs.types.opt_get(p.space, p.idx as usize);
-        let ty = match opt_ty {
-            Some(t) => *t,
-            None => {
-                let span = self.span.unwrap_or(DUMMY_SP);
-                self.tcx().sess.span_bug(
-                    span,
-                    &format!("Type parameter `{:?}` ({:?}/{:?}/{}) out of range \
-                                 when substituting (root type={:?}) substs={:?}",
-                            p,
-                            source_ty,
-                            p.space,
-                            p.idx,
-                            self.root_ty,
-                            self.substs));
-            }
-        };
-
-        self.shift_regions_through_binders(ty)
-    }
-
-    /// It is sometimes necessary to adjust the debruijn indices during substitution. This occurs
-    /// when we are substituting a type with escaping regions into a context where we have passed
-    /// through region binders. That's quite a mouthful. Let's see an example:
-    ///
-    /// ```
-    /// type Func<A> = fn(A);
-    /// type MetaFunc = for<'a> fn(Func<&'a int>)
-    /// ```
-    ///
-    /// The type `MetaFunc`, when fully expanded, will be
-    ///
-    ///     for<'a> fn(fn(&'a int))
-    ///             ^~ ^~ ^~~
-    ///             |  |  |
-    ///             |  |  DebruijnIndex of 2
-    ///             Binders
-    ///
-    /// Here the `'a` lifetime is bound in the outer function, but appears as an argument of the
-    /// inner one. Therefore, that appearance will have a DebruijnIndex of 2, because we must skip
-    /// over the inner binder (remember that we count Debruijn indices from 1). However, in the
-    /// definition of `MetaFunc`, the binder is not visible, so the type `&'a int` will have a
-    /// debruijn index of 1. It's only during the substitution that we can see we must increase the
-    /// depth by 1 to account for the binder that we passed through.
-    ///
-    /// As a second example, consider this twist:
-    ///
-    /// ```
-    /// type FuncTuple<A> = (A,fn(A));
-    /// type MetaFuncTuple = for<'a> fn(FuncTuple<&'a int>)
-    /// ```
-    ///
-    /// Here the final type will be:
-    ///
-    ///     for<'a> fn((&'a int, fn(&'a int)))
-    ///                 ^~~         ^~~
-    ///                 |           |
-    ///          DebruijnIndex of 1 |
-    ///                      DebruijnIndex of 2
-    ///
-    /// As indicated in the diagram, here the same type `&'a int` is substituted once, but in the
-    /// first case we do not increase the Debruijn index and in the second case we do. The reason
-    /// is that only in the second case have we passed through a fn binder.
-    fn shift_regions_through_binders(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
-        debug!("shift_regions(ty={:?}, region_binders_passed={:?}, has_escaping_regions={:?})",
-               ty, self.region_binders_passed, ty.has_escaping_regions());
-
-        if self.region_binders_passed == 0 || !ty.has_escaping_regions() {
-            return ty;
-        }
-
-        let result = ty::fold::shift_regions(self.tcx(), self.region_binders_passed, &ty);
-        debug!("shift_regions: shifted result = {:?}", result);
-
-        result
-    }
-
-    fn shift_region_through_binders(&self, region: ty::Region) -> ty::Region {
-        ty::fold::shift_region(region, self.region_binders_passed)
-    }
-}
diff --git a/src/librustc/middle/traits/README.md b/src/librustc/middle/traits/README.md
deleted file mode 100644 (file)
index 92982af..0000000
+++ /dev/null
@@ -1,430 +0,0 @@
-# TRAIT RESOLUTION
-
-This document describes the general process and points out some non-obvious
-things.
-
-## Major concepts
-
-Trait resolution is the process of pairing up an impl with each
-reference to a trait. So, for example, if there is a generic function like:
-
-    fn clone_slice<T:Clone>(x: &[T]) -> Vec<T> { ... }
-
-and then a call to that function:
-
-    let v: Vec<isize> = clone_slice(&[1, 2, 3]);
-
-it is the job of trait resolution to figure out (in this case)
-whether there exists an impl of `isize : Clone`
-
-Note that in some cases, like generic functions, we may not be able to
-find a specific impl, but we can figure out that the caller must
-provide an impl. To see what I mean, consider the body of `clone_slice`:
-
-    fn clone_slice<T:Clone>(x: &[T]) -> Vec<T> {
-        let mut v = Vec::new();
-        for e in x {
-            v.push((*e).clone()); // (*)
-        }
-        v
-    }
-
-The line marked `(*)` is only legal if `T` (the type of `*e`)
-implements the `Clone` trait. Naturally, since we don't know what `T`
-is, we can't find the specific impl; but based on the bound `T:Clone`,
-we can say that there exists an impl which the caller must provide.
-
-We use the term *obligation* to refer to a trait reference in need of
-an impl.
-
-## Overview
-
-Trait resolution consists of three major parts:
-
-- SELECTION: Deciding how to resolve a specific obligation. For
-  example, selection might decide that a specific obligation can be
-  resolved by employing an impl which matches the self type, or by
-  using a parameter bound. In the case of an impl, selecting one
-  obligation can create *nested obligations* because of where clauses
-  on the impl itself. It may also require evaluating those nested
-  obligations to resolve ambiguities.
-
-- FULFILLMENT: The fulfillment code is what tracks that obligations
-  are completely fulfilled. Basically it is a worklist of obligations
-  to be selected: once selection is successful, the obligation is
-  removed from the worklist and any nested obligations are enqueued.
-
-- COHERENCE: The coherence checks are intended to ensure that there
-  are never overlapping impls, where two impls could be used with
-  equal precedence.
-
-## Selection
-
-Selection is the process of deciding whether an obligation can be
-resolved and, if so, how it is to be resolved (via impl, where clause, etc).
-The main interface is the `select()` function, which takes an obligation
-and returns a `SelectionResult`. There are three possible outcomes:
-
-- `Ok(Some(selection))` -- yes, the obligation can be resolved, and
-  `selection` indicates how. If the impl was resolved via an impl,
-  then `selection` may also indicate nested obligations that are required
-  by the impl.
-
-- `Ok(None)` -- we are not yet sure whether the obligation can be
-  resolved or not. This happens most commonly when the obligation
-  contains unbound type variables.
-
-- `Err(err)` -- the obligation definitely cannot be resolved due to a
-  type error, or because there are no impls that could possibly apply,
-  etc.
-
-The basic algorithm for selection is broken into two big phases:
-candidate assembly and confirmation.
-
-### Candidate assembly
-
-Searches for impls/where-clauses/etc that might
-possibly be used to satisfy the obligation. Each of those is called
-a candidate. To avoid ambiguity, we want to find exactly one
-candidate that is definitively applicable. In some cases, we may not
-know whether an impl/where-clause applies or not -- this occurs when
-the obligation contains unbound inference variables.
-
-The basic idea for candidate assembly is to do a first pass in which
-we identify all possible candidates. During this pass, all that we do
-is try and unify the type parameters. (In particular, we ignore any
-nested where clauses.) Presuming that this unification succeeds, the
-impl is added as a candidate.
-
-Once this first pass is done, we can examine the set of candidates. If
-it is a singleton set, then we are done: this is the only impl in
-scope that could possibly apply. Otherwise, we can winnow down the set
-of candidates by using where clauses and other conditions. If this
-reduced set yields a single, unambiguous entry, we're good to go,
-otherwise the result is considered ambiguous.
-
-#### The basic process: Inferring based on the impls we see
-
-This process is easier if we work through some examples. Consider
-the following trait:
-
-```
-trait Convert<Target> {
-    fn convert(&self) -> Target;
-}
-```
-
-This trait just has one method. It's about as simple as it gets. It
-converts from the (implicit) `Self` type to the `Target` type. If we
-wanted to permit conversion between `isize` and `usize`, we might
-implement `Convert` like so:
-
-```rust
-impl Convert<usize> for isize { ... } // isize -> usize
-impl Convert<isize> for usize { ... } // usize -> isize
-```
-
-Now imagine there is some code like the following:
-
-```rust
-let x: isize = ...;
-let y = x.convert();
-```
-
-The call to convert will generate a trait reference `Convert<$Y> for
-isize`, where `$Y` is the type variable representing the type of
-`y`. When we match this against the two impls we can see, we will find
-that only one remains: `Convert<usize> for isize`. Therefore, we can
-select this impl, which will cause the type of `$Y` to be unified to
-`usize`. (Note that while assembling candidates, we do the initial
-unifications in a transaction, so that they don't affect one another.)
-
-There are tests to this effect in src/test/run-pass:
-
-   traits-multidispatch-infer-convert-source-and-target.rs
-   traits-multidispatch-infer-convert-target.rs
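A minimal, self-contained version of this scenario (the method bodies and the `main` function are filled in here purely for illustration) shows the inference at work:

```rust
trait Convert<Target> {
    fn convert(&self) -> Target;
}

impl Convert<usize> for isize {
    fn convert(&self) -> usize { *self as usize } // isize -> usize
}

impl Convert<isize> for usize {
    fn convert(&self) -> isize { *self as isize } // usize -> isize
}

fn main() {
    let x: isize = 42;
    // The obligation `isize : Convert<$Y>` matches only the first impl,
    // so selection unifies `$Y` with `usize`, giving `y: usize`.
    let y = x.convert();
    assert_eq!(y, 42_usize);
}
```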
-
-#### Winnowing: Resolving ambiguities
-
-But what happens if there are multiple impls where all the types
-unify? Consider this example:
-
-```rust
-trait Get {
-    fn get(&self) -> Self;
-}
-
-impl<T:Copy> Get for T {
-    fn get(&self) -> T { *self }
-}
-
-impl<T:Get> Get for Box<T> {
-    fn get(&self) -> Box<T> { box get_it(&**self) }
-}
-```
-
-What happens when we invoke `get_it(&box 1_u16)`, for example? In this
-case, the `Self` type is `Box<u16>` -- that unifies with both impls,
-because the first applies to all types, and the second to all
-boxes. In the olden days we'd have called this ambiguous. But what we
-do now is do a second *winnowing* pass that considers where clauses
-and attempts to remove candidates -- in this case, the first impl only
-applies if `Box<u16> : Copy`, which doesn't hold. After winnowing,
-then, we are left with just one candidate, so we can proceed. There is
-a test of this in `src/test/run-pass/traits-conditional-dispatch.rs`.
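To see the winnowing step in a self-contained, compilable form, here is a sketch in the spirit of that test, using a local `MyBox` wrapper in place of `Box` so that it stands alone (the wrapper, method bodies, and `main` are illustrative additions):

```rust
trait Get {
    fn get(&self) -> Self;
}

struct MyBox<T>(T);

impl<T: Copy> Get for T {
    fn get(&self) -> T { *self }
}

impl<T: Get> Get for MyBox<T> {
    fn get(&self) -> MyBox<T> { MyBox(get_it(&self.0)) }
}

fn get_it<G: Get>(g: &G) -> G {
    g.get()
}

fn main() {
    // `MyBox<u16>` unifies with both impls, but winnowing discards the
    // blanket impl because `MyBox<u16> : Copy` does not hold.
    let b = get_it(&MyBox(1_u16));
    assert_eq!(b.0, 1);
}
```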
-
-#### Matching
-
-The subroutines that decide whether a particular impl/where-clause/etc
-applies to a particular obligation. At the moment, this amounts to
-unifying the self types, but in the future we may also recursively
-consider some of the nested obligations, in the case of an impl.
-
-#### Lifetimes and selection
-
-Because of how lifetime inference works, it is not possible to
-give back immediate feedback as to whether a unification or subtype
-relationship between lifetimes holds or not. Therefore, lifetime
-matching is *not* considered during selection. This is reflected in
-the fact that subregion assignment is infallible. This may yield
-lifetime constraints that will later be found to be in error (in
-contrast, the non-lifetime-constraints have already been checked
-during selection and can never cause an error, though naturally they
-may lead to other errors downstream).
-
-#### Where clauses
-
-Besides an impl, the other major way to resolve an obligation is via a
-where clause. The selection process is always given a *parameter
-environment* which contains a list of where clauses, which are
-basically obligations that we can assume are satisfiable. We will iterate
-over that list and check whether our current obligation can be found
-in that list, and if so it is considered satisfied. More precisely, we
-want to check whether there is a where-clause obligation that is for
-the same trait (or some subtrait) and for which the self types match,
-using the definition of *matching* given above.
-
-Consider this simple example:
-
-     trait A1 { ... }
-     trait A2 : A1 { ... }
-
-     trait B { ... }
-
-     fn foo<X:A2+B> { ... }
-
-Clearly we can use methods offered by `A1`, `A2`, or `B` within the
-body of `foo`. In each case, that will incur an obligation like `X :
-A1` or `X : A2`. The parameter environment will contain two
-where-clauses, `X : A2` and `X : B`. For each obligation, then, we
-search this list of where-clauses.  To resolve an obligation `X:A1`,
-we would note that `X:A2` implies that `X:A1`.
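Filled out into compilable form (the method names and the `i32` impls are added here purely for illustration), the example looks like this:

```rust
trait A1 { fn a1(&self) {} }
trait A2: A1 { fn a2(&self) {} }
trait B { fn b(&self) {} }

// The parameter environment of `foo` contains the where-clauses
// `X: A2` and `X: B`; the obligation `X: A1` incurred by `x.a1()` is
// satisfied because `X: A2` implies `X: A1`.
fn foo<X: A2 + B>(x: &X) {
    x.a1();
    x.a2();
    x.b();
}

impl A1 for i32 {}
impl A2 for i32 {}
impl B for i32 {}

fn main() {
    foo(&0_i32);
}
```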
-
-### Confirmation
-
-Confirmation unifies the output type parameters of the trait with the
-values found in the obligation, possibly yielding a type error.  If we
-return to our example of the `Convert` trait from the previous
-section, confirmation is where an error would be reported, because the
-impl specified that `T` would be `usize`, but the obligation reported
-`char`. Hence the result of selection would be an error.
-
-### Selection during translation
-
-During type checking, we do not store the results of trait selection.
-We simply wish to verify that trait selection will succeed. Then
-later, at trans time, when we have all concrete types available, we
-can repeat the trait selection.  In this case, we do not consider any
-where-clauses to be in scope. We therefore know that each resolution
-will resolve to a particular impl.
-
-One interesting twist has to do with nested obligations. In general, in trans,
-we only need to do a "shallow" selection for an obligation. That is, we wish to
-identify which impl applies, but we do not (yet) need to decide how to select
-any nested obligations. Nonetheless, we *do* currently do a complete resolution,
-and that is because it can sometimes inform the results of type inference. That is,
-we do not have the full substitutions in terms of the type variables of the impl available
-to us, so we must run trait selection to figure everything out.
-
-Here is an example:
-
-    trait Foo { ... }
-    impl<U,T:Bar<U>> Foo for Vec<T> { ... }
-
-    impl Bar<usize> for isize { ... }
-
-After one shallow round of selection for an obligation like `Vec<isize>
-: Foo`, we would know which impl we want, and we would know that
-`T=isize`, but we do not know the type of `U`.  We must select the
-nested obligation `isize : Bar<U>` to find out that `U=usize`.
-
-It would be good to only do *just as much* nested resolution as
-necessary. Currently, though, we just do a full resolution.
-
-# Higher-ranked trait bounds
-
-One of the more subtle concepts at work is *higher-ranked trait
-bounds*. An example of such a bound is `for<'a> MyTrait<&'a isize>`.
-Let's walk through how selection on higher-ranked trait references
-works.
-
-## Basic matching and skolemization leaks
-
-Let's walk through the test `compile-fail/hrtb-just-for-static.rs` to see
-how it works. The test starts with the trait `Foo`:
-
-```rust
-trait Foo<X> {
-    fn foo(&self, x: X) { }
-}
-```
-
-Let's say we have a function `want_hrtb` that wants a type which
-implements `Foo<&'a isize>` for any `'a`:
-
-```rust
-fn want_hrtb<T>() where T : for<'a> Foo<&'a isize> { ... }
-```
-
-Now we have a struct `AnyInt` that implements `Foo<&'a isize>` for any
-`'a`:
-
-```rust
-struct AnyInt;
-impl<'a> Foo<&'a isize> for AnyInt { }
-```
-
-And the question is, does `AnyInt : for<'a> Foo<&'a isize>`? We want the
-answer to be yes. The algorithm for figuring it out is closely related
-to the subtyping for higher-ranked types (which is described in
-`middle::infer::higher_ranked::doc`, but also in a [paper by SPJ] that
-I recommend you read).
-
-1. Skolemize the obligation.
-2. Match the impl against the skolemized obligation.
-3. Check for skolemization leaks.
-
-[paper by SPJ]: http://research.microsoft.com/en-us/um/people/simonpj/papers/higher-rank/
-
-So let's work through our example. The first thing we would do is to
-skolemize the obligation, yielding `AnyInt : Foo<&'0 isize>` (here `'0`
-represents skolemized region #0). Note that we now have no quantifiers;
-in terms of the compiler type, this changes from a `ty::PolyTraitRef`
-to a `TraitRef`. We would then create the `TraitRef` from the impl,
-using fresh variables for its bound regions (and thus getting
-`Foo<&'$a isize>`, where `'$a` is the inference variable for `'a`). Next
-we relate the two trait refs, yielding a graph with the constraint
-that `'0 == '$a`. Finally, we check for skolemization "leaks" -- a
-leak is basically any attempt to relate a skolemized region to another
-skolemized region, or to any region that pre-existed the impl match.
-The leak check is done by searching from the skolemized region to find
-the set of regions that it is related to in any way. This is called
-the "taint" set. To pass the check, that set must consist *solely* of
-itself and region variables from the impl. If the taint set includes
-any other region, then the match is a failure. In this case, the taint
-set for `'0` is `{'0, '$a}`, and hence the check will succeed.
-
-Let's consider a failure case. Imagine we also have a struct
-
-```rust
-struct StaticInt;
-impl Foo<&'static isize> for StaticInt;
-```
-
-We want the obligation `StaticInt : for<'a> Foo<&'a isize>` to be
-considered unsatisfied. The check begins just as before. `'a` is
-skolemized to `'0` and the impl trait reference is instantiated to
-`Foo<&'static isize>`. When we relate those two, we get a constraint
-like `'static == '0`. This means that the taint set for `'0` is `{'0,
-'static}`, which fails the leak check.
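Put together as a compilable sketch (method bodies and `main` are filled in for illustration), the accepted and rejected cases look like this:

```rust
trait Foo<X> {
    fn foo(&self, _x: X) {}
}

struct AnyInt;
impl<'a> Foo<&'a isize> for AnyInt {}

struct StaticInt;
impl Foo<&'static isize> for StaticInt {}

fn want_hrtb<T>(_t: T) where T: for<'a> Foo<&'a isize> {}

fn main() {
    // `AnyInt : for<'a> Foo<&'a isize>` holds, so this is accepted.
    want_hrtb(AnyInt);

    // `StaticInt` only implements `Foo<&'static isize>`; constructing it
    // is fine, but the commented-out call would be rejected by the leak check.
    let _static_int = StaticInt;
    // want_hrtb(StaticInt);
}
```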
-
-## Higher-ranked trait obligations
-
-Once the basic matching is done, we get to another interesting topic:
-how to deal with impl obligations. I'll work through a simple example
-here. Imagine we have the traits `Foo` and `Bar` and an associated impl:
-
-```
-trait Foo<X> {
-    fn foo(&self, x: X) { }
-}
-
-trait Bar<X> {
-    fn bar(&self, x: X) { }
-}
-
-impl<X,F> Foo<X> for F
-    where F : Bar<X>
-{
-}
-```
-
-Now let's say we have an obligation `for<'a> Foo<&'a isize>` and we match
-this impl. What obligation is generated as a result? We want to get
-`for<'a> Bar<&'a isize>`, but how does that happen?
-
-After the matching, we are in a position where we have a skolemized
-substitution like `X => &'0 isize`. If we apply this substitution to the
-impl obligations, we get `F : Bar<&'0 isize>`. Obviously this is not
-directly usable because the skolemized region `'0` cannot leak out of
-our computation.
-
-What we do is to create an inverse mapping from the taint set of `'0`
-back to the original bound region (`'a`, here) that `'0` resulted
-from. (This is done in `higher_ranked::plug_leaks`). We know that the
-leak check passed, so this taint set consists solely of the skolemized
-region itself plus various intermediate region variables. We then walk
-the trait-reference and convert every region in that taint set back to
-a late-bound region, so in this case we'd wind up with `for<'a> F :
-Bar<&'a isize>`.
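A self-contained sketch of this scenario (the method bodies and `main` are filled in for illustration):

```rust
trait Foo<X> { fn foo(&self, _x: X) {} }
trait Bar<X> { fn bar(&self, _x: X) {} }

impl<X, F> Foo<X> for F where F: Bar<X> {}

struct AnyInt;
impl<'a> Bar<&'a isize> for AnyInt {}

fn want_foo<T>(_t: T) where T: for<'a> Foo<&'a isize> {}

fn main() {
    // Matching the blanket impl against `for<'a> Foo<&'a isize>` produces
    // the nested obligation `for<'a> AnyInt : Bar<&'a isize>`, which holds.
    want_foo(AnyInt);
}
```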
-
-# Caching and subtle considerations therewith
-
-In general we attempt to cache the results of trait selection.  This
-is a somewhat complex process. Part of the reason for this is that we
-want to be able to cache results even when all the types in the trait
-reference are not fully known. In that case, it may happen that the
-trait selection process is also influencing type variables, so we have
-to be able to not only cache the *result* of the selection process,
-but *replay* its effects on the type variables.
-
-## An example
-
-The high-level idea of how the cache works is that we first replace
-all unbound inference variables with skolemized versions. Therefore,
-if we had a trait reference `usize : Foo<$1>`, where `$n` is an unbound
-inference variable, we might replace it with `usize : Foo<%0>`, where
-`%n` is a skolemized type. We would then look this up in the cache.
-If we found a hit, the hit would tell us the immediate next step to
-take in the selection process: i.e., apply impl #22, or apply where
-clause `X : Foo<Y>`. Let's say in this case there is no hit.
-Therefore, we search through impls and where clauses and so forth, and
-we come to the conclusion that the only possible impl is this one,
-with def-id 22:
-
-    impl Foo<isize> for usize { ... } // Impl #22
-
-We would then record in the cache `usize : Foo<%0> ==>
-ImplCandidate(22)`. Next we would confirm `ImplCandidate(22)`, which
-would (as a side-effect) unify `$1` with `isize`.
-
-Now, at some later time, we might come along and see a `usize :
-Foo<$3>`.  When skolemized, this would yield `usize : Foo<%0>`, just as
-before, and hence the cache lookup would succeed, yielding
-`ImplCandidate(22)`. We would confirm `ImplCandidate(22)` which would
-(as a side-effect) unify `$3` with `isize`.
-
-## Where clauses and the local vs global cache
-
-One subtle interaction is that the results of trait lookup will vary
-depending on what where clauses are in scope. Therefore, we actually
-have *two* caches, a local and a global cache. The local cache is
-attached to the `ParameterEnvironment` and the global cache attached
-to the `tcx`. We use the local cache whenever the result might depend
-on the where clauses that are in scope. The determination of which
-cache to use is done by the method `pick_candidate_cache` in
-`select.rs`. At the moment, we use a very simple, conservative rule:
-if there are any where-clauses in scope, then we use the local cache.
-We used to try and draw finer-grained distinctions, but that led to a
-series of annoying and weird bugs like #22019 and #18290. This simple
-rule seems to be pretty clearly safe and also still retains a very
-high hit rate (~95% when compiling rustc).
diff --git a/src/librustc/middle/traits/coherence.rs b/src/librustc/middle/traits/coherence.rs
deleted file mode 100644 (file)
index 0f95aa7..0000000
+++ /dev/null
@@ -1,343 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! See `README.md` for high-level documentation
-
-use super::Normalized;
-use super::SelectionContext;
-use super::ObligationCause;
-use super::PredicateObligation;
-use super::project;
-use super::util;
-
-use middle::cstore::LOCAL_CRATE;
-use middle::def_id::DefId;
-use middle::subst::{Subst, Substs, TypeSpace};
-use middle::ty::{self, Ty};
-use middle::infer::{self, InferCtxt, TypeOrigin};
-use syntax::codemap::{DUMMY_SP, Span};
-
-#[derive(Copy, Clone)]
-struct InferIsLocal(bool);
-
-/// If there are types that satisfy both impls, returns a `TraitRef`
-/// with those types substituted (by updating the given `infcx`)
-pub fn overlapping_impls<'cx, 'tcx>(infcx: &InferCtxt<'cx, 'tcx>,
-                                    impl1_def_id: DefId,
-                                    impl2_def_id: DefId)
-                                    -> Option<ty::TraitRef<'tcx>>
-{
-    debug!("impl_can_satisfy(\
-           impl1_def_id={:?}, \
-           impl2_def_id={:?})",
-           impl1_def_id,
-           impl2_def_id);
-
-    let selcx = &mut SelectionContext::intercrate(infcx);
-    overlap(selcx, impl1_def_id, impl2_def_id)
-}
-
-/// Can both impl `a` and impl `b` be satisfied by a common type (including
-/// `where` clauses)? If so, returns a `TraitRef` that unifies the two impls.
-fn overlap<'cx, 'tcx>(selcx: &mut SelectionContext<'cx, 'tcx>,
-                      a_def_id: DefId,
-                      b_def_id: DefId)
-                      -> Option<ty::TraitRef<'tcx>>
-{
-    debug!("overlap(a_def_id={:?}, b_def_id={:?})",
-           a_def_id,
-           b_def_id);
-
-    let (a_trait_ref, a_obligations) = impl_trait_ref_and_oblig(selcx,
-                                                                a_def_id,
-                                                                util::fresh_type_vars_for_impl);
-
-    let (b_trait_ref, b_obligations) = impl_trait_ref_and_oblig(selcx,
-                                                                b_def_id,
-                                                                util::fresh_type_vars_for_impl);
-
-    debug!("overlap: a_trait_ref={:?} a_obligations={:?}", a_trait_ref, a_obligations);
-
-    debug!("overlap: b_trait_ref={:?} b_obligations={:?}", b_trait_ref, b_obligations);
-
-    // Do `a` and `b` unify? If not, no overlap.
-    if let Err(_) = infer::mk_eq_trait_refs(selcx.infcx(),
-                                            true,
-                                            TypeOrigin::Misc(DUMMY_SP),
-                                            a_trait_ref,
-                                            b_trait_ref) {
-        return None;
-    }
-
-    debug!("overlap: unification check succeeded");
-
-    // Are any of the obligations unsatisfiable? If so, no overlap.
-    let infcx = selcx.infcx();
-    let opt_failing_obligation =
-        a_obligations.iter()
-                     .chain(&b_obligations)
-                     .map(|o| infcx.resolve_type_vars_if_possible(o))
-                     .find(|o| !selcx.evaluate_obligation(o));
-
-    if let Some(failing_obligation) = opt_failing_obligation {
-        debug!("overlap: obligation unsatisfiable {:?}", failing_obligation);
-        return None
-    }
-
-    Some(selcx.infcx().resolve_type_vars_if_possible(&a_trait_ref))
-}
-
-pub fn trait_ref_is_knowable<'tcx>(tcx: &ty::ctxt<'tcx>, trait_ref: &ty::TraitRef<'tcx>) -> bool
-{
-    debug!("trait_ref_is_knowable(trait_ref={:?})", trait_ref);
-
-    // if the orphan rules pass, that means that no ancestor crate can
-    // impl this, so it's up to us.
-    if orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(false)).is_ok() {
-        debug!("trait_ref_is_knowable: orphan check passed");
-        return true;
-    }
-
-    // if the trait is not marked fundamental, then it's always possible that
-    // an ancestor crate will impl this in the future, if they haven't
-    // already
-    if
-        trait_ref.def_id.krate != LOCAL_CRATE &&
-        !tcx.has_attr(trait_ref.def_id, "fundamental")
-    {
-        debug!("trait_ref_is_knowable: trait is neither local nor fundamental");
-        return false;
-    }
-
-    // find out when some downstream (or cousin) crate could impl this
-    // trait-ref, presuming that all the parameters were instantiated
-    // with downstream types. If not, then it could only be
-    // implemented by an upstream crate, which means that the impl
-    // must be visible to us, and -- since the trait is fundamental
-    // -- we can test.
-    orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(true)).is_err()
-}
-
-type SubstsFn = for<'a,'tcx> fn(infcx: &InferCtxt<'a, 'tcx>,
-                                span: Span,
-                                impl_def_id: DefId)
-                                -> Substs<'tcx>;
-
-/// Instantiate fresh variables for all bound parameters of the impl
-/// and return the impl trait ref with those variables substituted.
-fn impl_trait_ref_and_oblig<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
-                                     impl_def_id: DefId,
-                                     substs_fn: SubstsFn)
-                                     -> (ty::TraitRef<'tcx>,
-                                         Vec<PredicateObligation<'tcx>>)
-{
-    let impl_substs =
-        &substs_fn(selcx.infcx(), DUMMY_SP, impl_def_id);
-    let impl_trait_ref =
-        selcx.tcx().impl_trait_ref(impl_def_id).unwrap();
-    let impl_trait_ref =
-        impl_trait_ref.subst(selcx.tcx(), impl_substs);
-    let Normalized { value: impl_trait_ref, obligations: normalization_obligations1 } =
-        project::normalize(selcx, ObligationCause::dummy(), &impl_trait_ref);
-
-    let predicates = selcx.tcx().lookup_predicates(impl_def_id);
-    let predicates = predicates.instantiate(selcx.tcx(), impl_substs);
-    let Normalized { value: predicates, obligations: normalization_obligations2 } =
-        project::normalize(selcx, ObligationCause::dummy(), &predicates);
-    let impl_obligations =
-        util::predicates_for_generics(ObligationCause::dummy(), 0, &predicates);
-
-    let impl_obligations: Vec<_> =
-        impl_obligations.into_iter()
-        .chain(normalization_obligations1)
-        .chain(normalization_obligations2)
-        .collect();
-
-    (impl_trait_ref, impl_obligations)
-}
-
-pub enum OrphanCheckErr<'tcx> {
-    NoLocalInputType,
-    UncoveredTy(Ty<'tcx>),
-}
-
-/// Checks the coherence orphan rules. `impl_def_id` should be the
-/// def-id of a trait impl. To pass, either the trait must be local, or else
-/// two conditions must be satisfied:
-///
-/// 1. All type parameters in `Self` must be "covered" by some local type constructor.
-/// 2. Some local type must appear in `Self`.
-pub fn orphan_check<'tcx>(tcx: &ty::ctxt<'tcx>,
-                          impl_def_id: DefId)
-                          -> Result<(), OrphanCheckErr<'tcx>>
-{
-    debug!("orphan_check({:?})", impl_def_id);
-
-    // We only expect this routine to be invoked on implementations
-    // of a trait, not inherent implementations.
-    let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
-    debug!("orphan_check: trait_ref={:?}", trait_ref);
-
-    // If the *trait* is local to the crate, ok.
-    if trait_ref.def_id.is_local() {
-        debug!("trait {:?} is local to current crate",
-               trait_ref.def_id);
-        return Ok(());
-    }
-
-    orphan_check_trait_ref(tcx, &trait_ref, InferIsLocal(false))
-}
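To make the rule concrete, here is what it permits and forbids from the point of view of a downstream crate (the types below are illustrative, not taken from the compiler or its tests):

```rust
use std::fmt;

// `Wrapper` is local to this crate; `Display` and `Vec` come from std.
struct Wrapper<T>(T);

// Allowed: the local type constructor `Wrapper` appears in Self and
// covers the type parameter `T`.
impl<T: fmt::Debug> fmt::Display for Wrapper<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self.0)
    }
}

// Rejected by the orphan check if uncommented: neither the trait nor
// any input type is local to this crate.
// impl<T> fmt::Display for Vec<T> { /* ... */ }

fn main() {
    println!("{}", Wrapper(vec![1, 2, 3]));
}
```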
-
-fn orphan_check_trait_ref<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                trait_ref: &ty::TraitRef<'tcx>,
-                                infer_is_local: InferIsLocal)
-                                -> Result<(), OrphanCheckErr<'tcx>>
-{
-    debug!("orphan_check_trait_ref(trait_ref={:?}, infer_is_local={})",
-           trait_ref, infer_is_local.0);
-
-    // First, create an ordered iterator over all the type parameters to the trait, with the self
-    // type appearing first.
-    let input_tys = Some(trait_ref.self_ty());
-    let input_tys = input_tys.iter().chain(trait_ref.substs.types.get_slice(TypeSpace));
-
-    // Find the first input type that either references a type parameter OR
-    // some local type.
-    for input_ty in input_tys {
-        if ty_is_local(tcx, input_ty, infer_is_local) {
-            debug!("orphan_check_trait_ref: ty_is_local `{:?}`", input_ty);
-
-            // First local input type. Check that there are no
-            // uncovered type parameters.
-            let uncovered_tys = uncovered_tys(tcx, input_ty, infer_is_local);
-            for uncovered_ty in uncovered_tys {
-                if let Some(param) = uncovered_ty.walk().find(|t| is_type_parameter(t)) {
-                    debug!("orphan_check_trait_ref: uncovered type `{:?}`", param);
-                    return Err(OrphanCheckErr::UncoveredTy(param));
-                }
-            }
-
-            // OK, found local type, all prior types upheld invariant.
-            return Ok(());
-        }
-
-        // Otherwise, enforce invariant that there are no type
-        // parameters reachable.
-        if !infer_is_local.0 {
-            if let Some(param) = input_ty.walk().find(|t| is_type_parameter(t)) {
-                debug!("orphan_check_trait_ref: uncovered type `{:?}`", param);
-                return Err(OrphanCheckErr::UncoveredTy(param));
-            }
-        }
-    }
-
-    // If we exit the above loop, we never found a local type.
-    debug!("orphan_check_trait_ref: no local type");
-    return Err(OrphanCheckErr::NoLocalInputType);
-}
-
-fn uncovered_tys<'tcx>(tcx: &ty::ctxt<'tcx>,
-                       ty: Ty<'tcx>,
-                       infer_is_local: InferIsLocal)
-                       -> Vec<Ty<'tcx>>
-{
-    if ty_is_local_constructor(tcx, ty, infer_is_local) {
-        vec![]
-    } else if fundamental_ty(tcx, ty) {
-        ty.walk_shallow()
-          .flat_map(|t| uncovered_tys(tcx, t, infer_is_local))
-          .collect()
-    } else {
-        vec![ty]
-    }
-}
-
-fn is_type_parameter<'tcx>(ty: Ty<'tcx>) -> bool {
-    match ty.sty {
-        // FIXME(#20590) straighten story about projection types
-        ty::TyProjection(..) | ty::TyParam(..) => true,
-        _ => false,
-    }
-}
-
-fn ty_is_local<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> bool
-{
-    ty_is_local_constructor(tcx, ty, infer_is_local) ||
-        fundamental_ty(tcx, ty) && ty.walk_shallow().any(|t| ty_is_local(tcx, t, infer_is_local))
-}
-
-fn fundamental_ty<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool
-{
-    match ty.sty {
-        ty::TyBox(..) | ty::TyRef(..) =>
-            true,
-        ty::TyEnum(def, _) | ty::TyStruct(def, _) =>
-            def.is_fundamental(),
-        ty::TyTrait(ref data) =>
-            tcx.has_attr(data.principal_def_id(), "fundamental"),
-        _ =>
-            false
-    }
-}
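
A hedged sketch of why fundamentality matters for the orphan check; the crate `remote` and its trait `Remote` are hypothetical stand-ins for any foreign trait with no conflicting impls, so this fragment is not compilable on its own:

    extern crate remote; // hypothetical foreign crate
    use remote::Remote;  // hypothetical foreign trait

    struct Local;

    // Accepted: `Box` is #[fundamental], so `Box<Local>` still counts as local.
    impl Remote for Box<Local> {}

    // Rejected with `OrphanCheckErr::UncoveredTy`: the parameter `T` is not
    // covered by any local type.
    // impl<T> Remote for Box<T> {}
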
-
-fn ty_is_local_constructor<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                 ty: Ty<'tcx>,
-                                 infer_is_local: InferIsLocal)
-                                 -> bool
-{
-    debug!("ty_is_local_constructor({:?})", ty);
-
-    match ty.sty {
-        ty::TyBool |
-        ty::TyChar |
-        ty::TyInt(..) |
-        ty::TyUint(..) |
-        ty::TyFloat(..) |
-        ty::TyStr |
-        ty::TyBareFn(..) |
-        ty::TyArray(..) |
-        ty::TySlice(..) |
-        ty::TyRawPtr(..) |
-        ty::TyRef(..) |
-        ty::TyTuple(..) |
-        ty::TyParam(..) |
-        ty::TyProjection(..) => {
-            false
-        }
-
-        ty::TyInfer(..) => {
-            infer_is_local.0
-        }
-
-        ty::TyEnum(def, _) |
-        ty::TyStruct(def, _) => {
-            def.did.is_local()
-        }
-
-        ty::TyBox(_) => { // Box<T>
-            let krate = tcx.lang_items.owned_box().map(|d| d.krate);
-            krate == Some(LOCAL_CRATE)
-        }
-
-        ty::TyTrait(ref tt) => {
-            tt.principal_def_id().is_local()
-        }
-
-        ty::TyError => {
-            true
-        }
-
-        ty::TyClosure(..) => {
-            tcx.sess.bug(
-                &format!("ty_is_local invoked on unexpected type: {:?}",
-                        ty))
-        }
-    }
-}
diff --git a/src/librustc/middle/traits/error_reporting.rs b/src/librustc/middle/traits/error_reporting.rs
deleted file mode 100644 (file)
index fac53ec..0000000
+++ /dev/null
@@ -1,807 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::{
-    FulfillmentError,
-    FulfillmentErrorCode,
-    MismatchedProjectionTypes,
-    Obligation,
-    ObligationCauseCode,
-    OutputTypeParameterMismatch,
-    TraitNotObjectSafe,
-    PredicateObligation,
-    SelectionError,
-    ObjectSafetyViolation,
-    MethodViolationCode,
-    object_safety_violations,
-};
-
-use fmt_macros::{Parser, Piece, Position};
-use middle::def_id::DefId;
-use middle::infer::InferCtxt;
-use middle::ty::{self, ToPredicate, ToPolyTraitRef, TraitRef, Ty, TypeFoldable};
-use middle::ty::fast_reject;
-use util::nodemap::{FnvHashMap, FnvHashSet};
-
-use std::cmp;
-use std::fmt;
-use syntax::attr::{AttributeMethods, AttrMetaMethods};
-use syntax::codemap::Span;
-use syntax::errors::DiagnosticBuilder;
-
-#[derive(Debug, PartialEq, Eq, Hash)]
-pub struct TraitErrorKey<'tcx> {
-    span: Span,
-    predicate: ty::Predicate<'tcx>
-}
-
-impl<'tcx> TraitErrorKey<'tcx> {
-    fn from_error<'a>(infcx: &InferCtxt<'a, 'tcx>,
-                      e: &FulfillmentError<'tcx>) -> Self {
-        let predicate =
-            infcx.resolve_type_vars_if_possible(&e.obligation.predicate);
-        TraitErrorKey {
-            span: e.obligation.cause.span,
-            predicate: infcx.tcx.erase_regions(&predicate)
-        }
-    }
-}
-
-pub fn report_fulfillment_errors<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
-                                           errors: &Vec<FulfillmentError<'tcx>>) {
-    for error in errors {
-        report_fulfillment_error(infcx, error);
-    }
-}
-
-fn report_fulfillment_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
-                                      error: &FulfillmentError<'tcx>) {
-    let error_key = TraitErrorKey::from_error(infcx, error);
-    debug!("report_fulfillment_errors({:?}) - key={:?}",
-           error, error_key);
-    if !infcx.reported_trait_errors.borrow_mut().insert(error_key) {
-        debug!("report_fulfillment_errors: skipping duplicate");
-        return;
-    }
-    match error.code {
-        FulfillmentErrorCode::CodeSelectionError(ref e) => {
-            report_selection_error(infcx, &error.obligation, e);
-        }
-        FulfillmentErrorCode::CodeProjectionError(ref e) => {
-            report_projection_error(infcx, &error.obligation, e);
-        }
-        FulfillmentErrorCode::CodeAmbiguity => {
-            maybe_report_ambiguity(infcx, &error.obligation);
-        }
-    }
-}
-
-pub fn report_projection_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
-                                         obligation: &PredicateObligation<'tcx>,
-                                         error: &MismatchedProjectionTypes<'tcx>)
-{
-    let predicate =
-        infcx.resolve_type_vars_if_possible(&obligation.predicate);
-
-    // The TyError created by normalize_to_error can end up being unified
-    // into all obligations: for example, if our obligation is something
-    // like `$X = <() as Foo<$X>>::Out` and () does not implement Foo<_>,
-    // then $X will be unified with TyError, but the error still needs to be
-    // reported.
-    if !infcx.tcx.sess.has_errors() || !predicate.references_error() {
-        let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0271,
-            "type mismatch resolving `{}`: {}",
-            predicate,
-            error.err);
-        note_obligation_cause(infcx, &mut err, obligation);
-        err.emit();
-    }
-}
-
-fn report_on_unimplemented<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
-                                     trait_ref: &TraitRef<'tcx>,
-                                     span: Span) -> Option<String> {
-    let def_id = trait_ref.def_id;
-    let mut report = None;
-    for item in infcx.tcx.get_attrs(def_id).iter() {
-        if item.check_name("rustc_on_unimplemented") {
-            let err_sp = item.meta().span.substitute_dummy(span);
-            let def = infcx.tcx.lookup_trait_def(def_id);
-            let trait_str = def.trait_ref.to_string();
-            if let Some(ref istring) = item.value_str() {
-                let mut generic_map = def.generics.types.iter_enumerated()
-                                         .map(|(param, i, gen)| {
-                                               (gen.name.as_str().to_string(),
-                                                trait_ref.substs.types.get(param, i)
-                                                         .to_string())
-                                              }).collect::<FnvHashMap<String, String>>();
-                generic_map.insert("Self".to_string(),
-                                   trait_ref.self_ty().to_string());
-                let parser = Parser::new(&istring);
-                let mut errored = false;
-                let err: String = parser.filter_map(|p| {
-                    match p {
-                        Piece::String(s) => Some(s),
-                        Piece::NextArgument(a) => match a.position {
-                            Position::ArgumentNamed(s) => match generic_map.get(s) {
-                                Some(val) => Some(val),
-                                None => {
-                                    span_err!(infcx.tcx.sess, err_sp, E0272,
-                                                   "the #[rustc_on_unimplemented] \
-                                                            attribute on \
-                                                            trait definition for {} refers to \
-                                                            non-existent type parameter {}",
-                                                           trait_str, s);
-                                    errored = true;
-                                    None
-                                }
-                            },
-                            _ => {
-                                     span_err!(infcx.tcx.sess, err_sp, E0273,
-                                               "the #[rustc_on_unimplemented] \
-                                                        attribute on \
-                                                        trait definition for {} must have named \
-                                                        format arguments, \
-                                                        eg `#[rustc_on_unimplemented = \
-                                                        \"foo {{T}}\"]`",
-                                                       trait_str);
-                                errored = true;
-                                None
-                            }
-                        }
-                    }
-                }).collect();
-                // Report only if the format string checks out
-                if !errored {
-                    report = Some(err);
-                }
-            } else {
-                span_err!(infcx.tcx.sess, err_sp, E0274,
-                                        "the #[rustc_on_unimplemented] attribute on \
-                                                 trait definition for {} must have a value, \
-                                                 eg `#[rustc_on_unimplemented = \"foo\"]`",
-                                                 trait_str);
-            }
-            break;
-        }
-    }
-    report
-}
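
For reference, a minimal sketch of the attribute this function interprets; the trait `Frob` is invented for illustration, and the feature-gate name is assumed to be `on_unimplemented` (the attribute was nightly-only at this point):

    #![feature(on_unimplemented)]

    #[rustc_on_unimplemented = "`{Self}` cannot be frobbed with `{T}`"]
    trait Frob<T> {
        fn frob(&self, with: T);
    }

    fn main() {}

The named arguments `{Self}` and `{T}` correspond to the entries inserted into `generic_map` above.
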
-
-/// Reports that an overflow has occurred and halts compilation. We
-/// halt compilation unconditionally because it is important that
-/// overflows never be masked -- they basically represent computations
-/// whose result could not be truly determined and thus we can't say
-/// if the program type checks or not -- and they are unusual
-/// occurrences in any case.
-pub fn report_overflow_error<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>,
-                                          obligation: &Obligation<'tcx, T>,
-                                          suggest_increasing_limit: bool)
-                                          -> !
-    where T: fmt::Display + TypeFoldable<'tcx>
-{
-    let predicate =
-        infcx.resolve_type_vars_if_possible(&obligation.predicate);
-    let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0275,
-                                   "overflow evaluating the requirement `{}`",
-                                   predicate);
-
-    if suggest_increasing_limit {
-        suggest_new_overflow_limit(infcx.tcx, &mut err, obligation.cause.span);
-    }
-
-    note_obligation_cause(infcx, &mut err, obligation);
-
-    err.emit();
-    infcx.tcx.sess.abort_if_errors();
-    unreachable!();
-}
-
-/// Reports that a cycle was detected which led to overflow and halts
-/// compilation. This is equivalent to `report_overflow_error` except
-/// that we can give a more helpful error message (and, in particular,
-/// we do not suggest increasing the overflow limit, which is not
-/// going to help).
-pub fn report_overflow_error_cycle<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
-                                             cycle: &Vec<PredicateObligation<'tcx>>)
-                                             -> !
-{
-    assert!(cycle.len() > 1);
-
-    debug!("report_overflow_error_cycle(cycle length = {})", cycle.len());
-
-    let cycle = infcx.resolve_type_vars_if_possible(cycle);
-
-    debug!("report_overflow_error_cycle: cycle={:?}", cycle);
-
-    assert_eq!(&cycle[0].predicate, &cycle.last().unwrap().predicate);
-
-    try_report_overflow_error_type_of_infinite_size(infcx, &cycle);
-    report_overflow_error(infcx, &cycle[0], false);
-}
-
-/// If a cycle results from evaluating whether something is Sized, that
-/// is a particular special case that always results from a struct or
-/// enum definition that lacks indirection (e.g., `struct Foo { x: Foo
-/// }`). We wish to report a targeted error for this case.
-pub fn try_report_overflow_error_type_of_infinite_size<'a, 'tcx>(
-    infcx: &InferCtxt<'a, 'tcx>,
-    cycle: &[PredicateObligation<'tcx>])
-{
-    let sized_trait = match infcx.tcx.lang_items.sized_trait() {
-        Some(v) => v,
-        None => return,
-    };
-    let top_is_sized = {
-        match cycle[0].predicate {
-            ty::Predicate::Trait(ref data) => data.def_id() == sized_trait,
-            _ => false,
-        }
-    };
-    if !top_is_sized {
-        return;
-    }
-
-    // The only way to have a type of infinite size is to have,
-    // somewhere, a struct/enum type involved. Identify all such types
-    // and report the cycle to the user.
-
-    let struct_enum_tys: Vec<_> =
-        cycle.iter()
-             .flat_map(|obligation| match obligation.predicate {
-                 ty::Predicate::Trait(ref data) => {
-                     assert_eq!(data.def_id(), sized_trait);
-                     let self_ty = data.skip_binder().trait_ref.self_ty(); // (*)
-                     // (*) ok to skip binder because this is just
-                     // error reporting and regions don't really
-                     // matter
-                     match self_ty.sty {
-                         ty::TyEnum(..) | ty::TyStruct(..) => Some(self_ty),
-                         _ => None,
-                     }
-                 }
-                 _ => {
-                     infcx.tcx.sess.span_bug(obligation.cause.span,
-                                             &format!("Sized cycle involving non-trait-ref: {:?}",
-                                                      obligation.predicate));
-                 }
-             })
-             .collect();
-
-    assert!(!struct_enum_tys.is_empty());
-
-    // This is a bit tricky. We want to pick a "main type" in the
-    // listing that is local to the current crate, so we can give a
-    // good span to the user. But it might not be the first one in our
-    // cycle list. So find the first one that is local and then
-    // rotate.
-    let (main_index, main_def_id) =
-        struct_enum_tys.iter()
-                       .enumerate()
-                       .filter_map(|(index, ty)| match ty.sty {
-                           ty::TyEnum(adt_def, _) | ty::TyStruct(adt_def, _)
-                               if adt_def.did.is_local() =>
-                               Some((index, adt_def.did)),
-                           _ =>
-                               None,
-                       })
-                       .next()
-                       .unwrap(); // should always be SOME local type involved!
-
-    // Rotate so that the "main" type is at index 0.
-    let struct_enum_tys: Vec<_> =
-        struct_enum_tys.iter()
-                       .cloned()
-                       .skip(main_index)
-                       .chain(struct_enum_tys.iter().cloned().take(main_index))
-                       .collect();
-
-    let tcx = infcx.tcx;
-    let mut err = recursive_type_with_infinite_size_error(tcx, main_def_id);
-    let len = struct_enum_tys.len();
-    if len > 2 {
-        let span = tcx.map.span_if_local(main_def_id).unwrap();
-        err.fileline_note(span,
-                          &format!("type `{}` is embedded within `{}`...",
-                                   struct_enum_tys[0],
-                                   struct_enum_tys[1]));
-        for &next_ty in &struct_enum_tys[1..len-1] {
-            err.fileline_note(span,
-                              &format!("...which in turn is embedded within `{}`...", next_ty));
-        }
-        err.fileline_note(span,
-                          &format!("...which in turn is embedded within `{}`, \
-                                    completing the cycle.",
-                                   struct_enum_tys[len-1]));
-    }
-    err.emit();
-    infcx.tcx.sess.abort_if_errors();
-    unreachable!();
-}
-
-pub fn recursive_type_with_infinite_size_error<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                                     type_def_id: DefId)
-                                                     -> DiagnosticBuilder<'tcx>
-{
-    assert!(type_def_id.is_local());
-    let span = tcx.map.span_if_local(type_def_id).unwrap();
-    let mut err = struct_span_err!(tcx.sess, span, E0072, "recursive type `{}` has infinite size",
-                                   tcx.item_path_str(type_def_id));
-    err.fileline_help(span, &format!("insert indirection (e.g., a `Box`, `Rc`, or `&`) \
-                                      at some point to make `{}` representable",
-                                     tcx.item_path_str(type_def_id)));
-    err
-}
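
Echoing the `struct Foo { x: Foo }` example from the doc comment above, a runnable sketch of the kind of definition E0072 targets and the indirection fix the help message suggests:

    // struct List { head: i32, tail: Option<List> }      // E0072: recursive type has infinite size
    struct List { head: i32, tail: Option<Box<List>> }    // OK: `Box` provides the indirection

    fn main() {
        let _ = List { head: 1, tail: None };
    }
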
-
-pub fn report_selection_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
-                                        obligation: &PredicateObligation<'tcx>,
-                                        error: &SelectionError<'tcx>)
-{
-    match *error {
-        SelectionError::Unimplemented => {
-            if let ObligationCauseCode::CompareImplMethodObligation = obligation.cause.code {
-                span_err!(
-                    infcx.tcx.sess, obligation.cause.span, E0276,
-                    "the requirement `{}` appears on the impl \
-                     method but not on the corresponding trait method",
-                    obligation.predicate);
-            } else {
-                match obligation.predicate {
-                    ty::Predicate::Trait(ref trait_predicate) => {
-                        let trait_predicate =
-                            infcx.resolve_type_vars_if_possible(trait_predicate);
-
-                        if !infcx.tcx.sess.has_errors() || !trait_predicate.references_error() {
-                            let trait_ref = trait_predicate.to_poly_trait_ref();
-                            let mut err = struct_span_err!(
-                                infcx.tcx.sess, obligation.cause.span, E0277,
-                                "the trait `{}` is not implemented for the type `{}`",
-                                trait_ref, trait_ref.self_ty());
-
-                            // Check if it has a custom "#[rustc_on_unimplemented]"
-                            // error message, report with that message if it does
-                            let custom_note = report_on_unimplemented(infcx, &trait_ref.0,
-                                                                      obligation.cause.span);
-                            if let Some(s) = custom_note {
-                                err.fileline_note(obligation.cause.span, &s);
-                            } else {
-                                let simp = fast_reject::simplify_type(infcx.tcx,
-                                                                      trait_ref.self_ty(),
-                                                                      true);
-                                let mut impl_candidates = Vec::new();
-                                let trait_def = infcx.tcx.lookup_trait_def(trait_ref.def_id());
-
-                                match simp {
-                                    Some(simp) => trait_def.for_each_impl(infcx.tcx, |def_id| {
-                                        let imp = infcx.tcx.impl_trait_ref(def_id).unwrap();
-                                        let imp_simp = fast_reject::simplify_type(infcx.tcx,
-                                                                                  imp.self_ty(),
-                                                                                  true);
-                                        if let Some(imp_simp) = imp_simp {
-                                            if simp != imp_simp {
-                                                return;
-                                            }
-                                        }
-                                        impl_candidates.push(imp);
-                                    }),
-                                    None => trait_def.for_each_impl(infcx.tcx, |def_id| {
-                                        impl_candidates.push(
-                                            infcx.tcx.impl_trait_ref(def_id).unwrap());
-                                    })
-                                };
-
-                                if impl_candidates.len() > 0 {
-                                    err.fileline_help(
-                                        obligation.cause.span,
-                                        &format!("the following implementations were found:"));
-
-                                    let end = cmp::min(4, impl_candidates.len());
-                                    for candidate in &impl_candidates[0..end] {
-                                        err.fileline_help(obligation.cause.span,
-                                                          &format!("  {:?}", candidate));
-                                    }
-                                    if impl_candidates.len() > 4 {
-                                        err.fileline_help(obligation.cause.span,
-                                                          &format!("and {} others",
-                                                                   impl_candidates.len()-4));
-                                    }
-                                }
-                            }
-                            note_obligation_cause(infcx, &mut err, obligation);
-                            err.emit();
-                        }
-                    },
-                    ty::Predicate::Equate(ref predicate) => {
-                        let predicate = infcx.resolve_type_vars_if_possible(predicate);
-                        let err = infcx.equality_predicate(obligation.cause.span,
-                                                           &predicate).err().unwrap();
-                        let mut err = struct_span_err!(
-                            infcx.tcx.sess, obligation.cause.span, E0278,
-                            "the requirement `{}` is not satisfied (`{}`)",
-                            predicate,
-                            err);
-                        note_obligation_cause(infcx, &mut err, obligation);
-                        err.emit();
-                    }
-
-                    ty::Predicate::RegionOutlives(ref predicate) => {
-                        let predicate = infcx.resolve_type_vars_if_possible(predicate);
-                        let err = infcx.region_outlives_predicate(obligation.cause.span,
-                                                                  &predicate).err().unwrap();
-                        let mut err = struct_span_err!(
-                            infcx.tcx.sess, obligation.cause.span, E0279,
-                            "the requirement `{}` is not satisfied (`{}`)",
-                            predicate,
-                            err);
-                        note_obligation_cause(infcx, &mut err, obligation);
-                        err.emit();
-                    }
-
-                    ty::Predicate::Projection(..) | ty::Predicate::TypeOutlives(..) => {
-                        let predicate =
-                            infcx.resolve_type_vars_if_possible(&obligation.predicate);
-                        let mut err = struct_span_err!(
-                            infcx.tcx.sess, obligation.cause.span, E0280,
-                            "the requirement `{}` is not satisfied",
-                            predicate);
-                        note_obligation_cause(infcx, &mut err, obligation);
-                        err.emit();
-                    }
-
-                    ty::Predicate::ObjectSafe(trait_def_id) => {
-                        let violations = object_safety_violations(
-                            infcx.tcx, trait_def_id);
-                        let mut err = report_object_safety_error(infcx.tcx,
-                                                                 obligation.cause.span,
-                                                                 trait_def_id,
-                                                                 violations);
-                        note_obligation_cause(infcx, &mut err, obligation);
-                        err.emit();
-                    }
-
-                    ty::Predicate::WellFormed(ty) => {
-                        // WF predicates cannot themselves make
-                        // errors. They can only block due to
-                        // ambiguity; otherwise, they always
-                        // degenerate into other obligations
-                        // (which may fail).
-                        infcx.tcx.sess.span_bug(
-                            obligation.cause.span,
-                            &format!("WF predicate not satisfied for {:?}", ty));
-                    }
-                }
-            }
-        }
-
-        OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, ref e) => {
-            let expected_trait_ref = infcx.resolve_type_vars_if_possible(&*expected_trait_ref);
-            let actual_trait_ref = infcx.resolve_type_vars_if_possible(&*actual_trait_ref);
-            if !actual_trait_ref.self_ty().references_error() {
-                let mut err = struct_span_err!(
-                    infcx.tcx.sess, obligation.cause.span, E0281,
-                    "type mismatch: the type `{}` implements the trait `{}`, \
-                     but the trait `{}` is required ({})",
-                    expected_trait_ref.self_ty(),
-                    expected_trait_ref,
-                    actual_trait_ref,
-                    e);
-                note_obligation_cause(infcx, &mut err, obligation);
-                err.emit();
-            }
-        }
-
-        TraitNotObjectSafe(did) => {
-            let violations = object_safety_violations(infcx.tcx, did);
-            let mut err = report_object_safety_error(infcx.tcx, obligation.cause.span, did,
-                                                     violations);
-            note_obligation_cause(infcx, &mut err, obligation);
-            err.emit();
-        }
-    }
-}
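
As an illustration of the E0277 path above, a small program (the names are invented) where an unimplemented foreign trait triggers the "is not implemented" diagnostic:

    use std::fmt::Display;

    struct NoDisplay;

    fn show<T: Display>(_value: T) {}

    fn main() {
        // show(NoDisplay); // E0277: the trait `Display` is not implemented for `NoDisplay`
        show("this call compiles, since &str implements Display");
    }
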
-
-pub fn report_object_safety_error<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                        span: Span,
-                                        trait_def_id: DefId,
-                                        violations: Vec<ObjectSafetyViolation>)
-                                        -> DiagnosticBuilder<'tcx>
-{
-    let mut err = struct_span_err!(
-        tcx.sess, span, E0038,
-        "the trait `{}` cannot be made into an object",
-        tcx.item_path_str(trait_def_id));
-
-    let mut reported_violations = FnvHashSet();
-    for violation in violations {
-        if !reported_violations.insert(violation.clone()) {
-            continue;
-        }
-        match violation {
-            ObjectSafetyViolation::SizedSelf => {
-                err.fileline_note(
-                    span,
-                    "the trait cannot require that `Self : Sized`");
-            }
-
-            ObjectSafetyViolation::SupertraitSelf => {
-                err.fileline_note(
-                    span,
-                    "the trait cannot use `Self` as a type parameter \
-                     in the supertrait listing");
-            }
-
-            ObjectSafetyViolation::Method(method,
-                                          MethodViolationCode::StaticMethod) => {
-                err.fileline_note(
-                    span,
-                    &format!("method `{}` has no receiver",
-                             method.name));
-            }
-
-            ObjectSafetyViolation::Method(method,
-                                          MethodViolationCode::ReferencesSelf) => {
-                err.fileline_note(
-                    span,
-                    &format!("method `{}` references the `Self` type \
-                              in its arguments or return type",
-                             method.name));
-            }
-
-            ObjectSafetyViolation::Method(method,
-                                          MethodViolationCode::Generic) => {
-                err.fileline_note(
-                    span,
-                    &format!("method `{}` has generic type parameters",
-                             method.name));
-            }
-        }
-    }
-    err
-}
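
A sketch of a trait hitting the `StaticMethod` violation reported above (names invented): `create` has no receiver, so the trait cannot be made into an object:

    trait NoDyn {
        fn create() -> Self;
    }

    struct S;

    impl NoDyn for S {
        fn create() -> Self { S }
    }

    fn main() {
        // let _obj: &NoDyn = &S; // E0038: the trait `NoDyn` cannot be made into an object
        let _value = S::create(); // calling it statically is still fine
    }
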
-
-pub fn maybe_report_ambiguity<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
-                                        obligation: &PredicateObligation<'tcx>) {
-    // Unable to successfully determine, probably means
-    // insufficient type information, but could mean
-    // ambiguous impls. The latter *ought* to be a
-    // coherence violation, so we don't report it here.
-
-    let predicate = infcx.resolve_type_vars_if_possible(&obligation.predicate);
-
-    debug!("maybe_report_ambiguity(predicate={:?}, obligation={:?})",
-           predicate,
-           obligation);
-
-    match predicate {
-        ty::Predicate::Trait(ref data) => {
-            let trait_ref = data.to_poly_trait_ref();
-            let self_ty = trait_ref.self_ty();
-            let all_types = &trait_ref.substs().types;
-            if all_types.references_error() {
-            } else {
-                // Typically, this ambiguity should only happen if
-                // there are unresolved type inference variables
-                // (otherwise it would suggest a coherence
-                // failure). But given #21974 that is not necessarily
-                // the case -- we can have multiple where clauses that
-                // are only distinguished by a region, which results
-                // in an ambiguity even when all types are fully
-                // known, since we don't dispatch based on region
-                // relationships.
-
-                // This is kind of a hack: it frequently happens that some earlier
-                // error prevents types from being fully inferred, and then we get
-                // a bunch of uninteresting errors saying something like "<generic
-                // #0> doesn't implement Sized".  It may even be true that we
-                // could just skip over all checks where the self-ty is an
-                // inference variable, but I was afraid that there might be an
-                // inference variable created, registered as an obligation, and
-                // then never forced by writeback, and hence by skipping here we'd
-                // be ignoring the fact that we don't KNOW the type works
-                // out. Though even that would probably be harmless, given that
-                // we're only talking about builtin traits, which are known to be
-                // inhabited. But in any case I just threw in this check for
-                // has_errors() to be sure that compilation isn't happening
-                // anyway. In that case, why inundate the user.
-                if !infcx.tcx.sess.has_errors() {
-                    if
-                        infcx.tcx.lang_items.sized_trait()
-                        .map_or(false, |sized_id| sized_id == trait_ref.def_id())
-                    {
-                        need_type_info(infcx, obligation.cause.span, self_ty);
-                    } else {
-                        let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0283,
-                                                       "type annotations required: \
-                                                        cannot resolve `{}`",
-                                                       predicate);
-                        note_obligation_cause(infcx, &mut err, obligation);
-                        err.emit();
-                    }
-                }
-            }
-        }
-
-        ty::Predicate::WellFormed(ty) => {
-            // Same hacky approach as above to avoid deluging user
-            // with error messages.
-            if !ty.references_error() && !infcx.tcx.sess.has_errors() {
-                need_type_info(infcx, obligation.cause.span, ty);
-            }
-        }
-
-        _ => {
-            if !infcx.tcx.sess.has_errors() {
-                let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0284,
-                                               "type annotations required: cannot resolve `{}`",
-                                               predicate);
-                note_obligation_cause(infcx, &mut err, obligation);
-                err.emit();
-            }
-        }
-    }
-}
-
-fn need_type_info<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
-                            span: Span,
-                            ty: Ty<'tcx>)
-{
-    span_err!(infcx.tcx.sess, span, E0282,
-              "unable to infer enough type information about `{}`; \
-               type annotations or generic parameter binding required",
-              ty);
-}
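
The usual way to run into this message is an unconstrained type variable, as in this sketch:

    fn main() {
        // let x = None;          // E0282: unable to infer enough type information
        let x: Option<i32> = None; // an annotation resolves the ambiguity
        assert!(x.is_none());
    }
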
-
-fn note_obligation_cause<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>,
-                                      err: &mut DiagnosticBuilder,
-                                      obligation: &Obligation<'tcx, T>)
-    where T: fmt::Display
-{
-    note_obligation_cause_code(infcx,
-                               err,
-                               &obligation.predicate,
-                               obligation.cause.span,
-                               &obligation.cause.code);
-}
-
-fn note_obligation_cause_code<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>,
-                                           err: &mut DiagnosticBuilder,
-                                           predicate: &T,
-                                           cause_span: Span,
-                                           cause_code: &ObligationCauseCode<'tcx>)
-    where T: fmt::Display
-{
-    let tcx = infcx.tcx;
-    match *cause_code {
-        ObligationCauseCode::MiscObligation => { }
-        ObligationCauseCode::SliceOrArrayElem => {
-            err.fileline_note(
-                cause_span,
-                "slice and array elements must have `Sized` type");
-        }
-        ObligationCauseCode::ProjectionWf(data) => {
-            err.fileline_note(
-                cause_span,
-                &format!("required so that the projection `{}` is well-formed",
-                         data));
-        }
-        ObligationCauseCode::ReferenceOutlivesReferent(ref_ty) => {
-            err.fileline_note(
-                cause_span,
-                &format!("required so that reference `{}` does not outlive its referent",
-                         ref_ty));
-        }
-        ObligationCauseCode::ItemObligation(item_def_id) => {
-            let item_name = tcx.item_path_str(item_def_id);
-            err.fileline_note(
-                cause_span,
-                &format!("required by `{}`", item_name));
-        }
-        ObligationCauseCode::ObjectCastObligation(object_ty) => {
-            err.fileline_note(
-                cause_span,
-                &format!(
-                    "required for the cast to the object type `{}`",
-                    infcx.ty_to_string(object_ty)));
-        }
-        ObligationCauseCode::RepeatVec => {
-            err.fileline_note(
-                cause_span,
-                "the `Copy` trait is required because the \
-                 repeated element will be copied");
-        }
-        ObligationCauseCode::VariableType(_) => {
-            err.fileline_note(
-                cause_span,
-                "all local variables must have a statically known size");
-        }
-        ObligationCauseCode::ReturnType => {
-            err.fileline_note(
-                cause_span,
-                "the return type of a function must have a \
-                 statically known size");
-        }
-        ObligationCauseCode::AssignmentLhsSized => {
-            err.fileline_note(
-                cause_span,
-                "the left-hand-side of an assignment must have a statically known size");
-        }
-        ObligationCauseCode::StructInitializerSized => {
-            err.fileline_note(
-                cause_span,
-                "structs must have a statically known size to be initialized");
-        }
-        ObligationCauseCode::ClosureCapture(var_id, _, builtin_bound) => {
-            let def_id = tcx.lang_items.from_builtin_kind(builtin_bound).unwrap();
-            let trait_name = tcx.item_path_str(def_id);
-            let name = tcx.local_var_name_str(var_id);
-            err.fileline_note(
-                cause_span,
-                &format!("the closure that captures `{}` requires that all captured variables \
-                          implement the trait `{}`",
-                         name,
-                         trait_name));
-        }
-        ObligationCauseCode::FieldSized => {
-            err.fileline_note(
-                cause_span,
-                "only the last field of a struct or enum variant \
-                 may have a dynamically sized type");
-        }
-        ObligationCauseCode::SharedStatic => {
-            err.fileline_note(
-                cause_span,
-                "shared static variables must have a type that implements `Sync`");
-        }
-        ObligationCauseCode::BuiltinDerivedObligation(ref data) => {
-            let parent_trait_ref = infcx.resolve_type_vars_if_possible(&data.parent_trait_ref);
-            err.fileline_note(
-                cause_span,
-                &format!("required because it appears within the type `{}`",
-                         parent_trait_ref.0.self_ty()));
-            let parent_predicate = parent_trait_ref.to_predicate();
-            note_obligation_cause_code(infcx,
-                                       err,
-                                       &parent_predicate,
-                                       cause_span,
-                                       &data.parent_code);
-        }
-        ObligationCauseCode::ImplDerivedObligation(ref data) => {
-            let parent_trait_ref = infcx.resolve_type_vars_if_possible(&data.parent_trait_ref);
-            err.fileline_note(
-                cause_span,
-                &format!("required because of the requirements on the impl of `{}` for `{}`",
-                         parent_trait_ref,
-                         parent_trait_ref.0.self_ty()));
-            let parent_predicate = parent_trait_ref.to_predicate();
-            note_obligation_cause_code(infcx,
-                                       err,
-                                       &parent_predicate,
-                                       cause_span,
-                                       &data.parent_code);
-        }
-        ObligationCauseCode::CompareImplMethodObligation => {
-            err.fileline_note(
-                cause_span,
-                &format!("the requirement `{}` appears on the impl method \
-                          but not on the corresponding trait method",
-                         predicate));
-        }
-    }
-}
-
-fn suggest_new_overflow_limit(tcx: &ty::ctxt, err:&mut DiagnosticBuilder, span: Span) {
-    let current_limit = tcx.sess.recursion_limit.get();
-    let suggested_limit = current_limit * 2;
-    err.fileline_note(
-        span,
-        &format!(
-            "consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate",
-            suggested_limit));
-}
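
The note built here points at the crate attribute below; `suggested_limit` is simply twice the current limit, so a crate that overflows can opt in like this:

    #![recursion_limit = "128"]

    fn main() {}
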
diff --git a/src/librustc/middle/traits/fulfill.rs b/src/librustc/middle/traits/fulfill.rs
deleted file mode 100644 (file)
index de70cdb..0000000
+++ /dev/null
@@ -1,764 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use dep_graph::DepGraph;
-use middle::infer::InferCtxt;
-use middle::ty::{self, Ty, TypeFoldable, ToPolyTraitRef};
-use rustc_data_structures::obligation_forest::{Backtrace, ObligationForest, Error};
-use std::iter;
-use syntax::ast;
-use util::common::ErrorReported;
-use util::nodemap::{FnvHashMap, FnvHashSet, NodeMap};
-
-use super::CodeAmbiguity;
-use super::CodeProjectionError;
-use super::CodeSelectionError;
-use super::is_object_safe;
-use super::FulfillmentError;
-use super::FulfillmentErrorCode;
-use super::ObligationCause;
-use super::PredicateObligation;
-use super::project;
-use super::report_overflow_error_cycle;
-use super::select::SelectionContext;
-use super::Unimplemented;
-use super::util::predicate_for_builtin_bound;
-
-pub struct GlobalFulfilledPredicates<'tcx> {
-    set: FnvHashSet<ty::PolyTraitPredicate<'tcx>>,
-    dep_graph: DepGraph,
-}
-
-#[derive(Debug)]
-pub struct LocalFulfilledPredicates<'tcx> {
-    set: FnvHashSet<ty::Predicate<'tcx>>
-}
-
-/// The fulfillment context is used to drive trait resolution.  It
-/// consists of a list of obligations that must be (eventually)
-/// satisfied. The job is to track which are satisfied, which yielded
-/// errors, and which are still pending. At any point, users can call
-/// `select_where_possible`, and the fulfillment context will try to do
-/// selection, retaining only those obligations that remain
-/// ambiguous. This may be helpful in pushing type inference
-/// along. Once all type inference constraints have been generated, the
-/// method `select_all_or_error` can be used to report any remaining
-/// ambiguous cases as errors.
-pub struct FulfillmentContext<'tcx> {
-    // a simple cache that aims to cache *exact duplicate obligations*
-    // and avoid adding them twice. This serves a different purpose
-    // than the `SelectionCache`: it avoids duplicate errors and
-    // permits recursive obligations, which are often generated from
-    // traits like `Send` et al.
-    //
-    // Note that because of type inference, a predicate can still
-    // occur twice in the predicates list, for example when 2
-    // initially-distinct type variables are unified after being
-    // inserted. Deduplicating the predicate set on selection had a
-    // significant performance cost the last time I checked.
-    duplicate_set: LocalFulfilledPredicates<'tcx>,
-
-    // A list of all obligations that have been registered with this
-    // fulfillment context.
-    predicates: ObligationForest<PendingPredicateObligation<'tcx>,
-                                 LocalFulfilledPredicates<'tcx>>,
-
-    // A set of constraints that regionck must validate. Each
-    // constraint has the form `T:'a`, meaning "some type `T` must
-    // outlive the lifetime 'a". These constraints derive from
-    // instantiated type parameters. So if you had a struct defined
-    // like
-    //
-    //     struct Foo<T:'static> { ... }
-    //
-    // then in some expression `let x = Foo { ... }` it will
-    // instantiate the type parameter `T` with a fresh type `$0`. At
-    // the same time, it will record a region obligation of
-    // `$0:'static`. This will get checked later by regionck. (We
-    // can't generally check these things right away because we have
-    // to wait until types are resolved.)
-    //
-    // These are stored in a map keyed to the id of the innermost
-    // enclosing fn body / static initializer expression. This is
-    // because the location where the obligation was incurred can be
-    // relevant with respect to which sublifetime assumptions are in
-    // place. The reason that we store under the fn-id, and not
-    // something more fine-grained, is so that it is easier for
-    // regionck to be sure that it has found *all* the region
-    // obligations (otherwise, it's easy to fail to walk to a
-    // particular node-id).
-    region_obligations: NodeMap<Vec<RegionObligation<'tcx>>>,
-}
-
-#[derive(Clone)]
-pub struct RegionObligation<'tcx> {
-    pub sub_region: ty::Region,
-    pub sup_type: Ty<'tcx>,
-    pub cause: ObligationCause<'tcx>,
-}
-
-#[derive(Clone, Debug)]
-pub struct PendingPredicateObligation<'tcx> {
-    pub obligation: PredicateObligation<'tcx>,
-    pub stalled_on: Vec<Ty<'tcx>>,
-}
-
-impl<'tcx> FulfillmentContext<'tcx> {
-    /// Creates a new fulfillment context.
-    pub fn new() -> FulfillmentContext<'tcx> {
-        FulfillmentContext {
-            duplicate_set: LocalFulfilledPredicates::new(),
-            predicates: ObligationForest::new(),
-            region_obligations: NodeMap(),
-        }
-    }
-
-    /// "Normalize" a projection type `<SomeType as SomeTrait>::X` by
-    /// creating a fresh type variable `$0` as well as a projection
-    /// predicate `<SomeType as SomeTrait>::X == $0`. When the
-    /// inference engine runs, it will attempt to find an impl of
-    /// `SomeTrait` or a where clause that lets us unify `$0` with
-    /// something concrete. If this fails, we'll unify `$0` with
-    /// `projection_ty` again.
-    pub fn normalize_projection_type<'a>(&mut self,
-                                         infcx: &InferCtxt<'a,'tcx>,
-                                         projection_ty: ty::ProjectionTy<'tcx>,
-                                         cause: ObligationCause<'tcx>)
-                                         -> Ty<'tcx>
-    {
-        debug!("normalize_projection_type(projection_ty={:?})",
-               projection_ty);
-
-        assert!(!projection_ty.has_escaping_regions());
-
-        // FIXME(#20304) -- cache
-
-        let mut selcx = SelectionContext::new(infcx);
-        let normalized = project::normalize_projection_type(&mut selcx, projection_ty, cause, 0);
-
-        for obligation in normalized.obligations {
-            self.register_predicate_obligation(infcx, obligation);
-        }
-
-        debug!("normalize_projection_type: result={:?}", normalized.value);
-
-        normalized.value
-    }
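
At the language level, the projection being normalized is an associated-type path; a small runnable example of what normalization resolves (here `<Vec<u8> as IntoIterator>::Item` becomes `u8` via the impl in std):

    fn first(values: Vec<u8>) -> Option<<Vec<u8> as IntoIterator>::Item> {
        values.into_iter().next()
    }

    fn main() {
        assert_eq!(first(vec![1, 2, 3]), Some(1));
    }
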
-
-    pub fn register_builtin_bound<'a>(&mut self,
-                                      infcx: &InferCtxt<'a,'tcx>,
-                                      ty: Ty<'tcx>,
-                                      builtin_bound: ty::BuiltinBound,
-                                      cause: ObligationCause<'tcx>)
-    {
-        match predicate_for_builtin_bound(infcx.tcx, cause, builtin_bound, 0, ty) {
-            Ok(predicate) => {
-                self.register_predicate_obligation(infcx, predicate);
-            }
-            Err(ErrorReported) => { }
-        }
-    }
-
-    pub fn register_region_obligation<'a>(&mut self,
-                                          t_a: Ty<'tcx>,
-                                          r_b: ty::Region,
-                                          cause: ObligationCause<'tcx>)
-    {
-        register_region_obligation(t_a, r_b, cause, &mut self.region_obligations);
-    }
-
-    pub fn register_predicate_obligation<'a>(&mut self,
-                                             infcx: &InferCtxt<'a,'tcx>,
-                                             obligation: PredicateObligation<'tcx>)
-    {
-        // this helps to reduce duplicate errors, as well as making
-        // debug output much nicer to read and so on.
-        let obligation = infcx.resolve_type_vars_if_possible(&obligation);
-
-        assert!(!obligation.has_escaping_regions());
-
-        if self.is_duplicate_or_add(infcx.tcx, &obligation.predicate) {
-            debug!("register_predicate_obligation({:?}) -- already seen, skip", obligation);
-            return;
-        }
-
-        debug!("register_predicate_obligation({:?})", obligation);
-        let obligation = PendingPredicateObligation {
-            obligation: obligation,
-            stalled_on: vec![]
-        };
-        self.predicates.push_tree(obligation, LocalFulfilledPredicates::new());
-    }
-
-    pub fn region_obligations(&self,
-                              body_id: ast::NodeId)
-                              -> &[RegionObligation<'tcx>]
-    {
-        match self.region_obligations.get(&body_id) {
-            None => Default::default(),
-            Some(vec) => vec,
-        }
-    }
-
-    pub fn select_all_or_error<'a>(&mut self,
-                                   infcx: &InferCtxt<'a,'tcx>)
-                                   -> Result<(),Vec<FulfillmentError<'tcx>>>
-    {
-        try!(self.select_where_possible(infcx));
-        let errors: Vec<_> =
-            self.predicates.to_errors(CodeAmbiguity)
-                           .into_iter()
-                           .map(|e| to_fulfillment_error(e))
-                           .collect();
-        if errors.is_empty() {
-            Ok(())
-        } else {
-            Err(errors)
-        }
-    }
-
-    pub fn select_where_possible<'a>(&mut self,
-                                     infcx: &InferCtxt<'a,'tcx>)
-                                     -> Result<(),Vec<FulfillmentError<'tcx>>>
-    {
-        let mut selcx = SelectionContext::new(infcx);
-        self.select(&mut selcx)
-    }
-
-    pub fn pending_obligations(&self) -> Vec<PendingPredicateObligation<'tcx>> {
-        self.predicates.pending_obligations()
-    }
-
-    fn is_duplicate_or_add(&mut self,
-                           tcx: &ty::ctxt<'tcx>,
-                           predicate: &ty::Predicate<'tcx>)
-                           -> bool {
-        // For "global" predicates -- that is, predicates that don't
-        // involve type parameters, inference variables, or regions
-        // other than 'static -- we can check the cache in the tcx,
-        // which allows us to leverage work from other threads. Note
-        // that we don't add anything to this cache yet (unlike the
-        // local cache).  This is because the tcx cache maintains the
-        // invariant that it only contains things that have been
-        // proven, and we have not yet proven that `predicate` holds.
-        if tcx.fulfilled_predicates.borrow().check_duplicate(predicate) {
-            return true;
-        }
-
-        // If `predicate` is not global, or not present in the tcx
-        // cache, we can still check for it in our local cache and add
-        // it if not present. Note that if we find this predicate in
-        // the local cache we can stop immediately, without reporting
-        // any errors, even though we don't know yet if it is
-        // true. This is because, while we don't yet know if the
-        // predicate holds, we know that this same fulfillment context
-        // already is in the process of finding out.
-        self.duplicate_set.is_duplicate_or_add(predicate)
-    }
-
-    /// Attempts to select obligations using `selcx`, looping until no further
-    /// progress can be made; any errors encountered are collected and returned.
-    fn select<'a>(&mut self,
-                  selcx: &mut SelectionContext<'a, 'tcx>)
-                  -> Result<(),Vec<FulfillmentError<'tcx>>>
-    {
-        debug!("select(obligation-forest-size={})", self.predicates.len());
-
-        let mut errors = Vec::new();
-
-        loop {
-            debug!("select: starting another iteration");
-
-            // Process pending obligations.
-            let outcome = {
-                let region_obligations = &mut self.region_obligations;
-                self.predicates.process_obligations(
-                    |obligation, tree, backtrace| process_predicate(selcx,
-                                                                     tree,
-                                                                     obligation,
-                                                                     backtrace,
-                                                                     region_obligations))
-            };
-
-            debug!("select: outcome={:?}", outcome);
-
-            // these are obligations that were proven to be true.
-            for pending_obligation in outcome.completed {
-                let predicate = &pending_obligation.obligation.predicate;
-                selcx.tcx().fulfilled_predicates.borrow_mut().add_if_global(predicate);
-            }
-
-            errors.extend(
-                outcome.errors.into_iter()
-                              .map(|e| to_fulfillment_error(e)));
-
-            // If nothing new was added, no need to keep looping.
-            if outcome.stalled {
-                break;
-            }
-        }
-
-        debug!("select({} predicates remaining, {} errors) done",
-               self.predicates.len(), errors.len());
-
-        if errors.is_empty() {
-            Ok(())
-        } else {
-            Err(errors)
-        }
-    }
-}
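
A schematic sketch (not compilable on its own) of how a caller inside the compiler drives the context using only the methods defined above; `infcx` and `obligation` are assumed to come from the surrounding type-checking code:

    fn check_one<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
                           obligation: PredicateObligation<'tcx>)
                           -> Result<(), Vec<FulfillmentError<'tcx>>> {
        let mut fulfill_cx = FulfillmentContext::new();
        fulfill_cx.register_predicate_obligation(infcx, obligation);
        // Once inference has settled, report anything still failing or ambiguous.
        fulfill_cx.select_all_or_error(infcx)
    }
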
-
-/// Like `process_predicate1`, but wraps the resulting obligations into pending predicates.
-fn process_predicate<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
-                              tree_cache: &mut LocalFulfilledPredicates<'tcx>,
-                              pending_obligation: &mut PendingPredicateObligation<'tcx>,
-                              mut backtrace: Backtrace<PendingPredicateObligation<'tcx>>,
-                              region_obligations: &mut NodeMap<Vec<RegionObligation<'tcx>>>)
-                              -> Result<Option<Vec<PendingPredicateObligation<'tcx>>>,
-                                        FulfillmentErrorCode<'tcx>>
-{
-    match process_predicate1(selcx, pending_obligation, backtrace.clone(), region_obligations) {
-        Ok(Some(v)) => {
-            // FIXME(#30977) The code below is designed to detect (and
-            // permit) DAGs, while still ensuring that the reasoning
-            // is acyclic. However, it does a few things
-            // suboptimally. For example, it refreshes type variables
-            // a lot, probably more than needed, but also less than
-            // you might want.
-            //
-            //   - more than needed: I want to be very sure we don't
-            //     accidentally treat a cycle as a DAG, so I am
-            //     refreshing type variables as we walk the ancestors;
-            //     but we are going to repeat this a lot, which is
-            //     sort of silly, and it would be nicer to refresh
-            //     them *in place* so that later predicate processing
-            //     can benefit from the same work;
-            //   - less than you might want: we only add items in the cache here,
-            //     but maybe we learn more about type variables and could add them into
-            //     the cache later on.
-
-            let tcx = selcx.tcx();
-
-            // Compute a little FnvHashSet for the ancestors. We only
-            // do this the first time that we care.
-            let mut cache = None;
-            let mut is_ancestor = |predicate: &ty::Predicate<'tcx>| {
-                if cache.is_none() {
-                    let mut c = FnvHashSet();
-                    for ancestor in backtrace.by_ref() {
-                        // Ugh. This just feels ridiculously
-                        // inefficient.  But we need to compare
-                        // predicates without being concerned about
-                        // the vagaries of type inference, so for now
-                        // just ensure that they are always
-                        // up-to-date. (I suppose we could just use a
-                        // snapshot and check if they are unifiable?)
-                        let resolved_predicate =
-                            selcx.infcx().resolve_type_vars_if_possible(
-                                &ancestor.obligation.predicate);
-                        c.insert(resolved_predicate);
-                    }
-                    cache = Some(c);
-                }
-
-                cache.as_ref().unwrap().contains(predicate)
-            };
-
-            let pending_predicate_obligations: Vec<_> =
-                v.into_iter()
-                 .filter_map(|obligation| {
-                     // Probably silly, but remove any inference
-                     // variables. This is actually crucial to the
-                     // ancestor check below, but it's not clear that
-                     // it makes sense to ALWAYS do it.
-                     let obligation = selcx.infcx().resolve_type_vars_if_possible(&obligation);
-
-                     // Screen out obligations that we know globally
-                     // are true. This should really be the DAG check
-                     // mentioned above.
-                     if tcx.fulfilled_predicates.borrow().check_duplicate(&obligation.predicate) {
-                         return None;
-                     }
-
-                     // Check whether this obligation appears somewhere else in the tree.
-                     if tree_cache.is_duplicate_or_add(&obligation.predicate) {
-                         // If the obligation appears as a parent,
-                         // allow it, because that is a cycle.
-                         // Otherwise though we can just ignore
-                         // it. Note that we have to be careful around
-                         // inference variables here -- for the
-                         // purposes of the ancestor check, we retain
-                         // the invariant that all type variables are
-                         // fully refreshed.
-                         if !(&mut is_ancestor)(&obligation.predicate) {
-                             return None;
-                         }
-                     }
-
-                     Some(PendingPredicateObligation {
-                         obligation: obligation,
-                         stalled_on: vec![]
-                     })
-                 })
-                 .collect();
-
-            Ok(Some(pending_predicate_obligations))
-        }
-        Ok(None) => Ok(None),
-        Err(e) => Err(e)
-    }
-}
-
-
-/// Return the set of type variables contained in a trait ref
-fn trait_ref_type_vars<'a, 'tcx>(selcx: &mut SelectionContext<'a, 'tcx>,
-                                 t: ty::PolyTraitRef<'tcx>) -> Vec<Ty<'tcx>>
-{
-    t.skip_binder() // ok b/c this check doesn't care about regions
-     .input_types()
-     .iter()
-     .map(|t| selcx.infcx().resolve_type_vars_if_possible(t))
-     .filter(|t| t.has_infer_types())
-     .flat_map(|t| t.walk())
-     .filter(|t| match t.sty { ty::TyInfer(_) => true, _ => false })
-     .collect()
-}
-
-/// Processes a predicate obligation and returns either:
-/// - `Ok(Some(v))` if the predicate is true, presuming that `v` are also true
-/// - `Ok(None)` if we don't have enough info to be sure
-/// - `Err` if the predicate does not hold
-fn process_predicate1<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
-                               pending_obligation: &mut PendingPredicateObligation<'tcx>,
-                               backtrace: Backtrace<PendingPredicateObligation<'tcx>>,
-                               region_obligations: &mut NodeMap<Vec<RegionObligation<'tcx>>>)
-                               -> Result<Option<Vec<PredicateObligation<'tcx>>>,
-                                         FulfillmentErrorCode<'tcx>>
-{
-    // if we were stalled on some unresolved variables, first check
-    // whether any of them have been resolved; if not, don't bother
-    // doing more work yet
-    if !pending_obligation.stalled_on.is_empty() {
-        if pending_obligation.stalled_on.iter().all(|&ty| {
-            let resolved_ty = selcx.infcx().shallow_resolve(&ty);
-            resolved_ty == ty // nothing changed here
-        }) {
-            debug!("process_predicate: pending obligation {:?} still stalled on {:?}",
-                   selcx.infcx().resolve_type_vars_if_possible(&pending_obligation.obligation),
-                   pending_obligation.stalled_on);
-            return Ok(None);
-        }
-        pending_obligation.stalled_on = vec![];
-    }
-
-    let obligation = &mut pending_obligation.obligation;
-
-    // If we exceed the recursion limit, take a moment to look for a
-    // cycle so we can give a better error report from here, where we
-    // have more context.
-    let recursion_limit = selcx.tcx().sess.recursion_limit.get();
-    if obligation.recursion_depth >= recursion_limit {
-        if let Some(cycle) = scan_for_cycle(obligation, &backtrace) {
-            report_overflow_error_cycle(selcx.infcx(), &cycle);
-        }
-    }
-
-    if obligation.predicate.has_infer_types() {
-        obligation.predicate = selcx.infcx().resolve_type_vars_if_possible(&obligation.predicate);
-    }
-
-    match obligation.predicate {
-        ty::Predicate::Trait(ref data) => {
-            if selcx.tcx().fulfilled_predicates.borrow().check_duplicate_trait(data) {
-                return Ok(Some(vec![]));
-            }
-
-            if coinductive_match(selcx, obligation, data, &backtrace) {
-                return Ok(Some(vec![]));
-            }
-
-            let trait_obligation = obligation.with(data.clone());
-            match selcx.select(&trait_obligation) {
-                Ok(Some(vtable)) => {
-                    info!("selecting trait `{:?}` at depth {} yielded Ok(Some)",
-                          data, obligation.recursion_depth);
-                    Ok(Some(vtable.nested_obligations()))
-                }
-                Ok(None) => {
-                    info!("selecting trait `{:?}` at depth {} yielded Ok(None)",
-                          data, obligation.recursion_depth);
-
-                    // This is a bit subtle: for the most part, the
-                    // only reason we can fail to make progress on
-                    // trait selection is because we don't have enough
-                    // information about the types in the trait. One
-                    // exception is that we sometimes haven't decided
-                    // what kind of closure a closure is. *But*, in
-                    // that case, it turns out, the type of the
-                    // closure will also change, because the closure
-                    // also includes references to its upvars as part
-                    // of its type, and those types are resolved at
-                    // the same time.
-                    pending_obligation.stalled_on =
-                        trait_ref_type_vars(selcx, data.to_poly_trait_ref());
-
-                    debug!("process_predicate: pending obligation {:?} now stalled on {:?}",
-                           selcx.infcx().resolve_type_vars_if_possible(obligation),
-                           pending_obligation.stalled_on);
-
-                    Ok(None)
-                }
-                Err(selection_err) => {
-                    info!("selecting trait `{:?}` at depth {} yielded Err",
-                          data, obligation.recursion_depth);
-                    Err(CodeSelectionError(selection_err))
-                }
-            }
-        }
-
-        ty::Predicate::Equate(ref binder) => {
-            match selcx.infcx().equality_predicate(obligation.cause.span, binder) {
-                Ok(()) => Ok(Some(Vec::new())),
-                Err(_) => Err(CodeSelectionError(Unimplemented)),
-            }
-        }
-
-        ty::Predicate::RegionOutlives(ref binder) => {
-            match selcx.infcx().region_outlives_predicate(obligation.cause.span, binder) {
-                Ok(()) => Ok(Some(Vec::new())),
-                Err(_) => Err(CodeSelectionError(Unimplemented)),
-            }
-        }
-
-        ty::Predicate::TypeOutlives(ref binder) => {
-            // Check if there are higher-ranked regions.
-            match selcx.tcx().no_late_bound_regions(binder) {
-                // If there are, inspect the underlying type further.
-                None => {
-                    // Convert from `Binder<OutlivesPredicate<Ty, Region>>` to `Binder<Ty>`.
-                    let binder = binder.map_bound_ref(|pred| pred.0);
-
-                    // Check if the type has any bound regions.
-                    match selcx.tcx().no_late_bound_regions(&binder) {
-                        // If so, this obligation is an error (for now). Eventually we should be
-                        // able to support additional cases here, like `for<'a> &'a str: 'a`.
-                        None => {
-                            Err(CodeSelectionError(Unimplemented))
-                        }
-                        // Otherwise, we have something of the form
-                        // `for<'a> T: 'a where 'a not in T`, which we can treat as `T: 'static`.
-                        Some(t_a) => {
-                            register_region_obligation(t_a, ty::ReStatic,
-                                                       obligation.cause.clone(),
-                                                       region_obligations);
-                            Ok(Some(vec![]))
-                        }
-                    }
-                }
-                // If there aren't, register the obligation.
-                Some(ty::OutlivesPredicate(t_a, r_b)) => {
-                    register_region_obligation(t_a, r_b,
-                                               obligation.cause.clone(),
-                                               region_obligations);
-                    Ok(Some(vec![]))
-                }
-            }
-        }
-
-        ty::Predicate::Projection(ref data) => {
-            let project_obligation = obligation.with(data.clone());
-            match project::poly_project_and_unify_type(selcx, &project_obligation) {
-                Ok(None) => {
-                    pending_obligation.stalled_on =
-                        trait_ref_type_vars(selcx, data.to_poly_trait_ref());
-                    Ok(None)
-                }
-                Ok(v) => Ok(v),
-                Err(e) => Err(CodeProjectionError(e))
-            }
-        }
-
-        ty::Predicate::ObjectSafe(trait_def_id) => {
-            if !is_object_safe(selcx.tcx(), trait_def_id) {
-                Err(CodeSelectionError(Unimplemented))
-            } else {
-                Ok(Some(Vec::new()))
-            }
-        }
-
-        ty::Predicate::WellFormed(ty) => {
-            match ty::wf::obligations(selcx.infcx(), obligation.cause.body_id,
-                                      ty, obligation.cause.span) {
-                None => {
-                    pending_obligation.stalled_on = vec![ty];
-                    Ok(None)
-                }
-                s => Ok(s)
-            }
-        }
-    }
-}
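The `stalled_on` handling at the top of `process_predicate1` is essentially a watch list: an obligation records the inference variables it is blocked on and is skipped until at least one of them changes. A minimal standalone sketch of that idea (the names and types here are invented for illustration; this is not compiler code):

```rust
use std::collections::HashMap;

struct Pending {
    name: &'static str,
    stalled_on: Vec<&'static str>, // the unknowns this work item is blocked on
}

// Nothing changed: every unknown we were waiting on is still unresolved,
// so re-running the work item now cannot make progress.
fn still_stalled(task: &Pending, resolved: &HashMap<&'static str, i32>) -> bool {
    task.stalled_on.iter().all(|v| !resolved.contains_key(v))
}

fn main() {
    let mut resolved: HashMap<&'static str, i32> = HashMap::new();
    let task = Pending { name: "obligation #1", stalled_on: vec!["?T", "?U"] };

    assert!(still_stalled(&task, &resolved));  // skip it: no inputs have changed

    resolved.insert("?T", 42);                 // one unknown was just resolved
    assert!(!still_stalled(&task, &resolved)); // now worth re-processing
    println!("re-processing {}", task.name);
}
```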
-
-/// For defaulted traits, we use a co-inductive strategy to solve, so
-/// that recursion is ok. This routine returns true if the top of the
-/// stack (`top_obligation` and `top_data`):
-/// - is a defaulted trait, and
-/// - it also appears in the backtrace at some position `X`; and,
-/// - all the predicates at positions `X..` between `X` and the top are
-///   also defaulted traits.
-fn coinductive_match<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
-                              top_obligation: &PredicateObligation<'tcx>,
-                              top_data: &ty::PolyTraitPredicate<'tcx>,
-                              backtrace: &Backtrace<PendingPredicateObligation<'tcx>>)
-                              -> bool
-{
-    if selcx.tcx().trait_has_default_impl(top_data.def_id()) {
-        debug!("coinductive_match: top_data={:?}", top_data);
-        for bt_obligation in backtrace.clone() {
-            debug!("coinductive_match: bt_obligation={:?}", bt_obligation);
-
-            // *Everything* in the backtrace must be a defaulted trait.
-            match bt_obligation.obligation.predicate {
-                ty::Predicate::Trait(ref data) => {
-                    if !selcx.tcx().trait_has_default_impl(data.def_id()) {
-                        debug!("coinductive_match: trait does not have default impl");
-                        break;
-                    }
-                }
-                _ => { break; }
-            }
-
-            // And we must find a recursive match.
-            if bt_obligation.obligation.predicate == top_obligation.predicate {
-                debug!("coinductive_match: found a match in the backtrace");
-                return true;
-            }
-        }
-    }
-
-    false
-}
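A concrete illustration of why this co-inductive rule matters (an invented example, not taken from this file): proving that a recursive type implements an auto/defaulted trait such as `Send` necessarily revisits the same predicate, and the cycle must be accepted rather than reported as an overflow.

```rust
// Proving `List: Send` requires `Option<Box<List>>: Send`, which in turn
// requires `List: Send` again. For defaulted (auto) traits that cycle is
// accepted co-inductively, which is what lets ordinary recursive types
// be `Send`.
struct List {
    value: i32,
    next: Option<Box<List>>,
}

fn assert_send<T: Send>() {}

fn main() {
    assert_send::<List>(); // compiles only because the cyclic proof is accepted
    let list = List { value: 1, next: Some(Box::new(List { value: 2, next: None })) };
    let second = list.next.map(|n| n.value).unwrap_or(0);
    println!("{} -> {}", list.value, second);
}
```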
-
-fn scan_for_cycle<'a,'tcx>(top_obligation: &PredicateObligation<'tcx>,
-                           backtrace: &Backtrace<PendingPredicateObligation<'tcx>>)
-                           -> Option<Vec<PredicateObligation<'tcx>>>
-{
-    let mut map = FnvHashMap();
-    let all_obligations =
-        || iter::once(top_obligation)
-               .chain(backtrace.clone()
-                               .map(|p| &p.obligation));
-    for (index, bt_obligation) in all_obligations().enumerate() {
-        if let Some(&start) = map.get(&bt_obligation.predicate) {
-            // Found a cycle starting at position `start` and running
-            // until the current position (`index`).
-            return Some(all_obligations().skip(start).take(index - start + 1).cloned().collect());
-        } else {
-            map.insert(bt_obligation.predicate.clone(), index);
-        }
-    }
-    None
-}
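The same cycle-scan bookkeeping, written as a small self-contained sketch over an arbitrary sequence (a hypothetical helper, not compiler code): record the first index at which each item is seen and report the span from that index through the repeat.

```rust
use std::collections::HashMap;
use std::hash::Hash;

// Walk a sequence, remember the first index at which each item was seen,
// and return the sub-slice running from that first occurrence through the
// repeat -- the same bookkeeping the function above performs on predicates.
fn find_cycle<T: Eq + Hash>(items: &[T]) -> Option<&[T]> {
    let mut first_seen: HashMap<&T, usize> = HashMap::new();
    for (index, item) in items.iter().enumerate() {
        if let Some(&start) = first_seen.get(item) {
            return Some(&items[start..=index]);
        }
        first_seen.insert(item, index);
    }
    None
}

fn main() {
    let trace = ["A: Foo", "B: Bar", "C: Baz", "B: Bar"];
    assert_eq!(find_cycle(&trace), Some(&trace[1..=3]));
    assert_eq!(find_cycle(&[1, 2, 3]), None);
}
```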
-
-fn register_region_obligation<'tcx>(t_a: Ty<'tcx>,
-                                    r_b: ty::Region,
-                                    cause: ObligationCause<'tcx>,
-                                    region_obligations: &mut NodeMap<Vec<RegionObligation<'tcx>>>)
-{
-    let region_obligation = RegionObligation { sup_type: t_a,
-                                               sub_region: r_b,
-                                               cause: cause };
-
-    debug!("register_region_obligation({:?}, cause={:?})",
-           region_obligation, region_obligation.cause);
-
-    region_obligations.entry(region_obligation.cause.body_id)
-                      .or_insert(vec![])
-                      .push(region_obligation);
-
-}
-
-impl<'tcx> LocalFulfilledPredicates<'tcx> {
-    pub fn new() -> LocalFulfilledPredicates<'tcx> {
-        LocalFulfilledPredicates {
-            set: FnvHashSet()
-        }
-    }
-
-    fn is_duplicate_or_add(&mut self, key: &ty::Predicate<'tcx>) -> bool {
-        // For a `LocalFulfilledPredicates`, if we find a match, we
-        // don't need to add a read edge to the dep-graph. This is
-        // because it means that the predicate has already been
-        // considered by this `FulfillmentContext`, and hence the
-        // containing task will already have an edge. (Here we are
-        // assuming each `FulfillmentContext` only gets used from one
-        // task; but to do otherwise makes no sense)
-        !self.set.insert(key.clone())
-    }
-}
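The `!self.set.insert(..)` trick above is a common "check and record in one step" pattern; a tiny standalone sketch (invented names, not compiler code):

```rust
use std::collections::HashSet;

// `HashSet::insert` returns `false` when the value was already present, so
// negating it answers "was this a duplicate?" while also recording the key.
fn is_duplicate_or_add(seen: &mut HashSet<String>, key: &str) -> bool {
    !seen.insert(key.to_string())
}

fn main() {
    let mut seen = HashSet::new();
    assert!(!is_duplicate_or_add(&mut seen, "T: Clone")); // first sighting
    assert!(is_duplicate_or_add(&mut seen, "T: Clone"));  // now a duplicate
}
```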
-
-impl<'tcx> GlobalFulfilledPredicates<'tcx> {
-    pub fn new(dep_graph: DepGraph) -> GlobalFulfilledPredicates<'tcx> {
-        GlobalFulfilledPredicates {
-            set: FnvHashSet(),
-            dep_graph: dep_graph,
-        }
-    }
-
-    pub fn check_duplicate(&self, key: &ty::Predicate<'tcx>) -> bool {
-        if let ty::Predicate::Trait(ref data) = *key {
-            self.check_duplicate_trait(data)
-        } else {
-            false
-        }
-    }
-
-    pub fn check_duplicate_trait(&self, data: &ty::PolyTraitPredicate<'tcx>) -> bool {
-        // For the global predicate registry, when we find a match, it
-        // may have been computed by some other task, so we want to
-        // add a read from the node corresponding to the predicate
-        // processing to make sure we get the transitive dependencies.
-        if self.set.contains(data) {
-            debug_assert!(data.is_global());
-            self.dep_graph.read(data.dep_node());
-            debug!("check_duplicate: global predicate `{:?}` already proved elsewhere", data);
-
-            info!("check_duplicate_trait hit: `{:?}`", data);
-
-            true
-        } else {
-            false
-        }
-    }
-
-    fn add_if_global(&mut self, key: &ty::Predicate<'tcx>) {
-        if let ty::Predicate::Trait(ref data) = *key {
-            // We only add things to the global predicate registry
-            // after the current task has proved them, and hence
-            // already has the required read edges, so we don't need
-            // to add any more edges here.
-            if data.is_global() {
-                if self.set.insert(data.clone()) {
-                    debug!("add_if_global: global predicate `{:?}` added", data);
-                    info!("check_duplicate_trait entry: `{:?}`", data);
-                }
-            }
-        }
-    }
-}
-
-fn to_fulfillment_error<'tcx>(
-    error: Error<PendingPredicateObligation<'tcx>, FulfillmentErrorCode<'tcx>>)
-    -> FulfillmentError<'tcx>
-{
-    let obligation = error.backtrace.into_iter().next().unwrap().obligation;
-    FulfillmentError::new(obligation, error.error)
-}
diff --git a/src/librustc/middle/traits/mod.rs b/src/librustc/middle/traits/mod.rs
deleted file mode 100644 (file)
index f0ff038..0000000
+++ /dev/null
@@ -1,621 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Trait Resolution. See the Book for more.
-
-pub use self::SelectionError::*;
-pub use self::FulfillmentErrorCode::*;
-pub use self::Vtable::*;
-pub use self::ObligationCauseCode::*;
-
-use middle::def_id::DefId;
-use middle::free_region::FreeRegionMap;
-use middle::subst;
-use middle::ty::{self, Ty, TypeFoldable};
-use middle::infer::{self, fixup_err_to_string, InferCtxt};
-
-use std::rc::Rc;
-use syntax::ast;
-use syntax::codemap::{Span, DUMMY_SP};
-
-pub use self::error_reporting::TraitErrorKey;
-pub use self::error_reporting::recursive_type_with_infinite_size_error;
-pub use self::error_reporting::report_fulfillment_errors;
-pub use self::error_reporting::report_overflow_error;
-pub use self::error_reporting::report_overflow_error_cycle;
-pub use self::error_reporting::report_selection_error;
-pub use self::error_reporting::report_object_safety_error;
-pub use self::coherence::orphan_check;
-pub use self::coherence::overlapping_impls;
-pub use self::coherence::OrphanCheckErr;
-pub use self::fulfill::{FulfillmentContext, GlobalFulfilledPredicates, RegionObligation};
-pub use self::project::MismatchedProjectionTypes;
-pub use self::project::normalize;
-pub use self::project::Normalized;
-pub use self::object_safety::is_object_safe;
-pub use self::object_safety::astconv_object_safety_violations;
-pub use self::object_safety::object_safety_violations;
-pub use self::object_safety::ObjectSafetyViolation;
-pub use self::object_safety::MethodViolationCode;
-pub use self::object_safety::is_vtable_safe_method;
-pub use self::select::EvaluationCache;
-pub use self::select::SelectionContext;
-pub use self::select::SelectionCache;
-pub use self::select::{MethodMatchResult, MethodMatched, MethodAmbiguous, MethodDidNotMatch};
-pub use self::select::{MethodMatchedData}; // intentionally don't export variants
-pub use self::util::elaborate_predicates;
-pub use self::util::get_vtable_index_of_object_method;
-pub use self::util::trait_ref_for_builtin_bound;
-pub use self::util::predicate_for_trait_def;
-pub use self::util::supertraits;
-pub use self::util::Supertraits;
-pub use self::util::supertrait_def_ids;
-pub use self::util::SupertraitDefIds;
-pub use self::util::transitive_bounds;
-pub use self::util::upcast;
-
-mod coherence;
-mod error_reporting;
-mod fulfill;
-mod project;
-mod object_safety;
-mod select;
-mod structural_impls;
-mod util;
-
-/// An `Obligation` represents some trait reference (e.g. `int:Eq`) for
-/// which the vtable must be found.  The process of finding a vtable is
-/// called "resolving" the `Obligation`. This process consists of
-/// either identifying an `impl` (e.g., `impl Eq for int`) that
-/// provides the required vtable, or else finding a bound that is in
-/// scope. The eventual result is usually a `Selection` (defined below).
-#[derive(Clone, PartialEq, Eq)]
-pub struct Obligation<'tcx, T> {
-    pub cause: ObligationCause<'tcx>,
-    pub recursion_depth: usize,
-    pub predicate: T,
-}
-
-pub type PredicateObligation<'tcx> = Obligation<'tcx, ty::Predicate<'tcx>>;
-pub type TraitObligation<'tcx> = Obligation<'tcx, ty::PolyTraitPredicate<'tcx>>;
-
-/// Why did we incur this obligation? Used for error reporting.
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub struct ObligationCause<'tcx> {
-    pub span: Span,
-
-    // The id of the fn body that triggered this obligation. This is
-    // used for region obligations to determine the precise
-    // environment in which the region obligation should be evaluated
-    // (in particular, closures can add new assumptions). See the
-    // field `region_obligations` of the `FulfillmentContext` for more
-    // information.
-    pub body_id: ast::NodeId,
-
-    pub code: ObligationCauseCode<'tcx>
-}
-
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub enum ObligationCauseCode<'tcx> {
-    /// Not well classified or should be obvious from span.
-    MiscObligation,
-
-    /// The element type of a slice or array must be well-formed
-    SliceOrArrayElem,
-
-    /// This is the trait reference from the given projection
-    ProjectionWf(ty::ProjectionTy<'tcx>),
-
-    /// In an impl of trait X for type Y, type Y must
-    /// also implement all supertraits of X.
-    ItemObligation(DefId),
-
-    /// A type like `&'a T` is WF only if `T: 'a`.
-    ReferenceOutlivesReferent(Ty<'tcx>),
-
-    /// Obligation incurred due to an object cast.
-    ObjectCastObligation(/* Object type */ Ty<'tcx>),
-
-    /// Various cases where expressions must be sized/copy/etc:
-    AssignmentLhsSized,        // L = X implies that L is Sized
-    StructInitializerSized,    // S { ... } must be Sized
-    VariableType(ast::NodeId), // Type of each variable must be Sized
-    ReturnType,                // Return type must be Sized
-    RepeatVec,                 // [T,..n] --> T must be Copy
-
-    // Capture of the variable with the given id by a closure (the span is
-    // the span of the closure)
-    ClosureCapture(ast::NodeId, Span, ty::BuiltinBound),
-
-    // Types of fields (other than the last) in a struct must be sized.
-    FieldSized,
-
-    // static items must have `Sync` type
-    SharedStatic,
-
-    BuiltinDerivedObligation(DerivedObligationCause<'tcx>),
-
-    ImplDerivedObligation(DerivedObligationCause<'tcx>),
-
-    CompareImplMethodObligation,
-}
-
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub struct DerivedObligationCause<'tcx> {
-    /// The trait reference of the parent obligation that led to the
-    /// current obligation. Note that only trait obligations lead to
-    /// derived obligations, so we just store the trait reference here
-    /// directly.
-    parent_trait_ref: ty::PolyTraitRef<'tcx>,
-
-    /// The parent trait had this cause
-    parent_code: Rc<ObligationCauseCode<'tcx>>
-}
-
-pub type Obligations<'tcx, O> = Vec<Obligation<'tcx, O>>;
-pub type PredicateObligations<'tcx> = Vec<PredicateObligation<'tcx>>;
-pub type TraitObligations<'tcx> = Vec<TraitObligation<'tcx>>;
-
-pub type Selection<'tcx> = Vtable<'tcx, PredicateObligation<'tcx>>;
-
-#[derive(Clone,Debug)]
-pub enum SelectionError<'tcx> {
-    Unimplemented,
-    OutputTypeParameterMismatch(ty::PolyTraitRef<'tcx>,
-                                ty::PolyTraitRef<'tcx>,
-                                ty::error::TypeError<'tcx>),
-    TraitNotObjectSafe(DefId),
-}
-
-pub struct FulfillmentError<'tcx> {
-    pub obligation: PredicateObligation<'tcx>,
-    pub code: FulfillmentErrorCode<'tcx>
-}
-
-#[derive(Clone)]
-pub enum FulfillmentErrorCode<'tcx> {
-    CodeSelectionError(SelectionError<'tcx>),
-    CodeProjectionError(MismatchedProjectionTypes<'tcx>),
-    CodeAmbiguity,
-}
-
-/// When performing resolution, it is typically the case that there
-/// can be one of three outcomes:
-///
-/// - `Ok(Some(r))`: success occurred with result `r`
-/// - `Ok(None)`: could not definitely determine anything, usually due
-///   to inconclusive type inference.
-/// - `Err(e)`: error `e` occurred
-pub type SelectionResult<'tcx, T> = Result<Option<T>, SelectionError<'tcx>>;
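The same three-way convention, shown on a toy function outside the compiler (an invented example): `Ok(Some(..))` for a definite answer, `Ok(None)` when there is not yet enough information, and `Err(..)` for a definite failure.

```rust
fn parse_flag(input: Option<&str>) -> Result<Option<bool>, String> {
    match input {
        None => Ok(None),             // inconclusive: nothing to look at yet
        Some("on") => Ok(Some(true)), // success with a result
        Some("off") => Ok(Some(false)),
        Some(other) => Err(format!("unknown flag value: {}", other)),
    }
}

fn main() {
    assert_eq!(parse_flag(None), Ok(None));
    assert_eq!(parse_flag(Some("on")), Ok(Some(true)));
    assert!(parse_flag(Some("maybe")).is_err());
}
```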
-
-/// Given the successful resolution of an obligation, the `Vtable`
-/// indicates where the vtable comes from. Note that while we call this
-/// a "vtable", it does not necessarily indicate dynamic dispatch at
-/// runtime. `Vtable` instances just tell the compiler where to find
-/// methods, but in generic code those methods are typically statically
-/// dispatched -- only when an object is constructed is a `Vtable`
-/// instance reified into an actual vtable.
-///
-/// For example, the vtable may be tied to a specific impl (case A),
-/// or it may be relative to some bound that is in scope (case B).
-///
-///
-/// ```
-/// impl<T:Clone> Clone for Option<T> { ... } // Impl_1
-/// impl<T:Clone> Clone for Box<T> { ... }    // Impl_2
-/// impl Clone for int { ... }             // Impl_3
-///
-/// fn foo<T:Clone>(concrete: Option<Box<int>>,
-///                 param: T,
-///                 mixed: Option<T>) {
-///
-///    // Case A: Vtable points at a specific impl. Only possible when
-///    // type is concretely known. If the impl itself has bounded
-///    // type parameters, Vtable will carry resolutions for those as well:
-///    concrete.clone(); // Vtable(Impl_1, [Vtable(Impl_2, [Vtable(Impl_3)])])
-///
-///    // Case B: Vtable must be provided by caller. This applies when
-///    // type is a type parameter.
-///    param.clone();    // VtableParam
-///
-///    // Case C: A mix of cases A and B.
-///    mixed.clone();    // Vtable(Impl_1, [VtableParam])
-/// }
-/// ```
-///
-/// ### The type parameter `N`
-///
-/// See explanation on `VtableImplData`.
-#[derive(Clone)]
-pub enum Vtable<'tcx, N> {
-    /// Vtable identifying a particular impl.
-    VtableImpl(VtableImplData<'tcx, N>),
-
-    /// Vtable for default trait implementations
-    /// This carries the information and nested obligations with regards
-    /// to a default implementation for a trait `Trait`. The nested obligations
-    /// ensure the trait implementation holds for all the constituent types.
-    VtableDefaultImpl(VtableDefaultImplData<N>),
-
-    /// Successful resolution to an obligation provided by the caller
-    /// for some type parameter. The `Vec<N>` represents the
-    /// obligations incurred from normalizing the where-clause (if
-    /// any).
-    VtableParam(Vec<N>),
-
-    /// Virtual calls through an object
-    VtableObject(VtableObjectData<'tcx>),
-
-    /// Successful resolution for a builtin trait.
-    VtableBuiltin(VtableBuiltinData<N>),
-
-    /// Vtable automatically generated for a closure. The def ID is the ID
-    /// of the closure expression. This is a `VtableImpl` in spirit, but the
-    /// impl is generated by the compiler and does not appear in the source.
-    VtableClosure(VtableClosureData<'tcx, N>),
-
-    /// Same as above, but for a fn pointer type with the given signature.
-    VtableFnPointer(ty::Ty<'tcx>),
-}
-
-/// Identifies a particular impl in the source, along with a set of
-/// substitutions from the impl's type/lifetime parameters. The
-/// `nested` vector corresponds to the nested obligations attached to
-/// the impl's type parameters.
-///
-/// The type parameter `N` indicates the type used for "nested
-/// obligations" that are required by the impl. During type check, this
-/// is `Obligation`, as one might expect. During trans, however, this
-/// is `()`, because trans only requires a shallow resolution of an
-/// impl, and nested obligations are satisfied later.
-#[derive(Clone, PartialEq, Eq)]
-pub struct VtableImplData<'tcx, N> {
-    pub impl_def_id: DefId,
-    pub substs: subst::Substs<'tcx>,
-    pub nested: Vec<N>
-}
-
-#[derive(Clone, PartialEq, Eq)]
-pub struct VtableClosureData<'tcx, N> {
-    pub closure_def_id: DefId,
-    pub substs: ty::ClosureSubsts<'tcx>,
-    /// Nested obligations. This can be non-empty if the closure
-    /// signature contains associated types.
-    pub nested: Vec<N>
-}
-
-#[derive(Clone)]
-pub struct VtableDefaultImplData<N> {
-    pub trait_def_id: DefId,
-    pub nested: Vec<N>
-}
-
-#[derive(Clone)]
-pub struct VtableBuiltinData<N> {
-    pub nested: Vec<N>
-}
-
-/// A vtable for some object-safe trait `Foo` automatically derived
-/// for the object type `Foo`.
-#[derive(PartialEq,Eq,Clone)]
-pub struct VtableObjectData<'tcx> {
-    /// `Foo` upcast to the obligation trait. This will be some supertrait of `Foo`.
-    pub upcast_trait_ref: ty::PolyTraitRef<'tcx>,
-
-    /// The vtable is formed by concatenating together the method lists of
-    /// the base object trait and all supertraits; this is the start of
-    /// `upcast_trait_ref`'s methods in that vtable.
-    pub vtable_base: usize
-}
-
-/// Creates predicate obligations from the generic bounds.
-pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>,
-                                     generic_bounds: &ty::InstantiatedPredicates<'tcx>)
-                                     -> PredicateObligations<'tcx>
-{
-    util::predicates_for_generics(cause, 0, generic_bounds)
-}
-
-/// Determines whether the type `ty` is known to meet `bound` and
-/// returns true if so. Returns false if `ty` either does not meet
-/// `bound` or is not known to meet `bound` (note that this is
-/// conservative towards *no impl*, which is the opposite of the
-/// `evaluate` methods).
-pub fn type_known_to_meet_builtin_bound<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
-                                                 ty: Ty<'tcx>,
-                                                 bound: ty::BuiltinBound,
-                                                 span: Span)
-                                                 -> bool
-{
-    debug!("type_known_to_meet_builtin_bound(ty={:?}, bound={:?})",
-           ty,
-           bound);
-
-    let cause = ObligationCause::misc(span, ast::DUMMY_NODE_ID);
-    let obligation =
-        util::predicate_for_builtin_bound(infcx.tcx, cause, bound, 0, ty);
-    let obligation = match obligation {
-        Ok(o) => o,
-        Err(..) => return false
-    };
-    let result = SelectionContext::new(infcx)
-        .evaluate_obligation_conservatively(&obligation);
-    debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} => {:?}",
-           ty, bound, result);
-
-    if result && (ty.has_infer_types() || ty.has_closure_types()) {
-        // Because of inference "guessing", selection can sometimes claim
-        // to succeed while the success requires a guess. To ensure
-        // this function's result remains infallible, we must confirm
-        // that guess. While imperfect, I believe this is sound.
-
-        let mut fulfill_cx = FulfillmentContext::new();
-
-        // We can use a dummy node-id here because we won't pay any mind
-        // to region obligations that arise (there shouldn't really be any
-        // anyhow).
-        let cause = ObligationCause::misc(span, ast::DUMMY_NODE_ID);
-
-        fulfill_cx.register_builtin_bound(infcx, ty, bound, cause);
-
-        // Note: we only assume something is `Copy` if we can
-        // *definitively* show that it implements `Copy`. Otherwise,
-        // assume it is move; linear is always ok.
-        match fulfill_cx.select_all_or_error(infcx) {
-            Ok(()) => {
-                debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} success",
-                       ty,
-                       bound);
-                true
-            }
-            Err(e) => {
-                debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} errors={:?}",
-                       ty,
-                       bound,
-                       e);
-                false
-            }
-        }
-    } else {
-        result
-    }
-}
-
-// FIXME: this is gonna need to be removed ...
-/// Normalizes the parameter environment, reporting errors if they occur.
-pub fn normalize_param_env_or_error<'a,'tcx>(unnormalized_env: ty::ParameterEnvironment<'a,'tcx>,
-                                             cause: ObligationCause<'tcx>)
-                                             -> ty::ParameterEnvironment<'a,'tcx>
-{
-    // I'm not wild about reporting errors here; I'd prefer to
-    // have the errors get reported at a defined place (e.g.,
-    // during typeck). Instead I have all parameter
-    // environments, in effect, going through this function
-    // and hence potentially reporting errors. This ensures of
-    // course that we never forget to normalize (the
-    // alternative seemed like it would involve a lot of
-    // manual invocations of this fn -- and then we'd have to
-    // deal with the errors at each of those sites).
-    //
-    // In any case, in practice, typeck constructs all the
-    // parameter environments once for every fn as it goes,
-    // and errors will get reported then; so after typeck we
-    // can be sure that no errors should occur.
-
-    let tcx = unnormalized_env.tcx;
-    let span = cause.span;
-    let body_id = cause.body_id;
-
-    debug!("normalize_param_env_or_error(unnormalized_env={:?})",
-           unnormalized_env);
-
-    let predicates: Vec<_> =
-        util::elaborate_predicates(tcx, unnormalized_env.caller_bounds.clone())
-        .filter(|p| !p.is_global()) // (*)
-        .collect();
-
-    // (*) Any predicate like `i32: Trait<u32>` or whatever doesn't
-    // need to be in the *environment* to be proven, so screen those
-    // out. This is important for the soundness of inter-fn
-    // caching. Note though that we should probably check that these
-    // predicates hold at the point where the environment is
-    // constructed, but I am not currently doing so out of laziness.
-    // -nmatsakis
-
-    debug!("normalize_param_env_or_error: elaborated-predicates={:?}",
-           predicates);
-
-    let elaborated_env = unnormalized_env.with_caller_bounds(predicates);
-
-    let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(elaborated_env));
-    let predicates = match fully_normalize(&infcx,
-                                           cause,
-                                           &infcx.parameter_environment.caller_bounds) {
-        Ok(predicates) => predicates,
-        Err(errors) => {
-            report_fulfillment_errors(&infcx, &errors);
-            return infcx.parameter_environment; // an unnormalized env is better than nothing
-        }
-    };
-
-    debug!("normalize_param_env_or_error: normalized predicates={:?}",
-           predicates);
-
-    let free_regions = FreeRegionMap::new();
-    infcx.resolve_regions_and_report_errors(&free_regions, body_id);
-    let predicates = match infcx.fully_resolve(&predicates) {
-        Ok(predicates) => predicates,
-        Err(fixup_err) => {
-            // If we encounter a fixup error, it means that some type
-            // variable wound up unconstrained. I actually don't know
-            // if this can happen, and I certainly don't expect it to
-            // happen often, but if it did happen it probably
-            // represents a legitimate failure due to some kind of
-            // unconstrained variable, and it seems better not to ICE,
-            // all things considered.
-            let err_msg = fixup_err_to_string(fixup_err);
-            tcx.sess.span_err(span, &err_msg);
-            return infcx.parameter_environment; // an unnormalized env is better than nothing
-        }
-    };
-
-    debug!("normalize_param_env_or_error: resolved predicates={:?}",
-           predicates);
-
-    infcx.parameter_environment.with_caller_bounds(predicates)
-}
-
-pub fn fully_normalize<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
-                                  cause: ObligationCause<'tcx>,
-                                  value: &T)
-                                  -> Result<T, Vec<FulfillmentError<'tcx>>>
-    where T : TypeFoldable<'tcx>
-{
-    debug!("fully_normalize(value={:?})", value);
-
-    let mut selcx = &mut SelectionContext::new(infcx);
-    // FIXME (@jroesch) ISSUE 26721
-    // I'm not sure if this is a bug or not, needs further investigation.
-    // It appears that by reusing the fulfillment_cx here we incur more
-    // obligations and later trip an assertion on regionck.rs line 337.
-    //
-    // The two possibilities I see are:
-    //      - normalization is not actually fully happening and we
-    //        have a bug elsewhere
-    //      - we are adding a duplicate bound into the list causing
-    //        its size to change.
-    //
-    // I think we should probably land this refactor and then come
-    // back to this in a follow-up patch.
-    let mut fulfill_cx = FulfillmentContext::new();
-
-    let Normalized { value: normalized_value, obligations } =
-        project::normalize(selcx, cause, value);
-    debug!("fully_normalize: normalized_value={:?} obligations={:?}",
-           normalized_value,
-           obligations);
-    for obligation in obligations {
-        fulfill_cx.register_predicate_obligation(selcx.infcx(), obligation);
-    }
-
-    debug!("fully_normalize: select_all_or_error start");
-    match fulfill_cx.select_all_or_error(infcx) {
-        Ok(()) => { }
-        Err(e) => {
-            debug!("fully_normalize: error={:?}", e);
-            return Err(e);
-        }
-    }
-    debug!("fully_normalize: select_all_or_error complete");
-    let resolved_value = infcx.resolve_type_vars_if_possible(&normalized_value);
-    debug!("fully_normalize: resolved_value={:?}", resolved_value);
-    Ok(resolved_value)
-}
-
-impl<'tcx,O> Obligation<'tcx,O> {
-    pub fn new(cause: ObligationCause<'tcx>,
-               trait_ref: O)
-               -> Obligation<'tcx, O>
-    {
-        Obligation { cause: cause,
-                     recursion_depth: 0,
-                     predicate: trait_ref }
-    }
-
-    fn with_depth(cause: ObligationCause<'tcx>,
-                  recursion_depth: usize,
-                  trait_ref: O)
-                  -> Obligation<'tcx, O>
-    {
-        Obligation { cause: cause,
-                     recursion_depth: recursion_depth,
-                     predicate: trait_ref }
-    }
-
-    pub fn misc(span: Span, body_id: ast::NodeId, trait_ref: O) -> Obligation<'tcx, O> {
-        Obligation::new(ObligationCause::misc(span, body_id), trait_ref)
-    }
-
-    pub fn with<P>(&self, value: P) -> Obligation<'tcx,P> {
-        Obligation { cause: self.cause.clone(),
-                     recursion_depth: self.recursion_depth,
-                     predicate: value }
-    }
-}
-
-impl<'tcx> ObligationCause<'tcx> {
-    pub fn new(span: Span,
-               body_id: ast::NodeId,
-               code: ObligationCauseCode<'tcx>)
-               -> ObligationCause<'tcx> {
-        ObligationCause { span: span, body_id: body_id, code: code }
-    }
-
-    pub fn misc(span: Span, body_id: ast::NodeId) -> ObligationCause<'tcx> {
-        ObligationCause { span: span, body_id: body_id, code: MiscObligation }
-    }
-
-    pub fn dummy() -> ObligationCause<'tcx> {
-        ObligationCause { span: DUMMY_SP, body_id: 0, code: MiscObligation }
-    }
-}
-
-impl<'tcx, N> Vtable<'tcx, N> {
-    pub fn nested_obligations(self) -> Vec<N> {
-        match self {
-            VtableImpl(i) => i.nested,
-            VtableParam(n) => n,
-            VtableBuiltin(i) => i.nested,
-            VtableDefaultImpl(d) => d.nested,
-            VtableClosure(c) => c.nested,
-            VtableObject(_) | VtableFnPointer(..) => vec![]
-        }
-    }
-
-    pub fn map<M, F>(self, f: F) -> Vtable<'tcx, M> where F: FnMut(N) -> M {
-        match self {
-            VtableImpl(i) => VtableImpl(VtableImplData {
-                impl_def_id: i.impl_def_id,
-                substs: i.substs,
-                nested: i.nested.into_iter().map(f).collect()
-            }),
-            VtableParam(n) => VtableParam(n.into_iter().map(f).collect()),
-            VtableBuiltin(i) => VtableBuiltin(VtableBuiltinData {
-                nested: i.nested.into_iter().map(f).collect()
-            }),
-            VtableObject(o) => VtableObject(o),
-            VtableDefaultImpl(d) => VtableDefaultImpl(VtableDefaultImplData {
-                trait_def_id: d.trait_def_id,
-                nested: d.nested.into_iter().map(f).collect()
-            }),
-            VtableFnPointer(f) => VtableFnPointer(f),
-            VtableClosure(c) => VtableClosure(VtableClosureData {
-                closure_def_id: c.closure_def_id,
-                substs: c.substs,
-                nested: c.nested.into_iter().map(f).collect(),
-            })
-        }
-    }
-}
-
-impl<'tcx> FulfillmentError<'tcx> {
-    fn new(obligation: PredicateObligation<'tcx>,
-           code: FulfillmentErrorCode<'tcx>)
-           -> FulfillmentError<'tcx>
-    {
-        FulfillmentError { obligation: obligation, code: code }
-    }
-}
-
-impl<'tcx> TraitObligation<'tcx> {
-    fn self_ty(&self) -> ty::Binder<Ty<'tcx>> {
-        ty::Binder(self.predicate.skip_binder().self_ty())
-    }
-}
diff --git a/src/librustc/middle/traits/object_safety.rs b/src/librustc/middle/traits/object_safety.rs
deleted file mode 100644 (file)
index 7ffdc3b..0000000
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! "Object safety" refers to the ability for a trait to be converted
-//! to an object. In general, traits may only be converted to an
-//! object if all of their methods meet certain criteria. In particular,
-//! they must:
-//!
-//!   - have a suitable receiver from which we can extract a vtable;
-//!   - not reference the erased type `Self` except in this receiver;
-//!   - not have generic type parameters
-
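As a surface-level illustration of these criteria (hypothetical traits, not part of this module): a trait whose methods all take a `&self`-style receiver and never otherwise mention `Self` can be used behind a trait object, while one with a static constructor or `Self`-by-value methods cannot.

```rust
trait Shape {
    fn area(&self) -> f64; // ok: `&self` receiver, no generics, no other `Self`
}

#[allow(dead_code)] // kept only to show a shape of trait the checks reject
trait Builder {
    fn new() -> Self;                    // static method: no receiver to extract a vtable from
    fn merge(self, other: Self) -> Self; // references the erased type `Self` by value
}

struct Circle { radius: f64 }

impl Shape for Circle {
    fn area(&self) -> f64 { std::f64::consts::PI * self.radius * self.radius }
}

fn main() {
    // `Shape` is object safe, so the concrete type can be erased:
    let shapes: Vec<Box<dyn Shape>> = vec![Box::new(Circle { radius: 2.0 })];
    println!("area = {}", shapes[0].area());
    // A `Box<dyn Builder>` would be rejected by the violations computed here.
}
```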
-use super::supertraits;
-use super::elaborate_predicates;
-
-use middle::def_id::DefId;
-use middle::subst::{self, SelfSpace, TypeSpace};
-use middle::traits;
-use middle::ty::{self, ToPolyTraitRef, Ty, TypeFoldable};
-use std::rc::Rc;
-use syntax::ast;
-
-#[derive(Clone, Debug, PartialEq, Eq, Hash)]
-pub enum ObjectSafetyViolation<'tcx> {
-    /// Self : Sized declared on the trait
-    SizedSelf,
-
-    /// Supertrait reference references `Self` in an illegal location
-    /// (e.g. `trait Foo : Bar<Self>`)
-    SupertraitSelf,
-
-    /// Method has something illegal
-    Method(Rc<ty::Method<'tcx>>, MethodViolationCode),
-}
-
-/// Reasons a method might not be object-safe.
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
-pub enum MethodViolationCode {
-    /// e.g., `fn foo()`
-    StaticMethod,
-
-    /// e.g., `fn foo(&self, x: Self)` or `fn foo(&self) -> Self`
-    ReferencesSelf,
-
-    /// e.g., `fn foo<A>()`
-    Generic,
-}
-
-pub fn is_object_safe<'tcx>(tcx: &ty::ctxt<'tcx>,
-                            trait_def_id: DefId)
-                            -> bool
-{
-    // Because we query yes/no results frequently, we keep a cache:
-    let def = tcx.lookup_trait_def(trait_def_id);
-
-    let result = def.object_safety().unwrap_or_else(|| {
-        let result = object_safety_violations(tcx, trait_def_id).is_empty();
-
-        // Record just a yes/no result in the cache; this is what is
-        // queried most frequently. Note that this may overwrite a
-        // previous result, but always with the same thing.
-        def.set_object_safety(result);
-
-        result
-    });
-
-    debug!("is_object_safe({:?}) = {}", trait_def_id, result);
-
-    result
-}
-
-/// Returns the object safety violations that affect
-/// astconv - currently, Self in supertraits. This is needed
-/// because `object_safety_violations` can't be used during
-/// type collection.
-pub fn astconv_object_safety_violations<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                              trait_def_id: DefId)
-                                              -> Vec<ObjectSafetyViolation<'tcx>>
-{
-    let mut violations = vec![];
-
-    if supertraits_reference_self(tcx, trait_def_id) {
-        violations.push(ObjectSafetyViolation::SupertraitSelf);
-    }
-
-    debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
-           trait_def_id,
-           violations);
-
-    violations
-}
-
-pub fn object_safety_violations<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                      trait_def_id: DefId)
-                                      -> Vec<ObjectSafetyViolation<'tcx>>
-{
-    traits::supertrait_def_ids(tcx, trait_def_id)
-        .flat_map(|def_id| object_safety_violations_for_trait(tcx, def_id))
-        .collect()
-}
-
-fn object_safety_violations_for_trait<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                            trait_def_id: DefId)
-                                            -> Vec<ObjectSafetyViolation<'tcx>>
-{
-    // Check methods for violations.
-    let mut violations: Vec<_> =
-        tcx.trait_items(trait_def_id).iter()
-        .filter_map(|item| {
-            match *item {
-                ty::MethodTraitItem(ref m) => {
-                    object_safety_violation_for_method(tcx, trait_def_id, &m)
-                        .map(|code| ObjectSafetyViolation::Method(m.clone(), code))
-                }
-                _ => None,
-            }
-        })
-        .collect();
-
-    // Check the trait itself.
-    if trait_has_sized_self(tcx, trait_def_id) {
-        violations.push(ObjectSafetyViolation::SizedSelf);
-    }
-    if supertraits_reference_self(tcx, trait_def_id) {
-        violations.push(ObjectSafetyViolation::SupertraitSelf);
-    }
-
-    debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
-           trait_def_id,
-           violations);
-
-    violations
-}
-
-pub fn supertraits_reference_self<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                        trait_def_id: DefId)
-                                        -> bool
-{
-    let trait_def = tcx.lookup_trait_def(trait_def_id);
-    let trait_ref = trait_def.trait_ref.clone();
-    let trait_ref = trait_ref.to_poly_trait_ref();
-    let predicates = tcx.lookup_super_predicates(trait_def_id);
-    predicates
-        .predicates
-        .into_iter()
-        .map(|predicate| predicate.subst_supertrait(tcx, &trait_ref))
-        .any(|predicate| {
-            match predicate {
-                ty::Predicate::Trait(ref data) => {
-                    // In the case of a trait predicate, we can skip the "self" type.
-                    data.0.trait_ref.substs.types.get_slice(TypeSpace)
-                                                 .iter()
-                                                 .cloned()
-                                                 .any(|t| t.has_self_ty())
-                }
-                ty::Predicate::Projection(..) |
-                ty::Predicate::WellFormed(..) |
-                ty::Predicate::ObjectSafe(..) |
-                ty::Predicate::TypeOutlives(..) |
-                ty::Predicate::RegionOutlives(..) |
-                ty::Predicate::Equate(..) => {
-                    false
-                }
-            }
-        })
-}
-
-fn trait_has_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
-                              trait_def_id: DefId)
-                              -> bool
-{
-    let trait_def = tcx.lookup_trait_def(trait_def_id);
-    let trait_predicates = tcx.lookup_predicates(trait_def_id);
-    generics_require_sized_self(tcx, &trait_def.generics, &trait_predicates)
-}
-
-fn generics_require_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                     generics: &ty::Generics<'tcx>,
-                                     predicates: &ty::GenericPredicates<'tcx>)
-                                     -> bool
-{
-    let sized_def_id = match tcx.lang_items.sized_trait() {
-        Some(def_id) => def_id,
-        None => { return false; /* No Sized trait, can't require it! */ }
-    };
-
-    // Search for a predicate like `Self : Sized` amongst the trait bounds.
-    let free_substs = tcx.construct_free_substs(generics,
-                                                tcx.region_maps.node_extent(ast::DUMMY_NODE_ID));
-    let predicates = predicates.instantiate(tcx, &free_substs).predicates.into_vec();
-    elaborate_predicates(tcx, predicates)
-        .any(|predicate| {
-            match predicate {
-                ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => {
-                    trait_pred.0.self_ty().is_self()
-                }
-                ty::Predicate::Projection(..) |
-                ty::Predicate::Trait(..) |
-                ty::Predicate::Equate(..) |
-                ty::Predicate::RegionOutlives(..) |
-                ty::Predicate::WellFormed(..) |
-                ty::Predicate::ObjectSafe(..) |
-                ty::Predicate::TypeOutlives(..) => {
-                    false
-                }
-            }
-        })
-}
-
-/// Returns `Some(_)` if this method makes the containing trait not object safe.
-fn object_safety_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                            trait_def_id: DefId,
-                                            method: &ty::Method<'tcx>)
-                                            -> Option<MethodViolationCode>
-{
-    // Any method that has a `Self : Sized` requisite is otherwise
-    // exempt from the regulations.
-    if generics_require_sized_self(tcx, &method.generics, &method.predicates) {
-        return None;
-    }
-
-    virtual_call_violation_for_method(tcx, trait_def_id, method)
-}
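A hypothetical example of that exemption at the language level (not from this file): a method that would otherwise be a violation stays harmless if it carries a `Self: Sized` bound, because it can never be invoked through a trait object.

```rust
trait Stream {
    fn next_byte(&mut self) -> Option<u8>;

    // Returns `Self` by value, which would normally break object safety,
    // but the `Self: Sized` bound keeps it out of the trait object's vtable.
    fn rewound(self) -> Self
    where
        Self: Sized;
}

struct Counter(u8);

impl Stream for Counter {
    fn next_byte(&mut self) -> Option<u8> {
        self.0 = self.0.wrapping_add(1);
        Some(self.0)
    }

    fn rewound(self) -> Self {
        Counter(0)
    }
}

fn main() {
    // The trait is still usable as a trait object:
    let mut erased: Box<dyn Stream> = Box::new(Counter(3));
    assert_eq!(erased.next_byte(), Some(4));

    // The exempted method is only callable on a concrete, sized type:
    let fresh = Counter(9).rewound();
    assert_eq!(fresh.0, 0);
}
```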
-
-/// We say a method is *vtable safe* if it can be invoked on a trait
-/// object.  Note that object-safe traits can have some
-/// non-vtable-safe methods, so long as they require `Self:Sized` or
-/// otherwise ensure that they cannot be used when `Self=Trait`.
-pub fn is_vtable_safe_method<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                   trait_def_id: DefId,
-                                   method: &ty::Method<'tcx>)
-                                   -> bool
-{
-    virtual_call_violation_for_method(tcx, trait_def_id, method).is_none()
-}
-
-/// Returns `Some(_)` if this method cannot be called on a trait
-/// object; this does not necessarily imply that the enclosing trait
-/// is not object safe, because the method might have a where clause
-/// `Self:Sized`.
-fn virtual_call_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                           trait_def_id: DefId,
-                                           method: &ty::Method<'tcx>)
-                                           -> Option<MethodViolationCode>
-{
-    // The method's first parameter must be something that derefs (or
-    // autorefs) to `&self`. For now, we only accept `self`, `&self`
-    // and `Box<Self>`.
-    match method.explicit_self {
-        ty::ExplicitSelfCategory::Static => {
-            return Some(MethodViolationCode::StaticMethod);
-        }
-
-        ty::ExplicitSelfCategory::ByValue |
-        ty::ExplicitSelfCategory::ByReference(..) |
-        ty::ExplicitSelfCategory::ByBox => {
-        }
-    }
-
-    // The `Self` type is erased, so it should not appear in list of
-    // arguments or return type apart from the receiver.
-    let ref sig = method.fty.sig;
-    for &input_ty in &sig.0.inputs[1..] {
-        if contains_illegal_self_type_reference(tcx, trait_def_id, input_ty) {
-            return Some(MethodViolationCode::ReferencesSelf);
-        }
-    }
-    if let ty::FnConverging(result_type) = sig.0.output {
-        if contains_illegal_self_type_reference(tcx, trait_def_id, result_type) {
-            return Some(MethodViolationCode::ReferencesSelf);
-        }
-    }
-
-    // We can't monomorphize things like `fn foo<A>(...)`.
-    if !method.generics.types.is_empty_in(subst::FnSpace) {
-        return Some(MethodViolationCode::Generic);
-    }
-
-    None
-}
-
-fn contains_illegal_self_type_reference<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                              trait_def_id: DefId,
-                                              ty: Ty<'tcx>)
-                                              -> bool
-{
-    // This is somewhat subtle. In general, we want to forbid
-    // references to `Self` in the argument and return types,
-    // since the value of `Self` is erased. However, there is one
-    // exception: it is ok to reference `Self` in order to access
-    // an associated type of the current trait, since we retain
-    // the value of those associated types in the object type
-    // itself.
-    //
-    // ```rust
-    // trait SuperTrait {
-    //     type X;
-    // }
-    //
-    // trait Trait : SuperTrait {
-    //     type Y;
-    //     fn foo(&self, x: Self) // bad
-    //     fn foo(&self) -> Self // bad
-    //     fn foo(&self) -> Option<Self> // bad
-    //     fn foo(&self) -> Self::Y // OK, desugars to next example
-    //     fn foo(&self) -> <Self as Trait>::Y // OK
-    //     fn foo(&self) -> Self::X // OK, desugars to next example
-    //     fn foo(&self) -> <Self as SuperTrait>::X // OK
-    // }
-    // ```
-    //
-    // However, it is not as simple as allowing `Self` in a projected
-    // type, because there are illegal ways to use `Self` as well:
-    //
-    // ```rust
-    // trait Trait : SuperTrait {
-    //     ...
-    //     fn foo(&self) -> <Self as SomeOtherTrait>::X;
-    // }
-    // ```
-    //
-    // Here we will not have the type of `X` recorded in the
-    // object type, and we cannot resolve `Self as SomeOtherTrait`
-    // without knowing what `Self` is.
-
-    let mut supertraits: Option<Vec<ty::PolyTraitRef<'tcx>>> = None;
-    let mut error = false;
-    ty.maybe_walk(|ty| {
-        match ty.sty {
-            ty::TyParam(ref param_ty) => {
-                if param_ty.space == SelfSpace {
-                    error = true;
-                }
-
-                false // no contained types to walk
-            }
-
-            ty::TyProjection(ref data) => {
-                // This is a projected type `<Foo as SomeTrait>::X`.
-
-                // Compute supertraits of current trait lazily.
-                if supertraits.is_none() {
-                    let trait_def = tcx.lookup_trait_def(trait_def_id);
-                    let trait_ref = ty::Binder(trait_def.trait_ref.clone());
-                    supertraits = Some(traits::supertraits(tcx, trait_ref).collect());
-                }
-
-                // Determine whether the trait reference `Foo as
-                // SomeTrait` is in fact a supertrait of the
-                // current trait. In that case, this type is
-                // legal, because the type `X` will be specified
-                // in the object type.  Note that we can just use
-                // direct equality here because all of these types
-                // are part of the formal parameter listing, and
-                // hence there should be no inference variables.
-                let projection_trait_ref = ty::Binder(data.trait_ref.clone());
-                let is_supertrait_of_current_trait =
-                    supertraits.as_ref().unwrap().contains(&projection_trait_ref);
-
-                if is_supertrait_of_current_trait {
-                    false // do not walk contained types, do not report error, do collect $200
-                } else {
-                    true // DO walk contained types, POSSIBLY reporting an error
-                }
-            }
-
-            _ => true, // walk contained types, if any
-        }
-    });
-
-    error
-}
diff --git a/src/librustc/middle/traits/project.rs b/src/librustc/middle/traits/project.rs
deleted file mode 100644 (file)
index c363425..0000000
+++ /dev/null
@@ -1,983 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Code for projecting associated types out of trait references.
-
-use super::elaborate_predicates;
-use super::report_overflow_error;
-use super::Obligation;
-use super::ObligationCause;
-use super::PredicateObligation;
-use super::SelectionContext;
-use super::SelectionError;
-use super::VtableClosureData;
-use super::VtableImplData;
-use super::util;
-
-use middle::infer::{self, TypeOrigin};
-use middle::subst::Subst;
-use middle::ty::{self, ToPredicate, ToPolyTraitRef, Ty};
-use middle::ty::fold::{TypeFoldable, TypeFolder};
-use syntax::parse::token;
-use util::common::FN_OUTPUT_NAME;
-
-pub type PolyProjectionObligation<'tcx> =
-    Obligation<'tcx, ty::PolyProjectionPredicate<'tcx>>;
-
-pub type ProjectionObligation<'tcx> =
-    Obligation<'tcx, ty::ProjectionPredicate<'tcx>>;
-
-pub type ProjectionTyObligation<'tcx> =
-    Obligation<'tcx, ty::ProjectionTy<'tcx>>;
-
-/// When attempting to resolve `<T as TraitRef>::Name` ...
-#[derive(Debug)]
-pub enum ProjectionTyError<'tcx> {
-    /// ...we found multiple sources of information and couldn't resolve the ambiguity.
-    TooManyCandidates,
-
-    /// ...an error occurred matching `T : TraitRef`
-    TraitSelectionError(SelectionError<'tcx>),
-}
-
-#[derive(Clone)]
-pub struct MismatchedProjectionTypes<'tcx> {
-    pub err: ty::error::TypeError<'tcx>
-}
-
-#[derive(PartialEq, Eq, Debug)]
-enum ProjectionTyCandidate<'tcx> {
-    // from a where-clause in the env or object type
-    ParamEnv(ty::PolyProjectionPredicate<'tcx>),
-
-    // from the definition of `Trait` when you have something like <<A as Trait>::B as Trait2>::C
-    TraitDef(ty::PolyProjectionPredicate<'tcx>),
-
-    // defined in an impl
-    Impl(VtableImplData<'tcx, PredicateObligation<'tcx>>),
-
-    // closure return type
-    Closure(VtableClosureData<'tcx, PredicateObligation<'tcx>>),
-
-    // fn pointer return type
-    FnPointer(Ty<'tcx>),
-}
-
-struct ProjectionTyCandidateSet<'tcx> {
-    vec: Vec<ProjectionTyCandidate<'tcx>>,
-    ambiguous: bool
-}
-
-/// Evaluates constraints of the form:
-///
-///     for<...> <T as Trait>::U == V
-///
-/// If successful, this may result in additional obligations.
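-///
-/// As a hedged illustration (hypothetical user code, not from the original
-/// source), a constraint of this shape arises from a higher-ranked closure
-/// bound:
-///
-/// ```rust
-/// fn apply<F>(f: F) -> u32
-///     where F: for<'a> Fn(&'a u32) -> u32
-/// {
-///     f(&7)
-/// }
-/// ```
-///
-/// The `Fn` sugar above desugars into a trait predicate plus a projection
-/// predicate roughly of the form
-/// `for<'a> <F as FnOnce<(&'a u32,)>>::Output == u32`,
-/// which is the kind of constraint this function tries to prove.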
-pub fn poly_project_and_unify_type<'cx,'tcx>(
-    selcx: &mut SelectionContext<'cx,'tcx>,
-    obligation: &PolyProjectionObligation<'tcx>)
-    -> Result<Option<Vec<PredicateObligation<'tcx>>>, MismatchedProjectionTypes<'tcx>>
-{
-    debug!("poly_project_and_unify_type(obligation={:?})",
-           obligation);
-
-    let infcx = selcx.infcx();
-    infcx.commit_if_ok(|snapshot| {
-        let (skol_predicate, skol_map) =
-            infcx.skolemize_late_bound_regions(&obligation.predicate, snapshot);
-
-        let skol_obligation = obligation.with(skol_predicate);
-        match project_and_unify_type(selcx, &skol_obligation) {
-            Ok(result) => {
-                match infcx.leak_check(&skol_map, snapshot) {
-                    Ok(()) => Ok(infcx.plug_leaks(skol_map, snapshot, &result)),
-                    Err(e) => Err(MismatchedProjectionTypes { err: e }),
-                }
-            }
-            Err(e) => {
-                Err(e)
-            }
-        }
-    })
-}
-
-/// Evaluates constraints of the form:
-///
-///     <T as Trait>::U == V
-///
-/// If successful, this may result in additional obligations.
-fn project_and_unify_type<'cx,'tcx>(
-    selcx: &mut SelectionContext<'cx,'tcx>,
-    obligation: &ProjectionObligation<'tcx>)
-    -> Result<Option<Vec<PredicateObligation<'tcx>>>, MismatchedProjectionTypes<'tcx>>
-{
-    debug!("project_and_unify_type(obligation={:?})",
-           obligation);
-
-    let Normalized { value: normalized_ty, obligations } =
-        match opt_normalize_projection_type(selcx,
-                                            obligation.predicate.projection_ty.clone(),
-                                            obligation.cause.clone(),
-                                            obligation.recursion_depth) {
-            Some(n) => n,
-            None => {
-                consider_unification_despite_ambiguity(selcx, obligation);
-                return Ok(None);
-            }
-        };
-
-    debug!("project_and_unify_type: normalized_ty={:?} obligations={:?}",
-           normalized_ty,
-           obligations);
-
-    let infcx = selcx.infcx();
-    let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span);
-    match infer::mk_eqty(infcx, true, origin, normalized_ty, obligation.predicate.ty) {
-        Ok(()) => Ok(Some(obligations)),
-        Err(err) => Err(MismatchedProjectionTypes { err: err }),
-    }
-}
-
-fn consider_unification_despite_ambiguity<'cx,'tcx>(selcx: &mut SelectionContext<'cx,'tcx>,
-                                                    obligation: &ProjectionObligation<'tcx>) {
-    debug!("consider_unification_despite_ambiguity(obligation={:?})",
-           obligation);
-
-    let def_id = obligation.predicate.projection_ty.trait_ref.def_id;
-    match selcx.tcx().lang_items.fn_trait_kind(def_id) {
-        Some(_) => { }
-        None => { return; }
-    }
-
-    let infcx = selcx.infcx();
-    let self_ty = obligation.predicate.projection_ty.trait_ref.self_ty();
-    let self_ty = infcx.shallow_resolve(self_ty);
-    debug!("consider_unification_despite_ambiguity: self_ty.sty={:?}",
-           self_ty.sty);
-    match self_ty.sty {
-        ty::TyClosure(closure_def_id, ref substs) => {
-            let closure_typer = selcx.closure_typer();
-            let closure_type = closure_typer.closure_type(closure_def_id, substs);
-            let ty::Binder((_, ret_type)) =
-                util::closure_trait_ref_and_return_type(infcx.tcx,
-                                                        def_id,
-                                                        self_ty,
-                                                        &closure_type.sig,
-                                                        util::TupleArgumentsFlag::No);
-            // We don't have to normalize the return type here - this is only
-            // reached for TyClosure: Fn inputs where the closure kind is
-            // still unknown, which should only occur in typeck where the
-            // closure type is already normalized.
-            let (ret_type, _) =
-                infcx.replace_late_bound_regions_with_fresh_var(
-                    obligation.cause.span,
-                    infer::AssocTypeProjection(obligation.predicate.projection_ty.item_name),
-                    &ty::Binder(ret_type));
-
-            debug!("consider_unification_despite_ambiguity: ret_type={:?}",
-                   ret_type);
-            let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span);
-            let obligation_ty = obligation.predicate.ty;
-            match infer::mk_eqty(infcx, true, origin, obligation_ty, ret_type) {
-                Ok(()) => { }
-                Err(_) => { /* ignore errors */ }
-            }
-        }
-        _ => { }
-    }
-}
-
-/// Normalizes any associated type projections in `value`, replacing
-/// them with a fully resolved type where possible. The return value
-/// combines the normalized result and any additional obligations that
-/// were incurred as a result.
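-///
-/// A minimal sketch of what "normalizing" means here, using hypothetical
-/// types (not from the original source):
-///
-/// ```rust
-/// trait Speed { type Unit; }
-/// struct Car;
-/// struct Kmh;
-/// impl Speed for Car { type Unit = Kmh; }
-/// // Normalizing the type `Vec<<Car as Speed>::Unit>` against these
-/// // definitions yields `Vec<Kmh>`, plus whatever obligations were
-/// // incurred while matching the impl.
-/// ```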
-pub fn normalize<'a,'b,'tcx,T>(selcx: &'a mut SelectionContext<'b,'tcx>,
-                               cause: ObligationCause<'tcx>,
-                               value: &T)
-                               -> Normalized<'tcx, T>
-    where T : TypeFoldable<'tcx>
-{
-    normalize_with_depth(selcx, cause, 0, value)
-}
-
-/// As `normalize`, but with a custom depth.
-pub fn normalize_with_depth<'a,'b,'tcx,T>(selcx: &'a mut SelectionContext<'b,'tcx>,
-                                          cause: ObligationCause<'tcx>,
-                                          depth: usize,
-                                          value: &T)
-                                          -> Normalized<'tcx, T>
-    where T : TypeFoldable<'tcx>
-{
-    let mut normalizer = AssociatedTypeNormalizer::new(selcx, cause, depth);
-    let result = normalizer.fold(value);
-
-    Normalized {
-        value: result,
-        obligations: normalizer.obligations,
-    }
-}
-
-struct AssociatedTypeNormalizer<'a,'b:'a,'tcx:'b> {
-    selcx: &'a mut SelectionContext<'b,'tcx>,
-    cause: ObligationCause<'tcx>,
-    obligations: Vec<PredicateObligation<'tcx>>,
-    depth: usize,
-}
-
-impl<'a,'b,'tcx> AssociatedTypeNormalizer<'a,'b,'tcx> {
-    fn new(selcx: &'a mut SelectionContext<'b,'tcx>,
-           cause: ObligationCause<'tcx>,
-           depth: usize)
-           -> AssociatedTypeNormalizer<'a,'b,'tcx>
-    {
-        AssociatedTypeNormalizer {
-            selcx: selcx,
-            cause: cause,
-            obligations: vec!(),
-            depth: depth,
-        }
-    }
-
-    fn fold<T:TypeFoldable<'tcx>>(&mut self, value: &T) -> T {
-        let value = self.selcx.infcx().resolve_type_vars_if_possible(value);
-
-        if !value.has_projection_types() {
-            value.clone()
-        } else {
-            value.fold_with(self)
-        }
-    }
-}
-
-impl<'a,'b,'tcx> TypeFolder<'tcx> for AssociatedTypeNormalizer<'a,'b,'tcx> {
-    fn tcx(&self) -> &ty::ctxt<'tcx> {
-        self.selcx.tcx()
-    }
-
-    fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
-        // We don't want to normalize associated types that occur inside of region
-        // binders, because they may contain bound regions, and we can't cope with that.
-        //
-        // Example:
-        //
-        //     for<'a> fn(<T as Foo<&'a>>::A)
-        //
-        // Instead of normalizing `<T as Foo<&'a>>::A` here, we'll
-        // normalize it when we instantiate those bound regions (which
-        // should occur eventually).
-
-        let ty = ty.super_fold_with(self);
-        match ty.sty {
-            ty::TyProjection(ref data) if !data.has_escaping_regions() => { // (*)
-
-                // (*) This is kind of hacky -- we need to be able to
-                // handle normalization within binders because
-                // otherwise we wind up with a need to normalize when doing
-                // trait matching (since you can have a trait
-                // obligation like `for<'a> T::B : Fn(&'a int)`), but
-                // we can't normalize with bound regions in scope. So for
-                // now we just ignore binders but only normalize
-                // if all bound regions are gone (and then we still
-                // have to renormalize whenever we instantiate a
-                // binder). It would be better to normalize in a
-                // binding-aware fashion.
-
-                let Normalized { value: ty, obligations } =
-                    normalize_projection_type(self.selcx,
-                                              data.clone(),
-                                              self.cause.clone(),
-                                              self.depth);
-                self.obligations.extend(obligations);
-                ty
-            }
-
-            _ => {
-                ty
-            }
-        }
-    }
-}
-
-#[derive(Clone)]
-pub struct Normalized<'tcx,T> {
-    pub value: T,
-    pub obligations: Vec<PredicateObligation<'tcx>>,
-}
-
-pub type NormalizedTy<'tcx> = Normalized<'tcx, Ty<'tcx>>;
-
-impl<'tcx,T> Normalized<'tcx,T> {
-    pub fn with<U>(self, value: U) -> Normalized<'tcx,U> {
-        Normalized { value: value, obligations: self.obligations }
-    }
-}
-
-/// The guts of `normalize`: normalize a specific projection like `<T
-/// as Trait>::Item`. The result is always a type (and possibly
-/// additional obligations). If ambiguity arises, which implies that
-/// there are unresolved type variables in the projection, we will
-/// substitute a fresh type variable `$X` and generate a new
-/// obligation `<T as Trait>::Item == $X` for later.
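-///
-/// As a hedged illustration (not spelled out in the original source): if the
-/// self type of the projection is still an unresolved inference variable
-/// `$0`, no impl can be chosen yet and the candidate set is ambiguous. This
-/// function then returns a fresh variable `$X` together with the deferred
-/// obligation `<$0 as Trait>::Item == $X`, to be retried once `$0` is known.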
-pub fn normalize_projection_type<'a,'b,'tcx>(
-    selcx: &'a mut SelectionContext<'b,'tcx>,
-    projection_ty: ty::ProjectionTy<'tcx>,
-    cause: ObligationCause<'tcx>,
-    depth: usize)
-    -> NormalizedTy<'tcx>
-{
-    opt_normalize_projection_type(selcx, projection_ty.clone(), cause.clone(), depth)
-        .unwrap_or_else(move || {
-            // if we bottom out in ambiguity, create a type variable
-            // and a deferred predicate to resolve this when more type
-            // information is available.
-
-            let ty_var = selcx.infcx().next_ty_var();
-            let projection = ty::Binder(ty::ProjectionPredicate {
-                projection_ty: projection_ty,
-                ty: ty_var
-            });
-            let obligation = Obligation::with_depth(
-                cause, depth + 1, projection.to_predicate());
-            Normalized {
-                value: ty_var,
-                obligations: vec!(obligation)
-            }
-        })
-}
-
-/// The guts of `normalize`: normalize a specific projection like `<T
-/// as Trait>::Item`. The result is always a type (and possibly
-/// additional obligations). Returns `None` in the case of ambiguity,
-/// which indicates that there are unbound type variables.
-fn opt_normalize_projection_type<'a,'b,'tcx>(
-    selcx: &'a mut SelectionContext<'b,'tcx>,
-    projection_ty: ty::ProjectionTy<'tcx>,
-    cause: ObligationCause<'tcx>,
-    depth: usize)
-    -> Option<NormalizedTy<'tcx>>
-{
-    debug!("normalize_projection_type(\
-           projection_ty={:?}, \
-           depth={})",
-           projection_ty,
-           depth);
-
-    let obligation = Obligation::with_depth(cause.clone(), depth, projection_ty.clone());
-    match project_type(selcx, &obligation) {
-        Ok(ProjectedTy::Progress(projected_ty, mut obligations)) => {
-            // if projection succeeded, then what we get out of this
-            // is also non-normalized (consider: it was derived from
-            // an impl, where-clause etc) and hence we must
-            // re-normalize it
-
-            debug!("normalize_projection_type: projected_ty={:?} depth={} obligations={:?}",
-                   projected_ty,
-                   depth,
-                   obligations);
-
-            if projected_ty.has_projection_types() {
-                let mut normalizer = AssociatedTypeNormalizer::new(selcx, cause, depth+1);
-                let normalized_ty = normalizer.fold(&projected_ty);
-
-                debug!("normalize_projection_type: normalized_ty={:?} depth={}",
-                       normalized_ty,
-                       depth);
-
-                obligations.extend(normalizer.obligations);
-                Some(Normalized {
-                    value: normalized_ty,
-                    obligations: obligations,
-                })
-            } else {
-                Some(Normalized {
-                    value: projected_ty,
-                    obligations: obligations,
-                })
-            }
-        }
-        Ok(ProjectedTy::NoProgress(projected_ty)) => {
-            debug!("normalize_projection_type: projected_ty={:?} no progress",
-                   projected_ty);
-            Some(Normalized {
-                value: projected_ty,
-                obligations: vec!()
-            })
-        }
-        Err(ProjectionTyError::TooManyCandidates) => {
-            debug!("normalize_projection_type: too many candidates");
-            None
-        }
-        Err(ProjectionTyError::TraitSelectionError(_)) => {
-            debug!("normalize_projection_type: ERROR");
-            // if we got an error processing the `T as Trait` part,
-            // just return `ty::err` but add the obligation `T :
-            // Trait`, which when processed will cause the error to be
-            // reported later
-
-            Some(normalize_to_error(selcx, projection_ty, cause, depth))
-        }
-    }
-}
-
-/// Handles the case where we are projecting `<T as Trait>::Item` but
-/// `T: Trait` does not hold. In various error cases, we cannot generate a
-/// valid normalized projection. Therefore, we create an inference variable
-/// and return an associated obligation that, when fulfilled, will lead to
-/// an error.
-///
-/// Note that we used to return `TyError` here, but that was quite
-/// dubious -- the premise was that an error would *eventually* be
-/// reported, when the obligation was processed. But in general once
-/// you see a `TyError` you are supposed to be able to assume that an
-/// error *has been* reported, so that you can take whatever heuristic
-/// paths you want to take. To make things worse, it was possible for
-/// cycles to arise, where you basically had a setup like `<MyType<$0>
-/// as Trait>::Foo == $0`. Here, normalizing `<MyType<$0> as
-/// Trait>::Foo> to `[type error]` would lead to an obligation of
-/// `<MyType<[type error]> as Trait>::Foo`.  We are supposed to report
-/// an error for this obligation, but we legitimately should not,
-/// because it contains `[type error]`. Yuck! (See issue #29857 for
-/// one case where this arose.)
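-///
-/// A hedged sketch of the error case (hypothetical code, not from the
-/// original source); the snippet below is rejected by the compiler:
-///
-/// ```
-/// trait Trait { type Foo; }
-/// fn f<T>() -> <T as Trait>::Foo { loop {} } // error: `T: Trait` not satisfied
-/// ```
-///
-/// Rather than normalizing `<T as Trait>::Foo` to `[type error]`, a fresh
-/// inference variable is returned together with the obligation `T: Trait`,
-/// whose failure reports the error at the appropriate point.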
-fn normalize_to_error<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
-                               projection_ty: ty::ProjectionTy<'tcx>,
-                               cause: ObligationCause<'tcx>,
-                               depth: usize)
-                               -> NormalizedTy<'tcx>
-{
-    let trait_ref = projection_ty.trait_ref.to_poly_trait_ref();
-    let trait_obligation = Obligation { cause: cause,
-                                        recursion_depth: depth,
-                                        predicate: trait_ref.to_predicate() };
-    let new_value = selcx.infcx().next_ty_var();
-    Normalized {
-        value: new_value,
-        obligations: vec!(trait_obligation)
-    }
-}
-
-enum ProjectedTy<'tcx> {
-    Progress(Ty<'tcx>, Vec<PredicateObligation<'tcx>>),
-    NoProgress(Ty<'tcx>),
-}
-
-/// Compute the result of a projection type (if we can).
-fn project_type<'cx,'tcx>(
-    selcx: &mut SelectionContext<'cx,'tcx>,
-    obligation: &ProjectionTyObligation<'tcx>)
-    -> Result<ProjectedTy<'tcx>, ProjectionTyError<'tcx>>
-{
-    debug!("project(obligation={:?})",
-           obligation);
-
-    let recursion_limit = selcx.tcx().sess.recursion_limit.get();
-    if obligation.recursion_depth >= recursion_limit {
-        debug!("project: overflow!");
-        report_overflow_error(selcx.infcx(), &obligation, true);
-    }
-
-    let obligation_trait_ref =
-        selcx.infcx().resolve_type_vars_if_possible(&obligation.predicate.trait_ref);
-
-    debug!("project: obligation_trait_ref={:?}", obligation_trait_ref);
-
-    if obligation_trait_ref.references_error() {
-        return Ok(ProjectedTy::Progress(selcx.tcx().types.err, vec!()));
-    }
-
-    let mut candidates = ProjectionTyCandidateSet {
-        vec: Vec::new(),
-        ambiguous: false,
-    };
-
-    assemble_candidates_from_param_env(selcx,
-                                       obligation,
-                                       &obligation_trait_ref,
-                                       &mut candidates);
-
-    assemble_candidates_from_trait_def(selcx,
-                                       obligation,
-                                       &obligation_trait_ref,
-                                       &mut candidates);
-
-    if let Err(e) = assemble_candidates_from_impls(selcx,
-                                                   obligation,
-                                                   &obligation_trait_ref,
-                                                   &mut candidates) {
-        return Err(ProjectionTyError::TraitSelectionError(e));
-    }
-
-    debug!("{} candidates, ambiguous={}",
-           candidates.vec.len(),
-           candidates.ambiguous);
-
-    // Inherent ambiguity that prevents us from even enumerating the
-    // candidates.
-    if candidates.ambiguous {
-        return Err(ProjectionTyError::TooManyCandidates);
-    }
-
-    // Drop duplicates.
-    //
-    // Note: `candidates.vec` seems to be on the critical path of the
-    // compiler. Replacing it with a hash set was also tried, which would
-    // render the following dedup unnecessary. It led to cleaner code but
-    // prolonged the compile time of `librustc` from 5m30s to 6m in one test,
-    // a performance loss of roughly 9%.
-    if candidates.vec.len() > 1 {
-        let mut i = 0;
-        while i < candidates.vec.len() {
-            let has_dup = (0..i).any(|j| candidates.vec[i] == candidates.vec[j]);
-            if has_dup {
-                candidates.vec.swap_remove(i);
-            } else {
-                i += 1;
-            }
-        }
-    }
-
-    // Prefer where-clauses. As in select, if there are multiple
-    // candidates, we prefer where-clause candidates over impls.  This
-    // may seem a bit surprising, since impls are the source of
-    // "truth" in some sense, but in fact some of the impls that SEEM
-    // applicable are not, because of nested obligations. Where
-    // clauses are the safer choice. See the comment on
-    // `select::SelectionCandidate` and #21974 for more details.
-    if candidates.vec.len() > 1 {
-        debug!("retaining param-env candidates only from {:?}", candidates.vec);
-        candidates.vec.retain(|c| match *c {
-            ProjectionTyCandidate::ParamEnv(..) => true,
-            ProjectionTyCandidate::Impl(..) |
-            ProjectionTyCandidate::Closure(..) |
-            ProjectionTyCandidate::TraitDef(..) |
-            ProjectionTyCandidate::FnPointer(..) => false,
-        });
-        debug!("resulting candidate set: {:?}", candidates.vec);
-        if candidates.vec.len() != 1 {
-            return Err(ProjectionTyError::TooManyCandidates);
-        }
-    }
-
-    assert!(candidates.vec.len() <= 1);
-
-    match candidates.vec.pop() {
-        Some(candidate) => {
-            let (ty, obligations) = confirm_candidate(selcx, obligation, candidate);
-            Ok(ProjectedTy::Progress(ty, obligations))
-        }
-        None => {
-            Ok(ProjectedTy::NoProgress(selcx.tcx().mk_projection(
-                obligation.predicate.trait_ref.clone(),
-                obligation.predicate.item_name)))
-        }
-    }
-}
-
-/// The first thing we have to do is scan through the parameter
-/// environment to see whether there are any projection predicates
-/// there that can answer this question.
-fn assemble_candidates_from_param_env<'cx,'tcx>(
-    selcx: &mut SelectionContext<'cx,'tcx>,
-    obligation: &ProjectionTyObligation<'tcx>,
-    obligation_trait_ref: &ty::TraitRef<'tcx>,
-    candidate_set: &mut ProjectionTyCandidateSet<'tcx>)
-{
-    debug!("assemble_candidates_from_param_env(..)");
-    let env_predicates = selcx.param_env().caller_bounds.iter().cloned();
-    assemble_candidates_from_predicates(selcx,
-                                        obligation,
-                                        obligation_trait_ref,
-                                        candidate_set,
-                                        ProjectionTyCandidate::ParamEnv,
-                                        env_predicates);
-}
-
-/// In the case of a nested projection like <<A as Foo>::FooT as Bar>::BarT, we may find
-/// that the definition of `Foo` has some clues:
-///
-/// ```
-/// trait Foo {
-///     type FooT : Bar<BarT=i32>
-/// }
-/// ```
-///
-/// Here, for example, we could conclude that the result is `i32`.
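-///
-/// As a hedged continuation of the example above (hypothetical code, not
-/// from the original source, assuming a matching `trait Bar { type BarT; }`):
-///
-/// ```
-/// fn takes_nested<A: Foo>(x: <<A as Foo>::FooT as Bar>::BarT) -> i32 {
-///     x // OK: the bound `FooT: Bar<BarT = i32>` pins `BarT` to `i32`
-/// }
-/// ```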
-fn assemble_candidates_from_trait_def<'cx,'tcx>(
-    selcx: &mut SelectionContext<'cx,'tcx>,
-    obligation: &ProjectionTyObligation<'tcx>,
-    obligation_trait_ref: &ty::TraitRef<'tcx>,
-    candidate_set: &mut ProjectionTyCandidateSet<'tcx>)
-{
-    debug!("assemble_candidates_from_trait_def(..)");
-
-    // Check whether the self-type is itself a projection.
-    let trait_ref = match obligation_trait_ref.self_ty().sty {
-        ty::TyProjection(ref data) => data.trait_ref.clone(),
-        ty::TyInfer(ty::TyVar(_)) => {
-            // If the self-type is an inference variable, then it MAY wind up
-            // being a projected type, so induce an ambiguity.
-            candidate_set.ambiguous = true;
-            return;
-        }
-        _ => { return; }
-    };
-
-    // If so, extract what we know from the trait and try to come up with a good answer.
-    let trait_predicates = selcx.tcx().lookup_predicates(trait_ref.def_id);
-    let bounds = trait_predicates.instantiate(selcx.tcx(), trait_ref.substs);
-    let bounds = elaborate_predicates(selcx.tcx(), bounds.predicates.into_vec());
-    assemble_candidates_from_predicates(selcx,
-                                        obligation,
-                                        obligation_trait_ref,
-                                        candidate_set,
-                                        ProjectionTyCandidate::TraitDef,
-                                        bounds)
-}
-
-fn assemble_candidates_from_predicates<'cx,'tcx,I>(
-    selcx: &mut SelectionContext<'cx,'tcx>,
-    obligation: &ProjectionTyObligation<'tcx>,
-    obligation_trait_ref: &ty::TraitRef<'tcx>,
-    candidate_set: &mut ProjectionTyCandidateSet<'tcx>,
-    ctor: fn(ty::PolyProjectionPredicate<'tcx>) -> ProjectionTyCandidate<'tcx>,
-    env_predicates: I)
-    where I: Iterator<Item=ty::Predicate<'tcx>>
-{
-    debug!("assemble_candidates_from_predicates(obligation={:?})",
-           obligation);
-    let infcx = selcx.infcx();
-    for predicate in env_predicates {
-        debug!("assemble_candidates_from_predicates: predicate={:?}",
-               predicate);
-        match predicate {
-            ty::Predicate::Projection(ref data) => {
-                let same_name = data.item_name() == obligation.predicate.item_name;
-
-                let is_match = same_name && infcx.probe(|_| {
-                    let origin = TypeOrigin::Misc(obligation.cause.span);
-                    let data_poly_trait_ref =
-                        data.to_poly_trait_ref();
-                    let obligation_poly_trait_ref =
-                        obligation_trait_ref.to_poly_trait_ref();
-                    infcx.sub_poly_trait_refs(false,
-                                              origin,
-                                              data_poly_trait_ref,
-                                              obligation_poly_trait_ref).is_ok()
-                });
-
-                debug!("assemble_candidates_from_predicates: candidate={:?} \
-                                                             is_match={} same_name={}",
-                       data, is_match, same_name);
-
-                if is_match {
-                    candidate_set.vec.push(ctor(data.clone()));
-                }
-            }
-            _ => { }
-        }
-    }
-}
-
-fn assemble_candidates_from_object_type<'cx,'tcx>(
-    selcx: &mut SelectionContext<'cx,'tcx>,
-    obligation:  &ProjectionTyObligation<'tcx>,
-    obligation_trait_ref: &ty::TraitRef<'tcx>,
-    candidate_set: &mut ProjectionTyCandidateSet<'tcx>)
-{
-    let self_ty = obligation_trait_ref.self_ty();
-    let object_ty = selcx.infcx().shallow_resolve(self_ty);
-    debug!("assemble_candidates_from_object_type(object_ty={:?})",
-           object_ty);
-    let data = match object_ty.sty {
-        ty::TyTrait(ref data) => data,
-        _ => {
-            selcx.tcx().sess.span_bug(
-                obligation.cause.span,
-                &format!("assemble_candidates_from_object_type called with non-object: {:?}",
-                         object_ty));
-        }
-    };
-    let projection_bounds = data.projection_bounds_with_self_ty(selcx.tcx(), object_ty);
-    let env_predicates = projection_bounds.iter()
-                                          .map(|p| p.to_predicate())
-                                          .collect();
-    let env_predicates = elaborate_predicates(selcx.tcx(), env_predicates);
-    assemble_candidates_from_predicates(selcx,
-                                        obligation,
-                                        obligation_trait_ref,
-                                        candidate_set,
-                                        ProjectionTyCandidate::ParamEnv,
-                                        env_predicates)
-}
-
-fn assemble_candidates_from_impls<'cx,'tcx>(
-    selcx: &mut SelectionContext<'cx,'tcx>,
-    obligation: &ProjectionTyObligation<'tcx>,
-    obligation_trait_ref: &ty::TraitRef<'tcx>,
-    candidate_set: &mut ProjectionTyCandidateSet<'tcx>)
-    -> Result<(), SelectionError<'tcx>>
-{
-    // If we are resolving `<T as TraitRef<...>>::Item == Type`,
-    // start out by selecting the predicate `T as TraitRef<...>`:
-    let poly_trait_ref = obligation_trait_ref.to_poly_trait_ref();
-    let trait_obligation = obligation.with(poly_trait_ref.to_poly_trait_predicate());
-    let vtable = match selcx.select(&trait_obligation) {
-        Ok(Some(vtable)) => vtable,
-        Ok(None) => {
-            candidate_set.ambiguous = true;
-            return Ok(());
-        }
-        Err(e) => {
-            debug!("assemble_candidates_from_impls: selection error {:?}",
-                   e);
-            return Err(e);
-        }
-    };
-
-    match vtable {
-        super::VtableImpl(data) => {
-            debug!("assemble_candidates_from_impls: impl candidate {:?}",
-                   data);
-
-            candidate_set.vec.push(
-                ProjectionTyCandidate::Impl(data));
-        }
-        super::VtableObject(_) => {
-            assemble_candidates_from_object_type(
-                selcx, obligation, obligation_trait_ref, candidate_set);
-        }
-        super::VtableClosure(data) => {
-            candidate_set.vec.push(
-                ProjectionTyCandidate::Closure(data));
-        }
-        super::VtableFnPointer(fn_type) => {
-            candidate_set.vec.push(
-                ProjectionTyCandidate::FnPointer(fn_type));
-        }
-        super::VtableParam(..) => {
-            // This case tells us nothing about the value of an
-            // associated type. Consider:
-            //
-            // ```
-            // trait SomeTrait { type Foo; }
-            // fn foo<T:SomeTrait>(...) { }
-            // ```
-            //
-            // If the user writes `<T as SomeTrait>::Foo`, then the `T
-            // : SomeTrait` binding does not help us decide what the
-            // type `Foo` is (at least, not more specifically than
-            // what we already knew).
-            //
-            // But wait, you say! What about an example like this:
-            //
-            // ```
-            // fn bar<T:SomeTrait<Foo=usize>>(...) { ... }
-            // ```
-            //
-            // Doesn't the `T : SomeTrait<Foo=usize>` predicate help
-            // resolve `T::Foo`? And of course it does, but in fact
-            // that single predicate is desugared into two predicates
-            // in the compiler: a trait predicate (`T : SomeTrait`) and a
-            // projection. And the projection where clause is handled
-            // in `assemble_candidates_from_param_env`.
-        }
-        super::VtableDefaultImpl(..) |
-        super::VtableBuiltin(..) => {
-            // These traits have no associated types.
-            selcx.tcx().sess.span_bug(
-                obligation.cause.span,
-                &format!("Cannot project an associated type from `{:?}`",
-                         vtable));
-        }
-    }
-
-    Ok(())
-}
-
-fn confirm_candidate<'cx,'tcx>(
-    selcx: &mut SelectionContext<'cx,'tcx>,
-    obligation: &ProjectionTyObligation<'tcx>,
-    candidate: ProjectionTyCandidate<'tcx>)
-    -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
-{
-    debug!("confirm_candidate(candidate={:?}, obligation={:?})",
-           candidate,
-           obligation);
-
-    match candidate {
-        ProjectionTyCandidate::ParamEnv(poly_projection) |
-        ProjectionTyCandidate::TraitDef(poly_projection) => {
-            confirm_param_env_candidate(selcx, obligation, poly_projection)
-        }
-
-        ProjectionTyCandidate::Impl(impl_vtable) => {
-            confirm_impl_candidate(selcx, obligation, impl_vtable)
-        }
-
-        ProjectionTyCandidate::Closure(closure_vtable) => {
-            confirm_closure_candidate(selcx, obligation, closure_vtable)
-        }
-
-        ProjectionTyCandidate::FnPointer(fn_type) => {
-            confirm_fn_pointer_candidate(selcx, obligation, fn_type)
-        }
-    }
-}
-
-fn confirm_fn_pointer_candidate<'cx,'tcx>(
-    selcx: &mut SelectionContext<'cx,'tcx>,
-    obligation: &ProjectionTyObligation<'tcx>,
-    fn_type: Ty<'tcx>)
-    -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
-{
-    let fn_type = selcx.infcx().shallow_resolve(fn_type);
-    let sig = fn_type.fn_sig();
-    confirm_callable_candidate(selcx, obligation, sig, util::TupleArgumentsFlag::Yes)
-}
-
-fn confirm_closure_candidate<'cx,'tcx>(
-    selcx: &mut SelectionContext<'cx,'tcx>,
-    obligation: &ProjectionTyObligation<'tcx>,
-    vtable: VtableClosureData<'tcx, PredicateObligation<'tcx>>)
-    -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
-{
-    let closure_typer = selcx.closure_typer();
-    let closure_type = closure_typer.closure_type(vtable.closure_def_id, &vtable.substs);
-    let Normalized {
-        value: closure_type,
-        mut obligations
-    } = normalize_with_depth(selcx,
-                             obligation.cause.clone(),
-                             obligation.recursion_depth+1,
-                             &closure_type);
-    let (ty, mut cc_obligations) = confirm_callable_candidate(selcx,
-                                                              obligation,
-                                                              &closure_type.sig,
-                                                              util::TupleArgumentsFlag::No);
-    obligations.append(&mut cc_obligations);
-    (ty, obligations)
-}
-
-fn confirm_callable_candidate<'cx,'tcx>(
-    selcx: &mut SelectionContext<'cx,'tcx>,
-    obligation: &ProjectionTyObligation<'tcx>,
-    fn_sig: &ty::PolyFnSig<'tcx>,
-    flag: util::TupleArgumentsFlag)
-    -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
-{
-    let tcx = selcx.tcx();
-
-    debug!("confirm_callable_candidate({:?},{:?})",
-           obligation,
-           fn_sig);
-
-    // the `Output` associated type is declared on `FnOnce`
-    let fn_once_def_id = tcx.lang_items.fn_once_trait().unwrap();
-
-    // Note: we unwrap the binder here but re-create it below (1)
-    let ty::Binder((trait_ref, ret_type)) =
-        util::closure_trait_ref_and_return_type(tcx,
-                                                fn_once_def_id,
-                                                obligation.predicate.trait_ref.self_ty(),
-                                                fn_sig,
-                                                flag);
-
-    let predicate = ty::Binder(ty::ProjectionPredicate { // (1) recreate binder here
-        projection_ty: ty::ProjectionTy {
-            trait_ref: trait_ref,
-            item_name: token::intern(FN_OUTPUT_NAME),
-        },
-        ty: ret_type
-    });
-
-    confirm_param_env_candidate(selcx, obligation, predicate)
-}
-
-fn confirm_param_env_candidate<'cx,'tcx>(
-    selcx: &mut SelectionContext<'cx,'tcx>,
-    obligation: &ProjectionTyObligation<'tcx>,
-    poly_projection: ty::PolyProjectionPredicate<'tcx>)
-    -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
-{
-    let infcx = selcx.infcx();
-
-    let projection =
-        infcx.replace_late_bound_regions_with_fresh_var(
-            obligation.cause.span,
-            infer::LateBoundRegionConversionTime::HigherRankedType,
-            &poly_projection).0;
-
-    assert_eq!(projection.projection_ty.item_name,
-               obligation.predicate.item_name);
-
-    let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span);
-    match infcx.eq_trait_refs(false,
-                              origin,
-                              obligation.predicate.trait_ref.clone(),
-                              projection.projection_ty.trait_ref.clone()) {
-        Ok(()) => { }
-        Err(e) => {
-            selcx.tcx().sess.span_bug(
-                obligation.cause.span,
-                &format!("Failed to unify `{:?}` and `{:?}` in projection: {}",
-                         obligation,
-                         projection,
-                         e));
-        }
-    }
-
-    (projection.ty, vec!())
-}
-
-fn confirm_impl_candidate<'cx,'tcx>(
-    selcx: &mut SelectionContext<'cx,'tcx>,
-    obligation: &ProjectionTyObligation<'tcx>,
-    impl_vtable: VtableImplData<'tcx, PredicateObligation<'tcx>>)
-    -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
-{
-    // there don't seem to be nicer accessors to these:
-    let impl_or_trait_items_map = selcx.tcx().impl_or_trait_items.borrow();
-
-    // Look for the associated type in the impl
-    for impl_item in &selcx.tcx().impl_items.borrow()[&impl_vtable.impl_def_id] {
-        if let ty::TypeTraitItem(ref assoc_ty) = impl_or_trait_items_map[&impl_item.def_id()] {
-            if assoc_ty.name == obligation.predicate.item_name {
-                return (assoc_ty.ty.unwrap().subst(selcx.tcx(), &impl_vtable.substs),
-                        impl_vtable.nested);
-            }
-        }
-    }
-
-    // It is not in the impl - get the default from the trait.
-    let trait_ref = obligation.predicate.trait_ref;
-    for trait_item in selcx.tcx().trait_items(trait_ref.def_id).iter() {
-        if let &ty::TypeTraitItem(ref assoc_ty) = trait_item {
-            if assoc_ty.name == obligation.predicate.item_name {
-                if let Some(ty) = assoc_ty.ty {
-                    return (ty.subst(selcx.tcx(), trait_ref.substs),
-                            impl_vtable.nested);
-                } else {
-                    // This means that the impl is missing a
-                    // definition for the associated type. This error
-                    // ought to be reported by the type checker method
-                    // `check_impl_items_against_trait`, so here we
-                    // just return TyError.
-                    debug!("confirm_impl_candidate: no associated type {:?} for {:?}",
-                           assoc_ty.name,
-                           trait_ref);
-                    return (selcx.tcx().types.err, vec!());
-                }
-            }
-        }
-    }
-
-    selcx.tcx().sess.span_bug(obligation.cause.span,
-                              &format!("No associated type for {:?}",
-                                       trait_ref));
-}
diff --git a/src/librustc/middle/traits/select.rs b/src/librustc/middle/traits/select.rs
deleted file mode 100644 (file)
index 29355e0..0000000
+++ /dev/null
@@ -1,3004 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! See `README.md` for high-level documentation
-
-pub use self::MethodMatchResult::*;
-pub use self::MethodMatchedData::*;
-use self::SelectionCandidate::*;
-use self::BuiltinBoundConditions::*;
-use self::EvaluationResult::*;
-
-use super::coherence;
-use super::DerivedObligationCause;
-use super::project;
-use super::project::{normalize_with_depth, Normalized};
-use super::{PredicateObligation, TraitObligation, ObligationCause};
-use super::report_overflow_error;
-use super::{ObligationCauseCode, BuiltinDerivedObligation, ImplDerivedObligation};
-use super::{SelectionError, Unimplemented, OutputTypeParameterMismatch};
-use super::{ObjectCastObligation, Obligation};
-use super::TraitNotObjectSafe;
-use super::Selection;
-use super::SelectionResult;
-use super::{VtableBuiltin, VtableImpl, VtableParam, VtableClosure,
-            VtableFnPointer, VtableObject, VtableDefaultImpl};
-use super::{VtableImplData, VtableObjectData, VtableBuiltinData,
-            VtableClosureData, VtableDefaultImplData};
-use super::object_safety;
-use super::util;
-
-use middle::def_id::DefId;
-use middle::infer;
-use middle::infer::{InferCtxt, TypeFreshener, TypeOrigin};
-use middle::subst::{Subst, Substs, TypeSpace};
-use middle::ty::{self, ToPredicate, ToPolyTraitRef, Ty, TypeFoldable};
-use middle::ty::fast_reject;
-use middle::ty::relate::TypeRelation;
-
-use std::cell::RefCell;
-use std::fmt;
-use std::rc::Rc;
-use syntax::abi::Abi;
-use rustc_front::hir;
-use util::common::ErrorReported;
-use util::nodemap::FnvHashMap;
-
-pub struct SelectionContext<'cx, 'tcx:'cx> {
-    infcx: &'cx InferCtxt<'cx, 'tcx>,
-
-    /// Freshener used specifically for skolemizing entries on the
-    /// obligation stack. This ensures that all entries on the stack
-    /// at one time will have the same set of skolemized entries,
-    /// which is important for checking for trait bounds that
-    /// recursively require themselves.
-    freshener: TypeFreshener<'cx, 'tcx>,
-
-    /// If true, indicates that the evaluation should be conservative
-    /// and consider the possibility of types outside this crate.
-    /// This comes up primarily when resolving ambiguity. Imagine
-    /// there is some trait reference `$0 : Bar` where `$0` is an
-    /// inference variable. If `intercrate` is true, then we can never
-    /// say for sure that this reference is not implemented, even if
-    /// there are *no impls at all for `Bar`*, because `$0` could be
-    /// bound to some type in a downstream crate that implements
-    /// `Bar`. This is the suitable mode for coherence. Elsewhere,
-    /// though, we set this to false, because we are only interested
-    /// in types that the user could actually have written --- in
-    /// other words, we consider `$0 : Bar` to be unimplemented if
-    /// there is no type that the user could *actually name* that
-    /// would satisfy it. This avoids crippling inference, basically.
-
-    intercrate: bool,
-}
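-
-// Illustrative note (not from the original source): suppose `trait Bar {}`
-// is declared locally with no impls at all in this crate. In intercrate mode
-// the obligation `$0 : Bar` is still treated as possibly satisfiable,
-// because a downstream crate could write `struct Their; impl Bar for Their {}`
-// and inference could bind `$0` to `Their`; outside of coherence the same
-// obligation is simply considered unimplemented.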
-
-// One entry in a stack of trait obligations, linked back toward the root
-// obligation via `previous`.
-struct TraitObligationStack<'prev, 'tcx: 'prev> {
-    obligation: &'prev TraitObligation<'tcx>,
-
-    /// Trait ref from `obligation` but skolemized with the
-    /// selection-context's freshener. Used to check for recursion.
-    fresh_trait_ref: ty::PolyTraitRef<'tcx>,
-
-    previous: TraitObligationStackList<'prev, 'tcx>,
-}
-
-#[derive(Clone)]
-pub struct SelectionCache<'tcx> {
-    hashmap: RefCell<FnvHashMap<ty::TraitRef<'tcx>,
-                                SelectionResult<'tcx, SelectionCandidate<'tcx>>>>,
-}
-
-pub enum MethodMatchResult {
-    MethodMatched(MethodMatchedData),
-    MethodAmbiguous(/* list of impls that could apply */ Vec<DefId>),
-    MethodDidNotMatch,
-}
-
-#[derive(Copy, Clone, Debug)]
-pub enum MethodMatchedData {
-    // In the case of a precise match, we don't really need to store
-    // how the match was found. So don't.
-    PreciseMethodMatch,
-
-    // In the case of a coercion, we need to know the precise impl so
-    // that we can determine the type to which things were coerced.
-    CoerciveMethodMatch(/* impl we matched */ DefId)
-}
-
-/// The selection process begins by considering all impls, where
-/// clauses, and so forth that might resolve an obligation.  Sometimes
-/// we'll be able to say definitively that (e.g.) an impl does not
-/// apply to the obligation: perhaps it is defined for `usize` but the
-/// obligation is for `int`. In that case, we drop the impl out of the
-/// list.  But the other cases are considered *candidates*.
-///
-/// For selection to succeed, there must be exactly one matching
-/// candidate. If the obligation is fully known, this is guaranteed
-/// by coherence. However, if the obligation contains type parameters
-/// or variables, there may be multiple such impls.
-///
-/// It is not a real problem if multiple matching impls exist because
-/// of type variables - it just means the obligation isn't sufficiently
-/// elaborated. In that case we report an ambiguity, and the caller can
-/// try again after more type information has been gathered or report a
-/// "type annotations required" error.
-///
-/// However, with type parameters, this can be a real problem - type
-/// parameters don't unify with regular types, but they *can* unify
-/// with variables from blanket impls, and (unless we know its bounds
-/// will always be satisfied) picking the blanket impl will be wrong
-/// for at least *some* substitutions. To make this concrete, if we have
-///
-///    trait AsDebug { type Out : fmt::Debug; fn debug(self) -> Self::Out; }
-///    impl<T: fmt::Debug> AsDebug for T {
-///        type Out = T;
-///        fn debug(self) -> fmt::Debug { self }
-///    }
-///    fn foo<T: AsDebug>(t: T) { println!("{:?}", <T as AsDebug>::debug(t)); }
-///
-/// we can't just use the impl to resolve the <T as AsDebug> obligation
-/// - a type from another crate (that doesn't implement fmt::Debug) could
-/// implement AsDebug.
-///
-/// Because where-clauses match the type exactly, multiple clauses can
-/// only match if there are unresolved variables, and we can mostly just
-/// report this ambiguity in that case. This is still a problem - we can't
-/// *do anything* with ambiguities that involve only regions. This is issue
-/// #21974.
-///
-/// If a single where-clause matches and there are no inference
-/// variables left, then it definitely matches and we can just select
-/// it.
-///
-/// In fact, we even select the where-clause when the obligation contains
-/// inference variables. This can lead to inference making "leaps of logic",
-/// for example in this situation:
-///
-///    pub trait Foo<T> { fn foo(&self) -> T; }
-///    impl<T> Foo<()> for T { fn foo(&self) { } }
-///    impl Foo<bool> for bool { fn foo(&self) -> bool { *self } }
-///
-///    pub fn foo<T>(t: T) where T: Foo<bool> {
-///       println!("{:?}", <T as Foo<_>>::foo(&t));
-///    }
-///    fn main() { foo(false); }
-///
-/// Here the obligation <T as Foo<$0>> can be matched by both the blanket
-/// impl and the where-clause. We select the where-clause and unify $0=bool,
-/// so the program prints "false". However, if the where-clause is omitted,
-/// the blanket impl is selected, we unify $0=(), and the program prints
-/// "()".
-///
-/// Exactly the same issues apply to projection and object candidates, except
-/// that we can have both a projection candidate and a where-clause candidate
-/// for the same obligation. In that case either would do (except that
-/// different "leaps of logic" would occur if inference variables are
-/// present), and we just pick the where-clause. This is, for example,
-/// required for associated types to work in default impls, as the bounds
-/// are visible both as projection bounds and as where-clauses from the
-/// parameter environment.
-#[derive(PartialEq,Eq,Debug,Clone)]
-enum SelectionCandidate<'tcx> {
-    BuiltinCandidate(ty::BuiltinBound),
-    ParamCandidate(ty::PolyTraitRef<'tcx>),
-    ImplCandidate(DefId),
-    DefaultImplCandidate(DefId),
-    DefaultImplObjectCandidate(DefId),
-
-    /// This is a trait matching with a projected type as `Self`, and
-    /// we found an applicable bound in the trait definition.
-    ProjectionCandidate,
-
-    /// Implementation of a `Fn`-family trait by one of the
-    /// anonymous types generated for a `||` expression.
-    ClosureCandidate(/* closure */ DefId, &'tcx ty::ClosureSubsts<'tcx>),
-
-    /// Implementation of a `Fn`-family trait by one of the anonymous
-    /// types generated for a fn pointer type (e.g., `fn(int)->int`)
-    FnPointerCandidate,
-
-    ObjectCandidate,
-
-    BuiltinObjectCandidate,
-
-    BuiltinUnsizeCandidate,
-}
-
-struct SelectionCandidateSet<'tcx> {
-    // a list of candidates that definitely apply to the current
-    // obligation (meaning: types unify).
-    vec: Vec<SelectionCandidate<'tcx>>,
-
-    // if this is true, then there were candidates that might or might
-    // not have applied, but we couldn't tell. This occurs when some
-    // of the input types are type variables, in which case there are
-    // various "builtin" rules that might or might not trigger.
-    ambiguous: bool,
-}
-
-enum BuiltinBoundConditions<'tcx> {
-    If(ty::Binder<Vec<Ty<'tcx>>>),
-    ParameterBuiltin,
-    AmbiguousBuiltin
-}
-
-#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
-/// The result of trait evaluation. The order is important
-/// here as the evaluation of a list is the maximum of the
-/// evaluations.
-enum EvaluationResult {
-    /// Evaluation successful
-    EvaluatedToOk,
-    /// Evaluation failed because of recursion - treated as ambiguous
-    EvaluatedToUnknown,
-    /// Evaluation is known to be ambiguous
-    EvaluatedToAmbig,
-    /// Evaluation failed
-    EvaluatedToErr,
-}
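-
-// Illustrative note (not from the original source): because results are
-// combined by taking the maximum under the ordering above, a list of nested
-// obligations evaluating to [EvaluatedToOk, EvaluatedToAmbig, EvaluatedToOk]
-// comes out as EvaluatedToAmbig overall, and a single EvaluatedToErr
-// dominates everything else.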
-
-#[derive(Clone)]
-pub struct EvaluationCache<'tcx> {
-    hashmap: RefCell<FnvHashMap<ty::PolyTraitRef<'tcx>, EvaluationResult>>
-}
-
-impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
-    pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>)
-               -> SelectionContext<'cx, 'tcx> {
-        SelectionContext {
-            infcx: infcx,
-            freshener: infcx.freshener(),
-            intercrate: false,
-        }
-    }
-
-    pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'tcx>)
-                      -> SelectionContext<'cx, 'tcx> {
-        SelectionContext {
-            infcx: infcx,
-            freshener: infcx.freshener(),
-            intercrate: true,
-        }
-    }
-
-    pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'tcx> {
-        self.infcx
-    }
-
-    pub fn tcx(&self) -> &'cx ty::ctxt<'tcx> {
-        self.infcx.tcx
-    }
-
-    pub fn param_env(&self) -> &'cx ty::ParameterEnvironment<'cx, 'tcx> {
-        self.infcx.param_env()
-    }
-
-    pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'tcx> {
-        self.infcx
-    }
-
-    ///////////////////////////////////////////////////////////////////////////
-    // Selection
-    //
-    // The selection phase tries to identify *how* an obligation will
-    // be resolved. For example, it will identify which impl or
-    // parameter bound is to be used. The process can be inconclusive
-    // if the self type in the obligation is not fully inferred. Selection
-    // can result in an error in one of two ways:
-    //
-    // 1. If no applicable impl or parameter bound can be found.
-    // 2. If the output type parameters in the obligation do not match
-    //    those specified by the impl/bound. For example, if the obligation
-    //    is `Vec<Foo>:Iterable<Bar>`, but the impl specifies
-    //    `impl<T> Iterable<T> for Vec<T>`, then an error would result.
-
-    /// Attempts to satisfy the obligation. If successful, this will affect the surrounding
-    /// type environment by performing unification.
-    pub fn select(&mut self, obligation: &TraitObligation<'tcx>)
-                  -> SelectionResult<'tcx, Selection<'tcx>> {
-        debug!("select({:?})", obligation);
-        assert!(!obligation.predicate.has_escaping_regions());
-
-        let dep_node = obligation.predicate.dep_node();
-        let _task = self.tcx().dep_graph.in_task(dep_node);
-
-        let stack = self.push_stack(TraitObligationStackList::empty(), obligation);
-        match try!(self.candidate_from_obligation(&stack)) {
-            None => {
-                self.consider_unification_despite_ambiguity(obligation);
-                Ok(None)
-            }
-            Some(candidate) => Ok(Some(try!(self.confirm_candidate(obligation, candidate)))),
-        }
-    }
-
-    /// In the particular case of unboxed closure obligations, we can
-    /// sometimes do some amount of unification for the
-    /// argument/return types even though we can't yet fully match the obligation.
-    /// The particular case we are interested in is an obligation of the form:
-    ///
-    ///    C : FnFoo<A>
-    ///
-    /// where `C` is an unboxed closure type and `FnFoo` is one of the
-    /// `Fn` traits. Because we know that users cannot write impls for closure types
-    /// themselves, the only way that `C : FnFoo` can fail to match is under two
-    /// conditions:
-    ///
-    /// 1. The closure kind for `C` is not yet known, because inference isn't complete.
-    /// 2. The closure kind for `C` *is* known, but doesn't match what is needed.
-    ///    For example, `C` may be a `FnOnce` closure, but a `Fn` closure is needed.
-    ///
-    /// In either case, we always know what argument types are
-    /// expected by `C`, no matter what kind of `Fn` trait it
-    /// eventually matches. So we can go ahead and unify the argument
-    /// types, even though the end result is ambiguous.
-    ///
-    /// Note that this is safe *even if* the trait would never be
-    /// matched (case 2 above). After all, in that case, an error will
-    /// result, so it kind of doesn't matter what we do --- unifying
-    /// the argument types can only be helpful to the user, because
-    /// once they patch up the kind of closure that is expected, the
-    /// argument types won't really change.
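-    ///
-    /// A hedged illustration (hypothetical user code, not from the original
-    /// source):
-    ///
-    /// ```
-    /// fn call_twice<F: Fn(u32) -> u32>(f: F) -> u32 { f(f(1)) }
-    /// fn main() { println!("{}", call_twice(|x| x + 1)); }
-    /// ```
-    ///
-    /// While the closure's kind (`Fn`/`FnMut`/`FnOnce`) may still be
-    /// undetermined when `{closure} : Fn(u32) -> u32` is first selected,
-    /// unifying the argument type `u32` with the closure's input lets the
-    /// type of `x` be inferred even though selection itself stays ambiguous.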
-    fn consider_unification_despite_ambiguity(&mut self, obligation: &TraitObligation<'tcx>) {
-        // Is this a `C : FnFoo(...)` trait reference for some trait binding `FnFoo`?
-        match self.tcx().lang_items.fn_trait_kind(obligation.predicate.0.def_id()) {
-            Some(_) => { }
-            None => { return; }
-        }
-
-        // Is the self-type a closure type? We ignore bindings here
-        // because if it is a closure type, it must be a closure type from
-        // within this current fn, and hence none of the higher-ranked
-        // lifetimes can appear inside the self-type.
-        let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
-        let (closure_def_id, substs) = match self_ty.sty {
-            ty::TyClosure(id, ref substs) => (id, substs),
-            _ => { return; }
-        };
-        assert!(!substs.has_escaping_regions());
-
-        // It is OK to call the unnormalized variant here - this is only
-        // reached for TyClosure: Fn inputs where the closure kind is
-        // still unknown, which should only occur in typeck where the
-        // closure type is already normalized.
-        let closure_trait_ref = self.closure_trait_ref_unnormalized(obligation,
-                                                                    closure_def_id,
-                                                                    substs);
-
-        match self.confirm_poly_trait_refs(obligation.cause.clone(),
-                                           obligation.predicate.to_poly_trait_ref(),
-                                           closure_trait_ref) {
-            Ok(()) => { }
-            Err(_) => { /* Silently ignore errors. */ }
-        }
-    }
-
-    ///////////////////////////////////////////////////////////////////////////
-    // EVALUATION
-    //
-    // Tests whether an obligation can be selected or whether an impl
-    // can be applied to particular types. It skips the "confirmation"
-    // step and hence completely ignores output type parameters.
-    //
-    // The result is "true" if the obligation *may* hold and "false" if
-    // we can be sure it does not.
-
-
-    /// Evaluates whether the obligation `obligation` can be satisfied (by any means).
-    pub fn evaluate_obligation(&mut self,
-                               obligation: &PredicateObligation<'tcx>)
-                               -> bool
-    {
-        debug!("evaluate_obligation({:?})",
-               obligation);
-
-        self.infcx.probe(|_| {
-            self.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation)
-                .may_apply()
-        })
-    }
-
-    /// Evaluates whether the obligation `obligation` can be satisfied,
-    /// and returns `false` if not certain. However, this is not entirely
-    /// accurate if inference variables are involved.
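-    ///
-    /// Hedged illustration (not from the original source): for an obligation
-    /// whose self type is still an inference variable, say `$0 : Clone`,
-    /// evaluation comes back ambiguous, so `evaluate_obligation` above
-    /// answers `true` (it *may* hold) while this conservative variant
-    /// answers `false`.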
-    pub fn evaluate_obligation_conservatively(&mut self,
-                                              obligation: &PredicateObligation<'tcx>)
-                                              -> bool
-    {
-        debug!("evaluate_obligation_conservatively({:?})",
-               obligation);
-
-        self.infcx.probe(|_| {
-            self.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation)
-                == EvaluatedToOk
-        })
-    }
-
-    /// Evaluates the predicates in `predicates` recursively. Note that
-    /// this applies projections in the predicates, and therefore
-    /// is run within an inference probe.
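-    ///
-    /// The individual results are combined: an `EvaluatedToErr` aborts the walk
-    /// immediately, while ambiguous or unknown results take precedence over
-    /// `EvaluatedToOk` (e.g. `Ok, Ambig, Ok` yields `EvaluatedToAmbig`).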
-    fn evaluate_predicates_recursively<'a,'o,I>(&mut self,
-                                                stack: TraitObligationStackList<'o, 'tcx>,
-                                                predicates: I)
-                                                -> EvaluationResult
-        where I : Iterator<Item=&'a PredicateObligation<'tcx>>, 'tcx:'a
-    {
-        let mut result = EvaluatedToOk;
-        for obligation in predicates {
-            let eval = self.evaluate_predicate_recursively(stack, obligation);
-            debug!("evaluate_predicate_recursively({:?}) = {:?}",
-                   obligation, eval);
-            match eval {
-                EvaluatedToErr => { return EvaluatedToErr; }
-                EvaluatedToAmbig => { result = EvaluatedToAmbig; }
-                EvaluatedToUnknown => {
-                    if result < EvaluatedToUnknown {
-                        result = EvaluatedToUnknown;
-                    }
-                }
-                EvaluatedToOk => { }
-            }
-        }
-        result
-    }
-
-    fn evaluate_predicate_recursively<'o>(&mut self,
-                                          previous_stack: TraitObligationStackList<'o, 'tcx>,
-                                          obligation: &PredicateObligation<'tcx>)
-                                           -> EvaluationResult
-    {
-        debug!("evaluate_predicate_recursively({:?})",
-               obligation);
-
-        // Check the cache from the tcx of predicates that we know
-        // have been proven elsewhere. This cache only contains
-        // predicates that are global in scope and hence unaffected by
-        // the current environment.
-        if self.tcx().fulfilled_predicates.borrow().check_duplicate(&obligation.predicate) {
-            return EvaluatedToOk;
-        }
-
-        match obligation.predicate {
-            ty::Predicate::Trait(ref t) => {
-                assert!(!t.has_escaping_regions());
-                let obligation = obligation.with(t.clone());
-                self.evaluate_obligation_recursively(previous_stack, &obligation)
-            }
-
-            ty::Predicate::Equate(ref p) => {
-                // does this code ever run?
-                match self.infcx.equality_predicate(obligation.cause.span, p) {
-                    Ok(()) => EvaluatedToOk,
-                    Err(_) => EvaluatedToErr
-                }
-            }
-
-            ty::Predicate::WellFormed(ty) => {
-                match ty::wf::obligations(self.infcx, obligation.cause.body_id,
-                                          ty, obligation.cause.span) {
-                    Some(obligations) =>
-                        self.evaluate_predicates_recursively(previous_stack, obligations.iter()),
-                    None =>
-                        EvaluatedToAmbig,
-                }
-            }
-
-            ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) => {
-                // we do not consider region relationships when
-                // evaluating trait matches
-                EvaluatedToOk
-            }
-
-            ty::Predicate::ObjectSafe(trait_def_id) => {
-                if object_safety::is_object_safe(self.tcx(), trait_def_id) {
-                    EvaluatedToOk
-                } else {
-                    EvaluatedToErr
-                }
-            }
-
-            ty::Predicate::Projection(ref data) => {
-                let project_obligation = obligation.with(data.clone());
-                match project::poly_project_and_unify_type(self, &project_obligation) {
-                    Ok(Some(subobligations)) => {
-                        self.evaluate_predicates_recursively(previous_stack,
-                                                             subobligations.iter())
-                    }
-                    Ok(None) => {
-                        EvaluatedToAmbig
-                    }
-                    Err(_) => {
-                        EvaluatedToErr
-                    }
-                }
-            }
-        }
-    }
-
-    fn evaluate_obligation_recursively<'o>(&mut self,
-                                           previous_stack: TraitObligationStackList<'o, 'tcx>,
-                                           obligation: &TraitObligation<'tcx>)
-                                           -> EvaluationResult
-    {
-        debug!("evaluate_obligation_recursively({:?})",
-               obligation);
-
-        let stack = self.push_stack(previous_stack, obligation);
-        let fresh_trait_ref = stack.fresh_trait_ref;
-        if let Some(result) = self.check_evaluation_cache(fresh_trait_ref) {
-            debug!("CACHE HIT: EVAL({:?})={:?}",
-                   fresh_trait_ref,
-                   result);
-            return result;
-        }
-
-        let result = self.evaluate_stack(&stack);
-
-        debug!("CACHE MISS: EVAL({:?})={:?}",
-               fresh_trait_ref,
-               result);
-        self.insert_evaluation_cache(fresh_trait_ref, result);
-
-        result
-    }
-
-    fn evaluate_stack<'o>(&mut self,
-                          stack: &TraitObligationStack<'o, 'tcx>)
-                          -> EvaluationResult
-    {
-        // In intercrate mode, whenever any of the types are unbound,
-        // there can always be an impl. Even if there are no impls in
-        // this crate, perhaps the type would be unified with
-        // something from another crate that does provide an impl.
-        //
-        // In intracrate mode, we must still be conservative. The reason is
-        // that we want to avoid cycles. Imagine an impl like:
-        //
-        //     impl<T:Eq> Eq for Vec<T>
-        //
-        // and a trait reference like `$0 : Eq` where `$0` is an
-        // unbound variable. When we evaluate this trait-reference, we
-        // will unify `$0` with `Vec<$1>` (for some fresh variable
-        // `$1`), on the condition that `$1 : Eq`. We will then wind
-        // up with many candidates (since there are other `Eq` impls
-        // that apply) and try to winnow things down. This results in
-        // a recursive evaluation of `$1 : Eq` -- as you can
-        // imagine, this is just where we started. To avoid that, we
-        // check for unbound variables and return an ambiguous (hence possible)
-        // match if we've seen this trait before.
-        //
-        // This suffices to allow chains like `FnMut` implemented in
-        // terms of `Fn` etc, but we could probably make this more
-        // precise still.
-        let input_types = stack.fresh_trait_ref.0.input_types();
-        let unbound_input_types = input_types.iter().any(|ty| ty.is_fresh());
-        if unbound_input_types && self.intercrate {
-            debug!("evaluate_stack({:?}) --> unbound argument, intercrate -->  ambiguous",
-                   stack.fresh_trait_ref);
-            return EvaluatedToAmbig;
-        }
-        if unbound_input_types &&
-              stack.iter().skip(1).any(
-                  |prev| self.match_fresh_trait_refs(&stack.fresh_trait_ref,
-                                                     &prev.fresh_trait_ref))
-        {
-            debug!("evaluate_stack({:?}) --> unbound argument, recursive --> giving up",
-                   stack.fresh_trait_ref);
-            return EvaluatedToUnknown;
-        }
-
-        // If there is any previous entry on the stack that precisely
-        // matches this obligation, then we can assume that the
-        // obligation is satisfied for now (still all other conditions
-        // must be met of course). One obvious case this comes up is
-        // marker traits like `Send`. Think of a linked list:
-        //
-        //    struct List<T> { data: T, next: Option<Box<List<T>>> }
-        //
-        // `Box<List<T>>` will be `Send` if `T` is `Send` and
-        // `Option<Box<List<T>>>` is `Send`, and in turn
-        // `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is
-        // `Send`.
-        //
-        // Note that we do this comparison using the `fresh_trait_ref`
-        // fields. Because these have all been skolemized using
-        // `self.freshener`, we can be sure that (a) this will not
-        // affect the inferencer state and (b) that if we see two
-        // skolemized types with the same index, they refer to the
-        // same unbound type variable.
-        if
-            stack.iter()
-            .skip(1) // skip top-most frame
-            .any(|prev| stack.fresh_trait_ref == prev.fresh_trait_ref)
-        {
-            debug!("evaluate_stack({:?}) --> recursive",
-                   stack.fresh_trait_ref);
-            return EvaluatedToOk;
-        }
-
-        match self.candidate_from_obligation(stack) {
-            Ok(Some(c)) => self.evaluate_candidate(stack, &c),
-            Ok(None) => EvaluatedToAmbig,
-            Err(..) => EvaluatedToErr
-        }
-    }
-
-    /// Further evaluate `candidate` to decide whether all type parameters match and whether nested
-    /// obligations are met. The result indicates whether `candidate` remains viable
-    /// after this further scrutiny.
-    fn evaluate_candidate<'o>(&mut self,
-                              stack: &TraitObligationStack<'o, 'tcx>,
-                              candidate: &SelectionCandidate<'tcx>)
-                              -> EvaluationResult
-    {
-        debug!("evaluate_candidate: depth={} candidate={:?}",
-               stack.obligation.recursion_depth, candidate);
-        let result = self.infcx.probe(|_| {
-            let candidate = (*candidate).clone();
-            match self.confirm_candidate(stack.obligation, candidate) {
-                Ok(selection) => {
-                    self.evaluate_predicates_recursively(
-                        stack.list(),
-                        selection.nested_obligations().iter())
-                }
-                Err(..) => EvaluatedToErr
-            }
-        });
-        debug!("evaluate_candidate: depth={} result={:?}",
-               stack.obligation.recursion_depth, result);
-        result
-    }
-
-    fn pick_evaluation_cache(&self) -> &EvaluationCache<'tcx> {
-        // see comment in `pick_candidate_cache`
-        if self.intercrate ||
-            !self.param_env().caller_bounds.is_empty()
-        {
-            &self.param_env().evaluation_cache
-        } else
-        {
-            &self.tcx().evaluation_cache
-        }
-    }
-
-    fn check_evaluation_cache(&self, trait_ref: ty::PolyTraitRef<'tcx>)
-                              -> Option<EvaluationResult>
-    {
-        let cache = self.pick_evaluation_cache();
-        cache.hashmap.borrow().get(&trait_ref).cloned()
-    }
-
-    fn insert_evaluation_cache(&mut self,
-                               trait_ref: ty::PolyTraitRef<'tcx>,
-                               result: EvaluationResult)
-    {
-        // Avoid caching results that depend on more than just the trait-ref:
-        // The stack can create EvaluatedToUnknown, and closure signatures
-        // being yet uninferred can create "spurious" EvaluatedToAmbig
-        // and EvaluatedToOk.
-        if result == EvaluatedToUnknown ||
-            ((result == EvaluatedToAmbig || result == EvaluatedToOk)
-             && trait_ref.has_closure_types())
-        {
-            return;
-        }
-
-        let cache = self.pick_evaluation_cache();
-        cache.hashmap.borrow_mut().insert(trait_ref, result);
-    }
-
-    ///////////////////////////////////////////////////////////////////////////
-    // CANDIDATE ASSEMBLY
-    //
-    // The selection process begins by examining all in-scope impls,
-    // caller obligations, and so forth and assembling a list of
-    // candidates. See `README.md` and the `Candidate` type for more
-    // details.
-
-    fn candidate_from_obligation<'o>(&mut self,
-                                     stack: &TraitObligationStack<'o, 'tcx>)
-                                     -> SelectionResult<'tcx, SelectionCandidate<'tcx>>
-    {
-        // Watch out for overflow. This intentionally bypasses (and does
-        // not update) the cache.
-        let recursion_limit = self.infcx.tcx.sess.recursion_limit.get();
-        if stack.obligation.recursion_depth >= recursion_limit {
-            report_overflow_error(self.infcx(), &stack.obligation, true);
-        }
-
-        // Check the cache. Note that we skolemize the trait-ref
-        // separately rather than using `stack.fresh_trait_ref` -- this
-        // is because we want the unbound variables to be replaced
-        // with fresh skolemized types starting from index 0.
-        let cache_fresh_trait_pred =
-            self.infcx.freshen(stack.obligation.predicate.clone());
-        debug!("candidate_from_obligation(cache_fresh_trait_pred={:?}, obligation={:?})",
-               cache_fresh_trait_pred,
-               stack);
-        assert!(!stack.obligation.predicate.has_escaping_regions());
-
-        match self.check_candidate_cache(&cache_fresh_trait_pred) {
-            Some(c) => {
-                debug!("CACHE HIT: SELECT({:?})={:?}",
-                       cache_fresh_trait_pred,
-                       c);
-                return c;
-            }
-            None => { }
-        }
-
-        // If no match, compute result and insert into cache.
-        let candidate = self.candidate_from_obligation_no_cache(stack);
-
-        if self.should_update_candidate_cache(&cache_fresh_trait_pred, &candidate) {
-            debug!("CACHE MISS: SELECT({:?})={:?}",
-                   cache_fresh_trait_pred, candidate);
-            self.insert_candidate_cache(cache_fresh_trait_pred, candidate.clone());
-        }
-
-        candidate
-    }
-
-    fn candidate_from_obligation_no_cache<'o>(&mut self,
-                                              stack: &TraitObligationStack<'o, 'tcx>)
-                                              -> SelectionResult<'tcx, SelectionCandidate<'tcx>>
-    {
-        if stack.obligation.predicate.references_error() {
-            // If we encounter a `TyError`, we generally prefer the
-            // most "optimistic" result in response -- that is, the
-            // one least likely to report downstream errors. But
-            // because this routine is shared by coherence and by
-            // trait selection, there isn't an obvious "right" choice
-            // here in that respect, so we opt to just return
-            // ambiguity and let the upstream clients sort it out.
-            return Ok(None);
-        }
-
-        if !self.is_knowable(stack) {
-            debug!("intercrate not knowable");
-            return Ok(None);
-        }
-
-        let candidate_set = try!(self.assemble_candidates(stack));
-
-        if candidate_set.ambiguous {
-            debug!("candidate set contains ambig");
-            return Ok(None);
-        }
-
-        let mut candidates = candidate_set.vec;
-
-        debug!("assembled {} candidates for {:?}: {:?}",
-               candidates.len(),
-               stack,
-               candidates);
-
-        // At this point, we know that each of the entries in the
-        // candidate set is *individually* applicable. Now we have to
-        // figure out if they contain mutual incompatibilities. This
-        // frequently arises if we have an unconstrained input type --
-        // for example, we are looking for $0:Eq where $0 is some
-        // unconstrained type variable. In that case, we'll get a
-        // candidate which assumes $0 == int, one that assumes $0 ==
-        // usize, etc. This spells an ambiguity.
-
-        // If there is more than one candidate, first winnow them down
-        // by considering extra conditions (nested obligations and so
-        // forth). We don't winnow if there is exactly one
-        // candidate. This is a relatively minor distinction but it
-        // can lead to better inference and error-reporting. An
-        // example would be if there was an impl:
-        //
-        //     impl<T:Clone> Vec<T> { fn push_clone(...) { ... } }
-        //
-        // and we were to see some code `foo.push_clone()` where `foo`
-        // is a `Vec<Bar>` and `Bar` does not implement `Clone`.  If
-        // we were to winnow, we'd wind up with zero candidates.
-        // Instead, we select the right impl now but report `Bar does
-        // not implement Clone`.
-        if candidates.len() > 1 {
-            candidates.retain(|c| self.evaluate_candidate(stack, c).may_apply())
-        }
-
-        // If there are STILL multiple candidates, we can further reduce
-        // the list by dropping duplicates.
-        if candidates.len() > 1 {
-            let mut i = 0;
-            while i < candidates.len() {
-                let is_dup =
-                    (0..candidates.len())
-                    .filter(|&j| i != j)
-                    .any(|j| self.candidate_should_be_dropped_in_favor_of(&candidates[i],
-                                                                          &candidates[j]));
-                if is_dup {
-                    debug!("Dropping candidate #{}/{}: {:?}",
-                           i, candidates.len(), candidates[i]);
-                    candidates.swap_remove(i);
-                } else {
-                    debug!("Retaining candidate #{}/{}: {:?}",
-                           i, candidates.len(), candidates[i]);
-                    i += 1;
-                }
-            }
-        }
-
-        // If there are *STILL* multiple candidates, give up and
-        // report ambiguity.
-        if candidates.len() > 1 {
-            debug!("multiple matches, ambig");
-            return Ok(None);
-        }
-
-
-        // If there are *NO* candidates, then there are no impls --
-        // that we know of, anyway. Note that in the case where there
-        // are unbound type variables within the obligation, it might
-        // be the case that you could still satisfy the obligation
-        // from another crate by instantiating the type variables with
-        // a type from another crate that does have an impl. This case
-        // is checked for in `evaluate_stack` (and hence users
-        // who might care about this case, like coherence, should use
-        // that function).
-        if candidates.is_empty() {
-            return Err(Unimplemented);
-        }
-
-        // Just one candidate left.
-        let candidate = candidates.pop().unwrap();
-
-        match candidate {
-            ImplCandidate(def_id) => {
-                match self.tcx().trait_impl_polarity(def_id) {
-                    Some(hir::ImplPolarity::Negative) => return Err(Unimplemented),
-                    _ => {}
-                }
-            }
-            _ => {}
-        }
-
-        Ok(Some(candidate))
-    }
-
-    fn is_knowable<'o>(&mut self,
-                       stack: &TraitObligationStack<'o, 'tcx>)
-                       -> bool
-    {
-        debug!("is_knowable(intercrate={})", self.intercrate);
-
-        if !self.intercrate {
-            return true;
-        }
-
-        let obligation = &stack.obligation;
-        let predicate = self.infcx().resolve_type_vars_if_possible(&obligation.predicate);
-
-        // ok to skip binder because of the nature of the
-        // trait-ref-is-knowable check, which does not care about
-        // bound regions
-        let trait_ref = &predicate.skip_binder().trait_ref;
-
-        coherence::trait_ref_is_knowable(self.tcx(), trait_ref)
-    }
-
-    fn pick_candidate_cache(&self) -> &SelectionCache<'tcx> {
-        // If there are any where-clauses in scope, then we always use
-        // a cache local to this particular scope. Otherwise, we
-        // switch to a global cache. We used to try and draw
-        // finer-grained distinctions, but that led to a series of
-        // annoying and weird bugs like #22019 and #18290. This simple
-        // rule seems to be pretty clearly safe and also still retains
-        // a very high hit rate (~95% when compiling rustc).
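-        //
-        // For instance, while selecting inside a function declared as
-        // `fn foo<T: Iterator>(..)`, `caller_bounds` contains `T: Iterator`,
-        // so results are recorded only in that function's local cache.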
-        if !self.param_env().caller_bounds.is_empty() {
-            return &self.param_env().selection_cache;
-        }
-
-        // Avoid using the master cache during coherence and just rely
-        // on the local cache. This effectively disables caching
-        // during coherence. It is really just a simplification to
-        // avoid us having to fear that coherence results "pollute"
-        // the master cache. Since coherence executes pretty quickly,
-        // it doesn't seem worth going to more trouble to increase the
-        // hit-rate.
-        if self.intercrate {
-            return &self.param_env().selection_cache;
-        }
-
-        // Otherwise, we can use the global cache.
-        &self.tcx().selection_cache
-    }
-
-    fn check_candidate_cache(&mut self,
-                             cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>)
-                             -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>>
-    {
-        let cache = self.pick_candidate_cache();
-        let hashmap = cache.hashmap.borrow();
-        hashmap.get(&cache_fresh_trait_pred.0.trait_ref).cloned()
-    }
-
-    fn insert_candidate_cache(&mut self,
-                              cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
-                              candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>)
-    {
-        let cache = self.pick_candidate_cache();
-        let mut hashmap = cache.hashmap.borrow_mut();
-        hashmap.insert(cache_fresh_trait_pred.0.trait_ref.clone(), candidate);
-    }
-
-    fn should_update_candidate_cache(&mut self,
-                                     cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>,
-                                     candidate: &SelectionResult<'tcx, SelectionCandidate<'tcx>>)
-                                     -> bool
-    {
-        // In general, it's a good idea to cache results, even
-        // ambiguous ones, to save us some trouble later. But we have
-        // to be careful not to cache results that could be
-        // invalidated later by advances in inference. Normally, this
-        // is not an issue, because any inference variables whose
-        // types are not yet bound are "freshened" in the cache key,
-        // which means that if we later get the same request once that
-        // type variable IS bound, we'll have a different cache key.
-        // For example, if we have `Vec<_#0t> : Foo`, and `_#0t` is
-        // not yet known, we may cache the result as `None`. But if
-        // later `_#0t` is bound to `Bar`, then when we freshen we'll
-        // have `Vec<Bar> : Foo` as the cache key.
-        //
-        // HOWEVER, it CAN happen that we get an ambiguity result in
-        // one particular case around closures where the cache key
-        // would not change. That is when the precise types of the
-        // upvars that a closure references have not yet been figured
-        // out (i.e., because it is not yet known if they are captured
-        // by ref, and if by ref, what kind of ref). In these cases,
-        // when matching a builtin bound, we will yield back an
-        // ambiguous result. But the *cache key* is just the closure type,
-        // it doesn't capture the state of the upvar computation.
-        //
-        // To avoid this trap, just don't cache ambiguous results if
-        // the self-type contains no inference byproducts (that really
-        // shouldn't happen in other circumstances anyway, given
-        // coherence).
-
-        match *candidate {
-            Ok(Some(_)) | Err(_) => true,
-            Ok(None) => {
-                cache_fresh_trait_pred.0.trait_ref.substs.types.has_infer_types()
-            }
-        }
-    }
-
-    fn assemble_candidates<'o>(&mut self,
-                               stack: &TraitObligationStack<'o, 'tcx>)
-                               -> Result<SelectionCandidateSet<'tcx>, SelectionError<'tcx>>
-    {
-        let TraitObligationStack { obligation, .. } = *stack;
-        let ref obligation = Obligation {
-            cause: obligation.cause.clone(),
-            recursion_depth: obligation.recursion_depth,
-            predicate: self.infcx().resolve_type_vars_if_possible(&obligation.predicate)
-        };
-
-        if obligation.predicate.skip_binder().self_ty().is_ty_var() {
-            // FIXME(#20297): Self is a type variable (e.g. `_: AsRef<str>`).
-            //
-            // This is somewhat problematic, as the current scheme can't really
-            // handle it turning to be a projection. This does end up as truly
-            // ambiguous in most cases anyway.
-            //
-            // Until this is fixed, take the fast path out - this also improves
-            // performance by preventing assemble_candidates_from_impls from
-            // matching every impl for this trait.
-            return Ok(SelectionCandidateSet { vec: vec![], ambiguous: true });
-        }
-
-        let mut candidates = SelectionCandidateSet {
-            vec: Vec::new(),
-            ambiguous: false
-        };
-
-        // Other bounds. Consider both in-scope bounds from fn decl
-        // and applicable impls. There is a certain set of precedence rules here.
-
-        match self.tcx().lang_items.to_builtin_kind(obligation.predicate.def_id()) {
-            Some(ty::BoundCopy) => {
-                debug!("obligation self ty is {:?}",
-                       obligation.predicate.0.self_ty());
-
-                // User-defined copy impls are permitted, but only for
-                // structs and enums.
-                try!(self.assemble_candidates_from_impls(obligation, &mut candidates));
-
-                // For other types, we'll use the builtin rules.
-                try!(self.assemble_builtin_bound_candidates(ty::BoundCopy,
-                                                            obligation,
-                                                            &mut candidates));
-            }
-            Some(bound @ ty::BoundSized) => {
-                // Sized is never implementable by end-users, it is
-                // always automatically computed.
-                try!(self.assemble_builtin_bound_candidates(bound,
-                                                            obligation,
-                                                            &mut candidates));
-            }
-
-            None if self.tcx().lang_items.unsize_trait() ==
-                    Some(obligation.predicate.def_id()) => {
-                self.assemble_candidates_for_unsizing(obligation, &mut candidates);
-            }
-
-            Some(ty::BoundSend) |
-            Some(ty::BoundSync) |
-            None => {
-                try!(self.assemble_closure_candidates(obligation, &mut candidates));
-                try!(self.assemble_fn_pointer_candidates(obligation, &mut candidates));
-                try!(self.assemble_candidates_from_impls(obligation, &mut candidates));
-                self.assemble_candidates_from_object_ty(obligation, &mut candidates);
-            }
-        }
-
-        self.assemble_candidates_from_projected_tys(obligation, &mut candidates);
-        try!(self.assemble_candidates_from_caller_bounds(stack, &mut candidates));
-        // Default implementations have lower priority, so we only
-        // consider triggering a default if there is no other impl that can apply.
-        if candidates.vec.is_empty() {
-            try!(self.assemble_candidates_from_default_impls(obligation, &mut candidates));
-        }
-        debug!("candidate list size: {}", candidates.vec.len());
-        Ok(candidates)
-    }
-
-    fn assemble_candidates_from_projected_tys(&mut self,
-                                              obligation: &TraitObligation<'tcx>,
-                                              candidates: &mut SelectionCandidateSet<'tcx>)
-    {
-        debug!("assemble_candidates_for_projected_tys({:?})", obligation);
-
-        // FIXME(#20297) -- just examining the self-type is very simplistic
-
-        // before we go into the whole skolemization thing, just
-        // quickly check if the self-type is a projection at all.
-        let trait_def_id = match obligation.predicate.0.trait_ref.self_ty().sty {
-            ty::TyProjection(ref data) => data.trait_ref.def_id,
-            ty::TyInfer(ty::TyVar(_)) => {
-                self.tcx().sess.span_bug(obligation.cause.span,
-                    "Self=_ should have been handled by assemble_candidates");
-            }
-            _ => { return; }
-        };
-
-        debug!("assemble_candidates_for_projected_tys: trait_def_id={:?}",
-               trait_def_id);
-
-        let result = self.infcx.probe(|snapshot| {
-            self.match_projection_obligation_against_bounds_from_trait(obligation,
-                                                                       snapshot)
-        });
-
-        if result {
-            candidates.vec.push(ProjectionCandidate);
-        }
-    }
-
-    fn match_projection_obligation_against_bounds_from_trait(
-        &mut self,
-        obligation: &TraitObligation<'tcx>,
-        snapshot: &infer::CombinedSnapshot)
-        -> bool
-    {
-        let poly_trait_predicate =
-            self.infcx().resolve_type_vars_if_possible(&obligation.predicate);
-        let (skol_trait_predicate, skol_map) =
-            self.infcx().skolemize_late_bound_regions(&poly_trait_predicate, snapshot);
-        debug!("match_projection_obligation_against_bounds_from_trait: \
-                skol_trait_predicate={:?} skol_map={:?}",
-               skol_trait_predicate,
-               skol_map);
-
-        let projection_trait_ref = match skol_trait_predicate.trait_ref.self_ty().sty {
-            ty::TyProjection(ref data) => &data.trait_ref,
-            _ => {
-                self.tcx().sess.span_bug(
-                    obligation.cause.span,
-                    &format!("match_projection_obligation_against_bounds_from_trait() called \
-                              but self-ty not a projection: {:?}",
-                             skol_trait_predicate.trait_ref.self_ty()));
-            }
-        };
-        debug!("match_projection_obligation_against_bounds_from_trait: \
-                projection_trait_ref={:?}",
-               projection_trait_ref);
-
-        let trait_predicates = self.tcx().lookup_predicates(projection_trait_ref.def_id);
-        let bounds = trait_predicates.instantiate(self.tcx(), projection_trait_ref.substs);
-        debug!("match_projection_obligation_against_bounds_from_trait: \
-                bounds={:?}",
-               bounds);
-
-        let matching_bound =
-            util::elaborate_predicates(self.tcx(), bounds.predicates.into_vec())
-            .filter_to_traits()
-            .find(
-                |bound| self.infcx.probe(
-                    |_| self.match_projection(obligation,
-                                              bound.clone(),
-                                              skol_trait_predicate.trait_ref.clone(),
-                                              &skol_map,
-                                              snapshot)));
-
-        debug!("match_projection_obligation_against_bounds_from_trait: \
-                matching_bound={:?}",
-               matching_bound);
-        match matching_bound {
-            None => false,
-            Some(bound) => {
-                // Repeat the successful match, if any, this time outside of a probe.
-                let result = self.match_projection(obligation,
-                                                   bound,
-                                                   skol_trait_predicate.trait_ref.clone(),
-                                                   &skol_map,
-                                                   snapshot);
-                assert!(result);
-                true
-            }
-        }
-    }
-
-    fn match_projection(&mut self,
-                        obligation: &TraitObligation<'tcx>,
-                        trait_bound: ty::PolyTraitRef<'tcx>,
-                        skol_trait_ref: ty::TraitRef<'tcx>,
-                        skol_map: &infer::SkolemizationMap,
-                        snapshot: &infer::CombinedSnapshot)
-                        -> bool
-    {
-        assert!(!skol_trait_ref.has_escaping_regions());
-        let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span);
-        match self.infcx.sub_poly_trait_refs(false,
-                                             origin,
-                                             trait_bound.clone(),
-                                             ty::Binder(skol_trait_ref.clone())) {
-            Ok(()) => { }
-            Err(_) => { return false; }
-        }
-
-        self.infcx.leak_check(skol_map, snapshot).is_ok()
-    }
-
-    /// Given an obligation like `<SomeTrait for T>`, search the obligations that the caller
-    /// supplied to find out whether it is listed among them.
-    ///
-    /// Never affects inference environment.
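-    ///
-    /// For example, inside `fn foo<T: Clone>(..)`, an obligation `T: Clone`
-    /// matches the caller-supplied bound and yields a `ParamCandidate`.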
-    fn assemble_candidates_from_caller_bounds<'o>(&mut self,
-                                                  stack: &TraitObligationStack<'o, 'tcx>,
-                                                  candidates: &mut SelectionCandidateSet<'tcx>)
-                                                  -> Result<(),SelectionError<'tcx>>
-    {
-        debug!("assemble_candidates_from_caller_bounds({:?})",
-               stack.obligation);
-
-        let all_bounds =
-            self.param_env().caller_bounds
-                            .iter()
-                            .filter_map(|o| o.to_opt_poly_trait_ref());
-
-        let matching_bounds =
-            all_bounds.filter(
-                |bound| self.evaluate_where_clause(stack, bound.clone()).may_apply());
-
-        let param_candidates =
-            matching_bounds.map(|bound| ParamCandidate(bound));
-
-        candidates.vec.extend(param_candidates);
-
-        Ok(())
-    }
-
-    fn evaluate_where_clause<'o>(&mut self,
-                                 stack: &TraitObligationStack<'o, 'tcx>,
-                                 where_clause_trait_ref: ty::PolyTraitRef<'tcx>)
-                                 -> EvaluationResult
-    {
-        self.infcx().probe(move |_| {
-            match self.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
-                Ok(obligations) => {
-                    self.evaluate_predicates_recursively(stack.list(), obligations.iter())
-                }
-                Err(()) => EvaluatedToErr
-            }
-        })
-    }
-
-    /// Check for the artificial impl that the compiler will create for an obligation like `X :
-    /// FnMut<..>` where `X` is a closure type.
-    ///
-    /// Note: the type parameters on a closure candidate are modeled as *output* type
-    /// parameters and hence do not affect whether this trait is a match or not. They will be
-    /// unified during the confirmation step.
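-    ///
-    /// For example, a closure like `|x: u32| x + 1` has kind `Fn`, so it can
-    /// satisfy `Fn`, `FnMut` or `FnOnce` obligations; a closure whose kind is
-    /// not yet inferred only makes the candidate set ambiguous.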
-    fn assemble_closure_candidates(&mut self,
-                                   obligation: &TraitObligation<'tcx>,
-                                   candidates: &mut SelectionCandidateSet<'tcx>)
-                                   -> Result<(),SelectionError<'tcx>>
-    {
-        let kind = match self.tcx().lang_items.fn_trait_kind(obligation.predicate.0.def_id()) {
-            Some(k) => k,
-            None => { return Ok(()); }
-        };
-
-        // ok to skip binder because the substs on closure types never
-        // touch bound regions, they just capture the in-scope
-        // type/region parameters
-        let self_ty = *obligation.self_ty().skip_binder();
-        let (closure_def_id, substs) = match self_ty.sty {
-            ty::TyClosure(id, ref substs) => (id, substs),
-            ty::TyInfer(ty::TyVar(_)) => {
-                debug!("assemble_unboxed_closure_candidates: ambiguous self-type");
-                candidates.ambiguous = true;
-                return Ok(());
-            }
-            _ => { return Ok(()); }
-        };
-
-        debug!("assemble_unboxed_candidates: self_ty={:?} kind={:?} obligation={:?}",
-               self_ty,
-               kind,
-               obligation);
-
-        match self.infcx.closure_kind(closure_def_id) {
-            Some(closure_kind) => {
-                debug!("assemble_unboxed_candidates: closure_kind = {:?}", closure_kind);
-                if closure_kind.extends(kind) {
-                    candidates.vec.push(ClosureCandidate(closure_def_id, substs));
-                }
-            }
-            None => {
-                debug!("assemble_unboxed_candidates: closure_kind not yet known");
-                candidates.ambiguous = true;
-            }
-        }
-
-        Ok(())
-    }
-
-    /// Implement one of the `Fn()` family for a fn pointer.
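-    ///
-    /// For example, a value of type `fn(u32) -> u32` can satisfy
-    /// `Fn(u32) -> u32` (and `FnMut`/`FnOnce`); per the match below, only
-    /// safe, Rust-ABI, non-variadic, converging fn pointers are considered.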
-    fn assemble_fn_pointer_candidates(&mut self,
-                                      obligation: &TraitObligation<'tcx>,
-                                      candidates: &mut SelectionCandidateSet<'tcx>)
-                                      -> Result<(),SelectionError<'tcx>>
-    {
-        // We provide impl of all fn traits for fn pointers.
-        if self.tcx().lang_items.fn_trait_kind(obligation.predicate.def_id()).is_none() {
-            return Ok(());
-        }
-
-        // ok to skip binder because what we are inspecting doesn't involve bound regions
-        let self_ty = *obligation.self_ty().skip_binder();
-        match self_ty.sty {
-            ty::TyInfer(ty::TyVar(_)) => {
-                debug!("assemble_fn_pointer_candidates: ambiguous self-type");
-                candidates.ambiguous = true; // could wind up being a fn() type
-            }
-
-            // provide an impl, but only for suitable `fn` pointers
-            ty::TyBareFn(_, &ty::BareFnTy {
-                unsafety: hir::Unsafety::Normal,
-                abi: Abi::Rust,
-                sig: ty::Binder(ty::FnSig {
-                    inputs: _,
-                    output: ty::FnConverging(_),
-                    variadic: false
-                })
-            }) => {
-                candidates.vec.push(FnPointerCandidate);
-            }
-
-            _ => { }
-        }
-
-        Ok(())
-    }
-
-    /// Search for impls that might apply to `obligation`.
-    fn assemble_candidates_from_impls(&mut self,
-                                      obligation: &TraitObligation<'tcx>,
-                                      candidates: &mut SelectionCandidateSet<'tcx>)
-                                      -> Result<(), SelectionError<'tcx>>
-    {
-        debug!("assemble_candidates_from_impls(obligation={:?})", obligation);
-
-        let def = self.tcx().lookup_trait_def(obligation.predicate.def_id());
-
-        def.for_each_relevant_impl(
-            self.tcx(),
-            obligation.predicate.0.trait_ref.self_ty(),
-            |impl_def_id| {
-                self.infcx.probe(|snapshot| {
-                    if let Ok(_) = self.match_impl(impl_def_id, obligation, snapshot) {
-                        candidates.vec.push(ImplCandidate(impl_def_id));
-                    }
-                });
-            }
-        );
-
-        Ok(())
-    }
-
-    fn assemble_candidates_from_default_impls(&mut self,
-                                              obligation: &TraitObligation<'tcx>,
-                                              candidates: &mut SelectionCandidateSet<'tcx>)
-                                              -> Result<(), SelectionError<'tcx>>
-    {
-        // OK to skip binder here because the tests we do below do not involve bound regions
-        let self_ty = *obligation.self_ty().skip_binder();
-        debug!("assemble_candidates_from_default_impls(self_ty={:?})", self_ty);
-
-        let def_id = obligation.predicate.def_id();
-
-        if self.tcx().trait_has_default_impl(def_id) {
-            match self_ty.sty {
-                ty::TyTrait(..) => {
-                    // For object types, we don't know what the closed
-                    // over types are. For most traits, this means we
-                    // conservatively say nothing; a candidate may be
-                    // added by `assemble_candidates_from_object_ty`.
-                    // However, for the kind of magic reflect trait,
-                    // we consider it to be implemented even for
-                    // object types, because it just lets you reflect
-                    // onto the object type, not into the object's
-                    // interior.
-                    if self.tcx().has_attr(def_id, "rustc_reflect_like") {
-                        candidates.vec.push(DefaultImplObjectCandidate(def_id));
-                    }
-                }
-                ty::TyParam(..) |
-                ty::TyProjection(..) => {
-                    // In these cases, we don't know what the actual
-                    // type is.  Therefore, we cannot break it down
-                    // into its constituent types. So we don't
-                    // consider the `..` impl but instead just add no
-                    // candidates: this means that typeck will only
-                    // succeed if there is another reason to believe
-                    // that this obligation holds. That could be a
-                    // where-clause or, in the case of an object type,
-                    // it could be that the object type lists the
-                    // trait (e.g. `Foo+Send : Send`). See
-                    // `compile-fail/typeck-default-trait-impl-send-param.rs`
-                    // for an example of a test case that exercises
-                    // this path.
-                }
-                ty::TyInfer(ty::TyVar(_)) => {
-                    // the defaulted impl might apply, we don't know
-                    candidates.ambiguous = true;
-                }
-                _ => {
-                    candidates.vec.push(DefaultImplCandidate(def_id.clone()))
-                }
-            }
-        }
-
-        Ok(())
-    }
-
-    /// Search for impls that might apply to `obligation`.
-    fn assemble_candidates_from_object_ty(&mut self,
-                                          obligation: &TraitObligation<'tcx>,
-                                          candidates: &mut SelectionCandidateSet<'tcx>)
-    {
-        debug!("assemble_candidates_from_object_ty(self_ty={:?})",
-               obligation.self_ty().skip_binder());
-
-        // Object-safety candidates are only applicable to object-safe
-        // traits. Including this check is useful because it helps
-        // inference in cases of traits like `BorrowFrom`, which are
-        // not object-safe, and which rely on being able to infer the
-        // self-type from one of the other inputs. Without this check,
-        // these cases wind up being considered ambiguous due to a
-        // (spurious) ambiguity introduced here.
-        let predicate_trait_ref = obligation.predicate.to_poly_trait_ref();
-        if !object_safety::is_object_safe(self.tcx(), predicate_trait_ref.def_id()) {
-            return;
-        }
-
-        self.infcx.commit_if_ok(|snapshot| {
-            let (self_ty, _) =
-                self.infcx().skolemize_late_bound_regions(&obligation.self_ty(), snapshot);
-            let poly_trait_ref = match self_ty.sty {
-                ty::TyTrait(ref data) => {
-                    match self.tcx().lang_items.to_builtin_kind(obligation.predicate.def_id()) {
-                        Some(bound @ ty::BoundSend) | Some(bound @ ty::BoundSync) => {
-                            if data.bounds.builtin_bounds.contains(&bound) {
-                                debug!("assemble_candidates_from_object_ty: matched builtin bound, \
-                                        pushing candidate");
-                                candidates.vec.push(BuiltinObjectCandidate);
-                                return Ok(());
-                            }
-                        }
-                        _ => {}
-                    }
-
-                    data.principal_trait_ref_with_self_ty(self.tcx(), self_ty)
-                }
-                ty::TyInfer(ty::TyVar(_)) => {
-                    debug!("assemble_candidates_from_object_ty: ambiguous");
-                    candidates.ambiguous = true; // could wind up being an object type
-                    return Ok(());
-                }
-                _ => {
-                    return Ok(());
-                }
-            };
-
-            debug!("assemble_candidates_from_object_ty: poly_trait_ref={:?}",
-                   poly_trait_ref);
-
-            // Count only those upcast versions that match the trait-ref
-            // we are looking for. Specifically, check not only for the
-            // correct trait but also for the correct type parameters.
-            // For example, we may be trying to upcast `Foo` to `Bar<i32>`,
-            // but `Foo` is declared as `trait Foo : Bar<u32>`.
-            let upcast_trait_refs =
-                util::supertraits(self.tcx(), poly_trait_ref)
-                .filter(|upcast_trait_ref| {
-                    self.infcx.probe(|_| {
-                        let upcast_trait_ref = upcast_trait_ref.clone();
-                        self.match_poly_trait_ref(obligation, upcast_trait_ref).is_ok()
-                    })
-                })
-                .count();
-
-            if upcast_trait_refs > 1 {
-                // can be upcast in many ways; need more type information
-                candidates.ambiguous = true;
-            } else if upcast_trait_refs == 1 {
-                candidates.vec.push(ObjectCandidate);
-            }
-
-            Ok::<(),()>(())
-        }).unwrap();
-    }
-
-    /// Search for unsizing that might apply to `obligation`.
-    fn assemble_candidates_for_unsizing(&mut self,
-                                        obligation: &TraitObligation<'tcx>,
-                                        candidates: &mut SelectionCandidateSet<'tcx>) {
-        // We currently never consider higher-ranked obligations e.g.
-        // `for<'a> &'a T: Unsize<Trait+'a>` to be implemented. This is not
-        // because they are a priori invalid; we could potentially add support
-        // for them later. It's just that there isn't really a strong need for it.
-        // A `T: Unsize<U>` obligation is always used as part of a `T: CoerceUnsize<U>`
-        // impl, and those are generally applied to concrete types.
-        //
-        // That said, one might try to write a fn with a where clause like
-        //     for<'a> Foo<'a, T>: Unsize<Foo<'a, Trait>>
-        // where the `'a` is kind of orthogonal to the relevant part of the `Unsize`.
-        // Still, you'd be more likely to write that where clause as
-        //     T: Trait
-        // so it seems ok if we (conservatively) fail to accept that `Unsize`
-        // obligation above. Should be possible to extend this in the future.
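-        //
-        // The arms below accept, for instance, obligations such as
-        // `[u8; 4] : Unsize<[u8]>`, `String : Unsize<fmt::Display>`, or an
-        // upcast between two trait-object types that only drops builtin
-        // bounds or tightens the region bound.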
-        let source = match self.tcx().no_late_bound_regions(&obligation.self_ty()) {
-            Some(t) => t,
-            None => {
-                // Don't add any candidates if there are bound regions.
-                return;
-            }
-        };
-        let target = obligation.predicate.0.input_types()[0];
-
-        debug!("assemble_candidates_for_unsizing(source={:?}, target={:?})",
-               source, target);
-
-        let may_apply = match (&source.sty, &target.sty) {
-            // Trait+Kx+'a -> Trait+Ky+'b (upcasts).
-            (&ty::TyTrait(ref data_a), &ty::TyTrait(ref data_b)) => {
-                // Upcasts permit two things:
-                //
-                // 1. Dropping builtin bounds, e.g. `Foo+Send` to `Foo`
-                // 2. Tightening the region bound, e.g. `Foo+'a` to `Foo+'b` if `'a : 'b`
-                //
-                // Note that neither of these changes requires any
-                // change at runtime.  Eventually this will be
-                // generalized.
-                //
-                // We always upcast when we can because of reason
-                // #2 (region bounds).
-                data_a.principal.def_id() == data_b.principal.def_id() &&
-                data_a.bounds.builtin_bounds.is_superset(&data_b.bounds.builtin_bounds)
-            }
-
-            // T -> Trait.
-            (_, &ty::TyTrait(_)) => true,
-
-            // Ambiguous handling is below T -> Trait, because inference
-            // variables can still implement Unsize<Trait> and nested
-            // obligations will have the final say (likely deferred).
-            (&ty::TyInfer(ty::TyVar(_)), _) |
-            (_, &ty::TyInfer(ty::TyVar(_))) => {
-                debug!("assemble_candidates_for_unsizing: ambiguous");
-                candidates.ambiguous = true;
-                false
-            }
-
-            // [T; n] -> [T].
-            (&ty::TyArray(_, _), &ty::TySlice(_)) => true,
-
-            // Struct<T> -> Struct<U>.
-            (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => {
-                def_id_a == def_id_b
-            }
-
-            _ => false
-        };
-
-        if may_apply {
-            candidates.vec.push(BuiltinUnsizeCandidate);
-        }
-    }
-
-    ///////////////////////////////////////////////////////////////////////////
-    // WINNOW
-    //
-    // Winnowing is the process of attempting to resolve ambiguity by
-    // probing further. During the winnowing process, we unify all
-    // type variables (ignoring skolemization) and then we also
-    // attempt to evaluate recursive bounds to see if they are
-    // satisfied.
-
-    /// Returns true if `victim` should be dropped in favor of
-    /// `other`.  Generally speaking we will drop duplicate
-    /// candidates and prefer where-clause candidates.
-    ///
-    /// See the comment for "SelectionCandidate" for more details.
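-    ///
-    /// For example, if both a caller-supplied where-clause (a `ParamCandidate`)
-    /// and an impl could satisfy the obligation, the impl candidate is the one
-    /// that gets dropped.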
-    fn candidate_should_be_dropped_in_favor_of<'o>(&mut self,
-                                                   victim: &SelectionCandidate<'tcx>,
-                                                   other: &SelectionCandidate<'tcx>)
-                                                   -> bool
-    {
-        if victim == other {
-            return true;
-        }
-
-        match other {
-            &ObjectCandidate |
-            &ParamCandidate(_) | &ProjectionCandidate => match victim {
-                &DefaultImplCandidate(..) => {
-                    self.tcx().sess.bug(
-                        "default implementations shouldn't be recorded \
-                         when there are other valid candidates");
-                }
-                &ImplCandidate(..) |
-                &ClosureCandidate(..) |
-                &FnPointerCandidate |
-                &BuiltinObjectCandidate |
-                &BuiltinUnsizeCandidate |
-                &DefaultImplObjectCandidate(..) |
-                &BuiltinCandidate(..) => {
-                    // We have a where-clause so don't go around looking
-                    // for impls.
-                    true
-                }
-                &ObjectCandidate |
-                &ProjectionCandidate => {
-                    // Arbitrarily give param candidates priority
-                    // over projection and object candidates.
-                    true
-                },
-                &ParamCandidate(..) => false,
-            },
-            _ => false
-        }
-    }
-
-    ///////////////////////////////////////////////////////////////////////////
-    // BUILTIN BOUNDS
-    //
-    // These cover the traits that are built-in to the language
-    // itself.  This includes `Copy` and `Sized` for sure. For the
-    // moment, it also includes `Send` / `Sync` and a few others, but
-    // those will hopefully change to library-defined traits in the
-    // future.
-
-    fn assemble_builtin_bound_candidates<'o>(&mut self,
-                                             bound: ty::BuiltinBound,
-                                             obligation: &TraitObligation<'tcx>,
-                                             candidates: &mut SelectionCandidateSet<'tcx>)
-                                             -> Result<(),SelectionError<'tcx>>
-    {
-        match self.builtin_bound(bound, obligation) {
-            Ok(If(..)) => {
-                debug!("builtin_bound: bound={:?}",
-                       bound);
-                candidates.vec.push(BuiltinCandidate(bound));
-                Ok(())
-            }
-            Ok(ParameterBuiltin) => { Ok(()) }
-            Ok(AmbiguousBuiltin) => {
-                debug!("assemble_builtin_bound_candidates: ambiguous builtin");
-                Ok(candidates.ambiguous = true)
-            }
-            Err(e) => { Err(e) }
-        }
-    }
-
-    fn builtin_bound(&mut self,
-                     bound: ty::BuiltinBound,
-                     obligation: &TraitObligation<'tcx>)
-                     -> Result<BuiltinBoundConditions<'tcx>,SelectionError<'tcx>>
-    {
-        // Note: these tests operate on types that may contain bound
-        // regions. To be proper, we ought to skolemize here, but we
-        // forego the skolemization and defer it until the
-        // confirmation step.
-
-        let self_ty = self.infcx.shallow_resolve(obligation.predicate.0.self_ty());
-        return match self_ty.sty {
-            ty::TyInfer(ty::IntVar(_)) |
-            ty::TyInfer(ty::FloatVar(_)) |
-            ty::TyUint(_) |
-            ty::TyInt(_) |
-            ty::TyBool |
-            ty::TyFloat(_) |
-            ty::TyBareFn(..) |
-            ty::TyChar => {
-                // safe for everything
-                ok_if(Vec::new())
-            }
-
-            ty::TyBox(_) => {  // Box<T>
-                match bound {
-                    ty::BoundCopy => Err(Unimplemented),
-
-                    ty::BoundSized => ok_if(Vec::new()),
-
-                    ty::BoundSync | ty::BoundSend => {
-                        self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()");
-                    }
-                }
-            }
-
-            ty::TyRawPtr(..) => {     // *const T, *mut T
-                match bound {
-                    ty::BoundCopy | ty::BoundSized => ok_if(Vec::new()),
-
-                    ty::BoundSync | ty::BoundSend => {
-                        self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()");
-                    }
-                }
-            }
-
-            ty::TyTrait(ref data) => {
-                match bound {
-                    ty::BoundSized => Err(Unimplemented),
-                    ty::BoundCopy => {
-                        if data.bounds.builtin_bounds.contains(&bound) {
-                            ok_if(Vec::new())
-                        } else {
-                            // Recursively check all supertraits to find out if any further
-                            // bounds are required and thus we must fulfill.
-                            let principal =
-                                data.principal_trait_ref_with_self_ty(self.tcx(),
-                                                                      self.tcx().types.err);
-                            let copy_def_id = obligation.predicate.def_id();
-                            for tr in util::supertraits(self.tcx(), principal) {
-                                if tr.def_id() == copy_def_id {
-                                    return ok_if(Vec::new())
-                                }
-                            }
-
-                            Err(Unimplemented)
-                        }
-                    }
-                    ty::BoundSync | ty::BoundSend => {
-                        self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()");
-                    }
-                }
-            }
-
-            ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl }) => {
-                // &mut T or &T
-                match bound {
-                    ty::BoundCopy => {
-                        match mutbl {
-                            // &mut T is affine and hence never `Copy`
-                            hir::MutMutable => Err(Unimplemented),
-
-                            // &T is always copyable
-                            hir::MutImmutable => ok_if(Vec::new()),
-                        }
-                    }
-
-                    ty::BoundSized => ok_if(Vec::new()),
-
-                    ty::BoundSync | ty::BoundSend => {
-                        self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()");
-                    }
-                }
-            }
-
-            ty::TyArray(element_ty, _) => {
-                // [T; n]
-                match bound {
-                    ty::BoundCopy => ok_if(vec![element_ty]),
-                    ty::BoundSized => ok_if(Vec::new()),
-                    ty::BoundSync | ty::BoundSend => {
-                        self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()");
-                    }
-                }
-            }
-
-            ty::TyStr | ty::TySlice(_) => {
-                match bound {
-                    ty::BoundSync | ty::BoundSend => {
-                        self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()");
-                    }
-
-                    ty::BoundCopy | ty::BoundSized => Err(Unimplemented),
-                }
-            }
-
-            // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
-            ty::TyTuple(ref tys) => ok_if(tys.clone()),
-
-            ty::TyClosure(_, ref substs) => {
-                // FIXME -- This case is tricky. In the case of by-ref
-                // closures particularly, we need the results of
-                // inference to decide how to reflect the type of each
-                // upvar (the upvar may have type `T`, but the runtime
-                // type could be `&mut`, `&`, or just `T`). For now,
-                // though, we'll do this unsoundly and assume that all
-                // captures are by value. Really what we ought to do
-                // is reserve judgement and then intertwine this
-                // analysis with closure inference.
-
-                // Unboxed closures shouldn't be
-                // implicitly copyable
-                if bound == ty::BoundCopy {
-                    return Ok(ParameterBuiltin);
-                }
-
-                // Upvars are always local variables or references to
-                // local variables, and local variables cannot be
-                // unsized, so the closure struct as a whole must be
-                // Sized.
-                if bound == ty::BoundSized {
-                    return ok_if(Vec::new());
-                }
-
-                ok_if(substs.upvar_tys.clone())
-            }
-
-            ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
-                let types: Vec<Ty> = def.all_fields().map(|f| {
-                    f.ty(self.tcx(), substs)
-                }).collect();
-                nominal(bound, types)
-            }
-
-            ty::TyProjection(_) | ty::TyParam(_) => {
-                // Note: A type parameter is only considered to meet a
-                // particular bound if there is a where clause telling
-                // us that it does, and that case is handled by
-                // `assemble_candidates_from_caller_bounds()`.
-                Ok(ParameterBuiltin)
-            }
-
-            ty::TyInfer(ty::TyVar(_)) => {
-                // Unbound type variable. Might or might not have
-                // applicable impls and so forth, depending on what
-                // those type variables wind up being bound to.
-                debug!("assemble_builtin_bound_candidates: ambiguous builtin");
-                Ok(AmbiguousBuiltin)
-            }
-
-            ty::TyError => ok_if(Vec::new()),
-
-            ty::TyInfer(ty::FreshTy(_))
-            | ty::TyInfer(ty::FreshIntTy(_))
-            | ty::TyInfer(ty::FreshFloatTy(_)) => {
-                self.tcx().sess.bug(
-                    &format!(
-                        "asked to assemble builtin bounds of unexpected type: {:?}",
-                        self_ty));
-            }
-        };
-
-        fn ok_if<'tcx>(v: Vec<Ty<'tcx>>)
-                       -> Result<BuiltinBoundConditions<'tcx>, SelectionError<'tcx>> {
-            Ok(If(ty::Binder(v)))
-        }
-
-        fn nominal<'cx, 'tcx>(bound: ty::BuiltinBound,
-                              types: Vec<Ty<'tcx>>)
-                              -> Result<BuiltinBoundConditions<'tcx>, SelectionError<'tcx>>
-        {
-            // First check for markers and other nonsense.
-            match bound {
-                // Fallback to whatever user-defined impls exist in this case.
-                ty::BoundCopy => Ok(ParameterBuiltin),
-
-                // Sized if all the component types are sized.
-                ty::BoundSized => ok_if(types),
-
-                // Shouldn't be coming through here.
-                ty::BoundSend | ty::BoundSync => unreachable!(),
-            }
-        }
-    }
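The match in `builtin_bound` above encodes which types meet the builtin `Copy` and `Sized` bounds structurally. A standalone sketch of the surface-level behaviour those rules produce (an assumed illustration, not part of this file):

    // Illustrative only: user-visible consequences of the Copy rules above.
    fn assert_copy<T: Copy>() {}

    fn main() {
        assert_copy::<&i32>();        // &T is always Copy
        assert_copy::<[i32; 4]>();    // [T; n] is Copy when T is
        assert_copy::<(u8, char)>();  // a tuple meets any bound all components meet
        // assert_copy::<&mut i32>(); // &mut T is affine and never Copy
        // assert_copy::<Box<i32>>(); // Box<T> is never Copy
    }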
-
-    /// For default impls, we need to break apart a type into its
-    /// "constituent types" -- meaning, the types that it contains.
-    ///
-    /// Here are some (simple) examples:
-    ///
-    /// ```
-    /// (i32, u32) -> [i32, u32]
-    /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32]
-    /// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
-    /// Zed<i32> where enum Zed<T> { A(T), B(u32) } -> [i32, u32]
-    /// ```
-    fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec<Ty<'tcx>> {
-        match t.sty {
-            ty::TyUint(_) |
-            ty::TyInt(_) |
-            ty::TyBool |
-            ty::TyFloat(_) |
-            ty::TyBareFn(..) |
-            ty::TyStr |
-            ty::TyError |
-            ty::TyInfer(ty::IntVar(_)) |
-            ty::TyInfer(ty::FloatVar(_)) |
-            ty::TyChar => {
-                Vec::new()
-            }
-
-            ty::TyTrait(..) |
-            ty::TyParam(..) |
-            ty::TyProjection(..) |
-            ty::TyInfer(ty::TyVar(_)) |
-            ty::TyInfer(ty::FreshTy(_)) |
-            ty::TyInfer(ty::FreshIntTy(_)) |
-            ty::TyInfer(ty::FreshFloatTy(_)) => {
-                self.tcx().sess.bug(
-                    &format!(
-                        "asked to assemble constituent types of unexpected type: {:?}",
-                        t));
-            }
-
-            ty::TyBox(referent_ty) => {  // Box<T>
-                vec![referent_ty]
-            }
-
-            ty::TyRawPtr(ty::TypeAndMut { ty: element_ty, ..}) |
-            ty::TyRef(_, ty::TypeAndMut { ty: element_ty, ..}) => {
-                vec![element_ty]
-            },
-
-            ty::TyArray(element_ty, _) | ty::TySlice(element_ty) => {
-                vec![element_ty]
-            }
-
-            ty::TyTuple(ref tys) => {
-                // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
-                tys.clone()
-            }
-
-            ty::TyClosure(_, ref substs) => {
-                // FIXME(#27086). We are invariant w/r/t our
-                // substs.func_substs, but we don't see them as
-                // constituent types; this seems RIGHT but also like
-                // something that a normal type couldn't simulate. Is
-                // this just a gap with the way that PhantomData and
-                // OIBIT interact? That is, there is no way to say
-                // "make me invariant with respect to this TYPE, but
-                // do not act as though I can reach it"
-                substs.upvar_tys.clone()
-            }
-
-            // for `PhantomData<T>`, we pass `T`
-            ty::TyStruct(def, substs) if def.is_phantom_data() => {
-                substs.types.get_slice(TypeSpace).to_vec()
-            }
-
-            ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
-                def.all_fields()
-                    .map(|f| f.ty(self.tcx(), substs))
-                    .collect()
-            }
-        }
-    }
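One consequence of the `PhantomData<T>` arm above is that a zero-sized marker field still contributes `T` to the constituent types used for default-impl (auto trait) checking. A standalone sketch under that assumption:

    // Illustrative only: PhantomData<Rc<u8>> makes the wrapper count as
    // "containing" an Rc<u8> for auto-trait purposes.
    use std::marker::PhantomData;
    use std::rc::Rc;

    #[allow(dead_code)]
    struct Wrapper(PhantomData<Rc<u8>>);

    fn assert_send<T: Send>() {}

    fn main() {
        assert_send::<PhantomData<u8>>(); // fine: u8 is Send
        // assert_send::<Wrapper>();      // would fail: Rc<u8> is not Send
    }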
-
-    fn collect_predicates_for_types(&mut self,
-                                    obligation: &TraitObligation<'tcx>,
-                                    trait_def_id: DefId,
-                                    types: ty::Binder<Vec<Ty<'tcx>>>)
-                                    -> Vec<PredicateObligation<'tcx>>
-    {
-        let derived_cause = match self.tcx().lang_items.to_builtin_kind(trait_def_id) {
-            Some(_) => {
-                self.derived_cause(obligation, BuiltinDerivedObligation)
-            },
-            None => {
-                self.derived_cause(obligation, ImplDerivedObligation)
-            }
-        };
-
-        // Because the types were potentially derived from
-        // higher-ranked obligations they may reference late-bound
-        // regions. For example, `for<'a> Foo<&'a int> : Copy` would
-        // yield a type like `for<'a> &'a int`. In general, we
-        // maintain the invariant that we never manipulate bound
-        // regions, so we have to process these bound regions somehow.
-        //
-        // The strategy is to:
-        //
-        // 1. Instantiate those regions to skolemized regions (e.g.,
-        //    `for<'a> &'a int` becomes `&'0 int`).
-        // 2. Produce something like `&'0 int : Copy`
-        // 3. Re-bind the regions back to `for<'a> &'a int : Copy`
-
-        // Move the binder into the individual types
-        let bound_types: Vec<ty::Binder<Ty<'tcx>>> =
-            types.skip_binder()
-                 .iter()
-                 .map(|&nested_ty| ty::Binder(nested_ty))
-                 .collect();
-
-        // For each type, produce a vector of resulting obligations
-        let obligations: Result<Vec<Vec<_>>, _> = bound_types.iter().map(|nested_ty| {
-            self.infcx.commit_if_ok(|snapshot| {
-                let (skol_ty, skol_map) =
-                    self.infcx().skolemize_late_bound_regions(nested_ty, snapshot);
-                let Normalized { value: normalized_ty, mut obligations } =
-                    project::normalize_with_depth(self,
-                                                  obligation.cause.clone(),
-                                                  obligation.recursion_depth + 1,
-                                                  &skol_ty);
-                let skol_obligation =
-                    util::predicate_for_trait_def(self.tcx(),
-                                                  derived_cause.clone(),
-                                                  trait_def_id,
-                                                  obligation.recursion_depth + 1,
-                                                  normalized_ty,
-                                                  vec![]);
-                obligations.push(skol_obligation);
-                Ok(self.infcx().plug_leaks(skol_map, snapshot, &obligations))
-            })
-        }).collect();
-
-        // Flatten those vectors (couldn't do it above due to `collect`)
-        match obligations {
-            Ok(obligations) => obligations.into_iter().flat_map(|o| o).collect(),
-            Err(ErrorReported) => Vec::new(),
-        }
-    }
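The higher-ranked obligations discussed in the comment above arise from bounds like the one in this standalone sketch (an assumed example, not compiler code); proving such a bound requires looking inside the `for<'a>` binder, which is what the skolemize/re-bind steps implement:

    // Illustrative only: a bound that produces a higher-ranked obligation.
    fn apply_to_local<F>(f: F) -> i32
        where F: for<'a> Fn(&'a i32) -> i32
    {
        let x = 41;
        f(&x)
    }

    fn main() {
        println!("{}", apply_to_local(|r| *r + 1)); // prints 42
    }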
-
-    ///////////////////////////////////////////////////////////////////////////
-    // CONFIRMATION
-    //
-    // Confirmation unifies the output type parameters of the trait
-    // with the values found in the obligation, possibly yielding a
-    // type error.  See `README.md` for more details.
-
-    fn confirm_candidate(&mut self,
-                         obligation: &TraitObligation<'tcx>,
-                         candidate: SelectionCandidate<'tcx>)
-                         -> Result<Selection<'tcx>,SelectionError<'tcx>>
-    {
-        debug!("confirm_candidate({:?}, {:?})",
-               obligation,
-               candidate);
-
-        match candidate {
-            BuiltinCandidate(builtin_bound) => {
-                Ok(VtableBuiltin(
-                    try!(self.confirm_builtin_candidate(obligation, builtin_bound))))
-            }
-
-            ParamCandidate(param) => {
-                let obligations = self.confirm_param_candidate(obligation, param);
-                Ok(VtableParam(obligations))
-            }
-
-            DefaultImplCandidate(trait_def_id) => {
-                let data = self.confirm_default_impl_candidate(obligation, trait_def_id);
-                Ok(VtableDefaultImpl(data))
-            }
-
-            DefaultImplObjectCandidate(trait_def_id) => {
-                let data = self.confirm_default_impl_object_candidate(obligation, trait_def_id);
-                Ok(VtableDefaultImpl(data))
-            }
-
-            ImplCandidate(impl_def_id) => {
-                let vtable_impl =
-                    try!(self.confirm_impl_candidate(obligation, impl_def_id));
-                Ok(VtableImpl(vtable_impl))
-            }
-
-            ClosureCandidate(closure_def_id, substs) => {
-                let vtable_closure =
-                    try!(self.confirm_closure_candidate(obligation, closure_def_id, substs));
-                Ok(VtableClosure(vtable_closure))
-            }
-
-            BuiltinObjectCandidate => {
-                // This indicates something like `(Trait+Send) :
-                // Send`. In this case, we know that this holds
-                // because that's what the object type is telling us,
-                // and there are really no additional obligations to
-                // prove and no types in particular to unify etc.
-                Ok(VtableParam(Vec::new()))
-            }
-
-            ObjectCandidate => {
-                let data = self.confirm_object_candidate(obligation);
-                Ok(VtableObject(data))
-            }
-
-            FnPointerCandidate => {
-                let fn_type =
-                    try!(self.confirm_fn_pointer_candidate(obligation));
-                Ok(VtableFnPointer(fn_type))
-            }
-
-            ProjectionCandidate => {
-                self.confirm_projection_candidate(obligation);
-                Ok(VtableParam(Vec::new()))
-            }
-
-            BuiltinUnsizeCandidate => {
-                let data = try!(self.confirm_builtin_unsize_candidate(obligation));
-                Ok(VtableBuiltin(data))
-            }
-        }
-    }
-
-    fn confirm_projection_candidate(&mut self,
-                                    obligation: &TraitObligation<'tcx>)
-    {
-        let _: Result<(),()> =
-            self.infcx.commit_if_ok(|snapshot| {
-                let result =
-                    self.match_projection_obligation_against_bounds_from_trait(obligation,
-                                                                               snapshot);
-                assert!(result);
-                Ok(())
-            });
-    }
-
-    fn confirm_param_candidate(&mut self,
-                               obligation: &TraitObligation<'tcx>,
-                               param: ty::PolyTraitRef<'tcx>)
-                               -> Vec<PredicateObligation<'tcx>>
-    {
-        debug!("confirm_param_candidate({:?},{:?})",
-               obligation,
-               param);
-
-        // During evaluation, we already checked that this
-        // where-clause trait-ref could be unified with the obligation
-        // trait-ref. Repeat that unification now without any
-        // transactional boundary; it should not fail.
-        match self.match_where_clause_trait_ref(obligation, param.clone()) {
-            Ok(obligations) => obligations,
-            Err(()) => {
-                self.tcx().sess.bug(
-                    &format!("Where clause `{:?}` was applicable to `{:?}` but now is not",
-                             param,
-                             obligation));
-            }
-        }
-    }
-
-    fn confirm_builtin_candidate(&mut self,
-                                 obligation: &TraitObligation<'tcx>,
-                                 bound: ty::BuiltinBound)
-                                 -> Result<VtableBuiltinData<PredicateObligation<'tcx>>,
-                                           SelectionError<'tcx>>
-    {
-        debug!("confirm_builtin_candidate({:?})",
-               obligation);
-
-        match try!(self.builtin_bound(bound, obligation)) {
-            If(nested) => Ok(self.vtable_builtin_data(obligation, bound, nested)),
-            AmbiguousBuiltin | ParameterBuiltin => {
-                self.tcx().sess.span_bug(
-                    obligation.cause.span,
-                    &format!("builtin bound for {:?} was ambig",
-                            obligation));
-            }
-        }
-    }
-
-    fn vtable_builtin_data(&mut self,
-                           obligation: &TraitObligation<'tcx>,
-                           bound: ty::BuiltinBound,
-                           nested: ty::Binder<Vec<Ty<'tcx>>>)
-                           -> VtableBuiltinData<PredicateObligation<'tcx>>
-    {
-        debug!("vtable_builtin_data(obligation={:?}, bound={:?}, nested={:?})",
-               obligation, bound, nested);
-
-        let trait_def = match self.tcx().lang_items.from_builtin_kind(bound) {
-            Ok(def_id) => def_id,
-            Err(_) => {
-                self.tcx().sess.bug("builtin trait definition not found");
-            }
-        };
-
-        let obligations = self.collect_predicates_for_types(obligation, trait_def, nested);
-
-        debug!("vtable_builtin_data: obligations={:?}",
-               obligations);
-
-        VtableBuiltinData { nested: obligations }
-    }
-
-    /// This handles the case where an `impl Foo for ..` impl is being used.
-    /// The idea is that the impl applies to `X : Foo` if the following conditions are met:
-    ///
-    /// 1. For each constituent type `Y` in `X`, `Y : Foo` holds
-    /// 2. For each where-clause `C` declared on `Foo`, `[Self => X] C` holds.
-    fn confirm_default_impl_candidate(&mut self,
-                                      obligation: &TraitObligation<'tcx>,
-                                      trait_def_id: DefId)
-                                      -> VtableDefaultImplData<PredicateObligation<'tcx>>
-    {
-        debug!("confirm_default_impl_candidate({:?}, {:?})",
-               obligation,
-               trait_def_id);
-
-        // binder is moved below
-        let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
-        let types = self.constituent_types_for_ty(self_ty);
-        self.vtable_default_impl(obligation, trait_def_id, ty::Binder(types))
-    }
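Condition 1 above is visible at the surface through `Send`, which is implemented by a default impl: a type meets the bound exactly when every constituent type does. A standalone sketch (assumed example):

    // Illustrative only: Send holds iff all constituent field types are Send.
    use std::rc::Rc;

    #[allow(dead_code)]
    struct AllSend { x: i32, y: String }  // every field type is Send
    #[allow(dead_code)]
    struct NotSend { x: i32, y: Rc<u8> }  // Rc<u8> opts out

    fn assert_send<T: Send>() {}

    fn main() {
        assert_send::<AllSend>();
        // assert_send::<NotSend>(); // would fail: a constituent type is not Send
    }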
-
-    fn confirm_default_impl_object_candidate(&mut self,
-                                             obligation: &TraitObligation<'tcx>,
-                                             trait_def_id: DefId)
-                                             -> VtableDefaultImplData<PredicateObligation<'tcx>>
-    {
-        debug!("confirm_default_impl_object_candidate({:?}, {:?})",
-               obligation,
-               trait_def_id);
-
-        assert!(self.tcx().has_attr(trait_def_id, "rustc_reflect_like"));
-
-        // OK to skip binder, it is reintroduced below
-        let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
-        match self_ty.sty {
-            ty::TyTrait(ref data) => {
-                // OK to skip the binder, it is reintroduced below
-                let input_types = data.principal.skip_binder().substs.types.get_slice(TypeSpace);
-                let assoc_types = data.bounds.projection_bounds
-                                             .iter()
-                                             .map(|pb| pb.skip_binder().ty);
-                let all_types: Vec<_> = input_types.iter().cloned()
-                                                          .chain(assoc_types)
-                                                          .collect();
-
-                // reintroduce the two binding levels we skipped, then flatten into one
-                let all_types = ty::Binder(ty::Binder(all_types));
-                let all_types = self.tcx().flatten_late_bound_regions(&all_types);
-
-                self.vtable_default_impl(obligation, trait_def_id, all_types)
-            }
-            _ => {
-                self.tcx().sess.bug(
-                    &format!(
-                        "asked to confirm default object implementation for non-object type: {:?}",
-                        self_ty));
-            }
-        }
-    }
-
-    /// See `confirm_default_impl_candidate`
-    fn vtable_default_impl(&mut self,
-                           obligation: &TraitObligation<'tcx>,
-                           trait_def_id: DefId,
-                           nested: ty::Binder<Vec<Ty<'tcx>>>)
-                           -> VtableDefaultImplData<PredicateObligation<'tcx>>
-    {
-        debug!("vtable_default_impl_data: nested={:?}", nested);
-
-        let mut obligations = self.collect_predicates_for_types(obligation,
-                                                                trait_def_id,
-                                                                nested);
-
-        let trait_obligations: Result<Vec<_>,()> = self.infcx.commit_if_ok(|snapshot| {
-            let poly_trait_ref = obligation.predicate.to_poly_trait_ref();
-            let (trait_ref, skol_map) =
-                self.infcx().skolemize_late_bound_regions(&poly_trait_ref, snapshot);
-            Ok(self.impl_or_trait_obligations(obligation.cause.clone(),
-                                              obligation.recursion_depth + 1,
-                                              trait_def_id,
-                                              &trait_ref.substs,
-                                              skol_map,
-                                              snapshot))
-        });
-
-        // no errors in the code above
-        obligations.append(&mut trait_obligations.unwrap());
-
-        debug!("vtable_default_impl_data: obligations={:?}", obligations);
-
-        VtableDefaultImplData {
-            trait_def_id: trait_def_id,
-            nested: obligations
-        }
-    }
-
-    fn confirm_impl_candidate(&mut self,
-                              obligation: &TraitObligation<'tcx>,
-                              impl_def_id: DefId)
-                              -> Result<VtableImplData<'tcx, PredicateObligation<'tcx>>,
-                                        SelectionError<'tcx>>
-    {
-        debug!("confirm_impl_candidate({:?},{:?})",
-               obligation,
-               impl_def_id);
-
-        // First, create the substitutions by matching the impl again,
-        // this time not in a probe.
-        self.infcx.commit_if_ok(|snapshot| {
-            let (substs, skol_map) =
-                self.rematch_impl(impl_def_id, obligation,
-                                  snapshot);
-            debug!("confirm_impl_candidate substs={:?}", substs);
-            Ok(self.vtable_impl(impl_def_id, substs, obligation.cause.clone(),
-                                obligation.recursion_depth + 1, skol_map, snapshot))
-        })
-    }
-
-    fn vtable_impl(&mut self,
-                   impl_def_id: DefId,
-                   mut substs: Normalized<'tcx, Substs<'tcx>>,
-                   cause: ObligationCause<'tcx>,
-                   recursion_depth: usize,
-                   skol_map: infer::SkolemizationMap,
-                   snapshot: &infer::CombinedSnapshot)
-                   -> VtableImplData<'tcx, PredicateObligation<'tcx>>
-    {
-        debug!("vtable_impl(impl_def_id={:?}, substs={:?}, recursion_depth={}, skol_map={:?})",
-               impl_def_id,
-               substs,
-               recursion_depth,
-               skol_map);
-
-        let mut impl_obligations =
-            self.impl_or_trait_obligations(cause,
-                                           recursion_depth,
-                                           impl_def_id,
-                                           &substs.value,
-                                           skol_map,
-                                           snapshot);
-
-        debug!("vtable_impl: impl_def_id={:?} impl_obligations={:?}",
-               impl_def_id,
-               impl_obligations);
-
-        // Because of RFC447, the impl-trait-ref and obligations
-        // are sufficient to determine the impl substs, without
-        // relying on projections in the impl-trait-ref.
-        //
-        // e.g. `impl<U: Tr, V: Iterator<Item=U>> Foo<<U as Tr>::T> for V`
-        impl_obligations.append(&mut substs.obligations);
-
-        VtableImplData { impl_def_id: impl_def_id,
-                         substs: substs.value,
-                         nested: impl_obligations }
-    }
-
-    fn confirm_object_candidate(&mut self,
-                                obligation: &TraitObligation<'tcx>)
-                                -> VtableObjectData<'tcx>
-    {
-        debug!("confirm_object_candidate({:?})",
-               obligation);
-
-        // FIXME skipping binder here seems wrong -- we should
-        // probably flatten the binder from the obligation and the
-        // binder from the object. Have to try to make a broken test
-        // case that results. -nmatsakis
-        let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
-        let poly_trait_ref = match self_ty.sty {
-            ty::TyTrait(ref data) => {
-                data.principal_trait_ref_with_self_ty(self.tcx(), self_ty)
-            }
-            _ => {
-                self.tcx().sess.span_bug(obligation.cause.span,
-                                         "object candidate with non-object");
-            }
-        };
-
-        let mut upcast_trait_ref = None;
-        let vtable_base;
-
-        {
-            // We want to find the first supertrait in the list of
-            // supertraits that we can unify with, and do that
-            // unification. We know that there is exactly one in the list
-            // where we can unify because otherwise select would have
-            // reported an ambiguity. (When we do find a match, also
-            // record it for later.)
-            let nonmatching =
-                util::supertraits(self.tcx(), poly_trait_ref)
-                .take_while(|&t| {
-                    match
-                        self.infcx.commit_if_ok(
-                            |_| self.match_poly_trait_ref(obligation, t))
-                    {
-                        Ok(_) => { upcast_trait_ref = Some(t); false }
-                        Err(_) => { true }
-                    }
-                });
-
-            // Additionally, for each of the nonmatching predicates that
-            // we pass over, we sum up the number of vtable
-            // entries, so that we can compute the offset for the selected
-            // trait.
-            vtable_base =
-                nonmatching.map(|t| util::count_own_vtable_entries(self.tcx(), t))
-                           .sum();
-
-        }
-
-        VtableObjectData {
-            upcast_trait_ref: upcast_trait_ref.unwrap(),
-            vtable_base: vtable_base,
-        }
-    }
-
-    fn confirm_fn_pointer_candidate(&mut self,
-                                    obligation: &TraitObligation<'tcx>)
-                                    -> Result<ty::Ty<'tcx>,SelectionError<'tcx>>
-    {
-        debug!("confirm_fn_pointer_candidate({:?})",
-               obligation);
-
-        // ok to skip binder; it is reintroduced below
-        let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
-        let sig = self_ty.fn_sig();
-        let trait_ref =
-            util::closure_trait_ref_and_return_type(self.tcx(),
-                                                    obligation.predicate.def_id(),
-                                                    self_ty,
-                                                    sig,
-                                                    util::TupleArgumentsFlag::Yes)
-            .map_bound(|(trait_ref, _)| trait_ref);
-
-        try!(self.confirm_poly_trait_refs(obligation.cause.clone(),
-                                          obligation.predicate.to_poly_trait_ref(),
-                                          trait_ref));
-        Ok(self_ty)
-    }
-
-    fn confirm_closure_candidate(&mut self,
-                                 obligation: &TraitObligation<'tcx>,
-                                 closure_def_id: DefId,
-                                 substs: &ty::ClosureSubsts<'tcx>)
-                                 -> Result<VtableClosureData<'tcx, PredicateObligation<'tcx>>,
-                                           SelectionError<'tcx>>
-    {
-        debug!("confirm_closure_candidate({:?},{:?},{:?})",
-               obligation,
-               closure_def_id,
-               substs);
-
-        let Normalized {
-            value: trait_ref,
-            obligations
-        } = self.closure_trait_ref(obligation, closure_def_id, substs);
-
-        debug!("confirm_closure_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})",
-               closure_def_id,
-               trait_ref,
-               obligations);
-
-        try!(self.confirm_poly_trait_refs(obligation.cause.clone(),
-                                          obligation.predicate.to_poly_trait_ref(),
-                                          trait_ref));
-
-        Ok(VtableClosureData {
-            closure_def_id: closure_def_id,
-            substs: substs.clone(),
-            nested: obligations
-        })
-    }
-
-    /// In the case of closure types and fn pointers,
-    /// we currently treat the input type parameters on the trait as
-    /// outputs. This means that when we have a match we have only
-    /// considered the self type, so we have to go back and make sure
-    /// to relate the argument types too.  This is kind of wrong, but
-    /// since we control the full set of impls, also not that wrong,
-    /// and it DOES yield better error messages (since we don't report
-    /// errors as if there is no applicable impl, but rather report
-    /// errors about mismatched argument types).
-    ///
-    /// Here is an example. Imagine we have a closure expression
-    /// and we desugared it so that the type of the expression is
-    /// `Closure`, and `Closure` expects an int as argument. Then it
-    /// is "as if" the compiler generated this impl:
-    ///
-    ///     impl Fn(int) for Closure { ... }
-    ///
-    /// Now imagine our obligation is `Fn(usize) for Closure`. So far
-    /// we have matched the self-type `Closure`. At this point we'll
-    /// compare the `int` to `usize` and generate an error.
-    ///
-    /// Note that this checking occurs *after* the impl has selected,
-    /// because these output type parameters should not affect the
-    /// selection of the impl. Therefore, if there is a mismatch, we
-    /// report an error to the user.
-    fn confirm_poly_trait_refs(&mut self,
-                               obligation_cause: ObligationCause,
-                               obligation_trait_ref: ty::PolyTraitRef<'tcx>,
-                               expected_trait_ref: ty::PolyTraitRef<'tcx>)
-                               -> Result<(), SelectionError<'tcx>>
-    {
-        let origin = TypeOrigin::RelateOutputImplTypes(obligation_cause.span);
-
-        let obligation_trait_ref = obligation_trait_ref.clone();
-        match self.infcx.sub_poly_trait_refs(false,
-                                             origin,
-                                             expected_trait_ref.clone(),
-                                             obligation_trait_ref.clone()) {
-            Ok(()) => Ok(()),
-            Err(e) => Err(OutputTypeParameterMismatch(expected_trait_ref, obligation_trait_ref, e))
-        }
-    }
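The behaviour described in the doc comment above can be seen at the surface: once the closure's self type has matched the `Fn` obligation, an argument-type mismatch is reported as a type error rather than as a missing impl. A standalone sketch (assumed example):

    // Illustrative only: selection succeeds on the self type; the argument
    // types are related afterwards, so mismatches become type errors.
    fn call_with_i32<F: Fn(i32) -> i32>(f: F) -> i32 { f(1) }

    fn main() {
        println!("{}", call_with_i32(|x| x + 1));
        // call_with_i32(|x: usize| x + 1); // error: mismatched types, not "no impl found"
    }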
-
-    fn confirm_builtin_unsize_candidate(&mut self,
-                                        obligation: &TraitObligation<'tcx>,)
-                                        -> Result<VtableBuiltinData<PredicateObligation<'tcx>>,
-                                                  SelectionError<'tcx>> {
-        let tcx = self.tcx();
-
-        // assemble_candidates_for_unsizing should ensure there are no late bound
-        // regions here. See the comment there for more details.
-        let source = self.infcx.shallow_resolve(
-            tcx.no_late_bound_regions(&obligation.self_ty()).unwrap());
-        let target = self.infcx.shallow_resolve(obligation.predicate.0.input_types()[0]);
-
-        debug!("confirm_builtin_unsize_candidate(source={:?}, target={:?})",
-               source, target);
-
-        let mut nested = vec![];
-        match (&source.sty, &target.sty) {
-            // Trait+Kx+'a -> Trait+Ky+'b (upcasts).
-            (&ty::TyTrait(ref data_a), &ty::TyTrait(ref data_b)) => {
-                // See assemble_candidates_for_unsizing for more info.
-                let bounds = ty::ExistentialBounds {
-                    region_bound: data_b.bounds.region_bound,
-                    builtin_bounds: data_b.bounds.builtin_bounds,
-                    projection_bounds: data_a.bounds.projection_bounds.clone(),
-                };
-
-                let new_trait = tcx.mk_trait(data_a.principal.clone(), bounds);
-                let origin = TypeOrigin::Misc(obligation.cause.span);
-                if self.infcx.sub_types(false, origin, new_trait, target).is_err() {
-                    return Err(Unimplemented);
-                }
-
-                // Register one obligation for 'a: 'b.
-                let cause = ObligationCause::new(obligation.cause.span,
-                                                 obligation.cause.body_id,
-                                                 ObjectCastObligation(target));
-                let outlives = ty::OutlivesPredicate(data_a.bounds.region_bound,
-                                                     data_b.bounds.region_bound);
-                nested.push(Obligation::with_depth(cause,
-                                                   obligation.recursion_depth + 1,
-                                                   ty::Binder(outlives).to_predicate()));
-            }
-
-            // T -> Trait.
-            (_, &ty::TyTrait(ref data)) => {
-                let object_did = data.principal_def_id();
-                if !object_safety::is_object_safe(tcx, object_did) {
-                    return Err(TraitNotObjectSafe(object_did));
-                }
-
-                let cause = ObligationCause::new(obligation.cause.span,
-                                                 obligation.cause.body_id,
-                                                 ObjectCastObligation(target));
-                let mut push = |predicate| {
-                    nested.push(Obligation::with_depth(cause.clone(),
-                                                       obligation.recursion_depth + 1,
-                                                       predicate));
-                };
-
-                // Create the obligation for casting from T to Trait.
-                push(data.principal_trait_ref_with_self_ty(tcx, source).to_predicate());
-
-                // We can only make objects from sized types.
-                let mut builtin_bounds = data.bounds.builtin_bounds;
-                builtin_bounds.insert(ty::BoundSized);
-
-                // Create additional obligations for all the various builtin
-                // bounds attached to the object cast. (In other words, if the
-                // object type is Foo+Send, this would create an obligation
-                // for the Send check.)
-                for bound in &builtin_bounds {
-                    if let Ok(tr) = util::trait_ref_for_builtin_bound(tcx, bound, source) {
-                        push(tr.to_predicate());
-                    } else {
-                        return Err(Unimplemented);
-                    }
-                }
-
-                // Create obligations for the projection predicates.
-                for bound in data.projection_bounds_with_self_ty(tcx, source) {
-                    push(bound.to_predicate());
-                }
-
-                // If the type is `Foo+'a`, ensure that the type
-                // being cast to `Foo+'a` outlives `'a`:
-                let outlives = ty::OutlivesPredicate(source,
-                                                     data.bounds.region_bound);
-                push(ty::Binder(outlives).to_predicate());
-            }
-
-            // [T; n] -> [T].
-            (&ty::TyArray(a, _), &ty::TySlice(b)) => {
-                let origin = TypeOrigin::Misc(obligation.cause.span);
-                if self.infcx.sub_types(false, origin, a, b).is_err() {
-                    return Err(Unimplemented);
-                }
-            }
-
-            // Struct<T> -> Struct<U>.
-            (&ty::TyStruct(def, substs_a), &ty::TyStruct(_, substs_b)) => {
-                let fields = def
-                    .all_fields()
-                    .map(|f| f.unsubst_ty())
-                    .collect::<Vec<_>>();
-
-                // The last field of the structure has to exist and contain type parameters.
-                let field = if let Some(&field) = fields.last() {
-                    field
-                } else {
-                    return Err(Unimplemented);
-                };
-                let mut ty_params = vec![];
-                for ty in field.walk() {
-                    if let ty::TyParam(p) = ty.sty {
-                        assert!(p.space == TypeSpace);
-                        let idx = p.idx as usize;
-                        if !ty_params.contains(&idx) {
-                            ty_params.push(idx);
-                        }
-                    }
-                }
-                if ty_params.is_empty() {
-                    return Err(Unimplemented);
-                }
-
-                // Replace type parameters used in unsizing with
-                // TyError and ensure they do not affect any other fields.
-                // This could be checked after type collection for any struct
-                // with a potentially unsized trailing field.
-                let mut new_substs = substs_a.clone();
-                for &i in &ty_params {
-                    new_substs.types.get_mut_slice(TypeSpace)[i] = tcx.types.err;
-                }
-                for &ty in fields.split_last().unwrap().1 {
-                    if ty.subst(tcx, &new_substs).references_error() {
-                        return Err(Unimplemented);
-                    }
-                }
-
-                // Extract Field<T> and Field<U> from Struct<T> and Struct<U>.
-                let inner_source = field.subst(tcx, substs_a);
-                let inner_target = field.subst(tcx, substs_b);
-
-                // Check that the source structure with the target's
-                // type parameters is a subtype of the target.
-                for &i in &ty_params {
-                    let param_b = *substs_b.types.get(TypeSpace, i);
-                    new_substs.types.get_mut_slice(TypeSpace)[i] = param_b;
-                }
-                let new_struct = tcx.mk_struct(def, tcx.mk_substs(new_substs));
-                let origin = TypeOrigin::Misc(obligation.cause.span);
-                if self.infcx.sub_types(false, origin, new_struct, target).is_err() {
-                    return Err(Unimplemented);
-                }
-
-                // Construct the nested Field<T>: Unsize<Field<U>> predicate.
-                nested.push(util::predicate_for_trait_def(tcx,
-                    obligation.cause.clone(),
-                    obligation.predicate.def_id(),
-                    obligation.recursion_depth + 1,
-                    inner_source,
-                    vec![inner_target]));
-            }
-
-            _ => unreachable!()
-        };
-
-        Ok(VtableBuiltinData { nested: nested })
-    }
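The three source/target shapes handled above correspond to the following surface-level coercions, shown as a standalone sketch in the syntax of this era (bare trait objects; an assumed illustration, not part of this file):

    // Illustrative only: array -> slice, concrete type -> trait object, and
    // Struct<T> -> Struct<U> where only the last field is unsized.
    use std::fmt::Debug;

    struct Wrapper<T: ?Sized> { len: usize, data: T }

    fn main() {
        // [T; n] -> [T]
        let arr: &[u8; 4] = &[1, 2, 3, 4];
        let slice: &[u8] = arr;

        // T -> Trait (the trait must be object safe)
        let obj: &Debug = &5u32;

        // Struct<[u8; 4]> -> Struct<[u8]> via the trailing field
        let w: &Wrapper<[u8; 4]> = &Wrapper { len: 4, data: [1, 2, 3, 4] };
        let uw: &Wrapper<[u8]> = w;

        println!("{} {:?} {}", slice.len(), obj, uw.len);
    }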
-
-    ///////////////////////////////////////////////////////////////////////////
-    // Matching
-    //
-    // Matching is a common path used for both evaluation and
-    // confirmation.  It basically unifies types that appear in impls
-    // and traits. This does affect the surrounding environment;
-    // therefore, when used during evaluation, match routines must be
-    // run inside of a `probe()` so that their side-effects are
-    // contained.
-
-    fn rematch_impl(&mut self,
-                    impl_def_id: DefId,
-                    obligation: &TraitObligation<'tcx>,
-                    snapshot: &infer::CombinedSnapshot)
-                    -> (Normalized<'tcx, Substs<'tcx>>, infer::SkolemizationMap)
-    {
-        match self.match_impl(impl_def_id, obligation, snapshot) {
-            Ok((substs, skol_map)) => (substs, skol_map),
-            Err(()) => {
-                self.tcx().sess.bug(
-                    &format!("Impl {:?} was matchable against {:?} but now is not",
-                            impl_def_id,
-                            obligation));
-            }
-        }
-    }
-
-    fn match_impl(&mut self,
-                  impl_def_id: DefId,
-                  obligation: &TraitObligation<'tcx>,
-                  snapshot: &infer::CombinedSnapshot)
-                  -> Result<(Normalized<'tcx, Substs<'tcx>>,
-                             infer::SkolemizationMap), ()>
-    {
-        let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();
-
-        // Before we create the substitutions and everything, first
-        // consider a "quick reject". This avoids creating more types
-        // and so forth that we need to.
-        if self.fast_reject_trait_refs(obligation, &impl_trait_ref) {
-            return Err(());
-        }
-
-        let (skol_obligation, skol_map) = self.infcx().skolemize_late_bound_regions(
-            &obligation.predicate,
-            snapshot);
-        let skol_obligation_trait_ref = skol_obligation.trait_ref;
-
-        let impl_substs = util::fresh_type_vars_for_impl(self.infcx,
-                                                         obligation.cause.span,
-                                                         impl_def_id);
-
-        let impl_trait_ref = impl_trait_ref.subst(self.tcx(),
-                                                  &impl_substs);
-
-        let impl_trait_ref =
-            project::normalize_with_depth(self,
-                                          obligation.cause.clone(),
-                                          obligation.recursion_depth + 1,
-                                          &impl_trait_ref);
-
-        debug!("match_impl(impl_def_id={:?}, obligation={:?}, \
-               impl_trait_ref={:?}, skol_obligation_trait_ref={:?})",
-               impl_def_id,
-               obligation,
-               impl_trait_ref,
-               skol_obligation_trait_ref);
-
-        let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span);
-        if let Err(e) = self.infcx.eq_trait_refs(false,
-                                                 origin,
-                                                 impl_trait_ref.value.clone(),
-                                                 skol_obligation_trait_ref) {
-            debug!("match_impl: failed eq_trait_refs due to `{}`", e);
-            return Err(());
-        }
-
-        if let Err(e) = self.infcx.leak_check(&skol_map, snapshot) {
-            debug!("match_impl: failed leak check due to `{}`", e);
-            return Err(());
-        }
-
-        debug!("match_impl: success impl_substs={:?}", impl_substs);
-        Ok((Normalized {
-            value: impl_substs,
-            obligations: impl_trait_ref.obligations
-        }, skol_map))
-    }
-
-    fn fast_reject_trait_refs(&mut self,
-                              obligation: &TraitObligation,
-                              impl_trait_ref: &ty::TraitRef)
-                              -> bool
-    {
-        // We can avoid creating type variables and doing the full
-        // substitution if we find that any of the input types, when
-        // simplified, do not match.
-
-        obligation.predicate.0.input_types().iter()
-            .zip(impl_trait_ref.input_types())
-            .any(|(&obligation_ty, &impl_ty)| {
-                let simplified_obligation_ty =
-                    fast_reject::simplify_type(self.tcx(), obligation_ty, true);
-                let simplified_impl_ty =
-                    fast_reject::simplify_type(self.tcx(), impl_ty, false);
-
-                simplified_obligation_ty.is_some() &&
-                    simplified_impl_ty.is_some() &&
-                    simplified_obligation_ty != simplified_impl_ty
-            })
-    }
-
-    /// Normalize `where_clause_trait_ref` and try to match it against
-    /// `obligation`.  If successful, return any predicates that
-    /// result from the normalization. Normalization is necessary
-    /// because where-clauses are stored in the parameter environment
-    /// unnormalized.
-    fn match_where_clause_trait_ref(&mut self,
-                                    obligation: &TraitObligation<'tcx>,
-                                    where_clause_trait_ref: ty::PolyTraitRef<'tcx>)
-                                    -> Result<Vec<PredicateObligation<'tcx>>,()>
-    {
-        try!(self.match_poly_trait_ref(obligation, where_clause_trait_ref));
-        Ok(Vec::new())
-    }
-
-    /// Returns `Ok` if `poly_trait_ref` being true implies that the
-    /// obligation is satisfied.
-    fn match_poly_trait_ref(&self,
-                            obligation: &TraitObligation<'tcx>,
-                            poly_trait_ref: ty::PolyTraitRef<'tcx>)
-                            -> Result<(),()>
-    {
-        debug!("match_poly_trait_ref: obligation={:?} poly_trait_ref={:?}",
-               obligation,
-               poly_trait_ref);
-
-        let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span);
-        match self.infcx.sub_poly_trait_refs(false,
-                                             origin,
-                                             poly_trait_ref,
-                                             obligation.predicate.to_poly_trait_ref()) {
-            Ok(()) => Ok(()),
-            Err(_) => Err(()),
-        }
-    }
-
-    ///////////////////////////////////////////////////////////////////////////
-    // Miscellany
-
-    fn match_fresh_trait_refs(&self,
-                              previous: &ty::PolyTraitRef<'tcx>,
-                              current: &ty::PolyTraitRef<'tcx>)
-                              -> bool
-    {
-        let mut matcher = ty::_match::Match::new(self.tcx());
-        matcher.relate(previous, current).is_ok()
-    }
-
-    fn push_stack<'o,'s:'o>(&mut self,
-                            previous_stack: TraitObligationStackList<'s, 'tcx>,
-                            obligation: &'o TraitObligation<'tcx>)
-                            -> TraitObligationStack<'o, 'tcx>
-    {
-        let fresh_trait_ref =
-            obligation.predicate.to_poly_trait_ref().fold_with(&mut self.freshener);
-
-        TraitObligationStack {
-            obligation: obligation,
-            fresh_trait_ref: fresh_trait_ref,
-            previous: previous_stack,
-        }
-    }
-
-    fn closure_trait_ref_unnormalized(&mut self,
-                                      obligation: &TraitObligation<'tcx>,
-                                      closure_def_id: DefId,
-                                      substs: &ty::ClosureSubsts<'tcx>)
-                                      -> ty::PolyTraitRef<'tcx>
-    {
-        let closure_type = self.infcx.closure_type(closure_def_id, substs);
-        let ty::Binder((trait_ref, _)) =
-            util::closure_trait_ref_and_return_type(self.tcx(),
-                                                    obligation.predicate.def_id(),
-                                                    obligation.predicate.0.self_ty(), // (1)
-                                                    &closure_type.sig,
-                                                    util::TupleArgumentsFlag::No);
-        // (1) Feels icky to skip the binder here, but OTOH we know
-        // that the self-type is an unboxed closure type and hence is
-        // in fact unparameterized (or at least does not reference any
-        // regions bound in the obligation). Still probably some
-        // refactoring could make this nicer.
-
-        ty::Binder(trait_ref)
-    }
-
-    fn closure_trait_ref(&mut self,
-                         obligation: &TraitObligation<'tcx>,
-                         closure_def_id: DefId,
-                         substs: &ty::ClosureSubsts<'tcx>)
-                         -> Normalized<'tcx, ty::PolyTraitRef<'tcx>>
-    {
-        let trait_ref = self.closure_trait_ref_unnormalized(
-            obligation, closure_def_id, substs);
-
-        // A closure signature can contain associated types which
-        // must be normalized.
-        normalize_with_depth(self,
-                             obligation.cause.clone(),
-                             obligation.recursion_depth+1,
-                             &trait_ref)
-    }
-
-    /// Returns the obligations that are implied by instantiating an
-    /// impl or trait. The obligations are substituted and fully
-    /// normalized. This is used when confirming an impl or default
-    /// impl.
-    fn impl_or_trait_obligations(&mut self,
-                                 cause: ObligationCause<'tcx>,
-                                 recursion_depth: usize,
-                                 def_id: DefId, // of impl or trait
-                                 substs: &Substs<'tcx>, // for impl or trait
-                                 skol_map: infer::SkolemizationMap,
-                                 snapshot: &infer::CombinedSnapshot)
-                                 -> Vec<PredicateObligation<'tcx>>
-    {
-        debug!("impl_or_trait_obligations(def_id={:?})", def_id);
-        let tcx = self.tcx();
-
-        // To allow for one-pass evaluation of the nested obligation,
-        // each predicate must be preceded by the obligations required
-        // to normalize it.
-        // for example, if we have:
-        //    impl<U: Iterator, V: Iterator<Item=U>> Foo for V where U::Item: Copy
-        // the impl will have the following predicates:
-        //    <V as Iterator>::Item = U,
-        //    U: Iterator, U: Sized,
-        //    V: Iterator, V: Sized,
-        //    <U as Iterator>::Item: Copy
-        // When we substitute, say, `V => IntoIter<u32>, U => $0`, the last
-        // obligation will normalize to `<$0 as Iterator>::Item = $1` and
-        // `$1: Copy`, so we must ensure the obligations are emitted in
-        // that order.
-        let predicates = tcx
-            .lookup_predicates(def_id)
-            .predicates.iter()
-            .flat_map(|predicate| {
-                let predicate =
-                    normalize_with_depth(self, cause.clone(), recursion_depth,
-                                         &predicate.subst(tcx, substs));
-                predicate.obligations.into_iter().chain(
-                    Some(Obligation {
-                        cause: cause.clone(),
-                        recursion_depth: recursion_depth,
-                        predicate: predicate.value
-                    }))
-            }).collect();
-        self.infcx().plug_leaks(skol_map, snapshot, &predicates)
-    }
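The impl sketched in the comment above compiles in roughly this standalone form (an assumed approximation): the `U::Item: Copy` predicate only becomes checkable after `<V as Iterator>::Item = U` has been normalized, which is why each predicate is preceded by its normalization obligations.

    // Illustrative only: an impl whose predicates must be normalized in order.
    trait Foo {}

    impl<U, V> Foo for V
        where V: Iterator<Item = U>,
              U: Iterator,
              U::Item: Copy
    {}

    fn assert_foo<T: Foo>() {}

    fn main() {
        // An Iterator whose Item is itself an Iterator over a Copy type.
        assert_foo::<std::vec::IntoIter<std::vec::IntoIter<u32>>>();
    }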
-
-    #[allow(unused_comparisons)]
-    fn derived_cause(&self,
-                     obligation: &TraitObligation<'tcx>,
-                     variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>)
-                     -> ObligationCause<'tcx>
-    {
-        /*!
-         * Creates a cause for obligations that are derived from
-         * `obligation` by a recursive search (e.g., for a builtin
-         * bound, or eventually an `impl Foo for ..`). If `obligation`
-         * is itself a derived obligation, this is just a clone, but
-         * otherwise we create a "derived obligation" cause so as to
-         * keep track of the original root obligation for error
-         * reporting.
-         */
-
-        // NOTE(flaper87): As of now, it keeps track of the whole error
-        // chain. Ideally, we should have a way to configure this either
-        // by using -Z verbose or just a CLI argument.
-        if obligation.recursion_depth >= 0 {
-            let derived_cause = DerivedObligationCause {
-                parent_trait_ref: obligation.predicate.to_poly_trait_ref(),
-                parent_code: Rc::new(obligation.cause.code.clone())
-            };
-            let derived_code = variant(derived_cause);
-            ObligationCause::new(obligation.cause.span, obligation.cause.body_id, derived_code)
-        } else {
-            obligation.cause.clone()
-        }
-    }
-}
-
-impl<'tcx> SelectionCache<'tcx> {
-    pub fn new() -> SelectionCache<'tcx> {
-        SelectionCache {
-            hashmap: RefCell::new(FnvHashMap())
-        }
-    }
-}
-
-impl<'tcx> EvaluationCache<'tcx> {
-    pub fn new() -> EvaluationCache<'tcx> {
-        EvaluationCache {
-            hashmap: RefCell::new(FnvHashMap())
-        }
-    }
-}
-
-impl<'o,'tcx> TraitObligationStack<'o,'tcx> {
-    fn list(&'o self) -> TraitObligationStackList<'o,'tcx> {
-        TraitObligationStackList::with(self)
-    }
-
-    fn iter(&'o self) -> TraitObligationStackList<'o,'tcx> {
-        self.list()
-    }
-}
-
-#[derive(Copy, Clone)]
-struct TraitObligationStackList<'o,'tcx:'o> {
-    head: Option<&'o TraitObligationStack<'o,'tcx>>
-}
-
-impl<'o,'tcx> TraitObligationStackList<'o,'tcx> {
-    fn empty() -> TraitObligationStackList<'o,'tcx> {
-        TraitObligationStackList { head: None }
-    }
-
-    fn with(r: &'o TraitObligationStack<'o,'tcx>) -> TraitObligationStackList<'o,'tcx> {
-        TraitObligationStackList { head: Some(r) }
-    }
-}
-
-impl<'o,'tcx> Iterator for TraitObligationStackList<'o,'tcx>{
-    type Item = &'o TraitObligationStack<'o,'tcx>;
-
-    fn next(&mut self) -> Option<&'o TraitObligationStack<'o,'tcx>> {
-        match self.head {
-            Some(o) => {
-                *self = o.previous;
-                Some(o)
-            }
-            None => None
-        }
-    }
-}
-
-impl<'o,'tcx> fmt::Debug for TraitObligationStack<'o,'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "TraitObligationStack({:?})", self.obligation)
-    }
-}
-
-impl EvaluationResult {
-    fn may_apply(&self) -> bool {
-        match *self {
-            EvaluatedToOk |
-            EvaluatedToAmbig |
-            EvaluatedToUnknown => true,
-
-            EvaluatedToErr => false
-        }
-    }
-}
-
-impl MethodMatchResult {
-    pub fn may_apply(&self) -> bool {
-        match *self {
-            MethodMatched(_) => true,
-            MethodAmbiguous(_) => true,
-            MethodDidNotMatch => false,
-        }
-    }
-}
diff --git a/src/librustc/middle/traits/structural_impls.rs b/src/librustc/middle/traits/structural_impls.rs
deleted file mode 100644 (file)
index 453420e..0000000
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use middle::traits;
-use middle::traits::project::Normalized;
-use middle::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
-
-use std::fmt;
-
-// structural impls for the structs in middle::traits
-
-impl<'tcx, T: fmt::Debug> fmt::Debug for Normalized<'tcx, T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "Normalized({:?},{:?})",
-               self.value,
-               self.obligations)
-    }
-}
-
-impl<'tcx> fmt::Debug for traits::RegionObligation<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "RegionObligation(sub_region={:?}, sup_type={:?})",
-               self.sub_region,
-               self.sup_type)
-    }
-}
-impl<'tcx, O: fmt::Debug> fmt::Debug for traits::Obligation<'tcx, O> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "Obligation(predicate={:?},depth={})",
-               self.predicate,
-               self.recursion_depth)
-    }
-}
-
-impl<'tcx, N: fmt::Debug> fmt::Debug for traits::Vtable<'tcx, N> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match *self {
-            super::VtableImpl(ref v) =>
-                write!(f, "{:?}", v),
-
-            super::VtableDefaultImpl(ref t) =>
-                write!(f, "{:?}", t),
-
-            super::VtableClosure(ref d) =>
-                write!(f, "{:?}", d),
-
-            super::VtableFnPointer(ref d) =>
-                write!(f, "VtableFnPointer({:?})", d),
-
-            super::VtableObject(ref d) =>
-                write!(f, "{:?}", d),
-
-            super::VtableParam(ref n) =>
-                write!(f, "VtableParam({:?})", n),
-
-            super::VtableBuiltin(ref d) =>
-                write!(f, "{:?}", d)
-        }
-    }
-}
-
-impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableImplData<'tcx, N> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "VtableImpl(impl_def_id={:?}, substs={:?}, nested={:?})",
-               self.impl_def_id,
-               self.substs,
-               self.nested)
-    }
-}
-
-impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableClosureData<'tcx, N> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "VtableClosure(closure_def_id={:?}, substs={:?}, nested={:?})",
-               self.closure_def_id,
-               self.substs,
-               self.nested)
-    }
-}
-
-impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableBuiltinData<N> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "VtableBuiltin(nested={:?})", self.nested)
-    }
-}
-
-impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableDefaultImplData<N> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "VtableDefaultImplData(trait_def_id={:?}, nested={:?})",
-               self.trait_def_id,
-               self.nested)
-    }
-}
-
-impl<'tcx> fmt::Debug for traits::VtableObjectData<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "VtableObject(upcast={:?}, vtable_base={})",
-               self.upcast_trait_ref,
-               self.vtable_base)
-    }
-}
-
-impl<'tcx> fmt::Debug for traits::FulfillmentError<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "FulfillmentError({:?},{:?})",
-               self.obligation,
-               self.code)
-    }
-}
-
-impl<'tcx> fmt::Debug for traits::FulfillmentErrorCode<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match *self {
-            super::CodeSelectionError(ref e) => write!(f, "{:?}", e),
-            super::CodeProjectionError(ref e) => write!(f, "{:?}", e),
-            super::CodeAmbiguity => write!(f, "Ambiguity")
-        }
-    }
-}
-
-impl<'tcx> fmt::Debug for traits::MismatchedProjectionTypes<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "MismatchedProjectionTypes({:?})", self.err)
-    }
-}
-
-impl<'tcx, O: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Obligation<'tcx, O>
-{
-    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        traits::Obligation {
-            cause: self.cause.clone(),
-            recursion_depth: self.recursion_depth,
-            predicate: self.predicate.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.predicate.visit_with(visitor)
-    }
-}
-
-impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableImplData<'tcx, N> {
-    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        traits::VtableImplData {
-            impl_def_id: self.impl_def_id,
-            substs: self.substs.fold_with(folder),
-            nested: self.nested.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.substs.visit_with(visitor) || self.nested.visit_with(visitor)
-    }
-}
-
-impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableClosureData<'tcx, N> {
-    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        traits::VtableClosureData {
-            closure_def_id: self.closure_def_id,
-            substs: self.substs.fold_with(folder),
-            nested: self.nested.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.substs.visit_with(visitor) || self.nested.visit_with(visitor)
-    }
-}
-
-impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableDefaultImplData<N> {
-    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        traits::VtableDefaultImplData {
-            trait_def_id: self.trait_def_id,
-            nested: self.nested.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.nested.visit_with(visitor)
-    }
-}
-
-impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableBuiltinData<N> {
-    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        traits::VtableBuiltinData {
-            nested: self.nested.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.nested.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for traits::VtableObjectData<'tcx> {
-    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        traits::VtableObjectData {
-            upcast_trait_ref: self.upcast_trait_ref.fold_with(folder),
-            vtable_base: self.vtable_base
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.upcast_trait_ref.visit_with(visitor)
-    }
-}
-
-impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Vtable<'tcx, N> {
-    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        match *self {
-            traits::VtableImpl(ref v) => traits::VtableImpl(v.fold_with(folder)),
-            traits::VtableDefaultImpl(ref t) => traits::VtableDefaultImpl(t.fold_with(folder)),
-            traits::VtableClosure(ref d) => {
-                traits::VtableClosure(d.fold_with(folder))
-            }
-            traits::VtableFnPointer(ref d) => {
-                traits::VtableFnPointer(d.fold_with(folder))
-            }
-            traits::VtableParam(ref n) => traits::VtableParam(n.fold_with(folder)),
-            traits::VtableBuiltin(ref d) => traits::VtableBuiltin(d.fold_with(folder)),
-            traits::VtableObject(ref d) => traits::VtableObject(d.fold_with(folder)),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        match *self {
-            traits::VtableImpl(ref v) => v.visit_with(visitor),
-            traits::VtableDefaultImpl(ref t) => t.visit_with(visitor),
-            traits::VtableClosure(ref d) => d.visit_with(visitor),
-            traits::VtableFnPointer(ref d) => d.visit_with(visitor),
-            traits::VtableParam(ref n) => n.visit_with(visitor),
-            traits::VtableBuiltin(ref d) => d.visit_with(visitor),
-            traits::VtableObject(ref d) => d.visit_with(visitor),
-        }
-    }
-}
-
-impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Normalized<'tcx, T> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        Normalized {
-            value: self.value.fold_with(folder),
-            obligations: self.obligations.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.value.visit_with(visitor) || self.obligations.visit_with(visitor)
-    }
-}
diff --git a/src/librustc/middle/traits/util.rs b/src/librustc/middle/traits/util.rs
deleted file mode 100644 (file)
index c50c9e9..0000000
+++ /dev/null
@@ -1,477 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use middle::def_id::DefId;
-use middle::infer::InferCtxt;
-use middle::subst::Substs;
-use middle::ty::{self, Ty, ToPredicate, ToPolyTraitRef};
-use syntax::codemap::Span;
-use util::common::ErrorReported;
-use util::nodemap::FnvHashSet;
-
-use super::{Obligation, ObligationCause, PredicateObligation};
-
-struct PredicateSet<'a,'tcx:'a> {
-    tcx: &'a ty::ctxt<'tcx>,
-    set: FnvHashSet<ty::Predicate<'tcx>>,
-}
-
-impl<'a,'tcx> PredicateSet<'a,'tcx> {
-    fn new(tcx: &'a ty::ctxt<'tcx>) -> PredicateSet<'a,'tcx> {
-        PredicateSet { tcx: tcx, set: FnvHashSet() }
-    }
-
-    fn insert(&mut self, pred: &ty::Predicate<'tcx>) -> bool {
-        // We have to be careful here because we want
-        //
-        //    for<'a> Foo<&'a int>
-        //
-        // and
-        //
-        //    for<'b> Foo<&'b int>
-        //
-        // to be considered equivalent. So normalize all late-bound
-        // regions before we throw things into the underlying set.
-        let normalized_pred = match *pred {
-            ty::Predicate::Trait(ref data) =>
-                ty::Predicate::Trait(self.tcx.anonymize_late_bound_regions(data)),
-
-            ty::Predicate::Equate(ref data) =>
-                ty::Predicate::Equate(self.tcx.anonymize_late_bound_regions(data)),
-
-            ty::Predicate::RegionOutlives(ref data) =>
-                ty::Predicate::RegionOutlives(self.tcx.anonymize_late_bound_regions(data)),
-
-            ty::Predicate::TypeOutlives(ref data) =>
-                ty::Predicate::TypeOutlives(self.tcx.anonymize_late_bound_regions(data)),
-
-            ty::Predicate::Projection(ref data) =>
-                ty::Predicate::Projection(self.tcx.anonymize_late_bound_regions(data)),
-
-            ty::Predicate::WellFormed(data) =>
-                ty::Predicate::WellFormed(data),
-
-            ty::Predicate::ObjectSafe(data) =>
-                ty::Predicate::ObjectSafe(data),
-        };
-        self.set.insert(normalized_pred)
-    }
-}
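
The comment in `insert` above is about treating `for<'a> Foo<&'a int>` and `for<'b> Foo<&'b int>` as the same predicate once late-bound regions are anonymized. At the surface level the analogous fact is that two higher-ranked spellings name the same type; a minimal standalone sketch with hypothetical names (`takes_hrtb`, `deref`), not the compiler's internal API:

fn takes_hrtb<F: for<'a> Fn(&'a i32) -> i32>(f: F) -> i32 {
    f(&41) + 1
}

fn deref(x: &i32) -> i32 { *x }

fn main() {
    // `for<'a> fn(&'a i32) -> i32` and `for<'b> fn(&'b i32) -> i32` are the
    // same type; only the bound-region name differs, as in the set above.
    let g: for<'b> fn(&'b i32) -> i32 = deref;
    assert_eq!(takes_hrtb(g), 42);
}
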
-
-///////////////////////////////////////////////////////////////////////////
-// `Elaboration` iterator
-///////////////////////////////////////////////////////////////////////////
-
-/// "Elaboration" is the process of identifying all the predicates that
-/// are implied by a source predicate. Currently this basically means
-/// walking the "supertraits" and other similar assumptions. For
-/// example, if we know that `T : Ord`, the elaborator would deduce
-/// that `T : PartialOrd` holds as well. Similarly, if we have `trait
-/// Foo : 'static`, and we know that `T : Foo`, then we know that `T :
-/// 'static`.
-pub struct Elaborator<'cx, 'tcx:'cx> {
-    tcx: &'cx ty::ctxt<'tcx>,
-    stack: Vec<ty::Predicate<'tcx>>,
-    visited: PredicateSet<'cx,'tcx>,
-}
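
The elaboration described above can also be seen from the surface language: writing only `T: Ord` makes the supertrait obligation `T: PartialOrd` available too. A minimal standalone sketch (the helper `smallest_first` is hypothetical, not part of this module):

use std::cmp::Ordering;

fn smallest_first<T: Ord>(a: T, b: T) -> (T, T) {
    // Only `T: Ord` is written, but elaborating that bound also yields the
    // supertrait obligation `T: PartialOrd`, so `partial_cmp` is usable here.
    match a.partial_cmp(&b) {
        Some(Ordering::Greater) => (b, a),
        _ => (a, b),
    }
}

fn main() {
    assert_eq!(smallest_first(3, 1), (1, 3));
}
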
-
-pub fn elaborate_trait_ref<'cx, 'tcx>(
-    tcx: &'cx ty::ctxt<'tcx>,
-    trait_ref: ty::PolyTraitRef<'tcx>)
-    -> Elaborator<'cx, 'tcx>
-{
-    elaborate_predicates(tcx, vec![trait_ref.to_predicate()])
-}
-
-pub fn elaborate_trait_refs<'cx, 'tcx>(
-    tcx: &'cx ty::ctxt<'tcx>,
-    trait_refs: &[ty::PolyTraitRef<'tcx>])
-    -> Elaborator<'cx, 'tcx>
-{
-    let predicates = trait_refs.iter()
-                               .map(|trait_ref| trait_ref.to_predicate())
-                               .collect();
-    elaborate_predicates(tcx, predicates)
-}
-
-pub fn elaborate_predicates<'cx, 'tcx>(
-    tcx: &'cx ty::ctxt<'tcx>,
-    mut predicates: Vec<ty::Predicate<'tcx>>)
-    -> Elaborator<'cx, 'tcx>
-{
-    let mut visited = PredicateSet::new(tcx);
-    predicates.retain(|pred| visited.insert(pred));
-    Elaborator { tcx: tcx, stack: predicates, visited: visited }
-}
-
-impl<'cx, 'tcx> Elaborator<'cx, 'tcx> {
-    pub fn filter_to_traits(self) -> FilterToTraits<Elaborator<'cx, 'tcx>> {
-        FilterToTraits::new(self)
-    }
-
-    fn push(&mut self, predicate: &ty::Predicate<'tcx>) {
-        match *predicate {
-            ty::Predicate::Trait(ref data) => {
-                // Predicates declared on the trait.
-                let predicates = self.tcx.lookup_super_predicates(data.def_id());
-
-                let mut predicates: Vec<_> =
-                    predicates.predicates
-                              .iter()
-                              .map(|p| p.subst_supertrait(self.tcx, &data.to_poly_trait_ref()))
-                              .collect();
-
-                debug!("super_predicates: data={:?} predicates={:?}",
-                       data, predicates);
-
-                // Only keep those bounds that we haven't already
-                // seen.  This is necessary to prevent infinite
-                // recursion in some cases.  One common case is when
-                // people define `trait Sized: Sized { }` rather than `trait
-                // Sized { }`.
-                predicates.retain(|r| self.visited.insert(r));
-
-                self.stack.extend(predicates);
-            }
-            ty::Predicate::WellFormed(..) => {
-                // Currently, we do not elaborate WF predicates,
-                // although we easily could.
-            }
-            ty::Predicate::ObjectSafe(..) => {
-                // Currently, we do not elaborate object-safe
-                // predicates.
-            }
-            ty::Predicate::Equate(..) => {
-                // Currently, we do not "elaborate" predicates like
-                // `X == Y`, though conceivably we might. For example,
-                // `&X == &Y` implies that `X == Y`.
-            }
-            ty::Predicate::Projection(..) => {
-                // Nothing to elaborate in a projection predicate.
-            }
-            ty::Predicate::RegionOutlives(..) |
-            ty::Predicate::TypeOutlives(..) => {
-                // Currently, we do not "elaborate" predicates like
-                // `'a : 'b` or `T : 'a`.  We could conceivably do
-                // more here.  For example,
-                //
-                //     &'a int : 'b
-                //
-                // implies that
-                //
-                //     'a : 'b
-                //
-                // and we could get even more if we took WF
-                // constraints into account. For example,
-                //
-                //     &'a &'b int : 'c
-                //
-                // implies that
-                //
-                //     'b : 'a
-                //     'a : 'c
-            }
-        }
-    }
-}
-
-impl<'cx, 'tcx> Iterator for Elaborator<'cx, 'tcx> {
-    type Item = ty::Predicate<'tcx>;
-
-    fn next(&mut self) -> Option<ty::Predicate<'tcx>> {
-        // Extract next item from top-most stack frame, if any.
-        let next_predicate = match self.stack.pop() {
-            Some(predicate) => predicate,
-            None => {
-                // No more stack frames. Done.
-                return None;
-            }
-        };
-        self.push(&next_predicate);
-        return Some(next_predicate);
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Supertrait iterator
-///////////////////////////////////////////////////////////////////////////
-
-pub type Supertraits<'cx, 'tcx> = FilterToTraits<Elaborator<'cx, 'tcx>>;
-
-pub fn supertraits<'cx, 'tcx>(tcx: &'cx ty::ctxt<'tcx>,
-                              trait_ref: ty::PolyTraitRef<'tcx>)
-                              -> Supertraits<'cx, 'tcx>
-{
-    elaborate_trait_ref(tcx, trait_ref).filter_to_traits()
-}
-
-pub fn transitive_bounds<'cx, 'tcx>(tcx: &'cx ty::ctxt<'tcx>,
-                                    bounds: &[ty::PolyTraitRef<'tcx>])
-                                    -> Supertraits<'cx, 'tcx>
-{
-    elaborate_trait_refs(tcx, bounds).filter_to_traits()
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Iterator over def-ids of supertraits
-
-pub struct SupertraitDefIds<'cx, 'tcx:'cx> {
-    tcx: &'cx ty::ctxt<'tcx>,
-    stack: Vec<DefId>,
-    visited: FnvHashSet<DefId>,
-}
-
-pub fn supertrait_def_ids<'cx, 'tcx>(tcx: &'cx ty::ctxt<'tcx>,
-                                     trait_def_id: DefId)
-                                     -> SupertraitDefIds<'cx, 'tcx>
-{
-    SupertraitDefIds {
-        tcx: tcx,
-        stack: vec![trait_def_id],
-        visited: Some(trait_def_id).into_iter().collect(),
-    }
-}
-
-impl<'cx, 'tcx> Iterator for SupertraitDefIds<'cx, 'tcx> {
-    type Item = DefId;
-
-    fn next(&mut self) -> Option<DefId> {
-        let def_id = match self.stack.pop() {
-            Some(def_id) => def_id,
-            None => { return None; }
-        };
-
-        let predicates = self.tcx.lookup_super_predicates(def_id);
-        let visited = &mut self.visited;
-        self.stack.extend(
-            predicates.predicates
-                      .iter()
-                      .filter_map(|p| p.to_opt_poly_trait_ref())
-                      .map(|t| t.def_id())
-                      .filter(|&super_def_id| visited.insert(super_def_id)));
-        Some(def_id)
-    }
-}
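
The iterator above is a plain work-stack traversal with a visited set, so a supertrait reachable along two paths (a "diamond") is yielded only once. A toy standalone version, with hypothetical names and `u32` ids standing in for `DefId`:

use std::collections::HashSet;

// Yield each reachable trait id exactly once, driven by a work stack.
fn supertrait_ids<F>(root: u32, supers: F) -> Vec<u32>
    where F: Fn(u32) -> Vec<u32>
{
    let mut visited: HashSet<u32> = Some(root).into_iter().collect();
    let mut stack = vec![root];
    let mut out = Vec::new();
    while let Some(id) = stack.pop() {
        out.push(id);
        for s in supers(id) {
            if visited.insert(s) {      // true only the first time we see `s`
                stack.push(s);
            }
        }
    }
    out
}

fn main() {
    // 0: Sub, 1: Left, 2: Right, 3: Base; a diamond Sub -> {Left, Right} -> Base.
    let supers = |id: u32| match id {
        0 => vec![1, 2],
        1 | 2 => vec![3],
        _ => Vec::new(),
    };
    let ids = supertrait_ids(0, supers);
    assert_eq!(ids.iter().filter(|&&id| id == 3).count(), 1); // Base yielded once
}
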
-
-///////////////////////////////////////////////////////////////////////////
-// Other
-///////////////////////////////////////////////////////////////////////////
-
-/// A filter around an iterator of predicates that makes it yield up
-/// just trait references.
-pub struct FilterToTraits<I> {
-    base_iterator: I
-}
-
-impl<I> FilterToTraits<I> {
-    fn new(base: I) -> FilterToTraits<I> {
-        FilterToTraits { base_iterator: base }
-    }
-}
-
-impl<'tcx,I:Iterator<Item=ty::Predicate<'tcx>>> Iterator for FilterToTraits<I> {
-    type Item = ty::PolyTraitRef<'tcx>;
-
-    fn next(&mut self) -> Option<ty::PolyTraitRef<'tcx>> {
-        loop {
-            match self.base_iterator.next() {
-                None => {
-                    return None;
-                }
-                Some(ty::Predicate::Trait(data)) => {
-                    return Some(data.to_poly_trait_ref());
-                }
-                Some(_) => {
-                }
-            }
-        }
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Other
-///////////////////////////////////////////////////////////////////////////
-
-// determine the `self` type, using fresh variables for all variables
-// declared on the impl declaration, e.g., `impl<A,B> for Box<[(A,B)]>`
-// would return ($0, $1) where $0 and $1 are freshly instantiated type
-// variables.
-pub fn fresh_type_vars_for_impl<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
-                                          span: Span,
-                                          impl_def_id: DefId)
-                                          -> Substs<'tcx>
-{
-    let tcx = infcx.tcx;
-    let impl_generics = tcx.lookup_item_type(impl_def_id).generics;
-    infcx.fresh_substs_for_generics(span, &impl_generics)
-}
-
-/// See `super::obligations_for_generics`
-pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>,
-                                     recursion_depth: usize,
-                                     generic_bounds: &ty::InstantiatedPredicates<'tcx>)
-                                     -> Vec<PredicateObligation<'tcx>>
-{
-    debug!("predicates_for_generics(generic_bounds={:?})",
-           generic_bounds);
-
-    generic_bounds.predicates.iter().map(|predicate| {
-        Obligation { cause: cause.clone(),
-                     recursion_depth: recursion_depth,
-                     predicate: predicate.clone() }
-    }).collect()
-}
-
-pub fn trait_ref_for_builtin_bound<'tcx>(
-    tcx: &ty::ctxt<'tcx>,
-    builtin_bound: ty::BuiltinBound,
-    param_ty: Ty<'tcx>)
-    -> Result<ty::TraitRef<'tcx>, ErrorReported>
-{
-    match tcx.lang_items.from_builtin_kind(builtin_bound) {
-        Ok(def_id) => {
-            Ok(ty::TraitRef {
-                def_id: def_id,
-                substs: tcx.mk_substs(Substs::empty().with_self_ty(param_ty))
-            })
-        }
-        Err(e) => {
-            tcx.sess.err(&e);
-            Err(ErrorReported)
-        }
-    }
-}
-
-
-pub fn predicate_for_trait_ref<'tcx>(
-    cause: ObligationCause<'tcx>,
-    trait_ref: ty::TraitRef<'tcx>,
-    recursion_depth: usize)
-    -> PredicateObligation<'tcx>
-{
-    Obligation {
-        cause: cause,
-        recursion_depth: recursion_depth,
-        predicate: trait_ref.to_predicate(),
-    }
-}
-
-pub fn predicate_for_trait_def<'tcx>(
-    tcx: &ty::ctxt<'tcx>,
-    cause: ObligationCause<'tcx>,
-    trait_def_id: DefId,
-    recursion_depth: usize,
-    param_ty: Ty<'tcx>,
-    ty_params: Vec<Ty<'tcx>>)
-    -> PredicateObligation<'tcx>
-{
-    let trait_ref = ty::TraitRef {
-        def_id: trait_def_id,
-        substs: tcx.mk_substs(Substs::new_trait(ty_params, vec![], param_ty))
-    };
-    predicate_for_trait_ref(cause, trait_ref, recursion_depth)
-}
-
-pub fn predicate_for_builtin_bound<'tcx>(
-    tcx: &ty::ctxt<'tcx>,
-    cause: ObligationCause<'tcx>,
-    builtin_bound: ty::BuiltinBound,
-    recursion_depth: usize,
-    param_ty: Ty<'tcx>)
-    -> Result<PredicateObligation<'tcx>, ErrorReported>
-{
-    let trait_ref = try!(trait_ref_for_builtin_bound(tcx, builtin_bound, param_ty));
-    Ok(predicate_for_trait_ref(cause, trait_ref, recursion_depth))
-}
-
-/// Cast a trait reference into a reference to one of its super
-/// traits; returns `None` if `target_trait_def_id` is not a
-/// supertrait.
-pub fn upcast<'tcx>(tcx: &ty::ctxt<'tcx>,
-                    source_trait_ref: ty::PolyTraitRef<'tcx>,
-                    target_trait_def_id: DefId)
-                    -> Vec<ty::PolyTraitRef<'tcx>>
-{
-    if source_trait_ref.def_id() == target_trait_def_id {
-        return vec![source_trait_ref]; // shortcut the most common case
-    }
-
-    supertraits(tcx, source_trait_ref)
-        .filter(|r| r.def_id() == target_trait_def_id)
-        .collect()
-}
-
-/// Given a trait `trait_ref`, returns the number of vtable entries
-/// that come from `trait_ref`, excluding its supertraits. Used in
-/// computing the vtable base for an upcast trait of a trait object.
-pub fn count_own_vtable_entries<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                      trait_ref: ty::PolyTraitRef<'tcx>)
-                                      -> usize {
-    let mut entries = 0;
-    // Count number of methods and add them to the total offset.
-    // Skip over associated types and constants.
-    for trait_item in &tcx.trait_items(trait_ref.def_id())[..] {
-        if let ty::MethodTraitItem(_) = *trait_item {
-            entries += 1;
-        }
-    }
-    entries
-}
-
-/// Given an upcast trait object described by `object`, returns the
-/// index of the method `method_def_id` (which should be part of
-/// `object.upcast_trait_ref`) within the vtable for `object`.
-pub fn get_vtable_index_of_object_method<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                               object: &super::VtableObjectData<'tcx>,
-                                               method_def_id: DefId) -> usize {
-    // Count number of methods preceding the one we are selecting and
-    // add them to the total offset.
-    // Skip over associated types and constants.
-    let mut entries = object.vtable_base;
-    for trait_item in &tcx.trait_items(object.upcast_trait_ref.def_id())[..] {
-        if trait_item.def_id() == method_def_id {
-            // The item with the ID we were given really ought to be a method.
-            assert!(match *trait_item {
-                ty::MethodTraitItem(_) => true,
-                _ => false
-            });
-
-            return entries;
-        }
-        if let ty::MethodTraitItem(_) = *trait_item {
-            entries += 1;
-        }
-    }
-
-    tcx.sess.bug(&format!("get_vtable_index_of_object_method: {:?} was not found",
-                          method_def_id));
-}
-
-pub enum TupleArgumentsFlag { Yes, No }
-
-pub fn closure_trait_ref_and_return_type<'tcx>(
-    tcx: &ty::ctxt<'tcx>,
-    fn_trait_def_id: DefId,
-    self_ty: Ty<'tcx>,
-    sig: &ty::PolyFnSig<'tcx>,
-    tuple_arguments: TupleArgumentsFlag)
-    -> ty::Binder<(ty::TraitRef<'tcx>, Ty<'tcx>)>
-{
-    let arguments_tuple = match tuple_arguments {
-        TupleArgumentsFlag::No => sig.0.inputs[0],
-        TupleArgumentsFlag::Yes => tcx.mk_tup(sig.0.inputs.to_vec()),
-    };
-    let trait_substs = Substs::new_trait(vec![arguments_tuple], vec![], self_ty);
-    let trait_ref = ty::TraitRef {
-        def_id: fn_trait_def_id,
-        substs: tcx.mk_substs(trait_substs),
-    };
-    ty::Binder((trait_ref, sig.0.output.unwrap_or(tcx.mk_nil())))
-}
diff --git a/src/librustc/middle/ty/_match.rs b/src/librustc/middle/ty/_match.rs
deleted file mode 100644 (file)
index 5a3ad90..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use middle::ty::{self, Ty};
-use middle::ty::error::TypeError;
-use middle::ty::relate::{self, Relate, TypeRelation, RelateResult};
-
-/// A type "A" *matches* "B" if the fresh types in B could be
-/// substituted with values so as to make it equal to A. Matching is
-/// intended to be used only on freshened types, and it basically
-/// indicates if the non-freshened versions of A and B could have been
-/// unified.
-///
-/// It is only an approximation. If it yields false, unification would
-/// definitely fail, but a true result doesn't mean unification would
-/// succeed. This is because we don't track the "side-constraints" on
-/// type variables, nor do we track if the same freshened type appears
-/// more than once. To some extent these approximations could be
-/// fixed, given effort.
-///
-/// Like subtyping, matching is really a binary relation, so the only
-/// important thing about the result is Ok/Err. Also, matching never
-/// affects any type variables or unification state.
-pub struct Match<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>
-}
-
-impl<'a, 'tcx> Match<'a, 'tcx> {
-    pub fn new(tcx: &'a ty::ctxt<'tcx>) -> Match<'a, 'tcx> {
-        Match { tcx: tcx }
-    }
-}
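
A toy standalone sketch of the matching relation described above (a hypothetical `Toy` type, not the compiler's `TypeRelation` machinery): fresh placeholders on the right-hand side may stand for anything, while everything else must agree structurally.

#[derive(Debug, PartialEq)]
enum Toy {
    Int,
    Bool,
    Fresh(u32),                       // plays the role of TyInfer(FreshTy(_))
    Pair(Box<Toy>, Box<Toy>),
}

fn toy_matches(a: &Toy, b: &Toy) -> bool {
    match (a, b) {
        // A fresh placeholder in B could be instantiated to whatever A is.
        (_, &Toy::Fresh(_)) => true,
        (&Toy::Pair(ref a0, ref a1), &Toy::Pair(ref b0, ref b1)) =>
            toy_matches(a0, b0) && toy_matches(a1, b1),
        (x, y) => x == y,
    }
}

fn main() {
    let a = Toy::Pair(Box::new(Toy::Int), Box::new(Toy::Bool));
    let b = Toy::Pair(Box::new(Toy::Fresh(0)), Box::new(Toy::Bool));
    assert!(toy_matches(&a, &b));            // $0 could be made equal to Int
    assert!(!toy_matches(&Toy::Int, &Toy::Bool));
}
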
-
-impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Match<'a, 'tcx> {
-    fn tag(&self) -> &'static str { "Match" }
-    fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.tcx }
-    fn a_is_expected(&self) -> bool { true } // irrelevant
-
-    fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
-                                               _: ty::Variance,
-                                               a: &T,
-                                               b: &T)
-                                               -> RelateResult<'tcx, T>
-    {
-        self.relate(a, b)
-    }
-
-    fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
-        debug!("{}.regions({:?}, {:?})",
-               self.tag(),
-               a,
-               b);
-        Ok(a)
-    }
-
-    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
-        debug!("{}.tys({:?}, {:?})", self.tag(),
-               a, b);
-        if a == b { return Ok(a); }
-
-        match (&a.sty, &b.sty) {
-            (_, &ty::TyInfer(ty::FreshTy(_))) |
-            (_, &ty::TyInfer(ty::FreshIntTy(_))) |
-            (_, &ty::TyInfer(ty::FreshFloatTy(_))) => {
-                Ok(a)
-            }
-
-            (&ty::TyInfer(_), _) |
-            (_, &ty::TyInfer(_)) => {
-                Err(TypeError::Sorts(relate::expected_found(self, &a, &b)))
-            }
-
-            (&ty::TyError, _) | (_, &ty::TyError) => {
-                Ok(self.tcx().types.err)
-            }
-
-            _ => {
-                relate::super_relate_tys(self, a, b)
-            }
-        }
-    }
-
-    fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-                  -> RelateResult<'tcx, ty::Binder<T>>
-        where T: Relate<'a,'tcx>
-    {
-        Ok(ty::Binder(try!(self.relate(a.skip_binder(), b.skip_binder()))))
-    }
-}
diff --git a/src/librustc/middle/ty/adjustment.rs b/src/librustc/middle/ty/adjustment.rs
deleted file mode 100644 (file)
index afe177f..0000000
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-pub use self::AutoAdjustment::*;
-pub use self::AutoRef::*;
-
-use middle::ty::{self, Ty, TypeAndMut, TypeFoldable};
-use middle::ty::LvaluePreference::{NoPreference};
-
-use syntax::ast;
-use syntax::codemap::Span;
-
-use rustc_front::hir;
-
-#[derive(Copy, Clone)]
-pub enum AutoAdjustment<'tcx> {
-    AdjustReifyFnPointer,    // go from a fn-item type to a fn-pointer type
-    AdjustUnsafeFnPointer,   // go from a safe fn pointer to an unsafe fn pointer
-    AdjustMutToConstPointer, // go from a mut raw pointer to a const raw pointer
-    AdjustDerefRef(AutoDerefRef<'tcx>),
-}
-
-/// Represents coercing a pointer to a different kind of pointer - where 'kind'
-/// here means either or both of raw vs borrowed vs unique and fat vs thin.
-///
-/// We transform pointers by following the following steps in order:
-/// 1. Deref the pointer `self.autoderefs` times (may be 0).
-/// 2. If `autoref` is `Some(_)`, then take the address and produce either a
-///    `&` or `*` pointer.
-/// 3. If `unsize` is `Some(_)`, then apply the unsize transformation,
-///    which will do things like convert thin pointers to fat
-///    pointers, or convert structs containing thin pointers to
-///    structs containing fat pointers, or convert between fat
-///    pointers.  We don't store the details of how the transform is
-///    done (in fact, we don't know that, because it might depend on
-///    the precise type parameters). We just store the target
-///    type. Trans figures out what has to be done at monomorphization
-///    time based on the precise source/target type at hand.
-///
-/// To make that more concrete, here are some common scenarios:
-///
-/// 1. The simplest cases are where the pointer is not adjusted fat vs thin.
-/// Here the pointer will be dereferenced N times (where a dereference can
-/// happen to raw or borrowed pointers or any smart pointer which implements
-/// Deref, including Box<_>). The number of dereferences is given by
-/// `autoderefs`.  It can then be auto-referenced zero or one times, indicated
-/// by `autoref`, to either a raw or borrowed pointer. In these cases unsize is
-/// None.
-///
-/// 2. A thin-to-fat coercion involves unsizing the underlying data. We start
-/// with a thin pointer, deref a number of times, unsize the underlying data,
-/// then autoref. The 'unsize' phase may change a fixed length array to a
-/// dynamically sized one, a concrete object to a trait object, or statically
-/// sized struct to a dynamically sized one. E.g., &[i32; 4] -> &[i32] is
-/// represented by:
-///
-/// ```
-/// AutoDerefRef {
-///     autoderefs: 1,          // &[i32; 4] -> [i32; 4]
-///     autoref: Some(AutoPtr), // [i32] -> &[i32]
-///     unsize: Some([i32]),    // [i32; 4] -> [i32]
-/// }
-/// ```
-///
-/// Note that for a struct, the 'deep' unsizing of the struct is not recorded.
-/// E.g., given `struct Foo<T> { x: T }`, we can coerce &Foo<[i32; 4]> to &Foo<[i32]>.
-/// The autoderef and -ref are the same as in the above example, but the type
-/// stored in `unsize` is `Foo<[i32]>`, we don't store any further detail about
-/// the underlying conversions from `[i32; 4]` to `[i32]`.
-///
-/// 3. Coercing a `Box<T>` to `Box<Trait>` is an interesting special case.  In
-/// that case, we have the pointer we need coming in, so there are no
-/// autoderefs, and no autoref. Instead we just do the `Unsize` transformation.
-/// At some point, of course, `Box` should move out of the compiler, in which
-/// case this is analogous to transforming a struct. E.g., Box<[i32; 4]> ->
-/// Box<[i32]> is represented by:
-///
-/// ```
-/// AutoDerefRef {
-///     autoderefs: 0,
-///     autoref: None,
-///     unsize: Some(Box<[i32]>),
-/// }
-/// ```
-#[derive(Copy, Clone)]
-pub struct AutoDerefRef<'tcx> {
-    /// Step 1. Apply a number of dereferences, producing an lvalue.
-    pub autoderefs: usize,
-
-    /// Step 2. Optionally produce a pointer/reference from the value.
-    pub autoref: Option<AutoRef<'tcx>>,
-
-    /// Step 3. Unsize a pointer/reference value, e.g. `&[T; n]` to
-    /// `&[T]`. The stored type is the target pointer type. Note that
-    /// the source could be a thin or fat pointer.
-    pub unsize: Option<Ty<'tcx>>,
-}
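
The `AutoDerefRef` value shown in the doc comment records exactly the adjustment the compiler applies to surface code like the snippet below; this is only an illustration of the coercion from the user's side, not of the internal API:

fn sum(xs: &[i32]) -> i32 {
    xs.iter().sum()
}

fn main() {
    let arr = [1, 2, 3, 4];
    // &[i32; 4] -> &[i32]: one deref/autoref step plus an unsize step, the
    // shape recorded by `AutoDerefRef { autoderefs: 1, autoref: Some(..),
    // unsize: Some(..) }` in the doc comment above.
    assert_eq!(sum(&arr), 10);
}
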
-
-impl<'tcx> AutoAdjustment<'tcx> {
-    pub fn is_identity(&self) -> bool {
-        match *self {
-            AdjustReifyFnPointer |
-            AdjustUnsafeFnPointer |
-            AdjustMutToConstPointer => false,
-            AdjustDerefRef(ref r) => r.is_identity(),
-        }
-    }
-}
-impl<'tcx> AutoDerefRef<'tcx> {
-    pub fn is_identity(&self) -> bool {
-        self.autoderefs == 0 && self.unsize.is_none() && self.autoref.is_none()
-    }
-}
-
-
-#[derive(Copy, Clone, PartialEq, Debug)]
-pub enum AutoRef<'tcx> {
-    /// Convert from T to &T.
-    AutoPtr(&'tcx ty::Region, hir::Mutability),
-
-    /// Convert from T to *T.
-    /// Value to thin pointer.
-    AutoUnsafe(hir::Mutability),
-}
-
-#[derive(Clone, Copy, RustcEncodable, RustcDecodable, Debug)]
-pub enum CustomCoerceUnsized {
-    /// Records the index of the field being coerced.
-    Struct(usize)
-}
-
-impl<'tcx> ty::TyS<'tcx> {
-    /// See `expr_ty_adjusted`
-    pub fn adjust<F>(&'tcx self, cx: &ty::ctxt<'tcx>,
-                     span: Span,
-                     expr_id: ast::NodeId,
-                     adjustment: Option<&AutoAdjustment<'tcx>>,
-                     mut method_type: F)
-                     -> Ty<'tcx> where
-        F: FnMut(ty::MethodCall) -> Option<Ty<'tcx>>,
-    {
-        if let ty::TyError = self.sty {
-            return self;
-        }
-
-        return match adjustment {
-            Some(adjustment) => {
-                match *adjustment {
-                    AdjustReifyFnPointer => {
-                        match self.sty {
-                            ty::TyBareFn(Some(_), b) => {
-                                cx.mk_fn(None, b)
-                            }
-                            _ => {
-                                cx.sess.bug(
-                                    &format!("AdjustReifyFnPointer adjustment on non-fn-item: \
-                                              {:?}", self));
-                            }
-                        }
-                    }
-
-                    AdjustUnsafeFnPointer => {
-                        match self.sty {
-                            ty::TyBareFn(None, b) => cx.safe_to_unsafe_fn_ty(b),
-                            ref b => {
-                                cx.sess.bug(
-                                    &format!("AdjustUnsafeFnPointer adjustment on non-fn-ptr: \
-                                             {:?}",
-                                            b));
-                            }
-                        }
-                    }
-
-                    AdjustMutToConstPointer => {
-                        match self.sty {
-                            ty::TyRawPtr(mt) => cx.mk_ptr(ty::TypeAndMut {
-                                ty: mt.ty,
-                                mutbl: hir::MutImmutable
-                            }),
-                            ref b => {
-                                cx.sess.bug(
-                                    &format!("AdjustMutToConstPointer on non-raw-ptr: \
-                                             {:?}",
-                                            b));
-                            }
-                        }
-                    }
-
-                    AdjustDerefRef(ref adj) => {
-                        let mut adjusted_ty = self;
-
-                        if !adjusted_ty.references_error() {
-                            for i in 0..adj.autoderefs {
-                                adjusted_ty =
-                                    adjusted_ty.adjust_for_autoderef(cx,
-                                                                     expr_id,
-                                                                     span,
-                                                                     i as u32,
-                                                                     &mut method_type);
-                            }
-                        }
-
-                        if let Some(target) = adj.unsize {
-                            target
-                        } else {
-                            adjusted_ty.adjust_for_autoref(cx, adj.autoref)
-                        }
-                    }
-                }
-            }
-            None => self
-        };
-    }
-
-    pub fn adjust_for_autoderef<F>(&'tcx self,
-                                   cx: &ty::ctxt<'tcx>,
-                                   expr_id: ast::NodeId,
-                                   expr_span: Span,
-                                   autoderef: u32, // how many autoderefs so far?
-                                   mut method_type: F)
-                                   -> Ty<'tcx> where
-        F: FnMut(ty::MethodCall) -> Option<Ty<'tcx>>,
-    {
-        let method_call = ty::MethodCall::autoderef(expr_id, autoderef);
-        let mut adjusted_ty = self;
-        if let Some(method_ty) = method_type(method_call) {
-            // Method calls always have all late-bound regions
-            // fully instantiated.
-            let fn_ret = cx.no_late_bound_regions(&method_ty.fn_ret()).unwrap();
-            adjusted_ty = fn_ret.unwrap();
-        }
-        match adjusted_ty.builtin_deref(true, NoPreference) {
-            Some(mt) => mt.ty,
-            None => {
-                cx.sess.span_bug(
-                    expr_span,
-                    &format!("the {}th autoderef failed: {}",
-                             autoderef,
-                             adjusted_ty)
-                        );
-            }
-        }
-    }
-
-    pub fn adjust_for_autoref(&'tcx self, cx: &ty::ctxt<'tcx>,
-                              autoref: Option<AutoRef<'tcx>>)
-                              -> Ty<'tcx> {
-        match autoref {
-            None => self,
-            Some(AutoPtr(r, m)) => {
-                cx.mk_ref(r, TypeAndMut { ty: self, mutbl: m })
-            }
-            Some(AutoUnsafe(m)) => {
-                cx.mk_ptr(TypeAndMut { ty: self, mutbl: m })
-            }
-        }
-    }
-}
diff --git a/src/librustc/middle/ty/cast.rs b/src/librustc/middle/ty/cast.rs
deleted file mode 100644 (file)
index 8233b6b..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Helpers for handling cast expressions, used in both
-// typeck and trans.
-
-use middle::ty::{self, Ty};
-
-use syntax::ast;
-
-/// Types that are represented as ints.
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum IntTy {
-    U(ast::UintTy),
-    I,
-    CEnum,
-    Bool,
-    Char
-}
-
-// Valid types for the result of a non-coercion cast
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum CastTy<'tcx> {
-    /// Various types that are represented as ints and handled mostly
-    /// in the same way, merged for easier matching.
-    Int(IntTy),
-    /// Floating-Point types
-    Float,
-    /// Function Pointers
-    FnPtr,
-    /// Raw pointers
-    Ptr(&'tcx ty::TypeAndMut<'tcx>),
-    /// References
-    RPtr(&'tcx ty::TypeAndMut<'tcx>),
-}
-
-/// Cast Kind. See RFC 401 (or librustc_typeck/check/cast.rs)
-#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
-pub enum CastKind {
-    CoercionCast,
-    PtrPtrCast,
-    PtrAddrCast,
-    AddrPtrCast,
-    NumericCast,
-    EnumCast,
-    PrimIntCast,
-    U8CharCast,
-    ArrayPtrCast,
-    FnPtrPtrCast,
-    FnPtrAddrCast
-}
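
Surface-level examples of cast expressions whose operand and target types fall into the `CastTy` buckets above; a sketch of the categorization only, not the checker itself:

fn main() {
    let b = true as u8;               // Int(Bool) operand, Int target
    let c = 'x' as u32;               // Int(Char) operand, Int target
    let r = &7i32;
    let p = r as *const i32;          // RPtr(..)  -> Ptr(..)
    let a = p as usize;               // Ptr(..)   -> Int(..), a pointer-address cast
    let f = main as fn();             // fn item reified to FnPtr
    println!("{} {} {:?} {} {}", b, c, p, a, f as usize);
}
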
-
-impl<'tcx> CastTy<'tcx> {
-    pub fn from_ty(t: Ty<'tcx>) -> Option<CastTy<'tcx>> {
-        match t.sty {
-            ty::TyBool => Some(CastTy::Int(IntTy::Bool)),
-            ty::TyChar => Some(CastTy::Int(IntTy::Char)),
-            ty::TyInt(_) => Some(CastTy::Int(IntTy::I)),
-            ty::TyUint(u) => Some(CastTy::Int(IntTy::U(u))),
-            ty::TyFloat(_) => Some(CastTy::Float),
-            ty::TyEnum(d,_) if d.is_payloadfree() =>
-                Some(CastTy::Int(IntTy::CEnum)),
-            ty::TyRawPtr(ref mt) => Some(CastTy::Ptr(mt)),
-            ty::TyRef(_, ref mt) => Some(CastTy::RPtr(mt)),
-            ty::TyBareFn(..) => Some(CastTy::FnPtr),
-            _ => None,
-        }
-    }
-}
diff --git a/src/librustc/middle/ty/contents.rs b/src/librustc/middle/ty/contents.rs
deleted file mode 100644 (file)
index 3a42e8e..0000000
+++ /dev/null
@@ -1,267 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use middle::def_id::{DefId};
-use middle::ty::{self, Ty};
-use util::common::MemoizationMap;
-use util::nodemap::FnvHashMap;
-
-use std::fmt;
-use std::ops;
-
-use syntax::ast;
-
-/// Type contents is how the type checker reasons about kinds.
-/// They track what kinds of things are found within a type.  You can
-/// think of them as kind of an "anti-kind".  They track the kinds of values
-/// and things that are contained in types.  Having a larger contents for
-/// a type tends to rule that type *out* from various kinds.  For example,
-/// a type that contains a reference is not sendable.
-///
-/// The reason we compute type contents and not kinds is that it is
-/// easier for me (nmatsakis) to think about what is contained within
-/// a type than to think about what is *not* contained within a type.
-#[derive(Clone, Copy)]
-pub struct TypeContents {
-    pub bits: u64
-}
-
-macro_rules! def_type_content_sets {
-    (mod $mname:ident { $($name:ident = $bits:expr),+ }) => {
-        #[allow(non_snake_case)]
-        mod $mname {
-            use super::TypeContents;
-            $(
-                #[allow(non_upper_case_globals)]
-                pub const $name: TypeContents = TypeContents { bits: $bits };
-             )+
-        }
-    }
-}
-
-def_type_content_sets! {
-    mod TC {
-        None                                = 0b0000_0000__0000_0000__0000,
-
-        // Things that are interior to the value (first nibble):
-        InteriorUnsafe                      = 0b0000_0000__0000_0000__0010,
-        InteriorParam                       = 0b0000_0000__0000_0000__0100,
-        // InteriorAll                         = 0b00000000__00000000__1111,
-
-        // Things that are owned by the value (second and third nibbles):
-        OwnsOwned                           = 0b0000_0000__0000_0001__0000,
-        OwnsDtor                            = 0b0000_0000__0000_0010__0000,
-        OwnsAll                             = 0b0000_0000__1111_1111__0000,
-
-        // Things that mean drop glue is necessary
-        NeedsDrop                           = 0b0000_0000__0000_0111__0000,
-
-        // All bits
-        All                                 = 0b1111_1111__1111_1111__1111
-    }
-}
-
-impl TypeContents {
-    pub fn when(&self, cond: bool) -> TypeContents {
-        if cond {*self} else {TC::None}
-    }
-
-    pub fn intersects(&self, tc: TypeContents) -> bool {
-        (self.bits & tc.bits) != 0
-    }
-
-    pub fn owns_owned(&self) -> bool {
-        self.intersects(TC::OwnsOwned)
-    }
-
-    pub fn interior_param(&self) -> bool {
-        self.intersects(TC::InteriorParam)
-    }
-
-    pub fn interior_unsafe(&self) -> bool {
-        self.intersects(TC::InteriorUnsafe)
-    }
-
-    pub fn needs_drop(&self, _: &ty::ctxt) -> bool {
-        self.intersects(TC::NeedsDrop)
-    }
-
-    /// Includes only those bits that still apply when indirected through a `Box` pointer
-    pub fn owned_pointer(&self) -> TypeContents {
-        TC::OwnsOwned | (*self & TC::OwnsAll)
-    }
-
-    pub fn union<T, F>(v: &[T], mut f: F) -> TypeContents where
-        F: FnMut(&T) -> TypeContents,
-    {
-        v.iter().fold(TC::None, |tc, ty| tc | f(ty))
-    }
-
-    pub fn has_dtor(&self) -> bool {
-        self.intersects(TC::OwnsDtor)
-    }
-}
-
-impl ops::BitOr for TypeContents {
-    type Output = TypeContents;
-
-    fn bitor(self, other: TypeContents) -> TypeContents {
-        TypeContents {bits: self.bits | other.bits}
-    }
-}
-
-impl ops::BitAnd for TypeContents {
-    type Output = TypeContents;
-
-    fn bitand(self, other: TypeContents) -> TypeContents {
-        TypeContents {bits: self.bits & other.bits}
-    }
-}
-
-impl ops::Sub for TypeContents {
-    type Output = TypeContents;
-
-    fn sub(self, other: TypeContents) -> TypeContents {
-        TypeContents {bits: self.bits & !other.bits}
-    }
-}
-
-impl fmt::Debug for TypeContents {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "TypeContents({:b})", self.bits)
-    }
-}
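
A minimal standalone sketch of the bit-set idea behind `TypeContents` (the `Contents` type and its flags are hypothetical): each flag records something found within a type, and the contents of an aggregate are the union of its components' contents.

#[derive(Clone, Copy, Debug)]
struct Contents(u64);

const NONE: Contents = Contents(0);
const INTERIOR_UNSAFE: Contents = Contents(0b0001);
const OWNS_DTOR: Contents = Contents(0b0010);

impl Contents {
    fn union(self, other: Contents) -> Contents { Contents(self.0 | other.0) }
    fn intersects(self, other: Contents) -> bool { self.0 & other.0 != 0 }
}

fn main() {
    // The contents of an aggregate are the union of its fields' contents.
    let fields = [OWNS_DTOR, NONE];
    let aggregate = fields.iter().fold(NONE, |acc, &f| acc.union(f));
    assert!(aggregate.intersects(OWNS_DTOR));
    assert!(!aggregate.intersects(INTERIOR_UNSAFE));
    println!("{:?}", aggregate);
}
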
-
-impl<'tcx> ty::TyS<'tcx> {
-    pub fn type_contents(&'tcx self, cx: &ty::ctxt<'tcx>) -> TypeContents {
-        return cx.tc_cache.memoize(self, || tc_ty(cx, self, &mut FnvHashMap()));
-
-        fn tc_ty<'tcx>(cx: &ty::ctxt<'tcx>,
-                       ty: Ty<'tcx>,
-                       cache: &mut FnvHashMap<Ty<'tcx>, TypeContents>) -> TypeContents
-        {
-            // Subtle: Note that we are *not* using cx.tc_cache here but rather a
-            // private cache for this walk.  This is needed in the case of cyclic
-            // types like:
-            //
-            //     struct List { next: Box<Option<List>>, ... }
-            //
-            // When computing the type contents of such a type, we wind up deeply
-            // recursing as we go.  So when we encounter the recursive reference
-            // to List, we temporarily use TC::None as its contents.  Later we'll
-            // patch up the cache with the correct value, once we've computed it
-            // (this is basically a co-inductive process, if that helps).  So in
-            // the end we'll compute TC::OwnsOwned, in this case.
-            //
-            // The problem is, as we are doing the computation, we will also
-            // compute an *intermediate* contents for, e.g., Option<List> of
-            // TC::None.  This is ok during the computation of List itself, but if
-            // we stored this intermediate value into cx.tc_cache, then later
-            // requests for the contents of Option<List> would also yield TC::None
-            // which is incorrect.  This value was computed based on the crutch
-            // value for the type contents of list.  The correct value is
-            // TC::OwnsOwned.  This manifested as issue #4821.
-            match cache.get(&ty) {
-                Some(tc) => { return *tc; }
-                None => {}
-            }
-            match cx.tc_cache.borrow().get(&ty) {    // Must check both caches!
-                Some(tc) => { return *tc; }
-                None => {}
-            }
-            cache.insert(ty, TC::None);
-
-            let result = match ty.sty {
-                // usize and isize are ffi-unsafe
-                ty::TyUint(ast::UintTy::Us) | ty::TyInt(ast::IntTy::Is) => {
-                    TC::None
-                }
-
-                // Scalar and unique types are sendable, and durable
-                ty::TyInfer(ty::FreshIntTy(_)) | ty::TyInfer(ty::FreshFloatTy(_)) |
-                ty::TyBool | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) |
-                ty::TyBareFn(..) | ty::TyChar => {
-                    TC::None
-                }
-
-                ty::TyBox(typ) => {
-                    tc_ty(cx, typ, cache).owned_pointer()
-                }
-
-                ty::TyTrait(_) => {
-                    TC::All - TC::InteriorParam
-                }
-
-                ty::TyRawPtr(_) => {
-                    TC::None
-                }
-
-                ty::TyRef(_, _) => {
-                    TC::None
-                }
-
-                ty::TyArray(ty, _) => {
-                    tc_ty(cx, ty, cache)
-                }
-
-                ty::TySlice(ty) => {
-                    tc_ty(cx, ty, cache)
-                }
-                ty::TyStr => TC::None,
-
-                ty::TyClosure(_, ref substs) => {
-                    TypeContents::union(&substs.upvar_tys, |ty| tc_ty(cx, &ty, cache))
-                }
-
-                ty::TyTuple(ref tys) => {
-                    TypeContents::union(&tys[..],
-                                        |ty| tc_ty(cx, *ty, cache))
-                }
-
-                ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
-                    let mut res =
-                        TypeContents::union(&def.variants, |v| {
-                            TypeContents::union(&v.fields, |f| {
-                                tc_ty(cx, f.ty(cx, substs), cache)
-                            })
-                        });
-
-                    if def.has_dtor() {
-                        res = res | TC::OwnsDtor;
-                    }
-
-                    apply_lang_items(cx, def.did, res)
-                }
-
-                ty::TyProjection(..) |
-                ty::TyParam(_) => {
-                    TC::All
-                }
-
-                ty::TyInfer(_) |
-                ty::TyError => {
-                    cx.sess.bug("asked to compute contents of error type");
-                }
-            };
-
-            cache.insert(ty, result);
-            result
-        }
-
-        fn apply_lang_items(cx: &ty::ctxt, did: DefId, tc: TypeContents)
-                            -> TypeContents {
-            if Some(did) == cx.lang_items.unsafe_cell_type() {
-                tc | TC::InteriorUnsafe
-            } else {
-                tc
-            }
-        }
-    }
-}
diff --git a/src/librustc/middle/ty/context.rs b/src/librustc/middle/ty/context.rs
deleted file mode 100644 (file)
index a014c63..0000000
+++ /dev/null
@@ -1,1072 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! type context book-keeping
-
-// FIXME: (@jroesch) @eddyb should remove this when he renames ctxt
-#![allow(non_camel_case_types)]
-
-use dep_graph::{DepGraph, DepTrackingMap};
-use front::map as ast_map;
-use session::Session;
-use lint;
-use middle;
-use middle::cstore::CrateStore;
-use middle::def::DefMap;
-use middle::def_id::DefId;
-use middle::free_region::FreeRegionMap;
-use middle::region::RegionMaps;
-use middle::resolve_lifetime;
-use middle::stability;
-use middle::subst::{self, Subst, Substs};
-use middle::traits;
-use middle::ty::{self, TraitRef, Ty, TypeAndMut};
-use middle::ty::{TyS, TypeVariants};
-use middle::ty::{AdtDef, ClosureSubsts, ExistentialBounds, Region};
-use middle::ty::{FreevarMap};
-use middle::ty::{BareFnTy, InferTy, ParamTy, ProjectionTy, TraitTy};
-use middle::ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid};
-use middle::ty::TypeVariants::*;
-use middle::ty::maps;
-use util::common::MemoizationMap;
-use util::nodemap::{NodeMap, NodeSet, DefIdMap, DefIdSet};
-use util::nodemap::FnvHashMap;
-
-use arena::TypedArena;
-use std::borrow::Borrow;
-use std::cell::{Cell, RefCell, Ref};
-use std::hash::{Hash, Hasher};
-use std::rc::Rc;
-use syntax::abi::Abi;
-use syntax::ast::{self, Name, NodeId};
-use syntax::attr;
-use syntax::parse::token::special_idents;
-
-use rustc_front::hir;
-
-/// Internal storage
-pub struct CtxtArenas<'tcx> {
-    // internings
-    type_: TypedArena<TyS<'tcx>>,
-    substs: TypedArena<Substs<'tcx>>,
-    bare_fn: TypedArena<BareFnTy<'tcx>>,
-    region: TypedArena<Region>,
-    stability: TypedArena<attr::Stability>,
-
-    // references
-    trait_defs: TypedArena<ty::TraitDef<'tcx>>,
-    adt_defs: TypedArena<ty::AdtDefData<'tcx, 'tcx>>,
-}
-
-impl<'tcx> CtxtArenas<'tcx> {
-    pub fn new() -> CtxtArenas<'tcx> {
-        CtxtArenas {
-            type_: TypedArena::new(),
-            substs: TypedArena::new(),
-            bare_fn: TypedArena::new(),
-            region: TypedArena::new(),
-            stability: TypedArena::new(),
-
-            trait_defs: TypedArena::new(),
-            adt_defs: TypedArena::new()
-        }
-    }
-}
-
-pub struct CommonTypes<'tcx> {
-    pub bool: Ty<'tcx>,
-    pub char: Ty<'tcx>,
-    pub isize: Ty<'tcx>,
-    pub i8: Ty<'tcx>,
-    pub i16: Ty<'tcx>,
-    pub i32: Ty<'tcx>,
-    pub i64: Ty<'tcx>,
-    pub usize: Ty<'tcx>,
-    pub u8: Ty<'tcx>,
-    pub u16: Ty<'tcx>,
-    pub u32: Ty<'tcx>,
-    pub u64: Ty<'tcx>,
-    pub f32: Ty<'tcx>,
-    pub f64: Ty<'tcx>,
-    pub err: Ty<'tcx>,
-}
-
-pub struct Tables<'tcx> {
-    /// Stores the types for various nodes in the AST.  Note that this table
-    /// is not guaranteed to be populated until after typeck.  See
-    /// typeck::check::fn_ctxt for details.
-    pub node_types: NodeMap<Ty<'tcx>>,
-
-    /// Stores the type parameters which were substituted to obtain the type
-    /// of this node.  This only applies to nodes that refer to entities
-    /// parameterized by type parameters, such as generic fns, types, or
-    /// other items.
-    pub item_substs: NodeMap<ty::ItemSubsts<'tcx>>,
-
-    pub adjustments: NodeMap<ty::adjustment::AutoAdjustment<'tcx>>,
-
-    pub method_map: ty::MethodMap<'tcx>,
-
-    /// Borrows
-    pub upvar_capture_map: ty::UpvarCaptureMap,
-
-    /// Records the type of each closure. The def ID is the ID of the
-    /// expression defining the closure.
-    pub closure_tys: DefIdMap<ty::ClosureTy<'tcx>>,
-
-    /// Records the kind of each closure. The def ID is the ID of the
-    /// expression defining the closure.
-    pub closure_kinds: DefIdMap<ty::ClosureKind>,
-
-    /// For each fn, records the "liberated" types of its arguments
-    /// and return type. Liberated means that all bound regions
-    /// (including late-bound regions) are replaced with free
-    /// equivalents. This table is not used in trans (since regions
-    /// are erased there) and hence is not serialized to metadata.
-    pub liberated_fn_sigs: NodeMap<ty::FnSig<'tcx>>,
-
-    /// For each FRU expression, record the normalized types of the fields
-    /// of the struct - this is needed because it is non-trivial to
-    /// normalize while preserving regions. This table is used only in
-    /// MIR construction and hence is not serialized to metadata.
-    pub fru_field_types: NodeMap<Vec<Ty<'tcx>>>
-}
-
-impl<'tcx> Tables<'tcx> {
-    pub fn empty() -> Tables<'tcx> {
-        Tables {
-            node_types: FnvHashMap(),
-            item_substs: NodeMap(),
-            adjustments: NodeMap(),
-            method_map: FnvHashMap(),
-            upvar_capture_map: FnvHashMap(),
-            closure_tys: DefIdMap(),
-            closure_kinds: DefIdMap(),
-            liberated_fn_sigs: NodeMap(),
-            fru_field_types: NodeMap()
-        }
-    }
-
-    pub fn closure_kind(this: &RefCell<Self>,
-                        tcx: &ty::ctxt<'tcx>,
-                        def_id: DefId)
-                        -> ty::ClosureKind {
-        // If this is a local def-id, it should be inserted into the
-        // tables by typeck; else, it will be retrieved from
-        // the external crate metadata.
-        if let Some(&kind) = this.borrow().closure_kinds.get(&def_id) {
-            return kind;
-        }
-
-        let kind = tcx.sess.cstore.closure_kind(tcx, def_id);
-        this.borrow_mut().closure_kinds.insert(def_id, kind);
-        kind
-    }
-
-    pub fn closure_type(this: &RefCell<Self>,
-                        tcx: &ty::ctxt<'tcx>,
-                        def_id: DefId,
-                        substs: &ClosureSubsts<'tcx>)
-                        -> ty::ClosureTy<'tcx>
-    {
-        // If this is a local def-id, it should be inserted into the
-        // tables by typeck; else, it will be retrieved from
-        // the external crate metadata.
-        if let Some(ty) = this.borrow().closure_tys.get(&def_id) {
-            return ty.subst(tcx, &substs.func_substs);
-        }
-
-        let ty = tcx.sess.cstore.closure_ty(tcx, def_id);
-        this.borrow_mut().closure_tys.insert(def_id, ty.clone());
-        ty.subst(tcx, &substs.func_substs)
-    }
-}
-
-impl<'tcx> CommonTypes<'tcx> {
-    fn new(arena: &'tcx TypedArena<TyS<'tcx>>,
-           interner: &RefCell<FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>>)
-           -> CommonTypes<'tcx>
-    {
-        let mk = |sty| ctxt::intern_ty(arena, interner, sty);
-        CommonTypes {
-            bool: mk(TyBool),
-            char: mk(TyChar),
-            err: mk(TyError),
-            isize: mk(TyInt(ast::IntTy::Is)),
-            i8: mk(TyInt(ast::IntTy::I8)),
-            i16: mk(TyInt(ast::IntTy::I16)),
-            i32: mk(TyInt(ast::IntTy::I32)),
-            i64: mk(TyInt(ast::IntTy::I64)),
-            usize: mk(TyUint(ast::UintTy::Us)),
-            u8: mk(TyUint(ast::UintTy::U8)),
-            u16: mk(TyUint(ast::UintTy::U16)),
-            u32: mk(TyUint(ast::UintTy::U32)),
-            u64: mk(TyUint(ast::UintTy::U64)),
-            f32: mk(TyFloat(ast::FloatTy::F32)),
-            f64: mk(TyFloat(ast::FloatTy::F64)),
-        }
-    }
-}
-
-/// The data structure to keep track of all the information that the typechecker
-/// generates so that it can be reused and doesn't have to be redone
-/// later on.
-pub struct ctxt<'tcx> {
-    /// The arenas that types etc are allocated from.
-    arenas: &'tcx CtxtArenas<'tcx>,
-
-    /// Specifically use a speedy hash algorithm for this hash map, since it's
-    /// used quite often.
-    // FIXME(eddyb) use a FnvHashSet<InternedTy<'tcx>> when equivalent keys can
-    // be queried from a HashSet.
-    interner: RefCell<FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>>,
-
-    // FIXME as above, use a hashset if equivalent elements can be queried.
-    substs_interner: RefCell<FnvHashMap<&'tcx Substs<'tcx>, &'tcx Substs<'tcx>>>,
-    bare_fn_interner: RefCell<FnvHashMap<&'tcx BareFnTy<'tcx>, &'tcx BareFnTy<'tcx>>>,
-    region_interner: RefCell<FnvHashMap<&'tcx Region, &'tcx Region>>,
-    stability_interner: RefCell<FnvHashMap<&'tcx attr::Stability, &'tcx attr::Stability>>,
-
-    pub dep_graph: DepGraph,
-
-    /// Common types, pre-interned for your convenience.
-    pub types: CommonTypes<'tcx>,
-
-    pub sess: &'tcx Session,
-    pub def_map: RefCell<DefMap>,
-
-    pub named_region_map: resolve_lifetime::NamedRegionMap,
-
-    pub region_maps: RegionMaps,
-
-    // For each fn declared in the local crate, type check stores the
-    // free-region relationships that were deduced from its where
-    // clauses and parameter types. These are then read again by
-    // borrowck. (They are not used during trans, and hence are not
-    // serialized or needed for cross-crate fns.)
-    free_region_maps: RefCell<NodeMap<FreeRegionMap>>,
-    // FIXME: jroesch make this a refcell
-
-    pub tables: RefCell<Tables<'tcx>>,
-
-    /// Maps from a trait item to the trait item "descriptor"
-    pub impl_or_trait_items: RefCell<DepTrackingMap<maps::ImplOrTraitItems<'tcx>>>,
-
-    /// Maps from a trait def-id to a list of the def-ids of its trait items
-    pub trait_item_def_ids: RefCell<DepTrackingMap<maps::TraitItemDefIds<'tcx>>>,
-
-    /// A cache for the trait_items() routine; note that the routine
-    /// itself pushes the `TraitItems` dependency node.
-    trait_items_cache: RefCell<DepTrackingMap<maps::TraitItems<'tcx>>>,
-
-    pub impl_trait_refs: RefCell<DepTrackingMap<maps::ImplTraitRefs<'tcx>>>,
-    pub trait_defs: RefCell<DepTrackingMap<maps::TraitDefs<'tcx>>>,
-    pub adt_defs: RefCell<DepTrackingMap<maps::AdtDefs<'tcx>>>,
-
-    /// Maps from the def-id of an item (trait/struct/enum/fn) to its
-    /// associated predicates.
-    pub predicates: RefCell<DepTrackingMap<maps::Predicates<'tcx>>>,
-
-    /// Maps from the def-id of a trait to the list of
-    /// super-predicates. This is a subset of the full list of
-    /// predicates. We store these in a separate map because we must
-    /// evaluate them even during type conversion, often before the
-    /// full predicates are available (note that supertraits have
-    /// additional acyclicity requirements).
-    pub super_predicates: RefCell<DepTrackingMap<maps::Predicates<'tcx>>>,
-
-    pub map: ast_map::Map<'tcx>,
-
-    // Records the free variables referenced by every closure
-    // expression. Do not track deps for this, just recompute it from
-    // scratch every time.
-    pub freevars: RefCell<FreevarMap>,
-
-    // Records the type of every item.
-    pub tcache: RefCell<DepTrackingMap<maps::Tcache<'tcx>>>,
-
-    // Internal cache for metadata decoding. No need to track deps on this.
-    pub rcache: RefCell<FnvHashMap<ty::CReaderCacheKey, Ty<'tcx>>>,
-
-    // Cache for the type-contents routine. FIXME -- track deps?
-    pub tc_cache: RefCell<FnvHashMap<Ty<'tcx>, ty::contents::TypeContents>>,
-
-    // Cache for various types within a method body and so forth.
-    //
-    // FIXME this should be made local to typeck, but it is currently used by one lint
-    pub ast_ty_to_ty_cache: RefCell<NodeMap<Ty<'tcx>>>,
-
-    // FIXME no dep tracking, but we should be able to remove this
-    pub ty_param_defs: RefCell<NodeMap<ty::TypeParameterDef<'tcx>>>,
-
-    // FIXME dep tracking -- should be harmless enough
-    pub normalized_cache: RefCell<FnvHashMap<Ty<'tcx>, Ty<'tcx>>>,
-
-    pub lang_items: middle::lang_items::LanguageItems,
-
-    /// Maps from def-id of a type or region parameter to its
-    /// (inferred) variance.
-    pub item_variance_map: RefCell<DepTrackingMap<maps::ItemVariances<'tcx>>>,
-
-    /// True if variance has already been computed; false otherwise.
-    pub variance_computed: Cell<bool>,
-
-    /// Maps a DefId of a type to a list of its inherent impls.
-    /// Contains implementations of methods that are inherent to a type.
-    /// Methods in these implementations don't need to be exported.
-    pub inherent_impls: RefCell<DepTrackingMap<maps::InherentImpls<'tcx>>>,
-
-    /// Maps a DefId of an impl to a list of its items.
-    /// Note that this contains all of the impls that we know about,
-    /// including ones in other crates. It's not clear that this is the best
-    /// way to do it.
-    pub impl_items: RefCell<DepTrackingMap<maps::ImplItems<'tcx>>>,
-
-    /// Set of used unsafe nodes (functions or blocks). Unsafe nodes not
-    /// present in this set can be warned about.
-    pub used_unsafe: RefCell<NodeSet>,
-
-    /// Set of nodes which mark locals as mutable which end up getting used at
-    /// some point. Local variable definitions not in this set can be warned
-    /// about.
-    pub used_mut_nodes: RefCell<NodeSet>,
-
-    /// The set of external nominal types whose implementations have been read.
-    /// This is used for lazy resolution of methods.
-    pub populated_external_types: RefCell<DefIdSet>,
-
-    /// The set of external primitive types whose implementations have been read.
-    /// FIXME(arielb1): why is this separate from populated_external_types?
-    pub populated_external_primitive_impls: RefCell<DefIdSet>,
-
-    /// These caches are used by const_eval when decoding external constants.
-    pub extern_const_statics: RefCell<DefIdMap<NodeId>>,
-    pub extern_const_fns: RefCell<DefIdMap<NodeId>>,
-
-    pub node_lint_levels: RefCell<FnvHashMap<(NodeId, lint::LintId),
-                                              lint::LevelSource>>,
-
-    /// The types that must be asserted to be the same size for `transmute`
-    /// to be valid. We gather up these restrictions in the intrinsicck pass
-    /// and check them in trans.
-    pub transmute_restrictions: RefCell<Vec<ty::TransmuteRestriction<'tcx>>>,
-
-    /// Maps any item's def-id to its stability index.
-    pub stability: RefCell<stability::Index<'tcx>>,
-
-    /// Caches the results of trait selection. This cache is used
-    /// for things that do not have to do with the parameters in scope.
-    pub selection_cache: traits::SelectionCache<'tcx>,
-
-    /// Caches the results of trait evaluation. This cache is used
-    /// for things that do not have to do with the parameters in scope.
-    /// Merge this with `selection_cache`?
-    pub evaluation_cache: traits::EvaluationCache<'tcx>,
-
-    /// A set of predicates that have been fulfilled *somewhere*.
-    /// This is used to avoid duplicate work. Predicates are only
-    /// added to this set when they mention only "global" names
-    /// (i.e., no type or lifetime parameters).
-    pub fulfilled_predicates: RefCell<traits::GlobalFulfilledPredicates<'tcx>>,
-
-    /// Caches the representation hints for struct definitions.
-    repr_hint_cache: RefCell<DepTrackingMap<maps::ReprHints<'tcx>>>,
-
-    /// Maps Expr NodeId's to their constant qualification.
-    pub const_qualif_map: RefCell<NodeMap<middle::const_qualif::ConstQualif>>,
-
-    /// Caches CoerceUnsized kinds for impls on custom types.
-    pub custom_coerce_unsized_kinds: RefCell<DefIdMap<ty::adjustment::CustomCoerceUnsized>>,
-
-    /// Maps a cast expression to its kind. This is keyed on the
-    /// *from* expression of the cast, not the cast itself.
-    pub cast_kinds: RefCell<NodeMap<ty::cast::CastKind>>,
-
-    /// Maps Fn items to a collection of fragment infos.
-    ///
-    /// The main goal is to identify data (each of which may be moved
-    /// or assigned) whose subparts are not moved nor assigned
-    /// (i.e. their state is *unfragmented*) and corresponding ast
-    /// nodes where the path to that data is moved or assigned.
-    ///
-    /// In the long term, unfragmented values will have their
-    /// destructor entirely driven by a single stack-local drop-flag,
-    /// and their parents, the collections of the unfragmented values
-    /// (or more simply, "fragmented values"), are mapped to the
-    /// corresponding collections of stack-local drop-flags.
-    ///
-    /// (However, in the short term that is not the case; e.g. some
-    /// unfragmented paths still need to be zeroed, namely when they
-    /// reference parent data from an outer scope that was not
-    /// entirely moved, and therefore that needs to be zeroed so that
-    /// we do not get double-drop when we hit the end of the parent
-    /// scope.)
-    ///
-    /// Also: currently the table solely holds keys for node-ids of
-    /// unfragmented values (see `FragmentInfo` enum definition), but
-    /// longer-term we will need to also store mappings from
-    /// fragmented data to the set of unfragmented pieces that
-    /// constitute it.
-    pub fragment_infos: RefCell<DefIdMap<Vec<ty::FragmentInfo>>>,
-}
-
-impl<'tcx> ctxt<'tcx> {
-    pub fn type_parameter_def(&self,
-                              node_id: NodeId)
-                              -> ty::TypeParameterDef<'tcx>
-    {
-        self.ty_param_defs.borrow().get(&node_id).unwrap().clone()
-    }
-
-    pub fn node_types(&self) -> Ref<NodeMap<Ty<'tcx>>> {
-        fn projection<'a, 'tcx>(tables: &'a Tables<'tcx>) -> &'a NodeMap<Ty<'tcx>> {
-            &tables.node_types
-        }
-
-        Ref::map(self.tables.borrow(), projection)
-    }
-
-    pub fn node_type_insert(&self, id: NodeId, ty: Ty<'tcx>) {
-        self.tables.borrow_mut().node_types.insert(id, ty);
-    }
-
-    pub fn intern_trait_def(&self, def: ty::TraitDef<'tcx>)
-                            -> &'tcx ty::TraitDef<'tcx> {
-        let did = def.trait_ref.def_id;
-        let interned = self.arenas.trait_defs.alloc(def);
-        if let Some(prev) = self.trait_defs.borrow_mut().insert(did, interned) {
-            self.sess.bug(&format!("Tried to overwrite interned TraitDef: {:?}",
-                                   prev))
-        }
-        interned
-    }
-
-    pub fn alloc_trait_def(&self, def: ty::TraitDef<'tcx>)
-                           -> &'tcx ty::TraitDef<'tcx> {
-        self.arenas.trait_defs.alloc(def)
-    }
-
-    pub fn intern_adt_def(&self,
-                          did: DefId,
-                          kind: ty::AdtKind,
-                          variants: Vec<ty::VariantDefData<'tcx, 'tcx>>)
-                          -> ty::AdtDefMaster<'tcx> {
-        let def = ty::AdtDefData::new(self, did, kind, variants);
-        let interned = self.arenas.adt_defs.alloc(def);
-        // this will need a transmute when reverse-variance is removed
-        if let Some(prev) = self.adt_defs.borrow_mut().insert(did, interned) {
-            self.sess.bug(&format!("Tried to overwrite interned AdtDef: {:?}",
-                                   prev))
-        }
-        interned
-    }
-
-    pub fn intern_stability(&self, stab: attr::Stability) -> &'tcx attr::Stability {
-        if let Some(st) = self.stability_interner.borrow().get(&stab) {
-            return st;
-        }
-
-        let interned = self.arenas.stability.alloc(stab);
-        if let Some(prev) = self.stability_interner
-                                .borrow_mut()
-                                .insert(interned, interned) {
-            self.sess.bug(&format!("Tried to overwrite interned Stability: {:?}",
-                                   prev))
-        }
-        interned
-    }
-
-    pub fn store_free_region_map(&self, id: NodeId, map: FreeRegionMap) {
-        if self.free_region_maps.borrow_mut().insert(id, map).is_some() {
-            self.sess.bug(&format!("Tried to overwrite interned FreeRegionMap for NodeId {:?}",
-                                   id))
-        }
-    }
-
-    pub fn free_region_map(&self, id: NodeId) -> FreeRegionMap {
-        self.free_region_maps.borrow()[&id].clone()
-    }
-
-    pub fn lift<T: ?Sized + Lift<'tcx>>(&self, value: &T) -> Option<T::Lifted> {
-        value.lift_to_tcx(self)
-    }
-
-    /// Create a type context and call the closure with a `&ty::ctxt` reference
-    /// to the context. This arrangement ensures that the type context and any
-    /// interned value (types, substs, etc.) can only be used while `ty::tls` has
-    /// a valid reference to the context, which allows formatting values that need it.
-    pub fn create_and_enter<F, R>(s: &'tcx Session,
-                                 arenas: &'tcx CtxtArenas<'tcx>,
-                                 def_map: RefCell<DefMap>,
-                                 named_region_map: resolve_lifetime::NamedRegionMap,
-                                 map: ast_map::Map<'tcx>,
-                                 freevars: FreevarMap,
-                                 region_maps: RegionMaps,
-                                 lang_items: middle::lang_items::LanguageItems,
-                                 stability: stability::Index<'tcx>,
-                                 f: F) -> R
-                                 where F: FnOnce(&ctxt<'tcx>) -> R
-    {
-        let interner = RefCell::new(FnvHashMap());
-        let common_types = CommonTypes::new(&arenas.type_, &interner);
-        let dep_graph = map.dep_graph.clone();
-        let fulfilled_predicates = traits::GlobalFulfilledPredicates::new(dep_graph.clone());
-        tls::enter(ctxt {
-            arenas: arenas,
-            interner: interner,
-            substs_interner: RefCell::new(FnvHashMap()),
-            bare_fn_interner: RefCell::new(FnvHashMap()),
-            region_interner: RefCell::new(FnvHashMap()),
-            stability_interner: RefCell::new(FnvHashMap()),
-            dep_graph: dep_graph.clone(),
-            types: common_types,
-            named_region_map: named_region_map,
-            region_maps: region_maps,
-            free_region_maps: RefCell::new(FnvHashMap()),
-            item_variance_map: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
-            variance_computed: Cell::new(false),
-            sess: s,
-            def_map: def_map,
-            tables: RefCell::new(Tables::empty()),
-            impl_trait_refs: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
-            trait_defs: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
-            adt_defs: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
-            predicates: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
-            super_predicates: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
-            fulfilled_predicates: RefCell::new(fulfilled_predicates),
-            map: map,
-            freevars: RefCell::new(freevars),
-            tcache: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
-            rcache: RefCell::new(FnvHashMap()),
-            tc_cache: RefCell::new(FnvHashMap()),
-            ast_ty_to_ty_cache: RefCell::new(NodeMap()),
-            impl_or_trait_items: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
-            trait_item_def_ids: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
-            trait_items_cache: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
-            ty_param_defs: RefCell::new(NodeMap()),
-            normalized_cache: RefCell::new(FnvHashMap()),
-            lang_items: lang_items,
-            inherent_impls: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
-            impl_items: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
-            used_unsafe: RefCell::new(NodeSet()),
-            used_mut_nodes: RefCell::new(NodeSet()),
-            populated_external_types: RefCell::new(DefIdSet()),
-            populated_external_primitive_impls: RefCell::new(DefIdSet()),
-            extern_const_statics: RefCell::new(DefIdMap()),
-            extern_const_fns: RefCell::new(DefIdMap()),
-            node_lint_levels: RefCell::new(FnvHashMap()),
-            transmute_restrictions: RefCell::new(Vec::new()),
-            stability: RefCell::new(stability),
-            selection_cache: traits::SelectionCache::new(),
-            evaluation_cache: traits::EvaluationCache::new(),
-            repr_hint_cache: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
-            const_qualif_map: RefCell::new(NodeMap()),
-            custom_coerce_unsized_kinds: RefCell::new(DefIdMap()),
-            cast_kinds: RefCell::new(NodeMap()),
-            fragment_infos: RefCell::new(DefIdMap())
-       }, f)
-    }
-}
-
-/// A trait implemented for all X<'a> types which can be safely and
-/// efficiently converted to X<'tcx> as long as they are part of the
-/// provided ty::ctxt<'tcx>.
-/// This can be done, for example, for Ty<'tcx> or &'tcx Substs<'tcx>
-/// by looking them up in their respective interners.
-/// None is returned if the value or one of the components is not part
-/// of the provided context.
-/// For Ty, None can be returned if either the type interner doesn't
-/// contain the TypeVariants key or the address of the interned
-/// pointer differs. The latter case is possible if a primitive type,
-/// e.g. `()` or `u8`, was interned in a different context.
-pub trait Lift<'tcx> {
-    type Lifted;
-    fn lift_to_tcx(&self, tcx: &ctxt<'tcx>) -> Option<Self::Lifted>;
-}
-
-impl<'a, 'tcx> Lift<'tcx> for Ty<'a> {
-    type Lifted = Ty<'tcx>;
-    fn lift_to_tcx(&self, tcx: &ctxt<'tcx>) -> Option<Ty<'tcx>> {
-        if let Some(&ty) = tcx.interner.borrow().get(&self.sty) {
-            if *self as *const _ == ty as *const _ {
-                return Some(ty);
-            }
-        }
-        None
-    }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for &'a Substs<'a> {
-    type Lifted = &'tcx Substs<'tcx>;
-    fn lift_to_tcx(&self, tcx: &ctxt<'tcx>) -> Option<&'tcx Substs<'tcx>> {
-        if let Some(&substs) = tcx.substs_interner.borrow().get(*self) {
-            if *self as *const _ == substs as *const _ {
-                return Some(substs);
-            }
-        }
-        None
-    }
-}
-
-
-pub mod tls {
-    use middle::ty;
-
-    use std::cell::Cell;
-    use std::fmt;
-    use syntax::codemap;
-
-    /// Marker type used for the scoped TLS slot.
-    /// The type context cannot be used directly because the scoped TLS
-    /// in libstd doesn't allow types generic over lifetimes.
-    struct ThreadLocalTyCx;
-
-    thread_local! {
-        static TLS_TCX: Cell<Option<*const ThreadLocalTyCx>> = Cell::new(None)
-    }
-
-    fn span_debug(span: codemap::Span, f: &mut fmt::Formatter) -> fmt::Result {
-        with(|tcx| {
-            write!(f, "{}", tcx.sess.codemap().span_to_string(span))
-        })
-    }
-
-    pub fn enter<'tcx, F: FnOnce(&ty::ctxt<'tcx>) -> R, R>(tcx: ty::ctxt<'tcx>, f: F) -> R {
-        codemap::SPAN_DEBUG.with(|span_dbg| {
-            let original_span_debug = span_dbg.get();
-            span_dbg.set(span_debug);
-            let tls_ptr = &tcx as *const _ as *const ThreadLocalTyCx;
-            let result = TLS_TCX.with(|tls| {
-                let prev = tls.get();
-                tls.set(Some(tls_ptr));
-                let ret = f(&tcx);
-                tls.set(prev);
-                ret
-            });
-            span_dbg.set(original_span_debug);
-            result
-        })
-    }
-
-    pub fn with<F: FnOnce(&ty::ctxt) -> R, R>(f: F) -> R {
-        TLS_TCX.with(|tcx| {
-            let tcx = tcx.get().unwrap();
-            f(unsafe { &*(tcx as *const ty::ctxt) })
-        })
-    }
-
-    pub fn with_opt<F: FnOnce(Option<&ty::ctxt>) -> R, R>(f: F) -> R {
-        if TLS_TCX.with(|tcx| tcx.get().is_some()) {
-            with(|v| f(Some(v)))
-        } else {
-            f(None)
-        }
-    }
-}
-
-macro_rules! sty_debug_print {
-    ($ctxt: expr, $($variant: ident),*) => {{
-        // curious inner module to allow variant names to be used as
-        // variable names.
-        #[allow(non_snake_case)]
-        mod inner {
-            use middle::ty;
-            #[derive(Copy, Clone)]
-            struct DebugStat {
-                total: usize,
-                region_infer: usize,
-                ty_infer: usize,
-                both_infer: usize,
-            }
-
-            pub fn go(tcx: &ty::ctxt) {
-                let mut total = DebugStat {
-                    total: 0,
-                    region_infer: 0, ty_infer: 0, both_infer: 0,
-                };
-                $(let mut $variant = total;)*
-
-
-                for (_, t) in tcx.interner.borrow().iter() {
-                    let variant = match t.sty {
-                        ty::TyBool | ty::TyChar | ty::TyInt(..) | ty::TyUint(..) |
-                            ty::TyFloat(..) | ty::TyStr => continue,
-                        ty::TyError => /* unimportant */ continue,
-                        $(ty::$variant(..) => &mut $variant,)*
-                    };
-                    let region = t.flags.get().intersects(ty::TypeFlags::HAS_RE_INFER);
-                    let ty = t.flags.get().intersects(ty::TypeFlags::HAS_TY_INFER);
-
-                    variant.total += 1;
-                    total.total += 1;
-                    if region { total.region_infer += 1; variant.region_infer += 1 }
-                    if ty { total.ty_infer += 1; variant.ty_infer += 1 }
-                    if region && ty { total.both_infer += 1; variant.both_infer += 1 }
-                }
-                println!("Ty interner             total           ty region  both");
-                $(println!("    {:18}: {uses:6} {usespc:4.1}%, \
-{ty:4.1}% {region:5.1}% {both:4.1}%",
-                           stringify!($variant),
-                           uses = $variant.total,
-                           usespc = $variant.total as f64 * 100.0 / total.total as f64,
-                           ty = $variant.ty_infer as f64 * 100.0  / total.total as f64,
-                           region = $variant.region_infer as f64 * 100.0  / total.total as f64,
-                           both = $variant.both_infer as f64 * 100.0  / total.total as f64);
-                  )*
-                println!("                  total {uses:6}        \
-{ty:4.1}% {region:5.1}% {both:4.1}%",
-                         uses = total.total,
-                         ty = total.ty_infer as f64 * 100.0  / total.total as f64,
-                         region = total.region_infer as f64 * 100.0  / total.total as f64,
-                         both = total.both_infer as f64 * 100.0  / total.total as f64)
-            }
-        }
-
-        inner::go($ctxt)
-    }}
-}
-
-impl<'tcx> ctxt<'tcx> {
-    pub fn print_debug_stats(&self) {
-        sty_debug_print!(
-            self,
-            TyEnum, TyBox, TyArray, TySlice, TyRawPtr, TyRef, TyBareFn, TyTrait,
-            TyStruct, TyClosure, TyTuple, TyParam, TyInfer, TyProjection);
-
-        println!("Substs interner: #{}", self.substs_interner.borrow().len());
-        println!("BareFnTy interner: #{}", self.bare_fn_interner.borrow().len());
-        println!("Region interner: #{}", self.region_interner.borrow().len());
-        println!("Stability interner: #{}", self.stability_interner.borrow().len());
-    }
-}
-
-
-/// An entry in the type interner.
-pub struct InternedTy<'tcx> {
-    ty: Ty<'tcx>
-}
-
-// NB: An InternedTy compares and hashes as a sty.
-impl<'tcx> PartialEq for InternedTy<'tcx> {
-    fn eq(&self, other: &InternedTy<'tcx>) -> bool {
-        self.ty.sty == other.ty.sty
-    }
-}
-
-impl<'tcx> Eq for InternedTy<'tcx> {}
-
-impl<'tcx> Hash for InternedTy<'tcx> {
-    fn hash<H: Hasher>(&self, s: &mut H) {
-        self.ty.sty.hash(s)
-    }
-}
-
-impl<'tcx> Borrow<TypeVariants<'tcx>> for InternedTy<'tcx> {
-    fn borrow<'a>(&'a self) -> &'a TypeVariants<'tcx> {
-        &self.ty.sty
-    }
-}
-
-fn bound_list_is_sorted(bounds: &[ty::PolyProjectionPredicate]) -> bool {
-    bounds.is_empty() ||
-        bounds[1..].iter().enumerate().all(
-            |(index, bound)| bounds[index].sort_key() <= bound.sort_key())
-}
-
-impl<'tcx> ctxt<'tcx> {
-    // Type constructors
-    pub fn mk_substs(&self, substs: Substs<'tcx>) -> &'tcx Substs<'tcx> {
-        if let Some(substs) = self.substs_interner.borrow().get(&substs) {
-            return *substs;
-        }
-
-        let substs = self.arenas.substs.alloc(substs);
-        self.substs_interner.borrow_mut().insert(substs, substs);
-        substs
-    }
-
-    /// Create an unsafe fn ty based on a safe fn ty.
-    pub fn safe_to_unsafe_fn_ty(&self, bare_fn: &BareFnTy<'tcx>) -> Ty<'tcx> {
-        assert_eq!(bare_fn.unsafety, hir::Unsafety::Normal);
-        let unsafe_fn_ty_a = self.mk_bare_fn(ty::BareFnTy {
-            unsafety: hir::Unsafety::Unsafe,
-            abi: bare_fn.abi,
-            sig: bare_fn.sig.clone()
-        });
-        self.mk_fn(None, unsafe_fn_ty_a)
-    }
-
-    pub fn mk_bare_fn(&self, bare_fn: BareFnTy<'tcx>) -> &'tcx BareFnTy<'tcx> {
-        if let Some(bare_fn) = self.bare_fn_interner.borrow().get(&bare_fn) {
-            return *bare_fn;
-        }
-
-        let bare_fn = self.arenas.bare_fn.alloc(bare_fn);
-        self.bare_fn_interner.borrow_mut().insert(bare_fn, bare_fn);
-        bare_fn
-    }
-
-    pub fn mk_region(&self, region: Region) -> &'tcx Region {
-        if let Some(region) = self.region_interner.borrow().get(&region) {
-            return *region;
-        }
-
-        let region = self.arenas.region.alloc(region);
-        self.region_interner.borrow_mut().insert(region, region);
-        region
-    }
-
-    fn intern_ty(type_arena: &'tcx TypedArena<TyS<'tcx>>,
-                 interner: &RefCell<FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>>,
-                 st: TypeVariants<'tcx>)
-                 -> Ty<'tcx> {
-        let ty: Ty /* don't be &mut TyS */ = {
-            let mut interner = interner.borrow_mut();
-            match interner.get(&st) {
-                Some(ty) => return *ty,
-                _ => ()
-            }
-
-            let flags = super::flags::FlagComputation::for_sty(&st);
-
-            let ty = match () {
-                () => type_arena.alloc(TyS { sty: st,
-                                             flags: Cell::new(flags.flags),
-                                             region_depth: flags.depth, }),
-            };
-
-            interner.insert(InternedTy { ty: ty }, ty);
-            ty
-        };
-
-        debug!("Interned type: {:?} Pointer: {:?}",
-            ty, ty as *const TyS);
-        ty
-    }
-
-    // Interns the given type variant, storing the result in self.interner, and
-    // returns the interned Ty<'tcx> (see the comments on `intern_ty` above).
-    pub fn mk_ty(&self, st: TypeVariants<'tcx>) -> Ty<'tcx> {
-        ctxt::intern_ty(&self.arenas.type_, &self.interner, st)
-    }
-
-    pub fn mk_mach_int(&self, tm: ast::IntTy) -> Ty<'tcx> {
-        match tm {
-            ast::IntTy::Is   => self.types.isize,
-            ast::IntTy::I8   => self.types.i8,
-            ast::IntTy::I16  => self.types.i16,
-            ast::IntTy::I32  => self.types.i32,
-            ast::IntTy::I64  => self.types.i64,
-        }
-    }
-
-    pub fn mk_mach_uint(&self, tm: ast::UintTy) -> Ty<'tcx> {
-        match tm {
-            ast::UintTy::Us   => self.types.usize,
-            ast::UintTy::U8   => self.types.u8,
-            ast::UintTy::U16  => self.types.u16,
-            ast::UintTy::U32  => self.types.u32,
-            ast::UintTy::U64  => self.types.u64,
-        }
-    }
-
-    pub fn mk_mach_float(&self, tm: ast::FloatTy) -> Ty<'tcx> {
-        match tm {
-            ast::FloatTy::F32  => self.types.f32,
-            ast::FloatTy::F64  => self.types.f64,
-        }
-    }
-
-    pub fn mk_str(&self) -> Ty<'tcx> {
-        self.mk_ty(TyStr)
-    }
-
-    pub fn mk_static_str(&self) -> Ty<'tcx> {
-        self.mk_imm_ref(self.mk_region(ty::ReStatic), self.mk_str())
-    }
-
-    pub fn mk_enum(&self, def: AdtDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
-        // take a copy of substs so that we own the vectors inside
-        self.mk_ty(TyEnum(def, substs))
-    }
-
-    pub fn mk_box(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
-        self.mk_ty(TyBox(ty))
-    }
-
-    pub fn mk_ptr(&self, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
-        self.mk_ty(TyRawPtr(tm))
-    }
-
-    pub fn mk_ref(&self, r: &'tcx Region, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
-        self.mk_ty(TyRef(r, tm))
-    }
-
-    pub fn mk_mut_ref(&self, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
-        self.mk_ref(r, TypeAndMut {ty: ty, mutbl: hir::MutMutable})
-    }
-
-    pub fn mk_imm_ref(&self, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
-        self.mk_ref(r, TypeAndMut {ty: ty, mutbl: hir::MutImmutable})
-    }
-
-    pub fn mk_mut_ptr(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
-        self.mk_ptr(TypeAndMut {ty: ty, mutbl: hir::MutMutable})
-    }
-
-    pub fn mk_imm_ptr(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
-        self.mk_ptr(TypeAndMut {ty: ty, mutbl: hir::MutImmutable})
-    }
-
-    pub fn mk_nil_ptr(&self) -> Ty<'tcx> {
-        self.mk_imm_ptr(self.mk_nil())
-    }
-
-    pub fn mk_array(&self, ty: Ty<'tcx>, n: usize) -> Ty<'tcx> {
-        self.mk_ty(TyArray(ty, n))
-    }
-
-    pub fn mk_slice(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
-        self.mk_ty(TySlice(ty))
-    }
-
-    pub fn mk_tup(&self, ts: Vec<Ty<'tcx>>) -> Ty<'tcx> {
-        self.mk_ty(TyTuple(ts))
-    }
-
-    pub fn mk_nil(&self) -> Ty<'tcx> {
-        self.mk_tup(Vec::new())
-    }
-
-    pub fn mk_bool(&self) -> Ty<'tcx> {
-        self.mk_ty(TyBool)
-    }
-
-    pub fn mk_fn(&self,
-                 opt_def_id: Option<DefId>,
-                 fty: &'tcx BareFnTy<'tcx>) -> Ty<'tcx> {
-        self.mk_ty(TyBareFn(opt_def_id, fty))
-    }
-
-    pub fn mk_ctor_fn(&self,
-                      def_id: DefId,
-                      input_tys: &[Ty<'tcx>],
-                      output: Ty<'tcx>) -> Ty<'tcx> {
-        let input_args = input_tys.iter().cloned().collect();
-        self.mk_fn(Some(def_id), self.mk_bare_fn(BareFnTy {
-            unsafety: hir::Unsafety::Normal,
-            abi: Abi::Rust,
-            sig: ty::Binder(ty::FnSig {
-                inputs: input_args,
-                output: ty::FnConverging(output),
-                variadic: false
-            })
-        }))
-    }
-
-    pub fn mk_trait(&self,
-                    principal: ty::PolyTraitRef<'tcx>,
-                    bounds: ExistentialBounds<'tcx>)
-                    -> Ty<'tcx>
-    {
-        assert!(bound_list_is_sorted(&bounds.projection_bounds));
-
-        let inner = box TraitTy {
-            principal: principal,
-            bounds: bounds
-        };
-        self.mk_ty(TyTrait(inner))
-    }
-
-    pub fn mk_projection(&self,
-                         trait_ref: TraitRef<'tcx>,
-                         item_name: Name)
-                         -> Ty<'tcx> {
-        // take a copy of substs so that we own the vectors inside
-        let inner = ProjectionTy { trait_ref: trait_ref, item_name: item_name };
-        self.mk_ty(TyProjection(inner))
-    }
-
-    pub fn mk_struct(&self, def: AdtDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
-        // take a copy of substs so that we own the vectors inside
-        self.mk_ty(TyStruct(def, substs))
-    }
-
-    pub fn mk_closure(&self,
-                      closure_id: DefId,
-                      substs: &'tcx Substs<'tcx>,
-                      tys: Vec<Ty<'tcx>>)
-                      -> Ty<'tcx> {
-        self.mk_closure_from_closure_substs(closure_id, Box::new(ClosureSubsts {
-            func_substs: substs,
-            upvar_tys: tys
-        }))
-    }
-
-    pub fn mk_closure_from_closure_substs(&self,
-                                          closure_id: DefId,
-                                          closure_substs: Box<ClosureSubsts<'tcx>>)
-                                          -> Ty<'tcx> {
-        self.mk_ty(TyClosure(closure_id, closure_substs))
-    }
-
-    pub fn mk_var(&self, v: TyVid) -> Ty<'tcx> {
-        self.mk_infer(TyVar(v))
-    }
-
-    pub fn mk_int_var(&self, v: IntVid) -> Ty<'tcx> {
-        self.mk_infer(IntVar(v))
-    }
-
-    pub fn mk_float_var(&self, v: FloatVid) -> Ty<'tcx> {
-        self.mk_infer(FloatVar(v))
-    }
-
-    pub fn mk_infer(&self, it: InferTy) -> Ty<'tcx> {
-        self.mk_ty(TyInfer(it))
-    }
-
-    pub fn mk_param(&self,
-                    space: subst::ParamSpace,
-                    index: u32,
-                    name: Name) -> Ty<'tcx> {
-        self.mk_ty(TyParam(ParamTy { space: space, idx: index, name: name }))
-    }
-
-    pub fn mk_self_type(&self) -> Ty<'tcx> {
-        self.mk_param(subst::SelfSpace, 0, special_idents::type_self.name)
-    }
-
-    pub fn mk_param_from_def(&self, def: &ty::TypeParameterDef) -> Ty<'tcx> {
-        self.mk_param(def.space, def.index, def.name)
-    }
-
-    pub fn trait_items(&self, trait_did: DefId) -> Rc<Vec<ty::ImplOrTraitItem<'tcx>>> {
-        self.trait_items_cache.memoize(trait_did, || {
-            let def_ids = self.trait_item_def_ids(trait_did);
-            Rc::new(def_ids.iter()
-                           .map(|d| self.impl_or_trait_item(d.def_id()))
-                           .collect())
-        })
-    }
-
-    /// Obtain the representation annotation for a struct definition.
-    pub fn lookup_repr_hints(&self, did: DefId) -> Rc<Vec<attr::ReprAttr>> {
-        self.repr_hint_cache.memoize(did, || {
-            Rc::new(if did.is_local() {
-                self.get_attrs(did).iter().flat_map(|meta| {
-                    attr::find_repr_attrs(self.sess.diagnostic(), meta).into_iter()
-                }).collect()
-            } else {
-                self.sess.cstore.repr_attrs(did)
-            })
-        })
-    }
-}
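The deleted ty/context.rs above relies on one pattern throughout: a value (a TyS, Substs, BareFnTy, Region or Stability) is allocated once in a TypedArena, recorded in a hash map keyed by the value itself, and handed out as a shared reference, so that later requests for a structurally equal value return the same pointer and equality can be checked by address (which is what Lift::lift_to_tcx does). The following standalone sketch shows that hash-consing idea with a simplified type enum, an Rc in place of the arena reference, and a plain HashMap in place of FnvHashMap; the names here (SimpleTy, Interner) are illustrative and not part of rustc.

use std::collections::HashMap;
use std::rc::Rc;

// A simplified stand-in for TypeVariants<'tcx>.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
enum SimpleTy {
    Bool,
    Int,
    Ref(Rc<SimpleTy>),
}

// A simplified stand-in for the ctxt interner: each distinct variant is stored
// once, and every later request returns a handle to the same allocation, so
// "same type" can be decided by pointer identity.
#[derive(Default)]
struct Interner {
    map: HashMap<SimpleTy, Rc<SimpleTy>>,
}

impl Interner {
    fn intern(&mut self, st: SimpleTy) -> Rc<SimpleTy> {
        if let Some(ty) = self.map.get(&st) {
            return ty.clone();
        }
        let ty = Rc::new(st.clone());
        self.map.insert(st, ty.clone());
        ty
    }
}

fn main() {
    let mut interner = Interner::default();
    let a = interner.intern(SimpleTy::Ref(Rc::new(SimpleTy::Int)));
    let b = interner.intern(SimpleTy::Ref(Rc::new(SimpleTy::Int)));
    // Structurally equal requests yield the same interned allocation.
    assert!(Rc::ptr_eq(&a, &b));
    println!("interned {} distinct types", interner.map.len());
}

In the real context the arena keeps each reference alive for the whole 'tcx lifetime, which is why interned values can safely be compared by address in Lift and counted in the sty_debug_print! statistics.
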
diff --git a/src/librustc/middle/ty/error.rs b/src/librustc/middle/ty/error.rs
deleted file mode 100644 (file)
index ab48fd7..0000000
+++ /dev/null
@@ -1,343 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use middle::def_id::DefId;
-use middle::subst;
-use middle::infer::type_variable;
-use middle::ty::{self, BoundRegion, Region, Ty};
-
-use std::fmt;
-use syntax::abi;
-use syntax::ast::{self, Name};
-use syntax::codemap::Span;
-use syntax::errors::DiagnosticBuilder;
-
-use rustc_front::hir;
-
-#[derive(Clone, Copy, Debug)]
-pub struct ExpectedFound<T> {
-    pub expected: T,
-    pub found: T
-}
-
-// Data structures used in type unification
-#[derive(Clone, Debug)]
-pub enum TypeError<'tcx> {
-    Mismatch,
-    UnsafetyMismatch(ExpectedFound<hir::Unsafety>),
-    AbiMismatch(ExpectedFound<abi::Abi>),
-    Mutability,
-    BoxMutability,
-    PtrMutability,
-    RefMutability,
-    VecMutability,
-    TupleSize(ExpectedFound<usize>),
-    FixedArraySize(ExpectedFound<usize>),
-    TyParamSize(ExpectedFound<usize>),
-    ArgCount,
-    RegionsDoesNotOutlive(Region, Region),
-    RegionsNotSame(Region, Region),
-    RegionsNoOverlap(Region, Region),
-    RegionsInsufficientlyPolymorphic(BoundRegion, Region),
-    RegionsOverlyPolymorphic(BoundRegion, Region),
-    Sorts(ExpectedFound<Ty<'tcx>>),
-    IntegerAsChar,
-    IntMismatch(ExpectedFound<ty::IntVarValue>),
-    FloatMismatch(ExpectedFound<ast::FloatTy>),
-    Traits(ExpectedFound<DefId>),
-    BuiltinBoundsMismatch(ExpectedFound<ty::BuiltinBounds>),
-    VariadicMismatch(ExpectedFound<bool>),
-    CyclicTy,
-    ConvergenceMismatch(ExpectedFound<bool>),
-    ProjectionNameMismatched(ExpectedFound<Name>),
-    ProjectionBoundsLength(ExpectedFound<usize>),
-    TyParamDefaultMismatch(ExpectedFound<type_variable::Default<'tcx>>)
-}
-
-#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Hash, Debug, Copy)]
-pub enum UnconstrainedNumeric {
-    UnconstrainedFloat,
-    UnconstrainedInt,
-    Neither,
-}
-
-/// Explains the source of a type error in a short, human-readable way. This is meant to be placed
-/// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()`
-/// afterwards to present additional details, particularly when it comes to lifetime-related
-/// errors.
-impl<'tcx> fmt::Display for TypeError<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        use self::TypeError::*;
-        fn report_maybe_different(f: &mut fmt::Formatter,
-                                  expected: String, found: String) -> fmt::Result {
-            // A naive approach to making sure that we're not reporting silly errors such as:
-            // (expected closure, found closure).
-            if expected == found {
-                write!(f, "expected {}, found a different {}", expected, found)
-            } else {
-                write!(f, "expected {}, found {}", expected, found)
-            }
-        }
-
-        match *self {
-            CyclicTy => write!(f, "cyclic type of infinite size"),
-            Mismatch => write!(f, "types differ"),
-            UnsafetyMismatch(values) => {
-                write!(f, "expected {} fn, found {} fn",
-                       values.expected,
-                       values.found)
-            }
-            AbiMismatch(values) => {
-                write!(f, "expected {} fn, found {} fn",
-                       values.expected,
-                       values.found)
-            }
-            Mutability => write!(f, "values differ in mutability"),
-            BoxMutability => {
-                write!(f, "boxed values differ in mutability")
-            }
-            VecMutability => write!(f, "vectors differ in mutability"),
-            PtrMutability => write!(f, "pointers differ in mutability"),
-            RefMutability => write!(f, "references differ in mutability"),
-            TyParamSize(values) => {
-                write!(f, "expected a type with {} type params, \
-                           found one with {} type params",
-                       values.expected,
-                       values.found)
-            }
-            FixedArraySize(values) => {
-                write!(f, "expected an array with a fixed size of {} elements, \
-                           found one with {} elements",
-                       values.expected,
-                       values.found)
-            }
-            TupleSize(values) => {
-                write!(f, "expected a tuple with {} elements, \
-                           found one with {} elements",
-                       values.expected,
-                       values.found)
-            }
-            ArgCount => {
-                write!(f, "incorrect number of function parameters")
-            }
-            RegionsDoesNotOutlive(..) => {
-                write!(f, "lifetime mismatch")
-            }
-            RegionsNotSame(..) => {
-                write!(f, "lifetimes are not the same")
-            }
-            RegionsNoOverlap(..) => {
-                write!(f, "lifetimes do not intersect")
-            }
-            RegionsInsufficientlyPolymorphic(br, _) => {
-                write!(f, "expected bound lifetime parameter {}, \
-                           found concrete lifetime", br)
-            }
-            RegionsOverlyPolymorphic(br, _) => {
-                write!(f, "expected concrete lifetime, \
-                           found bound lifetime parameter {}", br)
-            }
-            Sorts(values) => ty::tls::with(|tcx| {
-                report_maybe_different(f, values.expected.sort_string(tcx),
-                                       values.found.sort_string(tcx))
-            }),
-            Traits(values) => ty::tls::with(|tcx| {
-                report_maybe_different(f,
-                                       format!("trait `{}`",
-                                               tcx.item_path_str(values.expected)),
-                                       format!("trait `{}`",
-                                               tcx.item_path_str(values.found)))
-            }),
-            BuiltinBoundsMismatch(values) => {
-                if values.expected.is_empty() {
-                    write!(f, "expected no bounds, found `{}`",
-                           values.found)
-                } else if values.found.is_empty() {
-                    write!(f, "expected bounds `{}`, found no bounds",
-                           values.expected)
-                } else {
-                    write!(f, "expected bounds `{}`, found bounds `{}`",
-                           values.expected,
-                           values.found)
-                }
-            }
-            IntegerAsChar => {
-                write!(f, "expected an integral type, found `char`")
-            }
-            IntMismatch(ref values) => {
-                write!(f, "expected `{:?}`, found `{:?}`",
-                       values.expected,
-                       values.found)
-            }
-            FloatMismatch(ref values) => {
-                write!(f, "expected `{:?}`, found `{:?}`",
-                       values.expected,
-                       values.found)
-            }
-            VariadicMismatch(ref values) => {
-                write!(f, "expected {} fn, found {} function",
-                       if values.expected { "variadic" } else { "non-variadic" },
-                       if values.found { "variadic" } else { "non-variadic" })
-            }
-            ConvergenceMismatch(ref values) => {
-                write!(f, "expected {} fn, found {} function",
-                       if values.expected { "converging" } else { "diverging" },
-                       if values.found { "converging" } else { "diverging" })
-            }
-            ProjectionNameMismatched(ref values) => {
-                write!(f, "expected {}, found {}",
-                       values.expected,
-                       values.found)
-            }
-            ProjectionBoundsLength(ref values) => {
-                write!(f, "expected {} associated type bindings, found {}",
-                       values.expected,
-                       values.found)
-            },
-            TyParamDefaultMismatch(ref values) => {
-                write!(f, "conflicting type parameter defaults `{}` and `{}`",
-                       values.expected.ty,
-                       values.found.ty)
-            }
-        }
-    }
-}
-
-impl<'tcx> ty::TyS<'tcx> {
-    fn sort_string(&self, cx: &ty::ctxt) -> String {
-        match self.sty {
-            ty::TyBool | ty::TyChar | ty::TyInt(_) |
-            ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr => self.to_string(),
-            ty::TyTuple(ref tys) if tys.is_empty() => self.to_string(),
-
-            ty::TyEnum(def, _) => format!("enum `{}`", cx.item_path_str(def.did)),
-            ty::TyBox(_) => "box".to_string(),
-            ty::TyArray(_, n) => format!("array of {} elements", n),
-            ty::TySlice(_) => "slice".to_string(),
-            ty::TyRawPtr(_) => "*-ptr".to_string(),
-            ty::TyRef(_, _) => "&-ptr".to_string(),
-            ty::TyBareFn(Some(_), _) => format!("fn item"),
-            ty::TyBareFn(None, _) => "fn pointer".to_string(),
-            ty::TyTrait(ref inner) => {
-                format!("trait {}", cx.item_path_str(inner.principal_def_id()))
-            }
-            ty::TyStruct(def, _) => {
-                format!("struct `{}`", cx.item_path_str(def.did))
-            }
-            ty::TyClosure(..) => "closure".to_string(),
-            ty::TyTuple(_) => "tuple".to_string(),
-            ty::TyInfer(ty::TyVar(_)) => "inferred type".to_string(),
-            ty::TyInfer(ty::IntVar(_)) => "integral variable".to_string(),
-            ty::TyInfer(ty::FloatVar(_)) => "floating-point variable".to_string(),
-            ty::TyInfer(ty::FreshTy(_)) => "skolemized type".to_string(),
-            ty::TyInfer(ty::FreshIntTy(_)) => "skolemized integral type".to_string(),
-            ty::TyInfer(ty::FreshFloatTy(_)) => "skolemized floating-point type".to_string(),
-            ty::TyProjection(_) => "associated type".to_string(),
-            ty::TyParam(ref p) => {
-                if p.space == subst::SelfSpace {
-                    "Self".to_string()
-                } else {
-                    "type parameter".to_string()
-                }
-            }
-            ty::TyError => "type error".to_string(),
-        }
-    }
-}
-
-impl<'tcx> ty::ctxt<'tcx> {
-    pub fn note_and_explain_type_err(&self,
-                                     db: &mut DiagnosticBuilder,
-                                     err: &TypeError<'tcx>,
-                                     sp: Span) {
-        use self::TypeError::*;
-
-        match err.clone() {
-            RegionsDoesNotOutlive(subregion, superregion) => {
-                self.note_and_explain_region(db, "", subregion, "...");
-                self.note_and_explain_region(db, "...does not necessarily outlive ",
-                                           superregion, "");
-            }
-            RegionsNotSame(region1, region2) => {
-                self.note_and_explain_region(db, "", region1, "...");
-                self.note_and_explain_region(db, "...is not the same lifetime as ",
-                                           region2, "");
-            }
-            RegionsNoOverlap(region1, region2) => {
-                self.note_and_explain_region(db, "", region1, "...");
-                self.note_and_explain_region(db, "...does not overlap ",
-                                           region2, "");
-            }
-            RegionsInsufficientlyPolymorphic(_, conc_region) => {
-                self.note_and_explain_region(db, "concrete lifetime that was found is ",
-                                           conc_region, "");
-            }
-            RegionsOverlyPolymorphic(_, ty::ReVar(_)) => {
-                // don't bother to print out the message below for
-                // inference variables, it's not very illuminating.
-            }
-            RegionsOverlyPolymorphic(_, conc_region) => {
-                self.note_and_explain_region(db, "expected concrete lifetime is ",
-                                           conc_region, "");
-            }
-            Sorts(values) => {
-                let expected_str = values.expected.sort_string(self);
-                let found_str = values.found.sort_string(self);
-                if expected_str == found_str && expected_str == "closure" {
-                    db.span_note(sp,
-                        "no two closures, even if identical, have the same type");
-                    db.span_help(sp,
-                        "consider boxing your closure and/or using it as a trait object");
-                }
-            },
-            TyParamDefaultMismatch(values) => {
-                let expected = values.expected;
-                let found = values.found;
-                db.span_note(sp, &format!("conflicting type parameter defaults `{}` and `{}`",
-                                          expected.ty,
-                                          found.ty));
-
-                match
-                    self.map.as_local_node_id(expected.def_id)
-                            .and_then(|node_id| self.map.opt_span(node_id))
-                {
-                    Some(span) => {
-                        db.span_note(span, "a default was defined here...");
-                    }
-                    None => {
-                        db.note(&format!("a default is defined on `{}`",
-                                         self.item_path_str(expected.def_id)));
-                    }
-                }
-
-                db.span_note(
-                    expected.origin_span,
-                    "...that was applied to an unconstrained type variable here");
-
-                match
-                    self.map.as_local_node_id(found.def_id)
-                            .and_then(|node_id| self.map.opt_span(node_id))
-                {
-                    Some(span) => {
-                        db.span_note(span, "a second default was defined here...");
-                    }
-                    None => {
-                        db.note(&format!("a second default is defined on `{}`",
-                                         self.item_path_str(found.def_id)));
-                    }
-                }
-
-                db.span_note(found.origin_span,
-                             "...that also applies to the same type variable here");
-            }
-            _ => {}
-        }
-    }
-}
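The deleted ty/error.rs mostly reduces each TypeError variant to printing an ExpectedFound pair, with report_maybe_different guarding against unhelpful output such as "expected closure, found closure". A minimal, self-contained sketch of that guard, using hypothetical names (SortsError) outside of rustc:

use std::fmt;

// A simplified stand-in for ty::error::ExpectedFound.
struct ExpectedFound<T> {
    expected: T,
    found: T,
}

// Mirrors report_maybe_different: when the two rendered descriptions are
// identical, say "a different ..." so the message still carries information.
fn report_maybe_different(f: &mut fmt::Formatter<'_>,
                          expected: &str,
                          found: &str) -> fmt::Result {
    if expected == found {
        write!(f, "expected {}, found a different {}", expected, found)
    } else {
        write!(f, "expected {}, found {}", expected, found)
    }
}

struct SortsError(ExpectedFound<String>);

impl fmt::Display for SortsError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        report_maybe_different(f, &self.0.expected, &self.0.found)
    }
}

fn main() {
    let err = SortsError(ExpectedFound {
        expected: "closure".to_string(),
        found: "closure".to_string(),
    });
    // Prints: expected closure, found a different closure
    println!("{}", err);
}
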
diff --git a/src/librustc/middle/ty/fast_reject.rs b/src/librustc/middle/ty/fast_reject.rs
deleted file mode 100644 (file)
index a06e8a7..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use middle::def_id::DefId;
-use middle::ty::{self, Ty};
-use syntax::ast;
-
-use self::SimplifiedType::*;
-
-/// See `simplify_type`.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
-pub enum SimplifiedType {
-    BoolSimplifiedType,
-    CharSimplifiedType,
-    IntSimplifiedType(ast::IntTy),
-    UintSimplifiedType(ast::UintTy),
-    FloatSimplifiedType(ast::FloatTy),
-    EnumSimplifiedType(DefId),
-    StrSimplifiedType,
-    VecSimplifiedType,
-    PtrSimplifiedType,
-    TupleSimplifiedType(usize),
-    TraitSimplifiedType(DefId),
-    StructSimplifiedType(DefId),
-    ClosureSimplifiedType(DefId),
-    FunctionSimplifiedType(usize),
-    ParameterSimplifiedType,
-}
-
-/// Tries to simplify a type by dropping type parameters, deref'ing away any reference types, etc.
-/// The idea is to get something simple that we can use to quickly decide if two types could unify
-/// during method lookup.
-///
-/// If `can_simplify_params` is false, then we will fail to simplify type parameters entirely. This
-/// is useful when those type parameters would be instantiated with fresh type variables, since
-/// then we can't say much about whether two types would unify. Put another way,
-/// `can_simplify_params` should be true if type parameters appear free in `ty` and `false` if they
-/// are to be considered bound.
-pub fn simplify_type(tcx: &ty::ctxt,
-                     ty: Ty,
-                     can_simplify_params: bool)
-                     -> Option<SimplifiedType>
-{
-    match ty.sty {
-        ty::TyBool => Some(BoolSimplifiedType),
-        ty::TyChar => Some(CharSimplifiedType),
-        ty::TyInt(int_type) => Some(IntSimplifiedType(int_type)),
-        ty::TyUint(uint_type) => Some(UintSimplifiedType(uint_type)),
-        ty::TyFloat(float_type) => Some(FloatSimplifiedType(float_type)),
-        ty::TyEnum(def, _) => Some(EnumSimplifiedType(def.did)),
-        ty::TyStr => Some(StrSimplifiedType),
-        ty::TyArray(..) | ty::TySlice(_) => Some(VecSimplifiedType),
-        ty::TyRawPtr(_) => Some(PtrSimplifiedType),
-        ty::TyTrait(ref trait_info) => {
-            Some(TraitSimplifiedType(trait_info.principal_def_id()))
-        }
-        ty::TyStruct(def, _) => {
-            Some(StructSimplifiedType(def.did))
-        }
-        ty::TyRef(_, mt) => {
-            // since we introduce auto-refs during method lookup, we
-            // just treat &T and T as equivalent from the point of
-            // view of possibly unifying
-            simplify_type(tcx, mt.ty, can_simplify_params)
-        }
-        ty::TyBox(_) => {
-            // treat like we would treat `Box`
-            match tcx.lang_items.require_owned_box() {
-                Ok(def_id) => Some(StructSimplifiedType(def_id)),
-                Err(msg) => tcx.sess.fatal(&msg),
-            }
-        }
-        ty::TyClosure(def_id, _) => {
-            Some(ClosureSimplifiedType(def_id))
-        }
-        ty::TyTuple(ref tys) => {
-            Some(TupleSimplifiedType(tys.len()))
-        }
-        ty::TyBareFn(_, ref f) => {
-            Some(FunctionSimplifiedType(f.sig.0.inputs.len()))
-        }
-        ty::TyProjection(_) | ty::TyParam(_) => {
-            if can_simplify_params {
-                // In normalized types, projections don't unify with
-            // anything. When lazy normalization happens, this
-                // will change. It would still be nice to have a way
-                // to deal with known-not-to-unify-with-anything
-                // projections (e.g. the likes of <__S as Encoder>::Error).
-                Some(ParameterSimplifiedType)
-            } else {
-                None
-            }
-        }
-        ty::TyInfer(_) | ty::TyError => None,
-    }
-}
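
For context, a sketch of the fast-reject pattern that `simplify_type` supports during impl and method selection; this is illustrative only and not part of the deleted file, and `candidate_ty`/`lookup_ty` are made-up names. Following the doc comment above, `true` is passed for the side whose type parameters appear free and `false` for the side whose parameters would become fresh inference variables:

// Illustrative sketch only, not from the original tree.
fn can_possibly_unify<'tcx>(tcx: &ty::ctxt<'tcx>,
                            candidate_ty: Ty<'tcx>,
                            lookup_ty: Ty<'tcx>) -> bool {
    match (simplify_type(tcx, candidate_ty, true),
           simplify_type(tcx, lookup_ty, false)) {
        // Two distinct simplified shapes can never unify, so the pair can be
        // rejected without running the full unification machinery.
        (Some(a), Some(b)) => a == b,
        // If either side could not be simplified, stay conservative.
        _ => true,
    }
}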
diff --git a/src/librustc/middle/ty/flags.rs b/src/librustc/middle/ty/flags.rs
deleted file mode 100644 (file)
index a0b03fe..0000000
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use middle::subst;
-use middle::ty::{self, Ty, TypeFlags, TypeFoldable};
-
-pub struct FlagComputation {
-    pub flags: TypeFlags,
-
-    // maximum depth of any bound region that we have seen thus far
-    pub depth: u32,
-}
-
-impl FlagComputation {
-    fn new() -> FlagComputation {
-        FlagComputation { flags: TypeFlags::empty(), depth: 0 }
-    }
-
-    pub fn for_sty(st: &ty::TypeVariants) -> FlagComputation {
-        let mut result = FlagComputation::new();
-        result.add_sty(st);
-        result
-    }
-
-    fn add_flags(&mut self, flags: TypeFlags) {
-        self.flags = self.flags | (flags & TypeFlags::NOMINAL_FLAGS);
-    }
-
-    fn add_depth(&mut self, depth: u32) {
-        if depth > self.depth {
-            self.depth = depth;
-        }
-    }
-
-    /// Adds the flags/depth from a set of types that appear within the current type, but within a
-    /// region binder.
-    fn add_bound_computation(&mut self, computation: &FlagComputation) {
-        self.add_flags(computation.flags);
-
-        // The types that contributed to `computation` occurred within
-        // a region binder, so subtract one from their region depth
-        // when adding it to `self`.
-        let depth = computation.depth;
-        if depth > 0 {
-            self.add_depth(depth - 1);
-        }
-    }
-
-    fn add_sty(&mut self, st: &ty::TypeVariants) {
-        match st {
-            &ty::TyBool |
-            &ty::TyChar |
-            &ty::TyInt(_) |
-            &ty::TyFloat(_) |
-            &ty::TyUint(_) |
-            &ty::TyStr => {
-            }
-
-            // You might think that we could just return TyError for
-            // any type containing TyError as a component, and get
-            // rid of the TypeFlags::HAS_TY_ERR flag -- likewise for ty_bot (with
-            // the exception of function types that return bot).
-            // But doing so caused sporadic memory corruption, and
-            // neither I (tjc) nor nmatsakis could figure out why,
-            // so we're doing it this way.
-            &ty::TyError => {
-                self.add_flags(TypeFlags::HAS_TY_ERR)
-            }
-
-            &ty::TyParam(ref p) => {
-                self.add_flags(TypeFlags::HAS_LOCAL_NAMES);
-                if p.space == subst::SelfSpace {
-                    self.add_flags(TypeFlags::HAS_SELF);
-                } else {
-                    self.add_flags(TypeFlags::HAS_PARAMS);
-                }
-            }
-
-            &ty::TyClosure(_, ref substs) => {
-                self.add_flags(TypeFlags::HAS_TY_CLOSURE);
-                self.add_flags(TypeFlags::HAS_LOCAL_NAMES);
-                self.add_substs(&substs.func_substs);
-                self.add_tys(&substs.upvar_tys);
-            }
-
-            &ty::TyInfer(_) => {
-                self.add_flags(TypeFlags::HAS_LOCAL_NAMES); // it might, right?
-                self.add_flags(TypeFlags::HAS_TY_INFER)
-            }
-
-            &ty::TyEnum(_, substs) | &ty::TyStruct(_, substs) => {
-                self.add_substs(substs);
-            }
-
-            &ty::TyProjection(ref data) => {
-                self.add_flags(TypeFlags::HAS_PROJECTION);
-                self.add_projection_ty(data);
-            }
-
-            &ty::TyTrait(box ty::TraitTy { ref principal, ref bounds }) => {
-                let mut computation = FlagComputation::new();
-                computation.add_substs(principal.0.substs);
-                for projection_bound in &bounds.projection_bounds {
-                    let mut proj_computation = FlagComputation::new();
-                    proj_computation.add_projection_predicate(&projection_bound.0);
-                    self.add_bound_computation(&proj_computation);
-                }
-                self.add_bound_computation(&computation);
-
-                self.add_bounds(bounds);
-            }
-
-            &ty::TyBox(tt) | &ty::TyArray(tt, _) | &ty::TySlice(tt) => {
-                self.add_ty(tt)
-            }
-
-            &ty::TyRawPtr(ref m) => {
-                self.add_ty(m.ty);
-            }
-
-            &ty::TyRef(r, ref m) => {
-                self.add_region(*r);
-                self.add_ty(m.ty);
-            }
-
-            &ty::TyTuple(ref ts) => {
-                self.add_tys(&ts[..]);
-            }
-
-            &ty::TyBareFn(_, ref f) => {
-                self.add_fn_sig(&f.sig);
-            }
-        }
-    }
-
-    fn add_ty(&mut self, ty: Ty) {
-        self.add_flags(ty.flags.get());
-        self.add_depth(ty.region_depth);
-    }
-
-    fn add_tys(&mut self, tys: &[Ty]) {
-        for &ty in tys {
-            self.add_ty(ty);
-        }
-    }
-
-    fn add_fn_sig(&mut self, fn_sig: &ty::PolyFnSig) {
-        let mut computation = FlagComputation::new();
-
-        computation.add_tys(&fn_sig.0.inputs);
-
-        if let ty::FnConverging(output) = fn_sig.0.output {
-            computation.add_ty(output);
-        }
-
-        self.add_bound_computation(&computation);
-    }
-
-    fn add_region(&mut self, r: ty::Region) {
-        match r {
-            ty::ReVar(..) |
-            ty::ReSkolemized(..) => { self.add_flags(TypeFlags::HAS_RE_INFER); }
-            ty::ReLateBound(debruijn, _) => { self.add_depth(debruijn.depth); }
-            ty::ReEarlyBound(..) => { self.add_flags(TypeFlags::HAS_RE_EARLY_BOUND); }
-            ty::ReStatic => {}
-            _ => { self.add_flags(TypeFlags::HAS_FREE_REGIONS); }
-        }
-
-        if !r.is_global() {
-            self.add_flags(TypeFlags::HAS_LOCAL_NAMES);
-        }
-    }
-
-    fn add_projection_predicate(&mut self, projection_predicate: &ty::ProjectionPredicate) {
-        self.add_projection_ty(&projection_predicate.projection_ty);
-        self.add_ty(projection_predicate.ty);
-    }
-
-    fn add_projection_ty(&mut self, projection_ty: &ty::ProjectionTy) {
-        self.add_substs(projection_ty.trait_ref.substs);
-    }
-
-    fn add_substs(&mut self, substs: &subst::Substs) {
-        self.add_tys(substs.types.as_slice());
-        match substs.regions {
-            subst::ErasedRegions => {}
-            subst::NonerasedRegions(ref regions) => {
-                for &r in regions {
-                    self.add_region(r);
-                }
-            }
-        }
-    }
-
-    fn add_bounds(&mut self, bounds: &ty::ExistentialBounds) {
-        self.add_region(bounds.region_bound);
-    }
-}
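
For reference, a sketch (not part of the deleted file) of how the computed flags and depth feed the `TyS` fields that the rest of the compiler queries; the real caller is the type interner in `middle/ty/context.rs`:

// Illustrative sketch: FlagComputation runs once per freshly built type, and the
// result is cached so later checks are O(1) bit tests rather than type walks.
fn flags_and_depth_for<'tcx>(st: &ty::TypeVariants<'tcx>) -> (TypeFlags, u32) {
    let computation = FlagComputation::for_sty(st);
    // `flags` ends up in `TyS::flags`; `depth` ends up in `TyS::region_depth`.
    (computation.flags, computation.depth)
}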
diff --git a/src/librustc/middle/ty/fold.rs b/src/librustc/middle/ty/fold.rs
deleted file mode 100644 (file)
index da0245a..0000000
+++ /dev/null
@@ -1,638 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Generalized type folding mechanism. The setup is a bit convoluted
-//! but allows for convenient usage. Let T be an instance of some
-//! "foldable type" (one which implements `TypeFoldable`) and F be an
-//! instance of a "folder" (a type which implements `TypeFolder`). Then
-//! the setup is intended to be:
-//!
-//!   T.fold_with(F) --calls--> F.fold_T(T) --calls--> T.super_fold_with(F)
-//!
-//! This way, when you define a new folder F, you can override
-//! `fold_T()` to customize the behavior, and invoke `T.super_fold_with()`
-//! to get the original behavior. Meanwhile, to actually fold
-//! something, you can just write `T.fold_with(F)`, which is
-//! convenient. (Note that `fold_with` will also transparently handle
-//! things like a `Vec<T>` where T is foldable and so on.)
-//!
-//! In this ideal setup, the only function that actually *does*
-//! anything is `T.super_fold_with()`, which traverses the type `T`.
-//! Moreover, `T.super_fold_with()` should only ever call `T.fold_with()`.
-//!
-//! In some cases, we follow a degenerate pattern where we do not have
-//! a `fold_T` method. Instead, `T.fold_with` traverses the structure directly.
-//! This is suboptimal because the behavior cannot be overridden, but it's
-//! much less work to implement. If you ever *do* need an override that
-//! doesn't exist, it's not hard to convert the degenerate pattern into the
-//! proper thing.
-//!
-//! A `TypeFoldable` T can also be visited by a `TypeVisitor` V using similar setup:
-//!   T.visit_with(V) --calls--> V.visit_T(T) --calls--> T.super_visit_with(V).
-//! These methods return true to indicate that the visitor has found what it is looking for
-//! and does not need to visit anything else.
-
-use middle::region;
-use middle::subst;
-use middle::ty::adjustment;
-use middle::ty::{self, Binder, Ty, TypeFlags};
-
-use std::fmt;
-use util::nodemap::{FnvHashMap, FnvHashSet};
-
-/// The TypeFoldable trait is implemented for every type that can be folded.
-/// Basically, every type that has a corresponding method in TypeFolder.
-pub trait TypeFoldable<'tcx>: fmt::Debug + Clone {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self;
-    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        self.super_fold_with(folder)
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool;
-    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.super_visit_with(visitor)
-    }
-
-    fn has_regions_escaping_depth(&self, depth: u32) -> bool {
-        self.visit_with(&mut HasEscapingRegionsVisitor { depth: depth })
-    }
-    fn has_escaping_regions(&self) -> bool {
-        self.has_regions_escaping_depth(0)
-    }
-
-    fn has_type_flags(&self, flags: TypeFlags) -> bool {
-        self.visit_with(&mut HasTypeFlagsVisitor { flags: flags })
-    }
-    fn has_projection_types(&self) -> bool {
-        self.has_type_flags(TypeFlags::HAS_PROJECTION)
-    }
-    fn references_error(&self) -> bool {
-        self.has_type_flags(TypeFlags::HAS_TY_ERR)
-    }
-    fn has_param_types(&self) -> bool {
-        self.has_type_flags(TypeFlags::HAS_PARAMS)
-    }
-    fn has_self_ty(&self) -> bool {
-        self.has_type_flags(TypeFlags::HAS_SELF)
-    }
-    fn has_infer_types(&self) -> bool {
-        self.has_type_flags(TypeFlags::HAS_TY_INFER)
-    }
-    fn needs_infer(&self) -> bool {
-        self.has_type_flags(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_RE_INFER)
-    }
-    fn needs_subst(&self) -> bool {
-        self.has_type_flags(TypeFlags::NEEDS_SUBST)
-    }
-    fn has_closure_types(&self) -> bool {
-        self.has_type_flags(TypeFlags::HAS_TY_CLOSURE)
-    }
-    fn has_erasable_regions(&self) -> bool {
-        self.has_type_flags(TypeFlags::HAS_RE_EARLY_BOUND |
-                            TypeFlags::HAS_RE_INFER |
-                            TypeFlags::HAS_FREE_REGIONS)
-    }
-    /// Indicates whether this value references only 'global'
-    /// types/lifetimes that are the same regardless of what fn we are
-    /// in. This is used for caching. Errs on the side of returning
-    /// false.
-    fn is_global(&self) -> bool {
-        !self.has_type_flags(TypeFlags::HAS_LOCAL_NAMES)
-    }
-}
-
-/// The TypeFolder trait defines the actual *folding*. There is a
-/// method defined for every foldable type. Each of these has a
-/// default implementation that does an "identity" fold. Within each
-/// identity fold, it should invoke `foo.fold_with(self)` to fold each
-/// sub-item.
-pub trait TypeFolder<'tcx> : Sized {
-    fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>;
-
-    /// Invoked by the `super_*` routines when we enter a region
-    /// binding level (for example, when entering a function
-    /// signature). This is used by clients that want to track the
-    /// Debruijn index nesting level.
-    fn enter_region_binder(&mut self) { }
-
-    /// Invoked by the `super_*` routines when we exit a region
-    /// binding level. This is used by clients that want to
-    /// track the Debruijn index nesting level.
-    fn exit_region_binder(&mut self) { }
-
-    fn fold_binder<T>(&mut self, t: &Binder<T>) -> Binder<T>
-        where T : TypeFoldable<'tcx>
-    {
-        // FIXME(#20526) this should replace `enter_region_binder`/`exit_region_binder`.
-        t.super_fold_with(self)
-    }
-
-    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
-        t.super_fold_with(self)
-    }
-
-    fn fold_mt(&mut self, t: &ty::TypeAndMut<'tcx>) -> ty::TypeAndMut<'tcx> {
-        t.super_fold_with(self)
-    }
-
-    fn fold_trait_ref(&mut self, t: &ty::TraitRef<'tcx>) -> ty::TraitRef<'tcx> {
-        t.super_fold_with(self)
-    }
-
-    fn fold_substs(&mut self,
-                   substs: &subst::Substs<'tcx>)
-                   -> subst::Substs<'tcx> {
-        substs.super_fold_with(self)
-    }
-
-    fn fold_fn_sig(&mut self,
-                   sig: &ty::FnSig<'tcx>)
-                   -> ty::FnSig<'tcx> {
-        sig.super_fold_with(self)
-    }
-
-    fn fold_output(&mut self,
-                      output: &ty::FnOutput<'tcx>)
-                      -> ty::FnOutput<'tcx> {
-        output.super_fold_with(self)
-    }
-
-    fn fold_bare_fn_ty(&mut self,
-                       fty: &ty::BareFnTy<'tcx>)
-                       -> ty::BareFnTy<'tcx>
-    {
-        fty.super_fold_with(self)
-    }
-
-    fn fold_closure_ty(&mut self,
-                       fty: &ty::ClosureTy<'tcx>)
-                       -> ty::ClosureTy<'tcx> {
-        fty.super_fold_with(self)
-    }
-
-    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
-        r.super_fold_with(self)
-    }
-
-    fn fold_existential_bounds(&mut self, s: &ty::ExistentialBounds<'tcx>)
-                               -> ty::ExistentialBounds<'tcx> {
-        s.super_fold_with(self)
-    }
-
-    fn fold_autoref(&mut self, ar: &adjustment::AutoRef<'tcx>)
-                    -> adjustment::AutoRef<'tcx> {
-        ar.super_fold_with(self)
-    }
-}
-
-pub trait TypeVisitor<'tcx> : Sized {
-    fn enter_region_binder(&mut self) { }
-    fn exit_region_binder(&mut self) { }
-
-    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
-        t.super_visit_with(self)
-    }
-
-    fn visit_region(&mut self, r: ty::Region) -> bool {
-        r.super_visit_with(self)
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Some sample folders
-
-pub struct BottomUpFolder<'a, 'tcx: 'a, F> where F: FnMut(Ty<'tcx>) -> Ty<'tcx> {
-    pub tcx: &'a ty::ctxt<'tcx>,
-    pub fldop: F,
-}
-
-impl<'a, 'tcx, F> TypeFolder<'tcx> for BottomUpFolder<'a, 'tcx, F> where
-    F: FnMut(Ty<'tcx>) -> Ty<'tcx>,
-{
-    fn tcx(&self) -> &ty::ctxt<'tcx> { self.tcx }
-
-    fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
-        let t1 = ty.super_fold_with(self);
-        (self.fldop)(t1)
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Region folder
-
-impl<'tcx> ty::ctxt<'tcx> {
-    /// Collects the free and escaping regions in `value` into `region_set`. Returns
-    /// whether any late-bound regions were skipped.
-    pub fn collect_regions<T>(&self,
-        value: &T,
-        region_set: &mut FnvHashSet<ty::Region>)
-        -> bool
-        where T : TypeFoldable<'tcx>
-    {
-        let mut have_bound_regions = false;
-        self.fold_regions(value, &mut have_bound_regions,
-                          |r, d| { region_set.insert(r.from_depth(d)); r });
-        have_bound_regions
-    }
-
-    /// Folds the escaping and free regions in `value` using `f`, and
-    /// sets `skipped_regions` to true if any late-bound region was found
-    /// and skipped.
-    pub fn fold_regions<T,F>(&self,
-        value: &T,
-        skipped_regions: &mut bool,
-        mut f: F)
-        -> T
-        where F : FnMut(ty::Region, u32) -> ty::Region,
-              T : TypeFoldable<'tcx>,
-    {
-        value.fold_with(&mut RegionFolder::new(self, skipped_regions, &mut f))
-    }
-}
-
-/// Folds over the substructure of a type, visiting its component
-/// types and all regions that occur *free* within it.
-///
-/// That is, `Ty` can contain function or method types that bind
-/// regions at the call site (`ReLateBound`), and occurrences of
-/// regions (aka "lifetimes") that are bound within a type are not
-/// visited by this folder; only regions that occur free will be
-/// visited by `fld_r`.
-
-pub struct RegionFolder<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
-    skipped_regions: &'a mut bool,
-    current_depth: u32,
-    fld_r: &'a mut (FnMut(ty::Region, u32) -> ty::Region + 'a),
-}
-
-impl<'a, 'tcx> RegionFolder<'a, 'tcx> {
-    pub fn new<F>(tcx: &'a ty::ctxt<'tcx>,
-                  skipped_regions: &'a mut bool,
-                  fld_r: &'a mut F) -> RegionFolder<'a, 'tcx>
-        where F : FnMut(ty::Region, u32) -> ty::Region
-    {
-        RegionFolder {
-            tcx: tcx,
-            skipped_regions: skipped_regions,
-            current_depth: 1,
-            fld_r: fld_r,
-        }
-    }
-}
-
-impl<'a, 'tcx> TypeFolder<'tcx> for RegionFolder<'a, 'tcx>
-{
-    fn tcx(&self) -> &ty::ctxt<'tcx> { self.tcx }
-
-    fn enter_region_binder(&mut self) {
-        self.current_depth += 1;
-    }
-
-    fn exit_region_binder(&mut self) {
-        self.current_depth -= 1;
-    }
-
-    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
-        match r {
-            ty::ReLateBound(debruijn, _) if debruijn.depth < self.current_depth => {
-                debug!("RegionFolder.fold_region({:?}) skipped bound region (current depth={})",
-                       r, self.current_depth);
-                *self.skipped_regions = true;
-                r
-            }
-            _ => {
-                debug!("RegionFolder.fold_region({:?}) folding free region (current_depth={})",
-                       r, self.current_depth);
-                (self.fld_r)(r, self.current_depth)
-            }
-        }
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Late-bound region replacer
-
-// Replaces the escaping regions in a type.
-
-struct RegionReplacer<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
-    current_depth: u32,
-    fld_r: &'a mut (FnMut(ty::BoundRegion) -> ty::Region + 'a),
-    map: FnvHashMap<ty::BoundRegion, ty::Region>
-}
-
-impl<'tcx> ty::ctxt<'tcx> {
-    pub fn replace_late_bound_regions<T,F>(&self,
-        value: &Binder<T>,
-        mut f: F)
-        -> (T, FnvHashMap<ty::BoundRegion, ty::Region>)
-        where F : FnMut(ty::BoundRegion) -> ty::Region,
-              T : TypeFoldable<'tcx>,
-    {
-        debug!("replace_late_bound_regions({:?})", value);
-        let mut replacer = RegionReplacer::new(self, &mut f);
-        let result = value.skip_binder().fold_with(&mut replacer);
-        (result, replacer.map)
-    }
-
-
-    /// Replace any late-bound regions bound in `value` with free variants attached to the
-    /// code extent `all_outlive_scope`.
-    pub fn liberate_late_bound_regions<T>(&self,
-        all_outlive_scope: region::CodeExtent,
-        value: &Binder<T>)
-        -> T
-        where T : TypeFoldable<'tcx>
-    {
-        self.replace_late_bound_regions(value, |br| {
-            ty::ReFree(ty::FreeRegion{scope: all_outlive_scope, bound_region: br})
-        }).0
-    }
-
-    /// Flattens two binding levels into one. So `for<'a> for<'b> Foo`
-    /// becomes `for<'a,'b> Foo`.
-    pub fn flatten_late_bound_regions<T>(&self, bound2_value: &Binder<Binder<T>>)
-                                         -> Binder<T>
-        where T: TypeFoldable<'tcx>
-    {
-        let bound0_value = bound2_value.skip_binder().skip_binder();
-        let value = self.fold_regions(bound0_value, &mut false,
-                                      |region, current_depth| {
-            match region {
-                ty::ReLateBound(debruijn, br) if debruijn.depth >= current_depth => {
-                    // should be true if no escaping regions from bound2_value
-                    assert!(debruijn.depth - current_depth <= 1);
-                    ty::ReLateBound(ty::DebruijnIndex::new(current_depth), br)
-                }
-                _ => {
-                    region
-                }
-            }
-        });
-        Binder(value)
-    }
-
-    pub fn no_late_bound_regions<T>(&self, value: &Binder<T>) -> Option<T>
-        where T : TypeFoldable<'tcx>
-    {
-        if value.0.has_escaping_regions() {
-            None
-        } else {
-            Some(value.0.clone())
-        }
-    }
-
-    /// Replace any late-bound regions bound in `value` with `'static`. Useful in trans but also
-    /// method lookup and a few other places where precise region relationships are not required.
-    pub fn erase_late_bound_regions<T>(&self, value: &Binder<T>) -> T
-        where T : TypeFoldable<'tcx>
-    {
-        self.replace_late_bound_regions(value, |_| ty::ReStatic).0
-    }
-
-    /// Rewrite any late-bound regions so that they are anonymous.  Region numbers are
-    /// assigned starting at 1 and increasing monotonically in the order traversed
-    /// by the fold operation.
-    ///
-    /// The chief purpose of this function is to canonicalize regions so that two
-    /// `FnSig`s or `TraitRef`s which are equivalent up to region naming will become
-    /// structurally identical.  For example, `for<'a, 'b> fn(&'a isize, &'b isize)` and
-    /// `for<'a, 'b> fn(&'b isize, &'a isize)` will become identical after anonymization.
-    pub fn anonymize_late_bound_regions<T>(&self, sig: &Binder<T>) -> Binder<T>
-        where T : TypeFoldable<'tcx>,
-    {
-        let mut counter = 0;
-        Binder(self.replace_late_bound_regions(sig, |_| {
-            counter += 1;
-            ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrAnon(counter))
-        }).0)
-    }
-}
-
-impl<'a, 'tcx> RegionReplacer<'a, 'tcx> {
-    fn new<F>(tcx: &'a ty::ctxt<'tcx>, fld_r: &'a mut F) -> RegionReplacer<'a, 'tcx>
-        where F : FnMut(ty::BoundRegion) -> ty::Region
-    {
-        RegionReplacer {
-            tcx: tcx,
-            current_depth: 1,
-            fld_r: fld_r,
-            map: FnvHashMap()
-        }
-    }
-}
-
-impl<'a, 'tcx> TypeFolder<'tcx> for RegionReplacer<'a, 'tcx>
-{
-    fn tcx(&self) -> &ty::ctxt<'tcx> { self.tcx }
-
-    fn enter_region_binder(&mut self) {
-        self.current_depth += 1;
-    }
-
-    fn exit_region_binder(&mut self) {
-        self.current_depth -= 1;
-    }
-
-    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
-        if !t.has_regions_escaping_depth(self.current_depth-1) {
-            return t;
-        }
-
-        t.super_fold_with(self)
-    }
-
-    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
-        match r {
-            ty::ReLateBound(debruijn, br) if debruijn.depth == self.current_depth => {
-                debug!("RegionReplacer.fold_region({:?}) folding region (current_depth={})",
-                       r, self.current_depth);
-                let fld_r = &mut self.fld_r;
-                let region = *self.map.entry(br).or_insert_with(|| fld_r(br));
-                if let ty::ReLateBound(debruijn1, br) = region {
-                    // If the callback returns a late-bound region,
-                    // that region should always use depth 1. Then we
-                    // adjust it to the correct depth.
-                    assert_eq!(debruijn1.depth, 1);
-                    ty::ReLateBound(debruijn, br)
-                } else {
-                    region
-                }
-            }
-            r => r
-        }
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Region eraser
-
-impl<'tcx> ty::ctxt<'tcx> {
-    /// Returns an equivalent value with all free regions removed (note
-    /// that late-bound regions remain, because they are important for
-    /// subtyping, but they are anonymized and normalized as well).
-    pub fn erase_regions<T>(&self, value: &T) -> T
-        where T : TypeFoldable<'tcx>
-    {
-        let value1 = value.fold_with(&mut RegionEraser(self));
-        debug!("erase_regions({:?}) = {:?}",
-               value, value1);
-        return value1;
-
-        struct RegionEraser<'a, 'tcx: 'a>(&'a ty::ctxt<'tcx>);
-
-        impl<'a, 'tcx> TypeFolder<'tcx> for RegionEraser<'a, 'tcx> {
-            fn tcx(&self) -> &ty::ctxt<'tcx> { self.0 }
-
-            fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
-                match self.tcx().normalized_cache.borrow().get(&ty).cloned() {
-                    None => {}
-                    Some(u) => return u
-                }
-
-                let t_norm = ty.super_fold_with(self);
-                self.tcx().normalized_cache.borrow_mut().insert(ty, t_norm);
-                return t_norm;
-            }
-
-            fn fold_binder<T>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T>
-                where T : TypeFoldable<'tcx>
-            {
-                let u = self.tcx().anonymize_late_bound_regions(t);
-                u.super_fold_with(self)
-            }
-
-            fn fold_region(&mut self, r: ty::Region) -> ty::Region {
-                // because late-bound regions affect subtyping, we can't
-                // erase the bound/free distinction, but we can replace
-                // all free regions with 'static.
-                //
-                // Note that we *CAN* replace early-bound regions -- the
-                // type system never "sees" those, they get substituted
-                // away. In trans, they will always be erased to 'static
-                // whenever a substitution occurs.
-                match r {
-                    ty::ReLateBound(..) => r,
-                    _ => ty::ReStatic
-                }
-            }
-
-            fn fold_substs(&mut self,
-                           substs: &subst::Substs<'tcx>)
-                           -> subst::Substs<'tcx> {
-                subst::Substs { regions: subst::ErasedRegions,
-                                types: substs.types.fold_with(self) }
-            }
-        }
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Region shifter
-//
-// Shifts the De Bruijn indices on all escaping bound regions by a
-// fixed amount. Useful in substitution or when otherwise introducing
-// a binding level that is not intended to capture the existing bound
-// regions. See comment on `shift_regions_through_binders` method in
-// `subst.rs` for more details.
-
-pub fn shift_region(region: ty::Region, amount: u32) -> ty::Region {
-    match region {
-        ty::ReLateBound(debruijn, br) => {
-            ty::ReLateBound(debruijn.shifted(amount), br)
-        }
-        _ => {
-            region
-        }
-    }
-}
-
-pub fn shift_regions<'tcx, T:TypeFoldable<'tcx>>(tcx: &ty::ctxt<'tcx>,
-                                                 amount: u32, value: &T) -> T {
-    debug!("shift_regions(value={:?}, amount={})",
-           value, amount);
-
-    value.fold_with(&mut RegionFolder::new(tcx, &mut false, &mut |region, _current_depth| {
-        shift_region(region, amount)
-    }))
-}
-
-/// An "escaping region" is a bound region whose binder is not part of `t`.
-///
-/// So, for example, consider a type like the following, which has two binders:
-///
-///    for<'a> fn(x: for<'b> fn(&'a isize, &'b isize))
-///    ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ outer scope
-///                  ^~~~~~~~~~~~~~~~~~~~~~~~~~~~  inner scope
-///
-/// This type has *bound regions* (`'a`, `'b`), but it does not have escaping regions, because the
-/// binders of both `'a` and `'b` are part of the type itself. However, if we consider the *inner
-/// fn type*, that type has an escaping region: `'a`.
-///
-/// Note that what I'm calling an "escaping region" is often just called a "free region". However,
-/// that term is already taken in this codebase: a "free region" here refers to the regions that we
-/// use to represent bound regions on a fn definition while we are typechecking its body.
-///
-/// To clarify, conceptually there is no particular difference between an "escaping" region and a
-/// "free" region. However, there is a big difference in practice. Basically, when "entering" a
-/// binding level, one is generally required to do some sort of processing to a bound region, such
-/// as replacing it with a fresh/skolemized region, or making an entry in the environment to
-/// represent the scope to which it is attached, etc. An escaping region represents a bound region
-/// for which this processing has not yet been done.
-struct HasEscapingRegionsVisitor {
-    depth: u32,
-}
-
-impl<'tcx> TypeVisitor<'tcx> for HasEscapingRegionsVisitor {
-    fn enter_region_binder(&mut self) {
-        self.depth += 1;
-    }
-
-    fn exit_region_binder(&mut self) {
-        self.depth -= 1;
-    }
-
-    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
-        t.region_depth > self.depth
-    }
-
-    fn visit_region(&mut self, r: ty::Region) -> bool {
-        r.escapes_depth(self.depth)
-    }
-}
-
-struct HasTypeFlagsVisitor {
-    flags: ty::TypeFlags,
-}
-
-impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor {
-    fn visit_ty(&mut self, t: Ty) -> bool {
-        t.flags.get().intersects(self.flags)
-    }
-
-    fn visit_region(&mut self, r: ty::Region) -> bool {
-        if self.flags.intersects(ty::TypeFlags::HAS_LOCAL_NAMES) {
-            // does this represent a region that cannot be named
-            // in a global way? used in fulfillment caching.
-            match r {
-                ty::ReStatic | ty::ReEmpty => {}
-                _ => return true,
-            }
-        }
-        if self.flags.intersects(ty::TypeFlags::HAS_RE_INFER) {
-            match r {
-                ty::ReVar(_) | ty::ReSkolemized(..) => { return true }
-                _ => {}
-            }
-        }
-        false
-    }
-}
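
To illustrate the folder protocol documented at the top of this file, here is a minimal custom folder; it is a sketch rather than code from the deleted file, overriding only `fold_region` and inheriting identity behaviour for everything else, much like the `RegionEraser` above but without the normalization cache:

// Minimal illustrative folder: every free region becomes 'static; late-bound
// regions are kept because they still matter for subtyping.
struct StaticRegionFolder<'a, 'tcx: 'a> {
    tcx: &'a ty::ctxt<'tcx>,
}

impl<'a, 'tcx> TypeFolder<'tcx> for StaticRegionFolder<'a, 'tcx> {
    fn tcx(&self) -> &ty::ctxt<'tcx> { self.tcx }

    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
        match r {
            ty::ReLateBound(..) => r,
            _ => ty::ReStatic,
        }
    }
}

// Usage: let erased = some_ty.fold_with(&mut StaticRegionFolder { tcx: tcx });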
diff --git a/src/librustc/middle/ty/ivar.rs b/src/librustc/middle/ty/ivar.rs
deleted file mode 100644 (file)
index ffc12aa..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use dep_graph::DepNode;
-use middle::ty::{Ty, TyS};
-use middle::ty::tls;
-
-use rustc_data_structures::ivar;
-
-use std::fmt;
-use std::marker::PhantomData;
-use core::nonzero::NonZero;
-
-/// An IVar that contains a Ty. 'lt is a (reverse-variant) upper bound
-/// on the lifetime of the IVar. This is required because of variance
-/// problems: the IVar needs to be variant with respect to 'tcx (so
-/// it can be referred to from Ty) but can only be modified if its
-/// lifetime is exactly 'tcx.
-///
-/// Safety invariants:
-///     (A) self.0, if fulfilled, is a valid Ty<'tcx>
-///     (B) no aliases to this value with a 'tcx longer than this
-///         value's 'lt exist
-///
-/// Dependency tracking: each ivar does not know what node in the
-/// dependency graph it is associated with, so when you get/fulfill
-/// you must supply a `DepNode` id. This should always be the same id!
-///
-/// NonZero is used rather than Unique because Unique isn't Copy.
-pub struct TyIVar<'tcx, 'lt: 'tcx>(ivar::Ivar<NonZero<*const TyS<'static>>>,
-                                   PhantomData<fn(TyS<'lt>)->TyS<'tcx>>);
-
-impl<'tcx, 'lt> TyIVar<'tcx, 'lt> {
-    #[inline]
-    pub fn new() -> Self {
-        // Invariant (A) satisfied because the IVar is unfulfilled
-        // Invariant (B) because 'lt : 'tcx
-        TyIVar(ivar::Ivar::new(), PhantomData)
-    }
-
-    #[inline]
-    pub fn get(&self, dep_node: DepNode) -> Option<Ty<'tcx>> {
-        tls::with(|tcx| tcx.dep_graph.read(dep_node));
-        self.untracked_get()
-    }
-
-    #[inline]
-    fn untracked_get(&self) -> Option<Ty<'tcx>> {
-        match self.0.get() {
-            None => None,
-            // valid because of invariant (A)
-            Some(v) => Some(unsafe { &*(*v as *const TyS<'tcx>) })
-        }
-    }
-
-    #[inline]
-    pub fn unwrap(&self, dep_node: DepNode) -> Ty<'tcx> {
-        self.get(dep_node).unwrap()
-    }
-
-    pub fn fulfill(&self, dep_node: DepNode, value: Ty<'lt>) {
-        tls::with(|tcx| tcx.dep_graph.write(dep_node));
-
-        // Invariant (A) is fulfilled, because by (B), every alias
-        // of this has a 'tcx longer than 'lt.
-        let value: *const TyS<'lt> = value;
-        // FIXME(27214): unneeded [as *const ()]
-        let value = value as *const () as *const TyS<'static>;
-        self.0.fulfill(unsafe { NonZero::new(value) })
-    }
-}
-
-impl<'tcx, 'lt> fmt::Debug for TyIVar<'tcx, 'lt> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match self.untracked_get() {
-            Some(val) => write!(f, "TyIVar({:?})", val),
-            None => f.write_str("TyIVar(<unfulfilled>)")
-        }
-    }
-}
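
As a usage note, the intended write-once protocol looks roughly like the sketch below; it is not from the deleted file, `ItemSignature` is used as the dep node purely for illustration, and `def_id`/`ty` are assumed inputs:

// Illustrative only: fulfill once, read many times, always with the same DepNode
// so the dependency graph records consistent edges.
fn demo<'tcx>(ivar: &TyIVar<'tcx, 'tcx>, def_id: DefId, ty: Ty<'tcx>) {
    assert!(ivar.get(DepNode::ItemSignature(def_id)).is_none()); // still unfulfilled
    ivar.fulfill(DepNode::ItemSignature(def_id), ty);            // may happen at most once
    assert!(ivar.unwrap(DepNode::ItemSignature(def_id)) == ty);  // reads register a dep-graph read
}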
diff --git a/src/librustc/middle/ty/maps.rs b/src/librustc/middle/ty/maps.rs
deleted file mode 100644 (file)
index 8d8afc1..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use dep_graph::{DepNode, DepTrackingMapConfig};
-use middle::def_id::DefId;
-use middle::ty;
-use std::marker::PhantomData;
-use std::rc::Rc;
-use syntax::{attr, ast};
-
-macro_rules! dep_map_ty {
-    ($ty_name:ident : $node_name:ident ($key:ty) -> $value:ty) => {
-        pub struct $ty_name<'tcx> {
-            data: PhantomData<&'tcx ()>
-        }
-
-        impl<'tcx> DepTrackingMapConfig for $ty_name<'tcx> {
-            type Key = $key;
-            type Value = $value;
-            fn to_dep_node(key: &$key) -> DepNode { DepNode::$node_name(*key) }
-        }
-    }
-}
-
-dep_map_ty! { ImplOrTraitItems: ImplOrTraitItems(DefId) -> ty::ImplOrTraitItem<'tcx> }
-dep_map_ty! { Tcache: ItemSignature(DefId) -> ty::TypeScheme<'tcx> }
-dep_map_ty! { Predicates: ItemSignature(DefId) -> ty::GenericPredicates<'tcx> }
-dep_map_ty! { SuperPredicates: ItemSignature(DefId) -> ty::GenericPredicates<'tcx> }
-dep_map_ty! { TraitItemDefIds: TraitItemDefIds(DefId) -> Rc<Vec<ty::ImplOrTraitItemId>> }
-dep_map_ty! { ImplTraitRefs: ItemSignature(DefId) -> Option<ty::TraitRef<'tcx>> }
-dep_map_ty! { TraitDefs: ItemSignature(DefId) -> &'tcx ty::TraitDef<'tcx> }
-dep_map_ty! { AdtDefs: ItemSignature(DefId) -> ty::AdtDefMaster<'tcx> }
-dep_map_ty! { ItemVariances: ItemSignature(DefId) -> Rc<ty::ItemVariances> }
-dep_map_ty! { InherentImpls: InherentImpls(DefId) -> Rc<Vec<DefId>> }
-dep_map_ty! { ImplItems: ImplItems(DefId) -> Vec<ty::ImplOrTraitItemId> }
-dep_map_ty! { TraitItems: TraitItems(DefId) -> Rc<Vec<ty::ImplOrTraitItem<'tcx>>> }
-dep_map_ty! { ReprHints: ReprHints(DefId) -> Rc<Vec<attr::ReprAttr>> }
-dep_map_ty! { InlinedClosures: Hir(DefId) -> ast::NodeId }
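
For readers unfamiliar with the macro, the first invocation above expands to roughly the following (a hand-expanded sketch, not part of the deleted file):

// Hand-expanded sketch of
//   dep_map_ty! { ImplOrTraitItems: ImplOrTraitItems(DefId) -> ty::ImplOrTraitItem<'tcx> }
pub struct ImplOrTraitItems<'tcx> {
    data: PhantomData<&'tcx ()>
}

impl<'tcx> DepTrackingMapConfig for ImplOrTraitItems<'tcx> {
    type Key = DefId;
    type Value = ty::ImplOrTraitItem<'tcx>;
    fn to_dep_node(key: &DefId) -> DepNode { DepNode::ImplOrTraitItems(*key) }
}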
diff --git a/src/librustc/middle/ty/mod.rs b/src/librustc/middle/ty/mod.rs
deleted file mode 100644 (file)
index 00a011c..0000000
+++ /dev/null
@@ -1,2696 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-pub use self::ImplOrTraitItemId::*;
-pub use self::ClosureKind::*;
-pub use self::Variance::*;
-pub use self::DtorKind::*;
-pub use self::ImplOrTraitItemContainer::*;
-pub use self::BorrowKind::*;
-pub use self::ImplOrTraitItem::*;
-pub use self::IntVarValue::*;
-pub use self::LvaluePreference::*;
-pub use self::fold::TypeFoldable;
-
-use dep_graph::{self, DepNode};
-use front::map as ast_map;
-use front::map::LinkedPath;
-use middle;
-use middle::cstore::{self, CrateStore, LOCAL_CRATE};
-use middle::def::{self, Def, ExportMap};
-use middle::def_id::DefId;
-use middle::lang_items::{FnTraitLangItem, FnMutTraitLangItem, FnOnceTraitLangItem};
-use middle::region::{CodeExtent};
-use middle::subst::{self, Subst, Substs, VecPerParamSpace};
-use middle::traits;
-use middle::ty;
-use middle::ty::fold::TypeFolder;
-use middle::ty::walk::TypeWalker;
-use util::common::MemoizationMap;
-use util::nodemap::{NodeMap, NodeSet};
-use util::nodemap::FnvHashMap;
-
-use serialize::{Encodable, Encoder, Decodable, Decoder};
-use std::borrow::{Borrow, Cow};
-use std::cell::Cell;
-use std::hash::{Hash, Hasher};
-use std::iter;
-use std::rc::Rc;
-use std::slice;
-use std::vec::IntoIter;
-use std::collections::{HashMap, HashSet};
-use syntax::ast::{self, CrateNum, Name, NodeId};
-use syntax::attr::{self, AttrMetaMethods};
-use syntax::codemap::{DUMMY_SP, Span};
-use syntax::parse::token::InternedString;
-
-use rustc_front::hir;
-use rustc_front::hir::{ItemImpl, ItemTrait, PatKind};
-use rustc_front::intravisit::Visitor;
-
-pub use self::sty::{Binder, DebruijnIndex};
-pub use self::sty::{BuiltinBound, BuiltinBounds, ExistentialBounds};
-pub use self::sty::{BareFnTy, FnSig, PolyFnSig, FnOutput, PolyFnOutput};
-pub use self::sty::{ClosureTy, InferTy, ParamTy, ProjectionTy, TraitTy};
-pub use self::sty::{ClosureSubsts, TypeAndMut};
-pub use self::sty::{TraitRef, TypeVariants, PolyTraitRef};
-pub use self::sty::{BoundRegion, EarlyBoundRegion, FreeRegion, Region};
-pub use self::sty::{TyVid, IntVid, FloatVid, RegionVid, SkolemizedRegionVid};
-pub use self::sty::BoundRegion::*;
-pub use self::sty::FnOutput::*;
-pub use self::sty::InferTy::*;
-pub use self::sty::Region::*;
-pub use self::sty::TypeVariants::*;
-
-pub use self::sty::BuiltinBound::Send as BoundSend;
-pub use self::sty::BuiltinBound::Sized as BoundSized;
-pub use self::sty::BuiltinBound::Copy as BoundCopy;
-pub use self::sty::BuiltinBound::Sync as BoundSync;
-
-pub use self::contents::TypeContents;
-pub use self::context::{ctxt, tls};
-pub use self::context::{CtxtArenas, Lift, Tables};
-
-pub use self::trait_def::{TraitDef, TraitFlags};
-
-pub mod adjustment;
-pub mod cast;
-pub mod error;
-pub mod fast_reject;
-pub mod fold;
-pub mod _match;
-pub mod maps;
-pub mod outlives;
-pub mod relate;
-pub mod trait_def;
-pub mod walk;
-pub mod wf;
-pub mod util;
-
-mod contents;
-mod context;
-mod flags;
-mod ivar;
-mod structural_impls;
-mod sty;
-
-pub type Disr = u64;
-pub const INITIAL_DISCRIMINANT_VALUE: Disr = 0;
-
-// Data types
-
-/// The complete set of all analyses described in this module. This is
-/// produced by the driver and fed to trans and later passes.
-pub struct CrateAnalysis<'a> {
-    pub export_map: ExportMap,
-    pub access_levels: middle::privacy::AccessLevels,
-    pub reachable: NodeSet,
-    pub name: &'a str,
-    pub glob_map: Option<GlobMap>,
-}
-
-#[derive(Copy, Clone)]
-pub enum DtorKind {
-    NoDtor,
-    TraitDtor(bool)
-}
-
-impl DtorKind {
-    pub fn is_present(&self) -> bool {
-        match *self {
-            TraitDtor(..) => true,
-            _ => false
-        }
-    }
-
-    pub fn has_drop_flag(&self) -> bool {
-        match self {
-            &NoDtor => false,
-            &TraitDtor(flag) => flag
-        }
-    }
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
-pub enum ImplOrTraitItemContainer {
-    TraitContainer(DefId),
-    ImplContainer(DefId),
-}
-
-impl ImplOrTraitItemContainer {
-    pub fn id(&self) -> DefId {
-        match *self {
-            TraitContainer(id) => id,
-            ImplContainer(id) => id,
-        }
-    }
-}
-
-#[derive(Clone)]
-pub enum ImplOrTraitItem<'tcx> {
-    ConstTraitItem(Rc<AssociatedConst<'tcx>>),
-    MethodTraitItem(Rc<Method<'tcx>>),
-    TypeTraitItem(Rc<AssociatedType<'tcx>>),
-}
-
-impl<'tcx> ImplOrTraitItem<'tcx> {
-    fn id(&self) -> ImplOrTraitItemId {
-        match *self {
-            ConstTraitItem(ref associated_const) => {
-                ConstTraitItemId(associated_const.def_id)
-            }
-            MethodTraitItem(ref method) => MethodTraitItemId(method.def_id),
-            TypeTraitItem(ref associated_type) => {
-                TypeTraitItemId(associated_type.def_id)
-            }
-        }
-    }
-
-    pub fn def_id(&self) -> DefId {
-        match *self {
-            ConstTraitItem(ref associated_const) => associated_const.def_id,
-            MethodTraitItem(ref method) => method.def_id,
-            TypeTraitItem(ref associated_type) => associated_type.def_id,
-        }
-    }
-
-    pub fn name(&self) -> Name {
-        match *self {
-            ConstTraitItem(ref associated_const) => associated_const.name,
-            MethodTraitItem(ref method) => method.name,
-            TypeTraitItem(ref associated_type) => associated_type.name,
-        }
-    }
-
-    pub fn vis(&self) -> hir::Visibility {
-        match *self {
-            ConstTraitItem(ref associated_const) => associated_const.vis,
-            MethodTraitItem(ref method) => method.vis,
-            TypeTraitItem(ref associated_type) => associated_type.vis,
-        }
-    }
-
-    pub fn container(&self) -> ImplOrTraitItemContainer {
-        match *self {
-            ConstTraitItem(ref associated_const) => associated_const.container,
-            MethodTraitItem(ref method) => method.container,
-            TypeTraitItem(ref associated_type) => associated_type.container,
-        }
-    }
-
-    pub fn as_opt_method(&self) -> Option<Rc<Method<'tcx>>> {
-        match *self {
-            MethodTraitItem(ref m) => Some((*m).clone()),
-            _ => None,
-        }
-    }
-}
-
-#[derive(Clone, Copy, Debug)]
-pub enum ImplOrTraitItemId {
-    ConstTraitItemId(DefId),
-    MethodTraitItemId(DefId),
-    TypeTraitItemId(DefId),
-}
-
-impl ImplOrTraitItemId {
-    pub fn def_id(&self) -> DefId {
-        match *self {
-            ConstTraitItemId(def_id) => def_id,
-            MethodTraitItemId(def_id) => def_id,
-            TypeTraitItemId(def_id) => def_id,
-        }
-    }
-}
-
-#[derive(Clone, Debug)]
-pub struct Method<'tcx> {
-    pub name: Name,
-    pub generics: Generics<'tcx>,
-    pub predicates: GenericPredicates<'tcx>,
-    pub fty: BareFnTy<'tcx>,
-    pub explicit_self: ExplicitSelfCategory,
-    pub vis: hir::Visibility,
-    pub def_id: DefId,
-    pub container: ImplOrTraitItemContainer,
-}
-
-impl<'tcx> Method<'tcx> {
-    pub fn new(name: Name,
-               generics: ty::Generics<'tcx>,
-               predicates: GenericPredicates<'tcx>,
-               fty: BareFnTy<'tcx>,
-               explicit_self: ExplicitSelfCategory,
-               vis: hir::Visibility,
-               def_id: DefId,
-               container: ImplOrTraitItemContainer)
-               -> Method<'tcx> {
-       Method {
-            name: name,
-            generics: generics,
-            predicates: predicates,
-            fty: fty,
-            explicit_self: explicit_self,
-            vis: vis,
-            def_id: def_id,
-            container: container,
-        }
-    }
-
-    pub fn container_id(&self) -> DefId {
-        match self.container {
-            TraitContainer(id) => id,
-            ImplContainer(id) => id,
-        }
-    }
-}
-
-impl<'tcx> PartialEq for Method<'tcx> {
-    #[inline]
-    fn eq(&self, other: &Self) -> bool { self.def_id == other.def_id }
-}
-
-impl<'tcx> Eq for Method<'tcx> {}
-
-impl<'tcx> Hash for Method<'tcx> {
-    #[inline]
-    fn hash<H: Hasher>(&self, s: &mut H) {
-        self.def_id.hash(s)
-    }
-}
-
-#[derive(Clone, Copy, Debug)]
-pub struct AssociatedConst<'tcx> {
-    pub name: Name,
-    pub ty: Ty<'tcx>,
-    pub vis: hir::Visibility,
-    pub def_id: DefId,
-    pub container: ImplOrTraitItemContainer,
-    pub has_value: bool
-}
-
-#[derive(Clone, Copy, Debug)]
-pub struct AssociatedType<'tcx> {
-    pub name: Name,
-    pub ty: Option<Ty<'tcx>>,
-    pub vis: hir::Visibility,
-    pub def_id: DefId,
-    pub container: ImplOrTraitItemContainer,
-}
-
-#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable)]
-pub struct ItemVariances {
-    pub types: VecPerParamSpace<Variance>,
-    pub regions: VecPerParamSpace<Variance>,
-}
-
-#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable, Copy)]
-pub enum Variance {
-    Covariant,      // T<A> <: T<B> iff A <: B -- e.g., function return type
-    Invariant,      // T<A> <: T<B> iff B == A -- e.g., type of mutable cell
-    Contravariant,  // T<A> <: T<B> iff B <: A -- e.g., function param type
-    Bivariant,      // T<A> <: T<B>            -- e.g., unused type parameter
-}
-
-#[derive(Clone, Copy, Debug)]
-pub struct MethodCallee<'tcx> {
-    /// Impl method ID, for inherent methods, or trait method ID, otherwise.
-    pub def_id: DefId,
-    pub ty: Ty<'tcx>,
-    pub substs: &'tcx subst::Substs<'tcx>
-}
-
-/// With method calls, we store some extra information in
-/// side tables (i.e. method_map). We use
-/// MethodCall as a key to index into these tables instead of
-/// just directly using the expression's NodeId. The reason
-/// is that we may apply adjustments (coercions), and the
-/// resulting expression also needs to use the
-/// side tables. The problem with this is that we don't
-/// assign a separate NodeId to this new expression,
-/// so it would clash with the base expression if both
-/// needed to add to the side tables. Thus, to disambiguate,
-/// we also keep track of whether there's an adjustment in
-/// our key.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-pub struct MethodCall {
-    pub expr_id: NodeId,
-    pub autoderef: u32
-}
-
-impl MethodCall {
-    pub fn expr(id: NodeId) -> MethodCall {
-        MethodCall {
-            expr_id: id,
-            autoderef: 0
-        }
-    }
-
-    pub fn autoderef(expr_id: NodeId, autoderef: u32) -> MethodCall {
-        MethodCall {
-            expr_id: expr_id,
-            autoderef: 1 + autoderef
-        }
-    }
-}
-
-// maps from an expression id that corresponds to a method call to the details
-// of the method to be invoked
-pub type MethodMap<'tcx> = FnvHashMap<MethodCall, MethodCallee<'tcx>>;
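
As a usage sketch (not from the deleted file): the two constructors produce distinct keys for the base call expression and for each autoderef adjustment applied to it, so both can live in the map without clashing; `expr_id` and the callees are assumed inputs:

// Illustrative only.
fn record<'tcx>(map: &mut MethodMap<'tcx>,
                expr_id: NodeId,
                plain: MethodCallee<'tcx>,
                via_deref: MethodCallee<'tcx>) {
    map.insert(MethodCall::expr(expr_id), plain);              // autoderef == 0
    map.insert(MethodCall::autoderef(expr_id, 0), via_deref);  // stored as autoderef == 1
}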
-
-// Contains information needed to resolve types and (in the future) look up
-// the types of AST nodes.
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-pub struct CReaderCacheKey {
-    pub cnum: CrateNum,
-    pub pos: usize,
-}
-
-/// A restriction that certain types must be the same size. The use of
-/// `transmute` gives rise to these restrictions. These generally
-/// cannot be checked until trans; therefore, each call to `transmute`
-/// will push one or more such restriction into the
-/// `transmute_restrictions` vector during `intrinsicck`. They are
-/// then checked during `trans` by the fn `check_intrinsics`.
-#[derive(Copy, Clone)]
-pub struct TransmuteRestriction<'tcx> {
-    /// The span whence the restriction comes.
-    pub span: Span,
-
-    /// The type being transmuted from.
-    pub original_from: Ty<'tcx>,
-
-    /// The type being transmuted to.
-    pub original_to: Ty<'tcx>,
-
-    /// The type being transmuted from, with all type parameters
-    /// substituted for an arbitrary representative. Not to be shown
-    /// to the end user.
-    pub substituted_from: Ty<'tcx>,
-
-    /// The type being transmuted to, with all type parameters
-    /// substituted for an arbitrary representative. Not to be shown
-    /// to the end user.
-    pub substituted_to: Ty<'tcx>,
-
-    /// NodeId of the transmute intrinsic.
-    pub id: NodeId,
-}
-
-/// Describes the fragment-state associated with a NodeId.
-///
-/// Currently only unfragmented paths have entries in the table,
-/// but longer-term this enum is expected to expand to also
-/// include data for fragmented paths.
-#[derive(Copy, Clone, Debug)]
-pub enum FragmentInfo {
-    Moved { var: NodeId, move_expr: NodeId },
-    Assigned { var: NodeId, assign_expr: NodeId, assignee_id: NodeId },
-}
-
-// Flags that we track on types. These flags are propagated upwards
-// through the type during type construction, so that we can quickly
-// check whether the type has various kinds of types in it without
-// recursing over the type itself.
-bitflags! {
-    flags TypeFlags: u32 {
-        const HAS_PARAMS         = 1 << 0,
-        const HAS_SELF           = 1 << 1,
-        const HAS_TY_INFER       = 1 << 2,
-        const HAS_RE_INFER       = 1 << 3,
-        const HAS_RE_EARLY_BOUND = 1 << 4,
-        const HAS_FREE_REGIONS   = 1 << 5,
-        const HAS_TY_ERR         = 1 << 6,
-        const HAS_PROJECTION     = 1 << 7,
-        const HAS_TY_CLOSURE     = 1 << 8,
-
-        // true if there are "names" of types and regions and so forth
-        // that are local to a particular fn
-        const HAS_LOCAL_NAMES   = 1 << 9,
-
-        const NEEDS_SUBST        = TypeFlags::HAS_PARAMS.bits |
-                                   TypeFlags::HAS_SELF.bits |
-                                   TypeFlags::HAS_RE_EARLY_BOUND.bits,
-
-        // Flags representing the nominal content of a type,
-        // computed by FlagsComputation. If you add a new nominal
-        // flag, it should be added here too.
-        const NOMINAL_FLAGS     = TypeFlags::HAS_PARAMS.bits |
-                                  TypeFlags::HAS_SELF.bits |
-                                  TypeFlags::HAS_TY_INFER.bits |
-                                  TypeFlags::HAS_RE_INFER.bits |
-                                  TypeFlags::HAS_RE_EARLY_BOUND.bits |
-                                  TypeFlags::HAS_FREE_REGIONS.bits |
-                                  TypeFlags::HAS_TY_ERR.bits |
-                                  TypeFlags::HAS_PROJECTION.bits |
-                                  TypeFlags::HAS_TY_CLOSURE.bits |
-                                  TypeFlags::HAS_LOCAL_NAMES.bits,
-
-        // Caches for type_is_sized, type_moves_by_default
-        const SIZEDNESS_CACHED  = 1 << 16,
-        const IS_SIZED          = 1 << 17,
-        const MOVENESS_CACHED   = 1 << 18,
-        const MOVES_BY_DEFAULT  = 1 << 19,
-    }
-}
-
-pub struct TyS<'tcx> {
-    pub sty: TypeVariants<'tcx>,
-    pub flags: Cell<TypeFlags>,
-
-    // the maximal depth of any bound regions appearing in this type.
-    region_depth: u32,
-}
-
-impl<'tcx> PartialEq for TyS<'tcx> {
-    #[inline]
-    fn eq(&self, other: &TyS<'tcx>) -> bool {
-        // (self as *const _) == (other as *const _)
-        (self as *const TyS<'tcx>) == (other as *const TyS<'tcx>)
-    }
-}
-impl<'tcx> Eq for TyS<'tcx> {}
-
-impl<'tcx> Hash for TyS<'tcx> {
-    fn hash<H: Hasher>(&self, s: &mut H) {
-        (self as *const TyS).hash(s)
-    }
-}
-
-pub type Ty<'tcx> = &'tcx TyS<'tcx>;
-
-impl<'tcx> Encodable for Ty<'tcx> {
-    fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
-        cstore::tls::with_encoding_context(s, |ecx, rbml_w| {
-            ecx.encode_ty(rbml_w, *self);
-            Ok(())
-        })
-    }
-}
-
-impl<'tcx> Decodable for Ty<'tcx> {
-    fn decode<D: Decoder>(d: &mut D) -> Result<Ty<'tcx>, D::Error> {
-        cstore::tls::with_decoding_context(d, |dcx, rbml_r| {
-            Ok(dcx.decode_ty(rbml_r))
-        })
-    }
-}
-
-
-/// Upvars do not get their own node-id. Instead, we use the pair of
-/// the original var id (that is, the root variable that is referenced
-/// by the upvar) and the id of the closure expression.
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
-pub struct UpvarId {
-    pub var_id: NodeId,
-    pub closure_expr_id: NodeId,
-}
-
-#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable, Copy)]
-pub enum BorrowKind {
-    /// Data must be immutable and is aliasable.
-    ImmBorrow,
-
-    /// Data must be immutable but not aliasable.  This kind of borrow
-    /// cannot currently be expressed by the user and is used only in
-    /// implicit closure bindings. It is needed when the closure
-    /// is borrowing or mutating a mutable referent, e.g.:
-    ///
-    ///    let x: &mut isize = ...;
-    ///    let y = || *x += 5;
-    ///
-    /// If we were to try to translate this closure into a more explicit
-    /// form, we'd encounter an error with the code as written:
-    ///
-    ///    struct Env { x: & &mut isize }
-    ///    let x: &mut isize = ...;
-    ///    let y = (&mut Env { &x }, fn_ptr);  // Closure is pair of env and fn
-    ///    fn fn_ptr(env: &mut Env) { **env.x += 5; }
-    ///
-    /// This is then illegal because you cannot mutate a `&mut` found
-    /// in an aliasable location. To solve, you'd have to translate with
-    /// an `&mut` borrow:
-    ///
-    ///    struct Env { x: & &mut isize }
-    ///    let x: &mut isize = ...;
-    ///    let y = (&mut Env { &mut x }, fn_ptr); // changed from &x to &mut x
-    ///    fn fn_ptr(env: &mut Env) { **env.x += 5; }
-    ///
-    /// Now the assignment to `**env.x` is legal, but creating a
-    /// mutable pointer to `x` is not because `x` is not mutable. We
-    /// could fix this by declaring `x` as `let mut x`. This is ok in
-    /// user code, if awkward, but extra weird for closures, since the
-    /// borrow is hidden.
-    ///
-    /// So we introduce a "unique imm" borrow -- the referent is
-    /// immutable, but not aliasable. This solves the problem. For
-    /// simplicity, we don't give users the way to express this
-    /// borrow, it's just used when translating closures.
-    UniqueImmBorrow,
-
-    /// Data is mutable and not aliasable.
-    MutBorrow
-}
-
-/// Information describing the capture of an upvar. This is computed
-/// during `typeck`, specifically by `regionck`.
-#[derive(PartialEq, Clone, Debug, Copy)]
-pub enum UpvarCapture {
-    /// Upvar is captured by value. This is always true when the
-    /// closure is labeled `move`, but can also be true in other cases
-    /// depending on inference.
-    ByValue,
-
-    /// Upvar is captured by reference.
-    ByRef(UpvarBorrow),
-}
-
-#[derive(PartialEq, Clone, Copy)]
-pub struct UpvarBorrow {
-    /// The kind of borrow: by-ref upvars have access to shared
-    /// immutable borrows, which are not part of the normal language
-    /// syntax.
-    pub kind: BorrowKind,
-
-    /// Region of the resulting reference.
-    pub region: ty::Region,
-}
-
-pub type UpvarCaptureMap = FnvHashMap<UpvarId, UpvarCapture>;
-
-#[derive(Copy, Clone)]
-pub struct ClosureUpvar<'tcx> {
-    pub def: Def,
-    pub span: Span,
-    pub ty: Ty<'tcx>,
-}
-
-#[derive(Clone, Copy, PartialEq)]
-pub enum IntVarValue {
-    IntType(ast::IntTy),
-    UintType(ast::UintTy),
-}
-
-/// Default region to use for the bound of objects that are
-/// supplied as the value for this type parameter. This is derived
-/// from `T:'a` annotations appearing in the type definition.  If
-/// this is `None`, then the default is inherited from the
-/// surrounding context. See RFC #599 for details.
-#[derive(Copy, Clone)]
-pub enum ObjectLifetimeDefault {
-    /// Require an explicit annotation. Occurs when multiple
-    /// `T:'a` constraints are found.
-    Ambiguous,
-
-    /// Use the base default, typically 'static, but in a fn body it is a fresh variable
-    BaseDefault,
-
-    /// Use the given region as the default.
-    Specific(Region),
-}
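
A sketch of the base and specific defaults in user syntax (illustrative only, using the pre-`dyn` trait-object syntax of this tree):

    trait Trait {}
    impl Trait for i32 {}

    // Base default: with no `T: 'a` bound in scope, `Box<Trait>` means
    // `Box<Trait + 'static>`.
    fn base(obj: Box<Trait>) -> Box<Trait> { obj }

    // Specific default: the `T: 'a` bound on `Holder` makes `Holder<'a, Trait>`
    // mean `Holder<'a, Trait + 'a>`.
    struct Holder<'a, T: 'a + ?Sized>(&'a T);
    fn specific<'a>(h: Holder<'a, Trait>) -> Holder<'a, Trait> { h }

    fn main() {
        let _boxed = base(Box::new(0i32));
        let x = 0i32;
        let _held = specific(Holder(&x as &Trait));
    }
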
-
-#[derive(Clone)]
-pub struct TypeParameterDef<'tcx> {
-    pub name: Name,
-    pub def_id: DefId,
-    pub space: subst::ParamSpace,
-    pub index: u32,
-    pub default_def_id: DefId, // for use in error reporting about defaults
-    pub default: Option<Ty<'tcx>>,
-    pub object_lifetime_default: ObjectLifetimeDefault,
-}
-
-#[derive(Clone)]
-pub struct RegionParameterDef {
-    pub name: Name,
-    pub def_id: DefId,
-    pub space: subst::ParamSpace,
-    pub index: u32,
-    pub bounds: Vec<ty::Region>,
-}
-
-impl RegionParameterDef {
-    pub fn to_early_bound_region(&self) -> ty::Region {
-        ty::ReEarlyBound(ty::EarlyBoundRegion {
-            space: self.space,
-            index: self.index,
-            name: self.name,
-        })
-    }
-    pub fn to_bound_region(&self) -> ty::BoundRegion {
-        ty::BoundRegion::BrNamed(self.def_id, self.name)
-    }
-}
-
-/// Information about the formal type/lifetime parameters associated
-/// with an item or method. Analogous to hir::Generics.
-#[derive(Clone, Debug)]
-pub struct Generics<'tcx> {
-    pub types: VecPerParamSpace<TypeParameterDef<'tcx>>,
-    pub regions: VecPerParamSpace<RegionParameterDef>,
-}
-
-impl<'tcx> Generics<'tcx> {
-    pub fn empty() -> Generics<'tcx> {
-        Generics {
-            types: VecPerParamSpace::empty(),
-            regions: VecPerParamSpace::empty(),
-        }
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.types.is_empty() && self.regions.is_empty()
-    }
-
-    pub fn has_type_params(&self, space: subst::ParamSpace) -> bool {
-        !self.types.is_empty_in(space)
-    }
-
-    pub fn has_region_params(&self, space: subst::ParamSpace) -> bool {
-        !self.regions.is_empty_in(space)
-    }
-}
-
-/// Bounds on generics.
-#[derive(Clone)]
-pub struct GenericPredicates<'tcx> {
-    pub predicates: VecPerParamSpace<Predicate<'tcx>>,
-}
-
-impl<'tcx> GenericPredicates<'tcx> {
-    pub fn empty() -> GenericPredicates<'tcx> {
-        GenericPredicates {
-            predicates: VecPerParamSpace::empty(),
-        }
-    }
-
-    pub fn instantiate(&self, tcx: &ctxt<'tcx>, substs: &Substs<'tcx>)
-                       -> InstantiatedPredicates<'tcx> {
-        InstantiatedPredicates {
-            predicates: self.predicates.subst(tcx, substs),
-        }
-    }
-
-    pub fn instantiate_supertrait(&self,
-                                  tcx: &ctxt<'tcx>,
-                                  poly_trait_ref: &ty::PolyTraitRef<'tcx>)
-                                  -> InstantiatedPredicates<'tcx>
-    {
-        InstantiatedPredicates {
-            predicates: self.predicates.map(|pred| pred.subst_supertrait(tcx, poly_trait_ref))
-        }
-    }
-}
-
-#[derive(Clone, PartialEq, Eq, Hash)]
-pub enum Predicate<'tcx> {
-    /// Corresponds to `where Foo : Bar<A,B,C>`. `Foo` here would be
-    /// the `Self` type of the trait reference and `A`, `B`, and `C`
-    /// would be the parameters in the `TypeSpace`.
-    Trait(PolyTraitPredicate<'tcx>),
-
-    /// where `T1 == T2`.
-    Equate(PolyEquatePredicate<'tcx>),
-
-    /// where `'a : 'b`
-    RegionOutlives(PolyRegionOutlivesPredicate),
-
-    /// where `T : 'a`
-    TypeOutlives(PolyTypeOutlivesPredicate<'tcx>),
-
-    /// where `<T as TraitRef>::Name == X`, approximately.
-    /// See `ProjectionPredicate` struct for details.
-    Projection(PolyProjectionPredicate<'tcx>),
-
-    /// no syntax: T WF
-    WellFormed(Ty<'tcx>),
-
-    /// trait must be object-safe
-    ObjectSafe(DefId),
-}
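
The mapping from surface `where` clauses to these variants, shown on a small illustrative function (not from this tree):

    use std::fmt::Debug;

    // Each `where` clause below lowers to one of the `Predicate` variants above:
    //   T: Debug              -> Predicate::Trait
    //   T: 'a                 -> Predicate::TypeOutlives
    //   'a: 'b                -> Predicate::RegionOutlives
    //   I: Iterator<Item = T> -> Predicate::Trait plus Predicate::Projection
    fn count_items<'a, 'b, T, I>(items: I) -> usize
        where T: Debug,
              T: 'a,
              'a: 'b,
              I: Iterator<Item = T>
    {
        items.count()
    }

    fn main() {
        assert_eq!(count_items(vec![1, 2, 3].into_iter()), 3);
    }
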
-
-impl<'tcx> Predicate<'tcx> {
-    /// Performs a substitution suitable for going from a
-    /// poly-trait-ref to supertraits that must hold if that
-    /// poly-trait-ref holds. This is slightly different from a normal
-    /// substitution in terms of what happens with bound regions.  See
-    /// lengthy comment below for details.
-    pub fn subst_supertrait(&self,
-                            tcx: &ctxt<'tcx>,
-                            trait_ref: &ty::PolyTraitRef<'tcx>)
-                            -> ty::Predicate<'tcx>
-    {
-        // The interaction between HRTB and supertraits is not entirely
-        // obvious. Let me walk you (and myself) through an example.
-        //
-        // Let's start with an easy case. Consider two traits:
-        //
-        //     trait Foo<'a> : Bar<'a,'a> { }
-        //     trait Bar<'b,'c> { }
-        //
-        // Now, if we have a trait reference `for<'x> T : Foo<'x>`, then
-        // we can deduce that `for<'x> T : Bar<'x,'x>`. Basically, if we
-        // knew that `Foo<'x>` (for any 'x) then we also know that
-        // `Bar<'x,'x>` (for any 'x). This more-or-less falls out from
-        // normal substitution.
-        //
-        // In terms of why this is sound, the idea is that whenever there
-        // is an impl of `T:Foo<'a>`, it must show that `T:Bar<'a,'a>`
-        // holds.  So if there is an impl of `T:Foo<'a>` that applies to
-        // all `'a`, then we must know that `T:Bar<'a,'a>` holds for all
-        // `'a`.
-        //
-        // Another example to be careful of is this:
-        //
-        //     trait Foo1<'a> : for<'b> Bar1<'a,'b> { }
-        //     trait Bar1<'b,'c> { }
-        //
-        // Here, if we have `for<'x> T : Foo1<'x>`, then what do we know?
-        // The answer is that we know `for<'x,'b> T : Bar1<'x,'b>`. The
-        // reason is similar to the previous example: any impl of
-        // `T:Foo1<'x>` must show that `for<'b> T : Bar1<'x, 'b>`.  So
-        // basically we would want to collapse the bound lifetimes from
-        // the input (`trait_ref`) and the supertraits.
-        //
-        // To achieve this in practice is fairly straightforward. Let's
-        // consider the more complicated scenario:
-        //
-        // - We start out with `for<'x> T : Foo1<'x>`. In this case, `'x`
-        //   has a De Bruijn index of 1. We want to produce `for<'x,'b> T : Bar1<'x,'b>`,
-        //   where both `'x` and `'b` would have a DB index of 1.
-        //   The substitution from the input trait-ref is therefore going to be
-        //   `'a => 'x` (where `'x` has a DB index of 1).
-        // - The super-trait-ref is `for<'b> Bar1<'a,'b>`, where `'a` is an
-        //   early-bound parameter and `'b` is a late-bound parameter with a
-        //   DB index of 1.
-        // - If we replace `'a` with `'x` from the input, it too will have
-        //   a DB index of 1, and thus we'll have `for<'x,'b> Bar1<'x,'b>`
-        //   just as we wanted.
-        //
-        // There is only one catch. If we just apply the substitution `'a
-        // => 'x` to `for<'b> Bar1<'a,'b>`, the substitution code will
-        // adjust the DB index because we are substituting into a binder (it
-        // tries to be so smart...) resulting in `for<'x> for<'b>
-        // Bar1<'x,'b>` (we have no syntax for this, so use your
-        // imagination). Basically the 'x will have DB index of 2 and 'b
-        // will have DB index of 1. Not quite what we want. So we apply
-        // the substitution to the *contents* of the trait reference,
-        // rather than the trait reference itself (put another way, the
-        // substitution code expects equal binding levels in the values
-        // from the substitution and the value being substituted into, and
-        // this trick achieves that).
-
-        let substs = &trait_ref.0.substs;
-        match *self {
-            Predicate::Trait(ty::Binder(ref data)) =>
-                Predicate::Trait(ty::Binder(data.subst(tcx, substs))),
-            Predicate::Equate(ty::Binder(ref data)) =>
-                Predicate::Equate(ty::Binder(data.subst(tcx, substs))),
-            Predicate::RegionOutlives(ty::Binder(ref data)) =>
-                Predicate::RegionOutlives(ty::Binder(data.subst(tcx, substs))),
-            Predicate::TypeOutlives(ty::Binder(ref data)) =>
-                Predicate::TypeOutlives(ty::Binder(data.subst(tcx, substs))),
-            Predicate::Projection(ty::Binder(ref data)) =>
-                Predicate::Projection(ty::Binder(data.subst(tcx, substs))),
-            Predicate::WellFormed(data) =>
-                Predicate::WellFormed(data.subst(tcx, substs)),
-            Predicate::ObjectSafe(trait_def_id) =>
-                Predicate::ObjectSafe(trait_def_id),
-        }
-    }
-}
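
An illustrative program (not part of this diff) exercising the first case from the comment above: a `for<'x> Foo<'x>` bound also makes `for<'x> Bar<'x,'x>` available, via the supertrait elaboration that `subst_supertrait` supports:

    trait Bar<'b, 'c> {}
    trait Foo<'a>: Bar<'a, 'a> {}

    struct S;
    impl<'a> Bar<'a, 'a> for S {}
    impl<'a> Foo<'a> for S {}

    fn wants_bar<T>(_: T) where T: for<'x> Bar<'x, 'x> {}

    // The elaborated caller bounds for `wants_foo` include `for<'x> T: Bar<'x, 'x>`,
    // so the call below type-checks with only the `Foo` bound written.
    fn wants_foo<T>(t: T) where T: for<'x> Foo<'x> {
        wants_bar(t)
    }

    fn main() {
        wants_foo(S);
    }
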
-
-#[derive(Clone, PartialEq, Eq, Hash)]
-pub struct TraitPredicate<'tcx> {
-    pub trait_ref: TraitRef<'tcx>
-}
-pub type PolyTraitPredicate<'tcx> = ty::Binder<TraitPredicate<'tcx>>;
-
-impl<'tcx> TraitPredicate<'tcx> {
-    pub fn def_id(&self) -> DefId {
-        self.trait_ref.def_id
-    }
-
-    /// Creates the dep-node for selecting/evaluating this trait reference.
-    fn dep_node(&self) -> DepNode {
-        DepNode::TraitSelect(self.def_id())
-    }
-
-    pub fn input_types(&self) -> &[Ty<'tcx>] {
-        self.trait_ref.substs.types.as_slice()
-    }
-
-    pub fn self_ty(&self) -> Ty<'tcx> {
-        self.trait_ref.self_ty()
-    }
-}
-
-impl<'tcx> PolyTraitPredicate<'tcx> {
-    pub fn def_id(&self) -> DefId {
-        // ok to skip binder since trait def-id does not care about regions
-        self.0.def_id()
-    }
-
-    pub fn dep_node(&self) -> DepNode {
-        // ok to skip binder since depnode does not care about regions
-        self.0.dep_node()
-    }
-}
-
-#[derive(Clone, PartialEq, Eq, Hash, Debug)]
-pub struct EquatePredicate<'tcx>(pub Ty<'tcx>, pub Ty<'tcx>); // `0 == 1`
-pub type PolyEquatePredicate<'tcx> = ty::Binder<EquatePredicate<'tcx>>;
-
-#[derive(Clone, PartialEq, Eq, Hash, Debug)]
-pub struct OutlivesPredicate<A,B>(pub A, pub B); // `A : B`
-pub type PolyOutlivesPredicate<A,B> = ty::Binder<OutlivesPredicate<A,B>>;
-pub type PolyRegionOutlivesPredicate = PolyOutlivesPredicate<ty::Region, ty::Region>;
-pub type PolyTypeOutlivesPredicate<'tcx> = PolyOutlivesPredicate<Ty<'tcx>, ty::Region>;
-
-/// This kind of predicate has no *direct* correspondent in the
-/// syntax, but it roughly corresponds to the syntactic forms:
-///
-/// 1. `T : TraitRef<..., Item=Type>`
-/// 2. `<T as TraitRef<...>>::Item == Type` (NYI)
-///
-/// In particular, form #1 is "desugared" to the combination of a
-/// normal trait predicate (`T : TraitRef<...>`) and one of these
-/// predicates. Form #2 is a broader form in that it also permits
-/// equality between arbitrary types. Processing an instance of Form
-/// #2 eventually yields one of these `ProjectionPredicate`
-/// instances to normalize the LHS.
-#[derive(Clone, PartialEq, Eq, Hash)]
-pub struct ProjectionPredicate<'tcx> {
-    pub projection_ty: ProjectionTy<'tcx>,
-    pub ty: Ty<'tcx>,
-}
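
Form #1 in user syntax, as a small illustrative example (not from this tree): the bound `I: Iterator<Item = u32>` desugars into a trait predicate `I: Iterator` plus a projection predicate `<I as Iterator>::Item == u32`.

    fn sum_u32<I>(it: I) -> u32 where I: Iterator<Item = u32> {
        it.fold(0, |acc, x| acc + x)
    }

    fn main() {
        assert_eq!(sum_u32(vec![1u32, 2, 3].into_iter()), 6);
    }
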
-
-pub type PolyProjectionPredicate<'tcx> = Binder<ProjectionPredicate<'tcx>>;
-
-impl<'tcx> PolyProjectionPredicate<'tcx> {
-    pub fn item_name(&self) -> Name {
-        self.0.projection_ty.item_name // safe to skip the binder to access a name
-    }
-
-    pub fn sort_key(&self) -> (DefId, Name) {
-        self.0.projection_ty.sort_key()
-    }
-}
-
-pub trait ToPolyTraitRef<'tcx> {
-    fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx>;
-}
-
-impl<'tcx> ToPolyTraitRef<'tcx> for TraitRef<'tcx> {
-    fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> {
-        assert!(!self.has_escaping_regions());
-        ty::Binder(self.clone())
-    }
-}
-
-impl<'tcx> ToPolyTraitRef<'tcx> for PolyTraitPredicate<'tcx> {
-    fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> {
-        self.map_bound_ref(|trait_pred| trait_pred.trait_ref)
-    }
-}
-
-impl<'tcx> ToPolyTraitRef<'tcx> for PolyProjectionPredicate<'tcx> {
-    fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> {
-        // Note: unlike with TraitRef::to_poly_trait_ref(),
-        // self.0.trait_ref is permitted to have escaping regions.
-        // This is because here `self` has a `Binder` and so does our
-        // return value, so we are preserving the number of binding
-        // levels.
-        ty::Binder(self.0.projection_ty.trait_ref)
-    }
-}
-
-pub trait ToPredicate<'tcx> {
-    fn to_predicate(&self) -> Predicate<'tcx>;
-}
-
-impl<'tcx> ToPredicate<'tcx> for TraitRef<'tcx> {
-    fn to_predicate(&self) -> Predicate<'tcx> {
-        // we're about to add a binder, so let's check that we don't
-        // accidentally capture anything, or else that might be some
-        // weird debruijn accounting.
-        assert!(!self.has_escaping_regions());
-
-        ty::Predicate::Trait(ty::Binder(ty::TraitPredicate {
-            trait_ref: self.clone()
-        }))
-    }
-}
-
-impl<'tcx> ToPredicate<'tcx> for PolyTraitRef<'tcx> {
-    fn to_predicate(&self) -> Predicate<'tcx> {
-        ty::Predicate::Trait(self.to_poly_trait_predicate())
-    }
-}
-
-impl<'tcx> ToPredicate<'tcx> for PolyEquatePredicate<'tcx> {
-    fn to_predicate(&self) -> Predicate<'tcx> {
-        Predicate::Equate(self.clone())
-    }
-}
-
-impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate {
-    fn to_predicate(&self) -> Predicate<'tcx> {
-        Predicate::RegionOutlives(self.clone())
-    }
-}
-
-impl<'tcx> ToPredicate<'tcx> for PolyTypeOutlivesPredicate<'tcx> {
-    fn to_predicate(&self) -> Predicate<'tcx> {
-        Predicate::TypeOutlives(self.clone())
-    }
-}
-
-impl<'tcx> ToPredicate<'tcx> for PolyProjectionPredicate<'tcx> {
-    fn to_predicate(&self) -> Predicate<'tcx> {
-        Predicate::Projection(self.clone())
-    }
-}
-
-impl<'tcx> Predicate<'tcx> {
-    /// Iterates over the types in this predicate. Note that in all
-    /// cases this is skipping over a binder, so late-bound regions
-    /// with depth 0 are bound by the predicate.
-    pub fn walk_tys(&self) -> IntoIter<Ty<'tcx>> {
-        let vec: Vec<_> = match *self {
-            ty::Predicate::Trait(ref data) => {
-                data.0.trait_ref.substs.types.as_slice().to_vec()
-            }
-            ty::Predicate::Equate(ty::Binder(ref data)) => {
-                vec![data.0, data.1]
-            }
-            ty::Predicate::TypeOutlives(ty::Binder(ref data)) => {
-                vec![data.0]
-            }
-            ty::Predicate::RegionOutlives(..) => {
-                vec![]
-            }
-            ty::Predicate::Projection(ref data) => {
-                let trait_inputs = data.0.projection_ty.trait_ref.substs.types.as_slice();
-                trait_inputs.iter()
-                            .cloned()
-                            .chain(Some(data.0.ty))
-                            .collect()
-            }
-            ty::Predicate::WellFormed(data) => {
-                vec![data]
-            }
-            ty::Predicate::ObjectSafe(_trait_def_id) => {
-                vec![]
-            }
-        };
-
-        // The only reason to collect into a vector here is that I was
-        // too lazy to make the full (somewhat complicated) iterator
-        // type that would be needed here. But I wanted this fn to
-        // return an iterator conceptually, rather than a `Vec`, so as
-        // to be closer to `Ty::walk`.
-        vec.into_iter()
-    }
-
-    pub fn to_opt_poly_trait_ref(&self) -> Option<PolyTraitRef<'tcx>> {
-        match *self {
-            Predicate::Trait(ref t) => {
-                Some(t.to_poly_trait_ref())
-            }
-            Predicate::Projection(..) |
-            Predicate::Equate(..) |
-            Predicate::RegionOutlives(..) |
-            Predicate::WellFormed(..) |
-            Predicate::ObjectSafe(..) |
-            Predicate::TypeOutlives(..) => {
-                None
-            }
-        }
-    }
-}
-
-/// Represents the bounds declared on a particular set of type
-/// parameters.  Should eventually be generalized into a flat list of
-/// where clauses.  You can obtain an `InstantiatedPredicates` list from a
-/// `GenericPredicates` by using the `instantiate` method. Note that this method
-/// reflects an important semantic invariant of `InstantiatedPredicates`: while
-/// the `GenericPredicates` are expressed in terms of the bound type
-/// parameters of the impl/trait/whatever, an `InstantiatedPredicates` instance
-/// represents a set of bounds for some particular instantiation,
-/// meaning that the generic parameters have been substituted with
-/// their values.
-///
-/// Example:
-///
-///     struct Foo<T,U:Bar<T>> { ... }
-///
-/// Here, the `GenericPredicates` for `Foo` would contain a list of bounds like
-/// `[[], [U:Bar<T>]]`.  Now if there were some particular reference
-/// like `Foo<isize,usize>`, then the `InstantiatedPredicates` would be `[[],
-/// [usize:Bar<isize>]]`.
-#[derive(Clone)]
-pub struct InstantiatedPredicates<'tcx> {
-    pub predicates: VecPerParamSpace<Predicate<'tcx>>,
-}
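
The doc example above, spelled out as a compilable sketch (illustrative only):

    trait Bar<T> {}
    struct Foo<T, U: Bar<T>> { t: T, u: U }

    impl Bar<isize> for usize {}

    fn main() {
        // Instantiating `Foo`'s generic predicates at `Foo<isize, usize>` yields
        // the obligation `usize: Bar<isize>`, discharged by the impl above.
        let f = Foo { t: 1isize, u: 2usize };
        println!("{} {}", f.t, f.u);
    }
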
-
-impl<'tcx> InstantiatedPredicates<'tcx> {
-    pub fn empty() -> InstantiatedPredicates<'tcx> {
-        InstantiatedPredicates { predicates: VecPerParamSpace::empty() }
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.predicates.is_empty()
-    }
-}
-
-impl<'tcx> TraitRef<'tcx> {
-    pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>) -> TraitRef<'tcx> {
-        TraitRef { def_id: def_id, substs: substs }
-    }
-
-    pub fn self_ty(&self) -> Ty<'tcx> {
-        self.substs.self_ty().unwrap()
-    }
-
-    pub fn input_types(&self) -> &[Ty<'tcx>] {
-        // Select only the "input types" from a trait-reference. For
-        // now this is all the types that appear in the
-        // trait-reference, but it should eventually exclude
-        // associated types.
-        self.substs.types.as_slice()
-    }
-}
-
-/// When type checking, we use the `ParameterEnvironment` to track
-/// details about the type/lifetime parameters that are in scope.
-/// It primarily stores the bounds information.
-///
-/// Note: This information might seem to be redundant with the data in
-/// `tcx.ty_param_defs`, but it is not. That table contains the
-/// parameter definitions from an "outside" perspective, but this
-/// struct will contain the bounds for a parameter as seen from inside
-/// the function body. Currently the only real distinction is that
-/// bound lifetime parameters are replaced with free ones, but in the
-/// future I hope to refine the representation of types so as to make
-/// more distinctions clearer.
-#[derive(Clone)]
-pub struct ParameterEnvironment<'a, 'tcx:'a> {
-    pub tcx: &'a ctxt<'tcx>,
-
-    /// See `construct_free_substs` for details.
-    pub free_substs: Substs<'tcx>,
-
-    /// Each type parameter has an implicit region bound that
-    /// indicates it must outlive at least the function body (the user
-    /// may specify stronger requirements). This field indicates the
-    /// region of the callee.
-    pub implicit_region_bound: ty::Region,
-
-    /// Obligations that the caller must satisfy. This is basically
-    /// the set of bounds on the in-scope type parameters, translated
-    /// into Obligations, and elaborated and normalized.
-    pub caller_bounds: Vec<ty::Predicate<'tcx>>,
-
-    /// Caches the results of trait selection. This cache is used
-    /// for things that have to do with the parameters in scope.
-    pub selection_cache: traits::SelectionCache<'tcx>,
-
-    /// Caches the results of trait evaluation.
-    pub evaluation_cache: traits::EvaluationCache<'tcx>,
-
-    /// Scope that is attached to free regions for this scope. This
-    /// is usually the id of the fn body, but for more abstract scopes
-    /// like structs we often use the node-id of the struct.
-    ///
-    /// FIXME(#3696). It would be nice to refactor so that free
-    /// regions don't have this implicit scope and instead introduce
-    /// relationships in the environment.
-    pub free_id_outlive: CodeExtent,
-}
-
-impl<'a, 'tcx> ParameterEnvironment<'a, 'tcx> {
-    pub fn with_caller_bounds(&self,
-                              caller_bounds: Vec<ty::Predicate<'tcx>>)
-                              -> ParameterEnvironment<'a,'tcx>
-    {
-        ParameterEnvironment {
-            tcx: self.tcx,
-            free_substs: self.free_substs.clone(),
-            implicit_region_bound: self.implicit_region_bound,
-            caller_bounds: caller_bounds,
-            selection_cache: traits::SelectionCache::new(),
-            evaluation_cache: traits::EvaluationCache::new(),
-            free_id_outlive: self.free_id_outlive,
-        }
-    }
-
-    pub fn for_item(cx: &'a ctxt<'tcx>, id: NodeId) -> ParameterEnvironment<'a, 'tcx> {
-        match cx.map.find(id) {
-            Some(ast_map::NodeImplItem(ref impl_item)) => {
-                match impl_item.node {
-                    hir::ImplItemKind::Type(_) => {
-                        // associated types don't have their own entry (for some reason),
-                        // so for now just grab the environment for the impl
-                        let impl_id = cx.map.get_parent(id);
-                        let impl_def_id = cx.map.local_def_id(impl_id);
-                        let scheme = cx.lookup_item_type(impl_def_id);
-                        let predicates = cx.lookup_predicates(impl_def_id);
-                        cx.construct_parameter_environment(impl_item.span,
-                                                           &scheme.generics,
-                                                           &predicates,
-                                                           cx.region_maps.item_extent(id))
-                    }
-                    hir::ImplItemKind::Const(_, _) => {
-                        let def_id = cx.map.local_def_id(id);
-                        let scheme = cx.lookup_item_type(def_id);
-                        let predicates = cx.lookup_predicates(def_id);
-                        cx.construct_parameter_environment(impl_item.span,
-                                                           &scheme.generics,
-                                                           &predicates,
-                                                           cx.region_maps.item_extent(id))
-                    }
-                    hir::ImplItemKind::Method(_, ref body) => {
-                        let method_def_id = cx.map.local_def_id(id);
-                        match cx.impl_or_trait_item(method_def_id) {
-                            MethodTraitItem(ref method_ty) => {
-                                let method_generics = &method_ty.generics;
-                                let method_bounds = &method_ty.predicates;
-                                cx.construct_parameter_environment(
-                                    impl_item.span,
-                                    method_generics,
-                                    method_bounds,
-                                    cx.region_maps.call_site_extent(id, body.id))
-                            }
-                            _ => {
-                                cx.sess
-                                  .bug("ParameterEnvironment::for_item(): \
-                                        got non-method item from impl method?!")
-                            }
-                        }
-                    }
-                }
-            }
-            Some(ast_map::NodeTraitItem(trait_item)) => {
-                match trait_item.node {
-                    hir::TypeTraitItem(..) => {
-                        // associated types don't have their own entry (for some reason),
-                        // so for now just grab the environment for the trait
-                        let trait_id = cx.map.get_parent(id);
-                        let trait_def_id = cx.map.local_def_id(trait_id);
-                        let trait_def = cx.lookup_trait_def(trait_def_id);
-                        let predicates = cx.lookup_predicates(trait_def_id);
-                        cx.construct_parameter_environment(trait_item.span,
-                                                           &trait_def.generics,
-                                                           &predicates,
-                                                           cx.region_maps.item_extent(id))
-                    }
-                    hir::ConstTraitItem(..) => {
-                        let def_id = cx.map.local_def_id(id);
-                        let scheme = cx.lookup_item_type(def_id);
-                        let predicates = cx.lookup_predicates(def_id);
-                        cx.construct_parameter_environment(trait_item.span,
-                                                           &scheme.generics,
-                                                           &predicates,
-                                                           cx.region_maps.item_extent(id))
-                    }
-                    hir::MethodTraitItem(_, ref body) => {
-                        // Use call-site for extent (unless this is a
-                        // trait method with no default; then fallback
-                        // to the method id).
-                        let method_def_id = cx.map.local_def_id(id);
-                        match cx.impl_or_trait_item(method_def_id) {
-                            MethodTraitItem(ref method_ty) => {
-                                let method_generics = &method_ty.generics;
-                                let method_bounds = &method_ty.predicates;
-                                let extent = if let Some(ref body) = *body {
-                                    // default impl: use call_site extent as free_id_outlive bound.
-                                    cx.region_maps.call_site_extent(id, body.id)
-                                } else {
-                                    // no default impl: use item extent as free_id_outlive bound.
-                                    cx.region_maps.item_extent(id)
-                                };
-                                cx.construct_parameter_environment(
-                                    trait_item.span,
-                                    method_generics,
-                                    method_bounds,
-                                    extent)
-                            }
-                            _ => {
-                                cx.sess
-                                  .bug("ParameterEnvironment::for_item(): \
-                                        got non-method item from provided \
-                                        method?!")
-                            }
-                        }
-                    }
-                }
-            }
-            Some(ast_map::NodeItem(item)) => {
-                match item.node {
-                    hir::ItemFn(_, _, _, _, _, ref body) => {
-                        // We assume this is a function.
-                        let fn_def_id = cx.map.local_def_id(id);
-                        let fn_scheme = cx.lookup_item_type(fn_def_id);
-                        let fn_predicates = cx.lookup_predicates(fn_def_id);
-
-                        cx.construct_parameter_environment(item.span,
-                                                           &fn_scheme.generics,
-                                                           &fn_predicates,
-                                                           cx.region_maps.call_site_extent(id,
-                                                                                           body.id))
-                    }
-                    hir::ItemEnum(..) |
-                    hir::ItemStruct(..) |
-                    hir::ItemImpl(..) |
-                    hir::ItemConst(..) |
-                    hir::ItemStatic(..) => {
-                        let def_id = cx.map.local_def_id(id);
-                        let scheme = cx.lookup_item_type(def_id);
-                        let predicates = cx.lookup_predicates(def_id);
-                        cx.construct_parameter_environment(item.span,
-                                                           &scheme.generics,
-                                                           &predicates,
-                                                           cx.region_maps.item_extent(id))
-                    }
-                    hir::ItemTrait(..) => {
-                        let def_id = cx.map.local_def_id(id);
-                        let trait_def = cx.lookup_trait_def(def_id);
-                        let predicates = cx.lookup_predicates(def_id);
-                        cx.construct_parameter_environment(item.span,
-                                                           &trait_def.generics,
-                                                           &predicates,
-                                                           cx.region_maps.item_extent(id))
-                    }
-                    _ => {
-                        cx.sess.span_bug(item.span,
-                                         "ParameterEnvironment::from_item():
-                                          can't create a parameter \
-                                          environment for this kind of item")
-                    }
-                }
-            }
-            Some(ast_map::NodeExpr(..)) => {
-                // This is a convenience to allow closures to work.
-                ParameterEnvironment::for_item(cx, cx.map.get_parent(id))
-            }
-            _ => {
-                cx.sess.bug(&format!("ParameterEnvironment::from_item(): \
-                                     `{}` is not an item",
-                                    cx.map.node_to_string(id)))
-            }
-        }
-    }
-}
-
-/// A "type scheme", in ML terminology, is a type combined with some
-/// set of generic types that the type is, well, generic over. In Rust
-/// terms, it is the "type" of a fn item or struct -- this type will
-/// include various generic parameters that must be substituted when
-/// the item/struct is referenced. That is called converting the type
-/// scheme to a monotype.
-///
-/// - `generics`: the set of type parameters and their bounds
-/// - `ty`: the base types, which may reference the parameters defined
-///   in `generics`
-///
-/// Note that TypeSchemes are also sometimes called "polytypes" (and
-/// in fact this struct used to carry that name, so you may find some
-/// stray references in a comment or something). We try to reserve the
-/// "poly" prefix to refer to higher-ranked things, as in
-/// `PolyTraitRef`.
-///
-/// Note that each item also comes with predicates, see
-/// `lookup_predicates`.
-#[derive(Clone, Debug)]
-pub struct TypeScheme<'tcx> {
-    pub generics: Generics<'tcx>,
-    pub ty: Ty<'tcx>,
-}
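
A user-level illustration (not from this tree) of converting a type scheme to a monotype:

    fn id<T>(x: T) -> T { x }

    fn main() {
        // `id` has the type scheme `for<T> fn(T) -> T`; this use site
        // substitutes `T = i32`, giving the monotype `fn(i32) -> i32`.
        let f: fn(i32) -> i32 = id;
        assert_eq!(f(7), 7);
        assert_eq!(id("hi"), "hi"); // a second instantiation, with `T = &str`
    }
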
-
-bitflags! {
-    flags AdtFlags: u32 {
-        const NO_ADT_FLAGS        = 0,
-        const IS_ENUM             = 1 << 0,
-        const IS_DTORCK           = 1 << 1, // is this a dtorck type?
-        const IS_DTORCK_VALID     = 1 << 2,
-        const IS_PHANTOM_DATA     = 1 << 3,
-        const IS_SIMD             = 1 << 4,
-        const IS_FUNDAMENTAL      = 1 << 5,
-        const IS_NO_DROP_FLAG     = 1 << 6,
-    }
-}
-
-pub type AdtDef<'tcx> = &'tcx AdtDefData<'tcx, 'static>;
-pub type VariantDef<'tcx> = &'tcx VariantDefData<'tcx, 'static>;
-pub type FieldDef<'tcx> = &'tcx FieldDefData<'tcx, 'static>;
-
-// See comment on AdtDefData for explanation
-pub type AdtDefMaster<'tcx> = &'tcx AdtDefData<'tcx, 'tcx>;
-pub type VariantDefMaster<'tcx> = &'tcx VariantDefData<'tcx, 'tcx>;
-pub type FieldDefMaster<'tcx> = &'tcx FieldDefData<'tcx, 'tcx>;
-
-pub struct VariantDefData<'tcx, 'container: 'tcx> {
-    /// The variant's DefId. If this is a tuple-like struct,
-    /// this is the DefId of the struct's ctor.
-    pub did: DefId,
-    pub name: Name, // struct's name if this is a struct
-    pub disr_val: Disr,
-    pub fields: Vec<FieldDefData<'tcx, 'container>>,
-    pub kind: VariantKind,
-}
-
-pub struct FieldDefData<'tcx, 'container: 'tcx> {
-    /// The field's DefId. NOTE: the fields of tuple-like enum variants
-    /// are not real items, and don't have entries in tcache etc.
-    pub did: DefId,
-    /// special_idents::unnamed_field.name
-    /// if this is a tuple-like field
-    pub name: Name,
-    pub vis: hir::Visibility,
-    /// TyIVar is used here to allow for variance (see the doc at
-    /// AdtDefData).
-    ///
-    /// Note: direct accesses to `ty` must also add dep edges.
-    ty: ivar::TyIVar<'tcx, 'container>
-}
-
-/// The definition of an abstract data type - a struct or enum.
-///
-/// These are all interned (by intern_adt_def) into the adt_defs
-/// table.
-///
-/// Because of the possibility of nested tcx-s, this type
-/// needs 2 lifetimes: the traditional variant lifetime ('tcx)
-/// bounding the lifetime of the inner types is of course necessary.
-/// However, it is not sufficient - types from a child tcx must
-/// not be leaked into the master tcx by being stored in an AdtDefData.
-///
-/// The 'container lifetime ensures that by outliving the container
-/// tcx and preventing shorter-lived types from being inserted. When
-/// write access is not needed, the 'container lifetime can be
-/// erased to 'static, which can be done by the AdtDef wrapper.
-pub struct AdtDefData<'tcx, 'container: 'tcx> {
-    pub did: DefId,
-    pub variants: Vec<VariantDefData<'tcx, 'container>>,
-    destructor: Cell<Option<DefId>>,
-    flags: Cell<AdtFlags>,
-}
-
-impl<'tcx, 'container> PartialEq for AdtDefData<'tcx, 'container> {
-    // AdtDefData are always interned and this is part of TyS equality
-    #[inline]
-    fn eq(&self, other: &Self) -> bool { self as *const _ == other as *const _ }
-}
-
-impl<'tcx, 'container> Eq for AdtDefData<'tcx, 'container> {}
-
-impl<'tcx, 'container> Hash for AdtDefData<'tcx, 'container> {
-    #[inline]
-    fn hash<H: Hasher>(&self, s: &mut H) {
-        (self as *const AdtDefData).hash(s)
-    }
-}
-
-impl<'tcx> Encodable for AdtDef<'tcx> {
-    fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
-        self.did.encode(s)
-    }
-}
-
-impl<'tcx> Decodable for AdtDef<'tcx> {
-    fn decode<D: Decoder>(d: &mut D) -> Result<AdtDef<'tcx>, D::Error> {
-        let def_id: DefId = try!{ Decodable::decode(d) };
-
-        cstore::tls::with_decoding_context(d, |dcx, _| {
-            let def_id = dcx.translate_def_id(def_id);
-            Ok(dcx.tcx().lookup_adt_def(def_id))
-        })
-    }
-}
-
-
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-pub enum AdtKind { Struct, Enum }
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
-pub enum VariantKind { Struct, Tuple, Unit }
-
-impl VariantKind {
-    pub fn from_variant_data(vdata: &hir::VariantData) -> Self {
-        match *vdata {
-            hir::VariantData::Struct(..) => VariantKind::Struct,
-            hir::VariantData::Tuple(..) => VariantKind::Tuple,
-            hir::VariantData::Unit(..) => VariantKind::Unit,
-        }
-    }
-}
-
-impl<'tcx, 'container> AdtDefData<'tcx, 'container> {
-    fn new(tcx: &ctxt<'tcx>,
-           did: DefId,
-           kind: AdtKind,
-           variants: Vec<VariantDefData<'tcx, 'container>>) -> Self {
-        let mut flags = AdtFlags::NO_ADT_FLAGS;
-        let attrs = tcx.get_attrs(did);
-        if attr::contains_name(&attrs, "fundamental") {
-            flags = flags | AdtFlags::IS_FUNDAMENTAL;
-        }
-        if attr::contains_name(&attrs, "unsafe_no_drop_flag") {
-            flags = flags | AdtFlags::IS_NO_DROP_FLAG;
-        }
-        if tcx.lookup_simd(did) {
-            flags = flags | AdtFlags::IS_SIMD;
-        }
-        if Some(did) == tcx.lang_items.phantom_data() {
-            flags = flags | AdtFlags::IS_PHANTOM_DATA;
-        }
-        if let AdtKind::Enum = kind {
-            flags = flags | AdtFlags::IS_ENUM;
-        }
-        AdtDefData {
-            did: did,
-            variants: variants,
-            flags: Cell::new(flags),
-            destructor: Cell::new(None)
-        }
-    }
-
-    fn calculate_dtorck(&'tcx self, tcx: &ctxt<'tcx>) {
-        if tcx.is_adt_dtorck(self) {
-            self.flags.set(self.flags.get() | AdtFlags::IS_DTORCK);
-        }
-        self.flags.set(self.flags.get() | AdtFlags::IS_DTORCK_VALID)
-    }
-
-    /// Returns the kind of the ADT - Struct or Enum.
-    #[inline]
-    pub fn adt_kind(&self) -> AdtKind {
-        if self.flags.get().intersects(AdtFlags::IS_ENUM) {
-            AdtKind::Enum
-        } else {
-            AdtKind::Struct
-        }
-    }
-
-    /// Returns whether this is a dtorck type. If this returns
-    /// true, this type being safe for destruction requires it to be
-    /// alive; otherwise, only the contents are required to be.
-    #[inline]
-    pub fn is_dtorck(&'tcx self, tcx: &ctxt<'tcx>) -> bool {
-        if !self.flags.get().intersects(AdtFlags::IS_DTORCK_VALID) {
-            self.calculate_dtorck(tcx)
-        }
-        self.flags.get().intersects(AdtFlags::IS_DTORCK)
-    }
-
-    /// Returns whether this type is #[fundamental] for the purposes
-    /// of coherence checking.
-    #[inline]
-    pub fn is_fundamental(&self) -> bool {
-        self.flags.get().intersects(AdtFlags::IS_FUNDAMENTAL)
-    }
-
-    #[inline]
-    pub fn is_simd(&self) -> bool {
-        self.flags.get().intersects(AdtFlags::IS_SIMD)
-    }
-
-    /// Returns true if this is PhantomData<T>.
-    #[inline]
-    pub fn is_phantom_data(&self) -> bool {
-        self.flags.get().intersects(AdtFlags::IS_PHANTOM_DATA)
-    }
-
-    /// Returns whether this type has a destructor.
-    pub fn has_dtor(&self) -> bool {
-        match self.dtor_kind() {
-            NoDtor => false,
-            TraitDtor(..) => true
-        }
-    }
-
-    /// Asserts this is a struct and returns the struct's unique
-    /// variant.
-    pub fn struct_variant(&self) -> &VariantDefData<'tcx, 'container> {
-        assert!(self.adt_kind() == AdtKind::Struct);
-        &self.variants[0]
-    }
-
-    #[inline]
-    pub fn type_scheme(&self, tcx: &ctxt<'tcx>) -> TypeScheme<'tcx> {
-        tcx.lookup_item_type(self.did)
-    }
-
-    #[inline]
-    pub fn predicates(&self, tcx: &ctxt<'tcx>) -> GenericPredicates<'tcx> {
-        tcx.lookup_predicates(self.did)
-    }
-
-    /// Returns an iterator over all fields contained
-    /// by this ADT.
-    #[inline]
-    pub fn all_fields(&self) ->
-            iter::FlatMap<
-                slice::Iter<VariantDefData<'tcx, 'container>>,
-                slice::Iter<FieldDefData<'tcx, 'container>>,
-                for<'s> fn(&'s VariantDefData<'tcx, 'container>)
-                    -> slice::Iter<'s, FieldDefData<'tcx, 'container>>
-            > {
-        self.variants.iter().flat_map(VariantDefData::fields_iter)
-    }
-
-    #[inline]
-    pub fn is_empty(&self) -> bool {
-        self.variants.is_empty()
-    }
-
-    #[inline]
-    pub fn is_univariant(&self) -> bool {
-        self.variants.len() == 1
-    }
-
-    pub fn is_payloadfree(&self) -> bool {
-        !self.variants.is_empty() &&
-            self.variants.iter().all(|v| v.fields.is_empty())
-    }
-
-    pub fn variant_with_id(&self, vid: DefId) -> &VariantDefData<'tcx, 'container> {
-        self.variants
-            .iter()
-            .find(|v| v.did == vid)
-            .expect("variant_with_id: unknown variant")
-    }
-
-    pub fn variant_index_with_id(&self, vid: DefId) -> usize {
-        self.variants
-            .iter()
-            .position(|v| v.did == vid)
-            .expect("variant_index_with_id: unknown variant")
-    }
-
-    pub fn variant_of_def(&self, def: Def) -> &VariantDefData<'tcx, 'container> {
-        match def {
-            Def::Variant(_, vid) => self.variant_with_id(vid),
-            Def::Struct(..) | Def::TyAlias(..) => self.struct_variant(),
-            _ => panic!("unexpected def {:?} in variant_of_def", def)
-        }
-    }
-
-    pub fn destructor(&self) -> Option<DefId> {
-        self.destructor.get()
-    }
-
-    pub fn set_destructor(&self, dtor: DefId) {
-        self.destructor.set(Some(dtor));
-    }
-
-    pub fn dtor_kind(&self) -> DtorKind {
-        match self.destructor.get() {
-            Some(_) => {
-                TraitDtor(!self.flags.get().intersects(AdtFlags::IS_NO_DROP_FLAG))
-            }
-            None => NoDtor,
-        }
-    }
-}
-
-impl<'tcx, 'container> VariantDefData<'tcx, 'container> {
-    #[inline]
-    fn fields_iter(&self) -> slice::Iter<FieldDefData<'tcx, 'container>> {
-        self.fields.iter()
-    }
-
-    pub fn kind(&self) -> VariantKind {
-        self.kind
-    }
-
-    pub fn is_tuple_struct(&self) -> bool {
-        self.kind() == VariantKind::Tuple
-    }
-
-    #[inline]
-    pub fn find_field_named(&self,
-                            name: ast::Name)
-                            -> Option<&FieldDefData<'tcx, 'container>> {
-        self.fields.iter().find(|f| f.name == name)
-    }
-
-    #[inline]
-    pub fn index_of_field_named(&self,
-                                name: ast::Name)
-                                -> Option<usize> {
-        self.fields.iter().position(|f| f.name == name)
-    }
-
-    #[inline]
-    pub fn field_named(&self, name: ast::Name) -> &FieldDefData<'tcx, 'container> {
-        self.find_field_named(name).unwrap()
-    }
-}
-
-impl<'tcx, 'container> FieldDefData<'tcx, 'container> {
-    pub fn new(did: DefId,
-               name: Name,
-               vis: hir::Visibility) -> Self {
-        FieldDefData {
-            did: did,
-            name: name,
-            vis: vis,
-            ty: ivar::TyIVar::new()
-        }
-    }
-
-    pub fn ty(&self, tcx: &ctxt<'tcx>, subst: &Substs<'tcx>) -> Ty<'tcx> {
-        self.unsubst_ty().subst(tcx, subst)
-    }
-
-    pub fn unsubst_ty(&self) -> Ty<'tcx> {
-        self.ty.unwrap(DepNode::FieldTy(self.did))
-    }
-
-    pub fn fulfill_ty(&self, ty: Ty<'container>) {
-        self.ty.fulfill(DepNode::FieldTy(self.did), ty);
-    }
-}
-
-/// Records the substitutions used to translate the polytype for an
-/// item into the monotype of an item reference.
-#[derive(Clone)]
-pub struct ItemSubsts<'tcx> {
-    pub substs: Substs<'tcx>,
-}
-
-#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Debug, RustcEncodable, RustcDecodable)]
-pub enum ClosureKind {
-    // Warning: Ordering is significant here! The ordering is chosen
-    // because the trait Fn is a subtrait of FnMut, which is in turn a
-    // subtrait of FnOnce; hence we order it so that Fn < FnMut < FnOnce.
-    FnClosureKind,
-    FnMutClosureKind,
-    FnOnceClosureKind,
-}
-
-impl ClosureKind {
-    pub fn trait_did(&self, cx: &ctxt) -> DefId {
-        let result = match *self {
-            FnClosureKind => cx.lang_items.require(FnTraitLangItem),
-            FnMutClosureKind => {
-                cx.lang_items.require(FnMutTraitLangItem)
-            }
-            FnOnceClosureKind => {
-                cx.lang_items.require(FnOnceTraitLangItem)
-            }
-        };
-        match result {
-            Ok(trait_did) => trait_did,
-            Err(err) => cx.sess.fatal(&err[..]),
-        }
-    }
-
-    /// Returns true if a type that implements this closure kind
-    /// must also implement `other`.
-    pub fn extends(self, other: ty::ClosureKind) -> bool {
-        match (self, other) {
-            (FnClosureKind, FnClosureKind) => true,
-            (FnClosureKind, FnMutClosureKind) => true,
-            (FnClosureKind, FnOnceClosureKind) => true,
-            (FnMutClosureKind, FnMutClosureKind) => true,
-            (FnMutClosureKind, FnOnceClosureKind) => true,
-            (FnOnceClosureKind, FnOnceClosureKind) => true,
-            _ => false,
-        }
-    }
-}
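
A small illustrative program (not part of this diff) showing the Fn < FnMut < FnOnce ordering from the caller's side:

    fn call_once<F: FnOnce()>(f: F) { f() }
    fn call_mut<F: FnMut()>(mut f: F) { f() }
    fn call<F: Fn()>(f: F) { f() }

    fn main() {
        // A closure that only reads its environment implements Fn, and because
        // Fn is a subtrait of FnMut, which is a subtrait of FnOnce, the same
        // kind of closure satisfies all three bounds.
        let msg = String::from("hello");
        call(|| println!("{}", msg));
        call_mut(|| println!("{}", msg));
        call_once(|| println!("{}", msg));
    }
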
-
-impl<'tcx> TyS<'tcx> {
-    /// Iterator that walks `self` and any types reachable from
-    /// `self`, in depth-first order. Note that just walks the types
-    /// that appear in `self`, it does not descend into the fields of
-    /// structs or variants. For example:
-    ///
-    /// ```notrust
-    /// isize => { isize }
-    /// Foo<Bar<isize>> => { Foo<Bar<isize>>, Bar<isize>, isize }
-    /// [isize] => { [isize], isize }
-    /// ```
-    pub fn walk(&'tcx self) -> TypeWalker<'tcx> {
-        TypeWalker::new(self)
-    }
-
-    /// Iterator that walks the immediate children of `self`.  Hence
-    /// `Foo<Bar<i32>, u32>` yields the sequence `[Bar<i32>, u32]`
-    /// (but not `i32`, like `walk`).
-    pub fn walk_shallow(&'tcx self) -> IntoIter<Ty<'tcx>> {
-        walk::walk_shallow(self)
-    }
-
-    /// Walks `ty` and any types appearing within `ty`, invoking the
-    /// callback `f` on each type. If the callback returns false, then the
-    /// children of the current type are ignored.
-    ///
-    /// Note: prefer `ty.walk()` where possible.
-    pub fn maybe_walk<F>(&'tcx self, mut f: F)
-        where F : FnMut(Ty<'tcx>) -> bool
-    {
-        let mut walker = self.walk();
-        while let Some(ty) = walker.next() {
-            if !f(ty) {
-                walker.skip_current_subtree();
-            }
-        }
-    }
-}
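
The walk order documented above, reproduced with a toy stand-in type (illustrative only; this is not the compiler's TypeWalker):

    enum Ty { Isize, Named(&'static str, Vec<Ty>) }

    // Pre-order, depth-first visit of a type and the types nested inside it.
    fn walk<'a>(ty: &'a Ty, out: &mut Vec<&'a Ty>) {
        out.push(ty);
        if let Ty::Named(_, ref args) = *ty {
            for arg in args {
                walk(arg, out);
            }
        }
    }

    fn main() {
        // Foo<Bar<isize>> => { Foo<Bar<isize>>, Bar<isize>, isize }, as documented.
        let t = Ty::Named("Foo", vec![Ty::Named("Bar", vec![Ty::Isize])]);
        let mut visited = Vec::new();
        walk(&t, &mut visited);
        assert_eq!(visited.len(), 3);
    }
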
-
-impl<'tcx> ItemSubsts<'tcx> {
-    pub fn empty() -> ItemSubsts<'tcx> {
-        ItemSubsts { substs: Substs::empty() }
-    }
-
-    pub fn is_noop(&self) -> bool {
-        self.substs.is_noop()
-    }
-}
-
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum LvaluePreference {
-    PreferMutLvalue,
-    NoPreference
-}
-
-impl LvaluePreference {
-    pub fn from_mutbl(m: hir::Mutability) -> Self {
-        match m {
-            hir::MutMutable => PreferMutLvalue,
-            hir::MutImmutable => NoPreference,
-        }
-    }
-}
-
-/// Helper for looking things up in the various maps that are populated during
-/// typeck::collect (e.g., `cx.impl_or_trait_items`, `cx.tcache`, etc).  All of
-/// these share the pattern that if the id is local, it should have been loaded
-/// into the map by the `typeck::collect` phase.  If the def-id is external,
-/// then we have to go consult the crate loading code (and cache the result for
-/// the future).
-fn lookup_locally_or_in_crate_store<M, F>(descr: &str,
-                                          def_id: DefId,
-                                          map: &M,
-                                          load_external: F)
-                                          -> M::Value where
-    M: MemoizationMap<Key=DefId>,
-    F: FnOnce() -> M::Value,
-{
-    map.memoize(def_id, || {
-        if def_id.is_local() {
-            panic!("No def'n found for {:?} in tcx.{}", def_id, descr);
-        }
-        load_external()
-    })
-}
-
-impl BorrowKind {
-    pub fn from_mutbl(m: hir::Mutability) -> BorrowKind {
-        match m {
-            hir::MutMutable => MutBorrow,
-            hir::MutImmutable => ImmBorrow,
-        }
-    }
-
-    /// Returns a mutability `m` such that an `&m T` pointer could be used to obtain this borrow
-    /// kind. Because borrow kinds are richer than mutabilities, we sometimes have to pick a
-    /// mutability that is stronger than necessary so that it at least *would permit* the borrow in
-    /// question.
-    pub fn to_mutbl_lossy(self) -> hir::Mutability {
-        match self {
-            MutBorrow => hir::MutMutable,
-            ImmBorrow => hir::MutImmutable,
-
-            // We have no type corresponding to a unique imm borrow, so
-            // use `&mut`. It gives all the capabilities of an `&uniq`
-            // and hence is a safe "over approximation".
-            UniqueImmBorrow => hir::MutMutable,
-        }
-    }
-
-    pub fn to_user_str(&self) -> &'static str {
-        match *self {
-            MutBorrow => "mutable",
-            ImmBorrow => "immutable",
-            UniqueImmBorrow => "uniquely immutable",
-        }
-    }
-}
-
-impl<'tcx> ctxt<'tcx> {
-    pub fn node_id_to_type(&self, id: NodeId) -> Ty<'tcx> {
-        match self.node_id_to_type_opt(id) {
-           Some(ty) => ty,
-           None => self.sess.bug(
-               &format!("node_id_to_type: no type for node `{}`",
-                        self.map.node_to_string(id)))
-        }
-    }
-
-    pub fn node_id_to_type_opt(&self, id: NodeId) -> Option<Ty<'tcx>> {
-        self.tables.borrow().node_types.get(&id).cloned()
-    }
-
-    pub fn node_id_item_substs(&self, id: NodeId) -> ItemSubsts<'tcx> {
-        match self.tables.borrow().item_substs.get(&id) {
-            None => ItemSubsts::empty(),
-            Some(ts) => ts.clone(),
-        }
-    }
-
-    // Returns the type of a pattern as a monotype. Like `expr_ty`, this function
-    // doesn't provide type parameter substitutions.
-    pub fn pat_ty(&self, pat: &hir::Pat) -> Ty<'tcx> {
-        self.node_id_to_type(pat.id)
-    }
-    pub fn pat_ty_opt(&self, pat: &hir::Pat) -> Option<Ty<'tcx>> {
-        self.node_id_to_type_opt(pat.id)
-    }
-
-    // Returns the type of an expression as a monotype.
-    //
-    // NB (1): This is the PRE-ADJUSTMENT TYPE for the expression.  That is, in
-    // some cases, we insert `AutoAdjustment` annotations such as auto-deref or
-    // auto-ref.  The type returned by this function does not consider such
-    // adjustments.  See `expr_ty_adjusted()` instead.
-    //
-    // NB (2): This type doesn't provide type parameter substitutions; e.g. if you
-    // ask for the type of "id" in "id(3)", it will return "fn(&isize) -> isize"
-    // instead of "fn(ty) -> T with T = isize".
-    pub fn expr_ty(&self, expr: &hir::Expr) -> Ty<'tcx> {
-        self.node_id_to_type(expr.id)
-    }
-
-    pub fn expr_ty_opt(&self, expr: &hir::Expr) -> Option<Ty<'tcx>> {
-        self.node_id_to_type_opt(expr.id)
-    }
-
-    /// Returns the type of `expr`, considering any `AutoAdjustment`
-    /// entry recorded for that expression.
-    ///
-    /// It would almost certainly be better to store the adjusted ty in with
-    /// the `AutoAdjustment`, but I opted not to do this because it would
-    /// require serializing and deserializing the type and, although that's not
-    /// hard to do, I just hate that code so much I didn't want to touch it
-    /// unless it was to fix it properly, which seemed a distraction from the
-    /// thread at hand! -nmatsakis
-    pub fn expr_ty_adjusted(&self, expr: &hir::Expr) -> Ty<'tcx> {
-        self.expr_ty(expr)
-            .adjust(self, expr.span, expr.id,
-                    self.tables.borrow().adjustments.get(&expr.id),
-                    |method_call| {
-            self.tables.borrow().method_map.get(&method_call).map(|method| method.ty)
-        })
-    }
-
-    pub fn expr_ty_adjusted_opt(&self, expr: &hir::Expr) -> Option<Ty<'tcx>> {
-        self.expr_ty_opt(expr).map(|t| t.adjust(self,
-                                                expr.span,
-                                                expr.id,
-                                                self.tables.borrow().adjustments.get(&expr.id),
-                                                |method_call| {
-            self.tables.borrow().method_map.get(&method_call).map(|method| method.ty)
-        }))
-    }
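
Illustrative user code (not from this tree) for the pre-adjustment vs. adjusted distinction:

    fn main() {
        let v = vec![1, 2, 3];
        // Pre-adjustment, the receiver `v` has type Vec<i32> (what `expr_ty`
        // reports); for the call to the slice method `first` it is auto-dereffed
        // to [i32] and auto-reffed to &[i32], which `expr_ty_adjusted` accounts for.
        let first = v.first();
        assert_eq!(first, Some(&1));
    }
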
-
-    pub fn expr_span(&self, id: NodeId) -> Span {
-        match self.map.find(id) {
-            Some(ast_map::NodeExpr(e)) => {
-                e.span
-            }
-            Some(f) => {
-                self.sess.bug(&format!("Node id {} is not an expr: {:?}",
-                                       id, f));
-            }
-            None => {
-                self.sess.bug(&format!("Node id {} is not present \
-                                        in the node map", id));
-            }
-        }
-    }
-
-    pub fn local_var_name_str(&self, id: NodeId) -> InternedString {
-        match self.map.find(id) {
-            Some(ast_map::NodeLocal(pat)) => {
-                match pat.node {
-                    PatKind::Ident(_, ref path1, _) => path1.node.name.as_str(),
-                    _ => {
-                        self.sess.bug(&format!("Variable id {} maps to {:?}, not local", id, pat));
-                    },
-                }
-            },
-            r => self.sess.bug(&format!("Variable id {} maps to {:?}, not local", id, r)),
-        }
-    }
-
-    pub fn resolve_expr(&self, expr: &hir::Expr) -> Def {
-        match self.def_map.borrow().get(&expr.id) {
-            Some(def) => def.full_def(),
-            None => {
-                self.sess.span_bug(expr.span, &format!(
-                    "no def-map entry for expr {}", expr.id));
-            }
-        }
-    }
-
-    pub fn expr_is_lval(&self, expr: &hir::Expr) -> bool {
-         match expr.node {
-            hir::ExprPath(..) => {
-                // We can't use resolve_expr here, as this needs to run on broken
-                // programs. We don't need to, though - associated items are all
-                // rvalues.
-                match self.def_map.borrow().get(&expr.id) {
-                    Some(&def::PathResolution {
-                        base_def: Def::Static(..), ..
-                    }) | Some(&def::PathResolution {
-                        base_def: Def::Upvar(..), ..
-                    }) | Some(&def::PathResolution {
-                        base_def: Def::Local(..), ..
-                    }) => {
-                        true
-                    }
-                    Some(&def::PathResolution { base_def: Def::Err, .. })=> true,
-                    Some(..) => false,
-                    None => self.sess.span_bug(expr.span, &format!(
-                        "no def for path {}", expr.id))
-                }
-            }
-
-            hir::ExprType(ref e, _) => {
-                self.expr_is_lval(e)
-            }
-
-            hir::ExprUnary(hir::UnDeref, _) |
-            hir::ExprField(..) |
-            hir::ExprTupField(..) |
-            hir::ExprIndex(..) => {
-                true
-            }
-
-            hir::ExprCall(..) |
-            hir::ExprMethodCall(..) |
-            hir::ExprStruct(..) |
-            hir::ExprRange(..) |
-            hir::ExprTup(..) |
-            hir::ExprIf(..) |
-            hir::ExprMatch(..) |
-            hir::ExprClosure(..) |
-            hir::ExprBlock(..) |
-            hir::ExprRepeat(..) |
-            hir::ExprVec(..) |
-            hir::ExprBreak(..) |
-            hir::ExprAgain(..) |
-            hir::ExprRet(..) |
-            hir::ExprWhile(..) |
-            hir::ExprLoop(..) |
-            hir::ExprAssign(..) |
-            hir::ExprInlineAsm(..) |
-            hir::ExprAssignOp(..) |
-            hir::ExprLit(_) |
-            hir::ExprUnary(..) |
-            hir::ExprBox(..) |
-            hir::ExprAddrOf(..) |
-            hir::ExprBinary(..) |
-            hir::ExprCast(..) => {
-                false
-            }
-        }
-    }
-
-    pub fn provided_trait_methods(&self, id: DefId) -> Vec<Rc<Method<'tcx>>> {
-        if let Some(id) = self.map.as_local_node_id(id) {
-            if let ItemTrait(_, _, _, ref ms) = self.map.expect_item(id).node {
-                ms.iter().filter_map(|ti| {
-                    if let hir::MethodTraitItem(_, Some(_)) = ti.node {
-                        match self.impl_or_trait_item(self.map.local_def_id(ti.id)) {
-                            MethodTraitItem(m) => Some(m),
-                            _ => {
-                                self.sess.bug("provided_trait_methods(): \
-                                               non-method item found from \
-                                               looking up provided method?!")
-                            }
-                        }
-                    } else {
-                        None
-                    }
-                }).collect()
-            } else {
-                self.sess.bug(&format!("provided_trait_methods: `{:?}` is not a trait", id))
-            }
-        } else {
-            self.sess.cstore.provided_trait_methods(self, id)
-        }
-    }
-
-    pub fn associated_consts(&self, id: DefId) -> Vec<Rc<AssociatedConst<'tcx>>> {
-        if let Some(id) = self.map.as_local_node_id(id) {
-            match self.map.expect_item(id).node {
-                ItemTrait(_, _, _, ref tis) => {
-                    tis.iter().filter_map(|ti| {
-                        if let hir::ConstTraitItem(_, _) = ti.node {
-                            match self.impl_or_trait_item(self.map.local_def_id(ti.id)) {
-                                ConstTraitItem(ac) => Some(ac),
-                                _ => {
-                                    self.sess.bug("associated_consts(): \
-                                                   non-const item found from \
-                                                   looking up a constant?!")
-                                }
-                            }
-                        } else {
-                            None
-                        }
-                    }).collect()
-                }
-                ItemImpl(_, _, _, _, _, ref iis) => {
-                    iis.iter().filter_map(|ii| {
-                        if let hir::ImplItemKind::Const(_, _) = ii.node {
-                            match self.impl_or_trait_item(self.map.local_def_id(ii.id)) {
-                                ConstTraitItem(ac) => Some(ac),
-                                _ => {
-                                    self.sess.bug("associated_consts(): \
-                                                   non-const item found from \
-                                                   looking up a constant?!")
-                                }
-                            }
-                        } else {
-                            None
-                        }
-                    }).collect()
-                }
-                _ => {
-                    self.sess.bug(&format!("associated_consts: `{:?}` is not a trait \
-                                            or impl", id))
-                }
-            }
-        } else {
-            self.sess.cstore.associated_consts(self, id)
-        }
-    }
-
-    pub fn trait_impl_polarity(&self, id: DefId) -> Option<hir::ImplPolarity> {
-        if let Some(id) = self.map.as_local_node_id(id) {
-            match self.map.find(id) {
-                Some(ast_map::NodeItem(item)) => {
-                    match item.node {
-                        hir::ItemImpl(_, polarity, _, _, _, _) => Some(polarity),
-                        _ => None
-                    }
-                }
-                _ => None
-            }
-        } else {
-            self.sess.cstore.impl_polarity(id)
-        }
-    }
-
-    pub fn custom_coerce_unsized_kind(&self, did: DefId) -> adjustment::CustomCoerceUnsized {
-        self.custom_coerce_unsized_kinds.memoize(did, || {
-            let (kind, src) = if did.krate != LOCAL_CRATE {
-                (self.sess.cstore.custom_coerce_unsized_kind(did), "external")
-            } else {
-                (None, "local")
-            };
-
-            match kind {
-                Some(kind) => kind,
-                None => {
-                    self.sess.bug(&format!("custom_coerce_unsized_kind: \
-                                            {} impl `{}` is missing its kind",
-                                           src, self.item_path_str(did)));
-                }
-            }
-        })
-    }
-
-    pub fn impl_or_trait_item(&self, id: DefId) -> ImplOrTraitItem<'tcx> {
-        lookup_locally_or_in_crate_store(
-            "impl_or_trait_items", id, &self.impl_or_trait_items,
-            || self.sess.cstore.impl_or_trait_item(self, id))
-    }
-
-    pub fn trait_item_def_ids(&self, id: DefId) -> Rc<Vec<ImplOrTraitItemId>> {
-        lookup_locally_or_in_crate_store(
-            "trait_item_def_ids", id, &self.trait_item_def_ids,
-            || Rc::new(self.sess.cstore.trait_item_def_ids(id)))
-    }
-
-    /// Returns the trait-ref corresponding to a given impl, or None if it is
-    /// an inherent impl.
-    pub fn impl_trait_ref(&self, id: DefId) -> Option<TraitRef<'tcx>> {
-        lookup_locally_or_in_crate_store(
-            "impl_trait_refs", id, &self.impl_trait_refs,
-            || self.sess.cstore.impl_trait_ref(self, id))
-    }
-
-    /// Returns whether this DefId refers to an impl
-    pub fn is_impl(&self, id: DefId) -> bool {
-        if let Some(id) = self.map.as_local_node_id(id) {
-            if let Some(ast_map::NodeItem(
-                &hir::Item { node: hir::ItemImpl(..), .. })) = self.map.find(id) {
-                true
-            } else {
-                false
-            }
-        } else {
-            self.sess.cstore.is_impl(id)
-        }
-    }
-
-    pub fn trait_ref_to_def_id(&self, tr: &hir::TraitRef) -> DefId {
-        self.def_map.borrow().get(&tr.ref_id).expect("no def-map entry for trait").def_id()
-    }
-
-    pub fn item_path_str(&self, id: DefId) -> String {
-        self.with_path(id, |path| ast_map::path_to_string(path))
-    }
-
-    pub fn def_path(&self, id: DefId) -> ast_map::DefPath {
-        if id.is_local() {
-            self.map.def_path(id)
-        } else {
-            self.sess.cstore.def_path(id)
-        }
-    }
-
-    pub fn with_path<T, F>(&self, id: DefId, f: F) -> T where
-        F: FnOnce(ast_map::PathElems) -> T,
-    {
-        if let Some(id) = self.map.as_local_node_id(id) {
-            self.map.with_path(id, f)
-        } else {
-            f(self.sess.cstore.item_path(id).iter().cloned().chain(LinkedPath::empty()))
-        }
-    }
-
-    pub fn item_name(&self, id: DefId) -> ast::Name {
-        if let Some(id) = self.map.as_local_node_id(id) {
-            self.map.get_path_elem(id).name()
-        } else {
-            self.sess.cstore.item_name(id)
-        }
-    }
-
-    // Register a given item type
-    pub fn register_item_type(&self, did: DefId, ty: TypeScheme<'tcx>) {
-        self.tcache.borrow_mut().insert(did, ty);
-    }
-
-    // If the given item is in an external crate, looks up its type and adds it to
-    // the type cache. Returns the type parameters and type.
-    pub fn lookup_item_type(&self, did: DefId) -> TypeScheme<'tcx> {
-        lookup_locally_or_in_crate_store(
-            "tcache", did, &self.tcache,
-            || self.sess.cstore.item_type(self, did))
-    }
-
-    /// Given the did of a trait, returns its canonical trait ref.
-    pub fn lookup_trait_def(&self, did: DefId) -> &'tcx TraitDef<'tcx> {
-        lookup_locally_or_in_crate_store(
-            "trait_defs", did, &self.trait_defs,
-            || self.alloc_trait_def(self.sess.cstore.trait_def(self, did))
-        )
-    }
-
-    /// Given the did of an ADT, return a master reference to its
-    /// definition. Unless you are planning on fulfilling the ADT's fields,
-    /// use lookup_adt_def instead.
-    pub fn lookup_adt_def_master(&self, did: DefId) -> AdtDefMaster<'tcx> {
-        lookup_locally_or_in_crate_store(
-            "adt_defs", did, &self.adt_defs,
-            || self.sess.cstore.adt_def(self, did)
-        )
-    }
-
-    /// Given the did of an ADT, return a reference to its definition.
-    pub fn lookup_adt_def(&self, did: DefId) -> AdtDef<'tcx> {
-        // when reverse-variance goes away, a transmute::<AdtDefMaster,AdtDef>
-        // would be needed here.
-        self.lookup_adt_def_master(did)
-    }
-
-    /// Given the did of an item, returns its full set of predicates.
-    pub fn lookup_predicates(&self, did: DefId) -> GenericPredicates<'tcx> {
-        lookup_locally_or_in_crate_store(
-            "predicates", did, &self.predicates,
-            || self.sess.cstore.item_predicates(self, did))
-    }
-
-    /// Given the did of a trait, returns its superpredicates.
-    pub fn lookup_super_predicates(&self, did: DefId) -> GenericPredicates<'tcx> {
-        lookup_locally_or_in_crate_store(
-            "super_predicates", did, &self.super_predicates,
-            || self.sess.cstore.item_super_predicates(self, did))
-    }
-
-    /// If `type_needs_drop` returns true, then `ty` is definitely
-    /// non-copy and *might* have a destructor attached; if it returns
-    /// false, then `ty` definitely has no destructor (i.e. no drop glue).
-    ///
-    /// (Note that this implies that if `ty` has a destructor attached,
-    /// then `type_needs_drop` will definitely return `true` for `ty`.)
-    pub fn type_needs_drop_given_env<'a>(&self,
-                                         ty: Ty<'tcx>,
-                                         param_env: &ty::ParameterEnvironment<'a,'tcx>) -> bool {
-        // Issue #22536: We first query type_moves_by_default.  It sees a
-        // normalized version of the type, and therefore will definitely
-        // know whether the type implements Copy (and thus needs no
-        // cleanup/drop/zeroing) ...
-        let implements_copy = !ty.moves_by_default(param_env, DUMMY_SP);
-
-        if implements_copy { return false; }
-
-        // ... (issue #22536 continued) but as an optimization, still use
-        // prior logic of asking if the `needs_drop` bit is set; we need
-        // not zero non-Copy types if they have no destructor.
-
-        // FIXME(#22815): Note that calling `ty::type_contents` is a
-        // conservative heuristic; it may report that `needs_drop` is set
-        // when the type does not actually have a destructor associated
-        // with it. But since `ty` absolutely did not have the `Copy`
-        // bound attached (see above), it is sound to treat it as having a
-        // destructor (e.g. zero its memory on move).
-
-        let contents = ty.type_contents(self);
-        debug!("type_needs_drop ty={:?} contents={:?}", ty, contents);
-        contents.needs_drop(self)
-    }
-
-    /// Get the attributes of a definition.
-    pub fn get_attrs(&self, did: DefId) -> Cow<'tcx, [ast::Attribute]> {
-        if let Some(id) = self.map.as_local_node_id(did) {
-            Cow::Borrowed(self.map.attrs(id))
-        } else {
-            Cow::Owned(self.sess.cstore.item_attrs(did))
-        }
-    }
-
-    /// Determine whether an item is annotated with an attribute
-    pub fn has_attr(&self, did: DefId, attr: &str) -> bool {
-        self.get_attrs(did).iter().any(|item| item.check_name(attr))
-    }
-
-    /// Determine whether an item is annotated with `#[repr(packed)]`
-    pub fn lookup_packed(&self, did: DefId) -> bool {
-        self.lookup_repr_hints(did).contains(&attr::ReprPacked)
-    }
-
-    /// Determine whether an item is annotated with `#[simd]`
-    pub fn lookup_simd(&self, did: DefId) -> bool {
-        self.has_attr(did, "simd")
-            || self.lookup_repr_hints(did).contains(&attr::ReprSimd)
-    }
-
-    pub fn item_variances(&self, item_id: DefId) -> Rc<ItemVariances> {
-        lookup_locally_or_in_crate_store(
-            "item_variance_map", item_id, &self.item_variance_map,
-            || Rc::new(self.sess.cstore.item_variances(item_id)))
-    }
-
-    pub fn trait_has_default_impl(&self, trait_def_id: DefId) -> bool {
-        self.populate_implementations_for_trait_if_necessary(trait_def_id);
-
-        let def = self.lookup_trait_def(trait_def_id);
-        def.flags.get().intersects(TraitFlags::HAS_DEFAULT_IMPL)
-    }
-
-    /// Records a trait-to-implementation mapping.
-    pub fn record_trait_has_default_impl(&self, trait_def_id: DefId) {
-        let def = self.lookup_trait_def(trait_def_id);
-        def.flags.set(def.flags.get() | TraitFlags::HAS_DEFAULT_IMPL)
-    }
-
-    /// Load primitive inherent implementations if necessary
-    pub fn populate_implementations_for_primitive_if_necessary(&self,
-                                                               primitive_def_id: DefId) {
-        if primitive_def_id.is_local() {
-            return
-        }
-
-        // The primitive is not local, hence we are reading this out
-        // of metadata.
-        let _ignore = self.dep_graph.in_ignore();
-
-        if self.populated_external_primitive_impls.borrow().contains(&primitive_def_id) {
-            return
-        }
-
-        debug!("populate_implementations_for_primitive_if_necessary: searching for {:?}",
-               primitive_def_id);
-
-        let impl_items = self.sess.cstore.impl_items(primitive_def_id);
-
-        // Store the implementation info.
-        self.impl_items.borrow_mut().insert(primitive_def_id, impl_items);
-        self.populated_external_primitive_impls.borrow_mut().insert(primitive_def_id);
-    }
-
-    /// Populates the type context with all the inherent implementations for
-    /// the given type if necessary.
-    pub fn populate_inherent_implementations_for_type_if_necessary(&self,
-                                                                   type_id: DefId) {
-        if type_id.is_local() {
-            return
-        }
-
-        // The type is not local, hence we are reading this out of
-        // metadata and don't need to track edges.
-        let _ignore = self.dep_graph.in_ignore();
-
-        if self.populated_external_types.borrow().contains(&type_id) {
-            return
-        }
-
-        debug!("populate_inherent_implementations_for_type_if_necessary: searching for {:?}",
-               type_id);
-
-        let inherent_impls = self.sess.cstore.inherent_implementations_for_type(type_id);
-        for &impl_def_id in &inherent_impls {
-            // Store the implementation info.
-            let impl_items = self.sess.cstore.impl_items(impl_def_id);
-            self.impl_items.borrow_mut().insert(impl_def_id, impl_items);
-        }
-
-        self.inherent_impls.borrow_mut().insert(type_id, Rc::new(inherent_impls));
-        self.populated_external_types.borrow_mut().insert(type_id);
-    }
-
-    /// Populates the type context with all the implementations for the given
-    /// trait if necessary.
-    pub fn populate_implementations_for_trait_if_necessary(&self, trait_id: DefId) {
-        if trait_id.is_local() {
-            return
-        }
-
-        // The type is not local, hence we are reading this out of
-        // metadata and don't need to track edges.
-        let _ignore = self.dep_graph.in_ignore();
-
-        let def = self.lookup_trait_def(trait_id);
-        if def.flags.get().intersects(TraitFlags::IMPLS_VALID) {
-            return;
-        }
-
-        debug!("populate_implementations_for_trait_if_necessary: searching for {:?}", def);
-
-        if self.sess.cstore.is_defaulted_trait(trait_id) {
-            self.record_trait_has_default_impl(trait_id);
-        }
-
-        for impl_def_id in self.sess.cstore.implementations_of_trait(trait_id) {
-            let impl_items = self.sess.cstore.impl_items(impl_def_id);
-            let trait_ref = self.impl_trait_ref(impl_def_id).unwrap();
-            // Record the trait->implementation mapping.
-            def.record_impl(self, impl_def_id, trait_ref);
-
-            // For any methods that use a default implementation, add them to
-            // the map. This is a bit unfortunate.
-            for impl_item_def_id in &impl_items {
-                let method_def_id = impl_item_def_id.def_id();
-                // load impl items eagerly for convenience
-                // FIXME: we may want to load these lazily
-                self.impl_or_trait_item(method_def_id);
-            }
-
-            // Store the implementation info.
-            self.impl_items.borrow_mut().insert(impl_def_id, impl_items);
-        }
-
-        def.flags.set(def.flags.get() | TraitFlags::IMPLS_VALID);
-    }
-
-    pub fn closure_kind(&self, def_id: DefId) -> ty::ClosureKind {
-        Tables::closure_kind(&self.tables, self, def_id)
-    }
-
-    pub fn closure_type(&self,
-                        def_id: DefId,
-                        substs: &ClosureSubsts<'tcx>)
-                        -> ty::ClosureTy<'tcx>
-    {
-        Tables::closure_type(&self.tables, self, def_id, substs)
-    }
-
-    /// Given the def_id of an impl, return the def_id of the trait it implements.
-    /// If it implements no trait, return `None`.
-    pub fn trait_id_of_impl(&self, def_id: DefId) -> Option<DefId> {
-        self.impl_trait_ref(def_id).map(|tr| tr.def_id)
-    }
-
-    /// If the given def ID describes a method belonging to an impl, return the
-    /// ID of the impl that the method belongs to. Otherwise, return `None`.
-    pub fn impl_of_method(&self, def_id: DefId) -> Option<DefId> {
-        if def_id.krate != LOCAL_CRATE {
-            return match self.sess.cstore.impl_or_trait_item(self, def_id).container() {
-                TraitContainer(_) => None,
-                ImplContainer(def_id) => Some(def_id),
-            };
-        }
-        match self.impl_or_trait_items.borrow().get(&def_id).cloned() {
-            Some(trait_item) => {
-                match trait_item.container() {
-                    TraitContainer(_) => None,
-                    ImplContainer(def_id) => Some(def_id),
-                }
-            }
-            None => None
-        }
-    }
-
-    /// If the given def ID describes an item belonging to a trait (either a
-    /// default method or an implementation of a trait method), return the ID of
-    /// the trait that the method belongs to. Otherwise, return `None`.
-    pub fn trait_of_item(&self, def_id: DefId) -> Option<DefId> {
-        if def_id.krate != LOCAL_CRATE {
-            return self.sess.cstore.trait_of_item(self, def_id);
-        }
-        match self.impl_or_trait_items.borrow().get(&def_id).cloned() {
-            Some(impl_or_trait_item) => {
-                match impl_or_trait_item.container() {
-                    TraitContainer(def_id) => Some(def_id),
-                    ImplContainer(def_id) => self.trait_id_of_impl(def_id),
-                }
-            }
-            None => None
-        }
-    }
-
-    /// If the given def ID describes an item belonging to a trait (either a
-    /// default method or an implementation of a trait method), return the ID of
-    /// the method inside trait definition (this means that if the given def ID
-    /// is already that of the original trait method, then the return value is
-    /// the same).
-    /// Otherwise, return `None`.
-    pub fn trait_item_of_item(&self, def_id: DefId) -> Option<ImplOrTraitItemId> {
-        let impl_item = match self.impl_or_trait_items.borrow().get(&def_id) {
-            Some(m) => m.clone(),
-            None => return None,
-        };
-        let name = impl_item.name();
-        match self.trait_of_item(def_id) {
-            Some(trait_did) => {
-                self.trait_items(trait_did).iter()
-                    .find(|item| item.name() == name)
-                    .map(|item| item.id())
-            }
-            None => None
-        }
-    }
-
-    /// Construct a parameter environment suitable for static contexts or other contexts where there
-    /// are no free type/lifetime parameters in scope.
-    pub fn empty_parameter_environment<'a>(&'a self)
-                                           -> ParameterEnvironment<'a,'tcx> {
-
-        // for an empty parameter environment, there ARE no free
-        // regions, so it shouldn't matter what we use for the free id
-        let free_id_outlive = self.region_maps.node_extent(ast::DUMMY_NODE_ID);
-        ty::ParameterEnvironment { tcx: self,
-                                   free_substs: Substs::empty(),
-                                   caller_bounds: Vec::new(),
-                                   implicit_region_bound: ty::ReEmpty,
-                                   selection_cache: traits::SelectionCache::new(),
-                                   evaluation_cache: traits::EvaluationCache::new(),
-                                   free_id_outlive: free_id_outlive }
-    }
-
-    /// Constructs and returns a substitution that can be applied to move from
-    /// the "outer" view of a type or method to the "inner" view.
-    /// In general, this means converting from bound parameters to
-    /// free parameters. Since we currently represent bound/free type
-    /// parameters in the same way, this only has an effect on regions.
-    pub fn construct_free_substs(&self, generics: &Generics<'tcx>,
-                                 free_id_outlive: CodeExtent) -> Substs<'tcx> {
-        // map T => T
-        let mut types = VecPerParamSpace::empty();
-        for def in generics.types.as_slice() {
-            debug!("construct_parameter_environment(): push_types_from_defs: def={:?}",
-                    def);
-            types.push(def.space, self.mk_param_from_def(def));
-        }
-
-        // map bound 'a => free 'a
-        let mut regions = VecPerParamSpace::empty();
-        for def in generics.regions.as_slice() {
-            let region =
-                ReFree(FreeRegion { scope: free_id_outlive,
-                                    bound_region: BrNamed(def.def_id, def.name) });
-            debug!("push_region_params {:?}", region);
-            regions.push(def.space, region);
-        }
-
-        Substs {
-            types: types,
-            regions: subst::NonerasedRegions(regions)
-        }
-    }
-
-    /// See `ParameterEnvironment` struct def'n for details.
-    /// If you were using `free_id: NodeId`, you might try `self.region_maps.item_extent(free_id)`
-    /// for the `free_id_outlive` parameter. (But note that that is not always quite right.)
-    pub fn construct_parameter_environment<'a>(&'a self,
-                                               span: Span,
-                                               generics: &ty::Generics<'tcx>,
-                                               generic_predicates: &ty::GenericPredicates<'tcx>,
-                                               free_id_outlive: CodeExtent)
-                                               -> ParameterEnvironment<'a, 'tcx>
-    {
-        //
-        // Construct the free substs.
-        //
-
-        let free_substs = self.construct_free_substs(generics, free_id_outlive);
-
-        //
-        // Compute the bounds on Self and the type parameters.
-        //
-
-        let bounds = generic_predicates.instantiate(self, &free_substs);
-        let bounds = self.liberate_late_bound_regions(free_id_outlive, &ty::Binder(bounds));
-        let predicates = bounds.predicates.into_vec();
-
-        // Finally, we have to normalize the bounds in the environment, in
-        // case they contain any associated type projections. This process
-        // can yield errors if they contain illegal associated types, like
-        // `<i32 as Foo>::Bar` where `i32` does not implement `Foo`. We
-        // report these errors right here; this doesn't actually feel
-        // right to me, because constructing the environment feels like a
-        // kind of an "idempotent" action, but I'm not sure where a better
-        // place would be. In practice, we construct environments for
-        // every fn once during type checking, and we'll abort if there
-        // are any errors at that point, so after type checking you can be
-        // sure that this will succeed without errors anyway.
-        //
-
-        let unnormalized_env = ty::ParameterEnvironment {
-            tcx: self,
-            free_substs: free_substs,
-            implicit_region_bound: ty::ReScope(free_id_outlive),
-            caller_bounds: predicates,
-            selection_cache: traits::SelectionCache::new(),
-            evaluation_cache: traits::EvaluationCache::new(),
-            free_id_outlive: free_id_outlive,
-        };
-
-        let cause = traits::ObligationCause::misc(span, free_id_outlive.node_id(&self.region_maps));
-        traits::normalize_param_env_or_error(unnormalized_env, cause)
-    }
-
-    pub fn is_method_call(&self, expr_id: NodeId) -> bool {
-        self.tables.borrow().method_map.contains_key(&MethodCall::expr(expr_id))
-    }
-
-    pub fn is_overloaded_autoderef(&self, expr_id: NodeId, autoderefs: u32) -> bool {
-        self.tables.borrow().method_map.contains_key(&MethodCall::autoderef(expr_id,
-                                                                            autoderefs))
-    }
-
-    pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture> {
-        Some(self.tables.borrow().upvar_capture_map.get(&upvar_id).unwrap().clone())
-    }
-
-
-    pub fn visit_all_items_in_krate<V,F>(&self,
-                                         dep_node_fn: F,
-                                         visitor: &mut V)
-        where F: FnMut(DefId) -> DepNode, V: Visitor<'tcx>
-    {
-        dep_graph::visit_all_items_in_krate(self, dep_node_fn, visitor);
-    }
-}
-
-/// The category of explicit self.
-#[derive(Clone, Copy, Eq, PartialEq, Debug)]
-pub enum ExplicitSelfCategory {
-    Static,
-    ByValue,
-    ByReference(Region, hir::Mutability),
-    ByBox,
-}
-
-/// A free variable referred to in a function.
-#[derive(Copy, Clone, RustcEncodable, RustcDecodable)]
-pub struct Freevar {
-    /// The variable being accessed free.
-    pub def: Def,
-
-    // First span where it is accessed (there can be multiple).
-    pub span: Span
-}
-
-pub type FreevarMap = NodeMap<Vec<Freevar>>;
-
-pub type CaptureModeMap = NodeMap<hir::CaptureClause>;
-
-// Trait method resolution
-pub type TraitMap = NodeMap<Vec<DefId>>;
-
-// Map from the NodeId of a glob import to a list of items which are actually
-// imported.
-pub type GlobMap = HashMap<NodeId, HashSet<Name>>;
-
-impl<'tcx> ctxt<'tcx> {
-    pub fn with_freevars<T, F>(&self, fid: NodeId, f: F) -> T where
-        F: FnOnce(&[Freevar]) -> T,
-    {
-        match self.freevars.borrow().get(&fid) {
-            None => f(&[]),
-            Some(d) => f(&d[..])
-        }
-    }
-
-    pub fn make_substs_for_receiver_types(&self,
-                                          trait_ref: &ty::TraitRef<'tcx>,
-                                          method: &ty::Method<'tcx>)
-                                          -> subst::Substs<'tcx>
-    {
-        /*!
-         * Substitutes the values for the receiver's type parameters
-         * that are found in method, leaving the method's type parameters
-         * intact.
-         */
-
-        let meth_tps: Vec<Ty> =
-            method.generics.types.get_slice(subst::FnSpace)
-                  .iter()
-                  .map(|def| self.mk_param_from_def(def))
-                  .collect();
-        let meth_regions: Vec<ty::Region> =
-            method.generics.regions.get_slice(subst::FnSpace)
-                  .iter()
-                  .map(|def| def.to_early_bound_region())
-                  .collect();
-        trait_ref.substs.clone().with_method(meth_tps, meth_regions)
-    }
-}
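The accessors removed above lean on one caching idiom throughout: `lookup_locally_or_in_crate_store` consults a per-tcx map first and only falls back to `sess.cstore` (crate metadata) on a miss, memoizing the result. Below is a minimal, self-contained sketch of that idiom only; the names (`lookup_or_load`, the `u32` key, the string payload) are illustrative stand-ins, not rustc's API.

```rust
use std::cell::RefCell;
use std::collections::HashMap;

/// Sketch of the "look up locally or in the crate store" idiom: consult a
/// memoization map first, otherwise compute via the fallback (standing in
/// for a read from crate metadata) and cache the result.
fn lookup_or_load<K, V, F>(cache: &RefCell<HashMap<K, V>>, key: K, load: F) -> V
    where K: std::hash::Hash + Eq + Copy,
          V: Clone,
          F: FnOnce() -> V,
{
    if let Some(v) = cache.borrow().get(&key) {
        return v.clone();
    }
    let v = load();
    cache.borrow_mut().insert(key, v.clone());
    v
}

fn main() {
    let cache: RefCell<HashMap<u32, String>> = RefCell::new(HashMap::new());
    // The first lookup misses and runs the fallback; the second is served
    // from the cache, so its fallback is never invoked.
    let first = lookup_or_load(&cache, 7, || "decoded from metadata".to_string());
    let second = lookup_or_load(&cache, 7, || unreachable!());
    assert_eq!(first, second);
}
```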
diff --git a/src/librustc/middle/ty/outlives.rs b/src/librustc/middle/ty/outlives.rs
deleted file mode 100644 (file)
index fc20c1b..0000000
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// The outlives relation `T: 'a` or `'a: 'b`. This code frequently
-// refers to rules defined in RFC 1214 (`OutlivesFooBar`), so see that
-// RFC for reference.
-
-use middle::infer::InferCtxt;
-use middle::ty::{self, Ty, TypeFoldable};
-
-#[derive(Debug)]
-pub enum Component<'tcx> {
-    Region(ty::Region),
-    Param(ty::ParamTy),
-    UnresolvedInferenceVariable(ty::InferTy),
-
-    // Projections like `T::Foo` are tricky because a constraint like
-    // `T::Foo: 'a` can be satisfied in so many ways. There may be a
-    // where-clause that says `T::Foo: 'a`, or the defining trait may
-    // include a bound like `type Foo: 'static`, or -- in the most
-    // conservative way -- we can prove that `T: 'a` (more generally,
-    // that all components in the projection outlive `'a`). This code
-    // is not in a position to judge which is the best technique, so
-    // we just produce the projection as a component and leave it to
-    // the consumer to decide (but see `EscapingProjection` below).
-    Projection(ty::ProjectionTy<'tcx>),
-
-    // In the case where a projection has escaping regions -- meaning
-    // regions bound within the type itself -- we always use
-    // the most conservative rule, which requires that all components
-    // outlive the bound. So for example if we had a type like this:
-    //
-    //     for<'a> Trait1<  <T as Trait2<'a,'b>>::Foo  >
-    //                      ~~~~~~~~~~~~~~~~~~~~~~~~~
-    //
-    // then the inner projection (underlined) has an escaping region
-    // `'a`. We consider the outer type to meet a bound `'c` if `'b`
-    // outlives `'c` (i.e. `'b: 'c`), and we don't consider whether the trait
-    // declares that `Foo: 'static` etc. Therefore, we just return the
-    // free components of such a projection (in this case, `'b`).
-    //
-    // However, in the future, we may want to get smarter, and
-    // actually return a "higher-ranked projection" here. Therefore,
-    // we mark that these components are part of an escaping
-    // projection, so that implied bounds code can avoid relying on
-    // them. This gives us room to improve the regionck reasoning in
-    // the future without breaking backwards compat.
-    EscapingProjection(Vec<Component<'tcx>>),
-}
-
-/// Returns all the things that must outlive `'a` for the condition
-/// `ty0: 'a` to hold.
-pub fn components<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
-                           ty0: Ty<'tcx>)
-                           -> Vec<Component<'tcx>> {
-    let mut components = vec![];
-    compute_components(infcx, ty0, &mut components);
-    debug!("components({:?}) = {:?}", ty0, components);
-    components
-}
-
-fn compute_components<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
-                               ty: Ty<'tcx>,
-                               out: &mut Vec<Component<'tcx>>) {
-    // Descend through the types, looking for the various "base"
-    // components and collecting them into `out`. This is not written
-    // with `collect()` because of the need to sometimes skip subtrees
-    // in the `subtys` iterator (e.g., when encountering a
-    // projection).
-    match ty.sty {
-        ty::TyClosure(_, ref substs) => {
-            // FIXME(#27086). We do not accumulate from substs, since they
-            // don't represent reachable data. This means that, in
-            // practice, some of the lifetime parameters might not
-            // be in scope when the body runs, so long as there is
-            // no reachable data with that lifetime. For better or
-            // worse, this is consistent with fn types, however,
-            // which can also encapsulate data in this fashion
-            // (though it's somewhat harder, and typically
-            // requires virtual dispatch).
-            //
-            // Note that changing this (in a naive way, at least)
-            // causes regressions for what appears to be perfectly
-            // reasonable code like this:
-            //
-            // ```
-            // fn foo<'a>(p: &Data<'a>) {
-            //    bar(|q: &mut Parser| q.read_addr())
-            // }
-            // fn bar(p: Box<FnMut(&mut Parser)+'static>) {
-            // }
-            // ```
-            //
-            // Note that `p` (and `'a`) are not used in the
-            // closure at all, but to meet the requirement that
-            // the closure type `C: 'static` (so it can be coerced
-            // to the object type), we get the requirement that
-            // `'a: 'static` since `'a` appears in the closure
-            // type `C`.
-            //
-            // A smarter fix might "prune" unused `func_substs` --
-            // this would avoid breaking simple examples like
-            // this, but would still break others (which might
-            // indeed be invalid, depending on your POV). Pruning
-            // would be a subtle process, since we have to see
-            // what func/type parameters are used and unused,
-            // taking into consideration UFCS and so forth.
-
-            for &upvar_ty in &substs.upvar_tys {
-                compute_components(infcx, upvar_ty, out);
-            }
-        }
-
-        // OutlivesTypeParameterEnv -- the actual checking that `X:'a`
-        // is implied by the environment is done in regionck.
-        ty::TyParam(p) => {
-            out.push(Component::Param(p));
-        }
-
-        // For projections, we prefer to generate an obligation like
-        // `<P0 as Trait<P1...Pn>>::Foo: 'a`, because this gives the
-        // regionck more ways to prove that it holds. However,
-        // regionck is not (at least currently) prepared to deal with
-        // higher-ranked regions that may appear in the
-        // trait-ref. Therefore, if we see any higher-ranked regions,
-        // we simply fall back to the most restrictive rule, which
-        // requires that `Pi: 'a` for all `i`.
-        ty::TyProjection(ref data) => {
-            if !data.has_escaping_regions() {
-                // best case: no escaping regions, so push the
-                // projection and skip the subtree (thus generating no
-                // constraints for Pi). This defers the choice between
-                // the rules OutlivesProjectionEnv,
-                // OutlivesProjectionTraitDef, and
-                // OutlivesProjectionComponents to regionck.
-                out.push(Component::Projection(*data));
-            } else {
-                // fallback case: hard code
-                // OutlivesProjectionComponents.  Continue walking
-                // through and constrain Pi.
-                let subcomponents = capture_components(infcx, ty);
-                out.push(Component::EscapingProjection(subcomponents));
-            }
-        }
-
-        // If we encounter an inference variable, try to resolve it
-        // and proceed with resolved version. If we cannot resolve it,
-        // then record the unresolved variable as a component.
-        ty::TyInfer(_) => {
-            let ty = infcx.resolve_type_vars_if_possible(&ty);
-            if let ty::TyInfer(infer_ty) = ty.sty {
-                out.push(Component::UnresolvedInferenceVariable(infer_ty));
-            } else {
-                compute_components(infcx, ty, out);
-            }
-        }
-
-        // Most types do not introduce any region binders, nor
-        // involve any other subtle cases, and so the WF relation
-        // simply constrains any regions referenced directly by
-        // the type and then visits the types that are lexically
-        // contained within. (The comments refer to relevant rules
-        // from RFC1214.)
-        ty::TyBool |            // OutlivesScalar
-        ty::TyChar |            // OutlivesScalar
-        ty::TyInt(..) |         // OutlivesScalar
-        ty::TyUint(..) |        // OutlivesScalar
-        ty::TyFloat(..) |       // OutlivesScalar
-        ty::TyEnum(..) |        // OutlivesNominalType
-        ty::TyStruct(..) |      // OutlivesNominalType
-        ty::TyBox(..) |         // OutlivesNominalType (ish)
-        ty::TyStr |             // OutlivesScalar (ish)
-        ty::TyArray(..) |       // ...
-        ty::TySlice(..) |       // ...
-        ty::TyRawPtr(..) |      // ...
-        ty::TyRef(..) |         // OutlivesReference
-        ty::TyTuple(..) |       // ...
-        ty::TyBareFn(..) |      // OutlivesFunction (*)
-        ty::TyTrait(..) |       // OutlivesObject, OutlivesFragment (*)
-        ty::TyError => {
-            // (*) Bare functions and traits are both binders. In the
-            // RFC, this means we would add the bound regions to the
-            // "bound regions list".  In our representation, no such
-            // list is maintained explicitly, because bound regions
-            // themselves can be readily identified.
-
-            push_region_constraints(out, ty.regions());
-            for subty in ty.walk_shallow() {
-                compute_components(infcx, subty, out);
-            }
-        }
-    }
-}
-
-fn capture_components<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
-                               ty: Ty<'tcx>)
-                               -> Vec<Component<'tcx>> {
-    let mut temp = vec![];
-    push_region_constraints(&mut temp, ty.regions());
-    for subty in ty.walk_shallow() {
-        compute_components(infcx, subty, &mut temp);
-    }
-    temp
-}
-
-fn push_region_constraints<'tcx>(out: &mut Vec<Component<'tcx>>, regions: Vec<ty::Region>) {
-    for r in regions {
-        if !r.is_bound() {
-            out.push(Component::Region(r));
-        }
-    }
-}
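The deleted outlives.rs computes, for a type `T`, the components (regions, type parameters, projections) whose outlives obligations together imply `T: 'a`. A toy sketch of that decomposition over an invented, much-simplified type grammar (not rustc's `Ty`) shows the shape of `compute_components`:

```rust
// Toy mirror of compute_components over an invented type grammar: walk a
// type and collect the regions and type parameters that must outlive 'a
// for `T: 'a` to hold. All names here are illustrative, not rustc's.
enum Ty {
    Param(&'static str),        // a type parameter such as `T`
    Ref(&'static str, Box<Ty>), // a reference type `&'r U`
    Tuple(Vec<Ty>),             // a tuple `(A, B, ...)`
}

#[derive(Debug)]
enum Component {
    Region(&'static str),
    Param(&'static str),
}

fn compute_components(ty: Ty, out: &mut Vec<Component>) {
    match ty {
        // Like OutlivesTypeParameterEnv: defer `T: 'a` to the environment.
        Ty::Param(p) => out.push(Component::Param(p)),
        // Like OutlivesReference: the region `'r` and the referent must
        // both outlive `'a`.
        Ty::Ref(r, inner) => {
            out.push(Component::Region(r));
            compute_components(*inner, out);
        }
        // Structural types simply recurse into their components.
        Ty::Tuple(elems) => {
            for elem in elems {
                compute_components(elem, out);
            }
        }
    }
}

fn main() {
    // `&'b (T, U): 'a` decomposes into `'b: 'a`, `T: 'a`, `U: 'a`.
    let ty = Ty::Ref("'b", Box::new(Ty::Tuple(vec![Ty::Param("T"), Ty::Param("U")])));
    let mut out = Vec::new();
    compute_components(ty, &mut out);
    println!("{:?}", out); // [Region("'b"), Param("T"), Param("U")]
}
```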
diff --git a/src/librustc/middle/ty/relate.rs b/src/librustc/middle/ty/relate.rs
deleted file mode 100644 (file)
index 974b5c4..0000000
+++ /dev/null
@@ -1,684 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Generalized type relating mechanism. A type relation R relates a
-//! pair of values (A, B). A and B are usually types or regions but
-//! can be other things. Examples of type relations are subtyping,
-//! type equality, etc.
-
-use middle::def_id::DefId;
-use middle::subst::{ErasedRegions, NonerasedRegions, ParamSpace, Substs};
-use middle::ty::{self, Ty, TypeFoldable};
-use middle::ty::error::{ExpectedFound, TypeError};
-use std::rc::Rc;
-use syntax::abi;
-use rustc_front::hir as ast;
-
-pub type RelateResult<'tcx, T> = Result<T, TypeError<'tcx>>;
-
-#[derive(Clone, Debug)]
-pub enum Cause {
-    ExistentialRegionBound, // relating an existential region bound
-}
-
-pub trait TypeRelation<'a,'tcx> : Sized {
-    fn tcx(&self) -> &'a ty::ctxt<'tcx>;
-
-    /// Returns a static string we can use for printouts.
-    fn tag(&self) -> &'static str;
-
-    /// Returns true if the value `a` is the "expected" type in the
-    /// relation. Just affects error messages.
-    fn a_is_expected(&self) -> bool;
-
-    fn with_cause<F,R>(&mut self, _cause: Cause, f: F) -> R
-        where F: FnOnce(&mut Self) -> R
-    {
-        f(self)
-    }
-
-    /// Generic relation routine suitable for most anything.
-    fn relate<T:Relate<'a,'tcx>>(&mut self, a: &T, b: &T) -> RelateResult<'tcx, T> {
-        Relate::relate(self, a, b)
-    }
-
-    /// Relate elements of two slices pairwise.
-    fn relate_zip<T:Relate<'a,'tcx>>(&mut self, a: &[T], b: &[T]) -> RelateResult<'tcx, Vec<T>> {
-        assert_eq!(a.len(), b.len());
-        a.iter().zip(b).map(|(a, b)| self.relate(a, b)).collect()
-    }
-
-    /// Switch variance for the purpose of relating `a` and `b`.
-    fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
-                                               variance: ty::Variance,
-                                               a: &T,
-                                               b: &T)
-                                               -> RelateResult<'tcx, T>;
-
-    // Overrideable relations. You shouldn't typically call these
-    // directly, instead call `relate()`, which in turn calls
-    // these. This is both more uniform but also allows us to add
-    // additional hooks for other types in the future if needed
-    // without making older code, which called `relate`, obsolete.
-
-    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>)
-           -> RelateResult<'tcx, Ty<'tcx>>;
-
-    fn regions(&mut self, a: ty::Region, b: ty::Region)
-               -> RelateResult<'tcx, ty::Region>;
-
-    fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-                  -> RelateResult<'tcx, ty::Binder<T>>
-        where T: Relate<'a,'tcx>;
-}
-
-pub trait Relate<'a,'tcx>: TypeFoldable<'tcx> {
-    fn relate<R:TypeRelation<'a,'tcx>>(relation: &mut R,
-                                       a: &Self,
-                                       b: &Self)
-                                       -> RelateResult<'tcx, Self>;
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Relate impls
-
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::TypeAndMut<'tcx> {
-    fn relate<R>(relation: &mut R,
-                 a: &ty::TypeAndMut<'tcx>,
-                 b: &ty::TypeAndMut<'tcx>)
-                 -> RelateResult<'tcx, ty::TypeAndMut<'tcx>>
-        where R: TypeRelation<'a,'tcx>
-    {
-        debug!("{}.mts({:?}, {:?})",
-               relation.tag(),
-               a,
-               b);
-        if a.mutbl != b.mutbl {
-            Err(TypeError::Mutability)
-        } else {
-            let mutbl = a.mutbl;
-            let variance = match mutbl {
-                ast::Mutability::MutImmutable => ty::Covariant,
-                ast::Mutability::MutMutable => ty::Invariant,
-            };
-            let ty = try!(relation.relate_with_variance(variance, &a.ty, &b.ty));
-            Ok(ty::TypeAndMut {ty: ty, mutbl: mutbl})
-        }
-    }
-}
-
-// substitutions are not themselves relatable without more context,
-// but they are an important subroutine for things that ARE relatable,
-// like traits etc.
-fn relate_item_substs<'a,'tcx:'a,R>(relation: &mut R,
-                                    item_def_id: DefId,
-                                    a_subst: &Substs<'tcx>,
-                                    b_subst: &Substs<'tcx>)
-                                    -> RelateResult<'tcx, Substs<'tcx>>
-    where R: TypeRelation<'a,'tcx>
-{
-    debug!("substs: item_def_id={:?} a_subst={:?} b_subst={:?}",
-           item_def_id,
-           a_subst,
-           b_subst);
-
-    let variances;
-    let opt_variances = if relation.tcx().variance_computed.get() {
-        variances = relation.tcx().item_variances(item_def_id);
-        Some(&*variances)
-    } else {
-        None
-    };
-    relate_substs(relation, opt_variances, a_subst, b_subst)
-}
-
-fn relate_substs<'a,'tcx:'a,R>(relation: &mut R,
-                               variances: Option<&ty::ItemVariances>,
-                               a_subst: &Substs<'tcx>,
-                               b_subst: &Substs<'tcx>)
-                               -> RelateResult<'tcx, Substs<'tcx>>
-    where R: TypeRelation<'a,'tcx>
-{
-    let mut substs = Substs::empty();
-
-    for &space in &ParamSpace::all() {
-        let a_tps = a_subst.types.get_slice(space);
-        let b_tps = b_subst.types.get_slice(space);
-        let t_variances = variances.map(|v| v.types.get_slice(space));
-        let tps = try!(relate_type_params(relation, t_variances, a_tps, b_tps));
-        substs.types.replace(space, tps);
-    }
-
-    match (&a_subst.regions, &b_subst.regions) {
-        (&ErasedRegions, _) | (_, &ErasedRegions) => {
-            substs.regions = ErasedRegions;
-        }
-
-        (&NonerasedRegions(ref a), &NonerasedRegions(ref b)) => {
-            for &space in &ParamSpace::all() {
-                let a_regions = a.get_slice(space);
-                let b_regions = b.get_slice(space);
-                let r_variances = variances.map(|v| v.regions.get_slice(space));
-                let regions = try!(relate_region_params(relation,
-                                                        r_variances,
-                                                        a_regions,
-                                                        b_regions));
-                substs.mut_regions().replace(space, regions);
-            }
-        }
-    }
-
-    Ok(substs)
-}
-
-fn relate_type_params<'a,'tcx:'a,R>(relation: &mut R,
-                                    variances: Option<&[ty::Variance]>,
-                                    a_tys: &[Ty<'tcx>],
-                                    b_tys: &[Ty<'tcx>])
-                                    -> RelateResult<'tcx, Vec<Ty<'tcx>>>
-    where R: TypeRelation<'a,'tcx>
-{
-    if a_tys.len() != b_tys.len() {
-        return Err(TypeError::TyParamSize(expected_found(relation,
-                                                         &a_tys.len(),
-                                                         &b_tys.len())));
-    }
-
-    (0 .. a_tys.len())
-        .map(|i| {
-            let a_ty = a_tys[i];
-            let b_ty = b_tys[i];
-            let v = variances.map_or(ty::Invariant, |v| v[i]);
-            relation.relate_with_variance(v, &a_ty, &b_ty)
-        })
-        .collect()
-}
-
-fn relate_region_params<'a,'tcx:'a,R>(relation: &mut R,
-                                      variances: Option<&[ty::Variance]>,
-                                      a_rs: &[ty::Region],
-                                      b_rs: &[ty::Region])
-                                      -> RelateResult<'tcx, Vec<ty::Region>>
-    where R: TypeRelation<'a,'tcx>
-{
-    let num_region_params = a_rs.len();
-
-    debug!("relate_region_params(a_rs={:?}, \
-            b_rs={:?}, variances={:?})",
-           a_rs,
-           b_rs,
-           variances);
-
-    assert_eq!(num_region_params,
-               variances.map_or(num_region_params,
-                                |v| v.len()));
-
-    assert_eq!(num_region_params, b_rs.len());
-
-    (0..a_rs.len())
-        .map(|i| {
-            let a_r = a_rs[i];
-            let b_r = b_rs[i];
-            let variance = variances.map_or(ty::Invariant, |v| v[i]);
-            relation.relate_with_variance(variance, &a_r, &b_r)
-        })
-        .collect()
-}
-
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::BareFnTy<'tcx> {
-    fn relate<R>(relation: &mut R,
-                 a: &ty::BareFnTy<'tcx>,
-                 b: &ty::BareFnTy<'tcx>)
-                 -> RelateResult<'tcx, ty::BareFnTy<'tcx>>
-        where R: TypeRelation<'a,'tcx>
-    {
-        let unsafety = try!(relation.relate(&a.unsafety, &b.unsafety));
-        let abi = try!(relation.relate(&a.abi, &b.abi));
-        let sig = try!(relation.relate(&a.sig, &b.sig));
-        Ok(ty::BareFnTy {unsafety: unsafety,
-                         abi: abi,
-                         sig: sig})
-    }
-}
-
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::FnSig<'tcx> {
-    fn relate<R>(relation: &mut R,
-                 a: &ty::FnSig<'tcx>,
-                 b: &ty::FnSig<'tcx>)
-                 -> RelateResult<'tcx, ty::FnSig<'tcx>>
-        where R: TypeRelation<'a,'tcx>
-    {
-        if a.variadic != b.variadic {
-            return Err(TypeError::VariadicMismatch(
-                expected_found(relation, &a.variadic, &b.variadic)));
-        }
-
-        let inputs = try!(relate_arg_vecs(relation,
-                                          &a.inputs,
-                                          &b.inputs));
-
-        let output = try!(match (a.output, b.output) {
-            (ty::FnConverging(a_ty), ty::FnConverging(b_ty)) =>
-                Ok(ty::FnConverging(try!(relation.relate(&a_ty, &b_ty)))),
-            (ty::FnDiverging, ty::FnDiverging) =>
-                Ok(ty::FnDiverging),
-            (a, b) =>
-                Err(TypeError::ConvergenceMismatch(
-                    expected_found(relation, &(a != ty::FnDiverging), &(b != ty::FnDiverging)))),
-        });
-
-        return Ok(ty::FnSig {inputs: inputs,
-                             output: output,
-                             variadic: a.variadic});
-    }
-}
-
-fn relate_arg_vecs<'a,'tcx:'a,R>(relation: &mut R,
-                                 a_args: &[Ty<'tcx>],
-                                 b_args: &[Ty<'tcx>])
-                                 -> RelateResult<'tcx, Vec<Ty<'tcx>>>
-    where R: TypeRelation<'a,'tcx>
-{
-    if a_args.len() != b_args.len() {
-        return Err(TypeError::ArgCount);
-    }
-
-    a_args.iter().zip(b_args)
-          .map(|(a, b)| relation.relate_with_variance(ty::Contravariant, a, b))
-          .collect()
-}
-
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ast::Unsafety {
-    fn relate<R>(relation: &mut R,
-                 a: &ast::Unsafety,
-                 b: &ast::Unsafety)
-                 -> RelateResult<'tcx, ast::Unsafety>
-        where R: TypeRelation<'a,'tcx>
-    {
-        if a != b {
-            Err(TypeError::UnsafetyMismatch(expected_found(relation, a, b)))
-        } else {
-            Ok(*a)
-        }
-    }
-}
-
-impl<'a,'tcx:'a> Relate<'a,'tcx> for abi::Abi {
-    fn relate<R>(relation: &mut R,
-                 a: &abi::Abi,
-                 b: &abi::Abi)
-                 -> RelateResult<'tcx, abi::Abi>
-        where R: TypeRelation<'a,'tcx>
-    {
-        if a == b {
-            Ok(*a)
-        } else {
-            Err(TypeError::AbiMismatch(expected_found(relation, a, b)))
-        }
-    }
-}
-
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ProjectionTy<'tcx> {
-    fn relate<R>(relation: &mut R,
-                 a: &ty::ProjectionTy<'tcx>,
-                 b: &ty::ProjectionTy<'tcx>)
-                 -> RelateResult<'tcx, ty::ProjectionTy<'tcx>>
-        where R: TypeRelation<'a,'tcx>
-    {
-        if a.item_name != b.item_name {
-            Err(TypeError::ProjectionNameMismatched(
-                expected_found(relation, &a.item_name, &b.item_name)))
-        } else {
-            let trait_ref = try!(relation.relate(&a.trait_ref, &b.trait_ref));
-            Ok(ty::ProjectionTy { trait_ref: trait_ref, item_name: a.item_name })
-        }
-    }
-}
-
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ProjectionPredicate<'tcx> {
-    fn relate<R>(relation: &mut R,
-                 a: &ty::ProjectionPredicate<'tcx>,
-                 b: &ty::ProjectionPredicate<'tcx>)
-                 -> RelateResult<'tcx, ty::ProjectionPredicate<'tcx>>
-        where R: TypeRelation<'a,'tcx>
-    {
-        let projection_ty = try!(relation.relate(&a.projection_ty, &b.projection_ty));
-        let ty = try!(relation.relate(&a.ty, &b.ty));
-        Ok(ty::ProjectionPredicate { projection_ty: projection_ty, ty: ty })
-    }
-}
-
-impl<'a,'tcx:'a> Relate<'a,'tcx> for Vec<ty::PolyProjectionPredicate<'tcx>> {
-    fn relate<R>(relation: &mut R,
-                 a: &Vec<ty::PolyProjectionPredicate<'tcx>>,
-                 b: &Vec<ty::PolyProjectionPredicate<'tcx>>)
-                 -> RelateResult<'tcx, Vec<ty::PolyProjectionPredicate<'tcx>>>
-        where R: TypeRelation<'a,'tcx>
-    {
-        // To be compatible, `a` and `b` must be for precisely the
-        // same set of traits and item names. We always require that
-        // projection bounds lists are sorted by trait-def-id and item-name,
-        // so we can just iterate through the lists pairwise, so long as they are the
-        // same length.
-        if a.len() != b.len() {
-            Err(TypeError::ProjectionBoundsLength(expected_found(relation, &a.len(), &b.len())))
-        } else {
-            a.iter().zip(b)
-                .map(|(a, b)| relation.relate(a, b))
-                .collect()
-        }
-    }
-}
-
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ExistentialBounds<'tcx> {
-    fn relate<R>(relation: &mut R,
-                 a: &ty::ExistentialBounds<'tcx>,
-                 b: &ty::ExistentialBounds<'tcx>)
-                 -> RelateResult<'tcx, ty::ExistentialBounds<'tcx>>
-        where R: TypeRelation<'a,'tcx>
-    {
-        let r =
-            try!(relation.with_cause(
-                Cause::ExistentialRegionBound,
-                |relation| relation.relate_with_variance(ty::Contravariant,
-                                                         &a.region_bound,
-                                                         &b.region_bound)));
-        let nb = try!(relation.relate(&a.builtin_bounds, &b.builtin_bounds));
-        let pb = try!(relation.relate(&a.projection_bounds, &b.projection_bounds));
-        Ok(ty::ExistentialBounds { region_bound: r,
-                                   builtin_bounds: nb,
-                                   projection_bounds: pb })
-    }
-}
-
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::BuiltinBounds {
-    fn relate<R>(relation: &mut R,
-                 a: &ty::BuiltinBounds,
-                 b: &ty::BuiltinBounds)
-                 -> RelateResult<'tcx, ty::BuiltinBounds>
-        where R: TypeRelation<'a,'tcx>
-    {
-        // Two sets of builtin bounds are only relatable if they are
-        // precisely the same (but see the coercion code).
-        if a != b {
-            Err(TypeError::BuiltinBoundsMismatch(expected_found(relation, a, b)))
-        } else {
-            Ok(*a)
-        }
-    }
-}
-
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::TraitRef<'tcx> {
-    fn relate<R>(relation: &mut R,
-                 a: &ty::TraitRef<'tcx>,
-                 b: &ty::TraitRef<'tcx>)
-                 -> RelateResult<'tcx, ty::TraitRef<'tcx>>
-        where R: TypeRelation<'a,'tcx>
-    {
-        // Different traits cannot be related
-        if a.def_id != b.def_id {
-            Err(TypeError::Traits(expected_found(relation, &a.def_id, &b.def_id)))
-        } else {
-            let substs = try!(relate_item_substs(relation, a.def_id, a.substs, b.substs));
-            Ok(ty::TraitRef { def_id: a.def_id, substs: relation.tcx().mk_substs(substs) })
-        }
-    }
-}
-
-impl<'a,'tcx:'a> Relate<'a,'tcx> for Ty<'tcx> {
-    fn relate<R>(relation: &mut R,
-                 a: &Ty<'tcx>,
-                 b: &Ty<'tcx>)
-                 -> RelateResult<'tcx, Ty<'tcx>>
-        where R: TypeRelation<'a,'tcx>
-    {
-        relation.tys(a, b)
-    }
-}
-
-/// The main "type relation" routine. Note that this does not handle
-/// inference artifacts, so you should filter those out before calling
-/// it.
-pub fn super_relate_tys<'a,'tcx:'a,R>(relation: &mut R,
-                                      a: Ty<'tcx>,
-                                      b: Ty<'tcx>)
-                                      -> RelateResult<'tcx, Ty<'tcx>>
-    where R: TypeRelation<'a,'tcx>
-{
-    let tcx = relation.tcx();
-    let a_sty = &a.sty;
-    let b_sty = &b.sty;
-    debug!("super_tys: a_sty={:?} b_sty={:?}", a_sty, b_sty);
-    match (a_sty, b_sty) {
-        (&ty::TyInfer(_), _) |
-        (_, &ty::TyInfer(_)) =>
-        {
-            // The caller should handle these cases!
-            tcx.sess.bug("var types encountered in super_relate_tys")
-        }
-
-        (&ty::TyError, _) | (_, &ty::TyError) =>
-        {
-            Ok(tcx.types.err)
-        }
-
-        (&ty::TyChar, _) |
-        (&ty::TyBool, _) |
-        (&ty::TyInt(_), _) |
-        (&ty::TyUint(_), _) |
-        (&ty::TyFloat(_), _) |
-        (&ty::TyStr, _)
-            if a == b =>
-        {
-            Ok(a)
-        }
-
-        (&ty::TyParam(ref a_p), &ty::TyParam(ref b_p))
-            if a_p.idx == b_p.idx && a_p.space == b_p.space =>
-        {
-            Ok(a)
-        }
-
-        (&ty::TyEnum(a_def, a_substs), &ty::TyEnum(b_def, b_substs))
-            if a_def == b_def =>
-        {
-            let substs = try!(relate_item_substs(relation, a_def.did, a_substs, b_substs));
-            Ok(tcx.mk_enum(a_def, tcx.mk_substs(substs)))
-        }
-
-        (&ty::TyTrait(ref a_), &ty::TyTrait(ref b_)) =>
-        {
-            let principal = try!(relation.relate(&a_.principal, &b_.principal));
-            let bounds = try!(relation.relate(&a_.bounds, &b_.bounds));
-            Ok(tcx.mk_trait(principal, bounds))
-        }
-
-        (&ty::TyStruct(a_def, a_substs), &ty::TyStruct(b_def, b_substs))
-            if a_def == b_def =>
-        {
-            let substs = try!(relate_item_substs(relation, a_def.did, a_substs, b_substs));
-            Ok(tcx.mk_struct(a_def, tcx.mk_substs(substs)))
-        }
-
-        (&ty::TyClosure(a_id, ref a_substs),
-         &ty::TyClosure(b_id, ref b_substs))
-            if a_id == b_id =>
-        {
-            // All TyClosure types with the same id represent
-            // the (anonymous) type of the same closure expression. So
-            // all of their regions should be equated.
-            let substs = try!(relation.relate(a_substs, b_substs));
-            Ok(tcx.mk_closure_from_closure_substs(a_id, substs))
-        }
-
-        (&ty::TyBox(a_inner), &ty::TyBox(b_inner)) =>
-        {
-            let typ = try!(relation.relate(&a_inner, &b_inner));
-            Ok(tcx.mk_box(typ))
-        }
-
-        (&ty::TyRawPtr(ref a_mt), &ty::TyRawPtr(ref b_mt)) =>
-        {
-            let mt = try!(relation.relate(a_mt, b_mt));
-            Ok(tcx.mk_ptr(mt))
-        }
-
-        (&ty::TyRef(a_r, ref a_mt), &ty::TyRef(b_r, ref b_mt)) =>
-        {
-            let r = try!(relation.relate_with_variance(ty::Contravariant, a_r, b_r));
-            let mt = try!(relation.relate(a_mt, b_mt));
-            Ok(tcx.mk_ref(tcx.mk_region(r), mt))
-        }
-
-        (&ty::TyArray(a_t, sz_a), &ty::TyArray(b_t, sz_b)) =>
-        {
-            let t = try!(relation.relate(&a_t, &b_t));
-            if sz_a == sz_b {
-                Ok(tcx.mk_array(t, sz_a))
-            } else {
-                Err(TypeError::FixedArraySize(expected_found(relation, &sz_a, &sz_b)))
-            }
-        }
-
-        (&ty::TySlice(a_t), &ty::TySlice(b_t)) =>
-        {
-            let t = try!(relation.relate(&a_t, &b_t));
-            Ok(tcx.mk_slice(t))
-        }
-
-        (&ty::TyTuple(ref as_), &ty::TyTuple(ref bs)) =>
-        {
-            if as_.len() == bs.len() {
-                let ts = try!(as_.iter().zip(bs)
-                                 .map(|(a, b)| relation.relate(a, b))
-                                 .collect::<Result<_, _>>());
-                Ok(tcx.mk_tup(ts))
-            } else if !(as_.is_empty() || bs.is_empty()) {
-                Err(TypeError::TupleSize(
-                    expected_found(relation, &as_.len(), &bs.len())))
-            } else {
-                Err(TypeError::Sorts(expected_found(relation, &a, &b)))
-            }
-        }
-
-        (&ty::TyBareFn(a_opt_def_id, a_fty), &ty::TyBareFn(b_opt_def_id, b_fty))
-            if a_opt_def_id == b_opt_def_id =>
-        {
-            let fty = try!(relation.relate(a_fty, b_fty));
-            Ok(tcx.mk_fn(a_opt_def_id, tcx.mk_bare_fn(fty)))
-        }
-
-        (&ty::TyProjection(ref a_data), &ty::TyProjection(ref b_data)) =>
-        {
-            let projection_ty = try!(relation.relate(a_data, b_data));
-            Ok(tcx.mk_projection(projection_ty.trait_ref, projection_ty.item_name))
-        }
-
-        _ =>
-        {
-            Err(TypeError::Sorts(expected_found(relation, &a, &b)))
-        }
-    }
-}
-
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ClosureSubsts<'tcx> {
-    fn relate<R>(relation: &mut R,
-                 a: &ty::ClosureSubsts<'tcx>,
-                 b: &ty::ClosureSubsts<'tcx>)
-                 -> RelateResult<'tcx, ty::ClosureSubsts<'tcx>>
-        where R: TypeRelation<'a,'tcx>
-    {
-        let func_substs = try!(relate_substs(relation, None, a.func_substs, b.func_substs));
-        let upvar_tys = try!(relation.relate_zip(&a.upvar_tys, &b.upvar_tys));
-        Ok(ty::ClosureSubsts { func_substs: relation.tcx().mk_substs(func_substs),
-                               upvar_tys: upvar_tys })
-    }
-}
-
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::Region {
-    fn relate<R>(relation: &mut R,
-                 a: &ty::Region,
-                 b: &ty::Region)
-                 -> RelateResult<'tcx, ty::Region>
-        where R: TypeRelation<'a,'tcx>
-    {
-        relation.regions(*a, *b)
-    }
-}
-
-impl<'a,'tcx:'a,T> Relate<'a,'tcx> for ty::Binder<T>
-    where T: Relate<'a,'tcx>
-{
-    fn relate<R>(relation: &mut R,
-                 a: &ty::Binder<T>,
-                 b: &ty::Binder<T>)
-                 -> RelateResult<'tcx, ty::Binder<T>>
-        where R: TypeRelation<'a,'tcx>
-    {
-        relation.binders(a, b)
-    }
-}
-
-impl<'a,'tcx:'a,T> Relate<'a,'tcx> for Rc<T>
-    where T: Relate<'a,'tcx>
-{
-    fn relate<R>(relation: &mut R,
-                 a: &Rc<T>,
-                 b: &Rc<T>)
-                 -> RelateResult<'tcx, Rc<T>>
-        where R: TypeRelation<'a,'tcx>
-    {
-        let a: &T = a;
-        let b: &T = b;
-        Ok(Rc::new(try!(relation.relate(a, b))))
-    }
-}
-
-impl<'a,'tcx:'a,T> Relate<'a,'tcx> for Box<T>
-    where T: Relate<'a,'tcx>
-{
-    fn relate<R>(relation: &mut R,
-                 a: &Box<T>,
-                 b: &Box<T>)
-                 -> RelateResult<'tcx, Box<T>>
-        where R: TypeRelation<'a,'tcx>
-    {
-        let a: &T = a;
-        let b: &T = b;
-        Ok(Box::new(try!(relation.relate(a, b))))
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Error handling
-
-pub fn expected_found<'a,'tcx:'a,R,T>(relation: &mut R,
-                                      a: &T,
-                                      b: &T)
-                                      -> ExpectedFound<T>
-    where R: TypeRelation<'a,'tcx>, T: Clone
-{
-    expected_found_bool(relation.a_is_expected(), a, b)
-}
-
-pub fn expected_found_bool<T>(a_is_expected: bool,
-                              a: &T,
-                              b: &T)
-                              -> ExpectedFound<T>
-    where T: Clone
-{
-    let a = a.clone();
-    let b = b.clone();
-    if a_is_expected {
-        ExpectedFound {expected: a, found: b}
-    } else {
-        ExpectedFound {expected: b, found: a}
-    }
-}
diff --git a/src/librustc/middle/ty/structural_impls.rs b/src/librustc/middle/ty/structural_impls.rs
deleted file mode 100644 (file)
index 01b2bd3..0000000
+++ /dev/null
@@ -1,794 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use middle::subst::{self, VecPerParamSpace};
-use middle::traits;
-use middle::ty::{self, Lift, TraitRef, Ty};
-use middle::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
-
-use std::rc::Rc;
-use syntax::abi;
-use syntax::ptr::P;
-
-use rustc_front::hir;
-
-///////////////////////////////////////////////////////////////////////////
-// Lift implementations
-
-impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>> Lift<'tcx> for (A, B) {
-    type Lifted = (A::Lifted, B::Lifted);
-    fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self.0).and_then(|a| tcx.lift(&self.1).map(|b| (a, b)))
-    }
-}
-
-impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for [T] {
-    type Lifted = Vec<T::Lifted>;
-    fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option<Self::Lifted> {
-        // type annotation needed to inform `projection_must_outlive`
-        let mut result : Vec<<T as Lift<'tcx>>::Lifted>
-            = Vec::with_capacity(self.len());
-        for x in self {
-            if let Some(value) = tcx.lift(x) {
-                result.push(value);
-            } else {
-                return None;
-            }
-        }
-        Some(result)
-    }
-}
-
-impl<'tcx> Lift<'tcx> for ty::Region {
-    type Lifted = Self;
-    fn lift_to_tcx(&self, _: &ty::ctxt<'tcx>) -> Option<ty::Region> {
-        Some(*self)
-    }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for TraitRef<'a> {
-    type Lifted = TraitRef<'tcx>;
-    fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option<TraitRef<'tcx>> {
-        tcx.lift(&self.substs).map(|substs| TraitRef {
-            def_id: self.def_id,
-            substs: substs
-        })
-    }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::TraitPredicate<'a> {
-    type Lifted = ty::TraitPredicate<'tcx>;
-    fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option<ty::TraitPredicate<'tcx>> {
-        tcx.lift(&self.trait_ref).map(|trait_ref| ty::TraitPredicate {
-            trait_ref: trait_ref
-        })
-    }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::EquatePredicate<'a> {
-    type Lifted = ty::EquatePredicate<'tcx>;
-    fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option<ty::EquatePredicate<'tcx>> {
-        tcx.lift(&(self.0, self.1)).map(|(a, b)| ty::EquatePredicate(a, b))
-    }
-}
-
-impl<'tcx, A: Copy+Lift<'tcx>, B: Copy+Lift<'tcx>> Lift<'tcx> for ty::OutlivesPredicate<A, B> {
-    type Lifted = ty::OutlivesPredicate<A::Lifted, B::Lifted>;
-    fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&(self.0, self.1)).map(|(a, b)| ty::OutlivesPredicate(a, b))
-    }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionPredicate<'a> {
-    type Lifted = ty::ProjectionPredicate<'tcx>;
-    fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option<ty::ProjectionPredicate<'tcx>> {
-        tcx.lift(&(self.projection_ty.trait_ref, self.ty)).map(|(trait_ref, ty)| {
-            ty::ProjectionPredicate {
-                projection_ty: ty::ProjectionTy {
-                    trait_ref: trait_ref,
-                    item_name: self.projection_ty.item_name
-                },
-                ty: ty
-            }
-        })
-    }
-}
-
-impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::Binder<T> {
-    type Lifted = ty::Binder<T::Lifted>;
-    fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option<Self::Lifted> {
-        tcx.lift(&self.0).map(|x| ty::Binder(x))
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// TypeFoldable implementations.
-//
-// Ideally, each type should invoke `folder.fold_foo(self)` and
-// nothing else. In some cases, though, we haven't gotten around to
-// adding methods on the `folder` yet, and thus the folding is
-// hard-coded here. This is less flexible, because folders cannot
-// override the behavior, but there are a lot of random types and one
-// can easily refactor the folding into the TypeFolder trait as
-// needed.
-
-macro_rules! CopyImpls {
-    ($($ty:ty),+) => {
-        $(
-            impl<'tcx> TypeFoldable<'tcx> for $ty {
-                fn super_fold_with<F:TypeFolder<'tcx>>(&self, _: &mut F) -> $ty {
-                    *self
-                }
-
-                fn super_visit_with<F: TypeVisitor<'tcx>>(&self, _: &mut F) -> bool {
-                    false
-                }
-            }
-        )+
-    }
-}
-
-CopyImpls! { (), hir::Unsafety, abi::Abi }
-
-impl<'tcx, T:TypeFoldable<'tcx>, U:TypeFoldable<'tcx>> TypeFoldable<'tcx> for (T, U) {
-    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> (T, U) {
-        (self.0.fold_with(folder), self.1.fold_with(folder))
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.0.visit_with(visitor) || self.1.visit_with(visitor)
-    }
-}
-
-impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Option<T> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        self.as_ref().map(|t| t.fold_with(folder))
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.iter().any(|t| t.visit_with(visitor))
-    }
-}
-
-impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Rc<T> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        Rc::new((**self).fold_with(folder))
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        (**self).visit_with(visitor)
-    }
-}
-
-impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<T> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        let content: T = (**self).fold_with(folder);
-        box content
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        (**self).visit_with(visitor)
-    }
-}
-
-impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Vec<T> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        self.iter().map(|t| t.fold_with(folder)).collect()
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.iter().any(|t| t.visit_with(visitor))
-    }
-}
-
-impl<'tcx, T:TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder<T> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        folder.enter_region_binder();
-        let result = ty::Binder(self.0.fold_with(folder));
-        folder.exit_region_binder();
-        result
-    }
-
-    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        folder.fold_binder(self)
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        visitor.enter_region_binder();
-        if self.0.visit_with(visitor) { return true }
-        visitor.exit_region_binder();
-        false
-    }
-}
-
-impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for P<[T]> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        self.iter().map(|t| t.fold_with(folder)).collect()
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.iter().any(|t| t.visit_with(visitor))
-    }
-}
-
-impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for VecPerParamSpace<T> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-
-        // Things in the Fn space take place under an additional level
-        // of region binding relative to the other spaces. This is
-        // because those entries are attached to a method, and methods
-        // always introduce a level of region binding.
-
-        let result = self.map_enumerated(|(space, index, elem)| {
-            if space == subst::FnSpace && index == 0 {
-                // enter new level when/if we reach the first thing in fn space
-                folder.enter_region_binder();
-            }
-            elem.fold_with(folder)
-        });
-        if result.len(subst::FnSpace) > 0 {
-            // if there was anything in fn space, exit the region binding level
-            folder.exit_region_binder();
-        }
-        result
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        let mut entered_region_binder = false;
-        let result = self.iter_enumerated().any(|(space, index, t)| {
-            if space == subst::FnSpace && index == 0 {
-                visitor.enter_region_binder();
-                entered_region_binder = true;
-            }
-            t.visit_with(visitor)
-        });
-        if entered_region_binder {
-            visitor.exit_region_binder();
-        }
-        result
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::TraitTy<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::TraitTy {
-            principal: self.principal.fold_with(folder),
-            bounds: self.bounds.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.principal.visit_with(visitor) || self.bounds.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        let sty = match self.sty {
-            ty::TyBox(typ) => ty::TyBox(typ.fold_with(folder)),
-            ty::TyRawPtr(ref tm) => ty::TyRawPtr(tm.fold_with(folder)),
-            ty::TyArray(typ, sz) => ty::TyArray(typ.fold_with(folder), sz),
-            ty::TySlice(typ) => ty::TySlice(typ.fold_with(folder)),
-            ty::TyEnum(tid, ref substs) => {
-                let substs = substs.fold_with(folder);
-                ty::TyEnum(tid, folder.tcx().mk_substs(substs))
-            }
-            ty::TyTrait(ref trait_ty) => ty::TyTrait(trait_ty.fold_with(folder)),
-            ty::TyTuple(ref ts) => ty::TyTuple(ts.fold_with(folder)),
-            ty::TyBareFn(opt_def_id, ref f) => {
-                let bfn = f.fold_with(folder);
-                ty::TyBareFn(opt_def_id, folder.tcx().mk_bare_fn(bfn))
-            }
-            ty::TyRef(r, ref tm) => {
-                let r = r.fold_with(folder);
-                ty::TyRef(folder.tcx().mk_region(r), tm.fold_with(folder))
-            }
-            ty::TyStruct(did, ref substs) => {
-                let substs = substs.fold_with(folder);
-                ty::TyStruct(did, folder.tcx().mk_substs(substs))
-            }
-            ty::TyClosure(did, ref substs) => {
-                ty::TyClosure(did, substs.fold_with(folder))
-            }
-            ty::TyProjection(ref data) => ty::TyProjection(data.fold_with(folder)),
-            ty::TyBool | ty::TyChar | ty::TyStr | ty::TyInt(_) |
-            ty::TyUint(_) | ty::TyFloat(_) | ty::TyError | ty::TyInfer(_) |
-            ty::TyParam(..) => self.sty.clone(),
-        };
-        folder.tcx().mk_ty(sty)
-    }
-
-    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        folder.fold_ty(*self)
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        match self.sty {
-            ty::TyBox(typ) => typ.visit_with(visitor),
-            ty::TyRawPtr(ref tm) => tm.visit_with(visitor),
-            ty::TyArray(typ, _sz) => typ.visit_with(visitor),
-            ty::TySlice(typ) => typ.visit_with(visitor),
-            ty::TyEnum(_tid, ref substs) => substs.visit_with(visitor),
-            ty::TyTrait(ref trait_ty) => trait_ty.visit_with(visitor),
-            ty::TyTuple(ref ts) => ts.visit_with(visitor),
-            ty::TyBareFn(_opt_def_id, ref f) => f.visit_with(visitor),
-            ty::TyRef(r, ref tm) => r.visit_with(visitor) || tm.visit_with(visitor),
-            ty::TyStruct(_did, ref substs) => substs.visit_with(visitor),
-            ty::TyClosure(_did, ref substs) => substs.visit_with(visitor),
-            ty::TyProjection(ref data) => data.visit_with(visitor),
-            ty::TyBool | ty::TyChar | ty::TyStr | ty::TyInt(_) |
-            ty::TyUint(_) | ty::TyFloat(_) | ty::TyError | ty::TyInfer(_) |
-            ty::TyParam(..) => false,
-        }
-    }
-
-    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        visitor.visit_ty(self)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::BareFnTy<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::BareFnTy { sig: self.sig.fold_with(folder),
-                       abi: self.abi,
-                       unsafety: self.unsafety }
-    }
-
-    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        folder.fold_bare_fn_ty(self)
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.sig.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::ClosureTy<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-       ty::ClosureTy {
-            sig: self.sig.fold_with(folder),
-            unsafety: self.unsafety,
-            abi: self.abi,
-        }
-    }
-
-    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        folder.fold_closure_ty(self)
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.sig.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::TypeAndMut<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::TypeAndMut { ty: self.ty.fold_with(folder), mutbl: self.mutbl }
-    }
-
-    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        folder.fold_mt(self)
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.ty.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::FnOutput<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        match *self {
-            ty::FnConverging(ref ty) => ty::FnConverging(ty.fold_with(folder)),
-            ty::FnDiverging => ty::FnDiverging
-        }
-    }
-
-    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        folder.fold_output(self)
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        match *self {
-            ty::FnConverging(ref ty) => ty.visit_with(visitor),
-            ty::FnDiverging => false,
-        }
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::FnSig<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::FnSig { inputs: self.inputs.fold_with(folder),
-                    output: self.output.fold_with(folder),
-                    variadic: self.variadic }
-    }
-
-    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        folder.fold_fn_sig(self)
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.inputs.visit_with(visitor) || self.output.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::TraitRef<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        let substs = self.substs.fold_with(folder);
-        ty::TraitRef {
-            def_id: self.def_id,
-            substs: folder.tcx().mk_substs(substs),
-        }
-    }
-
-    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        folder.fold_trait_ref(self)
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.substs.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::Region {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, _folder: &mut F) -> Self {
-        *self
-    }
-
-    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        folder.fold_region(*self)
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> bool {
-        false
-    }
-
-    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        visitor.visit_region(*self)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for subst::Substs<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        let regions = match self.regions {
-            subst::ErasedRegions => subst::ErasedRegions,
-            subst::NonerasedRegions(ref regions) => {
-                subst::NonerasedRegions(regions.fold_with(folder))
-            }
-        };
-
-        subst::Substs { regions: regions,
-                        types: self.types.fold_with(folder) }
-    }
-
-    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        folder.fold_substs(self)
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.types.visit_with(visitor) || match self.regions {
-            subst::ErasedRegions => false,
-            subst::NonerasedRegions(ref regions) => regions.visit_with(visitor),
-        }
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::ClosureSubsts<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        let func_substs = self.func_substs.fold_with(folder);
-        ty::ClosureSubsts {
-            func_substs: folder.tcx().mk_substs(func_substs),
-            upvar_tys: self.upvar_tys.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.func_substs.visit_with(visitor) || self.upvar_tys.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::ItemSubsts<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::ItemSubsts {
-            substs: self.substs.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.substs.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::adjustment::AutoRef<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        match *self {
-            ty::adjustment::AutoPtr(r, m) => {
-                let r = r.fold_with(folder);
-                ty::adjustment::AutoPtr(folder.tcx().mk_region(r), m)
-            }
-            ty::adjustment::AutoUnsafe(m) => ty::adjustment::AutoUnsafe(m)
-        }
-    }
-
-    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        folder.fold_autoref(self)
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        match *self {
-            ty::adjustment::AutoPtr(r, _m) => r.visit_with(visitor),
-            ty::adjustment::AutoUnsafe(_m) => false,
-        }
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::BuiltinBounds {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, _folder: &mut F) -> Self {
-        *self
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> bool {
-        false
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::ExistentialBounds<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::ExistentialBounds {
-            region_bound: self.region_bound.fold_with(folder),
-            builtin_bounds: self.builtin_bounds,
-            projection_bounds: self.projection_bounds.fold_with(folder),
-        }
-    }
-
-    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        folder.fold_existential_bounds(self)
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.region_bound.visit_with(visitor) || self.projection_bounds.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::TypeParameterDef<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::TypeParameterDef {
-            name: self.name,
-            def_id: self.def_id,
-            space: self.space,
-            index: self.index,
-            default: self.default.fold_with(folder),
-            default_def_id: self.default_def_id,
-            object_lifetime_default: self.object_lifetime_default.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.default.visit_with(visitor) ||
-            self.object_lifetime_default.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::ObjectLifetimeDefault {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        match *self {
-            ty::ObjectLifetimeDefault::Ambiguous =>
-                ty::ObjectLifetimeDefault::Ambiguous,
-
-            ty::ObjectLifetimeDefault::BaseDefault =>
-                ty::ObjectLifetimeDefault::BaseDefault,
-
-            ty::ObjectLifetimeDefault::Specific(r) =>
-                ty::ObjectLifetimeDefault::Specific(r.fold_with(folder)),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        match *self {
-            ty::ObjectLifetimeDefault::Specific(r) => r.visit_with(visitor),
-            _ => false,
-        }
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::RegionParameterDef {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::RegionParameterDef {
-            name: self.name,
-            def_id: self.def_id,
-            space: self.space,
-            index: self.index,
-            bounds: self.bounds.fold_with(folder)
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.bounds.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::Generics<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::Generics {
-            types: self.types.fold_with(folder),
-            regions: self.regions.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.types.visit_with(visitor) || self.regions.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::GenericPredicates<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::GenericPredicates {
-            predicates: self.predicates.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.predicates.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        match *self {
-            ty::Predicate::Trait(ref a) =>
-                ty::Predicate::Trait(a.fold_with(folder)),
-            ty::Predicate::Equate(ref binder) =>
-                ty::Predicate::Equate(binder.fold_with(folder)),
-            ty::Predicate::RegionOutlives(ref binder) =>
-                ty::Predicate::RegionOutlives(binder.fold_with(folder)),
-            ty::Predicate::TypeOutlives(ref binder) =>
-                ty::Predicate::TypeOutlives(binder.fold_with(folder)),
-            ty::Predicate::Projection(ref binder) =>
-                ty::Predicate::Projection(binder.fold_with(folder)),
-            ty::Predicate::WellFormed(data) =>
-                ty::Predicate::WellFormed(data.fold_with(folder)),
-            ty::Predicate::ObjectSafe(trait_def_id) =>
-                ty::Predicate::ObjectSafe(trait_def_id),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        match *self {
-            ty::Predicate::Trait(ref a) => a.visit_with(visitor),
-            ty::Predicate::Equate(ref binder) => binder.visit_with(visitor),
-            ty::Predicate::RegionOutlives(ref binder) => binder.visit_with(visitor),
-            ty::Predicate::TypeOutlives(ref binder) => binder.visit_with(visitor),
-            ty::Predicate::Projection(ref binder) => binder.visit_with(visitor),
-            ty::Predicate::WellFormed(data) => data.visit_with(visitor),
-            ty::Predicate::ObjectSafe(_trait_def_id) => false,
-        }
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::ProjectionPredicate<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::ProjectionPredicate {
-            projection_ty: self.projection_ty.fold_with(folder),
-            ty: self.ty.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.projection_ty.visit_with(visitor) || self.ty.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::ProjectionTy<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::ProjectionTy {
-            trait_ref: self.trait_ref.fold_with(folder),
-            item_name: self.item_name,
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.trait_ref.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::InstantiatedPredicates<'tcx> {
-    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::InstantiatedPredicates {
-            predicates: self.predicates.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.predicates.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::EquatePredicate<'tcx> {
-    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::EquatePredicate(self.0.fold_with(folder),
-                            self.1.fold_with(folder))
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.0.visit_with(visitor) || self.1.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::TraitPredicate<'tcx> {
-    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::TraitPredicate {
-            trait_ref: self.trait_ref.fold_with(folder)
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.trait_ref.visit_with(visitor)
-    }
-}
-
-impl<'tcx,T,U> TypeFoldable<'tcx> for ty::OutlivesPredicate<T,U>
-    where T : TypeFoldable<'tcx>,
-          U : TypeFoldable<'tcx>,
-{
-    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::OutlivesPredicate(self.0.fold_with(folder),
-                              self.1.fold_with(folder))
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.0.visit_with(visitor) || self.1.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::ClosureUpvar<'tcx> {
-    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::ClosureUpvar {
-            def: self.def,
-            span: self.span,
-            ty: self.ty.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.ty.visit_with(visitor)
-    }
-}
-
-impl<'a, 'tcx> TypeFoldable<'tcx> for ty::ParameterEnvironment<'a, 'tcx> where 'tcx: 'a {
-    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::ParameterEnvironment {
-            tcx: self.tcx,
-            free_substs: self.free_substs.fold_with(folder),
-            implicit_region_bound: self.implicit_region_bound.fold_with(folder),
-            caller_bounds: self.caller_bounds.fold_with(folder),
-            selection_cache: traits::SelectionCache::new(),
-            evaluation_cache: traits::EvaluationCache::new(),
-            free_id_outlive: self.free_id_outlive,
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.free_substs.visit_with(visitor) ||
-            self.implicit_region_bound.visit_with(visitor) ||
-            self.caller_bounds.visit_with(visitor)
-    }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::TypeScheme<'tcx>  {
-    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
-        ty::TypeScheme {
-            generics: self.generics.fold_with(folder),
-            ty: self.ty.fold_with(folder),
-        }
-    }
-
-    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
-        self.generics.visit_with(visitor) || self.ty.visit_with(visitor)
-    }
-}
diff --git a/src/librustc/middle/ty/sty.rs b/src/librustc/middle/ty/sty.rs
deleted file mode 100644 (file)
index 2a13c47..0000000
+++ /dev/null
@@ -1,1246 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! This module contains TypeVariants and its major components
-
-use middle::cstore;
-use middle::def_id::DefId;
-use middle::region;
-use middle::subst::{self, Substs};
-use middle::traits;
-use middle::ty::{self, AdtDef, ToPredicate, TypeFlags, Ty, TyS, TypeFoldable};
-use util::common::ErrorReported;
-
-use collections::enum_set::{self, EnumSet, CLike};
-use std::fmt;
-use std::ops;
-use std::mem;
-use syntax::abi;
-use syntax::ast::{self, Name};
-use syntax::parse::token::special_idents;
-
-use serialize::{Decodable, Decoder};
-
-use rustc_front::hir;
-
-use self::FnOutput::*;
-use self::InferTy::*;
-use self::TypeVariants::*;
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-pub struct TypeAndMut<'tcx> {
-    pub ty: Ty<'tcx>,
-    pub mutbl: hir::Mutability,
-}
-
-#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash,
-         RustcEncodable, RustcDecodable, Copy)]
-/// A "free" region `fr` can be interpreted as "some region
-/// at least as big as the scope `fr.scope`".
-pub struct FreeRegion {
-    pub scope: region::CodeExtent,
-    pub bound_region: BoundRegion
-}
-
-#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash,
-         RustcEncodable, RustcDecodable, Copy)]
-pub enum BoundRegion {
-    /// An anonymous region parameter for a given fn (&T)
-    BrAnon(u32),
-
-    /// Named region parameters for functions ('a in &'a T)
-    ///
-    /// The def-id is needed to distinguish free regions in
-    /// the event of shadowing.
-    BrNamed(DefId, Name),
-
-    /// Fresh bound identifiers created during GLB computations.
-    BrFresh(u32),
-
-    // Anonymous region for the implicit env pointer parameter
-    // to a closure
-    BrEnv
-}
-
-// NB: If you change this, you'll probably want to change the corresponding
-// AST structure in libsyntax/ast.rs as well.
-#[derive(Clone, PartialEq, Eq, Hash, Debug)]
-pub enum TypeVariants<'tcx> {
-    /// The primitive boolean type. Written as `bool`.
-    TyBool,
-
-    /// The primitive character type; holds a Unicode scalar value
-    /// (a non-surrogate code point).  Written as `char`.
-    TyChar,
-
-    /// A primitive signed integer type. For example, `i32`.
-    TyInt(ast::IntTy),
-
-    /// A primitive unsigned integer type. For example, `u32`.
-    TyUint(ast::UintTy),
-
-    /// A primitive floating-point type. For example, `f64`.
-    TyFloat(ast::FloatTy),
-
-    /// An enumerated type, defined with `enum`.
-    ///
-    /// Substs here, possibly against intuition, *may* contain `TyParam`s.
-    /// That is, even after substitution it is possible that there are type
-    /// variables. This happens when the `TyEnum` corresponds to an enum
-    /// definition and not a concrete use of it. To get the correct `TyEnum`
-    /// from the tcx, use the `NodeId` from the `ast::Ty` and look it up in
-    /// the `ast_ty_to_ty_cache`. This is probably true for `TyStruct` as
-    /// well.
-    TyEnum(AdtDef<'tcx>, &'tcx Substs<'tcx>),
-
-    /// A structure type, defined with `struct`.
-    ///
-    /// See warning about substitutions for enumerated types.
-    TyStruct(AdtDef<'tcx>, &'tcx Substs<'tcx>),
-
-    /// `Box<T>`; this is nominally a struct in the documentation, but is
-    /// special-cased internally. For example, it is possible to implicitly
-    /// move the contents of a box out of that box, and methods of any type
-    /// can have type `Box<Self>`.
-    TyBox(Ty<'tcx>),
-
-    /// The pointee of a string slice. Written as `str`.
-    TyStr,
-
-    /// An array with the given length. Written as `[T; n]`.
-    TyArray(Ty<'tcx>, usize),
-
-    /// The pointee of an array slice.  Written as `[T]`.
-    TySlice(Ty<'tcx>),
-
-    /// A raw pointer. Written as `*mut T` or `*const T`
-    TyRawPtr(TypeAndMut<'tcx>),
-
-    /// A reference; a pointer with an associated lifetime. Written as
-    /// `&'a mut T` or `&'a T`.
-    TyRef(&'tcx Region, TypeAndMut<'tcx>),
-
-    /// If the def-id is Some(_), then this is the type of a specific
-    /// fn item. Otherwise, if None(_), it is a fn pointer type.
-    ///
-    /// FIXME: Conflating function pointers and the type of a
-    /// function is probably a terrible idea; a function pointer is a
-    /// value with a specific type, but a function can be polymorphic
-    /// or dynamically dispatched.
-    TyBareFn(Option<DefId>, &'tcx BareFnTy<'tcx>),
-
-    /// A trait, defined with `trait`.
-    TyTrait(Box<TraitTy<'tcx>>),
-
-    /// The anonymous type of a closure. Used to represent the type of
-    /// `|a| a`.
-    TyClosure(DefId, Box<ClosureSubsts<'tcx>>),
-
-    /// A tuple type.  For example, `(i32, bool)`.
-    TyTuple(Vec<Ty<'tcx>>),
-
-    /// The projection of an associated type.  For example,
-    /// `<T as Trait<..>>::N`.
-    TyProjection(ProjectionTy<'tcx>),
-
-    /// A type parameter; for example, `T` in `fn f<T>(x: T) {}`
-    TyParam(ParamTy),
-
-    /// A type variable used during type-checking.
-    TyInfer(InferTy),
-
-    /// A placeholder for a type which could not be computed; this is
-    /// propagated to avoid useless error messages.
-    TyError,
-}
-
-/// A closure can be modeled as a struct that looks like:
-///
-///     struct Closure<'l0...'li, T0...Tj, U0...Uk> {
-///         upvar0: U0,
-///         ...
-///         upvark: Uk
-///     }
-///
-/// where 'l0...'li and T0...Tj are the lifetime and type parameters
-/// in scope on the function that defined the closure, and U0...Uk are
-/// type parameters representing the types of its upvars (borrowed, if
-/// appropriate).
-///
-/// So, for example, given this function:
-///
-///     fn foo<'a, T>(data: &'a mut T) {
-///          do(|| data.count += 1)
-///     }
-///
-/// the type of the closure would be something like:
-///
-///     struct Closure<'a, T, U0> {
-///         data: U0
-///     }
-///
-/// Note that the type of the upvar is not specified in the struct.
-/// You may wonder how the impl would then be able to use the upvar,
-/// if it doesn't know its type? The answer is that the impl is
-/// (conceptually) not fully generic over Closure but rather tied to
-/// instances with the expected upvar types:
-///
-///     impl<'b, 'a, T> FnMut() for Closure<'a, T, &'b mut &'a mut T> {
-///         ...
-///     }
-///
-/// You can see that the *impl* fully specified the type of the upvar
-/// and thus knows full well that `data` has type `&'b mut &'a mut T`.
-/// (Here, I am assuming that `data` is mut-borrowed.)
-///
-/// Now, the last question you may ask is: Why include the upvar types
-/// as extra type parameters? The reason for this design is that the
-/// upvar types can reference lifetimes that are internal to the
-/// creating function. In my example above, for example, the lifetime
-/// `'b` represents the extent of the closure itself; this is some
-/// subset of `foo`, probably just the extent of the call to
-/// `do()`. If we just had the lifetime/type parameters from the
-/// enclosing function, we couldn't name this lifetime `'b`. Note that
-/// there can also be lifetimes in the types of the upvars themselves,
-/// if one of them happens to be a reference to something that the
-/// creating fn owns.
-///
-/// OK, you say, so why not create a more minimal set of parameters
-/// that just includes the extra lifetime parameters? The answer is
-/// primarily that it would be hard --- we don't know at the time when
-/// we create the closure type what the full types of the upvars are,
-/// nor do we know which are borrowed and which are not. In this
-/// design, we can just supply a fresh type parameter and figure that
-/// out later.
-///
-/// All right, you say, but why include the type parameters from the
-/// original function then? The answer is that trans may need them
-/// when monomorphizing, and they may not appear in the upvars.  A
-/// closure could capture no variables but still make use of some
-/// in-scope type parameter with a bound (e.g., if our example above
-/// had an extra `U: Default`, and the closure called `U::default()`).
-///
-/// There is another reason. This design (implicitly) prohibits
-/// closures from capturing themselves (except via a trait
-/// object). This simplifies closure inference considerably, since it
-/// means that when we infer the kind of a closure or its upvars, we
-/// don't have to handle cycles where the decisions we make for
-/// closure C wind up influencing the decisions we ought to make for
-/// closure C (which would then require fixed point iteration to
-/// handle). Plus it fixes an ICE. :P
-#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
-pub struct ClosureSubsts<'tcx> {
-    /// Lifetime and type parameters from the enclosing function.
-    /// These are separated out because trans wants to pass them around
-    /// when monomorphizing.
-    pub func_substs: &'tcx Substs<'tcx>,
-
-    /// The types of the upvars. The list parallels the freevars and
-    /// `upvar_borrows` lists. These are kept distinct so that we can
-    /// easily index into them.
-    pub upvar_tys: Vec<Ty<'tcx>>
-}
-
-impl<'tcx> Decodable for &'tcx ClosureSubsts<'tcx> {
-    fn decode<S: Decoder>(s: &mut S) -> Result<&'tcx ClosureSubsts<'tcx>, S::Error> {
-        let closure_substs = try! { Decodable::decode(s) };
-        let dummy_def_id: DefId = unsafe { mem::zeroed() };
-
-        cstore::tls::with_decoding_context(s, |dcx, _| {
-            // Intern the value
-            let ty = dcx.tcx().mk_closure_from_closure_substs(dummy_def_id,
-                                                              Box::new(closure_substs));
-            match ty.sty {
-                TyClosure(_, ref closure_substs) => Ok(&**closure_substs),
-                _ => unreachable!()
-            }
-        })
-    }
-}
-
-#[derive(Clone, PartialEq, Eq, Hash)]
-pub struct TraitTy<'tcx> {
-    pub principal: ty::PolyTraitRef<'tcx>,
-    pub bounds: ExistentialBounds<'tcx>,
-}
-
-impl<'tcx> TraitTy<'tcx> {
-    pub fn principal_def_id(&self) -> DefId {
-        self.principal.0.def_id
-    }
-
-    /// Object types don't have a self-type specified. Therefore, when
-    /// we convert the principal trait-ref into a normal trait-ref,
-    /// you must give *some* self-type. A common choice is `mk_err()`
-    /// or some skolemized type.
-    pub fn principal_trait_ref_with_self_ty(&self,
-                                            tcx: &ty::ctxt<'tcx>,
-                                            self_ty: Ty<'tcx>)
-                                            -> ty::PolyTraitRef<'tcx>
-    {
-        // otherwise the escaping regions would be captured by the binder
-        assert!(!self_ty.has_escaping_regions());
-
-        ty::Binder(TraitRef {
-            def_id: self.principal.0.def_id,
-            substs: tcx.mk_substs(self.principal.0.substs.with_self_ty(self_ty)),
-        })
-    }
-
-    pub fn projection_bounds_with_self_ty(&self,
-                                          tcx: &ty::ctxt<'tcx>,
-                                          self_ty: Ty<'tcx>)
-                                          -> Vec<ty::PolyProjectionPredicate<'tcx>>
-    {
-        // otherwise the escaping regions would be captured by the binders
-        assert!(!self_ty.has_escaping_regions());
-
-        self.bounds.projection_bounds.iter()
-            .map(|in_poly_projection_predicate| {
-                let in_projection_ty = &in_poly_projection_predicate.0.projection_ty;
-                let substs = tcx.mk_substs(in_projection_ty.trait_ref.substs.with_self_ty(self_ty));
-                let trait_ref = ty::TraitRef::new(in_projection_ty.trait_ref.def_id,
-                                              substs);
-                let projection_ty = ty::ProjectionTy {
-                    trait_ref: trait_ref,
-                    item_name: in_projection_ty.item_name
-                };
-                ty::Binder(ty::ProjectionPredicate {
-                    projection_ty: projection_ty,
-                    ty: in_poly_projection_predicate.0.ty
-                })
-            })
-            .collect()
-    }
-}
-
-/// A complete reference to a trait. These take numerous guises in syntax,
-/// but perhaps the most recognizable form is in a where clause:
-///
-///     T : Foo<U>
-///
-/// This would be represented by a trait-reference where the def-id is the
-/// def-id for the trait `Foo` and the substs defines `T` as parameter 0 in the
-/// `SelfSpace` and `U` as parameter 0 in the `TypeSpace`.
-///
-/// Trait references also appear in object types like `Foo<U>`, but in
-/// that case the `Self` parameter is absent from the substitutions.
-///
-/// Note that a `TraitRef` introduces a level of region binding, to
-/// account for higher-ranked trait bounds like `T : for<'a> Foo<&'a
-/// U>` or higher-ranked object types.
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-pub struct TraitRef<'tcx> {
-    pub def_id: DefId,
-    pub substs: &'tcx Substs<'tcx>,
-}
-
-pub type PolyTraitRef<'tcx> = Binder<TraitRef<'tcx>>;
-
-impl<'tcx> PolyTraitRef<'tcx> {
-    pub fn self_ty(&self) -> Ty<'tcx> {
-        self.0.self_ty()
-    }
-
-    pub fn def_id(&self) -> DefId {
-        self.0.def_id
-    }
-
-    pub fn substs(&self) -> &'tcx Substs<'tcx> {
-        // FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<>
-        self.0.substs
-    }
-
-    pub fn input_types(&self) -> &[Ty<'tcx>] {
-        // FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<>
-        self.0.input_types()
-    }
-
-    pub fn to_poly_trait_predicate(&self) -> ty::PolyTraitPredicate<'tcx> {
-        // Note that we preserve binding levels
-        Binder(ty::TraitPredicate { trait_ref: self.0.clone() })
-    }
-}
-
-/// Binder is a binder for higher-ranked lifetimes. It is part of the
-/// compiler's representation for things like `for<'a> Fn(&'a isize)`
-/// (which would be represented by the type `PolyTraitRef ==
-/// Binder<TraitRef>`). Note that when we skolemize, instantiate,
-/// erase, or otherwise "discharge" these bound regions, we change the
-/// type from `Binder<T>` to just `T` (see
-/// e.g. `liberate_late_bound_regions`).
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-pub struct Binder<T>(pub T);
-
-impl<T> Binder<T> {
-    /// Skips the binder and returns the "bound" value. This is a
-    /// risky thing to do because it's easy to get confused about
-    /// debruijn indices and the like. It is usually better to
-    /// discharge the binder using `no_late_bound_regions` or
-    /// `replace_late_bound_regions` or something like
-    /// that. `skip_binder` is only valid when you are either
-    /// extracting data that has nothing to do with bound regions, you
-    /// are doing some sort of test that does not involve bound
-    /// regions, or you are being very careful about your depth
-    /// accounting.
-    ///
-    /// Some examples where `skip_binder` is reasonable:
-    /// - extracting the def-id from a PolyTraitRef;
-    /// - comparing the self type of a PolyTraitRef to see if it is equal to
-    ///   a type parameter `X`, since the type `X`  does not reference any regions
-    pub fn skip_binder(&self) -> &T {
-        &self.0
-    }
-
-    pub fn as_ref(&self) -> Binder<&T> {
-        ty::Binder(&self.0)
-    }
-
-    pub fn map_bound_ref<F,U>(&self, f: F) -> Binder<U>
-        where F: FnOnce(&T) -> U
-    {
-        self.as_ref().map_bound(f)
-    }
-
-    pub fn map_bound<F,U>(self, f: F) -> Binder<U>
-        where F: FnOnce(T) -> U
-    {
-        ty::Binder(f(self.0))
-    }
-}
-
-impl fmt::Debug for TypeFlags {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self.bits)
-    }
-}
-
-/// Represents the projection of an associated type. In explicit UFCS
-/// form this would be written `<T as Trait<..>>::N`.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-pub struct ProjectionTy<'tcx> {
-    /// The trait reference `T as Trait<..>`.
-    pub trait_ref: ty::TraitRef<'tcx>,
-
-    /// The name `N` of the associated type.
-    pub item_name: Name,
-}
-
-impl<'tcx> ProjectionTy<'tcx> {
-    pub fn sort_key(&self) -> (DefId, Name) {
-        (self.trait_ref.def_id, self.item_name)
-    }
-}
-
-#[derive(Clone, PartialEq, Eq, Hash, Debug)]
-pub struct BareFnTy<'tcx> {
-    pub unsafety: hir::Unsafety,
-    pub abi: abi::Abi,
-    pub sig: PolyFnSig<'tcx>,
-}
-
-#[derive(Clone, PartialEq, Eq, Hash)]
-pub struct ClosureTy<'tcx> {
-    pub unsafety: hir::Unsafety,
-    pub abi: abi::Abi,
-    pub sig: PolyFnSig<'tcx>,
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
-pub enum FnOutput<'tcx> {
-    FnConverging(Ty<'tcx>),
-    FnDiverging
-}
-
-impl<'tcx> FnOutput<'tcx> {
-    pub fn diverges(&self) -> bool {
-        *self == FnDiverging
-    }
-
-    pub fn unwrap(self) -> Ty<'tcx> {
-        match self {
-            ty::FnConverging(t) => t,
-            ty::FnDiverging => unreachable!()
-        }
-    }
-
-    pub fn unwrap_or(self, def: Ty<'tcx>) -> Ty<'tcx> {
-        match self {
-            ty::FnConverging(t) => t,
-            ty::FnDiverging => def
-        }
-    }
-}
-
-pub type PolyFnOutput<'tcx> = Binder<FnOutput<'tcx>>;
-
-impl<'tcx> PolyFnOutput<'tcx> {
-    pub fn diverges(&self) -> bool {
-        self.0.diverges()
-    }
-}
-
-/// Signature of a function type, which I have arbitrarily
-/// decided to use to refer to the input/output types.
-///
-/// - `inputs` is the list of arguments and their modes.
-/// - `output` is the return type.
-/// - `variadic` indicates whether this is a variadic function. (only true for foreign fns)
-#[derive(Clone, PartialEq, Eq, Hash)]
-pub struct FnSig<'tcx> {
-    pub inputs: Vec<Ty<'tcx>>,
-    pub output: FnOutput<'tcx>,
-    pub variadic: bool
-}
-
-pub type PolyFnSig<'tcx> = Binder<FnSig<'tcx>>;
-
-impl<'tcx> PolyFnSig<'tcx> {
-    pub fn inputs(&self) -> ty::Binder<Vec<Ty<'tcx>>> {
-        self.map_bound_ref(|fn_sig| fn_sig.inputs.clone())
-    }
-    pub fn input(&self, index: usize) -> ty::Binder<Ty<'tcx>> {
-        self.map_bound_ref(|fn_sig| fn_sig.inputs[index])
-    }
-    pub fn output(&self) -> ty::Binder<FnOutput<'tcx>> {
-        self.map_bound_ref(|fn_sig| fn_sig.output.clone())
-    }
-    pub fn variadic(&self) -> bool {
-        self.skip_binder().variadic
-    }
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
-pub struct ParamTy {
-    pub space: subst::ParamSpace,
-    pub idx: u32,
-    pub name: Name,
-}
-
-impl ParamTy {
-    pub fn new(space: subst::ParamSpace,
-               index: u32,
-               name: Name)
-               -> ParamTy {
-        ParamTy { space: space, idx: index, name: name }
-    }
-
-    pub fn for_self() -> ParamTy {
-        ParamTy::new(subst::SelfSpace, 0, special_idents::type_self.name)
-    }
-
-    pub fn for_def(def: &ty::TypeParameterDef) -> ParamTy {
-        ParamTy::new(def.space, def.index, def.name)
-    }
-
-    pub fn to_ty<'tcx>(self, tcx: &ty::ctxt<'tcx>) -> Ty<'tcx> {
-        tcx.mk_param(self.space, self.idx, self.name)
-    }
-
-    pub fn is_self(&self) -> bool {
-        self.space == subst::SelfSpace && self.idx == 0
-    }
-}
-
-/// A [De Bruijn index][dbi] is a standard means of representing
-/// regions (and perhaps later types) in a higher-ranked setting. In
-/// particular, imagine a type like this:
-///
-///     for<'a> fn(for<'b> fn(&'b isize, &'a isize), &'a char)
-///     ^          ^            |        |         |
-///     |          |            |        |         |
-///     |          +------------+ 1      |         |
-///     |                                |         |
-///     +--------------------------------+ 2       |
-///     |                                          |
-///     +------------------------------------------+ 1
-///
-/// In this type, there are two binders (the outer fn and the inner
-/// fn). We need to be able to determine, for any given region, which
-/// fn type it is bound by, the inner or the outer one. There are
-/// various ways you can do this, but a De Bruijn index is one of the
-/// more convenient and has some nice properties. The basic idea is to
-/// count the number of binders, inside out. Some examples should help
-/// clarify what I mean.
-///
-/// Let's start with the reference type `&'b isize` that is the first
-/// argument to the inner function. This region `'b` is assigned a De
-/// Bruijn index of 1, meaning "the innermost binder" (in this case, a
-/// fn). The region `'a` that appears in the second argument type (`&'a
-/// isize`) would then be assigned a De Bruijn index of 2, meaning "the
-/// second-innermost binder". (These indices are written on the arrays
-/// in the diagram).
-///
-/// What is interesting is that the De Bruijn index attached to a particular
-/// variable will vary depending on where it appears. For example,
-/// the final type `&'a char` also refers to the region `'a` declared on
-/// the outermost fn. But this time, this reference is not nested within
-/// any other binders (i.e., it is not an argument to the inner fn, but
-/// rather the outer one). Therefore, in this case, it is assigned a
-/// De Bruijn index of 1, because the innermost binder in that location
-/// is the outer fn.
-///
-/// [dbi]: http://en.wikipedia.org/wiki/De_Bruijn_index
-#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug, Copy)]
-pub struct DebruijnIndex {
-    // We maintain the invariant that this is never 0. So 1 indicates
-    // the innermost binder. To ensure this, create with `DebruijnIndex::new`.
-    pub depth: u32,
-}
-
-/// Representation of regions.
-///
-/// Unlike types, most region variants are "fictitious", not concrete,
-/// regions. Among these, `ReStatic`, `ReEmpty` and `ReScope` are the only
-/// ones representing concrete regions.
-///
-/// ## Bound Regions
-///
-/// These are regions that are stored behind a binder and must be substituted
-/// with some concrete region before being used. There are 2 kinds of
-/// bound regions: early-bound, which are bound in a TypeScheme/TraitDef,
-/// and are substituted by a Substs,  and late-bound, which are part of
-/// higher-ranked types (e.g. `for<'a> fn(&'a ())`) and are substituted by
-/// the likes of `liberate_late_bound_regions`. The distinction exists
-/// because higher-ranked lifetimes aren't supported in all places. See [1][2].
-///
-/// Unlike TyParam-s, bound regions are not supposed to exist "in the wild"
-/// outside their binder, e.g. in types passed to type inference, and
-/// should first be substituted (by skolemized regions, free regions,
-/// or region variables).
-///
-/// ## Skolemized and Free Regions
-///
-/// One often wants to work with bound regions without knowing their precise
-/// identity. For example, when checking a function, the lifetime of a borrow
-/// can end up being assigned to some region parameter. In these cases,
-/// it must be ensured that bounds on the region can't be accidentally
-/// assumed without being checked.
-///
-/// The process of doing that is called "skolemization". The bound regions
-/// are replaced by skolemized markers, which don't satisfy any relation
-/// not explicitly provided.
-///
-/// There are 2 kinds of skolemized regions in rustc: `ReFree` and
-/// `ReSkolemized`. When checking an item's body, `ReFree` is supposed
-/// to be used. These also support explicit bounds: both the internally-stored
-/// *scope*, which the region is assumed to outlive, and other
-/// relations stored in the `FreeRegionMap`. Note that these relations
-/// aren't checked when you `make_subregion` (or `mk_eqty`), only by
-/// `resolve_regions_and_report_errors`.
-///
-/// When working with higher-ranked types, some region relations aren't
-/// yet known, so you can't just call `resolve_regions_and_report_errors`.
-/// `ReSkolemized` is designed for this purpose. In these contexts,
-/// there's also the risk that some inference variable lying around will
-/// get unified with your skolemized region: if you want to check whether
-/// `for<'a> Foo<'_>: 'a`, and you substitute your bound region `'a`
-/// with a skolemized region `'%a`, the variable `'_` would just be
-/// instantiated to the skolemized region `'%a`, which is wrong because
-/// the inference variable is supposed to satisfy the relation
-/// *for every value of the skolemized region*. To ensure that doesn't
-/// happen, you can use `leak_check`. This is more clearly explained
-/// in infer/higher_ranked/README.md.
-///
-/// [1]: http://smallcultfollowing.com/babysteps/blog/2013/10/29/intermingled-parameter-lists/
-/// [2]: http://smallcultfollowing.com/babysteps/blog/2013/11/04/intermingled-parameter-lists/
-#[derive(Clone, PartialEq, Eq, Hash, Copy, RustcEncodable, RustcDecodable)]
-pub enum Region {
-    // Region bound in a type or fn declaration which will be
-    // substituted 'early' -- that is, at the same time when type
-    // parameters are substituted.
-    ReEarlyBound(EarlyBoundRegion),
-
-    // Region bound in a function scope, which will be substituted when the
-    // function is called.
-    ReLateBound(DebruijnIndex, BoundRegion),
-
-    /// When checking a function body, the types of all arguments and so forth
-    /// that refer to bound region parameters are modified to refer to free
-    /// region parameters.
-    ReFree(FreeRegion),
-
-    /// A concrete region naming some statically determined extent
-    /// (e.g. an expression or sequence of statements) within the
-    /// current function.
-    ReScope(region::CodeExtent),
-
-    /// Static data that has an "infinite" lifetime. Top in the region lattice.
-    ReStatic,
-
-    /// A region variable.  Should not exist after typeck.
-    ReVar(RegionVid),
-
-    /// A skolemized region - basically the higher-ranked version of ReFree.
-    /// Should not exist after typeck.
-    ReSkolemized(SkolemizedRegionVid, BoundRegion),
-
-    /// Empty lifetime is for data that is never accessed.
-    /// Bottom in the region lattice. We treat ReEmpty somewhat
-    /// specially; at least right now, we do not generate instances of
-    /// it during the GLB computations, but generate an error instead.
-    /// This is to improve error messages.
-    /// The only way to get an instance of ReEmpty is to have a region
-    /// variable with no constraints.
-    ReEmpty,
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)]
-pub struct EarlyBoundRegion {
-    pub space: subst::ParamSpace,
-    pub index: u32,
-    pub name: Name,
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
-pub struct TyVid {
-    pub index: u32
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
-pub struct IntVid {
-    pub index: u32
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
-pub struct FloatVid {
-    pub index: u32
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)]
-pub struct RegionVid {
-    pub index: u32
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
-pub struct SkolemizedRegionVid {
-    pub index: u32
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
-pub enum InferTy {
-    TyVar(TyVid),
-    IntVar(IntVid),
-    FloatVar(FloatVid),
-
-    /// A `FreshTy` is one that is generated as a replacement for an
-    /// unbound type variable. This is convenient for caching etc. See
-    /// `middle::infer::freshen` for more details.
-    FreshTy(u32),
-    FreshIntTy(u32),
-    FreshFloatTy(u32)
-}
-
-/// Bounds suitable for an existentially quantified type parameter
-/// such as those that appear in object types or closure types.
-#[derive(PartialEq, Eq, Hash, Clone)]
-pub struct ExistentialBounds<'tcx> {
-    pub region_bound: ty::Region,
-    pub builtin_bounds: BuiltinBounds,
-    pub projection_bounds: Vec<ty::PolyProjectionPredicate<'tcx>>,
-}
-
-impl<'tcx> ExistentialBounds<'tcx> {
-    pub fn new(region_bound: ty::Region,
-               builtin_bounds: BuiltinBounds,
-               projection_bounds: Vec<ty::PolyProjectionPredicate<'tcx>>)
-               -> Self {
-        let mut projection_bounds = projection_bounds;
-        projection_bounds.sort_by(|a, b| a.sort_key().cmp(&b.sort_key()));
-        ExistentialBounds {
-            region_bound: region_bound,
-            builtin_bounds: builtin_bounds,
-            projection_bounds: projection_bounds
-        }
-    }
-}
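-
-// Illustrative note (not from the original source): because `new` sorts the
-// projection bounds by `sort_key`, two bound lists that differ only in the
-// order they were supplied end up as equal `ExistentialBounds` values,
-// giving object types a canonical bound ordering.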
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-pub struct BuiltinBounds(EnumSet<BuiltinBound>);
-
-impl BuiltinBounds {
-    pub fn empty() -> BuiltinBounds {
-        BuiltinBounds(EnumSet::new())
-    }
-
-    pub fn iter(&self) -> enum_set::Iter<BuiltinBound> {
-        self.into_iter()
-    }
-
-    pub fn to_predicates<'tcx>(&self,
-                               tcx: &ty::ctxt<'tcx>,
-                               self_ty: Ty<'tcx>) -> Vec<ty::Predicate<'tcx>> {
-        self.iter().filter_map(|builtin_bound|
-            match traits::trait_ref_for_builtin_bound(tcx, builtin_bound, self_ty) {
-                Ok(trait_ref) => Some(trait_ref.to_predicate()),
-                Err(ErrorReported) => { None }
-            }
-        ).collect()
-    }
-}
-
-impl ops::Deref for BuiltinBounds {
-    type Target = EnumSet<BuiltinBound>;
-    fn deref(&self) -> &Self::Target { &self.0 }
-}
-
-impl ops::DerefMut for BuiltinBounds {
-    fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 }
-}
-
-impl<'a> IntoIterator for &'a BuiltinBounds {
-    type Item = BuiltinBound;
-    type IntoIter = enum_set::Iter<BuiltinBound>;
-    fn into_iter(self) -> Self::IntoIter {
-        (**self).into_iter()
-    }
-}
-
-#[derive(Clone, RustcEncodable, PartialEq, Eq, RustcDecodable, Hash,
-           Debug, Copy)]
-#[repr(usize)]
-pub enum BuiltinBound {
-    Send,
-    Sized,
-    Copy,
-    Sync,
-}
-
-impl CLike for BuiltinBound {
-    fn to_usize(&self) -> usize {
-        *self as usize
-    }
-    fn from_usize(v: usize) -> BuiltinBound {
-        unsafe { mem::transmute(v) }
-    }
-}
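-
-// Hedged usage sketch of the set API exposed via Deref/DerefMut and
-// IntoIterator above (uses nothing beyond this file):
-//
-//     let mut bounds = BuiltinBounds::empty();
-//     bounds.insert(BuiltinBound::Send); // DerefMut exposes EnumSet::insert
-//     bounds.insert(BuiltinBound::Sync);
-//     for bound in &bounds { /* visits Send, then Sync, in enum order */ }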
-
-impl<'tcx> ty::ctxt<'tcx> {
-    pub fn try_add_builtin_trait(&self,
-                                 trait_def_id: DefId,
-                                 builtin_bounds: &mut EnumSet<BuiltinBound>)
-                                 -> bool
-    {
-        //! Checks whether `trait_def_id` refers to one of the builtin
-        //! traits, like `Send`, and adds the corresponding
-        //! bound to the set `builtin_bounds` if so. Returns true if
-        //! `trait_def_id` is a builtin trait.
-
-        match self.lang_items.to_builtin_kind(trait_def_id) {
-            Some(bound) => { builtin_bounds.insert(bound); true }
-            None => false
-        }
-    }
-}
-
-impl DebruijnIndex {
-    pub fn new(depth: u32) -> DebruijnIndex {
-        assert!(depth > 0);
-        DebruijnIndex { depth: depth }
-    }
-
-    pub fn shifted(&self, amount: u32) -> DebruijnIndex {
-        DebruijnIndex { depth: self.depth + amount }
-    }
-}
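-
-// Hedged usage sketch (illustrative only): `new(1)` names the innermost
-// binder, and `shifted` is the adjustment used when a value is placed
-// underneath additional binders:
-//
-//     let innermost = DebruijnIndex::new(1);
-//     assert_eq!(innermost.shifted(1).depth, 2); // now relative to one more binder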
-
-// Region utilities
-impl Region {
-    pub fn is_bound(&self) -> bool {
-        match *self {
-            ty::ReEarlyBound(..) => true,
-            ty::ReLateBound(..) => true,
-            _ => false
-        }
-    }
-
-    pub fn needs_infer(&self) -> bool {
-        match *self {
-            ty::ReVar(..) | ty::ReSkolemized(..) => true,
-            _ => false
-        }
-    }
-
-    pub fn escapes_depth(&self, depth: u32) -> bool {
-        match *self {
-            ty::ReLateBound(debruijn, _) => debruijn.depth > depth,
-            _ => false,
-        }
-    }
-
-    /// Returns the depth of `self` from the (1-based) binding level `depth`
-    pub fn from_depth(&self, depth: u32) -> Region {
-        match *self {
-            ty::ReLateBound(debruijn, r) => ty::ReLateBound(DebruijnIndex {
-                depth: debruijn.depth - (depth - 1)
-            }, r),
-            r => r
-        }
-    }
-}
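-
-// Hedged example of the helpers above: a late-bound region "escapes" a
-// binding level when its De Bruijn index points past that level.
-//
-//     let r = ty::ReLateBound(DebruijnIndex::new(2), ty::BrAnon(0));
-//     assert!(r.is_bound());
-//     assert!(r.escapes_depth(1));  // index 2 reaches outside one binder
-//     assert!(!r.escapes_depth(2)); // ...but not outside two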
-
-// Type utilities
-impl<'tcx> TyS<'tcx> {
-    pub fn as_opt_param_ty(&self) -> Option<ty::ParamTy> {
-        match self.sty {
-            ty::TyParam(ref d) => Some(d.clone()),
-            _ => None,
-        }
-    }
-
-    pub fn is_nil(&self) -> bool {
-        match self.sty {
-            TyTuple(ref tys) => tys.is_empty(),
-            _ => false
-        }
-    }
-
-    pub fn is_empty(&self, _cx: &ty::ctxt) -> bool {
-        // FIXME(#24885): be smarter here
-        match self.sty {
-            TyEnum(def, _) | TyStruct(def, _) => def.is_empty(),
-            _ => false
-        }
-    }
-
-    pub fn is_primitive(&self) -> bool {
-        match self.sty {
-            TyBool | TyChar | TyInt(_) | TyUint(_) | TyFloat(_) => true,
-            _ => false,
-        }
-    }
-
-    pub fn is_ty_var(&self) -> bool {
-        match self.sty {
-            TyInfer(TyVar(_)) => true,
-            _ => false
-        }
-    }
-
-    pub fn is_phantom_data(&self) -> bool {
-        if let TyStruct(def, _) = self.sty {
-            def.is_phantom_data()
-        } else {
-            false
-        }
-    }
-
-    pub fn is_bool(&self) -> bool { self.sty == TyBool }
-
-    pub fn is_param(&self, space: subst::ParamSpace, index: u32) -> bool {
-        match self.sty {
-            ty::TyParam(ref data) => data.space == space && data.idx == index,
-            _ => false,
-        }
-    }
-
-    pub fn is_self(&self) -> bool {
-        match self.sty {
-            TyParam(ref p) => p.space == subst::SelfSpace,
-            _ => false
-        }
-    }
-
-    fn is_slice(&self) -> bool {
-        match self.sty {
-            TyRawPtr(mt) | TyRef(_, mt) => match mt.ty.sty {
-                TySlice(_) | TyStr => true,
-                _ => false,
-            },
-            _ => false
-        }
-    }
-
-    pub fn is_structural(&self) -> bool {
-        match self.sty {
-            TyStruct(..) | TyTuple(_) | TyEnum(..) |
-            TyArray(..) | TyClosure(..) => true,
-            _ => self.is_slice() || self.is_trait()
-        }
-    }
-
-    #[inline]
-    pub fn is_simd(&self) -> bool {
-        match self.sty {
-            TyStruct(def, _) => def.is_simd(),
-            _ => false
-        }
-    }
-
-    pub fn sequence_element_type(&self, cx: &ty::ctxt<'tcx>) -> Ty<'tcx> {
-        match self.sty {
-            TyArray(ty, _) | TySlice(ty) => ty,
-            TyStr => cx.mk_mach_uint(ast::UintTy::U8),
-            _ => cx.sess.bug(&format!("sequence_element_type called on non-sequence value: {}",
-                                      self)),
-        }
-    }
-
-    pub fn simd_type(&self, cx: &ty::ctxt<'tcx>) -> Ty<'tcx> {
-        match self.sty {
-            TyStruct(def, substs) => {
-                def.struct_variant().fields[0].ty(cx, substs)
-            }
-            _ => panic!("simd_type called on invalid type")
-        }
-    }
-
-    pub fn simd_size(&self, _cx: &ty::ctxt) -> usize {
-        match self.sty {
-            TyStruct(def, _) => def.struct_variant().fields.len(),
-            _ => panic!("simd_size called on invalid type")
-        }
-    }
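-
-    // Illustrative sketch (hypothetical SIMD struct, not defined here): for
-    // `#[repr(simd)] struct f32x4(f32, f32, f32, f32);`, `simd_type` returns
-    // the first field's type `f32` and `simd_size` returns 4.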
-
-    pub fn is_region_ptr(&self) -> bool {
-        match self.sty {
-            TyRef(..) => true,
-            _ => false
-        }
-    }
-
-    pub fn is_unsafe_ptr(&self) -> bool {
-        match self.sty {
-            TyRawPtr(_) => true,
-            _ => false
-        }
-    }
-
-    pub fn is_unique(&self) -> bool {
-        match self.sty {
-            TyBox(_) => true,
-            _ => false
-        }
-    }
-
-    /// A scalar type is one that denotes an atomic datum, with no sub-components.
-    /// (A TyRawPtr is scalar because it represents a non-managed pointer, so its
-    /// contents are abstract to rustc.)
-    pub fn is_scalar(&self) -> bool {
-        match self.sty {
-            TyBool | TyChar | TyInt(_) | TyFloat(_) | TyUint(_) |
-            TyInfer(IntVar(_)) | TyInfer(FloatVar(_)) |
-            TyBareFn(..) | TyRawPtr(_) => true,
-            _ => false
-        }
-    }
-
-    /// Returns true if this type is a floating point type and false otherwise.
-    pub fn is_floating_point(&self) -> bool {
-        match self.sty {
-            TyFloat(_) |
-            TyInfer(FloatVar(_)) => true,
-            _ => false,
-        }
-    }
-
-    pub fn is_trait(&self) -> bool {
-        match self.sty {
-            TyTrait(..) => true,
-            _ => false
-        }
-    }
-
-    pub fn is_integral(&self) -> bool {
-        match self.sty {
-            TyInfer(IntVar(_)) | TyInt(_) | TyUint(_) => true,
-            _ => false
-        }
-    }
-
-    pub fn is_fresh(&self) -> bool {
-        match self.sty {
-            TyInfer(FreshTy(_)) => true,
-            TyInfer(FreshIntTy(_)) => true,
-            TyInfer(FreshFloatTy(_)) => true,
-            _ => false
-        }
-    }
-
-    pub fn is_uint(&self) -> bool {
-        match self.sty {
-            TyInfer(IntVar(_)) | TyUint(ast::UintTy::Us) => true,
-            _ => false
-        }
-    }
-
-    pub fn is_char(&self) -> bool {
-        match self.sty {
-            TyChar => true,
-            _ => false
-        }
-    }
-
-    pub fn is_bare_fn(&self) -> bool {
-        match self.sty {
-            TyBareFn(..) => true,
-            _ => false
-        }
-    }
-
-    pub fn is_bare_fn_item(&self) -> bool {
-        match self.sty {
-            TyBareFn(Some(_), _) => true,
-            _ => false
-        }
-    }
-
-    pub fn is_fp(&self) -> bool {
-        match self.sty {
-            TyInfer(FloatVar(_)) | TyFloat(_) => true,
-            _ => false
-        }
-    }
-
-    pub fn is_numeric(&self) -> bool {
-        self.is_integral() || self.is_fp()
-    }
-
-    pub fn is_signed(&self) -> bool {
-        match self.sty {
-            TyInt(_) => true,
-            _ => false
-        }
-    }
-
-    pub fn is_machine(&self) -> bool {
-        match self.sty {
-            TyInt(ast::IntTy::Is) | TyUint(ast::UintTy::Us) => false,
-            TyInt(..) | TyUint(..) | TyFloat(..) => true,
-            _ => false
-        }
-    }
-
-    // Returns the type and mutability of *ty.
-    //
-    // The parameter `explicit` indicates if this is an *explicit* dereference.
-    // Some types---notably unsafe ptrs---can only be dereferenced explicitly.
-    pub fn builtin_deref(&self, explicit: bool, pref: ty::LvaluePreference)
-        -> Option<TypeAndMut<'tcx>>
-    {
-        match self.sty {
-            TyBox(ty) => {
-                Some(TypeAndMut {
-                    ty: ty,
-                    mutbl: if pref == ty::PreferMutLvalue {
-                        hir::MutMutable
-                    } else {
-                        hir::MutImmutable
-                    },
-                })
-            },
-            TyRef(_, mt) => Some(mt),
-            TyRawPtr(mt) if explicit => Some(mt),
-            _ => None
-        }
-    }
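-
-    // Sketch of the behaviour above: `Box<T>` and `&T`/`&mut T` deref
-    // whether or not the deref is explicit, a raw pointer `*const T` only
-    // derefs when `explicit` is true, and anything else yields `None`.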
-
-    // Returns the type of ty[i]
-    pub fn builtin_index(&self) -> Option<Ty<'tcx>> {
-        match self.sty {
-            TyArray(ty, _) | TySlice(ty) => Some(ty),
-            _ => None
-        }
-    }
-
-    pub fn fn_sig(&self) -> &'tcx PolyFnSig<'tcx> {
-        match self.sty {
-            TyBareFn(_, ref f) => &f.sig,
-            _ => panic!("Ty::fn_sig() called on non-fn type: {:?}", self)
-        }
-    }
-
-    /// Returns the ABI of the given function.
-    pub fn fn_abi(&self) -> abi::Abi {
-        match self.sty {
-            TyBareFn(_, ref f) => f.abi,
-            _ => panic!("Ty::fn_abi() called on non-fn type"),
-        }
-    }
-
-    // Type accessors for substructures of types
-    pub fn fn_args(&self) -> ty::Binder<Vec<Ty<'tcx>>> {
-        self.fn_sig().inputs()
-    }
-
-    pub fn fn_ret(&self) -> Binder<FnOutput<'tcx>> {
-        self.fn_sig().output()
-    }
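-
-    // Sketch: for a bare fn type `fn(u8, u16) -> bool`, `fn_args` yields a
-    // binder over `[u8, u16]` and `fn_ret` yields a binder over
-    // `FnConverging(bool)`; calling either accessor on a non-fn type panics
-    // via `fn_sig`.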
-
-    pub fn is_fn(&self) -> bool {
-        match self.sty {
-            TyBareFn(..) => true,
-            _ => false
-        }
-    }
-
-    pub fn ty_to_def_id(&self) -> Option<DefId> {
-        match self.sty {
-            TyTrait(ref tt) => Some(tt.principal_def_id()),
-            TyStruct(def, _) |
-            TyEnum(def, _) => Some(def.did),
-            TyClosure(id, _) => Some(id),
-            _ => None
-        }
-    }
-
-    pub fn ty_adt_def(&self) -> Option<AdtDef<'tcx>> {
-        match self.sty {
-            TyStruct(adt, _) | TyEnum(adt, _) => Some(adt),
-            _ => None
-        }
-    }
-
-    /// Returns the regions directly referenced from this type (but
-    /// not types reachable from this type via `walk_tys`). This
-    /// ignores late-bound region binders.
-    pub fn regions(&self) -> Vec<ty::Region> {
-        match self.sty {
-            TyRef(region, _) => {
-                vec![*region]
-            }
-            TyTrait(ref obj) => {
-                let mut v = vec![obj.bounds.region_bound];
-                v.extend_from_slice(obj.principal.skip_binder()
-                                       .substs.regions().as_slice());
-                v
-            }
-            TyEnum(_, substs) |
-            TyStruct(_, substs) => {
-                substs.regions().as_slice().to_vec()
-            }
-            TyClosure(_, ref substs) => {
-                substs.func_substs.regions().as_slice().to_vec()
-            }
-            TyProjection(ref data) => {
-                data.trait_ref.substs.regions().as_slice().to_vec()
-            }
-            TyBareFn(..) |
-            TyBool |
-            TyChar |
-            TyInt(_) |
-            TyUint(_) |
-            TyFloat(_) |
-            TyBox(_) |
-            TyStr |
-            TyArray(_, _) |
-            TySlice(_) |
-            TyRawPtr(_) |
-            TyTuple(_) |
-            TyParam(_) |
-            TyInfer(_) |
-            TyError => {
-                vec![]
-            }
-        }
-    }
-}
diff --git a/src/librustc/middle/ty/trait_def.rs b/src/librustc/middle/ty/trait_def.rs
deleted file mode 100644 (file)
index db001ce..0000000
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use dep_graph::DepNode;
-use middle::def_id::DefId;
-use middle::ty;
-use middle::ty::fast_reject;
-use middle::ty::Ty;
-use std::borrow::{Borrow};
-use std::cell::{Cell, Ref, RefCell};
-use syntax::ast::Name;
-use rustc_front::hir;
-use util::nodemap::FnvHashMap;
-
-/// As `TypeScheme` but for a trait ref.
-pub struct TraitDef<'tcx> {
-    pub unsafety: hir::Unsafety,
-
-    /// If `true`, then this trait had the `#[rustc_paren_sugar]`
-    /// attribute, indicating that it should be used with `Foo()`
-    /// sugar. This is a temporary thing -- eventually any trait will
-    /// be usable with the sugar (or without it).
-    pub paren_sugar: bool,
-
-    /// Generic type definitions. Note that `Self` is listed in here
-    /// as having a single bound, the trait itself (e.g., in the trait
-    /// `Eq`, there is a single bound `Self : Eq`). This is so that
-    /// default methods get to assume that the `Self` parameter
-    /// implements the trait.
-    pub generics: ty::Generics<'tcx>,
-
-    pub trait_ref: ty::TraitRef<'tcx>,
-
-    /// A list of the associated types defined in this trait. Useful
-    /// for resolving `X::Foo` type markers.
-    pub associated_type_names: Vec<Name>,
-
-    // Impls of this trait. To allow for quicker lookup, the impls are indexed
-    // by a simplified version of their Self type: impls with a simplifiable
-    // Self are stored in nonblanket_impls keyed by it, while all other impls
-    // are stored in blanket_impls.
-    //
-    // These lists are tracked by `DepNode::TraitImpls`; we don't use
-    // a DepTrackingMap but instead have the `TraitDef` insert the
-    // required reads/writes.
-
-    /// Impls of the trait.
-    nonblanket_impls: RefCell<
-        FnvHashMap<fast_reject::SimplifiedType, Vec<DefId>>
-    >,
-
-    /// Blanket impls associated with the trait.
-    blanket_impls: RefCell<Vec<DefId>>,
-
-    /// Various flags
-    pub flags: Cell<TraitFlags>
-}
-
-impl<'tcx> TraitDef<'tcx> {
-    pub fn new(unsafety: hir::Unsafety,
-               paren_sugar: bool,
-               generics: ty::Generics<'tcx>,
-               trait_ref: ty::TraitRef<'tcx>,
-               associated_type_names: Vec<Name>)
-               -> TraitDef<'tcx> {
-        TraitDef {
-            paren_sugar: paren_sugar,
-            unsafety: unsafety,
-            generics: generics,
-            trait_ref: trait_ref,
-            associated_type_names: associated_type_names,
-            nonblanket_impls: RefCell::new(FnvHashMap()),
-            blanket_impls: RefCell::new(vec![]),
-            flags: Cell::new(ty::TraitFlags::NO_TRAIT_FLAGS)
-        }
-    }
-
-    pub fn def_id(&self) -> DefId {
-        self.trait_ref.def_id
-    }
-
-    // returns None if not yet calculated
-    pub fn object_safety(&self) -> Option<bool> {
-        if self.flags.get().intersects(TraitFlags::OBJECT_SAFETY_VALID) {
-            Some(self.flags.get().intersects(TraitFlags::IS_OBJECT_SAFE))
-        } else {
-            None
-        }
-    }
-
-    pub fn set_object_safety(&self, is_safe: bool) {
-        assert!(self.object_safety().map(|cs| cs == is_safe).unwrap_or(true));
-        self.flags.set(
-            self.flags.get() | if is_safe {
-                TraitFlags::OBJECT_SAFETY_VALID | TraitFlags::IS_OBJECT_SAFE
-            } else {
-                TraitFlags::OBJECT_SAFETY_VALID
-            }
-        );
-    }
-
-    fn write_trait_impls(&self, tcx: &ty::ctxt<'tcx>) {
-        tcx.dep_graph.write(DepNode::TraitImpls(self.trait_ref.def_id));
-    }
-
-    fn read_trait_impls(&self, tcx: &ty::ctxt<'tcx>) {
-        tcx.dep_graph.read(DepNode::TraitImpls(self.trait_ref.def_id));
-    }
-
-    /// Records a trait-to-implementation mapping.
-    pub fn record_impl(&self,
-                       tcx: &ty::ctxt<'tcx>,
-                       impl_def_id: DefId,
-                       impl_trait_ref: ty::TraitRef<'tcx>) {
-        debug!("TraitDef::record_impl for {:?}, from {:?}",
-               self, impl_trait_ref);
-
-        // Record the write into the impl set, but only for local
-        // impls: external impls are handled differently.
-        if impl_def_id.is_local() {
-            self.write_trait_impls(tcx);
-        }
-
-        // We don't want to borrow_mut after we already populated all impls,
-        // so check if an impl is present with an immutable borrow first.
-        if let Some(sty) = fast_reject::simplify_type(tcx,
-                                                      impl_trait_ref.self_ty(), false) {
-            if let Some(is) = self.nonblanket_impls.borrow().get(&sty) {
-                if is.contains(&impl_def_id) {
-                    return // duplicate - skip
-                }
-            }
-
-            self.nonblanket_impls.borrow_mut().entry(sty).or_insert(vec![]).push(impl_def_id)
-        } else {
-            if self.blanket_impls.borrow().contains(&impl_def_id) {
-                return // duplicate - skip
-            }
-            self.blanket_impls.borrow_mut().push(impl_def_id)
-        }
-    }
-
-    pub fn for_each_impl<F: FnMut(DefId)>(&self, tcx: &ty::ctxt<'tcx>, mut f: F)  {
-        self.read_trait_impls(tcx);
-
-        tcx.populate_implementations_for_trait_if_necessary(self.trait_ref.def_id);
-
-        for &impl_def_id in self.blanket_impls.borrow().iter() {
-            f(impl_def_id);
-        }
-
-        for v in self.nonblanket_impls.borrow().values() {
-            for &impl_def_id in v {
-                f(impl_def_id);
-            }
-        }
-    }
-
-    /// Iterate over every impl that could possibly match the
-    /// self-type `self_ty`.
-    pub fn for_each_relevant_impl<F: FnMut(DefId)>(&self,
-                                                   tcx: &ty::ctxt<'tcx>,
-                                                   self_ty: Ty<'tcx>,
-                                                   mut f: F)
-    {
-        self.read_trait_impls(tcx);
-
-        tcx.populate_implementations_for_trait_if_necessary(self.trait_ref.def_id);
-
-        for &impl_def_id in self.blanket_impls.borrow().iter() {
-            f(impl_def_id);
-        }
-
-        // simplify_type(.., false) basically replaces type parameters and
-        // projections with infer-variables. This is, of course, done on
-        // the impl trait-ref when it is instantiated, but not on the
-        // predicate trait-ref which is passed here.
-        //
-        // for example, if we match `S: Copy` against an impl like
-        // `impl<T:Copy> Copy for Option<T>`, we replace the type variable
-        // in `Option<T>` with an infer variable, to `Option<_>` (this
-        // doesn't actually change fast_reject output), but we don't
-        // replace `S` with anything - this impl of course can't be
-        // selected, and as there are hundreds of similar impls,
-        // considering them would significantly harm performance.
-        if let Some(simp) = fast_reject::simplify_type(tcx, self_ty, true) {
-            if let Some(impls) = self.nonblanket_impls.borrow().get(&simp) {
-                for &impl_def_id in impls {
-                    f(impl_def_id);
-                }
-            }
-        } else {
-            for v in self.nonblanket_impls.borrow().values() {
-                for &impl_def_id in v {
-                    f(impl_def_id);
-                }
-            }
-        }
-    }
-
-    pub fn borrow_impl_lists<'s>(&'s self, tcx: &ty::ctxt<'tcx>)
-                                 -> (Ref<'s, Vec<DefId>>,
-                                     Ref<'s, FnvHashMap<fast_reject::SimplifiedType, Vec<DefId>>>) {
-        self.read_trait_impls(tcx);
-        (self.blanket_impls.borrow(), self.nonblanket_impls.borrow())
-    }
-
-}
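-
-// Illustrative sketch of how the two impl lists interact: recording
-// `impl<T: Copy> Copy for Option<T>` files it in `nonblanket_impls` under
-// the simplified self type for `Option<_>`, while `impl<T> Trait for T`
-// (no simplifiable self type) goes into `blanket_impls`.
-// `for_each_relevant_impl` then visits every blanket impl plus only the
-// nonblanket bucket whose key matches the queried self type.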
-
-bitflags! {
-    flags TraitFlags: u32 {
-        const NO_TRAIT_FLAGS        = 0,
-        const HAS_DEFAULT_IMPL      = 1 << 0,
-        const IS_OBJECT_SAFE        = 1 << 1,
-        const OBJECT_SAFETY_VALID   = 1 << 2,
-        const IMPLS_VALID           = 1 << 3,
-    }
-}
-
diff --git a/src/librustc/middle/ty/util.rs b/src/librustc/middle/ty/util.rs
deleted file mode 100644 (file)
index 0b5c0d1..0000000
+++ /dev/null
@@ -1,892 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! misc. type-system utilities too small to deserve their own file
-
-use back::svh::Svh;
-use middle::const_eval::{self, ConstVal, ErrKind};
-use middle::const_eval::EvalHint::UncheckedExprHint;
-use middle::def_id::DefId;
-use middle::subst::{self, Subst, Substs};
-use middle::infer;
-use middle::pat_util;
-use middle::traits;
-use middle::ty::{self, Ty, TypeAndMut, TypeFlags, TypeFoldable};
-use middle::ty::{Disr, ParameterEnvironment};
-use middle::ty::TypeVariants::*;
-use util::num::ToPrimitive;
-
-use std::cmp;
-use std::hash::{Hash, SipHasher, Hasher};
-use std::rc::Rc;
-use syntax::ast::{self, Name};
-use syntax::attr::{self, AttrMetaMethods, SignedInt, UnsignedInt};
-use syntax::codemap::Span;
-
-use rustc_front::hir;
-
-pub trait IntTypeExt {
-    fn to_ty<'tcx>(&self, cx: &ty::ctxt<'tcx>) -> Ty<'tcx>;
-    fn i64_to_disr(&self, val: i64) -> Option<Disr>;
-    fn u64_to_disr(&self, val: u64) -> Option<Disr>;
-    fn disr_incr(&self, val: Disr) -> Option<Disr>;
-    fn disr_string(&self, val: Disr) -> String;
-    fn disr_wrap_incr(&self, val: Option<Disr>) -> Disr;
-}
-
-impl IntTypeExt for attr::IntType {
-    fn to_ty<'tcx>(&self, cx: &ty::ctxt<'tcx>) -> Ty<'tcx> {
-        match *self {
-            SignedInt(ast::IntTy::I8)      => cx.types.i8,
-            SignedInt(ast::IntTy::I16)     => cx.types.i16,
-            SignedInt(ast::IntTy::I32)     => cx.types.i32,
-            SignedInt(ast::IntTy::I64)     => cx.types.i64,
-            SignedInt(ast::IntTy::Is)   => cx.types.isize,
-            UnsignedInt(ast::UintTy::U8)    => cx.types.u8,
-            UnsignedInt(ast::UintTy::U16)   => cx.types.u16,
-            UnsignedInt(ast::UintTy::U32)   => cx.types.u32,
-            UnsignedInt(ast::UintTy::U64)   => cx.types.u64,
-            UnsignedInt(ast::UintTy::Us) => cx.types.usize,
-        }
-    }
-
-    fn i64_to_disr(&self, val: i64) -> Option<Disr> {
-        match *self {
-            SignedInt(ast::IntTy::I8)    => val.to_i8()  .map(|v| v as Disr),
-            SignedInt(ast::IntTy::I16)   => val.to_i16() .map(|v| v as Disr),
-            SignedInt(ast::IntTy::I32)   => val.to_i32() .map(|v| v as Disr),
-            SignedInt(ast::IntTy::I64)   => val.to_i64() .map(|v| v as Disr),
-            UnsignedInt(ast::UintTy::U8)  => val.to_u8()  .map(|v| v as Disr),
-            UnsignedInt(ast::UintTy::U16) => val.to_u16() .map(|v| v as Disr),
-            UnsignedInt(ast::UintTy::U32) => val.to_u32() .map(|v| v as Disr),
-            UnsignedInt(ast::UintTy::U64) => val.to_u64() .map(|v| v as Disr),
-
-            UnsignedInt(ast::UintTy::Us) |
-            SignedInt(ast::IntTy::Is) => unreachable!(),
-        }
-    }
-
-    fn u64_to_disr(&self, val: u64) -> Option<Disr> {
-        match *self {
-            SignedInt(ast::IntTy::I8)    => val.to_i8()  .map(|v| v as Disr),
-            SignedInt(ast::IntTy::I16)   => val.to_i16() .map(|v| v as Disr),
-            SignedInt(ast::IntTy::I32)   => val.to_i32() .map(|v| v as Disr),
-            SignedInt(ast::IntTy::I64)   => val.to_i64() .map(|v| v as Disr),
-            UnsignedInt(ast::UintTy::U8)  => val.to_u8()  .map(|v| v as Disr),
-            UnsignedInt(ast::UintTy::U16) => val.to_u16() .map(|v| v as Disr),
-            UnsignedInt(ast::UintTy::U32) => val.to_u32() .map(|v| v as Disr),
-            UnsignedInt(ast::UintTy::U64) => val.to_u64() .map(|v| v as Disr),
-
-            UnsignedInt(ast::UintTy::Us) |
-            SignedInt(ast::IntTy::Is) => unreachable!(),
-        }
-    }
-
-    fn disr_incr(&self, val: Disr) -> Option<Disr> {
-        macro_rules! add1 {
-            ($e:expr) => { $e.and_then(|v|v.checked_add(1)).map(|v| v as Disr) }
-        }
-        match *self {
-            // SignedInt repr means we *want* to reinterpret the bits
-            // treating the highest bit of Disr as a sign-bit, so
-            // cast to i64 before range-checking.
-            SignedInt(ast::IntTy::I8)    => add1!((val as i64).to_i8()),
-            SignedInt(ast::IntTy::I16)   => add1!((val as i64).to_i16()),
-            SignedInt(ast::IntTy::I32)   => add1!((val as i64).to_i32()),
-            SignedInt(ast::IntTy::I64)   => add1!(Some(val as i64)),
-
-            UnsignedInt(ast::UintTy::U8)  => add1!(val.to_u8()),
-            UnsignedInt(ast::UintTy::U16) => add1!(val.to_u16()),
-            UnsignedInt(ast::UintTy::U32) => add1!(val.to_u32()),
-            UnsignedInt(ast::UintTy::U64) => add1!(Some(val)),
-
-            UnsignedInt(ast::UintTy::Us) |
-            SignedInt(ast::IntTy::Is) => unreachable!(),
-        }
-    }
-
-    // This returns a String because (1.) it is only used for
-    // rendering an error message and (2.) a string can represent the
-    // full range from `i64::MIN` through `u64::MAX`.
-    fn disr_string(&self, val: Disr) -> String {
-        match *self {
-            SignedInt(ast::IntTy::I8)    => format!("{}", val as i8 ),
-            SignedInt(ast::IntTy::I16)   => format!("{}", val as i16),
-            SignedInt(ast::IntTy::I32)   => format!("{}", val as i32),
-            SignedInt(ast::IntTy::I64)   => format!("{}", val as i64),
-            UnsignedInt(ast::UintTy::U8)  => format!("{}", val as u8 ),
-            UnsignedInt(ast::UintTy::U16) => format!("{}", val as u16),
-            UnsignedInt(ast::UintTy::U32) => format!("{}", val as u32),
-            UnsignedInt(ast::UintTy::U64) => format!("{}", val as u64),
-
-            UnsignedInt(ast::UintTy::Us) |
-            SignedInt(ast::IntTy::Is) => unreachable!(),
-        }
-    }
-
-    fn disr_wrap_incr(&self, val: Option<Disr>) -> Disr {
-        macro_rules! add1 {
-            ($e:expr) => { ($e).wrapping_add(1) as Disr }
-        }
-        let val = val.unwrap_or(ty::INITIAL_DISCRIMINANT_VALUE);
-        match *self {
-            SignedInt(ast::IntTy::I8)    => add1!(val as i8 ),
-            SignedInt(ast::IntTy::I16)   => add1!(val as i16),
-            SignedInt(ast::IntTy::I32)   => add1!(val as i32),
-            SignedInt(ast::IntTy::I64)   => add1!(val as i64),
-            UnsignedInt(ast::UintTy::U8)  => add1!(val as u8 ),
-            UnsignedInt(ast::UintTy::U16) => add1!(val as u16),
-            UnsignedInt(ast::UintTy::U32) => add1!(val as u32),
-            UnsignedInt(ast::UintTy::U64) => add1!(val as u64),
-
-            UnsignedInt(ast::UintTy::Us) |
-            SignedInt(ast::IntTy::Is) => unreachable!(),
-        }
-    }
-}
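-
-// Hedged example of the increment helpers: for a `#[repr(u8)]` enum,
-// `disr_incr(255)` overflows the `u8` range and returns `None`, while
-// `disr_wrap_incr(Some(255))` wraps around to `0`.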
-
-
-#[derive(Copy, Clone)]
-pub enum CopyImplementationError {
-    InfrigingField(Name),
-    InfrigingVariant(Name),
-    NotAnAdt,
-    HasDestructor
-}
-
-/// Describes whether a type is representable. For types that are not
-/// representable, 'SelfRecursive' and 'ContainsRecursive' are used to
-/// distinguish between types that are recursive with themselves and types that
-/// contain a different recursive type. These cases can therefore be treated
-/// differently when reporting errors.
-///
-/// The ordering of the cases is significant. They are sorted so that cmp::max
-/// will keep the "more erroneous" of two values.
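-/// For example, `cmp::max(Representable, ContainsRecursive)` is
-/// `ContainsRecursive`, so folding per-field results with `max` keeps the
-/// worst case seen so far.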
-#[derive(Copy, Clone, PartialOrd, Ord, Eq, PartialEq, Debug)]
-pub enum Representability {
-    Representable,
-    ContainsRecursive,
-    SelfRecursive,
-}
-
-impl<'a, 'tcx> ParameterEnvironment<'a, 'tcx> {
-    pub fn can_type_implement_copy(&self, self_type: Ty<'tcx>, span: Span)
-                                   -> Result<(),CopyImplementationError> {
-        let tcx = self.tcx;
-
-        // FIXME: (@jroesch) float this code up
-        let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(self.clone()));
-
-        let adt = match self_type.sty {
-            ty::TyStruct(struct_def, substs) => {
-                for field in struct_def.all_fields() {
-                    let field_ty = field.ty(tcx, substs);
-                    if infcx.type_moves_by_default(field_ty, span) {
-                        return Err(CopyImplementationError::InfrigingField(
-                            field.name))
-                    }
-                }
-                struct_def
-            }
-            ty::TyEnum(enum_def, substs) => {
-                for variant in &enum_def.variants {
-                    for field in &variant.fields {
-                        let field_ty = field.ty(tcx, substs);
-                        if infcx.type_moves_by_default(field_ty, span) {
-                            return Err(CopyImplementationError::InfrigingVariant(
-                                variant.name))
-                        }
-                    }
-                }
-                enum_def
-            }
-            _ => return Err(CopyImplementationError::NotAnAdt),
-        };
-
-        if adt.has_dtor() {
-            return Err(CopyImplementationError::HasDestructor)
-        }
-
-        Ok(())
-    }
-}
-
-impl<'tcx> ty::ctxt<'tcx> {
-    pub fn pat_contains_ref_binding(&self, pat: &hir::Pat) -> Option<hir::Mutability> {
-        pat_util::pat_contains_ref_binding(&self.def_map, pat)
-    }
-
-    pub fn arm_contains_ref_binding(&self, arm: &hir::Arm) -> Option<hir::Mutability> {
-        pat_util::arm_contains_ref_binding(&self.def_map, arm)
-    }
-
-    /// Returns the type of the element at index `i` in the tuple or tuple-like type `ty`.
-    /// For an enum `ty`, `variant` is `None` only if `ty` is a univariant enum.
-    pub fn positional_element_ty(&self,
-                                 ty: Ty<'tcx>,
-                                 i: usize,
-                                 variant: Option<DefId>) -> Option<Ty<'tcx>> {
-        match (&ty.sty, variant) {
-            (&TyStruct(def, substs), None) => {
-                def.struct_variant().fields.get(i).map(|f| f.ty(self, substs))
-            }
-            (&TyEnum(def, substs), Some(vid)) => {
-                def.variant_with_id(vid).fields.get(i).map(|f| f.ty(self, substs))
-            }
-            (&TyEnum(def, substs), None) => {
-                assert!(def.is_univariant());
-                def.variants[0].fields.get(i).map(|f| f.ty(self, substs))
-            }
-            (&TyTuple(ref v), None) => v.get(i).cloned(),
-            _ => None
-        }
-    }
-
-    /// Returns the type of the field named `n` in the struct or struct-like type `ty`.
-    /// For an enum `ty`, `variant` must be some def id.
-    pub fn named_element_ty(&self,
-                            ty: Ty<'tcx>,
-                            n: Name,
-                            variant: Option<DefId>) -> Option<Ty<'tcx>> {
-        match (&ty.sty, variant) {
-            (&TyStruct(def, substs), None) => {
-                def.struct_variant().find_field_named(n).map(|f| f.ty(self, substs))
-            }
-            (&TyEnum(def, substs), Some(vid)) => {
-                def.variant_with_id(vid).find_field_named(n).map(|f| f.ty(self, substs))
-            }
-            _ => return None
-        }
-    }
-
-    /// Returns `(normalized_type, ty)`, where `normalized_type` is the
-    /// IntType representation of one of {i64,i32,i16,i8,u64,u32,u16,u8},
-    /// and `ty` is the original type (i.e. may include `isize` or
-    /// `usize`).
-    pub fn enum_repr_type(&self, opt_hint: Option<&attr::ReprAttr>)
-                          -> (attr::IntType, Ty<'tcx>) {
-        let repr_type = match opt_hint {
-            // Feed in the given type
-            Some(&attr::ReprInt(_, int_t)) => int_t,
-            // ... but provide sensible default if none provided
-            //
-            // NB. Historically `fn enum_variants` generated i64 here, while
-            // rustc_typeck::check would generate isize.
-            _ => SignedInt(ast::IntTy::Is),
-        };
-
-        let repr_type_ty = repr_type.to_ty(self);
-        let repr_type = match repr_type {
-            SignedInt(ast::IntTy::Is) =>
-                SignedInt(self.sess.target.int_type),
-            UnsignedInt(ast::UintTy::Us) =>
-                UnsignedInt(self.sess.target.uint_type),
-            other => other
-        };
-
-        (repr_type, repr_type_ty)
-    }
-
-    /// Returns the deeply last field of nested structures, or the same type,
-    /// if not a structure at all. Corresponds to the only possible unsized
-    /// field, and its type can be used to determine unsizing strategy.
-    pub fn struct_tail(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
-        while let TyStruct(def, substs) = ty.sty {
-            match def.struct_variant().fields.last() {
-                Some(f) => ty = f.ty(self, substs),
-                None => break
-            }
-        }
-        ty
-    }
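-
-    // Sketch with hypothetical types: given `struct Inner { tail: [u8] }`
-    // and `struct Outer { inner: Inner }`, `struct_tail(Outer)` follows the
-    // last field of each nested struct and returns `[u8]`.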
-
-    /// Same as applying struct_tail on `source` and `target`, but only
-    /// keeps going as long as the two types are instances of the same
-    /// structure definitions.
-    /// For `(Foo<Foo<T>>, Foo<Trait>)`, the result will be `(Foo<T>, Trait)`,
-    /// whereas struct_tail produces `T`, and `Trait`, respectively.
-    pub fn struct_lockstep_tails(&self,
-                                 source: Ty<'tcx>,
-                                 target: Ty<'tcx>)
-                                 -> (Ty<'tcx>, Ty<'tcx>) {
-        let (mut a, mut b) = (source, target);
-        while let (&TyStruct(a_def, a_substs), &TyStruct(b_def, b_substs)) = (&a.sty, &b.sty) {
-            if a_def != b_def {
-                break;
-            }
-            if let Some(f) = a_def.struct_variant().fields.last() {
-                a = f.ty(self, a_substs);
-                b = f.ty(self, b_substs);
-            } else {
-                break;
-            }
-        }
-        (a, b)
-    }
-
-    /// Returns the repeat count for a repeating vector expression.
-    pub fn eval_repeat_count(&self, count_expr: &hir::Expr) -> usize {
-        let hint = UncheckedExprHint(self.types.usize);
-        match const_eval::eval_const_expr_partial(self, count_expr, hint, None) {
-            Ok(val) => {
-                let found = match val {
-                    ConstVal::Uint(count) => return count as usize,
-                    ConstVal::Int(count) if count >= 0 => return count as usize,
-                    const_val => const_val.description(),
-                };
-                span_err!(self.sess, count_expr.span, E0306,
-                    "expected positive integer for repeat count, found {}",
-                    found);
-            }
-            Err(err) => {
-                let err_msg = match count_expr.node {
-                    hir::ExprPath(None, hir::Path {
-                        global: false,
-                        ref segments,
-                        ..
-                    }) if segments.len() == 1 =>
-                        format!("found variable"),
-                    _ => match err.kind {
-                        ErrKind::MiscCatchAll => format!("but found {}", err.description()),
-                        _ => format!("but {}", err.description())
-                    }
-                };
-                span_err!(self.sess, count_expr.span, E0307,
-                    "expected constant integer for repeat count, {}", err_msg);
-            }
-        }
-        0
-    }
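-
-    // Sketch: for `[0u8; 4]` the count expression evaluates to
-    // `ConstVal::Uint(4)` and 4 is returned; a negative or non-constant
-    // count reports E0306/E0307 and falls back to 0.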
-
-    /// Given a set of predicates that apply to an object type, returns
-    /// the region bounds that the (erased) `Self` type must
-    /// outlive. Precisely *because* the `Self` type is erased, the
-    /// parameter `erased_self_ty` must be supplied to indicate what type
-    /// has been used to represent `Self` in the predicates
-    /// themselves. This should really be a unique type; `FreshTy(0)` is a
-    /// popular choice.
-    ///
-    /// NB: in some cases, particularly around higher-ranked bounds,
-    /// this function returns a kind of conservative approximation.
-    /// That is, all regions returned by this function are definitely
-    /// required, but there may be other region bounds that are not
-    /// returned, as well as requirements like `for<'a> T: 'a`.
-    ///
-    /// Requires that trait definitions have been processed so that we can
-    /// elaborate predicates and walk supertraits.
-    pub fn required_region_bounds(&self,
-                                  erased_self_ty: Ty<'tcx>,
-                                  predicates: Vec<ty::Predicate<'tcx>>)
-                                  -> Vec<ty::Region>    {
-        debug!("required_region_bounds(erased_self_ty={:?}, predicates={:?})",
-               erased_self_ty,
-               predicates);
-
-        assert!(!erased_self_ty.has_escaping_regions());
-
-        traits::elaborate_predicates(self, predicates)
-            .filter_map(|predicate| {
-                match predicate {
-                    ty::Predicate::Projection(..) |
-                    ty::Predicate::Trait(..) |
-                    ty::Predicate::Equate(..) |
-                    ty::Predicate::WellFormed(..) |
-                    ty::Predicate::ObjectSafe(..) |
-                    ty::Predicate::RegionOutlives(..) => {
-                        None
-                    }
-                    ty::Predicate::TypeOutlives(ty::Binder(ty::OutlivesPredicate(t, r))) => {
-                        // Search for a bound of the form `erased_self_ty
-                        // : 'a`, but be wary of something like `for<'a>
-                        // erased_self_ty : 'a` (we interpret a
-                        // higher-ranked bound like that as 'static,
-                        // though at present the code in `fulfill.rs`
-                        // considers such bounds to be unsatisfiable, so
-                        // it's kind of a moot point since you could never
-                        // construct such an object, but this seems
-                        // correct even if that code changes).
-                        if t == erased_self_ty && !r.has_escaping_regions() {
-                            Some(r)
-                        } else {
-                            None
-                        }
-                    }
-                }
-            })
-            .collect()
-    }
-
-    /// Creates a hash of the type `Ty` which will be the same no matter what crate
-    /// context it's calculated within. This is used by the `type_id` intrinsic.
-    pub fn hash_crate_independent(&self, ty: Ty<'tcx>, svh: &Svh) -> u64 {
-        let mut state = SipHasher::new();
-        helper(self, ty, svh, &mut state);
-        return state.finish();
-
-        fn helper<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, svh: &Svh,
-                        state: &mut SipHasher) {
-            macro_rules! byte { ($b:expr) => { ($b as u8).hash(state) } }
-            macro_rules! hash { ($e:expr) => { $e.hash(state) }  }
-
-            let region = |state: &mut SipHasher, r: ty::Region| {
-                match r {
-                    ty::ReStatic => {}
-                    ty::ReLateBound(db, ty::BrAnon(i)) => {
-                        db.hash(state);
-                        i.hash(state);
-                    }
-                    ty::ReEmpty |
-                    ty::ReEarlyBound(..) |
-                    ty::ReLateBound(..) |
-                    ty::ReFree(..) |
-                    ty::ReScope(..) |
-                    ty::ReVar(..) |
-                    ty::ReSkolemized(..) => {
-                        tcx.sess.bug("unexpected region found when hashing a type")
-                    }
-                }
-            };
-            let did = |state: &mut SipHasher, did: DefId| {
-                let h = if did.is_local() {
-                    svh.clone()
-                } else {
-                    tcx.sess.cstore.crate_hash(did.krate)
-                };
-                h.as_str().hash(state);
-                did.index.hash(state);
-            };
-            let mt = |state: &mut SipHasher, mt: TypeAndMut| {
-                mt.mutbl.hash(state);
-            };
-            let fn_sig = |state: &mut SipHasher, sig: &ty::Binder<ty::FnSig<'tcx>>| {
-                let sig = tcx.anonymize_late_bound_regions(sig).0;
-                for a in &sig.inputs { helper(tcx, *a, svh, state); }
-                if let ty::FnConverging(output) = sig.output {
-                    helper(tcx, output, svh, state);
-                }
-            };
-            ty.maybe_walk(|ty| {
-                match ty.sty {
-                    TyBool => byte!(2),
-                    TyChar => byte!(3),
-                    TyInt(i) => {
-                        byte!(4);
-                        hash!(i);
-                    }
-                    TyUint(u) => {
-                        byte!(5);
-                        hash!(u);
-                    }
-                    TyFloat(f) => {
-                        byte!(6);
-                        hash!(f);
-                    }
-                    TyStr => {
-                        byte!(7);
-                    }
-                    TyEnum(d, _) => {
-                        byte!(8);
-                        did(state, d.did);
-                    }
-                    TyBox(_) => {
-                        byte!(9);
-                    }
-                    TyArray(_, n) => {
-                        byte!(10);
-                        n.hash(state);
-                    }
-                    TySlice(_) => {
-                        byte!(11);
-                    }
-                    TyRawPtr(m) => {
-                        byte!(12);
-                        mt(state, m);
-                    }
-                    TyRef(r, m) => {
-                        byte!(13);
-                        region(state, *r);
-                        mt(state, m);
-                    }
-                    TyBareFn(opt_def_id, ref b) => {
-                        byte!(14);
-                        hash!(opt_def_id);
-                        hash!(b.unsafety);
-                        hash!(b.abi);
-                        fn_sig(state, &b.sig);
-                        return false;
-                    }
-                    TyTrait(ref data) => {
-                        byte!(17);
-                        did(state, data.principal_def_id());
-                        hash!(data.bounds);
-
-                        let principal = tcx.anonymize_late_bound_regions(&data.principal).0;
-                        for subty in &principal.substs.types {
-                            helper(tcx, subty, svh, state);
-                        }
-
-                        return false;
-                    }
-                    TyStruct(d, _) => {
-                        byte!(18);
-                        did(state, d.did);
-                    }
-                    TyTuple(ref inner) => {
-                        byte!(19);
-                        hash!(inner.len());
-                    }
-                    TyParam(p) => {
-                        byte!(20);
-                        hash!(p.space);
-                        hash!(p.idx);
-                        hash!(p.name.as_str());
-                    }
-                    TyInfer(_) => unreachable!(),
-                    TyError => byte!(21),
-                    TyClosure(d, _) => {
-                        byte!(22);
-                        did(state, d);
-                    }
-                    TyProjection(ref data) => {
-                        byte!(23);
-                        did(state, data.trait_ref.def_id);
-                        hash!(data.item_name.as_str());
-                    }
-                }
-                true
-            });
-        }
-    }
-
-    /// Returns true if this ADT is a dtorck type.
-    ///
-    /// Invoking the destructor of a dtorck type during usual cleanup
-    /// (e.g. the glue emitted for stack unwinding) requires all
-    /// lifetimes in the type-structure of `adt` to strictly outlive
-    /// the adt value itself.
-    ///
-    /// If `adt` is not dtorck, then the adt's destructor can be
-    /// invoked even when there are lifetimes in the type-structure of
-    /// `adt` that do not strictly outlive the adt value itself.
-    /// (This allows programs to make cyclic structures without
-    /// resorting to unsafe means; see RFCs 769 and 1238.)
-    pub fn is_adt_dtorck(&self, adt: ty::AdtDef<'tcx>) -> bool {
-        let dtor_method = match adt.destructor() {
-            Some(dtor) => dtor,
-            None => return false
-        };
-
-        // RFC 1238: if the destructor method is tagged with the
-        // attribute `unsafe_destructor_blind_to_params`, then the
-        // compiler is being instructed to *assume* that the
-        // destructor will not access borrowed data,
-        // even if such data is otherwise reachable.
-        //
-        // Such access can be in plain sight (e.g. dereferencing
-        // `*foo.0` of `Foo<'a>(&'a u32)`) or indirectly hidden
-        // (e.g. calling `foo.0.clone()` of `Foo<T:Clone>`).
-        return !self.has_attr(dtor_method, "unsafe_destructor_blind_to_params");
-    }
-}
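-
-// Hedged example: a struct with a `Drop` impl whose destructor is not
-// tagged `unsafe_destructor_blind_to_params` is dtorck (its drop glue may
-// observe borrowed data), while a struct with no destructor at all is not.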
-
-#[derive(Debug)]
-pub struct ImplMethod<'tcx> {
-    pub method: Rc<ty::Method<'tcx>>,
-    pub substs: Substs<'tcx>,
-    pub is_provided: bool
-}
-
-impl<'tcx> ty::ctxt<'tcx> {
-    pub fn get_impl_method(&self,
-                           impl_def_id: DefId,
-                           substs: Substs<'tcx>,
-                           name: Name)
-                           -> ImplMethod<'tcx>
-    {
-        // there don't seem to be nicer accessors to these:
-        let impl_or_trait_items_map = self.impl_or_trait_items.borrow();
-
-        for impl_item in &self.impl_items.borrow()[&impl_def_id] {
-            if let ty::MethodTraitItem(ref meth) =
-                impl_or_trait_items_map[&impl_item.def_id()] {
-                if meth.name == name {
-                    return ImplMethod {
-                        method: meth.clone(),
-                        substs: substs,
-                        is_provided: false
-                    }
-                }
-            }
-        }
-
-        // It is not in the impl - get the default from the trait.
-        let trait_ref = self.impl_trait_ref(impl_def_id).unwrap();
-        for trait_item in self.trait_items(trait_ref.def_id).iter() {
-            if let &ty::MethodTraitItem(ref meth) = trait_item {
-                if meth.name == name {
-                    let impl_to_trait_substs = self
-                        .make_substs_for_receiver_types(&trait_ref, meth);
-                    return ImplMethod {
-                        method: meth.clone(),
-                        substs: impl_to_trait_substs.subst(self, &substs),
-                        is_provided: true
-                    }
-                }
-            }
-        }
-
-        self.sess.bug(&format!("method {:?} not found in {:?}",
-                               name, impl_def_id))
-    }
-}
-
-impl<'tcx> ty::TyS<'tcx> {
-    fn impls_bound<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
-                       bound: ty::BuiltinBound,
-                       span: Span)
-                       -> bool
-    {
-        let tcx = param_env.tcx;
-        let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(param_env.clone()));
-
-        let is_impld = traits::type_known_to_meet_builtin_bound(&infcx,
-                                                                self, bound, span);
-
-        debug!("Ty::impls_bound({:?}, {:?}) = {:?}",
-               self, bound, is_impld);
-
-        is_impld
-    }
-
-    // FIXME (@jroesch): I made this public to use it, not sure if should be private
-    pub fn moves_by_default<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
-                           span: Span) -> bool {
-        if self.flags.get().intersects(TypeFlags::MOVENESS_CACHED) {
-            return self.flags.get().intersects(TypeFlags::MOVES_BY_DEFAULT);
-        }
-
-        assert!(!self.needs_infer());
-
-        // Fast-path for primitive types
-        let result = match self.sty {
-            TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
-            TyRawPtr(..) | TyBareFn(..) | TyRef(_, TypeAndMut {
-                mutbl: hir::MutImmutable, ..
-            }) => Some(false),
-
-            TyStr | TyBox(..) | TyRef(_, TypeAndMut {
-                mutbl: hir::MutMutable, ..
-            }) => Some(true),
-
-            TyArray(..) | TySlice(_) | TyTrait(..) | TyTuple(..) |
-            TyClosure(..) | TyEnum(..) | TyStruct(..) |
-            TyProjection(..) | TyParam(..) | TyInfer(..) | TyError => None
-        }.unwrap_or_else(|| !self.impls_bound(param_env, ty::BoundCopy, span));
-
-        if !self.has_param_types() && !self.has_self_ty() {
-            self.flags.set(self.flags.get() | if result {
-                TypeFlags::MOVENESS_CACHED | TypeFlags::MOVES_BY_DEFAULT
-            } else {
-                TypeFlags::MOVENESS_CACHED
-            });
-        }
-
-        result
-    }
-
-    #[inline]
-    pub fn is_sized<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
-                        span: Span) -> bool
-    {
-        if self.flags.get().intersects(TypeFlags::SIZEDNESS_CACHED) {
-            return self.flags.get().intersects(TypeFlags::IS_SIZED);
-        }
-
-        self.is_sized_uncached(param_env, span)
-    }
-
-    fn is_sized_uncached<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
-                             span: Span) -> bool {
-        assert!(!self.needs_infer());
-
-        // Fast-path for primitive types
-        let result = match self.sty {
-            TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
-            TyBox(..) | TyRawPtr(..) | TyRef(..) | TyBareFn(..) |
-            TyArray(..) | TyTuple(..) | TyClosure(..) => Some(true),
-
-            TyStr | TyTrait(..) | TySlice(_) => Some(false),
-
-            TyEnum(..) | TyStruct(..) | TyProjection(..) | TyParam(..) |
-            TyInfer(..) | TyError => None
-        }.unwrap_or_else(|| self.impls_bound(param_env, ty::BoundSized, span));
-
-        if !self.has_param_types() && !self.has_self_ty() {
-            self.flags.set(self.flags.get() | if result {
-                TypeFlags::SIZEDNESS_CACHED | TypeFlags::IS_SIZED
-            } else {
-                TypeFlags::SIZEDNESS_CACHED
-            });
-        }
-
-        result
-    }
-
-
-    /// Check whether a type is representable. This means it cannot contain unboxed
-    /// structural recursion. This check is needed for structs and enums.
-    pub fn is_representable(&'tcx self, cx: &ty::ctxt<'tcx>, sp: Span) -> Representability {
-
-        // Iterate until something non-representable is found
-        fn find_nonrepresentable<'tcx, It: Iterator<Item=Ty<'tcx>>>(cx: &ty::ctxt<'tcx>,
-                                                                    sp: Span,
-                                                                    seen: &mut Vec<Ty<'tcx>>,
-                                                                    iter: It)
-                                                                    -> Representability {
-            iter.fold(Representability::Representable,
-                      |r, ty| cmp::max(r, is_type_structurally_recursive(cx, sp, seen, ty)))
-        }
-
-        fn are_inner_types_recursive<'tcx>(cx: &ty::ctxt<'tcx>, sp: Span,
-                                           seen: &mut Vec<Ty<'tcx>>, ty: Ty<'tcx>)
-                                           -> Representability {
-            match ty.sty {
-                TyTuple(ref ts) => {
-                    find_nonrepresentable(cx, sp, seen, ts.iter().cloned())
-                }
-                // Fixed-length vectors.
-                // FIXME(#11924) Behavior undecided for zero-length vectors.
-                TyArray(ty, _) => {
-                    is_type_structurally_recursive(cx, sp, seen, ty)
-                }
-                TyStruct(def, substs) | TyEnum(def, substs) => {
-                    find_nonrepresentable(cx,
-                                          sp,
-                                          seen,
-                                          def.all_fields().map(|f| f.ty(cx, substs)))
-                }
-                TyClosure(..) => {
-                    // this check is run on type definitions, so we don't expect
-                    // to see closure types
-                    cx.sess.bug(&format!("requires check invoked on inapplicable type: {:?}", ty))
-                }
-                _ => Representability::Representable,
-            }
-        }
-
-        fn same_struct_or_enum<'tcx>(ty: Ty<'tcx>, def: ty::AdtDef<'tcx>) -> bool {
-            match ty.sty {
-                TyStruct(ty_def, _) | TyEnum(ty_def, _) => {
-                     ty_def == def
-                }
-                _ => false
-            }
-        }
-
-        fn same_type<'tcx>(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
-            match (&a.sty, &b.sty) {
-                (&TyStruct(did_a, ref substs_a), &TyStruct(did_b, ref substs_b)) |
-                (&TyEnum(did_a, ref substs_a), &TyEnum(did_b, ref substs_b)) => {
-                    if did_a != did_b {
-                        return false;
-                    }
-
-                    let types_a = substs_a.types.get_slice(subst::TypeSpace);
-                    let types_b = substs_b.types.get_slice(subst::TypeSpace);
-
-                    let mut pairs = types_a.iter().zip(types_b);
-
-                    pairs.all(|(&a, &b)| same_type(a, b))
-                }
-                _ => {
-                    a == b
-                }
-            }
-        }
-
-        // Does the type `ty` directly (without indirection through a pointer)
-        // contain any types on stack `seen`?
-        fn is_type_structurally_recursive<'tcx>(cx: &ty::ctxt<'tcx>,
-                                                sp: Span,
-                                                seen: &mut Vec<Ty<'tcx>>,
-                                                ty: Ty<'tcx>) -> Representability {
-            debug!("is_type_structurally_recursive: {:?}", ty);
-
-            match ty.sty {
-                TyStruct(def, _) | TyEnum(def, _) => {
-                    {
-                        // Iterate through stack of previously seen types.
-                        let mut iter = seen.iter();
-
-                        // The first item in `seen` is the type we are actually curious about.
-                        // We want to return SelfRecursive if this type contains itself.
-                        // It is important that we DON'T take generic parameters into account
-                        // for this check, so that Bar<T> in this example counts as SelfRecursive:
-                        //
-                        // struct Foo;
-                        // struct Bar<T> { x: Bar<Foo> }
-
-                        match iter.next() {
-                            Some(&seen_type) => {
-                                if same_struct_or_enum(seen_type, def) {
-                                    debug!("SelfRecursive: {:?} contains {:?}",
-                                           seen_type,
-                                           ty);
-                                    return Representability::SelfRecursive;
-                                }
-                            }
-                            None => {}
-                        }
-
-                        // We also need to know whether the first item contains other types
-                        // that are structurally recursive. If we don't catch this case, we
-                        // will recurse infinitely for some inputs.
-                        //
-                        // It is important that we DO take generic parameters into account
-                        // here, so that code like this is considered SelfRecursive, not
-                        // ContainsRecursive:
-                        //
-                        // struct Foo { Option<Option<Foo>> }
-
-                        for &seen_type in iter {
-                            if same_type(ty, seen_type) {
-                                debug!("ContainsRecursive: {:?} contains {:?}",
-                                       seen_type,
-                                       ty);
-                                return Representability::ContainsRecursive;
-                            }
-                        }
-                    }
-
-                    // For structs and enums, track all previously seen types by pushing them
-                    // onto the 'seen' stack.
-                    seen.push(ty);
-                    let out = are_inner_types_recursive(cx, sp, seen, ty);
-                    seen.pop();
-                    out
-                }
-                _ => {
-                    // No need to push in other cases.
-                    are_inner_types_recursive(cx, sp, seen, ty)
-                }
-            }
-        }
-
-        debug!("is_type_representable: {:?}", self);
-
-        // To avoid a stack overflow when checking an enum variant or struct that
-        // contains a different, structurally recursive type, maintain a stack
-        // of seen types and check recursion for each of them (issues #3008, #3779).
-        let mut seen: Vec<Ty> = Vec::new();
-        let r = is_type_structurally_recursive(cx, sp, &mut seen, self);
-        debug!("is_type_representable: {:?} is {:?}", self, r);
-        r
-    }
-}
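
The deleted `is_representable` above detects structural recursion by keeping a stack of previously seen struct/enum definitions and reporting `SelfRecursive` as soon as a field reaches a definition already on that stack; pointer indirection (`TyBox`, `TyRef`) is deliberately not followed. A minimal standalone sketch of that seen-stack idea, using toy type definitions rather than rustc's `Ty`/`AdtDef` (all names below are illustrative):

```
use std::collections::HashMap;

// Toy model: a struct is unrepresentable if, following its fields *without*
// pointer indirection, we reach a definition already on the `seen` stack.
enum FieldTy {
    Int,
    Named(&'static str), // another struct, stored inline
    Boxed(&'static str), // another struct, behind indirection
}

#[derive(Debug, PartialEq)]
enum Representability {
    Representable,
    SelfRecursive,
}

fn check<'a>(name: &'a str,
             defs: &HashMap<&'a str, Vec<FieldTy>>,
             seen: &mut Vec<&'a str>)
             -> Representability {
    if seen.contains(&name) {
        return Representability::SelfRecursive;
    }
    seen.push(name);
    let recursive = defs[name].iter().any(|field| match field {
        // Inline fields are walked; indirection stops the walk, mirroring
        // how TyBox/TyRef do not count as structural containment above.
        FieldTy::Named(inner) => check(*inner, defs, seen) == Representability::SelfRecursive,
        FieldTy::Int | FieldTy::Boxed(_) => false,
    });
    seen.pop();
    if recursive { Representability::SelfRecursive } else { Representability::Representable }
}

fn main() {
    let mut defs = HashMap::new();
    // struct List { x: i32, next: List }                -- not representable
    defs.insert("List", vec![FieldTy::Int, FieldTy::Named("List")]);
    // struct BoxedList { x: i32, next: Box<BoxedList> } -- fine
    defs.insert("BoxedList", vec![FieldTy::Int, FieldTy::Boxed("BoxedList")]);

    assert_eq!(check("List", &defs, &mut vec![]), Representability::SelfRecursive);
    assert_eq!(check("BoxedList", &defs, &mut vec![]), Representability::Representable);
}
```

This is also why `struct List { next: Box<List> }` compiles while the fully inline version is rejected: the indirection gives the type a finite size.
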
diff --git a/src/librustc/middle/ty/walk.rs b/src/librustc/middle/ty/walk.rs
deleted file mode 100644 (file)
index 81cad44..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! An iterator over the type substructure.
-//! WARNING: this does not keep track of the region depth.
-
-use middle::ty::{self, Ty};
-use std::iter::Iterator;
-use std::vec::IntoIter;
-
-pub struct TypeWalker<'tcx> {
-    stack: Vec<Ty<'tcx>>,
-    last_subtree: usize,
-}
-
-impl<'tcx> TypeWalker<'tcx> {
-    pub fn new(ty: Ty<'tcx>) -> TypeWalker<'tcx> {
-        TypeWalker { stack: vec!(ty), last_subtree: 1, }
-    }
-
-    /// Skips the subtree of types corresponding to the last type
-    /// returned by `next()`.
-    ///
-    /// Example: Imagine you are walking `Foo<Bar<int>, usize>`.
-    ///
-    /// ```
-    /// let mut iter: TypeWalker = ...;
-    /// iter.next(); // yields Foo
-    /// iter.next(); // yields Bar<int>
-    /// iter.skip_current_subtree(); // skips int
-    /// iter.next(); // yields usize
-    /// ```
-    pub fn skip_current_subtree(&mut self) {
-        self.stack.truncate(self.last_subtree);
-    }
-}
-
-impl<'tcx> Iterator for TypeWalker<'tcx> {
-    type Item = Ty<'tcx>;
-
-    fn next(&mut self) -> Option<Ty<'tcx>> {
-        debug!("next(): stack={:?}", self.stack);
-        match self.stack.pop() {
-            None => {
-                return None;
-            }
-            Some(ty) => {
-                self.last_subtree = self.stack.len();
-                push_subtypes(&mut self.stack, ty);
-                debug!("next: stack={:?}", self.stack);
-                Some(ty)
-            }
-        }
-    }
-}
-
-pub fn walk_shallow<'tcx>(ty: Ty<'tcx>) -> IntoIter<Ty<'tcx>> {
-    let mut stack = vec![];
-    push_subtypes(&mut stack, ty);
-    stack.into_iter()
-}
-
-fn push_subtypes<'tcx>(stack: &mut Vec<Ty<'tcx>>, parent_ty: Ty<'tcx>) {
-    match parent_ty.sty {
-        ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) |
-        ty::TyStr | ty::TyInfer(_) | ty::TyParam(_) | ty::TyError => {
-        }
-        ty::TyBox(ty) | ty::TyArray(ty, _) | ty::TySlice(ty) => {
-            stack.push(ty);
-        }
-        ty::TyRawPtr(ref mt) | ty::TyRef(_, ref mt) => {
-            stack.push(mt.ty);
-        }
-        ty::TyProjection(ref data) => {
-            push_reversed(stack, data.trait_ref.substs.types.as_slice());
-        }
-        ty::TyTrait(box ty::TraitTy { ref principal, ref bounds }) => {
-            push_reversed(stack, principal.substs().types.as_slice());
-            push_reversed(stack, &bounds.projection_bounds.iter().map(|pred| {
-                pred.0.ty
-            }).collect::<Vec<_>>());
-        }
-        ty::TyEnum(_, ref substs) |
-        ty::TyStruct(_, ref substs) => {
-            push_reversed(stack, substs.types.as_slice());
-        }
-        ty::TyClosure(_, ref substs) => {
-            push_reversed(stack, substs.func_substs.types.as_slice());
-            push_reversed(stack, &substs.upvar_tys);
-        }
-        ty::TyTuple(ref ts) => {
-            push_reversed(stack, ts);
-        }
-        ty::TyBareFn(_, ref ft) => {
-            push_sig_subtypes(stack, &ft.sig);
-        }
-    }
-}
-
-fn push_sig_subtypes<'tcx>(stack: &mut Vec<Ty<'tcx>>, sig: &ty::PolyFnSig<'tcx>) {
-    match sig.0.output {
-        ty::FnConverging(output) => { stack.push(output); }
-        ty::FnDiverging => { }
-    }
-    push_reversed(stack, &sig.0.inputs);
-}
-
-fn push_reversed<'tcx>(stack: &mut Vec<Ty<'tcx>>, tys: &[Ty<'tcx>]) {
-    // We push slices on the stack in reverse order so as to
-    // maintain a pre-order traversal. As of the time of this
-    // writing, the fact that the traversal is pre-order is not
-    // known to be significant to any code, but it seems like the
-    // natural order one would expect (basically, the order of the
-    // types as they are written).
-    for &ty in tys.iter().rev() {
-        stack.push(ty);
-    }
-}
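
The walker deleted here keeps its pre-order invariant with one trick: before pushing a node's children it records the current stack depth in `last_subtree`, so `skip_current_subtree` can simply truncate the stack back to that depth. A self-contained sketch of the same mechanism over a toy type tree (illustrative names, not rustc's interned `Ty`):

```
// Pre-order walk with a skip, mirroring the deleted TypeWalker.
#[derive(Debug)]
enum Ty {
    Leaf(&'static str),
    App(&'static str, Vec<Ty>), // e.g. Foo<Bar<int>, usize>
}

struct TypeWalker<'t> {
    stack: Vec<&'t Ty>,
    last_subtree: usize,
}

impl<'t> TypeWalker<'t> {
    fn new(ty: &'t Ty) -> TypeWalker<'t> {
        TypeWalker { stack: vec![ty], last_subtree: 1 }
    }

    /// Drops the children of the most recently yielded type.
    fn skip_current_subtree(&mut self) {
        self.stack.truncate(self.last_subtree);
    }
}

impl<'t> Iterator for TypeWalker<'t> {
    type Item = &'t Ty;

    fn next(&mut self) -> Option<&'t Ty> {
        let ty = self.stack.pop()?;
        self.last_subtree = self.stack.len();
        // Children are pushed in reverse so they come back out in source order.
        if let Ty::App(_, args) = ty {
            for arg in args.iter().rev() {
                self.stack.push(arg);
            }
        }
        Some(ty)
    }
}

fn main() {
    // Foo<Bar<int>, usize>
    let ty = Ty::App("Foo", vec![Ty::App("Bar", vec![Ty::Leaf("int")]),
                                 Ty::Leaf("usize")]);
    let mut iter = TypeWalker::new(&ty);
    println!("{:?}", iter.next()); // Foo
    println!("{:?}", iter.next()); // Bar<int>
    iter.skip_current_subtree();   // skips int
    println!("{:?}", iter.next()); // usize
    println!("{:?}", iter.next()); // None
}
```
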
diff --git a/src/librustc/middle/ty/wf.rs b/src/librustc/middle/ty/wf.rs
deleted file mode 100644 (file)
index 5f0fc30..0000000
+++ /dev/null
@@ -1,509 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use middle::def_id::DefId;
-use middle::infer::InferCtxt;
-use middle::ty::outlives::{self, Component};
-use middle::subst::Substs;
-use middle::traits;
-use middle::ty::{self, ToPredicate, Ty, TypeFoldable};
-use std::iter::once;
-use syntax::ast;
-use syntax::codemap::Span;
-use util::common::ErrorReported;
-
-/// Returns the set of obligations needed to make `ty` well-formed.
-/// If `ty` contains unresolved inference variables, this may include
-/// further WF obligations. However, if `ty` IS an unresolved
-/// inference variable, returns `None`, because we are not able to
-/// make any progress at all. This is to prevent "livelock" where we
-/// say "$0 is WF if $0 is WF".
-pub fn obligations<'a,'tcx>(infcx: &InferCtxt<'a, 'tcx>,
-                            body_id: ast::NodeId,
-                            ty: Ty<'tcx>,
-                            span: Span)
-                            -> Option<Vec<traits::PredicateObligation<'tcx>>>
-{
-    let mut wf = WfPredicates { infcx: infcx,
-                                body_id: body_id,
-                                span: span,
-                                out: vec![] };
-    if wf.compute(ty) {
-        debug!("wf::obligations({:?}, body_id={:?}) = {:?}", ty, body_id, wf.out);
-        let result = wf.normalize();
-        debug!("wf::obligations({:?}, body_id={:?}) ~~> {:?}", ty, body_id, result);
-        Some(result)
-    } else {
-        None // no progress made, return None
-    }
-}
-
-/// Returns the obligations that make this trait reference
-/// well-formed.  For example, if there is a trait `Set` defined like
-/// `trait Set<K:Eq>`, then the trait reference `Foo: Set<Bar>` is WF
-/// if `Bar: Eq`.
-pub fn trait_obligations<'a,'tcx>(infcx: &InferCtxt<'a, 'tcx>,
-                                  body_id: ast::NodeId,
-                                  trait_ref: &ty::TraitRef<'tcx>,
-                                  span: Span)
-                                  -> Vec<traits::PredicateObligation<'tcx>>
-{
-    let mut wf = WfPredicates { infcx: infcx, body_id: body_id, span: span, out: vec![] };
-    wf.compute_trait_ref(trait_ref);
-    wf.normalize()
-}
-
-pub fn predicate_obligations<'a,'tcx>(infcx: &InferCtxt<'a, 'tcx>,
-                                      body_id: ast::NodeId,
-                                      predicate: &ty::Predicate<'tcx>,
-                                      span: Span)
-                                      -> Vec<traits::PredicateObligation<'tcx>>
-{
-    let mut wf = WfPredicates { infcx: infcx, body_id: body_id, span: span, out: vec![] };
-
-    // (*) ok to skip binders, because wf code is prepared for it
-    match *predicate {
-        ty::Predicate::Trait(ref t) => {
-            wf.compute_trait_ref(&t.skip_binder().trait_ref); // (*)
-        }
-        ty::Predicate::Equate(ref t) => {
-            wf.compute(t.skip_binder().0);
-            wf.compute(t.skip_binder().1);
-        }
-        ty::Predicate::RegionOutlives(..) => {
-        }
-        ty::Predicate::TypeOutlives(ref t) => {
-            wf.compute(t.skip_binder().0);
-        }
-        ty::Predicate::Projection(ref t) => {
-            let t = t.skip_binder(); // (*)
-            wf.compute_projection(t.projection_ty);
-            wf.compute(t.ty);
-        }
-        ty::Predicate::WellFormed(t) => {
-            wf.compute(t);
-        }
-        ty::Predicate::ObjectSafe(_) => {
-        }
-    }
-
-    wf.normalize()
-}
-
-/// Implied bounds are region relationships that we deduce
-/// automatically.  The idea is that (e.g.) a caller must check that a
-/// function's argument types are well-formed immediately before
-/// calling that fn, and hence the *callee* can assume that its
-/// argument types are well-formed. This may imply certain relationships
-/// between generic parameters. For example:
-///
-///     fn foo<'a,T>(x: &'a T)
-///
-/// can only be called with a `'a` and `T` such that `&'a T` is WF.
-/// For `&'a T` to be WF, `T: 'a` must hold. So we can assume `T: 'a`.
-#[derive(Debug)]
-pub enum ImpliedBound<'tcx> {
-    RegionSubRegion(ty::Region, ty::Region),
-    RegionSubParam(ty::Region, ty::ParamTy),
-    RegionSubProjection(ty::Region, ty::ProjectionTy<'tcx>),
-}
-
-/// Compute the implied bounds that a callee/impl can assume based on
-/// the fact that caller/projector has ensured that `ty` is WF.  See
-/// the `ImpliedBound` type for more details.
-pub fn implied_bounds<'a,'tcx>(
-    infcx: &'a InferCtxt<'a,'tcx>,
-    body_id: ast::NodeId,
-    ty: Ty<'tcx>,
-    span: Span)
-    -> Vec<ImpliedBound<'tcx>>
-{
-    // Sometimes when we ask what it takes for T: WF, we get back that
-    // U: WF is required; in that case, we push U onto this stack and
-    // process it next. Currently (at least) these resulting
-    // predicates are always guaranteed to be a subset of the original
-    // type, so we need not fear non-termination.
-    let mut wf_types = vec![ty];
-
-    let mut implied_bounds = vec![];
-
-    while let Some(ty) = wf_types.pop() {
-        // Compute the obligations for `ty` to be well-formed. If `ty` is
-        // an unresolved inference variable, just substitute an empty set
-        // an unresolved inference variable, just substitute an empty set
-        // -- because the return type here is going to be things we *add*
-        // to the environment, it's always ok for this set to be smaller
-        // than the ultimate set. (Note: normally there won't be
-        // unresolved inference variables here anyway, but there might be
-        // during typeck under some circumstances.)
-        let obligations = obligations(infcx, body_id, ty, span).unwrap_or(vec![]);
-
-        // From the full set of obligations, just filter down to the
-        // region relationships.
-        implied_bounds.extend(
-            obligations
-            .into_iter()
-            .flat_map(|obligation| {
-                assert!(!obligation.has_escaping_regions());
-                match obligation.predicate {
-                    ty::Predicate::Trait(..) |
-                    ty::Predicate::Equate(..) |
-                    ty::Predicate::Projection(..) |
-                    ty::Predicate::ObjectSafe(..) =>
-                        vec![],
-
-                    ty::Predicate::WellFormed(subty) => {
-                        wf_types.push(subty);
-                        vec![]
-                    }
-
-                    ty::Predicate::RegionOutlives(ref data) =>
-                        match infcx.tcx.no_late_bound_regions(data) {
-                            None =>
-                                vec![],
-                            Some(ty::OutlivesPredicate(r_a, r_b)) =>
-                                vec![ImpliedBound::RegionSubRegion(r_b, r_a)],
-                        },
-
-                    ty::Predicate::TypeOutlives(ref data) =>
-                        match infcx.tcx.no_late_bound_regions(data) {
-                            None => vec![],
-                            Some(ty::OutlivesPredicate(ty_a, r_b)) => {
-                                let components = outlives::components(infcx, ty_a);
-                                implied_bounds_from_components(r_b, components)
-                            }
-                        },
-                }}));
-    }
-
-    implied_bounds
-}
-
-/// When we have an implied bound that `T: 'a`, we can further break
-/// this down to determine what relationships would have to hold for
-/// `T: 'a` to hold. We get to assume that the caller has validated
-/// those relationships.
-fn implied_bounds_from_components<'tcx>(sub_region: ty::Region,
-                                        sup_components: Vec<Component<'tcx>>)
-                                        -> Vec<ImpliedBound<'tcx>>
-{
-    sup_components
-        .into_iter()
-        .flat_map(|component| {
-            match component {
-                Component::Region(r) =>
-                    vec!(ImpliedBound::RegionSubRegion(sub_region, r)),
-                Component::Param(p) =>
-                    vec!(ImpliedBound::RegionSubParam(sub_region, p)),
-                Component::Projection(p) =>
-                    vec!(ImpliedBound::RegionSubProjection(sub_region, p)),
-                Component::EscapingProjection(_) =>
-                    // If the projection has escaping regions, don't
-                    // try to infer any implied bounds even for its
-                    // free components. This is conservative, because
-                    // the caller will still have to prove that those
-                    // free components outlive `sub_region`. But the
-                    // idea is that the WAY that the caller proves
-                    // that may change in the future and we want to
-                    // give ourselves room to get smarter here.
-                    vec!(),
-                Component::UnresolvedInferenceVariable(..) =>
-                    vec!(),
-            }
-        })
-        .collect()
-}
-
-struct WfPredicates<'a,'tcx:'a> {
-    infcx: &'a InferCtxt<'a, 'tcx>,
-    body_id: ast::NodeId,
-    span: Span,
-    out: Vec<traits::PredicateObligation<'tcx>>,
-}
-
-impl<'a,'tcx> WfPredicates<'a,'tcx> {
-    fn cause(&mut self, code: traits::ObligationCauseCode<'tcx>) -> traits::ObligationCause<'tcx> {
-        traits::ObligationCause::new(self.span, self.body_id, code)
-    }
-
-    fn normalize(&mut self) -> Vec<traits::PredicateObligation<'tcx>> {
-        let cause = self.cause(traits::MiscObligation);
-        let infcx = &mut self.infcx;
-        self.out.iter()
-                .inspect(|pred| assert!(!pred.has_escaping_regions()))
-                .flat_map(|pred| {
-                    let mut selcx = traits::SelectionContext::new(infcx);
-                    let pred = traits::normalize(&mut selcx, cause.clone(), pred);
-                    once(pred.value).chain(pred.obligations)
-                })
-                .collect()
-    }
-
-    /// Pushes the obligations required for `trait_ref` to be WF into
-    /// `self.out`.
-    fn compute_trait_ref(&mut self, trait_ref: &ty::TraitRef<'tcx>) {
-        let obligations = self.nominal_obligations(trait_ref.def_id, trait_ref.substs);
-        self.out.extend(obligations);
-
-        let cause = self.cause(traits::MiscObligation);
-        self.out.extend(
-            trait_ref.substs.types
-                            .as_slice()
-                            .iter()
-                            .filter(|ty| !ty.has_escaping_regions())
-                            .map(|ty| traits::Obligation::new(cause.clone(),
-                                                              ty::Predicate::WellFormed(ty))));
-    }
-
-    /// Pushes the obligations required for `trait_ref::Item` to be WF
-    /// into `self.out`.
-    fn compute_projection(&mut self, data: ty::ProjectionTy<'tcx>) {
-        // A projection is well-formed if (a) the trait ref itself is
-        // WF and (b) the trait-ref holds.  (It may also be
-        // normalizable and be WF that way.)
-
-        self.compute_trait_ref(&data.trait_ref);
-
-        if !data.has_escaping_regions() {
-            let predicate = data.trait_ref.to_predicate();
-            let cause = self.cause(traits::ProjectionWf(data));
-            self.out.push(traits::Obligation::new(cause, predicate));
-        }
-    }
-
-    /// Push new obligations into `out`. Returns true if it was able
-    /// to generate all the predicates needed to validate that `ty0`
-    /// is WF. Returns false if `ty0` is an unresolved type variable,
-    /// in which case we are not able to simplify at all.
-    fn compute(&mut self, ty0: Ty<'tcx>) -> bool {
-        let mut subtys = ty0.walk();
-        while let Some(ty) = subtys.next() {
-            match ty.sty {
-                ty::TyBool |
-                ty::TyChar |
-                ty::TyInt(..) |
-                ty::TyUint(..) |
-                ty::TyFloat(..) |
-                ty::TyError |
-                ty::TyStr |
-                ty::TyParam(_) => {
-                    // WfScalar, WfParameter, etc
-                }
-
-                ty::TySlice(subty) |
-                ty::TyArray(subty, _) => {
-                    if !subty.has_escaping_regions() {
-                        let cause = self.cause(traits::SliceOrArrayElem);
-                        match traits::trait_ref_for_builtin_bound(self.infcx.tcx,
-                                                                  ty::BoundSized,
-                                                                  subty) {
-                            Ok(trait_ref) => {
-                                self.out.push(
-                                    traits::Obligation::new(cause,
-                                                            trait_ref.to_predicate()));
-                            }
-                            Err(ErrorReported) => { }
-                        }
-                    }
-                }
-
-                ty::TyBox(_) |
-                ty::TyTuple(_) |
-                ty::TyRawPtr(_) => {
-                    // simple cases that are WF if their type args are WF
-                }
-
-                ty::TyProjection(data) => {
-                    subtys.skip_current_subtree(); // subtree handled by compute_projection
-                    self.compute_projection(data);
-                }
-
-                ty::TyEnum(def, substs) |
-                ty::TyStruct(def, substs) => {
-                    // WfNominalType
-                    let obligations = self.nominal_obligations(def.did, substs);
-                    self.out.extend(obligations);
-                }
-
-                ty::TyRef(r, mt) => {
-                    // WfReference
-                    if !r.has_escaping_regions() && !mt.ty.has_escaping_regions() {
-                        let cause = self.cause(traits::ReferenceOutlivesReferent(ty));
-                        self.out.push(
-                            traits::Obligation::new(
-                                cause,
-                                ty::Predicate::TypeOutlives(
-                                    ty::Binder(
-                                        ty::OutlivesPredicate(mt.ty, *r)))));
-                    }
-                }
-
-                ty::TyClosure(..) => {
-                    // the types in a closure are always the types of
-                    // local variables (or possibly references to local
-                    // variables), we'll walk those.
-                    //
-                    // (Though, local variables are probably not
-                    // needed, as they are separately checked w/r/t
-                    // WFedness.)
-                }
-
-                ty::TyBareFn(..) => {
-                    // let the loop iterate into the argument/return
-                    // types appearing in the fn signature
-                }
-
-                ty::TyTrait(ref data) => {
-                    // WfObject
-                    //
-                    // Here, we defer WF checking due to higher-ranked
-                    // regions. This is perhaps not ideal.
-                    self.from_object_ty(ty, data);
-
-                    // FIXME(#27579) RFC also considers adding trait
-                    // obligations that don't refer to Self and
-                    // checking those
-
-                    let cause = self.cause(traits::MiscObligation);
-                    self.out.push(
-                        traits::Obligation::new(
-                            cause,
-                            ty::Predicate::ObjectSafe(data.principal_def_id())));
-                }
-
-                // Inference variables are the complicated case, since we don't
-                // know what type they are. We do two things:
-                //
-                // 1. Check if they have been resolved, and if so proceed with
-                //    THAT type.
-                // 2. If not, check whether this is the type that we
-                //    started with (ty0). In that case, we've made no
-                //    progress at all, so return false. Otherwise,
-                //    we've at least simplified things (i.e., we went
-                //    from `Vec<$0>: WF` to `$0: WF`, so we can
-                //    register a pending obligation and keep
-                //    moving. (Goal is that an "inductive hypothesis"
-                //    is satisfied to ensure termination.)
-                ty::TyInfer(_) => {
-                    let ty = self.infcx.shallow_resolve(ty);
-                    if let ty::TyInfer(_) = ty.sty { // not yet resolved...
-                        if ty == ty0 { // ...this is the type we started from! no progress.
-                            return false;
-                        }
-
-                        let cause = self.cause(traits::MiscObligation);
-                        self.out.push( // ...not the type we started from, so we made progress.
-                            traits::Obligation::new(cause, ty::Predicate::WellFormed(ty)));
-                    } else {
-                        // Yes, resolved, proceed with the
-                        // result. Should never return false because
-                        // `ty` is not a TyInfer.
-                        assert!(self.compute(ty));
-                    }
-                }
-            }
-        }
-
-        // if we made it through that loop above, we made progress!
-        return true;
-    }
-
-    fn nominal_obligations(&mut self,
-                           def_id: DefId,
-                           substs: &Substs<'tcx>)
-                           -> Vec<traits::PredicateObligation<'tcx>>
-    {
-        let predicates =
-            self.infcx.tcx.lookup_predicates(def_id)
-                          .instantiate(self.infcx.tcx, substs);
-        let cause = self.cause(traits::ItemObligation(def_id));
-        predicates.predicates
-                  .into_iter()
-                  .map(|pred| traits::Obligation::new(cause.clone(), pred))
-                  .filter(|pred| !pred.has_escaping_regions())
-                  .collect()
-    }
-
-    fn from_object_ty(&mut self, ty: Ty<'tcx>, data: &ty::TraitTy<'tcx>) {
-        // Imagine a type like this:
-        //
-        //     trait Foo { }
-        //     trait Bar<'c> : 'c { }
-        //
-        //     &'b (Foo+'c+Bar<'d>)
-        //         ^
-        //
-        // In this case, the following relationships must hold:
-        //
-        //     'b <= 'c
-        //     'd <= 'c
-        //
-        // The first condition is due to the normal region pointer
-        // rules, which say that a reference cannot outlive its
-        // referent.
-        //
-        // The final condition may be a bit surprising. In particular,
-        // you may expect that it would have been `'c <= 'd`, since
-        // usually lifetimes of outer things are conservative
-        // approximations for inner things. However, it works somewhat
-        // differently with trait objects: here the idea is that if the
-        // user specifies a region bound (`'c`, in this case) it is the
-        // "master bound" that *implies* that bounds from other traits are
-        // all met. (Remember that *all bounds* in a type like
-        // `Foo+Bar+Zed` must be met, not just one, hence if we write
-        // `Foo<'x>+Bar<'y>`, we know that the type outlives *both* 'x and
-        // 'y.)
-        //
-        // Note: in fact we only permit builtin traits, not `Bar<'d>`, I
-        // am looking forward to the future here.
-
-        if !data.has_escaping_regions() {
-            let implicit_bounds =
-                object_region_bounds(self.infcx.tcx,
-                                     &data.principal,
-                                     data.bounds.builtin_bounds);
-
-            let explicit_bound = data.bounds.region_bound;
-
-            for implicit_bound in implicit_bounds {
-                let cause = self.cause(traits::ReferenceOutlivesReferent(ty));
-                let outlives = ty::Binder(ty::OutlivesPredicate(explicit_bound, implicit_bound));
-                self.out.push(traits::Obligation::new(cause, outlives.to_predicate()));
-            }
-        }
-    }
-}
-
-/// Given an object type like `SomeTrait+Send`, computes the lifetime
-/// bounds that must hold on the elided self type. These are derived
-/// from the declarations of `SomeTrait`, `Send`, and friends -- if
-/// they declare `trait SomeTrait : 'static`, for example, then
-/// `'static` would appear in the list. The hard work is done by
-/// `ty::required_region_bounds`, see that for more information.
-pub fn object_region_bounds<'tcx>(
-    tcx: &ty::ctxt<'tcx>,
-    principal: &ty::PolyTraitRef<'tcx>,
-    others: ty::BuiltinBounds)
-    -> Vec<ty::Region>
-{
-    // Since we don't actually *know* the self type for an object,
-    // this "open(err)" serves as a kind of dummy standin -- basically
-    // a skolemized type.
-    let open_ty = tcx.mk_infer(ty::FreshTy(0));
-
-    // Note that we preserve the overall binding levels here.
-    assert!(!open_ty.has_escaping_regions());
-    let substs = tcx.mk_substs(principal.0.substs.with_self_ty(open_ty));
-    let trait_refs = vec!(ty::Binder(ty::TraitRef::new(principal.0.def_id, substs)));
-
-    let mut predicates = others.to_predicates(tcx, open_ty);
-    predicates.extend(trait_refs.iter().map(|t| t.to_predicate()));
-
-    tcx.required_region_bounds(open_ty, predicates)
-}
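
Although the well-formedness plumbing deleted here is compiler-internal, its user-visible effect is easy to show in plain Rust: a caller has to prove `&'a T` well-formed before calling, so the callee gets `T: 'a` for free. A small illustration with hypothetical `Holder`/`hold` names (ordinary stable Rust, not compiler code):

```
// Because callers must prove `&'a T` well-formed at the call site, the callee
// may rely on the implied bound `T: 'a` without writing it out.
struct Holder<'a, T: 'a> {
    inner: &'a T,
}

// No explicit `T: 'a` bound here, yet producing `Holder<'a, T>` (which needs
// that bound) is accepted: the argument type `&'a T` already implies it.
fn hold<'a, T>(x: &'a T) -> Holder<'a, T> {
    Holder { inner: x }
}

fn main() {
    let n = 5;
    let h = hold(&n);
    println!("{}", h.inner);
}
```
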
index 6059d7ee74e390416f43a3f5eabc739990bd880a..74bb41785cbe143446c0975bb0ceaa404c9e9fe5 100644 (file)
@@ -18,9 +18,9 @@ use middle::lang_items;
 use syntax::ast;
 use syntax::codemap::Span;
 use syntax::parse::token::InternedString;
-use rustc_front::intravisit::Visitor;
-use rustc_front::intravisit;
-use rustc_front::hir;
+use hir::intravisit::Visitor;
+use hir::intravisit;
+use hir;
 
 use std::collections::HashSet;
 
index 32e78b04676395428c936061eb45f5b5e6c17b1d..1a34699aff491754aea088576fdefa5c47f00106 100644 (file)
@@ -8,31 +8,9 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use dep_graph::DepNode;
 use util::nodemap::NodeMap;
 use mir::repr::Mir;
-use mir::transform::MirPass;
-use middle::ty;
-use middle::infer;
 
 pub struct MirMap<'tcx> {
     pub map: NodeMap<Mir<'tcx>>,
 }
-
-impl<'tcx> MirMap<'tcx> {
-    pub fn run_passes(&mut self, passes: &mut [Box<MirPass>], tcx: &ty::ctxt<'tcx>) {
-        if passes.is_empty() { return; }
-
-        for (&id, mir) in &mut self.map {
-            let did = tcx.map.local_def_id(id);
-            let _task = tcx.dep_graph.in_task(DepNode::MirMapConstruction(did));
-
-            let param_env = ty::ParameterEnvironment::for_item(tcx, id);
-            let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(param_env));
-
-            for pass in &mut *passes {
-                pass.run_on_mir(mir, &infcx)
-            }
-        }
-    }
-}
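
With `run_passes` gone from `MirMap`, pass scheduling moves into a dedicated pass manager (the `Passes` type introduced at the end of this diff). A rough standalone sketch of that shape, written against current stable Rust with toy `Mir`/`NodeId` stand-ins rather than the compiler's types:

```
use std::collections::HashMap;

type NodeId = u32;

#[derive(Debug, Default)]
struct Mir {
    statements: Vec<String>,
}

/// A pass that inspects one function's MIR in isolation.
trait MirPass {
    fn run_pass(&mut self, id: NodeId, mir: &mut Mir);
}

/// Example pass: delete no-op statements.
struct RemoveNops;

impl MirPass for RemoveNops {
    fn run_pass(&mut self, _id: NodeId, mir: &mut Mir) {
        mir.statements.retain(|s| s.as_str() != "nop");
    }
}

/// The manager owns boxed passes and drives them over every function.
#[derive(Default)]
struct Passes {
    passes: Vec<Box<dyn MirPass>>,
}

impl Passes {
    fn push_pass(&mut self, pass: Box<dyn MirPass>) {
        self.passes.push(pass);
    }

    fn run_passes(&mut self, map: &mut HashMap<NodeId, Mir>) {
        for (&id, mir) in map.iter_mut() {
            for pass in &mut self.passes {
                pass.run_pass(id, mir);
            }
        }
    }
}

fn main() {
    let mut map = HashMap::new();
    map.insert(0, Mir { statements: vec!["assign".into(), "nop".into()] });

    let mut passes = Passes::default();
    passes.push_pass(Box::new(RemoveNops));
    passes.run_passes(&mut map);

    assert_eq!(map[&0].statements, vec!["assign".to_string()]);
}
```
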
index ce7b1ceb355401d02b08fd3a4c5362affffee396..28437fa13368aff0b1fd14780839a1fed91822d0 100644 (file)
@@ -9,12 +9,14 @@
 // except according to those terms.
 
 use graphviz::IntoCow;
-use middle::const_eval::ConstVal;
-use middle::def_id::DefId;
-use middle::subst::Substs;
-use middle::ty::{self, AdtDef, ClosureSubsts, FnOutput, Region, Ty};
+use middle::const_val::ConstVal;
+use rustc_const_math::{ConstUsize, ConstInt};
+use hir::def_id::DefId;
+use ty::subst::Substs;
+use ty::{self, AdtDef, ClosureSubsts, FnOutput, Region, Ty};
+use util::ppaux;
 use rustc_back::slice;
-use rustc_front::hir::InlineAsm;
+use hir::InlineAsm;
 use std::ascii;
 use std::borrow::{Cow};
 use std::fmt::{self, Debug, Formatter, Write};
@@ -30,6 +32,10 @@ pub struct Mir<'tcx> {
     /// that indexes into this vector.
     pub basic_blocks: Vec<BasicBlockData<'tcx>>,
 
+    /// List of lexical scopes; these are referenced by statements and
+    /// used (eventually) for debuginfo. Indexed by a `ScopeId`.
+    pub scopes: Vec<ScopeData>,
+
     /// Return type of the function.
     pub return_ty: FnOutput<'tcx>,
 
@@ -150,9 +156,21 @@ pub enum BorrowKind {
 /// decl, a let, etc.
 #[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
 pub struct VarDecl<'tcx> {
+    /// `let mut x` vs `let x`
     pub mutability: Mutability,
+
+    /// name that user gave the variable; note that, internally,
+    /// mir references variables by index
     pub name: Name,
+
+    /// type inferred for this variable (`let x: ty = ...`)
     pub ty: Ty<'tcx>,
+
+    /// scope in which variable was declared
+    pub scope: ScopeId,
+
+    /// span where variable was declared
+    pub span: Span,
 }
 
 /// A "temp" is a temporary that we place on the stack. They are
@@ -176,6 +194,10 @@ pub struct TempDecl<'tcx> {
 #[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
 pub struct ArgDecl<'tcx> {
     pub ty: Ty<'tcx>,
+
+    /// If true, this argument is a tuple after monomorphization,
+    /// and has to be collected from multiple actual arguments.
+    pub spread: bool
 }
 
 ///////////////////////////////////////////////////////////////////////////
@@ -185,7 +207,7 @@ pub struct ArgDecl<'tcx> {
 /// list of the `Mir`.
 ///
 /// (We use a `u32` internally just to save memory.)
-#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
 pub struct BasicBlock(u32);
 
 impl BasicBlock {
@@ -207,17 +229,39 @@ impl Debug for BasicBlock {
 }
 
 ///////////////////////////////////////////////////////////////////////////
-// BasicBlock and Terminator
+// BasicBlockData and Terminator
 
 #[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
 pub struct BasicBlockData<'tcx> {
+    /// List of statements in this block.
     pub statements: Vec<Statement<'tcx>>,
+
+    /// Terminator for this block.
+    ///
+    /// NB. This should generally ONLY be `None` during construction.
+    /// Therefore, you should generally access it via the
+    /// `terminator()` or `terminator_mut()` methods. The only
+    /// exception is that certain passes, such as `simplify_cfg`, swap
+    /// out the terminator temporarily with `None` while they continue
+    /// to recurse over the set of basic blocks.
     pub terminator: Option<Terminator<'tcx>>,
+
+    /// If true, this block lies on an unwind path. This is used
+    /// during trans where distinct kinds of basic blocks may be
+    /// generated (particularly for MSVC cleanup). Unwind blocks must
+    /// only branch to other unwind blocks.
     pub is_cleanup: bool,
 }
 
+#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
+pub struct Terminator<'tcx> {
+    pub span: Span,
+    pub scope: ScopeId,
+    pub kind: TerminatorKind<'tcx>
+}
+
 #[derive(Clone, RustcEncodable, RustcDecodable)]
-pub enum Terminator<'tcx> {
+pub enum TerminatorKind<'tcx> {
     /// block should have one successor in the graph; we jump there
     Goto {
         target: BasicBlock,
@@ -287,7 +331,17 @@ pub enum Terminator<'tcx> {
 
 impl<'tcx> Terminator<'tcx> {
     pub fn successors(&self) -> Cow<[BasicBlock]> {
-        use self::Terminator::*;
+        self.kind.successors()
+    }
+
+    pub fn successors_mut(&mut self) -> Vec<&mut BasicBlock> {
+        self.kind.successors_mut()
+    }
+}
+
+impl<'tcx> TerminatorKind<'tcx> {
+    pub fn successors(&self) -> Cow<[BasicBlock]> {
+        use self::TerminatorKind::*;
         match *self {
             Goto { target: ref b } => slice::ref_slice(b).into_cow(),
             If { targets: (b1, b2), .. } => vec![b1, b2].into_cow(),
@@ -308,7 +362,7 @@ impl<'tcx> Terminator<'tcx> {
     // FIXME: no mootable cow. I’m honestly not sure what a “cow” between `&mut [BasicBlock]` and
     // `Vec<&mut BasicBlock>` would look like in the first place.
     pub fn successors_mut(&mut self) -> Vec<&mut BasicBlock> {
-        use self::Terminator::*;
+        use self::TerminatorKind::*;
         match *self {
             Goto { target: ref mut b } => vec![b],
             If { targets: (ref mut b1, ref mut b2), .. } => vec![b1, b2],
@@ -348,9 +402,9 @@ impl<'tcx> BasicBlockData<'tcx> {
     }
 }
 
-impl<'tcx> Debug for Terminator<'tcx> {
+impl<'tcx> Debug for TerminatorKind<'tcx> {
     fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
-        try!(self.fmt_head(fmt));
+        self.fmt_head(fmt)?;
         let successors = self.successors();
         let labels = self.fmt_successor_labels();
         assert_eq!(successors.len(), labels.len());
@@ -361,12 +415,12 @@ impl<'tcx> Debug for Terminator<'tcx> {
             1 => write!(fmt, " -> {:?}", successors[0]),
 
             _ => {
-                try!(write!(fmt, " -> ["));
+                write!(fmt, " -> [")?;
                 for (i, target) in successors.iter().enumerate() {
                     if i > 0 {
-                        try!(write!(fmt, ", "));
+                        write!(fmt, ", ")?;
                     }
-                    try!(write!(fmt, "{}: {:?}", labels[i], target));
+                    write!(fmt, "{}: {:?}", labels[i], target)?;
                 }
                 write!(fmt, "]")
             }
@@ -375,12 +429,12 @@ impl<'tcx> Debug for Terminator<'tcx> {
     }
 }
 
-impl<'tcx> Terminator<'tcx> {
+impl<'tcx> TerminatorKind<'tcx> {
     /// Write the "head" part of the terminator; that is, its name and the data it uses to pick the
     /// successor basic block, if any. The only information not included is the list of possible
     /// successors, which may be rendered differently between the text and the graphviz format.
     pub fn fmt_head<W: Write>(&self, fmt: &mut W) -> fmt::Result {
-        use self::Terminator::*;
+        use self::TerminatorKind::*;
         match *self {
             Goto { .. } => write!(fmt, "goto"),
             If { cond: ref lv, .. } => write!(fmt, "if({:?})", lv),
@@ -391,14 +445,14 @@ impl<'tcx> Terminator<'tcx> {
             Drop { ref value, .. } => write!(fmt, "drop({:?})", value),
             Call { ref func, ref args, ref destination, .. } => {
                 if let Some((ref destination, _)) = *destination {
-                    try!(write!(fmt, "{:?} = ", destination));
+                    write!(fmt, "{:?} = ", destination)?;
                 }
-                try!(write!(fmt, "{:?}(", func));
+                write!(fmt, "{:?}(", func)?;
                 for (index, arg) in args.iter().enumerate() {
                     if index > 0 {
-                        try!(write!(fmt, ", "));
+                        write!(fmt, ", ")?;
                     }
-                    try!(write!(fmt, "{:?}", arg));
+                    write!(fmt, "{:?}", arg)?;
                 }
                 write!(fmt, ")")
             }
@@ -407,7 +461,7 @@ impl<'tcx> Terminator<'tcx> {
 
     /// Return the list of labels for the edges to the successor basic blocks.
     pub fn fmt_successor_labels(&self) -> Vec<Cow<'static, str>> {
-        use self::Terminator::*;
+        use self::TerminatorKind::*;
         match *self {
             Return | Resume => vec![],
             Goto { .. } => vec!["".into()],
@@ -446,6 +500,7 @@ impl<'tcx> Terminator<'tcx> {
 #[derive(Clone, RustcEncodable, RustcDecodable)]
 pub struct Statement<'tcx> {
     pub span: Span,
+    pub scope: ScopeId,
     pub kind: StatementKind<'tcx>,
 }
 
@@ -462,6 +517,7 @@ impl<'tcx> Debug for Statement<'tcx> {
         }
     }
 }
+
 ///////////////////////////////////////////////////////////////////////////
 // Lvalues
 
@@ -493,13 +549,13 @@ pub enum Lvalue<'tcx> {
 /// or `*B` or `B[index]`. Note that it is parameterized because it is
 /// shared between `Constant` and `Lvalue`. See the aliases
 /// `LvalueProjection` etc below.
-#[derive(Clone, Debug, PartialEq, RustcEncodable, RustcDecodable)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
 pub struct Projection<'tcx, B, V> {
     pub base: B,
     pub elem: ProjectionElem<'tcx, V>,
 }
 
-#[derive(Clone, Debug, PartialEq, RustcEncodable, RustcDecodable)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
 pub enum ProjectionElem<'tcx, V> {
     Deref,
     Field(Field, Ty<'tcx>),
@@ -607,13 +663,50 @@ impl<'tcx> Debug for Lvalue<'tcx> {
     }
 }
 
+///////////////////////////////////////////////////////////////////////////
+// Scopes
+
+impl Index<ScopeId> for Vec<ScopeData> {
+    type Output = ScopeData;
+
+    #[inline]
+    fn index(&self, index: ScopeId) -> &ScopeData {
+        &self[index.index()]
+    }
+}
+
+impl IndexMut<ScopeId> for Vec<ScopeData> {
+    #[inline]
+    fn index_mut(&mut self, index: ScopeId) -> &mut ScopeData {
+        &mut self[index.index()]
+    }
+}
+
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, RustcEncodable, RustcDecodable)]
+pub struct ScopeId(u32);
+
+impl ScopeId {
+    pub fn new(index: usize) -> ScopeId {
+        assert!(index < (u32::MAX as usize));
+        ScopeId(index as u32)
+    }
+
+    pub fn index(self) -> usize {
+        self.0 as usize
+    }
+}
+
+#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
+pub struct ScopeData {
+    pub parent_scope: Option<ScopeId>,
+}
+
 ///////////////////////////////////////////////////////////////////////////
 // Operands
-//
+
 /// These are values that can appear inside an rvalue (or an index
 /// lvalue). They are intentionally limited to prevent rvalues from
 /// being nested in one another.
-
 #[derive(Clone, PartialEq, RustcEncodable, RustcDecodable)]
 pub enum Operand<'tcx> {
     Consume(Lvalue<'tcx>),
@@ -674,7 +767,11 @@ pub enum Rvalue<'tcx> {
         from_end: usize,
     },
 
-    InlineAsm(InlineAsm),
+    InlineAsm {
+        asm: InlineAsm,
+        outputs: Vec<Lvalue<'tcx>>,
+        inputs: Vec<Operand<'tcx>>
+    }
 }
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)]
@@ -759,7 +856,9 @@ impl<'tcx> Debug for Rvalue<'tcx> {
             BinaryOp(ref op, ref a, ref b) => write!(fmt, "{:?}({:?}, {:?})", op, a, b),
             UnaryOp(ref op, ref a) => write!(fmt, "{:?}({:?})", op, a),
             Box(ref t) => write!(fmt, "Box({:?})", t),
-            InlineAsm(ref asm) => write!(fmt, "InlineAsm({:?})", asm),
+            InlineAsm { ref asm, ref outputs, ref inputs } => {
+                write!(fmt, "asm!({:?} : {:?} : {:?})", asm, outputs, inputs)
+            }
             Slice { ref input, from_start, from_end } =>
                 write!(fmt, "{:?}[{:?}..-{:?}]", input, from_start, from_end),
 
@@ -774,8 +873,8 @@ impl<'tcx> Debug for Rvalue<'tcx> {
             Aggregate(ref kind, ref lvs) => {
                 use self::AggregateKind::*;
 
-                fn fmt_tuple(fmt: &mut Formatter, name: &str, lvs: &[Operand]) -> fmt::Result {
-                    let mut tuple_fmt = fmt.debug_tuple(name);
+                fn fmt_tuple(fmt: &mut Formatter, lvs: &[Operand]) -> fmt::Result {
+                    let mut tuple_fmt = fmt.debug_tuple("");
                     for lv in lvs {
                         tuple_fmt.field(lv);
                     }
@@ -789,19 +888,24 @@ impl<'tcx> Debug for Rvalue<'tcx> {
                         match lvs.len() {
                             0 => write!(fmt, "()"),
                             1 => write!(fmt, "({:?},)", lvs[0]),
-                            _ => fmt_tuple(fmt, "", lvs),
+                            _ => fmt_tuple(fmt, lvs),
                         }
                     }
 
-                    Adt(adt_def, variant, _) => {
+                    Adt(adt_def, variant, substs) => {
                         let variant_def = &adt_def.variants[variant];
-                        let name = ty::tls::with(|tcx| tcx.item_path_str(variant_def.did));
+
+                        ppaux::parameterized(fmt, substs, variant_def.did,
+                                             ppaux::Ns::Value, &[],
+                                             |tcx| {
+                            tcx.lookup_item_type(variant_def.did).generics
+                        })?;
 
                         match variant_def.kind() {
-                            ty::VariantKind::Unit => write!(fmt, "{}", name),
-                            ty::VariantKind::Tuple => fmt_tuple(fmt, &name, lvs),
+                            ty::VariantKind::Unit => Ok(()),
+                            ty::VariantKind::Tuple => fmt_tuple(fmt, lvs),
                             ty::VariantKind::Struct => {
-                                let mut struct_fmt = fmt.debug_struct(&name);
+                                let mut struct_fmt = fmt.debug_struct("");
                                 for (field, lv) in variant_def.fields.iter().zip(lvs) {
                                     struct_fmt.field(&field.name.as_str(), lv);
                                 }
@@ -840,7 +944,7 @@ impl<'tcx> Debug for Rvalue<'tcx> {
 /// this does not necessarily mean that they are "==" in Rust -- in
 /// particular one must be wary of `NaN`!
 
-#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable)]
+#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
 pub struct Constant<'tcx> {
     pub span: Span,
     pub ty: Ty<'tcx>,
@@ -851,30 +955,19 @@ pub struct Constant<'tcx> {
 pub struct TypedConstVal<'tcx> {
     pub ty: Ty<'tcx>,
     pub span: Span,
-    pub value: ConstVal
+    pub value: ConstUsize,
 }
 
 impl<'tcx> Debug for TypedConstVal<'tcx> {
     fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
-        try!(write!(fmt, "const "));
-        fmt_const_val(fmt, &self.value)
+        write!(fmt, "const {}", ConstInt::Usize(self.value))
     }
 }
 
-#[derive(Clone, Copy, Debug, PartialEq, RustcEncodable, RustcDecodable)]
-pub enum ItemKind {
-    Constant,
-    /// This is any sort of callable (usually those that have a type of `fn(…) -> …`). This
-    /// includes functions, constructors, but not methods which have their own ItemKind.
-    Function,
-    Method,
-}
-
-#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable)]
+#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
 pub enum Literal<'tcx> {
     Item {
         def_id: DefId,
-        kind: ItemKind,
         substs: &'tcx Substs<'tcx>,
     },
     Value {
@@ -892,10 +985,12 @@ impl<'tcx> Debug for Literal<'tcx> {
     fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
         use self::Literal::*;
         match *self {
-            Item { def_id, .. } =>
-                write!(fmt, "{}", item_path_str(def_id)),
+            Item { def_id, substs } => {
+                ppaux::parameterized(fmt, substs, def_id, ppaux::Ns::Value, &[],
+                                     |tcx| tcx.lookup_item_type(def_id).generics)
+            }
             Value { ref value } => {
-                try!(write!(fmt, "const "));
+                write!(fmt, "const ")?;
                 fmt_const_val(fmt, value)
             }
         }
@@ -904,11 +999,10 @@ impl<'tcx> Debug for Literal<'tcx> {
 
 /// Write a `ConstVal` in a way closer to the original source code than the `Debug` output.
 fn fmt_const_val<W: Write>(fmt: &mut W, const_val: &ConstVal) -> fmt::Result {
-    use middle::const_eval::ConstVal::*;
+    use middle::const_val::ConstVal::*;
     match *const_val {
         Float(f) => write!(fmt, "{:?}", f),
-        Int(n) => write!(fmt, "{:?}", n),
-        Uint(n) => write!(fmt, "{:?}", n),
+        Integral(n) => write!(fmt, "{}", n),
         Str(ref s) => write!(fmt, "{:?}", s),
         ByteStr(ref bytes) => {
             let escaped: String = bytes
@@ -921,6 +1015,8 @@ fn fmt_const_val<W: Write>(fmt: &mut W, const_val: &ConstVal) -> fmt::Result {
         Function(def_id) => write!(fmt, "{}", item_path_str(def_id)),
         Struct(node_id) | Tuple(node_id) | Array(node_id, _) | Repeat(node_id, _) =>
             write!(fmt, "{}", node_to_string(node_id)),
+        Char(c) => write!(fmt, "{:?}", c),
+        Dummy => bug!(),
     }
 }
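
The `ScopeId`/`ScopeData` additions earlier in this file's diff introduce a typed index: a `u32` newtype plus `Index`/`IndexMut` impls so the scope table can be read as `scopes[scope_id]` without raw `usize` arithmetic. A standalone sketch of the pattern follows; note that it wraps the vector in a local `Scopes` type purely so the example compiles on current stable Rust outside librustc, whereas the diff implements the traits on `Vec<ScopeData>` directly:

```
use std::ops::{Index, IndexMut};

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct ScopeId(u32);

impl ScopeId {
    pub fn new(index: usize) -> ScopeId {
        assert!(index < (u32::MAX as usize));
        ScopeId(index as u32)
    }

    pub fn index(self) -> usize {
        self.0 as usize
    }
}

#[derive(Clone, Debug)]
pub struct ScopeData {
    pub parent_scope: Option<ScopeId>,
}

/// Local wrapper standing in for the `Vec<ScopeData>` field on `Mir`.
pub struct Scopes(Vec<ScopeData>);

impl Index<ScopeId> for Scopes {
    type Output = ScopeData;

    fn index(&self, id: ScopeId) -> &ScopeData {
        &self.0[id.index()]
    }
}

impl IndexMut<ScopeId> for Scopes {
    fn index_mut(&mut self, id: ScopeId) -> &mut ScopeData {
        &mut self.0[id.index()]
    }
}

fn main() {
    let mut scopes = Scopes(vec![ScopeData { parent_scope: None }]);
    let root = ScopeId::new(0);
    scopes.0.push(ScopeData { parent_scope: Some(root) });

    let child = ScopeId::new(1);
    assert_eq!(scopes[child].parent_scope, Some(root));
    scopes[child].parent_scope = None; // exercises IndexMut
}
```
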
 
index 20e083f840f8cf908af68e039912957a0480ec06..d710417bf20d8b152f0d91e2e31e7869b4dc5273 100644 (file)
  */
 
 use mir::repr::*;
-use middle::const_eval::ConstVal;
-use middle::subst::{Subst, Substs};
-use middle::ty::{self, AdtDef, Ty};
-use rustc_front::hir;
+use ty::subst::{Subst, Substs};
+use ty::{self, AdtDef, Ty, TyCtxt};
+use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
+use hir;
 
 #[derive(Copy, Clone, Debug)]
 pub enum LvalueTy<'tcx> {
@@ -35,7 +35,7 @@ impl<'tcx> LvalueTy<'tcx> {
         LvalueTy::Ty { ty: ty }
     }
 
-    pub fn to_ty(&self, tcx: &ty::ctxt<'tcx>) -> Ty<'tcx> {
+    pub fn to_ty(&self, tcx: &TyCtxt<'tcx>) -> Ty<'tcx> {
         match *self {
             LvalueTy::Ty { ty } =>
                 ty,
@@ -45,7 +45,7 @@ impl<'tcx> LvalueTy<'tcx> {
     }
 
     pub fn projection_ty(self,
-                         tcx: &ty::ctxt<'tcx>,
+                         tcx: &TyCtxt<'tcx>,
                          elem: &LvalueElem<'tcx>)
                          -> LvalueTy<'tcx>
     {
@@ -70,7 +70,7 @@ impl<'tcx> LvalueTy<'tcx> {
                                              variant_index: index }
                     }
                     _ => {
-                        tcx.sess.bug(&format!("cannot downcast non-enum type: `{:?}`", self))
+                        bug!("cannot downcast non-enum type: `{:?}`", self)
                     }
                 },
             ProjectionElem::Field(_, fty) => LvalueTy::Ty { ty: fty }
@@ -78,9 +78,32 @@ impl<'tcx> LvalueTy<'tcx> {
     }
 }
 
+impl<'tcx> TypeFoldable<'tcx> for LvalueTy<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        match *self {
+            LvalueTy::Ty { ty } => LvalueTy::Ty { ty: ty.fold_with(folder) },
+            LvalueTy::Downcast { adt_def, substs, variant_index } => {
+                let substs = substs.fold_with(folder);
+                LvalueTy::Downcast {
+                    adt_def: adt_def,
+                    substs: folder.tcx().mk_substs(substs),
+                    variant_index: variant_index
+                }
+            }
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        match *self {
+            LvalueTy::Ty { ty } => ty.visit_with(visitor),
+            LvalueTy::Downcast { substs, .. } => substs.visit_with(visitor)
+        }
+    }
+}
+
 impl<'tcx> Mir<'tcx> {
     pub fn operand_ty(&self,
-                      tcx: &ty::ctxt<'tcx>,
+                      tcx: &TyCtxt<'tcx>,
                       operand: &Operand<'tcx>)
                       -> Ty<'tcx>
     {
@@ -91,7 +114,7 @@ impl<'tcx> Mir<'tcx> {
     }
 
     pub fn binop_ty(&self,
-                    tcx: &ty::ctxt<'tcx>,
+                    tcx: &TyCtxt<'tcx>,
                     op: BinOp,
                     lhs_ty: Ty<'tcx>,
                     rhs_ty: Ty<'tcx>)
@@ -116,7 +139,7 @@ impl<'tcx> Mir<'tcx> {
     }
 
     pub fn lvalue_ty(&self,
-                     tcx: &ty::ctxt<'tcx>,
+                     tcx: &TyCtxt<'tcx>,
                      lvalue: &Lvalue<'tcx>)
                      -> LvalueTy<'tcx>
     {
@@ -137,19 +160,17 @@ impl<'tcx> Mir<'tcx> {
     }
 
     pub fn rvalue_ty(&self,
-                     tcx: &ty::ctxt<'tcx>,
+                     tcx: &TyCtxt<'tcx>,
                      rvalue: &Rvalue<'tcx>)
                      -> Option<Ty<'tcx>>
     {
         match *rvalue {
             Rvalue::Use(ref operand) => Some(self.operand_ty(tcx, operand)),
             Rvalue::Repeat(ref operand, ref count) => {
-                if let ConstVal::Uint(u) = count.value {
-                    let op_ty = self.operand_ty(tcx, operand);
-                    Some(tcx.mk_array(op_ty, u as usize))
-                } else {
-                    None
-                }
+                let op_ty = self.operand_ty(tcx, operand);
+                let count = count.value.as_u64(tcx.sess.target.uint_type);
+                assert_eq!(count as usize as u64, count);
+                Some(tcx.mk_array(op_ty, count as usize))
             }
             Rvalue::Ref(reg, bk, ref lv) => {
                 let lv_ty = self.lvalue_ty(tcx, lv).to_ty(tcx);
@@ -199,7 +220,7 @@ impl<'tcx> Mir<'tcx> {
                 }
             }
             Rvalue::Slice { .. } => None,
-            Rvalue::InlineAsm(..) => None
+            Rvalue::InlineAsm { .. } => None
         }
     }
 }
index cc417f5a99ea6f93f1824bad0904d2c018f17a79..410e3f9d06628c06d3e0818a80512fc090479d49 100644 (file)
@@ -8,9 +8,68 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use mir::mir_map::MirMap;
 use mir::repr::Mir;
-use middle::infer::InferCtxt;
+use ty::TyCtxt;
+use syntax::ast::NodeId;
 
-pub trait MirPass {
-    fn run_on_mir<'a, 'tcx>(&mut self, mir: &mut Mir<'tcx>, infcx: &InferCtxt<'a, 'tcx>);
+/// Various information about a pass.
+pub trait Pass {
+    // fn name() for printouts of various sorts?
+    // fn should_run(Session) to check if pass should run?
+}
+
+/// A pass which inspects the whole MirMap.
+pub trait MirMapPass<'tcx>: Pass {
+    fn run_pass(&mut self, cx: &TyCtxt<'tcx>, map: &mut MirMap<'tcx>);
+}
+
+/// A pass which inspects Mir of functions in isolation.
+pub trait MirPass<'tcx>: Pass {
+    fn run_pass(&mut self, cx: &TyCtxt<'tcx>, id: NodeId, mir: &mut Mir<'tcx>);
+}
+
+impl<'tcx, T: MirPass<'tcx>> MirMapPass<'tcx> for T {
+    fn run_pass(&mut self, tcx: &TyCtxt<'tcx>, map: &mut MirMap<'tcx>) {
+        for (&id, mir) in &mut map.map {
+            MirPass::run_pass(self, tcx, id, mir);
+        }
+    }
+}
+
+/// A manager for MIR passes.
+pub struct Passes {
+    passes: Vec<Box<for<'tcx> MirMapPass<'tcx>>>,
+    plugin_passes: Vec<Box<for<'tcx> MirMapPass<'tcx>>>
+}
+
+impl Passes {
+    pub fn new() -> Passes {
+        let passes = Passes {
+            passes: Vec::new(),
+            plugin_passes: Vec::new()
+        };
+        passes
+    }
+
+    pub fn run_passes<'tcx>(&mut self, pcx: &TyCtxt<'tcx>, map: &mut MirMap<'tcx>) {
+        for pass in &mut self.plugin_passes {
+            pass.run_pass(pcx, map);
+        }
+        for pass in &mut self.passes {
+            pass.run_pass(pcx, map);
+        }
+    }
+
+    /// Pushes a built-in pass.
+    pub fn push_pass(&mut self, pass: Box<for<'a> MirMapPass<'a>>) {
+        self.passes.push(pass);
+    }
+}
+
+/// Copies the plugin passes.
+impl ::std::iter::Extend<Box<for<'a> MirMapPass<'a>>> for Passes {
+    fn extend<I: IntoIterator<Item=Box<for <'a> MirMapPass<'a>>>>(&mut self, it: I) {
+        self.plugin_passes.extend(it);
+    }
 }
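
The shape of the new pass manager can be illustrated with a small, standalone sketch. Everything below is a toy stand-in (a `HashMap<u32, Vec<String>>` instead of `MirMap<'tcx>`, and no `TyCtxt` or `NodeId`); it only mirrors the structure above: a blanket impl lifts per-function passes to whole-map passes, and `run_passes` runs plugin-registered passes before the built-in ones.

```rust
use std::collections::HashMap;

type Body = Vec<String>; // stand-in for one function's lowered body

/// A pass over the whole map of function bodies (cf. `MirMapPass`).
trait MapPass {
    fn run_pass(&mut self, map: &mut HashMap<u32, Body>);
}

/// A pass over a single function body (cf. `MirPass`).
trait FnPass {
    fn run_pass(&mut self, id: u32, body: &mut Body);
}

// Blanket impl: any per-function pass is also a whole-map pass.
impl<T: FnPass> MapPass for T {
    fn run_pass(&mut self, map: &mut HashMap<u32, Body>) {
        for (&id, body) in map.iter_mut() {
            FnPass::run_pass(self, id, body);
        }
    }
}

struct Passes {
    passes: Vec<Box<MapPass>>,
    plugin_passes: Vec<Box<MapPass>>,
}

impl Passes {
    fn run_passes(&mut self, map: &mut HashMap<u32, Body>) {
        // Plugin passes run before the built-in ones, as in the diff above.
        for pass in self.plugin_passes.iter_mut().chain(self.passes.iter_mut()) {
            pass.run_pass(map);
        }
    }
}

/// Example per-function pass.
struct AppendMarker;

impl FnPass for AppendMarker {
    fn run_pass(&mut self, id: u32, body: &mut Body) {
        body.push(format!("visited fn #{}", id));
    }
}

fn main() {
    let mut map: HashMap<u32, Body> = HashMap::new();
    map.insert(0, vec!["stmt0".to_string()]);

    let mut passes = Passes {
        passes: vec![Box::new(AppendMarker)],
        plugin_passes: Vec::new(),
    };
    passes.run_passes(&mut map);
    println!("{:?}", map);
}
```
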
index 5e3c6e028a32524159aad94610b58b2494561767..450d25b60671942be2ca8f4ac9d5b4d5f141974e 100644 (file)
@@ -8,12 +8,79 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use middle::def_id::DefId;
-use middle::ty::Region;
+use middle::const_val::ConstVal;
+use hir::def_id::DefId;
+use ty::subst::Substs;
+use ty::{ClosureSubsts, FnOutput, Region, Ty};
 use mir::repr::*;
+use rustc_const_math::ConstUsize;
 use rustc_data_structures::tuple_slice::TupleSlice;
 use syntax::codemap::Span;
 
+// # The MIR Visitor
+//
+// ## Overview
+//
+// There are two visitors, one for immutable and one for mutable references,
+// but both are generated by the following macro. The code is written according
+// to the following conventions:
+//
+// - introduce a `visit_foo` and a `super_foo` method for every MIR type
+// - `visit_foo`, by default, calls `super_foo`
+// - `super_foo`, by default, destructures the `foo` and calls `visit_foo`
+//
+// This allows you as a user to override `visit_foo` for the types
+// you are interested in, and invoke (within that method)
+// `self.super_foo` to get the default behavior. Just as in an OO
+// language, you should never call `super` methods ordinarily except
+// in that circumstance.
+//
+// For the most part, we do not destructure things external to the
+// MIR, e.g. types, spans, etc, but simply visit them and stop. This
+// avoids duplication with other visitors like `TypeFoldable`. But
+// there is one exception: we do destructure the `FnOutput` to reach
+// the type within. Just because.
+//
+// ## Updating
+//
+// The code is written in a very deliberate style intended to minimize
+// the chance of things being overlooked. You'll notice that we always
+// use pattern matching to reference fields and we ensure that all
+// matches are exhaustive.
+//
+// For example, the `super_basic_block_data` method begins like this:
+//
+// ```rust
+// fn super_basic_block_data(&mut self,
+//                           block: BasicBlock,
+//                           data: & $($mutability)* BasicBlockData<'tcx>) {
+//     let BasicBlockData {
+//         ref $($mutability)* statements,
+//         ref $($mutability)* terminator,
+//         is_cleanup: _
+//     } = *data;
+//
+//     for statement in statements {
+//         self.visit_statement(block, statement);
+//     }
+//
+//     ...
+// }
+// ```
+//
+// Here we used `let BasicBlockData { <fields> } = *data` deliberately,
+// rather than writing `data.statements` in the body. This is because if one
+// adds a new field to `BasicBlockData`, one will be forced to revise this code,
+// and hence one will (hopefully) invoke the correct visit methods (if any).
+//
+// For this to work, ALL MATCHES MUST BE EXHAUSTIVE IN FIELDS AND VARIANTS.
+// That means you never write `..` to skip over fields, nor do you write `_`
+// to skip over variants in a `match`.
+//
+// The only place that `_` is acceptable is to match a field (or
+// variant argument) that does not require visiting, as in
+// `is_cleanup` above.
+
 macro_rules! make_mir_visitor {
     ($visitor_trait_name:ident, $($mutability:ident)*) => {
         pub trait $visitor_trait_name<'tcx> {
@@ -30,6 +97,11 @@ macro_rules! make_mir_visitor {
                 self.super_basic_block_data(block, data);
             }
 
+            fn visit_scope_data(&mut self,
+                                scope_data: & $($mutability)* ScopeData) {
+                self.super_scope_data(scope_data);
+            }
+
             fn visit_statement(&mut self,
                                block: BasicBlock,
                                statement: & $($mutability)* Statement<'tcx>) {
@@ -49,6 +121,12 @@ macro_rules! make_mir_visitor {
                 self.super_terminator(block, terminator);
             }
 
+            fn visit_terminator_kind(&mut self,
+                                     block: BasicBlock,
+                                     kind: & $($mutability)* TerminatorKind<'tcx>) {
+                self.super_terminator_kind(block, kind);
+            }
+
             fn visit_rvalue(&mut self,
                             rvalue: & $($mutability)* Rvalue<'tcx>) {
                 self.super_rvalue(rvalue);
@@ -65,6 +143,18 @@ macro_rules! make_mir_visitor {
                 self.super_lvalue(lvalue, context);
             }
 
+            fn visit_projection(&mut self,
+                                lvalue: & $($mutability)* LvalueProjection<'tcx>,
+                                context: LvalueContext) {
+                self.super_projection(lvalue, context);
+            }
+
+            fn visit_projection_elem(&mut self,
+                                     lvalue: & $($mutability)* LvalueElem<'tcx>,
+                                     context: LvalueContext) {
+                self.super_projection_elem(lvalue, context);
+            }
+
             fn visit_branch(&mut self,
                             source: BasicBlock,
                             target: BasicBlock) {
@@ -91,35 +181,143 @@ macro_rules! make_mir_visitor {
                 self.super_span(span);
             }
 
+            fn visit_fn_output(&mut self,
+                               fn_output: & $($mutability)* FnOutput<'tcx>) {
+                self.super_fn_output(fn_output);
+            }
+
+            fn visit_ty(&mut self,
+                        ty: & $($mutability)* Ty<'tcx>) {
+                self.super_ty(ty);
+            }
+
+            fn visit_substs(&mut self,
+                            substs: & $($mutability)* &'tcx Substs<'tcx>) {
+                self.super_substs(substs);
+            }
+
+            fn visit_closure_substs(&mut self,
+                                    substs: & $($mutability)* &'tcx ClosureSubsts<'tcx>) {
+                self.super_closure_substs(substs);
+            }
+
+            fn visit_const_val(&mut self,
+                               const_val: & $($mutability)* ConstVal) {
+                self.super_const_val(const_val);
+            }
+
+            fn visit_const_usize(&mut self,
+                                 const_usize: & $($mutability)* ConstUsize) {
+                self.super_const_usize(const_usize);
+            }
+
+            fn visit_typed_const_val(&mut self,
+                                     val: & $($mutability)* TypedConstVal<'tcx>) {
+                self.super_typed_const_val(val);
+            }
+
+            fn visit_var_decl(&mut self,
+                              var_decl: & $($mutability)* VarDecl<'tcx>) {
+                self.super_var_decl(var_decl);
+            }
+
+            fn visit_temp_decl(&mut self,
+                               temp_decl: & $($mutability)* TempDecl<'tcx>) {
+                self.super_temp_decl(temp_decl);
+            }
+
+            fn visit_arg_decl(&mut self,
+                              arg_decl: & $($mutability)* ArgDecl<'tcx>) {
+                self.super_arg_decl(arg_decl);
+            }
+
+            fn visit_scope_id(&mut self,
+                              scope_id: & $($mutability)* ScopeId) {
+                self.super_scope_id(scope_id);
+            }
+
             // The `super_xxx` methods comprise the default behavior and are
             // not meant to be overridden.
 
             fn super_mir(&mut self,
                          mir: & $($mutability)* Mir<'tcx>) {
-                for block in mir.all_basic_blocks() {
-                    let data = & $($mutability)* mir[block];
+                let Mir {
+                    ref $($mutability)* basic_blocks,
+                    ref $($mutability)* scopes,
+                    ref $($mutability)* return_ty,
+                    ref $($mutability)* var_decls,
+                    ref $($mutability)* arg_decls,
+                    ref $($mutability)* temp_decls,
+                    ref $($mutability)* span,
+                } = *mir;
+
+                for (index, data) in basic_blocks.into_iter().enumerate() {
+                    let block = BasicBlock::new(index);
                     self.visit_basic_block_data(block, data);
                 }
+
+                for scope in scopes {
+                    self.visit_scope_data(scope);
+                }
+
+                self.visit_fn_output(return_ty);
+
+                for var_decl in var_decls {
+                    self.visit_var_decl(var_decl);
+                }
+
+                for arg_decl in arg_decls {
+                    self.visit_arg_decl(arg_decl);
+                }
+
+                for temp_decl in temp_decls {
+                    self.visit_temp_decl(temp_decl);
+                }
+
+                self.visit_span(span);
             }
 
             fn super_basic_block_data(&mut self,
                                       block: BasicBlock,
                                       data: & $($mutability)* BasicBlockData<'tcx>) {
-                for statement in & $($mutability)* data.statements {
+                let BasicBlockData {
+                    ref $($mutability)* statements,
+                    ref $($mutability)* terminator,
+                    is_cleanup: _
+                } = *data;
+
+                for statement in statements {
                     self.visit_statement(block, statement);
                 }
 
-                if let Some(ref $($mutability)* terminator) = data.terminator {
+                if let Some(ref $($mutability)* terminator) = *terminator {
                     self.visit_terminator(block, terminator);
                 }
             }
 
+            fn super_scope_data(&mut self,
+                                scope_data: & $($mutability)* ScopeData) {
+                let ScopeData {
+                    ref $($mutability)* parent_scope,
+                } = *scope_data;
+
+                if let Some(ref $($mutability)* parent_scope) = *parent_scope {
+                    self.visit_scope_id(parent_scope);
+                }
+            }
+
             fn super_statement(&mut self,
                                block: BasicBlock,
                                statement: & $($mutability)* Statement<'tcx>) {
-                self.visit_span(& $($mutability)* statement.span);
-
-                match statement.kind {
+                let Statement {
+                    ref $($mutability)* span,
+                    ref $($mutability)* scope,
+                    ref $($mutability)* kind,
+                } = *statement;
+
+                self.visit_span(span);
+                self.visit_scope_id(scope);
+                match *kind {
                     StatementKind::Assign(ref $($mutability)* lvalue,
                                           ref $($mutability)* rvalue) => {
                         self.visit_assign(block, lvalue, rvalue);
@@ -138,58 +336,78 @@ macro_rules! make_mir_visitor {
             fn super_terminator(&mut self,
                                 block: BasicBlock,
                                 terminator: &$($mutability)* Terminator<'tcx>) {
-                match *terminator {
-                    Terminator::Goto { target } => {
+                let Terminator {
+                    ref $($mutability)* span,
+                    ref $($mutability)* scope,
+                    ref $($mutability)* kind,
+                } = *terminator;
+
+                self.visit_span(span);
+                self.visit_scope_id(scope);
+                self.visit_terminator_kind(block, kind);
+            }
+
+            fn super_terminator_kind(&mut self,
+                                     block: BasicBlock,
+                                     kind: & $($mutability)* TerminatorKind<'tcx>) {
+                match *kind {
+                    TerminatorKind::Goto { target } => {
                         self.visit_branch(block, target);
                     }
 
-                    Terminator::If { ref $($mutability)* cond,
-                                     ref $($mutability)* targets } => {
+                    TerminatorKind::If { ref $($mutability)* cond,
+                                         ref $($mutability)* targets } => {
                         self.visit_operand(cond);
                         for &target in targets.as_slice() {
                             self.visit_branch(block, target);
                         }
                     }
 
-                    Terminator::Switch { ref $($mutability)* discr,
-                                         adt_def: _,
-                                         ref targets } => {
+                    TerminatorKind::Switch { ref $($mutability)* discr,
+                                             adt_def: _,
+                                             ref targets } => {
                         self.visit_lvalue(discr, LvalueContext::Inspect);
                         for &target in targets {
                             self.visit_branch(block, target);
                         }
                     }
 
-                    Terminator::SwitchInt { ref $($mutability)* discr,
-                                            switch_ty: _,
-                                            values: _,
-                                            ref targets } => {
+                    TerminatorKind::SwitchInt { ref $($mutability)* discr,
+                                                ref $($mutability)* switch_ty,
+                                                ref $($mutability)* values,
+                                                ref targets } => {
                         self.visit_lvalue(discr, LvalueContext::Inspect);
+                        self.visit_ty(switch_ty);
+                        for value in values {
+                            self.visit_const_val(value);
+                        }
                         for &target in targets {
                             self.visit_branch(block, target);
                         }
                     }
 
-                    Terminator::Resume |
-                    Terminator::Return => {
+                    TerminatorKind::Resume |
+                    TerminatorKind::Return => {
                     }
 
-                    Terminator::Drop { ref $($mutability)* value, target, unwind } => {
+                    TerminatorKind::Drop { ref $($mutability)* value,
+                                           target,
+                                           unwind } => {
                         self.visit_lvalue(value, LvalueContext::Drop);
                         self.visit_branch(block, target);
                         unwind.map(|t| self.visit_branch(block, t));
                     }
 
-                    Terminator::Call { ref $($mutability)* func,
-                                       ref $($mutability)* args,
-                                       ref $($mutability)* destination,
-                                       cleanup } => {
+                    TerminatorKind::Call { ref $($mutability)* func,
+                                           ref $($mutability)* args,
+                                           ref $($mutability)* destination,
+                                           cleanup } => {
                         self.visit_operand(func);
                         for arg in args {
                             self.visit_operand(arg);
                         }
                         if let Some((ref $($mutability)* destination, target)) = *destination {
-                            self.visit_lvalue(destination, LvalueContext::Store);
+                            self.visit_lvalue(destination, LvalueContext::Call);
                             self.visit_branch(block, target);
                         }
                         cleanup.map(|t| self.visit_branch(block, t));
@@ -205,8 +423,9 @@ macro_rules! make_mir_visitor {
                     }
 
                     Rvalue::Repeat(ref $($mutability)* value,
-                                   _) => {
+                                   ref $($mutability)* typed_const_val) => {
                         self.visit_operand(value);
+                        self.visit_typed_const_val(typed_const_val);
                     }
 
                     Rvalue::Ref(r, bk, ref $($mutability)* path) => {
@@ -220,34 +439,48 @@ macro_rules! make_mir_visitor {
                         self.visit_lvalue(path, LvalueContext::Inspect);
                     }
 
-                    Rvalue::Cast(_, ref $($mutability)* operand, _) => {
+                    Rvalue::Cast(_cast_kind,
+                                 ref $($mutability)* operand,
+                                 ref $($mutability)* ty) => {
                         self.visit_operand(operand);
+                        self.visit_ty(ty);
                     }
 
-                    Rvalue::BinaryOp(_,
+                    Rvalue::BinaryOp(_bin_op,
                                      ref $($mutability)* lhs,
                                      ref $($mutability)* rhs) => {
                         self.visit_operand(lhs);
                         self.visit_operand(rhs);
                     }
 
-                    Rvalue::UnaryOp(_, ref $($mutability)* op) => {
+                    Rvalue::UnaryOp(_un_op, ref $($mutability)* op) => {
                         self.visit_operand(op);
                     }
 
-                    Rvalue::Box(_) => {
+                    Rvalue::Box(ref $($mutability)* ty) => {
+                        self.visit_ty(ty);
                     }
 
                     Rvalue::Aggregate(ref $($mutability)* kind,
                                       ref $($mutability)* operands) => {
                         match *kind {
-                            AggregateKind::Closure(ref $($mutability)* def_id, _) => {
+                            AggregateKind::Vec => {
+                            }
+                            AggregateKind::Tuple => {
+                            }
+                            AggregateKind::Adt(_adt_def,
+                                               _variant_index,
+                                               ref $($mutability)* substs) => {
+                                self.visit_substs(substs);
+                            }
+                            AggregateKind::Closure(ref $($mutability)* def_id,
+                                                   ref $($mutability)* closure_substs) => {
                                 self.visit_def_id(def_id);
+                                self.visit_closure_substs(closure_substs);
                             }
-                            _ => { /* nothing to do */ }
                         }
 
-                        for operand in & $($mutability)* operands[..] {
+                        for operand in operands {
                             self.visit_operand(operand);
                         }
                     }
@@ -261,7 +494,15 @@ macro_rules! make_mir_visitor {
                         });
                     }
 
-                    Rvalue::InlineAsm(_) => {
+                    Rvalue::InlineAsm { ref $($mutability)* outputs,
+                                        ref $($mutability)* inputs,
+                                        asm: _ } => {
+                        for output in & $($mutability)* outputs[..] {
+                            self.visit_lvalue(output, LvalueContext::Store);
+                        }
+                        for input in & $($mutability)* inputs[..] {
+                            self.visit_operand(input);
+                        }
                     }
                 }
             }
@@ -280,7 +521,7 @@ macro_rules! make_mir_visitor {
 
             fn super_lvalue(&mut self,
                             lvalue: & $($mutability)* Lvalue<'tcx>,
-                            _context: LvalueContext) {
+                            context: LvalueContext) {
                 match *lvalue {
                     Lvalue::Var(_) |
                     Lvalue::Temp(_) |
@@ -291,12 +532,81 @@ macro_rules! make_mir_visitor {
                         self.visit_def_id(def_id);
                     }
                     Lvalue::Projection(ref $($mutability)* proj) => {
-                        self.visit_lvalue(& $($mutability)* proj.base,
-                                          LvalueContext::Projection);
+                        self.visit_projection(proj, context);
                     }
                 }
             }
 
+            fn super_projection(&mut self,
+                                proj: & $($mutability)* LvalueProjection<'tcx>,
+                                context: LvalueContext) {
+                let Projection {
+                    ref $($mutability)* base,
+                    ref $($mutability)* elem,
+                } = *proj;
+                self.visit_lvalue(base, LvalueContext::Projection);
+                self.visit_projection_elem(elem, context);
+            }
+
+            fn super_projection_elem(&mut self,
+                                     proj: & $($mutability)* LvalueElem<'tcx>,
+                                     _context: LvalueContext) {
+                match *proj {
+                    ProjectionElem::Deref => {
+                    }
+                    ProjectionElem::Field(_field, ref $($mutability)* ty) => {
+                        self.visit_ty(ty);
+                    }
+                    ProjectionElem::Index(ref $($mutability)* operand) => {
+                        self.visit_operand(operand);
+                    }
+                    ProjectionElem::ConstantIndex { offset: _,
+                                                    min_length: _,
+                                                    from_end: _ } => {
+                    }
+                    ProjectionElem::Downcast(_adt_def, _variant_index) => {
+                    }
+                }
+            }
+
+            fn super_var_decl(&mut self,
+                              var_decl: & $($mutability)* VarDecl<'tcx>) {
+                let VarDecl {
+                    mutability: _,
+                    name: _,
+                    ref $($mutability)* ty,
+                    ref $($mutability)* scope,
+                    ref $($mutability)* span,
+                } = *var_decl;
+
+                self.visit_ty(ty);
+                self.visit_scope_id(scope);
+                self.visit_span(span);
+            }
+
+            fn super_temp_decl(&mut self,
+                               temp_decl: & $($mutability)* TempDecl<'tcx>) {
+                let TempDecl {
+                    ref $($mutability)* ty,
+                } = *temp_decl;
+
+                self.visit_ty(ty);
+            }
+
+            fn super_arg_decl(&mut self,
+                              arg_decl: & $($mutability)* ArgDecl<'tcx>) {
+                let ArgDecl {
+                    ref $($mutability)* ty,
+                    spread: _
+                } = *arg_decl;
+
+                self.visit_ty(ty);
+            }
+
+            fn super_scope_id(&mut self,
+                              _scope_id: & $($mutability)* ScopeId) {
+            }
+
             fn super_branch(&mut self,
                             _source: BasicBlock,
                             _target: BasicBlock) {
@@ -304,18 +614,40 @@ macro_rules! make_mir_visitor {
 
             fn super_constant(&mut self,
                               constant: & $($mutability)* Constant<'tcx>) {
-                self.visit_span(& $($mutability)* constant.span);
-                self.visit_literal(& $($mutability)* constant.literal);
+                let Constant {
+                    ref $($mutability)* span,
+                    ref $($mutability)* ty,
+                    ref $($mutability)* literal,
+                } = *constant;
+
+                self.visit_span(span);
+                self.visit_ty(ty);
+                self.visit_literal(literal);
+            }
+
+            fn super_typed_const_val(&mut self,
+                                     constant: & $($mutability)* TypedConstVal<'tcx>) {
+                let TypedConstVal {
+                    ref $($mutability)* span,
+                    ref $($mutability)* ty,
+                    ref $($mutability)* value,
+                } = *constant;
+
+                self.visit_span(span);
+                self.visit_ty(ty);
+                self.visit_const_usize(value);
             }
 
             fn super_literal(&mut self,
                              literal: & $($mutability)* Literal<'tcx>) {
                 match *literal {
-                    Literal::Item { ref $($mutability)* def_id, .. } => {
+                    Literal::Item { ref $($mutability)* def_id,
+                                    ref $($mutability)* substs } => {
                         self.visit_def_id(def_id);
+                        self.visit_substs(substs);
                     },
-                    Literal::Value { .. } => {
-                        // Nothing to do
+                    Literal::Value { ref $($mutability)* value } => {
+                        self.visit_const_val(value);
                     }
                 }
             }
@@ -325,6 +657,32 @@ macro_rules! make_mir_visitor {
 
             fn super_span(&mut self, _span: & $($mutability)* Span) {
             }
+
+            fn super_fn_output(&mut self, fn_output: & $($mutability)* FnOutput<'tcx>) {
+                match *fn_output {
+                    FnOutput::FnConverging(ref $($mutability)* ty) => {
+                        self.visit_ty(ty);
+                    }
+                    FnOutput::FnDiverging => {
+                    }
+                }
+            }
+
+            fn super_ty(&mut self, _ty: & $($mutability)* Ty<'tcx>) {
+            }
+
+            fn super_substs(&mut self, _substs: & $($mutability)* &'tcx Substs<'tcx>) {
+            }
+
+            fn super_closure_substs(&mut self,
+                                    _substs: & $($mutability)* &'tcx ClosureSubsts<'tcx>) {
+            }
+
+            fn super_const_val(&mut self, _substs: & $($mutability)* ConstVal) {
+            }
+
+            fn super_const_usize(&mut self, _substs: & $($mutability)* ConstUsize) {
+            }
         }
     }
 }
@@ -334,9 +692,12 @@ make_mir_visitor!(MutVisitor,mut);
 
 #[derive(Copy, Clone, Debug)]
 pub enum LvalueContext {
-    // Appears as LHS of an assignment or as dest of a call
+    // Appears as LHS of an assignment
     Store,
 
+    // Dest of a call
+    Call,
+
     // Being dropped
     Drop,
 
index 6421629cc7ab8ab6e4ccd4fe857117168207271d..574c927bd75d2cca969fbe58db01866c46cd0896 100644 (file)
@@ -38,8 +38,6 @@ use std::env;
 use std::fmt;
 use std::path::PathBuf;
 
-use llvm;
-
 pub struct Config {
     pub target: Target,
     pub int_type: IntTy,
@@ -141,11 +139,9 @@ pub struct Options {
     pub continue_parse_after_error: bool,
     pub mir_opt_level: usize,
 
-    /// if true, build up the dep-graph
-    pub build_dep_graph: bool,
-
-    /// if true, -Z dump-dep-graph was passed to dump out the dep-graph
-    pub dump_dep_graph: bool,
+    /// if Some, enable incremental compilation, using the given
+    /// directory to store intermediate results
+    pub incremental: Option<PathBuf>,
 
     pub no_analysis: bool,
     pub debugging_opts: DebuggingOptions,
@@ -173,8 +169,12 @@ pub enum PrintRequest {
 pub enum Input {
     /// Load source from file
     File(PathBuf),
-    /// The string is the source
-    Str(String)
+    Str {
+        /// String that is shown in place of a filename
+        name: String,
+        /// Anonymous source string
+        input: String,
+    },
 }
 
 impl Input {
@@ -182,7 +182,7 @@ impl Input {
         match *self {
             Input::File(ref ifile) => ifile.file_stem().unwrap()
                                            .to_str().unwrap().to_string(),
-            Input::Str(_) => "rust_out".to_string(),
+            Input::Str { .. } => "rust_out".to_string(),
         }
     }
 }
@@ -258,8 +258,7 @@ pub fn basic_options() -> Options {
         treat_err_as_bug: false,
         continue_parse_after_error: false,
         mir_opt_level: 1,
-        build_dep_graph: false,
-        dump_dep_graph: false,
+        incremental: None,
         no_analysis: false,
         debugging_opts: basic_debugging_options(),
         prints: Vec::new(),
@@ -274,6 +273,15 @@ pub fn basic_options() -> Options {
     }
 }
 
+impl Options {
+    /// True if there is a reason to build the dep graph.
+    pub fn build_dep_graph(&self) -> bool {
+        self.incremental.is_some() ||
+            self.debugging_opts.dump_dep_graph ||
+            self.debugging_opts.query_dep_graph
+    }
+}
+
 // The type of entry function, so
 // users can have their own entry
 // functions that don't start a
@@ -360,7 +368,7 @@ macro_rules! options {
                                                              value, $outputname,
                                                              key, type_desc))
                         }
-                        (None, None) => unreachable!()
+                        (None, None) => bug!()
                     }
                 }
                 found = true;
@@ -633,10 +641,12 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
           "treat all errors that occur as bugs"),
     continue_parse_after_error: bool = (false, parse_bool,
           "attempt to recover from parse errors (experimental)"),
-    incr_comp: bool = (false, parse_bool,
+    incremental: Option<String> = (None, parse_opt_string,
           "enable incremental compilation (experimental)"),
     dump_dep_graph: bool = (false, parse_bool,
           "dump the dependency graph to $RUST_DEP_GRAPH (default: /tmp/dep_graph.gv)"),
+    query_dep_graph: bool = (false, parse_bool,
+          "enable queries of the dependency graph for regression testing"),
     no_analysis: bool = (false, parse_bool,
           "parse and expand the source, but run no analysis"),
     extra_plugins: Vec<String> = (Vec::new(), parse_list,
@@ -663,6 +673,10 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
           "print the result of the translation item collection pass"),
     mir_opt_level: Option<usize> = (None, parse_opt_uint,
           "set the MIR optimization level (0-3)"),
+    dump_mir: Option<String> = (None, parse_opt_string,
+          "dump MIR state at various points in translation"),
+    orbit: bool = (false, parse_bool,
+          "get MIR where it belongs - everywhere; most importantly, in orbit"),
 }
 
 pub fn default_lib_output() -> CrateType {
@@ -779,12 +793,12 @@ impl RustcOptGroup {
         self.stability == OptionStability::Stable
     }
 
-    fn stable(g: getopts::OptGroup) -> RustcOptGroup {
+    pub fn stable(g: getopts::OptGroup) -> RustcOptGroup {
         RustcOptGroup { opt_group: g, stability: OptionStability::Stable }
     }
 
     #[allow(dead_code)] // currently we have no "truly unstable" options
-    fn unstable(g: getopts::OptGroup) -> RustcOptGroup {
+    pub fn unstable(g: getopts::OptGroup) -> RustcOptGroup {
         RustcOptGroup { opt_group: g, stability: OptionStability::Unstable }
     }
 
@@ -891,7 +905,7 @@ pub fn rustc_short_optgroups() -> Vec<RustcOptGroup> {
                  "[asm|llvm-bc|llvm-ir|obj|link|dep-info]"),
         opt::multi_s("", "print", "Comma separated list of compiler information to \
                                print on stdout",
-                 "[crate-name|file-names|sysroot|target-list]"),
+                 "[crate-name|file-names|sysroot|cfg|target-list]"),
         opt::flagmulti_s("g",  "",  "Equivalent to -C debuginfo=2"),
         opt::flagmulti_s("O", "", "Equivalent to -C opt-level=2"),
         opt::opt_s("o", "", "Write output to <filename>", "FILENAME"),
@@ -920,33 +934,32 @@ pub fn rustc_short_optgroups() -> Vec<RustcOptGroup> {
 pub fn rustc_optgroups() -> Vec<RustcOptGroup> {
     let mut opts = rustc_short_optgroups();
     opts.extend_from_slice(&[
-        opt::multi_s("", "extern", "Specify where an external rust library is \
-                                located",
-                 "NAME=PATH"),
+        opt::multi_s("", "extern", "Specify where an external rust library is located",
+                     "NAME=PATH"),
         opt::opt_s("", "sysroot", "Override the system root", "PATH"),
         opt::multi_ubnr("Z", "", "Set internal debugging options", "FLAG"),
         opt::opt_ubnr("", "error-format",
                       "How errors and other messages are produced",
                       "human|json"),
         opt::opt_s("", "color", "Configure coloring of output:
-            auto   = colorize, if output goes to a tty (default);
-            always = always colorize output;
-            never  = never colorize output", "auto|always|never"),
+                                 auto   = colorize, if output goes to a tty (default);
+                                 always = always colorize output;
+                                 never  = never colorize output", "auto|always|never"),
 
         opt::flagopt_ubnr("", "pretty",
-                   "Pretty-print the input instead of compiling;
-                   valid types are: `normal` (un-annotated source),
-                   `expanded` (crates expanded), or
-                   `expanded,identified` (fully parenthesized, AST nodes with IDs).",
-                 "TYPE"),
+                          "Pretty-print the input instead of compiling;
+                           valid types are: `normal` (un-annotated source),
+                           `expanded` (crates expanded), or
+                           `expanded,identified` (fully parenthesized, AST nodes with IDs).",
+                          "TYPE"),
         opt::flagopt_ubnr("", "unpretty",
-                     "Present the input source, unstable (and less-pretty) variants;
-                      valid types are any of the types for `--pretty`, as well as:
-                      `flowgraph=<nodeid>` (graphviz formatted flowgraph for node),
-                      `everybody_loops` (all function bodies replaced with `loop {}`),
-                      `hir` (the HIR), `hir,identified`, or
-                      `hir,typed` (HIR with types for each node).",
-                     "TYPE"),
+                          "Present the input source, unstable (and less-pretty) variants;
+                           valid types are any of the types for `--pretty`, as well as:
+                           `flowgraph=<nodeid>` (graphviz formatted flowgraph for node),
+                           `everybody_loops` (all function bodies replaced with `loop {}`),
+                           `hir` (the HIR), `hir,identified`, or
+                           `hir,typed` (HIR with types for each node).",
+                          "TYPE"),
 
         // new options here should **not** use the `_ubnr` functions, all new
         // unstable options should use the short variants to indicate that they
@@ -1045,14 +1058,8 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options {
     let treat_err_as_bug = debugging_opts.treat_err_as_bug;
     let continue_parse_after_error = debugging_opts.continue_parse_after_error;
     let mir_opt_level = debugging_opts.mir_opt_level.unwrap_or(1);
-    let incremental_compilation = debugging_opts.incr_comp;
-    let dump_dep_graph = debugging_opts.dump_dep_graph;
     let no_analysis = debugging_opts.no_analysis;
 
-    if debugging_opts.debug_llvm {
-        unsafe { llvm::LLVMSetDebug(1); }
-    }
-
     let mut output_types = HashMap::new();
     if !debugging_opts.parse_only && !no_trans {
         for list in matches.opt_strs("emit") {
@@ -1100,6 +1107,10 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options {
         }
     }
 
+    if cg.codegen_units < 1 {
+        early_error(error_format, "Value for codegen units must be a positive nonzero integer");
+    }
+
     let cg = cg;
 
     let sysroot_opt = matches.opt_str("sysroot").map(|m| PathBuf::from(&m));
@@ -1206,6 +1217,8 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options {
 
     let crate_name = matches.opt_str("crate-name");
 
+    let incremental = debugging_opts.incremental.as_ref().map(|m| PathBuf::from(m));
+
     Options {
         crate_types: crate_types,
         gc: gc,
@@ -1225,8 +1238,7 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options {
         treat_err_as_bug: treat_err_as_bug,
         continue_parse_after_error: continue_parse_after_error,
         mir_opt_level: mir_opt_level,
-        build_dep_graph: incremental_compilation || dump_dep_graph,
-        dump_dep_graph: dump_dep_graph,
+        incremental: incremental,
         no_analysis: no_analysis,
         debugging_opts: debugging_opts,
         prints: prints,
@@ -1257,7 +1269,6 @@ pub fn get_unstable_features_setting() -> UnstableFeatures {
 }
 
 pub fn parse_crate_types_from_list(list_list: Vec<String>) -> Result<Vec<CrateType>, String> {
-
     let mut crate_types: Vec<CrateType> = Vec::new();
     for unparsed_crate_type in &list_list {
         for part in unparsed_crate_type.split(',') {
@@ -1281,6 +1292,72 @@ pub fn parse_crate_types_from_list(list_list: Vec<String>) -> Result<Vec<CrateTy
     return Ok(crate_types);
 }
 
+pub mod nightly_options {
+    use getopts;
+    use syntax::feature_gate::UnstableFeatures;
+    use super::{ErrorOutputType, OptionStability, RustcOptGroup, get_unstable_features_setting};
+    use session::{early_error, early_warn};
+
+    pub fn is_unstable_enabled(matches: &getopts::Matches) -> bool {
+        is_nightly_build() && matches.opt_strs("Z").iter().any(|x| *x == "unstable-options")
+    }
+
+    fn is_nightly_build() -> bool {
+        match get_unstable_features_setting() {
+            UnstableFeatures::Allow | UnstableFeatures::Cheat => true,
+            _ => false,
+        }
+    }
+
+    pub fn check_nightly_options(matches: &getopts::Matches, flags: &[RustcOptGroup]) {
+        let has_z_unstable_option = matches.opt_strs("Z").iter().any(|x| *x == "unstable-options");
+        let really_allows_unstable_options = match get_unstable_features_setting() {
+            UnstableFeatures::Disallow => false,
+            _ => true,
+        };
+
+        for opt in flags.iter() {
+            if opt.stability == OptionStability::Stable {
+                continue
+            }
+            let opt_name = if opt.opt_group.long_name.is_empty() {
+                &opt.opt_group.short_name
+            } else {
+                &opt.opt_group.long_name
+            };
+            if !matches.opt_present(opt_name) {
+                continue
+            }
+            if opt_name != "Z" && !has_z_unstable_option {
+                early_error(ErrorOutputType::default(),
+                            &format!("the `-Z unstable-options` flag must also be passed to enable \
+                                      the flag `{}`",
+                                     opt_name));
+            }
+            if really_allows_unstable_options {
+                continue
+            }
+            match opt.stability {
+                OptionStability::Unstable => {
+                    let msg = format!("the option `{}` is only accepted on the \
+                                       nightly compiler", opt_name);
+                    early_error(ErrorOutputType::default(), &msg);
+                }
+                OptionStability::UnstableButNotReally => {
+                    let msg = format!("the option `{}` is unstable and should \
+                                       only be used on the nightly compiler, but \
+                                       it is currently accepted for backwards \
+                                       compatibility; this will soon change, \
+                                       see issue #31847 for more details",
+                                      opt_name);
+                    early_warn(ErrorOutputType::default(), &msg);
+                }
+                OptionStability::Stable => {}
+            }
+        }
+    }
+}
+
 impl fmt::Display for CrateType {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match *self {
index 09c6b54d99cf88242a6e5e09c9dc0690a650046b..e54acf3fdc3078f9f004686d0faa4d08b0676863 100644 (file)
@@ -155,14 +155,14 @@ pub fn get_or_default_sysroot() -> PathBuf {
                 // gcc chokes on verbatim paths which fs::canonicalize generates
                 // so we try to avoid those kinds of paths.
                 Ok(canon) => Some(rustcfs::fix_windows_verbatim_for_gcc(&canon)),
-                Err(e) => panic!("failed to get realpath: {}", e),
+                Err(e) => bug!("failed to get realpath: {}", e),
             }
         })
     }
 
     match canonicalize(env::current_exe().ok()) {
         Some(mut p) => { p.pop(); p.pop(); p }
-        None => panic!("can't determine value for sysroot")
+        None => bug!("can't determine value for sysroot")
     }
 }
 
index d2f8d3f09fd7c9fb35e6ceb648ba01b3deb3561d..815e60a8e0342a1a25960cfdf426a1e50b808504 100644 (file)
@@ -12,8 +12,9 @@ use lint;
 use middle::cstore::CrateStore;
 use middle::dependency_format;
 use session::search_paths::PathKind;
+use ty::tls;
 use util::nodemap::{NodeMap, FnvHashMap};
-use mir::transform::MirPass;
+use mir::transform as mir_pass;
 
 use syntax::ast::{NodeId, NodeIdAssigner, Name};
 use syntax::codemap::{Span, MultiSpan};
@@ -24,6 +25,7 @@ use syntax::diagnostics;
 use syntax::feature_gate;
 use syntax::parse;
 use syntax::parse::ParseSess;
+use syntax::parse::token;
 use syntax::{ast, codemap};
 use syntax::feature_gate::AttributeType;
 
@@ -34,6 +36,7 @@ use std::cell::{Cell, RefCell};
 use std::collections::{HashMap, HashSet};
 use std::env;
 use std::rc::Rc;
+use std::fmt;
 
 pub mod config;
 pub mod filesearch;
@@ -60,11 +63,16 @@ pub struct Session {
     pub lint_store: RefCell<lint::LintStore>,
     pub lints: RefCell<NodeMap<Vec<(lint::LintId, Span, String)>>>,
     pub plugin_llvm_passes: RefCell<Vec<String>>,
-    pub plugin_mir_passes: RefCell<Vec<Box<MirPass>>>,
+    pub mir_passes: RefCell<mir_pass::Passes>,
     pub plugin_attributes: RefCell<Vec<(String, AttributeType)>>,
     pub crate_types: RefCell<Vec<config::CrateType>>,
     pub dependency_formats: RefCell<dependency_format::Dependencies>,
-    pub crate_metadata: RefCell<Vec<String>>,
+    // The crate_disambiguator is constructed out of all the `-C metadata`
+    // arguments passed to the compiler. Its value together with the crate-name
+    // forms a unique global identifier for the crate. It is used to allow
+    // multiple crates with the same name to coexist. See the
+    // trans::back::symbol_names module for more information.
+    pub crate_disambiguator: Cell<ast::Name>,
     pub features: RefCell<feature_gate::Features>,
 
     /// The maximum recursion limit for potentially infinitely recursive
@@ -210,22 +218,10 @@ impl Session {
             None => self.warn(msg),
         }
     }
-    pub fn opt_span_bug<S: Into<MultiSpan>>(&self, opt_sp: Option<S>, msg: &str) -> ! {
-        match opt_sp {
-            Some(sp) => self.span_bug(sp, msg),
-            None => self.bug(msg),
-        }
-    }
     /// Delay a span_bug() call until abort_if_errors()
     pub fn delay_span_bug<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
         self.diagnostic().delay_span_bug(sp, msg)
     }
-    pub fn span_bug<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> ! {
-        self.diagnostic().span_bug(sp, msg)
-    }
-    pub fn bug(&self, msg: &str) -> ! {
-        self.diagnostic().bug(msg)
-    }
     pub fn note_without_error(&self, msg: &str) {
         self.diagnostic().note_without_error(msg)
     }
@@ -246,7 +242,13 @@ impl Session {
         let lint_id = lint::LintId::of(lint);
         let mut lints = self.lints.borrow_mut();
         match lints.get_mut(&id) {
-            Some(arr) => { arr.push((lint_id, sp, msg)); return; }
+            Some(arr) => {
+                let tuple = (lint_id, sp, msg);
+                if !arr.contains(&tuple) {
+                    arr.push(tuple);
+                }
+                return;
+            }
             None => {}
         }
         lints.insert(id, vec!((lint_id, sp, msg)));
@@ -256,7 +258,7 @@ impl Session {
 
         match id.checked_add(count) {
             Some(next) => self.next_node_id.set(next),
-            None => self.bug("Input too large, ran out of node ids!")
+            None => bug!("Input too large, ran out of node ids!")
         }
 
         id
@@ -267,11 +269,6 @@ impl Session {
     pub fn codemap<'a>(&'a self) -> &'a codemap::CodeMap {
         self.parse_sess.codemap()
     }
-    // This exists to help with refactoring to eliminate impossible
-    // cases later on
-    pub fn impossible_case<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> ! {
-        self.span_bug(sp, &format!("impossible case reached: {}", msg));
-    }
     pub fn verbose(&self) -> bool { self.opts.debugging_opts.verbose }
     pub fn time_passes(&self) -> bool { self.opts.debugging_opts.time_passes }
     pub fn count_llvm_insns(&self) -> bool {
@@ -336,6 +333,10 @@ impl NodeIdAssigner for Session {
     fn peek_node_id(&self) -> NodeId {
         self.next_node_id.get().checked_add(1).unwrap()
     }
+
+    fn diagnostic(&self) -> &errors::Handler {
+        self.diagnostic()
+    }
 }
 
 fn split_msg_into_multilines(msg: &str) -> Option<String> {
@@ -350,11 +351,11 @@ fn split_msg_into_multilines(msg: &str) -> Option<String> {
             return None
     }
     let first = msg.match_indices("expected").filter(|s| {
-        s.0 > 0 && (msg.char_at_reverse(s.0) == ' ' ||
-                    msg.char_at_reverse(s.0) == '(')
+        let last = msg[..s.0].chars().rev().next();
+        last == Some(' ') || last == Some('(')
     }).map(|(a, b)| (a - 1, a + b.len()));
     let second = msg.match_indices("found").filter(|s| {
-        msg.char_at_reverse(s.0) == ' '
+        msg[..s.0].chars().rev().next() == Some(' ')
     }).map(|(a, b)| (a - 1, a + b.len()));
 
     let mut new_msg = String::new();
@@ -477,11 +478,11 @@ pub fn build_session_(sopts: config::Options,
         lint_store: RefCell::new(lint::LintStore::new()),
         lints: RefCell::new(NodeMap()),
         plugin_llvm_passes: RefCell::new(Vec::new()),
-        plugin_mir_passes: RefCell::new(Vec::new()),
+        mir_passes: RefCell::new(mir_pass::Passes::new()),
         plugin_attributes: RefCell::new(Vec::new()),
         crate_types: RefCell::new(Vec::new()),
         dependency_formats: RefCell::new(FnvHashMap()),
-        crate_metadata: RefCell::new(Vec::new()),
+        crate_disambiguator: Cell::new(token::intern("")),
         features: RefCell::new(feature_gate::Features::new()),
         recursion_limit: Cell::new(64),
         next_node_id: Cell::new(1),
@@ -525,3 +526,35 @@ pub fn compile_result_from_err_count(err_count: usize) -> CompileResult {
         Err(err_count)
     }
 }
+
+#[cold]
+#[inline(never)]
+pub fn bug_fmt(file: &'static str, line: u32, args: fmt::Arguments) -> ! {
+    // this wrapper mostly exists so I don't have to write a fully
+    // qualified path of None::<Span> inside the bug!() macro definition
+    opt_span_bug_fmt(file, line, None::<Span>, args);
+}
+
+#[cold]
+#[inline(never)]
+pub fn span_bug_fmt<S: Into<MultiSpan>>(file: &'static str,
+                                        line: u32,
+                                        span: S,
+                                        args: fmt::Arguments) -> ! {
+    opt_span_bug_fmt(file, line, Some(span), args);
+}
+
+fn opt_span_bug_fmt<S: Into<MultiSpan>>(file: &'static str,
+                                          line: u32,
+                                          span: Option<S>,
+                                          args: fmt::Arguments) -> ! {
+    tls::with_opt(move |tcx| {
+        let msg = format!("{}:{}: {}", file, line, args);
+        match (tcx, span) {
+            (Some(tcx), Some(span)) => tcx.sess.diagnostic().span_bug(span, &msg),
+            (Some(tcx), None) => tcx.sess.diagnostic().bug(&msg),
+            (None, _) => panic!(msg)
+        }
+    });
+    unreachable!();
+}
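
The `bug!` macro that forwards to these entry points is defined elsewhere in the tree and is not shown in this hunk. As a rough, self-contained sketch of the pattern (with a toy `bug_fmt` that just panics instead of going through `tls::with_opt` as above):

```rust
use std::fmt;

#[cold]
#[inline(never)]
fn bug_fmt(file: &'static str, line: u32, args: fmt::Arguments) -> ! {
    // Toy stand-in: the real bug_fmt reports through the tcx diagnostics.
    panic!("{}:{}: {}", file, line, args)
}

macro_rules! bug {
    () => ( bug!("impossible case reached") );
    ($($message:tt)*) => (
        bug_fmt(file!(), line!(), format_args!($($message)*))
    )
}

fn main() {
    let x: Option<u32> = Some(1);
    match x {
        Some(n) => println!("got {}", n),
        None => bug!("unexpected None in {}", "main"),
    }
}
```
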
diff --git a/src/librustc/traits/README.md b/src/librustc/traits/README.md
new file mode 100644 (file)
index 0000000..ff72f9d
--- /dev/null
@@ -0,0 +1,470 @@
+# TRAIT RESOLUTION
+
+This document describes the general process and points out some non-obvious
+things.
+
+## Major concepts
+
+Trait resolution is the process of pairing up an impl with each
+reference to a trait. So, for example, if there is a generic function like:
+
+    fn clone_slice<T:Clone>(x: &[T]) -> Vec<T> { ... }
+
+and then a call to that function:
+
+    let v: Vec<isize> = clone_slice(&[1, 2, 3]);
+
+it is the job of trait resolution to figure out, in this case,
+whether there exists an impl of `isize : Clone`.
+
+Note that in some cases, like generic functions, we may not be able to
+find a specific impl, but we can figure out that the caller must
+provide an impl. To see what I mean, consider the body of `clone_slice`:
+
+    fn clone_slice<T:Clone>(x: &[T]) -> Vec<T> {
+        let mut v = Vec::new();
+        for e in x {
+            v.push((*e).clone()); // (*)
+        }
+        v
+    }
+
+The line marked `(*)` is only legal if `T` (the type of `*e`)
+implements the `Clone` trait. Naturally, since we don't know what `T`
+is, we can't find the specific impl; but based on the bound `T:Clone`,
+we can say that there exists an impl which the caller must provide.
+
+We use the term *obligation* to refer to a trait reference in need of
+an impl.
+
+## Overview
+
+Trait resolution consists of three major parts:
+
+- SELECTION: Deciding how to resolve a specific obligation. For
+  example, selection might decide that a specific obligation can be
+  resolved by employing an impl which matches the self type, or by
+  using a parameter bound. In the case of an impl, selecting one
+  obligation can create *nested obligations* because of where clauses
+  on the impl itself. It may also require evaluating those nested
+  obligations to resolve ambiguities.
+
+- FULFILLMENT: The fulfillment code is what tracks that obligations
+  are completely fulfilled. Basically it is a worklist of obligations
+  to be selected: once selection is successful, the obligation is
+  removed from the worklist and any nested obligations are enqueued
+  (a schematic version of this loop is sketched after this list).
+
+- COHERENCE: The coherence checks are intended to ensure that there
+  are never overlapping impls, where two impls could be used with
+  equal precedence.
+
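+A schematic version of the fulfillment worklist mentioned above, with
+placeholder types (the real code also has to retry ambiguous obligations,
+cache results, and report errors, none of which is shown here):
+
+```rust
+// Select obligations until none remain, enqueueing the nested obligations
+// produced by each successful selection. `None` from `select` stands for
+// "cannot tell yet"; the real code re-queues such obligations instead of
+// giving up.
+fn fulfill<O, F>(mut worklist: Vec<O>, mut select: F) -> Result<(), O>
+    where F: FnMut(&O) -> Option<Vec<O>>
+{
+    while let Some(obligation) = worklist.pop() {
+        match select(&obligation) {
+            Some(nested) => worklist.extend(nested),
+            None => return Err(obligation),
+        }
+    }
+    Ok(())
+}
+```
+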
+## Selection
+
+Selection is the process of deciding whether an obligation can be
+resolved and, if so, how it is to be resolved (via impl, where clause, etc).
+The main interface is the `select()` function, which takes an obligation
+and returns a `SelectionResult`. There are three possible outcomes:
+
+- `Ok(Some(selection))` -- yes, the obligation can be resolved, and
+  `selection` indicates how. If the impl was resolved via an impl,
+  then `selection` may also indicate nested obligations that are required
+  by the impl.
+
+- `Ok(None)` -- we are not yet sure whether the obligation can be
+  resolved or not. This happens most commonly when the obligation
+  contains unbound type variables.
+
+- `Err(err)` -- the obligation definitely cannot be resolved due to a
+  type error, or because there are no impls that could possibly apply,
+  etc.
+
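+Schematically, the shape of that interface is roughly the following (a
+simplified stand-in; the real `SelectionResult` carries lifetimes and a
+concrete error type):
+
+```rust
+// `select` either resolves the obligation, reports that it cannot tell
+// yet, or fails outright.
+type SelectionResult<Selection, Error> = Result<Option<Selection>, Error>;
+```
+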
+The basic algorithm for selection is broken into two big phases:
+candidate assembly and confirmation.
+
+### Candidate assembly
+
+Searches for impls/where-clauses/etc that might
+possibly be used to satisfy the obligation. Each of those is called
+a candidate. To avoid ambiguity, we want to find exactly one
+candidate that is definitively applicable. In some cases, we may not
+know whether an impl/where-clause applies or not -- this occurs when
+the obligation contains unbound inference variables.
+
+The basic idea for candidate assembly is to do a first pass in which
+we identify all possible candidates. During this pass, all that we do
+is try and unify the type parameters. (In particular, we ignore any
+nested where clauses.) Presuming that this unification succeeds, the
+impl is added as a candidate.
+
+Once this first pass is done, we can examine the set of candidates. If
+it is a singleton set, then we are done: this is the only impl in
+scope that could possibly apply. Otherwise, we can winnow down the set
+of candidates by using where clauses and other conditions. If this
+reduced set yields a single, unambiguous entry, we're good to go,
+otherwise the result is considered ambiguous.
+
+#### The basic process: Inferring based on the impls we see
+
+This process is easier if we work through some examples. Consider
+the following trait:
+
+```
+trait Convert<Target> {
+    fn convert(&self) -> Target;
+}
+```
+
+This trait just has one method. It's about as simple as it gets. It
+converts from the (implicit) `Self` type to the `Target` type. If we
+wanted to permit conversion between `isize` and `usize`, we might
+implement `Convert` like so:
+
+```rust
+impl Convert<usize> for isize { ... } // isize -> usize
+impl Convert<isize> for usize { ... } // usize -> isize
+```
+
+Now imagine there is some code like the following:
+
+```rust
+let x: isize = ...;
+let y = x.convert();
+```
+
+The call to convert will generate a trait reference `Convert<$Y> for
+isize`, where `$Y` is the type variable representing the type of
+`y`. When we match this against the two impls we can see, we will find
+that only one remains: `Convert<usize> for isize`. Therefore, we can
+select this impl, which will cause the type of `$Y` to be unified to
+`usize`. (Note that while assembling candidates, we do the initial
+unifications in a transaction, so that they don't affect one another.)
+
+There are tests to this effect in src/test/run-pass:
+
+   traits-multidispatch-infer-convert-source-and-target.rs
+   traits-multidispatch-infer-convert-target.rs
+
+#### Winnowing: Resolving ambiguities
+
+But what happens if there are multiple impls where all the types
+unify? Consider this example:
+
+```rust
+trait Get {
+    fn get(&self) -> Self;
+}
+
+impl<T:Copy> Get for T {
+    fn get(&self) -> T { *self }
+}
+
+impl<T:Get> Get for Box<T> {
+    fn get(&self) -> Box<T> { box get_it(&**self) }
+}
+```
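+
+The example above invokes a helper `get_it` that is not defined in the
+text; presumably it is just a generic function that calls `get`. A
+self-contained sketch (repeating the `Get` trait and the blanket impl
+so the snippet stands alone, and avoiding the unstable `box` syntax):
+
+```rust
+trait Get {
+    fn get(&self) -> Self;
+}
+
+impl<T: Copy> Get for T {
+    fn get(&self) -> T { *self }
+}
+
+// Hypothetical helper: works for any `T` implementing `Get`.
+fn get_it<T: Get>(t: &T) -> T {
+    t.get()
+}
+
+fn main() {
+    assert_eq!(get_it(&1_u16), 1);
+}
+```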
+
+What happens when we invoke `get_it(&box 1_u16)`, for example? In this
+case, the `Self` type is `Box<u16>` -- that unifies with both impls,
+because the first applies to all types, and the second to all
+boxes. In the olden days we'd have called this ambiguous. But what we
+do now is do a second *winnowing* pass that considers where clauses
+and attempts to remove candidates -- in this case, the first impl only
+applies if `Box<u16> : Copy`, which doesn't hold. After winnowing,
+then, we are left with just one candidate, so we can proceed. There is
+a test of this in `src/test/run-pass/traits-conditional-dispatch.rs`.
+
+#### Matching
+
+The subroutines that decide whether a particular impl/where-clause/etc
+applies to a particular obligation. At the moment, this amounts to
+unifying the self types, but in the future we may also recursively
+consider some of the nested obligations, in the case of an impl.
+
+#### Lifetimes and selection
+
+Because of how lifetime inference works, it is not possible to
+give back immediate feedback as to whether a unification or subtype
+relationship between lifetimes holds or not. Therefore, lifetime
+matching is *not* considered during selection. This is reflected in
+the fact that subregion assignment is infallible. This may yield
+lifetime constraints that will later be found to be in error (in
+contrast, the non-lifetime-constraints have already been checked
+during selection and can never cause an error, though naturally they
+may lead to other errors downstream).
+
+#### Where clauses
+
+Besides an impl, the other major way to resolve an obligation is via a
+where clause. The selection process is always given a *parameter
+environment* which contains a list of where clauses, which are
+basically obligations that we can assume are satisfiable. We will iterate
+over that list and check whether our current obligation can be found
+in that list, and if so it is considered satisfied. More precisely, we
+want to check whether there is a where-clause obligation that is for
+the same trait (or some subtrait) and for which the self types match,
+using the definition of *matching* given above.
+
+Consider this simple example:
+
+     trait A1 { ... }
+     trait A2 : A1 { ... }
+
+     trait B { ... }
+
+     fn foo<X:A2+B>(x: X) { ... }
+
+Clearly we can use methods offered by `A1`, `A2`, or `B` within the
+body of `foo`. In each case, that will incur an obligation like `X :
+A1` or `X : A2`. The parameter environment will contain two
+where-clauses, `X : A2` and `X : B`. For each obligation, then, we
+search this list of where-clauses.  To resolve an obligation `X:A1`,
+we would note that `X:A2` implies that `X:A1`.
+
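+The same situation, written out as a self-contained program (with
+trivial method bodies standing in for the `...` elisions above):
+
+```rust
+trait A1 { fn a1(&self) {} }
+trait A2: A1 { fn a2(&self) {} }
+trait B { fn b(&self) {} }
+
+// Inside `foo`, the obligations `X: A1`, `X: A2`, and `X: B` are all
+// discharged from the where-clauses `X: A2` and `X: B` (with `X: A1`
+// following from the supertrait bound on `A2`).
+fn foo<X: A2 + B>(x: X) {
+    x.a1();
+    x.a2();
+    x.b();
+}
+
+struct S;
+impl A1 for S {}
+impl A2 for S {}
+impl B for S {}
+
+fn main() {
+    foo(S);
+}
+```
+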
+### Confirmation
+
+Confirmation unifies the output type parameters of the trait with the
+values found in the obligation, possibly yielding a type error.  If we
+return to our example of the `Convert` trait from the previous
+section, confirmation is where an error would be reported if, say, the
+impl specified that `Target` would be `usize` but the obligation demanded
+`char`. In that case the result of selection would be an error.
+
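+Concretely, the kind of mismatch described above looks like this (the
+method bodies are invented to make the snippet compile; the rejected
+line is left commented out):
+
+```rust
+trait Convert<Target> {
+    fn convert(&self) -> Target;
+}
+
+impl Convert<usize> for isize {
+    fn convert(&self) -> usize { *self as usize }
+}
+
+fn main() {
+    let x: isize = 5;
+    let y: usize = x.convert(); // confirmation unifies Target with usize: ok
+    // let z: char = x.convert(); // rejected: the selected impl fixes Target = usize,
+    //                            // but this obligation demands char
+    assert_eq!(y, 5);
+}
+```
+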
+### Selection during translation
+
+During type checking, we do not store the results of trait selection.
+We simply wish to verify that trait selection will succeed. Then
+later, at trans time, when we have all concrete types available, we
+can repeat the trait selection.  In this case, we do not consider any
+where-clauses to be in scope. We therefore know that each resolution
+will resolve to a particular impl.
+
+One interesting twist has to do with nested obligations. In general,
+in trans, we only need to do a "shallow" selection for an obligation.
+That is, we wish to identify which impl applies, but we do not (yet)
+need to decide how to select any nested obligations. Nonetheless, we
+*do* currently do a complete resolution, because it can sometimes
+inform the results of type inference: we do not have the full
+substitutions for the impl's type variables available to us, so we
+must run trait selection to figure everything out.
+
+Here is an example:
+
+    trait Foo { ... }
+    trait Bar<U> { ... }
+
+    impl<U,T:Bar<U>> Foo for Vec<T> { ... }
+    impl Bar<usize> for isize { ... }
+
+After one shallow round of selection for an obligation like `Vec<isize>
+: Foo`, we would know which impl we want, and we would know that
+`T=isize`, but we do not know the type of `U`.  We must select the
+nested obligation `isize : Bar<U>` to find out that `U=usize`.
+
+It would be good to only do *just as much* nested resolution as
+necessary. Currently, though, we just do a full resolution.
+
+# Higher-ranked trait bounds
+
+One of the more subtle concepts at work is that of *higher-ranked
+trait bounds*. An example of such a bound is `for<'a> MyTrait<&'a isize>`.
+Let's walk through how selection on higher-ranked trait references
+works.
+
+## Basic matching and skolemization leaks
+
+Let's walk through the test `compile-fail/hrtb-just-for-static.rs` to see
+how it works. The test starts with the trait `Foo`:
+
+```rust
+trait Foo<X> {
+    fn foo(&self, x: X) { }
+}
+```
+
+Let's say we have a function `want_hrtb` that wants a type which
+implements `Foo<&'a isize>` for any `'a`:
+
+```rust
+fn want_hrtb<T>() where T : for<'a> Foo<&'a isize> { ... }
+```
+
+Now we have a struct `AnyInt` that implements `Foo<&'a isize>` for any
+`'a`:
+
+```rust
+struct AnyInt;
+impl<'a> Foo<&'a isize> for AnyInt { }
+```
+
+And the question is, does `AnyInt : for<'a> Foo<&'a isize>`? We want the
+answer to be yes. The algorithm for figuring it out is closely related
+to the subtyping for higher-ranked types (which is described in
+`middle::infer::higher_ranked::doc`, but also in a [paper by SPJ] that
+I recommend you read).
+
+1. Skolemize the obligation.
+2. Match the impl against the skolemized obligation.
+3. Check for skolemization leaks.
+
+[paper by SPJ]: http://research.microsoft.com/en-us/um/people/simonpj/papers/higher-rank/
+
+So let's work through our example. The first thing we would do is to
+skolemize the obligation, yielding `AnyInt : Foo<&'0 isize>` (here `'0`
+represents skolemized region #0). Note that we now have no quantifiers;
+in terms of the compiler type, this changes from a `ty::PolyTraitRef`
+to a `TraitRef`. We would then create the `TraitRef` from the impl,
+using fresh variables for its bound regions (and thus getting
+`Foo<&'$a isize>`, where `'$a` is the inference variable for `'a`). Next
+we relate the two trait refs, yielding a graph with the constraint
+that `'0 == '$a`. Finally, we check for skolemization "leaks" -- a
+leak is basically any attempt to relate a skolemized region to another
+skolemized region, or to any region that pre-existed the impl match.
+The leak check is done by searching from the skolemized region to find
+the set of regions that it is related to in any way. This is called
+the "taint" set. To pass the check, that set must consist *solely* of
+itself and region variables from the impl. If the taint set includes
+any other region, then the match is a failure. In this case, the taint
+set for `'0` is `{'0, '$a}`, and hence the check will succeed.
+
+Let's consider a failure case. Imagine we also have a struct
+
+```rust
+struct StaticInt;
+impl Foo<&'static isize> for StaticInt { }
+```
+
+We want the obligation `StaticInt : for<'a> Foo<&'a isize>` to be
+considered unsatisfied. The check begins just as before. `'a` is
+skolemized to `'0` and the impl trait reference is instantiated to
+`Foo<&'static isize>`. When we relate those two, we get a constraint
+like `'static == '0`. This means that the taint set for `'0` is `{'0,
+'static}`, which fails the leak check.
+
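+Putting the two cases together, here is a self-contained adaptation
+(a value parameter is added so `foo` can actually be called) that
+type-checks for `AnyInt`; the analogous call for a `'static`-only
+implementor such as `StaticInt` would be rejected:
+
+```rust
+trait Foo<X> {
+    fn foo(&self, x: X) { let _ = x; }
+}
+
+fn want_hrtb<T>(t: T) where T: for<'a> Foo<&'a isize> {
+    let i: isize = 5;
+    t.foo(&i); // usable with any lifetime, including this short one
+}
+
+struct AnyInt;
+impl<'a> Foo<&'a isize> for AnyInt {}
+
+fn main() {
+    want_hrtb(AnyInt); // ok: AnyInt implements Foo<&'a isize> for every 'a
+}
+```
+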
+## Higher-ranked trait obligations
+
+Once the basic matching is done, we get to another interesting topic:
+how to deal with impl obligations. I'll work through a simple example
+here. Imagine we have the traits `Foo` and `Bar` and an associated impl:
+
+```
+trait Foo<X> {
+    fn foo(&self, x: X) { }
+}
+
+trait Bar<X> {
+    fn bar(&self, x: X) { }
+}
+
+impl<X,F> Foo<X> for F
+    where F : Bar<X>
+{
+}
+```
+
+Now let's say we have an obligation `for<'a> Foo<&'a isize>` and we match
+this impl. What obligation is generated as a result? We want to get
+`for<'a> Bar<&'a isize>`, but how does that happen?
+
+After the matching, we are in a position where we have a skolemized
+substitution like `X => &'0 isize`. If we apply this substitution to the
+impl obligations, we get `F : Bar<&'0 isize>`. Obviously this is not
+directly usable because the skolemized region `'0` cannot leak out of
+our computation.
+
+What we do is to create an inverse mapping from the taint set of `'0`
+back to the original bound region (`'a`, here) that `'0` resulted
+from. (This is done in `higher_ranked::plug_leaks`). We know that the
+leak check passed, so this taint set consists solely of the skolemized
+region itself plus various intermediate region variables. We then walk
+the trait-reference and convert every region in that taint set back to
+a late-bound region, so in this case we'd wind up with `for<'a> F :
+Bar<&'a isize>`.
+
+# Caching and subtle considerations therewith
+
+In general we attempt to cache the results of trait selection.  This
+is a somewhat complex process. Part of the reason for this is that we
+want to be able to cache results even when all the types in the trait
+reference are not fully known. In that case, it may happen that the
+trait selection process is also influencing type variables, so we have
+to be able to not only cache the *result* of the selection process,
+but *replay* its effects on the type variables.
+
+## An example
+
+The high-level idea of how the cache works is that we first replace
+all unbound inference variables with skolemized versions. Therefore,
+if we had a trait reference `usize : Foo<$1>`, where `$n` is an unbound
+inference variable, we might replace it with `usize : Foo<%0>`, where
+`%n` is a skolemized type. We would then look this up in the cache.
+If we found a hit, the hit would tell us the immediate next step to
+take in the selection process: i.e., apply impl #22, or apply where
+clause `X : Foo<Y>`. Let's say in this case there is no hit.
+Therefore, we search through impls and where clauses and so forth, and
+we come to the conclusion that the only possible impl is this one,
+with def-id 22:
+
+    impl Foo<isize> for usize { ... } // Impl #22
+
+We would then record in the cache `usize : Foo<%0> ==>
+ImplCandidate(22)`. Next we would confirm `ImplCandidate(22)`, which
+would (as a side-effect) unify `$1` with `isize`.
+
+Now, at some later time, we might come along and see a `usize :
+Foo<$3>`.  When skolemized, this would yield `usize : Foo<%0>`, just as
+before, and hence the cache lookup would succeed, yielding
+`ImplCandidate(22)`. We would confirm `ImplCandidate(22)` which would
+(as a side-effect) unify `$3` with `isize`.
+
+## Where clauses and the local vs global cache
+
+One subtle interaction is that the results of trait lookup will vary
+depending on what where clauses are in scope. Therefore, we actually
+have *two* caches, a local and a global cache. The local cache is
+attached to the `ParameterEnvironment` and the global cache attached
+to the `tcx`. We use the local cache whenever the result might depend
+on the where clauses that are in scope. The determination of which
+cache to use is done by the method `pick_candidate_cache` in
+`select.rs`. At the moment, we use a very simple, conservative rule:
+if there are any where-clauses in scope, then we use the local cache.
+We used to try and draw finer-grained distinctions, but that led to a
+series of annoying and weird bugs like #22019 and #18290. This simple
+rule seems to be pretty clearly safe and also still retains a very
+high hit rate (~95% when compiling rustc).
+
+# Specialization
+
+Defined in the `specialize` module.
+
+The basic strategy is to build up a *specialization graph* during
+coherence checking. Insertion into the graph locates the right place
+to put an impl in the specialization hierarchy; if there is no right
+place (due to partial overlap but no containment), you get an overlap
+error. Specialization is consulted when selecting an impl (of course),
+and the graph is consulted when propagating defaults down the
+specialization hierarchy.
+
+You might expect that the specialization graph would be used during
+selection -- i.e., when actually performing specialization. This is
+not done for two reasons:
+
+- It's merely an optimization: given a set of candidates that apply,
+  we can determine the most specialized one by comparing them directly
+  for specialization, rather than consulting the graph. Given that we
+  also cache the results of selection, the benefit of this
+  optimization is questionable.
+
+- To build the specialization graph in the first place, we need to use
+  selection (because we need to determine whether one impl specializes
+  another). Dealing with this reentrancy would require some additional
+  mode switch for selection. Given that there seems to be no strong
+  reason to use the graph anyway, we stick with a simpler approach in
+  selection, and use the graph only for propagating default
+  implementations.
+
+Trait impl selection can succeed even when multiple impls can apply,
+as long as they are part of the same specialization family. In that
+case, it returns a *single* impl on success -- this is the most
+specialized impl *known* to apply. However, if there are any inference
+variables in play, the returned impl may not be the actual impl we
+will use at trans time. Thus, we take special care to avoid projecting
+associated types unless either (1) the associated type does not use
+`default` and thus cannot be overridden or (2) all input types are
+known concretely.
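+
+To see the "specialization family" idea from the user's side, here is a
+small example that requires a nightly compiler (specialization is
+feature-gated). Both impls apply to `u8`, but they form a
+specialization family, so selection returns the more specific one:
+
+```rust
+#![feature(specialization)]
+
+trait Greet {
+    fn greet(&self) -> String;
+}
+
+// Blanket impl: the less specialized member of the family.
+impl<T> Greet for T {
+    default fn greet(&self) -> String { "hello, generic".to_string() }
+}
+
+// Strictly more specific, so coherence accepts the overlap and
+// selection prefers this impl for `u8`.
+impl Greet for u8 {
+    fn greet(&self) -> String { "hello, u8".to_string() }
+}
+
+fn main() {
+    assert_eq!(0u8.greet(), "hello, u8");
+    assert_eq!('x'.greet(), "hello, generic");
+}
+```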
diff --git a/src/librustc/traits/coherence.rs b/src/librustc/traits/coherence.rs
new file mode 100644 (file)
index 0000000..6710d2a
--- /dev/null
@@ -0,0 +1,297 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! See `README.md` for high-level documentation
+
+use super::{SelectionContext, Obligation, ObligationCause};
+
+use middle::cstore::LOCAL_CRATE;
+use hir::def_id::DefId;
+use ty::subst::TypeSpace;
+use ty::{self, Ty, TyCtxt};
+use infer::{self, InferCtxt, TypeOrigin};
+use syntax::codemap::DUMMY_SP;
+
+#[derive(Copy, Clone)]
+struct InferIsLocal(bool);
+
+/// If there are types that satisfy both impls, returns a suitably-freshened
+/// `ImplHeader` with those types substituted
+pub fn overlapping_impls<'cx, 'tcx>(infcx: &InferCtxt<'cx, 'tcx>,
+                                    impl1_def_id: DefId,
+                                    impl2_def_id: DefId)
+                                    -> Option<ty::ImplHeader<'tcx>>
+{
+    debug!("impl_can_satisfy(\
+           impl1_def_id={:?}, \
+           impl2_def_id={:?})",
+           impl1_def_id,
+           impl2_def_id);
+
+    let selcx = &mut SelectionContext::intercrate(infcx);
+    overlap(selcx, impl1_def_id, impl2_def_id)
+}
+
+/// Can both impl `a` and impl `b` be satisfied by a common type (including
+/// `where` clauses)? If so, returns an `ImplHeader` that unifies the two impls.
+fn overlap<'cx, 'tcx>(selcx: &mut SelectionContext<'cx, 'tcx>,
+                      a_def_id: DefId,
+                      b_def_id: DefId)
+                      -> Option<ty::ImplHeader<'tcx>>
+{
+    debug!("overlap(a_def_id={:?}, b_def_id={:?})",
+           a_def_id,
+           b_def_id);
+
+    let a_impl_header = ty::ImplHeader::with_fresh_ty_vars(selcx, a_def_id);
+    let b_impl_header = ty::ImplHeader::with_fresh_ty_vars(selcx, b_def_id);
+
+    debug!("overlap: a_impl_header={:?}", a_impl_header);
+    debug!("overlap: b_impl_header={:?}", b_impl_header);
+
+    // Do `a` and `b` unify? If not, no overlap.
+    if let Err(_) = infer::mk_eq_impl_headers(selcx.infcx(),
+                                              true,
+                                              TypeOrigin::Misc(DUMMY_SP),
+                                              &a_impl_header,
+                                              &b_impl_header) {
+        return None;
+    }
+
+    debug!("overlap: unification check succeeded");
+
+    // Are any of the obligations unsatisfiable? If so, no overlap.
+    let infcx = selcx.infcx();
+    let opt_failing_obligation =
+        a_impl_header.predicates
+                     .iter()
+                     .chain(&b_impl_header.predicates)
+                     .map(|p| infcx.resolve_type_vars_if_possible(p))
+                     .map(|p| Obligation { cause: ObligationCause::dummy(),
+                                           recursion_depth: 0,
+                                           predicate: p })
+                     .find(|o| !selcx.evaluate_obligation(o));
+
+    if let Some(failing_obligation) = opt_failing_obligation {
+        debug!("overlap: obligation unsatisfiable {:?}", failing_obligation);
+        return None
+    }
+
+    Some(selcx.infcx().resolve_type_vars_if_possible(&a_impl_header))
+}
+
+pub fn trait_ref_is_knowable<'tcx>(tcx: &TyCtxt<'tcx>, trait_ref: &ty::TraitRef<'tcx>) -> bool
+{
+    debug!("trait_ref_is_knowable(trait_ref={:?})", trait_ref);
+
+    // if the orphan rules pass, that means that no ancestor crate can
+    // impl this, so it's up to us.
+    if orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(false)).is_ok() {
+        debug!("trait_ref_is_knowable: orphan check passed");
+        return true;
+    }
+
+    // if the trait is not marked fundamental, then it's always possible that
+    // an ancestor crate will impl this in the future, if they haven't
+    // already
+    if
+        trait_ref.def_id.krate != LOCAL_CRATE &&
+        !tcx.has_attr(trait_ref.def_id, "fundamental")
+    {
+        debug!("trait_ref_is_knowable: trait is neither local nor fundamental");
+        return false;
+    }
+
+    // find out when some downstream (or cousin) crate could impl this
+    // trait-ref, presuming that all the parameters were instantiated
+    // with downstream types. If not, then it could only be
+    // implemented by an upstream crate, which means that the impl
+    // must be visible to us, and -- since the trait is fundamental
+    // -- we can test.
+    orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(true)).is_err()
+}
+
+pub enum OrphanCheckErr<'tcx> {
+    NoLocalInputType,
+    UncoveredTy(Ty<'tcx>),
+}
+
+/// Checks the coherence orphan rules. `impl_def_id` should be the
+/// def-id of a trait impl. To pass, either the trait must be local, or else
+/// two conditions must be satisfied:
+///
+/// 1. All type parameters in `Self` must be "covered" by some local type constructor.
+/// 2. Some local type must appear in `Self`.
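+///
+/// For example, if `Remote` is a trait defined in another crate and
+/// `Local` is a type defined here, then `impl Remote for Local` is
+/// accepted (a local type appears and there are no uncovered type
+/// parameters), whereas `impl<T> Remote for T` is rejected because the
+/// parameter `T` is not covered by any local type.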
+pub fn orphan_check<'tcx>(tcx: &TyCtxt<'tcx>,
+                          impl_def_id: DefId)
+                          -> Result<(), OrphanCheckErr<'tcx>>
+{
+    debug!("orphan_check({:?})", impl_def_id);
+
+    // We only expect this routine to be invoked on implementations
+    // of a trait, not inherent implementations.
+    let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
+    debug!("orphan_check: trait_ref={:?}", trait_ref);
+
+    // If the *trait* is local to the crate, ok.
+    if trait_ref.def_id.is_local() {
+        debug!("trait {:?} is local to current crate",
+               trait_ref.def_id);
+        return Ok(());
+    }
+
+    orphan_check_trait_ref(tcx, &trait_ref, InferIsLocal(false))
+}
+
+fn orphan_check_trait_ref<'tcx>(tcx: &TyCtxt<'tcx>,
+                                trait_ref: &ty::TraitRef<'tcx>,
+                                infer_is_local: InferIsLocal)
+                                -> Result<(), OrphanCheckErr<'tcx>>
+{
+    debug!("orphan_check_trait_ref(trait_ref={:?}, infer_is_local={})",
+           trait_ref, infer_is_local.0);
+
+    // First, create an ordered iterator over all the type parameters to the trait, with the self
+    // type appearing first.
+    let input_tys = Some(trait_ref.self_ty());
+    let input_tys = input_tys.iter().chain(trait_ref.substs.types.get_slice(TypeSpace));
+
+    // Find the first input type that either references a type parameter OR
+    // some local type.
+    for input_ty in input_tys {
+        if ty_is_local(tcx, input_ty, infer_is_local) {
+            debug!("orphan_check_trait_ref: ty_is_local `{:?}`", input_ty);
+
+            // First local input type. Check that there are no
+            // uncovered type parameters.
+            let uncovered_tys = uncovered_tys(tcx, input_ty, infer_is_local);
+            for uncovered_ty in uncovered_tys {
+                if let Some(param) = uncovered_ty.walk().find(|t| is_type_parameter(t)) {
+                    debug!("orphan_check_trait_ref: uncovered type `{:?}`", param);
+                    return Err(OrphanCheckErr::UncoveredTy(param));
+                }
+            }
+
+            // OK, found local type, all prior types upheld invariant.
+            return Ok(());
+        }
+
+        // Otherwise, enforce invariant that there are no type
+        // parameters reachable.
+        if !infer_is_local.0 {
+            if let Some(param) = input_ty.walk().find(|t| is_type_parameter(t)) {
+                debug!("orphan_check_trait_ref: uncovered type `{:?}`", param);
+                return Err(OrphanCheckErr::UncoveredTy(param));
+            }
+        }
+    }
+
+    // If we exit the above loop, we never found a local type.
+    debug!("orphan_check_trait_ref: no local type");
+    return Err(OrphanCheckErr::NoLocalInputType);
+}
+
+fn uncovered_tys<'tcx>(tcx: &TyCtxt<'tcx>,
+                       ty: Ty<'tcx>,
+                       infer_is_local: InferIsLocal)
+                       -> Vec<Ty<'tcx>>
+{
+    if ty_is_local_constructor(tcx, ty, infer_is_local) {
+        vec![]
+    } else if fundamental_ty(tcx, ty) {
+        ty.walk_shallow()
+          .flat_map(|t| uncovered_tys(tcx, t, infer_is_local))
+          .collect()
+    } else {
+        vec![ty]
+    }
+}
+
+fn is_type_parameter<'tcx>(ty: Ty<'tcx>) -> bool {
+    match ty.sty {
+        // FIXME(#20590) straighten story about projection types
+        ty::TyProjection(..) | ty::TyParam(..) => true,
+        _ => false,
+    }
+}
+
+fn ty_is_local<'tcx>(tcx: &TyCtxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> bool
+{
+    ty_is_local_constructor(tcx, ty, infer_is_local) ||
+        fundamental_ty(tcx, ty) && ty.walk_shallow().any(|t| ty_is_local(tcx, t, infer_is_local))
+}
+
+fn fundamental_ty<'tcx>(tcx: &TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool
+{
+    match ty.sty {
+        ty::TyBox(..) | ty::TyRef(..) =>
+            true,
+        ty::TyEnum(def, _) | ty::TyStruct(def, _) =>
+            def.is_fundamental(),
+        ty::TyTrait(ref data) =>
+            tcx.has_attr(data.principal_def_id(), "fundamental"),
+        _ =>
+            false
+    }
+}
+
+fn ty_is_local_constructor<'tcx>(tcx: &TyCtxt<'tcx>,
+                                 ty: Ty<'tcx>,
+                                 infer_is_local: InferIsLocal)
+                                 -> bool
+{
+    debug!("ty_is_local_constructor({:?})", ty);
+
+    match ty.sty {
+        ty::TyBool |
+        ty::TyChar |
+        ty::TyInt(..) |
+        ty::TyUint(..) |
+        ty::TyFloat(..) |
+        ty::TyStr |
+        ty::TyFnDef(..) |
+        ty::TyFnPtr(_) |
+        ty::TyArray(..) |
+        ty::TySlice(..) |
+        ty::TyRawPtr(..) |
+        ty::TyRef(..) |
+        ty::TyTuple(..) |
+        ty::TyParam(..) |
+        ty::TyProjection(..) => {
+            false
+        }
+
+        ty::TyInfer(..) => {
+            infer_is_local.0
+        }
+
+        ty::TyEnum(def, _) |
+        ty::TyStruct(def, _) => {
+            def.did.is_local()
+        }
+
+        ty::TyBox(_) => { // Box<T>
+            let krate = tcx.lang_items.owned_box().map(|d| d.krate);
+            krate == Some(LOCAL_CRATE)
+        }
+
+        ty::TyTrait(ref tt) => {
+            tt.principal_def_id().is_local()
+        }
+
+        ty::TyError => {
+            true
+        }
+
+        ty::TyClosure(..) => {
+            bug!("ty_is_local invoked on unexpected type: {:?}", ty)
+        }
+    }
+}
diff --git a/src/librustc/traits/error_reporting.rs b/src/librustc/traits/error_reporting.rs
new file mode 100644 (file)
index 0000000..286733c
--- /dev/null
@@ -0,0 +1,886 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::{
+    FulfillmentError,
+    FulfillmentErrorCode,
+    MismatchedProjectionTypes,
+    Obligation,
+    ObligationCause,
+    ObligationCauseCode,
+    OutputTypeParameterMismatch,
+    TraitNotObjectSafe,
+    PredicateObligation,
+    SelectionContext,
+    SelectionError,
+    ObjectSafetyViolation,
+    MethodViolationCode,
+    object_safety_violations,
+};
+
+use fmt_macros::{Parser, Piece, Position};
+use hir::def_id::DefId;
+use infer::InferCtxt;
+use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt};
+use ty::fast_reject;
+use ty::fold::{TypeFoldable, TypeFolder};
+use util::nodemap::{FnvHashMap, FnvHashSet};
+
+use std::cmp;
+use std::fmt;
+use syntax::attr::{AttributeMethods, AttrMetaMethods};
+use syntax::codemap::Span;
+use syntax::errors::DiagnosticBuilder;
+
+#[derive(Debug, PartialEq, Eq, Hash)]
+pub struct TraitErrorKey<'tcx> {
+    span: Span,
+    predicate: ty::Predicate<'tcx>
+}
+
+impl<'tcx> TraitErrorKey<'tcx> {
+    fn from_error<'a>(infcx: &InferCtxt<'a, 'tcx>,
+                      e: &FulfillmentError<'tcx>) -> Self {
+        let predicate =
+            infcx.resolve_type_vars_if_possible(&e.obligation.predicate);
+        TraitErrorKey {
+            span: e.obligation.cause.span,
+            predicate: infcx.tcx.erase_regions(&predicate)
+        }
+    }
+}
+
+pub fn report_fulfillment_errors<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                                           errors: &Vec<FulfillmentError<'tcx>>) {
+    for error in errors {
+        report_fulfillment_error(infcx, error);
+    }
+}
+
+fn report_fulfillment_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                                      error: &FulfillmentError<'tcx>) {
+    let error_key = TraitErrorKey::from_error(infcx, error);
+    debug!("report_fulfillment_errors({:?}) - key={:?}",
+           error, error_key);
+    if !infcx.reported_trait_errors.borrow_mut().insert(error_key) {
+        debug!("report_fulfillment_errors: skipping duplicate");
+        return;
+    }
+    match error.code {
+        FulfillmentErrorCode::CodeSelectionError(ref e) => {
+            report_selection_error(infcx, &error.obligation, e);
+        }
+        FulfillmentErrorCode::CodeProjectionError(ref e) => {
+            report_projection_error(infcx, &error.obligation, e);
+        }
+        FulfillmentErrorCode::CodeAmbiguity => {
+            maybe_report_ambiguity(infcx, &error.obligation);
+        }
+    }
+}
+
+pub fn report_projection_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                                         obligation: &PredicateObligation<'tcx>,
+                                         error: &MismatchedProjectionTypes<'tcx>)
+{
+    let predicate =
+        infcx.resolve_type_vars_if_possible(&obligation.predicate);
+
+    if !predicate.references_error() {
+        let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0271,
+            "type mismatch resolving `{}`: {}",
+            predicate,
+            error.err);
+        note_obligation_cause(infcx, &mut err, obligation);
+        err.emit();
+    }
+}
+
+fn on_unimplemented_note<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                                   trait_ref: ty::PolyTraitRef<'tcx>,
+                                   span: Span) -> Option<String> {
+    let trait_ref = trait_ref.skip_binder();
+    let def_id = trait_ref.def_id;
+    let mut report = None;
+    for item in infcx.tcx.get_attrs(def_id).iter() {
+        if item.check_name("rustc_on_unimplemented") {
+            let err_sp = item.meta().span.substitute_dummy(span);
+            let def = infcx.tcx.lookup_trait_def(def_id);
+            let trait_str = def.trait_ref.to_string();
+            if let Some(ref istring) = item.value_str() {
+                let mut generic_map = def.generics.types.iter_enumerated()
+                                         .map(|(param, i, gen)| {
+                                               (gen.name.as_str().to_string(),
+                                                trait_ref.substs.types.get(param, i)
+                                                         .to_string())
+                                              }).collect::<FnvHashMap<String, String>>();
+                generic_map.insert("Self".to_string(),
+                                   trait_ref.self_ty().to_string());
+                let parser = Parser::new(&istring);
+                let mut errored = false;
+                let err: String = parser.filter_map(|p| {
+                    match p {
+                        Piece::String(s) => Some(s),
+                        Piece::NextArgument(a) => match a.position {
+                            Position::ArgumentNamed(s) => match generic_map.get(s) {
+                                Some(val) => Some(val),
+                                None => {
+                                    span_err!(infcx.tcx.sess, err_sp, E0272,
+                                                   "the #[rustc_on_unimplemented] \
+                                                            attribute on \
+                                                            trait definition for {} refers to \
+                                                            non-existent type parameter {}",
+                                                           trait_str, s);
+                                    errored = true;
+                                    None
+                                }
+                            },
+                            _ => {
+                                     span_err!(infcx.tcx.sess, err_sp, E0273,
+                                               "the #[rustc_on_unimplemented] \
+                                                        attribute on \
+                                                        trait definition for {} must have named \
+                                                        format arguments, \
+                                                        eg `#[rustc_on_unimplemented = \
+                                                        \"foo {{T}}\"]`",
+                                                       trait_str);
+                                errored = true;
+                                None
+                            }
+                        }
+                    }
+                }).collect();
+                // Report only if the format string checks out
+                if !errored {
+                    report = Some(err);
+                }
+            } else {
+                span_err!(infcx.tcx.sess, err_sp, E0274,
+                                        "the #[rustc_on_unimplemented] attribute on \
+                                                 trait definition for {} must have a value, \
+                                                 eg `#[rustc_on_unimplemented = \"foo\"]`",
+                                                 trait_str);
+            }
+            break;
+        }
+    }
+    report
+}
+
+fn find_similar_impl_candidates<'a, 'tcx>(
+    infcx: &InferCtxt<'a, 'tcx>,
+    trait_ref: ty::PolyTraitRef<'tcx>)
+    -> Vec<ty::TraitRef<'tcx>>
+{
+    let simp = fast_reject::simplify_type(infcx.tcx,
+                                          trait_ref.skip_binder().self_ty(),
+                                          true);
+    let mut impl_candidates = Vec::new();
+    let trait_def = infcx.tcx.lookup_trait_def(trait_ref.def_id());
+
+    match simp {
+        Some(simp) => trait_def.for_each_impl(infcx.tcx, |def_id| {
+            let imp = infcx.tcx.impl_trait_ref(def_id).unwrap();
+            let imp_simp = fast_reject::simplify_type(infcx.tcx,
+                                                      imp.self_ty(),
+                                                      true);
+            if let Some(imp_simp) = imp_simp {
+                if simp != imp_simp {
+                    return;
+                }
+            }
+            impl_candidates.push(imp);
+        }),
+        None => trait_def.for_each_impl(infcx.tcx, |def_id| {
+            impl_candidates.push(
+                infcx.tcx.impl_trait_ref(def_id).unwrap());
+        })
+    };
+    impl_candidates
+}
+
+fn report_similar_impl_candidates(span: Span,
+                                  err: &mut DiagnosticBuilder,
+                                  impl_candidates: &[ty::TraitRef])
+{
+    err.fileline_help(span, &format!("the following implementations were found:"));
+
+    let end = cmp::min(4, impl_candidates.len());
+    for candidate in &impl_candidates[0..end] {
+        err.fileline_help(span, &format!("  {:?}", candidate));
+    }
+    if impl_candidates.len() > 4 {
+        err.fileline_help(span, &format!("and {} others", impl_candidates.len()-4));
+    }
+}
+
+/// Reports that an overflow has occurred and halts compilation. We
+/// halt compilation unconditionally because it is important that
+/// overflows never be masked -- they basically represent computations
+/// whose result could not be truly determined and thus we can't say
+/// if the program type checks or not -- and they are unusual
+/// occurrences in any case.
+pub fn report_overflow_error<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>,
+                                          obligation: &Obligation<'tcx, T>,
+                                          suggest_increasing_limit: bool)
+                                          -> !
+    where T: fmt::Display + TypeFoldable<'tcx>
+{
+    let predicate =
+        infcx.resolve_type_vars_if_possible(&obligation.predicate);
+    let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0275,
+                                   "overflow evaluating the requirement `{}`",
+                                   predicate);
+
+    if suggest_increasing_limit {
+        suggest_new_overflow_limit(infcx.tcx, &mut err, obligation.cause.span);
+    }
+
+    note_obligation_cause(infcx, &mut err, obligation);
+
+    err.emit();
+    infcx.tcx.sess.abort_if_errors();
+    bug!();
+}
+
+/// Reports that a cycle was detected which led to overflow and halts
+/// compilation. This is equivalent to `report_overflow_error` except
+/// that we can give a more helpful error message (and, in particular,
+/// we do not suggest increasing the overflow limit, which is not
+/// going to help).
+pub fn report_overflow_error_cycle<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                                             cycle: &Vec<PredicateObligation<'tcx>>)
+                                             -> !
+{
+    assert!(cycle.len() > 1);
+
+    debug!("report_overflow_error_cycle(cycle length = {})", cycle.len());
+
+    let cycle = infcx.resolve_type_vars_if_possible(cycle);
+
+    debug!("report_overflow_error_cycle: cycle={:?}", cycle);
+
+    assert_eq!(&cycle[0].predicate, &cycle.last().unwrap().predicate);
+
+    try_report_overflow_error_type_of_infinite_size(infcx, &cycle);
+    report_overflow_error(infcx, &cycle[0], false);
+}
+
+/// If a cycle results from evaluating whether something is Sized, that
+/// is a particular special case that always results from a struct or
+/// enum definition that lacks indirection (e.g., `struct Foo { x: Foo
+/// }`). We wish to report a targeted error for this case.
+pub fn try_report_overflow_error_type_of_infinite_size<'a, 'tcx>(
+    infcx: &InferCtxt<'a, 'tcx>,
+    cycle: &[PredicateObligation<'tcx>])
+{
+    let sized_trait = match infcx.tcx.lang_items.sized_trait() {
+        Some(v) => v,
+        None => return,
+    };
+    let top_is_sized = {
+        match cycle[0].predicate {
+            ty::Predicate::Trait(ref data) => data.def_id() == sized_trait,
+            _ => false,
+        }
+    };
+    if !top_is_sized {
+        return;
+    }
+
+    // The only way to have a type of infinite size is to have,
+    // somewhere, a struct/enum type involved. Identify all such types
+    // and report the cycle to the user.
+
+    let struct_enum_tys: Vec<_> =
+        cycle.iter()
+             .flat_map(|obligation| match obligation.predicate {
+                 ty::Predicate::Trait(ref data) => {
+                     assert_eq!(data.def_id(), sized_trait);
+                     let self_ty = data.skip_binder().trait_ref.self_ty(); // (*)
+                     // (*) ok to skip binder because this is just
+                     // error reporting and regions don't really
+                     // matter
+                     match self_ty.sty {
+                         ty::TyEnum(..) | ty::TyStruct(..) => Some(self_ty),
+                         _ => None,
+                     }
+                 }
+                 _ => {
+                     span_bug!(obligation.cause.span,
+                               "Sized cycle involving non-trait-ref: {:?}",
+                               obligation.predicate);
+                 }
+             })
+             .collect();
+
+    assert!(!struct_enum_tys.is_empty());
+
+    // This is a bit tricky. We want to pick a "main type" in the
+    // listing that is local to the current crate, so we can give a
+    // good span to the user. But it might not be the first one in our
+    // cycle list. So find the first one that is local and then
+    // rotate.
+    let (main_index, main_def_id) =
+        struct_enum_tys.iter()
+                       .enumerate()
+                       .filter_map(|(index, ty)| match ty.sty {
+                           ty::TyEnum(adt_def, _) | ty::TyStruct(adt_def, _)
+                               if adt_def.did.is_local() =>
+                               Some((index, adt_def.did)),
+                           _ =>
+                               None,
+                       })
+                       .next()
+                       .unwrap(); // should always be SOME local type involved!
+
+    // Rotate so that the "main" type is at index 0.
+    let struct_enum_tys: Vec<_> =
+        struct_enum_tys.iter()
+                       .cloned()
+                       .skip(main_index)
+                       .chain(struct_enum_tys.iter().cloned().take(main_index))
+                       .collect();
+
+    let tcx = infcx.tcx;
+    let mut err = recursive_type_with_infinite_size_error(tcx, main_def_id);
+    let len = struct_enum_tys.len();
+    if len > 2 {
+        let span = tcx.map.span_if_local(main_def_id).unwrap();
+        err.fileline_note(span,
+                          &format!("type `{}` is embedded within `{}`...",
+                                   struct_enum_tys[0],
+                                   struct_enum_tys[1]));
+        for &next_ty in &struct_enum_tys[1..len-1] {
+            err.fileline_note(span,
+                              &format!("...which in turn is embedded within `{}`...", next_ty));
+        }
+        err.fileline_note(span,
+                          &format!("...which in turn is embedded within `{}`, \
+                                    completing the cycle.",
+                                   struct_enum_tys[len-1]));
+    }
+    err.emit();
+    infcx.tcx.sess.abort_if_errors();
+    bug!();
+}
+
+pub fn recursive_type_with_infinite_size_error<'tcx>(tcx: &TyCtxt<'tcx>,
+                                                     type_def_id: DefId)
+                                                     -> DiagnosticBuilder<'tcx>
+{
+    assert!(type_def_id.is_local());
+    let span = tcx.map.span_if_local(type_def_id).unwrap();
+    let mut err = struct_span_err!(tcx.sess, span, E0072, "recursive type `{}` has infinite size",
+                                   tcx.item_path_str(type_def_id));
+    err.fileline_help(span, &format!("insert indirection (e.g., a `Box`, `Rc`, or `&`) \
+                                      at some point to make `{}` representable",
+                                     tcx.item_path_str(type_def_id)));
+    err
+}
+
+pub fn report_selection_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                                        obligation: &PredicateObligation<'tcx>,
+                                        error: &SelectionError<'tcx>)
+{
+    match *error {
+        SelectionError::Unimplemented => {
+            if let ObligationCauseCode::CompareImplMethodObligation = obligation.cause.code {
+                span_err!(
+                    infcx.tcx.sess, obligation.cause.span, E0276,
+                    "the requirement `{}` appears on the impl \
+                     method but not on the corresponding trait method",
+                    obligation.predicate);
+            } else {
+                match obligation.predicate {
+                    ty::Predicate::Trait(ref trait_predicate) => {
+                        let trait_predicate =
+                            infcx.resolve_type_vars_if_possible(trait_predicate);
+
+                        if !infcx.tcx.sess.has_errors() || !trait_predicate.references_error() {
+                            let trait_ref = trait_predicate.to_poly_trait_ref();
+                            let mut err = struct_span_err!(
+                                infcx.tcx.sess, obligation.cause.span, E0277,
+                                "the trait bound `{}` is not satisfied",
+                                trait_ref.to_predicate());
+
+                            // Try to report a help message
+
+                            if !trait_ref.has_infer_types() &&
+                                predicate_can_apply(infcx, trait_ref)
+                            {
+                                // If a where-clause may be useful, remind the
+                                // user that they can add it.
+                                //
+                                // don't display an on-unimplemented note, as
+                                // these notes will often be of the form
+                                //     "the type `T` can't be frobnicated"
+                                // which is somewhat confusing.
+                                err.fileline_help(obligation.cause.span, &format!(
+                                    "consider adding a `where {}` bound",
+                                    trait_ref.to_predicate()
+                                    ));
+                            } else if let Some(s) = on_unimplemented_note(infcx, trait_ref,
+                                                                          obligation.cause.span) {
+                                // Otherwise, if there is an on-unimplemented note,
+                                // display it.
+                                err.fileline_note(obligation.cause.span, &s);
+                            } else {
+                                // If we can't show anything useful, try to find
+                                // similar impls.
+
+                                let impl_candidates =
+                                    find_similar_impl_candidates(infcx, trait_ref);
+                                if impl_candidates.len() > 0 {
+                                    report_similar_impl_candidates(obligation.cause.span,
+                                                                   &mut err, &impl_candidates);
+                                }
+                            }
+                            note_obligation_cause(infcx, &mut err, obligation);
+                            err.emit();
+                        }
+                    },
+                    ty::Predicate::Equate(ref predicate) => {
+                        let predicate = infcx.resolve_type_vars_if_possible(predicate);
+                        let err = infcx.equality_predicate(obligation.cause.span,
+                                                           &predicate).err().unwrap();
+                        let mut err = struct_span_err!(
+                            infcx.tcx.sess, obligation.cause.span, E0278,
+                            "the requirement `{}` is not satisfied (`{}`)",
+                            predicate,
+                            err);
+                        note_obligation_cause(infcx, &mut err, obligation);
+                        err.emit();
+                    }
+
+                    ty::Predicate::RegionOutlives(ref predicate) => {
+                        let predicate = infcx.resolve_type_vars_if_possible(predicate);
+                        let err = infcx.region_outlives_predicate(obligation.cause.span,
+                                                                  &predicate).err().unwrap();
+                        let mut err = struct_span_err!(
+                            infcx.tcx.sess, obligation.cause.span, E0279,
+                            "the requirement `{}` is not satisfied (`{}`)",
+                            predicate,
+                            err);
+                        note_obligation_cause(infcx, &mut err, obligation);
+                        err.emit();
+                    }
+
+                    ty::Predicate::Projection(..) | ty::Predicate::TypeOutlives(..) => {
+                        let predicate =
+                            infcx.resolve_type_vars_if_possible(&obligation.predicate);
+                        let mut err = struct_span_err!(
+                            infcx.tcx.sess, obligation.cause.span, E0280,
+                            "the requirement `{}` is not satisfied",
+                            predicate);
+                        note_obligation_cause(infcx, &mut err, obligation);
+                        err.emit();
+                    }
+
+                    ty::Predicate::ObjectSafe(trait_def_id) => {
+                        let violations = object_safety_violations(
+                            infcx.tcx, trait_def_id);
+                        let mut err = report_object_safety_error(infcx.tcx,
+                                                                 obligation.cause.span,
+                                                                 trait_def_id,
+                                                                 violations);
+                        note_obligation_cause(infcx, &mut err, obligation);
+                        err.emit();
+                    }
+
+                    ty::Predicate::WellFormed(ty) => {
+                        // WF predicates cannot themselves make
+                        // errors. They can only block due to
+                        // ambiguity; otherwise, they always
+                        // degenerate into other obligations
+                        // (which may fail).
+                        span_bug!(
+                            obligation.cause.span,
+                            "WF predicate not satisfied for {:?}",
+                            ty);
+                    }
+                }
+            }
+        }
+
+        OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, ref e) => {
+            let expected_trait_ref = infcx.resolve_type_vars_if_possible(&*expected_trait_ref);
+            let actual_trait_ref = infcx.resolve_type_vars_if_possible(&*actual_trait_ref);
+            if !actual_trait_ref.self_ty().references_error() {
+                let mut err = struct_span_err!(
+                    infcx.tcx.sess, obligation.cause.span, E0281,
+                    "type mismatch: the type `{}` implements the trait `{}`, \
+                     but the trait `{}` is required ({})",
+                    expected_trait_ref.self_ty(),
+                    expected_trait_ref,
+                    actual_trait_ref,
+                    e);
+                note_obligation_cause(infcx, &mut err, obligation);
+                err.emit();
+            }
+        }
+
+        TraitNotObjectSafe(did) => {
+            let violations = object_safety_violations(infcx.tcx, did);
+            let mut err = report_object_safety_error(infcx.tcx, obligation.cause.span, did,
+                                                     violations);
+            note_obligation_cause(infcx, &mut err, obligation);
+            err.emit();
+        }
+    }
+}
+
+pub fn report_object_safety_error<'tcx>(tcx: &TyCtxt<'tcx>,
+                                        span: Span,
+                                        trait_def_id: DefId,
+                                        violations: Vec<ObjectSafetyViolation>)
+                                        -> DiagnosticBuilder<'tcx>
+{
+    let mut err = struct_span_err!(
+        tcx.sess, span, E0038,
+        "the trait `{}` cannot be made into an object",
+        tcx.item_path_str(trait_def_id));
+
+    let mut reported_violations = FnvHashSet();
+    for violation in violations {
+        if !reported_violations.insert(violation.clone()) {
+            continue;
+        }
+        match violation {
+            ObjectSafetyViolation::SizedSelf => {
+                err.fileline_note(
+                    span,
+                    "the trait cannot require that `Self : Sized`");
+            }
+
+            ObjectSafetyViolation::SupertraitSelf => {
+                err.fileline_note(
+                    span,
+                    "the trait cannot use `Self` as a type parameter \
+                     in the supertrait listing");
+            }
+
+            ObjectSafetyViolation::Method(method,
+                                          MethodViolationCode::StaticMethod) => {
+                err.fileline_note(
+                    span,
+                    &format!("method `{}` has no receiver",
+                             method.name));
+            }
+
+            ObjectSafetyViolation::Method(method,
+                                          MethodViolationCode::ReferencesSelf) => {
+                err.fileline_note(
+                    span,
+                    &format!("method `{}` references the `Self` type \
+                              in its arguments or return type",
+                             method.name));
+            }
+
+            ObjectSafetyViolation::Method(method,
+                                          MethodViolationCode::Generic) => {
+                err.fileline_note(
+                    span,
+                    &format!("method `{}` has generic type parameters",
+                             method.name));
+            }
+        }
+    }
+    err
+}
+
+pub fn maybe_report_ambiguity<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                                        obligation: &PredicateObligation<'tcx>) {
+    // Unable to successfully determine, probably means
+    // insufficient type information, but could mean
+    // ambiguous impls. The latter *ought* to be a
+    // coherence violation, so we don't report it here.
+
+    let predicate = infcx.resolve_type_vars_if_possible(&obligation.predicate);
+
+    debug!("maybe_report_ambiguity(predicate={:?}, obligation={:?})",
+           predicate,
+           obligation);
+
+    match predicate {
+        ty::Predicate::Trait(ref data) => {
+            let trait_ref = data.to_poly_trait_ref();
+            let self_ty = trait_ref.self_ty();
+            let all_types = &trait_ref.substs().types;
+            if all_types.references_error() {
+            } else {
+                // Typically, this ambiguity should only happen if
+                // there are unresolved type inference variables
+                // (otherwise it would suggest a coherence
+                // failure). But given #21974 that is not necessarily
+                // the case -- we can have multiple where clauses that
+                // are only distinguished by a region, which results
+                // in an ambiguity even when all types are fully
+                // known, since we don't dispatch based on region
+                // relationships.
+
+                // This is kind of a hack: it frequently happens that some earlier
+                // error prevents types from being fully inferred, and then we get
+                // a bunch of uninteresting errors saying something like "<generic
+                // #0> doesn't implement Sized".  It may even be true that we
+                // could just skip over all checks where the self-ty is an
+                // inference variable, but I was afraid that there might be an
+                // inference variable created, registered as an obligation, and
+                // then never forced by writeback, and hence by skipping here we'd
+                // be ignoring the fact that we don't KNOW the type works
+                // out. Though even that would probably be harmless, given that
+                // we're only talking about builtin traits, which are known to be
+                // inhabited. But in any case I just threw in this check for
+                // has_errors() to be sure that compilation isn't happening
+                // anyway. In that case, why inundate the user.
+                if !infcx.tcx.sess.has_errors() {
+                    if
+                        infcx.tcx.lang_items.sized_trait()
+                        .map_or(false, |sized_id| sized_id == trait_ref.def_id())
+                    {
+                        need_type_info(infcx, obligation.cause.span, self_ty);
+                    } else {
+                        let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0283,
+                                                       "type annotations required: \
+                                                        cannot resolve `{}`",
+                                                       predicate);
+                        note_obligation_cause(infcx, &mut err, obligation);
+                        err.emit();
+                    }
+                }
+            }
+        }
+
+        ty::Predicate::WellFormed(ty) => {
+            // Same hacky approach as above to avoid deluging user
+            // with error messages.
+            if !ty.references_error() && !infcx.tcx.sess.has_errors() {
+                need_type_info(infcx, obligation.cause.span, ty);
+            }
+        }
+
+        _ => {
+            if !infcx.tcx.sess.has_errors() {
+                let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0284,
+                                               "type annotations required: cannot resolve `{}`",
+                                               predicate);
+                note_obligation_cause(infcx, &mut err, obligation);
+                err.emit();
+            }
+        }
+    }
+}
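+
+// For reference, an illustrative (not exhaustive) example of user code that
+// reaches the E0282 path above is `let x = Vec::new();` where nothing later
+// pins down the element type; the E0283 path is reached when a bound such as
+// `T: Foo` has several candidate impls and nothing in the surrounding code
+// selects among them.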
+
+/// Returns whether the trait predicate may apply for *some* assignment
+/// to the type parameters.
+fn predicate_can_apply<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                                 pred: ty::PolyTraitRef<'tcx>)
+                                 -> bool
+{
+    struct ParamToVarFolder<'a, 'tcx: 'a> {
+        infcx: &'a InferCtxt<'a, 'tcx>,
+        var_map: FnvHashMap<Ty<'tcx>, Ty<'tcx>>
+    }
+
+    impl<'a, 'tcx> TypeFolder<'tcx> for ParamToVarFolder<'a, 'tcx>
+    {
+        fn tcx(&self) -> &TyCtxt<'tcx> { self.infcx.tcx }
+
+        fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+            if let ty::TyParam(..) = ty.sty {
+                let infcx = self.infcx;
+                self.var_map.entry(ty).or_insert_with(|| infcx.next_ty_var())
+            } else {
+                ty.super_fold_with(self)
+            }
+        }
+    }
+
+    infcx.probe(|_| {
+        let mut selcx = SelectionContext::new(infcx);
+
+        let cleaned_pred = pred.fold_with(&mut ParamToVarFolder {
+            infcx: infcx,
+            var_map: FnvHashMap()
+        });
+
+        let cleaned_pred = super::project::normalize(
+            &mut selcx,
+            ObligationCause::dummy(),
+            &cleaned_pred
+        ).value;
+
+        let obligation = Obligation::new(
+            ObligationCause::dummy(),
+            cleaned_pred.to_predicate()
+        );
+
+        selcx.evaluate_obligation(&obligation)
+    })
+}
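+
+// As an illustration (hypothetical types, not taken from real input): given
+// `struct S<T>(T);` and an unsatisfied obligation `S<T>: Clone` where `T` is
+// an uninstantiated parameter, the folder above replaces `T` with a fresh
+// inference variable `$0` and probes whether `S<$0>: Clone` could be selected
+// for *some* choice of `$0`.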
+
+
+fn need_type_info<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                            span: Span,
+                            ty: Ty<'tcx>)
+{
+    span_err!(infcx.tcx.sess, span, E0282,
+              "unable to infer enough type information about `{}`; \
+               type annotations or generic parameter binding required",
+              ty);
+}
+
+fn note_obligation_cause<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>,
+                                      err: &mut DiagnosticBuilder,
+                                      obligation: &Obligation<'tcx, T>)
+    where T: fmt::Display
+{
+    note_obligation_cause_code(infcx,
+                               err,
+                               &obligation.predicate,
+                               obligation.cause.span,
+                               &obligation.cause.code);
+}
+
+fn note_obligation_cause_code<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>,
+                                           err: &mut DiagnosticBuilder,
+                                           predicate: &T,
+                                           cause_span: Span,
+                                           cause_code: &ObligationCauseCode<'tcx>)
+    where T: fmt::Display
+{
+    let tcx = infcx.tcx;
+    match *cause_code {
+        ObligationCauseCode::MiscObligation => { }
+        ObligationCauseCode::SliceOrArrayElem => {
+            err.fileline_note(
+                cause_span,
+                "slice and array elements must have `Sized` type");
+        }
+        ObligationCauseCode::ProjectionWf(data) => {
+            err.fileline_note(
+                cause_span,
+                &format!("required so that the projection `{}` is well-formed",
+                         data));
+        }
+        ObligationCauseCode::ReferenceOutlivesReferent(ref_ty) => {
+            err.fileline_note(
+                cause_span,
+                &format!("required so that reference `{}` does not outlive its referent",
+                         ref_ty));
+        }
+        ObligationCauseCode::ItemObligation(item_def_id) => {
+            let item_name = tcx.item_path_str(item_def_id);
+            err.fileline_note(
+                cause_span,
+                &format!("required by `{}`", item_name));
+        }
+        ObligationCauseCode::ObjectCastObligation(object_ty) => {
+            err.fileline_note(
+                cause_span,
+                &format!(
+                    "required for the cast to the object type `{}`",
+                    infcx.ty_to_string(object_ty)));
+        }
+        ObligationCauseCode::RepeatVec => {
+            err.fileline_note(
+                cause_span,
+                "the `Copy` trait is required because the \
+                 repeated element will be copied");
+        }
+        ObligationCauseCode::VariableType(_) => {
+            err.fileline_note(
+                cause_span,
+                "all local variables must have a statically known size");
+        }
+        ObligationCauseCode::ReturnType => {
+            err.fileline_note(
+                cause_span,
+                "the return type of a function must have a \
+                 statically known size");
+        }
+        ObligationCauseCode::AssignmentLhsSized => {
+            err.fileline_note(
+                cause_span,
+                "the left-hand-side of an assignment must have a statically known size");
+        }
+        ObligationCauseCode::StructInitializerSized => {
+            err.fileline_note(
+                cause_span,
+                "structs must have a statically known size to be initialized");
+        }
+        ObligationCauseCode::ClosureCapture(var_id, _, builtin_bound) => {
+            let def_id = tcx.lang_items.from_builtin_kind(builtin_bound).unwrap();
+            let trait_name = tcx.item_path_str(def_id);
+            let name = tcx.local_var_name_str(var_id);
+            err.fileline_note(
+                cause_span,
+                &format!("the closure that captures `{}` requires that all captured variables \
+                          implement the trait `{}`",
+                         name,
+                         trait_name));
+        }
+        ObligationCauseCode::FieldSized => {
+            err.fileline_note(
+                cause_span,
+                "only the last field of a struct or enum variant \
+                 may have a dynamically sized type");
+        }
+        ObligationCauseCode::SharedStatic => {
+            err.fileline_note(
+                cause_span,
+                "shared static variables must have a type that implements `Sync`");
+        }
+        ObligationCauseCode::BuiltinDerivedObligation(ref data) => {
+            let parent_trait_ref = infcx.resolve_type_vars_if_possible(&data.parent_trait_ref);
+            err.fileline_note(
+                cause_span,
+                &format!("required because it appears within the type `{}`",
+                         parent_trait_ref.0.self_ty()));
+            let parent_predicate = parent_trait_ref.to_predicate();
+            note_obligation_cause_code(infcx,
+                                       err,
+                                       &parent_predicate,
+                                       cause_span,
+                                       &data.parent_code);
+        }
+        ObligationCauseCode::ImplDerivedObligation(ref data) => {
+            let parent_trait_ref = infcx.resolve_type_vars_if_possible(&data.parent_trait_ref);
+            err.fileline_note(
+                cause_span,
+                &format!("required because of the requirements on the impl of `{}` for `{}`",
+                         parent_trait_ref,
+                         parent_trait_ref.0.self_ty()));
+            let parent_predicate = parent_trait_ref.to_predicate();
+            note_obligation_cause_code(infcx,
+                                       err,
+                                       &parent_predicate,
+                                       cause_span,
+                                       &data.parent_code);
+        }
+        ObligationCauseCode::CompareImplMethodObligation => {
+            err.fileline_note(
+                cause_span,
+                &format!("the requirement `{}` appears on the impl method \
+                          but not on the corresponding trait method",
+                         predicate));
+        }
+    }
+}
+
+fn suggest_new_overflow_limit(tcx: &TyCtxt, err:&mut DiagnosticBuilder, span: Span) {
+    let current_limit = tcx.sess.recursion_limit.get();
+    let suggested_limit = current_limit * 2;
+    err.fileline_note(
+        span,
+        &format!(
+            "consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate",
+            suggested_limit));
+}
diff --git a/src/librustc/traits/fulfill.rs b/src/librustc/traits/fulfill.rs
new file mode 100644 (file)
index 0000000..11e8dae
--- /dev/null
@@ -0,0 +1,799 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use dep_graph::DepGraph;
+use infer::{InferCtxt, InferOk};
+use ty::{self, Ty, TyCtxt, TypeFoldable, ToPolyTraitRef};
+use rustc_data_structures::obligation_forest::{Backtrace, ObligationForest, Error};
+use std::iter;
+use syntax::ast;
+use util::common::ErrorReported;
+use util::nodemap::{FnvHashMap, FnvHashSet, NodeMap};
+
+use super::CodeAmbiguity;
+use super::CodeProjectionError;
+use super::CodeSelectionError;
+use super::is_object_safe;
+use super::FulfillmentError;
+use super::FulfillmentErrorCode;
+use super::ObligationCause;
+use super::PredicateObligation;
+use super::project;
+use super::report_overflow_error_cycle;
+use super::select::SelectionContext;
+use super::Unimplemented;
+use super::util::predicate_for_builtin_bound;
+
+pub struct GlobalFulfilledPredicates<'tcx> {
+    set: FnvHashSet<ty::PolyTraitPredicate<'tcx>>,
+    dep_graph: DepGraph,
+}
+
+#[derive(Debug)]
+pub struct LocalFulfilledPredicates<'tcx> {
+    set: FnvHashSet<ty::Predicate<'tcx>>
+}
+
+/// The fulfillment context is used to drive trait resolution.  It
+/// consists of a list of obligations that must be (eventually)
+/// satisfied. The job is to track which are satisfied, which yielded
+/// errors, and which are still pending. At any point, users can call
+/// `select_where_possible`, and the fulfillment context will try to do
+/// selection, retaining only those obligations that remain
+/// ambiguous. This may be helpful in pushing type inference
+/// along. Once all type inference constraints have been generated, the
+/// method `select_all_or_error` can be used to report any remaining
+/// ambiguous cases as errors.
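+///
+/// A rough sketch of the intended flow (illustrative only; `infcx` is an
+/// `&InferCtxt` and `obligation` a predicate obligation supplied by the
+/// caller):
+///
+/// ```ignore
+/// let mut fulfill_cx = FulfillmentContext::new();
+/// fulfill_cx.register_predicate_obligation(infcx, obligation);
+/// // Drive selection whenever new type information becomes available.
+/// fulfill_cx.select_where_possible(infcx)?;
+/// // Once inference is finished, demand that everything was proven.
+/// fulfill_cx.select_all_or_error(infcx)?;
+/// ```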
+pub struct FulfillmentContext<'tcx> {
+    // a simple cache that aims to cache *exact duplicate obligations*
+    // and avoid adding them twice. This serves a different purpose
+    // than the `SelectionCache`: it avoids duplicate errors and
+    // permits recursive obligations, which are often generated from
+    // traits like `Send` et al.
+    //
+    // Note that because of type inference, a predicate can still
+    // occur twice in the predicates list, for example when 2
+    // initially-distinct type variables are unified after being
+    // inserted. Deduplicating the predicate set on selection had a
+    // significant performance cost the last time I checked.
+    duplicate_set: LocalFulfilledPredicates<'tcx>,
+
+    // A list of all obligations that have been registered with this
+    // fulfillment context.
+    predicates: ObligationForest<PendingPredicateObligation<'tcx>,
+                                 LocalFulfilledPredicates<'tcx>>,
+
+    // A set of constraints that regionck must validate. Each
+    // constraint has the form `T:'a`, meaning "some type `T` must
+    // outlive the lifetime 'a". These constraints derive from
+    // instantiated type parameters. So if you had a struct defined
+    // like
+    //
+    //     struct Foo<T:'static> { ... }
+    //
+    // then in some expression `let x = Foo { ... }` it will
+    // instantiate the type parameter `T` with a fresh type `$0`. At
+    // the same time, it will record a region obligation of
+    // `$0:'static`. This will get checked later by regionck. (We
+    // can't generally check these things right away because we have
+    // to wait until types are resolved.)
+    //
+    // These are stored in a map keyed to the id of the innermost
+    // enclosing fn body / static initializer expression. This is
+    // because the location where the obligation was incurred can be
+    // relevant with respect to which sublifetime assumptions are in
+    // place. The reason that we store under the fn-id, and not
+    // something more fine-grained, is so that it is easier for
+    // regionck to be sure that it has found *all* the region
+    // obligations (otherwise, it's easy to fail to walk to a
+    // particular node-id).
+    region_obligations: NodeMap<Vec<RegionObligation<'tcx>>>,
+}
+
+#[derive(Clone)]
+pub struct RegionObligation<'tcx> {
+    pub sub_region: ty::Region,
+    pub sup_type: Ty<'tcx>,
+    pub cause: ObligationCause<'tcx>,
+}
+
+#[derive(Clone, Debug)]
+pub struct PendingPredicateObligation<'tcx> {
+    pub obligation: PredicateObligation<'tcx>,
+    pub stalled_on: Vec<Ty<'tcx>>,
+}
+
+impl<'tcx> FulfillmentContext<'tcx> {
+    /// Creates a new fulfillment context.
+    pub fn new() -> FulfillmentContext<'tcx> {
+        FulfillmentContext {
+            duplicate_set: LocalFulfilledPredicates::new(),
+            predicates: ObligationForest::new(),
+            region_obligations: NodeMap(),
+        }
+    }
+
+    /// "Normalize" a projection type `<SomeType as SomeTrait>::X` by
+    /// creating a fresh type variable `$0` as well as a projection
+    /// predicate `<SomeType as SomeTrait>::X == $0`. When the
+    /// inference engine runs, it will attempt to find an impl of
+    /// `SomeTrait` or a where clause that lets us unify `$0` with
+    /// something concrete. If this fails, we'll unify `$0` with
+    /// `projection_ty` again.
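+    ///
+    /// A sketch of the effect (illustrative only, with a hypothetical input):
+    ///
+    /// ```ignore
+    /// // <Vec<u32> as IntoIterator>::Item   ~~normalize~~>   $0
+    /// // pending obligation: <Vec<u32> as IntoIterator>::Item == $0
+    /// // selection later resolves `$0` to `u32`.
+    /// ```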
+    pub fn normalize_projection_type<'a>(&mut self,
+                                         infcx: &InferCtxt<'a,'tcx>,
+                                         projection_ty: ty::ProjectionTy<'tcx>,
+                                         cause: ObligationCause<'tcx>)
+                                         -> Ty<'tcx>
+    {
+        debug!("normalize_projection_type(projection_ty={:?})",
+               projection_ty);
+
+        assert!(!projection_ty.has_escaping_regions());
+
+        // FIXME(#20304) -- cache
+
+        let mut selcx = SelectionContext::new(infcx);
+        let normalized = project::normalize_projection_type(&mut selcx, projection_ty, cause, 0);
+
+        for obligation in normalized.obligations {
+            self.register_predicate_obligation(infcx, obligation);
+        }
+
+        debug!("normalize_projection_type: result={:?}", normalized.value);
+
+        normalized.value
+    }
+
+    pub fn register_builtin_bound<'a>(&mut self,
+                                      infcx: &InferCtxt<'a,'tcx>,
+                                      ty: Ty<'tcx>,
+                                      builtin_bound: ty::BuiltinBound,
+                                      cause: ObligationCause<'tcx>)
+    {
+        match predicate_for_builtin_bound(infcx.tcx, cause, builtin_bound, 0, ty) {
+            Ok(predicate) => {
+                self.register_predicate_obligation(infcx, predicate);
+            }
+            Err(ErrorReported) => { }
+        }
+    }
+
+    pub fn register_region_obligation<'a>(&mut self,
+                                          t_a: Ty<'tcx>,
+                                          r_b: ty::Region,
+                                          cause: ObligationCause<'tcx>)
+    {
+        register_region_obligation(t_a, r_b, cause, &mut self.region_obligations);
+    }
+
+    pub fn register_predicate_obligation<'a>(&mut self,
+                                             infcx: &InferCtxt<'a,'tcx>,
+                                             obligation: PredicateObligation<'tcx>)
+    {
+        // this helps to reduce duplicate errors, as well as making
+        // debug output much nicer to read and so on.
+        let obligation = infcx.resolve_type_vars_if_possible(&obligation);
+
+        assert!(!obligation.has_escaping_regions());
+
+        if self.is_duplicate_or_add(infcx.tcx, &obligation.predicate) {
+            debug!("register_predicate_obligation({:?}) -- already seen, skip", obligation);
+            return;
+        }
+
+        debug!("register_predicate_obligation({:?})", obligation);
+        let obligation = PendingPredicateObligation {
+            obligation: obligation,
+            stalled_on: vec![]
+        };
+        self.predicates.push_tree(obligation, LocalFulfilledPredicates::new());
+    }
+
+    pub fn region_obligations(&self,
+                              body_id: ast::NodeId)
+                              -> &[RegionObligation<'tcx>]
+    {
+        match self.region_obligations.get(&body_id) {
+            None => Default::default(),
+            Some(vec) => vec,
+        }
+    }
+
+    pub fn select_all_or_error<'a>(&mut self,
+                                   infcx: &InferCtxt<'a,'tcx>)
+                                   -> Result<(),Vec<FulfillmentError<'tcx>>>
+    {
+        self.select_where_possible(infcx)?;
+        let errors: Vec<_> =
+            self.predicates.to_errors(CodeAmbiguity)
+                           .into_iter()
+                           .map(|e| to_fulfillment_error(e))
+                           .collect();
+        if errors.is_empty() {
+            Ok(())
+        } else {
+            Err(errors)
+        }
+    }
+
+    pub fn select_where_possible<'a>(&mut self,
+                                     infcx: &InferCtxt<'a,'tcx>)
+                                     -> Result<(),Vec<FulfillmentError<'tcx>>>
+    {
+        let mut selcx = SelectionContext::new(infcx);
+        self.select(&mut selcx)
+    }
+
+    pub fn pending_obligations(&self) -> Vec<PendingPredicateObligation<'tcx>> {
+        self.predicates.pending_obligations()
+    }
+
+    fn is_duplicate_or_add(&mut self,
+                           tcx: &TyCtxt<'tcx>,
+                           predicate: &ty::Predicate<'tcx>)
+                           -> bool {
+        // For "global" predicates -- that is, predicates that don't
+        // involve type parameters, inference variables, or regions
+        // other than 'static -- we can check the cache in the tcx,
+        // which allows us to leverage work from other threads. Note
+        // that we don't add anything to this cache yet (unlike the
+        // local cache).  This is because the tcx cache maintains the
+        // invariant that it only contains things that have been
+        // proven, and we have not yet proven that `predicate` holds.
+        if tcx.fulfilled_predicates.borrow().check_duplicate(predicate) {
+            return true;
+        }
+
+        // If `predicate` is not global, or not present in the tcx
+        // cache, we can still check for it in our local cache and add
+        // it if not present. Note that if we find this predicate in
+        // the local cache we can stop immediately, without reporting
+        // any errors, even though we don't know yet if it is
+        // true. This is because, while we don't yet know if the
+        // predicate holds, we know that this same fulfillment context
+        // already is in the process of finding out.
+        self.duplicate_set.is_duplicate_or_add(predicate)
+    }
+
+    /// Attempts to select obligations using `selcx`, returning any errors
+    /// encountered. Obligations that remain ambiguous are kept pending.
+    fn select<'a>(&mut self,
+                  selcx: &mut SelectionContext<'a, 'tcx>)
+                  -> Result<(),Vec<FulfillmentError<'tcx>>>
+    {
+        debug!("select(obligation-forest-size={})", self.predicates.len());
+
+        let mut errors = Vec::new();
+
+        loop {
+            debug!("select: starting another iteration");
+
+            // Process pending obligations.
+            let outcome = {
+                let region_obligations = &mut self.region_obligations;
+                self.predicates.process_obligations(
+                    |obligation, tree, backtrace| process_predicate(selcx,
+                                                                     tree,
+                                                                     obligation,
+                                                                     backtrace,
+                                                                     region_obligations))
+            };
+
+            debug!("select: outcome={:?}", outcome);
+
+            // these are obligations that were proven to be true.
+            for pending_obligation in outcome.completed {
+                let predicate = &pending_obligation.obligation.predicate;
+                selcx.tcx().fulfilled_predicates.borrow_mut().add_if_global(predicate);
+            }
+
+            errors.extend(
+                outcome.errors.into_iter()
+                              .map(|e| to_fulfillment_error(e)));
+
+            // If nothing new was added, no need to keep looping.
+            if outcome.stalled {
+                break;
+            }
+        }
+
+        debug!("select({} predicates remaining, {} errors) done",
+               self.predicates.len(), errors.len());
+
+        if errors.is_empty() {
+            Ok(())
+        } else {
+            Err(errors)
+        }
+    }
+}
+
+/// Like `process_predicate1`, but wraps the resulting child obligations into pending predicate obligations.
+fn process_predicate<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
+                              tree_cache: &mut LocalFulfilledPredicates<'tcx>,
+                              pending_obligation: &mut PendingPredicateObligation<'tcx>,
+                              backtrace: Backtrace<PendingPredicateObligation<'tcx>>,
+                              region_obligations: &mut NodeMap<Vec<RegionObligation<'tcx>>>)
+                              -> Result<Option<Vec<PendingPredicateObligation<'tcx>>>,
+                                        FulfillmentErrorCode<'tcx>>
+{
+    match process_predicate1(selcx, pending_obligation, region_obligations) {
+        Ok(Some(v)) => process_child_obligations(selcx,
+                                                 tree_cache,
+                                                 &pending_obligation.obligation,
+                                                 backtrace,
+                                                 v),
+        Ok(None) => Ok(None),
+        Err(e) => Err(e)
+    }
+}
+
+fn process_child_obligations<'a,'tcx>(
+    selcx: &mut SelectionContext<'a,'tcx>,
+    tree_cache: &mut LocalFulfilledPredicates<'tcx>,
+    pending_obligation: &PredicateObligation<'tcx>,
+    backtrace: Backtrace<PendingPredicateObligation<'tcx>>,
+    child_obligations: Vec<PredicateObligation<'tcx>>)
+    -> Result<Option<Vec<PendingPredicateObligation<'tcx>>>,
+              FulfillmentErrorCode<'tcx>>
+{
+    // FIXME(#30977) The code below is designed to detect (and
+    // permit) DAGs, while still ensuring that the reasoning
+    // is acyclic. However, it does a few things
+    // suboptimally. For example, it refreshes type variables
+    // a lot, probably more than needed, but also less than
+    // you might want.
+    //
+    //   - more than needed: I want to be very sure we don't
+    //     accidentally treat a cycle as a DAG, so I am
+    //     refreshing type variables as we walk the ancestors;
+    //     but we are going to repeat this a lot, which is
+    //     sort of silly, and it would be nicer to refresh
+    //     them *in place* so that later predicate processing
+    //     can benefit from the same work;
+    //   - less than you might want: we only add items in the cache here,
+    //     but maybe we learn more about type variables and could add them into
+    //     the cache later on.
+
+    let tcx = selcx.tcx();
+
+    let mut ancestor_set = AncestorSet::new(&backtrace);
+
+    let pending_predicate_obligations: Vec<_> =
+        child_obligations
+        .into_iter()
+        .filter_map(|obligation| {
+            // Probably silly, but remove any inference
+            // variables. This is actually crucial to the ancestor
+            // check marked (*) below, but it's not clear that it
+            // makes sense to ALWAYS do it.
+            let obligation = selcx.infcx().resolve_type_vars_if_possible(&obligation);
+
+            // Screen out obligations that we know globally
+            // are true.
+            if tcx.fulfilled_predicates.borrow().check_duplicate(&obligation.predicate) {
+                return None;
+            }
+
+            // Check whether this obligation appears
+            // somewhere else in the tree. If not, we have to
+            // process it for sure.
+            if !tree_cache.is_duplicate_or_add(&obligation.predicate) {
+                return Some(PendingPredicateObligation {
+                    obligation: obligation,
+                    stalled_on: vec![]
+                });
+            }
+
+            debug!("process_child_obligations: duplicate={:?}",
+                   obligation.predicate);
+
+            // OK, the obligation appears elsewhere in the tree.
+            // This is either a fatal error or else something we can
+            // ignore. If the obligation appears in our *ancestors*
+            // (rather than some more distant relative), that
+            // indicates a cycle. Cycles are either considered
+            // resolved (if this is a coinductive case) or a fatal
+            // error.
+            if let Some(index) = ancestor_set.has(selcx.infcx(), &obligation.predicate) {
+                //                            ~~~ (*) see above
+                debug!("process_child_obligations: cycle index = {}", index);
+
+                let backtrace = backtrace.clone();
+                let cycle: Vec<_> =
+                    iter::once(&obligation)
+                    .chain(Some(pending_obligation))
+                    .chain(backtrace.take(index + 1).map(|p| &p.obligation))
+                    .cloned()
+                    .collect();
+                if coinductive_match(selcx, &cycle) {
+                    debug!("process_child_obligations: coinductive match");
+                    None
+                } else {
+                    report_overflow_error_cycle(selcx.infcx(), &cycle);
+                }
+            } else {
+                // Not a cycle. Just ignore this obligation then,
+                // we're already in the process of proving it.
+                debug!("process_child_obligations: not a cycle");
+                None
+            }
+        })
+        .collect();
+
+    Ok(Some(pending_predicate_obligations))
+}
+
+struct AncestorSet<'b, 'tcx: 'b> {
+    populated: bool,
+    cache: FnvHashMap<ty::Predicate<'tcx>, usize>,
+    backtrace: Backtrace<'b, PendingPredicateObligation<'tcx>>,
+}
+
+impl<'b, 'tcx> AncestorSet<'b, 'tcx> {
+    fn new(backtrace: &Backtrace<'b, PendingPredicateObligation<'tcx>>) -> Self {
+        AncestorSet {
+            populated: false,
+            cache: FnvHashMap(),
+            backtrace: backtrace.clone(),
+        }
+    }
+
+    /// Checks whether any of the ancestors in the backtrace are equal
+    /// to `predicate` (`predicate` is assumed to be fully
+    /// type-resolved).  Returns `None` if not; otherwise, returns
+    /// `Some` with the index within the backtrace.
+    fn has<'a>(&mut self,
+               infcx: &InferCtxt<'a, 'tcx>,
+               predicate: &ty::Predicate<'tcx>)
+               -> Option<usize> {
+        // the first time, we have to populate the cache
+        if !self.populated {
+            let backtrace = self.backtrace.clone();
+            for (index, ancestor) in backtrace.enumerate() {
+                // Ugh. This just feels ridiculously
+                // inefficient.  But we need to compare
+                // predicates without being concerned about
+                // the vagaries of type inference, so for now
+                // just ensure that they are always
+                // up-to-date. (I suppose we could just use a
+                // snapshot and check if they are unifiable?)
+                let resolved_predicate =
+                    infcx.resolve_type_vars_if_possible(
+                        &ancestor.obligation.predicate);
+
+                // Though we try to avoid it, it can happen that a
+                // cycle already exists in the predecessors. This
+                // happens if the type variables were not fully known
+                // at the time that the ancestors were pushed. We'll
+                // just ignore such cycles for now, on the premise
+                // that they will repeat themselves and we'll deal
+                // with them properly then.
+                self.cache.entry(resolved_predicate)
+                          .or_insert(index);
+            }
+            self.populated = true;
+        }
+
+        self.cache.get(predicate).cloned()
+    }
+}
+
+/// Returns the set of type inference variables contained in a trait ref
+fn trait_ref_type_vars<'a, 'tcx>(selcx: &mut SelectionContext<'a, 'tcx>,
+                                 t: ty::PolyTraitRef<'tcx>) -> Vec<Ty<'tcx>>
+{
+    t.skip_binder() // ok b/c this check doesn't care about regions
+     .input_types()
+     .iter()
+     .map(|t| selcx.infcx().resolve_type_vars_if_possible(t))
+     .filter(|t| t.has_infer_types())
+     .flat_map(|t| t.walk())
+     .filter(|t| match t.sty { ty::TyInfer(_) => true, _ => false })
+     .collect()
+}
+
+/// Processes a predicate obligation and returns either:
+/// - `Ok(Some(v))` if the predicate holds, provided the obligations in `v` also hold
+/// - `Ok(None)` if we don't have enough info to be sure
+/// - `Err` if the predicate does not hold
+fn process_predicate1<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
+                               pending_obligation: &mut PendingPredicateObligation<'tcx>,
+                               region_obligations: &mut NodeMap<Vec<RegionObligation<'tcx>>>)
+                               -> Result<Option<Vec<PredicateObligation<'tcx>>>,
+                                         FulfillmentErrorCode<'tcx>>
+{
+    // if we were stalled on some unresolved variables, first check
+    // whether any of them have been resolved; if not, don't bother
+    // doing more work yet
+    if !pending_obligation.stalled_on.is_empty() {
+        if pending_obligation.stalled_on.iter().all(|&ty| {
+            let resolved_ty = selcx.infcx().shallow_resolve(&ty);
+            resolved_ty == ty // nothing changed here
+        }) {
+            debug!("process_predicate: pending obligation {:?} still stalled on {:?}",
+                   selcx.infcx().resolve_type_vars_if_possible(&pending_obligation.obligation),
+                   pending_obligation.stalled_on);
+            return Ok(None);
+        }
+        pending_obligation.stalled_on = vec![];
+    }
+
+    let obligation = &mut pending_obligation.obligation;
+
+    if obligation.predicate.has_infer_types() {
+        obligation.predicate = selcx.infcx().resolve_type_vars_if_possible(&obligation.predicate);
+    }
+
+    match obligation.predicate {
+        ty::Predicate::Trait(ref data) => {
+            if selcx.tcx().fulfilled_predicates.borrow().check_duplicate_trait(data) {
+                return Ok(Some(vec![]));
+            }
+
+            let trait_obligation = obligation.with(data.clone());
+            match selcx.select(&trait_obligation) {
+                Ok(Some(vtable)) => {
+                    info!("selecting trait `{:?}` at depth {} yielded Ok(Some)",
+                          data, obligation.recursion_depth);
+                    Ok(Some(vtable.nested_obligations()))
+                }
+                Ok(None) => {
+                    info!("selecting trait `{:?}` at depth {} yielded Ok(None)",
+                          data, obligation.recursion_depth);
+
+                    // This is a bit subtle: for the most part, the
+                    // only reason we can fail to make progress on
+                    // trait selection is because we don't have enough
+                    // information about the types in the trait. One
+                    // exception is that we sometimes haven't decided
+                    // what kind of closure a closure is. *But*, in
+                    // that case, it turns out, the type of the
+                    // closure will also change, because the closure
+                    // also includes references to its upvars as part
+                    // of its type, and those types are resolved at
+                    // the same time.
+                    pending_obligation.stalled_on =
+                        trait_ref_type_vars(selcx, data.to_poly_trait_ref());
+
+                    debug!("process_predicate: pending obligation {:?} now stalled on {:?}",
+                           selcx.infcx().resolve_type_vars_if_possible(obligation),
+                           pending_obligation.stalled_on);
+
+                    Ok(None)
+                }
+                Err(selection_err) => {
+                    info!("selecting trait `{:?}` at depth {} yielded Err",
+                          data, obligation.recursion_depth);
+                    Err(CodeSelectionError(selection_err))
+                }
+            }
+        }
+
+        ty::Predicate::Equate(ref binder) => {
+            match selcx.infcx().equality_predicate(obligation.cause.span, binder) {
+                Ok(InferOk { obligations, .. }) => {
+                    // FIXME(#32730) propagate obligations
+                    assert!(obligations.is_empty());
+                    Ok(Some(Vec::new()))
+                },
+                Err(_) => Err(CodeSelectionError(Unimplemented)),
+            }
+        }
+
+        ty::Predicate::RegionOutlives(ref binder) => {
+            match selcx.infcx().region_outlives_predicate(obligation.cause.span, binder) {
+                Ok(()) => Ok(Some(Vec::new())),
+                Err(_) => Err(CodeSelectionError(Unimplemented)),
+            }
+        }
+
+        ty::Predicate::TypeOutlives(ref binder) => {
+            // Check if there are higher-ranked regions.
+            match selcx.tcx().no_late_bound_regions(binder) {
+                // If there are, inspect the underlying type further.
+                None => {
+                    // Convert from `Binder<OutlivesPredicate<Ty, Region>>` to `Binder<Ty>`.
+                    let binder = binder.map_bound_ref(|pred| pred.0);
+
+                    // Check if the type has any bound regions.
+                    match selcx.tcx().no_late_bound_regions(&binder) {
+                        // If so, this obligation is an error (for now). Eventually we should be
+                        // able to support additional cases here, like `for<'a> &'a str: 'a`.
+                        None => {
+                            Err(CodeSelectionError(Unimplemented))
+                        }
+                        // Otherwise, we have something of the form
+                        // `for<'a> T: 'a where 'a not in T`, which we can treat as `T: 'static`.
+                        Some(t_a) => {
+                            register_region_obligation(t_a, ty::ReStatic,
+                                                       obligation.cause.clone(),
+                                                       region_obligations);
+                            Ok(Some(vec![]))
+                        }
+                    }
+                }
+                // If there aren't, register the obligation.
+                Some(ty::OutlivesPredicate(t_a, r_b)) => {
+                    register_region_obligation(t_a, r_b,
+                                               obligation.cause.clone(),
+                                               region_obligations);
+                    Ok(Some(vec![]))
+                }
+            }
+        }
+
+        ty::Predicate::Projection(ref data) => {
+            let project_obligation = obligation.with(data.clone());
+            match project::poly_project_and_unify_type(selcx, &project_obligation) {
+                Ok(None) => {
+                    pending_obligation.stalled_on =
+                        trait_ref_type_vars(selcx, data.to_poly_trait_ref());
+                    Ok(None)
+                }
+                Ok(v) => Ok(v),
+                Err(e) => Err(CodeProjectionError(e))
+            }
+        }
+
+        ty::Predicate::ObjectSafe(trait_def_id) => {
+            if !is_object_safe(selcx.tcx(), trait_def_id) {
+                Err(CodeSelectionError(Unimplemented))
+            } else {
+                Ok(Some(Vec::new()))
+            }
+        }
+
+        ty::Predicate::WellFormed(ty) => {
+            match ty::wf::obligations(selcx.infcx(), obligation.cause.body_id,
+                                      ty, obligation.cause.span) {
+                None => {
+                    pending_obligation.stalled_on = vec![ty];
+                    Ok(None)
+                }
+                s => Ok(s)
+            }
+        }
+    }
+}
+
+/// For defaulted traits, we use a co-inductive strategy to solve, so
+/// that recursion is ok. This routine returns true if the top of the
+/// stack (`cycle[0]`):
+/// - is a defaulted trait, and
+/// - it also appears in the backtrace at some position `X`; and,
+/// - all the predicates at positions `X..` between `X` and the top are
+///   also defaulted traits.
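+///
+/// A sketch of the kind of cycle this accepts (illustrative only):
+///
+/// ```ignore
+/// struct List { next: Option<Box<List>> }
+/// // Proving `List: Send` requires `Box<List>: Send`, which requires
+/// // `List: Send` again; because `Send` is a defaulted trait, the cycle
+/// // is treated as satisfied rather than reported as an overflow error.
+/// ```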
+fn coinductive_match<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
+                              cycle: &[PredicateObligation<'tcx>])
+                              -> bool
+{
+    let len = cycle.len();
+
+    assert_eq!(cycle[0].predicate, cycle[len - 1].predicate);
+
+    cycle[0..len-1]
+        .iter()
+        .all(|bt_obligation| {
+            let result = coinductive_obligation(selcx, bt_obligation);
+            debug!("coinductive_match: bt_obligation={:?} coinductive={}",
+                   bt_obligation, result);
+            result
+        })
+}
+
+fn coinductive_obligation<'a, 'tcx>(selcx: &SelectionContext<'a, 'tcx>,
+                                    obligation: &PredicateObligation<'tcx>)
+                                    -> bool {
+    match obligation.predicate {
+        ty::Predicate::Trait(ref data) => {
+            selcx.tcx().trait_has_default_impl(data.def_id())
+        }
+        _ => {
+            false
+        }
+    }
+}
+
+fn register_region_obligation<'tcx>(t_a: Ty<'tcx>,
+                                    r_b: ty::Region,
+                                    cause: ObligationCause<'tcx>,
+                                    region_obligations: &mut NodeMap<Vec<RegionObligation<'tcx>>>)
+{
+    let region_obligation = RegionObligation { sup_type: t_a,
+                                               sub_region: r_b,
+                                               cause: cause };
+
+    debug!("register_region_obligation({:?}, cause={:?})",
+           region_obligation, region_obligation.cause);
+
+    region_obligations.entry(region_obligation.cause.body_id)
+                      .or_insert(vec![])
+                      .push(region_obligation);
+}
+
+impl<'tcx> LocalFulfilledPredicates<'tcx> {
+    pub fn new() -> LocalFulfilledPredicates<'tcx> {
+        LocalFulfilledPredicates {
+            set: FnvHashSet()
+        }
+    }
+
+    fn is_duplicate_or_add(&mut self, key: &ty::Predicate<'tcx>) -> bool {
+        // For a `LocalFulfilledPredicates`, if we find a match, we
+        // don't need to add a read edge to the dep-graph. This is
+        // because it means that the predicate has already been
+        // considered by this `FulfillmentContext`, and hence the
+        // containing task will already have an edge. (Here we are
+        // assuming each `FulfillmentContext` only gets used from one
+        // task; but to do otherwise makes no sense)
+        !self.set.insert(key.clone())
+    }
+}
+
+impl<'tcx> GlobalFulfilledPredicates<'tcx> {
+    pub fn new(dep_graph: DepGraph) -> GlobalFulfilledPredicates<'tcx> {
+        GlobalFulfilledPredicates {
+            set: FnvHashSet(),
+            dep_graph: dep_graph,
+        }
+    }
+
+    pub fn check_duplicate(&self, key: &ty::Predicate<'tcx>) -> bool {
+        if let ty::Predicate::Trait(ref data) = *key {
+            self.check_duplicate_trait(data)
+        } else {
+            false
+        }
+    }
+
+    pub fn check_duplicate_trait(&self, data: &ty::PolyTraitPredicate<'tcx>) -> bool {
+        // For the global predicate registry, when we find a match, it
+        // may have been computed by some other task, so we want to
+        // add a read from the node corresponding to the predicate
+        // processing to make sure we get the transitive dependencies.
+        if self.set.contains(data) {
+            debug_assert!(data.is_global());
+            self.dep_graph.read(data.dep_node());
+            debug!("check_duplicate: global predicate `{:?}` already proved elsewhere", data);
+
+            info!("check_duplicate_trait hit: `{:?}`", data);
+
+            true
+        } else {
+            false
+        }
+    }
+
+    fn add_if_global(&mut self, key: &ty::Predicate<'tcx>) {
+        if let ty::Predicate::Trait(ref data) = *key {
+            // We only add things to the global predicate registry
+            // after the current task has proved them, and hence
+            // already has the required read edges, so we don't need
+            // to add any more edges here.
+            if data.is_global() {
+                if self.set.insert(data.clone()) {
+                    debug!("add_if_global: global predicate `{:?}` added", data);
+                    info!("check_duplicate_trait entry: `{:?}`", data);
+                }
+            }
+        }
+    }
+}
+
+fn to_fulfillment_error<'tcx>(
+    error: Error<PendingPredicateObligation<'tcx>, FulfillmentErrorCode<'tcx>>)
+    -> FulfillmentError<'tcx>
+{
+    let obligation = error.backtrace.into_iter().next().unwrap().obligation;
+    FulfillmentError::new(obligation, error.error)
+}
diff --git a/src/librustc/traits/mod.rs b/src/librustc/traits/mod.rs
new file mode 100644 (file)
index 0000000..a160465
--- /dev/null
@@ -0,0 +1,623 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Trait Resolution. See the Book for more.
+
+pub use self::SelectionError::*;
+pub use self::FulfillmentErrorCode::*;
+pub use self::Vtable::*;
+pub use self::ObligationCauseCode::*;
+
+use hir::def_id::DefId;
+use middle::free_region::FreeRegionMap;
+use ty::subst;
+use ty::{self, Ty, TypeFoldable};
+use infer::{self, fixup_err_to_string, InferCtxt};
+
+use std::rc::Rc;
+use syntax::ast;
+use syntax::codemap::{Span, DUMMY_SP};
+
+pub use self::error_reporting::TraitErrorKey;
+pub use self::error_reporting::recursive_type_with_infinite_size_error;
+pub use self::error_reporting::report_fulfillment_errors;
+pub use self::error_reporting::report_overflow_error;
+pub use self::error_reporting::report_overflow_error_cycle;
+pub use self::error_reporting::report_selection_error;
+pub use self::error_reporting::report_object_safety_error;
+pub use self::coherence::orphan_check;
+pub use self::coherence::overlapping_impls;
+pub use self::coherence::OrphanCheckErr;
+pub use self::fulfill::{FulfillmentContext, GlobalFulfilledPredicates, RegionObligation};
+pub use self::project::{MismatchedProjectionTypes, ProjectionMode};
+pub use self::project::{normalize, Normalized};
+pub use self::object_safety::is_object_safe;
+pub use self::object_safety::astconv_object_safety_violations;
+pub use self::object_safety::object_safety_violations;
+pub use self::object_safety::ObjectSafetyViolation;
+pub use self::object_safety::MethodViolationCode;
+pub use self::object_safety::is_vtable_safe_method;
+pub use self::select::{EvaluationCache, SelectionContext, SelectionCache};
+pub use self::select::{MethodMatchResult, MethodMatched, MethodAmbiguous, MethodDidNotMatch};
+pub use self::select::{MethodMatchedData}; // intentionally don't export variants
+pub use self::specialize::{Overlap, specialization_graph, specializes, translate_substs};
+pub use self::util::elaborate_predicates;
+pub use self::util::get_vtable_index_of_object_method;
+pub use self::util::trait_ref_for_builtin_bound;
+pub use self::util::predicate_for_trait_def;
+pub use self::util::supertraits;
+pub use self::util::Supertraits;
+pub use self::util::supertrait_def_ids;
+pub use self::util::SupertraitDefIds;
+pub use self::util::transitive_bounds;
+pub use self::util::upcast;
+
+mod coherence;
+mod error_reporting;
+mod fulfill;
+mod project;
+mod object_safety;
+mod select;
+mod specialize;
+mod structural_impls;
+mod util;
+
+/// An `Obligation` represents some trait reference (e.g. `i32: Eq`) for
+/// which the vtable must be found.  The process of finding a vtable is
+/// called "resolving" the `Obligation`. This process consists of
+/// either identifying an `impl` (e.g., `impl Eq for i32`) that
+/// provides the required vtable, or else finding a bound that is in
+/// scope. The eventual result is usually a `Selection` (defined below).
+#[derive(Clone, PartialEq, Eq)]
+pub struct Obligation<'tcx, T> {
+    pub cause: ObligationCause<'tcx>,
+    pub recursion_depth: usize,
+    pub predicate: T,
+}
+
+pub type PredicateObligation<'tcx> = Obligation<'tcx, ty::Predicate<'tcx>>;
+pub type TraitObligation<'tcx> = Obligation<'tcx, ty::PolyTraitPredicate<'tcx>>;
+
+/// Why did we incur this obligation? Used for error reporting.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct ObligationCause<'tcx> {
+    pub span: Span,
+
+    // The id of the fn body that triggered this obligation. This is
+    // used for region obligations to determine the precise
+    // environment in which the region obligation should be evaluated
+    // (in particular, closures can add new assumptions). See the
+    // field `region_obligations` of the `FulfillmentContext` for more
+    // information.
+    pub body_id: ast::NodeId,
+
+    pub code: ObligationCauseCode<'tcx>
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum ObligationCauseCode<'tcx> {
+    /// Not well classified or should be obvious from span.
+    MiscObligation,
+
+    /// This is the trait reference from the given projection
+    SliceOrArrayElem,
+
+    /// This is the trait reference from the given projection
+    ProjectionWf(ty::ProjectionTy<'tcx>),
+
+    /// In an impl of trait X for type Y, type Y must
+    /// also implement all supertraits of X.
+    ItemObligation(DefId),
+
+    /// A type like `&'a T` is WF only if `T: 'a`.
+    ReferenceOutlivesReferent(Ty<'tcx>),
+
+    /// Obligation incurred due to an object cast.
+    ObjectCastObligation(/* Object type */ Ty<'tcx>),
+
+    /// Various cases where expressions must be sized/copy/etc:
+    AssignmentLhsSized,        // L = X implies that L is Sized
+    StructInitializerSized,    // S { ... } must be Sized
+    VariableType(ast::NodeId), // Type of each variable must be Sized
+    ReturnType,                // Return type must be Sized
+    RepeatVec,                 // [T,..n] --> T must be Copy
+
+    // Capture of the variable with the given id by a closure (span is the
+    // span of the closure)
+    ClosureCapture(ast::NodeId, Span, ty::BuiltinBound),
+
+    // Types of fields (other than the last) in a struct must be sized.
+    FieldSized,
+
+    // static items must have `Sync` type
+    SharedStatic,
+
+    BuiltinDerivedObligation(DerivedObligationCause<'tcx>),
+
+    ImplDerivedObligation(DerivedObligationCause<'tcx>),
+
+    CompareImplMethodObligation,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct DerivedObligationCause<'tcx> {
+    /// The trait reference of the parent obligation that led to the
+    /// current obligation. Note that only trait obligations lead to
+    /// derived obligations, so we just store the trait reference here
+    /// directly.
+    parent_trait_ref: ty::PolyTraitRef<'tcx>,
+
+    /// The parent trait had this cause
+    parent_code: Rc<ObligationCauseCode<'tcx>>
+}
+
+pub type Obligations<'tcx, O> = Vec<Obligation<'tcx, O>>;
+pub type PredicateObligations<'tcx> = Vec<PredicateObligation<'tcx>>;
+pub type TraitObligations<'tcx> = Vec<TraitObligation<'tcx>>;
+
+pub type Selection<'tcx> = Vtable<'tcx, PredicateObligation<'tcx>>;
+
+#[derive(Clone,Debug)]
+pub enum SelectionError<'tcx> {
+    Unimplemented,
+    OutputTypeParameterMismatch(ty::PolyTraitRef<'tcx>,
+                                ty::PolyTraitRef<'tcx>,
+                                ty::error::TypeError<'tcx>),
+    TraitNotObjectSafe(DefId),
+}
+
+pub struct FulfillmentError<'tcx> {
+    pub obligation: PredicateObligation<'tcx>,
+    pub code: FulfillmentErrorCode<'tcx>
+}
+
+#[derive(Clone)]
+pub enum FulfillmentErrorCode<'tcx> {
+    CodeSelectionError(SelectionError<'tcx>),
+    CodeProjectionError(MismatchedProjectionTypes<'tcx>),
+    CodeAmbiguity,
+}
+
+/// When performing resolution, it is typically the case that there
+/// can be one of three outcomes:
+///
+/// - `Ok(Some(r))`: success occurred with result `r`
+/// - `Ok(None)`: could not definitely determine anything, usually due
+///   to inconclusive type inference.
+/// - `Err(e)`: error `e` occurred
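+///
+/// A caller typically branches on the three cases roughly as follows
+/// (an illustrative sketch; `selcx` and `obligation` are assumed to exist):
+///
+/// ```ignore
+/// match selcx.select(&obligation) {
+///     Ok(Some(selection)) => { /* a vtable was found */ }
+///     Ok(None) => { /* retry once more type information is available */ }
+///     Err(e) => { /* report the selection error */ }
+/// }
+/// ```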
+pub type SelectionResult<'tcx, T> = Result<Option<T>, SelectionError<'tcx>>;
+
+/// Given the successful resolution of an obligation, the `Vtable`
+/// indicates where the vtable comes from. Note that while we call this
+/// a "vtable", it does not necessarily indicate dynamic dispatch at
+/// runtime. `Vtable` instances just tell the compiler where to find
+/// methods, but in generic code those methods are typically statically
+/// dispatched -- only when an object is constructed is a `Vtable`
+/// instance reified into an actual vtable.
+///
+/// For example, the vtable may be tied to a specific impl (case A),
+/// or it may be relative to some bound that is in scope (case B).
+///
+///
+/// ```
+/// impl<T:Clone> Clone for Option<T> { ... } // Impl_1
+/// impl<T:Clone> Clone for Box<T> { ... }    // Impl_2
+/// impl Clone for i32 { ... }                // Impl_3
+///
+/// fn foo<T:Clone>(concrete: Option<Box<i32>>,
+///                 param: T,
+///                 mixed: Option<T>) {
+///
+///    // Case A: Vtable points at a specific impl. Only possible when
+///    // type is concretely known. If the impl itself has bounded
+///    // type parameters, Vtable will carry resolutions for those as well:
+///    concrete.clone(); // Vtable(Impl_1, [Vtable(Impl_2, [Vtable(Impl_3)])])
+///
+///    // Case B: Vtable must be provided by caller. This applies when
+///    // type is a type parameter.
+///    param.clone();    // VtableParam
+///
+///    // Case C: A mix of cases A and B.
+///    mixed.clone();    // Vtable(Impl_1, [VtableParam])
+/// }
+/// ```
+///
+/// ### The type parameter `N`
+///
+/// See explanation on `VtableImplData`.
+#[derive(Clone)]
+pub enum Vtable<'tcx, N> {
+    /// Vtable identifying a particular impl.
+    VtableImpl(VtableImplData<'tcx, N>),
+
+    /// Vtable for default trait implementations
+    /// This carries the information and nested obligations with regards
+    /// to a default implementation for a trait `Trait`. The nested obligations
+    /// ensure the trait implementation holds for all the constituent types.
+    VtableDefaultImpl(VtableDefaultImplData<N>),
+
+    /// Successful resolution to an obligation provided by the caller
+    /// for some type parameter. The `Vec<N>` represents the
+    /// obligations incurred from normalizing the where-clause (if
+    /// any).
+    VtableParam(Vec<N>),
+
+    /// Virtual calls through an object
+    VtableObject(VtableObjectData<'tcx>),
+
+    /// Successful resolution for a builtin trait.
+    VtableBuiltin(VtableBuiltinData<N>),
+
+    /// Vtable automatically generated for a closure. The def ID is the ID
+    /// of the closure expression. This is a `VtableImpl` in spirit, but the
+    /// impl is generated by the compiler and does not appear in the source.
+    VtableClosure(VtableClosureData<'tcx, N>),
+
+    /// Same as above, but for a fn pointer type with the given signature.
+    VtableFnPointer(ty::Ty<'tcx>),
+}
+
+/// Identifies a particular impl in the source, along with a set of
+/// substitutions from the impl's type/lifetime parameters. The
+/// `nested` vector corresponds to the nested obligations attached to
+/// the impl's type parameters.
+///
+/// The type parameter `N` indicates the type used for "nested
+/// obligations" that are required by the impl. During type check, this
+/// is `Obligation`, as one might expect. During trans, however, this
+/// is `()`, because trans only requires a shallow resolution of an
+/// impl, and nested obligations are satisfied later.
+#[derive(Clone, PartialEq, Eq)]
+pub struct VtableImplData<'tcx, N> {
+    pub impl_def_id: DefId,
+    pub substs: &'tcx subst::Substs<'tcx>,
+    pub nested: Vec<N>
+}
+
+#[derive(Clone, PartialEq, Eq)]
+pub struct VtableClosureData<'tcx, N> {
+    pub closure_def_id: DefId,
+    pub substs: ty::ClosureSubsts<'tcx>,
+    /// Nested obligations. This can be non-empty if the closure
+    /// signature contains associated types.
+    pub nested: Vec<N>
+}
+
+#[derive(Clone)]
+pub struct VtableDefaultImplData<N> {
+    pub trait_def_id: DefId,
+    pub nested: Vec<N>
+}
+
+#[derive(Clone)]
+pub struct VtableBuiltinData<N> {
+    pub nested: Vec<N>
+}
+
+/// A vtable for some object-safe trait `Foo` automatically derived
+/// for the object type `Foo`.
+#[derive(PartialEq,Eq,Clone)]
+pub struct VtableObjectData<'tcx> {
+    /// `Foo` upcast to the obligation trait. This will be some supertrait of `Foo`.
+    pub upcast_trait_ref: ty::PolyTraitRef<'tcx>,
+
+    /// The vtable is formed by concatenating together the method lists of
+    /// the base object trait and all supertraits; this is the start of
+    /// `upcast_trait_ref`'s methods in that vtable.
+    pub vtable_base: usize
+}
+
+/// Creates predicate obligations from the generic bounds.
+pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>,
+                                     generic_bounds: &ty::InstantiatedPredicates<'tcx>)
+                                     -> PredicateObligations<'tcx>
+{
+    util::predicates_for_generics(cause, 0, generic_bounds)
+}
+
+/// Determines whether the type `ty` is known to meet `bound` and
+/// returns true if so. Returns false if `ty` either does not meet
+/// `bound` or is not known to meet `bound` (note that this is
+/// conservative towards *no impl*, which is the opposite of the
+/// `evaluate` methods).
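+///
+/// A minimal usage sketch (illustrative; `infcx`, `ty` and `span` come from
+/// the caller, and `copy_bound` stands for the `Copy` builtin bound):
+///
+/// ```ignore
+/// if type_known_to_meet_builtin_bound(infcx, ty, copy_bound, span) {
+///     // `ty` is definitely `Copy`; otherwise it must be treated as moved.
+/// }
+/// ```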
+pub fn type_known_to_meet_builtin_bound<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
+                                                 ty: Ty<'tcx>,
+                                                 bound: ty::BuiltinBound,
+                                                 span: Span)
+                                                 -> bool
+{
+    debug!("type_known_to_meet_builtin_bound(ty={:?}, bound={:?})",
+           ty,
+           bound);
+
+    let cause = ObligationCause::misc(span, ast::DUMMY_NODE_ID);
+    let obligation =
+        util::predicate_for_builtin_bound(infcx.tcx, cause, bound, 0, ty);
+    let obligation = match obligation {
+        Ok(o) => o,
+        Err(..) => return false
+    };
+    let result = SelectionContext::new(infcx)
+        .evaluate_obligation_conservatively(&obligation);
+    debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} => {:?}",
+           ty, bound, result);
+
+    if result && (ty.has_infer_types() || ty.has_closure_types()) {
+        // Because of inference "guessing", selection can sometimes claim
+        // to succeed while the success requires a guess. To ensure
+        // this function's result remains infallible, we must confirm
+        // that guess. While imperfect, I believe this is sound.
+
+        let mut fulfill_cx = FulfillmentContext::new();
+
+        // We can use a dummy node-id here because we won't pay any mind
+        // to region obligations that arise (there shouldn't really be any
+        // anyhow).
+        let cause = ObligationCause::misc(span, ast::DUMMY_NODE_ID);
+
+        fulfill_cx.register_builtin_bound(infcx, ty, bound, cause);
+
+        // Note: we only assume something is `Copy` if we can
+        // *definitively* show that it implements `Copy`. Otherwise, we
+        // assume it is moved; treating a value as linear (move-only) is always ok.
+        match fulfill_cx.select_all_or_error(infcx) {
+            Ok(()) => {
+                debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} success",
+                       ty,
+                       bound);
+                true
+            }
+            Err(e) => {
+                debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} errors={:?}",
+                       ty,
+                       bound,
+                       e);
+                false
+            }
+        }
+    } else {
+        result
+    }
+}
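+
+// A usage sketch for the conservative contract above (hypothetical `infcx`,
+// `ty`, `bound` -- e.g. the `Copy` builtin bound -- and `span`; illustrative
+// only, not a doctest):
+//
+//     if type_known_to_meet_builtin_bound(infcx, ty, bound, span) {
+//         // `ty` definitely implements the bound.
+//     } else {
+//         // Either `ty` fails the bound, or we simply cannot prove it yet.
+//     }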
+
+// FIXME: this is gonna need to be removed ...
+/// Normalizes the parameter environment, reporting errors if they occur.
+pub fn normalize_param_env_or_error<'a,'tcx>(unnormalized_env: ty::ParameterEnvironment<'a,'tcx>,
+                                             cause: ObligationCause<'tcx>)
+                                             -> ty::ParameterEnvironment<'a,'tcx>
+{
+    // I'm not wild about reporting errors here; I'd prefer to
+    // have the errors reported at a defined place (e.g.,
+    // during typeck). Instead, all parameter environments
+    // effectively go through this function and hence may
+    // report errors. This ensures, of course, that we never
+    // forget to normalize (the alternative seemed like it would
+    // involve a lot of manual invocations of this fn -- and then
+    // we'd have to deal with the errors at each of those sites).
+    //
+    // In any case, in practice, typeck constructs all the
+    // parameter environments once for every fn as it goes,
+    // and errors will get reported then; so after typeck we
+    // can be sure that no errors should occur.
+
+    let tcx = unnormalized_env.tcx;
+    let span = cause.span;
+    let body_id = cause.body_id;
+
+    debug!("normalize_param_env_or_error(unnormalized_env={:?})",
+           unnormalized_env);
+
+    let predicates: Vec<_> =
+        util::elaborate_predicates(tcx, unnormalized_env.caller_bounds.clone())
+        .filter(|p| !p.is_global()) // (*)
+        .collect();
+
+    // (*) Any predicate like `i32: Trait<u32>` or whatever doesn't
+    // need to be in the *environment* to be proven, so screen those
+    // out. This is important for the soundness of inter-fn
+    // caching. Note though that we should probably check that these
+    // predicates hold at the point where the environment is
+    // constructed, but I am not currently doing so out of laziness.
+    // -nmatsakis
+
+    debug!("normalize_param_env_or_error: elaborated-predicates={:?}",
+           predicates);
+
+    let elaborated_env = unnormalized_env.with_caller_bounds(predicates);
+
+    let infcx = infer::new_infer_ctxt(tcx,
+                                      &tcx.tables,
+                                      Some(elaborated_env),
+                                      ProjectionMode::AnyFinal);
+    let predicates = match fully_normalize(&infcx,
+                                           cause,
+                                           &infcx.parameter_environment.caller_bounds) {
+        Ok(predicates) => predicates,
+        Err(errors) => {
+            report_fulfillment_errors(&infcx, &errors);
+            return infcx.parameter_environment; // an unnormalized env is better than nothing
+        }
+    };
+
+    debug!("normalize_param_env_or_error: normalized predicates={:?}",
+           predicates);
+
+    let free_regions = FreeRegionMap::new();
+    infcx.resolve_regions_and_report_errors(&free_regions, body_id);
+    let predicates = match infcx.fully_resolve(&predicates) {
+        Ok(predicates) => predicates,
+        Err(fixup_err) => {
+            // If we encounter a fixup error, it means that some type
+            // variable wound up unconstrained. I actually don't know
+            // if this can happen, and I certainly don't expect it to
+            // happen often, but if it did happen it probably
+            // represents a legitimate failure due to some kind of
+            // unconstrained variable, and it seems better not to ICE,
+            // all things considered.
+            let err_msg = fixup_err_to_string(fixup_err);
+            tcx.sess.span_err(span, &err_msg);
+            return infcx.parameter_environment; // an unnormalized env is better than nothing
+        }
+    };
+
+    debug!("normalize_param_env_or_error: resolved predicates={:?}",
+           predicates);
+
+    infcx.parameter_environment.with_caller_bounds(predicates)
+}
+
+pub fn fully_normalize<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
+                                  cause: ObligationCause<'tcx>,
+                                  value: &T)
+                                  -> Result<T, Vec<FulfillmentError<'tcx>>>
+    where T : TypeFoldable<'tcx>
+{
+    debug!("fully_normalize(value={:?})", value);
+
+    let mut selcx = &mut SelectionContext::new(infcx);
+    // FIXME (@jroesch) ISSUE 26721
+    // I'm not sure whether this is a bug; it needs further investigation.
+    // It appears that by reusing the fulfillment_cx here we incur more
+    // obligations and later trip an assertion on regionck.rs line 337.
+    //
+    // The two possibilities I see are:
+    //      - normalization is not actually fully happening and we
+    //        have a bug elsewhere
+    //      - we are adding a duplicate bound into the list, causing
+    //        its size to change.
+    //
+    // I think we should probably land this refactor and then come
+    // back to this in a follow-up patch.
+    let mut fulfill_cx = FulfillmentContext::new();
+
+    let Normalized { value: normalized_value, obligations } =
+        project::normalize(selcx, cause, value);
+    debug!("fully_normalize: normalized_value={:?} obligations={:?}",
+           normalized_value,
+           obligations);
+    for obligation in obligations {
+        fulfill_cx.register_predicate_obligation(selcx.infcx(), obligation);
+    }
+
+    debug!("fully_normalize: select_all_or_error start");
+    match fulfill_cx.select_all_or_error(infcx) {
+        Ok(()) => { }
+        Err(e) => {
+            debug!("fully_normalize: error={:?}", e);
+            return Err(e);
+        }
+    }
+    debug!("fully_normalize: select_all_or_error complete");
+    let resolved_value = infcx.resolve_type_vars_if_possible(&normalized_value);
+    debug!("fully_normalize: resolved_value={:?}", resolved_value);
+    Ok(resolved_value)
+}
+
+impl<'tcx,O> Obligation<'tcx,O> {
+    pub fn new(cause: ObligationCause<'tcx>,
+               trait_ref: O)
+               -> Obligation<'tcx, O>
+    {
+        Obligation { cause: cause,
+                     recursion_depth: 0,
+                     predicate: trait_ref }
+    }
+
+    fn with_depth(cause: ObligationCause<'tcx>,
+                  recursion_depth: usize,
+                  trait_ref: O)
+                  -> Obligation<'tcx, O>
+    {
+        Obligation { cause: cause,
+                     recursion_depth: recursion_depth,
+                     predicate: trait_ref }
+    }
+
+    pub fn misc(span: Span, body_id: ast::NodeId, trait_ref: O) -> Obligation<'tcx, O> {
+        Obligation::new(ObligationCause::misc(span, body_id), trait_ref)
+    }
+
+    pub fn with<P>(&self, value: P) -> Obligation<'tcx,P> {
+        Obligation { cause: self.cause.clone(),
+                     recursion_depth: self.recursion_depth,
+                     predicate: value }
+    }
+}
+
+impl<'tcx> ObligationCause<'tcx> {
+    pub fn new(span: Span,
+               body_id: ast::NodeId,
+               code: ObligationCauseCode<'tcx>)
+               -> ObligationCause<'tcx> {
+        ObligationCause { span: span, body_id: body_id, code: code }
+    }
+
+    pub fn misc(span: Span, body_id: ast::NodeId) -> ObligationCause<'tcx> {
+        ObligationCause { span: span, body_id: body_id, code: MiscObligation }
+    }
+
+    pub fn dummy() -> ObligationCause<'tcx> {
+        ObligationCause { span: DUMMY_SP, body_id: 0, code: MiscObligation }
+    }
+}
+
+impl<'tcx, N> Vtable<'tcx, N> {
+    pub fn nested_obligations(self) -> Vec<N> {
+        match self {
+            VtableImpl(i) => i.nested,
+            VtableParam(n) => n,
+            VtableBuiltin(i) => i.nested,
+            VtableDefaultImpl(d) => d.nested,
+            VtableClosure(c) => c.nested,
+            VtableObject(_) | VtableFnPointer(..) => vec![]
+        }
+    }
+
+    pub fn map<M, F>(self, f: F) -> Vtable<'tcx, M> where F: FnMut(N) -> M {
+        match self {
+            VtableImpl(i) => VtableImpl(VtableImplData {
+                impl_def_id: i.impl_def_id,
+                substs: i.substs,
+                nested: i.nested.into_iter().map(f).collect()
+            }),
+            VtableParam(n) => VtableParam(n.into_iter().map(f).collect()),
+            VtableBuiltin(i) => VtableBuiltin(VtableBuiltinData {
+                nested: i.nested.into_iter().map(f).collect()
+            }),
+            VtableObject(o) => VtableObject(o),
+            VtableDefaultImpl(d) => VtableDefaultImpl(VtableDefaultImplData {
+                trait_def_id: d.trait_def_id,
+                nested: d.nested.into_iter().map(f).collect()
+            }),
+            VtableFnPointer(f) => VtableFnPointer(f),
+            VtableClosure(c) => VtableClosure(VtableClosureData {
+                closure_def_id: c.closure_def_id,
+                substs: c.substs,
+                nested: c.nested.into_iter().map(f).collect(),
+            })
+        }
+    }
+}
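+
+// A sketch of the "shallow resolution" described on `VtableImplData`: a
+// trans-style consumer can discard the nested obligations by mapping them to
+// `()` (the `vtable` value here is hypothetical, for illustration only):
+//
+//     let shallow: Vtable<'tcx, ()> = vtable.map(|_| ());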
+
+impl<'tcx> FulfillmentError<'tcx> {
+    fn new(obligation: PredicateObligation<'tcx>,
+           code: FulfillmentErrorCode<'tcx>)
+           -> FulfillmentError<'tcx>
+    {
+        FulfillmentError { obligation: obligation, code: code }
+    }
+}
+
+impl<'tcx> TraitObligation<'tcx> {
+    fn self_ty(&self) -> ty::Binder<Ty<'tcx>> {
+        ty::Binder(self.predicate.skip_binder().self_ty())
+    }
+}
diff --git a/src/librustc/traits/object_safety.rs b/src/librustc/traits/object_safety.rs
new file mode 100644 (file)
index 0000000..9af2cfb
--- /dev/null
@@ -0,0 +1,379 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! "Object safety" refers to the ability for a trait to be converted
+//! to an object. In general, traits may only be converted to an
+//! object if all of their methods meet certain criteria. In particular,
+//! they must:
+//!
+//!   - have a suitable receiver from which we can extract a vtable;
+//!   - not reference the erased type `Self` except in the receiver;
+//!   - not have generic type parameters.
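+//!
+//! For illustration, a minimal sketch (hypothetical traits):
+//!
+//! ```ignore
+//! // Object safe: the only method takes `&self` and otherwise never
+//! // mentions `Self` or introduces its own type parameters.
+//! trait Draw {
+//!     fn draw(&self);
+//! }
+//! fn render(shape: &Draw) { shape.draw() }
+//!
+//! // Not object safe: `clone_val` returns `Self`, whose concrete type is
+//! // erased behind a trait object.
+//! trait CloneVal {
+//!     fn clone_val(&self) -> Self;
+//! }
+//! ```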
+
+use super::supertraits;
+use super::elaborate_predicates;
+
+use hir::def_id::DefId;
+use ty::subst::{self, SelfSpace, TypeSpace};
+use traits;
+use ty::{self, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable};
+use std::rc::Rc;
+use syntax::ast;
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub enum ObjectSafetyViolation<'tcx> {
+    /// Self : Sized declared on the trait
+    SizedSelf,
+
+    /// Supertrait reference references `Self` in an illegal location
+    /// (e.g. `trait Foo : Bar<Self>`)
+    SupertraitSelf,
+
+    /// Method has something illegal
+    Method(Rc<ty::Method<'tcx>>, MethodViolationCode),
+}
+
+/// Reasons a method might not be object-safe.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub enum MethodViolationCode {
+    /// e.g., `fn foo()`
+    StaticMethod,
+
+    /// e.g., `fn foo(&self, x: Self)` or `fn foo(&self) -> Self`
+    ReferencesSelf,
+
+    /// e.g., `fn foo<A>()`
+    Generic,
+}
+
+pub fn is_object_safe<'tcx>(tcx: &TyCtxt<'tcx>,
+                            trait_def_id: DefId)
+                            -> bool
+{
+    // Because we query yes/no results frequently, we keep a cache:
+    let def = tcx.lookup_trait_def(trait_def_id);
+
+    let result = def.object_safety().unwrap_or_else(|| {
+        let result = object_safety_violations(tcx, trait_def_id).is_empty();
+
+        // Record just a yes/no result in the cache; this is what is
+        // queried most frequently. Note that this may overwrite a
+        // previous result, but always with the same thing.
+        def.set_object_safety(result);
+
+        result
+    });
+
+    debug!("is_object_safe({:?}) = {}", trait_def_id, result);
+
+    result
+}
+
+/// Returns the object safety violations that affect
+/// astconv - currently, Self in supertraits. This is needed
+/// because `object_safety_violations` can't be used during
+/// type collection.
+pub fn astconv_object_safety_violations<'tcx>(tcx: &TyCtxt<'tcx>,
+                                              trait_def_id: DefId)
+                                              -> Vec<ObjectSafetyViolation<'tcx>>
+{
+    let mut violations = vec![];
+
+    if supertraits_reference_self(tcx, trait_def_id) {
+        violations.push(ObjectSafetyViolation::SupertraitSelf);
+    }
+
+    debug!("astconv_object_safety_violations(trait_def_id={:?}) = {:?}",
+           trait_def_id,
+           violations);
+
+    violations
+}
+
+pub fn object_safety_violations<'tcx>(tcx: &TyCtxt<'tcx>,
+                                      trait_def_id: DefId)
+                                      -> Vec<ObjectSafetyViolation<'tcx>>
+{
+    traits::supertrait_def_ids(tcx, trait_def_id)
+        .flat_map(|def_id| object_safety_violations_for_trait(tcx, def_id))
+        .collect()
+}
+
+fn object_safety_violations_for_trait<'tcx>(tcx: &TyCtxt<'tcx>,
+                                            trait_def_id: DefId)
+                                            -> Vec<ObjectSafetyViolation<'tcx>>
+{
+    // Check methods for violations.
+    let mut violations: Vec<_> =
+        tcx.trait_items(trait_def_id).iter()
+        .filter_map(|item| {
+            match *item {
+                ty::MethodTraitItem(ref m) => {
+                    object_safety_violation_for_method(tcx, trait_def_id, &m)
+                        .map(|code| ObjectSafetyViolation::Method(m.clone(), code))
+                }
+                _ => None,
+            }
+        })
+        .collect();
+
+    // Check the trait itself.
+    if trait_has_sized_self(tcx, trait_def_id) {
+        violations.push(ObjectSafetyViolation::SizedSelf);
+    }
+    if supertraits_reference_self(tcx, trait_def_id) {
+        violations.push(ObjectSafetyViolation::SupertraitSelf);
+    }
+
+    debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
+           trait_def_id,
+           violations);
+
+    violations
+}
+
+pub fn supertraits_reference_self<'tcx>(tcx: &TyCtxt<'tcx>,
+                                        trait_def_id: DefId)
+                                        -> bool
+{
+    let trait_def = tcx.lookup_trait_def(trait_def_id);
+    let trait_ref = trait_def.trait_ref.clone();
+    let trait_ref = trait_ref.to_poly_trait_ref();
+    let predicates = tcx.lookup_super_predicates(trait_def_id);
+    predicates
+        .predicates
+        .into_iter()
+        .map(|predicate| predicate.subst_supertrait(tcx, &trait_ref))
+        .any(|predicate| {
+            match predicate {
+                ty::Predicate::Trait(ref data) => {
+                    // In the case of a trait predicate, we can skip the "self" type.
+                    data.0.trait_ref.substs.types.get_slice(TypeSpace)
+                                                 .iter()
+                                                 .cloned()
+                                                 .any(|t| t.has_self_ty())
+                }
+                ty::Predicate::Projection(..) |
+                ty::Predicate::WellFormed(..) |
+                ty::Predicate::ObjectSafe(..) |
+                ty::Predicate::TypeOutlives(..) |
+                ty::Predicate::RegionOutlives(..) |
+                ty::Predicate::Equate(..) => {
+                    false
+                }
+            }
+        })
+}
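+
+// For illustration (hypothetical traits), the kind of supertrait reference
+// this check rejects looks like:
+//
+//     trait Bar<T> { }
+//     trait Foo : Bar<Self> { }
+//
+// Using `Foo` as an object would require knowing the erased `Self` type just
+// to spell out the `Bar<Self>` supertrait bound, so this is reported as
+// `SupertraitSelf`.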
+
+fn trait_has_sized_self<'tcx>(tcx: &TyCtxt<'tcx>,
+                              trait_def_id: DefId)
+                              -> bool
+{
+    let trait_def = tcx.lookup_trait_def(trait_def_id);
+    let trait_predicates = tcx.lookup_predicates(trait_def_id);
+    generics_require_sized_self(tcx, &trait_def.generics, &trait_predicates)
+}
+
+fn generics_require_sized_self<'tcx>(tcx: &TyCtxt<'tcx>,
+                                     generics: &ty::Generics<'tcx>,
+                                     predicates: &ty::GenericPredicates<'tcx>)
+                                     -> bool
+{
+    let sized_def_id = match tcx.lang_items.sized_trait() {
+        Some(def_id) => def_id,
+        None => { return false; /* No Sized trait, can't require it! */ }
+    };
+
+    // Search for a predicate like `Self : Sized` amongst the trait bounds.
+    let free_substs = tcx.construct_free_substs(generics,
+                                                tcx.region_maps.node_extent(ast::DUMMY_NODE_ID));
+    let predicates = predicates.instantiate(tcx, &free_substs).predicates.into_vec();
+    elaborate_predicates(tcx, predicates)
+        .any(|predicate| {
+            match predicate {
+                ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => {
+                    trait_pred.0.self_ty().is_self()
+                }
+                ty::Predicate::Projection(..) |
+                ty::Predicate::Trait(..) |
+                ty::Predicate::Equate(..) |
+                ty::Predicate::RegionOutlives(..) |
+                ty::Predicate::WellFormed(..) |
+                ty::Predicate::ObjectSafe(..) |
+                ty::Predicate::TypeOutlives(..) => {
+                    false
+                }
+            }
+        })
+}
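+
+// For illustration (hypothetical traits): this predicate search fires both for
+// the trait itself and for individual methods, with opposite consequences.
+//
+//     trait Consume: Sized {              // `Self: Sized` on the trait itself
+//         fn consume(self);               // => a `SizedSelf` violation
+//     }
+//
+//     trait Stream {
+//         fn next(&mut self) -> Option<u8>;
+//         fn collect_all(self) -> Vec<u8>
+//             where Self: Sized;          // exempts only this method, so
+//     }                                   // `Stream` stays object safe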
+
+/// Returns `Some(_)` if this method makes the containing trait not object safe.
+fn object_safety_violation_for_method<'tcx>(tcx: &TyCtxt<'tcx>,
+                                            trait_def_id: DefId,
+                                            method: &ty::Method<'tcx>)
+                                            -> Option<MethodViolationCode>
+{
+    // Any method that has a `Self : Sized` requisite is otherwise
+    // exempt from the regulations.
+    if generics_require_sized_self(tcx, &method.generics, &method.predicates) {
+        return None;
+    }
+
+    virtual_call_violation_for_method(tcx, trait_def_id, method)
+}
+
+/// We say a method is *vtable safe* if it can be invoked on a trait
+/// object.  Note that object-safe traits can have some
+/// non-vtable-safe methods, so long as they require `Self:Sized` or
+/// otherwise ensure that they cannot be used when `Self=Trait`.
+pub fn is_vtable_safe_method<'tcx>(tcx: &TyCtxt<'tcx>,
+                                   trait_def_id: DefId,
+                                   method: &ty::Method<'tcx>)
+                                   -> bool
+{
+    virtual_call_violation_for_method(tcx, trait_def_id, method).is_none()
+}
+
+/// Returns `Some(_)` if this method cannot be called on a trait
+/// object; this does not necessarily imply that the enclosing trait
+/// is not object safe, because the method might have a where clause
+/// `Self:Sized`.
+fn virtual_call_violation_for_method<'tcx>(tcx: &TyCtxt<'tcx>,
+                                           trait_def_id: DefId,
+                                           method: &ty::Method<'tcx>)
+                                           -> Option<MethodViolationCode>
+{
+    // The method's first parameter must be something that derefs (or
+    // autorefs) to `&self`. For now, we only accept `self`, `&self`
+    // and `Box<Self>`.
+    match method.explicit_self {
+        ty::ExplicitSelfCategory::Static => {
+            return Some(MethodViolationCode::StaticMethod);
+        }
+
+        ty::ExplicitSelfCategory::ByValue |
+        ty::ExplicitSelfCategory::ByReference(..) |
+        ty::ExplicitSelfCategory::ByBox => {
+        }
+    }
+
+    // The `Self` type is erased, so it should not appear in the list of
+    // arguments or the return type, apart from the receiver.
+    let ref sig = method.fty.sig;
+    for &input_ty in &sig.0.inputs[1..] {
+        if contains_illegal_self_type_reference(tcx, trait_def_id, input_ty) {
+            return Some(MethodViolationCode::ReferencesSelf);
+        }
+    }
+    if let ty::FnConverging(result_type) = sig.0.output {
+        if contains_illegal_self_type_reference(tcx, trait_def_id, result_type) {
+            return Some(MethodViolationCode::ReferencesSelf);
+        }
+    }
+
+    // We can't monomorphize things like `fn foo<A>(...)`.
+    if !method.generics.types.is_empty_in(subst::FnSpace) {
+        return Some(MethodViolationCode::Generic);
+    }
+
+    None
+}
+
+fn contains_illegal_self_type_reference<'tcx>(tcx: &TyCtxt<'tcx>,
+                                              trait_def_id: DefId,
+                                              ty: Ty<'tcx>)
+                                              -> bool
+{
+    // This is somewhat subtle. In general, we want to forbid
+    // references to `Self` in the argument and return types,
+    // since the value of `Self` is erased. However, there is one
+    // exception: it is ok to reference `Self` in order to access
+    // an associated type of the current trait, since we retain
+    // the value of those associated types in the object type
+    // itself.
+    //
+    // ```rust
+    // trait SuperTrait {
+    //     type X;
+    // }
+    //
+    // trait Trait : SuperTrait {
+    //     type Y;
+    //     fn foo(&self, x: Self) // bad
+    //     fn foo(&self) -> Self // bad
+    //     fn foo(&self) -> Option<Self> // bad
+    //     fn foo(&self) -> Self::Y // OK, desugars to next example
+    //     fn foo(&self) -> <Self as Trait>::Y // OK
+    //     fn foo(&self) -> Self::X // OK, desugars to next example
+    //     fn foo(&self) -> <Self as SuperTrait>::X // OK
+    // }
+    // ```
+    //
+    // However, it is not as simple as allowing `Self` in a projected
+    // type, because there are illegal ways to use `Self` as well:
+    //
+    // ```rust
+    // trait Trait : SuperTrait {
+    //     ...
+    //     fn foo(&self) -> <Self as SomeOtherTrait>::X;
+    // }
+    // ```
+    //
+    // Here we will not have the type of `X` recorded in the
+    // object type, and we cannot resolve `Self as SomeOtherTrait`
+    // without knowing what `Self` is.
+
+    let mut supertraits: Option<Vec<ty::PolyTraitRef<'tcx>>> = None;
+    let mut error = false;
+    ty.maybe_walk(|ty| {
+        match ty.sty {
+            ty::TyParam(ref param_ty) => {
+                if param_ty.space == SelfSpace {
+                    error = true;
+                }
+
+                false // no contained types to walk
+            }
+
+            ty::TyProjection(ref data) => {
+                // This is a projected type `<Foo as SomeTrait>::X`.
+
+                // Compute supertraits of current trait lazily.
+                if supertraits.is_none() {
+                    let trait_def = tcx.lookup_trait_def(trait_def_id);
+                    let trait_ref = ty::Binder(trait_def.trait_ref.clone());
+                    supertraits = Some(traits::supertraits(tcx, trait_ref).collect());
+                }
+
+                // Determine whether the trait reference `Foo as
+                // SomeTrait` is in fact a supertrait of the
+                // current trait. In that case, this type is
+                // legal, because the type `X` will be specified
+                // in the object type.  Note that we can just use
+                // direct equality here because all of these types
+                // are part of the formal parameter listing, and
+                // hence there should be no inference variables.
+                let projection_trait_ref = ty::Binder(data.trait_ref.clone());
+                let is_supertrait_of_current_trait =
+                    supertraits.as_ref().unwrap().contains(&projection_trait_ref);
+
+                if is_supertrait_of_current_trait {
+                    false // do not walk contained types, do not report error, do collect $200
+                } else {
+                    true // DO walk contained types, POSSIBLY reporting an error
+                }
+            }
+
+            _ => true, // walk contained types, if any
+        }
+    });
+
+    error
+}
diff --git a/src/librustc/traits/project.rs b/src/librustc/traits/project.rs
new file mode 100644 (file)
index 0000000..a0d6f5f
--- /dev/null
@@ -0,0 +1,1178 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Code for projecting associated types out of trait references.
+
+use super::elaborate_predicates;
+use super::report_overflow_error;
+use super::specialization_graph;
+use super::translate_substs;
+use super::Obligation;
+use super::ObligationCause;
+use super::PredicateObligation;
+use super::SelectionContext;
+use super::SelectionError;
+use super::VtableClosureData;
+use super::VtableImplData;
+use super::util;
+
+use hir::def_id::DefId;
+use infer::{self, InferOk, TypeOrigin};
+use ty::subst::Subst;
+use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt};
+use ty::fold::{TypeFoldable, TypeFolder};
+use syntax::parse::token;
+use syntax::ast;
+use util::common::FN_OUTPUT_NAME;
+
+use std::rc::Rc;
+
+/// Depending on the stage of compilation, we want projection to be
+/// more or less conservative.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum ProjectionMode {
+    /// FIXME (#32205)
+    /// At coherence-checking time, we're still constructing the
+    /// specialization graph, and thus we only project
+    /// non-`default` associated types that are defined directly in
+    /// the applicable impl. (This behavior should be improved over
+    /// time, to allow for successful projections modulo cycles
+    /// between different impls).
+    ///
+    /// Here's an example that will fail due to the restriction:
+    ///
+    /// ```
+    /// trait Assoc {
+    ///     type Output;
+    /// }
+    ///
+    /// impl<T> Assoc for T {
+    ///     type Output = bool;
+    /// }
+    ///
+    /// impl Assoc for u8 {} // <- inherits the non-default type from above
+    ///
+    /// trait Foo {}
+    /// impl Foo for u32 {}
+    /// impl Foo for <u8 as Assoc>::Output {}  // <- this projection will fail
+    /// ```
+    ///
+    /// The projection would succeed if `Output` had been defined
+    /// directly in the impl for `u8`.
+    Topmost,
+
+    /// At type-checking time, we refuse to project any associated
+    /// type that is marked `default`. Non-`default` ("final") types
+    /// are always projected. This is necessary in general for
+    /// soundness of specialization. However, we *could* allow
+    /// projections in fully-monomorphic cases. We choose not to,
+    /// because we prefer for `default type` to force the type
+    /// definition to be treated abstractly by any consumers of the
+    /// impl. Concretely, that means that the following example will
+    /// fail to compile:
+    ///
+    /// ```
+    /// trait Assoc {
+    ///     type Output;
+    /// }
+    ///
+    /// impl<T> Assoc for T {
+    ///     default type Output = bool;
+    /// }
+    ///
+    /// fn main() {
+    ///     let <() as Assoc>::Output = true;
+    /// }
+    /// ```
+    AnyFinal,
+
+    /// At trans time, all projections will succeed.
+    Any,
+}
+
+impl ProjectionMode {
+    pub fn is_topmost(&self) -> bool {
+        match *self {
+            ProjectionMode::Topmost => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_any_final(&self) -> bool {
+        match *self {
+            ProjectionMode::AnyFinal => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_any(&self) -> bool {
+        match *self {
+            ProjectionMode::Any => true,
+            _ => false,
+        }
+    }
+}
+
+
+pub type PolyProjectionObligation<'tcx> =
+    Obligation<'tcx, ty::PolyProjectionPredicate<'tcx>>;
+
+pub type ProjectionObligation<'tcx> =
+    Obligation<'tcx, ty::ProjectionPredicate<'tcx>>;
+
+pub type ProjectionTyObligation<'tcx> =
+    Obligation<'tcx, ty::ProjectionTy<'tcx>>;
+
+/// When attempting to resolve `<T as TraitRef>::Name` ...
+#[derive(Debug)]
+pub enum ProjectionTyError<'tcx> {
+    /// ...we found multiple sources of information and couldn't resolve the ambiguity.
+    TooManyCandidates,
+
+    /// ...an error occurred matching `T : TraitRef`
+    TraitSelectionError(SelectionError<'tcx>),
+}
+
+#[derive(Clone)]
+pub struct MismatchedProjectionTypes<'tcx> {
+    pub err: ty::error::TypeError<'tcx>
+}
+
+#[derive(PartialEq, Eq, Debug)]
+enum ProjectionTyCandidate<'tcx> {
+    // from a where-clause in the env or object type
+    ParamEnv(ty::PolyProjectionPredicate<'tcx>),
+
+    // from the definition of `Trait` when you have something like <<A as Trait>::B as Trait2>::C
+    TraitDef(ty::PolyProjectionPredicate<'tcx>),
+
+    // defined in an impl
+    Impl(VtableImplData<'tcx, PredicateObligation<'tcx>>),
+
+    // closure return type
+    Closure(VtableClosureData<'tcx, PredicateObligation<'tcx>>),
+
+    // fn pointer return type
+    FnPointer(Ty<'tcx>),
+}
+
+struct ProjectionTyCandidateSet<'tcx> {
+    vec: Vec<ProjectionTyCandidate<'tcx>>,
+    ambiguous: bool
+}
+
+/// Evaluates constraints of the form:
+///
+///     for<...> <T as Trait>::U == V
+///
+/// If successful, this may result in additional obligations.
+pub fn poly_project_and_unify_type<'cx,'tcx>(
+    selcx: &mut SelectionContext<'cx,'tcx>,
+    obligation: &PolyProjectionObligation<'tcx>)
+    -> Result<Option<Vec<PredicateObligation<'tcx>>>, MismatchedProjectionTypes<'tcx>>
+{
+    debug!("poly_project_and_unify_type(obligation={:?})",
+           obligation);
+
+    let infcx = selcx.infcx();
+    infcx.commit_if_ok(|snapshot| {
+        let (skol_predicate, skol_map) =
+            infcx.skolemize_late_bound_regions(&obligation.predicate, snapshot);
+
+        let skol_obligation = obligation.with(skol_predicate);
+        match project_and_unify_type(selcx, &skol_obligation) {
+            Ok(result) => {
+                match infcx.leak_check(&skol_map, snapshot) {
+                    Ok(()) => Ok(infcx.plug_leaks(skol_map, snapshot, &result)),
+                    Err(e) => Err(MismatchedProjectionTypes { err: e }),
+                }
+            }
+            Err(e) => {
+                Err(e)
+            }
+        }
+    })
+}
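+
+// A concrete instance of such a constraint (illustrative only): given a bound
+// `T: Iterator<Item = u32>`, an obligation of the form
+//
+//     <T as Iterator>::Item == u32
+//
+// is discharged here by projecting the left-hand side and unifying the result
+// with `u32`; the `for<...>` prefix above covers the higher-ranked case, where
+// late-bound regions are skolemized before the same projection is attempted.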
+
+/// Evaluates constraints of the form:
+///
+///     <T as Trait>::U == V
+///
+/// If successful, this may result in additional obligations.
+fn project_and_unify_type<'cx,'tcx>(
+    selcx: &mut SelectionContext<'cx,'tcx>,
+    obligation: &ProjectionObligation<'tcx>)
+    -> Result<Option<Vec<PredicateObligation<'tcx>>>, MismatchedProjectionTypes<'tcx>>
+{
+    debug!("project_and_unify_type(obligation={:?})",
+           obligation);
+
+    let Normalized { value: normalized_ty, obligations } =
+        match opt_normalize_projection_type(selcx,
+                                            obligation.predicate.projection_ty.clone(),
+                                            obligation.cause.clone(),
+                                            obligation.recursion_depth) {
+            Some(n) => n,
+            None => {
+                consider_unification_despite_ambiguity(selcx, obligation);
+                return Ok(None);
+            }
+        };
+
+    debug!("project_and_unify_type: normalized_ty={:?} obligations={:?}",
+           normalized_ty,
+           obligations);
+
+    let infcx = selcx.infcx();
+    let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span);
+    match infer::mk_eqty(infcx, true, origin, normalized_ty, obligation.predicate.ty) {
+        Ok(InferOk { obligations: inferred_obligations, .. }) => {
+            // FIXME(#32730) propagate obligations
+            assert!(inferred_obligations.is_empty());
+            Ok(Some(obligations))
+        },
+        Err(err) => Err(MismatchedProjectionTypes { err: err }),
+    }
+}
+
+fn consider_unification_despite_ambiguity<'cx,'tcx>(selcx: &mut SelectionContext<'cx,'tcx>,
+                                                    obligation: &ProjectionObligation<'tcx>) {
+    debug!("consider_unification_despite_ambiguity(obligation={:?})",
+           obligation);
+
+    let def_id = obligation.predicate.projection_ty.trait_ref.def_id;
+    match selcx.tcx().lang_items.fn_trait_kind(def_id) {
+        Some(_) => { }
+        None => { return; }
+    }
+
+    let infcx = selcx.infcx();
+    let self_ty = obligation.predicate.projection_ty.trait_ref.self_ty();
+    let self_ty = infcx.shallow_resolve(self_ty);
+    debug!("consider_unification_despite_ambiguity: self_ty.sty={:?}",
+           self_ty.sty);
+    match self_ty.sty {
+        ty::TyClosure(closure_def_id, ref substs) => {
+            let closure_typer = selcx.closure_typer();
+            let closure_type = closure_typer.closure_type(closure_def_id, substs);
+            let ty::Binder((_, ret_type)) =
+                util::closure_trait_ref_and_return_type(infcx.tcx,
+                                                        def_id,
+                                                        self_ty,
+                                                        &closure_type.sig,
+                                                        util::TupleArgumentsFlag::No);
+            // We don't have to normalize the return type here - this is only
+            // reached for TyClosure: Fn inputs where the closure kind is
+            // still unknown, which should only occur in typeck where the
+            // closure type is already normalized.
+            let (ret_type, _) =
+                infcx.replace_late_bound_regions_with_fresh_var(
+                    obligation.cause.span,
+                    infer::AssocTypeProjection(obligation.predicate.projection_ty.item_name),
+                    &ty::Binder(ret_type));
+
+            debug!("consider_unification_despite_ambiguity: ret_type={:?}",
+                   ret_type);
+            let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span);
+            let obligation_ty = obligation.predicate.ty;
+            match infer::mk_eqty(infcx, true, origin, obligation_ty, ret_type) {
+                Ok(InferOk { obligations, .. }) => {
+                    // FIXME(#32730) propagate obligations
+                    assert!(obligations.is_empty());
+                }
+                Err(_) => { /* ignore errors */ }
+            }
+        }
+        _ => { }
+    }
+}
+
+/// Normalizes any associated type projections in `value`, replacing
+/// them with a fully resolved type where possible. The return value
+/// combines the normalized result and any additional obligations that
+/// were incurred as result.
+pub fn normalize<'a,'b,'tcx,T>(selcx: &'a mut SelectionContext<'b,'tcx>,
+                               cause: ObligationCause<'tcx>,
+                               value: &T)
+                               -> Normalized<'tcx, T>
+    where T : TypeFoldable<'tcx>
+{
+    normalize_with_depth(selcx, cause, 0, value)
+}
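+
+// For illustration (not a doctest): normalization replaces associated type
+// projections with their resolved values where possible, e.g.
+//
+//     <Vec<u32> as IntoIterator>::Item   ==>   u32
+//
+// Any conditions discovered while doing so come back as the `obligations`
+// field of the returned `Normalized` value.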
+
+/// As `normalize`, but with a custom depth.
+pub fn normalize_with_depth<'a,'b,'tcx,T>(selcx: &'a mut SelectionContext<'b,'tcx>,
+                                          cause: ObligationCause<'tcx>,
+                                          depth: usize,
+                                          value: &T)
+                                          -> Normalized<'tcx, T>
+    where T : TypeFoldable<'tcx>
+{
+    let mut normalizer = AssociatedTypeNormalizer::new(selcx, cause, depth);
+    let result = normalizer.fold(value);
+
+    Normalized {
+        value: result,
+        obligations: normalizer.obligations,
+    }
+}
+
+struct AssociatedTypeNormalizer<'a,'b:'a,'tcx:'b> {
+    selcx: &'a mut SelectionContext<'b,'tcx>,
+    cause: ObligationCause<'tcx>,
+    obligations: Vec<PredicateObligation<'tcx>>,
+    depth: usize,
+}
+
+impl<'a,'b,'tcx> AssociatedTypeNormalizer<'a,'b,'tcx> {
+    fn new(selcx: &'a mut SelectionContext<'b,'tcx>,
+           cause: ObligationCause<'tcx>,
+           depth: usize)
+           -> AssociatedTypeNormalizer<'a,'b,'tcx>
+    {
+        AssociatedTypeNormalizer {
+            selcx: selcx,
+            cause: cause,
+            obligations: vec!(),
+            depth: depth,
+        }
+    }
+
+    fn fold<T:TypeFoldable<'tcx>>(&mut self, value: &T) -> T {
+        let value = self.selcx.infcx().resolve_type_vars_if_possible(value);
+
+        if !value.has_projection_types() {
+            value.clone()
+        } else {
+            value.fold_with(self)
+        }
+    }
+}
+
+impl<'a,'b,'tcx> TypeFolder<'tcx> for AssociatedTypeNormalizer<'a,'b,'tcx> {
+    fn tcx(&self) -> &TyCtxt<'tcx> {
+        self.selcx.tcx()
+    }
+
+    fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        // We don't want to normalize associated types that occur inside of region
+        // binders, because they may contain bound regions, and we can't cope with that.
+        //
+        // Example:
+        //
+        //     for<'a> fn(<T as Foo<&'a>>::A)
+        //
+        // Instead of normalizing `<T as Foo<&'a>>::A` here, we'll
+        // normalize it when we instantiate those bound regions (which
+        // should occur eventually).
+
+        let ty = ty.super_fold_with(self);
+        match ty.sty {
+            ty::TyProjection(ref data) if !data.has_escaping_regions() => { // (*)
+
+                // (*) This is kind of hacky -- we need to be able to
+                // handle normalization within binders because
+                // otherwise we wind up needing to normalize when doing
+                // trait matching (since you can have a trait
+                // obligation like `for<'a> T::B : Fn(&'a i32)`), but
+                // we can't normalize with bound regions in scope. So
+                // for now we just ignore binders but only normalize
+                // if all bound regions are gone (and then we still
+                // have to renormalize whenever we instantiate a
+                // binder). It would be better to normalize in a
+                // binding-aware fashion.
+
+                let Normalized { value: ty, obligations } =
+                    normalize_projection_type(self.selcx,
+                                              data.clone(),
+                                              self.cause.clone(),
+                                              self.depth);
+                self.obligations.extend(obligations);
+                ty
+            }
+
+            _ => {
+                ty
+            }
+        }
+    }
+}
+
+#[derive(Clone)]
+pub struct Normalized<'tcx,T> {
+    pub value: T,
+    pub obligations: Vec<PredicateObligation<'tcx>>,
+}
+
+pub type NormalizedTy<'tcx> = Normalized<'tcx, Ty<'tcx>>;
+
+impl<'tcx,T> Normalized<'tcx,T> {
+    pub fn with<U>(self, value: U) -> Normalized<'tcx,U> {
+        Normalized { value: value, obligations: self.obligations }
+    }
+}
+
+/// The guts of `normalize`: normalize a specific projection like `<T
+/// as Trait>::Item`. The result is always a type (and possibly
+/// additional obligations). If ambiguity arises, which implies that
+/// there are unresolved type variables in the projection, we will
+/// substitute a fresh type variable `$X` and generate a new
+/// obligation `<T as Trait>::Item == $X` for later.
+pub fn normalize_projection_type<'a,'b,'tcx>(
+    selcx: &'a mut SelectionContext<'b,'tcx>,
+    projection_ty: ty::ProjectionTy<'tcx>,
+    cause: ObligationCause<'tcx>,
+    depth: usize)
+    -> NormalizedTy<'tcx>
+{
+    opt_normalize_projection_type(selcx, projection_ty.clone(), cause.clone(), depth)
+        .unwrap_or_else(move || {
+            // if we bottom out in ambiguity, create a type variable
+            // and a deferred predicate to resolve this when more type
+            // information is available.
+
+            let ty_var = selcx.infcx().next_ty_var();
+            let projection = ty::Binder(ty::ProjectionPredicate {
+                projection_ty: projection_ty,
+                ty: ty_var
+            });
+            let obligation = Obligation::with_depth(
+                cause, depth + 1, projection.to_predicate());
+            Normalized {
+                value: ty_var,
+                obligations: vec!(obligation)
+            }
+        })
+}
+
+/// The guts of `normalize`: normalize a specific projection like `<T
+/// as Trait>::Item`. The result is always a type (and possibly
+/// additional obligations). Returns `None` in the case of ambiguity,
+/// which indicates that there are unbound type variables.
+fn opt_normalize_projection_type<'a,'b,'tcx>(
+    selcx: &'a mut SelectionContext<'b,'tcx>,
+    projection_ty: ty::ProjectionTy<'tcx>,
+    cause: ObligationCause<'tcx>,
+    depth: usize)
+    -> Option<NormalizedTy<'tcx>>
+{
+    debug!("normalize_projection_type(\
+           projection_ty={:?}, \
+           depth={})",
+           projection_ty,
+           depth);
+
+    let obligation = Obligation::with_depth(cause.clone(), depth, projection_ty.clone());
+    match project_type(selcx, &obligation) {
+        Ok(ProjectedTy::Progress(projected_ty, mut obligations)) => {
+            // if projection succeeded, then what we get out of this
+            // is also non-normalized (consider: it was derived from
+            // an impl, where-clause etc) and hence we must
+            // re-normalize it
+
+            debug!("normalize_projection_type: projected_ty={:?} depth={} obligations={:?}",
+                   projected_ty,
+                   depth,
+                   obligations);
+
+            if projected_ty.has_projection_types() {
+                let mut normalizer = AssociatedTypeNormalizer::new(selcx, cause, depth+1);
+                let normalized_ty = normalizer.fold(&projected_ty);
+
+                debug!("normalize_projection_type: normalized_ty={:?} depth={}",
+                       normalized_ty,
+                       depth);
+
+                obligations.extend(normalizer.obligations);
+                Some(Normalized {
+                    value: normalized_ty,
+                    obligations: obligations,
+                })
+            } else {
+                Some(Normalized {
+                    value: projected_ty,
+                    obligations: obligations,
+                })
+            }
+        }
+        Ok(ProjectedTy::NoProgress(projected_ty)) => {
+            debug!("normalize_projection_type: projected_ty={:?} no progress",
+                   projected_ty);
+            Some(Normalized {
+                value: projected_ty,
+                obligations: vec!()
+            })
+        }
+        Err(ProjectionTyError::TooManyCandidates) => {
+            debug!("normalize_projection_type: too many candidates");
+            None
+        }
+        Err(ProjectionTyError::TraitSelectionError(_)) => {
+            debug!("normalize_projection_type: ERROR");
+            // if we got an error processing the `T as Trait` part,
+            // just return a fresh inference variable, but add the
+            // obligation `T : Trait`, which when processed will cause
+            // the error to be reported later
+
+            Some(normalize_to_error(selcx, projection_ty, cause, depth))
+        }
+    }
+}
+
+/// In various error cases (e.g., when we are projecting
+/// `<T as Trait>::Item` but `T: Trait` does not hold), we cannot
+/// generate a valid normalized projection. Therefore, we create an
+/// inference variable and return an associated obligation that, when
+/// fulfilled, will lead to an error.
+///
+/// Note that we used to return `TyError` here, but that was quite
+/// dubious -- the premise was that an error would *eventually* be
+/// reported, when the obligation was processed. But in general once
+/// you see a `TyError` you are supposed to be able to assume that an
+/// error *has been* reported, so that you can take whatever heuristic
+/// paths you want to take. To make things worse, it was possible for
+/// cycles to arise, where you basically had a setup like `<MyType<$0>
+/// as Trait>::Foo == $0`. Here, normalizing `<MyType<$0> as
+/// Trait>::Foo` to `[type error]` would lead to an obligation of
+/// `<MyType<[type error]> as Trait>::Foo`.  We are supposed to report
+/// an error for this obligation, but we legitimately should not,
+/// because it contains `[type error]`. Yuck! (See issue #29857 for
+/// one case where this arose.)
+fn normalize_to_error<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
+                               projection_ty: ty::ProjectionTy<'tcx>,
+                               cause: ObligationCause<'tcx>,
+                               depth: usize)
+                               -> NormalizedTy<'tcx>
+{
+    let trait_ref = projection_ty.trait_ref.to_poly_trait_ref();
+    let trait_obligation = Obligation { cause: cause,
+                                        recursion_depth: depth,
+                                        predicate: trait_ref.to_predicate() };
+    let new_value = selcx.infcx().next_ty_var();
+    Normalized {
+        value: new_value,
+        obligations: vec!(trait_obligation)
+    }
+}
+
+enum ProjectedTy<'tcx> {
+    Progress(Ty<'tcx>, Vec<PredicateObligation<'tcx>>),
+    NoProgress(Ty<'tcx>),
+}
+
+/// Compute the result of a projection type (if we can).
+fn project_type<'cx,'tcx>(
+    selcx: &mut SelectionContext<'cx,'tcx>,
+    obligation: &ProjectionTyObligation<'tcx>)
+    -> Result<ProjectedTy<'tcx>, ProjectionTyError<'tcx>>
+{
+    debug!("project(obligation={:?})",
+           obligation);
+
+    let recursion_limit = selcx.tcx().sess.recursion_limit.get();
+    if obligation.recursion_depth >= recursion_limit {
+        debug!("project: overflow!");
+        report_overflow_error(selcx.infcx(), &obligation, true);
+    }
+
+    let obligation_trait_ref =
+        selcx.infcx().resolve_type_vars_if_possible(&obligation.predicate.trait_ref);
+
+    debug!("project: obligation_trait_ref={:?}", obligation_trait_ref);
+
+    if obligation_trait_ref.references_error() {
+        return Ok(ProjectedTy::Progress(selcx.tcx().types.err, vec!()));
+    }
+
+    let mut candidates = ProjectionTyCandidateSet {
+        vec: Vec::new(),
+        ambiguous: false,
+    };
+
+    assemble_candidates_from_param_env(selcx,
+                                       obligation,
+                                       &obligation_trait_ref,
+                                       &mut candidates);
+
+    assemble_candidates_from_trait_def(selcx,
+                                       obligation,
+                                       &obligation_trait_ref,
+                                       &mut candidates);
+
+    if let Err(e) = assemble_candidates_from_impls(selcx,
+                                                   obligation,
+                                                   &obligation_trait_ref,
+                                                   &mut candidates) {
+        return Err(ProjectionTyError::TraitSelectionError(e));
+    }
+
+    debug!("{} candidates, ambiguous={}",
+           candidates.vec.len(),
+           candidates.ambiguous);
+
+    // Inherent ambiguity that prevents us from even enumerating the
+    // candidates.
+    if candidates.ambiguous {
+        return Err(ProjectionTyError::TooManyCandidates);
+    }
+
+    // Drop duplicates.
+    //
+    // Note: `candidates.vec` seems to be on the critical path of the
+    // compiler. Replacing it with a hash set was also tried, which would
+    // render the following dedup unnecessary. That led to cleaner code but
+    // increased the compile time of `librustc` from 5m30s to 6m in one test,
+    // a ~9% performance loss.
+    if candidates.vec.len() > 1 {
+        let mut i = 0;
+        while i < candidates.vec.len() {
+            let has_dup = (0..i).any(|j| candidates.vec[i] == candidates.vec[j]);
+            if has_dup {
+                candidates.vec.swap_remove(i);
+            } else {
+                i += 1;
+            }
+        }
+    }
+
+    // Prefer where-clauses. As in select, if there are multiple
+    // candidates, we prefer where-clause candidates over impls.  This
+    // may seem a bit surprising, since impls are the source of
+    // "truth" in some sense, but in fact some of the impls that SEEM
+    // applicable are not, because of nested obligations. Where
+    // clauses are the safer choice. See the comment on
+    // `select::SelectionCandidate` and #21974 for more details.
+    if candidates.vec.len() > 1 {
+        debug!("retaining param-env candidates only from {:?}", candidates.vec);
+        candidates.vec.retain(|c| match *c {
+            ProjectionTyCandidate::ParamEnv(..) => true,
+            ProjectionTyCandidate::Impl(..) |
+            ProjectionTyCandidate::Closure(..) |
+            ProjectionTyCandidate::TraitDef(..) |
+            ProjectionTyCandidate::FnPointer(..) => false,
+        });
+        debug!("resulting candidate set: {:?}", candidates.vec);
+        if candidates.vec.len() != 1 {
+            return Err(ProjectionTyError::TooManyCandidates);
+        }
+    }
+
+    assert!(candidates.vec.len() <= 1);
+
+    let possible_candidate = candidates.vec.pop().and_then(|candidate| {
+        // In Any (i.e. trans) mode, all projections succeed;
+        // otherwise, we need to be sensitive to `default` and
+        // specialization.
+        if !selcx.projection_mode().is_any() {
+            if let ProjectionTyCandidate::Impl(ref impl_data) = candidate {
+                if let Some(node_item) = assoc_ty_def(selcx,
+                                                      impl_data.impl_def_id,
+                                                      obligation.predicate.item_name) {
+                    if node_item.node.is_from_trait() {
+                        if node_item.item.ty.is_some() {
+                            // If the associated type has a default from the
+                            // trait, that should be considered `default` and
+                            // hence not projected.
+                            //
+                            // Note, however, that we allow a projection from
+                            // the trait specifically in the case that the trait
+                            // does *not* give a default. This is purely to
+                            // avoid spurious errors: the situation can only
+                            // arise when *no* impl in the specialization chain
+                            // has provided a definition for the type. When we
+                            // confirm the candidate, we'll turn the projection
+                            // into a TyError, since the actual error will be
+                            // reported in `check_impl_items_against_trait`.
+                            return None;
+                        }
+                    } else if node_item.item.defaultness.is_default() {
+                        return None;
+                    }
+                } else {
+                    // Normally this situation could only arise through a
+                    // compiler bug, but at coherence-checking time we only look
+                    // at the topmost impl (we don't even consider the trait
+                    // itself) for the definition -- so we can fail to find a
+                    // definition of the type even if it exists.
+
+                    // For now, we just unconditionally ICE, because otherwise,
+                    // examples like the following will succeed:
+                    //
+                    // ```
+                    // trait Assoc {
+                    //     type Output;
+                    // }
+                    //
+                    // impl<T> Assoc for T {
+                    //     default type Output = bool;
+                    // }
+                    //
+                    // impl Assoc for u8 {}
+                    // impl Assoc for u16 {}
+                    //
+                    // trait Foo {}
+                    // impl Foo for <u8 as Assoc>::Output {}
+                    // impl Foo for <u16 as Assoc>::Output {}
+                    // ```
+                    //
+                    // The essential problem here is that the projection fails,
+                    // leaving two unnormalized types, which appear not to unify
+                    // -- so the overlap check succeeds, when it should fail.
+                    bug!("Tried to project an inherited associated type during \
+                          coherence checking, which is currently not supported.");
+                }
+            }
+        }
+        Some(candidate)
+    });
+
+    match possible_candidate {
+        Some(candidate) => {
+            let (ty, obligations) = confirm_candidate(selcx, obligation, candidate);
+            Ok(ProjectedTy::Progress(ty, obligations))
+        }
+        None => {
+            Ok(ProjectedTy::NoProgress(selcx.tcx().mk_projection(
+                obligation.predicate.trait_ref.clone(),
+                obligation.predicate.item_name)))
+        }
+    }
+}
+
+/// The first thing we have to do is scan through the parameter
+/// environment to see whether there are any projection predicates
+/// there that can answer this question.
+fn assemble_candidates_from_param_env<'cx,'tcx>(
+    selcx: &mut SelectionContext<'cx,'tcx>,
+    obligation: &ProjectionTyObligation<'tcx>,
+    obligation_trait_ref: &ty::TraitRef<'tcx>,
+    candidate_set: &mut ProjectionTyCandidateSet<'tcx>)
+{
+    debug!("assemble_candidates_from_param_env(..)");
+    let env_predicates = selcx.param_env().caller_bounds.iter().cloned();
+    assemble_candidates_from_predicates(selcx,
+                                        obligation,
+                                        obligation_trait_ref,
+                                        candidate_set,
+                                        ProjectionTyCandidate::ParamEnv,
+                                        env_predicates);
+}
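+
+// For illustration (hypothetical generic context): inside
+//
+//     fn sum<T: Iterator<Item = u32>>(iter: T) -> u32 { /* ... */ 0 }
+//
+// the caller bounds include the projection predicate
+// `<T as Iterator>::Item == u32`, so projecting `<T as Iterator>::Item` in the
+// body yields a `ParamEnv` candidate that resolves it to `u32`.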
+
+/// In the case of a nested projection like <<A as Foo>::FooT as Bar>::BarT, we may find
+/// that the definition of `Foo` has some clues:
+///
+/// ```
+/// trait Foo {
+///     type FooT : Bar<BarT=i32>
+/// }
+/// ```
+///
+/// Here, for example, we could conclude that the result is `i32`.
+fn assemble_candidates_from_trait_def<'cx,'tcx>(
+    selcx: &mut SelectionContext<'cx,'tcx>,
+    obligation: &ProjectionTyObligation<'tcx>,
+    obligation_trait_ref: &ty::TraitRef<'tcx>,
+    candidate_set: &mut ProjectionTyCandidateSet<'tcx>)
+{
+    debug!("assemble_candidates_from_trait_def(..)");
+
+    // Check whether the self-type is itself a projection.
+    let trait_ref = match obligation_trait_ref.self_ty().sty {
+        ty::TyProjection(ref data) => data.trait_ref.clone(),
+        ty::TyInfer(ty::TyVar(_)) => {
+            // If the self-type is an inference variable, then it MAY wind up
+            // being a projected type, so induce an ambiguity.
+            candidate_set.ambiguous = true;
+            return;
+        }
+        _ => { return; }
+    };
+
+    // If so, extract what we know from the trait and try to come up with a good answer.
+    let trait_predicates = selcx.tcx().lookup_predicates(trait_ref.def_id);
+    let bounds = trait_predicates.instantiate(selcx.tcx(), trait_ref.substs);
+    let bounds = elaborate_predicates(selcx.tcx(), bounds.predicates.into_vec());
+    assemble_candidates_from_predicates(selcx,
+                                        obligation,
+                                        obligation_trait_ref,
+                                        candidate_set,
+                                        ProjectionTyCandidate::TraitDef,
+                                        bounds)
+}
+
+fn assemble_candidates_from_predicates<'cx,'tcx,I>(
+    selcx: &mut SelectionContext<'cx,'tcx>,
+    obligation: &ProjectionTyObligation<'tcx>,
+    obligation_trait_ref: &ty::TraitRef<'tcx>,
+    candidate_set: &mut ProjectionTyCandidateSet<'tcx>,
+    ctor: fn(ty::PolyProjectionPredicate<'tcx>) -> ProjectionTyCandidate<'tcx>,
+    env_predicates: I)
+    where I: Iterator<Item=ty::Predicate<'tcx>>
+{
+    debug!("assemble_candidates_from_predicates(obligation={:?})",
+           obligation);
+    let infcx = selcx.infcx();
+    for predicate in env_predicates {
+        debug!("assemble_candidates_from_predicates: predicate={:?}",
+               predicate);
+        match predicate {
+            ty::Predicate::Projection(ref data) => {
+                let same_name = data.item_name() == obligation.predicate.item_name;
+
+                let is_match = same_name && infcx.probe(|_| {
+                    let origin = TypeOrigin::Misc(obligation.cause.span);
+                    let data_poly_trait_ref =
+                        data.to_poly_trait_ref();
+                    let obligation_poly_trait_ref =
+                        obligation_trait_ref.to_poly_trait_ref();
+                    infcx.sub_poly_trait_refs(false,
+                                              origin,
+                                              data_poly_trait_ref,
+                                              obligation_poly_trait_ref)
+                        // FIXME(#32730) propagate obligations
+                        .map(|InferOk { obligations, .. }| assert!(obligations.is_empty()))
+                        .is_ok()
+                });
+
+                debug!("assemble_candidates_from_predicates: candidate={:?} \
+                                                             is_match={} same_name={}",
+                       data, is_match, same_name);
+
+                if is_match {
+                    candidate_set.vec.push(ctor(data.clone()));
+                }
+            }
+            _ => { }
+        }
+    }
+}
+
+fn assemble_candidates_from_object_type<'cx,'tcx>(
+    selcx: &mut SelectionContext<'cx,'tcx>,
+    obligation: &ProjectionTyObligation<'tcx>,
+    obligation_trait_ref: &ty::TraitRef<'tcx>,
+    candidate_set: &mut ProjectionTyCandidateSet<'tcx>)
+{
+    let self_ty = obligation_trait_ref.self_ty();
+    let object_ty = selcx.infcx().shallow_resolve(self_ty);
+    debug!("assemble_candidates_from_object_type(object_ty={:?})",
+           object_ty);
+    let data = match object_ty.sty {
+        ty::TyTrait(ref data) => data,
+        _ => {
+            span_bug!(
+                obligation.cause.span,
+                "assemble_candidates_from_object_type called with non-object: {:?}",
+                object_ty);
+        }
+    };
+    let projection_bounds = data.projection_bounds_with_self_ty(selcx.tcx(), object_ty);
+    let env_predicates = projection_bounds.iter()
+                                          .map(|p| p.to_predicate())
+                                          .collect();
+    let env_predicates = elaborate_predicates(selcx.tcx(), env_predicates);
+    assemble_candidates_from_predicates(selcx,
+                                        obligation,
+                                        obligation_trait_ref,
+                                        candidate_set,
+                                        ProjectionTyCandidate::ParamEnv,
+                                        env_predicates)
+}
+
+fn assemble_candidates_from_impls<'cx,'tcx>(
+    selcx: &mut SelectionContext<'cx,'tcx>,
+    obligation: &ProjectionTyObligation<'tcx>,
+    obligation_trait_ref: &ty::TraitRef<'tcx>,
+    candidate_set: &mut ProjectionTyCandidateSet<'tcx>)
+    -> Result<(), SelectionError<'tcx>>
+{
+    // If we are resolving `<T as TraitRef<...>>::Item == Type`,
+    // start out by selecting the predicate `T as TraitRef<...>`:
+    let poly_trait_ref = obligation_trait_ref.to_poly_trait_ref();
+    let trait_obligation = obligation.with(poly_trait_ref.to_poly_trait_predicate());
+    let vtable = match selcx.select(&trait_obligation) {
+        Ok(Some(vtable)) => vtable,
+        Ok(None) => {
+            candidate_set.ambiguous = true;
+            return Ok(());
+        }
+        Err(e) => {
+            debug!("assemble_candidates_from_impls: selection error {:?}",
+                   e);
+            return Err(e);
+        }
+    };
+
+    match vtable {
+        super::VtableImpl(data) => {
+            debug!("assemble_candidates_from_impls: impl candidate {:?}",
+                   data);
+
+            candidate_set.vec.push(
+                ProjectionTyCandidate::Impl(data));
+        }
+        super::VtableObject(_) => {
+            assemble_candidates_from_object_type(
+                selcx, obligation, obligation_trait_ref, candidate_set);
+        }
+        super::VtableClosure(data) => {
+            candidate_set.vec.push(
+                ProjectionTyCandidate::Closure(data));
+        }
+        super::VtableFnPointer(fn_type) => {
+            candidate_set.vec.push(
+                ProjectionTyCandidate::FnPointer(fn_type));
+        }
+        super::VtableParam(..) => {
+            // This case tells us nothing about the value of an
+            // associated type. Consider:
+            //
+            // ```
+            // trait SomeTrait { type Foo; }
+            // fn foo<T:SomeTrait>(...) { }
+            // ```
+            //
+            // If the user writes `<T as SomeTrait>::Foo`, then the `T
+            // : SomeTrait` binding does not help us decide what the
+            // type `Foo` is (at least, not more specifically than
+            // what we already knew).
+            //
+            // But wait, you say! What about an example like this:
+            //
+            // ```
+            // fn bar<T:SomeTrait<Foo=usize>>(...) { ... }
+            // ```
+            //
+            // Doesn't the `T : SomeTrait<Foo=usize>` predicate help
+            // resolve `T::Foo`? And of course it does, but in fact
+            // that single predicate is desugared into two predicates
+            // in the compiler: a trait predicate (`T : SomeTrait`) and a
+            // projection. And the projection where clause is handled
+            // in `assemble_candidates_from_param_env`.
+        }
+        super::VtableDefaultImpl(..) |
+        super::VtableBuiltin(..) => {
+            // These traits have no associated types.
+            span_bug!(
+                obligation.cause.span,
+                "Cannot project an associated type from `{:?}`",
+                vtable);
+        }
+    }
+
+    Ok(())
+}
+
+fn confirm_candidate<'cx,'tcx>(
+    selcx: &mut SelectionContext<'cx,'tcx>,
+    obligation: &ProjectionTyObligation<'tcx>,
+    candidate: ProjectionTyCandidate<'tcx>)
+    -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
+{
+    debug!("confirm_candidate(candidate={:?}, obligation={:?})",
+           candidate,
+           obligation);
+
+    match candidate {
+        ProjectionTyCandidate::ParamEnv(poly_projection) |
+        ProjectionTyCandidate::TraitDef(poly_projection) => {
+            confirm_param_env_candidate(selcx, obligation, poly_projection)
+        }
+
+        ProjectionTyCandidate::Impl(impl_vtable) => {
+            confirm_impl_candidate(selcx, obligation, impl_vtable)
+        }
+
+        ProjectionTyCandidate::Closure(closure_vtable) => {
+            confirm_closure_candidate(selcx, obligation, closure_vtable)
+        }
+
+        ProjectionTyCandidate::FnPointer(fn_type) => {
+            confirm_fn_pointer_candidate(selcx, obligation, fn_type)
+        }
+    }
+}
+
+fn confirm_fn_pointer_candidate<'cx,'tcx>(
+    selcx: &mut SelectionContext<'cx,'tcx>,
+    obligation: &ProjectionTyObligation<'tcx>,
+    fn_type: Ty<'tcx>)
+    -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
+{
+    let fn_type = selcx.infcx().shallow_resolve(fn_type);
+    let sig = fn_type.fn_sig();
+    confirm_callable_candidate(selcx, obligation, sig, util::TupleArgumentsFlag::Yes)
+}
+
+fn confirm_closure_candidate<'cx,'tcx>(
+    selcx: &mut SelectionContext<'cx,'tcx>,
+    obligation: &ProjectionTyObligation<'tcx>,
+    vtable: VtableClosureData<'tcx, PredicateObligation<'tcx>>)
+    -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
+{
+    let closure_typer = selcx.closure_typer();
+    let closure_type = closure_typer.closure_type(vtable.closure_def_id, &vtable.substs);
+    let Normalized {
+        value: closure_type,
+        mut obligations
+    } = normalize_with_depth(selcx,
+                             obligation.cause.clone(),
+                             obligation.recursion_depth+1,
+                             &closure_type);
+    let (ty, mut cc_obligations) = confirm_callable_candidate(selcx,
+                                                              obligation,
+                                                              &closure_type.sig,
+                                                              util::TupleArgumentsFlag::No);
+    obligations.append(&mut cc_obligations);
+    (ty, obligations)
+}
+
+fn confirm_callable_candidate<'cx,'tcx>(
+    selcx: &mut SelectionContext<'cx,'tcx>,
+    obligation: &ProjectionTyObligation<'tcx>,
+    fn_sig: &ty::PolyFnSig<'tcx>,
+    flag: util::TupleArgumentsFlag)
+    -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
+{
+    let tcx = selcx.tcx();
+
+    debug!("confirm_callable_candidate({:?},{:?})",
+           obligation,
+           fn_sig);
+
+    // the `Output` associated type is declared on `FnOnce`
+    let fn_once_def_id = tcx.lang_items.fn_once_trait().unwrap();
+
+    // Note: we unwrap the binder here but re-create it below (1)
+    let ty::Binder((trait_ref, ret_type)) =
+        util::closure_trait_ref_and_return_type(tcx,
+                                                fn_once_def_id,
+                                                obligation.predicate.trait_ref.self_ty(),
+                                                fn_sig,
+                                                flag);
+
+    let predicate = ty::Binder(ty::ProjectionPredicate { // (1) recreate binder here
+        projection_ty: ty::ProjectionTy {
+            trait_ref: trait_ref,
+            item_name: token::intern(FN_OUTPUT_NAME),
+        },
+        ty: ret_type
+    });
+
+    confirm_param_env_candidate(selcx, obligation, predicate)
+}
+
+fn confirm_param_env_candidate<'cx,'tcx>(
+    selcx: &mut SelectionContext<'cx,'tcx>,
+    obligation: &ProjectionTyObligation<'tcx>,
+    poly_projection: ty::PolyProjectionPredicate<'tcx>)
+    -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
+{
+    let infcx = selcx.infcx();
+
+    let projection =
+        infcx.replace_late_bound_regions_with_fresh_var(
+            obligation.cause.span,
+            infer::LateBoundRegionConversionTime::HigherRankedType,
+            &poly_projection).0;
+
+    assert_eq!(projection.projection_ty.item_name,
+               obligation.predicate.item_name);
+
+    let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span);
+    match infcx.eq_trait_refs(false,
+                              origin,
+                              obligation.predicate.trait_ref.clone(),
+                              projection.projection_ty.trait_ref.clone()) {
+        Ok(InferOk { obligations, .. }) => {
+            // FIXME(#32730) propagate obligations
+            assert!(obligations.is_empty());
+        }
+        Err(e) => {
+            span_bug!(
+                obligation.cause.span,
+                "Failed to unify `{:?}` and `{:?}` in projection: {}",
+                obligation,
+                projection,
+                e);
+        }
+    }
+
+    (projection.ty, vec!())
+}
+
+fn confirm_impl_candidate<'cx,'tcx>(
+    selcx: &mut SelectionContext<'cx,'tcx>,
+    obligation: &ProjectionTyObligation<'tcx>,
+    impl_vtable: VtableImplData<'tcx, PredicateObligation<'tcx>>)
+    -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
+{
+    let VtableImplData { substs, nested, impl_def_id } = impl_vtable;
+
+    let tcx = selcx.tcx();
+    let trait_ref = obligation.predicate.trait_ref;
+    let assoc_ty = assoc_ty_def(selcx, impl_def_id, obligation.predicate.item_name);
+
+    match assoc_ty {
+        Some(node_item) => {
+            let ty = node_item.item.ty.unwrap_or_else(|| {
+                // This means that the impl is missing a definition for the
+                // associated type. This error will be reported by the type
+                // checker method `check_impl_items_against_trait`, so here we
+                // just return TyError.
+                debug!("confirm_impl_candidate: no associated type {:?} for {:?}",
+                       node_item.item.name,
+                       obligation.predicate.trait_ref);
+                tcx.types.err
+            });
+            let substs = translate_substs(selcx.infcx(), impl_def_id, substs, node_item.node);
+            (ty.subst(tcx, substs), nested)
+        }
+        None => {
+            span_bug!(obligation.cause.span,
+                      "No associated type for {:?}",
+                      trait_ref);
+        }
+    }
+}
+
+/// Locate the definition of an associated type in the specialization hierarchy,
+/// starting from the given impl.
+///
+/// Based on the "projection mode", this lookup may in fact only examine the
+/// topmost impl. See the comments for `ProjectionMode` for more details.
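+///
+/// An illustrative sketch (hypothetical impls, not part of the original
+/// comment): with
+///
+/// ```
+/// trait Assoc { type Output; }
+/// impl<T> Assoc for T { default type Output = bool; }
+/// impl Assoc for u8 {}
+/// ```
+///
+/// a lookup of `Output` starting from the `u8` impl walks the specialization
+/// ancestors and finds the `default type Output = bool` definition, whereas
+/// in topmost-only mode we inspect just the `u8` impl's own items and the
+/// lookup returns `None`.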
+fn assoc_ty_def<'cx, 'tcx>(selcx: &SelectionContext<'cx, 'tcx>,
+                           impl_def_id: DefId,
+                           assoc_ty_name: ast::Name)
+                           -> Option<specialization_graph::NodeItem<Rc<ty::AssociatedType<'tcx>>>>
+{
+    let trait_def_id = selcx.tcx().impl_trait_ref(impl_def_id).unwrap().def_id;
+
+    if selcx.projection_mode().is_topmost() {
+        let impl_node = specialization_graph::Node::Impl(impl_def_id);
+        for item in impl_node.items(selcx.tcx()) {
+            if let ty::TypeTraitItem(assoc_ty) = item {
+                if assoc_ty.name == assoc_ty_name {
+                    return Some(specialization_graph::NodeItem {
+                        node: specialization_graph::Node::Impl(impl_def_id),
+                        item: assoc_ty,
+                    });
+                }
+            }
+        }
+        None
+    } else {
+        selcx.tcx().lookup_trait_def(trait_def_id)
+            .ancestors(impl_def_id)
+            .type_defs(selcx.tcx(), assoc_ty_name)
+            .next()
+    }
+}
diff --git a/src/librustc/traits/select.rs b/src/librustc/traits/select.rs
new file mode 100644 (file)
index 0000000..8d7df6e
--- /dev/null
@@ -0,0 +1,3058 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! See `README.md` for high-level documentation
+
+pub use self::MethodMatchResult::*;
+pub use self::MethodMatchedData::*;
+use self::SelectionCandidate::*;
+use self::BuiltinBoundConditions::*;
+use self::EvaluationResult::*;
+
+use super::coherence;
+use super::DerivedObligationCause;
+use super::project;
+use super::project::{normalize_with_depth, Normalized};
+use super::{PredicateObligation, TraitObligation, ObligationCause};
+use super::report_overflow_error;
+use super::{ObligationCauseCode, BuiltinDerivedObligation, ImplDerivedObligation};
+use super::{SelectionError, Unimplemented, OutputTypeParameterMismatch};
+use super::{ObjectCastObligation, Obligation};
+use super::ProjectionMode;
+use super::TraitNotObjectSafe;
+use super::Selection;
+use super::SelectionResult;
+use super::{VtableBuiltin, VtableImpl, VtableParam, VtableClosure,
+            VtableFnPointer, VtableObject, VtableDefaultImpl};
+use super::{VtableImplData, VtableObjectData, VtableBuiltinData,
+            VtableClosureData, VtableDefaultImplData};
+use super::object_safety;
+use super::util;
+
+use hir::def_id::DefId;
+use infer;
+use infer::{InferCtxt, InferOk, TypeFreshener, TypeOrigin};
+use ty::subst::{Subst, Substs, TypeSpace};
+use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable};
+use traits;
+use ty::fast_reject;
+use ty::relate::TypeRelation;
+
+use std::cell::RefCell;
+use std::fmt;
+use std::rc::Rc;
+use syntax::abi::Abi;
+use hir;
+use util::common::ErrorReported;
+use util::nodemap::FnvHashMap;
+
+pub struct SelectionContext<'cx, 'tcx:'cx> {
+    infcx: &'cx InferCtxt<'cx, 'tcx>,
+
+    /// Freshener used specifically for skolemizing entries on the
+    /// obligation stack. This ensures that all entries on the stack
+    /// at one time will have the same set of skolemized entries,
+    /// which is important for checking for trait bounds that
+    /// recursively require themselves.
+    freshener: TypeFreshener<'cx, 'tcx>,
+
+    /// If true, indicates that the evaluation should be conservative
+    /// and consider the possibility of types outside this crate.
+    /// This comes up primarily when resolving ambiguity. Imagine
+    /// there is some trait reference `$0 : Bar` where `$0` is an
+    /// inference variable. If `intercrate` is true, then we can never
+    /// say for sure that this reference is not implemented, even if
+    /// there are *no impls at all for `Bar`*, because `$0` could be
+    /// bound to some type in a downstream crate that implements
+    /// `Bar`. This is the suitable mode for coherence. Elsewhere,
+    /// though, we set this to false, because we are only interested
+    /// in types that the user could actually have written --- in
+    /// other words, we consider `$0 : Bar` to be unimplemented if
+    /// there is no type that the user could *actually name* that
+    /// would satisfy it. This avoids crippling inference, basically.
+    intercrate: bool,
+}
+
+// A stack of trait obligations; each frame links back to the previous
+// one, letting us walk back up the stack during selection.
+struct TraitObligationStack<'prev, 'tcx: 'prev> {
+    obligation: &'prev TraitObligation<'tcx>,
+
+    /// Trait ref from `obligation` but skolemized with the
+    /// selection-context's freshener. Used to check for recursion.
+    fresh_trait_ref: ty::PolyTraitRef<'tcx>,
+
+    previous: TraitObligationStackList<'prev, 'tcx>,
+}
+
+#[derive(Clone)]
+pub struct SelectionCache<'tcx> {
+    hashmap: RefCell<FnvHashMap<ty::TraitRef<'tcx>,
+                                SelectionResult<'tcx, SelectionCandidate<'tcx>>>>,
+}
+
+pub enum MethodMatchResult {
+    MethodMatched(MethodMatchedData),
+    MethodAmbiguous(/* list of impls that could apply */ Vec<DefId>),
+    MethodDidNotMatch,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum MethodMatchedData {
+    // In the case of a precise match, we don't really need to store
+    // how the match was found. So don't.
+    PreciseMethodMatch,
+
+    // In the case of a coercion, we need to know the precise impl so
+    // that we can determine the type to which things were coerced.
+    CoerciveMethodMatch(/* impl we matched */ DefId)
+}
+
+/// The selection process begins by considering all impls, where
+/// clauses, and so forth that might resolve an obligation.  Sometimes
+/// we'll be able to say definitively that (e.g.) an impl does not
+/// apply to the obligation: perhaps it is defined for `usize` but the
+/// obligation is for `int`. In that case, we drop the impl out of the
+/// list.  But the other cases are considered *candidates*.
+///
+/// For selection to succeed, there must be exactly one matching
+/// candidate. If the obligation is fully known, this is guaranteed
+/// by coherence. However, if the obligation contains type parameters
+/// or variables, there may be multiple such impls.
+///
+/// It is not a real problem if multiple matching impls exist because
+/// of type variables - it just means the obligation isn't sufficiently
+/// elaborated. In that case we report an ambiguity, and the caller can
+/// try again after more type information has been gathered or report a
+/// "type annotations required" error.
+///
+/// However, with type parameters, this can be a real problem - type
+/// parameters don't unify with regular types, but they *can* unify
+/// with variables from blanket impls, and (unless we know its bounds
+/// will always be satisfied) picking the blanket impl will be wrong
+/// for at least *some* substitutions. To make this concrete, if we have
+///
+///    trait AsDebug { type Out : fmt::Debug; fn debug(self) -> Self::Out; }
+///    impl<T: fmt::Debug> AsDebug for T {
+///        type Out = T;
+///        fn debug(self) -> Self::Out { self }
+///    }
+///    fn foo<T: AsDebug>(t: T) { println!("{:?}", <T as AsDebug>::debug(t)); }
+///
+/// we can't just use the impl to resolve the <T as AsDebug> obligation
+/// - a type from another crate (that doesn't implement fmt::Debug) could
+/// implement AsDebug.
+///
+/// Because where-clauses match the type exactly, multiple clauses can
+/// only match if there are unresolved variables, and we can mostly just
+/// report this ambiguity in that case. This is still a problem - we can't
+/// *do anything* with ambiguities that involve only regions. This is issue
+/// #21974.
+///
+/// If a single where-clause matches and there are no inference
+/// variables left, then it definitely matches and we can just select
+/// it.
+///
+/// In fact, we even select the where-clause when the obligation contains
+/// inference variables. This can lead to inference making "leaps of logic",
+/// for example in this situation:
+///
+///    pub trait Foo<T> { fn foo(&self) -> T; }
+///    impl<T> Foo<()> for T { fn foo(&self) { } }
+///    impl Foo<bool> for bool { fn foo(&self) -> bool { *self } }
+///
+///    pub fn foo<T>(t: T) where T: Foo<bool> {
+///       println!("{:?}", <T as Foo<_>>::foo(&t));
+///    }
+///    fn main() { foo(false); }
+///
+/// Here the obligation <T as Foo<$0>> can be matched by both the blanket
+/// impl and the where-clause. We select the where-clause and unify $0=bool,
+/// so the program prints "false". However, if the where-clause is omitted,
+/// the blanket impl is selected, we unify $0=(), and the program prints
+/// "()".
+///
+/// Exactly the same issues apply to projection and object candidates, except
+/// that we can have both a projection candidate and a where-clause candidate
+/// for the same obligation. In that case either would do (except that
+/// different "leaps of logic" would occur if inference variables are
+/// present), and we just pick the where-clause. This is, for example,
+/// required for associated types to work in default impls, as the bounds
+/// are visible both as projection bounds and as where-clauses from the
+/// parameter environment.
+#[derive(PartialEq,Eq,Debug,Clone)]
+enum SelectionCandidate<'tcx> {
+    BuiltinCandidate(ty::BuiltinBound),
+    ParamCandidate(ty::PolyTraitRef<'tcx>),
+    ImplCandidate(DefId),
+    DefaultImplCandidate(DefId),
+    DefaultImplObjectCandidate(DefId),
+
+    /// This is a trait matching with a projected type as `Self`, and
+    /// we found an applicable bound in the trait definition.
+    ProjectionCandidate,
+
+    /// Implementation of a `Fn`-family trait by one of the
+    /// anonymous types generated for a `||` expression.
+    ClosureCandidate(/* closure */ DefId, &'tcx ty::ClosureSubsts<'tcx>),
+
+    /// Implementation of a `Fn`-family trait by one of the anonymous
+    /// types generated for a fn pointer type (e.g., `fn(int)->int`)
+    FnPointerCandidate,
+
+    ObjectCandidate,
+
+    BuiltinObjectCandidate,
+
+    BuiltinUnsizeCandidate,
+}
+
+struct SelectionCandidateSet<'tcx> {
+    // a list of candidates that definitely apply to the current
+    // obligation (meaning: types unify).
+    vec: Vec<SelectionCandidate<'tcx>>,
+
+    // if this is true, then there were candidates that might or might
+    // not have applied, but we couldn't tell. This occurs when some
+    // of the input types are type variables, in which case there are
+    // various "builtin" rules that might or might not trigger.
+    ambiguous: bool,
+}
+
+#[derive(PartialEq,Eq,Debug,Clone)]
+struct EvaluatedCandidate<'tcx> {
+    candidate: SelectionCandidate<'tcx>,
+    evaluation: EvaluationResult,
+}
+
+enum BuiltinBoundConditions<'tcx> {
+    If(ty::Binder<Vec<Ty<'tcx>>>),
+    ParameterBuiltin,
+    AmbiguousBuiltin
+}
+
+#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
+/// The result of trait evaluation. The order is important
+/// here as the evaluation of a list is the maximum of the
+/// evaluations.
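+///
+/// For example: since the variants are declared in the order `EvaluatedToOk`,
+/// `EvaluatedToUnknown`, `EvaluatedToAmbig`, `EvaluatedToErr`, the derived
+/// `Ord` makes a list containing `EvaluatedToOk` and `EvaluatedToAmbig`
+/// evaluate to `EvaluatedToAmbig` overall.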
+enum EvaluationResult {
+    /// Evaluation successful
+    EvaluatedToOk,
+    /// Evaluation failed because of recursion - treated as ambiguous
+    EvaluatedToUnknown,
+    /// Evaluation is known to be ambiguous
+    EvaluatedToAmbig,
+    /// Evaluation failed
+    EvaluatedToErr,
+}
+
+#[derive(Clone)]
+pub struct EvaluationCache<'tcx> {
+    hashmap: RefCell<FnvHashMap<ty::PolyTraitRef<'tcx>, EvaluationResult>>
+}
+
+impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
+    pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
+        SelectionContext {
+            infcx: infcx,
+            freshener: infcx.freshener(),
+            intercrate: false,
+        }
+    }
+
+    pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
+        SelectionContext {
+            infcx: infcx,
+            freshener: infcx.freshener(),
+            intercrate: true,
+        }
+    }
+
+    pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'tcx> {
+        self.infcx
+    }
+
+    pub fn tcx(&self) -> &'cx TyCtxt<'tcx> {
+        self.infcx.tcx
+    }
+
+    pub fn param_env(&self) -> &'cx ty::ParameterEnvironment<'cx, 'tcx> {
+        self.infcx.param_env()
+    }
+
+    pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'tcx> {
+        self.infcx
+    }
+
+    pub fn projection_mode(&self) -> ProjectionMode {
+        self.infcx.projection_mode()
+    }
+
+    ///////////////////////////////////////////////////////////////////////////
+    // Selection
+    //
+    // The selection phase tries to identify *how* an obligation will
+    // be resolved. For example, it will identify which impl or
+    // parameter bound is to be used. The process can be inconclusive
+    // if the self type in the obligation is not fully inferred. Selection
+    // can result in an error in one of two ways:
+    //
+    // 1. If no applicable impl or parameter bound can be found.
+    // 2. If the output type parameters in the obligation do not match
+    //    those specified by the impl/bound. For example, if the obligation
+    //    is `Vec<Foo>:Iterable<Bar>`, but the impl specifies
+    //    `impl<T> Iterable<T> for Vec<T>`, then an error would result.
+
+    /// Attempts to satisfy the obligation. If successful, this will affect the surrounding
+    /// type environment by performing unification.
+    pub fn select(&mut self, obligation: &TraitObligation<'tcx>)
+                  -> SelectionResult<'tcx, Selection<'tcx>> {
+        debug!("select({:?})", obligation);
+        assert!(!obligation.predicate.has_escaping_regions());
+
+        let dep_node = obligation.predicate.dep_node();
+        let _task = self.tcx().dep_graph.in_task(dep_node);
+
+        let stack = self.push_stack(TraitObligationStackList::empty(), obligation);
+        match self.candidate_from_obligation(&stack)? {
+            None => {
+                self.consider_unification_despite_ambiguity(obligation);
+                Ok(None)
+            }
+            Some(candidate) => Ok(Some(self.confirm_candidate(obligation, candidate)?)),
+        }
+    }
+
+    /// In the particular case of unboxed closure obligations, we can
+    /// sometimes do some amount of unification for the
+    /// argument/return types even though we can't yet fully match the obligation.
+    /// The particular case we are interested in is an obligation of the form:
+    ///
+    ///    C : FnFoo<A>
+    ///
+    /// where `C` is an unboxed closure type and `FnFoo` is one of the
+    /// `Fn` traits. Because we know that users cannot write impls for closure types
+    /// themselves, the only way that `C : FnFoo` can fail to match is under two
+    /// conditions:
+    ///
+    /// 1. The closure kind for `C` is not yet known, because inference isn't complete.
+    /// 2. The closure kind for `C` *is* known, but doesn't match what is needed.
+    ///    For example, `C` may be a `FnOnce` closure, but a `Fn` closure is needed.
+    ///
+    /// In either case, we always know what argument types are
+    /// expected by `C`, no matter what kind of `Fn` trait it
+    /// eventually matches. So we can go ahead and unify the argument
+    /// types, even though the end result is ambiguous.
+    ///
+    /// Note that this is safe *even if* the trait would never be
+    /// matched (case 2 above). After all, in that case, an error will
+    /// result, so it kind of doesn't matter what we do --- unifying
+    /// the argument types can only be helpful to the user, because
+    /// once they patch up the kind of closure that is expected, the
+    /// argument types won't really change.
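+    ///
+    /// An illustrative sketch (hypothetical user code):
+    ///
+    ///    fn call_fn<F: Fn(u32) -> u32>(f: F) -> u32 { f(1) }
+    ///    let c = |x| x + 1;
+    ///    call_fn(c);
+    ///
+    /// Even while the closure kind of `c` is still being inferred, the
+    /// obligation `{closure} : Fn(u32) -> u32` lets us unify the closure's
+    /// argument type `x` with `u32`, which is all this method attempts to do.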
+    fn consider_unification_despite_ambiguity(&mut self, obligation: &TraitObligation<'tcx>) {
+        // Is this a `C : FnFoo(...)` trait reference for some trait binding `FnFoo`?
+        match self.tcx().lang_items.fn_trait_kind(obligation.predicate.0.def_id()) {
+            Some(_) => { }
+            None => { return; }
+        }
+
+        // Is the self-type a closure type? We ignore bindings here
+        // because if it is a closure type, it must be a closure type from
+        // within this current fn, and hence none of the higher-ranked
+        // lifetimes can appear inside the self-type.
+        let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
+        let (closure_def_id, substs) = match self_ty.sty {
+            ty::TyClosure(id, ref substs) => (id, substs),
+            _ => { return; }
+        };
+        assert!(!substs.has_escaping_regions());
+
+        // It is OK to call the unnormalized variant here - this is only
+        // reached for TyClosure: Fn inputs where the closure kind is
+        // still unknown, which should only occur in typeck where the
+        // closure type is already normalized.
+        let closure_trait_ref = self.closure_trait_ref_unnormalized(obligation,
+                                                                    closure_def_id,
+                                                                    substs);
+
+        match self.confirm_poly_trait_refs(obligation.cause.clone(),
+                                           obligation.predicate.to_poly_trait_ref(),
+                                           closure_trait_ref) {
+            Ok(()) => { }
+            Err(_) => { /* Silently ignore errors. */ }
+        }
+    }
+
+    ///////////////////////////////////////////////////////////////////////////
+    // EVALUATION
+    //
+    // Tests whether an obligation can be selected or whether an impl
+    // can be applied to particular types. It skips the "confirmation"
+    // step and hence completely ignores output type parameters.
+    //
+    // The result is "true" if the obligation *may* hold and "false" if
+    // we can be sure it does not.
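+    //
+    // For example (illustrative): evaluating `u32 : Copy` yields "true",
+    // evaluating `Vec<u32> : Copy` yields "false", and evaluating
+    // `$0 : Copy` for a still-unresolved `$0` also yields "true", since the
+    // obligation *may* hold.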
+
+    /// Evaluates whether the obligation `obligation` can be satisfied (by any means).
+    pub fn evaluate_obligation(&mut self,
+                               obligation: &PredicateObligation<'tcx>)
+                               -> bool
+    {
+        debug!("evaluate_obligation({:?})",
+               obligation);
+
+        self.infcx.probe(|_| {
+            self.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation)
+                .may_apply()
+        })
+    }
+
+    /// Evaluates whether the obligation `obligation` can be satisfied,
+    /// and returns `false` if not certain. However, this is not entirely
+    /// accurate if inference variables are involved.
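+    ///
+    /// For example (illustrative): for an obligation such as `Vec<$0> : Clone`
+    /// with `$0` still unresolved, evaluation is ambiguous, so this method
+    /// returns `false` even though `evaluate_obligation` above returns `true`.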
+    pub fn evaluate_obligation_conservatively(&mut self,
+                                              obligation: &PredicateObligation<'tcx>)
+                                              -> bool
+    {
+        debug!("evaluate_obligation_conservatively({:?})",
+               obligation);
+
+        self.infcx.probe(|_| {
+            self.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation)
+                == EvaluatedToOk
+        })
+    }
+
+    /// Evaluates the predicates in `predicates` recursively. Note that
+    /// this applies projections in the predicates, and therefore
+    /// is run within an inference probe.
+    fn evaluate_predicates_recursively<'a,'o,I>(&mut self,
+                                                stack: TraitObligationStackList<'o, 'tcx>,
+                                                predicates: I)
+                                                -> EvaluationResult
+        where I : Iterator<Item=&'a PredicateObligation<'tcx>>, 'tcx:'a
+    {
+        let mut result = EvaluatedToOk;
+        for obligation in predicates {
+            let eval = self.evaluate_predicate_recursively(stack, obligation);
+            debug!("evaluate_predicate_recursively({:?}) = {:?}",
+                   obligation, eval);
+            match eval {
+                EvaluatedToErr => { return EvaluatedToErr; }
+                EvaluatedToAmbig => { result = EvaluatedToAmbig; }
+                EvaluatedToUnknown => {
+                    if result < EvaluatedToUnknown {
+                        result = EvaluatedToUnknown;
+                    }
+                }
+                EvaluatedToOk => { }
+            }
+        }
+        result
+    }
+
+    fn evaluate_predicate_recursively<'o>(&mut self,
+                                          previous_stack: TraitObligationStackList<'o, 'tcx>,
+                                          obligation: &PredicateObligation<'tcx>)
+                                           -> EvaluationResult
+    {
+        debug!("evaluate_predicate_recursively({:?})",
+               obligation);
+
+        // Check the cache from the tcx of predicates that we know
+        // have been proven elsewhere. This cache only contains
+        // predicates that are global in scope and hence unaffected by
+        // the current environment.
+        if self.tcx().fulfilled_predicates.borrow().check_duplicate(&obligation.predicate) {
+            return EvaluatedToOk;
+        }
+
+        match obligation.predicate {
+            ty::Predicate::Trait(ref t) => {
+                assert!(!t.has_escaping_regions());
+                let obligation = obligation.with(t.clone());
+                self.evaluate_obligation_recursively(previous_stack, &obligation)
+            }
+
+            ty::Predicate::Equate(ref p) => {
+                // does this code ever run?
+                match self.infcx.equality_predicate(obligation.cause.span, p) {
+                    Ok(InferOk { obligations, .. }) => {
+                        // FIXME(#32730) propagate obligations
+                        assert!(obligations.is_empty());
+                        EvaluatedToOk
+                    },
+                    Err(_) => EvaluatedToErr
+                }
+            }
+
+            ty::Predicate::WellFormed(ty) => {
+                match ty::wf::obligations(self.infcx, obligation.cause.body_id,
+                                          ty, obligation.cause.span) {
+                    Some(obligations) =>
+                        self.evaluate_predicates_recursively(previous_stack, obligations.iter()),
+                    None =>
+                        EvaluatedToAmbig,
+                }
+            }
+
+            ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) => {
+                // we do not consider region relationships when
+                // evaluating trait matches
+                EvaluatedToOk
+            }
+
+            ty::Predicate::ObjectSafe(trait_def_id) => {
+                if object_safety::is_object_safe(self.tcx(), trait_def_id) {
+                    EvaluatedToOk
+                } else {
+                    EvaluatedToErr
+                }
+            }
+
+            ty::Predicate::Projection(ref data) => {
+                let project_obligation = obligation.with(data.clone());
+                match project::poly_project_and_unify_type(self, &project_obligation) {
+                    Ok(Some(subobligations)) => {
+                        self.evaluate_predicates_recursively(previous_stack,
+                                                             subobligations.iter())
+                    }
+                    Ok(None) => {
+                        EvaluatedToAmbig
+                    }
+                    Err(_) => {
+                        EvaluatedToErr
+                    }
+                }
+            }
+        }
+    }
+
+    fn evaluate_obligation_recursively<'o>(&mut self,
+                                           previous_stack: TraitObligationStackList<'o, 'tcx>,
+                                           obligation: &TraitObligation<'tcx>)
+                                           -> EvaluationResult
+    {
+        debug!("evaluate_obligation_recursively({:?})",
+               obligation);
+
+        let stack = self.push_stack(previous_stack, obligation);
+        let fresh_trait_ref = stack.fresh_trait_ref;
+        if let Some(result) = self.check_evaluation_cache(fresh_trait_ref) {
+            debug!("CACHE HIT: EVAL({:?})={:?}",
+                   fresh_trait_ref,
+                   result);
+            return result;
+        }
+
+        let result = self.evaluate_stack(&stack);
+
+        debug!("CACHE MISS: EVAL({:?})={:?}",
+               fresh_trait_ref,
+               result);
+        self.insert_evaluation_cache(fresh_trait_ref, result);
+
+        result
+    }
+
+    fn evaluate_stack<'o>(&mut self,
+                          stack: &TraitObligationStack<'o, 'tcx>)
+                          -> EvaluationResult
+    {
+        // In intercrate mode, whenever any of the types are unbound,
+        // there can always be an impl. Even if there are no impls in
+        // this crate, perhaps the type would be unified with
+        // something from another crate that does provide an impl.
+        //
+        // In intra mode, we must still be conservative. The reason is
+        // that we want to avoid cycles. Imagine an impl like:
+        //
+        //     impl<T:Eq> Eq for Vec<T>
+        //
+        // and a trait reference like `$0 : Eq` where `$0` is an
+        // unbound variable. When we evaluate this trait-reference, we
+        // will unify `$0` with `Vec<$1>` (for some fresh variable
+        // `$1`), on the condition that `$1 : Eq`. We will then wind
+        // up with many candidates (since there are other `Eq` impls
+        // that apply) and try to winnow things down. This results in
+        // a recursive evaluation of `$1 : Eq` -- as you can
+        // imagine, this is just where we started. To avoid that, we
+        // check for unbound variables and return an ambiguous (hence possible)
+        // match if we've seen this trait before.
+        //
+        // This suffices to allow chains like `FnMut` implemented in
+        // terms of `Fn` etc, but we could probably make this more
+        // precise still.
+        let input_types = stack.fresh_trait_ref.0.input_types();
+        let unbound_input_types = input_types.iter().any(|ty| ty.is_fresh());
+        if unbound_input_types && self.intercrate {
+            debug!("evaluate_stack({:?}) --> unbound argument, intercrate -->  ambiguous",
+                   stack.fresh_trait_ref);
+            return EvaluatedToAmbig;
+        }
+        if unbound_input_types &&
+              stack.iter().skip(1).any(
+                  |prev| self.match_fresh_trait_refs(&stack.fresh_trait_ref,
+                                                     &prev.fresh_trait_ref))
+        {
+            debug!("evaluate_stack({:?}) --> unbound argument, recursive --> giving up",
+                   stack.fresh_trait_ref);
+            return EvaluatedToUnknown;
+        }
+
+        // If there is any previous entry on the stack that precisely
+        // matches this obligation, then we can assume that the
+        // obligation is satisfied for now (all other conditions must
+        // still be met, of course). One obvious case where this comes up is
+        // marker traits like `Send`. Think of a linked list:
+        //
+        //    struct List<T> { data: T, next: Option<Box<List<T>>> }
+        //
+        // `Box<List<T>>` will be `Send` if `T` is `Send` and
+        // `Option<Box<List<T>>>` is `Send`, and in turn
+        // `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is
+        // `Send`.
+        //
+        // Note that we do this comparison using the `fresh_trait_ref`
+        // fields. Because these have all been skolemized using
+        // `self.freshener`, we can be sure that (a) this will not
+        // affect the inferencer state and (b) that if we see two
+        // skolemized types with the same index, they refer to the
+        // same unbound type variable.
+        if
+            stack.iter()
+            .skip(1) // skip top-most frame
+            .any(|prev| stack.fresh_trait_ref == prev.fresh_trait_ref)
+        {
+            debug!("evaluate_stack({:?}) --> recursive",
+                   stack.fresh_trait_ref);
+            return EvaluatedToOk;
+        }
+
+        match self.candidate_from_obligation(stack) {
+            Ok(Some(c)) => self.evaluate_candidate(stack, &c),
+            Ok(None) => EvaluatedToAmbig,
+            Err(..) => EvaluatedToErr
+        }
+    }
+
+    /// Further evaluate `candidate` to decide whether all type parameters match and whether nested
+    /// obligations are met. Returns true if `candidate` remains viable after this further
+    /// scrutiny.
+    fn evaluate_candidate<'o>(&mut self,
+                              stack: &TraitObligationStack<'o, 'tcx>,
+                              candidate: &SelectionCandidate<'tcx>)
+                              -> EvaluationResult
+    {
+        debug!("evaluate_candidate: depth={} candidate={:?}",
+               stack.obligation.recursion_depth, candidate);
+        let result = self.infcx.probe(|_| {
+            let candidate = (*candidate).clone();
+            match self.confirm_candidate(stack.obligation, candidate) {
+                Ok(selection) => {
+                    self.evaluate_predicates_recursively(
+                        stack.list(),
+                        selection.nested_obligations().iter())
+                }
+                Err(..) => EvaluatedToErr
+            }
+        });
+        debug!("evaluate_candidate: depth={} result={:?}",
+               stack.obligation.recursion_depth, result);
+        result
+    }
+
+    fn pick_evaluation_cache(&self) -> &EvaluationCache<'tcx> {
+        // see comment in `pick_candidate_cache`
+        if self.intercrate ||
+            !self.param_env().caller_bounds.is_empty()
+        {
+            &self.param_env().evaluation_cache
+        } else {
+            &self.tcx().evaluation_cache
+        }
+    }
+
+    fn check_evaluation_cache(&self, trait_ref: ty::PolyTraitRef<'tcx>)
+                              -> Option<EvaluationResult>
+    {
+        let cache = self.pick_evaluation_cache();
+        cache.hashmap.borrow().get(&trait_ref).cloned()
+    }
+
+    fn insert_evaluation_cache(&mut self,
+                               trait_ref: ty::PolyTraitRef<'tcx>,
+                               result: EvaluationResult)
+    {
+        // Avoid caching results that depend on more than just the trait-ref:
+        // The stack can create EvaluatedToUnknown, and closure signatures
+        // that are not yet inferred can create "spurious" EvaluatedToAmbig
+        // and EvaluatedToOk.
+        if result == EvaluatedToUnknown ||
+            ((result == EvaluatedToAmbig || result == EvaluatedToOk)
+             && trait_ref.has_closure_types())
+        {
+            return;
+        }
+
+        let cache = self.pick_evaluation_cache();
+        cache.hashmap.borrow_mut().insert(trait_ref, result);
+    }
+
+    ///////////////////////////////////////////////////////////////////////////
+    // CANDIDATE ASSEMBLY
+    //
+    // The selection process begins by examining all in-scope impls,
+    // caller obligations, and so forth and assembling a list of
+    // candidates. See `README.md` and the `Candidate` type for more
+    // details.
+
+    fn candidate_from_obligation<'o>(&mut self,
+                                     stack: &TraitObligationStack<'o, 'tcx>)
+                                     -> SelectionResult<'tcx, SelectionCandidate<'tcx>>
+    {
+        // Watch out for overflow. This intentionally bypasses (and does
+        // not update) the cache.
+        let recursion_limit = self.infcx.tcx.sess.recursion_limit.get();
+        if stack.obligation.recursion_depth >= recursion_limit {
+            report_overflow_error(self.infcx(), &stack.obligation, true);
+        }
+
+        // Check the cache. Note that we skolemize the trait-ref
+        // separately rather than using `stack.fresh_trait_ref` -- this
+        // is because we want the unbound variables to be replaced
+        // with fresh skolemized types starting from index 0.
+        let cache_fresh_trait_pred =
+            self.infcx.freshen(stack.obligation.predicate.clone());
+        debug!("candidate_from_obligation(cache_fresh_trait_pred={:?}, obligation={:?})",
+               cache_fresh_trait_pred,
+               stack);
+        assert!(!stack.obligation.predicate.has_escaping_regions());
+
+        match self.check_candidate_cache(&cache_fresh_trait_pred) {
+            Some(c) => {
+                debug!("CACHE HIT: SELECT({:?})={:?}",
+                       cache_fresh_trait_pred,
+                       c);
+                return c;
+            }
+            None => { }
+        }
+
+        // If no match, compute result and insert into cache.
+        let candidate = self.candidate_from_obligation_no_cache(stack);
+
+        if self.should_update_candidate_cache(&cache_fresh_trait_pred, &candidate) {
+            debug!("CACHE MISS: SELECT({:?})={:?}",
+                   cache_fresh_trait_pred, candidate);
+            self.insert_candidate_cache(cache_fresh_trait_pred, candidate.clone());
+        }
+
+        candidate
+    }
+
+    // Treat negative impls as unimplemented
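+    // For example (illustrative): if the single remaining candidate is an
+    // `ImplCandidate` whose impl has negative polarity (say, a hypothetical
+    // `impl !Send for MyType`), it is rejected here and the obligation is
+    // reported as unimplemented.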
+    fn filter_negative_impls(&self, candidate: SelectionCandidate<'tcx>)
+                             -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
+        if let ImplCandidate(def_id) = candidate {
+            if self.tcx().trait_impl_polarity(def_id) == Some(hir::ImplPolarity::Negative) {
+                return Err(Unimplemented)
+            }
+        }
+        Ok(Some(candidate))
+    }
+
+    fn candidate_from_obligation_no_cache<'o>(&mut self,
+                                              stack: &TraitObligationStack<'o, 'tcx>)
+                                              -> SelectionResult<'tcx, SelectionCandidate<'tcx>>
+    {
+        if stack.obligation.predicate.references_error() {
+            // If we encounter a `TyError`, we generally prefer the
+            // most "optimistic" result in response -- that is, the
+            // one least likely to report downstream errors. But
+            // because this routine is shared by coherence and by
+            // trait selection, there isn't an obvious "right" choice
+            // here in that respect, so we opt to just return
+            // ambiguity and let the upstream clients sort it out.
+            return Ok(None);
+        }
+
+        if !self.is_knowable(stack) {
+            debug!("coherence stage: not knowable");
+            return Ok(None);
+        }
+
+        let candidate_set = self.assemble_candidates(stack)?;
+
+        if candidate_set.ambiguous {
+            debug!("candidate set contains ambig");
+            return Ok(None);
+        }
+
+        let mut candidates = candidate_set.vec;
+
+        debug!("assembled {} candidates for {:?}: {:?}",
+               candidates.len(),
+               stack,
+               candidates);
+
+        // At this point, we know that each of the entries in the
+        // candidate set is *individually* applicable. Now we have to
+        // figure out if they contain mutual incompatibilities. This
+        // frequently arises if we have an unconstrained input type --
+        // for example, we are looking for $0:Eq where $0 is some
+        // unconstrained type variable. In that case, we'll get a
+        // candidate which assumes $0 == int, one that assumes $0 ==
+        // usize, etc. This spells an ambiguity.
+
+        // If there is more than one candidate, first winnow them down
+        // by considering extra conditions (nested obligations and so
+        // forth). We don't winnow if there is exactly one
+        // candidate. This is a relatively minor distinction but it
+        // can lead to better inference and error-reporting. An
+        // example would be if there was an impl:
+        //
+        //     impl<T:Clone> Vec<T> { fn push_clone(...) { ... } }
+        //
+        // and we were to see some code `foo.push_clone()` where `foo`
+        // is a `Vec<Bar>` and `Bar` does not implement `Clone`.  If
+        // we were to winnow, we'd wind up with zero candidates.
+        // Instead, we select the right impl now but report `Bar does
+        // not implement Clone`.
+        if candidates.len() == 1 {
+            return self.filter_negative_impls(candidates.pop().unwrap());
+        }
+
+        // Winnow, but record the exact outcome of evaluation, which
+        // is needed for specialization.
+        let mut candidates: Vec<_> = candidates.into_iter().filter_map(|c| {
+            let eval = self.evaluate_candidate(stack, &c);
+            if eval.may_apply() {
+                Some(EvaluatedCandidate {
+                    candidate: c,
+                    evaluation: eval,
+                })
+            } else {
+                None
+            }
+        }).collect();
+
+        // If there are STILL multiple candidates, we can further
+        // reduce the list by dropping duplicates -- including
+        // resolving specializations.
+        if candidates.len() > 1 {
+            let mut i = 0;
+            while i < candidates.len() {
+                let is_dup =
+                    (0..candidates.len())
+                    .filter(|&j| i != j)
+                    .any(|j| self.candidate_should_be_dropped_in_favor_of(&candidates[i],
+                                                                          &candidates[j]));
+                if is_dup {
+                    debug!("Dropping candidate #{}/{}: {:?}",
+                           i, candidates.len(), candidates[i]);
+                    candidates.swap_remove(i);
+                } else {
+                    debug!("Retaining candidate #{}/{}: {:?}",
+                           i, candidates.len(), candidates[i]);
+                    i += 1;
+                }
+            }
+        }
+
+        // If there are *STILL* multiple candidates, give up and
+        // report ambiguity.
+        if candidates.len() > 1 {
+            debug!("multiple matches, ambig");
+            return Ok(None);
+        }
+
+        // If there are *NO* candidates, then there are no impls --
+        // that we know of, anyway. Note that in the case where there
+        // are unbound type variables within the obligation, it might
+        // be the case that you could still satisfy the obligation
+        // from another crate by instantiating the type variables with
+        // a type from another crate that does have an impl. This case
+        // is checked for in `evaluate_stack` (and hence users
+        // who might care about this case, like coherence, should use
+        // that function).
+        if candidates.is_empty() {
+            return Err(Unimplemented);
+        }
+
+        // Just one candidate left.
+        self.filter_negative_impls(candidates.pop().unwrap().candidate)
+    }
+
+    fn is_knowable<'o>(&mut self,
+                       stack: &TraitObligationStack<'o, 'tcx>)
+                       -> bool
+    {
+        debug!("is_knowable(intercrate={})", self.intercrate);
+
+        if !self.intercrate {
+            return true;
+        }
+
+        let obligation = &stack.obligation;
+        let predicate = self.infcx().resolve_type_vars_if_possible(&obligation.predicate);
+
+        // ok to skip binder because of the nature of the
+        // trait-ref-is-knowable check, which does not care about
+        // bound regions
+        let trait_ref = &predicate.skip_binder().trait_ref;
+
+        coherence::trait_ref_is_knowable(self.tcx(), trait_ref)
+    }
+
+    fn pick_candidate_cache(&self) -> &SelectionCache<'tcx> {
+        // If there are any where-clauses in scope, then we always use
+        // a cache local to this particular scope. Otherwise, we
+        // switch to a global cache. We used to try and draw
+        // finer-grained distinctions, but that led to a series of
+        // annoying and weird bugs like #22019 and #18290. This simple
+        // rule seems to be pretty clearly safe and also still retains
+        // a very high hit rate (~95% when compiling rustc).
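+        //
+        // For example (illustrative): while selecting inside a hypothetical
+        // `fn foo<T: Clone>()`, the outcome of `T : Clone` depends on the
+        // local where-clause, so it is stored in the param-env's
+        // `selection_cache` rather than in the tcx-global one.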
+        if !self.param_env().caller_bounds.is_empty() {
+            return &self.param_env().selection_cache;
+        }
+
+        // Avoid using the master cache during coherence and just rely
+        // on the local cache. This effectively disables caching
+        // during coherence. It is really just a simplification to
+        // avoid us having to fear that coherence results "pollute"
+        // the master cache. Since coherence executes pretty quickly,
+        // it's probably not worth going to more trouble to increase the
+        // hit-rate.
+        if self.intercrate {
+            return &self.param_env().selection_cache;
+        }
+
+        // Otherwise, we can use the global cache.
+        &self.tcx().selection_cache
+    }
+
+    fn check_candidate_cache(&mut self,
+                             cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>)
+                             -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>>
+    {
+        let cache = self.pick_candidate_cache();
+        let hashmap = cache.hashmap.borrow();
+        hashmap.get(&cache_fresh_trait_pred.0.trait_ref).cloned()
+    }
+
+    fn insert_candidate_cache(&mut self,
+                              cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
+                              candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>)
+    {
+        let cache = self.pick_candidate_cache();
+        let mut hashmap = cache.hashmap.borrow_mut();
+        hashmap.insert(cache_fresh_trait_pred.0.trait_ref.clone(), candidate);
+    }
+
+    fn should_update_candidate_cache(&mut self,
+                                     cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>,
+                                     candidate: &SelectionResult<'tcx, SelectionCandidate<'tcx>>)
+                                     -> bool
+    {
+        // In general, it's a good idea to cache results, even
+        // ambiguous ones, to save us some trouble later. But we have
+        // to be careful not to cache results that could be
+        // invalidated later by advances in inference. Normally, this
+        // is not an issue, because any inference variables whose
+        // types are not yet bound are "freshened" in the cache key,
+        // which means that if we later get the same request once that
+        // type variable IS bound, we'll have a different cache key.
+        // For example, if we have `Vec<_#0t> : Foo`, and `_#0t` is
+        // not yet known, we may cache the result as `None`. But if
+        // later `_#0t` is bound to `Bar`, then when we freshen we'll
+        // have `Vec<Bar> : Foo` as the cache key.
+        //
+        // HOWEVER, it CAN happen that we get an ambiguity result in
+        // one particular case around closures where the cache key
+        // would not change. That is when the precise types of the
+        // upvars that a closure references have not yet been figured
+        // out (i.e., because it is not yet known if they are captured
+        // by ref, and if by ref, what kind of ref). In these cases,
+        // when matching a builtin bound, we will yield back an
+        // ambiguous result. But the *cache key* is just the closure type;
+        // it doesn't capture the state of the upvar computation.
+        //
+        // To avoid this trap, just don't cache ambiguous results if
+        // the self-type contains no inference byproducts (that really
+        // shouldn't happen in other circumstances anyway, given
+        // coherence).
+
+        match *candidate {
+            Ok(Some(_)) | Err(_) => true,
+            Ok(None) => {
+                cache_fresh_trait_pred.0.trait_ref.substs.types.has_infer_types()
+            }
+        }
+    }
+
+    fn assemble_candidates<'o>(&mut self,
+                               stack: &TraitObligationStack<'o, 'tcx>)
+                               -> Result<SelectionCandidateSet<'tcx>, SelectionError<'tcx>>
+    {
+        let TraitObligationStack { obligation, .. } = *stack;
+        let ref obligation = Obligation {
+            cause: obligation.cause.clone(),
+            recursion_depth: obligation.recursion_depth,
+            predicate: self.infcx().resolve_type_vars_if_possible(&obligation.predicate)
+        };
+
+        if obligation.predicate.skip_binder().self_ty().is_ty_var() {
+            // FIXME(#20297): Self is a type variable (e.g. `_: AsRef<str>`).
+            //
+            // This is somewhat problematic, as the current scheme can't really
+            // handle it turning out to be a projection. This does end up as truly
+            // ambiguous in most cases anyway.
+            //
+            // Until this is fixed, take the fast path out - this also improves
+            // performance by preventing assemble_candidates_from_impls from
+            // matching every impl for this trait.
+            return Ok(SelectionCandidateSet { vec: vec![], ambiguous: true });
+        }
+
+        let mut candidates = SelectionCandidateSet {
+            vec: Vec::new(),
+            ambiguous: false
+        };
+
+        // Other bounds. Consider both in-scope bounds from fn decl
+        // and applicable impls. There is a certain set of precedence rules here.
+
+        match self.tcx().lang_items.to_builtin_kind(obligation.predicate.def_id()) {
+            Some(ty::BoundCopy) => {
+                debug!("obligation self ty is {:?}",
+                       obligation.predicate.0.self_ty());
+
+                // User-defined copy impls are permitted, but only for
+                // structs and enums.
+                self.assemble_candidates_from_impls(obligation, &mut candidates)?;
+
+                // For other types, we'll use the builtin rules.
+                self.assemble_builtin_bound_candidates(ty::BoundCopy,
+                                                       obligation,
+                                                       &mut candidates)?;
+            }
+            Some(bound @ ty::BoundSized) => {
+                // Sized is never implementable by end-users, it is
+                // always automatically computed.
+                self.assemble_builtin_bound_candidates(bound,
+                                                       obligation,
+                                                       &mut candidates)?;
+            }
+
+            None if self.tcx().lang_items.unsize_trait() ==
+                    Some(obligation.predicate.def_id()) => {
+                self.assemble_candidates_for_unsizing(obligation, &mut candidates);
+            }
+
+            Some(ty::BoundSend) |
+            Some(ty::BoundSync) |
+            None => {
+                self.assemble_closure_candidates(obligation, &mut candidates)?;
+                self.assemble_fn_pointer_candidates(obligation, &mut candidates)?;
+                self.assemble_candidates_from_impls(obligation, &mut candidates)?;
+                self.assemble_candidates_from_object_ty(obligation, &mut candidates);
+            }
+        }
+
+        self.assemble_candidates_from_projected_tys(obligation, &mut candidates);
+        self.assemble_candidates_from_caller_bounds(stack, &mut candidates)?;
+        // Default implementations have lower priority, so we only
+        // consider triggering a default if there is no other impl that can apply.
+        if candidates.vec.is_empty() {
+            self.assemble_candidates_from_default_impls(obligation, &mut candidates)?;
+        }
+        debug!("candidate list size: {}", candidates.vec.len());
+        Ok(candidates)
+    }
+
+    fn assemble_candidates_from_projected_tys(&mut self,
+                                              obligation: &TraitObligation<'tcx>,
+                                              candidates: &mut SelectionCandidateSet<'tcx>)
+    {
+        debug!("assemble_candidates_for_projected_tys({:?})", obligation);
+
+        // FIXME(#20297) -- just examining the self-type is very simplistic
+
+        // before we go into the whole skolemization thing, just
+        // quickly check if the self-type is a projection at all.
+        let trait_def_id = match obligation.predicate.0.trait_ref.self_ty().sty {
+            ty::TyProjection(ref data) => data.trait_ref.def_id,
+            ty::TyInfer(ty::TyVar(_)) => {
+                span_bug!(obligation.cause.span,
+                    "Self=_ should have been handled by assemble_candidates");
+            }
+            _ => { return; }
+        };
+
+        debug!("assemble_candidates_for_projected_tys: trait_def_id={:?}",
+               trait_def_id);
+
+        let result = self.infcx.probe(|snapshot| {
+            self.match_projection_obligation_against_bounds_from_trait(obligation,
+                                                                       snapshot)
+        });
+
+        if result {
+            candidates.vec.push(ProjectionCandidate);
+        }
+    }
+
+    fn match_projection_obligation_against_bounds_from_trait(
+        &mut self,
+        obligation: &TraitObligation<'tcx>,
+        snapshot: &infer::CombinedSnapshot)
+        -> bool
+    {
+        let poly_trait_predicate =
+            self.infcx().resolve_type_vars_if_possible(&obligation.predicate);
+        let (skol_trait_predicate, skol_map) =
+            self.infcx().skolemize_late_bound_regions(&poly_trait_predicate, snapshot);
+        debug!("match_projection_obligation_against_bounds_from_trait: \
+                skol_trait_predicate={:?} skol_map={:?}",
+               skol_trait_predicate,
+               skol_map);
+
+        let projection_trait_ref = match skol_trait_predicate.trait_ref.self_ty().sty {
+            ty::TyProjection(ref data) => &data.trait_ref,
+            _ => {
+                span_bug!(
+                    obligation.cause.span,
+                    "match_projection_obligation_against_bounds_from_trait() called \
+                     but self-ty not a projection: {:?}",
+                    skol_trait_predicate.trait_ref.self_ty());
+            }
+        };
+        debug!("match_projection_obligation_against_bounds_from_trait: \
+                projection_trait_ref={:?}",
+               projection_trait_ref);
+
+        let trait_predicates = self.tcx().lookup_predicates(projection_trait_ref.def_id);
+        let bounds = trait_predicates.instantiate(self.tcx(), projection_trait_ref.substs);
+        debug!("match_projection_obligation_against_bounds_from_trait: \
+                bounds={:?}",
+               bounds);
+
+        let matching_bound =
+            util::elaborate_predicates(self.tcx(), bounds.predicates.into_vec())
+            .filter_to_traits()
+            .find(
+                |bound| self.infcx.probe(
+                    |_| self.match_projection(obligation,
+                                              bound.clone(),
+                                              skol_trait_predicate.trait_ref.clone(),
+                                              &skol_map,
+                                              snapshot)));
+
+        debug!("match_projection_obligation_against_bounds_from_trait: \
+                matching_bound={:?}",
+               matching_bound);
+        match matching_bound {
+            None => false,
+            Some(bound) => {
+                // Repeat the successful match, if any, this time outside of a probe.
+                let result = self.match_projection(obligation,
+                                                   bound,
+                                                   skol_trait_predicate.trait_ref.clone(),
+                                                   &skol_map,
+                                                   snapshot);
+                assert!(result);
+                true
+            }
+        }
+    }
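+
+    // A minimal sketch of the situation this check targets (hypothetical
+    // trait and function names): the self-type of the obligation is a
+    // projection, and the bound we need is declared on the trait the
+    // projection comes from:
+    //
+    //     trait Producer { type Output: Clone; }
+    //
+    //     fn dup<P: Producer>(x: P::Output) -> (P::Output, P::Output) {
+    //         // `<P as Producer>::Output: Clone` follows from the bound
+    //         // declared on `Producer` itself
+    //         (x.clone(), x)
+    //     }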
+
+    fn match_projection(&mut self,
+                        obligation: &TraitObligation<'tcx>,
+                        trait_bound: ty::PolyTraitRef<'tcx>,
+                        skol_trait_ref: ty::TraitRef<'tcx>,
+                        skol_map: &infer::SkolemizationMap,
+                        snapshot: &infer::CombinedSnapshot)
+                        -> bool
+    {
+        assert!(!skol_trait_ref.has_escaping_regions());
+        let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span);
+        match self.infcx.sub_poly_trait_refs(false,
+                                             origin,
+                                             trait_bound.clone(),
+                                             ty::Binder(skol_trait_ref.clone())) {
+            Ok(InferOk { obligations, .. }) => {
+                // FIXME(#32730) propagate obligations
+                assert!(obligations.is_empty());
+            }
+            Err(_) => { return false; }
+        }
+
+        self.infcx.leak_check(skol_map, snapshot).is_ok()
+    }
+
+    /// Given an obligation like `<SomeTrait for T>`, search the obligations that the caller
+    /// supplied to find out whether it is listed among them.
+    ///
+    /// Never affects inference environment.
+    fn assemble_candidates_from_caller_bounds<'o>(&mut self,
+                                                  stack: &TraitObligationStack<'o, 'tcx>,
+                                                  candidates: &mut SelectionCandidateSet<'tcx>)
+                                                  -> Result<(),SelectionError<'tcx>>
+    {
+        debug!("assemble_candidates_from_caller_bounds({:?})",
+               stack.obligation);
+
+        let all_bounds =
+            self.param_env().caller_bounds
+                            .iter()
+                            .filter_map(|o| o.to_opt_poly_trait_ref());
+
+        let matching_bounds =
+            all_bounds.filter(
+                |bound| self.evaluate_where_clause(stack, bound.clone()).may_apply());
+
+        let param_candidates =
+            matching_bounds.map(|bound| ParamCandidate(bound));
+
+        candidates.vec.extend(param_candidates);
+
+        Ok(())
+    }
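+
+    // Sketch of a caller-bound (where-clause) candidate, with made-up
+    // names: inside `describe`, the obligation `T: Debug` is discharged
+    // by the bound the caller supplied rather than by any impl:
+    //
+    //     fn describe<T: std::fmt::Debug>(value: T) -> String {
+    //         format!("{:?}", value)
+    //     }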
+
+    fn evaluate_where_clause<'o>(&mut self,
+                                 stack: &TraitObligationStack<'o, 'tcx>,
+                                 where_clause_trait_ref: ty::PolyTraitRef<'tcx>)
+                                 -> EvaluationResult
+    {
+        self.infcx().probe(move |_| {
+            match self.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
+                Ok(obligations) => {
+                    self.evaluate_predicates_recursively(stack.list(), obligations.iter())
+                }
+                Err(()) => EvaluatedToErr
+            }
+        })
+    }
+
+    /// Check for the artificial impl that the compiler will create for an obligation like `X :
+    /// FnMut<..>` where `X` is a closure type.
+    ///
+    /// Note: the type parameters on a closure candidate are modeled as *output* type
+    /// parameters and hence do not affect whether this trait is a match or not. They will be
+    /// unified during the confirmation step.
+    fn assemble_closure_candidates(&mut self,
+                                   obligation: &TraitObligation<'tcx>,
+                                   candidates: &mut SelectionCandidateSet<'tcx>)
+                                   -> Result<(),SelectionError<'tcx>>
+    {
+        let kind = match self.tcx().lang_items.fn_trait_kind(obligation.predicate.0.def_id()) {
+            Some(k) => k,
+            None => { return Ok(()); }
+        };
+
+        // ok to skip binder because the substs on closure types never
+        // touch bound regions; they just capture the in-scope
+        // type/region parameters
+        let self_ty = *obligation.self_ty().skip_binder();
+        let (closure_def_id, substs) = match self_ty.sty {
+            ty::TyClosure(id, ref substs) => (id, substs),
+            ty::TyInfer(ty::TyVar(_)) => {
+                debug!("assemble_unboxed_closure_candidates: ambiguous self-type");
+                candidates.ambiguous = true;
+                return Ok(());
+            }
+            _ => { return Ok(()); }
+        };
+
+        debug!("assemble_unboxed_candidates: self_ty={:?} kind={:?} obligation={:?}",
+               self_ty,
+               kind,
+               obligation);
+
+        match self.infcx.closure_kind(closure_def_id) {
+            Some(closure_kind) => {
+                debug!("assemble_unboxed_candidates: closure_kind = {:?}", closure_kind);
+                if closure_kind.extends(kind) {
+                    candidates.vec.push(ClosureCandidate(closure_def_id, substs));
+                }
+            }
+            None => {
+                debug!("assemble_unboxed_candidates: closure_kind not yet known");
+                candidates.ambiguous = true;
+            }
+        }
+
+        Ok(())
+    }
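+
+    // Illustrative example (hypothetical names) of the closure case: the
+    // closure below is inferred to be an `FnMut` closure, so the
+    // `F: FnMut()` obligation is matched by a closure candidate:
+    //
+    //     fn call_twice<F: FnMut()>(mut f: F) { f(); f(); }
+    //
+    //     fn main() {
+    //         let mut count = 0;
+    //         call_twice(|| count += 1);
+    //         assert_eq!(count, 2);
+    //     }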
+
+    /// Implement one of the `Fn()` family of traits for a fn pointer.
+    fn assemble_fn_pointer_candidates(&mut self,
+                                      obligation: &TraitObligation<'tcx>,
+                                      candidates: &mut SelectionCandidateSet<'tcx>)
+                                      -> Result<(),SelectionError<'tcx>>
+    {
+        // We provide impls of all the fn traits for fn pointers.
+        if self.tcx().lang_items.fn_trait_kind(obligation.predicate.def_id()).is_none() {
+            return Ok(());
+        }
+
+        // ok to skip binder because what we are inspecting doesn't involve bound regions
+        let self_ty = *obligation.self_ty().skip_binder();
+        match self_ty.sty {
+            ty::TyInfer(ty::TyVar(_)) => {
+                debug!("assemble_fn_pointer_candidates: ambiguous self-type");
+                candidates.ambiguous = true; // could wind up being a fn() type
+            }
+
+            // provide an impl, but only for suitable `fn` pointers
+            ty::TyFnDef(_, _, &ty::BareFnTy {
+                unsafety: hir::Unsafety::Normal,
+                abi: Abi::Rust,
+                sig: ty::Binder(ty::FnSig {
+                    inputs: _,
+                    output: ty::FnConverging(_),
+                    variadic: false
+                })
+            }) |
+            ty::TyFnPtr(&ty::BareFnTy {
+                unsafety: hir::Unsafety::Normal,
+                abi: Abi::Rust,
+                sig: ty::Binder(ty::FnSig {
+                    inputs: _,
+                    output: ty::FnConverging(_),
+                    variadic: false
+                })
+            }) => {
+                candidates.vec.push(FnPointerCandidate);
+            }
+
+            _ => { }
+        }
+
+        Ok(())
+    }
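+
+    // Sketch (made-up names) of the fn-pointer case: a `fn` pointer can
+    // be passed where an `Fn(i32) -> i32` bound is expected:
+    //
+    //     fn double(x: i32) -> i32 { x * 2 }
+    //
+    //     fn apply<F: Fn(i32) -> i32>(f: F, v: i32) -> i32 { f(v) }
+    //
+    //     fn main() {
+    //         let p: fn(i32) -> i32 = double;
+    //         assert_eq!(apply(p, 21), 42);
+    //     }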
+
+    /// Search for impls that might apply to `obligation`.
+    fn assemble_candidates_from_impls(&mut self,
+                                      obligation: &TraitObligation<'tcx>,
+                                      candidates: &mut SelectionCandidateSet<'tcx>)
+                                      -> Result<(), SelectionError<'tcx>>
+    {
+        debug!("assemble_candidates_from_impls(obligation={:?})", obligation);
+
+        let def = self.tcx().lookup_trait_def(obligation.predicate.def_id());
+
+        def.for_each_relevant_impl(
+            self.tcx(),
+            obligation.predicate.0.trait_ref.self_ty(),
+            |impl_def_id| {
+                self.infcx.probe(|snapshot| {
+                    if let Ok(_) = self.match_impl(impl_def_id, obligation, snapshot) {
+                        candidates.vec.push(ImplCandidate(impl_def_id));
+                    }
+                });
+            }
+        );
+
+        Ok(())
+    }
+
+    fn assemble_candidates_from_default_impls(&mut self,
+                                              obligation: &TraitObligation<'tcx>,
+                                              candidates: &mut SelectionCandidateSet<'tcx>)
+                                              -> Result<(), SelectionError<'tcx>>
+    {
+        // OK to skip binder here because the tests we do below do not involve bound regions
+        let self_ty = *obligation.self_ty().skip_binder();
+        debug!("assemble_candidates_from_default_impls(self_ty={:?})", self_ty);
+
+        let def_id = obligation.predicate.def_id();
+
+        if self.tcx().trait_has_default_impl(def_id) {
+            match self_ty.sty {
+                ty::TyTrait(..) => {
+                    // For object types, we don't know what the
+                    // closed-over types are. For most traits, this
+                    // means we conservatively say nothing; a candidate
+                    // may be added by `assemble_candidates_from_object_ty`.
+                    // However, for the magic reflect-like trait (marked
+                    // with `rustc_reflect_like`), we consider it to be
+                    // implemented even for object types, because it just
+                    // lets you reflect onto the object type, not into
+                    // the object's interior.
+                    if self.tcx().has_attr(def_id, "rustc_reflect_like") {
+                        candidates.vec.push(DefaultImplObjectCandidate(def_id));
+                    }
+                }
+                ty::TyParam(..) |
+                ty::TyProjection(..) => {
+                    // In these cases, we don't know what the actual
+                    // type is.  Therefore, we cannot break it down
+                    // into its constituent types. So we don't
+                    // consider the `..` impl but instead just add no
+                    // candidates: this means that typeck will only
+                    // succeed if there is another reason to believe
+                    // that this obligation holds. That could be a
+                    // where-clause or, in the case of an object type,
+                    // it could be that the object type lists the
+                    // trait (e.g. `Foo+Send : Send`). See
+                    // `compile-fail/typeck-default-trait-impl-send-param.rs`
+                    // for an example of a test case that exercises
+                    // this path.
+                }
+                ty::TyInfer(ty::TyVar(_)) => {
+                    // the defaulted impl might apply, we don't know
+                    candidates.ambiguous = true;
+                }
+                _ => {
+                    candidates.vec.push(DefaultImplCandidate(def_id.clone()))
+                }
+            }
+        }
+
+        Ok(())
+    }
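+
+    // Rough illustration (hypothetical types) of a default impl in
+    // action, using `Send`: a struct gets the defaulted impl only when
+    // every constituent type satisfies the trait:
+    //
+    //     struct Owned { data: i32 }               // all fields are Send
+    //     struct Shared { data: std::rc::Rc<i32> } // Rc<i32> is not Send
+    //
+    //     fn assert_send<T: Send>() {}
+    //
+    //     fn main() {
+    //         assert_send::<Owned>();
+    //         // assert_send::<Shared>();  // would be rejected
+    //     }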
+
+    /// Search for impls that might apply to `obligation`.
+    fn assemble_candidates_from_object_ty(&mut self,
+                                          obligation: &TraitObligation<'tcx>,
+                                          candidates: &mut SelectionCandidateSet<'tcx>)
+    {
+        debug!("assemble_candidates_from_object_ty(self_ty={:?})",
+               obligation.self_ty().skip_binder());
+
+        // Object-safety candidates are only applicable to object-safe
+        // traits. Including this check is useful because it helps
+        // inference in cases of traits like `BorrowFrom`, which are
+        // not object-safe, and which rely on being able to infer the
+        // self-type from one of the other inputs. Without this check,
+        // these cases wind up being considered ambiguous due to a
+        // (spurious) ambiguity introduced here.
+        let predicate_trait_ref = obligation.predicate.to_poly_trait_ref();
+        if !object_safety::is_object_safe(self.tcx(), predicate_trait_ref.def_id()) {
+            return;
+        }
+
+        self.infcx.commit_if_ok(|snapshot| {
+            let (self_ty, _) =
+                self.infcx().skolemize_late_bound_regions(&obligation.self_ty(), snapshot);
+            let poly_trait_ref = match self_ty.sty {
+                ty::TyTrait(ref data) => {
+                    match self.tcx().lang_items.to_builtin_kind(obligation.predicate.def_id()) {
+                        Some(bound @ ty::BoundSend) | Some(bound @ ty::BoundSync) => {
+                            if data.bounds.builtin_bounds.contains(&bound) {
+                                debug!("assemble_candidates_from_object_ty: matched builtin bound, \
+                                        pushing candidate");
+                                candidates.vec.push(BuiltinObjectCandidate);
+                                return Ok(());
+                            }
+                        }
+                        _ => {}
+                    }
+
+                    data.principal_trait_ref_with_self_ty(self.tcx(), self_ty)
+                }
+                ty::TyInfer(ty::TyVar(_)) => {
+                    debug!("assemble_candidates_from_object_ty: ambiguous");
+                    candidates.ambiguous = true; // could wind up being an object type
+                    return Ok(());
+                }
+                _ => {
+                    return Ok(());
+                }
+            };
+
+            debug!("assemble_candidates_from_object_ty: poly_trait_ref={:?}",
+                   poly_trait_ref);
+
+            // Count only those upcast versions that match the trait-ref
+            // we are looking for. Specifically, check not only for the
+            // correct trait, but also for the correct type parameters.
+            // For example, we may be trying to upcast `Foo` to `Bar<i32>`,
+            // but `Foo` is declared as `trait Foo : Bar<u32>`.
+            let upcast_trait_refs =
+                util::supertraits(self.tcx(), poly_trait_ref)
+                .filter(|upcast_trait_ref| {
+                    self.infcx.probe(|_| {
+                        let upcast_trait_ref = upcast_trait_ref.clone();
+                        self.match_poly_trait_ref(obligation, upcast_trait_ref).is_ok()
+                    })
+                })
+                .count();
+
+            if upcast_trait_refs > 1 {
+                // can be upcast in many ways; need more type information
+                candidates.ambiguous = true;
+            } else if upcast_trait_refs == 1 {
+                candidates.vec.push(ObjectCandidate);
+            }
+
+            Ok::<(),()>(())
+        }).unwrap();
+    }
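+
+    // Sketch (hypothetical names) of an object candidate: instantiating a
+    // generic parameter with the trait-object type itself, so that
+    // `Debug: Debug` must hold for the (object-safe) object type:
+    //
+    //     use std::fmt::Debug;
+    //
+    //     fn show<T: Debug + ?Sized>(value: &T) { println!("{:?}", value); }
+    //
+    //     fn main() {
+    //         let boxed: Box<Debug> = Box::new(42);
+    //         show(&*boxed);  // T = `Debug` (the object type)
+    //     }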
+
+    /// Search for unsizing that might apply to `obligation`.
+    fn assemble_candidates_for_unsizing(&mut self,
+                                        obligation: &TraitObligation<'tcx>,
+                                        candidates: &mut SelectionCandidateSet<'tcx>) {
+        // We currently never consider higher-ranked obligations e.g.
+        // `for<'a> &'a T: Unsize<Trait+'a>` to be implemented. This is not
+        // because they are a priori invalid; we could potentially add support
+        // for them later. It's just that there isn't really a strong need for it.
+        // A `T: Unsize<U>` obligation is always used as part of a `T: CoerceUnsize<U>`
+        // impl, and those are generally applied to concrete types.
+        //
+        // That said, one might try to write a fn with a where clause like
+        //     for<'a> Foo<'a, T>: Unsize<Foo<'a, Trait>>
+        // where the `'a` is kind of orthogonal to the relevant part of the `Unsize`.
+        // Still, you'd be more likely to write that where clause as
+        //     T: Trait
+        // so it seems ok if we (conservatively) fail to accept that `Unsize`
+        // obligation above. Should be possible to extend this in the future.
+        let source = match self.tcx().no_late_bound_regions(&obligation.self_ty()) {
+            Some(t) => t,
+            None => {
+                // Don't add any candidates if there are bound regions.
+                return;
+            }
+        };
+        let target = obligation.predicate.0.input_types()[0];
+
+        debug!("assemble_candidates_for_unsizing(source={:?}, target={:?})",
+               source, target);
+
+        let may_apply = match (&source.sty, &target.sty) {
+            // Trait+Kx+'a -> Trait+Ky+'b (upcasts).
+            (&ty::TyTrait(ref data_a), &ty::TyTrait(ref data_b)) => {
+                // Upcasts permit two things:
+                //
+                // 1. Dropping builtin bounds, e.g. `Foo+Send` to `Foo`
+                // 2. Tightening the region bound, e.g. `Foo+'a` to `Foo+'b` if `'a : 'b`
+                //
+                // Note that neither of these changes requires any
+                // change at runtime.  Eventually this will be
+                // generalized.
+                //
+                // We always upcast when we can because of reason
+                // #2 (region bounds).
+                data_a.principal.def_id() == data_b.principal.def_id() &&
+                data_a.bounds.builtin_bounds.is_superset(&data_b.bounds.builtin_bounds)
+            }
+
+            // T -> Trait.
+            (_, &ty::TyTrait(_)) => true,
+
+            // Ambiguous handling is below T -> Trait, because inference
+            // variables can still implement Unsize<Trait> and nested
+            // obligations will have the final say (likely deferred).
+            (&ty::TyInfer(ty::TyVar(_)), _) |
+            (_, &ty::TyInfer(ty::TyVar(_))) => {
+                debug!("assemble_candidates_for_unsizing: ambiguous");
+                candidates.ambiguous = true;
+                false
+            }
+
+            // [T; n] -> [T].
+            (&ty::TyArray(_, _), &ty::TySlice(_)) => true,
+
+            // Struct<T> -> Struct<U>.
+            (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => {
+                def_id_a == def_id_b
+            }
+
+            _ => false
+        };
+
+        if may_apply {
+            candidates.vec.push(BuiltinUnsizeCandidate);
+        }
+    }
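+
+    // Sketch of coercions whose `Unsize` obligations fall into the cases
+    // matched above (assuming the usual std types; not exhaustive):
+    //
+    //     use std::fmt::Debug;
+    //
+    //     fn main() {
+    //         let arr = [1u8, 2, 3];
+    //         let slice: &[u8] = &arr;             // [T; n] -> [T]
+    //         let obj: &(Debug + Send) = &arr[0];  // T -> Trait
+    //         let plain: &Debug = obj;             // Trait+Send -> Trait
+    //         println!("{:?} {:?}", slice, plain);
+    //     }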
+
+    ///////////////////////////////////////////////////////////////////////////
+    // WINNOW
+    //
+    // Winnowing is the process of attempting to resolve ambiguity by
+    // probing further. During the winnowing process, we unify all
+    // type variables (ignoring skolemization) and then we also
+    // attempt to evaluate recursive bounds to see if they are
+    // satisfied.
+
+    /// Returns true if `victim` should be dropped in favor of
+    /// `other`. Generally speaking we will drop duplicate
+    /// candidates and prefer where-clause candidates.
+    ///
+    /// See the comment for "SelectionCandidate" for more details.
+    fn candidate_should_be_dropped_in_favor_of<'o>(
+        &mut self,
+        victim: &EvaluatedCandidate<'tcx>,
+        other: &EvaluatedCandidate<'tcx>)
+        -> bool
+    {
+        if victim.candidate == other.candidate {
+            return true;
+        }
+
+        match other.candidate {
+            ObjectCandidate |
+            ParamCandidate(_) | ProjectionCandidate => match victim.candidate {
+                DefaultImplCandidate(..) => {
+                    bug!(
+                        "default implementations shouldn't be recorded \
+                         when there are other valid candidates");
+                }
+                ImplCandidate(..) |
+                ClosureCandidate(..) |
+                FnPointerCandidate |
+                BuiltinObjectCandidate |
+                BuiltinUnsizeCandidate |
+                DefaultImplObjectCandidate(..) |
+                BuiltinCandidate(..) => {
+                    // We have a where-clause so don't go around looking
+                    // for impls.
+                    true
+                }
+                ObjectCandidate |
+                ProjectionCandidate => {
+                    // Arbitrarily give param candidates priority
+                    // over projection and object candidates.
+                    true
+                },
+                ParamCandidate(..) => false,
+            },
+            ImplCandidate(other_def) => {
+                // See if we can toss out `victim` based on specialization.
+                // This requires us to know *for sure* that the `other` impl
+                // applies, i.e. that its evaluation result is `EvaluatedToOk`:
+                if other.evaluation == EvaluatedToOk {
+                    if let ImplCandidate(victim_def) = victim.candidate {
+                        return traits::specializes(self.tcx(), other_def, victim_def);
+                    }
+                }
+
+                false
+            },
+            _ => false
+        }
+    }
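+
+    // Illustration (made-up trait) of the where-clause preference: both a
+    // blanket impl candidate and a param candidate apply below, and the
+    // where-clause wins, so no ambiguity is reported:
+    //
+    //     trait Greet { fn hi(&self) -> &'static str; }
+    //
+    //     impl<T> Greet for T { fn hi(&self) -> &'static str { "blanket" } }
+    //
+    //     fn greet<T: Greet>(x: T) -> &'static str {
+    //         x.hi()
+    //     }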
+
+    ///////////////////////////////////////////////////////////////////////////
+    // BUILTIN BOUNDS
+    //
+    // These cover the traits that are built-in to the language
+    // itself.  This includes `Copy` and `Sized` for sure. For the
+    // moment, it also includes `Send` / `Sync` and a few others, but
+    // those will hopefully change to library-defined traits in the
+    // future.
+
+    fn assemble_builtin_bound_candidates<'o>(&mut self,
+                                             bound: ty::BuiltinBound,
+                                             obligation: &TraitObligation<'tcx>,
+                                             candidates: &mut SelectionCandidateSet<'tcx>)
+                                             -> Result<(),SelectionError<'tcx>>
+    {
+        match self.builtin_bound(bound, obligation) {
+            Ok(If(..)) => {
+                debug!("builtin_bound: bound={:?}",
+                       bound);
+                candidates.vec.push(BuiltinCandidate(bound));
+                Ok(())
+            }
+            Ok(ParameterBuiltin) => { Ok(()) }
+            Ok(AmbiguousBuiltin) => {
+                debug!("assemble_builtin_bound_candidates: ambiguous builtin");
+                Ok(candidates.ambiguous = true)
+            }
+            Err(e) => { Err(e) }
+        }
+    }
+
+    fn builtin_bound(&mut self,
+                     bound: ty::BuiltinBound,
+                     obligation: &TraitObligation<'tcx>)
+                     -> Result<BuiltinBoundConditions<'tcx>,SelectionError<'tcx>>
+    {
+        // Note: these tests operate on types that may contain bound
+        // regions. To be proper, we ought to skolemize here, but we
+        // forgo the skolemization and defer it until the
+        // confirmation step.
+
+        let self_ty = self.infcx.shallow_resolve(obligation.predicate.0.self_ty());
+        return match self_ty.sty {
+            ty::TyInfer(ty::IntVar(_)) |
+            ty::TyInfer(ty::FloatVar(_)) |
+            ty::TyUint(_) |
+            ty::TyInt(_) |
+            ty::TyBool |
+            ty::TyFloat(_) |
+            ty::TyFnDef(..) |
+            ty::TyFnPtr(_) |
+            ty::TyChar => {
+                // safe for everything
+                ok_if(Vec::new())
+            }
+
+            ty::TyBox(_) => {  // Box<T>
+                match bound {
+                    ty::BoundCopy => Err(Unimplemented),
+
+                    ty::BoundSized => ok_if(Vec::new()),
+
+                    ty::BoundSync | ty::BoundSend => {
+                        bug!("Send/Sync shouldn't occur in builtin_bounds()");
+                    }
+                }
+            }
+
+            ty::TyRawPtr(..) => {     // *const T, *mut T
+                match bound {
+                    ty::BoundCopy | ty::BoundSized => ok_if(Vec::new()),
+
+                    ty::BoundSync | ty::BoundSend => {
+                        bug!("Send/Sync shouldn't occur in builtin_bounds()");
+                    }
+                }
+            }
+
+            ty::TyTrait(ref data) => {
+                match bound {
+                    ty::BoundSized => Err(Unimplemented),
+                    ty::BoundCopy => {
+                        if data.bounds.builtin_bounds.contains(&bound) {
+                            ok_if(Vec::new())
+                        } else {
+                            // Recursively check all supertraits to find out if any further
+                            // bounds are required and thus we must fulfill.
+                            let principal =
+                                data.principal_trait_ref_with_self_ty(self.tcx(),
+                                                                      self.tcx().types.err);
+                            let copy_def_id = obligation.predicate.def_id();
+                            for tr in util::supertraits(self.tcx(), principal) {
+                                if tr.def_id() == copy_def_id {
+                                    return ok_if(Vec::new())
+                                }
+                            }
+
+                            Err(Unimplemented)
+                        }
+                    }
+                    ty::BoundSync | ty::BoundSend => {
+                        bug!("Send/Sync shouldn't occur in builtin_bounds()");
+                    }
+                }
+            }
+
+            ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl }) => {
+                // &mut T or &T
+                match bound {
+                    ty::BoundCopy => {
+                        match mutbl {
+                            // &mut T is affine and hence never `Copy`
+                            hir::MutMutable => Err(Unimplemented),
+
+                            // &T is always copyable
+                            hir::MutImmutable => ok_if(Vec::new()),
+                        }
+                    }
+
+                    ty::BoundSized => ok_if(Vec::new()),
+
+                    ty::BoundSync | ty::BoundSend => {
+                        bug!("Send/Sync shouldn't occur in builtin_bounds()");
+                    }
+                }
+            }
+
+            ty::TyArray(element_ty, _) => {
+                // [T; n]
+                match bound {
+                    ty::BoundCopy => ok_if(vec![element_ty]),
+                    ty::BoundSized => ok_if(Vec::new()),
+                    ty::BoundSync | ty::BoundSend => {
+                        bug!("Send/Sync shouldn't occur in builtin_bounds()");
+                    }
+                }
+            }
+
+            ty::TyStr | ty::TySlice(_) => {
+                match bound {
+                    ty::BoundSync | ty::BoundSend => {
+                        bug!("Send/Sync shouldn't occur in builtin_bounds()");
+                    }
+
+                    ty::BoundCopy | ty::BoundSized => Err(Unimplemented),
+                }
+            }
+
+            // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
+            ty::TyTuple(ref tys) => ok_if(tys.clone()),
+
+            ty::TyClosure(_, ref substs) => {
+                // FIXME -- This case is tricky. In the case of by-ref
+                // closures particularly, we need the results of
+                // inference to decide how to reflect the type of each
+                // upvar (the upvar may have type `T`, but the runtime
+                // type could be `&mut`, `&`, or just `T`). For now,
+                // though, we'll do this unsoundly and assume that all
+                // captures are by value. Really what we ought to do
+                // is reserve judgement and then intertwine this
+                // analysis with closure inference.
+
+                // Unboxed closures shouldn't be implicitly copyable.
+                if bound == ty::BoundCopy {
+                    return Ok(ParameterBuiltin);
+                }
+
+                // Upvars are always local variables or references to
+                // local variables, and local variables cannot be
+                // unsized, so the closure struct as a whole must be
+                // Sized.
+                if bound == ty::BoundSized {
+                    return ok_if(Vec::new());
+                }
+
+                ok_if(substs.upvar_tys.clone())
+            }
+
+            ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
+                let types: Vec<Ty> = def.all_fields().map(|f| {
+                    f.ty(self.tcx(), substs)
+                }).collect();
+                nominal(bound, types)
+            }
+
+            ty::TyProjection(_) | ty::TyParam(_) => {
+                // Note: A type parameter is only considered to meet a
+                // particular bound if there is a where clause telling
+                // us that it does, and that case is handled by
+                // `assemble_candidates_from_caller_bounds()`.
+                Ok(ParameterBuiltin)
+            }
+
+            ty::TyInfer(ty::TyVar(_)) => {
+                // Unbound type variable. Might or might not have
+                // applicable impls and so forth, depending on what
+                // those type variables wind up being bound to.
+                debug!("assemble_builtin_bound_candidates: ambiguous builtin");
+                Ok(AmbiguousBuiltin)
+            }
+
+            ty::TyError => ok_if(Vec::new()),
+
+            ty::TyInfer(ty::FreshTy(_))
+            | ty::TyInfer(ty::FreshIntTy(_))
+            | ty::TyInfer(ty::FreshFloatTy(_)) => {
+                bug!("asked to assemble builtin bounds of unexpected type: {:?}",
+                     self_ty);
+            }
+        };
+
+        fn ok_if<'tcx>(v: Vec<Ty<'tcx>>)
+                       -> Result<BuiltinBoundConditions<'tcx>, SelectionError<'tcx>> {
+            Ok(If(ty::Binder(v)))
+        }
+
+        fn nominal<'cx, 'tcx>(bound: ty::BuiltinBound,
+                              types: Vec<Ty<'tcx>>)
+                              -> Result<BuiltinBoundConditions<'tcx>, SelectionError<'tcx>>
+        {
+            // First check for markers and other nonsense.
+            match bound {
+                // Fall back to whatever user-defined impls exist in this case.
+                ty::BoundCopy => Ok(ParameterBuiltin),
+
+                // Sized if all the component types are sized.
+                ty::BoundSized => ok_if(types),
+
+                // Shouldn't be coming through here.
+                ty::BoundSend | ty::BoundSync => bug!(),
+            }
+        }
+    }
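+
+    // A few user-visible consequences of the `Copy` rules above (sketch,
+    // hypothetical helper):
+    //
+    //     fn assert_copy<T: Copy>() {}
+    //
+    //     fn main() {
+    //         assert_copy::<&i32>();        // `&T` is always Copy
+    //         assert_copy::<[i32; 4]>();    // `[T; n]` is Copy when `T` is
+    //         // assert_copy::<&mut i32>(); // rejected: `&mut T` is never Copy
+    //         // assert_copy::<Box<i32>>(); // rejected: `Box<T>` is never Copy
+    //     }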
+
+    /// For default impls, we need to break apart a type into its
+    /// "constituent types" -- meaning, the types that it contains.
+    ///
+    /// Here are some (simple) examples:
+    ///
+    /// ```
+    /// (i32, u32) -> [i32, u32]
+    /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32]
+    /// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
+    /// Zed<i32> where enum Zed { A(T), B(u32) } -> [i32, u32]
+    /// ```
+    fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec<Ty<'tcx>> {
+        match t.sty {
+            ty::TyUint(_) |
+            ty::TyInt(_) |
+            ty::TyBool |
+            ty::TyFloat(_) |
+            ty::TyFnDef(..) |
+            ty::TyFnPtr(_) |
+            ty::TyStr |
+            ty::TyError |
+            ty::TyInfer(ty::IntVar(_)) |
+            ty::TyInfer(ty::FloatVar(_)) |
+            ty::TyChar => {
+                Vec::new()
+            }
+
+            ty::TyTrait(..) |
+            ty::TyParam(..) |
+            ty::TyProjection(..) |
+            ty::TyInfer(ty::TyVar(_)) |
+            ty::TyInfer(ty::FreshTy(_)) |
+            ty::TyInfer(ty::FreshIntTy(_)) |
+            ty::TyInfer(ty::FreshFloatTy(_)) => {
+                bug!("asked to assemble constituent types of unexpected type: {:?}",
+                     t);
+            }
+
+            ty::TyBox(referent_ty) => {  // Box<T>
+                vec![referent_ty]
+            }
+
+            ty::TyRawPtr(ty::TypeAndMut { ty: element_ty, ..}) |
+            ty::TyRef(_, ty::TypeAndMut { ty: element_ty, ..}) => {
+                vec![element_ty]
+            },
+
+            ty::TyArray(element_ty, _) | ty::TySlice(element_ty) => {
+                vec![element_ty]
+            }
+
+            ty::TyTuple(ref tys) => {
+                // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
+                tys.clone()
+            }
+
+            ty::TyClosure(_, ref substs) => {
+                // FIXME(#27086). We are invariant w/r/t our
+                // substs.func_substs, but we don't see them as
+                // constituent types; this seems RIGHT but also like
+                // something that a normal type couldn't simulate. Is
+                // this just a gap with the way that PhantomData and
+                // OIBIT interact? That is, there is no way to say
+                // "make me invariant with respect to this TYPE, but
+                // do not act as though I can reach it"
+                substs.upvar_tys.clone()
+            }
+
+            // for `PhantomData<T>`, we pass `T`
+            ty::TyStruct(def, substs) if def.is_phantom_data() => {
+                substs.types.get_slice(TypeSpace).to_vec()
+            }
+
+            ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
+                def.all_fields()
+                    .map(|f| f.ty(self.tcx(), substs))
+                    .collect()
+            }
+        }
+    }
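+
+    // Sketch (hypothetical type) of the `PhantomData` special case above:
+    // the `T` inside `PhantomData<T>` counts as a constituent type, so an
+    // auto trait like `Send` only applies when `T` is suitable:
+    //
+    //     use std::marker::PhantomData;
+    //
+    //     struct Tagged<T> { id: u32, _marker: PhantomData<T> }
+    //
+    //     fn assert_send<T: Send>() {}
+    //
+    //     fn main() {
+    //         assert_send::<Tagged<i32>>();
+    //         // assert_send::<Tagged<std::rc::Rc<i32>>>();  // would be rejected
+    //     }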
+
+    fn collect_predicates_for_types(&mut self,
+                                    obligation: &TraitObligation<'tcx>,
+                                    trait_def_id: DefId,
+                                    types: ty::Binder<Vec<Ty<'tcx>>>)
+                                    -> Vec<PredicateObligation<'tcx>>
+    {
+        let derived_cause = match self.tcx().lang_items.to_builtin_kind(trait_def_id) {
+            Some(_) => {
+                self.derived_cause(obligation, BuiltinDerivedObligation)
+            },
+            None => {
+                self.derived_cause(obligation, ImplDerivedObligation)
+            }
+        };
+
+        // Because the types were potentially derived from
+        // higher-ranked obligations they may reference late-bound
+        // regions. For example, `for<'a> Foo<&'a int> : Copy` would
+        // yield a type like `for<'a> &'a int`. In general, we
+        // maintain the invariant that we never manipulate bound
+        // regions, so we have to process these bound regions somehow.
+        //
+        // The strategy is to:
+        //
+        // 1. Instantiate those regions to skolemized regions (e.g.,
+        //    `for<'a> &'a int` becomes `&'0 int`).
+        // 2. Produce something like `&'0 int : Copy`
+        // 3. Re-bind the regions back to `for<'a> &'a int : Copy`
+
+        // Move the binder into the individual types
+        let bound_types: Vec<ty::Binder<Ty<'tcx>>> =
+            types.skip_binder()
+                 .iter()
+                 .map(|&nested_ty| ty::Binder(nested_ty))
+                 .collect();
+
+        // For each type, produce a vector of resulting obligations
+        let obligations: Result<Vec<Vec<_>>, _> = bound_types.iter().map(|nested_ty| {
+            self.infcx.commit_if_ok(|snapshot| {
+                let (skol_ty, skol_map) =
+                    self.infcx().skolemize_late_bound_regions(nested_ty, snapshot);
+                let Normalized { value: normalized_ty, mut obligations } =
+                    project::normalize_with_depth(self,
+                                                  obligation.cause.clone(),
+                                                  obligation.recursion_depth + 1,
+                                                  &skol_ty);
+                let skol_obligation =
+                    util::predicate_for_trait_def(self.tcx(),
+                                                  derived_cause.clone(),
+                                                  trait_def_id,
+                                                  obligation.recursion_depth + 1,
+                                                  normalized_ty,
+                                                  vec![]);
+                obligations.push(skol_obligation);
+                Ok(self.infcx().plug_leaks(skol_map, snapshot, &obligations))
+            })
+        }).collect();
+
+        // Flatten those vectors (couldn't do it above due to the `collect`)
+        match obligations {
+            Ok(obligations) => obligations.into_iter().flat_map(|o| o).collect(),
+            Err(ErrorReported) => Vec::new(),
+        }
+    }
+
+    ///////////////////////////////////////////////////////////////////////////
+    // CONFIRMATION
+    //
+    // Confirmation unifies the output type parameters of the trait
+    // with the values found in the obligation, possibly yielding a
+    // type error.  See `README.md` for more details.
+
+    fn confirm_candidate(&mut self,
+                         obligation: &TraitObligation<'tcx>,
+                         candidate: SelectionCandidate<'tcx>)
+                         -> Result<Selection<'tcx>,SelectionError<'tcx>>
+    {
+        debug!("confirm_candidate({:?}, {:?})",
+               obligation,
+               candidate);
+
+        match candidate {
+            BuiltinCandidate(builtin_bound) => {
+                Ok(VtableBuiltin(
+                    self.confirm_builtin_candidate(obligation, builtin_bound)?))
+            }
+
+            ParamCandidate(param) => {
+                let obligations = self.confirm_param_candidate(obligation, param);
+                Ok(VtableParam(obligations))
+            }
+
+            DefaultImplCandidate(trait_def_id) => {
+                let data = self.confirm_default_impl_candidate(obligation, trait_def_id);
+                Ok(VtableDefaultImpl(data))
+            }
+
+            DefaultImplObjectCandidate(trait_def_id) => {
+                let data = self.confirm_default_impl_object_candidate(obligation, trait_def_id);
+                Ok(VtableDefaultImpl(data))
+            }
+
+            ImplCandidate(impl_def_id) => {
+                let vtable_impl =
+                    self.confirm_impl_candidate(obligation, impl_def_id)?;
+                Ok(VtableImpl(vtable_impl))
+            }
+
+            ClosureCandidate(closure_def_id, substs) => {
+                let vtable_closure =
+                    self.confirm_closure_candidate(obligation, closure_def_id, substs)?;
+                Ok(VtableClosure(vtable_closure))
+            }
+
+            BuiltinObjectCandidate => {
+                // This indicates something like `(Trait+Send) :
+                // Send`. In this case, we know that this holds
+                // because that's what the object type is telling us,
+                // and there's really no additional obligations to
+                // prove and no types in particular to unify etc.
+                Ok(VtableParam(Vec::new()))
+            }
+
+            ObjectCandidate => {
+                let data = self.confirm_object_candidate(obligation);
+                Ok(VtableObject(data))
+            }
+
+            FnPointerCandidate => {
+                let fn_type =
+                    self.confirm_fn_pointer_candidate(obligation)?;
+                Ok(VtableFnPointer(fn_type))
+            }
+
+            ProjectionCandidate => {
+                self.confirm_projection_candidate(obligation);
+                Ok(VtableParam(Vec::new()))
+            }
+
+            BuiltinUnsizeCandidate => {
+                let data = self.confirm_builtin_unsize_candidate(obligation)?;
+                Ok(VtableBuiltin(data))
+            }
+        }
+    }
+
+    fn confirm_projection_candidate(&mut self,
+                                    obligation: &TraitObligation<'tcx>)
+    {
+        let _: Result<(),()> =
+            self.infcx.commit_if_ok(|snapshot| {
+                let result =
+                    self.match_projection_obligation_against_bounds_from_trait(obligation,
+                                                                               snapshot);
+                assert!(result);
+                Ok(())
+            });
+    }
+
+    fn confirm_param_candidate(&mut self,
+                               obligation: &TraitObligation<'tcx>,
+                               param: ty::PolyTraitRef<'tcx>)
+                               -> Vec<PredicateObligation<'tcx>>
+    {
+        debug!("confirm_param_candidate({:?},{:?})",
+               obligation,
+               param);
+
+        // During evaluation, we already checked that this
+        // where-clause trait-ref could be unified with the obligation
+        // trait-ref. Repeat that unification now without any
+        // transactional boundary; it should not fail.
+        match self.match_where_clause_trait_ref(obligation, param.clone()) {
+            Ok(obligations) => obligations,
+            Err(()) => {
+                bug!("Where clause `{:?}` was applicable to `{:?}` but now is not",
+                     param,
+                     obligation);
+            }
+        }
+    }
+
+    fn confirm_builtin_candidate(&mut self,
+                                 obligation: &TraitObligation<'tcx>,
+                                 bound: ty::BuiltinBound)
+                                 -> Result<VtableBuiltinData<PredicateObligation<'tcx>>,
+                                           SelectionError<'tcx>>
+    {
+        debug!("confirm_builtin_candidate({:?})",
+               obligation);
+
+        match self.builtin_bound(bound, obligation)? {
+            If(nested) => Ok(self.vtable_builtin_data(obligation, bound, nested)),
+            AmbiguousBuiltin | ParameterBuiltin => {
+                span_bug!(
+                    obligation.cause.span,
+                    "builtin bound for {:?} was ambig",
+                    obligation);
+            }
+        }
+    }
+
+    fn vtable_builtin_data(&mut self,
+                           obligation: &TraitObligation<'tcx>,
+                           bound: ty::BuiltinBound,
+                           nested: ty::Binder<Vec<Ty<'tcx>>>)
+                           -> VtableBuiltinData<PredicateObligation<'tcx>>
+    {
+        debug!("vtable_builtin_data(obligation={:?}, bound={:?}, nested={:?})",
+               obligation, bound, nested);
+
+        let trait_def = match self.tcx().lang_items.from_builtin_kind(bound) {
+            Ok(def_id) => def_id,
+            Err(_) => {
+                bug!("builtin trait definition not found");
+            }
+        };
+
+        let obligations = self.collect_predicates_for_types(obligation, trait_def, nested);
+
+        debug!("vtable_builtin_data: obligations={:?}",
+               obligations);
+
+        VtableBuiltinData { nested: obligations }
+    }
+
+    /// This handles the case where an `impl Foo for ..` impl is being used.
+    /// The idea is that the impl applies to `X : Foo` if the following conditions are met:
+    ///
+    /// 1. For each constituent type `Y` in `X`, `Y : Foo` holds
+    /// 2. For each where-clause `C` declared on `Foo`, `[Self => X] C` holds.
+    fn confirm_default_impl_candidate(&mut self,
+                                      obligation: &TraitObligation<'tcx>,
+                                      trait_def_id: DefId)
+                                      -> VtableDefaultImplData<PredicateObligation<'tcx>>
+    {
+        debug!("confirm_default_impl_candidate({:?}, {:?})",
+               obligation,
+               trait_def_id);
+
+        // binder is moved below
+        let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
+        let types = self.constituent_types_for_ty(self_ty);
+        self.vtable_default_impl(obligation, trait_def_id, ty::Binder(types))
+    }
+
+    fn confirm_default_impl_object_candidate(&mut self,
+                                             obligation: &TraitObligation<'tcx>,
+                                             trait_def_id: DefId)
+                                             -> VtableDefaultImplData<PredicateObligation<'tcx>>
+    {
+        debug!("confirm_default_impl_object_candidate({:?}, {:?})",
+               obligation,
+               trait_def_id);
+
+        assert!(self.tcx().has_attr(trait_def_id, "rustc_reflect_like"));
+
+        // OK to skip binder, it is reintroduced below
+        let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
+        match self_ty.sty {
+            ty::TyTrait(ref data) => {
+                // OK to skip the binder, it is reintroduced below
+                let input_types = data.principal.skip_binder().substs.types.get_slice(TypeSpace);
+                let assoc_types = data.bounds.projection_bounds
+                                             .iter()
+                                             .map(|pb| pb.skip_binder().ty);
+                let all_types: Vec<_> = input_types.iter().cloned()
+                                                          .chain(assoc_types)
+                                                          .collect();
+
+                // reintroduce the two binding levels we skipped, then flatten into one
+                let all_types = ty::Binder(ty::Binder(all_types));
+                let all_types = self.tcx().flatten_late_bound_regions(&all_types);
+
+                self.vtable_default_impl(obligation, trait_def_id, all_types)
+            }
+            _ => {
+                bug!("asked to confirm default object implementation for non-object type: {:?}",
+                     self_ty);
+            }
+        }
+    }
+
+    /// See `confirm_default_impl_candidate`
+    fn vtable_default_impl(&mut self,
+                           obligation: &TraitObligation<'tcx>,
+                           trait_def_id: DefId,
+                           nested: ty::Binder<Vec<Ty<'tcx>>>)
+                           -> VtableDefaultImplData<PredicateObligation<'tcx>>
+    {
+        debug!("vtable_default_impl_data: nested={:?}", nested);
+
+        let mut obligations = self.collect_predicates_for_types(obligation,
+                                                                trait_def_id,
+                                                                nested);
+
+        let trait_obligations: Result<Vec<_>,()> = self.infcx.commit_if_ok(|snapshot| {
+            let poly_trait_ref = obligation.predicate.to_poly_trait_ref();
+            let (trait_ref, skol_map) =
+                self.infcx().skolemize_late_bound_regions(&poly_trait_ref, snapshot);
+            Ok(self.impl_or_trait_obligations(obligation.cause.clone(),
+                                              obligation.recursion_depth + 1,
+                                              trait_def_id,
+                                              &trait_ref.substs,
+                                              skol_map,
+                                              snapshot))
+        });
+
+        // the closure above always returns `Ok`, so this `unwrap` cannot fail
+        obligations.append(&mut trait_obligations.unwrap());
+
+        debug!("vtable_default_impl_data: obligations={:?}", obligations);
+
+        VtableDefaultImplData {
+            trait_def_id: trait_def_id,
+            nested: obligations
+        }
+    }
+
+    fn confirm_impl_candidate(&mut self,
+                              obligation: &TraitObligation<'tcx>,
+                              impl_def_id: DefId)
+                              -> Result<VtableImplData<'tcx, PredicateObligation<'tcx>>,
+                                        SelectionError<'tcx>>
+    {
+        debug!("confirm_impl_candidate({:?},{:?})",
+               obligation,
+               impl_def_id);
+
+        // First, create the substitutions by matching the impl again,
+        // this time not in a probe.
+        self.infcx.commit_if_ok(|snapshot| {
+            let (substs, skol_map) =
+                self.rematch_impl(impl_def_id, obligation,
+                                  snapshot);
+            debug!("confirm_impl_candidate substs={:?}", substs);
+            Ok(self.vtable_impl(impl_def_id, substs, obligation.cause.clone(),
+                                obligation.recursion_depth + 1, skol_map, snapshot))
+        })
+    }
+
+    fn vtable_impl(&mut self,
+                   impl_def_id: DefId,
+                   mut substs: Normalized<'tcx, Substs<'tcx>>,
+                   cause: ObligationCause<'tcx>,
+                   recursion_depth: usize,
+                   skol_map: infer::SkolemizationMap,
+                   snapshot: &infer::CombinedSnapshot)
+                   -> VtableImplData<'tcx, PredicateObligation<'tcx>>
+    {
+        debug!("vtable_impl(impl_def_id={:?}, substs={:?}, recursion_depth={}, skol_map={:?})",
+               impl_def_id,
+               substs,
+               recursion_depth,
+               skol_map);
+
+        let mut impl_obligations =
+            self.impl_or_trait_obligations(cause,
+                                           recursion_depth,
+                                           impl_def_id,
+                                           &substs.value,
+                                           skol_map,
+                                           snapshot);
+
+        debug!("vtable_impl: impl_def_id={:?} impl_obligations={:?}",
+               impl_def_id,
+               impl_obligations);
+
+        // Because of RFC447, the impl-trait-ref and obligations
+        // are sufficient to determine the impl substs, without
+        // relying on projections in the impl-trait-ref.
+        //
+        // e.g. `impl<U: Tr, V: Iterator<Item=U>> Foo<<U as Tr>::T> for V`
+        impl_obligations.append(&mut substs.obligations);
+
+        VtableImplData { impl_def_id: impl_def_id,
+                         substs: self.tcx().mk_substs(substs.value),
+                         nested: impl_obligations }
+    }
+
+    fn confirm_object_candidate(&mut self,
+                                obligation: &TraitObligation<'tcx>)
+                                -> VtableObjectData<'tcx>
+    {
+        debug!("confirm_object_candidate({:?})",
+               obligation);
+
+        // FIXME skipping binder here seems wrong -- we should
+        // probably flatten the binder from the obligation and the
+        // binder from the object. Have to try to make a broken test
+        // case that results. -nmatsakis
+        let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
+        let poly_trait_ref = match self_ty.sty {
+            ty::TyTrait(ref data) => {
+                data.principal_trait_ref_with_self_ty(self.tcx(), self_ty)
+            }
+            _ => {
+                span_bug!(obligation.cause.span,
+                          "object candidate with non-object");
+            }
+        };
+
+        let mut upcast_trait_ref = None;
+        let vtable_base;
+
+        {
+            // We want to find the first supertrait in the list of
+            // supertraits that we can unify with, and do that
+            // unification. We know that there is exactly one in the list
+            // where we can unify because otherwise select would have
+            // reported an ambiguity. (When we do find a match, also
+            // record it for later.)
+            let nonmatching =
+                util::supertraits(self.tcx(), poly_trait_ref)
+                .take_while(|&t| {
+                    match
+                        self.infcx.commit_if_ok(
+                            |_| self.match_poly_trait_ref(obligation, t))
+                    {
+                        Ok(_) => { upcast_trait_ref = Some(t); false }
+                        Err(_) => { true }
+                    }
+                });
+
+            // Additionally, for each of the nonmatching predicates that
+            // we pass over, we sum up the number of vtable entries, so
+            // that we can compute the offset for the selected trait.
+            vtable_base =
+                nonmatching.map(|t| util::count_own_vtable_entries(self.tcx(), t))
+                           .sum();
+
+        }
+
+        VtableObjectData {
+            upcast_trait_ref: upcast_trait_ref.unwrap(),
+            vtable_base: vtable_base,
+        }
+    }
+
+    fn confirm_fn_pointer_candidate(&mut self,
+                                    obligation: &TraitObligation<'tcx>)
+                                    -> Result<ty::Ty<'tcx>,SelectionError<'tcx>>
+    {
+        debug!("confirm_fn_pointer_candidate({:?})",
+               obligation);
+
+        // ok to skip binder; it is reintroduced below
+        let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
+        let sig = self_ty.fn_sig();
+        let trait_ref =
+            util::closure_trait_ref_and_return_type(self.tcx(),
+                                                    obligation.predicate.def_id(),
+                                                    self_ty,
+                                                    sig,
+                                                    util::TupleArgumentsFlag::Yes)
+            .map_bound(|(trait_ref, _)| trait_ref);
+
+        self.confirm_poly_trait_refs(obligation.cause.clone(),
+                                     obligation.predicate.to_poly_trait_ref(),
+                                     trait_ref)?;
+        Ok(self_ty)
+    }
+
+    fn confirm_closure_candidate(&mut self,
+                                 obligation: &TraitObligation<'tcx>,
+                                 closure_def_id: DefId,
+                                 substs: &ty::ClosureSubsts<'tcx>)
+                                 -> Result<VtableClosureData<'tcx, PredicateObligation<'tcx>>,
+                                           SelectionError<'tcx>>
+    {
+        debug!("confirm_closure_candidate({:?},{:?},{:?})",
+               obligation,
+               closure_def_id,
+               substs);
+
+        let Normalized {
+            value: trait_ref,
+            obligations
+        } = self.closure_trait_ref(obligation, closure_def_id, substs);
+
+        debug!("confirm_closure_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})",
+               closure_def_id,
+               trait_ref,
+               obligations);
+
+        self.confirm_poly_trait_refs(obligation.cause.clone(),
+                                     obligation.predicate.to_poly_trait_ref(),
+                                     trait_ref)?;
+
+        Ok(VtableClosureData {
+            closure_def_id: closure_def_id,
+            substs: substs.clone(),
+            nested: obligations
+        })
+    }
+
+    /// In the case of closure types and fn pointers,
+    /// we currently treat the input type parameters on the trait as
+    /// outputs. This means that when we have a match we have only
+    /// considered the self type, so we have to go back and make sure
+    /// to relate the argument types too.  This is kind of wrong, but
+    /// since we control the full set of impls, also not that wrong,
+    /// and it DOES yield better error messages (since we don't report
+    /// errors as if there is no applicable impl, but rather report
+    /// errors about mismatched argument types).
+    ///
+    /// Here is an example. Imagine we have a closure expression
+    /// and we desugared it so that the type of the expression is
+    /// `Closure`, and `Closure` expects an int as argument. Then it
+    /// is "as if" the compiler generated this impl:
+    ///
+    ///     impl Fn(int) for Closure { ... }
+    ///
+    /// Now imagine our obligation is `Fn(usize) for Closure`. So far
+    /// we have matched the self-type `Closure`. At this point we'll
+    /// compare the `int` to `usize` and generate an error.
+    ///
+    /// Note that this checking occurs *after* the impl has been selected,
+    /// because these output type parameters should not affect the
+    /// selection of the impl. Therefore, if there is a mismatch, we
+    /// report an error to the user.
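+    ///
+    /// As a hypothetical illustration of the resulting diagnostics (the
+    /// names below are made up, not part of this module):
+    ///
+    /// ```rust,ignore
+    /// fn call_with_usize<F: Fn(usize)>(f: F) { f(0); }
+    ///
+    /// fn main() {
+    ///     // Selection matches the closure's self type against the `Fn`
+    ///     // impl, so the check performed here reports an `i32` vs
+    ///     // `usize` mismatch instead of "no applicable impl".
+    ///     call_with_usize(|x: i32| println!("{}", x)); // ERROR: type mismatch
+    /// }
+    /// ```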
+    fn confirm_poly_trait_refs(&mut self,
+                               obligation_cause: ObligationCause,
+                               obligation_trait_ref: ty::PolyTraitRef<'tcx>,
+                               expected_trait_ref: ty::PolyTraitRef<'tcx>)
+                               -> Result<(), SelectionError<'tcx>>
+    {
+        let origin = TypeOrigin::RelateOutputImplTypes(obligation_cause.span);
+
+        let obligation_trait_ref = obligation_trait_ref.clone();
+        self.infcx.sub_poly_trait_refs(false,
+                                       origin,
+                                       expected_trait_ref.clone(),
+                                       obligation_trait_ref.clone())
+            // FIXME(#32730) propagate obligations
+            .map(|InferOk { obligations, .. }| assert!(obligations.is_empty()))
+            .map_err(|e| OutputTypeParameterMismatch(expected_trait_ref, obligation_trait_ref, e))
+    }
+
+    fn confirm_builtin_unsize_candidate(&mut self,
+                                        obligation: &TraitObligation<'tcx>,)
+                                        -> Result<VtableBuiltinData<PredicateObligation<'tcx>>,
+                                                  SelectionError<'tcx>> {
+        let tcx = self.tcx();
+
+        // assemble_candidates_for_unsizing should ensure there are no late bound
+        // regions here. See the comment there for more details.
+        let source = self.infcx.shallow_resolve(
+            tcx.no_late_bound_regions(&obligation.self_ty()).unwrap());
+        let target = self.infcx.shallow_resolve(obligation.predicate.0.input_types()[0]);
+
+        debug!("confirm_builtin_unsize_candidate(source={:?}, target={:?})",
+               source, target);
+
+        let mut nested = vec![];
+        match (&source.sty, &target.sty) {
+            // Trait+Kx+'a -> Trait+Ky+'b (upcasts).
+            (&ty::TyTrait(ref data_a), &ty::TyTrait(ref data_b)) => {
+                // See assemble_candidates_for_unsizing for more info.
+                let bounds = ty::ExistentialBounds {
+                    region_bound: data_b.bounds.region_bound,
+                    builtin_bounds: data_b.bounds.builtin_bounds,
+                    projection_bounds: data_a.bounds.projection_bounds.clone(),
+                };
+
+                let new_trait = tcx.mk_trait(data_a.principal.clone(), bounds);
+                let origin = TypeOrigin::Misc(obligation.cause.span);
+                let InferOk { obligations, .. } =
+                    self.infcx.sub_types(false, origin, new_trait, target)
+                    .map_err(|_| Unimplemented)?;
+                // FIXME(#32730) propagate obligations
+                assert!(obligations.is_empty());
+
+                // Register one obligation for 'a: 'b.
+                let cause = ObligationCause::new(obligation.cause.span,
+                                                 obligation.cause.body_id,
+                                                 ObjectCastObligation(target));
+                let outlives = ty::OutlivesPredicate(data_a.bounds.region_bound,
+                                                     data_b.bounds.region_bound);
+                nested.push(Obligation::with_depth(cause,
+                                                   obligation.recursion_depth + 1,
+                                                   ty::Binder(outlives).to_predicate()));
+            }
+
+            // T -> Trait.
+            (_, &ty::TyTrait(ref data)) => {
+                let object_did = data.principal_def_id();
+                if !object_safety::is_object_safe(tcx, object_did) {
+                    return Err(TraitNotObjectSafe(object_did));
+                }
+
+                let cause = ObligationCause::new(obligation.cause.span,
+                                                 obligation.cause.body_id,
+                                                 ObjectCastObligation(target));
+                let mut push = |predicate| {
+                    nested.push(Obligation::with_depth(cause.clone(),
+                                                       obligation.recursion_depth + 1,
+                                                       predicate));
+                };
+
+                // Create the obligation for casting from T to Trait.
+                push(data.principal_trait_ref_with_self_ty(tcx, source).to_predicate());
+
+                // We can only make objects from sized types.
+                let mut builtin_bounds = data.bounds.builtin_bounds;
+                builtin_bounds.insert(ty::BoundSized);
+
+                // Create additional obligations for all the various builtin
+                // bounds attached to the object cast. (In other words, if the
+                // object type is Foo+Send, this would create an obligation
+                // for the Send check.)
+                for bound in &builtin_bounds {
+                    if let Ok(tr) = util::trait_ref_for_builtin_bound(tcx, bound, source) {
+                        push(tr.to_predicate());
+                    } else {
+                        return Err(Unimplemented);
+                    }
+                }
+
+                // Create obligations for the projection predicates.
+                for bound in data.projection_bounds_with_self_ty(tcx, source) {
+                    push(bound.to_predicate());
+                }
+
+                // If the type is `Foo+'a`, ensure that the type
+                // being cast to `Foo+'a` outlives `'a`:
+                let outlives = ty::OutlivesPredicate(source,
+                                                     data.bounds.region_bound);
+                push(ty::Binder(outlives).to_predicate());
+            }
+
+            // [T; n] -> [T].
+            (&ty::TyArray(a, _), &ty::TySlice(b)) => {
+                let origin = TypeOrigin::Misc(obligation.cause.span);
+                let InferOk { obligations, .. } =
+                    self.infcx.sub_types(false, origin, a, b)
+                    .map_err(|_| Unimplemented)?;
+                // FIXME(#32730) propagate obligations
+                assert!(obligations.is_empty());
+            }
+
+            // Struct<T> -> Struct<U>.
+            (&ty::TyStruct(def, substs_a), &ty::TyStruct(_, substs_b)) => {
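+                // Illustrative example (hypothetical names): with
+                // `struct Wrapper<T: ?Sized> { info: u32, data: T }`,
+                // `Wrapper<[u8; 4]>` can unsize to `Wrapper<[u8]>`; only the
+                // last field's type parameters participate in the coercion.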
+                let fields = def
+                    .all_fields()
+                    .map(|f| f.unsubst_ty())
+                    .collect::<Vec<_>>();
+
+                // The last field of the structure has to exist and contain type parameters.
+                let field = if let Some(&field) = fields.last() {
+                    field
+                } else {
+                    return Err(Unimplemented);
+                };
+                let mut ty_params = vec![];
+                for ty in field.walk() {
+                    if let ty::TyParam(p) = ty.sty {
+                        assert!(p.space == TypeSpace);
+                        let idx = p.idx as usize;
+                        if !ty_params.contains(&idx) {
+                            ty_params.push(idx);
+                        }
+                    }
+                }
+                if ty_params.is_empty() {
+                    return Err(Unimplemented);
+                }
+
+                // Replace type parameters used in unsizing with
+                // TyError and ensure they do not affect any other fields.
+                // This could be checked after type collection for any struct
+                // with a potentially unsized trailing field.
+                let mut new_substs = substs_a.clone();
+                for &i in &ty_params {
+                    new_substs.types.get_mut_slice(TypeSpace)[i] = tcx.types.err;
+                }
+                for &ty in fields.split_last().unwrap().1 {
+                    if ty.subst(tcx, &new_substs).references_error() {
+                        return Err(Unimplemented);
+                    }
+                }
+
+                // Extract Field<T> and Field<U> from Struct<T> and Struct<U>.
+                let inner_source = field.subst(tcx, substs_a);
+                let inner_target = field.subst(tcx, substs_b);
+
+                // Check that the source structure with the target's
+                // type parameters is a subtype of the target.
+                for &i in &ty_params {
+                    let param_b = *substs_b.types.get(TypeSpace, i);
+                    new_substs.types.get_mut_slice(TypeSpace)[i] = param_b;
+                }
+                let new_struct = tcx.mk_struct(def, tcx.mk_substs(new_substs));
+                let origin = TypeOrigin::Misc(obligation.cause.span);
+                let InferOk { obligations, .. } =
+                    self.infcx.sub_types(false, origin, new_struct, target)
+                    .map_err(|_| Unimplemented)?;
+                // FIXME(#32730) propagate obligations
+                assert!(obligations.is_empty());
+
+                // Construct the nested Field<T>: Unsize<Field<U>> predicate.
+                nested.push(util::predicate_for_trait_def(tcx,
+                    obligation.cause.clone(),
+                    obligation.predicate.def_id(),
+                    obligation.recursion_depth + 1,
+                    inner_source,
+                    vec![inner_target]));
+            }
+
+            _ => bug!()
+        };
+
+        Ok(VtableBuiltinData { nested: nested })
+    }
+
+    ///////////////////////////////////////////////////////////////////////////
+    // Matching
+    //
+    // Matching is a common path used for both evaluation and
+    // confirmation.  It basically unifies types that appear in impls
+    // and traits. This does affect the surrounding environment;
+    // therefore, when used during evaluation, match routines must be
+    // run inside of a `probe()` so that their side-effects are
+    // contained.
+
+    fn rematch_impl(&mut self,
+                    impl_def_id: DefId,
+                    obligation: &TraitObligation<'tcx>,
+                    snapshot: &infer::CombinedSnapshot)
+                    -> (Normalized<'tcx, Substs<'tcx>>, infer::SkolemizationMap)
+    {
+        match self.match_impl(impl_def_id, obligation, snapshot) {
+            Ok((substs, skol_map)) => (substs, skol_map),
+            Err(()) => {
+                bug!("Impl {:?} was matchable against {:?} but now is not",
+                     impl_def_id,
+                     obligation);
+            }
+        }
+    }
+
+    fn match_impl(&mut self,
+                  impl_def_id: DefId,
+                  obligation: &TraitObligation<'tcx>,
+                  snapshot: &infer::CombinedSnapshot)
+                  -> Result<(Normalized<'tcx, Substs<'tcx>>,
+                             infer::SkolemizationMap), ()>
+    {
+        let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();
+
+        // Before we create the substitutions and everything, first
+        // consider a "quick reject". This avoids creating types and
+        // so forth that we don't actually need.
+        if self.fast_reject_trait_refs(obligation, &impl_trait_ref) {
+            return Err(());
+        }
+
+        let (skol_obligation, skol_map) = self.infcx().skolemize_late_bound_regions(
+            &obligation.predicate,
+            snapshot);
+        let skol_obligation_trait_ref = skol_obligation.trait_ref;
+
+        let impl_substs = util::fresh_type_vars_for_impl(self.infcx,
+                                                         obligation.cause.span,
+                                                         impl_def_id);
+
+        let impl_trait_ref = impl_trait_ref.subst(self.tcx(),
+                                                  &impl_substs);
+
+        let impl_trait_ref =
+            project::normalize_with_depth(self,
+                                          obligation.cause.clone(),
+                                          obligation.recursion_depth + 1,
+                                          &impl_trait_ref);
+
+        debug!("match_impl(impl_def_id={:?}, obligation={:?}, \
+               impl_trait_ref={:?}, skol_obligation_trait_ref={:?})",
+               impl_def_id,
+               obligation,
+               impl_trait_ref,
+               skol_obligation_trait_ref);
+
+        let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span);
+        let InferOk { obligations, .. } =
+            self.infcx.eq_trait_refs(false,
+                                     origin,
+                                     impl_trait_ref.value.clone(),
+                                     skol_obligation_trait_ref)
+            .map_err(|e| {
+                debug!("match_impl: failed eq_trait_refs due to `{}`", e);
+                ()
+            })?;
+        // FIXME(#32730) propagate obligations
+        assert!(obligations.is_empty());
+
+        if let Err(e) = self.infcx.leak_check(&skol_map, snapshot) {
+            debug!("match_impl: failed leak check due to `{}`", e);
+            return Err(());
+        }
+
+        debug!("match_impl: success impl_substs={:?}", impl_substs);
+        Ok((Normalized {
+            value: impl_substs,
+            obligations: impl_trait_ref.obligations
+        }, skol_map))
+    }
+
+    fn fast_reject_trait_refs(&mut self,
+                              obligation: &TraitObligation,
+                              impl_trait_ref: &ty::TraitRef)
+                              -> bool
+    {
+        // We can avoid creating type variables and doing the full
+        // substitution if we find that any of the input types, when
+        // simplified, do not match.
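+        //
+        // For instance (illustrative only): an obligation such as
+        // `Vec<$0> : Foo` can never be satisfied by an impl whose self
+        // type simplifies to a different constructor, e.g.
+        // `impl<K, V> Foo for HashMap<K, V>`, so such an impl can be
+        // rejected without creating any type variables.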
+
+        obligation.predicate.0.input_types().iter()
+            .zip(impl_trait_ref.input_types())
+            .any(|(&obligation_ty, &impl_ty)| {
+                let simplified_obligation_ty =
+                    fast_reject::simplify_type(self.tcx(), obligation_ty, true);
+                let simplified_impl_ty =
+                    fast_reject::simplify_type(self.tcx(), impl_ty, false);
+
+                simplified_obligation_ty.is_some() &&
+                    simplified_impl_ty.is_some() &&
+                    simplified_obligation_ty != simplified_impl_ty
+            })
+    }
+
+    /// Normalize `where_clause_trait_ref` and try to match it against
+    /// `obligation`.  If successful, return any predicates that
+    /// result from the normalization. Normalization is necessary
+    /// because where-clauses are stored in the parameter environment
+    /// unnormalized.
+    fn match_where_clause_trait_ref(&mut self,
+                                    obligation: &TraitObligation<'tcx>,
+                                    where_clause_trait_ref: ty::PolyTraitRef<'tcx>)
+                                    -> Result<Vec<PredicateObligation<'tcx>>,()>
+    {
+        self.match_poly_trait_ref(obligation, where_clause_trait_ref)?;
+        Ok(Vec::new())
+    }
+
+    /// Returns `Ok` if `poly_trait_ref` being true implies that the
+    /// obligation is satisfied.
+    fn match_poly_trait_ref(&self,
+                            obligation: &TraitObligation<'tcx>,
+                            poly_trait_ref: ty::PolyTraitRef<'tcx>)
+                            -> Result<(),()>
+    {
+        debug!("match_poly_trait_ref: obligation={:?} poly_trait_ref={:?}",
+               obligation,
+               poly_trait_ref);
+
+        let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span);
+        self.infcx.sub_poly_trait_refs(false,
+                                       origin,
+                                       poly_trait_ref,
+                                       obligation.predicate.to_poly_trait_ref())
+            // FIXME(#32730) propagate obligations
+            .map(|InferOk { obligations, .. }| assert!(obligations.is_empty()))
+            .map_err(|_| ())
+    }
+
+    ///////////////////////////////////////////////////////////////////////////
+    // Miscellany
+
+    fn match_fresh_trait_refs(&self,
+                              previous: &ty::PolyTraitRef<'tcx>,
+                              current: &ty::PolyTraitRef<'tcx>)
+                              -> bool
+    {
+        let mut matcher = ty::_match::Match::new(self.tcx());
+        matcher.relate(previous, current).is_ok()
+    }
+
+    fn push_stack<'o,'s:'o>(&mut self,
+                            previous_stack: TraitObligationStackList<'s, 'tcx>,
+                            obligation: &'o TraitObligation<'tcx>)
+                            -> TraitObligationStack<'o, 'tcx>
+    {
+        let fresh_trait_ref =
+            obligation.predicate.to_poly_trait_ref().fold_with(&mut self.freshener);
+
+        TraitObligationStack {
+            obligation: obligation,
+            fresh_trait_ref: fresh_trait_ref,
+            previous: previous_stack,
+        }
+    }
+
+    fn closure_trait_ref_unnormalized(&mut self,
+                                      obligation: &TraitObligation<'tcx>,
+                                      closure_def_id: DefId,
+                                      substs: &ty::ClosureSubsts<'tcx>)
+                                      -> ty::PolyTraitRef<'tcx>
+    {
+        let closure_type = self.infcx.closure_type(closure_def_id, substs);
+        let ty::Binder((trait_ref, _)) =
+            util::closure_trait_ref_and_return_type(self.tcx(),
+                                                    obligation.predicate.def_id(),
+                                                    obligation.predicate.0.self_ty(), // (1)
+                                                    &closure_type.sig,
+                                                    util::TupleArgumentsFlag::No);
+        // (1) Feels icky to skip the binder here, but OTOH we know
+        // that the self-type is an unboxed closure type and hence is
+        // in fact unparameterized (or at least does not reference any
+        // regions bound in the obligation). Still probably some
+        // refactoring could make this nicer.
+
+        ty::Binder(trait_ref)
+    }
+
+    fn closure_trait_ref(&mut self,
+                         obligation: &TraitObligation<'tcx>,
+                         closure_def_id: DefId,
+                         substs: &ty::ClosureSubsts<'tcx>)
+                         -> Normalized<'tcx, ty::PolyTraitRef<'tcx>>
+    {
+        let trait_ref = self.closure_trait_ref_unnormalized(
+            obligation, closure_def_id, substs);
+
+        // A closure signature can contain associated types which
+        // must be normalized.
+        normalize_with_depth(self,
+                             obligation.cause.clone(),
+                             obligation.recursion_depth+1,
+                             &trait_ref)
+    }
+
+    /// Returns the obligations that are implied by instantiating an
+    /// impl or trait. The obligations are substituted and fully
+    /// normalized. This is used when confirming an impl or default
+    /// impl.
+    fn impl_or_trait_obligations(&mut self,
+                                 cause: ObligationCause<'tcx>,
+                                 recursion_depth: usize,
+                                 def_id: DefId, // of impl or trait
+                                 substs: &Substs<'tcx>, // for impl or trait
+                                 skol_map: infer::SkolemizationMap,
+                                 snapshot: &infer::CombinedSnapshot)
+                                 -> Vec<PredicateObligation<'tcx>>
+    {
+        debug!("impl_or_trait_obligations(def_id={:?})", def_id);
+        let tcx = self.tcx();
+
+        // To allow for one-pass evaluation of the nested obligations,
+        // each predicate must be preceded by the obligations required
+        // to normalize it.
+        // for example, if we have:
+        //    impl<U: Iterator, V: Iterator<Item=U>> Foo for V where U::Item: Copy
+        // the impl will have the following predicates:
+        //    <V as Iterator>::Item = U,
+        //    U: Iterator, U: Sized,
+        //    V: Iterator, V: Sized,
+        //    <U as Iterator>::Item: Copy
+        // When we substitute, say, `V => IntoIter<u32>, U => $0`, the last
+        // obligation will normalize to `<$0 as Iterator>::Item = $1` and
+        // `$1: Copy`, so we must ensure the obligations are emitted in
+        // that order.
+        let predicates = tcx
+            .lookup_predicates(def_id)
+            .predicates.iter()
+            .flat_map(|predicate| {
+                let predicate =
+                    normalize_with_depth(self, cause.clone(), recursion_depth,
+                                         &predicate.subst(tcx, substs));
+                predicate.obligations.into_iter().chain(
+                    Some(Obligation {
+                        cause: cause.clone(),
+                        recursion_depth: recursion_depth,
+                        predicate: predicate.value
+                    }))
+            }).collect();
+        self.infcx().plug_leaks(skol_map, snapshot, &predicates)
+    }
+
+    #[allow(unused_comparisons)]
+    fn derived_cause(&self,
+                     obligation: &TraitObligation<'tcx>,
+                     variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>)
+                     -> ObligationCause<'tcx>
+    {
+        /*!
+         * Creates a cause for obligations that are derived from
+         * `obligation` by a recursive search (e.g., for a builtin
+         * bound, or eventually an `impl Foo for ..`). If `obligation`
+         * is itself a derived obligation, this is just a clone, but
+         * otherwise we create a "derived obligation" cause so as to
+         * keep track of the original root obligation for error
+         * reporting.
+         */
+
+        // NOTE(flaper87): As of now, it keeps track of the whole error
+        // chain. Ideally, we should have a way to configure this either
+        // by using -Z verbose or just a CLI argument.
+        if obligation.recursion_depth >= 0 {
+            let derived_cause = DerivedObligationCause {
+                parent_trait_ref: obligation.predicate.to_poly_trait_ref(),
+                parent_code: Rc::new(obligation.cause.code.clone())
+            };
+            let derived_code = variant(derived_cause);
+            ObligationCause::new(obligation.cause.span, obligation.cause.body_id, derived_code)
+        } else {
+            obligation.cause.clone()
+        }
+    }
+}
+
+impl<'tcx> SelectionCache<'tcx> {
+    pub fn new() -> SelectionCache<'tcx> {
+        SelectionCache {
+            hashmap: RefCell::new(FnvHashMap())
+        }
+    }
+}
+
+impl<'tcx> EvaluationCache<'tcx> {
+    pub fn new() -> EvaluationCache<'tcx> {
+        EvaluationCache {
+            hashmap: RefCell::new(FnvHashMap())
+        }
+    }
+}
+
+impl<'o,'tcx> TraitObligationStack<'o,'tcx> {
+    fn list(&'o self) -> TraitObligationStackList<'o,'tcx> {
+        TraitObligationStackList::with(self)
+    }
+
+    fn iter(&'o self) -> TraitObligationStackList<'o,'tcx> {
+        self.list()
+    }
+}
+
+#[derive(Copy, Clone)]
+struct TraitObligationStackList<'o,'tcx:'o> {
+    head: Option<&'o TraitObligationStack<'o,'tcx>>
+}
+
+impl<'o,'tcx> TraitObligationStackList<'o,'tcx> {
+    fn empty() -> TraitObligationStackList<'o,'tcx> {
+        TraitObligationStackList { head: None }
+    }
+
+    fn with(r: &'o TraitObligationStack<'o,'tcx>) -> TraitObligationStackList<'o,'tcx> {
+        TraitObligationStackList { head: Some(r) }
+    }
+}
+
+impl<'o,'tcx> Iterator for TraitObligationStackList<'o,'tcx>{
+    type Item = &'o TraitObligationStack<'o,'tcx>;
+
+    fn next(&mut self) -> Option<&'o TraitObligationStack<'o,'tcx>> {
+        match self.head {
+            Some(o) => {
+                *self = o.previous;
+                Some(o)
+            }
+            None => None
+        }
+    }
+}
+
+impl<'o,'tcx> fmt::Debug for TraitObligationStack<'o,'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "TraitObligationStack({:?})", self.obligation)
+    }
+}
+
+impl EvaluationResult {
+    fn may_apply(&self) -> bool {
+        match *self {
+            EvaluatedToOk |
+            EvaluatedToAmbig |
+            EvaluatedToUnknown => true,
+
+            EvaluatedToErr => false
+        }
+    }
+}
+
+impl MethodMatchResult {
+    pub fn may_apply(&self) -> bool {
+        match *self {
+            MethodMatched(_) => true,
+            MethodAmbiguous(_) => true,
+            MethodDidNotMatch => false,
+        }
+    }
+}
diff --git a/src/librustc/traits/specialize/mod.rs b/src/librustc/traits/specialize/mod.rs
new file mode 100644 (file)
index 0000000..6364751
--- /dev/null
@@ -0,0 +1,219 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Logic and data structures related to impl specialization, explained in
+// greater detail below.
+//
+// At the moment, this implementation supports only the simple "chain" rule:
+// If any two impls overlap, one must be a strict subset of the other.
+//
+// See traits/README.md for a bit more detail on how specialization
+// fits together with the rest of the trait machinery.
+
+use super::{SelectionContext, FulfillmentContext};
+use super::util::{fresh_type_vars_for_impl, impl_trait_ref_and_oblig};
+
+use middle::cstore::CrateStore;
+use hir::def_id::DefId;
+use infer::{self, InferCtxt, TypeOrigin};
+use middle::region;
+use ty::subst::{Subst, Substs};
+use traits::{self, ProjectionMode, ObligationCause, Normalized};
+use ty::{self, TyCtxt};
+use syntax::codemap::DUMMY_SP;
+
+pub mod specialization_graph;
+
+/// Information pertinent to an overlapping impl error.
+pub struct Overlap<'a, 'tcx: 'a> {
+    pub in_context: InferCtxt<'a, 'tcx>,
+    pub with_impl: DefId,
+    pub on_trait_ref: ty::TraitRef<'tcx>,
+}
+
+/// Given a subst for the requested impl, translate it to a subst
+/// appropriate for the actual item definition (whether it be in that impl,
+/// a parent impl, or the trait).
+/// When we have selected one impl, but are actually using item definitions from
+/// a parent impl providing a default, we need a way to translate between the
+/// type parameters of the two impls. Here the `source_impl` is the one we've
+/// selected, and `source_substs` is a substitution of its generics (and
+/// possibly some relevant `FnSpace` variables as well). And `target_node` is
+/// the impl/trait we're actually going to get the definition from. The resulting
+/// substitution will map from `target_node`'s generics to `source_impl`'s
+/// generics as instantiated by `source_substs`.
+///
+/// For example, consider the following scenario:
+///
+/// ```rust
+/// trait Foo { ... }
+/// impl<T, U> Foo for (T, U) { ... }  // target impl
+/// impl<V> Foo for (V, V) { ... }     // source impl
+/// ```
+///
+/// Suppose we have selected "source impl" with `V` instantiated with `u32`.
+/// This function will produce a substitution with `T` and `U` both mapping to `u32`.
+///
+/// Where clauses add some trickiness here, because they can be used to "define"
+/// an argument indirectly:
+///
+/// ```rust
+/// impl<'a, I, T: 'a> Iterator for Cloned<I>
+///    where I: Iterator<Item=&'a T>, T: Clone
+/// ```
+///
+/// In a case like this, the substitution for `T` is determined indirectly,
+/// through associated type projection. We deal with such cases by using
+/// *fulfillment* to relate the two impls, requiring that all projections are
+/// resolved.
+pub fn translate_substs<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                                  source_impl: DefId,
+                                  source_substs: &'tcx Substs<'tcx>,
+                                  target_node: specialization_graph::Node)
+                                  -> &'tcx Substs<'tcx> {
+    let source_trait_ref = infcx.tcx
+                                .impl_trait_ref(source_impl)
+                                .unwrap()
+                                .subst(infcx.tcx, &source_substs);
+
+    // translate the Self and TyParam parts of the substitution, since those
+    // vary across impls
+    let target_substs = match target_node {
+        specialization_graph::Node::Impl(target_impl) => {
+            // no need to translate if we're targeting the impl we started with
+            if source_impl == target_impl {
+                return source_substs;
+            }
+
+            fulfill_implication(infcx, source_trait_ref, target_impl).unwrap_or_else(|_| {
+                bug!("When translating substitutions for specialization, the expected \
+                      specialization failed to hold")
+            })
+        }
+        specialization_graph::Node::Trait(..) => source_trait_ref.substs.clone(),
+    };
+
+    // directly inherit the method generics, since those do not vary across impls
+    infcx.tcx.mk_substs(target_substs.with_method_from_subst(source_substs))
+}
+
+/// Is impl1 a specialization of impl2?
+///
+/// Specialization is determined by the sets of types to which the impls apply;
+/// impl1 specializes impl2 if it applies to a subset of the types impl2 applies
+/// to.
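+///
+/// As a hypothetical example (the trait below is made up and requires
+/// `#![feature(specialization)]`):
+///
+/// ```rust,ignore
+/// trait Greet { fn hi(&self); }
+/// impl<T> Greet for T  { default fn hi(&self) { /* generic */ } }
+/// impl Greet for u32   { fn hi(&self) { /* specialized */ } }
+/// ```
+///
+/// The `u32` impl applies to a strict subset of the types the blanket impl
+/// applies to, so it would be considered to specialize the blanket impl.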
+pub fn specializes(tcx: &TyCtxt, impl1_def_id: DefId, impl2_def_id: DefId) -> bool {
+    // The feature gate should prevent introducing new specializations, but not
+    // taking advantage of upstream ones.
+    if !tcx.sess.features.borrow().specialization &&
+        (impl1_def_id.is_local() || impl2_def_id.is_local()) {
+        return false;
+    }
+
+    // We determine whether there's a subset relationship by:
+    //
+    // - skolemizing impl1,
+    // - assuming the where clauses for impl1,
+    // - instantiating impl2 with fresh inference variables,
+    // - unifying,
+    // - attempting to prove the where clauses for impl2
+    //
+    // The last three steps are encapsulated in `fulfill_implication`.
+    //
+    // See RFC 1210 for more details and justification.
+
+    // Currently we do not allow e.g. a negative impl to specialize a positive one
+    if tcx.trait_impl_polarity(impl1_def_id) != tcx.trait_impl_polarity(impl2_def_id) {
+        return false;
+    }
+
+    let mut infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables, ProjectionMode::Topmost);
+
+    // create a parameter environment corresponding to a (skolemized) instantiation of impl1
+    let scheme = tcx.lookup_item_type(impl1_def_id);
+    let predicates = tcx.lookup_predicates(impl1_def_id);
+    let mut penv = tcx.construct_parameter_environment(DUMMY_SP,
+                                                       &scheme.generics,
+                                                       &predicates,
+                                                       region::DUMMY_CODE_EXTENT);
+    let impl1_trait_ref = tcx.impl_trait_ref(impl1_def_id)
+                             .unwrap()
+                             .subst(tcx, &penv.free_substs);
+
+    // Normalize the trait reference, adding any obligations that arise into the impl1 assumptions
+    let Normalized { value: impl1_trait_ref, obligations: normalization_obligations } = {
+        let selcx = &mut SelectionContext::new(&infcx);
+        traits::normalize(selcx, ObligationCause::dummy(), &impl1_trait_ref)
+    };
+    penv.caller_bounds.extend(normalization_obligations.into_iter().map(|o| o.predicate));
+
+    // Install the parameter environment, taking the predicates of impl1 as assumptions:
+    infcx.parameter_environment = penv;
+
+    // Attempt to prove that impl2 applies, given all of the above.
+    fulfill_implication(&infcx, impl1_trait_ref, impl2_def_id).is_ok()
+}
+
+/// Attempt to fulfill all obligations of `target_impl` after unification with
+/// `source_trait_ref`. If successful, returns a substitution for *all* the
+/// generics of `target_impl`, including both those needed to unify with
+/// `source_trait_ref` and those whose identity is determined via a where
+/// clause in the impl.
+fn fulfill_implication<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                                 source_trait_ref: ty::TraitRef<'tcx>,
+                                 target_impl: DefId)
+                                 -> Result<Substs<'tcx>, ()> {
+    infcx.commit_if_ok(|_| {
+        let selcx = &mut SelectionContext::new(&infcx);
+        let target_substs = fresh_type_vars_for_impl(&infcx, DUMMY_SP, target_impl);
+        let (target_trait_ref, obligations) = impl_trait_ref_and_oblig(selcx,
+                                                                       target_impl,
+                                                                       &target_substs);
+
+        // do the impls unify? If not, no specialization.
+        if let Err(_) = infer::mk_eq_trait_refs(&infcx,
+                                                true,
+                                                TypeOrigin::Misc(DUMMY_SP),
+                                                source_trait_ref,
+                                                target_trait_ref) {
+            debug!("fulfill_implication: {:?} does not unify with {:?}",
+                   source_trait_ref,
+                   target_trait_ref);
+            return Err(());
+        }
+
+        // attempt to prove all of the predicates for impl2 given those for impl1
+        // (which are packed up in penv)
+
+        let mut fulfill_cx = FulfillmentContext::new();
+        for oblig in obligations.into_iter() {
+            fulfill_cx.register_predicate_obligation(&infcx, oblig);
+        }
+
+        if let Err(errors) = infer::drain_fulfillment_cx(&infcx, &mut fulfill_cx, &()) {
+            // no dice!
+            debug!("fulfill_implication: for impls on {:?} and {:?}, could not fulfill: {:?} given \
+                    {:?}",
+                   source_trait_ref,
+                   target_trait_ref,
+                   errors,
+                   infcx.parameter_environment.caller_bounds);
+            Err(())
+        } else {
+            debug!("fulfill_implication: an impl for {:?} specializes {:?}",
+                   source_trait_ref,
+                   target_trait_ref);
+
+            // Now resolve the *substitution* we built for the target earlier, replacing
+            // the inference variables inside with whatever we got from fulfillment.
+            Ok(infcx.resolve_type_vars_if_possible(&target_substs))
+        }
+    })
+}
diff --git a/src/librustc/traits/specialize/specialization_graph.rs b/src/librustc/traits/specialize/specialization_graph.rs
new file mode 100644 (file)
index 0000000..3c65e36
--- /dev/null
@@ -0,0 +1,486 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::cell;
+use std::rc::Rc;
+
+use super::{Overlap, specializes};
+
+use middle::cstore::CrateStore;
+use hir::def_id::DefId;
+use infer;
+use traits::{self, ProjectionMode};
+use ty::{self, TyCtxt, ImplOrTraitItem, TraitDef, TypeFoldable};
+use ty::fast_reject::{self, SimplifiedType};
+use syntax::ast::Name;
+use util::nodemap::{DefIdMap, FnvHashMap};
+
+/// A per-trait graph of impls in specialization order. At the moment, this
+/// graph forms a tree rooted with the trait itself, with all other nodes
+/// representing impls, and parent-child relationships representing
+/// specializations.
+///
+/// The graph provides two key services:
+///
+/// - Construction, which implicitly checks for overlapping impls (i.e., impls
+///   that overlap but where neither specializes the other -- an artifact of the
+///   simple "chain" rule).
+///
+/// - Parent extraction. In particular, the graph can give you the *immediate*
+///   parents of a given specializing impl, which is needed for extracting
+///   default items amongst other things. In the simple "chain" rule, every impl
+///   has at most one parent.
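+///
+/// For instance, the (hypothetical) impls
+///
+/// ```rust,ignore
+/// impl<T> Foo for T { ... }       // a "root" impl: child of the trait node
+/// impl<T> Foo for Vec<T> { ... }  // child of the blanket impl
+/// impl Foo for Vec<u8> { ... }    // child of the `Vec<T>` impl
+/// ```
+///
+/// would form a single chain of specializations under the `Foo` trait node.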
+pub struct Graph {
+    // all impls have a parent; the "root" impls have as their parent the def_id
+    // of the trait
+    parent: DefIdMap<DefId>,
+
+    // the "root" impls are found by looking up the trait's def_id.
+    children: DefIdMap<Children>,
+}
+
+/// Children of a given impl, grouped into blanket/non-blanket varieties as is
+/// done in `TraitDef`.
+struct Children {
+    // Impls of a trait (or specializations of a given impl). To allow for
+    // quicker lookup, the impls are indexed by a simplified version of their
+    // `Self` type: impls with a simplifiable `Self` are stored in
+    // `nonblanket_impls` keyed by it, while all other impls are stored in
+    // `blanket_impls`.
+    //
+    // A similar division is used within `TraitDef`, but the lists there collect
+    // together *all* the impls for a trait, and are populated prior to building
+    // the specialization graph.
+
+    /// Impls of the trait.
+    nonblanket_impls: FnvHashMap<fast_reject::SimplifiedType, Vec<DefId>>,
+
+    /// Blanket impls associated with the trait.
+    blanket_impls: Vec<DefId>,
+}
+
+/// The result of attempting to insert an impl into a group of children.
+enum InsertResult<'a, 'tcx: 'a> {
+    /// The impl was inserted as a new child in this group of children.
+    BecameNewSibling,
+
+    /// The impl replaced an existing impl that specializes it.
+    Replaced(DefId),
+
+    /// The impl is a specialization of an existing child.
+    ShouldRecurseOn(DefId),
+
+    /// The impl has an unresolvable overlap with an existing child (neither
+    /// specializes the other).
+    Overlapped(Overlap<'a, 'tcx>),
+}
+
+impl Children {
+    fn new() -> Children {
+        Children {
+            nonblanket_impls: FnvHashMap(),
+            blanket_impls: vec![],
+        }
+    }
+
+    /// Insert an impl into this set of children without comparing to any existing impls
+    fn insert_blindly(&mut self, tcx: &TyCtxt, impl_def_id: DefId) {
+        let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
+        if let Some(sty) = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false) {
+            self.nonblanket_impls.entry(sty).or_insert(vec![]).push(impl_def_id)
+        } else {
+            self.blanket_impls.push(impl_def_id)
+        }
+    }
+
+    /// Attempt to insert an impl into this set of children, while comparing for
+    /// specialization relationships.
+    fn insert<'a, 'tcx>(&mut self,
+                        tcx: &'a TyCtxt<'tcx>,
+                        impl_def_id: DefId,
+                        simplified_self: Option<SimplifiedType>)
+                        -> InsertResult<'a, 'tcx>
+    {
+        for slot in match simplified_self {
+            Some(sty) => self.filtered_mut(sty),
+            None => self.iter_mut(),
+        } {
+            let possible_sibling = *slot;
+
+            let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, ProjectionMode::Topmost);
+            let overlap = traits::overlapping_impls(&infcx, possible_sibling, impl_def_id);
+
+            if let Some(impl_header) = overlap {
+                let le = specializes(tcx, impl_def_id, possible_sibling);
+                let ge = specializes(tcx, possible_sibling, impl_def_id);
+
+                if le && !ge {
+                    debug!("descending as child of TraitRef {:?}",
+                           tcx.impl_trait_ref(possible_sibling).unwrap());
+
+                    // the impl specializes possible_sibling
+                    return InsertResult::ShouldRecurseOn(possible_sibling);
+                } else if ge && !le {
+                    debug!("placing as parent of TraitRef {:?}",
+                           tcx.impl_trait_ref(possible_sibling).unwrap());
+
+                    // possible_sibling specializes the impl
+                    *slot = impl_def_id;
+                    return InsertResult::Replaced(possible_sibling);
+                } else {
+                    // overlap, but no specialization; error out
+                    return InsertResult::Overlapped(Overlap {
+                        with_impl: possible_sibling,
+                        on_trait_ref: impl_header.trait_ref.unwrap(),
+                        in_context: infcx,
+                    });
+                }
+            }
+        }
+
+        // no overlap with any potential siblings, so add as a new sibling
+        debug!("placing as new sibling");
+        self.insert_blindly(tcx, impl_def_id);
+        InsertResult::BecameNewSibling
+    }
+
+    fn iter_mut<'a>(&'a mut self) -> Box<Iterator<Item = &'a mut DefId> + 'a> {
+        let nonblanket = self.nonblanket_impls.iter_mut().flat_map(|(_, v)| v.iter_mut());
+        Box::new(self.blanket_impls.iter_mut().chain(nonblanket))
+    }
+
+    fn filtered_mut<'a>(&'a mut self, sty: SimplifiedType)
+                        -> Box<Iterator<Item = &'a mut DefId> + 'a> {
+        let nonblanket = self.nonblanket_impls.entry(sty).or_insert(vec![]).iter_mut();
+        Box::new(self.blanket_impls.iter_mut().chain(nonblanket))
+    }
+}
+
+impl Graph {
+    pub fn new() -> Graph {
+        Graph {
+            parent: Default::default(),
+            children: Default::default(),
+        }
+    }
+
+    /// Insert a local impl into the specialization graph. If an existing impl
+    /// conflicts with it (has overlap, but neither specializes the other),
+    /// information about the area of overlap is returned in the `Err`.
+    pub fn insert<'a, 'tcx>(&mut self,
+                            tcx: &'a TyCtxt<'tcx>,
+                            impl_def_id: DefId)
+                            -> Result<(), Overlap<'a, 'tcx>> {
+        assert!(impl_def_id.is_local());
+
+        let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
+        let trait_def_id = trait_ref.def_id;
+
+        debug!("insert({:?}): inserting TraitRef {:?} into specialization graph",
+               impl_def_id, trait_ref);
+
+        // if the reference itself contains an earlier error (e.g., due to a
+        // resolution failure), then we just insert the impl at the top level of
+        // the graph and claim that there's no overlap (in order to suppress
+        // bogus errors).
+        if trait_ref.references_error() {
+            debug!("insert: inserting dummy node for erroneous TraitRef {:?}, \
+                    impl_def_id={:?}, trait_def_id={:?}",
+                   trait_ref, impl_def_id, trait_def_id);
+
+            self.parent.insert(impl_def_id, trait_def_id);
+            self.children.entry(trait_def_id).or_insert(Children::new())
+                .insert_blindly(tcx, impl_def_id);
+            return Ok(());
+        }
+
+        let mut parent = trait_def_id;
+        let simplified = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false);
+
+        // Descend the specialization tree, where `parent` is the current parent node
+        loop {
+            use self::InsertResult::*;
+
+            let insert_result = self.children.entry(parent).or_insert(Children::new())
+                .insert(tcx, impl_def_id, simplified);
+
+            match insert_result {
+                BecameNewSibling => {
+                    break;
+                }
+                Replaced(new_child) => {
+                    self.parent.insert(new_child, impl_def_id);
+                    let mut new_children = Children::new();
+                    new_children.insert_blindly(tcx, new_child);
+                    self.children.insert(impl_def_id, new_children);
+                    break;
+                }
+                ShouldRecurseOn(new_parent) => {
+                    parent = new_parent;
+                }
+                Overlapped(error) => {
+                    return Err(error);
+                }
+            }
+        }
+
+        self.parent.insert(impl_def_id, parent);
+        Ok(())
+    }
+
+    /// Insert cached metadata mapping from a child impl back to its parent.
+    pub fn record_impl_from_cstore(&mut self, tcx: &TyCtxt, parent: DefId, child: DefId) {
+        if self.parent.insert(child, parent).is_some() {
+            bug!("When recording an impl from the crate store, information about its parent \
+                  was already present.");
+        }
+
+        self.children.entry(parent).or_insert(Children::new()).insert_blindly(tcx, child);
+    }
+
+    /// The parent of a given impl, which is the def id of the trait when the
+    /// impl is a "specialization root".
+    pub fn parent(&self, child: DefId) -> DefId {
+        *self.parent.get(&child).unwrap()
+    }
+}
+
+/// A node in the specialization graph is either an impl or a trait
+/// definition; either can serve as a source of item definitions.
+/// There is always exactly one trait definition node: the root.
+#[derive(Debug, Copy, Clone)]
+pub enum Node {
+    Impl(DefId),
+    Trait(DefId),
+}
+
+impl Node {
+    pub fn is_from_trait(&self) -> bool {
+        match *self {
+            Node::Trait(..) => true,
+            _ => false,
+        }
+    }
+
+    /// Iterate over the items defined directly by the given (impl or trait) node.
+    pub fn items<'a, 'tcx>(&self, tcx: &'a TyCtxt<'tcx>) -> NodeItems<'a, 'tcx> {
+        match *self {
+            Node::Impl(impl_def_id) => {
+                NodeItems::Impl {
+                    tcx: tcx,
+                    items: cell::Ref::map(tcx.impl_items.borrow(),
+                                          |impl_items| &impl_items[&impl_def_id]),
+                    idx: 0,
+                }
+            }
+            Node::Trait(trait_def_id) => {
+                NodeItems::Trait {
+                    items: tcx.trait_items(trait_def_id).clone(),
+                    idx: 0,
+                }
+            }
+        }
+    }
+
+    pub fn def_id(&self) -> DefId {
+        match *self {
+            Node::Impl(did) => did,
+            Node::Trait(did) => did,
+        }
+    }
+}
+
+/// An iterator over the items defined within a trait or impl.
+pub enum NodeItems<'a, 'tcx: 'a> {
+    Impl {
+        tcx: &'a TyCtxt<'tcx>,
+        items: cell::Ref<'a, Vec<ty::ImplOrTraitItemId>>,
+        idx: usize,
+    },
+    Trait {
+        items: Rc<Vec<ImplOrTraitItem<'tcx>>>,
+        idx: usize,
+    },
+}
+
+impl<'a, 'tcx> Iterator for NodeItems<'a, 'tcx> {
+    type Item = ImplOrTraitItem<'tcx>;
+    fn next(&mut self) -> Option<ImplOrTraitItem<'tcx>> {
+        match *self {
+            NodeItems::Impl { tcx, ref items, ref mut idx } => {
+                let items_table = tcx.impl_or_trait_items.borrow();
+                if *idx < items.len() {
+                    let item_def_id = items[*idx].def_id();
+                    let item = items_table[&item_def_id].clone();
+                    *idx += 1;
+                    Some(item)
+                } else {
+                    None
+                }
+            }
+            NodeItems::Trait { ref items, ref mut idx } => {
+                if *idx < items.len() {
+                    let item = items[*idx].clone();
+                    *idx += 1;
+                    Some(item)
+                } else {
+                    None
+                }
+            }
+        }
+    }
+}
+
+pub struct Ancestors<'a, 'tcx: 'a> {
+    trait_def: &'a TraitDef<'tcx>,
+    current_source: Option<Node>,
+}
+
+impl<'a, 'tcx> Iterator for Ancestors<'a, 'tcx> {
+    type Item = Node;
+    fn next(&mut self) -> Option<Node> {
+        let cur = self.current_source.take();
+        if let Some(Node::Impl(cur_impl)) = cur {
+            let parent = self.trait_def.specialization_graph.borrow().parent(cur_impl);
+            if parent == self.trait_def.def_id() {
+                self.current_source = Some(Node::Trait(parent));
+            } else {
+                self.current_source = Some(Node::Impl(parent));
+            }
+        }
+        cur
+    }
+}
+
+pub struct NodeItem<T> {
+    pub node: Node,
+    pub item: T,
+}
+
+impl<T> NodeItem<T> {
+    pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> NodeItem<U> {
+        NodeItem {
+            node: self.node,
+            item: f(self.item),
+        }
+    }
+}
+
+pub struct TypeDefs<'a, 'tcx: 'a> {
+    // generally only invoked once or twice, so the box doesn't hurt
+    iter: Box<Iterator<Item = NodeItem<Rc<ty::AssociatedType<'tcx>>>> + 'a>,
+}
+
+impl<'a, 'tcx> Iterator for TypeDefs<'a, 'tcx> {
+    type Item = NodeItem<Rc<ty::AssociatedType<'tcx>>>;
+    fn next(&mut self) -> Option<Self::Item> {
+        self.iter.next()
+    }
+}
+
+pub struct FnDefs<'a, 'tcx: 'a> {
+    // generally only invoked once or twice, so the box doesn't hurt
+    iter: Box<Iterator<Item = NodeItem<Rc<ty::Method<'tcx>>>> + 'a>,
+}
+
+impl<'a, 'tcx> Iterator for FnDefs<'a, 'tcx> {
+    type Item = NodeItem<Rc<ty::Method<'tcx>>>;
+    fn next(&mut self) -> Option<Self::Item> {
+        self.iter.next()
+    }
+}
+
+pub struct ConstDefs<'a, 'tcx: 'a> {
+    // generally only invoked once or twice, so the box doesn't hurt
+    iter: Box<Iterator<Item = NodeItem<Rc<ty::AssociatedConst<'tcx>>>> + 'a>,
+}
+
+impl<'a, 'tcx> Iterator for ConstDefs<'a, 'tcx> {
+    type Item = NodeItem<Rc<ty::AssociatedConst<'tcx>>>;
+    fn next(&mut self) -> Option<Self::Item> {
+        self.iter.next()
+    }
+}
+
+impl<'a, 'tcx> Ancestors<'a, 'tcx> {
+    /// Search the items from the given ancestors, returning each type definition
+    /// with the given name.
+    pub fn type_defs(self, tcx: &'a TyCtxt<'tcx>, name: Name) -> TypeDefs<'a, 'tcx> {
+        let iter = self.flat_map(move |node| {
+            node.items(tcx)
+                .filter_map(move |item| {
+                    if let ty::TypeTraitItem(assoc_ty) = item {
+                        if assoc_ty.name == name {
+                            return Some(NodeItem {
+                                node: node,
+                                item: assoc_ty,
+                            });
+                        }
+                    }
+                    None
+                })
+
+        });
+        TypeDefs { iter: Box::new(iter) }
+    }
+
+    /// Search the items from the given ancestors, returning each fn definition
+    /// with the given name.
+    pub fn fn_defs(self, tcx: &'a TyCtxt<'tcx>, name: Name) -> FnDefs<'a, 'tcx> {
+        let iter = self.flat_map(move |node| {
+            node.items(tcx)
+                .filter_map(move |item| {
+                    if let ty::MethodTraitItem(method) = item {
+                        if method.name == name {
+                            return Some(NodeItem {
+                                node: node,
+                                item: method,
+                            });
+                        }
+                    }
+                    None
+                })
+
+        });
+        FnDefs { iter: Box::new(iter) }
+    }
+
+    /// Search the items from the given ancestors, returning each const
+    /// definition with the given name.
+    pub fn const_defs(self, tcx: &'a TyCtxt<'tcx>, name: Name) -> ConstDefs<'a, 'tcx> {
+        let iter = self.flat_map(move |node| {
+            node.items(tcx)
+                .filter_map(move |item| {
+                    if let ty::ConstTraitItem(konst) = item {
+                        if konst.name == name {
+                            return Some(NodeItem {
+                                node: node,
+                                item: konst,
+                            });
+                        }
+                    }
+                    None
+                })
+
+        });
+        ConstDefs { iter: Box::new(iter) }
+    }
+}
+
+/// Walk up the specialization ancestors of a given impl, starting with that
+/// impl itself.
+pub fn ancestors<'a, 'tcx>(trait_def: &'a TraitDef<'tcx>,
+                           start_from_impl: DefId)
+                           -> Ancestors<'a, 'tcx> {
+    Ancestors {
+        trait_def: trait_def,
+        current_source: Some(Node::Impl(start_from_impl)),
+    }
+}
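The parent map above is all that `ancestors` needs: every impl records the impl (or, for a specialization root, the trait) it specializes, and the iterator simply climbs that chain. A minimal standalone sketch of the same walk, with def-ids modelled as plain integers purely for illustration (not the compiler's `DefId` type):

```
use std::collections::HashMap;

/// Climb the recorded parent links from `start` up to the trait `root`,
/// mirroring the `Ancestors` iterator above in miniature.
fn ancestors(parent: &HashMap<u32, u32>, root: u32, start: u32) -> Vec<u32> {
    let mut chain = vec![start];
    let mut cur = start;
    while cur != root {
        cur = parent[&cur]; // every non-root impl has a parent entry
        chain.push(cur);
    }
    chain
}

fn main() {
    // trait = 0; impl 1 is a specialization root; impl 2 specializes impl 1.
    let mut parent = HashMap::new();
    parent.insert(1, 0);
    parent.insert(2, 1);
    assert_eq!(ancestors(&parent, 0, 2), vec![2, 1, 0]);
}
```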
diff --git a/src/librustc/traits/structural_impls.rs b/src/librustc/traits/structural_impls.rs
new file mode 100644 (file)
index 0000000..367e5f3
--- /dev/null
@@ -0,0 +1,256 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use traits;
+use traits::project::Normalized;
+use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
+
+use std::fmt;
+
+// structural impls for the structs in traits
+
+impl<'tcx, T: fmt::Debug> fmt::Debug for Normalized<'tcx, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "Normalized({:?},{:?})",
+               self.value,
+               self.obligations)
+    }
+}
+
+impl<'tcx> fmt::Debug for traits::RegionObligation<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "RegionObligation(sub_region={:?}, sup_type={:?})",
+               self.sub_region,
+               self.sup_type)
+    }
+}
+impl<'tcx, O: fmt::Debug> fmt::Debug for traits::Obligation<'tcx, O> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "Obligation(predicate={:?},depth={})",
+               self.predicate,
+               self.recursion_depth)
+    }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::Vtable<'tcx, N> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            super::VtableImpl(ref v) =>
+                write!(f, "{:?}", v),
+
+            super::VtableDefaultImpl(ref t) =>
+                write!(f, "{:?}", t),
+
+            super::VtableClosure(ref d) =>
+                write!(f, "{:?}", d),
+
+            super::VtableFnPointer(ref d) =>
+                write!(f, "VtableFnPointer({:?})", d),
+
+            super::VtableObject(ref d) =>
+                write!(f, "{:?}", d),
+
+            super::VtableParam(ref n) =>
+                write!(f, "VtableParam({:?})", n),
+
+            super::VtableBuiltin(ref d) =>
+                write!(f, "{:?}", d)
+        }
+    }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableImplData<'tcx, N> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "VtableImpl(impl_def_id={:?}, substs={:?}, nested={:?})",
+               self.impl_def_id,
+               self.substs,
+               self.nested)
+    }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableClosureData<'tcx, N> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "VtableClosure(closure_def_id={:?}, substs={:?}, nested={:?})",
+               self.closure_def_id,
+               self.substs,
+               self.nested)
+    }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableBuiltinData<N> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "VtableBuiltin(nested={:?})", self.nested)
+    }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableDefaultImplData<N> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "VtableDefaultImplData(trait_def_id={:?}, nested={:?})",
+               self.trait_def_id,
+               self.nested)
+    }
+}
+
+impl<'tcx> fmt::Debug for traits::VtableObjectData<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "VtableObject(upcast={:?}, vtable_base={})",
+               self.upcast_trait_ref,
+               self.vtable_base)
+    }
+}
+
+impl<'tcx> fmt::Debug for traits::FulfillmentError<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "FulfillmentError({:?},{:?})",
+               self.obligation,
+               self.code)
+    }
+}
+
+impl<'tcx> fmt::Debug for traits::FulfillmentErrorCode<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            super::CodeSelectionError(ref e) => write!(f, "{:?}", e),
+            super::CodeProjectionError(ref e) => write!(f, "{:?}", e),
+            super::CodeAmbiguity => write!(f, "Ambiguity")
+        }
+    }
+}
+
+impl<'tcx> fmt::Debug for traits::MismatchedProjectionTypes<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "MismatchedProjectionTypes({:?})", self.err)
+    }
+}
+
+impl<'tcx, O: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Obligation<'tcx, O>
+{
+    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        traits::Obligation {
+            cause: self.cause.clone(),
+            recursion_depth: self.recursion_depth,
+            predicate: self.predicate.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.predicate.visit_with(visitor)
+    }
+}
+
+impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableImplData<'tcx, N> {
+    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        let substs = self.substs.fold_with(folder);
+        traits::VtableImplData {
+            impl_def_id: self.impl_def_id,
+            substs: folder.tcx().mk_substs(substs),
+            nested: self.nested.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.substs.visit_with(visitor) || self.nested.visit_with(visitor)
+    }
+}
+
+impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableClosureData<'tcx, N> {
+    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        traits::VtableClosureData {
+            closure_def_id: self.closure_def_id,
+            substs: self.substs.fold_with(folder),
+            nested: self.nested.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.substs.visit_with(visitor) || self.nested.visit_with(visitor)
+    }
+}
+
+impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableDefaultImplData<N> {
+    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        traits::VtableDefaultImplData {
+            trait_def_id: self.trait_def_id,
+            nested: self.nested.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.nested.visit_with(visitor)
+    }
+}
+
+impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableBuiltinData<N> {
+    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        traits::VtableBuiltinData {
+            nested: self.nested.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.nested.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for traits::VtableObjectData<'tcx> {
+    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        traits::VtableObjectData {
+            upcast_trait_ref: self.upcast_trait_ref.fold_with(folder),
+            vtable_base: self.vtable_base
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.upcast_trait_ref.visit_with(visitor)
+    }
+}
+
+impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Vtable<'tcx, N> {
+    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        match *self {
+            traits::VtableImpl(ref v) => traits::VtableImpl(v.fold_with(folder)),
+            traits::VtableDefaultImpl(ref t) => traits::VtableDefaultImpl(t.fold_with(folder)),
+            traits::VtableClosure(ref d) => {
+                traits::VtableClosure(d.fold_with(folder))
+            }
+            traits::VtableFnPointer(ref d) => {
+                traits::VtableFnPointer(d.fold_with(folder))
+            }
+            traits::VtableParam(ref n) => traits::VtableParam(n.fold_with(folder)),
+            traits::VtableBuiltin(ref d) => traits::VtableBuiltin(d.fold_with(folder)),
+            traits::VtableObject(ref d) => traits::VtableObject(d.fold_with(folder)),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        match *self {
+            traits::VtableImpl(ref v) => v.visit_with(visitor),
+            traits::VtableDefaultImpl(ref t) => t.visit_with(visitor),
+            traits::VtableClosure(ref d) => d.visit_with(visitor),
+            traits::VtableFnPointer(ref d) => d.visit_with(visitor),
+            traits::VtableParam(ref n) => n.visit_with(visitor),
+            traits::VtableBuiltin(ref d) => d.visit_with(visitor),
+            traits::VtableObject(ref d) => d.visit_with(visitor),
+        }
+    }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Normalized<'tcx, T> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        Normalized {
+            value: self.value.fold_with(folder),
+            obligations: self.obligations.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.value.visit_with(visitor) || self.obligations.visit_with(visitor)
+    }
+}
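This file is made up of hand-written `Debug` and `TypeFoldable` impls because the real types carry interned data the derives cannot handle. A compilable sketch of the same `Debug` pattern on a stand-in `Normalized` type (the field types here are assumptions chosen only so the snippet builds on its own):

```
use std::fmt;

// Stand-in for traits::project::Normalized; the real `obligations` field is a
// vector of predicate obligations, simplified to strings here.
struct Normalized<T> {
    value: T,
    obligations: Vec<String>,
}

impl<T: fmt::Debug> fmt::Debug for Normalized<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Same output shape as the impl above: Normalized(value,obligations)
        write!(f, "Normalized({:?},{:?})", self.value, self.obligations)
    }
}

fn main() {
    let n = Normalized { value: 42, obligations: vec![] };
    assert_eq!(format!("{:?}", n), "Normalized(42,[])");
}
```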
diff --git a/src/librustc/traits/util.rs b/src/librustc/traits/util.rs
new file mode 100644 (file)
index 0000000..7668b8b
--- /dev/null
@@ -0,0 +1,508 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use hir::def_id::DefId;
+use infer::InferCtxt;
+use ty::subst::{Subst, Substs};
+use ty::{self, Ty, TyCtxt, ToPredicate, ToPolyTraitRef};
+use syntax::codemap::Span;
+use util::common::ErrorReported;
+use util::nodemap::FnvHashSet;
+
+use super::{Obligation, ObligationCause, PredicateObligation, SelectionContext, Normalized};
+
+struct PredicateSet<'a,'tcx:'a> {
+    tcx: &'a TyCtxt<'tcx>,
+    set: FnvHashSet<ty::Predicate<'tcx>>,
+}
+
+impl<'a,'tcx> PredicateSet<'a,'tcx> {
+    fn new(tcx: &'a TyCtxt<'tcx>) -> PredicateSet<'a,'tcx> {
+        PredicateSet { tcx: tcx, set: FnvHashSet() }
+    }
+
+    fn insert(&mut self, pred: &ty::Predicate<'tcx>) -> bool {
+        // We have to be careful here because we want
+        //
+        //    for<'a> Foo<&'a int>
+        //
+        // and
+        //
+        //    for<'b> Foo<&'b int>
+        //
+        // to be considered equivalent. So normalize all late-bound
+        // regions before we throw things into the underlying set.
+        let normalized_pred = match *pred {
+            ty::Predicate::Trait(ref data) =>
+                ty::Predicate::Trait(self.tcx.anonymize_late_bound_regions(data)),
+
+            ty::Predicate::Equate(ref data) =>
+                ty::Predicate::Equate(self.tcx.anonymize_late_bound_regions(data)),
+
+            ty::Predicate::RegionOutlives(ref data) =>
+                ty::Predicate::RegionOutlives(self.tcx.anonymize_late_bound_regions(data)),
+
+            ty::Predicate::TypeOutlives(ref data) =>
+                ty::Predicate::TypeOutlives(self.tcx.anonymize_late_bound_regions(data)),
+
+            ty::Predicate::Projection(ref data) =>
+                ty::Predicate::Projection(self.tcx.anonymize_late_bound_regions(data)),
+
+            ty::Predicate::WellFormed(data) =>
+                ty::Predicate::WellFormed(data),
+
+            ty::Predicate::ObjectSafe(data) =>
+                ty::Predicate::ObjectSafe(data),
+        };
+        self.set.insert(normalized_pred)
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// `Elaboration` iterator
+///////////////////////////////////////////////////////////////////////////
+
+/// "Elaboration" is the process of identifying all the predicates that
+/// are implied by a source predicate. Currently this basically means
+/// walking the "supertraits" and other similar assumptions. For
+/// example, if we know that `T : Ord`, the elaborator would deduce
+/// that `T : PartialOrd` holds as well. Similarly, if we have `trait
+/// Foo : 'static`, and we know that `T : Foo`, then we know that `T :
+/// 'static`.
+pub struct Elaborator<'cx, 'tcx:'cx> {
+    tcx: &'cx TyCtxt<'tcx>,
+    stack: Vec<ty::Predicate<'tcx>>,
+    visited: PredicateSet<'cx,'tcx>,
+}
+
+pub fn elaborate_trait_ref<'cx, 'tcx>(
+    tcx: &'cx TyCtxt<'tcx>,
+    trait_ref: ty::PolyTraitRef<'tcx>)
+    -> Elaborator<'cx, 'tcx>
+{
+    elaborate_predicates(tcx, vec![trait_ref.to_predicate()])
+}
+
+pub fn elaborate_trait_refs<'cx, 'tcx>(
+    tcx: &'cx TyCtxt<'tcx>,
+    trait_refs: &[ty::PolyTraitRef<'tcx>])
+    -> Elaborator<'cx, 'tcx>
+{
+    let predicates = trait_refs.iter()
+                               .map(|trait_ref| trait_ref.to_predicate())
+                               .collect();
+    elaborate_predicates(tcx, predicates)
+}
+
+pub fn elaborate_predicates<'cx, 'tcx>(
+    tcx: &'cx TyCtxt<'tcx>,
+    mut predicates: Vec<ty::Predicate<'tcx>>)
+    -> Elaborator<'cx, 'tcx>
+{
+    let mut visited = PredicateSet::new(tcx);
+    predicates.retain(|pred| visited.insert(pred));
+    Elaborator { tcx: tcx, stack: predicates, visited: visited }
+}
+
+impl<'cx, 'tcx> Elaborator<'cx, 'tcx> {
+    pub fn filter_to_traits(self) -> FilterToTraits<Elaborator<'cx, 'tcx>> {
+        FilterToTraits::new(self)
+    }
+
+    fn push(&mut self, predicate: &ty::Predicate<'tcx>) {
+        match *predicate {
+            ty::Predicate::Trait(ref data) => {
+                // Predicates declared on the trait.
+                let predicates = self.tcx.lookup_super_predicates(data.def_id());
+
+                let mut predicates: Vec<_> =
+                    predicates.predicates
+                              .iter()
+                              .map(|p| p.subst_supertrait(self.tcx, &data.to_poly_trait_ref()))
+                              .collect();
+
+                debug!("super_predicates: data={:?} predicates={:?}",
+                       data, predicates);
+
+                // Only keep those bounds that we haven't already
+                // seen.  This is necessary to prevent infinite
+                // recursion in some cases.  One common case is when
+                // people define `trait Sized: Sized { }` rather than `trait
+                // Sized { }`.
+                predicates.retain(|r| self.visited.insert(r));
+
+                self.stack.extend(predicates);
+            }
+            ty::Predicate::WellFormed(..) => {
+                // Currently, we do not elaborate WF predicates,
+                // although we easily could.
+            }
+            ty::Predicate::ObjectSafe(..) => {
+                // Currently, we do not elaborate object-safe
+                // predicates.
+            }
+            ty::Predicate::Equate(..) => {
+                // Currently, we do not "elaborate" predicates like
+                // `X == Y`, though conceivably we might. For example,
+                // `&X == &Y` implies that `X == Y`.
+            }
+            ty::Predicate::Projection(..) => {
+                // Nothing to elaborate in a projection predicate.
+            }
+            ty::Predicate::RegionOutlives(..) |
+            ty::Predicate::TypeOutlives(..) => {
+                // Currently, we do not "elaborate" predicates like
+                // `'a : 'b` or `T : 'a`.  We could conceivably do
+                // more here.  For example,
+                //
+                //     &'a int : 'b
+                //
+                // implies that
+                //
+                //     'a : 'b
+                //
+                // and we could get even more if we took WF
+                // constraints into account. For example,
+                //
+                //     &'a &'b int : 'c
+                //
+                // implies that
+                //
+                //     'b : 'a
+                //     'a : 'c
+            }
+        }
+    }
+}
+
+impl<'cx, 'tcx> Iterator for Elaborator<'cx, 'tcx> {
+    type Item = ty::Predicate<'tcx>;
+
+    fn next(&mut self) -> Option<ty::Predicate<'tcx>> {
+        // Extract next item from top-most stack frame, if any.
+        let next_predicate = match self.stack.pop() {
+            Some(predicate) => predicate,
+            None => {
+                // No more stack frames. Done.
+                return None;
+            }
+        };
+        self.push(&next_predicate);
+        return Some(next_predicate);
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Supertrait iterator
+///////////////////////////////////////////////////////////////////////////
+
+pub type Supertraits<'cx, 'tcx> = FilterToTraits<Elaborator<'cx, 'tcx>>;
+
+pub fn supertraits<'cx, 'tcx>(tcx: &'cx TyCtxt<'tcx>,
+                              trait_ref: ty::PolyTraitRef<'tcx>)
+                              -> Supertraits<'cx, 'tcx>
+{
+    elaborate_trait_ref(tcx, trait_ref).filter_to_traits()
+}
+
+pub fn transitive_bounds<'cx, 'tcx>(tcx: &'cx TyCtxt<'tcx>,
+                                    bounds: &[ty::PolyTraitRef<'tcx>])
+                                    -> Supertraits<'cx, 'tcx>
+{
+    elaborate_trait_refs(tcx, bounds).filter_to_traits()
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Iterator over def-ids of supertraits
+
+pub struct SupertraitDefIds<'cx, 'tcx:'cx> {
+    tcx: &'cx TyCtxt<'tcx>,
+    stack: Vec<DefId>,
+    visited: FnvHashSet<DefId>,
+}
+
+pub fn supertrait_def_ids<'cx, 'tcx>(tcx: &'cx TyCtxt<'tcx>,
+                                     trait_def_id: DefId)
+                                     -> SupertraitDefIds<'cx, 'tcx>
+{
+    SupertraitDefIds {
+        tcx: tcx,
+        stack: vec![trait_def_id],
+        visited: Some(trait_def_id).into_iter().collect(),
+    }
+}
+
+impl<'cx, 'tcx> Iterator for SupertraitDefIds<'cx, 'tcx> {
+    type Item = DefId;
+
+    fn next(&mut self) -> Option<DefId> {
+        let def_id = match self.stack.pop() {
+            Some(def_id) => def_id,
+            None => { return None; }
+        };
+
+        let predicates = self.tcx.lookup_super_predicates(def_id);
+        let visited = &mut self.visited;
+        self.stack.extend(
+            predicates.predicates
+                      .iter()
+                      .filter_map(|p| p.to_opt_poly_trait_ref())
+                      .map(|t| t.def_id())
+                      .filter(|&super_def_id| visited.insert(super_def_id)));
+        Some(def_id)
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Other
+///////////////////////////////////////////////////////////////////////////
+
+/// A filter around an iterator of predicates that makes it yield up
+/// just trait references.
+pub struct FilterToTraits<I> {
+    base_iterator: I
+}
+
+impl<I> FilterToTraits<I> {
+    fn new(base: I) -> FilterToTraits<I> {
+        FilterToTraits { base_iterator: base }
+    }
+}
+
+impl<'tcx,I:Iterator<Item=ty::Predicate<'tcx>>> Iterator for FilterToTraits<I> {
+    type Item = ty::PolyTraitRef<'tcx>;
+
+    fn next(&mut self) -> Option<ty::PolyTraitRef<'tcx>> {
+        loop {
+            match self.base_iterator.next() {
+                None => {
+                    return None;
+                }
+                Some(ty::Predicate::Trait(data)) => {
+                    return Some(data.to_poly_trait_ref());
+                }
+                Some(_) => {
+                }
+            }
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Other
+///////////////////////////////////////////////////////////////////////////
+
+/// Instantiate all bound parameters of the impl with the given substs,
+/// returning the resulting trait ref and all obligations that arise.
+/// The obligations are closed under normalization.
+pub fn impl_trait_ref_and_oblig<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
+                                         impl_def_id: DefId,
+                                         impl_substs: &Substs<'tcx>)
+                                         -> (ty::TraitRef<'tcx>,
+                                             Vec<PredicateObligation<'tcx>>)
+{
+    let impl_trait_ref =
+        selcx.tcx().impl_trait_ref(impl_def_id).unwrap();
+    let impl_trait_ref =
+        impl_trait_ref.subst(selcx.tcx(), impl_substs);
+    let Normalized { value: impl_trait_ref, obligations: normalization_obligations1 } =
+        super::normalize(selcx, ObligationCause::dummy(), &impl_trait_ref);
+
+    let predicates = selcx.tcx().lookup_predicates(impl_def_id);
+    let predicates = predicates.instantiate(selcx.tcx(), impl_substs);
+    let Normalized { value: predicates, obligations: normalization_obligations2 } =
+        super::normalize(selcx, ObligationCause::dummy(), &predicates);
+    let impl_obligations =
+        predicates_for_generics(ObligationCause::dummy(), 0, &predicates);
+
+    let impl_obligations: Vec<_> =
+        impl_obligations.into_iter()
+        .chain(normalization_obligations1)
+        .chain(normalization_obligations2)
+        .collect();
+
+    (impl_trait_ref, impl_obligations)
+}
+
+// determine the `self` type, using fresh variables for all variables
+// declared on the impl declaration, e.g., `impl<A,B> Trait for Box<[(A,B)]>`
+// would return ($0, $1) where $0 and $1 are freshly instantiated type
+// variables.
+pub fn fresh_type_vars_for_impl<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                                          span: Span,
+                                          impl_def_id: DefId)
+                                          -> Substs<'tcx>
+{
+    let tcx = infcx.tcx;
+    let impl_generics = tcx.lookup_item_type(impl_def_id).generics;
+    infcx.fresh_substs_for_generics(span, &impl_generics)
+}
+
+/// See `super::obligations_for_generics`
+pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>,
+                                     recursion_depth: usize,
+                                     generic_bounds: &ty::InstantiatedPredicates<'tcx>)
+                                     -> Vec<PredicateObligation<'tcx>>
+{
+    debug!("predicates_for_generics(generic_bounds={:?})",
+           generic_bounds);
+
+    generic_bounds.predicates.iter().map(|predicate| {
+        Obligation { cause: cause.clone(),
+                     recursion_depth: recursion_depth,
+                     predicate: predicate.clone() }
+    }).collect()
+}
+
+pub fn trait_ref_for_builtin_bound<'tcx>(
+    tcx: &TyCtxt<'tcx>,
+    builtin_bound: ty::BuiltinBound,
+    param_ty: Ty<'tcx>)
+    -> Result<ty::TraitRef<'tcx>, ErrorReported>
+{
+    match tcx.lang_items.from_builtin_kind(builtin_bound) {
+        Ok(def_id) => {
+            Ok(ty::TraitRef {
+                def_id: def_id,
+                substs: tcx.mk_substs(Substs::empty().with_self_ty(param_ty))
+            })
+        }
+        Err(e) => {
+            tcx.sess.err(&e);
+            Err(ErrorReported)
+        }
+    }
+}
+
+pub fn predicate_for_trait_ref<'tcx>(
+    cause: ObligationCause<'tcx>,
+    trait_ref: ty::TraitRef<'tcx>,
+    recursion_depth: usize)
+    -> PredicateObligation<'tcx>
+{
+    Obligation {
+        cause: cause,
+        recursion_depth: recursion_depth,
+        predicate: trait_ref.to_predicate(),
+    }
+}
+
+pub fn predicate_for_trait_def<'tcx>(
+    tcx: &TyCtxt<'tcx>,
+    cause: ObligationCause<'tcx>,
+    trait_def_id: DefId,
+    recursion_depth: usize,
+    param_ty: Ty<'tcx>,
+    ty_params: Vec<Ty<'tcx>>)
+    -> PredicateObligation<'tcx>
+{
+    let trait_ref = ty::TraitRef {
+        def_id: trait_def_id,
+        substs: tcx.mk_substs(Substs::new_trait(ty_params, vec![], param_ty))
+    };
+    predicate_for_trait_ref(cause, trait_ref, recursion_depth)
+}
+
+pub fn predicate_for_builtin_bound<'tcx>(
+    tcx: &TyCtxt<'tcx>,
+    cause: ObligationCause<'tcx>,
+    builtin_bound: ty::BuiltinBound,
+    recursion_depth: usize,
+    param_ty: Ty<'tcx>)
+    -> Result<PredicateObligation<'tcx>, ErrorReported>
+{
+    let trait_ref = trait_ref_for_builtin_bound(tcx, builtin_bound, param_ty)?;
+    Ok(predicate_for_trait_ref(cause, trait_ref, recursion_depth))
+}
+
+/// Cast a trait reference into a reference to one of its super
+/// traits; returns `None` if `target_trait_def_id` is not a
+/// supertrait.
+pub fn upcast<'tcx>(tcx: &TyCtxt<'tcx>,
+                    source_trait_ref: ty::PolyTraitRef<'tcx>,
+                    target_trait_def_id: DefId)
+                    -> Vec<ty::PolyTraitRef<'tcx>>
+{
+    if source_trait_ref.def_id() == target_trait_def_id {
+        return vec![source_trait_ref]; // shortcut the most common case
+    }
+
+    supertraits(tcx, source_trait_ref)
+        .filter(|r| r.def_id() == target_trait_def_id)
+        .collect()
+}
+
+/// Given a trait `trait_ref`, returns the number of vtable entries
+/// that come from `trait_ref`, excluding its supertraits. Used in
+/// computing the vtable base for an upcast trait of a trait object.
+pub fn count_own_vtable_entries<'tcx>(tcx: &TyCtxt<'tcx>,
+                                      trait_ref: ty::PolyTraitRef<'tcx>)
+                                      -> usize {
+    let mut entries = 0;
+    // Count number of methods and add them to the total offset.
+    // Skip over associated types and constants.
+    for trait_item in &tcx.trait_items(trait_ref.def_id())[..] {
+        if let ty::MethodTraitItem(_) = *trait_item {
+            entries += 1;
+        }
+    }
+    entries
+}
+
+/// Given an upcast trait object described by `object`, returns the
+/// index of the method `method_def_id` (which should be part of
+/// `object.upcast_trait_ref`) within the vtable for `object`.
+pub fn get_vtable_index_of_object_method<'tcx>(tcx: &TyCtxt<'tcx>,
+                                               object: &super::VtableObjectData<'tcx>,
+                                               method_def_id: DefId) -> usize {
+    // Count number of methods preceding the one we are selecting and
+    // add them to the total offset.
+    // Skip over associated types and constants.
+    let mut entries = object.vtable_base;
+    for trait_item in &tcx.trait_items(object.upcast_trait_ref.def_id())[..] {
+        if trait_item.def_id() == method_def_id {
+            // The item with the ID we were given really ought to be a method.
+            assert!(match *trait_item {
+                ty::MethodTraitItem(_) => true,
+                _ => false
+            });
+
+            return entries;
+        }
+        if let ty::MethodTraitItem(_) = *trait_item {
+            entries += 1;
+        }
+    }
+
+    bug!("get_vtable_index_of_object_method: {:?} was not found",
+         method_def_id);
+}
+
+pub enum TupleArgumentsFlag { Yes, No }
+
+pub fn closure_trait_ref_and_return_type<'tcx>(
+    tcx: &TyCtxt<'tcx>,
+    fn_trait_def_id: DefId,
+    self_ty: Ty<'tcx>,
+    sig: &ty::PolyFnSig<'tcx>,
+    tuple_arguments: TupleArgumentsFlag)
+    -> ty::Binder<(ty::TraitRef<'tcx>, Ty<'tcx>)>
+{
+    let arguments_tuple = match tuple_arguments {
+        TupleArgumentsFlag::No => sig.0.inputs[0],
+        TupleArgumentsFlag::Yes => tcx.mk_tup(sig.0.inputs.to_vec()),
+    };
+    let trait_substs = Substs::new_trait(vec![arguments_tuple], vec![], self_ty);
+    let trait_ref = ty::TraitRef {
+        def_id: fn_trait_def_id,
+        substs: tcx.mk_substs(trait_substs),
+    };
+    ty::Binder((trait_ref, sig.0.output.unwrap_or(tcx.mk_nil())))
+}
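The `Elaborator` above is essentially a depth-first worklist over the supertrait graph with a visited set to break cycles. A self-contained sketch of that idea, with traits as plain strings and the supertrait edges in a map (both are illustrative assumptions; the real code works on `ty::Predicate`s):

```
use std::collections::{HashMap, HashSet};

/// Collect `start` and everything reachable through supertrait edges,
/// visiting each trait once; the `trait Sized: Sized` cycle mentioned in the
/// comments above is handled by the visited set.
fn elaborate(supers: &HashMap<&str, Vec<&str>>, start: &str) -> Vec<String> {
    let mut visited: HashSet<&str> = HashSet::new();
    visited.insert(start);
    let mut stack = vec![start];
    let mut out = Vec::new();
    while let Some(t) = stack.pop() {
        out.push(t.to_string());
        for &s in supers.get(t).into_iter().flatten() {
            if visited.insert(s) {
                stack.push(s);
            }
        }
    }
    out
}

fn main() {
    let mut supers = HashMap::new();
    supers.insert("Ord", vec!["Eq", "PartialOrd"]);
    supers.insert("Eq", vec!["PartialEq"]);
    supers.insert("PartialOrd", vec!["PartialEq"]);
    // Elaborating `T: Ord` also yields Eq, PartialOrd and PartialEq, each once.
    println!("{:?}", elaborate(&supers, "Ord"));
}
```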
diff --git a/src/librustc/ty/_match.rs b/src/librustc/ty/_match.rs
new file mode 100644 (file)
index 0000000..d0ccc3e
--- /dev/null
@@ -0,0 +1,96 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use ty::{self, Ty, TyCtxt};
+use ty::error::TypeError;
+use ty::relate::{self, Relate, TypeRelation, RelateResult};
+
+/// A type "A" *matches* "B" if the fresh types in B could be
+/// substituted with values so as to make it equal to A. Matching is
+/// intended to be used only on freshened types, and it basically
+/// indicates if the non-freshened versions of A and B could have been
+/// unified.
+///
+/// It is only an approximation. If it yields false, unification would
+/// definitely fail, but a true result doesn't mean unification would
+/// succeed. This is because we don't track the "side-constraints" on
+/// type variables, nor do we track if the same freshened type appears
+/// more than once. To some extent these approximations could be
+/// fixed, given effort.
+///
+/// Like subtyping, matching is really a binary relation, so the only
+/// important thing about the result is Ok/Err. Also, matching never
+/// affects any type variables or unification state.
+pub struct Match<'a, 'tcx: 'a> {
+    tcx: &'a TyCtxt<'tcx>
+}
+
+impl<'a, 'tcx> Match<'a, 'tcx> {
+    pub fn new(tcx: &'a TyCtxt<'tcx>) -> Match<'a, 'tcx> {
+        Match { tcx: tcx }
+    }
+}
+
+impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Match<'a, 'tcx> {
+    fn tag(&self) -> &'static str { "Match" }
+    fn tcx(&self) -> &'a TyCtxt<'tcx> { self.tcx }
+    fn a_is_expected(&self) -> bool { true } // irrelevant
+
+    fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
+                                               _: ty::Variance,
+                                               a: &T,
+                                               b: &T)
+                                               -> RelateResult<'tcx, T>
+    {
+        self.relate(a, b)
+    }
+
+    fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
+        debug!("{}.regions({:?}, {:?})",
+               self.tag(),
+               a,
+               b);
+        Ok(a)
+    }
+
+    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+        debug!("{}.tys({:?}, {:?})", self.tag(),
+               a, b);
+        if a == b { return Ok(a); }
+
+        match (&a.sty, &b.sty) {
+            (_, &ty::TyInfer(ty::FreshTy(_))) |
+            (_, &ty::TyInfer(ty::FreshIntTy(_))) |
+            (_, &ty::TyInfer(ty::FreshFloatTy(_))) => {
+                Ok(a)
+            }
+
+            (&ty::TyInfer(_), _) |
+            (_, &ty::TyInfer(_)) => {
+                Err(TypeError::Sorts(relate::expected_found(self, &a, &b)))
+            }
+
+            (&ty::TyError, _) | (_, &ty::TyError) => {
+                Ok(self.tcx().types.err)
+            }
+
+            _ => {
+                relate::super_relate_tys(self, a, b)
+            }
+        }
+    }
+
+    fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
+                  -> RelateResult<'tcx, ty::Binder<T>>
+        where T: Relate<'a,'tcx>
+    {
+        Ok(ty::Binder(self.relate(a.skip_binder(), b.skip_binder())?))
+    }
+}
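In miniature, the relation implemented by `Match` is: a fresh inference variable on the right-hand side accepts anything, everything else must agree structurally, and the answer is only a yes/no approximation. A standalone sketch over a toy type representation (the `Ty` enum below is an assumption for illustration, not the compiler's):

```
#[derive(Debug, PartialEq)]
enum Ty {
    Int,
    Bool,
    Fresh(u32),     // plays the role of TyInfer(FreshTy(_)) on the right
    Ref(Box<Ty>),
}

/// Does `a` match `b`? Fresh variables in `b` absorb anything; otherwise the
/// two types must have the same shape, checked recursively.
fn matches(a: &Ty, b: &Ty) -> bool {
    match (a, b) {
        (_, Ty::Fresh(_)) => true,
        (Ty::Ref(a1), Ty::Ref(b1)) => matches(a1, b1),
        _ => a == b,
    }
}

fn main() {
    assert!(matches(&Ty::Ref(Box::new(Ty::Int)), &Ty::Ref(Box::new(Ty::Fresh(0)))));
    assert!(!matches(&Ty::Int, &Ty::Bool));
}
```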
diff --git a/src/librustc/ty/adjustment.rs b/src/librustc/ty/adjustment.rs
new file mode 100644 (file)
index 0000000..4100882
--- /dev/null
@@ -0,0 +1,259 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub use self::AutoAdjustment::*;
+pub use self::AutoRef::*;
+
+use ty::{self, Ty, TyCtxt, TypeAndMut, TypeFoldable};
+use ty::LvaluePreference::{NoPreference};
+
+use syntax::ast;
+use syntax::codemap::Span;
+
+use hir;
+
+#[derive(Copy, Clone)]
+pub enum AutoAdjustment<'tcx> {
+    AdjustReifyFnPointer,    // go from a fn-item type to a fn-pointer type
+    AdjustUnsafeFnPointer,   // go from a safe fn pointer to an unsafe fn pointer
+    AdjustMutToConstPointer, // go from a mut raw pointer to a const raw pointer
+    AdjustDerefRef(AutoDerefRef<'tcx>),
+}
+
+/// Represents coercing a pointer to a different kind of pointer - where 'kind'
+/// here means either or both of raw vs borrowed vs unique and fat vs thin.
+///
+/// We transform pointers by following the following steps in order:
+/// 1. Deref the pointer `self.autoderefs` times (may be 0).
+/// 2. If `autoref` is `Some(_)`, then take the address and produce either a
+///    `&` or `*` pointer.
+/// 3. If `unsize` is `Some(_)`, then apply the unsize transformation,
+///    which will do things like convert thin pointers to fat
+///    pointers, or convert structs containing thin pointers to
+///    structs containing fat pointers, or convert between fat
+///    pointers.  We don't store the details of how the transform is
+///    done (in fact, we don't know that, because it might depend on
+///    the precise type parameters). We just store the target
+///    type. Trans figures out what has to be done at monomorphization
+///    time based on the precise source/target type at hand.
+///
+/// To make that more concrete, here are some common scenarios:
+///
+/// 1. The simplest cases are where the pointer is not adjusted fat vs thin.
+/// Here the pointer will be dereferenced N times (where a dereference can
+/// happen to raw or borrowed pointers or any smart pointer which implements
+/// Deref, including Box<_>). The number of dereferences is given by
+/// `autoderefs`.  It can then be auto-referenced zero or one times, indicated
+/// by `autoref`, to either a raw or borrowed pointer. In these cases unsize is
+/// None.
+///
+/// 2. A thin-to-fat coercion involves unsizing the underlying data. We start
+/// with a thin pointer, deref a number of times, unsize the underlying data,
+/// then autoref. The 'unsize' phase may change a fixed length array to a
+/// dynamically sized one, a concrete object to a trait object, or a statically
+/// sized struct to a dynamically sized one. E.g., &[i32; 4] -> &[i32] is
+/// represented by:
+///
+/// ```
+/// AutoDerefRef {
+///     autoderefs: 1,          // &[i32; 4] -> [i32; 4]
+///     autoref: Some(AutoPtr), // [i32] -> &[i32]
+///     unsize: Some([i32]),    // [i32; 4] -> [i32]
+/// }
+/// ```
+///
+/// Note that for a struct, the 'deep' unsizing of the struct is not recorded.
+/// E.g., for `struct Foo<T> { x: T }` we can coerce &Foo<[i32; 4]> to &Foo<[i32]>.
+/// The autoderef and -ref are the same as in the above example, but the type
+/// stored in `unsize` is `Foo<[i32]>`, we don't store any further detail about
+/// the underlying conversions from `[i32; 4]` to `[i32]`.
+///
+/// 3. Coercing a `Box<T>` to `Box<Trait>` is an interesting special case.  In
+/// that case, we have the pointer we need coming in, so there are no
+/// autoderefs, and no autoref. Instead we just do the `Unsize` transformation.
+/// At some point, of course, `Box` should move out of the compiler, in which
+/// case this is analogous to transforming a struct. E.g., Box<[i32; 4]> ->
+/// Box<[i32]> is represented by:
+///
+/// ```
+/// AutoDerefRef {
+///     autoderefs: 0,
+///     autoref: None,
+///     unsize: Some(Box<[i32]>),
+/// }
+/// ```
+#[derive(Copy, Clone)]
+pub struct AutoDerefRef<'tcx> {
+    /// Step 1. Apply a number of dereferences, producing an lvalue.
+    pub autoderefs: usize,
+
+    /// Step 2. Optionally produce a pointer/reference from the value.
+    pub autoref: Option<AutoRef<'tcx>>,
+
+    /// Step 3. Unsize a pointer/reference value, e.g. `&[T; n]` to
+    /// `&[T]`. The stored type is the target pointer type. Note that
+    /// the source could be a thin or fat pointer.
+    pub unsize: Option<Ty<'tcx>>,
+}
+
+impl<'tcx> AutoAdjustment<'tcx> {
+    pub fn is_identity(&self) -> bool {
+        match *self {
+            AdjustReifyFnPointer |
+            AdjustUnsafeFnPointer |
+            AdjustMutToConstPointer => false,
+            AdjustDerefRef(ref r) => r.is_identity(),
+        }
+    }
+}
+impl<'tcx> AutoDerefRef<'tcx> {
+    pub fn is_identity(&self) -> bool {
+        self.autoderefs == 0 && self.unsize.is_none() && self.autoref.is_none()
+    }
+}
+
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum AutoRef<'tcx> {
+    /// Convert from T to &T.
+    AutoPtr(&'tcx ty::Region, hir::Mutability),
+
+    /// Convert from T to *T.
+    /// Value to thin pointer.
+    AutoUnsafe(hir::Mutability),
+}
+
+#[derive(Clone, Copy, RustcEncodable, RustcDecodable, Debug)]
+pub enum CustomCoerceUnsized {
+    /// Records the index of the field being coerced.
+    Struct(usize)
+}
+
+impl<'tcx> ty::TyS<'tcx> {
+    /// See `expr_ty_adjusted`
+    pub fn adjust<F>(&'tcx self, cx: &TyCtxt<'tcx>,
+                     span: Span,
+                     expr_id: ast::NodeId,
+                     adjustment: Option<&AutoAdjustment<'tcx>>,
+                     mut method_type: F)
+                     -> Ty<'tcx> where
+        F: FnMut(ty::MethodCall) -> Option<Ty<'tcx>>,
+    {
+        if let ty::TyError = self.sty {
+            return self;
+        }
+
+        return match adjustment {
+            Some(adjustment) => {
+                match *adjustment {
+                    AdjustReifyFnPointer => {
+                        match self.sty {
+                            ty::TyFnDef(_, _, b) => {
+                                cx.mk_ty(ty::TyFnPtr(b))
+                            }
+                            _ => {
+                                bug!("AdjustReifyFnPointer adjustment on non-fn-item: {:?}",
+                                     self);
+                            }
+                        }
+                    }
+
+                    AdjustUnsafeFnPointer => {
+                        match self.sty {
+                            ty::TyFnPtr(b) => cx.safe_to_unsafe_fn_ty(b),
+                            ref b => {
+                                bug!("AdjustUnsafeFnPointer adjustment on non-fn-ptr: {:?}",
+                                     b);
+                            }
+                        }
+                    }
+
+                    AdjustMutToConstPointer => {
+                        match self.sty {
+                            ty::TyRawPtr(mt) => cx.mk_ptr(ty::TypeAndMut {
+                                ty: mt.ty,
+                                mutbl: hir::MutImmutable
+                            }),
+                            ref b => {
+                                bug!("AdjustMutToConstPointer on non-raw-ptr: {:?}",
+                                     b);
+                            }
+                        }
+                    }
+
+                    AdjustDerefRef(ref adj) => {
+                        let mut adjusted_ty = self;
+
+                        if !adjusted_ty.references_error() {
+                            for i in 0..adj.autoderefs {
+                                adjusted_ty =
+                                    adjusted_ty.adjust_for_autoderef(cx,
+                                                                     expr_id,
+                                                                     span,
+                                                                     i as u32,
+                                                                     &mut method_type);
+                            }
+                        }
+
+                        if let Some(target) = adj.unsize {
+                            target
+                        } else {
+                            adjusted_ty.adjust_for_autoref(cx, adj.autoref)
+                        }
+                    }
+                }
+            }
+            None => self
+        };
+    }
+
+    pub fn adjust_for_autoderef<F>(&'tcx self,
+                                   cx: &TyCtxt<'tcx>,
+                                   expr_id: ast::NodeId,
+                                   expr_span: Span,
+                                   autoderef: u32, // how many autoderefs so far?
+                                   mut method_type: F)
+                                   -> Ty<'tcx> where
+        F: FnMut(ty::MethodCall) -> Option<Ty<'tcx>>,
+    {
+        let method_call = ty::MethodCall::autoderef(expr_id, autoderef);
+        let mut adjusted_ty = self;
+        if let Some(method_ty) = method_type(method_call) {
+            // Method calls always have all late-bound regions
+            // fully instantiated.
+            let fn_ret = cx.no_late_bound_regions(&method_ty.fn_ret()).unwrap();
+            adjusted_ty = fn_ret.unwrap();
+        }
+        match adjusted_ty.builtin_deref(true, NoPreference) {
+            Some(mt) => mt.ty,
+            None => {
+                span_bug!(
+                    expr_span,
+                    "the {}th autoderef failed: {}",
+                    autoderef,
+                    adjusted_ty);
+            }
+        }
+    }
+
+    pub fn adjust_for_autoref(&'tcx self, cx: &TyCtxt<'tcx>,
+                              autoref: Option<AutoRef<'tcx>>)
+                              -> Ty<'tcx> {
+        match autoref {
+            None => self,
+            Some(AutoPtr(r, m)) => {
+                cx.mk_ref(r, TypeAndMut { ty: self, mutbl: m })
+            }
+            Some(AutoUnsafe(m)) => {
+                cx.mk_ptr(TypeAndMut { ty: self, mutbl: m })
+            }
+        }
+    }
+}
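For orientation, the two worked examples in the `AutoDerefRef` doc comment correspond to coercions that can be written directly in surface Rust; the struct above only records how the compiler will perform them at monomorphization time:

```
fn main() {
    // &[i32; 4] -> &[i32]: autoderefs: 1, autoref: Some(AutoPtr), unsize: Some([i32]).
    let arr: &[i32; 4] = &[1, 2, 3, 4];
    let slice: &[i32] = arr;
    assert_eq!(slice.len(), 4);

    // Box<[i32; 4]> -> Box<[i32]>: no autoderef, no autoref, only the unsize step.
    let boxed: Box<[i32; 4]> = Box::new([1, 2, 3, 4]);
    let fat: Box<[i32]> = boxed;
    assert_eq!(fat.len(), 4);
}
```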
diff --git a/src/librustc/ty/cast.rs b/src/librustc/ty/cast.rs
new file mode 100644 (file)
index 0000000..c8d282d
--- /dev/null
@@ -0,0 +1,76 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Helpers for handling cast expressions, used in both
+// typeck and trans.
+
+use ty::{self, Ty};
+
+use syntax::ast;
+
+/// Types that are represented as ints.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum IntTy {
+    U(ast::UintTy),
+    I,
+    CEnum,
+    Bool,
+    Char
+}
+
+// Valid types for the result of a non-coercion cast
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum CastTy<'tcx> {
+    /// Various types that are represented as ints and handled mostly
+    /// in the same way, merged for easier matching.
+    Int(IntTy),
+    /// Floating-Point types
+    Float,
+    /// Function Pointers
+    FnPtr,
+    /// Raw pointers
+    Ptr(&'tcx ty::TypeAndMut<'tcx>),
+    /// References
+    RPtr(&'tcx ty::TypeAndMut<'tcx>),
+}
+
+/// Cast Kind. See RFC 401 (or librustc_typeck/check/cast.rs)
+#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
+pub enum CastKind {
+    CoercionCast,
+    PtrPtrCast,
+    PtrAddrCast,
+    AddrPtrCast,
+    NumericCast,
+    EnumCast,
+    PrimIntCast,
+    U8CharCast,
+    ArrayPtrCast,
+    FnPtrPtrCast,
+    FnPtrAddrCast
+}
+
+impl<'tcx> CastTy<'tcx> {
+    pub fn from_ty(t: Ty<'tcx>) -> Option<CastTy<'tcx>> {
+        match t.sty {
+            ty::TyBool => Some(CastTy::Int(IntTy::Bool)),
+            ty::TyChar => Some(CastTy::Int(IntTy::Char)),
+            ty::TyInt(_) => Some(CastTy::Int(IntTy::I)),
+            ty::TyUint(u) => Some(CastTy::Int(IntTy::U(u))),
+            ty::TyFloat(_) => Some(CastTy::Float),
+            ty::TyEnum(d,_) if d.is_payloadfree() =>
+                Some(CastTy::Int(IntTy::CEnum)),
+            ty::TyRawPtr(ref mt) => Some(CastTy::Ptr(mt)),
+            ty::TyRef(_, ref mt) => Some(CastTy::RPtr(mt)),
+            ty::TyFnPtr(..) => Some(CastTy::FnPtr),
+            _ => None,
+        }
+    }
+}
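The `CastKind` variants line up with the `as` casts a user can write. A few surface-level examples follow; the mapping comments are informal, since the actual classification happens in librustc_typeck/check/cast.rs:

```
fn main() {
    let x: u8 = 200;
    let _widen = x as i32;            // NumericCast
    let _bool_to_int = true as u8;    // PrimIntCast
    let _byte_to_char = 65u8 as char; // U8CharCast

    let arr = [1i32, 2, 3];
    let _elem_ptr = &arr as *const [i32; 3] as *const i32; // ref-to-ptr, then PtrPtrCast

    let f: fn() = main;
    let _addr = f as usize;           // FnPtrAddrCast
}
```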
diff --git a/src/librustc/ty/contents.rs b/src/librustc/ty/contents.rs
new file mode 100644 (file)
index 0000000..f5dedb1
--- /dev/null
@@ -0,0 +1,267 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use hir::def_id::{DefId};
+use ty::{self, Ty, TyCtxt};
+use util::common::MemoizationMap;
+use util::nodemap::FnvHashMap;
+
+use std::fmt;
+use std::ops;
+
+use syntax::ast;
+
+/// Type contents is how the type checker reasons about kinds.
+/// They track what kinds of things are found within a type.  You can
+/// think of them as kind of an "anti-kind".  They track the kinds of values
+/// and things that are contained in types.  Having a larger contents for
+/// a type tends to rule that type *out* from various kinds.  For example,
+/// a type that contains a reference is not sendable.
+///
+/// The reason we compute type contents and not kinds is that it is
+/// easier for me (nmatsakis) to think about what is contained within
+/// a type than to think about what is *not* contained within a type.
+#[derive(Clone, Copy)]
+pub struct TypeContents {
+    pub bits: u64
+}
+
+macro_rules! def_type_content_sets {
+    (mod $mname:ident { $($name:ident = $bits:expr),+ }) => {
+        #[allow(non_snake_case)]
+        mod $mname {
+            use super::TypeContents;
+            $(
+                #[allow(non_upper_case_globals)]
+                pub const $name: TypeContents = TypeContents { bits: $bits };
+             )+
+        }
+    }
+}
+
+def_type_content_sets! {
+    mod TC {
+        None                                = 0b0000_0000__0000_0000__0000,
+
+        // Things that are interior to the value (first nibble):
+        InteriorUnsafe                      = 0b0000_0000__0000_0000__0010,
+        InteriorParam                       = 0b0000_0000__0000_0000__0100,
+        // InteriorAll                         = 0b0000_0000__0000_0000__1111,
+
+        // Things that are owned by the value (second and third nibbles):
+        OwnsOwned                           = 0b0000_0000__0000_0001__0000,
+        OwnsDtor                            = 0b0000_0000__0000_0010__0000,
+        OwnsAll                             = 0b0000_0000__1111_1111__0000,
+
+        // Things that mean drop glue is necessary
+        NeedsDrop                           = 0b0000_0000__0000_0111__0000,
+
+        // All bits
+        All                                 = 0b1111_1111__1111_1111__1111
+    }
+}
+
+impl TypeContents {
+    pub fn when(&self, cond: bool) -> TypeContents {
+        if cond {*self} else {TC::None}
+    }
+
+    pub fn intersects(&self, tc: TypeContents) -> bool {
+        (self.bits & tc.bits) != 0
+    }
+
+    pub fn owns_owned(&self) -> bool {
+        self.intersects(TC::OwnsOwned)
+    }
+
+    pub fn interior_param(&self) -> bool {
+        self.intersects(TC::InteriorParam)
+    }
+
+    pub fn interior_unsafe(&self) -> bool {
+        self.intersects(TC::InteriorUnsafe)
+    }
+
+    pub fn needs_drop(&self, _: &TyCtxt) -> bool {
+        self.intersects(TC::NeedsDrop)
+    }
+
+    /// Includes only those bits that still apply when indirected through a `Box` pointer
+    pub fn owned_pointer(&self) -> TypeContents {
+        TC::OwnsOwned | (*self & TC::OwnsAll)
+    }
+
+    pub fn union<T, F>(v: &[T], mut f: F) -> TypeContents where
+        F: FnMut(&T) -> TypeContents,
+    {
+        v.iter().fold(TC::None, |tc, ty| tc | f(ty))
+    }
+
+    pub fn has_dtor(&self) -> bool {
+        self.intersects(TC::OwnsDtor)
+    }
+}
+
+impl ops::BitOr for TypeContents {
+    type Output = TypeContents;
+
+    fn bitor(self, other: TypeContents) -> TypeContents {
+        TypeContents {bits: self.bits | other.bits}
+    }
+}
+
+impl ops::BitAnd for TypeContents {
+    type Output = TypeContents;
+
+    fn bitand(self, other: TypeContents) -> TypeContents {
+        TypeContents {bits: self.bits & other.bits}
+    }
+}
+
+impl ops::Sub for TypeContents {
+    type Output = TypeContents;
+
+    fn sub(self, other: TypeContents) -> TypeContents {
+        TypeContents {bits: self.bits & !other.bits}
+    }
+}
+
+impl fmt::Debug for TypeContents {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "TypeContents({:b})", self.bits)
+    }
+}
+
+impl<'tcx> ty::TyS<'tcx> {
+    pub fn type_contents(&'tcx self, cx: &TyCtxt<'tcx>) -> TypeContents {
+        return cx.tc_cache.memoize(self, || tc_ty(cx, self, &mut FnvHashMap()));
+
+        fn tc_ty<'tcx>(cx: &TyCtxt<'tcx>,
+                       ty: Ty<'tcx>,
+                       cache: &mut FnvHashMap<Ty<'tcx>, TypeContents>) -> TypeContents
+        {
+            // Subtle: Note that we are *not* using cx.tc_cache here but rather a
+            // private cache for this walk.  This is needed in the case of cyclic
+            // types like:
+            //
+            //     struct List { next: Box<Option<List>>, ... }
+            //
+            // When computing the type contents of such a type, we wind up deeply
+            // recursing as we go.  So when we encounter the recursive reference
+            // to List, we temporarily use TC::None as its contents.  Later we'll
+            // patch up the cache with the correct value, once we've computed it
+            // (this is basically a co-inductive process, if that helps).  So in
+            // the end we'll compute TC::OwnsOwned, in this case.
+            //
+            // The problem is, as we are doing the computation, we will also
+            // compute an *intermediate* contents for, e.g., Option<List> of
+            // TC::None.  This is ok during the computation of List itself, but if
+            // we stored this intermediate value into cx.tc_cache, then later
+            // requests for the contents of Option<List> would also yield TC::None
+            // which is incorrect.  This value was computed based on the crutch
+            // value for the type contents of List.  The correct value is
+            // TC::OwnsOwned.  This manifested as issue #4821.
+            match cache.get(&ty) {
+                Some(tc) => { return *tc; }
+                None => {}
+            }
+            match cx.tc_cache.borrow().get(&ty) {    // Must check both caches!
+                Some(tc) => { return *tc; }
+                None => {}
+            }
+            cache.insert(ty, TC::None);
+
+            let result = match ty.sty {
+                // usize and isize are ffi-unsafe
+                ty::TyUint(ast::UintTy::Us) | ty::TyInt(ast::IntTy::Is) => {
+                    TC::None
+                }
+
+                // Scalar and unique types are sendable and durable
+                ty::TyInfer(ty::FreshIntTy(_)) | ty::TyInfer(ty::FreshFloatTy(_)) |
+                ty::TyBool | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) |
+                ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyChar => {
+                    TC::None
+                }
+
+                ty::TyBox(typ) => {
+                    tc_ty(cx, typ, cache).owned_pointer()
+                }
+
+                ty::TyTrait(_) => {
+                    TC::All - TC::InteriorParam
+                }
+
+                ty::TyRawPtr(_) => {
+                    TC::None
+                }
+
+                ty::TyRef(_, _) => {
+                    TC::None
+                }
+
+                ty::TyArray(ty, _) => {
+                    tc_ty(cx, ty, cache)
+                }
+
+                ty::TySlice(ty) => {
+                    tc_ty(cx, ty, cache)
+                }
+                ty::TyStr => TC::None,
+
+                ty::TyClosure(_, ref substs) => {
+                    TypeContents::union(&substs.upvar_tys, |ty| tc_ty(cx, &ty, cache))
+                }
+
+                ty::TyTuple(ref tys) => {
+                    TypeContents::union(&tys[..],
+                                        |ty| tc_ty(cx, *ty, cache))
+                }
+
+                ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
+                    let mut res =
+                        TypeContents::union(&def.variants, |v| {
+                            TypeContents::union(&v.fields, |f| {
+                                tc_ty(cx, f.ty(cx, substs), cache)
+                            })
+                        });
+
+                    if def.has_dtor() {
+                        res = res | TC::OwnsDtor;
+                    }
+
+                    apply_lang_items(cx, def.did, res)
+                }
+
+                ty::TyProjection(..) |
+                ty::TyParam(_) => {
+                    TC::All
+                }
+
+                ty::TyInfer(_) |
+                ty::TyError => {
+                    bug!("asked to compute contents of error type");
+                }
+            };
+
+            cache.insert(ty, result);
+            result
+        }
+
+        fn apply_lang_items(cx: &TyCtxt, did: DefId, tc: TypeContents)
+                            -> TypeContents {
+            if Some(did) == cx.lang_items.unsafe_cell_type() {
+                tc | TC::InteriorUnsafe
+            } else {
+                tc
+            }
+        }
+    }
+}
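+
+// Illustrative sketch (not part of the upstream source) of the two-cache trick
+// documented in `tc_ty` above: a private per-walk cache is pre-seeded with a
+// neutral "crutch" value to break cycles, and intermediate answers stay in it;
+// the long-lived shared cache is only written by the outer memoization with
+// the final, top-level result.  Every name below is made up for the example
+// and it compiles on stable Rust without any compiler internals.
+//
+//     use std::collections::HashMap;
+//
+//     fn owns(node: u32,
+//             leaf_owns: &HashMap<u32, bool>,    // facts about individual nodes
+//             edges: &HashMap<u32, Vec<u32>>,    // possibly cyclic reachability
+//             shared: &HashMap<u32, bool>,       // cross-query cache (read only here)
+//             local: &mut HashMap<u32, bool>) -> bool {
+//         if let Some(&v) = local.get(&node) { return v; }
+//         if let Some(&v) = shared.get(&node) { return v; }
+//         local.insert(node, false);             // crutch value for cycles
+//         let here = leaf_owns.get(&node).cloned().unwrap_or(false);
+//         let result = here || edges.get(&node).map_or(false, |succs| {
+//             succs.iter().any(|&s| owns(s, leaf_owns, edges, shared, local))
+//         });
+//         local.insert(node, result);            // final, but still private
+//         result
+//     }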
diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs
new file mode 100644 (file)
index 0000000..8b07a97
--- /dev/null
@@ -0,0 +1,1100 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! type context book-keeping
+
+use dep_graph::{DepGraph, DepTrackingMap};
+use hir::map as ast_map;
+use session::Session;
+use lint;
+use middle;
+use middle::cstore::{CrateStore, LOCAL_CRATE};
+use hir::def::DefMap;
+use hir::def_id::DefId;
+use middle::free_region::FreeRegionMap;
+use middle::region::RegionMaps;
+use middle::resolve_lifetime;
+use middle::stability;
+use ty::subst::{self, Subst, Substs};
+use traits;
+use ty::{self, TraitRef, Ty, TypeAndMut};
+use ty::{TyS, TypeVariants};
+use ty::{AdtDef, ClosureSubsts, ExistentialBounds, Region};
+use hir::FreevarMap;
+use ty::{BareFnTy, InferTy, ParamTy, ProjectionTy, TraitTy};
+use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid};
+use ty::TypeVariants::*;
+use ty::layout::{Layout, TargetDataLayout};
+use ty::maps;
+use util::common::MemoizationMap;
+use util::nodemap::{NodeMap, NodeSet, DefIdMap, DefIdSet};
+use util::nodemap::FnvHashMap;
+
+use arena::TypedArena;
+use std::borrow::Borrow;
+use std::cell::{Cell, RefCell, Ref};
+use std::hash::{Hash, Hasher};
+use std::rc::Rc;
+use syntax::ast::{self, Name, NodeId};
+use syntax::attr;
+use syntax::parse::token::{self, special_idents};
+
+use hir;
+
+/// Internal storage
+pub struct CtxtArenas<'tcx> {
+    // internings
+    type_: TypedArena<TyS<'tcx>>,
+    substs: TypedArena<Substs<'tcx>>,
+    bare_fn: TypedArena<BareFnTy<'tcx>>,
+    region: TypedArena<Region>,
+    stability: TypedArena<attr::Stability>,
+    layout: TypedArena<Layout>,
+
+    // references
+    trait_defs: TypedArena<ty::TraitDef<'tcx>>,
+    adt_defs: TypedArena<ty::AdtDefData<'tcx, 'tcx>>,
+}
+
+impl<'tcx> CtxtArenas<'tcx> {
+    pub fn new() -> CtxtArenas<'tcx> {
+        CtxtArenas {
+            type_: TypedArena::new(),
+            substs: TypedArena::new(),
+            bare_fn: TypedArena::new(),
+            region: TypedArena::new(),
+            stability: TypedArena::new(),
+            layout: TypedArena::new(),
+
+            trait_defs: TypedArena::new(),
+            adt_defs: TypedArena::new()
+        }
+    }
+}
+
+pub struct CommonTypes<'tcx> {
+    pub bool: Ty<'tcx>,
+    pub char: Ty<'tcx>,
+    pub isize: Ty<'tcx>,
+    pub i8: Ty<'tcx>,
+    pub i16: Ty<'tcx>,
+    pub i32: Ty<'tcx>,
+    pub i64: Ty<'tcx>,
+    pub usize: Ty<'tcx>,
+    pub u8: Ty<'tcx>,
+    pub u16: Ty<'tcx>,
+    pub u32: Ty<'tcx>,
+    pub u64: Ty<'tcx>,
+    pub f32: Ty<'tcx>,
+    pub f64: Ty<'tcx>,
+    pub err: Ty<'tcx>,
+}
+
+pub struct Tables<'tcx> {
+    /// Stores the types for various nodes in the AST.  Note that this table
+    /// is not guaranteed to be populated until after typeck.  See
+    /// typeck::check::fn_ctxt for details.
+    pub node_types: NodeMap<Ty<'tcx>>,
+
+    /// Stores the type parameters which were substituted to obtain the type
+    /// of this node.  This only applies to nodes that refer to entities
+    /// parameterized by type parameters, such as generic fns, types, or
+    /// other items.
+    pub item_substs: NodeMap<ty::ItemSubsts<'tcx>>,
+
+    pub adjustments: NodeMap<ty::adjustment::AutoAdjustment<'tcx>>,
+
+    pub method_map: ty::MethodMap<'tcx>,
+
+    /// Borrows
+    pub upvar_capture_map: ty::UpvarCaptureMap,
+
+    /// Records the type of each closure. The def ID is the ID of the
+    /// expression defining the closure.
+    pub closure_tys: DefIdMap<ty::ClosureTy<'tcx>>,
+
+    /// Records the type of each closure. The def ID is the ID of the
+    /// expression defining the closure.
+    pub closure_kinds: DefIdMap<ty::ClosureKind>,
+
+    /// For each fn, records the "liberated" types of its arguments
+    /// and return type. Liberated means that all bound regions
+    /// (including late-bound regions) are replaced with free
+    /// equivalents. This table is not used in trans (since regions
+    /// are erased there) and hence is not serialized to metadata.
+    pub liberated_fn_sigs: NodeMap<ty::FnSig<'tcx>>,
+
+    /// For each FRU expression, record the normalized types of the fields
+    /// of the struct - this is needed because it is non-trivial to
+    /// normalize while preserving regions. This table is used only in
+    /// MIR construction and hence is not serialized to metadata.
+    pub fru_field_types: NodeMap<Vec<Ty<'tcx>>>
+}
+
+impl<'tcx> Tables<'tcx> {
+    pub fn empty() -> Tables<'tcx> {
+        Tables {
+            node_types: FnvHashMap(),
+            item_substs: NodeMap(),
+            adjustments: NodeMap(),
+            method_map: FnvHashMap(),
+            upvar_capture_map: FnvHashMap(),
+            closure_tys: DefIdMap(),
+            closure_kinds: DefIdMap(),
+            liberated_fn_sigs: NodeMap(),
+            fru_field_types: NodeMap()
+        }
+    }
+
+    pub fn closure_kind(this: &RefCell<Self>,
+                        tcx: &TyCtxt<'tcx>,
+                        def_id: DefId)
+                        -> ty::ClosureKind {
+        // If this is a local def-id, it should be inserted into the
+        // tables by typeck; else, it will be retrieved from
+        // the external crate metadata.
+        if let Some(&kind) = this.borrow().closure_kinds.get(&def_id) {
+            return kind;
+        }
+
+        let kind = tcx.sess.cstore.closure_kind(tcx, def_id);
+        this.borrow_mut().closure_kinds.insert(def_id, kind);
+        kind
+    }
+
+    pub fn closure_type(this: &RefCell<Self>,
+                        tcx: &TyCtxt<'tcx>,
+                        def_id: DefId,
+                        substs: &ClosureSubsts<'tcx>)
+                        -> ty::ClosureTy<'tcx>
+    {
+        // If this is a local def-id, it should be inserted into the
+        // tables by typeck; else, it will be retrieved from
+        // the external crate metadata.
+        if let Some(ty) = this.borrow().closure_tys.get(&def_id) {
+            return ty.subst(tcx, &substs.func_substs);
+        }
+
+        let ty = tcx.sess.cstore.closure_ty(tcx, def_id);
+        this.borrow_mut().closure_tys.insert(def_id, ty.clone());
+        ty.subst(tcx, &substs.func_substs)
+    }
+}
+
+impl<'tcx> CommonTypes<'tcx> {
+    fn new(arena: &'tcx TypedArena<TyS<'tcx>>,
+           interner: &RefCell<FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>>)
+           -> CommonTypes<'tcx>
+    {
+        let mk = |sty| TyCtxt::intern_ty(arena, interner, sty);
+        CommonTypes {
+            bool: mk(TyBool),
+            char: mk(TyChar),
+            err: mk(TyError),
+            isize: mk(TyInt(ast::IntTy::Is)),
+            i8: mk(TyInt(ast::IntTy::I8)),
+            i16: mk(TyInt(ast::IntTy::I16)),
+            i32: mk(TyInt(ast::IntTy::I32)),
+            i64: mk(TyInt(ast::IntTy::I64)),
+            usize: mk(TyUint(ast::UintTy::Us)),
+            u8: mk(TyUint(ast::UintTy::U8)),
+            u16: mk(TyUint(ast::UintTy::U16)),
+            u32: mk(TyUint(ast::UintTy::U32)),
+            u64: mk(TyUint(ast::UintTy::U64)),
+            f32: mk(TyFloat(ast::FloatTy::F32)),
+            f64: mk(TyFloat(ast::FloatTy::F64)),
+        }
+    }
+}
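+
+// Usage note (illustrative, not part of the upstream source): because these
+// common types are interned once up front, callers can reach for them
+// directly instead of re-interning, e.g. for a hypothetical `tcx: &TyCtxt`:
+//
+//     let int32 = tcx.types.i32;   // already interned
+//     assert!(int32 as *const _ == tcx.mk_mach_int(ast::IntTy::I32) as *const _);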
+
+/// The data structure to keep track of all the information that the typechecker
+/// generates so that it can be reused and doesn't have to be redone
+/// later on.
+pub struct TyCtxt<'tcx> {
+    /// The arenas that types etc are allocated from.
+    arenas: &'tcx CtxtArenas<'tcx>,
+
+    /// Specifically use a speedy hash algorithm for this hash map, it's used
+    /// quite often.
+    // FIXME(eddyb) use a FnvHashSet<InternedTy<'tcx>> when equivalent keys can
+    // be queried from a HashSet.
+    interner: RefCell<FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>>,
+
+    // FIXME as above, use a hashset if equivalent elements can be queried.
+    substs_interner: RefCell<FnvHashMap<&'tcx Substs<'tcx>, &'tcx Substs<'tcx>>>,
+    bare_fn_interner: RefCell<FnvHashMap<&'tcx BareFnTy<'tcx>, &'tcx BareFnTy<'tcx>>>,
+    region_interner: RefCell<FnvHashMap<&'tcx Region, &'tcx Region>>,
+    stability_interner: RefCell<FnvHashMap<&'tcx attr::Stability, &'tcx attr::Stability>>,
+    layout_interner: RefCell<FnvHashMap<&'tcx Layout, &'tcx Layout>>,
+
+    pub dep_graph: DepGraph,
+
+    /// Common types, pre-interned for your convenience.
+    pub types: CommonTypes<'tcx>,
+
+    pub sess: &'tcx Session,
+    pub def_map: RefCell<DefMap>,
+
+    pub named_region_map: resolve_lifetime::NamedRegionMap,
+
+    pub region_maps: RegionMaps,
+
+    // For each fn declared in the local crate, type check stores the
+    // free-region relationships that were deduced from its where
+    // clauses and parameter types. These are then read again by
+    // borrowck. (They are not used during trans, and hence are not
+    // serialized or needed for cross-crate fns.)
+    free_region_maps: RefCell<NodeMap<FreeRegionMap>>,
+    // FIXME: jroesch make this a refcell
+
+    pub tables: RefCell<Tables<'tcx>>,
+
+    /// Maps from a trait item to the trait item "descriptor"
+    pub impl_or_trait_items: RefCell<DepTrackingMap<maps::ImplOrTraitItems<'tcx>>>,
+
+    /// Maps from a trait def-id to a list of the def-ids of its trait items
+    pub trait_item_def_ids: RefCell<DepTrackingMap<maps::TraitItemDefIds<'tcx>>>,
+
+    /// A cache for the trait_items() routine; note that the routine
+    /// itself pushes the `TraitItems` dependency node.
+    trait_items_cache: RefCell<DepTrackingMap<maps::TraitItems<'tcx>>>,
+
+    pub impl_trait_refs: RefCell<DepTrackingMap<maps::ImplTraitRefs<'tcx>>>,
+    pub trait_defs: RefCell<DepTrackingMap<maps::TraitDefs<'tcx>>>,
+    pub adt_defs: RefCell<DepTrackingMap<maps::AdtDefs<'tcx>>>,
+
+    /// Maps from the def-id of an item (trait/struct/enum/fn) to its
+    /// associated predicates.
+    pub predicates: RefCell<DepTrackingMap<maps::Predicates<'tcx>>>,
+
+    /// Maps from the def-id of a trait to the list of
+    /// super-predicates. This is a subset of the full list of
+    /// predicates. We store these in a separate map because we must
+    /// evaluate them even during type conversion, often before the
+    /// full predicates are available (note that supertraits have
+    /// additional acyclicity requirements).
+    pub super_predicates: RefCell<DepTrackingMap<maps::Predicates<'tcx>>>,
+
+    pub map: ast_map::Map<'tcx>,
+
+    // Records the free variables referenced by every closure
+    // expression. Do not track deps for this, just recompute it from
+    // scratch every time.
+    pub freevars: RefCell<FreevarMap>,
+
+    // Records the type of every item.
+    pub tcache: RefCell<DepTrackingMap<maps::Tcache<'tcx>>>,
+
+    // Internal cache for metadata decoding. No need to track deps on this.
+    pub rcache: RefCell<FnvHashMap<ty::CReaderCacheKey, Ty<'tcx>>>,
+
+    // Cache for the type-contents routine. FIXME -- track deps?
+    pub tc_cache: RefCell<FnvHashMap<Ty<'tcx>, ty::contents::TypeContents>>,
+
+    // Cache for various types within a method body and so forth.
+    //
+    // FIXME this should be made local to typeck, but it is currently used by one lint
+    pub ast_ty_to_ty_cache: RefCell<NodeMap<Ty<'tcx>>>,
+
+    // FIXME no dep tracking, but we should be able to remove this
+    pub ty_param_defs: RefCell<NodeMap<ty::TypeParameterDef<'tcx>>>,
+
+    // FIXME dep tracking -- should be harmless enough
+    pub normalized_cache: RefCell<FnvHashMap<Ty<'tcx>, Ty<'tcx>>>,
+
+    pub lang_items: middle::lang_items::LanguageItems,
+
+    /// Maps from def-id of a type or region parameter to its
+    /// (inferred) variance.
+    pub item_variance_map: RefCell<DepTrackingMap<maps::ItemVariances<'tcx>>>,
+
+    /// True if variance has been computed; false otherwise.
+    pub variance_computed: Cell<bool>,
+
+    /// Maps a DefId of a type to a list of its inherent impls.
+    /// Contains implementations of methods that are inherent to a type.
+    /// Methods in these implementations don't need to be exported.
+    pub inherent_impls: RefCell<DepTrackingMap<maps::InherentImpls<'tcx>>>,
+
+    /// Maps a DefId of an impl to a list of its items.
+    /// Note that this contains all of the impls that we know about,
+    /// including ones in other crates. It's not clear that this is the best
+    /// way to do it.
+    pub impl_items: RefCell<DepTrackingMap<maps::ImplItems<'tcx>>>,
+
+    /// Set of used unsafe nodes (functions or blocks). Unsafe nodes not
+    /// present in this set can be warned about.
+    pub used_unsafe: RefCell<NodeSet>,
+
+    /// Set of nodes which mark locals as mutable which end up getting used at
+    /// some point. Local variable definitions not in this set can be warned
+    /// about.
+    pub used_mut_nodes: RefCell<NodeSet>,
+
+    /// The set of external nominal types whose implementations have been read.
+    /// This is used for lazy resolution of methods.
+    pub populated_external_types: RefCell<DefIdSet>,
+
+    /// The set of external primitive types whose implementations have been read.
+    /// FIXME(arielb1): why is this separate from populated_external_types?
+    pub populated_external_primitive_impls: RefCell<DefIdSet>,
+
+    /// Cache used by const_eval when decoding external constants.
+    /// Contains `None` when the constant has been fetched but doesn't exist.
+    /// Contains `Some(expr_id, type)` otherwise.
+    /// `type` is `None` in case it's not a primitive type
+    pub extern_const_statics: RefCell<DefIdMap<Option<(NodeId, Option<Ty<'tcx>>)>>>,
+    /// Cache used by const_eval when decoding extern const fns
+    pub extern_const_fns: RefCell<DefIdMap<NodeId>>,
+
+    pub node_lint_levels: RefCell<FnvHashMap<(NodeId, lint::LintId),
+                                              lint::LevelSource>>,
+
+    /// Maps any item's def-id to its stability index.
+    pub stability: RefCell<stability::Index<'tcx>>,
+
+    /// Caches the results of trait selection. This cache is used
+    /// for things that do not have to do with the parameters in scope.
+    pub selection_cache: traits::SelectionCache<'tcx>,
+
+    /// Caches the results of trait evaluation. This cache is used
+    /// for things that do not have to do with the parameters in scope.
+    /// Merge this with `selection_cache`?
+    pub evaluation_cache: traits::EvaluationCache<'tcx>,
+
+    /// A set of predicates that have been fulfilled *somewhere*.
+    /// This is used to avoid duplicate work. Predicates are only
+    /// added to this set when they mention only "global" names
+    /// (i.e., no type or lifetime parameters).
+    pub fulfilled_predicates: RefCell<traits::GlobalFulfilledPredicates<'tcx>>,
+
+    /// Caches the representation hints for struct definitions.
+    repr_hint_cache: RefCell<DepTrackingMap<maps::ReprHints<'tcx>>>,
+
+    /// Maps Expr NodeId's to their constant qualification.
+    pub const_qualif_map: RefCell<NodeMap<middle::const_qualif::ConstQualif>>,
+
+    /// Caches CoerceUnsized kinds for impls on custom types.
+    pub custom_coerce_unsized_kinds: RefCell<DefIdMap<ty::adjustment::CustomCoerceUnsized>>,
+
+    /// Maps a cast expression to its kind. This is keyed on the
+    /// *from* expression of the cast, not the cast itself.
+    pub cast_kinds: RefCell<NodeMap<ty::cast::CastKind>>,
+
+    /// Maps Fn items to a collection of fragment infos.
+    ///
+    /// The main goal is to identify data (each of which may be moved
+    /// or assigned) whose subparts are not moved nor assigned
+    /// (i.e. their state is *unfragmented*) and corresponding ast
+    /// nodes where the path to that data is moved or assigned.
+    ///
+    /// In the long term, unfragmented values will have their
+    /// destructor entirely driven by a single stack-local drop-flag,
+    /// and their parents, the collections of the unfragmented values
+    /// (or more simply, "fragmented values"), are mapped to the
+    /// corresponding collections of stack-local drop-flags.
+    ///
+    /// (However, in the short term that is not the case; e.g. some
+    /// unfragmented paths still need to be zeroed, namely when they
+    /// reference parent data from an outer scope that was not
+    /// entirely moved, and therefore that needs to be zeroed so that
+    /// we do not get double-drop when we hit the end of the parent
+    /// scope.)
+    ///
+    /// Also: currently the table solely holds keys for node-ids of
+    /// unfragmented values (see `FragmentInfo` enum definition), but
+    /// longer-term we will need to also store mappings from
+    /// fragmented data to the set of unfragmented pieces that
+    /// constitute it.
+    pub fragment_infos: RefCell<DefIdMap<Vec<ty::FragmentInfo>>>,
+
+    /// The definite name of the current crate after taking into account
+    /// attributes, command-line parameters, etc.
+    pub crate_name: token::InternedString,
+
+    /// Data layout specification for the current target.
+    pub data_layout: TargetDataLayout,
+
+    /// Cache for layouts computed from types.
+    pub layout_cache: RefCell<FnvHashMap<Ty<'tcx>, &'tcx Layout>>,
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    pub fn crate_name(&self, cnum: ast::CrateNum) -> token::InternedString {
+        if cnum == LOCAL_CRATE {
+            self.crate_name.clone()
+        } else {
+            self.sess.cstore.crate_name(cnum)
+        }
+    }
+
+    pub fn crate_disambiguator(&self, cnum: ast::CrateNum) -> token::InternedString {
+        if cnum == LOCAL_CRATE {
+            self.sess.crate_disambiguator.get().as_str()
+        } else {
+            self.sess.cstore.crate_disambiguator(cnum)
+        }
+    }
+
+    pub fn type_parameter_def(&self,
+                              node_id: NodeId)
+                              -> ty::TypeParameterDef<'tcx>
+    {
+        self.ty_param_defs.borrow().get(&node_id).unwrap().clone()
+    }
+
+    pub fn node_types(&self) -> Ref<NodeMap<Ty<'tcx>>> {
+        fn projection<'a, 'tcx>(tables: &'a Tables<'tcx>) -> &'a NodeMap<Ty<'tcx>> {
+            &tables.node_types
+        }
+
+        Ref::map(self.tables.borrow(), projection)
+    }
+
+    pub fn node_type_insert(&self, id: NodeId, ty: Ty<'tcx>) {
+        self.tables.borrow_mut().node_types.insert(id, ty);
+    }
+
+    pub fn intern_trait_def(&self, def: ty::TraitDef<'tcx>)
+                            -> &'tcx ty::TraitDef<'tcx> {
+        let did = def.trait_ref.def_id;
+        let interned = self.arenas.trait_defs.alloc(def);
+        if let Some(prev) = self.trait_defs.borrow_mut().insert(did, interned) {
+            bug!("Tried to overwrite interned TraitDef: {:?}", prev)
+        }
+        interned
+    }
+
+    pub fn alloc_trait_def(&self, def: ty::TraitDef<'tcx>)
+                           -> &'tcx ty::TraitDef<'tcx> {
+        self.arenas.trait_defs.alloc(def)
+    }
+
+    pub fn intern_adt_def(&self,
+                          did: DefId,
+                          kind: ty::AdtKind,
+                          variants: Vec<ty::VariantDefData<'tcx, 'tcx>>)
+                          -> ty::AdtDefMaster<'tcx> {
+        let def = ty::AdtDefData::new(self, did, kind, variants);
+        let interned = self.arenas.adt_defs.alloc(def);
+        // this will need a transmute when reverse-variance is removed
+        if let Some(prev) = self.adt_defs.borrow_mut().insert(did, interned) {
+            bug!("Tried to overwrite interned AdtDef: {:?}", prev)
+        }
+        interned
+    }
+
+    pub fn intern_stability(&self, stab: attr::Stability) -> &'tcx attr::Stability {
+        if let Some(st) = self.stability_interner.borrow().get(&stab) {
+            return st;
+        }
+
+        let interned = self.arenas.stability.alloc(stab);
+        if let Some(prev) = self.stability_interner
+                                .borrow_mut()
+                                .insert(interned, interned) {
+            bug!("Tried to overwrite interned Stability: {:?}", prev)
+        }
+        interned
+    }
+
+    pub fn intern_layout(&self, layout: Layout) -> &'tcx Layout {
+        if let Some(layout) = self.layout_interner.borrow().get(&layout) {
+            return layout;
+        }
+
+        let interned = self.arenas.layout.alloc(layout);
+        if let Some(prev) = self.layout_interner
+                                .borrow_mut()
+                                .insert(interned, interned) {
+            bug!("Tried to overwrite interned Layout: {:?}", prev)
+        }
+        interned
+    }
+
+    pub fn store_free_region_map(&self, id: NodeId, map: FreeRegionMap) {
+        if self.free_region_maps.borrow_mut().insert(id, map).is_some() {
+            bug!("Tried to overwrite interned FreeRegionMap for NodeId {:?}", id)
+        }
+    }
+
+    pub fn free_region_map(&self, id: NodeId) -> FreeRegionMap {
+        self.free_region_maps.borrow()[&id].clone()
+    }
+
+    pub fn lift<T: ?Sized + Lift<'tcx>>(&self, value: &T) -> Option<T::Lifted> {
+        value.lift_to_tcx(self)
+    }
+
+    /// Create a type context and call the closure with a `&TyCtxt` reference
+    /// to the context. The closure enforces that the type context and any interned
+    /// value (types, substs, etc.) can only be used while `ty::tls` has a valid
+    /// reference to the context, to allow formatting values that need it.
+    pub fn create_and_enter<F, R>(s: &'tcx Session,
+                                 arenas: &'tcx CtxtArenas<'tcx>,
+                                 def_map: RefCell<DefMap>,
+                                 named_region_map: resolve_lifetime::NamedRegionMap,
+                                 map: ast_map::Map<'tcx>,
+                                 freevars: FreevarMap,
+                                 region_maps: RegionMaps,
+                                 lang_items: middle::lang_items::LanguageItems,
+                                 stability: stability::Index<'tcx>,
+                                 crate_name: &str,
+                                 f: F) -> R
+                                 where F: FnOnce(&TyCtxt<'tcx>) -> R
+    {
+        let data_layout = TargetDataLayout::parse(s);
+        let interner = RefCell::new(FnvHashMap());
+        let common_types = CommonTypes::new(&arenas.type_, &interner);
+        let dep_graph = map.dep_graph.clone();
+        let fulfilled_predicates = traits::GlobalFulfilledPredicates::new(dep_graph.clone());
+        tls::enter(TyCtxt {
+            arenas: arenas,
+            interner: interner,
+            substs_interner: RefCell::new(FnvHashMap()),
+            bare_fn_interner: RefCell::new(FnvHashMap()),
+            region_interner: RefCell::new(FnvHashMap()),
+            stability_interner: RefCell::new(FnvHashMap()),
+            layout_interner: RefCell::new(FnvHashMap()),
+            dep_graph: dep_graph.clone(),
+            types: common_types,
+            named_region_map: named_region_map,
+            region_maps: region_maps,
+            free_region_maps: RefCell::new(FnvHashMap()),
+            item_variance_map: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
+            variance_computed: Cell::new(false),
+            sess: s,
+            def_map: def_map,
+            tables: RefCell::new(Tables::empty()),
+            impl_trait_refs: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
+            trait_defs: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
+            adt_defs: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
+            predicates: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
+            super_predicates: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
+            fulfilled_predicates: RefCell::new(fulfilled_predicates),
+            map: map,
+            freevars: RefCell::new(freevars),
+            tcache: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
+            rcache: RefCell::new(FnvHashMap()),
+            tc_cache: RefCell::new(FnvHashMap()),
+            ast_ty_to_ty_cache: RefCell::new(NodeMap()),
+            impl_or_trait_items: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
+            trait_item_def_ids: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
+            trait_items_cache: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
+            ty_param_defs: RefCell::new(NodeMap()),
+            normalized_cache: RefCell::new(FnvHashMap()),
+            lang_items: lang_items,
+            inherent_impls: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
+            impl_items: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
+            used_unsafe: RefCell::new(NodeSet()),
+            used_mut_nodes: RefCell::new(NodeSet()),
+            populated_external_types: RefCell::new(DefIdSet()),
+            populated_external_primitive_impls: RefCell::new(DefIdSet()),
+            extern_const_statics: RefCell::new(DefIdMap()),
+            extern_const_fns: RefCell::new(DefIdMap()),
+            node_lint_levels: RefCell::new(FnvHashMap()),
+            stability: RefCell::new(stability),
+            selection_cache: traits::SelectionCache::new(),
+            evaluation_cache: traits::EvaluationCache::new(),
+            repr_hint_cache: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
+            const_qualif_map: RefCell::new(NodeMap()),
+            custom_coerce_unsized_kinds: RefCell::new(DefIdMap()),
+            cast_kinds: RefCell::new(NodeMap()),
+            fragment_infos: RefCell::new(DefIdMap()),
+            crate_name: token::intern_and_get_ident(crate_name),
+            data_layout: data_layout,
+            layout_cache: RefCell::new(FnvHashMap()),
+       }, f)
+    }
+}
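+
+// Illustrative sketch (not part of the upstream source) of the "create and
+// enter" shape used by `create_and_enter` above: the context only exists for
+// the duration of a caller-supplied closure, so nothing can hold on to it
+// past that scope.  `Ctxt` and `build_and_enter` are made-up names.
+//
+//     struct Ctxt { crate_name: String }
+//
+//     fn build_and_enter<F, R>(name: &str, f: F) -> R
+//         where F: FnOnce(&Ctxt) -> R
+//     {
+//         let cx = Ctxt { crate_name: name.to_string() };
+//         f(&cx)
+//     }
+//
+//     fn main() {
+//         let len = build_and_enter("demo", |cx| cx.crate_name.len());
+//         assert_eq!(len, 4);
+//     }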
+
+/// A trait implemented for all X<'a> types which can be safely and
+/// efficiently converted to X<'tcx> as long as they are part of the
+/// provided TyCtxt<'tcx>.
+/// This can be done, for example, for Ty<'tcx> or &'tcx Substs<'tcx>
+/// by looking them up in their respective interners.
+/// None is returned if the value or one of the components is not part
+/// of the provided context.
+/// For Ty, None can be returned if either the type interner doesn't
+/// contain the TypeVariants key or if the address of the interned
+/// pointer differs. The latter case is possible if a primitive type,
+/// e.g. `()` or `u8`, was interned in a different context.
+pub trait Lift<'tcx> {
+    type Lifted;
+    fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<Self::Lifted>;
+}
+
+impl<'a, 'tcx> Lift<'tcx> for Ty<'a> {
+    type Lifted = Ty<'tcx>;
+    fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<Ty<'tcx>> {
+        if let Some(&ty) = tcx.interner.borrow().get(&self.sty) {
+            if *self as *const _ == ty as *const _ {
+                return Some(ty);
+            }
+        }
+        None
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for &'a Substs<'a> {
+    type Lifted = &'tcx Substs<'tcx>;
+    fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<&'tcx Substs<'tcx>> {
+        if let Some(&substs) = tcx.substs_interner.borrow().get(*self) {
+            if *self as *const _ == substs as *const _ {
+                return Some(substs);
+            }
+        }
+        None
+    }
+}
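+
+// Illustrative sketch (not part of the upstream source) of the check that the
+// `Lift` impls above perform: look the value up in the owning interner and
+// accept it only if the address is identical, i.e. it really was interned in
+// this context.  The `Interner` type and `lift` function are made up here.
+//
+//     use std::collections::HashSet;
+//
+//     struct Interner { strings: HashSet<&'static str> }
+//
+//     fn lift<'a>(interner: &'a Interner, s: &str) -> Option<&'a str> {
+//         interner.strings.get(s).cloned()
+//             .filter(|&interned| std::ptr::eq(interned, s))
+//     }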
+
+
+pub mod tls {
+    use ty::TyCtxt;
+
+    use std::cell::Cell;
+    use std::fmt;
+    use syntax::codemap;
+
+    /// Marker type used for the scoped TLS slot.
+    /// The type context cannot be used directly because the scoped TLS
+    /// in libstd doesn't allow types generic over lifetimes.
+    struct ThreadLocalTyCx;
+
+    thread_local! {
+        static TLS_TCX: Cell<Option<*const ThreadLocalTyCx>> = Cell::new(None)
+    }
+
+    fn span_debug(span: codemap::Span, f: &mut fmt::Formatter) -> fmt::Result {
+        with(|tcx| {
+            write!(f, "{}", tcx.sess.codemap().span_to_string(span))
+        })
+    }
+
+    pub fn enter<'tcx, F: FnOnce(&TyCtxt<'tcx>) -> R, R>(tcx: TyCtxt<'tcx>, f: F) -> R {
+        codemap::SPAN_DEBUG.with(|span_dbg| {
+            let original_span_debug = span_dbg.get();
+            span_dbg.set(span_debug);
+            let tls_ptr = &tcx as *const _ as *const ThreadLocalTyCx;
+            let result = TLS_TCX.with(|tls| {
+                let prev = tls.get();
+                tls.set(Some(tls_ptr));
+                let ret = f(&tcx);
+                tls.set(prev);
+                ret
+            });
+            span_dbg.set(original_span_debug);
+            result
+        })
+    }
+
+    pub fn with<F: FnOnce(&TyCtxt) -> R, R>(f: F) -> R {
+        TLS_TCX.with(|tcx| {
+            let tcx = tcx.get().unwrap();
+            f(unsafe { &*(tcx as *const TyCtxt) })
+        })
+    }
+
+    pub fn with_opt<F: FnOnce(Option<&TyCtxt>) -> R, R>(f: F) -> R {
+        if TLS_TCX.with(|tcx| tcx.get().is_some()) {
+            with(|v| f(Some(v)))
+        } else {
+            f(None)
+        }
+    }
+}
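+
+// Illustrative sketch (not part of the upstream source) of the scoped-TLS
+// trick used in the `tls` module above: stash a raw pointer in a thread-local
+// slot for the duration of a closure, restore the previous value afterwards,
+// and cast it back on read.  `Cx`, `enter` and `with` are made-up stand-ins
+// (the real module additionally erases the `'tcx` lifetime via a marker type).
+//
+//     use std::cell::Cell;
+//
+//     struct Cx { depth: u32 }
+//
+//     thread_local!(static SLOT: Cell<Option<*const Cx>> = Cell::new(None));
+//
+//     fn enter<F, R>(cx: &Cx, f: F) -> R where F: FnOnce() -> R {
+//         SLOT.with(|slot| {
+//             let prev = slot.get();
+//             slot.set(Some(cx as *const Cx));
+//             let result = f();
+//             slot.set(prev);
+//             result
+//         })
+//     }
+//
+//     fn with<F, R>(f: F) -> R where F: FnOnce(&Cx) -> R {
+//         SLOT.with(|slot| {
+//             let ptr = slot.get().expect("not inside `enter`");
+//             f(unsafe { &*ptr })
+//         })
+//     }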
+
+macro_rules! sty_debug_print {
+    ($ctxt: expr, $($variant: ident),*) => {{
+        // curious inner module to allow variant names to be used as
+        // variable names.
+        #[allow(non_snake_case)]
+        mod inner {
+            use ty::{self, TyCtxt};
+            #[derive(Copy, Clone)]
+            struct DebugStat {
+                total: usize,
+                region_infer: usize,
+                ty_infer: usize,
+                both_infer: usize,
+            }
+
+            pub fn go(tcx: &TyCtxt) {
+                let mut total = DebugStat {
+                    total: 0,
+                    region_infer: 0, ty_infer: 0, both_infer: 0,
+                };
+                $(let mut $variant = total;)*
+
+
+                for (_, t) in tcx.interner.borrow().iter() {
+                    let variant = match t.sty {
+                        ty::TyBool | ty::TyChar | ty::TyInt(..) | ty::TyUint(..) |
+                            ty::TyFloat(..) | ty::TyStr => continue,
+                        ty::TyError => /* unimportant */ continue,
+                        $(ty::$variant(..) => &mut $variant,)*
+                    };
+                    let region = t.flags.get().intersects(ty::TypeFlags::HAS_RE_INFER);
+                    let ty = t.flags.get().intersects(ty::TypeFlags::HAS_TY_INFER);
+
+                    variant.total += 1;
+                    total.total += 1;
+                    if region { total.region_infer += 1; variant.region_infer += 1 }
+                    if ty { total.ty_infer += 1; variant.ty_infer += 1 }
+                    if region && ty { total.both_infer += 1; variant.both_infer += 1 }
+                }
+                println!("Ty interner             total           ty region  both");
+                $(println!("    {:18}: {uses:6} {usespc:4.1}%, \
+{ty:4.1}% {region:5.1}% {both:4.1}%",
+                           stringify!($variant),
+                           uses = $variant.total,
+                           usespc = $variant.total as f64 * 100.0 / total.total as f64,
+                           ty = $variant.ty_infer as f64 * 100.0  / total.total as f64,
+                           region = $variant.region_infer as f64 * 100.0  / total.total as f64,
+                           both = $variant.both_infer as f64 * 100.0  / total.total as f64);
+                  )*
+                println!("                  total {uses:6}        \
+{ty:4.1}% {region:5.1}% {both:4.1}%",
+                         uses = total.total,
+                         ty = total.ty_infer as f64 * 100.0  / total.total as f64,
+                         region = total.region_infer as f64 * 100.0  / total.total as f64,
+                         both = total.both_infer as f64 * 100.0  / total.total as f64)
+            }
+        }
+
+        inner::go($ctxt)
+    }}
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    pub fn print_debug_stats(&self) {
+        sty_debug_print!(
+            self,
+            TyEnum, TyBox, TyArray, TySlice, TyRawPtr, TyRef, TyFnDef, TyFnPtr,
+            TyTrait, TyStruct, TyClosure, TyTuple, TyParam, TyInfer, TyProjection);
+
+        println!("Substs interner: #{}", self.substs_interner.borrow().len());
+        println!("BareFnTy interner: #{}", self.bare_fn_interner.borrow().len());
+        println!("Region interner: #{}", self.region_interner.borrow().len());
+        println!("Stability interner: #{}", self.stability_interner.borrow().len());
+        println!("Layout interner: #{}", self.layout_interner.borrow().len());
+    }
+}
+
+
+/// An entry in the type interner.
+pub struct InternedTy<'tcx> {
+    ty: Ty<'tcx>
+}
+
+// NB: An InternedTy compares and hashes as a sty.
+impl<'tcx> PartialEq for InternedTy<'tcx> {
+    fn eq(&self, other: &InternedTy<'tcx>) -> bool {
+        self.ty.sty == other.ty.sty
+    }
+}
+
+impl<'tcx> Eq for InternedTy<'tcx> {}
+
+impl<'tcx> Hash for InternedTy<'tcx> {
+    fn hash<H: Hasher>(&self, s: &mut H) {
+        self.ty.sty.hash(s)
+    }
+}
+
+impl<'tcx> Borrow<TypeVariants<'tcx>> for InternedTy<'tcx> {
+    fn borrow<'a>(&'a self) -> &'a TypeVariants<'tcx> {
+        &self.ty.sty
+    }
+}
+
+fn bound_list_is_sorted(bounds: &[ty::PolyProjectionPredicate]) -> bool {
+    bounds.is_empty() ||
+        bounds[1..].iter().enumerate().all(
+            |(index, bound)| bounds[index].sort_key() <= bound.sort_key())
+}
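+
+// Equivalent standalone form of the check above (illustrative only, not part
+// of the upstream source): every element must be <= its successor, which is
+// vacuously true for empty and single-element slices.
+//
+//     fn is_sorted_by_key<T, K, F>(xs: &[T], key: F) -> bool
+//         where K: Ord, F: Fn(&T) -> K
+//     {
+//         xs.windows(2).all(|w| key(&w[0]) <= key(&w[1]))
+//     }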
+
+impl<'tcx> TyCtxt<'tcx> {
+    // Type constructors
+    pub fn mk_substs(&self, substs: Substs<'tcx>) -> &'tcx Substs<'tcx> {
+        if let Some(substs) = self.substs_interner.borrow().get(&substs) {
+            return *substs;
+        }
+
+        let substs = self.arenas.substs.alloc(substs);
+        self.substs_interner.borrow_mut().insert(substs, substs);
+        substs
+    }
+
+    /// Create an unsafe fn ty based on a safe fn ty.
+    pub fn safe_to_unsafe_fn_ty(&self, bare_fn: &BareFnTy<'tcx>) -> Ty<'tcx> {
+        assert_eq!(bare_fn.unsafety, hir::Unsafety::Normal);
+        self.mk_fn_ptr(ty::BareFnTy {
+            unsafety: hir::Unsafety::Unsafe,
+            abi: bare_fn.abi,
+            sig: bare_fn.sig.clone()
+        })
+    }
+
+    pub fn mk_bare_fn(&self, bare_fn: BareFnTy<'tcx>) -> &'tcx BareFnTy<'tcx> {
+        if let Some(bare_fn) = self.bare_fn_interner.borrow().get(&bare_fn) {
+            return *bare_fn;
+        }
+
+        let bare_fn = self.arenas.bare_fn.alloc(bare_fn);
+        self.bare_fn_interner.borrow_mut().insert(bare_fn, bare_fn);
+        bare_fn
+    }
+
+    pub fn mk_region(&self, region: Region) -> &'tcx Region {
+        if let Some(region) = self.region_interner.borrow().get(&region) {
+            return *region;
+        }
+
+        let region = self.arenas.region.alloc(region);
+        self.region_interner.borrow_mut().insert(region, region);
+        region
+    }
+
+    fn intern_ty(type_arena: &'tcx TypedArena<TyS<'tcx>>,
+                 interner: &RefCell<FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>>,
+                 st: TypeVariants<'tcx>)
+                 -> Ty<'tcx> {
+        let ty: Ty /* don't be &mut TyS */ = {
+            let mut interner = interner.borrow_mut();
+            match interner.get(&st) {
+                Some(ty) => return *ty,
+                _ => ()
+            }
+
+            let flags = super::flags::FlagComputation::for_sty(&st);
+
+            let ty = match () {
+                () => type_arena.alloc(TyS { sty: st,
+                                             flags: Cell::new(flags.flags),
+                                             region_depth: flags.depth, }),
+            };
+
+            interner.insert(InternedTy { ty: ty }, ty);
+            ty
+        };
+
+        debug!("Interned type: {:?} Pointer: {:?}",
+            ty, ty as *const TyS);
+        ty
+    }
+
+    // Interns a type/name combination, stores the resulting box in cx.interner,
+    // and returns the box cast to an unsafe ptr (see comments for Ty above).
+    pub fn mk_ty(&self, st: TypeVariants<'tcx>) -> Ty<'tcx> {
+        TyCtxt::intern_ty(&self.arenas.type_, &self.interner, st)
+    }
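+
+    // Usage note (illustrative, not part of the upstream source): because
+    // `intern_ty` hash-conses on the `sty`, asking for the "same" type twice
+    // hands back the identical arena pointer, e.g. for a hypothetical caller:
+    //
+    //     let a = tcx.mk_ty(TyBool);
+    //     assert!(a as *const _ == tcx.types.bool as *const _);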
+
+    pub fn mk_mach_int(&self, tm: ast::IntTy) -> Ty<'tcx> {
+        match tm {
+            ast::IntTy::Is   => self.types.isize,
+            ast::IntTy::I8   => self.types.i8,
+            ast::IntTy::I16  => self.types.i16,
+            ast::IntTy::I32  => self.types.i32,
+            ast::IntTy::I64  => self.types.i64,
+        }
+    }
+
+    pub fn mk_mach_uint(&self, tm: ast::UintTy) -> Ty<'tcx> {
+        match tm {
+            ast::UintTy::Us   => self.types.usize,
+            ast::UintTy::U8   => self.types.u8,
+            ast::UintTy::U16  => self.types.u16,
+            ast::UintTy::U32  => self.types.u32,
+            ast::UintTy::U64  => self.types.u64,
+        }
+    }
+
+    pub fn mk_mach_float(&self, tm: ast::FloatTy) -> Ty<'tcx> {
+        match tm {
+            ast::FloatTy::F32  => self.types.f32,
+            ast::FloatTy::F64  => self.types.f64,
+        }
+    }
+
+    pub fn mk_str(&self) -> Ty<'tcx> {
+        self.mk_ty(TyStr)
+    }
+
+    pub fn mk_static_str(&self) -> Ty<'tcx> {
+        self.mk_imm_ref(self.mk_region(ty::ReStatic), self.mk_str())
+    }
+
+    pub fn mk_enum(&self, def: AdtDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
+        // take a copy of substs so that we own the vectors inside
+        self.mk_ty(TyEnum(def, substs))
+    }
+
+    pub fn mk_box(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        self.mk_ty(TyBox(ty))
+    }
+
+    pub fn mk_ptr(&self, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
+        self.mk_ty(TyRawPtr(tm))
+    }
+
+    pub fn mk_ref(&self, r: &'tcx Region, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
+        self.mk_ty(TyRef(r, tm))
+    }
+
+    pub fn mk_mut_ref(&self, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
+        self.mk_ref(r, TypeAndMut {ty: ty, mutbl: hir::MutMutable})
+    }
+
+    pub fn mk_imm_ref(&self, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
+        self.mk_ref(r, TypeAndMut {ty: ty, mutbl: hir::MutImmutable})
+    }
+
+    pub fn mk_mut_ptr(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        self.mk_ptr(TypeAndMut {ty: ty, mutbl: hir::MutMutable})
+    }
+
+    pub fn mk_imm_ptr(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        self.mk_ptr(TypeAndMut {ty: ty, mutbl: hir::MutImmutable})
+    }
+
+    pub fn mk_nil_ptr(&self) -> Ty<'tcx> {
+        self.mk_imm_ptr(self.mk_nil())
+    }
+
+    pub fn mk_array(&self, ty: Ty<'tcx>, n: usize) -> Ty<'tcx> {
+        self.mk_ty(TyArray(ty, n))
+    }
+
+    pub fn mk_slice(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        self.mk_ty(TySlice(ty))
+    }
+
+    pub fn mk_tup(&self, ts: Vec<Ty<'tcx>>) -> Ty<'tcx> {
+        self.mk_ty(TyTuple(ts))
+    }
+
+    pub fn mk_nil(&self) -> Ty<'tcx> {
+        self.mk_tup(Vec::new())
+    }
+
+    pub fn mk_bool(&self) -> Ty<'tcx> {
+        self.mk_ty(TyBool)
+    }
+
+    pub fn mk_fn_def(&self, def_id: DefId,
+                     substs: &'tcx Substs<'tcx>,
+                     fty: BareFnTy<'tcx>) -> Ty<'tcx> {
+        self.mk_ty(TyFnDef(def_id, substs, self.mk_bare_fn(fty)))
+    }
+
+    pub fn mk_fn_ptr(&self, fty: BareFnTy<'tcx>) -> Ty<'tcx> {
+        self.mk_ty(TyFnPtr(self.mk_bare_fn(fty)))
+    }
+
+    pub fn mk_trait(&self,
+                    principal: ty::PolyTraitRef<'tcx>,
+                    bounds: ExistentialBounds<'tcx>)
+                    -> Ty<'tcx>
+    {
+        assert!(bound_list_is_sorted(&bounds.projection_bounds));
+
+        let inner = box TraitTy {
+            principal: principal,
+            bounds: bounds
+        };
+        self.mk_ty(TyTrait(inner))
+    }
+
+    pub fn mk_projection(&self,
+                         trait_ref: TraitRef<'tcx>,
+                         item_name: Name)
+                         -> Ty<'tcx> {
+        // take a copy of substs so that we own the vectors inside
+        let inner = ProjectionTy { trait_ref: trait_ref, item_name: item_name };
+        self.mk_ty(TyProjection(inner))
+    }
+
+    pub fn mk_struct(&self, def: AdtDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
+        // take a copy of substs so that we own the vectors inside
+        self.mk_ty(TyStruct(def, substs))
+    }
+
+    pub fn mk_closure(&self,
+                      closure_id: DefId,
+                      substs: &'tcx Substs<'tcx>,
+                      tys: Vec<Ty<'tcx>>)
+                      -> Ty<'tcx> {
+        self.mk_closure_from_closure_substs(closure_id, Box::new(ClosureSubsts {
+            func_substs: substs,
+            upvar_tys: tys
+        }))
+    }
+
+    pub fn mk_closure_from_closure_substs(&self,
+                                          closure_id: DefId,
+                                          closure_substs: Box<ClosureSubsts<'tcx>>)
+                                          -> Ty<'tcx> {
+        self.mk_ty(TyClosure(closure_id, closure_substs))
+    }
+
+    pub fn mk_var(&self, v: TyVid) -> Ty<'tcx> {
+        self.mk_infer(TyVar(v))
+    }
+
+    pub fn mk_int_var(&self, v: IntVid) -> Ty<'tcx> {
+        self.mk_infer(IntVar(v))
+    }
+
+    pub fn mk_float_var(&self, v: FloatVid) -> Ty<'tcx> {
+        self.mk_infer(FloatVar(v))
+    }
+
+    pub fn mk_infer(&self, it: InferTy) -> Ty<'tcx> {
+        self.mk_ty(TyInfer(it))
+    }
+
+    pub fn mk_param(&self,
+                    space: subst::ParamSpace,
+                    index: u32,
+                    name: Name) -> Ty<'tcx> {
+        self.mk_ty(TyParam(ParamTy { space: space, idx: index, name: name }))
+    }
+
+    pub fn mk_self_type(&self) -> Ty<'tcx> {
+        self.mk_param(subst::SelfSpace, 0, special_idents::type_self.name)
+    }
+
+    pub fn mk_param_from_def(&self, def: &ty::TypeParameterDef) -> Ty<'tcx> {
+        self.mk_param(def.space, def.index, def.name)
+    }
+
+    pub fn trait_items(&self, trait_did: DefId) -> Rc<Vec<ty::ImplOrTraitItem<'tcx>>> {
+        self.trait_items_cache.memoize(trait_did, || {
+            let def_ids = self.trait_item_def_ids(trait_did);
+            Rc::new(def_ids.iter()
+                           .map(|d| self.impl_or_trait_item(d.def_id()))
+                           .collect())
+        })
+    }
+
+    /// Obtain the representation annotation for a struct definition.
+    pub fn lookup_repr_hints(&self, did: DefId) -> Rc<Vec<attr::ReprAttr>> {
+        self.repr_hint_cache.memoize(did, || {
+            Rc::new(if did.is_local() {
+                self.get_attrs(did).iter().flat_map(|meta| {
+                    attr::find_repr_attrs(self.sess.diagnostic(), meta).into_iter()
+                }).collect()
+            } else {
+                self.sess.cstore.repr_attrs(did)
+            })
+        })
+    }
+}
diff --git a/src/librustc/ty/error.rs b/src/librustc/ty/error.rs
new file mode 100644 (file)
index 0000000..73bafc5
--- /dev/null
@@ -0,0 +1,343 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use hir::def_id::DefId;
+use ty::subst;
+use infer::type_variable;
+use ty::{self, BoundRegion, Region, Ty, TyCtxt};
+
+use std::fmt;
+use syntax::abi;
+use syntax::ast::{self, Name};
+use syntax::codemap::Span;
+use syntax::errors::DiagnosticBuilder;
+
+use hir;
+
+#[derive(Clone, Copy, Debug)]
+pub struct ExpectedFound<T> {
+    pub expected: T,
+    pub found: T
+}
+
+// Data structures used in type unification
+#[derive(Clone, Debug)]
+pub enum TypeError<'tcx> {
+    Mismatch,
+    UnsafetyMismatch(ExpectedFound<hir::Unsafety>),
+    AbiMismatch(ExpectedFound<abi::Abi>),
+    Mutability,
+    BoxMutability,
+    PtrMutability,
+    RefMutability,
+    VecMutability,
+    TupleSize(ExpectedFound<usize>),
+    FixedArraySize(ExpectedFound<usize>),
+    TyParamSize(ExpectedFound<usize>),
+    ArgCount,
+    RegionsDoesNotOutlive(Region, Region),
+    RegionsNotSame(Region, Region),
+    RegionsNoOverlap(Region, Region),
+    RegionsInsufficientlyPolymorphic(BoundRegion, Region),
+    RegionsOverlyPolymorphic(BoundRegion, Region),
+    Sorts(ExpectedFound<Ty<'tcx>>),
+    IntegerAsChar,
+    IntMismatch(ExpectedFound<ty::IntVarValue>),
+    FloatMismatch(ExpectedFound<ast::FloatTy>),
+    Traits(ExpectedFound<DefId>),
+    BuiltinBoundsMismatch(ExpectedFound<ty::BuiltinBounds>),
+    VariadicMismatch(ExpectedFound<bool>),
+    CyclicTy,
+    ConvergenceMismatch(ExpectedFound<bool>),
+    ProjectionNameMismatched(ExpectedFound<Name>),
+    ProjectionBoundsLength(ExpectedFound<usize>),
+    TyParamDefaultMismatch(ExpectedFound<type_variable::Default<'tcx>>)
+}
+
+#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Hash, Debug, Copy)]
+pub enum UnconstrainedNumeric {
+    UnconstrainedFloat,
+    UnconstrainedInt,
+    Neither,
+}
+
+/// Explains the source of a type error in a short, human-readable way. This is meant to be placed
+/// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()`
+/// afterwards to present additional details, particularly when it comes to lifetime-related
+/// errors.
+impl<'tcx> fmt::Display for TypeError<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        use self::TypeError::*;
+        fn report_maybe_different(f: &mut fmt::Formatter,
+                                  expected: String, found: String) -> fmt::Result {
+            // A naive approach to making sure that we're not reporting silly errors such as:
+            // (expected closure, found closure).
+            if expected == found {
+                write!(f, "expected {}, found a different {}", expected, found)
+            } else {
+                write!(f, "expected {}, found {}", expected, found)
+            }
+        }
+
+        match *self {
+            CyclicTy => write!(f, "cyclic type of infinite size"),
+            Mismatch => write!(f, "types differ"),
+            UnsafetyMismatch(values) => {
+                write!(f, "expected {} fn, found {} fn",
+                       values.expected,
+                       values.found)
+            }
+            AbiMismatch(values) => {
+                write!(f, "expected {} fn, found {} fn",
+                       values.expected,
+                       values.found)
+            }
+            Mutability => write!(f, "values differ in mutability"),
+            BoxMutability => {
+                write!(f, "boxed values differ in mutability")
+            }
+            VecMutability => write!(f, "vectors differ in mutability"),
+            PtrMutability => write!(f, "pointers differ in mutability"),
+            RefMutability => write!(f, "references differ in mutability"),
+            TyParamSize(values) => {
+                write!(f, "expected a type with {} type params, \
+                           found one with {} type params",
+                       values.expected,
+                       values.found)
+            }
+            FixedArraySize(values) => {
+                write!(f, "expected an array with a fixed size of {} elements, \
+                           found one with {} elements",
+                       values.expected,
+                       values.found)
+            }
+            TupleSize(values) => {
+                write!(f, "expected a tuple with {} elements, \
+                           found one with {} elements",
+                       values.expected,
+                       values.found)
+            }
+            ArgCount => {
+                write!(f, "incorrect number of function parameters")
+            }
+            RegionsDoesNotOutlive(..) => {
+                write!(f, "lifetime mismatch")
+            }
+            RegionsNotSame(..) => {
+                write!(f, "lifetimes are not the same")
+            }
+            RegionsNoOverlap(..) => {
+                write!(f, "lifetimes do not intersect")
+            }
+            RegionsInsufficientlyPolymorphic(br, _) => {
+                write!(f, "expected bound lifetime parameter {}, \
+                           found concrete lifetime", br)
+            }
+            RegionsOverlyPolymorphic(br, _) => {
+                write!(f, "expected concrete lifetime, \
+                           found bound lifetime parameter {}", br)
+            }
+            Sorts(values) => ty::tls::with(|tcx| {
+                report_maybe_different(f, values.expected.sort_string(tcx),
+                                       values.found.sort_string(tcx))
+            }),
+            Traits(values) => ty::tls::with(|tcx| {
+                report_maybe_different(f,
+                                       format!("trait `{}`",
+                                               tcx.item_path_str(values.expected)),
+                                       format!("trait `{}`",
+                                               tcx.item_path_str(values.found)))
+            }),
+            BuiltinBoundsMismatch(values) => {
+                if values.expected.is_empty() {
+                    write!(f, "expected no bounds, found `{}`",
+                           values.found)
+                } else if values.found.is_empty() {
+                    write!(f, "expected bounds `{}`, found no bounds",
+                           values.expected)
+                } else {
+                    write!(f, "expected bounds `{}`, found bounds `{}`",
+                           values.expected,
+                           values.found)
+                }
+            }
+            IntegerAsChar => {
+                write!(f, "expected an integral type, found `char`")
+            }
+            IntMismatch(ref values) => {
+                write!(f, "expected `{:?}`, found `{:?}`",
+                       values.expected,
+                       values.found)
+            }
+            FloatMismatch(ref values) => {
+                write!(f, "expected `{:?}`, found `{:?}`",
+                       values.expected,
+                       values.found)
+            }
+            VariadicMismatch(ref values) => {
+                write!(f, "expected {} fn, found {} function",
+                       if values.expected { "variadic" } else { "non-variadic" },
+                       if values.found { "variadic" } else { "non-variadic" })
+            }
+            ConvergenceMismatch(ref values) => {
+                write!(f, "expected {} fn, found {} function",
+                       if values.expected { "converging" } else { "diverging" },
+                       if values.found { "converging" } else { "diverging" })
+            }
+            ProjectionNameMismatched(ref values) => {
+                write!(f, "expected {}, found {}",
+                       values.expected,
+                       values.found)
+            }
+            ProjectionBoundsLength(ref values) => {
+                write!(f, "expected {} associated type bindings, found {}",
+                       values.expected,
+                       values.found)
+            },
+            TyParamDefaultMismatch(ref values) => {
+                write!(f, "conflicting type parameter defaults `{}` and `{}`",
+                       values.expected.ty,
+                       values.found.ty)
+            }
+        }
+    }
+}
+
+impl<'tcx> ty::TyS<'tcx> {
+    fn sort_string(&self, cx: &TyCtxt) -> String {
+        match self.sty {
+            ty::TyBool | ty::TyChar | ty::TyInt(_) |
+            ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr => self.to_string(),
+            ty::TyTuple(ref tys) if tys.is_empty() => self.to_string(),
+
+            ty::TyEnum(def, _) => format!("enum `{}`", cx.item_path_str(def.did)),
+            ty::TyBox(_) => "box".to_string(),
+            ty::TyArray(_, n) => format!("array of {} elements", n),
+            ty::TySlice(_) => "slice".to_string(),
+            ty::TyRawPtr(_) => "*-ptr".to_string(),
+            ty::TyRef(_, _) => "&-ptr".to_string(),
+            ty::TyFnDef(..) => format!("fn item"),
+            ty::TyFnPtr(_) => "fn pointer".to_string(),
+            ty::TyTrait(ref inner) => {
+                format!("trait {}", cx.item_path_str(inner.principal_def_id()))
+            }
+            ty::TyStruct(def, _) => {
+                format!("struct `{}`", cx.item_path_str(def.did))
+            }
+            ty::TyClosure(..) => "closure".to_string(),
+            ty::TyTuple(_) => "tuple".to_string(),
+            ty::TyInfer(ty::TyVar(_)) => "inferred type".to_string(),
+            ty::TyInfer(ty::IntVar(_)) => "integral variable".to_string(),
+            ty::TyInfer(ty::FloatVar(_)) => "floating-point variable".to_string(),
+            ty::TyInfer(ty::FreshTy(_)) => "skolemized type".to_string(),
+            ty::TyInfer(ty::FreshIntTy(_)) => "skolemized integral type".to_string(),
+            ty::TyInfer(ty::FreshFloatTy(_)) => "skolemized floating-point type".to_string(),
+            ty::TyProjection(_) => "associated type".to_string(),
+            ty::TyParam(ref p) => {
+                if p.space == subst::SelfSpace {
+                    "Self".to_string()
+                } else {
+                    "type parameter".to_string()
+                }
+            }
+            ty::TyError => "type error".to_string(),
+        }
+    }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    pub fn note_and_explain_type_err(&self,
+                                     db: &mut DiagnosticBuilder,
+                                     err: &TypeError<'tcx>,
+                                     sp: Span) {
+        use self::TypeError::*;
+
+        match err.clone() {
+            RegionsDoesNotOutlive(subregion, superregion) => {
+                self.note_and_explain_region(db, "", subregion, "...");
+                self.note_and_explain_region(db, "...does not necessarily outlive ",
+                                           superregion, "");
+            }
+            RegionsNotSame(region1, region2) => {
+                self.note_and_explain_region(db, "", region1, "...");
+                self.note_and_explain_region(db, "...is not the same lifetime as ",
+                                           region2, "");
+            }
+            RegionsNoOverlap(region1, region2) => {
+                self.note_and_explain_region(db, "", region1, "...");
+                self.note_and_explain_region(db, "...does not overlap ",
+                                           region2, "");
+            }
+            RegionsInsufficientlyPolymorphic(_, conc_region) => {
+                self.note_and_explain_region(db, "concrete lifetime that was found is ",
+                                           conc_region, "");
+            }
+            RegionsOverlyPolymorphic(_, ty::ReVar(_)) => {
+                // Don't bother to print out the message below for
+                // inference variables; it's not very illuminating.
+            }
+            RegionsOverlyPolymorphic(_, conc_region) => {
+                self.note_and_explain_region(db, "expected concrete lifetime is ",
+                                           conc_region, "");
+            }
+            Sorts(values) => {
+                let expected_str = values.expected.sort_string(self);
+                let found_str = values.found.sort_string(self);
+                if expected_str == found_str && expected_str == "closure" {
+                    db.span_note(sp,
+                        "no two closures, even if identical, have the same type");
+                    db.span_help(sp,
+                        "consider boxing your closure and/or using it as a trait object");
+                }
+            },
+            TyParamDefaultMismatch(values) => {
+                let expected = values.expected;
+                let found = values.found;
+                db.span_note(sp, &format!("conflicting type parameter defaults `{}` and `{}`",
+                                          expected.ty,
+                                          found.ty));
+
+                match
+                    self.map.as_local_node_id(expected.def_id)
+                            .and_then(|node_id| self.map.opt_span(node_id))
+                {
+                    Some(span) => {
+                        db.span_note(span, "a default was defined here...");
+                    }
+                    None => {
+                        db.note(&format!("a default is defined on `{}`",
+                                         self.item_path_str(expected.def_id)));
+                    }
+                }
+
+                db.span_note(
+                    expected.origin_span,
+                    "...that was applied to an unconstrained type variable here");
+
+                match
+                    self.map.as_local_node_id(found.def_id)
+                            .and_then(|node_id| self.map.opt_span(node_id))
+                {
+                    Some(span) => {
+                        db.span_note(span, "a second default was defined here...");
+                    }
+                    None => {
+                        db.note(&format!("a second default is defined on `{}`",
+                                         self.item_path_str(found.def_id)));
+                    }
+                }
+
+                db.span_note(found.origin_span,
+                             "...that also applies to the same type variable here");
+            }
+            _ => {}
+        }
+    }
+}
diff --git a/src/librustc/ty/fast_reject.rs b/src/librustc/ty/fast_reject.rs
new file mode 100644 (file)
index 0000000..2964725
--- /dev/null
@@ -0,0 +1,103 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use hir::def_id::DefId;
+use ty::{self, Ty, TyCtxt};
+use syntax::ast;
+
+use self::SimplifiedType::*;
+
+/// See `simplify_type`.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub enum SimplifiedType {
+    BoolSimplifiedType,
+    CharSimplifiedType,
+    IntSimplifiedType(ast::IntTy),
+    UintSimplifiedType(ast::UintTy),
+    FloatSimplifiedType(ast::FloatTy),
+    EnumSimplifiedType(DefId),
+    StrSimplifiedType,
+    VecSimplifiedType,
+    PtrSimplifiedType,
+    TupleSimplifiedType(usize),
+    TraitSimplifiedType(DefId),
+    StructSimplifiedType(DefId),
+    ClosureSimplifiedType(DefId),
+    FunctionSimplifiedType(usize),
+    ParameterSimplifiedType,
+}
+
+/// Tries to simplify a type by dropping type parameters, deref'ing away any reference types, etc.
+/// The idea is to get something simple that we can use to quickly decide if two types could unify
+/// during method lookup.
+///
+/// If `can_simplify_params` is false, then we will fail to simplify type parameters entirely. This
+/// is useful when those type parameters would be instantiated with fresh type variables, since
+/// then we can't say much about whether two types would unify. Put another way,
+/// `can_simplify_params` should be true if type parameters appear free in `ty` and `false` if they
+/// are to be considered bound.
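+///
+/// For example, `&Vec<u8>` and `Vec<T>` both simplify to the `Vec` struct's
+/// `StructSimplifiedType` (the reference is deref'd away and the type
+/// parameters are dropped), so method lookup must still consider that they
+/// could unify, whereas `u32` and `String` simplify to different variants and
+/// can be rejected immediately.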
+pub fn simplify_type(tcx: &TyCtxt,
+                     ty: Ty,
+                     can_simplify_params: bool)
+                     -> Option<SimplifiedType>
+{
+    match ty.sty {
+        ty::TyBool => Some(BoolSimplifiedType),
+        ty::TyChar => Some(CharSimplifiedType),
+        ty::TyInt(int_type) => Some(IntSimplifiedType(int_type)),
+        ty::TyUint(uint_type) => Some(UintSimplifiedType(uint_type)),
+        ty::TyFloat(float_type) => Some(FloatSimplifiedType(float_type)),
+        ty::TyEnum(def, _) => Some(EnumSimplifiedType(def.did)),
+        ty::TyStr => Some(StrSimplifiedType),
+        ty::TyArray(..) | ty::TySlice(_) => Some(VecSimplifiedType),
+        ty::TyRawPtr(_) => Some(PtrSimplifiedType),
+        ty::TyTrait(ref trait_info) => {
+            Some(TraitSimplifiedType(trait_info.principal_def_id()))
+        }
+        ty::TyStruct(def, _) => {
+            Some(StructSimplifiedType(def.did))
+        }
+        ty::TyRef(_, mt) => {
+            // since we introduce auto-refs during method lookup, we
+            // just treat &T and T as equivalent from the point of
+            // view of possibly unifying
+            simplify_type(tcx, mt.ty, can_simplify_params)
+        }
+        ty::TyBox(_) => {
+            // treat like we would treat `Box`
+            match tcx.lang_items.require_owned_box() {
+                Ok(def_id) => Some(StructSimplifiedType(def_id)),
+                Err(msg) => tcx.sess.fatal(&msg),
+            }
+        }
+        ty::TyClosure(def_id, _) => {
+            Some(ClosureSimplifiedType(def_id))
+        }
+        ty::TyTuple(ref tys) => {
+            Some(TupleSimplifiedType(tys.len()))
+        }
+        ty::TyFnDef(_, _, ref f) | ty::TyFnPtr(ref f) => {
+            Some(FunctionSimplifiedType(f.sig.0.inputs.len()))
+        }
+        ty::TyProjection(_) | ty::TyParam(_) => {
+            if can_simplify_params {
+                // In normalized types, projections don't unify with
+                // anything. When lazy normalization happens, this
+                // will change. It would still be nice to have a way
+                // to deal with known-not-to-unify-with-anything
+                // projections (e.g. the likes of <__S as Encoder>::Error).
+                Some(ParameterSimplifiedType)
+            } else {
+                None
+            }
+        }
+        ty::TyInfer(_) | ty::TyError => None,
+    }
+}
diff --git a/src/librustc/ty/flags.rs b/src/librustc/ty/flags.rs
new file mode 100644 (file)
index 0000000..f3dccce
--- /dev/null
@@ -0,0 +1,205 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use ty::subst;
+use ty::{self, Ty, TypeFlags, TypeFoldable};
+
+pub struct FlagComputation {
+    pub flags: TypeFlags,
+
+    // maximum depth of any bound region that we have seen thus far
+    pub depth: u32,
+}
+
+impl FlagComputation {
+    fn new() -> FlagComputation {
+        FlagComputation { flags: TypeFlags::empty(), depth: 0 }
+    }
+
+    pub fn for_sty(st: &ty::TypeVariants) -> FlagComputation {
+        let mut result = FlagComputation::new();
+        result.add_sty(st);
+        result
+    }
+
+    fn add_flags(&mut self, flags: TypeFlags) {
+        self.flags = self.flags | (flags & TypeFlags::NOMINAL_FLAGS);
+    }
+
+    fn add_depth(&mut self, depth: u32) {
+        if depth > self.depth {
+            self.depth = depth;
+        }
+    }
+
+    /// Adds the flags/depth from a set of types that appear within the current type, but within a
+    /// region binder.
+    fn add_bound_computation(&mut self, computation: &FlagComputation) {
+        self.add_flags(computation.flags);
+
+        // The types that contributed to `computation` occurred within
+        // a region binder, so subtract one from the region depth
+        // within when adding the depth to `self`.
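+        //
+        // For example, for `for<'a> fn(&'a u8)` the computation built for the
+        // signature sees `'a` at depth 1, but relative to the enclosing type
+        // that binder is already accounted for, so it contributes depth 0 here.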
+        let depth = computation.depth;
+        if depth > 0 {
+            self.add_depth(depth - 1);
+        }
+    }
+
+    fn add_sty(&mut self, st: &ty::TypeVariants) {
+        match st {
+            &ty::TyBool |
+            &ty::TyChar |
+            &ty::TyInt(_) |
+            &ty::TyFloat(_) |
+            &ty::TyUint(_) |
+            &ty::TyStr => {
+            }
+
+            // You might think that we could just return TyError for
+            // any type containing TyError as a component, and get
+            // rid of the TypeFlags::HAS_TY_ERR flag -- likewise for ty_bot (with
+            // the exception of function types that return bot).
+            // But doing so caused sporadic memory corruption, and
+            // neither I (tjc) nor nmatsakis could figure out why,
+            // so we're doing it this way.
+            &ty::TyError => {
+                self.add_flags(TypeFlags::HAS_TY_ERR)
+            }
+
+            &ty::TyParam(ref p) => {
+                self.add_flags(TypeFlags::HAS_LOCAL_NAMES);
+                if p.space == subst::SelfSpace {
+                    self.add_flags(TypeFlags::HAS_SELF);
+                } else {
+                    self.add_flags(TypeFlags::HAS_PARAMS);
+                }
+            }
+
+            &ty::TyClosure(_, ref substs) => {
+                self.add_flags(TypeFlags::HAS_TY_CLOSURE);
+                self.add_flags(TypeFlags::HAS_LOCAL_NAMES);
+                self.add_substs(&substs.func_substs);
+                self.add_tys(&substs.upvar_tys);
+            }
+
+            &ty::TyInfer(_) => {
+                self.add_flags(TypeFlags::HAS_LOCAL_NAMES); // it might, right?
+                self.add_flags(TypeFlags::HAS_TY_INFER)
+            }
+
+            &ty::TyEnum(_, substs) | &ty::TyStruct(_, substs) => {
+                self.add_substs(substs);
+            }
+
+            &ty::TyProjection(ref data) => {
+                self.add_flags(TypeFlags::HAS_PROJECTION);
+                self.add_projection_ty(data);
+            }
+
+            &ty::TyTrait(box ty::TraitTy { ref principal, ref bounds }) => {
+                let mut computation = FlagComputation::new();
+                computation.add_substs(principal.0.substs);
+                for projection_bound in &bounds.projection_bounds {
+                    let mut proj_computation = FlagComputation::new();
+                    proj_computation.add_projection_predicate(&projection_bound.0);
+                    self.add_bound_computation(&proj_computation);
+                }
+                self.add_bound_computation(&computation);
+
+                self.add_bounds(bounds);
+            }
+
+            &ty::TyBox(tt) | &ty::TyArray(tt, _) | &ty::TySlice(tt) => {
+                self.add_ty(tt)
+            }
+
+            &ty::TyRawPtr(ref m) => {
+                self.add_ty(m.ty);
+            }
+
+            &ty::TyRef(r, ref m) => {
+                self.add_region(*r);
+                self.add_ty(m.ty);
+            }
+
+            &ty::TyTuple(ref ts) => {
+                self.add_tys(&ts[..]);
+            }
+
+            &ty::TyFnDef(_, substs, ref f) => {
+                self.add_substs(substs);
+                self.add_fn_sig(&f.sig);
+            }
+
+            &ty::TyFnPtr(ref f) => {
+                self.add_fn_sig(&f.sig);
+            }
+        }
+    }
+
+    fn add_ty(&mut self, ty: Ty) {
+        self.add_flags(ty.flags.get());
+        self.add_depth(ty.region_depth);
+    }
+
+    fn add_tys(&mut self, tys: &[Ty]) {
+        for &ty in tys {
+            self.add_ty(ty);
+        }
+    }
+
+    fn add_fn_sig(&mut self, fn_sig: &ty::PolyFnSig) {
+        let mut computation = FlagComputation::new();
+
+        computation.add_tys(&fn_sig.0.inputs);
+
+        if let ty::FnConverging(output) = fn_sig.0.output {
+            computation.add_ty(output);
+        }
+
+        self.add_bound_computation(&computation);
+    }
+
+    fn add_region(&mut self, r: ty::Region) {
+        match r {
+            ty::ReVar(..) |
+            ty::ReSkolemized(..) => { self.add_flags(TypeFlags::HAS_RE_INFER); }
+            ty::ReLateBound(debruijn, _) => { self.add_depth(debruijn.depth); }
+            ty::ReEarlyBound(..) => { self.add_flags(TypeFlags::HAS_RE_EARLY_BOUND); }
+            ty::ReStatic => {}
+            _ => { self.add_flags(TypeFlags::HAS_FREE_REGIONS); }
+        }
+
+        if !r.is_global() {
+            self.add_flags(TypeFlags::HAS_LOCAL_NAMES);
+        }
+    }
+
+    fn add_projection_predicate(&mut self, projection_predicate: &ty::ProjectionPredicate) {
+        self.add_projection_ty(&projection_predicate.projection_ty);
+        self.add_ty(projection_predicate.ty);
+    }
+
+    fn add_projection_ty(&mut self, projection_ty: &ty::ProjectionTy) {
+        self.add_substs(projection_ty.trait_ref.substs);
+    }
+
+    fn add_substs(&mut self, substs: &subst::Substs) {
+        self.add_tys(substs.types.as_slice());
+        for &r in &substs.regions {
+            self.add_region(r);
+        }
+    }
+
+    fn add_bounds(&mut self, bounds: &ty::ExistentialBounds) {
+        self.add_region(bounds.region_bound);
+    }
+}
diff --git a/src/librustc/ty/fold.rs b/src/librustc/ty/fold.rs
new file mode 100644 (file)
index 0000000..54223e1
--- /dev/null
@@ -0,0 +1,628 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Generalized type folding mechanism. The setup is a bit convoluted
+//! but allows for convenient usage. Let T be an instance of some
+//! "foldable type" (one which implements `TypeFoldable`) and F be an
+//! instance of a "folder" (a type which implements `TypeFolder`). Then
+//! the setup is intended to be:
+//!
+//!   T.fold_with(F) --calls--> F.fold_T(T) --calls--> T.super_fold_with(F)
+//!
+//! This way, when you define a new folder F, you can override
+//! `fold_T()` to customize the behavior, and invoke `T.super_fold_with()`
+//! to get the original behavior. Meanwhile, to actually fold
+//! something, you can just write `T.fold_with(F)`, which is
+//! convenient. (Note that `fold_with` will also transparently handle
+//! things like a `Vec<T>` where T is foldable and so on.)
+//!
+//! In this ideal setup, the only function that actually *does*
+//! anything is `T.super_fold_with()`, which traverses the type `T`.
+//! Moreover, `T.super_fold_with()` should only ever call `T.fold_with()`.
+//!
+//! In some cases, we follow a degenerate pattern where we do not have
+//! a `fold_T` method. Instead, `T.fold_with` traverses the structure directly.
+//! This is suboptimal because the behavior cannot be overridden, but it's
+//! much less work to implement. If you ever *do* need an override that
+//! doesn't exist, it's not hard to convert the degenerate pattern into the
+//! proper thing.
+//!
+//! A `TypeFoldable` T can also be visited by a `TypeVisitor` V using similar setup:
+//!   T.visit_with(V) --calls--> V.visit_T(T) --calls--> T.super_visit_with(V).
+//! These methods return true to indicate that the visitor has found what it is looking for
+//! and does not need to visit anything else.
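+//!
+//! As a rough, standalone sketch of this protocol (`Expr` and `ExprFolder`
+//! below are made-up stand-ins, not the real `Ty`/`TypeFolder` API), the
+//! override/default split looks like this:
+//!
+//! ```
+//! #[derive(Clone, Debug, PartialEq)]
+//! enum Expr { Num(i64), Add(Box<Expr>, Box<Expr>) }
+//!
+//! trait ExprFolder: Sized {
+//!     // Override this to customize behavior; the default is an identity fold.
+//!     fn fold_expr(&mut self, e: &Expr) -> Expr { e.super_fold_with(self) }
+//! }
+//!
+//! impl Expr {
+//!     // Entry point: dispatch to the folder.
+//!     fn fold_with<F: ExprFolder>(&self, folder: &mut F) -> Expr {
+//!         folder.fold_expr(self)
+//!     }
+//!     // The only method that actually traverses the structure.
+//!     fn super_fold_with<F: ExprFolder>(&self, folder: &mut F) -> Expr {
+//!         match *self {
+//!             Expr::Num(n) => Expr::Num(n),
+//!             Expr::Add(ref a, ref b) =>
+//!                 Expr::Add(Box::new(a.fold_with(folder)),
+//!                           Box::new(b.fold_with(folder))),
+//!         }
+//!     }
+//! }
+//!
+//! // A folder that overrides the leaf case and reuses the default traversal.
+//! struct Negate;
+//! impl ExprFolder for Negate {
+//!     fn fold_expr(&mut self, e: &Expr) -> Expr {
+//!         match *e {
+//!             Expr::Num(n) => Expr::Num(-n),
+//!             _ => e.super_fold_with(self),
+//!         }
+//!     }
+//! }
+//!
+//! let tree = Expr::Add(Box::new(Expr::Num(1)), Box::new(Expr::Num(2)));
+//! assert_eq!(tree.fold_with(&mut Negate),
+//!            Expr::Add(Box::new(Expr::Num(-1)), Box::new(Expr::Num(-2))));
+//! ```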
+
+use middle::region;
+use ty::subst;
+use ty::adjustment;
+use ty::{self, Binder, Ty, TyCtxt, TypeFlags};
+
+use std::fmt;
+use util::nodemap::{FnvHashMap, FnvHashSet};
+
+/// The TypeFoldable trait is implemented for every type that can be folded.
+/// Basically, every type that has a corresponding method in TypeFolder.
+pub trait TypeFoldable<'tcx>: fmt::Debug + Clone {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self;
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        self.super_fold_with(folder)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool;
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.super_visit_with(visitor)
+    }
+
+    fn has_regions_escaping_depth(&self, depth: u32) -> bool {
+        self.visit_with(&mut HasEscapingRegionsVisitor { depth: depth })
+    }
+    fn has_escaping_regions(&self) -> bool {
+        self.has_regions_escaping_depth(0)
+    }
+
+    fn has_type_flags(&self, flags: TypeFlags) -> bool {
+        self.visit_with(&mut HasTypeFlagsVisitor { flags: flags })
+    }
+    fn has_projection_types(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_PROJECTION)
+    }
+    fn references_error(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_TY_ERR)
+    }
+    fn has_param_types(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_PARAMS)
+    }
+    fn has_self_ty(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_SELF)
+    }
+    fn has_infer_types(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_TY_INFER)
+    }
+    fn needs_infer(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_RE_INFER)
+    }
+    fn needs_subst(&self) -> bool {
+        self.has_type_flags(TypeFlags::NEEDS_SUBST)
+    }
+    fn has_closure_types(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_TY_CLOSURE)
+    }
+    fn has_erasable_regions(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_RE_EARLY_BOUND |
+                            TypeFlags::HAS_RE_INFER |
+                            TypeFlags::HAS_FREE_REGIONS)
+    }
+    /// Indicates whether this value references only 'global'
+    /// types/lifetimes that are the same regardless of what fn we are
+    /// in. This is used for caching. Errs on the side of returning
+    /// false.
+    fn is_global(&self) -> bool {
+        !self.has_type_flags(TypeFlags::HAS_LOCAL_NAMES)
+    }
+}
+
+/// The TypeFolder trait defines the actual *folding*. There is a
+/// method defined for every foldable type. Each of these has a
+/// default implementation that does an "identity" fold. Within each
+/// identity fold, it should invoke `foo.fold_with(self)` to fold each
+/// sub-item.
+pub trait TypeFolder<'tcx> : Sized {
+    fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx>;
+
+    fn fold_binder<T>(&mut self, t: &Binder<T>) -> Binder<T>
+        where T : TypeFoldable<'tcx>
+    {
+        t.super_fold_with(self)
+    }
+
+    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+        t.super_fold_with(self)
+    }
+
+    fn fold_mt(&mut self, t: &ty::TypeAndMut<'tcx>) -> ty::TypeAndMut<'tcx> {
+        t.super_fold_with(self)
+    }
+
+    fn fold_trait_ref(&mut self, t: &ty::TraitRef<'tcx>) -> ty::TraitRef<'tcx> {
+        t.super_fold_with(self)
+    }
+
+    fn fold_impl_header(&mut self, imp: &ty::ImplHeader<'tcx>) -> ty::ImplHeader<'tcx> {
+        imp.super_fold_with(self)
+    }
+
+    fn fold_substs(&mut self,
+                   substs: &subst::Substs<'tcx>)
+                   -> subst::Substs<'tcx> {
+        substs.super_fold_with(self)
+    }
+
+    fn fold_fn_sig(&mut self,
+                   sig: &ty::FnSig<'tcx>)
+                   -> ty::FnSig<'tcx> {
+        sig.super_fold_with(self)
+    }
+
+    fn fold_output(&mut self,
+                      output: &ty::FnOutput<'tcx>)
+                      -> ty::FnOutput<'tcx> {
+        output.super_fold_with(self)
+    }
+
+    fn fold_bare_fn_ty(&mut self,
+                       fty: &ty::BareFnTy<'tcx>)
+                       -> ty::BareFnTy<'tcx>
+    {
+        fty.super_fold_with(self)
+    }
+
+    fn fold_closure_ty(&mut self,
+                       fty: &ty::ClosureTy<'tcx>)
+                       -> ty::ClosureTy<'tcx> {
+        fty.super_fold_with(self)
+    }
+
+    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+        r.super_fold_with(self)
+    }
+
+    fn fold_existential_bounds(&mut self, s: &ty::ExistentialBounds<'tcx>)
+                               -> ty::ExistentialBounds<'tcx> {
+        s.super_fold_with(self)
+    }
+
+    fn fold_autoref(&mut self, ar: &adjustment::AutoRef<'tcx>)
+                    -> adjustment::AutoRef<'tcx> {
+        ar.super_fold_with(self)
+    }
+}
+
+pub trait TypeVisitor<'tcx> : Sized {
+    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> bool {
+        t.super_visit_with(self)
+    }
+
+    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+        t.super_visit_with(self)
+    }
+
+    fn visit_region(&mut self, r: ty::Region) -> bool {
+        r.super_visit_with(self)
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Some sample folders
+
+pub struct BottomUpFolder<'a, 'tcx: 'a, F> where F: FnMut(Ty<'tcx>) -> Ty<'tcx> {
+    pub tcx: &'a TyCtxt<'tcx>,
+    pub fldop: F,
+}
+
+impl<'a, 'tcx, F> TypeFolder<'tcx> for BottomUpFolder<'a, 'tcx, F> where
+    F: FnMut(Ty<'tcx>) -> Ty<'tcx>,
+{
+    fn tcx(&self) -> &TyCtxt<'tcx> { self.tcx }
+
+    fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        let t1 = ty.super_fold_with(self);
+        (self.fldop)(t1)
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Region folder
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Collects the free and escaping regions in `value` into `region_set`. Returns
+    /// whether any late-bound regions were skipped
+    pub fn collect_regions<T>(&self,
+        value: &T,
+        region_set: &mut FnvHashSet<ty::Region>)
+        -> bool
+        where T : TypeFoldable<'tcx>
+    {
+        let mut have_bound_regions = false;
+        self.fold_regions(value, &mut have_bound_regions,
+                          |r, d| { region_set.insert(r.from_depth(d)); r });
+        have_bound_regions
+    }
+
+    /// Folds the escaping and free regions in `value` using `f`, and
+    /// sets `skipped_regions` to true if any late-bound region was found
+    /// and skipped.
+    pub fn fold_regions<T,F>(&self,
+        value: &T,
+        skipped_regions: &mut bool,
+        mut f: F)
+        -> T
+        where F : FnMut(ty::Region, u32) -> ty::Region,
+              T : TypeFoldable<'tcx>,
+    {
+        value.fold_with(&mut RegionFolder::new(self, skipped_regions, &mut f))
+    }
+}
+
+/// Folds over the substructure of a type, visiting its component
+/// types and all regions that occur *free* within it.
+///
+/// That is, `Ty` can contain function or method types that bind
+/// regions at the call site (`ReLateBound`), and occurrences of
+/// regions (aka "lifetimes") that are bound within a type are not
+/// visited by this folder; only regions that occur free will be
+/// visited by `fld_r`.
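+///
+/// For example, when folding `for<'b> fn(&'a u8, &'b u8)`, only `'a` is
+/// handed to `fld_r`; `'b` is bound within the type being folded, so it is
+/// skipped (and the `skipped_regions` flag is set).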
+
+pub struct RegionFolder<'a, 'tcx: 'a> {
+    tcx: &'a TyCtxt<'tcx>,
+    skipped_regions: &'a mut bool,
+    current_depth: u32,
+    fld_r: &'a mut (FnMut(ty::Region, u32) -> ty::Region + 'a),
+}
+
+impl<'a, 'tcx> RegionFolder<'a, 'tcx> {
+    pub fn new<F>(tcx: &'a TyCtxt<'tcx>,
+                  skipped_regions: &'a mut bool,
+                  fld_r: &'a mut F) -> RegionFolder<'a, 'tcx>
+        where F : FnMut(ty::Region, u32) -> ty::Region
+    {
+        RegionFolder {
+            tcx: tcx,
+            skipped_regions: skipped_regions,
+            current_depth: 1,
+            fld_r: fld_r,
+        }
+    }
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for RegionFolder<'a, 'tcx>
+{
+    fn tcx(&self) -> &TyCtxt<'tcx> { self.tcx }
+
+    fn fold_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T> {
+        self.current_depth += 1;
+        let t = t.super_fold_with(self);
+        self.current_depth -= 1;
+        t
+    }
+
+    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+        match r {
+            ty::ReLateBound(debruijn, _) if debruijn.depth < self.current_depth => {
+                debug!("RegionFolder.fold_region({:?}) skipped bound region (current depth={})",
+                       r, self.current_depth);
+                *self.skipped_regions = true;
+                r
+            }
+            _ => {
+                debug!("RegionFolder.fold_region({:?}) folding free region (current_depth={})",
+                       r, self.current_depth);
+                (self.fld_r)(r, self.current_depth)
+            }
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Late-bound region replacer
+
+// Replaces the escaping regions in a type.
+
+struct RegionReplacer<'a, 'tcx: 'a> {
+    tcx: &'a TyCtxt<'tcx>,
+    current_depth: u32,
+    fld_r: &'a mut (FnMut(ty::BoundRegion) -> ty::Region + 'a),
+    map: FnvHashMap<ty::BoundRegion, ty::Region>
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    pub fn replace_late_bound_regions<T,F>(&self,
+        value: &Binder<T>,
+        mut f: F)
+        -> (T, FnvHashMap<ty::BoundRegion, ty::Region>)
+        where F : FnMut(ty::BoundRegion) -> ty::Region,
+              T : TypeFoldable<'tcx>,
+    {
+        debug!("replace_late_bound_regions({:?})", value);
+        let mut replacer = RegionReplacer::new(self, &mut f);
+        let result = value.skip_binder().fold_with(&mut replacer);
+        (result, replacer.map)
+    }
+
+    /// Replace any late-bound regions bound in `value` with free variants attached to the scope
+    /// `all_outlive_scope`.
+    pub fn liberate_late_bound_regions<T>(&self,
+        all_outlive_scope: region::CodeExtent,
+        value: &Binder<T>)
+        -> T
+        where T : TypeFoldable<'tcx>
+    {
+        self.replace_late_bound_regions(value, |br| {
+            ty::ReFree(ty::FreeRegion{scope: all_outlive_scope, bound_region: br})
+        }).0
+    }
+
+    /// Flattens two binding levels into one. So `for<'a> for<'b> Foo`
+    /// becomes `for<'a,'b> Foo`.
+    pub fn flatten_late_bound_regions<T>(&self, bound2_value: &Binder<Binder<T>>)
+                                         -> Binder<T>
+        where T: TypeFoldable<'tcx>
+    {
+        let bound0_value = bound2_value.skip_binder().skip_binder();
+        let value = self.fold_regions(bound0_value, &mut false,
+                                      |region, current_depth| {
+            match region {
+                ty::ReLateBound(debruijn, br) if debruijn.depth >= current_depth => {
+                    // should be true if no escaping regions from bound2_value
+                    assert!(debruijn.depth - current_depth <= 1);
+                    ty::ReLateBound(ty::DebruijnIndex::new(current_depth), br)
+                }
+                _ => {
+                    region
+                }
+            }
+        });
+        Binder(value)
+    }
+
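+    /// Returns the contents of the binder if `value` does not actually make
+    /// use of any late-bound region (i.e. nothing bound by this binder, or by
+    /// an enclosing one, appears in it); otherwise returns `None`.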
+    pub fn no_late_bound_regions<T>(&self, value: &Binder<T>) -> Option<T>
+        where T : TypeFoldable<'tcx>
+    {
+        if value.0.has_escaping_regions() {
+            None
+        } else {
+            Some(value.0.clone())
+        }
+    }
+
+    /// Replace any late-bound regions bound in `value` with `'static`. Useful in trans but also
+    /// method lookup and a few other places where precise region relationships are not required.
+    pub fn erase_late_bound_regions<T>(&self, value: &Binder<T>) -> T
+        where T : TypeFoldable<'tcx>
+    {
+        self.replace_late_bound_regions(value, |_| ty::ReStatic).0
+    }
+
+    /// Rewrite any late-bound regions so that they are anonymous.  Region numbers are
+    /// assigned starting at 1 and increasing monotonically in the order traversed
+    /// by the fold operation.
+    ///
+    /// The chief purpose of this function is to canonicalize regions so that two
+    /// `FnSig`s or `TraitRef`s which are equivalent up to region naming will become
+    /// structurally identical.  For example, `for<'a, 'b> fn(&'a isize, &'b isize)` and
+    /// `for<'a, 'b> fn(&'b isize, &'a isize)` will become identical after anonymization.
+    pub fn anonymize_late_bound_regions<T>(&self, sig: &Binder<T>) -> Binder<T>
+        where T : TypeFoldable<'tcx>,
+    {
+        let mut counter = 0;
+        Binder(self.replace_late_bound_regions(sig, |_| {
+            counter += 1;
+            ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrAnon(counter))
+        }).0)
+    }
+}
+
+impl<'a, 'tcx> RegionReplacer<'a, 'tcx> {
+    fn new<F>(tcx: &'a TyCtxt<'tcx>, fld_r: &'a mut F) -> RegionReplacer<'a, 'tcx>
+        where F : FnMut(ty::BoundRegion) -> ty::Region
+    {
+        RegionReplacer {
+            tcx: tcx,
+            current_depth: 1,
+            fld_r: fld_r,
+            map: FnvHashMap()
+        }
+    }
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for RegionReplacer<'a, 'tcx>
+{
+    fn tcx(&self) -> &TyCtxt<'tcx> { self.tcx }
+
+    fn fold_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T> {
+        self.current_depth += 1;
+        let t = t.super_fold_with(self);
+        self.current_depth -= 1;
+        t
+    }
+
+    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+        if !t.has_regions_escaping_depth(self.current_depth-1) {
+            return t;
+        }
+
+        t.super_fold_with(self)
+    }
+
+    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+        match r {
+            ty::ReLateBound(debruijn, br) if debruijn.depth == self.current_depth => {
+                debug!("RegionReplacer.fold_region({:?}) folding region (current_depth={})",
+                       r, self.current_depth);
+                let fld_r = &mut self.fld_r;
+                let region = *self.map.entry(br).or_insert_with(|| fld_r(br));
+                if let ty::ReLateBound(debruijn1, br) = region {
+                    // If the callback returns a late-bound region,
+                    // that region should always use depth 1. Then we
+                    // adjust it to the correct depth.
+                    assert_eq!(debruijn1.depth, 1);
+                    ty::ReLateBound(debruijn, br)
+                } else {
+                    region
+                }
+            }
+            r => r
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Region eraser
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Returns an equivalent value with all free regions removed (note
+    /// that late-bound regions remain, because they are important for
+    /// subtyping, but they are anonymized and normalized as well).
+    pub fn erase_regions<T>(&self, value: &T) -> T
+        where T : TypeFoldable<'tcx>
+    {
+        let value1 = value.fold_with(&mut RegionEraser(self));
+        debug!("erase_regions({:?}) = {:?}",
+               value, value1);
+        return value1;
+
+        struct RegionEraser<'a, 'tcx: 'a>(&'a TyCtxt<'tcx>);
+
+        impl<'a, 'tcx> TypeFolder<'tcx> for RegionEraser<'a, 'tcx> {
+            fn tcx(&self) -> &TyCtxt<'tcx> { self.0 }
+
+            fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+                match self.tcx().normalized_cache.borrow().get(&ty).cloned() {
+                    None => {}
+                    Some(u) => return u
+                }
+
+                let t_norm = ty.super_fold_with(self);
+                self.tcx().normalized_cache.borrow_mut().insert(ty, t_norm);
+                return t_norm;
+            }
+
+            fn fold_binder<T>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T>
+                where T : TypeFoldable<'tcx>
+            {
+                let u = self.tcx().anonymize_late_bound_regions(t);
+                u.super_fold_with(self)
+            }
+
+            fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+                // because late-bound regions affect subtyping, we can't
+                // erase the bound/free distinction, but we can replace
+                // all free regions with 'static.
+                //
+                // Note that we *CAN* replace early-bound regions -- the
+                // type system never "sees" those, they get substituted
+                // away. In trans, they will always be erased to 'static
+                // whenever a substitution occurs.
+                match r {
+                    ty::ReLateBound(..) => r,
+                    _ => ty::ReStatic
+                }
+            }
+
+            fn fold_substs(&mut self,
+                           substs: &subst::Substs<'tcx>)
+                           -> subst::Substs<'tcx> {
+                subst::Substs { regions: substs.regions.fold_with(self),
+                                types: substs.types.fold_with(self) }
+            }
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Region shifter
+//
+// Shifts the De Bruijn indices on all escaping bound regions by a
+// fixed amount. Useful in substitution or when otherwise introducing
+// a binding level that is not intended to capture the existing bound
+// regions. See comment on `shift_regions_through_binders` method in
+// `subst.rs` for more details.
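+//
+// For example, shifting by 1 turns an escaping
+// `ReLateBound(DebruijnIndex::new(1), br)` into
+// `ReLateBound(DebruijnIndex::new(2), br)`; regions whose binders lie inside
+// the value handed to `shift_regions` are left untouched.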
+
+pub fn shift_region(region: ty::Region, amount: u32) -> ty::Region {
+    match region {
+        ty::ReLateBound(debruijn, br) => {
+            ty::ReLateBound(debruijn.shifted(amount), br)
+        }
+        _ => {
+            region
+        }
+    }
+}
+
+pub fn shift_regions<'tcx, T:TypeFoldable<'tcx>>(tcx: &TyCtxt<'tcx>,
+                                                 amount: u32, value: &T) -> T {
+    debug!("shift_regions(value={:?}, amount={})",
+           value, amount);
+
+    value.fold_with(&mut RegionFolder::new(tcx, &mut false, &mut |region, _current_depth| {
+        shift_region(region, amount)
+    }))
+}
+
+/// An "escaping region" is a bound region whose binder is not part of `t`.
+///
+/// So, for example, consider a type like the following, which has two binders:
+///
+///    for<'a> fn(x: for<'b> fn(&'a isize, &'b isize))
+///    ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ outer scope
+///                  ^~~~~~~~~~~~~~~~~~~~~~~~~~~~  inner scope
+///
+/// This type has *bound regions* (`'a`, `'b`), but it does not have escaping regions, because the
+/// binders of both `'a` and `'b` are part of the type itself. However, if we consider the *inner
+/// fn type*, that type has an escaping region: `'a`.
+///
+/// Note that what I'm calling an "escaping region" is often just called a "free region". However,
+/// we already use the term "free region". It refers to the regions that we use to represent bound
+/// regions on a fn definition while we are typechecking its body.
+///
+/// To clarify, conceptually there is no particular difference between an "escaping" region and a
+/// "free" region. However, there is a big difference in practice. Basically, when "entering" a
+/// binding level, one is generally required to do some sort of processing to a bound region, such
+/// as replacing it with a fresh/skolemized region, or making an entry in the environment to
+/// represent the scope to which it is attached, etc. An escaping region represents a bound region
+/// for which this processing has not yet been done.
+struct HasEscapingRegionsVisitor {
+    depth: u32,
+}
+
+impl<'tcx> TypeVisitor<'tcx> for HasEscapingRegionsVisitor {
+    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> bool {
+        self.depth += 1;
+        let result = t.super_visit_with(self);
+        self.depth -= 1;
+        result
+    }
+
+    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+        t.region_depth > self.depth
+    }
+
+    fn visit_region(&mut self, r: ty::Region) -> bool {
+        r.escapes_depth(self.depth)
+    }
+}
+
+struct HasTypeFlagsVisitor {
+    flags: ty::TypeFlags,
+}
+
+impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor {
+    fn visit_ty(&mut self, t: Ty) -> bool {
+        t.flags.get().intersects(self.flags)
+    }
+
+    fn visit_region(&mut self, r: ty::Region) -> bool {
+        if self.flags.intersects(ty::TypeFlags::HAS_LOCAL_NAMES) {
+            // does this represent a region that cannot be named
+            // in a global way? used in fulfillment caching.
+            match r {
+                ty::ReStatic | ty::ReEmpty => {}
+                _ => return true,
+            }
+        }
+        if self.flags.intersects(ty::TypeFlags::HAS_RE_INFER) {
+            match r {
+                ty::ReVar(_) | ty::ReSkolemized(..) => { return true }
+                _ => {}
+            }
+        }
+        false
+    }
+}
diff --git a/src/librustc/ty/item_path.rs b/src/librustc/ty/item_path.rs
new file mode 100644 (file)
index 0000000..5c1e19a
--- /dev/null
@@ -0,0 +1,364 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use hir::map::DefPathData;
+use middle::cstore::LOCAL_CRATE;
+use hir::def_id::{DefId, CRATE_DEF_INDEX};
+use ty::{self, Ty, TyCtxt};
+use syntax::ast;
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Returns a string identifying this def-id. This string is
+    /// suitable for user output. It is relative to the current crate
+    /// root.
+    pub fn item_path_str(&self, def_id: DefId) -> String {
+        let mut buffer = LocalPathBuffer::new(RootMode::Local);
+        self.push_item_path(&mut buffer, def_id);
+        buffer.into_string()
+    }
+
+    /// Returns a string identifying this local node-id.
+    pub fn node_path_str(&self, id: ast::NodeId) -> String {
+        self.item_path_str(self.map.local_def_id(id))
+    }
+
+    /// Returns a string identifying this def-id. This string is
+    /// suitable for user output. It always begins with a crate identifier.
+    pub fn absolute_item_path_str(&self, def_id: DefId) -> String {
+        let mut buffer = LocalPathBuffer::new(RootMode::Absolute);
+        self.push_item_path(&mut buffer, def_id);
+        buffer.into_string()
+    }
+
+    /// Returns the "path" to a particular crate. This can proceed in
+    /// various ways, depending on the `root_mode` of the `buffer`.
+    /// (See `RootMode` enum for more details.)
+    pub fn push_krate_path<T>(&self, buffer: &mut T, cnum: ast::CrateNum)
+        where T: ItemPathBuffer
+    {
+        match *buffer.root_mode() {
+            RootMode::Local => {
+                // In local mode, when we encounter a crate other than
+                // LOCAL_CRATE, execution proceeds in one of two ways:
+                //
+                // 1. for a direct dependency, where the user added an
+                //    `extern crate` manually, we put the `extern
+                //    crate` as the parent. So you wind up with
+                //    something relative to the current crate.
+                // 2. for an indirect crate, where there is no extern
+                //    crate, we just prepend the crate name.
+                //
+                // Nothing is pushed for the local crate itself.
+                if cnum != LOCAL_CRATE {
+                    let opt_extern_crate = self.sess.cstore.extern_crate(cnum);
+                    let opt_extern_crate = opt_extern_crate.and_then(|extern_crate| {
+                        if extern_crate.direct {
+                            Some(extern_crate.def_id)
+                        } else {
+                            None
+                        }
+                    });
+                    if let Some(extern_crate_def_id) = opt_extern_crate {
+                        self.push_item_path(buffer, extern_crate_def_id);
+                    } else {
+                        buffer.push(&self.crate_name(cnum));
+                    }
+                }
+            }
+            RootMode::Absolute => {
+                // In absolute mode, just write the crate name
+                // unconditionally.
+                buffer.push(&self.crate_name(cnum));
+            }
+        }
+    }
+
+    /// If possible, this pushes a global path resolving to `external_def_id` that is visible
+    /// from at least one local module and returns true. If the crate defining `external_def_id` is
+    /// declared with an `extern crate`, the path is guaranteed to use the `extern crate`.
+    pub fn try_push_visible_item_path<T>(&self, buffer: &mut T, external_def_id: DefId) -> bool
+        where T: ItemPathBuffer
+    {
+        let visible_parent_map = self.sess.cstore.visible_parent_map();
+
+        let (mut cur_def, mut cur_path) = (external_def_id, Vec::<ast::Name>::new());
+        loop {
+            // If `cur_def` is a direct or injected extern crate, push the path to the crate
+            // followed by the path to the item within the crate and return.
+            if cur_def.index == CRATE_DEF_INDEX {
+                match self.sess.cstore.extern_crate(cur_def.krate) {
+                    Some(extern_crate) if extern_crate.direct => {
+                        self.push_item_path(buffer, extern_crate.def_id);
+                        for segment in cur_path.iter().rev() {
+                            buffer.push(&segment.as_str());
+                        }
+                        return true;
+                    }
+                    None => {
+                        buffer.push(&self.crate_name(cur_def.krate));
+                        for segment in cur_path.iter().rev() {
+                            buffer.push(&segment.as_str());
+                        }
+                        return true;
+                    }
+                    _ => {},
+                }
+            }
+
+            cur_path.push(self.sess.cstore.item_name(cur_def));
+            match visible_parent_map.get(&cur_def) {
+                Some(&def) => cur_def = def,
+                None => return false,
+            };
+        }
+    }
+
+    pub fn push_item_path<T>(&self, buffer: &mut T, def_id: DefId)
+        where T: ItemPathBuffer
+    {
+        match *buffer.root_mode() {
+            RootMode::Local if !def_id.is_local() =>
+                if self.try_push_visible_item_path(buffer, def_id) { return },
+            _ => {}
+        }
+
+        let key = self.def_key(def_id);
+        match key.disambiguated_data.data {
+            DefPathData::CrateRoot => {
+                assert!(key.parent.is_none());
+                self.push_krate_path(buffer, def_id.krate);
+            }
+
+            DefPathData::InlinedRoot(ref root_path) => {
+                assert!(key.parent.is_none());
+                self.push_item_path(buffer, root_path.def_id);
+            }
+
+            DefPathData::Impl => {
+                self.push_impl_path(buffer, def_id);
+            }
+
+            // Unclear if there is any value in distinguishing these right now;
+            // there probably will be eventually (and maybe we would even want
+            // finer-grained distinctions, e.g. between enum/struct).
+            data @ DefPathData::Misc |
+            data @ DefPathData::TypeNs(..) |
+            data @ DefPathData::ValueNs(..) |
+            data @ DefPathData::TypeParam(..) |
+            data @ DefPathData::LifetimeDef(..) |
+            data @ DefPathData::EnumVariant(..) |
+            data @ DefPathData::Field(..) |
+            data @ DefPathData::StructCtor |
+            data @ DefPathData::Initializer |
+            data @ DefPathData::MacroDef(..) |
+            data @ DefPathData::ClosureExpr |
+            data @ DefPathData::Binding(..) => {
+                let parent_def_id = self.parent_def_id(def_id).unwrap();
+                self.push_item_path(buffer, parent_def_id);
+                buffer.push(&data.as_interned_str());
+            }
+        }
+    }
+
+    fn push_impl_path<T>(&self,
+                         buffer: &mut T,
+                         impl_def_id: DefId)
+        where T: ItemPathBuffer
+    {
+        let parent_def_id = self.parent_def_id(impl_def_id).unwrap();
+
+        let use_types = if !impl_def_id.is_local() {
+            // always have full types available for extern crates
+            true
+        } else {
+            // for local crates, check whether type info is
+            // available; typeck might not have completed yet
+            self.impl_trait_refs.borrow().contains_key(&impl_def_id)
+        };
+
+        if !use_types {
+            return self.push_impl_path_fallback(buffer, impl_def_id);
+        }
+
+        // Decide whether to print the parent path for the impl.
+        // Logically, since impls are global, it's never needed, but
+        // users may find it useful. Currently, we omit the parent if
+        // the impl is either in the same module as the self-type or
+        // as the trait.
+        let self_ty = self.lookup_item_type(impl_def_id).ty;
+        let in_self_mod = match self.characteristic_def_id_of_type(self_ty) {
+            None => false,
+            Some(ty_def_id) => self.parent_def_id(ty_def_id) == Some(parent_def_id),
+        };
+
+        let impl_trait_ref = self.impl_trait_ref(impl_def_id);
+        let in_trait_mod = match impl_trait_ref {
+            None => false,
+            Some(trait_ref) => self.parent_def_id(trait_ref.def_id) == Some(parent_def_id),
+        };
+
+        if !in_self_mod && !in_trait_mod {
+            // If the impl is not co-located with either the self-type or
+            // the trait-type, then fall back to a format that identifies
+            // the module more clearly.
+            self.push_item_path(buffer, parent_def_id);
+            if let Some(trait_ref) = impl_trait_ref {
+                buffer.push(&format!("<impl {} for {}>", trait_ref, self_ty));
+            } else {
+                buffer.push(&format!("<impl {}>", self_ty));
+            }
+            return;
+        }
+
+        // Otherwise, try to give a good form that would be valid language
+        // syntax, preferably using associated item notation.
+
+        if let Some(trait_ref) = impl_trait_ref {
+            // Trait impls.
+            buffer.push(&format!("<{} as {}>",
+                                 self_ty,
+                                 trait_ref));
+            return;
+        }
+
+        // Inherent impls. Try to print `Foo::bar` for an inherent
+        // impl on `Foo`, but fall back to `<Foo>::bar` if the self-type is
+        // anything other than a simple path.
+        match self_ty.sty {
+            ty::TyStruct(adt_def, substs) |
+            ty::TyEnum(adt_def, substs) => {
+                if substs.types.is_empty() { // ignore regions
+                    self.push_item_path(buffer, adt_def.did);
+                } else {
+                    buffer.push(&format!("<{}>", self_ty));
+                }
+            }
+
+            ty::TyBool |
+            ty::TyChar |
+            ty::TyInt(_) |
+            ty::TyUint(_) |
+            ty::TyFloat(_) |
+            ty::TyStr => {
+                buffer.push(&format!("{}", self_ty));
+            }
+
+            _ => {
+                buffer.push(&format!("<{}>", self_ty));
+            }
+        }
+    }
+
+    fn push_impl_path_fallback<T>(&self,
+                                  buffer: &mut T,
+                                  impl_def_id: DefId)
+        where T: ItemPathBuffer
+    {
+        // If no type info is available, fall back to
+        // pretty printing some span information. This should
+        // only occur very early in the compiler pipeline.
+        let parent_def_id = self.parent_def_id(impl_def_id).unwrap();
+        self.push_item_path(buffer, parent_def_id);
+        let node_id = self.map.as_local_node_id(impl_def_id).unwrap();
+        let item = self.map.expect_item(node_id);
+        let span_str = self.sess.codemap().span_to_string(item.span);
+        buffer.push(&format!("<impl at {}>", span_str));
+    }
+
+    /// As a heuristic, when we see an impl, if we see that the
+    /// 'self-type' is a type defined in the same module as the impl,
+    /// we can omit including the path to the impl itself. This
+    /// function tries to find a "characteristic def-id" for a
+    /// type. It's just a heuristic so it makes some questionable
+    /// decisions and we may want to adjust it later.
+    fn characteristic_def_id_of_type(&self, ty: Ty<'tcx>) -> Option<DefId> {
+        match ty.sty {
+            ty::TyStruct(adt_def, _) |
+            ty::TyEnum(adt_def, _) =>
+                Some(adt_def.did),
+
+            ty::TyTrait(ref data) =>
+                Some(data.principal_def_id()),
+
+            ty::TyBox(subty) =>
+                self.characteristic_def_id_of_type(subty),
+
+            ty::TyRawPtr(mt) |
+            ty::TyRef(_, mt) =>
+                self.characteristic_def_id_of_type(mt.ty),
+
+            ty::TyTuple(ref tys) =>
+                tys.iter()
+                   .filter_map(|ty| self.characteristic_def_id_of_type(ty))
+                   .next(),
+
+            _ =>
+                None
+        }
+    }
+
+    /// Returns the def-id of `def_id`'s parent in the def tree. If
+    /// this returns `None`, then `def_id` represents a crate root or
+    /// inlined root.
+    fn parent_def_id(&self, def_id: DefId) -> Option<DefId> {
+        let key = self.def_key(def_id);
+        key.parent.map(|index| DefId { krate: def_id.krate, index: index })
+    }
+}
+
+/// Unifying trait for the different kinds of item paths we might
+/// construct. The basic interface is that components get pushed; the
+/// implementation can also customize how we handle the root of a crate.
+pub trait ItemPathBuffer {
+    fn root_mode(&self) -> &RootMode;
+    fn push(&mut self, text: &str);
+}
+
+#[derive(Debug)]
+pub enum RootMode {
+    /// Try to make a path relative to the local crate.  In
+    /// particular, local paths have no prefix, and if the path comes
+    /// from an extern crate, start with the path to the `extern
+    /// crate` declaration.
+    Local,
+
+    /// Always prepend the crate name to the path, forming an absolute
+    /// path from within a given set of crates.
+    Absolute,
+}
+
+#[derive(Debug)]
+struct LocalPathBuffer {
+    root_mode: RootMode,
+    str: String,
+}
+
+impl LocalPathBuffer {
+    fn new(root_mode: RootMode) -> LocalPathBuffer {
+        LocalPathBuffer {
+            root_mode: root_mode,
+            str: String::new()
+        }
+    }
+
+    fn into_string(self) -> String {
+        self.str
+    }
+}
+
+impl ItemPathBuffer for LocalPathBuffer {
+    fn root_mode(&self) -> &RootMode {
+        &self.root_mode
+    }
+
+    fn push(&mut self, text: &str) {
+        if !self.str.is_empty() {
+            self.str.push_str("::");
+        }
+        self.str.push_str(text);
+    }
+}
diff --git a/src/librustc/ty/ivar.rs b/src/librustc/ty/ivar.rs
new file mode 100644 (file)
index 0000000..88327ab
--- /dev/null
@@ -0,0 +1,88 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use dep_graph::DepNode;
+use hir::def_id::DefId;
+use ty::{Ty, TyS};
+use ty::tls;
+
+use rustc_data_structures::ivar;
+
+use std::fmt;
+use std::marker::PhantomData;
+use core::nonzero::NonZero;
+
+/// An IVar that contains a Ty. 'lt is a (reverse-variant) upper bound
+/// on the lifetime of the IVar. This is required because of variance
+/// problems: the IVar needs to be variant with respect to 'tcx (so
+/// it can be referred to from Ty) but can only be modified if its
+/// lifetime is exactly 'tcx.
+///
+/// Safety invariants:
+///     (A) self.0, if fulfilled, is a valid Ty<'tcx>
+///     (B) no aliases to this value with a 'tcx longer than this
+///         value's 'lt exist
+///
+/// Dependency tracking: each ivar does not know what node in the
+/// dependency graph it is associated with, so when you get/fulfill
+/// you must supply a `DepNode` id. This should always be the same id!
+///
+/// NonZero is used rather than Unique because Unique isn't Copy.
+pub struct TyIVar<'tcx, 'lt: 'tcx>(ivar::Ivar<NonZero<*const TyS<'static>>>,
+                                   PhantomData<fn(TyS<'lt>)->TyS<'tcx>>);
+
+impl<'tcx, 'lt> TyIVar<'tcx, 'lt> {
+    #[inline]
+    pub fn new() -> Self {
+        // Invariant (A) satisfied because the IVar is unfulfilled
+        // Invariant (B) because 'lt : 'tcx
+        TyIVar(ivar::Ivar::new(), PhantomData)
+    }
+
+    #[inline]
+    pub fn get(&self, dep_node: DepNode<DefId>) -> Option<Ty<'tcx>> {
+        tls::with(|tcx| tcx.dep_graph.read(dep_node));
+        self.untracked_get()
+    }
+
+    #[inline]
+    fn untracked_get(&self) -> Option<Ty<'tcx>> {
+        match self.0.get() {
+            None => None,
+            // valid because of invariant (A)
+            Some(v) => Some(unsafe { &*(*v as *const TyS<'tcx>) })
+        }
+    }
+
+    #[inline]
+    pub fn unwrap(&self, dep_node: DepNode<DefId>) -> Ty<'tcx> {
+        self.get(dep_node).unwrap()
+    }
+
+    pub fn fulfill(&self, dep_node: DepNode<DefId>, value: Ty<'lt>) {
+        tls::with(|tcx| tcx.dep_graph.write(dep_node));
+
+        // Invariant (A) is fulfilled, because by (B), every alias
+        // of this has a 'tcx longer than 'lt.
+        let value: *const TyS<'lt> = value;
+        // FIXME(27214): unneeded [as *const ()]
+        let value = value as *const () as *const TyS<'static>;
+        self.0.fulfill(unsafe { NonZero::new(value) })
+    }
+}
+
+impl<'tcx, 'lt> fmt::Debug for TyIVar<'tcx, 'lt> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.untracked_get() {
+            Some(val) => write!(f, "TyIVar({:?})", val),
+            None => f.write_str("TyIVar(<unfulfilled>)")
+        }
+    }
+}
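
// Illustrative sketch, not part of this patch: a simplified stand-in for the
// write-once "IVar" cell wrapped above, without the dep-graph tracking or the
// 'tcx/'lt lifetime machinery. All names here are invented for the example.
use std::cell::Cell;

struct WriteOnce<T: Copy> {
    slot: Cell<Option<T>>,
}

impl<T: Copy> WriteOnce<T> {
    fn new() -> Self {
        WriteOnce { slot: Cell::new(None) }
    }

    // Like TyIVar::get, but without reading a dependency-graph node first.
    fn get(&self) -> Option<T> {
        self.slot.get()
    }

    // Like TyIVar::fulfill: may only be called once.
    fn fulfill(&self, value: T) {
        assert!(self.slot.get().is_none(), "already fulfilled");
        self.slot.set(Some(value));
    }
}

fn main() {
    let cell = WriteOnce::new();
    assert_eq!(cell.get(), None);
    cell.fulfill(42u32);
    assert_eq!(cell.get(), Some(42));
}
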
diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs
new file mode 100644 (file)
index 0000000..3ea691b
--- /dev/null
@@ -0,0 +1,1336 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub use self::Integer::*;
+pub use self::Layout::*;
+pub use self::Primitive::*;
+
+use infer::{InferCtxt, drain_fulfillment_cx_or_panic};
+use session::Session;
+use traits;
+use ty::{self, Ty, TyCtxt, TypeFoldable};
+
+use syntax::ast::{FloatTy, IntTy, UintTy};
+use syntax::attr;
+use syntax::codemap::DUMMY_SP;
+
+use std::cmp;
+use std::fmt;
+use std::i64;
+
+/// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
+/// for a target, which contains everything needed to compute layouts.
+pub struct TargetDataLayout {
+    pub endian: Endian,
+    pub i1_align: Align,
+    pub i8_align: Align,
+    pub i16_align: Align,
+    pub i32_align: Align,
+    pub i64_align: Align,
+    pub f32_align: Align,
+    pub f64_align: Align,
+    pub pointer_size: Size,
+    pub pointer_align: Align,
+    pub aggregate_align: Align,
+
+    /// Alignments for vector types.
+    pub vector_align: Vec<(Size, Align)>
+}
+
+impl Default for TargetDataLayout {
+    fn default() -> TargetDataLayout {
+        TargetDataLayout {
+            endian: Endian::Big,
+            i1_align: Align::from_bits(8, 8).unwrap(),
+            i8_align: Align::from_bits(8, 8).unwrap(),
+            i16_align: Align::from_bits(16, 16).unwrap(),
+            i32_align: Align::from_bits(32, 32).unwrap(),
+            i64_align: Align::from_bits(32, 64).unwrap(),
+            f32_align: Align::from_bits(32, 32).unwrap(),
+            f64_align: Align::from_bits(64, 64).unwrap(),
+            pointer_size: Size::from_bits(64),
+            pointer_align: Align::from_bits(64, 64).unwrap(),
+            aggregate_align: Align::from_bits(0, 64).unwrap(),
+            vector_align: vec![
+                (Size::from_bits(64), Align::from_bits(64, 64).unwrap()),
+                (Size::from_bits(128), Align::from_bits(128, 128).unwrap())
+            ]
+        }
+    }
+}
+
+impl TargetDataLayout {
+    pub fn parse(sess: &Session) -> TargetDataLayout {
+        // Parse a bit count from a string.
+        let parse_bits = |s: &str, kind: &str, cause: &str| {
+            s.parse::<u64>().unwrap_or_else(|err| {
+                sess.err(&format!("invalid {} `{}` for `{}` in \"data-layout\": {}",
+                                  kind, s, cause, err));
+                0
+            })
+        };
+
+        // Parse a size string.
+        let size = |s: &str, cause: &str| {
+            Size::from_bits(parse_bits(s, "size", cause))
+        };
+
+        // Parse an alignment string.
+        let align = |s: &[&str], cause: &str| {
+            if s.is_empty() {
+                sess.err(&format!("missing alignment for `{}` in \"data-layout\"", cause));
+            }
+            let abi = parse_bits(s[0], "alignment", cause);
+            let pref = s.get(1).map_or(abi, |pref| parse_bits(pref, "alignment", cause));
+            Align::from_bits(abi, pref).unwrap_or_else(|err| {
+                sess.err(&format!("invalid alignment for `{}` in \"data-layout\": {}",
+                                  cause, err));
+                Align::from_bits(8, 8).unwrap()
+            })
+        };
+
+        let mut dl = TargetDataLayout::default();
+        for spec in sess.target.target.data_layout.split("-") {
+            match &spec.split(":").collect::<Vec<_>>()[..] {
+                ["e"] => dl.endian = Endian::Little,
+                ["E"] => dl.endian = Endian::Big,
+                ["a", a..] => dl.aggregate_align = align(a, "a"),
+                ["f32", a..] => dl.f32_align = align(a, "f32"),
+                ["f64", a..] => dl.f64_align = align(a, "f64"),
+                [p @ "p", s, a..] | [p @ "p0", s, a..] => {
+                    dl.pointer_size = size(s, p);
+                    dl.pointer_align = align(a, p);
+                }
+                [s, a..] if s.starts_with("i") => {
+                    let ty_align = match s[1..].parse::<u64>() {
+                        Ok(1) => &mut dl.i8_align,
+                        Ok(8) => &mut dl.i8_align,
+                        Ok(16) => &mut dl.i16_align,
+                        Ok(32) => &mut dl.i32_align,
+                        Ok(64) => &mut dl.i64_align,
+                        Ok(_) => continue,
+                        Err(_) => {
+                            size(&s[1..], "i"); // For the user error.
+                            continue;
+                        }
+                    };
+                    *ty_align = align(a, s);
+                }
+                [s, a..] if s.starts_with("v") => {
+                    let v_size = size(&s[1..], "v");
+                    let a = align(a, s);
+                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
+                        v.1 = a;
+                        continue;
+                    }
+                    // No existing entry, add a new one.
+                    dl.vector_align.push((v_size, a));
+                }
+                _ => {} // Ignore everything else.
+            }
+        }
+
+        // Perform consistency checks against the Target information.
+        let endian_str = match dl.endian {
+            Endian::Little => "little",
+            Endian::Big => "big"
+        };
+        if endian_str != sess.target.target.target_endian {
+            sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
+                               architecture is {}-endian, while \"target-endian\" is `{}`",
+                              endian_str, sess.target.target.target_endian));
+        }
+
+        if dl.pointer_size.bits().to_string() != sess.target.target.target_pointer_width {
+            sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
+                               pointers are {}-bit, while \"target-pointer-width\" is `{}`",
+                              dl.pointer_size.bits(), sess.target.target.target_pointer_width));
+        }
+
+        dl
+    }
+
+    /// Return exclusive upper bound on object size.
+    ///
+    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
+    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
+    /// index every address within an object along with one byte past the end, along with allowing
+    /// `isize` to store the difference between any two pointers into an object.
+    ///
+    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
+    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
+    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
+    /// address space on 64-bit ARMv8 and x86_64.
+    pub fn obj_size_bound(&self) -> u64 {
+        match self.pointer_size.bits() {
+            32 => 1 << 31,
+            64 => 1 << 47,
+            bits => bug!("obj_size_bound: unknown pointer bit size {}", bits)
+        }
+    }
+
+    pub fn ptr_sized_integer(&self) -> Integer {
+        match self.pointer_size.bits() {
+            32 => I32,
+            64 => I64,
+            bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits)
+        }
+    }
+}
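
// Illustrative sketch, not part of this patch: the parser above splits the
// target's LLVM data-layout string first on '-' into specs and then on ':'
// into fields. The string below is assumed to be a typical x86_64 layout.
fn main() {
    let data_layout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128";
    for spec in data_layout.split('-') {
        let fields: Vec<&str> = spec.split(':').collect();
        println!("{:?}", fields);
    }
    // Prints ["e"], ["m", "e"], ["i64", "64"], ["f80", "128"], ...
}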
+
+/// Endianness of the target, which must match cfg(target-endian).
+#[derive(Copy, Clone)]
+pub enum Endian {
+    Little,
+    Big
+}
+
+/// Size of a type in bytes.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+pub struct Size {
+    raw: u64
+}
+
+impl Size {
+    pub fn from_bits(bits: u64) -> Size {
+        Size::from_bytes((bits + 7) / 8)
+    }
+
+    pub fn from_bytes(bytes: u64) -> Size {
+        if bytes >= (1 << 61) {
+            bug!("Size::from_bytes: {} bytes in bits doesn't fit in u64", bytes)
+        }
+        Size {
+            raw: bytes
+        }
+    }
+
+    pub fn bytes(self) -> u64 {
+        self.raw
+    }
+
+    pub fn bits(self) -> u64 {
+        self.bytes() * 8
+    }
+
+    pub fn abi_align(self, align: Align) -> Size {
+        let mask = align.abi() - 1;
+        Size::from_bytes((self.bytes() + mask) & !mask)
+    }
+
+    pub fn checked_add(self, offset: Size, dl: &TargetDataLayout) -> Option<Size> {
+        // Each Size is less than dl.obj_size_bound(), so the sum is
+        // also less than 1 << 62 (and therefore can't overflow).
+        let bytes = self.bytes() + offset.bytes();
+
+        if bytes < dl.obj_size_bound() {
+            Some(Size::from_bytes(bytes))
+        } else {
+            None
+        }
+    }
+
+    pub fn checked_mul(self, count: u64, dl: &TargetDataLayout) -> Option<Size> {
+        // self is less than dl.obj_size_bound(), but the product can still
+        // overflow u64 or exceed that bound, so both conditions are checked.
+        match self.bytes().checked_mul(count) {
+            Some(bytes) if bytes < dl.obj_size_bound() => {
+                Some(Size::from_bytes(bytes))
+            }
+            _ => None
+        }
+    }
+}
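
// Illustrative sketch, not part of this patch: Size::abi_align above rounds a
// byte count up to the next multiple of a power-of-two alignment using the
// same mask trick shown here.
fn round_up_to_align(bytes: u64, align: u64) -> u64 {
    let mask = align - 1;
    (bytes + mask) & !mask
}

fn main() {
    assert_eq!(round_up_to_align(5, 4), 8);
    assert_eq!(round_up_to_align(8, 4), 8);
    assert_eq!(round_up_to_align(0, 8), 0);
}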
+
+/// Alignment of a type in bytes, both ABI-mandated and preferred.
+/// Since alignments are always powers of 2, we can pack both in one byte,
+/// giving each a nibble (4 bits) for a maximum alignment of 2^15 = 32768.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct Align {
+    raw: u8
+}
+
+impl Align {
+    pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> {
+        Align::from_bytes((abi + 7) / 8, (pref + 7) / 8)
+    }
+
+    pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> {
+        let pack = |align: u64| {
+            // Treat an alignment of 0 bytes like 1-byte alignment.
+            if align == 0 {
+                return Ok(0);
+            }
+
+            let mut bytes = align;
+            let mut pow: u8 = 0;
+            while (bytes & 1) == 0 {
+                pow += 1;
+                bytes >>= 1;
+            }
+            if bytes != 1 {
+                Err(format!("`{}` is not a power of 2", align))
+            } else if pow > 0x0f {
+                Err(format!("`{}` is too large", align))
+            } else {
+                Ok(pow)
+            }
+        };
+
+        Ok(Align {
+            raw: pack(abi)? | (pack(pref)? << 4)
+        })
+    }
+
+    pub fn abi(self) -> u64 {
+        1 << (self.raw & 0xf)
+    }
+
+    pub fn pref(self) -> u64 {
+        1 << (self.raw >> 4)
+    }
+
+    pub fn min(self, other: Align) -> Align {
+        let abi = cmp::min(self.raw & 0x0f, other.raw & 0x0f);
+        let pref = cmp::min(self.raw & 0xf0, other.raw & 0xf0);
+        Align {
+            raw: abi | pref
+        }
+    }
+
+    pub fn max(self, other: Align) -> Align {
+        let abi = cmp::max(self.raw & 0x0f, other.raw & 0x0f);
+        let pref = cmp::max(self.raw & 0xf0, other.raw & 0xf0);
+        Align {
+            raw: abi | pref
+        }
+    }
+}
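
// Illustrative sketch, not part of this patch: Align above stores only the
// base-2 exponents of the two alignments, ABI in the low nibble and preferred
// in the high nibble, which is what the arithmetic below reproduces.
fn pack(abi_pow2: u8, pref_pow2: u8) -> u8 {
    assert!(abi_pow2 <= 0x0f && pref_pow2 <= 0x0f);
    abi_pow2 | (pref_pow2 << 4)
}

fn main() {
    let raw = pack(2, 3); // ABI alignment 4 bytes, preferred alignment 8 bytes
    assert_eq!(1u64 << (raw & 0x0f), 4); // abi()
    assert_eq!(1u64 << (raw >> 4), 8);   // pref()
}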
+
+/// Integers, also used for enum discriminants.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+pub enum Integer {
+    I1,
+    I8,
+    I16,
+    I32,
+    I64
+}
+
+impl Integer {
+    /// Find the smallest Integer type which can represent the signed value.
+    pub fn fit_signed(x: i64) -> Integer {
+        match x {
+            -0x0000_0001...0x0000_0000 => I1,
+            -0x0000_0080...0x0000_007f => I8,
+            -0x0000_8000...0x0000_7fff => I16,
+            -0x8000_0000...0x7fff_ffff => I32,
+            _ => I64
+        }
+    }
+
+    /// Find the smallest Integer type which can represent the unsigned value.
+    pub fn fit_unsigned(x: u64) -> Integer {
+        match x {
+            0...0x0000_0001 => I1,
+            0...0x0000_00ff => I8,
+            0...0x0000_ffff => I16,
+            0...0xffff_ffff => I32,
+            _ => I64
+        }
+    }
+
+    /// Get the Integer type from an attr::IntType.
+    pub fn from_attr(dl: &TargetDataLayout, ity: attr::IntType) -> Integer {
+        match ity {
+            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
+            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
+            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
+            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
+            attr::SignedInt(IntTy::Is) | attr::UnsignedInt(UintTy::Us) => {
+                dl.ptr_sized_integer()
+            }
+        }
+    }
+
+    /// Find the appropriate Integer type and signedness for the given
+    /// signed discriminant range and #[repr] attribute.
+    /// N.B.: u64 values above i64::MAX will be treated as signed, but
+    /// that shouldn't affect anything, other than maybe debuginfo.
+    pub fn repr_discr(tcx: &TyCtxt, hint: attr::ReprAttr, min: i64, max: i64)
+                      -> (Integer, bool) {
+        // Theoretically, negative values could be larger in unsigned representation
+        // than the unsigned representation of the signed minimum. However, if there
+        // are any negative values, the only valid unsigned representation is u64
+        // which can fit all i64 values, so the result remains unaffected.
+        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u64, max as u64));
+        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
+
+        let at_least = match hint {
+            attr::ReprInt(span, ity) => {
+                let discr = Integer::from_attr(&tcx.data_layout, ity);
+                let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
+                if discr < fit {
+                    span_bug!(span, "representation hint insufficient for discriminant range")
+                }
+                return (discr, ity.is_signed());
+            }
+            attr::ReprExtern => {
+                match &tcx.sess.target.target.arch[..] {
+                    // WARNING: the ARM EABI has two variants; the one corresponding
+                    // to `at_least == I32` appears to be used on Linux and NetBSD,
+                    // but some systems may use the variant corresponding to no
+                    // lower bound.  However, we don't run on those yet...?
+                    "arm" => I32,
+                    _ => I32,
+                }
+            }
+            attr::ReprAny => I8,
+            attr::ReprPacked => {
+                bug!("Integer::repr_discr: found #[repr(packed)] on an enum");
+            }
+            attr::ReprSimd => {
+                bug!("Integer::repr_discr: found #[repr(simd)] on an enum");
+            }
+        };
+
+        // If there are no negative values, we can use the unsigned fit.
+        if min >= 0 {
+            (cmp::max(unsigned_fit, at_least), false)
+        } else {
+            (cmp::max(signed_fit, at_least), true)
+        }
+    }
+}
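
// Illustrative sketch, not part of this patch: fit_signed/fit_unsigned above
// pick the narrowest integer that covers a discriminant range; the helper
// below mirrors the signed case with widths expressed in bits.
fn fit_signed_bits(x: i64) -> u32 {
    if x >= -1 && x <= 0 { 1 }
    else if x >= -0x80 && x <= 0x7f { 8 }
    else if x >= -0x8000 && x <= 0x7fff { 16 }
    else if x >= -0x8000_0000 && x <= 0x7fff_ffff { 32 }
    else { 64 }
}

fn main() {
    assert_eq!(fit_signed_bits(0), 1);
    assert_eq!(fit_signed_bits(200), 16);
    assert_eq!(fit_signed_bits(-129), 16);
    assert_eq!(fit_signed_bits(1i64 << 40), 64);
}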
+
+/// Fundamental unit of memory access and layout.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub enum Primitive {
+    Int(Integer),
+    F32,
+    F64,
+    Pointer
+}
+
+impl Primitive {
+    pub fn size(self, dl: &TargetDataLayout) -> Size {
+        match self {
+            Int(I1) | Int(I8) => Size::from_bits(8),
+            Int(I16) => Size::from_bits(16),
+            Int(I32) | F32 => Size::from_bits(32),
+            Int(I64) | F64 => Size::from_bits(64),
+            Pointer => dl.pointer_size
+        }
+    }
+
+    pub fn align(self, dl: &TargetDataLayout) -> Align {
+        match self {
+            Int(I1) => dl.i1_align,
+            Int(I8) => dl.i8_align,
+            Int(I16) => dl.i16_align,
+            Int(I32) => dl.i32_align,
+            Int(I64) => dl.i64_align,
+            F32 => dl.f32_align,
+            F64 => dl.f64_align,
+            Pointer => dl.pointer_align
+        }
+    }
+}
+
+/// Path through fields of nested structures.
+// FIXME(eddyb) use small vector optimization for the common case.
+pub type FieldPath = Vec<u32>;
+
+/// A structure, a product type in ADT terms.
+#[derive(PartialEq, Eq, Hash, Debug)]
+pub struct Struct {
+    pub align: Align,
+
+    /// If true, no alignment padding is used.
+    pub packed: bool,
+
+    /// If true, the size is exact, otherwise it's only a lower bound.
+    pub sized: bool,
+
+    /// Offsets for the first byte after each field.
+    /// That is, field_offset(i) = offset_after_field[i - 1] and the
+    /// whole structure's size is the last offset, excluding padding.
+    // FIXME(eddyb) use small vector optimization for the common case.
+    pub offset_after_field: Vec<Size>
+}
+
+impl Struct {
+    pub fn new(dl: &TargetDataLayout, packed: bool) -> Struct {
+        Struct {
+            align: if packed { dl.i8_align } else { dl.aggregate_align },
+            packed: packed,
+            sized: true,
+            offset_after_field: vec![]
+        }
+    }
+
+    /// Extend the Struct with more fields.
+    pub fn extend<'a, 'tcx, I>(&mut self, dl: &TargetDataLayout,
+                               fields: I,
+                               scapegoat: Ty<'tcx>)
+                               -> Result<(), LayoutError<'tcx>>
+    where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
+        self.offset_after_field.reserve(fields.size_hint().0);
+
+        for field in fields {
+            if !self.sized {
+                bug!("Struct::compute: field #{} of `{}` comes after unsized field",
+                     self.offset_after_field.len(), scapegoat);
+            }
+
+            let field = field?;
+            if field.is_unsized() {
+                self.sized = false;
+            }
+
+            // Invariant: offset < dl.obj_size_bound() <= 1<<61
+            let mut offset = if !self.packed {
+                let align = field.align(dl);
+                self.align = self.align.max(align);
+                self.offset_after_field.last_mut().map_or(Size::from_bytes(0), |last| {
+                    *last = last.abi_align(align);
+                    *last
+                })
+            } else {
+                self.offset_after_field.last().map_or(Size::from_bytes(0), |&last| last)
+            };
+
+            offset = offset.checked_add(field.size(dl), dl)
+                           .map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?;
+
+            self.offset_after_field.push(offset);
+        }
+
+        Ok(())
+    }
+
+    /// Get the size without trailing alignment padding.
+    pub fn min_size(&self) -> Size {
+        self.offset_after_field.last().map_or(Size::from_bytes(0), |&last| last)
+    }
+
+    /// Get the size with trailing alignment padding.
+    pub fn stride(&self) -> Size {
+        self.min_size().abi_align(self.align)
+    }
+
+    /// Determine whether a structure would be zero-sized, given its fields.
+    pub fn would_be_zero_sized<'a, 'tcx, I>(dl: &TargetDataLayout, fields: I)
+                                            -> Result<bool, LayoutError<'tcx>>
+    where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
+        for field in fields {
+            let field = field?;
+            if field.is_unsized() || field.size(dl).bytes() > 0 {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    }
+
+    /// Find the path leading to a non-zero leaf field, starting from
+    /// the given type and recursing through aggregates.
+    // FIXME(eddyb) track value ranges and traverse already optimized enums.
+    pub fn non_zero_field_in_type<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                                            ty: Ty<'tcx>)
+                                            -> Result<Option<FieldPath>, LayoutError<'tcx>> {
+        let tcx = infcx.tcx;
+        match (ty.layout(infcx)?, &ty.sty) {
+            (&Scalar { non_zero: true, .. }, _) => Ok(Some(vec![])),
+            (&FatPointer { non_zero: true, .. }, _) => {
+                Ok(Some(vec![FAT_PTR_ADDR as u32]))
+            }
+
+            // Is this the NonZero lang item wrapping a pointer or integer type?
+            (&Univariant { non_zero: true, .. }, &ty::TyStruct(def, substs)) => {
+                let fields = &def.struct_variant().fields;
+                assert_eq!(fields.len(), 1);
+                let ty = normalize_associated_type(infcx, fields[0].ty(tcx, substs));
+                match *ty.layout(infcx)? {
+                    // FIXME(eddyb) also allow floating-point types here.
+                    Scalar { value: Int(_), non_zero: false } |
+                    Scalar { value: Pointer, non_zero: false } => {
+                        Ok(Some(vec![0]))
+                    }
+                    FatPointer { non_zero: false, .. } => {
+                        Ok(Some(vec![FAT_PTR_ADDR as u32, 0]))
+                    }
+                    _ => Ok(None)
+                }
+            }
+
+            // Perhaps one of the fields of this struct is non-zero
+            // let's recurse and find out
+            (_, &ty::TyStruct(def, substs)) => {
+                Struct::non_zero_field_path(infcx, def.struct_variant().fields
+                                                      .iter().map(|field| {
+                    normalize_associated_type(infcx, field.ty(tcx, substs))
+                }))
+            }
+
+            // Perhaps one of the upvars of this closure is non-zero
+            // Let's recurse and find out!
+            (_, &ty::TyClosure(_, box ty::ClosureSubsts { upvar_tys: ref tys, .. })) |
+            // Can we use one of the fields in this tuple?
+            (_, &ty::TyTuple(ref tys)) => {
+                Struct::non_zero_field_path(infcx, tys.iter().cloned())
+            }
+
+            // Is this a fixed-size array of something non-zero
+            // with at least one element?
+            (_, &ty::TyArray(ety, d)) if d > 0 => {
+                Struct::non_zero_field_path(infcx, Some(ety).into_iter())
+            }
+
+            // Anything else is not a non-zero type.
+            _ => Ok(None)
+        }
+    }
+
+    /// Find the path leading to a non-zero leaf field, starting from
+    /// the given set of fields and recursing through aggregates.
+    pub fn non_zero_field_path<'a, 'tcx, I>(infcx: &InferCtxt<'a, 'tcx>,
+                                            fields: I)
+                                            -> Result<Option<FieldPath>, LayoutError<'tcx>>
+    where I: Iterator<Item=Ty<'tcx>> {
+        for (i, ty) in fields.enumerate() {
+            if let Some(mut path) = Struct::non_zero_field_in_type(infcx, ty)? {
+                path.push(i as u32);
+                return Ok(Some(path));
+            }
+        }
+        Ok(None)
+    }
+}
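
// Illustrative sketch, not part of this patch: the offset_after_field
// bookkeeping above, reduced to plain numbers. Each field is (size, align);
// min_size is the last recorded offset and stride rounds it up to the
// struct's alignment.
fn layout(fields: &[(u64, u64)]) -> (Vec<u64>, u64, u64) {
    let round_up = |x: u64, a: u64| (x + a - 1) & !(a - 1);
    let mut align = 1;
    let mut offsets = Vec::new();
    let mut offset = 0;
    for &(size, field_align) in fields {
        align = std::cmp::max(align, field_align);
        offset = round_up(offset, field_align) + size;
        offsets.push(offset);
    }
    (offsets, offset, round_up(offset, align))
}

fn main() {
    // Something like (u8, u32, u8): offsets after each field are 1, 8, 9;
    // min_size = 9, stride = 12 once padded to 4-byte alignment.
    assert_eq!(layout(&[(1, 1), (4, 4), (1, 1)]), (vec![1, 8, 9], 9, 12));
}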
+
+/// The first half of a fat pointer.
+/// - For a trait object, this is the address of the box.
+/// - For a slice, this is the base address.
+pub const FAT_PTR_ADDR: usize = 0;
+
+/// The second half of a fat pointer.
+/// - For a trait object, this is the address of the vtable.
+/// - For a slice, this is the length.
+pub const FAT_PTR_EXTRA: usize = 1;
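
// Illustrative check, not part of this patch: for a slice reference the two
// halves described above are the data address and the length, and the whole
// reference is two pointers wide.
use std::mem::size_of;

fn main() {
    let bytes = [1u8, 2, 3];
    let slice: &[u8] = &bytes;
    assert_eq!(slice.as_ptr(), &bytes[0] as *const u8); // the FAT_PTR_ADDR half
    assert_eq!(slice.len(), 3);                         // the FAT_PTR_EXTRA half
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
}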
+
+/// Type layout, from which size and alignment can be cheaply computed.
+/// For ADTs, it also includes field placement and enum optimizations.
+/// NOTE: Because Layout is interned, redundant information should be
+/// kept to a minimum, e.g. it includes no sub-component Ty or Layout.
+#[derive(Debug, PartialEq, Eq, Hash)]
+pub enum Layout {
+    /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr.
+    Scalar {
+        value: Primitive,
+        // If true, the value cannot represent a bit pattern of all zeroes.
+        non_zero: bool
+    },
+
+    /// SIMD vectors, from TyStruct marked with #[repr(simd)].
+    Vector {
+        element: Primitive,
+        count: u64
+    },
+
+    /// TyArray, TySlice or TyStr.
+    Array {
+        /// If true, the size is exact, otherwise it's only a lower bound.
+        sized: bool,
+        align: Align,
+        size: Size
+    },
+
+    /// TyRawPtr or TyRef with a !Sized pointee.
+    FatPointer {
+        metadata: Primitive,
+        // If true, the pointer cannot be null.
+        non_zero: bool
+    },
+
+    // Remaining variants are all ADTs such as TyStruct, TyEnum or TyTuple.
+
+    /// C-like enums; basically an integer.
+    CEnum {
+        discr: Integer,
+        signed: bool,
+        // Inclusive discriminant range.
+        // If min > max, it represents min...u64::MAX followed by 0...max.
+        // FIXME(eddyb) always use the shortest range, e.g. by finding
+        // the largest space between two consecutive discriminants and
+        // taking everything else as the (shortest) discriminant range.
+        min: u64,
+        max: u64
+    },
+
+    /// Single-case enums, and structs/tuples.
+    Univariant {
+        variant: Struct,
+        // If true, the structure is NonZero.
+        // FIXME(eddyb) use a newtype Layout kind for this.
+        non_zero: bool
+    },
+
+    /// General-case enums: for each case there is a struct, and they
+    /// all start with a field for the discriminant.
+    General {
+        discr: Integer,
+        variants: Vec<Struct>,
+        size: Size,
+        align: Align
+    },
+
+    /// Two cases distinguished by a nullable pointer: the case with discriminant
+    /// `nndiscr` must have a single field which is known to be nonnull due to its type.
+    /// The other case is known to be zero sized. Hence we represent the enum
+    /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
+    /// otherwise it indicates the other case.
+    ///
+    /// For example, `std::option::Option` instantiated at a safe pointer type
+    /// is represented such that `None` is a null pointer and `Some` is the
+    /// identity function.
+    RawNullablePointer {
+        nndiscr: u64,
+        value: Primitive
+    },
+
+    /// Two cases distinguished by a nullable pointer: the case with discriminant
+    /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
+    /// field is known to be nonnull due to its type; if that field is null, then
+    /// it represents the other case, which is known to be zero sized.
+    StructWrappedNullablePointer {
+        nndiscr: u64,
+        nonnull: Struct,
+        // N.B. There is a 0 at the start, for LLVM GEP through a pointer.
+        discrfield: FieldPath
+    }
+}
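
// Illustrative check, not part of this patch: the RawNullablePointer case
// above is why Option<&T> and Option<Box<T>> need no separate discriminant,
// while Option of a nullable raw pointer does.
use std::mem::size_of;

fn main() {
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    assert_eq!(size_of::<Option<Box<u8>>>(), size_of::<Box<u8>>());
    assert!(size_of::<Option<*const u8>>() > size_of::<*const u8>());
}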
+
+#[derive(Copy, Clone, Debug)]
+pub enum LayoutError<'tcx> {
+    Unknown(Ty<'tcx>),
+    SizeOverflow(Ty<'tcx>)
+}
+
+impl<'tcx> fmt::Display for LayoutError<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            LayoutError::Unknown(ty) => {
+                write!(f, "the type `{:?}` has an unknown layout", ty)
+            }
+            LayoutError::SizeOverflow(ty) => {
+                write!(f, "the type `{:?}` is too big for the current architecture", ty)
+            }
+        }
+    }
+}
+
+/// Helper function for normalizing associated types in an inference context.
+fn normalize_associated_type<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                                       ty: Ty<'tcx>)
+                                       -> Ty<'tcx> {
+    if !ty.has_projection_types() {
+        return ty;
+    }
+
+    let mut selcx = traits::SelectionContext::new(infcx);
+    let cause = traits::ObligationCause::dummy();
+    let traits::Normalized { value: result, obligations } =
+        traits::normalize(&mut selcx, cause, &ty);
+
+    let mut fulfill_cx = traits::FulfillmentContext::new();
+
+    for obligation in obligations {
+        fulfill_cx.register_predicate_obligation(infcx, obligation);
+    }
+
+    drain_fulfillment_cx_or_panic(DUMMY_SP, infcx, &mut fulfill_cx, &result)
+}
+
+impl Layout {
+    pub fn compute_uncached<'a, 'tcx>(ty: Ty<'tcx>,
+                                      infcx: &InferCtxt<'a, 'tcx>)
+                                      -> Result<Layout, LayoutError<'tcx>> {
+        let tcx = infcx.tcx;
+        let dl = &tcx.data_layout;
+        assert!(!ty.has_infer_types());
+
+        let layout = match ty.sty {
+            // Basic scalars.
+            ty::TyBool => Scalar { value: Int(I1), non_zero: false },
+            ty::TyChar => Scalar { value: Int(I32), non_zero: false },
+            ty::TyInt(ity) => {
+                Scalar {
+                    value: Int(Integer::from_attr(dl, attr::SignedInt(ity))),
+                    non_zero: false
+                }
+            }
+            ty::TyUint(ity) => {
+                Scalar {
+                    value: Int(Integer::from_attr(dl, attr::UnsignedInt(ity))),
+                    non_zero: false
+                }
+            }
+            ty::TyFloat(FloatTy::F32) => Scalar { value: F32, non_zero: false },
+            ty::TyFloat(FloatTy::F64) => Scalar { value: F64, non_zero: false },
+            ty::TyFnPtr(_) => Scalar { value: Pointer, non_zero: true },
+
+            // Potentially-fat pointers.
+            ty::TyBox(pointee) |
+            ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
+            ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+                let non_zero = !ty.is_unsafe_ptr();
+                if pointee.is_sized(&infcx.parameter_environment, DUMMY_SP) {
+                    Scalar { value: Pointer, non_zero: non_zero }
+                } else {
+                    let unsized_part = tcx.struct_tail(pointee);
+                    let meta = match unsized_part.sty {
+                        ty::TySlice(_) | ty::TyStr => {
+                            Int(dl.ptr_sized_integer())
+                        }
+                        ty::TyTrait(_) => Pointer,
+                        _ => return Err(LayoutError::Unknown(unsized_part))
+                    };
+                    FatPointer { metadata: meta, non_zero: non_zero }
+                }
+            }
+
+            // Arrays and slices.
+            ty::TyArray(element, count) => {
+                let element = element.layout(infcx)?;
+                Array {
+                    sized: true,
+                    align: element.align(dl),
+                    size: element.size(dl).checked_mul(count as u64, dl)
+                                 .map_or(Err(LayoutError::SizeOverflow(ty)), Ok)?
+                }
+            }
+            ty::TySlice(element) => {
+                Array {
+                    sized: false,
+                    align: element.layout(infcx)?.align(dl),
+                    size: Size::from_bytes(0)
+                }
+            }
+            ty::TyStr => {
+                Array {
+                    sized: false,
+                    align: dl.i8_align,
+                    size: Size::from_bytes(0)
+                }
+            }
+
+            // Odd unit types.
+            ty::TyFnDef(..) => {
+                Univariant {
+                    variant: Struct::new(dl, false),
+                    non_zero: false
+                }
+            }
+            ty::TyTrait(_) => {
+                let mut unit = Struct::new(dl, false);
+                unit.sized = false;
+                Univariant { variant: unit, non_zero: false }
+            }
+
+            // Tuples.
+            ty::TyClosure(_, box ty::ClosureSubsts { upvar_tys: ref tys, .. }) |
+            ty::TyTuple(ref tys) => {
+                let mut st = Struct::new(dl, false);
+                st.extend(dl, tys.iter().map(|ty| ty.layout(infcx)), ty)?;
+                Univariant { variant: st, non_zero: false }
+            }
+
+            // ADTs.
+            ty::TyStruct(def, substs) => {
+                if ty.is_simd() {
+                    // SIMD vector types.
+                    let element = ty.simd_type(tcx);
+                    match *element.layout(infcx)? {
+                        Scalar { value, .. } => {
+                            return Ok(Vector {
+                                element: value,
+                                count: ty.simd_size(tcx) as u64
+                            });
+                        }
+                        _ => {
+                            tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
+                                                     a non-machine element type `{}`",
+                                                    ty, element));
+                        }
+                    }
+                }
+                let fields = def.struct_variant().fields.iter().map(|field| {
+                    normalize_associated_type(infcx, field.ty(tcx, substs))
+                        .layout(infcx)
+                });
+                let packed = tcx.lookup_packed(def.did);
+                let mut st = Struct::new(dl, packed);
+                st.extend(dl, fields, ty)?;
+
+                // FIXME(16758) don't add a drop flag to unsized structs, as it
+                // won't actually be in the location we say it is because it'll be after
+                // the unsized field. Several other pieces of code assume that the unsized
+                // field is definitely the last one.
+                if def.dtor_kind().has_drop_flag() &&
+                   ty.is_sized(&infcx.parameter_environment, DUMMY_SP) {
+                    st.extend(dl, Some(Ok(&Scalar {
+                        value: Int(I8),
+                        non_zero: false
+                    })).into_iter(), ty)?;
+                }
+                Univariant {
+                    variant: st,
+                    non_zero: Some(def.did) == tcx.lang_items.non_zero()
+                }
+            }
+            ty::TyEnum(def, substs) => {
+                let hint = *tcx.lookup_repr_hints(def.did).get(0)
+                    .unwrap_or(&attr::ReprAny);
+
+                let dtor = def.dtor_kind().has_drop_flag();
+                let drop_flag = if dtor {
+                    Some(Scalar { value: Int(I8), non_zero: false })
+                } else {
+                    None
+                };
+
+                if def.variants.is_empty() {
+                    // Uninhabitable; represent as unit
+                    // (Typechecking will reject discriminant-sizing attrs.)
+                    assert_eq!(hint, attr::ReprAny);
+
+                    let mut st = Struct::new(dl, false);
+                    st.extend(dl, drop_flag.iter().map(Ok), ty)?;
+                    return Ok(Univariant { variant: st, non_zero: false });
+                }
+
+                if !dtor && def.variants.iter().all(|v| v.fields.is_empty()) {
+                    // All bodies empty -> intlike
+                    let (mut min, mut max) = (i64::MAX, i64::MIN);
+                    for v in &def.variants {
+                        let x = v.disr_val.to_u64_unchecked() as i64;
+                        if x < min { min = x; }
+                        if x > max { max = x; }
+                    }
+
+                    let (discr, signed) = Integer::repr_discr(tcx, hint, min, max);
+                    return Ok(CEnum {
+                        discr: discr,
+                        signed: signed,
+                        min: min as u64,
+                        max: max as u64
+                    });
+                }
+
+                // Since there's at least one
+                // non-empty body, explicit discriminants should have
+                // been rejected by a checker before this point.
+                for (i, v) in def.variants.iter().enumerate() {
+                    if i as u64 != v.disr_val.to_u64_unchecked() {
+                        bug!("non-C-like enum {} with specified discriminants",
+                             tcx.item_path_str(def.did));
+                    }
+                }
+
+                if def.variants.len() == 1 {
+                    // Equivalent to a struct/tuple/newtype.
+                    // (Typechecking will reject discriminant-sizing attrs.)
+                    assert_eq!(hint, attr::ReprAny);
+                    let fields = def.variants[0].fields.iter().map(|field| {
+                        normalize_associated_type(infcx, field.ty(tcx, substs))
+                            .layout(infcx)
+                    });
+                    let mut st = Struct::new(dl, false);
+                    st.extend(dl, fields.chain(drop_flag.iter().map(Ok)), ty)?;
+                    return Ok(Univariant { variant: st, non_zero: false });
+                }
+
+                // Cache the substituted and normalized variant field types.
+                let variants = def.variants.iter().map(|v| {
+                    v.fields.iter().map(|field| {
+                        normalize_associated_type(infcx, field.ty(tcx, substs))
+                    }).collect::<Vec<_>>()
+                }).collect::<Vec<_>>();
+
+                if !dtor && variants.len() == 2 && hint == attr::ReprAny {
+                    // Nullable pointer optimization
+                    for discr in 0..2 {
+                        let other_fields = variants[1 - discr].iter().map(|ty| {
+                            ty.layout(infcx)
+                        });
+                        if !Struct::would_be_zero_sized(dl, other_fields)? {
+                            continue;
+                        }
+                        let path = Struct::non_zero_field_path(infcx,
+                            variants[discr].iter().cloned())?;
+                        let mut path = if let Some(p) = path { p } else { continue };
+
+                        // FIXME(eddyb) should take advantage of a newtype.
+                        if path == &[0] && variants[discr].len() == 1 {
+                            match *variants[discr][0].layout(infcx)? {
+                                Scalar { value, .. } => {
+                                    return Ok(RawNullablePointer {
+                                        nndiscr: discr as u64,
+                                        value: value
+                                    });
+                                }
+                                _ => {
+                                    bug!("Layout::compute: `{}`'s non-zero \
+                                          `{}` field not scalar?!",
+                                         ty, variants[discr][0])
+                                }
+                            }
+                        }
+
+                        path.push(0); // For GEP through a pointer.
+                        path.reverse();
+                        let mut st = Struct::new(dl, false);
+                        st.extend(dl, variants[discr].iter().map(|ty| {
+                            ty.layout(infcx)
+                        }), ty)?;
+                        return Ok(StructWrappedNullablePointer {
+                            nndiscr: discr as u64,
+                            nonnull: st,
+                            discrfield: path
+                        });
+                    }
+                }
+
+                // The general case.
+                let discr_max = (variants.len() - 1) as i64;
+                assert!(discr_max >= 0);
+                let (min_ity, _) = Integer::repr_discr(tcx, hint, 0, discr_max);
+
+                let mut align = dl.aggregate_align;
+                let mut size = Size::from_bytes(0);
+
+                // We're interested in the smallest alignment, so start large.
+                let mut start_align = Align::from_bytes(256, 256).unwrap();
+
+                // Create the set of structs that represent each variant
+                // Use the minimum integer type we figured out above
+                let discr = Some(Scalar { value: Int(min_ity), non_zero: false });
+                let mut variants = variants.into_iter().map(|fields| {
+                    let mut found_start = false;
+                    let fields = fields.into_iter().map(|field| {
+                        let field = field.layout(infcx)?;
+                        if !found_start {
+                            // Find the first field we can't move later
+                            // to make room for a larger discriminant.
+                            let field_align = field.align(dl);
+                            if field.size(dl).bytes() != 0 || field_align.abi() != 1 {
+                                start_align = start_align.min(field_align);
+                                found_start = true;
+                            }
+                        }
+                        Ok(field)
+                    });
+                    let mut st = Struct::new(dl, false);
+                    st.extend(dl, discr.iter().map(Ok).chain(fields)
+                                              .chain(drop_flag.iter().map(Ok)), ty)?;
+                    size = cmp::max(size, st.min_size());
+                    align = align.max(st.align);
+                    Ok(st)
+                }).collect::<Result<Vec<_>, _>>()?;
+
+                // Align the maximum variant size to the largest alignment.
+                size = size.abi_align(align);
+
+                if size.bytes() >= dl.obj_size_bound() {
+                    return Err(LayoutError::SizeOverflow(ty));
+                }
+
+                // Check to see if we should use a different type for the
+                // discriminant. We can safely use a type with the same size
+                // as the alignment of the first field of each variant.
+                // We increase the size of the discriminant to avoid LLVM copying
+                // padding when it doesn't need to. This normally causes unaligned
+                // loads/stores and excessive memcpy/memset operations. By using a
+                // bigger integer size, LLVM can be sure about its contents and
+                // won't be so conservative.
+
+                // Use the initial field alignment
+                let wanted = start_align.abi();
+                let mut ity = min_ity;
+                for &candidate in &[I16, I32, I64] {
+                    let ty = Int(candidate);
+                    if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() {
+                        ity = candidate;
+                        break;
+                    }
+                }
+
+                // FIXME(eddyb) conservative only to avoid diverging from trans::adt.
+                if align.abi() != start_align.abi() {
+                    ity = min_ity;
+                }
+
+                // If the alignment is not larger than the chosen discriminant size,
+                // don't use the alignment as the final size.
+                if ity <= min_ity {
+                    ity = min_ity;
+                } else {
+                    // Patch up the variants' first few fields.
+                    let old_ity_size = Int(min_ity).size(dl);
+                    let new_ity_size = Int(ity).size(dl);
+                    for variant in &mut variants {
+                        for offset in &mut variant.offset_after_field {
+                            if *offset > old_ity_size {
+                                break;
+                            }
+                            *offset = new_ity_size;
+                        }
+                    }
+                }
+
+                General {
+                    discr: ity,
+                    variants: variants,
+                    size: size,
+                    align: align
+                }
+            }
+
+            // Types with no meaningful known layout.
+            ty::TyProjection(_) | ty::TyParam(_) => {
+                return Err(LayoutError::Unknown(ty));
+            }
+            ty::TyInfer(_) | ty::TyError => {
+                bug!("Layout::compute: unexpected type `{}`", ty)
+            }
+        };
+
+        Ok(layout)
+    }
+
+    /// Returns true if the layout corresponds to an unsized type.
+    pub fn is_unsized(&self) -> bool {
+        match *self {
+            Scalar {..} | Vector {..} | FatPointer {..} |
+            CEnum {..} | General {..} |
+            RawNullablePointer {..} |
+            StructWrappedNullablePointer {..} => false,
+
+            Array { sized, .. } |
+            Univariant { variant: Struct { sized, .. }, .. } => !sized
+        }
+    }
+
+    pub fn size(&self, dl: &TargetDataLayout) -> Size {
+        match *self {
+            Scalar { value, .. } | RawNullablePointer { value, .. } => {
+                value.size(dl)
+            }
+
+            Vector { element, count } => {
+                let elem_size = element.size(dl);
+                let vec_size = match elem_size.checked_mul(count, dl) {
+                    Some(size) => size,
+                    None => bug!("Layout::size({:?}): {} * {} overflowed",
+                                 self, elem_size.bytes(), count)
+                };
+                vec_size.abi_align(self.align(dl))
+            }
+
+            FatPointer { metadata, .. } => {
+                // Effectively a (ptr, meta) tuple.
+                Pointer.size(dl).abi_align(metadata.align(dl))
+                       .checked_add(metadata.size(dl), dl).unwrap()
+                       .abi_align(self.align(dl))
+            }
+
+            CEnum { discr, .. } => Int(discr).size(dl),
+            Array { size, .. } | General { size, .. } => size,
+
+            Univariant { ref variant, .. } |
+            StructWrappedNullablePointer { nonnull: ref variant, .. } => {
+                variant.stride()
+            }
+        }
+    }
+
+    pub fn align(&self, dl: &TargetDataLayout) -> Align {
+        match *self {
+            Scalar { value, .. } | RawNullablePointer { value, .. } => {
+                value.align(dl)
+            }
+
+            Vector { element, count } => {
+                let elem_size = element.size(dl);
+                let vec_size = match elem_size.checked_mul(count, dl) {
+                    Some(size) => size,
+                    None => bug!("Layout::align({:?}): {} * {} overflowed",
+                                 self, elem_size.bytes(), count)
+                };
+                for &(size, align) in &dl.vector_align {
+                    if size == vec_size {
+                        return align;
+                    }
+                }
+                // Default to natural alignment, which is what LLVM does.
+                // That is, use the size, rounded up to a power of 2.
+                let align = vec_size.bytes().next_power_of_two();
+                Align::from_bytes(align, align).unwrap()
+            }
+
+            FatPointer { metadata, .. } => {
+                // Effectively a (ptr, meta) tuple.
+                Pointer.align(dl).max(metadata.align(dl))
+            }
+
+            CEnum { discr, .. } => Int(discr).align(dl),
+            Array { align, .. } | General { align, .. } => align,
+
+            Univariant { ref variant, .. } |
+            StructWrappedNullablePointer { nonnull: ref variant, .. } => {
+                variant.align
+            }
+        }
+    }
+}
+
+/// Type size "skeleton", i.e. the only information determining a type's size.
+/// While this is conservative (aside from constant sizes, only pointers,
+/// newtypes thereof and null-pointer-optimized enums are allowed), it is
+/// enough to statically check common use cases of transmute.
+#[derive(Copy, Clone, Debug)]
+pub enum SizeSkeleton<'tcx> {
+    /// Any statically computable Layout.
+    Known(Size),
+
+    /// A potentially-fat pointer.
+    Pointer {
+        // If true, this pointer is never null.
+        non_zero: bool,
+        // The type which determines the unsized metadata, if any,
+        // of this pointer. Either a type parameter or a projection
+        // depending on one, with regions erased.
+        tail: Ty<'tcx>
+    }
+}
+
+impl<'tcx> SizeSkeleton<'tcx> {
+    pub fn compute<'a>(ty: Ty<'tcx>, infcx: &InferCtxt<'a, 'tcx>)
+                       -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
+        let tcx = infcx.tcx;
+        assert!(!ty.has_infer_types());
+
+        // First try computing a static layout.
+        let err = match ty.layout(infcx) {
+            Ok(layout) => {
+                return Ok(SizeSkeleton::Known(layout.size(&tcx.data_layout)));
+            }
+            Err(err) => err
+        };
+
+        match ty.sty {
+            ty::TyBox(pointee) |
+            ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
+            ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+                let non_zero = !ty.is_unsafe_ptr();
+                let tail = tcx.struct_tail(pointee);
+                match tail.sty {
+                    ty::TyParam(_) | ty::TyProjection(_) => {
+                        assert!(tail.has_param_types() || tail.has_self_ty());
+                        Ok(SizeSkeleton::Pointer {
+                            non_zero: non_zero,
+                            tail: tcx.erase_regions(&tail)
+                        })
+                    }
+                    _ => {
+                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
+                              tail `{}` is not a type parameter or a projection",
+                             ty, err, tail)
+                    }
+                }
+            }
+
+            ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
+                // Only newtypes and enums w/ nullable pointer optimization.
+                if def.variants.is_empty() || def.variants.len() > 2 {
+                    return Err(err);
+                }
+
+                // If there's a drop flag, it can't be just a pointer.
+                if def.dtor_kind().has_drop_flag() {
+                    return Err(err);
+                }
+
+                // Get a zero-sized variant or a pointer newtype.
+                let zero_or_ptr_variant = |i: usize| {
+                    let fields = def.variants[i].fields.iter().map(|field| {
+                        let ty = normalize_associated_type(infcx, &field.ty(tcx, substs));
+                        SizeSkeleton::compute(ty, infcx)
+                    });
+                    let mut ptr = None;
+                    for field in fields {
+                        let field = field?;
+                        match field {
+                            SizeSkeleton::Known(size) => {
+                                if size.bytes() > 0 {
+                                    return Err(err);
+                                }
+                            }
+                            SizeSkeleton::Pointer {..} => {
+                                if ptr.is_some() {
+                                    return Err(err);
+                                }
+                                ptr = Some(field);
+                            }
+                        }
+                    }
+                    Ok(ptr)
+                };
+
+                let v0 = zero_or_ptr_variant(0)?;
+                // Newtype.
+                if def.variants.len() == 1 {
+                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
+                        return Ok(SizeSkeleton::Pointer {
+                            non_zero: non_zero ||
+                                Some(def.did) == tcx.lang_items.non_zero(),
+                            tail: tail
+                        });
+                    } else {
+                        return Err(err);
+                    }
+                }
+
+                let v1 = zero_or_ptr_variant(1)?;
+                // Nullable pointer enum optimization.
+                match (v0, v1) {
+                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
+                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
+                        Ok(SizeSkeleton::Pointer {
+                            non_zero: false,
+                            tail: tail
+                        })
+                    }
+                    _ => Err(err)
+                }
+            }
+
+            _ => Err(err)
+        }
+    }
+
+    pub fn same_size(self, other: SizeSkeleton) -> bool {
+        match (self, other) {
+            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
+            (SizeSkeleton::Pointer { tail: a, .. },
+             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
+            _ => false
+        }
+    }
+}
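
// Illustrative sketch, not part of this patch: the point of SizeSkeleton is
// to accept transmutes like the one below even though T is a type parameter,
// because &T and Option<&T> are the same kind of (possibly-fat) pointer with
// the same tail type. The function name is made up for the example.
use std::mem::transmute;

unsafe fn assume_some<T>(x: Option<&T>) -> &T {
    // The caller must guarantee that x is Some(_); only the compile-time
    // size check is being demonstrated here.
    transmute(x)
}

fn main() {
    let v = 7u32;
    let r = unsafe { assume_some(Some(&v)) };
    assert_eq!(*r, 7);
}
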
diff --git a/src/librustc/ty/maps.rs b/src/librustc/ty/maps.rs
new file mode 100644 (file)
index 0000000..57b1dd6
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use dep_graph::{DepNode, DepTrackingMapConfig};
+use hir::def_id::DefId;
+use ty;
+use std::marker::PhantomData;
+use std::rc::Rc;
+use syntax::{attr, ast};
+
+macro_rules! dep_map_ty {
+    ($ty_name:ident : $node_name:ident ($key:ty) -> $value:ty) => {
+        pub struct $ty_name<'tcx> {
+            data: PhantomData<&'tcx ()>
+        }
+
+        impl<'tcx> DepTrackingMapConfig for $ty_name<'tcx> {
+            type Key = $key;
+            type Value = $value;
+            fn to_dep_node(key: &$key) -> DepNode<DefId> { DepNode::$node_name(*key) }
+        }
+    }
+}
+
+dep_map_ty! { ImplOrTraitItems: ImplOrTraitItems(DefId) -> ty::ImplOrTraitItem<'tcx> }
+dep_map_ty! { Tcache: ItemSignature(DefId) -> ty::TypeScheme<'tcx> }
+dep_map_ty! { Predicates: ItemSignature(DefId) -> ty::GenericPredicates<'tcx> }
+dep_map_ty! { SuperPredicates: ItemSignature(DefId) -> ty::GenericPredicates<'tcx> }
+dep_map_ty! { TraitItemDefIds: TraitItemDefIds(DefId) -> Rc<Vec<ty::ImplOrTraitItemId>> }
+dep_map_ty! { ImplTraitRefs: ItemSignature(DefId) -> Option<ty::TraitRef<'tcx>> }
+dep_map_ty! { TraitDefs: ItemSignature(DefId) -> &'tcx ty::TraitDef<'tcx> }
+dep_map_ty! { AdtDefs: ItemSignature(DefId) -> ty::AdtDefMaster<'tcx> }
+dep_map_ty! { ItemVariances: ItemSignature(DefId) -> Rc<ty::ItemVariances> }
+dep_map_ty! { InherentImpls: InherentImpls(DefId) -> Rc<Vec<DefId>> }
+dep_map_ty! { ImplItems: ImplItems(DefId) -> Vec<ty::ImplOrTraitItemId> }
+dep_map_ty! { TraitItems: TraitItems(DefId) -> Rc<Vec<ty::ImplOrTraitItem<'tcx>>> }
+dep_map_ty! { ReprHints: ReprHints(DefId) -> Rc<Vec<attr::ReprAttr>> }
+dep_map_ty! { InlinedClosures: Hir(DefId) -> ast::NodeId }
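
// Illustrative sketch, not part of this patch: each dep_map_ty! invocation
// above expands to a marker type plus a DepTrackingMapConfig impl tying a key
// type to a dependency-graph node. The simplified stand-ins below (DepNode,
// DepTrackingMapConfig, TypeOfItem) are invented for the example.
use std::marker::PhantomData;

#[derive(Debug, Clone, Copy, PartialEq)]
enum DepNode<K> { ItemSignature(K) }

trait DepTrackingMapConfig {
    type Key;
    type Value;
    fn to_dep_node(key: &Self::Key) -> DepNode<Self::Key>;
}

macro_rules! dep_map_ty {
    ($ty_name:ident : $node_name:ident ($key:ty) -> $value:ty) => {
        struct $ty_name { _data: PhantomData<()> }

        impl DepTrackingMapConfig for $ty_name {
            type Key = $key;
            type Value = $value;
            fn to_dep_node(key: &$key) -> DepNode<$key> { DepNode::$node_name(*key) }
        }
    }
}

// One invocation, analogous to the lines above but with placeholder types:
dep_map_ty! { TypeOfItem: ItemSignature(u32) -> String }

fn main() {
    assert_eq!(TypeOfItem::to_dep_node(&7), DepNode::ItemSignature(7));
}
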
diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs
new file mode 100644 (file)
index 0000000..76e1856
--- /dev/null
@@ -0,0 +1,2724 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub use self::ImplOrTraitItemId::*;
+pub use self::Variance::*;
+pub use self::DtorKind::*;
+pub use self::ImplOrTraitItemContainer::*;
+pub use self::BorrowKind::*;
+pub use self::ImplOrTraitItem::*;
+pub use self::IntVarValue::*;
+pub use self::LvaluePreference::*;
+pub use self::fold::TypeFoldable;
+
+use dep_graph::{self, DepNode};
+use hir::map as ast_map;
+use middle;
+use middle::cstore::{self, CrateStore, LOCAL_CRATE};
+use hir::def::{self, Def, ExportMap};
+use hir::def_id::DefId;
+use middle::lang_items::{FnTraitLangItem, FnMutTraitLangItem, FnOnceTraitLangItem};
+use middle::region::{CodeExtent};
+use traits;
+use ty;
+use ty::fold::TypeFolder;
+use ty::subst::{Subst, Substs, VecPerParamSpace};
+use ty::walk::TypeWalker;
+use util::common::MemoizationMap;
+use util::nodemap::NodeSet;
+use util::nodemap::FnvHashMap;
+
+use serialize::{Encodable, Encoder, Decodable, Decoder};
+use std::borrow::{Borrow, Cow};
+use std::cell::Cell;
+use std::hash::{Hash, Hasher};
+use std::iter;
+use std::rc::Rc;
+use std::slice;
+use std::vec::IntoIter;
+use syntax::ast::{self, CrateNum, Name, NodeId};
+use syntax::attr::{self, AttrMetaMethods};
+use syntax::codemap::{DUMMY_SP, Span};
+use syntax::parse::token::InternedString;
+
+use rustc_const_math::ConstInt;
+
+use hir;
+use hir::{ItemImpl, ItemTrait, PatKind};
+use hir::intravisit::Visitor;
+
+pub use self::sty::{Binder, DebruijnIndex};
+pub use self::sty::{BuiltinBound, BuiltinBounds, ExistentialBounds};
+pub use self::sty::{BareFnTy, FnSig, PolyFnSig, FnOutput, PolyFnOutput};
+pub use self::sty::{ClosureTy, InferTy, ParamTy, ProjectionTy, TraitTy};
+pub use self::sty::{ClosureSubsts, TypeAndMut};
+pub use self::sty::{TraitRef, TypeVariants, PolyTraitRef};
+pub use self::sty::{BoundRegion, EarlyBoundRegion, FreeRegion, Region};
+pub use self::sty::{TyVid, IntVid, FloatVid, RegionVid, SkolemizedRegionVid};
+pub use self::sty::BoundRegion::*;
+pub use self::sty::FnOutput::*;
+pub use self::sty::InferTy::*;
+pub use self::sty::Region::*;
+pub use self::sty::TypeVariants::*;
+
+pub use self::sty::BuiltinBound::Send as BoundSend;
+pub use self::sty::BuiltinBound::Sized as BoundSized;
+pub use self::sty::BuiltinBound::Copy as BoundCopy;
+pub use self::sty::BuiltinBound::Sync as BoundSync;
+
+pub use self::contents::TypeContents;
+pub use self::context::{TyCtxt, tls};
+pub use self::context::{CtxtArenas, Lift, Tables};
+
+pub use self::trait_def::{TraitDef, TraitFlags};
+
+pub mod adjustment;
+pub mod cast;
+pub mod error;
+pub mod fast_reject;
+pub mod fold;
+pub mod item_path;
+pub mod layout;
+pub mod _match;
+pub mod maps;
+pub mod outlives;
+pub mod relate;
+pub mod subst;
+pub mod trait_def;
+pub mod walk;
+pub mod wf;
+pub mod util;
+
+mod contents;
+mod context;
+mod flags;
+mod ivar;
+mod structural_impls;
+mod sty;
+
+pub type Disr = ConstInt;
+
+// Data types
+
+/// The complete set of all analyses described in this module. This is
+/// produced by the driver and fed to trans and later passes.
+pub struct CrateAnalysis<'a> {
+    pub export_map: ExportMap,
+    pub access_levels: middle::privacy::AccessLevels,
+    pub reachable: NodeSet,
+    pub name: &'a str,
+    pub glob_map: Option<hir::GlobMap>,
+}
+
+#[derive(Copy, Clone)]
+pub enum DtorKind {
+    NoDtor,
+    TraitDtor(bool)
+}
+
+impl DtorKind {
+    pub fn is_present(&self) -> bool {
+        match *self {
+            TraitDtor(..) => true,
+            _ => false
+        }
+    }
+
+    pub fn has_drop_flag(&self) -> bool {
+        match self {
+            &NoDtor => false,
+            &TraitDtor(flag) => flag
+        }
+    }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum ImplOrTraitItemContainer {
+    TraitContainer(DefId),
+    ImplContainer(DefId),
+}
+
+impl ImplOrTraitItemContainer {
+    pub fn id(&self) -> DefId {
+        match *self {
+            TraitContainer(id) => id,
+            ImplContainer(id) => id,
+        }
+    }
+}
+
+/// The "header" of an impl is everything outside the body: a Self type, a trait
+/// ref (in the case of a trait impl), and a set of predicates (from the
+/// bounds/where clauses).
+#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+pub struct ImplHeader<'tcx> {
+    pub impl_def_id: DefId,
+    pub self_ty: Ty<'tcx>,
+    pub trait_ref: Option<TraitRef<'tcx>>,
+    pub predicates: Vec<Predicate<'tcx>>,
+}
+
+impl<'tcx> ImplHeader<'tcx> {
+    pub fn with_fresh_ty_vars<'a>(selcx: &mut traits::SelectionContext<'a, 'tcx>,
+                                  impl_def_id: DefId)
+                                  -> ImplHeader<'tcx>
+    {
+        let tcx = selcx.tcx();
+        let impl_generics = tcx.lookup_item_type(impl_def_id).generics;
+        let impl_substs = selcx.infcx().fresh_substs_for_generics(DUMMY_SP, &impl_generics);
+
+        let header = ImplHeader {
+            impl_def_id: impl_def_id,
+            self_ty: tcx.lookup_item_type(impl_def_id).ty,
+            trait_ref: tcx.impl_trait_ref(impl_def_id),
+            predicates: tcx.lookup_predicates(impl_def_id).predicates.into_vec(),
+        }.subst(tcx, &impl_substs);
+
+        let traits::Normalized { value: mut header, obligations } =
+            traits::normalize(selcx, traits::ObligationCause::dummy(), &header);
+
+        header.predicates.extend(obligations.into_iter().map(|o| o.predicate));
+        header
+    }
+}
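As a hypothetical illustration (not taken from the patch; `Wrapper` is an invented type), the fields of an ImplHeader line up with an impl like this:

    // impl<T: Clone> From<T> for Wrapper<T> { ... }
    //
    // After with_fresh_ty_vars replaces the impl's parameters with fresh
    // inference variables $T, the header roughly holds:
    //     self_ty:    Wrapper<$T>
    //     trait_ref:  Some(From<$T>), i.e. the trait being implemented
    //     predicates: [$T: Clone]     // from the bounds/where clauses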
+
+#[derive(Clone)]
+pub enum ImplOrTraitItem<'tcx> {
+    ConstTraitItem(Rc<AssociatedConst<'tcx>>),
+    MethodTraitItem(Rc<Method<'tcx>>),
+    TypeTraitItem(Rc<AssociatedType<'tcx>>),
+}
+
+impl<'tcx> ImplOrTraitItem<'tcx> {
+    fn id(&self) -> ImplOrTraitItemId {
+        match *self {
+            ConstTraitItem(ref associated_const) => {
+                ConstTraitItemId(associated_const.def_id)
+            }
+            MethodTraitItem(ref method) => MethodTraitItemId(method.def_id),
+            TypeTraitItem(ref associated_type) => {
+                TypeTraitItemId(associated_type.def_id)
+            }
+        }
+    }
+
+    pub fn def(&self) -> Def {
+        match *self {
+            ConstTraitItem(ref associated_const) => Def::AssociatedConst(associated_const.def_id),
+            MethodTraitItem(ref method) => Def::Method(method.def_id),
+            TypeTraitItem(ref ty) => Def::AssociatedTy(ty.container.id(), ty.def_id),
+        }
+    }
+
+    pub fn def_id(&self) -> DefId {
+        match *self {
+            ConstTraitItem(ref associated_const) => associated_const.def_id,
+            MethodTraitItem(ref method) => method.def_id,
+            TypeTraitItem(ref associated_type) => associated_type.def_id,
+        }
+    }
+
+    pub fn name(&self) -> Name {
+        match *self {
+            ConstTraitItem(ref associated_const) => associated_const.name,
+            MethodTraitItem(ref method) => method.name,
+            TypeTraitItem(ref associated_type) => associated_type.name,
+        }
+    }
+
+    pub fn vis(&self) -> Visibility {
+        match *self {
+            ConstTraitItem(ref associated_const) => associated_const.vis,
+            MethodTraitItem(ref method) => method.vis,
+            TypeTraitItem(ref associated_type) => associated_type.vis,
+        }
+    }
+
+    pub fn container(&self) -> ImplOrTraitItemContainer {
+        match *self {
+            ConstTraitItem(ref associated_const) => associated_const.container,
+            MethodTraitItem(ref method) => method.container,
+            TypeTraitItem(ref associated_type) => associated_type.container,
+        }
+    }
+
+    pub fn as_opt_method(&self) -> Option<Rc<Method<'tcx>>> {
+        match *self {
+            MethodTraitItem(ref m) => Some((*m).clone()),
+            _ => None,
+        }
+    }
+}
+
+#[derive(Clone, Copy, Debug)]
+pub enum ImplOrTraitItemId {
+    ConstTraitItemId(DefId),
+    MethodTraitItemId(DefId),
+    TypeTraitItemId(DefId),
+}
+
+impl ImplOrTraitItemId {
+    pub fn def_id(&self) -> DefId {
+        match *self {
+            ConstTraitItemId(def_id) => def_id,
+            MethodTraitItemId(def_id) => def_id,
+            TypeTraitItemId(def_id) => def_id,
+        }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Copy)]
+pub enum Visibility {
+    /// Visible everywhere (including in other crates).
+    Public,
+    /// Visible only in the given crate-local module.
+    Restricted(NodeId),
+    /// Not visible anywhere in the local crate. This is the visibility of private external items.
+    PrivateExternal,
+}
+
+impl Visibility {
+    pub fn from_hir(visibility: &hir::Visibility, id: NodeId, tcx: &TyCtxt) -> Self {
+        match *visibility {
+            hir::Public => Visibility::Public,
+            hir::Inherited => Visibility::Restricted(tcx.map.get_module_parent(id)),
+        }
+    }
+
+    /// Returns true if an item with this visibility is accessible from the given block.
+    pub fn is_accessible_from(self, block: NodeId, map: &ast_map::Map) -> bool {
+        let restriction = match self {
+            // Public items are visible everywhere.
+            Visibility::Public => return true,
+            // Private items from other crates are visible nowhere.
+            Visibility::PrivateExternal => return false,
+            // Restricted items are visible in an arbitrary local module.
+            Visibility::Restricted(module) => module,
+        };
+
+        let mut block_ancestor = block;
+        loop {
+            if block_ancestor == restriction { return true }
+            let block_ancestor_parent = map.get_module_parent(block_ancestor);
+            if block_ancestor_parent == block_ancestor { return false }
+            block_ancestor = block_ancestor_parent;
+        }
+    }
+
+    /// Returns true if this visibility is at least as accessible as the given visibility
+    pub fn is_at_least(self, vis: Visibility, map: &ast_map::Map) -> bool {
+        let vis_restriction = match vis {
+            Visibility::Public => return self == Visibility::Public,
+            Visibility::PrivateExternal => return true,
+            Visibility::Restricted(module) => module,
+        };
+
+        self.is_accessible_from(vis_restriction, map)
+    }
+}
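A small sketch of how the restriction check behaves (illustrative only, not part of the patch):

    // mod a {
    //     fn f() {}               // hir::Inherited => Visibility::Restricted(a)
    //     mod b {
    //         fn g() { super::f(); }
    //     }
    // }
    //
    // is_accessible_from(<block in g>, map) starts at the block, walks up the
    // module parents (b, then a), reaches the restriction module `a`, and
    // returns true; from another crate the same item would be PrivateExternal
    // and the call would return false immediately.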
+
+#[derive(Clone, Debug)]
+pub struct Method<'tcx> {
+    pub name: Name,
+    pub generics: Generics<'tcx>,
+    pub predicates: GenericPredicates<'tcx>,
+    pub fty: BareFnTy<'tcx>,
+    pub explicit_self: ExplicitSelfCategory,
+    pub vis: Visibility,
+    pub defaultness: hir::Defaultness,
+    pub def_id: DefId,
+    pub container: ImplOrTraitItemContainer,
+}
+
+impl<'tcx> Method<'tcx> {
+    pub fn new(name: Name,
+               generics: ty::Generics<'tcx>,
+               predicates: GenericPredicates<'tcx>,
+               fty: BareFnTy<'tcx>,
+               explicit_self: ExplicitSelfCategory,
+               vis: Visibility,
+               defaultness: hir::Defaultness,
+               def_id: DefId,
+               container: ImplOrTraitItemContainer)
+               -> Method<'tcx> {
+        Method {
+            name: name,
+            generics: generics,
+            predicates: predicates,
+            fty: fty,
+            explicit_self: explicit_self,
+            vis: vis,
+            defaultness: defaultness,
+            def_id: def_id,
+            container: container,
+        }
+    }
+
+    pub fn container_id(&self) -> DefId {
+        match self.container {
+            TraitContainer(id) => id,
+            ImplContainer(id) => id,
+        }
+    }
+}
+
+impl<'tcx> PartialEq for Method<'tcx> {
+    #[inline]
+    fn eq(&self, other: &Self) -> bool { self.def_id == other.def_id }
+}
+
+impl<'tcx> Eq for Method<'tcx> {}
+
+impl<'tcx> Hash for Method<'tcx> {
+    #[inline]
+    fn hash<H: Hasher>(&self, s: &mut H) {
+        self.def_id.hash(s)
+    }
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct AssociatedConst<'tcx> {
+    pub name: Name,
+    pub ty: Ty<'tcx>,
+    pub vis: Visibility,
+    pub defaultness: hir::Defaultness,
+    pub def_id: DefId,
+    pub container: ImplOrTraitItemContainer,
+    pub has_value: bool
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct AssociatedType<'tcx> {
+    pub name: Name,
+    pub ty: Option<Ty<'tcx>>,
+    pub vis: Visibility,
+    pub defaultness: hir::Defaultness,
+    pub def_id: DefId,
+    pub container: ImplOrTraitItemContainer,
+}
+
+#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable)]
+pub struct ItemVariances {
+    pub types: VecPerParamSpace<Variance>,
+    pub regions: VecPerParamSpace<Variance>,
+}
+
+#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable, Copy)]
+pub enum Variance {
+    Covariant,      // T<A> <: T<B> iff A <: B -- e.g., function return type
+    Invariant,      // T<A> <: T<B> iff B == A -- e.g., type of mutable cell
+    Contravariant,  // T<A> <: T<B> iff B <: A -- e.g., function param type
+    Bivariant,      // T<A> <: T<B>            -- e.g., unused type parameter
+}
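A few concrete instances, for orientation (illustrative, not from the patch):

    // Vec<T>   is Covariant in T:      Vec<&'static u8> <: Vec<&'a u8>
    // Cell<T>  is Invariant in T:      no subtyping in either direction
    // fn(T)    is Contravariant in T:  fn(&'a u8) <: fn(&'static u8)
    // An unused type parameter would be Bivariant.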
+
+#[derive(Clone, Copy, Debug)]
+pub struct MethodCallee<'tcx> {
+    /// Impl method ID, for inherent methods, or trait method ID, otherwise.
+    pub def_id: DefId,
+    pub ty: Ty<'tcx>,
+    pub substs: &'tcx subst::Substs<'tcx>
+}
+
+/// With method calls, we store some extra information in
+/// side tables (i.e. method_map). We use MethodCall as a key
+/// to index into these tables instead of just directly using
+/// the expression's NodeId. The reason is that we may apply
+/// adjustments (coercions), and the resulting expression also
+/// needs to use the side tables. The problem is that we don't
+/// assign a separate NodeId to this new expression, so it
+/// would clash with the base expression if both needed to add
+/// to the side tables. Thus, to disambiguate, we also keep
+/// track of whether there's an adjustment in our key.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+pub struct MethodCall {
+    pub expr_id: NodeId,
+    pub autoderef: u32
+}
+
+impl MethodCall {
+    pub fn expr(id: NodeId) -> MethodCall {
+        MethodCall {
+            expr_id: id,
+            autoderef: 0
+        }
+    }
+
+    pub fn autoderef(expr_id: NodeId, autoderef: u32) -> MethodCall {
+        MethodCall {
+            expr_id: expr_id,
+            autoderef: 1 + autoderef
+        }
+    }
+}
+
+// maps from an expression id that corresponds to a method call to the details
+// of the method to be invoked
+pub type MethodMap<'tcx> = FnvHashMap<MethodCall, MethodCallee<'tcx>>;
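A minimal sketch of how these keys keep the base expression and its adjusted form apart in the map; it only uses the types defined above, and the two callees are assumed to have been computed elsewhere:

    fn record_method_calls<'tcx>(map: &mut MethodMap<'tcx>,
                                 expr_id: NodeId,
                                 base: MethodCallee<'tcx>,
                                 adjusted: MethodCallee<'tcx>) {
        // Entry for the method invoked on the expression itself.
        map.insert(MethodCall::expr(expr_id), base);
        // Entry for the method invoked after the first autoderef adjustment;
        // MethodCall::autoderef stores 1 + n, so the two keys never collide.
        map.insert(MethodCall::autoderef(expr_id, 0), adjusted);
    }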
+
+// Contains information needed to resolve types and (in the future) look up
+// the types of AST nodes.
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub struct CReaderCacheKey {
+    pub cnum: CrateNum,
+    pub pos: usize,
+}
+
+/// Describes the fragment-state associated with a NodeId.
+///
+/// Currently only unfragmented paths have entries in the table,
+/// but longer-term this enum is expected to expand to also
+/// include data for fragmented paths.
+#[derive(Copy, Clone, Debug)]
+pub enum FragmentInfo {
+    Moved { var: NodeId, move_expr: NodeId },
+    Assigned { var: NodeId, assign_expr: NodeId, assignee_id: NodeId },
+}
+
+// Flags that we track on types. These flags are propagated upwards
+// through the type during type construction, so that we can quickly
+// check whether the type has various kinds of types in it without
+// recursing over the type itself.
+bitflags! {
+    flags TypeFlags: u32 {
+        const HAS_PARAMS         = 1 << 0,
+        const HAS_SELF           = 1 << 1,
+        const HAS_TY_INFER       = 1 << 2,
+        const HAS_RE_INFER       = 1 << 3,
+        const HAS_RE_EARLY_BOUND = 1 << 4,
+        const HAS_FREE_REGIONS   = 1 << 5,
+        const HAS_TY_ERR         = 1 << 6,
+        const HAS_PROJECTION     = 1 << 7,
+        const HAS_TY_CLOSURE     = 1 << 8,
+
+        // true if there are "names" of types and regions and so forth
+        // that are local to a particular fn
+        const HAS_LOCAL_NAMES   = 1 << 9,
+
+        const NEEDS_SUBST        = TypeFlags::HAS_PARAMS.bits |
+                                   TypeFlags::HAS_SELF.bits |
+                                   TypeFlags::HAS_RE_EARLY_BOUND.bits,
+
+        // Flags representing the nominal content of a type,
+        // computed by FlagsComputation. If you add a new nominal
+        // flag, it should be added here too.
+        const NOMINAL_FLAGS     = TypeFlags::HAS_PARAMS.bits |
+                                  TypeFlags::HAS_SELF.bits |
+                                  TypeFlags::HAS_TY_INFER.bits |
+                                  TypeFlags::HAS_RE_INFER.bits |
+                                  TypeFlags::HAS_RE_EARLY_BOUND.bits |
+                                  TypeFlags::HAS_FREE_REGIONS.bits |
+                                  TypeFlags::HAS_TY_ERR.bits |
+                                  TypeFlags::HAS_PROJECTION.bits |
+                                  TypeFlags::HAS_TY_CLOSURE.bits |
+                                  TypeFlags::HAS_LOCAL_NAMES.bits,
+
+        // Caches for type_is_sized, type_moves_by_default
+        const SIZEDNESS_CACHED  = 1 << 16,
+        const IS_SIZED          = 1 << 17,
+        const MOVENESS_CACHED   = 1 << 18,
+        const MOVES_BY_DEFAULT  = 1 << 19,
+    }
+}
+
+pub struct TyS<'tcx> {
+    pub sty: TypeVariants<'tcx>,
+    pub flags: Cell<TypeFlags>,
+
+    // the maximal depth of any bound regions appearing in this type.
+    region_depth: u32,
+}
+
+impl<'tcx> PartialEq for TyS<'tcx> {
+    #[inline]
+    fn eq(&self, other: &TyS<'tcx>) -> bool {
+        // (self as *const _) == (other as *const _)
+        (self as *const TyS<'tcx>) == (other as *const TyS<'tcx>)
+    }
+}
+impl<'tcx> Eq for TyS<'tcx> {}
+
+impl<'tcx> Hash for TyS<'tcx> {
+    fn hash<H: Hasher>(&self, s: &mut H) {
+        (self as *const TyS).hash(s)
+    }
+}
+
+pub type Ty<'tcx> = &'tcx TyS<'tcx>;
+
+impl<'tcx> Encodable for Ty<'tcx> {
+    fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
+        cstore::tls::with_encoding_context(s, |ecx, rbml_w| {
+            ecx.encode_ty(rbml_w, *self);
+            Ok(())
+        })
+    }
+}
+
+impl<'tcx> Decodable for Ty<'tcx> {
+    fn decode<D: Decoder>(d: &mut D) -> Result<Ty<'tcx>, D::Error> {
+        cstore::tls::with_decoding_context(d, |dcx, rbml_r| {
+            Ok(dcx.decode_ty(rbml_r))
+        })
+    }
+}
+
+
+/// Upvars do not get their own node-id. Instead, we use the pair of
+/// the original var id (that is, the root variable that is referenced
+/// by the upvar) and the id of the closure expression.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct UpvarId {
+    pub var_id: NodeId,
+    pub closure_expr_id: NodeId,
+}
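An illustrative pairing (not from the patch):

    // let count = 0;
    // let closure = || count + 1;
    //
    // The capture of `count` is keyed by an UpvarId whose var_id is the
    // NodeId of `count`'s binding and whose closure_expr_id is the NodeId
    // of the `|| count + 1` closure expression.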
+
+#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable, Copy)]
+pub enum BorrowKind {
+    /// Data must be immutable and is aliasable.
+    ImmBorrow,
+
+    /// Data must be immutable but not aliasable.  This kind of borrow
+    /// cannot currently be expressed by the user and is used only in
+    /// implicit closure bindings. It is needed when the closure
+    /// is borrowing or mutating a mutable referent, e.g.:
+    ///
+    ///    let x: &mut isize = ...;
+    ///    let y = || *x += 5;
+    ///
+    /// If we were to try to translate this closure into a more explicit
+    /// form, we'd encounter an error with the code as written:
+    ///
+    ///    struct Env { x: & &mut isize }
+    ///    let x: &mut isize = ...;
+    ///    let y = (&mut Env { &x }, fn_ptr);  // Closure is pair of env and fn
+    ///    fn fn_ptr(env: &mut Env) { **env.x += 5; }
+    ///
+    /// This is then illegal because you cannot mutate a `&mut` found
+    /// in an aliasable location. To solve, you'd have to translate with
+    /// an `&mut` borrow:
+    ///
+    ///    struct Env { x: & &mut isize }
+    ///    let x: &mut isize = ...;
+    ///    let y = (&mut Env { &mut x }, fn_ptr); // changed from &x to &mut x
+    ///    fn fn_ptr(env: &mut Env) { **env.x += 5; }
+    ///
+    /// Now the assignment to `**env.x` is legal, but creating a
+    /// mutable pointer to `x` is not because `x` is not mutable. We
+    /// could fix this by declaring `x` as `let mut x`. This is ok in
+    /// user code, if awkward, but extra weird for closures, since the
+    /// borrow is hidden.
+    ///
+    /// So we introduce a "unique imm" borrow -- the referent is
+    /// immutable, but not aliasable. This solves the problem. For
+    /// simplicity, we don't give users a way to express this
+    /// borrow; it's just used when translating closures.
+    UniqueImmBorrow,
+
+    /// Data is mutable and not aliasable.
+    MutBorrow
+}
+
+/// Information describing the capture of an upvar. This is computed
+/// during `typeck`, specifically by `regionck`.
+#[derive(PartialEq, Clone, Debug, Copy)]
+pub enum UpvarCapture {
+    /// Upvar is captured by value. This is always true when the
+    /// closure is labeled `move`, but can also be true in other cases
+    /// depending on inference.
+    ByValue,
+
+    /// Upvar is captured by reference.
+    ByRef(UpvarBorrow),
+}
+
+#[derive(PartialEq, Clone, Copy)]
+pub struct UpvarBorrow {
+    /// The kind of borrow: by-ref upvars have access to shared
+    /// immutable borrows, which are not part of the normal language
+    /// syntax.
+    pub kind: BorrowKind,
+
+    /// Region of the resulting reference.
+    pub region: ty::Region,
+}
+
+pub type UpvarCaptureMap = FnvHashMap<UpvarId, UpvarCapture>;
+
+#[derive(Copy, Clone)]
+pub struct ClosureUpvar<'tcx> {
+    pub def: Def,
+    pub span: Span,
+    pub ty: Ty<'tcx>,
+}
+
+#[derive(Clone, Copy, PartialEq)]
+pub enum IntVarValue {
+    IntType(ast::IntTy),
+    UintType(ast::UintTy),
+}
+
+/// Default region to use for the bound of objects that are
+/// supplied as the value for this type parameter. This is derived
+/// from `T:'a` annotations appearing in the type definition.  If
+/// this is `None`, then the default is inherited from the
+/// surrounding context. See RFC #599 for details.
+#[derive(Copy, Clone)]
+pub enum ObjectLifetimeDefault {
+    /// Require an explicit annotation. Occurs when multiple
+    /// `T:'a` constraints are found.
+    Ambiguous,
+
+    /// Use the base default, typically 'static, but in a fn body it is a fresh variable
+    BaseDefault,
+
+    /// Use the given region as the default.
+    Specific(Region),
+}
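A rough mapping back to surface syntax, per RFC 599 (illustrative; `Trait` and `Wrapper` are invented names):

    // struct Wrapper<T: 'static + ?Sized>(Box<T>);
    //
    // Writing `Wrapper<Trait>` elaborates to `Wrapper<Trait + 'static>`:
    // the single `T: 'static` bound yields Specific('static). With no
    // lifetime bound on T, the surrounding context decides (BaseDefault),
    // and two distinct bounds such as `T: 'a + 'b` yield Ambiguous, so an
    // explicit object lifetime must be written.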
+
+#[derive(Clone)]
+pub struct TypeParameterDef<'tcx> {
+    pub name: Name,
+    pub def_id: DefId,
+    pub space: subst::ParamSpace,
+    pub index: u32,
+    pub default_def_id: DefId, // for use in error reporting about defaults
+    pub default: Option<Ty<'tcx>>,
+    pub object_lifetime_default: ObjectLifetimeDefault,
+}
+
+#[derive(Clone)]
+pub struct RegionParameterDef {
+    pub name: Name,
+    pub def_id: DefId,
+    pub space: subst::ParamSpace,
+    pub index: u32,
+    pub bounds: Vec<ty::Region>,
+}
+
+impl RegionParameterDef {
+    pub fn to_early_bound_region(&self) -> ty::Region {
+        ty::ReEarlyBound(ty::EarlyBoundRegion {
+            space: self.space,
+            index: self.index,
+            name: self.name,
+        })
+    }
+    pub fn to_bound_region(&self) -> ty::BoundRegion {
+        ty::BoundRegion::BrNamed(self.def_id, self.name)
+    }
+}
+
+/// Information about the formal type/lifetime parameters associated
+/// with an item or method. Analogous to hir::Generics.
+#[derive(Clone, Debug)]
+pub struct Generics<'tcx> {
+    pub types: VecPerParamSpace<TypeParameterDef<'tcx>>,
+    pub regions: VecPerParamSpace<RegionParameterDef>,
+}
+
+impl<'tcx> Generics<'tcx> {
+    pub fn empty() -> Generics<'tcx> {
+        Generics {
+            types: VecPerParamSpace::empty(),
+            regions: VecPerParamSpace::empty(),
+        }
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.types.is_empty() && self.regions.is_empty()
+    }
+
+    pub fn has_type_params(&self, space: subst::ParamSpace) -> bool {
+        !self.types.is_empty_in(space)
+    }
+
+    pub fn has_region_params(&self, space: subst::ParamSpace) -> bool {
+        !self.regions.is_empty_in(space)
+    }
+}
+
+/// Bounds on generics.
+#[derive(Clone)]
+pub struct GenericPredicates<'tcx> {
+    pub predicates: VecPerParamSpace<Predicate<'tcx>>,
+}
+
+impl<'tcx> GenericPredicates<'tcx> {
+    pub fn empty() -> GenericPredicates<'tcx> {
+        GenericPredicates {
+            predicates: VecPerParamSpace::empty(),
+        }
+    }
+
+    pub fn instantiate(&self, tcx: &TyCtxt<'tcx>, substs: &Substs<'tcx>)
+                       -> InstantiatedPredicates<'tcx> {
+        InstantiatedPredicates {
+            predicates: self.predicates.subst(tcx, substs),
+        }
+    }
+
+    pub fn instantiate_supertrait(&self,
+                                  tcx: &TyCtxt<'tcx>,
+                                  poly_trait_ref: &ty::PolyTraitRef<'tcx>)
+                                  -> InstantiatedPredicates<'tcx>
+    {
+        InstantiatedPredicates {
+            predicates: self.predicates.map(|pred| pred.subst_supertrait(tcx, poly_trait_ref))
+        }
+    }
+}
+
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub enum Predicate<'tcx> {
+    /// Corresponds to `where Foo : Bar<A,B,C>`. `Foo` here would be
+    /// the `Self` type of the trait reference and `A`, `B`, and `C`
+    /// would be the parameters in the `TypeSpace`.
+    Trait(PolyTraitPredicate<'tcx>),
+
+    /// where `T1 == T2`.
+    Equate(PolyEquatePredicate<'tcx>),
+
+    /// where 'a : 'b
+    RegionOutlives(PolyRegionOutlivesPredicate),
+
+    /// where T : 'a
+    TypeOutlives(PolyTypeOutlivesPredicate<'tcx>),
+
+    /// where <T as TraitRef>::Name == X, approximately.
+    /// See `ProjectionPredicate` struct for details.
+    Projection(PolyProjectionPredicate<'tcx>),
+
+    /// no syntax: T WF
+    WellFormed(Ty<'tcx>),
+
+    /// trait must be object-safe
+    ObjectSafe(DefId),
+}
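A hedged example of how `where` clauses map onto these variants (not from the patch):

    // fn f<'a, 'b: 'a, T: Clone>(x: &'a T) where T: 'b {}
    //
    // gives rise, roughly, to:
    //     Predicate::Trait(T: Clone)
    //     Predicate::RegionOutlives('b: 'a)
    //     Predicate::TypeOutlives(T: 'b)
    // plus WellFormed obligations for the types involved.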
+
+impl<'tcx> Predicate<'tcx> {
+    /// Performs a substitution suitable for going from a
+    /// poly-trait-ref to supertraits that must hold if that
+    /// poly-trait-ref holds. This is slightly different from a normal
+    /// substitution in terms of what happens with bound regions.  See
+    /// lengthy comment below for details.
+    pub fn subst_supertrait(&self,
+                            tcx: &TyCtxt<'tcx>,
+                            trait_ref: &ty::PolyTraitRef<'tcx>)
+                            -> ty::Predicate<'tcx>
+    {
+        // The interaction between HRTB and supertraits is not entirely
+        // obvious. Let me walk you (and myself) through an example.
+        //
+        // Let's start with an easy case. Consider two traits:
+        //
+        //     trait Foo<'a> : Bar<'a,'a> { }
+        //     trait Bar<'b,'c> { }
+        //
+        // Now, if we have a trait reference `for<'x> T : Foo<'x>`, then
+        // we can deduce that `for<'x> T : Bar<'x,'x>`. Basically, if we
+        // knew that `Foo<'x>` (for any 'x) then we also know that
+        // `Bar<'x,'x>` (for any 'x). This more-or-less falls out from
+        // normal substitution.
+        //
+        // In terms of why this is sound, the idea is that whenever there
+        // is an impl of `T:Foo<'a>`, it must show that `T:Bar<'a,'a>`
+        // holds.  So if there is an impl of `T:Foo<'a>` that applies to
+        // all `'a`, then we must know that `T:Bar<'a,'a>` holds for all
+        // `'a`.
+        //
+        // Another example to be careful of is this:
+        //
+        //     trait Foo1<'a> : for<'b> Bar1<'a,'b> { }
+        //     trait Bar1<'b,'c> { }
+        //
+        // Here, if we have `for<'x> T : Foo1<'x>`, then what do we know?
+        // The answer is that we know `for<'x,'b> T : Bar1<'x,'b>`. The
+        // reason is similar to the previous example: any impl of
+        // `T:Foo1<'x>` must show that `for<'b> T : Bar1<'x, 'b>`.  So
+        // basically we would want to collapse the bound lifetimes from
+        // the input (`trait_ref`) and the supertraits.
+        //
+        // To achieve this in practice is fairly straightforward. Let's
+        // consider the more complicated scenario:
+        //
+        // - We start out with `for<'x> T : Foo1<'x>`. In this case, `'x`
+        //   has a De Bruijn index of 1. We want to produce `for<'x,'b> T : Bar1<'x,'b>`,
+        //   where both `'x` and `'b` would have a DB index of 1.
+        //   The substitution from the input trait-ref is therefore going to be
+        //   `'a => 'x` (where `'x` has a DB index of 1).
+        // - The super-trait-ref is `for<'b> Bar1<'a,'b>`, where `'a` is an
+        //   early-bound parameter and `'b' is a late-bound parameter with a
+        //   DB index of 1.
+        // - If we replace `'a` with `'x` from the input, it too will have
+        //   a DB index of 1, and thus we'll have `for<'x,'b> Bar1<'x,'b>`
+        //   just as we wanted.
+        //
+        // There is only one catch. If we just apply the substitution `'a
+        // => 'x` to `for<'b> Bar1<'a,'b>`, the substitution code will
+        // adjust the DB index because we are substituting into a binder (it
+        // tries to be so smart...), resulting in `for<'x> for<'b>
+        // Bar1<'x,'b>` (we have no syntax for this, so use your
+        // imagination). Basically the 'x will have DB index of 2 and 'b
+        // will have DB index of 1. Not quite what we want. So we apply
+        // the substitution to the *contents* of the trait reference,
+        // rather than the trait reference itself (put another way, the
+        // substitution code expects equal binding levels in the values
+        // from the substitution and the value being substituted into, and
+        // this trick achieves that).
+
+        let substs = &trait_ref.0.substs;
+        match *self {
+            Predicate::Trait(ty::Binder(ref data)) =>
+                Predicate::Trait(ty::Binder(data.subst(tcx, substs))),
+            Predicate::Equate(ty::Binder(ref data)) =>
+                Predicate::Equate(ty::Binder(data.subst(tcx, substs))),
+            Predicate::RegionOutlives(ty::Binder(ref data)) =>
+                Predicate::RegionOutlives(ty::Binder(data.subst(tcx, substs))),
+            Predicate::TypeOutlives(ty::Binder(ref data)) =>
+                Predicate::TypeOutlives(ty::Binder(data.subst(tcx, substs))),
+            Predicate::Projection(ty::Binder(ref data)) =>
+                Predicate::Projection(ty::Binder(data.subst(tcx, substs))),
+            Predicate::WellFormed(data) =>
+                Predicate::WellFormed(data.subst(tcx, substs)),
+            Predicate::ObjectSafe(trait_def_id) =>
+                Predicate::ObjectSafe(trait_def_id),
+        }
+    }
+}
+
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct TraitPredicate<'tcx> {
+    pub trait_ref: TraitRef<'tcx>
+}
+pub type PolyTraitPredicate<'tcx> = ty::Binder<TraitPredicate<'tcx>>;
+
+impl<'tcx> TraitPredicate<'tcx> {
+    pub fn def_id(&self) -> DefId {
+        self.trait_ref.def_id
+    }
+
+    /// Creates the dep-node for selecting/evaluating this trait reference.
+    fn dep_node(&self) -> DepNode<DefId> {
+        DepNode::TraitSelect(self.def_id())
+    }
+
+    pub fn input_types(&self) -> &[Ty<'tcx>] {
+        self.trait_ref.substs.types.as_slice()
+    }
+
+    pub fn self_ty(&self) -> Ty<'tcx> {
+        self.trait_ref.self_ty()
+    }
+}
+
+impl<'tcx> PolyTraitPredicate<'tcx> {
+    pub fn def_id(&self) -> DefId {
+        // ok to skip binder since trait def-id does not care about regions
+        self.0.def_id()
+    }
+
+    pub fn dep_node(&self) -> DepNode<DefId> {
+        // ok to skip binder since depnode does not care about regions
+        self.0.dep_node()
+    }
+}
+
+#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+pub struct EquatePredicate<'tcx>(pub Ty<'tcx>, pub Ty<'tcx>); // `0 == 1`
+pub type PolyEquatePredicate<'tcx> = ty::Binder<EquatePredicate<'tcx>>;
+
+#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+pub struct OutlivesPredicate<A,B>(pub A, pub B); // `A : B`
+pub type PolyOutlivesPredicate<A,B> = ty::Binder<OutlivesPredicate<A,B>>;
+pub type PolyRegionOutlivesPredicate = PolyOutlivesPredicate<ty::Region, ty::Region>;
+pub type PolyTypeOutlivesPredicate<'tcx> = PolyOutlivesPredicate<Ty<'tcx>, ty::Region>;
+
+/// This kind of predicate has no *direct* correspondent in the
+/// syntax, but it roughly corresponds to the syntactic forms:
+///
+/// 1. `T : TraitRef<..., Item=Type>`
+/// 2. `<T as TraitRef<...>>::Item == Type` (NYI)
+///
+/// In particular, form #1 is "desugared" to the combination of a
+/// normal trait predicate (`T : TraitRef<...>`) and one of these
+/// predicates. Form #2 is a broader form in that it also permits
+/// equality between arbitrary types. Processing an instance of Form
+/// #2 eventually yields one of these `ProjectionPredicate`
+/// instances to normalize the LHS.
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct ProjectionPredicate<'tcx> {
+    pub projection_ty: ProjectionTy<'tcx>,
+    pub ty: Ty<'tcx>,
+}
+
+pub type PolyProjectionPredicate<'tcx> = Binder<ProjectionPredicate<'tcx>>;
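A small, self-contained example of form #1 and its desugaring (illustrative; the function is invented):

    // The bound below is written in form #1:
    fn sum_all<I: Iterator<Item = u32>>(it: I) -> u32 {
        it.sum()
    }
    // and is processed as the ordinary trait predicate `I: Iterator`
    // plus a ProjectionPredicate whose projection_ty is
    // `<I as Iterator>::Item` and whose ty is `u32`.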
+
+impl<'tcx> PolyProjectionPredicate<'tcx> {
+    pub fn item_name(&self) -> Name {
+        self.0.projection_ty.item_name // safe to skip the binder to access a name
+    }
+
+    pub fn sort_key(&self) -> (DefId, Name) {
+        self.0.projection_ty.sort_key()
+    }
+}
+
+pub trait ToPolyTraitRef<'tcx> {
+    fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx>;
+}
+
+impl<'tcx> ToPolyTraitRef<'tcx> for TraitRef<'tcx> {
+    fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> {
+        assert!(!self.has_escaping_regions());
+        ty::Binder(self.clone())
+    }
+}
+
+impl<'tcx> ToPolyTraitRef<'tcx> for PolyTraitPredicate<'tcx> {
+    fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> {
+        self.map_bound_ref(|trait_pred| trait_pred.trait_ref)
+    }
+}
+
+impl<'tcx> ToPolyTraitRef<'tcx> for PolyProjectionPredicate<'tcx> {
+    fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> {
+        // Note: unlike with TraitRef::to_poly_trait_ref(),
+        // self.0.trait_ref is permitted to have escaping regions.
+        // This is because here `self` has a `Binder` and so does our
+        // return value, so we are preserving the number of binding
+        // levels.
+        ty::Binder(self.0.projection_ty.trait_ref)
+    }
+}
+
+pub trait ToPredicate<'tcx> {
+    fn to_predicate(&self) -> Predicate<'tcx>;
+}
+
+impl<'tcx> ToPredicate<'tcx> for TraitRef<'tcx> {
+    fn to_predicate(&self) -> Predicate<'tcx> {
+        // we're about to add a binder, so let's check that we don't
+        // accidentally capture anything, or else that might be some
+        // weird debruijn accounting.
+        assert!(!self.has_escaping_regions());
+
+        ty::Predicate::Trait(ty::Binder(ty::TraitPredicate {
+            trait_ref: self.clone()
+        }))
+    }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyTraitRef<'tcx> {
+    fn to_predicate(&self) -> Predicate<'tcx> {
+        ty::Predicate::Trait(self.to_poly_trait_predicate())
+    }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyEquatePredicate<'tcx> {
+    fn to_predicate(&self) -> Predicate<'tcx> {
+        Predicate::Equate(self.clone())
+    }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate {
+    fn to_predicate(&self) -> Predicate<'tcx> {
+        Predicate::RegionOutlives(self.clone())
+    }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyTypeOutlivesPredicate<'tcx> {
+    fn to_predicate(&self) -> Predicate<'tcx> {
+        Predicate::TypeOutlives(self.clone())
+    }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyProjectionPredicate<'tcx> {
+    fn to_predicate(&self) -> Predicate<'tcx> {
+        Predicate::Projection(self.clone())
+    }
+}
+
+impl<'tcx> Predicate<'tcx> {
+    /// Iterates over the types in this predicate. Note that in all
+    /// cases this is skipping over a binder, so late-bound regions
+    /// with depth 0 are bound by the predicate.
+    pub fn walk_tys(&self) -> IntoIter<Ty<'tcx>> {
+        let vec: Vec<_> = match *self {
+            ty::Predicate::Trait(ref data) => {
+                data.0.trait_ref.substs.types.as_slice().to_vec()
+            }
+            ty::Predicate::Equate(ty::Binder(ref data)) => {
+                vec![data.0, data.1]
+            }
+            ty::Predicate::TypeOutlives(ty::Binder(ref data)) => {
+                vec![data.0]
+            }
+            ty::Predicate::RegionOutlives(..) => {
+                vec![]
+            }
+            ty::Predicate::Projection(ref data) => {
+                let trait_inputs = data.0.projection_ty.trait_ref.substs.types.as_slice();
+                trait_inputs.iter()
+                            .cloned()
+                            .chain(Some(data.0.ty))
+                            .collect()
+            }
+            ty::Predicate::WellFormed(data) => {
+                vec![data]
+            }
+            ty::Predicate::ObjectSafe(_trait_def_id) => {
+                vec![]
+            }
+        };
+
+        // The only reason to collect into a vector here is that I was
+        // too lazy to make the full (somewhat complicated) iterator
+        // type that would be needed here. But I wanted this fn to
+        // return an iterator conceptually, rather than a `Vec`, so as
+        // to be closer to `Ty::walk`.
+        vec.into_iter()
+    }
+
+    pub fn to_opt_poly_trait_ref(&self) -> Option<PolyTraitRef<'tcx>> {
+        match *self {
+            Predicate::Trait(ref t) => {
+                Some(t.to_poly_trait_ref())
+            }
+            Predicate::Projection(..) |
+            Predicate::Equate(..) |
+            Predicate::RegionOutlives(..) |
+            Predicate::WellFormed(..) |
+            Predicate::ObjectSafe(..) |
+            Predicate::TypeOutlives(..) => {
+                None
+            }
+        }
+    }
+}
+
+/// Represents the bounds declared on a particular set of type
+/// parameters.  Should eventually be generalized into a flat list of
+/// where clauses.  You can obtain an `InstantiatedPredicates` list from a
+/// `GenericPredicates` by using the `instantiate` method. Note that this method
+/// reflects an important semantic invariant of `InstantiatedPredicates`: while
+/// the `GenericPredicates` are expressed in terms of the bound type
+/// parameters of the impl/trait/whatever, an `InstantiatedPredicates` instance
+/// represents a set of bounds for some particular instantiation,
+/// meaning that the generic parameters have been substituted with
+/// their values.
+///
+/// Example:
+///
+///     struct Foo<T,U:Bar<T>> { ... }
+///
+/// Here, the `GenericPredicates` for `Foo` would contain a list of bounds like
+/// `[[], [U:Bar<T>]]`.  Now if there were some particular reference
+/// like `Foo<isize,usize>`, then the `InstantiatedPredicates` would be `[[],
+/// [usize:Bar<isize>]]`.
+#[derive(Clone)]
+pub struct InstantiatedPredicates<'tcx> {
+    pub predicates: VecPerParamSpace<Predicate<'tcx>>,
+}
+
+impl<'tcx> InstantiatedPredicates<'tcx> {
+    pub fn empty() -> InstantiatedPredicates<'tcx> {
+        InstantiatedPredicates { predicates: VecPerParamSpace::empty() }
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.predicates.is_empty()
+    }
+}
+
+impl<'tcx> TraitRef<'tcx> {
+    pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>) -> TraitRef<'tcx> {
+        TraitRef { def_id: def_id, substs: substs }
+    }
+
+    pub fn self_ty(&self) -> Ty<'tcx> {
+        self.substs.self_ty().unwrap()
+    }
+
+    pub fn input_types(&self) -> &[Ty<'tcx>] {
+        // Select only the "input types" from a trait-reference. For
+        // now this is all the types that appear in the
+        // trait-reference, but it should eventually exclude
+        // associated types.
+        self.substs.types.as_slice()
+    }
+}
+
+/// When type checking, we use the `ParameterEnvironment` to track
+/// details about the type/lifetime parameters that are in scope.
+/// It primarily stores the bounds information.
+///
+/// Note: This information might seem to be redundant with the data in
+/// `tcx.ty_param_defs`, but it is not. That table contains the
+/// parameter definitions from an "outside" perspective, but this
+/// struct will contain the bounds for a parameter as seen from inside
+/// the function body. Currently the only real distinction is that
+/// bound lifetime parameters are replaced with free ones, but in the
+/// future I hope to refine the representation of types so as to make
+/// more distinctions clearer.
+#[derive(Clone)]
+pub struct ParameterEnvironment<'a, 'tcx:'a> {
+    pub tcx: &'a TyCtxt<'tcx>,
+
+    /// See `construct_free_substs` for details.
+    pub free_substs: Substs<'tcx>,
+
+    /// Each type parameter has an implicit region bound that
+    /// indicates it must outlive at least the function body (the user
+    /// may specify stronger requirements). This field indicates the
+    /// region of the callee.
+    pub implicit_region_bound: ty::Region,
+
+    /// Obligations that the caller must satisfy. This is basically
+    /// the set of bounds on the in-scope type parameters, translated
+    /// into Obligations, and elaborated and normalized.
+    pub caller_bounds: Vec<ty::Predicate<'tcx>>,
+
+    /// Caches the results of trait selection. This cache is used
+    /// for things that have to do with the parameters in scope.
+    pub selection_cache: traits::SelectionCache<'tcx>,
+
+    /// Caches the results of trait evaluation.
+    pub evaluation_cache: traits::EvaluationCache<'tcx>,
+
+    /// Scope that is attached to free regions for this scope. This
+    /// is usually the id of the fn body, but for more abstract scopes
+    /// like structs we often use the node-id of the struct.
+    ///
+    /// FIXME(#3696). It would be nice to refactor so that free
+    /// regions don't have this implicit scope and instead introduce
+    /// relationships in the environment.
+    pub free_id_outlive: CodeExtent,
+}
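A sketch of what the caller-bounds field contains in practice (illustrative, not from the patch):

    // While checking the body of:
    //     fn dedup<T: Clone + PartialEq>(v: &mut Vec<T>) { /* ... */ }
    //
    // caller_bounds holds (roughly) the predicates `T: Clone` and
    // `T: PartialEq`, with bound lifetimes replaced by free ones, and
    // implicit_region_bound records that T must outlive the function body.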
+
+impl<'a, 'tcx> ParameterEnvironment<'a, 'tcx> {
+    pub fn with_caller_bounds(&self,
+                              caller_bounds: Vec<ty::Predicate<'tcx>>)
+                              -> ParameterEnvironment<'a,'tcx>
+    {
+        ParameterEnvironment {
+            tcx: self.tcx,
+            free_substs: self.free_substs.clone(),
+            implicit_region_bound: self.implicit_region_bound,
+            caller_bounds: caller_bounds,
+            selection_cache: traits::SelectionCache::new(),
+            evaluation_cache: traits::EvaluationCache::new(),
+            free_id_outlive: self.free_id_outlive,
+        }
+    }
+
+    /// Construct a parameter environment given an item, impl item, or trait item
+    pub fn for_item(cx: &'a TyCtxt<'tcx>, id: NodeId) -> ParameterEnvironment<'a, 'tcx> {
+        match cx.map.find(id) {
+            Some(ast_map::NodeImplItem(ref impl_item)) => {
+                match impl_item.node {
+                    hir::ImplItemKind::Type(_) => {
+                        // associated types don't have their own entry (for some reason),
+                        // so for now just grab environment for the impl
+                        let impl_id = cx.map.get_parent(id);
+                        let impl_def_id = cx.map.local_def_id(impl_id);
+                        let scheme = cx.lookup_item_type(impl_def_id);
+                        let predicates = cx.lookup_predicates(impl_def_id);
+                        cx.construct_parameter_environment(impl_item.span,
+                                                           &scheme.generics,
+                                                           &predicates,
+                                                           cx.region_maps.item_extent(id))
+                    }
+                    hir::ImplItemKind::Const(_, _) => {
+                        let def_id = cx.map.local_def_id(id);
+                        let scheme = cx.lookup_item_type(def_id);
+                        let predicates = cx.lookup_predicates(def_id);
+                        cx.construct_parameter_environment(impl_item.span,
+                                                           &scheme.generics,
+                                                           &predicates,
+                                                           cx.region_maps.item_extent(id))
+                    }
+                    hir::ImplItemKind::Method(_, ref body) => {
+                        let method_def_id = cx.map.local_def_id(id);
+                        match cx.impl_or_trait_item(method_def_id) {
+                            MethodTraitItem(ref method_ty) => {
+                                let method_generics = &method_ty.generics;
+                                let method_bounds = &method_ty.predicates;
+                                cx.construct_parameter_environment(
+                                    impl_item.span,
+                                    method_generics,
+                                    method_bounds,
+                                    cx.region_maps.call_site_extent(id, body.id))
+                            }
+                            _ => {
+                                bug!("ParameterEnvironment::for_item(): \
+                                      got non-method item from impl method?!")
+                            }
+                        }
+                    }
+                }
+            }
+            Some(ast_map::NodeTraitItem(trait_item)) => {
+                match trait_item.node {
+                    hir::TypeTraitItem(..) => {
+                        // associated types don't have their own entry (for some reason),
+                        // so for now just grab environment for the trait
+                        let trait_id = cx.map.get_parent(id);
+                        let trait_def_id = cx.map.local_def_id(trait_id);
+                        let trait_def = cx.lookup_trait_def(trait_def_id);
+                        let predicates = cx.lookup_predicates(trait_def_id);
+                        cx.construct_parameter_environment(trait_item.span,
+                                                           &trait_def.generics,
+                                                           &predicates,
+                                                           cx.region_maps.item_extent(id))
+                    }
+                    hir::ConstTraitItem(..) => {
+                        let def_id = cx.map.local_def_id(id);
+                        let scheme = cx.lookup_item_type(def_id);
+                        let predicates = cx.lookup_predicates(def_id);
+                        cx.construct_parameter_environment(trait_item.span,
+                                                           &scheme.generics,
+                                                           &predicates,
+                                                           cx.region_maps.item_extent(id))
+                    }
+                    hir::MethodTraitItem(_, ref body) => {
+                        // Use call-site for extent (unless this is a
+                        // trait method with no default; then fallback
+                        // to the method id).
+                        let method_def_id = cx.map.local_def_id(id);
+                        match cx.impl_or_trait_item(method_def_id) {
+                            MethodTraitItem(ref method_ty) => {
+                                let method_generics = &method_ty.generics;
+                                let method_bounds = &method_ty.predicates;
+                                let extent = if let Some(ref body) = *body {
+                                    // default impl: use call_site extent as free_id_outlive bound.
+                                    cx.region_maps.call_site_extent(id, body.id)
+                                } else {
+                                    // no default impl: use item extent as free_id_outlive bound.
+                                    cx.region_maps.item_extent(id)
+                                };
+                                cx.construct_parameter_environment(
+                                    trait_item.span,
+                                    method_generics,
+                                    method_bounds,
+                                    extent)
+                            }
+                            _ => {
+                                bug!("ParameterEnvironment::for_item(): \
+                                      got non-method item from provided \
+                                      method?!")
+                            }
+                        }
+                    }
+                }
+            }
+            Some(ast_map::NodeItem(item)) => {
+                match item.node {
+                    hir::ItemFn(_, _, _, _, _, ref body) => {
+                        // We assume this is a function.
+                        let fn_def_id = cx.map.local_def_id(id);
+                        let fn_scheme = cx.lookup_item_type(fn_def_id);
+                        let fn_predicates = cx.lookup_predicates(fn_def_id);
+
+                        cx.construct_parameter_environment(item.span,
+                                                           &fn_scheme.generics,
+                                                           &fn_predicates,
+                                                           cx.region_maps.call_site_extent(id,
+                                                                                           body.id))
+                    }
+                    hir::ItemEnum(..) |
+                    hir::ItemStruct(..) |
+                    hir::ItemImpl(..) |
+                    hir::ItemConst(..) |
+                    hir::ItemStatic(..) => {
+                        let def_id = cx.map.local_def_id(id);
+                        let scheme = cx.lookup_item_type(def_id);
+                        let predicates = cx.lookup_predicates(def_id);
+                        cx.construct_parameter_environment(item.span,
+                                                           &scheme.generics,
+                                                           &predicates,
+                                                           cx.region_maps.item_extent(id))
+                    }
+                    hir::ItemTrait(..) => {
+                        let def_id = cx.map.local_def_id(id);
+                        let trait_def = cx.lookup_trait_def(def_id);
+                        let predicates = cx.lookup_predicates(def_id);
+                        cx.construct_parameter_environment(item.span,
+                                                           &trait_def.generics,
+                                                           &predicates,
+                                                           cx.region_maps.item_extent(id))
+                    }
+                    _ => {
+                        span_bug!(item.span,
+                                  "ParameterEnvironment::for_item():
+                                   can't create a parameter \
+                                   environment for this kind of item")
+                    }
+                }
+            }
+            Some(ast_map::NodeExpr(..)) => {
+                // This is a convenience to allow closures to work.
+                ParameterEnvironment::for_item(cx, cx.map.get_parent(id))
+            }
+            _ => {
+                bug!("ParameterEnvironment::from_item(): \
+                      `{}` is not an item",
+                     cx.map.node_to_string(id))
+            }
+        }
+    }
+}
+
+/// A "type scheme", in ML terminology, is a type combined with some
+/// set of generic types that the type is, well, generic over. In Rust
+/// terms, it is the "type" of a fn item or struct -- this type will
+/// include various generic parameters that must be substituted when
+/// the item/struct is referenced. That is called converting the type
+/// scheme to a monotype.
+///
+/// - `generics`: the set of type parameters and their bounds
+/// - `ty`: the base types, which may reference the parameters defined
+///   in `generics`
+///
+/// Note that TypeSchemes are also sometimes called "polytypes" (and
+/// in fact this struct used to carry that name, so you may find some
+/// stray references in a comment or something). We try to reserve the
+/// "poly" prefix to refer to higher-ranked things, as in
+/// `PolyTraitRef`.
+///
+/// Note that each item also comes with predicates, see
+/// `lookup_predicates`.
+#[derive(Clone, Debug)]
+pub struct TypeScheme<'tcx> {
+    pub generics: Generics<'tcx>,
+    pub ty: Ty<'tcx>,
+}
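A worked instance of scheme vs. monotype (illustrative only):

    // For the item:
    //     fn identity<T>(x: T) -> T { x }
    //
    // the TypeScheme pairs `generics` = [T] with `ty` = fn(T) -> T; a use
    // such as `identity::<u8>` converts the scheme to the monotype
    // fn(u8) -> u8 by substituting the type parameter.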
+
+bitflags! {
+    flags AdtFlags: u32 {
+        const NO_ADT_FLAGS        = 0,
+        const IS_ENUM             = 1 << 0,
+        const IS_DTORCK           = 1 << 1, // is this a dtorck type?
+        const IS_DTORCK_VALID     = 1 << 2,
+        const IS_PHANTOM_DATA     = 1 << 3,
+        const IS_SIMD             = 1 << 4,
+        const IS_FUNDAMENTAL      = 1 << 5,
+        const IS_NO_DROP_FLAG     = 1 << 6,
+    }
+}
+
+pub type AdtDef<'tcx> = &'tcx AdtDefData<'tcx, 'static>;
+pub type VariantDef<'tcx> = &'tcx VariantDefData<'tcx, 'static>;
+pub type FieldDef<'tcx> = &'tcx FieldDefData<'tcx, 'static>;
+
+// See comment on AdtDefData for explanation
+pub type AdtDefMaster<'tcx> = &'tcx AdtDefData<'tcx, 'tcx>;
+pub type VariantDefMaster<'tcx> = &'tcx VariantDefData<'tcx, 'tcx>;
+pub type FieldDefMaster<'tcx> = &'tcx FieldDefData<'tcx, 'tcx>;
+
+pub struct VariantDefData<'tcx, 'container: 'tcx> {
+    /// The variant's DefId. If this is a tuple-like struct,
+    /// this is the DefId of the struct's ctor.
+    pub did: DefId,
+    pub name: Name, // struct's name if this is a struct
+    pub disr_val: Disr,
+    pub fields: Vec<FieldDefData<'tcx, 'container>>,
+    pub kind: VariantKind,
+}
+
+pub struct FieldDefData<'tcx, 'container: 'tcx> {
+    /// The field's DefId. NOTE: the fields of tuple-like enum variants
+    /// are not real items, and don't have entries in tcache etc.
+    pub did: DefId,
+    pub name: Name,
+    pub vis: Visibility,
+    /// TyIVar is used here to allow for variance (see the doc at
+    /// AdtDefData).
+    ///
+    /// Note: direct accesses to `ty` must also add dep edges.
+    ty: ivar::TyIVar<'tcx, 'container>
+}
+
+/// The definition of an abstract data type - a struct or enum.
+///
+/// These are all interned (by intern_adt_def) into the adt_defs
+/// table.
+///
+/// Because of the possibility of nested tcx-s, this type
+/// needs 2 lifetimes: the traditional variant lifetime ('tcx)
+/// bounding the lifetime of the inner types is of course necessary.
+/// However, it is not sufficient - types from a child tcx must
+/// not be leaked into the master tcx by being stored in an AdtDefData.
+///
+/// The 'container lifetime ensures that by outliving the container
+/// tcx and preventing shorter-lived types from being inserted. When
+/// write access is not needed, the 'container lifetime can be
+/// erased to 'static, which can be done by the AdtDef wrapper.
+pub struct AdtDefData<'tcx, 'container: 'tcx> {
+    pub did: DefId,
+    pub variants: Vec<VariantDefData<'tcx, 'container>>,
+    destructor: Cell<Option<DefId>>,
+    flags: Cell<AdtFlags>,
+}
+
+impl<'tcx, 'container> PartialEq for AdtDefData<'tcx, 'container> {
+    // AdtDefData are always interned and this is part of TyS equality
+    #[inline]
+    fn eq(&self, other: &Self) -> bool { self as *const _ == other as *const _ }
+}
+
+impl<'tcx, 'container> Eq for AdtDefData<'tcx, 'container> {}
+
+impl<'tcx, 'container> Hash for AdtDefData<'tcx, 'container> {
+    #[inline]
+    fn hash<H: Hasher>(&self, s: &mut H) {
+        (self as *const AdtDefData).hash(s)
+    }
+}
+
+impl<'tcx> Encodable for AdtDef<'tcx> {
+    fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
+        self.did.encode(s)
+    }
+}
+
+impl<'tcx> Decodable for AdtDef<'tcx> {
+    fn decode<D: Decoder>(d: &mut D) -> Result<AdtDef<'tcx>, D::Error> {
+        let def_id: DefId = Decodable::decode(d)?;
+
+        cstore::tls::with_decoding_context(d, |dcx, _| {
+            let def_id = dcx.translate_def_id(def_id);
+            Ok(dcx.tcx().lookup_adt_def(def_id))
+        })
+    }
+}
+
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum AdtKind { Struct, Enum }
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
+pub enum VariantKind { Struct, Tuple, Unit }
+
+impl VariantKind {
+    pub fn from_variant_data(vdata: &hir::VariantData) -> Self {
+        match *vdata {
+            hir::VariantData::Struct(..) => VariantKind::Struct,
+            hir::VariantData::Tuple(..) => VariantKind::Tuple,
+            hir::VariantData::Unit(..) => VariantKind::Unit,
+        }
+    }
+}
+
+impl<'tcx, 'container> AdtDefData<'tcx, 'container> {
+    fn new(tcx: &TyCtxt<'tcx>,
+           did: DefId,
+           kind: AdtKind,
+           variants: Vec<VariantDefData<'tcx, 'container>>) -> Self {
+        let mut flags = AdtFlags::NO_ADT_FLAGS;
+        let attrs = tcx.get_attrs(did);
+        if attr::contains_name(&attrs, "fundamental") {
+            flags = flags | AdtFlags::IS_FUNDAMENTAL;
+        }
+        if attr::contains_name(&attrs, "unsafe_no_drop_flag") {
+            flags = flags | AdtFlags::IS_NO_DROP_FLAG;
+        }
+        if tcx.lookup_simd(did) {
+            flags = flags | AdtFlags::IS_SIMD;
+        }
+        if Some(did) == tcx.lang_items.phantom_data() {
+            flags = flags | AdtFlags::IS_PHANTOM_DATA;
+        }
+        if let AdtKind::Enum = kind {
+            flags = flags | AdtFlags::IS_ENUM;
+        }
+        AdtDefData {
+            did: did,
+            variants: variants,
+            flags: Cell::new(flags),
+            destructor: Cell::new(None)
+        }
+    }
+
+    fn calculate_dtorck(&'tcx self, tcx: &TyCtxt<'tcx>) {
+        if tcx.is_adt_dtorck(self) {
+            self.flags.set(self.flags.get() | AdtFlags::IS_DTORCK);
+        }
+        self.flags.set(self.flags.get() | AdtFlags::IS_DTORCK_VALID)
+    }
+
+    /// Returns the kind of the ADT - Struct or Enum.
+    #[inline]
+    pub fn adt_kind(&self) -> AdtKind {
+        if self.flags.get().intersects(AdtFlags::IS_ENUM) {
+            AdtKind::Enum
+        } else {
+            AdtKind::Struct
+        }
+    }
+
+    /// Returns whether this is a dtorck type. If this returns
+    /// true, this type being safe for destruction requires it to be
+    /// alive; otherwise, only the contents are required to be.
+    #[inline]
+    pub fn is_dtorck(&'tcx self, tcx: &TyCtxt<'tcx>) -> bool {
+        if !self.flags.get().intersects(AdtFlags::IS_DTORCK_VALID) {
+            self.calculate_dtorck(tcx)
+        }
+        self.flags.get().intersects(AdtFlags::IS_DTORCK)
+    }
+
+    /// Returns whether this type is #[fundamental] for the purposes
+    /// of coherence checking.
+    #[inline]
+    pub fn is_fundamental(&self) -> bool {
+        self.flags.get().intersects(AdtFlags::IS_FUNDAMENTAL)
+    }
+
+    #[inline]
+    pub fn is_simd(&self) -> bool {
+        self.flags.get().intersects(AdtFlags::IS_SIMD)
+    }
+
+    /// Returns true if this is PhantomData<T>.
+    #[inline]
+    pub fn is_phantom_data(&self) -> bool {
+        self.flags.get().intersects(AdtFlags::IS_PHANTOM_DATA)
+    }
+
+    /// Returns whether this type has a destructor.
+    pub fn has_dtor(&self) -> bool {
+        match self.dtor_kind() {
+            NoDtor => false,
+            TraitDtor(..) => true
+        }
+    }
+
+    /// Asserts this is a struct and returns the struct's unique
+    /// variant.
+    pub fn struct_variant(&self) -> &VariantDefData<'tcx, 'container> {
+        assert_eq!(self.adt_kind(), AdtKind::Struct);
+        &self.variants[0]
+    }
+
+    #[inline]
+    pub fn type_scheme(&self, tcx: &TyCtxt<'tcx>) -> TypeScheme<'tcx> {
+        tcx.lookup_item_type(self.did)
+    }
+
+    #[inline]
+    pub fn predicates(&self, tcx: &TyCtxt<'tcx>) -> GenericPredicates<'tcx> {
+        tcx.lookup_predicates(self.did)
+    }
+
+    /// Returns an iterator over all fields contained
+    /// by this ADT.
+    #[inline]
+    pub fn all_fields(&self) ->
+            iter::FlatMap<
+                slice::Iter<VariantDefData<'tcx, 'container>>,
+                slice::Iter<FieldDefData<'tcx, 'container>>,
+                for<'s> fn(&'s VariantDefData<'tcx, 'container>)
+                    -> slice::Iter<'s, FieldDefData<'tcx, 'container>>
+            > {
+        self.variants.iter().flat_map(VariantDefData::fields_iter)
+    }
+
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.variants.is_empty()
+    }
+
+    #[inline]
+    pub fn is_univariant(&self) -> bool {
+        self.variants.len() == 1
+    }
+
+    pub fn is_payloadfree(&self) -> bool {
+        !self.variants.is_empty() &&
+            self.variants.iter().all(|v| v.fields.is_empty())
+    }
+
+    pub fn variant_with_id(&self, vid: DefId) -> &VariantDefData<'tcx, 'container> {
+        self.variants
+            .iter()
+            .find(|v| v.did == vid)
+            .expect("variant_with_id: unknown variant")
+    }
+
+    pub fn variant_index_with_id(&self, vid: DefId) -> usize {
+        self.variants
+            .iter()
+            .position(|v| v.did == vid)
+            .expect("variant_index_with_id: unknown variant")
+    }
+
+    pub fn variant_of_def(&self, def: Def) -> &VariantDefData<'tcx, 'container> {
+        match def {
+            Def::Variant(_, vid) => self.variant_with_id(vid),
+            Def::Struct(..) | Def::TyAlias(..) => self.struct_variant(),
+            _ => bug!("unexpected def {:?} in variant_of_def", def)
+        }
+    }
+
+    pub fn destructor(&self) -> Option<DefId> {
+        self.destructor.get()
+    }
+
+    pub fn set_destructor(&self, dtor: DefId) {
+        self.destructor.set(Some(dtor));
+    }
+
+    pub fn dtor_kind(&self) -> DtorKind {
+        match self.destructor.get() {
+            Some(_) => {
+                TraitDtor(!self.flags.get().intersects(AdtFlags::IS_NO_DROP_FLAG))
+            }
+            None => NoDtor,
+        }
+    }
+}
+
+impl<'tcx, 'container> VariantDefData<'tcx, 'container> {
+    #[inline]
+    fn fields_iter(&self) -> slice::Iter<FieldDefData<'tcx, 'container>> {
+        self.fields.iter()
+    }
+
+    pub fn kind(&self) -> VariantKind {
+        self.kind
+    }
+
+    pub fn is_tuple_struct(&self) -> bool {
+        self.kind() == VariantKind::Tuple
+    }
+
+    #[inline]
+    pub fn find_field_named(&self,
+                            name: ast::Name)
+                            -> Option<&FieldDefData<'tcx, 'container>> {
+        self.fields.iter().find(|f| f.name == name)
+    }
+
+    #[inline]
+    pub fn index_of_field_named(&self,
+                                name: ast::Name)
+                                -> Option<usize> {
+        self.fields.iter().position(|f| f.name == name)
+    }
+
+    #[inline]
+    pub fn field_named(&self, name: ast::Name) -> &FieldDefData<'tcx, 'container> {
+        self.find_field_named(name).unwrap()
+    }
+}
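+
+// Hedged sketch of the field-lookup helpers above (`adt` and `name` are assumed to
+// already be in scope as an `AdtDef` and an `ast::Name`):
+//
+// ```
+// let variant = adt.struct_variant();
+// let maybe_field = variant.find_field_named(name);     // Option<&FieldDefData>
+// let maybe_index = variant.index_of_field_named(name); // Option<usize>
+// let field = variant.field_named(name);                // panics if the field is absent
+// ```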
+
+impl<'tcx, 'container> FieldDefData<'tcx, 'container> {
+    pub fn new(did: DefId,
+               name: Name,
+               vis: Visibility) -> Self {
+        FieldDefData {
+            did: did,
+            name: name,
+            vis: vis,
+            ty: ivar::TyIVar::new()
+        }
+    }
+
+    pub fn ty(&self, tcx: &TyCtxt<'tcx>, subst: &Substs<'tcx>) -> Ty<'tcx> {
+        self.unsubst_ty().subst(tcx, subst)
+    }
+
+    pub fn unsubst_ty(&self) -> Ty<'tcx> {
+        self.ty.unwrap(DepNode::FieldTy(self.did))
+    }
+
+    pub fn fulfill_ty(&self, ty: Ty<'container>) {
+        self.ty.fulfill(DepNode::FieldTy(self.did), ty);
+    }
+}
+
+/// Records the substitutions used to translate the polytype for an
+/// item into the monotype of an item reference.
+#[derive(Clone)]
+pub struct ItemSubsts<'tcx> {
+    pub substs: Substs<'tcx>,
+}
+
+#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Debug, RustcEncodable, RustcDecodable)]
+pub enum ClosureKind {
+    // Warning: Ordering is significant here! The ordering is chosen
+    // because Fn is a subtrait of FnMut, which in turn is a subtrait
+    // of FnOnce; hence we order them so that Fn < FnMut < FnOnce.
+    Fn,
+    FnMut,
+    FnOnce,
+}
+
+impl ClosureKind {
+    pub fn trait_did(&self, cx: &TyCtxt) -> DefId {
+        let result = match *self {
+            ClosureKind::Fn => cx.lang_items.require(FnTraitLangItem),
+            ClosureKind::FnMut => {
+                cx.lang_items.require(FnMutTraitLangItem)
+            }
+            ClosureKind::FnOnce => {
+                cx.lang_items.require(FnOnceTraitLangItem)
+            }
+        };
+        match result {
+            Ok(trait_did) => trait_did,
+            Err(err) => cx.sess.fatal(&err[..]),
+        }
+    }
+
+    /// True if a type that implements this closure kind
+    /// must also implement `other`.
+    pub fn extends(self, other: ty::ClosureKind) -> bool {
+        match (self, other) {
+            (ClosureKind::Fn, ClosureKind::Fn) => true,
+            (ClosureKind::Fn, ClosureKind::FnMut) => true,
+            (ClosureKind::Fn, ClosureKind::FnOnce) => true,
+            (ClosureKind::FnMut, ClosureKind::FnMut) => true,
+            (ClosureKind::FnMut, ClosureKind::FnOnce) => true,
+            (ClosureKind::FnOnce, ClosureKind::FnOnce) => true,
+            _ => false,
+        }
+    }
+}
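+
+// Hedged sketch of the ordering and the `extends` relation described above:
+//
+// ```
+// assert!(ClosureKind::Fn < ClosureKind::FnMut);
+// assert!(ClosureKind::FnMut < ClosureKind::FnOnce);
+// assert!(ClosureKind::Fn.extends(ClosureKind::FnOnce));
+// assert!(!ClosureKind::FnOnce.extends(ClosureKind::Fn));
+// ```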
+
+impl<'tcx> TyS<'tcx> {
+    /// Iterator that walks `self` and any types reachable from
+    /// `self`, in depth-first order. Note that this only walks the
+    /// types that appear in `self`; it does not descend into the
+    /// fields of structs or variants. For example:
+    ///
+    /// ```notrust
+    /// isize => { isize }
+    /// Foo<Bar<isize>> => { Foo<Bar<isize>>, Bar<isize>, isize }
+    /// [isize] => { [isize], isize }
+    /// ```
+    pub fn walk(&'tcx self) -> TypeWalker<'tcx> {
+        TypeWalker::new(self)
+    }
+
+    /// Iterator that walks the immediate children of `self`.  Hence
+    /// `Foo<Bar<i32>, u32>` yields the sequence `[Bar<i32>, u32]`
+    /// (but not `i32`, as `walk` would).
+    pub fn walk_shallow(&'tcx self) -> IntoIter<Ty<'tcx>> {
+        walk::walk_shallow(self)
+    }
+
+    /// Walks `ty` and any types appearing within `ty`, invoking the
+    /// callback `f` on each type. If the callback returns false, then the
+    /// children of the current type are ignored.
+    ///
+    /// Note: prefer `ty.walk()` where possible.
+    pub fn maybe_walk<F>(&'tcx self, mut f: F)
+        where F : FnMut(Ty<'tcx>) -> bool
+    {
+        let mut walker = self.walk();
+        while let Some(ty) = walker.next() {
+            if !f(ty) {
+                walker.skip_current_subtree();
+            }
+        }
+    }
+}
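+
+// Hedged sketch of `maybe_walk`: skip everything beneath references while still
+// visiting the rest of the type (the filter itself is illustrative only):
+//
+// ```
+// ty.maybe_walk(|t| match t.sty {
+//     ty::TyRef(..) => false, // don't descend into the referent
+//     _ => true,
+// });
+// ```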
+
+impl<'tcx> ItemSubsts<'tcx> {
+    pub fn empty() -> ItemSubsts<'tcx> {
+        ItemSubsts { substs: Substs::empty() }
+    }
+
+    pub fn is_noop(&self) -> bool {
+        self.substs.is_noop()
+    }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum LvaluePreference {
+    PreferMutLvalue,
+    NoPreference
+}
+
+impl LvaluePreference {
+    pub fn from_mutbl(m: hir::Mutability) -> Self {
+        match m {
+            hir::MutMutable => PreferMutLvalue,
+            hir::MutImmutable => NoPreference,
+        }
+    }
+}
+
+/// Helper for looking things up in the various maps that are populated during
+/// typeck::collect (e.g., `cx.impl_or_trait_items`, `cx.tcache`, etc).  All of
+/// these share the pattern that if the id is local, it should have been loaded
+/// into the map by the `typeck::collect` phase.  If the def-id is external,
+/// then we have to go consult the crate loading code (and cache the result for
+/// the future).
+fn lookup_locally_or_in_crate_store<M, F>(descr: &str,
+                                          def_id: DefId,
+                                          map: &M,
+                                          load_external: F)
+                                          -> M::Value where
+    M: MemoizationMap<Key=DefId>,
+    F: FnOnce() -> M::Value,
+{
+    map.memoize(def_id, || {
+        if def_id.is_local() {
+            bug!("No def'n found for {:?} in tcx.{}", def_id, descr);
+        }
+        load_external()
+    })
+}
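+
+// For example, `lookup_item_type` further below is a direct instance of this
+// pattern:
+//
+// ```
+// lookup_locally_or_in_crate_store(
+//     "tcache", did, &self.tcache,
+//     || self.sess.cstore.item_type(self, did))
+// ```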
+
+impl BorrowKind {
+    pub fn from_mutbl(m: hir::Mutability) -> BorrowKind {
+        match m {
+            hir::MutMutable => MutBorrow,
+            hir::MutImmutable => ImmBorrow,
+        }
+    }
+
+    /// Returns a mutability `m` such that an `&m T` pointer could be used to obtain this borrow
+    /// kind. Because borrow kinds are richer than mutabilities, we sometimes have to pick a
+    /// mutability that is stronger than necessary so that it at least *would permit* the borrow in
+    /// question.
+    pub fn to_mutbl_lossy(self) -> hir::Mutability {
+        match self {
+            MutBorrow => hir::MutMutable,
+            ImmBorrow => hir::MutImmutable,
+
+            // We have no type corresponding to a unique imm borrow, so
+            // use `&mut`. It gives all the capabilities of an `&uniq`
+            // and hence is a safe "over approximation".
+            UniqueImmBorrow => hir::MutMutable,
+        }
+    }
+
+    pub fn to_user_str(&self) -> &'static str {
+        match *self {
+            MutBorrow => "mutable",
+            ImmBorrow => "immutable",
+            UniqueImmBorrow => "uniquely immutable",
+        }
+    }
+}
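+
+// Hedged sketch of the lossy mapping: only `UniqueImmBorrow` is over-approximated,
+// the other two kinds round-trip exactly.
+//
+// ```
+// assert_eq!(MutBorrow.to_mutbl_lossy(), hir::MutMutable);
+// assert_eq!(ImmBorrow.to_mutbl_lossy(), hir::MutImmutable);
+// assert_eq!(UniqueImmBorrow.to_mutbl_lossy(), hir::MutMutable); // over-approximation
+// ```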
+
+impl<'tcx> TyCtxt<'tcx> {
+    pub fn node_id_to_type(&self, id: NodeId) -> Ty<'tcx> {
+        match self.node_id_to_type_opt(id) {
+           Some(ty) => ty,
+           None => bug!("node_id_to_type: no type for node `{}`",
+                        self.map.node_to_string(id))
+        }
+    }
+
+    pub fn node_id_to_type_opt(&self, id: NodeId) -> Option<Ty<'tcx>> {
+        self.tables.borrow().node_types.get(&id).cloned()
+    }
+
+    pub fn node_id_item_substs(&self, id: NodeId) -> ItemSubsts<'tcx> {
+        match self.tables.borrow().item_substs.get(&id) {
+            None => ItemSubsts::empty(),
+            Some(ts) => ts.clone(),
+        }
+    }
+
+    // Returns the type of a pattern as a monotype. Like `expr_ty`, this function
+    // doesn't provide type parameter substitutions.
+    pub fn pat_ty(&self, pat: &hir::Pat) -> Ty<'tcx> {
+        self.node_id_to_type(pat.id)
+    }
+    pub fn pat_ty_opt(&self, pat: &hir::Pat) -> Option<Ty<'tcx>> {
+        self.node_id_to_type_opt(pat.id)
+    }
+
+    // Returns the type of an expression as a monotype.
+    //
+    // NB (1): This is the PRE-ADJUSTMENT TYPE for the expression.  That is, in
+    // some cases, we insert `AutoAdjustment` annotations such as auto-deref or
+    // auto-ref.  The type returned by this function does not consider such
+    // adjustments.  See `expr_ty_adjusted()` instead.
+    //
+    // NB (2): This type doesn't provide type parameter substitutions; e.g. if you
+    // ask for the type of "id" in "id(3)", it will return "fn(&isize) -> isize"
+    // instead of "fn(&T) -> T with T = isize".
+    pub fn expr_ty(&self, expr: &hir::Expr) -> Ty<'tcx> {
+        self.node_id_to_type(expr.id)
+    }
+
+    pub fn expr_ty_opt(&self, expr: &hir::Expr) -> Option<Ty<'tcx>> {
+        self.node_id_to_type_opt(expr.id)
+    }
+
+    /// Returns the type of `expr`, considering any `AutoAdjustment`
+    /// entry recorded for that expression.
+    ///
+    /// It would almost certainly be better to store the adjusted ty in with
+    /// the `AutoAdjustment`, but I opted not to do this because it would
+    /// require serializing and deserializing the type and, although that's not
+    /// hard to do, I just hate that code so much I didn't want to touch it
+    /// unless it was to fix it properly, which seemed a distraction from the
+    /// thread at hand! -nmatsakis
+    pub fn expr_ty_adjusted(&self, expr: &hir::Expr) -> Ty<'tcx> {
+        self.expr_ty(expr)
+            .adjust(self, expr.span, expr.id,
+                    self.tables.borrow().adjustments.get(&expr.id),
+                    |method_call| {
+            self.tables.borrow().method_map.get(&method_call).map(|method| method.ty)
+        })
+    }
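+
+    // Hedged illustration of the pre/post-adjustment distinction: for the receiver
+    // `v` in
+    //
+    // ```
+    // let v: Vec<u8> = Vec::new();
+    // v.len();
+    // ```
+    //
+    // `expr_ty` reports the pre-adjustment type `Vec<u8>`, while `expr_ty_adjusted`
+    // reflects the auto-ref inserted for the `len(&self)` call and reports `&Vec<u8>`.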
+
+    pub fn expr_ty_adjusted_opt(&self, expr: &hir::Expr) -> Option<Ty<'tcx>> {
+        self.expr_ty_opt(expr).map(|t| t.adjust(self,
+                                                expr.span,
+                                                expr.id,
+                                                self.tables.borrow().adjustments.get(&expr.id),
+                                                |method_call| {
+            self.tables.borrow().method_map.get(&method_call).map(|method| method.ty)
+        }))
+    }
+
+    pub fn expr_span(&self, id: NodeId) -> Span {
+        match self.map.find(id) {
+            Some(ast_map::NodeExpr(e)) => {
+                e.span
+            }
+            Some(f) => {
+                bug!("Node id {} is not an expr: {:?}", id, f);
+            }
+            None => {
+                bug!("Node id {} is not present in the node map", id);
+            }
+        }
+    }
+
+    pub fn local_var_name_str(&self, id: NodeId) -> InternedString {
+        match self.map.find(id) {
+            Some(ast_map::NodeLocal(pat)) => {
+                match pat.node {
+                    PatKind::Ident(_, ref path1, _) => path1.node.name.as_str(),
+                    _ => {
+                        bug!("Variable id {} maps to {:?}, not local", id, pat);
+                    },
+                }
+            },
+            r => bug!("Variable id {} maps to {:?}, not local", id, r),
+        }
+    }
+
+    pub fn resolve_expr(&self, expr: &hir::Expr) -> Def {
+        match self.def_map.borrow().get(&expr.id) {
+            Some(def) => def.full_def(),
+            None => {
+                span_bug!(expr.span, "no def-map entry for expr {}", expr.id);
+            }
+        }
+    }
+
+    pub fn expr_is_lval(&self, expr: &hir::Expr) -> bool {
+         match expr.node {
+            hir::ExprPath(..) => {
+                // We can't use resolve_expr here, as this needs to run on broken
+                // programs. We don't need to, though - associated items are all
+                // rvalues.
+                match self.def_map.borrow().get(&expr.id) {
+                    Some(&def::PathResolution {
+                        base_def: Def::Static(..), ..
+                    }) | Some(&def::PathResolution {
+                        base_def: Def::Upvar(..), ..
+                    }) | Some(&def::PathResolution {
+                        base_def: Def::Local(..), ..
+                    }) => {
+                        true
+                    }
+                    Some(&def::PathResolution { base_def: Def::Err, .. })=> true,
+                    Some(..) => false,
+                    None => span_bug!(expr.span, "no def for path {}", expr.id)
+                }
+            }
+
+            hir::ExprType(ref e, _) => {
+                self.expr_is_lval(e)
+            }
+
+            hir::ExprUnary(hir::UnDeref, _) |
+            hir::ExprField(..) |
+            hir::ExprTupField(..) |
+            hir::ExprIndex(..) => {
+                true
+            }
+
+            hir::ExprCall(..) |
+            hir::ExprMethodCall(..) |
+            hir::ExprStruct(..) |
+            hir::ExprTup(..) |
+            hir::ExprIf(..) |
+            hir::ExprMatch(..) |
+            hir::ExprClosure(..) |
+            hir::ExprBlock(..) |
+            hir::ExprRepeat(..) |
+            hir::ExprVec(..) |
+            hir::ExprBreak(..) |
+            hir::ExprAgain(..) |
+            hir::ExprRet(..) |
+            hir::ExprWhile(..) |
+            hir::ExprLoop(..) |
+            hir::ExprAssign(..) |
+            hir::ExprInlineAsm(..) |
+            hir::ExprAssignOp(..) |
+            hir::ExprLit(_) |
+            hir::ExprUnary(..) |
+            hir::ExprBox(..) |
+            hir::ExprAddrOf(..) |
+            hir::ExprBinary(..) |
+            hir::ExprCast(..) => {
+                false
+            }
+        }
+    }
+
+    pub fn provided_trait_methods(&self, id: DefId) -> Vec<Rc<Method<'tcx>>> {
+        if let Some(id) = self.map.as_local_node_id(id) {
+            if let ItemTrait(_, _, _, ref ms) = self.map.expect_item(id).node {
+                ms.iter().filter_map(|ti| {
+                    if let hir::MethodTraitItem(_, Some(_)) = ti.node {
+                        match self.impl_or_trait_item(self.map.local_def_id(ti.id)) {
+                            MethodTraitItem(m) => Some(m),
+                            _ => {
+                                bug!("provided_trait_methods(): \
+                                      non-method item found from \
+                                      looking up provided method?!")
+                            }
+                        }
+                    } else {
+                        None
+                    }
+                }).collect()
+            } else {
+                bug!("provided_trait_methods: `{:?}` is not a trait", id)
+            }
+        } else {
+            self.sess.cstore.provided_trait_methods(self, id)
+        }
+    }
+
+    pub fn associated_consts(&self, id: DefId) -> Vec<Rc<AssociatedConst<'tcx>>> {
+        if let Some(id) = self.map.as_local_node_id(id) {
+            match self.map.expect_item(id).node {
+                ItemTrait(_, _, _, ref tis) => {
+                    tis.iter().filter_map(|ti| {
+                        if let hir::ConstTraitItem(_, _) = ti.node {
+                            match self.impl_or_trait_item(self.map.local_def_id(ti.id)) {
+                                ConstTraitItem(ac) => Some(ac),
+                                _ => {
+                                    bug!("associated_consts(): \
+                                          non-const item found from \
+                                          looking up a constant?!")
+                                }
+                            }
+                        } else {
+                            None
+                        }
+                    }).collect()
+                }
+                ItemImpl(_, _, _, _, _, ref iis) => {
+                    iis.iter().filter_map(|ii| {
+                        if let hir::ImplItemKind::Const(_, _) = ii.node {
+                            match self.impl_or_trait_item(self.map.local_def_id(ii.id)) {
+                                ConstTraitItem(ac) => Some(ac),
+                                _ => {
+                                    bug!("associated_consts(): \
+                                          non-const item found from \
+                                          looking up a constant?!")
+                                }
+                            }
+                        } else {
+                            None
+                        }
+                    }).collect()
+                }
+                _ => {
+                    bug!("associated_consts: `{:?}` is not a trait or impl", id)
+                }
+            }
+        } else {
+            self.sess.cstore.associated_consts(self, id)
+        }
+    }
+
+    pub fn trait_impl_polarity(&self, id: DefId) -> Option<hir::ImplPolarity> {
+        if let Some(id) = self.map.as_local_node_id(id) {
+            match self.map.find(id) {
+                Some(ast_map::NodeItem(item)) => {
+                    match item.node {
+                        hir::ItemImpl(_, polarity, _, _, _, _) => Some(polarity),
+                        _ => None
+                    }
+                }
+                _ => None
+            }
+        } else {
+            self.sess.cstore.impl_polarity(id)
+        }
+    }
+
+    pub fn custom_coerce_unsized_kind(&self, did: DefId) -> adjustment::CustomCoerceUnsized {
+        self.custom_coerce_unsized_kinds.memoize(did, || {
+            let (kind, src) = if did.krate != LOCAL_CRATE {
+                (self.sess.cstore.custom_coerce_unsized_kind(did), "external")
+            } else {
+                (None, "local")
+            };
+
+            match kind {
+                Some(kind) => kind,
+                None => {
+                    bug!("custom_coerce_unsized_kind: \
+                          {} impl `{}` is missing its kind",
+                          src, self.item_path_str(did));
+                }
+            }
+        })
+    }
+
+    pub fn impl_or_trait_item(&self, id: DefId) -> ImplOrTraitItem<'tcx> {
+        lookup_locally_or_in_crate_store(
+            "impl_or_trait_items", id, &self.impl_or_trait_items,
+            || self.sess.cstore.impl_or_trait_item(self, id)
+                   .expect("missing ImplOrTraitItem in metadata"))
+    }
+
+    pub fn trait_item_def_ids(&self, id: DefId) -> Rc<Vec<ImplOrTraitItemId>> {
+        lookup_locally_or_in_crate_store(
+            "trait_item_def_ids", id, &self.trait_item_def_ids,
+            || Rc::new(self.sess.cstore.trait_item_def_ids(id)))
+    }
+
+    /// Returns the trait-ref corresponding to a given impl, or None if it is
+    /// an inherent impl.
+    pub fn impl_trait_ref(&self, id: DefId) -> Option<TraitRef<'tcx>> {
+        lookup_locally_or_in_crate_store(
+            "impl_trait_refs", id, &self.impl_trait_refs,
+            || self.sess.cstore.impl_trait_ref(self, id))
+    }
+
+    /// Returns whether this DefId refers to an impl
+    pub fn is_impl(&self, id: DefId) -> bool {
+        if let Some(id) = self.map.as_local_node_id(id) {
+            if let Some(ast_map::NodeItem(
+                &hir::Item { node: hir::ItemImpl(..), .. })) = self.map.find(id) {
+                true
+            } else {
+                false
+            }
+        } else {
+            self.sess.cstore.is_impl(id)
+        }
+    }
+
+    pub fn trait_ref_to_def_id(&self, tr: &hir::TraitRef) -> DefId {
+        self.def_map.borrow().get(&tr.ref_id).expect("no def-map entry for trait").def_id()
+    }
+
+    pub fn def_key(&self, id: DefId) -> ast_map::DefKey {
+        if id.is_local() {
+            self.map.def_key(id)
+        } else {
+            self.sess.cstore.def_key(id)
+        }
+    }
+
+    /// Returns the `DefPath` of an item. Note that if `id` is not
+    /// local to this crate -- or is inlined into this crate -- the
+    /// result will be a non-local `DefPath`.
+    pub fn def_path(&self, id: DefId) -> ast_map::DefPath {
+        if id.is_local() {
+            self.map.def_path(id)
+        } else {
+            self.sess.cstore.relative_def_path(id)
+        }
+    }
+
+    pub fn item_name(&self, id: DefId) -> ast::Name {
+        if let Some(id) = self.map.as_local_node_id(id) {
+            self.map.name(id)
+        } else {
+            self.sess.cstore.item_name(id)
+        }
+    }
+
+    // Register a given item type
+    pub fn register_item_type(&self, did: DefId, ty: TypeScheme<'tcx>) {
+        self.tcache.borrow_mut().insert(did, ty);
+    }
+
+    // If the given item is in an external crate, looks up its type and adds it to
+    // the type cache. Returns the type parameters and type.
+    pub fn lookup_item_type(&self, did: DefId) -> TypeScheme<'tcx> {
+        lookup_locally_or_in_crate_store(
+            "tcache", did, &self.tcache,
+            || self.sess.cstore.item_type(self, did))
+    }
+
+    /// Given the did of a trait, returns its canonical trait ref.
+    pub fn lookup_trait_def(&self, did: DefId) -> &'tcx TraitDef<'tcx> {
+        lookup_locally_or_in_crate_store(
+            "trait_defs", did, &self.trait_defs,
+            || self.alloc_trait_def(self.sess.cstore.trait_def(self, did))
+        )
+    }
+
+    /// Given the did of an ADT, return a master reference to its
+    /// definition. Unless you are planning on fulfilling the ADT's fields,
+    /// use lookup_adt_def instead.
+    pub fn lookup_adt_def_master(&self, did: DefId) -> AdtDefMaster<'tcx> {
+        lookup_locally_or_in_crate_store(
+            "adt_defs", did, &self.adt_defs,
+            || self.sess.cstore.adt_def(self, did)
+        )
+    }
+
+    /// Given the did of an ADT, return a reference to its definition.
+    pub fn lookup_adt_def(&self, did: DefId) -> AdtDef<'tcx> {
+        // when reverse-variance goes away, a transmute::<AdtDefMaster,AdtDef>
+        // would be needed here.
+        self.lookup_adt_def_master(did)
+    }
+
+    /// Given the did of an item, returns its full set of predicates.
+    pub fn lookup_predicates(&self, did: DefId) -> GenericPredicates<'tcx> {
+        lookup_locally_or_in_crate_store(
+            "predicates", did, &self.predicates,
+            || self.sess.cstore.item_predicates(self, did))
+    }
+
+    /// Given the did of a trait, returns its superpredicates.
+    pub fn lookup_super_predicates(&self, did: DefId) -> GenericPredicates<'tcx> {
+        lookup_locally_or_in_crate_store(
+            "super_predicates", did, &self.super_predicates,
+            || self.sess.cstore.item_super_predicates(self, did))
+    }
+
+    /// If `type_needs_drop` returns true, then `ty` is definitely
+    /// non-copy and *might* have a destructor attached; if it returns
+    /// false, then `ty` definitely has no destructor (i.e. no drop glue).
+    ///
+    /// (Note that this implies that if `ty` has a destructor attached,
+    /// then `type_needs_drop` will definitely return `true` for `ty`.)
+    pub fn type_needs_drop_given_env<'a>(&self,
+                                         ty: Ty<'tcx>,
+                                         param_env: &ty::ParameterEnvironment<'a,'tcx>) -> bool {
+        // Issue #22536: We first query type_moves_by_default.  It sees a
+        // normalized version of the type, and therefore will definitely
+        // know whether the type implements Copy (and thus needs no
+        // cleanup/drop/zeroing) ...
+        let implements_copy = !ty.moves_by_default(param_env, DUMMY_SP);
+
+        if implements_copy { return false; }
+
+        // ... (issue #22536 continued) but as an optimization, still use
+        // prior logic of asking if the `needs_drop` bit is set; we need
+        // not zero non-Copy types if they have no destructor.
+
+        // FIXME(#22815): Note that calling `ty::type_contents` is a
+        // conservative heuristic; it may report that `needs_drop` is set
+        // when the type does not actually have a destructor associated
+        // with it. But since `ty` absolutely did not have the `Copy`
+        // bound attached (see above), it is sound to treat it as having a
+        // destructor (e.g. zero its memory on move).
+
+        let contents = ty.type_contents(self);
+        debug!("type_needs_drop ty={:?} contents={:?}", ty, contents);
+        contents.needs_drop(self)
+    }
+
+    /// Get the attributes of a definition.
+    pub fn get_attrs(&self, did: DefId) -> Cow<'tcx, [ast::Attribute]> {
+        if let Some(id) = self.map.as_local_node_id(did) {
+            Cow::Borrowed(self.map.attrs(id))
+        } else {
+            Cow::Owned(self.sess.cstore.item_attrs(did))
+        }
+    }
+
+    /// Determine whether an item is annotated with an attribute
+    pub fn has_attr(&self, did: DefId, attr: &str) -> bool {
+        self.get_attrs(did).iter().any(|item| item.check_name(attr))
+    }
+
+    /// Determine whether an item is annotated with `#[repr(packed)]`
+    pub fn lookup_packed(&self, did: DefId) -> bool {
+        self.lookup_repr_hints(did).contains(&attr::ReprPacked)
+    }
+
+    /// Determine whether an item is annotated with `#[simd]`
+    pub fn lookup_simd(&self, did: DefId) -> bool {
+        self.has_attr(did, "simd")
+            || self.lookup_repr_hints(did).contains(&attr::ReprSimd)
+    }
+
+    pub fn item_variances(&self, item_id: DefId) -> Rc<ItemVariances> {
+        lookup_locally_or_in_crate_store(
+            "item_variance_map", item_id, &self.item_variance_map,
+            || Rc::new(self.sess.cstore.item_variances(item_id)))
+    }
+
+    pub fn trait_has_default_impl(&self, trait_def_id: DefId) -> bool {
+        self.populate_implementations_for_trait_if_necessary(trait_def_id);
+
+        let def = self.lookup_trait_def(trait_def_id);
+        def.flags.get().intersects(TraitFlags::HAS_DEFAULT_IMPL)
+    }
+
+    /// Records a trait-to-implementation mapping.
+    pub fn record_trait_has_default_impl(&self, trait_def_id: DefId) {
+        let def = self.lookup_trait_def(trait_def_id);
+        def.flags.set(def.flags.get() | TraitFlags::HAS_DEFAULT_IMPL)
+    }
+
+    /// Load primitive inherent implementations if necessary
+    pub fn populate_implementations_for_primitive_if_necessary(&self,
+                                                               primitive_def_id: DefId) {
+        if primitive_def_id.is_local() {
+            return
+        }
+
+        // The primitive is not local, hence we are reading this out
+        // of metadata.
+        let _ignore = self.dep_graph.in_ignore();
+
+        if self.populated_external_primitive_impls.borrow().contains(&primitive_def_id) {
+            return
+        }
+
+        debug!("populate_implementations_for_primitive_if_necessary: searching for {:?}",
+               primitive_def_id);
+
+        let impl_items = self.sess.cstore.impl_items(primitive_def_id);
+
+        // Store the implementation info.
+        self.impl_items.borrow_mut().insert(primitive_def_id, impl_items);
+        self.populated_external_primitive_impls.borrow_mut().insert(primitive_def_id);
+    }
+
+    /// Populates the type context with all the inherent implementations for
+    /// the given type if necessary.
+    pub fn populate_inherent_implementations_for_type_if_necessary(&self,
+                                                                   type_id: DefId) {
+        if type_id.is_local() {
+            return
+        }
+
+        // The type is not local, hence we are reading this out of
+        // metadata and don't need to track edges.
+        let _ignore = self.dep_graph.in_ignore();
+
+        if self.populated_external_types.borrow().contains(&type_id) {
+            return
+        }
+
+        debug!("populate_inherent_implementations_for_type_if_necessary: searching for {:?}",
+               type_id);
+
+        let inherent_impls = self.sess.cstore.inherent_implementations_for_type(type_id);
+        for &impl_def_id in &inherent_impls {
+            // Store the implementation info.
+            let impl_items = self.sess.cstore.impl_items(impl_def_id);
+            self.impl_items.borrow_mut().insert(impl_def_id, impl_items);
+        }
+
+        self.inherent_impls.borrow_mut().insert(type_id, Rc::new(inherent_impls));
+        self.populated_external_types.borrow_mut().insert(type_id);
+    }
+
+    /// Populates the type context with all the implementations for the given
+    /// trait if necessary.
+    pub fn populate_implementations_for_trait_if_necessary(&self, trait_id: DefId) {
+        if trait_id.is_local() {
+            return
+        }
+
+        // The trait is not local, hence we are reading this out of
+        // metadata and don't need to track edges.
+        let _ignore = self.dep_graph.in_ignore();
+
+        let def = self.lookup_trait_def(trait_id);
+        if def.flags.get().intersects(TraitFlags::IMPLS_VALID) {
+            return;
+        }
+
+        debug!("populate_implementations_for_trait_if_necessary: searching for {:?}", def);
+
+        if self.sess.cstore.is_defaulted_trait(trait_id) {
+            self.record_trait_has_default_impl(trait_id);
+        }
+
+        for impl_def_id in self.sess.cstore.implementations_of_trait(trait_id) {
+            let impl_items = self.sess.cstore.impl_items(impl_def_id);
+            let trait_ref = self.impl_trait_ref(impl_def_id).unwrap();
+
+            // Record the trait->implementation mapping.
+            if let Some(parent) = self.sess.cstore.impl_parent(impl_def_id) {
+                def.record_remote_impl(self, impl_def_id, trait_ref, parent);
+            } else {
+                def.record_remote_impl(self, impl_def_id, trait_ref, trait_id);
+            }
+
+            // For any methods that use a default implementation, add them to
+            // the map. This is a bit unfortunate.
+            for impl_item_def_id in &impl_items {
+                let method_def_id = impl_item_def_id.def_id();
+                // load impl items eagerly for convenience
+                // FIXME: we may want to load these lazily
+                self.impl_or_trait_item(method_def_id);
+            }
+
+            // Store the implementation info.
+            self.impl_items.borrow_mut().insert(impl_def_id, impl_items);
+        }
+
+        def.flags.set(def.flags.get() | TraitFlags::IMPLS_VALID);
+    }
+
+    pub fn closure_kind(&self, def_id: DefId) -> ty::ClosureKind {
+        Tables::closure_kind(&self.tables, self, def_id)
+    }
+
+    pub fn closure_type(&self,
+                        def_id: DefId,
+                        substs: &ClosureSubsts<'tcx>)
+                        -> ty::ClosureTy<'tcx>
+    {
+        Tables::closure_type(&self.tables, self, def_id, substs)
+    }
+
+    /// Given the def_id of an impl, return the def_id of the trait it implements.
+    /// If it implements no trait, return `None`.
+    pub fn trait_id_of_impl(&self, def_id: DefId) -> Option<DefId> {
+        self.impl_trait_ref(def_id).map(|tr| tr.def_id)
+    }
+
+    /// If the given def ID describes a method belonging to an impl, return the
+    /// ID of the impl that the method belongs to. Otherwise, return `None`.
+    pub fn impl_of_method(&self, def_id: DefId) -> Option<DefId> {
+        if def_id.krate != LOCAL_CRATE {
+            return self.sess.cstore.impl_or_trait_item(self, def_id).and_then(|item| {
+                match item.container() {
+                    TraitContainer(_) => None,
+                    ImplContainer(def_id) => Some(def_id),
+                }
+            });
+        }
+        match self.impl_or_trait_items.borrow().get(&def_id).cloned() {
+            Some(trait_item) => {
+                match trait_item.container() {
+                    TraitContainer(_) => None,
+                    ImplContainer(def_id) => Some(def_id),
+                }
+            }
+            None => None
+        }
+    }
+
+    /// If the given def ID describes an item belonging to a trait (either a
+    /// default method or an implementation of a trait method), return the ID of
+    /// the trait that the method belongs to. Otherwise, return `None`.
+    pub fn trait_of_item(&self, def_id: DefId) -> Option<DefId> {
+        if def_id.krate != LOCAL_CRATE {
+            return self.sess.cstore.trait_of_item(self, def_id);
+        }
+        match self.impl_or_trait_items.borrow().get(&def_id).cloned() {
+            Some(impl_or_trait_item) => {
+                match impl_or_trait_item.container() {
+                    TraitContainer(def_id) => Some(def_id),
+                    ImplContainer(def_id) => self.trait_id_of_impl(def_id),
+                }
+            }
+            None => None
+        }
+    }
+
+    /// If the given def ID describes an item belonging to a trait (either a
+    /// default method or an implementation of a trait method), return the ID of
+    /// the method inside trait definition (this means that if the given def ID
+    /// is already that of the original trait method, then the return value is
+    /// the same).
+    /// Otherwise, return `None`.
+    pub fn trait_item_of_item(&self, def_id: DefId) -> Option<ImplOrTraitItemId> {
+        let impl_item = match self.impl_or_trait_items.borrow().get(&def_id) {
+            Some(m) => m.clone(),
+            None => return None,
+        };
+        let name = impl_item.name();
+        match self.trait_of_item(def_id) {
+            Some(trait_did) => {
+                self.trait_items(trait_did).iter()
+                    .find(|item| item.name() == name)
+                    .map(|item| item.id())
+            }
+            None => None
+        }
+    }
+
+    /// Construct a parameter environment suitable for static contexts or other contexts where there
+    /// are no free type/lifetime parameters in scope.
+    pub fn empty_parameter_environment<'a>(&'a self)
+                                           -> ParameterEnvironment<'a,'tcx> {
+
+        // for an empty parameter environment, there ARE no free
+        // regions, so it shouldn't matter what we use for the free id
+        let free_id_outlive = self.region_maps.node_extent(ast::DUMMY_NODE_ID);
+        ty::ParameterEnvironment { tcx: self,
+                                   free_substs: Substs::empty(),
+                                   caller_bounds: Vec::new(),
+                                   implicit_region_bound: ty::ReEmpty,
+                                   selection_cache: traits::SelectionCache::new(),
+                                   evaluation_cache: traits::EvaluationCache::new(),
+                                   free_id_outlive: free_id_outlive }
+    }
+
+    /// Constructs and returns a substitution that can be applied to move from
+    /// the "outer" view of a type or method to the "inner" view.
+    /// In general, this means converting from bound parameters to
+    /// free parameters. Since we currently represent bound/free type
+    /// parameters in the same way, this only has an effect on regions.
+    pub fn construct_free_substs(&self, generics: &Generics<'tcx>,
+                                 free_id_outlive: CodeExtent) -> Substs<'tcx> {
+        // map T => T
+        let mut types = VecPerParamSpace::empty();
+        for def in generics.types.as_slice() {
+            debug!("construct_parameter_environment(): push_types_from_defs: def={:?}",
+                    def);
+            types.push(def.space, self.mk_param_from_def(def));
+        }
+
+        // map bound 'a => free 'a
+        let mut regions = VecPerParamSpace::empty();
+        for def in generics.regions.as_slice() {
+            let region =
+                ReFree(FreeRegion { scope: free_id_outlive,
+                                    bound_region: BrNamed(def.def_id, def.name) });
+            debug!("push_region_params {:?}", region);
+            regions.push(def.space, region);
+        }
+
+        Substs {
+            types: types,
+            regions: regions,
+        }
+    }
+
+    /// See `ParameterEnvironment` struct def'n for details.
+    /// If you were using `free_id: NodeId`, you might try `self.region_maps.item_extent(free_id)`
+    /// for the `free_id_outlive` parameter. (But note that that is not always quite right.)
+    pub fn construct_parameter_environment<'a>(&'a self,
+                                               span: Span,
+                                               generics: &ty::Generics<'tcx>,
+                                               generic_predicates: &ty::GenericPredicates<'tcx>,
+                                               free_id_outlive: CodeExtent)
+                                               -> ParameterEnvironment<'a, 'tcx>
+    {
+        //
+        // Construct the free substs.
+        //
+
+        let free_substs = self.construct_free_substs(generics, free_id_outlive);
+
+        //
+        // Compute the bounds on Self and the type parameters.
+        //
+
+        let bounds = generic_predicates.instantiate(self, &free_substs);
+        let bounds = self.liberate_late_bound_regions(free_id_outlive, &ty::Binder(bounds));
+        let predicates = bounds.predicates.into_vec();
+
+        // Finally, we have to normalize the bounds in the environment, in
+        // case they contain any associated type projections. This process
+        // can yield errors if they contain illegal associated types, like
+        // `<i32 as Foo>::Bar` where `i32` does not implement `Foo`. We
+        // report these errors right here; this doesn't actually feel
+        // right to me, because constructing the environment feels like a
+        // kind of a "idempotent" action, but I'm not sure where would be
+        // a better place. In practice, we construct environments for
+        // every fn once during type checking, and we'll abort if there
+        // are any errors at that point, so after type checking you can be
+        // sure that this will succeed without errors anyway.
+        //
+
+        let unnormalized_env = ty::ParameterEnvironment {
+            tcx: self,
+            free_substs: free_substs,
+            implicit_region_bound: ty::ReScope(free_id_outlive),
+            caller_bounds: predicates,
+            selection_cache: traits::SelectionCache::new(),
+            evaluation_cache: traits::EvaluationCache::new(),
+            free_id_outlive: free_id_outlive,
+        };
+
+        let cause = traits::ObligationCause::misc(span, free_id_outlive.node_id(&self.region_maps));
+        traits::normalize_param_env_or_error(unnormalized_env, cause)
+    }
+
+    pub fn is_method_call(&self, expr_id: NodeId) -> bool {
+        self.tables.borrow().method_map.contains_key(&MethodCall::expr(expr_id))
+    }
+
+    pub fn is_overloaded_autoderef(&self, expr_id: NodeId, autoderefs: u32) -> bool {
+        self.tables.borrow().method_map.contains_key(&MethodCall::autoderef(expr_id,
+                                                                            autoderefs))
+    }
+
+    pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture> {
+        Some(self.tables.borrow().upvar_capture_map.get(&upvar_id).unwrap().clone())
+    }
+
+    pub fn visit_all_items_in_krate<V,F>(&self,
+                                         dep_node_fn: F,
+                                         visitor: &mut V)
+        where F: FnMut(DefId) -> DepNode<DefId>, V: Visitor<'tcx>
+    {
+        dep_graph::visit_all_items_in_krate(self, dep_node_fn, visitor);
+    }
+
+    /// Looks up the span of `impl_did` if the impl is local; otherwise returns `Err`
+    /// with the name of the crate containing the impl.
+    pub fn span_of_impl(&self, impl_did: DefId) -> Result<Span, InternedString> {
+        if impl_did.is_local() {
+            let node_id = self.map.as_local_node_id(impl_did).unwrap();
+            Ok(self.map.span(node_id))
+        } else {
+            Err(self.sess.cstore.crate_name(impl_did.krate))
+        }
+    }
+}
+
+/// The category of explicit self.
+#[derive(Clone, Copy, Eq, PartialEq, Debug)]
+pub enum ExplicitSelfCategory {
+    Static,
+    ByValue,
+    ByReference(Region, hir::Mutability),
+    ByBox,
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    pub fn with_freevars<T, F>(&self, fid: NodeId, f: F) -> T where
+        F: FnOnce(&[hir::Freevar]) -> T,
+    {
+        match self.freevars.borrow().get(&fid) {
+            None => f(&[]),
+            Some(d) => f(&d[..])
+        }
+    }
+}
diff --git a/src/librustc/ty/outlives.rs b/src/librustc/ty/outlives.rs
new file mode 100644 (file)
index 0000000..898c9d6
--- /dev/null
@@ -0,0 +1,220 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// The outlives relation `T: 'a` or `'a: 'b`. This code frequently
+// refers to rules defined in RFC 1214 (`OutlivesFooBar`), so see that
+// RFC for reference.
+
+use infer::InferCtxt;
+use ty::{self, Ty, TypeFoldable};
+
+#[derive(Debug)]
+pub enum Component<'tcx> {
+    Region(ty::Region),
+    Param(ty::ParamTy),
+    UnresolvedInferenceVariable(ty::InferTy),
+
+    // Projections like `T::Foo` are tricky because a constraint like
+    // `T::Foo: 'a` can be satisfied in so many ways. There may be a
+    // where-clause that says `T::Foo: 'a`, or the defining trait may
+    // include a bound like `type Foo: 'static`, or -- in the most
+    // conservative way -- we can prove that `T: 'a` (more generally,
+    // that all components in the projection outlive `'a`). This code
+    // is not in a position to judge which is the best technique, so
+    // we just produce the projection as a component and leave it to
+    // the consumer to decide (but see `EscapingProjection` below).
+    Projection(ty::ProjectionTy<'tcx>),
+
+    // In the case where a projection has escaping regions -- meaning
+    // regions bound within the type itself -- we always use
+    // the most conservative rule, which requires that all components
+    // outlive the bound. So for example if we had a type like this:
+    //
+    //     for<'a> Trait1<  <T as Trait2<'a,'b>>::Foo  >
+    //                      ~~~~~~~~~~~~~~~~~~~~~~~~~
+    //
+    // then the inner projection (underlined) has an escaping region
+    // `'a`. We consider the outer trait to meet a bound `'c` if `'b`
+    // outlives `'c` (i.e. `'b: 'c`); we don't consider whether the trait
+    // declares that `Foo: 'static` etc. Therefore, we just return the
+    // free components of such a projection (in this case, `'b`).
+    //
+    // However, in the future, we may want to get smarter, and
+    // actually return a "higher-ranked projection" here. Therefore,
+    // we mark that these components are part of an escaping
+    // projection, so that implied bounds code can avoid relying on
+    // them. This gives us room to improve the regionck reasoning in
+    // the future without breaking backwards compat.
+    EscapingProjection(Vec<Component<'tcx>>),
+}
+
+/// Returns all the things that must outlive `'a` for the condition
+/// `ty0: 'a` to hold.
+pub fn components<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
+                           ty0: Ty<'tcx>)
+                           -> Vec<Component<'tcx>> {
+    let mut components = vec![];
+    compute_components(infcx, ty0, &mut components);
+    debug!("components({:?}) = {:?}", ty0, components);
+    components
+}
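+
+// Hedged illustration of the decomposition: for a type such as
+//
+// ```
+// (&'a u32, T, <T as Iterator>::Item)
+// ```
+//
+// `components` yields, up to ordering, a `Region('a)`, a `Param(T)` and a
+// `Projection(<T as Iterator>::Item)` component, matching the "base" cases
+// handled in `compute_components` below.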
+
+fn compute_components<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
+                               ty: Ty<'tcx>,
+                               out: &mut Vec<Component<'tcx>>) {
+    // Descend through the types, looking for the various "base"
+    // components and collecting them into `out`. This is not written
+    // with `collect()` because of the need to sometimes skip subtrees
+    // in the `subtys` iterator (e.g., when encountering a
+    // projection).
+    match ty.sty {
+        ty::TyClosure(_, ref substs) => {
+            // FIXME(#27086). We do not accumulate from substs, since they
+            // don't represent reachable data. This means that, in
+            // practice, some of the lifetime parameters might not
+            // be in scope when the body runs, so long as there is
+            // no reachable data with that lifetime. For better or
+            // worse, this is consistent with fn types, however,
+            // which can also encapsulate data in this fashion
+            // (though it's somewhat harder, and typically
+            // requires virtual dispatch).
+            //
+            // Note that changing this (in a naive way, at least)
+            // causes regressions for what appears to be perfectly
+            // reasonable code like this:
+            //
+            // ```
+            // fn foo<'a>(p: &Data<'a>) {
+            //    bar(|q: &mut Parser| q.read_addr())
+            // }
+            // fn bar(p: Box<FnMut(&mut Parser)+'static>) {
+            // }
+            // ```
+            //
+            // Note that `p` (and `'a`) are not used in the
+            // closure at all, but to meet the requirement that
+            // the closure type `C: 'static` (so it can be coerced
+            // to the object type), we get the requirement that
+            // `'a: 'static` since `'a` appears in the closure
+            // type `C`.
+            //
+            // A smarter fix might "prune" unused `func_substs` --
+            // this would avoid breaking simple examples like
+            // this, but would still break others (which might
+            // indeed be invalid, depending on your POV). Pruning
+            // would be a subtle process, since we have to see
+            // what func/type parameters are used and unused,
+            // taking into consideration UFCS and so forth.
+
+            for &upvar_ty in &substs.upvar_tys {
+                compute_components(infcx, upvar_ty, out);
+            }
+        }
+
+        // OutlivesTypeParameterEnv -- the actual checking that `X:'a`
+        // is implied by the environment is done in regionck.
+        ty::TyParam(p) => {
+            out.push(Component::Param(p));
+        }
+
+        // For projections, we prefer to generate an obligation like
+        // `<P0 as Trait<P1...Pn>>::Foo: 'a`, because this gives the
+        // regionck more ways to prove that it holds. However,
+        // regionck is not (at least currently) prepared to deal with
+        // higher-ranked regions that may appear in the
+    // trait-ref. Therefore, if we see any higher-ranked regions,
+    // we simply fall back to the most restrictive rule, which
+        // requires that `Pi: 'a` for all `i`.
+        ty::TyProjection(ref data) => {
+            if !data.has_escaping_regions() {
+                // best case: no escaping regions, so push the
+                // projection and skip the subtree (thus generating no
+                // constraints for Pi). This defers the choice between
+                // the rules OutlivesProjectionEnv,
+                // OutlivesProjectionTraitDef, and
+                // OutlivesProjectionComponents to regionck.
+                out.push(Component::Projection(*data));
+            } else {
+                // fallback case: hard code
+                // OutlivesProjectionComponents.  Continue walking
+                // through and constrain Pi.
+                let subcomponents = capture_components(infcx, ty);
+                out.push(Component::EscapingProjection(subcomponents));
+            }
+        }
+
+        // If we encounter an inference variable, try to resolve it
+        // and proceed with resolved version. If we cannot resolve it,
+        // then record the unresolved variable as a component.
+        ty::TyInfer(_) => {
+            let ty = infcx.resolve_type_vars_if_possible(&ty);
+            if let ty::TyInfer(infer_ty) = ty.sty {
+                out.push(Component::UnresolvedInferenceVariable(infer_ty));
+            } else {
+                compute_components(infcx, ty, out);
+            }
+        }
+
+        // Most types do not introduce any region binders, nor
+        // involve any other subtle cases, and so the WF relation
+        // simply constrains any regions referenced directly by
+        // the type and then visits the types that are lexically
+        // contained within. (The comments refer to relevant rules
+        // from RFC1214.)
+        ty::TyBool |            // OutlivesScalar
+        ty::TyChar |            // OutlivesScalar
+        ty::TyInt(..) |         // OutlivesScalar
+        ty::TyUint(..) |        // OutlivesScalar
+        ty::TyFloat(..) |       // OutlivesScalar
+        ty::TyEnum(..) |        // OutlivesNominalType
+        ty::TyStruct(..) |      // OutlivesNominalType
+        ty::TyBox(..) |         // OutlivesNominalType (ish)
+        ty::TyStr |             // OutlivesScalar (ish)
+        ty::TyArray(..) |       // ...
+        ty::TySlice(..) |       // ...
+        ty::TyRawPtr(..) |      // ...
+        ty::TyRef(..) |         // OutlivesReference
+        ty::TyTuple(..) |       // ...
+        ty::TyFnDef(..) |       // OutlivesFunction (*)
+        ty::TyFnPtr(_) |        // OutlivesFunction (*)
+        ty::TyTrait(..) |       // OutlivesObject, OutlivesFragment (*)
+        ty::TyError => {
+            // (*) Bare functions and traits are both binders. In the
+            // RFC, this means we would add the bound regions to the
+            // "bound regions list".  In our representation, no such
+            // list is maintained explicitly, because bound regions
+            // themselves can be readily identified.
+
+            push_region_constraints(out, ty.regions());
+            for subty in ty.walk_shallow() {
+                compute_components(infcx, subty, out);
+            }
+        }
+    }
+}
+
+fn capture_components<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
+                               ty: Ty<'tcx>)
+                               -> Vec<Component<'tcx>> {
+    let mut temp = vec![];
+    push_region_constraints(&mut temp, ty.regions());
+    for subty in ty.walk_shallow() {
+        compute_components(infcx, subty, &mut temp);
+    }
+    temp
+}
+
+fn push_region_constraints<'tcx>(out: &mut Vec<Component<'tcx>>, regions: Vec<ty::Region>) {
+    for r in regions {
+        if !r.is_bound() {
+            out.push(Component::Region(r));
+        }
+    }
+}
diff --git a/src/librustc/ty/relate.rs b/src/librustc/ty/relate.rs
new file mode 100644 (file)
index 0000000..f14e680
--- /dev/null
+++ b/src/librustc/ty/relate.rs
@@ -0,0 +1,695 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Generalized type relating mechanism. A type relation R relates a
+//! pair of values (A, B). A and B are usually types or regions but
+//! can be other things. Examples of type relations are subtyping,
+//! type equality, etc.
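+//!
+//! As an informal illustration (not part of this module's API), subtyping is
+//! the relation exercised by ordinary source-level coercions such as:
+//!
+//!     fn shorten<'a>(s: &'static str) -> &'a str { s }  // &'static str <: &'a str
+//!
+//! whereas type equality relates a type only to itself.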
+
+use hir::def_id::DefId;
+use ty::subst::{ParamSpace, Substs};
+use ty::{self, Ty, TyCtxt, TypeFoldable};
+use ty::error::{ExpectedFound, TypeError};
+use std::rc::Rc;
+use syntax::abi;
+use hir as ast;
+
+pub type RelateResult<'tcx, T> = Result<T, TypeError<'tcx>>;
+
+#[derive(Clone, Debug)]
+pub enum Cause {
+    ExistentialRegionBound, // relating an existential region bound
+}
+
+pub trait TypeRelation<'a,'tcx> : Sized {
+    fn tcx(&self) -> &'a TyCtxt<'tcx>;
+
+    /// Returns a static string we can use for printouts.
+    fn tag(&self) -> &'static str;
+
+    /// Returns true if the value `a` is the "expected" type in the
+    /// relation. Just affects error messages.
+    fn a_is_expected(&self) -> bool;
+
+    fn with_cause<F,R>(&mut self, _cause: Cause, f: F) -> R
+        where F: FnOnce(&mut Self) -> R
+    {
+        f(self)
+    }
+
+    /// Generic relation routine suitable for almost anything.
+    fn relate<T:Relate<'a,'tcx>>(&mut self, a: &T, b: &T) -> RelateResult<'tcx, T> {
+        Relate::relate(self, a, b)
+    }
+
+    /// Relate elements of two slices pairwise.
+    fn relate_zip<T:Relate<'a,'tcx>>(&mut self, a: &[T], b: &[T]) -> RelateResult<'tcx, Vec<T>> {
+        assert_eq!(a.len(), b.len());
+        a.iter().zip(b).map(|(a, b)| self.relate(a, b)).collect()
+    }
+
+    /// Switch variance for the purpose of relating `a` and `b`.
+    fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
+                                               variance: ty::Variance,
+                                               a: &T,
+                                               b: &T)
+                                               -> RelateResult<'tcx, T>;
+
+    // Overrideable relations. You shouldn't typically call these
+    // directly, instead call `relate()`, which in turn calls
+    // these. This is not only more uniform but also allows us to add
+    // additional hooks for other types in the future if needed
+    // without making older code, which called `relate`, obsolete.
+
+    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>)
+           -> RelateResult<'tcx, Ty<'tcx>>;
+
+    fn regions(&mut self, a: ty::Region, b: ty::Region)
+               -> RelateResult<'tcx, ty::Region>;
+
+    fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
+                  -> RelateResult<'tcx, ty::Binder<T>>
+        where T: Relate<'a,'tcx>;
+}
+
+pub trait Relate<'a,'tcx>: TypeFoldable<'tcx> {
+    fn relate<R:TypeRelation<'a,'tcx>>(relation: &mut R,
+                                       a: &Self,
+                                       b: &Self)
+                                       -> RelateResult<'tcx, Self>;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Relate impls
+
+impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::TypeAndMut<'tcx> {
+    fn relate<R>(relation: &mut R,
+                 a: &ty::TypeAndMut<'tcx>,
+                 b: &ty::TypeAndMut<'tcx>)
+                 -> RelateResult<'tcx, ty::TypeAndMut<'tcx>>
+        where R: TypeRelation<'a,'tcx>
+    {
+        debug!("{}.mts({:?}, {:?})",
+               relation.tag(),
+               a,
+               b);
+        if a.mutbl != b.mutbl {
+            Err(TypeError::Mutability)
+        } else {
+            let mutbl = a.mutbl;
+            let variance = match mutbl {
+                ast::Mutability::MutImmutable => ty::Covariant,
+                ast::Mutability::MutMutable => ty::Invariant,
+            };
+            let ty = relation.relate_with_variance(variance, &a.ty, &b.ty)?;
+            Ok(ty::TypeAndMut {ty: ty, mutbl: mutbl})
+        }
+    }
+}
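+
+// Illustrative note (not compiler code): the variance chosen above mirrors
+// source-level typing. `&T` is covariant in `T`, so this compiles:
+//
+//     fn covariant<'a>(x: &'a &'static u32) -> &'a &'a u32 { x }
+//
+// while the `&mut` analogue is rejected, because `&mut T` is invariant in `T`.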
+
+// substitutions are not themselves relatable without more context,
+// but relating them is an important subroutine for things that ARE relatable,
+// like traits etc.
+fn relate_item_substs<'a,'tcx:'a,R>(relation: &mut R,
+                                    item_def_id: DefId,
+                                    a_subst: &Substs<'tcx>,
+                                    b_subst: &Substs<'tcx>)
+                                    -> RelateResult<'tcx, Substs<'tcx>>
+    where R: TypeRelation<'a,'tcx>
+{
+    debug!("substs: item_def_id={:?} a_subst={:?} b_subst={:?}",
+           item_def_id,
+           a_subst,
+           b_subst);
+
+    let variances;
+    let opt_variances = if relation.tcx().variance_computed.get() {
+        variances = relation.tcx().item_variances(item_def_id);
+        Some(&*variances)
+    } else {
+        None
+    };
+    relate_substs(relation, opt_variances, a_subst, b_subst)
+}
+
+pub fn relate_substs<'a,'tcx:'a,R>(relation: &mut R,
+                                   variances: Option<&ty::ItemVariances>,
+                                   a_subst: &Substs<'tcx>,
+                                   b_subst: &Substs<'tcx>)
+                                   -> RelateResult<'tcx, Substs<'tcx>>
+    where R: TypeRelation<'a,'tcx>
+{
+    let mut substs = Substs::empty();
+
+    for &space in &ParamSpace::all() {
+        let a_tps = a_subst.types.get_slice(space);
+        let b_tps = b_subst.types.get_slice(space);
+        let t_variances = variances.map(|v| v.types.get_slice(space));
+        let tps = relate_type_params(relation, t_variances, a_tps, b_tps)?;
+        substs.types.replace(space, tps);
+    }
+
+    for &space in &ParamSpace::all() {
+        let a_regions = a_subst.regions.get_slice(space);
+        let b_regions = b_subst.regions.get_slice(space);
+        let r_variances = variances.map(|v| v.regions.get_slice(space));
+        let regions = relate_region_params(relation,
+                                           r_variances,
+                                           a_regions,
+                                           b_regions)?;
+        substs.regions.replace(space, regions);
+    }
+
+    Ok(substs)
+}
+
+fn relate_type_params<'a,'tcx:'a,R>(relation: &mut R,
+                                    variances: Option<&[ty::Variance]>,
+                                    a_tys: &[Ty<'tcx>],
+                                    b_tys: &[Ty<'tcx>])
+                                    -> RelateResult<'tcx, Vec<Ty<'tcx>>>
+    where R: TypeRelation<'a,'tcx>
+{
+    if a_tys.len() != b_tys.len() {
+        return Err(TypeError::TyParamSize(expected_found(relation,
+                                                         &a_tys.len(),
+                                                         &b_tys.len())));
+    }
+
+    (0 .. a_tys.len())
+        .map(|i| {
+            let a_ty = a_tys[i];
+            let b_ty = b_tys[i];
+            let v = variances.map_or(ty::Invariant, |v| v[i]);
+            relation.relate_with_variance(v, &a_ty, &b_ty)
+        })
+        .collect()
+}
+
+fn relate_region_params<'a,'tcx:'a,R>(relation: &mut R,
+                                      variances: Option<&[ty::Variance]>,
+                                      a_rs: &[ty::Region],
+                                      b_rs: &[ty::Region])
+                                      -> RelateResult<'tcx, Vec<ty::Region>>
+    where R: TypeRelation<'a,'tcx>
+{
+    let num_region_params = a_rs.len();
+
+    debug!("relate_region_params(a_rs={:?}, \
+            b_rs={:?}, variances={:?})",
+           a_rs,
+           b_rs,
+           variances);
+
+    assert_eq!(num_region_params,
+               variances.map_or(num_region_params,
+                                |v| v.len()));
+
+    assert_eq!(num_region_params, b_rs.len());
+
+    (0..a_rs.len())
+        .map(|i| {
+            let a_r = a_rs[i];
+            let b_r = b_rs[i];
+            let variance = variances.map_or(ty::Invariant, |v| v[i]);
+            relation.relate_with_variance(variance, &a_r, &b_r)
+        })
+        .collect()
+}
+
+impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::BareFnTy<'tcx> {
+    fn relate<R>(relation: &mut R,
+                 a: &ty::BareFnTy<'tcx>,
+                 b: &ty::BareFnTy<'tcx>)
+                 -> RelateResult<'tcx, ty::BareFnTy<'tcx>>
+        where R: TypeRelation<'a,'tcx>
+    {
+        let unsafety = relation.relate(&a.unsafety, &b.unsafety)?;
+        let abi = relation.relate(&a.abi, &b.abi)?;
+        let sig = relation.relate(&a.sig, &b.sig)?;
+        Ok(ty::BareFnTy {unsafety: unsafety,
+                         abi: abi,
+                         sig: sig})
+    }
+}
+
+impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::FnSig<'tcx> {
+    fn relate<R>(relation: &mut R,
+                 a: &ty::FnSig<'tcx>,
+                 b: &ty::FnSig<'tcx>)
+                 -> RelateResult<'tcx, ty::FnSig<'tcx>>
+        where R: TypeRelation<'a,'tcx>
+    {
+        if a.variadic != b.variadic {
+            return Err(TypeError::VariadicMismatch(
+                expected_found(relation, &a.variadic, &b.variadic)));
+        }
+
+        let inputs = relate_arg_vecs(relation,
+                                     &a.inputs,
+                                     &b.inputs)?;
+
+        let output = match (a.output, b.output) {
+            (ty::FnConverging(a_ty), ty::FnConverging(b_ty)) =>
+                Ok(ty::FnConverging(relation.relate(&a_ty, &b_ty)?)),
+            (ty::FnDiverging, ty::FnDiverging) =>
+                Ok(ty::FnDiverging),
+            (a, b) =>
+                Err(TypeError::ConvergenceMismatch(
+                    expected_found(relation, &(a != ty::FnDiverging), &(b != ty::FnDiverging)))),
+        }?;
+
+        return Ok(ty::FnSig {inputs: inputs,
+                             output: output,
+                             variadic: a.variadic});
+    }
+}
+
+fn relate_arg_vecs<'a,'tcx:'a,R>(relation: &mut R,
+                                 a_args: &[Ty<'tcx>],
+                                 b_args: &[Ty<'tcx>])
+                                 -> RelateResult<'tcx, Vec<Ty<'tcx>>>
+    where R: TypeRelation<'a,'tcx>
+{
+    if a_args.len() != b_args.len() {
+        return Err(TypeError::ArgCount);
+    }
+
+    a_args.iter().zip(b_args)
+          .map(|(a, b)| relation.relate_with_variance(ty::Contravariant, a, b))
+          .collect()
+}
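+
+// Illustrative note (not compiler code): relating argument types contravariantly
+// matches source-level fn-pointer subtyping, e.g. a pointer accepting a
+// shorter-lived argument may stand in where `&'static` arguments are expected:
+//
+//     fn contravariant<'a>(g: fn(&'a str)) -> fn(&'static str) { g }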
+
+impl<'a,'tcx:'a> Relate<'a,'tcx> for ast::Unsafety {
+    fn relate<R>(relation: &mut R,
+                 a: &ast::Unsafety,
+                 b: &ast::Unsafety)
+                 -> RelateResult<'tcx, ast::Unsafety>
+        where R: TypeRelation<'a,'tcx>
+    {
+        if a != b {
+            Err(TypeError::UnsafetyMismatch(expected_found(relation, a, b)))
+        } else {
+            Ok(*a)
+        }
+    }
+}
+
+impl<'a,'tcx:'a> Relate<'a,'tcx> for abi::Abi {
+    fn relate<R>(relation: &mut R,
+                 a: &abi::Abi,
+                 b: &abi::Abi)
+                 -> RelateResult<'tcx, abi::Abi>
+        where R: TypeRelation<'a,'tcx>
+    {
+        if a == b {
+            Ok(*a)
+        } else {
+            Err(TypeError::AbiMismatch(expected_found(relation, a, b)))
+        }
+    }
+}
+
+impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ProjectionTy<'tcx> {
+    fn relate<R>(relation: &mut R,
+                 a: &ty::ProjectionTy<'tcx>,
+                 b: &ty::ProjectionTy<'tcx>)
+                 -> RelateResult<'tcx, ty::ProjectionTy<'tcx>>
+        where R: TypeRelation<'a,'tcx>
+    {
+        if a.item_name != b.item_name {
+            Err(TypeError::ProjectionNameMismatched(
+                expected_found(relation, &a.item_name, &b.item_name)))
+        } else {
+            let trait_ref = relation.relate(&a.trait_ref, &b.trait_ref)?;
+            Ok(ty::ProjectionTy { trait_ref: trait_ref, item_name: a.item_name })
+        }
+    }
+}
+
+impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ProjectionPredicate<'tcx> {
+    fn relate<R>(relation: &mut R,
+                 a: &ty::ProjectionPredicate<'tcx>,
+                 b: &ty::ProjectionPredicate<'tcx>)
+                 -> RelateResult<'tcx, ty::ProjectionPredicate<'tcx>>
+        where R: TypeRelation<'a,'tcx>
+    {
+        let projection_ty = relation.relate(&a.projection_ty, &b.projection_ty)?;
+        let ty = relation.relate(&a.ty, &b.ty)?;
+        Ok(ty::ProjectionPredicate { projection_ty: projection_ty, ty: ty })
+    }
+}
+
+impl<'a,'tcx:'a> Relate<'a,'tcx> for Vec<ty::PolyProjectionPredicate<'tcx>> {
+    fn relate<R>(relation: &mut R,
+                 a: &Vec<ty::PolyProjectionPredicate<'tcx>>,
+                 b: &Vec<ty::PolyProjectionPredicate<'tcx>>)
+                 -> RelateResult<'tcx, Vec<ty::PolyProjectionPredicate<'tcx>>>
+        where R: TypeRelation<'a,'tcx>
+    {
+        // To be compatible, `a` and `b` must be for precisely the
+        // same set of traits and item names. We always require that
+        // projection bounds lists are sorted by trait-def-id and item-name,
+        // so we can just iterate through the lists pairwise, so long as they are the
+        // same length.
+        if a.len() != b.len() {
+            Err(TypeError::ProjectionBoundsLength(expected_found(relation, &a.len(), &b.len())))
+        } else {
+            a.iter().zip(b)
+                .map(|(a, b)| relation.relate(a, b))
+                .collect()
+        }
+    }
+}
+
+impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ExistentialBounds<'tcx> {
+    fn relate<R>(relation: &mut R,
+                 a: &ty::ExistentialBounds<'tcx>,
+                 b: &ty::ExistentialBounds<'tcx>)
+                 -> RelateResult<'tcx, ty::ExistentialBounds<'tcx>>
+        where R: TypeRelation<'a,'tcx>
+    {
+        let r =
+            relation.with_cause(
+                Cause::ExistentialRegionBound,
+                |relation| relation.relate_with_variance(ty::Contravariant,
+                                                         &a.region_bound,
+                                                         &b.region_bound))?;
+        let nb = relation.relate(&a.builtin_bounds, &b.builtin_bounds)?;
+        let pb = relation.relate(&a.projection_bounds, &b.projection_bounds)?;
+        Ok(ty::ExistentialBounds { region_bound: r,
+                                   builtin_bounds: nb,
+                                   projection_bounds: pb })
+    }
+}
+
+impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::BuiltinBounds {
+    fn relate<R>(relation: &mut R,
+                 a: &ty::BuiltinBounds,
+                 b: &ty::BuiltinBounds)
+                 -> RelateResult<'tcx, ty::BuiltinBounds>
+        where R: TypeRelation<'a,'tcx>
+    {
+        // Two sets of builtin bounds are only relatable if they are
+        // precisely the same (but see the coercion code).
+        if a != b {
+            Err(TypeError::BuiltinBoundsMismatch(expected_found(relation, a, b)))
+        } else {
+            Ok(*a)
+        }
+    }
+}
+
+impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::TraitRef<'tcx> {
+    fn relate<R>(relation: &mut R,
+                 a: &ty::TraitRef<'tcx>,
+                 b: &ty::TraitRef<'tcx>)
+                 -> RelateResult<'tcx, ty::TraitRef<'tcx>>
+        where R: TypeRelation<'a,'tcx>
+    {
+        // Different traits cannot be related
+        if a.def_id != b.def_id {
+            Err(TypeError::Traits(expected_found(relation, &a.def_id, &b.def_id)))
+        } else {
+            let substs = relate_item_substs(relation, a.def_id, a.substs, b.substs)?;
+            Ok(ty::TraitRef { def_id: a.def_id, substs: relation.tcx().mk_substs(substs) })
+        }
+    }
+}
+
+impl<'a,'tcx:'a> Relate<'a,'tcx> for Ty<'tcx> {
+    fn relate<R>(relation: &mut R,
+                 a: &Ty<'tcx>,
+                 b: &Ty<'tcx>)
+                 -> RelateResult<'tcx, Ty<'tcx>>
+        where R: TypeRelation<'a,'tcx>
+    {
+        relation.tys(a, b)
+    }
+}
+
+/// The main "type relation" routine. Note that this does not handle
+/// inference artifacts, so you should filter those out before calling
+/// it.
+pub fn super_relate_tys<'a,'tcx:'a,R>(relation: &mut R,
+                                      a: Ty<'tcx>,
+                                      b: Ty<'tcx>)
+                                      -> RelateResult<'tcx, Ty<'tcx>>
+    where R: TypeRelation<'a,'tcx>
+{
+    let tcx = relation.tcx();
+    let a_sty = &a.sty;
+    let b_sty = &b.sty;
+    debug!("super_tys: a_sty={:?} b_sty={:?}", a_sty, b_sty);
+    match (a_sty, b_sty) {
+        (&ty::TyInfer(_), _) |
+        (_, &ty::TyInfer(_)) =>
+        {
+            // The caller should handle these cases!
+            bug!("var types encountered in super_relate_tys")
+        }
+
+        (&ty::TyError, _) | (_, &ty::TyError) =>
+        {
+            Ok(tcx.types.err)
+        }
+
+        (&ty::TyChar, _) |
+        (&ty::TyBool, _) |
+        (&ty::TyInt(_), _) |
+        (&ty::TyUint(_), _) |
+        (&ty::TyFloat(_), _) |
+        (&ty::TyStr, _)
+            if a == b =>
+        {
+            Ok(a)
+        }
+
+        (&ty::TyParam(ref a_p), &ty::TyParam(ref b_p))
+            if a_p.idx == b_p.idx && a_p.space == b_p.space =>
+        {
+            Ok(a)
+        }
+
+        (&ty::TyEnum(a_def, a_substs), &ty::TyEnum(b_def, b_substs))
+            if a_def == b_def =>
+        {
+            let substs = relate_item_substs(relation, a_def.did, a_substs, b_substs)?;
+            Ok(tcx.mk_enum(a_def, tcx.mk_substs(substs)))
+        }
+
+        (&ty::TyTrait(ref a_), &ty::TyTrait(ref b_)) =>
+        {
+            let principal = relation.relate(&a_.principal, &b_.principal)?;
+            let bounds = relation.relate(&a_.bounds, &b_.bounds)?;
+            Ok(tcx.mk_trait(principal, bounds))
+        }
+
+        (&ty::TyStruct(a_def, a_substs), &ty::TyStruct(b_def, b_substs))
+            if a_def == b_def =>
+        {
+            let substs = relate_item_substs(relation, a_def.did, a_substs, b_substs)?;
+            Ok(tcx.mk_struct(a_def, tcx.mk_substs(substs)))
+        }
+
+        (&ty::TyClosure(a_id, ref a_substs),
+         &ty::TyClosure(b_id, ref b_substs))
+            if a_id == b_id =>
+        {
+            // All TyClosure types with the same id represent
+            // the (anonymous) type of the same closure expression. So
+            // all of their regions should be equated.
+            let substs = relation.relate(a_substs, b_substs)?;
+            Ok(tcx.mk_closure_from_closure_substs(a_id, substs))
+        }
+
+        (&ty::TyBox(a_inner), &ty::TyBox(b_inner)) =>
+        {
+            let typ = relation.relate(&a_inner, &b_inner)?;
+            Ok(tcx.mk_box(typ))
+        }
+
+        (&ty::TyRawPtr(ref a_mt), &ty::TyRawPtr(ref b_mt)) =>
+        {
+            let mt = relation.relate(a_mt, b_mt)?;
+            Ok(tcx.mk_ptr(mt))
+        }
+
+        (&ty::TyRef(a_r, ref a_mt), &ty::TyRef(b_r, ref b_mt)) =>
+        {
+            let r = relation.relate_with_variance(ty::Contravariant, a_r, b_r)?;
+            let mt = relation.relate(a_mt, b_mt)?;
+            Ok(tcx.mk_ref(tcx.mk_region(r), mt))
+        }
+
+        (&ty::TyArray(a_t, sz_a), &ty::TyArray(b_t, sz_b)) =>
+        {
+            let t = relation.relate(&a_t, &b_t)?;
+            if sz_a == sz_b {
+                Ok(tcx.mk_array(t, sz_a))
+            } else {
+                Err(TypeError::FixedArraySize(expected_found(relation, &sz_a, &sz_b)))
+            }
+        }
+
+        (&ty::TySlice(a_t), &ty::TySlice(b_t)) =>
+        {
+            let t = relation.relate(&a_t, &b_t)?;
+            Ok(tcx.mk_slice(t))
+        }
+
+        (&ty::TyTuple(ref as_), &ty::TyTuple(ref bs)) =>
+        {
+            if as_.len() == bs.len() {
+                let ts = as_.iter().zip(bs)
+                            .map(|(a, b)| relation.relate(a, b))
+                            .collect::<Result<_, _>>()?;
+                Ok(tcx.mk_tup(ts))
+            } else if !(as_.is_empty() || bs.is_empty()) {
+                Err(TypeError::TupleSize(
+                    expected_found(relation, &as_.len(), &bs.len())))
+            } else {
+                Err(TypeError::Sorts(expected_found(relation, &a, &b)))
+            }
+        }
+
+        (&ty::TyFnDef(a_def_id, a_substs, a_fty),
+         &ty::TyFnDef(b_def_id, b_substs, b_fty))
+            if a_def_id == b_def_id =>
+        {
+            let substs = relate_substs(relation, None, a_substs, b_substs)?;
+            let fty = relation.relate(a_fty, b_fty)?;
+            Ok(tcx.mk_fn_def(a_def_id, tcx.mk_substs(substs), fty))
+        }
+
+        (&ty::TyFnPtr(a_fty), &ty::TyFnPtr(b_fty)) =>
+        {
+            let fty = relation.relate(a_fty, b_fty)?;
+            Ok(tcx.mk_fn_ptr(fty))
+        }
+
+        (&ty::TyProjection(ref a_data), &ty::TyProjection(ref b_data)) =>
+        {
+            let projection_ty = relation.relate(a_data, b_data)?;
+            Ok(tcx.mk_projection(projection_ty.trait_ref, projection_ty.item_name))
+        }
+
+        _ =>
+        {
+            Err(TypeError::Sorts(expected_found(relation, &a, &b)))
+        }
+    }
+}
+
+impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ClosureSubsts<'tcx> {
+    fn relate<R>(relation: &mut R,
+                 a: &ty::ClosureSubsts<'tcx>,
+                 b: &ty::ClosureSubsts<'tcx>)
+                 -> RelateResult<'tcx, ty::ClosureSubsts<'tcx>>
+        where R: TypeRelation<'a,'tcx>
+    {
+        let func_substs = relate_substs(relation, None, a.func_substs, b.func_substs)?;
+        let upvar_tys = relation.relate_zip(&a.upvar_tys, &b.upvar_tys)?;
+        Ok(ty::ClosureSubsts { func_substs: relation.tcx().mk_substs(func_substs),
+                               upvar_tys: upvar_tys })
+    }
+}
+
+impl<'a,'tcx:'a> Relate<'a,'tcx> for Substs<'tcx> {
+    fn relate<R>(relation: &mut R,
+                 a: &Substs<'tcx>,
+                 b: &Substs<'tcx>)
+                 -> RelateResult<'tcx, Substs<'tcx>>
+        where R: TypeRelation<'a,'tcx>
+    {
+        relate_substs(relation, None, a, b)
+    }
+}
+
+impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::Region {
+    fn relate<R>(relation: &mut R,
+                 a: &ty::Region,
+                 b: &ty::Region)
+                 -> RelateResult<'tcx, ty::Region>
+        where R: TypeRelation<'a,'tcx>
+    {
+        relation.regions(*a, *b)
+    }
+}
+
+impl<'a,'tcx:'a,T> Relate<'a,'tcx> for ty::Binder<T>
+    where T: Relate<'a,'tcx>
+{
+    fn relate<R>(relation: &mut R,
+                 a: &ty::Binder<T>,
+                 b: &ty::Binder<T>)
+                 -> RelateResult<'tcx, ty::Binder<T>>
+        where R: TypeRelation<'a,'tcx>
+    {
+        relation.binders(a, b)
+    }
+}
+
+impl<'a,'tcx:'a,T> Relate<'a,'tcx> for Rc<T>
+    where T: Relate<'a,'tcx>
+{
+    fn relate<R>(relation: &mut R,
+                 a: &Rc<T>,
+                 b: &Rc<T>)
+                 -> RelateResult<'tcx, Rc<T>>
+        where R: TypeRelation<'a,'tcx>
+    {
+        let a: &T = a;
+        let b: &T = b;
+        Ok(Rc::new(relation.relate(a, b)?))
+    }
+}
+
+impl<'a,'tcx:'a,T> Relate<'a,'tcx> for Box<T>
+    where T: Relate<'a,'tcx>
+{
+    fn relate<R>(relation: &mut R,
+                 a: &Box<T>,
+                 b: &Box<T>)
+                 -> RelateResult<'tcx, Box<T>>
+        where R: TypeRelation<'a,'tcx>
+    {
+        let a: &T = a;
+        let b: &T = b;
+        Ok(Box::new(relation.relate(a, b)?))
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Error handling
+
+pub fn expected_found<'a,'tcx:'a,R,T>(relation: &mut R,
+                                      a: &T,
+                                      b: &T)
+                                      -> ExpectedFound<T>
+    where R: TypeRelation<'a,'tcx>, T: Clone
+{
+    expected_found_bool(relation.a_is_expected(), a, b)
+}
+
+pub fn expected_found_bool<T>(a_is_expected: bool,
+                              a: &T,
+                              b: &T)
+                              -> ExpectedFound<T>
+    where T: Clone
+{
+    let a = a.clone();
+    let b = b.clone();
+    if a_is_expected {
+        ExpectedFound {expected: a, found: b}
+    } else {
+        ExpectedFound {expected: b, found: a}
+    }
+}
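+
+// For instance (illustrative only), with `a_is_expected == false` the pair is
+// swapped, so diagnostics still print the caller's notion of "expected" first:
+//
+//     let ef = expected_found_bool(false, &1, &2);
+//     assert_eq!((ef.expected, ef.found), (2, 1));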
diff --git a/src/librustc/ty/structural_impls.rs b/src/librustc/ty/structural_impls.rs
new file mode 100644 (file)
index 0000000..bbf6d03
--- /dev/null
+++ b/src/librustc/ty/structural_impls.rs
@@ -0,0 +1,785 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use ty::subst::{self, VecPerParamSpace};
+use traits;
+use ty::{self, Lift, TraitRef, Ty, TyCtxt};
+use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
+
+use std::rc::Rc;
+use syntax::abi;
+use syntax::ptr::P;
+
+use hir;
+
+///////////////////////////////////////////////////////////////////////////
+// Lift implementations
+
+impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>> Lift<'tcx> for (A, B) {
+    type Lifted = (A::Lifted, B::Lifted);
+    fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&self.0).and_then(|a| tcx.lift(&self.1).map(|b| (a, b)))
+    }
+}
+
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for [T] {
+    type Lifted = Vec<T::Lifted>;
+    fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        // type annotation needed to inform `projection_must_outlive`
+        let mut result : Vec<<T as Lift<'tcx>>::Lifted>
+            = Vec::with_capacity(self.len());
+        for x in self {
+            if let Some(value) = tcx.lift(x) {
+                result.push(value);
+            } else {
+                return None;
+            }
+        }
+        Some(result)
+    }
+}
+
+impl<'tcx> Lift<'tcx> for ty::Region {
+    type Lifted = Self;
+    fn lift_to_tcx(&self, _: &TyCtxt<'tcx>) -> Option<ty::Region> {
+        Some(*self)
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for TraitRef<'a> {
+    type Lifted = TraitRef<'tcx>;
+    fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<TraitRef<'tcx>> {
+        tcx.lift(&self.substs).map(|substs| TraitRef {
+            def_id: self.def_id,
+            substs: substs
+        })
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::TraitPredicate<'a> {
+    type Lifted = ty::TraitPredicate<'tcx>;
+    fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<ty::TraitPredicate<'tcx>> {
+        tcx.lift(&self.trait_ref).map(|trait_ref| ty::TraitPredicate {
+            trait_ref: trait_ref
+        })
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::EquatePredicate<'a> {
+    type Lifted = ty::EquatePredicate<'tcx>;
+    fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<ty::EquatePredicate<'tcx>> {
+        tcx.lift(&(self.0, self.1)).map(|(a, b)| ty::EquatePredicate(a, b))
+    }
+}
+
+impl<'tcx, A: Copy+Lift<'tcx>, B: Copy+Lift<'tcx>> Lift<'tcx> for ty::OutlivesPredicate<A, B> {
+    type Lifted = ty::OutlivesPredicate<A::Lifted, B::Lifted>;
+    fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&(self.0, self.1)).map(|(a, b)| ty::OutlivesPredicate(a, b))
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionPredicate<'a> {
+    type Lifted = ty::ProjectionPredicate<'tcx>;
+    fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<ty::ProjectionPredicate<'tcx>> {
+        tcx.lift(&(self.projection_ty.trait_ref, self.ty)).map(|(trait_ref, ty)| {
+            ty::ProjectionPredicate {
+                projection_ty: ty::ProjectionTy {
+                    trait_ref: trait_ref,
+                    item_name: self.projection_ty.item_name
+                },
+                ty: ty
+            }
+        })
+    }
+}
+
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::Binder<T> {
+    type Lifted = ty::Binder<T::Lifted>;
+    fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&self.0).map(|x| ty::Binder(x))
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// TypeFoldable implementations.
+//
+// Ideally, each type should invoke `folder.fold_foo(self)` and
+// nothing else. In some cases, though, we haven't gotten around to
+// adding methods on the `folder` yet, and thus the folding is
+// hard-coded here. This is less-flexible, because folders cannot
+// override the behavior, but there are a lot of random types and one
+// can easily refactor the folding into the TypeFolder trait as
+// needed.
+
+macro_rules! CopyImpls {
+    ($($ty:ty),+) => {
+        $(
+            impl<'tcx> TypeFoldable<'tcx> for $ty {
+                fn super_fold_with<F:TypeFolder<'tcx>>(&self, _: &mut F) -> $ty {
+                    *self
+                }
+
+                fn super_visit_with<F: TypeVisitor<'tcx>>(&self, _: &mut F) -> bool {
+                    false
+                }
+            }
+        )+
+    }
+}
+
+CopyImpls! { (), hir::Unsafety, abi::Abi }
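+
+// For reference, the `CopyImpls!` invocation above expands each listed type to
+// an impl equivalent to the following (shown for `()`):
+//
+//     impl<'tcx> TypeFoldable<'tcx> for () {
+//         fn super_fold_with<F: TypeFolder<'tcx>>(&self, _: &mut F) -> () { *self }
+//         fn super_visit_with<F: TypeVisitor<'tcx>>(&self, _: &mut F) -> bool { false }
+//     }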
+
+impl<'tcx, T:TypeFoldable<'tcx>, U:TypeFoldable<'tcx>> TypeFoldable<'tcx> for (T, U) {
+    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> (T, U) {
+        (self.0.fold_with(folder), self.1.fold_with(folder))
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.0.visit_with(visitor) || self.1.visit_with(visitor)
+    }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Option<T> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        self.as_ref().map(|t| t.fold_with(folder))
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.iter().any(|t| t.visit_with(visitor))
+    }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Rc<T> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        Rc::new((**self).fold_with(folder))
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        (**self).visit_with(visitor)
+    }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<T> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        let content: T = (**self).fold_with(folder);
+        box content
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        (**self).visit_with(visitor)
+    }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Vec<T> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        self.iter().map(|t| t.fold_with(folder)).collect()
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.iter().any(|t| t.visit_with(visitor))
+    }
+}
+
+impl<'tcx, T:TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder<T> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::Binder(self.0.fold_with(folder))
+    }
+
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_binder(self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.0.visit_with(visitor)
+    }
+
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        visitor.visit_binder(self)
+    }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for P<[T]> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        self.iter().map(|t| t.fold_with(folder)).collect()
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.iter().any(|t| t.visit_with(visitor))
+    }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for VecPerParamSpace<T> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        self.map(|elem| elem.fold_with(folder))
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.iter().any(|elem| elem.visit_with(visitor))
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::TraitTy<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::TraitTy {
+            principal: self.principal.fold_with(folder),
+            bounds: self.bounds.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.principal.visit_with(visitor) || self.bounds.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        let sty = match self.sty {
+            ty::TyBox(typ) => ty::TyBox(typ.fold_with(folder)),
+            ty::TyRawPtr(ref tm) => ty::TyRawPtr(tm.fold_with(folder)),
+            ty::TyArray(typ, sz) => ty::TyArray(typ.fold_with(folder), sz),
+            ty::TySlice(typ) => ty::TySlice(typ.fold_with(folder)),
+            ty::TyEnum(tid, ref substs) => {
+                let substs = substs.fold_with(folder);
+                ty::TyEnum(tid, folder.tcx().mk_substs(substs))
+            }
+            ty::TyTrait(ref trait_ty) => ty::TyTrait(trait_ty.fold_with(folder)),
+            ty::TyTuple(ref ts) => ty::TyTuple(ts.fold_with(folder)),
+            ty::TyFnDef(def_id, substs, ref f) => {
+                let substs = substs.fold_with(folder);
+                let bfn = f.fold_with(folder);
+                ty::TyFnDef(def_id,
+                            folder.tcx().mk_substs(substs),
+                            folder.tcx().mk_bare_fn(bfn))
+            }
+            ty::TyFnPtr(ref f) => {
+                let bfn = f.fold_with(folder);
+                ty::TyFnPtr(folder.tcx().mk_bare_fn(bfn))
+            }
+            ty::TyRef(r, ref tm) => {
+                let r = r.fold_with(folder);
+                ty::TyRef(folder.tcx().mk_region(r), tm.fold_with(folder))
+            }
+            ty::TyStruct(did, ref substs) => {
+                let substs = substs.fold_with(folder);
+                ty::TyStruct(did, folder.tcx().mk_substs(substs))
+            }
+            ty::TyClosure(did, ref substs) => {
+                ty::TyClosure(did, substs.fold_with(folder))
+            }
+            ty::TyProjection(ref data) => ty::TyProjection(data.fold_with(folder)),
+            ty::TyBool | ty::TyChar | ty::TyStr | ty::TyInt(_) |
+            ty::TyUint(_) | ty::TyFloat(_) | ty::TyError | ty::TyInfer(_) |
+            ty::TyParam(..) => self.sty.clone(),
+        };
+        folder.tcx().mk_ty(sty)
+    }
+
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_ty(*self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        match self.sty {
+            ty::TyBox(typ) => typ.visit_with(visitor),
+            ty::TyRawPtr(ref tm) => tm.visit_with(visitor),
+            ty::TyArray(typ, _sz) => typ.visit_with(visitor),
+            ty::TySlice(typ) => typ.visit_with(visitor),
+            ty::TyEnum(_tid, ref substs) => substs.visit_with(visitor),
+            ty::TyTrait(ref trait_ty) => trait_ty.visit_with(visitor),
+            ty::TyTuple(ref ts) => ts.visit_with(visitor),
+            ty::TyFnDef(_, substs, ref f) => {
+                substs.visit_with(visitor) || f.visit_with(visitor)
+            }
+            ty::TyFnPtr(ref f) => f.visit_with(visitor),
+            ty::TyRef(r, ref tm) => r.visit_with(visitor) || tm.visit_with(visitor),
+            ty::TyStruct(_did, ref substs) => substs.visit_with(visitor),
+            ty::TyClosure(_did, ref substs) => substs.visit_with(visitor),
+            ty::TyProjection(ref data) => data.visit_with(visitor),
+            ty::TyBool | ty::TyChar | ty::TyStr | ty::TyInt(_) |
+            ty::TyUint(_) | ty::TyFloat(_) | ty::TyError | ty::TyInfer(_) |
+            ty::TyParam(..) => false,
+        }
+    }
+
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        visitor.visit_ty(self)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::BareFnTy<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::BareFnTy { sig: self.sig.fold_with(folder),
+                       abi: self.abi,
+                       unsafety: self.unsafety }
+    }
+
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_bare_fn_ty(self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.sig.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::ClosureTy<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+       ty::ClosureTy {
+            sig: self.sig.fold_with(folder),
+            unsafety: self.unsafety,
+            abi: self.abi,
+        }
+    }
+
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_closure_ty(self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.sig.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::TypeAndMut<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::TypeAndMut { ty: self.ty.fold_with(folder), mutbl: self.mutbl }
+    }
+
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_mt(self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.ty.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::FnOutput<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        match *self {
+            ty::FnConverging(ref ty) => ty::FnConverging(ty.fold_with(folder)),
+            ty::FnDiverging => ty::FnDiverging
+        }
+    }
+
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_output(self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        match *self {
+            ty::FnConverging(ref ty) => ty.visit_with(visitor),
+            ty::FnDiverging => false,
+        }
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::FnSig<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::FnSig { inputs: self.inputs.fold_with(folder),
+                    output: self.output.fold_with(folder),
+                    variadic: self.variadic }
+    }
+
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_fn_sig(self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.inputs.visit_with(visitor) || self.output.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::TraitRef<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        let substs = self.substs.fold_with(folder);
+        ty::TraitRef {
+            def_id: self.def_id,
+            substs: folder.tcx().mk_substs(substs),
+        }
+    }
+
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_trait_ref(self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.substs.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::ImplHeader<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::ImplHeader {
+            impl_def_id: self.impl_def_id,
+            self_ty: self.self_ty.fold_with(folder),
+            trait_ref: self.trait_ref.map(|t| t.fold_with(folder)),
+            predicates: self.predicates.iter().map(|p| p.fold_with(folder)).collect(),
+        }
+    }
+
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_impl_header(self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.self_ty.visit_with(visitor) ||
+            self.trait_ref.map(|r| r.visit_with(visitor)).unwrap_or(false) ||
+            self.predicates.iter().any(|p| p.visit_with(visitor))
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::Region {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, _folder: &mut F) -> Self {
+        *self
+    }
+
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_region(*self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> bool {
+        false
+    }
+
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        visitor.visit_region(*self)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for subst::Substs<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        subst::Substs { regions: self.regions.fold_with(folder),
+                        types: self.types.fold_with(folder) }
+    }
+
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_substs(self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.types.visit_with(visitor) || self.regions.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::ClosureSubsts<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        let func_substs = self.func_substs.fold_with(folder);
+        ty::ClosureSubsts {
+            func_substs: folder.tcx().mk_substs(func_substs),
+            upvar_tys: self.upvar_tys.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.func_substs.visit_with(visitor) || self.upvar_tys.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::ItemSubsts<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::ItemSubsts {
+            substs: self.substs.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.substs.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::adjustment::AutoRef<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        match *self {
+            ty::adjustment::AutoPtr(r, m) => {
+                let r = r.fold_with(folder);
+                ty::adjustment::AutoPtr(folder.tcx().mk_region(r), m)
+            }
+            ty::adjustment::AutoUnsafe(m) => ty::adjustment::AutoUnsafe(m)
+        }
+    }
+
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_autoref(self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        match *self {
+            ty::adjustment::AutoPtr(r, _m) => r.visit_with(visitor),
+            ty::adjustment::AutoUnsafe(_m) => false,
+        }
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::BuiltinBounds {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, _folder: &mut F) -> Self {
+        *self
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> bool {
+        false
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::ExistentialBounds<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::ExistentialBounds {
+            region_bound: self.region_bound.fold_with(folder),
+            builtin_bounds: self.builtin_bounds,
+            projection_bounds: self.projection_bounds.fold_with(folder),
+        }
+    }
+
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_existential_bounds(self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.region_bound.visit_with(visitor) || self.projection_bounds.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::TypeParameterDef<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::TypeParameterDef {
+            name: self.name,
+            def_id: self.def_id,
+            space: self.space,
+            index: self.index,
+            default: self.default.fold_with(folder),
+            default_def_id: self.default_def_id,
+            object_lifetime_default: self.object_lifetime_default.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.default.visit_with(visitor) ||
+            self.object_lifetime_default.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::ObjectLifetimeDefault {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        match *self {
+            ty::ObjectLifetimeDefault::Ambiguous =>
+                ty::ObjectLifetimeDefault::Ambiguous,
+
+            ty::ObjectLifetimeDefault::BaseDefault =>
+                ty::ObjectLifetimeDefault::BaseDefault,
+
+            ty::ObjectLifetimeDefault::Specific(r) =>
+                ty::ObjectLifetimeDefault::Specific(r.fold_with(folder)),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        match *self {
+            ty::ObjectLifetimeDefault::Specific(r) => r.visit_with(visitor),
+            _ => false,
+        }
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::RegionParameterDef {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::RegionParameterDef {
+            name: self.name,
+            def_id: self.def_id,
+            space: self.space,
+            index: self.index,
+            bounds: self.bounds.fold_with(folder)
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.bounds.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::Generics<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::Generics {
+            types: self.types.fold_with(folder),
+            regions: self.regions.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.types.visit_with(visitor) || self.regions.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::GenericPredicates<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::GenericPredicates {
+            predicates: self.predicates.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.predicates.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        match *self {
+            ty::Predicate::Trait(ref a) =>
+                ty::Predicate::Trait(a.fold_with(folder)),
+            ty::Predicate::Equate(ref binder) =>
+                ty::Predicate::Equate(binder.fold_with(folder)),
+            ty::Predicate::RegionOutlives(ref binder) =>
+                ty::Predicate::RegionOutlives(binder.fold_with(folder)),
+            ty::Predicate::TypeOutlives(ref binder) =>
+                ty::Predicate::TypeOutlives(binder.fold_with(folder)),
+            ty::Predicate::Projection(ref binder) =>
+                ty::Predicate::Projection(binder.fold_with(folder)),
+            ty::Predicate::WellFormed(data) =>
+                ty::Predicate::WellFormed(data.fold_with(folder)),
+            ty::Predicate::ObjectSafe(trait_def_id) =>
+                ty::Predicate::ObjectSafe(trait_def_id),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        match *self {
+            ty::Predicate::Trait(ref a) => a.visit_with(visitor),
+            ty::Predicate::Equate(ref binder) => binder.visit_with(visitor),
+            ty::Predicate::RegionOutlives(ref binder) => binder.visit_with(visitor),
+            ty::Predicate::TypeOutlives(ref binder) => binder.visit_with(visitor),
+            ty::Predicate::Projection(ref binder) => binder.visit_with(visitor),
+            ty::Predicate::WellFormed(data) => data.visit_with(visitor),
+            ty::Predicate::ObjectSafe(_trait_def_id) => false,
+        }
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::ProjectionPredicate<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::ProjectionPredicate {
+            projection_ty: self.projection_ty.fold_with(folder),
+            ty: self.ty.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.projection_ty.visit_with(visitor) || self.ty.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::ProjectionTy<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::ProjectionTy {
+            trait_ref: self.trait_ref.fold_with(folder),
+            item_name: self.item_name,
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.trait_ref.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::InstantiatedPredicates<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::InstantiatedPredicates {
+            predicates: self.predicates.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.predicates.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::EquatePredicate<'tcx> {
+    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::EquatePredicate(self.0.fold_with(folder),
+                            self.1.fold_with(folder))
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.0.visit_with(visitor) || self.1.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::TraitPredicate<'tcx> {
+    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::TraitPredicate {
+            trait_ref: self.trait_ref.fold_with(folder)
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.trait_ref.visit_with(visitor)
+    }
+}
+
+impl<'tcx,T,U> TypeFoldable<'tcx> for ty::OutlivesPredicate<T,U>
+    where T : TypeFoldable<'tcx>,
+          U : TypeFoldable<'tcx>,
+{
+    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::OutlivesPredicate(self.0.fold_with(folder),
+                              self.1.fold_with(folder))
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.0.visit_with(visitor) || self.1.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::ClosureUpvar<'tcx> {
+    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::ClosureUpvar {
+            def: self.def,
+            span: self.span,
+            ty: self.ty.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.ty.visit_with(visitor)
+    }
+}
+
+impl<'a, 'tcx> TypeFoldable<'tcx> for ty::ParameterEnvironment<'a, 'tcx> where 'tcx: 'a {
+    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::ParameterEnvironment {
+            tcx: self.tcx,
+            free_substs: self.free_substs.fold_with(folder),
+            implicit_region_bound: self.implicit_region_bound.fold_with(folder),
+            caller_bounds: self.caller_bounds.fold_with(folder),
+            selection_cache: traits::SelectionCache::new(),
+            evaluation_cache: traits::EvaluationCache::new(),
+            free_id_outlive: self.free_id_outlive,
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.free_substs.visit_with(visitor) ||
+            self.implicit_region_bound.visit_with(visitor) ||
+            self.caller_bounds.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::TypeScheme<'tcx>  {
+    fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        ty::TypeScheme {
+            generics: self.generics.fold_with(folder),
+            ty: self.ty.fold_with(folder),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.generics.visit_with(visitor) || self.ty.visit_with(visitor)
+    }
+}
diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs
new file mode 100644 (file)
index 0000000..c0fb60d
--- /dev/null
+++ b/src/librustc/ty/sty.rs
@@ -0,0 +1,1239 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! This module contains TypeVariants and its major components
+
+use middle::cstore;
+use hir::def_id::DefId;
+use middle::region;
+use ty::subst::{self, Substs};
+use traits;
+use ty::{self, AdtDef, ToPredicate, TypeFlags, Ty, TyCtxt, TyS, TypeFoldable};
+use util::common::ErrorReported;
+
+use collections::enum_set::{self, EnumSet, CLike};
+use std::fmt;
+use std::ops;
+use std::mem;
+use syntax::abi;
+use syntax::ast::{self, Name};
+use syntax::parse::token::special_idents;
+
+use serialize::{Decodable, Decoder};
+
+use hir;
+
+use self::FnOutput::*;
+use self::InferTy::*;
+use self::TypeVariants::*;
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+pub struct TypeAndMut<'tcx> {
+    pub ty: Ty<'tcx>,
+    pub mutbl: hir::Mutability,
+}
+
+#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash,
+         RustcEncodable, RustcDecodable, Copy)]
+/// A "free" region `fr` can be interpreted as "some region
+/// at least as big as the scope `fr.scope`".
+pub struct FreeRegion {
+    pub scope: region::CodeExtent,
+    pub bound_region: BoundRegion
+}
+
+#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash,
+         RustcEncodable, RustcDecodable, Copy)]
+pub enum BoundRegion {
+    /// An anonymous region parameter for a given fn (&T)
+    BrAnon(u32),
+
+    /// Named region parameters for functions ('a in &'a T)
+    ///
+    /// The def-id is needed to distinguish free regions in
+    /// the event of shadowing.
+    BrNamed(DefId, Name),
+
+    /// Fresh bound identifiers created during GLB computations.
+    BrFresh(u32),
+
+    // Anonymous region for the implicit env pointer parameter
+    // to a closure
+    BrEnv
+}
+
+// NB: If you change this, you'll probably want to change the corresponding
+// AST structure in libsyntax/ast.rs as well.
+#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+pub enum TypeVariants<'tcx> {
+    /// The primitive boolean type. Written as `bool`.
+    TyBool,
+
+    /// The primitive character type; holds a Unicode scalar value
+    /// (a non-surrogate code point).  Written as `char`.
+    TyChar,
+
+    /// A primitive signed integer type. For example, `i32`.
+    TyInt(ast::IntTy),
+
+    /// A primitive unsigned integer type. For example, `u32`.
+    TyUint(ast::UintTy),
+
+    /// A primitive floating-point type. For example, `f64`.
+    TyFloat(ast::FloatTy),
+
+    /// An enumerated type, defined with `enum`.
+    ///
+    /// Substs here, possibly against intuition, *may* contain `TyParam`s.
+    /// That is, even after substitution it is possible that there are type
+    /// variables. This happens when the `TyEnum` corresponds to an enum
+    /// definition and not a concrete use of it. To get the correct `TyEnum`
+    /// from the tcx, use the `NodeId` from the `ast::Ty` and look it up in
+    /// the `ast_ty_to_ty_cache`. This is probably true for `TyStruct` as
+    /// well.
+    TyEnum(AdtDef<'tcx>, &'tcx Substs<'tcx>),
+
+    /// A structure type, defined with `struct`.
+    ///
+    /// See warning about substitutions for enumerated types.
+    TyStruct(AdtDef<'tcx>, &'tcx Substs<'tcx>),
+
+    /// `Box<T>`; this is nominally a struct in the documentation, but is
+    /// special-cased internally. For example, it is possible to implicitly
+    /// move the contents of a box out of that box, and methods of any type
+    /// can have type `Box<Self>`.
+    TyBox(Ty<'tcx>),
+
+    /// The pointee of a string slice. Written as `str`.
+    TyStr,
+
+    /// An array with the given length. Written as `[T; n]`.
+    TyArray(Ty<'tcx>, usize),
+
+    /// The pointee of an array slice.  Written as `[T]`.
+    TySlice(Ty<'tcx>),
+
+    /// A raw pointer. Written as `*mut T` or `*const T`
+    TyRawPtr(TypeAndMut<'tcx>),
+
+    /// A reference; a pointer with an associated lifetime. Written as
+    /// `&'a mut T` or `&'a T`.
+    TyRef(&'tcx Region, TypeAndMut<'tcx>),
+
+    /// The anonymous type of a function declaration/definition. Each
+    /// function has a unique type.
+    TyFnDef(DefId, &'tcx Substs<'tcx>, &'tcx BareFnTy<'tcx>),
+
+    /// A pointer to a function.  Written as `fn() -> i32`.
+    /// FIXME: This is currently also used to represent the callee of a method;
+    /// see ty::MethodCallee etc.
+    TyFnPtr(&'tcx BareFnTy<'tcx>),
+
+    /// A trait, defined with `trait`.
+    TyTrait(Box<TraitTy<'tcx>>),
+
+    /// The anonymous type of a closure. Used to represent the type of
+    /// `|a| a`.
+    TyClosure(DefId, Box<ClosureSubsts<'tcx>>),
+
+    /// A tuple type.  For example, `(i32, bool)`.
+    TyTuple(Vec<Ty<'tcx>>),
+
+    /// The projection of an associated type.  For example,
+    /// `<T as Trait<..>>::N`.
+    TyProjection(ProjectionTy<'tcx>),
+
+    /// A type parameter; for example, `T` in `fn f<T>(x: T) {}`.
+    TyParam(ParamTy),
+
+    /// A type variable used during type-checking.
+    TyInfer(InferTy),
+
+    /// A placeholder for a type which could not be computed; this is
+    /// propagated to avoid useless error messages.
+    TyError,
+}
+
+/// A closure can be modeled as a struct that looks like:
+///
+///     struct Closure<'l0...'li, T0...Tj, U0...Uk> {
+///         upvar0: U0,
+///         ...
+///         upvark: Uk
+///     }
+///
+/// where 'l0...'li and T0...Tj are the lifetime and type parameters
+/// in scope on the function that defined the closure, and U0...Uk are
+/// type parameters representing the types of its upvars (borrowed, if
+/// appropriate).
+///
+/// So, for example, given this function:
+///
+///     fn foo<'a, T>(data: &'a mut T) {
+///          do(|| data.count += 1)
+///     }
+///
+/// the type of the closure would be something like:
+///
+///     struct Closure<'a, T, U0> {
+///         data: U0
+///     }
+///
+/// Note that the type of the upvar is not specified in the struct.
+/// You may wonder how the impl would then be able to use the upvar,
+/// if it doesn't know its type? The answer is that the impl is
+/// (conceptually) not fully generic over Closure but rather tied to
+/// instances with the expected upvar types:
+///
+///     impl<'b, 'a, T> FnMut() for Closure<'a, T, &'b mut &'a mut T> {
+///         ...
+///     }
+///
+/// You can see that the *impl* fully specifies the type of the upvar
+/// and thus knows full well that `data` has type `&'b mut &'a mut T`.
+/// (Here, I am assuming that `data` is mut-borrowed.)
+///
+/// Now, the last question you may ask is: Why include the upvar types
+/// as extra type parameters? The reason for this design is that the
+/// upvar types can reference lifetimes that are internal to the
+/// creating function. In the example above, the lifetime
+/// `'b` represents the extent of the closure itself; this is some
+/// subset of `foo`, probably just the extent of the call to
+/// `do()`. If we just had the lifetime/type parameters from the
+/// enclosing function, we couldn't name this lifetime `'b`. Note that
+/// there can also be lifetimes in the types of the upvars themselves,
+/// if one of them happens to be a reference to something that the
+/// creating fn owns.
+///
+/// OK, you say, so why not create a more minimal set of parameters
+/// that just includes the extra lifetime parameters? The answer is
+/// primarily that it would be hard --- we don't know at the time when
+/// we create the closure type what the full types of the upvars are,
+/// nor do we know which are borrowed and which are not. In this
+/// design, we can just supply a fresh type parameter and figure that
+/// out later.
+///
+/// All right, you say, but why include the type parameters from the
+/// original function then? The answer is that trans may need them
+/// when monomorphizing, and they may not appear in the upvars.  A
+/// closure could capture no variables but still make use of some
+/// in-scope type parameter with a bound (e.g., if our example above
+/// had an extra `U: Default`, and the closure called `U::default()`).
+///
+/// There is another reason. This design (implicitly) prohibits
+/// closures from capturing themselves (except via a trait
+/// object). This simplifies closure inference considerably, since it
+/// means that when we infer the kind of a closure or its upvars, we
+/// don't have to handle cycles where the decisions we make for
+/// closure C wind up influencing the decisions we ought to make for
+/// closure C (which would then require fixed point iteration to
+/// handle). Plus it fixes an ICE. :P
+#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
+pub struct ClosureSubsts<'tcx> {
+    /// Lifetime and type parameters from the enclosing function.
+    /// These are separated out because trans wants to pass them around
+    /// when monomorphizing.
+    pub func_substs: &'tcx Substs<'tcx>,
+
+    /// The types of the upvars. The list parallels the freevars and
+    /// `upvar_borrows` lists. These are kept distinct so that we can
+    /// easily index into them.
+    pub upvar_tys: Vec<Ty<'tcx>>
+}
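+
+// A hedged illustration (editorial sketch, not used by the compiler): for
+// the `foo` example in the doc comment above, the closure's substs would
+// conceptually carry
+//
+//     func_substs: ['a, T]              // generics of the enclosing fn
+//     upvar_tys:   [&'b mut &'a mut T]  // the single upvar, mut-borrowed
+//
+// where `'b` is the internal lifetime of the closure itself.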
+
+impl<'tcx> Decodable for &'tcx ClosureSubsts<'tcx> {
+    fn decode<S: Decoder>(s: &mut S) -> Result<&'tcx ClosureSubsts<'tcx>, S::Error> {
+        let closure_substs = Decodable::decode(s)?;
+        let dummy_def_id: DefId = unsafe { mem::zeroed() };
+
+        cstore::tls::with_decoding_context(s, |dcx, _| {
+            // Intern the value
+            let ty = dcx.tcx().mk_closure_from_closure_substs(dummy_def_id,
+                                                              Box::new(closure_substs));
+            match ty.sty {
+                TyClosure(_, ref closure_substs) => Ok(&**closure_substs),
+                _ => bug!()
+            }
+        })
+    }
+}
+
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct TraitTy<'tcx> {
+    pub principal: ty::PolyTraitRef<'tcx>,
+    pub bounds: ExistentialBounds<'tcx>,
+}
+
+impl<'tcx> TraitTy<'tcx> {
+    pub fn principal_def_id(&self) -> DefId {
+        self.principal.0.def_id
+    }
+
+    /// Object types don't have a self-type specified. Therefore, when
+    /// converting the principal trait-ref into a normal trait-ref,
+    /// you must give *some* self-type. A common choice is `mk_err()`
+    /// or some skolemized type.
+    pub fn principal_trait_ref_with_self_ty(&self,
+                                            tcx: &TyCtxt<'tcx>,
+                                            self_ty: Ty<'tcx>)
+                                            -> ty::PolyTraitRef<'tcx>
+    {
+        // otherwise the escaping regions would be captured by the binder
+        assert!(!self_ty.has_escaping_regions());
+
+        ty::Binder(TraitRef {
+            def_id: self.principal.0.def_id,
+            substs: tcx.mk_substs(self.principal.0.substs.with_self_ty(self_ty)),
+        })
+    }
+
+    pub fn projection_bounds_with_self_ty(&self,
+                                          tcx: &TyCtxt<'tcx>,
+                                          self_ty: Ty<'tcx>)
+                                          -> Vec<ty::PolyProjectionPredicate<'tcx>>
+    {
+        // otherwise the escaping regions would be captured by the binders
+        assert!(!self_ty.has_escaping_regions());
+
+        self.bounds.projection_bounds.iter()
+            .map(|in_poly_projection_predicate| {
+                let in_projection_ty = &in_poly_projection_predicate.0.projection_ty;
+                let substs = tcx.mk_substs(in_projection_ty.trait_ref.substs.with_self_ty(self_ty));
+                let trait_ref = ty::TraitRef::new(in_projection_ty.trait_ref.def_id,
+                                              substs);
+                let projection_ty = ty::ProjectionTy {
+                    trait_ref: trait_ref,
+                    item_name: in_projection_ty.item_name
+                };
+                ty::Binder(ty::ProjectionPredicate {
+                    projection_ty: projection_ty,
+                    ty: in_poly_projection_predicate.0.ty
+                })
+            })
+            .collect()
+    }
+}
+
+/// A complete reference to a trait. These take numerous guises in syntax,
+/// but perhaps the most recognizable form is in a where clause:
+///
+///     T : Foo<U>
+///
+/// This would be represented by a trait-reference where the def-id is the
+/// def-id for the trait `Foo` and the substs defines `T` as parameter 0 in the
+/// `SelfSpace` and `U` as parameter 0 in the `TypeSpace`.
+///
+/// Trait references also appear in object types like `Foo<U>`, but in
+/// that case the `Self` parameter is absent from the substitutions.
+///
+/// Note that a `TraitRef` introduces a level of region binding, to
+/// account for higher-ranked trait bounds like `T : for<'a> Foo<&'a
+/// U>` or higher-ranked object types.
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub struct TraitRef<'tcx> {
+    pub def_id: DefId,
+    pub substs: &'tcx Substs<'tcx>,
+}
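+
+// A hedged illustration (editorial): for the where-clause `T : Foo<U>`
+// discussed above, `def_id` names the trait `Foo`, and the `Substs` place
+// `T` at index 0 of the `SelfSpace` and `U` at index 0 of the `TypeSpace`;
+// the `FnSpace` remains empty.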
+
+pub type PolyTraitRef<'tcx> = Binder<TraitRef<'tcx>>;
+
+impl<'tcx> PolyTraitRef<'tcx> {
+    pub fn self_ty(&self) -> Ty<'tcx> {
+        self.0.self_ty()
+    }
+
+    pub fn def_id(&self) -> DefId {
+        self.0.def_id
+    }
+
+    pub fn substs(&self) -> &'tcx Substs<'tcx> {
+        // FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<>
+        self.0.substs
+    }
+
+    pub fn input_types(&self) -> &[Ty<'tcx>] {
+        // FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<>
+        self.0.input_types()
+    }
+
+    pub fn to_poly_trait_predicate(&self) -> ty::PolyTraitPredicate<'tcx> {
+        // Note that we preserve binding levels
+        Binder(ty::TraitPredicate { trait_ref: self.0.clone() })
+    }
+}
+
+/// Binder is a binder for higher-ranked lifetimes. It is part of the
+/// compiler's representation for things like `for<'a> Fn(&'a isize)`
+/// (which would be represented by the type `PolyTraitRef ==
+/// Binder<TraitRef>`). Note that when we skolemize, instantiate,
+/// erase, or otherwise "discharge" these bound regions, we change the
+/// type from `Binder<T>` to just `T` (see
+/// e.g. `liberate_late_bound_regions`).
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct Binder<T>(pub T);
+
+impl<T> Binder<T> {
+    /// Skips the binder and returns the "bound" value. This is a
+    /// risky thing to do because it's easy to get confused about
+    /// debruijn indices and the like. It is usually better to
+    /// discharge the binder using `no_late_bound_regions` or
+    /// `replace_late_bound_regions` or something like
+    /// that. `skip_binder` is only valid when you are either
+    /// extracting data that has nothing to do with bound regions, you
+    /// are doing some sort of test that does not involve bound
+    /// regions, or you are being very careful about your depth
+    /// accounting.
+    ///
+    /// Some examples where `skip_binder` is reasonable:
+    /// - extracting the def-id from a PolyTraitRef;
+    /// - comparing the self type of a PolyTraitRef to see if it is equal to
+    ///   a type parameter `X`, since the type `X` does not reference any regions
+    pub fn skip_binder(&self) -> &T {
+        &self.0
+    }
+
+    pub fn as_ref(&self) -> Binder<&T> {
+        ty::Binder(&self.0)
+    }
+
+    pub fn map_bound_ref<F,U>(&self, f: F) -> Binder<U>
+        where F: FnOnce(&T) -> U
+    {
+        self.as_ref().map_bound(f)
+    }
+
+    pub fn map_bound<F,U>(self, f: F) -> Binder<U>
+        where F: FnOnce(T) -> U
+    {
+        ty::Binder(f(self.0))
+    }
+}
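+
+// A small self-contained usage sketch (editorial; module and test names are
+// not part of upstream rustc, and the code is compiled only under
+// `cfg(test)`): `map_bound` transforms the bound value while preserving the
+// binding level, and `skip_binder` peeks at it without discharging it.
+#[cfg(test)]
+mod binder_usage_sketch {
+    use super::Binder;
+
+    #[test]
+    fn map_then_skip() {
+        let b = Binder(vec![1, 2, 3]);
+        // Only the payload changes; the (notional) binder stays in place.
+        let len = b.map_bound(|v| v.len());
+        assert_eq!(*len.skip_binder(), 3);
+    }
+}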
+
+impl fmt::Debug for TypeFlags {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.bits)
+    }
+}
+
+/// Represents the projection of an associated type. In explicit UFCS
+/// form this would be written `<T as Trait<..>>::N`.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct ProjectionTy<'tcx> {
+    /// The trait reference `T as Trait<..>`.
+    pub trait_ref: ty::TraitRef<'tcx>,
+
+    /// The name `N` of the associated type.
+    pub item_name: Name,
+}
+
+impl<'tcx> ProjectionTy<'tcx> {
+    pub fn sort_key(&self) -> (DefId, Name) {
+        (self.trait_ref.def_id, self.item_name)
+    }
+}
+
+#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+pub struct BareFnTy<'tcx> {
+    pub unsafety: hir::Unsafety,
+    pub abi: abi::Abi,
+    pub sig: PolyFnSig<'tcx>,
+}
+
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct ClosureTy<'tcx> {
+    pub unsafety: hir::Unsafety,
+    pub abi: abi::Abi,
+    pub sig: PolyFnSig<'tcx>,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
+pub enum FnOutput<'tcx> {
+    FnConverging(Ty<'tcx>),
+    FnDiverging
+}
+
+impl<'tcx> FnOutput<'tcx> {
+    pub fn diverges(&self) -> bool {
+        *self == FnDiverging
+    }
+
+    pub fn unwrap(self) -> Ty<'tcx> {
+        match self {
+            ty::FnConverging(t) => t,
+            ty::FnDiverging => bug!()
+        }
+    }
+
+    pub fn unwrap_or(self, def: Ty<'tcx>) -> Ty<'tcx> {
+        match self {
+            ty::FnConverging(t) => t,
+            ty::FnDiverging => def
+        }
+    }
+}
+
+pub type PolyFnOutput<'tcx> = Binder<FnOutput<'tcx>>;
+
+impl<'tcx> PolyFnOutput<'tcx> {
+    pub fn diverges(&self) -> bool {
+        self.0.diverges()
+    }
+}
+
+/// Signature of a function type, which (somewhat arbitrarily)
+/// refers to the input/output types.
+///
+/// - `inputs` is the list of arguments and their modes.
+/// - `output` is the return type.
+/// - `variadic` indicates whether this is a variadic function (only true for foreign fns).
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct FnSig<'tcx> {
+    pub inputs: Vec<Ty<'tcx>>,
+    pub output: FnOutput<'tcx>,
+    pub variadic: bool
+}
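+
+// A hedged example (editorial): the type `fn(i32, bool) -> f64` would be
+// described by an `FnSig` with `inputs: [i32, bool]`,
+// `output: FnConverging(f64)` and `variadic: false`; a diverging signature
+// such as `fn() -> !` would carry `FnDiverging` instead.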
+
+pub type PolyFnSig<'tcx> = Binder<FnSig<'tcx>>;
+
+impl<'tcx> PolyFnSig<'tcx> {
+    pub fn inputs(&self) -> ty::Binder<Vec<Ty<'tcx>>> {
+        self.map_bound_ref(|fn_sig| fn_sig.inputs.clone())
+    }
+    pub fn input(&self, index: usize) -> ty::Binder<Ty<'tcx>> {
+        self.map_bound_ref(|fn_sig| fn_sig.inputs[index])
+    }
+    pub fn output(&self) -> ty::Binder<FnOutput<'tcx>> {
+        self.map_bound_ref(|fn_sig| fn_sig.output.clone())
+    }
+    pub fn variadic(&self) -> bool {
+        self.skip_binder().variadic
+    }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct ParamTy {
+    pub space: subst::ParamSpace,
+    pub idx: u32,
+    pub name: Name,
+}
+
+impl ParamTy {
+    pub fn new(space: subst::ParamSpace,
+               index: u32,
+               name: Name)
+               -> ParamTy {
+        ParamTy { space: space, idx: index, name: name }
+    }
+
+    pub fn for_self() -> ParamTy {
+        ParamTy::new(subst::SelfSpace, 0, special_idents::type_self.name)
+    }
+
+    pub fn for_def(def: &ty::TypeParameterDef) -> ParamTy {
+        ParamTy::new(def.space, def.index, def.name)
+    }
+
+    pub fn to_ty<'tcx>(self, tcx: &TyCtxt<'tcx>) -> Ty<'tcx> {
+        tcx.mk_param(self.space, self.idx, self.name)
+    }
+
+    pub fn is_self(&self) -> bool {
+        self.space == subst::SelfSpace && self.idx == 0
+    }
+}
+
+/// A [De Bruijn index][dbi] is a standard means of representing
+/// regions (and perhaps later types) in a higher-ranked setting. In
+/// particular, imagine a type like this:
+///
+///     for<'a> fn(for<'b> fn(&'b isize, &'a isize), &'a char)
+///     ^          ^            |        |         |
+///     |          |            |        |         |
+///     |          +------------+ 1      |         |
+///     |                                |         |
+///     +--------------------------------+ 2       |
+///     |                                          |
+///     +------------------------------------------+ 1
+///
+/// In this type, there are two binders (the outer fn and the inner
+/// fn). We need to be able to determine, for any given region, which
+/// fn type it is bound by, the inner or the outer one. There are
+/// various ways you can do this, but a De Bruijn index is one of the
+/// more convenient and has some nice properties. The basic idea is to
+/// count the number of binders, inside out. Some examples should help
+/// clarify what I mean.
+///
+/// Let's start with the reference type `&'b isize` that is the first
+/// argument to the inner function. This region `'b` is assigned a De
+/// Bruijn index of 1, meaning "the innermost binder" (in this case, a
+/// fn). The region `'a` that appears in the second argument type (`&'a
+/// isize`) would then be assigned a De Bruijn index of 2, meaning "the
+/// second-innermost binder". (These indices are written on the arrays
+/// in the diagram).
+///
+/// What is interesting is that the De Bruijn index attached to a particular
+/// variable will vary depending on where it appears. For example,
+/// the final type `&'a char` also refers to the region `'a` declared on
+/// the outermost fn. But this time, this reference is not nested within
+/// any other binders (i.e., it is not an argument to the inner fn, but
+/// rather the outer one). Therefore, in this case, it is assigned a
+/// De Bruijn index of 1, because the innermost binder in that location
+/// is the outer fn.
+///
+/// [dbi]: http://en.wikipedia.org/wiki/De_Bruijn_index
+#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug, Copy)]
+pub struct DebruijnIndex {
+    // We maintain the invariant that this is never 0. So 1 indicates
+    // the innermost binder. To ensure this, create with `DebruijnIndex::new`.
+    pub depth: u32,
+}
+
+/// Representation of regions.
+///
+/// Unlike types, most region variants are "fictitious", not concrete,
+/// regions. Among these, `ReStatic`, `ReEmpty` and `ReScope` are the only
+/// ones representing concrete regions.
+///
+/// ## Bound Regions
+///
+/// These are regions that are stored behind a binder and must be substituted
+/// with some concrete region before being used. There are 2 kinds of
+/// bound regions: early-bound, which are bound in a TypeScheme/TraitDef,
+/// and are substituted by a Substs, and late-bound, which are part of
+/// higher-ranked types (e.g. `for<'a> fn(&'a ())`) and are substituted by
+/// the likes of `liberate_late_bound_regions`. The distinction exists
+/// because higher-ranked lifetimes aren't supported in all places. See [1][2].
+///
+/// Unlike TyParam-s, bound regions are not supposed to exist "in the wild"
+/// outside their binder, e.g. in types passed to type inference, and
+/// should first be substituted (by skolemized regions, free regions,
+/// or region variables).
+///
+/// ## Skolemized and Free Regions
+///
+/// One often wants to work with bound regions without knowing their precise
+/// identity. For example, when checking a function, the lifetime of a borrow
+/// can end up being assigned to some region parameter. In these cases,
+/// it must be ensured that bounds on the region can't be accidentally
+/// assumed without being checked.
+///
+/// The process of doing that is called "skolemization". The bound regions
+/// are replaced by skolemized markers, which don't satisfy any relation
+/// not explicitly provided.
+///
+/// There are 2 kinds of skolemized regions in rustc: `ReFree` and
+/// `ReSkolemized`. When checking an item's body, `ReFree` is supposed
+/// to be used. These also support explicit bounds: both the internally-stored
+/// *scope*, which the region is assumed to outlive, and other
+/// relations stored in the `FreeRegionMap`. Note that these relations
+/// aren't checked when you `make_subregion` (or `mk_eqty`), only by
+/// `resolve_regions_and_report_errors`.
+///
+/// When working with higher-ranked types, some region relations aren't
+/// yet known, so you can't just call `resolve_regions_and_report_errors`.
+/// `ReSkolemized` is designed for this purpose. In these contexts,
+/// there's also the risk that some inference variable lying around will
+/// get unified with your skolemized region: if you want to check whether
+/// `for<'a> Foo<'_>: 'a`, and you substitute your bound region `'a`
+/// with a skolemized region `'%a`, the variable `'_` would just be
+/// instantiated to the skolemized region `'%a`, which is wrong because
+/// the inference variable is supposed to satisfy the relation
+/// *for every value of the skolemized region*. To ensure that doesn't
+/// happen, you can use `leak_check`. This is more clearly explained
+/// in infer/higher_ranked/README.md.
+///
+/// [1] http://smallcultfollowing.com/babysteps/blog/2013/10/29/intermingled-parameter-lists/
+/// [2] http://smallcultfollowing.com/babysteps/blog/2013/11/04/intermingled-parameter-lists/
+#[derive(Clone, PartialEq, Eq, Hash, Copy, RustcEncodable, RustcDecodable)]
+pub enum Region {
+    // Region bound in a type or fn declaration which will be
+    // substituted 'early' -- that is, at the same time when type
+    // parameters are substituted.
+    ReEarlyBound(EarlyBoundRegion),
+
+    // Region bound in a function scope, which will be substituted when the
+    // function is called.
+    ReLateBound(DebruijnIndex, BoundRegion),
+
+    /// When checking a function body, the types of all arguments and so forth
+    /// that refer to bound region parameters are modified to refer to free
+    /// region parameters.
+    ReFree(FreeRegion),
+
+    /// A concrete region naming some statically determined extent
+    /// (e.g. an expression or sequence of statements) within the
+    /// current function.
+    ReScope(region::CodeExtent),
+
+    /// Static data that has an "infinite" lifetime. Top in the region lattice.
+    ReStatic,
+
+    /// A region variable.  Should not exist after typeck.
+    ReVar(RegionVid),
+
+    /// A skolemized region - basically the higher-ranked version of ReFree.
+    /// Should not exist after typeck.
+    ReSkolemized(SkolemizedRegionVid, BoundRegion),
+
+    /// Empty lifetime is for data that is never accessed.
+    /// Bottom in the region lattice. We treat ReEmpty somewhat
+    /// specially; at least right now, we do not generate instances of
+    /// it during the GLB computations, but rather
+    /// generate an error instead. This is to improve error messages.
+    /// The only way to get an instance of ReEmpty is to have a region
+    /// variable with no constraints.
+    ReEmpty,
+}
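+
+// A hedged mapping (editorial) from surface syntax to the variants above:
+//
+//     `'a` declared on an impl or type definition   -> ReEarlyBound(..)
+//     `'a` in `for<'a> fn(&'a u8)`                  -> ReLateBound(debruijn, br)
+//     the extent of a block or expression           -> ReScope(extent)
+//     `'static`                                     -> ReStatic
+//     an unresolved lifetime during inference       -> ReVar(vid)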
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)]
+pub struct EarlyBoundRegion {
+    pub space: subst::ParamSpace,
+    pub index: u32,
+    pub name: Name,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct TyVid {
+    pub index: u32
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct IntVid {
+    pub index: u32
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct FloatVid {
+    pub index: u32
+}
+
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)]
+pub struct RegionVid {
+    pub index: u32
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
+pub struct SkolemizedRegionVid {
+    pub index: u32
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub enum InferTy {
+    TyVar(TyVid),
+    IntVar(IntVid),
+    FloatVar(FloatVid),
+
+    /// A `FreshTy` is one that is generated as a replacement for an
+    /// unbound type variable. This is convenient for caching etc. See
+    /// `infer::freshen` for more details.
+    FreshTy(u32),
+    FreshIntTy(u32),
+    FreshFloatTy(u32)
+}
+
+/// Bounds suitable for an existentially quantified type parameter
+/// such as those that appear in object types or closure types.
+#[derive(PartialEq, Eq, Hash, Clone)]
+pub struct ExistentialBounds<'tcx> {
+    pub region_bound: ty::Region,
+    pub builtin_bounds: BuiltinBounds,
+    pub projection_bounds: Vec<ty::PolyProjectionPredicate<'tcx>>,
+}
+
+impl<'tcx> ExistentialBounds<'tcx> {
+    pub fn new(region_bound: ty::Region,
+               builtin_bounds: BuiltinBounds,
+               projection_bounds: Vec<ty::PolyProjectionPredicate<'tcx>>)
+               -> Self {
+        let mut projection_bounds = projection_bounds;
+        projection_bounds.sort_by(|a, b| a.sort_key().cmp(&b.sort_key()));
+        ExistentialBounds {
+            region_bound: region_bound,
+            builtin_bounds: builtin_bounds,
+            projection_bounds: projection_bounds
+        }
+    }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+pub struct BuiltinBounds(EnumSet<BuiltinBound>);
+
+impl BuiltinBounds {
+    pub fn empty() -> BuiltinBounds {
+        BuiltinBounds(EnumSet::new())
+    }
+
+    pub fn iter(&self) -> enum_set::Iter<BuiltinBound> {
+        self.into_iter()
+    }
+
+    pub fn to_predicates<'tcx>(&self,
+                               tcx: &TyCtxt<'tcx>,
+                               self_ty: Ty<'tcx>) -> Vec<ty::Predicate<'tcx>> {
+        self.iter().filter_map(|builtin_bound|
+            match traits::trait_ref_for_builtin_bound(tcx, builtin_bound, self_ty) {
+                Ok(trait_ref) => Some(trait_ref.to_predicate()),
+                Err(ErrorReported) => { None }
+            }
+        ).collect()
+    }
+}
+
+impl ops::Deref for BuiltinBounds {
+    type Target = EnumSet<BuiltinBound>;
+    fn deref(&self) -> &Self::Target { &self.0 }
+}
+
+impl ops::DerefMut for BuiltinBounds {
+    fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 }
+}
+
+impl<'a> IntoIterator for &'a BuiltinBounds {
+    type Item = BuiltinBound;
+    type IntoIter = enum_set::Iter<BuiltinBound>;
+    fn into_iter(self) -> Self::IntoIter {
+        (**self).into_iter()
+    }
+}
+
+#[derive(Clone, RustcEncodable, PartialEq, Eq, RustcDecodable, Hash,
+           Debug, Copy)]
+#[repr(usize)]
+pub enum BuiltinBound {
+    Send,
+    Sized,
+    Copy,
+    Sync,
+}
+
+impl CLike for BuiltinBound {
+    fn to_usize(&self) -> usize {
+        *self as usize
+    }
+    fn from_usize(v: usize) -> BuiltinBound {
+        unsafe { mem::transmute(v) }
+    }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    pub fn try_add_builtin_trait(&self,
+                                 trait_def_id: DefId,
+                                 builtin_bounds: &mut EnumSet<BuiltinBound>)
+                                 -> bool
+    {
+        //! Checks whether `trait_def_id` refers to one of the builtin
+        //! traits, like `Send`, and adds the corresponding
+        //! bound to the set `builtin_bounds` if so. Returns true if
+        //! `trait_def_id` is a builtin trait.
+
+        match self.lang_items.to_builtin_kind(trait_def_id) {
+            Some(bound) => { builtin_bounds.insert(bound); true }
+            None => false
+        }
+    }
+}
+
+impl DebruijnIndex {
+    pub fn new(depth: u32) -> DebruijnIndex {
+        assert!(depth > 0);
+        DebruijnIndex { depth: depth }
+    }
+
+    pub fn shifted(&self, amount: u32) -> DebruijnIndex {
+        DebruijnIndex { depth: self.depth + amount }
+    }
+}
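+
+// A tiny self-contained check (editorial; module and test names are not part
+// of upstream rustc, and the code is `cfg(test)` only): depths are 1-based,
+// and `shifted` moves an index outward past additional binders.
+#[cfg(test)]
+mod debruijn_sketch {
+    use super::DebruijnIndex;
+
+    #[test]
+    fn new_and_shifted() {
+        let innermost = DebruijnIndex::new(1);
+        // Wrapping the term in one more binder bumps the depth by one.
+        assert_eq!(innermost.shifted(1).depth, 2);
+    }
+}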
+
+// Region utilities
+impl Region {
+    pub fn is_bound(&self) -> bool {
+        match *self {
+            ty::ReEarlyBound(..) => true,
+            ty::ReLateBound(..) => true,
+            _ => false
+        }
+    }
+
+    pub fn needs_infer(&self) -> bool {
+        match *self {
+            ty::ReVar(..) | ty::ReSkolemized(..) => true,
+            _ => false
+        }
+    }
+
+    pub fn escapes_depth(&self, depth: u32) -> bool {
+        match *self {
+            ty::ReLateBound(debruijn, _) => debruijn.depth > depth,
+            _ => false,
+        }
+    }
+
+    /// Returns the depth of `self` from the (1-based) binding level `depth`
+    pub fn from_depth(&self, depth: u32) -> Region {
+        match *self {
+            ty::ReLateBound(debruijn, r) => ty::ReLateBound(DebruijnIndex {
+                depth: debruijn.depth - (depth - 1)
+            }, r),
+            r => r
+        }
+    }
+}
+
+// Type utilities
+impl<'tcx> TyS<'tcx> {
+    pub fn as_opt_param_ty(&self) -> Option<ty::ParamTy> {
+        match self.sty {
+            ty::TyParam(ref d) => Some(d.clone()),
+            _ => None,
+        }
+    }
+
+    pub fn is_nil(&self) -> bool {
+        match self.sty {
+            TyTuple(ref tys) => tys.is_empty(),
+            _ => false
+        }
+    }
+
+    pub fn is_empty(&self, _cx: &TyCtxt) -> bool {
+        // FIXME(#24885): be smarter here
+        match self.sty {
+            TyEnum(def, _) | TyStruct(def, _) => def.is_empty(),
+            _ => false
+        }
+    }
+
+    pub fn is_primitive(&self) -> bool {
+        match self.sty {
+            TyBool | TyChar | TyInt(_) | TyUint(_) | TyFloat(_) => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_ty_var(&self) -> bool {
+        match self.sty {
+            TyInfer(TyVar(_)) => true,
+            _ => false
+        }
+    }
+
+    pub fn is_phantom_data(&self) -> bool {
+        if let TyStruct(def, _) = self.sty {
+            def.is_phantom_data()
+        } else {
+            false
+        }
+    }
+
+    pub fn is_bool(&self) -> bool { self.sty == TyBool }
+
+    pub fn is_param(&self, space: subst::ParamSpace, index: u32) -> bool {
+        match self.sty {
+            ty::TyParam(ref data) => data.space == space && data.idx == index,
+            _ => false,
+        }
+    }
+
+    pub fn is_self(&self) -> bool {
+        match self.sty {
+            TyParam(ref p) => p.space == subst::SelfSpace,
+            _ => false
+        }
+    }
+
+    pub fn is_slice(&self) -> bool {
+        match self.sty {
+            TyRawPtr(mt) | TyRef(_, mt) => match mt.ty.sty {
+                TySlice(_) | TyStr => true,
+                _ => false,
+            },
+            _ => false
+        }
+    }
+
+    pub fn is_structural(&self) -> bool {
+        match self.sty {
+            TyStruct(..) | TyTuple(_) | TyEnum(..) |
+            TyArray(..) | TyClosure(..) => true,
+            _ => self.is_slice() | self.is_trait()
+        }
+    }
+
+    #[inline]
+    pub fn is_simd(&self) -> bool {
+        match self.sty {
+            TyStruct(def, _) => def.is_simd(),
+            _ => false
+        }
+    }
+
+    pub fn sequence_element_type(&self, cx: &TyCtxt<'tcx>) -> Ty<'tcx> {
+        match self.sty {
+            TyArray(ty, _) | TySlice(ty) => ty,
+            TyStr => cx.mk_mach_uint(ast::UintTy::U8),
+            _ => bug!("sequence_element_type called on non-sequence value: {}", self),
+        }
+    }
+
+    pub fn simd_type(&self, cx: &TyCtxt<'tcx>) -> Ty<'tcx> {
+        match self.sty {
+            TyStruct(def, substs) => {
+                def.struct_variant().fields[0].ty(cx, substs)
+            }
+            _ => bug!("simd_type called on invalid type")
+        }
+    }
+
+    pub fn simd_size(&self, _cx: &TyCtxt) -> usize {
+        match self.sty {
+            TyStruct(def, _) => def.struct_variant().fields.len(),
+            _ => bug!("simd_size called on invalid type")
+        }
+    }
+
+    pub fn is_region_ptr(&self) -> bool {
+        match self.sty {
+            TyRef(..) => true,
+            _ => false
+        }
+    }
+
+    pub fn is_unsafe_ptr(&self) -> bool {
+        match self.sty {
+            TyRawPtr(_) => true,
+            _ => false
+        }
+    }
+
+    pub fn is_unique(&self) -> bool {
+        match self.sty {
+            TyBox(_) => true,
+            _ => false
+        }
+    }
+
+    /*
+     A scalar type is one that denotes an atomic datum, with no sub-components.
+     (A TyRawPtr is scalar because it represents a non-managed pointer, so its
+     contents are abstract to rustc.)
+    */
+    pub fn is_scalar(&self) -> bool {
+        match self.sty {
+            TyBool | TyChar | TyInt(_) | TyFloat(_) | TyUint(_) |
+            TyInfer(IntVar(_)) | TyInfer(FloatVar(_)) |
+            TyFnDef(..) | TyFnPtr(_) | TyRawPtr(_) => true,
+            _ => false
+        }
+    }
+
+    /// Returns true if this type is a floating point type and false otherwise.
+    pub fn is_floating_point(&self) -> bool {
+        match self.sty {
+            TyFloat(_) |
+            TyInfer(FloatVar(_)) => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_trait(&self) -> bool {
+        match self.sty {
+            TyTrait(..) => true,
+            _ => false
+        }
+    }
+
+    pub fn is_integral(&self) -> bool {
+        match self.sty {
+            TyInfer(IntVar(_)) | TyInt(_) | TyUint(_) => true,
+            _ => false
+        }
+    }
+
+    pub fn is_fresh(&self) -> bool {
+        match self.sty {
+            TyInfer(FreshTy(_)) => true,
+            TyInfer(FreshIntTy(_)) => true,
+            TyInfer(FreshFloatTy(_)) => true,
+            _ => false
+        }
+    }
+
+    pub fn is_uint(&self) -> bool {
+        match self.sty {
+            TyInfer(IntVar(_)) | TyUint(ast::UintTy::Us) => true,
+            _ => false
+        }
+    }
+
+    pub fn is_char(&self) -> bool {
+        match self.sty {
+            TyChar => true,
+            _ => false
+        }
+    }
+
+    pub fn is_fp(&self) -> bool {
+        match self.sty {
+            TyInfer(FloatVar(_)) | TyFloat(_) => true,
+            _ => false
+        }
+    }
+
+    pub fn is_numeric(&self) -> bool {
+        self.is_integral() || self.is_fp()
+    }
+
+    pub fn is_signed(&self) -> bool {
+        match self.sty {
+            TyInt(_) => true,
+            _ => false
+        }
+    }
+
+    pub fn is_machine(&self) -> bool {
+        match self.sty {
+            TyInt(ast::IntTy::Is) | TyUint(ast::UintTy::Us) => false,
+            TyInt(..) | TyUint(..) | TyFloat(..) => true,
+            _ => false
+        }
+    }
+
+    pub fn has_concrete_skeleton(&self) -> bool {
+        match self.sty {
+            TyParam(_) | TyInfer(_) | TyError => false,
+            _ => true,
+        }
+    }
+
+    // Returns the type and mutability of *ty.
+    //
+    // The parameter `explicit` indicates if this is an *explicit* dereference.
+    // Some types---notably unsafe ptrs---can only be dereferenced explicitly.
+    pub fn builtin_deref(&self, explicit: bool, pref: ty::LvaluePreference)
+        -> Option<TypeAndMut<'tcx>>
+    {
+        match self.sty {
+            TyBox(ty) => {
+                Some(TypeAndMut {
+                    ty: ty,
+                    mutbl: if pref == ty::PreferMutLvalue {
+                        hir::MutMutable
+                    } else {
+                        hir::MutImmutable
+                    },
+                })
+            },
+            TyRef(_, mt) => Some(mt),
+            TyRawPtr(mt) if explicit => Some(mt),
+            _ => None
+        }
+    }
+
+    // Returns the type of ty[i]
+    pub fn builtin_index(&self) -> Option<Ty<'tcx>> {
+        match self.sty {
+            TyArray(ty, _) | TySlice(ty) => Some(ty),
+            _ => None
+        }
+    }
+
+    pub fn fn_sig(&self) -> &'tcx PolyFnSig<'tcx> {
+        match self.sty {
+            TyFnDef(_, _, ref f) | TyFnPtr(ref f) => &f.sig,
+            _ => bug!("Ty::fn_sig() called on non-fn type: {:?}", self)
+        }
+    }
+
+    /// Returns the ABI of the given function.
+    pub fn fn_abi(&self) -> abi::Abi {
+        match self.sty {
+            TyFnDef(_, _, ref f) | TyFnPtr(ref f) => f.abi,
+            _ => bug!("Ty::fn_abi() called on non-fn type"),
+        }
+    }
+
+    // Type accessors for substructures of types
+    pub fn fn_args(&self) -> ty::Binder<Vec<Ty<'tcx>>> {
+        self.fn_sig().inputs()
+    }
+
+    pub fn fn_ret(&self) -> Binder<FnOutput<'tcx>> {
+        self.fn_sig().output()
+    }
+
+    pub fn is_fn(&self) -> bool {
+        match self.sty {
+            TyFnDef(..) | TyFnPtr(_) => true,
+            _ => false
+        }
+    }
+
+    pub fn ty_to_def_id(&self) -> Option<DefId> {
+        match self.sty {
+            TyTrait(ref tt) => Some(tt.principal_def_id()),
+            TyStruct(def, _) |
+            TyEnum(def, _) => Some(def.did),
+            TyClosure(id, _) => Some(id),
+            _ => None
+        }
+    }
+
+    pub fn ty_adt_def(&self) -> Option<AdtDef<'tcx>> {
+        match self.sty {
+            TyStruct(adt, _) | TyEnum(adt, _) => Some(adt),
+            _ => None
+        }
+    }
+
+    /// Returns the regions directly referenced from this type (but
+    /// not types reachable from this type via `walk_tys`). This
+    /// ignores late-bound region binders.
+    pub fn regions(&self) -> Vec<ty::Region> {
+        match self.sty {
+            TyRef(region, _) => {
+                vec![*region]
+            }
+            TyTrait(ref obj) => {
+                let mut v = vec![obj.bounds.region_bound];
+                v.extend_from_slice(obj.principal.skip_binder()
+                                       .substs.regions.as_slice());
+                v
+            }
+            TyEnum(_, substs) |
+            TyStruct(_, substs) => {
+                substs.regions.as_slice().to_vec()
+            }
+            TyClosure(_, ref substs) => {
+                substs.func_substs.regions.as_slice().to_vec()
+            }
+            TyProjection(ref data) => {
+                data.trait_ref.substs.regions.as_slice().to_vec()
+            }
+            TyFnDef(..) |
+            TyFnPtr(_) |
+            TyBool |
+            TyChar |
+            TyInt(_) |
+            TyUint(_) |
+            TyFloat(_) |
+            TyBox(_) |
+            TyStr |
+            TyArray(_, _) |
+            TySlice(_) |
+            TyRawPtr(_) |
+            TyTuple(_) |
+            TyParam(_) |
+            TyInfer(_) |
+            TyError => {
+                vec![]
+            }
+        }
+    }
+}
diff --git a/src/librustc/ty/subst.rs b/src/librustc/ty/subst.rs
new file mode 100644 (file)
index 0000000..dd547da
--- /dev/null
@@ -0,0 +1,736 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Type substitutions.
+
+pub use self::ParamSpace::*;
+
+use middle::cstore;
+use hir::def_id::DefId;
+use ty::{self, Ty, TyCtxt};
+use ty::fold::{TypeFoldable, TypeFolder};
+
+use serialize::{Encodable, Encoder, Decodable, Decoder};
+use std::fmt;
+use std::iter::IntoIterator;
+use std::slice::Iter;
+use std::vec::{Vec, IntoIter};
+use syntax::codemap::{Span, DUMMY_SP};
+
+///////////////////////////////////////////////////////////////////////////
+
+/// A substitution mapping type/region parameters to new values. We
+/// identify each in-scope parameter by an *index* and a *parameter
+/// space* (which indicates where the parameter is defined; see
+/// `ParamSpace`).
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct Substs<'tcx> {
+    pub types: VecPerParamSpace<Ty<'tcx>>,
+    pub regions: VecPerParamSpace<ty::Region>,
+}
+
+impl<'tcx> Substs<'tcx> {
+    pub fn new(t: VecPerParamSpace<Ty<'tcx>>,
+               r: VecPerParamSpace<ty::Region>)
+               -> Substs<'tcx>
+    {
+        Substs { types: t, regions: r }
+    }
+
+    pub fn new_type(t: Vec<Ty<'tcx>>,
+                    r: Vec<ty::Region>)
+                    -> Substs<'tcx>
+    {
+        Substs::new(VecPerParamSpace::new(t, Vec::new(), Vec::new()),
+                    VecPerParamSpace::new(r, Vec::new(), Vec::new()))
+    }
+
+    pub fn new_trait(t: Vec<Ty<'tcx>>,
+                     r: Vec<ty::Region>,
+                     s: Ty<'tcx>)
+                    -> Substs<'tcx>
+    {
+        Substs::new(VecPerParamSpace::new(t, vec!(s), Vec::new()),
+                    VecPerParamSpace::new(r, Vec::new(), Vec::new()))
+    }
+
+    pub fn empty() -> Substs<'tcx> {
+        Substs {
+            types: VecPerParamSpace::empty(),
+            regions: VecPerParamSpace::empty(),
+        }
+    }
+
+    pub fn is_noop(&self) -> bool {
+        self.regions.is_empty() && self.types.is_empty()
+    }
+
+    pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> {
+        *self.types.get(ty_param_def.space, ty_param_def.index as usize)
+    }
+
+    pub fn self_ty(&self) -> Option<Ty<'tcx>> {
+        self.types.get_self().cloned()
+    }
+
+    pub fn with_self_ty(&self, self_ty: Ty<'tcx>) -> Substs<'tcx> {
+        assert!(self.self_ty().is_none());
+        let mut s = (*self).clone();
+        s.types.push(SelfSpace, self_ty);
+        s
+    }
+
+    pub fn erase_regions(self) -> Substs<'tcx> {
+        let Substs { types, regions } = self;
+        let regions = regions.map(|_| ty::ReStatic);
+        Substs { types: types, regions: regions }
+    }
+
+    pub fn with_method(self,
+                       m_types: Vec<Ty<'tcx>>,
+                       m_regions: Vec<ty::Region>)
+                       -> Substs<'tcx>
+    {
+        let Substs { types, regions } = self;
+        let types = types.with_slice(FnSpace, &m_types);
+        let regions = regions.with_slice(FnSpace, &m_regions);
+        Substs { types: types, regions: regions }
+    }
+
+    pub fn with_method_from(&self,
+                            meth_substs: &Substs<'tcx>)
+                            -> Substs<'tcx>
+    {
+        let Substs { types, regions } = self.clone();
+        let types = types.with_slice(FnSpace, meth_substs.types.get_slice(FnSpace));
+        let regions = regions.with_slice(FnSpace, meth_substs.regions.get_slice(FnSpace));
+        Substs { types: types, regions: regions }
+    }
+
+    pub fn with_method_from_subst(self, other: &Substs<'tcx>) -> Substs<'tcx> {
+        let Substs { types, regions } = self;
+        let types = types.with_slice(FnSpace, other.types.get_slice(FnSpace));
+        let regions = regions.with_slice(FnSpace, other.regions.get_slice(FnSpace));
+        Substs { types: types, regions: regions }
+    }
+
+    /// Creates a trait-ref out of this substs, ignoring the FnSpace substs
+    pub fn to_trait_ref(&self, tcx: &TyCtxt<'tcx>, trait_id: DefId)
+                        -> ty::TraitRef<'tcx> {
+        let Substs { mut types, mut regions } = self.clone();
+        types.truncate(FnSpace, 0);
+        regions.truncate(FnSpace, 0);
+
+        ty::TraitRef {
+            def_id: trait_id,
+            substs: tcx.mk_substs(Substs { types: types, regions: regions })
+        }
+    }
+}
+
+impl<'tcx> Encodable for Substs<'tcx> {
+
+    fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
+        cstore::tls::with_encoding_context(s, |ecx, rbml_w| {
+            ecx.encode_substs(rbml_w, self);
+            Ok(())
+        })
+    }
+}
+
+impl<'tcx> Decodable for Substs<'tcx> {
+    fn decode<D: Decoder>(d: &mut D) -> Result<Substs<'tcx>, D::Error> {
+        cstore::tls::with_decoding_context(d, |dcx, rbml_r| {
+            Ok(dcx.decode_substs(rbml_r))
+        })
+    }
+}
+
+impl<'tcx> Decodable for &'tcx Substs<'tcx> {
+    fn decode<D: Decoder>(d: &mut D) -> Result<&'tcx Substs<'tcx>, D::Error> {
+        let substs = cstore::tls::with_decoding_context(d, |dcx, rbml_r| {
+            let substs = dcx.decode_substs(rbml_r);
+            dcx.tcx().mk_substs(substs)
+        });
+
+        Ok(substs)
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// ParamSpace
+
+#[derive(PartialOrd, Ord, PartialEq, Eq, Copy,
+           Clone, Hash, RustcEncodable, RustcDecodable, Debug)]
+pub enum ParamSpace {
+    TypeSpace,  // Type parameters attached to a type definition, trait, or impl
+    SelfSpace,  // Self parameter on a trait
+    FnSpace,    // Type parameters attached to a method or fn
+}
+
+impl ParamSpace {
+    pub fn all() -> [ParamSpace; 3] {
+        [TypeSpace, SelfSpace, FnSpace]
+    }
+
+    pub fn to_uint(self) -> usize {
+        match self {
+            TypeSpace => 0,
+            SelfSpace => 1,
+            FnSpace => 2,
+        }
+    }
+
+    pub fn from_uint(u: usize) -> ParamSpace {
+        match u {
+            0 => TypeSpace,
+            1 => SelfSpace,
+            2 => FnSpace,
+            _ => bug!("Invalid ParamSpace: {}", u)
+        }
+    }
+}
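+
+// A small self-contained check (editorial; module and test names are not
+// part of upstream rustc, `cfg(test)` only) that the space <-> index
+// mapping above round-trips.
+#[cfg(test)]
+mod param_space_sketch {
+    use super::ParamSpace;
+
+    #[test]
+    fn roundtrip() {
+        let spaces = ParamSpace::all();
+        for &space in &spaces {
+            assert_eq!(ParamSpace::from_uint(space.to_uint()), space);
+        }
+    }
+}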
+
+/// Vector of things sorted by param space. Used to keep
+/// the set of things declared on the type, self, or method
+/// distinct.
+#[derive(PartialEq, Eq, Clone, Hash, RustcEncodable, RustcDecodable)]
+pub struct VecPerParamSpace<T> {
+    // This was originally represented as a tuple with one Vec<T> for
+    // each variant of ParamSpace, and that remains the abstraction
+    // that it provides to its clients.
+    //
+    // Here is how the representation corresponds to the abstraction
+    // i.e. the "abstraction function" AF:
+    //
+    // AF(self) = (self.content[..self.type_limit],
+    //             self.content[self.type_limit..self.self_limit],
+    //             self.content[self.self_limit..])
+    type_limit: usize,
+    self_limit: usize,
+    content: Vec<T>,
+}
+
+/// The `split` function converts one `VecPerParamSpace` into this
+/// `SeparateVecsPerParamSpace` structure.
+pub struct SeparateVecsPerParamSpace<T> {
+    pub types: Vec<T>,
+    pub selfs: Vec<T>,
+    pub fns: Vec<T>,
+}
+
+impl<T: fmt::Debug> fmt::Debug for VecPerParamSpace<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "[{:?};{:?};{:?}]",
+               self.get_slice(TypeSpace),
+               self.get_slice(SelfSpace),
+               self.get_slice(FnSpace))
+    }
+}
+
+impl<T> VecPerParamSpace<T> {
+    fn limits(&self, space: ParamSpace) -> (usize, usize) {
+        match space {
+            TypeSpace => (0, self.type_limit),
+            SelfSpace => (self.type_limit, self.self_limit),
+            FnSpace => (self.self_limit, self.content.len()),
+        }
+    }
+
+    pub fn empty() -> VecPerParamSpace<T> {
+        VecPerParamSpace {
+            type_limit: 0,
+            self_limit: 0,
+            content: Vec::new()
+        }
+    }
+
+    /// `t` is the type space.
+    /// `s` is the self space.
+    /// `f` is the fn space.
+    pub fn new(t: Vec<T>, s: Vec<T>, f: Vec<T>) -> VecPerParamSpace<T> {
+        let type_limit = t.len();
+        let self_limit = type_limit + s.len();
+
+        let mut content = t;
+        content.extend(s);
+        content.extend(f);
+
+        VecPerParamSpace {
+            type_limit: type_limit,
+            self_limit: self_limit,
+            content: content,
+        }
+    }
+
+    fn new_internal(content: Vec<T>, type_limit: usize, self_limit: usize)
+                    -> VecPerParamSpace<T>
+    {
+        VecPerParamSpace {
+            type_limit: type_limit,
+            self_limit: self_limit,
+            content: content,
+        }
+    }
+
+    /// Appends `value` to the vector associated with `space`.
+    ///
+    /// Unlike the `push` method in `Vec`, this should not be assumed
+    /// to be a cheap operation (even when amortized over many calls).
+    pub fn push(&mut self, space: ParamSpace, value: T) {
+        let (_, limit) = self.limits(space);
+        match space {
+            TypeSpace => { self.type_limit += 1; self.self_limit += 1; }
+            SelfSpace => { self.self_limit += 1; }
+            FnSpace => { }
+        }
+        self.content.insert(limit, value);
+    }
+
+    /// Appends `values` to the vector associated with `space`.
+    ///
+    /// Unlike the `extend` method in `Vec`, this should not be assumed
+    /// to be a cheap operation (even when amortized over many calls).
+    pub fn extend<I:Iterator<Item=T>>(&mut self, space: ParamSpace, values: I) {
+        // This could be made more efficient, obviously.
+        for item in values {
+            self.push(space, item);
+        }
+    }
+
+    pub fn pop(&mut self, space: ParamSpace) -> Option<T> {
+        let (start, limit) = self.limits(space);
+        if start == limit {
+            None
+        } else {
+            match space {
+                TypeSpace => { self.type_limit -= 1; self.self_limit -= 1; }
+                SelfSpace => { self.self_limit -= 1; }
+                FnSpace => {}
+            }
+            if self.content.is_empty() {
+                None
+            } else {
+                Some(self.content.remove(limit - 1))
+            }
+        }
+    }
+
+    pub fn truncate(&mut self, space: ParamSpace, len: usize) {
+        // FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
+        while self.len(space) > len {
+            self.pop(space);
+        }
+    }
+
+    pub fn replace(&mut self, space: ParamSpace, elems: Vec<T>) {
+        // FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
+        self.truncate(space, 0);
+        for t in elems {
+            self.push(space, t);
+        }
+    }
+
+    pub fn get_self<'a>(&'a self) -> Option<&'a T> {
+        let v = self.get_slice(SelfSpace);
+        assert!(v.len() <= 1);
+        if v.is_empty() { None } else { Some(&v[0]) }
+    }
+
+    pub fn len(&self, space: ParamSpace) -> usize {
+        self.get_slice(space).len()
+    }
+
+    pub fn is_empty_in(&self, space: ParamSpace) -> bool {
+        self.len(space) == 0
+    }
+
+    pub fn get_slice<'a>(&'a self, space: ParamSpace) -> &'a [T] {
+        let (start, limit) = self.limits(space);
+        &self.content[start.. limit]
+    }
+
+    pub fn get_mut_slice<'a>(&'a mut self, space: ParamSpace) -> &'a mut [T] {
+        let (start, limit) = self.limits(space);
+        &mut self.content[start.. limit]
+    }
+
+    pub fn opt_get<'a>(&'a self,
+                       space: ParamSpace,
+                       index: usize)
+                       -> Option<&'a T> {
+        let v = self.get_slice(space);
+        if index < v.len() { Some(&v[index]) } else { None }
+    }
+
+    pub fn get<'a>(&'a self, space: ParamSpace, index: usize) -> &'a T {
+        &self.get_slice(space)[index]
+    }
+
+    pub fn iter<'a>(&'a self) -> Iter<'a,T> {
+        self.content.iter()
+    }
+
+    pub fn into_iter(self) -> IntoIter<T> {
+        self.content.into_iter()
+    }
+
+    pub fn iter_enumerated<'a>(&'a self) -> EnumeratedItems<'a,T> {
+        EnumeratedItems::new(self)
+    }
+
+    pub fn as_slice(&self) -> &[T] {
+        &self.content
+    }
+
+    pub fn into_vec(self) -> Vec<T> {
+        self.content
+    }
+
+    pub fn all_vecs<P>(&self, mut pred: P) -> bool where
+        P: FnMut(&[T]) -> bool,
+    {
+        let spaces = [TypeSpace, SelfSpace, FnSpace];
+        spaces.iter().all(|&space| { pred(self.get_slice(space)) })
+    }
+
+    pub fn all<P>(&self, pred: P) -> bool where P: FnMut(&T) -> bool {
+        self.iter().all(pred)
+    }
+
+    pub fn any<P>(&self, pred: P) -> bool where P: FnMut(&T) -> bool {
+        self.iter().any(pred)
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.all_vecs(|v| v.is_empty())
+    }
+
+    pub fn map<U, P>(&self, pred: P) -> VecPerParamSpace<U> where P: FnMut(&T) -> U {
+        let result = self.iter().map(pred).collect();
+        VecPerParamSpace::new_internal(result,
+                                       self.type_limit,
+                                       self.self_limit)
+    }
+
+    pub fn map_enumerated<U, P>(&self, pred: P) -> VecPerParamSpace<U> where
+        P: FnMut((ParamSpace, usize, &T)) -> U,
+    {
+        let result = self.iter_enumerated().map(pred).collect();
+        VecPerParamSpace::new_internal(result,
+                                       self.type_limit,
+                                       self.self_limit)
+    }
+
+    pub fn split(self) -> SeparateVecsPerParamSpace<T> {
+        let VecPerParamSpace { type_limit, self_limit, content } = self;
+
+        let mut content_iter = content.into_iter();
+
+        SeparateVecsPerParamSpace {
+            types: content_iter.by_ref().take(type_limit).collect(),
+            selfs: content_iter.by_ref().take(self_limit - type_limit).collect(),
+            fns: content_iter.collect()
+        }
+    }
+
+    pub fn with_slice(mut self, space: ParamSpace, slice: &[T])
+                    -> VecPerParamSpace<T>
+        where T: Clone
+    {
+        assert!(self.is_empty_in(space));
+        for t in slice {
+            self.push(space, t.clone());
+        }
+
+        self
+    }
+}
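+
+// A small self-contained sketch (editorial; module and test names are not
+// part of upstream rustc, `cfg(test)` only) of the flattened layout
+// documented on the struct: the type, self and fn entries share one
+// `content` vector, partitioned by `type_limit` and `self_limit`.
+#[cfg(test)]
+mod vec_per_param_space_sketch {
+    use super::{VecPerParamSpace, TypeSpace, SelfSpace, FnSpace};
+
+    #[test]
+    fn layout_matches_abstraction() {
+        let v = VecPerParamSpace::new(vec!["t0", "t1"], vec!["s0"], vec!["f0"]);
+        assert_eq!(v.get_slice(TypeSpace), &["t0", "t1"][..]);
+        assert_eq!(v.get_slice(SelfSpace), &["s0"][..]);
+        assert_eq!(v.get_slice(FnSpace), &["f0"][..]);
+        // The underlying storage is the concatenation of the three spaces.
+        assert_eq!(v.as_slice(), &["t0", "t1", "s0", "f0"][..]);
+    }
+}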
+
+#[derive(Clone)]
+pub struct EnumeratedItems<'a,T:'a> {
+    vec: &'a VecPerParamSpace<T>,
+    space_index: usize,
+    elem_index: usize
+}
+
+impl<'a,T> EnumeratedItems<'a,T> {
+    fn new(v: &'a VecPerParamSpace<T>) -> EnumeratedItems<'a,T> {
+        let mut result = EnumeratedItems { vec: v, space_index: 0, elem_index: 0 };
+        result.adjust_space();
+        result
+    }
+
+    fn adjust_space(&mut self) {
+        let spaces = ParamSpace::all();
+        while
+            self.space_index < spaces.len() &&
+            self.elem_index >= self.vec.len(spaces[self.space_index])
+        {
+            self.space_index += 1;
+            self.elem_index = 0;
+        }
+    }
+}
+
+impl<'a,T> Iterator for EnumeratedItems<'a,T> {
+    type Item = (ParamSpace, usize, &'a T);
+
+    fn next(&mut self) -> Option<(ParamSpace, usize, &'a T)> {
+        let spaces = ParamSpace::all();
+        if self.space_index < spaces.len() {
+            let space = spaces[self.space_index];
+            let index = self.elem_index;
+            let item = self.vec.get(space, index);
+
+            self.elem_index += 1;
+            self.adjust_space();
+
+            Some((space, index, item))
+        } else {
+            None
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let size = self.vec.as_slice().len();
+        (size, Some(size))
+    }
+}
+
+impl<T> IntoIterator for VecPerParamSpace<T> {
+    type Item = T;
+    type IntoIter = IntoIter<T>;
+
+    fn into_iter(self) -> IntoIter<T> {
+        self.into_vec().into_iter()
+    }
+}
+
+impl<'a,T> IntoIterator for &'a VecPerParamSpace<T> {
+    type Item = &'a T;
+    type IntoIter = Iter<'a, T>;
+
+    fn into_iter(self) -> Iter<'a, T> {
+        self.as_slice().into_iter()
+    }
+}
+
+
+///////////////////////////////////////////////////////////////////////////
+// Public trait `Subst`
+//
+// Just call `foo.subst(tcx, substs)` to perform a substitution across
+// `foo`. Or use `foo.subst_spanned(tcx, substs, Some(span))` when
+// there is more information available (for better errors).
+
+pub trait Subst<'tcx> : Sized {
+    fn subst(&self, tcx: &TyCtxt<'tcx>, substs: &Substs<'tcx>) -> Self {
+        self.subst_spanned(tcx, substs, None)
+    }
+
+    fn subst_spanned(&self, tcx: &TyCtxt<'tcx>,
+                     substs: &Substs<'tcx>,
+                     span: Option<Span>)
+                     -> Self;
+}
+
+impl<'tcx, T:TypeFoldable<'tcx>> Subst<'tcx> for T {
+    fn subst_spanned(&self,
+                     tcx: &TyCtxt<'tcx>,
+                     substs: &Substs<'tcx>,
+                     span: Option<Span>)
+                     -> T
+    {
+        let mut folder = SubstFolder { tcx: tcx,
+                                       substs: substs,
+                                       span: span,
+                                       root_ty: None,
+                                       ty_stack_depth: 0,
+                                       region_binders_passed: 0 };
+        (*self).fold_with(&mut folder)
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// The actual substitution engine itself is a type folder.
+
+struct SubstFolder<'a, 'tcx: 'a> {
+    tcx: &'a TyCtxt<'tcx>,
+    substs: &'a Substs<'tcx>,
+
+    // The location for which the substitution is performed, if available.
+    span: Option<Span>,
+
+    // The root type that is being substituted, if available.
+    root_ty: Option<Ty<'tcx>>,
+
+    // Depth of type stack
+    ty_stack_depth: usize,
+
+    // Number of region binders we have passed through while doing the substitution
+    region_binders_passed: u32,
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
+    fn tcx(&self) -> &TyCtxt<'tcx> { self.tcx }
+
+    fn fold_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T> {
+        self.region_binders_passed += 1;
+        let t = t.super_fold_with(self);
+        self.region_binders_passed -= 1;
+        t
+    }
+
+    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+        // Note: This routine only handles regions that are bound on
+        // type declarations and other outer declarations, not those
+        // bound in *fn types*. Region substitution of the bound
+        // regions that appear in a function signature is done using
+        // the specialized routine `ty::replace_late_regions()`.
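+        //
+        // For example (informally): in `impl<'a> Foo<'a> { fn bar<'b>(&self, x: &'b u8) {} }`,
+        // the lifetime `'a` is early-bound and is substituted here via `substs`,
+        // while `'b` is late-bound in `bar`'s signature and is handled by the
+        // late-bound machinery instead.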
+        match r {
+            ty::ReEarlyBound(data) => {
+                match self.substs.regions.opt_get(data.space, data.index as usize) {
+                    Some(&r) => {
+                        self.shift_region_through_binders(r)
+                    }
+                    None => {
+                        let span = self.span.unwrap_or(DUMMY_SP);
+                        span_bug!(
+                            span,
+                            "Region parameter out of range \
+                             when substituting in region {} (root type={:?}) \
+                             (space={:?}, index={})",
+                            data.name,
+                            self.root_ty,
+                            data.space,
+                            data.index);
+                    }
+                }
+            }
+            _ => r
+        }
+    }
+
+    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+        if !t.needs_subst() {
+            return t;
+        }
+
+        // track the root type we were asked to substitute
+        let depth = self.ty_stack_depth;
+        if depth == 0 {
+            self.root_ty = Some(t);
+        }
+        self.ty_stack_depth += 1;
+
+        let t1 = match t.sty {
+            ty::TyParam(p) => {
+                self.ty_for_param(p, t)
+            }
+            _ => {
+                t.super_fold_with(self)
+            }
+        };
+
+        assert_eq!(depth + 1, self.ty_stack_depth);
+        self.ty_stack_depth -= 1;
+        if depth == 0 {
+            self.root_ty = None;
+        }
+
+        return t1;
+    }
+}
+
+impl<'a,'tcx> SubstFolder<'a,'tcx> {
+    fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> {
+        // Look up the type in the substitutions. It really should be in there.
+        let opt_ty = self.substs.types.opt_get(p.space, p.idx as usize);
+        let ty = match opt_ty {
+            Some(t) => *t,
+            None => {
+                let span = self.span.unwrap_or(DUMMY_SP);
+                span_bug!(
+                    span,
+                    "Type parameter `{:?}` ({:?}/{:?}/{}) out of range \
+                         when substituting (root type={:?}) substs={:?}",
+                    p,
+                    source_ty,
+                    p.space,
+                    p.idx,
+                    self.root_ty,
+                    self.substs);
+            }
+        };
+
+        self.shift_regions_through_binders(ty)
+    }
+
+    /// It is sometimes necessary to adjust the debruijn indices during substitution. This occurs
+    /// when we are substituting a type with escaping regions into a context where we have passed
+    /// through region binders. That's quite a mouthful. Let's see an example:
+    ///
+    /// ```
+    /// type Func<A> = fn(A);
+    /// type MetaFunc = for<'a> fn(Func<&'a int>)
+    /// ```
+    ///
+    /// The type `MetaFunc`, when fully expanded, will be
+    ///
+    ///     for<'a> fn(fn(&'a int))
+    ///             ^~ ^~ ^~~
+    ///             |  |  |
+    ///             |  |  DebruijnIndex of 2
+    ///             Binders
+    ///
+    /// Here the `'a` lifetime is bound in the outer function, but appears as an argument of the
+    /// inner one. Therefore, that appearance will have a DebruijnIndex of 2, because we must skip
+    /// over the inner binder (remember that we count Debruijn indices from 1). However, in the
+    /// definition of `MetaFunc`, the binder is not visible, so the type `&'a int` will have a
+    /// debruijn index of 1. It's only during the substitution that we can see we must increase the
+    /// depth by 1 to account for the binder that we passed through.
+    ///
+    /// As a second example, consider this twist:
+    ///
+    /// ```
+    /// type FuncTuple<A> = (A,fn(A));
+    /// type MetaFuncTuple = for<'a> fn(FuncTuple<&'a int>)
+    /// ```
+    ///
+    /// Here the final type will be:
+    ///
+    ///     for<'a> fn((&'a int, fn(&'a int)))
+    ///                 ^~~         ^~~
+    ///                 |           |
+    ///          DebruijnIndex of 1 |
+    ///                      DebruijnIndex of 2
+    ///
+    /// As indicated in the diagram, here the same type `&'a int` is substituted once, but in the
+    /// first case we do not increase the Debruijn index and in the second case we do. The reason
+    /// is that only in the second case have we passed through a fn binder.
+    fn shift_regions_through_binders(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        debug!("shift_regions(ty={:?}, region_binders_passed={:?}, has_escaping_regions={:?})",
+               ty, self.region_binders_passed, ty.has_escaping_regions());
+
+        if self.region_binders_passed == 0 || !ty.has_escaping_regions() {
+            return ty;
+        }
+
+        let result = ty::fold::shift_regions(self.tcx(), self.region_binders_passed, &ty);
+        debug!("shift_regions: shifted result = {:?}", result);
+
+        result
+    }
+
+    fn shift_region_through_binders(&self, region: ty::Region) -> ty::Region {
+        ty::fold::shift_region(region, self.region_binders_passed)
+    }
+}
diff --git a/src/librustc/ty/trait_def.rs b/src/librustc/ty/trait_def.rs
new file mode 100644 (file)
index 0000000..39a3837
--- /dev/null
+++ b/src/librustc/ty/trait_def.rs
@@ -0,0 +1,281 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use dep_graph::DepNode;
+use hir::def_id::DefId;
+use traits::{self, specialization_graph};
+use ty;
+use ty::fast_reject;
+use ty::{Ty, TyCtxt, TraitRef};
+use std::borrow::{Borrow};
+use std::cell::{Cell, RefCell};
+use syntax::ast::Name;
+use hir;
+use util::nodemap::FnvHashMap;
+
+/// As `TypeScheme` but for a trait ref.
+pub struct TraitDef<'tcx> {
+    pub unsafety: hir::Unsafety,
+
+    /// If `true`, then this trait had the `#[rustc_paren_sugar]`
+    /// attribute, indicating that it should be used with `Foo()`
+    /// sugar. This is a temporary thing -- eventually any trait will
+    /// be usable with the sugar (or without it).
+    pub paren_sugar: bool,
+
+    /// Generic type definitions. Note that `Self` is listed in here
+    /// as having a single bound, the trait itself (e.g., in the trait
+    /// `Eq`, there is a single bound `Self : Eq`). This is so that
+    /// default methods get to assume that the `Self` parameter
+    /// implements the trait.
+    pub generics: ty::Generics<'tcx>,
+
+    pub trait_ref: ty::TraitRef<'tcx>,
+
+    /// A list of the associated types defined in this trait. Useful
+    /// for resolving `X::Foo` type markers.
+    pub associated_type_names: Vec<Name>,
+
+    // Impls of a trait. To allow for quicker lookup, the impls are indexed by a
+    // simplified version of their `Self` type: impls with a simplifiable `Self`
+    // are stored in `nonblanket_impls` keyed by it, while all other impls are
+    // stored in `blanket_impls`.
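+    //
+    // For example (informally): an impl with `Self = Vec<T>` is keyed under the
+    // simplified type for `Vec`, whereas a blanket `impl<T> Trait for T` has no
+    // simplifiable `Self` type and lands in `blanket_impls`.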
+    //
+    // A similar division is used within `specialization_graph`, but the ones
+    // here are (1) stored as a flat list for the trait and (2) populated prior
+    // to -- and used while -- determining specialization order.
+    //
+    // FIXME: solve the reentrancy issues and remove these lists in favor of the
+    // ones in `specialization_graph`.
+    //
+    // These lists are tracked by `DepNode::TraitImpls`; we don't use
+    // a DepTrackingMap but instead have the `TraitDef` insert the
+    // required reads/writes.
+
+    /// Impls of the trait.
+    nonblanket_impls: RefCell<
+        FnvHashMap<fast_reject::SimplifiedType, Vec<DefId>>
+    >,
+
+    /// Blanket impls associated with the trait.
+    blanket_impls: RefCell<Vec<DefId>>,
+
+    /// The specialization order for impls of this trait.
+    pub specialization_graph: RefCell<traits::specialization_graph::Graph>,
+
+    /// Various flags
+    pub flags: Cell<TraitFlags>
+}
+
+impl<'tcx> TraitDef<'tcx> {
+    pub fn new(unsafety: hir::Unsafety,
+               paren_sugar: bool,
+               generics: ty::Generics<'tcx>,
+               trait_ref: ty::TraitRef<'tcx>,
+               associated_type_names: Vec<Name>)
+               -> TraitDef<'tcx> {
+        TraitDef {
+            paren_sugar: paren_sugar,
+            unsafety: unsafety,
+            generics: generics,
+            trait_ref: trait_ref,
+            associated_type_names: associated_type_names,
+            nonblanket_impls: RefCell::new(FnvHashMap()),
+            blanket_impls: RefCell::new(vec![]),
+            flags: Cell::new(ty::TraitFlags::NO_TRAIT_FLAGS),
+            specialization_graph: RefCell::new(traits::specialization_graph::Graph::new()),
+        }
+    }
+
+    pub fn def_id(&self) -> DefId {
+        self.trait_ref.def_id
+    }
+
+    // returns None if not yet calculated
+    pub fn object_safety(&self) -> Option<bool> {
+        if self.flags.get().intersects(TraitFlags::OBJECT_SAFETY_VALID) {
+            Some(self.flags.get().intersects(TraitFlags::IS_OBJECT_SAFE))
+        } else {
+            None
+        }
+    }
+
+    pub fn set_object_safety(&self, is_safe: bool) {
+        assert!(self.object_safety().map(|cs| cs == is_safe).unwrap_or(true));
+        self.flags.set(
+            self.flags.get() | if is_safe {
+                TraitFlags::OBJECT_SAFETY_VALID | TraitFlags::IS_OBJECT_SAFE
+            } else {
+                TraitFlags::OBJECT_SAFETY_VALID
+            }
+        );
+    }
+
+    fn write_trait_impls(&self, tcx: &TyCtxt<'tcx>) {
+        tcx.dep_graph.write(DepNode::TraitImpls(self.trait_ref.def_id));
+    }
+
+    fn read_trait_impls(&self, tcx: &TyCtxt<'tcx>) {
+        tcx.dep_graph.read(DepNode::TraitImpls(self.trait_ref.def_id));
+    }
+
+    /// Records a basic trait-to-implementation mapping.
+    ///
+    /// Returns `true` iff the impl has not previously been recorded.
+    fn record_impl(&self,
+                   tcx: &TyCtxt<'tcx>,
+                   impl_def_id: DefId,
+                   impl_trait_ref: TraitRef<'tcx>)
+                   -> bool {
+        debug!("TraitDef::record_impl for {:?}, from {:?}",
+               self, impl_trait_ref);
+
+        // Record the write into the impl set, but only for local
+        // impls: external impls are handled differently.
+        if impl_def_id.is_local() {
+            self.write_trait_impls(tcx);
+        }
+
+        // We don't want to borrow_mut after we already populated all impls,
+        // so check if an impl is present with an immutable borrow first.
+        if let Some(sty) = fast_reject::simplify_type(tcx,
+                                                      impl_trait_ref.self_ty(), false) {
+            if let Some(is) = self.nonblanket_impls.borrow().get(&sty) {
+                if is.contains(&impl_def_id) {
+                    return false; // duplicate - skip
+                }
+            }
+
+            self.nonblanket_impls.borrow_mut().entry(sty).or_insert(vec![]).push(impl_def_id)
+        } else {
+            if self.blanket_impls.borrow().contains(&impl_def_id) {
+                return false; // duplicate - skip
+            }
+            self.blanket_impls.borrow_mut().push(impl_def_id)
+        }
+
+        true
+    }
+
+    /// Records a trait-to-implementation mapping for a crate-local impl.
+    pub fn record_local_impl(&self,
+                             tcx: &TyCtxt<'tcx>,
+                             impl_def_id: DefId,
+                             impl_trait_ref: TraitRef<'tcx>) {
+        assert!(impl_def_id.is_local());
+        let was_new = self.record_impl(tcx, impl_def_id, impl_trait_ref);
+        assert!(was_new);
+    }
+
+    /// Records a trait-to-implementation mapping for a non-local impl.
+    ///
+    /// The `parent_impl` is the immediately-less-specialized impl, or the
+    /// trait's def ID if the impl is not a specialization -- information that
+    /// should be pulled from the metadata.
+    pub fn record_remote_impl(&self,
+                              tcx: &TyCtxt<'tcx>,
+                              impl_def_id: DefId,
+                              impl_trait_ref: TraitRef<'tcx>,
+                              parent_impl: DefId) {
+        assert!(!impl_def_id.is_local());
+
+        // if the impl has not previously been recorded
+        if self.record_impl(tcx, impl_def_id, impl_trait_ref) {
+            // if the impl is non-local, it's placed directly into the
+            // specialization graph using parent information drawn from metadata.
+            self.specialization_graph.borrow_mut()
+                .record_impl_from_cstore(tcx, parent_impl, impl_def_id)
+        }
+    }
+
+    /// Adds a local impl into the specialization graph, returning an error with
+    /// overlap information if the impl overlaps but does not specialize an
+    /// existing impl.
+    pub fn add_impl_for_specialization<'a>(&self,
+                                           tcx: &'a TyCtxt<'tcx>,
+                                           impl_def_id: DefId)
+                                           -> Result<(), traits::Overlap<'a, 'tcx>> {
+        assert!(impl_def_id.is_local());
+
+        self.specialization_graph.borrow_mut()
+            .insert(tcx, impl_def_id)
+    }
+
+    pub fn ancestors<'a>(&'a self, of_impl: DefId) -> specialization_graph::Ancestors<'a, 'tcx> {
+        specialization_graph::ancestors(self, of_impl)
+    }
+
+    pub fn for_each_impl<F: FnMut(DefId)>(&self, tcx: &TyCtxt<'tcx>, mut f: F) {
+        self.read_trait_impls(tcx);
+        tcx.populate_implementations_for_trait_if_necessary(self.trait_ref.def_id);
+
+        for &impl_def_id in self.blanket_impls.borrow().iter() {
+            f(impl_def_id);
+        }
+
+        for v in self.nonblanket_impls.borrow().values() {
+            for &impl_def_id in v {
+                f(impl_def_id);
+            }
+        }
+    }
+
+    /// Iterate over every impl that could possibly match the
+    /// self-type `self_ty`.
+    pub fn for_each_relevant_impl<F: FnMut(DefId)>(&self,
+                                                   tcx: &TyCtxt<'tcx>,
+                                                   self_ty: Ty<'tcx>,
+                                                   mut f: F)
+    {
+        self.read_trait_impls(tcx);
+
+        tcx.populate_implementations_for_trait_if_necessary(self.trait_ref.def_id);
+
+        for &impl_def_id in self.blanket_impls.borrow().iter() {
+            f(impl_def_id);
+        }
+
+        // simplify_type(.., false) basically replaces type parameters and
+        // projections with infer-variables. This is, of course, done on
+        // the impl trait-ref when it is instantiated, but not on the
+        // predicate trait-ref which is passed here.
+        //
+        // for example, if we match `S: Copy` against an impl like
+        // `impl<T:Copy> Copy for Option<T>`, we replace the type variable
+        // in `Option<T>` with an infer variable, to `Option<_>` (this
+        // doesn't actually change fast_reject output), but we don't
+        // replace `S` with anything - this impl of course can't be
+        // selected, and as there are hundreds of similar impls,
+        // considering them would significantly harm performance.
+        if let Some(simp) = fast_reject::simplify_type(tcx, self_ty, true) {
+            if let Some(impls) = self.nonblanket_impls.borrow().get(&simp) {
+                for &impl_def_id in impls {
+                    f(impl_def_id);
+                }
+            }
+        } else {
+            for v in self.nonblanket_impls.borrow().values() {
+                for &impl_def_id in v {
+                    f(impl_def_id);
+                }
+            }
+        }
+    }
+}
+
+bitflags! {
+    flags TraitFlags: u32 {
+        const NO_TRAIT_FLAGS        = 0,
+        const HAS_DEFAULT_IMPL      = 1 << 0,
+        const IS_OBJECT_SAFE        = 1 << 1,
+        const OBJECT_SAFETY_VALID   = 1 << 2,
+        const IMPLS_VALID           = 1 << 3,
+    }
+}
diff --git a/src/librustc/ty/util.rs b/src/librustc/ty/util.rs
new file mode 100644 (file)
index 0000000..60fc474
--- /dev/null
+++ b/src/librustc/ty/util.rs
@@ -0,0 +1,768 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! misc. type-system utilities too small to deserve their own file
+
+use hir::svh::Svh;
+use hir::def_id::DefId;
+use ty::subst;
+use infer;
+use hir::pat_util;
+use traits::{self, ProjectionMode};
+use ty::{self, Ty, TyCtxt, TypeAndMut, TypeFlags, TypeFoldable};
+use ty::{Disr, ParameterEnvironment};
+use ty::layout::{Layout, LayoutError};
+use ty::TypeVariants::*;
+
+use rustc_const_math::{ConstInt, ConstIsize, ConstUsize};
+
+use std::cmp;
+use std::hash::{Hash, SipHasher, Hasher};
+use syntax::ast::{self, Name};
+use syntax::attr::{self, AttrMetaMethods, SignedInt, UnsignedInt};
+use syntax::codemap::Span;
+
+use hir;
+
+pub trait IntTypeExt {
+    fn to_ty<'tcx>(&self, cx: &TyCtxt<'tcx>) -> Ty<'tcx>;
+    fn disr_incr(&self, val: Disr) -> Option<Disr>;
+    fn assert_ty_matches(&self, val: Disr);
+    fn initial_discriminant(&self, tcx: &TyCtxt) -> Disr;
+}
+
+impl IntTypeExt for attr::IntType {
+    fn to_ty<'tcx>(&self, cx: &TyCtxt<'tcx>) -> Ty<'tcx> {
+        match *self {
+            SignedInt(ast::IntTy::I8)      => cx.types.i8,
+            SignedInt(ast::IntTy::I16)     => cx.types.i16,
+            SignedInt(ast::IntTy::I32)     => cx.types.i32,
+            SignedInt(ast::IntTy::I64)     => cx.types.i64,
+            SignedInt(ast::IntTy::Is)   => cx.types.isize,
+            UnsignedInt(ast::UintTy::U8)    => cx.types.u8,
+            UnsignedInt(ast::UintTy::U16)   => cx.types.u16,
+            UnsignedInt(ast::UintTy::U32)   => cx.types.u32,
+            UnsignedInt(ast::UintTy::U64)   => cx.types.u64,
+            UnsignedInt(ast::UintTy::Us) => cx.types.usize,
+        }
+    }
+
+    fn initial_discriminant(&self, tcx: &TyCtxt) -> Disr {
+        match *self {
+            SignedInt(ast::IntTy::I8)    => ConstInt::I8(0),
+            SignedInt(ast::IntTy::I16)   => ConstInt::I16(0),
+            SignedInt(ast::IntTy::I32)   => ConstInt::I32(0),
+            SignedInt(ast::IntTy::I64)   => ConstInt::I64(0),
+            SignedInt(ast::IntTy::Is) => match tcx.sess.target.int_type {
+                ast::IntTy::I32 => ConstInt::Isize(ConstIsize::Is32(0)),
+                ast::IntTy::I64 => ConstInt::Isize(ConstIsize::Is64(0)),
+                _ => bug!(),
+            },
+            UnsignedInt(ast::UintTy::U8)  => ConstInt::U8(0),
+            UnsignedInt(ast::UintTy::U16) => ConstInt::U16(0),
+            UnsignedInt(ast::UintTy::U32) => ConstInt::U32(0),
+            UnsignedInt(ast::UintTy::U64) => ConstInt::U64(0),
+            UnsignedInt(ast::UintTy::Us) => match tcx.sess.target.uint_type {
+                ast::UintTy::U32 => ConstInt::Usize(ConstUsize::Us32(0)),
+                ast::UintTy::U64 => ConstInt::Usize(ConstUsize::Us64(0)),
+                _ => bug!(),
+            },
+        }
+    }
+
+    fn assert_ty_matches(&self, val: Disr) {
+        match (*self, val) {
+            (SignedInt(ast::IntTy::I8), ConstInt::I8(_)) => {},
+            (SignedInt(ast::IntTy::I16), ConstInt::I16(_)) => {},
+            (SignedInt(ast::IntTy::I32), ConstInt::I32(_)) => {},
+            (SignedInt(ast::IntTy::I64), ConstInt::I64(_)) => {},
+            (SignedInt(ast::IntTy::Is), ConstInt::Isize(_)) => {},
+            (UnsignedInt(ast::UintTy::U8), ConstInt::U8(_)) => {},
+            (UnsignedInt(ast::UintTy::U16), ConstInt::U16(_)) => {},
+            (UnsignedInt(ast::UintTy::U32), ConstInt::U32(_)) => {},
+            (UnsignedInt(ast::UintTy::U64), ConstInt::U64(_)) => {},
+            (UnsignedInt(ast::UintTy::Us), ConstInt::Usize(_)) => {},
+            _ => bug!("disr type mismatch: {:?} vs {:?}", self, val),
+        }
+    }
+
+    fn disr_incr(&self, val: Disr) -> Option<Disr> {
+        self.assert_ty_matches(val);
+        (val + ConstInt::Infer(1)).ok()
+    }
+}
+
+
+#[derive(Copy, Clone)]
+pub enum CopyImplementationError {
+    InfrigingField(Name),
+    InfrigingVariant(Name),
+    NotAnAdt,
+    HasDestructor
+}
+
+/// Describes whether a type is representable. For types that are not
+/// representable, 'SelfRecursive' and 'ContainsRecursive' are used to
+/// distinguish between types that are recursive with themselves and types that
+/// contain a different recursive type. These cases can therefore be treated
+/// differently when reporting errors.
+///
+/// The ordering of the cases is significant. They are sorted so that cmp::max
+/// will keep the "more erroneous" of two values.
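+///
+/// For example (informally): `struct A { x: A }` is `SelfRecursive`, while a
+/// separate `struct B { a: A }` that merely contains the recursive `A` is
+/// `ContainsRecursive`.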
+#[derive(Copy, Clone, PartialOrd, Ord, Eq, PartialEq, Debug)]
+pub enum Representability {
+    Representable,
+    ContainsRecursive,
+    SelfRecursive,
+}
+
+impl<'a, 'tcx> ParameterEnvironment<'a, 'tcx> {
+    pub fn can_type_implement_copy(&self, self_type: Ty<'tcx>, span: Span)
+                                   -> Result<(),CopyImplementationError> {
+        let tcx = self.tcx;
+
+        // FIXME: (@jroesch) float this code up
+        let infcx = infer::new_infer_ctxt(tcx,
+                                          &tcx.tables,
+                                          Some(self.clone()),
+                                          ProjectionMode::Topmost);
+
+        let adt = match self_type.sty {
+            ty::TyStruct(struct_def, substs) => {
+                for field in struct_def.all_fields() {
+                    let field_ty = field.ty(tcx, substs);
+                    if infcx.type_moves_by_default(field_ty, span) {
+                        return Err(CopyImplementationError::InfrigingField(
+                            field.name))
+                    }
+                }
+                struct_def
+            }
+            ty::TyEnum(enum_def, substs) => {
+                for variant in &enum_def.variants {
+                    for field in &variant.fields {
+                        let field_ty = field.ty(tcx, substs);
+                        if infcx.type_moves_by_default(field_ty, span) {
+                            return Err(CopyImplementationError::InfrigingVariant(
+                                variant.name))
+                        }
+                    }
+                }
+                enum_def
+            }
+            _ => return Err(CopyImplementationError::NotAnAdt),
+        };
+
+        if adt.has_dtor() {
+            return Err(CopyImplementationError::HasDestructor)
+        }
+
+        Ok(())
+    }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    pub fn pat_contains_ref_binding(&self, pat: &hir::Pat) -> Option<hir::Mutability> {
+        pat_util::pat_contains_ref_binding(&self.def_map, pat)
+    }
+
+    pub fn arm_contains_ref_binding(&self, arm: &hir::Arm) -> Option<hir::Mutability> {
+        pat_util::arm_contains_ref_binding(&self.def_map, arm)
+    }
+
+    /// Returns the type of element at index `i` in tuple or tuple-like type `t`.
+    /// For an enum `t`, `variant` is None only if `t` is a univariant enum.
+    pub fn positional_element_ty(&self,
+                                 ty: Ty<'tcx>,
+                                 i: usize,
+                                 variant: Option<DefId>) -> Option<Ty<'tcx>> {
+        match (&ty.sty, variant) {
+            (&TyStruct(def, substs), None) => {
+                def.struct_variant().fields.get(i).map(|f| f.ty(self, substs))
+            }
+            (&TyEnum(def, substs), Some(vid)) => {
+                def.variant_with_id(vid).fields.get(i).map(|f| f.ty(self, substs))
+            }
+            (&TyEnum(def, substs), None) => {
+                assert!(def.is_univariant());
+                def.variants[0].fields.get(i).map(|f| f.ty(self, substs))
+            }
+            (&TyTuple(ref v), None) => v.get(i).cloned(),
+            _ => None
+        }
+    }
+
+    /// Returns the type of element at field `n` in struct or struct-like type `t`.
+    /// For an enum `t`, `variant` must be some def id.
+    pub fn named_element_ty(&self,
+                            ty: Ty<'tcx>,
+                            n: Name,
+                            variant: Option<DefId>) -> Option<Ty<'tcx>> {
+        match (&ty.sty, variant) {
+            (&TyStruct(def, substs), None) => {
+                def.struct_variant().find_field_named(n).map(|f| f.ty(self, substs))
+            }
+            (&TyEnum(def, substs), Some(vid)) => {
+                def.variant_with_id(vid).find_field_named(n).map(|f| f.ty(self, substs))
+            }
+            _ => return None
+        }
+    }
+
+    /// Returns the IntType representation.
+    /// This used to ensure `int_ty` doesn't contain `usize` and `isize`
+    /// by converting them to their actual types. That doesn't happen anymore.
+    pub fn enum_repr_type(&self, opt_hint: Option<&attr::ReprAttr>) -> attr::IntType {
+        match opt_hint {
+            // Feed in the given type
+            Some(&attr::ReprInt(_, int_t)) => int_t,
+            // ... but provide sensible default if none provided
+            //
+            // NB. Historically `fn enum_variants` generated i64 here, while
+            // rustc_typeck::check would generate isize.
+            _ => SignedInt(ast::IntTy::Is),
+        }
+    }
+
+    /// Returns the deeply last field of nested structures, or the same type,
+    /// if not a structure at all. Corresponds to the only possible unsized
+    /// field, and its type can be used to determine unsizing strategy.
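+    ///
+    /// For example (informally), given `struct A { b: B }` and `struct B { c: [u8] }`,
+    /// applying `struct_tail` to `A` yields the unsized tail `[u8]`.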
+    pub fn struct_tail(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
+        while let TyStruct(def, substs) = ty.sty {
+            match def.struct_variant().fields.last() {
+                Some(f) => ty = f.ty(self, substs),
+                None => break
+            }
+        }
+        ty
+    }
+
+    /// Same as applying struct_tail on `source` and `target`, but only
+    /// keeps going as long as the two types are instances of the same
+    /// structure definitions.
+    /// For `(Foo<Foo<T>>, Foo<Trait>)`, the result will be `(Foo<T>, Trait)`,
+    /// whereas struct_tail produces `T`, and `Trait`, respectively.
+    pub fn struct_lockstep_tails(&self,
+                                 source: Ty<'tcx>,
+                                 target: Ty<'tcx>)
+                                 -> (Ty<'tcx>, Ty<'tcx>) {
+        let (mut a, mut b) = (source, target);
+        while let (&TyStruct(a_def, a_substs), &TyStruct(b_def, b_substs)) = (&a.sty, &b.sty) {
+            if a_def != b_def {
+                break;
+            }
+            if let Some(f) = a_def.struct_variant().fields.last() {
+                a = f.ty(self, a_substs);
+                b = f.ty(self, b_substs);
+            } else {
+                break;
+            }
+        }
+        (a, b)
+    }
+
+    /// Given a set of predicates that apply to an object type, returns
+    /// the region bounds that the (erased) `Self` type must
+    /// outlive. Precisely *because* the `Self` type is erased, the
+    /// parameter `erased_self_ty` must be supplied to indicate what type
+    /// has been used to represent `Self` in the predicates
+    /// themselves. This should really be a unique type; `FreshTy(0)` is a
+    /// popular choice.
+    ///
+    /// NB: in some cases, particularly around higher-ranked bounds,
+    /// this function returns a kind of conservative approximation.
+    /// That is, all regions returned by this function are definitely
+    /// required, but there may be other region bounds that are not
+    /// returned, as well as requirements like `for<'a> T: 'a`.
+    ///
+    /// Requires that trait definitions have been processed so that we can
+    /// elaborate predicates and walk supertraits.
+    pub fn required_region_bounds(&self,
+                                  erased_self_ty: Ty<'tcx>,
+                                  predicates: Vec<ty::Predicate<'tcx>>)
+                                  -> Vec<ty::Region> {
+        debug!("required_region_bounds(erased_self_ty={:?}, predicates={:?})",
+               erased_self_ty,
+               predicates);
+
+        assert!(!erased_self_ty.has_escaping_regions());
+
+        traits::elaborate_predicates(self, predicates)
+            .filter_map(|predicate| {
+                match predicate {
+                    ty::Predicate::Projection(..) |
+                    ty::Predicate::Trait(..) |
+                    ty::Predicate::Equate(..) |
+                    ty::Predicate::WellFormed(..) |
+                    ty::Predicate::ObjectSafe(..) |
+                    ty::Predicate::RegionOutlives(..) => {
+                        None
+                    }
+                    ty::Predicate::TypeOutlives(ty::Binder(ty::OutlivesPredicate(t, r))) => {
+                        // Search for a bound of the form `erased_self_ty
+                        // : 'a`, but be wary of something like `for<'a>
+                        // erased_self_ty : 'a` (we interpret a
+                        // higher-ranked bound like that as 'static,
+                        // though at present the code in `fulfill.rs`
+                        // considers such bounds to be unsatisfiable, so
+                        // it's kind of a moot point since you could never
+                        // construct such an object, but this seems
+                        // correct even if that code changes).
+                        if t == erased_self_ty && !r.has_escaping_regions() {
+                            Some(r)
+                        } else {
+                            None
+                        }
+                    }
+                }
+            })
+            .collect()
+    }
+
+    /// Creates a hash of the type `Ty` which will be the same no matter what crate
+    /// context it's calculated within. This is used by the `type_id` intrinsic.
+    pub fn hash_crate_independent(&self, ty: Ty<'tcx>, svh: &Svh) -> u64 {
+        let mut state = SipHasher::new();
+        helper(self, ty, svh, &mut state);
+        return state.finish();
+
+        fn helper<'tcx>(tcx: &TyCtxt<'tcx>, ty: Ty<'tcx>, svh: &Svh,
+                        state: &mut SipHasher) {
+            macro_rules! byte { ($b:expr) => { ($b as u8).hash(state) } }
+            macro_rules! hash { ($e:expr) => { $e.hash(state) }  }
+
+            let region = |state: &mut SipHasher, r: ty::Region| {
+                match r {
+                    ty::ReStatic => {}
+                    ty::ReLateBound(db, ty::BrAnon(i)) => {
+                        db.hash(state);
+                        i.hash(state);
+                    }
+                    ty::ReEmpty |
+                    ty::ReEarlyBound(..) |
+                    ty::ReLateBound(..) |
+                    ty::ReFree(..) |
+                    ty::ReScope(..) |
+                    ty::ReVar(..) |
+                    ty::ReSkolemized(..) => {
+                        bug!("unexpected region found when hashing a type")
+                    }
+                }
+            };
+            let did = |state: &mut SipHasher, did: DefId| {
+                let h = if did.is_local() {
+                    svh.clone()
+                } else {
+                    tcx.sess.cstore.crate_hash(did.krate)
+                };
+                h.as_str().hash(state);
+                did.index.hash(state);
+            };
+            let mt = |state: &mut SipHasher, mt: TypeAndMut| {
+                mt.mutbl.hash(state);
+            };
+            let fn_sig = |state: &mut SipHasher, sig: &ty::Binder<ty::FnSig<'tcx>>| {
+                let sig = tcx.anonymize_late_bound_regions(sig).0;
+                for a in &sig.inputs { helper(tcx, *a, svh, state); }
+                if let ty::FnConverging(output) = sig.output {
+                    helper(tcx, output, svh, state);
+                }
+            };
+            ty.maybe_walk(|ty| {
+                match ty.sty {
+                    TyBool => byte!(2),
+                    TyChar => byte!(3),
+                    TyInt(i) => {
+                        byte!(4);
+                        hash!(i);
+                    }
+                    TyUint(u) => {
+                        byte!(5);
+                        hash!(u);
+                    }
+                    TyFloat(f) => {
+                        byte!(6);
+                        hash!(f);
+                    }
+                    TyStr => {
+                        byte!(7);
+                    }
+                    TyEnum(d, _) => {
+                        byte!(8);
+                        did(state, d.did);
+                    }
+                    TyBox(_) => {
+                        byte!(9);
+                    }
+                    TyArray(_, n) => {
+                        byte!(10);
+                        n.hash(state);
+                    }
+                    TySlice(_) => {
+                        byte!(11);
+                    }
+                    TyRawPtr(m) => {
+                        byte!(12);
+                        mt(state, m);
+                    }
+                    TyRef(r, m) => {
+                        byte!(13);
+                        region(state, *r);
+                        mt(state, m);
+                    }
+                    TyFnDef(def_id, _, _) => {
+                        byte!(14);
+                        hash!(def_id);
+                    }
+                    TyFnPtr(ref b) => {
+                        byte!(15);
+                        hash!(b.unsafety);
+                        hash!(b.abi);
+                        fn_sig(state, &b.sig);
+                        return false;
+                    }
+                    TyTrait(ref data) => {
+                        byte!(17);
+                        did(state, data.principal_def_id());
+                        hash!(data.bounds);
+
+                        let principal = tcx.anonymize_late_bound_regions(&data.principal).0;
+                        for subty in &principal.substs.types {
+                            helper(tcx, subty, svh, state);
+                        }
+
+                        return false;
+                    }
+                    TyStruct(d, _) => {
+                        byte!(18);
+                        did(state, d.did);
+                    }
+                    TyTuple(ref inner) => {
+                        byte!(19);
+                        hash!(inner.len());
+                    }
+                    TyParam(p) => {
+                        byte!(20);
+                        hash!(p.space);
+                        hash!(p.idx);
+                        hash!(p.name.as_str());
+                    }
+                    TyInfer(_) => bug!(),
+                    TyError => byte!(21),
+                    TyClosure(d, _) => {
+                        byte!(22);
+                        did(state, d);
+                    }
+                    TyProjection(ref data) => {
+                        byte!(23);
+                        did(state, data.trait_ref.def_id);
+                        hash!(data.item_name.as_str());
+                    }
+                }
+                true
+            });
+        }
+    }
+
+    /// Returns true if this ADT is a dtorck type.
+    ///
+    /// Invoking the destructor of a dtorck type during usual cleanup
+    /// (e.g. the glue emitted for stack unwinding) requires all
+    /// lifetimes in the type-structure of `adt` to strictly outlive
+    /// the adt value itself.
+    ///
+    /// If `adt` is not dtorck, then the adt's destructor can be
+    /// invoked even when there are lifetimes in the type-structure of
+    /// `adt` that do not strictly outlive the adt value itself.
+    /// (This allows programs to make cyclic structures without
+    /// resorting to unsafe means; see RFCs 769 and 1238).
+    pub fn is_adt_dtorck(&self, adt: ty::AdtDef<'tcx>) -> bool {
+        let dtor_method = match adt.destructor() {
+            Some(dtor) => dtor,
+            None => return false
+        };
+
+        // RFC 1238: if the destructor method is tagged with the
+        // attribute `unsafe_destructor_blind_to_params`, then the
+        // compiler is being instructed to *assume* that the
+        // destructor will not access borrowed data,
+        // even if such data is otherwise reachable.
+        //
+        // Such access can be in plain sight (e.g. dereferencing
+        // `*foo.0` of `Foo<'a>(&'a u32)`) or indirectly hidden
+        // (e.g. calling `foo.0.clone()` of `Foo<T:Clone>`).
+        return !self.has_attr(dtor_method, "unsafe_destructor_blind_to_params");
+    }
+}
+
+impl<'tcx> ty::TyS<'tcx> {
+    fn impls_bound<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
+                       bound: ty::BuiltinBound,
+                       span: Span)
+                       -> bool
+    {
+        let tcx = param_env.tcx;
+        let infcx = infer::new_infer_ctxt(tcx,
+                                          &tcx.tables,
+                                          Some(param_env.clone()),
+                                          ProjectionMode::Topmost);
+
+        let is_impld = traits::type_known_to_meet_builtin_bound(&infcx,
+                                                                self, bound, span);
+
+        debug!("Ty::impls_bound({:?}, {:?}) = {:?}",
+               self, bound, is_impld);
+
+        is_impld
+    }
+
+    // FIXME (@jroesch): I made this public to use it; not sure if it should be private
+    pub fn moves_by_default<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
+                           span: Span) -> bool {
+        if self.flags.get().intersects(TypeFlags::MOVENESS_CACHED) {
+            return self.flags.get().intersects(TypeFlags::MOVES_BY_DEFAULT);
+        }
+
+        assert!(!self.needs_infer());
+
+        // Fast-path for primitive types
+        let result = match self.sty {
+            TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
+            TyRawPtr(..) | TyFnDef(..) | TyFnPtr(_) | TyRef(_, TypeAndMut {
+                mutbl: hir::MutImmutable, ..
+            }) => Some(false),
+
+            TyStr | TyBox(..) | TyRef(_, TypeAndMut {
+                mutbl: hir::MutMutable, ..
+            }) => Some(true),
+
+            TyArray(..) | TySlice(_) | TyTrait(..) | TyTuple(..) |
+            TyClosure(..) | TyEnum(..) | TyStruct(..) |
+            TyProjection(..) | TyParam(..) | TyInfer(..) | TyError => None
+        }.unwrap_or_else(|| !self.impls_bound(param_env, ty::BoundCopy, span));
+
+        if !self.has_param_types() && !self.has_self_ty() {
+            self.flags.set(self.flags.get() | if result {
+                TypeFlags::MOVENESS_CACHED | TypeFlags::MOVES_BY_DEFAULT
+            } else {
+                TypeFlags::MOVENESS_CACHED
+            });
+        }
+
+        result
+    }
+
+    #[inline]
+    pub fn is_sized<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
+                        span: Span) -> bool
+    {
+        if self.flags.get().intersects(TypeFlags::SIZEDNESS_CACHED) {
+            return self.flags.get().intersects(TypeFlags::IS_SIZED);
+        }
+
+        self.is_sized_uncached(param_env, span)
+    }
+
+    fn is_sized_uncached<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
+                             span: Span) -> bool {
+        assert!(!self.needs_infer());
+
+        // Fast-path for primitive types
+        let result = match self.sty {
+            TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
+            TyBox(..) | TyRawPtr(..) | TyRef(..) | TyFnDef(..) | TyFnPtr(_) |
+            TyArray(..) | TyTuple(..) | TyClosure(..) => Some(true),
+
+            TyStr | TyTrait(..) | TySlice(_) => Some(false),
+
+            TyEnum(..) | TyStruct(..) | TyProjection(..) | TyParam(..) |
+            TyInfer(..) | TyError => None
+        }.unwrap_or_else(|| self.impls_bound(param_env, ty::BoundSized, span));
+
+        if !self.has_param_types() && !self.has_self_ty() {
+            self.flags.set(self.flags.get() | if result {
+                TypeFlags::SIZEDNESS_CACHED | TypeFlags::IS_SIZED
+            } else {
+                TypeFlags::SIZEDNESS_CACHED
+            });
+        }
+
+        result
+    }
+
+    #[inline]
+    pub fn layout<'a>(&'tcx self, infcx: &infer::InferCtxt<'a, 'tcx>)
+                      -> Result<&'tcx Layout, LayoutError<'tcx>> {
+        let can_cache = !self.has_param_types() && !self.has_self_ty();
+        if can_cache {
+            if let Some(&cached) = infcx.tcx.layout_cache.borrow().get(&self) {
+                return Ok(cached);
+            }
+        }
+
+        let layout = Layout::compute_uncached(self, infcx)?;
+        let layout = infcx.tcx.intern_layout(layout);
+        if can_cache {
+            infcx.tcx.layout_cache.borrow_mut().insert(self, layout);
+        }
+        Ok(layout)
+    }
+
+
+    /// Check whether a type is representable. This means it cannot contain unboxed
+    /// structural recursion. This check is needed for structs and enums.
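+    ///
+    /// For example (informally), `struct Foo { x: Foo }` is not representable,
+    /// whereas `struct Foo { x: Box<Foo> }` is, because the recursion goes
+    /// through a pointer.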
+    pub fn is_representable(&'tcx self, cx: &TyCtxt<'tcx>, sp: Span) -> Representability {
+
+        // Iterate until something non-representable is found
+        fn find_nonrepresentable<'tcx, It: Iterator<Item=Ty<'tcx>>>(cx: &TyCtxt<'tcx>,
+                                                                    sp: Span,
+                                                                    seen: &mut Vec<Ty<'tcx>>,
+                                                                    iter: It)
+                                                                    -> Representability {
+            iter.fold(Representability::Representable,
+                      |r, ty| cmp::max(r, is_type_structurally_recursive(cx, sp, seen, ty)))
+        }
+
+        fn are_inner_types_recursive<'tcx>(cx: &TyCtxt<'tcx>, sp: Span,
+                                           seen: &mut Vec<Ty<'tcx>>, ty: Ty<'tcx>)
+                                           -> Representability {
+            match ty.sty {
+                TyTuple(ref ts) => {
+                    find_nonrepresentable(cx, sp, seen, ts.iter().cloned())
+                }
+                // Fixed-length vectors.
+                // FIXME(#11924) Behavior undecided for zero-length vectors.
+                TyArray(ty, _) => {
+                    is_type_structurally_recursive(cx, sp, seen, ty)
+                }
+                TyStruct(def, substs) | TyEnum(def, substs) => {
+                    find_nonrepresentable(cx,
+                                          sp,
+                                          seen,
+                                          def.all_fields().map(|f| f.ty(cx, substs)))
+                }
+                TyClosure(..) => {
+                    // this check is run on type definitions, so we don't expect
+                    // to see closure types
+                    bug!("requires check invoked on inapplicable type: {:?}", ty)
+                }
+                _ => Representability::Representable,
+            }
+        }
+
+        fn same_struct_or_enum<'tcx>(ty: Ty<'tcx>, def: ty::AdtDef<'tcx>) -> bool {
+            match ty.sty {
+                TyStruct(ty_def, _) | TyEnum(ty_def, _) => {
+                     ty_def == def
+                }
+                _ => false
+            }
+        }
+
+        fn same_type<'tcx>(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
+            match (&a.sty, &b.sty) {
+                (&TyStruct(did_a, ref substs_a), &TyStruct(did_b, ref substs_b)) |
+                (&TyEnum(did_a, ref substs_a), &TyEnum(did_b, ref substs_b)) => {
+                    if did_a != did_b {
+                        return false;
+                    }
+
+                    let types_a = substs_a.types.get_slice(subst::TypeSpace);
+                    let types_b = substs_b.types.get_slice(subst::TypeSpace);
+
+                    let mut pairs = types_a.iter().zip(types_b);
+
+                    pairs.all(|(&a, &b)| same_type(a, b))
+                }
+                _ => {
+                    a == b
+                }
+            }
+        }
+
+        // Does the type `ty` directly (without indirection through a pointer)
+        // contain any types on stack `seen`?
+        fn is_type_structurally_recursive<'tcx>(cx: &TyCtxt<'tcx>,
+                                                sp: Span,
+                                                seen: &mut Vec<Ty<'tcx>>,
+                                                ty: Ty<'tcx>) -> Representability {
+            debug!("is_type_structurally_recursive: {:?}", ty);
+
+            match ty.sty {
+                TyStruct(def, _) | TyEnum(def, _) => {
+                    {
+                        // Iterate through stack of previously seen types.
+                        let mut iter = seen.iter();
+
+                        // The first item in `seen` is the type we are actually curious about.
+                        // We want to return SelfRecursive if this type contains itself.
+                        // It is important that we DON'T take generic parameters into account
+                        // for this check, so that Bar<T> in this example counts as SelfRecursive:
+                        //
+                        // struct Foo;
+                        // struct Bar<T> { x: Bar<Foo> }
+
+                        match iter.next() {
+                            Some(&seen_type) => {
+                                if same_struct_or_enum(seen_type, def) {
+                                    debug!("SelfRecursive: {:?} contains {:?}",
+                                           seen_type,
+                                           ty);
+                                    return Representability::SelfRecursive;
+                                }
+                            }
+                            None => {}
+                        }
+
+                        // We also need to know whether the first item contains other types
+                        // that are structurally recursive. If we don't catch this case, we
+                        // will recurse infinitely for some inputs.
+                        //
+                        // It is important that we DO take generic parameters into account
+                        // here, so that code like this is considered SelfRecursive, not
+                        // ContainsRecursive:
+                        //
+                        // struct Foo { x: Option<Option<Foo>> }
+
+                        for &seen_type in iter {
+                            if same_type(ty, seen_type) {
+                                debug!("ContainsRecursive: {:?} contains {:?}",
+                                       seen_type,
+                                       ty);
+                                return Representability::ContainsRecursive;
+                            }
+                        }
+                    }
+
+                    // For structs and enums, track all previously seen types by pushing them
+                    // onto the 'seen' stack.
+                    seen.push(ty);
+                    let out = are_inner_types_recursive(cx, sp, seen, ty);
+                    seen.pop();
+                    out
+                }
+                _ => {
+                    // No need to push in other cases.
+                    are_inner_types_recursive(cx, sp, seen, ty)
+                }
+            }
+        }
+
+        debug!("is_type_representable: {:?}", self);
+
+        // To avoid a stack overflow when checking an enum variant or struct that
+        // contains a different, structurally recursive type, maintain a stack
+        // of seen types and check recursion for each of them (issues #3008, #3779).
+        let mut seen: Vec<Ty> = Vec::new();
+        let r = is_type_structurally_recursive(cx, sp, &mut seen, self);
+        debug!("is_type_representable: {:?} is {:?}", self, r);
+        r
+    }
+}
diff --git a/src/librustc/ty/walk.rs b/src/librustc/ty/walk.rs
new file mode 100644 (file)
index 0000000..fa7c9b7
--- /dev/null
+++ b/src/librustc/ty/walk.rs
@@ -0,0 +1,129 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! An iterator over the type substructure.
+//! WARNING: this does not keep track of the region depth.
+
+use ty::{self, Ty};
+use std::iter::Iterator;
+use std::vec::IntoIter;
+
+pub struct TypeWalker<'tcx> {
+    stack: Vec<Ty<'tcx>>,
+    last_subtree: usize,
+}
+
+impl<'tcx> TypeWalker<'tcx> {
+    pub fn new(ty: Ty<'tcx>) -> TypeWalker<'tcx> {
+        TypeWalker { stack: vec!(ty), last_subtree: 1, }
+    }
+
+    /// Skips the subtree of types corresponding to the last type
+    /// returned by `next()`.
+    ///
+    /// Example: Imagine you are walking `Foo<Bar<int>, usize>`.
+    ///
+    /// ```
+    /// let mut iter: TypeWalker = ...;
+    /// iter.next(); // yields Foo
+    /// iter.next(); // yields Bar<int>
+    /// iter.skip_current_subtree(); // skips int
+    /// iter.next(); // yields usize
+    /// ```
+    pub fn skip_current_subtree(&mut self) {
+        self.stack.truncate(self.last_subtree);
+    }
+}
+
+impl<'tcx> Iterator for TypeWalker<'tcx> {
+    type Item = Ty<'tcx>;
+
+    fn next(&mut self) -> Option<Ty<'tcx>> {
+        debug!("next(): stack={:?}", self.stack);
+        match self.stack.pop() {
+            None => {
+                return None;
+            }
+            Some(ty) => {
+                self.last_subtree = self.stack.len();
+                push_subtypes(&mut self.stack, ty);
+                debug!("next: stack={:?}", self.stack);
+                Some(ty)
+            }
+        }
+    }
+}
+
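+/// Returns an iterator over the immediate subtypes of `ty` (a single level of
+/// the type tree), without recursing into those subtypes.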
+pub fn walk_shallow<'tcx>(ty: Ty<'tcx>) -> IntoIter<Ty<'tcx>> {
+    let mut stack = vec![];
+    push_subtypes(&mut stack, ty);
+    stack.into_iter()
+}
+
+fn push_subtypes<'tcx>(stack: &mut Vec<Ty<'tcx>>, parent_ty: Ty<'tcx>) {
+    match parent_ty.sty {
+        ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) |
+        ty::TyStr | ty::TyInfer(_) | ty::TyParam(_) | ty::TyError => {
+        }
+        ty::TyBox(ty) | ty::TyArray(ty, _) | ty::TySlice(ty) => {
+            stack.push(ty);
+        }
+        ty::TyRawPtr(ref mt) | ty::TyRef(_, ref mt) => {
+            stack.push(mt.ty);
+        }
+        ty::TyProjection(ref data) => {
+            push_reversed(stack, data.trait_ref.substs.types.as_slice());
+        }
+        ty::TyTrait(box ty::TraitTy { ref principal, ref bounds }) => {
+            push_reversed(stack, principal.substs().types.as_slice());
+            push_reversed(stack, &bounds.projection_bounds.iter().map(|pred| {
+                pred.0.ty
+            }).collect::<Vec<_>>());
+        }
+        ty::TyEnum(_, ref substs) |
+        ty::TyStruct(_, ref substs) => {
+            push_reversed(stack, substs.types.as_slice());
+        }
+        ty::TyClosure(_, ref substs) => {
+            push_reversed(stack, substs.func_substs.types.as_slice());
+            push_reversed(stack, &substs.upvar_tys);
+        }
+        ty::TyTuple(ref ts) => {
+            push_reversed(stack, ts);
+        }
+        ty::TyFnDef(_, substs, ref ft) => {
+            push_reversed(stack, substs.types.as_slice());
+            push_sig_subtypes(stack, &ft.sig);
+        }
+        ty::TyFnPtr(ref ft) => {
+            push_sig_subtypes(stack, &ft.sig);
+        }
+    }
+}
+
+fn push_sig_subtypes<'tcx>(stack: &mut Vec<Ty<'tcx>>, sig: &ty::PolyFnSig<'tcx>) {
+    match sig.0.output {
+        ty::FnConverging(output) => { stack.push(output); }
+        ty::FnDiverging => { }
+    }
+    push_reversed(stack, &sig.0.inputs);
+}
+
+fn push_reversed<'tcx>(stack: &mut Vec<Ty<'tcx>>, tys: &[Ty<'tcx>]) {
+    // We push slices on the stack in reverse order so as to
+    // maintain a pre-order traversal. As of the time of this
+    // writing, the fact that the traversal is pre-order is not
+    // known to be significant to any code, but it seems like the
+    // natural order one would expect (basically, the order of the
+    // types as they are written).
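+    //
+    // For example, for a tuple type `(A, B)` we push `B` and then `A`, so that
+    // `A` ends up on top of the stack and is popped (visited) first.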
+    for &ty in tys.iter().rev() {
+        stack.push(ty);
+    }
+}
diff --git a/src/librustc/ty/wf.rs b/src/librustc/ty/wf.rs
new file mode 100644 (file)
index 0000000..9ea634b
--- /dev/null
+++ b/src/librustc/ty/wf.rs
@@ -0,0 +1,509 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use hir::def_id::DefId;
+use infer::InferCtxt;
+use ty::outlives::{self, Component};
+use ty::subst::Substs;
+use traits;
+use ty::{self, ToPredicate, Ty, TyCtxt, TypeFoldable};
+use std::iter::once;
+use syntax::ast;
+use syntax::codemap::Span;
+use util::common::ErrorReported;
+
+/// Returns the set of obligations needed to make `ty` well-formed.
+/// If `ty` contains unresolved inference variables, this may include
+/// further WF obligations. However, if `ty` IS an unresolved
+/// inference variable, returns `None`, because we are not able to
+/// make any progress at all. This is to prevent "livelock" where we
+/// say "$0 is WF if $0 is WF".
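+/// (Informally: asked about an unresolved variable `$0` itself this returns
+/// `None`, whereas for a type such as `Vec<$0>` it can still make progress
+/// and emit obligations.)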
+pub fn obligations<'a,'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                            body_id: ast::NodeId,
+                            ty: Ty<'tcx>,
+                            span: Span)
+                            -> Option<Vec<traits::PredicateObligation<'tcx>>>
+{
+    let mut wf = WfPredicates { infcx: infcx,
+                                body_id: body_id,
+                                span: span,
+                                out: vec![] };
+    if wf.compute(ty) {
+        debug!("wf::obligations({:?}, body_id={:?}) = {:?}", ty, body_id, wf.out);
+        let result = wf.normalize();
+        debug!("wf::obligations({:?}, body_id={:?}) ~~> {:?}", ty, body_id, result);
+        Some(result)
+    } else {
+        None // no progress made, return None
+    }
+}
+
+/// Returns the obligations that make this trait reference
+/// well-formed.  For example, if there is a trait `Set` defined like
+/// `trait Set<K:Eq>`, then the trait reference `Foo: Set<Bar>` is WF
+/// if `Bar: Eq`.
+pub fn trait_obligations<'a,'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                                  body_id: ast::NodeId,
+                                  trait_ref: &ty::TraitRef<'tcx>,
+                                  span: Span)
+                                  -> Vec<traits::PredicateObligation<'tcx>>
+{
+    let mut wf = WfPredicates { infcx: infcx, body_id: body_id, span: span, out: vec![] };
+    wf.compute_trait_ref(trait_ref);
+    wf.normalize()
+}
+
+pub fn predicate_obligations<'a,'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+                                      body_id: ast::NodeId,
+                                      predicate: &ty::Predicate<'tcx>,
+                                      span: Span)
+                                      -> Vec<traits::PredicateObligation<'tcx>>
+{
+    let mut wf = WfPredicates { infcx: infcx, body_id: body_id, span: span, out: vec![] };
+
+    // (*) ok to skip binders, because wf code is prepared for it
+    match *predicate {
+        ty::Predicate::Trait(ref t) => {
+            wf.compute_trait_ref(&t.skip_binder().trait_ref); // (*)
+        }
+        ty::Predicate::Equate(ref t) => {
+            wf.compute(t.skip_binder().0);
+            wf.compute(t.skip_binder().1);
+        }
+        ty::Predicate::RegionOutlives(..) => {
+        }
+        ty::Predicate::TypeOutlives(ref t) => {
+            wf.compute(t.skip_binder().0);
+        }
+        ty::Predicate::Projection(ref t) => {
+            let t = t.skip_binder(); // (*)
+            wf.compute_projection(t.projection_ty);
+            wf.compute(t.ty);
+        }
+        ty::Predicate::WellFormed(t) => {
+            wf.compute(t);
+        }
+        ty::Predicate::ObjectSafe(_) => {
+        }
+    }
+
+    wf.normalize()
+}
+
+/// Implied bounds are region relationships that we deduce
+/// automatically.  The idea is that (e.g.) a caller must check that a
+/// function's argument types are well-formed immediately before
+/// calling that fn, and hence the *callee* can assume that its
+/// argument types are well-formed. This may imply certain relationships
+/// between generic parameters. For example:
+///
+///     fn foo<'a,T>(x: &'a T)
+///
+/// can only be called with a `'a` and `T` such that `&'a T` is WF.
+/// For `&'a T` to be WF, `T: 'a` must hold. So we can assume `T: 'a`.
+#[derive(Debug)]
+pub enum ImpliedBound<'tcx> {
+    RegionSubRegion(ty::Region, ty::Region),
+    RegionSubParam(ty::Region, ty::ParamTy),
+    RegionSubProjection(ty::Region, ty::ProjectionTy<'tcx>),
+}
+
+/// Compute the implied bounds that a callee/impl can assume based on
+/// the fact that caller/projector has ensured that `ty` is WF.  See
+/// the `ImpliedBound` type for more details.
+pub fn implied_bounds<'a,'tcx>(
+    infcx: &'a InferCtxt<'a,'tcx>,
+    body_id: ast::NodeId,
+    ty: Ty<'tcx>,
+    span: Span)
+    -> Vec<ImpliedBound<'tcx>>
+{
+    // Sometimes when we ask what it takes for T: WF, we get back that
+    // U: WF is required; in that case, we push U onto this stack and
+    // process it next. Currently (at least) these resulting
+    // predicates are always guaranteed to be a subset of the original
+    // type, so we need not fear non-termination.
+    let mut wf_types = vec![ty];
+
+    let mut implied_bounds = vec![];
+
+    while let Some(ty) = wf_types.pop() {
+        // Compute the obligations for `ty` to be well-formed. If `ty` is
+        // an unresolved inference variable, just substitute an empty set
+        // -- because the return type here is going to be things we *add*
+        // to the environment, it's always ok for this set to be smaller
+        // than the ultimate set. (Note: normally there won't be
+        // unresolved inference variables here anyway, but there might be
+        // during typeck under some circumstances.)
+        let obligations = obligations(infcx, body_id, ty, span).unwrap_or(vec![]);
+
+        // From the full set of obligations, just filter down to the
+        // region relationships.
+        implied_bounds.extend(
+            obligations
+            .into_iter()
+            .flat_map(|obligation| {
+                assert!(!obligation.has_escaping_regions());
+                match obligation.predicate {
+                    ty::Predicate::Trait(..) |
+                    ty::Predicate::Equate(..) |
+                    ty::Predicate::Projection(..) |
+                    ty::Predicate::ObjectSafe(..) =>
+                        vec![],
+
+                    ty::Predicate::WellFormed(subty) => {
+                        wf_types.push(subty);
+                        vec![]
+                    }
+
+                    ty::Predicate::RegionOutlives(ref data) =>
+                        match infcx.tcx.no_late_bound_regions(data) {
+                            None =>
+                                vec![],
+                            Some(ty::OutlivesPredicate(r_a, r_b)) =>
+                                vec![ImpliedBound::RegionSubRegion(r_b, r_a)],
+                        },
+
+                    ty::Predicate::TypeOutlives(ref data) =>
+                        match infcx.tcx.no_late_bound_regions(data) {
+                            None => vec![],
+                            Some(ty::OutlivesPredicate(ty_a, r_b)) => {
+                                let components = outlives::components(infcx, ty_a);
+                                implied_bounds_from_components(r_b, components)
+                            }
+                        },
+                }}));
+    }
+
+    implied_bounds
+}
+
+/// When we have an implied bound that `T: 'a`, we can further break
+/// this down to determine what relationships would have to hold for
+/// `T: 'a` to hold. We get to assume that the caller has validated
+/// those relationships.
+fn implied_bounds_from_components<'tcx>(sub_region: ty::Region,
+                                        sup_components: Vec<Component<'tcx>>)
+                                        -> Vec<ImpliedBound<'tcx>>
+{
+    sup_components
+        .into_iter()
+        .flat_map(|component| {
+            match component {
+                Component::Region(r) =>
+                    vec!(ImpliedBound::RegionSubRegion(sub_region, r)),
+                Component::Param(p) =>
+                    vec!(ImpliedBound::RegionSubParam(sub_region, p)),
+                Component::Projection(p) =>
+                    vec!(ImpliedBound::RegionSubProjection(sub_region, p)),
+                Component::EscapingProjection(_) =>
+                    // If the projection has escaping regions, don't
+                    // try to infer any implied bounds even for its
+                    // free components. This is conservative, because
+                    // the caller will still have to prove that those
+                    // free components outlive `sub_region`. But the
+                    // idea is that the WAY that the caller proves
+                    // that may change in the future and we want to
+                    // give ourselves room to get smarter here.
+                    vec!(),
+                Component::UnresolvedInferenceVariable(..) =>
+                    vec!(),
+            }
+        })
+        .collect()
+}
+
+struct WfPredicates<'a,'tcx:'a> {
+    infcx: &'a InferCtxt<'a, 'tcx>,
+    body_id: ast::NodeId,
+    span: Span,
+    out: Vec<traits::PredicateObligation<'tcx>>,
+}
+
+impl<'a,'tcx> WfPredicates<'a,'tcx> {
+    fn cause(&mut self, code: traits::ObligationCauseCode<'tcx>) -> traits::ObligationCause<'tcx> {
+        traits::ObligationCause::new(self.span, self.body_id, code)
+    }
+
+    fn normalize(&mut self) -> Vec<traits::PredicateObligation<'tcx>> {
+        let cause = self.cause(traits::MiscObligation);
+        let infcx = &mut self.infcx;
+        self.out.iter()
+                .inspect(|pred| assert!(!pred.has_escaping_regions()))
+                .flat_map(|pred| {
+                    let mut selcx = traits::SelectionContext::new(infcx);
+                    let pred = traits::normalize(&mut selcx, cause.clone(), pred);
+                    once(pred.value).chain(pred.obligations)
+                })
+                .collect()
+    }
+
+    /// Pushes the obligations required for `trait_ref` to be WF into
+    /// `self.out`.
+    fn compute_trait_ref(&mut self, trait_ref: &ty::TraitRef<'tcx>) {
+        let obligations = self.nominal_obligations(trait_ref.def_id, trait_ref.substs);
+        self.out.extend(obligations);
+
+        let cause = self.cause(traits::MiscObligation);
+        self.out.extend(
+            trait_ref.substs.types
+                            .as_slice()
+                            .iter()
+                            .filter(|ty| !ty.has_escaping_regions())
+                            .map(|ty| traits::Obligation::new(cause.clone(),
+                                                              ty::Predicate::WellFormed(ty))));
+    }
+
+    /// Pushes the obligations required for `trait_ref::Item` to be WF
+    /// into `self.out`.
+    fn compute_projection(&mut self, data: ty::ProjectionTy<'tcx>) {
+        // A projection is well-formed if (a) the trait ref itself is
+        // WF and (b) the trait-ref holds.  (It may also be
+        // normalizable and be WF that way.)
+
+        self.compute_trait_ref(&data.trait_ref);
+
+        if !data.has_escaping_regions() {
+            let predicate = data.trait_ref.to_predicate();
+            let cause = self.cause(traits::ProjectionWf(data));
+            self.out.push(traits::Obligation::new(cause, predicate));
+        }
+    }
+
+    /// Push new obligations into `out`. Returns true if it was able
+    /// to generate all the predicates needed to validate that `ty0`
+    /// is WF. Returns false if `ty0` is an unresolved type variable,
+    /// in which case we are not able to simplify at all.
+    fn compute(&mut self, ty0: Ty<'tcx>) -> bool {
+        let mut subtys = ty0.walk();
+        while let Some(ty) = subtys.next() {
+            match ty.sty {
+                ty::TyBool |
+                ty::TyChar |
+                ty::TyInt(..) |
+                ty::TyUint(..) |
+                ty::TyFloat(..) |
+                ty::TyError |
+                ty::TyStr |
+                ty::TyParam(_) => {
+                    // WfScalar, WfParameter, etc
+                }
+
+                ty::TySlice(subty) |
+                ty::TyArray(subty, _) => {
+                    if !subty.has_escaping_regions() {
+                        let cause = self.cause(traits::SliceOrArrayElem);
+                        match traits::trait_ref_for_builtin_bound(self.infcx.tcx,
+                                                                  ty::BoundSized,
+                                                                  subty) {
+                            Ok(trait_ref) => {
+                                self.out.push(
+                                    traits::Obligation::new(cause,
+                                                            trait_ref.to_predicate()));
+                            }
+                            Err(ErrorReported) => { }
+                        }
+                    }
+                }
+
+                ty::TyBox(_) |
+                ty::TyTuple(_) |
+                ty::TyRawPtr(_) => {
+                    // simple cases that are WF if their type args are WF
+                }
+
+                ty::TyProjection(data) => {
+                    subtys.skip_current_subtree(); // subtree handled by compute_projection
+                    self.compute_projection(data);
+                }
+
+                ty::TyEnum(def, substs) |
+                ty::TyStruct(def, substs) => {
+                    // WfNominalType
+                    let obligations = self.nominal_obligations(def.did, substs);
+                    self.out.extend(obligations);
+                }
+
+                ty::TyRef(r, mt) => {
+                    // WfReference
+                    if !r.has_escaping_regions() && !mt.ty.has_escaping_regions() {
+                        let cause = self.cause(traits::ReferenceOutlivesReferent(ty));
+                        self.out.push(
+                            traits::Obligation::new(
+                                cause,
+                                ty::Predicate::TypeOutlives(
+                                    ty::Binder(
+                                        ty::OutlivesPredicate(mt.ty, *r)))));
+                    }
+                }
+
+                ty::TyClosure(..) => {
+                    // the types in a closure are always the types of
+                    // local variables (or possibly references to local
+                    // variables); we'll walk those.
+                    //
+                    // (Though, local variables are probably not
+                    // needed, as they are separately checked w/r/t
+                    // WFedness.)
+                }
+
+                ty::TyFnDef(..) | ty::TyFnPtr(_) => {
+                    // let the loop iterate into the argument/return
+                    // types appearing in the fn signature
+                }
+
+                ty::TyTrait(ref data) => {
+                    // WfObject
+                    //
+                    // Here, we defer WF checking due to higher-ranked
+                    // regions. This is perhaps not ideal.
+                    self.from_object_ty(ty, data);
+
+                    // FIXME(#27579) RFC also considers adding trait
+                    // obligations that don't refer to Self and
+                    // checking those
+
+                    let cause = self.cause(traits::MiscObligation);
+                    self.out.push(
+                        traits::Obligation::new(
+                            cause,
+                            ty::Predicate::ObjectSafe(data.principal_def_id())));
+                }
+
+                // Inference variables are the complicated case, since we don't
+                // know what type they are. We do two things:
+                //
+                // 1. Check if they have been resolved, and if so proceed with
+                //    THAT type.
+                // 2. If not, check whether this is the type that we
+                //    started with (ty0). In that case, we've made no
+                //    progress at all, so return false. Otherwise,
+                //    we've at least simplified things (i.e., we went
+                //    from `Vec<$0>: WF` to `$0: WF`), so we can
+                //    register a pending obligation and keep
+                //    moving. (Goal is that an "inductive hypothesis"
+                //    is satisfied to ensure termination.)
+                ty::TyInfer(_) => {
+                    let ty = self.infcx.shallow_resolve(ty);
+                    if let ty::TyInfer(_) = ty.sty { // not yet resolved...
+                        if ty == ty0 { // ...this is the type we started from! no progress.
+                            return false;
+                        }
+
+                        let cause = self.cause(traits::MiscObligation);
+                        self.out.push( // ...not the type we started from, so we made progress.
+                            traits::Obligation::new(cause, ty::Predicate::WellFormed(ty)));
+                    } else {
+                        // Yes, resolved, proceed with the
+                        // result. Should never return false because
+                        // `ty` is not a TyInfer.
+                        assert!(self.compute(ty));
+                    }
+                }
+            }
+        }
+
+        // if we made it through that loop above, we made progress!
+        return true;
+    }
+
+    fn nominal_obligations(&mut self,
+                           def_id: DefId,
+                           substs: &Substs<'tcx>)
+                           -> Vec<traits::PredicateObligation<'tcx>>
+    {
+        let predicates =
+            self.infcx.tcx.lookup_predicates(def_id)
+                          .instantiate(self.infcx.tcx, substs);
+        let cause = self.cause(traits::ItemObligation(def_id));
+        predicates.predicates
+                  .into_iter()
+                  .map(|pred| traits::Obligation::new(cause.clone(), pred))
+                  .filter(|pred| !pred.has_escaping_regions())
+                  .collect()
+    }
+
+    fn from_object_ty(&mut self, ty: Ty<'tcx>, data: &ty::TraitTy<'tcx>) {
+        // Imagine a type like this:
+        //
+        //     trait Foo { }
+        //     trait Bar<'c> : 'c { }
+        //
+        //     &'b (Foo+'c+Bar<'d>)
+        //         ^
+        //
+        // In this case, the following relationships must hold:
+        //
+        //     'b <= 'c
+        //     'd <= 'c
+        //
+        // The first condition is due to the normal region pointer
+        // rules, which say that a reference cannot outlive its
+        // referent.
+        //
+        // The final condition may be a bit surprising. In particular,
+        // you may expect that it would have been `'c <= 'd`, since
+        // usually lifetimes of outer things are conservative
+        // approximations for inner things. However, it works somewhat
+        // differently with trait objects: here the idea is that if the
+        // user specifies a region bound (`'c`, in this case) it is the
+        // "master bound" that *implies* that bounds from other traits are
+        // all met. (Remember that *all bounds* in a type like
+        // `Foo+Bar+Zed` must be met, not just one, hence if we write
+        // `Foo<'x>+Bar<'y>`, we know that the type outlives *both* 'x and
+        // 'y.)
+        //
+        // Note: in fact we only permit builtin traits, not `Bar<'d>`, I
+        // am looking forward to the future here.
+
+        if !data.has_escaping_regions() {
+            let implicit_bounds =
+                object_region_bounds(self.infcx.tcx,
+                                     &data.principal,
+                                     data.bounds.builtin_bounds);
+
+            let explicit_bound = data.bounds.region_bound;
+
+            for implicit_bound in implicit_bounds {
+                let cause = self.cause(traits::ReferenceOutlivesReferent(ty));
+                let outlives = ty::Binder(ty::OutlivesPredicate(explicit_bound, implicit_bound));
+                self.out.push(traits::Obligation::new(cause, outlives.to_predicate()));
+            }
+        }
+    }
+}
+
+/// Given an object type like `SomeTrait+Send`, computes the lifetime
+/// bounds that must hold on the elided self type. These are derived
+/// from the declarations of `SomeTrait`, `Send`, and friends -- if
+/// they declare `trait SomeTrait : 'static`, for example, then
+/// `'static` would appear in the list. The hard work is done by
+/// `ty::required_region_bounds`, see that for more information.
+pub fn object_region_bounds<'tcx>(
+    tcx: &TyCtxt<'tcx>,
+    principal: &ty::PolyTraitRef<'tcx>,
+    others: ty::BuiltinBounds)
+    -> Vec<ty::Region>
+{
+    // Since we don't actually *know* the self type for an object,
+    // this "open(err)" serves as a kind of dummy standin -- basically
+    // a skolemized type.
+    let open_ty = tcx.mk_infer(ty::FreshTy(0));
+
+    // Note that we preserve the overall binding levels here.
+    assert!(!open_ty.has_escaping_regions());
+    let substs = tcx.mk_substs(principal.0.substs.with_self_ty(open_ty));
+    let trait_refs = vec!(ty::Binder(ty::TraitRef::new(principal.0.def_id, substs)));
+
+    let mut predicates = others.to_predicates(tcx, open_ty);
+    predicates.extend(trait_refs.iter().map(|t| t.to_predicate()));
+
+    tcx.required_region_bounds(open_ty, predicates)
+}
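The user-visible effect of the implied-bounds computation above is that a callee may rely on relationships such as `T: 'a` whenever its signature already requires `&'a T` to be well-formed. A minimal, self-contained sketch in plain Rust (no compiler internals involved):

// Mirrors the `fn foo<'a,T>(x: &'a T)` example from the comment above:
// the caller must prove `&'a T` is WF, which requires `T: 'a`, so the
// callee may return a `&'a T` without writing an explicit `where T: 'a`.
fn foo<'a, T>(x: &'a T) -> &'a T {
    x
}

fn main() {
    let n = 42;
    assert_eq!(*foo(&n), 42);
}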
index 888a623b24a83d1960d14ffb14264d396faa09ef..bdfb97549d5d512647ce80179eb8d38579cd1378 100644 (file)
@@ -19,9 +19,9 @@ use std::iter::repeat;
 use std::path::Path;
 use std::time::Instant;
 
-use rustc_front::hir;
-use rustc_front::intravisit;
-use rustc_front::intravisit::Visitor;
+use hir;
+use hir::intravisit;
+use hir::intravisit::Visitor;
 
 // The name of the associated type for `Fn` return types
 pub const FN_OUTPUT_NAME: &'static str = "Output";
index 4a45797602d811f270571b29794150c9b4c579ef..69bcc9cbfffea807018c7fa2c73b2fab2b8f8164 100644 (file)
@@ -12,7 +12,7 @@
 
 #![allow(non_snake_case)]
 
-use middle::def_id::DefId;
+use hir::def_id::DefId;
 use syntax::ast;
 
 pub use rustc_data_structures::fnv::FnvHashMap;
index 5868f233776c20af519338084f3ad3994f4cb713..2173b919d1336ed53badd81056b7590bb00223bf 100644 (file)
@@ -9,21 +9,22 @@
 // except according to those terms.
 
 
-use middle::def_id::DefId;
-use middle::subst::{self, Subst};
-use middle::ty::{BrAnon, BrEnv, BrFresh, BrNamed};
-use middle::ty::{TyBool, TyChar, TyStruct, TyEnum};
-use middle::ty::{TyError, TyStr, TyArray, TySlice, TyFloat, TyBareFn};
-use middle::ty::{TyParam, TyRawPtr, TyRef, TyTuple};
-use middle::ty::TyClosure;
-use middle::ty::{TyBox, TyTrait, TyInt, TyUint, TyInfer};
-use middle::ty::{self, Ty, TypeFoldable};
-
+use hir::def_id::DefId;
+use ty::subst::{self, Subst};
+use ty::{BrAnon, BrEnv, BrFresh, BrNamed};
+use ty::{TyBool, TyChar, TyStruct, TyEnum};
+use ty::{TyError, TyStr, TyArray, TySlice, TyFloat, TyFnDef, TyFnPtr};
+use ty::{TyParam, TyRawPtr, TyRef, TyTuple};
+use ty::TyClosure;
+use ty::{TyBox, TyTrait, TyInt, TyUint, TyInfer};
+use ty::{self, Ty, TyCtxt, TypeFoldable};
+
+use std::cell::Cell;
 use std::fmt;
 use syntax::abi::Abi;
 use syntax::parse::token;
 use syntax::ast::CRATE_NODE_ID;
-use rustc_front::hir;
+use hir;
 
 pub fn verbose() -> bool {
     ty::tls::with(|tcx| tcx.sess.verbose())
@@ -34,23 +35,23 @@ fn fn_sig(f: &mut fmt::Formatter,
           variadic: bool,
           output: ty::FnOutput)
           -> fmt::Result {
-    try!(write!(f, "("));
+    write!(f, "(")?;
     let mut inputs = inputs.iter();
     if let Some(&ty) = inputs.next() {
-        try!(write!(f, "{}", ty));
+        write!(f, "{}", ty)?;
         for &ty in inputs {
-            try!(write!(f, ", {}", ty));
+            write!(f, ", {}", ty)?;
         }
         if variadic {
-            try!(write!(f, ", ..."));
+            write!(f, ", ...")?;
         }
     }
-    try!(write!(f, ")"));
+    write!(f, ")")?;
 
     match output {
         ty::FnConverging(ty) => {
             if !ty.is_nil() {
-                try!(write!(f, " -> {}", ty));
+                write!(f, " -> {}", ty)?;
             }
             Ok(())
         }
@@ -60,136 +61,177 @@ fn fn_sig(f: &mut fmt::Formatter,
     }
 }
 
-fn parameterized<GG>(f: &mut fmt::Formatter,
-                     substs: &subst::Substs,
-                     did: DefId,
-                     projections: &[ty::ProjectionPredicate],
-                     get_generics: GG)
-                     -> fmt::Result
-    where GG: for<'tcx> FnOnce(&ty::ctxt<'tcx>) -> ty::Generics<'tcx>
-{
-    let (fn_trait_kind, verbose) = try!(ty::tls::with(|tcx| {
-        try!(write!(f, "{}", tcx.item_path_str(did)));
-        Ok((tcx.lang_items.fn_trait_kind(did), tcx.sess.verbose()))
-    }));
-
-    let mut empty = true;
-    let mut start_or_continue = |f: &mut fmt::Formatter, start: &str, cont: &str| {
-        if empty {
-            empty = false;
-            write!(f, "{}", start)
-        } else {
-            write!(f, "{}", cont)
-        }
-    };
+/// Namespace of the path given to parameterized to print.
+#[derive(Copy, Clone, PartialEq)]
+pub enum Ns {
+    Type,
+    Value
+}
 
-    if verbose {
-        match substs.regions {
-            subst::ErasedRegions => {
-                try!(start_or_continue(f, "<", ", "));
-                try!(write!(f, ".."));
-            }
-            subst::NonerasedRegions(ref regions) => {
-                for region in regions {
-                    try!(start_or_continue(f, "<", ", "));
-                    try!(write!(f, "{:?}", region));
+fn number_of_supplied_defaults<'tcx, GG>(tcx: &ty::TyCtxt<'tcx>,
+                                         substs: &subst::Substs,
+                                         space: subst::ParamSpace,
+                                         get_generics: GG)
+                                         -> usize
+    where GG: FnOnce(&TyCtxt<'tcx>) -> ty::Generics<'tcx>
+{
+    let generics = get_generics(tcx);
+
+    let has_self = substs.self_ty().is_some();
+    let ty_params = generics.types.get_slice(space);
+    let tps = substs.types.get_slice(space);
+    if ty_params.last().map_or(false, |def| def.default.is_some()) {
+        let substs = tcx.lift(&substs);
+        ty_params.iter().zip(tps).rev().take_while(|&(def, &actual)| {
+            match def.default {
+                Some(default) => {
+                    if !has_self && default.has_self_ty() {
+                        // In an object type, there is no `Self`, and
+                        // thus if the default value references Self,
+                        // the user will be required to give an
+                        // explicit value. We can't even do the
+                        // substitution below to check without causing
+                        // an ICE. (#18956).
+                        false
+                    } else {
+                        let default = tcx.lift(&default);
+                        substs.and_then(|substs| default.subst(tcx, substs))
+                            == Some(actual)
+                    }
                 }
+                None => false
             }
-        }
-        for &ty in &substs.types {
-            try!(start_or_continue(f, "<", ", "));
-            try!(write!(f, "{}", ty));
-        }
-        for projection in projections {
-            try!(start_or_continue(f, "<", ", "));
-            try!(write!(f, "{}={}",
-                        projection.projection_ty.item_name,
-                        projection.ty));
-        }
-        return start_or_continue(f, "", ">");
+        }).count()
+    } else {
+        0
+    }
+}
+
+pub fn parameterized<GG>(f: &mut fmt::Formatter,
+                         substs: &subst::Substs,
+                         did: DefId,
+                         ns: Ns,
+                         projections: &[ty::ProjectionPredicate],
+                         get_generics: GG)
+                         -> fmt::Result
+    where GG: for<'tcx> FnOnce(&TyCtxt<'tcx>) -> ty::Generics<'tcx>
+{
+    if let (Ns::Value, Some(self_ty)) = (ns, substs.self_ty()) {
+        write!(f, "<{} as ", self_ty)?;
     }
 
-    if fn_trait_kind.is_some() && projections.len() == 1 {
+    let (fn_trait_kind, verbose, item_name) = ty::tls::with(|tcx| {
+        let (did, item_name) = if ns == Ns::Value {
+            // Try to get the impl/trait parent, if this is an
+            // associated value item (method or constant).
+            tcx.trait_of_item(did).or_else(|| tcx.impl_of_method(did))
+               .map_or((did, None), |parent| (parent, Some(tcx.item_name(did))))
+        } else {
+            (did, None)
+        };
+        write!(f, "{}", tcx.item_path_str(did))?;
+        Ok((tcx.lang_items.fn_trait_kind(did), tcx.sess.verbose(), item_name))
+    })?;
+
+    if !verbose && fn_trait_kind.is_some() && projections.len() == 1 {
         let projection_ty = projections[0].ty;
         if let TyTuple(ref args) = substs.types.get_slice(subst::TypeSpace)[0].sty {
             return fn_sig(f, args, false, ty::FnConverging(projection_ty));
         }
     }
 
-    match substs.regions {
-        subst::ErasedRegions => { }
-        subst::NonerasedRegions(ref regions) => {
-            for &r in regions {
-                try!(start_or_continue(f, "<", ", "));
-                let s = r.to_string();
-                if s.is_empty() {
-                    // This happens when the value of the region
-                    // parameter is not easily serialized. This may be
-                    // because the user omitted it in the first place,
-                    // or because it refers to some block in the code,
-                    // etc. I'm not sure how best to serialize this.
-                    try!(write!(f, "'_"));
-                } else {
-                    try!(write!(f, "{}", s));
-                }
+    let empty = Cell::new(true);
+    let start_or_continue = |f: &mut fmt::Formatter, start: &str, cont: &str| {
+        if empty.get() {
+            empty.set(false);
+            write!(f, "{}", start)
+        } else {
+            write!(f, "{}", cont)
+        }
+    };
+    let print_region = |f: &mut fmt::Formatter, region: &ty::Region| -> _ {
+        if verbose {
+            write!(f, "{:?}", region)
+        } else {
+            let s = region.to_string();
+            if s.is_empty() {
+                // This happens when the value of the region
+                // parameter is not easily serialized. This may be
+                // because the user omitted it in the first place,
+                // or because it refers to some block in the code,
+                // etc. I'm not sure how best to serialize this.
+                write!(f, "'_")
+            } else {
+                write!(f, "{}", s)
             }
         }
+    };
+
+    for region in substs.regions.get_slice(subst::TypeSpace) {
+        start_or_continue(f, "<", ", ")?;
+        print_region(f, region)?;
     }
 
-    // It is important to execute this conditionally, only if -Z
-    // verbose is false. Otherwise, debug logs can sometimes cause
-    // ICEs trying to fetch the generics early in the pipeline. This
-    // is kind of a hacky workaround in that -Z verbose is required to
-    // avoid those ICEs.
+    let num_supplied_defaults = if verbose {
+        0
+    } else {
+        // It is important to execute this conditionally, only if -Z
+        // verbose is false. Otherwise, debug logs can sometimes cause
+        // ICEs trying to fetch the generics early in the pipeline. This
+        // is kind of a hacky workaround in that -Z verbose is required to
+        // avoid those ICEs.
+        ty::tls::with(|tcx| {
+            number_of_supplied_defaults(tcx, substs, subst::TypeSpace, get_generics)
+        })
+    };
+
     let tps = substs.types.get_slice(subst::TypeSpace);
-    let num_defaults = ty::tls::with(|tcx| {
-        let generics = get_generics(tcx);
-
-        let has_self = substs.self_ty().is_some();
-        let ty_params = generics.types.get_slice(subst::TypeSpace);
-        if ty_params.last().map_or(false, |def| def.default.is_some()) {
-            let substs = tcx.lift(&substs);
-            ty_params.iter().zip(tps).rev().take_while(|&(def, &actual)| {
-                match def.default {
-                    Some(default) => {
-                        if !has_self && default.has_self_ty() {
-                            // In an object type, there is no `Self`, and
-                            // thus if the default value references Self,
-                            // the user will be required to give an
-                            // explicit value. We can't even do the
-                            // substitution below to check without causing
-                            // an ICE. (#18956).
-                            false
-                        } else {
-                            let default = tcx.lift(&default);
-                            substs.and_then(|substs| default.subst(tcx, substs)) == Some(actual)
-                        }
-                    }
-                    None => false
-                }
-            }).count()
-        } else {
-            0
-        }
-    });
 
-    for &ty in &tps[..tps.len() - num_defaults] {
-        try!(start_or_continue(f, "<", ", "));
-        try!(write!(f, "{}", ty));
+    for &ty in &tps[..tps.len() - num_supplied_defaults] {
+        start_or_continue(f, "<", ", ")?;
+        write!(f, "{}", ty)?;
     }
 
     for projection in projections {
-        try!(start_or_continue(f, "<", ", "));
-        try!(write!(f, "{}={}",
-                    projection.projection_ty.item_name,
-                    projection.ty));
+        start_or_continue(f, "<", ", ")?;
+        write!(f, "{}={}",
+               projection.projection_ty.item_name,
+               projection.ty)?;
+    }
+
+    start_or_continue(f, "", ">")?;
+
+    // For values, also print their name and type parameters.
+    if ns == Ns::Value {
+        empty.set(true);
+
+        if substs.self_ty().is_some() {
+            write!(f, ">")?;
+        }
+
+        if let Some(item_name) = item_name {
+            write!(f, "::{}", item_name)?;
+        }
+
+        for region in substs.regions.get_slice(subst::FnSpace) {
+            start_or_continue(f, "::<", ", ")?;
+            print_region(f, region)?;
+        }
+
+        // FIXME: consider being smart with defaults here too
+        for ty in substs.types.get_slice(subst::FnSpace) {
+            start_or_continue(f, "::<", ", ")?;
+            write!(f, "{}", ty)?;
+        }
+
+        start_or_continue(f, "", ">")?;
     }
 
-    start_or_continue(f, "", ">")
+    Ok(())
 }
 
 fn in_binder<'tcx, T, U>(f: &mut fmt::Formatter,
-                         tcx: &ty::ctxt<'tcx>,
+                         tcx: &TyCtxt<'tcx>,
                          original: &ty::Binder<T>,
                          lifted: Option<ty::Binder<U>>) -> fmt::Result
     where T: fmt::Display, U: fmt::Display + TypeFoldable<'tcx>
@@ -232,7 +274,7 @@ fn in_binder<'tcx, T, U>(f: &mut fmt::Formatter,
         })
     }).0;
 
-    try!(start_or_continue(f, "", "> "));
+    start_or_continue(f, "", "> ")?;
     write!(f, "{}", new_value)
 }
 
@@ -265,6 +307,7 @@ impl<'tcx> fmt::Display for TraitAndProjections<'tcx> {
         let TraitAndProjections(ref trait_ref, ref projection_bounds) = *self;
         parameterized(f, trait_ref.substs,
                       trait_ref.def_id,
+                      Ns::Type,
                       projection_bounds,
                       |tcx| tcx.lookup_trait_def(trait_ref.def_id).generics.clone())
     }
@@ -275,7 +318,7 @@ impl<'tcx> fmt::Display for ty::TraitTy<'tcx> {
         let bounds = &self.bounds;
 
         // Generate the main trait ref, including associated types.
-        try!(ty::tls::with(|tcx| {
+        ty::tls::with(|tcx| {
             let principal = tcx.lift(&self.principal.0)
                                .expect("could not lift TraitRef for printing");
             let projections = tcx.lift(&bounds.projection_bounds[..])
@@ -284,11 +327,11 @@ impl<'tcx> fmt::Display for ty::TraitTy<'tcx> {
 
             let tap = ty::Binder(TraitAndProjections(principal, projections));
             in_binder(f, tcx, &ty::Binder(""), Some(tap))
-        }));
+        })?;
 
         // Builtin bounds.
         for bound in &bounds.builtin_bounds {
-            try!(write!(f, " + {:?}", bound));
+            write!(f, " + {:?}", bound)?;
         }
 
         // FIXME: It'd be nice to compute from context when this bound
@@ -298,7 +341,7 @@ impl<'tcx> fmt::Display for ty::TraitTy<'tcx> {
         // people aware that it's there.
         let bound = bounds.region_bound.to_string();
         if !bound.is_empty() {
-            try!(write!(f, " + {}", bound));
+            write!(f, " + {}", bound)?;
         }
 
         Ok(())
@@ -351,15 +394,6 @@ impl<'tcx> fmt::Debug for ty::ItemSubsts<'tcx> {
     }
 }
 
-impl fmt::Debug for subst::RegionSubsts {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match *self {
-            subst::ErasedRegions => write!(f, "erased"),
-            subst::NonerasedRegions(ref regions) => write!(f, "{:?}", regions)
-        }
-    }
-}
-
 impl<'tcx> fmt::Debug for ty::TraitRef<'tcx> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         // when printing out the debug representation, we don't need
@@ -606,19 +640,19 @@ impl<'tcx> fmt::Debug for ty::InstantiatedPredicates<'tcx> {
 
 impl<'tcx> fmt::Debug for ty::ImplOrTraitItem<'tcx> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        try!(write!(f, "ImplOrTraitItem("));
-        try!(match *self {
+        write!(f, "ImplOrTraitItem(")?;
+        match *self {
             ty::ImplOrTraitItem::MethodTraitItem(ref i) => write!(f, "{:?}", i),
             ty::ImplOrTraitItem::ConstTraitItem(ref i) => write!(f, "{:?}", i),
             ty::ImplOrTraitItem::TypeTraitItem(ref i) => write!(f, "{:?}", i),
-        });
+        }?;
         write!(f, ")")
     }
 }
 
 impl<'tcx> fmt::Display for ty::FnSig<'tcx> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        try!(write!(f, "fn"));
+        write!(f, "fn")?;
         fn_sig(f, &self.inputs, self.variadic, self.output)
     }
 }
@@ -637,18 +671,18 @@ impl<'tcx> fmt::Debug for ty::ExistentialBounds<'tcx> {
 
         let region_str = format!("{:?}", self.region_bound);
         if !region_str.is_empty() {
-            try!(maybe_continue(f));
-            try!(write!(f, "{}", region_str));
+            maybe_continue(f)?;
+            write!(f, "{}", region_str)?;
         }
 
         for bound in &self.builtin_bounds {
-            try!(maybe_continue(f));
-            try!(write!(f, "{:?}", bound));
+            maybe_continue(f)?;
+            write!(f, "{:?}", bound)?;
         }
 
         for projection_bound in &self.projection_bounds {
-            try!(maybe_continue(f));
-            try!(write!(f, "{:?}", projection_bound));
+            maybe_continue(f)?;
+            write!(f, "{:?}", projection_bound)?;
         }
 
         Ok(())
@@ -659,9 +693,9 @@ impl fmt::Display for ty::BuiltinBounds {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         let mut bounds = self.iter();
         if let Some(bound) = bounds.next() {
-            try!(write!(f, "{:?}", bound));
+            write!(f, "{:?}", bound)?;
             for bound in bounds {
-                try!(write!(f, " + {:?}", bound));
+                write!(f, " + {:?}", bound)?;
             }
         }
         Ok(())
@@ -769,7 +803,7 @@ impl fmt::Display for ty::Binder<ty::OutlivesPredicate<ty::Region, ty::Region>>
 
 impl<'tcx> fmt::Display for ty::TraitRef<'tcx> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        parameterized(f, self.substs, self.def_id, &[],
+        parameterized(f, self.substs, self.def_id, Ns::Type, &[],
                       |tcx| tcx.lookup_trait_def(self.def_id).generics.clone())
     }
 }
@@ -790,45 +824,52 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> {
                 },  tm.ty)
             }
             TyRef(r, ref tm) => {
-                try!(write!(f, "&"));
+                write!(f, "&")?;
                 let s = r.to_string();
-                try!(write!(f, "{}", s));
+                write!(f, "{}", s)?;
                 if !s.is_empty() {
-                    try!(write!(f, " "));
+                    write!(f, " ")?;
                 }
                 write!(f, "{}", tm)
             }
             TyTuple(ref tys) => {
-                try!(write!(f, "("));
+                write!(f, "(")?;
                 let mut tys = tys.iter();
                 if let Some(&ty) = tys.next() {
-                    try!(write!(f, "{},", ty));
+                    write!(f, "{},", ty)?;
                     if let Some(&ty) = tys.next() {
-                        try!(write!(f, " {}", ty));
+                        write!(f, " {}", ty)?;
                         for &ty in tys {
-                            try!(write!(f, ", {}", ty));
+                            write!(f, ", {}", ty)?;
                         }
                     }
                 }
                 write!(f, ")")
             }
-            TyBareFn(opt_def_id, ref bare_fn) => {
+            TyFnDef(def_id, substs, ref bare_fn) => {
                 if bare_fn.unsafety == hir::Unsafety::Unsafe {
-                    try!(write!(f, "unsafe "));
+                    write!(f, "unsafe ")?;
                 }
 
                 if bare_fn.abi != Abi::Rust {
-                    try!(write!(f, "extern {} ", bare_fn.abi));
+                    write!(f, "extern {} ", bare_fn.abi)?;
                 }
 
-                try!(write!(f, "{}", bare_fn.sig.0));
+                write!(f, "{} {{", bare_fn.sig.0)?;
+                parameterized(f, substs, def_id, Ns::Value, &[],
+                              |tcx| tcx.lookup_item_type(def_id).generics)?;
+                write!(f, "}}")
+            }
+            TyFnPtr(ref bare_fn) => {
+                if bare_fn.unsafety == hir::Unsafety::Unsafe {
+                    write!(f, "unsafe ")?;
+                }
 
-                if let Some(def_id) = opt_def_id {
-                    try!(write!(f, " {{{}}}", ty::tls::with(|tcx| {
-                        tcx.item_path_str(def_id)
-                    })));
+                if bare_fn.abi != Abi::Rust {
+                    write!(f, "extern {} ", bare_fn.abi)?;
                 }
-                Ok(())
+
+                write!(f, "{}", bare_fn.sig.0)
             }
             TyInfer(infer_ty) => write!(f, "{}", infer_ty),
             TyError => write!(f, "[type error]"),
@@ -839,7 +880,7 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> {
                           !tcx.tcache.borrow().contains_key(&def.did) {
                         write!(f, "{}<..>", tcx.item_path_str(def.did))
                     } else {
-                        parameterized(f, substs, def.did, &[],
+                        parameterized(f, substs, def.did, Ns::Type, &[],
                                       |tcx| tcx.lookup_item_type(def.did).generics)
                     }
                 })
@@ -848,30 +889,30 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> {
             ty::TyProjection(ref data) => write!(f, "{}", data),
             TyStr => write!(f, "str"),
             TyClosure(did, ref substs) => ty::tls::with(|tcx| {
-                try!(write!(f, "[closure"));
+                write!(f, "[closure")?;
 
                 if let Some(node_id) = tcx.map.as_local_node_id(did) {
-                    try!(write!(f, "@{:?}", tcx.map.span(node_id)));
+                    write!(f, "@{:?}", tcx.map.span(node_id))?;
                     let mut sep = " ";
-                    try!(tcx.with_freevars(node_id, |freevars| {
+                    tcx.with_freevars(node_id, |freevars| {
                         for (freevar, upvar_ty) in freevars.iter().zip(&substs.upvar_tys) {
                             let node_id = freevar.def.var_id();
-                            try!(write!(f,
+                            write!(f,
                                         "{}{}:{}",
                                         sep,
                                         tcx.local_var_name_str(node_id),
-                                        upvar_ty));
+                                        upvar_ty)?;
                             sep = ", ";
                         }
                         Ok(())
-                    }))
+                    })?
                 } else {
                     // cross-crate closure types should only be
                     // visible in trans bug reports, I imagine.
-                    try!(write!(f, "@{:?}", did));
+                    write!(f, "@{:?}", did)?;
                     let mut sep = " ";
                     for (index, upvar_ty) in substs.upvar_tys.iter().enumerate() {
-                        try!(write!(f, "{}{}:{}", sep, index, upvar_ty));
+                        write!(f, "{}{}:{}", sep, index, upvar_ty)?;
                         sep = ", ";
                     }
                 }
@@ -970,9 +1011,7 @@ impl<'tcx> fmt::Debug for ty::TraitPredicate<'tcx> {
 
 impl<'tcx> fmt::Display for ty::TraitPredicate<'tcx> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{} : {}",
-               self.trait_ref.self_ty(),
-               self.trait_ref)
+        write!(f, "{}: {}", self.trait_ref.self_ty(), self.trait_ref)
     }
 }
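The `number_of_supplied_defaults` helper added earlier in this file counts how many trailing type parameters were supplied with exactly their declared defaults, so the printer can elide them. A standalone sketch of that counting pattern (illustrative names and string-based "types", not compiler internals):

// Count trailing (default, actual) pairs that match, scanning from the
// end and stopping at the first mismatch -- the same rev().take_while()
// shape used by number_of_supplied_defaults.
fn trailing_defaults(defaults: &[Option<&str>], actuals: &[&str]) -> usize {
    defaults
        .iter()
        .zip(actuals)
        .rev()
        .take_while(|&(def, actual)| *def == Some(*actual))
        .count()
}

fn main() {
    // e.g. a hypothetical `Map<K, V, S = RandomState>` printed as `Map<K, V>`.
    let defaults = [None, None, Some("RandomState")];
    let actuals = ["String", "u32", "RandomState"];
    assert_eq!(trailing_defaults(&defaults, &actuals), 1);
}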
 
index 255ca60e2b8f4a2f2e3586f00ac75b97322d54ad..85e861b405a9f8620b9480902e0208ce33666abd 100644 (file)
@@ -11,8 +11,6 @@ crate-type = ["dylib"]
 [dependencies]
 syntax = { path = "../libsyntax" }
 serialize = { path = "../libserialize" }
-rustc_llvm = { path = "../librustc_llvm" }
-rustc_front = { path = "../librustc_front" }
 log = { path = "../liblog" }
 
 [features]
diff --git a/src/librustc_back/abi.rs b/src/librustc_back/abi.rs
deleted file mode 100644 (file)
index c3a3a8d..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-pub const BOX_FIELD_DROP_GLUE: usize = 1;
-pub const BOX_FIELD_BODY: usize = 4;
-
-/// The first half of a fat pointer.
-/// - For a closure, this is the code address.
-/// - For an object or trait instance, this is the address of the box.
-/// - For a slice, this is the base address.
-pub const FAT_PTR_ADDR: usize = 0;
-
-/// The second half of a fat pointer.
-/// - For a closure, this is the address of the environment.
-/// - For an object or trait instance, this is the address of the vtable.
-/// - For a slice, this is the length.
-pub const FAT_PTR_EXTRA: usize = 1;
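The FAT_PTR_ADDR/FAT_PTR_EXTRA constants removed here index the two words of a fat pointer. A minimal, self-contained check of that layout in plain Rust:

use std::mem::size_of;

fn main() {
    let word = size_of::<usize>();
    assert_eq!(size_of::<&u8>(), word);        // thin pointer: just an address
    assert_eq!(size_of::<&[u8]>(), 2 * word);  // fat pointer: base address + length
    assert_eq!(size_of::<&str>(), 2 * word);   // likewise: base address + byte length
}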
diff --git a/src/librustc_back/dynamic_lib.rs b/src/librustc_back/dynamic_lib.rs
new file mode 100644 (file)
index 0000000..2f86262
--- /dev/null
@@ -0,0 +1,314 @@
+// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Dynamic library facilities.
+//!
+//! A simple wrapper over the platform's dynamic library facilities
+
+use std::env;
+use std::ffi::{CString, OsString};
+use std::path::{Path, PathBuf};
+
+pub struct DynamicLibrary {
+    handle: *mut u8
+}
+
+impl Drop for DynamicLibrary {
+    fn drop(&mut self) {
+        unsafe {
+            dl::close(self.handle)
+        }
+    }
+}
+
+impl DynamicLibrary {
+    /// Lazily open a dynamic library. When passed None it gives a
+    /// handle to the calling process
+    pub fn open(filename: Option<&Path>) -> Result<DynamicLibrary, String> {
+        let maybe_library = dl::open(filename.map(|path| path.as_os_str()));
+
+        // The dynamic library must not be constructed if there is
+        // an error opening the library so the destructor does not
+        // run.
+        match maybe_library {
+            Err(err) => Err(err),
+            Ok(handle) => Ok(DynamicLibrary { handle: handle })
+        }
+    }
+
+    /// Prepends a path to this process's search path for dynamic libraries
+    pub fn prepend_search_path(path: &Path) {
+        let mut search_path = DynamicLibrary::search_path();
+        search_path.insert(0, path.to_path_buf());
+        env::set_var(DynamicLibrary::envvar(), &DynamicLibrary::create_path(&search_path));
+    }
+
+    /// From a slice of paths, create a new vector which is suitable to be an
+    /// environment variable for this platform's dylib search path.
+    pub fn create_path(path: &[PathBuf]) -> OsString {
+        let mut newvar = OsString::new();
+        for (i, path) in path.iter().enumerate() {
+            if i > 0 { newvar.push(DynamicLibrary::separator()); }
+            newvar.push(path);
+        }
+        return newvar;
+    }
+
+    /// Returns the environment variable for this process's dynamic library
+    /// search path
+    pub fn envvar() -> &'static str {
+        if cfg!(windows) {
+            "PATH"
+        } else if cfg!(target_os = "macos") {
+            "DYLD_LIBRARY_PATH"
+        } else {
+            "LD_LIBRARY_PATH"
+        }
+    }
+
+    fn separator() -> &'static str {
+        if cfg!(windows) { ";" } else { ":" }
+    }
+
+    /// Returns the current search path for dynamic libraries being used by this
+    /// process
+    pub fn search_path() -> Vec<PathBuf> {
+        match env::var_os(DynamicLibrary::envvar()) {
+            Some(var) => env::split_paths(&var).collect(),
+            None => Vec::new(),
+        }
+    }
+
+    /// Accesses the value at the symbol of the dynamic library.
+    pub unsafe fn symbol<T>(&self, symbol: &str) -> Result<*mut T, String> {
+        // This function should have a lifetime constraint of 'a on
+        // T but that feature is still unimplemented
+
+        let raw_string = CString::new(symbol).unwrap();
+        let maybe_symbol_value = dl::symbol(self.handle, raw_string.as_ptr());
+
+        // The value must not be constructed if there is an error so
+        // the destructor does not run.
+        match maybe_symbol_value {
+            Err(err) => Err(err),
+            Ok(symbol_value) => Ok(symbol_value as *mut T)
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use libc;
+    use std::mem;
+
+    #[test]
+    fn test_loading_cosine() {
+        if cfg!(windows) {
+            return
+        }
+
+        // The math library does not need to be loaded since it is already
+        // statically linked in
+        let libm = match DynamicLibrary::open(None) {
+            Err(error) => panic!("Could not load self as module: {}", error),
+            Ok(libm) => libm
+        };
+
+        let cosine: extern fn(libc::c_double) -> libc::c_double = unsafe {
+            match libm.symbol("cos") {
+                Err(error) => panic!("Could not load function cos: {}", error),
+                Ok(cosine) => mem::transmute::<*mut u8, _>(cosine)
+            }
+        };
+
+        let argument = 0.0;
+        let expected_result = 1.0;
+        let result = cosine(argument);
+        if result != expected_result {
+            panic!("cos({}) != {} but equaled {} instead", argument,
+                   expected_result, result)
+        }
+    }
+
+    #[test]
+    fn test_errors_do_not_crash() {
+        use std::path::Path;
+
+        if !cfg!(unix) {
+            return
+        }
+
+        // Open /dev/null as a library to get an error, and make sure
+        // that only causes an error, and not a crash.
+        let path = Path::new("/dev/null");
+        match DynamicLibrary::open(Some(&path)) {
+            Err(_) => {}
+            Ok(_) => panic!("Successfully opened the empty library.")
+        }
+    }
+}
+
+#[cfg(unix)]
+mod dl {
+    use libc;
+    use std::ffi::{CStr, OsStr, CString};
+    use std::os::unix::prelude::*;
+    use std::ptr;
+    use std::str;
+
+    pub fn open(filename: Option<&OsStr>) -> Result<*mut u8, String> {
+        check_for_errors_in(|| {
+            unsafe {
+                match filename {
+                    Some(filename) => open_external(filename),
+                    None => open_internal(),
+                }
+            }
+        })
+    }
+
+    const LAZY: libc::c_int = 1;
+
+    unsafe fn open_external(filename: &OsStr) -> *mut u8 {
+        let s = CString::new(filename.as_bytes()).unwrap();
+        libc::dlopen(s.as_ptr(), LAZY) as *mut u8
+    }
+
+    unsafe fn open_internal() -> *mut u8 {
+        libc::dlopen(ptr::null(), LAZY) as *mut u8
+    }
+
+    pub fn check_for_errors_in<T, F>(f: F) -> Result<T, String> where
+        F: FnOnce() -> T,
+    {
+        use std::sync::StaticMutex;
+        static LOCK: StaticMutex = StaticMutex::new();
+        unsafe {
+            // dlerror isn't thread safe, so we need to lock around this entire
+            // sequence
+            let _guard = LOCK.lock();
+            let _old_error = libc::dlerror();
+
+            let result = f();
+
+            let last_error = libc::dlerror() as *const _;
+            let ret = if ptr::null() == last_error {
+                Ok(result)
+            } else {
+                let s = CStr::from_ptr(last_error).to_bytes();
+                Err(str::from_utf8(s).unwrap().to_owned())
+            };
+
+            ret
+        }
+    }
+
+    pub unsafe fn symbol(handle: *mut u8,
+                         symbol: *const libc::c_char)
+                         -> Result<*mut u8, String> {
+        check_for_errors_in(|| {
+            libc::dlsym(handle as *mut libc::c_void, symbol) as *mut u8
+        })
+    }
+    pub unsafe fn close(handle: *mut u8) {
+        libc::dlclose(handle as *mut libc::c_void); ()
+    }
+}
+
+#[cfg(windows)]
+mod dl {
+    use std::ffi::OsStr;
+    use std::io;
+    use std::os::windows::prelude::*;
+    use std::ptr;
+
+    use libc::{c_uint, c_void, c_char};
+
+    type DWORD = u32;
+    type HMODULE = *mut u8;
+    type BOOL = i32;
+    type LPCWSTR = *const u16;
+    type LPCSTR = *const i8;
+
+    extern "system" {
+        fn SetThreadErrorMode(dwNewMode: DWORD,
+                              lpOldMode: *mut DWORD) -> c_uint;
+        fn LoadLibraryW(name: LPCWSTR) -> HMODULE;
+        fn GetModuleHandleExW(dwFlags: DWORD,
+                              name: LPCWSTR,
+                              handle: *mut HMODULE) -> BOOL;
+        fn GetProcAddress(handle: HMODULE,
+                          name: LPCSTR) -> *mut c_void;
+        fn FreeLibrary(handle: HMODULE) -> BOOL;
+    }
+
+    pub fn open(filename: Option<&OsStr>) -> Result<*mut u8, String> {
+        // disable "dll load failed" error dialog.
+        let prev_error_mode = unsafe {
+            // SEM_FAILCRITICALERRORS 0x01
+            let new_error_mode = 1;
+            let mut prev_error_mode = 0;
+            let result = SetThreadErrorMode(new_error_mode,
+                                            &mut prev_error_mode);
+            if result == 0 {
+                return Err(io::Error::last_os_error().to_string())
+            }
+            prev_error_mode
+        };
+
+        let result = match filename {
+            Some(filename) => {
+                let filename_str: Vec<_> =
+                    filename.encode_wide().chain(Some(0)).collect();
+                let result = unsafe {
+                    LoadLibraryW(filename_str.as_ptr())
+                };
+                ptr_result(result)
+            }
+            None => {
+                let mut handle = ptr::null_mut();
+                let succeeded = unsafe {
+                    GetModuleHandleExW(0 as DWORD, ptr::null(), &mut handle)
+                };
+                if succeeded == 0 {
+                    Err(io::Error::last_os_error().to_string())
+                } else {
+                    Ok(handle as *mut u8)
+                }
+            }
+        };
+
+        unsafe {
+            SetThreadErrorMode(prev_error_mode, ptr::null_mut());
+        }
+
+        result
+    }
+
+    pub unsafe fn symbol(handle: *mut u8,
+                         symbol: *const c_char)
+                         -> Result<*mut u8, String> {
+        let ptr = GetProcAddress(handle as HMODULE, symbol) as *mut u8;
+        ptr_result(ptr)
+    }
+
+    pub unsafe fn close(handle: *mut u8) {
+        FreeLibrary(handle as HMODULE);
+    }
+
+    fn ptr_result<T>(ptr: *mut T) -> Result<*mut T, String> {
+        if ptr.is_null() {
+            Err(io::Error::last_os_error().to_string())
+        } else {
+            Ok(ptr)
+        }
+    }
+}
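The create_path/search_path helpers above can also be expressed with the standard library's path utilities. A minimal sketch of prepending a directory to a dylib search path, assuming the Unix `LD_LIBRARY_PATH` case (the variable and directory names are illustrative):

use std::env;
use std::ffi::OsString;
use std::path::PathBuf;

// Split the existing search path, prepend a directory, and re-join it
// with the platform separator -- the same shape as prepend_search_path
// and create_path above, but using std::env helpers.
fn prepend_search_path(current: Option<OsString>, dir: PathBuf) -> OsString {
    let mut paths: Vec<PathBuf> = match current {
        Some(val) => env::split_paths(&val).collect(),
        None => Vec::new(),
    };
    paths.insert(0, dir);
    // join_paths only fails if a path itself contains the separator.
    env::join_paths(paths).expect("path contained a separator")
}

fn main() {
    let updated = prepend_search_path(env::var_os("LD_LIBRARY_PATH"),
                                      PathBuf::from("/tmp/mylibs"));
    println!("new LD_LIBRARY_PATH = {:?}", updated);
    // A wrapper like DynamicLibrary would then set the variable via env::set_var.
}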
index 364c2977bfb057fca7c45071befc3ba22e9e063c..67b11a930d6f7fbbef6da55e3ef4bf84752d6b0f 100644 (file)
 #![cfg_attr(not(stage0), deny(warnings))]
 
 #![feature(box_syntax)]
-#![feature(copy_from_slice)]
+#![feature(const_fn)]
 #![feature(libc)]
 #![feature(rand)]
 #![feature(rustc_private)]
 #![feature(staged_api)]
 #![feature(step_by)]
+#![feature(question_mark)]
+#![cfg_attr(unix, feature(static_mutex))]
 #![cfg_attr(test, feature(test, rand))]
 
 extern crate syntax;
 extern crate libc;
 extern crate serialize;
-extern crate rustc_llvm;
-extern crate rustc_front;
 #[macro_use] extern crate log;
 
-pub mod abi;
 pub mod tempdir;
 pub mod rpath;
 pub mod sha2;
-pub mod svh;
 pub mod target;
 pub mod slice;
+pub mod dynamic_lib;
diff --git a/src/librustc_back/svh.rs b/src/librustc_back/svh.rs
deleted file mode 100644 (file)
index b291163..0000000
+++ /dev/null
@@ -1,441 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Calculation and management of a Strict Version Hash for crates
-//!
-//! # Today's ABI problem
-//!
-//! In today's implementation of rustc, it is incredibly difficult to achieve
-//! forward binary compatibility without resorting to C-like interfaces. Within
-//! Rust code itself, ABI details such as symbol names are affected by a variety
-//! of factors unrelated to the code change, such as the "def id drift" problem. This
-//! ends up yielding confusing error messages about metadata mismatches and
-//! such.
-//!
-//! The core of this problem is when an upstream dependency changes and
-//! downstream dependents are not recompiled. This causes compile errors because
-//! the upstream crate's metadata has changed but the downstream crates are
-//! still referencing the older crate's metadata.
-//!
-//! This problem exists for many reasons, the primary of which is that Rust does
-//! not currently support forward ABI compatibility (in-place upgrades of a
-//! crate).
-//!
-//! # SVH and how it alleviates the problem
-//!
-//! With all of this knowledge on hand, this module contains the implementation
-//! of a notion of a "Strict Version Hash" for a crate. This is essentially a
-//! hash of all contents of a crate which can somehow be exposed to downstream
-//! crates.
-//!
-//! This hash is currently calculated by just hashing the AST, but this is
-//! obviously wrong (doc changes should not result in an incompatible ABI).
-//! Implementation-wise, this is required at this moment in time.
-//!
-//! By encoding this strict version hash into every crate's metadata, stale
-//! crates can be detected immediately and reported as errors by rustc itself.
-//!
-//! # Relevant links
-//!
-//! Original issue: https://github.com/rust-lang/rust/issues/10207
-
-use std::fmt;
-use std::hash::{Hash, SipHasher, Hasher};
-use rustc_front::hir;
-use rustc_front::intravisit as visit;
-
-#[derive(Clone, PartialEq, Debug)]
-pub struct Svh {
-    hash: String,
-}
-
-impl Svh {
-    pub fn new(hash: &str) -> Svh {
-        assert!(hash.len() == 16);
-        Svh { hash: hash.to_string() }
-    }
-
-    pub fn as_str<'a>(&'a self) -> &'a str {
-        &self.hash
-    }
-
-    pub fn calculate(metadata: &Vec<String>, krate: &hir::Crate) -> Svh {
-        // FIXME (#14132): This is better than it used to be, but it is still not
-        // ideal. We now attempt to hash only the relevant portions of the
-        // Crate AST as well as the top-level crate attributes. (However,
-        // the hashing of the crate attributes should be double-checked
-        // to ensure it is not incorporating implementation artifacts into
-        // the hash that are not otherwise visible.)
-
-        // FIXME: this should use SHA1, not SipHash. SipHash is not built to
-        //        avoid collisions.
-        let mut state = SipHasher::new();
-
-        for data in metadata {
-            data.hash(&mut state);
-        }
-
-        {
-            let mut visit = svh_visitor::make(&mut state, krate);
-            visit::walk_crate(&mut visit, krate);
-        }
-
-        // FIXME (#14132): This hash is still sensitive to e.g. the
-        // spans of the crate Attributes and their underlying
-        // MetaItems; we should make ContentHashable impl for those
-        // types and then use hash_content.  But, since all crate
-        // attributes should appear near the beginning of the file, it is
-        // not such a big deal to be sensitive to their spans for now.
-        //
-        // We hash only the MetaItems instead of the entire Attribute
-        // to avoid hashing the AttrId
-        for attr in &krate.attrs {
-            attr.node.value.hash(&mut state);
-        }
-
-        let hash = state.finish();
-        return Svh {
-            hash: (0..64).step_by(4).map(|i| hex(hash >> i)).collect()
-        };
-
-        fn hex(b: u64) -> char {
-            let b = (b & 0xf) as u8;
-            let b = match b {
-                0 ... 9 => '0' as u8 + b,
-                _ => 'a' as u8 + b - 10,
-            };
-            b as char
-        }
-    }
-}
-
-impl fmt::Display for Svh {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.pad(self.as_str())
-    }
-}
-
-// FIXME (#14132): Even this SVH computation still has implementation
-// artifacts: namely, the order of item declaration will affect the
-// hash computation, but for many kinds of items the order of
-// declaration should be irrelevant to the ABI.
-
-mod svh_visitor {
-    pub use self::SawExprComponent::*;
-    pub use self::SawStmtComponent::*;
-    use self::SawAbiComponent::*;
-    use syntax::ast::{self, Name, NodeId};
-    use syntax::codemap::Span;
-    use syntax::parse::token;
-    use rustc_front::intravisit as visit;
-    use rustc_front::intravisit::{Visitor, FnKind};
-    use rustc_front::hir::*;
-    use rustc_front::hir;
-
-    use std::hash::{Hash, SipHasher};
-
-    pub struct StrictVersionHashVisitor<'a> {
-        pub krate: &'a Crate,
-        pub st: &'a mut SipHasher,
-    }
-
-    pub fn make<'a>(st: &'a mut SipHasher, krate: &'a Crate) -> StrictVersionHashVisitor<'a> {
-        StrictVersionHashVisitor { st: st, krate: krate }
-    }
-
-    // To off-load the bulk of the hash-computation on #[derive(Hash)],
-    // we define a set of enums corresponding to the content that our
-    // crate visitor will encounter as it traverses the ast.
-    //
-    // The important invariant is that all of the Saw*Component enums
-    // do not carry any Spans, Names, or Idents.
-    //
-    // Not carrying any Names/Idents is the important fix for problem
-    // noted on PR #13948: using the ident.name as the basis for a
-    // hash leads to an unstable SVH, because ident.name is just an index
-    // into the intern table (i.e. essentially a random address), not
-    // computed from the name's content.
-    //
-    // With the below enums, the SVH computation is not sensitive to
-    // artifacts of how rustc was invoked nor of how the source code
-    // was laid out.  (Or at least it is *less* sensitive.)
-
-    // This enum represents the different potential bits of code the
-    // visitor could encounter that could affect the ABI for the crate,
-    // and assigns each a distinct tag to feed into the hash computation.
-    #[derive(Hash)]
-    enum SawAbiComponent<'a> {
-
-        // FIXME (#14132): should we include (some function of)
-        // ident.ctxt as well?
-        SawIdent(token::InternedString),
-        SawStructDef(token::InternedString),
-
-        SawLifetime(token::InternedString),
-        SawLifetimeDef(token::InternedString),
-
-        SawMod,
-        SawForeignItem,
-        SawItem,
-        SawDecl,
-        SawTy,
-        SawGenerics,
-        SawFn,
-        SawTraitItem,
-        SawImplItem,
-        SawStructField,
-        SawVariant,
-        SawExplicitSelf,
-        SawPath,
-        SawBlock,
-        SawPat,
-        SawLocal,
-        SawArm,
-        SawExpr(SawExprComponent<'a>),
-        SawStmt(SawStmtComponent),
-    }
-
-    /// SawExprComponent carries all of the information that we want
-    /// to include in the hash that *won't* be covered by the
-    /// subsequent recursive traversal of the expression's
-    /// substructure by the visitor.
-    ///
-    /// We know every Expr_ variant is covered by a variant because
-    /// `fn saw_expr` maps each to some case below.  Ensuring that
-    /// each variant carries an appropriate payload has to be verified
-    /// by hand.
-    ///
-    /// (However, getting that *exactly* right is not so important
-    /// because the SVH is just a developer convenience; there is no
-    /// guarantee of collision-freedom; hash collisions are just
-    /// (hopefully) unlikely.)
-    #[derive(Hash)]
-    pub enum SawExprComponent<'a> {
-
-        SawExprLoop(Option<token::InternedString>),
-        SawExprField(token::InternedString),
-        SawExprTupField(usize),
-        SawExprBreak(Option<token::InternedString>),
-        SawExprAgain(Option<token::InternedString>),
-
-        SawExprBox,
-        SawExprVec,
-        SawExprCall,
-        SawExprMethodCall,
-        SawExprTup,
-        SawExprBinary(hir::BinOp_),
-        SawExprUnary(hir::UnOp),
-        SawExprLit(ast::LitKind),
-        SawExprCast,
-        SawExprType,
-        SawExprIf,
-        SawExprWhile,
-        SawExprMatch,
-        SawExprClosure,
-        SawExprBlock,
-        SawExprAssign,
-        SawExprAssignOp(hir::BinOp_),
-        SawExprIndex,
-        SawExprRange,
-        SawExprPath(Option<usize>),
-        SawExprAddrOf(hir::Mutability),
-        SawExprRet,
-        SawExprInlineAsm(&'a hir::InlineAsm),
-        SawExprStruct,
-        SawExprRepeat,
-    }
-
-    fn saw_expr<'a>(node: &'a Expr_) -> SawExprComponent<'a> {
-        match *node {
-            ExprBox(..)              => SawExprBox,
-            ExprVec(..)              => SawExprVec,
-            ExprCall(..)             => SawExprCall,
-            ExprMethodCall(..)       => SawExprMethodCall,
-            ExprTup(..)              => SawExprTup,
-            ExprBinary(op, _, _)     => SawExprBinary(op.node),
-            ExprUnary(op, _)         => SawExprUnary(op),
-            ExprLit(ref lit)         => SawExprLit(lit.node.clone()),
-            ExprCast(..)             => SawExprCast,
-            ExprType(..)             => SawExprType,
-            ExprIf(..)               => SawExprIf,
-            ExprWhile(..)            => SawExprWhile,
-            ExprLoop(_, id)          => SawExprLoop(id.map(|id| id.name.as_str())),
-            ExprMatch(..)            => SawExprMatch,
-            ExprClosure(..)          => SawExprClosure,
-            ExprBlock(..)            => SawExprBlock,
-            ExprAssign(..)           => SawExprAssign,
-            ExprAssignOp(op, _, _)   => SawExprAssignOp(op.node),
-            ExprField(_, name)       => SawExprField(name.node.as_str()),
-            ExprTupField(_, id)      => SawExprTupField(id.node),
-            ExprIndex(..)            => SawExprIndex,
-            ExprRange(..)            => SawExprRange,
-            ExprPath(ref qself, _)   => SawExprPath(qself.as_ref().map(|q| q.position)),
-            ExprAddrOf(m, _)         => SawExprAddrOf(m),
-            ExprBreak(id)            => SawExprBreak(id.map(|id| id.node.name.as_str())),
-            ExprAgain(id)            => SawExprAgain(id.map(|id| id.node.name.as_str())),
-            ExprRet(..)              => SawExprRet,
-            ExprInlineAsm(ref asm)   => SawExprInlineAsm(asm),
-            ExprStruct(..)           => SawExprStruct,
-            ExprRepeat(..)           => SawExprRepeat,
-        }
-    }
-
-    /// SawStmtComponent is analogous to SawExprComponent, but for statements.
-    #[derive(Hash)]
-    pub enum SawStmtComponent {
-        SawStmtDecl,
-        SawStmtExpr,
-        SawStmtSemi,
-    }
-
-    fn saw_stmt(node: &Stmt_) -> SawStmtComponent {
-        match *node {
-            StmtDecl(..) => SawStmtDecl,
-            StmtExpr(..) => SawStmtExpr,
-            StmtSemi(..) => SawStmtSemi,
-        }
-    }
-
-    impl<'a> Visitor<'a> for StrictVersionHashVisitor<'a> {
-        fn visit_nested_item(&mut self, item: ItemId) {
-            self.visit_item(self.krate.item(item.id))
-        }
-
-        fn visit_variant_data(&mut self, s: &'a VariantData, name: Name,
-                              g: &'a Generics, _: NodeId, _: Span) {
-            SawStructDef(name.as_str()).hash(self.st);
-            visit::walk_generics(self, g);
-            visit::walk_struct_def(self, s)
-        }
-
-        fn visit_variant(&mut self, v: &'a Variant, g: &'a Generics, item_id: NodeId) {
-            SawVariant.hash(self.st);
-            // walk_variant does not call walk_generics, so do it here.
-            visit::walk_generics(self, g);
-            visit::walk_variant(self, v, g, item_id)
-        }
-
-        // All of the remaining methods just record (in the hash
-        // SipHasher) that the visitor saw that particular variant
-        // (with its payload), and continue walking as the default
-        // visitor would.
-        //
-        // Some of the implementations have some notes as to how one
-        // might try to make their SVH computation less discerning
-        // (e.g. by incorporating reachability analysis).  But
-        // currently all of their implementations are uniform and
-        // uninteresting.
-        //
-        // (If you edit a method such that it deviates from the
-        // pattern, please move that method up above this comment.)
-
-        fn visit_name(&mut self, _: Span, name: Name) {
-            SawIdent(name.as_str()).hash(self.st);
-        }
-
-        fn visit_lifetime(&mut self, l: &'a Lifetime) {
-            SawLifetime(l.name.as_str()).hash(self.st);
-        }
-
-        fn visit_lifetime_def(&mut self, l: &'a LifetimeDef) {
-            SawLifetimeDef(l.lifetime.name.as_str()).hash(self.st);
-        }
-
-        // We do recursively walk the bodies of functions/methods
-        // (rather than omitting their bodies from the hash) since
-        // monomorphization and cross-crate inlining generally implies
-        // that a change to a crate body will require downstream
-        // crates to be recompiled.
-        fn visit_expr(&mut self, ex: &'a Expr) {
-            SawExpr(saw_expr(&ex.node)).hash(self.st); visit::walk_expr(self, ex)
-        }
-
-        fn visit_stmt(&mut self, s: &'a Stmt) {
-            SawStmt(saw_stmt(&s.node)).hash(self.st); visit::walk_stmt(self, s)
-        }
-
-        fn visit_foreign_item(&mut self, i: &'a ForeignItem) {
-            // FIXME (#14132) ideally we would incorporate privacy (or
-            // perhaps reachability) somewhere here, so foreign items
-            // that do not leak into downstream crates would not be
-            // part of the ABI.
-            SawForeignItem.hash(self.st); visit::walk_foreign_item(self, i)
-        }
-
-        fn visit_item(&mut self, i: &'a Item) {
-            // FIXME (#14132) ideally would incorporate reachability
-            // analysis somewhere here, so items that never leak into
-            // downstream crates (e.g. via monomorphisation or
-            // inlining) would not be part of the ABI.
-            SawItem.hash(self.st); visit::walk_item(self, i)
-        }
-
-        fn visit_mod(&mut self, m: &'a Mod, _s: Span, _n: NodeId) {
-            SawMod.hash(self.st); visit::walk_mod(self, m)
-        }
-
-        fn visit_decl(&mut self, d: &'a Decl) {
-            SawDecl.hash(self.st); visit::walk_decl(self, d)
-        }
-
-        fn visit_ty(&mut self, t: &'a Ty) {
-            SawTy.hash(self.st); visit::walk_ty(self, t)
-        }
-
-        fn visit_generics(&mut self, g: &'a Generics) {
-            SawGenerics.hash(self.st); visit::walk_generics(self, g)
-        }
-
-        fn visit_fn(&mut self, fk: FnKind<'a>, fd: &'a FnDecl,
-                    b: &'a Block, s: Span, _: NodeId) {
-            SawFn.hash(self.st); visit::walk_fn(self, fk, fd, b, s)
-        }
-
-        fn visit_trait_item(&mut self, ti: &'a TraitItem) {
-            SawTraitItem.hash(self.st); visit::walk_trait_item(self, ti)
-        }
-
-        fn visit_impl_item(&mut self, ii: &'a ImplItem) {
-            SawImplItem.hash(self.st); visit::walk_impl_item(self, ii)
-        }
-
-        fn visit_struct_field(&mut self, s: &'a StructField) {
-            SawStructField.hash(self.st); visit::walk_struct_field(self, s)
-        }
-
-        fn visit_explicit_self(&mut self, es: &'a ExplicitSelf) {
-            SawExplicitSelf.hash(self.st); visit::walk_explicit_self(self, es)
-        }
-
-        fn visit_path(&mut self, path: &'a Path, _: ast::NodeId) {
-            SawPath.hash(self.st); visit::walk_path(self, path)
-        }
-
-        fn visit_path_list_item(&mut self, prefix: &'a Path, item: &'a PathListItem) {
-            SawPath.hash(self.st); visit::walk_path_list_item(self, prefix, item)
-        }
-
-        fn visit_block(&mut self, b: &'a Block) {
-            SawBlock.hash(self.st); visit::walk_block(self, b)
-        }
-
-        fn visit_pat(&mut self, p: &'a Pat) {
-            SawPat.hash(self.st); visit::walk_pat(self, p)
-        }
-
-        fn visit_local(&mut self, l: &'a Local) {
-            SawLocal.hash(self.st); visit::walk_local(self, l)
-        }
-
-        fn visit_arm(&mut self, a: &'a Arm) {
-            SawArm.hash(self.st); visit::walk_arm(self, a)
-        }
-    }
-}
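
For context on the visitor above: it off-loads most of the hash computation onto `#[derive(Hash)]` by reducing every AST node to a content-based tag before feeding it to the SipHasher, precisely so the result does not depend on interner indices or spans. A stripped-down, hypothetical sketch of that pattern (not part of the removed module):

    use std::hash::{Hash, Hasher, SipHasher};

    // Hypothetical miniature of the Saw* enums: tags carry content
    // (a string), never an interned index or a span.
    #[derive(Hash)]
    enum SawNode {
        SawFn,
        SawIdent(String),
    }

    fn digest(nodes: &[SawNode]) -> u64 {
        // SipHasher::new() uses fixed keys, so the same input
        // yields the same digest on every run.
        let mut hasher = SipHasher::new();
        for node in nodes {
            node.hash(&mut hasher);
        }
        hasher.finish()
    }
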
index e1242560e62c76b167568aaf797b7a45bd3d6500..2f0a043f9a7de907158e28a1bb7a617659b94ef2 100644 (file)
@@ -16,6 +16,7 @@ pub fn target() -> Target {
         llvm_target: "arm64-apple-ios".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
         target_os: "ios".to_string(),
         target_env: "".to_string(),
index c6901a4cc4270942b16a7a194241732389cac70e..c4212e702129cb93fe36c9dc8393722f90823855 100644 (file)
@@ -15,6 +15,7 @@ pub fn target() -> Target {
         llvm_target: "aarch64-linux-android".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:e-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
         target_os: "android".to_string(),
         target_env: "".to_string(),
index 51abab6609a86c52997e73b576723a21a4204833..3bf4e92fb6aab134e4a71fee62f1005cddd22878 100644 (file)
@@ -17,6 +17,7 @@ pub fn target() -> Target {
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
         target_env: "gnu".to_string(),
+        data_layout: "e-m:e-i64:64-i128:128-n32:64-S128".to_string(),
         arch: "aarch64".to_string(),
         target_os: "linux".to_string(),
         target_vendor: "unknown".to_string(),
index 732f1a353a8bd719c1f363e5fec3c02c9837765d..0a61b14763476f72f5389c8b300338a211fa5fb1 100644 (file)
@@ -18,6 +18,7 @@ pub fn target() -> Target {
         llvm_target: "arm-linux-androideabi".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
         target_os: "android".to_string(),
         target_env: "gnu".to_string(),
index 7c35b43fd4b7544ea442bbed0273daffb491af97..0cb0949d4622fa7066adc8f3820fdbac4c6bdf9d 100644 (file)
@@ -16,6 +16,7 @@ pub fn target() -> Target {
         llvm_target: "arm-unknown-linux-gnueabi".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
         target_os: "linux".to_string(),
         target_env: "gnueabi".to_string(),
index a99ec45996c2e833c2b14505f4471581aa7b2532..05b9401a0635c99d1337c8092b6f45924bd2e1e3 100644 (file)
@@ -16,6 +16,7 @@ pub fn target() -> Target {
         llvm_target: "arm-unknown-linux-gnueabihf".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
         target_os: "linux".to_string(),
         target_env: "gnueabihf".to_string(),
index d30648002912e6831cf8bd4ff36f7e8bc33e8b95..d131f8b2ef002b9e405ba7555e1b530f271058f4 100644 (file)
@@ -16,6 +16,7 @@ pub fn target() -> Target {
         llvm_target: "armv7-apple-ios".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".to_string(),
         arch: "arm".to_string(),
         target_os: "ios".to_string(),
         target_env: "".to_string(),
index d7dcd714a10b759c9b38564cb3b49580f4fe74e9..9c9bb72f76cfdecfd43c6d1536e7ecde98a3052e 100644 (file)
@@ -16,6 +16,7 @@ pub fn target() -> Target {
         llvm_target: "armv7-unknown-linux-gnueabihf".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
         arch: "arm".to_string(),
         target_os: "linux".to_string(),
         target_env: "gnueabihf".to_string(),
index 66ec6efca0e6700a925f60a830b0631eab6cb549..d317589bf3680fc2075b8960daaef5d54eb76df0 100644 (file)
@@ -16,6 +16,7 @@ pub fn target() -> Target {
         llvm_target: "armv7s-apple-ios".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".to_string(),
         arch: "arm".to_string(),
         target_os: "ios".to_string(),
         target_env: "".to_string(),
index 4330e2e7b5fb41c764faaafb5953009126baf1ee..546f9df605b30f49ab82ccc56a6bc869d3eb27da 100644 (file)
@@ -31,6 +31,7 @@ pub fn target() -> Target {
         target_os: "emscripten".to_string(),
         target_env: "".to_string(),
         target_vendor: "unknown".to_string(),
+        data_layout: "e-p:32:32-i64:64-v128:32:128-n32-S128".to_string(),
         arch: "asmjs".to_string(),
         options: opts,
     }
index 52b5901192c657ae436d6bd9d46a2d86c1b9645d..d149d4bbdc2d6e51e045923703eeb975970303bf 100644 (file)
@@ -16,6 +16,7 @@ pub fn target() -> Target {
         llvm_target: "i386-apple-ios".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128".to_string(),
         arch: "x86".to_string(),
         target_os: "ios".to_string(),
         target_env: "".to_string(),
diff --git a/src/librustc_back/target/i586_pc_windows_msvc.rs b/src/librustc_back/target/i586_pc_windows_msvc.rs
new file mode 100644 (file)
index 0000000..12bed37
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use target::Target;
+
+pub fn target() -> Target {
+    let mut base = super::i686_pc_windows_msvc::target();
+    base.options.cpu = "pentium".to_string();
+    base.llvm_target = "i586-pc-windows-msvc".to_string();
+    return base
+}
index 42d5674a2c86911a00c172b4361fbbed57d518a5..6eb64556381903de28a6a382e838bba0bca854c8 100644 (file)
 use target::Target;
 
 pub fn target() -> Target {
-    let mut base = super::linux_base::opts();
-    base.cpu = "pentium".to_string();
-    base.pre_link_args.push("-m32".to_string());
-
-    Target {
-        llvm_target: "i586-unknown-linux-gnu".to_string(),
-        target_endian: "little".to_string(),
-        target_pointer_width: "32".to_string(),
-        arch: "x86".to_string(),
-        target_os: "linux".to_string(),
-        target_env: "gnu".to_string(),
-        target_vendor: "unknown".to_string(),
-        options: base,
-    }
+    let mut base = super::i686_unknown_linux_gnu::target();
+    base.options.cpu = "pentium".to_string();
+    base.llvm_target = "i586-unknown-linux-gnu".to_string();
+    return base
 }
index 98f4654ecab41ab9390d71e2443ed6b8dcf2e6c7..b6e2f4d8e8ac9bf2f2b7f1d548e42e2f5dde27d1 100644 (file)
@@ -19,6 +19,7 @@ pub fn target() -> Target {
         llvm_target: "i686-apple-darwin".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128".to_string(),
         arch: "x86".to_string(),
         target_os: "macos".to_string(),
         target_env: "".to_string(),
index f548fdad3cbed9d81fd6f6a76007ea3d838ccccb..b338a971ff74cb730645e9fe54dff480ceee30b1 100644 (file)
@@ -18,6 +18,7 @@ pub fn target() -> Target {
         llvm_target: "i686-linux-android".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
         arch: "x86".to_string(),
         target_os: "android".to_string(),
         target_env: "gnu".to_string(),
index fa12bbd89323c78e2a56e758b48468e0a337747c..48203cc74d6a1df74673d1cf94c6ef61b4032129 100644 (file)
@@ -22,6 +22,7 @@ pub fn target() -> Target {
         llvm_target: "i686-pc-windows-gnu".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32".to_string(),
         arch: "x86".to_string(),
         target_os: "windows".to_string(),
         target_env: "gnu".to_string(),
index 7fe6590618941acfaa55182e7bfb484d64404be6..501219ad607cf6be9bf7533e19335bab1d442fbe 100644 (file)
@@ -27,6 +27,7 @@ pub fn target() -> Target {
         llvm_target: "i686-pc-windows-msvc".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32".to_string(),
         arch: "x86".to_string(),
         target_os: "windows".to_string(),
         target_env: "msvc".to_string(),
index 32a15b9f2d4d10c0ce3ab290a98ce1543197f17f..cdbbd5eafddbb7239c67cf3541d4e6fec797be2b 100644 (file)
@@ -19,6 +19,7 @@ pub fn target() -> Target {
         llvm_target: "i686-unknown-dragonfly".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
         arch: "x86".to_string(),
         target_os: "dragonfly".to_string(),
         target_env: "".to_string(),
index 812ba11cd796b3ae39a7c8511c7f64f1aaaef2fc..fadedc24149c7cf73dacd5a692989d358c149afe 100644 (file)
@@ -19,6 +19,7 @@ pub fn target() -> Target {
         llvm_target: "i686-unknown-freebsd".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
         arch: "x86".to_string(),
         target_os: "freebsd".to_string(),
         target_env: "".to_string(),
index ac2af0c64fd6ad2b1239860af8803b38f277022c..a1f3ab769070f99600bbbad2ee7c21126b753ecc 100644 (file)
@@ -19,6 +19,7 @@ pub fn target() -> Target {
         llvm_target: "i686-unknown-linux-gnu".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
         arch: "x86".to_string(),
         target_os: "linux".to_string(),
         target_env: "gnu".to_string(),
index 77bc7bb51755d6a6fe52775130f5c69cdfbe6706..cce023b843016474deb4999c3a1b2a0e08747bfc 100644 (file)
@@ -37,6 +37,7 @@ pub fn target() -> Target {
         llvm_target: "i686-unknown-linux-musl".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
         arch: "x86".to_string(),
         target_os: "linux".to_string(),
         target_env: "musl".to_string(),
index d37102e558d4a5e970e2eab698ac4031c8206c2e..472b73302a358e15dc75a35a821bd1b7650cecbf 100644 (file)
@@ -34,6 +34,7 @@ pub fn target() -> Target {
         target_os: "nacl".to_string(),
         target_env: "newlib".to_string(),
         target_vendor: "unknown".to_string(),
+        data_layout: "e-i64:64:64-p:32:32:32-v128:32:32".to_string(),
         arch: "le32".to_string(),
         options: opts,
     }
index 01f2de4a2694a0d535745c4956420789761ef751..863f5ceab0ae09c8a4e5146e230eccb58c578364 100644 (file)
@@ -15,6 +15,7 @@ pub fn target() -> Target {
         llvm_target: "mips-unknown-linux-gnu".to_string(),
         target_endian: "big".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(),
         arch: "mips".to_string(),
         target_os: "linux".to_string(),
         target_env: "gnu".to_string(),
index 050fb91aa1903417c22746bbd04b196712bf13cb..ac0fde5449ff4eb889d55f883918e64d836b8417 100644 (file)
@@ -15,6 +15,7 @@ pub fn target() -> Target {
         llvm_target: "mips-unknown-linux-musl".to_string(),
         target_endian: "big".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(),
         arch: "mips".to_string(),
         target_os: "linux".to_string(),
         target_env: "musl".to_string(),
index e9eef72e8c39313cb774a956cbf72bbd547495a2..ff33effa3e79b708fe9db77025d0494d6fde351a 100644 (file)
@@ -15,6 +15,7 @@ pub fn target() -> Target {
         llvm_target: "mipsel-unknown-linux-gnu".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(),
         arch: "mips".to_string(),
         target_os: "linux".to_string(),
         target_env: "gnu".to_string(),
index 383a0d891ca7c059f7bf5bbdce61e5d9b484db6a..d9fb14050365de27860e83759321d4417036f69d 100644 (file)
@@ -15,6 +15,7 @@ pub fn target() -> Target {
         llvm_target: "mipsel-unknown-linux-musl".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(),
         arch: "mips".to_string(),
         target_os: "linux".to_string(),
         target_env: "musl".to_string(),
index e0743c339ce9d152caaf4091a8e32227b9214dee..cdd1e4c799d4cff267bae79eac14ee832fc388fb 100644 (file)
 //! this module defines the format the JSON file should take, though each
 //! underscore in the field names should be replaced with a hyphen (`-`) in the
 //! JSON file. Some fields are required in every target specification, such as
-//! `data-layout`, `llvm-target`, `target-endian`, `target-pointer-width`, and
-//! `arch`. In general, options passed to rustc with `-C` override the target's
-//! settings, though `target-feature` and `link-args` will *add* to the list
-//! specified by the target, rather than replace.
+//! `llvm-target`, `target-endian`, `target-pointer-width`, `data-layout`,
+//! `arch`, and `os`. In general, options passed to rustc with `-C` override
+//! the target's settings, though `target-feature` and `link-args` will *add*
+//! to the list specified by the target, rather than replace.
 
 use serialize::json::Json;
 use std::default::Default;
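
As a purely illustrative sketch of the JSON format described above (hyphenated field names, with `data-layout` now required alongside `llvm-target`, `target-endian`, `target-pointer-width`, `arch`, and `os`), a hypothetical custom target specification might look like the following; every value is made up for the example:

    {
        "llvm-target": "x86_64-unknown-linux-gnu",
        "target-endian": "little",
        "target-pointer-width": "64",
        "data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128",
        "arch": "x86_64",
        "os": "linux",
        "linker": "cc"
    }
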
@@ -76,7 +76,8 @@ macro_rules! supported_targets {
             if false { }
             $(
                 else if target == stringify!($module) {
-                    let t = $module::target();
+                    let mut t = $module::target();
+                    t.options.is_builtin = true;
                     debug!("Got builtin target: {:?}", t);
                     return Some(t);
                 }
@@ -136,6 +137,7 @@ supported_targets! {
 
     ("x86_64-pc-windows-msvc", x86_64_pc_windows_msvc),
     ("i686-pc-windows-msvc", i686_pc_windows_msvc),
+    ("i586-pc-windows-msvc", i586_pc_windows_msvc),
 
     ("le32-unknown-nacl", le32_unknown_nacl),
     ("asmjs-unknown-emscripten", asmjs_unknown_emscripten)
@@ -161,6 +163,8 @@ pub struct Target {
     /// Architecture to use for ABI considerations. Valid options: "x86",
     /// "x86_64", "arm", "aarch64", "mips", "powerpc", and "powerpc64".
     pub arch: String,
+    /// [Data layout](http://llvm.org/docs/LangRef.html#data-layout) to pass to LLVM.
+    pub data_layout: String,
     /// Optional settings with defaults.
     pub options: TargetOptions,
 }
@@ -171,8 +175,9 @@ pub struct Target {
 /// these try to take "minimal defaults" that don't assume anything about the runtime they run in.
 #[derive(Clone, Debug)]
 pub struct TargetOptions {
-    /// [Data layout](http://llvm.org/docs/LangRef.html#data-layout) to pass to LLVM.
-    pub data_layout: Option<String>,
+    /// Whether the target is built-in or loaded from a custom target specification.
+    pub is_builtin: bool,
+
     /// Linker to invoke. Defaults to "cc".
     pub linker: String,
     /// Archive utility to use when managing archives. Defaults to "ar".
@@ -293,7 +298,7 @@ impl Default for TargetOptions {
     /// incomplete, and if used for compilation, will certainly not work.
     fn default() -> TargetOptions {
         TargetOptions {
-            data_layout: None,
+            is_builtin: false,
             linker: option_env!("CFG_DEFAULT_LINKER").unwrap_or("cc").to_string(),
             ar: option_env!("CFG_DEFAULT_AR").unwrap_or("ar").to_string(),
             pre_link_args: Vec::new(),
@@ -378,6 +383,7 @@ impl Target {
             llvm_target: get_req_field("llvm-target"),
             target_endian: get_req_field("target-endian"),
             target_pointer_width: get_req_field("target-pointer-width"),
+            data_layout: get_req_field("data-layout"),
             arch: get_req_field("arch"),
             target_os: get_req_field("os"),
             target_env: get_opt_field("env", ""),
@@ -426,7 +432,6 @@ impl Target {
         key!(staticlib_prefix);
         key!(staticlib_suffix);
         key!(features);
-        key!(data_layout, optional);
         key!(dynamic_linking, bool);
         key!(executables, bool);
         key!(disable_redzone, bool);
@@ -463,11 +468,11 @@ impl Target {
         use serialize::json;
 
         fn load_file(path: &Path) -> Result<Target, String> {
-            let mut f = try!(File::open(path).map_err(|e| e.to_string()));
+            let mut f = File::open(path).map_err(|e| e.to_string())?;
             let mut contents = Vec::new();
-            try!(f.read_to_end(&mut contents).map_err(|e| e.to_string()));
-            let obj = try!(json::from_reader(&mut &contents[..])
-                                .map_err(|e| e.to_string()));
+            f.read_to_end(&mut contents).map_err(|e| e.to_string())?;
+            let obj = json::from_reader(&mut &contents[..])
+                           .map_err(|e| e.to_string())?;
             Ok(Target::from_json(obj))
         }
 
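
For context on the `?` rewrites in this hunk (enabled by `#![feature(question_mark)]` earlier in the patch): `expr?` performs the same early return on `Err` that `try!(expr)` expands to, just with less noise. A minimal standalone sketch with hypothetical function names:

    use std::fs::File;
    use std::io::{self, Read};

    // Old style, using the try! macro.
    fn read_all_try(path: &str) -> Result<String, io::Error> {
        let mut f = try!(File::open(path));
        let mut s = String::new();
        try!(f.read_to_string(&mut s));
        Ok(s)
    }

    // Equivalent, using the ? operator.
    fn read_all_question(path: &str) -> Result<String, io::Error> {
        let mut f = File::open(path)?;
        let mut s = String::new();
        f.read_to_string(&mut s)?;
        Ok(s)
    }
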
index 3ba0c671d2ef0d3fa7627f629a66c526ef3ce6ab..fe7daaec1cdebac8445e54865832263b6cb9bfad 100644 (file)
@@ -19,6 +19,7 @@ pub fn target() -> Target {
         llvm_target: "powerpc64-unknown-linux-gnu".to_string(),
         target_endian: "big".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "E-m:e-i64:64-n32:64".to_string(),
         arch: "powerpc64".to_string(),
         target_os: "linux".to_string(),
         target_env: "gnu".to_string(),
index f0fac14dae08a24a679539d2df6fe991c7f3087c..4aab2b1802c9871fef58a8b3ecfe1ded2aba8a80 100644 (file)
@@ -19,6 +19,7 @@ pub fn target() -> Target {
         llvm_target: "powerpc64le-unknown-linux-gnu".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:e-i64:64-n32:64".to_string(),
         arch: "powerpc64".to_string(),
         target_os: "linux".to_string(),
         target_env: "gnu".to_string(),
index 6664abf5458b715253ba2177079de42e9bcd6358..1df36442c06884bf205311b2965fb619bcb72325 100644 (file)
@@ -18,6 +18,7 @@ pub fn target() -> Target {
         llvm_target: "powerpc-unknown-linux-gnu".to_string(),
         target_endian: "big".to_string(),
         target_pointer_width: "32".to_string(),
+        data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(),
         arch: "powerpc".to_string(),
         target_os: "linux".to_string(),
         target_env: "gnu".to_string(),
index 3e19e1482909e2b85e317755c2b5f5e70e71f2f5..c8b5dd0eccc5cfe19741bc6eccbcf1d8f1c90a73 100644 (file)
@@ -20,6 +20,7 @@ pub fn target() -> Target {
         llvm_target: "x86_64-apple-darwin".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:o-i64:64-f80:128-n8:16:32:64-S128".to_string(),
         arch: "x86_64".to_string(),
         target_os: "macos".to_string(),
         target_env: "".to_string(),
index 63234c0baee8cf14055b360dee9a57b81d1e0afb..d038e88f2b4fd780fb7a1064c8e697e3823cfe13 100644 (file)
@@ -16,6 +16,7 @@ pub fn target() -> Target {
         llvm_target: "x86_64-apple-ios".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:o-i64:64-f80:128-n8:16:32:64-S128".to_string(),
         arch: "x86_64".to_string(),
         target_os: "ios".to_string(),
         target_env: "".to_string(),
index 3e8438539156f02f0909616abfe820be230e77b0..f0a09ae71efeb7c3b02d67666473b080479b76f4 100644 (file)
@@ -19,6 +19,7 @@ pub fn target() -> Target {
         llvm_target: "x86_64-pc-windows-gnu".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:w-i64:64-f80:128-n8:16:32:64-S128".to_string(),
         arch: "x86_64".to_string(),
         target_os: "windows".to_string(),
         target_env: "gnu".to_string(),
index 14ce27350519ce95c6f4e145f74a75d4f0c2a0ec..b3fbd6ef05170a04c5810fa841940c7c8aabd299 100644 (file)
@@ -18,6 +18,7 @@ pub fn target() -> Target {
         llvm_target: "x86_64-pc-windows-msvc".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:w-i64:64-f80:128-n8:16:32:64-S128".to_string(),
         arch: "x86_64".to_string(),
         target_os: "windows".to_string(),
         target_env: "msvc".to_string(),
index c97b434b9e0d1155321841208509b7a0365c3965..652159d10fda2edf6e2cdcfe894389ec99ca2803 100644 (file)
@@ -27,6 +27,7 @@ pub fn target() -> Target {
         llvm_target: "x86_64-rumprun-netbsd".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
         arch: "x86_64".to_string(),
         target_os: "netbsd".to_string(),
         target_env: "".to_string(),
index 541c1306b33409f2334d33b38482d93da7f9d9be..5aa08ea9c8ca9fa3a067727f062bca3b845395b8 100644 (file)
@@ -19,6 +19,7 @@ pub fn target() -> Target {
         llvm_target: "x86_64-pc-solaris".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
         arch: "x86_64".to_string(),
         target_os: "solaris".to_string(),
         target_env: "".to_string(),
index 04456b1b2714a495a786a7d3ac3270425cad6e7a..e8b95ed80d9e65d2f74c404db81b56ee0c0e8769 100644 (file)
@@ -18,6 +18,7 @@ pub fn target() -> Target {
         llvm_target: "x86_64-unknown-bitrig".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
         arch: "x86_64".to_string(),
         target_os: "bitrig".to_string(),
         target_env: "".to_string(),
index 62654176aa486738201c7b3b7fab3d0e62307d87..3fa46c31a5eb19ed618a6a613fefe8865e0e2e97 100644 (file)
@@ -19,6 +19,7 @@ pub fn target() -> Target {
         llvm_target: "x86_64-unknown-dragonfly".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
         arch: "x86_64".to_string(),
         target_os: "dragonfly".to_string(),
         target_env: "".to_string(),
index 888b7f58bffca9264418b961cfb6dcd5d56ab1c0..d345a32179423e691feb9c824cbc96b4a2400035 100644 (file)
@@ -19,6 +19,7 @@ pub fn target() -> Target {
         llvm_target: "x86_64-unknown-freebsd".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
         arch: "x86_64".to_string(),
         target_os: "freebsd".to_string(),
         target_env: "".to_string(),
index e3ccd9c4c7e7dd8ae7c9e68e316b64c3de6bdee9..69e333a135002239b2ee85c472dc4ff39033696e 100644 (file)
@@ -19,6 +19,7 @@ pub fn target() -> Target {
         llvm_target: "x86_64-unknown-linux-gnu".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
         arch: "x86_64".to_string(),
         target_os: "linux".to_string(),
         target_env: "gnu".to_string(),
index dafbb924a9ca5739b5a3e397bd40d6329700565d..622a1fe8baf14b242d5abf551c65c451aad219a9 100644 (file)
@@ -73,6 +73,7 @@ pub fn target() -> Target {
         llvm_target: "x86_64-unknown-linux-musl".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
         arch: "x86_64".to_string(),
         target_os: "linux".to_string(),
         target_env: "musl".to_string(),
index 4101fabe73480eb518ed67387a298a7592105a5e..74bf71891199a4da9d33748f31323233ce9ca100 100644 (file)
@@ -18,6 +18,7 @@ pub fn target() -> Target {
         llvm_target: "x86_64-unknown-netbsd".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
         arch: "x86_64".to_string(),
         target_os: "netbsd".to_string(),
         target_env: "".to_string(),
index 8c995113c217564fbb16c0a5c847ce9ca8d5cde6..521de5373d0b16267ad135153df159e7d62a1d75 100644 (file)
@@ -19,6 +19,7 @@ pub fn target() -> Target {
         llvm_target: "x86_64-unknown-openbsd".to_string(),
         target_endian: "little".to_string(),
         target_pointer_width: "64".to_string(),
+        data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
         arch: "x86_64".to_string(),
         target_os: "openbsd".to_string(),
         target_env: "".to_string(),
index 04739c7418cd0333e8516723de5f794a6c258df2..e3e89223f2d656f0295fef24a641fb3717a7b00b 100644 (file)
@@ -45,7 +45,7 @@ impl TempDir {
         let storage;
         let mut tmpdir = tmpdir;
         if !tmpdir.is_absolute() {
-            let cur_dir = try!(env::current_dir());
+            let cur_dir = env::current_dir()?;
             storage = cur_dir.join(tmpdir);
             tmpdir = &storage;
             // return TempDir::new_in(&cur_dir.join(tmpdir), prefix);
index f78f6fb86ae2213a07a04ad62de595d6624c32d5..6da87f97fb79fdcded43f2cd4c79dfd35450ecf3 100644 (file)
@@ -13,4 +13,4 @@ log = { path = "../liblog" }
 syntax = { path = "../libsyntax" }
 graphviz = { path = "../libgraphviz" }
 rustc = { path = "../librustc" }
-rustc_front = { path = "../librustc_front" }
+rustc_mir = { path = "../librustc_mir" }
diff --git a/src/librustc_borrowck/bitslice.rs b/src/librustc_borrowck/bitslice.rs
new file mode 100644 (file)
index 0000000..a4aa7ae
--- /dev/null
@@ -0,0 +1,105 @@
+// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::mem;
+
+/// `BitSlice` provides helper methods for treating a `[usize]`
+/// as a bitvector.
+pub trait BitSlice {
+    fn clear_bit(&mut self, idx: usize) -> bool;
+    fn set_bit(&mut self, idx: usize) -> bool;
+    fn get_bit(&self, idx: usize) -> bool;
+}
+
+impl BitSlice for [usize] {
+    /// Clears the bit at `idx` to 0; returns true iff this changed `self`.
+    fn clear_bit(&mut self, idx: usize) -> bool {
+        let words = self;
+        debug!("clear_bit: words={} idx={}",
+               bits_to_string(words, words.len() * mem::size_of::<usize>()), bit_str(idx));
+        let BitLookup { word, bit_in_word, bit_mask } = bit_lookup(idx);
+        debug!("word={} bit_in_word={} bit_mask={}", word, bit_in_word, bit_mask);
+        let oldv = words[word];
+        let newv = oldv & !bit_mask;
+        words[word] = newv;
+        oldv != newv
+    }
+
+    /// Sets the bit at `idx` to 1; returns true iff this changed `self`.
+    fn set_bit(&mut self, idx: usize) -> bool {
+        let words = self;
+        debug!("set_bit: words={} idx={}",
+               bits_to_string(words, words.len() * mem::size_of::<usize>()), bit_str(idx));
+        let BitLookup { word, bit_in_word, bit_mask } = bit_lookup(idx);
+        debug!("word={} bit_in_word={} bit_mask={}", word, bit_in_word, bit_mask);
+        let oldv = words[word];
+        let newv = oldv | bit_mask;
+        words[word] = newv;
+        oldv != newv
+    }
+
+    /// Extracts value of bit at `idx` in `self`.
+    fn get_bit(&self, idx: usize) -> bool {
+        let words = self;
+        let BitLookup { word, bit_mask, .. } = bit_lookup(idx);
+        (words[word] & bit_mask) != 0
+    }
+}
+
+struct BitLookup {
+    /// Index of the word within the original `[usize]` that holds the bit.
+    word: usize,
+    /// Index of the bit within that word.
+    bit_in_word: usize,
+    /// A word with a single 1-bit set at the bit's position within its word.
+    bit_mask: usize,
+}
+
+#[inline]
+fn bit_lookup(bit: usize) -> BitLookup {
+    let usize_bits = mem::size_of::<usize>() * 8;
+    let word = bit / usize_bits;
+    let bit_in_word = bit % usize_bits;
+    let bit_mask = 1 << bit_in_word;
+    BitLookup { word: word, bit_in_word: bit_in_word, bit_mask: bit_mask }
+}
+
+
+fn bit_str(bit: usize) -> String {
+    let byte = bit >> 8;
+    let lobits = 1 << (bit & 0xFF);
+    format!("[{}:{}-{:02x}]", bit, byte, lobits)
+}
+
+pub fn bits_to_string(words: &[usize], bytes: usize) -> String {
+    let mut result = String::new();
+    let mut sep = '[';
+
+    // Note: this is a little endian printout of bytes.
+
+    let mut i = 0;
+    for &word in words.iter() {
+        let mut v = word;
+        for _ in 0..mem::size_of::<usize>() {
+            let byte = v & 0xFF;
+            if i >= bytes {
+                assert!(byte == 0);
+            } else {
+                result.push(sep);
+                result.push_str(&format!("{:02x}", byte));
+            }
+            v >>= 8;
+            i += 1;
+            sep = '-';
+        }
+    }
+    result.push(']');
+    return result
+}
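
A minimal, hypothetical usage sketch of the `BitSlice` trait defined above (not part of the patch, and assuming the trait is in scope); since `Vec<usize>` dereferences to `[usize]`, the methods are available directly on a word-backed bitvector:

    fn demo_bitslice() {
        // Two words hold 128 bits on a 64-bit target (64 bits on a 32-bit one).
        let mut bits: Vec<usize> = vec![0; 2];
        assert!(bits.set_bit(3));    // newly set, so the slice changed
        assert!(!bits.set_bit(3));   // already set, no change
        assert!(bits.get_bit(3));
        assert!(bits.clear_bit(3));  // cleared, so the slice changed
        assert!(!bits.get_bit(3));
    }
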
index f19a8658a057b5e1fb0332175f3bac1ccef90604..d79ba213aca14f6763cc23fb79559df5efa928cd 100644 (file)
@@ -22,14 +22,15 @@ use borrowck::*;
 use borrowck::InteriorKind::{InteriorElement, InteriorField};
 use rustc::middle::expr_use_visitor as euv;
 use rustc::middle::expr_use_visitor::MutateMode;
-use rustc::middle::infer;
+use rustc::infer;
 use rustc::middle::mem_categorization as mc;
 use rustc::middle::mem_categorization::Categorization;
 use rustc::middle::region;
-use rustc::middle::ty;
+use rustc::ty::{self, TyCtxt};
+use rustc::traits::ProjectionMode;
 use syntax::ast;
 use syntax::codemap::Span;
-use rustc_front::hir;
+use rustc::hir;
 
 use std::rc::Rc;
 
@@ -202,7 +203,10 @@ pub fn check_loans<'a, 'b, 'c, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>,
     debug!("check_loans(body id={})", body.id);
 
     let param_env = ty::ParameterEnvironment::for_item(bccx.tcx, fn_id);
-    let infcx = infer::new_infer_ctxt(bccx.tcx, &bccx.tcx.tables, Some(param_env));
+    let infcx = infer::new_infer_ctxt(bccx.tcx,
+                                      &bccx.tcx.tables,
+                                      Some(param_env),
+                                      ProjectionMode::AnyFinal);
 
     let mut clcx = CheckLoanCtxt {
         bccx: bccx,
@@ -231,7 +235,7 @@ fn compatible_borrow_kinds(borrow_kind1: ty::BorrowKind,
 }
 
 impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> {
-    pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.bccx.tcx }
+    pub fn tcx(&self) -> &'a TyCtxt<'tcx> { self.bccx.tcx }
 
     pub fn each_issued_loan<F>(&self, node: ast::NodeId, mut op: F) -> bool where
         F: FnMut(&Loan<'tcx>) -> bool,
index c5e2b69683b10cc3676f4141c3186d37bb12d490..0b1c4efa9ac042ada249ff831c75487868bb1ec4 100644 (file)
@@ -20,14 +20,14 @@ use borrowck::LoanPathKind::{LpVar, LpUpvar, LpDowncast, LpExtend};
 use borrowck::LoanPathElem::{LpDeref, LpInterior};
 use borrowck::move_data::InvalidMovePathIndex;
 use borrowck::move_data::{MoveData, MovePathIndex};
-use rustc::middle::def_id::{DefId};
-use rustc::middle::ty;
+use rustc::hir::def_id::{DefId};
+use rustc::ty::{self, TyCtxt};
 use rustc::middle::mem_categorization as mc;
 
 use std::mem;
 use std::rc::Rc;
 use syntax::ast;
-use syntax::codemap::Span;
+use syntax::codemap::{Span, DUMMY_SP};
 use syntax::attr::AttrMetaMethods;
 
 #[derive(PartialEq, Eq, PartialOrd, Ord)]
@@ -200,7 +200,7 @@ impl FragmentSets {
 }
 
 pub fn instrument_move_fragments<'tcx>(this: &MoveData<'tcx>,
-                                       tcx: &ty::ctxt<'tcx>,
+                                       tcx: &TyCtxt<'tcx>,
                                        sp: Span,
                                        id: ast::NodeId) {
     let span_err = tcx.map.attrs(id).iter()
@@ -245,7 +245,7 @@ pub fn instrument_move_fragments<'tcx>(this: &MoveData<'tcx>,
 ///
 /// Note: "left-over fragments" means paths that were not directly referenced in moves nor
 /// assignments, but must nonetheless be tracked as potential drop obligations.
-pub fn fixup_fragment_sets<'tcx>(this: &MoveData<'tcx>, tcx: &ty::ctxt<'tcx>) {
+pub fn fixup_fragment_sets<'tcx>(this: &MoveData<'tcx>, tcx: &TyCtxt<'tcx>) {
 
     let mut fragments = this.fragments.borrow_mut();
 
@@ -347,7 +347,7 @@ pub fn fixup_fragment_sets<'tcx>(this: &MoveData<'tcx>, tcx: &ty::ctxt<'tcx>) {
 /// example, if `lp` represents `s.x.j`, then adds moves paths for `s.x.i` and `s.x.k`, the
 /// siblings of `s.x.j`.
 fn add_fragment_siblings<'tcx>(this: &MoveData<'tcx>,
-                               tcx: &ty::ctxt<'tcx>,
+                               tcx: &TyCtxt<'tcx>,
                                gathered_fragments: &mut Vec<Fragment>,
                                lp: Rc<LoanPath<'tcx>>,
                                origin_id: Option<ast::NodeId>) {
@@ -406,7 +406,7 @@ fn add_fragment_siblings<'tcx>(this: &MoveData<'tcx>,
 /// We have determined that `origin_lp` destructures to LpExtend(parent, original_field_name).
 /// Based on this, add move paths for all of the siblings of `origin_lp`.
 fn add_fragment_siblings_for_extension<'tcx>(this: &MoveData<'tcx>,
-                                             tcx: &ty::ctxt<'tcx>,
+                                             tcx: &TyCtxt<'tcx>,
                                              gathered_fragments: &mut Vec<Fragment>,
                                              parent_lp: &Rc<LoanPath<'tcx>>,
                                              mc: mc::MutabilityCategory,
@@ -428,8 +428,8 @@ fn add_fragment_siblings_for_extension<'tcx>(this: &MoveData<'tcx>,
             let tuple_idx = match *origin_field_name {
                 mc::PositionalField(tuple_idx) => tuple_idx,
                 mc::NamedField(_) =>
-                    panic!("tuple type {:?} should not have named fields.",
-                           parent_ty),
+                    bug!("tuple type {:?} should not have named fields.",
+                         parent_ty),
             };
             let tuple_len = v.len();
             for i in 0..tuple_len {
@@ -493,10 +493,11 @@ fn add_fragment_siblings_for_extension<'tcx>(this: &MoveData<'tcx>,
         }
 
         ref sty_and_variant_info => {
-            let msg = format!("type {:?} ({:?}) is not fragmentable",
-                              parent_ty, sty_and_variant_info);
             let opt_span = origin_id.and_then(|id|tcx.map.opt_span(id));
-            tcx.sess.opt_span_bug(opt_span, &msg[..])
+            span_bug!(opt_span.unwrap_or(DUMMY_SP),
+                      "type {:?} ({:?}) is not fragmentable",
+                      parent_ty,
+                      sty_and_variant_info);
         }
     }
 }
@@ -504,7 +505,7 @@ fn add_fragment_siblings_for_extension<'tcx>(this: &MoveData<'tcx>,
 /// Adds the single sibling `LpExtend(parent, new_field_name)` of `origin_lp` (the original
 /// loan-path).
 fn add_fragment_sibling_core<'tcx>(this: &MoveData<'tcx>,
-                                   tcx: &ty::ctxt<'tcx>,
+                                   tcx: &TyCtxt<'tcx>,
                                    gathered_fragments: &mut Vec<Fragment>,
                                    parent: Rc<LoanPath<'tcx>>,
                                    mc: mc::MutabilityCategory,
index 4e507284a1b46bb130f00110d160835d9cd987ae..c85d69fa8a60d40ac8a1ab42c6c22772a834785d 100644 (file)
@@ -18,12 +18,12 @@ use rustc::middle::expr_use_visitor as euv;
 use rustc::middle::mem_categorization as mc;
 use rustc::middle::mem_categorization::Categorization;
 use rustc::middle::mem_categorization::InteriorOffsetKind as Kind;
-use rustc::middle::ty;
+use rustc::ty;
 
 use std::rc::Rc;
 use syntax::ast;
 use syntax::codemap::Span;
-use rustc_front::hir::{self, PatKind};
+use rustc::hir::{self, PatKind};
 
 struct GatherMoveInfo<'tcx> {
     id: ast::NodeId,
@@ -78,8 +78,8 @@ pub fn gather_match_variant<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>,
                 LpDowncast(ref base_lp, _) =>
                     move_data.add_variant_match(
                         tcx, lp.clone(), move_pat.id, base_lp.clone(), mode),
-                _ => panic!("should only call gather_match_variant \
-                             for cat_downcast cmt"),
+                _ => bug!("should only call gather_match_variant \
+                           for cat_downcast cmt"),
             }
         }
         None => {
index 84dce6d35702afce454775d061da3c44a71a8f77..c2492bba6247330c0cb6ca9d30a8477c7d596c71 100644 (file)
@@ -16,7 +16,7 @@ use rustc::middle::expr_use_visitor as euv;
 use rustc::middle::mem_categorization as mc;
 use rustc::middle::mem_categorization::Categorization;
 use rustc::middle::region;
-use rustc::middle::ty;
+use rustc::ty;
 
 use syntax::ast;
 use syntax::codemap::Span;
index 38157d04a5d6d605f475579eecf6479763a2d76d..893c27f0ede32ff903294a4c2abae04513d80ff1 100644 (file)
 use borrowck::*;
 use borrowck::move_data::MoveData;
 use rustc::middle::expr_use_visitor as euv;
-use rustc::middle::infer;
+use rustc::infer;
 use rustc::middle::mem_categorization as mc;
 use rustc::middle::mem_categorization::Categorization;
 use rustc::middle::region;
-use rustc::middle::ty;
+use rustc::ty::{self, TyCtxt};
+use rustc::traits::ProjectionMode;
 
 use syntax::ast;
 use syntax::codemap::Span;
 use syntax::ast::NodeId;
-use rustc_front::hir;
-use rustc_front::hir::Expr;
-use rustc_front::intravisit;
-use rustc_front::intravisit::Visitor;
+use rustc::hir;
+use rustc::hir::Expr;
+use rustc::hir::intravisit;
+use rustc::hir::intravisit::Visitor;
 
 use self::restrictions::RestrictionResult;
 
@@ -55,7 +56,10 @@ pub fn gather_loans_in_fn<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>,
     };
 
     let param_env = ty::ParameterEnvironment::for_item(bccx.tcx, fn_id);
-    let infcx = infer::new_infer_ctxt(bccx.tcx, &bccx.tcx.tables, Some(param_env));
+    let infcx = infer::new_infer_ctxt(bccx.tcx,
+                                      &bccx.tcx.tables,
+                                      Some(param_env),
+                                      ProjectionMode::AnyFinal);
     {
         let mut euv = euv::ExprUseVisitor::new(&mut glcx, &infcx);
         euv.walk_fn(decl, body);
@@ -253,7 +257,7 @@ fn check_mutability<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>,
 }
 
 impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> {
-    pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.bccx.tcx }
+    pub fn tcx(&self) -> &'a TyCtxt<'tcx> { self.bccx.tcx }
 
     /// Guarantees that `cmt` is assignable, or reports an error.
     fn guarantee_assignment_valid(&mut self,
@@ -374,10 +378,10 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> {
                     ty::ReEarlyBound(..) |
                     ty::ReVar(..) |
                     ty::ReSkolemized(..) => {
-                        self.tcx().sess.span_bug(
+                        span_bug!(
                             cmt.span,
-                            &format!("invalid borrow lifetime: {:?}",
-                                    loan_region));
+                            "invalid borrow lifetime: {:?}",
+                            loan_region);
                     }
                 };
                 debug!("loan_scope = {:?}", loan_scope);
@@ -525,7 +529,10 @@ struct StaticInitializerCtxt<'a, 'tcx: 'a> {
 impl<'a, 'tcx, 'v> Visitor<'v> for StaticInitializerCtxt<'a, 'tcx> {
     fn visit_expr(&mut self, ex: &Expr) {
         if let hir::ExprAddrOf(mutbl, ref base) = ex.node {
-            let infcx = infer::new_infer_ctxt(self.bccx.tcx, &self.bccx.tcx.tables, None);
+            let infcx = infer::new_infer_ctxt(self.bccx.tcx,
+                                              &self.bccx.tcx.tables,
+                                              None,
+                                              ProjectionMode::AnyFinal);
             let mc = mc::MemCategorizationContext::new(&infcx);
             let base_cmt = mc.cat_expr(&base).unwrap();
             let borrow_kind = ty::BorrowKind::from_mutbl(mutbl);
index 4cb9673785ecb36303ffd689cec92dd222c5ce2a..3d94f5b186f0ff3371b74f15905ebe733290d473 100644 (file)
@@ -12,11 +12,11 @@ use borrowck::BorrowckCtxt;
 use rustc::middle::mem_categorization as mc;
 use rustc::middle::mem_categorization::Categorization;
 use rustc::middle::mem_categorization::InteriorOffsetKind as Kind;
-use rustc::middle::ty;
+use rustc::ty;
 use syntax::ast;
 use syntax::codemap;
 use syntax::errors::DiagnosticBuilder;
-use rustc_front::hir;
+use rustc::hir;
 
 pub struct MoveErrorCollector<'tcx> {
     errors: Vec<MoveError<'tcx>>
@@ -134,8 +134,7 @@ fn report_cannot_move_out_of<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>,
                                   a non-copy fixed-size array",
                                  b.ty)
             } else {
-                bccx.span_bug(move_from.span, "this path should not cause illegal move");
-                unreachable!();
+                span_bug!(move_from.span, "this path should not cause illegal move");
             }
         }
 
@@ -150,14 +149,12 @@ fn report_cannot_move_out_of<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>,
                                      b.ty)
                 },
                 _ => {
-                    bccx.span_bug(move_from.span, "this path should not cause illegal move");
-                    unreachable!();
+                    span_bug!(move_from.span, "this path should not cause illegal move");
                 }
             }
         }
         _ => {
-            bccx.span_bug(move_from.span, "this path should not cause illegal move");
-            unreachable!();
+            span_bug!(move_from.span, "this path should not cause illegal move");
         }
     }
 }
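
A recurring change in the hunks above is replacing `bccx.span_bug(...)` / `tcx.sess.span_bug(...)` followed by `unreachable!()` with the `bug!` and `span_bug!` macros. A minimal, self-contained sketch of why the call sites shrink; the `Span` type and the macro body here are illustrative stand-ins, not rustc's actual implementation (the real macros report the error through the compiler session rather than a plain panic):

// Hypothetical stand-in for syntax::codemap::Span.
#[derive(Debug, Clone, Copy)]
struct Span { lo: u32, hi: u32 }

macro_rules! span_bug {
    ($span:expr, $($msg:tt)*) => {
        // The sketch just panics; the point is that format arguments are
        // taken directly, so callers no longer build a String first.
        panic!("span {:?}: {}", $span, format_args!($($msg)*))
    };
}

fn check(kind: &str, span: Span) -> u32 {
    match kind {
        "downcast" => 1,
        // The macro diverges, so no trailing `unreachable!()` is needed.
        _ => span_bug!(span, "should only be called for cat_downcast cmt, got {}", kind),
    }
}

fn main() {
    let sp = Span { lo: 0, hi: 5 };
    assert_eq!(check("downcast", sp), 1);
}
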
index 2a0d8ef276648a30f34d57973a90595b3cd6c870..ee8c3aff2ec80545f6c229fe8cb043c3420f0856 100644 (file)
@@ -14,7 +14,7 @@ use borrowck::*;
 use rustc::middle::expr_use_visitor as euv;
 use rustc::middle::mem_categorization as mc;
 use rustc::middle::mem_categorization::Categorization;
-use rustc::middle::ty;
+use rustc::ty;
 use syntax::codemap::Span;
 
 use borrowck::ToInteriorKind;
diff --git a/src/librustc_borrowck/borrowck/mir/abs_domain.rs b/src/librustc_borrowck/borrowck/mir/abs_domain.rs
new file mode 100644 (file)
index 0000000..aa885eb
--- /dev/null
@@ -0,0 +1,62 @@
+// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The move-analysis portion of borrowck needs to work in an abstract
+//! domain of lifted Lvalues.  Most of the Lvalue variants fall into a
+//! one-to-one mapping between the concrete and abstract (e.g. a
+//! field-deref on a local-variable, `x.field`, has the same meaning
+//! in both domains). Indexed-Projections are the exception: `a[x]`
+//! needs to be treated as mapping to the same move path as `a[y]` as
+//! well as `a[13]`, et cetera.
+//!
+//! (In theory the analysis could be extended to work with sets of
+//! paths, so that `a[0]` and `a[13]` could be kept distinct, while
+//! `a[x]` would still overlap them both. But that is not what this
+//! representation does today.)
+
+use rustc::mir::repr::{Lvalue, LvalueElem};
+use rustc::mir::repr::{Operand, Projection, ProjectionElem};
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct AbstractOperand;
+pub type AbstractProjection<'tcx> =
+    Projection<'tcx, Lvalue<'tcx>, AbstractOperand>;
+pub type AbstractElem<'tcx> =
+    ProjectionElem<'tcx, AbstractOperand>;
+
+pub trait Lift {
+    type Abstract;
+    fn lift(&self) -> Self::Abstract;
+}
+impl<'tcx> Lift for Operand<'tcx> {
+    type Abstract = AbstractOperand;
+    fn lift(&self) -> Self::Abstract { AbstractOperand }
+}
+impl<'tcx> Lift for LvalueElem<'tcx> {
+    type Abstract = AbstractElem<'tcx>;
+    fn lift(&self) -> Self::Abstract {
+        match *self {
+            ProjectionElem::Deref =>
+                ProjectionElem::Deref,
+            ProjectionElem::Field(ref f, ty) =>
+                ProjectionElem::Field(f.clone(), ty.clone()),
+            ProjectionElem::Index(ref i) =>
+                ProjectionElem::Index(i.lift()),
+            ProjectionElem::ConstantIndex {offset,min_length,from_end} =>
+                ProjectionElem::ConstantIndex {
+                    offset: offset,
+                    min_length: min_length,
+                    from_end: from_end
+                },
+            ProjectionElem::Downcast(a, u) =>
+                ProjectionElem::Downcast(a.clone(), u.clone()),
+        }
+    }
+}
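
To make the module doc above concrete, here is a small, self-contained analogue of the lifting step (simplified types and names, not the compiler's own): index operands collapse to a unit value, so `a[x]` and `a[13]` lift to the same abstract projection element, while field projections stay distinct.

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
enum Elem<Idx> {
    Deref,
    Field(usize),
    Index(Idx),
}

// Every concrete index operand collapses to this single value.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct AbstractOperand;

fn lift(elem: &Elem<String>) -> Elem<AbstractOperand> {
    match *elem {
        Elem::Deref => Elem::Deref,
        Elem::Field(f) => Elem::Field(f),
        Elem::Index(_) => Elem::Index(AbstractOperand),
    }
}

fn main() {
    let a_x = Elem::Index("x".to_string());
    let a_13 = Elem::Index("13".to_string());
    let a_field = Elem::Field(0);

    // `a[x]` and `a[13]` map to the same abstract element (same move path)...
    assert!(lift(&a_x) == lift(&a_13));
    // ...while `a.0` stays distinct.
    assert!(lift(&a_field) != lift(&a_x));
}
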
diff --git a/src/librustc_borrowck/borrowck/mir/dataflow.rs b/src/librustc_borrowck/borrowck/mir/dataflow.rs
new file mode 100644 (file)
index 0000000..a8176c0
--- /dev/null
@@ -0,0 +1,504 @@
+// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use syntax::attr::AttrMetaMethods;
+
+use rustc::ty::TyCtxt;
+use rustc::mir::repr::{self, Mir};
+
+use std::io;
+use std::mem;
+use std::usize;
+
+use super::MirBorrowckCtxt;
+use super::gather_moves::{Location, MoveData, MovePathData, MovePathIndex, MoveOutIndex, PathMap};
+use super::graphviz;
+use bitslice::BitSlice; // adds set_bit/get_bit to &[usize] bitvector rep.
+
+pub trait Dataflow {
+    fn dataflow(&mut self);
+}
+
+impl<'b, 'a: 'b, 'tcx: 'a> Dataflow for MirBorrowckCtxt<'b, 'a, 'tcx> {
+    fn dataflow(&mut self) {
+        self.build_gen_and_kill_sets();
+        self.pre_dataflow_instrumentation().unwrap();
+        self.propagate();
+        self.post_dataflow_instrumentation().unwrap();
+    }
+}
+
+struct PropagationContext<'c, 'b: 'c, 'a: 'b, 'tcx: 'a, OnReturn>
+    where OnReturn: Fn(&MoveData, &mut [usize], &repr::Lvalue)
+{
+    mbcx: &'c mut MirBorrowckCtxt<'b, 'a, 'tcx>,
+    changed: bool,
+    on_return: OnReturn
+}
+
+impl<'b, 'a: 'b, 'tcx: 'a> MirBorrowckCtxt<'b, 'a, 'tcx> {
+    fn propagate(&mut self) {
+        let mut temp = vec![0; self.flow_state.sets.words_per_block];
+        let mut propcx = PropagationContext {
+            mbcx: &mut *self,
+            changed: true,
+            on_return: |move_data, in_out, dest_lval| {
+                let move_path_index = move_data.rev_lookup.find(dest_lval);
+                on_all_children_bits(in_out,
+                                     &move_data.path_map,
+                                     &move_data.move_paths,
+                                     move_path_index,
+                                     &|in_out, mpi| {
+                                         in_out.clear_bit(mpi.idx());
+                                     });
+            },
+        };
+        while propcx.changed {
+            propcx.changed = false;
+            propcx.reset(&mut temp);
+            propcx.walk_cfg(&mut temp);
+        }
+    }
+
+    fn build_gen_and_kill_sets(&mut self) {
+        // First we need to build the gen- and kill-sets. The
+        // gather_moves information provides a high-level mapping from
+        // mir-locations to the MoveOuts (and those correspond
+        // directly to gen-sets here). But we still need to figure out
+        // the kill-sets.
+
+        let move_data = &self.flow_state.operator;
+        let move_paths = &move_data.move_paths;
+        let loc_map = &move_data.loc_map;
+        let path_map = &move_data.path_map;
+        let rev_lookup = &move_data.rev_lookup;
+
+        for bb in self.mir.all_basic_blocks() {
+            let &repr::BasicBlockData { ref statements,
+                                        ref terminator,
+                                        is_cleanup: _ } =
+                self.mir.basic_block_data(bb);
+
+            let mut sets = self.flow_state.sets.for_block(bb.index());
+            for (j, stmt) in statements.iter().enumerate() {
+                let loc = Location { block: bb, index: j };
+                debug!("stmt {:?} at loc {:?} moves out of move_indexes {:?}",
+                       stmt, loc, &loc_map[loc]);
+                for move_index in &loc_map[loc] {
+                    // Every path deinitialized by a *particular move*
+                    // has a corresponding bit "gen'ed" (i.e. set)
+                    // here in the dataflow vector.
+                    zero_to_one(&mut sets.gen_set, *move_index);
+                }
+                match stmt.kind {
+                    repr::StatementKind::Assign(ref lvalue, _) => {
+                        // assigning into this `lvalue` kills all
+                        // MoveOuts from it, and *also* all MoveOuts
+                        // for children and associated fragment sets.
+                        let move_path_index = rev_lookup.find(lvalue);
+
+                        on_all_children_bits(sets.kill_set,
+                                             path_map,
+                                             move_paths,
+                                             move_path_index,
+                                             &|kill_set, mpi| {
+                                                 kill_set.set_bit(mpi.idx());
+                                             });
+                    }
+                }
+            }
+
+            let loc = Location { block: bb, index: statements.len() };
+            debug!("terminator {:?} at loc {:?} moves out of move_indexes {:?}",
+                   terminator, loc, &loc_map[loc]);
+            for move_index in &loc_map[loc] {
+                zero_to_one(&mut sets.gen_set, *move_index);
+            }
+        }
+
+        fn zero_to_one(gen_set: &mut [usize], move_index: MoveOutIndex) {
+            let retval = gen_set.set_bit(move_index.idx());
+            assert!(retval);
+        }
+    }
+}
+
+fn on_all_children_bits<Each>(set: &mut [usize],
+                              path_map: &PathMap,
+                              move_paths: &MovePathData,
+                              move_path_index: MovePathIndex,
+                              each_child: &Each)
+    where Each: Fn(&mut [usize], MoveOutIndex)
+{
+    // 1. invoke `each_child` callback for all moves that directly
+    //    influence path for `move_path_index`
+    for move_index in &path_map[move_path_index] {
+        each_child(set, *move_index);
+    }
+
+    // 2. for each child of the path (that is named in this
+    //    function), recur.
+    //
+    // (Unnamed children are irrelevant to dataflow; by
+    // definition they have no associated moves.)
+    let mut next_child_index = move_paths[move_path_index].first_child;
+    while let Some(child_index) = next_child_index {
+        on_all_children_bits(set, path_map, move_paths, child_index, each_child);
+        next_child_index = move_paths[child_index].next_sibling;
+    }
+}
+
+impl<'c, 'b: 'c, 'a: 'b, 'tcx: 'a, OnReturn> PropagationContext<'c, 'b, 'a, 'tcx, OnReturn>
+    where OnReturn: Fn(&MoveData, &mut [usize], &repr::Lvalue)
+{
+    fn reset(&mut self, bits: &mut [usize]) {
+        let e = if self.mbcx.flow_state.operator.initial_value() {usize::MAX} else {0};
+        for b in bits {
+            *b = e;
+        }
+    }
+
+    fn walk_cfg(&mut self, in_out: &mut [usize]) {
+        let &mut MirBorrowckCtxt { ref mir, ref mut flow_state, .. } = self.mbcx;
+        for (idx, bb) in mir.basic_blocks.iter().enumerate() {
+            {
+                let sets = flow_state.sets.for_block(idx);
+                debug_assert!(in_out.len() == sets.on_entry.len());
+                in_out.clone_from_slice(sets.on_entry);
+                bitwise(in_out, sets.gen_set, &Union);
+                bitwise(in_out, sets.kill_set, &Subtract);
+            }
+            flow_state.propagate_bits_into_graph_successors_of(in_out,
+                                                               &mut self.changed,
+                                                               bb,
+                                                               &self.on_return);
+        }
+    }
+}
+
+impl<'b, 'a: 'b, 'tcx: 'a> MirBorrowckCtxt<'b, 'a, 'tcx> {
+    fn pre_dataflow_instrumentation(&self) -> io::Result<()> {
+        self.if_attr_meta_name_found(
+            "borrowck_graphviz_preflow",
+            |this, path: &str| {
+                graphviz::print_borrowck_graph_to(this, "preflow", path)
+            })
+    }
+
+    fn post_dataflow_instrumentation(&self) -> io::Result<()> {
+        self.if_attr_meta_name_found(
+            "borrowck_graphviz_postflow",
+            |this, path: &str| {
+                graphviz::print_borrowck_graph_to(this, "postflow", path)
+            })
+    }
+
+    fn if_attr_meta_name_found<F>(&self,
+                                  name: &str,
+                                  callback: F) -> io::Result<()>
+        where F: for <'aa, 'bb> FnOnce(&'aa Self, &'bb str) -> io::Result<()>
+    {
+        for attr in self.attributes {
+            if attr.check_name("rustc_mir") {
+                let items = attr.meta_item_list();
+                for item in items.iter().flat_map(|l| l.iter()) {
+                    if item.check_name(name) {
+                        if let Some(s) = item.value_str() {
+                            return callback(self, &s);
+                        } else {
+                            self.bcx.tcx.sess.span_err(
+                                item.span,
+                                &format!("{} attribute requires a path", item.name()));
+                        }
+                    }
+                }
+            }
+        }
+
+        Ok(())
+    }
+}
+
+/// Maps each block to a set of bits
+#[derive(Clone, Debug)]
+struct Bits {
+    bits: Vec<usize>,
+}
+
+impl Bits {
+    fn new(init_word: usize, num_words: usize) -> Self {
+        Bits { bits: vec![init_word; num_words] }
+    }
+}
+
+pub struct DataflowState<O: BitDenotation>
+{
+    /// All the sets for the analysis. (Factored into its
+    /// own structure so that we can borrow it mutably
+    /// on its own separate from other fields.)
+    pub sets: AllSets,
+
+    /// operator used to initialize, combine, and interpret bits.
+    operator: O,
+}
+
+pub struct AllSets {
+    /// Analysis bitwidth for each block.
+    bits_per_block: usize,
+
+    /// Number of words associated with each block entry,
+    /// equal to bits_per_block / usize::BITS, rounded up.
+    words_per_block: usize,
+
+    /// For each block, bits generated by executing the statements in
+    /// the block. (For comparison, the Terminator for each block is
+    /// handled in a flow-specific manner during propagation.)
+    gen_sets: Bits,
+
+    /// For each block, bits killed by executing the statements in the
+    /// block. (For comparison, the Terminator for each block is
+    /// handled in a flow-specific manner during propagation.)
+    kill_sets: Bits,
+
+    /// For each block, bits valid on entry to the block.
+    on_entry_sets: Bits,
+}
+
+pub struct BlockSets<'a> {
+    on_entry: &'a mut [usize],
+    gen_set: &'a mut [usize],
+    kill_set: &'a mut [usize],
+}
+
+impl AllSets {
+    pub fn bits_per_block(&self) -> usize { self.bits_per_block }
+    pub fn bytes_per_block(&self) -> usize { (self.bits_per_block + 7) / 8 }
+    pub fn for_block(&mut self, block_idx: usize) -> BlockSets {
+        let offset = self.words_per_block * block_idx;
+        let range = offset..(offset + self.words_per_block);
+        BlockSets {
+            on_entry: &mut self.on_entry_sets.bits[range.clone()],
+            gen_set: &mut self.gen_sets.bits[range.clone()],
+            kill_set: &mut self.kill_sets.bits[range],
+        }
+    }
+
+    fn lookup_set_for<'a>(&self, sets: &'a Bits, block_idx: usize) -> &'a [usize] {
+        let offset = self.words_per_block * block_idx;
+        &sets.bits[offset..(offset + self.words_per_block)]
+    }
+    pub fn gen_set_for(&self, block_idx: usize) -> &[usize] {
+        self.lookup_set_for(&self.gen_sets, block_idx)
+    }
+    pub fn kill_set_for(&self, block_idx: usize) -> &[usize] {
+        self.lookup_set_for(&self.kill_sets, block_idx)
+    }
+    pub fn on_entry_set_for(&self, block_idx: usize) -> &[usize] {
+        self.lookup_set_for(&self.on_entry_sets, block_idx)
+    }
+}
+
+impl<O: BitDenotation> DataflowState<O> {
+    fn each_bit<F>(&self, words: &[usize], mut f: F)
+        where F: FnMut(usize) {
+        //! Helper for iterating over the bits in a bitvector.
+
+        for (word_index, &word) in words.iter().enumerate() {
+            if word != 0 {
+                let usize_bits: usize = mem::size_of::<usize>() * 8;
+                let base_index = word_index * usize_bits;
+                for offset in 0..usize_bits {
+                    let bit = 1 << offset;
+                    if (word & bit) != 0 {
+                        // NB: we round up the total number of bits
+                        // that we store in any given bit set so that
+                        // it is an even multiple of usize::BITS. This
+                        // means that there may be some stray bits at
+                        // the end that do not correspond to any
+                        // actual value; that's why we first check
+                        // that we are in range of bits_per_block.
+                        let bit_index = base_index + offset as usize;
+                        if bit_index >= self.sets.bits_per_block() {
+                            return;
+                        } else {
+                            f(bit_index);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    pub fn interpret_set(&self, words: &[usize]) -> Vec<&O::Bit> {
+        let mut v = Vec::new();
+        self.each_bit(words, |i| {
+            v.push(self.operator.interpret(i));
+        });
+        v
+    }
+}
+
+pub trait BitwiseOperator {
+    /// Joins two predecessor bits together, typically either `|` or `&`
+    fn join(&self, pred1: usize, pred2: usize) -> usize;
+}
+
+/// Parameterization for the precise form of data flow that is used.
+pub trait DataflowOperator : BitwiseOperator {
+    /// Specifies the initial value for each bit in the `on_entry` set
+    fn initial_value(&self) -> bool;
+}
+
+pub trait BitDenotation: DataflowOperator {
+    /// Specifies what is represented by each bit in the dataflow bitvector.
+    type Bit;
+    /// Size of each bitvector allocated for each block in the analysis.
+    fn bits_per_block(&self) -> usize;
+    /// Provides the meaning of each entry in the dataflow bitvector.
+    /// (Mostly intended for use for better debug instrumentation.)
+    fn interpret(&self, idx: usize) -> &Self::Bit;
+}
+
+impl<D: BitDenotation> DataflowState<D> {
+    pub fn new(mir: &Mir, denotation: D) -> Self {
+        let bits_per_block = denotation.bits_per_block();
+        let usize_bits = mem::size_of::<usize>() * 8;
+        let words_per_block = (bits_per_block + usize_bits - 1) / usize_bits;
+        let num_blocks = mir.basic_blocks.len();
+        let num_words = num_blocks * words_per_block;
+
+        let entry = if denotation.initial_value() { usize::MAX } else {0};
+
+        let zeroes = Bits::new(0, num_words);
+        let on_entry = Bits::new(entry, num_words);
+
+        DataflowState {
+            sets: AllSets {
+                bits_per_block: bits_per_block,
+                words_per_block: words_per_block,
+                gen_sets: zeroes.clone(),
+                kill_sets: zeroes,
+                on_entry_sets: on_entry,
+            },
+            operator: denotation,
+        }
+    }
+}
+
+impl<D: BitDenotation> DataflowState<D> {
+    /// Propagates the bits of `in_out` into all the successors of `bb`,
+    /// using bitwise operator denoted by `self.operator`.
+    ///
+    /// For most blocks, this is entirely uniform. However, for blocks
+    /// that end with a call terminator, the effect of the call on the
+    /// dataflow state may depend on whether the call returned
+    /// successfully or unwound. To reflect this, the `on_return`
+    /// callback mutates `in_out` when propagating `in_out` via a call
+    /// terminator; such mutation is performed *last*, to ensure its
+    /// side-effects do not leak elsewhere (e.g. into unwind target).
+    fn propagate_bits_into_graph_successors_of<OnReturn>(
+        &mut self,
+        in_out: &mut [usize],
+        changed: &mut bool,
+        bb: &repr::BasicBlockData,
+        on_return: OnReturn) where OnReturn: Fn(&D, &mut [usize], &repr::Lvalue)
+    {
+        match bb.terminator().kind {
+            repr::TerminatorKind::Return |
+            repr::TerminatorKind::Resume => {}
+            repr::TerminatorKind::Goto { ref target } |
+            repr::TerminatorKind::Drop { ref target, value: _, unwind: None } => {
+                self.propagate_bits_into_entry_set_for(in_out, changed, target);
+            }
+            repr::TerminatorKind::Drop { ref target, value: _, unwind: Some(ref unwind) } => {
+                self.propagate_bits_into_entry_set_for(in_out, changed, target);
+                self.propagate_bits_into_entry_set_for(in_out, changed, unwind);
+            }
+            repr::TerminatorKind::If { ref targets, .. } => {
+                self.propagate_bits_into_entry_set_for(in_out, changed, &targets.0);
+                self.propagate_bits_into_entry_set_for(in_out, changed, &targets.1);
+            }
+            repr::TerminatorKind::Switch { ref targets, .. } |
+            repr::TerminatorKind::SwitchInt { ref targets, .. } => {
+                for target in targets {
+                    self.propagate_bits_into_entry_set_for(in_out, changed, target);
+                }
+            }
+            repr::TerminatorKind::Call { ref cleanup, ref destination, func: _, args: _ } => {
+                if let Some(ref unwind) = *cleanup {
+                    self.propagate_bits_into_entry_set_for(in_out, changed, unwind);
+                }
+                if let Some((ref dest_lval, ref dest_bb)) = *destination {
+                    // N.B.: This must be done *last*, after all other
+                    // propagation, as documented in comment above.
+                    on_return(&self.operator, in_out, dest_lval);
+                    self.propagate_bits_into_entry_set_for(in_out, changed, dest_bb);
+                }
+            }
+        }
+    }
+
+    fn propagate_bits_into_entry_set_for(&mut self,
+                                         in_out: &[usize],
+                                         changed: &mut bool,
+                                         bb: &repr::BasicBlock) {
+        let entry_set = self.sets.for_block(bb.index()).on_entry;
+        let set_changed = bitwise(entry_set, in_out, &self.operator);
+        if set_changed {
+            *changed = true;
+        }
+    }
+}
+
+
+impl<'tcx> DataflowState<MoveData<'tcx>> {
+    pub fn new_move_analysis(mir: &Mir<'tcx>, tcx: &TyCtxt<'tcx>) -> Self {
+        let move_data = MoveData::gather_moves(mir, tcx);
+        DataflowState::new(mir, move_data)
+    }
+}
+
+impl<'tcx> BitwiseOperator for MoveData<'tcx> {
+    #[inline]
+    fn join(&self, pred1: usize, pred2: usize) -> usize {
+        pred1 | pred2 // moves from both preds are in scope
+    }
+}
+
+impl<'tcx> DataflowOperator for MoveData<'tcx> {
+    #[inline]
+    fn initial_value(&self) -> bool {
+        false // no loans in scope by default
+    }
+}
+
+#[inline]
+fn bitwise<Op:BitwiseOperator>(out_vec: &mut [usize],
+                               in_vec: &[usize],
+                               op: &Op) -> bool {
+    assert_eq!(out_vec.len(), in_vec.len());
+    let mut changed = false;
+    for (out_elt, in_elt) in out_vec.iter_mut().zip(in_vec) {
+        let old_val = *out_elt;
+        let new_val = op.join(old_val, *in_elt);
+        *out_elt = new_val;
+        changed |= old_val != new_val;
+    }
+    changed
+}
+
+struct Union;
+impl BitwiseOperator for Union {
+    fn join(&self, a: usize, b: usize) -> usize { a | b }
+}
+struct Subtract;
+impl BitwiseOperator for Subtract {
+    fn join(&self, a: usize, b: usize) -> usize { a & !b }
+}
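
The core of the propagation in `walk_cfg` above is a word-level transfer function: the state flowing out of a block is `(on_entry | gen) & !kill`, applied one `usize` word at a time and then joined into successor entry sets until nothing changes. Below is a runnable sketch of just that step, reusing the `BitwiseOperator`/`Union`/`Subtract` shape from the file above; the example bit patterns are made up.

trait BitwiseOperator {
    /// Joins two words together, e.g. `|` for union or `& !` for subtraction.
    fn join(&self, pred1: usize, pred2: usize) -> usize;
}

struct Union;
impl BitwiseOperator for Union {
    fn join(&self, a: usize, b: usize) -> usize { a | b }
}

struct Subtract;
impl BitwiseOperator for Subtract {
    fn join(&self, a: usize, b: usize) -> usize { a & !b }
}

/// Combines `in_vec` into `out_vec` word by word, reporting whether anything changed.
fn bitwise<Op: BitwiseOperator>(out_vec: &mut [usize], in_vec: &[usize], op: &Op) -> bool {
    assert_eq!(out_vec.len(), in_vec.len());
    let mut changed = false;
    for (out_elt, in_elt) in out_vec.iter_mut().zip(in_vec) {
        let old = *out_elt;
        *out_elt = op.join(old, *in_elt);
        changed |= old != *out_elt;
    }
    changed
}

fn main() {
    // Bits 0..3 name four move paths; this block gens path 2 and kills path 0.
    let mut in_out = [0b0001usize];   // on entry: only path 0 has been moved out
    let gen_set    = [0b0100usize];
    let kill_set   = [0b0001usize];

    bitwise(&mut in_out, &gen_set, &Union);
    bitwise(&mut in_out, &kill_set, &Subtract);
    assert_eq!(in_out[0], 0b0100);    // after the block: only path 2 is moved out
}
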
diff --git a/src/librustc_borrowck/borrowck/mir/gather_moves.rs b/src/librustc_borrowck/borrowck/mir/gather_moves.rs
new file mode 100644 (file)
index 0000000..2b1b743
--- /dev/null
@@ -0,0 +1,744 @@
+// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+use rustc::ty::TyCtxt;
+use rustc::mir::repr::*;
+use rustc::util::nodemap::FnvHashMap;
+
+use std::cell::{Cell};
+use std::collections::hash_map::Entry;
+use std::fmt;
+use std::iter;
+use std::ops::Index;
+
+use super::dataflow::BitDenotation;
+use super::abs_domain::{AbstractElem, Lift};
+
+// This submodule holds some newtype'd Index wrappers that are using
+// NonZero to ensure that Option<Index> occupies only a single word.
+// They are in a submodule to impose privacy restrictions; namely, to
+// ensure that other code does not accidentally access `index.0`
+// (which is likely to yield a subtle off-by-one error).
+mod indexes {
+    use core::nonzero::NonZero;
+
+    macro_rules! new_index {
+        ($Index:ident) => {
+            #[derive(Copy, Clone, PartialEq, Eq, Debug)]
+            pub struct $Index(NonZero<usize>);
+
+            impl $Index {
+                pub fn new(idx: usize) -> Self {
+                    unsafe { $Index(NonZero::new(idx + 1)) }
+                }
+                pub fn idx(&self) -> usize {
+                    *self.0 - 1
+                }
+            }
+        }
+    }
+
+    /// Index into MovePathData.move_paths
+    new_index!(MovePathIndex);
+
+    /// Index into MoveData.moves.
+    new_index!(MoveOutIndex);
+}
+
+pub use self::indexes::MovePathIndex;
+pub use self::indexes::MoveOutIndex;
+
+/// `MovePath` is a canonicalized representation of a path that is
+/// moved or assigned to.
+///
+/// It follows a tree structure.
+///
+/// Given `struct X { m: M, n: N }` and `x: X`, moves like `drop x.m;`
+/// move *out* of the l-value `x.m`.
+///
+/// The MovePaths representing `x.m` and `x.n` are siblings (that is,
+/// one of them will link to the other via the `next_sibling` field,
+/// and the other will have no entry in its `next_sibling` field), and
+/// they both have the MovePath representing `x` as their parent.
+#[derive(Clone)]
+pub struct MovePath<'tcx> {
+    pub next_sibling: Option<MovePathIndex>,
+    pub first_child: Option<MovePathIndex>,
+    pub parent: Option<MovePathIndex>,
+    pub content: MovePathContent<'tcx>,
+}
+
+/// MovePaths usually represent a single l-value. The exceptions are
+/// forms that arise due to erroneous input code: static data holds
+/// l-values that we cannot actually move out of. Therefore we map
+/// statics to a special marker value (`MovePathContent::Static`)
+/// representing an invalid origin.
+#[derive(Clone, Debug)]
+pub enum MovePathContent<'tcx> {
+    Lvalue(Lvalue<'tcx>),
+    Static,
+}
+
+/// During construction of the MovePath's, we use PreMovePath to
+/// represent accumulated state while we are gathering up all the
+/// children of each path.
+#[derive(Clone)]
+struct PreMovePath<'tcx> {
+    pub next_sibling: Option<MovePathIndex>,
+    pub first_child: Cell<Option<MovePathIndex>>,
+    pub parent: Option<MovePathIndex>,
+    pub content: MovePathContent<'tcx>,
+}
+
+impl<'tcx> PreMovePath<'tcx> {
+    fn into_move_path(self) -> MovePath<'tcx> {
+        MovePath {
+            next_sibling: self.next_sibling,
+            parent: self.parent,
+            content: self.content,
+            first_child: self.first_child.get(),
+        }
+    }
+}
+
+impl<'tcx> fmt::Debug for MovePath<'tcx> {
+    fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
+        write!(w, "MovePath {{")?;
+        if let Some(parent) = self.parent {
+            write!(w, " parent: {:?},", parent)?;
+        }
+        if let Some(first_child) = self.first_child {
+            write!(w, " first_child: {:?},", first_child)?;
+        }
+        if let Some(next_sibling) = self.next_sibling {
+            write!(w, " next_sibling: {:?}", next_sibling)?;
+        }
+        write!(w, " content: {:?} }}", self.content)
+    }
+}
+
+pub struct MoveData<'tcx> {
+    pub move_paths: MovePathData<'tcx>,
+    pub moves: Vec<MoveOut>,
+    pub loc_map: LocMap,
+    pub path_map: PathMap,
+    pub rev_lookup: MovePathLookup<'tcx>,
+}
+
+pub struct LocMap {
+    /// Location-indexed (BasicBlock for outer index, index within BB
+    /// for inner index) map to list of MoveOutIndex's.
+    ///
+    /// Each Location `l` is mapped to the MoveOut's that are effects
+    /// of executing the code at `l`. (There can be multiple MoveOut's
+    /// for a given `l` because each MoveOut is associated with one
+    /// particular path being moved.)
+    map: Vec<Vec<Vec<MoveOutIndex>>>,
+}
+
+impl Index<Location> for LocMap {
+    type Output = [MoveOutIndex];
+    fn index(&self, index: Location) -> &Self::Output {
+        assert!(index.block.index() < self.map.len());
+        assert!(index.index < self.map[index.block.index()].len());
+        &self.map[index.block.index()][index.index]
+    }
+}
+
+pub struct PathMap {
+    /// Path-indexed map to list of MoveOutIndex's.
+    ///
+    /// Each Path `p` is mapped to the MoveOut's that move out of `p`.
+    map: Vec<Vec<MoveOutIndex>>,
+}
+
+impl Index<MovePathIndex> for PathMap {
+    type Output = [MoveOutIndex];
+    fn index(&self, index: MovePathIndex) -> &Self::Output {
+        &self.map[index.idx()]
+    }
+}
+
+/// `MoveOut` represents a point in a program that moves out of some
+/// L-value; i.e., "creates" uninitialized memory.
+///
+/// With respect to dataflow analysis:
+/// - Generated by moves and declaration of uninitialized variables.
+/// - Killed by assignments to the memory.
+#[derive(Copy, Clone)]
+pub struct MoveOut {
+    /// path being moved
+    pub path: MovePathIndex,
+    /// location of move
+    pub source: Location,
+}
+
+impl fmt::Debug for MoveOut {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "p{}@{:?}", self.path.idx(), self.source)
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct Location {
+    /// block where action is located
+    pub block: BasicBlock,
+    /// index within above block; the statement (when index < statements.len())
+    /// or the terminator (when index == statements.len()).
+    pub index: usize,
+}
+
+impl fmt::Debug for Location {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "{:?}[{}]", self.block, self.index)
+    }
+}
+
+pub struct MovePathData<'tcx> {
+    move_paths: Vec<MovePath<'tcx>>,
+}
+
+impl<'tcx> Index<MovePathIndex> for MovePathData<'tcx> {
+    type Output = MovePath<'tcx>;
+    fn index(&self, i: MovePathIndex) -> &MovePath<'tcx> {
+        &self.move_paths[i.idx()]
+    }
+}
+
+/// MovePathInverseMap maps from a uint in an lvalue-category to the
+/// MovePathIndex for the MovePath for that lvalue.
+type MovePathInverseMap = Vec<Option<MovePathIndex>>;
+
+struct MovePathDataBuilder<'a, 'tcx: 'a> {
+    mir: &'a Mir<'tcx>,
+    pre_move_paths: Vec<PreMovePath<'tcx>>,
+    rev_lookup: MovePathLookup<'tcx>,
+}
+
+/// Tables mapping from an l-value to its MovePathIndex.
+pub struct MovePathLookup<'tcx> {
+    vars: MovePathInverseMap,
+    temps: MovePathInverseMap,
+    args: MovePathInverseMap,
+
+    /// The move path representing the return value is constructed
+    /// lazily when we first encounter it in the input MIR.
+    return_ptr: Option<MovePathIndex>,
+
+    /// A single move path (representing any static data referenced)
+    /// is constructed lazily when we first encounter statics in the
+    /// input MIR.
+    statics: Option<MovePathIndex>,
+
+    /// projections are made from a base-lvalue and a projection
+    /// elem. The base-lvalue will have a unique MovePathIndex; we use
+    /// the latter as the index into the outer vector (narrowing
+    /// subsequent search so that it is solely relative to that
+    /// base-lvalue). For the remaining lookup, we map the projection
+    /// elem to the associated MovePathIndex.
+    projections: Vec<FnvHashMap<AbstractElem<'tcx>, MovePathIndex>>,
+
+    /// Tracks the next index to allocate during construction of the
+    /// MovePathData. Unused after MovePathData is fully constructed.
+    next_index: MovePathIndex,
+}
+
+trait FillTo {
+    type T;
+    fn fill_to_with(&mut self, idx: usize, x: Self::T);
+    fn fill_to(&mut self, idx: usize) where Self::T: Default {
+        self.fill_to_with(idx, Default::default())
+    }
+}
+impl<T:Clone> FillTo for Vec<T> {
+    type T = T;
+    fn fill_to_with(&mut self, idx: usize, x: T) {
+        if idx >= self.len() {
+            let delta = idx + 1 - self.len();
+            assert_eq!(idx + 1, self.len() + delta);
+            self.extend(iter::repeat(x).take(delta))
+        }
+        debug_assert!(idx < self.len());
+    }
+}
+
+#[derive(Clone, Debug)]
+enum LookupKind { Generate, Reuse }
+struct Lookup<T>(LookupKind, T);
+
+impl Lookup<MovePathIndex> {
+    fn idx(&self) -> usize { (self.1).idx() }
+}
+
+impl<'tcx> MovePathLookup<'tcx> {
+    fn new() -> Self {
+        MovePathLookup {
+            vars: vec![],
+            temps: vec![],
+            args: vec![],
+            statics: None,
+            return_ptr: None,
+            projections: vec![],
+            next_index: MovePathIndex::new(0),
+        }
+    }
+
+    fn next_index(next: &mut MovePathIndex) -> MovePathIndex {
+        let i = *next;
+        *next = MovePathIndex::new(i.idx() + 1);
+        i
+    }
+
+    fn lookup_or_generate(vec: &mut Vec<Option<MovePathIndex>>,
+                          idx: u32,
+                          next_index: &mut MovePathIndex) -> Lookup<MovePathIndex> {
+        let idx = idx as usize;
+        vec.fill_to_with(idx, None);
+        let entry = &mut vec[idx];
+        match *entry {
+            None => {
+                let i = Self::next_index(next_index);
+                *entry = Some(i);
+                Lookup(LookupKind::Generate, i)
+            }
+            Some(entry_idx) => {
+                Lookup(LookupKind::Reuse, entry_idx)
+            }
+        }
+    }
+
+    fn lookup_var(&mut self, var_idx: u32) -> Lookup<MovePathIndex> {
+        Self::lookup_or_generate(&mut self.vars,
+                                 var_idx,
+                                 &mut self.next_index)
+    }
+
+    fn lookup_temp(&mut self, temp_idx: u32) -> Lookup<MovePathIndex> {
+        Self::lookup_or_generate(&mut self.temps,
+                                 temp_idx,
+                                 &mut self.next_index)
+    }
+
+    fn lookup_arg(&mut self, arg_idx: u32) -> Lookup<MovePathIndex> {
+        Self::lookup_or_generate(&mut self.args,
+                                 arg_idx,
+                                 &mut self.next_index)
+    }
+
+    fn lookup_static(&mut self) -> Lookup<MovePathIndex> {
+        match self.statics {
+            Some(mpi) => {
+                Lookup(LookupKind::Reuse, mpi)
+            }
+            ref mut ret @ None => {
+                let mpi = Self::next_index(&mut self.next_index);
+                *ret = Some(mpi);
+                Lookup(LookupKind::Generate, mpi)
+            }
+        }
+    }
+
+    fn lookup_return_pointer(&mut self) -> Lookup<MovePathIndex> {
+        match self.return_ptr {
+            Some(mpi) => {
+                Lookup(LookupKind::Reuse, mpi)
+            }
+            ref mut ret @ None => {
+                let mpi = Self::next_index(&mut self.next_index);
+                *ret = Some(mpi);
+                Lookup(LookupKind::Generate, mpi)
+            }
+        }
+    }
+
+    fn lookup_proj(&mut self,
+                   proj: &LvalueProjection<'tcx>,
+                   base: MovePathIndex) -> Lookup<MovePathIndex> {
+        let MovePathLookup { ref mut projections,
+                             ref mut next_index, .. } = *self;
+        projections.fill_to(base.idx());
+        match projections[base.idx()].entry(proj.elem.lift()) {
+            Entry::Occupied(ent) => {
+                Lookup(LookupKind::Reuse, *ent.get())
+            }
+            Entry::Vacant(ent) => {
+                let mpi = Self::next_index(next_index);
+                ent.insert(mpi);
+                Lookup(LookupKind::Generate, mpi)
+            }
+        }
+    }
+}
+
+impl<'tcx> MovePathLookup<'tcx> {
+    // Unlike the builder `fn move_path_for` below, this lookup
+    // alternative will *not* create a MovePath on the fly for an
+    // unknown l-value; it will simply panic.
+    pub fn find(&self, lval: &Lvalue<'tcx>) -> MovePathIndex {
+        match *lval {
+            Lvalue::Var(var_idx) => self.vars[var_idx as usize].unwrap(),
+            Lvalue::Temp(temp_idx) => self.temps[temp_idx as usize].unwrap(),
+            Lvalue::Arg(arg_idx) => self.args[arg_idx as usize].unwrap(),
+            Lvalue::Static(ref _def_id) => self.statics.unwrap(),
+            Lvalue::ReturnPointer => self.return_ptr.unwrap(),
+            Lvalue::Projection(ref proj) => {
+                let base_index = self.find(&proj.base);
+                self.projections[base_index.idx()][&proj.elem.lift()]
+            }
+        }
+    }
+}
+
+impl<'a, 'tcx> MovePathDataBuilder<'a, 'tcx> {
+    fn lookup(&mut self, lval: &Lvalue<'tcx>) -> Lookup<MovePathIndex> {
+        let proj = match *lval {
+            Lvalue::Var(var_idx) =>
+                return self.rev_lookup.lookup_var(var_idx),
+            Lvalue::Temp(temp_idx) =>
+                return self.rev_lookup.lookup_temp(temp_idx),
+            Lvalue::Arg(arg_idx) =>
+                return self.rev_lookup.lookup_arg(arg_idx),
+            Lvalue::Static(_def_id) =>
+                return self.rev_lookup.lookup_static(),
+            Lvalue::ReturnPointer =>
+                return self.rev_lookup.lookup_return_pointer(),
+            Lvalue::Projection(ref proj) => {
+                proj
+            }
+        };
+
+        let base_index = self.move_path_for(&proj.base);
+        self.rev_lookup.lookup_proj(proj, base_index)
+    }
+
+    fn move_path_for(&mut self, lval: &Lvalue<'tcx>) -> MovePathIndex {
+        let lookup: Lookup<MovePathIndex> = self.lookup(lval);
+
+        // `lookup` is either the previously assigned index or a
+        // newly-allocated one.
+        debug_assert!(lookup.idx() <= self.pre_move_paths.len());
+
+        if let Lookup(LookupKind::Generate, mpi) = lookup {
+            let parent;
+            let sibling;
+            // tracks whether content is Some non-static; statics map to None.
+            let content: Option<&Lvalue<'tcx>>;
+
+            match *lval {
+                Lvalue::Static(_) => {
+                    content = None;
+                    sibling = None;
+                    parent = None;
+                }
+
+                Lvalue::Var(_) | Lvalue::Temp(_) | Lvalue::Arg(_) |
+                Lvalue::ReturnPointer => {
+                    content = Some(lval);
+                    sibling = None;
+                    parent = None;
+                }
+                Lvalue::Projection(ref proj) => {
+                    content = Some(lval);
+
+                    // Here, install new MovePath as new first_child.
+
+                    // Note: `parent` previously allocated (Projection
+                    // case of match above established this).
+                    let idx = self.move_path_for(&proj.base);
+                    parent = Some(idx);
+
+                    let parent_move_path = &mut self.pre_move_paths[idx.idx()];
+
+                    // At last: Swap in the new first_child.
+                    sibling = parent_move_path.first_child.get();
+                    parent_move_path.first_child.set(Some(mpi));
+                }
+            };
+
+            let content = match content {
+                Some(lval) => MovePathContent::Lvalue(lval.clone()),
+                None => MovePathContent::Static,
+            };
+
+            let move_path = PreMovePath {
+                next_sibling: sibling,
+                parent: parent,
+                content: content,
+                first_child: Cell::new(None),
+            };
+
+            self.pre_move_paths.push(move_path);
+        }
+
+        return lookup.1;
+    }
+}
+
+impl<'tcx> MoveData<'tcx> {
+    pub fn gather_moves(mir: &Mir<'tcx>, tcx: &TyCtxt<'tcx>) -> Self {
+        gather_moves(mir, tcx)
+    }
+}
+
+#[derive(Debug)]
+enum StmtKind {
+    Use, Repeat, Cast, BinaryOp, UnaryOp, Box,
+    Aggregate, Drop, CallFn, CallArg, Return,
+}
+
+fn gather_moves<'tcx>(mir: &Mir<'tcx>, tcx: &TyCtxt<'tcx>) -> MoveData<'tcx> {
+    use self::StmtKind as SK;
+
+    let bbs = mir.all_basic_blocks();
+    let mut moves = Vec::with_capacity(bbs.len());
+    let mut loc_map: Vec<_> = iter::repeat(Vec::new()).take(bbs.len()).collect();
+    let mut path_map = Vec::new();
+
+    // this is mutable only because we will move it to and from the
+    // BlockContexts constructed on each iteration. (Moving is more
+    // straightforward than mutable borrows in this instance.)
+    let mut builder = MovePathDataBuilder {
+        mir: mir,
+        pre_move_paths: Vec::new(),
+        rev_lookup: MovePathLookup::new(),
+    };
+
+    for bb in bbs {
+        let loc_map_bb = &mut loc_map[bb.index()];
+        let bb_data = mir.basic_block_data(bb);
+
+        debug_assert!(loc_map_bb.len() == 0);
+        let len = bb_data.statements.len();
+        loc_map_bb.fill_to(len);
+        debug_assert!(loc_map_bb.len() == len + 1);
+
+        let mut bb_ctxt = BlockContext {
+            tcx: tcx,
+            moves: &mut moves,
+            builder: builder,
+            path_map: &mut path_map,
+            loc_map_bb: loc_map_bb,
+        };
+
+        for (i, stmt) in bb_data.statements.iter().enumerate() {
+            let source = Location { block: bb, index: i };
+            match stmt.kind {
+                StatementKind::Assign(ref lval, ref rval) => {
+                    // ensure MovePath created for `lval`.
+                    bb_ctxt.builder.move_path_for(lval);
+
+                    match *rval {
+                        Rvalue::Use(ref operand) => {
+                            bb_ctxt.on_operand(SK::Use, operand, source)
+                        }
+                        Rvalue::Repeat(ref operand, ref _const) =>
+                            bb_ctxt.on_operand(SK::Repeat, operand, source),
+                        Rvalue::Cast(ref _kind, ref operand, ref _ty) =>
+                            bb_ctxt.on_operand(SK::Cast, operand, source),
+                        Rvalue::BinaryOp(ref _binop, ref operand1, ref operand2) => {
+                            bb_ctxt.on_operand(SK::BinaryOp, operand1, source);
+                            bb_ctxt.on_operand(SK::BinaryOp, operand2, source);
+                        }
+                        Rvalue::UnaryOp(ref _unop, ref operand) => {
+                            bb_ctxt.on_operand(SK::UnaryOp, operand, source);
+                        }
+                        Rvalue::Box(ref _ty) => {
+                            // this is creating uninitialized
+                            // memory that needs to be initialized.
+                            let deref_lval = Lvalue::Projection(Box::new(Projection {
+                                base: lval.clone(),
+                                elem: ProjectionElem::Deref,
+                            }));
+                            bb_ctxt.on_move_out_lval(SK::Box, &deref_lval, source);
+                        }
+                        Rvalue::Aggregate(ref _kind, ref operands) => {
+                            for operand in operands {
+                                bb_ctxt.on_operand(SK::Aggregate, operand, source);
+                            }
+                        }
+                        Rvalue::Ref(..) |
+                        Rvalue::Len(..) |
+                        Rvalue::InlineAsm { .. } => {}
+
+                        Rvalue::Slice {..} => {
+                            bug!("cannot move out of slice");
+                        }
+                    }
+                }
+            }
+        }
+
+        match bb_data.terminator().kind {
+            TerminatorKind::Goto { target: _ } | TerminatorKind::Resume => { }
+
+            TerminatorKind::Return => {
+                let source = Location { block: bb,
+                                        index: bb_data.statements.len() };
+                let lval = &Lvalue::ReturnPointer.deref();
+                bb_ctxt.on_move_out_lval(SK::Return, lval, source);
+            }
+
+            TerminatorKind::If { ref cond, targets: _ } => {
+                // The `cond` is always of (copyable) type `bool`,
+                // so there will never be anything to move.
+                let _ = cond;
+            }
+
+            TerminatorKind::SwitchInt { switch_ty: _, values: _, targets: _, ref discr } |
+            TerminatorKind::Switch { adt_def: _, targets: _, ref discr } => {
+                // The `discr` is not consumed; that is instead
+                // encoded on specific match arms (and for
+                // `SwitchInt`, it is always a copyable integer
+                // type anyway).
+                let _ = discr;
+            }
+
+            TerminatorKind::Drop { value: ref lval, target: _, unwind: _ } => {
+                let source = Location { block: bb,
+                                        index: bb_data.statements.len() };
+                bb_ctxt.on_move_out_lval(SK::Drop, lval, source);
+            }
+
+            TerminatorKind::Call { ref func, ref args, ref destination, cleanup: _ } => {
+                let source = Location { block: bb,
+                                        index: bb_data.statements.len() };
+                bb_ctxt.on_operand(SK::CallFn, func, source);
+                for arg in args {
+                    bb_ctxt.on_operand(SK::CallArg, arg, source);
+                }
+                if let Some((ref destination, _bb)) = *destination {
+                    // Create MovePath for `destination`, then
+                    // discard returned index.
+                    bb_ctxt.builder.move_path_for(destination);
+                }
+            }
+        }
+
+        builder = bb_ctxt.builder;
+    }
+
+    // At this point, we may have created some MovePaths that do not
+    // have corresponding entries in the path map.
+    //
+    // (For example, creating the path `a.b.c` may, as a side-effect,
+    // create a path for the parent path `a.b`.)
+    //
+    // All such paths were not referenced by any move, so in principle
+    // they may lack entries in the path map ... well, let's actually
+    // try just asserting that the path map *is* complete.
+    assert_eq!(path_map.len(), builder.pre_move_paths.len());
+    path_map.fill_to(builder.pre_move_paths.len() - 1);
+
+    let pre_move_paths = builder.pre_move_paths;
+    let move_paths: Vec<_> = pre_move_paths.into_iter()
+        .map(|p| p.into_move_path())
+        .collect();
+
+    debug!("{}", {
+        let mut seen: Vec<_> = move_paths.iter().map(|_| false).collect();
+        for (j, &MoveOut { ref path, ref source }) in moves.iter().enumerate() {
+            debug!("MovePathData moves[{}]: MoveOut {{ path: {:?} = {:?}, source: {:?} }}",
+                   j, path, move_paths[path.idx()], source);
+            seen[path.idx()] = true;
+        }
+        for (j, path) in move_paths.iter().enumerate() {
+            if !seen[j] {
+                debug!("MovePathData move_paths[{}]: {:?}", j, path);
+            }
+        }
+        "done dumping MovePathData"
+    });
+
+    MoveData {
+        move_paths: MovePathData { move_paths: move_paths, },
+        moves: moves,
+        loc_map: LocMap { map: loc_map },
+        path_map: PathMap { map: path_map },
+        rev_lookup: builder.rev_lookup,
+    }
+}
+
+struct BlockContext<'b, 'a: 'b, 'tcx: 'a> {
+    tcx: &'b TyCtxt<'tcx>,
+    moves: &'b mut Vec<MoveOut>,
+    builder: MovePathDataBuilder<'a, 'tcx>,
+    path_map: &'b mut Vec<Vec<MoveOutIndex>>,
+    loc_map_bb: &'b mut Vec<Vec<MoveOutIndex>>,
+}
+
+impl<'b, 'a: 'b, 'tcx: 'a> BlockContext<'b, 'a, 'tcx> {
+    fn on_move_out_lval(&mut self,
+                        stmt_kind: StmtKind,
+                        lval: &Lvalue<'tcx>,
+                        source: Location) {
+        let tcx = self.tcx;
+        let lval_ty = self.builder.mir.lvalue_ty(tcx, lval);
+
+        // FIXME: does lvalue_ty ever return TyError, or is it
+        // guaranteed to always return non-Infer/non-Error values?
+
+        // This code is just trying to avoid creating a MoveOut
+        // entry for values that do not need move semantics.
+        //
+        // type_contents is imprecise (may claim needs drop for
+        // types that in fact have no destructor). But that is
+        // still usable for our purposes here.
+        let consumed = lval_ty.to_ty(tcx).type_contents(tcx).needs_drop(tcx);
+
+        if !consumed {
+            debug!("ctxt: {:?} no consume of lval: {:?} of type {:?}",
+                   stmt_kind, lval, lval_ty);
+            return;
+        }
+        let i = source.index;
+        let index = MoveOutIndex::new(self.moves.len());
+
+        let path = self.builder.move_path_for(lval);
+        self.moves.push(MoveOut { path: path, source: source.clone() });
+        self.path_map.fill_to(path.idx());
+
+        debug!("ctxt: {:?} add consume of lval: {:?} \
+                at index: {:?} \
+                to path_map for path: {:?} and \
+                to loc_map for loc: {:?}",
+               stmt_kind, lval, index, path, source);
+
+        debug_assert!(path.idx() < self.path_map.len());
+        // this is actually a questionable assert; at the very
+        // least, incorrect input code can probably cause it to
+        // fire.
+        assert!(self.path_map[path.idx()].iter().find(|idx| **idx == index).is_none());
+        self.path_map[path.idx()].push(index);
+
+        debug_assert!(i < self.loc_map_bb.len());
+        debug_assert!(self.loc_map_bb[i].iter().find(|idx| **idx == index).is_none());
+        self.loc_map_bb[i].push(index);
+    }
+
+    fn on_operand(&mut self, stmt_kind: StmtKind, operand: &Operand<'tcx>, source: Location) {
+        match *operand {
+            Operand::Constant(..) => {} // not-a-move
+            Operand::Consume(ref lval) => { // a move
+                self.on_move_out_lval(stmt_kind, lval, source);
+            }
+        }
+    }
+}
+
+impl<'tcx> BitDenotation for MoveData<'tcx>{
+    type Bit = MoveOut;
+    fn bits_per_block(&self) -> usize {
+        self.moves.len()
+    }
+    fn interpret(&self, idx: usize) -> &Self::Bit {
+        &self.moves[idx]
+    }
+}
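
The `MovePath` documentation above describes a tree in which each path records its parent, its first child and its next sibling, and `on_all_children_bits` in dataflow.rs walks it by following `first_child`/`next_sibling` links. Here is a self-contained sketch of that structure and walk; the tiny builder and names are illustrative, not the compiler's own.

#[derive(Default)]
struct Path {
    name: String,
    first_child: Option<usize>,
    next_sibling: Option<usize>,
}

struct Paths(Vec<Path>);

impl Paths {
    // Adds `name` as the new first child of `parent`, pushing the previous
    // first child down the sibling chain (the same swap done in `move_path_for`).
    fn add_child(&mut self, parent: usize, name: &str) -> usize {
        let idx = self.0.len();
        let sibling = self.0[parent].first_child;
        self.0.push(Path { name: name.to_string(), first_child: None, next_sibling: sibling });
        self.0[parent].first_child = Some(idx);
        idx
    }

    // Visits `path` and, recursively, every child reachable through
    // `first_child`/`next_sibling`, mirroring `on_all_children_bits`.
    fn on_all_children<F: FnMut(&str)>(&self, path: usize, f: &mut F) {
        f(&self.0[path].name);
        let mut next = self.0[path].first_child;
        while let Some(child) = next {
            self.on_all_children(child, &mut *f);
            next = self.0[child].next_sibling;
        }
    }
}

fn main() {
    // struct X { m: M, n: N }  with  x: X
    let mut paths = Paths(vec![Path { name: "x".into(), ..Default::default() }]);
    paths.add_child(0, "x.m");
    paths.add_child(0, "x.n");

    let mut visited = Vec::new();
    paths.on_all_children(0, &mut |name| visited.push(name.to_string()));
    // `x.n` was installed last, so it is the current first child.
    assert_eq!(visited, ["x", "x.n", "x.m"]);
}
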
diff --git a/src/librustc_borrowck/borrowck/mir/graphviz.rs b/src/librustc_borrowck/borrowck/mir/graphviz.rs
new file mode 100644 (file)
index 0000000..460c71d
--- /dev/null
@@ -0,0 +1,232 @@
+// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Hook into libgraphviz for rendering dataflow graphs for MIR.
+
+use rustc::mir::repr::{BasicBlock, Mir};
+
+use dot;
+use dot::IntoCow;
+
+use std::fs::File;
+use std::io;
+use std::io::prelude::*;
+
+use super::MirBorrowckCtxt;
+use bitslice::bits_to_string;
+use super::gather_moves::MoveOut;
+
+struct Graph<'c, 'b:'c, 'a:'b, 'tcx:'a> { mbcx: &'c MirBorrowckCtxt<'b, 'a, 'tcx>,
+                                          context: &'b str }
+
+pub fn print_borrowck_graph_to(mbcx: &MirBorrowckCtxt,
+                               context: &str,
+                               path: &str) -> io::Result<()> {
+    let g = Graph { mbcx: mbcx, context: context };
+    let mut v = Vec::new();
+    dot::render(&g, &mut v)?;
+    println!("print_borrowck_graph_to path: {} context: {} node_id: {}",
+             path, context, mbcx.node_id);
+    File::create(path).and_then(|mut f| f.write_all(&v))
+}
+
+pub type Node = BasicBlock;
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct Edge { source: BasicBlock, index: usize }
+
+fn outgoing(mir: &Mir, bb: BasicBlock) -> Vec<Edge> {
+    let succ_len = mir.basic_block_data(bb).terminator().successors().len();
+    (0..succ_len).map(|index| Edge { source: bb, index: index}).collect()
+}
+
+impl<'c, 'b:'c, 'a:'b, 'tcx:'a> dot::Labeller<'c> for Graph<'c,'b,'a,'tcx> {
+    type Node = Node;
+    type Edge = Edge;
+    fn graph_id(&self) -> dot::Id {
+        dot::Id::new(format!("graph_for_node_{}_{}",
+                             self.mbcx.node_id,
+                             self.context))
+            .unwrap()
+    }
+
+    fn node_id(&self, n: &Node) -> dot::Id {
+        dot::Id::new(format!("bb_{}", n.index()))
+            .unwrap()
+    }
+
+    fn node_label(&self, n: &Node) -> dot::LabelText {
+        // A standard MIR label, as generated by write_node_label, is
+        // presented in a single column in a table.
+        //
+        // The code below does a bunch of formatting work to format a
+        // node (i.e. MIR basic-block) label with extra
+        // dataflow-enriched information.  In particular, the goal is
+        // to add extra columns that present the three dataflow
+        // bitvectors, and the data those bitvectors represent.
+        //
+        // The label is rendered in the following format (the table is
+        // shown here as ASCII art, one line per row of the table, and
+        // with a chunk size of 3 rather than 5):
+        //
+        // ------  -----------------------  ------------  --------------------
+        //                    [e1, e3, e4]
+        //             [e8, e9] "= ENTRY:"  <ENTRY-BITS>
+        // ------  -----------------------  ------------  --------------------
+        // Left
+        // Most
+        // Column
+        // Is
+        // Just
+        // Normal
+        // Series
+        // Of
+        // MIR
+        // Stmts
+        // ------  -----------------------  ------------  --------------------
+        //           [g1, g4, g5] "= GEN:"  <GEN-BITS>
+        // ------  -----------------------  ------------  --------------------
+        //                         "KILL:"  <KILL-BITS>   "=" [k1, k3, k8]
+        //                                                [k9]
+        // ------  -----------------------  ------------  --------------------
+        //
+        // (In addition, the added dataflow is rendered with a colored
+        // background just so it will stand out compared to the
+        // statements.)
+        let mut v = Vec::new();
+        let i = n.index();
+        let chunk_size = 5;
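+        // Attribute snippets spliced into the HTML-like Graphviz label below:
+        // the dataflow cells get a pink background, right alignment, and a
+        // monospace font for the raw bit strings.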
+        const BG_FLOWCONTENT: &'static str = r#"bgcolor="pink""#;
+        const ALIGN_RIGHT: &'static str = r#"align="right""#;
+        const FACE_MONOSPACE: &'static str = r#"FACE="Courier""#;
+        fn chunked_present_left<W:io::Write>(w: &mut W,
+                                             interpreted: &[&MoveOut],
+                                             chunk_size: usize)
+                                             -> io::Result<()>
+        {
+            // This function may emit a sequence of <tr>'s, but it
+            // always leaves the last one unfinished, ending with
+            // <tr><td></td><td>
+            //
+            // Thus, after it returns, the caller must close both the
+            // pending <td> and the <tr> itself.
+            let mut seen_one = false;
+            for c in interpreted.chunks(chunk_size) {
+                if seen_one {
+                    // if not the first row, finish off the previous row
+                    write!(w, "</td><td></td><td></td></tr>")?;
+                }
+                write!(w, "<tr><td></td><td {bg} {align}>{objs:?}",
+                       bg = BG_FLOWCONTENT,
+                       align = ALIGN_RIGHT,
+                       objs = c)?;
+                seen_one = true;
+            }
+            if !seen_one {
+                write!(w, "<tr><td></td><td {bg} {align}>[]",
+                       bg = BG_FLOWCONTENT,
+                       align = ALIGN_RIGHT)?;
+            }
+            Ok(())
+        }
+        ::rustc_mir::graphviz::write_node_label(
+            *n, self.mbcx.mir, &mut v, 4,
+            |w| {
+                let flow = &self.mbcx.flow_state;
+                let entry = flow.interpret_set(flow.sets.on_entry_set_for(i));
+                chunked_present_left(w, &entry[..], chunk_size)?;
+                write!(w, "= ENTRY:</td><td {bg}><FONT {face}>{entrybits:?}</FONT></td>\
+                                        <td></td></tr>",
+                       bg = BG_FLOWCONTENT,
+                       face = FACE_MONOSPACE,
+                       entrybits=bits_to_string(flow.sets.on_entry_set_for(i),
+                                                flow.sets.bytes_per_block()))
+            },
+            |w| {
+                let flow = &self.mbcx.flow_state;
+                let gen = flow.interpret_set( flow.sets.gen_set_for(i));
+                let kill = flow.interpret_set(flow.sets.kill_set_for(i));
+                chunked_present_left(w, &gen[..], chunk_size)?;
+                write!(w, " = GEN:</td><td {bg}><FONT {face}>{genbits:?}</FONT></td>\
+                                       <td></td></tr>",
+                       bg = BG_FLOWCONTENT,
+                       face = FACE_MONOSPACE,
+                       genbits=bits_to_string( flow.sets.gen_set_for(i),
+                                               flow.sets.bytes_per_block()))?;
+                write!(w, "<tr><td></td><td {bg} {align}>KILL:</td>\
+                                        <td {bg}><FONT {face}>{killbits:?}</FONT></td>",
+                       bg = BG_FLOWCONTENT,
+                       align = ALIGN_RIGHT,
+                       face = FACE_MONOSPACE,
+                       killbits=bits_to_string(flow.sets.kill_set_for(i),
+                                               flow.sets.bytes_per_block()))?;
+
+                // (chunked_present_right)
+                let mut seen_one = false;
+                for k in kill.chunks(chunk_size) {
+                    if !seen_one {
+                        // continuation of row; this is fourth <td>
+                        write!(w, "<td {bg}>= {kill:?}</td></tr>",
+                               bg = BG_FLOWCONTENT,
+                               kill=k)?;
+                    } else {
+                        // new row, with indent of three <td>'s
+                        write!(w, "<tr><td></td><td></td><td></td><td {bg}>{kill:?}</td></tr>",
+                               bg = BG_FLOWCONTENT,
+                               kill=k)?;
+                    }
+                    seen_one = true;
+                }
+                if !seen_one {
+                    write!(w, "<td {bg}>= []</td></tr>",
+                           bg = BG_FLOWCONTENT)?;
+                }
+
+                Ok(())
+            })
+            .unwrap();
+        dot::LabelText::html(String::from_utf8(v).unwrap())
+    }
+
+    fn node_shape(&self, _n: &Node) -> Option<dot::LabelText> {
+        Some(dot::LabelText::label("none"))
+    }
+}
+
+impl<'c, 'b:'c, 'a:'b, 'tcx:'a> dot::GraphWalk<'c> for Graph<'c,'b,'a,'tcx> {
+    type Node = Node;
+    type Edge = Edge;
+    fn nodes(&self) -> dot::Nodes<Node> {
+        self.mbcx.mir.all_basic_blocks().into_cow()
+    }
+
+    fn edges(&self) -> dot::Edges<Edge> {
+        let mir = self.mbcx.mir;
+        let blocks = self.mbcx.mir.all_basic_blocks();
+        // Base the initial capacity on the assumption that every block
+        // has at least one outgoing edge (which should be true for all
+        // blocks but one, the exit block).
+        let mut edges = Vec::with_capacity(blocks.len());
+        for bb in blocks {
+            let outgoing = outgoing(mir, bb);
+            edges.extend(outgoing.into_iter());
+        }
+        edges.into_cow()
+    }
+
+    fn source(&self, edge: &Edge) -> Node {
+        edge.source
+    }
+
+    fn target(&self, edge: &Edge) -> Node {
+        let mir = self.mbcx.mir;
+        mir.basic_block_data(edge.source).terminator().successors()[edge.index]
+    }
+}
diff --git a/src/librustc_borrowck/borrowck/mir/mod.rs b/src/librustc_borrowck/borrowck/mir/mod.rs
new file mode 100644 (file)
index 0000000..672faea
--- /dev/null
@@ -0,0 +1,91 @@
+// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use borrowck::BorrowckCtxt;
+
+use syntax::ast;
+use syntax::codemap::Span;
+
+use rustc::hir;
+use rustc::hir::intravisit::{FnKind};
+
+use rustc::mir::repr::{BasicBlock, BasicBlockData, Mir, Statement, Terminator};
+
+mod abs_domain;
+mod dataflow;
+mod gather_moves;
+mod graphviz;
+
+use self::dataflow::{Dataflow, DataflowState};
+use self::gather_moves::{MoveData};
+
+pub fn borrowck_mir<'b, 'a: 'b, 'tcx: 'a>(
+    bcx: &'b mut BorrowckCtxt<'a, 'tcx>,
+    fk: FnKind,
+    _decl: &hir::FnDecl,
+    mir: &'a Mir<'tcx>,
+    body: &hir::Block,
+    _sp: Span,
+    id: ast::NodeId,
+    attributes: &[ast::Attribute]) {
+    match fk {
+        FnKind::ItemFn(name, _, _, _, _, _, _) |
+        FnKind::Method(name, _, _, _) => {
+            debug!("borrowck_mir({}) UNIMPLEMENTED", name);
+        }
+        FnKind::Closure(_) => {
+            debug!("borrowck_mir closure (body.id={}) UNIMPLEMENTED", body.id);
+        }
+    }
+
+    let mut mbcx = MirBorrowckCtxt {
+        bcx: bcx,
+        mir: mir,
+        node_id: id,
+        attributes: attributes,
+        flow_state: DataflowState::new_move_analysis(mir, bcx.tcx),
+    };
+
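+    // Walk every basic block (currently this only emits debug output), then
+    // run the move-data dataflow analysis over the whole MIR body.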
+    for bb in mir.all_basic_blocks() {
+        mbcx.process_basic_block(bb);
+    }
+
+    mbcx.dataflow();
+
+    debug!("borrowck_mir done");
+}
+
+pub struct MirBorrowckCtxt<'b, 'a: 'b, 'tcx: 'a> {
+    bcx: &'b mut BorrowckCtxt<'a, 'tcx>,
+    mir: &'b Mir<'tcx>,
+    node_id: ast::NodeId,
+    attributes: &'b [ast::Attribute],
+    flow_state: DataflowState<MoveData<'tcx>>,
+}
+
+impl<'b, 'a: 'b, 'tcx: 'a> MirBorrowckCtxt<'b, 'a, 'tcx> {
+    fn process_basic_block(&mut self, bb: BasicBlock) {
+        let &BasicBlockData { ref statements, ref terminator, is_cleanup: _ } =
+            self.mir.basic_block_data(bb);
+        for stmt in statements {
+            self.process_statement(bb, stmt);
+        }
+
+        self.process_terminator(bb, terminator);
+    }
+
+    fn process_statement(&mut self, bb: BasicBlock, stmt: &Statement<'tcx>) {
+        debug!("MirBorrowckCtxt::process_statement({:?}, {:?}", bb, stmt);
+    }
+
+    fn process_terminator(&mut self, bb: BasicBlock, term: &Option<Terminator<'tcx>>) {
+        debug!("MirBorrowckCtxt::process_terminator({:?}, {:?})", bb, term);
+    }
+}
index 1fa36a98ec5d975f42d211024aca0ceded036cf2..d7c928b8d6e1fe962525ae1ad595894b995bb8c5 100644 (file)
@@ -21,33 +21,35 @@ pub use self::MovedValueUseKind::*;
 use self::InteriorKind::*;
 
 use rustc::dep_graph::DepNode;
-use rustc::front::map as hir_map;
-use rustc::front::map::blocks::FnParts;
-use rustc::middle::cfg;
+use rustc::hir::map as hir_map;
+use rustc::hir::map::blocks::FnParts;
+use rustc::cfg;
 use rustc::middle::dataflow::DataFlowContext;
 use rustc::middle::dataflow::BitwiseOperator;
 use rustc::middle::dataflow::DataFlowOperator;
 use rustc::middle::dataflow::KillFrom;
-use rustc::middle::def_id::DefId;
+use rustc::hir::def_id::DefId;
 use rustc::middle::expr_use_visitor as euv;
 use rustc::middle::free_region::FreeRegionMap;
 use rustc::middle::mem_categorization as mc;
 use rustc::middle::mem_categorization::Categorization;
 use rustc::middle::region;
-use rustc::middle::ty::{self, Ty};
+use rustc::ty::{self, Ty, TyCtxt};
 
 use std::fmt;
 use std::mem;
 use std::rc::Rc;
 use syntax::ast;
+use syntax::attr::AttrMetaMethods;
 use syntax::codemap::Span;
 use syntax::errors::DiagnosticBuilder;
 
-use rustc_front::hir;
-use rustc_front::hir::{FnDecl, Block};
-use rustc_front::intravisit;
-use rustc_front::intravisit::{Visitor, FnKind};
-use rustc_front::util as hir_util;
+use rustc::hir;
+use rustc::hir::{FnDecl, Block};
+use rustc::hir::intravisit;
+use rustc::hir::intravisit::{Visitor, FnKind};
+
+use rustc::mir::mir_map::MirMap;
 
 pub mod check_loans;
 
@@ -55,6 +57,8 @@ pub mod gather_loans;
 
 pub mod move_data;
 
+mod mir;
+
 #[derive(Clone, Copy)]
 pub struct LoanDataFlowOperator;
 
@@ -66,15 +70,13 @@ impl<'a, 'tcx, 'v> Visitor<'v> for BorrowckCtxt<'a, 'tcx> {
         match fk {
             FnKind::ItemFn(..) |
             FnKind::Method(..) => {
-                let new_free_region_map = self.tcx.free_region_map(id);
-                let old_free_region_map =
-                    mem::replace(&mut self.free_region_map, new_free_region_map);
-                borrowck_fn(self, fk, fd, b, s, id);
-                self.free_region_map = old_free_region_map;
+                self.with_temp_region_map(id, |this| {
+                    borrowck_fn(this, fk, fd, b, s, id, fk.attrs())
+                });
             }
 
-            FnKind::Closure => {
-                borrowck_fn(self, fk, fd, b, s, id);
+            FnKind::Closure(..) => {
+                borrowck_fn(self, fk, fd, b, s, id, fk.attrs());
             }
         }
     }
@@ -98,9 +100,10 @@ impl<'a, 'tcx, 'v> Visitor<'v> for BorrowckCtxt<'a, 'tcx> {
     }
 }
 
-pub fn check_crate(tcx: &ty::ctxt) {
+pub fn check_crate<'tcx>(tcx: &TyCtxt<'tcx>, mir_map: &MirMap<'tcx>) {
     let mut bccx = BorrowckCtxt {
         tcx: tcx,
+        mir_map: Some(mir_map),
         free_region_map: FreeRegionMap::new(),
         stats: BorrowStats {
             loaned_paths_same: 0,
@@ -159,8 +162,17 @@ fn borrowck_fn(this: &mut BorrowckCtxt,
                decl: &hir::FnDecl,
                body: &hir::Block,
                sp: Span,
-               id: ast::NodeId) {
+               id: ast::NodeId,
+               attributes: &[ast::Attribute]) {
     debug!("borrowck_fn(id={})", id);
+
+    if attributes.iter().any(|item| item.check_name("rustc_mir_borrowck")) {
+        let mir = this.mir_map.unwrap().map.get(&id).unwrap();
+        this.with_temp_region_map(id, |this| {
+            mir::borrowck_mir(this, fk, decl, mir, body, sp, id, attributes)
+        });
+    }
+
     let cfg = cfg::CFG::new(this.tcx, body);
     let AnalysisData { all_loans,
                        loans: loan_dfcx,
@@ -197,7 +209,7 @@ fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>,
 {
     // Check the body of fn items.
     let tcx = this.tcx;
-    let id_range = hir_util::compute_id_range_for_fn_body(fk, decl, body, sp, id);
+    let id_range = intravisit::compute_id_range_for_fn_body(fk, decl, body, sp, id);
     let (all_loans, move_data) =
         gather_loans::gather_loans_in_fn(this, id, decl, body);
 
@@ -232,7 +244,8 @@ fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>,
 /// Accessor for introspective clients inspecting `AnalysisData` and
 /// the `BorrowckCtxt` itself, e.g. the flowgraph visualizer.
 pub fn build_borrowck_dataflow_data_for_fn<'a, 'tcx>(
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
+    mir_map: Option<&'a MirMap<'tcx>>,
     fn_parts: FnParts<'a>,
     cfg: &cfg::CFG)
     -> (BorrowckCtxt<'a, 'tcx>, AnalysisData<'a, 'tcx>)
@@ -240,6 +253,7 @@ pub fn build_borrowck_dataflow_data_for_fn<'a, 'tcx>(
 
     let mut bccx = BorrowckCtxt {
         tcx: tcx,
+        mir_map: mir_map,
         free_region_map: FreeRegionMap::new(),
         stats: BorrowStats {
             loaned_paths_same: 0,
@@ -264,7 +278,7 @@ pub fn build_borrowck_dataflow_data_for_fn<'a, 'tcx>(
 // Type definitions
 
 pub struct BorrowckCtxt<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
 
     // Hacky. As we visit various fns, we have to load up the
     // free-region map for each one. This map is computed during
@@ -279,9 +293,13 @@ pub struct BorrowckCtxt<'a, 'tcx: 'a> {
     free_region_map: FreeRegionMap,
 
     // Statistics:
-    stats: BorrowStats
+    stats: BorrowStats,
+
+    // NodeId to MIR mapping (for methods that carry the #[rustc_mir] attribute).
+    mir_map: Option<&'a MirMap<'tcx>>,
 }
 
+#[derive(Clone)]
 struct BorrowStats {
     loaned_paths_same: usize,
     loaned_paths_imm: usize,
@@ -394,22 +412,22 @@ pub enum LoanPathElem {
 }
 
 pub fn closure_to_block(closure_id: ast::NodeId,
-                        tcx: &ty::ctxt) -> ast::NodeId {
+                        tcx: &TyCtxt) -> ast::NodeId {
     match tcx.map.get(closure_id) {
         hir_map::NodeExpr(expr) => match expr.node {
             hir::ExprClosure(_, _, ref block) => {
                 block.id
             }
             _ => {
-                panic!("encountered non-closure id: {}", closure_id)
+                bug!("encountered non-closure id: {}", closure_id)
             }
         },
-        _ => panic!("encountered non-expr id: {}", closure_id)
+        _ => bug!("encountered non-expr id: {}", closure_id)
     }
 }
 
 impl<'tcx> LoanPath<'tcx> {
-    pub fn kill_scope(&self, tcx: &ty::ctxt<'tcx>) -> region::CodeExtent {
+    pub fn kill_scope(&self, tcx: &TyCtxt<'tcx>) -> region::CodeExtent {
         match self.kind {
             LpVar(local_id) => tcx.region_maps.var_scope(local_id),
             LpUpvar(upvar_id) => {
@@ -574,6 +592,15 @@ pub enum MovedValueUseKind {
 // Misc
 
 impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> {
+    fn with_temp_region_map<F>(&mut self, id: ast::NodeId, f: F)
+        where F: for <'b> FnOnce(&'b mut BorrowckCtxt<'a, 'tcx>)
+    {
+        let new_free_region_map = self.tcx.free_region_map(id);
+        let old_free_region_map = mem::replace(&mut self.free_region_map, new_free_region_map);
+        f(self);
+        self.free_region_map = old_free_region_map;
+    }
+
     pub fn is_subregion_of(&self, r_sub: ty::Region, r_sup: ty::Region)
                            -> bool
     {
@@ -676,10 +703,9 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> {
                         (self.tcx.expr_ty_adjusted(&expr), expr.span)
                     }
                     r => {
-                        self.tcx.sess.bug(&format!("MoveExpr({}) maps to \
-                                                   {:?}, not Expr",
-                                                  the_move.id,
-                                                  r))
+                        bug!("MoveExpr({}) maps to {:?}, not Expr",
+                             the_move.id,
+                             r)
                     }
                 };
                 let (suggestion, _) =
@@ -738,10 +764,9 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> {
                         (self.tcx.expr_ty_adjusted(&expr), expr.span)
                     }
                     r => {
-                        self.tcx.sess.bug(&format!("Captured({}) maps to \
-                                                   {:?}, not Expr",
-                                                  the_move.id,
-                                                  r))
+                        bug!("Captured({}) maps to {:?}, not Expr",
+                             the_move.id,
+                             r)
                     }
                 };
                 let (suggestion, help) =
@@ -824,10 +849,6 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> {
         self.tcx.sess.span_err_with_code(s, msg, code);
     }
 
-    pub fn span_bug(&self, s: Span, m: &str) {
-        self.tcx.sess.span_bug(s, m);
-    }
-
     pub fn bckerr_to_string(&self, err: &BckError<'tcx>) -> String {
         match err.code {
             err_mutbl => {
@@ -867,7 +888,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> {
                         format!("cannot borrow {} as mutable", descr)
                     }
                     BorrowViolation(euv::ClosureInvocation) => {
-                        self.tcx.sess.span_bug(err.span,
+                        span_bug!(err.span,
                             "err_mutbl with a closure invocation");
                     }
                 }
@@ -1007,9 +1028,9 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> {
                         // We need to determine which is the case here.
                         let kind = match err.cmt.upvar().unwrap().cat {
                             Categorization::Upvar(mc::Upvar { kind, .. }) => kind,
-                            _ => unreachable!()
+                            _ => bug!()
                         };
-                        if kind == ty::FnClosureKind {
+                        if kind == ty::ClosureKind::Fn {
                             db.span_help(
                                 self.tcx.map.span(upvar_id.closure_expr_id),
                                 "consider changing this closure to take \
@@ -1157,7 +1178,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> {
     }
 }
 
-fn statement_scope_span(tcx: &ty::ctxt, region: ty::Region) -> Option<Span> {
+fn statement_scope_span(tcx: &TyCtxt, region: ty::Region) -> Option<Span> {
     match region {
         ty::ReScope(scope) => {
             match tcx.map.find(scope.node_id(&tcx.region_maps)) {
index cbec32e358d81fbad18e71443939b3b3a9c43d15..80e408e9a6e797ec73889c611238c691e3b69d07 100644 (file)
 pub use self::MoveKind::*;
 
 use borrowck::*;
-use rustc::middle::cfg;
+use rustc::cfg;
 use rustc::middle::dataflow::DataFlowContext;
 use rustc::middle::dataflow::BitwiseOperator;
 use rustc::middle::dataflow::DataFlowOperator;
 use rustc::middle::dataflow::KillFrom;
 use rustc::middle::expr_use_visitor as euv;
 use rustc::middle::expr_use_visitor::MutateMode;
-use rustc::middle::ty;
+use rustc::ty::TyCtxt;
 use rustc::util::nodemap::{FnvHashMap, NodeSet};
 
 use std::cell::RefCell;
 use std::rc::Rc;
 use std::usize;
 use syntax::ast;
-use syntax::ast_util;
 use syntax::codemap::Span;
-use rustc_front::hir;
+use rustc::hir;
+use rustc::hir::intravisit::IdRange;
 
 #[path="fragments.rs"]
 pub mod fragments;
@@ -273,7 +273,7 @@ impl<'tcx> MoveData<'tcx> {
     /// Returns the existing move path index for `lp`, if any, and otherwise adds a new index for
     /// `lp` and any of its base paths that do not yet have an index.
     pub fn move_path(&self,
-                     tcx: &ty::ctxt<'tcx>,
+                     tcx: &TyCtxt<'tcx>,
                      lp: Rc<LoanPath<'tcx>>) -> MovePathIndex {
         match self.path_map.borrow().get(&lp) {
             Some(&index) => {
@@ -365,7 +365,7 @@ impl<'tcx> MoveData<'tcx> {
 
     /// Adds a new move entry for a move of `lp` that occurs at location `id` with kind `kind`.
     pub fn add_move(&self,
-                    tcx: &ty::ctxt<'tcx>,
+                    tcx: &TyCtxt<'tcx>,
                     lp: Rc<LoanPath<'tcx>>,
                     id: ast::NodeId,
                     kind: MoveKind) {
@@ -393,7 +393,7 @@ impl<'tcx> MoveData<'tcx> {
     /// Adds a new record for an assignment to `lp` that occurs at location `id` with the given
     /// `span`.
     pub fn add_assignment(&self,
-                          tcx: &ty::ctxt<'tcx>,
+                          tcx: &TyCtxt<'tcx>,
                           lp: Rc<LoanPath<'tcx>>,
                           assign_id: ast::NodeId,
                           span: Span,
@@ -438,7 +438,7 @@ impl<'tcx> MoveData<'tcx> {
     /// should be able to recover the span info from the
     /// `pattern_id` and the ast_map, I think.)
     pub fn add_variant_match(&self,
-                             tcx: &ty::ctxt<'tcx>,
+                             tcx: &TyCtxt<'tcx>,
                              lp: Rc<LoanPath<'tcx>>,
                              pattern_id: ast::NodeId,
                              base_lp: Rc<LoanPath<'tcx>>,
@@ -461,7 +461,7 @@ impl<'tcx> MoveData<'tcx> {
         self.variant_matches.borrow_mut().push(variant_match);
     }
 
-    fn fixup_fragment_sets(&self, tcx: &ty::ctxt<'tcx>) {
+    fn fixup_fragment_sets(&self, tcx: &TyCtxt<'tcx>) {
         fragments::fixup_fragment_sets(self, tcx)
     }
 
@@ -471,7 +471,7 @@ impl<'tcx> MoveData<'tcx> {
     /// scoping. Assignments are generated by assignment to variables and
     /// killed by scoping. See `README.md` for more details.
     fn add_gen_kills(&self,
-                     tcx: &ty::ctxt<'tcx>,
+                     tcx: &TyCtxt<'tcx>,
                      dfcx_moves: &mut MoveDataFlow,
                      dfcx_assign: &mut AssignDataFlow) {
         for (i, the_move) in self.moves.borrow().iter().enumerate() {
@@ -515,7 +515,7 @@ impl<'tcx> MoveData<'tcx> {
                                          assignment_index);
                 }
                 LpExtend(..) => {
-                    tcx.sess.bug("var assignment for non var path");
+                    bug!("var assignment for non var path");
                 }
             }
         }
@@ -600,9 +600,9 @@ impl<'tcx> MoveData<'tcx> {
 
 impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> {
     pub fn new(move_data: MoveData<'tcx>,
-               tcx: &'a ty::ctxt<'tcx>,
+               tcx: &'a TyCtxt<'tcx>,
                cfg: &cfg::CFG,
-               id_range: ast_util::IdRange,
+               id_range: IdRange,
                decl: &hir::FnDecl,
                body: &hir::Block)
                -> FlowedMoveData<'a, 'tcx> {
index 7a5491cdbe7f3cd612a36456fd95d0090df018fd..0da9525efd8566e53520961e8b4addd8e9231a94 100644 (file)
 
 pub use self::Variant::*;
 
-pub use rustc::middle::cfg::graphviz::{Node, Edge};
-use rustc::middle::cfg::graphviz as cfg_dot;
+pub use rustc::cfg::graphviz::{Node, Edge};
+use rustc::cfg::graphviz as cfg_dot;
 
 use borrowck;
 use borrowck::{BorrowckCtxt, LoanPath};
 use dot;
-use rustc::middle::cfg::CFGIndex;
+use rustc::cfg::CFGIndex;
 use rustc::middle::dataflow::{DataFlowOperator, DataFlowContext, EntryOrExit};
 use std::rc::Rc;
 use dot::IntoCow;
@@ -129,7 +129,9 @@ impl<'a, 'tcx> DataflowLabeller<'a, 'tcx> {
     }
 }
 
-impl<'a, 'tcx> dot::Labeller<'a, Node<'a>, Edge<'a>> for DataflowLabeller<'a, 'tcx> {
+impl<'a, 'tcx> dot::Labeller<'a> for DataflowLabeller<'a, 'tcx> {
+    type Node = Node<'a>;
+    type Edge = Edge<'a>;
     fn graph_id(&'a self) -> dot::Id<'a> { self.inner.graph_id() }
     fn node_id(&'a self, n: &Node<'a>) -> dot::Id<'a> { self.inner.node_id(n) }
     fn node_label(&'a self, n: &Node<'a>) -> dot::LabelText<'a> {
@@ -143,7 +145,9 @@ impl<'a, 'tcx> dot::Labeller<'a, Node<'a>, Edge<'a>> for DataflowLabeller<'a, 't
     fn edge_label(&'a self, e: &Edge<'a>) -> dot::LabelText<'a> { self.inner.edge_label(e) }
 }
 
-impl<'a, 'tcx> dot::GraphWalk<'a, Node<'a>, Edge<'a>> for DataflowLabeller<'a, 'tcx> {
+impl<'a, 'tcx> dot::GraphWalk<'a> for DataflowLabeller<'a, 'tcx> {
+    type Node = Node<'a>;
+    type Edge = Edge<'a>;
     fn nodes(&'a self) -> dot::Nodes<'a, Node<'a>> { self.inner.nodes() }
     fn edges(&'a self) -> dot::Edges<'a, Edge<'a>> { self.inner.edges() }
     fn source(&'a self, edge: &Edge<'a>) -> Node<'a> { self.inner.source(edge) }
index e7f5fddc7bbf2b18a97ad34e02a49d10d538c76a..e38677de6625fc46a97eb015d011f76967d8e0d7 100644 (file)
 #![feature(rustc_diagnostic_macros)]
 #![feature(rustc_private)]
 #![feature(staged_api)]
-
+#![feature(associated_consts)]
+#![feature(nonzero)]
+#![feature(question_mark)]
 #[macro_use] extern crate log;
 #[macro_use] extern crate syntax;
 
 // for "clarity", rename the graphviz crate to dot; graphviz within `borrowck`
 // refers to the borrowck-specific graphviz adapter traits.
 extern crate graphviz as dot;
+#[macro_use]
 extern crate rustc;
-extern crate rustc_front;
+extern crate rustc_mir;
+extern crate core; // for NonZero
 
 pub use borrowck::check_crate;
 pub use borrowck::build_borrowck_dataflow_data_for_fn;
@@ -42,6 +46,7 @@ pub use borrowck::{AnalysisData, BorrowckCtxt};
 pub mod diagnostics;
 
 mod borrowck;
+mod bitslice;
 
 pub mod graphviz;
 
diff --git a/src/librustc_const_eval/Cargo.toml b/src/librustc_const_eval/Cargo.toml
new file mode 100644 (file)
index 0000000..c572284
--- /dev/null
@@ -0,0 +1,18 @@
+[package]
+authors = ["The Rust Project Developers"]
+name = "rustc_const_eval"
+version = "0.0.0"
+
+[lib]
+name = "rustc_const_eval"
+path = "lib.rs"
+crate-type = ["dylib"]
+
+[dependencies]
+log = { path = "../liblog" }
+serialize = { path = "../libserialize" }
+rustc = { path = "../librustc" }
+rustc_back = { path = "../librustc_back" }
+rustc_const_math = { path = "../librustc_const_math" }
+syntax = { path = "../libsyntax" }
+graphviz = { path = "../libgraphviz" }
diff --git a/src/librustc_const_eval/check_match.rs b/src/librustc_const_eval/check_match.rs
new file mode 100644 (file)
index 0000000..1e662d4
--- /dev/null
@@ -0,0 +1,1212 @@
+// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub use self::Constructor::*;
+use self::Usefulness::*;
+use self::WitnessPreference::*;
+
+use rustc::dep_graph::DepNode;
+use rustc::middle::const_val::ConstVal;
+use ::{eval_const_expr, eval_const_expr_partial, compare_const_vals};
+use ::{const_expr_to_pat, lookup_const_by_id};
+use ::EvalHint::ExprTypeChecked;
+use rustc::hir::def::*;
+use rustc::hir::def_id::{DefId};
+use rustc::middle::expr_use_visitor::{ConsumeMode, Delegate, ExprUseVisitor};
+use rustc::middle::expr_use_visitor::{LoanCause, MutateMode};
+use rustc::middle::expr_use_visitor as euv;
+use rustc::infer;
+use rustc::middle::mem_categorization::{cmt};
+use rustc::hir::pat_util::*;
+use rustc::traits::ProjectionMode;
+use rustc::ty::*;
+use rustc::ty;
+use std::cmp::Ordering;
+use std::fmt;
+use std::iter::{FromIterator, IntoIterator, repeat};
+
+use rustc::hir;
+use rustc::hir::{Pat, PatKind};
+use rustc::hir::intravisit::{self, IdVisitor, IdVisitingOperation, Visitor, FnKind};
+use rustc_back::slice;
+
+use syntax::ast::{self, DUMMY_NODE_ID, NodeId};
+use syntax::codemap::{Span, Spanned, DUMMY_SP};
+use rustc::hir::fold::{Folder, noop_fold_pat};
+use rustc::hir::print::pat_to_string;
+use syntax::ptr::P;
+use rustc::util::nodemap::FnvHashMap;
+
+pub const DUMMY_WILD_PAT: &'static Pat = &Pat {
+    id: DUMMY_NODE_ID,
+    node: PatKind::Wild,
+    span: DUMMY_SP
+};
+
+struct Matrix<'a>(Vec<Vec<&'a Pat>>);
+
+/// Pretty-printer for matrices of patterns, example:
+/// ++++++++++++++++++++++++++
+/// + _     + []             +
+/// ++++++++++++++++++++++++++
+/// + true  + [First]        +
+/// ++++++++++++++++++++++++++
+/// + true  + [Second(true)] +
+/// ++++++++++++++++++++++++++
+/// + false + [_]            +
+/// ++++++++++++++++++++++++++
+/// + _     + [_, _, ..tail] +
+/// ++++++++++++++++++++++++++
+impl<'a> fmt::Debug for Matrix<'a> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "\n")?;
+
+        let &Matrix(ref m) = self;
+        let pretty_printed_matrix: Vec<Vec<String>> = m.iter().map(|row| {
+            row.iter()
+               .map(|&pat| pat_to_string(&pat))
+               .collect::<Vec<String>>()
+        }).collect();
+
+        let column_count = m.iter().map(|row| row.len()).max().unwrap_or(0);
+        assert!(m.iter().all(|row| row.len() == column_count));
+        let column_widths: Vec<usize> = (0..column_count).map(|col| {
+            pretty_printed_matrix.iter().map(|row| row[col].len()).max().unwrap_or(0)
+        }).collect();
+
+        let total_width = column_widths.iter().cloned().sum::<usize>() + column_count * 3 + 1;
+        let br = repeat('+').take(total_width).collect::<String>();
+        write!(f, "{}\n", br)?;
+        for row in pretty_printed_matrix {
+            write!(f, "+")?;
+            for (column, pat_str) in row.into_iter().enumerate() {
+                write!(f, " ")?;
+                write!(f, "{:1$}", pat_str, column_widths[column])?;
+                write!(f, " +")?;
+            }
+            write!(f, "\n")?;
+            write!(f, "{}\n", br)?;
+        }
+        Ok(())
+    }
+}
+
+impl<'a> FromIterator<Vec<&'a Pat>> for Matrix<'a> {
+    fn from_iter<T: IntoIterator<Item=Vec<&'a Pat>>>(iter: T) -> Matrix<'a> {
+        Matrix(iter.into_iter().collect())
+    }
+}
+
+// NOTE: appears to be the only place other than InferCtxt to contain a ParamEnv
+pub struct MatchCheckCtxt<'a, 'tcx: 'a> {
+    pub tcx: &'a TyCtxt<'tcx>,
+    pub param_env: ParameterEnvironment<'a, 'tcx>,
+}
+
+#[derive(Clone, PartialEq)]
+pub enum Constructor {
+    /// The constructor of all patterns that don't vary by constructor,
+    /// e.g. struct patterns and fixed-length arrays.
+    Single,
+    /// Enum variants.
+    Variant(DefId),
+    /// Literal values.
+    ConstantValue(ConstVal),
+    /// Ranges of literal values (2..5).
+    ConstantRange(ConstVal, ConstVal),
+    /// Array patterns of length n.
+    Slice(usize),
+    /// Array patterns with a subslice.
+    SliceWithSubslice(usize, usize)
+}
+
+#[derive(Clone, PartialEq)]
+enum Usefulness {
+    Useful,
+    UsefulWithWitness(Vec<P<Pat>>),
+    NotUseful
+}
+
+#[derive(Copy, Clone)]
+enum WitnessPreference {
+    ConstructWitness,
+    LeaveOutWitness
+}
+
+impl<'a, 'tcx, 'v> Visitor<'v> for MatchCheckCtxt<'a, 'tcx> {
+    fn visit_expr(&mut self, ex: &hir::Expr) {
+        check_expr(self, ex);
+    }
+    fn visit_local(&mut self, l: &hir::Local) {
+        check_local(self, l);
+    }
+    fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v hir::FnDecl,
+                b: &'v hir::Block, s: Span, n: NodeId) {
+        check_fn(self, fk, fd, b, s, n);
+    }
+}
+
+pub fn check_crate(tcx: &TyCtxt) {
+    tcx.visit_all_items_in_krate(DepNode::MatchCheck, &mut MatchCheckCtxt {
+        tcx: tcx,
+        param_env: tcx.empty_parameter_environment(),
+    });
+    tcx.sess.abort_if_errors();
+}
+
+fn check_expr(cx: &mut MatchCheckCtxt, ex: &hir::Expr) {
+    intravisit::walk_expr(cx, ex);
+    match ex.node {
+        hir::ExprMatch(ref scrut, ref arms, source) => {
+            for arm in arms {
+                // First, check legality of move bindings.
+                check_legality_of_move_bindings(cx,
+                                                arm.guard.is_some(),
+                                                &arm.pats);
+
+                // Second, if there is a guard on the arm, make sure it isn't
+                // assigning or borrowing anything mutably.
+                match arm.guard {
+                    Some(ref guard) => check_for_mutation_in_guard(cx, &guard),
+                    None => {}
+                }
+            }
+
+            let mut static_inliner = StaticInliner::new(cx.tcx, None);
+            let inlined_arms = arms.iter().map(|arm| {
+                (arm.pats.iter().map(|pat| {
+                    static_inliner.fold_pat((*pat).clone())
+                }).collect(), arm.guard.as_ref().map(|e| &**e))
+            }).collect::<Vec<(Vec<P<Pat>>, Option<&hir::Expr>)>>();
+
+            // Bail out early if inlining failed.
+            if static_inliner.failed {
+                return;
+            }
+
+            for pat in inlined_arms
+                .iter()
+                .flat_map(|&(ref pats, _)| pats) {
+                // Third, check legality of bindings in `@` patterns.
+                check_legality_of_bindings_in_at_patterns(cx, &pat);
+
+                // Fourth, check if there are any references to NaN that we should warn about.
+                check_for_static_nan(cx, &pat);
+
+                // Fifth, check whether any of the patterns that match an
+                // enumerated type contain bindings with the same name as one
+                // of that type's variants.
+                check_for_bindings_named_the_same_as_variants(cx, &pat);
+            }
+
+            // Sixth, check for unreachable arms.
+            check_arms(cx, &inlined_arms[..], source);
+
+            // Finally, check if the whole match expression is exhaustive.
+            // Check for empty enum, because is_useful only works on inhabited types.
+            let pat_ty = cx.tcx.node_id_to_type(scrut.id);
+            if inlined_arms.is_empty() {
+                if !pat_ty.is_empty(cx.tcx) {
+                    // We know the type is inhabited, so this must be wrong
+                    let mut err = struct_span_err!(cx.tcx.sess, ex.span, E0002,
+                                                   "non-exhaustive patterns: type {} is non-empty",
+                                                   pat_ty);
+                    span_help!(&mut err, ex.span,
+                        "Please ensure that all possible cases are being handled; \
+                         possibly adding wildcards or more match arms.");
+                    err.emit();
+                }
+                // If the type *is* empty, it's vacuously exhaustive
+                return;
+            }
+
+            let matrix: Matrix = inlined_arms
+                .iter()
+                .filter(|&&(_, guard)| guard.is_none())
+                .flat_map(|arm| &arm.0)
+                .map(|pat| vec![&**pat])
+                .collect();
+            check_exhaustive(cx, ex.span, &matrix, source);
+        },
+        _ => ()
+    }
+}
+
+fn check_for_bindings_named_the_same_as_variants(cx: &MatchCheckCtxt, pat: &Pat) {
+    pat.walk(|p| {
+        match p.node {
+            PatKind::Ident(hir::BindByValue(hir::MutImmutable), ident, None) => {
+                let pat_ty = cx.tcx.pat_ty(p);
+                if let ty::TyEnum(edef, _) = pat_ty.sty {
+                    let def = cx.tcx.def_map.borrow().get(&p.id).map(|d| d.full_def());
+                    if let Some(Def::Local(..)) = def {
+                        if edef.variants.iter().any(|variant|
+                            variant.name == ident.node.unhygienic_name
+                                && variant.kind() == VariantKind::Unit
+                        ) {
+                            let ty_path = cx.tcx.item_path_str(edef.did);
+                            let mut err = struct_span_warn!(cx.tcx.sess, p.span, E0170,
+                                "pattern binding `{}` is named the same as one \
+                                 of the variants of the type `{}`",
+                                ident.node, ty_path);
+                            fileline_help!(err, p.span,
+                                "if you meant to match on a variant, \
+                                 consider making the path in the pattern qualified: `{}::{}`",
+                                ty_path, ident.node);
+                            err.emit();
+                        }
+                    }
+                }
+            }
+            _ => ()
+        }
+        true
+    });
+}
+
+// Check that we do not match against a static NaN (#6804)
+fn check_for_static_nan(cx: &MatchCheckCtxt, pat: &Pat) {
+    pat.walk(|p| {
+        if let PatKind::Lit(ref expr) = p.node {
+            match eval_const_expr_partial(cx.tcx, &expr, ExprTypeChecked, None) {
+                Ok(ConstVal::Float(f)) if f.is_nan() => {
+                    span_warn!(cx.tcx.sess, p.span, E0003,
+                               "unmatchable NaN in pattern, \
+                                use the is_nan method in a guard instead");
+                }
+                Ok(_) => {}
+
+                Err(err) => {
+                    let mut diag = struct_span_err!(cx.tcx.sess, err.span, E0471,
+                                                    "constant evaluation error: {}",
+                                                    err.description());
+                    if !p.span.contains(err.span) {
+                        diag.span_note(p.span, "in pattern here");
+                    }
+                    diag.emit();
+                }
+            }
+        }
+        true
+    });
+}
+
+// Check for unreachable patterns
+fn check_arms(cx: &MatchCheckCtxt,
+              arms: &[(Vec<P<Pat>>, Option<&hir::Expr>)],
+              source: hir::MatchSource) {
+    let mut seen = Matrix(vec![]);
+    let mut printed_if_let_err = false;
+    for &(ref pats, guard) in arms {
+        for pat in pats {
+            let v = vec![&**pat];
+
+            match is_useful(cx, &seen, &v[..], LeaveOutWitness) {
+                NotUseful => {
+                    match source {
+                        hir::MatchSource::IfLetDesugar { .. } => {
+                            if printed_if_let_err {
+                                // we already printed an irrefutable if-let pattern error.
+                                // We don't want two, that's just confusing.
+                            } else {
+                                // find the first arm pattern so we can use its span
+                                let &(ref first_arm_pats, _) = &arms[0];
+                                let first_pat = &first_arm_pats[0];
+                                let span = first_pat.span;
+                                span_err!(cx.tcx.sess, span, E0162, "irrefutable if-let pattern");
+                                printed_if_let_err = true;
+                            }
+                        },
+
+                        hir::MatchSource::WhileLetDesugar => {
+                            // find the first arm pattern so we can use its span
+                            let &(ref first_arm_pats, _) = &arms[0];
+                            let first_pat = &first_arm_pats[0];
+                            let span = first_pat.span;
+                            span_err!(cx.tcx.sess, span, E0165, "irrefutable while-let pattern");
+                        },
+
+                        hir::MatchSource::ForLoopDesugar => {
+                            // this is a bug, because on `match iter.next()` we cover
+                            // `Some(<head>)` and `None`. It's impossible to have an unreachable
+                            // pattern
+                            // (see libsyntax/ext/expand.rs for the full expansion of a for loop)
+                            span_bug!(pat.span, "unreachable for-loop pattern")
+                        },
+
+                        hir::MatchSource::Normal => {
+                            span_err!(cx.tcx.sess, pat.span, E0001, "unreachable pattern")
+                        },
+
+                        hir::MatchSource::TryDesugar => {
+                            span_bug!(pat.span, "unreachable try pattern")
+                        },
+                    }
+                }
+                Useful => (),
+                UsefulWithWitness(_) => bug!()
+            }
+            if guard.is_none() {
+                let Matrix(mut rows) = seen;
+                rows.push(v);
+                seen = Matrix(rows);
+            }
+        }
+    }
+}
+
+fn raw_pat<'a>(p: &'a Pat) -> &'a Pat {
+    match p.node {
+        PatKind::Ident(_, _, Some(ref s)) => raw_pat(&s),
+        _ => p
+    }
+}
+
+fn check_exhaustive(cx: &MatchCheckCtxt, sp: Span, matrix: &Matrix, source: hir::MatchSource) {
+    match is_useful(cx, matrix, &[DUMMY_WILD_PAT], ConstructWitness) {
+        UsefulWithWitness(pats) => {
+            let witnesses = if pats.is_empty() {
+                vec![DUMMY_WILD_PAT]
+            } else {
+                pats.iter().map(|w| &**w ).collect()
+            };
+            match source {
+                hir::MatchSource::ForLoopDesugar => {
+                    // `witnesses[0]` has the form `Some(<head>)`, peel off the `Some`
+                    let witness = match witnesses[0].node {
+                        PatKind::TupleStruct(_, Some(ref pats)) => match &pats[..] {
+                            [ref pat] => &**pat,
+                            _ => bug!(),
+                        },
+                        _ => bug!(),
+                    };
+                    span_err!(cx.tcx.sess, sp, E0297,
+                        "refutable pattern in `for` loop binding: \
+                                `{}` not covered",
+                                pat_to_string(witness));
+                },
+                _ => {
+                    let pattern_strings: Vec<_> = witnesses.iter().map(|w| {
+                        pat_to_string(w)
+                    }).collect();
+                    const LIMIT: usize = 3;
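+                    // Print at most LIMIT of the missing patterns verbatim
+                    // and summarize any remaining ones as "and N more".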
+                    let joined_patterns = match pattern_strings.len() {
+                        0 => bug!(),
+                        1 => format!("`{}`", pattern_strings[0]),
+                        2...LIMIT => {
+                            let (tail, head) = pattern_strings.split_last().unwrap();
+                            format!("`{}`", head.join("`, `") + "` and `" + tail)
+                        },
+                        _ => {
+                            let (head, tail) = pattern_strings.split_at(LIMIT);
+                            format!("`{}` and {} more", head.join("`, `"), tail.len())
+                        }
+                    };
+                    span_err!(cx.tcx.sess, sp, E0004,
+                        "non-exhaustive patterns: {} not covered",
+                        joined_patterns
+                    );
+                },
+            }
+        }
+        NotUseful => {
+            // This is good, wildcard pattern isn't reachable
+        },
+        _ => bug!()
+    }
+}
+
+fn const_val_to_expr(value: &ConstVal) -> P<hir::Expr> {
+    let node = match value {
+        &ConstVal::Bool(b) => ast::LitKind::Bool(b),
+        _ => bug!()
+    };
+    P(hir::Expr {
+        id: 0,
+        node: hir::ExprLit(P(Spanned { node: node, span: DUMMY_SP })),
+        span: DUMMY_SP,
+        attrs: None,
+    })
+}
+
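+/// Replaces constant (and associated-constant) patterns with the patterns
+/// their values denote, optionally recording node-id renamings along the way;
+/// `failed` is set when a referenced constant cannot be turned into a pattern.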
+pub struct StaticInliner<'a, 'tcx: 'a> {
+    pub tcx: &'a TyCtxt<'tcx>,
+    pub failed: bool,
+    pub renaming_map: Option<&'a mut FnvHashMap<(NodeId, Span), NodeId>>,
+}
+
+impl<'a, 'tcx> StaticInliner<'a, 'tcx> {
+    pub fn new<'b>(tcx: &'b TyCtxt<'tcx>,
+                   renaming_map: Option<&'b mut FnvHashMap<(NodeId, Span), NodeId>>)
+                   -> StaticInliner<'b, 'tcx> {
+        StaticInliner {
+            tcx: tcx,
+            failed: false,
+            renaming_map: renaming_map
+        }
+    }
+}
+
+struct RenamingRecorder<'map> {
+    substituted_node_id: NodeId,
+    origin_span: Span,
+    renaming_map: &'map mut FnvHashMap<(NodeId, Span), NodeId>
+}
+
+impl<'map> IdVisitingOperation for RenamingRecorder<'map> {
+    fn visit_id(&mut self, node_id: NodeId) {
+        let key = (node_id, self.origin_span);
+        self.renaming_map.insert(key, self.substituted_node_id);
+    }
+}
+
+impl<'a, 'tcx> Folder for StaticInliner<'a, 'tcx> {
+    fn fold_pat(&mut self, pat: P<Pat>) -> P<Pat> {
+        return match pat.node {
+            PatKind::Ident(..) | PatKind::Path(..) | PatKind::QPath(..) => {
+                let def = self.tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def());
+                match def {
+                    Some(Def::AssociatedConst(did)) |
+                    Some(Def::Const(did)) => {
+                        let substs = Some(self.tcx.node_id_item_substs(pat.id).substs);
+                        if let Some((const_expr, _)) = lookup_const_by_id(self.tcx, did, substs) {
+                            match const_expr_to_pat(self.tcx, const_expr, pat.id, pat.span) {
+                                Ok(new_pat) => {
+                                    if let Some(ref mut map) = self.renaming_map {
+                                        // Record any renamings we do here
+                                        record_renamings(const_expr, &pat, map);
+                                    }
+                                    new_pat
+                                }
+                                Err(def_id) => {
+                                    self.failed = true;
+                                    self.tcx.sess.span_err(
+                                        pat.span,
+                                        &format!("constants of the type `{}` \
+                                                  cannot be used in patterns",
+                                                 self.tcx.item_path_str(def_id)));
+                                    pat
+                                }
+                            }
+                        } else {
+                            self.failed = true;
+                            span_err!(self.tcx.sess, pat.span, E0158,
+                                "statics cannot be referenced in patterns");
+                            pat
+                        }
+                    }
+                    _ => noop_fold_pat(pat, self)
+                }
+            }
+            _ => noop_fold_pat(pat, self)
+        };
+
+        fn record_renamings(const_expr: &hir::Expr,
+                            substituted_pat: &hir::Pat,
+                            renaming_map: &mut FnvHashMap<(NodeId, Span), NodeId>) {
+            let mut renaming_recorder = RenamingRecorder {
+                substituted_node_id: substituted_pat.id,
+                origin_span: substituted_pat.span,
+                renaming_map: renaming_map,
+            };
+
+            let mut id_visitor = IdVisitor::new(&mut renaming_recorder);
+
+            id_visitor.visit_expr(const_expr);
+        }
+    }
+}
+
+/// Constructs a partial witness for a pattern given a list of
+/// patterns expanded by the specialization step.
+///
+/// When a pattern P is discovered to be useful, this function is used bottom-up
+/// to reconstruct a complete witness, e.g. a pattern P' that covers a subset
+/// of values, V, where each value in that set is not covered by any previously
+/// used patterns and is covered by the pattern P'. Examples:
+///
+/// left_ty: tuple of 3 elements
+/// pats: [10, 20, _]           => (10, 20, _)
+///
+/// left_ty: struct X { a: (bool, &'static str), b: usize}
+/// pats: [(false, "foo"), 42]  => X { a: (false, "foo"), b: 42 }
+fn construct_witness<'a,'tcx>(cx: &MatchCheckCtxt<'a,'tcx>, ctor: &Constructor,
+                              pats: Vec<&Pat>, left_ty: Ty<'tcx>) -> P<Pat> {
+    let pats_len = pats.len();
+    let mut pats = pats.into_iter().map(|p| P((*p).clone()));
+    let pat = match left_ty.sty {
+        ty::TyTuple(_) => PatKind::Tup(pats.collect()),
+
+        ty::TyEnum(adt, _) | ty::TyStruct(adt, _)  => {
+            let v = ctor.variant_for_adt(adt);
+            match v.kind() {
+                VariantKind::Struct => {
+                    let field_pats: hir::HirVec<_> = v.fields.iter()
+                        .zip(pats)
+                        .filter(|&(_, ref pat)| pat.node != PatKind::Wild)
+                        .map(|(field, pat)| Spanned {
+                            span: DUMMY_SP,
+                            node: hir::FieldPat {
+                                name: field.name,
+                                pat: pat,
+                                is_shorthand: false,
+                            }
+                        }).collect();
+                    let has_more_fields = field_pats.len() < pats_len;
+                    PatKind::Struct(def_to_path(cx.tcx, v.did), field_pats, has_more_fields)
+                }
+                VariantKind::Tuple => {
+                    PatKind::TupleStruct(def_to_path(cx.tcx, v.did), Some(pats.collect()))
+                }
+                VariantKind::Unit => {
+                    PatKind::Path(def_to_path(cx.tcx, v.did))
+                }
+            }
+        }
+
+        ty::TyRef(_, ty::TypeAndMut { ty, mutbl }) => {
+            match ty.sty {
+               ty::TyArray(_, n) => match ctor {
+                    &Single => {
+                        assert_eq!(pats_len, n);
+                        PatKind::Vec(pats.collect(), None, hir::HirVec::new())
+                    },
+                    _ => bug!()
+                },
+                ty::TySlice(_) => match ctor {
+                    &Slice(n) => {
+                        assert_eq!(pats_len, n);
+                        PatKind::Vec(pats.collect(), None, hir::HirVec::new())
+                    },
+                    _ => bug!()
+                },
+                ty::TyStr => PatKind::Wild,
+
+                _ => {
+                    assert_eq!(pats_len, 1);
+                    PatKind::Ref(pats.nth(0).unwrap(), mutbl)
+                }
+            }
+        }
+
+        ty::TyArray(_, len) => {
+            assert_eq!(pats_len, len);
+            PatKind::Vec(pats.collect(), None, hir::HirVec::new())
+        }
+
+        _ => {
+            match *ctor {
+                ConstantValue(ref v) => PatKind::Lit(const_val_to_expr(v)),
+                _ => PatKind::Wild,
+            }
+        }
+    };
+
+    P(hir::Pat {
+        id: 0,
+        node: pat,
+        span: DUMMY_SP
+    })
+}
+
+impl Constructor {
+    fn variant_for_adt<'tcx, 'container, 'a>(&self,
+                                             adt: &'a ty::AdtDefData<'tcx, 'container>)
+                                             -> &'a VariantDefData<'tcx, 'container> {
+        match self {
+            &Variant(vid) => adt.variant_with_id(vid),
+            _ => adt.struct_variant()
+        }
+    }
+}
+
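+/// Returns the constructors of `left_ty` that are not used by any pattern in
+/// the first column of the matrix.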
+fn missing_constructors(cx: &MatchCheckCtxt, &Matrix(ref rows): &Matrix,
+                       left_ty: Ty, max_slice_length: usize) -> Vec<Constructor> {
+    let used_constructors: Vec<Constructor> = rows.iter()
+        .flat_map(|row| pat_constructors(cx, row[0], left_ty, max_slice_length))
+        .collect();
+    all_constructors(cx, left_ty, max_slice_length)
+        .into_iter()
+        .filter(|c| !used_constructors.contains(c))
+        .collect()
+}
+
+/// This determines the set of all possible constructors of a pattern matching
+/// values of type `left_ty`. For vectors, this would normally be an infinite set
+/// but is instead bounded by the maximum fixed length of slice patterns in
+/// the column of patterns being analyzed.
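+///
+/// For example, for `bool` this is the two literal values, for an enum it is
+/// one `Variant` per variant, and for a borrowed slice it is `Slice(0)`
+/// through `Slice(max_slice_length)`; every other type is covered by the
+/// single constructor `Single`.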
+fn all_constructors(_cx: &MatchCheckCtxt, left_ty: Ty,
+                    max_slice_length: usize) -> Vec<Constructor> {
+    match left_ty.sty {
+        ty::TyBool =>
+            [true, false].iter().map(|b| ConstantValue(ConstVal::Bool(*b))).collect(),
+
+        ty::TyRef(_, ty::TypeAndMut { ty, .. }) => match ty.sty {
+            ty::TySlice(_) =>
+                (0..max_slice_length+1).map(|length| Slice(length)).collect(),
+            _ => vec![Single]
+        },
+
+        ty::TyEnum(def, _) => def.variants.iter().map(|v| Variant(v.did)).collect(),
+        _ => vec![Single]
+    }
+}
+
+// Algorithm from http://moscova.inria.fr/~maranget/papers/warn/index.html
+//
+// Whether a vector `v` of patterns is 'useful' in relation to a set of such
+// vectors `m` is defined as there being a set of inputs that will match `v`
+// but not any of the sets in `m`.
+//
+// This is used both for reachability checking (if a pattern isn't useful in
+// relation to preceding patterns, it is not reachable) and exhaustiveness
+// checking (if a wildcard pattern is useful in relation to a matrix, the
+// matrix isn't exhaustive).
+
+// Note: is_useful doesn't work on empty types, as the paper notes.
+// So it assumes that v is non-empty.
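+//
+// For example, given the matrix [[Some(_)]] over `Option<bool>`, the vector
+// [None] is useful (the value `None` matches it but no row of the matrix),
+// whereas [Some(true)] is not, since the first row already covers every value
+// it could match. A wildcard row being useful against the arms of a `match`
+// therefore means that the `match` is not exhaustive.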
+fn is_useful(cx: &MatchCheckCtxt,
+             matrix: &Matrix,
+             v: &[&Pat],
+             witness: WitnessPreference)
+             -> Usefulness {
+    let &Matrix(ref rows) = matrix;
+    debug!("{:?}", matrix);
+    if rows.is_empty() {
+        return match witness {
+            ConstructWitness => UsefulWithWitness(vec!()),
+            LeaveOutWitness => Useful
+        };
+    }
+    if rows[0].is_empty() {
+        return NotUseful;
+    }
+    assert!(rows.iter().all(|r| r.len() == v.len()));
+    let real_pat = match rows.iter().find(|r| (*r)[0].id != DUMMY_NODE_ID) {
+        Some(r) => raw_pat(r[0]),
+        None if v.is_empty() => return NotUseful,
+        None => v[0]
+    };
+    let left_ty = if real_pat.id == DUMMY_NODE_ID {
+        cx.tcx.mk_nil()
+    } else {
+        let left_ty = cx.tcx.pat_ty(&real_pat);
+
+        match real_pat.node {
+            PatKind::Ident(hir::BindByRef(..), _, _) => {
+                left_ty.builtin_deref(false, NoPreference).unwrap().ty
+            }
+            _ => left_ty,
+        }
+    };
+
+    let max_slice_length = rows.iter().filter_map(|row| match row[0].node {
+        PatKind::Vec(ref before, _, ref after) => Some(before.len() + after.len()),
+        _ => None
+    }).max().map_or(0, |v| v + 1);
+
+    let constructors = pat_constructors(cx, v[0], left_ty, max_slice_length);
+    if constructors.is_empty() {
+        let constructors = missing_constructors(cx, matrix, left_ty, max_slice_length);
+        if constructors.is_empty() {
+            all_constructors(cx, left_ty, max_slice_length).into_iter().map(|c| {
+                match is_useful_specialized(cx, matrix, v, c.clone(), left_ty, witness) {
+                    UsefulWithWitness(pats) => UsefulWithWitness({
+                        let arity = constructor_arity(cx, &c, left_ty);
+                        let mut result = {
+                            let pat_slice = &pats[..];
+                            let subpats: Vec<_> = (0..arity).map(|i| {
+                                pat_slice.get(i).map_or(DUMMY_WILD_PAT, |p| &**p)
+                            }).collect();
+                            vec![construct_witness(cx, &c, subpats, left_ty)]
+                        };
+                        result.extend(pats.into_iter().skip(arity));
+                        result
+                    }),
+                    result => result
+                }
+            }).find(|result| result != &NotUseful).unwrap_or(NotUseful)
+        } else {
+            let matrix = rows.iter().filter_map(|r| {
+                if pat_is_binding_or_wild(&cx.tcx.def_map.borrow(), raw_pat(r[0])) {
+                    Some(r[1..].to_vec())
+                } else {
+                    None
+                }
+            }).collect();
+            match is_useful(cx, &matrix, &v[1..], witness) {
+                UsefulWithWitness(pats) => {
+                    let mut new_pats: Vec<_> = constructors.into_iter().map(|constructor| {
+                        let arity = constructor_arity(cx, &constructor, left_ty);
+                        let wild_pats = vec![DUMMY_WILD_PAT; arity];
+                        construct_witness(cx, &constructor, wild_pats, left_ty)
+                    }).collect();
+                    new_pats.extend(pats);
+                    UsefulWithWitness(new_pats)
+                },
+                result => result
+            }
+        }
+    } else {
+        constructors.into_iter().map(|c|
+            is_useful_specialized(cx, matrix, v, c.clone(), left_ty, witness)
+        ).find(|result| result != &NotUseful).unwrap_or(NotUseful)
+    }
+}
+
+fn is_useful_specialized(cx: &MatchCheckCtxt, &Matrix(ref m): &Matrix,
+                         v: &[&Pat], ctor: Constructor, lty: Ty,
+                         witness: WitnessPreference) -> Usefulness {
+    let arity = constructor_arity(cx, &ctor, lty);
+    let matrix = Matrix(m.iter().filter_map(|r| {
+        specialize(cx, &r[..], &ctor, 0, arity)
+    }).collect());
+    match specialize(cx, v, &ctor, 0, arity) {
+        Some(v) => is_useful(cx, &matrix, &v[..], witness),
+        None => NotUseful
+    }
+}
+
+/// Determines the constructors that the given pattern can be specialized to.
+///
+/// In most cases, there's only one constructor that a specific pattern
+/// represents, such as a specific enum variant or a specific literal value.
+/// Slice patterns, however, can match slices of different lengths. For instance,
+/// `[a, b, ..tail]` can match a slice of length 2, 3, 4 and so on.
+///
+/// On the other hand, a wild pattern and an identifier pattern cannot be
+/// specialized in any way.
+fn pat_constructors(cx: &MatchCheckCtxt, p: &Pat,
+                    left_ty: Ty, max_slice_length: usize) -> Vec<Constructor> {
+    let pat = raw_pat(p);
+    match pat.node {
+        PatKind::Struct(..) | PatKind::TupleStruct(..) | PatKind::Path(..) | PatKind::Ident(..) =>
+            match cx.tcx.def_map.borrow().get(&pat.id).unwrap().full_def() {
+                Def::Const(..) | Def::AssociatedConst(..) =>
+                    span_bug!(pat.span, "const pattern should've \
+                                         been rewritten"),
+                Def::Struct(..) | Def::TyAlias(..) => vec![Single],
+                Def::Variant(_, id) => vec![Variant(id)],
+                Def::Local(..) => vec![],
+                def => span_bug!(pat.span, "pat_constructors: unexpected \
+                                            definition {:?}", def),
+            },
+        PatKind::QPath(..) =>
+            span_bug!(pat.span, "const pattern should've been rewritten"),
+        PatKind::Lit(ref expr) =>
+            vec!(ConstantValue(eval_const_expr(cx.tcx, &expr))),
+        PatKind::Range(ref lo, ref hi) =>
+            vec!(ConstantRange(eval_const_expr(cx.tcx, &lo), eval_const_expr(cx.tcx, &hi))),
+        PatKind::Vec(ref before, ref slice, ref after) =>
+            match left_ty.sty {
+                ty::TyArray(_, _) => vec!(Single),
+                _                      => if slice.is_some() {
+                    (before.len() + after.len()..max_slice_length+1)
+                        .map(|length| Slice(length))
+                        .collect()
+                } else {
+                    vec!(Slice(before.len() + after.len()))
+                }
+            },
+        PatKind::Box(_) | PatKind::Tup(_) | PatKind::Ref(..) =>
+            vec!(Single),
+        PatKind::Wild =>
+            vec!(),
+    }
+}
+
+/// This computes the arity of a constructor. The arity of a constructor
+/// is the number of subpatterns that a pattern of that constructor should
+/// be expanded into.
+///
+/// For instance, a tuple pattern (_, 42, Some([])) has an arity of 3.
+/// A struct pattern's arity is the number of fields it contains, etc.
+pub fn constructor_arity(_cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> usize {
+    match ty.sty {
+        ty::TyTuple(ref fs) => fs.len(),
+        ty::TyBox(_) => 1,
+        ty::TyRef(_, ty::TypeAndMut { ty, .. }) => match ty.sty {
+            ty::TySlice(_) => match *ctor {
+                Slice(length) => length,
+                ConstantValue(_) => 0,
+                _ => bug!()
+            },
+            ty::TyStr => 0,
+            _ => 1
+        },
+        ty::TyEnum(adt, _) | ty::TyStruct(adt, _) => {
+            ctor.variant_for_adt(adt).fields.len()
+        }
+        ty::TyArray(_, n) => n,
+        _ => 0
+    }
+}
+
+fn range_covered_by_constructor(ctor: &Constructor,
+                                from: &ConstVal, to: &ConstVal) -> Option<bool> {
+    let (c_from, c_to) = match *ctor {
+        ConstantValue(ref value)        => (value, value),
+        ConstantRange(ref from, ref to) => (from, to),
+        Single                          => return Some(true),
+        _                               => bug!()
+    };
+    let cmp_from = compare_const_vals(c_from, from);
+    let cmp_to = compare_const_vals(c_to, to);
+    match (cmp_from, cmp_to) {
+        (Some(cmp_from), Some(cmp_to)) => {
+            Some(cmp_from != Ordering::Less && cmp_to != Ordering::Greater)
+        }
+        _ => None
+    }
+}
+
+/// This is the main specialization step. It expands the first pattern in the given row
+/// into `arity` patterns based on the constructor. For most patterns the step is trivial:
+/// for instance, tuple patterns are flattened and box patterns expand into their inner pattern.
+///
+/// OTOH, slice patterns with a subslice pattern (..tail) can be expanded into multiple
+/// different patterns.
+/// Structure patterns with a partial wild pattern (Foo { a: 42, .. }) have their missing
+/// fields filled with wild patterns.
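+///
+/// For example, specializing the row `[Some(42), y]` with the `Some`
+/// constructor (arity 1) yields `[42, y]`, while specializing it with the
+/// `None` constructor yields no row at all.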
+pub fn specialize<'a>(cx: &MatchCheckCtxt, r: &[&'a Pat],
+                      constructor: &Constructor, col: usize, arity: usize) -> Option<Vec<&'a Pat>> {
+    let &Pat {
+        id: pat_id, ref node, span: pat_span
+    } = raw_pat(r[col]);
+    let head: Option<Vec<&Pat>> = match *node {
+        PatKind::Wild =>
+            Some(vec![DUMMY_WILD_PAT; arity]),
+
+        PatKind::Path(..) | PatKind::Ident(..) => {
+            let def = cx.tcx.def_map.borrow().get(&pat_id).unwrap().full_def();
+            match def {
+                Def::Const(..) | Def::AssociatedConst(..) =>
+                    span_bug!(pat_span, "const pattern should've \
+                                         been rewritten"),
+                Def::Variant(_, id) if *constructor != Variant(id) => None,
+                Def::Variant(..) | Def::Struct(..) => Some(Vec::new()),
+                Def::Local(..) => Some(vec![DUMMY_WILD_PAT; arity]),
+                _ => span_bug!(pat_span, "specialize: unexpected \
+                                          definition {:?}", def),
+            }
+        }
+
+        PatKind::TupleStruct(_, ref args) => {
+            let def = cx.tcx.def_map.borrow().get(&pat_id).unwrap().full_def();
+            match def {
+                Def::Const(..) | Def::AssociatedConst(..) =>
+                    span_bug!(pat_span, "const pattern should've \
+                                         been rewritten"),
+                Def::Variant(_, id) if *constructor != Variant(id) => None,
+                Def::Variant(..) | Def::Struct(..) => {
+                    Some(match args {
+                        &Some(ref args) => args.iter().map(|p| &**p).collect(),
+                        &None => vec![DUMMY_WILD_PAT; arity],
+                    })
+                }
+                _ => None
+            }
+        }
+
+        PatKind::QPath(_, _) => {
+            span_bug!(pat_span, "const pattern should've been rewritten")
+        }
+
+        PatKind::Struct(_, ref pattern_fields, _) => {
+            let def = cx.tcx.def_map.borrow().get(&pat_id).unwrap().full_def();
+            let adt = cx.tcx.node_id_to_type(pat_id).ty_adt_def().unwrap();
+            let variant = constructor.variant_for_adt(adt);
+            let def_variant = adt.variant_of_def(def);
+            if variant.did == def_variant.did {
+                Some(variant.fields.iter().map(|sf| {
+                    match pattern_fields.iter().find(|f| f.node.name == sf.name) {
+                        Some(ref f) => &*f.node.pat,
+                        _ => DUMMY_WILD_PAT
+                    }
+                }).collect())
+            } else {
+                None
+            }
+        }
+
+        PatKind::Tup(ref args) =>
+            Some(args.iter().map(|p| &**p).collect()),
+
+        PatKind::Box(ref inner) | PatKind::Ref(ref inner, _) =>
+            Some(vec![&**inner]),
+
+        PatKind::Lit(ref expr) => {
+            let expr_value = eval_const_expr(cx.tcx, &expr);
+            match range_covered_by_constructor(constructor, &expr_value, &expr_value) {
+                Some(true) => Some(vec![]),
+                Some(false) => None,
+                None => {
+                    span_err!(cx.tcx.sess, pat_span, E0298, "mismatched types between arms");
+                    None
+                }
+            }
+        }
+
+        PatKind::Range(ref from, ref to) => {
+            let from_value = eval_const_expr(cx.tcx, &from);
+            let to_value = eval_const_expr(cx.tcx, &to);
+            match range_covered_by_constructor(constructor, &from_value, &to_value) {
+                Some(true) => Some(vec![]),
+                Some(false) => None,
+                None => {
+                    span_err!(cx.tcx.sess, pat_span, E0299, "mismatched types between arms");
+                    None
+                }
+            }
+        }
+
+        PatKind::Vec(ref before, ref slice, ref after) => {
+            match *constructor {
+                // Fixed-length vectors.
+                Single => {
+                    let mut pats: Vec<&Pat> = before.iter().map(|p| &**p).collect();
+                    pats.extend(repeat(DUMMY_WILD_PAT).take(arity - before.len() - after.len()));
+                    pats.extend(after.iter().map(|p| &**p));
+                    Some(pats)
+                },
+                Slice(length) if before.len() + after.len() <= length && slice.is_some() => {
+                    let mut pats: Vec<&Pat> = before.iter().map(|p| &**p).collect();
+                    pats.extend(repeat(DUMMY_WILD_PAT).take(arity - before.len() - after.len()));
+                    pats.extend(after.iter().map(|p| &**p));
+                    Some(pats)
+                },
+                Slice(length) if before.len() + after.len() == length => {
+                    let mut pats: Vec<&Pat> = before.iter().map(|p| &**p).collect();
+                    pats.extend(after.iter().map(|p| &**p));
+                    Some(pats)
+                },
+                SliceWithSubslice(prefix, suffix)
+                    if before.len() == prefix
+                        && after.len() == suffix
+                        && slice.is_some() => {
+                    let mut pats: Vec<&Pat> = before.iter().map(|p| &**p).collect();
+                    pats.extend(after.iter().map(|p| &**p));
+                    Some(pats)
+                }
+                _ => None
+            }
+        }
+    };
+    head.map(|mut head| {
+        head.extend_from_slice(&r[..col]);
+        head.extend_from_slice(&r[col + 1..]);
+        head
+    })
+}
+
+fn check_local(cx: &mut MatchCheckCtxt, loc: &hir::Local) {
+    intravisit::walk_local(cx, loc);
+
+    let pat = StaticInliner::new(cx.tcx, None).fold_pat(loc.pat.clone());
+    check_irrefutable(cx, &pat, false);
+
+    // Check legality of move bindings and `@` patterns.
+    check_legality_of_move_bindings(cx, false, slice::ref_slice(&loc.pat));
+    check_legality_of_bindings_in_at_patterns(cx, &loc.pat);
+}
+
+fn check_fn(cx: &mut MatchCheckCtxt,
+            kind: FnKind,
+            decl: &hir::FnDecl,
+            body: &hir::Block,
+            sp: Span,
+            fn_id: NodeId) {
+    match kind {
+        FnKind::Closure(_) => {}
+        _ => cx.param_env = ParameterEnvironment::for_item(cx.tcx, fn_id),
+    }
+
+    intravisit::walk_fn(cx, kind, decl, body, sp);
+
+    for input in &decl.inputs {
+        check_irrefutable(cx, &input.pat, true);
+        check_legality_of_move_bindings(cx, false, slice::ref_slice(&input.pat));
+        check_legality_of_bindings_in_at_patterns(cx, &input.pat);
+    }
+}
+
+fn check_irrefutable(cx: &MatchCheckCtxt, pat: &Pat, is_fn_arg: bool) {
+    let origin = if is_fn_arg {
+        "function argument"
+    } else {
+        "local binding"
+    };
+
+    is_refutable(cx, pat, |uncovered_pat| {
+        span_err!(cx.tcx.sess, pat.span, E0005,
+            "refutable pattern in {}: `{}` not covered",
+            origin,
+            pat_to_string(uncovered_pat),
+        );
+    });
+}
+
+fn is_refutable<A, F>(cx: &MatchCheckCtxt, pat: &Pat, refutable: F) -> Option<A> where
+    F: FnOnce(&Pat) -> A,
+{
+    let pats = Matrix(vec!(vec!(pat)));
+    match is_useful(cx, &pats, &[DUMMY_WILD_PAT], ConstructWitness) {
+        UsefulWithWitness(pats) => Some(refutable(&pats[0])),
+        NotUseful => None,
+        Useful => bug!()
+    }
+}
+
+// Legality of move bindings checking
+fn check_legality_of_move_bindings(cx: &MatchCheckCtxt,
+                                   has_guard: bool,
+                                   pats: &[P<Pat>]) {
+    let tcx = cx.tcx;
+    let def_map = &tcx.def_map;
+    let mut by_ref_span = None;
+    for pat in pats {
+        pat_bindings(def_map, &pat, |bm, _, span, _path| {
+            match bm {
+                hir::BindByRef(_) => {
+                    by_ref_span = Some(span);
+                }
+                hir::BindByValue(_) => {
+                }
+            }
+        })
+    }
+
+    let check_move = |p: &Pat, sub: Option<&Pat>| {
+        // check legality of moving out of the enum
+
+        // x @ Foo(..) is legal, but x @ Foo(y) isn't.
+        if sub.map_or(false, |p| pat_contains_bindings(&def_map.borrow(), &p)) {
+            span_err!(cx.tcx.sess, p.span, E0007, "cannot bind by-move with sub-bindings");
+        } else if has_guard {
+            span_err!(cx.tcx.sess, p.span, E0008, "cannot bind by-move into a pattern guard");
+        } else if by_ref_span.is_some() {
+            let mut err = struct_span_err!(cx.tcx.sess, p.span, E0009,
+                                           "cannot bind by-move and by-ref in the same pattern");
+            span_note!(&mut err, by_ref_span.unwrap(), "by-ref binding occurs here");
+            err.emit();
+        }
+    };
+
+    for pat in pats {
+        pat.walk(|p| {
+            if pat_is_binding(&def_map.borrow(), &p) {
+                match p.node {
+                    PatKind::Ident(hir::BindByValue(_), _, ref sub) => {
+                        let pat_ty = tcx.node_id_to_type(p.id);
+                        //FIXME: (@jroesch) this code should be floated up as well
+                        let infcx = infer::new_infer_ctxt(cx.tcx,
+                                                          &cx.tcx.tables,
+                                                          Some(cx.param_env.clone()),
+                                                          ProjectionMode::AnyFinal);
+                        if infcx.type_moves_by_default(pat_ty, pat.span) {
+                            check_move(p, sub.as_ref().map(|p| &**p));
+                        }
+                    }
+                    PatKind::Ident(hir::BindByRef(_), _, _) => {
+                    }
+                    _ => {
+                        span_bug!(
+                            p.span,
+                            "binding pattern {} is not an identifier: {:?}",
+                            p.id,
+                            p.node);
+                    }
+                }
+            }
+            true
+        });
+    }
+}
+
+/// Ensures that a pattern guard doesn't borrow by mutable reference or
+/// assign.
+fn check_for_mutation_in_guard<'a, 'tcx>(cx: &'a MatchCheckCtxt<'a, 'tcx>,
+                                         guard: &hir::Expr) {
+    let mut checker = MutationChecker {
+        cx: cx,
+    };
+
+    let infcx = infer::new_infer_ctxt(cx.tcx,
+                                      &cx.tcx.tables,
+                                      Some(checker.cx.param_env.clone()),
+                                      ProjectionMode::AnyFinal);
+
+    let mut visitor = ExprUseVisitor::new(&mut checker, &infcx);
+    visitor.walk_expr(guard);
+}
+
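+/// Expression-use visitor delegate that reports mutable borrows (E0301) and
+/// assignments (E0302) occurring inside a pattern guard.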
+struct MutationChecker<'a, 'tcx: 'a> {
+    cx: &'a MatchCheckCtxt<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> Delegate<'tcx> for MutationChecker<'a, 'tcx> {
+    fn matched_pat(&mut self, _: &Pat, _: cmt, _: euv::MatchMode) {}
+    fn consume(&mut self, _: NodeId, _: Span, _: cmt, _: ConsumeMode) {}
+    fn consume_pat(&mut self, _: &Pat, _: cmt, _: ConsumeMode) {}
+    fn borrow(&mut self,
+              _: NodeId,
+              span: Span,
+              _: cmt,
+              _: Region,
+              kind: BorrowKind,
+              _: LoanCause) {
+        match kind {
+            MutBorrow => {
+                span_err!(self.cx.tcx.sess, span, E0301,
+                          "cannot mutably borrow in a pattern guard")
+            }
+            ImmBorrow | UniqueImmBorrow => {}
+        }
+    }
+    fn decl_without_init(&mut self, _: NodeId, _: Span) {}
+    fn mutate(&mut self, _: NodeId, span: Span, _: cmt, mode: MutateMode) {
+        match mode {
+            MutateMode::JustWrite | MutateMode::WriteAndRead => {
+                span_err!(self.cx.tcx.sess, span, E0302, "cannot assign in a pattern guard")
+            }
+            MutateMode::Init => {}
+        }
+    }
+}
+
+/// Forbids bindings in `@` patterns. This is necessary for memory safety,
+/// because of the way rvalues are handled in the borrow check. (See issue
+/// #14587.)
+fn check_legality_of_bindings_in_at_patterns(cx: &MatchCheckCtxt, pat: &Pat) {
+    AtBindingPatternVisitor { cx: cx, bindings_allowed: true }.visit_pat(pat);
+}
+
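+/// Visitor used by `check_legality_of_bindings_in_at_patterns`. The
+/// `bindings_allowed` flag is cleared while walking the sub-pattern of an
+/// `ident @ subpattern` pattern, so any binding found there is reported.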
+struct AtBindingPatternVisitor<'a, 'b:'a, 'tcx:'b> {
+    cx: &'a MatchCheckCtxt<'b, 'tcx>,
+    bindings_allowed: bool
+}
+
+impl<'a, 'b, 'tcx, 'v> Visitor<'v> for AtBindingPatternVisitor<'a, 'b, 'tcx> {
+    fn visit_pat(&mut self, pat: &Pat) {
+        if !self.bindings_allowed && pat_is_binding(&self.cx.tcx.def_map.borrow(), pat) {
+            span_err!(self.cx.tcx.sess, pat.span, E0303,
+                                      "pattern bindings are not allowed \
+                                       after an `@`");
+        }
+
+        match pat.node {
+            PatKind::Ident(_, _, Some(_)) => {
+                let bindings_were_allowed = self.bindings_allowed;
+                self.bindings_allowed = false;
+                intravisit::walk_pat(self, pat);
+                self.bindings_allowed = bindings_were_allowed;
+            }
+            _ => intravisit::walk_pat(self, pat),
+        }
+    }
+}
diff --git a/src/librustc_const_eval/diagnostics.rs b/src/librustc_const_eval/diagnostics.rs
new file mode 100644 (file)
index 0000000..4f5176f
--- /dev/null
@@ -0,0 +1,545 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(non_snake_case)]
+
+// Error messages for EXXXX errors.
+// Each message should start and end with a new line, and be wrapped to 80 characters.
+// In vim you can `:set tw=80` and use `gq` to wrap paragraphs. Use `:set tw=0` to disable.
+register_long_diagnostics! {
+
+E0001: r##"
+This error suggests that the expression arm corresponding to the noted pattern
+will never be reached because, for all possible values of the expression
+being matched, one of the preceding patterns will match.
+
+This means that perhaps some of the preceding patterns are too general, this
+one is too specific, or the ordering is incorrect.
+
+For example, the following `match` block has too many arms:
+
+```compile_fail
+match foo {
+    Some(bar) => {/* ... */}
+    None => {/* ... */}
+    _ => {/* ... */} // All possible cases have already been handled
+}
+```
+
+`match` blocks have their patterns matched in order, so, for example, putting
+a wildcard arm above a more specific arm will make the latter arm irrelevant.
+
+Ensure the ordering of the match arms is correct and remove any superfluous
+arms.
+"##,
+
+E0002: r##"
+This error indicates that an empty match expression is invalid because the type
+it is matching on is non-empty (there exist values of this type). In safe code
+it is impossible to create an instance of an empty type, so empty match
+expressions are almost never desired. This error is typically fixed by adding
+one or more cases to the match expression.
+
+An example of an empty type is `enum Empty { }`. So, the following will work:
+
+```
+enum Empty {}
+
+fn foo(x: Empty) {
+    match x {
+        // empty
+    }
+}
+```
+
+However, this won't:
+
+```compile_fail
+enum Empty {}
+
+fn foo(x: Option<String>) {
+    match x {
+        // empty
+    }
+}
+```
+"##,
+
+
+E0003: r##"
+Not-a-Number (NaN) values cannot be compared for equality and hence can never
+match the input to a match expression. So, the following will not compile:
+
+```compile_fail
+const NAN: f32 = 0.0 / 0.0;
+
+let number = 0.1f32;
+
+match number {
+    NAN => { /* ... */ },
+    _ => {}
+}
+```
+
+To match against NaN values, you should instead use the `is_nan()` method in a
+guard, like so:
+
+```
+let number = 0.1f32;
+
+match number {
+    x if x.is_nan() => { /* ... */ }
+    _ => {}
+}
+```
+"##,
+
+
+E0004: r##"
+This error indicates that the compiler cannot guarantee a matching pattern for
+one or more possible inputs to a match expression. Guaranteed matches are
+required in order to assign values to match expressions, or, alternatively,
+to determine the flow of execution. Erroneous code example:
+
+```compile_fail
+enum Terminator {
+    HastaLaVistaBaby,
+    TalkToMyHand,
+}
+
+let x = Terminator::HastaLaVistaBaby;
+
+match x { // error: non-exhaustive patterns: `HastaLaVistaBaby` not covered
+    Terminator::TalkToMyHand => {}
+}
+```
+
+If you encounter this error you must alter your patterns so that every possible
+value of the input type is matched. For types with a small number of variants
+(like enums) you should probably cover all cases explicitly. Alternatively, the
+underscore `_` wildcard pattern can be added after all other patterns to match
+"anything else". Example:
+
+```
+enum Terminator {
+    HastaLaVistaBaby,
+    TalkToMyHand,
+}
+
+let x = Terminator::HastaLaVistaBaby;
+
+match x {
+    Terminator::TalkToMyHand => {}
+    Terminator::HastaLaVistaBaby => {}
+}
+
+// or:
+
+match x {
+    Terminator::TalkToMyHand => {}
+    _ => {}
+}
+```
+"##,
+
+E0005: r##"
+Patterns used to bind names must be irrefutable, that is, they must guarantee
+that a name will be extracted in all cases. Erroneous code example:
+
+```compile_fail
+let x = Some(1);
+let Some(y) = x;
+// error: refutable pattern in local binding: `None` not covered
+```
+
+If you encounter this error you probably need to use a `match` or `if let` to
+deal with the possibility of failure. Example:
+
+```
+let x = Some(1);
+
+match x {
+    Some(y) => {
+        // do something
+    },
+    None => {}
+}
+
+// or:
+
+if let Some(y) = x {
+    // do something
+}
+```
+"##,
+
+E0007: r##"
+This error indicates that the bindings in a match arm would require a value to
+be moved into more than one location, thus violating unique ownership. Code
+like the following is invalid as it requires the entire `Option<String>` to be
+moved into a variable called `op_string` while simultaneously requiring the
+inner `String` to be moved into a variable called `s`.
+
+```compile_fail
+let x = Some("s".to_string());
+
+match x {
+    op_string @ Some(s) => {},
+    None => {},
+}
+```
+
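+As the restriction applies only when the sub-pattern itself contains bindings,
+binding the whole value while matching the variant without binding its
+contents does work:
+
+```
+let x = Some("s".to_string());
+
+match x {
+    op_string @ Some(_) => {},
+    None => {},
+}
+```
+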
+See also the error E0303.
+"##,
+
+E0008: r##"
+Names bound in match arms retain their type in pattern guards. As such, if a
+name is bound by move in a pattern, it should also be moved to wherever it is
+referenced in the pattern guard code. Doing so however would prevent the name
+from being available in the body of the match arm. Consider the following:
+
+```compile_fail
+match Some("hi".to_string()) {
+    Some(s) if s.len() == 0 => {}, // use s.
+    _ => {},
+}
+```
+
+The variable `s` has type `String`, and its use in the guard is as a variable of
+type `String`. The guard code effectively executes in a separate scope to the
+body of the arm, so the value would be moved into this anonymous scope and
+therefore become unavailable in the body of the arm. Although this example seems
+innocuous, the problem is most clear when considering functions that take their
+argument by value.
+
+```compile_fail
+match Some("hi".to_string()) {
+    Some(s) if { drop(s); false } => (),
+    Some(s) => {}, // use s.
+    _ => {},
+}
+```
+
+The value would be dropped in the guard then become unavailable not only in the
+body of that arm but also in all subsequent arms! The solution is to bind by
+reference when using guards or refactor the entire expression, perhaps by
+putting the condition inside the body of the arm.
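+
+One way to keep the value available, as described above, is to bind by
+reference in the pattern so that the guard only borrows it:
+
+```
+match Some("hi".to_string()) {
+    Some(ref s) if s.len() == 0 => {}, // the guard only borrows `s`
+    _ => {},
+}
+```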
+"##,
+
+E0009: r##"
+In a pattern, all values that don't implement the `Copy` trait have to be bound
+the same way. The goal here is to avoid binding simultaneously by-move and
+by-ref.
+
+This limitation may be removed in a future version of Rust.
+
+Erroneous code example:
+
+```compile_fail
+struct X { x: (), }
+
+let x = Some((X { x: () }, X { x: () }));
+match x {
+    Some((y, ref z)) => {},
+    None => panic!()
+}
+```
+
+You have two solutions:
+
+Solution #1: Bind the pattern's values the same way.
+
+```
+struct X { x: (), }
+
+let x = Some((X { x: () }, X { x: () }));
+match x {
+    Some((ref y, ref z)) => {},
+    // or Some((y, z)) => {}
+    None => panic!()
+}
+```
+
+Solution #2: Implement the `Copy` trait for the `X` structure.
+
+However, please keep in mind that the first solution should be preferred.
+
+```
+#[derive(Clone, Copy)]
+struct X { x: (), }
+
+let x = Some((X { x: () }, X { x: () }));
+match x {
+    Some((y, ref z)) => {},
+    None => panic!()
+}
+```
+"##,
+
+E0158: r##"
+`const` and `static` mean different things. A `const` is a compile-time
+constant, an alias for a literal value. This property means you can match it
+directly within a pattern.
+
+The `static` keyword, on the other hand, guarantees a fixed location in memory.
+This does not always mean that the value is constant. For example, a global
+mutex can be declared `static` as well.
+
+If you want to match against a `static`, consider using a guard instead:
+
+```
+static FORTY_TWO: i32 = 42;
+
+match Some(42) {
+    Some(x) if x == FORTY_TWO => {}
+    _ => {}
+}
+```
+"##,
+
+E0162: r##"
+An if-let pattern attempts to match the pattern, and enters the body if the
+match was successful. If the match is irrefutable (when it cannot fail to
+match), use a regular `let`-binding instead. For instance:
+
+```compile_fail
+struct Irrefutable(i32);
+let irr = Irrefutable(0);
+
+// This fails to compile because the match is irrefutable.
+if let Irrefutable(x) = irr {
+    // This body will always be executed.
+    foo(x);
+}
+```
+
+Try this instead:
+
+```ignore
+struct Irrefutable(i32);
+let irr = Irrefutable(0);
+
+let Irrefutable(x) = irr;
+foo(x);
+```
+"##,
+
+E0165: r##"
+A while-let pattern attempts to match the pattern, and enters the body if the
+match was successful. If the match is irrefutable (when it cannot fail to
+match), use a regular `let`-binding inside a `loop` instead. For instance:
+
+```compile_fail
+struct Irrefutable(i32);
+let irr = Irrefutable(0);
+
+// This fails to compile because the match is irrefutable.
+while let Irrefutable(x) = irr {
+    ...
+}
+```
+
+Try this instead:
+
+```ignore
+struct Irrefutable(i32);
+let irr = Irrefutable(0);
+
+loop {
+    let Irrefutable(x) = irr;
+    ...
+}
+```
+"##,
+
+E0170: r##"
+Enum variants are qualified by default. For example, given this type:
+
+```
+enum Method {
+    GET,
+    POST,
+}
+```
+
+You would match it using:
+
+```
+enum Method {
+    GET,
+    POST,
+}
+
+let m = Method::GET;
+
+match m {
+    Method::GET => {},
+    Method::POST => {},
+}
+```
+
+If you don't qualify the names, the code will bind new variables named "GET" and
+"POST" instead. This behavior is likely not what you want, so `rustc` warns when
+that happens.
+
+Qualified names are good practice, and most code works well with them. But if
+you prefer them unqualified, you can import the variants into scope:
+
+```ignore
+use Method::*;
+enum Method { GET, POST }
+```
+
+If you want others to be able to import variants from your module directly, use
+`pub use`:
+
+```ignore
+pub use Method::*;
+enum Method { GET, POST }
+```
+"##,
+
+
+E0297: r##"
+Patterns used to bind names must be irrefutable. That is, they must guarantee
+that a name will be extracted in all cases. Instead of pattern matching the
+loop variable, consider using a `match` or `if let` inside the loop body. For
+instance:
+
+```compile_fail
+let xs : Vec<Option<i32>> = vec!(Some(1), None);
+
+// This fails because `None` is not covered.
+for Some(x) in xs {
+    // ...
+}
+```
+
+Match inside the loop instead:
+
+```
+let xs : Vec<Option<i32>> = vec!(Some(1), None);
+
+for item in xs {
+    match item {
+        Some(x) => {},
+        None => {},
+    }
+}
+```
+
+Or use `if let`:
+
+```
+let xs : Vec<Option<i32>> = vec!(Some(1), None);
+
+for item in xs {
+    if let Some(x) = item {
+        // ...
+    }
+}
+```
+"##,
+
+E0301: r##"
+Mutable borrows are not allowed in pattern guards, because matching cannot have
+side effects. Side effects could alter the matched object or the environment
+on which the match depends in such a way that the match would not be
+exhaustive. For instance, the following would not match any arm if mutable
+borrows were allowed:
+
+```compile_fail
+match Some(()) {
+    None => { },
+    option if option.take().is_none() => {
+        /* impossible, option is `Some` */
+    },
+    Some(_) => { } // When the previous match failed, the option became `None`.
+}
+```
+"##,
+
+E0302: r##"
+Assignments are not allowed in pattern guards, because matching cannot have
+side effects. Side effects could alter the matched object or the environment
+on which the match depends in such a way that the match would not be
+exhaustive. For instance, the following would not match any arm if assignments
+were allowed:
+
+```compile_fail
+match Some(()) {
+    None => { },
+    option if { option = None; false } => { },
+    Some(_) => { } // When the previous match failed, the option became `None`.
+}
+```
+"##,
+
+E0303: r##"
+In certain cases it is possible for sub-bindings to violate memory safety.
+Updates to the borrow checker in a future version of Rust may remove this
+restriction, but for now patterns must be rewritten without sub-bindings.
+
+```ignore
+// Before.
+match Some("hi".to_string()) {
+    ref op_string_ref @ Some(s) => {},
+    None => {},
+}
+
+// After.
+match Some("hi".to_string()) {
+    Some(ref s) => {
+        let op_string_ref = &Some(s);
+        // ...
+    },
+    None => {},
+}
+```
+
+The `op_string_ref` binding has type `&Option<&String>` in both cases.
+
+See also https://github.com/rust-lang/rust/issues/14587
+"##,
+
+E0306: r##"
+In an array literal `[x; N]`, `N` is the number of elements in the array. This
+must be an unsigned integer. Erroneous code example:
+
+```compile_fail
+let x = [0i32; true]; // error: expected positive integer for repeat count,
+                      //        found boolean
+```
+
+Working example:
+
+```
+let x = [0i32; 2];
+```
+"##,
+
+E0307: r##"
+The length of an array is part of its type. For this reason, this length must
+be a compile-time constant. Erroneous code example:
+
+```compile_fail
+let len = 10;
+let x = [0i32; len]; // error: expected constant integer for repeat count,
+                     //        found variable
+```
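+
+Working example, using a constant for the length:
+
+```
+const LEN: usize = 10;
+let x = [0i32; LEN];
+```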
+"##,
+
+}
+
+
+register_diagnostics! {
+E0298, // mismatched types between arms
+E0299, // mismatched types between arms
+E0471, // constant evaluation error: ..
+}
diff --git a/src/librustc_const_eval/eval.rs b/src/librustc_const_eval/eval.rs
new file mode 100644 (file)
index 0000000..45a90bf
--- /dev/null
@@ -0,0 +1,1237 @@
+// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//#![allow(non_camel_case_types)]
+
+use rustc::middle::const_val::ConstVal::*;
+use rustc::middle::const_val::ConstVal;
+use self::ErrKind::*;
+use self::EvalHint::*;
+
+use rustc::hir::map as ast_map;
+use rustc::hir::map::blocks::FnLikeNode;
+use rustc::middle::cstore::{self, CrateStore, InlinedItem};
+use rustc::{infer, traits};
+use rustc::hir::def::Def;
+use rustc::hir::def_id::DefId;
+use rustc::hir::pat_util::def_to_path;
+use rustc::ty::{self, Ty, TyCtxt, subst};
+use rustc::ty::util::IntTypeExt;
+use rustc::traits::ProjectionMode;
+use rustc::middle::astconv_util::ast_ty_to_prim_ty;
+use rustc::util::nodemap::NodeMap;
+use rustc::lint;
+
+use graphviz::IntoCow;
+use syntax::ast;
+use rustc::hir::{Expr, PatKind};
+use rustc::hir;
+use rustc::hir::intravisit::FnKind;
+use syntax::codemap::Span;
+use syntax::ptr::P;
+use syntax::codemap;
+use syntax::attr::IntType;
+
+use std::borrow::Cow;
+use std::cmp::Ordering;
+use std::collections::hash_map::Entry::Vacant;
+
+use rustc_const_math::*;
+
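+// Evaluates a checked arithmetic operation from `rustc_const_math`, signalling
+// a `Math` const-eval error at the span of `$e` if the operation fails.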
+macro_rules! math {
+    ($e:expr, $op:expr) => {
+        match $op {
+            Ok(val) => val,
+            Err(e) => signal!($e, Math(e)),
+        }
+    }
+}
+
+fn lookup_variant_by_id<'a>(tcx: &'a ty::TyCtxt,
+                            enum_def: DefId,
+                            variant_def: DefId)
+                            -> Option<&'a Expr> {
+    fn variant_expr<'a>(variants: &'a [hir::Variant], id: ast::NodeId)
+                        -> Option<&'a Expr> {
+        for variant in variants {
+            if variant.node.data.id() == id {
+                return variant.node.disr_expr.as_ref().map(|e| &**e);
+            }
+        }
+        None
+    }
+
+    if let Some(enum_node_id) = tcx.map.as_local_node_id(enum_def) {
+        let variant_node_id = tcx.map.as_local_node_id(variant_def).unwrap();
+        match tcx.map.find(enum_node_id) {
+            None => None,
+            Some(ast_map::NodeItem(it)) => match it.node {
+                hir::ItemEnum(hir::EnumDef { ref variants }, _) => {
+                    variant_expr(variants, variant_node_id)
+                }
+                _ => None
+            },
+            Some(_) => None
+        }
+    } else {
+        None
+    }
+}
+
+/// * `def_id` is the id of the constant.
+/// * `substs` is the monomorphized substitutions for the expression.
+///
+/// `substs` is optional and is used for associated constants.
+/// This generally happens in late/trans const evaluation.
+pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>,
+                                        def_id: DefId,
+                                        substs: Option<subst::Substs<'tcx>>)
+                                        -> Option<(&'tcx Expr, Option<ty::Ty<'tcx>>)> {
+    if let Some(node_id) = tcx.map.as_local_node_id(def_id) {
+        match tcx.map.find(node_id) {
+            None => None,
+            Some(ast_map::NodeItem(it)) => match it.node {
+                hir::ItemConst(ref ty, ref const_expr) => {
+                    Some((&const_expr, ast_ty_to_prim_ty(tcx, ty)))
+                }
+                _ => None
+            },
+            Some(ast_map::NodeTraitItem(ti)) => match ti.node {
+                hir::ConstTraitItem(_, _) => {
+                    if let Some(substs) = substs {
+                        // If we have a trait item and the substitutions for it,
+                        // `resolve_trait_associated_const` will select an impl
+                        // or the default.
+                        let trait_id = tcx.trait_of_item(def_id).unwrap();
+                        resolve_trait_associated_const(tcx, ti, trait_id, substs)
+                    } else {
+                        // Technically, without knowing anything about the
+                        // expression that generates the obligation, we could
+                        // still return the default if there is one. However,
+                        // it's safer to return `None` than to return some value
+                        // that may differ from what you would get from
+                        // correctly selecting an impl.
+                        None
+                    }
+                }
+                _ => None
+            },
+            Some(ast_map::NodeImplItem(ii)) => match ii.node {
+                hir::ImplItemKind::Const(ref ty, ref expr) => {
+                    Some((&expr, ast_ty_to_prim_ty(tcx, ty)))
+                }
+                _ => None
+            },
+            Some(_) => None
+        }
+    } else {
+        match tcx.extern_const_statics.borrow().get(&def_id) {
+            Some(&None) => return None,
+            Some(&Some((expr_id, ty))) => {
+                return Some((tcx.map.expect_expr(expr_id), ty));
+            }
+            None => {}
+        }
+        let mut used_substs = false;
+        let expr_ty = match tcx.sess.cstore.maybe_get_item_ast(tcx, def_id) {
+            cstore::FoundAst::Found(&InlinedItem::Item(ref item)) => match item.node {
+                hir::ItemConst(ref ty, ref const_expr) => {
+                    Some((&**const_expr, ast_ty_to_prim_ty(tcx, ty)))
+                },
+                _ => None
+            },
+            cstore::FoundAst::Found(&InlinedItem::TraitItem(trait_id, ref ti)) => match ti.node {
+                hir::ConstTraitItem(_, _) => {
+                    used_substs = true;
+                    if let Some(substs) = substs {
+                        // As mentioned in the comments above for in-crate
+                        // constants, we only try to find the expression for
+                        // a trait-associated const if the caller gives us
+                        // the substitutions for the reference to it.
+                        resolve_trait_associated_const(tcx, ti, trait_id, substs)
+                    } else {
+                        None
+                    }
+                }
+                _ => None
+            },
+            cstore::FoundAst::Found(&InlinedItem::ImplItem(_, ref ii)) => match ii.node {
+                hir::ImplItemKind::Const(ref ty, ref expr) => {
+                    Some((&**expr, ast_ty_to_prim_ty(tcx, ty)))
+                },
+                _ => None
+            },
+            _ => None
+        };
+        // If we used the substitutions, particularly to choose an impl
+        // of a trait-associated const, don't cache that, because the next
+        // lookup with the same def_id may yield a different result.
+        if !used_substs {
+            tcx.extern_const_statics
+               .borrow_mut()
+               .insert(def_id, expr_ty.map(|(e, t)| (e.id, t)));
+        }
+        expr_ty
+    }
+}
+
+fn inline_const_fn_from_external_crate(tcx: &TyCtxt, def_id: DefId)
+                                       -> Option<ast::NodeId> {
+    match tcx.extern_const_fns.borrow().get(&def_id) {
+        Some(&ast::DUMMY_NODE_ID) => return None,
+        Some(&fn_id) => return Some(fn_id),
+        None => {}
+    }
+
+    if !tcx.sess.cstore.is_const_fn(def_id) {
+        tcx.extern_const_fns.borrow_mut().insert(def_id, ast::DUMMY_NODE_ID);
+        return None;
+    }
+
+    let fn_id = match tcx.sess.cstore.maybe_get_item_ast(tcx, def_id) {
+        cstore::FoundAst::Found(&InlinedItem::Item(ref item)) => Some(item.id),
+        cstore::FoundAst::Found(&InlinedItem::ImplItem(_, ref item)) => Some(item.id),
+        _ => None
+    };
+    tcx.extern_const_fns.borrow_mut().insert(def_id,
+                                             fn_id.unwrap_or(ast::DUMMY_NODE_ID));
+    fn_id
+}
+
+pub fn lookup_const_fn_by_id<'tcx>(tcx: &TyCtxt<'tcx>, def_id: DefId)
+                                   -> Option<FnLikeNode<'tcx>>
+{
+    let fn_id = if let Some(node_id) = tcx.map.as_local_node_id(def_id) {
+        node_id
+    } else {
+        if let Some(fn_id) = inline_const_fn_from_external_crate(tcx, def_id) {
+            fn_id
+        } else {
+            return None;
+        }
+    };
+
+    let fn_like = match FnLikeNode::from_node(tcx.map.get(fn_id)) {
+        Some(fn_like) => fn_like,
+        None => return None
+    };
+
+    match fn_like.kind() {
+        FnKind::ItemFn(_, _, _, hir::Constness::Const, _, _, _) => {
+            Some(fn_like)
+        }
+        FnKind::Method(_, m, _, _) => {
+            if m.constness == hir::Constness::Const {
+                Some(fn_like)
+            } else {
+                None
+            }
+        }
+        _ => None
+    }
+}
+
+pub fn const_expr_to_pat(tcx: &ty::TyCtxt, expr: &Expr, pat_id: ast::NodeId, span: Span)
+                         -> Result<P<hir::Pat>, DefId> {
+    let pat_ty = tcx.expr_ty(expr);
+    debug!("expr={:?} pat_ty={:?} pat_id={}", expr, pat_ty, pat_id);
+    match pat_ty.sty {
+        ty::TyFloat(_) => {
+            tcx.sess.add_lint(
+                lint::builtin::ILLEGAL_FLOATING_POINT_CONSTANT_PATTERN,
+                pat_id,
+                span,
+                format!("floating point constants cannot be used in patterns"));
+        }
+        ty::TyEnum(adt_def, _) |
+        ty::TyStruct(adt_def, _) => {
+            if !tcx.has_attr(adt_def.did, "structural_match") {
+                tcx.sess.add_lint(
+                    lint::builtin::ILLEGAL_STRUCT_OR_ENUM_CONSTANT_PATTERN,
+                    pat_id,
+                    span,
+                    format!("to use a constant of type `{}` \
+                             in a pattern, \
+                             `{}` must be annotated with `#[derive(PartialEq, Eq)]`",
+                            tcx.item_path_str(adt_def.did),
+                            tcx.item_path_str(adt_def.did)));
+            }
+        }
+        _ => { }
+    }
+    let pat = match expr.node {
+        hir::ExprTup(ref exprs) =>
+            PatKind::Tup(try!(exprs.iter()
+                                  .map(|expr| const_expr_to_pat(tcx, &expr,
+                                                                pat_id, span))
+                                  .collect())),
+
+        hir::ExprCall(ref callee, ref args) => {
+            let def = *tcx.def_map.borrow().get(&callee.id).unwrap();
+            if let Vacant(entry) = tcx.def_map.borrow_mut().entry(expr.id) {
+               entry.insert(def);
+            }
+            let path = match def.full_def() {
+                Def::Struct(def_id) => def_to_path(tcx, def_id),
+                Def::Variant(_, variant_did) => def_to_path(tcx, variant_did),
+                Def::Fn(..) => return Ok(P(hir::Pat {
+                    id: expr.id,
+                    node: PatKind::Lit(P(expr.clone())),
+                    span: span,
+                })),
+                _ => bug!()
+            };
+            let pats = try!(args.iter()
+                                .map(|expr| const_expr_to_pat(tcx, &**expr,
+                                                              pat_id, span))
+                                .collect());
+            PatKind::TupleStruct(path, Some(pats))
+        }
+
+        hir::ExprStruct(ref path, ref fields, None) => {
+            let field_pats =
+                try!(fields.iter()
+                           .map(|field| Ok(codemap::Spanned {
+                               span: codemap::DUMMY_SP,
+                               node: hir::FieldPat {
+                                   name: field.name.node,
+                                   pat: try!(const_expr_to_pat(tcx, &field.expr,
+                                                               pat_id, span)),
+                                   is_shorthand: false,
+                               },
+                           }))
+                           .collect());
+            PatKind::Struct(path.clone(), field_pats, false)
+        }
+
+        hir::ExprVec(ref exprs) => {
+            let pats = try!(exprs.iter()
+                                 .map(|expr| const_expr_to_pat(tcx, &expr,
+                                                               pat_id, span))
+                                 .collect());
+            PatKind::Vec(pats, None, hir::HirVec::new())
+        }
+
+        hir::ExprPath(_, ref path) => {
+            let opt_def = tcx.def_map.borrow().get(&expr.id).map(|d| d.full_def());
+            match opt_def {
+                Some(Def::Struct(..)) | Some(Def::Variant(..)) =>
+                    PatKind::Path(path.clone()),
+                Some(Def::Const(def_id)) |
+                Some(Def::AssociatedConst(def_id)) => {
+                    let substs = Some(tcx.node_id_item_substs(expr.id).substs);
+                    let (expr, _ty) = lookup_const_by_id(tcx, def_id, substs).unwrap();
+                    return const_expr_to_pat(tcx, expr, pat_id, span);
+                },
+                _ => bug!(),
+            }
+        }
+
+        _ => PatKind::Lit(P(expr.clone()))
+    };
+    Ok(P(hir::Pat { id: expr.id, node: pat, span: span }))
+}
+
+pub fn eval_const_expr(tcx: &TyCtxt, e: &Expr) -> ConstVal {
+    match eval_const_expr_partial(tcx, e, ExprTypeChecked, None) {
+        Ok(r) => r,
+        // non-const path still needs to be a fatal error, because enums are funky
+        Err(s) => {
+            match s.kind {
+                NonConstPath |
+                UnimplementedConstVal(_) => tcx.sess.span_fatal(s.span, &s.description()),
+                _ => {
+                    tcx.sess.span_err(s.span, &s.description());
+                    Dummy
+                }
+            }
+        },
+    }
+}
+
+pub type FnArgMap<'a> = Option<&'a NodeMap<ConstVal>>;
+
+#[derive(Clone)]
+pub struct ConstEvalErr {
+    pub span: Span,
+    pub kind: ErrKind,
+}
+
+#[derive(Clone, PartialEq)]
+pub enum ErrKind {
+    CannotCast,
+    CannotCastTo(&'static str),
+    InvalidOpForInts(hir::BinOp_),
+    InvalidOpForBools(hir::BinOp_),
+    InvalidOpForFloats(hir::BinOp_),
+    InvalidOpForIntUint(hir::BinOp_),
+    InvalidOpForUintInt(hir::BinOp_),
+    NegateOn(ConstVal),
+    NotOn(ConstVal),
+    CallOn(ConstVal),
+
+    NegateWithOverflow(i64),
+    AddiWithOverflow(i64, i64),
+    SubiWithOverflow(i64, i64),
+    MuliWithOverflow(i64, i64),
+    AdduWithOverflow(u64, u64),
+    SubuWithOverflow(u64, u64),
+    MuluWithOverflow(u64, u64),
+    DivideByZero,
+    DivideWithOverflow,
+    ModuloByZero,
+    ModuloWithOverflow,
+    ShiftLeftWithOverflow,
+    ShiftRightWithOverflow,
+    MissingStructField,
+    NonConstPath,
+    UnimplementedConstVal(&'static str),
+    UnresolvedPath,
+    ExpectedConstTuple,
+    ExpectedConstStruct,
+    TupleIndexOutOfBounds,
+    IndexedNonVec,
+    IndexNegative,
+    IndexNotInt,
+    IndexOutOfBounds,
+    RepeatCountNotNatural,
+    RepeatCountNotInt,
+
+    MiscBinaryOp,
+    MiscCatchAll,
+
+    IndexOpFeatureGated,
+    Math(ConstMathErr),
+
+    IntermediateUnsignedNegative,
+    /// Expected, Got
+    TypeMismatch(String, ConstInt),
+    BadType(ConstVal),
+}
+
+impl From<ConstMathErr> for ErrKind {
+    fn from(err: ConstMathErr) -> ErrKind {
+        Math(err)
+    }
+}
+
+impl ConstEvalErr {
+    pub fn description(&self) -> Cow<str> {
+        use self::ErrKind::*;
+
+        match self.kind {
+            CannotCast => "can't cast this type".into_cow(),
+            CannotCastTo(s) => format!("can't cast this type to {}", s).into_cow(),
+            InvalidOpForInts(_) =>  "can't do this op on integrals".into_cow(),
+            InvalidOpForBools(_) =>  "can't do this op on bools".into_cow(),
+            InvalidOpForFloats(_) => "can't do this op on floats".into_cow(),
+            InvalidOpForIntUint(..) => "can't do this op on an isize and usize".into_cow(),
+            InvalidOpForUintInt(..) => "can't do this op on a usize and isize".into_cow(),
+            NegateOn(ref const_val) => format!("negate on {}", const_val.description()).into_cow(),
+            NotOn(ref const_val) => format!("not on {}", const_val.description()).into_cow(),
+            CallOn(ref const_val) => format!("call on {}", const_val.description()).into_cow(),
+
+            NegateWithOverflow(..) => "attempted to negate with overflow".into_cow(),
+            AddiWithOverflow(..) => "attempted to add with overflow".into_cow(),
+            SubiWithOverflow(..) => "attempted to sub with overflow".into_cow(),
+            MuliWithOverflow(..) => "attempted to mul with overflow".into_cow(),
+            AdduWithOverflow(..) => "attempted to add with overflow".into_cow(),
+            SubuWithOverflow(..) => "attempted to sub with overflow".into_cow(),
+            MuluWithOverflow(..) => "attempted to mul with overflow".into_cow(),
+            DivideByZero         => "attempted to divide by zero".into_cow(),
+            DivideWithOverflow   => "attempted to divide with overflow".into_cow(),
+            ModuloByZero         => "attempted remainder with a divisor of zero".into_cow(),
+            ModuloWithOverflow   => "attempted remainder with overflow".into_cow(),
+            ShiftLeftWithOverflow => "attempted left shift with overflow".into_cow(),
+            ShiftRightWithOverflow => "attempted right shift with overflow".into_cow(),
+            MissingStructField  => "nonexistent struct field".into_cow(),
+            NonConstPath        => "non-constant path in constant expression".into_cow(),
+            UnimplementedConstVal(what) =>
+                format!("unimplemented constant expression: {}", what).into_cow(),
+            UnresolvedPath => "unresolved path in constant expression".into_cow(),
+            ExpectedConstTuple => "expected constant tuple".into_cow(),
+            ExpectedConstStruct => "expected constant struct".into_cow(),
+            TupleIndexOutOfBounds => "tuple index out of bounds".into_cow(),
+            IndexedNonVec => "indexing is only supported for arrays".into_cow(),
+            IndexNegative => "indices must be non-negative integers".into_cow(),
+            IndexNotInt => "indices must be integers".into_cow(),
+            IndexOutOfBounds => "array index out of bounds".into_cow(),
+            RepeatCountNotNatural => "repeat count must be a natural number".into_cow(),
+            RepeatCountNotInt => "repeat count must be integers".into_cow(),
+
+            MiscBinaryOp => "bad operands for binary".into_cow(),
+            MiscCatchAll => "unsupported constant expr".into_cow(),
+            IndexOpFeatureGated => "the index operation on const values is unstable".into_cow(),
+            Math(ref err) => err.description().into_cow(),
+
+            IntermediateUnsignedNegative => "during the computation of an unsigned value a \
+                                             negative number was encountered. This is most \
+                                             likely a bug in the constant evaluator".into_cow(),
+
+            TypeMismatch(ref expected, ref got) => {
+                format!("mismatched types: expected `{}`, found `{}`",
+                        expected, got.description()).into_cow()
+            },
+            BadType(ref i) => format!("value of wrong type: {:?}", i).into_cow(),
+        }
+    }
+}
+
+pub type EvalResult = Result<ConstVal, ConstEvalErr>;
+pub type CastResult = Result<ConstVal, ErrKind>;
+
+// FIXME: Long-term, this enum should go away: trying to evaluate
+// an expression which hasn't been type-checked is a recipe for
+// disaster.  That said, it's not clear how to fix ast_ty_to_ty
+// to avoid the ordering issue.
+
+/// Hint to determine how to evaluate constant expressions which
+/// might not be type-checked.
+#[derive(Copy, Clone, Debug)]
+pub enum EvalHint<'tcx> {
+    /// We have a type-checked expression.
+    ExprTypeChecked,
+    /// We have an expression which hasn't been type-checked, but we have
+    /// an idea of what the type will be because of the context. For example,
+    /// the length of an array is always `usize`. (This is referred to as
+    /// a hint because it isn't guaranteed to be consistent with what
+    /// type-checking would compute.)
+    UncheckedExprHint(Ty<'tcx>),
+    /// We have an expression which has not yet been type-checked, and
+    /// we have no clue what the type will be.
+    UncheckedExprNoHint,
+}
+
+impl<'tcx> EvalHint<'tcx> {
+    fn erase_hint(&self) -> EvalHint<'tcx> {
+        match *self {
+            ExprTypeChecked => ExprTypeChecked,
+            UncheckedExprHint(_) | UncheckedExprNoHint => UncheckedExprNoHint,
+        }
+    }
+    fn checked_or(&self, ty: Ty<'tcx>) -> EvalHint<'tcx> {
+        match *self {
+            ExprTypeChecked => ExprTypeChecked,
+            _ => UncheckedExprHint(ty),
+        }
+    }
+}
+
+macro_rules! signal {
+    ($e:expr, $exn:expr) => {
+        return Err(ConstEvalErr { span: $e.span, kind: $exn })
+    }
+}
+
+/// Evaluate a constant expression in a context where the expression isn't
+/// guaranteed to be evaluatable. `ty_hint` is usually ExprTypeChecked,
+/// but a few places need to evaluate constants during type-checking, like
+/// computing the length of an array. (See also the FIXME above EvalHint.)
+pub fn eval_const_expr_partial<'tcx>(tcx: &TyCtxt<'tcx>,
+                                     e: &Expr,
+                                     ty_hint: EvalHint<'tcx>,
+                                     fn_args: FnArgMap) -> EvalResult {
+    // Try to compute the type of the expression based on the EvalHint.
+    // (See also the definition of EvalHint, and the FIXME above EvalHint.)
+    let ety = match ty_hint {
+        ExprTypeChecked => {
+            // After type-checking, expr_ty is guaranteed to succeed.
+            Some(tcx.expr_ty(e))
+        }
+        UncheckedExprHint(ty) => {
+            // Use the type hint; it's not guaranteed to be right, but it's
+            // usually good enough.
+            Some(ty)
+        }
+        UncheckedExprNoHint => {
+            // This expression might not be type-checked, and we have no hint.
+            // Try to query the context for a type anyway; we might get lucky
+            // (for example, if the expression was imported from another crate).
+            tcx.expr_ty_opt(e)
+        }
+    };
+    let result = match e.node {
+      hir::ExprUnary(hir::UnNeg, ref inner) => {
+        // unary neg literals already got their sign during creation
+        if let hir::ExprLit(ref lit) = inner.node {
+            use syntax::ast::*;
+            use syntax::ast::LitIntType::*;
+            const I8_OVERFLOW: u64 = ::std::i8::MAX as u64 + 1;
+            const I16_OVERFLOW: u64 = ::std::i16::MAX as u64 + 1;
+            const I32_OVERFLOW: u64 = ::std::i32::MAX as u64 + 1;
+            const I64_OVERFLOW: u64 = ::std::i64::MAX as u64 + 1;
+            match (&lit.node, ety.map(|t| &t.sty)) {
+                (&LitKind::Int(I8_OVERFLOW, Unsuffixed), Some(&ty::TyInt(IntTy::I8))) |
+                (&LitKind::Int(I8_OVERFLOW, Signed(IntTy::I8)), _) => {
+                    return Ok(Integral(I8(::std::i8::MIN)))
+                },
+                (&LitKind::Int(I16_OVERFLOW, Unsuffixed), Some(&ty::TyInt(IntTy::I16))) |
+                (&LitKind::Int(I16_OVERFLOW, Signed(IntTy::I16)), _) => {
+                    return Ok(Integral(I16(::std::i16::MIN)))
+                },
+                (&LitKind::Int(I32_OVERFLOW, Unsuffixed), Some(&ty::TyInt(IntTy::I32))) |
+                (&LitKind::Int(I32_OVERFLOW, Signed(IntTy::I32)), _) => {
+                    return Ok(Integral(I32(::std::i32::MIN)))
+                },
+                (&LitKind::Int(I64_OVERFLOW, Unsuffixed), Some(&ty::TyInt(IntTy::I64))) |
+                (&LitKind::Int(I64_OVERFLOW, Signed(IntTy::I64)), _) => {
+                    return Ok(Integral(I64(::std::i64::MIN)))
+                },
+                (&LitKind::Int(n, Unsuffixed), Some(&ty::TyInt(IntTy::Is))) |
+                (&LitKind::Int(n, Signed(IntTy::Is)), _) => {
+                    match tcx.sess.target.int_type {
+                        IntTy::I32 => if n == I32_OVERFLOW {
+                            return Ok(Integral(Isize(Is32(::std::i32::MIN))));
+                        },
+                        IntTy::I64 => if n == I64_OVERFLOW {
+                            return Ok(Integral(Isize(Is64(::std::i64::MIN))));
+                        },
+                        _ => bug!(),
+                    }
+                },
+                _ => {},
+            }
+        }
+        match eval_const_expr_partial(tcx, &inner, ty_hint, fn_args)? {
+          Float(f) => Float(-f),
+          Integral(i) => Integral(math!(e, -i)),
+          const_val => signal!(e, NegateOn(const_val)),
+        }
+      }
+      hir::ExprUnary(hir::UnNot, ref inner) => {
+        match eval_const_expr_partial(tcx, &inner, ty_hint, fn_args)? {
+          Integral(i) => Integral(math!(e, !i)),
+          Bool(b) => Bool(!b),
+          const_val => signal!(e, NotOn(const_val)),
+        }
+      }
+      hir::ExprUnary(hir::UnDeref, _) => signal!(e, UnimplementedConstVal("deref operation")),
+      hir::ExprBinary(op, ref a, ref b) => {
+        let b_ty = match op.node {
+            hir::BiShl | hir::BiShr => ty_hint.erase_hint(),
+            _ => ty_hint
+        };
+        // Technically, if we have no type hints but the integral evaluation
+        // of one operand yields a type (through a type suffix, a cast, or a
+        // const definition's type), we would need to re-evaluate the other
+        // operand of the BinOp if its type was not inferred.
+        match (eval_const_expr_partial(tcx, &a, ty_hint, fn_args)?,
+               eval_const_expr_partial(tcx, &b, b_ty, fn_args)?) {
+          (Float(a), Float(b)) => {
+            match op.node {
+              hir::BiAdd => Float(a + b),
+              hir::BiSub => Float(a - b),
+              hir::BiMul => Float(a * b),
+              hir::BiDiv => Float(a / b),
+              hir::BiRem => Float(a % b),
+              hir::BiEq => Bool(a == b),
+              hir::BiLt => Bool(a < b),
+              hir::BiLe => Bool(a <= b),
+              hir::BiNe => Bool(a != b),
+              hir::BiGe => Bool(a >= b),
+              hir::BiGt => Bool(a > b),
+              _ => signal!(e, InvalidOpForFloats(op.node)),
+            }
+          }
+          (Integral(a), Integral(b)) => {
+            use std::cmp::Ordering::*;
+            match op.node {
+              hir::BiAdd => Integral(math!(e, a + b)),
+              hir::BiSub => Integral(math!(e, a - b)),
+              hir::BiMul => Integral(math!(e, a * b)),
+              hir::BiDiv => Integral(math!(e, a / b)),
+              hir::BiRem => Integral(math!(e, a % b)),
+              hir::BiBitAnd => Integral(math!(e, a & b)),
+              hir::BiBitOr => Integral(math!(e, a | b)),
+              hir::BiBitXor => Integral(math!(e, a ^ b)),
+              hir::BiShl => Integral(math!(e, a << b)),
+              hir::BiShr => Integral(math!(e, a >> b)),
+              hir::BiEq => Bool(math!(e, a.try_cmp(b)) == Equal),
+              hir::BiLt => Bool(math!(e, a.try_cmp(b)) == Less),
+              hir::BiLe => Bool(math!(e, a.try_cmp(b)) != Greater),
+              hir::BiNe => Bool(math!(e, a.try_cmp(b)) != Equal),
+              hir::BiGe => Bool(math!(e, a.try_cmp(b)) != Less),
+              hir::BiGt => Bool(math!(e, a.try_cmp(b)) == Greater),
+              _ => signal!(e, InvalidOpForInts(op.node)),
+            }
+          }
+          (Bool(a), Bool(b)) => {
+            Bool(match op.node {
+              hir::BiAnd => a && b,
+              hir::BiOr => a || b,
+              hir::BiBitXor => a ^ b,
+              hir::BiBitAnd => a & b,
+              hir::BiBitOr => a | b,
+              hir::BiEq => a == b,
+              hir::BiNe => a != b,
+              _ => signal!(e, InvalidOpForBools(op.node)),
+             })
+          }
+
+          _ => signal!(e, MiscBinaryOp),
+        }
+      }
+      hir::ExprCast(ref base, ref target_ty) => {
+        let ety = ast_ty_to_prim_ty(tcx, &target_ty).or_else(|| ety)
+                .unwrap_or_else(|| {
+                    tcx.sess.span_fatal(target_ty.span,
+                                        "target type not found for const cast")
+                });
+
+        let base_hint = if let ExprTypeChecked = ty_hint {
+            ExprTypeChecked
+        } else {
+            match tcx.expr_ty_opt(&base) {
+                Some(t) => UncheckedExprHint(t),
+                None => ty_hint
+            }
+        };
+
+        let val = match eval_const_expr_partial(tcx, &base, base_hint, fn_args) {
+            Ok(val) => val,
+            Err(ConstEvalErr { kind: TypeMismatch(_, val), .. }) => {
+                // Something like `5i8 as usize` doesn't need a type hint for the base;
+                // instead, take the type hint from the inner value.
+                let hint = match val.int_type() {
+                    Some(IntType::UnsignedInt(ty)) => ty_hint.checked_or(tcx.mk_mach_uint(ty)),
+                    Some(IntType::SignedInt(ty)) => ty_hint.checked_or(tcx.mk_mach_int(ty)),
+                    // we had a type hint, so we can't have an unknown type
+                    None => bug!(),
+                };
+                eval_const_expr_partial(tcx, &base, hint, fn_args)?
+            },
+            Err(e) => return Err(e),
+        };
+        match cast_const(tcx, val, ety) {
+            Ok(val) => val,
+            Err(kind) => return Err(ConstEvalErr { span: e.span, kind: kind }),
+        }
+      }
+      hir::ExprPath(..) => {
+          let opt_def = if let Some(def) = tcx.def_map.borrow().get(&e.id) {
+              // After type-checking, def_map contains definition of the
+              // item referred to by the path. During type-checking, it
+              // can contain the raw output of path resolution, which
+              // might be a partially resolved path.
+              // FIXME: There's probably a better way to make sure we don't
+              // panic here.
+              if def.depth != 0 {
+                  signal!(e, UnresolvedPath);
+              }
+              def.full_def()
+          } else {
+              signal!(e, NonConstPath);
+          };
+          match opt_def {
+              Def::Const(def_id) |
+              Def::AssociatedConst(def_id) => {
+                  let substs = if let ExprTypeChecked = ty_hint {
+                      Some(tcx.node_id_item_substs(e.id).substs)
+                  } else {
+                      None
+                  };
+                  if let Some((e, ty)) = lookup_const_by_id(tcx, def_id, substs) {
+                      let item_hint = match ty {
+                          Some(ty) => ty_hint.checked_or(ty),
+                          None => ty_hint,
+                      };
+                      eval_const_expr_partial(tcx, e, item_hint, None)?
+                  } else {
+                      signal!(e, NonConstPath);
+                  }
+              },
+              Def::Variant(enum_def, variant_def) => {
+                  if let Some(const_expr) = lookup_variant_by_id(tcx, enum_def, variant_def) {
+                      eval_const_expr_partial(tcx, const_expr, ty_hint, None)?
+                  } else {
+                      signal!(e, UnimplementedConstVal("enum variants"));
+                  }
+              }
+              Def::Struct(..) => {
+                  ConstVal::Struct(e.id)
+              }
+              Def::Local(_, id) => {
+                  debug!("Def::Local({:?}): {:?}", id, fn_args);
+                  if let Some(val) = fn_args.and_then(|args| args.get(&id)) {
+                      val.clone()
+                  } else {
+                      signal!(e, NonConstPath);
+                  }
+              },
+              Def::Method(id) | Def::Fn(id) => Function(id),
+              _ => signal!(e, NonConstPath),
+          }
+      }
+      hir::ExprCall(ref callee, ref args) => {
+          let sub_ty_hint = ty_hint.erase_hint();
+          let callee_val = eval_const_expr_partial(tcx, callee, sub_ty_hint, fn_args)?;
+          let did = match callee_val {
+              Function(did) => did,
+              Struct(_) => signal!(e, UnimplementedConstVal("tuple struct constructors")),
+              callee => signal!(e, CallOn(callee)),
+          };
+          let (decl, result) = if let Some(fn_like) = lookup_const_fn_by_id(tcx, did) {
+              (fn_like.decl(), &fn_like.body().expr)
+          } else {
+              signal!(e, NonConstPath)
+          };
+          let result = result.as_ref().expect("const fn has no result expression");
+          assert_eq!(decl.inputs.len(), args.len());
+
+          let mut call_args = NodeMap();
+          for (arg, arg_expr) in decl.inputs.iter().zip(args.iter()) {
+              let arg_hint = ty_hint.erase_hint();
+              let arg_val = eval_const_expr_partial(
+                  tcx,
+                  arg_expr,
+                  arg_hint,
+                  fn_args
+              )?;
+              debug!("const call arg: {:?}", arg);
+              let old = call_args.insert(arg.pat.id, arg_val);
+              assert!(old.is_none());
+          }
+          debug!("const call({:?})", call_args);
+          eval_const_expr_partial(tcx, &result, ty_hint, Some(&call_args))?
+      },
+      hir::ExprLit(ref lit) => lit_to_const(&lit.node, tcx, ety, lit.span)?,
+      hir::ExprBlock(ref block) => {
+        match block.expr {
+            Some(ref expr) => eval_const_expr_partial(tcx, &expr, ty_hint, fn_args)?,
+            None => signal!(e, UnimplementedConstVal("empty block")),
+        }
+      }
+      hir::ExprType(ref e, _) => eval_const_expr_partial(tcx, &e, ty_hint, fn_args)?,
+      hir::ExprTup(_) => Tuple(e.id),
+      hir::ExprStruct(..) => Struct(e.id),
+      hir::ExprIndex(ref arr, ref idx) => {
+        if !tcx.sess.features.borrow().const_indexing {
+            signal!(e, IndexOpFeatureGated);
+        }
+        let arr_hint = ty_hint.erase_hint();
+        let arr = eval_const_expr_partial(tcx, arr, arr_hint, fn_args)?;
+        let idx_hint = ty_hint.checked_or(tcx.types.usize);
+        let idx = match eval_const_expr_partial(tcx, idx, idx_hint, fn_args)? {
+            Integral(Usize(i)) => i.as_u64(tcx.sess.target.uint_type),
+            Integral(_) => bug!(),
+            _ => signal!(idx, IndexNotInt),
+        };
+        assert_eq!(idx as usize as u64, idx);
+        match arr {
+            Array(_, n) if idx >= n => signal!(e, IndexOutOfBounds),
+            Array(v, n) => if let hir::ExprVec(ref v) = tcx.map.expect_expr(v).node {
+                assert_eq!(n as usize as u64, n);
+                eval_const_expr_partial(tcx, &v[idx as usize], ty_hint, fn_args)?
+            } else {
+                bug!()
+            },
+
+            Repeat(_, n) if idx >= n => signal!(e, IndexOutOfBounds),
+            Repeat(elem, _) => eval_const_expr_partial(
+                tcx,
+                &tcx.map.expect_expr(elem),
+                ty_hint,
+                fn_args,
+            )?,
+
+            ByteStr(ref data) if idx >= data.len() as u64 => signal!(e, IndexOutOfBounds),
+            ByteStr(data) => {
+                Integral(U8(data[idx as usize]))
+            },
+
+            Str(ref s) if idx as usize >= s.len() => signal!(e, IndexOutOfBounds),
+            // FIXME: return a const char
+            Str(_) => signal!(e, UnimplementedConstVal("indexing into str")),
+            _ => signal!(e, IndexedNonVec),
+        }
+      }
+      hir::ExprVec(ref v) => Array(e.id, v.len() as u64),
+      hir::ExprRepeat(_, ref n) => {
+          let len_hint = ty_hint.checked_or(tcx.types.usize);
+          Repeat(
+              e.id,
+              match eval_const_expr_partial(tcx, &n, len_hint, fn_args)? {
+                  Integral(Usize(i)) => i.as_u64(tcx.sess.target.uint_type),
+                  Integral(_) => signal!(e, RepeatCountNotNatural),
+                  _ => signal!(e, RepeatCountNotInt),
+              },
+          )
+      },
+      hir::ExprTupField(ref base, index) => {
+        let base_hint = ty_hint.erase_hint();
+        let c = eval_const_expr_partial(tcx, base, base_hint, fn_args)?;
+        if let Tuple(tup_id) = c {
+            if let hir::ExprTup(ref fields) = tcx.map.expect_expr(tup_id).node {
+                if index.node < fields.len() {
+                    eval_const_expr_partial(tcx, &fields[index.node], ty_hint, fn_args)?
+                } else {
+                    signal!(e, TupleIndexOutOfBounds);
+                }
+            } else {
+                bug!()
+            }
+        } else {
+            signal!(base, ExpectedConstTuple);
+        }
+      }
+      hir::ExprField(ref base, field_name) => {
+        let base_hint = ty_hint.erase_hint();
+        // Get the base expression if it is a struct and it is constant
+        let c = eval_const_expr_partial(tcx, base, base_hint, fn_args)?;
+        if let Struct(struct_id) = c {
+            if let hir::ExprStruct(_, ref fields, _) = tcx.map.expect_expr(struct_id).node {
+                // Check that the given field exists and evaluate it.
+                // Fields are matched by name, not ident; comparing idents makes run-pass/issue-19244 fail.
+                if let Some(f) = fields.iter().find(|f| f.name.node
+                                                     == field_name.node) {
+                    eval_const_expr_partial(tcx, &f.expr, ty_hint, fn_args)?
+                } else {
+                    signal!(e, MissingStructField);
+                }
+            } else {
+                bug!()
+            }
+        } else {
+            signal!(base, ExpectedConstStruct);
+        }
+      }
+      hir::ExprAddrOf(..) => signal!(e, UnimplementedConstVal("address operator")),
+      _ => signal!(e, MiscCatchAll)
+    };
+
+    match (ety.map(|t| &t.sty), result) {
+        (Some(ref ty_hint), Integral(i)) => Ok(Integral(infer(i, tcx, ty_hint, e.span)?)),
+        (_, result) => Ok(result),
+    }
+}
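+
+// A few ordinary user-level constants, shown purely as an illustration of what
+// the evaluator above is expected to fold (hedged examples, not compiler code;
+// the failing paths correspond to the ErrKind variants signalled above):
+//
+//     const A: i8 = -128;          // the unary-neg literal special case yields i8::MIN
+//     const B: u8 = !0u8;          // UnNot on an integral
+//     const C: bool = 1 + 1 == 2;  // integral BinOp followed by a comparison
+//     const D: u8 = b"abc"[1];     // ByteStr indexing, gated on `const_indexing`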
+
+fn infer<'tcx>(
+    i: ConstInt,
+    tcx: &TyCtxt<'tcx>,
+    ty_hint: &ty::TypeVariants<'tcx>,
+    span: Span
+) -> Result<ConstInt, ConstEvalErr> {
+    use syntax::ast::*;
+
+    let err = |e| ConstEvalErr {
+        span: span,
+        kind: e,
+    };
+
+    match (ty_hint, i) {
+        (&ty::TyInt(IntTy::I8), result @ I8(_)) => Ok(result),
+        (&ty::TyInt(IntTy::I16), result @ I16(_)) => Ok(result),
+        (&ty::TyInt(IntTy::I32), result @ I32(_)) => Ok(result),
+        (&ty::TyInt(IntTy::I64), result @ I64(_)) => Ok(result),
+        (&ty::TyInt(IntTy::Is), result @ Isize(_)) => Ok(result),
+
+        (&ty::TyUint(UintTy::U8), result @ U8(_)) => Ok(result),
+        (&ty::TyUint(UintTy::U16), result @ U16(_)) => Ok(result),
+        (&ty::TyUint(UintTy::U32), result @ U32(_)) => Ok(result),
+        (&ty::TyUint(UintTy::U64), result @ U64(_)) => Ok(result),
+        (&ty::TyUint(UintTy::Us), result @ Usize(_)) => Ok(result),
+
+        (&ty::TyInt(IntTy::I8), Infer(i)) => Ok(I8(i as i64 as i8)),
+        (&ty::TyInt(IntTy::I16), Infer(i)) => Ok(I16(i as i64 as i16)),
+        (&ty::TyInt(IntTy::I32), Infer(i)) => Ok(I32(i as i64 as i32)),
+        (&ty::TyInt(IntTy::I64), Infer(i)) => Ok(I64(i as i64)),
+        (&ty::TyInt(IntTy::Is), Infer(i)) => {
+            match ConstIsize::new(i as i64, tcx.sess.target.int_type) {
+                Ok(val) => Ok(Isize(val)),
+                Err(_) => Ok(Isize(ConstIsize::Is32(i as i64 as i32))),
+            }
+        },
+
+        (&ty::TyInt(IntTy::I8), InferSigned(i)) => Ok(I8(i as i8)),
+        (&ty::TyInt(IntTy::I16), InferSigned(i)) => Ok(I16(i as i16)),
+        (&ty::TyInt(IntTy::I32), InferSigned(i)) => Ok(I32(i as i32)),
+        (&ty::TyInt(IntTy::I64), InferSigned(i)) => Ok(I64(i)),
+        (&ty::TyInt(IntTy::Is), InferSigned(i)) => {
+            match ConstIsize::new(i, tcx.sess.target.int_type) {
+                Ok(val) => Ok(Isize(val)),
+                Err(_) => Ok(Isize(ConstIsize::Is32(i as i32))),
+            }
+        },
+
+        (&ty::TyUint(UintTy::U8), Infer(i)) => Ok(U8(i as u8)),
+        (&ty::TyUint(UintTy::U16), Infer(i)) => Ok(U16(i as u16)),
+        (&ty::TyUint(UintTy::U32), Infer(i)) => Ok(U32(i as u32)),
+        (&ty::TyUint(UintTy::U64), Infer(i)) => Ok(U64(i)),
+        (&ty::TyUint(UintTy::Us), Infer(i)) => {
+            match ConstUsize::new(i, tcx.sess.target.uint_type) {
+                Ok(val) => Ok(Usize(val)),
+                Err(_) => Ok(Usize(ConstUsize::Us32(i as u32))),
+            }
+        },
+        (&ty::TyUint(_), InferSigned(_)) => Err(err(IntermediateUnsignedNegative)),
+
+        (&ty::TyInt(ity), i) => Err(err(TypeMismatch(ity.to_string(), i))),
+        (&ty::TyUint(ity), i) => Err(err(TypeMismatch(ity.to_string(), i))),
+
+        (&ty::TyEnum(ref adt, _), i) => {
+            let hints = tcx.lookup_repr_hints(adt.did);
+            let int_ty = tcx.enum_repr_type(hints.iter().next());
+            infer(i, tcx, &int_ty.to_ty(tcx).sty, span)
+        },
+        (_, i) => Err(err(BadType(ConstVal::Integral(i)))),
+    }
+}
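+
+// Illustrative mappings of the `infer` helper above (shorthand, not literal
+// Rust: `TyInt(I8)` stands for `ty::TyInt(ast::IntTy::I8)`; the values are
+// only examples of the rules spelled out in the match arms):
+//
+//     infer(InferSigned(-1), tcx, &TyInt(I8),  span) => Ok(I8(-1))
+//     infer(Infer(200),      tcx, &TyUint(U8), span) => Ok(U8(200))
+//     infer(InferSigned(-1), tcx, &TyUint(U8), span) => Err(IntermediateUnsignedNegative)
+//     infer(I8(5),           tcx, &TyInt(I16), span) => Err(TypeMismatch("i16", I8(5)))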
+
+fn resolve_trait_associated_const<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>,
+                                                ti: &'tcx hir::TraitItem,
+                                                trait_id: DefId,
+                                                rcvr_substs: subst::Substs<'tcx>)
+                                                -> Option<(&'tcx Expr, Option<ty::Ty<'tcx>>)>
+{
+    let trait_ref = ty::Binder(
+        rcvr_substs.erase_regions().to_trait_ref(tcx, trait_id)
+    );
+    debug!("resolve_trait_associated_const: trait_ref={:?}",
+           trait_ref);
+
+    tcx.populate_implementations_for_trait_if_necessary(trait_ref.def_id());
+    let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, ProjectionMode::AnyFinal);
+
+    let mut selcx = traits::SelectionContext::new(&infcx);
+    let obligation = traits::Obligation::new(traits::ObligationCause::dummy(),
+                                             trait_ref.to_poly_trait_predicate());
+    let selection = match selcx.select(&obligation) {
+        Ok(Some(vtable)) => vtable,
+        // Still ambiguous, so give up and let the caller decide whether this
+        // expression is really needed yet. Some associated constant values
+        // can't be evaluated until monomorphization is done in trans.
+        Ok(None) => {
+            return None
+        }
+        Err(_) => {
+            return None
+        }
+    };
+
+    // NOTE: this code does not currently account for specialization, but when
+    // it does so, it should hook into the ProjectionMode to determine when the
+    // constant should resolve; this will also require plumbing through to this
+    // function whether we are in "trans mode" to pick the right ProjectionMode
+    // when constructing the inference context above.
+    match selection {
+        traits::VtableImpl(ref impl_data) => {
+            match tcx.associated_consts(impl_data.impl_def_id)
+                     .iter().find(|ic| ic.name == ti.name) {
+                Some(ic) => lookup_const_by_id(tcx, ic.def_id, None),
+                None => match ti.node {
+                    hir::ConstTraitItem(ref ty, Some(ref expr)) => {
+                        Some((&*expr, ast_ty_to_prim_ty(tcx, ty)))
+                    },
+                    _ => None,
+                },
+            }
+        }
+        _ => {
+            span_bug!(
+                ti.span,
+                "resolve_trait_associated_const: unexpected vtable type")
+        }
+    }
+}
+
+fn cast_const_int<'tcx>(tcx: &TyCtxt<'tcx>, val: ConstInt, ty: ty::Ty) -> CastResult {
+    let v = val.to_u64_unchecked();
+    match ty.sty {
+        ty::TyBool if v == 0 => Ok(Bool(false)),
+        ty::TyBool if v == 1 => Ok(Bool(true)),
+        ty::TyInt(ast::IntTy::I8) => Ok(Integral(I8(v as i64 as i8))),
+        ty::TyInt(ast::IntTy::I16) => Ok(Integral(I16(v as i64 as i16))),
+        ty::TyInt(ast::IntTy::I32) => Ok(Integral(I32(v as i64 as i32))),
+        ty::TyInt(ast::IntTy::I64) => Ok(Integral(I64(v as i64))),
+        ty::TyInt(ast::IntTy::Is) => {
+            match ConstIsize::new(v as i64, tcx.sess.target.int_type) {
+                Ok(val) => Ok(Integral(Isize(val))),
+                Err(_) => Ok(Integral(Isize(ConstIsize::Is32(v as i64 as i32)))),
+            }
+        },
+        ty::TyUint(ast::UintTy::U8) => Ok(Integral(U8(v as u8))),
+        ty::TyUint(ast::UintTy::U16) => Ok(Integral(U16(v as u16))),
+        ty::TyUint(ast::UintTy::U32) => Ok(Integral(U32(v as u32))),
+        ty::TyUint(ast::UintTy::U64) => Ok(Integral(U64(v))),
+        ty::TyUint(ast::UintTy::Us) => {
+            match ConstUsize::new(v, tcx.sess.target.uint_type) {
+                Ok(val) => Ok(Integral(Usize(val))),
+                Err(_) => Ok(Integral(Usize(ConstUsize::Us32(v as u32)))),
+            }
+        },
+        ty::TyFloat(ast::FloatTy::F64) if val.is_negative() => {
+            // FIXME: this could probably be prettier
+            // there's no easy way to turn an `Infer` into a f64
+            let val = (-val).map_err(Math)?;
+            let val = val.to_u64().unwrap() as f64;
+            let val = -val;
+            Ok(Float(val))
+        },
+        ty::TyFloat(ast::FloatTy::F64) => Ok(Float(val.to_u64().unwrap() as f64)),
+        ty::TyFloat(ast::FloatTy::F32) if val.is_negative() => {
+            let val = (-val).map_err(Math)?;
+            let val = val.to_u64().unwrap() as f32;
+            let val = -val;
+            Ok(Float(val as f64))
+        },
+        ty::TyFloat(ast::FloatTy::F32) => Ok(Float(val.to_u64().unwrap() as f32 as f64)),
+        ty::TyRawPtr(_) => Err(ErrKind::UnimplementedConstVal("casting an address to a raw ptr")),
+        _ => Err(CannotCast),
+    }
+}
+
+fn cast_const_float<'tcx>(tcx: &TyCtxt<'tcx>, f: f64, ty: ty::Ty) -> CastResult {
+    match ty.sty {
+        ty::TyInt(_) if f >= 0.0 => cast_const_int(tcx, Infer(f as u64), ty),
+        ty::TyInt(_) => cast_const_int(tcx, InferSigned(f as i64), ty),
+        ty::TyUint(_) if f >= 0.0 => cast_const_int(tcx, Infer(f as u64), ty),
+        ty::TyFloat(ast::FloatTy::F64) => Ok(Float(f)),
+        ty::TyFloat(ast::FloatTy::F32) => Ok(Float(f as f32 as f64)),
+        _ => Err(CannotCast),
+    }
+}
+
+fn cast_const<'tcx>(tcx: &TyCtxt<'tcx>, val: ConstVal, ty: ty::Ty) -> CastResult {
+    match val {
+        Integral(i) => cast_const_int(tcx, i, ty),
+        Bool(b) => cast_const_int(tcx, Infer(b as u64), ty),
+        Float(f) => cast_const_float(tcx, f, ty),
+        Char(c) => cast_const_int(tcx, Infer(c as u64), ty),
+        Function(_) => Err(UnimplementedConstVal("casting fn pointers")),
+        _ => Err(CannotCast),
+    }
+}
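+
+// Example casts handled by `cast_const` and its helpers (illustrative only;
+// the values follow the wrapping `as`-cast semantics encoded above):
+//
+//     Bool(true)       as u8  => Integral(U8(1))
+//     Char('A')        as u32 => Integral(U32(65))
+//     Integral(I8(-1)) as u8  => Integral(U8(255))   // via to_u64_unchecked + truncation
+//     Float(1.5)       as i32 => Integral(I32(1))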
+
+fn lit_to_const<'tcx>(lit: &ast::LitKind,
+                      tcx: &TyCtxt<'tcx>,
+                      ty_hint: Option<Ty<'tcx>>,
+                      span: Span,
+                      ) -> Result<ConstVal, ConstEvalErr> {
+    use syntax::ast::*;
+    use syntax::ast::LitIntType::*;
+    match *lit {
+        LitKind::Str(ref s, _) => Ok(Str((*s).clone())),
+        LitKind::ByteStr(ref data) => Ok(ByteStr(data.clone())),
+        LitKind::Byte(n) => Ok(Integral(U8(n))),
+        LitKind::Int(n, Signed(ity)) => {
+            infer(InferSigned(n as i64), tcx, &ty::TyInt(ity), span).map(Integral)
+        },
+
+        LitKind::Int(n, Unsuffixed) => {
+            match ty_hint.map(|t| &t.sty) {
+                Some(&ty::TyInt(ity)) => {
+                    infer(InferSigned(n as i64), tcx, &ty::TyInt(ity), span).map(Integral)
+                },
+                Some(&ty::TyUint(uty)) => {
+                    infer(Infer(n), tcx, &ty::TyUint(uty), span).map(Integral)
+                },
+                None => Ok(Integral(Infer(n))),
+                Some(&ty::TyEnum(ref adt, _)) => {
+                    let hints = tcx.lookup_repr_hints(adt.did);
+                    let int_ty = tcx.enum_repr_type(hints.iter().next());
+                    infer(Infer(n), tcx, &int_ty.to_ty(tcx).sty, span).map(Integral)
+                },
+                Some(ty_hint) => bug!("bad ty_hint: {:?}, {:?}", ty_hint, lit),
+            }
+        },
+        LitKind::Int(n, Unsigned(ity)) => {
+            infer(Infer(n), tcx, &ty::TyUint(ity), span).map(Integral)
+        },
+
+        LitKind::Float(ref n, _) |
+        LitKind::FloatUnsuffixed(ref n) => {
+            if let Ok(x) = n.parse::<f64>() {
+                Ok(Float(x))
+            } else {
+                // FIXME(#31407) this is only necessary because float parsing is buggy
+                span_bug!(span, "could not evaluate float literal (see issue #31407)");
+            }
+        }
+        LitKind::Bool(b) => Ok(Bool(b)),
+        LitKind::Char(c) => Ok(Char(c)),
+    }
+}
+
+pub fn compare_const_vals(a: &ConstVal, b: &ConstVal) -> Option<Ordering> {
+    match (a, b) {
+        (&Integral(a), &Integral(b)) => a.try_cmp(b).ok(),
+        (&Float(a), &Float(b)) => {
+            // This is pretty bad but it is the existing behavior.
+            Some(if a == b {
+                Ordering::Equal
+            } else if a < b {
+                Ordering::Less
+            } else {
+                Ordering::Greater
+            })
+        }
+        (&Str(ref a), &Str(ref b)) => Some(a.cmp(b)),
+        (&Bool(a), &Bool(b)) => Some(a.cmp(&b)),
+        (&ByteStr(ref a), &ByteStr(ref b)) => Some(a.cmp(b)),
+        (&Char(a), &Char(ref b)) => Some(a.cmp(b)),
+        _ => None,
+    }
+}
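+
+// Note that this comparison only succeeds for values of matching kinds and,
+// for integrals, matching types; mixed kinds yield `None`. For example
+// (illustrative):
+//
+//     compare_const_vals(&Integral(I8(1)), &Integral(I8(2))) == Some(Ordering::Less)
+//     compare_const_vals(&Integral(I8(1)), &Integral(U8(2))) == None
+//     compare_const_vals(&Bool(true), &Integral(I8(1)))      == None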
+
+pub fn compare_lit_exprs<'tcx>(tcx: &TyCtxt<'tcx>,
+                               a: &Expr,
+                               b: &Expr) -> Option<Ordering> {
+    let a = match eval_const_expr_partial(tcx, a, ExprTypeChecked, None) {
+        Ok(a) => a,
+        Err(e) => {
+            tcx.sess.span_err(a.span, &e.description());
+            return None;
+        }
+    };
+    let b = match eval_const_expr_partial(tcx, b, ExprTypeChecked, None) {
+        Ok(b) => b,
+        Err(e) => {
+            tcx.sess.span_err(b.span, &e.description());
+            return None;
+        }
+    };
+    compare_const_vals(&a, &b)
+}
+
+
+/// Returns the repeat count for a repeating vector expression.
+pub fn eval_repeat_count(tcx: &TyCtxt, count_expr: &hir::Expr) -> usize {
+    let hint = UncheckedExprHint(tcx.types.usize);
+    match eval_const_expr_partial(tcx, count_expr, hint, None) {
+        Ok(Integral(Usize(count))) => {
+            let val = count.as_u64(tcx.sess.target.uint_type);
+            assert_eq!(val as usize as u64, val);
+            val as usize
+        },
+        Ok(const_val) => {
+            span_err!(tcx.sess, count_expr.span, E0306,
+                      "expected positive integer for repeat count, found {}",
+                      const_val.description());
+            0
+        }
+        Err(err) => {
+            let err_msg = match count_expr.node {
+                hir::ExprPath(None, hir::Path {
+                    global: false,
+                    ref segments,
+                    ..
+                }) if segments.len() == 1 =>
+                    format!("found variable"),
+                _ => match err.kind {
+                    MiscCatchAll => format!("but found {}", err.description()),
+                    _ => format!("but {}", err.description())
+                }
+            };
+            span_err!(tcx.sess, count_expr.span, E0307,
+                "expected constant integer for repeat count, {}", err_msg);
+            0
+        }
+    }
+}
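+
+// For example (an ordinary user-level constant, shown only to illustrate what
+// this helper computes):
+//
+//     static XS: [u8; 5] = [0u8; 2 + 3];   // eval_repeat_count returns 5 for `2 + 3`
+//
+// while a non-constant count such as `[0u8; some_local]` reports E0307 and
+// falls back to a count of 0 so compilation can continue.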
diff --git a/src/librustc_const_eval/lib.rs b/src/librustc_const_eval/lib.rs
new file mode 100644 (file)
index 0000000..085888d
--- /dev/null
@@ -0,0 +1,52 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Constant evaluation on the HIR and code to validate patterns/matches.
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![crate_name = "rustc_const_eval"]
+#![unstable(feature = "rustc_private", issue = "27812")]
+#![crate_type = "dylib"]
+#![crate_type = "rlib"]
+#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
+      html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
+      html_root_url = "https://doc.rust-lang.org/nightly/")]
+
+
+#![feature(rustc_private)]
+#![feature(staged_api)]
+#![feature(rustc_diagnostic_macros)]
+#![feature(slice_patterns)]
+#![feature(iter_arith)]
+#![feature(question_mark)]
+
+#[macro_use] extern crate syntax;
+#[macro_use] extern crate log;
+#[macro_use] extern crate rustc;
+extern crate rustc_back;
+extern crate rustc_const_math;
+extern crate graphviz;
+
+extern crate serialize as rustc_serialize; // used by deriving
+
+// NB: This module needs to be declared first so diagnostics are
+// registered before they are used.
+pub mod diagnostics;
+
+mod eval;
+pub mod check_match;
+
+pub use eval::*;
+
+// Build the diagnostics array at the end so that the metadata includes error use sites.
+__build_diagnostic_array! { librustc_const_eval, DIAGNOSTICS }
diff --git a/src/librustc_const_math/Cargo.toml b/src/librustc_const_math/Cargo.toml
new file mode 100644 (file)
index 0000000..10aadab
--- /dev/null
@@ -0,0 +1,14 @@
+[package]
+authors = ["The Rust Project Developers"]
+name = "rustc_const_math"
+version = "0.0.0"
+
+[lib]
+name = "rustc_const_math"
+path = "lib.rs"
+crate-type = ["dylib"]
+
+[dependencies]
+log = { path = "../liblog" }
+serialize = { path = "../libserialize" }
+syntax = { path = "../libsyntax" }
diff --git a/src/librustc_const_math/err.rs b/src/librustc_const_math/err.rs
new file mode 100644 (file)
index 0000000..126b382
--- /dev/null
@@ -0,0 +1,85 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use syntax::ast;
+
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum ConstMathErr {
+    NotInRange,
+    CmpBetweenUnequalTypes,
+    UnequalTypes(Op),
+    Overflow(Op),
+    ShiftNegative,
+    DivisionByZero,
+    RemainderByZero,
+    UnsignedNegation,
+    ULitOutOfRange(ast::UintTy),
+    LitOutOfRange(ast::IntTy),
+}
+pub use self::ConstMathErr::*;
+
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum Op {
+    Add,
+    Sub,
+    Mul,
+    Div,
+    Rem,
+    Shr,
+    Shl,
+    Neg,
+    BitAnd,
+    BitOr,
+    BitXor,
+}
+
+impl ConstMathErr {
+    pub fn description(&self) -> &'static str {
+        use self::Op::*;
+        match *self {
+            NotInRange => "inferred value out of range",
+            CmpBetweenUnequalTypes => "compared two integrals of different types",
+            UnequalTypes(Add) => "tried to add two integrals of different types",
+            UnequalTypes(Sub) => "tried to subtract two integrals of different types",
+            UnequalTypes(Mul) => "tried to multiply two integrals of different types",
+            UnequalTypes(Div) => "tried to divide two integrals of different types",
+            UnequalTypes(Rem) => {
+                "tried to calculate the remainder of two integrals of different types"
+            },
+            UnequalTypes(BitAnd) => "tried to bitand two integrals of different types",
+            UnequalTypes(BitOr) => "tried to bitor two integrals of different types",
+            UnequalTypes(BitXor) => "tried to xor two integrals of different types",
+            UnequalTypes(_) => unreachable!(),
+            Overflow(Add) => "attempted to add with overflow",
+            Overflow(Sub) => "attempted to subtract with overflow",
+            Overflow(Mul) => "attempted to multiply with overflow",
+            Overflow(Div) => "attempted to divide with overflow",
+            Overflow(Rem) => "attempted to calculate the remainder with overflow",
+            Overflow(Neg) => "attempted to negate with overflow",
+            Overflow(Shr) => "attempted to shift right with overflow",
+            Overflow(Shl) => "attempted to shift left with overflow",
+            Overflow(_) => unreachable!(),
+            ShiftNegative => "attempted to shift by a negative amount",
+            DivisionByZero => "attempted to divide by zero",
+            RemainderByZero => "attempted to calculate the remainder with a divisor of zero",
+            UnsignedNegation => "unary negation of unsigned integer",
+            ULitOutOfRange(ast::UintTy::U8) => "literal out of range for u8",
+            ULitOutOfRange(ast::UintTy::U16) => "literal out of range for u16",
+            ULitOutOfRange(ast::UintTy::U32) => "literal out of range for u32",
+            ULitOutOfRange(ast::UintTy::U64) => "literal out of range for u64",
+            ULitOutOfRange(ast::UintTy::Us) => "literal out of range for usize",
+            LitOutOfRange(ast::IntTy::I8) => "literal out of range for i8",
+            LitOutOfRange(ast::IntTy::I16) => "literal out of range for i16",
+            LitOutOfRange(ast::IntTy::I32) => "literal out of range for i32",
+            LitOutOfRange(ast::IntTy::I64) => "literal out of range for i64",
+            LitOutOfRange(ast::IntTy::Is) => "literal out of range for isize",
+        }
+    }
+}
diff --git a/src/librustc_const_math/int.rs b/src/librustc_const_math/int.rs
new file mode 100644 (file)
index 0000000..658d4d9
--- /dev/null
@@ -0,0 +1,569 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::cmp::Ordering;
+use syntax::attr::IntType;
+use syntax::ast::{IntTy, UintTy};
+
+use super::is::*;
+use super::us::*;
+use super::err::*;
+
+#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, Hash, Eq, PartialEq)]
+pub enum ConstInt {
+    I8(i8),
+    I16(i16),
+    I32(i32),
+    I64(i64),
+    Isize(ConstIsize),
+    U8(u8),
+    U16(u16),
+    U32(u32),
+    U64(u64),
+    Usize(ConstUsize),
+    Infer(u64),
+    InferSigned(i64),
+}
+pub use self::ConstInt::*;
+
+
+macro_rules! bounds {
+    ($($t:ident $min:ident $max:ident)*) => {
+        mod as_u64 {
+            $(
+                #[allow(dead_code)]
+                pub const $min: u64 = ::std::$t::MIN as u64;
+                #[allow(dead_code)]
+                pub const $max: u64 = ::std::$t::MAX as u64;
+            )*
+        }
+        mod as_i64 {
+            $(
+                #[allow(dead_code)]
+                pub const $min: i64 = ::std::$t::MIN as i64;
+                #[allow(dead_code)]
+                pub const $max: i64 = ::std::$t::MAX as i64;
+            )*
+        }
+    }
+}
+
+bounds!{
+    i8 I8MIN I8MAX i16 I16MIN I16MAX i32 I32MIN I32MAX i64 I64MIN I64MAX isize IMIN IMAX
+    u8 U8MIN U8MAX u16 U16MIN U16MAX u32 U32MIN U32MAX u64 U64MIN U64MAX usize UMIN UMAX
+}
+
+impl ConstInt {
+    /// If either value is `Infer` or `InferSigned`, try to turn the value into the type of
+    /// the other value. If both values have no type, don't do anything
+    pub fn infer(self, other: Self) -> Result<(Self, Self), ConstMathErr> {
+        let inferred = match (self, other) {
+            (InferSigned(_), InferSigned(_))
+            | (Infer(_), Infer(_)) => self, // no inference possible
+            // Somewhat lossy: values > I64MAX could have occurred during the computation of `a`.
+            (Infer(a @ 0...as_u64::I64MAX), InferSigned(_)) => InferSigned(a as i64),
+            (Infer(_), InferSigned(_)) => return Err(ConstMathErr::NotInRange),
+            (_, InferSigned(_))
+            | (_, Infer(_)) => return other.infer(self).map(|(b, a)| (a, b)),
+
+            (Infer(a @ 0...as_u64::I8MAX), I8(_)) => I8(a as i64 as i8),
+            (Infer(a @ 0...as_u64::I16MAX), I16(_)) => I16(a as i64 as i16),
+            (Infer(a @ 0...as_u64::I32MAX), I32(_)) => I32(a as i64 as i32),
+            (Infer(a @ 0...as_u64::I64MAX), I64(_)) => I64(a as i64),
+            (Infer(a @ 0...as_u64::I32MAX), Isize(Is32(_))) => Isize(Is32(a as i64 as i32)),
+            (Infer(a @ 0...as_u64::I64MAX), Isize(Is64(_))) => Isize(Is64(a as i64)),
+            (Infer(a @ 0...as_u64::U8MAX), U8(_)) => U8(a as u8),
+            (Infer(a @ 0...as_u64::U16MAX), U16(_)) => U16(a as u16),
+            (Infer(a @ 0...as_u64::U32MAX), U32(_)) => U32(a as u32),
+            (Infer(a), U64(_)) => U64(a),
+            (Infer(a @ 0...as_u64::U32MAX), Usize(Us32(_))) => Usize(Us32(a as u32)),
+            (Infer(a), Usize(Us64(_))) => Usize(Us64(a)),
+
+            (Infer(_), _) => return Err(ConstMathErr::NotInRange),
+
+            (InferSigned(a @ as_i64::I8MIN...as_i64::I8MAX), I8(_)) => I8(a as i8),
+            (InferSigned(a @ as_i64::I16MIN...as_i64::I16MAX), I16(_)) => I16(a as i16),
+            (InferSigned(a @ as_i64::I32MIN...as_i64::I32MAX), I32(_)) => I32(a as i32),
+            (InferSigned(a), I64(_)) => I64(a),
+            (InferSigned(a @ as_i64::I32MIN...as_i64::I32MAX), Isize(Is32(_))) => {
+                Isize(Is32(a as i32))
+            },
+            (InferSigned(a), Isize(Is64(_))) => Isize(Is64(a)),
+            (InferSigned(a @ 0...as_i64::U8MAX), U8(_)) => U8(a as u8),
+            (InferSigned(a @ 0...as_i64::U16MAX), U16(_)) => U16(a as u16),
+            (InferSigned(a @ 0...as_i64::U32MAX), U32(_)) => U32(a as u32),
+            (InferSigned(a @ 0...as_i64::I64MAX), U64(_)) => U64(a as u64),
+            (InferSigned(a @ 0...as_i64::U32MAX), Usize(Us32(_))) => Usize(Us32(a as u32)),
+            (InferSigned(a @ 0...as_i64::I64MAX), Usize(Us64(_))) => Usize(Us64(a as u64)),
+            (InferSigned(_), _) => return Err(ConstMathErr::NotInRange),
+            _ => self, // already known types
+        };
+        Ok((inferred, other))
+    }
+
+    /// Turn this value into an `Infer` or an `InferSigned`
+    pub fn erase_type(self) -> Self {
+        match self {
+            Infer(i) => Infer(i),
+            InferSigned(i) if i < 0 => InferSigned(i),
+            I8(i) if i < 0 => InferSigned(i as i64),
+            I16(i) if i < 0 => InferSigned(i as i64),
+            I32(i) if i < 0 => InferSigned(i as i64),
+            I64(i) if i < 0 => InferSigned(i as i64),
+            Isize(Is32(i)) if i < 0 => InferSigned(i as i64),
+            Isize(Is64(i)) if i < 0 => InferSigned(i as i64),
+            InferSigned(i) => Infer(i as u64),
+            I8(i) => Infer(i as u64),
+            I16(i) => Infer(i as u64),
+            I32(i) => Infer(i as u64),
+            I64(i) => Infer(i as u64),
+            Isize(Is32(i)) => Infer(i as u64),
+            Isize(Is64(i)) => Infer(i as u64),
+            U8(i) => Infer(i as u64),
+            U16(i) => Infer(i as u64),
+            U32(i) => Infer(i as u64),
+            U64(i) => Infer(i as u64),
+            Usize(Us32(i)) => Infer(i as u64),
+            Usize(Us64(i)) => Infer(i),
+        }
+    }
+
+    /// Description of the type, not the value
+    pub fn description(&self) -> &'static str {
+        match *self {
+            Infer(_) => "not yet inferred integral",
+            InferSigned(_) => "not yet inferred signed integral",
+            I8(_) => "i8",
+            I16(_) => "i16",
+            I32(_) => "i32",
+            I64(_) => "i64",
+            Isize(_) => "isize",
+            U8(_) => "u8",
+            U16(_) => "u16",
+            U32(_) => "u32",
+            U64(_) => "u64",
+            Usize(_) => "usize",
+        }
+    }
+
+    /// Erases the type and returns a u64.
+    /// This is not the same as `-5i8 as u64`; it behaves like `-5i8 as i64 as u64`.
+    pub fn to_u64_unchecked(self) -> u64 {
+        match self.erase_type() {
+            ConstInt::Infer(i) => i,
+            ConstInt::InferSigned(i) => i as u64,
+            _ => unreachable!(),
+        }
+    }
+
+    /// Converts the value to a `u32` if it's in the range 0...std::u32::MAX
+    pub fn to_u32(&self) -> Option<u32> {
+        match *self {
+            I8(v) if v >= 0 => Some(v as u32),
+            I16(v) if v >= 0 => Some(v as u32),
+            I32(v) if v >= 0 => Some(v as u32),
+            InferSigned(v)
+            | Isize(Is64(v))
+            | I64(v) if v >= 0 && v <= ::std::u32::MAX as i64 => Some(v as u32),
+            Isize(Is32(v)) if v >= 0 => Some(v as u32),
+            U8(v) => Some(v as u32),
+            U16(v) => Some(v as u32),
+            U32(v) => Some(v),
+            Infer(v)
+            | Usize(Us64(v))
+            | U64(v) if v <= ::std::u32::MAX as u64 => Some(v as u32),
+            Usize(Us32(v)) => Some(v),
+            _ => None,
+        }
+    }
+
+    /// Converts the value to a `u64` if it's >= 0
+    pub fn to_u64(&self) -> Option<u64> {
+        match *self {
+            Infer(v) => Some(v),
+            InferSigned(v) if v >= 0 => Some(v as u64),
+            I8(v) if v >= 0 => Some(v as u64),
+            I16(v) if v >= 0 => Some(v as u64),
+            I32(v) if v >= 0 => Some(v as u64),
+            I64(v) if v >= 0 => Some(v as u64),
+            Isize(Is32(v)) if v >= 0 => Some(v as u64),
+            Isize(Is64(v)) if v >= 0 => Some(v as u64),
+            U8(v) => Some(v as u64),
+            U16(v) => Some(v as u64),
+            U32(v) => Some(v as u64),
+            U64(v) => Some(v),
+            Usize(Us32(v)) => Some(v as u64),
+            Usize(Us64(v)) => Some(v),
+            _ => None,
+        }
+    }
+
+    pub fn is_negative(&self) -> bool {
+        match *self {
+            I8(v) => v < 0,
+            I16(v) => v < 0,
+            I32(v) => v < 0,
+            I64(v) => v < 0,
+            Isize(Is32(v)) => v < 0,
+            Isize(Is64(v)) => v < 0,
+            InferSigned(v) => v < 0,
+            _ => false,
+        }
+    }
+
+    /// Compares the values if they are of the same type
+    pub fn try_cmp(self, rhs: Self) -> Result<::std::cmp::Ordering, ConstMathErr> {
+        match self.infer(rhs)? {
+            (I8(a), I8(b)) => Ok(a.cmp(&b)),
+            (I16(a), I16(b)) => Ok(a.cmp(&b)),
+            (I32(a), I32(b)) => Ok(a.cmp(&b)),
+            (I64(a), I64(b)) => Ok(a.cmp(&b)),
+            (Isize(Is32(a)), Isize(Is32(b))) => Ok(a.cmp(&b)),
+            (Isize(Is64(a)), Isize(Is64(b))) => Ok(a.cmp(&b)),
+            (U8(a), U8(b)) => Ok(a.cmp(&b)),
+            (U16(a), U16(b)) => Ok(a.cmp(&b)),
+            (U32(a), U32(b)) => Ok(a.cmp(&b)),
+            (U64(a), U64(b)) => Ok(a.cmp(&b)),
+            (Usize(Us32(a)), Usize(Us32(b))) => Ok(a.cmp(&b)),
+            (Usize(Us64(a)), Usize(Us64(b))) => Ok(a.cmp(&b)),
+            (Infer(a), Infer(b)) => Ok(a.cmp(&b)),
+            (InferSigned(a), InferSigned(b)) => Ok(a.cmp(&b)),
+            _ => Err(CmpBetweenUnequalTypes),
+        }
+    }
+
+    /// Adds 1 to the value and wraps around if the maximum for the type is reached
+    pub fn wrap_incr(self) -> Self {
+        macro_rules! add1 {
+            ($e:expr) => { ($e).wrapping_add(1) }
+        }
+        match self {
+            ConstInt::I8(i) => ConstInt::I8(add1!(i)),
+            ConstInt::I16(i) => ConstInt::I16(add1!(i)),
+            ConstInt::I32(i) => ConstInt::I32(add1!(i)),
+            ConstInt::I64(i) => ConstInt::I64(add1!(i)),
+            ConstInt::Isize(ConstIsize::Is32(i)) => ConstInt::Isize(ConstIsize::Is32(add1!(i))),
+            ConstInt::Isize(ConstIsize::Is64(i)) => ConstInt::Isize(ConstIsize::Is64(add1!(i))),
+            ConstInt::U8(i) => ConstInt::U8(add1!(i)),
+            ConstInt::U16(i) => ConstInt::U16(add1!(i)),
+            ConstInt::U32(i) => ConstInt::U32(add1!(i)),
+            ConstInt::U64(i) => ConstInt::U64(add1!(i)),
+            ConstInt::Usize(ConstUsize::Us32(i)) => ConstInt::Usize(ConstUsize::Us32(add1!(i))),
+            ConstInt::Usize(ConstUsize::Us64(i)) => ConstInt::Usize(ConstUsize::Us64(add1!(i))),
+            ConstInt::Infer(_) | ConstInt::InferSigned(_) => panic!("no type info for const int"),
+        }
+    }
+
+    pub fn int_type(self) -> Option<IntType> {
+        match self {
+            ConstInt::I8(_) => Some(IntType::SignedInt(IntTy::I8)),
+            ConstInt::I16(_) => Some(IntType::SignedInt(IntTy::I16)),
+            ConstInt::I32(_) => Some(IntType::SignedInt(IntTy::I32)),
+            ConstInt::I64(_) => Some(IntType::SignedInt(IntTy::I64)),
+            ConstInt::Isize(_) => Some(IntType::SignedInt(IntTy::Is)),
+            ConstInt::U8(_) => Some(IntType::UnsignedInt(UintTy::U8)),
+            ConstInt::U16(_) => Some(IntType::UnsignedInt(UintTy::U16)),
+            ConstInt::U32(_) => Some(IntType::UnsignedInt(UintTy::U32)),
+            ConstInt::U64(_) => Some(IntType::UnsignedInt(UintTy::U64)),
+            ConstInt::Usize(_) => Some(IntType::UnsignedInt(UintTy::Us)),
+            _ => None,
+        }
+    }
+}
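+
+// A minimal illustrative test sketch of the inference helpers above. It is
+// not taken from the upstream test suite; the module and test names are
+// hypothetical, and the cases merely restate the match arms defined above.
+#[cfg(test)]
+mod infer_sketch_tests {
+    use super::*;
+
+    #[test]
+    fn untyped_values_adopt_the_other_operands_type() {
+        // An untyped `Infer` adopts the type of the typed operand.
+        assert_eq!(Infer(5).infer(I8(0)), Ok((I8(5), I8(0))));
+        // Comparison only works between equal types.
+        assert_eq!(I8(1).try_cmp(I8(2)), Ok(::std::cmp::Ordering::Less));
+        assert_eq!(I8(1).try_cmp(U8(1)), Err(CmpBetweenUnequalTypes));
+        // Erasing the type keeps the sign information.
+        assert_eq!(I8(-5).erase_type(), InferSigned(-5));
+        assert_eq!(I8(-5).to_u64_unchecked(), -5i64 as u64);
+    }
+}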
+
+impl ::std::cmp::PartialOrd for ConstInt {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        self.try_cmp(*other).ok()
+    }
+}
+
+impl ::std::cmp::Ord for ConstInt {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.try_cmp(*other).unwrap()
+    }
+}
+
+impl ::std::fmt::Display for ConstInt {
+    fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
+        match *self {
+            Infer(i) => write!(fmt, "{}", i),
+            InferSigned(i) => write!(fmt, "{}", i),
+            I8(i) => write!(fmt, "{}i8", i),
+            I16(i) => write!(fmt, "{}i16", i),
+            I32(i) => write!(fmt, "{}i32", i),
+            I64(i) => write!(fmt, "{}i64", i),
+            Isize(ConstIsize::Is64(i)) => write!(fmt, "{}isize", i),
+            Isize(ConstIsize::Is32(i)) => write!(fmt, "{}isize", i),
+            U8(i) => write!(fmt, "{}u8", i),
+            U16(i) => write!(fmt, "{}u16", i),
+            U32(i) => write!(fmt, "{}u32", i),
+            U64(i) => write!(fmt, "{}u64", i),
+            Usize(ConstUsize::Us64(i)) => write!(fmt, "{}usize", i),
+            Usize(ConstUsize::Us32(i)) => write!(fmt, "{}usize", i),
+        }
+    }
+}
+
+macro_rules! overflowing {
+    ($e:expr, $err:expr) => {{
+        if $e.1 {
+            return Err(Overflow($err));
+        } else {
+            $e.0
+        }
+    }}
+}
+
+macro_rules! impl_binop {
+    ($op:ident, $func:ident, $checked_func:ident) => {
+        impl ::std::ops::$op for ConstInt {
+            type Output = Result<Self, ConstMathErr>;
+            fn $func(self, rhs: Self) -> Result<Self, ConstMathErr> {
+                match self.infer(rhs)? {
+                    (I8(a), I8(b)) => a.$checked_func(b).map(I8),
+                    (I16(a), I16(b)) => a.$checked_func(b).map(I16),
+                    (I32(a), I32(b)) => a.$checked_func(b).map(I32),
+                    (I64(a), I64(b)) => a.$checked_func(b).map(I64),
+                    (Isize(Is32(a)), Isize(Is32(b))) => a.$checked_func(b).map(Is32).map(Isize),
+                    (Isize(Is64(a)), Isize(Is64(b))) => a.$checked_func(b).map(Is64).map(Isize),
+                    (U8(a), U8(b)) => a.$checked_func(b).map(U8),
+                    (U16(a), U16(b)) => a.$checked_func(b).map(U16),
+                    (U32(a), U32(b)) => a.$checked_func(b).map(U32),
+                    (U64(a), U64(b)) => a.$checked_func(b).map(U64),
+                    (Usize(Us32(a)), Usize(Us32(b))) => a.$checked_func(b).map(Us32).map(Usize),
+                    (Usize(Us64(a)), Usize(Us64(b))) => a.$checked_func(b).map(Us64).map(Usize),
+                    (Infer(a), Infer(b)) => a.$checked_func(b).map(Infer),
+                    (InferSigned(a), InferSigned(b)) => a.$checked_func(b).map(InferSigned),
+                    _ => return Err(UnequalTypes(Op::$op)),
+                }.ok_or(Overflow(Op::$op))
+            }
+        }
+    }
+}
+
+macro_rules! derive_binop {
+    ($op:ident, $func:ident) => {
+        impl ::std::ops::$op for ConstInt {
+            type Output = Result<Self, ConstMathErr>;
+            fn $func(self, rhs: Self) -> Result<Self, ConstMathErr> {
+                match self.infer(rhs)? {
+                    (I8(a), I8(b)) => Ok(I8(a.$func(b))),
+                    (I16(a), I16(b)) => Ok(I16(a.$func(b))),
+                    (I32(a), I32(b)) => Ok(I32(a.$func(b))),
+                    (I64(a), I64(b)) => Ok(I64(a.$func(b))),
+                    (Isize(Is32(a)), Isize(Is32(b))) => Ok(Isize(Is32(a.$func(b)))),
+                    (Isize(Is64(a)), Isize(Is64(b))) => Ok(Isize(Is64(a.$func(b)))),
+                    (U8(a), U8(b)) => Ok(U8(a.$func(b))),
+                    (U16(a), U16(b)) => Ok(U16(a.$func(b))),
+                    (U32(a), U32(b)) => Ok(U32(a.$func(b))),
+                    (U64(a), U64(b)) => Ok(U64(a.$func(b))),
+                    (Usize(Us32(a)), Usize(Us32(b))) => Ok(Usize(Us32(a.$func(b)))),
+                    (Usize(Us64(a)), Usize(Us64(b))) => Ok(Usize(Us64(a.$func(b)))),
+                    (Infer(a), Infer(b)) => Ok(Infer(a.$func(b))),
+                    (InferSigned(a), InferSigned(b)) => Ok(InferSigned(a.$func(b))),
+                    _ => Err(UnequalTypes(Op::$op)),
+                }
+            }
+        }
+    }
+}
+
+impl_binop!(Add, add, checked_add);
+impl_binop!(Sub, sub, checked_sub);
+impl_binop!(Mul, mul, checked_mul);
+derive_binop!(BitAnd, bitand);
+derive_binop!(BitOr, bitor);
+derive_binop!(BitXor, bitxor);
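+
+// A small sketch of the checked-arithmetic behaviour produced by the macro
+// invocations above (hypothetical test, not from the upstream sources; the
+// cases simply exercise `impl_binop!` as defined here).
+#[cfg(test)]
+mod binop_sketch_tests {
+    use super::*;
+
+    #[test]
+    fn checked_binops_report_overflow_and_type_mismatch() {
+        assert_eq!(I8(1) + I8(2), Ok(I8(3)));
+        // Overflow is reported instead of wrapping.
+        assert_eq!(U8(200) + U8(100), Err(Overflow(Op::Add)));
+        // Operands of different concrete types are rejected.
+        assert_eq!(I8(1) + U8(1), Err(UnequalTypes(Op::Add)));
+        // An untyped operand first adopts the type of the typed one.
+        assert_eq!(Infer(1) + I32(2), Ok(I32(3)));
+    }
+}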
+
+fn check_division(
+    lhs: ConstInt,
+    rhs: ConstInt,
+    op: Op,
+    zerr: ConstMathErr,
+) -> Result<(), ConstMathErr> {
+    match (lhs, rhs) {
+        (I8(_), I8(0)) => Err(zerr),
+        (I16(_), I16(0)) => Err(zerr),
+        (I32(_), I32(0)) => Err(zerr),
+        (I64(_), I64(0)) => Err(zerr),
+        (Isize(_), Isize(Is32(0))) => Err(zerr),
+        (Isize(_), Isize(Is64(0))) => Err(zerr),
+        (InferSigned(_), InferSigned(0)) => Err(zerr),
+
+        (U8(_), U8(0)) => Err(zerr),
+        (U16(_), U16(0)) => Err(zerr),
+        (U32(_), U32(0)) => Err(zerr),
+        (U64(_), U64(0)) => Err(zerr),
+        (Usize(_), Usize(Us32(0))) => Err(zerr),
+        (Usize(_), Usize(Us64(0))) => Err(zerr),
+        (Infer(_), Infer(0)) => Err(zerr),
+
+        (I8(::std::i8::MIN), I8(-1)) => Err(Overflow(op)),
+        (I16(::std::i16::MIN), I16(-1)) => Err(Overflow(op)),
+        (I32(::std::i32::MIN), I32(-1)) => Err(Overflow(op)),
+        (I64(::std::i64::MIN), I64(-1)) => Err(Overflow(op)),
+        (Isize(Is32(::std::i32::MIN)), Isize(Is32(-1))) => Err(Overflow(op)),
+        (Isize(Is64(::std::i64::MIN)), Isize(Is64(-1))) => Err(Overflow(op)),
+        (InferSigned(::std::i64::MIN), InferSigned(-1)) => Err(Overflow(op)),
+
+        _ => Ok(()),
+    }
+}
+
+impl ::std::ops::Div for ConstInt {
+    type Output = Result<Self, ConstMathErr>;
+    fn div(self, rhs: Self) -> Result<Self, ConstMathErr> {
+        let (lhs, rhs) = self.infer(rhs)?;
+        check_division(lhs, rhs, Op::Div, DivisionByZero)?;
+        match (lhs, rhs) {
+            (I8(a), I8(b)) => Ok(I8(a/b)),
+            (I16(a), I16(b)) => Ok(I16(a/b)),
+            (I32(a), I32(b)) => Ok(I32(a/b)),
+            (I64(a), I64(b)) => Ok(I64(a/b)),
+            (Isize(Is32(a)), Isize(Is32(b))) => Ok(Isize(Is32(a/b))),
+            (Isize(Is64(a)), Isize(Is64(b))) => Ok(Isize(Is64(a/b))),
+            (InferSigned(a), InferSigned(b)) => Ok(InferSigned(a/b)),
+
+            (U8(a), U8(b)) => Ok(U8(a/b)),
+            (U16(a), U16(b)) => Ok(U16(a/b)),
+            (U32(a), U32(b)) => Ok(U32(a/b)),
+            (U64(a), U64(b)) => Ok(U64(a/b)),
+            (Usize(Us32(a)), Usize(Us32(b))) => Ok(Usize(Us32(a/b))),
+            (Usize(Us64(a)), Usize(Us64(b))) => Ok(Usize(Us64(a/b))),
+            (Infer(a), Infer(b)) => Ok(Infer(a/b)),
+
+            _ => Err(UnequalTypes(Op::Div)),
+        }
+    }
+}
+
+impl ::std::ops::Rem for ConstInt {
+    type Output = Result<Self, ConstMathErr>;
+    fn rem(self, rhs: Self) -> Result<Self, ConstMathErr> {
+        let (lhs, rhs) = self.infer(rhs)?;
+        // should INT_MIN%-1 be zero or an error?
+        check_division(lhs, rhs, Op::Rem, RemainderByZero)?;
+        match (lhs, rhs) {
+            (I8(a), I8(b)) => Ok(I8(a%b)),
+            (I16(a), I16(b)) => Ok(I16(a%b)),
+            (I32(a), I32(b)) => Ok(I32(a%b)),
+            (I64(a), I64(b)) => Ok(I64(a%b)),
+            (Isize(Is32(a)), Isize(Is32(b))) => Ok(Isize(Is32(a%b))),
+            (Isize(Is64(a)), Isize(Is64(b))) => Ok(Isize(Is64(a%b))),
+            (InferSigned(a), InferSigned(b)) => Ok(InferSigned(a%b)),
+
+            (U8(a), U8(b)) => Ok(U8(a%b)),
+            (U16(a), U16(b)) => Ok(U16(a%b)),
+            (U32(a), U32(b)) => Ok(U32(a%b)),
+            (U64(a), U64(b)) => Ok(U64(a%b)),
+            (Usize(Us32(a)), Usize(Us32(b))) => Ok(Usize(Us32(a%b))),
+            (Usize(Us64(a)), Usize(Us64(b))) => Ok(Usize(Us64(a%b))),
+            (Infer(a), Infer(b)) => Ok(Infer(a%b)),
+
+            _ => Err(UnequalTypes(Op::Rem)),
+        }
+    }
+}
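+
+// An illustrative sketch of the division guards above (hypothetical test,
+// not from the upstream sources; it restates what `check_division` enforces).
+#[cfg(test)]
+mod division_sketch_tests {
+    use super::*;
+
+    #[test]
+    fn division_guards_zero_and_min_over_minus_one() {
+        assert_eq!(I8(8) / I8(2), Ok(I8(4)));
+        assert_eq!(I8(1) / I8(0), Err(DivisionByZero));
+        // INT_MIN / -1 overflows and is reported as such.
+        assert_eq!(I8(::std::i8::MIN) / I8(-1), Err(Overflow(Op::Div)));
+        assert_eq!(I8(7) % I8(2), Ok(I8(1)));
+        assert_eq!(I8(7) % I8(0), Err(RemainderByZero));
+    }
+}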
+
+impl ::std::ops::Shl<ConstInt> for ConstInt {
+    type Output = Result<Self, ConstMathErr>;
+    fn shl(self, rhs: Self) -> Result<Self, ConstMathErr> {
+        let b = rhs.to_u32().ok_or(ShiftNegative)?;
+        match self {
+            I8(a) => Ok(I8(overflowing!(a.overflowing_shl(b), Op::Shl))),
+            I16(a) => Ok(I16(overflowing!(a.overflowing_shl(b), Op::Shl))),
+            I32(a) => Ok(I32(overflowing!(a.overflowing_shl(b), Op::Shl))),
+            I64(a) => Ok(I64(overflowing!(a.overflowing_shl(b), Op::Shl))),
+            Isize(Is32(a)) => Ok(Isize(Is32(overflowing!(a.overflowing_shl(b), Op::Shl)))),
+            Isize(Is64(a)) => Ok(Isize(Is64(overflowing!(a.overflowing_shl(b), Op::Shl)))),
+            U8(a) => Ok(U8(overflowing!(a.overflowing_shl(b), Op::Shl))),
+            U16(a) => Ok(U16(overflowing!(a.overflowing_shl(b), Op::Shl))),
+            U32(a) => Ok(U32(overflowing!(a.overflowing_shl(b), Op::Shl))),
+            U64(a) => Ok(U64(overflowing!(a.overflowing_shl(b), Op::Shl))),
+            Usize(Us32(a)) => Ok(Usize(Us32(overflowing!(a.overflowing_shl(b), Op::Shl)))),
+            Usize(Us64(a)) => Ok(Usize(Us64(overflowing!(a.overflowing_shl(b), Op::Shl)))),
+            Infer(a) => Ok(Infer(overflowing!(a.overflowing_shl(b), Op::Shl))),
+            InferSigned(a) => Ok(InferSigned(overflowing!(a.overflowing_shl(b), Op::Shl))),
+        }
+    }
+}
+
+impl ::std::ops::Shr<ConstInt> for ConstInt {
+    type Output = Result<Self, ConstMathErr>;
+    fn shr(self, rhs: Self) -> Result<Self, ConstMathErr> {
+        let b = rhs.to_u32().ok_or(ShiftNegative)?;
+        match self {
+            I8(a) => Ok(I8(overflowing!(a.overflowing_shr(b), Op::Shr))),
+            I16(a) => Ok(I16(overflowing!(a.overflowing_shr(b), Op::Shr))),
+            I32(a) => Ok(I32(overflowing!(a.overflowing_shr(b), Op::Shr))),
+            I64(a) => Ok(I64(overflowing!(a.overflowing_shr(b), Op::Shr))),
+            Isize(Is32(a)) => Ok(Isize(Is32(overflowing!(a.overflowing_shr(b), Op::Shr)))),
+            Isize(Is64(a)) => Ok(Isize(Is64(overflowing!(a.overflowing_shr(b), Op::Shr)))),
+            U8(a) => Ok(U8(overflowing!(a.overflowing_shr(b), Op::Shr))),
+            U16(a) => Ok(U16(overflowing!(a.overflowing_shr(b), Op::Shr))),
+            U32(a) => Ok(U32(overflowing!(a.overflowing_shr(b), Op::Shr))),
+            U64(a) => Ok(U64(overflowing!(a.overflowing_shr(b), Op::Shr))),
+            Usize(Us32(a)) => Ok(Usize(Us32(overflowing!(a.overflowing_shr(b), Op::Shr)))),
+            Usize(Us64(a)) => Ok(Usize(Us64(overflowing!(a.overflowing_shr(b), Op::Shr)))),
+            Infer(a) => Ok(Infer(overflowing!(a.overflowing_shr(b), Op::Shr))),
+            InferSigned(a) => Ok(InferSigned(overflowing!(a.overflowing_shr(b), Op::Shr))),
+        }
+    }
+}
+
+impl ::std::ops::Neg for ConstInt {
+    type Output = Result<Self, ConstMathErr>;
+    fn neg(self) -> Result<Self, ConstMathErr> {
+        match self {
+            I8(a) => Ok(I8(overflowing!(a.overflowing_neg(), Op::Neg))),
+            I16(a) => Ok(I16(overflowing!(a.overflowing_neg(), Op::Neg))),
+            I32(a) => Ok(I32(overflowing!(a.overflowing_neg(), Op::Neg))),
+            I64(a) => Ok(I64(overflowing!(a.overflowing_neg(), Op::Neg))),
+            Isize(Is32(a)) => Ok(Isize(Is32(overflowing!(a.overflowing_neg(), Op::Neg)))),
+            Isize(Is64(a)) => Ok(Isize(Is64(overflowing!(a.overflowing_neg(), Op::Neg)))),
+            U8(0) => Ok(U8(0)),
+            U16(0) => Ok(U16(0)),
+            U32(0) => Ok(U32(0)),
+            U64(0) => Ok(U64(0)),
+            Usize(Us32(0)) => Ok(Usize(Us32(0))),
+            Usize(Us64(0)) => Ok(Usize(Us64(0))),
+            U8(_) => Err(UnsignedNegation),
+            U16(_) => Err(UnsignedNegation),
+            U32(_) => Err(UnsignedNegation),
+            U64(_) => Err(UnsignedNegation),
+            Usize(_) => Err(UnsignedNegation),
+            Infer(a @ 0...as_u64::I64MAX) => Ok(InferSigned(-(a as i64))),
+            Infer(_) => Err(Overflow(Op::Neg)),
+            InferSigned(a) => Ok(InferSigned(overflowing!(a.overflowing_neg(), Op::Neg))),
+        }
+    }
+}
+
+impl ::std::ops::Not for ConstInt {
+    type Output = Result<Self, ConstMathErr>;
+    fn not(self) -> Result<Self, ConstMathErr> {
+        match self {
+            I8(a) => Ok(I8(!a)),
+            I16(a) => Ok(I16(!a)),
+            I32(a) => Ok(I32(!a)),
+            I64(a) => Ok(I64(!a)),
+            Isize(Is32(a)) => Ok(Isize(Is32(!a))),
+            Isize(Is64(a)) => Ok(Isize(Is64(!a))),
+            U8(a) => Ok(U8(!a)),
+            U16(a) => Ok(U16(!a)),
+            U32(a) => Ok(U32(!a)),
+            U64(a) => Ok(U64(!a)),
+            Usize(Us32(a)) => Ok(Usize(Us32(!a))),
+            Usize(Us64(a)) => Ok(Usize(Us64(!a))),
+            Infer(a) => Ok(Infer(!a)),
+            InferSigned(a) => Ok(InferSigned(!a)),
+        }
+    }
+}
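
The operator impls above return a Result instead of panicking, so const evaluation can surface DivisionByZero, UnequalTypes, or Overflow as ordinary errors. Below is a minimal, self-contained sketch of that pattern; MiniInt and MathErr are illustrative stand-ins rather than types from librustc_const_math, and the real code additionally checks signed overflow (e.g. INT_MIN / -1) via check_division.

// A minimal sketch of the pattern above: a std::ops impl whose Output is a
// Result, so errors surface as values instead of panics. MiniInt and MathErr
// are illustrative stand-ins, not librustc_const_math types.
use std::ops::Div;

#[derive(Debug, Clone, Copy, PartialEq)]
enum MiniInt {
    I8(i8),
    U8(u8),
}

#[derive(Debug, PartialEq)]
enum MathErr {
    DivisionByZero,
    UnequalTypes,
}

impl Div for MiniInt {
    type Output = Result<MiniInt, MathErr>;

    fn div(self, rhs: MiniInt) -> Result<MiniInt, MathErr> {
        match (self, rhs) {
            // Reject a zero divisor up front, as check_division does above.
            (_, MiniInt::I8(0)) | (_, MiniInt::U8(0)) => Err(MathErr::DivisionByZero),
            (MiniInt::I8(a), MiniInt::I8(b)) => Ok(MiniInt::I8(a / b)),
            (MiniInt::U8(a), MiniInt::U8(b)) => Ok(MiniInt::U8(a / b)),
            // Mismatched operand types mirror the UnequalTypes arm above.
            _ => Err(MathErr::UnequalTypes),
        }
    }
}

fn main() {
    assert_eq!(MiniInt::I8(6) / MiniInt::I8(3), Ok(MiniInt::I8(2)));
    assert_eq!(MiniInt::I8(1) / MiniInt::I8(0), Err(MathErr::DivisionByZero));
    assert_eq!(MiniInt::I8(1) / MiniInt::U8(2), Err(MathErr::UnequalTypes));
}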
diff --git a/src/librustc_const_math/is.rs b/src/librustc_const_math/is.rs
new file mode 100644 (file)
index 0000000..082c651
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use syntax::ast;
+use super::err::*;
+
+/// Depending on the target, only one variant is ever used in a compilation.
+/// Anything else is an error. This invariant is checked at several locations.
+#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, Hash, Eq, PartialEq)]
+pub enum ConstIsize {
+    Is32(i32),
+    Is64(i64),
+}
+pub use self::ConstIsize::*;
+
+impl ConstIsize {
+    pub fn as_i64(self, target_int_ty: ast::IntTy) -> i64 {
+        match (self, target_int_ty) {
+            (Is32(i), ast::IntTy::I32) => i as i64,
+            (Is64(i), ast::IntTy::I64) => i,
+            _ => panic!("got invalid isize size for target"),
+        }
+    }
+    pub fn new(i: i64, target_int_ty: ast::IntTy) -> Result<Self, ConstMathErr> {
+        match target_int_ty {
+            ast::IntTy::I32 if i as i32 as i64 == i => Ok(Is32(i as i32)),
+            ast::IntTy::I32 => Err(LitOutOfRange(ast::IntTy::Is)),
+            ast::IntTy::I64 => Ok(Is64(i)),
+            _ => unreachable!(),
+        }
+    }
+}
diff --git a/src/librustc_const_math/lib.rs b/src/librustc_const_math/lib.rs
new file mode 100644 (file)
index 0000000..9f66aac
--- /dev/null
@@ -0,0 +1,43 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Rusty Mathematics
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![crate_name = "rustc_const_math"]
+#![unstable(feature = "rustc_private", issue = "27812")]
+#![crate_type = "dylib"]
+#![crate_type = "rlib"]
+#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
+      html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
+      html_root_url = "https://doc.rust-lang.org/nightly/")]
+
+
+#![feature(rustc_private)]
+#![feature(staged_api)]
+#![feature(question_mark)]
+
+#[macro_use] extern crate log;
+#[macro_use] extern crate syntax;
+
+extern crate serialize as rustc_serialize; // used by deriving
+
+mod int;
+mod us;
+mod is;
+mod err;
+
+pub use int::*;
+pub use us::*;
+pub use is::*;
+pub use err::ConstMathErr;
diff --git a/src/librustc_const_math/us.rs b/src/librustc_const_math/us.rs
new file mode 100644 (file)
index 0000000..e5a7086
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use syntax::ast;
+use super::err::*;
+
+/// Depending on the target, only one variant is ever used in a compilation.
+/// Anything else is an error. This invariant is checked at several locations.
+#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, Hash, Eq, PartialEq)]
+pub enum ConstUsize {
+    Us32(u32),
+    Us64(u64),
+}
+pub use self::ConstUsize::*;
+
+impl ConstUsize {
+    pub fn as_u64(self, target_uint_ty: ast::UintTy) -> u64 {
+        match (self, target_uint_ty) {
+            (Us32(i), ast::UintTy::U32) => i as u64,
+            (Us64(i), ast::UintTy::U64) => i,
+            _ => panic!("got invalid usize size for target"),
+        }
+    }
+    pub fn new(i: u64, target_uint_ty: ast::UintTy) -> Result<Self, ConstMathErr> {
+        match target_uint_ty {
+            ast::UintTy::U32 if i as u32 as u64 == i => Ok(Us32(i as u32)),
+            ast::UintTy::U32 => Err(ULitOutOfRange(ast::UintTy::Us)),
+            ast::UintTy::U64 => Ok(Us64(i)),
+            _ => unreachable!(),
+        }
+    }
+}
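
ConstIsize and ConstUsize pick the 32- or 64-bit variant matching the target's pointer width and reject literals that do not fit. The following self-contained sketch shows only that width-selection check; MiniUsize, TargetUintTy, and LitOutOfRange are illustrative names standing in for the compiler's own types.

// An illustrative sketch of the width-selection pattern used by ConstUsize
// above: choose the variant for the target's pointer width and reject values
// that do not round-trip. Not part of the compiler's API.
#[derive(Debug, PartialEq)]
enum MiniUsize {
    Us32(u32),
    Us64(u64),
}

enum TargetUintTy {
    U32,
    U64,
}

#[derive(Debug, PartialEq)]
struct LitOutOfRange;

fn new_usize(i: u64, target: TargetUintTy) -> Result<MiniUsize, LitOutOfRange> {
    match target {
        // On a 32-bit target the literal must round-trip through u32.
        TargetUintTy::U32 if i as u32 as u64 == i => Ok(MiniUsize::Us32(i as u32)),
        TargetUintTy::U32 => Err(LitOutOfRange),
        TargetUintTy::U64 => Ok(MiniUsize::Us64(i)),
    }
}

fn main() {
    assert_eq!(new_usize(7, TargetUintTy::U32), Ok(MiniUsize::Us32(7)));
    assert_eq!(new_usize(1 << 40, TargetUintTy::U32), Err(LitOutOfRange));
    assert_eq!(new_usize(1 << 40, TargetUintTy::U64), Ok(MiniUsize::Us64(1 << 40)));
}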
index 7b5dacece8c203b275d42f99f162c2067a3c6dc1..092b406ae9e3e23c474d7cae419e8e6a208b93a1 100644 (file)
@@ -8,9 +8,12 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use std::iter::FromIterator;
+
 /// A very simple BitVector type.
+#[derive(Clone)]
 pub struct BitVector {
-    data: Vec<u64>
+    data: Vec<u64>,
 }
 
 impl BitVector {
@@ -40,7 +43,9 @@ impl BitVector {
         for (i, j) in self.data.iter_mut().zip(&all.data) {
             let value = *i;
             *i = value | *j;
-            if value != *i { changed = true; }
+            if value != *i {
+                changed = true;
+            }
         }
         changed
     }
@@ -48,7 +53,9 @@ impl BitVector {
     pub fn grow(&mut self, num_bits: usize) {
         let num_words = u64s(num_bits);
         let extra_words = self.data.len() - num_words;
-        self.data.extend((0..extra_words).map(|_| 0));
+        if extra_words > 0 {
+            self.data.extend((0..extra_words).map(|_| 0));
+        }
     }
 
     /// Iterates over indexes of set bits in a sorted order
@@ -56,7 +63,7 @@ impl BitVector {
         BitVectorIter {
             iter: self.data.iter(),
             current: 0,
-            idx: 0
+            idx: 0,
         }
     }
 }
@@ -64,7 +71,7 @@ impl BitVector {
 pub struct BitVectorIter<'a> {
     iter: ::std::slice::Iter<'a, u64>,
     current: u64,
-    idx: usize
+    idx: usize,
 }
 
 impl<'a> Iterator for BitVectorIter<'a> {
@@ -91,6 +98,27 @@ impl<'a> Iterator for BitVectorIter<'a> {
     }
 }
 
+impl FromIterator<bool> for BitVector {
+    fn from_iter<I>(iter: I) -> BitVector where I: IntoIterator<Item=bool> {
+        let iter = iter.into_iter();
+        let (len, _) = iter.size_hint();
+        // Make the minimum length for the bitvector 64 bits since that's
+        // the smallest non-zero size anyway.
+        let len = if len < 64 { 64 } else { len };
+        let mut bv = BitVector::new(len);
+        for (idx, val) in iter.enumerate() {
+            if idx > len {
+                bv.grow(idx);
+            }
+            if val {
+                bv.insert(idx);
+            }
+        }
+
+        bv
+    }
+}
+
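
The new FromIterator impl lets an iterator of booleans be collected directly into a BitVector, rounding the capacity up to at least 64 bits. A hedged usage sketch follows; it is written against the rustc-internal BitVector API shown in this file (new/insert/iter) and is illustrative rather than buildable against a published crate.

// A usage sketch for the FromIterator<bool> impl added above, assuming the
// BitVector type from this module is in scope (rustc-internal API).
fn collect_bools() {
    let bits: BitVector = vec![true, false, true, false].into_iter().collect();
    // Indices of the `true` values come back in sorted order, as in the tests below.
    assert_eq!(bits.iter().collect::<Vec<_>>(), [0, 2]);
}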
 /// A "bit matrix" is basically a square matrix of booleans
 /// represented as one gigantic bitvector. In other words, it is as if
 /// you have N bitvectors, each of length N. Note that `elements` here is `N`/
@@ -108,7 +136,7 @@ impl BitMatrix {
         let u64s_per_elem = u64s(elements);
         BitMatrix {
             elements: elements,
-            vector: vec![0; elements * u64s_per_elem]
+            vector: vec![0; elements * u64s_per_elem],
         }
     }
 
@@ -123,9 +151,9 @@ impl BitMatrix {
         let (start, _) = self.range(source);
         let (word, mask) = word_mask(target);
         let mut vector = &mut self.vector[..];
-        let v1 = vector[start+word];
+        let v1 = vector[start + word];
         let v2 = v1 | mask;
-        vector[start+word] = v2;
+        vector[start + word] = v2;
         v1 != v2
     }
 
@@ -136,7 +164,7 @@ impl BitMatrix {
     pub fn contains(&self, source: usize, target: usize) -> bool {
         let (start, _) = self.range(source);
         let (word, mask) = word_mask(target);
-        (self.vector[start+word] & mask) != 0
+        (self.vector[start + word] & mask) != 0
     }
 
     /// Returns those indices that are reachable from both `a` and
@@ -150,8 +178,12 @@ impl BitMatrix {
         for (base, (i, j)) in (a_start..a_end).zip(b_start..b_end).enumerate() {
             let mut v = self.vector[i] & self.vector[j];
             for bit in 0..64 {
-                if v == 0 { break; }
-                if v & 0x1 != 0 { result.push(base*64 + bit); }
+                if v == 0 {
+                    break;
+                }
+                if v & 0x1 != 0 {
+                    result.push(base * 64 + bit);
+                }
                 v >>= 1;
             }
         }
@@ -170,9 +202,7 @@ impl BitMatrix {
         let (write_start, write_end) = self.range(write);
         let vector = &mut self.vector[..];
         let mut changed = false;
-        for (read_index, write_index) in
-            (read_start..read_end).zip(write_start..write_end)
-        {
+        for (read_index, write_index) in (read_start..read_end).zip(write_start..write_end) {
             let v1 = vector[write_index];
             let v2 = v1 | vector[read_index];
             vector[write_index] = v2;
@@ -204,7 +234,8 @@ fn bitvec_iter_works() {
     bitvec.insert(65);
     bitvec.insert(66);
     bitvec.insert(99);
-    assert_eq!(bitvec.iter().collect::<Vec<_>>(), [1, 10, 19, 62, 63, 64, 65, 66, 99]);
+    assert_eq!(bitvec.iter().collect::<Vec<_>>(),
+               [1, 10, 19, 62, 63, 64, 65, 66, 99]);
 }
 
 #[test]
@@ -217,7 +248,8 @@ fn bitvec_iter_works_2() {
     bitvec.insert(66);
     bitvec.insert(99);
     bitvec.insert(299);
-    assert_eq!(bitvec.iter().collect::<Vec<_>>(), [1, 10, 19, 62, 66, 99, 299]);
+    assert_eq!(bitvec.iter().collect::<Vec<_>>(),
+               [1, 10, 19, 62, 66, 99, 299]);
 
 }
 
index da5f9f20892007c764f0a27a54f27820505a2c67..0000c283a7a00245bac358a5253ebf57c1d2c154 100644 (file)
@@ -36,7 +36,9 @@ pub struct FnvHasher(u64);
 
 impl Default for FnvHasher {
     #[inline]
-    fn default() -> FnvHasher { FnvHasher(0xcbf29ce484222325) }
+    fn default() -> FnvHasher {
+        FnvHasher(0xcbf29ce484222325)
+    }
 }
 
 impl Hasher for FnvHasher {
@@ -51,5 +53,7 @@ impl Hasher for FnvHasher {
     }
 
     #[inline]
-    fn finish(&self) -> u64 { self.0 }
+    fn finish(&self) -> u64 {
+        self.0
+    }
 }
index f11856d751391ef1edba3830b1872257e4a9ff21..99a87d1e760c30498c7b352dbe84a5fc7287d668 100644 (file)
@@ -38,9 +38,9 @@ use snapshot_vec::{SnapshotVec, SnapshotVecDelegate};
 #[cfg(test)]
 mod tests;
 
-pub struct Graph<N,E> {
-    nodes: SnapshotVec<Node<N>> ,
-    edges: SnapshotVec<Edge<E>> ,
+pub struct Graph<N, E> {
+    nodes: SnapshotVec<Node<N>>,
+    edges: SnapshotVec<Edge<E>>,
 }
 
 pub struct Node<N> {
@@ -71,9 +71,13 @@ impl<N> SnapshotVecDelegate for Edge<N> {
 
 impl<E: Debug> Debug for Edge<E> {
     fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
-        write!(f, "Edge {{ next_edge: [{:?}, {:?}], source: {:?}, target: {:?}, data: {:?} }}",
-               self.next_edge[0], self.next_edge[1], self.source,
-               self.target, self.data)
+        write!(f,
+               "Edge {{ next_edge: [{:?}, {:?}], source: {:?}, target: {:?}, data: {:?} }}",
+               self.next_edge[0],
+               self.next_edge[1],
+               self.source,
+               self.target,
+               self.data)
     }
 }
 
@@ -87,7 +91,9 @@ pub const INVALID_EDGE_INDEX: EdgeIndex = EdgeIndex(usize::MAX);
 
 // Use a private field here to guarantee no more instances are created:
 #[derive(Copy, Clone, Debug, PartialEq)]
-pub struct Direction { repr: usize }
+pub struct Direction {
+    repr: usize,
+}
 
 pub const OUTGOING: Direction = Direction { repr: 0 };
 
@@ -95,24 +101,27 @@ pub const INCOMING: Direction = Direction { repr: 1 };
 
 impl NodeIndex {
     /// Returns unique id (unique with respect to the graph holding associated node).
-    pub fn node_id(&self) -> usize { self.0 }
+    pub fn node_id(&self) -> usize {
+        self.0
+    }
 }
 
 impl EdgeIndex {
     /// Returns unique id (unique with respect to the graph holding associated edge).
-    pub fn edge_id(&self) -> usize { self.0 }
+    pub fn edge_id(&self) -> usize {
+        self.0
+    }
 }
 
-impl<N:Debug,E:Debug> Graph<N,E> {
-    pub fn new() -> Graph<N,E> {
+impl<N: Debug, E: Debug> Graph<N, E> {
+    pub fn new() -> Graph<N, E> {
         Graph {
             nodes: SnapshotVec::new(),
             edges: SnapshotVec::new(),
         }
     }
 
-    ///////////////////////////////////////////////////////////////////////////
-    // Simple accessors
+    // # Simple accessors
 
     #[inline]
     pub fn all_nodes(&self) -> &[Node<N>] {
@@ -134,8 +143,7 @@ impl<N:Debug,E:Debug> Graph<N,E> {
         self.edges.len()
     }
 
-    ///////////////////////////////////////////////////////////////////////////
-    // Node construction
+    // # Node construction
 
     pub fn next_node_index(&self) -> NodeIndex {
         NodeIndex(self.nodes.len())
@@ -145,7 +153,7 @@ impl<N:Debug,E:Debug> Graph<N,E> {
         let idx = self.next_node_index();
         self.nodes.push(Node {
             first_edge: [INVALID_EDGE_INDEX, INVALID_EDGE_INDEX],
-            data: data
+            data: data,
         });
         idx
     }
@@ -162,26 +170,20 @@ impl<N:Debug,E:Debug> Graph<N,E> {
         &self.nodes[idx.0]
     }
 
-    ///////////////////////////////////////////////////////////////////////////
-    // Edge construction and queries
+    // # Edge construction and queries
 
     pub fn next_edge_index(&self) -> EdgeIndex {
         EdgeIndex(self.edges.len())
     }
 
-    pub fn add_edge(&mut self,
-                    source: NodeIndex,
-                    target: NodeIndex,
-                    data: E) -> EdgeIndex {
+    pub fn add_edge(&mut self, source: NodeIndex, target: NodeIndex, data: E) -> EdgeIndex {
         debug!("graph: add_edge({:?}, {:?}, {:?})", source, target, data);
 
         let idx = self.next_edge_index();
 
         // read current first of the list of edges from each node
-        let source_first = self.nodes[source.0]
-                                     .first_edge[OUTGOING.repr];
-        let target_first = self.nodes[target.0]
-                                     .first_edge[INCOMING.repr];
+        let source_first = self.nodes[source.0].first_edge[OUTGOING.repr];
+        let target_first = self.nodes[target.0].first_edge[INCOMING.repr];
 
         // create the new edge, with the previous firsts from each node
         // as the next pointers
@@ -189,7 +191,7 @@ impl<N:Debug,E:Debug> Graph<N,E> {
             next_edge: [source_first, target_first],
             source: source,
             target: target,
-            data: data
+            data: data,
         });
 
         // adjust the firsts for each node target be the next object.
@@ -227,46 +229,48 @@ impl<N:Debug,E:Debug> Graph<N,E> {
         self.edges[edge.0].next_edge[dir.repr]
     }
 
-    ///////////////////////////////////////////////////////////////////////////
-    // Iterating over nodes, edges
+    // # Iterating over nodes, edges
 
-    pub fn each_node<'a, F>(&'a self, mut f: F) -> bool where
-        F: FnMut(NodeIndex, &'a Node<N>) -> bool,
+    pub fn each_node<'a, F>(&'a self, mut f: F) -> bool
+        where F: FnMut(NodeIndex, &'a Node<N>) -> bool
     {
         //! Iterates over all edges defined in the graph.
         self.nodes.iter().enumerate().all(|(i, node)| f(NodeIndex(i), node))
     }
 
-    pub fn each_edge<'a, F>(&'a self, mut f: F) -> bool where
-        F: FnMut(EdgeIndex, &'a Edge<E>) -> bool,
+    pub fn each_edge<'a, F>(&'a self, mut f: F) -> bool
+        where F: FnMut(EdgeIndex, &'a Edge<E>) -> bool
     {
         //! Iterates over all edges defined in the graph
         self.edges.iter().enumerate().all(|(i, edge)| f(EdgeIndex(i), edge))
     }
 
-    pub fn outgoing_edges(&self, source: NodeIndex) -> AdjacentEdges<N,E> {
+    pub fn outgoing_edges(&self, source: NodeIndex) -> AdjacentEdges<N, E> {
         self.adjacent_edges(source, OUTGOING)
     }
 
-    pub fn incoming_edges(&self, source: NodeIndex) -> AdjacentEdges<N,E> {
+    pub fn incoming_edges(&self, source: NodeIndex) -> AdjacentEdges<N, E> {
         self.adjacent_edges(source, INCOMING)
     }
 
-    pub fn adjacent_edges(&self, source: NodeIndex, direction: Direction) -> AdjacentEdges<N,E> {
+    pub fn adjacent_edges(&self, source: NodeIndex, direction: Direction) -> AdjacentEdges<N, E> {
         let first_edge = self.node(source).first_edge[direction.repr];
-        AdjacentEdges { graph: self, direction: direction, next: first_edge }
+        AdjacentEdges {
+            graph: self,
+            direction: direction,
+            next: first_edge,
+        }
     }
 
-    pub fn successor_nodes(&self, source: NodeIndex) -> AdjacentTargets<N,E> {
+    pub fn successor_nodes(&self, source: NodeIndex) -> AdjacentTargets<N, E> {
         self.outgoing_edges(source).targets()
     }
 
-    pub fn predecessor_nodes(&self, target: NodeIndex) -> AdjacentSources<N,E> {
+    pub fn predecessor_nodes(&self, target: NodeIndex) -> AdjacentSources<N, E> {
         self.incoming_edges(target).sources()
     }
 
-    ///////////////////////////////////////////////////////////////////////////
-    // Fixed-point iteration
+    // # Fixed-point iteration
     //
     // A common use for graphs in our compiler is to perform
     // fixed-point iteration. In this case, each edge represents a
@@ -274,8 +278,8 @@ impl<N:Debug,E:Debug> Graph<N,E> {
     // variables or other bitsets. This method facilitates such a
     // computation.
 
-    pub fn iterate_until_fixed_point<'a, F>(&'a self, mut op: F) where
-        F: FnMut(usize, EdgeIndex, &'a Edge<E>) -> bool,
+    pub fn iterate_until_fixed_point<'a, F>(&'a self, mut op: F)
+        where F: FnMut(usize, EdgeIndex, &'a Edge<E>) -> bool
     {
         let mut iteration = 0;
         let mut changed = true;
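
The fixed-point comment above describes the intended use: sweep the constraint edges repeatedly until a full pass produces no change. A tiny self-contained illustration of that idea follows, using plain Vecs rather than this crate's Graph and BitVector types.

// A self-contained sketch of fixed-point iteration over constraint edges:
// keep sweeping until one complete pass makes no change.
fn reachable_from(start: usize, num_nodes: usize, edges: &[(usize, usize)]) -> Vec<bool> {
    let mut reachable = vec![false; num_nodes];
    reachable[start] = true;
    let mut changed = true;
    while changed {
        changed = false;
        for &(source, target) in edges {
            if reachable[source] && !reachable[target] {
                reachable[target] = true;
                changed = true;
            }
        }
    }
    reachable
}

fn main() {
    let edges = [(0, 1), (1, 2), (3, 4)];
    assert_eq!(reachable_from(0, 5, &edges), vec![true, true, true, false, false]);
}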
@@ -288,7 +292,7 @@ impl<N:Debug,E:Debug> Graph<N,E> {
         }
     }
 
-    pub fn depth_traverse<'a>(&'a self, start: NodeIndex) -> DepthFirstTraversal<'a, N, E>  {
+    pub fn depth_traverse<'a>(&'a self, start: NodeIndex) -> DepthFirstTraversal<'a, N, E> {
         DepthFirstTraversal {
             graph: self,
             stack: vec![start],
@@ -297,28 +301,28 @@ impl<N:Debug,E:Debug> Graph<N,E> {
     }
 }
 
-///////////////////////////////////////////////////////////////////////////
-// Iterators
+// # Iterators
 
-pub struct AdjacentEdges<'g,N,E>
-    where N:'g, E:'g
+pub struct AdjacentEdges<'g, N, E>
+    where N: 'g,
+          E: 'g
 {
     graph: &'g Graph<N, E>,
     direction: Direction,
     next: EdgeIndex,
 }
 
-impl<'g,N,E> AdjacentEdges<'g,N,E> {
-    fn targets(self) -> AdjacentTargets<'g,N,E> {
+impl<'g, N, E> AdjacentEdges<'g, N, E> {
+    fn targets(self) -> AdjacentTargets<'g, N, E> {
         AdjacentTargets { edges: self }
     }
 
-    fn sources(self) -> AdjacentSources<'g,N,E> {
+    fn sources(self) -> AdjacentSources<'g, N, E> {
         AdjacentSources { edges: self }
     }
 }
 
-impl<'g, N:Debug, E:Debug> Iterator for AdjacentEdges<'g, N, E> {
+impl<'g, N: Debug, E: Debug> Iterator for AdjacentEdges<'g, N, E> {
     type Item = (EdgeIndex, &'g Edge<E>);
 
     fn next(&mut self) -> Option<(EdgeIndex, &'g Edge<E>)> {
@@ -333,13 +337,14 @@ impl<'g, N:Debug, E:Debug> Iterator for AdjacentEdges<'g, N, E> {
     }
 }
 
-pub struct AdjacentTargets<'g,N:'g,E:'g>
-    where N:'g, E:'g
+pub struct AdjacentTargets<'g, N: 'g, E: 'g>
+    where N: 'g,
+          E: 'g
 {
-    edges: AdjacentEdges<'g,N,E>,
+    edges: AdjacentEdges<'g, N, E>,
 }
 
-impl<'g, N:Debug, E:Debug> Iterator for AdjacentTargets<'g, N, E> {
+impl<'g, N: Debug, E: Debug> Iterator for AdjacentTargets<'g, N, E> {
     type Item = NodeIndex;
 
     fn next(&mut self) -> Option<NodeIndex> {
@@ -347,13 +352,14 @@ impl<'g, N:Debug, E:Debug> Iterator for AdjacentTargets<'g, N, E> {
     }
 }
 
-pub struct AdjacentSources<'g,N:'g,E:'g>
-    where N:'g, E:'g
+pub struct AdjacentSources<'g, N: 'g, E: 'g>
+    where N: 'g,
+          E: 'g
 {
-    edges: AdjacentEdges<'g,N,E>,
+    edges: AdjacentEdges<'g, N, E>,
 }
 
-impl<'g, N:Debug, E:Debug> Iterator for AdjacentSources<'g, N, E> {
+impl<'g, N: Debug, E: Debug> Iterator for AdjacentSources<'g, N, E> {
     type Item = NodeIndex;
 
     fn next(&mut self) -> Option<NodeIndex> {
@@ -361,13 +367,13 @@ impl<'g, N:Debug, E:Debug> Iterator for AdjacentSources<'g, N, E> {
     }
 }
 
-pub struct DepthFirstTraversal<'g, N:'g, E:'g> {
+pub struct DepthFirstTraversal<'g, N: 'g, E: 'g> {
     graph: &'g Graph<N, E>,
     stack: Vec<NodeIndex>,
-    visited: BitVector
+    visited: BitVector,
 }
 
-impl<'g, N:Debug, E:Debug> Iterator for DepthFirstTraversal<'g, N, E> {
+impl<'g, N: Debug, E: Debug> Iterator for DepthFirstTraversal<'g, N, E> {
     type Item = NodeIndex;
 
     fn next(&mut self) -> Option<NodeIndex> {
@@ -389,8 +395,8 @@ impl<'g, N:Debug, E:Debug> Iterator for DepthFirstTraversal<'g, N, E> {
     }
 }
 
-pub fn each_edge_index<F>(max_edge_index: EdgeIndex, mut f: F) where
-    F: FnMut(EdgeIndex) -> bool,
+pub fn each_edge_index<F>(max_edge_index: EdgeIndex, mut f: F)
+    where F: FnMut(EdgeIndex) -> bool
 {
     let mut i = 0;
     let n = max_edge_index.0;
index 33b2edd2e106d22ec0dd98f4b17a7067dc38f43f..be7f48d27e0416a5c34b7c557e1ce8654dfa1b70 100644 (file)
@@ -64,11 +64,11 @@ fn each_edge() {
     });
 }
 
-fn test_adjacent_edges<N:PartialEq+Debug,E:PartialEq+Debug>(graph: &Graph<N,E>,
-                                                            start_index: NodeIndex,
-                                                            start_data: N,
-                                                            expected_incoming: &[(E,N)],
-                                                            expected_outgoing: &[(E,N)]) {
+fn test_adjacent_edges<N: PartialEq + Debug, E: PartialEq + Debug>(graph: &Graph<N, E>,
+                                                                   start_index: NodeIndex,
+                                                                   start_data: N,
+                                                                   expected_incoming: &[(E, N)],
+                                                                   expected_outgoing: &[(E, N)]) {
     assert!(graph.node_data(start_index) == &start_data);
 
     let mut counter = 0;
@@ -76,7 +76,10 @@ fn test_adjacent_edges<N:PartialEq+Debug,E:PartialEq+Debug>(graph: &Graph<N,E>,
         assert!(graph.edge_data(edge_index) == &edge.data);
         assert!(counter < expected_incoming.len());
         debug!("counter={:?} expected={:?} edge_index={:?} edge={:?}",
-               counter, expected_incoming[counter], edge_index, edge);
+               counter,
+               expected_incoming[counter],
+               edge_index,
+               edge);
         match expected_incoming[counter] {
             (ref e, ref n) => {
                 assert!(e == &edge.data);
@@ -93,7 +96,10 @@ fn test_adjacent_edges<N:PartialEq+Debug,E:PartialEq+Debug>(graph: &Graph<N,E>,
         assert!(graph.edge_data(edge_index) == &edge.data);
         assert!(counter < expected_outgoing.len());
         debug!("counter={:?} expected={:?} edge_index={:?} edge={:?}",
-               counter, expected_outgoing[counter], edge_index, edge);
+               counter,
+               expected_outgoing[counter],
+               edge_index,
+               edge);
         match expected_outgoing[counter] {
             (ref e, ref n) => {
                 assert!(e == &edge.data);
@@ -109,31 +115,27 @@ fn test_adjacent_edges<N:PartialEq+Debug,E:PartialEq+Debug>(graph: &Graph<N,E>,
 #[test]
 fn each_adjacent_from_a() {
     let graph = create_graph();
-    test_adjacent_edges(&graph, NodeIndex(0), "A",
-                        &[],
-                        &[("AB", "B")]);
+    test_adjacent_edges(&graph, NodeIndex(0), "A", &[], &[("AB", "B")]);
 }
 
 #[test]
 fn each_adjacent_from_b() {
     let graph = create_graph();
-    test_adjacent_edges(&graph, NodeIndex(1), "B",
-                        &[("FB", "F"), ("AB", "A"),],
-                        &[("BD", "D"), ("BC", "C"),]);
+    test_adjacent_edges(&graph,
+                        NodeIndex(1),
+                        "B",
+                        &[("FB", "F"), ("AB", "A")],
+                        &[("BD", "D"), ("BC", "C")]);
 }
 
 #[test]
 fn each_adjacent_from_c() {
     let graph = create_graph();
-    test_adjacent_edges(&graph, NodeIndex(2), "C",
-                        &[("EC", "E"), ("BC", "B")],
-                        &[]);
+    test_adjacent_edges(&graph, NodeIndex(2), "C", &[("EC", "E"), ("BC", "B")], &[]);
 }
 
 #[test]
 fn each_adjacent_from_d() {
     let graph = create_graph();
-    test_adjacent_edges(&graph, NodeIndex(3), "D",
-                        &[("BD", "B")],
-                        &[("DE", "E")]);
+    test_adjacent_edges(&graph, NodeIndex(3), "D", &[("BD", "B")], &[("DE", "E")]);
 }
index dabe1b984df2a57fad9550d67be17c411f5b63ce..f842f4a41a118b36d9b7d4bc1162dc0d5afda1c5 100644 (file)
@@ -26,14 +26,12 @@ use std::cell::Cell;
 /// suffices for the current purposes.
 #[derive(PartialEq)]
 pub struct Ivar<T: Copy> {
-    data: Cell<Option<T>>
+    data: Cell<Option<T>>,
 }
 
 impl<T: Copy> Ivar<T> {
     pub fn new() -> Ivar<T> {
-        Ivar {
-            data: Cell::new(None)
-        }
+        Ivar { data: Cell::new(None) }
     }
 
     pub fn get(&self) -> Option<T> {
@@ -41,8 +39,7 @@ impl<T: Copy> Ivar<T> {
     }
 
     pub fn fulfill(&self, value: T) {
-        assert!(self.data.get().is_none(),
-                "Value already set!");
+        assert!(self.data.get().is_none(), "Value already set!");
         self.data.set(Some(value));
     }
 
@@ -55,11 +52,11 @@ impl<T: Copy> Ivar<T> {
     }
 }
 
-impl<T: Copy+fmt::Debug> fmt::Debug for Ivar<T> {
+impl<T: Copy + fmt::Debug> fmt::Debug for Ivar<T> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self.get() {
             Some(val) => write!(f, "Ivar({:?})", val),
-            None => f.write_str("Ivar(<unfulfilled>)")
+            None => f.write_str("Ivar(<unfulfilled>)"),
         }
     }
 }
@@ -68,7 +65,7 @@ impl<T: Copy> Clone for Ivar<T> {
     fn clone(&self) -> Ivar<T> {
         match self.get() {
             Some(val) => Ivar { data: Cell::new(Some(val)) },
-            None => Ivar::new()
+            None => Ivar::new(),
         }
     }
 }
index e4b13ff548a4538df0efda70e71bfd671132f2ec..2234325aa013bb8c4c428b4ebd07f3c706a8dd74 100644 (file)
@@ -32,7 +32,8 @@
 #![cfg_attr(test, feature(test))]
 
 extern crate core;
-#[macro_use] extern crate log;
+#[macro_use]
+extern crate log;
 extern crate serialize as rustc_serialize; // used by deriving
 
 pub mod bitvec;
index d76d7f6ba340e67c9e563594ed084378ce91cf09..982a2bacce1648459cffea4dc3cde3e83d0fb5b2 100644 (file)
@@ -60,7 +60,7 @@ which includes three bits of information:
   `process_obligations` would simply yield back further ambiguous
   results. This is used by the `FulfillmentContext` to decide when it
   has reached a steady state.
-  
+
 #### Snapshots
 
 The `ObligationForest` supports a limited form of snapshots; see
@@ -79,5 +79,3 @@ parent and (for convenience) its root (which may be itself). It also
 has a current state, described by `NodeState`. After each
 processing step, we compress the vector to remove completed and error
 nodes, which aren't needed anymore.
-
-  
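
The README above describes the processing loop in prose; the following hedged sketch, modeled on the unit tests added later in this commit, shows the shape of a caller. It assumes ObligationForest and Outcome from this module are in scope (rustc-internal API), and deliberately does not assert the exact ordering of `completed`.

// A sketch of driving ObligationForest, mirroring the tests in this commit.
fn demo() {
    let mut forest = ObligationForest::new();
    forest.push_tree("A", "A");

    // First round: "A" expands into one subobligation; nothing completes yet.
    let Outcome { completed, errors, .. } =
        forest.process_obligations::<(), _>(|obligation, _tree, _backtrace| {
            match *obligation {
                "A" => Ok(Some(vec!["A.1"])),
                _ => unreachable!(),
            }
        });
    assert!(completed.is_empty() && errors.is_empty());

    // Second round: the child finishes, which transitively completes "A" too.
    let Outcome { completed, errors, .. } =
        forest.process_obligations::<(), _>(|obligation, _tree, _backtrace| {
            match *obligation {
                "A.1" => Ok(Some(vec![])),
                _ => unreachable!(),
            }
        });
    assert!(completed.contains(&"A.1") && completed.contains(&"A"));
    assert!(errors.is_empty());
}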
index 25a77adba2820b50e6e0db74913aba76fd32178d..4f6d0d7e40562e4f302d05e1fb2c2bea01ab58d6 100644 (file)
@@ -28,7 +28,7 @@ use self::tree_index::TreeIndex;
 #[cfg(test)]
 mod test;
 
-pub struct ObligationForest<O,T> {
+pub struct ObligationForest<O, T> {
     /// The list of obligations. In between calls to
     /// `process_obligations`, this list only contains nodes in the
     /// `Pending` or `Success` state (with a non-zero number of
@@ -43,7 +43,7 @@ pub struct ObligationForest<O,T> {
     /// backtrace iterator (which uses `split_at`).
     nodes: Vec<Node<O>>,
     trees: Vec<Tree<T>>,
-    snapshots: Vec<usize>
+    snapshots: Vec<usize>,
 }
 
 pub struct Snapshot {
@@ -67,7 +67,9 @@ struct Node<O> {
 #[derive(Debug)]
 enum NodeState<O> {
     /// Obligation not yet resolved to success or error.
-    Pending { obligation: O },
+    Pending {
+        obligation: O,
+    },
 
     /// Obligation resolved to success; `num_incomplete_children`
     /// indicates the number of children still in an "incomplete"
@@ -77,7 +79,10 @@ enum NodeState<O> {
     ///
     /// Once all children have completed, success nodes are removed
     /// from the vector by the compression step.
-    Success { obligation: O, num_incomplete_children: usize },
+    Success {
+        obligation: O,
+        num_incomplete_children: usize,
+    },
 
     /// This obligation was resolved to an error. Error nodes are
     /// removed from the vector by the compression step.
@@ -85,13 +90,13 @@ enum NodeState<O> {
 }
 
 #[derive(Debug)]
-pub struct Outcome<O,E> {
+pub struct Outcome<O, E> {
     /// Obligations that were completely evaluated, including all
     /// (transitive) subobligations.
     pub completed: Vec<O>,
 
     /// Backtrace of obligations that were found to be in error.
-    pub errors: Vec<Error<O,E>>,
+    pub errors: Vec<Error<O, E>>,
 
     /// If true, then we saw no successful obligations, which means
     /// there is no point in further iteration. This is based on the
@@ -103,7 +108,7 @@ pub struct Outcome<O,E> {
 }
 
 #[derive(Debug, PartialEq, Eq)]
-pub struct Error<O,E> {
+pub struct Error<O, E> {
     pub error: E,
     pub backtrace: Vec<O>,
 }
@@ -113,7 +118,7 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
         ObligationForest {
             trees: vec![],
             nodes: vec![],
-            snapshots: vec![]
+            snapshots: vec![],
         }
     }
 
@@ -148,11 +153,12 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
         // snapshot but pushing trees, all nodes after that should be
         // roots of other trees as well
         let first_root_index = self.trees[trees_len].root.get();
-        debug_assert!(
-            self.nodes[first_root_index..]
-                .iter()
-                .zip(first_root_index..)
-                .all(|(root, root_index)| self.trees[root.tree.get()].root.get() == root_index));
+        debug_assert!(self.nodes[first_root_index..]
+                          .iter()
+                          .zip(first_root_index..)
+                          .all(|(root, root_index)| {
+                              self.trees[root.tree.get()].root.get() == root_index
+                          }));
 
         // Pop off tree/root pairs pushed during snapshot.
         self.trees.truncate(trees_len);
@@ -169,14 +175,17 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
     pub fn push_tree(&mut self, obligation: O, tree_state: T) {
         let index = NodeIndex::new(self.nodes.len());
         let tree = TreeIndex::new(self.trees.len());
-        self.trees.push(Tree { root: index, state: tree_state });
+        self.trees.push(Tree {
+            root: index,
+            state: tree_state,
+        });
         self.nodes.push(Node::new(tree, None, obligation));
     }
 
     /// Convert all remaining obligations to the given error.
     ///
     /// This cannot be done during a snapshot.
-    pub fn to_errors<E:Clone>(&mut self, error: E) -> Vec<Error<O,E>> {
+    pub fn to_errors<E: Clone>(&mut self, error: E) -> Vec<Error<O, E>> {
         assert!(!self.in_snapshot());
         let mut errors = vec![];
         for index in 0..self.nodes.len() {
@@ -184,7 +193,10 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
             self.inherit_error(index);
             if let NodeState::Pending { .. } = self.nodes[index].state {
                 let backtrace = self.backtrace(index);
-                errors.push(Error { error: error.clone(), backtrace: backtrace });
+                errors.push(Error {
+                    error: error.clone(),
+                    backtrace: backtrace,
+                });
             }
         }
         let successful_obligations = self.compress();
@@ -193,21 +205,27 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
     }
 
     /// Returns the set of obligations that are in a pending state.
-    pub fn pending_obligations(&self) -> Vec<O> where O: Clone {
-        self.nodes.iter()
-                  .filter_map(|n| match n.state {
-                      NodeState::Pending { ref obligation } => Some(obligation),
-                      _ => None,
-                  })
-                  .cloned()
-                  .collect()
+    pub fn pending_obligations(&self) -> Vec<O>
+        where O: Clone
+    {
+        self.nodes
+            .iter()
+            .filter_map(|n| {
+                match n.state {
+                    NodeState::Pending { ref obligation } => Some(obligation),
+                    _ => None,
+                }
+            })
+            .cloned()
+            .collect()
     }
 
     /// Process the obligations.
     ///
     /// This CANNOT be unrolled (presently, at least).
-    pub fn process_obligations<E,F>(&mut self, mut action: F) -> Outcome<O,E>
-        where E: Debug, F: FnMut(&mut O, &mut T, Backtrace<O>) -> Result<Option<Vec<O>>, E>
+    pub fn process_obligations<E, F>(&mut self, mut action: F) -> Outcome<O, E>
+        where E: Debug,
+              F: FnMut(&mut O, &mut T, Backtrace<O>) -> Result<Option<Vec<O>>, E>
     {
         debug!("process_obligations(len={})", self.nodes.len());
         assert!(!self.in_snapshot()); // cannot unroll this action
@@ -228,7 +246,8 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
             self.inherit_error(index);
 
             debug!("process_obligations: node {} == {:?}",
-                   index, self.nodes[index].state);
+                   index,
+                   self.nodes[index].state);
 
             let result = {
                 let Node { tree, parent, .. } = self.nodes[index];
@@ -236,14 +255,16 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
                 let backtrace = Backtrace::new(prefix, parent);
                 match suffix[0].state {
                     NodeState::Error |
-                    NodeState::Success { .. } =>
-                        continue,
-                    NodeState::Pending { ref mut obligation } =>
-                        action(obligation, &mut self.trees[tree.get()].state, backtrace),
+                    NodeState::Success { .. } => continue,
+                    NodeState::Pending { ref mut obligation } => {
+                        action(obligation, &mut self.trees[tree.get()].state, backtrace)
+                    }
                 }
             };
 
-            debug!("process_obligations: node {} got result {:?}", index, result);
+            debug!("process_obligations: node {} got result {:?}",
+                   index,
+                   result);
 
             match result {
                 Ok(None) => {
@@ -256,7 +277,10 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
                 }
                 Err(err) => {
                     let backtrace = self.backtrace(index);
-                    errors.push(Error { error: err, backtrace: backtrace });
+                    errors.push(Error {
+                        error: err,
+                        backtrace: backtrace,
+                    });
                 }
             }
         }
@@ -291,20 +315,21 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
             // create child work
             let tree_index = self.nodes[index].tree;
             let node_index = NodeIndex::new(index);
-            self.nodes.extend(
-                children.into_iter()
-                        .map(|o| Node::new(tree_index, Some(node_index), o)));
+            self.nodes.extend(children.into_iter()
+                                      .map(|o| Node::new(tree_index, Some(node_index), o)));
         }
 
         // change state from `Pending` to `Success`, temporarily swapping in `Error`
         let state = mem::replace(&mut self.nodes[index].state, NodeState::Error);
         self.nodes[index].state = match state {
-            NodeState::Pending { obligation } =>
-                NodeState::Success { obligation: obligation,
-                                     num_incomplete_children: num_incomplete_children },
+            NodeState::Pending { obligation } => {
+                NodeState::Success {
+                    obligation: obligation,
+                    num_incomplete_children: num_incomplete_children,
+                }
+            }
             NodeState::Success { .. } |
-            NodeState::Error =>
-                unreachable!()
+            NodeState::Error => unreachable!(),
         };
     }
 
@@ -358,14 +383,19 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
                     // there was an error in the ancestors, it should
                     // have been propagated down and we should never
                     // have tried to process this obligation
-                    panic!("encountered error in node {:?} when collecting stack trace", p);
+                    panic!("encountered error in node {:?} when collecting stack trace",
+                           p);
                 }
             }
 
             // loop to the parent
             match self.nodes[p].parent {
-                Some(q) => { p = q.get(); }
-                None => { return trace; }
+                Some(q) => {
+                    p = q.get();
+                }
+                None => {
+                    return trace;
+                }
             }
         }
     }
@@ -427,18 +457,19 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
 
         // Pop off all the nodes we killed and extract the success
         // stories.
-        let successful =
-            (0 .. dead_nodes)
-            .map(|_| self.nodes.pop().unwrap())
-            .flat_map(|node| match node.state {
-                NodeState::Error => None,
-                NodeState::Pending { .. } => unreachable!(),
-                NodeState::Success { obligation, num_incomplete_children } => {
-                    assert_eq!(num_incomplete_children, 0);
-                    Some(obligation)
-                }
-            })
-            .collect();
+        let successful = (0..dead_nodes)
+                             .map(|_| self.nodes.pop().unwrap())
+                             .flat_map(|node| {
+                                 match node.state {
+                                     NodeState::Error => None,
+                                     NodeState::Pending { .. } => unreachable!(),
+                                     NodeState::Success { obligation, num_incomplete_children } => {
+                                         assert_eq!(num_incomplete_children, 0);
+                                         Some(obligation)
+                                     }
+                                 }
+                             })
+                             .collect();
 
         // Adjust the various indices, since we compressed things.
         for tree in &mut self.trees {
@@ -484,7 +515,10 @@ pub struct Backtrace<'b, O: 'b> {
 
 impl<'b, O> Backtrace<'b, O> {
     fn new(nodes: &'b [Node<O>], pointer: Option<NodeIndex>) -> Backtrace<'b, O> {
-        Backtrace { nodes: nodes, pointer: pointer }
+        Backtrace {
+            nodes: nodes,
+            pointer: pointer,
+        }
     }
 }
 
@@ -497,9 +531,7 @@ impl<'b, O> Iterator for Backtrace<'b, O> {
             self.pointer = self.nodes[p.get()].parent;
             match self.nodes[p.get()].state {
                 NodeState::Pending { ref obligation } |
-                NodeState::Success { ref obligation, .. } => {
-                    Some(obligation)
-                }
+                NodeState::Success { ref obligation, .. } => Some(obligation),
                 NodeState::Error => {
                     panic!("Backtrace encountered an error.");
                 }
index 465cee0b60cc0d4391e85bd58c080037ce498524..1063bb3611ef3b3140a5ab277ce4a7cd1485a819 100644 (file)
@@ -13,19 +13,16 @@ use std::u32;
 
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
 pub struct NodeIndex {
-    index: NonZero<u32>
+    index: NonZero<u32>,
 }
 
 impl NodeIndex {
     pub fn new(value: usize) -> NodeIndex {
         assert!(value < (u32::MAX as usize));
-        unsafe {
-            NodeIndex { index: NonZero::new((value as u32) + 1) }
-        }
+        unsafe { NodeIndex { index: NonZero::new((value as u32) + 1) } }
     }
 
     pub fn get(self) -> usize {
         (*self.index - 1) as usize
     }
 }
-
index 9a0a4218d4593d662ad1cee5b6217366abf58ab7..a8c24270217bdc5828539c78d43edad826eba81e 100644 (file)
@@ -21,19 +21,23 @@ fn push_pop() {
     //      A |-> A.1
     //        |-> A.2
     //        |-> A.3
-    let Outcome { completed: ok, errors: err, .. } =
-        forest.process_obligations(|obligation, tree, _| {
-            assert_eq!(obligation.chars().next(), tree.chars().next());
-            match *obligation {
-                "A" => Ok(Some(vec!["A.1", "A.2", "A.3"])),
-                "B" => Err("B is for broken"),
-                "C" => Ok(Some(vec![])),
-                _ => unreachable!(),
-            }
-        });
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(|obligation,
+                                                                                 tree,
+                                                                                 _| {
+        assert_eq!(obligation.chars().next(), tree.chars().next());
+        match *obligation {
+            "A" => Ok(Some(vec!["A.1", "A.2", "A.3"])),
+            "B" => Err("B is for broken"),
+            "C" => Ok(Some(vec![])),
+            _ => unreachable!(),
+        }
+    });
     assert_eq!(ok, vec!["C"]);
-    assert_eq!(err, vec![Error {error: "B is for broken",
-                                backtrace: vec!["B"]}]);
+    assert_eq!(err,
+               vec![Error {
+                        error: "B is for broken",
+                        backtrace: vec!["B"],
+                    }]);
 
     // second round: two delays, one success, creating an uneven set of subtasks:
     //      A |-> A.1
@@ -61,33 +65,41 @@ fn push_pop() {
     // propagates to A.3.i, but not D.1 or D.2.
     //      D |-> D.1 |-> D.1.i
     //        |-> D.2 |-> D.2.i
-    let Outcome { completed: ok, errors: err, .. } =
-        forest.process_obligations(|obligation, tree, _| {
-            assert_eq!(obligation.chars().next(), tree.chars().next());
-            match *obligation {
-                "A.1" => Ok(Some(vec![])),
-                "A.2" => Err("A is for apple"),
-                "D.1" => Ok(Some(vec!["D.1.i"])),
-                "D.2" => Ok(Some(vec!["D.2.i"])),
-                _ => unreachable!(),
-            }
-        });
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(|obligation,
+                                                                                 tree,
+                                                                                 _| {
+        assert_eq!(obligation.chars().next(), tree.chars().next());
+        match *obligation {
+            "A.1" => Ok(Some(vec![])),
+            "A.2" => Err("A is for apple"),
+            "D.1" => Ok(Some(vec!["D.1.i"])),
+            "D.2" => Ok(Some(vec!["D.2.i"])),
+            _ => unreachable!(),
+        }
+    });
     assert_eq!(ok, vec!["A.1"]);
-    assert_eq!(err, vec![Error { error: "A is for apple",
-                                 backtrace: vec!["A.2", "A"] }]);
+    assert_eq!(err,
+               vec![Error {
+                        error: "A is for apple",
+                        backtrace: vec!["A.2", "A"],
+                    }]);
 
     // fourth round: error in D.1.i that should propagate to D.2.i
-    let Outcome { completed: ok, errors: err, .. } =
-        forest.process_obligations(|obligation, tree, _| {
-            assert_eq!(obligation.chars().next(), tree.chars().next());
-            match *obligation {
-                "D.1.i" => Err("D is for dumb"),
-                _ => panic!("unexpected obligation {:?}", obligation),
-            }
-        });
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(|obligation,
+                                                                                 tree,
+                                                                                 _| {
+        assert_eq!(obligation.chars().next(), tree.chars().next());
+        match *obligation {
+            "D.1.i" => Err("D is for dumb"),
+            _ => panic!("unexpected obligation {:?}", obligation),
+        }
+    });
     assert_eq!(ok, Vec::<&'static str>::new());
-    assert_eq!(err, vec![Error { error: "D is for dumb",
-                                 backtrace: vec!["D.1.i", "D.1", "D"] }]);
+    assert_eq!(err,
+               vec![Error {
+                        error: "D is for dumb",
+                        backtrace: vec!["D.1.i", "D.1", "D"],
+                    }]);
 }
 
 // Test that if a tree with grandchildren succeeds, everything is
@@ -104,7 +116,7 @@ fn success_in_grandchildren() {
     forest.push_tree("A", "A");
 
     let Outcome { completed: ok, errors: err, .. } =
-        forest.process_obligations::<(),_>(|obligation, tree, _| {
+        forest.process_obligations::<(), _>(|obligation, tree, _| {
             assert_eq!(obligation.chars().next(), tree.chars().next());
             match *obligation {
                 "A" => Ok(Some(vec!["A.1", "A.2", "A.3"])),
@@ -115,7 +127,7 @@ fn success_in_grandchildren() {
     assert!(err.is_empty());
 
     let Outcome { completed: ok, errors: err, .. } =
-        forest.process_obligations::<(),_>(|obligation, tree, _| {
+        forest.process_obligations::<(), _>(|obligation, tree, _| {
             assert_eq!(obligation.chars().next(), tree.chars().next());
             match *obligation {
                 "A.1" => Ok(Some(vec![])),
@@ -128,7 +140,7 @@ fn success_in_grandchildren() {
     assert!(err.is_empty());
 
     let Outcome { completed: ok, errors: err, .. } =
-        forest.process_obligations::<(),_>(|obligation, tree, _| {
+        forest.process_obligations::<(), _>(|obligation, tree, _| {
             assert_eq!(obligation.chars().next(), tree.chars().next());
             match *obligation {
                 "A.2.i" => Ok(Some(vec!["A.2.i.a"])),
@@ -140,7 +152,7 @@ fn success_in_grandchildren() {
     assert!(err.is_empty());
 
     let Outcome { completed: ok, errors: err, .. } =
-        forest.process_obligations::<(),_>(|obligation, tree, _| {
+        forest.process_obligations::<(), _>(|obligation, tree, _| {
             assert_eq!(obligation.chars().next(), tree.chars().next());
             match *obligation {
                 "A.2.i.a" => Ok(Some(vec![])),
@@ -150,8 +162,11 @@ fn success_in_grandchildren() {
     assert_eq!(ok, vec!["A.2.i.a", "A.2.i", "A.2", "A"]);
     assert!(err.is_empty());
 
-    let Outcome { completed: ok, errors: err, .. } =
-        forest.process_obligations::<(),_>(|_, _, _| unreachable!());
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations::<(), _>(|_,
+                                                                                          _,
+                                                                                          _| {
+        unreachable!()
+    });
     assert!(ok.is_empty());
     assert!(err.is_empty());
 }
@@ -163,7 +178,7 @@ fn to_errors_no_throw() {
     let mut forest = ObligationForest::new();
     forest.push_tree("A", "A");
     let Outcome { completed: ok, errors: err, .. } =
-        forest.process_obligations::<(),_>(|obligation, tree, _| {
+        forest.process_obligations::<(), _>(|obligation, tree, _| {
             assert_eq!(obligation.chars().next(), tree.chars().next());
             match *obligation {
                 "A" => Ok(Some(vec!["A.1", "A.2", "A.3"])),
@@ -183,7 +198,7 @@ fn backtrace() {
     let mut forest = ObligationForest::new();
     forest.push_tree("A", "A");
     let Outcome { completed: ok, errors: err, .. } =
-        forest.process_obligations::<(),_>(|obligation, tree, mut backtrace| {
+        forest.process_obligations::<(), _>(|obligation, tree, mut backtrace| {
             assert_eq!(obligation.chars().next(), tree.chars().next());
             assert!(backtrace.next().is_none());
             match *obligation {
@@ -194,7 +209,7 @@ fn backtrace() {
     assert!(ok.is_empty());
     assert!(err.is_empty());
     let Outcome { completed: ok, errors: err, .. } =
-        forest.process_obligations::<(),_>(|obligation, tree, mut backtrace| {
+        forest.process_obligations::<(), _>(|obligation, tree, mut backtrace| {
             assert_eq!(obligation.chars().next(), tree.chars().next());
             assert!(backtrace.next().unwrap() == &"A");
             assert!(backtrace.next().is_none());
@@ -206,7 +221,7 @@ fn backtrace() {
     assert!(ok.is_empty());
     assert!(err.is_empty());
     let Outcome { completed: ok, errors: err, .. } =
-        forest.process_obligations::<(),_>(|obligation, tree, mut backtrace| {
+        forest.process_obligations::<(), _>(|obligation, tree, mut backtrace| {
             assert_eq!(obligation.chars().next(), tree.chars().next());
             assert!(backtrace.next().unwrap() == &"A.1");
             assert!(backtrace.next().unwrap() == &"A");
index a9f5483f45b3e24ad71340cb43df2a58e26f0f7a..499448634acbdbeeea90e6ee39bfdec6371dd5ff 100644 (file)
@@ -12,7 +12,7 @@ use std::u32;
 
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
 pub struct TreeIndex {
-    index: u32
+    index: u32,
 }
 
 impl TreeIndex {
@@ -25,4 +25,3 @@ impl TreeIndex {
         self.index as usize
     }
 }
-
index 5f89856afdbe33507cb5abb66ffb418dfd0eaf81..614e7aae74bbc9bd590c38a0377e2bda142e8cc5 100644 (file)
@@ -23,7 +23,7 @@ use self::UndoLog::*;
 use std::mem;
 use std::ops;
 
-pub enum UndoLog<D:SnapshotVecDelegate> {
+pub enum UndoLog<D: SnapshotVecDelegate> {
     /// Indicates where a snapshot started.
     OpenSnapshot,
 
@@ -37,10 +37,10 @@ pub enum UndoLog<D:SnapshotVecDelegate> {
     SetElem(usize, D::Value),
 
     /// Extensible set of actions
-    Other(D::Undo)
+    Other(D::Undo),
 }
 
-pub struct SnapshotVec<D:SnapshotVecDelegate> {
+pub struct SnapshotVec<D: SnapshotVecDelegate> {
     values: Vec<D::Value>,
     undo_log: Vec<UndoLog<D>>,
 }
@@ -58,7 +58,7 @@ pub trait SnapshotVecDelegate {
     fn reverse(values: &mut Vec<Self::Value>, action: Self::Undo);
 }
 
-impl<D:SnapshotVecDelegate> SnapshotVec<D> {
+impl<D: SnapshotVecDelegate> SnapshotVec<D> {
     pub fn new() -> SnapshotVec<D> {
         SnapshotVec {
             values: Vec::new(),
@@ -117,9 +117,7 @@ impl<D:SnapshotVecDelegate> SnapshotVec<D> {
         Snapshot { length: length }
     }
 
-    pub fn actions_since_snapshot(&self,
-                                  snapshot: &Snapshot)
-                                  -> &[UndoLog<D>] {
+    pub fn actions_since_snapshot(&self, snapshot: &Snapshot) -> &[UndoLog<D>] {
         &self.undo_log[snapshot.length..]
     }
 
@@ -128,11 +126,10 @@ impl<D:SnapshotVecDelegate> SnapshotVec<D> {
         assert!(self.undo_log.len() > snapshot.length);
 
         // Invariant established by start_snapshot():
-        assert!(
-            match self.undo_log[snapshot.length] {
-                OpenSnapshot => true,
-                _ => false
-            });
+        assert!(match self.undo_log[snapshot.length] {
+            OpenSnapshot => true,
+            _ => false,
+        });
     }
 
     pub fn rollback_to(&mut self, snapshot: Snapshot) {
@@ -168,7 +165,10 @@ impl<D:SnapshotVecDelegate> SnapshotVec<D> {
         }
 
         let v = self.undo_log.pop().unwrap();
-        assert!(match v { OpenSnapshot => true, _ => false });
+        assert!(match v {
+            OpenSnapshot => true,
+            _ => false,
+        });
         assert!(self.undo_log.len() == snapshot.length);
     }
 
@@ -188,20 +188,28 @@ impl<D:SnapshotVecDelegate> SnapshotVec<D> {
     }
 }
 
-impl<D:SnapshotVecDelegate> ops::Deref for SnapshotVec<D> {
+impl<D: SnapshotVecDelegate> ops::Deref for SnapshotVec<D> {
     type Target = [D::Value];
-    fn deref(&self) -> &[D::Value] { &*self.values }
+    fn deref(&self) -> &[D::Value] {
+        &*self.values
+    }
 }
 
-impl<D:SnapshotVecDelegate> ops::DerefMut for SnapshotVec<D> {
-    fn deref_mut(&mut self) -> &mut [D::Value] { &mut *self.values }
+impl<D: SnapshotVecDelegate> ops::DerefMut for SnapshotVec<D> {
+    fn deref_mut(&mut self) -> &mut [D::Value] {
+        &mut *self.values
+    }
 }
 
-impl<D:SnapshotVecDelegate> ops::Index<usize> for SnapshotVec<D> {
+impl<D: SnapshotVecDelegate> ops::Index<usize> for SnapshotVec<D> {
     type Output = D::Value;
-    fn index(&self, index: usize) -> &D::Value { self.get(index) }
+    fn index(&self, index: usize) -> &D::Value {
+        self.get(index)
+    }
 }
 
-impl<D:SnapshotVecDelegate> ops::IndexMut<usize> for SnapshotVec<D> {
-    fn index_mut(&mut self, index: usize) -> &mut D::Value { self.get_mut(index) }
+impl<D: SnapshotVecDelegate> ops::IndexMut<usize> for SnapshotVec<D> {
+    fn index_mut(&mut self, index: usize) -> &mut D::Value {
+        self.get_mut(index)
+    }
 }
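
The hunks above mostly reformat rustc_data_structures::snapshot_vec, whose core idea is an undo log: mutations made while a snapshot is open are recorded so that rollback_to can replay them in reverse, while committing simply discards the log entries. A rough, self-contained sketch of that pattern follows (hypothetical names; nested snapshots and the CommittedSnapshot bookkeeping of the real code are ignored):

// Minimal sketch of an undo-log based snapshot vector (illustrative only).
enum Undo {
    OpenSnapshot,   // marks where a snapshot started
    NewElem(usize), // an element was pushed at this index
}

struct MiniSnapshotVec<T> {
    values: Vec<T>,
    undo_log: Vec<Undo>,
}

struct Snapshot {
    length: usize, // position of the OpenSnapshot entry in the undo log
}

impl<T> MiniSnapshotVec<T> {
    fn new() -> Self {
        MiniSnapshotVec { values: Vec::new(), undo_log: Vec::new() }
    }

    fn push(&mut self, value: T) -> usize {
        let index = self.values.len();
        self.values.push(value);
        if !self.undo_log.is_empty() {
            // Only record undo actions while a snapshot is open.
            self.undo_log.push(Undo::NewElem(index));
        }
        index
    }

    fn start_snapshot(&mut self) -> Snapshot {
        let length = self.undo_log.len();
        self.undo_log.push(Undo::OpenSnapshot);
        Snapshot { length }
    }

    fn rollback_to(&mut self, snapshot: Snapshot) {
        // Replay the undo log in reverse, down to (and including) OpenSnapshot.
        while self.undo_log.len() > snapshot.length {
            match self.undo_log.pop().unwrap() {
                Undo::OpenSnapshot => {}
                Undo::NewElem(i) => {
                    self.values.pop();
                    debug_assert_eq!(self.values.len(), i);
                }
            }
        }
    }

    fn commit(&mut self, snapshot: Snapshot) {
        // Keep the changes: just drop the log entries made since the snapshot.
        self.undo_log.truncate(snapshot.length);
    }
}

fn main() {
    let mut v = MiniSnapshotVec::new();
    v.push("a");
    let snap = v.start_snapshot();
    v.push("b");
    v.rollback_to(snap);
    assert_eq!(v.values.len(), 1); // "b" was undone, "a" remains
}
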
index 7ea5cb8721d5932153df7dd852868b806499220e..c3a2f978e1a8a4945f39ed8c091edbe5857ef762 100644 (file)
@@ -14,7 +14,7 @@ use std::fmt::Debug;
 use std::mem;
 
 #[derive(Clone)]
-pub struct TransitiveRelation<T:Debug+PartialEq> {
+pub struct TransitiveRelation<T: Debug + PartialEq> {
     // List of elements. This is used to map from a T to a usize.  We
     // expect domain to be small so just use a linear list versus a
     // hashmap or something.
@@ -33,7 +33,7 @@ pub struct TransitiveRelation<T:Debug+PartialEq> {
     // are added with new elements. Perhaps better would be to ask the
     // user for a batch of edges to minimize this effect, but I
     // already wrote the code this way. :P -nmatsakis
-    closure: RefCell<Option<BitMatrix>>
+    closure: RefCell<Option<BitMatrix>>,
 }
 
 #[derive(Clone, PartialEq, PartialOrd)]
@@ -45,11 +45,13 @@ struct Edge {
     target: Index,
 }
 
-impl<T:Debug+PartialEq> TransitiveRelation<T> {
+impl<T: Debug + PartialEq> TransitiveRelation<T> {
     pub fn new() -> TransitiveRelation<T> {
-        TransitiveRelation { elements: vec![],
-                             edges: vec![],
-                             closure: RefCell::new(None) }
+        TransitiveRelation {
+            elements: vec![],
+            edges: vec![],
+            closure: RefCell::new(None),
+        }
     }
 
     fn index(&self, a: &T) -> Option<Index> {
@@ -74,7 +76,10 @@ impl<T:Debug+PartialEq> TransitiveRelation<T> {
     pub fn add(&mut self, a: T, b: T) {
         let a = self.add_index(a);
         let b = self.add_index(b);
-        let edge = Edge { source: a, target: b };
+        let edge = Edge {
+            source: a,
+            target: b,
+        };
         if !self.edges.contains(&edge) {
             self.edges.push(edge);
 
@@ -86,10 +91,8 @@ impl<T:Debug+PartialEq> TransitiveRelation<T> {
     /// Check whether `a < target` (transitively)
     pub fn contains(&self, a: &T, b: &T) -> bool {
         match (self.index(a), self.index(b)) {
-            (Some(a), Some(b)) =>
-                self.with_closure(|closure| closure.contains(a.0, b.0)),
-            (None, _) | (_, None) =>
-                false,
+            (Some(a), Some(b)) => self.with_closure(|closure| closure.contains(a.0, b.0)),
+            (None, _) | (_, None) => false,
         }
     }
 
@@ -156,7 +159,9 @@ impl<T:Debug+PartialEq> TransitiveRelation<T> {
     pub fn minimal_upper_bounds(&self, a: &T, b: &T) -> Vec<&T> {
         let (mut a, mut b) = match (self.index(a), self.index(b)) {
             (Some(a), Some(b)) => (a, b),
-            (None, _) | (_, None) => { return vec![]; }
+            (None, _) | (_, None) => {
+                return vec![];
+            }
         };
 
         // in some cases, there are some arbitrary choices to be made;
@@ -233,7 +238,7 @@ impl<T:Debug+PartialEq> TransitiveRelation<T> {
                    .collect()
     }
 
-    fn with_closure<OP,R>(&self, op: OP) -> R
+    fn with_closure<OP, R>(&self, op: OP) -> R
         where OP: FnOnce(&BitMatrix) -> R
     {
         let mut closure_cell = self.closure.borrow_mut();
@@ -431,14 +436,15 @@ fn pdub_crisscross() {
     // b -> b1 ---+
 
     let mut relation = TransitiveRelation::new();
-    relation.add("a",  "a1");
-    relation.add("a",  "b1");
-    relation.add("b",  "a1");
-    relation.add("b",  "b1");
+    relation.add("a", "a1");
+    relation.add("a", "b1");
+    relation.add("b", "a1");
+    relation.add("b", "b1");
     relation.add("a1", "x");
     relation.add("b1", "x");
 
-    assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"a1", &"b1"]);
+    assert_eq!(relation.minimal_upper_bounds(&"a", &"b"),
+               vec![&"a1", &"b1"]);
     assert_eq!(relation.postdom_upper_bound(&"a", &"b"), Some(&"x"));
 }
 
@@ -451,23 +457,25 @@ fn pdub_crisscross_more() {
     // b -> b1 -> b2 ---------+
 
     let mut relation = TransitiveRelation::new();
-    relation.add("a",  "a1");
-    relation.add("a",  "b1");
-    relation.add("b",  "a1");
-    relation.add("b",  "b1");
+    relation.add("a", "a1");
+    relation.add("a", "b1");
+    relation.add("b", "a1");
+    relation.add("b", "b1");
 
-    relation.add("a1",  "a2");
-    relation.add("a1",  "b2");
-    relation.add("b1",  "a2");
-    relation.add("b1",  "b2");
+    relation.add("a1", "a2");
+    relation.add("a1", "b2");
+    relation.add("b1", "a2");
+    relation.add("b1", "b2");
 
     relation.add("a2", "a3");
 
     relation.add("a3", "x");
     relation.add("b2", "x");
 
-    assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"a1", &"b1"]);
-    assert_eq!(relation.minimal_upper_bounds(&"a1", &"b1"), vec![&"a2", &"b2"]);
+    assert_eq!(relation.minimal_upper_bounds(&"a", &"b"),
+               vec![&"a1", &"b1"]);
+    assert_eq!(relation.minimal_upper_bounds(&"a1", &"b1"),
+               vec![&"a2", &"b2"]);
     assert_eq!(relation.postdom_upper_bound(&"a", &"b"), Some(&"x"));
 }
 
@@ -479,8 +487,8 @@ fn pdub_lub() {
     // b -> b1 ---+
 
     let mut relation = TransitiveRelation::new();
-    relation.add("a",  "a1");
-    relation.add("b",  "b1");
+    relation.add("a", "a1");
+    relation.add("b", "b1");
     relation.add("a1", "x");
     relation.add("b1", "x");
 
@@ -497,9 +505,9 @@ fn mubs_intermediate_node_on_one_side_only() {
 
     // "digraph { a -> c -> d; b -> d; }",
     let mut relation = TransitiveRelation::new();
-    relation.add("a",  "c");
-    relation.add("c",  "d");
-    relation.add("b",  "d");
+    relation.add("a", "c");
+    relation.add("c", "d");
+    relation.add("b", "d");
 
     assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"d"]);
 }
@@ -516,11 +524,11 @@ fn mubs_scc_1() {
 
     // "digraph { a -> c -> d; d -> c; a -> d; b -> d; }",
     let mut relation = TransitiveRelation::new();
-    relation.add("a",  "c");
-    relation.add("c",  "d");
-    relation.add("d",  "c");
-    relation.add("a",  "d");
-    relation.add("b",  "d");
+    relation.add("a", "c");
+    relation.add("c", "d");
+    relation.add("d", "c");
+    relation.add("a", "d");
+    relation.add("b", "d");
 
     assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]);
 }
@@ -536,11 +544,11 @@ fn mubs_scc_2() {
 
     // "digraph { a -> c -> d; d -> c; b -> d; b -> c; }",
     let mut relation = TransitiveRelation::new();
-    relation.add("a",  "c");
-    relation.add("c",  "d");
-    relation.add("d",  "c");
-    relation.add("b",  "d");
-    relation.add("b",  "c");
+    relation.add("a", "c");
+    relation.add("c", "d");
+    relation.add("d", "c");
+    relation.add("b", "d");
+    relation.add("b", "c");
 
     assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]);
 }
@@ -556,12 +564,12 @@ fn mubs_scc_3() {
 
     // "digraph { a -> c -> d -> e -> c; b -> d; b -> e; }",
     let mut relation = TransitiveRelation::new();
-    relation.add("a",  "c");
-    relation.add("c",  "d");
-    relation.add("d",  "e");
-    relation.add("e",  "c");
-    relation.add("b",  "d");
-    relation.add("b",  "e");
+    relation.add("a", "c");
+    relation.add("c", "d");
+    relation.add("d", "e");
+    relation.add("e", "c");
+    relation.add("b", "d");
+    relation.add("b", "e");
 
     assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]);
 }
@@ -578,12 +586,12 @@ fn mubs_scc_4() {
 
     // "digraph { a -> c -> d -> e -> c; a -> d; b -> e; }"
     let mut relation = TransitiveRelation::new();
-    relation.add("a",  "c");
-    relation.add("c",  "d");
-    relation.add("d",  "e");
-    relation.add("e",  "c");
-    relation.add("a",  "d");
-    relation.add("b",  "e");
+    relation.add("a", "c");
+    relation.add("c", "d");
+    relation.add("d", "e");
+    relation.add("e", "c");
+    relation.add("a", "d");
+    relation.add("b", "e");
 
     assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]);
 }
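
The tests above query minimal_upper_bounds and postdom_upper_bound over the transitive closure that TransitiveRelation caches in its closure: RefCell<Option<BitMatrix>> field. A rough sketch of how such a reachability closure can be computed (illustrative only, using a plain Vec<Vec<bool>> in place of rustc's BitMatrix):

// Floyd–Warshall style reachability closure over a small edge list,
// analogous to what TransitiveRelation caches in its bit matrix.
fn transitive_closure(n: usize, edges: &[(usize, usize)]) -> Vec<Vec<bool>> {
    let mut reach = vec![vec![false; n]; n];
    for i in 0..n {
        reach[i][i] = true; // every node reaches itself
    }
    for &(src, dst) in edges {
        reach[src][dst] = true;
    }
    // reach[i][j] becomes true if j is reachable from i through any k.
    for k in 0..n {
        for i in 0..n {
            if reach[i][k] {
                for j in 0..n {
                    if reach[k][j] {
                        reach[i][j] = true;
                    }
                }
            }
        }
    }
    reach
}

fn main() {
    // "digraph { a -> c -> d; b -> d; }" with a=0, b=1, c=2, d=3
    let reach = transitive_closure(4, &[(0, 2), (2, 3), (1, 3)]);
    assert!(reach[0][3]);  // a reaches d transitively
    assert!(!reach[1][2]); // b does not reach c
}
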
index f157d82eda12b9d7d9d05dace89223d0f13a05f5..9a90ab8c09d44c58499da17b571a5388886980ac 100644 (file)
@@ -36,13 +36,13 @@ macro_rules! impl_tuple_slice {
     }
 }
 
-impl_tuple_slice!((T,T), 2);
-impl_tuple_slice!((T,T,T), 3);
-impl_tuple_slice!((T,T,T,T), 4);
-impl_tuple_slice!((T,T,T,T,T), 5);
-impl_tuple_slice!((T,T,T,T,T,T), 6);
-impl_tuple_slice!((T,T,T,T,T,T,T), 7);
-impl_tuple_slice!((T,T,T,T,T,T,T,T), 8);
+impl_tuple_slice!((T, T), 2);
+impl_tuple_slice!((T, T, T), 3);
+impl_tuple_slice!((T, T, T, T), 4);
+impl_tuple_slice!((T, T, T, T, T), 5);
+impl_tuple_slice!((T, T, T, T, T, T), 6);
+impl_tuple_slice!((T, T, T, T, T, T, T), 7);
+impl_tuple_slice!((T, T, T, T, T, T, T, T), 8);
 
 #[test]
 fn test_sliced_tuples() {
index c6da70eef750a89f0947b0388bb2ba650f396ee4..3feea3218d0138b95100fe70211841692509f0a7 100644 (file)
@@ -56,21 +56,21 @@ impl Combine for () {
 /// time of the algorithm under control. For more information, see
 /// <http://en.wikipedia.org/wiki/Disjoint-set_data_structure>.
 #[derive(PartialEq,Clone,Debug)]
-pub struct VarValue<K:UnifyKey> {
-    parent: K,       // if equal to self, this is a root
+pub struct VarValue<K: UnifyKey> {
+    parent: K, // if equal to self, this is a root
     value: K::Value, // value assigned (only relevant to root)
-    rank: u32,       // max depth (only relevant to root)
+    rank: u32, // max depth (only relevant to root)
 }
 
 /// Table of unification keys and their values.
-pub struct UnificationTable<K:UnifyKey> {
+pub struct UnificationTable<K: UnifyKey> {
     /// Indicates the current value of each key.
     values: sv::SnapshotVec<Delegate<K>>,
 }
 
 /// At any time, users may snapshot a unification table.  The changes
 /// made during the snapshot may either be *committed* or *rolled back*.
-pub struct Snapshot<K:UnifyKey> {
+pub struct Snapshot<K: UnifyKey> {
     // Link snapshot to the key type `K` of the table.
     marker: marker::PhantomData<K>,
     snapshot: sv::Snapshot,
@@ -79,15 +79,17 @@ pub struct Snapshot<K:UnifyKey> {
 #[derive(Copy, Clone)]
 struct Delegate<K>(PhantomData<K>);
 
-impl<K:UnifyKey> VarValue<K> {
+impl<K: UnifyKey> VarValue<K> {
     fn new_var(key: K, value: K::Value) -> VarValue<K> {
         VarValue::new(key, value, 0)
     }
 
     fn new(parent: K, value: K::Value, rank: u32) -> VarValue<K> {
-        VarValue { parent: parent, // this is a root
-                   value: value,
-                   rank: rank }
+        VarValue {
+            parent: parent, // this is a root
+            value: value,
+            rank: rank,
+        }
     }
 
     fn redirect(self, to: K) -> VarValue<K> {
@@ -95,7 +97,11 @@ impl<K:UnifyKey> VarValue<K> {
     }
 
     fn root(self, rank: u32, value: K::Value) -> VarValue<K> {
-        VarValue { rank: rank, value: value, ..self }
+        VarValue {
+            rank: rank,
+            value: value,
+            ..self
+        }
     }
 
     /// Returns the key of this node. Only valid if this is a root
@@ -122,18 +128,18 @@ impl<K:UnifyKey> VarValue<K> {
 // other type parameter U, and we have no way to say
 // Option<U>:LatticeValue.
 
-impl<K:UnifyKey> UnificationTable<K> {
+impl<K: UnifyKey> UnificationTable<K> {
     pub fn new() -> UnificationTable<K> {
-        UnificationTable {
-            values: sv::SnapshotVec::new()
-        }
+        UnificationTable { values: sv::SnapshotVec::new() }
     }
 
     /// Starts a new snapshot. Each snapshot must be either
     /// rolled back or committed in a "LIFO" (stack) order.
     pub fn snapshot(&mut self) -> Snapshot<K> {
-        Snapshot { marker: marker::PhantomData::<K>,
-                   snapshot: self.values.start_snapshot() }
+        Snapshot {
+            marker: marker::PhantomData::<K>,
+            snapshot: self.values.start_snapshot(),
+        }
     }
 
     /// Reverses all changes since the last snapshot. Also
@@ -154,9 +160,7 @@ impl<K:UnifyKey> UnificationTable<K> {
         let len = self.values.len();
         let key: K = UnifyKey::from_index(len as u32);
         self.values.push(VarValue::new_var(key, value));
-        debug!("{}: created new key: {:?}",
-               UnifyKey::tag(None::<K>),
-               key);
+        debug!("{}: created new key: {:?}", UnifyKey::tag(None::<K>), key);
         key
     }
 
@@ -179,9 +183,7 @@ impl<K:UnifyKey> UnificationTable<K> {
                 }
                 root
             }
-            None => {
-                value
-            }
+            None => value,
         }
     }
 
@@ -195,8 +197,7 @@ impl<K:UnifyKey> UnificationTable<K> {
     fn set(&mut self, key: K, new_value: VarValue<K>) {
         assert!(self.is_root(key));
 
-        debug!("Updating variable {:?} to {:?}",
-               key, new_value);
+        debug!("Updating variable {:?} to {:?}", key, new_value);
 
         let index = key.index() as usize;
         self.values.set(index, new_value);
@@ -210,7 +211,7 @@ impl<K:UnifyKey> UnificationTable<K> {
     /// really more of a building block. If the values associated with
     /// your key are non-trivial, you would probably prefer to call
     /// `unify_var_var` below.
-    fn unify(&mut self, root_a: VarValue<K>, root_b: VarValue<K>, new_value: K::Value) {
+    fn unify(&mut self, root_a: VarValue<K>, root_b: VarValue<K>, new_value: K::Value) -> K {
         debug!("unify(root_a(id={:?}, rank={:?}), root_b(id={:?}, rank={:?}))",
                root_a.key(),
                root_a.rank,
@@ -220,14 +221,14 @@ impl<K:UnifyKey> UnificationTable<K> {
         if root_a.rank > root_b.rank {
             // a has greater rank, so a should become b's parent,
             // i.e., b should redirect to a.
-            self.redirect_root(root_a.rank, root_b, root_a, new_value);
+            self.redirect_root(root_a.rank, root_b, root_a, new_value)
         } else if root_a.rank < root_b.rank {
             // b has greater rank, so a should redirect to b.
-            self.redirect_root(root_b.rank, root_a, root_b, new_value);
+            self.redirect_root(root_b.rank, root_a, root_b, new_value)
         } else {
             // If equal, redirect one to the other and increment the
             // other's rank.
-            self.redirect_root(root_a.rank + 1, root_a, root_b, new_value);
+            self.redirect_root(root_a.rank + 1, root_a, root_b, new_value)
         }
     }
 
@@ -235,35 +236,37 @@ impl<K:UnifyKey> UnificationTable<K> {
                      new_rank: u32,
                      old_root: VarValue<K>,
                      new_root: VarValue<K>,
-                     new_value: K::Value) {
+                     new_value: K::Value) -> K {
         let old_root_key = old_root.key();
         let new_root_key = new_root.key();
         self.set(old_root_key, old_root.redirect(new_root_key));
         self.set(new_root_key, new_root.root(new_rank, new_value));
+        new_root_key
     }
 }
 
-impl<K:UnifyKey> sv::SnapshotVecDelegate for Delegate<K> {
+impl<K: UnifyKey> sv::SnapshotVecDelegate for Delegate<K> {
     type Value = VarValue<K>;
     type Undo = ();
 
     fn reverse(_: &mut Vec<VarValue<K>>, _: ()) {}
 }
 
-///////////////////////////////////////////////////////////////////////////
-// Base union-find algorithm, where we are just making sets
+// # Base union-find algorithm, where we are just making sets
 
-impl<'tcx,K:UnifyKey> UnificationTable<K>
+impl<'tcx, K: UnifyKey> UnificationTable<K>
     where K::Value: Combine
 {
-    pub fn union(&mut self, a_id: K, b_id: K) {
+    pub fn union(&mut self, a_id: K, b_id: K) -> K {
         let node_a = self.get(a_id);
         let node_b = self.get(b_id);
         let a_id = node_a.key();
         let b_id = node_b.key();
         if a_id != b_id {
             let new_value = node_a.value.combine(&node_b.value);
-            self.unify(node_a, node_b, new_value);
+            self.unify(node_a, node_b, new_value)
+        } else {
+            a_id
         }
     }
 
@@ -280,35 +283,30 @@ impl<'tcx,K:UnifyKey> UnificationTable<K>
     }
 }
 
-///////////////////////////////////////////////////////////////////////////
+// # Non-subtyping unification
+//
 // Code to handle keys which carry a value, like ints,
 // floats---anything that doesn't have a subtyping relationship we
 // need to worry about.
 
-impl<'tcx,K,V> UnificationTable<K>
-    where K: UnifyKey<Value=Option<V>>,
-          V: Clone+PartialEq+Debug,
+impl<'tcx, K, V> UnificationTable<K>
+    where K: UnifyKey<Value = Option<V>>,
+          V: Clone + PartialEq + Debug
 {
-    pub fn unify_var_var(&mut self,
-                         a_id: K,
-                         b_id: K)
-                         -> Result<(),(V,V)>
-    {
+    pub fn unify_var_var(&mut self, a_id: K, b_id: K) -> Result<K, (V, V)> {
         let node_a = self.get(a_id);
         let node_b = self.get(b_id);
         let a_id = node_a.key();
         let b_id = node_b.key();
 
-        if a_id == b_id { return Ok(()); }
+        if a_id == b_id {
+            return Ok(a_id);
+        }
 
         let combined = {
             match (&node_a.value, &node_b.value) {
-                (&None, &None) => {
-                    None
-                }
-                (&Some(ref v), &None) | (&None, &Some(ref v)) => {
-                    Some(v.clone())
-                }
+                (&None, &None) => None,
+                (&Some(ref v), &None) | (&None, &Some(ref v)) => Some(v.clone()),
                 (&Some(ref v1), &Some(ref v2)) => {
                     if *v1 != *v2 {
                         return Err((v1.clone(), v2.clone()));
@@ -323,11 +321,7 @@ impl<'tcx,K,V> UnificationTable<K>
 
     /// Sets the value of the key `a_id` to `b`. Because simple keys do not have any subtyping
     /// relationships, if `a_id` already has a value, it must be the same as `b`.
-    pub fn unify_var_value(&mut self,
-                           a_id: K,
-                           b: V)
-                           -> Result<(),(V,V)>
-    {
+    pub fn unify_var_value(&mut self, a_id: K, b: V) -> Result<(), (V, V)> {
         let mut node_a = self.get(a_id);
 
         match node_a.value {
@@ -358,7 +352,13 @@ impl<'tcx,K,V> UnificationTable<K>
     pub fn unsolved_variables(&mut self) -> Vec<K> {
         self.values
             .iter()
-            .filter_map(|vv| if vv.value.is_some() { None } else { Some(vv.key()) })
+            .filter_map(|vv| {
+                if vv.value.is_some() {
+                    None
+                } else {
+                    Some(vv.key())
+                }
+            })
             .collect()
     }
 }
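
The unification table reformatted above is a union-find with union by rank; the behavioural change in this hunk is that unify and redirect_root now return the key of the new root, so union and unify_var_var can report which key ended up as the representative. A standalone sketch of the same rank rule and return value (hypothetical, not the UnifyKey API):

// Minimal union-find with union by rank and path compression (illustrative only).
struct UnionFind {
    parent: Vec<usize>, // parent[i] == i means i is a root
    rank: Vec<u32>,     // upper bound on tree depth, only meaningful for roots
}

impl UnionFind {
    fn new(n: usize) -> Self {
        UnionFind { parent: (0..n).collect(), rank: vec![0; n] }
    }

    fn find(&mut self, k: usize) -> usize {
        let p = self.parent[k];
        if p == k {
            return k;
        }
        let root = self.find(p);
        self.parent[k] = root; // path compression
        root
    }

    // Returns the key of the new root, mirroring the change to `unify` above.
    fn union(&mut self, a: usize, b: usize) -> usize {
        let (ra, rb) = (self.find(a), self.find(b));
        if ra == rb {
            return ra;
        }
        if self.rank[ra] > self.rank[rb] {
            self.parent[rb] = ra; // a has greater rank: b redirects to a
            ra
        } else if self.rank[ra] < self.rank[rb] {
            self.parent[ra] = rb; // b has greater rank: a redirects to b
            rb
        } else {
            self.parent[ra] = rb; // equal ranks: pick b and bump its rank
            self.rank[rb] += 1;
            rb
        }
    }
}

fn main() {
    let mut uf = UnionFind::new(4);
    let root = uf.union(0, 1);
    assert_eq!(uf.find(0), uf.find(1));
    assert_eq!(uf.find(0), root);
}
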
index 089e629a569d3ff43d39b4f73ccd09016c3becbb..f29a7132e831b3f0e926968ce0d4fc6d843200fe 100644 (file)
@@ -19,9 +19,15 @@ struct UnitKey(u32);
 
 impl UnifyKey for UnitKey {
     type Value = ();
-    fn index(&self) -> u32 { self.0 }
-    fn from_index(u: u32) -> UnitKey { UnitKey(u) }
-    fn tag(_: Option<UnitKey>) -> &'static str { "UnitKey" }
+    fn index(&self) -> u32 {
+        self.0
+    }
+    fn from_index(u: u32) -> UnitKey {
+        UnitKey(u)
+    }
+    fn tag(_: Option<UnitKey>) -> &'static str {
+        "UnitKey"
+    }
 }
 
 #[test]
@@ -45,7 +51,7 @@ fn big_array() {
     }
 
     for i in 1..MAX {
-        let l = keys[i-1];
+        let l = keys[i - 1];
         let r = keys[i];
         ut.union(l, r);
     }
@@ -68,7 +74,7 @@ fn big_array_bench(b: &mut Bencher) {
 
     b.iter(|| {
         for i in 1..MAX {
-            let l = keys[i-1];
+            let l = keys[i - 1];
             let r = keys[i];
             ut.union(l, r);
         }
@@ -90,16 +96,16 @@ fn even_odd() {
         keys.push(key);
 
         if i >= 2 {
-            ut.union(key, keys[i-2]);
+            ut.union(key, keys[i - 2]);
         }
     }
 
     for i in 1..MAX {
-        assert!(!ut.unioned(keys[i-1], keys[i]));
+        assert!(!ut.unioned(keys[i - 1], keys[i]));
     }
 
     for i in 2..MAX {
-        assert!(ut.unioned(keys[i-2], keys[i]));
+        assert!(ut.unioned(keys[i - 2], keys[i]));
     }
 }
 
@@ -108,9 +114,15 @@ struct IntKey(u32);
 
 impl UnifyKey for IntKey {
     type Value = Option<i32>;
-    fn index(&self) -> u32 { self.0 }
-    fn from_index(u: u32) -> IntKey { IntKey(u) }
-    fn tag(_: Option<IntKey>) -> &'static str { "IntKey" }
+    fn index(&self) -> u32 {
+        self.0
+    }
+    fn from_index(u: u32) -> IntKey {
+        IntKey(u)
+    }
+    fn tag(_: Option<IntKey>) -> &'static str {
+        "IntKey"
+    }
 }
 
 /// Test unifying a key whose value is `Some(_)`  with a key whose value is `None`.
@@ -191,4 +203,3 @@ fn unify_key_Some_x_val_x() {
     assert!(ut.unify_var_value(k1, 22).is_ok());
     assert_eq!(ut.probe(k1), Some(22));
 }
-
index 008642d9d6567e329239768b495c2f777039257f..054eee8829a4a73e0b38599ba2b3b058359b0b63 100644 (file)
@@ -12,11 +12,11 @@ use std::cell::UnsafeCell;
 use std::mem;
 
 pub struct VecCell<T> {
-    data: UnsafeCell<Vec<T>>
+    data: UnsafeCell<Vec<T>>,
 }
 
 impl<T> VecCell<T> {
-    pub fn with_capacity(capacity: usize) -> VecCell<T>{
+    pub fn with_capacity(capacity: usize) -> VecCell<T> {
         VecCell { data: UnsafeCell::new(Vec::with_capacity(capacity)) }
     }
 
index bf86e8f039ad4994fa22e9212dc5545ec148fd2f..4533946d26ea404fb0d9daae4d4e584cb9f96357 100644 (file)
@@ -11,20 +11,21 @@ crate-type = ["dylib"]
 [dependencies]
 arena = { path = "../libarena" }
 flate = { path = "../libflate" }
-getopts = { path = "../libgetopts" }
 graphviz = { path = "../libgraphviz" }
 log = { path = "../liblog" }
 rustc = { path = "../librustc" }
 rustc_back = { path = "../librustc_back" }
 rustc_borrowck = { path = "../librustc_borrowck" }
-rustc_front = { path = "../librustc_front" }
+rustc_const_eval = { path = "../librustc_const_eval" }
 rustc_lint = { path = "../librustc_lint" }
 rustc_llvm = { path = "../librustc_llvm" }
 rustc_mir = { path = "../librustc_mir" }
 rustc_plugin = { path = "../librustc_plugin" }
 rustc_passes = { path = "../librustc_passes" }
 rustc_privacy = { path = "../librustc_privacy" }
+rustc_incremental = { path = "../librustc_incremental" }
 rustc_resolve = { path = "../librustc_resolve" }
+rustc_save_analysis = { path = "../librustc_save_analysis" }
 rustc_trans = { path = "../librustc_trans" }
 rustc_typeck = { path = "../librustc_typeck" }
 rustc_metadata = { path = "../librustc_metadata" }
index 5c7b04ccedacbbd29b3312bc5fb64663769170fe..67c52bb6c36d780eb73719cf3b3a1693edb3bf11 100644 (file)
@@ -9,34 +9,36 @@
 // except according to those terms.
 
 use rustc::dep_graph::DepGraph;
-use rustc::front;
-use rustc::front::map as hir_map;
+use rustc::hir;
+use rustc::hir::map as hir_map;
 use rustc_mir as mir;
 use rustc::mir::mir_map::MirMap;
 use rustc::session::{Session, CompileResult, compile_result_from_err_count};
 use rustc::session::config::{self, Input, OutputFilenames, OutputType};
 use rustc::session::search_paths::PathKind;
 use rustc::lint;
-use rustc::middle::{dependency_format, stability, ty, reachable};
+use rustc::middle::{self, dependency_format, stability, reachable};
 use rustc::middle::privacy::AccessLevels;
-use rustc::middle;
+use rustc::ty::{self, TyCtxt};
 use rustc::util::common::time;
 use rustc::util::nodemap::NodeSet;
+use rustc_back::sha2::{Sha256, Digest};
 use rustc_borrowck as borrowck;
+use rustc_incremental;
 use rustc_resolve as resolve;
 use rustc_metadata::macro_import;
 use rustc_metadata::creader::LocalCrateReader;
 use rustc_metadata::cstore::CStore;
 use rustc_trans::back::link;
 use rustc_trans::back::write;
-use rustc_trans::trans;
+use rustc_trans as trans;
 use rustc_typeck as typeck;
 use rustc_privacy;
 use rustc_plugin::registry::Registry;
 use rustc_plugin as plugin;
-use rustc_front::hir;
-use rustc_front::lowering::{lower_crate, LoweringContext};
+use rustc::hir::lowering::{lower_crate, LoweringContext};
 use rustc_passes::{no_asm, loops, consts, const_fn, rvalues, static_recursion};
+use rustc_const_eval::check_match;
 use super::Compilation;
 
 use serialize::json;
@@ -48,12 +50,10 @@ use std::fs;
 use std::io::{self, Write};
 use std::path::{Path, PathBuf};
 use syntax::ast::{self, NodeIdAssigner};
-use syntax::attr;
-use syntax::attr::AttrMetaMethods;
+use syntax::attr::{self, AttrMetaMethods};
 use syntax::diagnostics;
 use syntax::fold::Folder;
-use syntax::parse;
-use syntax::parse::token;
+use syntax::parse::{self, PResult, token};
 use syntax::util::node_count::NodeCounter;
 use syntax::visit;
 use syntax;
@@ -86,7 +86,13 @@ pub fn compile_input(sess: &Session,
     // possible to keep the peak memory usage low
     let (outputs, trans) = {
         let (outputs, expanded_crate, id) = {
-            let krate = phase_1_parse_input(sess, cfg, input);
+            let krate = match phase_1_parse_input(sess, cfg, input) {
+                Ok(krate) => krate,
+                Err(mut parse_error) => {
+                    parse_error.emit();
+                    return Err(1);
+                }
+            };
 
             controller_entry_point!(after_parse,
                                     sess,
@@ -95,11 +101,11 @@ pub fn compile_input(sess: &Session,
 
             let outputs = build_output_filenames(input, outdir, output, &krate.attrs, sess);
             let id = link::find_crate_name(Some(sess), &krate.attrs, input);
-            let expanded_crate = try!(phase_2_configure_and_expand(sess,
-                                                                   &cstore,
-                                                                   krate,
-                                                                   &id[..],
-                                                                   addl_plugins));
+            let expanded_crate = phase_2_configure_and_expand(sess,
+                                                              &cstore,
+                                                              krate,
+                                                              &id[..],
+                                                              addl_plugins)?;
 
             (outputs, expanded_crate, id)
         };
@@ -116,7 +122,7 @@ pub fn compile_input(sess: &Session,
         let expanded_crate = assign_node_ids(sess, expanded_crate);
         // Lower ast -> hir.
         let lcx = LoweringContext::new(sess, Some(&expanded_crate));
-        let dep_graph = DepGraph::new(sess.opts.build_dep_graph);
+        let dep_graph = DepGraph::new(sess.opts.build_dep_graph());
         let mut hir_forest = time(sess.time_passes(),
                                   "lowering ast -> hir",
                                   || hir_map::Forest::new(lower_crate(&lcx, &expanded_crate),
@@ -149,7 +155,7 @@ pub fn compile_input(sess: &Session,
         }
 
         time(sess.time_passes(), "attribute checking", || {
-            front::check_attr::check_crate(sess, &expanded_crate);
+            hir::check_attr::check_crate(sess, &expanded_crate);
         });
 
         time(sess.time_passes(),
@@ -164,13 +170,13 @@ pub fn compile_input(sess: &Session,
             None
         };
 
-        try!(try!(phase_3_run_analysis_passes(sess,
-                                              &cstore,
-                                              hir_map,
-                                              &arenas,
-                                              &id,
-                                              control.make_glob_map,
-                                              |tcx, mir_map, analysis, result| {
+        phase_3_run_analysis_passes(sess,
+                                    &cstore,
+                                    hir_map,
+                                    &arenas,
+                                    &id,
+                                    control.make_glob_map,
+                                    |tcx, mir_map, analysis, result| {
             {
                 // Eventually, we will want to track plugins.
                 let _ignore = tcx.dep_graph.in_ignore();
@@ -192,7 +198,7 @@ pub fn compile_input(sess: &Session,
                 }
             }
 
-            try!(result);
+            result?;
 
             if log_enabled!(::log::INFO) {
                 println!("Pre-trans");
@@ -211,7 +217,7 @@ pub fn compile_input(sess: &Session,
             token::get_ident_interner().clear();
 
             Ok((outputs, trans))
-        })))
+        })??
     };
 
     let phase5_result = phase_5_run_llvm_passes(sess, &trans, &outputs);
@@ -220,14 +226,13 @@ pub fn compile_input(sess: &Session,
                             sess,
                             CompileState::state_after_llvm(input, sess, outdir, &trans),
                             phase5_result);
-    try!(phase5_result);
+    phase5_result?;
 
     phase_6_link_output(sess, &trans, &outputs);
 
     Ok(())
 }
 
-
 /// The name used for source code that doesn't originate in a file
 /// (e.g. source from stdin or a string)
 pub fn anon_src() -> String {
@@ -238,7 +243,7 @@ pub fn source_name(input: &Input) -> String {
     match *input {
         // FIXME (#9639): This needs to handle non-utf8 paths
         Input::File(ref ifile) => ifile.to_str().unwrap().to_string(),
-        Input::Str(_) => anon_src(),
+        Input::Str { ref name, .. } => name.clone(),
     }
 }
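
Input::Str changes from a tuple variant holding only the source text to a struct variant that also carries a name, so string inputs (for example stdin) can report a proper source name instead of always falling back to anon_src(). A minimal sketch of the shape of that change (stand-in types, not the real session::config definitions):

// Before: the string input had no name of its own.
#[allow(dead_code)]
enum InputOld {
    File(std::path::PathBuf),
    Str(String),
}

// After: the name travels with the source text.
enum InputNew {
    File(std::path::PathBuf),
    Str { name: String, input: String },
}

fn source_name(input: &InputNew) -> String {
    match *input {
        InputNew::File(ref path) => path.to_str().unwrap().to_string(),
        InputNew::Str { ref name, .. } => name.clone(),
    }
}

fn main() {
    let input = InputNew::Str {
        name: "<anon>".to_string(),
        input: "fn main() {}".to_string(),
    };
    assert_eq!(source_name(&input), "<anon>");
}
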
 
@@ -313,7 +318,7 @@ pub struct CompileState<'a, 'ast: 'a, 'tcx: 'a> {
     pub ast_map: Option<&'a hir_map::Map<'ast>>,
     pub mir_map: Option<&'a MirMap<'tcx>>,
     pub analysis: Option<&'a ty::CrateAnalysis<'a>>,
-    pub tcx: Option<&'a ty::ctxt<'tcx>>,
+    pub tcx: Option<&'a TyCtxt<'tcx>>,
     pub lcx: Option<&'a LoweringContext<'a>>,
     pub trans: Option<&'a trans::CrateTranslation>,
 }
@@ -389,7 +394,7 @@ impl<'a, 'ast, 'tcx> CompileState<'a, 'ast, 'tcx> {
                             hir_crate: &'a hir::Crate,
                             analysis: &'a ty::CrateAnalysis,
                             mir_map: Option<&'a MirMap<'tcx>>,
-                            tcx: &'a ty::ctxt<'tcx>,
+                            tcx: &'a TyCtxt<'tcx>,
                             lcx: &'a LoweringContext<'a>,
                             crate_name: &'a str)
                             -> CompileState<'a, 'ast, 'tcx> {
@@ -415,7 +420,10 @@ impl<'a, 'ast, 'tcx> CompileState<'a, 'ast, 'tcx> {
     }
 }
 
-pub fn phase_1_parse_input(sess: &Session, cfg: ast::CrateConfig, input: &Input) -> ast::Crate {
+pub fn phase_1_parse_input<'a>(sess: &'a Session,
+                               cfg: ast::CrateConfig,
+                               input: &Input)
+                               -> PResult<'a, ast::Crate> {
     // These may be left in an incoherent state after a previous compile.
     // `clear_tables` and `get_ident_interner().clear()` can be used to free
     // memory, but they do not restore the initial state.
@@ -427,16 +435,16 @@ pub fn phase_1_parse_input(sess: &Session, cfg: ast::CrateConfig, input: &Input)
     let krate = time(sess.time_passes(), "parsing", || {
         match *input {
             Input::File(ref file) => {
-                parse::parse_crate_from_file(&(*file), cfg.clone(), &sess.parse_sess)
+                parse::parse_crate_from_file(file, cfg.clone(), &sess.parse_sess)
             }
-            Input::Str(ref src) => {
-                parse::parse_crate_from_source_str(anon_src().to_string(),
-                                                   src.to_string(),
+            Input::Str { ref input, ref name } => {
+                parse::parse_crate_from_source_str(name.clone(),
+                                                   input.clone(),
                                                    cfg.clone(),
                                                    &sess.parse_sess)
             }
         }
-    });
+    })?;
 
     sess.diagnostic().set_continue_after_error(true);
 
@@ -453,7 +461,7 @@ pub fn phase_1_parse_input(sess: &Session, cfg: ast::CrateConfig, input: &Input)
         syntax::show_span::run(sess.diagnostic(), s, &krate);
     }
 
-    krate
+    Ok(krate)
 }
 
 fn count_nodes(krate: &ast::Crate) -> usize {
@@ -489,22 +497,22 @@ pub fn phase_2_configure_and_expand(sess: &Session,
     // baz! should not use this definition unless foo is enabled.
 
     let mut feature_gated_cfgs = vec![];
-    krate = try!(time(time_passes, "configuration 1", || {
+    krate = time(time_passes, "configuration 1", || {
         sess.track_errors(|| {
             syntax::config::strip_unconfigured_items(sess.diagnostic(),
                                                      krate,
                                                      &mut feature_gated_cfgs)
         })
-    }));
+    })?;
 
     *sess.crate_types.borrow_mut() = collect_crate_types(sess, &krate.attrs);
-    *sess.crate_metadata.borrow_mut() = collect_crate_metadata(sess, &krate.attrs);
+    sess.crate_disambiguator.set(token::intern(&compute_crate_disambiguator(sess)));
 
     time(time_passes, "recursion limit", || {
         middle::recursion_limit::update_recursion_limit(sess, &krate);
     });
 
-    try!(time(time_passes, "gated macro checking", || {
+    time(time_passes, "gated macro checking", || {
         sess.track_errors(|| {
             let features =
               syntax::feature_gate::check_crate_macros(sess.codemap(),
@@ -514,7 +522,7 @@ pub fn phase_2_configure_and_expand(sess: &Session,
             // these need to be set "early" so that expansion sees `quote` if enabled.
             *sess.features.borrow_mut() = features;
         })
-    }));
+    })?;
 
 
     krate = time(time_passes, "crate injection", || {
@@ -523,11 +531,15 @@ pub fn phase_2_configure_and_expand(sess: &Session,
 
     let macros = time(time_passes,
                       "macro loading",
-                      || macro_import::read_macro_defs(sess, &cstore, &krate));
+                      || macro_import::read_macro_defs(sess, &cstore, &krate, crate_name));
 
     let mut addl_plugins = Some(addl_plugins);
     let registrars = time(time_passes, "plugin loading", || {
-        plugin::load::load_plugins(sess, &cstore, &krate, addl_plugins.take().unwrap())
+        plugin::load::load_plugins(sess,
+                                   &cstore,
+                                   &krate,
+                                   crate_name,
+                                   addl_plugins.take().unwrap())
     });
 
     let mut registry = Registry::new(sess, &krate);
@@ -551,7 +563,7 @@ pub fn phase_2_configure_and_expand(sess: &Session,
     let Registry { syntax_exts, early_lint_passes, late_lint_passes, lint_groups,
                    llvm_passes, attributes, mir_passes, .. } = registry;
 
-    try!(sess.track_errors(|| {
+    sess.track_errors(|| {
         let mut ls = sess.lint_store.borrow_mut();
         for pass in early_lint_passes {
             ls.register_early_pass(Some(sess), true, pass);
@@ -565,16 +577,16 @@ pub fn phase_2_configure_and_expand(sess: &Session,
         }
 
         *sess.plugin_llvm_passes.borrow_mut() = llvm_passes;
-        *sess.plugin_mir_passes.borrow_mut() = mir_passes;
+        sess.mir_passes.borrow_mut().extend(mir_passes);
         *sess.plugin_attributes.borrow_mut() = attributes.clone();
-    }));
+    })?;
 
     // Lint plugins are registered; now we can process command line flags.
     if sess.opts.describe_lints {
         super::describe_lints(&sess.lint_store.borrow(), true);
         return Err(0);
     }
-    try!(sess.track_errors(|| sess.lint_store.borrow_mut().process_command_line(sess)));
+    sess.track_errors(|| sess.lint_store.borrow_mut().process_command_line(sess))?;
 
     krate = time(time_passes, "expansion", || {
         // Windows dlls do not have rpaths, so they don't know how to find their
@@ -617,7 +629,7 @@ pub fn phase_2_configure_and_expand(sess: &Session,
     // of macro expansion.  This runs before #[cfg] to try to catch as
     // much as possible (e.g. help the programmer avoid platform
     // specific differences)
-    try!(time(time_passes, "complete gated feature checking 1", || {
+    time(time_passes, "complete gated feature checking 1", || {
         sess.track_errors(|| {
             let features = syntax::feature_gate::check_crate(sess.codemap(),
                                                              &sess.parse_sess.span_diagnostic,
@@ -626,12 +638,12 @@ pub fn phase_2_configure_and_expand(sess: &Session,
                                                              sess.opts.unstable_features);
             *sess.features.borrow_mut() = features;
         })
-    }));
+    })?;
 
     // JBC: make CFG processing part of expansion to avoid this problem:
 
     // strip again, in case expansion added anything with a #[cfg].
-    krate = try!(sess.track_errors(|| {
+    krate = sess.track_errors(|| {
         let krate = time(time_passes, "configuration 2", || {
             syntax::config::strip_unconfigured_items(sess.diagnostic(),
                                                      krate,
@@ -648,7 +660,7 @@ pub fn phase_2_configure_and_expand(sess: &Session,
         });
 
         krate
-    }));
+    })?;
 
     krate = time(time_passes, "maybe building test harness", || {
         syntax::test::modify_for_testing(&sess.parse_sess, &sess.opts.cfg, krate, sess.diagnostic())
@@ -669,7 +681,7 @@ pub fn phase_2_configure_and_expand(sess: &Session,
     // One final feature gating of the true AST that gets compiled
     // later, to make sure we've got everything (e.g. configuration
     // can insert new attributes via `cfg_attr`)
-    try!(time(time_passes, "complete gated feature checking 2", || {
+    time(time_passes, "complete gated feature checking 2", || {
         sess.track_errors(|| {
             let features = syntax::feature_gate::check_crate(sess.codemap(),
                                                              &sess.parse_sess.span_diagnostic,
@@ -678,11 +690,11 @@ pub fn phase_2_configure_and_expand(sess: &Session,
                                                              sess.opts.unstable_features);
             *sess.features.borrow_mut() = features;
         })
-    }));
+    })?;
 
-    try!(time(time_passes,
-              "const fn bodies and arguments",
-              || const_fn::check_crate(sess, &krate)));
+    time(time_passes,
+         "const fn bodies and arguments",
+         || const_fn::check_crate(sess, &krate))?;
 
     if sess.opts.debugging_opts.input_stats {
         println!("Post-expansion node count: {}", count_nodes(&krate));
@@ -734,7 +746,7 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session,
                                                make_glob_map: resolve::MakeGlobMap,
                                                f: F)
                                                -> Result<R, usize>
-    where F: FnOnce(&ty::ctxt<'tcx>, Option<MirMap<'tcx>>, ty::CrateAnalysis, CompileResult) -> R
+    where F: FnOnce(&TyCtxt<'tcx>, Option<MirMap<'tcx>>, ty::CrateAnalysis, CompileResult) -> R
 {
     macro_rules! try_with_f {
         ($e: expr, ($t: expr, $m: expr, $a: expr)) => {
@@ -752,20 +764,19 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session,
 
     time(time_passes,
          "external crate/lib resolution",
-         || LocalCrateReader::new(sess, cstore, &hir_map).read_crates());
+         || LocalCrateReader::new(sess, cstore, &hir_map, name).read_crates());
 
-    let lang_items = try!(time(time_passes, "language item collection", || {
+    let lang_items = time(time_passes, "language item collection", || {
         sess.track_errors(|| {
             middle::lang_items::collect_language_items(&sess, &hir_map)
         })
-    }));
+    })?;
 
     let resolve::CrateMap {
         def_map,
         freevars,
         export_map,
         trait_map,
-        external_exports,
         glob_map,
     } = time(time_passes,
              "resolution",
@@ -779,11 +790,11 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session,
         glob_map: glob_map,
     };
 
-    let named_region_map = try!(time(time_passes,
-                                     "lifetime resolution",
-                                     || middle::resolve_lifetime::krate(sess,
-                                                                        &hir_map,
-                                                                        &def_map.borrow())));
+    let named_region_map = time(time_passes,
+                                "lifetime resolution",
+                                || middle::resolve_lifetime::krate(sess,
+                                                                   &hir_map,
+                                                                   &def_map.borrow()))?;
 
     time(time_passes,
          "looking for entry point",
@@ -801,13 +812,13 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session,
          "loop checking",
          || loops::check_crate(sess, &hir_map));
 
-    try!(time(time_passes,
+    time(time_passes,
               "static item recursion checking",
-              || static_recursion::check_crate(sess, &def_map.borrow(), &hir_map)));
+              || static_recursion::check_crate(sess, &def_map.borrow(), &hir_map))?;
 
     let index = stability::Index::new(&hir_map);
 
-    ty::ctxt::create_and_enter(sess,
+    TyCtxt::create_and_enter(sess,
                                arenas,
                                def_map,
                                named_region_map,
@@ -816,7 +827,12 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session,
                                region_map,
                                lang_items,
                                index,
+                               name,
                                |tcx| {
+        time(time_passes,
+             "load_dep_graph",
+             || rustc_incremental::load_dep_graph(tcx));
+
         // passes are timed inside typeck
         try_with_f!(typeck::check_crate(tcx, trait_map), (tcx, None, analysis));
 
@@ -826,9 +842,7 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session,
 
         analysis.access_levels =
             time(time_passes, "privacy checking", || {
-                rustc_privacy::check_crate(tcx,
-                                           &analysis.export_map,
-                                           external_exports)
+                rustc_privacy::check_crate(tcx, &analysis.export_map)
             });
 
         // Do not move this check past lint
@@ -846,7 +860,7 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session,
 
         time(time_passes,
              "match checking",
-             || middle::check_match::check_crate(tcx));
+             || check_match::check_crate(tcx));
 
         // this must run before MIR dump, because
         // "not all control paths return a value" is reported here.
@@ -865,13 +879,20 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session,
                  "MIR dump",
                  || mir::mir_map::build_mir_for_crate(tcx));
 
-        time(time_passes,
-             "MIR passes",
-             || mir_map.run_passes(&mut sess.plugin_mir_passes.borrow_mut(), tcx));
+        time(time_passes, "MIR passes", || {
+            let mut passes = sess.mir_passes.borrow_mut();
+            // Push all the built-in passes.
+            passes.push_pass(box mir::transform::remove_dead_blocks::RemoveDeadBlocks);
+            passes.push_pass(box mir::transform::type_check::TypeckMir);
+            passes.push_pass(box mir::transform::simplify_cfg::SimplifyCfg);
+            passes.push_pass(box mir::transform::remove_dead_blocks::RemoveDeadBlocks);
+            // And run everything.
+            passes.run_passes(tcx, &mut mir_map);
+        });
 
         time(time_passes,
              "borrow checking",
-             || borrowck::check_crate(tcx));
+             || borrowck::check_crate(tcx, &mir_map));
 
         // Avoid overwhelming user with errors if type checking failed.
         // I'm not sure how helpful this is, to be honest, but it avoids
@@ -916,25 +937,40 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session,
 }
 
 /// Run the translation phase to LLVM, after which the AST and analysis can
-/// be discarded.
-pub fn phase_4_translate_to_llvm<'tcx>(tcx: &ty::ctxt<'tcx>,
+pub fn phase_4_translate_to_llvm<'tcx>(tcx: &TyCtxt<'tcx>,
                                        mut mir_map: MirMap<'tcx>,
-                                       analysis: ty::CrateAnalysis)
-                                       -> trans::CrateTranslation {
+                                       analysis: ty::CrateAnalysis) -> trans::CrateTranslation {
     let time_passes = tcx.sess.time_passes();
 
     time(time_passes,
          "resolving dependency formats",
          || dependency_format::calculate(&tcx.sess));
 
+    // Run the passes that transform the MIR into a form more suitable for
+    // translation to LLVM code.
+    time(time_passes, "Prepare MIR codegen passes", || {
+        let mut passes = ::rustc::mir::transform::Passes::new();
+        passes.push_pass(box mir::transform::no_landing_pads::NoLandingPads);
+        passes.push_pass(box mir::transform::remove_dead_blocks::RemoveDeadBlocks);
+        passes.push_pass(box mir::transform::erase_regions::EraseRegions);
+        passes.push_pass(box mir::transform::break_critical_edges::BreakCriticalEdges);
+        passes.run_passes(tcx, &mut mir_map);
+    });
+
+    let translation =
+        time(time_passes,
+             "translation",
+             move || trans::trans_crate(tcx, &mir_map, analysis));
+
     time(time_passes,
-         "erasing regions from MIR",
-         || mir::transform::erase_regions::erase_regions(tcx, &mut mir_map));
+         "assert dep graph",
+         move || rustc_incremental::assert_dep_graph(tcx));
 
-    // Option dance to work around the lack of stack once closures.
     time(time_passes,
-         "translation",
-         move || trans::trans_crate(tcx, &mir_map, analysis))
+         "serialize dep graph",
+         move || rustc_incremental::save_dep_graph(tcx));
+
+    translation
 }
 
 /// Run LLVM itself, producing a bitcode file, assembly file or object file
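
Both push_pass/run_passes blocks above replace single hard-coded transformations with a small pass pipeline driven by rustc::mir::transform::Passes. The general shape of such a pass manager is roughly the following sketch (the Pass trait and names here are hypothetical stand-ins, not rustc's MirPass interface):

// Illustrative pass-manager pattern: passes are queued, then run in order.
trait Pass {
    fn name(&self) -> &'static str;
    fn run(&self, body: &mut Vec<String>); // the "MIR body" is just strings here
}

struct RemoveEmptyLines;
impl Pass for RemoveEmptyLines {
    fn name(&self) -> &'static str { "remove-empty-lines" }
    fn run(&self, body: &mut Vec<String>) {
        body.retain(|line| !line.is_empty());
    }
}

#[derive(Default)]
struct Passes {
    passes: Vec<Box<dyn Pass>>,
}

impl Passes {
    fn push_pass(&mut self, pass: Box<dyn Pass>) {
        self.passes.push(pass);
    }
    fn run_passes(&self, body: &mut Vec<String>) {
        for pass in &self.passes {
            println!("running pass: {}", pass.name());
            pass.run(body);
        }
    }
}

fn main() {
    let mut passes = Passes::default();
    passes.push_pass(Box::new(RemoveEmptyLines));
    let mut body = vec!["bb0: goto bb1".to_string(), String::new()];
    passes.run_passes(&mut body);
    assert_eq!(body.len(), 1);
}
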
@@ -1019,16 +1055,16 @@ fn write_out_deps(sess: &Session, outputs: &OutputFilenames, id: &str) {
                                          .filter(|fmap| !fmap.is_imported())
                                          .map(|fmap| escape_dep_filename(&fmap.name))
                                          .collect();
-            let mut file = try!(fs::File::create(&deps_filename));
+            let mut file = fs::File::create(&deps_filename)?;
             for path in &out_filenames {
-                try!(write!(file, "{}: {}\n\n", path.display(), files.join(" ")));
+                write!(file, "{}: {}\n\n", path.display(), files.join(" "))?;
             }
 
             // Emit a fake target for each input file to the compilation. This
             // prevents `make` from spitting out an error if a file is later
             // deleted. For more info see #28735
             for path in files {
-                try!(writeln!(file, "{}:", path));
+                writeln!(file, "{}:", path)?;
             }
             Ok(())
         })();
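
Many hunks in this file swap try!(expr) for the ? operator, which is why #![feature(question_mark)] is added further down: on this 1.9.0-era compiler the operator was still feature-gated. Both forms return early on Err, converting the error via From. A small self-contained illustration (ordinary std I/O, nothing rustc-specific):

use std::fs::File;
use std::io::{self, Read};

// What try!(expr) / expr? expand to, roughly: return early on Err, converting
// the error type via From. (Sketch of the desugaring, not the exact std code.)
fn read_desugared(path: &str) -> io::Result<String> {
    let mut file = match File::open(path) {
        Ok(f) => f,
        Err(e) => return Err(From::from(e)),
    };
    let mut s = String::new();
    match file.read_to_string(&mut s) {
        Ok(_) => {}
        Err(e) => return Err(From::from(e)),
    }
    Ok(s)
}

// The same function using `?`, the form the hunks above migrate to.
fn read_with_question_mark(path: &str) -> io::Result<String> {
    let mut file = File::open(path)?;
    let mut s = String::new();
    file.read_to_string(&mut s)?;
    Ok(s)
}

fn main() {
    // Both behave identically; a missing file yields Err in either version.
    assert!(read_desugared("/no/such/file").is_err());
    assert!(read_with_question_mark("/no/such/file").is_err());
}
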
@@ -1117,8 +1153,34 @@ pub fn collect_crate_types(session: &Session, attrs: &[ast::Attribute]) -> Vec<c
         .collect()
 }
 
-pub fn collect_crate_metadata(session: &Session, _attrs: &[ast::Attribute]) -> Vec<String> {
-    session.opts.cg.metadata.clone()
+pub fn compute_crate_disambiguator(session: &Session) -> String {
+    let mut hasher = Sha256::new();
+
+    let mut metadata = session.opts.cg.metadata.clone();
+    // We don't want the crate_disambiguator to depend on the order of the
+    // -C metadata arguments, so sort them:
+    metadata.sort();
+    // Every distinct -C metadata value is only incorporated once:
+    metadata.dedup();
+
+    hasher.input_str("metadata");
+    for s in &metadata {
+        // Also incorporate the length of a metadata string, so that we generate
+        // different values for `-Cmetadata=ab -Cmetadata=c` and
+        // `-Cmetadata=a -Cmetadata=bc`
+        hasher.input_str(&format!("{}", s.len())[..]);
+        hasher.input_str(&s[..]);
+    }
+
+    let mut hash = hasher.result_str();
+
+    // If this is an executable, add a special suffix, so that we don't get
+    // symbol conflicts when linking against a library of the same name.
+    if session.crate_types.borrow().contains(&config::CrateTypeExecutable) {
+        hash.push_str("-exe");
+    }
+
+    hash
 }
 
 pub fn build_output_filenames(input: &Input,
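
compute_crate_disambiguator above sorts and dedups the -C metadata strings and length-prefixes each one before hashing, so that -Cmetadata=ab -Cmetadata=c and -Cmetadata=a -Cmetadata=bc produce different values, and it appends "-exe" for executables to avoid symbol clashes with a same-named library. A rough sketch of that scheme (using std's DefaultHasher purely as a stand-in for the Sha256 used above):

use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;

// Sketch of the length-prefixed hashing scheme; not the rustc implementation.
fn disambiguator(mut metadata: Vec<String>, is_executable: bool) -> String {
    metadata.sort();   // order of -C metadata flags should not matter
    metadata.dedup();  // repeated values are only counted once

    let mut hasher = DefaultHasher::new();
    hasher.write(b"metadata");
    for s in &metadata {
        // Length prefix keeps ["ab", "c"] and ["a", "bc"] distinct.
        hasher.write(s.len().to_string().as_bytes());
        hasher.write(s.as_bytes());
    }

    let mut hash = format!("{:016x}", hasher.finish());
    if is_executable {
        hash.push_str("-exe"); // avoid clashing with a library of the same name
    }
    hash
}

fn main() {
    let a = disambiguator(vec!["ab".to_string(), "c".to_string()], false);
    let b = disambiguator(vec!["a".to_string(), "bc".to_string()], false);
    assert_ne!(a, b);
    // Flag order does not change the result:
    let c = disambiguator(vec!["c".to_string(), "ab".to_string()], false);
    assert_eq!(a, c);
}
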
index d0f86cfcb46babaa030404ced5026709f17ead45..769449b96d2b1ed66e424576abf8cf779acaedbc 100644 (file)
@@ -30,6 +30,7 @@
 #![feature(rustc_private)]
 #![feature(set_stdio)]
 #![feature(staged_api)]
+#![feature(question_mark)]
 
 extern crate arena;
 extern crate flate;
@@ -39,14 +40,16 @@ extern crate libc;
 extern crate rustc;
 extern crate rustc_back;
 extern crate rustc_borrowck;
+extern crate rustc_const_eval;
 extern crate rustc_passes;
-extern crate rustc_front;
 extern crate rustc_lint;
 extern crate rustc_plugin;
 extern crate rustc_privacy;
+extern crate rustc_incremental;
 extern crate rustc_metadata;
 extern crate rustc_mir;
 extern crate rustc_resolve;
+extern crate rustc_save_analysis;
 extern crate rustc_trans;
 extern crate rustc_typeck;
 extern crate serialize;
@@ -61,11 +64,11 @@ use driver::CompileController;
 use pretty::{PpMode, UserIdentifiedItem};
 
 use rustc_resolve as resolve;
+use rustc_save_analysis as save;
 use rustc_trans::back::link;
-use rustc_trans::save;
 use rustc::session::{config, Session, build_session, CompileResult};
 use rustc::session::config::{Input, PrintRequest, OutputType, ErrorOutputType};
-use rustc::session::config::{get_unstable_features_setting, OptionStability};
+use rustc::session::config::{get_unstable_features_setting, nightly_options};
 use rustc::middle::cstore::CrateStore;
 use rustc::lint::Lint;
 use rustc::lint;
@@ -86,15 +89,15 @@ use std::str;
 use std::sync::{Arc, Mutex};
 use std::thread;
 
-use rustc::session::{early_error, early_warn};
+use rustc::session::early_error;
 
 use syntax::ast;
-use syntax::parse;
+use syntax::parse::{self, PResult};
 use syntax::errors;
 use syntax::errors::emitter::Emitter;
 use syntax::diagnostics;
 use syntax::parse::token;
-use syntax::feature_gate::UnstableFeatures;
+use syntax::feature_gate::{GatedCfg, UnstableFeatures};
 
 #[cfg(test)]
 pub mod test;
@@ -157,13 +160,17 @@ pub fn run_compiler<'a>(args: &[String],
         }
     }}
 
-    let matches = match handle_options(args.to_vec()) {
+    let matches = match handle_options(args) {
         Some(matches) => matches,
         None => return (Ok(()), None),
     };
 
     let sopts = config::build_session_options(&matches);
 
+    if sopts.debugging_opts.debug_llvm {
+        unsafe { llvm::LLVMSetDebug(1); }
+    }
+
     let descriptions = diagnostics_registry();
 
     do_or_return!(callbacks.early_callback(&matches,
@@ -223,7 +230,8 @@ fn make_input(free_matches: &[String]) -> Option<(Input, Option<PathBuf>)> {
         if ifile == "-" {
             let mut src = String::new();
             io::stdin().read_to_string(&mut src).unwrap();
-            Some((Input::Str(src), None))
+            Some((Input::Str { name: driver::anon_src(), input: src },
+                  None))
         } else {
             Some((Input::File(PathBuf::from(ifile)),
                   Some(PathBuf::from(ifile))))
@@ -334,10 +342,10 @@ pub struct RustcDefaultCalls;
 fn handle_explain(code: &str,
                   descriptions: &diagnostics::registry::Registry,
                   output: ErrorOutputType) {
-    let normalised = if !code.starts_with("E") {
-        format!("E{0:0>4}", code)
-    } else {
+    let normalised = if code.starts_with("E") {
         code.to_string()
+    } else {
+        format!("E{0:0>4}", code)
     };
     match descriptions.find_description(&normalised) {
         Some(ref description) => {
@@ -511,7 +519,7 @@ impl RustcDefaultCalls {
                         .unwrap();
                     println!("{}", String::from_utf8(v).unwrap());
                 }
-                &Input::Str(_) => {
+                &Input::Str { .. } => {
                     early_error(ErrorOutputType::default(), "cannot list metadata for stdin");
                 }
             }
@@ -531,7 +539,19 @@ impl RustcDefaultCalls {
             return Compilation::Continue;
         }
 
-        let attrs = input.map(|input| parse_crate_attrs(sess, input));
+        let attrs = match input {
+            None => None,
+            Some(input) => {
+                let result = parse_crate_attrs(sess, input);
+                match result {
+                    Ok(attrs) => Some(attrs),
+                    Err(mut parse_error) => {
+                        parse_error.emit();
+                        return Compilation::Stop;
+                    }
+                }
+            }
+        };
         for req in &sess.opts.prints {
             match *req {
                 PrintRequest::TargetList => {
@@ -554,8 +574,6 @@ impl RustcDefaultCalls {
                         continue;
                     }
                     let crate_types = driver::collect_crate_types(sess, attrs);
-                    let metadata = driver::collect_crate_metadata(sess, attrs);
-                    *sess.crate_metadata.borrow_mut() = metadata;
                     for &style in &crate_types {
                         let fname = link::filename_for_input(sess, style, &id, &t_outputs);
                         println!("{}",
@@ -565,7 +583,18 @@ impl RustcDefaultCalls {
                     }
                 }
                 PrintRequest::Cfg => {
-                    for cfg in config::build_configuration(sess) {
+                    let mut cfg = config::build_configuration(&sess);
+                    target_features::add_configuration(&mut cfg, &sess);
+
+                    let allow_unstable_cfg = match get_unstable_features_setting() {
+                        UnstableFeatures::Disallow => false,
+                        _ => true,
+                    };
+
+                    for cfg in cfg {
+                        if !allow_unstable_cfg && GatedCfg::gate(&*cfg).is_some() {
+                            continue;
+                        }
                         match cfg.node {
                             ast::MetaItemKind::Word(ref word) => println!("{}", word),
                             ast::MetaItemKind::NameValue(ref name, ref value) => {
@@ -846,9 +875,9 @@ fn print_flag_list<T>(cmdline_opt: &str,
 ///
 /// So with all that in mind, the comments below have some more detail about the
 /// contortions done here to get things to work out correctly.
-pub fn handle_options(mut args: Vec<String>) -> Option<getopts::Matches> {
+pub fn handle_options(args: &[String]) -> Option<getopts::Matches> {
     // Throw away the first argument, the name of the binary
-    let _binary = args.remove(0);
+    let args = &args[1..];
 
     if args.is_empty() {
         // user did not write `-v` nor `-Z unstable-options`, so do not
@@ -881,51 +910,7 @@ pub fn handle_options(mut args: Vec<String>) -> Option<getopts::Matches> {
     //   (unstable option being used on stable)
     // * If we're a historically stable-but-should-be-unstable option then we
     //   emit a warning that we're going to turn this into an error soon.
-    let has_z_unstable_options = matches.opt_strs("Z")
-                                        .iter()
-                                        .any(|x| *x == "unstable-options");
-    let really_allows_unstable_options = match get_unstable_features_setting() {
-        UnstableFeatures::Disallow => false,
-        _ => true,
-    };
-    for opt in config::rustc_optgroups() {
-        if opt.stability == OptionStability::Stable {
-            continue
-        }
-        let opt_name = if !opt.opt_group.long_name.is_empty() {
-            &opt.opt_group.long_name
-        } else {
-            &opt.opt_group.short_name
-        };
-        if !matches.opt_present(opt_name) {
-            continue
-        }
-        if opt_name != "Z" && !has_z_unstable_options {
-            let msg = format!("the `-Z unstable-options` flag must also be \
-                               passed to enable the flag `{}`", opt_name);
-            early_error(ErrorOutputType::default(), &msg);
-        }
-        if really_allows_unstable_options {
-            continue
-        }
-        match opt.stability {
-            OptionStability::Unstable => {
-                let msg = format!("the option `{}` is only accepted on the \
-                                   nightly compiler", opt_name);
-                early_error(ErrorOutputType::default(), &msg);
-            }
-            OptionStability::UnstableButNotReally => {
-                let msg = format!("the option `{}` is is unstable and should \
-                                   only be used on the nightly compiler, but \
-                                   it is currently accepted for backwards \
-                                   compatibility; this will soon change, \
-                                   see issue #31847 for more details",
-                                  opt_name);
-                early_warn(ErrorOutputType::default(), &msg);
-            }
-            OptionStability::Stable => {}
-        }
-    }
+    nightly_options::check_nightly_options(&matches, &config::rustc_optgroups());
 
     if matches.opt_present("h") || matches.opt_present("help") {
         // Only show unstable options in --help if we *really* accept unstable
@@ -933,12 +918,11 @@ pub fn handle_options(mut args: Vec<String>) -> Option<getopts::Matches> {
         // the stable channel of Rust which was accidentally allowed
         // historically.
         usage(matches.opt_present("verbose"),
-              has_z_unstable_options && really_allows_unstable_options);
+              nightly_options::is_unstable_enabled(&matches));
         return None;
     }
 
     // Don't handle -W help here, because we might first load plugins.
-
     let r = matches.opt_strs("Z");
     if r.iter().any(|x| *x == "help") {
         describe_debug_flags();
@@ -966,19 +950,18 @@ pub fn handle_options(mut args: Vec<String>) -> Option<getopts::Matches> {
     Some(matches)
 }
 
-fn parse_crate_attrs(sess: &Session, input: &Input) -> Vec<ast::Attribute> {
-    let result = match *input {
+fn parse_crate_attrs<'a>(sess: &'a Session, input: &Input) -> PResult<'a, Vec<ast::Attribute>> {
+    match *input {
         Input::File(ref ifile) => {
             parse::parse_crate_attrs_from_file(ifile, Vec::new(), &sess.parse_sess)
         }
-        Input::Str(ref src) => {
-            parse::parse_crate_attrs_from_source_str(driver::anon_src().to_string(),
-                                                     src.to_string(),
+        Input::Str { ref name, ref input } => {
+            parse::parse_crate_attrs_from_source_str(name.clone(),
+                                                     input.clone(),
                                                      Vec::new(),
                                                      &sess.parse_sess)
         }
-    };
-    result.into_iter().collect()
+    }
 }
 
 /// Run a procedure which will detect panics in the compiler and print nicer
@@ -1010,43 +993,41 @@ pub fn monitor<F: FnOnce() + Send + 'static>(f: F) {
         cfg = cfg.stack_size(STACK_SIZE);
     }
 
-    match cfg.spawn(move || {
-                 io::set_panic(box err);
-                 f()
-             })
-             .unwrap()
-             .join() {
-        Ok(()) => {
-            // fallthrough
-        }
-        Err(value) => {
-            // Thread panicked without emitting a fatal diagnostic
-            if !value.is::<errors::FatalError>() {
-                let mut emitter = errors::emitter::BasicEmitter::stderr(errors::ColorConfig::Auto);
-
-                // a .span_bug or .bug call has already printed what
-                // it wants to print.
-                if !value.is::<errors::ExplicitBug>() {
-                    emitter.emit(None, "unexpected panic", None, errors::Level::Bug);
-                }
+    let thread = cfg.spawn(move || {
+         io::set_panic(box err);
+         f()
+     });
 
-                let xs = ["the compiler unexpectedly panicked. this is a bug.".to_string(),
-                          format!("we would appreciate a bug report: {}", BUG_REPORT_URL)];
-                for note in &xs {
-                    emitter.emit(None, &note[..], None, errors::Level::Note)
-                }
-                if let None = env::var_os("RUST_BACKTRACE") {
-                    emitter.emit(None,
-                                 "run with `RUST_BACKTRACE=1` for a backtrace",
-                                 None,
-                                 errors::Level::Note);
-                }
+     if let Err(value) = thread.unwrap().join() {
+        // Thread panicked without emitting a fatal diagnostic
+        if !value.is::<errors::FatalError>() {
+            let mut emitter = errors::emitter::BasicEmitter::stderr(errors::ColorConfig::Auto);
 
-                println!("{}", str::from_utf8(&data.lock().unwrap()).unwrap());
+            // a .span_bug or .bug call has already printed what
+            // it wants to print.
+            if !value.is::<errors::ExplicitBug>() {
+                emitter.emit(None, "unexpected panic", None, errors::Level::Bug);
             }
 
-            exit_on_err();
+            let xs = ["the compiler unexpectedly panicked. this is a bug.".to_string(),
+                      format!("we would appreciate a bug report: {}", BUG_REPORT_URL)];
+            for note in &xs {
+                emitter.emit(None, &note[..], None, errors::Level::Note)
+            }
+            if match env::var_os("RUST_BACKTRACE") {
+                Some(val) => &val != "0",
+                None => false,
+            } {
+                emitter.emit(None,
+                             "run with `RUST_BACKTRACE=1` for a backtrace",
+                             None,
+                             errors::Level::Note);
+            }
+
+            println!("{}", str::from_utf8(&data.lock().unwrap()).unwrap());
         }
+
+        exit_on_err();
     }
 }
 
@@ -1068,6 +1049,7 @@ pub fn diagnostics_registry() -> diagnostics::registry::Registry {
     all_errors.extend_from_slice(&rustc_resolve::DIAGNOSTICS);
     all_errors.extend_from_slice(&rustc_privacy::DIAGNOSTICS);
     all_errors.extend_from_slice(&rustc_trans::DIAGNOSTICS);
+    all_errors.extend_from_slice(&rustc_const_eval::DIAGNOSTICS);
 
     Registry::new(&all_errors)
 }
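The driver hunks above replace an unwrap-style flow with explicit error propagation: parse_crate_attrs now returns a PResult, and the caller emits the diagnostic and returns Compilation::Stop instead of panicking inside the parser. A self-contained sketch of that control-flow shape, using plain std types and hypothetical names (parse, run) rather than rustc's Session, PResult, or Compilation:

    // Hypothetical stand-ins: `parse` plays the role of parse_crate_attrs,
    // eprintln! the role of parse_error.emit(), and Err(()) the role of
    // Compilation::Stop. Only the shape of the control flow mirrors the diff.
    fn parse(src: &str) -> Result<Vec<String>, String> {
        if src.trim().is_empty() {
            Err("expected at least one item".to_string())
        } else {
            Ok(src.split_whitespace().map(|s| s.to_string()).collect())
        }
    }

    fn run(input: Option<&str>) -> Result<(), ()> {
        let attrs = match input {
            None => None,
            Some(src) => match parse(src) {
                Ok(attrs) => Some(attrs),
                Err(msg) => {
                    eprintln!("error: {}", msg); // emit the diagnostic ...
                    return Err(());              // ... then stop, don't panic
                }
            },
        };
        println!("{} attribute(s)", attrs.map_or(0, |a| a.len()));
        Ok(())
    }

    fn main() {
        run(Some("derive deny_missing_docs")).unwrap();
        assert!(run(Some("   ")).is_err());
    }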
index e9db30f3cb24fd37363c2423663fbe7afc7558c6..cde5ba19859e03fa99d9c66681ddf331302abed2 100644
@@ -20,9 +20,9 @@ use rustc_trans::back::link;
 use {driver, abort_on_err};
 
 use rustc::dep_graph::DepGraph;
-use rustc::middle::ty;
-use rustc::middle::cfg;
-use rustc::middle::cfg::graphviz::LabelledCFG;
+use rustc::ty::{self, TyCtxt};
+use rustc::cfg;
+use rustc::cfg::graphviz::LabelledCFG;
 use rustc::session::Session;
 use rustc::session::config::Input;
 use rustc_borrowck as borrowck;
@@ -31,6 +31,7 @@ use rustc_resolve as resolve;
 use rustc_metadata::cstore::CStore;
 
 use rustc_mir::pretty::write_mir_pretty;
+use rustc_mir::graphviz::write_mir_graphviz;
 
 use syntax::ast::{self, BlockCheckMode};
 use syntax::codemap;
@@ -44,15 +45,18 @@ use graphviz as dot;
 
 use std::fs::File;
 use std::io::{self, Write};
+use std::iter;
 use std::option;
 use std::path::PathBuf;
 use std::str::FromStr;
 
-use rustc::front::map as hir_map;
-use rustc::front::map::{blocks, NodePrinter};
-use rustc_front::hir;
-use rustc_front::lowering::{lower_crate, LoweringContext};
-use rustc_front::print::pprust as pprust_hir;
+use rustc::hir::map as hir_map;
+use rustc::hir::map::{blocks, NodePrinter};
+use rustc::hir;
+use rustc::hir::lowering::{lower_crate, LoweringContext};
+use rustc::hir::print as pprust_hir;
+
+use rustc::mir::mir_map::MirMap;
 
 #[derive(Copy, Clone, PartialEq, Debug)]
 pub enum PpSourceMode {
@@ -80,6 +84,7 @@ pub enum PpMode {
     PpmHir(PpSourceMode),
     PpmFlowGraph(PpFlowGraphMode),
     PpmMir,
+    PpmMirCFG,
 }
 
 pub fn parse_pretty(sess: &Session,
@@ -100,6 +105,7 @@ pub fn parse_pretty(sess: &Session,
         ("hir,identified", true) => PpmHir(PpmIdentified),
         ("hir,typed", true) => PpmHir(PpmTyped),
         ("mir", true) => PpmMir,
+        ("mir-cfg", true) => PpmMirCFG,
         ("flowgraph", true) => PpmFlowGraph(PpFlowGraphMode::Default),
         ("flowgraph,unlabelled", true) => PpmFlowGraph(PpFlowGraphMode::UnlabelledEdges),
         _ => {
@@ -250,6 +256,15 @@ trait HirPrinterSupport<'ast>: pprust_hir::PpAnn {
     /// (Rust does not yet support upcasting from a trait object to
     /// an object for one of its super-traits.)
     fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn;
+
+    /// Computes a user-readable representation of a path, if possible.
+    fn node_path(&self, id: ast::NodeId) -> Option<String> {
+        self.ast_map().and_then(|map| map.def_path_from_id(id)).map(|path| {
+            path.data.into_iter().map(|elem| {
+                elem.data.to_string()
+            }).collect::<Vec<_>>().join("::")
+        })
+    }
 }
 
 struct NoAnn<'ast> {
@@ -319,24 +334,24 @@ impl<'ast> pprust::PpAnn for IdentifiedAnnotation<'ast> {
             pprust::NodeIdent(_) | pprust::NodeName(_) => Ok(()),
 
             pprust::NodeItem(item) => {
-                try!(pp::space(&mut s.s));
+                pp::space(&mut s.s)?;
                 s.synth_comment(item.id.to_string())
             }
             pprust::NodeSubItem(id) => {
-                try!(pp::space(&mut s.s));
+                pp::space(&mut s.s)?;
                 s.synth_comment(id.to_string())
             }
             pprust::NodeBlock(blk) => {
-                try!(pp::space(&mut s.s));
+                pp::space(&mut s.s)?;
                 s.synth_comment(format!("block {}", blk.id))
             }
             pprust::NodeExpr(expr) => {
-                try!(pp::space(&mut s.s));
-                try!(s.synth_comment(expr.id.to_string()));
+                pp::space(&mut s.s)?;
+                s.synth_comment(expr.id.to_string())?;
                 s.pclose()
             }
             pprust::NodePat(pat) => {
-                try!(pp::space(&mut s.s));
+                pp::space(&mut s.s)?;
                 s.synth_comment(format!("pat {}", pat.id))
             }
         }
@@ -368,24 +383,24 @@ impl<'ast> pprust_hir::PpAnn for IdentifiedAnnotation<'ast> {
         match node {
             pprust_hir::NodeName(_) => Ok(()),
             pprust_hir::NodeItem(item) => {
-                try!(pp::space(&mut s.s));
+                pp::space(&mut s.s)?;
                 s.synth_comment(item.id.to_string())
             }
             pprust_hir::NodeSubItem(id) => {
-                try!(pp::space(&mut s.s));
+                pp::space(&mut s.s)?;
                 s.synth_comment(id.to_string())
             }
             pprust_hir::NodeBlock(blk) => {
-                try!(pp::space(&mut s.s));
+                pp::space(&mut s.s)?;
                 s.synth_comment(format!("block {}", blk.id))
             }
             pprust_hir::NodeExpr(expr) => {
-                try!(pp::space(&mut s.s));
-                try!(s.synth_comment(expr.id.to_string()));
+                pp::space(&mut s.s)?;
+                s.synth_comment(expr.id.to_string())?;
                 s.pclose()
             }
             pprust_hir::NodePat(pat) => {
-                try!(pp::space(&mut s.s));
+                pp::space(&mut s.s)?;
                 s.synth_comment(format!("pat {}", pat.id))
             }
         }
@@ -415,13 +430,13 @@ impl<'ast> pprust::PpAnn for HygieneAnnotation<'ast> {
     fn post(&self, s: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> {
         match node {
             pprust::NodeIdent(&ast::Ident { name: ast::Name(nm), ctxt }) => {
-                try!(pp::space(&mut s.s));
+                pp::space(&mut s.s)?;
                 // FIXME #16420: this doesn't display the connections
                 // between syntax contexts
                 s.synth_comment(format!("{}#{}", nm, ctxt.0))
             }
             pprust::NodeName(&ast::Name(nm)) => {
-                try!(pp::space(&mut s.s));
+                pp::space(&mut s.s)?;
                 s.synth_comment(nm.to_string())
             }
             _ => Ok(()),
@@ -431,7 +446,7 @@ impl<'ast> pprust::PpAnn for HygieneAnnotation<'ast> {
 
 
 struct TypedAnnotation<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
 }
 
 impl<'b, 'tcx> HirPrinterSupport<'tcx> for TypedAnnotation<'b, 'tcx> {
@@ -446,6 +461,10 @@ impl<'b, 'tcx> HirPrinterSupport<'tcx> for TypedAnnotation<'b, 'tcx> {
     fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn {
         self
     }
+
+    fn node_path(&self, id: ast::NodeId) -> Option<String> {
+        Some(self.tcx.node_path_str(id))
+    }
 }
 
 impl<'a, 'tcx> pprust_hir::PpAnn for TypedAnnotation<'a, 'tcx> {
@@ -458,10 +477,10 @@ impl<'a, 'tcx> pprust_hir::PpAnn for TypedAnnotation<'a, 'tcx> {
     fn post(&self, s: &mut pprust_hir::State, node: pprust_hir::AnnNode) -> io::Result<()> {
         match node {
             pprust_hir::NodeExpr(expr) => {
-                try!(pp::space(&mut s.s));
-                try!(pp::word(&mut s.s, "as"));
-                try!(pp::space(&mut s.s));
-                try!(pp::word(&mut s.s, &self.tcx.expr_ty(expr).to_string()));
+                pp::space(&mut s.s)?;
+                pp::word(&mut s.s, "as")?;
+                pp::space(&mut s.s)?;
+                pp::word(&mut s.s, &self.tcx.expr_ty(expr).to_string())?;
                 s.pclose()
             }
             _ => Ok(()),
@@ -574,6 +593,7 @@ fn needs_ast_map(ppm: &PpMode, opt_uii: &Option<UserIdentifiedItem>) -> bool {
         PpmSource(PpmExpandedHygiene) |
         PpmHir(_) |
         PpmMir |
+        PpmMirCFG |
         PpmFlowGraph(_) => true,
         PpmSource(PpmTyped) => panic!("invalid state"),
     }
@@ -590,6 +610,7 @@ fn needs_expansion(ppm: &PpMode) -> bool {
         PpmSource(PpmExpandedHygiene) |
         PpmHir(_) |
         PpmMir |
+        PpmMirCFG |
         PpmFlowGraph(_) => true,
         PpmSource(PpmTyped) => panic!("invalid state"),
     }
@@ -686,7 +707,7 @@ pub fn pretty_print_input(sess: Session,
                           ppm: PpMode,
                           opt_uii: Option<UserIdentifiedItem>,
                           ofile: Option<PathBuf>) {
-    let krate = driver::phase_1_parse_input(&sess, cfg, input);
+    let krate = panictry!(driver::phase_1_parse_input(&sess, cfg, input));
 
     let krate = if let PpmSource(PpmEveryBodyLoops) = ppm {
         let mut fold = ReplaceBodyWithLoop::new();
@@ -786,7 +807,7 @@ pub fn pretty_print_input(sess: Session,
                                        |annotation, (out,uii), _| {
                 debug!("pretty printing source code {:?}", s);
                 let sess = annotation.sess();
-                let ast_map = annotation.ast_map().expect("--pretty missing ast_map");
+                let ast_map = annotation.ast_map().expect("--unpretty missing HIR map");
                 let mut pp_state =
                     pprust_hir::State::new_from_input(sess.codemap(),
                                                       sess.diagnostic(),
@@ -798,18 +819,26 @@ pub fn pretty_print_input(sess: Session,
                                                       Some(ast_map.krate()));
                 for node_id in uii.all_matching_node_ids(ast_map) {
                     let node = ast_map.get(node_id);
-                    try!(pp_state.print_node(&node));
-                    try!(pp::space(&mut pp_state.s));
-                    try!(pp_state.synth_comment(ast_map.path_to_string(node_id)));
-                    try!(pp::hardbreak(&mut pp_state.s));
+                    pp_state.print_node(&node)?;
+                    pp::space(&mut pp_state.s)?;
+                    let path = annotation.node_path(node_id)
+                                         .expect("--unpretty missing node paths");
+                    pp_state.synth_comment(path)?;
+                    pp::hardbreak(&mut pp_state.s)?;
                 }
                 pp::eof(&mut pp_state.s)
             })
         }
 
-        (PpmMir, None) => {
-            debug!("pretty printing MIR for whole crate");
-            let ast_map = ast_map.expect("--unpretty mir missing ast_map");
+        (pp_type@PpmMir, uii) | (pp_type@PpmMirCFG, uii) => {
+            let ast_map = ast_map.expect("--unpretty missing ast_map");
+            let nodeid = if let Some(uii) = uii {
+                debug!("pretty printing MIR for {:?}", uii);
+                Some(uii.to_one_node_id("--unpretty", &sess, &ast_map))
+            } else {
+                debug!("pretty printing MIR for whole crate");
+                None
+            };
             abort_on_err(driver::phase_3_run_analysis_passes(&sess,
                                                              &cstore,
                                                              ast_map,
@@ -818,38 +847,25 @@ pub fn pretty_print_input(sess: Session,
                                                              resolve::MakeGlobMap::No,
                                                              |tcx, mir_map, _, _| {
                 if let Some(mir_map) = mir_map {
-                    for (nodeid, mir) in &mir_map.map {
-                        try!(writeln!(out, "MIR for {}", tcx.map.node_to_string(*nodeid)));
-                        try!(write_mir_pretty(mir, &mut out));
+                    if let Some(nodeid) = nodeid {
+                        let mir = mir_map.map.get(&nodeid).unwrap_or_else(|| {
+                            sess.fatal(&format!("no MIR map entry for node {}", nodeid))
+                        });
+                        match pp_type {
+                            PpmMir => write_mir_pretty(tcx, iter::once((&nodeid, mir)), &mut out),
+                            _ => write_mir_graphviz(tcx, iter::once((&nodeid, mir)), &mut out)
+                        }?;
+                    } else {
+                        match pp_type {
+                            PpmMir => write_mir_pretty(tcx, mir_map.map.iter(), &mut out),
+                            _ => write_mir_graphviz(tcx, mir_map.map.iter(), &mut out)
+                        }?;
                     }
                 }
                 Ok(())
             }), &sess)
         }
 
-        (PpmMir, Some(uii)) => {
-            debug!("pretty printing MIR for {:?}", uii);
-            let ast_map = ast_map.expect("--unpretty mir missing ast_map");
-            let nodeid = uii.to_one_node_id("--unpretty", &sess, &ast_map);
-
-            abort_on_err(driver::phase_3_run_analysis_passes(&sess,
-                                                             &cstore,
-                                                             ast_map,
-                                                             &arenas,
-                                                             &id,
-                                                             resolve::MakeGlobMap::No,
-                                                             |tcx, mir_map, _, _| {
-                if let Some(mir_map) = mir_map {
-                    try!(writeln!(out, "MIR for {}", tcx.map.node_to_string(nodeid)));
-                    let mir = mir_map.map.get(&nodeid).unwrap_or_else(|| {
-                        sess.fatal(&format!("no MIR map entry for node {}", nodeid))
-                    });
-                    try!(write_mir_pretty(mir, &mut out));
-                }
-                Ok(())
-            }), &sess)
-        }
-
         (PpmFlowGraph(mode), opt_uii) => {
             debug!("pretty printing flow graph for {:?}", opt_uii);
             let uii = opt_uii.unwrap_or_else(|| {
@@ -876,9 +892,10 @@ pub fn pretty_print_input(sess: Session,
                                                                      &arenas,
                                                                      &id,
                                                                      resolve::MakeGlobMap::No,
-                                                                     |tcx, _, _, _| {
+                                                                     |tcx, mir_map, _, _| {
                         print_flowgraph(variants,
                                         tcx,
+                                        mir_map.as_ref(),
                                         code,
                                         mode,
                                         out)
@@ -912,12 +929,13 @@ pub fn pretty_print_input(sess: Session,
     }
 }
 
-fn print_flowgraph<W: Write>(variants: Vec<borrowck_dot::Variant>,
-                             tcx: &ty::ctxt,
-                             code: blocks::Code,
-                             mode: PpFlowGraphMode,
-                             mut out: W)
-                             -> io::Result<()> {
+fn print_flowgraph<'tcx, W: Write>(variants: Vec<borrowck_dot::Variant>,
+                                   tcx: &TyCtxt<'tcx>,
+                                   mir_map: Option<&MirMap<'tcx>>,
+                                   code: blocks::Code,
+                                   mode: PpFlowGraphMode,
+                                   mut out: W)
+                                   -> io::Result<()> {
     let cfg = match code {
         blocks::BlockCode(block) => cfg::CFG::new(tcx, &block),
         blocks::FnLikeCode(fn_like) => cfg::CFG::new(tcx, &fn_like.body()),
@@ -943,6 +961,7 @@ fn print_flowgraph<W: Write>(variants: Vec<borrowck_dot::Variant>,
         blocks::FnLikeCode(fn_like) => {
             let (bccx, analysis_data) =
                 borrowck::build_borrowck_dataflow_data_for_fn(tcx,
+                                                              mir_map,
                                                               fn_like.to_fn_parts(),
                                                               &cfg);
 
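The pretty-printer hunks above fold the Some(uii) and None MIR cases into one branch by always building an iterator: iter::once for a single selected node, map.iter() for the whole crate, with the same writer call consuming either. A minimal, self-contained illustration of that pattern; the BTreeMap, key type, and dump function are made-up stand-ins, not rustc's MirMap or write_mir_pretty/write_mir_graphviz:

    use std::collections::BTreeMap;
    use std::iter;

    // Accept anything that yields (&id, &body) pairs, just as the two
    // write_mir_* calls in the diff accept either a single pair or the map.
    fn dump<'a>(items: impl Iterator<Item = (&'a u32, &'a String)>) {
        for (id, body) in items {
            println!("MIR for node {}:\n{}", id, body);
        }
    }

    fn main() {
        let mut map = BTreeMap::new();
        map.insert(1u32, "bb0: return".to_string());
        map.insert(2u32, "bb0: goto -> bb1".to_string());

        // Stand-in for an optional --unpretty node filter.
        let selected: Option<u32> = Some(2);
        match selected {
            Some(id) => {
                let body = map.get(&id).expect("no MIR entry for node");
                dump(iter::once((&id, body)));
            }
            None => dump(map.iter()),
        }
    }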
index 3220295d9b88aca9f1d06eb3158b70b6ce6d6231..fc12d546288b47b0c4a4fe5770723521c955b82d 100644
@@ -14,22 +14,20 @@ use driver;
 use rustc::dep_graph::DepGraph;
 use rustc_lint;
 use rustc_resolve as resolve;
-use rustc_typeck::middle::lang_items;
-use rustc_typeck::middle::free_region::FreeRegionMap;
-use rustc_typeck::middle::region::{self, CodeExtent};
-use rustc_typeck::middle::region::CodeExtentData;
-use rustc_typeck::middle::resolve_lifetime;
-use rustc_typeck::middle::stability;
-use rustc_typeck::middle::subst;
-use rustc_typeck::middle::subst::Subst;
-use rustc_typeck::middle::ty::{self, Ty, TypeFoldable};
-use rustc_typeck::middle::ty::relate::TypeRelation;
-use rustc_typeck::middle::infer::{self, TypeOrigin};
-use rustc_typeck::middle::infer::lub::Lub;
-use rustc_typeck::middle::infer::glb::Glb;
-use rustc_typeck::middle::infer::sub::Sub;
+use rustc::middle::lang_items;
+use rustc::middle::free_region::FreeRegionMap;
+use rustc::middle::region::{self, CodeExtent};
+use rustc::middle::region::CodeExtentData;
+use rustc::middle::resolve_lifetime;
+use rustc::middle::stability;
+use rustc::ty::subst;
+use rustc::ty::subst::Subst;
+use rustc::traits::ProjectionMode;
+use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc::ty::relate::TypeRelation;
+use rustc::infer::{self, InferOk, InferResult, TypeOrigin};
 use rustc_metadata::cstore::CStore;
-use rustc::front::map as hir_map;
+use rustc::hir::map as hir_map;
 use rustc::session::{self, config};
 use std::rc::Rc;
 use syntax::ast;
@@ -41,8 +39,8 @@ use syntax::errors::{Level, RenderSpan};
 use syntax::parse::token;
 use syntax::feature_gate::UnstableFeatures;
 
-use rustc_front::lowering::{lower_crate, LoweringContext};
-use rustc_front::hir;
+use rustc::hir::lowering::{lower_crate, LoweringContext};
+use rustc::hir;
 
 struct Env<'a, 'tcx: 'a> {
     infcx: &'a infer::InferCtxt<'a, 'tcx>,
@@ -113,8 +111,11 @@ fn test_env<F>(source_string: &str,
                                        Rc::new(CodeMap::new()), cstore.clone());
     rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
     let krate_config = Vec::new();
-    let input = config::Input::Str(source_string.to_string());
-    let krate = driver::phase_1_parse_input(&sess, krate_config, &input);
+    let input = config::Input::Str {
+        name: driver::anon_src(),
+        input: source_string.to_string(),
+    };
+    let krate = driver::phase_1_parse_input(&sess, krate_config, &input).unwrap();
     let krate = driver::phase_2_configure_and_expand(&sess, &cstore, krate, "test", None)
                     .expect("phase 2 aborted");
 
@@ -133,7 +134,7 @@ fn test_env<F>(source_string: &str,
     let named_region_map = resolve_lifetime::krate(&sess, &ast_map, &def_map.borrow());
     let region_map = region::resolve_crate(&sess, &ast_map);
     let index = stability::Index::new(&ast_map);
-    ty::ctxt::create_and_enter(&sess,
+    TyCtxt::create_and_enter(&sess,
                                &arenas,
                                def_map,
                                named_region_map.unwrap(),
@@ -142,8 +143,12 @@ fn test_env<F>(source_string: &str,
                                region_map,
                                lang_items,
                                index,
+                               "test_crate",
                                |tcx| {
-                                   let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None);
+                                   let infcx = infer::new_infer_ctxt(tcx,
+                                                                     &tcx.tables,
+                                                                     None,
+                                                                     ProjectionMode::AnyFinal);
                                    body(Env { infcx: &infcx });
                                    let free_regions = FreeRegionMap::new();
                                    infcx.resolve_regions_and_report_errors(&free_regions,
@@ -153,7 +158,7 @@ fn test_env<F>(source_string: &str,
 }
 
 impl<'a, 'tcx> Env<'a, 'tcx> {
-    pub fn tcx(&self) -> &ty::ctxt<'tcx> {
+    pub fn tcx(&self) -> &TyCtxt<'tcx> {
         self.infcx.tcx
     }
 
@@ -261,16 +266,15 @@ impl<'a, 'tcx> Env<'a, 'tcx> {
 
     pub fn t_fn(&self, input_tys: &[Ty<'tcx>], output_ty: Ty<'tcx>) -> Ty<'tcx> {
         let input_args = input_tys.iter().cloned().collect();
-        self.infcx.tcx.mk_fn(None,
-                             self.infcx.tcx.mk_bare_fn(ty::BareFnTy {
-                                 unsafety: hir::Unsafety::Normal,
-                                 abi: Abi::Rust,
-                                 sig: ty::Binder(ty::FnSig {
-                                     inputs: input_args,
-                                     output: ty::FnConverging(output_ty),
-                                     variadic: false,
-                                 }),
-                             }))
+        self.infcx.tcx.mk_fn_ptr(ty::BareFnTy {
+            unsafety: hir::Unsafety::Normal,
+            abi: Abi::Rust,
+            sig: ty::Binder(ty::FnSig {
+                inputs: input_args,
+                output: ty::FnConverging(output_ty),
+                variadic: false,
+            }),
+        })
     }
 
     pub fn t_nil(&self) -> Ty<'tcx> {
@@ -351,26 +355,29 @@ impl<'a, 'tcx> Env<'a, 'tcx> {
         infer::TypeTrace::dummy(self.tcx())
     }
 
-    pub fn sub(&self) -> Sub<'a, 'tcx> {
+    pub fn sub(&self, t1: &Ty<'tcx>, t2: &Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> {
         let trace = self.dummy_type_trace();
-        self.infcx.sub(true, trace)
+        self.infcx.sub(true, trace, t1, t2)
     }
 
-    pub fn lub(&self) -> Lub<'a, 'tcx> {
+    pub fn lub(&self, t1: &Ty<'tcx>, t2: &Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> {
         let trace = self.dummy_type_trace();
-        self.infcx.lub(true, trace)
+        self.infcx.lub(true, trace, t1, t2)
     }
 
-    pub fn glb(&self) -> Glb<'a, 'tcx> {
+    pub fn glb(&self, t1: &Ty<'tcx>, t2: &Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> {
         let trace = self.dummy_type_trace();
-        self.infcx.glb(true, trace)
+        self.infcx.glb(true, trace, t1, t2)
     }
 
     /// Checks that `t1 <: t2` is true (this may register additional
     /// region checks).
     pub fn check_sub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) {
-        match self.sub().relate(&t1, &t2) {
-            Ok(_) => {}
+        match self.sub(&t1, &t2) {
+            Ok(InferOk { obligations, .. }) => {
+                // FIXME(#32730) once obligations are being propagated, assert the right thing.
+                assert!(obligations.is_empty());
+            }
             Err(ref e) => {
                 panic!("unexpected error computing sub({:?},{:?}): {}", t1, t2, e);
             }
@@ -380,7 +387,7 @@ impl<'a, 'tcx> Env<'a, 'tcx> {
     /// Checks that `t1 <: t2` is false (this may register additional
     /// region checks).
     pub fn check_not_sub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) {
-        match self.sub().relate(&t1, &t2) {
+        match self.sub(&t1, &t2) {
             Err(_) => {}
             Ok(_) => {
                 panic!("unexpected success computing sub({:?},{:?})", t1, t2);
@@ -390,8 +397,11 @@ impl<'a, 'tcx> Env<'a, 'tcx> {
 
     /// Checks that `LUB(t1,t2) == t_lub`
     pub fn check_lub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>, t_lub: Ty<'tcx>) {
-        match self.lub().relate(&t1, &t2) {
-            Ok(t) => {
+        match self.lub(&t1, &t2) {
+            Ok(InferOk { obligations, value: t }) => {
+                // FIXME(#32730) once obligations are being propagated, assert the right thing.
+                assert!(obligations.is_empty());
+
                 self.assert_eq(t, t_lub);
             }
             Err(ref e) => {
@@ -403,11 +413,14 @@ impl<'a, 'tcx> Env<'a, 'tcx> {
     /// Checks that `GLB(t1,t2) == t_glb`
     pub fn check_glb(&self, t1: Ty<'tcx>, t2: Ty<'tcx>, t_glb: Ty<'tcx>) {
         debug!("check_glb(t1={}, t2={}, t_glb={})", t1, t2, t_glb);
-        match self.glb().relate(&t1, &t2) {
+        match self.glb(&t1, &t2) {
             Err(e) => {
                 panic!("unexpected error computing LUB: {:?}", e)
             }
-            Ok(t) => {
+            Ok(InferOk { obligations, value: t }) => {
+                // FIXME(#32730) once obligations are being propagated, assert the right thing.
+                assert!(obligations.is_empty());
+
                 self.assert_eq(t, t_glb);
 
                 // sanity check for good measure:
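Two of the hunks above reflect the same API change: config::Input::Str is now a struct variant that carries a source name alongside the text, so callers such as test_env construct it with an explicit name field. A stand-alone sketch of that shape; the enum, field names, and "<anon>" value here are illustrative stand-ins, not the compiler's actual definitions:

    use std::path::PathBuf;

    // Illustrative mirror of the old tuple variant becoming a struct variant.
    enum Input {
        File(PathBuf),
        Str { name: String, input: String },
    }

    fn describe(input: &Input) -> String {
        match input {
            Input::File(path) => format!("file {}", path.display()),
            Input::Str { name, input } => format!("{} ({} bytes)", name, input.len()),
        }
    }

    fn main() {
        // Roughly the shape test_env now builds from a name plus the source string.
        let anon = Input::Str {
            name: "<anon>".to_string(),
            input: "fn main() {}".to_string(),
        };
        let from_file = Input::File(PathBuf::from("src/lib.rs"));
        println!("{}", describe(&anon));
        println!("{}", describe(&from_file));
    }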
diff --git a/src/librustc_front/Cargo.toml b/src/librustc_front/Cargo.toml
deleted file mode 100644
index bf40cdb..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-[package]
-authors = ["The Rust Project Developers"]
-name = "rustc_front"
-version = "0.0.0"
-
-[lib]
-name = "rustc_front"
-path = "lib.rs"
-crate-type = ["dylib"]
-
-[dependencies]
-log = { path = "../liblog" }
-syntax = { path = "../libsyntax" }
-serialize = { path = "../libserialize" }
-rustc_bitflags = { path = "../librustc_bitflags" }
diff --git a/src/librustc_front/fold.rs b/src/librustc_front/fold.rs
deleted file mode 100644 (file)
index b5e56ed..0000000
+++ /dev/null
@@ -1,1179 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! A Folder represents an HIR->HIR fold; it accepts a HIR piece,
-//! and returns a piece of the same type.
-
-use hir::*;
-use syntax::ast::{Name, NodeId, DUMMY_NODE_ID, Attribute, Attribute_, MetaItem};
-use syntax::ast::MetaItemKind;
-use syntax::attr::ThinAttributesExt;
-use hir;
-use syntax::codemap::{respan, Span, Spanned};
-use syntax::ptr::P;
-use syntax::parse::token;
-use syntax::util::move_map::MoveMap;
-
-pub trait Folder : Sized {
-    // Any additions to this trait should happen in form
-    // of a call to a public `noop_*` function that only calls
-    // out to the folder again, not other `noop_*` functions.
-    //
-    // This is a necessary API workaround to the problem of not
-    // being able to call out to the super default method
-    // in an overridden default method.
-
-    fn fold_crate(&mut self, c: Crate) -> Crate {
-        noop_fold_crate(c, self)
-    }
-
-    fn fold_meta_items(&mut self, meta_items: HirVec<P<MetaItem>>) -> HirVec<P<MetaItem>> {
-        noop_fold_meta_items(meta_items, self)
-    }
-
-    fn fold_meta_item(&mut self, meta_item: P<MetaItem>) -> P<MetaItem> {
-        noop_fold_meta_item(meta_item, self)
-    }
-
-    fn fold_view_path(&mut self, view_path: P<ViewPath>) -> P<ViewPath> {
-        noop_fold_view_path(view_path, self)
-    }
-
-    fn fold_foreign_item(&mut self, ni: ForeignItem) -> ForeignItem {
-        noop_fold_foreign_item(ni, self)
-    }
-
-    fn fold_item(&mut self, i: Item) -> Item {
-        noop_fold_item(i, self)
-    }
-
-    fn fold_item_id(&mut self, i: ItemId) -> ItemId {
-        noop_fold_item_id(i, self)
-    }
-
-    fn fold_struct_field(&mut self, sf: StructField) -> StructField {
-        noop_fold_struct_field(sf, self)
-    }
-
-    fn fold_item_underscore(&mut self, i: Item_) -> Item_ {
-        noop_fold_item_underscore(i, self)
-    }
-
-    fn fold_trait_item(&mut self, i: TraitItem) -> TraitItem {
-        noop_fold_trait_item(i, self)
-    }
-
-    fn fold_impl_item(&mut self, i: ImplItem) -> ImplItem {
-        noop_fold_impl_item(i, self)
-    }
-
-    fn fold_fn_decl(&mut self, d: P<FnDecl>) -> P<FnDecl> {
-        noop_fold_fn_decl(d, self)
-    }
-
-    fn fold_block(&mut self, b: P<Block>) -> P<Block> {
-        noop_fold_block(b, self)
-    }
-
-    fn fold_stmt(&mut self, s: Stmt) -> Stmt {
-        noop_fold_stmt(s, self)
-    }
-
-    fn fold_arm(&mut self, a: Arm) -> Arm {
-        noop_fold_arm(a, self)
-    }
-
-    fn fold_pat(&mut self, p: P<Pat>) -> P<Pat> {
-        noop_fold_pat(p, self)
-    }
-
-    fn fold_decl(&mut self, d: P<Decl>) -> P<Decl> {
-        noop_fold_decl(d, self)
-    }
-
-    fn fold_expr(&mut self, e: P<Expr>) -> P<Expr> {
-        e.map(|e| noop_fold_expr(e, self))
-    }
-
-    fn fold_ty(&mut self, t: P<Ty>) -> P<Ty> {
-        noop_fold_ty(t, self)
-    }
-
-    fn fold_ty_binding(&mut self, t: TypeBinding) -> TypeBinding {
-        noop_fold_ty_binding(t, self)
-    }
-
-    fn fold_mod(&mut self, m: Mod) -> Mod {
-        noop_fold_mod(m, self)
-    }
-
-    fn fold_foreign_mod(&mut self, nm: ForeignMod) -> ForeignMod {
-        noop_fold_foreign_mod(nm, self)
-    }
-
-    fn fold_variant(&mut self, v: Variant) -> Variant {
-        noop_fold_variant(v, self)
-    }
-
-    fn fold_name(&mut self, n: Name) -> Name {
-        noop_fold_name(n, self)
-    }
-
-    fn fold_ident(&mut self, i: Ident) -> Ident {
-        noop_fold_ident(i, self)
-    }
-
-    fn fold_usize(&mut self, i: usize) -> usize {
-        noop_fold_usize(i, self)
-    }
-
-    fn fold_path(&mut self, p: Path) -> Path {
-        noop_fold_path(p, self)
-    }
-
-    fn fold_path_parameters(&mut self, p: PathParameters) -> PathParameters {
-        noop_fold_path_parameters(p, self)
-    }
-
-    fn fold_angle_bracketed_parameter_data(&mut self,
-                                           p: AngleBracketedParameterData)
-                                           -> AngleBracketedParameterData {
-        noop_fold_angle_bracketed_parameter_data(p, self)
-    }
-
-    fn fold_parenthesized_parameter_data(&mut self,
-                                         p: ParenthesizedParameterData)
-                                         -> ParenthesizedParameterData {
-        noop_fold_parenthesized_parameter_data(p, self)
-    }
-
-    fn fold_local(&mut self, l: P<Local>) -> P<Local> {
-        noop_fold_local(l, self)
-    }
-
-    fn fold_explicit_self(&mut self, es: ExplicitSelf) -> ExplicitSelf {
-        noop_fold_explicit_self(es, self)
-    }
-
-    fn fold_explicit_self_underscore(&mut self, es: ExplicitSelf_) -> ExplicitSelf_ {
-        noop_fold_explicit_self_underscore(es, self)
-    }
-
-    fn fold_lifetime(&mut self, l: Lifetime) -> Lifetime {
-        noop_fold_lifetime(l, self)
-    }
-
-    fn fold_lifetime_def(&mut self, l: LifetimeDef) -> LifetimeDef {
-        noop_fold_lifetime_def(l, self)
-    }
-
-    fn fold_attribute(&mut self, at: Attribute) -> Option<Attribute> {
-        noop_fold_attribute(at, self)
-    }
-
-    fn fold_arg(&mut self, a: Arg) -> Arg {
-        noop_fold_arg(a, self)
-    }
-
-    fn fold_generics(&mut self, generics: Generics) -> Generics {
-        noop_fold_generics(generics, self)
-    }
-
-    fn fold_trait_ref(&mut self, p: TraitRef) -> TraitRef {
-        noop_fold_trait_ref(p, self)
-    }
-
-    fn fold_poly_trait_ref(&mut self, p: PolyTraitRef) -> PolyTraitRef {
-        noop_fold_poly_trait_ref(p, self)
-    }
-
-    fn fold_variant_data(&mut self, vdata: VariantData) -> VariantData {
-        noop_fold_variant_data(vdata, self)
-    }
-
-    fn fold_lifetimes(&mut self, lts: HirVec<Lifetime>) -> HirVec<Lifetime> {
-        noop_fold_lifetimes(lts, self)
-    }
-
-    fn fold_lifetime_defs(&mut self, lts: HirVec<LifetimeDef>) -> HirVec<LifetimeDef> {
-        noop_fold_lifetime_defs(lts, self)
-    }
-
-    fn fold_ty_param(&mut self, tp: TyParam) -> TyParam {
-        noop_fold_ty_param(tp, self)
-    }
-
-    fn fold_ty_params(&mut self, tps: HirVec<TyParam>) -> HirVec<TyParam> {
-        noop_fold_ty_params(tps, self)
-    }
-
-    fn fold_opt_lifetime(&mut self, o_lt: Option<Lifetime>) -> Option<Lifetime> {
-        noop_fold_opt_lifetime(o_lt, self)
-    }
-
-    fn fold_opt_bounds(&mut self,
-                       b: Option<TyParamBounds>)
-                       -> Option<TyParamBounds> {
-        noop_fold_opt_bounds(b, self)
-    }
-
-    fn fold_bounds(&mut self, b: TyParamBounds) -> TyParamBounds {
-        noop_fold_bounds(b, self)
-    }
-
-    fn fold_ty_param_bound(&mut self, tpb: TyParamBound) -> TyParamBound {
-        noop_fold_ty_param_bound(tpb, self)
-    }
-
-    fn fold_mt(&mut self, mt: MutTy) -> MutTy {
-        noop_fold_mt(mt, self)
-    }
-
-    fn fold_field(&mut self, field: Field) -> Field {
-        noop_fold_field(field, self)
-    }
-
-    fn fold_where_clause(&mut self, where_clause: WhereClause) -> WhereClause {
-        noop_fold_where_clause(where_clause, self)
-    }
-
-    fn fold_where_predicate(&mut self, where_predicate: WherePredicate) -> WherePredicate {
-        noop_fold_where_predicate(where_predicate, self)
-    }
-
-    /// called for the `id` on each declaration
-    fn new_id(&mut self, i: NodeId) -> NodeId {
-        i
-    }
-
-    /// called for ids that are references (e.g., ItemDef)
-    fn map_id(&mut self, i: NodeId) -> NodeId {
-        i
-    }
-
-    fn new_span(&mut self, sp: Span) -> Span {
-        sp
-    }
-}
-
-pub fn noop_fold_meta_items<T: Folder>(meta_items: HirVec<P<MetaItem>>,
-                                       fld: &mut T)
-                                       -> HirVec<P<MetaItem>> {
-    meta_items.move_map(|x| fld.fold_meta_item(x))
-}
-
-pub fn noop_fold_view_path<T: Folder>(view_path: P<ViewPath>, fld: &mut T) -> P<ViewPath> {
-    view_path.map(|Spanned { node, span }| {
-        Spanned {
-            node: match node {
-                ViewPathSimple(name, path) => {
-                    ViewPathSimple(name, fld.fold_path(path))
-                }
-                ViewPathGlob(path) => {
-                    ViewPathGlob(fld.fold_path(path))
-                }
-                ViewPathList(path, path_list_idents) => {
-                    ViewPathList(fld.fold_path(path),
-                                 path_list_idents.move_map(|path_list_ident| {
-                                     Spanned {
-                                         node: match path_list_ident.node {
-                                             PathListIdent { id, name, rename } => PathListIdent {
-                                                 id: fld.new_id(id),
-                                                 name: name,
-                                                 rename: rename,
-                                             },
-                                             PathListMod { id, rename } => PathListMod {
-                                                 id: fld.new_id(id),
-                                                 rename: rename,
-                                             },
-                                         },
-                                         span: fld.new_span(path_list_ident.span),
-                                     }
-                                 }))
-                }
-            },
-            span: fld.new_span(span),
-        }
-    })
-}
-
-pub fn fold_attrs<T: Folder>(attrs: HirVec<Attribute>, fld: &mut T) -> HirVec<Attribute> {
-    attrs.move_flat_map(|x| fld.fold_attribute(x))
-}
-
-pub fn noop_fold_arm<T: Folder>(Arm { attrs, pats, guard, body }: Arm, fld: &mut T) -> Arm {
-    Arm {
-        attrs: fold_attrs(attrs, fld),
-        pats: pats.move_map(|x| fld.fold_pat(x)),
-        guard: guard.map(|x| fld.fold_expr(x)),
-        body: fld.fold_expr(body),
-    }
-}
-
-pub fn noop_fold_decl<T: Folder>(d: P<Decl>, fld: &mut T) -> P<Decl> {
-    d.map(|Spanned { node, span }| {
-        match node {
-            DeclLocal(l) => Spanned {
-                node: DeclLocal(fld.fold_local(l)),
-                span: fld.new_span(span),
-            },
-            DeclItem(it) => Spanned {
-                node: DeclItem(fld.fold_item_id(it)),
-                span: fld.new_span(span),
-            },
-        }
-    })
-}
-
-pub fn noop_fold_ty_binding<T: Folder>(b: TypeBinding, fld: &mut T) -> TypeBinding {
-    TypeBinding {
-        id: fld.new_id(b.id),
-        name: b.name,
-        ty: fld.fold_ty(b.ty),
-        span: fld.new_span(b.span),
-    }
-}
-
-pub fn noop_fold_ty<T: Folder>(t: P<Ty>, fld: &mut T) -> P<Ty> {
-    t.map(|Ty { id, node, span }| {
-        Ty {
-            id: fld.new_id(id),
-            node: match node {
-                TyInfer => node,
-                TyVec(ty) => TyVec(fld.fold_ty(ty)),
-                TyPtr(mt) => TyPtr(fld.fold_mt(mt)),
-                TyRptr(region, mt) => {
-                    TyRptr(fld.fold_opt_lifetime(region), fld.fold_mt(mt))
-                }
-                TyBareFn(f) => {
-                    TyBareFn(f.map(|BareFnTy { lifetimes, unsafety, abi, decl }| {
-                        BareFnTy {
-                            lifetimes: fld.fold_lifetime_defs(lifetimes),
-                            unsafety: unsafety,
-                            abi: abi,
-                            decl: fld.fold_fn_decl(decl),
-                        }
-                    }))
-                }
-                TyTup(tys) => TyTup(tys.move_map(|ty| fld.fold_ty(ty))),
-                TyPath(qself, path) => {
-                    let qself = qself.map(|QSelf { ty, position }| {
-                        QSelf {
-                            ty: fld.fold_ty(ty),
-                            position: position,
-                        }
-                    });
-                    TyPath(qself, fld.fold_path(path))
-                }
-                TyObjectSum(ty, bounds) => {
-                    TyObjectSum(fld.fold_ty(ty), fld.fold_bounds(bounds))
-                }
-                TyFixedLengthVec(ty, e) => {
-                    TyFixedLengthVec(fld.fold_ty(ty), fld.fold_expr(e))
-                }
-                TyTypeof(expr) => {
-                    TyTypeof(fld.fold_expr(expr))
-                }
-                TyPolyTraitRef(bounds) => {
-                    TyPolyTraitRef(bounds.move_map(|b| fld.fold_ty_param_bound(b)))
-                }
-            },
-            span: fld.new_span(span),
-        }
-    })
-}
-
-pub fn noop_fold_foreign_mod<T: Folder>(ForeignMod { abi, items }: ForeignMod,
-                                        fld: &mut T)
-                                        -> ForeignMod {
-    ForeignMod {
-        abi: abi,
-        items: items.move_map(|x| fld.fold_foreign_item(x)),
-    }
-}
-
-pub fn noop_fold_variant<T: Folder>(v: Variant, fld: &mut T) -> Variant {
-    Spanned {
-        node: Variant_ {
-            name: v.node.name,
-            attrs: fold_attrs(v.node.attrs, fld),
-            data: fld.fold_variant_data(v.node.data),
-            disr_expr: v.node.disr_expr.map(|e| fld.fold_expr(e)),
-        },
-        span: fld.new_span(v.span),
-    }
-}
-
-pub fn noop_fold_name<T: Folder>(n: Name, _: &mut T) -> Name {
-    n
-}
-
-pub fn noop_fold_ident<T: Folder>(i: Ident, _: &mut T) -> Ident {
-    i
-}
-
-pub fn noop_fold_usize<T: Folder>(i: usize, _: &mut T) -> usize {
-    i
-}
-
-pub fn noop_fold_path<T: Folder>(Path { global, segments, span }: Path, fld: &mut T) -> Path {
-    Path {
-        global: global,
-        segments: segments.move_map(|PathSegment { identifier, parameters }| {
-            PathSegment {
-                identifier: fld.fold_ident(identifier),
-                parameters: fld.fold_path_parameters(parameters),
-            }
-        }),
-        span: fld.new_span(span),
-    }
-}
-
-pub fn noop_fold_path_parameters<T: Folder>(path_parameters: PathParameters,
-                                            fld: &mut T)
-                                            -> PathParameters {
-    match path_parameters {
-        AngleBracketedParameters(data) =>
-            AngleBracketedParameters(fld.fold_angle_bracketed_parameter_data(data)),
-        ParenthesizedParameters(data) =>
-            ParenthesizedParameters(fld.fold_parenthesized_parameter_data(data)),
-    }
-}
-
-pub fn noop_fold_angle_bracketed_parameter_data<T: Folder>(data: AngleBracketedParameterData,
-                                                           fld: &mut T)
-                                                           -> AngleBracketedParameterData {
-    let AngleBracketedParameterData { lifetimes, types, bindings } = data;
-    AngleBracketedParameterData {
-        lifetimes: fld.fold_lifetimes(lifetimes),
-        types: types.move_map(|ty| fld.fold_ty(ty)),
-        bindings: bindings.move_map(|b| fld.fold_ty_binding(b)),
-    }
-}
-
-pub fn noop_fold_parenthesized_parameter_data<T: Folder>(data: ParenthesizedParameterData,
-                                                         fld: &mut T)
-                                                         -> ParenthesizedParameterData {
-    let ParenthesizedParameterData { inputs, output, span } = data;
-    ParenthesizedParameterData {
-        inputs: inputs.move_map(|ty| fld.fold_ty(ty)),
-        output: output.map(|ty| fld.fold_ty(ty)),
-        span: fld.new_span(span),
-    }
-}
-
-pub fn noop_fold_local<T: Folder>(l: P<Local>, fld: &mut T) -> P<Local> {
-    l.map(|Local { id, pat, ty, init, span, attrs }| {
-        Local {
-            id: fld.new_id(id),
-            ty: ty.map(|t| fld.fold_ty(t)),
-            pat: fld.fold_pat(pat),
-            init: init.map(|e| fld.fold_expr(e)),
-            span: fld.new_span(span),
-            attrs: attrs.map_thin_attrs(|attrs| fold_attrs(attrs.into(), fld).into()),
-        }
-    })
-}
-
-pub fn noop_fold_attribute<T: Folder>(at: Attribute, fld: &mut T) -> Option<Attribute> {
-    let Spanned {node: Attribute_ {id, style, value, is_sugared_doc}, span} = at;
-    Some(Spanned {
-        node: Attribute_ {
-            id: id,
-            style: style,
-            value: fld.fold_meta_item(value),
-            is_sugared_doc: is_sugared_doc,
-        },
-        span: fld.new_span(span),
-    })
-}
-
-pub fn noop_fold_explicit_self_underscore<T: Folder>(es: ExplicitSelf_,
-                                                     fld: &mut T)
-                                                     -> ExplicitSelf_ {
-    match es {
-        SelfStatic | SelfValue(_) => es,
-        SelfRegion(lifetime, m, name) => {
-            SelfRegion(fld.fold_opt_lifetime(lifetime), m, name)
-        }
-        SelfExplicit(typ, name) => {
-            SelfExplicit(fld.fold_ty(typ), name)
-        }
-    }
-}
-
-pub fn noop_fold_explicit_self<T: Folder>(Spanned { span, node }: ExplicitSelf,
-                                          fld: &mut T)
-                                          -> ExplicitSelf {
-    Spanned {
-        node: fld.fold_explicit_self_underscore(node),
-        span: fld.new_span(span),
-    }
-}
-
-pub fn noop_fold_meta_item<T: Folder>(mi: P<MetaItem>, fld: &mut T) -> P<MetaItem> {
-    mi.map(|Spanned { node, span }| {
-        Spanned {
-            node: match node {
-                MetaItemKind::Word(id) => MetaItemKind::Word(id),
-                MetaItemKind::List(id, mis) => {
-                    MetaItemKind::List(id, mis.move_map(|e| fld.fold_meta_item(e)))
-                }
-                MetaItemKind::NameValue(id, s) => MetaItemKind::NameValue(id, s),
-            },
-            span: fld.new_span(span),
-        }
-    })
-}
-
-pub fn noop_fold_arg<T: Folder>(Arg { id, pat, ty }: Arg, fld: &mut T) -> Arg {
-    Arg {
-        id: fld.new_id(id),
-        pat: fld.fold_pat(pat),
-        ty: fld.fold_ty(ty),
-    }
-}
-
-pub fn noop_fold_fn_decl<T: Folder>(decl: P<FnDecl>, fld: &mut T) -> P<FnDecl> {
-    decl.map(|FnDecl { inputs, output, variadic }| {
-        FnDecl {
-            inputs: inputs.move_map(|x| fld.fold_arg(x)),
-            output: match output {
-                Return(ty) => Return(fld.fold_ty(ty)),
-                DefaultReturn(span) => DefaultReturn(span),
-                NoReturn(span) => NoReturn(span),
-            },
-            variadic: variadic,
-        }
-    })
-}
-
-pub fn noop_fold_ty_param_bound<T>(tpb: TyParamBound, fld: &mut T) -> TyParamBound
-    where T: Folder
-{
-    match tpb {
-        TraitTyParamBound(ty, modifier) => TraitTyParamBound(fld.fold_poly_trait_ref(ty), modifier),
-        RegionTyParamBound(lifetime) => RegionTyParamBound(fld.fold_lifetime(lifetime)),
-    }
-}
-
-pub fn noop_fold_ty_param<T: Folder>(tp: TyParam, fld: &mut T) -> TyParam {
-    let TyParam {id, name, bounds, default, span} = tp;
-    TyParam {
-        id: fld.new_id(id),
-        name: name,
-        bounds: fld.fold_bounds(bounds),
-        default: default.map(|x| fld.fold_ty(x)),
-        span: span,
-    }
-}
-
-pub fn noop_fold_ty_params<T: Folder>(tps: HirVec<TyParam>,
-                                      fld: &mut T)
-                                      -> HirVec<TyParam> {
-    tps.move_map(|tp| fld.fold_ty_param(tp))
-}
-
-pub fn noop_fold_lifetime<T: Folder>(l: Lifetime, fld: &mut T) -> Lifetime {
-    Lifetime {
-        id: fld.new_id(l.id),
-        name: l.name,
-        span: fld.new_span(l.span),
-    }
-}
-
-pub fn noop_fold_lifetime_def<T: Folder>(l: LifetimeDef, fld: &mut T) -> LifetimeDef {
-    LifetimeDef {
-        lifetime: fld.fold_lifetime(l.lifetime),
-        bounds: fld.fold_lifetimes(l.bounds),
-    }
-}
-
-pub fn noop_fold_lifetimes<T: Folder>(lts: HirVec<Lifetime>, fld: &mut T) -> HirVec<Lifetime> {
-    lts.move_map(|l| fld.fold_lifetime(l))
-}
-
-pub fn noop_fold_lifetime_defs<T: Folder>(lts: HirVec<LifetimeDef>,
-                                          fld: &mut T)
-                                          -> HirVec<LifetimeDef> {
-    lts.move_map(|l| fld.fold_lifetime_def(l))
-}
-
-pub fn noop_fold_opt_lifetime<T: Folder>(o_lt: Option<Lifetime>, fld: &mut T) -> Option<Lifetime> {
-    o_lt.map(|lt| fld.fold_lifetime(lt))
-}
-
-pub fn noop_fold_generics<T: Folder>(Generics { ty_params, lifetimes, where_clause }: Generics,
-                                     fld: &mut T)
-                                     -> Generics {
-    Generics {
-        ty_params: fld.fold_ty_params(ty_params),
-        lifetimes: fld.fold_lifetime_defs(lifetimes),
-        where_clause: fld.fold_where_clause(where_clause),
-    }
-}
-
-pub fn noop_fold_where_clause<T: Folder>(WhereClause { id, predicates }: WhereClause,
-                                         fld: &mut T)
-                                         -> WhereClause {
-    WhereClause {
-        id: fld.new_id(id),
-        predicates: predicates.move_map(|predicate| fld.fold_where_predicate(predicate)),
-    }
-}
-
-pub fn noop_fold_where_predicate<T: Folder>(pred: WherePredicate, fld: &mut T) -> WherePredicate {
-    match pred {
-        hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate{bound_lifetimes,
-                                                                     bounded_ty,
-                                                                     bounds,
-                                                                     span}) => {
-            hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
-                bound_lifetimes: fld.fold_lifetime_defs(bound_lifetimes),
-                bounded_ty: fld.fold_ty(bounded_ty),
-                bounds: bounds.move_map(|x| fld.fold_ty_param_bound(x)),
-                span: fld.new_span(span),
-            })
-        }
-        hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate{lifetime,
-                                                                       bounds,
-                                                                       span}) => {
-            hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
-                span: fld.new_span(span),
-                lifetime: fld.fold_lifetime(lifetime),
-                bounds: bounds.move_map(|bound| fld.fold_lifetime(bound)),
-            })
-        }
-        hir::WherePredicate::EqPredicate(hir::WhereEqPredicate{id,
-                                                               path,
-                                                               ty,
-                                                               span}) => {
-            hir::WherePredicate::EqPredicate(hir::WhereEqPredicate {
-                id: fld.new_id(id),
-                path: fld.fold_path(path),
-                ty: fld.fold_ty(ty),
-                span: fld.new_span(span),
-            })
-        }
-    }
-}
-
-pub fn noop_fold_variant_data<T: Folder>(vdata: VariantData, fld: &mut T) -> VariantData {
-    match vdata {
-        VariantData::Struct(fields, id) => {
-            VariantData::Struct(fields.move_map(|f| fld.fold_struct_field(f)),
-                                fld.new_id(id))
-        }
-        VariantData::Tuple(fields, id) => {
-            VariantData::Tuple(fields.move_map(|f| fld.fold_struct_field(f)),
-                               fld.new_id(id))
-        }
-        VariantData::Unit(id) => VariantData::Unit(fld.new_id(id)),
-    }
-}
-
-pub fn noop_fold_trait_ref<T: Folder>(p: TraitRef, fld: &mut T) -> TraitRef {
-    let id = fld.new_id(p.ref_id);
-    let TraitRef {
-        path,
-        ref_id: _,
-    } = p;
-    hir::TraitRef {
-        path: fld.fold_path(path),
-        ref_id: id,
-    }
-}
-
-pub fn noop_fold_poly_trait_ref<T: Folder>(p: PolyTraitRef, fld: &mut T) -> PolyTraitRef {
-    hir::PolyTraitRef {
-        bound_lifetimes: fld.fold_lifetime_defs(p.bound_lifetimes),
-        trait_ref: fld.fold_trait_ref(p.trait_ref),
-        span: fld.new_span(p.span),
-    }
-}
-
-pub fn noop_fold_struct_field<T: Folder>(f: StructField, fld: &mut T) -> StructField {
-    let StructField {node: StructField_ {id, kind, ty, attrs}, span} = f;
-    Spanned {
-        node: StructField_ {
-            id: fld.new_id(id),
-            kind: kind,
-            ty: fld.fold_ty(ty),
-            attrs: fold_attrs(attrs, fld),
-        },
-        span: fld.new_span(span),
-    }
-}
-
-pub fn noop_fold_field<T: Folder>(Field { name, expr, span }: Field, folder: &mut T) -> Field {
-    Field {
-        name: respan(folder.new_span(name.span), folder.fold_name(name.node)),
-        expr: folder.fold_expr(expr),
-        span: folder.new_span(span),
-    }
-}
-
-pub fn noop_fold_mt<T: Folder>(MutTy { ty, mutbl }: MutTy, folder: &mut T) -> MutTy {
-    MutTy {
-        ty: folder.fold_ty(ty),
-        mutbl: mutbl,
-    }
-}
-
-pub fn noop_fold_opt_bounds<T: Folder>(b: Option<TyParamBounds>,
-                                       folder: &mut T)
-                                       -> Option<TyParamBounds> {
-    b.map(|bounds| folder.fold_bounds(bounds))
-}
-
-fn noop_fold_bounds<T: Folder>(bounds: TyParamBounds, folder: &mut T) -> TyParamBounds {
-    bounds.move_map(|bound| folder.fold_ty_param_bound(bound))
-}
-
-pub fn noop_fold_block<T: Folder>(b: P<Block>, folder: &mut T) -> P<Block> {
-    b.map(|Block { id, stmts, expr, rules, span }| {
-        Block {
-            id: folder.new_id(id),
-            stmts: stmts.move_map(|s| folder.fold_stmt(s)),
-            expr: expr.map(|x| folder.fold_expr(x)),
-            rules: rules,
-            span: folder.new_span(span),
-        }
-    })
-}
-
-pub fn noop_fold_item_underscore<T: Folder>(i: Item_, folder: &mut T) -> Item_ {
-    match i {
-        ItemExternCrate(string) => ItemExternCrate(string),
-        ItemUse(view_path) => {
-            ItemUse(folder.fold_view_path(view_path))
-        }
-        ItemStatic(t, m, e) => {
-            ItemStatic(folder.fold_ty(t), m, folder.fold_expr(e))
-        }
-        ItemConst(t, e) => {
-            ItemConst(folder.fold_ty(t), folder.fold_expr(e))
-        }
-        ItemFn(decl, unsafety, constness, abi, generics, body) => {
-            ItemFn(folder.fold_fn_decl(decl),
-                   unsafety,
-                   constness,
-                   abi,
-                   folder.fold_generics(generics),
-                   folder.fold_block(body))
-        }
-        ItemMod(m) => ItemMod(folder.fold_mod(m)),
-        ItemForeignMod(nm) => ItemForeignMod(folder.fold_foreign_mod(nm)),
-        ItemTy(t, generics) => {
-            ItemTy(folder.fold_ty(t), folder.fold_generics(generics))
-        }
-        ItemEnum(enum_definition, generics) => {
-            ItemEnum(hir::EnumDef {
-                         variants: enum_definition.variants.move_map(|x| folder.fold_variant(x)),
-                     },
-                     folder.fold_generics(generics))
-        }
-        ItemStruct(struct_def, generics) => {
-            let struct_def = folder.fold_variant_data(struct_def);
-            ItemStruct(struct_def, folder.fold_generics(generics))
-        }
-        ItemDefaultImpl(unsafety, ref trait_ref) => {
-            ItemDefaultImpl(unsafety, folder.fold_trait_ref((*trait_ref).clone()))
-        }
-        ItemImpl(unsafety, polarity, generics, ifce, ty, impl_items) => {
-            let new_impl_items = impl_items
-                .move_map(|item| folder.fold_impl_item(item));
-            let ifce = match ifce {
-                None => None,
-                Some(ref trait_ref) => {
-                    Some(folder.fold_trait_ref((*trait_ref).clone()))
-                }
-            };
-            ItemImpl(unsafety,
-                     polarity,
-                     folder.fold_generics(generics),
-                     ifce,
-                     folder.fold_ty(ty),
-                     new_impl_items)
-        }
-        ItemTrait(unsafety, generics, bounds, items) => {
-            let bounds = folder.fold_bounds(bounds);
-            let items = items.move_map(|item| folder.fold_trait_item(item));
-            ItemTrait(unsafety, folder.fold_generics(generics), bounds, items)
-        }
-    }
-}
-
-pub fn noop_fold_trait_item<T: Folder>(i: TraitItem,
-                                       folder: &mut T)
-                                       -> TraitItem {
-    TraitItem {
-        id: folder.new_id(i.id),
-        name: folder.fold_name(i.name),
-        attrs: fold_attrs(i.attrs, folder),
-        node: match i.node {
-            ConstTraitItem(ty, default) => {
-                ConstTraitItem(folder.fold_ty(ty), default.map(|x| folder.fold_expr(x)))
-            }
-            MethodTraitItem(sig, body) => {
-                MethodTraitItem(noop_fold_method_sig(sig, folder),
-                                body.map(|x| folder.fold_block(x)))
-            }
-            TypeTraitItem(bounds, default) => {
-                TypeTraitItem(folder.fold_bounds(bounds),
-                              default.map(|x| folder.fold_ty(x)))
-            }
-        },
-        span: folder.new_span(i.span),
-    }
-}
-
-pub fn noop_fold_impl_item<T: Folder>(i: ImplItem, folder: &mut T) -> ImplItem {
-    ImplItem {
-        id: folder.new_id(i.id),
-        name: folder.fold_name(i.name),
-        attrs: fold_attrs(i.attrs, folder),
-        vis: i.vis,
-        node: match i.node {
-            ImplItemKind::Const(ty, expr) => {
-                ImplItemKind::Const(folder.fold_ty(ty), folder.fold_expr(expr))
-            }
-            ImplItemKind::Method(sig, body) => {
-                ImplItemKind::Method(noop_fold_method_sig(sig, folder), folder.fold_block(body))
-            }
-            ImplItemKind::Type(ty) => ImplItemKind::Type(folder.fold_ty(ty)),
-        },
-        span: folder.new_span(i.span),
-    }
-}
-
-pub fn noop_fold_mod<T: Folder>(Mod { inner, item_ids }: Mod, folder: &mut T) -> Mod {
-    Mod {
-        inner: folder.new_span(inner),
-        item_ids: item_ids.move_map(|x| folder.fold_item_id(x)),
-    }
-}
-
-pub fn noop_fold_crate<T: Folder>(Crate { module, attrs, config, span,
-                                          exported_macros, items }: Crate,
-                                  folder: &mut T)
-                                  -> Crate {
-    let config = folder.fold_meta_items(config);
-
-    let crate_mod = folder.fold_item(hir::Item {
-        name: token::special_idents::invalid.name,
-        attrs: attrs,
-        id: DUMMY_NODE_ID,
-        vis: hir::Public,
-        span: span,
-        node: hir::ItemMod(module),
-    });
-
-    let (module, attrs, span) = match crate_mod {
-        hir::Item { attrs, span, node, .. } => {
-            match node {
-                hir::ItemMod(m) => (m, attrs, span),
-                _ => panic!("fold converted a module to not a module"),
-            }
-        }
-    };
-
-    let items = items.into_iter()
-                     .map(|(id, item)| (id, folder.fold_item(item)))
-                     .collect();
-
-    Crate {
-        module: module,
-        attrs: attrs,
-        config: config,
-        span: span,
-        exported_macros: exported_macros,
-        items: items,
-    }
-}
-
-pub fn noop_fold_item_id<T: Folder>(i: ItemId, folder: &mut T) -> ItemId {
-    let id = folder.map_id(i.id);
-    ItemId { id: id }
-}
-
-// fold one item into one item
-pub fn noop_fold_item<T: Folder>(item: Item, folder: &mut T) -> Item {
-    let Item { id, name, attrs, node, vis, span } = item;
-    let id = folder.new_id(id);
-    let node = folder.fold_item_underscore(node);
-    // FIXME: we should update the impl_pretty_name, but it uses pretty printing.
-    // let ident = match node {
-    //     // The node may have changed, recompute the "pretty" impl name.
-    //     ItemImpl(_, _, _, ref maybe_trait, ref ty, _) => {
-    //         impl_pretty_name(maybe_trait, Some(&**ty))
-    //     }
-    //     _ => ident
-    // };
-
-    Item {
-        id: id,
-        name: folder.fold_name(name),
-        attrs: fold_attrs(attrs, folder),
-        node: node,
-        vis: vis,
-        span: folder.new_span(span),
-    }
-}
-
-pub fn noop_fold_foreign_item<T: Folder>(ni: ForeignItem, folder: &mut T) -> ForeignItem {
-    ForeignItem {
-        id: folder.new_id(ni.id),
-        name: folder.fold_name(ni.name),
-        attrs: fold_attrs(ni.attrs, folder),
-        node: match ni.node {
-            ForeignItemFn(fdec, generics) => {
-                ForeignItemFn(folder.fold_fn_decl(fdec), folder.fold_generics(generics))
-            }
-            ForeignItemStatic(t, m) => {
-                ForeignItemStatic(folder.fold_ty(t), m)
-            }
-        },
-        vis: ni.vis,
-        span: folder.new_span(ni.span),
-    }
-}
-
-pub fn noop_fold_method_sig<T: Folder>(sig: MethodSig, folder: &mut T) -> MethodSig {
-    MethodSig {
-        generics: folder.fold_generics(sig.generics),
-        abi: sig.abi,
-        explicit_self: folder.fold_explicit_self(sig.explicit_self),
-        unsafety: sig.unsafety,
-        constness: sig.constness,
-        decl: folder.fold_fn_decl(sig.decl),
-    }
-}
-
-pub fn noop_fold_pat<T: Folder>(p: P<Pat>, folder: &mut T) -> P<Pat> {
-    p.map(|Pat { id, node, span }| {
-        Pat {
-            id: folder.new_id(id),
-            node: match node {
-                PatKind::Wild => PatKind::Wild,
-                PatKind::Ident(binding_mode, pth1, sub) => {
-                    PatKind::Ident(binding_mode,
-                             Spanned {
-                                 span: folder.new_span(pth1.span),
-                                 node: folder.fold_ident(pth1.node),
-                             },
-                             sub.map(|x| folder.fold_pat(x)))
-                }
-                PatKind::Lit(e) => PatKind::Lit(folder.fold_expr(e)),
-                PatKind::TupleStruct(pth, pats) => {
-                    PatKind::TupleStruct(folder.fold_path(pth),
-                            pats.map(|pats| pats.move_map(|x| folder.fold_pat(x))))
-                }
-                PatKind::Path(pth) => {
-                    PatKind::Path(folder.fold_path(pth))
-                }
-                PatKind::QPath(qself, pth) => {
-                    let qself = QSelf { ty: folder.fold_ty(qself.ty), ..qself };
-                    PatKind::QPath(qself, folder.fold_path(pth))
-                }
-                PatKind::Struct(pth, fields, etc) => {
-                    let pth = folder.fold_path(pth);
-                    let fs = fields.move_map(|f| {
-                        Spanned {
-                            span: folder.new_span(f.span),
-                            node: hir::FieldPat {
-                                name: f.node.name,
-                                pat: folder.fold_pat(f.node.pat),
-                                is_shorthand: f.node.is_shorthand,
-                            },
-                        }
-                    });
-                    PatKind::Struct(pth, fs, etc)
-                }
-                PatKind::Tup(elts) => PatKind::Tup(elts.move_map(|x| folder.fold_pat(x))),
-                PatKind::Box(inner) => PatKind::Box(folder.fold_pat(inner)),
-                PatKind::Ref(inner, mutbl) => PatKind::Ref(folder.fold_pat(inner), mutbl),
-                PatKind::Range(e1, e2) => {
-                    PatKind::Range(folder.fold_expr(e1), folder.fold_expr(e2))
-                }
-                PatKind::Vec(before, slice, after) => {
-                    PatKind::Vec(before.move_map(|x| folder.fold_pat(x)),
-                           slice.map(|x| folder.fold_pat(x)),
-                           after.move_map(|x| folder.fold_pat(x)))
-                }
-            },
-            span: folder.new_span(span),
-        }
-    })
-}
-
-pub fn noop_fold_expr<T: Folder>(Expr { id, node, span, attrs }: Expr, folder: &mut T) -> Expr {
-    Expr {
-        id: folder.new_id(id),
-        node: match node {
-            ExprBox(e) => {
-                ExprBox(folder.fold_expr(e))
-            }
-            ExprVec(exprs) => {
-                ExprVec(exprs.move_map(|x| folder.fold_expr(x)))
-            }
-            ExprRepeat(expr, count) => {
-                ExprRepeat(folder.fold_expr(expr), folder.fold_expr(count))
-            }
-            ExprTup(elts) => ExprTup(elts.move_map(|x| folder.fold_expr(x))),
-            ExprCall(f, args) => {
-                ExprCall(folder.fold_expr(f), args.move_map(|x| folder.fold_expr(x)))
-            }
-            ExprMethodCall(name, tps, args) => {
-                ExprMethodCall(respan(folder.new_span(name.span), folder.fold_name(name.node)),
-                               tps.move_map(|x| folder.fold_ty(x)),
-                               args.move_map(|x| folder.fold_expr(x)))
-            }
-            ExprBinary(binop, lhs, rhs) => {
-                ExprBinary(binop, folder.fold_expr(lhs), folder.fold_expr(rhs))
-            }
-            ExprUnary(binop, ohs) => {
-                ExprUnary(binop, folder.fold_expr(ohs))
-            }
-            ExprLit(l) => ExprLit(l),
-            ExprCast(expr, ty) => {
-                ExprCast(folder.fold_expr(expr), folder.fold_ty(ty))
-            }
-            ExprType(expr, ty) => {
-                ExprType(folder.fold_expr(expr), folder.fold_ty(ty))
-            }
-            ExprAddrOf(m, ohs) => ExprAddrOf(m, folder.fold_expr(ohs)),
-            ExprIf(cond, tr, fl) => {
-                ExprIf(folder.fold_expr(cond),
-                       folder.fold_block(tr),
-                       fl.map(|x| folder.fold_expr(x)))
-            }
-            ExprWhile(cond, body, opt_ident) => {
-                ExprWhile(folder.fold_expr(cond),
-                          folder.fold_block(body),
-                          opt_ident.map(|i| folder.fold_ident(i)))
-            }
-            ExprLoop(body, opt_ident) => {
-                ExprLoop(folder.fold_block(body),
-                         opt_ident.map(|i| folder.fold_ident(i)))
-            }
-            ExprMatch(expr, arms, source) => {
-                ExprMatch(folder.fold_expr(expr),
-                          arms.move_map(|x| folder.fold_arm(x)),
-                          source)
-            }
-            ExprClosure(capture_clause, decl, body) => {
-                ExprClosure(capture_clause,
-                            folder.fold_fn_decl(decl),
-                            folder.fold_block(body))
-            }
-            ExprBlock(blk) => ExprBlock(folder.fold_block(blk)),
-            ExprAssign(el, er) => {
-                ExprAssign(folder.fold_expr(el), folder.fold_expr(er))
-            }
-            ExprAssignOp(op, el, er) => {
-                ExprAssignOp(op, folder.fold_expr(el), folder.fold_expr(er))
-            }
-            ExprField(el, name) => {
-                ExprField(folder.fold_expr(el),
-                          respan(folder.new_span(name.span), folder.fold_name(name.node)))
-            }
-            ExprTupField(el, index) => {
-                ExprTupField(folder.fold_expr(el),
-                             respan(folder.new_span(index.span), folder.fold_usize(index.node)))
-            }
-            ExprIndex(el, er) => {
-                ExprIndex(folder.fold_expr(el), folder.fold_expr(er))
-            }
-            ExprRange(e1, e2) => {
-                ExprRange(e1.map(|x| folder.fold_expr(x)),
-                          e2.map(|x| folder.fold_expr(x)))
-            }
-            ExprPath(qself, path) => {
-                let qself = qself.map(|QSelf { ty, position }| {
-                    QSelf {
-                        ty: folder.fold_ty(ty),
-                        position: position,
-                    }
-                });
-                ExprPath(qself, folder.fold_path(path))
-            }
-            ExprBreak(opt_ident) => ExprBreak(opt_ident.map(|label| {
-                respan(folder.new_span(label.span), folder.fold_ident(label.node))
-            })),
-            ExprAgain(opt_ident) => ExprAgain(opt_ident.map(|label| {
-                respan(folder.new_span(label.span), folder.fold_ident(label.node))
-            })),
-            ExprRet(e) => ExprRet(e.map(|x| folder.fold_expr(x))),
-            ExprInlineAsm(InlineAsm {
-                inputs,
-                outputs,
-                asm,
-                asm_str_style,
-                clobbers,
-                volatile,
-                alignstack,
-                dialect,
-                expn_id,
-            }) => ExprInlineAsm(InlineAsm {
-                inputs: inputs.move_map(|(c, input)| (c, folder.fold_expr(input))),
-                outputs: outputs.move_map(|out| {
-                    InlineAsmOutput {
-                        constraint: out.constraint,
-                        expr: folder.fold_expr(out.expr),
-                        is_rw: out.is_rw,
-                        is_indirect: out.is_indirect,
-                    }
-                }),
-                asm: asm,
-                asm_str_style: asm_str_style,
-                clobbers: clobbers,
-                volatile: volatile,
-                alignstack: alignstack,
-                dialect: dialect,
-                expn_id: expn_id,
-            }),
-            ExprStruct(path, fields, maybe_expr) => {
-                ExprStruct(folder.fold_path(path),
-                           fields.move_map(|x| folder.fold_field(x)),
-                           maybe_expr.map(|x| folder.fold_expr(x)))
-            }
-        },
-        span: folder.new_span(span),
-        attrs: attrs.map_thin_attrs(|attrs| fold_attrs(attrs.into(), folder).into()),
-    }
-}
-
-pub fn noop_fold_stmt<T: Folder>(stmt: Stmt, folder: &mut T) -> Stmt {
-    let span = folder.new_span(stmt.span);
-    match stmt.node {
-        StmtDecl(d, id) => {
-            let id = folder.new_id(id);
-            Spanned {
-                node: StmtDecl(folder.fold_decl(d), id),
-                span: span
-            }
-        }
-        StmtExpr(e, id) => {
-            let id = folder.new_id(id);
-            Spanned {
-                node: StmtExpr(folder.fold_expr(e), id),
-                span: span,
-            }
-        }
-        StmtSemi(e, id) => {
-            let id = folder.new_id(id);
-            Spanned {
-                node: StmtSemi(folder.fold_expr(e), id),
-                span: span,
-            }
-        }
-    }
-}
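// ---- Editorial sketch (not part of the deleted file) -------------------
// The noop_fold_* functions above follow a common pattern: the Folder
// trait's default methods delegate to free "noop" functions that simply
// rebuild each node, so a custom folder only overrides the cases it cares
// about and falls back to the noop traversal for everything else. The
// simplified, self-contained analogue below (all names hypothetical) shows
// the shape of that pattern; it is not the rustc API itself.

#[derive(Debug)]
enum Expr {
    Lit(i64),
    Add(Box<Expr>, Box<Expr>),
}

trait Folder: Sized {
    // Default method delegates to the free noop function.
    fn fold_expr(&mut self, e: Expr) -> Expr {
        noop_fold_expr(e, self)
    }
}

// The "noop" traversal: rebuilds the node, recursing through the folder.
fn noop_fold_expr<F: Folder>(e: Expr, fld: &mut F) -> Expr {
    match e {
        Expr::Lit(n) => Expr::Lit(n),
        Expr::Add(a, b) => Expr::Add(Box::new(fld.fold_expr(*a)),
                                     Box::new(fld.fold_expr(*b))),
    }
}

// A folder that rewrites literals and reuses the noop traversal otherwise.
struct DoubleLits;

impl Folder for DoubleLits {
    fn fold_expr(&mut self, e: Expr) -> Expr {
        match e {
            Expr::Lit(n) => Expr::Lit(n * 2),
            other => noop_fold_expr(other, self),
        }
    }
}

fn main() {
    let e = Expr::Add(Box::new(Expr::Lit(1)), Box::new(Expr::Lit(2)));
    println!("{:?}", DoubleLits.fold_expr(e)); // Add(Lit(2), Lit(4))
}
// -------------------------------------------------------------------------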
diff --git a/src/librustc_front/hir.rs b/src/librustc_front/hir.rs
deleted file mode 100644 (file)
index dbc1d71..0000000
+++ /dev/null
@@ -1,1449 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// The Rust HIR.
-
-pub use self::BindingMode::*;
-pub use self::BinOp_::*;
-pub use self::BlockCheckMode::*;
-pub use self::CaptureClause::*;
-pub use self::Decl_::*;
-pub use self::ExplicitSelf_::*;
-pub use self::Expr_::*;
-pub use self::FunctionRetTy::*;
-pub use self::ForeignItem_::*;
-pub use self::Item_::*;
-pub use self::Mutability::*;
-pub use self::PathListItem_::*;
-pub use self::PrimTy::*;
-pub use self::Stmt_::*;
-pub use self::StructFieldKind::*;
-pub use self::TraitItem_::*;
-pub use self::Ty_::*;
-pub use self::TyParamBound::*;
-pub use self::UnOp::*;
-pub use self::UnsafeSource::*;
-pub use self::ViewPath_::*;
-pub use self::Visibility::*;
-pub use self::PathParameters::*;
-
-use intravisit::Visitor;
-use std::collections::BTreeMap;
-use syntax::codemap::{self, Span, Spanned, DUMMY_SP, ExpnId};
-use syntax::abi::Abi;
-use syntax::ast::{Name, NodeId, DUMMY_NODE_ID, TokenTree, AsmDialect};
-use syntax::ast::{Attribute, Lit, StrStyle, FloatTy, IntTy, UintTy, MetaItem};
-use syntax::attr::ThinAttributes;
-use syntax::parse::token::InternedString;
-use syntax::ptr::P;
-
-use print::pprust;
-use util;
-
-use std::fmt;
-use std::hash::{Hash, Hasher};
-use serialize::{Encodable, Decodable, Encoder, Decoder};
-
-/// HIR doesn't commit to a concrete storage type and has its own alias for a vector.
-/// It can be `Vec`, `P<[T]>` or potentially `Box<[T]>`, or some other container with similar
-/// behavior. Unlike the AST, HIR is mostly a static structure, so we can use an owned slice instead
-/// of `Vec` to avoid keeping extra capacity.
-pub type HirVec<T> = P<[T]>;
-
-macro_rules! hir_vec {
-    ($elem:expr; $n:expr) => (
-        $crate::hir::HirVec::from(vec![$elem; $n])
-    );
-    ($($x:expr),*) => (
-        $crate::hir::HirVec::from(vec![$($x),*])
-    );
-    ($($x:expr,)*) => (vec![$($x),*])
-}
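// ---- Editorial sketch (not part of the deleted file) -------------------
// The `HirVec` doc comment above argues for an owned slice over `Vec` to
// avoid carrying spare capacity. The same effect can be demonstrated with
// `Box<[T]>` from the standard library:

fn main() {
    let mut v: Vec<u32> = Vec::with_capacity(64);
    v.extend(0..10);
    assert_eq!(v.len(), 10);
    assert!(v.capacity() >= 64); // unused capacity is kept around

    // Converting to an owned slice reallocates down to exactly `len`
    // elements, which suits a mostly static structure like the HIR.
    let s: Box<[u32]> = v.into_boxed_slice();
    assert_eq!(s.len(), 10); // a slice has no capacity beyond its length
}
// -------------------------------------------------------------------------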
-
-/// Identifier in HIR
-#[derive(Clone, Copy, Eq)]
-pub struct Ident {
-    /// Hygienic name (renamed), should be used by default
-    pub name: Name,
-    /// Unhygienic name (original, not renamed), needed in a few places in name resolution
-    pub unhygienic_name: Name,
-}
-
-impl Ident {
-    /// Creates a HIR identifier with both `name` and `unhygienic_name` initialized with
-    /// the argument. Hygiene properties of the created identifier depend entirely on this
-    /// argument. If the argument is a plain interned string `intern("iter")`, then the result
-    /// is unhygienic and can interfere with other entities named "iter". If the argument is
-    /// a "fresh" name created with `gensym("iter")`, then the result is hygienic and can't
-    /// interfere with other entities having the same string as a name.
-    pub fn from_name(name: Name) -> Ident {
-        Ident { name: name, unhygienic_name: name }
-    }
-}
-
-impl PartialEq for Ident {
-    fn eq(&self, other: &Ident) -> bool {
-        self.name == other.name
-    }
-}
-
-impl Hash for Ident {
-    fn hash<H: Hasher>(&self, state: &mut H) {
-        self.name.hash(state)
-    }
-}
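// ---- Editorial sketch (not part of the deleted file) -------------------
// The PartialEq and Hash impls above key `Ident` on the hygienic `name`
// only, keeping Eq and Hash consistent (values that compare equal must
// hash equally). A simplified, self-contained analogue using `String`
// instead of interned names:

use std::collections::HashMap;
use std::hash::{Hash, Hasher};

#[derive(Eq)]
struct Ident {
    name: String,            // hygienic name: the identity
    unhygienic_name: String, // ignored by Eq and Hash
}

impl PartialEq for Ident {
    fn eq(&self, other: &Ident) -> bool {
        self.name == other.name
    }
}

impl Hash for Ident {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.name.hash(state)
    }
}

fn main() {
    let a = Ident { name: "iter".into(), unhygienic_name: "iter".into() };
    let b = Ident { name: "iter".into(), unhygienic_name: "iter#0".into() };

    let mut map = HashMap::new();
    map.insert(a, 1);
    // `b` differs only in its unhygienic name, so it still finds `a`'s entry.
    assert_eq!(map.get(&b), Some(&1));
}
// -------------------------------------------------------------------------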
-
-impl fmt::Debug for Ident {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Debug::fmt(&self.name, f)
-    }
-}
-
-impl fmt::Display for Ident {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Display::fmt(&self.name, f)
-    }
-}
-
-impl Encodable for Ident {
-    fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
-        self.name.encode(s)
-    }
-}
-
-impl Decodable for Ident {
-    fn decode<D: Decoder>(d: &mut D) -> Result<Ident, D::Error> {
-        Ok(Ident::from_name(try!(Name::decode(d))))
-    }
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)]
-pub struct Lifetime {
-    pub id: NodeId,
-    pub span: Span,
-    pub name: Name,
-}
-
-impl fmt::Debug for Lifetime {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f,
-               "lifetime({}: {})",
-               self.id,
-               pprust::lifetime_to_string(self))
-    }
-}
-
-/// A lifetime definition, eg `'a: 'b+'c+'d`
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct LifetimeDef {
-    pub lifetime: Lifetime,
-    pub bounds: HirVec<Lifetime>,
-}
-
-/// A "Path" is essentially Rust's notion of a name; for instance:
-/// std::cmp::PartialEq  .  It's represented as a sequence of identifiers,
-/// along with a bunch of supporting information.
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)]
-pub struct Path {
-    pub span: Span,
-    /// A `::foo` path is relative to the crate root rather than the current
-    /// module (like paths in an import).
-    pub global: bool,
-    /// The segments in the path: the things separated by `::`.
-    pub segments: HirVec<PathSegment>,
-}
-
-impl fmt::Debug for Path {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "path({})", pprust::path_to_string(self))
-    }
-}
-
-impl fmt::Display for Path {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", pprust::path_to_string(self))
-    }
-}
-
-/// A segment of a path: an identifier, an optional lifetime, and a set of
-/// types.
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct PathSegment {
-    /// The identifier portion of this path segment.
-    ///
-    /// Hygiene properties of this identifier are worth noting.
-    /// Most path segments are not hygienic and they are not renamed during
-    /// lowering from AST to HIR (see comments to `fn lower_path`). However, segments from
-    /// unqualified paths with one segment originating from `ExprPath` (local-variable-like paths)
-    /// can be hygienic, so they are renamed. You should not normally care about this peculiarity
-    /// and just use `identifier.name` unless you modify identifier resolution code
-    /// (`fn resolve_identifier` and other functions called by it in `rustc_resolve`).
-    pub identifier: Ident,
-
-    /// Type/lifetime parameters attached to this path. They come in
-    /// two flavors: `Path<A,B,C>` and `Path(A,B) -> C`. Note that
-    /// this is more than just simple syntactic sugar; the use of
-    /// parens affects the region binding rules, so we preserve the
-    /// distinction.
-    pub parameters: PathParameters,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum PathParameters {
-    /// The `<'a, A,B,C>` in `foo::bar::baz::<'a, A,B,C>`
-    AngleBracketedParameters(AngleBracketedParameterData),
-    /// The `(A,B)` and `C` in `Foo(A,B) -> C`
-    ParenthesizedParameters(ParenthesizedParameterData),
-}
-
-impl PathParameters {
-    pub fn none() -> PathParameters {
-        AngleBracketedParameters(AngleBracketedParameterData {
-            lifetimes: HirVec::new(),
-            types: HirVec::new(),
-            bindings: HirVec::new(),
-        })
-    }
-
-    pub fn is_empty(&self) -> bool {
-        match *self {
-            AngleBracketedParameters(ref data) => data.is_empty(),
-
-            // Even if the user supplied no types, something like
-            // `X()` is equivalent to `X<(),()>`.
-            ParenthesizedParameters(..) => false,
-        }
-    }
-
-    pub fn has_lifetimes(&self) -> bool {
-        match *self {
-            AngleBracketedParameters(ref data) => !data.lifetimes.is_empty(),
-            ParenthesizedParameters(_) => false,
-        }
-    }
-
-    pub fn has_types(&self) -> bool {
-        match *self {
-            AngleBracketedParameters(ref data) => !data.types.is_empty(),
-            ParenthesizedParameters(..) => true,
-        }
-    }
-
-    /// Returns the types that the user wrote. Note that these do not necessarily map to the type
-    /// parameters in the parenthesized case.
-    pub fn types(&self) -> HirVec<&P<Ty>> {
-        match *self {
-            AngleBracketedParameters(ref data) => {
-                data.types.iter().collect()
-            }
-            ParenthesizedParameters(ref data) => {
-                data.inputs
-                    .iter()
-                    .chain(data.output.iter())
-                    .collect()
-            }
-        }
-    }
-
-    pub fn lifetimes(&self) -> HirVec<&Lifetime> {
-        match *self {
-            AngleBracketedParameters(ref data) => {
-                data.lifetimes.iter().collect()
-            }
-            ParenthesizedParameters(_) => {
-                HirVec::new()
-            }
-        }
-    }
-
-    pub fn bindings(&self) -> HirVec<&TypeBinding> {
-        match *self {
-            AngleBracketedParameters(ref data) => {
-                data.bindings.iter().collect()
-            }
-            ParenthesizedParameters(_) => {
-                HirVec::new()
-            }
-        }
-    }
-}
-
-/// A path like `Foo<'a, T>`
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct AngleBracketedParameterData {
-    /// The lifetime parameters for this path segment.
-    pub lifetimes: HirVec<Lifetime>,
-    /// The type parameters for this path segment, if present.
-    pub types: HirVec<P<Ty>>,
-    /// Bindings (equality constraints) on associated types, if present.
-    /// E.g., `Foo<A=Bar>`.
-    pub bindings: HirVec<TypeBinding>,
-}
-
-impl AngleBracketedParameterData {
-    fn is_empty(&self) -> bool {
-        self.lifetimes.is_empty() && self.types.is_empty() && self.bindings.is_empty()
-    }
-}
-
-/// A path like `Foo(A,B) -> C`
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct ParenthesizedParameterData {
-    /// Overall span
-    pub span: Span,
-
-    /// `(A,B)`
-    pub inputs: HirVec<P<Ty>>,
-
-    /// `C`
-    pub output: Option<P<Ty>>,
-}
-
-/// The AST represents all type param bounds as types.
-/// typeck::collect::compute_bounds matches these against
-/// the "special" built-in traits (see middle::lang_items) and
-/// detects Copy, Send and Sync.
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum TyParamBound {
-    TraitTyParamBound(PolyTraitRef, TraitBoundModifier),
-    RegionTyParamBound(Lifetime),
-}
-
-/// A modifier on a bound, currently this is only used for `?Sized`, where the
-/// modifier is `Maybe`. Negative bounds should also be handled here.
-#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum TraitBoundModifier {
-    None,
-    Maybe,
-}
-
-pub type TyParamBounds = HirVec<TyParamBound>;
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct TyParam {
-    pub name: Name,
-    pub id: NodeId,
-    pub bounds: TyParamBounds,
-    pub default: Option<P<Ty>>,
-    pub span: Span,
-}
-
-/// Represents lifetimes and type parameters attached to a declaration
-/// of a function, enum, trait, etc.
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct Generics {
-    pub lifetimes: HirVec<LifetimeDef>,
-    pub ty_params: HirVec<TyParam>,
-    pub where_clause: WhereClause,
-}
-
-impl Generics {
-    pub fn is_lt_parameterized(&self) -> bool {
-        !self.lifetimes.is_empty()
-    }
-    pub fn is_type_parameterized(&self) -> bool {
-        !self.ty_params.is_empty()
-    }
-    pub fn is_parameterized(&self) -> bool {
-        self.is_lt_parameterized() || self.is_type_parameterized()
-    }
-}
-
-/// A `where` clause in a definition
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct WhereClause {
-    pub id: NodeId,
-    pub predicates: HirVec<WherePredicate>,
-}
-
-/// A single predicate in a `where` clause
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum WherePredicate {
-    /// A type binding, eg `for<'c> Foo: Send+Clone+'c`
-    BoundPredicate(WhereBoundPredicate),
-    /// A lifetime predicate, e.g. `'a: 'b+'c`
-    RegionPredicate(WhereRegionPredicate),
-    /// An equality predicate (unsupported)
-    EqPredicate(WhereEqPredicate),
-}
-
-/// A type bound, eg `for<'c> Foo: Send+Clone+'c`
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct WhereBoundPredicate {
-    pub span: Span,
-    /// Any lifetimes from a `for` binding
-    pub bound_lifetimes: HirVec<LifetimeDef>,
-    /// The type being bounded
-    pub bounded_ty: P<Ty>,
-    /// Trait and lifetime bounds (`Clone+Send+'static`)
-    pub bounds: TyParamBounds,
-}
-
-/// A lifetime predicate, e.g. `'a: 'b+'c`
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct WhereRegionPredicate {
-    pub span: Span,
-    pub lifetime: Lifetime,
-    pub bounds: HirVec<Lifetime>,
-}
-
-/// An equality predicate (unsupported), e.g. `T=int`
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct WhereEqPredicate {
-    pub id: NodeId,
-    pub span: Span,
-    pub path: Path,
-    pub ty: P<Ty>,
-}
-
-pub type CrateConfig = HirVec<P<MetaItem>>;
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Debug)]
-pub struct Crate {
-    pub module: Mod,
-    pub attrs: HirVec<Attribute>,
-    pub config: CrateConfig,
-    pub span: Span,
-    pub exported_macros: HirVec<MacroDef>,
-
-    // NB: We use a BTreeMap here so that `visit_all_items` iterates
-    // over the ids in increasing order. In principle it should not
-    // matter what order we visit things in, but in *practice* it
-    // does, because it can affect the order in which errors are
-    // detected, which in turn can make compile-fail tests yield
-    // slightly different results.
-    pub items: BTreeMap<NodeId, Item>,
-}
-
-impl Crate {
-    pub fn item(&self, id: NodeId) -> &Item {
-        &self.items[&id]
-    }
-
-    /// Visits all items in the crate in some deterministic (but
-    /// unspecified) order. If you just need to process every item,
-    /// but don't care about nesting, this method is the best choice.
-    ///
-    /// If you do care about nesting -- usually because your algorithm
-    /// follows lexical scoping rules -- then you want a different
-    /// approach. You should override `visit_nested_item` in your
-    /// visitor and then call `intravisit::walk_crate` instead.
-    pub fn visit_all_items<'hir, V:Visitor<'hir>>(&'hir self, visitor: &mut V) {
-        for (_, item) in &self.items {
-            visitor.visit_item(item);
-        }
-    }
-}
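// ---- Editorial sketch (not part of the deleted file) -------------------
// The NB comment on `Crate::items` above picks `BTreeMap` so that
// `visit_all_items` sees items in increasing `NodeId` order; a `HashMap`
// gives no ordering guarantee, which would make error ordering (and the
// compile-fail tests) unstable. A standard-library-only demonstration:

use std::collections::BTreeMap;

fn main() {
    let mut items = BTreeMap::new();
    // Insert in an arbitrary order ...
    for id in [30u32, 10, 20].iter() {
        items.insert(*id, format!("item #{}", id));
    }
    // ... iteration is nevertheless always sorted by key.
    let order: Vec<u32> = items.keys().cloned().collect();
    assert_eq!(order, vec![10, 20, 30]);
}
// -------------------------------------------------------------------------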
-
-/// A macro definition, in this crate or imported from another.
-///
-/// Not parsed directly, but created on macro import or `macro_rules!` expansion.
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct MacroDef {
-    pub name: Name,
-    pub attrs: HirVec<Attribute>,
-    pub id: NodeId,
-    pub span: Span,
-    pub imported_from: Option<Name>,
-    pub export: bool,
-    pub use_locally: bool,
-    pub allow_internal_unstable: bool,
-    pub body: HirVec<TokenTree>,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct Block {
-    /// Statements in a block
-    pub stmts: HirVec<Stmt>,
-    /// An expression at the end of the block
-    /// without a semicolon, if any
-    pub expr: Option<P<Expr>>,
-    pub id: NodeId,
-    /// Distinguishes between `unsafe { ... }` and `{ ... }`
-    pub rules: BlockCheckMode,
-    pub span: Span,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)]
-pub struct Pat {
-    pub id: NodeId,
-    pub node: PatKind,
-    pub span: Span,
-}
-
-impl fmt::Debug for Pat {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "pat({}: {})", self.id, pprust::pat_to_string(self))
-    }
-}
-
-/// A single field in a struct pattern
-///
-/// Patterns like the fields of `Foo { x, ref y, ref mut z }`
-/// are treated the same as `x: x, y: ref y, z: ref mut z`,
-/// except that `is_shorthand` is true.
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct FieldPat {
-    /// The identifier for the field
-    pub name: Name,
-    /// The pattern the field is destructured to
-    pub pat: P<Pat>,
-    pub is_shorthand: bool,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
-pub enum BindingMode {
-    BindByRef(Mutability),
-    BindByValue(Mutability),
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum PatKind {
-    /// Represents a wildcard pattern (`_`)
-    Wild,
-
-    /// A `PatKind::Ident` may either be a new bound variable,
-    /// or a unit struct/variant pattern, or a const pattern (in the last two cases
-    /// the third field must be `None`).
-    ///
-    /// In the unit or const pattern case, the parser can't determine
-    /// which it is. The resolver determines this, and
-    /// records this pattern's `NodeId` in an auxiliary
-    /// set (of "PatIdents that refer to unit patterns or constants").
-    Ident(BindingMode, Spanned<Ident>, Option<P<Pat>>),
-
-    /// A struct or struct variant pattern, e.g. `Variant {x, y, ..}`.
-    /// The `bool` is `true` in the presence of a `..`.
-    Struct(Path, HirVec<Spanned<FieldPat>>, bool),
-
-    /// A tuple struct/variant pattern `Variant(x, y, z)`.
-    /// "None" means a `Variant(..)` pattern where we don't bind the fields to names.
-    TupleStruct(Path, Option<HirVec<P<Pat>>>),
-
-    /// A path pattern.
-    /// Such pattern can be resolved to a unit struct/variant or a constant.
-    Path(Path),
-
-    /// An associated const named using the qualified path `<T>::CONST` or
-    /// `<T as Trait>::CONST`. Associated consts from inherent impls can be
-    /// referred to as simply `T::CONST`, in which case they will end up as
-    /// PatKind::Path, and the resolver will have to sort that out.
-    QPath(QSelf, Path),
-
-    /// A tuple pattern `(a, b)`
-    Tup(HirVec<P<Pat>>),
-    /// A `box` pattern
-    Box(P<Pat>),
-    /// A reference pattern, e.g. `&mut (a, b)`
-    Ref(P<Pat>, Mutability),
-    /// A literal
-    Lit(P<Expr>),
-    /// A range pattern, e.g. `1...2`
-    Range(P<Expr>, P<Expr>),
-    /// `[a, b, ..i, y, z]` is represented as:
-    ///     `PatKind::Vec(box [a, b], Some(i), box [y, z])`
-    Vec(HirVec<P<Pat>>, Option<P<Pat>>, HirVec<P<Pat>>),
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
-pub enum Mutability {
-    MutMutable,
-    MutImmutable,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
-pub enum BinOp_ {
-    /// The `+` operator (addition)
-    BiAdd,
-    /// The `-` operator (subtraction)
-    BiSub,
-    /// The `*` operator (multiplication)
-    BiMul,
-    /// The `/` operator (division)
-    BiDiv,
-    /// The `%` operator (modulus)
-    BiRem,
-    /// The `&&` operator (logical and)
-    BiAnd,
-    /// The `||` operator (logical or)
-    BiOr,
-    /// The `^` operator (bitwise xor)
-    BiBitXor,
-    /// The `&` operator (bitwise and)
-    BiBitAnd,
-    /// The `|` operator (bitwise or)
-    BiBitOr,
-    /// The `<<` operator (shift left)
-    BiShl,
-    /// The `>>` operator (shift right)
-    BiShr,
-    /// The `==` operator (equality)
-    BiEq,
-    /// The `<` operator (less than)
-    BiLt,
-    /// The `<=` operator (less than or equal to)
-    BiLe,
-    /// The `!=` operator (not equal to)
-    BiNe,
-    /// The `>=` operator (greater than or equal to)
-    BiGe,
-    /// The `>` operator (greater than)
-    BiGt,
-}
-
-pub type BinOp = Spanned<BinOp_>;
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
-pub enum UnOp {
-    /// The `*` operator for dereferencing
-    UnDeref,
-    /// The `!` operator for logical inversion
-    UnNot,
-    /// The `-` operator for negation
-    UnNeg,
-}
-
-/// A statement
-pub type Stmt = Spanned<Stmt_>;
-
-impl fmt::Debug for Stmt_ {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        // Sadness.
-        let spanned = codemap::dummy_spanned(self.clone());
-        write!(f,
-               "stmt({}: {})",
-               util::stmt_id(&spanned),
-               pprust::stmt_to_string(&spanned))
-    }
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)]
-pub enum Stmt_ {
-    /// Could be an item or a local (let) binding:
-    StmtDecl(P<Decl>, NodeId),
-
-    /// Expr without trailing semi-colon (must have unit type):
-    StmtExpr(P<Expr>, NodeId),
-
-    /// Expr with trailing semi-colon (may have any type):
-    StmtSemi(P<Expr>, NodeId),
-}
-
-// FIXME (pending discussion of #1697, #2178...): local should really be
-// a refinement on pat.
-/// Local represents a `let` statement, e.g., `let <pat>:<ty> = <expr>;`
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct Local {
-    pub pat: P<Pat>,
-    pub ty: Option<P<Ty>>,
-    /// Initializer expression to set the value, if any
-    pub init: Option<P<Expr>>,
-    pub id: NodeId,
-    pub span: Span,
-    pub attrs: ThinAttributes,
-}
-
-pub type Decl = Spanned<Decl_>;
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum Decl_ {
-    /// A local (let) binding:
-    DeclLocal(P<Local>),
-    /// An item binding:
-    DeclItem(ItemId),
-}
-
-/// represents one arm of a 'match'
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct Arm {
-    pub attrs: HirVec<Attribute>,
-    pub pats: HirVec<P<Pat>>,
-    pub guard: Option<P<Expr>>,
-    pub body: P<Expr>,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct Field {
-    pub name: Spanned<Name>,
-    pub expr: P<Expr>,
-    pub span: Span,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
-pub enum BlockCheckMode {
-    DefaultBlock,
-    UnsafeBlock(UnsafeSource),
-    PushUnsafeBlock(UnsafeSource),
-    PopUnsafeBlock(UnsafeSource),
-    // Within this block (but outside a PopUnstableBlock), we suspend checking of stability.
-    PushUnstableBlock,
-    PopUnstableBlock,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
-pub enum UnsafeSource {
-    CompilerGenerated,
-    UserProvided,
-}
-
-/// An expression
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)]
-pub struct Expr {
-    pub id: NodeId,
-    pub node: Expr_,
-    pub span: Span,
-    pub attrs: ThinAttributes,
-}
-
-impl fmt::Debug for Expr {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "expr({}: {})", self.id, pprust::expr_to_string(self))
-    }
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum Expr_ {
-    /// A `box x` expression.
-    ExprBox(P<Expr>),
-    /// An array (`[a, b, c, d]`)
-    ExprVec(HirVec<P<Expr>>),
-    /// A function call
-    ///
-    /// The first field resolves to the function itself,
-    /// and the second field is the list of arguments
-    ExprCall(P<Expr>, HirVec<P<Expr>>),
-    /// A method call (`x.foo::<Bar, Baz>(a, b, c, d)`)
-    ///
-    /// The `Spanned<Name>` is the identifier for the method name.
-    /// The vector of `Ty`s is the list of ascribed type parameters for the method
-    /// (within the angle brackets).
-    ///
-    /// The first element of the vector of `Expr`s is the expression that evaluates
-    /// to the object on which the method is being called (the receiver),
-    /// and the remaining elements are the rest of the arguments.
-    ///
-    /// Thus, `x.foo::<Bar, Baz>(a, b, c, d)` is represented as
-    /// `ExprMethodCall(foo, [Bar, Baz], [x, a, b, c, d])` (see the editorial sketch after this enum).
-    ExprMethodCall(Spanned<Name>, HirVec<P<Ty>>, HirVec<P<Expr>>),
-    /// A tuple (`(a, b, c ,d)`)
-    ExprTup(HirVec<P<Expr>>),
-    /// A binary operation (For example: `a + b`, `a * b`)
-    ExprBinary(BinOp, P<Expr>, P<Expr>),
-    /// A unary operation (For example: `!x`, `*x`)
-    ExprUnary(UnOp, P<Expr>),
-    /// A literal (For example: `1u8`, `"foo"`)
-    ExprLit(P<Lit>),
-    /// A cast (`foo as f64`)
-    ExprCast(P<Expr>, P<Ty>),
-    ExprType(P<Expr>, P<Ty>),
-    /// An `if` block, with an optional else block
-    ///
-    /// `if expr { block } else { expr }`
-    ExprIf(P<Expr>, P<Block>, Option<P<Expr>>),
-    /// A while loop, with an optional label
-    ///
-    /// `'label: while expr { block }`
-    ExprWhile(P<Expr>, P<Block>, Option<Ident>),
-    /// Conditionless loop (can be exited with break, continue, or return)
-    ///
-    /// `'label: loop { block }`
-    ExprLoop(P<Block>, Option<Ident>),
-    /// A `match` block, with a source that indicates whether or not it is
-    /// the result of a desugaring, and if so, which kind.
-    ExprMatch(P<Expr>, HirVec<Arm>, MatchSource),
-    /// A closure (for example, `move |a, b, c| {a + b + c}`)
-    ExprClosure(CaptureClause, P<FnDecl>, P<Block>),
-    /// A block (`{ ... }`)
-    ExprBlock(P<Block>),
-
-    /// An assignment (`a = foo()`)
-    ExprAssign(P<Expr>, P<Expr>),
-    /// An assignment with an operator
-    ///
-    /// For example, `a += 1`.
-    ExprAssignOp(BinOp, P<Expr>, P<Expr>),
-    /// Access of a named struct field (`obj.foo`)
-    ExprField(P<Expr>, Spanned<Name>),
-    /// Access of an unnamed field of a struct or tuple-struct
-    ///
-    /// For example, `foo.0`.
-    ExprTupField(P<Expr>, Spanned<usize>),
-    /// An indexing operation (`foo[2]`)
-    ExprIndex(P<Expr>, P<Expr>),
-    /// A range (`1..2`, `1..`, or `..2`)
-    ExprRange(Option<P<Expr>>, Option<P<Expr>>),
-
-    /// Variable reference, possibly containing `::` and/or type
-    /// parameters, e.g. `foo::bar::<baz>`.
-    ///
-    /// Optionally "qualified",
-    /// e.g. `<HirVec<T> as SomeTrait>::SomeType`.
-    ExprPath(Option<QSelf>, Path),
-
-    /// A referencing operation (`&a` or `&mut a`)
-    ExprAddrOf(Mutability, P<Expr>),
-    /// A `break`, with an optional label to break
-    ExprBreak(Option<Spanned<Ident>>),
-    /// A `continue`, with an optional label
-    ExprAgain(Option<Spanned<Ident>>),
-    /// A `return`, with an optional value to be returned
-    ExprRet(Option<P<Expr>>),
-
-    /// Output of the `asm!()` macro
-    ExprInlineAsm(InlineAsm),
-
-    /// A struct literal expression.
-    ///
-    /// For example, `Foo {x: 1, y: 2}`, or
-    /// `Foo {x: 1, .. base}`, where `base` is the `Option<Expr>`.
-    ExprStruct(Path, HirVec<Field>, Option<P<Expr>>),
-
-    /// A vector literal constructed from one repeated element.
-    ///
-    /// For example, `[1u8; 5]`. The first expression is the element
-    /// to be repeated; the second is the number of times to repeat it.
-    ExprRepeat(P<Expr>, P<Expr>),
-}
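// ---- Editorial sketch (not part of the deleted file) -------------------
// As the `ExprMethodCall` doc above describes, the receiver of a method
// call is stored as the first element of the argument vector. A simplified,
// self-contained analogue (the types here are hypothetical, not the HIR):

#[derive(Debug, PartialEq)]
enum Expr {
    Var(&'static str),
    MethodCall(&'static str, Vec<Expr>), // (method name, [receiver, args...])
}

fn receiver(call: &Expr) -> Option<&Expr> {
    match *call {
        Expr::MethodCall(_, ref args) => args.first(),
        _ => None,
    }
}

fn main() {
    // Roughly how `x.foo(a, b)` is stored: the method name plus `[x, a, b]`.
    let call = Expr::MethodCall("foo",
                                vec![Expr::Var("x"), Expr::Var("a"), Expr::Var("b")]);
    assert_eq!(receiver(&call), Some(&Expr::Var("x")));
}
// -------------------------------------------------------------------------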
-
-/// The explicit Self type in a "qualified path". The actual
-/// path, including the trait and the associated item, is stored
-/// separately. `position` represents the index of the associated
-/// item qualified with this Self type.
-///
-///     <HirVec<T> as a::b::Trait>::AssociatedItem
-///      ^~~~~     ~~~~~~~~~~~~~~^
-///      ty        position = 3
-///
-///     <HirVec<T>>::AssociatedItem
-///      ^~~~~    ^
-///      ty       position = 0
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct QSelf {
-    pub ty: P<Ty>,
-    pub position: usize,
-}
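// ---- Editorial sketch (not part of the deleted file) -------------------
// The `QSelf` doc above says `position` is the index of the associated
// item, i.e. the number of leading path segments that belong to the trait.
// A simplified, self-contained illustration of splitting a path at that
// index (plain strings stand in for real path segments):

fn split_qpath<'a>(segments: &'a [&'a str], position: usize)
                   -> (&'a [&'a str], &'a [&'a str]) {
    // Segments before `position` name the trait (empty when there is no
    // `as Trait` qualifier); the rest is the associated item path.
    segments.split_at(position)
}

fn main() {
    // <HirVec<T> as a::b::Trait>::AssociatedItem  =>  position = 3
    let segs = ["a", "b", "Trait", "AssociatedItem"];
    let (trait_path, item) = split_qpath(&segs, 3);
    assert_eq!(trait_path, ["a", "b", "Trait"]);
    assert_eq!(item, ["AssociatedItem"]);

    // <HirVec<T>>::AssociatedItem  =>  position = 0 (no trait segments)
    let segs = ["AssociatedItem"];
    let (trait_path, item) = split_qpath(&segs, 0);
    assert!(trait_path.is_empty());
    assert_eq!(item, ["AssociatedItem"]);
}
// -------------------------------------------------------------------------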
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
-pub enum MatchSource {
-    Normal,
-    IfLetDesugar {
-        contains_else_clause: bool,
-    },
-    WhileLetDesugar,
-    ForLoopDesugar,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
-pub enum CaptureClause {
-    CaptureByValue,
-    CaptureByRef,
-}
-
-// NB: If you change this, you'll probably want to change the corresponding
-// type structure in middle/ty.rs as well.
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct MutTy {
-    pub ty: P<Ty>,
-    pub mutbl: Mutability,
-}
-
-/// Represents a method's signature in a trait declaration,
-/// or in an implementation.
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct MethodSig {
-    pub unsafety: Unsafety,
-    pub constness: Constness,
-    pub abi: Abi,
-    pub decl: P<FnDecl>,
-    pub generics: Generics,
-    pub explicit_self: ExplicitSelf,
-}
-
-/// Represents a method declaration in a trait declaration, possibly including
-/// a default implementation. A trait method is either required (meaning it
-/// doesn't have an implementation, just a signature) or provided (meaning it
-/// has a default implementation).
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct TraitItem {
-    pub id: NodeId,
-    pub name: Name,
-    pub attrs: HirVec<Attribute>,
-    pub node: TraitItem_,
-    pub span: Span,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum TraitItem_ {
-    ConstTraitItem(P<Ty>, Option<P<Expr>>),
-    MethodTraitItem(MethodSig, Option<P<Block>>),
-    TypeTraitItem(TyParamBounds, Option<P<Ty>>),
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct ImplItem {
-    pub id: NodeId,
-    pub name: Name,
-    pub vis: Visibility,
-    pub attrs: HirVec<Attribute>,
-    pub node: ImplItemKind,
-    pub span: Span,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum ImplItemKind {
-    Const(P<Ty>, P<Expr>),
-    Method(MethodSig, P<Block>),
-    Type(P<Ty>),
-}
-
-// Bind a type to an associated type: `A=Foo`.
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct TypeBinding {
-    pub id: NodeId,
-    pub name: Name,
-    pub ty: P<Ty>,
-    pub span: Span,
-}
-
-
-// NB PartialEq method appears below.
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)]
-pub struct Ty {
-    pub id: NodeId,
-    pub node: Ty_,
-    pub span: Span,
-}
-
-impl fmt::Debug for Ty {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "type({})", pprust::ty_to_string(self))
-    }
-}
-
-/// Not represented directly in the AST, referred to by name through a ty_path.
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
-pub enum PrimTy {
-    TyInt(IntTy),
-    TyUint(UintTy),
-    TyFloat(FloatTy),
-    TyStr,
-    TyBool,
-    TyChar,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct BareFnTy {
-    pub unsafety: Unsafety,
-    pub abi: Abi,
-    pub lifetimes: HirVec<LifetimeDef>,
-    pub decl: P<FnDecl>,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-/// The different kinds of types recognized by the compiler
-pub enum Ty_ {
-    TyVec(P<Ty>),
-    /// A fixed length array (`[T; n]`)
-    TyFixedLengthVec(P<Ty>, P<Expr>),
-    /// A raw pointer (`*const T` or `*mut T`)
-    TyPtr(MutTy),
-    /// A reference (`&'a T` or `&'a mut T`)
-    TyRptr(Option<Lifetime>, MutTy),
-    /// A bare function (e.g. `fn(usize) -> bool`)
-    TyBareFn(P<BareFnTy>),
-    /// A tuple (`(A, B, C, D,...)`)
-    TyTup(HirVec<P<Ty>>),
-    /// A path (`module::module::...::Type`), optionally
-    /// "qualified", e.g. `<HirVec<T> as SomeTrait>::SomeType`.
-    ///
-    /// Type parameters are stored in the Path itself
-    TyPath(Option<QSelf>, Path),
-    /// Something like `A+B`. Note that `B` must always be a path.
-    TyObjectSum(P<Ty>, TyParamBounds),
-    /// A type like `for<'a> Foo<&'a Bar>`
-    TyPolyTraitRef(TyParamBounds),
-    /// Unused for now
-    TyTypeof(P<Expr>),
-    /// TyInfer means the type should be inferred instead of it having been
-    /// specified. This can appear anywhere in a type.
-    TyInfer,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct InlineAsmOutput {
-    pub constraint: InternedString,
-    pub expr: P<Expr>,
-    pub is_rw: bool,
-    pub is_indirect: bool,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct InlineAsm {
-    pub asm: InternedString,
-    pub asm_str_style: StrStyle,
-    pub outputs: HirVec<InlineAsmOutput>,
-    pub inputs: HirVec<(InternedString, P<Expr>)>,
-    pub clobbers: HirVec<InternedString>,
-    pub volatile: bool,
-    pub alignstack: bool,
-    pub dialect: AsmDialect,
-    pub expn_id: ExpnId,
-}
-
-/// represents an argument in a function header
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct Arg {
-    pub ty: P<Ty>,
-    pub pat: P<Pat>,
-    pub id: NodeId,
-}
-
-impl Arg {
-    pub fn new_self(span: Span, mutability: Mutability, self_ident: Ident) -> Arg {
-        let path = Spanned {
-            span: span,
-            node: self_ident,
-        };
-        Arg {
-            // HACK(eddyb) fake type for the self argument.
-            ty: P(Ty {
-                id: DUMMY_NODE_ID,
-                node: TyInfer,
-                span: DUMMY_SP,
-            }),
-            pat: P(Pat {
-                id: DUMMY_NODE_ID,
-                node: PatKind::Ident(BindByValue(mutability), path, None),
-                span: span,
-            }),
-            id: DUMMY_NODE_ID,
-        }
-    }
-}
-
-/// Represents the header (not the body) of a function declaration
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct FnDecl {
-    pub inputs: HirVec<Arg>,
-    pub output: FunctionRetTy,
-    pub variadic: bool,
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum Unsafety {
-    Unsafe,
-    Normal,
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum Constness {
-    Const,
-    NotConst,
-}
-
-impl fmt::Display for Unsafety {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Display::fmt(match *self {
-                              Unsafety::Normal => "normal",
-                              Unsafety::Unsafe => "unsafe",
-                          },
-                          f)
-    }
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)]
-pub enum ImplPolarity {
-    /// `impl Trait for Type`
-    Positive,
-    /// `impl !Trait for Type`
-    Negative,
-}
-
-impl fmt::Debug for ImplPolarity {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match *self {
-            ImplPolarity::Positive => "positive".fmt(f),
-            ImplPolarity::Negative => "negative".fmt(f),
-        }
-    }
-}
-
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum FunctionRetTy {
-    /// Functions with return type `!` that always
-    /// raise an error or exit (i.e. never return to the caller)
-    NoReturn(Span),
-    /// Return type is not specified.
-    ///
-    /// Functions default to `()` and
-    /// closures default to inference. Span points to where return
-    /// type would be inserted.
-    DefaultReturn(Span),
-    /// Everything else
-    Return(P<Ty>),
-}
-
-impl FunctionRetTy {
-    pub fn span(&self) -> Span {
-        match *self {
-            NoReturn(span) => span,
-            DefaultReturn(span) => span,
-            Return(ref ty) => ty.span,
-        }
-    }
-}
-
-/// Represents the kind of 'self' associated with a method
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum ExplicitSelf_ {
-    /// No self
-    SelfStatic,
-    /// `self`
-    SelfValue(Name),
-    /// `&'lt self`, `&'lt mut self`
-    SelfRegion(Option<Lifetime>, Mutability, Name),
-    /// `self: TYPE`
-    SelfExplicit(P<Ty>, Name),
-}
-
-pub type ExplicitSelf = Spanned<ExplicitSelf_>;
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct Mod {
-    /// A span from the first token past `{` to the last token until `}`.
-    /// For `mod foo;`, the inner span ranges from the first token
-    /// to the last token in the external file.
-    pub inner: Span,
-    pub item_ids: HirVec<ItemId>,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct ForeignMod {
-    pub abi: Abi,
-    pub items: HirVec<ForeignItem>,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct EnumDef {
-    pub variants: HirVec<Variant>,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct Variant_ {
-    pub name: Name,
-    pub attrs: HirVec<Attribute>,
-    pub data: VariantData,
-    /// Explicit discriminant, eg `Foo = 1`
-    pub disr_expr: Option<P<Expr>>,
-}
-
-pub type Variant = Spanned<Variant_>;
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
-pub enum PathListItem_ {
-    PathListIdent {
-        name: Name,
-        /// renamed in list, eg `use foo::{bar as baz};`
-        rename: Option<Name>,
-        id: NodeId,
-    },
-    PathListMod {
-        /// renamed in list, eg `use foo::{self as baz};`
-        rename: Option<Name>,
-        id: NodeId,
-    },
-}
-
-impl PathListItem_ {
-    pub fn id(&self) -> NodeId {
-        match *self {
-            PathListIdent { id, .. } | PathListMod { id, .. } => id,
-        }
-    }
-
-    pub fn name(&self) -> Option<Name> {
-        match *self {
-            PathListIdent { name, .. } => Some(name),
-            PathListMod { .. } => None,
-        }
-    }
-
-    pub fn rename(&self) -> Option<Name> {
-        match *self {
-            PathListIdent { rename, .. } | PathListMod { rename, .. } => rename,
-        }
-    }
-}
-
-pub type PathListItem = Spanned<PathListItem_>;
-
-pub type ViewPath = Spanned<ViewPath_>;
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum ViewPath_ {
-    /// `foo::bar::baz as quux`
-    ///
-    /// or just
-    ///
-    /// `foo::bar::baz` (with `as baz` implicitly on the right)
-    ViewPathSimple(Name, Path),
-
-    /// `foo::bar::*`
-    ViewPathGlob(Path),
-
-    /// `foo::bar::{a,b,c}`
-    ViewPathList(Path, HirVec<PathListItem>),
-}
-
-/// TraitRef's appear in impls.
-///
-/// resolve maps each TraitRef's ref_id to its defining trait; that's all
-/// that the ref_id is for. The impl_id maps to the "self type" of this impl.
-/// If this impl is an ItemImpl, the impl_id is redundant (it could be the
-/// same as the impl's node id).
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct TraitRef {
-    pub path: Path,
-    pub ref_id: NodeId,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct PolyTraitRef {
-    /// The `'a` in `<'a> Foo<&'a T>`
-    pub bound_lifetimes: HirVec<LifetimeDef>,
-
-    /// The `Foo<&'a T>` in `<'a> Foo<&'a T>`
-    pub trait_ref: TraitRef,
-
-    pub span: Span,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
-pub enum Visibility {
-    Public,
-    Inherited,
-}
-
-impl Visibility {
-    pub fn inherit_from(&self, parent_visibility: Visibility) -> Visibility {
-        match self {
-            &Inherited => parent_visibility,
-            &Public => *self,
-        }
-    }
-}
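The rule encoded by `inherit_from` is small but easy to misread, so here is a short usage sketch (hypothetical, not part of the deleted file); it relies only on the derives shown above:

fn visibility_example() {
    // An item without an explicit `pub` takes whatever its parent has:
    assert_eq!(Visibility::Inherited.inherit_from(Visibility::Public),
               Visibility::Public);
    // An explicit `pub` is kept, regardless of the parent:
    assert_eq!(Visibility::Public.inherit_from(Visibility::Inherited),
               Visibility::Public);
}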
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct StructField_ {
-    pub kind: StructFieldKind,
-    pub id: NodeId,
-    pub ty: P<Ty>,
-    pub attrs: HirVec<Attribute>,
-}
-
-impl StructField_ {
-    pub fn name(&self) -> Option<Name> {
-        match self.kind {
-            NamedField(name, _) => Some(name),
-            UnnamedField(_) => None,
-        }
-    }
-}
-
-pub type StructField = Spanned<StructField_>;
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
-pub enum StructFieldKind {
-    NamedField(Name, Visibility),
-    /// Element of a tuple-like struct
-    UnnamedField(Visibility),
-}
-
-impl StructFieldKind {
-    pub fn is_unnamed(&self) -> bool {
-        match *self {
-            UnnamedField(..) => true,
-            NamedField(..) => false,
-        }
-    }
-
-    pub fn visibility(&self) -> Visibility {
-        match *self {
-            NamedField(_, vis) | UnnamedField(vis) => vis,
-        }
-    }
-}
-
-/// Fields and Ids of enum variants and structs
-///
-/// For enum variants: `NodeId` represents both an Id of the variant itself (relevant for all
-/// variant kinds) and an Id of the variant's constructor (not relevant for `Struct`-variants).
-/// One shared Id can be successfully used for these two purposes.
-/// Id of the whole enum lives in `Item`.
-///
-/// For structs: `NodeId` represents an Id of the structure's constructor, so it is not actually
-/// used for `Struct`-structs (but is still present). Structures don't have an analogue of "Id of
-/// the variant itself" from enum variants.
-/// Id of the whole struct lives in `Item`.
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum VariantData {
-    Struct(HirVec<StructField>, NodeId),
-    Tuple(HirVec<StructField>, NodeId),
-    Unit(NodeId),
-}
-
-impl VariantData {
-    pub fn fields(&self) -> &[StructField] {
-        match *self {
-            VariantData::Struct(ref fields, _) | VariantData::Tuple(ref fields, _) => fields,
-            _ => &[],
-        }
-    }
-    pub fn id(&self) -> NodeId {
-        match *self {
-            VariantData::Struct(_, id) | VariantData::Tuple(_, id) | VariantData::Unit(id) => id,
-        }
-    }
-    pub fn is_struct(&self) -> bool {
-        if let VariantData::Struct(..) = *self {
-            true
-        } else {
-            false
-        }
-    }
-    pub fn is_tuple(&self) -> bool {
-        if let VariantData::Tuple(..) = *self {
-            true
-        } else {
-            false
-        }
-    }
-    pub fn is_unit(&self) -> bool {
-        if let VariantData::Unit(..) = *self {
-            true
-        } else {
-            false
-        }
-    }
-}
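To make the three shapes concrete, a hypothetical helper (not part of the deleted file) naming the source form each `VariantData` corresponds to:

fn variant_shape(data: &VariantData) -> &'static str {
    if data.is_struct() {
        // `Foo { x: A }`: named fields; the id is not used as a constructor
        "struct-like"
    } else if data.is_tuple() {
        // `Foo(A, B)`: unnamed fields; id() doubles as the constructor's id
        "tuple-like"
    } else {
        // `Foo`: no fields; fields() returns an empty slice
        "unit-like"
    }
}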
-
-// The bodies for items are stored "out of line", in a separate
-// hashmap in the `Crate`. Here we just record the node-id of the item
-// so it can be fetched later.
-#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct ItemId {
-    pub id: NodeId,
-}
-
-//  FIXME (#3300): Should allow items to be anonymous. Right now
-//  we just use dummy names for anon items.
-/// An item
-///
-/// The name might be a dummy name in case of anonymous items
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct Item {
-    pub name: Name,
-    pub attrs: HirVec<Attribute>,
-    pub id: NodeId,
-    pub node: Item_,
-    pub vis: Visibility,
-    pub span: Span,
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum Item_ {
-    /// An `extern crate` item, with optional original crate name,
-    ///
-    /// e.g. `extern crate foo` or `extern crate foo_bar as foo`
-    ItemExternCrate(Option<Name>),
-    /// A `use` or `pub use` item
-    ItemUse(P<ViewPath>),
-
-    /// A `static` item
-    ItemStatic(P<Ty>, Mutability, P<Expr>),
-    /// A `const` item
-    ItemConst(P<Ty>, P<Expr>),
-    /// A function declaration
-    ItemFn(P<FnDecl>, Unsafety, Constness, Abi, Generics, P<Block>),
-    /// A module
-    ItemMod(Mod),
-    /// An external module
-    ItemForeignMod(ForeignMod),
-    /// A type alias, e.g. `type Foo = Bar<u8>`
-    ItemTy(P<Ty>, Generics),
-    /// An enum definition, e.g. `enum Foo<A, B> { C(A), D(B) }`
-    ItemEnum(EnumDef, Generics),
-    /// A struct definition, e.g. `struct Foo<A> {x: A}`
-    ItemStruct(VariantData, Generics),
-    /// Represents a trait declaration
-    ItemTrait(Unsafety, Generics, TyParamBounds, HirVec<TraitItem>),
-
-    /// Default trait implementations
-    ///
-    /// `impl Trait for .. {}`
-    ItemDefaultImpl(Unsafety, TraitRef),
-    /// An implementation, e.g. `impl<A> Trait for Foo { .. }`
-    ItemImpl(Unsafety,
-             ImplPolarity,
-             Generics,
-             Option<TraitRef>, // (optional) trait this impl implements
-             P<Ty>, // self
-             HirVec<ImplItem>),
-}
-
-impl Item_ {
-    pub fn descriptive_variant(&self) -> &str {
-        match *self {
-            ItemExternCrate(..) => "extern crate",
-            ItemUse(..) => "use",
-            ItemStatic(..) => "static item",
-            ItemConst(..) => "constant item",
-            ItemFn(..) => "function",
-            ItemMod(..) => "module",
-            ItemForeignMod(..) => "foreign module",
-            ItemTy(..) => "type alias",
-            ItemEnum(..) => "enum",
-            ItemStruct(..) => "struct",
-            ItemTrait(..) => "trait",
-            ItemImpl(..) |
-            ItemDefaultImpl(..) => "item",
-        }
-    }
-}
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct ForeignItem {
-    pub name: Name,
-    pub attrs: HirVec<Attribute>,
-    pub node: ForeignItem_,
-    pub id: NodeId,
-    pub span: Span,
-    pub vis: Visibility,
-}
-
-/// An item within an `extern` block
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum ForeignItem_ {
-    /// A foreign function
-    ForeignItemFn(P<FnDecl>, Generics),
-    /// A foreign static item (`static ext: u8`), with optional mutability
-    /// (the boolean is true when mutable)
-    ForeignItemStatic(P<Ty>, bool),
-}
-
-impl ForeignItem_ {
-    pub fn descriptive_variant(&self) -> &str {
-        match *self {
-            ForeignItemFn(..) => "foreign function",
-            ForeignItemStatic(..) => "foreign static item",
-        }
-    }
-}
diff --git a/src/librustc_front/intravisit.rs b/src/librustc_front/intravisit.rs
deleted file mode 100644 (file)
index c1bcaab..0000000
+++ /dev/null
@@ -1,823 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! HIR walker. Each overridden visit method has full control over what
-//! happens with its node: it can do its own traversal of the node's children,
-//! call `intravisit::walk_*` to apply the default traversal algorithm, or prevent
-//! deeper traversal by doing nothing.
-//!
-//! When visiting the HIR, the contents of nested items are NOT visited
-//! by default. This is different from the AST visitor, which does a deep walk.
-//! Hence this module is called `intravisit`; see the method `visit_nested_item`
-//! for more details.
-//!
-//! Note: it is an important invariant that the default visitor walks
-//! the body of a function in "execution order" (more concretely,
-//! reverse post-order with respect to the CFG implied by the AST),
-//! meaning that if AST node A may execute before AST node B, then A
-//! is visited first.  The borrow checker in particular relies on this
-//! property.
-
-use syntax::abi::Abi;
-use syntax::ast::{NodeId, CRATE_NODE_ID, Name, Attribute};
-use syntax::codemap::Span;
-use hir::*;
-
-#[derive(Copy, Clone, PartialEq, Eq)]
-pub enum FnKind<'a> {
-    /// fn foo() or extern "Abi" fn foo()
-    ItemFn(Name, &'a Generics, Unsafety, Constness, Abi, Visibility),
-
-    /// fn foo(&self)
-    Method(Name, &'a MethodSig, Option<Visibility>),
-
-    /// |x, y| {}
-    Closure,
-}
-
-/// Each method of the Visitor trait is a hook to be potentially
-/// overridden.  Each method's default implementation recursively visits
-/// the substructure of the input via the corresponding `walk` method;
-/// e.g. the `visit_mod` method by default calls `intravisit::walk_mod`.
-///
-/// Note that this visitor does NOT visit nested items by default
-/// (this is why the module is called `intravisit`, to distinguish it
-/// from the AST's `visit` module, which acts differently). If you
-/// simply want to visit all items in the crate in some order, you
-/// should call `Crate::visit_all_items`. Otherwise, see the comment
-/// on `visit_nested_item` for details on how to visit nested items.
-///
-/// If you want to ensure that your code handles every variant
-/// explicitly, you need to override each method.  (And you also need
-/// to monitor future changes to `Visitor` in case a new method with a
-/// new default implementation gets introduced.)
-pub trait Visitor<'v> : Sized {
-    ///////////////////////////////////////////////////////////////////////////
-    // Nested items.
-
-    /// Invoked when a nested item is encountered. By default, does
-    /// nothing. If you want a deep walk, you need to override this method to
-    /// fetch the item contents. But most of the time, it is easier
-    /// (and better) to invoke `Crate::visit_all_items`, which visits
-    /// all items in the crate in some order (but doesn't respect
-    /// nesting).
-    #[allow(unused_variables)]
-    fn visit_nested_item(&mut self, id: ItemId) {
-    }
-
-    /// Visit the top-level item and (optionally) nested items. See
-    /// `visit_nested_item` for details.
-    fn visit_item(&mut self, i: &'v Item) {
-        walk_item(self, i)
-    }
-
-    ///////////////////////////////////////////////////////////////////////////
-
-    fn visit_name(&mut self, _span: Span, _name: Name) {
-        // Nothing to do.
-    }
-    fn visit_ident(&mut self, span: Span, ident: Ident) {
-        walk_ident(self, span, ident);
-    }
-    fn visit_mod(&mut self, m: &'v Mod, _s: Span, _n: NodeId) {
-        walk_mod(self, m)
-    }
-    fn visit_foreign_item(&mut self, i: &'v ForeignItem) {
-        walk_foreign_item(self, i)
-    }
-    fn visit_local(&mut self, l: &'v Local) {
-        walk_local(self, l)
-    }
-    fn visit_block(&mut self, b: &'v Block) {
-        walk_block(self, b)
-    }
-    fn visit_stmt(&mut self, s: &'v Stmt) {
-        walk_stmt(self, s)
-    }
-    fn visit_arm(&mut self, a: &'v Arm) {
-        walk_arm(self, a)
-    }
-    fn visit_pat(&mut self, p: &'v Pat) {
-        walk_pat(self, p)
-    }
-    fn visit_decl(&mut self, d: &'v Decl) {
-        walk_decl(self, d)
-    }
-    fn visit_expr(&mut self, ex: &'v Expr) {
-        walk_expr(self, ex)
-    }
-    fn visit_expr_post(&mut self, _ex: &'v Expr) {
-    }
-    fn visit_ty(&mut self, t: &'v Ty) {
-        walk_ty(self, t)
-    }
-    fn visit_generics(&mut self, g: &'v Generics) {
-        walk_generics(self, g)
-    }
-    fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl, b: &'v Block, s: Span, _: NodeId) {
-        walk_fn(self, fk, fd, b, s)
-    }
-    fn visit_trait_item(&mut self, ti: &'v TraitItem) {
-        walk_trait_item(self, ti)
-    }
-    fn visit_impl_item(&mut self, ii: &'v ImplItem) {
-        walk_impl_item(self, ii)
-    }
-    fn visit_trait_ref(&mut self, t: &'v TraitRef) {
-        walk_trait_ref(self, t)
-    }
-    fn visit_ty_param_bound(&mut self, bounds: &'v TyParamBound) {
-        walk_ty_param_bound(self, bounds)
-    }
-    fn visit_poly_trait_ref(&mut self, t: &'v PolyTraitRef, m: &'v TraitBoundModifier) {
-        walk_poly_trait_ref(self, t, m)
-    }
-    fn visit_variant_data(&mut self,
-                          s: &'v VariantData,
-                          _: Name,
-                          _: &'v Generics,
-                          _: NodeId,
-                          _: Span) {
-        walk_struct_def(self, s)
-    }
-    fn visit_struct_field(&mut self, s: &'v StructField) {
-        walk_struct_field(self, s)
-    }
-    fn visit_enum_def(&mut self,
-                      enum_definition: &'v EnumDef,
-                      generics: &'v Generics,
-                      item_id: NodeId,
-                      _: Span) {
-        walk_enum_def(self, enum_definition, generics, item_id)
-    }
-    fn visit_variant(&mut self, v: &'v Variant, g: &'v Generics, item_id: NodeId) {
-        walk_variant(self, v, g, item_id)
-    }
-    fn visit_lifetime(&mut self, lifetime: &'v Lifetime) {
-        walk_lifetime(self, lifetime)
-    }
-    fn visit_lifetime_def(&mut self, lifetime: &'v LifetimeDef) {
-        walk_lifetime_def(self, lifetime)
-    }
-    fn visit_explicit_self(&mut self, es: &'v ExplicitSelf) {
-        walk_explicit_self(self, es)
-    }
-    fn visit_path(&mut self, path: &'v Path, _id: NodeId) {
-        walk_path(self, path)
-    }
-    fn visit_path_list_item(&mut self, prefix: &'v Path, item: &'v PathListItem) {
-        walk_path_list_item(self, prefix, item)
-    }
-    fn visit_path_segment(&mut self, path_span: Span, path_segment: &'v PathSegment) {
-        walk_path_segment(self, path_span, path_segment)
-    }
-    fn visit_path_parameters(&mut self, path_span: Span, path_parameters: &'v PathParameters) {
-        walk_path_parameters(self, path_span, path_parameters)
-    }
-    fn visit_assoc_type_binding(&mut self, type_binding: &'v TypeBinding) {
-        walk_assoc_type_binding(self, type_binding)
-    }
-    fn visit_attribute(&mut self, _attr: &'v Attribute) {
-    }
-    fn visit_macro_def(&mut self, macro_def: &'v MacroDef) {
-        walk_macro_def(self, macro_def)
-    }
-}
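To tie the comments above to actual use, a minimal visitor sketch; `FnCounter` is hypothetical and not part of the deleted file. It overrides one hook and delegates back to the matching `walk_*` function so the default traversal still runs:

struct FnCounter {
    count: usize,
}

impl<'v> Visitor<'v> for FnCounter {
    fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl, b: &'v Block, s: Span, _id: NodeId) {
        self.count += 1;
        // Keep walking so closures nested inside the body are also counted;
        // without this call the traversal stops here.
        walk_fn(self, fk, fd, b, s);
    }
}

Since nested items are not walked by default, one would drive this with `Crate::visit_all_items`, as the trait docs suggest, to reach every function in a crate.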
-
-pub fn walk_opt_name<'v, V: Visitor<'v>>(visitor: &mut V, span: Span, opt_name: Option<Name>) {
-    for name in opt_name {
-        visitor.visit_name(span, name);
-    }
-}
-
-pub fn walk_opt_ident<'v, V: Visitor<'v>>(visitor: &mut V, span: Span, opt_ident: Option<Ident>) {
-    for ident in opt_ident {
-        visitor.visit_ident(span, ident);
-    }
-}
-
-pub fn walk_ident<'v, V: Visitor<'v>>(visitor: &mut V, span: Span, ident: Ident) {
-    visitor.visit_name(span, ident.name);
-}
-
-/// Walks the contents of a crate. See also `Crate::visit_all_items`.
-pub fn walk_crate<'v, V: Visitor<'v>>(visitor: &mut V, krate: &'v Crate) {
-    visitor.visit_mod(&krate.module, krate.span, CRATE_NODE_ID);
-    walk_list!(visitor, visit_attribute, &krate.attrs);
-    walk_list!(visitor, visit_macro_def, &krate.exported_macros);
-}
-
-pub fn walk_macro_def<'v, V: Visitor<'v>>(visitor: &mut V, macro_def: &'v MacroDef) {
-    visitor.visit_name(macro_def.span, macro_def.name);
-    walk_opt_name(visitor, macro_def.span, macro_def.imported_from);
-    walk_list!(visitor, visit_attribute, &macro_def.attrs);
-}
-
-pub fn walk_mod<'v, V: Visitor<'v>>(visitor: &mut V, module: &'v Mod) {
-    for &item_id in &module.item_ids {
-        visitor.visit_nested_item(item_id);
-    }
-}
-
-pub fn walk_local<'v, V: Visitor<'v>>(visitor: &mut V, local: &'v Local) {
-    visitor.visit_pat(&local.pat);
-    walk_list!(visitor, visit_ty, &local.ty);
-    walk_list!(visitor, visit_expr, &local.init);
-}
-
-pub fn walk_lifetime<'v, V: Visitor<'v>>(visitor: &mut V, lifetime: &'v Lifetime) {
-    visitor.visit_name(lifetime.span, lifetime.name);
-}
-
-pub fn walk_lifetime_def<'v, V: Visitor<'v>>(visitor: &mut V, lifetime_def: &'v LifetimeDef) {
-    visitor.visit_lifetime(&lifetime_def.lifetime);
-    walk_list!(visitor, visit_lifetime, &lifetime_def.bounds);
-}
-
-pub fn walk_explicit_self<'v, V: Visitor<'v>>(visitor: &mut V, explicit_self: &'v ExplicitSelf) {
-    match explicit_self.node {
-        SelfStatic => {}
-        SelfValue(name) => {
-            visitor.visit_name(explicit_self.span, name)
-        }
-        SelfRegion(ref opt_lifetime, _, name) => {
-            visitor.visit_name(explicit_self.span, name);
-            walk_list!(visitor, visit_lifetime, opt_lifetime);
-        }
-        SelfExplicit(ref typ, name) => {
-            visitor.visit_name(explicit_self.span, name);
-            visitor.visit_ty(typ)
-        }
-    }
-}
-
-pub fn walk_poly_trait_ref<'v, V>(visitor: &mut V,
-                                  trait_ref: &'v PolyTraitRef,
-                                  _modifier: &'v TraitBoundModifier)
-    where V: Visitor<'v>
-{
-    walk_list!(visitor, visit_lifetime_def, &trait_ref.bound_lifetimes);
-    visitor.visit_trait_ref(&trait_ref.trait_ref);
-}
-
-pub fn walk_trait_ref<'v, V>(visitor: &mut V, trait_ref: &'v TraitRef)
-    where V: Visitor<'v>
-{
-    visitor.visit_path(&trait_ref.path, trait_ref.ref_id)
-}
-
-pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item) {
-    visitor.visit_name(item.span, item.name);
-    match item.node {
-        ItemExternCrate(opt_name) => {
-            walk_opt_name(visitor, item.span, opt_name)
-        }
-        ItemUse(ref vp) => {
-            match vp.node {
-                ViewPathSimple(name, ref path) => {
-                    visitor.visit_name(vp.span, name);
-                    visitor.visit_path(path, item.id);
-                }
-                ViewPathGlob(ref path) => {
-                    visitor.visit_path(path, item.id);
-                }
-                ViewPathList(ref prefix, ref list) => {
-                    if !list.is_empty() {
-                        for item in list {
-                            visitor.visit_path_list_item(prefix, item)
-                        }
-                    } else {
-                        visitor.visit_path(prefix, item.id);
-                    }
-                }
-            }
-        }
-        ItemStatic(ref typ, _, ref expr) |
-        ItemConst(ref typ, ref expr) => {
-            visitor.visit_ty(typ);
-            visitor.visit_expr(expr);
-        }
-        ItemFn(ref declaration, unsafety, constness, abi, ref generics, ref body) => {
-            visitor.visit_fn(FnKind::ItemFn(item.name,
-                                            generics,
-                                            unsafety,
-                                            constness,
-                                            abi,
-                                            item.vis),
-                             declaration,
-                             body,
-                             item.span,
-                             item.id)
-        }
-        ItemMod(ref module) => {
-            visitor.visit_mod(module, item.span, item.id)
-        }
-        ItemForeignMod(ref foreign_module) => {
-            walk_list!(visitor, visit_foreign_item, &foreign_module.items);
-        }
-        ItemTy(ref typ, ref type_parameters) => {
-            visitor.visit_ty(typ);
-            visitor.visit_generics(type_parameters)
-        }
-        ItemEnum(ref enum_definition, ref type_parameters) => {
-            visitor.visit_generics(type_parameters);
-            visitor.visit_enum_def(enum_definition, type_parameters, item.id, item.span)
-        }
-        ItemDefaultImpl(_, ref trait_ref) => {
-            visitor.visit_trait_ref(trait_ref)
-        }
-        ItemImpl(_, _, ref type_parameters, ref opt_trait_reference, ref typ, ref impl_items) => {
-            visitor.visit_generics(type_parameters);
-            walk_list!(visitor, visit_trait_ref, opt_trait_reference);
-            visitor.visit_ty(typ);
-            walk_list!(visitor, visit_impl_item, impl_items);
-        }
-        ItemStruct(ref struct_definition, ref generics) => {
-            visitor.visit_generics(generics);
-            visitor.visit_variant_data(struct_definition, item.name, generics, item.id, item.span);
-        }
-        ItemTrait(_, ref generics, ref bounds, ref methods) => {
-            visitor.visit_generics(generics);
-            walk_list!(visitor, visit_ty_param_bound, bounds);
-            walk_list!(visitor, visit_trait_item, methods);
-        }
-    }
-    walk_list!(visitor, visit_attribute, &item.attrs);
-}
-
-pub fn walk_enum_def<'v, V: Visitor<'v>>(visitor: &mut V,
-                                         enum_definition: &'v EnumDef,
-                                         generics: &'v Generics,
-                                         item_id: NodeId) {
-    walk_list!(visitor,
-               visit_variant,
-               &enum_definition.variants,
-               generics,
-               item_id);
-}
-
-pub fn walk_variant<'v, V: Visitor<'v>>(visitor: &mut V,
-                                        variant: &'v Variant,
-                                        generics: &'v Generics,
-                                        item_id: NodeId) {
-    visitor.visit_name(variant.span, variant.node.name);
-    visitor.visit_variant_data(&variant.node.data,
-                               variant.node.name,
-                               generics,
-                               item_id,
-                               variant.span);
-    walk_list!(visitor, visit_expr, &variant.node.disr_expr);
-    walk_list!(visitor, visit_attribute, &variant.node.attrs);
-}
-
-pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty) {
-    match typ.node {
-        TyVec(ref ty) => {
-            visitor.visit_ty(ty)
-        }
-        TyPtr(ref mutable_type) => {
-            visitor.visit_ty(&mutable_type.ty)
-        }
-        TyRptr(ref opt_lifetime, ref mutable_type) => {
-            walk_list!(visitor, visit_lifetime, opt_lifetime);
-            visitor.visit_ty(&mutable_type.ty)
-        }
-        TyTup(ref tuple_element_types) => {
-            walk_list!(visitor, visit_ty, tuple_element_types);
-        }
-        TyBareFn(ref function_declaration) => {
-            walk_fn_decl(visitor, &function_declaration.decl);
-            walk_list!(visitor, visit_lifetime_def, &function_declaration.lifetimes);
-        }
-        TyPath(ref maybe_qself, ref path) => {
-            if let Some(ref qself) = *maybe_qself {
-                visitor.visit_ty(&qself.ty);
-            }
-            visitor.visit_path(path, typ.id);
-        }
-        TyObjectSum(ref ty, ref bounds) => {
-            visitor.visit_ty(ty);
-            walk_list!(visitor, visit_ty_param_bound, bounds);
-        }
-        TyFixedLengthVec(ref ty, ref expression) => {
-            visitor.visit_ty(ty);
-            visitor.visit_expr(expression)
-        }
-        TyPolyTraitRef(ref bounds) => {
-            walk_list!(visitor, visit_ty_param_bound, bounds);
-        }
-        TyTypeof(ref expression) => {
-            visitor.visit_expr(expression)
-        }
-        TyInfer => {}
-    }
-}
-
-pub fn walk_path<'v, V: Visitor<'v>>(visitor: &mut V, path: &'v Path) {
-    for segment in &path.segments {
-        visitor.visit_path_segment(path.span, segment);
-    }
-}
-
-pub fn walk_path_list_item<'v, V: Visitor<'v>>(visitor: &mut V,
-                                               prefix: &'v Path,
-                                               item: &'v PathListItem) {
-    for segment in &prefix.segments {
-        visitor.visit_path_segment(prefix.span, segment);
-    }
-
-    walk_opt_name(visitor, item.span, item.node.name());
-    walk_opt_name(visitor, item.span, item.node.rename());
-}
-
-pub fn walk_path_segment<'v, V: Visitor<'v>>(visitor: &mut V,
-                                             path_span: Span,
-                                             segment: &'v PathSegment) {
-    visitor.visit_ident(path_span, segment.identifier);
-    visitor.visit_path_parameters(path_span, &segment.parameters);
-}
-
-pub fn walk_path_parameters<'v, V: Visitor<'v>>(visitor: &mut V,
-                                                _path_span: Span,
-                                                path_parameters: &'v PathParameters) {
-    match *path_parameters {
-        AngleBracketedParameters(ref data) => {
-            walk_list!(visitor, visit_ty, &data.types);
-            walk_list!(visitor, visit_lifetime, &data.lifetimes);
-            walk_list!(visitor, visit_assoc_type_binding, &data.bindings);
-        }
-        ParenthesizedParameters(ref data) => {
-            walk_list!(visitor, visit_ty, &data.inputs);
-            walk_list!(visitor, visit_ty, &data.output);
-        }
-    }
-}
-
-pub fn walk_assoc_type_binding<'v, V: Visitor<'v>>(visitor: &mut V,
-                                                   type_binding: &'v TypeBinding) {
-    visitor.visit_name(type_binding.span, type_binding.name);
-    visitor.visit_ty(&type_binding.ty);
-}
-
-pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat) {
-    match pattern.node {
-        PatKind::TupleStruct(ref path, ref opt_children) => {
-            visitor.visit_path(path, pattern.id);
-            if let Some(ref children) = *opt_children {
-                walk_list!(visitor, visit_pat, children);
-            }
-        }
-        PatKind::Path(ref path) => {
-            visitor.visit_path(path, pattern.id);
-        }
-        PatKind::QPath(ref qself, ref path) => {
-            visitor.visit_ty(&qself.ty);
-            visitor.visit_path(path, pattern.id)
-        }
-        PatKind::Struct(ref path, ref fields, _) => {
-            visitor.visit_path(path, pattern.id);
-            for field in fields {
-                visitor.visit_name(field.span, field.node.name);
-                visitor.visit_pat(&field.node.pat)
-            }
-        }
-        PatKind::Tup(ref tuple_elements) => {
-            walk_list!(visitor, visit_pat, tuple_elements);
-        }
-        PatKind::Box(ref subpattern) |
-        PatKind::Ref(ref subpattern, _) => {
-            visitor.visit_pat(subpattern)
-        }
-        PatKind::Ident(_, ref pth1, ref optional_subpattern) => {
-            visitor.visit_ident(pth1.span, pth1.node);
-            walk_list!(visitor, visit_pat, optional_subpattern);
-        }
-        PatKind::Lit(ref expression) => visitor.visit_expr(expression),
-        PatKind::Range(ref lower_bound, ref upper_bound) => {
-            visitor.visit_expr(lower_bound);
-            visitor.visit_expr(upper_bound)
-        }
-        PatKind::Wild => (),
-        PatKind::Vec(ref prepatterns, ref slice_pattern, ref postpatterns) => {
-            walk_list!(visitor, visit_pat, prepatterns);
-            walk_list!(visitor, visit_pat, slice_pattern);
-            walk_list!(visitor, visit_pat, postpatterns);
-        }
-    }
-}
-
-pub fn walk_foreign_item<'v, V: Visitor<'v>>(visitor: &mut V, foreign_item: &'v ForeignItem) {
-    visitor.visit_name(foreign_item.span, foreign_item.name);
-
-    match foreign_item.node {
-        ForeignItemFn(ref function_declaration, ref generics) => {
-            walk_fn_decl(visitor, function_declaration);
-            visitor.visit_generics(generics)
-        }
-        ForeignItemStatic(ref typ, _) => visitor.visit_ty(typ),
-    }
-
-    walk_list!(visitor, visit_attribute, &foreign_item.attrs);
-}
-
-pub fn walk_ty_param_bound<'v, V: Visitor<'v>>(visitor: &mut V, bound: &'v TyParamBound) {
-    match *bound {
-        TraitTyParamBound(ref typ, ref modifier) => {
-            visitor.visit_poly_trait_ref(typ, modifier);
-        }
-        RegionTyParamBound(ref lifetime) => {
-            visitor.visit_lifetime(lifetime);
-        }
-    }
-}
-
-pub fn walk_generics<'v, V: Visitor<'v>>(visitor: &mut V, generics: &'v Generics) {
-    for param in &generics.ty_params {
-        visitor.visit_name(param.span, param.name);
-        walk_list!(visitor, visit_ty_param_bound, &param.bounds);
-        walk_list!(visitor, visit_ty, &param.default);
-    }
-    walk_list!(visitor, visit_lifetime_def, &generics.lifetimes);
-    for predicate in &generics.where_clause.predicates {
-        match predicate {
-            &WherePredicate::BoundPredicate(WhereBoundPredicate{ref bounded_ty,
-                                                                          ref bounds,
-                                                                          ref bound_lifetimes,
-                                                                          ..}) => {
-                visitor.visit_ty(bounded_ty);
-                walk_list!(visitor, visit_ty_param_bound, bounds);
-                walk_list!(visitor, visit_lifetime_def, bound_lifetimes);
-            }
-            &WherePredicate::RegionPredicate(WhereRegionPredicate{ref lifetime,
-                                                                            ref bounds,
-                                                                            ..}) => {
-                visitor.visit_lifetime(lifetime);
-                walk_list!(visitor, visit_lifetime, bounds);
-            }
-            &WherePredicate::EqPredicate(WhereEqPredicate{id,
-                                                                    ref path,
-                                                                    ref ty,
-                                                                    ..}) => {
-                visitor.visit_path(path, id);
-                visitor.visit_ty(ty);
-            }
-        }
-    }
-}
-
-pub fn walk_fn_ret_ty<'v, V: Visitor<'v>>(visitor: &mut V, ret_ty: &'v FunctionRetTy) {
-    if let Return(ref output_ty) = *ret_ty {
-        visitor.visit_ty(output_ty)
-    }
-}
-
-pub fn walk_fn_decl<'v, V: Visitor<'v>>(visitor: &mut V, function_declaration: &'v FnDecl) {
-    for argument in &function_declaration.inputs {
-        visitor.visit_pat(&argument.pat);
-        visitor.visit_ty(&argument.ty)
-    }
-    walk_fn_ret_ty(visitor, &function_declaration.output)
-}
-
-pub fn walk_fn_decl_nopat<'v, V: Visitor<'v>>(visitor: &mut V, function_declaration: &'v FnDecl) {
-    for argument in &function_declaration.inputs {
-        visitor.visit_ty(&argument.ty)
-    }
-    walk_fn_ret_ty(visitor, &function_declaration.output)
-}
-
-pub fn walk_fn_kind<'v, V: Visitor<'v>>(visitor: &mut V, function_kind: FnKind<'v>) {
-    match function_kind {
-        FnKind::ItemFn(_, generics, _, _, _, _) => {
-            visitor.visit_generics(generics);
-        }
-        FnKind::Method(_, sig, _) => {
-            visitor.visit_generics(&sig.generics);
-            visitor.visit_explicit_self(&sig.explicit_self);
-        }
-        FnKind::Closure => {}
-    }
-}
-
-pub fn walk_fn<'v, V: Visitor<'v>>(visitor: &mut V,
-                                   function_kind: FnKind<'v>,
-                                   function_declaration: &'v FnDecl,
-                                   function_body: &'v Block,
-                                   _span: Span) {
-    walk_fn_decl(visitor, function_declaration);
-    walk_fn_kind(visitor, function_kind);
-    visitor.visit_block(function_body)
-}
-
-pub fn walk_trait_item<'v, V: Visitor<'v>>(visitor: &mut V, trait_item: &'v TraitItem) {
-    visitor.visit_name(trait_item.span, trait_item.name);
-    walk_list!(visitor, visit_attribute, &trait_item.attrs);
-    match trait_item.node {
-        ConstTraitItem(ref ty, ref default) => {
-            visitor.visit_ty(ty);
-            walk_list!(visitor, visit_expr, default);
-        }
-        MethodTraitItem(ref sig, None) => {
-            visitor.visit_explicit_self(&sig.explicit_self);
-            visitor.visit_generics(&sig.generics);
-            walk_fn_decl(visitor, &sig.decl);
-        }
-        MethodTraitItem(ref sig, Some(ref body)) => {
-            visitor.visit_fn(FnKind::Method(trait_item.name, sig, None),
-                             &sig.decl,
-                             body,
-                             trait_item.span,
-                             trait_item.id);
-        }
-        TypeTraitItem(ref bounds, ref default) => {
-            walk_list!(visitor, visit_ty_param_bound, bounds);
-            walk_list!(visitor, visit_ty, default);
-        }
-    }
-}
-
-pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplItem) {
-    visitor.visit_name(impl_item.span, impl_item.name);
-    walk_list!(visitor, visit_attribute, &impl_item.attrs);
-    match impl_item.node {
-        ImplItemKind::Const(ref ty, ref expr) => {
-            visitor.visit_ty(ty);
-            visitor.visit_expr(expr);
-        }
-        ImplItemKind::Method(ref sig, ref body) => {
-            visitor.visit_fn(FnKind::Method(impl_item.name, sig, Some(impl_item.vis)),
-                             &sig.decl,
-                             body,
-                             impl_item.span,
-                             impl_item.id);
-        }
-        ImplItemKind::Type(ref ty) => {
-            visitor.visit_ty(ty);
-        }
-    }
-}
-
-pub fn walk_struct_def<'v, V: Visitor<'v>>(visitor: &mut V, struct_definition: &'v VariantData) {
-    walk_list!(visitor, visit_struct_field, struct_definition.fields());
-}
-
-pub fn walk_struct_field<'v, V: Visitor<'v>>(visitor: &mut V, struct_field: &'v StructField) {
-    walk_opt_name(visitor, struct_field.span, struct_field.node.name());
-    visitor.visit_ty(&struct_field.node.ty);
-    walk_list!(visitor, visit_attribute, &struct_field.node.attrs);
-}
-
-pub fn walk_block<'v, V: Visitor<'v>>(visitor: &mut V, block: &'v Block) {
-    walk_list!(visitor, visit_stmt, &block.stmts);
-    walk_list!(visitor, visit_expr, &block.expr);
-}
-
-pub fn walk_stmt<'v, V: Visitor<'v>>(visitor: &mut V, statement: &'v Stmt) {
-    match statement.node {
-        StmtDecl(ref declaration, _) => visitor.visit_decl(declaration),
-        StmtExpr(ref expression, _) | StmtSemi(ref expression, _) => {
-            visitor.visit_expr(expression)
-        }
-    }
-}
-
-pub fn walk_decl<'v, V: Visitor<'v>>(visitor: &mut V, declaration: &'v Decl) {
-    match declaration.node {
-        DeclLocal(ref local) => visitor.visit_local(local),
-        DeclItem(item) => visitor.visit_nested_item(item),
-    }
-}
-
-pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) {
-    match expression.node {
-        ExprBox(ref subexpression) => {
-            visitor.visit_expr(subexpression)
-        }
-        ExprVec(ref subexpressions) => {
-            walk_list!(visitor, visit_expr, subexpressions);
-        }
-        ExprRepeat(ref element, ref count) => {
-            visitor.visit_expr(element);
-            visitor.visit_expr(count)
-        }
-        ExprStruct(ref path, ref fields, ref optional_base) => {
-            visitor.visit_path(path, expression.id);
-            for field in fields {
-                visitor.visit_name(field.name.span, field.name.node);
-                visitor.visit_expr(&field.expr)
-            }
-            walk_list!(visitor, visit_expr, optional_base);
-        }
-        ExprTup(ref subexpressions) => {
-            walk_list!(visitor, visit_expr, subexpressions);
-        }
-        ExprCall(ref callee_expression, ref arguments) => {
-            walk_list!(visitor, visit_expr, arguments);
-            visitor.visit_expr(callee_expression)
-        }
-        ExprMethodCall(ref name, ref types, ref arguments) => {
-            visitor.visit_name(name.span, name.node);
-            walk_list!(visitor, visit_expr, arguments);
-            walk_list!(visitor, visit_ty, types);
-        }
-        ExprBinary(_, ref left_expression, ref right_expression) => {
-            visitor.visit_expr(left_expression);
-            visitor.visit_expr(right_expression)
-        }
-        ExprAddrOf(_, ref subexpression) | ExprUnary(_, ref subexpression) => {
-            visitor.visit_expr(subexpression)
-        }
-        ExprLit(_) => {}
-        ExprCast(ref subexpression, ref typ) | ExprType(ref subexpression, ref typ) => {
-            visitor.visit_expr(subexpression);
-            visitor.visit_ty(typ)
-        }
-        ExprIf(ref head_expression, ref if_block, ref optional_else) => {
-            visitor.visit_expr(head_expression);
-            visitor.visit_block(if_block);
-            walk_list!(visitor, visit_expr, optional_else);
-        }
-        ExprWhile(ref subexpression, ref block, opt_ident) => {
-            visitor.visit_expr(subexpression);
-            visitor.visit_block(block);
-            walk_opt_ident(visitor, expression.span, opt_ident)
-        }
-        ExprLoop(ref block, opt_ident) => {
-            visitor.visit_block(block);
-            walk_opt_ident(visitor, expression.span, opt_ident)
-        }
-        ExprMatch(ref subexpression, ref arms, _) => {
-            visitor.visit_expr(subexpression);
-            walk_list!(visitor, visit_arm, arms);
-        }
-        ExprClosure(_, ref function_declaration, ref body) => {
-            visitor.visit_fn(FnKind::Closure,
-                             function_declaration,
-                             body,
-                             expression.span,
-                             expression.id)
-        }
-        ExprBlock(ref block) => visitor.visit_block(block),
-        ExprAssign(ref left_hand_expression, ref right_hand_expression) => {
-            visitor.visit_expr(right_hand_expression);
-            visitor.visit_expr(left_hand_expression)
-        }
-        ExprAssignOp(_, ref left_expression, ref right_expression) => {
-            visitor.visit_expr(right_expression);
-            visitor.visit_expr(left_expression)
-        }
-        ExprField(ref subexpression, ref name) => {
-            visitor.visit_expr(subexpression);
-            visitor.visit_name(name.span, name.node);
-        }
-        ExprTupField(ref subexpression, _) => {
-            visitor.visit_expr(subexpression);
-        }
-        ExprIndex(ref main_expression, ref index_expression) => {
-            visitor.visit_expr(main_expression);
-            visitor.visit_expr(index_expression)
-        }
-        ExprRange(ref start, ref end) => {
-            walk_list!(visitor, visit_expr, start);
-            walk_list!(visitor, visit_expr, end);
-        }
-        ExprPath(ref maybe_qself, ref path) => {
-            if let Some(ref qself) = *maybe_qself {
-                visitor.visit_ty(&qself.ty);
-            }
-            visitor.visit_path(path, expression.id)
-        }
-        ExprBreak(ref opt_sp_ident) | ExprAgain(ref opt_sp_ident) => {
-            for sp_ident in opt_sp_ident {
-                visitor.visit_ident(sp_ident.span, sp_ident.node);
-            }
-        }
-        ExprRet(ref optional_expression) => {
-            walk_list!(visitor, visit_expr, optional_expression);
-        }
-        ExprInlineAsm(ref ia) => {
-            for &(_, ref input) in &ia.inputs {
-                visitor.visit_expr(&input)
-            }
-            for output in &ia.outputs {
-                visitor.visit_expr(&output.expr)
-            }
-        }
-    }
-
-    visitor.visit_expr_post(expression)
-}
-
-pub fn walk_arm<'v, V: Visitor<'v>>(visitor: &mut V, arm: &'v Arm) {
-    walk_list!(visitor, visit_pat, &arm.pats);
-    walk_list!(visitor, visit_expr, &arm.guard);
-    visitor.visit_expr(&arm.body);
-    walk_list!(visitor, visit_attribute, &arm.attrs);
-}
diff --git a/src/librustc_front/lib.rs b/src/librustc_front/lib.rs
deleted file mode 100644 (file)
index 02ad69e..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The Rust compiler.
-//!
-//! # Note
-//!
-//! This API is completely unstable and subject to change.
-
-#![crate_name = "rustc_front"]
-#![unstable(feature = "rustc_private", issue = "27812")]
-#![crate_type = "dylib"]
-#![crate_type = "rlib"]
-#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
-       html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
-       html_root_url = "http://doc.rust-lang.org/nightly/")]
-#![cfg_attr(not(stage0), deny(warnings))]
-
-#![feature(associated_consts)]
-#![feature(box_patterns)]
-#![feature(box_syntax)]
-#![feature(const_fn)]
-#![feature(quote)]
-#![feature(rustc_diagnostic_macros)]
-#![feature(rustc_private)]
-#![feature(slice_patterns)]
-#![feature(staged_api)]
-#![feature(str_char)]
-
-extern crate serialize;
-#[macro_use]
-extern crate log;
-#[macro_use]
-extern crate syntax;
-#[macro_use]
-#[no_link]
-extern crate rustc_bitflags;
-
-extern crate serialize as rustc_serialize; // used by deriving
-
-#[macro_use]
-pub mod hir;
-pub mod lowering;
-pub mod fold;
-pub mod intravisit;
-pub mod util;
-
-pub mod print {
-    pub mod pprust;
-}
diff --git a/src/librustc_front/lowering.rs b/src/librustc_front/lowering.rs
deleted file mode 100644 (file)
index 0e7d9db..0000000
+++ /dev/null
@@ -1,1980 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Lowers the AST to the HIR.
-//
-// Since the AST and HIR are fairly similar, this is mostly a simple procedure,
-// much like a fold. Where lowering involves a bit more work, things get more
-// interesting and there are some invariants you should know about. These mostly
-// concern spans and ids.
-//
-// Spans are assigned to AST nodes during parsing and then are modified during
-// expansion to indicate the origin of a node and the process it went through
-// while being expanded. Ids are assigned to AST nodes just before lowering.
-//
-// For the simpler lowering steps, ids and spans should be preserved. Unlike
-// expansion, we do not preserve the process of lowering in the spans, so spans
-// should not be modified here. When creating a new node (as opposed to
-// 'folding' an existing one), you create a new id using `next_id()`.
-//
-// You must ensure that ids are unique. That means that you should only use the
-// id from an AST node in a single HIR node (you can assume that AST node ids
-// are unique). Every new node must have a unique id. Avoid cloning HIR nodes.
-// If you do, you must then set the new node's id to a fresh one.
-//
-// Lowering must be reproducible (the compiler only lowers once, but tools and
-// custom lints may lower an AST node to a HIR node to interact with the
-// compiler). The most interesting bit of this is ids - if you lower an AST node
-// and create new HIR nodes with fresh ids, when re-lowering the same node, you
-// must ensure you get the same ids! To do this, we keep track of the next id
-// when we translate a node which requires new ids. By checking this cache and
-// using node ids starting with the cached id, we ensure ids are reproducible.
-// To use this system, you just need to hold on to a CachedIdSetter object
-// whilst lowering. This is an RAII object that takes care of setting and
-// restoring the cached id, etc.
-//
-// This whole system relies on node ids being incremented one at a time and
-// all increments being for lowering. This means that you should not call any
-// non-lowering function which will use new node ids.
-//
-// We must also cache gensym'ed Idents to ensure that we get the same Ident
-// every time we lower a node with gensym'ed names. One consequence of this is
-// that you can only gensym a name once in a lowering (you don't need to worry
-// about nested lowering though). That's because we cache based on the name and
-// the currently cached node id, which is unique per lowered node.
-//
-// Spans are used for error messages and for tools to map semantics back to
-// source code. It is therefore less important to be strict about spans than
-// about ids (you can't break the compiler by screwing up a span). Obviously, a
-// HIR node can only have a single span. But multiple nodes can have the same
-// span and spans don't need to be kept in order, etc. Where code is preserved
-// by lowering, it should have the same span as in the AST. Where HIR nodes are
-// new it is probably best to give a span for the whole AST node being lowered.
-// All nodes should have real spans; don't use dummy spans. Tools are likely to
-// get confused if the spans from leaf AST nodes occur in multiple places
-// in the HIR, especially for multiple identifiers.
-
-use hir;
-
-use std::collections::BTreeMap;
-use std::collections::HashMap;
-use syntax::ast::*;
-use syntax::attr::{ThinAttributes, ThinAttributesExt};
-use syntax::ext::mtwt;
-use syntax::ptr::P;
-use syntax::codemap::{respan, Spanned, Span};
-use syntax::parse::token;
-use syntax::std_inject;
-use syntax::visit::{self, Visitor};
-
-use std::cell::{Cell, RefCell};
-
-pub struct LoweringContext<'a> {
-    crate_root: Option<&'static str>,
-    // Map AST ids to ids used for expanded nodes.
-    id_cache: RefCell<HashMap<NodeId, NodeId>>,
-    // Used if there are no cached ids for the current node.
-    id_assigner: &'a NodeIdAssigner,
-    // 0 == no cached id. Must be incremented to align with previous id
-    // incrementing.
-    cached_id: Cell<u32>,
-    // Keep track of gensym'ed idents.
-    gensym_cache: RefCell<HashMap<(NodeId, &'static str), hir::Ident>>,
-    // A copy of cached_id, but one that is also set to an id while a node is lowered for
-    // the first time.
-    gensym_key: Cell<u32>,
-}
-
-impl<'a, 'hir> LoweringContext<'a> {
-    pub fn new(id_assigner: &'a NodeIdAssigner, c: Option<&Crate>) -> LoweringContext<'a> {
-        let crate_root = c.and_then(|c| {
-            if std_inject::no_core(c) {
-                None
-            } else if std_inject::no_std(c) {
-                Some("core")
-            } else {
-                Some("std")
-            }
-        });
-
-        LoweringContext {
-            crate_root: crate_root,
-            id_cache: RefCell::new(HashMap::new()),
-            id_assigner: id_assigner,
-            cached_id: Cell::new(0),
-            gensym_cache: RefCell::new(HashMap::new()),
-            gensym_key: Cell::new(0),
-        }
-    }
-
-    fn next_id(&self) -> NodeId {
-        let cached_id = self.cached_id.get();
-        if cached_id == 0 {
-            return self.id_assigner.next_node_id();
-        }
-
-        self.cached_id.set(cached_id + 1);
-        cached_id
-    }
-
-    fn str_to_ident(&self, s: &'static str) -> hir::Ident {
-        let gensym_key = self.gensym_key.get();
-        if gensym_key == 0 {
-            return hir::Ident::from_name(token::gensym(s));
-        }
-
-        let cached = self.gensym_cache.borrow().contains_key(&(gensym_key, s));
-        if cached {
-            self.gensym_cache.borrow()[&(gensym_key, s)]
-        } else {
-            let result = hir::Ident::from_name(token::gensym(s));
-            self.gensym_cache.borrow_mut().insert((gensym_key, s), result);
-            result
-        }
-    }
-}
-
-// Utility fn for setting and unsetting the cached id.
-fn cache_ids<'a, OP, R>(lctx: &LoweringContext, expr_id: NodeId, op: OP) -> R
-    where OP: FnOnce(&LoweringContext) -> R
-{
-    // Only reset the id if it was previously 0, i.e., was not cached.
-    // If it was cached, we are in a nested node, but our id count will
-    // still count towards the parent's count.
-    let reset_cached_id = lctx.cached_id.get() == 0;
-    // We always reset gensym_key so that if we use the same name in a nested
-    // node and after that node, they get different values.
-    let old_gensym_key = lctx.gensym_key.get();
-
-    {
-        let id_cache: &mut HashMap<_, _> = &mut lctx.id_cache.borrow_mut();
-
-        if id_cache.contains_key(&expr_id) {
-            let cached_id = lctx.cached_id.get();
-            if cached_id == 0 {
-                // We're entering a node where we need to track ids, but are not
-                // yet tracking.
-                lctx.cached_id.set(id_cache[&expr_id]);
-            } else {
-                // We're already tracking - check that the tracked id is the same
-                // as the expected id.
-                assert!(cached_id == id_cache[&expr_id], "id mismatch");
-            }
-            lctx.gensym_key.set(id_cache[&expr_id]);
-        } else {
-            // We've never lowered this node before, remember it for next time.
-            let next_id = lctx.id_assigner.peek_node_id();
-            id_cache.insert(expr_id, next_id);
-            lctx.gensym_key.set(next_id);
-            // self.cached_id is not set when we lower a node for the first time,
-            // only on re-lowering.
-        }
-    }
-
-    let result = op(lctx);
-
-    if reset_cached_id {
-        lctx.cached_id.set(0);
-    }
-    lctx.gensym_key.set(old_gensym_key);
-
-    result
-}
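As a rough usage sketch of the caching machinery described in the module header (hypothetical; `lower_something` is not part of the deleted file): a lowering function wraps the work for one AST node in `cache_ids`, and the ids allocated inside the closure then come back identically if the same node is re-lowered:

fn lower_something(lctx: &LoweringContext, e: &Expr) -> NodeId {
    cache_ids(lctx, e.id, |lctx| {
        // Ids requested here are reproducible across re-lowerings of `e`.
        lctx.next_id()
    })
}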
-
-pub fn lower_ident(_lctx: &LoweringContext, ident: Ident) -> hir::Ident {
-    hir::Ident {
-        name: mtwt::resolve(ident),
-        unhygienic_name: ident.name,
-    }
-}
-
-pub fn lower_attrs(_lctx: &LoweringContext, attrs: &Vec<Attribute>) -> hir::HirVec<Attribute> {
-    attrs.clone().into()
-}
-
-pub fn lower_view_path(lctx: &LoweringContext, view_path: &ViewPath) -> P<hir::ViewPath> {
-    P(Spanned {
-        node: match view_path.node {
-            ViewPathSimple(ident, ref path) => {
-                hir::ViewPathSimple(ident.name, lower_path(lctx, path))
-            }
-            ViewPathGlob(ref path) => {
-                hir::ViewPathGlob(lower_path(lctx, path))
-            }
-            ViewPathList(ref path, ref path_list_idents) => {
-                hir::ViewPathList(lower_path(lctx, path),
-                                  path_list_idents.iter()
-                                                  .map(lower_path_list_item)
-                                                  .collect())
-            }
-        },
-        span: view_path.span,
-    })
-}
-
-fn lower_path_list_item(path_list_ident: &PathListItem) -> hir::PathListItem {
-    Spanned {
-        node: match path_list_ident.node {
-            PathListItemKind::Ident { id, name, rename } => hir::PathListIdent {
-                id: id,
-                name: name.name,
-                rename: rename.map(|x| x.name),
-            },
-            PathListItemKind::Mod { id, rename } => hir::PathListMod {
-                id: id,
-                rename: rename.map(|x| x.name),
-            },
-        },
-        span: path_list_ident.span,
-    }
-}
-
-pub fn lower_arm(lctx: &LoweringContext, arm: &Arm) -> hir::Arm {
-    hir::Arm {
-        attrs: lower_attrs(lctx, &arm.attrs),
-        pats: arm.pats.iter().map(|x| lower_pat(lctx, x)).collect(),
-        guard: arm.guard.as_ref().map(|ref x| lower_expr(lctx, x)),
-        body: lower_expr(lctx, &arm.body),
-    }
-}
-
-pub fn lower_decl(lctx: &LoweringContext, d: &Decl) -> P<hir::Decl> {
-    match d.node {
-        DeclKind::Local(ref l) => P(Spanned {
-            node: hir::DeclLocal(lower_local(lctx, l)),
-            span: d.span,
-        }),
-        DeclKind::Item(ref it) => P(Spanned {
-            node: hir::DeclItem(lower_item_id(lctx, it)),
-            span: d.span,
-        }),
-    }
-}
-
-pub fn lower_ty_binding(lctx: &LoweringContext, b: &TypeBinding) -> hir::TypeBinding {
-    hir::TypeBinding {
-        id: b.id,
-        name: b.ident.name,
-        ty: lower_ty(lctx, &b.ty),
-        span: b.span,
-    }
-}
-
-pub fn lower_ty(lctx: &LoweringContext, t: &Ty) -> P<hir::Ty> {
-    use syntax::ast::TyKind::*;
-    P(hir::Ty {
-        id: t.id,
-        node: match t.node {
-            Infer => hir::TyInfer,
-            Vec(ref ty) => hir::TyVec(lower_ty(lctx, ty)),
-            Ptr(ref mt) => hir::TyPtr(lower_mt(lctx, mt)),
-            Rptr(ref region, ref mt) => {
-                hir::TyRptr(lower_opt_lifetime(lctx, region), lower_mt(lctx, mt))
-            }
-            BareFn(ref f) => {
-                hir::TyBareFn(P(hir::BareFnTy {
-                    lifetimes: lower_lifetime_defs(lctx, &f.lifetimes),
-                    unsafety: lower_unsafety(lctx, f.unsafety),
-                    abi: f.abi,
-                    decl: lower_fn_decl(lctx, &f.decl),
-                }))
-            }
-            Tup(ref tys) => hir::TyTup(tys.iter().map(|ty| lower_ty(lctx, ty)).collect()),
-            Paren(ref ty) => {
-                return lower_ty(lctx, ty);
-            }
-            Path(ref qself, ref path) => {
-                let qself = qself.as_ref().map(|&QSelf { ref ty, position }| {
-                    hir::QSelf {
-                        ty: lower_ty(lctx, ty),
-                        position: position,
-                    }
-                });
-                hir::TyPath(qself, lower_path(lctx, path))
-            }
-            ObjectSum(ref ty, ref bounds) => {
-                hir::TyObjectSum(lower_ty(lctx, ty), lower_bounds(lctx, bounds))
-            }
-            FixedLengthVec(ref ty, ref e) => {
-                hir::TyFixedLengthVec(lower_ty(lctx, ty), lower_expr(lctx, e))
-            }
-            Typeof(ref expr) => {
-                hir::TyTypeof(lower_expr(lctx, expr))
-            }
-            PolyTraitRef(ref bounds) => {
-                hir::TyPolyTraitRef(bounds.iter().map(|b| lower_ty_param_bound(lctx, b)).collect())
-            }
-            Mac(_) => panic!("TyMac should have been expanded by now."),
-        },
-        span: t.span,
-    })
-}
-
-pub fn lower_foreign_mod(lctx: &LoweringContext, fm: &ForeignMod) -> hir::ForeignMod {
-    hir::ForeignMod {
-        abi: fm.abi,
-        items: fm.items.iter().map(|x| lower_foreign_item(lctx, x)).collect(),
-    }
-}
-
-pub fn lower_variant(lctx: &LoweringContext, v: &Variant) -> hir::Variant {
-    Spanned {
-        node: hir::Variant_ {
-            name: v.node.name.name,
-            attrs: lower_attrs(lctx, &v.node.attrs),
-            data: lower_variant_data(lctx, &v.node.data),
-            disr_expr: v.node.disr_expr.as_ref().map(|e| lower_expr(lctx, e)),
-        },
-        span: v.span,
-    }
-}
-
-// Path segments are usually unhygienic; hygienic path segments can occur only in
-// identifier-like paths originating from `ExprPath`.
-// Make life simpler for rustc_resolve by renaming only such segments.
-pub fn lower_path_full(lctx: &LoweringContext, p: &Path, maybe_hygienic: bool) -> hir::Path {
-    let maybe_hygienic = maybe_hygienic && !p.global && p.segments.len() == 1;
-    hir::Path {
-        global: p.global,
-        segments: p.segments
-                   .iter()
-                   .map(|&PathSegment { identifier, ref parameters }| {
-                       hir::PathSegment {
-                           identifier: if maybe_hygienic {
-                               lower_ident(lctx, identifier)
-                           } else {
-                               hir::Ident::from_name(identifier.name)
-                           },
-                           parameters: lower_path_parameters(lctx, parameters),
-                       }
-                   })
-                   .collect(),
-        span: p.span,
-    }
-}
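(A minimal sketch of the hygiene gate above; the free function and its name are hypothetical and only restate the condition in the code. Only a bare, single-segment, non-global path such as `x`, and only when it originates from `ExprPath`, may keep its hygienic identifier; paths like `a::b::c` or `::a` always go through `hir::Ident::from_name`.)

    // Mirrors `maybe_hygienic && !p.global && p.segments.len() == 1` above.
    fn path_may_be_hygienic(from_expr_path: bool, global: bool, segments: usize) -> bool {
        from_expr_path && !global && segments == 1
    }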
-
-pub fn lower_path(lctx: &LoweringContext, p: &Path) -> hir::Path {
-    lower_path_full(lctx, p, false)
-}
-
-pub fn lower_path_parameters(lctx: &LoweringContext,
-                             path_parameters: &PathParameters)
-                             -> hir::PathParameters {
-    match *path_parameters {
-        PathParameters::AngleBracketed(ref data) =>
-            hir::AngleBracketedParameters(lower_angle_bracketed_parameter_data(lctx, data)),
-        PathParameters::Parenthesized(ref data) =>
-            hir::ParenthesizedParameters(lower_parenthesized_parameter_data(lctx, data)),
-    }
-}
-
-pub fn lower_angle_bracketed_parameter_data(lctx: &LoweringContext,
-                                            data: &AngleBracketedParameterData)
-                                            -> hir::AngleBracketedParameterData {
-    let &AngleBracketedParameterData { ref lifetimes, ref types, ref bindings } = data;
-    hir::AngleBracketedParameterData {
-        lifetimes: lower_lifetimes(lctx, lifetimes),
-        types: types.iter().map(|ty| lower_ty(lctx, ty)).collect(),
-        bindings: bindings.iter().map(|b| lower_ty_binding(lctx, b)).collect(),
-    }
-}
-
-pub fn lower_parenthesized_parameter_data(lctx: &LoweringContext,
-                                          data: &ParenthesizedParameterData)
-                                          -> hir::ParenthesizedParameterData {
-    let &ParenthesizedParameterData { ref inputs, ref output, span } = data;
-    hir::ParenthesizedParameterData {
-        inputs: inputs.iter().map(|ty| lower_ty(lctx, ty)).collect(),
-        output: output.as_ref().map(|ty| lower_ty(lctx, ty)),
-        span: span,
-    }
-}
-
-pub fn lower_local(lctx: &LoweringContext, l: &Local) -> P<hir::Local> {
-    P(hir::Local {
-        id: l.id,
-        ty: l.ty.as_ref().map(|t| lower_ty(lctx, t)),
-        pat: lower_pat(lctx, &l.pat),
-        init: l.init.as_ref().map(|e| lower_expr(lctx, e)),
-        span: l.span,
-        attrs: l.attrs.clone(),
-    })
-}
-
-pub fn lower_explicit_self_underscore(lctx: &LoweringContext,
-                                      es: &SelfKind)
-                                      -> hir::ExplicitSelf_ {
-    match *es {
-        SelfKind::Static => hir::SelfStatic,
-        SelfKind::Value(v) => hir::SelfValue(v.name),
-        SelfKind::Region(ref lifetime, m, ident) => {
-            hir::SelfRegion(lower_opt_lifetime(lctx, lifetime),
-                            lower_mutability(lctx, m),
-                            ident.name)
-        }
-        SelfKind::Explicit(ref typ, ident) => {
-            hir::SelfExplicit(lower_ty(lctx, typ), ident.name)
-        }
-    }
-}
-
-pub fn lower_mutability(_lctx: &LoweringContext, m: Mutability) -> hir::Mutability {
-    match m {
-        Mutability::Mutable => hir::MutMutable,
-        Mutability::Immutable => hir::MutImmutable,
-    }
-}
-
-pub fn lower_explicit_self(lctx: &LoweringContext, s: &ExplicitSelf) -> hir::ExplicitSelf {
-    Spanned {
-        node: lower_explicit_self_underscore(lctx, &s.node),
-        span: s.span,
-    }
-}
-
-pub fn lower_arg(lctx: &LoweringContext, arg: &Arg) -> hir::Arg {
-    hir::Arg {
-        id: arg.id,
-        pat: lower_pat(lctx, &arg.pat),
-        ty: lower_ty(lctx, &arg.ty),
-    }
-}
-
-pub fn lower_fn_decl(lctx: &LoweringContext, decl: &FnDecl) -> P<hir::FnDecl> {
-    P(hir::FnDecl {
-        inputs: decl.inputs.iter().map(|x| lower_arg(lctx, x)).collect(),
-        output: match decl.output {
-            FunctionRetTy::Ty(ref ty) => hir::Return(lower_ty(lctx, ty)),
-            FunctionRetTy::Default(span) => hir::DefaultReturn(span),
-            FunctionRetTy::None(span) => hir::NoReturn(span),
-        },
-        variadic: decl.variadic,
-    })
-}
-
-pub fn lower_ty_param_bound(lctx: &LoweringContext, tpb: &TyParamBound) -> hir::TyParamBound {
-    match *tpb {
-        TraitTyParamBound(ref ty, modifier) => {
-            hir::TraitTyParamBound(lower_poly_trait_ref(lctx, ty),
-                                   lower_trait_bound_modifier(lctx, modifier))
-        }
-        RegionTyParamBound(ref lifetime) => {
-            hir::RegionTyParamBound(lower_lifetime(lctx, lifetime))
-        }
-    }
-}
-
-pub fn lower_ty_param(lctx: &LoweringContext, tp: &TyParam) -> hir::TyParam {
-    hir::TyParam {
-        id: tp.id,
-        name: tp.ident.name,
-        bounds: lower_bounds(lctx, &tp.bounds),
-        default: tp.default.as_ref().map(|x| lower_ty(lctx, x)),
-        span: tp.span,
-    }
-}
-
-pub fn lower_ty_params(lctx: &LoweringContext,
-                       tps: &P<[TyParam]>)
-                       -> hir::HirVec<hir::TyParam> {
-    tps.iter().map(|tp| lower_ty_param(lctx, tp)).collect()
-}
-
-pub fn lower_lifetime(_lctx: &LoweringContext, l: &Lifetime) -> hir::Lifetime {
-    hir::Lifetime {
-        id: l.id,
-        name: l.name,
-        span: l.span,
-    }
-}
-
-pub fn lower_lifetime_def(lctx: &LoweringContext, l: &LifetimeDef) -> hir::LifetimeDef {
-    hir::LifetimeDef {
-        lifetime: lower_lifetime(lctx, &l.lifetime),
-        bounds: lower_lifetimes(lctx, &l.bounds),
-    }
-}
-
-pub fn lower_lifetimes(lctx: &LoweringContext, lts: &Vec<Lifetime>) -> hir::HirVec<hir::Lifetime> {
-    lts.iter().map(|l| lower_lifetime(lctx, l)).collect()
-}
-
-pub fn lower_lifetime_defs(lctx: &LoweringContext,
-                           lts: &Vec<LifetimeDef>)
-                           -> hir::HirVec<hir::LifetimeDef> {
-    lts.iter().map(|l| lower_lifetime_def(lctx, l)).collect()
-}
-
-pub fn lower_opt_lifetime(lctx: &LoweringContext,
-                          o_lt: &Option<Lifetime>)
-                          -> Option<hir::Lifetime> {
-    o_lt.as_ref().map(|lt| lower_lifetime(lctx, lt))
-}
-
-pub fn lower_generics(lctx: &LoweringContext, g: &Generics) -> hir::Generics {
-    hir::Generics {
-        ty_params: lower_ty_params(lctx, &g.ty_params),
-        lifetimes: lower_lifetime_defs(lctx, &g.lifetimes),
-        where_clause: lower_where_clause(lctx, &g.where_clause),
-    }
-}
-
-pub fn lower_where_clause(lctx: &LoweringContext, wc: &WhereClause) -> hir::WhereClause {
-    hir::WhereClause {
-        id: wc.id,
-        predicates: wc.predicates
-                      .iter()
-                      .map(|predicate| lower_where_predicate(lctx, predicate))
-                      .collect(),
-    }
-}
-
-pub fn lower_where_predicate(lctx: &LoweringContext,
-                             pred: &WherePredicate)
-                             -> hir::WherePredicate {
-    match *pred {
-        WherePredicate::BoundPredicate(WhereBoundPredicate{ ref bound_lifetimes,
-                                                            ref bounded_ty,
-                                                            ref bounds,
-                                                            span}) => {
-            hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
-                bound_lifetimes: lower_lifetime_defs(lctx, bound_lifetimes),
-                bounded_ty: lower_ty(lctx, bounded_ty),
-                bounds: bounds.iter().map(|x| lower_ty_param_bound(lctx, x)).collect(),
-                span: span,
-            })
-        }
-        WherePredicate::RegionPredicate(WhereRegionPredicate{ ref lifetime,
-                                                              ref bounds,
-                                                              span}) => {
-            hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
-                span: span,
-                lifetime: lower_lifetime(lctx, lifetime),
-                bounds: bounds.iter().map(|bound| lower_lifetime(lctx, bound)).collect(),
-            })
-        }
-        WherePredicate::EqPredicate(WhereEqPredicate{ id,
-                                                      ref path,
-                                                      ref ty,
-                                                      span}) => {
-            hir::WherePredicate::EqPredicate(hir::WhereEqPredicate {
-                id: id,
-                path: lower_path(lctx, path),
-                ty: lower_ty(lctx, ty),
-                span: span,
-            })
-        }
-    }
-}
-
-pub fn lower_variant_data(lctx: &LoweringContext, vdata: &VariantData) -> hir::VariantData {
-    match *vdata {
-        VariantData::Struct(ref fields, id) => {
-            hir::VariantData::Struct(fields.iter()
-                                           .map(|f| lower_struct_field(lctx, f))
-                                           .collect(),
-                                     id)
-        }
-        VariantData::Tuple(ref fields, id) => {
-            hir::VariantData::Tuple(fields.iter()
-                                          .map(|f| lower_struct_field(lctx, f))
-                                          .collect(),
-                                    id)
-        }
-        VariantData::Unit(id) => hir::VariantData::Unit(id),
-    }
-}
-
-pub fn lower_trait_ref(lctx: &LoweringContext, p: &TraitRef) -> hir::TraitRef {
-    hir::TraitRef {
-        path: lower_path(lctx, &p.path),
-        ref_id: p.ref_id,
-    }
-}
-
-pub fn lower_poly_trait_ref(lctx: &LoweringContext, p: &PolyTraitRef) -> hir::PolyTraitRef {
-    hir::PolyTraitRef {
-        bound_lifetimes: lower_lifetime_defs(lctx, &p.bound_lifetimes),
-        trait_ref: lower_trait_ref(lctx, &p.trait_ref),
-        span: p.span,
-    }
-}
-
-pub fn lower_struct_field(lctx: &LoweringContext, f: &StructField) -> hir::StructField {
-    Spanned {
-        node: hir::StructField_ {
-            id: f.node.id,
-            kind: lower_struct_field_kind(lctx, &f.node.kind),
-            ty: lower_ty(lctx, &f.node.ty),
-            attrs: lower_attrs(lctx, &f.node.attrs),
-        },
-        span: f.span,
-    }
-}
-
-pub fn lower_field(lctx: &LoweringContext, f: &Field) -> hir::Field {
-    hir::Field {
-        name: respan(f.ident.span, f.ident.node.name),
-        expr: lower_expr(lctx, &f.expr),
-        span: f.span,
-    }
-}
-
-pub fn lower_mt(lctx: &LoweringContext, mt: &MutTy) -> hir::MutTy {
-    hir::MutTy {
-        ty: lower_ty(lctx, &mt.ty),
-        mutbl: lower_mutability(lctx, mt.mutbl),
-    }
-}
-
-pub fn lower_opt_bounds(lctx: &LoweringContext,
-                        b: &Option<TyParamBounds>)
-                        -> Option<hir::TyParamBounds> {
-    b.as_ref().map(|ref bounds| lower_bounds(lctx, bounds))
-}
-
-fn lower_bounds(lctx: &LoweringContext, bounds: &TyParamBounds) -> hir::TyParamBounds {
-    bounds.iter().map(|bound| lower_ty_param_bound(lctx, bound)).collect()
-}
-
-pub fn lower_block(lctx: &LoweringContext, b: &Block) -> P<hir::Block> {
-    P(hir::Block {
-        id: b.id,
-        stmts: b.stmts.iter().map(|s| lower_stmt(lctx, s)).collect(),
-        expr: b.expr.as_ref().map(|ref x| lower_expr(lctx, x)),
-        rules: lower_block_check_mode(lctx, &b.rules),
-        span: b.span,
-    })
-}
-
-pub fn lower_item_kind(lctx: &LoweringContext, i: &ItemKind) -> hir::Item_ {
-    match *i {
-        ItemKind::ExternCrate(string) => hir::ItemExternCrate(string),
-        ItemKind::Use(ref view_path) => {
-            hir::ItemUse(lower_view_path(lctx, view_path))
-        }
-        ItemKind::Static(ref t, m, ref e) => {
-            hir::ItemStatic(lower_ty(lctx, t),
-                            lower_mutability(lctx, m),
-                            lower_expr(lctx, e))
-        }
-        ItemKind::Const(ref t, ref e) => {
-            hir::ItemConst(lower_ty(lctx, t), lower_expr(lctx, e))
-        }
-        ItemKind::Fn(ref decl, unsafety, constness, abi, ref generics, ref body) => {
-            hir::ItemFn(lower_fn_decl(lctx, decl),
-                        lower_unsafety(lctx, unsafety),
-                        lower_constness(lctx, constness),
-                        abi,
-                        lower_generics(lctx, generics),
-                        lower_block(lctx, body))
-        }
-        ItemKind::Mod(ref m) => hir::ItemMod(lower_mod(lctx, m)),
-        ItemKind::ForeignMod(ref nm) => hir::ItemForeignMod(lower_foreign_mod(lctx, nm)),
-        ItemKind::Ty(ref t, ref generics) => {
-            hir::ItemTy(lower_ty(lctx, t), lower_generics(lctx, generics))
-        }
-        ItemKind::Enum(ref enum_definition, ref generics) => {
-            hir::ItemEnum(hir::EnumDef {
-                              variants: enum_definition.variants
-                                                       .iter()
-                                                       .map(|x| lower_variant(lctx, x))
-                                                       .collect(),
-                          },
-                          lower_generics(lctx, generics))
-        }
-        ItemKind::Struct(ref struct_def, ref generics) => {
-            let struct_def = lower_variant_data(lctx, struct_def);
-            hir::ItemStruct(struct_def, lower_generics(lctx, generics))
-        }
-        ItemKind::DefaultImpl(unsafety, ref trait_ref) => {
-            hir::ItemDefaultImpl(lower_unsafety(lctx, unsafety),
-                                 lower_trait_ref(lctx, trait_ref))
-        }
-        ItemKind::Impl(unsafety, polarity, ref generics, ref ifce, ref ty, ref impl_items) => {
-            let new_impl_items = impl_items.iter()
-                                           .map(|item| lower_impl_item(lctx, item))
-                                           .collect();
-            let ifce = ifce.as_ref().map(|trait_ref| lower_trait_ref(lctx, trait_ref));
-            hir::ItemImpl(lower_unsafety(lctx, unsafety),
-                          lower_impl_polarity(lctx, polarity),
-                          lower_generics(lctx, generics),
-                          ifce,
-                          lower_ty(lctx, ty),
-                          new_impl_items)
-        }
-        ItemKind::Trait(unsafety, ref generics, ref bounds, ref items) => {
-            let bounds = lower_bounds(lctx, bounds);
-            let items = items.iter().map(|item| lower_trait_item(lctx, item)).collect();
-            hir::ItemTrait(lower_unsafety(lctx, unsafety),
-                           lower_generics(lctx, generics),
-                           bounds,
-                           items)
-        }
-        ItemKind::Mac(_) => panic!("Shouldn't still be around"),
-    }
-}
-
-pub fn lower_trait_item(lctx: &LoweringContext, i: &TraitItem) -> hir::TraitItem {
-    hir::TraitItem {
-        id: i.id,
-        name: i.ident.name,
-        attrs: lower_attrs(lctx, &i.attrs),
-        node: match i.node {
-            TraitItemKind::Const(ref ty, ref default) => {
-                hir::ConstTraitItem(lower_ty(lctx, ty),
-                                    default.as_ref().map(|x| lower_expr(lctx, x)))
-            }
-            TraitItemKind::Method(ref sig, ref body) => {
-                hir::MethodTraitItem(lower_method_sig(lctx, sig),
-                                     body.as_ref().map(|x| lower_block(lctx, x)))
-            }
-            TraitItemKind::Type(ref bounds, ref default) => {
-                hir::TypeTraitItem(lower_bounds(lctx, bounds),
-                                   default.as_ref().map(|x| lower_ty(lctx, x)))
-            }
-        },
-        span: i.span,
-    }
-}
-
-pub fn lower_impl_item(lctx: &LoweringContext, i: &ImplItem) -> hir::ImplItem {
-    hir::ImplItem {
-        id: i.id,
-        name: i.ident.name,
-        attrs: lower_attrs(lctx, &i.attrs),
-        vis: lower_visibility(lctx, i.vis),
-        node: match i.node {
-            ImplItemKind::Const(ref ty, ref expr) => {
-                hir::ImplItemKind::Const(lower_ty(lctx, ty), lower_expr(lctx, expr))
-            }
-            ImplItemKind::Method(ref sig, ref body) => {
-                hir::ImplItemKind::Method(lower_method_sig(lctx, sig), lower_block(lctx, body))
-            }
-            ImplItemKind::Type(ref ty) => hir::ImplItemKind::Type(lower_ty(lctx, ty)),
-            ImplItemKind::Macro(..) => panic!("Shouldn't exist any more"),
-        },
-        span: i.span,
-    }
-}
-
-pub fn lower_mod(lctx: &LoweringContext, m: &Mod) -> hir::Mod {
-    hir::Mod {
-        inner: m.inner,
-        item_ids: m.items.iter().map(|x| lower_item_id(lctx, x)).collect(),
-    }
-}
-
-struct ItemLowerer<'lcx, 'interner: 'lcx> {
-    items: BTreeMap<NodeId, hir::Item>,
-    lctx: &'lcx LoweringContext<'interner>,
-}
-
-impl<'lcx, 'interner> Visitor<'lcx> for ItemLowerer<'lcx, 'interner> {
-    fn visit_item(&mut self, item: &'lcx Item) {
-        self.items.insert(item.id, lower_item(self.lctx, item));
-        visit::walk_item(self, item);
-    }
-}
-
-pub fn lower_crate(lctx: &LoweringContext, c: &Crate) -> hir::Crate {
-    let items = {
-        let mut item_lowerer = ItemLowerer { items: BTreeMap::new(), lctx: lctx };
-        visit::walk_crate(&mut item_lowerer, c);
-        item_lowerer.items
-    };
-
-    hir::Crate {
-        module: lower_mod(lctx, &c.module),
-        attrs: lower_attrs(lctx, &c.attrs),
-        config: c.config.clone().into(),
-        span: c.span,
-        exported_macros: c.exported_macros.iter().map(|m| lower_macro_def(lctx, m)).collect(),
-        items: items,
-    }
-}
-
-pub fn lower_macro_def(lctx: &LoweringContext, m: &MacroDef) -> hir::MacroDef {
-    hir::MacroDef {
-        name: m.ident.name,
-        attrs: lower_attrs(lctx, &m.attrs),
-        id: m.id,
-        span: m.span,
-        imported_from: m.imported_from.map(|x| x.name),
-        export: m.export,
-        use_locally: m.use_locally,
-        allow_internal_unstable: m.allow_internal_unstable,
-        body: m.body.clone().into(),
-    }
-}
-
-pub fn lower_item_id(_lctx: &LoweringContext, i: &Item) -> hir::ItemId {
-    hir::ItemId { id: i.id }
-}
-
-pub fn lower_item(lctx: &LoweringContext, i: &Item) -> hir::Item {
-    let node = lower_item_kind(lctx, &i.node);
-
-    hir::Item {
-        id: i.id,
-        name: i.ident.name,
-        attrs: lower_attrs(lctx, &i.attrs),
-        node: node,
-        vis: lower_visibility(lctx, i.vis),
-        span: i.span,
-    }
-}
-
-pub fn lower_foreign_item(lctx: &LoweringContext, i: &ForeignItem) -> hir::ForeignItem {
-    hir::ForeignItem {
-        id: i.id,
-        name: i.ident.name,
-        attrs: lower_attrs(lctx, &i.attrs),
-        node: match i.node {
-            ForeignItemKind::Fn(ref fdec, ref generics) => {
-                hir::ForeignItemFn(lower_fn_decl(lctx, fdec), lower_generics(lctx, generics))
-            }
-            ForeignItemKind::Static(ref t, m) => {
-                hir::ForeignItemStatic(lower_ty(lctx, t), m)
-            }
-        },
-        vis: lower_visibility(lctx, i.vis),
-        span: i.span,
-    }
-}
-
-pub fn lower_method_sig(lctx: &LoweringContext, sig: &MethodSig) -> hir::MethodSig {
-    hir::MethodSig {
-        generics: lower_generics(lctx, &sig.generics),
-        abi: sig.abi,
-        explicit_self: lower_explicit_self(lctx, &sig.explicit_self),
-        unsafety: lower_unsafety(lctx, sig.unsafety),
-        constness: lower_constness(lctx, sig.constness),
-        decl: lower_fn_decl(lctx, &sig.decl),
-    }
-}
-
-pub fn lower_unsafety(_lctx: &LoweringContext, u: Unsafety) -> hir::Unsafety {
-    match u {
-        Unsafety::Unsafe => hir::Unsafety::Unsafe,
-        Unsafety::Normal => hir::Unsafety::Normal,
-    }
-}
-
-pub fn lower_constness(_lctx: &LoweringContext, c: Constness) -> hir::Constness {
-    match c {
-        Constness::Const => hir::Constness::Const,
-        Constness::NotConst => hir::Constness::NotConst,
-    }
-}
-
-pub fn lower_unop(_lctx: &LoweringContext, u: UnOp) -> hir::UnOp {
-    match u {
-        UnOp::Deref => hir::UnDeref,
-        UnOp::Not => hir::UnNot,
-        UnOp::Neg => hir::UnNeg,
-    }
-}
-
-pub fn lower_binop(_lctx: &LoweringContext, b: BinOp) -> hir::BinOp {
-    Spanned {
-        node: match b.node {
-            BinOpKind::Add => hir::BiAdd,
-            BinOpKind::Sub => hir::BiSub,
-            BinOpKind::Mul => hir::BiMul,
-            BinOpKind::Div => hir::BiDiv,
-            BinOpKind::Rem => hir::BiRem,
-            BinOpKind::And => hir::BiAnd,
-            BinOpKind::Or => hir::BiOr,
-            BinOpKind::BitXor => hir::BiBitXor,
-            BinOpKind::BitAnd => hir::BiBitAnd,
-            BinOpKind::BitOr => hir::BiBitOr,
-            BinOpKind::Shl => hir::BiShl,
-            BinOpKind::Shr => hir::BiShr,
-            BinOpKind::Eq => hir::BiEq,
-            BinOpKind::Lt => hir::BiLt,
-            BinOpKind::Le => hir::BiLe,
-            BinOpKind::Ne => hir::BiNe,
-            BinOpKind::Ge => hir::BiGe,
-            BinOpKind::Gt => hir::BiGt,
-        },
-        span: b.span,
-    }
-}
-
-pub fn lower_pat(lctx: &LoweringContext, p: &Pat) -> P<hir::Pat> {
-    P(hir::Pat {
-        id: p.id,
-        node: match p.node {
-            PatKind::Wild => hir::PatKind::Wild,
-            PatKind::Ident(ref binding_mode, pth1, ref sub) => {
-                hir::PatKind::Ident(lower_binding_mode(lctx, binding_mode),
-                              respan(pth1.span, lower_ident(lctx, pth1.node)),
-                              sub.as_ref().map(|x| lower_pat(lctx, x)))
-            }
-            PatKind::Lit(ref e) => hir::PatKind::Lit(lower_expr(lctx, e)),
-            PatKind::TupleStruct(ref pth, ref pats) => {
-                hir::PatKind::TupleStruct(lower_path(lctx, pth),
-                             pats.as_ref()
-                                 .map(|pats| pats.iter().map(|x| lower_pat(lctx, x)).collect()))
-            }
-            PatKind::Path(ref pth) => {
-                hir::PatKind::Path(lower_path(lctx, pth))
-            }
-            PatKind::QPath(ref qself, ref pth) => {
-                let qself = hir::QSelf {
-                    ty: lower_ty(lctx, &qself.ty),
-                    position: qself.position,
-                };
-                hir::PatKind::QPath(qself, lower_path(lctx, pth))
-            }
-            PatKind::Struct(ref pth, ref fields, etc) => {
-                let pth = lower_path(lctx, pth);
-                let fs = fields.iter()
-                               .map(|f| {
-                                   Spanned {
-                                       span: f.span,
-                                       node: hir::FieldPat {
-                                           name: f.node.ident.name,
-                                           pat: lower_pat(lctx, &f.node.pat),
-                                           is_shorthand: f.node.is_shorthand,
-                                       },
-                                   }
-                               })
-                               .collect();
-                hir::PatKind::Struct(pth, fs, etc)
-            }
-            PatKind::Tup(ref elts) => {
-                hir::PatKind::Tup(elts.iter().map(|x| lower_pat(lctx, x)).collect())
-            }
-            PatKind::Box(ref inner) => hir::PatKind::Box(lower_pat(lctx, inner)),
-            PatKind::Ref(ref inner, mutbl) => {
-                hir::PatKind::Ref(lower_pat(lctx, inner), lower_mutability(lctx, mutbl))
-            }
-            PatKind::Range(ref e1, ref e2) => {
-                hir::PatKind::Range(lower_expr(lctx, e1), lower_expr(lctx, e2))
-            }
-            PatKind::Vec(ref before, ref slice, ref after) => {
-                hir::PatKind::Vec(before.iter().map(|x| lower_pat(lctx, x)).collect(),
-                            slice.as_ref().map(|x| lower_pat(lctx, x)),
-                            after.iter().map(|x| lower_pat(lctx, x)).collect())
-            }
-            PatKind::Mac(_) => panic!("Shouldn't exist here"),
-        },
-        span: p.span,
-    })
-}
-
-pub fn lower_expr(lctx: &LoweringContext, e: &Expr) -> P<hir::Expr> {
-    P(hir::Expr {
-        id: e.id,
-        node: match e.node {
-            // Issue #22181:
-            // Eventually a desugaring for `box EXPR`
-            // (similar to the desugaring below for `in PLACE BLOCK`)
-            // should go here, desugaring
-            //
-            // to:
-            //
-            // let mut place = BoxPlace::make_place();
-            // let raw_place = Place::pointer(&mut place);
-            // let value = $value;
-            // unsafe {
-            //     ::std::ptr::write(raw_place, value);
-            //     Boxed::finalize(place)
-            // }
-            //
-            // But for now there are type-inference issues doing that.
-            ExprKind::Box(ref e) => {
-                hir::ExprBox(lower_expr(lctx, e))
-            }
-
-            // Desugar ExprKind::InPlace: `in (PLACE) EXPR`
-            ExprKind::InPlace(ref placer, ref value_expr) => {
-                // to:
-                //
-                // let p = PLACE;
-                // let mut place = Placer::make_place(p);
-                // let raw_place = Place::pointer(&mut place);
-                // push_unsafe!({
-                //     std::intrinsics::move_val_init(raw_place, pop_unsafe!( EXPR ));
-                //     InPlace::finalize(place)
-                // })
-                return cache_ids(lctx, e.id, |lctx| {
-                    let placer_expr = lower_expr(lctx, placer);
-                    let value_expr = lower_expr(lctx, value_expr);
-
-                    let placer_ident = lctx.str_to_ident("placer");
-                    let place_ident = lctx.str_to_ident("place");
-                    let p_ptr_ident = lctx.str_to_ident("p_ptr");
-
-                    let make_place = ["ops", "Placer", "make_place"];
-                    let place_pointer = ["ops", "Place", "pointer"];
-                    let move_val_init = ["intrinsics", "move_val_init"];
-                    let inplace_finalize = ["ops", "InPlace", "finalize"];
-
-                    let make_call = |lctx: &LoweringContext, p, args| {
-                        let path = core_path(lctx, e.span, p);
-                        let path = expr_path(lctx, path, None);
-                        expr_call(lctx, e.span, path, args, None)
-                    };
-
-                    let mk_stmt_let = |lctx: &LoweringContext, bind, expr| {
-                        stmt_let(lctx, e.span, false, bind, expr, None)
-                    };
-
-                    let mk_stmt_let_mut = |lctx: &LoweringContext, bind, expr| {
-                        stmt_let(lctx, e.span, true, bind, expr, None)
-                    };
-
-                    // let placer = <placer_expr> ;
-                    let s1 = {
-                        let placer_expr = signal_block_expr(lctx,
-                                                            hir_vec![],
-                                                            placer_expr,
-                                                            e.span,
-                                                            hir::PopUnstableBlock,
-                                                            None);
-                        mk_stmt_let(lctx, placer_ident, placer_expr)
-                    };
-
-                    // let mut place = Placer::make_place(placer);
-                    let s2 = {
-                        let placer = expr_ident(lctx, e.span, placer_ident, None);
-                        let call = make_call(lctx, &make_place, hir_vec![placer]);
-                        mk_stmt_let_mut(lctx, place_ident, call)
-                    };
-
-                    // let p_ptr = Place::pointer(&mut place);
-                    let s3 = {
-                        let agent = expr_ident(lctx, e.span, place_ident, None);
-                        let args = hir_vec![expr_mut_addr_of(lctx, e.span, agent, None)];
-                        let call = make_call(lctx, &place_pointer, args);
-                        mk_stmt_let(lctx, p_ptr_ident, call)
-                    };
-
-                    // pop_unsafe!( EXPR )
-                    let pop_unsafe_expr = {
-                        let value_expr = signal_block_expr(lctx,
-                                                           hir_vec![],
-                                                           value_expr,
-                                                           e.span,
-                                                           hir::PopUnstableBlock,
-                                                           None);
-                        signal_block_expr(lctx,
-                                          hir_vec![],
-                                          value_expr,
-                                          e.span,
-                                          hir::PopUnsafeBlock(hir::CompilerGenerated), None)
-                    };
-
-                    // push_unsafe!({
-                    //     std::intrinsics::move_val_init(raw_place, pop_unsafe!( EXPR ));
-                    //     InPlace::finalize(place)
-                    // })
-                    let expr = {
-                        let ptr = expr_ident(lctx, e.span, p_ptr_ident, None);
-                        let call_move_val_init =
-                            hir::StmtSemi(
-                                make_call(lctx, &move_val_init, hir_vec![ptr, pop_unsafe_expr]),
-                                lctx.next_id());
-                        let call_move_val_init = respan(e.span, call_move_val_init);
-
-                        let place = expr_ident(lctx, e.span, place_ident, None);
-                        let call = make_call(lctx, &inplace_finalize, hir_vec![place]);
-                        signal_block_expr(lctx,
-                                          hir_vec![call_move_val_init],
-                                          call,
-                                          e.span,
-                                          hir::PushUnsafeBlock(hir::CompilerGenerated), None)
-                    };
-
-                    signal_block_expr(lctx,
-                                      hir_vec![s1, s2, s3],
-                                      expr,
-                                      e.span,
-                                      hir::PushUnstableBlock,
-                                      e.attrs.clone())
-                });
-            }
-
-            ExprKind::Vec(ref exprs) => {
-                hir::ExprVec(exprs.iter().map(|x| lower_expr(lctx, x)).collect())
-            }
-            ExprKind::Repeat(ref expr, ref count) => {
-                let expr = lower_expr(lctx, expr);
-                let count = lower_expr(lctx, count);
-                hir::ExprRepeat(expr, count)
-            }
-            ExprKind::Tup(ref elts) => {
-                hir::ExprTup(elts.iter().map(|x| lower_expr(lctx, x)).collect())
-            }
-            ExprKind::Call(ref f, ref args) => {
-                let f = lower_expr(lctx, f);
-                hir::ExprCall(f, args.iter().map(|x| lower_expr(lctx, x)).collect())
-            }
-            ExprKind::MethodCall(i, ref tps, ref args) => {
-                let tps = tps.iter().map(|x| lower_ty(lctx, x)).collect();
-                let args = args.iter().map(|x| lower_expr(lctx, x)).collect();
-                hir::ExprMethodCall(respan(i.span, i.node.name), tps, args)
-            }
-            ExprKind::Binary(binop, ref lhs, ref rhs) => {
-                let binop = lower_binop(lctx, binop);
-                let lhs = lower_expr(lctx, lhs);
-                let rhs = lower_expr(lctx, rhs);
-                hir::ExprBinary(binop, lhs, rhs)
-            }
-            ExprKind::Unary(op, ref ohs) => {
-                let op = lower_unop(lctx, op);
-                let ohs = lower_expr(lctx, ohs);
-                hir::ExprUnary(op, ohs)
-            }
-            ExprKind::Lit(ref l) => hir::ExprLit(P((**l).clone())),
-            ExprKind::Cast(ref expr, ref ty) => {
-                let expr = lower_expr(lctx, expr);
-                hir::ExprCast(expr, lower_ty(lctx, ty))
-            }
-            ExprKind::Type(ref expr, ref ty) => {
-                let expr = lower_expr(lctx, expr);
-                hir::ExprType(expr, lower_ty(lctx, ty))
-            }
-            ExprKind::AddrOf(m, ref ohs) => {
-                let m = lower_mutability(lctx, m);
-                let ohs = lower_expr(lctx, ohs);
-                hir::ExprAddrOf(m, ohs)
-            }
-            // More complicated than you might expect because the else branch
-            // might be `if let`.
-            ExprKind::If(ref cond, ref blk, ref else_opt) => {
-                let else_opt = else_opt.as_ref().map(|els| {
-                    match els.node {
-                        ExprKind::IfLet(..) => {
-                            cache_ids(lctx, e.id, |lctx| {
-                                // wrap the if-let expr in a block
-                                let span = els.span;
-                                let els = lower_expr(lctx, els);
-                                let id = lctx.next_id();
-                                let blk = P(hir::Block {
-                                    stmts: hir_vec![],
-                                    expr: Some(els),
-                                    id: id,
-                                    rules: hir::DefaultBlock,
-                                    span: span,
-                                });
-                                expr_block(lctx, blk, None)
-                            })
-                        }
-                        _ => lower_expr(lctx, els),
-                    }
-                });
-
-                hir::ExprIf(lower_expr(lctx, cond), lower_block(lctx, blk), else_opt)
-            }
-            ExprKind::While(ref cond, ref body, opt_ident) => {
-                hir::ExprWhile(lower_expr(lctx, cond), lower_block(lctx, body),
-                               opt_ident.map(|ident| lower_ident(lctx, ident)))
-            }
-            ExprKind::Loop(ref body, opt_ident) => {
-                hir::ExprLoop(lower_block(lctx, body),
-                              opt_ident.map(|ident| lower_ident(lctx, ident)))
-            }
-            ExprKind::Match(ref expr, ref arms) => {
-                hir::ExprMatch(lower_expr(lctx, expr),
-                               arms.iter().map(|x| lower_arm(lctx, x)).collect(),
-                               hir::MatchSource::Normal)
-            }
-            ExprKind::Closure(capture_clause, ref decl, ref body) => {
-                hir::ExprClosure(lower_capture_clause(lctx, capture_clause),
-                                 lower_fn_decl(lctx, decl),
-                                 lower_block(lctx, body))
-            }
-            ExprKind::Block(ref blk) => hir::ExprBlock(lower_block(lctx, blk)),
-            ExprKind::Assign(ref el, ref er) => {
-                hir::ExprAssign(lower_expr(lctx, el), lower_expr(lctx, er))
-            }
-            ExprKind::AssignOp(op, ref el, ref er) => {
-                hir::ExprAssignOp(lower_binop(lctx, op),
-                                  lower_expr(lctx, el),
-                                  lower_expr(lctx, er))
-            }
-            ExprKind::Field(ref el, ident) => {
-                hir::ExprField(lower_expr(lctx, el), respan(ident.span, ident.node.name))
-            }
-            ExprKind::TupField(ref el, ident) => {
-                hir::ExprTupField(lower_expr(lctx, el), ident)
-            }
-            ExprKind::Index(ref el, ref er) => {
-                hir::ExprIndex(lower_expr(lctx, el), lower_expr(lctx, er))
-            }
-            ExprKind::Range(ref e1, ref e2) => {
-                hir::ExprRange(e1.as_ref().map(|x| lower_expr(lctx, x)),
-                               e2.as_ref().map(|x| lower_expr(lctx, x)))
-            }
-            ExprKind::Path(ref qself, ref path) => {
-                let hir_qself = qself.as_ref().map(|&QSelf { ref ty, position }| {
-                    hir::QSelf {
-                        ty: lower_ty(lctx, ty),
-                        position: position,
-                    }
-                });
-                hir::ExprPath(hir_qself, lower_path_full(lctx, path, qself.is_none()))
-            }
-            ExprKind::Break(opt_ident) => hir::ExprBreak(opt_ident.map(|sp_ident| {
-                respan(sp_ident.span, lower_ident(lctx, sp_ident.node))
-            })),
-            ExprKind::Again(opt_ident) => hir::ExprAgain(opt_ident.map(|sp_ident| {
-                respan(sp_ident.span, lower_ident(lctx, sp_ident.node))
-            })),
-            ExprKind::Ret(ref e) => hir::ExprRet(e.as_ref().map(|x| lower_expr(lctx, x))),
-            ExprKind::InlineAsm(InlineAsm {
-                    ref inputs,
-                    ref outputs,
-                    ref asm,
-                    asm_str_style,
-                    ref clobbers,
-                    volatile,
-                    alignstack,
-                    dialect,
-                    expn_id,
-                }) => hir::ExprInlineAsm(hir::InlineAsm {
-                inputs: inputs.iter()
-                              .map(|&(ref c, ref input)| (c.clone(), lower_expr(lctx, input)))
-                              .collect(),
-                outputs: outputs.iter()
-                                .map(|out| {
-                                    hir::InlineAsmOutput {
-                                        constraint: out.constraint.clone(),
-                                        expr: lower_expr(lctx, &out.expr),
-                                        is_rw: out.is_rw,
-                                        is_indirect: out.is_indirect,
-                                    }
-                                })
-                                .collect(),
-                asm: asm.clone(),
-                asm_str_style: asm_str_style,
-                clobbers: clobbers.clone().into(),
-                volatile: volatile,
-                alignstack: alignstack,
-                dialect: dialect,
-                expn_id: expn_id,
-            }),
-            ExprKind::Struct(ref path, ref fields, ref maybe_expr) => {
-                hir::ExprStruct(lower_path(lctx, path),
-                                fields.iter().map(|x| lower_field(lctx, x)).collect(),
-                                maybe_expr.as_ref().map(|x| lower_expr(lctx, x)))
-            }
-            ExprKind::Paren(ref ex) => {
-                // merge attributes into the inner expression.
-                return lower_expr(lctx, ex).map(|mut ex| {
-                    ex.attrs.update(|attrs| {
-                        attrs.prepend(e.attrs.clone())
-                    });
-                    ex
-                });
-            }
-
-            // Desugar ExprIfLet
-            // From: `if let <pat> = <sub_expr> <body> [<else_opt>]`
-            ExprKind::IfLet(ref pat, ref sub_expr, ref body, ref else_opt) => {
-                // to:
-                //
-                //   match <sub_expr> {
-                //     <pat> => <body>,
-                //     [_ if <else_opt_if_cond> => <else_opt_if_body>,]
-                //     _ => [<else_opt> | ()]
-                //   }
-
-                return cache_ids(lctx, e.id, |lctx| {
-                    // `<pat> => <body>`
-                    let pat_arm = {
-                        let body = lower_block(lctx, body);
-                        let body_expr = expr_block(lctx, body, None);
-                        arm(hir_vec![lower_pat(lctx, pat)], body_expr)
-                    };
-
-                    // `[_ if <else_opt_if_cond> => <else_opt_if_body>,]`
-                    let mut else_opt = else_opt.as_ref().map(|e| lower_expr(lctx, e));
-                    let else_if_arms = {
-                        let mut arms = vec![];
-                        loop {
-                            let else_opt_continue = else_opt.and_then(|els| {
-                                els.and_then(|els| {
-                                    match els.node {
-                                        // else if
-                                        hir::ExprIf(cond, then, else_opt) => {
-                                            let pat_under = pat_wild(lctx, e.span);
-                                            arms.push(hir::Arm {
-                                                attrs: hir_vec![],
-                                                pats: hir_vec![pat_under],
-                                                guard: Some(cond),
-                                                body: expr_block(lctx, then, None),
-                                            });
-                                            else_opt.map(|else_opt| (else_opt, true))
-                                        }
-                                        _ => Some((P(els), false)),
-                                    }
-                                })
-                            });
-                            match else_opt_continue {
-                                Some((e, true)) => {
-                                    else_opt = Some(e);
-                                }
-                                Some((e, false)) => {
-                                    else_opt = Some(e);
-                                    break;
-                                }
-                                None => {
-                                    else_opt = None;
-                                    break;
-                                }
-                            }
-                        }
-                        arms
-                    };
-
-                    let contains_else_clause = else_opt.is_some();
-
-                    // `_ => [<else_opt> | ()]`
-                    let else_arm = {
-                        let pat_under = pat_wild(lctx, e.span);
-                        let else_expr =
-                            else_opt.unwrap_or_else(
-                                || expr_tuple(lctx, e.span, hir_vec![], None));
-                        arm(hir_vec![pat_under], else_expr)
-                    };
-
-                    let mut arms = Vec::with_capacity(else_if_arms.len() + 2);
-                    arms.push(pat_arm);
-                    arms.extend(else_if_arms);
-                    arms.push(else_arm);
-
-                    let sub_expr = lower_expr(lctx, sub_expr);
-                    // add attributes to the outer returned expr node
-                    expr(lctx,
-                         e.span,
-                         hir::ExprMatch(sub_expr,
-                                        arms.into(),
-                                        hir::MatchSource::IfLetDesugar {
-                                            contains_else_clause: contains_else_clause,
-                                        }),
-                         e.attrs.clone())
-                });
-            }
-
-            // Desugar ExprWhileLet
-            // From: `[opt_ident]: while let <pat> = <sub_expr> <body>`
-            ExprKind::WhileLet(ref pat, ref sub_expr, ref body, opt_ident) => {
-                // to:
-                //
-                //   [opt_ident]: loop {
-                //     match <sub_expr> {
-                //       <pat> => <body>,
-                //       _ => break
-                //     }
-                //   }
-
-                return cache_ids(lctx, e.id, |lctx| {
-                    // `<pat> => <body>`
-                    let pat_arm = {
-                        let body = lower_block(lctx, body);
-                        let body_expr = expr_block(lctx, body, None);
-                        arm(hir_vec![lower_pat(lctx, pat)], body_expr)
-                    };
-
-                    // `_ => break`
-                    let break_arm = {
-                        let pat_under = pat_wild(lctx, e.span);
-                        let break_expr = expr_break(lctx, e.span, None);
-                        arm(hir_vec![pat_under], break_expr)
-                    };
-
-                    // `match <sub_expr> { ... }`
-                    let arms = hir_vec![pat_arm, break_arm];
-                    let sub_expr = lower_expr(lctx, sub_expr);
-                    let match_expr = expr(lctx,
-                                          e.span,
-                                          hir::ExprMatch(sub_expr,
-                                                         arms,
-                                                         hir::MatchSource::WhileLetDesugar),
-                                          None);
-
-                    // `[opt_ident]: loop { ... }`
-                    let loop_block = block_expr(lctx, match_expr);
-                    let loop_expr = hir::ExprLoop(loop_block,
-                                                  opt_ident.map(|ident| lower_ident(lctx, ident)));
-                    // add attributes to the outer returned expr node
-                    expr(lctx, e.span, loop_expr, e.attrs.clone())
-                });
-            }
-
-            // Desugar ExprForLoop
-            // From: `[opt_ident]: for <pat> in <head> <body>`
-            ExprKind::ForLoop(ref pat, ref head, ref body, opt_ident) => {
-                // to:
-                //
-                //   {
-                //     let result = match ::std::iter::IntoIterator::into_iter(<head>) {
-                //       mut iter => {
-                //         [opt_ident]: loop {
-                //           match ::std::iter::Iterator::next(&mut iter) {
-                //             ::std::option::Option::Some(<pat>) => <body>,
-                //             ::std::option::Option::None => break
-                //           }
-                //         }
-                //       }
-                //     };
-                //     result
-                //   }
-
-                return cache_ids(lctx, e.id, |lctx| {
-                    // expand <head>
-                    let head = lower_expr(lctx, head);
-
-                    let iter = lctx.str_to_ident("iter");
-
-                    // `::std::option::Option::Some(<pat>) => <body>`
-                    let pat_arm = {
-                        let body_block = lower_block(lctx, body);
-                        let body_span = body_block.span;
-                        let body_expr = P(hir::Expr {
-                            id: lctx.next_id(),
-                            node: hir::ExprBlock(body_block),
-                            span: body_span,
-                            attrs: None,
-                        });
-                        let pat = lower_pat(lctx, pat);
-                        let some_pat = pat_some(lctx, e.span, pat);
-
-                        arm(hir_vec![some_pat], body_expr)
-                    };
-
-                    // `::std::option::Option::None => break`
-                    let break_arm = {
-                        let break_expr = expr_break(lctx, e.span, None);
-
-                        arm(hir_vec![pat_none(lctx, e.span)], break_expr)
-                    };
-
-                    // `match ::std::iter::Iterator::next(&mut iter) { ... }`
-                    let match_expr = {
-                        let next_path = {
-                            let strs = std_path(lctx, &["iter", "Iterator", "next"]);
-
-                            path_global(e.span, strs)
-                        };
-                        let iter = expr_ident(lctx, e.span, iter, None);
-                        let ref_mut_iter = expr_mut_addr_of(lctx, e.span, iter, None);
-                        let next_path = expr_path(lctx, next_path, None);
-                        let next_expr = expr_call(lctx,
-                                                  e.span,
-                                                  next_path,
-                                                  hir_vec![ref_mut_iter],
-                                                  None);
-                        let arms = hir_vec![pat_arm, break_arm];
-
-                        expr(lctx,
-                             e.span,
-                             hir::ExprMatch(next_expr, arms, hir::MatchSource::ForLoopDesugar),
-                             None)
-                    };
-
-                    // `[opt_ident]: loop { ... }`
-                    let loop_block = block_expr(lctx, match_expr);
-                    let loop_expr = hir::ExprLoop(loop_block,
-                                                  opt_ident.map(|ident| lower_ident(lctx, ident)));
-                    let loop_expr = expr(lctx, e.span, loop_expr, None);
-
-                    // `mut iter => { ... }`
-                    let iter_arm = {
-                        let iter_pat = pat_ident_binding_mode(lctx,
-                                                              e.span,
-                                                              iter,
-                                                              hir::BindByValue(hir::MutMutable));
-                        arm(hir_vec![iter_pat], loop_expr)
-                    };
-
-                    // `match ::std::iter::IntoIterator::into_iter(<head>) { ... }`
-                    let into_iter_expr = {
-                        let into_iter_path = {
-                            let strs = std_path(lctx, &["iter", "IntoIterator", "into_iter"]);
-
-                            path_global(e.span, strs)
-                        };
-
-                        let into_iter = expr_path(lctx, into_iter_path, None);
-                        expr_call(lctx, e.span, into_iter, hir_vec![head], None)
-                    };
-
-                    let match_expr = expr_match(lctx,
-                                                e.span,
-                                                into_iter_expr,
-                                                hir_vec![iter_arm],
-                                                hir::MatchSource::ForLoopDesugar,
-                                                None);
-
-                    // `{ let _result = ...; _result }`
-                    // underscore prevents an unused_variables lint if the head diverges
-                    let result_ident = lctx.str_to_ident("_result");
-                    let let_stmt = stmt_let(lctx, e.span, false, result_ident, match_expr, None);
-                    let result = expr_ident(lctx, e.span, result_ident, None);
-                    let block = block_all(lctx, e.span, hir_vec![let_stmt], Some(result));
-                    // add the attributes to the outer returned expr node
-                    expr_block(lctx, block, e.attrs.clone())
-                });
-            }
-
-            ExprKind::Mac(_) => panic!("Shouldn't exist here"),
-        },
-        span: e.span,
-        attrs: e.attrs.clone(),
-    })
-}
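(For orientation, a rough surface-level picture of the `for` loop desugaring that the comments inside `lower_expr` describe. This is an illustrative standalone function written here for the reader; the real lowering builds HIR nodes directly rather than source text.)

    fn for_loop_desugared(xs: Vec<i32>) -> i32 {
        let mut sum = 0;
        // `for x in xs { sum += x }` lowers to roughly this shape:
        let _result = match ::std::iter::IntoIterator::into_iter(xs) {
            mut iter => loop {
                match ::std::iter::Iterator::next(&mut iter) {
                    ::std::option::Option::Some(x) => { sum += x; }
                    ::std::option::Option::None => break,
                }
            },
        };
        sum
    }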
-
-pub fn lower_stmt(lctx: &LoweringContext, s: &Stmt) -> hir::Stmt {
-    match s.node {
-        StmtKind::Decl(ref d, id) => {
-            Spanned {
-                node: hir::StmtDecl(lower_decl(lctx, d), id),
-                span: s.span,
-            }
-        }
-        StmtKind::Expr(ref e, id) => {
-            Spanned {
-                node: hir::StmtExpr(lower_expr(lctx, e), id),
-                span: s.span,
-            }
-        }
-        StmtKind::Semi(ref e, id) => {
-            Spanned {
-                node: hir::StmtSemi(lower_expr(lctx, e), id),
-                span: s.span,
-            }
-        }
-        StmtKind::Mac(..) => panic!("Shouldn't exist here"),
-    }
-}
-
-pub fn lower_capture_clause(_lctx: &LoweringContext, c: CaptureBy) -> hir::CaptureClause {
-    match c {
-        CaptureBy::Value => hir::CaptureByValue,
-        CaptureBy::Ref => hir::CaptureByRef,
-    }
-}
-
-pub fn lower_visibility(_lctx: &LoweringContext, v: Visibility) -> hir::Visibility {
-    match v {
-        Visibility::Public => hir::Public,
-        Visibility::Inherited => hir::Inherited,
-    }
-}
-
-pub fn lower_block_check_mode(lctx: &LoweringContext, b: &BlockCheckMode) -> hir::BlockCheckMode {
-    match *b {
-        BlockCheckMode::Default => hir::DefaultBlock,
-        BlockCheckMode::Unsafe(u) => hir::UnsafeBlock(lower_unsafe_source(lctx, u)),
-    }
-}
-
-pub fn lower_binding_mode(lctx: &LoweringContext, b: &BindingMode) -> hir::BindingMode {
-    match *b {
-        BindingMode::ByRef(m) => hir::BindByRef(lower_mutability(lctx, m)),
-        BindingMode::ByValue(m) => hir::BindByValue(lower_mutability(lctx, m)),
-    }
-}
-
-pub fn lower_struct_field_kind(lctx: &LoweringContext,
-                               s: &StructFieldKind)
-                               -> hir::StructFieldKind {
-    match *s {
-        NamedField(ident, vis) => hir::NamedField(ident.name, lower_visibility(lctx, vis)),
-        UnnamedField(vis) => hir::UnnamedField(lower_visibility(lctx, vis)),
-    }
-}
-
-pub fn lower_unsafe_source(_lctx: &LoweringContext, u: UnsafeSource) -> hir::UnsafeSource {
-    match u {
-        CompilerGenerated => hir::CompilerGenerated,
-        UserProvided => hir::UserProvided,
-    }
-}
-
-pub fn lower_impl_polarity(_lctx: &LoweringContext, i: ImplPolarity) -> hir::ImplPolarity {
-    match i {
-        ImplPolarity::Positive => hir::ImplPolarity::Positive,
-        ImplPolarity::Negative => hir::ImplPolarity::Negative,
-    }
-}
-
-pub fn lower_trait_bound_modifier(_lctx: &LoweringContext,
-                                  f: TraitBoundModifier)
-                                  -> hir::TraitBoundModifier {
-    match f {
-        TraitBoundModifier::None => hir::TraitBoundModifier::None,
-        TraitBoundModifier::Maybe => hir::TraitBoundModifier::Maybe,
-    }
-}
-
-// Helper methods for building HIR.
-
-fn arm(pats: hir::HirVec<P<hir::Pat>>, expr: P<hir::Expr>) -> hir::Arm {
-    hir::Arm {
-        attrs: hir_vec![],
-        pats: pats,
-        guard: None,
-        body: expr,
-    }
-}
-
-fn expr_break(lctx: &LoweringContext, span: Span,
-              attrs: ThinAttributes) -> P<hir::Expr> {
-    expr(lctx, span, hir::ExprBreak(None), attrs)
-}
-
-fn expr_call(lctx: &LoweringContext,
-             span: Span,
-             e: P<hir::Expr>,
-             args: hir::HirVec<P<hir::Expr>>,
-             attrs: ThinAttributes)
-             -> P<hir::Expr> {
-    expr(lctx, span, hir::ExprCall(e, args), attrs)
-}
-
-fn expr_ident(lctx: &LoweringContext, span: Span, id: hir::Ident,
-              attrs: ThinAttributes) -> P<hir::Expr> {
-    expr_path(lctx, path_ident(span, id), attrs)
-}
-
-fn expr_mut_addr_of(lctx: &LoweringContext, span: Span, e: P<hir::Expr>,
-                    attrs: ThinAttributes) -> P<hir::Expr> {
-    expr(lctx, span, hir::ExprAddrOf(hir::MutMutable, e), attrs)
-}
-
-fn expr_path(lctx: &LoweringContext, path: hir::Path,
-             attrs: ThinAttributes) -> P<hir::Expr> {
-    expr(lctx, path.span, hir::ExprPath(None, path), attrs)
-}
-
-fn expr_match(lctx: &LoweringContext,
-              span: Span,
-              arg: P<hir::Expr>,
-              arms: hir::HirVec<hir::Arm>,
-              source: hir::MatchSource,
-              attrs: ThinAttributes)
-              -> P<hir::Expr> {
-    expr(lctx, span, hir::ExprMatch(arg, arms, source), attrs)
-}
-
-fn expr_block(lctx: &LoweringContext, b: P<hir::Block>,
-              attrs: ThinAttributes) -> P<hir::Expr> {
-    expr(lctx, b.span, hir::ExprBlock(b), attrs)
-}
-
-fn expr_tuple(lctx: &LoweringContext, sp: Span, exprs: hir::HirVec<P<hir::Expr>>,
-              attrs: ThinAttributes) -> P<hir::Expr> {
-    expr(lctx, sp, hir::ExprTup(exprs), attrs)
-}
-
-fn expr(lctx: &LoweringContext, span: Span, node: hir::Expr_,
-        attrs: ThinAttributes) -> P<hir::Expr> {
-    P(hir::Expr {
-        id: lctx.next_id(),
-        node: node,
-        span: span,
-        attrs: attrs,
-    })
-}
-
-fn stmt_let(lctx: &LoweringContext,
-            sp: Span,
-            mutbl: bool,
-            ident: hir::Ident,
-            ex: P<hir::Expr>,
-            attrs: ThinAttributes)
-            -> hir::Stmt {
-    let pat = if mutbl {
-        pat_ident_binding_mode(lctx, sp, ident, hir::BindByValue(hir::MutMutable))
-    } else {
-        pat_ident(lctx, sp, ident)
-    };
-    let local = P(hir::Local {
-        pat: pat,
-        ty: None,
-        init: Some(ex),
-        id: lctx.next_id(),
-        span: sp,
-        attrs: attrs,
-    });
-    let decl = respan(sp, hir::DeclLocal(local));
-    respan(sp, hir::StmtDecl(P(decl), lctx.next_id()))
-}
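(The surface-level shape of what `stmt_let` constructs, for reference; illustrative comments only.)

    // stmt_let(lctx, sp, false, ident, ex, None)  ~  `let ident = ex;`
    // stmt_let(lctx, sp, true,  ident, ex, None)  ~  `let mut ident = ex;`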
-
-fn block_expr(lctx: &LoweringContext, expr: P<hir::Expr>) -> P<hir::Block> {
-    block_all(lctx, expr.span, hir::HirVec::new(), Some(expr))
-}
-
-fn block_all(lctx: &LoweringContext,
-             span: Span,
-             stmts: hir::HirVec<hir::Stmt>,
-             expr: Option<P<hir::Expr>>)
-             -> P<hir::Block> {
-    P(hir::Block {
-        stmts: stmts,
-        expr: expr,
-        id: lctx.next_id(),
-        rules: hir::DefaultBlock,
-        span: span,
-    })
-}
-
-fn pat_some(lctx: &LoweringContext, span: Span, pat: P<hir::Pat>) -> P<hir::Pat> {
-    let some = std_path(lctx, &["option", "Option", "Some"]);
-    let path = path_global(span, some);
-    pat_enum(lctx, span, path, hir_vec![pat])
-}
-
-fn pat_none(lctx: &LoweringContext, span: Span) -> P<hir::Pat> {
-    let none = std_path(lctx, &["option", "Option", "None"]);
-    let path = path_global(span, none);
-    pat_enum(lctx, span, path, hir_vec![])
-}
-
-fn pat_enum(lctx: &LoweringContext,
-            span: Span,
-            path: hir::Path,
-            subpats: hir::HirVec<P<hir::Pat>>)
-            -> P<hir::Pat> {
-    let pt = if subpats.is_empty() {
-        hir::PatKind::Path(path)
-    } else {
-        hir::PatKind::TupleStruct(path, Some(subpats))
-    };
-    pat(lctx, span, pt)
-}
-
-fn pat_ident(lctx: &LoweringContext, span: Span, ident: hir::Ident) -> P<hir::Pat> {
-    pat_ident_binding_mode(lctx, span, ident, hir::BindByValue(hir::MutImmutable))
-}
-
-fn pat_ident_binding_mode(lctx: &LoweringContext,
-                          span: Span,
-                          ident: hir::Ident,
-                          bm: hir::BindingMode)
-                          -> P<hir::Pat> {
-    let pat_ident = hir::PatKind::Ident(bm,
-                                  Spanned {
-                                      span: span,
-                                      node: ident,
-                                  },
-                                  None);
-    pat(lctx, span, pat_ident)
-}
-
-fn pat_wild(lctx: &LoweringContext, span: Span) -> P<hir::Pat> {
-    pat(lctx, span, hir::PatKind::Wild)
-}
-
-fn pat(lctx: &LoweringContext, span: Span, pat: hir::PatKind) -> P<hir::Pat> {
-    P(hir::Pat {
-        id: lctx.next_id(),
-        node: pat,
-        span: span,
-    })
-}
-
-fn path_ident(span: Span, id: hir::Ident) -> hir::Path {
-    path(span, vec![id])
-}
-
-fn path(span: Span, strs: Vec<hir::Ident>) -> hir::Path {
-    path_all(span, false, strs, hir::HirVec::new(), hir::HirVec::new(), hir::HirVec::new())
-}
-
-fn path_global(span: Span, strs: Vec<hir::Ident>) -> hir::Path {
-    path_all(span, true, strs, hir::HirVec::new(), hir::HirVec::new(), hir::HirVec::new())
-}
-
-fn path_all(sp: Span,
-            global: bool,
-            mut idents: Vec<hir::Ident>,
-            lifetimes: hir::HirVec<hir::Lifetime>,
-            types: hir::HirVec<P<hir::Ty>>,
-            bindings: hir::HirVec<hir::TypeBinding>)
-            -> hir::Path {
-    let last_identifier = idents.pop().unwrap();
-    let mut segments: Vec<hir::PathSegment> = idents.into_iter()
-                                                    .map(|ident| {
-                                                        hir::PathSegment {
-                                                            identifier: ident,
-                                                            parameters: hir::PathParameters::none(),
-                                                        }
-                                                    })
-                                                    .collect();
-    segments.push(hir::PathSegment {
-        identifier: last_identifier,
-        parameters: hir::AngleBracketedParameters(hir::AngleBracketedParameterData {
-            lifetimes: lifetimes,
-            types: types,
-            bindings: bindings,
-        }),
-    });
-    hir::Path {
-        span: sp,
-        global: global,
-        segments: segments.into(),
-    }
-}
-
-fn std_path(lctx: &LoweringContext, components: &[&str]) -> Vec<hir::Ident> {
-    let mut v = Vec::new();
-    if let Some(s) = lctx.crate_root {
-        v.push(hir::Ident::from_name(token::intern(s)));
-    }
-    v.extend(components.iter().map(|s| hir::Ident::from_name(token::intern(s))));
-    return v;
-}
-
-// Given the suffix ["b","c","d"], returns the path `::std::b::c::d` when
-// `lctx.crate_root` is `Some("std")`, and `::core::b::c::d` when it is `Some("core")`.
-fn core_path(lctx: &LoweringContext, span: Span, components: &[&str]) -> hir::Path {
-    let idents = std_path(lctx, components);
-    path_global(span, idents)
-}
-
-fn signal_block_expr(lctx: &LoweringContext,
-                     stmts: hir::HirVec<hir::Stmt>,
-                     expr: P<hir::Expr>,
-                     span: Span,
-                     rule: hir::BlockCheckMode,
-                     attrs: ThinAttributes)
-                     -> P<hir::Expr> {
-    let id = lctx.next_id();
-    expr_block(lctx,
-               P(hir::Block {
-                   rules: rule,
-                   span: span,
-                   id: id,
-                   stmts: stmts,
-                   expr: Some(expr),
-               }),
-               attrs)
-}
-
-
-
-#[cfg(test)]
-mod test {
-    use super::*;
-    use syntax::ast::{self, NodeId, NodeIdAssigner};
-    use syntax::{parse, codemap};
-    use syntax::fold::Folder;
-    use std::cell::Cell;
-
-    struct MockAssigner {
-        next_id: Cell<NodeId>,
-    }
-
-    impl MockAssigner {
-        fn new() -> MockAssigner {
-            MockAssigner { next_id: Cell::new(0) }
-        }
-    }
-
-    trait FakeExtCtxt {
-        fn call_site(&self) -> codemap::Span;
-        fn cfg(&self) -> ast::CrateConfig;
-        fn ident_of(&self, st: &str) -> ast::Ident;
-        fn name_of(&self, st: &str) -> ast::Name;
-        fn parse_sess(&self) -> &parse::ParseSess;
-    }
-
-    impl FakeExtCtxt for parse::ParseSess {
-        fn call_site(&self) -> codemap::Span {
-            codemap::Span {
-                lo: codemap::BytePos(0),
-                hi: codemap::BytePos(0),
-                expn_id: codemap::NO_EXPANSION,
-            }
-        }
-        fn cfg(&self) -> ast::CrateConfig {
-            Vec::new()
-        }
-        fn ident_of(&self, st: &str) -> ast::Ident {
-            parse::token::str_to_ident(st)
-        }
-        fn name_of(&self, st: &str) -> ast::Name {
-            parse::token::intern(st)
-        }
-        fn parse_sess(&self) -> &parse::ParseSess {
-            self
-        }
-    }
-
-    impl NodeIdAssigner for MockAssigner {
-        fn next_node_id(&self) -> NodeId {
-            let result = self.next_id.get();
-            self.next_id.set(result + 1);
-            result
-        }
-
-        fn peek_node_id(&self) -> NodeId {
-            self.next_id.get()
-        }
-    }
-
-    impl Folder for MockAssigner {
-        fn new_id(&mut self, old_id: NodeId) -> NodeId {
-            assert_eq!(old_id, ast::DUMMY_NODE_ID);
-            self.next_node_id()
-        }
-    }
-
-    #[test]
-    fn test_preserves_ids() {
-        let cx = parse::ParseSess::new();
-        let mut assigner = MockAssigner::new();
-
-        let ast_if_let = quote_expr!(&cx,
-                                     if let Some(foo) = baz {
-                                         bar(foo);
-                                     });
-        let ast_if_let = assigner.fold_expr(ast_if_let);
-        let ast_while_let = quote_expr!(&cx,
-                                        while let Some(foo) = baz {
-                                            bar(foo);
-                                        });
-        let ast_while_let = assigner.fold_expr(ast_while_let);
-        let ast_for = quote_expr!(&cx,
-                                  for i in 0..10 {
-                                      for j in 0..10 {
-                                          foo(i, j);
-                                      }
-                                  });
-        let ast_for = assigner.fold_expr(ast_for);
-        let ast_in = quote_expr!(&cx, in HEAP { foo() });
-        let ast_in = assigner.fold_expr(ast_in);
-
-        let lctx = LoweringContext::new(&assigner, None);
-        let hir1 = lower_expr(&lctx, &ast_if_let);
-        let hir2 = lower_expr(&lctx, &ast_if_let);
-        assert!(hir1 == hir2);
-
-        let hir1 = lower_expr(&lctx, &ast_while_let);
-        let hir2 = lower_expr(&lctx, &ast_while_let);
-        assert!(hir1 == hir2);
-
-        let hir1 = lower_expr(&lctx, &ast_for);
-        let hir2 = lower_expr(&lctx, &ast_for);
-        assert!(hir1 == hir2);
-
-        let hir1 = lower_expr(&lctx, &ast_in);
-        let hir2 = lower_expr(&lctx, &ast_in);
-        assert!(hir1 == hir2);
-    }
-}
diff --git a/src/librustc_front/print/pprust.rs b/src/librustc_front/print/pprust.rs
deleted file mode 100644 (file)
index d837ab0..0000000
--- a/src/librustc_front/print/pprust.rs
+++ /dev/null
@@ -1,2423 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-pub use self::AnnNode::*;
-
-use syntax::abi::Abi;
-use syntax::ast;
-use syntax::codemap::{self, CodeMap, BytePos, Spanned};
-use syntax::errors;
-use syntax::parse::token::{self, BinOpToken};
-use syntax::parse::lexer::comments;
-use syntax::parse;
-use syntax::print::pp::{self, break_offset, word, space, hardbreak};
-use syntax::print::pp::{Breaks, eof};
-use syntax::print::pp::Breaks::{Consistent, Inconsistent};
-use syntax::print::pprust::{self as ast_pp, PrintState};
-use syntax::ptr::P;
-
-use hir;
-use hir::{Crate, PatKind, RegionTyParamBound, TraitTyParamBound, TraitBoundModifier};
-
-use std::io::{self, Write, Read};
-
-pub enum AnnNode<'a> {
-    NodeName(&'a ast::Name),
-    NodeBlock(&'a hir::Block),
-    NodeItem(&'a hir::Item),
-    NodeSubItem(ast::NodeId),
-    NodeExpr(&'a hir::Expr),
-    NodePat(&'a hir::Pat),
-}
-
-pub trait PpAnn {
-    fn pre(&self, _state: &mut State, _node: AnnNode) -> io::Result<()> {
-        Ok(())
-    }
-    fn post(&self, _state: &mut State, _node: AnnNode) -> io::Result<()> {
-        Ok(())
-    }
-}
-
-#[derive(Copy, Clone)]
-pub struct NoAnn;
-
-impl PpAnn for NoAnn {}
-
-
-pub struct State<'a> {
-    krate: Option<&'a Crate>,
-    pub s: pp::Printer<'a>,
-    cm: Option<&'a CodeMap>,
-    comments: Option<Vec<comments::Comment>>,
-    literals: Option<Vec<comments::Literal>>,
-    cur_cmnt_and_lit: ast_pp::CurrentCommentAndLiteral,
-    boxes: Vec<pp::Breaks>,
-    ann: &'a (PpAnn + 'a),
-}
-
-impl<'a> PrintState<'a> for State<'a> {
-    fn writer(&mut self) -> &mut pp::Printer<'a> {
-        &mut self.s
-    }
-
-    fn boxes(&mut self) -> &mut Vec<pp::Breaks> {
-        &mut self.boxes
-    }
-
-    fn comments(&mut self) -> &mut Option<Vec<comments::Comment>> {
-        &mut self.comments
-    }
-
-    fn cur_cmnt_and_lit(&mut self) -> &mut ast_pp::CurrentCommentAndLiteral {
-        &mut self.cur_cmnt_and_lit
-    }
-
-    fn literals(&self) -> &Option<Vec<comments::Literal>> {
-        &self.literals
-    }
-}
-
-pub fn rust_printer<'a>(writer: Box<Write + 'a>, krate: Option<&'a Crate>) -> State<'a> {
-    static NO_ANN: NoAnn = NoAnn;
-    rust_printer_annotated(writer, &NO_ANN, krate)
-}
-
-pub fn rust_printer_annotated<'a>(writer: Box<Write + 'a>,
-                                  ann: &'a PpAnn,
-                                  krate: Option<&'a Crate>)
-                                  -> State<'a> {
-    State {
-        krate: krate,
-        s: pp::mk_printer(writer, default_columns),
-        cm: None,
-        comments: None,
-        literals: None,
-        cur_cmnt_and_lit: ast_pp::CurrentCommentAndLiteral {
-            cur_cmnt: 0,
-            cur_lit: 0,
-        },
-        boxes: Vec::new(),
-        ann: ann,
-    }
-}
-
-#[allow(non_upper_case_globals)]
-pub const indent_unit: usize = 4;
-
-#[allow(non_upper_case_globals)]
-pub const default_columns: usize = 78;
-
-
-/// Requires you to pass an input filename and reader so that
-/// it can scan the input text for comments and literals to
-/// copy forward.
-pub fn print_crate<'a>(cm: &'a CodeMap,
-                       span_diagnostic: &errors::Handler,
-                       krate: &hir::Crate,
-                       filename: String,
-                       input: &mut Read,
-                       out: Box<Write + 'a>,
-                       ann: &'a PpAnn,
-                       is_expanded: bool)
-                       -> io::Result<()> {
-    let mut s = State::new_from_input(cm, span_diagnostic, filename, input,
-                                      out, ann, is_expanded, Some(krate));
-
-    // When printing the AST, we sometimes need to inject `#[no_std]` here.
-    // Since you can't compile the HIR, it's not necessary.
-
-    try!(s.print_mod(&krate.module, &krate.attrs));
-    try!(s.print_remaining_comments());
-    eof(&mut s.s)
-}
-
-impl<'a> State<'a> {
-    pub fn new_from_input(cm: &'a CodeMap,
-                          span_diagnostic: &errors::Handler,
-                          filename: String,
-                          input: &mut Read,
-                          out: Box<Write + 'a>,
-                          ann: &'a PpAnn,
-                          is_expanded: bool,
-                          krate: Option<&'a Crate>)
-                          -> State<'a> {
-        let (cmnts, lits) = comments::gather_comments_and_literals(span_diagnostic,
-                                                                   filename,
-                                                                   input);
-
-        State::new(cm,
-                   out,
-                   ann,
-                   Some(cmnts),
-                   // If the code is post expansion, don't use the table of
-                   // literals, since it doesn't correspond with the literals
-                   // in the AST anymore.
-                   if is_expanded {
-                       None
-                   } else {
-                       Some(lits)
-                   },
-                   krate)
-    }
-
-    pub fn new(cm: &'a CodeMap,
-               out: Box<Write + 'a>,
-               ann: &'a PpAnn,
-               comments: Option<Vec<comments::Comment>>,
-               literals: Option<Vec<comments::Literal>>,
-               krate: Option<&'a Crate>)
-               -> State<'a> {
-        State {
-            krate: krate,
-            s: pp::mk_printer(out, default_columns),
-            cm: Some(cm),
-            comments: comments.clone(),
-            literals: literals.clone(),
-            cur_cmnt_and_lit: ast_pp::CurrentCommentAndLiteral {
-                cur_cmnt: 0,
-                cur_lit: 0,
-            },
-            boxes: Vec::new(),
-            ann: ann,
-        }
-    }
-}
-
-pub fn to_string<F>(f: F) -> String
-    where F: FnOnce(&mut State) -> io::Result<()>
-{
-    let mut wr = Vec::new();
-    {
-        let mut printer = rust_printer(Box::new(&mut wr), None);
-        f(&mut printer).unwrap();
-        eof(&mut printer.s).unwrap();
-    }
-    String::from_utf8(wr).unwrap()
-}
-
-pub fn binop_to_string(op: BinOpToken) -> &'static str {
-    match op {
-        token::Plus => "+",
-        token::Minus => "-",
-        token::Star => "*",
-        token::Slash => "/",
-        token::Percent => "%",
-        token::Caret => "^",
-        token::And => "&",
-        token::Or => "|",
-        token::Shl => "<<",
-        token::Shr => ">>",
-    }
-}
-
-pub fn ty_to_string(ty: &hir::Ty) -> String {
-    to_string(|s| s.print_type(ty))
-}
-
-pub fn bounds_to_string(bounds: &[hir::TyParamBound]) -> String {
-    to_string(|s| s.print_bounds("", bounds))
-}
-
-pub fn pat_to_string(pat: &hir::Pat) -> String {
-    to_string(|s| s.print_pat(pat))
-}
-
-pub fn arm_to_string(arm: &hir::Arm) -> String {
-    to_string(|s| s.print_arm(arm))
-}
-
-pub fn expr_to_string(e: &hir::Expr) -> String {
-    to_string(|s| s.print_expr(e))
-}
-
-pub fn lifetime_to_string(e: &hir::Lifetime) -> String {
-    to_string(|s| s.print_lifetime(e))
-}
-
-pub fn stmt_to_string(stmt: &hir::Stmt) -> String {
-    to_string(|s| s.print_stmt(stmt))
-}
-
-pub fn item_to_string(i: &hir::Item) -> String {
-    to_string(|s| s.print_item(i))
-}
-
-pub fn impl_item_to_string(i: &hir::ImplItem) -> String {
-    to_string(|s| s.print_impl_item(i))
-}
-
-pub fn trait_item_to_string(i: &hir::TraitItem) -> String {
-    to_string(|s| s.print_trait_item(i))
-}
-
-pub fn generics_to_string(generics: &hir::Generics) -> String {
-    to_string(|s| s.print_generics(generics))
-}
-
-pub fn where_clause_to_string(i: &hir::WhereClause) -> String {
-    to_string(|s| s.print_where_clause(i))
-}
-
-pub fn fn_block_to_string(p: &hir::FnDecl) -> String {
-    to_string(|s| s.print_fn_block_args(p))
-}
-
-pub fn path_to_string(p: &hir::Path) -> String {
-    to_string(|s| s.print_path(p, false, 0))
-}
-
-pub fn name_to_string(name: ast::Name) -> String {
-    to_string(|s| s.print_name(name))
-}
-
-pub fn fun_to_string(decl: &hir::FnDecl,
-                     unsafety: hir::Unsafety,
-                     constness: hir::Constness,
-                     name: ast::Name,
-                     opt_explicit_self: Option<&hir::ExplicitSelf_>,
-                     generics: &hir::Generics)
-                     -> String {
-    to_string(|s| {
-        try!(s.head(""));
-        try!(s.print_fn(decl,
-                        unsafety,
-                        constness,
-                        Abi::Rust,
-                        Some(name),
-                        generics,
-                        opt_explicit_self,
-                        hir::Inherited));
-        try!(s.end()); // Close the head box
-        s.end() // Close the outer box
-    })
-}
-
-pub fn block_to_string(blk: &hir::Block) -> String {
-    to_string(|s| {
-        // containing cbox, will be closed by print-block at }
-        try!(s.cbox(indent_unit));
-        // head-ibox, will be closed by print-block after {
-        try!(s.ibox(0));
-        s.print_block(blk)
-    })
-}
-
-pub fn explicit_self_to_string(explicit_self: &hir::ExplicitSelf_) -> String {
-    to_string(|s| s.print_explicit_self(explicit_self, hir::MutImmutable).map(|_| {}))
-}
-
-pub fn variant_to_string(var: &hir::Variant) -> String {
-    to_string(|s| s.print_variant(var))
-}
-
-pub fn arg_to_string(arg: &hir::Arg) -> String {
-    to_string(|s| s.print_arg(arg, false))
-}
-
-pub fn visibility_qualified(vis: hir::Visibility, s: &str) -> String {
-    match vis {
-        hir::Public => format!("pub {}", s),
-        hir::Inherited => s.to_string(),
-    }
-}
-
-fn needs_parentheses(expr: &hir::Expr) -> bool {
-    match expr.node {
-        hir::ExprAssign(..) |
-        hir::ExprBinary(..) |
-        hir::ExprClosure(..) |
-        hir::ExprAssignOp(..) |
-        hir::ExprCast(..) |
-        hir::ExprType(..) => true,
-        _ => false,
-    }
-}
-
-impl<'a> State<'a> {
-    pub fn cbox(&mut self, u: usize) -> io::Result<()> {
-        self.boxes.push(pp::Breaks::Consistent);
-        pp::cbox(&mut self.s, u)
-    }
-
-    pub fn nbsp(&mut self) -> io::Result<()> {
-        word(&mut self.s, " ")
-    }
-
-    pub fn word_nbsp(&mut self, w: &str) -> io::Result<()> {
-        try!(word(&mut self.s, w));
-        self.nbsp()
-    }
-
-    pub fn head(&mut self, w: &str) -> io::Result<()> {
-        // outer-box is consistent
-        try!(self.cbox(indent_unit));
-        // head-box is inconsistent
-        try!(self.ibox(w.len() + 1));
-        // keyword that starts the head
-        if !w.is_empty() {
-            try!(self.word_nbsp(w));
-        }
-        Ok(())
-    }
-
-    pub fn bopen(&mut self) -> io::Result<()> {
-        try!(word(&mut self.s, "{"));
-        self.end() // close the head-box
-    }
-
-    pub fn bclose_(&mut self, span: codemap::Span, indented: usize) -> io::Result<()> {
-        self.bclose_maybe_open(span, indented, true)
-    }
-    pub fn bclose_maybe_open(&mut self,
-                             span: codemap::Span,
-                             indented: usize,
-                             close_box: bool)
-                             -> io::Result<()> {
-        try!(self.maybe_print_comment(span.hi));
-        try!(self.break_offset_if_not_bol(1, -(indented as isize)));
-        try!(word(&mut self.s, "}"));
-        if close_box {
-            try!(self.end()); // close the outer-box
-        }
-        Ok(())
-    }
-    pub fn bclose(&mut self, span: codemap::Span) -> io::Result<()> {
-        self.bclose_(span, indent_unit)
-    }
-
-    pub fn in_cbox(&self) -> bool {
-        match self.boxes.last() {
-            Some(&last_box) => last_box == pp::Breaks::Consistent,
-            None => false,
-        }
-    }
-    pub fn space_if_not_bol(&mut self) -> io::Result<()> {
-        if !self.is_bol() {
-            try!(space(&mut self.s));
-        }
-        Ok(())
-    }
-    pub fn break_offset_if_not_bol(&mut self, n: usize, off: isize) -> io::Result<()> {
-        if !self.is_bol() {
-            break_offset(&mut self.s, n, off)
-        } else {
-            if off != 0 && self.s.last_token().is_hardbreak_tok() {
-                // We do something pretty sketchy here: tuck the nonzero
-                // offset-adjustment we were going to deposit along with the
-                // break into the previous hardbreak.
-                self.s.replace_last_token(pp::hardbreak_tok_offset(off));
-            }
-            Ok(())
-        }
-    }
-
-    // Synthesizes a comment that was not textually present in the original source
-    // file.
-    pub fn synth_comment(&mut self, text: String) -> io::Result<()> {
-        try!(word(&mut self.s, "/*"));
-        try!(space(&mut self.s));
-        try!(word(&mut self.s, &text[..]));
-        try!(space(&mut self.s));
-        word(&mut self.s, "*/")
-    }
-
-
-    pub fn commasep_cmnt<T, F, G>(&mut self,
-                                  b: Breaks,
-                                  elts: &[T],
-                                  mut op: F,
-                                  mut get_span: G)
-                                  -> io::Result<()>
-        where F: FnMut(&mut State, &T) -> io::Result<()>,
-              G: FnMut(&T) -> codemap::Span
-    {
-        try!(self.rbox(0, b));
-        let len = elts.len();
-        let mut i = 0;
-        for elt in elts {
-            try!(self.maybe_print_comment(get_span(elt).hi));
-            try!(op(self, elt));
-            i += 1;
-            if i < len {
-                try!(word(&mut self.s, ","));
-                try!(self.maybe_print_trailing_comment(get_span(elt), Some(get_span(&elts[i]).hi)));
-                try!(self.space_if_not_bol());
-            }
-        }
-        self.end()
-    }
-
-    pub fn commasep_exprs(&mut self, b: Breaks, exprs: &[P<hir::Expr>]) -> io::Result<()> {
-        self.commasep_cmnt(b, exprs, |s, e| s.print_expr(&e), |e| e.span)
-    }
-
-    pub fn print_mod(&mut self, _mod: &hir::Mod, attrs: &[ast::Attribute]) -> io::Result<()> {
-        try!(self.print_inner_attributes(attrs));
-        for item_id in &_mod.item_ids {
-            try!(self.print_item_id(item_id));
-        }
-        Ok(())
-    }
-
-    pub fn print_foreign_mod(&mut self,
-                             nmod: &hir::ForeignMod,
-                             attrs: &[ast::Attribute])
-                             -> io::Result<()> {
-        try!(self.print_inner_attributes(attrs));
-        for item in &nmod.items {
-            try!(self.print_foreign_item(item));
-        }
-        Ok(())
-    }
-
-    pub fn print_opt_lifetime(&mut self, lifetime: &Option<hir::Lifetime>) -> io::Result<()> {
-        if let Some(l) = *lifetime {
-            try!(self.print_lifetime(&l));
-            try!(self.nbsp());
-        }
-        Ok(())
-    }
-
-    pub fn print_type(&mut self, ty: &hir::Ty) -> io::Result<()> {
-        try!(self.maybe_print_comment(ty.span.lo));
-        try!(self.ibox(0));
-        match ty.node {
-            hir::TyVec(ref ty) => {
-                try!(word(&mut self.s, "["));
-                try!(self.print_type(&ty));
-                try!(word(&mut self.s, "]"));
-            }
-            hir::TyPtr(ref mt) => {
-                try!(word(&mut self.s, "*"));
-                match mt.mutbl {
-                    hir::MutMutable => try!(self.word_nbsp("mut")),
-                    hir::MutImmutable => try!(self.word_nbsp("const")),
-                }
-                try!(self.print_type(&mt.ty));
-            }
-            hir::TyRptr(ref lifetime, ref mt) => {
-                try!(word(&mut self.s, "&"));
-                try!(self.print_opt_lifetime(lifetime));
-                try!(self.print_mt(mt));
-            }
-            hir::TyTup(ref elts) => {
-                try!(self.popen());
-                try!(self.commasep(Inconsistent, &elts[..], |s, ty| s.print_type(&ty)));
-                if elts.len() == 1 {
-                    try!(word(&mut self.s, ","));
-                }
-                try!(self.pclose());
-            }
-            hir::TyBareFn(ref f) => {
-                let generics = hir::Generics {
-                    lifetimes: f.lifetimes.clone(),
-                    ty_params: hir::HirVec::new(),
-                    where_clause: hir::WhereClause {
-                        id: ast::DUMMY_NODE_ID,
-                        predicates: hir::HirVec::new(),
-                    },
-                };
-                try!(self.print_ty_fn(f.abi, f.unsafety, &f.decl, None, &generics, None));
-            }
-            hir::TyPath(None, ref path) => {
-                try!(self.print_path(path, false, 0));
-            }
-            hir::TyPath(Some(ref qself), ref path) => {
-                try!(self.print_qpath(path, qself, false))
-            }
-            hir::TyObjectSum(ref ty, ref bounds) => {
-                try!(self.print_type(&ty));
-                try!(self.print_bounds("+", &bounds[..]));
-            }
-            hir::TyPolyTraitRef(ref bounds) => {
-                try!(self.print_bounds("", &bounds[..]));
-            }
-            hir::TyFixedLengthVec(ref ty, ref v) => {
-                try!(word(&mut self.s, "["));
-                try!(self.print_type(&ty));
-                try!(word(&mut self.s, "; "));
-                try!(self.print_expr(&v));
-                try!(word(&mut self.s, "]"));
-            }
-            hir::TyTypeof(ref e) => {
-                try!(word(&mut self.s, "typeof("));
-                try!(self.print_expr(&e));
-                try!(word(&mut self.s, ")"));
-            }
-            hir::TyInfer => {
-                try!(word(&mut self.s, "_"));
-            }
-        }
-        self.end()
-    }
-
-    pub fn print_foreign_item(&mut self, item: &hir::ForeignItem) -> io::Result<()> {
-        try!(self.hardbreak_if_not_bol());
-        try!(self.maybe_print_comment(item.span.lo));
-        try!(self.print_outer_attributes(&item.attrs));
-        match item.node {
-            hir::ForeignItemFn(ref decl, ref generics) => {
-                try!(self.head(""));
-                try!(self.print_fn(decl,
-                                   hir::Unsafety::Normal,
-                                   hir::Constness::NotConst,
-                                   Abi::Rust,
-                                   Some(item.name),
-                                   generics,
-                                   None,
-                                   item.vis));
-                try!(self.end()); // end head-ibox
-                try!(word(&mut self.s, ";"));
-                self.end() // end the outer fn box
-            }
-            hir::ForeignItemStatic(ref t, m) => {
-                try!(self.head(&visibility_qualified(item.vis, "static")));
-                if m {
-                    try!(self.word_space("mut"));
-                }
-                try!(self.print_name(item.name));
-                try!(self.word_space(":"));
-                try!(self.print_type(&t));
-                try!(word(&mut self.s, ";"));
-                try!(self.end()); // end the head-ibox
-                self.end() // end the outer cbox
-            }
-        }
-    }
-
-    fn print_associated_const(&mut self,
-                              name: ast::Name,
-                              ty: &hir::Ty,
-                              default: Option<&hir::Expr>,
-                              vis: hir::Visibility)
-                              -> io::Result<()> {
-        try!(word(&mut self.s, &visibility_qualified(vis, "")));
-        try!(self.word_space("const"));
-        try!(self.print_name(name));
-        try!(self.word_space(":"));
-        try!(self.print_type(ty));
-        if let Some(expr) = default {
-            try!(space(&mut self.s));
-            try!(self.word_space("="));
-            try!(self.print_expr(expr));
-        }
-        word(&mut self.s, ";")
-    }
-
-    fn print_associated_type(&mut self,
-                             name: ast::Name,
-                             bounds: Option<&hir::TyParamBounds>,
-                             ty: Option<&hir::Ty>)
-                             -> io::Result<()> {
-        try!(self.word_space("type"));
-        try!(self.print_name(name));
-        if let Some(bounds) = bounds {
-            try!(self.print_bounds(":", bounds));
-        }
-        if let Some(ty) = ty {
-            try!(space(&mut self.s));
-            try!(self.word_space("="));
-            try!(self.print_type(ty));
-        }
-        word(&mut self.s, ";")
-    }
-
-    pub fn print_item_id(&mut self, item_id: &hir::ItemId) -> io::Result<()> {
-        // Nested items are skipped when no krate context was provided.
-        if let Some(krate) = self.krate {
-            let item = &krate.items[&item_id.id];
-            self.print_item(item)
-        } else {
-            Ok(())
-        }
-    }
-
-    /// Pretty-print an item
-    pub fn print_item(&mut self, item: &hir::Item) -> io::Result<()> {
-        try!(self.hardbreak_if_not_bol());
-        try!(self.maybe_print_comment(item.span.lo));
-        try!(self.print_outer_attributes(&item.attrs));
-        try!(self.ann.pre(self, NodeItem(item)));
-        match item.node {
-            hir::ItemExternCrate(ref optional_path) => {
-                try!(self.head(&visibility_qualified(item.vis, "extern crate")));
-                if let Some(p) = *optional_path {
-                    let val = p.as_str();
-                    if val.contains("-") {
-                        try!(self.print_string(&val, ast::StrStyle::Cooked));
-                    } else {
-                        try!(self.print_name(p));
-                    }
-                    try!(space(&mut self.s));
-                    try!(word(&mut self.s, "as"));
-                    try!(space(&mut self.s));
-                }
-                try!(self.print_name(item.name));
-                try!(word(&mut self.s, ";"));
-                try!(self.end()); // end inner head-block
-                try!(self.end()); // end outer head-block
-            }
-            hir::ItemUse(ref vp) => {
-                try!(self.head(&visibility_qualified(item.vis, "use")));
-                try!(self.print_view_path(&vp));
-                try!(word(&mut self.s, ";"));
-                try!(self.end()); // end inner head-block
-                try!(self.end()); // end outer head-block
-            }
-            hir::ItemStatic(ref ty, m, ref expr) => {
-                try!(self.head(&visibility_qualified(item.vis, "static")));
-                if m == hir::MutMutable {
-                    try!(self.word_space("mut"));
-                }
-                try!(self.print_name(item.name));
-                try!(self.word_space(":"));
-                try!(self.print_type(&ty));
-                try!(space(&mut self.s));
-                try!(self.end()); // end the head-ibox
-
-                try!(self.word_space("="));
-                try!(self.print_expr(&expr));
-                try!(word(&mut self.s, ";"));
-                try!(self.end()); // end the outer cbox
-            }
-            hir::ItemConst(ref ty, ref expr) => {
-                try!(self.head(&visibility_qualified(item.vis, "const")));
-                try!(self.print_name(item.name));
-                try!(self.word_space(":"));
-                try!(self.print_type(&ty));
-                try!(space(&mut self.s));
-                try!(self.end()); // end the head-ibox
-
-                try!(self.word_space("="));
-                try!(self.print_expr(&expr));
-                try!(word(&mut self.s, ";"));
-                try!(self.end()); // end the outer cbox
-            }
-            hir::ItemFn(ref decl, unsafety, constness, abi, ref typarams, ref body) => {
-                try!(self.head(""));
-                try!(self.print_fn(decl,
-                                   unsafety,
-                                   constness,
-                                   abi,
-                                   Some(item.name),
-                                   typarams,
-                                   None,
-                                   item.vis));
-                try!(word(&mut self.s, " "));
-                try!(self.print_block_with_attrs(&body, &item.attrs));
-            }
-            hir::ItemMod(ref _mod) => {
-                try!(self.head(&visibility_qualified(item.vis, "mod")));
-                try!(self.print_name(item.name));
-                try!(self.nbsp());
-                try!(self.bopen());
-                try!(self.print_mod(_mod, &item.attrs));
-                try!(self.bclose(item.span));
-            }
-            hir::ItemForeignMod(ref nmod) => {
-                try!(self.head("extern"));
-                try!(self.word_nbsp(&nmod.abi.to_string()));
-                try!(self.bopen());
-                try!(self.print_foreign_mod(nmod, &item.attrs));
-                try!(self.bclose(item.span));
-            }
-            hir::ItemTy(ref ty, ref params) => {
-                try!(self.ibox(indent_unit));
-                try!(self.ibox(0));
-                try!(self.word_nbsp(&visibility_qualified(item.vis, "type")));
-                try!(self.print_name(item.name));
-                try!(self.print_generics(params));
-                try!(self.end()); // end the inner ibox
-
-                try!(self.print_where_clause(&params.where_clause));
-                try!(space(&mut self.s));
-                try!(self.word_space("="));
-                try!(self.print_type(&ty));
-                try!(word(&mut self.s, ";"));
-                try!(self.end()); // end the outer ibox
-            }
-            hir::ItemEnum(ref enum_definition, ref params) => {
-                try!(self.print_enum_def(enum_definition, params, item.name, item.span, item.vis));
-            }
-            hir::ItemStruct(ref struct_def, ref generics) => {
-                try!(self.head(&visibility_qualified(item.vis, "struct")));
-                try!(self.print_struct(struct_def, generics, item.name, item.span, true));
-            }
-
-            hir::ItemDefaultImpl(unsafety, ref trait_ref) => {
-                try!(self.head(""));
-                try!(self.print_visibility(item.vis));
-                try!(self.print_unsafety(unsafety));
-                try!(self.word_nbsp("impl"));
-                try!(self.print_trait_ref(trait_ref));
-                try!(space(&mut self.s));
-                try!(self.word_space("for"));
-                try!(self.word_space(".."));
-                try!(self.bopen());
-                try!(self.bclose(item.span));
-            }
-            hir::ItemImpl(unsafety,
-                          polarity,
-                          ref generics,
-                          ref opt_trait,
-                          ref ty,
-                          ref impl_items) => {
-                try!(self.head(""));
-                try!(self.print_visibility(item.vis));
-                try!(self.print_unsafety(unsafety));
-                try!(self.word_nbsp("impl"));
-
-                if generics.is_parameterized() {
-                    try!(self.print_generics(generics));
-                    try!(space(&mut self.s));
-                }
-
-                match polarity {
-                    hir::ImplPolarity::Negative => {
-                        try!(word(&mut self.s, "!"));
-                    }
-                    _ => {}
-                }
-
-                match opt_trait {
-                    &Some(ref t) => {
-                        try!(self.print_trait_ref(t));
-                        try!(space(&mut self.s));
-                        try!(self.word_space("for"));
-                    }
-                    &None => {}
-                }
-
-                try!(self.print_type(&ty));
-                try!(self.print_where_clause(&generics.where_clause));
-
-                try!(space(&mut self.s));
-                try!(self.bopen());
-                try!(self.print_inner_attributes(&item.attrs));
-                for impl_item in impl_items {
-                    try!(self.print_impl_item(impl_item));
-                }
-                try!(self.bclose(item.span));
-            }
-            hir::ItemTrait(unsafety, ref generics, ref bounds, ref trait_items) => {
-                try!(self.head(""));
-                try!(self.print_visibility(item.vis));
-                try!(self.print_unsafety(unsafety));
-                try!(self.word_nbsp("trait"));
-                try!(self.print_name(item.name));
-                try!(self.print_generics(generics));
-                let mut real_bounds = Vec::with_capacity(bounds.len());
-                for b in bounds.iter() {
-                    if let TraitTyParamBound(ref ptr, hir::TraitBoundModifier::Maybe) = *b {
-                        try!(space(&mut self.s));
-                        try!(self.word_space("for ?"));
-                        try!(self.print_trait_ref(&ptr.trait_ref));
-                    } else {
-                        real_bounds.push(b.clone());
-                    }
-                }
-                try!(self.print_bounds(":", &real_bounds[..]));
-                try!(self.print_where_clause(&generics.where_clause));
-                try!(word(&mut self.s, " "));
-                try!(self.bopen());
-                for trait_item in trait_items {
-                    try!(self.print_trait_item(trait_item));
-                }
-                try!(self.bclose(item.span));
-            }
-        }
-        self.ann.post(self, NodeItem(item))
-    }
-
-    fn print_trait_ref(&mut self, t: &hir::TraitRef) -> io::Result<()> {
-        self.print_path(&t.path, false, 0)
-    }
-
-    fn print_formal_lifetime_list(&mut self, lifetimes: &[hir::LifetimeDef]) -> io::Result<()> {
-        if !lifetimes.is_empty() {
-            try!(word(&mut self.s, "for<"));
-            let mut comma = false;
-            for lifetime_def in lifetimes {
-                if comma {
-                    try!(self.word_space(","))
-                }
-                try!(self.print_lifetime_def(lifetime_def));
-                comma = true;
-            }
-            try!(word(&mut self.s, ">"));
-        }
-        Ok(())
-    }
-
-    fn print_poly_trait_ref(&mut self, t: &hir::PolyTraitRef) -> io::Result<()> {
-        try!(self.print_formal_lifetime_list(&t.bound_lifetimes));
-        self.print_trait_ref(&t.trait_ref)
-    }
-
-    pub fn print_enum_def(&mut self,
-                          enum_definition: &hir::EnumDef,
-                          generics: &hir::Generics,
-                          name: ast::Name,
-                          span: codemap::Span,
-                          visibility: hir::Visibility)
-                          -> io::Result<()> {
-        try!(self.head(&visibility_qualified(visibility, "enum")));
-        try!(self.print_name(name));
-        try!(self.print_generics(generics));
-        try!(self.print_where_clause(&generics.where_clause));
-        try!(space(&mut self.s));
-        self.print_variants(&enum_definition.variants, span)
-    }
-
-    pub fn print_variants(&mut self,
-                          variants: &[hir::Variant],
-                          span: codemap::Span)
-                          -> io::Result<()> {
-        try!(self.bopen());
-        for v in variants {
-            try!(self.space_if_not_bol());
-            try!(self.maybe_print_comment(v.span.lo));
-            try!(self.print_outer_attributes(&v.node.attrs));
-            try!(self.ibox(indent_unit));
-            try!(self.print_variant(v));
-            try!(word(&mut self.s, ","));
-            try!(self.end());
-            try!(self.maybe_print_trailing_comment(v.span, None));
-        }
-        self.bclose(span)
-    }
-
-    pub fn print_visibility(&mut self, vis: hir::Visibility) -> io::Result<()> {
-        match vis {
-            hir::Public => self.word_nbsp("pub"),
-            hir::Inherited => Ok(()),
-        }
-    }
-
-    pub fn print_struct(&mut self,
-                        struct_def: &hir::VariantData,
-                        generics: &hir::Generics,
-                        name: ast::Name,
-                        span: codemap::Span,
-                        print_finalizer: bool)
-                        -> io::Result<()> {
-        try!(self.print_name(name));
-        try!(self.print_generics(generics));
-        if !struct_def.is_struct() {
-            if struct_def.is_tuple() {
-                try!(self.popen());
-                try!(self.commasep(Inconsistent, struct_def.fields(), |s, field| {
-                    match field.node.kind {
-                        hir::NamedField(..) => panic!("unexpected named field"),
-                        hir::UnnamedField(vis) => {
-                            try!(s.print_visibility(vis));
-                            try!(s.maybe_print_comment(field.span.lo));
-                            s.print_type(&field.node.ty)
-                        }
-                    }
-                }));
-                try!(self.pclose());
-            }
-            try!(self.print_where_clause(&generics.where_clause));
-            if print_finalizer {
-                try!(word(&mut self.s, ";"));
-            }
-            try!(self.end());
-            self.end() // close the outer-box
-        } else {
-            try!(self.print_where_clause(&generics.where_clause));
-            try!(self.nbsp());
-            try!(self.bopen());
-            try!(self.hardbreak_if_not_bol());
-
-            for field in struct_def.fields() {
-                match field.node.kind {
-                    hir::UnnamedField(..) => panic!("unexpected unnamed field"),
-                    hir::NamedField(name, visibility) => {
-                        try!(self.hardbreak_if_not_bol());
-                        try!(self.maybe_print_comment(field.span.lo));
-                        try!(self.print_outer_attributes(&field.node.attrs));
-                        try!(self.print_visibility(visibility));
-                        try!(self.print_name(name));
-                        try!(self.word_nbsp(":"));
-                        try!(self.print_type(&field.node.ty));
-                        try!(word(&mut self.s, ","));
-                    }
-                }
-            }
-
-            self.bclose(span)
-        }
-    }
-
-    pub fn print_variant(&mut self, v: &hir::Variant) -> io::Result<()> {
-        try!(self.head(""));
-        let generics = ::util::empty_generics();
-        try!(self.print_struct(&v.node.data, &generics, v.node.name, v.span, false));
-        match v.node.disr_expr {
-            Some(ref d) => {
-                try!(space(&mut self.s));
-                try!(self.word_space("="));
-                self.print_expr(&d)
-            }
-            _ => Ok(()),
-        }
-    }
-    pub fn print_method_sig(&mut self,
-                            name: ast::Name,
-                            m: &hir::MethodSig,
-                            vis: hir::Visibility)
-                            -> io::Result<()> {
-        self.print_fn(&m.decl,
-                      m.unsafety,
-                      m.constness,
-                      m.abi,
-                      Some(name),
-                      &m.generics,
-                      Some(&m.explicit_self.node),
-                      vis)
-    }
-
-    pub fn print_trait_item(&mut self, ti: &hir::TraitItem) -> io::Result<()> {
-        try!(self.ann.pre(self, NodeSubItem(ti.id)));
-        try!(self.hardbreak_if_not_bol());
-        try!(self.maybe_print_comment(ti.span.lo));
-        try!(self.print_outer_attributes(&ti.attrs));
-        match ti.node {
-            hir::ConstTraitItem(ref ty, ref default) => {
-                try!(self.print_associated_const(ti.name,
-                                                 &ty,
-                                                 default.as_ref().map(|expr| &**expr),
-                                                 hir::Inherited));
-            }
-            hir::MethodTraitItem(ref sig, ref body) => {
-                if body.is_some() {
-                    try!(self.head(""));
-                }
-                try!(self.print_method_sig(ti.name, sig, hir::Inherited));
-                if let Some(ref body) = *body {
-                    try!(self.nbsp());
-                    try!(self.print_block_with_attrs(body, &ti.attrs));
-                } else {
-                    try!(word(&mut self.s, ";"));
-                }
-            }
-            hir::TypeTraitItem(ref bounds, ref default) => {
-                try!(self.print_associated_type(ti.name,
-                                                Some(bounds),
-                                                default.as_ref().map(|ty| &**ty)));
-            }
-        }
-        self.ann.post(self, NodeSubItem(ti.id))
-    }
-
-    pub fn print_impl_item(&mut self, ii: &hir::ImplItem) -> io::Result<()> {
-        try!(self.ann.pre(self, NodeSubItem(ii.id)));
-        try!(self.hardbreak_if_not_bol());
-        try!(self.maybe_print_comment(ii.span.lo));
-        try!(self.print_outer_attributes(&ii.attrs));
-        match ii.node {
-            hir::ImplItemKind::Const(ref ty, ref expr) => {
-                try!(self.print_associated_const(ii.name, &ty, Some(&expr), ii.vis));
-            }
-            hir::ImplItemKind::Method(ref sig, ref body) => {
-                try!(self.head(""));
-                try!(self.print_method_sig(ii.name, sig, ii.vis));
-                try!(self.nbsp());
-                try!(self.print_block_with_attrs(body, &ii.attrs));
-            }
-            hir::ImplItemKind::Type(ref ty) => {
-                try!(self.print_associated_type(ii.name, None, Some(ty)));
-            }
-        }
-        self.ann.post(self, NodeSubItem(ii.id))
-    }
-
-    pub fn print_stmt(&mut self, st: &hir::Stmt) -> io::Result<()> {
-        try!(self.maybe_print_comment(st.span.lo));
-        match st.node {
-            hir::StmtDecl(ref decl, _) => {
-                try!(self.print_decl(&decl));
-            }
-            hir::StmtExpr(ref expr, _) => {
-                try!(self.space_if_not_bol());
-                try!(self.print_expr(&expr));
-            }
-            hir::StmtSemi(ref expr, _) => {
-                try!(self.space_if_not_bol());
-                try!(self.print_expr(&expr));
-                try!(word(&mut self.s, ";"));
-            }
-        }
-        if stmt_ends_with_semi(&st.node) {
-            try!(word(&mut self.s, ";"));
-        }
-        self.maybe_print_trailing_comment(st.span, None)
-    }
-
-    pub fn print_block(&mut self, blk: &hir::Block) -> io::Result<()> {
-        self.print_block_with_attrs(blk, &[])
-    }
-
-    pub fn print_block_unclosed(&mut self, blk: &hir::Block) -> io::Result<()> {
-        self.print_block_unclosed_indent(blk, indent_unit)
-    }
-
-    pub fn print_block_unclosed_indent(&mut self,
-                                       blk: &hir::Block,
-                                       indented: usize)
-                                       -> io::Result<()> {
-        self.print_block_maybe_unclosed(blk, indented, &[], false)
-    }
-
-    pub fn print_block_with_attrs(&mut self,
-                                  blk: &hir::Block,
-                                  attrs: &[ast::Attribute])
-                                  -> io::Result<()> {
-        self.print_block_maybe_unclosed(blk, indent_unit, attrs, true)
-    }
-
-    pub fn print_block_maybe_unclosed(&mut self,
-                                      blk: &hir::Block,
-                                      indented: usize,
-                                      attrs: &[ast::Attribute],
-                                      close_box: bool)
-                                      -> io::Result<()> {
-        match blk.rules {
-            hir::UnsafeBlock(..) => try!(self.word_space("unsafe")),
-            hir::PushUnsafeBlock(..) => try!(self.word_space("push_unsafe")),
-            hir::PopUnsafeBlock(..) => try!(self.word_space("pop_unsafe")),
-            hir::PushUnstableBlock => try!(self.word_space("push_unstable")),
-            hir::PopUnstableBlock => try!(self.word_space("pop_unstable")),
-            hir::DefaultBlock => (),
-        }
-        try!(self.maybe_print_comment(blk.span.lo));
-        try!(self.ann.pre(self, NodeBlock(blk)));
-        try!(self.bopen());
-
-        try!(self.print_inner_attributes(attrs));
-
-        for st in &blk.stmts {
-            try!(self.print_stmt(st));
-        }
-        match blk.expr {
-            Some(ref expr) => {
-                try!(self.space_if_not_bol());
-                try!(self.print_expr(&expr));
-                try!(self.maybe_print_trailing_comment(expr.span, Some(blk.span.hi)));
-            }
-            _ => (),
-        }
-        try!(self.bclose_maybe_open(blk.span, indented, close_box));
-        self.ann.post(self, NodeBlock(blk))
-    }
-
-    fn print_else(&mut self, els: Option<&hir::Expr>) -> io::Result<()> {
-        match els {
-            Some(_else) => {
-                match _else.node {
-                    // "another else-if"
-                    hir::ExprIf(ref i, ref then, ref e) => {
-                        try!(self.cbox(indent_unit - 1));
-                        try!(self.ibox(0));
-                        try!(word(&mut self.s, " else if "));
-                        try!(self.print_expr(&i));
-                        try!(space(&mut self.s));
-                        try!(self.print_block(&then));
-                        self.print_else(e.as_ref().map(|e| &**e))
-                    }
-                    // "final else"
-                    hir::ExprBlock(ref b) => {
-                        try!(self.cbox(indent_unit - 1));
-                        try!(self.ibox(0));
-                        try!(word(&mut self.s, " else "));
-                        self.print_block(&b)
-                    }
-                    // BLEAH, constraints would be great here
-                    _ => {
-                        panic!("print_if saw if with weird alternative");
-                    }
-                }
-            }
-            _ => Ok(()),
-        }
-    }
-
-    pub fn print_if(&mut self,
-                    test: &hir::Expr,
-                    blk: &hir::Block,
-                    elseopt: Option<&hir::Expr>)
-                    -> io::Result<()> {
-        try!(self.head("if"));
-        try!(self.print_expr(test));
-        try!(space(&mut self.s));
-        try!(self.print_block(blk));
-        self.print_else(elseopt)
-    }
-
-    pub fn print_if_let(&mut self,
-                        pat: &hir::Pat,
-                        expr: &hir::Expr,
-                        blk: &hir::Block,
-                        elseopt: Option<&hir::Expr>)
-                        -> io::Result<()> {
-        try!(self.head("if let"));
-        try!(self.print_pat(pat));
-        try!(space(&mut self.s));
-        try!(self.word_space("="));
-        try!(self.print_expr(expr));
-        try!(space(&mut self.s));
-        try!(self.print_block(blk));
-        self.print_else(elseopt)
-    }
-
-
-    fn print_call_post(&mut self, args: &[P<hir::Expr>]) -> io::Result<()> {
-        try!(self.popen());
-        try!(self.commasep_exprs(Inconsistent, args));
-        self.pclose()
-    }
-
-    pub fn print_expr_maybe_paren(&mut self, expr: &hir::Expr) -> io::Result<()> {
-        let needs_par = needs_parentheses(expr);
-        if needs_par {
-            try!(self.popen());
-        }
-        try!(self.print_expr(expr));
-        if needs_par {
-            try!(self.pclose());
-        }
-        Ok(())
-    }
-
-    fn print_expr_vec(&mut self, exprs: &[P<hir::Expr>]) -> io::Result<()> {
-        try!(self.ibox(indent_unit));
-        try!(word(&mut self.s, "["));
-        try!(self.commasep_exprs(Inconsistent, &exprs[..]));
-        try!(word(&mut self.s, "]"));
-        self.end()
-    }
-
-    fn print_expr_repeat(&mut self, element: &hir::Expr, count: &hir::Expr) -> io::Result<()> {
-        try!(self.ibox(indent_unit));
-        try!(word(&mut self.s, "["));
-        try!(self.print_expr(element));
-        try!(self.word_space(";"));
-        try!(self.print_expr(count));
-        try!(word(&mut self.s, "]"));
-        self.end()
-    }
-
-    fn print_expr_struct(&mut self,
-                         path: &hir::Path,
-                         fields: &[hir::Field],
-                         wth: &Option<P<hir::Expr>>)
-                         -> io::Result<()> {
-        try!(self.print_path(path, true, 0));
-        try!(word(&mut self.s, "{"));
-        try!(self.commasep_cmnt(Consistent,
-                                &fields[..],
-                                |s, field| {
-                                    try!(s.ibox(indent_unit));
-                                    try!(s.print_name(field.name.node));
-                                    try!(s.word_space(":"));
-                                    try!(s.print_expr(&field.expr));
-                                    s.end()
-                                },
-                                |f| f.span));
-        match *wth {
-            Some(ref expr) => {
-                try!(self.ibox(indent_unit));
-                if !fields.is_empty() {
-                    try!(word(&mut self.s, ","));
-                    try!(space(&mut self.s));
-                }
-                try!(word(&mut self.s, ".."));
-                try!(self.print_expr(&expr));
-                try!(self.end());
-            }
-            _ => if !fields.is_empty() {
-                try!(word(&mut self.s, ","))
-            },
-        }
-        try!(word(&mut self.s, "}"));
-        Ok(())
-    }
-
-    fn print_expr_tup(&mut self, exprs: &[P<hir::Expr>]) -> io::Result<()> {
-        try!(self.popen());
-        try!(self.commasep_exprs(Inconsistent, &exprs[..]));
-        if exprs.len() == 1 {
-            try!(word(&mut self.s, ","));
-        }
-        self.pclose()
-    }
-
-    fn print_expr_call(&mut self, func: &hir::Expr, args: &[P<hir::Expr>]) -> io::Result<()> {
-        try!(self.print_expr_maybe_paren(func));
-        self.print_call_post(args)
-    }
-
-    fn print_expr_method_call(&mut self,
-                              name: Spanned<ast::Name>,
-                              tys: &[P<hir::Ty>],
-                              args: &[P<hir::Expr>])
-                              -> io::Result<()> {
-        let base_args = &args[1..];
-        try!(self.print_expr(&args[0]));
-        try!(word(&mut self.s, "."));
-        try!(self.print_name(name.node));
-        if !tys.is_empty() {
-            try!(word(&mut self.s, "::<"));
-            try!(self.commasep(Inconsistent, tys, |s, ty| s.print_type(&ty)));
-            try!(word(&mut self.s, ">"));
-        }
-        self.print_call_post(base_args)
-    }
-
-    fn print_expr_binary(&mut self,
-                         op: hir::BinOp,
-                         lhs: &hir::Expr,
-                         rhs: &hir::Expr)
-                         -> io::Result<()> {
-        try!(self.print_expr(lhs));
-        try!(space(&mut self.s));
-        try!(self.word_space(::util::binop_to_string(op.node)));
-        self.print_expr(rhs)
-    }
-
-    fn print_expr_unary(&mut self, op: hir::UnOp, expr: &hir::Expr) -> io::Result<()> {
-        try!(word(&mut self.s, ::util::unop_to_string(op)));
-        self.print_expr_maybe_paren(expr)
-    }
-
-    fn print_expr_addr_of(&mut self,
-                          mutability: hir::Mutability,
-                          expr: &hir::Expr)
-                          -> io::Result<()> {
-        try!(word(&mut self.s, "&"));
-        try!(self.print_mutability(mutability));
-        self.print_expr_maybe_paren(expr)
-    }
-
-    pub fn print_expr(&mut self, expr: &hir::Expr) -> io::Result<()> {
-        try!(self.maybe_print_comment(expr.span.lo));
-        try!(self.ibox(indent_unit));
-        try!(self.ann.pre(self, NodeExpr(expr)));
-        match expr.node {
-            hir::ExprBox(ref expr) => {
-                try!(self.word_space("box"));
-                try!(self.print_expr(expr));
-            }
-            hir::ExprVec(ref exprs) => {
-                try!(self.print_expr_vec(&exprs[..]));
-            }
-            hir::ExprRepeat(ref element, ref count) => {
-                try!(self.print_expr_repeat(&element, &count));
-            }
-            hir::ExprStruct(ref path, ref fields, ref wth) => {
-                try!(self.print_expr_struct(path, &fields[..], wth));
-            }
-            hir::ExprTup(ref exprs) => {
-                try!(self.print_expr_tup(&exprs[..]));
-            }
-            hir::ExprCall(ref func, ref args) => {
-                try!(self.print_expr_call(&func, &args[..]));
-            }
-            hir::ExprMethodCall(name, ref tys, ref args) => {
-                try!(self.print_expr_method_call(name, &tys[..], &args[..]));
-            }
-            hir::ExprBinary(op, ref lhs, ref rhs) => {
-                try!(self.print_expr_binary(op, &lhs, &rhs));
-            }
-            hir::ExprUnary(op, ref expr) => {
-                try!(self.print_expr_unary(op, &expr));
-            }
-            hir::ExprAddrOf(m, ref expr) => {
-                try!(self.print_expr_addr_of(m, &expr));
-            }
-            hir::ExprLit(ref lit) => {
-                try!(self.print_literal(&lit));
-            }
-            hir::ExprCast(ref expr, ref ty) => {
-                try!(self.print_expr(&expr));
-                try!(space(&mut self.s));
-                try!(self.word_space("as"));
-                try!(self.print_type(&ty));
-            }
-            hir::ExprType(ref expr, ref ty) => {
-                try!(self.print_expr(&expr));
-                try!(self.word_space(":"));
-                try!(self.print_type(&ty));
-            }
-            hir::ExprIf(ref test, ref blk, ref elseopt) => {
-                try!(self.print_if(&test, &blk, elseopt.as_ref().map(|e| &**e)));
-            }
-            hir::ExprWhile(ref test, ref blk, opt_ident) => {
-                if let Some(ident) = opt_ident {
-                    try!(self.print_name(ident.name));
-                    try!(self.word_space(":"));
-                }
-                try!(self.head("while"));
-                try!(self.print_expr(&test));
-                try!(space(&mut self.s));
-                try!(self.print_block(&blk));
-            }
-            hir::ExprLoop(ref blk, opt_ident) => {
-                if let Some(ident) = opt_ident {
-                    try!(self.print_name(ident.name));
-                    try!(self.word_space(":"));
-                }
-                try!(self.head("loop"));
-                try!(space(&mut self.s));
-                try!(self.print_block(&blk));
-            }
-            hir::ExprMatch(ref expr, ref arms, _) => {
-                try!(self.cbox(indent_unit));
-                try!(self.ibox(4));
-                try!(self.word_nbsp("match"));
-                try!(self.print_expr(&expr));
-                try!(space(&mut self.s));
-                try!(self.bopen());
-                for arm in arms {
-                    try!(self.print_arm(arm));
-                }
-                try!(self.bclose_(expr.span, indent_unit));
-            }
-            hir::ExprClosure(capture_clause, ref decl, ref body) => {
-                try!(self.print_capture_clause(capture_clause));
-
-                try!(self.print_fn_block_args(&decl));
-                try!(space(&mut self.s));
-
-                let default_return = match decl.output {
-                    hir::DefaultReturn(..) => true,
-                    _ => false,
-                };
-
-                if !default_return || !body.stmts.is_empty() || body.expr.is_none() {
-                    try!(self.print_block_unclosed(&body));
-                } else {
-                    // we extract the block, so as not to create another set of boxes
-                    match body.expr.as_ref().unwrap().node {
-                        hir::ExprBlock(ref blk) => {
-                            try!(self.print_block_unclosed(&blk));
-                        }
-                        _ => {
-                            // this is a bare expression
-                            try!(self.print_expr(body.expr.as_ref().map(|e| &**e).unwrap()));
-                            try!(self.end()); // need to close a box
-                        }
-                    }
-                }
-                // A box will be closed by print_expr, but we didn't want an overall
-                // wrapper, so we closed the corresponding opening. Now create an
-                // empty box to satisfy the close.
-                try!(self.ibox(0));
-            }
-            hir::ExprBlock(ref blk) => {
-                // containing cbox, will be closed by print-block at }
-                try!(self.cbox(indent_unit));
-                // head-box, will be closed by print-block after {
-                try!(self.ibox(0));
-                try!(self.print_block(&blk));
-            }
-            hir::ExprAssign(ref lhs, ref rhs) => {
-                try!(self.print_expr(&lhs));
-                try!(space(&mut self.s));
-                try!(self.word_space("="));
-                try!(self.print_expr(&rhs));
-            }
-            hir::ExprAssignOp(op, ref lhs, ref rhs) => {
-                try!(self.print_expr(&lhs));
-                try!(space(&mut self.s));
-                try!(word(&mut self.s, ::util::binop_to_string(op.node)));
-                try!(self.word_space("="));
-                try!(self.print_expr(&rhs));
-            }
-            hir::ExprField(ref expr, name) => {
-                try!(self.print_expr(&expr));
-                try!(word(&mut self.s, "."));
-                try!(self.print_name(name.node));
-            }
-            hir::ExprTupField(ref expr, id) => {
-                try!(self.print_expr(&expr));
-                try!(word(&mut self.s, "."));
-                try!(self.print_usize(id.node));
-            }
-            hir::ExprIndex(ref expr, ref index) => {
-                try!(self.print_expr(&expr));
-                try!(word(&mut self.s, "["));
-                try!(self.print_expr(&index));
-                try!(word(&mut self.s, "]"));
-            }
-            hir::ExprRange(ref start, ref end) => {
-                if let &Some(ref e) = start {
-                    try!(self.print_expr(&e));
-                }
-                try!(word(&mut self.s, ".."));
-                if let &Some(ref e) = end {
-                    try!(self.print_expr(&e));
-                }
-            }
-            hir::ExprPath(None, ref path) => {
-                try!(self.print_path(path, true, 0))
-            }
-            hir::ExprPath(Some(ref qself), ref path) => {
-                try!(self.print_qpath(path, qself, true))
-            }
-            hir::ExprBreak(opt_ident) => {
-                try!(word(&mut self.s, "break"));
-                try!(space(&mut self.s));
-                if let Some(ident) = opt_ident {
-                    try!(self.print_name(ident.node.name));
-                    try!(space(&mut self.s));
-                }
-            }
-            hir::ExprAgain(opt_ident) => {
-                try!(word(&mut self.s, "continue"));
-                try!(space(&mut self.s));
-                if let Some(ident) = opt_ident {
-                    try!(self.print_name(ident.node.name));
-                    try!(space(&mut self.s))
-                }
-            }
-            hir::ExprRet(ref result) => {
-                try!(word(&mut self.s, "return"));
-                match *result {
-                    Some(ref expr) => {
-                        try!(word(&mut self.s, " "));
-                        try!(self.print_expr(&expr));
-                    }
-                    _ => (),
-                }
-            }
-            hir::ExprInlineAsm(ref a) => {
-                try!(word(&mut self.s, "asm!"));
-                try!(self.popen());
-                try!(self.print_string(&a.asm, a.asm_str_style));
-                try!(self.word_space(":"));
-
-                try!(self.commasep(Inconsistent, &a.outputs, |s, out| {
-                    match out.constraint.slice_shift_char() {
-                        Some(('=', operand)) if out.is_rw => {
-                            try!(s.print_string(&format!("+{}", operand), ast::StrStyle::Cooked))
-                        }
-                        _ => try!(s.print_string(&out.constraint, ast::StrStyle::Cooked)),
-                    }
-                    try!(s.popen());
-                    try!(s.print_expr(&out.expr));
-                    try!(s.pclose());
-                    Ok(())
-                }));
-                try!(space(&mut self.s));
-                try!(self.word_space(":"));
-
-                try!(self.commasep(Inconsistent, &a.inputs, |s, &(ref co, ref o)| {
-                    try!(s.print_string(&co, ast::StrStyle::Cooked));
-                    try!(s.popen());
-                    try!(s.print_expr(&o));
-                    try!(s.pclose());
-                    Ok(())
-                }));
-                try!(space(&mut self.s));
-                try!(self.word_space(":"));
-
-                try!(self.commasep(Inconsistent, &a.clobbers, |s, co| {
-                    try!(s.print_string(&co, ast::StrStyle::Cooked));
-                    Ok(())
-                }));
-
-                let mut options = vec![];
-                if a.volatile {
-                    options.push("volatile");
-                }
-                if a.alignstack {
-                    options.push("alignstack");
-                }
-                if a.dialect == ast::AsmDialect::Intel {
-                    options.push("intel");
-                }
-
-                if !options.is_empty() {
-                    try!(space(&mut self.s));
-                    try!(self.word_space(":"));
-                    try!(self.commasep(Inconsistent, &options, |s, &co| {
-                        try!(s.print_string(co, ast::StrStyle::Cooked));
-                        Ok(())
-                    }));
-                }
-
-                try!(self.pclose());
-            }
-        }
-        try!(self.ann.post(self, NodeExpr(expr)));
-        self.end()
-    }
-
-    pub fn print_local_decl(&mut self, loc: &hir::Local) -> io::Result<()> {
-        try!(self.print_pat(&loc.pat));
-        if let Some(ref ty) = loc.ty {
-            try!(self.word_space(":"));
-            try!(self.print_type(&ty));
-        }
-        Ok(())
-    }
-
-    pub fn print_decl(&mut self, decl: &hir::Decl) -> io::Result<()> {
-        try!(self.maybe_print_comment(decl.span.lo));
-        match decl.node {
-            hir::DeclLocal(ref loc) => {
-                try!(self.space_if_not_bol());
-                try!(self.ibox(indent_unit));
-                try!(self.word_nbsp("let"));
-
-                try!(self.ibox(indent_unit));
-                try!(self.print_local_decl(&loc));
-                try!(self.end());
-                if let Some(ref init) = loc.init {
-                    try!(self.nbsp());
-                    try!(self.word_space("="));
-                    try!(self.print_expr(&init));
-                }
-                self.end()
-            }
-            hir::DeclItem(ref item) => {
-                self.print_item_id(item)
-            }
-        }
-    }
-
-    pub fn print_usize(&mut self, i: usize) -> io::Result<()> {
-        word(&mut self.s, &i.to_string())
-    }
-
-    pub fn print_name(&mut self, name: ast::Name) -> io::Result<()> {
-        try!(word(&mut self.s, &name.as_str()));
-        self.ann.post(self, NodeName(&name))
-    }
-
-    pub fn print_for_decl(&mut self, loc: &hir::Local, coll: &hir::Expr) -> io::Result<()> {
-        try!(self.print_local_decl(loc));
-        try!(space(&mut self.s));
-        try!(self.word_space("in"));
-        self.print_expr(coll)
-    }
-
-    fn print_path(&mut self,
-                  path: &hir::Path,
-                  colons_before_params: bool,
-                  depth: usize)
-                  -> io::Result<()> {
-        try!(self.maybe_print_comment(path.span.lo));
-
-        let mut first = !path.global;
-        for segment in &path.segments[..path.segments.len() - depth] {
-            if first {
-                first = false
-            } else {
-                try!(word(&mut self.s, "::"))
-            }
-
-            try!(self.print_name(segment.identifier.name));
-
-            try!(self.print_path_parameters(&segment.parameters, colons_before_params));
-        }
-
-        Ok(())
-    }
-
-    fn print_qpath(&mut self,
-                   path: &hir::Path,
-                   qself: &hir::QSelf,
-                   colons_before_params: bool)
-                   -> io::Result<()> {
-        try!(word(&mut self.s, "<"));
-        try!(self.print_type(&qself.ty));
-        if qself.position > 0 {
-            try!(space(&mut self.s));
-            try!(self.word_space("as"));
-            let depth = path.segments.len() - qself.position;
-            try!(self.print_path(&path, false, depth));
-        }
-        try!(word(&mut self.s, ">"));
-        try!(word(&mut self.s, "::"));
-        let item_segment = path.segments.last().unwrap();
-        try!(self.print_name(item_segment.identifier.name));
-        self.print_path_parameters(&item_segment.parameters, colons_before_params)
-    }
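Illustration (not part of this diff): print_qpath handles fully qualified
paths; `qself.position` decides how much of the path is printed inside the
angle brackets, giving forms such as:

    <Vec<u8> as Default>::default
    <T>::Item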
-
-    fn print_path_parameters(&mut self,
-                             parameters: &hir::PathParameters,
-                             colons_before_params: bool)
-                             -> io::Result<()> {
-        if parameters.is_empty() {
-            return Ok(());
-        }
-
-        if colons_before_params {
-            try!(word(&mut self.s, "::"))
-        }
-
-        match *parameters {
-            hir::AngleBracketedParameters(ref data) => {
-                try!(word(&mut self.s, "<"));
-
-                let mut comma = false;
-                for lifetime in &data.lifetimes {
-                    if comma {
-                        try!(self.word_space(","))
-                    }
-                    try!(self.print_lifetime(lifetime));
-                    comma = true;
-                }
-
-                if !data.types.is_empty() {
-                    if comma {
-                        try!(self.word_space(","))
-                    }
-                    try!(self.commasep(Inconsistent, &data.types, |s, ty| s.print_type(&ty)));
-                    comma = true;
-                }
-
-                for binding in data.bindings.iter() {
-                    if comma {
-                        try!(self.word_space(","))
-                    }
-                    try!(self.print_name(binding.name));
-                    try!(space(&mut self.s));
-                    try!(self.word_space("="));
-                    try!(self.print_type(&binding.ty));
-                    comma = true;
-                }
-
-                try!(word(&mut self.s, ">"))
-            }
-
-            hir::ParenthesizedParameters(ref data) => {
-                try!(word(&mut self.s, "("));
-                try!(self.commasep(Inconsistent, &data.inputs, |s, ty| s.print_type(&ty)));
-                try!(word(&mut self.s, ")"));
-
-                match data.output {
-                    None => {}
-                    Some(ref ty) => {
-                        try!(self.space_if_not_bol());
-                        try!(self.word_space("->"));
-                        try!(self.print_type(&ty));
-                    }
-                }
-            }
-        }
-
-        Ok(())
-    }
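Illustration (not part of this diff): the two arms above correspond to the
angle-bracketed and parenthesized parameter forms, e.g.:

    SomeTrait<'a, T, Item = u8>    // AngleBracketedParameters
    Fn(u8, u8) -> bool             // ParenthesizedParameters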
-
-    pub fn print_pat(&mut self, pat: &hir::Pat) -> io::Result<()> {
-        try!(self.maybe_print_comment(pat.span.lo));
-        try!(self.ann.pre(self, NodePat(pat)));
-        // Pat isn't normalized, but the beauty of it
-        // is that it doesn't matter
-        match pat.node {
-            PatKind::Wild => try!(word(&mut self.s, "_")),
-            PatKind::Ident(binding_mode, ref path1, ref sub) => {
-                match binding_mode {
-                    hir::BindByRef(mutbl) => {
-                        try!(self.word_nbsp("ref"));
-                        try!(self.print_mutability(mutbl));
-                    }
-                    hir::BindByValue(hir::MutImmutable) => {}
-                    hir::BindByValue(hir::MutMutable) => {
-                        try!(self.word_nbsp("mut"));
-                    }
-                }
-                try!(self.print_name(path1.node.name));
-                match *sub {
-                    Some(ref p) => {
-                        try!(word(&mut self.s, "@"));
-                        try!(self.print_pat(&p));
-                    }
-                    None => (),
-                }
-            }
-            PatKind::TupleStruct(ref path, ref args_) => {
-                try!(self.print_path(path, true, 0));
-                match *args_ {
-                    None => try!(word(&mut self.s, "(..)")),
-                    Some(ref args) => {
-                        try!(self.popen());
-                        try!(self.commasep(Inconsistent, &args[..], |s, p| s.print_pat(&p)));
-                        try!(self.pclose());
-                    }
-                }
-            }
-            PatKind::Path(ref path) => {
-                try!(self.print_path(path, true, 0));
-            }
-            PatKind::QPath(ref qself, ref path) => {
-                try!(self.print_qpath(path, qself, false));
-            }
-            PatKind::Struct(ref path, ref fields, etc) => {
-                try!(self.print_path(path, true, 0));
-                try!(self.nbsp());
-                try!(self.word_space("{"));
-                try!(self.commasep_cmnt(Consistent,
-                                        &fields[..],
-                                        |s, f| {
-                                            try!(s.cbox(indent_unit));
-                                            if !f.node.is_shorthand {
-                                                try!(s.print_name(f.node.name));
-                                                try!(s.word_nbsp(":"));
-                                            }
-                                            try!(s.print_pat(&f.node.pat));
-                                            s.end()
-                                        },
-                                        |f| f.node.pat.span));
-                if etc {
-                    if !fields.is_empty() {
-                        try!(self.word_space(","));
-                    }
-                    try!(word(&mut self.s, ".."));
-                }
-                try!(space(&mut self.s));
-                try!(word(&mut self.s, "}"));
-            }
-            PatKind::Tup(ref elts) => {
-                try!(self.popen());
-                try!(self.commasep(Inconsistent, &elts[..], |s, p| s.print_pat(&p)));
-                if elts.len() == 1 {
-                    try!(word(&mut self.s, ","));
-                }
-                try!(self.pclose());
-            }
-            PatKind::Box(ref inner) => {
-                try!(word(&mut self.s, "box "));
-                try!(self.print_pat(&inner));
-            }
-            PatKind::Ref(ref inner, mutbl) => {
-                try!(word(&mut self.s, "&"));
-                if mutbl == hir::MutMutable {
-                    try!(word(&mut self.s, "mut "));
-                }
-                try!(self.print_pat(&inner));
-            }
-            PatKind::Lit(ref e) => try!(self.print_expr(&e)),
-            PatKind::Range(ref begin, ref end) => {
-                try!(self.print_expr(&begin));
-                try!(space(&mut self.s));
-                try!(word(&mut self.s, "..."));
-                try!(self.print_expr(&end));
-            }
-            PatKind::Vec(ref before, ref slice, ref after) => {
-                try!(word(&mut self.s, "["));
-                try!(self.commasep(Inconsistent, &before[..], |s, p| s.print_pat(&p)));
-                if let Some(ref p) = *slice {
-                    if !before.is_empty() {
-                        try!(self.word_space(","));
-                    }
-                    if p.node != PatKind::Wild {
-                        try!(self.print_pat(&p));
-                    }
-                    try!(word(&mut self.s, ".."));
-                    if !after.is_empty() {
-                        try!(self.word_space(","));
-                    }
-                }
-                try!(self.commasep(Inconsistent, &after[..], |s, p| s.print_pat(&p)));
-                try!(word(&mut self.s, "]"));
-            }
-        }
-        self.ann.post(self, NodePat(pat))
-    }
-
-    fn print_arm(&mut self, arm: &hir::Arm) -> io::Result<()> {
-        // I have no idea why this check is necessary, but here it
-        // is :(
-        if arm.attrs.is_empty() {
-            try!(space(&mut self.s));
-        }
-        try!(self.cbox(indent_unit));
-        try!(self.ibox(0));
-        try!(self.print_outer_attributes(&arm.attrs));
-        let mut first = true;
-        for p in &arm.pats {
-            if first {
-                first = false;
-            } else {
-                try!(space(&mut self.s));
-                try!(self.word_space("|"));
-            }
-            try!(self.print_pat(&p));
-        }
-        try!(space(&mut self.s));
-        if let Some(ref e) = arm.guard {
-            try!(self.word_space("if"));
-            try!(self.print_expr(&e));
-            try!(space(&mut self.s));
-        }
-        try!(self.word_space("=>"));
-
-        match arm.body.node {
-            hir::ExprBlock(ref blk) => {
-                // the block will close the pattern's ibox
-                try!(self.print_block_unclosed_indent(&blk, indent_unit));
-
-                // If it is a user-provided unsafe block, print a comma after it
-                if let hir::UnsafeBlock(hir::UserProvided) = blk.rules {
-                    try!(word(&mut self.s, ","));
-                }
-            }
-            _ => {
-                try!(self.end()); // close the ibox for the pattern
-                try!(self.print_expr(&arm.body));
-                try!(word(&mut self.s, ","));
-            }
-        }
-        self.end() // close enclosing cbox
-    }
-
-    // Returns whether it printed anything
-    fn print_explicit_self(&mut self,
-                           explicit_self: &hir::ExplicitSelf_,
-                           mutbl: hir::Mutability)
-                           -> io::Result<bool> {
-        try!(self.print_mutability(mutbl));
-        match *explicit_self {
-            hir::SelfStatic => {
-                return Ok(false);
-            }
-            hir::SelfValue(_) => {
-                try!(word(&mut self.s, "self"));
-            }
-            hir::SelfRegion(ref lt, m, _) => {
-                try!(word(&mut self.s, "&"));
-                try!(self.print_opt_lifetime(lt));
-                try!(self.print_mutability(m));
-                try!(word(&mut self.s, "self"));
-            }
-            hir::SelfExplicit(ref typ, _) => {
-                try!(word(&mut self.s, "self"));
-                try!(self.word_space(":"));
-                try!(self.print_type(&typ));
-            }
-        }
-        return Ok(true);
-    }
-
-    pub fn print_fn(&mut self,
-                    decl: &hir::FnDecl,
-                    unsafety: hir::Unsafety,
-                    constness: hir::Constness,
-                    abi: Abi,
-                    name: Option<ast::Name>,
-                    generics: &hir::Generics,
-                    opt_explicit_self: Option<&hir::ExplicitSelf_>,
-                    vis: hir::Visibility)
-                    -> io::Result<()> {
-        try!(self.print_fn_header_info(unsafety, constness, abi, vis));
-
-        if let Some(name) = name {
-            try!(self.nbsp());
-            try!(self.print_name(name));
-        }
-        try!(self.print_generics(generics));
-        try!(self.print_fn_args_and_ret(decl, opt_explicit_self));
-        self.print_where_clause(&generics.where_clause)
-    }
-
-    pub fn print_fn_args(&mut self,
-                         decl: &hir::FnDecl,
-                         opt_explicit_self: Option<&hir::ExplicitSelf_>,
-                         is_closure: bool)
-                         -> io::Result<()> {
-        // It is unfortunate to duplicate the commasep logic, but we want the
-        // self type and the args all in the same box.
-        try!(self.rbox(0, Inconsistent));
-        let mut first = true;
-        if let Some(explicit_self) = opt_explicit_self {
-            let m = match explicit_self {
-                &hir::SelfStatic => hir::MutImmutable,
-                _ => match decl.inputs[0].pat.node {
-                    PatKind::Ident(hir::BindByValue(m), _, _) => m,
-                    _ => hir::MutImmutable,
-                },
-            };
-            first = !try!(self.print_explicit_self(explicit_self, m));
-        }
-
-        // HACK(eddyb) ignore the separately printed self argument.
-        let args = if first {
-            &decl.inputs[..]
-        } else {
-            &decl.inputs[1..]
-        };
-
-        for arg in args {
-            if first {
-                first = false;
-            } else {
-                try!(self.word_space(","));
-            }
-            try!(self.print_arg(arg, is_closure));
-        }
-
-        self.end()
-    }
-
-    pub fn print_fn_args_and_ret(&mut self,
-                                 decl: &hir::FnDecl,
-                                 opt_explicit_self: Option<&hir::ExplicitSelf_>)
-                                 -> io::Result<()> {
-        try!(self.popen());
-        try!(self.print_fn_args(decl, opt_explicit_self, false));
-        if decl.variadic {
-            try!(word(&mut self.s, ", ..."));
-        }
-        try!(self.pclose());
-
-        self.print_fn_output(decl)
-    }
-
-    pub fn print_fn_block_args(&mut self, decl: &hir::FnDecl) -> io::Result<()> {
-        try!(word(&mut self.s, "|"));
-        try!(self.print_fn_args(decl, None, true));
-        try!(word(&mut self.s, "|"));
-
-        if let hir::DefaultReturn(..) = decl.output {
-            return Ok(());
-        }
-
-        try!(self.space_if_not_bol());
-        try!(self.word_space("->"));
-        match decl.output {
-            hir::Return(ref ty) => {
-                try!(self.print_type(&ty));
-                self.maybe_print_comment(ty.span.lo)
-            }
-            hir::DefaultReturn(..) => unreachable!(),
-            hir::NoReturn(span) => {
-                try!(self.word_nbsp("!"));
-                self.maybe_print_comment(span.lo)
-            }
-        }
-    }
-
-    pub fn print_capture_clause(&mut self, capture_clause: hir::CaptureClause) -> io::Result<()> {
-        match capture_clause {
-            hir::CaptureByValue => self.word_space("move"),
-            hir::CaptureByRef => Ok(()),
-        }
-    }
-
-    pub fn print_bounds(&mut self, prefix: &str, bounds: &[hir::TyParamBound]) -> io::Result<()> {
-        if !bounds.is_empty() {
-            try!(word(&mut self.s, prefix));
-            let mut first = true;
-            for bound in bounds {
-                try!(self.nbsp());
-                if first {
-                    first = false;
-                } else {
-                    try!(self.word_space("+"));
-                }
-
-                try!(match *bound {
-                    TraitTyParamBound(ref tref, TraitBoundModifier::None) => {
-                        self.print_poly_trait_ref(tref)
-                    }
-                    TraitTyParamBound(ref tref, TraitBoundModifier::Maybe) => {
-                        try!(word(&mut self.s, "?"));
-                        self.print_poly_trait_ref(tref)
-                    }
-                    RegionTyParamBound(ref lt) => {
-                        self.print_lifetime(lt)
-                    }
-                })
-            }
-            Ok(())
-        } else {
-            Ok(())
-        }
-    }
-
-    pub fn print_lifetime(&mut self, lifetime: &hir::Lifetime) -> io::Result<()> {
-        self.print_name(lifetime.name)
-    }
-
-    pub fn print_lifetime_def(&mut self, lifetime: &hir::LifetimeDef) -> io::Result<()> {
-        try!(self.print_lifetime(&lifetime.lifetime));
-        let mut sep = ":";
-        for v in &lifetime.bounds {
-            try!(word(&mut self.s, sep));
-            try!(self.print_lifetime(v));
-            sep = "+";
-        }
-        Ok(())
-    }
-
-    pub fn print_generics(&mut self, generics: &hir::Generics) -> io::Result<()> {
-        let total = generics.lifetimes.len() + generics.ty_params.len();
-        if total == 0 {
-            return Ok(());
-        }
-
-        try!(word(&mut self.s, "<"));
-
-        let mut ints = Vec::new();
-        for i in 0..total {
-            ints.push(i);
-        }
-
-        try!(self.commasep(Inconsistent, &ints[..], |s, &idx| {
-            if idx < generics.lifetimes.len() {
-                let lifetime = &generics.lifetimes[idx];
-                s.print_lifetime_def(lifetime)
-            } else {
-                let idx = idx - generics.lifetimes.len();
-                let param = &generics.ty_params[idx];
-                s.print_ty_param(param)
-            }
-        }));
-
-        try!(word(&mut self.s, ">"));
-        Ok(())
-    }
-
-    pub fn print_ty_param(&mut self, param: &hir::TyParam) -> io::Result<()> {
-        try!(self.print_name(param.name));
-        try!(self.print_bounds(":", &param.bounds));
-        match param.default {
-            Some(ref default) => {
-                try!(space(&mut self.s));
-                try!(self.word_space("="));
-                self.print_type(&default)
-            }
-            _ => Ok(()),
-        }
-    }
-
-    pub fn print_where_clause(&mut self, where_clause: &hir::WhereClause) -> io::Result<()> {
-        if where_clause.predicates.is_empty() {
-            return Ok(());
-        }
-
-        try!(space(&mut self.s));
-        try!(self.word_space("where"));
-
-        for (i, predicate) in where_clause.predicates.iter().enumerate() {
-            if i != 0 {
-                try!(self.word_space(","));
-            }
-
-            match predicate {
-                &hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate{ref bound_lifetimes,
-                                                                              ref bounded_ty,
-                                                                              ref bounds,
-                                                                              ..}) => {
-                    try!(self.print_formal_lifetime_list(bound_lifetimes));
-                    try!(self.print_type(&bounded_ty));
-                    try!(self.print_bounds(":", bounds));
-                }
-                &hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate{ref lifetime,
-                                                                                ref bounds,
-                                                                                ..}) => {
-                    try!(self.print_lifetime(lifetime));
-                    try!(word(&mut self.s, ":"));
-
-                    for (i, bound) in bounds.iter().enumerate() {
-                        try!(self.print_lifetime(bound));
-
-                        if i != 0 {
-                            try!(word(&mut self.s, ":"));
-                        }
-                    }
-                }
-                &hir::WherePredicate::EqPredicate(hir::WhereEqPredicate{ref path, ref ty, ..}) => {
-                    try!(self.print_path(path, false, 0));
-                    try!(space(&mut self.s));
-                    try!(self.word_space("="));
-                    try!(self.print_type(&ty));
-                }
-            }
-        }
-
-        Ok(())
-    }
-
-    pub fn print_view_path(&mut self, vp: &hir::ViewPath) -> io::Result<()> {
-        match vp.node {
-            hir::ViewPathSimple(name, ref path) => {
-                try!(self.print_path(path, false, 0));
-
-                if path.segments.last().unwrap().identifier.name != name {
-                    try!(space(&mut self.s));
-                    try!(self.word_space("as"));
-                    try!(self.print_name(name));
-                }
-
-                Ok(())
-            }
-
-            hir::ViewPathGlob(ref path) => {
-                try!(self.print_path(path, false, 0));
-                word(&mut self.s, "::*")
-            }
-
-            hir::ViewPathList(ref path, ref segments) => {
-                if path.segments.is_empty() {
-                    try!(word(&mut self.s, "{"));
-                } else {
-                    try!(self.print_path(path, false, 0));
-                    try!(word(&mut self.s, "::{"));
-                }
-                try!(self.commasep(Inconsistent, &segments[..], |s, w| {
-                    match w.node {
-                        hir::PathListIdent { name, .. } => {
-                            s.print_name(name)
-                        }
-                        hir::PathListMod { .. } => {
-                            word(&mut s.s, "self")
-                        }
-                    }
-                }));
-                word(&mut self.s, "}")
-            }
-        }
-    }
-
-    pub fn print_mutability(&mut self, mutbl: hir::Mutability) -> io::Result<()> {
-        match mutbl {
-            hir::MutMutable => self.word_nbsp("mut"),
-            hir::MutImmutable => Ok(()),
-        }
-    }
-
-    pub fn print_mt(&mut self, mt: &hir::MutTy) -> io::Result<()> {
-        try!(self.print_mutability(mt.mutbl));
-        self.print_type(&mt.ty)
-    }
-
-    pub fn print_arg(&mut self, input: &hir::Arg, is_closure: bool) -> io::Result<()> {
-        try!(self.ibox(indent_unit));
-        match input.ty.node {
-            hir::TyInfer if is_closure => try!(self.print_pat(&input.pat)),
-            _ => {
-                match input.pat.node {
-                    PatKind::Ident(_, ref path1, _) if
-                        path1.node.name ==
-                            parse::token::special_idents::invalid.name => {
-                        // Do nothing.
-                    }
-                    _ => {
-                        try!(self.print_pat(&input.pat));
-                        try!(word(&mut self.s, ":"));
-                        try!(space(&mut self.s));
-                    }
-                }
-                try!(self.print_type(&input.ty));
-            }
-        }
-        self.end()
-    }
-
-    pub fn print_fn_output(&mut self, decl: &hir::FnDecl) -> io::Result<()> {
-        if let hir::DefaultReturn(..) = decl.output {
-            return Ok(());
-        }
-
-        try!(self.space_if_not_bol());
-        try!(self.ibox(indent_unit));
-        try!(self.word_space("->"));
-        match decl.output {
-            hir::NoReturn(_) => try!(self.word_nbsp("!")),
-            hir::DefaultReturn(..) => unreachable!(),
-            hir::Return(ref ty) => try!(self.print_type(&ty)),
-        }
-        try!(self.end());
-
-        match decl.output {
-            hir::Return(ref output) => self.maybe_print_comment(output.span.lo),
-            _ => Ok(()),
-        }
-    }
-
-    pub fn print_ty_fn(&mut self,
-                       abi: Abi,
-                       unsafety: hir::Unsafety,
-                       decl: &hir::FnDecl,
-                       name: Option<ast::Name>,
-                       generics: &hir::Generics,
-                       opt_explicit_self: Option<&hir::ExplicitSelf_>)
-                       -> io::Result<()> {
-        try!(self.ibox(indent_unit));
-        if !generics.lifetimes.is_empty() || !generics.ty_params.is_empty() {
-            try!(word(&mut self.s, "for"));
-            try!(self.print_generics(generics));
-        }
-        let generics = hir::Generics {
-            lifetimes: hir::HirVec::new(),
-            ty_params: hir::HirVec::new(),
-            where_clause: hir::WhereClause {
-                id: ast::DUMMY_NODE_ID,
-                predicates: hir::HirVec::new(),
-            },
-        };
-        try!(self.print_fn(decl,
-                           unsafety,
-                           hir::Constness::NotConst,
-                           abi,
-                           name,
-                           &generics,
-                           opt_explicit_self,
-                           hir::Inherited));
-        self.end()
-    }
-
-    pub fn maybe_print_trailing_comment(&mut self,
-                                        span: codemap::Span,
-                                        next_pos: Option<BytePos>)
-                                        -> io::Result<()> {
-        let cm = match self.cm {
-            Some(cm) => cm,
-            _ => return Ok(()),
-        };
-        match self.next_comment() {
-            Some(ref cmnt) => {
-                if (*cmnt).style != comments::Trailing {
-                    return Ok(());
-                }
-                let span_line = cm.lookup_char_pos(span.hi);
-                let comment_line = cm.lookup_char_pos((*cmnt).pos);
-                let mut next = (*cmnt).pos + BytePos(1);
-                match next_pos {
-                    None => (),
-                    Some(p) => next = p,
-                }
-                if span.hi < (*cmnt).pos && (*cmnt).pos < next &&
-                   span_line.line == comment_line.line {
-                    try!(self.print_comment(cmnt));
-                    self.cur_cmnt_and_lit.cur_cmnt += 1;
-                }
-            }
-            _ => (),
-        }
-        Ok(())
-    }
-
-    pub fn print_remaining_comments(&mut self) -> io::Result<()> {
-        // If there aren't any remaining comments, then we need to manually
-        // make sure there is a line break at the end.
-        if self.next_comment().is_none() {
-            try!(hardbreak(&mut self.s));
-        }
-        loop {
-            match self.next_comment() {
-                Some(ref cmnt) => {
-                    try!(self.print_comment(cmnt));
-                    self.cur_cmnt_and_lit.cur_cmnt += 1;
-                }
-                _ => break,
-            }
-        }
-        Ok(())
-    }
-
-    pub fn print_opt_abi_and_extern_if_nondefault(&mut self,
-                                                  opt_abi: Option<Abi>)
-                                                  -> io::Result<()> {
-        match opt_abi {
-            Some(Abi::Rust) => Ok(()),
-            Some(abi) => {
-                try!(self.word_nbsp("extern"));
-                self.word_nbsp(&abi.to_string())
-            }
-            None => Ok(()),
-        }
-    }
-
-    pub fn print_extern_opt_abi(&mut self, opt_abi: Option<Abi>) -> io::Result<()> {
-        match opt_abi {
-            Some(abi) => {
-                try!(self.word_nbsp("extern"));
-                self.word_nbsp(&abi.to_string())
-            }
-            None => Ok(()),
-        }
-    }
-
-    pub fn print_fn_header_info(&mut self,
-                                unsafety: hir::Unsafety,
-                                constness: hir::Constness,
-                                abi: Abi,
-                                vis: hir::Visibility)
-                                -> io::Result<()> {
-        try!(word(&mut self.s, &visibility_qualified(vis, "")));
-        try!(self.print_unsafety(unsafety));
-
-        match constness {
-            hir::Constness::NotConst => {}
-            hir::Constness::Const => try!(self.word_nbsp("const")),
-        }
-
-        if abi != Abi::Rust {
-            try!(self.word_nbsp("extern"));
-            try!(self.word_nbsp(&abi.to_string()));
-        }
-
-        word(&mut self.s, "fn")
-    }
-
-    pub fn print_unsafety(&mut self, s: hir::Unsafety) -> io::Result<()> {
-        match s {
-            hir::Unsafety::Normal => Ok(()),
-            hir::Unsafety::Unsafe => self.word_nbsp("unsafe"),
-        }
-    }
-}
-
-// Dup'ed from parse::classify, but adapted for the HIR.
-/// Does this expression require a semicolon to be treated
-/// as a statement? The negation of this -- "can this expression
-/// be used as a statement without a semicolon?" -- is used
-/// as an early bail-out in the parser so that, for instance,
-///     if true {...} else {...}
-///      |x| 5
-/// isn't parsed as (if true {...} else {...} | x) | 5
-fn expr_requires_semi_to_be_stmt(e: &hir::Expr) -> bool {
-    match e.node {
-        hir::ExprIf(..) |
-        hir::ExprMatch(..) |
-        hir::ExprBlock(_) |
-        hir::ExprWhile(..) |
-        hir::ExprLoop(..) => false,
-        _ => true,
-    }
-}
-
-/// Returns `true` if this statement requires a semicolon after it.
-/// Note that in one case (`StmtSemi`), we've already
-/// seen the semicolon, and thus don't need another.
-fn stmt_ends_with_semi(stmt: &hir::Stmt_) -> bool {
-    match *stmt {
-        hir::StmtDecl(ref d, _) => {
-            match d.node {
-                hir::DeclLocal(_) => true,
-                hir::DeclItem(_) => false,
-            }
-        }
-        hir::StmtExpr(ref e, _) => {
-            expr_requires_semi_to_be_stmt(&e)
-        }
-        hir::StmtSemi(..) => {
-            false
-        }
-    }
-}
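Illustration (not part of this diff): expr_requires_semi_to_be_stmt is what
lets block-like expressions stand alone as statements, while any other
expression needs a terminating `;` -- a minimal standalone sketch:

    fn demo(cond: bool, x: i32) -> i32 {
        if cond { () } else { () }   // block-like: no `;` required
        match x { _ => () }          // block-like: no `;` required
        x + 1;                       // ordinary expression: `;` required
        x
    }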
diff --git a/src/librustc_front/util.rs b/src/librustc_front/util.rs
deleted file mode 100644 (file)
index 8140ea1..0000000
+++ /dev/null
@@ -1,362 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use hir;
-use hir::*;
-use intravisit::{self, Visitor, FnKind};
-use syntax::ast_util;
-use syntax::ast::{Name, NodeId, DUMMY_NODE_ID};
-use syntax::codemap::Span;
-use syntax::ptr::P;
-
-pub fn walk_pat<F>(pat: &Pat, mut it: F) -> bool
-    where F: FnMut(&Pat) -> bool
-{
-    // FIXME(#19596) this is a workaround, but there should be a better way
-    fn walk_pat_<G>(pat: &Pat, it: &mut G) -> bool
-        where G: FnMut(&Pat) -> bool
-    {
-        if !it(pat) {
-            return false;
-        }
-
-        match pat.node {
-            PatKind::Ident(_, _, Some(ref p)) => walk_pat_(&p, it),
-            PatKind::Struct(_, ref fields, _) => {
-                fields.iter().all(|field| walk_pat_(&field.node.pat, it))
-            }
-            PatKind::TupleStruct(_, Some(ref s)) | PatKind::Tup(ref s) => {
-                s.iter().all(|p| walk_pat_(&p, it))
-            }
-            PatKind::Box(ref s) | PatKind::Ref(ref s, _) => {
-                walk_pat_(&s, it)
-            }
-            PatKind::Vec(ref before, ref slice, ref after) => {
-                before.iter().all(|p| walk_pat_(&p, it)) &&
-                slice.iter().all(|p| walk_pat_(&p, it)) &&
-                after.iter().all(|p| walk_pat_(&p, it))
-            }
-            PatKind::Wild |
-            PatKind::Lit(_) |
-            PatKind::Range(_, _) |
-            PatKind::Ident(_, _, _) |
-            PatKind::TupleStruct(..) |
-            PatKind::Path(..) |
-            PatKind::QPath(_, _) => {
-                true
-            }
-        }
-    }
-
-    walk_pat_(pat, &mut it)
-}
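Illustration (not part of this diff): walk_pat short-circuits as soon as the
callback returns `false`, so it doubles as an any/all query over sub-patterns.
A hypothetical caller (name invented), using only items already imported in
this module:

    fn binds_by_ref(pat: &Pat) -> bool {
        // true as soon as some sub-pattern is a `ref` binding
        !walk_pat(pat, |p| match p.node {
            PatKind::Ident(BindByRef(_), _, _) => false,
            _ => true,
        })
    }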
-
-pub fn binop_to_string(op: BinOp_) -> &'static str {
-    match op {
-        BiAdd => "+",
-        BiSub => "-",
-        BiMul => "*",
-        BiDiv => "/",
-        BiRem => "%",
-        BiAnd => "&&",
-        BiOr => "||",
-        BiBitXor => "^",
-        BiBitAnd => "&",
-        BiBitOr => "|",
-        BiShl => "<<",
-        BiShr => ">>",
-        BiEq => "==",
-        BiLt => "<",
-        BiLe => "<=",
-        BiNe => "!=",
-        BiGe => ">=",
-        BiGt => ">",
-    }
-}
-
-pub fn stmt_id(s: &Stmt) -> NodeId {
-    match s.node {
-        StmtDecl(_, id) => id,
-        StmtExpr(_, id) => id,
-        StmtSemi(_, id) => id,
-    }
-}
-
-pub fn lazy_binop(b: BinOp_) -> bool {
-    match b {
-        BiAnd => true,
-        BiOr => true,
-        _ => false,
-    }
-}
-
-pub fn is_shift_binop(b: BinOp_) -> bool {
-    match b {
-        BiShl => true,
-        BiShr => true,
-        _ => false,
-    }
-}
-
-pub fn is_comparison_binop(b: BinOp_) -> bool {
-    match b {
-        BiEq | BiLt | BiLe | BiNe | BiGt | BiGe => true,
-        BiAnd |
-        BiOr |
-        BiAdd |
-        BiSub |
-        BiMul |
-        BiDiv |
-        BiRem |
-        BiBitXor |
-        BiBitAnd |
-        BiBitOr |
-        BiShl |
-        BiShr => false,
-    }
-}
-
-/// Returns `true` if the binary operator takes its arguments by value
-pub fn is_by_value_binop(b: BinOp_) -> bool {
-    !is_comparison_binop(b)
-}
-
-/// Returns `true` if the unary operator takes its argument by value
-pub fn is_by_value_unop(u: UnOp) -> bool {
-    match u {
-        UnNeg | UnNot => true,
-        _ => false,
-    }
-}
-
-pub fn unop_to_string(op: UnOp) -> &'static str {
-    match op {
-        UnDeref => "*",
-        UnNot => "!",
-        UnNeg => "-",
-    }
-}
-
-pub struct IdVisitor<'a, O: 'a> {
-    operation: &'a mut O,
-
-    // In general, the id visitor visits the contents of an item, but
-    // not including nested trait/impl items, nor other nested items.
-    // The base visitor itself always skips nested items, but not
-    // trait/impl items. This means in particular that if you start by
-    // visiting a trait or an impl, you should not visit the
-    // trait/impl items respectively.  This is handled by setting
-    // `skip_members` to true when `visit_item` is on the stack. This
-    // way, if the user begins by calling `visit_trait_item`, we will
-    // visit the trait item, but if they begin with `visit_item`, we
-    // won't visit the (nested) trait items.
-    skip_members: bool,
-}
-
-impl<'a, O: ast_util::IdVisitingOperation> IdVisitor<'a, O> {
-    pub fn new(operation: &'a mut O) -> IdVisitor<'a, O> {
-        IdVisitor { operation: operation, skip_members: false }
-    }
-
-    fn visit_generics_helper(&mut self, generics: &Generics) {
-        for type_parameter in generics.ty_params.iter() {
-            self.operation.visit_id(type_parameter.id)
-        }
-        for lifetime in &generics.lifetimes {
-            self.operation.visit_id(lifetime.lifetime.id)
-        }
-    }
-}
-
-impl<'a, 'v, O: ast_util::IdVisitingOperation> Visitor<'v> for IdVisitor<'a, O> {
-    fn visit_mod(&mut self, module: &Mod, _: Span, node_id: NodeId) {
-        self.operation.visit_id(node_id);
-        intravisit::walk_mod(self, module)
-    }
-
-    fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) {
-        self.operation.visit_id(foreign_item.id);
-        intravisit::walk_foreign_item(self, foreign_item)
-    }
-
-    fn visit_item(&mut self, item: &Item) {
-        assert!(!self.skip_members);
-        self.skip_members = true;
-
-        self.operation.visit_id(item.id);
-        match item.node {
-            ItemUse(ref view_path) => {
-                match view_path.node {
-                    ViewPathSimple(_, _) |
-                    ViewPathGlob(_) => {}
-                    ViewPathList(_, ref paths) => {
-                        for path in paths {
-                            self.operation.visit_id(path.node.id())
-                        }
-                    }
-                }
-            }
-            _ => {}
-        }
-        intravisit::walk_item(self, item);
-
-        self.skip_members = false;
-    }
-
-    fn visit_local(&mut self, local: &Local) {
-        self.operation.visit_id(local.id);
-        intravisit::walk_local(self, local)
-    }
-
-    fn visit_block(&mut self, block: &Block) {
-        self.operation.visit_id(block.id);
-        intravisit::walk_block(self, block)
-    }
-
-    fn visit_stmt(&mut self, statement: &Stmt) {
-        self.operation.visit_id(stmt_id(statement));
-        intravisit::walk_stmt(self, statement)
-    }
-
-    fn visit_pat(&mut self, pattern: &Pat) {
-        self.operation.visit_id(pattern.id);
-        intravisit::walk_pat(self, pattern)
-    }
-
-    fn visit_expr(&mut self, expression: &Expr) {
-        self.operation.visit_id(expression.id);
-        intravisit::walk_expr(self, expression)
-    }
-
-    fn visit_ty(&mut self, typ: &Ty) {
-        self.operation.visit_id(typ.id);
-        intravisit::walk_ty(self, typ)
-    }
-
-    fn visit_generics(&mut self, generics: &Generics) {
-        self.visit_generics_helper(generics);
-        intravisit::walk_generics(self, generics)
-    }
-
-    fn visit_fn(&mut self,
-                function_kind: FnKind<'v>,
-                function_declaration: &'v FnDecl,
-                block: &'v Block,
-                span: Span,
-                node_id: NodeId) {
-        self.operation.visit_id(node_id);
-
-        match function_kind {
-            FnKind::ItemFn(_, generics, _, _, _, _) => {
-                self.visit_generics_helper(generics)
-            }
-            FnKind::Method(_, sig, _) => {
-                self.visit_generics_helper(&sig.generics)
-            }
-            FnKind::Closure => {}
-        }
-
-        for argument in &function_declaration.inputs {
-            self.operation.visit_id(argument.id)
-        }
-
-        intravisit::walk_fn(self, function_kind, function_declaration, block, span);
-    }
-
-    fn visit_struct_field(&mut self, struct_field: &StructField) {
-        self.operation.visit_id(struct_field.node.id);
-        intravisit::walk_struct_field(self, struct_field)
-    }
-
-    fn visit_variant_data(&mut self,
-                          struct_def: &VariantData,
-                          _: Name,
-                          _: &hir::Generics,
-                          _: NodeId,
-                          _: Span) {
-        self.operation.visit_id(struct_def.id());
-        intravisit::walk_struct_def(self, struct_def);
-    }
-
-    fn visit_trait_item(&mut self, ti: &hir::TraitItem) {
-        if !self.skip_members {
-            self.operation.visit_id(ti.id);
-            intravisit::walk_trait_item(self, ti);
-        }
-    }
-
-    fn visit_impl_item(&mut self, ii: &hir::ImplItem) {
-        if !self.skip_members {
-            self.operation.visit_id(ii.id);
-            intravisit::walk_impl_item(self, ii);
-        }
-    }
-
-    fn visit_lifetime(&mut self, lifetime: &Lifetime) {
-        self.operation.visit_id(lifetime.id);
-    }
-
-    fn visit_lifetime_def(&mut self, def: &LifetimeDef) {
-        self.visit_lifetime(&def.lifetime);
-    }
-
-    fn visit_trait_ref(&mut self, trait_ref: &TraitRef) {
-        self.operation.visit_id(trait_ref.ref_id);
-        intravisit::walk_trait_ref(self, trait_ref);
-    }
-}
-
-/// Computes the id range for a single fn body, ignoring nested items.
-pub fn compute_id_range_for_fn_body(fk: FnKind,
-                                    decl: &FnDecl,
-                                    body: &Block,
-                                    sp: Span,
-                                    id: NodeId)
-                                    -> ast_util::IdRange {
-    let mut visitor = ast_util::IdRangeComputingVisitor { result: ast_util::IdRange::max() };
-    let mut id_visitor = IdVisitor::new(&mut visitor);
-    id_visitor.visit_fn(fk, decl, body, sp, id);
-    id_visitor.operation.result
-}
-
-pub fn is_path(e: P<Expr>) -> bool {
-    match e.node {
-        ExprPath(..) => true,
-        _ => false,
-    }
-}
-
-pub fn empty_generics() -> Generics {
-    Generics {
-        lifetimes: HirVec::new(),
-        ty_params: HirVec::new(),
-        where_clause: WhereClause {
-            id: DUMMY_NODE_ID,
-            predicates: HirVec::new(),
-        },
-    }
-}
-
-// convert a span and an identifier to the corresponding
-// 1-segment path
-pub fn ident_to_path(s: Span, ident: Ident) -> Path {
-    hir::Path {
-        span: s,
-        global: false,
-        segments: hir_vec![hir::PathSegment {
-            identifier: ident,
-            parameters: hir::AngleBracketedParameters(hir::AngleBracketedParameterData {
-                lifetimes: HirVec::new(),
-                types: HirVec::new(),
-                bindings: HirVec::new(),
-            }),
-        }],
-    }
-}
diff --git a/src/librustc_incremental/Cargo.toml b/src/librustc_incremental/Cargo.toml
new file mode 100644 (file)
index 0000000..927e412
--- /dev/null
@@ -0,0 +1,18 @@
+[package]
+authors = ["The Rust Project Developers"]
+name = "rustc_incremental"
+version = "0.0.0"
+
+[lib]
+name = "rustc_incremental"
+path = "lib.rs"
+crate-type = ["dylib"]
+
+[dependencies]
+graphviz = { path = "../libgraphviz" }
+rbml = { path = "../librbml" }
+rustc = { path = "../librustc" }
+rustc_data_structures = { path = "../librustc_data_structures" }
+serialize = { path = "../libserialize" }
+log = { path = "../liblog" }
+syntax = { path = "../libsyntax" }
diff --git a/src/librustc_incremental/assert_dep_graph.rs b/src/librustc_incremental/assert_dep_graph.rs
new file mode 100644 (file)
index 0000000..88d8ed8
--- /dev/null
@@ -0,0 +1,438 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! This pass is only used for the UNIT TESTS and DEBUGGING NEEDS
+//! around dependency graph construction. It serves two purposes: it
+//! will dump graphs in graphviz form to disk, and it searches for
+//! `#[rustc_if_this_changed]` and `#[rustc_then_this_would_need]`
+//! annotations. These annotations can be used to test whether paths
+//! exist in the graph. These checks run after trans, so they view the
+//! final state of the dependency graph. Note that there are
+//! similar assertions found in `persist::dirty_clean` which check the
+//! **initial** state of the dependency graph, just after it has been
+//! loaded from disk.
+//!
+//! In this code, we report errors on each `rustc_if_this_changed`
+//! annotation. If a path exists in all cases, then we would report
+//! "all path(s) exist". Otherwise, we report: "no path to `foo`" for
+//! each case where no path exists.  `compile-fail` tests can then be
+//! used to check when paths exist or do not.
+//!
+//! The full form of the `rustc_if_this_changed` annotation is
+//! `#[rustc_if_this_changed(id)]`. The `id` argument is optional and
+//! defaults to `"id"` if omitted.
+//!
+//! Example:
+//!
+//! ```
+//! #[rustc_if_this_changed]
+//! fn foo() { }
+//!
+//! #[rustc_then_this_would_need("trans")] //~ ERROR no path from `foo`
+//! fn bar() { }
+//!
+//! #[rustc_then_this_would_need("trans")] //~ ERROR OK
+//! fn baz() { foo(); }
+//! ```
+
+use graphviz as dot;
+use rustc::dep_graph::{DepGraphQuery, DepNode};
+use rustc::hir::def_id::DefId;
+use rustc::ty::TyCtxt;
+use rustc_data_structures::fnv::{FnvHashMap, FnvHashSet};
+use rustc_data_structures::graph::{Direction, INCOMING, OUTGOING, NodeIndex};
+use rustc::hir;
+use rustc::hir::intravisit::Visitor;
+use graphviz::IntoCow;
+use std::env;
+use std::fs::File;
+use std::io::Write;
+use syntax::ast;
+use syntax::attr::AttrMetaMethods;
+use syntax::codemap::Span;
+use syntax::parse::token::InternedString;
+
+const IF_THIS_CHANGED: &'static str = "rustc_if_this_changed";
+const THEN_THIS_WOULD_NEED: &'static str = "rustc_then_this_would_need";
+const ID: &'static str = "id";
+
+pub fn assert_dep_graph(tcx: &TyCtxt) {
+    let _ignore = tcx.dep_graph.in_ignore();
+
+    if tcx.sess.opts.debugging_opts.dump_dep_graph {
+        dump_graph(tcx);
+    }
+
+    // Find annotations supplied by user (if any).
+    let (if_this_changed, then_this_would_need) = {
+        let mut visitor = IfThisChanged { tcx: tcx,
+                                          if_this_changed: FnvHashMap(),
+                                          then_this_would_need: FnvHashMap() };
+        tcx.map.krate().visit_all_items(&mut visitor);
+        (visitor.if_this_changed, visitor.then_this_would_need)
+    };
+
+    if !if_this_changed.is_empty() || !then_this_would_need.is_empty() {
+        assert!(tcx.sess.opts.debugging_opts.query_dep_graph,
+                "cannot use the `#[{}]` or `#[{}]` annotations \
+                 without supplying `-Z query-dep-graph`",
+                IF_THIS_CHANGED, THEN_THIS_WOULD_NEED);
+    }
+
+    // Check paths.
+    check_paths(tcx, &if_this_changed, &then_this_would_need);
+}
+
+type SourceHashMap =
+    FnvHashMap<InternedString,
+               FnvHashSet<(Span, DefId, DepNode<DefId>)>>;
+type TargetHashMap =
+    FnvHashMap<InternedString,
+               FnvHashSet<(Span, InternedString, ast::NodeId, DepNode<DefId>)>>;
+
+struct IfThisChanged<'a, 'tcx:'a> {
+    tcx: &'a TyCtxt<'tcx>,
+    if_this_changed: SourceHashMap,
+    then_this_would_need: TargetHashMap,
+}
+
+impl<'a, 'tcx> IfThisChanged<'a, 'tcx> {
+    fn process_attrs(&mut self, node_id: ast::NodeId, def_id: DefId) {
+        for attr in self.tcx.get_attrs(def_id).iter() {
+            if attr.check_name(IF_THIS_CHANGED) {
+                let mut id = None;
+                for meta_item in attr.meta_item_list().unwrap_or_default() {
+                    match meta_item.node {
+                        ast::MetaItemKind::Word(ref s) if id.is_none() => id = Some(s.clone()),
+                        _ => {
+                            self.tcx.sess.span_err(
+                                meta_item.span,
+                                &format!("unexpected meta-item {:?}", meta_item.node));
+                        }
+                    }
+                }
+                let id = id.unwrap_or(InternedString::new(ID));
+                self.if_this_changed.entry(id)
+                                    .or_insert(FnvHashSet())
+                                    .insert((attr.span, def_id, DepNode::Hir(def_id)));
+            } else if attr.check_name(THEN_THIS_WOULD_NEED) {
+                let mut dep_node_interned = None;
+                let mut id = None;
+                for meta_item in attr.meta_item_list().unwrap_or_default() {
+                    match meta_item.node {
+                        ast::MetaItemKind::Word(ref s) if dep_node_interned.is_none() =>
+                            dep_node_interned = Some(s.clone()),
+                        ast::MetaItemKind::Word(ref s) if id.is_none() =>
+                            id = Some(s.clone()),
+                        _ => {
+                            self.tcx.sess.span_err(
+                                meta_item.span,
+                                &format!("unexpected meta-item {:?}", meta_item.node));
+                        }
+                    }
+                }
+                let dep_node = match dep_node_interned {
+                    Some(ref n) => {
+                        match DepNode::from_label_string(&n[..], def_id) {
+                            Ok(n) => n,
+                            Err(()) => {
+                                self.tcx.sess.span_fatal(
+                                    attr.span,
+                                    &format!("unrecognized DepNode variant {:?}", n));
+                            }
+                        }
+                    }
+                    None => {
+                        self.tcx.sess.span_fatal(
+                            attr.span,
+                            &format!("missing DepNode variant"));
+                    }
+                };
+                let id = id.unwrap_or(InternedString::new(ID));
+                self.then_this_would_need
+                    .entry(id)
+                    .or_insert(FnvHashSet())
+                    .insert((attr.span, dep_node_interned.clone().unwrap(), node_id, dep_node));
+            }
+        }
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for IfThisChanged<'a, 'tcx> {
+    fn visit_item(&mut self, item: &'tcx hir::Item) {
+        let def_id = self.tcx.map.local_def_id(item.id);
+        self.process_attrs(item.id, def_id);
+    }
+}
+
+fn check_paths(tcx: &TyCtxt,
+               if_this_changed: &SourceHashMap,
+               then_this_would_need: &TargetHashMap)
+{
+    // Return early here so as not to construct the query, which is not cheap.
+    if if_this_changed.is_empty() {
+        return;
+    }
+    let query = tcx.dep_graph.query();
+    for (id, sources) in if_this_changed {
+        let targets = match then_this_would_need.get(id) {
+            Some(targets) => targets,
+            None => {
+                for &(source_span, _, _) in sources.iter().take(1) {
+                    tcx.sess.span_err(
+                        source_span,
+                        &format!("no targets for id `{}`", id));
+                }
+                continue;
+            }
+        };
+
+        for &(_, source_def_id, source_dep_node) in sources {
+            let dependents = query.transitive_dependents(source_dep_node);
+            for &(target_span, ref target_pass, _, ref target_dep_node) in targets {
+                if !dependents.contains(&target_dep_node) {
+                    tcx.sess.span_err(
+                        target_span,
+                        &format!("no path from `{}` to `{}`",
+                                 tcx.item_path_str(source_def_id),
+                                 target_pass));
+                } else {
+                    tcx.sess.span_err(
+                        target_span,
+                        &format!("OK"));
+                }
+            }
+        }
+    }
+}
+
+fn dump_graph(tcx: &TyCtxt) {
+    let path: String = env::var("RUST_DEP_GRAPH").unwrap_or_else(|_| format!("dep_graph"));
+    let query = tcx.dep_graph.query();
+
+    let nodes = match env::var("RUST_DEP_GRAPH_FILTER") {
+        Ok(string) => {
+            // Expect one of: "-> target", "source -> target", or "source ->".
+            let parts: Vec<_> = string.split("->").collect();
+            if parts.len() > 2 {
+                bug!("Invalid RUST_DEP_GRAPH_FILTER: expected '[source] -> [target]'");
+            }
+            let sources = node_set(&query, &parts[0]);
+            let targets = node_set(&query, &parts[1]);
+            filter_nodes(&query, &sources, &targets)
+        }
+        Err(_) => {
+            query.nodes()
+                 .into_iter()
+                 .collect()
+        }
+    };
+    let edges = filter_edges(&query, &nodes);
+
+    { // dump a .txt file with just the edges:
+        let txt_path = format!("{}.txt", path);
+        let mut file = File::create(&txt_path).unwrap();
+        for &(source, target) in &edges {
+            write!(file, "{:?} -> {:?}\n", source, target).unwrap();
+        }
+    }
+
+    { // dump a .dot file in graphviz format:
+        let dot_path = format!("{}.dot", path);
+        let mut v = Vec::new();
+        dot::render(&GraphvizDepGraph(nodes, edges), &mut v).unwrap();
+        File::create(&dot_path).and_then(|mut f| f.write_all(&v)).unwrap();
+    }
+}
+
+pub struct GraphvizDepGraph(FnvHashSet<DepNode<DefId>>,
+                            Vec<(DepNode<DefId>, DepNode<DefId>)>);
+
+impl<'a, 'tcx> dot::GraphWalk<'a> for GraphvizDepGraph {
+    type Node = DepNode<DefId>;
+    type Edge = (DepNode<DefId>, DepNode<DefId>);
+    fn nodes(&self) -> dot::Nodes<DepNode<DefId>> {
+        let nodes: Vec<_> = self.0.iter().cloned().collect();
+        nodes.into_cow()
+    }
+    fn edges(&self) -> dot::Edges<(DepNode<DefId>, DepNode<DefId>)> {
+        self.1[..].into_cow()
+    }
+    fn source(&self, edge: &(DepNode<DefId>, DepNode<DefId>)) -> DepNode<DefId> {
+        edge.0
+    }
+    fn target(&self, edge: &(DepNode<DefId>, DepNode<DefId>)) -> DepNode<DefId> {
+        edge.1
+    }
+}
+
+impl<'a, 'tcx> dot::Labeller<'a> for GraphvizDepGraph {
+    type Node = DepNode<DefId>;
+    type Edge = (DepNode<DefId>, DepNode<DefId>);
+    fn graph_id(&self) -> dot::Id {
+        dot::Id::new("DependencyGraph").unwrap()
+    }
+    fn node_id(&self, n: &DepNode<DefId>) -> dot::Id {
+        let s: String =
+            format!("{:?}", n).chars()
+                              .map(|c| if c == '_' || c.is_alphanumeric() { c } else { '_' })
+                              .collect();
+        debug!("n={:?} s={:?}", n, s);
+        dot::Id::new(s).unwrap()
+    }
+    fn node_label(&self, n: &DepNode<DefId>) -> dot::LabelText {
+        dot::LabelText::label(format!("{:?}", n))
+    }
+}
+
+// Given an optional filter like `"x & y & z"`, returns either `None` (no
+// filter) or the set of nodes whose labels contain all of those
+// substrings.
+fn node_set(query: &DepGraphQuery<DefId>, filter: &str)
+            -> Option<FnvHashSet<DepNode<DefId>>>
+{
+    debug!("node_set(filter={:?})", filter);
+
+    if filter.trim().is_empty() {
+        return None;
+    }
+
+    let filters: Vec<&str> = filter.split("&").map(|s| s.trim()).collect();
+
+    debug!("node_set: filters={:?}", filters);
+
+    Some(query.nodes()
+         .into_iter()
+         .filter(|n| {
+             let s = format!("{:?}", n);
+             filters.iter().all(|f| s.contains(f))
+         })
+        .collect())
+}
+
+fn filter_nodes(query: &DepGraphQuery<DefId>,
+                sources: &Option<FnvHashSet<DepNode<DefId>>>,
+                targets: &Option<FnvHashSet<DepNode<DefId>>>)
+                -> FnvHashSet<DepNode<DefId>>
+{
+    if let &Some(ref sources) = sources {
+        if let &Some(ref targets) = targets {
+            walk_between(query, sources, targets)
+        } else {
+            walk_nodes(query, sources, OUTGOING)
+        }
+    } else if let &Some(ref targets) = targets {
+        walk_nodes(query, targets, INCOMING)
+    } else {
+        query.nodes().into_iter().collect()
+    }
+}
+
+fn walk_nodes(query: &DepGraphQuery<DefId>,
+              starts: &FnvHashSet<DepNode<DefId>>,
+              direction: Direction)
+              -> FnvHashSet<DepNode<DefId>>
+{
+    let mut set = FnvHashSet();
+    for start in starts {
+        debug!("walk_nodes: start={:?} outgoing?={:?}", start, direction == OUTGOING);
+        if set.insert(*start) {
+            let mut stack = vec![query.indices[start]];
+            while let Some(index) = stack.pop() {
+                for (_, edge) in query.graph.adjacent_edges(index, direction) {
+                    let neighbor_index = edge.source_or_target(direction);
+                    let neighbor = query.graph.node_data(neighbor_index);
+                    if set.insert(*neighbor) {
+                        stack.push(neighbor_index);
+                    }
+                }
+            }
+        }
+    }
+    set
+}
+
+fn walk_between(query: &DepGraphQuery<DefId>,
+                sources: &FnvHashSet<DepNode<DefId>>,
+                targets: &FnvHashSet<DepNode<DefId>>)
+                -> FnvHashSet<DepNode<DefId>>
+{
+    // This is a bit tricky. We want to include a node only if it is:
+    // (a) reachable from a source and (b) will reach a target. And we
+    // have to be careful about cycles etc.  Luckily efficiency is not
+    // a big concern!
+
+    #[derive(Copy, Clone, PartialEq)]
+    enum State { Undecided, Deciding, Included, Excluded }
+
+    let mut node_states = vec![State::Undecided; query.graph.len_nodes()];
+
+    for &target in targets {
+        node_states[query.indices[&target].0] = State::Included;
+    }
+
+    for source in sources.iter().map(|n| query.indices[n]) {
+        recurse(query, &mut node_states, source);
+    }
+
+    return query.nodes()
+                .into_iter()
+                .filter(|n| {
+                    let index = query.indices[n];
+                    node_states[index.0] == State::Included
+                })
+                .collect();
+
+    fn recurse(query: &DepGraphQuery<DefId>,
+               node_states: &mut [State],
+               node: NodeIndex)
+               -> bool
+    {
+        match node_states[node.0] {
+            // known to reach a target
+            State::Included => return true,
+
+            // known not to reach a target
+            State::Excluded => return false,
+
+            // backedge, not yet known, say false
+            State::Deciding => return false,
+
+            State::Undecided => { }
+        }
+
+        node_states[node.0] = State::Deciding;
+
+        for neighbor_index in query.graph.successor_nodes(node) {
+            if recurse(query, node_states, neighbor_index) {
+                node_states[node.0] = State::Included;
+            }
+        }
+
+        // if we didn't find a path to target, then set to excluded
+        if node_states[node.0] == State::Deciding {
+            node_states[node.0] = State::Excluded;
+            false
+        } else {
+            assert!(node_states[node.0] == State::Included);
+            true
+        }
+    }
+}
+
+fn filter_edges(query: &DepGraphQuery<DefId>,
+                nodes: &FnvHashSet<DepNode<DefId>>)
+                -> Vec<(DepNode<DefId>, DepNode<DefId>)>
+{
+    query.edges()
+         .into_iter()
+         .filter(|&(source, target)| nodes.contains(&source) && nodes.contains(&target))
+         .collect()
+}
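
For reference, the `RUST_DEP_GRAPH_FILTER` grammar that `dump_graph` accepts can be illustrated with a small stand-alone sketch. The `parse_filter` helper below is hypothetical (it is not part of this crate) and only mirrors the `split("->")` / `split("&")` handling above; the real code additionally treats an empty side as "no filter".

```
// Hypothetical helper, for illustration only: show how a filter string such
// as "Hir & foo -> TypeckItemBody" is split into source and target
// substrings. The real `node_set`/`filter_nodes` above match these substrings
// against `format!("{:?}", node)` for each DepNode in the query.
fn parse_filter(filter: &str) -> (Vec<String>, Vec<String>) {
    let parts: Vec<&str> = filter.split("->").collect();
    assert!(parts.len() == 2, "expected '[source] -> [target]'");
    let side = |s: &str| {
        s.split('&')
         .map(|f| f.trim().to_string())
         .filter(|f| !f.is_empty())
         .collect::<Vec<_>>()
    };
    (side(parts[0]), side(parts[1]))
}

fn main() {
    // e.g. RUST_DEP_GRAPH_FILTER='Hir & foo -> TypeckItemBody'
    let (sources, targets) = parse_filter("Hir & foo -> TypeckItemBody");
    println!("sources = {:?}, targets = {:?}", sources, targets);
}
```
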
diff --git a/src/librustc_incremental/calculate_svh.rs b/src/librustc_incremental/calculate_svh.rs
new file mode 100644 (file)
index 0000000..ab1c6f5
--- /dev/null
@@ -0,0 +1,422 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Calculation of a Strict Version Hash for crates.  For a lengthy
+//! comment explaining the general idea, see `librustc/middle/svh.rs`.
+
+use std::hash::{Hash, SipHasher, Hasher};
+use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId};
+use rustc::hir::svh::Svh;
+use rustc::ty;
+use rustc::hir::intravisit::{self, Visitor};
+
+use self::svh_visitor::StrictVersionHashVisitor;
+
+pub trait SvhCalculate {
+    /// Calculate the SVH for an entire krate.
+    fn calculate_krate_hash(&self) -> Svh;
+
+    /// Calculate the SVH for a particular item.
+    fn calculate_item_hash(&self, def_id: DefId) -> u64;
+}
+
+impl<'tcx> SvhCalculate for ty::TyCtxt<'tcx> {
+    fn calculate_krate_hash(&self) -> Svh {
+        // FIXME (#14132): This is better than it used to be, but it is still not
+        // ideal. We now attempt to hash only the relevant portions of the
+        // Crate AST as well as the top-level crate attributes. (However,
+        // the hashing of the crate attributes should be double-checked
+        // to ensure it is not incorporating implementation artifacts into
+        // the hash that are not otherwise visible.)
+
+        let crate_disambiguator = self.sess.crate_disambiguator.get();
+        let krate = self.map.krate();
+
+        // FIXME: this should use SHA1, not SipHash. SipHash is not built to
+        //        avoid collisions.
+        let mut state = SipHasher::new();
+        debug!("state: {:?}", state);
+
+        // FIXME(#32753) -- at (*) we `to_le` for endianness, but is
+        // this enough, and does it matter anyway?
+        "crate_disambiguator".hash(&mut state);
+        crate_disambiguator.as_str().len().to_le().hash(&mut state); // (*)
+        crate_disambiguator.as_str().hash(&mut state);
+
+        debug!("crate_disambiguator: {:?}", crate_disambiguator.as_str());
+        debug!("state: {:?}", state);
+
+        {
+            let mut visit = StrictVersionHashVisitor::new(&mut state, self);
+            krate.visit_all_items(&mut visit);
+        }
+
+        // FIXME (#14132): This hash is still sensitive to e.g. the
+        // spans of the crate Attributes and their underlying
+        // MetaItems; we should make ContentHashable impl for those
+        // types and then use hash_content.  But, since all crate
+        // attributes should appear near the beginning of the file, it is
+        // not such a big deal to be sensitive to their spans for now.
+        //
+        // We hash only the MetaItems instead of the entire Attribute
+        // to avoid hashing the AttrId
+        for attr in &krate.attrs {
+            debug!("krate attr {:?}", attr);
+            attr.node.value.hash(&mut state);
+        }
+
+        Svh::from_hash(state.finish())
+    }
+
+    fn calculate_item_hash(&self, def_id: DefId) -> u64 {
+        assert!(def_id.is_local());
+
+        let mut state = SipHasher::new();
+
+        {
+            let mut visit = StrictVersionHashVisitor::new(&mut state, self);
+            if def_id.index == CRATE_DEF_INDEX {
+                // the crate root itself is not registered in the map
+                // as an item, so we have to fetch it this way
+                let krate = self.map.krate();
+                intravisit::walk_crate(&mut visit, krate);
+            } else {
+                let node_id = self.map.as_local_node_id(def_id).unwrap();
+                visit.visit_item(self.map.expect_item(node_id));
+            }
+        }
+
+        state.finish()
+    }
+}
+
+// FIXME (#14132): Even this SVH computation still has implementation
+// artifacts: namely, the order of item declaration will affect the
+// hash computation, but for many kinds of items the order of
+// declaration should be irrelevant to the ABI.
+
+mod svh_visitor {
+    pub use self::SawExprComponent::*;
+    pub use self::SawStmtComponent::*;
+    use self::SawAbiComponent::*;
+    use syntax::ast::{self, Name, NodeId};
+    use syntax::codemap::Span;
+    use syntax::parse::token;
+    use rustc::ty;
+    use rustc::hir;
+    use rustc::hir::*;
+    use rustc::hir::intravisit as visit;
+    use rustc::hir::intravisit::{Visitor, FnKind};
+
+    use std::hash::{Hash, SipHasher};
+
+    pub struct StrictVersionHashVisitor<'a, 'tcx: 'a> {
+        pub tcx: &'a ty::TyCtxt<'tcx>,
+        pub st: &'a mut SipHasher,
+    }
+
+    impl<'a, 'tcx> StrictVersionHashVisitor<'a, 'tcx> {
+        pub fn new(st: &'a mut SipHasher,
+                   tcx: &'a ty::TyCtxt<'tcx>)
+                   -> Self {
+            StrictVersionHashVisitor { st: st, tcx: tcx }
+        }
+    }
+
+    // To off-load the bulk of the hash-computation on #[derive(Hash)],
+    // we define a set of enums corresponding to the content that our
+    // crate visitor will encounter as it traverses the ast.
+    //
+    // The important invariant is that all of the Saw*Component enums
+    // do not carry any Spans, Names, or Idents.
+    //
+    // Not carrying any Names/Idents is the important fix for the
+    // problem noted in PR #13948: using the ident.name as the basis
+    // for a hash leads to unstable SVH, because ident.name is just an
+    // index into the intern table (i.e. essentially a random address), not
+    // computed from the name content.
+    //
+    // With the below enums, the SVH computation is not sensitive to
+    // artifacts of how rustc was invoked nor of how the source code
+    // was laid out.  (Or at least it is *less* sensitive.)
+
+    // This enum represents the different potential bits of code the
+    // visitor could encounter that could affect the ABI for the crate,
+    // and assigns each a distinct tag to feed into the hash computation.
+    #[derive(Hash)]
+    enum SawAbiComponent<'a> {
+
+        // FIXME (#14132): should we include (some function of)
+        // ident.ctxt as well?
+        SawIdent(token::InternedString),
+        SawStructDef(token::InternedString),
+
+        SawLifetime(token::InternedString),
+        SawLifetimeDef(token::InternedString),
+
+        SawMod,
+        SawForeignItem,
+        SawItem,
+        SawDecl,
+        SawTy,
+        SawGenerics,
+        SawFn,
+        SawTraitItem,
+        SawImplItem,
+        SawStructField,
+        SawVariant,
+        SawExplicitSelf,
+        SawPath,
+        SawBlock,
+        SawPat,
+        SawLocal,
+        SawArm,
+        SawExpr(SawExprComponent<'a>),
+        SawStmt(SawStmtComponent),
+    }
+
+    /// SawExprComponent carries all of the information that we want
+    /// to include in the hash that *won't* be covered by the
+    /// subsequent recursive traversal of the expression's
+    /// substructure by the visitor.
+    ///
+    /// We know every Expr_ variant is covered by a variant because
+    /// `fn saw_expr` maps each to some case below.  Ensuring that
+    /// each variant carries an appropriate payload has to be verified
+    /// by hand.
+    ///
+    /// (However, getting that *exactly* right is not so important
+    /// because the SVH is just a developer convenience; there is no
+    /// guarantee of collision-freedom, hash collisions are just
+    /// (hopefully) unlikely.)
+    #[derive(Hash)]
+    pub enum SawExprComponent<'a> {
+
+        SawExprLoop(Option<token::InternedString>),
+        SawExprField(token::InternedString),
+        SawExprTupField(usize),
+        SawExprBreak(Option<token::InternedString>),
+        SawExprAgain(Option<token::InternedString>),
+
+        SawExprBox,
+        SawExprVec,
+        SawExprCall,
+        SawExprMethodCall,
+        SawExprTup,
+        SawExprBinary(hir::BinOp_),
+        SawExprUnary(hir::UnOp),
+        SawExprLit(ast::LitKind),
+        SawExprCast,
+        SawExprType,
+        SawExprIf,
+        SawExprWhile,
+        SawExprMatch,
+        SawExprClosure,
+        SawExprBlock,
+        SawExprAssign,
+        SawExprAssignOp(hir::BinOp_),
+        SawExprIndex,
+        SawExprPath(Option<usize>),
+        SawExprAddrOf(hir::Mutability),
+        SawExprRet,
+        SawExprInlineAsm(&'a hir::InlineAsm),
+        SawExprStruct,
+        SawExprRepeat,
+    }
+
+    fn saw_expr<'a>(node: &'a Expr_) -> SawExprComponent<'a> {
+        match *node {
+            ExprBox(..)              => SawExprBox,
+            ExprVec(..)              => SawExprVec,
+            ExprCall(..)             => SawExprCall,
+            ExprMethodCall(..)       => SawExprMethodCall,
+            ExprTup(..)              => SawExprTup,
+            ExprBinary(op, _, _)     => SawExprBinary(op.node),
+            ExprUnary(op, _)         => SawExprUnary(op),
+            ExprLit(ref lit)         => SawExprLit(lit.node.clone()),
+            ExprCast(..)             => SawExprCast,
+            ExprType(..)             => SawExprType,
+            ExprIf(..)               => SawExprIf,
+            ExprWhile(..)            => SawExprWhile,
+            ExprLoop(_, id)          => SawExprLoop(id.map(|id| id.name.as_str())),
+            ExprMatch(..)            => SawExprMatch,
+            ExprClosure(..)          => SawExprClosure,
+            ExprBlock(..)            => SawExprBlock,
+            ExprAssign(..)           => SawExprAssign,
+            ExprAssignOp(op, _, _)   => SawExprAssignOp(op.node),
+            ExprField(_, name)       => SawExprField(name.node.as_str()),
+            ExprTupField(_, id)      => SawExprTupField(id.node),
+            ExprIndex(..)            => SawExprIndex,
+            ExprPath(ref qself, _)   => SawExprPath(qself.as_ref().map(|q| q.position)),
+            ExprAddrOf(m, _)         => SawExprAddrOf(m),
+            ExprBreak(id)            => SawExprBreak(id.map(|id| id.node.name.as_str())),
+            ExprAgain(id)            => SawExprAgain(id.map(|id| id.node.name.as_str())),
+            ExprRet(..)              => SawExprRet,
+            ExprInlineAsm(ref a,_,_) => SawExprInlineAsm(a),
+            ExprStruct(..)           => SawExprStruct,
+            ExprRepeat(..)           => SawExprRepeat,
+        }
+    }
+
+    /// SawStmtComponent is analogous to SawExprComponent, but for statements.
+    #[derive(Hash)]
+    pub enum SawStmtComponent {
+        SawStmtDecl,
+        SawStmtExpr,
+        SawStmtSemi,
+    }
+
+    fn saw_stmt(node: &Stmt_) -> SawStmtComponent {
+        match *node {
+            StmtDecl(..) => SawStmtDecl,
+            StmtExpr(..) => SawStmtExpr,
+            StmtSemi(..) => SawStmtSemi,
+        }
+    }
+
+    impl<'a, 'tcx> Visitor<'a> for StrictVersionHashVisitor<'a, 'tcx> {
+        fn visit_nested_item(&mut self, item: ItemId) {
+            debug!("visit_nested_item: {:?} st={:?}", item, self.st);
+            let def_path = self.tcx.map.def_path_from_id(item.id);
+            def_path.hash(self.st);
+        }
+
+        fn visit_variant_data(&mut self, s: &'a VariantData, name: Name,
+                              g: &'a Generics, _: NodeId, _: Span) {
+            SawStructDef(name.as_str()).hash(self.st);
+            visit::walk_generics(self, g);
+            visit::walk_struct_def(self, s)
+        }
+
+        fn visit_variant(&mut self, v: &'a Variant, g: &'a Generics, item_id: NodeId) {
+            SawVariant.hash(self.st);
+            // walk_variant does not call walk_generics, so do it here.
+            visit::walk_generics(self, g);
+            visit::walk_variant(self, v, g, item_id)
+        }
+
+        // All of the remaining methods just record (in the hash
+        // SipHasher) that the visitor saw that particular variant
+        // (with its payload), and continue walking as the default
+        // visitor would.
+        //
+        // Some of the implementations have some notes as to how one
+        // might try to make their SVH computation less discerning
+        // (e.g. by incorporating reachability analysis).  But
+        // currently all of their implementations are uniform and
+        // uninteresting.
+        //
+        // (If you edit a method such that it deviates from the
+        // pattern, please move that method up above this comment.)
+
+        fn visit_name(&mut self, _: Span, name: Name) {
+            SawIdent(name.as_str()).hash(self.st);
+        }
+
+        fn visit_lifetime(&mut self, l: &'a Lifetime) {
+            SawLifetime(l.name.as_str()).hash(self.st);
+        }
+
+        fn visit_lifetime_def(&mut self, l: &'a LifetimeDef) {
+            SawLifetimeDef(l.lifetime.name.as_str()).hash(self.st);
+        }
+
+        // We do recursively walk the bodies of functions/methods
+        // (rather than omitting their bodies from the hash) since
+        // monomorphization and cross-crate inlining generally implies
+        // that a change to a crate body will require downstream
+        // crates to be recompiled.
+        fn visit_expr(&mut self, ex: &'a Expr) {
+            SawExpr(saw_expr(&ex.node)).hash(self.st); visit::walk_expr(self, ex)
+        }
+
+        fn visit_stmt(&mut self, s: &'a Stmt) {
+            SawStmt(saw_stmt(&s.node)).hash(self.st); visit::walk_stmt(self, s)
+        }
+
+        fn visit_foreign_item(&mut self, i: &'a ForeignItem) {
+            // FIXME (#14132) ideally we would incorporate privacy (or
+            // perhaps reachability) somewhere here, so foreign items
+            // that do not leak into downstream crates would not be
+            // part of the ABI.
+            SawForeignItem.hash(self.st); visit::walk_foreign_item(self, i)
+        }
+
+        fn visit_item(&mut self, i: &'a Item) {
+            debug!("visit_item: {:?} st={:?}", i, self.st);
+            // FIXME (#14132) ideally would incorporate reachability
+            // analysis somewhere here, so items that never leak into
+            // downstream crates (e.g. via monomorphisation or
+            // inlining) would not be part of the ABI.
+            SawItem.hash(self.st); visit::walk_item(self, i)
+        }
+
+        fn visit_mod(&mut self, m: &'a Mod, _s: Span, _n: NodeId) {
+            SawMod.hash(self.st); visit::walk_mod(self, m)
+        }
+
+        fn visit_decl(&mut self, d: &'a Decl) {
+            SawDecl.hash(self.st); visit::walk_decl(self, d)
+        }
+
+        fn visit_ty(&mut self, t: &'a Ty) {
+            SawTy.hash(self.st); visit::walk_ty(self, t)
+        }
+
+        fn visit_generics(&mut self, g: &'a Generics) {
+            SawGenerics.hash(self.st); visit::walk_generics(self, g)
+        }
+
+        fn visit_fn(&mut self, fk: FnKind<'a>, fd: &'a FnDecl,
+                    b: &'a Block, s: Span, _: NodeId) {
+            SawFn.hash(self.st); visit::walk_fn(self, fk, fd, b, s)
+        }
+
+        fn visit_trait_item(&mut self, ti: &'a TraitItem) {
+            SawTraitItem.hash(self.st); visit::walk_trait_item(self, ti)
+        }
+
+        fn visit_impl_item(&mut self, ii: &'a ImplItem) {
+            SawImplItem.hash(self.st); visit::walk_impl_item(self, ii)
+        }
+
+        fn visit_struct_field(&mut self, s: &'a StructField) {
+            SawStructField.hash(self.st); visit::walk_struct_field(self, s)
+        }
+
+        fn visit_explicit_self(&mut self, es: &'a ExplicitSelf) {
+            SawExplicitSelf.hash(self.st); visit::walk_explicit_self(self, es)
+        }
+
+        fn visit_path(&mut self, path: &'a Path, _: ast::NodeId) {
+            SawPath.hash(self.st); visit::walk_path(self, path)
+        }
+
+        fn visit_path_list_item(&mut self, prefix: &'a Path, item: &'a PathListItem) {
+            SawPath.hash(self.st); visit::walk_path_list_item(self, prefix, item)
+        }
+
+        fn visit_block(&mut self, b: &'a Block) {
+            SawBlock.hash(self.st); visit::walk_block(self, b)
+        }
+
+        fn visit_pat(&mut self, p: &'a Pat) {
+            SawPat.hash(self.st); visit::walk_pat(self, p)
+        }
+
+        fn visit_local(&mut self, l: &'a Local) {
+            SawLocal.hash(self.st); visit::walk_local(self, l)
+        }
+
+        fn visit_arm(&mut self, a: &'a Arm) {
+            SawArm.hash(self.st); visit::walk_arm(self, a)
+        }
+    }
+}
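
The "Saw*Component" design above (hash a payload-free tag per AST node, never a Span or an interned index) can be seen in isolation in the following sketch. Everything here is invented for illustration — the enum, the item list, and the `svh_like_hash` function are not part of the compiler — only the `#[derive(Hash)]`-plus-SipHasher pattern mirrors `StrictVersionHashVisitor`.

```
// Stand-alone illustration of hashing tagged components. The payloads are
// name *contents* (strings), not interned indices, so the result does not
// depend on interner state or on spans.
#![allow(deprecated)] // silences the SipHasher warning on newer compilers

use std::hash::{Hash, Hasher, SipHasher};

#[derive(Hash)]
enum SawComponent {
    SawItem,
    SawIdent(String),   // the textual name, never an interner index
    SawTupField(usize),
}

fn svh_like_hash(components: &[SawComponent]) -> u64 {
    let mut state = SipHasher::new();
    for component in components {
        component.hash(&mut state);
    }
    state.finish()
}

fn main() {
    let hash = svh_like_hash(&[
        SawComponent::SawItem,
        SawComponent::SawIdent("foo".to_string()),
        SawComponent::SawTupField(0),
    ]);
    println!("svh-like hash: {:016x}", hash);
}
```
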
diff --git a/src/librustc_incremental/lib.rs b/src/librustc_incremental/lib.rs
new file mode 100644 (file)
index 0000000..005146d
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Support for serializing the dep-graph and reloading it.
+
+#![crate_name = "rustc_incremental"]
+#![unstable(feature = "rustc_private", issue = "27812")]
+#![crate_type = "dylib"]
+#![crate_type = "rlib"]
+#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
+      html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
+      html_root_url = "https://doc.rust-lang.org/nightly/")]
+#![cfg_attr(not(stage0), deny(warnings))]
+
+#![feature(rustc_private)]
+#![feature(staged_api)]
+
+extern crate graphviz;
+extern crate rbml;
+#[macro_use] extern crate rustc;
+extern crate rustc_data_structures;
+extern crate serialize as rustc_serialize;
+
+#[macro_use] extern crate log;
+#[macro_use] extern crate syntax;
+
+mod assert_dep_graph;
+mod calculate_svh;
+mod persist;
+
+pub use assert_dep_graph::assert_dep_graph;
+pub use calculate_svh::SvhCalculate;
+pub use persist::load_dep_graph;
+pub use persist::save_dep_graph;
diff --git a/src/librustc_incremental/persist/README.md b/src/librustc_incremental/persist/README.md
new file mode 100644 (file)
index 0000000..95e0940
--- /dev/null
@@ -0,0 +1,13 @@
+This is the code to load/save the dependency graph. Loading is assumed
+to run early in compilation, and saving at the very end. When loading,
+the basic idea is that we will load up the dependency graph from the
+previous compilation and compare the hashes of our HIR nodes to the
+hashes of the HIR nodes that existed at the time. For each node whose
+hash has changed, or which no longer exists in the new HIR, we can
+remove that node from the old graph along with any nodes that depend
+on it. Then we add what's left to the new graph (if any such nodes or
+edges already existed, adding them again would have no effect, but
+since loading runs before anything else, none do yet).
+
+
+
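
The paragraph above can be condensed into a small stand-alone sketch of the dirty-node propagation. The node names, hash maps, and the `surviving_nodes` function below are invented for the example; the real implementation in `persist/load.rs` (later in this patch) works on `DepNode`/`DefPathIndex` values and splits the work between `initial_dirty_nodes` and `compute_clean_edges`.

```
// Illustration only: decide which nodes of the previous dep-graph survive
// into the new one. An edge (source, target) means `target` depends on
// (reads) `source`, so dirtiness flows from source to target.
use std::collections::{HashMap, HashSet};

fn surviving_nodes(
    old_hashes: &HashMap<&'static str, u64>,
    new_hashes: &HashMap<&'static str, u64>,
    old_edges: &[(&'static str, &'static str)],
) -> HashSet<&'static str> {
    // Nodes whose hash changed, or which no longer exist, start out dirty.
    let mut dirty: HashSet<&'static str> = old_hashes
        .iter()
        .filter(|&(node, hash)| new_hashes.get(node) != Some(hash))
        .map(|(node, _)| *node)
        .collect();

    // Propagate dirtiness along the old edges until a fixed point.
    loop {
        let before = dirty.len();
        for &(source, target) in old_edges {
            if dirty.contains(source) {
                dirty.insert(target);
            }
        }
        if dirty.len() == before {
            break;
        }
    }

    // Whatever is left gets re-added to the new graph.
    old_hashes.keys().cloned().filter(|n| !dirty.contains(n)).collect()
}

fn main() {
    let old: HashMap<_, _> = vec![("a", 1), ("b", 2), ("c", 3)].into_iter().collect();
    let new: HashMap<_, _> = vec![("a", 1), ("b", 9), ("c", 3)].into_iter().collect();
    // `c` depends on `b`, so changing `b` dirties `c` too; only `a` survives.
    let edges = [("b", "c")];
    println!("{:?}", surviving_nodes(&old, &new, &edges));
}
```
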
diff --git a/src/librustc_incremental/persist/data.rs b/src/librustc_incremental/persist/data.rs
new file mode 100644 (file)
index 0000000..8be8bd6
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The data that we will serialize and deserialize.
+
+use rustc::dep_graph::DepNode;
+use rustc_serialize::{Decoder as RustcDecoder,
+                      Encodable as RustcEncodable, Encoder as RustcEncoder};
+
+use super::directory::DefPathIndex;
+
+#[derive(Debug, RustcEncodable, RustcDecodable)]
+pub struct SerializedDepGraph {
+    pub nodes: Vec<DepNode<DefPathIndex>>,
+    pub edges: Vec<SerializedEdge>,
+    pub hashes: Vec<SerializedHash>,
+}
+
+pub type SerializedEdge = (DepNode<DefPathIndex>, DepNode<DefPathIndex>);
+
+#[derive(Debug, RustcEncodable, RustcDecodable)]
+pub struct SerializedHash {
+    pub index: DefPathIndex,
+
+    /// the hash itself, computed by `calculate_item_hash`
+    pub hash: u64,
+}
+
diff --git a/src/librustc_incremental/persist/directory.rs b/src/librustc_incremental/persist/directory.rs
new file mode 100644 (file)
index 0000000..0d0054c
--- /dev/null
@@ -0,0 +1,118 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Code to convert a DefId into a DefPath (when serializing) and then
+//! back again (when deserializing). Note that the new DefId will not
+//! necessarily be the same as the old (and of course the
+//! item might even be removed in the meantime).
+
+use rustc::dep_graph::DepNode;
+use rustc::hir::map::DefPath;
+use rustc::hir::def_id::DefId;
+use rustc::ty;
+use rustc::util::nodemap::DefIdMap;
+use rustc_serialize::{Decoder as RustcDecoder,
+                      Encodable as RustcEncodable, Encoder as RustcEncoder};
+use std::fmt::{self, Debug};
+
+/// Index into the DefIdDirectory
+#[derive(Copy, Clone, Debug, PartialOrd, Ord, Hash, PartialEq, Eq,
+         RustcEncodable, RustcDecodable)]
+pub struct DefPathIndex {
+    index: u32
+}
+
+#[derive(RustcEncodable, RustcDecodable)]
+pub struct DefIdDirectory {
+    // N.B. don't use Removable here because these def-ids are loaded
+    // directly without remapping, so loading them should not fail.
+    paths: Vec<DefPath>
+}
+
+impl DefIdDirectory {
+    pub fn new() -> DefIdDirectory {
+        DefIdDirectory { paths: vec![] }
+    }
+
+    pub fn retrace(&self, tcx: &ty::TyCtxt) -> RetracedDefIdDirectory {
+        let ids = self.paths.iter()
+                            .map(|path| tcx.map.retrace_path(path))
+                            .collect();
+        RetracedDefIdDirectory { ids: ids }
+    }
+}
+
+#[derive(Debug, RustcEncodable, RustcDecodable)]
+pub struct RetracedDefIdDirectory {
+    ids: Vec<Option<DefId>>
+}
+
+impl RetracedDefIdDirectory {
+    pub fn def_id(&self, index: DefPathIndex) -> Option<DefId> {
+        self.ids[index.index as usize]
+    }
+
+    pub fn map(&self, node: DepNode<DefPathIndex>) -> Option<DepNode<DefId>> {
+        node.map_def(|&index| self.def_id(index))
+    }
+}
+
+pub struct DefIdDirectoryBuilder<'a,'tcx:'a> {
+    tcx: &'a ty::TyCtxt<'tcx>,
+    hash: DefIdMap<Option<DefPathIndex>>,
+    directory: DefIdDirectory,
+}
+
+impl<'a,'tcx> DefIdDirectoryBuilder<'a,'tcx> {
+    pub fn new(tcx: &'a ty::TyCtxt<'tcx>) -> DefIdDirectoryBuilder<'a, 'tcx> {
+        DefIdDirectoryBuilder {
+            tcx: tcx,
+            hash: DefIdMap(),
+            directory: DefIdDirectory::new()
+        }
+    }
+
+    pub fn add(&mut self, def_id: DefId) -> Option<DefPathIndex> {
+        if !def_id.is_local() {
+            // FIXME(#32015) clarify story about cross-crate dep tracking
+            return None;
+        }
+
+        let tcx = self.tcx;
+        let paths = &mut self.directory.paths;
+        self.hash.entry(def_id)
+                 .or_insert_with(|| {
+                     let def_path = tcx.def_path(def_id);
+                     if !def_path.is_local() {
+                         return None;
+                     }
+                     let index = paths.len() as u32;
+                     paths.push(def_path);
+                     Some(DefPathIndex { index: index })
+                 })
+                 .clone()
+    }
+
+    pub fn map(&mut self, node: DepNode<DefId>) -> Option<DepNode<DefPathIndex>> {
+        node.map_def(|&def_id| self.add(def_id))
+    }
+
+    pub fn into_directory(self) -> DefIdDirectory {
+        self.directory
+    }
+}
+
+impl Debug for DefIdDirectory {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        fmt.debug_list()
+           .entries(self.paths.iter().enumerate())
+           .finish()
+    }
+}
diff --git a/src/librustc_incremental/persist/dirty_clean.rs b/src/librustc_incremental/persist/dirty_clean.rs
new file mode 100644 (file)
index 0000000..35fa695
--- /dev/null
@@ -0,0 +1,151 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Debugging code to test the state of the dependency graph just
+//! after it is loaded from disk. For each node marked with
+//! `#[rustc_clean]` or `#[rustc_dirty]`, we will check that a
+//! suitable node for that item either appears or does not appear in
+//! the dep-graph, as appropriate:
+//!
+//! - `#[rustc_dirty(label="TypeckItemBody", cfg="rev2")]` if we are
+//!   in `#[cfg(rev2)]`, then there MUST NOT be a node
+//!   `DepNode::TypeckItemBody(X)` where `X` is the def-id of the
+//!   current node.
+//! - `#[rustc_clean(label="TypeckItemBody", cfg="rev2")]` same as above,
+//!   except that the node MUST exist.
+//!
+//! Errors are reported if we are in the suitable configuration but
+//! the required condition is not met.
+
+use rustc::dep_graph::{DepGraphQuery, DepNode};
+use rustc::hir;
+use rustc::hir::def_id::DefId;
+use rustc::hir::intravisit::Visitor;
+use syntax::ast::{self, Attribute, MetaItem};
+use syntax::attr::AttrMetaMethods;
+use syntax::parse::token::InternedString;
+use rustc::ty;
+
+const DIRTY: &'static str = "rustc_dirty";
+const CLEAN: &'static str = "rustc_clean";
+const LABEL: &'static str = "label";
+const CFG: &'static str = "cfg";
+
+pub fn check_dirty_clean_annotations(tcx: &ty::TyCtxt) {
+    let _ignore = tcx.dep_graph.in_ignore();
+    let query = tcx.dep_graph.query();
+    let krate = tcx.map.krate();
+    krate.visit_all_items(&mut DirtyCleanVisitor {
+        tcx: tcx,
+        query: &query,
+    });
+}
+
+pub struct DirtyCleanVisitor<'a, 'tcx:'a> {
+    tcx: &'a ty::TyCtxt<'tcx>,
+    query: &'a DepGraphQuery<DefId>,
+}
+
+impl<'a, 'tcx> DirtyCleanVisitor<'a, 'tcx> {
+    fn expect_associated_value(&self, item: &MetaItem) -> InternedString {
+        if let Some(value) = item.value_str() {
+            value
+        } else {
+            self.tcx.sess.span_fatal(
+                item.span,
+                &format!("associated value expected for `{}`", item.name()));
+        }
+    }
+
+    /// Given a `#[rustc_dirty]` or `#[rustc_clean]` attribute, scan
+    /// for a `cfg="foo"` attribute and check whether we have a cfg
+    /// flag called `foo`.
+    fn check_config(&self, attr: &ast::Attribute) -> bool {
+        debug!("check_config(attr={:?})", attr);
+        let config = &self.tcx.map.krate().config;
+        debug!("check_config: config={:?}", config);
+        for item in attr.meta_item_list().unwrap_or(&[]) {
+            if item.check_name(CFG) {
+                let value = self.expect_associated_value(item);
+                debug!("check_config: searching for cfg {:?}", value);
+                for cfg in &config[..] {
+                    if cfg.check_name(&value[..]) {
+                        debug!("check_config: matched {:?}", cfg);
+                        return true;
+                    }
+                }
+            }
+        }
+        debug!("check_config: no match found");
+        return false;
+    }
+
+    fn dep_node(&self, attr: &Attribute, def_id: DefId) -> DepNode<DefId> {
+        for item in attr.meta_item_list().unwrap_or(&[]) {
+            if item.check_name(LABEL) {
+                let value = self.expect_associated_value(item);
+                match DepNode::from_label_string(&value[..], def_id) {
+                    Ok(def_id) => return def_id,
+                    Err(()) => {
+                        self.tcx.sess.span_fatal(
+                            item.span,
+                            &format!("dep-node label `{}` not recognized", value));
+                    }
+                }
+            }
+        }
+
+        self.tcx.sess.span_fatal(attr.span, "no `label` found");
+    }
+
+    fn dep_node_str(&self, dep_node: DepNode<DefId>) -> DepNode<String> {
+        dep_node.map_def(|&def_id| Some(self.tcx.item_path_str(def_id))).unwrap()
+    }
+
+    fn assert_dirty(&self, item: &hir::Item, dep_node: DepNode<DefId>) {
+        debug!("assert_dirty({:?})", dep_node);
+
+        if self.query.contains_node(&dep_node) {
+            let dep_node_str = self.dep_node_str(dep_node);
+            self.tcx.sess.span_err(
+                item.span,
+                &format!("`{:?}` found in dep graph, but should be dirty", dep_node_str));
+        }
+    }
+
+    fn assert_clean(&self, item: &hir::Item, dep_node: DepNode<DefId>) {
+        debug!("assert_clean({:?})", dep_node);
+
+        if !self.query.contains_node(&dep_node) {
+            let dep_node_str = self.dep_node_str(dep_node);
+            self.tcx.sess.span_err(
+                item.span,
+                &format!("`{:?}` not found in dep graph, but should be clean", dep_node_str));
+        }
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for DirtyCleanVisitor<'a, 'tcx> {
+    fn visit_item(&mut self, item: &'tcx hir::Item) {
+        let def_id = self.tcx.map.local_def_id(item.id);
+        for attr in self.tcx.get_attrs(def_id).iter() {
+            if attr.check_name(DIRTY) {
+                if self.check_config(attr) {
+                    self.assert_dirty(item, self.dep_node(attr, def_id));
+                }
+            } else if attr.check_name(CLEAN) {
+                if self.check_config(attr) {
+                    self.assert_clean(item, self.dep_node(attr, def_id));
+                }
+            }
+        }
+    }
+}
+
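
As a concrete (hypothetical) illustration of the annotations this visitor checks, a two-revision test source might look like the sketch below. The item names and the `rev1`/`rev2` cfgs are invented for the example, and such a file only does anything under the unstable incremental-compilation machinery; only the attribute forms (`label = ...`, `cfg = ...`) and the `TypeckItemBody` label come from the code and docs above.

```
// Hypothetical two-revision test: `rev1` is compiled first, then `rev2` is
// compiled against the dep-graph saved by `rev1`. Only `shape()` changes, so
// its TypeckItemBody node must be dirty while `area()`'s must still be clean.
#![feature(rustc_attrs)] // the rustc_* test attributes are unstable
#![allow(dead_code)]

#[cfg(rev1)]
fn shape() -> u32 { 4 }

#[cfg(rev2)]
#[rustc_dirty(label = "TypeckItemBody", cfg = "rev2")]
fn shape() -> u32 { 5 }

#[rustc_clean(label = "TypeckItemBody", cfg = "rev2")]
fn area() -> u32 { 10 }

fn main() {}
```
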
diff --git a/src/librustc_incremental/persist/load.rs b/src/librustc_incremental/persist/load.rs
new file mode 100644 (file)
index 0000000..196c451
--- /dev/null
@@ -0,0 +1,214 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Code to save/load the dep-graph from files.
+
+use calculate_svh::SvhCalculate;
+use rbml::Error;
+use rbml::opaque::Decoder;
+use rustc::dep_graph::DepNode;
+use rustc::hir::def_id::DefId;
+use rustc::ty;
+use rustc_data_structures::fnv::FnvHashSet;
+use rustc_serialize::Decodable as RustcDecodable;
+use std::io::Read;
+use std::fs::File;
+use std::path::Path;
+
+use super::data::*;
+use super::directory::*;
+use super::dirty_clean;
+use super::util::*;
+
+type DirtyNodes = FnvHashSet<DepNode<DefId>>;
+
+type CleanEdges = Vec<(DepNode<DefId>, DepNode<DefId>)>;
+
+/// If we are in incremental mode, and a previous dep-graph exists,
+/// then load up those nodes/edges that are still valid into the
+/// dep-graph for this session. (This is assumed to be running very
+/// early in compilation, before we've really done any work, but
+/// actually it doesn't matter all that much.) See `README.md` for
+/// a more general overview.
+pub fn load_dep_graph<'tcx>(tcx: &ty::TyCtxt<'tcx>) {
+    let _ignore = tcx.dep_graph.in_ignore();
+
+    if let Some(dep_graph) = dep_graph_path(tcx) {
+        // FIXME(#32754) lock file?
+        load_dep_graph_if_exists(tcx, &dep_graph);
+        dirty_clean::check_dirty_clean_annotations(tcx);
+    }
+}
+
+pub fn load_dep_graph_if_exists<'tcx>(tcx: &ty::TyCtxt<'tcx>, path: &Path) {
+    if !path.exists() {
+        return;
+    }
+
+    let mut data = vec![];
+    match
+        File::open(path)
+        .and_then(|mut file| file.read_to_end(&mut data))
+    {
+        Ok(_) => { }
+        Err(err) => {
+            tcx.sess.err(
+                &format!("could not load dep-graph from `{}`: {}",
+                         path.display(), err));
+            return;
+        }
+    }
+
+    match decode_dep_graph(tcx, &data) {
+        Ok(dirty) => dirty,
+        Err(err) => {
+            bug!("decoding error in dep-graph from `{}`: {}", path.display(), err);
+        }
+    }
+}
+
+pub fn decode_dep_graph<'tcx>(tcx: &ty::TyCtxt<'tcx>, data: &[u8])
+                              -> Result<(), Error>
+{
+    // Deserialize the directory and dep-graph.
+    let mut decoder = Decoder::new(data, 0);
+    let directory = try!(DefIdDirectory::decode(&mut decoder));
+    let serialized_dep_graph = try!(SerializedDepGraph::decode(&mut decoder));
+
+    debug!("decode_dep_graph: directory = {:#?}", directory);
+    debug!("decode_dep_graph: serialized_dep_graph = {:#?}", serialized_dep_graph);
+
+    // Retrace the paths in the directory to find their current location (if any).
+    let retraced = directory.retrace(tcx);
+
+    debug!("decode_dep_graph: retraced = {:#?}", retraced);
+
+    // Compute the set of Hir nodes whose data has changed.
+    let mut dirty_nodes =
+        initial_dirty_nodes(tcx, &serialized_dep_graph.hashes, &retraced);
+
+    debug!("decode_dep_graph: initial dirty_nodes = {:#?}", dirty_nodes);
+
+    // Find all DepNodes reachable from that core set. This loop
+    // iterates repeatedly over the list of edges whose source is not
+    // known to be dirty (`clean_edges`). If it finds an edge whose
+    // source is dirty, it removes it from that list and adds the
+    // target to `dirty_nodes`. It stops when it reaches a fixed
+    // point.
+    let clean_edges = compute_clean_edges(&serialized_dep_graph.edges,
+                                          &retraced,
+                                          &mut dirty_nodes);
+
+    // Add synthetic `foo->foo` edges for each clean node `foo` that
+    // we had before. This is sort of a hack to create clean nodes in
+    // the graph, since the existence of a node is a signal that the
+    // work it represents need not be repeated.
+    let clean_nodes =
+        serialized_dep_graph.nodes
+                            .iter()
+                            .filter_map(|&node| retraced.map(node))
+                            .filter(|node| !dirty_nodes.contains(node))
+                            .map(|node| (node, node));
+
+    // Add nodes and edges that are not dirty into our main graph.
+    let dep_graph = tcx.dep_graph.clone();
+    for (source, target) in clean_edges.into_iter().chain(clean_nodes) {
+        let _task = dep_graph.in_task(target);
+        dep_graph.read(source);
+
+        debug!("decode_dep_graph: clean edge: {:?} -> {:?}", source, target);
+    }
+
+    Ok(())
+}
+
+fn initial_dirty_nodes<'tcx>(tcx: &ty::TyCtxt<'tcx>,
+                             hashed_items: &[SerializedHash],
+                             retraced: &RetracedDefIdDirectory)
+                             -> DirtyNodes {
+    let mut items_removed = false;
+    let mut dirty_nodes = FnvHashSet();
+    for hashed_item in hashed_items {
+        match retraced.def_id(hashed_item.index) {
+            Some(def_id) => {
+                // FIXME(#32753) -- should we use a distinct hash here
+                let current_hash = tcx.calculate_item_hash(def_id);
+                debug!("initial_dirty_nodes: hash of {:?} is {:?}, was {:?}",
+                       def_id, current_hash, hashed_item.hash);
+                if current_hash != hashed_item.hash {
+                    dirty_nodes.insert(DepNode::Hir(def_id));
+                }
+            }
+            None => {
+                items_removed = true;
+            }
+        }
+    }
+
+    // If any of the items in the krate have changed, then we consider
+    // the meta-node `Krate` to be dirty, since it represents something
+    // which (potentially) read the contents of every single item.
+    if items_removed || !dirty_nodes.is_empty() {
+        dirty_nodes.insert(DepNode::Krate);
+    }
+
+    dirty_nodes
+}
+
+fn compute_clean_edges(serialized_edges: &[SerializedEdge],
+                       retraced: &RetracedDefIdDirectory,
+                       dirty_nodes: &mut DirtyNodes)
+                       -> CleanEdges {
+    // Build up an initial list of edges. Include an edge (source,
+    // target) if neither node has been removed. If the source has
+    // been removed, add target to the list of dirty nodes.
+    let mut clean_edges = Vec::with_capacity(serialized_edges.len());
+    for &(serialized_source, serialized_target) in serialized_edges {
+        if let Some(target) = retraced.map(serialized_target) {
+            if let Some(source) = retraced.map(serialized_source) {
+                clean_edges.push((source, target))
+            } else {
+                // source removed, target must be dirty
+                dirty_nodes.insert(target);
+            }
+        } else {
+            // target removed, ignore the edge
+        }
+    }
+
+    debug!("compute_clean_edges: dirty_nodes={:#?}", dirty_nodes);
+
+    // Propagate dirty marks by iterating repeatedly over
+    // `clean_edges`. If we find an edge `(source, target)` where
+    // `source` is dirty, add `target` to the list of dirty nodes and
+    // remove it. Keep doing this until we find no more dirty nodes.
+    let mut previous_size = 0;
+    while dirty_nodes.len() > previous_size {
+        debug!("compute_clean_edges: previous_size={}", previous_size);
+        previous_size = dirty_nodes.len();
+        let mut i = 0;
+        while i < clean_edges.len() {
+            if dirty_nodes.contains(&clean_edges[i].0) {
+                let (source, target) = clean_edges.swap_remove(i);
+                debug!("compute_clean_edges: dirty source {:?} -> {:?}",
+                       source, target);
+                dirty_nodes.insert(target);
+            } else if dirty_nodes.contains(&clean_edges[i].1) {
+                let (source, target) = clean_edges.swap_remove(i);
+                debug!("compute_clean_edges: dirty target {:?} -> {:?}",
+                       source, target);
+            } else {
+                i += 1;
+            }
+        }
+    }
+
+    clean_edges
+}
diff --git a/src/librustc_incremental/persist/mod.rs b/src/librustc_incremental/persist/mod.rs
new file mode 100644 (file)
index 0000000..8d04fd3
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! When in incremental mode, this pass dumps out the dependency graph
+//! into the given directory. At the same time, it also hashes the
+//! various HIR nodes.
+
+mod data;
+mod directory;
+mod dirty_clean;
+mod load;
+mod save;
+mod util;
+
+pub use self::load::load_dep_graph;
+pub use self::save::save_dep_graph;
diff --git a/src/librustc_incremental/persist/save.rs b/src/librustc_incremental/persist/save.rs
new file mode 100644 (file)
index 0000000..d88f9e4
--- /dev/null
@@ -0,0 +1,136 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use calculate_svh::SvhCalculate;
+use rbml::opaque::Encoder;
+use rustc::dep_graph::DepNode;
+use rustc::ty;
+use rustc_serialize::{Encodable as RustcEncodable};
+use std::io::{self, Cursor, Write};
+use std::fs::{self, File};
+
+use super::data::*;
+use super::directory::*;
+use super::util::*;
+
+pub fn save_dep_graph<'tcx>(tcx: &ty::TyCtxt<'tcx>) {
+    let _ignore = tcx.dep_graph.in_ignore();
+
+    if let Some(dep_graph) = dep_graph_path(tcx) {
+        // FIXME(#32754) lock file?
+
+        // delete the old dep-graph, if any
+        if dep_graph.exists() {
+            match fs::remove_file(&dep_graph) {
+                Ok(()) => { }
+                Err(err) => {
+                    tcx.sess.err(
+                        &format!("unable to delete old dep-graph at `{}`: {}",
+                                 dep_graph.display(), err));
+                    return;
+                }
+            }
+        }
+
+        // generate the data in a memory buffer
+        let mut wr = Cursor::new(Vec::new());
+        match encode_dep_graph(tcx, &mut Encoder::new(&mut wr)) {
+            Ok(()) => { }
+            Err(err) => {
+                tcx.sess.err(
+                    &format!("could not encode dep-graph to `{}`: {}",
+                             dep_graph.display(), err));
+                return;
+            }
+        }
+
+        // write the data out
+        let data = wr.into_inner();
+        match
+            File::create(&dep_graph)
+            .and_then(|mut file| file.write_all(&data))
+        {
+            Ok(_) => { }
+            Err(err) => {
+                tcx.sess.err(
+                    &format!("failed to write dep-graph to `{}`: {}",
+                             dep_graph.display(), err));
+                return;
+            }
+        }
+    }
+}
+
+pub fn encode_dep_graph<'tcx>(tcx: &ty::TyCtxt<'tcx>,
+                              encoder: &mut Encoder)
+                              -> io::Result<()>
+{
+    // Here we take advantage of how RBML allows us to skip around
+    // and encode the depgraph as a two-part structure:
+    //
+    // ```
+    // <dep-graph>[SerializedDepGraph]</dep-graph> // tag 0
+    // <directory>[DefIdDirectory]</directory>     // tag 1
+    // ```
+    //
+    // Then later we can load the directory by skipping to find tag 1.
+
+    let query = tcx.dep_graph.query();
+
+    let mut builder = DefIdDirectoryBuilder::new(tcx);
+
+    // Create hashes for things we can persist.
+    let hashes =
+        query.nodes()
+             .into_iter()
+             .filter_map(|dep_node| match dep_node {
+                 DepNode::Hir(def_id) => {
+                     assert!(def_id.is_local());
+                     builder.add(def_id)
+                            .map(|index| {
+                                // FIXME(#32753) -- should we use a distinct hash here
+                                let hash = tcx.calculate_item_hash(def_id);
+                                SerializedHash { index: index, hash: hash }
+                            })
+                 }
+                 _ => None
+             })
+             .collect();
+
+    // Create the serialized dep-graph, dropping nodes that are
+    // from other crates or from inlined items.
+    //
+    // FIXME(#32015) fix handling of other crates
+    let graph = SerializedDepGraph {
+        nodes: query.nodes().into_iter()
+                            .flat_map(|node| builder.map(node))
+                            .collect(),
+        edges: query.edges().into_iter()
+                            .flat_map(|(source_node, target_node)| {
+                                builder.map(source_node)
+                                       .and_then(|source| {
+                                           builder.map(target_node)
+                                                  .map(|target| (source, target))
+                                       })
+                            })
+                            .collect(),
+        hashes: hashes,
+    };
+
+    debug!("graph = {:#?}", graph);
+
+    // Encode the directory and then the graph data.
+    let directory = builder.into_directory();
+    try!(directory.encode(encoder));
+    try!(graph.encode(encoder));
+
+    Ok(())
+}
+
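The new save.rs encodes the dep-graph in two RBML parts (directory, then graph) and maps every node and edge through the DefIdDirectoryBuilder, silently dropping anything that cannot be persisted (for example nodes from other crates). Below is a minimal standalone sketch of that edge-mapping shape only; the HashMap-based `map_node` is a hypothetical stand-in for the real builder, not the rustc API.

```rust
use std::collections::HashMap;

// Hypothetical stand-in for DefIdDirectoryBuilder: persistable node names
// map to an index, everything else yields None.
fn map_node(dir: &HashMap<&str, u32>, node: &str) -> Option<u32> {
    dir.get(node).copied()
}

fn main() {
    let directory: HashMap<&str, u32> =
        vec![("local::a", 0), ("local::b", 1)].into_iter().collect();
    let edges = vec![
        ("local::a", "local::b"),       // both ends persistable
        ("local::a", "other_crate::c"), // dropped, like a cross-crate edge
    ];

    // Same shape as encode_dep_graph: keep an edge only if *both*
    // endpoints map to an index in the directory.
    let serialized: Vec<(u32, u32)> = edges.into_iter()
        .flat_map(|(source, target)| {
            map_node(&directory, source)
                .and_then(|s| map_node(&directory, target).map(|t| (s, t)))
        })
        .collect();

    assert_eq!(serialized, vec![(0, 1)]);
}
```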
diff --git a/src/librustc_incremental/persist/serialize.rs b/src/librustc_incremental/persist/serialize.rs
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/librustc_incremental/persist/util.rs b/src/librustc_incremental/persist/util.rs
new file mode 100644 (file)
index 0000000..9b4e599
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::ty;
+use std::fs;
+use std::path::PathBuf;
+
+pub fn dep_graph_path<'tcx>(tcx: &ty::TyCtxt<'tcx>) -> Option<PathBuf> {
+    // For now, just save/load dep-graph from
+    // directory/dep_graph.rbml
+    tcx.sess.opts.incremental.as_ref().and_then(|incr_dir| {
+        match fs::create_dir_all(&incr_dir) {
+            Ok(()) => {}
+            Err(err) => {
+                tcx.sess.err(
+                    &format!("could not create the directory `{}`: {}",
+                             incr_dir.display(), err));
+                return None;
+            }
+        }
+
+        Some(incr_dir.join("dep_graph.rbml"))
+    })
+}
+
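dep_graph_path only yields a path when the session was started with an incremental directory: the directory is created on demand and failures are reported through the session. A small standalone sketch of the same Option-driven pattern follows; it uses eprintln! in place of tcx.sess.err, which is an assumption made only so the sketch runs outside the compiler.

```rust
use std::fs;
use std::path::{Path, PathBuf};

// If an incremental directory was requested, make sure it exists and
// return the dep-graph file path inside it; otherwise return None and
// no incremental data is written at all.
fn dep_graph_path(incremental_dir: Option<&Path>) -> Option<PathBuf> {
    incremental_dir.and_then(|dir| {
        if let Err(err) = fs::create_dir_all(dir) {
            // The real code reports this via tcx.sess.err(..).
            eprintln!("could not create the directory `{}`: {}", dir.display(), err);
            return None;
        }
        Some(dir.join("dep_graph.rbml"))
    })
}

fn main() {
    assert_eq!(dep_graph_path(None), None);
    let p = dep_graph_path(Some(Path::new("target/incremental-demo")));
    println!("would write dep-graph to {:?}", p);
}
```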
index 33443957d1b3b7d71cf8957565f231b4fe3fba47..3f0cd397e769c5858e91c257949e85edcbcb501c 100644 (file)
@@ -12,5 +12,5 @@ crate-type = ["dylib"]
 log = { path = "../liblog" }
 rustc = { path = "../librustc" }
 rustc_back = { path = "../librustc_back" }
-rustc_front = { path = "../librustc_front" }
+rustc_const_eval = { path = "../librustc_const_eval" }
 syntax = { path = "../libsyntax" }
index 16ef6e001ddbd867c4ae6c7d75762d2729fd47b2..f4fb226d3525f7aaf598530dcf940cdd3a04018e 100644 (file)
@@ -8,8 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use middle::def::Def;
-use middle::ty;
+use rustc::hir::def::Def;
+use rustc::ty;
 use lint::{LateContext, LintContext, LintArray};
 use lint::{LintPass, LateLintPass};
 
@@ -17,8 +17,8 @@ use syntax::ast;
 use syntax::attr::{self, AttrMetaMethods};
 use syntax::codemap::Span;
 
-use rustc_front::hir::{self, PatKind};
-use rustc_front::intravisit::FnKind;
+use rustc::hir::{self, PatKind};
+use rustc::hir::intravisit::FnKind;
 
 #[derive(PartialEq)]
 pub enum MethodLateContext {
@@ -30,7 +30,7 @@ pub enum MethodLateContext {
 pub fn method_context(cx: &LateContext, id: ast::NodeId, span: Span) -> MethodLateContext {
     let def_id = cx.tcx.map.local_def_id(id);
     match cx.tcx.impl_or_trait_items.borrow().get(&def_id) {
-        None => cx.sess().span_bug(span, "missing method descriptor?!"),
+        None => span_bug!(span, "missing method descriptor?!"),
         Some(item) => match item.container() {
             ty::TraitContainer(..) => MethodLateContext::TraitDefaultImpl,
             ty::ImplContainer(cid) => {
@@ -63,7 +63,9 @@ impl NonCamelCaseTypes {
 
             // start with a non-lowercase letter rather than non-uppercase
             // ones (some scripts don't have a concept of upper/lowercase)
-            !name.is_empty() && !name.char_at(0).is_lowercase() && !name.contains('_')
+            !name.is_empty() &&
+                !name.chars().next().unwrap().is_lowercase() &&
+                !name.contains('_')
         }
 
         fn to_camel_case(s: &str) -> String {
@@ -237,7 +239,7 @@ impl LateLintPass for NonSnakeCase {
                 fk: FnKind, _: &hir::FnDecl,
                 _: &hir::Block, span: Span, id: ast::NodeId) {
         match fk {
-            FnKind::Method(name, _, _) => match method_context(cx, id, span) {
+            FnKind::Method(name, _, _, _) => match method_context(cx, id, span) {
                 MethodLateContext::PlainImpl => {
                     self.check_snake_case(cx, "method", &name.as_str(), Some(span))
                 },
@@ -246,10 +248,10 @@ impl LateLintPass for NonSnakeCase {
                 },
                 _ => (),
             },
-            FnKind::ItemFn(name, _, _, _, _, _) => {
+            FnKind::ItemFn(name, _, _, _, _, _, _) => {
                 self.check_snake_case(cx, "function", &name.as_str(), Some(span))
             },
-            _ => (),
+            FnKind::Closure(_) => (),
         }
     }
 
@@ -283,10 +285,7 @@ impl LateLintPass for NonSnakeCase {
     fn check_struct_def(&mut self, cx: &LateContext, s: &hir::VariantData,
                         _: ast::Name, _: &hir::Generics, _: ast::NodeId) {
         for sf in s.fields() {
-            if let hir::StructField_ { kind: hir::NamedField(name, _), .. } = sf.node {
-                self.check_snake_case(cx, "structure field", &name.as_str(),
-                                      Some(sf.span));
-            }
+            self.check_snake_case(cx, "structure field", &sf.name.as_str(), Some(sf.span));
         }
     }
 }
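With the unstable str_char feature removed, the lint now reads the first character through the stable chars() iterator instead of char_at(0). A tiny self-contained sketch of the same predicate, assuming nothing beyond std:

```rust
// Stable replacement for the removed `name.char_at(0)`: take the first
// char from the iterator. The short-circuit on is_empty keeps unwrap safe.
fn starts_like_camel_case(name: &str) -> bool {
    !name.is_empty()
        && !name.chars().next().unwrap().is_lowercase()
        && !name.contains('_')
}

fn main() {
    assert!(starts_like_camel_case("MyType"));
    assert!(!starts_like_camel_case("my_type"));
}
```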
index 2780baa6de3fd72109adcb7a28aaa410ee030284..5e3a47701ebbfb767a61f5960fcd4f86317df1e8 100644 (file)
 //! Use the former for unit-like structs and the latter for structs with
 //! a `pub fn new()`.
 
-use middle::{cfg, infer, stability, traits};
-use middle::def::Def;
+use rustc::hir::def::Def;
 use middle::cstore::CrateStore;
-use middle::def_id::DefId;
-use middle::subst::Substs;
-use middle::ty::{self, Ty};
-use middle::ty::adjustment;
-use rustc::front::map as hir_map;
+use rustc::hir::def_id::DefId;
+use middle::stability;
+use rustc::{cfg, infer};
+use rustc::ty::subst::Substs;
+use rustc::ty::{self, Ty, TyCtxt};
+use rustc::ty::adjustment;
+use rustc::traits::{self, ProjectionMode};
+use rustc::hir::map as hir_map;
 use util::nodemap::{NodeSet};
 use lint::{Level, LateContext, LintContext, LintArray, Lint};
 use lint::{LintPass, LateLintPass};
@@ -46,8 +48,8 @@ use syntax::{ast};
 use syntax::attr::{self, AttrMetaMethods};
 use syntax::codemap::{self, Span};
 
-use rustc_front::hir::{self, PatKind};
-use rustc_front::intravisit::FnKind;
+use rustc::hir::{self, PatKind};
+use rustc::hir::intravisit::FnKind;
 
 use bad_style::{MethodLateContext, method_context};
 
@@ -126,7 +128,7 @@ impl LateLintPass for BoxPointers {
             hir::ItemStruct(ref struct_def, _) => {
                 for struct_field in struct_def.fields() {
                     self.check_heap_type(cx, struct_field.span,
-                                         cx.tcx.node_id_to_type(struct_field.node.id));
+                                         cx.tcx.node_id_to_type(struct_field.id));
                 }
             }
             _ => ()
@@ -222,10 +224,10 @@ impl LateLintPass for UnsafeCode {
     fn check_fn(&mut self, cx: &LateContext, fk: FnKind, _: &hir::FnDecl,
                 _: &hir::Block, span: Span, _: ast::NodeId) {
         match fk {
-            FnKind::ItemFn(_, _, hir::Unsafety::Unsafe, _, _, _) =>
+            FnKind::ItemFn(_, _, hir::Unsafety::Unsafe, _, _, _, _) =>
                 cx.span_lint(UNSAFE_CODE, span, "declaration of an `unsafe` function"),
 
-            FnKind::Method(_, sig, _) => {
+            FnKind::Method(_, sig, _, _) => {
                 if sig.unsafety == hir::Unsafety::Unsafe {
                     cx.span_lint(UNSAFE_CODE, span, "implementation of an `unsafe` method")
                 }
@@ -428,12 +430,12 @@ impl LateLintPass for MissingDoc {
     }
 
     fn check_struct_field(&mut self, cx: &LateContext, sf: &hir::StructField) {
-        if let hir::NamedField(_, vis) = sf.node.kind {
-            if vis == hir::Public || self.in_variant {
+        if !sf.is_positional() {
+            if sf.vis == hir::Public || self.in_variant {
                 let cur_struct_def = *self.struct_def_stack.last()
                     .expect("empty struct_def_stack");
                 self.check_missing_docs_attrs(cx, Some(cur_struct_def),
-                                              &sf.node.attrs, sf.span,
+                                              &sf.attrs, sf.span,
                                               "a struct field")
             }
         }
@@ -669,7 +671,7 @@ impl LateLintPass for UnconditionalRecursion {
                 cx.tcx.impl_or_trait_item(cx.tcx.map.local_def_id(id)).as_opt_method()
             }
             // closures can't recur, so they don't matter.
-            FnKind::Closure => return
+            FnKind::Closure(_) => return
         };
 
         // Walk through this function (say `f`) looking to see if
@@ -774,7 +776,7 @@ impl LateLintPass for UnconditionalRecursion {
         // Functions for identifying if the given Expr NodeId `id`
         // represents a call to the function `fn_id`/method `method`.
 
-        fn expr_refers_to_this_fn(tcx: &ty::ctxt,
+        fn expr_refers_to_this_fn(tcx: &TyCtxt,
                                   fn_id: ast::NodeId,
                                   id: ast::NodeId) -> bool {
             match tcx.map.get(id) {
@@ -790,7 +792,7 @@ impl LateLintPass for UnconditionalRecursion {
         }
 
         // Check if the expression `id` performs a call to `method`.
-        fn expr_refers_to_this_method(tcx: &ty::ctxt,
+        fn expr_refers_to_this_method(tcx: &TyCtxt,
                                       method: &ty::Method,
                                       id: ast::NodeId) -> bool {
             // Check for method calls and overloaded operators.
@@ -838,7 +840,7 @@ impl LateLintPass for UnconditionalRecursion {
 
         // Check if the method call to the method with the ID `callee_id`
         // and instantiated with `callee_substs` refers to method `method`.
-        fn method_call_refers_to_method<'tcx>(tcx: &ty::ctxt<'tcx>,
+        fn method_call_refers_to_method<'tcx>(tcx: &TyCtxt<'tcx>,
                                               method: &ty::Method,
                                               callee_id: DefId,
                                               callee_substs: &Substs<'tcx>,
@@ -868,7 +870,10 @@ impl LateLintPass for UnconditionalRecursion {
                     let node_id = tcx.map.as_local_node_id(method.def_id).unwrap();
 
                     let param_env = ty::ParameterEnvironment::for_item(tcx, node_id);
-                    let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(param_env));
+                    let infcx = infer::new_infer_ctxt(tcx,
+                                                      &tcx.tables,
+                                                      Some(param_env),
+                                                      ProjectionMode::AnyFinal);
                     let mut selcx = traits::SelectionContext::new(&infcx);
                     match selcx.select(&obligation) {
                         // The method comes from a `T: Trait` bound.
@@ -1065,7 +1070,7 @@ impl LateLintPass for MutableTransmutes {
                 }
                 let typ = cx.tcx.node_id_to_type(expr.id);
                 match typ.sty {
-                    ty::TyBareFn(_, ref bare_fn) if bare_fn.abi == RustIntrinsic => {
+                    ty::TyFnDef(_, _, ref bare_fn) if bare_fn.abi == RustIntrinsic => {
                         if let ty::FnConverging(to) = bare_fn.sig.0.output {
                             let from = bare_fn.sig.0.inputs[0];
                             return Some((&from.sty, &to.sty));
@@ -1079,13 +1084,10 @@ impl LateLintPass for MutableTransmutes {
 
         fn def_id_is_transmute(cx: &LateContext, def_id: DefId) -> bool {
             match cx.tcx.lookup_item_type(def_id).ty.sty {
-                ty::TyBareFn(_, ref bfty) if bfty.abi == RustIntrinsic => (),
+                ty::TyFnDef(_, _, ref bfty) if bfty.abi == RustIntrinsic => (),
                 _ => return false
             }
-            cx.tcx.with_path(def_id, |path| match path.last() {
-                Some(ref last) => last.name().as_str() == "transmute",
-                _ => false
-            })
+            cx.tcx.item_name(def_id).as_str() == "transmute"
         }
     }
 }
index 1cf0339c086e5137269a141fe784d630e033f1b9..e7c9097a56a58ea27bf26a1548c9a3db6edb0705 100644 (file)
@@ -36,7 +36,6 @@
 #![feature(rustc_private)]
 #![feature(slice_patterns)]
 #![feature(staged_api)]
-#![feature(str_char)]
 
 #[macro_use]
 extern crate syntax;
@@ -44,8 +43,8 @@ extern crate syntax;
 extern crate rustc;
 #[macro_use]
 extern crate log;
-extern crate rustc_front;
 extern crate rustc_back;
+extern crate rustc_const_eval;
 
 pub use rustc::lint as lint;
 pub use rustc::middle as middle;
@@ -166,11 +165,31 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) {
             id: LintId::of(INVALID_TYPE_PARAM_DEFAULT),
             reference: "PR 30742 <https://github.com/rust-lang/rust/pull/30724>",
         },
+        FutureIncompatibleInfo {
+            id: LintId::of(SUPER_OR_SELF_IN_GLOBAL_PATH),
+            reference: "PR #32403 <https://github.com/rust-lang/rust/pull/32403>",
+        },
         FutureIncompatibleInfo {
             id: LintId::of(MATCH_OF_UNIT_VARIANT_VIA_PAREN_DOTDOT),
             reference: "RFC 218 <https://github.com/rust-lang/rfcs/blob/\
                         master/text/0218-empty-struct-with-braces.md>",
         },
+        FutureIncompatibleInfo {
+            id: LintId::of(TRANSMUTE_FROM_FN_ITEM_TYPES),
+            reference: "issue #19925 <https://github.com/rust-lang/rust/issues/19925>",
+        },
+        FutureIncompatibleInfo {
+            id: LintId::of(OVERLAPPING_INHERENT_IMPLS),
+            reference: "issue #22889 <https://github.com/rust-lang/rust/issues/22889>",
+        },
+        FutureIncompatibleInfo {
+            id: LintId::of(ILLEGAL_FLOATING_POINT_CONSTANT_PATTERN),
+            reference: "RFC 1445 <https://github.com/rust-lang/rfcs/pull/1445>",
+        },
+        FutureIncompatibleInfo {
+            id: LintId::of(ILLEGAL_STRUCT_OR_ENUM_CONSTANT_PATTERN),
+            reference: "RFC 1445 <https://github.com/rust-lang/rfcs/pull/1445>",
+        },
         ]);
 
     // We have one lint pass defined specially
index 203f6626f51d3acc7791dea505b3ecdbf74ee0ed..891731cb29604ee615801933bafe6d6e49c3fa33 100644 (file)
 
 #![allow(non_snake_case)]
 
-use middle::{infer};
-use middle::def_id::DefId;
-use middle::subst::Substs;
-use middle::ty::{self, Ty};
-use middle::const_eval::{eval_const_expr_partial, ConstVal};
-use middle::const_eval::EvalHint::ExprTypeChecked;
+use rustc::hir::def_id::DefId;
+use rustc::infer;
+use rustc::ty::subst::Substs;
+use rustc::ty::{self, Ty, TyCtxt};
+use middle::const_val::ConstVal;
+use rustc_const_eval::eval_const_expr_partial;
+use rustc_const_eval::EvalHint::ExprTypeChecked;
 use util::nodemap::{FnvHashSet};
 use lint::{LateContext, LintContext, LintArray};
 use lint::{LintPass, LateLintPass};
@@ -28,9 +29,7 @@ use syntax::abi::Abi;
 use syntax::attr::{self, AttrMetaMethods};
 use syntax::codemap::{self, Span};
 
-use rustc_front::hir;
-use rustc_front::intravisit::{self, Visitor};
-use rustc_front::util::is_shift_binop;
+use rustc::hir;
 
 register_long_diagnostics! {
 E0519: r##"
@@ -130,7 +129,7 @@ impl LateLintPass for TypeLimits {
                                  "comparison is useless due to type limits");
                 }
 
-                if is_shift_binop(binop.node) {
+                if binop.node.is_shift() {
                     let opt_ty_bits = match cx.tcx.node_id_to_type(l.id).sty {
                         ty::TyInt(t) => Some(int_ty_bits(t, cx.sess().target.int_type)),
                         ty::TyUint(t) => Some(uint_ty_bits(t, cx.sess().target.uint_type)),
@@ -143,8 +142,11 @@ impl LateLintPass for TypeLimits {
                             else { false }
                         } else {
                             match eval_const_expr_partial(cx.tcx, &r, ExprTypeChecked, None) {
-                                Ok(ConstVal::Int(shift)) => { shift as u64 >= bits },
-                                Ok(ConstVal::Uint(shift)) => { shift >= bits },
+                                Ok(ConstVal::Integral(i)) => {
+                                    i.is_negative() || i.to_u64()
+                                                        .map(|i| i >= bits)
+                                                        .unwrap_or(true)
+                                },
                                 _ => { false }
                             }
                         };
@@ -178,7 +180,7 @@ impl LateLintPass for TypeLimits {
                                     return;
                                 }
                             }
-                            _ => panic!()
+                            _ => bug!()
                         };
                     },
                     ty::TyUint(t) => {
@@ -192,7 +194,7 @@ impl LateLintPass for TypeLimits {
                             // _v is u8, within range by definition
                             ast::LitKind::Byte(_v) => return,
                             ast::LitKind::Int(v, _) => v,
-                            _ => panic!()
+                            _ => bug!()
                         };
                         if lit_val < min || lit_val > max {
                             cx.span_lint(OVERFLOWING_LITERALS, e.span,
@@ -209,7 +211,7 @@ impl LateLintPass for TypeLimits {
                                     Err(_) => return
                                 }
                             }
-                            _ => panic!()
+                            _ => bug!()
                         };
                         if lit_val < min || lit_val > max {
                             cx.span_lint(OVERFLOWING_LITERALS, e.span,
@@ -230,7 +232,7 @@ impl LateLintPass for TypeLimits {
                 hir::BiGt => v >= min && v <  max,
                 hir::BiGe => v >  min && v <= max,
                 hir::BiEq | hir::BiNe => v >= min && v <= max,
-                _ => panic!()
+                _ => bug!()
             }
         }
 
@@ -293,7 +295,7 @@ impl LateLintPass for TypeLimits {
             }
         }
 
-        fn check_limits(tcx: &ty::ctxt, binop: hir::BinOp,
+        fn check_limits(tcx: &TyCtxt, binop: hir::BinOp,
                         l: &hir::Expr, r: &hir::Expr) -> bool {
             let (lit, expr, swap) = match (&l.node, &r.node) {
                 (&hir::ExprLit(_), _) => (l, r, true),
@@ -316,7 +318,7 @@ impl LateLintPass for TypeLimits {
                             ast::LitKind::Int(v, ast::LitIntType::Unsuffixed) => v as i64,
                             _ => return true
                         },
-                        _ => panic!()
+                        _ => bug!()
                     };
                     is_valid(norm_binop, lit_val, min, max)
                 }
@@ -327,7 +329,7 @@ impl LateLintPass for TypeLimits {
                             ast::LitKind::Int(v, _) => v,
                             _ => return true
                         },
-                        _ => panic!()
+                        _ => bug!()
                     };
                     is_valid(norm_binop, lit_val, min, max)
                 }
@@ -374,7 +376,7 @@ enum FfiResult {
 /// to function pointers and references, but could be
 /// expanded to cover NonZero raw pointers and newtypes.
 /// FIXME: This duplicates code in trans.
-fn is_repr_nullable_ptr<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn is_repr_nullable_ptr<'tcx>(tcx: &TyCtxt<'tcx>,
                               def: ty::AdtDef<'tcx>,
                               substs: &Substs<'tcx>)
                               -> bool {
@@ -391,7 +393,7 @@ fn is_repr_nullable_ptr<'tcx>(tcx: &ty::ctxt<'tcx>,
 
         if def.variants[data_idx].fields.len() == 1 {
             match def.variants[data_idx].fields[0].ty(tcx, substs).sty {
-                ty::TyBareFn(None, _) => { return true; }
+                ty::TyFnPtr(_) => { return true; }
                 ty::TyRef(..) => { return true; }
                 _ => { }
             }
@@ -400,16 +402,6 @@ fn is_repr_nullable_ptr<'tcx>(tcx: &ty::ctxt<'tcx>,
     false
 }
 
-fn ast_ty_to_normalized<'tcx>(tcx: &ty::ctxt<'tcx>,
-                              id: ast::NodeId)
-                              -> Ty<'tcx> {
-    let tty = match tcx.ast_ty_to_ty_cache.borrow().get(&id) {
-        Some(&t) => t,
-        None => panic!("ast_ty_to_ty_cache was incomplete after typeck!")
-    };
-    infer::normalize_associated_type(tcx, &tty)
-}
-
 impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
     /// Check if the given type is "ffi-safe" (has a stable, well-defined
     /// representation which can be exported to C code).
@@ -556,7 +548,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
                 self.check_type_for_ffi(cache, ty)
             }
 
-            ty::TyBareFn(None, bare_fn) => {
+            ty::TyFnPtr(bare_fn) => {
                 match bare_fn.abi {
                     Abi::Rust |
                     Abi::RustIntrinsic |
@@ -595,16 +587,18 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
 
             ty::TyParam(..) | ty::TyInfer(..) | ty::TyError |
             ty::TyClosure(..) | ty::TyProjection(..) |
-            ty::TyBareFn(Some(_), _) => {
-                panic!("Unexpected type in foreign function")
+            ty::TyFnDef(..) => {
+                bug!("Unexpected type in foreign function")
             }
         }
     }
 
-    fn check_def(&mut self, sp: Span, id: ast::NodeId) {
-        let tty = ast_ty_to_normalized(self.cx.tcx, id);
+    fn check_type_for_ffi_and_report_errors(&mut self, sp: Span, ty: Ty<'tcx>) {
+        // it is only OK to use this function because extern fns cannot have
+        // any generic types right now:
+        let ty = infer::normalize_associated_type(self.cx.tcx, &ty);
 
-        match ImproperCTypesVisitor::check_type_for_ffi(self, &mut FnvHashSet(), tty) {
+        match self.check_type_for_ffi(&mut FnvHashSet(), ty) {
             FfiResult::FfiSafe => {}
             FfiResult::FfiUnsafe(s) => {
                 self.cx.span_lint(IMPROPER_CTYPES, sp, s);
@@ -625,27 +619,30 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
             }
         }
     }
-}
 
-impl<'a, 'tcx, 'v> Visitor<'v> for ImproperCTypesVisitor<'a, 'tcx> {
-    fn visit_ty(&mut self, ty: &hir::Ty) {
-        match ty.node {
-            hir::TyPath(..) |
-            hir::TyBareFn(..) => self.check_def(ty.span, ty.id),
-            hir::TyVec(..) => {
-                self.cx.span_lint(IMPROPER_CTYPES, ty.span,
-                    "found Rust slice type in foreign module, consider \
-                     using a raw pointer instead");
-            }
-            hir::TyFixedLengthVec(ref ty, _) => self.visit_ty(ty),
-            hir::TyTup(..) => {
-                self.cx.span_lint(IMPROPER_CTYPES, ty.span,
-                    "found Rust tuple type in foreign module; \
-                     consider using a struct instead`")
+    fn check_foreign_fn(&mut self, id: ast::NodeId, decl: &hir::FnDecl) {
+        let def_id = self.cx.tcx.map.local_def_id(id);
+        let scheme = self.cx.tcx.lookup_item_type(def_id);
+        let sig = scheme.ty.fn_sig();
+        let sig = self.cx.tcx.erase_late_bound_regions(&sig);
+
+        for (&input_ty, input_hir) in sig.inputs.iter().zip(&decl.inputs) {
+            self.check_type_for_ffi_and_report_errors(input_hir.ty.span, &input_ty);
+        }
+
+        if let hir::Return(ref ret_hir) = decl.output {
+            let ret_ty = sig.output.unwrap();
+            if !ret_ty.is_nil() {
+                self.check_type_for_ffi_and_report_errors(ret_hir.span, ret_ty);
             }
-            _ => intravisit::walk_ty(self, ty)
         }
     }
+
+    fn check_foreign_static(&mut self, id: ast::NodeId, span: Span) {
+        let def_id = self.cx.tcx.map.local_def_id(id);
+        let scheme = self.cx.tcx.lookup_item_type(def_id);
+        self.check_type_for_ffi_and_report_errors(span, scheme.ty);
+    }
 }
 
 #[derive(Copy, Clone)]
@@ -659,29 +656,17 @@ impl LintPass for ImproperCTypes {
 
 impl LateLintPass for ImproperCTypes {
     fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
-        fn check_ty(cx: &LateContext, ty: &hir::Ty) {
-            let mut vis = ImproperCTypesVisitor { cx: cx };
-            vis.visit_ty(ty);
-        }
-
-        fn check_foreign_fn(cx: &LateContext, decl: &hir::FnDecl) {
-            for input in &decl.inputs {
-                check_ty(cx, &input.ty);
-            }
-            if let hir::Return(ref ret_ty) = decl.output {
-                let tty = ast_ty_to_normalized(cx.tcx, ret_ty.id);
-                if !tty.is_nil() {
-                    check_ty(cx, &ret_ty);
-                }
-            }
-        }
-
+        let mut vis = ImproperCTypesVisitor { cx: cx };
         if let hir::ItemForeignMod(ref nmod) = it.node {
             if nmod.abi != Abi::RustIntrinsic && nmod.abi != Abi::PlatformIntrinsic {
                 for ni in &nmod.items {
                     match ni.node {
-                        hir::ForeignItemFn(ref decl, _) => check_foreign_fn(cx, &decl),
-                        hir::ForeignItemStatic(ref t, _) => check_ty(cx, &t)
+                        hir::ForeignItemFn(ref decl, _) => {
+                            vis.check_foreign_fn(ni.id, decl);
+                        }
+                        hir::ForeignItemStatic(ref ty, _) => {
+                            vis.check_foreign_static(ni.id, ty.span);
+                        }
                     }
                 }
             }
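The ImproperCTypes lint now checks the typechecked signature of each foreign item rather than walking the HIR types. As a rough illustration of what the lint is meant to flag versus accept, here is a standalone example; the warning texts in the comments are paraphrased, not the exact diagnostics.

```rust
// A slice or tuple in an `extern` block is not FFI-safe and trips the
// improper_ctypes lint; a #[repr(C)] struct passed by pointer is fine.
#[repr(C)]
pub struct Pair {
    a: i32,
    b: i32,
}

extern "C" {
    fn takes_pair(p: *const Pair);  // FFI-safe: accepted
    fn takes_slice(s: &[u8]);       // warns: Rust slice in a foreign signature
    fn takes_tuple(t: (i32, i32));  // warns: Rust tuple in a foreign signature
}

fn main() {}
```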
index 36ed06a6c00b47363c6823d731e6f3640bcf9d07..d1595f243c9bfba65f14b3170757f463a0bf8f11 100644 (file)
@@ -8,9 +8,9 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use middle::pat_util;
-use middle::ty;
-use middle::ty::adjustment;
+use rustc::hir::pat_util;
+use rustc::ty;
+use rustc::ty::adjustment;
 use util::nodemap::FnvHashMap;
 use lint::{LateContext, EarlyContext, LintContext, LintArray};
 use lint::{LintPass, EarlyLintPass, LateLintPass};
@@ -24,8 +24,8 @@ use syntax::feature_gate::{KNOWN_ATTRIBUTES, AttributeType};
 use syntax::ptr::P;
 
 use rustc_back::slice;
-use rustc_front::hir;
-use rustc_front::intravisit::FnKind;
+use rustc::hir;
+use rustc::hir::intravisit::FnKind;
 
 declare_lint! {
     pub UNUSED_MUT,
index 59164161b3d5ec12289ebec50f883f762dca9517..dcfb518ba793815527411421cafe914845875474 100644 (file)
@@ -47,14 +47,20 @@ fn main() {
     // the host platform. This only really works if the host LLVM and target
     // LLVM are compiled the same way, but for us that's typically the case.
     //
-    // We detect this cross compiling situation by asking llvm-config what it's
-    // host-target is. If that's not the TARGET, then we're cross compiling.
-    // This generally just means that we can't trust all the output of
-    // llvm-config becaues it might be targeted for the host rather than the
-    // target.
+    // We *want* to detect this cross compiling situation by asking llvm-config
+    // what its host-target is. If that's not the TARGET, then we're cross
+    // compiling. Unfortunately `llvm-config` seems to either be buggy, or we're
+    // misconfiguring it, because the `i686-pc-windows-gnu` build of LLVM will
+    // report itself with a `--host-target` of `x86_64-pc-windows-gnu`. This
+    // tricks us into thinking we're doing a cross build when we aren't, so
+    // havoc ensues.
+    //
+    // In any case, if we're cross compiling, this generally just means that we
+    // can't trust all the output of llvm-config because it might be targeted
+    // for the host rather than the target. As a result a bunch of blocks below
+    // are gated on `if !is_crossed`
     let target = env::var("TARGET").unwrap();
-    let host = output(Command::new(&llvm_config).arg("--host-target"));
-    let host = host.trim();
+    let host = env::var("HOST").unwrap();
     let is_crossed = target != host;
 
     let optional_components = ["x86", "arm", "aarch64", "mips", "powerpc",
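The build script now derives is_crossed from the TARGET and HOST environment variables that Cargo sets for build scripts, instead of trusting `llvm-config --host-target`. A minimal sketch of that check; the fallback triples are assumptions only so the sketch runs outside a build script.

```rust
use std::env;

fn main() {
    // In a Cargo build script TARGET and HOST are always set; the
    // unwrap_or_else fallbacks exist only so this sketch runs standalone.
    let target = env::var("TARGET").unwrap_or_else(|_| "x86_64-unknown-linux-gnu".into());
    let host = env::var("HOST").unwrap_or_else(|_| "x86_64-unknown-linux-gnu".into());
    let is_crossed = target != host;

    if is_crossed {
        println!("cross compiling: llvm-config output describes the host, not {}", target);
    } else {
        println!("native build for {}", target);
    }
}
```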
index 13acf79a0f1d05cd81b058f33855e3f7c6918e12..4df2da801f92588201e2401a0d9058c1164a3119 100644 (file)
@@ -33,8 +33,6 @@
 extern crate libc;
 #[macro_use] #[no_link] extern crate rustc_bitflags;
 
-pub use self::OtherAttribute::*;
-pub use self::SpecialAttribute::*;
 pub use self::AttributeSet::*;
 pub use self::IntPredicate::*;
 pub use self::RealPredicate::*;
@@ -133,6 +131,7 @@ pub enum DLLStorageClassTypes {
 }
 
 bitflags! {
+    #[derive(Default, Debug)]
     flags Attribute : u64 {
         const ZExt            = 1 << 0,
         const SExt            = 1 << 1,
@@ -150,138 +149,95 @@ bitflags! {
         const OptimizeForSize = 1 << 13,
         const StackProtect    = 1 << 14,
         const StackProtectReq = 1 << 15,
-        const Alignment       = 1 << 16,
         const NoCapture       = 1 << 21,
         const NoRedZone       = 1 << 22,
         const NoImplicitFloat = 1 << 23,
         const Naked           = 1 << 24,
         const InlineHint      = 1 << 25,
-        const Stack           = 7 << 26,
         const ReturnsTwice    = 1 << 29,
         const UWTable         = 1 << 30,
         const NonLazyBind     = 1 << 31,
+
+        // Some of these are missing from the LLVM C API, the rest are
+        // present, but commented out, and preceded by the following warning:
+        // FIXME: These attributes are currently not included in the C API as
+        // a temporary measure until the API/ABI impact to the C API is understood
+        // and the path forward agreed upon.
+        const SanitizeAddress = 1 << 32,
+        const MinSize         = 1 << 33,
+        const NoDuplicate     = 1 << 34,
+        const StackProtectStrong = 1 << 35,
+        const SanitizeThread  = 1 << 36,
+        const SanitizeMemory  = 1 << 37,
+        const NoBuiltin       = 1 << 38,
+        const Returned        = 1 << 39,
+        const Cold            = 1 << 40,
+        const Builtin         = 1 << 41,
         const OptimizeNone    = 1 << 42,
+        const InAlloca        = 1 << 43,
+        const NonNull         = 1 << 44,
+        const JumpTable       = 1 << 45,
+        const Convergent      = 1 << 46,
+        const SafeStack       = 1 << 47,
+        const NoRecurse       = 1 << 48,
+        const InaccessibleMemOnly         = 1 << 49,
+        const InaccessibleMemOrArgMemOnly = 1 << 50,
     }
 }
 
-
-#[repr(u64)]
-#[derive(Copy, Clone)]
-pub enum OtherAttribute {
-    // The following are not really exposed in
-    // the LLVM C api so instead to add these
-    // we call a wrapper function in RustWrapper
-    // that uses the C++ api.
-    SanitizeAddressAttribute = 1 << 32,
-    MinSizeAttribute = 1 << 33,
-    NoDuplicateAttribute = 1 << 34,
-    StackProtectStrongAttribute = 1 << 35,
-    SanitizeThreadAttribute = 1 << 36,
-    SanitizeMemoryAttribute = 1 << 37,
-    NoBuiltinAttribute = 1 << 38,
-    ReturnedAttribute = 1 << 39,
-    ColdAttribute = 1 << 40,
-    BuiltinAttribute = 1 << 41,
-    OptimizeNoneAttribute = 1 << 42,
-    InAllocaAttribute = 1 << 43,
-    NonNullAttribute = 1 << 44,
+#[derive(Copy, Clone, Default, Debug)]
+pub struct Attributes {
+    regular: Attribute,
+    dereferenceable_bytes: u64
 }
 
-#[derive(Copy, Clone)]
-pub enum SpecialAttribute {
-    DereferenceableAttribute(u64)
-}
-
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub enum AttributeSet {
-    ReturnIndex = 0,
-    FunctionIndex = !0
-}
-
-pub trait AttrHelper {
-    fn apply_llfn(&self, idx: c_uint, llfn: ValueRef);
-    fn apply_callsite(&self, idx: c_uint, callsite: ValueRef);
-}
-
-impl AttrHelper for Attribute {
-    fn apply_llfn(&self, idx: c_uint, llfn: ValueRef) {
-        unsafe {
-            LLVMAddFunctionAttribute(llfn, idx, self.bits() as uint64_t);
-        }
+impl Attributes {
+    pub fn set(&mut self, attr: Attribute) -> &mut Self {
+        self.regular = self.regular | attr;
+        self
     }
 
-    fn apply_callsite(&self, idx: c_uint, callsite: ValueRef) {
-        unsafe {
-            LLVMAddCallSiteAttribute(callsite, idx, self.bits() as uint64_t);
-        }
+    pub fn unset(&mut self, attr: Attribute) -> &mut Self {
+        self.regular = self.regular - attr;
+        self
     }
-}
 
-impl AttrHelper for OtherAttribute {
-    fn apply_llfn(&self, idx: c_uint, llfn: ValueRef) {
-        unsafe {
-            LLVMAddFunctionAttribute(llfn, idx, *self as uint64_t);
-        }
+    pub fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self {
+        self.dereferenceable_bytes = bytes;
+        self
     }
 
-    fn apply_callsite(&self, idx: c_uint, callsite: ValueRef) {
-        unsafe {
-            LLVMAddCallSiteAttribute(callsite, idx, *self as uint64_t);
-        }
+    pub fn unset_dereferenceable(&mut self) -> &mut Self {
+        self.dereferenceable_bytes = 0;
+        self
     }
-}
 
-impl AttrHelper for SpecialAttribute {
-    fn apply_llfn(&self, idx: c_uint, llfn: ValueRef) {
-        match *self {
-            DereferenceableAttribute(bytes) => unsafe {
-                LLVMAddDereferenceableAttr(llfn, idx, bytes as uint64_t);
+    pub fn apply_llfn(&self, idx: usize, llfn: ValueRef) {
+        unsafe {
+            LLVMAddFunctionAttribute(llfn, idx as c_uint, self.regular.bits());
+            if self.dereferenceable_bytes != 0 {
+                LLVMAddDereferenceableAttr(llfn, idx as c_uint,
+                                           self.dereferenceable_bytes);
             }
         }
     }
 
-    fn apply_callsite(&self, idx: c_uint, callsite: ValueRef) {
-        match *self {
-            DereferenceableAttribute(bytes) => unsafe {
-                LLVMAddDereferenceableCallSiteAttr(callsite, idx, bytes as uint64_t);
+    pub fn apply_callsite(&self, idx: usize, callsite: ValueRef) {
+        unsafe {
+            LLVMAddCallSiteAttribute(callsite, idx as c_uint, self.regular.bits());
+            if self.dereferenceable_bytes != 0 {
+                LLVMAddDereferenceableCallSiteAttr(callsite, idx as c_uint,
+                                                   self.dereferenceable_bytes);
             }
         }
     }
 }
 
-pub struct AttrBuilder {
-    attrs: Vec<(usize, Box<AttrHelper+'static>)>
-}
-
-impl AttrBuilder {
-    pub fn new() -> AttrBuilder {
-        AttrBuilder {
-            attrs: Vec::new()
-        }
-    }
-
-    pub fn arg<T: AttrHelper + 'static>(&mut self, idx: usize, a: T) -> &mut AttrBuilder {
-        self.attrs.push((idx, box a as Box<AttrHelper+'static>));
-        self
-    }
-
-    pub fn ret<T: AttrHelper + 'static>(&mut self, a: T) -> &mut AttrBuilder {
-        self.attrs.push((ReturnIndex as usize, box a as Box<AttrHelper+'static>));
-        self
-    }
-
-    pub fn apply_llfn(&self, llfn: ValueRef) {
-        for &(idx, ref attr) in &self.attrs {
-            attr.apply_llfn(idx as c_uint, llfn);
-        }
-    }
-
-    pub fn apply_callsite(&self, callsite: ValueRef) {
-        for &(idx, ref attr) in &self.attrs {
-            attr.apply_callsite(idx as c_uint, callsite);
-        }
-    }
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub enum AttributeSet {
+    ReturnIndex = 0,
+    FunctionIndex = !0
 }
 
 // enum for the LLVM IntPredicate type
@@ -1008,9 +964,10 @@ extern {
     pub fn LLVMAddFunctionAttrStringValue(Fn: ValueRef, index: c_uint,
                                           Name: *const c_char,
                                           Value: *const c_char);
+    pub fn LLVMRemoveFunctionAttributes(Fn: ValueRef, index: c_uint, attr: uint64_t);
     pub fn LLVMRemoveFunctionAttrString(Fn: ValueRef, index: c_uint, Name: *const c_char);
-    pub fn LLVMGetFunctionAttr(Fn: ValueRef) -> c_ulonglong;
-    pub fn LLVMRemoveFunctionAttr(Fn: ValueRef, val: c_ulonglong);
+    pub fn LLVMGetFunctionAttr(Fn: ValueRef) -> c_uint;
+    pub fn LLVMRemoveFunctionAttr(Fn: ValueRef, val: c_uint);
 
     /* Operations on parameters */
     pub fn LLVMCountParams(Fn: ValueRef) -> c_uint;
@@ -1354,6 +1311,7 @@ extern {
                          -> ValueRef;
     pub fn LLVMBuildNot(B: BuilderRef, V: ValueRef, Name: *const c_char)
                         -> ValueRef;
+    pub fn LLVMRustSetHasUnsafeAlgebra(Instr: ValueRef);
 
     /* Memory */
     pub fn LLVMBuildAlloca(B: BuilderRef, Ty: TypeRef, Name: *const c_char)
@@ -2165,6 +2123,11 @@ extern {
                                          NumInputs: c_uint)
                                          -> OperandBundleDefRef;
     pub fn LLVMRustFreeOperandBundleDef(Bundle: OperandBundleDefRef);
+
+    pub fn LLVMRustPositionBuilderAtStart(B: BuilderRef, BB: BasicBlockRef);
+
+    pub fn LLVMRustSetComdat(M: ModuleRef, V: ValueRef, Name: *const c_char);
+    pub fn LLVMRustUnsetComdat(V: ValueRef);
 }
 
 // LLVM requires symbols from this library, but apparently they're not printed
@@ -2189,6 +2152,24 @@ pub fn SetLinkage(global: ValueRef, link: Linkage) {
     }
 }
 
+// Externally visible symbols that might appear in multiple translation units need to appear in
+// their own comdat section so that the duplicates can be discarded at link time. This can for
+// example happen for generics when using multiple codegen units. This function simply uses the
+// value's name as the comdat value to make sure that it is in a 1-to-1 relationship to the
+// function.
+// For more details on COMDAT sections see e.g. http://www.airs.com/blog/archives/52
+pub fn SetUniqueComdat(llmod: ModuleRef, val: ValueRef) {
+    unsafe {
+        LLVMRustSetComdat(llmod, val, LLVMGetValueName(val));
+    }
+}
+
+pub fn UnsetComdat(val: ValueRef) {
+    unsafe {
+        LLVMRustUnsetComdat(val);
+    }
+}
+
 pub fn SetDLLStorageClass(global: ValueRef, class: DLLStorageClassTypes) {
     unsafe {
         LLVMRustSetDLLStorageClass(global, class);
@@ -2225,6 +2206,13 @@ pub fn SetFunctionAttribute(fn_: ValueRef, attr: Attribute) {
     }
 }
 
+pub fn RemoveFunctionAttributes(fn_: ValueRef, attr: Attribute) {
+    unsafe {
+        LLVMRemoveFunctionAttributes(fn_, FunctionIndex as c_uint,
+                                           attr.bits() as uint64_t)
+    }
+}
+
 /* Memory-managed interface to target data. */
 
 pub struct TargetData {
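The old AttrBuilder/AttrHelper machinery is replaced by a plain Attributes value that keeps the regular LLVM attributes as bitflags and tracks the dereferenceable byte count separately. Below is a standalone sketch of the same builder shape; plain u64 constants stand in for the bitflags type, and the names and bit positions are illustrative, not the real rustc_llvm API.

```rust
// Illustrative stand-ins for the bitflags-based Attribute set.
const NO_UNWIND: u64 = 1 << 5;
const NO_INLINE: u64 = 1 << 11;

#[derive(Copy, Clone, Default, Debug)]
struct Attributes {
    regular: u64,
    dereferenceable_bytes: u64,
}

impl Attributes {
    fn set(&mut self, attr: u64) -> &mut Self {
        self.regular |= attr;
        self
    }
    fn unset(&mut self, attr: u64) -> &mut Self {
        self.regular &= !attr;
        self
    }
    fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self {
        self.dereferenceable_bytes = bytes;
        self
    }
}

fn main() {
    let mut attrs = Attributes::default();
    attrs.set(NO_UNWIND).set(NO_INLINE).unset(NO_INLINE).set_dereferenceable(8);
    // In the real code these would then be applied via apply_llfn/apply_callsite.
    println!("{:?}", attrs);
}
```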
index d6bb4b157a6a3d2471c531c50d4e0ab8094c6190..697cbe39c67fcb61c69eb02372ec8c866727fe2f 100644 (file)
@@ -15,7 +15,7 @@ rbml = { path = "../librbml" }
 rustc = { path = "../librustc" }
 rustc_back = { path = "../librustc_back" }
 rustc_bitflags = { path = "../librustc_bitflags" }
-rustc_front = { path = "../librustc_front" }
+rustc_const_math = { path = "../librustc_const_math" }
 rustc_llvm = { path = "../librustc_llvm" }
 serialize = { path = "../libserialize" }
 syntax = { path = "../libsyntax" }
index fe4df865a0e61cd910bd0613827227256038ce65..6fd9f27f0fb9fb9567911e01f0cb1ee5b07858b9 100644 (file)
 // FIXME: remove this after snapshot, and Results are handled
 #![allow(unused_must_use)]
 
-use rustc::front::map as ast_map;
+use rustc::hir::map as ast_map;
 use rustc::session::Session;
 
-use rustc_front::hir;
-use rustc_front::fold;
-use rustc_front::fold::Folder;
+use rustc::hir;
+use rustc::hir::fold;
+use rustc::hir::fold::Folder;
+use rustc::hir::intravisit::{IdRange, IdRangeComputingVisitor, IdVisitingOperation};
 
 use common as c;
 use cstore;
@@ -27,17 +28,16 @@ use tydecode;
 use tyencode;
 
 use middle::cstore::{InlinedItem, InlinedItemRef};
-use middle::ty::adjustment;
-use middle::ty::cast;
+use rustc::ty::adjustment;
+use rustc::ty::cast;
 use middle::const_qualif::ConstQualif;
-use middle::def::{self, Def};
-use middle::def_id::DefId;
-use middle::privacy::{AllPublic, LastMod};
+use rustc::hir::def::{self, Def};
+use rustc::hir::def_id::DefId;
 use middle::region;
-use middle::subst;
-use middle::ty::{self, Ty};
+use rustc::ty::subst;
+use rustc::ty::{self, Ty, TyCtxt};
 
-use syntax::{ast, ast_util, codemap};
+use syntax::{ast, codemap};
 use syntax::ast::NodeIdAssigner;
 use syntax::ptr::P;
 
@@ -56,14 +56,14 @@ use serialize::EncoderHelpers;
 #[cfg(test)] use std::io::Cursor;
 #[cfg(test)] use syntax::parse;
 #[cfg(test)] use syntax::ast::NodeId;
-#[cfg(test)] use rustc_front::print::pprust;
-#[cfg(test)] use rustc_front::lowering::{lower_item, LoweringContext};
+#[cfg(test)] use rustc::hir::print as pprust;
+#[cfg(test)] use rustc::hir::lowering::{lower_item, LoweringContext};
 
 struct DecodeContext<'a, 'b, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     cdata: &'b cstore::crate_metadata,
-    from_id_range: ast_util::IdRange,
-    to_id_range: ast_util::IdRange,
+    from_id_range: IdRange,
+    to_id_range: IdRange,
     // Cache the last used filemap for translating spans as an optimization.
     last_filemap_index: Cell<usize>,
 }
@@ -85,7 +85,7 @@ pub fn encode_inlined_item(ecx: &e::EncodeContext,
         InlinedItemRef::ImplItem(_, ii) => ii.id,
     };
     debug!("> Encoding inlined item: {} ({:?})",
-           ecx.tcx.map.path_to_string(id),
+           ecx.tcx.node_path_str(id),
            rbml_w.writer.seek(SeekFrom::Current(0)));
 
     // Folding could be avoided with a smarter encoder.
@@ -99,7 +99,7 @@ pub fn encode_inlined_item(ecx: &e::EncodeContext,
     rbml_w.end_tag();
 
     debug!("< Encoded inlined fn: {} ({:?})",
-           ecx.tcx.map.path_to_string(id),
+           ecx.tcx.node_path_str(id),
            rbml_w.writer.seek(SeekFrom::Current(0)));
 }
 
@@ -123,77 +123,60 @@ impl<'a, 'b, 'c, 'tcx> ast_map::FoldOps for &'a DecodeContext<'b, 'c, 'tcx> {
 /// Decodes an item from its AST in the cdata's metadata and adds it to the
 /// ast-map.
 pub fn decode_inlined_item<'tcx>(cdata: &cstore::crate_metadata,
-                                 tcx: &ty::ctxt<'tcx>,
-                                 parent_path: Vec<ast_map::PathElem>,
+                                 tcx: &TyCtxt<'tcx>,
                                  parent_def_path: ast_map::DefPath,
-                                 par_doc: rbml::Doc,
+                                 parent_did: DefId,
+                                 ast_doc: rbml::Doc,
                                  orig_did: DefId)
-                                 -> Result<&'tcx InlinedItem, (Vec<ast_map::PathElem>,
-                                                               ast_map::DefPath)> {
-    match par_doc.opt_child(c::tag_ast) {
-      None => Err((parent_path, parent_def_path)),
-      Some(ast_doc) => {
-        let mut path_as_str = None;
-        debug!("> Decoding inlined fn: {:?}::?",
-        {
-            // Do an Option dance to use the path after it is moved below.
-            let s = ast_map::path_to_string(parent_path.iter().cloned());
-            path_as_str = Some(s);
-            path_as_str.as_ref().map(|x| &x[..])
-        });
-        let mut ast_dsr = reader::Decoder::new(ast_doc);
-        let from_id_range = Decodable::decode(&mut ast_dsr).unwrap();
-        let to_id_range = reserve_id_range(&tcx.sess, from_id_range);
-        let dcx = &DecodeContext {
-            cdata: cdata,
-            tcx: tcx,
-            from_id_range: from_id_range,
-            to_id_range: to_id_range,
-            last_filemap_index: Cell::new(0)
-        };
-        let raw_ii = decode_ast(ast_doc);
-        let ii = ast_map::map_decoded_item(&dcx.tcx.map,
-                                           parent_path,
-                                           parent_def_path,
-                                           raw_ii,
-                                           dcx);
-        let name = match *ii {
-            InlinedItem::Item(ref i) => i.name,
-            InlinedItem::Foreign(ref i) => i.name,
-            InlinedItem::TraitItem(_, ref ti) => ti.name,
-            InlinedItem::ImplItem(_, ref ii) => ii.name
-        };
-        debug!("Fn named: {}", name);
-        debug!("< Decoded inlined fn: {}::{}",
-               path_as_str.unwrap(),
-               name);
-        region::resolve_inlined_item(&tcx.sess, &tcx.region_maps, ii);
-        decode_side_tables(dcx, ast_doc);
-        copy_item_types(dcx, ii, orig_did);
-        match *ii {
-          InlinedItem::Item(ref i) => {
-            debug!(">>> DECODED ITEM >>>\n{}\n<<< DECODED ITEM <<<",
-                   ::rustc_front::print::pprust::item_to_string(&i));
-          }
-          _ => { }
-        }
-
-        Ok(ii)
-      }
-    }
+                                 -> &'tcx InlinedItem {
+    debug!("> Decoding inlined fn: {:?}", tcx.item_path_str(orig_did));
+    let mut ast_dsr = reader::Decoder::new(ast_doc);
+    let from_id_range = Decodable::decode(&mut ast_dsr).unwrap();
+    let to_id_range = reserve_id_range(&tcx.sess, from_id_range);
+    let dcx = &DecodeContext {
+        cdata: cdata,
+        tcx: tcx,
+        from_id_range: from_id_range,
+        to_id_range: to_id_range,
+        last_filemap_index: Cell::new(0)
+    };
+    let ii = ast_map::map_decoded_item(&dcx.tcx.map,
+                                       parent_def_path,
+                                       parent_did,
+                                       decode_ast(ast_doc),
+                                       dcx);
+    let name = match *ii {
+        InlinedItem::Item(ref i) => i.name,
+        InlinedItem::Foreign(ref i) => i.name,
+        InlinedItem::TraitItem(_, ref ti) => ti.name,
+        InlinedItem::ImplItem(_, ref ii) => ii.name
+    };
+    debug!("Fn named: {}", name);
+    debug!("< Decoded inlined fn: {}::{}",
+            tcx.item_path_str(parent_did),
+            name);
+    region::resolve_inlined_item(&tcx.sess, &tcx.region_maps, ii);
+    decode_side_tables(dcx, ast_doc);
+    copy_item_types(dcx, ii, orig_did);
+    if let InlinedItem::Item(ref i) = *ii {
+        debug!(">>> DECODED ITEM >>>\n{}\n<<< DECODED ITEM <<<",
+               ::rustc::hir::print::item_to_string(&i));
+    }
+
+    ii
 }
 
 // ______________________________________________________________________
 // Enumerating the IDs which appear in an AST
 
 fn reserve_id_range(sess: &Session,
-                    from_id_range: ast_util::IdRange) -> ast_util::IdRange {
+                    from_id_range: IdRange) -> IdRange {
     // Handle the case of an empty range:
     if from_id_range.empty() { return from_id_range; }
     let cnt = from_id_range.max - from_id_range.min;
     let to_id_min = sess.reserve_node_ids(cnt);
     let to_id_max = to_id_min + cnt;
-    ast_util::IdRange { min: to_id_min, max: to_id_max }
+    IdRange { min: to_id_min, max: to_id_max }
 }
 
 impl<'a, 'b, 'tcx> DecodeContext<'a, 'b, 'tcx> {
@@ -254,7 +237,7 @@ trait def_id_encoder_helpers {
 }
 
 impl<S:serialize::Encoder> def_id_encoder_helpers for S
-    where <S as serialize::serialize::Encoder>::Error: Debug
+    where <S as serialize::Encoder>::Error: Debug
 {
     fn emit_def_id(&mut self, did: DefId) {
         did.encode(self).unwrap()
@@ -268,7 +251,7 @@ trait def_id_decoder_helpers {
 }
 
 impl<D:serialize::Decoder> def_id_decoder_helpers for D
-    where <D as serialize::serialize::Decoder>::Error: Debug
+    where <D as serialize::Decoder>::Error: Debug
 {
     fn read_def_id(&mut self, dcx: &DecodeContext) -> DefId {
         let did: DefId = Decodable::decode(self).unwrap();
@@ -360,8 +343,8 @@ fn simplify_ast(ii: InlinedItemRef) -> InlinedItem {
     }
 }
 
-fn decode_ast(par_doc: rbml::Doc) -> InlinedItem {
-    let chi_doc = par_doc.get(c::tag_tree as usize);
+fn decode_ast(item_doc: rbml::Doc) -> InlinedItem {
+    let chi_doc = item_doc.get(c::tag_tree as usize);
     let mut rbml_r = reader::Decoder::new(chi_doc);
     rbml_r.read_opaque(|decoder, _| Decodable::decode(decoder)).unwrap()
 }
@@ -418,20 +401,20 @@ impl tr for Def {
 // ______________________________________________________________________
 // Encoding and decoding of freevar information
 
-fn encode_freevar_entry(rbml_w: &mut Encoder, fv: &ty::Freevar) {
+fn encode_freevar_entry(rbml_w: &mut Encoder, fv: &hir::Freevar) {
     (*fv).encode(rbml_w).unwrap();
 }
 
 trait rbml_decoder_helper {
     fn read_freevar_entry(&mut self, dcx: &DecodeContext)
-                          -> ty::Freevar;
+                          -> hir::Freevar;
     fn read_capture_mode(&mut self) -> hir::CaptureClause;
 }
 
 impl<'a> rbml_decoder_helper for reader::Decoder<'a> {
     fn read_freevar_entry(&mut self, dcx: &DecodeContext)
-                          -> ty::Freevar {
-        let fv: ty::Freevar = Decodable::decode(self).unwrap();
+                          -> hir::Freevar {
+        let fv: hir::Freevar = Decodable::decode(self).unwrap();
         fv.tr(dcx)
     }
 
@@ -441,9 +424,9 @@ impl<'a> rbml_decoder_helper for reader::Decoder<'a> {
     }
 }
 
-impl tr for ty::Freevar {
-    fn tr(&self, dcx: &DecodeContext) -> ty::Freevar {
-        ty::Freevar {
+impl tr for hir::Freevar {
+    fn tr(&self, dcx: &DecodeContext) -> hir::Freevar {
+        hir::Freevar {
             def: self.def.tr(dcx),
             span: self.span.tr(dcx),
         }
@@ -714,7 +697,7 @@ struct SideTableEncodingIdVisitor<'a, 'b:'a, 'c:'a, 'tcx:'c> {
     rbml_w: &'a mut Encoder<'b>,
 }
 
-impl<'a, 'b, 'c, 'tcx> ast_util::IdVisitingOperation for
+impl<'a, 'b, 'c, 'tcx> IdVisitingOperation for
         SideTableEncodingIdVisitor<'a, 'b, 'c, 'tcx> {
     fn visit_id(&mut self, id: ast::NodeId) {
         encode_side_tables_for_id(self.ecx, self.rbml_w, id)
@@ -879,18 +862,18 @@ trait rbml_decoder_decoder_helpers<'tcx> {
     // Versions of the type reading functions that don't need the full
     // DecodeContext.
     fn read_ty_nodcx(&mut self,
-                     tcx: &ty::ctxt<'tcx>, cdata: &cstore::crate_metadata) -> Ty<'tcx>;
+                     tcx: &TyCtxt<'tcx>, cdata: &cstore::crate_metadata) -> Ty<'tcx>;
     fn read_tys_nodcx(&mut self,
-                      tcx: &ty::ctxt<'tcx>,
+                      tcx: &TyCtxt<'tcx>,
                       cdata: &cstore::crate_metadata) -> Vec<Ty<'tcx>>;
-    fn read_substs_nodcx(&mut self, tcx: &ty::ctxt<'tcx>,
+    fn read_substs_nodcx(&mut self, tcx: &TyCtxt<'tcx>,
                          cdata: &cstore::crate_metadata)
                          -> subst::Substs<'tcx>;
 }
 
 impl<'a, 'tcx> rbml_decoder_decoder_helpers<'tcx> for reader::Decoder<'a> {
     fn read_ty_nodcx(&mut self,
-                     tcx: &ty::ctxt<'tcx>,
+                     tcx: &TyCtxt<'tcx>,
                      cdata: &cstore::crate_metadata)
                      -> Ty<'tcx> {
         self.read_opaque(|_, doc| {
@@ -902,7 +885,7 @@ impl<'a, 'tcx> rbml_decoder_decoder_helpers<'tcx> for reader::Decoder<'a> {
     }
 
     fn read_tys_nodcx(&mut self,
-                      tcx: &ty::ctxt<'tcx>,
+                      tcx: &TyCtxt<'tcx>,
                       cdata: &cstore::crate_metadata) -> Vec<Ty<'tcx>> {
         self.read_to_vec(|this| Ok(this.read_ty_nodcx(tcx, cdata)) )
             .unwrap()
@@ -911,7 +894,7 @@ impl<'a, 'tcx> rbml_decoder_decoder_helpers<'tcx> for reader::Decoder<'a> {
     }
 
     fn read_substs_nodcx(&mut self,
-                         tcx: &ty::ctxt<'tcx>,
+                         tcx: &TyCtxt<'tcx>,
                          cdata: &cstore::crate_metadata)
                          -> subst::Substs<'tcx>
     {
@@ -1000,7 +983,7 @@ impl<'a, 'tcx> rbml_decoder_decoder_helpers<'tcx> for reader::Decoder<'a> {
                         region: this.read_enum_variant_arg(1,
                                     |this| Ok(this.read_region(dcx))).unwrap()
                     }),
-                    _ => panic!("bad enum variant for ty::UpvarCapture")
+                    _ => bug!("bad enum variant for ty::UpvarCapture")
                 })
             })
         }).unwrap()
@@ -1022,7 +1005,7 @@ impl<'a, 'tcx> rbml_decoder_decoder_helpers<'tcx> for reader::Decoder<'a> {
 
                         adjustment::AdjustDerefRef(auto_deref_ref)
                     }
-                    _ => panic!("bad enum variant for adjustment::AutoAdjustment")
+                    _ => bug!("bad enum variant for adjustment::AutoAdjustment")
                 })
             })
         }).unwrap()
@@ -1081,7 +1064,7 @@ impl<'a, 'tcx> rbml_decoder_decoder_helpers<'tcx> for reader::Decoder<'a> {
 
                         adjustment::AutoUnsafe(m)
                     }
-                    _ => panic!("bad enum variant for adjustment::AutoRef")
+                    _ => bug!("bad enum variant for adjustment::AutoRef")
                 })
             })
         }).unwrap()
@@ -1149,9 +1132,7 @@ fn decode_side_tables(dcx: &DecodeContext,
         let decoded_tag: Option<c::astencode_tag> = c::astencode_tag::from_u32(tag);
         match decoded_tag {
             None => {
-                dcx.tcx.sess.bug(
-                    &format!("unknown tag found in side tables: {:x}",
-                            tag));
+                bug!("unknown tag found in side tables: {:x}", tag);
             }
             Some(value) => {
                 let val_dsr = &mut entry_dsr;
@@ -1161,8 +1142,6 @@ fn decode_side_tables(dcx: &DecodeContext,
                         let def = decode_def(dcx, val_dsr);
                         dcx.tcx.def_map.borrow_mut().insert(id, def::PathResolution {
                             base_def: def,
-                            // This doesn't matter cross-crate.
-                            last_private: LastMod(AllPublic),
                             depth: 0
                         });
                     }
@@ -1217,9 +1196,7 @@ fn decode_side_tables(dcx: &DecodeContext,
                         dcx.tcx.const_qualif_map.borrow_mut().insert(id, qualif);
                     }
                     _ => {
-                        dcx.tcx.sess.bug(
-                            &format!("unknown tag found in side tables: {:x}",
-                                    tag));
+                        bug!("unknown tag found in side tables: {:x}", tag);
                     }
                 }
             }
@@ -1276,8 +1253,8 @@ fn copy_item_types(dcx: &DecodeContext, ii: &InlinedItem, orig_did: DefId) {
     }
 }
 
-fn inlined_item_id_range(v: &InlinedItem) -> ast_util::IdRange {
-    let mut visitor = ast_util::IdRangeComputingVisitor::new();
+fn inlined_item_id_range(v: &InlinedItem) -> IdRange {
+    let mut visitor = IdRangeComputingVisitor::new();
     v.visit_ids(&mut visitor);
     visitor.result()
 }
@@ -1293,8 +1270,8 @@ fn encode_item_ast(rbml_w: &mut Encoder, item: &hir::Item) {
 }
 
 #[cfg(test)]
-fn decode_item_ast(par_doc: rbml::Doc) -> hir::Item {
-    let chi_doc = par_doc.get(c::tag_tree as usize);
+fn decode_item_ast(item_doc: rbml::Doc) -> hir::Item {
+    let chi_doc = item_doc.get(c::tag_tree as usize);
     let mut d = reader::Decoder::new(chi_doc);
     Decodable::decode(&mut d).unwrap()
 }
@@ -1415,6 +1392,6 @@ fn test_simplification() {
         assert!(pprust::item_to_string(&item_out) ==
                 pprust::item_to_string(&item_exp));
       }
-      _ => panic!()
+      _ => bug!()
     }
 }
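
The hunks above consistently swap ad-hoc panic!() and tcx.sess.bug() calls for the bug! macro. Purely as an illustration of the call-site shape, and not the macro rustc actually defines (the real one lives in the compiler's session/diagnostic code), a minimal bug!-style macro could look like this:

// Illustrative sketch only: a bug!-style macro that panics with an
// internal-compiler-error prefix plus file/line context. Everything here
// (macro body, main, the sample tag value) is invented for the example.
macro_rules! bug {
    ($($arg:tt)*) => {
        panic!("internal compiler error: {} ({}:{})",
               format!($($arg)*), file!(), line!())
    };
}

fn main() {
    let tag = 0x7fu32;
    // Mirrors the call sites above, e.g. the unknown-tag arm in decode_side_tables.
    bug!("unknown tag found in side tables: {:x}", tag);
}
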
index 991cbe137ecf91cf41aaa670d1aee666a0642757..ea4e25754202c7ff23466d0a135306355bd70830 100644
@@ -73,6 +73,7 @@ pub const tag_crate_dep: usize = 0x35;
 
 pub const tag_crate_hash: usize = 0x103; // top-level only
 pub const tag_crate_crate_name: usize = 0x104; // top-level only
+pub const tag_crate_disambiguator: usize = 0x113; // top-level only
 
 pub const tag_crate_dep_crate_name: usize = 0x36;
 pub const tag_crate_dep_hash: usize = 0x37;
@@ -85,13 +86,10 @@ pub const tag_item_trait_ref: usize = 0x3b;
 // discriminator value for variants
 pub const tag_disr_val: usize = 0x3c;
 
-// used to encode ast_map::PathElem
-pub const tag_path: usize = 0x3d;
-pub const tag_path_len: usize = 0x3e;
-pub const tag_path_elem_mod: usize = 0x3f;
-pub const tag_path_elem_name: usize = 0x40;
-pub const tag_item_field: usize = 0x41;
+// GAP 0x3d, 0x3e, 0x3f, 0x40
 
+pub const tag_item_field: usize = 0x41;
+// GAP 0x42
 pub const tag_item_variances: usize = 0x43;
 /*
   trait items contain tag_item_trait_item elements,
@@ -241,6 +239,10 @@ pub const tag_items_data_item_constness: usize = 0xa6;
 
 pub const tag_items_data_item_deprecation: usize = 0xa7;
 
+pub const tag_items_data_item_defaultness: usize = 0xa8;
+
+pub const tag_items_data_parent_impl: usize = 0xa9;
+
 pub const tag_rustc_version: usize = 0x10f;
 pub fn rustc_version() -> String {
     format!(
index 35d7a0d4b9c27640be03ffd9839eecd49a2af58f..df9072835b9e8ee92a6409baa755b8643fdbe140 100644
@@ -17,13 +17,13 @@ use cstore::{self, CStore, CrateSource, MetadataBlob};
 use decoder;
 use loader::{self, CratePaths};
 
-use rustc::back::svh::Svh;
+use rustc::hir::svh::Svh;
 use rustc::dep_graph::DepNode;
 use rustc::session::{config, Session};
 use rustc::session::search_paths::PathKind;
-use rustc::middle::cstore::{CrateStore, validate_crate_name};
+use rustc::middle::cstore::{CrateStore, validate_crate_name, ExternCrate};
 use rustc::util::nodemap::FnvHashMap;
-use rustc::front::map as hir_map;
+use rustc::hir::map as hir_map;
 
 use std::cell::{RefCell, Cell};
 use std::path::PathBuf;
@@ -36,11 +36,9 @@ use syntax::codemap::{self, Span, mk_sp, Pos};
 use syntax::parse;
 use syntax::attr;
 use syntax::attr::AttrMetaMethods;
-use syntax::errors::FatalError;
 use syntax::parse::token::InternedString;
-use syntax::util::small_vector::SmallVector;
-use rustc_front::intravisit::Visitor;
-use rustc_front::hir;
+use rustc::hir::intravisit::Visitor;
+use rustc::hir;
 use log;
 
 pub struct LocalCrateReader<'a, 'b:'a> {
@@ -55,6 +53,7 @@ pub struct CrateReader<'a> {
     cstore: &'a CStore,
     next_crate_num: ast::CrateNum,
     foreign_item_map: FnvHashMap<String, Vec<ast::NodeId>>,
+    local_crate_name: String,
 }
 
 impl<'a, 'b, 'hir> Visitor<'hir> for LocalCrateReader<'a, 'b> {
@@ -146,12 +145,15 @@ impl PMDSource {
 }
 
 impl<'a> CrateReader<'a> {
-    pub fn new(sess: &'a Session, cstore: &'a CStore) -> CrateReader<'a> {
+    pub fn new(sess: &'a Session,
+               cstore: &'a CStore,
+               local_crate_name: &str) -> CrateReader<'a> {
         CrateReader {
             sess: sess,
             cstore: cstore,
             next_crate_num: cstore.next_crate_num(),
             foreign_item_map: FnvHashMap(),
+            local_crate_name: local_crate_name.to_owned(),
         }
     }
 
@@ -259,17 +261,51 @@ impl<'a> CrateReader<'a> {
                             metadata: &MetadataBlob) {
         let crate_rustc_version = decoder::crate_rustc_version(metadata.as_slice());
         if crate_rustc_version != Some(rustc_version()) {
-            span_fatal!(self.sess, span, E0514,
-                        "the crate `{}` has been compiled with {}, which is \
-                         incompatible with this version of rustc",
-                        name,
-                        crate_rustc_version
-                            .as_ref().map(|s| &**s)
-                            .unwrap_or("an old version of rustc")
-            );
+            let mut err = struct_span_fatal!(self.sess, span, E0514,
+                                             "the crate `{}` has been compiled with {}, which is \
+                                              incompatible with this version of rustc",
+                                              name,
+                                              crate_rustc_version
+                                              .as_ref().map(|s| &**s)
+                                              .unwrap_or("an old version of rustc"));
+            err.fileline_help(span, "consider removing the compiled binaries and recompiling \
+                                     with your current version of rustc");
+            err.emit();
         }
     }
 
+    fn verify_no_symbol_conflicts(&self,
+                                  span: Span,
+                                  metadata: &MetadataBlob) {
+        let disambiguator = decoder::get_crate_disambiguator(metadata.as_slice());
+        let crate_name = decoder::get_crate_name(metadata.as_slice());
+
+        // Check for (potential) conflicts with the local crate
+        if self.local_crate_name == crate_name &&
+           self.sess.crate_disambiguator.get().as_str() == disambiguator {
+            span_fatal!(self.sess, span, E0519,
+                        "the current crate is indistinguishable from one of its \
+                         dependencies: it has the same crate-name `{}` and was \
+                         compiled with the same `-C metadata` arguments. This \
+                         will result in symbol conflicts between the two.",
+                        crate_name)
+        }
+
+        let svh = decoder::get_crate_hash(metadata.as_slice());
+        // Check for conflicts with any crate loaded so far
+        self.cstore.iter_crate_data(|_, other| {
+            if other.name() == crate_name && // same crate-name
+               other.disambiguator() == disambiguator &&  // same crate-disambiguator
+               other.hash() != svh { // but different SVH
+                span_fatal!(self.sess, span, E0523,
+                        "found two different crates with name `{}` that are \
+                         not distinguished by differing `-C metadata`. This \
+                         will result in symbol conflicts between the two.",
+                        crate_name)
+            }
+        });
+    }
+
     fn register_crate(&mut self,
                       root: &Option<CratePaths>,
                       ident: &str,
@@ -280,6 +316,7 @@ impl<'a> CrateReader<'a> {
                       -> (ast::CrateNum, Rc<cstore::crate_metadata>,
                           cstore::CrateSource) {
         self.verify_rustc_version(name, span, &lib.metadata);
+        self.verify_no_symbol_conflicts(span, &lib.metadata);
 
         // Claim this crate number and cache it
         let cnum = self.next_crate_num;
@@ -305,15 +342,13 @@ impl<'a> CrateReader<'a> {
 
         let cmeta = Rc::new(cstore::crate_metadata {
             name: name.to_string(),
-            local_path: RefCell::new(SmallVector::zero()),
-            local_def_path: RefCell::new(vec![]),
+            extern_crate: Cell::new(None),
             index: decoder::load_index(metadata.as_slice()),
             xref_index: decoder::load_xrefs(metadata.as_slice()),
             data: metadata,
             cnum_map: RefCell::new(cnum_map),
             cnum: cnum,
             codemap_import_info: RefCell::new(vec![]),
-            span: span,
             staged_api: staged_api,
             explicitly_linked: Cell::new(explicitly_linked),
         });
@@ -347,8 +382,7 @@ impl<'a> CrateReader<'a> {
                      span: Span,
                      kind: PathKind,
                      explicitly_linked: bool)
-                         -> (ast::CrateNum, Rc<cstore::crate_metadata>,
-                             cstore::CrateSource) {
+                     -> (ast::CrateNum, Rc<cstore::crate_metadata>, cstore::CrateSource) {
         enum LookupResult {
             Previous(ast::CrateNum),
             Loaded(loader::Library),
@@ -405,23 +439,54 @@ impl<'a> CrateReader<'a> {
         }
     }
 
+    fn update_extern_crate(&mut self,
+                           cnum: ast::CrateNum,
+                           mut extern_crate: ExternCrate)
+    {
+        let cmeta = self.cstore.get_crate_data(cnum);
+        let old_extern_crate = cmeta.extern_crate.get();
+
+        // Prefer:
+        // - something over nothing (tuple.0);
+        // - direct extern crate to indirect (tuple.1);
+        // - shorter paths to longer (tuple.2).
+        let new_rank = (true, extern_crate.direct, !extern_crate.path_len);
+        let old_rank = match old_extern_crate {
+            None => (false, false, !0),
+            Some(ref c) => (true, c.direct, !c.path_len),
+        };
+
+        if old_rank >= new_rank {
+            return; // no change needed
+        }
+
+        cmeta.extern_crate.set(Some(extern_crate));
+
+        // Propagate the extern crate info to dependencies.
+        extern_crate.direct = false;
+        for &dep_cnum in cmeta.cnum_map.borrow().values() {
+            self.update_extern_crate(dep_cnum, extern_crate);
+        }
+    }
+
     // Go through the crate metadata and load any crates that it references
     fn resolve_crate_deps(&mut self,
                           root: &Option<CratePaths>,
-                          cdata: &[u8], span : Span)
-                       -> cstore::cnum_map {
+                          cdata: &[u8],
+                          span : Span)
+                          -> cstore::cnum_map {
         debug!("resolving deps of external crate");
         // The map from crate numbers in the crate we're resolving to local crate
         // numbers
         decoder::get_crate_deps(cdata).iter().map(|dep| {
             debug!("resolving dep crate {} hash: `{}`", dep.name, dep.hash);
             let (local_cnum, _, _) = self.resolve_crate(root,
-                                                   &dep.name,
-                                                   &dep.name,
-                                                   Some(&dep.hash),
-                                                   span,
-                                                   PathKind::Dependency,
-                                                   dep.explicitly_linked);
+                                                        &dep.name,
+                                                        &dep.name,
+                                                        Some(&dep.hash),
+                                                        span,
+                                                        PathKind::Dependency,
+                                                        dep.explicitly_linked);
             (dep.cnum, local_cnum)
         }).collect()
     }
@@ -461,7 +526,7 @@ impl<'a> CrateReader<'a> {
                 load_ctxt.filesearch = self.sess.target_filesearch(PathKind::Crate);
                 load_ctxt.load_library_crate()
             }
-            None => { load_ctxt.report_load_errs(); unreachable!() },
+            None => { load_ctxt.report_load_errs(); },
         };
 
         let dylib = library.dylib.clone();
@@ -507,7 +572,8 @@ impl<'a> CrateReader<'a> {
                     Ok(body) => body,
                     Err(mut err) => {
                         err.emit();
-                        panic!(FatalError);
+                        self.sess.abort_if_errors();
+                        unreachable!();
                     }
                 };
                 let local_span = mk_sp(lo, p.last_span.hi);
@@ -711,12 +777,15 @@ impl<'a> CrateReader<'a> {
 }
 
 impl<'a, 'b> LocalCrateReader<'a, 'b> {
-    pub fn new(sess: &'a Session, cstore: &'a CStore,
-               map: &'a hir_map::Map<'b>) -> LocalCrateReader<'a, 'b> {
+    pub fn new(sess: &'a Session,
+               cstore: &'a CStore,
+               map: &'a hir_map::Map<'b>,
+               local_crate_name: &str)
+               -> LocalCrateReader<'a, 'b> {
         LocalCrateReader {
             sess: sess,
             cstore: cstore,
-            creader: CrateReader::new(sess, cstore),
+            creader: CrateReader::new(sess, cstore, local_crate_name),
             ast_map: map,
         }
     }
@@ -760,19 +829,24 @@ impl<'a, 'b> LocalCrateReader<'a, 'b> {
 
                 match self.creader.extract_crate_info_hir(i) {
                     Some(info) => {
-                        let (cnum, cmeta, _) = self.creader.resolve_crate(&None,
-                                                              &info.ident,
-                                                              &info.name,
-                                                              None,
-                                                              i.span,
-                                                              PathKind::Crate,
-                                                              true);
+                        let (cnum, _, _) = self.creader.resolve_crate(&None,
+                                                                          &info.ident,
+                                                                          &info.name,
+                                                                          None,
+                                                                          i.span,
+                                                                          PathKind::Crate,
+                                                                          true);
                         let def_id = self.ast_map.local_def_id(i.id);
-                        let def_path = self.ast_map.def_path(def_id);
-                        cmeta.update_local_def_path(def_path);
-                        self.ast_map.with_path(i.id, |path| {
-                            cmeta.update_local_path(path)
-                        });
+
+                        let len = self.ast_map.def_path(def_id).data.len();
+
+                        self.creader.update_extern_crate(cnum,
+                                                         ExternCrate {
+                                                             def_id: def_id,
+                                                             span: i.span,
+                                                             direct: true,
+                                                             path_len: len,
+                                                         });
                         self.cstore.add_extern_mod_stmt_cnum(info.id, cnum);
                     }
                     None => ()
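
update_extern_crate above keeps, for each loaded crate, the "best" extern crate edge by comparing rank tuples. A standalone sketch of just that comparison follows; the field names are copied from the diff, while the struct reduction, rank helper and main are invented for the example:

#[derive(Clone, Copy, Debug)]
struct ExternCrate {
    direct: bool,
    path_len: usize,
}

// (present, direct, !path_len): lexicographic tuple comparison prefers
// something over nothing, a direct edge over an indirect one, and, because
// the path length is bitwise-negated, a shorter path over a longer one.
fn rank(ec: Option<ExternCrate>) -> (bool, bool, usize) {
    match ec {
        None => (false, false, !0),
        Some(c) => (true, c.direct, !c.path_len),
    }
}

fn main() {
    let old = Some(ExternCrate { direct: false, path_len: 3 });
    let new = ExternCrate { direct: true, path_len: 5 };
    // A direct edge wins even though its path is longer.
    assert!(rank(Some(new)) > rank(old));
    println!("new edge replaces old: {:?}", new);
}
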
index 9b534df075bef5dc5a1a751fb9680c23ff48642f..8e7be0e3a0f5e0d3fd5682de643f90a854c801af 100644
@@ -8,23 +8,22 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use astencode;
 use cstore;
 use decoder;
 use encoder;
 use loader;
 
-use middle::cstore::{CrateStore, CrateSource, ChildItem, FoundAst};
+use middle::cstore::{CrateStore, CrateSource, ChildItem, ExternCrate, FoundAst, DefLike};
 use middle::cstore::{NativeLibraryKind, LinkMeta, LinkagePreference};
-use middle::def;
+use rustc::hir::def;
 use middle::lang_items;
-use middle::ty::{self, Ty, VariantKind};
-use middle::def_id::{DefId, DefIndex};
+use rustc::ty::{self, Ty, TyCtxt, VariantKind};
+use rustc::hir::def_id::{DefId, DefIndex, CRATE_DEF_INDEX};
 
-use rustc::front::map as hir_map;
+use rustc::hir::map as hir_map;
 use rustc::mir::repr::Mir;
 use rustc::mir::mir_map::MirMap;
-use rustc::util::nodemap::{FnvHashMap, NodeMap, NodeSet};
+use rustc::util::nodemap::{FnvHashMap, NodeMap, NodeSet, DefIdMap};
 
 use std::cell::RefCell;
 use std::rc::Rc;
@@ -32,9 +31,9 @@ use std::path::PathBuf;
 use syntax::ast;
 use syntax::attr;
 use syntax::parse::token;
-use rustc_back::svh::Svh;
+use rustc::hir::svh::Svh;
 use rustc_back::target::Target;
-use rustc_front::hir;
+use rustc::hir;
 
 impl<'tcx> CrateStore<'tcx> for cstore::CStore {
     fn stability(&self, def: DefId) -> Option<attr::Stability>
@@ -49,14 +48,19 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
         decoder::get_deprecation(&cdata, def.index)
     }
 
-    fn closure_kind(&self, _tcx: &ty::ctxt<'tcx>, def_id: DefId) -> ty::ClosureKind
+    fn visibility(&self, def: DefId) -> ty::Visibility {
+        let cdata = self.get_crate_data(def.krate);
+        decoder::get_visibility(&cdata, def.index)
+    }
+
+    fn closure_kind(&self, _tcx: &TyCtxt<'tcx>, def_id: DefId) -> ty::ClosureKind
     {
         assert!(!def_id.is_local());
         let cdata = self.get_crate_data(def_id.krate);
         decoder::closure_kind(&cdata, def_id.index)
     }
 
-    fn closure_ty(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId) -> ty::ClosureTy<'tcx>
+    fn closure_ty(&self, tcx: &TyCtxt<'tcx>, def_id: DefId) -> ty::ClosureTy<'tcx>
     {
         assert!(!def_id.is_local());
         let cdata = self.get_crate_data(def_id.krate);
@@ -73,21 +77,21 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
         decoder::get_repr_attrs(&cdata, def.index)
     }
 
-    fn item_type(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn item_type(&self, tcx: &TyCtxt<'tcx>, def: DefId)
                  -> ty::TypeScheme<'tcx>
     {
         let cdata = self.get_crate_data(def.krate);
         decoder::get_type(&cdata, def.index, tcx)
     }
 
-    fn item_predicates(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn item_predicates(&self, tcx: &TyCtxt<'tcx>, def: DefId)
                        -> ty::GenericPredicates<'tcx>
     {
         let cdata = self.get_crate_data(def.krate);
         decoder::get_predicates(&cdata, def.index, tcx)
     }
 
-    fn item_super_predicates(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn item_super_predicates(&self, tcx: &TyCtxt<'tcx>, def: DefId)
                              -> ty::GenericPredicates<'tcx>
     {
         let cdata = self.get_crate_data(def.krate);
@@ -106,13 +110,13 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
         decoder::get_symbol(&cdata, def.index)
     }
 
-    fn trait_def(&self, tcx: &ty::ctxt<'tcx>, def: DefId) -> ty::TraitDef<'tcx>
+    fn trait_def(&self, tcx: &TyCtxt<'tcx>, def: DefId) -> ty::TraitDef<'tcx>
     {
         let cdata = self.get_crate_data(def.krate);
         decoder::get_trait_def(&cdata, def.index, tcx)
     }
 
-    fn adt_def(&self, tcx: &ty::ctxt<'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx>
+    fn adt_def(&self, tcx: &TyCtxt<'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx>
     {
         let cdata = self.get_crate_data(def.krate);
         decoder::get_adt_def(&self.intr, &cdata, def.index, tcx)
@@ -124,29 +128,6 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
         decoder::get_method_arg_names(&cdata, did.index)
     }
 
-    fn item_path(&self, def: DefId) -> Vec<hir_map::PathElem> {
-        let cdata = self.get_crate_data(def.krate);
-        let path = decoder::get_item_path(&cdata, def.index);
-
-        cdata.with_local_path(|cpath| {
-            let mut r = Vec::with_capacity(cpath.len() + path.len());
-            r.extend_from_slice(cpath);
-            r.extend_from_slice(&path);
-            r
-        })
-    }
-
-    fn extern_item_path(&self, def: DefId) -> Vec<hir_map::PathElem> {
-        let cdata = self.get_crate_data(def.krate);
-        let path = decoder::get_item_path(&cdata, def.index);
-
-        let mut r = Vec::with_capacity(path.len() + 1);
-        let crate_name = hir_map::PathMod(token::intern(&cdata.name));
-        r.push(crate_name);
-        r.extend_from_slice(&path);
-        r
-    }
-
     fn item_name(&self, def: DefId) -> ast::Name {
         let cdata = self.get_crate_data(def.krate);
         decoder::get_item_name(&self.intr, &cdata, def.index)
@@ -173,7 +154,7 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
         result
     }
 
-    fn provided_trait_methods(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn provided_trait_methods(&self, tcx: &TyCtxt<'tcx>, def: DefId)
                               -> Vec<Rc<ty::Method<'tcx>>>
     {
         let cdata = self.get_crate_data(def.krate);
@@ -199,7 +180,7 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
         decoder::get_impl_polarity(&cdata, def.index)
     }
 
-    fn impl_trait_ref(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn impl_trait_ref(&self, tcx: &TyCtxt<'tcx>, def: DefId)
                       -> Option<ty::TraitRef<'tcx>>
     {
         let cdata = self.get_crate_data(def.krate);
@@ -214,20 +195,25 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
     }
 
     // FIXME: killme
-    fn associated_consts(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn associated_consts(&self, tcx: &TyCtxt<'tcx>, def: DefId)
                          -> Vec<Rc<ty::AssociatedConst<'tcx>>> {
         let cdata = self.get_crate_data(def.krate);
         decoder::get_associated_consts(self.intr.clone(), &cdata, def.index, tcx)
     }
 
-    fn trait_of_item(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId) -> Option<DefId>
+    fn impl_parent(&self, impl_def: DefId) -> Option<DefId> {
+        let cdata = self.get_crate_data(impl_def.krate);
+        decoder::get_parent_impl(&*cdata, impl_def.index)
+    }
+
+    fn trait_of_item(&self, tcx: &TyCtxt<'tcx>, def_id: DefId) -> Option<DefId>
     {
         let cdata = self.get_crate_data(def_id.krate);
         decoder::get_trait_of_item(&cdata, def_id.index, tcx)
     }
 
-    fn impl_or_trait_item(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
-                          -> ty::ImplOrTraitItem<'tcx>
+    fn impl_or_trait_item(&self, tcx: &TyCtxt<'tcx>, def: DefId)
+                          -> Option<ty::ImplOrTraitItem<'tcx>>
     {
         let cdata = self.get_crate_data(def.krate);
         decoder::get_impl_or_trait_item(
@@ -260,7 +246,7 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
         decoder::is_default_impl(&cdata, impl_did.index)
     }
 
-    fn is_extern_item(&self, tcx: &ty::ctxt<'tcx>, did: DefId) -> bool {
+    fn is_extern_item(&self, tcx: &TyCtxt<'tcx>, did: DefId) -> bool {
         let cdata = self.get_crate_data(did.krate);
         decoder::is_extern_item(&cdata, did.index, tcx)
     }
@@ -325,9 +311,19 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
         decoder::get_crate_attributes(self.get_crate_data(cnum).data())
     }
 
-    fn crate_name(&self, cnum: ast::CrateNum) -> String
+    fn crate_name(&self, cnum: ast::CrateNum) -> token::InternedString
     {
-        self.get_crate_data(cnum).name.clone()
+        token::intern_and_get_ident(&self.get_crate_data(cnum).name[..])
+    }
+
+    fn original_crate_name(&self, cnum: ast::CrateNum) -> token::InternedString
+    {
+        token::intern_and_get_ident(&self.get_crate_data(cnum).name())
+    }
+
+    fn extern_crate(&self, cnum: ast::CrateNum) -> Option<ExternCrate>
+    {
+        self.get_crate_data(cnum).extern_crate.get()
     }
 
     fn crate_hash(&self, cnum: ast::CrateNum) -> Svh
@@ -336,6 +332,12 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
         decoder::get_crate_hash(cdata.data())
     }
 
+    fn crate_disambiguator(&self, cnum: ast::CrateNum) -> token::InternedString
+    {
+        let cdata = self.get_crate_data(cnum);
+        token::intern_and_get_ident(decoder::get_crate_disambiguator(cdata.data()))
+    }
+
     fn crate_struct_field_attrs(&self, cnum: ast::CrateNum)
                                 -> FnvHashMap<DefId, Vec<ast::Attribute>>
     {
@@ -363,12 +365,17 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
         decoder::get_reachable_ids(&cdata)
     }
 
-    fn def_path(&self, def: DefId) -> hir_map::DefPath
-    {
+    /// Returns the `DefKey` for a given `DefId`. This indicates the
+    /// parent `DefId` as well as some idea of what kind of data the
+    /// `DefId` refers to.
+    fn def_key(&self, def: DefId) -> hir_map::DefKey {
+        let cdata = self.get_crate_data(def.krate);
+        decoder::def_key(&cdata, def.index)
+    }
+
+    fn relative_def_path(&self, def: DefId) -> hir_map::DefPath {
         let cdata = self.get_crate_data(def.krate);
-        let path = decoder::def_path(&cdata, def.index);
-        let local_path = cdata.local_def_path();
-        local_path.into_iter().chain(path).collect()
+        decoder::def_path(&cdata, def.index)
     }
 
     fn variant_kind(&self, def_id: DefId) -> Option<VariantKind> {
@@ -425,15 +432,14 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
         result
     }
 
-    fn maybe_get_item_ast(&'tcx self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn maybe_get_item_ast(&'tcx self, tcx: &TyCtxt<'tcx>, def: DefId)
                           -> FoundAst<'tcx>
     {
         let cdata = self.get_crate_data(def.krate);
-        let decode_inlined_item = Box::new(astencode::decode_inlined_item);
-        decoder::maybe_get_item_ast(&cdata, tcx, def.index, decode_inlined_item)
+        decoder::maybe_get_item_ast(&cdata, tcx, def.index)
     }
 
-    fn maybe_get_item_mir(&self, tcx: &ty::ctxt<'tcx>, def: DefId)
+    fn maybe_get_item_mir(&self, tcx: &TyCtxt<'tcx>, def: DefId)
                           -> Option<Mir<'tcx>> {
         let cdata = self.get_crate_data(def.krate);
         decoder::maybe_get_item_mir(&cdata, tcx, def.index)
@@ -470,9 +476,13 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
     {
         loader::meta_section_name(target)
     }
-    fn encode_type(&self, tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> Vec<u8>
+    fn encode_type(&self,
+                   tcx: &TyCtxt<'tcx>,
+                   ty: Ty<'tcx>,
+                   def_id_to_string: fn(&TyCtxt<'tcx>, DefId) -> String)
+                   -> Vec<u8>
     {
-        encoder::encoded_ty(tcx, ty)
+        encoder::encoded_ty(tcx, ty, def_id_to_string)
     }
 
     fn used_crates(&self, prefer: LinkagePreference) -> Vec<(ast::CrateNum, Option<PathBuf>)>
@@ -491,7 +501,7 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
     }
 
     fn encode_metadata(&self,
-                       tcx: &ty::ctxt<'tcx>,
+                       tcx: &TyCtxt<'tcx>,
                        reexports: &def::ExportMap,
                        item_symbols: &RefCell<NodeMap<String>>,
                        link_meta: &LinkMeta,
@@ -499,21 +509,18 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
                        mir_map: &MirMap<'tcx>,
                        krate: &hir::Crate) -> Vec<u8>
     {
-        let encode_inlined_item: encoder::EncodeInlinedItem =
-            Box::new(|ecx, rbml_w, ii| astencode::encode_inlined_item(ecx, rbml_w, ii));
-
-        let encode_params = encoder::EncodeParams {
+        let ecx = encoder::EncodeContext {
             diag: tcx.sess.diagnostic(),
             tcx: tcx,
             reexports: reexports,
             item_symbols: item_symbols,
             link_meta: link_meta,
             cstore: self,
-            encode_inlined_item: encode_inlined_item,
             reachable: reachable,
             mir_map: mir_map,
+            type_abbrevs: RefCell::new(FnvHashMap()),
         };
-        encoder::encode_metadata(encode_params, krate)
+        encoder::encode_metadata(ecx, krate)
 
     }
 
@@ -521,4 +528,59 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
     {
         encoder::metadata_encoding_version
     }
+
+    /// Returns a map from a sufficiently visible external item (i.e. an external item that is
+    /// visible from at least one local module) to a sufficiently visible parent (considering
+    /// modules that re-export the external item to be parents).
+    fn visible_parent_map<'a>(&'a self) -> ::std::cell::RefMut<'a, DefIdMap<DefId>> {
+        let mut visible_parent_map = self.visible_parent_map.borrow_mut();
+        if !visible_parent_map.is_empty() { return visible_parent_map; }
+
+        use rustc::middle::cstore::{CrateStore, ChildItem};
+        use std::collections::vec_deque::VecDeque;
+        use std::collections::hash_map::Entry;
+        for cnum in 1 .. self.next_crate_num() {
+            let cdata = self.get_crate_data(cnum);
+
+            match cdata.extern_crate.get() {
+                // Ignore crates without a corresponding local `extern crate` item.
+                Some(extern_crate) if !extern_crate.direct => continue,
+                _ => {},
+            }
+
+            let mut bfs_queue = &mut VecDeque::new();
+            let mut add_child = |bfs_queue: &mut VecDeque<_>, child: ChildItem, parent: DefId| {
+                let child = match child.def {
+                    DefLike::DlDef(def) if child.vis == ty::Visibility::Public => def.def_id(),
+                    _ => return,
+                };
+
+                match visible_parent_map.entry(child) {
+                    Entry::Occupied(mut entry) => {
+                        // If `child` is defined in crate `cnum`, ensure
+                        // that it is mapped to a parent in `cnum`.
+                        if child.krate == cnum && entry.get().krate != cnum {
+                            entry.insert(parent);
+                        }
+                    }
+                    Entry::Vacant(entry) => {
+                        entry.insert(parent);
+                        bfs_queue.push_back(child);
+                    }
+                }
+            };
+
+            let croot = DefId { krate: cnum, index: CRATE_DEF_INDEX };
+            for child in self.crate_top_level_items(cnum) {
+                add_child(bfs_queue, child, croot);
+            }
+            while let Some(def) = bfs_queue.pop_front() {
+                for child in self.item_children(def) {
+                    add_child(bfs_queue, child, def);
+                }
+            }
+        }
+
+        visible_parent_map
+    }
 }
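
visible_parent_map above walks each external crate breadth-first so that every publicly reachable item ends up mapped to the closest parent that exposes it. The same traversal, replayed over a toy graph of integer item ids (HashMap and u32 stand in for DefIdMap and DefId; nothing here is rustc API, and the Occupied-entry special case for same-crate parents is left out of this sketch):

use std::collections::{HashMap, VecDeque};

fn visible_parents(children: &HashMap<u32, Vec<u32>>, root: u32) -> HashMap<u32, u32> {
    let mut parent_of = HashMap::new();
    let mut queue = VecDeque::new();
    queue.push_back(root);
    while let Some(item) = queue.pop_front() {
        for &child in children.get(&item).map(|v| v.as_slice()).unwrap_or(&[]) {
            // Only the first (closest) parent is recorded, as in the
            // Entry::Vacant arm above; later sightings of the same item
            // through longer re-export chains are ignored.
            if !parent_of.contains_key(&child) {
                parent_of.insert(child, item);
                queue.push_back(child);
            }
        }
    }
    parent_of
}

fn main() {
    let mut g = HashMap::new();
    g.insert(0, vec![1, 2]); // crate root exposes items 1 and 2
    g.insert(1, vec![3]);    // item 1 re-exports item 3
    g.insert(2, vec![3]);    // item 2 also re-exports item 3, one level deeper in BFS order
    let parents = visible_parents(&g, 0);
    assert_eq!(parents[&3], 1); // the first parent found in BFS order wins
    println!("{:?}", parents);
}
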
index a96da6bf4d66aaf85025f0dd98ff15d6f9775c6c..d5a9adafe7dcad71406089649d1fa7ce61da5bb6 100644
@@ -20,9 +20,10 @@ use decoder;
 use index;
 use loader;
 
-use rustc::back::svh::Svh;
-use rustc::front::map as ast_map;
-use rustc::util::nodemap::{FnvHashMap, NodeMap, NodeSet};
+use rustc::hir::def_id::DefId;
+use rustc::hir::svh::Svh;
+use rustc::middle::cstore::{ExternCrate};
+use rustc::util::nodemap::{FnvHashMap, NodeMap, NodeSet, DefIdMap};
 
 use std::cell::{RefCell, Ref, Cell};
 use std::rc::Rc;
@@ -31,9 +32,7 @@ use flate::Bytes;
 use syntax::ast;
 use syntax::attr;
 use syntax::codemap;
-use syntax::parse::token;
 use syntax::parse::token::IdentInterner;
-use syntax::util::small_vector::SmallVector;
 
 pub use middle::cstore::{NativeLibraryKind, LinkagePreference};
 pub use middle::cstore::{NativeStatic, NativeFramework, NativeUnknown};
@@ -63,13 +62,16 @@ pub struct ImportedFileMap {
 
 pub struct crate_metadata {
     pub name: String,
-    pub local_path: RefCell<SmallVector<ast_map::PathElem>>,
-    pub local_def_path: RefCell<ast_map::DefPath>,
+
+    /// Information about the extern crate that caused this crate to
+    /// be loaded. If this is `None`, then the crate was injected
+    /// (e.g., by the allocator)
+    pub extern_crate: Cell<Option<ExternCrate>>,
+
     pub data: MetadataBlob,
     pub cnum_map: RefCell<cnum_map>,
     pub cnum: ast::CrateNum,
     pub codemap_import_info: RefCell<Vec<ImportedFileMap>>,
-    pub span: codemap::Span,
     pub staged_api: bool,
 
     pub index: index::Index,
@@ -91,6 +93,7 @@ pub struct CStore {
     used_link_args: RefCell<Vec<String>>,
     statically_included_foreign_items: RefCell<NodeSet>,
     pub intr: Rc<IdentInterner>,
+    pub visible_parent_map: RefCell<DefIdMap<DefId>>,
 }
 
 impl CStore {
@@ -103,6 +106,7 @@ impl CStore {
             used_link_args: RefCell::new(Vec::new()),
             intr: intr,
             statically_included_foreign_items: RefCell::new(NodeSet()),
+            visible_parent_map: RefCell::new(FnvHashMap()),
         }
     }
 
@@ -248,8 +252,11 @@ impl CStore {
 
 impl crate_metadata {
     pub fn data<'a>(&'a self) -> &'a [u8] { self.data.as_slice() }
-    pub fn name(&self) -> String { decoder::get_crate_name(self.data()) }
+    pub fn name(&self) -> &str { decoder::get_crate_name(self.data()) }
     pub fn hash(&self) -> Svh { decoder::get_crate_hash(self.data()) }
+    pub fn disambiguator(&self) -> &str {
+        decoder::get_crate_disambiguator(self.data())
+    }
     pub fn imported_filemaps<'a>(&'a self, codemap: &codemap::CodeMap)
                                  -> Ref<'a, Vec<ImportedFileMap>> {
         let filemaps = self.codemap_import_info.borrow();
@@ -265,50 +272,6 @@ impl crate_metadata {
         }
     }
 
-    pub fn with_local_path<T, F>(&self, f: F) -> T
-        where F: Fn(&[ast_map::PathElem]) -> T
-    {
-        let cpath = self.local_path.borrow();
-        if cpath.is_empty() {
-            let name = ast_map::PathMod(token::intern(&self.name));
-            f(&[name])
-        } else {
-            f(cpath.as_slice())
-        }
-    }
-
-    pub fn update_local_path<'a, 'b>(&self, candidate: ast_map::PathElems<'a, 'b>) {
-        let mut cpath = self.local_path.borrow_mut();
-        let cap = cpath.len();
-        match cap {
-            0 => *cpath = candidate.collect(),
-            1 => (),
-            _ => {
-                let candidate: SmallVector<_> = candidate.collect();
-                if candidate.len() < cap {
-                    *cpath = candidate;
-                }
-            },
-        }
-    }
-
-    pub fn local_def_path(&self) -> ast_map::DefPath {
-        let local_def_path = self.local_def_path.borrow();
-        if local_def_path.is_empty() {
-            let name = ast_map::DefPathData::DetachedCrate(token::intern(&self.name));
-            vec![ast_map::DisambiguatedDefPathData { data: name, disambiguator: 0 }]
-        } else {
-            local_def_path.clone()
-        }
-    }
-
-    pub fn update_local_def_path(&self, candidate: ast_map::DefPath) {
-        let mut local_def_path = self.local_def_path.borrow_mut();
-        if local_def_path.is_empty() || candidate.len() < local_def_path.len() {
-            *local_def_path = candidate;
-        }
-    }
-
     pub fn is_allocator(&self) -> bool {
         let attrs = decoder::get_crate_attributes(self.data());
         attr::contains_name(&attrs, "allocator")
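
crate_metadata now carries the extern-crate information in a Cell<Option<ExternCrate>> instead of the old RefCell'd path fields. A minimal sketch of that interior-mutability pattern, with Span omitted, DefId reduced to a plain integer, and only the get/set usage mirroring the code above:

use std::cell::Cell;

// ExternCrate's real definition lives in rustc's cstore module; this copy
// keeps just enough fields (and makes them Copy) to show why Cell works here.
#[derive(Clone, Copy, Debug)]
struct ExternCrate {
    def_id: u32, // stand-in for the real DefId
    direct: bool,
    path_len: usize,
}

struct CrateMetadata {
    extern_crate: Cell<Option<ExternCrate>>,
}

fn main() {
    let cmeta = CrateMetadata { extern_crate: Cell::new(None) };
    // A shared reference is enough to update the cell, and because the
    // payload is Copy, `get` hands the value back without any borrow flags.
    cmeta.extern_crate.set(Some(ExternCrate { def_id: 7, direct: true, path_len: 1 }));
    if let Some(ec) = cmeta.extern_crate.get() {
        println!("loaded via extern crate: {:?}", ec);
    }
}
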
index 8295ffebda3e6508b62bcd20a509064abb2a0871..6a634d061fa92f86778d3d022dca31e1ebda7cd4 100644
@@ -14,6 +14,7 @@
 
 use self::Family::*;
 
+use astencode::decode_inlined_item;
 use cstore::{self, crate_metadata};
 use common::*;
 use encoder::def_to_u64;
@@ -21,19 +22,21 @@ use index;
 use tls_context;
 use tydecode::TyDecoder;
 
-use rustc::back::svh::Svh;
-use rustc::front::map as hir_map;
+use rustc::hir::svh::Svh;
+use rustc::hir::map as hir_map;
 use rustc::util::nodemap::FnvHashMap;
-use rustc_front::hir;
+use rustc::hir;
 
 use middle::cstore::{LOCAL_CRATE, FoundAst, InlinedItem, LinkagePreference};
 use middle::cstore::{DefLike, DlDef, DlField, DlImpl, tls};
-use middle::def::Def;
-use middle::def_id::{DefId, DefIndex};
+use rustc::hir::def::Def;
+use rustc::hir::def_id::{DefId, DefIndex};
 use middle::lang_items;
-use middle::subst;
-use middle::ty::{ImplContainer, TraitContainer};
-use middle::ty::{self, Ty, TypeFoldable, VariantKind};
+use rustc::ty::subst;
+use rustc::ty::{ImplContainer, TraitContainer};
+use rustc::ty::{self, Ty, TyCtxt, TypeFoldable, VariantKind};
+
+use rustc_const_math::ConstInt;
 
 use rustc::mir;
 use rustc::mir::visit::MutVisitor;
@@ -48,8 +51,7 @@ use rbml::reader;
 use rbml;
 use serialize::Decodable;
 use syntax::attr;
-use syntax::parse::token::{IdentInterner, special_idents};
-use syntax::parse::token;
+use syntax::parse::token::{self, IdentInterner};
 use syntax::ast;
 use syntax::abi::Abi;
 use syntax::codemap::{self, Span, BytePos, NO_EXPANSION};
@@ -68,7 +70,7 @@ impl crate_metadata {
 
     fn lookup_item(&self, item_id: DefIndex) -> rbml::Doc {
         match self.get_item(item_id) {
-            None => panic!("lookup_item: id not found: {:?}", item_id),
+            None => bug!("lookup_item: id not found: {:?}", item_id),
             Some(d) => d
         }
     }
@@ -134,18 +136,18 @@ fn item_family(item: rbml::Doc) -> Family {
       'u' => Struct(VariantKind::Unit),
       'g' => PublicField,
       'N' => InheritedField,
-       c => panic!("unexpected family char: {}", c)
+       c => bug!("unexpected family char: {}", c)
     }
 }
 
-fn item_visibility(item: rbml::Doc) -> hir::Visibility {
+fn item_visibility(item: rbml::Doc) -> ty::Visibility {
     match reader::maybe_get_doc(item, tag_items_data_item_visibility) {
-        None => hir::Public,
+        None => ty::Visibility::Public,
         Some(visibility_doc) => {
             match reader::doc_as_u8(visibility_doc) as char {
-                'y' => hir::Public,
-                'i' => hir::Inherited,
-                _ => panic!("unknown visibility character")
+                'y' => ty::Visibility::Public,
+                'i' => ty::Visibility::PrivateExternal,
+                _ => bug!("unknown visibility character")
             }
         }
     }
@@ -158,7 +160,20 @@ fn fn_constness(item: rbml::Doc) -> hir::Constness {
             match reader::doc_as_u8(constness_doc) as char {
                 'c' => hir::Constness::Const,
                 'n' => hir::Constness::NotConst,
-                _ => panic!("unknown constness character")
+                _ => bug!("unknown constness character")
+            }
+        }
+    }
+}
+
+fn item_defaultness(item: rbml::Doc) -> hir::Defaultness {
+    match reader::maybe_get_doc(item, tag_items_data_item_defaultness) {
+        None => hir::Defaultness::Default, // should occur only for default impls on traits
+        Some(defaultness_doc) => {
+            match reader::doc_as_u8(defaultness_doc) as char {
+                'd' => hir::Defaultness::Default,
+                'f' => hir::Defaultness::Final,
+                _ => bug!("unknown defaultness character")
             }
         }
     }
@@ -199,7 +214,7 @@ fn reexports<'a>(d: rbml::Doc<'a>) -> reader::TaggedDocsIterator<'a> {
     reader::tagged_docs(d, tag_items_data_item_reexport)
 }
 
-fn variant_disr_val(d: rbml::Doc) -> Option<ty::Disr> {
+fn variant_disr_val(d: rbml::Doc) -> Option<u64> {
     reader::maybe_get_doc(d, tag_disr_val).and_then(|val_doc| {
         reader::with_doc_data(val_doc, |data| {
             str::from_utf8(data).ok().and_then(|s| s.parse().ok())
@@ -207,14 +222,14 @@ fn variant_disr_val(d: rbml::Doc) -> Option<ty::Disr> {
     })
 }
 
-fn doc_type<'tcx>(doc: rbml::Doc, tcx: &ty::ctxt<'tcx>, cdata: Cmd) -> Ty<'tcx> {
+fn doc_type<'tcx>(doc: rbml::Doc, tcx: &TyCtxt<'tcx>, cdata: Cmd) -> Ty<'tcx> {
     let tp = reader::get_doc(doc, tag_items_data_item_type);
     TyDecoder::with_doc(tcx, cdata.cnum, tp,
                         &mut |did| translate_def_id(cdata, did))
         .parse_ty()
 }
 
-fn maybe_doc_type<'tcx>(doc: rbml::Doc, tcx: &ty::ctxt<'tcx>, cdata: Cmd) -> Option<Ty<'tcx>> {
+fn maybe_doc_type<'tcx>(doc: rbml::Doc, tcx: &TyCtxt<'tcx>, cdata: Cmd) -> Option<Ty<'tcx>> {
     reader::maybe_get_doc(doc, tag_items_data_item_type).map(|tp| {
         TyDecoder::with_doc(tcx, cdata.cnum, tp,
                             &mut |did| translate_def_id(cdata, did))
@@ -223,39 +238,23 @@ fn maybe_doc_type<'tcx>(doc: rbml::Doc, tcx: &ty::ctxt<'tcx>, cdata: Cmd) -> Opt
 }
 
 pub fn item_type<'tcx>(_item_id: DefId, item: rbml::Doc,
-                       tcx: &ty::ctxt<'tcx>, cdata: Cmd) -> Ty<'tcx> {
+                       tcx: &TyCtxt<'tcx>, cdata: Cmd) -> Ty<'tcx> {
     doc_type(item, tcx, cdata)
 }
 
-fn doc_trait_ref<'tcx>(doc: rbml::Doc, tcx: &ty::ctxt<'tcx>, cdata: Cmd)
+fn doc_trait_ref<'tcx>(doc: rbml::Doc, tcx: &TyCtxt<'tcx>, cdata: Cmd)
                        -> ty::TraitRef<'tcx> {
     TyDecoder::with_doc(tcx, cdata.cnum, doc,
                         &mut |did| translate_def_id(cdata, did))
         .parse_trait_ref()
 }
 
-fn item_trait_ref<'tcx>(doc: rbml::Doc, tcx: &ty::ctxt<'tcx>, cdata: Cmd)
+fn item_trait_ref<'tcx>(doc: rbml::Doc, tcx: &TyCtxt<'tcx>, cdata: Cmd)
                         -> ty::TraitRef<'tcx> {
     let tp = reader::get_doc(doc, tag_item_trait_ref);
     doc_trait_ref(tp, tcx, cdata)
 }
 
-fn item_path(item_doc: rbml::Doc) -> Vec<hir_map::PathElem> {
-    let path_doc = reader::get_doc(item_doc, tag_path);
-    reader::docs(path_doc).filter_map(|(tag, elt_doc)| {
-        if tag == tag_path_elem_mod {
-            let s = elt_doc.as_str_slice();
-            Some(hir_map::PathMod(token::intern(s)))
-        } else if tag == tag_path_elem_name {
-            let s = elt_doc.as_str_slice();
-            Some(hir_map::PathName(token::intern(s)))
-        } else {
-            // ignore tag_path_len element
-            None
-        }
-    }).collect()
-}
-
 fn item_name(intr: &IdentInterner, item: rbml::Doc) -> ast::Name {
     let name = reader::get_doc(item, tag_paths_data_name);
     let string = name.as_str_slice();
@@ -352,7 +351,7 @@ fn parse_associated_type_names(item_doc: rbml::Doc) -> Vec<ast::Name> {
 
 pub fn get_trait_def<'tcx>(cdata: Cmd,
                            item_id: DefIndex,
-                           tcx: &ty::ctxt<'tcx>) -> ty::TraitDef<'tcx>
+                           tcx: &TyCtxt<'tcx>) -> ty::TraitDef<'tcx>
 {
     let item_doc = cdata.lookup_item(item_id);
     let generics = doc_generics(item_doc, tcx, cdata, tag_item_generics);
@@ -370,18 +369,17 @@ pub fn get_trait_def<'tcx>(cdata: Cmd,
 pub fn get_adt_def<'tcx>(intr: &IdentInterner,
                          cdata: Cmd,
                          item_id: DefIndex,
-                         tcx: &ty::ctxt<'tcx>) -> ty::AdtDefMaster<'tcx>
+                         tcx: &TyCtxt<'tcx>) -> ty::AdtDefMaster<'tcx>
 {
-    fn expect_variant_kind<'tcx>(family: Family, tcx: &ty::ctxt<'tcx>) -> ty::VariantKind {
+    fn expect_variant_kind(family: Family) -> ty::VariantKind {
         match family_to_variant_kind(family) {
             Some(kind) => kind,
-            _ => tcx.sess.bug(&format!("unexpected family: {:?}", family)),
+            _ => bug!("unexpected family: {:?}", family),
         }
     }
     fn get_enum_variants<'tcx>(intr: &IdentInterner,
                                cdata: Cmd,
-                               doc: rbml::Doc,
-                               tcx: &ty::ctxt<'tcx>) -> Vec<ty::VariantDefData<'tcx, 'tcx>> {
+                               doc: rbml::Doc) -> Vec<ty::VariantDefData<'tcx, 'tcx>> {
         let mut disr_val = 0;
         reader::tagged_docs(doc, tag_items_data_item_variant).map(|p| {
             let did = translated_def_id(cdata, p);
@@ -396,43 +394,43 @@ pub fn get_adt_def<'tcx>(intr: &IdentInterner,
             ty::VariantDefData {
                 did: did,
                 name: item_name(intr, item),
-                fields: get_variant_fields(intr, cdata, item, tcx),
-                disr_val: disr,
-                kind: expect_variant_kind(item_family(item), tcx),
+                fields: get_variant_fields(intr, cdata, item),
+                disr_val: ConstInt::Infer(disr),
+                kind: expect_variant_kind(item_family(item)),
             }
         }).collect()
     }
     fn get_variant_fields<'tcx>(intr: &IdentInterner,
                                 cdata: Cmd,
-                                doc: rbml::Doc,
-                                tcx: &ty::ctxt<'tcx>) -> Vec<ty::FieldDefData<'tcx, 'tcx>> {
+                                doc: rbml::Doc) -> Vec<ty::FieldDefData<'tcx, 'tcx>> {
+        let mut index = 0;
         reader::tagged_docs(doc, tag_item_field).map(|f| {
             let ff = item_family(f);
             match ff {
                 PublicField | InheritedField => {},
-                _ => tcx.sess.bug(&format!("expected field, found {:?}", ff))
+                _ => bug!("expected field, found {:?}", ff)
             };
             ty::FieldDefData::new(item_def_id(f, cdata),
                                   item_name(intr, f),
                                   struct_field_family_to_visibility(ff))
         }).chain(reader::tagged_docs(doc, tag_item_unnamed_field).map(|f| {
             let ff = item_family(f);
-            ty::FieldDefData::new(item_def_id(f, cdata),
-                                  special_idents::unnamed_field.name,
+            let name = intr.intern(&index.to_string());
+            index += 1;
+            ty::FieldDefData::new(item_def_id(f, cdata), name,
                                   struct_field_family_to_visibility(ff))
         })).collect()
     }
     fn get_struct_variant<'tcx>(intr: &IdentInterner,
                                 cdata: Cmd,
                                 doc: rbml::Doc,
-                                did: DefId,
-                                tcx: &ty::ctxt<'tcx>) -> ty::VariantDefData<'tcx, 'tcx> {
+                                did: DefId) -> ty::VariantDefData<'tcx, 'tcx> {
         ty::VariantDefData {
             did: did,
             name: item_name(intr, doc),
-            fields: get_variant_fields(intr, cdata, doc, tcx),
-            disr_val: 0,
-            kind: expect_variant_kind(item_family(doc), tcx),
+            fields: get_variant_fields(intr, cdata, doc),
+            disr_val: ConstInt::Infer(0),
+            kind: expect_variant_kind(item_family(doc)),
         }
     }
 
@@ -441,18 +439,17 @@ pub fn get_adt_def<'tcx>(intr: &IdentInterner,
     let (kind, variants) = match item_family(doc) {
         Enum => {
             (ty::AdtKind::Enum,
-             get_enum_variants(intr, cdata, doc, tcx))
+             get_enum_variants(intr, cdata, doc))
         }
         Struct(..) => {
             let ctor_did =
                 reader::maybe_get_doc(doc, tag_items_data_item_struct_ctor).
                 map_or(did, |ctor_doc| translated_def_id(cdata, ctor_doc));
             (ty::AdtKind::Struct,
-             vec![get_struct_variant(intr, cdata, doc, ctor_did, tcx)])
+             vec![get_struct_variant(intr, cdata, doc, ctor_did)])
         }
-        _ => tcx.sess.bug(
-            &format!("get_adt_def called on a non-ADT {:?} - {:?}",
-                     item_family(doc), did))
+        _ => bug!("get_adt_def called on a non-ADT {:?} - {:?}",
+                  item_family(doc), did)
     };
 
     let adt = tcx.intern_adt_def(did, kind, variants);
@@ -471,14 +468,14 @@ pub fn get_adt_def<'tcx>(intr: &IdentInterner,
                    variant.name,
                    ctor_ty);
             let field_tys = match ctor_ty.sty {
-                ty::TyBareFn(_, &ty::BareFnTy { sig: ty::Binder(ty::FnSig {
+                ty::TyFnDef(_, _, &ty::BareFnTy { sig: ty::Binder(ty::FnSig {
                     ref inputs, ..
                 }), ..}) => {
                     // tuple-struct constructors don't have escaping regions
                     assert!(!inputs.has_escaping_regions());
                     inputs
                 },
-                _ => tcx.sess.bug("tuple-variant ctor is not an ADT")
+                _ => bug!("tuple-variant ctor is not an ADT")
             };
             for (field, &ty) in variant.fields.iter().zip(field_tys.iter()) {
                 field.fulfill_ty(ty);
@@ -499,7 +496,7 @@ pub fn get_adt_def<'tcx>(intr: &IdentInterner,
 
 pub fn get_predicates<'tcx>(cdata: Cmd,
                             item_id: DefIndex,
-                            tcx: &ty::ctxt<'tcx>)
+                            tcx: &TyCtxt<'tcx>)
                             -> ty::GenericPredicates<'tcx>
 {
     let item_doc = cdata.lookup_item(item_id);
@@ -508,14 +505,14 @@ pub fn get_predicates<'tcx>(cdata: Cmd,
 
 pub fn get_super_predicates<'tcx>(cdata: Cmd,
                                   item_id: DefIndex,
-                                  tcx: &ty::ctxt<'tcx>)
+                                  tcx: &TyCtxt<'tcx>)
                                   -> ty::GenericPredicates<'tcx>
 {
     let item_doc = cdata.lookup_item(item_id);
     doc_predicates(item_doc, tcx, cdata, tag_item_super_predicates)
 }
 
-pub fn get_type<'tcx>(cdata: Cmd, id: DefIndex, tcx: &ty::ctxt<'tcx>)
+pub fn get_type<'tcx>(cdata: Cmd, id: DefIndex, tcx: &TyCtxt<'tcx>)
                       -> ty::TypeScheme<'tcx>
 {
     let item_doc = cdata.lookup_item(id);
@@ -544,6 +541,17 @@ pub fn get_deprecation(cdata: Cmd, id: DefIndex) -> Option<attr::Deprecation> {
     })
 }
 
+pub fn get_visibility(cdata: Cmd, id: DefIndex) -> ty::Visibility {
+    item_visibility(cdata.lookup_item(id))
+}
+
+pub fn get_parent_impl(cdata: Cmd, id: DefIndex) -> Option<DefId> {
+    let item = cdata.lookup_item(id);
+    reader::maybe_get_doc(item, tag_items_data_parent_impl).map(|doc| {
+        translated_def_id(cdata, doc)
+    })
+}
+
 pub fn get_repr_attrs(cdata: Cmd, id: DefIndex) -> Vec<attr::ReprAttr> {
     let item = cdata.lookup_item(id);
     match reader::maybe_get_doc(item, tag_items_data_item_repr).map(|doc| {
@@ -583,7 +591,7 @@ pub fn get_custom_coerce_unsized_kind<'tcx>(
 
 pub fn get_impl_trait<'tcx>(cdata: Cmd,
                             id: DefIndex,
-                            tcx: &ty::ctxt<'tcx>)
+                            tcx: &TyCtxt<'tcx>)
                             -> Option<ty::TraitRef<'tcx>>
 {
     let item_doc = cdata.lookup_item(id);
@@ -631,7 +639,7 @@ fn each_child_of_item_or_crate<F, G>(intr: Rc<IdentInterner>,
                                      item_doc: rbml::Doc,
                                      mut get_crate_data: G,
                                      mut callback: F) where
-    F: FnMut(DefLike, ast::Name, hir::Visibility),
+    F: FnMut(DefLike, ast::Name, ty::Visibility),
     G: FnMut(ast::CrateNum) -> Rc<crate_metadata>,
 {
     // Iterate over all children.
@@ -715,7 +723,7 @@ fn each_child_of_item_or_crate<F, G>(intr: Rc<IdentInterner>,
             let def_like = item_to_def_like(crate_data, child_item_doc, child_def_id);
             // These items have a public visibility because they're part of
             // a public re-export.
-            callback(def_like, token::intern(name), hir::Public);
+            callback(def_like, token::intern(name), ty::Visibility::Public);
         }
     }
 }
@@ -726,7 +734,7 @@ pub fn each_child_of_item<F, G>(intr: Rc<IdentInterner>,
                                id: DefIndex,
                                get_crate_data: G,
                                callback: F) where
-    F: FnMut(DefLike, ast::Name, hir::Visibility),
+    F: FnMut(DefLike, ast::Name, ty::Visibility),
     G: FnMut(ast::CrateNum) -> Rc<crate_metadata>,
 {
     // Find the item.
@@ -747,7 +755,7 @@ pub fn each_top_level_item_of_crate<F, G>(intr: Rc<IdentInterner>,
                                           cdata: Cmd,
                                           get_crate_data: G,
                                           callback: F) where
-    F: FnMut(DefLike, ast::Name, hir::Visibility),
+    F: FnMut(DefLike, ast::Name, ty::Visibility),
     G: FnMut(ast::CrateNum) -> Rc<crate_metadata>,
 {
     let root_doc = rbml::Doc::new(cdata.data());
@@ -762,72 +770,52 @@ pub fn each_top_level_item_of_crate<F, G>(intr: Rc<IdentInterner>,
                                 callback)
 }
 
-pub fn get_item_path(cdata: Cmd, id: DefIndex) -> Vec<hir_map::PathElem> {
-    item_path(cdata.lookup_item(id))
-}
-
 pub fn get_item_name(intr: &IdentInterner, cdata: Cmd, id: DefIndex) -> ast::Name {
     item_name(intr, cdata.lookup_item(id))
 }
 
-pub type DecodeInlinedItem<'a> =
-    Box<for<'tcx> FnMut(Cmd,
-                        &ty::ctxt<'tcx>,
-                        Vec<hir_map::PathElem>, // parent_path
-                        hir_map::DefPath,       // parent_def_path
-                        rbml::Doc,
-                        DefId)
-                        -> Result<&'tcx InlinedItem, (Vec<hir_map::PathElem>,
-                                                      hir_map::DefPath)> + 'a>;
-
-pub fn maybe_get_item_ast<'tcx>(cdata: Cmd,
-                                tcx: &ty::ctxt<'tcx>,
-                                id: DefIndex,
-                                mut decode_inlined_item: DecodeInlinedItem)
+pub fn maybe_get_item_ast<'tcx>(cdata: Cmd, tcx: &TyCtxt<'tcx>, id: DefIndex)
                                 -> FoundAst<'tcx> {
     debug!("Looking up item: {:?}", id);
     let item_doc = cdata.lookup_item(id);
     let item_did = item_def_id(item_doc, cdata);
-    let parent_path = {
-        let mut path = item_path(item_doc);
-        path.pop();
-        path
-    };
-    let parent_def_path = {
-        let mut def_path = def_path(cdata, id);
-        def_path.pop();
-        def_path
+    let parent_def_id = DefId {
+        krate: cdata.cnum,
+        index: def_key(cdata, id).parent.unwrap()
     };
-    match decode_inlined_item(cdata,
-                              tcx,
-                              parent_path,
-                              parent_def_path,
-                              item_doc,
-                              item_did) {
-        Ok(ii) => FoundAst::Found(ii),
-        Err((mut parent_path, mut parent_def_path)) => {
-            match item_parent_item(cdata, item_doc) {
-                Some(parent_did) => {
-                    // Remove the last element from the paths, since we are now
-                    // trying to inline the parent.
-                    parent_path.pop();
-                    parent_def_path.pop();
-
-                    let parent_item = cdata.lookup_item(parent_did.index);
-                    match decode_inlined_item(cdata,
-                                              tcx,
-                                              parent_path,
-                                              parent_def_path,
-                                              parent_item,
-                                              parent_did) {
-                        Ok(ii) => FoundAst::FoundParent(parent_did, ii),
-                        Err(_) => FoundAst::NotFound
-                    }
-                }
-                None => FoundAst::NotFound
+    let mut parent_def_path = def_path(cdata, id);
+    parent_def_path.data.pop();
+    if let Some(ast_doc) = reader::maybe_get_doc(item_doc, tag_ast as usize) {
+        let ii = decode_inlined_item(cdata,
+                                     tcx,
+                                     parent_def_path,
+                                     parent_def_id,
+                                     ast_doc,
+                                     item_did);
+        return FoundAst::Found(ii);
+    } else if let Some(parent_did) = item_parent_item(cdata, item_doc) {
+        // Remove the last element from the paths, since we are now
+        // trying to inline the parent.
+        let grandparent_def_id = DefId {
+            krate: cdata.cnum,
+            index: def_key(cdata, parent_def_id.index).parent.unwrap()
+        };
+        let mut grandparent_def_path = parent_def_path;
+        grandparent_def_path.data.pop();
+        let parent_doc = cdata.lookup_item(parent_did.index);
+        if let Some(ast_doc) = reader::maybe_get_doc(parent_doc, tag_ast as usize) {
+            let ii = decode_inlined_item(cdata,
+                                         tcx,
+                                         grandparent_def_path,
+                                         grandparent_def_id,
+                                         ast_doc,
+                                         parent_did);
+            if let &InlinedItem::Item(ref i) = ii {
+                return FoundAst::FoundParent(parent_did, i);
             }
         }
     }
+    FoundAst::NotFound
 }
 
 pub fn is_item_mir_available<'tcx>(cdata: Cmd, id: DefIndex) -> bool {
@@ -839,7 +827,7 @@ pub fn is_item_mir_available<'tcx>(cdata: Cmd, id: DefIndex) -> bool {
 }
 
 pub fn maybe_get_item_mir<'tcx>(cdata: Cmd,
-                                tcx: &ty::ctxt<'tcx>,
+                                tcx: &TyCtxt<'tcx>,
                                 id: DefIndex)
                                 -> Option<mir::repr::Mir<'tcx>> {
     let item_doc = cdata.lookup_item(id);
@@ -897,7 +885,7 @@ fn get_explicit_self(item: rbml::Doc) -> ty::ExplicitSelfCategory {
         match ch as char {
             'i' => hir::MutImmutable,
             'm' => hir::MutMutable,
-            _ => panic!("unknown mutability character: `{}`", ch as char),
+            _ => bug!("unknown mutability character: `{}`", ch as char),
         }
     }
 
@@ -915,7 +903,7 @@ fn get_explicit_self(item: rbml::Doc) -> ty::ExplicitSelfCategory {
                 ty::ReEmpty,
                 get_mutability(string.as_bytes()[1]))
         }
-        _ => panic!("unknown self type code: `{}`", explicit_self_kind as char)
+        _ => bug!("unknown self type code: `{}`", explicit_self_kind as char)
     }
 }
 
@@ -928,7 +916,7 @@ pub fn get_impl_items(cdata: Cmd, impl_id: DefIndex)
             Some('C') | Some('c') => ty::ConstTraitItemId(def_id),
             Some('r') | Some('p') => ty::MethodTraitItemId(def_id),
             Some('t') => ty::TypeTraitItemId(def_id),
-            _ => panic!("unknown impl item sort"),
+            _ => bug!("unknown impl item sort"),
         }
     }).collect()
 }
@@ -954,13 +942,17 @@ pub fn is_static_method(cdata: Cmd, id: DefIndex) -> bool {
 pub fn get_impl_or_trait_item<'tcx>(intr: Rc<IdentInterner>,
                                     cdata: Cmd,
                                     id: DefIndex,
-                                    tcx: &ty::ctxt<'tcx>)
-                                    -> ty::ImplOrTraitItem<'tcx> {
+                                    tcx: &TyCtxt<'tcx>)
+                                    -> Option<ty::ImplOrTraitItem<'tcx>> {
     let item_doc = cdata.lookup_item(id);
 
     let def_id = item_def_id(item_doc, cdata);
 
-    let container_id = item_require_parent_item(cdata, item_doc);
+    let container_id = if let Some(id) = item_parent_item(cdata, item_doc) {
+        id
+    } else {
+        return None;
+    };
     let container_doc = cdata.lookup_item(container_id.index);
     let container = match item_family(container_doc) {
         Trait => TraitContainer(container_id),
@@ -969,14 +961,16 @@ pub fn get_impl_or_trait_item<'tcx>(intr: Rc<IdentInterner>,
 
     let name = item_name(&intr, item_doc);
     let vis = item_visibility(item_doc);
+    let defaultness = item_defaultness(item_doc);
 
-    match item_sort(item_doc) {
+    Some(match item_sort(item_doc) {
         sort @ Some('C') | sort @ Some('c') => {
             let ty = doc_type(item_doc, tcx, cdata);
             ty::ConstTraitItem(Rc::new(ty::AssociatedConst {
                 name: name,
                 ty: ty,
                 vis: vis,
+                defaultness: defaultness,
                 def_id: def_id,
                 container: container,
                 has_value: sort == Some('C')
@@ -987,10 +981,10 @@ pub fn get_impl_or_trait_item<'tcx>(intr: Rc<IdentInterner>,
             let predicates = doc_predicates(item_doc, tcx, cdata, tag_method_ty_generics);
             let ity = tcx.lookup_item_type(def_id).ty;
             let fty = match ity.sty {
-                ty::TyBareFn(_, fty) => fty.clone(),
-                _ => tcx.sess.bug(&format!(
+                ty::TyFnDef(_, _, fty) => fty.clone(),
+                _ => bug!(
                     "the type {:?} of the method {:?} is not a function?",
-                    ity, name))
+                    ity, name)
             };
             let explicit_self = get_explicit_self(item_doc);
 
@@ -1000,6 +994,7 @@ pub fn get_impl_or_trait_item<'tcx>(intr: Rc<IdentInterner>,
                                                         fty,
                                                         explicit_self,
                                                         vis,
+                                                        defaultness,
                                                         def_id,
                                                         container)))
         }
@@ -1009,12 +1004,13 @@ pub fn get_impl_or_trait_item<'tcx>(intr: Rc<IdentInterner>,
                 name: name,
                 ty: ty,
                 vis: vis,
+                defaultness: defaultness,
                 def_id: def_id,
                 container: container,
             }))
         }
-        _ => panic!("unknown impl/trait item sort"),
-    }
+        _ => return None
+    })
 }
 
 pub fn get_trait_item_def_ids(cdata: Cmd, id: DefIndex)
@@ -1026,7 +1022,7 @@ pub fn get_trait_item_def_ids(cdata: Cmd, id: DefIndex)
             Some('C') | Some('c') => ty::ConstTraitItemId(def_id),
             Some('r') | Some('p') => ty::MethodTraitItemId(def_id),
             Some('t') => ty::TypeTraitItemId(def_id),
-            _ => panic!("unknown trait item sort"),
+            _ => bug!("unknown trait item sort"),
         }
     }).collect()
 }
@@ -1041,7 +1037,7 @@ pub fn get_item_variances(cdata: Cmd, id: DefIndex) -> ty::ItemVariances {
 pub fn get_provided_trait_methods<'tcx>(intr: Rc<IdentInterner>,
                                         cdata: Cmd,
                                         id: DefIndex,
-                                        tcx: &ty::ctxt<'tcx>)
+                                        tcx: &TyCtxt<'tcx>)
                                         -> Vec<Rc<ty::Method<'tcx>>> {
     let item = cdata.lookup_item(id);
 
@@ -1054,7 +1050,7 @@ pub fn get_provided_trait_methods<'tcx>(intr: Rc<IdentInterner>,
                                                     cdata,
                                                     did.index,
                                                     tcx);
-            if let ty::MethodTraitItem(ref method) = trait_item {
+            if let Some(ty::MethodTraitItem(ref method)) = trait_item {
                 Some((*method).clone())
             } else {
                 None
@@ -1068,7 +1064,7 @@ pub fn get_provided_trait_methods<'tcx>(intr: Rc<IdentInterner>,
 pub fn get_associated_consts<'tcx>(intr: Rc<IdentInterner>,
                                    cdata: Cmd,
                                    id: DefIndex,
-                                   tcx: &ty::ctxt<'tcx>)
+                                   tcx: &TyCtxt<'tcx>)
                                    -> Vec<Rc<ty::AssociatedConst<'tcx>>> {
     let item = cdata.lookup_item(id);
 
@@ -1083,7 +1079,7 @@ pub fn get_associated_consts<'tcx>(intr: Rc<IdentInterner>,
                                                             cdata,
                                                             did.index,
                                                             tcx);
-                    if let ty::ConstTraitItem(ref ac) = trait_item {
+                    if let Some(ty::ConstTraitItem(ref ac)) = trait_item {
                         Some((*ac).clone())
                     } else {
                         None
@@ -1142,21 +1138,24 @@ pub fn get_struct_field_attrs(cdata: Cmd) -> FnvHashMap<DefId, Vec<ast::Attribut
     }).collect()
 }
 
-fn struct_field_family_to_visibility(family: Family) -> hir::Visibility {
+fn struct_field_family_to_visibility(family: Family) -> ty::Visibility {
     match family {
-      PublicField => hir::Public,
-      InheritedField => hir::Inherited,
-      _ => panic!()
+        PublicField => ty::Visibility::Public,
+        InheritedField => ty::Visibility::PrivateExternal,
+        _ => bug!()
     }
 }
 
 pub fn get_struct_field_names(intr: &IdentInterner, cdata: Cmd, id: DefIndex)
     -> Vec<ast::Name> {
     let item = cdata.lookup_item(id);
+    let mut index = 0;
     reader::tagged_docs(item, tag_item_field).map(|an_item| {
         item_name(intr, an_item)
     }).chain(reader::tagged_docs(item, tag_item_unnamed_field).map(|_| {
-        special_idents::unnamed_field.name
+        let name = intr.intern(&index.to_string());
+        index += 1;
+        name
     })).collect()
 }
 
@@ -1210,11 +1209,11 @@ fn get_attributes(md: rbml::Doc) -> Vec<ast::Attribute> {
 
 fn list_crate_attributes(md: rbml::Doc, hash: &Svh,
                          out: &mut io::Write) -> io::Result<()> {
-    try!(write!(out, "=Crate Attributes ({})=\n", *hash));
+    write!(out, "=Crate Attributes ({})=\n", *hash)?;
 
     let r = get_attributes(md);
     for attr in &r {
-        try!(write!(out, "{}\n", pprust::attribute_to_string(attr)));
+        write!(out, "{}\n", pprust::attribute_to_string(attr))?;
     }
 
     write!(out, "\n\n")
@@ -1243,7 +1242,7 @@ pub fn get_crate_deps(data: &[u8]) -> Vec<CrateDep> {
 
     reader::tagged_docs(depsdoc, tag_crate_dep).enumerate().map(|(crate_num, depdoc)| {
         let name = docstr(depdoc, tag_crate_dep_crate_name);
-        let hash = Svh::new(&docstr(depdoc, tag_crate_dep_hash));
+        let hash = Svh::new(docstr(depdoc, tag_crate_dep_hash));
         let doc = reader::get_doc(depdoc, tag_crate_dep_explicitly_linked);
         let explicitly_linked = reader::doc_as_u8(doc) != 0;
         CrateDep {
@@ -1256,48 +1255,55 @@ pub fn get_crate_deps(data: &[u8]) -> Vec<CrateDep> {
 }
 
 fn list_crate_deps(data: &[u8], out: &mut io::Write) -> io::Result<()> {
-    try!(write!(out, "=External Dependencies=\n"));
+    write!(out, "=External Dependencies=\n")?;
     for dep in &get_crate_deps(data) {
-        try!(write!(out, "{} {}-{}\n", dep.cnum, dep.name, dep.hash));
+        write!(out, "{} {}-{}\n", dep.cnum, dep.name, dep.hash)?;
     }
-    try!(write!(out, "\n"));
+    write!(out, "\n")?;
     Ok(())
 }
 
 pub fn maybe_get_crate_hash(data: &[u8]) -> Option<Svh> {
     let cratedoc = rbml::Doc::new(data);
     reader::maybe_get_doc(cratedoc, tag_crate_hash).map(|doc| {
-        Svh::new(doc.as_str_slice())
+        Svh::new(doc.as_str_slice().to_string())
     })
 }
 
 pub fn get_crate_hash(data: &[u8]) -> Svh {
     let cratedoc = rbml::Doc::new(data);
     let hashdoc = reader::get_doc(cratedoc, tag_crate_hash);
-    Svh::new(hashdoc.as_str_slice())
+    Svh::new(hashdoc.as_str_slice().to_string())
 }
 
-pub fn maybe_get_crate_name(data: &[u8]) -> Option<String> {
+pub fn maybe_get_crate_name(data: &[u8]) -> Option<&str> {
     let cratedoc = rbml::Doc::new(data);
     reader::maybe_get_doc(cratedoc, tag_crate_crate_name).map(|doc| {
-        doc.as_str_slice().to_string()
+        doc.as_str_slice()
     })
 }
 
+pub fn get_crate_disambiguator<'a>(data: &'a [u8]) -> &'a str {
+    let crate_doc = rbml::Doc::new(data);
+    let disambiguator_doc = reader::get_doc(crate_doc, tag_crate_disambiguator);
+    let slice: &'a str = disambiguator_doc.as_str_slice();
+    slice
+}
+
 pub fn get_crate_triple(data: &[u8]) -> Option<String> {
     let cratedoc = rbml::Doc::new(data);
     let triple_doc = reader::maybe_get_doc(cratedoc, tag_crate_triple);
     triple_doc.map(|s| s.as_str().to_string())
 }
 
-pub fn get_crate_name(data: &[u8]) -> String {
+pub fn get_crate_name(data: &[u8]) -> &str {
     maybe_get_crate_name(data).expect("no crate name in crate")
 }
 
 pub fn list_crate_metadata(bytes: &[u8], out: &mut io::Write) -> io::Result<()> {
     let hash = get_crate_hash(bytes);
     let md = rbml::Doc::new(bytes);
-    try!(list_crate_attributes(md, &hash, out));
+    list_crate_attributes(md, &hash, out)?;
     list_crate_deps(bytes, out)
 }
 
@@ -1318,7 +1324,7 @@ pub fn translate_def_id(cdata: Cmd, did: DefId) -> DefId {
                 index: did.index,
             }
         }
-        None => panic!("didn't find a crate in the cnum_map")
+        None => bug!("didn't find a crate in the cnum_map")
     }
 }
 
@@ -1432,7 +1438,7 @@ pub fn each_implementation_for_trait<F>(cdata: Cmd,
     }
 }
 
-pub fn get_trait_of_item(cdata: Cmd, id: DefIndex, tcx: &ty::ctxt)
+pub fn get_trait_of_item(cdata: Cmd, id: DefIndex, tcx: &TyCtxt)
                          -> Option<DefId> {
     let item_doc = cdata.lookup_item(id);
     let parent_item_id = match item_parent_item(cdata, item_doc) {
@@ -1508,7 +1514,7 @@ pub fn get_dylib_dependency_formats(cdata: Cmd)
         let cnum: ast::CrateNum = cnum.parse().unwrap();
         let cnum = match cdata.cnum_map.borrow().get(&cnum) {
             Some(&n) => n,
-            None => panic!("didn't find a crate in the cnum_map")
+            None => bug!("didn't find a crate in the cnum_map")
         };
         result.push((cnum, if link == "d" {
             LinkagePreference::RequireDynamic
@@ -1567,7 +1573,7 @@ pub fn is_const_fn(cdata: Cmd, id: DefIndex) -> bool {
     }
 }
 
-pub fn is_extern_item(cdata: Cmd, id: DefIndex, tcx: &ty::ctxt) -> bool {
+pub fn is_extern_item(cdata: Cmd, id: DefIndex, tcx: &TyCtxt) -> bool {
     let item_doc = match cdata.get_item(id) {
         Some(doc) => doc,
         None => return false,
@@ -1578,7 +1584,8 @@ pub fn is_extern_item(cdata: Cmd, id: DefIndex, tcx: &ty::ctxt) -> bool {
             let ty::TypeScheme { generics, ty } = get_type(cdata, id, tcx);
             let no_generics = generics.types.is_empty();
             match ty.sty {
-                ty::TyBareFn(_, fn_ty) if fn_ty.abi != Abi::Rust => return no_generics,
+                ty::TyFnDef(_, _, fn_ty) | ty::TyFnPtr(fn_ty)
+                    if fn_ty.abi != Abi::Rust => return no_generics,
                 _ => no_generics,
             }
         },
@@ -1602,7 +1609,7 @@ pub fn is_impl(cdata: Cmd, id: DefIndex) -> bool {
 }
 
 fn doc_generics<'tcx>(base_doc: rbml::Doc,
-                      tcx: &ty::ctxt<'tcx>,
+                      tcx: &TyCtxt<'tcx>,
                       cdata: Cmd,
                       tag: usize)
                       -> ty::Generics<'tcx>
@@ -1651,7 +1658,7 @@ fn doc_generics<'tcx>(base_doc: rbml::Doc,
 
 fn doc_predicate<'tcx>(cdata: Cmd,
                        doc: rbml::Doc,
-                       tcx: &ty::ctxt<'tcx>)
+                       tcx: &TyCtxt<'tcx>)
                        -> ty::Predicate<'tcx>
 {
     let predicate_pos = cdata.xref_index.lookup(
@@ -1663,7 +1670,7 @@ fn doc_predicate<'tcx>(cdata: Cmd,
 }
 
 fn doc_predicates<'tcx>(base_doc: rbml::Doc,
-                        tcx: &ty::ctxt<'tcx>,
+                        tcx: &TyCtxt<'tcx>,
                         cdata: Cmd,
                         tag: usize)
                         -> ty::GenericPredicates<'tcx>
@@ -1718,7 +1725,7 @@ pub fn closure_kind(cdata: Cmd, closure_id: DefIndex) -> ty::ClosureKind {
     ty::ClosureKind::decode(&mut decoder).unwrap()
 }
 
-pub fn closure_ty<'tcx>(cdata: Cmd, closure_id: DefIndex, tcx: &ty::ctxt<'tcx>)
+pub fn closure_ty<'tcx>(cdata: Cmd, closure_id: DefIndex, tcx: &TyCtxt<'tcx>)
                         -> ty::ClosureTy<'tcx> {
     let closure_doc = cdata.lookup_item(closure_id);
     let closure_ty_doc = reader::get_doc(closure_doc, tag_items_closure_ty);
@@ -1726,14 +1733,16 @@ pub fn closure_ty<'tcx>(cdata: Cmd, closure_id: DefIndex, tcx: &ty::ctxt<'tcx>)
         .parse_closure_ty()
 }
 
-fn def_key(item_doc: rbml::Doc) -> hir_map::DefKey {
+pub fn def_key(cdata: Cmd, id: DefIndex) -> hir_map::DefKey {
+    debug!("def_key: id={:?}", id);
+    let item_doc = cdata.lookup_item(id);
     match reader::maybe_get_doc(item_doc, tag_def_key) {
         Some(def_key_doc) => {
             let mut decoder = reader::Decoder::new(def_key_doc);
             hir_map::DefKey::decode(&mut decoder).unwrap()
         }
         None => {
-            panic!("failed to find block with tag {:?} for item with family {:?}",
+            bug!("failed to find block with tag {:?} for item with family {:?}",
                    tag_def_key,
                    item_family(item_doc))
         }
@@ -1742,9 +1751,5 @@ fn def_key(item_doc: rbml::Doc) -> hir_map::DefKey {
 
 pub fn def_path(cdata: Cmd, id: DefIndex) -> hir_map::DefPath {
     debug!("def_path(id={:?})", id);
-    hir_map::definitions::make_def_path(id, |parent| {
-        debug!("def_path: parent={:?}", parent);
-        let parent_doc = cdata.lookup_item(parent);
-        def_key(parent_doc)
-    })
+    hir_map::DefPath::make(cdata.cnum, id, |parent| def_key(cdata, parent))
 }
index 50b9ea575508677520068c8957ec3ada6452d390..8fa23de9a2d16ffc618ea2a865a6d50f9b7def8d 100644
@@ -87,4 +87,6 @@ register_diagnostics! {
     E0468, // an `extern crate` loading macros must be at the crate root
     E0469, // imported macro not found
     E0470, // reexported macro not found
+    E0519, // local crate and dependency have same (crate-name, disambiguator)
+    E0523, // two dependencies have same (crate-name, disambiguator) but different SVH
 }
index d707c61cbb44079257859a40da4f1789d5896598..69f61cf97c0b271387ce18fac7eef9d84f23e2c0 100644
@@ -13,6 +13,7 @@
 #![allow(unused_must_use)] // everything is just a MemWriter, can't fail
 #![allow(non_camel_case_types)]
 
+use astencode::encode_inlined_item;
 use common::*;
 use cstore;
 use decoder;
@@ -20,16 +21,16 @@ use tyencode;
 use index::{self, IndexData};
 
 use middle::cstore::{LOCAL_CRATE, CrateStore, InlinedItemRef, LinkMeta, tls};
-use middle::def;
-use middle::def_id::{CRATE_DEF_INDEX, DefId};
+use rustc::hir::def;
+use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId};
 use middle::dependency_format::Linkage;
 use middle::stability;
-use middle::subst;
-use middle::ty::{self, Ty};
+use rustc::ty::subst;
+use rustc::traits::specialization_graph;
+use rustc::ty::{self, Ty, TyCtxt};
+use rustc::ty::util::IntTypeExt;
 
-use rustc::back::svh::Svh;
-use rustc::front::map::{LinkedPath, PathElem, PathElems};
-use rustc::front::map as ast_map;
+use rustc::hir::svh::Svh;
 use rustc::mir::mir_map::MirMap;
 use rustc::session::config;
 use rustc::util::nodemap::{FnvHashMap, NodeMap, NodeSet};
@@ -46,37 +47,20 @@ use syntax::codemap::BytePos;
 use syntax::attr;
 use syntax::attr::AttrMetaMethods;
 use syntax::errors::Handler;
-use syntax::parse::token::special_idents;
 use syntax;
 use rbml::writer::Encoder;
 
-use rustc_front::hir::{self, PatKind};
-use rustc_front::intravisit::Visitor;
-use rustc_front::intravisit;
-
-pub type EncodeInlinedItem<'a> =
-    Box<FnMut(&EncodeContext, &mut Encoder, InlinedItemRef) + 'a>;
-
-pub struct EncodeParams<'a, 'tcx: 'a> {
-    pub diag: &'a Handler,
-    pub tcx: &'a ty::ctxt<'tcx>,
-    pub reexports: &'a def::ExportMap,
-    pub item_symbols: &'a RefCell<NodeMap<String>>,
-    pub link_meta: &'a LinkMeta,
-    pub cstore: &'a cstore::CStore,
-    pub encode_inlined_item: EncodeInlinedItem<'a>,
-    pub reachable: &'a NodeSet,
-    pub mir_map: &'a MirMap<'tcx>,
-}
+use rustc::hir::{self, PatKind};
+use rustc::hir::intravisit::Visitor;
+use rustc::hir::intravisit;
 
 pub struct EncodeContext<'a, 'tcx: 'a> {
     pub diag: &'a Handler,
-    pub tcx: &'a ty::ctxt<'tcx>,
+    pub tcx: &'a TyCtxt<'tcx>,
     pub reexports: &'a def::ExportMap,
     pub item_symbols: &'a RefCell<NodeMap<String>>,
     pub link_meta: &'a LinkMeta,
     pub cstore: &'a cstore::CStore,
-    pub encode_inlined_item: RefCell<EncodeInlinedItem<'a>>,
     pub type_abbrevs: tyencode::abbrev_map<'tcx>,
     pub reachable: &'a NodeSet,
     pub mir_map: &'a MirMap<'tcx>,
@@ -157,7 +141,7 @@ pub fn def_to_u64(did: DefId) -> u64 {
     (did.krate as u64) << 32 | (did.index.as_usize() as u64)
 }
 
-pub fn def_to_string(did: DefId) -> String {
+pub fn def_to_string(_tcx: &TyCtxt, did: DefId) -> String {
     format!("{}:{}", did.krate, did.index.as_usize())
 }
 
@@ -231,7 +215,7 @@ fn encode_symbol(ecx: &EncodeContext,
             rbml_w.wr_tagged_str(tag_items_data_item_symbol, x);
         }
         None => {
-            ecx.diag.bug(&format!("encode_symbol: id not found {}", id));
+            bug!("encode_symbol: id not found {}", id);
         }
     }
 }
@@ -239,7 +223,8 @@ fn encode_symbol(ecx: &EncodeContext,
 fn encode_disr_val(_: &EncodeContext,
                    rbml_w: &mut Encoder,
                    disr_val: ty::Disr) {
-    rbml_w.wr_tagged_str(tag_disr_val, &disr_val.to_string());
+    // convert to u64 so just the number is printed, without any type info
+    rbml_w.wr_tagged_str(tag_disr_val, &disr_val.to_u64_unchecked().to_string());
 }
 
 fn encode_parent_item(rbml_w: &mut Encoder, id: DefId) {
@@ -249,7 +234,7 @@ fn encode_parent_item(rbml_w: &mut Encoder, id: DefId) {
 fn encode_struct_fields(rbml_w: &mut Encoder,
                         variant: ty::VariantDef) {
     for f in &variant.fields {
-        if f.name == special_idents::unnamed_field.name {
+        if variant.is_tuple_struct() {
             rbml_w.start_tag(tag_item_unnamed_field);
         } else {
             rbml_w.start_tag(tag_item_field);
@@ -263,13 +248,14 @@ fn encode_struct_fields(rbml_w: &mut Encoder,
 
 fn encode_enum_variant_info<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                                       rbml_w: &mut Encoder,
-                                      id: NodeId,
-                                      vis: hir::Visibility,
+                                      did: DefId,
+                                      vis: &hir::Visibility,
                                       index: &mut CrateIndex<'tcx>) {
-    debug!("encode_enum_variant_info(id={})", id);
-
-    let mut disr_val = 0;
-    let def = ecx.tcx.lookup_adt_def(ecx.tcx.map.local_def_id(id));
+    debug!("encode_enum_variant_info(did={:?})", did);
+    let repr_hints = ecx.tcx.lookup_repr_hints(did);
+    let repr_type = ecx.tcx.enum_repr_type(repr_hints.get(0));
+    let mut disr_val = repr_type.initial_discriminant(&ecx.tcx);
+    let def = ecx.tcx.lookup_adt_def(did);
     for variant in &def.variants {
         let vid = variant.did;
         let variant_node_id = ecx.local_id(vid);
@@ -291,7 +277,7 @@ fn encode_enum_variant_info<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
             ty::VariantKind::Unit => 'w',
         });
         encode_name(rbml_w, variant.name);
-        encode_parent_item(rbml_w, ecx.tcx.map.local_def_id(id));
+        encode_parent_item(rbml_w, did);
         encode_visibility(rbml_w, vis);
 
         let attrs = ecx.tcx.get_attrs(vid);
@@ -312,24 +298,9 @@ fn encode_enum_variant_info<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
         }
         encode_bounds_and_type_for_item(rbml_w, ecx, index, variant_node_id);
 
-        ecx.tcx.map.with_path(variant_node_id, |path| encode_path(rbml_w, path));
         rbml_w.end_tag();
-        disr_val = disr_val.wrapping_add(1);
-    }
-}
-
-fn encode_path<PI: Iterator<Item=PathElem>>(rbml_w: &mut Encoder, path: PI) {
-    let path = path.collect::<Vec<_>>();
-    rbml_w.start_tag(tag_path);
-    rbml_w.wr_tagged_u32(tag_path_len, path.len() as u32);
-    for pe in &path {
-        let tag = match *pe {
-            ast_map::PathMod(_) => tag_path_elem_mod,
-            ast_map::PathName(_) => tag_path_elem_name
-        };
-        rbml_w.wr_tagged_str(tag, &pe.name().as_str());
+        disr_val = disr_val.wrap_incr();
     }
-    rbml_w.end_tag();
 }
 
 /// Iterates through "auxiliary node IDs", which are node IDs that describe
@@ -383,9 +354,8 @@ fn encode_info_for_mod(ecx: &EncodeContext,
                        md: &hir::Mod,
                        attrs: &[ast::Attribute],
                        id: NodeId,
-                       path: PathElems,
                        name: Name,
-                       vis: hir::Visibility) {
+                       vis: &hir::Visibility) {
     rbml_w.start_tag(tag_items_data_item);
     encode_def_id_and_key(ecx, rbml_w, ecx.tcx.map.local_def_id(id));
     encode_family(rbml_w, 'm');
@@ -405,7 +375,6 @@ fn encode_info_for_mod(ecx: &EncodeContext,
         });
     }
 
-    encode_path(rbml_w, path.clone());
     encode_visibility(rbml_w, vis);
 
     let stab = stability::lookup_stability(ecx.tcx, ecx.tcx.map.local_def_id(id));
@@ -414,7 +383,7 @@ fn encode_info_for_mod(ecx: &EncodeContext,
     encode_deprecation(rbml_w, depr);
 
     // Encode the reexports of this module, if this module is public.
-    if vis == hir::Public {
+    if *vis == hir::Public {
         debug!("(encoding info for module) encoding reexports for {}", id);
         encode_reexports(ecx, rbml_w, id);
     }
@@ -424,21 +393,31 @@ fn encode_info_for_mod(ecx: &EncodeContext,
 }
 
 fn encode_struct_field_family(rbml_w: &mut Encoder,
-                              visibility: hir::Visibility) {
-    encode_family(rbml_w, match visibility {
-        hir::Public => 'g',
-        hir::Inherited => 'N'
-    });
+                              visibility: ty::Visibility) {
+    encode_family(rbml_w, if visibility.is_public() { 'g' } else { 'N' });
 }
 
-fn encode_visibility(rbml_w: &mut Encoder, visibility: hir::Visibility) {
-    let ch = match visibility {
-        hir::Public => 'y',
-        hir::Inherited => 'i',
-    };
+fn encode_visibility<T: HasVisibility>(rbml_w: &mut Encoder, visibility: T) {
+    let ch = if visibility.is_public() { 'y' } else { 'i' };
     rbml_w.wr_tagged_u8(tag_items_data_item_visibility, ch as u8);
 }
 
+trait HasVisibility: Sized {
+    fn is_public(self) -> bool;
+}
+
+impl<'a> HasVisibility for &'a hir::Visibility {
+    fn is_public(self) -> bool {
+        *self == hir::Public
+    }
+}
+
+impl HasVisibility for ty::Visibility {
+    fn is_public(self) -> bool {
+        self == ty::Visibility::Public
+    }
+}
+
 fn encode_constness(rbml_w: &mut Encoder, constness: hir::Constness) {
     rbml_w.start_tag(tag_items_data_item_constness);
     let ch = match constness {
@@ -449,6 +428,14 @@ fn encode_constness(rbml_w: &mut Encoder, constness: hir::Constness) {
     rbml_w.end_tag();
 }
 
+fn encode_defaultness(rbml_w: &mut Encoder, defaultness: hir::Defaultness) {
+    let ch = match defaultness {
+        hir::Defaultness::Default => 'd',
+        hir::Defaultness::Final => 'f',
+    };
+    rbml_w.wr_tagged_u8(tag_items_data_item_defaultness, ch as u8);
+}
+
 fn encode_explicit_self(rbml_w: &mut Encoder,
                         explicit_self: &ty::ExplicitSelfCategory) {
     let tag = tag_item_trait_method_explicit_self;
@@ -525,7 +512,6 @@ fn encode_info_for_struct_ctor<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
     });
     encode_bounds_and_type_for_item(rbml_w, ecx, index, ctor_id);
     encode_name(rbml_w, name);
-    ecx.tcx.map.with_path(ctor_id, |path| encode_path(rbml_w, path));
     encode_parent_item(rbml_w, ecx.tcx.map.local_def_id(struct_id));
 
     if ecx.item_symbols.borrow().contains_key(&ctor_id) {
@@ -641,7 +627,6 @@ fn encode_info_for_associated_const<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                                               rbml_w: &mut Encoder,
                                               index: &mut CrateIndex<'tcx>,
                                               associated_const: &ty::AssociatedConst,
-                                              impl_path: PathElems,
                                               parent_id: NodeId,
                                               impl_item_opt: Option<&hir::ImplItem>) {
     debug!("encode_info_for_associated_const({:?},{:?})",
@@ -667,15 +652,14 @@ fn encode_info_for_associated_const<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
     encode_stability(rbml_w, stab);
     encode_deprecation(rbml_w, depr);
 
-    let elem = ast_map::PathName(associated_const.name);
-    encode_path(rbml_w, impl_path.chain(Some(elem)));
-
     if let Some(ii) = impl_item_opt {
         encode_attributes(rbml_w, &ii.attrs);
+        encode_defaultness(rbml_w, ii.defaultness);
         encode_inlined_item(ecx,
                             rbml_w,
                             InlinedItemRef::ImplItem(ecx.tcx.map.local_def_id(parent_id),
                                                      ii));
+        encode_mir(ecx, rbml_w, ii.id);
     }
 
     rbml_w.end_tag();
@@ -685,7 +669,6 @@ fn encode_info_for_method<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                                     rbml_w: &mut Encoder,
                                     index: &mut CrateIndex<'tcx>,
                                     m: &ty::Method<'tcx>,
-                                    impl_path: PathElems,
                                     is_default_impl: bool,
                                     parent_id: NodeId,
                                     impl_item_opt: Option<&hir::ImplItem>) {
@@ -707,8 +690,6 @@ fn encode_info_for_method<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
     let m_node_id = ecx.local_id(m.def_id);
     encode_bounds_and_type_for_item(rbml_w, ecx, index, m_node_id);
 
-    let elem = ast_map::PathName(m.name);
-    encode_path(rbml_w, impl_path.chain(Some(elem)));
     if let Some(impl_item) = impl_item_opt {
         if let hir::ImplItemKind::Method(ref sig, _) = impl_item.node {
             encode_attributes(rbml_w, &impl_item.attrs);
@@ -721,8 +702,10 @@ fn encode_info_for_method<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                                     rbml_w,
                                     InlinedItemRef::ImplItem(ecx.tcx.map.local_def_id(parent_id),
                                                              impl_item));
+                encode_mir(ecx, rbml_w, impl_item.id);
             }
             encode_constness(rbml_w, sig.constness);
+            encode_defaultness(rbml_w, impl_item.defaultness);
             if !any_types {
                 let m_id = ecx.local_id(m.def_id);
                 encode_symbol(ecx, rbml_w, m_id);
@@ -738,7 +721,6 @@ fn encode_info_for_associated_type<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                                              rbml_w: &mut Encoder,
                                              index: &mut CrateIndex<'tcx>,
                                              associated_type: &ty::AssociatedType<'tcx>,
-                                             impl_path: PathElems,
                                              parent_id: NodeId,
                                              impl_item_opt: Option<&hir::ImplItem>) {
     debug!("encode_info_for_associated_type({:?},{:?})",
@@ -760,11 +742,9 @@ fn encode_info_for_associated_type<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
     encode_stability(rbml_w, stab);
     encode_deprecation(rbml_w, depr);
 
-    let elem = ast_map::PathName(associated_type.name);
-    encode_path(rbml_w, impl_path.chain(Some(elem)));
-
     if let Some(ii) = impl_item_opt {
         encode_attributes(rbml_w, &ii.attrs);
+        encode_defaultness(rbml_w, ii.defaultness);
     } else {
         encode_predicates(rbml_w, ecx, index,
                           &ecx.tcx.lookup_predicates(associated_type.def_id),
@@ -806,23 +786,6 @@ fn encode_repr_attrs(rbml_w: &mut Encoder,
     rbml_w.end_tag();
 }
 
-fn encode_inlined_item(ecx: &EncodeContext,
-                       rbml_w: &mut Encoder,
-                       ii: InlinedItemRef) {
-    let mut eii = ecx.encode_inlined_item.borrow_mut();
-    let eii: &mut EncodeInlinedItem = &mut *eii;
-    eii(ecx, rbml_w, ii);
-
-    let node_id = match ii {
-        InlinedItemRef::Item(item) => item.id,
-        InlinedItemRef::TraitItem(_, trait_item) => trait_item.id,
-        InlinedItemRef::ImplItem(_, impl_item) => impl_item.id,
-        InlinedItemRef::Foreign(foreign_item) => foreign_item.id
-    };
-
-    encode_mir(ecx, rbml_w, node_id);
-}
-
 fn encode_mir(ecx: &EncodeContext, rbml_w: &mut Encoder, node_id: NodeId) {
     if let Some(mir) = ecx.mir_map.map.get(&node_id) {
         rbml_w.start_tag(tag_mir as usize);
@@ -871,6 +834,12 @@ fn encode_deprecation(rbml_w: &mut Encoder, depr_opt: Option<attr::Deprecation>)
     });
 }
 
+fn encode_parent_impl(rbml_w: &mut Encoder, parent_opt: Option<DefId>) {
+    parent_opt.map(|parent| {
+        rbml_w.wr_tagged_u64(tag_items_data_parent_impl, def_to_u64(parent));
+    });
+}
+
 fn encode_xrefs<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                           rbml_w: &mut Encoder,
                           xrefs: FnvHashMap<XRef<'tcx>, u32>)
@@ -896,14 +865,13 @@ fn encode_xrefs<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
 fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                                   rbml_w: &mut Encoder,
                                   item: &hir::Item,
-                                  index: &mut CrateIndex<'tcx>,
-                                  path: PathElems,
-                                  vis: hir::Visibility) {
+                                  index: &mut CrateIndex<'tcx>) {
     let tcx = ecx.tcx;
 
     debug!("encoding info for item at {}",
            tcx.sess.codemap().span_to_string(item.span));
 
+    let vis = &item.vis;
     let def_id = ecx.tcx.map.local_def_id(item.id);
     let stab = stability::lookup_stability(tcx, ecx.tcx.map.local_def_id(item.id));
     let depr = stability::lookup_deprecation(tcx, ecx.tcx.map.local_def_id(item.id));
@@ -921,7 +889,6 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
         encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id);
         encode_symbol(ecx, rbml_w, item.id);
         encode_name(rbml_w, item.name);
-        encode_path(rbml_w, path);
         encode_visibility(rbml_w, vis);
         encode_stability(rbml_w, stab);
         encode_deprecation(rbml_w, depr);
@@ -935,9 +902,9 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
         encode_family(rbml_w, 'C');
         encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id);
         encode_name(rbml_w, item.name);
-        encode_path(rbml_w, path);
         encode_attributes(rbml_w, &item.attrs);
         encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item));
+        encode_mir(ecx, rbml_w, item.id);
         encode_visibility(rbml_w, vis);
         encode_stability(rbml_w, stab);
         encode_deprecation(rbml_w, depr);
@@ -951,11 +918,11 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
         let tps_len = generics.ty_params.len();
         encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id);
         encode_name(rbml_w, item.name);
-        encode_path(rbml_w, path);
         encode_attributes(rbml_w, &item.attrs);
         let needs_inline = tps_len > 0 || attr::requests_inline(&item.attrs);
         if needs_inline || constness == hir::Constness::Const {
             encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item));
+            encode_mir(ecx, rbml_w, item.id);
         }
         if tps_len == 0 {
             encode_symbol(ecx, rbml_w, item.id);
@@ -974,9 +941,8 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                             m,
                             &item.attrs,
                             item.id,
-                            path,
                             item.name,
-                            item.vis);
+                            &item.vis);
       }
       hir::ItemForeignMod(ref fm) => {
         index.record(def_id, rbml_w);
@@ -984,7 +950,6 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
         encode_def_id_and_key(ecx, rbml_w, def_id);
         encode_family(rbml_w, 'n');
         encode_name(rbml_w, item.name);
-        encode_path(rbml_w, path);
 
         // Encode all the items in this module.
         for foreign_item in &fm.items {
@@ -1003,7 +968,6 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
         encode_family(rbml_w, 'y');
         encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id);
         encode_name(rbml_w, item.name);
-        encode_path(rbml_w, path);
         encode_visibility(rbml_w, vis);
         encode_stability(rbml_w, stab);
         encode_deprecation(rbml_w, depr);
@@ -1024,7 +988,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
             encode_variant_id(rbml_w, ecx.tcx.map.local_def_id(v.node.data.id()));
         }
         encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item));
-        encode_path(rbml_w, path);
+        encode_mir(ecx, rbml_w, item.id);
 
         // Encode inherent implementations for this enumeration.
         encode_inherent_implementations(ecx, rbml_w, def_id);
@@ -1036,7 +1000,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
 
         encode_enum_variant_info(ecx,
                                  rbml_w,
-                                 item.id,
+                                 def_id,
                                  vis,
                                  index);
       }
@@ -1060,7 +1024,6 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
         encode_item_variances(rbml_w, ecx, item.id);
         encode_name(rbml_w, item.name);
         encode_attributes(rbml_w, &item.attrs);
-        encode_path(rbml_w, path.clone());
         encode_stability(rbml_w, stab);
         encode_deprecation(rbml_w, depr);
         encode_visibility(rbml_w, vis);
@@ -1072,6 +1035,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
         encode_struct_fields(rbml_w, variant);
 
         encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item));
+        encode_mir(ecx, rbml_w, item.id);
 
         // Encode inherent implementations for this structure.
         encode_inherent_implementations(ecx, rbml_w, def_id);
@@ -1148,10 +1112,20 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
             }
             rbml_w.end_tag();
         }
-        if let Some(trait_ref) = tcx.impl_trait_ref(ecx.tcx.map.local_def_id(item.id)) {
+        let did = ecx.tcx.map.local_def_id(item.id);
+        if let Some(trait_ref) = tcx.impl_trait_ref(did) {
             encode_trait_ref(rbml_w, ecx, trait_ref, tag_item_trait_ref);
+
+            let trait_def = tcx.lookup_trait_def(trait_ref.def_id);
+            let parent = trait_def.ancestors(did)
+                .skip(1)
+                .next()
+                .and_then(|node| match node {
+                    specialization_graph::Node::Impl(parent) => Some(parent),
+                    _ => None,
+                });
+            encode_parent_impl(rbml_w, parent);
         }
-        encode_path(rbml_w, path.clone());
         encode_stability(rbml_w, stab);
         encode_deprecation(rbml_w, depr);
         rbml_w.end_tag();
@@ -1174,7 +1148,6 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                                                      rbml_w,
                                                      index,
                                                      &associated_const,
-                                                     path.clone(),
                                                      item.id,
                                                      ast_item)
                 }
@@ -1183,7 +1156,6 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                                            rbml_w,
                                            index,
                                            &method_type,
-                                           path.clone(),
                                            false,
                                            item.id,
                                            ast_item)
@@ -1193,7 +1165,6 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                                                     rbml_w,
                                                     index,
                                                     &associated_type,
-                                                    path.clone(),
                                                     item.id,
                                                     ast_item)
                 }
@@ -1245,7 +1216,6 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
             rbml_w.wr_tagged_u64(tag_mod_child,
                                  def_to_u64(method_def_id.def_id()));
         }
-        encode_path(rbml_w, path.clone());
 
         // Encode inherent implementations for this trait.
         encode_inherent_implementations(ecx, rbml_w, def_id);
@@ -1276,10 +1246,6 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                     encode_def_id_and_key(ecx, rbml_w, associated_const.def_id);
                     encode_visibility(rbml_w, associated_const.vis);
 
-                    let elem = ast_map::PathName(associated_const.name);
-                    encode_path(rbml_w,
-                                path.clone().chain(Some(elem)));
-
                     encode_family(rbml_w, 'C');
 
                     encode_bounds_and_type_for_item(rbml_w, ecx, index,
@@ -1292,10 +1258,6 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
 
                     encode_method_ty_fields(ecx, rbml_w, index, &method_ty);
 
-                    let elem = ast_map::PathName(method_ty.name);
-                    encode_path(rbml_w,
-                                path.clone().chain(Some(elem)));
-
                     match method_ty.explicit_self {
                         ty::ExplicitSelfCategory::Static => {
                             encode_family(rbml_w,
@@ -1315,11 +1277,6 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                 ty::TypeTraitItem(associated_type) => {
                     encode_name(rbml_w, associated_type.name);
                     encode_def_id_and_key(ecx, rbml_w, associated_type.def_id);
-
-                    let elem = ast_map::PathName(associated_type.name);
-                    encode_path(rbml_w,
-                                path.clone().chain(Some(elem)));
-
                     encode_item_sort(rbml_w, 't');
                     encode_family(rbml_w, 'y');
 
@@ -1343,6 +1300,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
 
                     encode_inlined_item(ecx, rbml_w,
                                         InlinedItemRef::TraitItem(def_id, trait_item));
+                    encode_mir(ecx, rbml_w, trait_item.id);
                 }
                 hir::MethodTraitItem(ref sig, ref body) => {
                     // If this is a static method, we've already
@@ -1358,6 +1316,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                         encode_item_sort(rbml_w, 'p');
                         encode_inlined_item(ecx, rbml_w,
                                             InlinedItemRef::TraitItem(def_id, trait_item));
+                        encode_mir(ecx, rbml_w, trait_item.id);
                     } else {
                         encode_item_sort(rbml_w, 'r');
                     }
@@ -1379,15 +1338,15 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
 fn encode_info_for_foreign_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                                           rbml_w: &mut Encoder,
                                           nitem: &hir::ForeignItem,
-                                          index: &mut CrateIndex<'tcx>,
-                                          path: PathElems,
-                                          abi: Abi) {
+                                          index: &mut CrateIndex<'tcx>) {
+    debug!("writing foreign item {}", ecx.tcx.node_path_str(nitem.id));
     let def_id = ecx.tcx.map.local_def_id(nitem.id);
+    let abi = ecx.tcx.map.get_foreign_abi(nitem.id);
 
     index.record(def_id, rbml_w);
     rbml_w.start_tag(tag_items_data_item);
     encode_def_id_and_key(ecx, rbml_w, def_id);
-    encode_visibility(rbml_w, nitem.vis);
+    encode_visibility(rbml_w, &nitem.vis);
     match nitem.node {
       hir::ForeignItemFn(ref fndecl, _) => {
         encode_family(rbml_w, FN_FAMILY);
@@ -1395,13 +1354,15 @@ fn encode_info_for_foreign_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
         encode_name(rbml_w, nitem.name);
         if abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
             encode_inlined_item(ecx, rbml_w, InlinedItemRef::Foreign(nitem));
+            encode_mir(ecx, rbml_w, nitem.id);
+        } else {
+            encode_symbol(ecx, rbml_w, nitem.id);
         }
         encode_attributes(rbml_w, &nitem.attrs);
         let stab = stability::lookup_stability(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id));
         let depr = stability::lookup_deprecation(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id));
         encode_stability(rbml_w, stab);
         encode_deprecation(rbml_w, depr);
-        encode_symbol(ecx, rbml_w, nitem.id);
         encode_method_argument_names(rbml_w, &fndecl);
       }
       hir::ForeignItemStatic(_, mutbl) => {
@@ -1420,7 +1381,6 @@ fn encode_info_for_foreign_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
         encode_name(rbml_w, nitem.name);
       }
     }
-    encode_path(rbml_w, path);
     rbml_w.end_tag();
 }
 
@@ -1445,8 +1405,6 @@ fn my_visit_expr(expr: &hir::Expr,
             ecx.tcx.closure_kind(def_id).encode(rbml_w).unwrap();
             rbml_w.end_tag();
 
-            ecx.tcx.map.with_path(expr.id, |path| encode_path(rbml_w, path));
-
             assert!(ecx.mir_map.map.contains_key(&expr.id));
             encode_mir(ecx, rbml_w, expr.id);
 
@@ -1456,31 +1414,6 @@ fn my_visit_expr(expr: &hir::Expr,
     }
 }
 
-fn my_visit_item<'a, 'tcx>(i: &hir::Item,
-                           rbml_w: &mut Encoder,
-                           ecx: &EncodeContext<'a, 'tcx>,
-                           index: &mut CrateIndex<'tcx>) {
-    ecx.tcx.map.with_path(i.id, |path| {
-        encode_info_for_item(ecx, rbml_w, i, index, path, i.vis);
-    });
-}
-
-fn my_visit_foreign_item<'a, 'tcx>(ni: &hir::ForeignItem,
-                                   rbml_w: &mut Encoder,
-                                   ecx: &EncodeContext<'a, 'tcx>,
-                                   index: &mut CrateIndex<'tcx>) {
-    debug!("writing foreign item {}::{}",
-            ecx.tcx.map.path_to_string(ni.id),
-            ni.name);
-
-    let abi = ecx.tcx.map.get_foreign_abi(ni.id);
-    ecx.tcx.map.with_path(ni.id, |path| {
-        encode_info_for_foreign_item(ecx, rbml_w,
-                                     ni, index,
-                                     path, abi);
-    });
-}
-
 struct EncodeVisitor<'a, 'b:'a, 'c:'a, 'tcx:'c> {
     rbml_w_for_visit_item: &'a mut Encoder<'b>,
     ecx: &'a EncodeContext<'c,'tcx>,
@@ -1494,11 +1427,11 @@ impl<'a, 'b, 'c, 'tcx> Visitor<'tcx> for EncodeVisitor<'a, 'b, 'c, 'tcx> {
     }
     fn visit_item(&mut self, i: &'tcx hir::Item) {
         intravisit::walk_item(self, i);
-        my_visit_item(i, self.rbml_w_for_visit_item, self.ecx, self.index);
+        encode_info_for_item(self.ecx, self.rbml_w_for_visit_item, i, self.index);
     }
     fn visit_foreign_item(&mut self, ni: &'tcx hir::ForeignItem) {
         intravisit::walk_foreign_item(self, ni);
-        my_visit_foreign_item(ni, self.rbml_w_for_visit_item, self.ecx, self.index);
+        encode_info_for_foreign_item(self.ecx, self.rbml_w_for_visit_item, ni, self.index);
     }
 }
 
@@ -1519,9 +1452,8 @@ fn encode_info_for_items<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
                         &krate.module,
                         &[],
                         CRATE_NODE_ID,
-                        [].iter().cloned().chain(LinkedPath::empty()),
                         syntax::parse::token::intern(&ecx.link_meta.crate_name),
-                        hir::Public);
+                        &hir::Public);
 
     krate.visit_all_items(&mut EncodeVisitor {
         index: &mut index,
@@ -1752,9 +1684,9 @@ fn encode_struct_field_attrs(ecx: &EncodeContext,
     impl<'a, 'b, 'c, 'tcx, 'v> Visitor<'v> for StructFieldVisitor<'a, 'b, 'c, 'tcx> {
         fn visit_struct_field(&mut self, field: &hir::StructField) {
             self.rbml_w.start_tag(tag_struct_field);
-            let def_id = self.ecx.tcx.map.local_def_id(field.node.id);
+            let def_id = self.ecx.tcx.map.local_def_id(field.id);
             encode_def_id(self.rbml_w, def_id);
-            encode_attributes(self.rbml_w, &field.node.attrs);
+            encode_attributes(self.rbml_w, &field.attrs);
             self.rbml_w.end_tag();
         }
     }
@@ -1767,7 +1699,7 @@ fn encode_struct_field_attrs(ecx: &EncodeContext,
 
 
 struct ImplVisitor<'a, 'tcx:'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     impls: FnvHashMap<DefId, Vec<DefId>>
 }
 
@@ -1868,6 +1800,10 @@ fn encode_crate_name(rbml_w: &mut Encoder, crate_name: &str) {
     rbml_w.wr_tagged_str(tag_crate_crate_name, crate_name);
 }
 
+fn encode_crate_disambiguator(rbml_w: &mut Encoder, crate_disambiguator: &str) {
+    rbml_w.wr_tagged_str(tag_crate_disambiguator, crate_disambiguator);
+}
+
 fn encode_crate_triple(rbml_w: &mut Encoder, triple: &str) {
     rbml_w.wr_tagged_str(tag_crate_triple, triple);
 }
@@ -1897,32 +1833,7 @@ fn encode_dylib_dependency_formats(rbml_w: &mut Encoder, ecx: &EncodeContext) {
 #[allow(non_upper_case_globals)]
 pub const metadata_encoding_version : &'static [u8] = &[b'r', b'u', b's', b't', 0, 0, 0, 2 ];
 
-pub fn encode_metadata(parms: EncodeParams, krate: &hir::Crate) -> Vec<u8> {
-    let EncodeParams {
-        item_symbols,
-        diag,
-        tcx,
-        reexports,
-        cstore,
-        encode_inlined_item,
-        link_meta,
-        reachable,
-        mir_map,
-        ..
-    } = parms;
-    let ecx = EncodeContext {
-        diag: diag,
-        tcx: tcx,
-        reexports: reexports,
-        item_symbols: item_symbols,
-        link_meta: link_meta,
-        cstore: cstore,
-        encode_inlined_item: RefCell::new(encode_inlined_item),
-        type_abbrevs: RefCell::new(FnvHashMap()),
-        reachable: reachable,
-        mir_map: mir_map,
-    };
-
+pub fn encode_metadata(ecx: EncodeContext, krate: &hir::Crate) -> Vec<u8> {
     let mut wr = Cursor::new(Vec::new());
 
     {
@@ -2003,6 +1914,7 @@ fn encode_metadata_inner(rbml_w: &mut Encoder,
     encode_crate_name(rbml_w, &ecx.link_meta.crate_name);
     encode_crate_triple(rbml_w, &ecx.tcx.sess.opts.target_triple);
     encode_hash(rbml_w, &ecx.link_meta.crate_hash);
+    encode_crate_disambiguator(rbml_w, &ecx.tcx.sess.crate_disambiguator.get().as_str());
     encode_dylib_dependency_formats(rbml_w, &ecx);
 
     let mut i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap();
@@ -2094,11 +2006,14 @@ fn encode_metadata_inner(rbml_w: &mut Encoder,
 }
 
 // Get the encoded string for a type
-pub fn encoded_ty<'tcx>(tcx: &ty::ctxt<'tcx>, t: Ty<'tcx>) -> Vec<u8> {
+pub fn encoded_ty<'tcx>(tcx: &TyCtxt<'tcx>,
+                        t: Ty<'tcx>,
+                        def_id_to_string: fn(&TyCtxt<'tcx>, DefId) -> String)
+                        -> Vec<u8> {
     let mut wr = Cursor::new(Vec::new());
     tyencode::enc_ty(&mut wr, &tyencode::ctxt {
         diag: tcx.sess.diagnostic(),
-        ds: def_to_string,
+        ds: def_id_to_string,
         tcx: tcx,
         abbrevs: &RefCell::new(FnvHashMap())
     }, t);
index 60bbdaddd751607c7e75c1ffeade30db3d440852..9c066f9f5f1f1ad688e7237baee70d5c47d48340 100644
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use middle::def_id::{DefId, DefIndex};
+use rustc::hir::def_id::{DefId, DefIndex};
 use rbml;
 use std::io::{Cursor, Write};
 use std::slice;
index 5386c5b77c2121255ef4f7f0fe1a5b3d4fdfb479..139462d41bf1fd670183e4153a1617b70a44af31 100644
@@ -23,6 +23,7 @@
 #![feature(rustc_diagnostic_macros)]
 #![feature(rustc_private)]
 #![feature(staged_api)]
+#![feature(question_mark)]
 
 #[macro_use] extern crate log;
 #[macro_use] extern crate syntax;
@@ -32,10 +33,11 @@ extern crate flate;
 extern crate rbml;
 extern crate serialize;
 
+#[macro_use]
 extern crate rustc;
 extern crate rustc_back;
-extern crate rustc_front;
 extern crate rustc_llvm;
+extern crate rustc_const_math;
 
 pub use rustc::middle;
 
index 07a736d9959c853144e32c1abb97c3075a92338d..c7cd8ae2dd7f32372cb73ab374221a88eecf32cf 100644
@@ -216,7 +216,7 @@ use cstore::{MetadataBlob, MetadataVec, MetadataArchive};
 use decoder;
 use encoder;
 
-use rustc::back::svh::Svh;
+use rustc::hir::svh::Svh;
 use rustc::session::Session;
 use rustc::session::filesearch::{FileSearch, FileMatches, FileDoesntMatch};
 use rustc::session::search_paths::PathKind;
@@ -300,16 +300,10 @@ impl<'a> Context<'a> {
     }
 
     pub fn load_library_crate(&mut self) -> Library {
-        match self.find_library_crate() {
-            Some(t) => t,
-            None => {
-                self.report_load_errs();
-                unreachable!()
-            }
-        }
+        self.find_library_crate().unwrap_or_else(|| self.report_load_errs())
     }
 
-    pub fn report_load_errs(&mut self) {
+    pub fn report_load_errs(&mut self) -> ! {
         let add = match self.root {
             &None => String::new(),
             &Some(ref r) => format!(" which `{}` depends on",
@@ -374,6 +368,7 @@ impl<'a> Context<'a> {
 
         err.emit();
         self.sess.abort_if_errors();
+        unreachable!();
     }
 
     fn find_library_crate(&mut self) -> Option<Library> {
index 102bcc10face13c51e98f17c2438b52f6c4c021e..911ca7e315c1f0147cc21110409495793d5a1dd1 100644
@@ -32,11 +32,11 @@ struct MacroLoader<'a> {
 }
 
 impl<'a> MacroLoader<'a> {
-    fn new(sess: &'a Session, cstore: &'a CStore) -> MacroLoader<'a> {
+    fn new(sess: &'a Session, cstore: &'a CStore, crate_name: &str) -> MacroLoader<'a> {
         MacroLoader {
             sess: sess,
             span_whitelist: HashSet::new(),
-            reader: CrateReader::new(sess, cstore),
+            reader: CrateReader::new(sess, cstore, crate_name),
             macros: vec![],
         }
     }
@@ -47,10 +47,13 @@ pub fn call_bad_macro_reexport(a: &Session, b: Span) {
 }
 
 /// Read exported macros.
-pub fn read_macro_defs(sess: &Session, cstore: &CStore, krate: &ast::Crate)
+pub fn read_macro_defs(sess: &Session,
+                       cstore: &CStore,
+                       krate: &ast::Crate,
+                       crate_name: &str)
                        -> Vec<ast::MacroDef>
 {
-    let mut loader = MacroLoader::new(sess, cstore);
+    let mut loader = MacroLoader::new(sess, cstore, crate_name);
 
     // We need to error on `#[macro_use] extern crate` when it isn't at the
     // crate root, because `$crate` won't work properly. Identify these by
index 37e661c21e15a1383f12fe24b60fd93c3649b4bd..782c7cba26ec59f472a7a4ec9ab88482a55d1c29 100644
@@ -14,9 +14,9 @@
 use rbml::opaque::Encoder as OpaqueEncoder;
 use rbml::opaque::Decoder as OpaqueDecoder;
 use rustc::middle::cstore::tls;
-use rustc::middle::def_id::DefId;
-use rustc::middle::subst::Substs;
-use rustc::middle::ty;
+use rustc::hir::def_id::DefId;
+use rustc::ty::subst::Substs;
+use rustc::ty::{self, TyCtxt};
 
 use decoder::{self, Cmd};
 use encoder;
@@ -25,7 +25,7 @@ use tyencode;
 
 impl<'a, 'tcx: 'a> tls::EncodingContext<'tcx> for encoder::EncodeContext<'a, 'tcx> {
 
-    fn tcx<'s>(&'s self) -> &'s ty::ctxt<'tcx> {
+    fn tcx<'s>(&'s self) -> &'s TyCtxt<'tcx> {
         &self.tcx
     }
 
@@ -40,12 +40,12 @@ impl<'a, 'tcx: 'a> tls::EncodingContext<'tcx> for encoder::EncodeContext<'a, 'tc
 
 pub struct DecodingContext<'a, 'tcx: 'a> {
     pub crate_metadata: Cmd<'a>,
-    pub tcx: &'a ty::ctxt<'tcx>,
+    pub tcx: &'a TyCtxt<'tcx>,
 }
 
 impl<'a, 'tcx: 'a> tls::DecodingContext<'tcx> for DecodingContext<'a, 'tcx> {
 
-    fn tcx<'s>(&'s self) -> &'s ty::ctxt<'tcx> {
+    fn tcx<'s>(&'s self) -> &'s TyCtxt<'tcx> {
         &self.tcx
     }
 
index 5a48d6019d69937b455ea860d760ed43e10e048f..3004246d1d7e7e9bdf279fd7b529e6b7a462c49e 100644
 
 #![allow(non_camel_case_types)]
 
-use rustc_front::hir;
+use rustc::hir;
 
-use middle::def_id::{DefId, DefIndex};
+use rustc::hir::def_id::{DefId, DefIndex};
 use middle::region;
-use middle::subst;
-use middle::subst::VecPerParamSpace;
-use middle::ty::{self, ToPredicate, Ty, TypeFoldable};
+use rustc::ty::subst;
+use rustc::ty::subst::VecPerParamSpace;
+use rustc::ty::{self, ToPredicate, Ty, TyCtxt, TypeFoldable};
 
 use rbml;
 use rbml::leb128;
@@ -41,12 +41,12 @@ pub struct TyDecoder<'a, 'tcx: 'a> {
     data: &'a [u8],
     krate: ast::CrateNum,
     pos: usize,
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     conv_def_id: DefIdConvert<'a>,
 }
 
 impl<'a,'tcx> TyDecoder<'a,'tcx> {
-    pub fn with_doc(tcx: &'a ty::ctxt<'tcx>,
+    pub fn with_doc(tcx: &'a TyCtxt<'tcx>,
                     crate_num: ast::CrateNum,
                     doc: rbml::Doc<'a>,
                     conv: DefIdConvert<'a>)
@@ -57,7 +57,7 @@ impl<'a,'tcx> TyDecoder<'a,'tcx> {
     pub fn new(data: &'a [u8],
                crate_num: ast::CrateNum,
                pos: usize,
-               tcx: &'a ty::ctxt<'tcx>,
+               tcx: &'a TyCtxt<'tcx>,
                conv: DefIdConvert<'a>)
                -> TyDecoder<'a, 'tcx> {
         TyDecoder {
@@ -144,22 +144,11 @@ impl<'a,'tcx> TyDecoder<'a,'tcx> {
     }
 
     pub fn parse_substs(&mut self) -> subst::Substs<'tcx> {
-        let regions = self.parse_region_substs();
+        let regions = self.parse_vec_per_param_space(|this| this.parse_region());
         let types = self.parse_vec_per_param_space(|this| this.parse_ty());
         subst::Substs { types: types, regions: regions }
     }
 
-    fn parse_region_substs(&mut self) -> subst::RegionSubsts {
-        match self.next() {
-            'e' => subst::ErasedRegions,
-            'n' => {
-                subst::NonerasedRegions(
-                    self.parse_vec_per_param_space(|this| this.parse_region()))
-            }
-            _ => panic!("parse_bound_region: bad input")
-        }
-    }
-
     fn parse_bound_region(&mut self) -> ty::BoundRegion {
         match self.next() {
             'a' => {
@@ -178,7 +167,7 @@ impl<'a,'tcx> TyDecoder<'a,'tcx> {
                 ty::BrFresh(id)
             }
             'e' => ty::BrEnv,
-            _ => panic!("parse_bound_region: bad input")
+            _ => bug!("parse_bound_region: bad input")
         }
     }
 
@@ -225,7 +214,7 @@ impl<'a,'tcx> TyDecoder<'a,'tcx> {
             'e' => {
                 ty::ReStatic
             }
-            _ => panic!("parse_region: bad input")
+            _ => bug!("parse_region: bad input")
         }
     }
 
@@ -277,7 +266,7 @@ impl<'a,'tcx> TyDecoder<'a,'tcx> {
                 };
                 region::CodeExtentData::Remainder(block_remainder)
             }
-            _ => panic!("parse_scope: bad input")
+            _ => bug!("parse_scope: bad input")
         })
     }
 
@@ -287,7 +276,7 @@ impl<'a,'tcx> TyDecoder<'a,'tcx> {
         match self.next() {
             'n' => None,
             's' => Some(f(self)),
-            _ => panic!("parse_opt: bad input")
+            _ => bug!("parse_opt: bad input")
         }
     }
 
@@ -326,7 +315,7 @@ impl<'a,'tcx> TyDecoder<'a,'tcx> {
                     'D' => return tcx.types.i64,
                     'f' => return tcx.types.f32,
                     'F' => return tcx.types.f64,
-                    _ => panic!("parse_ty: bad numeric type")
+                    _ => bug!("parse_ty: bad numeric type")
                 }
             }
             'c' => return tcx.types.char,
@@ -380,10 +369,11 @@ impl<'a,'tcx> TyDecoder<'a,'tcx> {
             }
             'F' => {
                 let def_id = self.parse_def();
-                return tcx.mk_fn(Some(def_id), tcx.mk_bare_fn(self.parse_bare_fn_ty()));
+                let substs = self.tcx.mk_substs(self.parse_substs());
+                return tcx.mk_fn_def(def_id, substs, self.parse_bare_fn_ty());
             }
             'G' => {
-                return tcx.mk_fn(None, tcx.mk_bare_fn(self.parse_bare_fn_ty()));
+                return tcx.mk_fn_ptr(self.parse_bare_fn_ty());
             }
             '#' => {
                 // This is a hacky little caching scheme. The idea is that if we encode
@@ -451,7 +441,7 @@ impl<'a,'tcx> TyDecoder<'a,'tcx> {
             'e' => {
                 return tcx.types.err;
             }
-            c => { panic!("unexpected char in type string: {}", c);}
+            c => { bug!("unexpected char in type string: {}", c);}
         }
     }
 
@@ -533,7 +523,7 @@ impl<'a,'tcx> TyDecoder<'a,'tcx> {
         let variadic = match self.next() {
             'V' => true,
             'N' => false,
-            r => panic!(format!("bad variadic: {}", r)),
+            r => bug!("bad variadic: {}", r),
         };
         let output = match self.peek() {
             'z' => {
@@ -563,7 +553,7 @@ impl<'a,'tcx> TyDecoder<'a,'tcx> {
                 assert_eq!(self.next(), '|');
                 ty::Predicate::ObjectSafe(def_id)
             }
-            c => panic!("Encountered invalid character in metadata: {}", c)
+            c => bug!("Encountered invalid character in metadata: {}", c)
         }
     }
 
@@ -612,7 +602,7 @@ impl<'a,'tcx> TyDecoder<'a,'tcx> {
                 'R' => bounds.push(self.parse_region()),
                 '.' => { break; }
                 c => {
-                    panic!("parse_region_param_def: bad bounds ('{}')", c)
+                    bug!("parse_region_param_def: bad bounds ('{}')", c)
                 }
             }
         }
@@ -634,7 +624,7 @@ impl<'a,'tcx> TyDecoder<'a,'tcx> {
                 let region = self.parse_region();
                 ty::ObjectLifetimeDefault::Specific(region)
             }
-            _ => panic!("parse_object_lifetime_default: bad input")
+            _ => bug!("parse_object_lifetime_default: bad input")
         }
     }
 
@@ -650,7 +640,7 @@ impl<'a,'tcx> TyDecoder<'a,'tcx> {
                 }
                 '.' => { break; }
                 c => {
-                    panic!("parse_bounds: bad bounds ('{}')", c)
+                    bug!("parse_bounds: bad bounds ('{}')", c)
                 }
             }
         }
@@ -679,7 +669,7 @@ impl<'a,'tcx> TyDecoder<'a,'tcx> {
                     return builtin_bounds;
                 }
                 c => {
-                    panic!("parse_bounds: bad builtin bounds ('{}')", c)
+                    bug!("parse_bounds: bad builtin bounds ('{}')", c)
                 }
             }
         }
@@ -693,7 +683,7 @@ fn parse_defid(buf: &[u8]) -> DefId {
     while colon_idx < len && buf[colon_idx] != ':' as u8 { colon_idx += 1; }
     if colon_idx == len {
         error!("didn't find ':' when parsing def id");
-        panic!();
+        bug!();
     }
 
     let crate_part = &buf[0..colon_idx];
@@ -703,14 +693,14 @@ fn parse_defid(buf: &[u8]) -> DefId {
         s.parse::<usize>().ok()
     }) {
         Some(cn) => cn as ast::CrateNum,
-        None => panic!("internal error: parse_defid: crate number expected, found {:?}",
+        None => bug!("internal error: parse_defid: crate number expected, found {:?}",
                        crate_part)
     };
     let def_num = match str::from_utf8(def_part).ok().and_then(|s| {
         s.parse::<usize>().ok()
     }) {
         Some(dn) => dn,
-        None => panic!("internal error: parse_defid: id expected, found {:?}",
+        None => bug!("internal error: parse_defid: id expected, found {:?}",
                        def_part)
     };
     let index = DefIndex::new(def_num);
@@ -721,6 +711,6 @@ fn parse_unsafety(c: char) -> hir::Unsafety {
     match c {
         'u' => hir::Unsafety::Unsafe,
         'n' => hir::Unsafety::Normal,
-        _ => panic!("parse_unsafety: bad unsafety {}", c)
+        _ => bug!("parse_unsafety: bad unsafety {}", c)
     }
 }
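
The tydecode hunks above swap every bare panic!() for bug!()/span_bug!(). As far as this excerpt shows, these are rustc's diagnostic macros for reporting an internal compiler error through the compiler's error-handling machinery instead of unwinding with an arbitrary message; their real definitions are not part of this diff. A minimal standalone sketch of the idea, using a hypothetical simplified macro (not rustc's actual one):

    // Hypothetical, simplified sketch: funnel "impossible" situations through
    // one macro so every internal error carries a uniform prefix instead of an
    // ad-hoc panic message.
    macro_rules! bug {
        ($($arg:tt)*) => {
            panic!("internal compiler error: {}", format_args!($($arg)*))
        };
    }

    fn parse_unsafety(c: char) -> bool {
        match c {
            'u' => true,
            'n' => false,
            _ => bug!("parse_unsafety: bad unsafety {}", c),
        }
    }

    fn main() {
        assert!(parse_unsafety('u'));
        assert!(!parse_unsafety('n'));
    }
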
index b41ff5977fef154fc76d482b8ff7b4f0862301d8..f49c2e22c6a804cc97a7bbf9abcac3fc1f9d21bf 100644 (file)
@@ -17,15 +17,15 @@ use std::cell::RefCell;
 use std::io::Cursor;
 use std::io::prelude::*;
 
-use middle::def_id::DefId;
+use rustc::hir::def_id::DefId;
 use middle::region;
-use middle::subst;
-use middle::subst::VecPerParamSpace;
-use middle::ty::ParamTy;
-use middle::ty::{self, Ty};
+use rustc::ty::subst;
+use rustc::ty::subst::VecPerParamSpace;
+use rustc::ty::ParamTy;
+use rustc::ty::{self, Ty, TyCtxt};
 use rustc::util::nodemap::FnvHashMap;
 
-use rustc_front::hir;
+use rustc::hir;
 
 use syntax::abi::Abi;
 use syntax::ast;
@@ -37,9 +37,9 @@ use encoder;
 pub struct ctxt<'a, 'tcx: 'a> {
     pub diag: &'a Handler,
     // Def -> str Callback:
-    pub ds: fn(DefId) -> String,
+    pub ds: fn(&TyCtxt<'tcx>, DefId) -> String,
     // The type context.
-    pub tcx: &'a ty::ctxt<'tcx>,
+    pub tcx: &'a TyCtxt<'tcx>,
     pub abbrevs: &'a abbrev_map<'tcx>
 }
 
@@ -99,7 +99,7 @@ pub fn enc_ty<'a, 'tcx>(w: &mut Cursor<Vec<u8>>, cx: &ctxt<'a, 'tcx>, t: Ty<'tcx
             };
         }
         ty::TyEnum(def, substs) => {
-            write!(w, "t[{}|", (cx.ds)(def.did));
+            write!(w, "t[{}|", (cx.ds)(cx.tcx, def.did));
             enc_substs(w, cx, substs);
             write!(w, "]");
         }
@@ -135,28 +135,29 @@ pub fn enc_ty<'a, 'tcx>(w: &mut Cursor<Vec<u8>>, cx: &ctxt<'a, 'tcx>, t: Ty<'tcx
         ty::TyStr => {
             write!(w, "v");
         }
-        ty::TyBareFn(Some(def_id), f) => {
+        ty::TyFnDef(def_id, substs, f) => {
             write!(w, "F");
-            write!(w, "{}|", (cx.ds)(def_id));
+            write!(w, "{}|", (cx.ds)(cx.tcx, def_id));
+            enc_substs(w, cx, substs);
             enc_bare_fn_ty(w, cx, f);
         }
-        ty::TyBareFn(None, f) => {
+        ty::TyFnPtr(f) => {
             write!(w, "G");
             enc_bare_fn_ty(w, cx, f);
         }
         ty::TyInfer(_) => {
-            cx.diag.bug("cannot encode inference variable types");
+            bug!("cannot encode inference variable types");
         }
         ty::TyParam(ParamTy {space, idx, name}) => {
             write!(w, "p[{}|{}|{}]", idx, space.to_uint(), name);
         }
         ty::TyStruct(def, substs) => {
-            write!(w, "a[{}|", (cx.ds)(def.did));
+            write!(w, "a[{}|", (cx.ds)(cx.tcx, def.did));
             enc_substs(w, cx, substs);
             write!(w, "]");
         }
         ty::TyClosure(def, ref substs) => {
-            write!(w, "k[{}|", (cx.ds)(def));
+            write!(w, "k[{}|", (cx.ds)(cx.tcx, def));
             enc_substs(w, cx, &substs.func_substs);
             for ty in &substs.upvar_tys {
                 enc_ty(w, cx, ty);
@@ -245,24 +246,12 @@ fn enc_vec_per_param_space<'a, 'tcx, T, F>(w: &mut Cursor<Vec<u8>>,
 
 pub fn enc_substs<'a, 'tcx>(w: &mut Cursor<Vec<u8>>, cx: &ctxt<'a, 'tcx>,
                             substs: &subst::Substs<'tcx>) {
-    enc_region_substs(w, cx, &substs.regions);
+    enc_vec_per_param_space(w, cx, &substs.regions,
+                            |w, cx, &r| enc_region(w, cx, r));
     enc_vec_per_param_space(w, cx, &substs.types,
                             |w, cx, &ty| enc_ty(w, cx, ty));
 }
 
-fn enc_region_substs(w: &mut Cursor<Vec<u8>>, cx: &ctxt, substs: &subst::RegionSubsts) {
-    match *substs {
-        subst::ErasedRegions => {
-            write!(w, "e");
-        }
-        subst::NonerasedRegions(ref regions) => {
-            write!(w, "n");
-            enc_vec_per_param_space(w, cx, regions,
-                                    |w, cx, &r| enc_region(w, cx, r));
-        }
-    }
-}
-
 pub fn enc_region(w: &mut Cursor<Vec<u8>>, cx: &ctxt, r: ty::Region) {
     match r {
         ty::ReLateBound(id, br) => {
@@ -296,7 +285,7 @@ pub fn enc_region(w: &mut Cursor<Vec<u8>>, cx: &ctxt, r: ty::Region) {
         }
         ty::ReVar(_) | ty::ReSkolemized(..) => {
             // these should not crop up after typeck
-            cx.diag.bug("cannot encode region variables");
+            bug!("cannot encode region variables");
         }
     }
 }
@@ -321,7 +310,7 @@ fn enc_bound_region(w: &mut Cursor<Vec<u8>>, cx: &ctxt, br: ty::BoundRegion) {
         }
         ty::BrNamed(d, name) => {
             write!(w, "[{}|{}]",
-                     (cx.ds)(d),
+                     (cx.ds)(cx.tcx, d),
                      name);
         }
         ty::BrFresh(id) => {
@@ -335,7 +324,7 @@ fn enc_bound_region(w: &mut Cursor<Vec<u8>>, cx: &ctxt, br: ty::BoundRegion) {
 
 pub fn enc_trait_ref<'a, 'tcx>(w: &mut Cursor<Vec<u8>>, cx: &ctxt<'a, 'tcx>,
                                s: ty::TraitRef<'tcx>) {
-    write!(w, "{}|", (cx.ds)(s.def_id));
+    write!(w, "{}|", (cx.ds)(cx.tcx, s.def_id));
     enc_substs(w, cx, s.substs);
 }
 
@@ -419,8 +408,8 @@ pub fn enc_existential_bounds<'a,'tcx>(w: &mut Cursor<Vec<u8>>,
 pub fn enc_type_param_def<'a, 'tcx>(w: &mut Cursor<Vec<u8>>, cx: &ctxt<'a, 'tcx>,
                                     v: &ty::TypeParameterDef<'tcx>) {
     write!(w, "{}:{}|{}|{}|{}|",
-             v.name, (cx.ds)(v.def_id),
-             v.space.to_uint(), v.index, (cx.ds)(v.default_def_id));
+             v.name, (cx.ds)(cx.tcx, v.def_id),
+             v.space.to_uint(), v.index, (cx.ds)(cx.tcx, v.default_def_id));
     enc_opt(w, v.default, |w, t| enc_ty(w, cx, t));
     enc_object_lifetime_default(w, cx, v.object_lifetime_default);
 }
@@ -428,7 +417,7 @@ pub fn enc_type_param_def<'a, 'tcx>(w: &mut Cursor<Vec<u8>>, cx: &ctxt<'a, 'tcx>
 pub fn enc_region_param_def(w: &mut Cursor<Vec<u8>>, cx: &ctxt,
                             v: &ty::RegionParameterDef) {
     write!(w, "{}:{}|{}|{}|",
-             v.name, (cx.ds)(v.def_id),
+             v.name, (cx.ds)(cx.tcx, v.def_id),
              v.space.to_uint(), v.index);
     for &r in &v.bounds {
         write!(w, "R");
@@ -488,7 +477,7 @@ pub fn enc_predicate<'a, 'tcx>(w: &mut Cursor<Vec<u8>>,
             enc_ty(w, cx, data);
         }
         ty::Predicate::ObjectSafe(trait_def_id) => {
-            write!(w, "O{}|", (cx.ds)(trait_def_id));
+            write!(w, "O{}|", (cx.ds)(cx.tcx, trait_def_id));
         }
     }
 }
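
The encoder hunks above split the old TyBareFn(Some(def_id), f) / TyBareFn(None, f) cases into TyFnDef (a function item, encoded with its DefId and substs under tag 'F') and TyFnPtr (tag 'G'). A standalone illustration of the surface-level distinction this mirrors, under the assumption that a fn item has its own zero-sized type while a fn pointer is pointer-sized (true in current Rust):

    // Standalone illustration (assumes current Rust semantics): each fn item
    // has its own zero-sized type, while a fn pointer is pointer-sized.
    fn add_one(x: i32) -> i32 { x + 1 }

    fn main() {
        let item = add_one;                  // fn item ("fn def"): zero-sized
        let ptr: fn(i32) -> i32 = add_one;   // fn pointer: one erased type
        assert_eq!(std::mem::size_of_val(&item), 0);
        assert_eq!(std::mem::size_of_val(&ptr), std::mem::size_of::<usize>());
        assert_eq!(item(1), ptr(1));
    }
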
index 93817ab0db61b782f7d0512e12430e8a69c277d6..1c41ca6f416e19dabda36e2884e68fb137772754 100644 (file)
@@ -13,6 +13,7 @@ graphviz = { path = "../libgraphviz" }
 log = { path = "../liblog" }
 rustc = { path = "../librustc" }
 rustc_back = { path = "../librustc_back" }
+rustc_const_eval = { path = "../librustc_const_eval" }
+rustc_const_math = { path = "../librustc_const_math" }
 rustc_data_structures = { path = "../librustc_data_structures" }
-rustc_front = { path = "../librustc_front" }
 syntax = { path = "../libsyntax" }
index 12b9130b48c612c9081914e2af66f6597a819951..8c98408e2390ae2cfb8998c4720681a765824396 100644 (file)
@@ -11,7 +11,7 @@
 use build::{BlockAnd, BlockAndExtension, Builder};
 use hair::*;
 use rustc::mir::repr::*;
-use rustc_front::hir;
+use rustc::hir;
 
 impl<'a,'tcx> Builder<'a,'tcx> {
     pub fn ast_block(&mut self,
@@ -20,15 +20,67 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                      ast_block: &'tcx hir::Block)
                      -> BlockAnd<()> {
         let Block { extent, span, stmts, expr } = self.hir.mirror(ast_block);
-        self.in_scope(extent, block, move |this| {
-            unpack!(block = this.stmts(block, stmts));
-            match expr {
-                Some(expr) => this.into(destination, block, expr),
-                None => {
-                    this.cfg.push_assign_unit(block, span, destination);
-                    block.unit()
+        self.in_scope(extent, block, move |this, _| {
+            // This convoluted structure is to avoid using recursion as we walk down a list
+            // of statements. Basically, the structure we get back is something like:
+            //
+            //    let x = <init> in {
+            //       expr1;
+            //       let y = <init> in {
+            //           expr2;
+            //           expr3;
+            //           ...
+            //       }
+            //    }
+            //
+            // The let bindings are valid until the end of the block, so all we have to do is
+            // pop all the let-scopes at the end.
+            //
+            // First we build all the statements in the block.
+            let mut let_extent_stack = Vec::with_capacity(8);
+            for stmt in stmts {
+                let Stmt { span: _, kind } = this.hir.mirror(stmt);
+                match kind {
+                    StmtKind::Expr { scope, expr } => {
+                        unpack!(block = this.in_scope(scope, block, |this, _| {
+                            let expr = this.hir.mirror(expr);
+                            let expr_span = expr.span;
+                            let temp = this.temp(expr.ty.clone());
+                            unpack!(block = this.into(&temp, block, expr));
+                            unpack!(block = this.build_drop(block, expr_span, temp));
+                            block.unit()
+                        }));
+                    }
+                    StmtKind::Let { remainder_scope, init_scope, pattern, initializer } => {
+                        let remainder_scope_id = this.push_scope(remainder_scope, block);
+                        let_extent_stack.push(remainder_scope);
+                        unpack!(block = this.in_scope(init_scope, block, move |this, _| {
+                            // FIXME #30046                              ^~~~
+                            if let Some(init) = initializer {
+                                this.expr_into_pattern(block, remainder_scope_id, pattern, init)
+                            } else {
+                                this.declare_bindings(remainder_scope_id, &pattern);
+                                block.unit()
+                            }
+                        }));
+                    }
                 }
             }
+            // Then, the block may have an optional trailing expression which is a “return” value
+            // of the block.
+            if let Some(expr) = expr {
+                unpack!(block = this.into(destination, block, expr));
+            } else {
+                // FIXME(#31472)
+                let scope_id = this.innermost_scope_id();
+                this.cfg.push_assign_unit(block, scope_id, span, destination);
+            }
+            // Finally, we pop all the let scopes before exiting out from the scope of block
+            // itself.
+            for extent in let_extent_stack.into_iter().rev() {
+                unpack!(block = this.pop_scope(extent, block));
+            }
+            block.unit()
         })
     }
 }
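
The long comment in the ast_block hunk above describes the lowering strategy: statements are walked iteratively, each `let` opens a scope that covers the remainder of the block, and all of those let-scopes are popped together after the trailing expression has been lowered. An ordinary Rust block showing which statements conceptually live in which scope:

    fn main() {
        // Conceptual nesting: `let x` opens a scope covering everything after
        // it; `let y` opens a further scope covering the last statement and
        // the trailing expression. Both scopes are popped when the block ends.
        let tail = {
            println!("expr1");
            let x = 1;
            println!("expr2: x = {}", x);
            let y = 2;
            println!("expr3: y = {}", y);
            x + y // trailing expression: the block's value
        };
        assert_eq!(tail, 3);
    }
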
index c7147d111aace02a0dddeb90e6e8107a629f700c..4859257f291c9767f694b9d4171ddcfeaabb0805 100644 (file)
@@ -13,7 +13,7 @@
 
 //! Routines for manipulating the control-flow graph.
 
-use build::CFG;
+use build::{CFG, Location};
 use rustc::mir::repr::*;
 use syntax::codemap::Span;
 
@@ -43,12 +43,19 @@ impl<'tcx> CFG<'tcx> {
         self.block_data_mut(block).statements.push(statement);
     }
 
+    pub fn current_location(&mut self, block: BasicBlock) -> Location {
+        let index = self.block_data(block).statements.len();
+        Location { block: block, statement_index: index }
+    }
+
     pub fn push_assign(&mut self,
                        block: BasicBlock,
+                       scope: ScopeId,
                        span: Span,
                        lvalue: &Lvalue<'tcx>,
                        rvalue: Rvalue<'tcx>) {
         self.push(block, Statement {
+            scope: scope,
             span: span,
             kind: StatementKind::Assign(lvalue.clone(), rvalue)
         });
@@ -56,26 +63,35 @@ impl<'tcx> CFG<'tcx> {
 
     pub fn push_assign_constant(&mut self,
                                 block: BasicBlock,
+                                scope: ScopeId,
                                 span: Span,
                                 temp: &Lvalue<'tcx>,
                                 constant: Constant<'tcx>) {
-        self.push_assign(block, span, temp, Rvalue::Use(Operand::Constant(constant)));
+        self.push_assign(block, scope, span, temp,
+                         Rvalue::Use(Operand::Constant(constant)));
     }
 
     pub fn push_assign_unit(&mut self,
                             block: BasicBlock,
+                            scope: ScopeId,
                             span: Span,
                             lvalue: &Lvalue<'tcx>) {
-        self.push_assign(block, span, lvalue, Rvalue::Aggregate(
+        self.push_assign(block, scope, span, lvalue, Rvalue::Aggregate(
             AggregateKind::Tuple, vec![]
         ));
     }
 
     pub fn terminate(&mut self,
                      block: BasicBlock,
-                     terminator: Terminator<'tcx>) {
+                     scope: ScopeId,
+                     span: Span,
+                     kind: TerminatorKind<'tcx>) {
         debug_assert!(self.block_data(block).terminator.is_none(),
                       "terminate: block {:?} already has a terminator set", block);
-        self.block_data_mut(block).terminator = Some(terminator);
+        self.block_data_mut(block).terminator = Some(Terminator {
+            span: span,
+            scope: scope,
+            kind: kind,
+        });
     }
 }
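
The CFG helper changes above indicate that terminators now carry bookkeeping alongside their variant: `terminate` takes a ScopeId and Span and wraps the payload as `Terminator { span, scope, kind }`, with the variants moved to TerminatorKind. A reduced standalone sketch of that struct/enum split (field names follow the diff; Span, ScopeId and BasicBlock are simplified stand-ins):

    // Reduced sketch of the Terminator / TerminatorKind split.
    struct Span { lo: u32, hi: u32 }
    type ScopeId = usize;
    type BasicBlock = usize;

    #[derive(Debug)]
    enum TerminatorKind {
        Goto { target: BasicBlock },
        If { targets: (BasicBlock, BasicBlock) },
    }

    struct Terminator {
        span: Span,     // source location the edge came from
        scope: ScopeId, // lexical scope the edge belongs to
        kind: TerminatorKind,
    }

    fn terminate(scope: ScopeId, span: Span, kind: TerminatorKind) -> Terminator {
        Terminator { span: span, scope: scope, kind: kind }
    }

    fn main() {
        let jump = terminate(0, Span { lo: 10, hi: 14 }, TerminatorKind::Goto { target: 1 });
        let branch = terminate(1, Span { lo: 20, hi: 25 }, TerminatorKind::If { targets: (2, 3) });
        for t in [jump, branch].iter() {
            println!("edge at {}..{} in scope {}: {:?}", t.span.lo, t.span.hi, t.scope, t.kind);
        }
    }
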
index 6f186b8ada426f5ed909861e83639f64e5cadc2a..d97245a5fc21bc6efb853c30e00198f84c58a5cd 100644 (file)
@@ -33,9 +33,10 @@ impl<'a,'tcx> Builder<'a,'tcx> {
             ExprKind::Literal { literal } =>
                 Constant { span: span, ty: ty, literal: literal },
             _ =>
-                this.hir.span_bug(
+                span_bug!(
                     span,
-                    &format!("expression is not a valid constant {:?}", kind)),
+                    "expression is not a valid constant {:?}",
+                    kind),
         }
     }
 }
index b2c7507ed7b282aedca237bd5e6d53c0fa258908..0c9323f4af3781ccacb2965cb57ab8e561370760 100644 (file)
@@ -34,10 +34,11 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         debug!("expr_as_lvalue(block={:?}, expr={:?})", block, expr);
 
         let this = self;
+        let scope_id = this.innermost_scope_id();
         let expr_span = expr.span;
         match expr.kind {
             ExprKind::Scope { extent, value } => {
-                this.in_scope(extent, block, |this| this.as_lvalue(block, value))
+                this.in_scope(extent, block, |this, _| this.as_lvalue(block, value))
             }
             ExprKind::Field { lhs, name } => {
                 let lvalue = unpack!(block = this.as_lvalue(block, lhs));
@@ -58,16 +59,18 @@ impl<'a,'tcx> Builder<'a,'tcx> {
 
                 // bounds check:
                 let (len, lt) = (this.temp(usize_ty.clone()), this.temp(bool_ty));
-                this.cfg.push_assign(block, expr_span, // len = len(slice)
+                this.cfg.push_assign(block, scope_id, expr_span, // len = len(slice)
                                      &len, Rvalue::Len(slice.clone()));
-                this.cfg.push_assign(block, expr_span, // lt = idx < len
+                this.cfg.push_assign(block, scope_id, expr_span, // lt = idx < len
                                      &lt, Rvalue::BinaryOp(BinOp::Lt,
                                                            idx.clone(),
                                                            Operand::Consume(len.clone())));
 
                 let (success, failure) = (this.cfg.start_new_block(), this.cfg.start_new_block());
                 this.cfg.terminate(block,
-                                   Terminator::If {
+                                   scope_id,
+                                   expr_span,
+                                   TerminatorKind::If {
                                        cond: Operand::Consume(lt),
                                        targets: (success, failure),
                                    });
index 7738ebca26b47e17caf5667232bb827ce8e3737d..661d01ce989cd17595224db55a8b85c7d511d566 100644 (file)
@@ -35,7 +35,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         let this = self;
 
         if let ExprKind::Scope { extent, value } = expr.kind {
-            return this.in_scope(extent, block, |this| this.as_operand(block, value));
+            return this.in_scope(extent, block, |this, _| this.as_operand(block, value));
         }
 
         let category = Category::of(&expr.kind).unwrap();
index 2338d7df01a854096460e830136be12b907d1249..b340d933e64c3f35f764399a378be2f28f0da6a0 100644 (file)
@@ -33,14 +33,27 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         debug!("expr_as_rvalue(block={:?}, expr={:?})", block, expr);
 
         let this = self;
+        let scope_id = this.innermost_scope_id();
         let expr_span = expr.span;
 
         match expr.kind {
             ExprKind::Scope { extent, value } => {
-                this.in_scope(extent, block, |this| this.as_rvalue(block, value))
+                this.in_scope(extent, block, |this, _| this.as_rvalue(block, value))
             }
-            ExprKind::InlineAsm { asm } => {
-                block.and(Rvalue::InlineAsm(asm.clone()))
+            ExprKind::InlineAsm { asm, outputs, inputs } => {
+                let outputs = outputs.into_iter().map(|output| {
+                    unpack!(block = this.as_lvalue(block, output))
+                }).collect();
+
+                let inputs = inputs.into_iter().map(|input| {
+                    unpack!(block = this.as_operand(block, input))
+                }).collect();
+
+                block.and(Rvalue::InlineAsm {
+                    asm: asm.clone(),
+                    outputs: outputs,
+                    inputs: inputs
+                })
             }
             ExprKind::Repeat { value, count } => {
                 let value_operand = unpack!(block = this.as_operand(block, value));
@@ -63,8 +76,8 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 let value = this.hir.mirror(value);
                 let result = this.temp(expr.ty);
                 // to start, malloc some memory of suitable type (thus far, uninitialized):
-                this.cfg.push_assign(block, expr_span, &result, Rvalue::Box(value.ty));
-                this.in_scope(value_extents, block, |this| {
+                this.cfg.push_assign(block, scope_id, expr_span, &result, Rvalue::Box(value.ty));
+                this.in_scope(value_extents, block, |this, _| {
                     // schedule a shallow free of that memory, lest we unwind:
                     this.schedule_box_free(expr_span, value_extents, &result, value.ty);
                     // initialize the box contents:
@@ -73,8 +86,13 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 })
             }
             ExprKind::Cast { source } => {
-                let source = unpack!(block = this.as_operand(block, source));
-                block.and(Rvalue::Cast(CastKind::Misc, source, expr.ty))
+                let source = this.hir.mirror(source);
+                if source.ty == expr.ty {
+                    this.expr_as_rvalue(block, source)
+                } else {
+                    let source = unpack!(block = this.as_operand(block, source));
+                    block.and(Rvalue::Cast(CastKind::Misc, source, expr.ty))
+                }
             }
             ExprKind::ReifyFnPointer { source } => {
                 let source = unpack!(block = this.as_operand(block, source));
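
The Cast hunk above makes the builder notice when the source expression already has the target type and reuse its rvalue directly instead of emitting a CastKind::Misc cast. At the source level those are no-op casts such as:

    fn main() {
        let x: u32 = 5;
        let y = x as u32;          // no-op cast: source and target types match
        let z = 5u8 as u16 as u16; // the second `as u16` is likewise a no-op
        assert_eq!(y, 5);
        assert_eq!(z, 5);
    }
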
index 27c374e1ac2c468591118dcb40e0654fd73115cf..a2f7d2c9d725fdac9ae2ef1d2eb45ec5068019c4 100644 (file)
@@ -30,7 +30,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         let this = self;
 
         if let ExprKind::Scope { extent, value } = expr.kind {
-            return this.in_scope(extent, block, |this| this.as_temp(block, value));
+            return this.in_scope(extent, block, |this, _| this.as_temp(block, value));
         }
 
         let expr_ty = expr.ty.clone();
@@ -38,7 +38,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         let temp_lifetime = match expr.temp_lifetime {
             Some(t) => t,
             None => {
-                this.hir.span_bug(expr.span, "no temp_lifetime for expr");
+                span_bug!(expr.span, "no temp_lifetime for expr");
             }
         };
         this.schedule_drop(expr.span, temp_lifetime, &temp, expr_ty);
@@ -55,7 +55,8 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 let expr_span = expr.span;
                 let lvalue = unpack!(block = this.as_lvalue(block, expr));
                 let rvalue = Rvalue::Use(Operand::Consume(lvalue));
-                this.cfg.push_assign(block, expr_span, &temp, rvalue);
+                let scope_id = this.innermost_scope_id();
+                this.cfg.push_assign(block, scope_id, expr_span, &temp, rvalue);
             }
             _ => {
                 unpack!(block = this.into(&temp, block, expr));
index ca00b99b108987b2aa57f8e2d95c49ddeccc115b..231d7da10a02dc53ad2e8615f6a5b0e388167e93 100644 (file)
@@ -15,7 +15,7 @@ use build::expr::category::{Category, RvalueFunc};
 use build::scope::LoopScope;
 use hair::*;
 use rustc::middle::region::CodeExtent;
-use rustc::middle::ty;
+use rustc::ty;
 use rustc::mir::repr::*;
 use syntax::codemap::Span;
 
@@ -36,10 +36,11 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         // just use the name `this` uniformly
         let this = self;
         let expr_span = expr.span;
+        let scope_id = this.innermost_scope_id();
 
         match expr.kind {
             ExprKind::Scope { extent, value } => {
-                this.in_scope(extent, block, |this| this.into(destination, block, value))
+                this.in_scope(extent, block, |this, _| this.into(destination, block, value))
             }
             ExprKind::Block { body: ast_block } => {
                 this.ast_block(destination, block, ast_block)
@@ -52,7 +53,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
 
                 let mut then_block = this.cfg.start_new_block();
                 let mut else_block = this.cfg.start_new_block();
-                this.cfg.terminate(block, Terminator::If {
+                this.cfg.terminate(block, scope_id, expr_span, TerminatorKind::If {
                     cond: operand,
                     targets: (then_block, else_block)
                 });
@@ -63,13 +64,20 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 } else {
                     // Body of the `if` expression without an `else` clause must return `()`, thus
                     // we implicitly generate a `else {}` if it is not specified.
-                    this.cfg.push_assign_unit(else_block, expr_span, destination);
+                    let scope_id = this.innermost_scope_id();
+                    this.cfg.push_assign_unit(else_block, scope_id, expr_span, destination);
                     else_block
                 };
 
                 let join_block = this.cfg.start_new_block();
-                this.cfg.terminate(then_block, Terminator::Goto { target: join_block });
-                this.cfg.terminate(else_block, Terminator::Goto { target: join_block });
+                this.cfg.terminate(then_block,
+                                   scope_id,
+                                   expr_span,
+                                   TerminatorKind::Goto { target: join_block });
+                this.cfg.terminate(else_block,
+                                   scope_id,
+                                   expr_span,
+                                   TerminatorKind::Goto { target: join_block });
 
                 join_block.unit()
             }
@@ -95,16 +103,19 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                     LogicalOp::And => (else_block, false_block),
                     LogicalOp::Or => (true_block, else_block),
                 };
-                this.cfg.terminate(block, Terminator::If { cond: lhs, targets: blocks });
+                this.cfg.terminate(block,
+                                   scope_id,
+                                   expr_span,
+                                   TerminatorKind::If { cond: lhs, targets: blocks });
 
                 let rhs = unpack!(else_block = this.as_operand(else_block, rhs));
-                this.cfg.terminate(else_block, Terminator::If {
+                this.cfg.terminate(else_block, scope_id, expr_span, TerminatorKind::If {
                     cond: rhs,
                     targets: (true_block, false_block)
                 });
 
                 this.cfg.push_assign_constant(
-                    true_block, expr_span, destination,
+                    true_block, scope_id, expr_span, destination,
                     Constant {
                         span: expr_span,
                         ty: this.hir.bool_ty(),
@@ -112,15 +123,21 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                     });
 
                 this.cfg.push_assign_constant(
-                    false_block, expr_span, destination,
+                    false_block, scope_id, expr_span, destination,
                     Constant {
                         span: expr_span,
                         ty: this.hir.bool_ty(),
                         literal: this.hir.false_literal(),
                     });
 
-                this.cfg.terminate(true_block, Terminator::Goto { target: join_block });
-                this.cfg.terminate(false_block, Terminator::Goto { target: join_block });
+                this.cfg.terminate(true_block,
+                                   scope_id,
+                                   expr_span,
+                                   TerminatorKind::Goto { target: join_block });
+                this.cfg.terminate(false_block,
+                                   scope_id,
+                                   expr_span,
+                                   TerminatorKind::Goto { target: join_block });
 
                 join_block.unit()
             }
@@ -144,7 +161,10 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 let exit_block = this.cfg.start_new_block();
 
                 // start the loop
-                this.cfg.terminate(block, Terminator::Goto { target: loop_block });
+                this.cfg.terminate(block,
+                                   scope_id,
+                                   expr_span,
+                                   TerminatorKind::Goto { target: loop_block });
 
                 let might_break = this.in_loop_scope(loop_block, exit_block, move |this| {
                     // conduct the test, if necessary
@@ -157,7 +177,9 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                         let cond = unpack!(loop_block_end = this.as_operand(loop_block, cond_expr));
                         body_block = this.cfg.start_new_block();
                         this.cfg.terminate(loop_block_end,
-                                           Terminator::If {
+                                           scope_id,
+                                           expr_span,
+                                           TerminatorKind::If {
                                                cond: cond,
                                                targets: (body_block, exit_block)
                                            });
@@ -173,12 +195,15 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                     let tmp = this.get_unit_temp();
                     // Execute the body, branching back to the test.
                     let body_block_end = unpack!(this.into(&tmp, body_block, body));
-                    this.cfg.terminate(body_block_end, Terminator::Goto { target: loop_block });
+                    this.cfg.terminate(body_block_end,
+                                       scope_id,
+                                       expr_span,
+                                       TerminatorKind::Goto { target: loop_block });
                 });
                 // If the loop may reach its exit_block, we assign an empty tuple to the
                 // destination to keep the MIR well-formed.
                 if might_break {
-                    this.cfg.push_assign_unit(exit_block, expr_span, destination);
+                    this.cfg.push_assign_unit(exit_block, scope_id, expr_span, destination);
                 }
                 exit_block.unit()
             }
@@ -186,10 +211,12 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 // Note: we evaluate assignments right-to-left. This
                 // is better for borrowck interaction with overloaded
                 // operators like x[j] = x[i].
+                let lhs = this.hir.mirror(lhs);
+                let lhs_span = lhs.span;
                 let rhs = unpack!(block = this.as_operand(block, rhs));
                 let lhs = unpack!(block = this.as_lvalue(block, lhs));
-                unpack!(block = this.build_drop(block, lhs.clone()));
-                this.cfg.push_assign(block, expr_span, &lhs, Rvalue::Use(rhs));
+                unpack!(block = this.build_drop(block, lhs_span, lhs.clone()));
+                this.cfg.push_assign(block, scope_id, expr_span, &lhs, Rvalue::Use(rhs));
                 block.unit()
             }
             ExprKind::AssignOp { op, lhs, rhs } => {
@@ -208,7 +235,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 // we don't have to drop prior contents or anything
                 // because AssignOp is only legal for Copy types
                 // (overloaded ops should be desugared into a call).
-                this.cfg.push_assign(block, expr_span, &lhs,
+                this.cfg.push_assign(block, scope_id, expr_span, &lhs,
                                      Rvalue::BinaryOp(op,
                                                       Operand::Consume(lhs.clone()),
                                                       rhs));
@@ -229,17 +256,20 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 block = match value {
                     Some(value) => unpack!(this.into(&Lvalue::ReturnPointer, block, value)),
                     None => {
-                        this.cfg.push_assign_unit(block, expr_span, &Lvalue::ReturnPointer);
+                        this.cfg.push_assign_unit(block, scope_id,
+                                                  expr_span, &Lvalue::ReturnPointer);
                         block
                     }
                 };
-                let extent = this.extent_of_outermost_scope();
+                let extent = this.extent_of_return_scope();
                 this.exit_scope(expr_span, extent, block, END_BLOCK);
                 this.cfg.start_new_block().unit()
             }
             ExprKind::Call { ty, fun, args } => {
                 let diverges = match ty.sty {
-                    ty::TyBareFn(_, ref f) => f.sig.0.output.diverges(),
+                    ty::TyFnDef(_, _, ref f) | ty::TyFnPtr(ref f) => {
+                        f.sig.0.output.diverges()
+                    }
                     _ => false
                 };
                 let fun = unpack!(block = this.as_operand(block, fun));
@@ -250,7 +280,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
 
                 let success = this.cfg.start_new_block();
                 let cleanup = this.diverge_cleanup();
-                this.cfg.terminate(block, Terminator::Call {
+                this.cfg.terminate(block, scope_id, expr_span, TerminatorKind::Call {
                     func: fun,
                     args: args,
                     cleanup: cleanup,
@@ -291,7 +321,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 });
 
                 let rvalue = unpack!(block = this.as_rvalue(block, expr));
-                this.cfg.push_assign(block, expr_span, destination, rvalue);
+                this.cfg.push_assign(block, scope_id, expr_span, destination, rvalue);
                 block.unit()
             }
         }
index e6430b7d63404cbff54f12d8543b37f7bc0cedc6..cabf5c955466c784db03f1a1393f581886594f16 100644 (file)
@@ -15,9 +15,8 @@
 
 use build::{BlockAnd, BlockAndExtension, Builder};
 use rustc_data_structures::fnv::FnvHashMap;
-use rustc::middle::const_eval::ConstVal;
-use rustc::middle::region::CodeExtent;
-use rustc::middle::ty::{AdtDef, Ty};
+use rustc::middle::const_val::ConstVal;
+use rustc::ty::{AdtDef, Ty};
 use rustc::mir::repr::*;
 use hair::*;
 use syntax::ast::{Name, NodeId};
@@ -42,9 +41,9 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         // suitable extent for all of the bindings in this match. It's
         // easiest to do this up front because some of these arms may
         // be unreachable or reachable multiple times.
-        let var_extent = self.extent_of_innermost_scope();
+        let var_scope_id = self.innermost_scope_id();
         for arm in &arms {
-            self.declare_bindings(var_extent, &arm.patterns[0]);
+            self.declare_bindings(var_scope_id, &arm.patterns[0]);
         }
 
         let mut arm_blocks = ArmBlocks {
@@ -72,6 +71,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 })
                 .map(|(arm_index, pattern, guard)| {
                     Candidate {
+                        span: pattern.span,
                         match_pairs: vec![MatchPair::new(discriminant_lvalue.clone(), pattern)],
                         bindings: vec![],
                         guard: guard,
@@ -88,7 +88,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         // an empty vector to be returned here, but the algorithm is
         // not entirely precise
         if !otherwise.is_empty() {
-            let join_block = self.join_otherwise_blocks(otherwise);
+            let join_block = self.join_otherwise_blocks(span, otherwise);
             self.panic(join_block, "something about matches algorithm not being precise", span);
         }
 
@@ -98,7 +98,10 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         for (arm_index, arm_body) in arm_bodies.into_iter().enumerate() {
             let mut arm_block = arm_blocks.blocks[arm_index];
             unpack!(arm_block = self.into(destination, arm_block, arm_body));
-            self.cfg.terminate(arm_block, Terminator::Goto { target: end_block });
+            self.cfg.terminate(arm_block,
+                               var_scope_id,
+                               span,
+                               TerminatorKind::Goto { target: end_block });
         }
 
         end_block.unit()
@@ -106,7 +109,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
 
     pub fn expr_into_pattern(&mut self,
                              mut block: BasicBlock,
-                             var_extent: CodeExtent, // lifetime of vars
+                             var_scope_id: ScopeId, // lifetime of vars
                              irrefutable_pat: Pattern<'tcx>,
                              initializer: ExprRef<'tcx>)
                              -> BlockAnd<()> {
@@ -118,7 +121,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                                    var,
                                    ty,
                                    subpattern: None } => {
-                let index = self.declare_binding(var_extent,
+                let index = self.declare_binding(var_scope_id,
                                                  mutability,
                                                  name,
                                                  var,
@@ -131,22 +134,23 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         }
         let lvalue = unpack!(block = self.as_lvalue(block, initializer));
         self.lvalue_into_pattern(block,
-                                 var_extent,
+                                 var_scope_id,
                                  irrefutable_pat,
                                  &lvalue)
     }
 
     pub fn lvalue_into_pattern(&mut self,
                                mut block: BasicBlock,
-                               var_extent: CodeExtent,
+                               var_scope_id: ScopeId,
                                irrefutable_pat: Pattern<'tcx>,
                                initializer: &Lvalue<'tcx>)
                                -> BlockAnd<()> {
         // first, creating the bindings
-        self.declare_bindings(var_extent, &irrefutable_pat);
+        self.declare_bindings(var_scope_id, &irrefutable_pat);
 
         // create a dummy candidate
         let mut candidate = Candidate {
+            span: irrefutable_pat.span,
             match_pairs: vec![MatchPair::new(initializer.clone(), &irrefutable_pat)],
             bindings: vec![],
             guard: None,
@@ -158,10 +162,10 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         unpack!(block = self.simplify_candidate(block, &mut candidate));
 
         if !candidate.match_pairs.is_empty() {
-            self.hir.span_bug(candidate.match_pairs[0].pattern.span,
-                              &format!("match pairs {:?} remaining after simplifying \
-                                        irrefutable pattern",
-                                       candidate.match_pairs));
+            span_bug!(candidate.match_pairs[0].pattern.span,
+                      "match pairs {:?} remaining after simplifying \
+                       irrefutable pattern",
+                      candidate.match_pairs);
         }
 
         // now apply the bindings, which will also declare the variables
@@ -170,29 +174,29 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         block.unit()
     }
 
-    pub fn declare_bindings(&mut self, var_extent: CodeExtent, pattern: &Pattern<'tcx>) {
+    pub fn declare_bindings(&mut self, var_scope_id: ScopeId, pattern: &Pattern<'tcx>) {
         match *pattern.kind {
             PatternKind::Binding { mutability, name, mode: _, var, ty, ref subpattern } => {
-                self.declare_binding(var_extent, mutability, name, var, ty, pattern.span);
+                self.declare_binding(var_scope_id, mutability, name, var, ty, pattern.span);
                 if let Some(subpattern) = subpattern.as_ref() {
-                    self.declare_bindings(var_extent, subpattern);
+                    self.declare_bindings(var_scope_id, subpattern);
                 }
             }
             PatternKind::Array { ref prefix, ref slice, ref suffix } |
             PatternKind::Slice { ref prefix, ref slice, ref suffix } => {
                 for subpattern in prefix.iter().chain(slice).chain(suffix) {
-                    self.declare_bindings(var_extent, subpattern);
+                    self.declare_bindings(var_scope_id, subpattern);
                 }
             }
             PatternKind::Constant { .. } | PatternKind::Range { .. } | PatternKind::Wild => {
             }
             PatternKind::Deref { ref subpattern } => {
-                self.declare_bindings(var_extent, subpattern);
+                self.declare_bindings(var_scope_id, subpattern);
             }
             PatternKind::Leaf { ref subpatterns } |
             PatternKind::Variant { ref subpatterns, .. } => {
                 for subpattern in subpatterns {
-                    self.declare_bindings(var_extent, &subpattern.pattern);
+                    self.declare_bindings(var_scope_id, &subpattern.pattern);
                 }
             }
         }
@@ -207,6 +211,9 @@ struct ArmBlocks {
 
 #[derive(Clone, Debug)]
 pub struct Candidate<'pat, 'tcx:'pat> {
+    // span of the original pattern that gave rise to this candidate
+    span: Span,
+
     // all of these must be satisfied...
     match_pairs: Vec<MatchPair<'pat, 'tcx>>,
 
@@ -238,6 +245,13 @@ pub struct MatchPair<'pat, 'tcx:'pat> {
 
     // ... must match this pattern.
     pattern: &'pat Pattern<'tcx>,
+
+    // HACK(eddyb) This is used to toggle whether a Slice pattern
+    // has had its length checked. This is only necessary because
+    // the "rest" part of the pattern right now has type &[T] and
+    // as such, it requires an Rvalue::Slice to be generated.
+    // See RFC 495 / issue #23121 for the eventual (proper) solution.
+    slice_len_checked: bool
 }
 
 #[derive(Clone, Debug, PartialEq)]
@@ -269,7 +283,7 @@ enum TestKind<'tcx> {
 
     // test length of the slice is equal to len
     Len {
-        len: usize,
+        len: u64,
         op: BinOp,
     },
 }
@@ -364,20 +378,25 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         }
 
         // Otherwise, let's process those remaining candidates.
-        let join_block = self.join_otherwise_blocks(otherwise);
+        let join_block = self.join_otherwise_blocks(span, otherwise);
         self.match_candidates(span, arm_blocks, untested_candidates, join_block)
     }
 
     fn join_otherwise_blocks(&mut self,
+                             span: Span,
                              otherwise: Vec<BasicBlock>)
                              -> BasicBlock
     {
+        let scope_id = self.innermost_scope_id();
         if otherwise.len() == 1 {
             otherwise[0]
         } else {
             let join_block = self.cfg.start_new_block();
             for block in otherwise {
-                self.cfg.terminate(block, Terminator::Goto { target: join_block });
+                self.cfg.terminate(block,
+                                   scope_id,
+                                   span,
+                                   TerminatorKind::Goto { target: join_block });
             }
             join_block
         }
@@ -544,16 +563,25 @@ impl<'a,'tcx> Builder<'a,'tcx> {
 
         let arm_block = arm_blocks.blocks[candidate.arm_index];
 
+        let scope_id = self.innermost_scope_id();
         if let Some(guard) = candidate.guard {
             // the block to branch to if the guard fails; if there is no
             // guard, this block is simply unreachable
+            let guard = self.hir.mirror(guard);
+            let guard_span = guard.span;
             let cond = unpack!(block = self.as_operand(block, guard));
             let otherwise = self.cfg.start_new_block();
-            self.cfg.terminate(block, Terminator::If { cond: cond,
-                                                       targets: (arm_block, otherwise)});
+            self.cfg.terminate(block,
+                               scope_id,
+                               guard_span,
+                               TerminatorKind::If { cond: cond,
+                                                    targets: (arm_block, otherwise)});
             Some(otherwise)
         } else {
-            self.cfg.terminate(block, Terminator::Goto { target: arm_block });
+            self.cfg.terminate(block,
+                               scope_id,
+                               candidate.span,
+                               TerminatorKind::Goto { target: arm_block });
             None
         }
     }
@@ -578,12 +606,14 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                     Rvalue::Ref(region, borrow_kind, binding.source),
             };
 
-            self.cfg.push_assign(block, binding.span, &Lvalue::Var(var_index), rvalue);
+            let scope_id = self.innermost_scope_id();
+            self.cfg.push_assign(block, scope_id, binding.span,
+                                 &Lvalue::Var(var_index), rvalue);
         }
     }
 
     fn declare_binding(&mut self,
-                       var_extent: CodeExtent,
+                       var_scope_id: ScopeId,
                        mutability: Mutability,
                        name: Name,
                        var_id: NodeId,
@@ -591,17 +621,20 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                        span: Span)
                        -> u32
     {
-        debug!("declare_binding(var_id={:?}, name={:?}, var_ty={:?}, var_extent={:?}, span={:?})",
-               var_id, name, var_ty, var_extent, span);
+        debug!("declare_binding(var_id={:?}, name={:?}, var_ty={:?}, var_scope_id={:?}, span={:?})",
+               var_id, name, var_ty, var_scope_id, span);
 
         let index = self.var_decls.len();
         self.var_decls.push(VarDecl::<'tcx> {
+            scope: var_scope_id,
             mutability: mutability,
             name: name,
             ty: var_ty.clone(),
+            span: span,
         });
         let index = index as u32;
-        self.schedule_drop(span, var_extent, &Lvalue::Var(index), var_ty);
+        let extent = self.scope_auxiliary[var_scope_id].extent;
+        self.schedule_drop(span, extent, &Lvalue::Var(index), var_ty);
         self.var_indices.insert(var_id, index);
 
         debug!("declare_binding: index={:?}", index);
index 2c8e1c1ccf673e9d44f5e7861552d6b86a0f2ca1..a3337badf884b92d9e0533052dc94fd4a5b98975 100644 (file)
@@ -95,7 +95,18 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 Err(match_pair)
             }
 
-            PatternKind::Array { ref prefix, ref slice, ref suffix } => {
+            PatternKind::Range { .. } |
+            PatternKind::Variant { .. } => {
+                // cannot simplify, test is required
+                Err(match_pair)
+            }
+
+            PatternKind::Slice { .. } if !match_pair.slice_len_checked => {
+                Err(match_pair)
+            }
+
+            PatternKind::Array { ref prefix, ref slice, ref suffix } |
+            PatternKind::Slice { ref prefix, ref slice, ref suffix } => {
                 unpack!(block = self.prefix_suffix_slice(&mut candidate.match_pairs,
                                                          block,
                                                          match_pair.lvalue.clone(),
@@ -105,13 +116,6 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 Ok(block)
             }
 
-            PatternKind::Slice { .. } |
-            PatternKind::Range { .. } |
-            PatternKind::Variant { .. } => {
-                // cannot simplify, test is required
-                Err(match_pair)
-            }
-
             PatternKind::Leaf { ref subpatterns } => {
                 // tuple struct, match subpats (if any)
                 candidate.match_pairs
index 02f32da2b83fc23d796bda27ff78ebc1a09d93e6..f70d4321a49b7669442abb9cfc73b52a76a48efa 100644 (file)
@@ -19,8 +19,8 @@ use build::Builder;
 use build::matches::{Candidate, MatchPair, Test, TestKind};
 use hair::*;
 use rustc_data_structures::fnv::FnvHashMap;
-use rustc::middle::const_eval::ConstVal;
-use rustc::middle::ty::{self, Ty};
+use rustc::middle::const_val::ConstVal;
+use rustc::ty::{self, Ty};
 use rustc::mir::repr::*;
 use syntax::codemap::Span;
 
@@ -75,7 +75,8 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 }
             }
 
-            PatternKind::Slice { ref prefix, ref slice, ref suffix } => {
+            PatternKind::Slice { ref prefix, ref slice, ref suffix }
+                    if !match_pair.slice_len_checked => {
                 let len = prefix.len() + suffix.len();
                 let op = if slice.is_some() {
                     BinOp::Ge
@@ -84,11 +85,12 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 };
                 Test {
                     span: match_pair.pattern.span,
-                    kind: TestKind::Len { len: len, op: op },
+                    kind: TestKind::Len { len: len as u64, op: op },
                 }
             }
 
             PatternKind::Array { .. } |
+            PatternKind::Slice { .. } |
             PatternKind::Wild |
             PatternKind::Binding { .. } |
             PatternKind::Leaf { .. } |
@@ -144,13 +146,14 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                         lvalue: &Lvalue<'tcx>,
                         test: &Test<'tcx>)
                         -> Vec<BasicBlock> {
+        let scope_id = self.innermost_scope_id();
         match test.kind {
             TestKind::Switch { adt_def } => {
                 let num_enum_variants = self.hir.num_variants(adt_def);
                 let target_blocks: Vec<_> =
                     (0..num_enum_variants).map(|_| self.cfg.start_new_block())
                                           .collect();
-                self.cfg.terminate(block, Terminator::Switch {
+                self.cfg.terminate(block, scope_id, test.span, TerminatorKind::Switch {
                     discr: lvalue.clone(),
                     adt_def: adt_def,
                     targets: target_blocks.clone()
@@ -165,23 +168,90 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                            .map(|_| self.cfg.start_new_block())
                            .chain(Some(otherwise))
                            .collect();
-                self.cfg.terminate(block, Terminator::SwitchInt {
-                    discr: lvalue.clone(),
-                    switch_ty: switch_ty,
-                    values: options.clone(),
-                    targets: targets.clone(),
-                });
+                self.cfg.terminate(block,
+                                   scope_id,
+                                   test.span,
+                                   TerminatorKind::SwitchInt {
+                                       discr: lvalue.clone(),
+                                       switch_ty: switch_ty,
+                                       values: options.clone(),
+                                       targets: targets.clone(),
+                                   });
                 targets
             }
 
-            TestKind::Eq { ref value, ty } => {
-                let expect = self.literal_operand(test.span, ty.clone(), Literal::Value {
-                    value: value.clone()
-                });
-                let val = Operand::Consume(lvalue.clone());
+            TestKind::Eq { ref value, mut ty } => {
+                let mut val = Operand::Consume(lvalue.clone());
+
+                // If we're using b"..." as a pattern, we need to insert an
+                // unsizing coercion, as the byte string has the type &[u8; N].
+                let expect = if let ConstVal::ByteStr(ref bytes) = *value {
+                    let tcx = self.hir.tcx();
+
+                    // Unsize the lvalue to &[u8], too, if necessary.
+                    if let ty::TyRef(region, mt) = ty.sty {
+                        if let ty::TyArray(_, _) = mt.ty.sty {
+                            ty = tcx.mk_imm_ref(region, tcx.mk_slice(tcx.types.u8));
+                            let val_slice = self.temp(ty);
+                            self.cfg.push_assign(block, scope_id, test.span, &val_slice,
+                                                 Rvalue::Cast(CastKind::Unsize, val, ty));
+                            val = Operand::Consume(val_slice);
+                        }
+                    }
+
+                    assert!(ty.is_slice());
+
+                    let array_ty = tcx.mk_array(tcx.types.u8, bytes.len());
+                    let array_ref = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), array_ty);
+                    let array = self.literal_operand(test.span, array_ref, Literal::Value {
+                        value: value.clone()
+                    });
+
+                    let slice = self.temp(ty);
+                    self.cfg.push_assign(block, scope_id, test.span, &slice,
+                                         Rvalue::Cast(CastKind::Unsize, array, ty));
+                    Operand::Consume(slice)
+                } else {
+                    self.literal_operand(test.span, ty, Literal::Value {
+                        value: value.clone()
+                    })
+                };
+
+                // Use PartialEq::eq for &str and &[u8] slices, instead of BinOp::Eq.
                 let fail = self.cfg.start_new_block();
-                let block = self.compare(block, fail, test.span, BinOp::Eq, expect, val.clone());
-                vec![block, fail]
+                if let ty::TyRef(_, mt) = ty.sty {
+                    assert!(ty.is_slice());
+                    let eq_def_id = self.hir.tcx().lang_items.eq_trait().unwrap();
+                    let ty = mt.ty;
+                    let (mty, method) = self.hir.trait_method(eq_def_id, "eq", ty, vec![ty]);
+
+                    let bool_ty = self.hir.bool_ty();
+                    let eq_result = self.temp(bool_ty);
+                    let eq_block = self.cfg.start_new_block();
+                    let cleanup = self.diverge_cleanup();
+                    self.cfg.terminate(block, scope_id, test.span, TerminatorKind::Call {
+                        func: Operand::Constant(Constant {
+                            span: test.span,
+                            ty: mty,
+                            literal: method
+                        }),
+                        args: vec![val, expect],
+                        destination: Some((eq_result.clone(), eq_block)),
+                        cleanup: cleanup,
+                    });
+
+                    // check the result
+                    let block = self.cfg.start_new_block();
+                    self.cfg.terminate(eq_block, scope_id, test.span, TerminatorKind::If {
+                        cond: Operand::Consume(eq_result),
+                        targets: (block, fail),
+                    });
+
+                    vec![block, fail]
+                } else {
+                    let block = self.compare(block, fail, test.span, BinOp::Eq, expect, val);
+                    vec![block, fail]
+                }
             }
 
             TestKind::Range { ref lo, ref hi, ty } => {
@@ -202,13 +272,15 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 let (actual, result) = (self.temp(usize_ty), self.temp(bool_ty));
 
                 // actual = len(lvalue)
-                self.cfg.push_assign(block, test.span, &actual, Rvalue::Len(lvalue.clone()));
+                self.cfg.push_assign(block, scope_id, test.span,
+                                     &actual, Rvalue::Len(lvalue.clone()));
 
                 // expected = <N>
-                let expected = self.push_usize(block, test.span, len);
+                let expected = self.push_usize(block, scope_id, test.span, len);
 
                 // result = actual == expected OR result = actual < expected
                 self.cfg.push_assign(block,
+                                     scope_id,
                                      test.span,
                                      &result,
                                      Rvalue::BinaryOp(op,
@@ -218,7 +290,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 // branch based on result
                 let target_blocks: Vec<_> = vec![self.cfg.start_new_block(),
                                                  self.cfg.start_new_block()];
-                self.cfg.terminate(block, Terminator::If {
+                self.cfg.terminate(block, scope_id, test.span, TerminatorKind::If {
                     cond: Operand::Consume(result),
                     targets: (target_blocks[0], target_blocks[1])
                 });
@@ -239,11 +311,13 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         let result = self.temp(bool_ty);
 
         // result = op(left, right)
-        self.cfg.push_assign(block, span, &result, Rvalue::BinaryOp(op, left, right));
+        let scope_id = self.innermost_scope_id();
+        self.cfg.push_assign(block, scope_id, span, &result,
+                             Rvalue::BinaryOp(op, left, right));
 
         // branch based on result
         let target_block = self.cfg.start_new_block();
-        self.cfg.terminate(block, Terminator::If {
+        self.cfg.terminate(block, scope_id, span, TerminatorKind::If {
             cond: Operand::Consume(result),
             targets: (target_block, fail_block)
         });
@@ -349,9 +423,26 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 }
             }
 
-            TestKind::Eq { .. } |
-            TestKind::Range { .. } |
+            // If we are performing a length check, then this
+            // informs slice patterns, but nothing else.
             TestKind::Len { .. } => {
+                let pattern_test = self.test(&match_pair);
+                match *match_pair.pattern.kind {
+                    PatternKind::Slice { .. } if pattern_test.kind == test.kind => {
+                        let mut new_candidate = candidate.clone();
+
+                        // Mark the match pair as length-checked so it simplifies like an array.
+                        new_candidate.match_pairs[match_pair_index]
+                                     .slice_len_checked = true;
+                        resulting_candidates[0].push(new_candidate);
+                        true
+                    }
+                    _ => false
+                }
+            }
+
+            TestKind::Eq { .. } |
+            TestKind::Range { .. } => {
                 // These are all binary tests.
                 //
                 // FIXME(#29623) we can be more clever here
@@ -379,6 +470,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                                  .map(|(_, mp)| mp.clone())
                                  .collect();
         Candidate {
+            span: candidate.span,
             match_pairs: other_match_pairs,
             bindings: candidate.bindings.clone(),
             guard: candidate.guard.clone(),
@@ -405,7 +497,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                        .map(|subpattern| {
                            // e.g., `(x as Variant).0`
                            let lvalue = downcast_lvalue.clone().field(subpattern.field,
-                                                                      subpattern.field_ty());
+                                                                      subpattern.pattern.ty);
                            // e.g., `(x as Variant).0 @ P1`
                            MatchPair::new(lvalue, &subpattern.pattern)
                        });
@@ -420,6 +512,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         let all_match_pairs = consequent_match_pairs.chain(other_match_pairs).collect();
 
         Candidate {
+            span: candidate.span,
             match_pairs: all_match_pairs,
             bindings: candidate.bindings.clone(),
             guard: candidate.guard.clone(),
@@ -428,8 +521,9 @@ impl<'a,'tcx> Builder<'a,'tcx> {
     }
 
     fn error_simplifyable<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> ! {
-        self.hir.span_bug(match_pair.pattern.span,
-                          &format!("simplifyable pattern found: {:?}", match_pair.pattern))
+        span_bug!(match_pair.pattern.span,
+                  "simplifyable pattern found: {:?}",
+                  match_pair.pattern)
     }
 }
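The TestKind::Eq hunk above handles byte-string patterns: a b"..." literal has type &[u8; N], so the builder unsizes both the pattern constant and the scrutinee to &[u8] and then emits the comparison as a call to PartialEq::eq rather than a plain BinOp::Eq. A standalone sketch (surface-level Rust in today's syntax, purely illustrative, not compiler code) of the kind of match this lowering serves:

fn classify(input: &[u8]) -> &'static str {
    // Each byte-string pattern is compared against the &[u8] scrutinee via
    // PartialEq::eq once the unsizing coercions described above are in place.
    match input {
        b"GET" => "get request",
        b"POST" => "post request",
        _ => "something else",
    }
}

fn main() {
    assert_eq!(classify(b"GET"), "get request");
    assert_eq!(classify(b"PUT"), "something else");
}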
 
index c295ed168badb0307928a7b05c6d10209bb718e0..101d7594309a9a505b2ca2b12187627a37d8468d 100644 (file)
@@ -22,7 +22,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         subpatterns.iter()
                    .map(|fieldpat| {
                        let lvalue = lvalue.clone().field(fieldpat.field,
-                                                         fieldpat.field_ty());
+                                                         fieldpat.pattern.ty);
                        MatchPair::new(lvalue, &fieldpat.pattern)
                    })
                    .collect()
@@ -61,7 +61,8 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 from_end: suffix_len,
             };
             let temp = self.temp(slice.ty.clone()); // no need to schedule drop, temp is always copy
-            self.cfg.push_assign(block, slice.span, &temp, rvalue);
+            let scope_id = self.innermost_scope_id();
+            self.cfg.push_assign(block, scope_id, slice.span, &temp, rvalue);
             match_pairs.push(MatchPair::new(temp, slice));
         }
 
@@ -118,6 +119,7 @@ impl<'pat, 'tcx> MatchPair<'pat, 'tcx> {
         MatchPair {
             lvalue: lvalue,
             pattern: pattern,
+            slice_len_checked: false,
         }
     }
 }
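The new slice_len_checked flag ties these two files together: a slice pattern first produces a Len test (Ge when there is a `..` rest, Eq otherwise), and once that single length check has succeeded the candidate is re-simplified as if it matched a fixed-length array. A standalone sketch (today's stable slice-pattern syntax, purely illustrative) of the matches this supports:

fn describe(xs: &[i32]) -> &'static str {
    match xs {
        // Exact-length patterns need BinOp::Eq on the length.
        [] => "empty",
        [_only] => "exactly one element",
        // A `..` rest only needs len >= prefix + suffix (BinOp::Ge).
        [first, .., last] if first == last => "same first and last",
        _ => "anything else",
    }
}

fn main() {
    assert_eq!(describe(&[]), "empty");
    assert_eq!(describe(&[7]), "exactly one element");
    assert_eq!(describe(&[1, 2, 1]), "same first and last");
    assert_eq!(describe(&[1, 2, 3]), "anything else");
}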
index 5d040bcb40ad8e72c2a1e111e47923df09e05d99..86f15a6319399cac89d9c90d07db6dd71b5b15af 100644 (file)
@@ -12,8 +12,7 @@
 //! kind of thing.
 
 use build::Builder;
-use hair::*;
-use rustc::middle::ty::Ty;
+use rustc::ty::Ty;
 use rustc::mir::repr::*;
 use std::u32;
 use syntax::codemap::Span;
@@ -47,11 +46,16 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         Operand::Constant(constant)
     }
 
-    pub fn push_usize(&mut self, block: BasicBlock, span: Span, value: usize) -> Lvalue<'tcx> {
+    pub fn push_usize(&mut self,
+                      block: BasicBlock,
+                      scope_id: ScopeId,
+                      span: Span,
+                      value: u64)
+                      -> Lvalue<'tcx> {
         let usize_ty = self.hir.usize_ty();
         let temp = self.temp(usize_ty);
         self.cfg.push_assign_constant(
-            block, span, &temp,
+            block, scope_id, span, &temp,
             Constant {
                 span: span,
                 ty: self.hir.usize_ty(),
@@ -59,16 +63,4 @@ impl<'a,'tcx> Builder<'a,'tcx> {
             });
         temp
     }
-
-    pub fn item_ref_operand(&mut self,
-                            span: Span,
-                            item_ref: ItemRef<'tcx>)
-                            -> Operand<'tcx> {
-        let literal = Literal::Item {
-            def_id: item_ref.def_id,
-            kind: item_ref.kind,
-            substs: item_ref.substs,
-        };
-        self.literal_operand(span, item_ref.ty, literal)
-    }
 }
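push_usize now takes the enclosing ScopeId and a u64 and still hands back the temporary holding the constant. A minimal sketch of that shape with toy stand-in types (none of these names are the compiler's):

#[derive(Clone, Copy, Debug)]
struct ScopeId(usize);

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Temp(usize);

#[derive(Debug)]
struct AssignConst {
    scope: ScopeId,
    dest: Temp,
    value: u64,
}

#[derive(Default, Debug)]
struct Builder {
    next_temp: usize,
    stmts: Vec<AssignConst>,
}

impl Builder {
    fn temp(&mut self) -> Temp {
        let t = Temp(self.next_temp);
        self.next_temp += 1;
        t
    }

    // Constant in, temporary lvalue out; the caller then consumes the temp.
    fn push_usize(&mut self, scope: ScopeId, value: u64) -> Temp {
        let dest = self.temp();
        self.stmts.push(AssignConst { scope, dest, value });
        dest
    }
}

fn main() {
    let mut builder = Builder::default();
    let len = builder.push_usize(ScopeId(0), 3);
    assert_eq!(len, Temp(0));
    println!("{:?}", builder.stmts);
}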
index b83d0f6a9803a96c2990b61910367fcc36822b73..2e5b6a952b72dce6adedf892dcacbfe6f68fafac 100644 (file)
 // except according to those terms.
 
 use hair::cx::Cx;
-use rustc::middle::region::CodeExtent;
-use rustc::middle::ty::{FnOutput, Ty};
+use rustc::middle::region::{CodeExtent, CodeExtentData};
+use rustc::ty::{FnOutput, Ty};
 use rustc::mir::repr::*;
 use rustc_data_structures::fnv::FnvHashMap;
-use rustc_front::hir;
-
+use rustc::hir;
+use std::ops::{Index, IndexMut};
 use syntax::ast;
 use syntax::codemap::Span;
 
 pub struct Builder<'a, 'tcx: 'a> {
     hir: Cx<'a, 'tcx>,
     cfg: CFG<'tcx>,
+
+    fn_span: Span,
+
+    // the current set of scopes, updated as we traverse;
+    // see the `scope` module for more details
     scopes: Vec<scope::Scope<'tcx>>,
+
+    // for each scope, a span of blocks that defines it;
+    // we track these for use in region and borrow checking,
+    // but these are liable to get out of date once optimization
+    // begins. They are also hopefully temporary, and will no
+    // longer be needed when we adopt graph-based regions.
+    scope_auxiliary: ScopeAuxiliaryVec,
+
+    // the current set of loops; see the `scope` module for more
+    // details
     loop_scopes: Vec<scope::LoopScope>,
+
+    // the vector of all scopes that we have created thus far;
+    // we track this for debuginfo later
+    scope_datas: Vec<ScopeData>,
+
     var_decls: Vec<VarDecl<'tcx>>,
     var_indices: FnvHashMap<ast::NodeId, u32>,
     temp_decls: Vec<TempDecl<'tcx>>,
     unit_temp: Option<Lvalue<'tcx>>,
+
+    // cached block with a RESUME terminator; we create this at the
+    // first panic
+    cached_resume_block: Option<BasicBlock>,
 }
 
 struct CFG<'tcx> {
     basic_blocks: Vec<BasicBlockData<'tcx>>,
 }
 
+/// For each scope, we track the extent (from the HIR) and a
+/// single-entry-multiple-exit subgraph that contains all the
+/// statements/terminators within it.
+///
+/// This information is separated out from the main `ScopeData`
+/// because it is short-lived. First, the extent contains node-ids,
+/// so it cannot be saved and re-loaded. Second, any optimization will mess up
+/// the dominator/postdominator information.
+///
+/// The intention is basically to use this information to do
+/// regionck/borrowck and then throw it away once we are done.
+pub struct ScopeAuxiliary {
+    /// extent of this scope from the MIR.
+    pub extent: CodeExtent,
+
+    /// "entry point": dominator of all nodes in the scope
+    pub dom: Location,
+
+    /// "exit points": mutual postdominators of all nodes in the scope
+    pub postdoms: Vec<Location>,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub struct Location {
+    /// the location is within this block
+    pub block: BasicBlock,
+
+    /// the location is the start of this statement; or, if `statement_index`
+    /// == num-statements, then the start of the terminator.
+    pub statement_index: usize,
+}
+
+pub struct ScopeAuxiliaryVec {
+    pub vec: Vec<ScopeAuxiliary>
+}
+
+impl Index<ScopeId> for ScopeAuxiliaryVec {
+    type Output = ScopeAuxiliary;
+
+    #[inline]
+    fn index(&self, index: ScopeId) -> &ScopeAuxiliary {
+        &self.vec[index.index()]
+    }
+}
+
+impl IndexMut<ScopeId> for ScopeAuxiliaryVec {
+    #[inline]
+    fn index_mut(&mut self, index: ScopeId) -> &mut ScopeAuxiliary {
+        &mut self.vec[index.index()]
+    }
+}
+
 ///////////////////////////////////////////////////////////////////////////
 /// The `BlockAnd` "monad" packages up the new basic block along with a
 /// produced value (sometimes just unit, of course). The `unpack!`
@@ -81,46 +157,85 @@ macro_rules! unpack {
 
 pub fn construct<'a,'tcx>(hir: Cx<'a,'tcx>,
                           span: Span,
+                          fn_id: ast::NodeId,
+                          body_id: ast::NodeId,
                           implicit_arguments: Vec<Ty<'tcx>>,
                           explicit_arguments: Vec<(Ty<'tcx>, &'tcx hir::Pat)>,
-                          argument_extent: CodeExtent,
                           return_ty: FnOutput<'tcx>,
                           ast_block: &'tcx hir::Block)
-                          -> Mir<'tcx> {
+                          -> (Mir<'tcx>, ScopeAuxiliaryVec) {
+    let tcx = hir.tcx();
     let cfg = CFG { basic_blocks: vec![] };
 
     let mut builder = Builder {
         hir: hir,
         cfg: cfg,
+        fn_span: span,
         scopes: vec![],
+        scope_datas: vec![],
+        scope_auxiliary: ScopeAuxiliaryVec { vec: vec![] },
         loop_scopes: vec![],
         temp_decls: vec![],
         var_decls: vec![],
         var_indices: FnvHashMap(),
         unit_temp: None,
+        cached_resume_block: None,
     };
 
     assert_eq!(builder.cfg.start_new_block(), START_BLOCK);
     assert_eq!(builder.cfg.start_new_block(), END_BLOCK);
 
-    let mut block = START_BLOCK;
-    let arg_decls = unpack!(block = builder.args_and_body(block,
-                                                          implicit_arguments,
-                                                          explicit_arguments,
-                                                          argument_extent,
-                                                          ast_block));
-
-    builder.cfg.terminate(block, Terminator::Goto { target: END_BLOCK });
-    builder.cfg.terminate(END_BLOCK, Terminator::Return);
-
-    Mir {
-        basic_blocks: builder.cfg.basic_blocks,
-        var_decls: builder.var_decls,
-        arg_decls: arg_decls,
-        temp_decls: builder.temp_decls,
-        return_ty: return_ty,
-        span: span
-    }
+
+    let mut arg_decls = None; // assigned to `Some` in closures below
+    let call_site_extent =
+        tcx.region_maps.lookup_code_extent(
+            CodeExtentData::CallSiteScope { fn_id: fn_id, body_id: body_id });
+    let _ = builder.in_scope(call_site_extent, START_BLOCK, |builder, call_site_scope_id| {
+        let mut block = START_BLOCK;
+        let arg_extent =
+            tcx.region_maps.lookup_code_extent(
+                CodeExtentData::ParameterScope { fn_id: fn_id, body_id: body_id });
+        unpack!(block = builder.in_scope(arg_extent, block, |builder, arg_scope_id| {
+            arg_decls = Some(unpack!(block = builder.args_and_body(block,
+                                                                   implicit_arguments,
+                                                                   explicit_arguments,
+                                                                   arg_scope_id,
+                                                                   ast_block)));
+            block.unit()
+        }));
+
+        builder.cfg.terminate(block, call_site_scope_id, span,
+                              TerminatorKind::Goto { target: END_BLOCK });
+        builder.cfg.terminate(END_BLOCK, call_site_scope_id, span,
+                              TerminatorKind::Return);
+
+        END_BLOCK.unit()
+    });
+
+    assert!(
+        builder.cfg.basic_blocks
+                   .iter()
+                   .enumerate()
+                   .all(|(index, block)| {
+                       if block.terminator.is_none() {
+                           bug!("no terminator on block {:?} in fn {:?}",
+                                index, fn_id)
+                       }
+                       true
+                   }));
+
+    (
+        Mir {
+            basic_blocks: builder.cfg.basic_blocks,
+            scopes: builder.scope_datas,
+            var_decls: builder.var_decls,
+            arg_decls: arg_decls.take().expect("args never built?"),
+            temp_decls: builder.temp_decls,
+            return_ty: return_ty,
+            span: span
+        },
+        builder.scope_auxiliary,
+    )
 }
 
 impl<'a,'tcx> Builder<'a,'tcx> {
@@ -128,36 +243,40 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                      mut block: BasicBlock,
                      implicit_arguments: Vec<Ty<'tcx>>,
                      explicit_arguments: Vec<(Ty<'tcx>, &'tcx hir::Pat)>,
-                     argument_extent: CodeExtent,
+                     argument_scope_id: ScopeId,
                      ast_block: &'tcx hir::Block)
                      -> BlockAnd<Vec<ArgDecl<'tcx>>>
     {
-        self.in_scope(argument_extent, block, |this| {
-            // to start, translate the argument patterns and collect the argument types.
-            let implicits = implicit_arguments.into_iter().map(|ty| (ty, None));
-            let explicits = explicit_arguments.into_iter().map(|(ty, pat)| (ty, Some(pat)));
+        // to start, translate the argument patterns and collect the argument types.
+        let implicits = implicit_arguments.into_iter().map(|ty| (ty, None));
+        let explicits = explicit_arguments.into_iter().map(|(ty, pat)| (ty, Some(pat)));
             let arg_decls =
-                implicits
-                .chain(explicits)
-                .enumerate()
-                .map(|(index, (ty, pattern))| {
-                    if let Some(pattern) = pattern {
-                        let lvalue = Lvalue::Arg(index as u32);
-                        let pattern = this.hir.irrefutable_pat(pattern);
-                        unpack!(block = this.lvalue_into_pattern(block,
-                                                                 argument_extent,
-                                                                 pattern,
-                                                                 &lvalue));
-                    }
-                    ArgDecl { ty: ty }
-                })
-                .collect();
-
-            // start the first basic block and translate the body
-            unpack!(block = this.ast_block(&Lvalue::ReturnPointer, block, ast_block));
-
-            block.and(arg_decls)
-        })
+            implicits
+            .chain(explicits)
+            .enumerate()
+            .map(|(index, (ty, pattern))| {
+                let lvalue = Lvalue::Arg(index as u32);
+                if let Some(pattern) = pattern {
+                    let pattern = self.hir.irrefutable_pat(pattern);
+                    unpack!(block = self.lvalue_into_pattern(block,
+                                                             argument_scope_id,
+                                                             pattern,
+                                                             &lvalue));
+                }
+
+                // Make sure we drop (parts of) the argument even when not matched on.
+                let argument_extent = self.scope_auxiliary[argument_scope_id].extent;
+                self.schedule_drop(pattern.as_ref().map_or(ast_block.span, |pat| pat.span),
+                                   argument_extent, &lvalue, ty);
+
+                ArgDecl { ty: ty, spread: false }
+            })
+            .collect();
+
+        // start the first basic block and translate the body
+        unpack!(block = self.ast_block(&Lvalue::ReturnPointer, block, ast_block));
+
+        block.and(arg_decls)
     }
 
     fn get_unit_temp(&mut self) -> Lvalue<'tcx> {
@@ -185,4 +304,3 @@ mod into;
 mod matches;
 mod misc;
 mod scope;
-mod stmt;
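ScopeAuxiliaryVec above is a newtype-indexed vector: wrapping the Vec and implementing Index/IndexMut for ScopeId keeps scope ids from being confused with block or statement indices. A self-contained sketch of that pattern (illustrative names, not the compiler's types):

use std::ops::{Index, IndexMut};

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct ScopeId(usize);

#[derive(Debug, Default)]
struct Aux {
    postdoms: Vec<usize>,
}

#[derive(Debug, Default)]
struct AuxVec {
    vec: Vec<Aux>,
}

impl Index<ScopeId> for AuxVec {
    type Output = Aux;

    fn index(&self, id: ScopeId) -> &Aux {
        &self.vec[id.0]
    }
}

impl IndexMut<ScopeId> for AuxVec {
    fn index_mut(&mut self, id: ScopeId) -> &mut Aux {
        &mut self.vec[id.0]
    }
}

fn main() {
    let mut auxs = AuxVec::default();
    auxs.vec.push(Aux::default());

    let id = ScopeId(0);
    auxs[id].postdoms.push(7); // goes through IndexMut<ScopeId>
    assert_eq!(auxs[id].postdoms, vec![7]);
}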
index 7be70867796ff0088aa7350ebaf1384eb7afdb32..bda9cf058f6e966420dc7663f6d3864b4261070f 100644 (file)
@@ -86,45 +86,78 @@ should go to.
 
 */
 
-use build::{BlockAnd, BlockAndExtension, Builder, CFG};
-use rustc::middle::region::CodeExtent;
+use build::{BlockAnd, BlockAndExtension, Builder, CFG, ScopeAuxiliary};
+use rustc::middle::region::{CodeExtent, CodeExtentData};
 use rustc::middle::lang_items;
-use rustc::middle::subst::{Substs, Subst, VecPerParamSpace};
-use rustc::middle::ty::{self, Ty};
+use rustc::ty::subst::{Substs, Subst, VecPerParamSpace};
+use rustc::ty::{self, Ty, TyCtxt};
 use rustc::mir::repr::*;
 use syntax::codemap::{Span, DUMMY_SP};
 use syntax::parse::token::intern_and_get_ident;
+use rustc::middle::const_val::ConstVal;
+use rustc_const_math::ConstInt;
 
 pub struct Scope<'tcx> {
+    /// the scope-id within the scope_datas
+    id: ScopeId,
+
+    /// the extent of this scope within source code; also stored in
+    /// `ScopeAuxiliary`, but kept here for convenience
     extent: CodeExtent,
+
+    /// set of lvalues to drop when exiting this scope. This starts
+    /// out empty but grows as variables are declared during the
+    /// building process. This is a stack, so we always drop from the
+    /// end of the vector (top of the stack) first.
     drops: Vec<DropData<'tcx>>,
-    // A scope may only have one associated free, because:
-    // 1. We require a `free` to only be scheduled in the scope of `EXPR` in `box EXPR`;
-    // 2. It only makes sense to have it translated into the diverge-path.
-    //
-    // This kind of drop will be run *after* all the regular drops scheduled onto this scope,
-    // because drops may have dependencies on the allocated memory.
-    //
-    // This is expected to go away once `box EXPR` becomes a sugar for placement protocol and gets
-    // desugared in some earlier stage.
+
+    /// A scope may only have one associated free, because:
+    ///
+    /// 1. We require a `free` to only be scheduled in the scope of
+    ///    `EXPR` in `box EXPR`;
+    /// 2. It only makes sense to have it translated into the diverge-path.
+    ///
+    /// This kind of drop will be run *after* all the regular drops
+    /// scheduled onto this scope, because drops may have dependencies
+    /// on the allocated memory.
+    ///
+    /// This is expected to go away once `box EXPR` becomes a sugar
+    /// for placement protocol and gets desugared in some earlier
+    /// stage.
     free: Option<FreeData<'tcx>>,
+
+    /// The cached block for the cleanups-on-diverge path. This block
+    /// will just do a RESUME to an appropriate place. It does not
+    /// execute any of the drops or the free: each of those has its
+    /// own cached block, which will branch to this point.
+    cached_block: Option<BasicBlock>
 }
 
 struct DropData<'tcx> {
+    /// span where drop obligation was incurred (typically where lvalue was declared)
+    span: Span,
+
+    /// lvalue to drop
     value: Lvalue<'tcx>,
-    // NB: per-drop “cache” is necessary for the build_scope_drops function below.
-    /// The cached block for the cleanups-on-diverge path. This block contains code to run the
-    /// current drop and all the preceding drops (i.e. those having lower index in Drop’s
-    /// Scope drop array)
+
+    /// The cached block for the cleanups-on-diverge path. This block
+    /// contains code to run the current drop and all the preceding
+    /// drops (i.e. those having lower index in Drop’s Scope drop
+    /// array)
     cached_block: Option<BasicBlock>
 }
 
 struct FreeData<'tcx> {
+    /// span where free obligation was incurred
     span: Span,
+
     /// Lvalue containing the allocated box.
     value: Lvalue<'tcx>,
+
     /// type of the item for which the box was allocated (i.e. the T in Box<T>).
     item_ty: Ty<'tcx>,
+
     /// The cached block containing code to run the free. The block will also execute all the drops
     /// in the scope.
     cached_block: Option<BasicBlock>
@@ -149,6 +182,7 @@ impl<'tcx> Scope<'tcx> {
     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
     /// larger extent of code.
     fn invalidate_cache(&mut self) {
+        self.cached_block = None;
         for dropdata in &mut self.drops {
             dropdata.cached_block = None;
         }
@@ -203,11 +237,11 @@ impl<'a,'tcx> Builder<'a,'tcx> {
     /// Convenience wrapper that pushes a scope and then executes `f`
     /// to build its contents, popping the scope afterwards.
     pub fn in_scope<F, R>(&mut self, extent: CodeExtent, mut block: BasicBlock, f: F) -> BlockAnd<R>
-        where F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>
+        where F: FnOnce(&mut Builder<'a, 'tcx>, ScopeId) -> BlockAnd<R>
     {
         debug!("in_scope(extent={:?}, block={:?})", extent, block);
-        self.push_scope(extent);
-        let rv = unpack!(block = f(self));
+        let id = self.push_scope(extent, block);
+        let rv = unpack!(block = f(self, id));
         unpack!(block = self.pop_scope(extent, block));
         debug!("in_scope: exiting extent={:?} block={:?}", extent, block);
         block.and(rv)
@@ -217,26 +251,46 @@ impl<'a,'tcx> Builder<'a,'tcx> {
     /// scope and call `pop_scope` afterwards. Note that these two
     /// calls must be paired; using `in_scope` as a convenience
     /// wrapper may be preferable.
-    pub fn push_scope(&mut self, extent: CodeExtent) {
+    pub fn push_scope(&mut self, extent: CodeExtent, entry: BasicBlock) -> ScopeId {
         debug!("push_scope({:?})", extent);
+        let parent_id = self.scopes.last().map(|s| s.id);
+        let id = ScopeId::new(self.scope_datas.len());
+        self.scope_datas.push(ScopeData {
+            parent_scope: parent_id,
+        });
         self.scopes.push(Scope {
-            extent: extent.clone(),
+            id: id,
+            extent: extent,
             drops: vec![],
-            free: None
+            free: None,
+            cached_block: None,
         });
+        self.scope_auxiliary.vec.push(ScopeAuxiliary {
+            extent: extent,
+            dom: self.cfg.current_location(entry),
+            postdoms: vec![]
+        });
+        id
     }
 
     /// Pops a scope, which should have extent `extent`, adding any
     /// drops onto the end of `block` that are needed.  This must
     /// match 1-to-1 with `push_scope`.
-    pub fn pop_scope(&mut self, extent: CodeExtent, block: BasicBlock) -> BlockAnd<()> {
+    pub fn pop_scope(&mut self,
+                     extent: CodeExtent,
+                     mut block: BasicBlock)
+                     -> BlockAnd<()> {
         debug!("pop_scope({:?}, {:?})", extent, block);
         // We need to have `cached_block`s available for all the drops, so we call diverge_cleanup
         // to make sure all the `cached_block`s are filled in.
         self.diverge_cleanup();
         let scope = self.scopes.pop().unwrap();
         assert_eq!(scope.extent, extent);
-        build_scope_drops(&mut self.cfg, &scope, &self.scopes[..], block)
+        unpack!(block = build_scope_drops(&mut self.cfg, &scope, &self.scopes, block));
+        self.scope_auxiliary[scope.id]
+            .postdoms
+            .push(self.cfg.current_location(block));
+        block.unit()
     }
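in_scope now hands the closure the freshly created ScopeId along with the builder, which keeps push_scope and pop_scope paired while letting the body attach its statements and terminators to the right scope. A simplified sketch of that wrapper with toy types (illustrative only):

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct ScopeId(usize);

#[derive(Default)]
struct Builder {
    scopes: Vec<ScopeId>,
    next_id: usize,
}

impl Builder {
    fn push_scope(&mut self) -> ScopeId {
        let id = ScopeId(self.next_id);
        self.next_id += 1;
        self.scopes.push(id);
        id
    }

    fn pop_scope(&mut self, expected: ScopeId) {
        let popped = self.scopes.pop().expect("scope stack empty");
        assert_eq!(popped, expected, "push_scope/pop_scope must be paired");
    }

    // Mirrors the shape of `in_scope`: the closure receives the new scope's id.
    fn in_scope<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce(&mut Builder, ScopeId) -> R,
    {
        let id = self.push_scope();
        let result = f(self, id);
        self.pop_scope(id);
        result
    }
}

fn main() {
    let mut builder = Builder::default();
    let depth = builder.in_scope(|b, outer| {
        b.in_scope(|b, inner| {
            assert_ne!(outer, inner);
            b.scopes.len()
        })
    });
    assert_eq!(depth, 2);
}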
 
 
@@ -249,9 +303,10 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                       extent: CodeExtent,
                       mut block: BasicBlock,
                       target: BasicBlock) {
+        debug!("exit_scope(extent={:?}, block={:?}, target={:?})", extent, block, target);
         let scope_count = 1 + self.scopes.iter().rev().position(|scope| scope.extent == extent)
                                                       .unwrap_or_else(||{
-            self.hir.span_bug(span, &format!("extent {:?} does not enclose", extent))
+            span_bug!(span, "extent {:?} does not enclose", extent)
         });
 
         let tmp = self.get_unit_temp();
@@ -262,12 +317,24 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                                               block));
             if let Some(ref free_data) = scope.free {
                 let next = self.cfg.start_new_block();
-                let free = build_free(self.hir.tcx(), tmp.clone(), free_data, next);
-                self.cfg.terminate(block, free);
+                let free = build_free(self.hir.tcx(), &tmp, free_data, next);
+                self.cfg.terminate(block, scope.id, span, free);
                 block = next;
             }
+            self.scope_auxiliary[scope.id]
+                .postdoms
+                .push(self.cfg.current_location(block));
         }
-        self.cfg.terminate(block, Terminator::Goto { target: target });
+
+        assert!(scope_count < self.scopes.len(),
+                "should never use `exit_scope` to pop *ALL* scopes");
+        let scope = self.scopes.iter().rev().skip(scope_count)
+                                            .next()
+                                            .unwrap();
+        self.cfg.terminate(block,
+                           scope.id,
+                           span,
+                           TerminatorKind::Goto { target: target });
     }
 
     // Finding scopes
@@ -278,7 +345,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                            span: Span,
                            label: Option<CodeExtent>)
                            -> &mut LoopScope {
-        let Builder { ref mut loop_scopes, ref mut hir, .. } = *self;
+        let loop_scopes = &mut self.loop_scopes;
         match label {
             None => {
                 // no label? return the innermost loop scope
@@ -291,15 +358,28 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                            .filter(|loop_scope| loop_scope.extent == label)
                            .next()
             }
-        }.unwrap_or_else(|| hir.span_bug(span, "no enclosing loop scope found?"))
+        }.unwrap_or_else(|| span_bug!(span, "no enclosing loop scope found?"))
+    }
+
+    pub fn innermost_scope_id(&self) -> ScopeId {
+        self.scopes.last().map(|scope| scope.id).unwrap()
     }
 
     pub fn extent_of_innermost_scope(&self) -> CodeExtent {
         self.scopes.last().map(|scope| scope.extent).unwrap()
     }
 
-    pub fn extent_of_outermost_scope(&self) -> CodeExtent {
-        self.scopes.first().map(|scope| scope.extent).unwrap()
+    /// Returns the extent of the scope which should be exited by a
+    /// return.
+    pub fn extent_of_return_scope(&self) -> CodeExtent {
+        // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
+        // We want `scopes[1]`, which is the `ParameterScope`.
+        assert!(self.scopes.len() >= 2);
+        assert!(match self.hir.tcx().region_maps.code_extent_data(self.scopes[1].extent) {
+            CodeExtentData::ParameterScope { .. } => true,
+            _ => false,
+        });
+        self.scopes[1].extent
     }
 
     // Scheduling drops
@@ -319,6 +399,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 // No need to invalidate any caches here. The just-scheduled drop will branch into
                 // the drop that comes before it in the vector.
                 scope.drops.push(DropData {
+                    span: span,
                     value: lvalue.clone(),
                     cached_block: None
                 });
@@ -329,8 +410,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 scope.invalidate_cache()
             }
         }
-        self.hir.span_bug(span,
-                          &format!("extent {:?} not in scope to drop {:?}", extent, lvalue));
+        span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
     }
 
     /// Schedule dropping of a not-yet-fully-initialised box.
@@ -363,8 +443,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                 scope.invalidate_cache();
             }
         }
-        self.hir.span_bug(span,
-                          &format!("extent {:?} not in scope to free {:?}", extent, value));
+        span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value);
     }
 
     // Other
@@ -375,36 +454,60 @@ impl<'a,'tcx> Builder<'a,'tcx> {
     /// See module comment for more details. None indicates there’s no
     /// cleanup to do at this point.
     pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
-        if self.scopes.is_empty() {
+        if self.scopes.iter().all(|scope| scope.drops.is_empty() && scope.free.is_none()) {
             return None;
         }
+        assert!(!self.scopes.is_empty()); // or `all` above would be true
+
         let unit_temp = self.get_unit_temp();
-        let Builder { ref mut hir, ref mut cfg, ref mut scopes, .. } = *self;
-        let mut next_block = None;
-
-        // Given an array of scopes, we generate these from the outermost scope to the innermost
-        // one. Thus for array [S0, S1, S2] with corresponding cleanup blocks [B0, B1, B2], we will
-        // generate B0 <- B1 <- B2 in left-to-right order. Control flow of the generated blocks
-        // always ends up at a block with the Resume terminator.
-        for scope in scopes.iter_mut().filter(|s| !s.drops.is_empty() || s.free.is_some()) {
-            next_block = Some(build_diverge_scope(hir.tcx(),
-                                                  cfg,
-                                                  unit_temp.clone(),
-                                                  scope,
-                                                  next_block));
+        let Builder { ref mut hir, ref mut cfg, ref mut scopes,
+                      ref mut cached_resume_block, .. } = *self;
+
+        // Build up the drops in **reverse** order. The end result will
+        // look like:
+        //
+        //    scopes[n] -> scopes[n-1] -> ... -> scopes[0]
+        //
+        // However, we build this in **reverse order**. That is, we
+        // process scopes[0], then scopes[1], etc., pointing each one at
+        // the result generated from the one before. Along the way, we
+        // store caches. If everything is cached, we'll just walk right
+        // to left reading the cached results but never create anything.
+
+        // To start, create the resume terminator.
+        let mut target = if let Some(target) = *cached_resume_block {
+            target
+        } else {
+            let resumeblk = cfg.start_new_cleanup_block();
+            cfg.terminate(resumeblk, scopes[0].id, self.fn_span, TerminatorKind::Resume);
+            *cached_resume_block = Some(resumeblk);
+            resumeblk
+        };
+
+        for scope in scopes {
+            target = build_diverge_scope(hir.tcx(), cfg, &unit_temp, scope, target);
         }
-        scopes.iter().rev().flat_map(|x| x.cached_block()).next()
+
+        Some(target)
     }
 
     /// Utility function for *non*-scope code to build their own drops
-    pub fn build_drop(&mut self, block: BasicBlock, value: Lvalue<'tcx>) -> BlockAnd<()> {
+    pub fn build_drop(&mut self,
+                      block: BasicBlock,
+                      span: Span,
+                      value: Lvalue<'tcx>)
+                      -> BlockAnd<()> {
+        let scope_id = self.innermost_scope_id();
         let next_target = self.cfg.start_new_block();
         let diverge_target = self.diverge_cleanup();
-        self.cfg.terminate(block, Terminator::Drop {
-            value: value,
-            target: next_target,
-            unwind: diverge_target,
-        });
+        self.cfg.terminate(block,
+                           scope_id,
+                           span,
+                           TerminatorKind::Drop {
+                               value: value,
+                               target: next_target,
+                               unwind: diverge_target,
+                           });
         next_target.unit()
     }
 
@@ -413,10 +516,10 @@ impl<'a,'tcx> Builder<'a,'tcx> {
     // =========
     // FIXME: should be moved into their own module
     pub fn panic_bounds_check(&mut self,
-                             block: BasicBlock,
-                             index: Operand<'tcx>,
-                             len: Operand<'tcx>,
-                             span: Span) {
+                              block: BasicBlock,
+                              index: Operand<'tcx>,
+                              len: Operand<'tcx>,
+                              span: Span) {
         // fn(&(filename: &'static str, line: u32), index: usize, length: usize) -> !
         let region = ty::ReStatic; // FIXME(mir-borrowck): use a better region?
         let func = self.lang_function(lang_items::PanicBoundsCheckFnLangItem);
@@ -426,22 +529,23 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         let tup_ty = if let ty::TyRef(_, tyandmut) = ref_ty.sty {
             tyandmut.ty
         } else {
-            self.hir.span_bug(span, &format!("unexpected panic_bound_check type: {:?}", func.ty));
+            span_bug!(span, "unexpected panic_bound_check type: {:?}", func.ty);
         };
 
         let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty));
         let (file, line) = self.span_to_fileline_args(span);
         let elems = vec![Operand::Constant(file), Operand::Constant(line)];
+        let scope_id = self.innermost_scope_id();
         // FIXME: We should have this as a constant, rather than a stack variable (to not pollute
         // icache with cold branch code), however to achieve that we either have to rely on rvalue
         // promotion or have some way, in MIR, to create constants.
-        self.cfg.push_assign(block, span, &tuple, // tuple = (file_arg, line_arg);
+        self.cfg.push_assign(block, scope_id, span, &tuple, // tuple = (file_arg, line_arg);
                              Rvalue::Aggregate(AggregateKind::Tuple, elems));
         // FIXME: is this region really correct here?
-        self.cfg.push_assign(block, span, &tuple_ref, // tuple_ref = &tuple;
+        self.cfg.push_assign(block, scope_id, span, &tuple_ref, // tuple_ref = &tuple;
                              Rvalue::Ref(region, BorrowKind::Shared, tuple));
         let cleanup = self.diverge_cleanup();
-        self.cfg.terminate(block, Terminator::Call {
+        self.cfg.terminate(block, scope_id, span, TerminatorKind::Call {
             func: Operand::Constant(func),
             args: vec![Operand::Consume(tuple_ref), index, len],
             destination: None,
@@ -460,7 +564,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         let tup_ty = if let ty::TyRef(_, tyandmut) = ref_ty.sty {
             tyandmut.ty
         } else {
-            self.hir.span_bug(span, &format!("unexpected panic type: {:?}", func.ty));
+            span_bug!(span, "unexpected panic type: {:?}", func.ty);
         };
 
         let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty));
@@ -473,16 +577,18 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         let elems = vec![Operand::Constant(message),
                          Operand::Constant(file),
                          Operand::Constant(line)];
+        let scope_id = self.innermost_scope_id();
         // FIXME: We should have this as a constant, rather than a stack variable (to not pollute
         // icache with cold branch code), however to achieve that we either have to rely on rvalue
         // promotion or have some way, in MIR, to create constants.
-        self.cfg.push_assign(block, span, &tuple, // tuple = (message_arg, file_arg, line_arg);
+        self.cfg.push_assign(block, scope_id, span, &tuple, // [1]
                              Rvalue::Aggregate(AggregateKind::Tuple, elems));
+        // [1] tuple = (message_arg, file_arg, line_arg);
         // FIXME: is this region really correct here?
-        self.cfg.push_assign(block, span, &tuple_ref, // tuple_ref = &tuple;
+        self.cfg.push_assign(block, scope_id, span, &tuple_ref, // tuple_ref = &tuple;
                              Rvalue::Ref(region, BorrowKind::Shared, tuple));
         let cleanup = self.diverge_cleanup();
-        self.cfg.terminate(block, Terminator::Call {
+        self.cfg.terminate(block, scope_id, span, TerminatorKind::Call {
             func: Operand::Constant(func),
             args: vec![Operand::Consume(tuple_ref)],
             cleanup: cleanup,
@@ -502,7 +608,6 @@ impl<'a,'tcx> Builder<'a,'tcx> {
             ty: self.hir.tcx().lookup_item_type(funcdid).ty,
             literal: Literal::Item {
                 def_id: funcdid,
-                kind: ItemKind::Function,
                 substs: self.hir.tcx().mk_substs(Substs::empty())
             }
         }
@@ -517,7 +622,9 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         }, Constant {
             span: span,
             ty: self.hir.tcx().types.u32,
-            literal: self.hir.usize_literal(span_lines.line)
+            literal: Literal::Value {
+                value: ConstVal::Integral(ConstInt::U32(span_lines.line as u32)),
+            },
         })
     }
 
@@ -540,7 +647,7 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
             earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next()
         });
         let next = cfg.start_new_block();
-        cfg.terminate(block, Terminator::Drop {
+        cfg.terminate(block, scope.id, drop_data.span, TerminatorKind::Drop {
             value: drop_data.value.clone(),
             target: next,
             unwind: on_diverge
@@ -550,102 +657,87 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
     block.unit()
 }
 
-fn build_diverge_scope<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn build_diverge_scope<'tcx>(tcx: &TyCtxt<'tcx>,
                              cfg: &mut CFG<'tcx>,
-                             unit_temp: Lvalue<'tcx>,
+                             unit_temp: &Lvalue<'tcx>,
                              scope: &mut Scope<'tcx>,
-                             target: Option<BasicBlock>)
-                             -> BasicBlock {
-    debug_assert!(!scope.drops.is_empty() || scope.free.is_some());
-
-    // First, we build the drops, iterating the drops array in reverse. We do that so that as soon
-    // as we find a `cached_block`, we know that we’re finished and don’t need to do anything else.
-    let mut previous = None;
-    let mut last_drop_block = None;
-    for drop_data in scope.drops.iter_mut().rev() {
-        if let Some(cached_block) = drop_data.cached_block {
-            if let Some((previous_block, previous_value)) = previous {
-                cfg.terminate(previous_block, Terminator::Drop {
-                    value: previous_value,
-                    target: cached_block,
-                    unwind: None
-                });
-                return last_drop_block.unwrap();
-            } else {
-                return cached_block;
-            }
-        } else {
-            let block = cfg.start_new_cleanup_block();
-            drop_data.cached_block = Some(block);
-            if let Some((previous_block, previous_value)) = previous {
-                cfg.terminate(previous_block, Terminator::Drop {
-                    value: previous_value,
-                    target: block,
-                    unwind: None
-                });
-            } else {
-                last_drop_block = Some(block);
-            }
-            previous = Some((block, drop_data.value.clone()));
-        }
-    }
-
-    // Prepare the end target for this chain.
-    let mut target = target.unwrap_or_else(||{
-        let b = cfg.start_new_cleanup_block();
-        cfg.terminate(b, Terminator::Resume);
-        b
-    });
+                             mut target: BasicBlock)
+                             -> BasicBlock
+{
+    // Build up the drops in **reverse** order. The end result will
+    // look like:
+    //
+    //    [drops[n]] -...-> [drops[0]] -> [Free] -> [target]
+    //    |                                    |
+    //    +------------------------------------+
+    //     code for scope
+    //
+    // The code in this function reads from right to left. At each
+    // point, we check for cached blocks representing the
+    // remainder. If everything is cached, we'll just walk right to
+    // left reading the cached results but never create anything.
 
-    // Then, build the free branching into the prepared target.
+    // Next, build up any free.
     if let Some(ref mut free_data) = scope.free {
         target = if let Some(cached_block) = free_data.cached_block {
             cached_block
         } else {
             let into = cfg.start_new_cleanup_block();
-            cfg.terminate(into, build_free(tcx, unit_temp, free_data, target));
+            cfg.terminate(into,
+                          scope.id,
+                          free_data.span,
+                          build_free(tcx, unit_temp, free_data, target));
             free_data.cached_block = Some(into);
             into
-        }
-    };
-
-    if let Some((previous_block, previous_value)) = previous {
-        // Finally, branch into that just-built `target` from the `previous_block`.
-        cfg.terminate(previous_block, Terminator::Drop {
-            value: previous_value,
-            target: target,
-            unwind: None
-        });
-        last_drop_block.unwrap()
-    } else {
-        // If `previous.is_none()`, there were no drops in this scope – we return the
-        // target.
-        target
+        };
+    }
+
+    // Next, build up the drops. Here we iterate the vector in
+    // *forward* order, so that we generate drops[0] first (right to
+    // left in diagram above).
+    for drop_data in &mut scope.drops {
+        target = if let Some(cached_block) = drop_data.cached_block {
+            cached_block
+        } else {
+            let block = cfg.start_new_cleanup_block();
+            cfg.terminate(block,
+                          scope.id,
+                          drop_data.span,
+                          TerminatorKind::Drop {
+                              value: drop_data.value.clone(),
+                              target: target,
+                              unwind: None
+                          });
+            drop_data.cached_block = Some(block);
+            block
+        };
     }
+
+    target
 }
 
-fn build_free<'tcx>(tcx: &ty::ctxt<'tcx>,
-                    unit_temp: Lvalue<'tcx>,
+fn build_free<'tcx>(tcx: &TyCtxt<'tcx>,
+                    unit_temp: &Lvalue<'tcx>,
                     data: &FreeData<'tcx>,
-                    target: BasicBlock) -> Terminator<'tcx> {
+                    target: BasicBlock)
+                    -> TerminatorKind<'tcx> {
     let free_func = tcx.lang_items.require(lang_items::BoxFreeFnLangItem)
                        .unwrap_or_else(|e| tcx.sess.fatal(&e));
     let substs = tcx.mk_substs(Substs::new(
         VecPerParamSpace::new(vec![], vec![], vec![data.item_ty]),
         VecPerParamSpace::new(vec![], vec![], vec![])
     ));
-    Terminator::Call {
+    TerminatorKind::Call {
         func: Operand::Constant(Constant {
             span: data.span,
             ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs),
             literal: Literal::Item {
                 def_id: free_func,
-                kind: ItemKind::Function,
                 substs: substs
             }
         }),
         args: vec![Operand::Consume(data.value.clone())],
-        destination: Some((unit_temp, target)),
+        destination: Some((unit_temp.clone(), target)),
         cleanup: None
     }
 }
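Both diverge_cleanup and build_diverge_scope now build the unwind chain by walking the drops from the outside in and memoizing each cleanup block, so a repeated request reuses the cached blocks instead of emitting new ones. A small sketch of that caching scheme with simplified stand-in types (not the real CFG API):

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Block(usize);

struct DropData {
    name: &'static str,
    cached_block: Option<Block>,
}

struct Cfg {
    blocks: Vec<String>,
}

impl Cfg {
    fn new_cleanup_block(&mut self, text: String) -> Block {
        self.blocks.push(text);
        Block(self.blocks.len() - 1)
    }
}

fn build_diverge_chain(cfg: &mut Cfg, drops: &mut [DropData], resume: Block) -> Block {
    let mut target = resume;
    // drops[0] is the outermost obligation: it is wired up first, so it ends
    // up last on the unwind path (right to left in the diagram above).
    for drop_data in drops.iter_mut() {
        target = match drop_data.cached_block {
            Some(cached) => cached,
            None => {
                let block = cfg.new_cleanup_block(
                    format!("drop {} then goto {:?}", drop_data.name, target));
                drop_data.cached_block = Some(block);
                block
            }
        };
    }
    target
}

fn main() {
    let mut cfg = Cfg { blocks: vec!["resume".to_string()] };
    let resume = Block(0);
    let mut drops = vec![
        DropData { name: "a", cached_block: None },
        DropData { name: "b", cached_block: None },
    ];
    let first = build_diverge_chain(&mut cfg, &mut drops, resume);
    // A second request walks the cached blocks and creates nothing new.
    let second = build_diverge_chain(&mut cfg, &mut drops, resume);
    assert_eq!(first, second);
    assert_eq!(cfg.blocks.len(), 3);
    println!("{:#?}", cfg.blocks);
}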
diff --git a/src/librustc_mir/build/stmt.rs b/src/librustc_mir/build/stmt.rs
deleted file mode 100644 (file)
index 6c0f1c7..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use build::{BlockAnd, BlockAndExtension, Builder};
-use hair::*;
-use rustc::mir::repr::*;
-
-impl<'a,'tcx> Builder<'a,'tcx> {
-    pub fn stmts(&mut self, mut block: BasicBlock, stmts: Vec<StmtRef<'tcx>>) -> BlockAnd<()> {
-        // This convoluted structure is to avoid using recursion as we walk down a list
-        // of statements. Basically, the structure we get back is something like:
-        //
-        //    let x = <init> in {
-        //       let y = <init> in {
-        //           expr1;
-        //           expr2;
-        //       }
-        //    }
-        //
-        // To process this, we keep a stack of (Option<CodeExtent>,
-        // vec::IntoIter<Stmt>) pairs.  At each point we pull off the
-        // top most pair and extract one statement from the
-        // iterator. Once it's complete, we pop the scope from the
-        // first half the pair.
-        let this = self;
-        let mut stmt_lists = vec![(None, stmts.into_iter())];
-        while !stmt_lists.is_empty() {
-            let stmt = {
-                let &mut (_, ref mut stmts) = stmt_lists.last_mut().unwrap();
-                stmts.next()
-            };
-
-            let stmt = match stmt {
-                Some(stmt) => stmt,
-                None => {
-                    let (extent, _) = stmt_lists.pop().unwrap();
-                    if let Some(extent) = extent {
-                        unpack!(block = this.pop_scope(extent, block));
-                    }
-                    continue
-                }
-            };
-
-            let Stmt { span: _, kind } = this.hir.mirror(stmt);
-            match kind {
-                StmtKind::Let { remainder_scope, init_scope, pattern, initializer, stmts } => {
-                    this.push_scope(remainder_scope);
-                    stmt_lists.push((Some(remainder_scope), stmts.into_iter()));
-                    unpack!(block = this.in_scope(init_scope, block, move |this| {
-                        // FIXME #30046                              ^~~~
-                        match initializer {
-                            Some(initializer) => {
-                                this.expr_into_pattern(block, remainder_scope, pattern, initializer)
-                            }
-                            None => {
-                                this.declare_bindings(remainder_scope, &pattern);
-                                block.unit()
-                            }
-                        }
-                    }));
-                }
-
-                StmtKind::Expr { scope, expr } => {
-                    unpack!(block = this.in_scope(scope, block, |this| {
-                        let expr = this.hir.mirror(expr);
-                        let temp = this.temp(expr.ty.clone());
-                        unpack!(block = this.into(&temp, block, expr));
-                        unpack!(block = this.build_drop(block, temp));
-                        block.unit()
-                    }));
-                }
-            }
-        }
-        block.unit()
-    }
-}
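The deleted stmt.rs comment describes the technique it used: walk nested `let` scopes with an explicit stack of (scope marker, statement iterator) pairs rather than recursion, popping and closing a scope whenever its iterator runs dry. A toy sketch of that traversal (the Stmt type here is an illustrative stand-in for the HAIR statements):

enum Stmt {
    Expr(&'static str),
    // A `let` owns the statements that follow it inside its scope.
    Let { name: &'static str, rest: Vec<Stmt> },
}

fn lower(stmts: Vec<Stmt>) -> Vec<String> {
    let mut out = Vec::new();
    let mut stack: Vec<(Option<&'static str>, std::vec::IntoIter<Stmt>)> =
        vec![(None, stmts.into_iter())];
    while !stack.is_empty() {
        // Pull one statement from the innermost iterator; the borrow ends here.
        let next = stack.last_mut().unwrap().1.next();
        match next {
            Some(Stmt::Expr(e)) => out.push(format!("eval {}", e)),
            Some(Stmt::Let { name, rest }) => {
                out.push(format!("enter scope of {}", name));
                stack.push((Some(name), rest.into_iter()));
            }
            None => {
                // Innermost list exhausted: close its scope (if any) and pop.
                let (scope, _) = stack.pop().unwrap();
                if let Some(name) = scope {
                    out.push(format!("exit scope of {}", name));
                }
            }
        }
    }
    out
}

fn main() {
    let program = vec![
        Stmt::Expr("a"),
        Stmt::Let { name: "x", rest: vec![Stmt::Expr("b"), Stmt::Expr("c")] },
    ];
    for line in lower(program) {
        println!("{}", line);
    }
}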
index 1b8fe6505583c43664cc0f7ea3c9c5bdf9a697df..069bd7826bcdc1fc9dca1453dd72cd9b3b08082f 100644 (file)
 
 use dot;
 use rustc::mir::repr::*;
-use rustc::middle::ty;
+use rustc::ty;
 use std::fmt::Debug;
 use std::io::{self, Write};
+use syntax::ast::NodeId;
 
-/// Write a graphviz DOT graph for the given MIR.
-pub fn write_mir_graphviz<W: Write>(mir: &Mir, w: &mut W) -> io::Result<()> {
-    try!(writeln!(w, "digraph Mir {{"));
+/// Write a graphviz DOT graph of a list of MIRs.
+pub fn write_mir_graphviz<'a, 't, W, I>(tcx: &ty::TyCtxt<'t>, iter: I, w: &mut W) -> io::Result<()>
+where W: Write, I: Iterator<Item=(&'a NodeId, &'a Mir<'a>)> {
+    for (&nodeid, mir) in iter {
+        writeln!(w, "digraph Mir_{} {{", nodeid)?;
 
-    // Global graph properties
-    try!(writeln!(w, r#"    graph [fontname="monospace"];"#));
-    try!(writeln!(w, r#"    node [fontname="monospace"];"#));
-    try!(writeln!(w, r#"    edge [fontname="monospace"];"#));
+        // Global graph properties
+        writeln!(w, r#"    graph [fontname="monospace"];"#)?;
+        writeln!(w, r#"    node [fontname="monospace"];"#)?;
+        writeln!(w, r#"    edge [fontname="monospace"];"#)?;
 
-    // Graph label
-    try!(write_graph_label(mir, w));
+        // Graph label
+        write_graph_label(tcx, nodeid, mir, w)?;
 
-    // Nodes
-    for block in mir.all_basic_blocks() {
-        try!(write_node(block, mir, w));
-    }
+        // Nodes
+        for block in mir.all_basic_blocks() {
+            write_node(block, mir, w)?;
+        }
 
-    // Edges
-    for source in mir.all_basic_blocks() {
-        try!(write_edges(source, mir, w));
+        // Edges
+        for source in mir.all_basic_blocks() {
+            write_edges(source, mir, w)?;
+        }
+        writeln!(w, "}}")?
     }
-
-    writeln!(w, "}}")
+    Ok(())
 }
 
-/// Write a graphviz DOT node for the given basic block.
-fn write_node<W: Write>(block: BasicBlock, mir: &Mir, w: &mut W) -> io::Result<()> {
+/// Write a graphviz HTML-styled label for the given basic block, with
+/// all necessary escaping already performed. (This is suitable for
+/// emitting directly, as is done in this module, or for use with the
+/// LabelText::HtmlStr from libgraphviz.)
+///
+/// `init` and `fini` are callbacks for emitting additional rows of
+/// data (using HTML enclosed with `<tr>` in the emitted text).
+pub fn write_node_label<W: Write, INIT, FINI>(block: BasicBlock,
+                                              mir: &Mir,
+                                              w: &mut W,
+                                              num_cols: u32,
+                                              init: INIT,
+                                              fini: FINI) -> io::Result<()>
+    where INIT: Fn(&mut W) -> io::Result<()>,
+          FINI: Fn(&mut W) -> io::Result<()>
+{
     let data = mir.basic_block_data(block);
 
-    // Start a new node with the label to follow, in one of DOT's pseudo-HTML tables.
-    try!(write!(w, r#"    {} [shape="none", label=<"#, node(block)));
-    try!(write!(w, r#"<table border="0" cellborder="1" cellspacing="0">"#));
+    write!(w, r#"<table border="0" cellborder="1" cellspacing="0">"#)?;
 
     // Basic block number at the top.
-    try!(write!(w, r#"<tr><td bgcolor="gray" align="center">{}</td></tr>"#, block.index()));
+    write!(w, r#"<tr><td {attrs} colspan="{colspan}">{blk}</td></tr>"#,
+           attrs=r#"bgcolor="gray" align="center""#,
+           colspan=num_cols,
+           blk=block.index())?;
+
+    init(w)?;
 
     // List of statements in the middle.
     if !data.statements.is_empty() {
-        try!(write!(w, r#"<tr><td align="left" balign="left">"#));
+        write!(w, r#"<tr><td align="left" balign="left">"#)?;
         for statement in &data.statements {
-            try!(write!(w, "{}<br/>", escape(statement)));
+            write!(w, "{}<br/>", escape(statement))?;
         }
-        try!(write!(w, "</td></tr>"));
+        write!(w, "</td></tr>")?;
     }
 
     // Terminator head at the bottom, not including the list of successor blocks. Those will be
     // displayed as labels on the edges between blocks.
     let mut terminator_head = String::new();
-    data.terminator().fmt_head(&mut terminator_head).unwrap();
-    try!(write!(w, r#"<tr><td align="left">{}</td></tr>"#, dot::escape_html(&terminator_head)));
+    data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
+    write!(w, r#"<tr><td align="left">{}</td></tr>"#, dot::escape_html(&terminator_head))?;
+
+    fini(w)?;
+
+    // Close the table
+    writeln!(w, "</table>")
+}
 
-    // Close the table, node label, and the node itself.
-    writeln!(w, "</table>>];")
+/// Write a graphviz DOT node for the given basic block.
+fn write_node<W: Write>(block: BasicBlock, mir: &Mir, w: &mut W) -> io::Result<()> {
+    // Start a new node with the label to follow, in one of DOT's pseudo-HTML tables.
+    write!(w, r#"    {} [shape="none", label=<"#, node(block))?;
+    write_node_label(block, mir, w, 1, |_| Ok(()), |_| Ok(()))?;
+    // Close the node label and the node itself.
+    writeln!(w, ">];")
 }
 
 /// Write graphviz DOT edges with labels between the given basic block and all of its successors.
 fn write_edges<W: Write>(source: BasicBlock, mir: &Mir, w: &mut W) -> io::Result<()> {
     let terminator = &mir.basic_block_data(source).terminator();
-    let labels = terminator.fmt_successor_labels();
+    let labels = terminator.kind.fmt_successor_labels();
 
     for (&target, label) in terminator.successors().iter().zip(labels) {
-        try!(writeln!(w, r#"    {} -> {} [label="{}"];"#, node(source), node(target), label));
+        writeln!(w, r#"    {} -> {} [label="{}"];"#, node(source), node(target), label)?;
     }
 
     Ok(())
@@ -84,41 +116,42 @@ fn write_edges<W: Write>(source: BasicBlock, mir: &Mir, w: &mut W) -> io::Result
 /// Write the graphviz DOT label for the overall graph. This is essentially a block of text that
 /// will appear below the graph, showing the type of the `fn` this MIR represents and the types of
 /// all the variables and temporaries.
-fn write_graph_label<W: Write>(mir: &Mir, w: &mut W) -> io::Result<()> {
-    try!(write!(w, "    label=<fn("));
+fn write_graph_label<W: Write>(tcx: &ty::TyCtxt, nid: NodeId, mir: &Mir, w: &mut W)
+-> io::Result<()> {
+    write!(w, "    label=<fn {}(", dot::escape_html(&tcx.node_path_str(nid)))?;
 
     // fn argument types.
     for (i, arg) in mir.arg_decls.iter().enumerate() {
         if i > 0 {
-            try!(write!(w, ", "));
+            write!(w, ", ")?;
         }
-        try!(write!(w, "{:?}: {}", Lvalue::Arg(i as u32), escape(&arg.ty)));
+        write!(w, "{:?}: {}", Lvalue::Arg(i as u32), escape(&arg.ty))?;
     }
 
-    try!(write!(w, ") -&gt; "));
+    write!(w, ") -&gt; ")?;
 
     // fn return type.
     match mir.return_ty {
-        ty::FnOutput::FnConverging(ty) => try!(write!(w, "{}", escape(ty))),
-        ty::FnOutput::FnDiverging => try!(write!(w, "!")),
+        ty::FnOutput::FnConverging(ty) => write!(w, "{}", escape(ty))?,
+        ty::FnOutput::FnDiverging => write!(w, "!")?,
     }
 
-    try!(write!(w, r#"<br align="left"/>"#));
+    write!(w, r#"<br align="left"/>"#)?;
 
     // User variable types (including the user's name in a comment).
     for (i, var) in mir.var_decls.iter().enumerate() {
-        try!(write!(w, "let "));
+        write!(w, "let ")?;
         if var.mutability == Mutability::Mut {
-            try!(write!(w, "mut "));
+            write!(w, "mut ")?;
         }
-        try!(write!(w, r#"{:?}: {}; // {}<br align="left"/>"#,
-                    Lvalue::Var(i as u32), escape(&var.ty), var.name));
+        write!(w, r#"{:?}: {}; // {}<br align="left"/>"#,
+               Lvalue::Var(i as u32), escape(&var.ty), var.name)?;
     }
 
     // Compiler-introduced temporary types.
     for (i, temp) in mir.temp_decls.iter().enumerate() {
-        try!(write!(w, r#"let mut {:?}: {};<br align="left"/>"#,
-                    Lvalue::Temp(i as u32), escape(&temp.ty)));
+        write!(w, r#"let mut {:?}: {};<br align="left"/>"#,
+               Lvalue::Temp(i as u32), escape(&temp.ty))?;
     }
 
     writeln!(w, ">;")
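The refactored entry points above are meant to be usable from outside this module: write_mir_graphviz now emits one `digraph Mir_<node_id>` per item from an iterator of (&NodeId, &Mir) pairs, and write_node_label exposes `init`/`fini` hooks for splicing extra `<tr>` rows into a block's table. A minimal sketch of a caller of the new hooks, assuming the same `w: &mut W`, `block` and `mir` values that write_node has in scope (the row contents are illustrative only):

    // Sketch: emit one extra row above and one below the statement rows.
    write!(w, r#"    {} [shape="none", label=<"#, node(block))?;
    write_node_label(block, mir, w, 1,
                     |w| write!(w, r#"<tr><td align="left">extra top row</td></tr>"#),
                     |w| write!(w, r#"<tr><td align="left">extra bottom row</td></tr>"#))?;
    writeln!(w, ">];")?;

For the whole-crate case, a MirMap's `map` field (a NodeMap<Mir>) yields exactly the (&NodeId, &Mir) items the new signature expects, so a call shaped like `write_mir_graphviz(tcx, mir_map.map.iter(), &mut file)?` should suffice (`mir_map` and `file` are hypothetical names here).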
index 49617ed5171bdd39e100bc8cc8185e120c83f952..f0ed25902f7156aaf9d604bf1a53685f4b53a9b7 100644 (file)
@@ -12,7 +12,7 @@ use hair::*;
 use hair::cx::Cx;
 use hair::cx::to_ref::ToRef;
 use rustc::middle::region::{BlockRemainder, CodeExtentData};
-use rustc_front::hir;
+use rustc::hir;
 use syntax::ast;
 
 impl<'tcx> Mirror<'tcx> for &'tcx hir::Block {
@@ -21,62 +21,52 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Block {
     fn make_mirror<'a>(self, cx: &mut Cx<'a, 'tcx>) -> Block<'tcx> {
         // We have to eagerly translate the "spine" of the statements
         // in order to get the lexical scoping correctly.
-        let stmts = mirror_stmts(cx, self.id, self.stmts.iter().enumerate());
+        let stmts = mirror_stmts(cx, self.id, &*self.stmts);
         Block {
             extent: cx.tcx.region_maps.node_extent(self.id),
             span: self.span,
             stmts: stmts,
-            expr: self.expr.to_ref(),
+            expr: self.expr.to_ref()
         }
     }
 }
 
-fn mirror_stmts<'a,'tcx:'a,STMTS>(cx: &mut Cx<'a,'tcx>,
-                                  block_id: ast::NodeId,
-                                  mut stmts: STMTS)
-                                  -> Vec<StmtRef<'tcx>>
-    where STMTS: Iterator<Item=(usize, &'tcx hir::Stmt)>
+fn mirror_stmts<'a,'tcx:'a>(cx: &mut Cx<'a,'tcx>,
+                            block_id: ast::NodeId,
+                            stmts: &'tcx [hir::Stmt])
+                            -> Vec<StmtRef<'tcx>>
 {
     let mut result = vec![];
-    while let Some((index, stmt)) = stmts.next() {
+    for (index, stmt) in stmts.iter().enumerate() {
         match stmt.node {
             hir::StmtExpr(ref expr, id) | hir::StmtSemi(ref expr, id) =>
-                result.push(
-                    StmtRef::Mirror(
-                        Box::new(Stmt { span: stmt.span,
-                                        kind: StmtKind::Expr {
-                                            scope: cx.tcx.region_maps.node_extent(id),
-                                            expr: expr.to_ref() } }))),
-
-            hir::StmtDecl(ref decl, id) => {
-                match decl.node {
-                    hir::DeclItem(..) => { /* ignore for purposes of the MIR */ }
-                    hir::DeclLocal(ref local) => {
-                        let remainder_extent = CodeExtentData::Remainder(BlockRemainder {
-                            block: block_id,
-                            first_statement_index: index as u32,
-                        });
-                        let remainder_extent =
-                            cx.tcx.region_maps.lookup_code_extent(remainder_extent);
-
-                        // pull in all following statements, since
-                        // they are within the scope of this let:
-                        let following_stmts = mirror_stmts(cx, block_id, stmts);
-
-                        let pattern = cx.irrefutable_pat(&local.pat);
-                        result.push(StmtRef::Mirror(Box::new(Stmt {
-                            span: stmt.span,
-                            kind: StmtKind::Let {
-                                remainder_scope: remainder_extent,
-                                init_scope: cx.tcx.region_maps.node_extent(id),
-                                pattern: pattern,
-                                initializer: local.init.to_ref(),
-                                stmts: following_stmts,
-                            },
-                        })));
-
-                        return result;
+                result.push(StmtRef::Mirror(Box::new(Stmt {
+                    span: stmt.span,
+                    kind: StmtKind::Expr {
+                        scope: cx.tcx.region_maps.node_extent(id),
+                        expr: expr.to_ref()
                     }
+                }))),
+            hir::StmtDecl(ref decl, id) => match decl.node {
+                hir::DeclItem(..) => { /* ignore for purposes of the MIR */ }
+                hir::DeclLocal(ref local) => {
+                    let remainder_extent = CodeExtentData::Remainder(BlockRemainder {
+                        block: block_id,
+                        first_statement_index: index as u32,
+                    });
+                    let remainder_extent =
+                        cx.tcx.region_maps.lookup_code_extent(remainder_extent);
+
+                    let pattern = cx.irrefutable_pat(&local.pat);
+                    result.push(StmtRef::Mirror(Box::new(Stmt {
+                        span: stmt.span,
+                        kind: StmtKind::Let {
+                            remainder_scope: remainder_extent,
+                            init_scope: cx.tcx.region_maps.node_extent(id),
+                            pattern: pattern,
+                            initializer: local.init.to_ref(),
+                        },
+                    })));
                 }
             }
         }
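With the recursion gone, every statement of a block now lands in one flat Vec<StmtRef>; the `stmts` field that used to nest the trailing statements under a `let` is dropped from StmtKind::Let as well (see the hair/mod.rs hunk further down), while `remainder_scope` and `first_statement_index` still describe the scope the binding covers. A hypothetical example of how the two match arms above map onto HIR statements:

    fn demo() {
        let x = 1 + 1;      // hir::StmtDecl(DeclLocal) -> StmtKind::Let { .. }
        println!("{}", x);  // hir::StmtSemi            -> StmtKind::Expr { .. }
    }
    // Previously the second statement would have been pulled inside the Let's
    // `stmts`; after this change both are siblings in the same Vec<StmtRef>.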
index ac1cff527fe91f68cf6976db76b5ed4e5a7034aa..12dcb32da3fcd81fbf9ff3f6892ff66244553a90 100644 (file)
 
 use hair::*;
 use rustc_data_structures::fnv::FnvHashMap;
+use rustc_const_math::ConstInt;
 use hair::cx::Cx;
 use hair::cx::block;
 use hair::cx::to_ref::ToRef;
-use rustc::front::map;
-use rustc::middle::def::Def;
-use rustc::middle::const_eval;
+use rustc::hir::map;
+use rustc::hir::def::Def;
+use rustc::middle::const_val::ConstVal;
+use rustc_const_eval as const_eval;
 use rustc::middle::region::CodeExtent;
-use rustc::middle::pat_util;
-use rustc::middle::ty::{self, VariantDef, Ty};
+use rustc::hir::pat_util;
+use rustc::ty::{self, VariantDef, Ty};
 use rustc::mir::repr::*;
-use rustc_front::hir;
-use rustc_front::util as hir_util;
-use syntax::parse::token;
+use rustc::hir;
 use syntax::ptr::P;
 
 impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
@@ -62,12 +62,12 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
                     let method = method_callee(cx, self, ty::MethodCall::expr(self.id));
 
                     let sig = match method.ty.sty {
-                        ty::TyBareFn(_, fn_ty) => &fn_ty.sig,
-                        _ => cx.tcx.sess.span_bug(self.span, "type of method is not an fn")
+                        ty::TyFnDef(_, _, fn_ty) => &fn_ty.sig,
+                        _ => span_bug!(self.span, "type of method is not an fn")
                     };
 
                     let sig = cx.tcx.no_late_bound_regions(sig).unwrap_or_else(|| {
-                        cx.tcx.sess.span_bug(self.span, "method call has late-bound regions")
+                        span_bug!(self.span, "method call has late-bound regions")
                     });
 
                     assert_eq!(sig.inputs.len(), 2);
@@ -127,7 +127,7 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
             hir::ExprAddrOf(mutbl, ref expr) => {
                 let region = match expr_ty.sty {
                     ty::TyRef(r, _) => r,
-                    _ => cx.tcx.sess.span_bug(expr.span, "type of & not region"),
+                    _ => span_bug!(expr.span, "type of & not region"),
                 };
                 ExprKind::Borrow {
                     region: *region,
@@ -149,7 +149,7 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
 
             hir::ExprAssignOp(op, ref lhs, ref rhs) => {
                 if cx.tcx.is_method_call(self.id) {
-                    let pass_args = if hir_util::is_by_value_binop(op.node) {
+                    let pass_args = if op.node.is_by_value() {
                         PassArgs::ByValue
                     } else {
                         PassArgs::ByRef
@@ -171,7 +171,7 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
 
             hir::ExprBinary(op, ref lhs, ref rhs) => {
                 if cx.tcx.is_method_call(self.id) {
-                    let pass_args = if hir_util::is_by_value_binop(op.node) {
+                    let pass_args = if op.node.is_by_value() {
                         PassArgs::ByValue
                     } else {
                         PassArgs::ByRef
@@ -228,28 +228,37 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
                 }
             }
 
-            hir::ExprUnary(op, ref arg) => {
+            hir::ExprUnary(hir::UnOp::UnNot, ref arg) => {
                 if cx.tcx.is_method_call(self.id) {
                     overloaded_operator(cx, self, ty::MethodCall::expr(self.id),
                                         PassArgs::ByValue, arg.to_ref(), vec![])
                 } else {
-                    // FIXME overflow
-                    let op = match op {
-                        hir::UnOp::UnNot => UnOp::Not,
-                        hir::UnOp::UnNeg => UnOp::Neg,
-                        hir::UnOp::UnDeref => {
-                            cx.tcx.sess.span_bug(
-                                self.span,
-                                "UnDeref should have been handled elsewhere");
-                        }
-                    };
                     ExprKind::Unary {
-                        op: op,
+                        op: UnOp::Not,
                         arg: arg.to_ref(),
                     }
                 }
             }
 
+            hir::ExprUnary(hir::UnOp::UnNeg, ref arg) => {
+                if cx.tcx.is_method_call(self.id) {
+                    overloaded_operator(cx, self, ty::MethodCall::expr(self.id),
+                                        PassArgs::ByValue, arg.to_ref(), vec![])
+                } else {
+                    // FIXME runtime-overflow
+                    if let hir::ExprLit(_) = arg.node {
+                        ExprKind::Literal {
+                            literal: cx.const_eval_literal(self),
+                        }
+                    } else {
+                        ExprKind::Unary {
+                            op: UnOp::Neg,
+                            arg: arg.to_ref(),
+                        }
+                    }
+                }
+            }
+
             hir::ExprStruct(_, ref fields, ref base) => {
                 match expr_ty.sty {
                     ty::TyStruct(adt, substs) => {
@@ -287,16 +296,18 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
                                 }
                             }
                             ref def => {
-                                cx.tcx.sess.span_bug(
+                                span_bug!(
                                     self.span,
-                                    &format!("unexpected def: {:?}", def));
+                                    "unexpected def: {:?}",
+                                    def);
                             }
                         }
                     }
                     _ => {
-                        cx.tcx.sess.span_bug(
+                        span_bug!(
                             self.span,
-                            &format!("unexpected type for struct literal: {:?}", expr_ty));
+                            "unexpected type for struct literal: {:?}",
+                            expr_ty);
                     }
                 }
             }
@@ -306,9 +317,9 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
                 let (def_id, substs) = match closure_ty.sty {
                     ty::TyClosure(def_id, ref substs) => (def_id, substs),
                     _ => {
-                        cx.tcx.sess.span_bug(self.span,
-                                             &format!("closure expr w/o closure type: {:?}",
-                                                      closure_ty));
+                        span_bug!(self.span,
+                                  "closure expr w/o closure type: {:?}",
+                                  closure_ty);
                     }
                 };
                 let upvars = cx.tcx.with_freevars(self.id, |freevars| {
@@ -324,44 +335,16 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
                 }
             }
 
-            hir::ExprRange(ref start, ref end) => {
-                let range_ty = cx.tcx.expr_ty(self);
-                let (adt_def, substs) = match range_ty.sty {
-                    ty::TyStruct(adt_def, substs) => (adt_def, substs),
-                    _ => {
-                        cx.tcx.sess.span_bug(self.span, "unexpanded ast");
-                    }
-                };
-
-                let field_expr_ref = |s: &'tcx P<hir::Expr>, name: &str| {
-                    let name = token::intern(name);
-                    let index = adt_def.variants[0].index_of_field_named(name).unwrap();
-                    FieldExprRef { name: Field::new(index), expr: s.to_ref() }
-                };
-
-                let start_field = start.as_ref()
-                                       .into_iter()
-                                       .map(|s| field_expr_ref(s, "start"));
-
-                let end_field = end.as_ref()
-                                   .into_iter()
-                                   .map(|e| field_expr_ref(e, "end"));
-
-                ExprKind::Adt {
-                    adt_def: adt_def,
-                    variant_index: 0,
-                    substs: substs,
-                    fields: start_field.chain(end_field).collect(),
-                    base: None,
-                }
-            }
-
             hir::ExprPath(..) => {
                 convert_path_expr(cx, self)
             }
 
-            hir::ExprInlineAsm(ref asm) => {
-                ExprKind::InlineAsm { asm: asm }
+            hir::ExprInlineAsm(ref asm, ref outputs, ref inputs) => {
+                ExprKind::InlineAsm {
+                    asm: asm,
+                    outputs: outputs.to_ref(),
+                    inputs: inputs.to_ref()
+                }
             }
 
             // Now comes the rote stuff:
@@ -371,7 +354,10 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
                 count: TypedConstVal {
                     ty: cx.tcx.expr_ty(c),
                     span: c.span,
-                    value: const_eval::eval_const_expr(cx.tcx, c)
+                    value: match const_eval::eval_const_expr(cx.tcx, c) {
+                        ConstVal::Integral(ConstInt::Usize(u)) => u,
+                        other => bug!("constant evaluation of repeat count yielded {:?}", other),
+                    },
                 }
             },
             hir::ExprRet(ref v) =>
@@ -398,14 +384,16 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
                     ty::TyStruct(adt_def, _) =>
                         adt_def.variants[0].index_of_field_named(name.node),
                     ref ty =>
-                        cx.tcx.sess.span_bug(
+                        span_bug!(
                             self.span,
-                            &format!("field of non-struct: {:?}", ty)),
+                            "field of non-struct: {:?}",
+                            ty),
                 };
                 let index = index.unwrap_or_else(|| {
-                    cx.tcx.sess.span_bug(
+                    span_bug!(
                         self.span,
-                        &format!("no index found for field `{}`", name.node));
+                        "no index found for field `{}`",
+                        name.node)
                 });
                 ExprKind::Field { lhs: source.to_ref(), name: Field::new(index) }
             }
@@ -489,8 +477,7 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
                             Some(ty::FnConverging(&ty::TyS {
                                 sty: ty::TyRef(region, mt), ..
                             })) => (region, mt.mutbl),
-                            _ => cx.tcx.sess.span_bug(
-                                expr.span, "autoderef returned bad type")
+                            _ => span_bug!(expr.span, "autoderef returned bad type")
                         };
 
                         expr = Expr {
@@ -614,7 +601,6 @@ fn method_callee<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>,
         kind: ExprKind::Literal {
             literal: Literal::Item {
                 def_id: callee.def_id,
-                kind: ItemKind::Method,
                 substs: callee.substs,
             },
         },
@@ -651,14 +637,13 @@ fn convert_path_expr<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, expr: &'tcx hir::Expr)
     let substs = cx.tcx.mk_substs(cx.tcx.node_id_item_substs(expr.id).substs);
     // Otherwise there may be def_map borrow conflicts
     let def = cx.tcx.def_map.borrow()[&expr.id].full_def();
-    let (def_id, kind) = match def {
+    let def_id = match def {
         // A regular function.
-        Def::Fn(def_id) => (def_id, ItemKind::Function),
-        Def::Method(def_id) => (def_id, ItemKind::Method),
+        Def::Fn(def_id) | Def::Method(def_id) => def_id,
         Def::Struct(def_id) => match cx.tcx.node_id_to_type(expr.id).sty {
             // A tuple-struct constructor. Should only be reached if not called in the same
             // expression.
-            ty::TyBareFn(..) => (def_id, ItemKind::Function),
+            ty::TyFnDef(..) => def_id,
             // A unit struct which is used as a value. We return a completely different ExprKind
             // here to account for this special case.
             ty::TyStruct(adt_def, substs) => return ExprKind::Adt {
@@ -668,12 +653,12 @@ fn convert_path_expr<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, expr: &'tcx hir::Expr)
                 fields: vec![],
                 base: None
             },
-            ref sty => panic!("unexpected sty: {:?}", sty)
+            ref sty => bug!("unexpected sty: {:?}", sty)
         },
         Def::Variant(enum_id, variant_id) => match cx.tcx.node_id_to_type(expr.id).sty {
             // A variant constructor. Should only be reached if not called in the same
             // expression.
-            ty::TyBareFn(..) => (variant_id, ItemKind::Function),
+            ty::TyFnDef(..) => variant_id,
             // A unit variant, similar special case to the struct case above.
             ty::TyEnum(adt_def, substs) => {
                 debug_assert!(adt_def.did == enum_id);
@@ -686,15 +671,20 @@ fn convert_path_expr<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, expr: &'tcx hir::Expr)
                     base: None
                 };
             },
-            ref sty => panic!("unexpected sty: {:?}", sty)
+            ref sty => bug!("unexpected sty: {:?}", sty)
         },
         Def::Const(def_id) |
         Def::AssociatedConst(def_id) => {
-            if let Some(v) = cx.try_const_eval_literal(expr) {
-                return ExprKind::Literal { literal: v };
-            } else {
-                (def_id, ItemKind::Constant)
+            let substs = Some(cx.tcx.node_id_item_substs(expr.id).substs);
+            if let Some((e, _)) = const_eval::lookup_const_by_id(cx.tcx, def_id, substs) {
+                // FIXME ConstVal can't be yet used with adjustments, as they would be lost.
+                if !cx.tcx.tables.borrow().adjustments.contains_key(&e.id) {
+                    if let Some(v) = cx.try_const_eval_literal(e) {
+                        return ExprKind::Literal { literal: v };
+                    }
+                }
             }
+            def_id
         }
 
         Def::Static(node_id, _) => return ExprKind::StaticRef {
@@ -705,12 +695,13 @@ fn convert_path_expr<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, expr: &'tcx hir::Expr)
         def @ Def::Upvar(..) => return convert_var(cx, expr, def),
 
         def =>
-            cx.tcx.sess.span_bug(
+            span_bug!(
                 expr.span,
-                &format!("def `{:?}` not yet implemented", def)),
+                "def `{:?}` not yet implemented",
+                def),
     };
     ExprKind::Literal {
-        literal: Literal::Item { def_id: def_id, kind: kind, substs: substs }
+        literal: Literal::Item { def_id: def_id, substs: substs }
     }
 }
 
@@ -736,12 +727,12 @@ fn convert_var<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>,
                     match expr.node {
                         hir::ExprClosure(_, _, ref body) => body.id,
                         _ => {
-                            cx.tcx.sess.span_bug(expr.span, "closure expr is not a closure expr");
+                            span_bug!(expr.span, "closure expr is not a closure expr");
                         }
                     }
                 }
                 _ => {
-                    cx.tcx.sess.span_bug(expr.span, "ast-map has garbage for closure expr");
+                    span_bug!(expr.span, "ast-map has garbage for closure expr");
                 }
             };
 
@@ -758,7 +749,7 @@ fn convert_var<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>,
             let region = cx.tcx.mk_region(region);
 
             let self_expr = match cx.tcx.closure_kind(cx.tcx.map.local_def_id(closure_expr_id)) {
-                ty::ClosureKind::FnClosureKind => {
+                ty::ClosureKind::Fn => {
                     let ref_closure_ty =
                         cx.tcx.mk_ref(region,
                                    ty::TypeAndMut { ty: closure_ty,
@@ -777,7 +768,7 @@ fn convert_var<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>,
                         }
                     }
                 }
-                ty::ClosureKind::FnMutClosureKind => {
+                ty::ClosureKind::FnMut => {
                     let ref_closure_ty =
                         cx.tcx.mk_ref(region,
                                    ty::TypeAndMut { ty: closure_ty,
@@ -796,7 +787,7 @@ fn convert_var<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>,
                         }
                     }
                 }
-                ty::ClosureKind::FnOnceClosureKind => {
+                ty::ClosureKind::FnOnce => {
                     Expr {
                         ty: closure_ty,
                         temp_lifetime: temp_lifetime,
@@ -821,9 +812,10 @@ fn convert_var<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>,
             let upvar_capture = match cx.tcx.upvar_capture(upvar_id) {
                 Some(c) => c,
                 None => {
-                    cx.tcx.sess.span_bug(
+                    span_bug!(
                         expr.span,
-                        &format!("no upvar_capture for {:?}", upvar_id));
+                        "no upvar_capture for {:?}",
+                        upvar_id);
                 }
             };
             match upvar_capture {
@@ -846,7 +838,7 @@ fn convert_var<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>,
             }
         }
 
-        _ => cx.tcx.sess.span_bug(expr.span, "type of & not region"),
+        _ => span_bug!(expr.span, "type of & not region"),
     }
 }
 
@@ -869,7 +861,7 @@ fn bin_op(op: hir::BinOp_) -> BinOp {
         hir::BinOp_::BiNe => BinOp::Ne,
         hir::BinOp_::BiGe => BinOp::Ge,
         hir::BinOp_::BiGt => BinOp::Gt,
-        _ => panic!("no equivalent for ast binop {:?}", op),
+        _ => bug!("no equivalent for ast binop {:?}", op),
     }
 }
 
@@ -966,7 +958,7 @@ fn overloaded_lvalue<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>,
 
 fn capture_freevar<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>,
                                  closure_expr: &'tcx hir::Expr,
-                                 freevar: &ty::Freevar,
+                                 freevar: &hir::Freevar,
                                  freevar_ty: Ty<'tcx>)
                                  -> ExprRef<'tcx> {
     let id_var = freevar.def.var_id();
@@ -1009,7 +1001,7 @@ fn loop_label<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, expr: &'tcx hir::Expr) -> Cod
     match cx.tcx.def_map.borrow().get(&expr.id).map(|d| d.full_def()) {
         Some(Def::Label(loop_id)) => cx.tcx.region_maps.node_extent(loop_id),
         d => {
-            cx.tcx.sess.span_bug(expr.span, &format!("loop scope resolved to {:?}", d));
+            span_bug!(expr.span, "loop scope resolved to {:?}", d);
         }
     }
 }
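One effect of splitting the unary arms is that a negation applied directly to a literal is now folded through cx.const_eval_literal(self) rather than emitted as a runtime UnOp::Neg (the `FIXME runtime-overflow` marks what is still deferred). A plain example of the kind of expression this arm covers; presumably the interesting case is a negative literal at the edge of its type's range:

    fn demo() -> i8 {
        // Lowered as a single constant via const_eval_literal; building it as
        // Neg(128i8) would not work, since 128 is out of range for i8.
        let min: i8 = -128;
        min
    }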
index 7019b40bb252117b73efa8e8ce4c5168b42474dd..c3a5fbd967c84b4279ee846bcee42a1cf994b49c 100644 (file)
 use hair::*;
 use rustc::mir::repr::*;
 
-use rustc::middle::const_eval::{self, ConstVal};
-use rustc::middle::infer::InferCtxt;
-use rustc::middle::ty::{self, Ty};
-use syntax::codemap::Span;
+use rustc::middle::const_val::ConstVal;
+use rustc_const_eval as const_eval;
+use rustc::hir::def_id::DefId;
+use rustc::infer::InferCtxt;
+use rustc::ty::subst::{Subst, Substs};
+use rustc::ty::{self, Ty, TyCtxt};
 use syntax::parse::token;
-use rustc_front::hir;
+use rustc::hir;
+use rustc_const_math::{ConstInt, ConstUsize};
 
 #[derive(Copy, Clone)]
 pub struct Cx<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     infcx: &'a InferCtxt<'a, 'tcx>,
 }
 
@@ -50,8 +53,11 @@ impl<'a,'tcx:'a> Cx<'a, 'tcx> {
         self.tcx.types.usize
     }
 
-    pub fn usize_literal(&mut self, value: usize) -> Literal<'tcx> {
-        Literal::Value { value: ConstVal::Uint(value as u64) }
+    pub fn usize_literal(&mut self, value: u64) -> Literal<'tcx> {
+        match ConstUsize::new(value, self.tcx.sess.target.uint_type) {
+            Ok(val) => Literal::Value { value: ConstVal::Integral(ConstInt::Usize(val))},
+            Err(_) => bug!("usize literal out of range for target"),
+        }
     }
 
     pub fn bool_ty(&mut self) -> Ty<'tcx> {
@@ -80,9 +86,44 @@ impl<'a,'tcx:'a> Cx<'a, 'tcx> {
 
     pub fn try_const_eval_literal(&mut self, e: &hir::Expr) -> Option<Literal<'tcx>> {
         let hint = const_eval::EvalHint::ExprTypeChecked;
-        const_eval::eval_const_expr_partial(self.tcx, e, hint, None)
-            .ok()
-            .map(|v| Literal::Value { value: v })
+        const_eval::eval_const_expr_partial(self.tcx, e, hint, None).ok().and_then(|v| {
+            match v {
+                // All of these contain local IDs, unsuitable for storing in MIR.
+                ConstVal::Struct(_) | ConstVal::Tuple(_) |
+                ConstVal::Array(..) | ConstVal::Repeat(..) |
+                ConstVal::Function(_) => None,
+
+                _ => Some(Literal::Value { value: v })
+            }
+        })
+    }
+
+    pub fn trait_method(&mut self,
+                        trait_def_id: DefId,
+                        method_name: &str,
+                        self_ty: Ty<'tcx>,
+                        params: Vec<Ty<'tcx>>)
+                        -> (Ty<'tcx>, Literal<'tcx>) {
+        let method_name = token::intern(method_name);
+        let substs = Substs::new_trait(params, vec![], self_ty);
+        for trait_item in self.tcx.trait_items(trait_def_id).iter() {
+            match *trait_item {
+                ty::ImplOrTraitItem::MethodTraitItem(ref method) => {
+                    if method.name == method_name {
+                        let method_ty = self.tcx.lookup_item_type(method.def_id);
+                        let method_ty = method_ty.ty.subst(self.tcx, &substs);
+                        return (method_ty, Literal::Item {
+                            def_id: method.def_id,
+                            substs: self.tcx.mk_substs(substs),
+                        });
+                    }
+                }
+                ty::ImplOrTraitItem::ConstTraitItem(..) |
+                ty::ImplOrTraitItem::TypeTraitItem(..) => {}
+            }
+        }
+
+        bug!("found no method `{}` in `{:?}`", method_name, trait_def_id);
     }
 
     pub fn num_variants(&mut self, adt_def: ty::AdtDef<'tcx>) -> usize {
@@ -99,11 +140,7 @@ impl<'a,'tcx:'a> Cx<'a, 'tcx> {
         self.tcx.type_needs_drop_given_env(ty, &self.infcx.parameter_environment)
     }
 
-    pub fn span_bug(&mut self, span: Span, message: &str) -> ! {
-        self.tcx.sess.span_bug(span, message)
-    }
-
-    pub fn tcx(&self) -> &'a ty::ctxt<'tcx> {
+    pub fn tcx(&self) -> &'a TyCtxt<'tcx> {
         self.tcx
     }
 }
index 6f4375d53ec4bf95f24531532d08b93682fa575d..990ba9e786c3019435a02d715532e4cd4a98a0c8 100644 (file)
 use hair::*;
 use hair::cx::Cx;
 use rustc_data_structures::fnv::FnvHashMap;
-use rustc::middle::const_eval;
-use rustc::middle::def::Def;
-use rustc::middle::pat_util::{pat_is_resolved_const, pat_is_binding};
-use rustc::middle::ty::{self, Ty};
+use rustc_const_eval as const_eval;
+use rustc::hir::def::Def;
+use rustc::hir::pat_util::{pat_is_resolved_const, pat_is_binding};
+use rustc::ty::{self, Ty};
 use rustc::mir::repr::*;
-use rustc_front::hir::{self, PatKind};
+use rustc::hir::{self, PatKind};
 use syntax::ast;
 use syntax::codemap::Span;
 use syntax::ptr::P;
@@ -63,6 +63,8 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
     }
 
     fn to_pattern(&mut self, pat: &hir::Pat) -> Pattern<'tcx> {
+        let mut ty = self.cx.tcx.node_id_to_type(pat.id);
+
         let kind = match pat.node {
             PatKind::Wild => PatternKind::Wild,
 
@@ -84,24 +86,34 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
             {
                 let def = self.cx.tcx.def_map.borrow().get(&pat.id).unwrap().full_def();
                 match def {
-                    Def::Const(def_id) | Def::AssociatedConst(def_id) =>
-                        match const_eval::lookup_const_by_id(self.cx.tcx, def_id,
-                                                             Some(pat.id), None) {
-                            Some(const_expr) => {
-                                let pat = const_eval::const_expr_to_pat(self.cx.tcx, const_expr,
-                                                                        pat.span);
-                                return self.to_pattern(&pat);
+                    Def::Const(def_id) | Def::AssociatedConst(def_id) => {
+                        let substs = Some(self.cx.tcx.node_id_item_substs(pat.id).substs);
+                        match const_eval::lookup_const_by_id(self.cx.tcx, def_id, substs) {
+                            Some((const_expr, _const_ty)) => {
+                                match const_eval::const_expr_to_pat(self.cx.tcx,
+                                                                    const_expr,
+                                                                    pat.id,
+                                                                    pat.span) {
+                                    Ok(pat) =>
+                                        return self.to_pattern(&pat),
+                                    Err(_) =>
+                                        span_bug!(
+                                            pat.span, "illegal constant"),
+                                }
                             }
                             None => {
-                                self.cx.tcx.sess.span_bug(
+                                span_bug!(
                                     pat.span,
-                                    &format!("cannot eval constant: {:?}", def_id))
+                                    "cannot eval constant: {:?}",
+                                    def_id)
                             }
-                        },
+                        }
+                    }
                     _ =>
-                        self.cx.tcx.sess.span_bug(
+                        span_bug!(
                             pat.span,
-                            &format!("def not a constant: {:?}", def)),
+                            "def not a constant: {:?}",
+                            def),
                 }
             }
 
@@ -128,9 +140,10 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
                         self.slice_or_array_pattern(pat.span, ty, prefix, slice, suffix),
 
                     ref sty =>
-                        self.cx.tcx.sess.span_bug(
+                        span_bug!(
                             pat.span,
-                            &format!("unexpanded type for vector pattern: {:?}", sty)),
+                            "unexpanded type for vector pattern: {:?}",
+                            sty),
                 }
             }
 
@@ -169,6 +182,17 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
                     hir::BindByRef(hir::MutImmutable) =>
                         (Mutability::Not, BindingMode::ByRef(region.unwrap(), BorrowKind::Shared)),
                 };
+
+                // A ref x pattern is the same node used for x, and as such it has
+                // x's type, which is &T, where we want T (the type being matched).
+                if let hir::BindByRef(_) = bm {
+                    if let ty::TyRef(_, mt) = ty.sty {
+                        ty = mt.ty;
+                    } else {
+                        bug!("`ref {}` has wrong type {}", ident.node, ty);
+                    }
+                }
+
                 PatternKind::Binding {
                     mutability: mutability,
                     mode: mode,
@@ -201,7 +225,7 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
                 let adt_def = match pat_ty.sty {
                     ty::TyStruct(adt_def, _) | ty::TyEnum(adt_def, _) => adt_def,
                     _ => {
-                        self.cx.tcx.sess.span_bug(
+                        span_bug!(
                             pat.span,
                             "struct pattern not applied to struct or enum");
                     }
@@ -215,9 +239,10 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
                           .map(|field| {
                               let index = variant_def.index_of_field_named(field.node.name);
                               let index = index.unwrap_or_else(|| {
-                                  self.cx.tcx.sess.span_bug(
+                                  span_bug!(
                                       pat.span,
-                                      &format!("no field with name {:?}", field.node.name));
+                                      "no field with name {:?}",
+                                      field.node.name);
                               });
                               FieldPattern {
                                   field: Field::new(index),
@@ -230,12 +255,10 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
             }
 
             PatKind::QPath(..) => {
-                self.cx.tcx.sess.span_bug(pat.span, "unexpanded macro or bad constant etc");
+                span_bug!(pat.span, "unexpanded macro or bad constant etc");
             }
         };
 
-        let ty = self.cx.tcx.node_id_to_type(pat.id);
-
         Pattern {
             span: pat.span,
             ty: ty,
@@ -279,7 +302,7 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
             }
 
             _ => {
-                self.cx.tcx.sess.span_bug(span, "unexpanded macro or bad constant etc");
+                span_bug!(span, "unexpanded macro or bad constant etc");
             }
         }
     }
@@ -308,26 +331,8 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
             }
 
             _ => {
-                self.cx.tcx.sess.span_bug(pat.span,
-                                          &format!("inappropriate def for pattern: {:?}", def));
+                span_bug!(pat.span, "inappropriate def for pattern: {:?}", def);
             }
         }
     }
 }
-
-impl<'tcx> FieldPattern<'tcx> {
-    pub fn field_ty(&self) -> Ty<'tcx> {
-        debug!("field_ty({:?},ty={:?})", self, self.pattern.ty);
-        let r = match *self.pattern.kind {
-            PatternKind::Binding { mode: BindingMode::ByRef(..), ..} => {
-                match self.pattern.ty.sty {
-                    ty::TyRef(_, mt) => mt.ty,
-                    _ => unreachable!()
-                }
-            }
-            _ => self.pattern.ty
-        };
-        debug!("field_ty -> {:?}", r);
-        r
-    }
-}
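The new adjustment for `ref` bindings matches the comment above: the HIR node for the binding carries the reference type, while the pattern itself is matched against the pointee, so the lowering peels one `&` off. An ordinary (non-compiler) example of the two types involved:

    fn demo(opt: Option<i32>) -> i32 {
        match opt {
            // The binding `x` has type &i32, but `Some(ref x)` is matched
            // against an i32; the code above records i32 for that pattern.
            Some(ref x) => *x,
            None => 0,
        }
    }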
index 24fcc2f4fcd56a6eac016b554ff81be6dc5d7914..63dbde474380a11900436da0ca7a69c11d796719 100644 (file)
@@ -10,7 +10,7 @@
 
 use hair::*;
 
-use rustc_front::hir;
+use rustc::hir;
 use syntax::ptr::P;
 
 pub trait ToRef {
index e8edd4067e2f827dd0971a80b130aa17e2b896c1..51f2cc2687a9926d2170270b22faa34b7dbb625c 100644 (file)
 //! unit-tested and separated from the Rust source and compiler data
 //! structures.
 
-use rustc::mir::repr::{BinOp, BorrowKind, Field, Literal, Mutability, UnOp, ItemKind,
+use rustc::mir::repr::{BinOp, BorrowKind, Field, Literal, Mutability, UnOp,
     TypedConstVal};
-use rustc::middle::const_eval::ConstVal;
-use rustc::middle::def_id::DefId;
+use rustc::middle::const_val::ConstVal;
+use rustc::hir::def_id::DefId;
 use rustc::middle::region::CodeExtent;
-use rustc::middle::subst::Substs;
-use rustc::middle::ty::{self, AdtDef, ClosureSubsts, Region, Ty};
-use rustc_front::hir;
+use rustc::ty::subst::Substs;
+use rustc::ty::{self, AdtDef, ClosureSubsts, Region, Ty};
+use rustc::hir;
 use syntax::ast;
 use syntax::codemap::Span;
 use self::cx::Cx;
 
 pub mod cx;
 
-#[derive(Clone, Debug)]
-pub struct ItemRef<'tcx> {
-    pub ty: Ty<'tcx>,
-    pub kind: ItemKind,
-    pub def_id: DefId,
-    pub substs: &'tcx Substs<'tcx>,
-}
-
 #[derive(Clone, Debug)]
 pub struct Block<'tcx> {
     pub extent: CodeExtent,
@@ -78,10 +70,7 @@ pub enum StmtKind<'tcx> {
         pattern: Pattern<'tcx>,
 
         /// let pat = <INIT> ...
-        initializer: Option<ExprRef<'tcx>>,
-
-        /// let pat = init; <STMTS>
-        stmts: Vec<StmtRef<'tcx>>,
+        initializer: Option<ExprRef<'tcx>>
     },
 }
 
@@ -241,6 +230,8 @@ pub enum ExprKind<'tcx> {
     },
     InlineAsm {
         asm: &'tcx hir::InlineAsm,
+        outputs: Vec<ExprRef<'tcx>>,
+        inputs: Vec<ExprRef<'tcx>>
     },
 }
 
index 5d915f37f6ebf8653ffbcfc6e1081f4eac617e54..ced73f34e0d92f9631c9eedea56ea8e912e26f6a 100644 (file)
@@ -23,14 +23,17 @@ Rust MIR: a lowered representation of Rust. Also: an experiment!
 #![feature(box_patterns)]
 #![feature(rustc_private)]
 #![feature(staged_api)]
+#![feature(question_mark)]
 
 #[macro_use] extern crate log;
 extern crate graphviz as dot;
+#[macro_use]
 extern crate rustc;
 extern crate rustc_data_structures;
-extern crate rustc_front;
 extern crate rustc_back;
 extern crate syntax;
+extern crate rustc_const_math;
+extern crate rustc_const_eval;
 
 pub mod build;
 pub mod graphviz;
@@ -38,3 +41,4 @@ mod hair;
 pub mod mir_map;
 pub mod pretty;
 pub mod transform;
+pub mod traversal;
index 70c3354012135513a716f3f91f65b354f4a9cf9b..40334f652eed35d4fe42498b7f6608a7def6a233 100644 (file)
 //! - `#[rustc_mir(pretty="file.mir")]`
 
 extern crate syntax;
-extern crate rustc_front;
 
 use build;
-use graphviz;
-use pretty;
-use transform::{clear_dead_blocks, simplify_cfg, type_check};
-use transform::{no_landing_pads};
 use rustc::dep_graph::DepNode;
 use rustc::mir::repr::Mir;
+use pretty;
 use hair::cx::Cx;
-use std::fs::File;
 
-use rustc::mir::transform::MirPass;
 use rustc::mir::mir_map::MirMap;
-use rustc::middle::infer;
-use rustc::middle::region::CodeExtentData;
-use rustc::middle::ty::{self, Ty};
+use rustc::infer;
+use rustc::traits::ProjectionMode;
+use rustc::ty::{self, Ty, TyCtxt};
 use rustc::util::common::ErrorReported;
 use rustc::util::nodemap::NodeMap;
-use rustc_front::hir;
-use rustc_front::intravisit::{self, Visitor};
+use rustc::hir;
+use rustc::hir::intravisit::{self, Visitor};
+use syntax::abi::Abi;
 use syntax::ast;
 use syntax::attr::AttrMetaMethods;
 use syntax::codemap::Span;
 
-pub fn build_mir_for_crate<'tcx>(tcx: &ty::ctxt<'tcx>) -> MirMap<'tcx> {
+pub fn build_mir_for_crate<'tcx>(tcx: &TyCtxt<'tcx>) -> MirMap<'tcx> {
     let mut map = MirMap {
         map: NodeMap(),
     };
@@ -60,7 +55,7 @@ pub fn build_mir_for_crate<'tcx>(tcx: &ty::ctxt<'tcx>) -> MirMap<'tcx> {
 // OuterDump -- walks a crate, looking for fn items and methods to build MIR from
 
 struct OuterDump<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     map: &'a mut MirMap<'tcx>,
 }
 
@@ -116,7 +111,7 @@ impl<'a, 'tcx> Visitor<'tcx> for OuterDump<'a, 'tcx> {
 // InnerDump -- dumps MIR for a single fn and its contained closures
 
 struct InnerDump<'a, 'm, 'tcx: 'a + 'm> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     map: &'m mut MirMap<'tcx>,
     attr: Option<&'a ast::Attribute>,
 }
@@ -136,61 +131,20 @@ impl<'a, 'm, 'tcx> Visitor<'tcx> for InnerDump<'a,'m,'tcx> {
                 body: &'tcx hir::Block,
                 span: Span,
                 id: ast::NodeId) {
-        let (prefix, implicit_arg_tys) = match fk {
-            intravisit::FnKind::Closure =>
-                (format!("{}-", id), vec![closure_self_ty(&self.tcx, id, body.id)]),
-            _ =>
-                (format!(""), vec![]),
+        let implicit_arg_tys = if let intravisit::FnKind::Closure(..) = fk {
+            vec![closure_self_ty(&self.tcx, id, body.id)]
+        } else {
+            vec![]
         };
 
         let param_env = ty::ParameterEnvironment::for_item(self.tcx, id);
-
-        let infcx = infer::new_infer_ctxt(self.tcx, &self.tcx.tables, Some(param_env));
+        let infcx = infer::new_infer_ctxt(self.tcx,
+                                          &self.tcx.tables,
+                                          Some(param_env),
+                                          ProjectionMode::AnyFinal);
 
         match build_mir(Cx::new(&infcx), implicit_arg_tys, id, span, decl, body) {
-            Ok(mut mir) => {
-                clear_dead_blocks::ClearDeadBlocks::new().run_on_mir(&mut mir, &infcx);
-                type_check::TypeckMir::new().run_on_mir(&mut mir, &infcx);
-                no_landing_pads::NoLandingPads.run_on_mir(&mut mir, &infcx);
-                if self.tcx.sess.opts.mir_opt_level > 0 {
-                    simplify_cfg::SimplifyCfg::new().run_on_mir(&mut mir, &infcx);
-                }
-                let meta_item_list = self.attr
-                                         .iter()
-                                         .flat_map(|a| a.meta_item_list())
-                                         .flat_map(|l| l.iter());
-                for item in meta_item_list {
-                    if item.check_name("graphviz") || item.check_name("pretty") {
-                        match item.value_str() {
-                            Some(s) => {
-                                let filename = format!("{}{}", prefix, s);
-                                let result = File::create(&filename).and_then(|ref mut output| {
-                                    if item.check_name("graphviz") {
-                                        graphviz::write_mir_graphviz(&mir, output)
-                                    } else {
-                                        pretty::write_mir_pretty(&mir, output)
-                                    }
-                                });
-
-                                if let Err(e) = result {
-                                    self.tcx.sess.span_fatal(
-                                        item.span,
-                                        &format!("Error writing MIR {} results to `{}`: {}",
-                                                 item.name(), filename, e));
-                                }
-                            }
-                            None => {
-                                self.tcx.sess.span_err(
-                                    item.span,
-                                    &format!("{} attribute requires a path", item.name()));
-                            }
-                        }
-                    }
-                }
-
-                let previous = self.map.map.insert(id, mir);
-                assert!(previous.is_none());
-            }
+            Ok(mir) => assert!(self.map.map.insert(id, mir).is_none()),
             Err(ErrorReported) => {}
         }
 
@@ -210,8 +164,7 @@ fn build_mir<'a,'tcx:'a>(cx: Cx<'a,'tcx>,
     let fn_sig = match cx.tcx().tables.borrow().liberated_fn_sigs.get(&fn_id) {
         Some(f) => f.clone(),
         None => {
-            cx.tcx().sess.span_bug(span,
-                                   &format!("no liberated fn sig for {:?}", fn_id));
+            span_bug!(span, "no liberated fn sig for {:?}", fn_id);
         }
     };
 
@@ -224,19 +177,37 @@ fn build_mir<'a,'tcx:'a>(cx: Cx<'a,'tcx>,
             })
             .collect();
 
-    let parameter_scope =
-        cx.tcx().region_maps.lookup_code_extent(
-            CodeExtentData::ParameterScope { fn_id: fn_id, body_id: body.id });
-    Ok(build::construct(cx,
-                        span,
-                        implicit_arg_tys,
-                        arguments,
-                        parameter_scope,
-                        fn_sig.output,
-                        body))
+    let (mut mir, scope_auxiliary) =
+        build::construct(cx,
+                         span,
+                         fn_id,
+                         body.id,
+                         implicit_arg_tys,
+                         arguments,
+                         fn_sig.output,
+                         body);
+
+    match cx.tcx().node_id_to_type(fn_id).sty {
+        ty::TyFnDef(_, _, f) if f.abi == Abi::RustCall => {
+            // RustCall pseudo-ABI untuples the last argument.
+            if let Some(arg_decl) = mir.arg_decls.last_mut() {
+                arg_decl.spread = true;
+            }
+        }
+        _ => {}
+    }
+
+    pretty::dump_mir(cx.tcx(),
+                     "mir_map",
+                     &0,
+                     fn_id,
+                     &mir,
+                     Some(&scope_auxiliary));
+
+    Ok(mir)
 }
 
-fn closure_self_ty<'a, 'tcx>(tcx: &ty::ctxt<'tcx>,
+fn closure_self_ty<'a, 'tcx>(tcx: &TyCtxt<'tcx>,
                              closure_expr_id: ast::NodeId,
                              body_id: ast::NodeId)
                              -> Ty<'tcx> {
@@ -252,15 +223,15 @@ fn closure_self_ty<'a, 'tcx>(tcx: &ty::ctxt<'tcx>,
     let region = tcx.mk_region(region);
 
     match tcx.closure_kind(tcx.map.local_def_id(closure_expr_id)) {
-        ty::ClosureKind::FnClosureKind =>
+        ty::ClosureKind::Fn =>
             tcx.mk_ref(region,
                        ty::TypeAndMut { ty: closure_ty,
                                         mutbl: hir::MutImmutable }),
-        ty::ClosureKind::FnMutClosureKind =>
+        ty::ClosureKind::FnMut =>
             tcx.mk_ref(region,
                        ty::TypeAndMut { ty: closure_ty,
                                         mutbl: hir::MutMutable }),
-        ty::ClosureKind::FnOnceClosureKind =>
+        ty::ClosureKind::FnOnce =>
             closure_ty
     }
 }
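The unconditional pretty::dump_mir(.., "mir_map", &0, ..) call above replaces the removed `#[rustc_mir(graphviz=..)]` / `#[rustc_mir(pretty=..)]` attribute handling; whether anything is actually written is decided by the `-Z dump-mir=<filter>` option documented in the pretty.rs hunk below. Roughly, compiling a file like the following with that flag should leave one dump per matched fn on disk, named after the `rustc.node<node_id>.<pass_name>.<disambiguator>.mir` pattern (the node id is not known in advance):

    // Compiled with: rustc -Z dump-mir=mir_map example.rs
    // Expected dump file for `main`: rustc.node<N>.mir_map.0.mir
    fn main() {
        let x = 2 + 2;
        println!("{}", x);
    }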
index ea4036a4d375f6ec68435bf707ca8322fd5d68f7..d0c66b340d96e28c1a804b8d870a6c3719284a91 100644 (file)
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use build::{Location, ScopeAuxiliaryVec};
 use rustc::mir::repr::*;
-use rustc::middle::ty;
+use rustc::ty::{self, TyCtxt};
+use rustc_data_structures::fnv::FnvHashMap;
+use std::fmt::Display;
+use std::fs;
 use std::io::{self, Write};
+use syntax::ast::NodeId;
+use syntax::codemap::Span;
 
 const INDENT: &'static str = "    ";
 
+/// If the session is properly configured, dumps a human-readable
+/// representation of the mir into:
+///
+/// ```
+/// rustc.node<node_id>.<pass_name>.<disambiguator>
+/// ```
+///
+/// Output from this function is controlled by passing `-Z dump-mir=<filter>`,
+/// where `<filter>` takes the following forms:
+///
+/// - `all` -- dump MIR for all fns, all passes, all everything
+/// - `substring1&substring2,...` -- `&`-separated list of substrings
+///   that can appear in the pass-name or the `item_path_str` for the given
+///   node-id. If any one of the substrings match, the data is dumped out.
+pub fn dump_mir<'a, 'tcx>(tcx: &TyCtxt<'tcx>,
+                          pass_name: &str,
+                          disambiguator: &Display,
+                          node_id: NodeId,
+                          mir: &Mir<'tcx>,
+                          auxiliary: Option<&ScopeAuxiliaryVec>) {
+    let filters = match tcx.sess.opts.debugging_opts.dump_mir {
+        None => return,
+        Some(ref filters) => filters,
+    };
+    let node_path = tcx.item_path_str(tcx.map.local_def_id(node_id));
+    let is_matched =
+        filters.split("&")
+               .any(|filter| {
+                   filter == "all" ||
+                       pass_name.contains(filter) ||
+                       node_path.contains(filter)
+               });
+    if !is_matched {
+        return;
+    }
+
+    let file_name = format!("rustc.node{}.{}.{}.mir",
+                            node_id, pass_name, disambiguator);
+    let _ = fs::File::create(&file_name).and_then(|mut file| {
+        try!(writeln!(file, "// MIR for `{}`", node_path));
+        try!(writeln!(file, "// node_id = {}", node_id));
+        try!(writeln!(file, "// pass_name = {}", pass_name));
+        try!(writeln!(file, "// disambiguator = {}", disambiguator));
+        try!(writeln!(file, ""));
+        try!(write_mir_fn(tcx, node_id, mir, &mut file, auxiliary));
+        Ok(())
+    });
+}
+
 /// Write out a human-readable textual representation for the given MIR.
-pub fn write_mir_pretty<W: Write>(mir: &Mir, w: &mut W) -> io::Result<()> {
-    try!(write_mir_intro(mir, w));
+pub fn write_mir_pretty<'a, 'tcx, I>(tcx: &TyCtxt<'tcx>,
+                                     iter: I,
+                                     w: &mut Write)
+                                     -> io::Result<()>
+    where I: Iterator<Item=(&'a NodeId, &'a Mir<'tcx>)>, 'tcx: 'a
+{
+    for (&node_id, mir) in iter {
+        write_mir_fn(tcx, node_id, mir, w, None)?;
+    }
+    Ok(())
+}
 
-    // Nodes
+enum Annotation {
+    EnterScope(ScopeId),
+    ExitScope(ScopeId),
+}
+
+pub fn write_mir_fn<'tcx>(tcx: &TyCtxt<'tcx>,
+                          node_id: NodeId,
+                          mir: &Mir<'tcx>,
+                          w: &mut Write,
+                          auxiliary: Option<&ScopeAuxiliaryVec>)
+                          -> io::Result<()> {
+    // compute scope/entry exit annotations
+    let mut annotations = FnvHashMap();
+    if let Some(auxiliary) = auxiliary {
+        for (index, auxiliary) in auxiliary.vec.iter().enumerate() {
+            let scope_id = ScopeId::new(index);
+
+            annotations.entry(auxiliary.dom)
+                       .or_insert(vec![])
+                       .push(Annotation::EnterScope(scope_id));
+
+            for &loc in &auxiliary.postdoms {
+                annotations.entry(loc)
+                           .or_insert(vec![])
+                           .push(Annotation::ExitScope(scope_id));
+            }
+        }
+    }
+
+    write_mir_intro(tcx, node_id, mir, w)?;
     for block in mir.all_basic_blocks() {
-        try!(write_basic_block(block, mir, w));
+        write_basic_block(tcx, block, mir, w, &annotations)?;
+    }
+
+    // construct a scope tree and write it out
+    let mut scope_tree: FnvHashMap<Option<ScopeId>, Vec<ScopeId>> = FnvHashMap();
+    for (index, scope_data) in mir.scopes.iter().enumerate() {
+        scope_tree.entry(scope_data.parent_scope)
+                  .or_insert(vec![])
+                  .push(ScopeId::new(index));
     }
+    write_scope_tree(tcx, mir, auxiliary, &scope_tree, w, None, 1)?;
 
-    writeln!(w, "}}")
+    writeln!(w, "}}")?;
+    Ok(())
 }
 
 /// Write out a human-readable textual representation for the given basic block.
-fn write_basic_block<W: Write>(block: BasicBlock, mir: &Mir, w: &mut W) -> io::Result<()> {
+fn write_basic_block(tcx: &TyCtxt,
+                     block: BasicBlock,
+                     mir: &Mir,
+                     w: &mut Write,
+                     annotations: &FnvHashMap<Location, Vec<Annotation>>)
+                     -> io::Result<()> {
     let data = mir.basic_block_data(block);
 
     // Basic block label at the top.
-    try!(writeln!(w, "\n{}{:?}: {{", INDENT, block));
+    writeln!(w, "\n{}{:?}: {{", INDENT, block)?;
 
     // List of statements in the middle.
+    let mut current_location = Location { block: block, statement_index: 0 };
     for statement in &data.statements {
-        try!(writeln!(w, "{0}{0}{1:?};", INDENT, statement));
+        if let Some(ref annotations) = annotations.get(&current_location) {
+            for annotation in annotations.iter() {
+                match *annotation {
+                    Annotation::EnterScope(id) =>
+                        writeln!(w, "{0}{0}// Enter Scope({1})",
+                                 INDENT, id.index())?,
+                    Annotation::ExitScope(id) =>
+                        writeln!(w, "{0}{0}// Exit Scope({1})",
+                                 INDENT, id.index())?,
+                }
+            }
+        }
+
+        writeln!(w, "{0}{0}{1:?}; // {2}",
+                 INDENT,
+                 statement,
+                 comment(tcx, statement.scope, statement.span))?;
+
+        current_location.statement_index += 1;
     }
 
     // Terminator at the bottom.
-    try!(writeln!(w, "{0}{0}{1:?};", INDENT, data.terminator()));
+    writeln!(w, "{0}{0}{1:?}; // {2}",
+             INDENT,
+             data.terminator().kind,
+             comment(tcx, data.terminator().scope, data.terminator().span))?;
 
     writeln!(w, "{}}}", INDENT)
 }
 
+fn comment(tcx: &TyCtxt,
+           scope: ScopeId,
+           span: Span)
+           -> String {
+    format!("Scope({}) at {}", scope.index(), tcx.sess.codemap().span_to_string(span))
+}
+
+fn write_scope_tree(tcx: &TyCtxt,
+                    mir: &Mir,
+                    auxiliary: Option<&ScopeAuxiliaryVec>,
+                    scope_tree: &FnvHashMap<Option<ScopeId>, Vec<ScopeId>>,
+                    w: &mut Write,
+                    parent: Option<ScopeId>,
+                    depth: usize)
+                    -> io::Result<()> {
+    for &child in scope_tree.get(&parent).unwrap_or(&vec![]) {
+        let indent = depth * INDENT.len();
+        let data = &mir.scopes[child];
+        assert_eq!(data.parent_scope, parent);
+        writeln!(w, "{0:1$}Scope({2}) {{", "", indent, child.index())?;
+
+        let indent = indent + INDENT.len();
+        if let Some(parent) = parent {
+            writeln!(w, "{0:1$}Parent: Scope({2})", "", indent, parent.index())?;
+        }
+
+        if let Some(auxiliary) = auxiliary {
+            let extent = auxiliary[child].extent;
+            let data = tcx.region_maps.code_extent_data(extent);
+            writeln!(w, "{0:1$}Extent: {2:?}", "", indent, data)?;
+        }
+
+        write_scope_tree(tcx, mir, auxiliary, scope_tree, w,
+                         Some(child), depth + 1)?;
+    }
+    Ok(())
+}
+
 /// Write out a human-readable textual representation of the MIR's `fn` type and the types of its
 /// local variables (both user-defined bindings and compiler temporaries).
-fn write_mir_intro<W: Write>(mir: &Mir, w: &mut W) -> io::Result<()> {
-    try!(write!(w, "fn("));
+fn write_mir_intro(tcx: &TyCtxt, nid: NodeId, mir: &Mir, w: &mut Write)
+                   -> io::Result<()> {
+    write!(w, "fn {}(", tcx.node_path_str(nid))?;
 
     // fn argument types.
     for (i, arg) in mir.arg_decls.iter().enumerate() {
         if i > 0 {
-            try!(write!(w, ", "));
+            write!(w, ", ")?;
         }
-        try!(write!(w, "{:?}: {}", Lvalue::Arg(i as u32), arg.ty));
+        write!(w, "{:?}: {}", Lvalue::Arg(i as u32), arg.ty)?;
     }
 
-    try!(write!(w, ") -> "));
+    write!(w, ") -> ")?;
 
     // fn return type.
     match mir.return_ty {
-        ty::FnOutput::FnConverging(ty) => try!(write!(w, "{}", ty)),
-        ty::FnOutput::FnDiverging => try!(write!(w, "!")),
+        ty::FnOutput::FnConverging(ty) => write!(w, "{}", ty)?,
+        ty::FnOutput::FnDiverging => write!(w, "!")?,
     }
 
-    try!(writeln!(w, " {{"));
+    writeln!(w, " {{")?;
 
     // User variable types (including the user's name in a comment).
     for (i, var) in mir.var_decls.iter().enumerate() {
-        try!(write!(w, "{}let ", INDENT));
+        write!(w, "{}let ", INDENT)?;
         if var.mutability == Mutability::Mut {
-            try!(write!(w, "mut "));
+            write!(w, "mut ")?;
         }
-        try!(writeln!(w, "{:?}: {}; // {}", Lvalue::Var(i as u32), var.ty, var.name));
+        writeln!(w, "{:?}: {}; // {}", Lvalue::Var(i as u32), var.ty, var.name)?;
     }
 
     // Compiler-introduced temporary types.
     for (i, temp) in mir.temp_decls.iter().enumerate() {
-        try!(writeln!(w, "{}let mut {:?}: {};", INDENT, Lvalue::Temp(i as u32), temp.ty));
+        writeln!(w, "{}let mut {:?}: {};", INDENT, Lvalue::Temp(i as u32), temp.ty)?;
     }
 
     Ok(())
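
For orientation, a minimal sketch of driving these printers by hand, assuming a `tcx`, a `NodeId` and its `Mir` are already in scope (the stdout sink and the absence of scope auxiliary data are illustrative choices, not part of this change):

    let mut out = std::io::stdout();
    // Emits "fn path(arg0: T, ...) -> Ret {", the let-declarations for vars and
    // temps, each basic block with per-statement scope comments, and the scope tree.
    write_mir_fn(tcx, node_id, mir, &mut out, None)?;
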
diff --git a/src/librustc_mir/transform/break_critical_edges.rs b/src/librustc_mir/transform/break_critical_edges.rs
new file mode 100644 (file)
index 0000000..e1fb5df
--- /dev/null
@@ -0,0 +1,117 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::ty::TyCtxt;
+use rustc::mir::repr::*;
+use rustc::mir::transform::{MirPass, Pass};
+use syntax::ast::NodeId;
+
+use rustc_data_structures::bitvec::BitVector;
+
+use traversal;
+
+pub struct BreakCriticalEdges;
+
+/**
+ * Breaks critical edges in the MIR.
+ *
+ * Critical edges are edges that are neither the only edge leaving a
+ * block, nor the only edge entering one.
+ *
+ * When you want something to happen "along" an edge, you can either
+ * do it at the end of the predecessor block, or at the start of the
+ * successor block. Critical edges have to be broken in order to prevent
+ * "edge actions" from affecting other edges.
+ *
+ * This function will break those edges by inserting new blocks along them.
+ *
+ * A special case is Drop and Call terminators with unwind/cleanup successors.
+ * They use `invoke` in LLVM, which terminates a block, meaning that code cannot
+ * be inserted after them, so even if an edge is the only edge leaving a block
+ * like that, we still insert blocks if the edge is one of many entering the
+ * target.
+ *
+ * NOTE: Simplify CFG will happily undo most of the work this pass does.
+ *
+ */
+
+impl<'tcx> MirPass<'tcx> for BreakCriticalEdges {
+    fn run_pass(&mut self, _: &TyCtxt<'tcx>, _: NodeId, mir: &mut Mir<'tcx>) {
+        break_critical_edges(mir);
+    }
+}
+
+impl Pass for BreakCriticalEdges {}
+
+fn break_critical_edges(mir: &mut Mir) {
+    let mut pred_count = vec![0u32; mir.basic_blocks.len()];
+
+    // Build the predecessor map for the MIR
+    for (_, data) in traversal::preorder(mir) {
+        if let Some(ref term) = data.terminator {
+            for &tgt in term.successors().iter() {
+                pred_count[tgt.index()] += 1;
+            }
+        }
+    }
+
+    let cleanup_map : BitVector = mir.basic_blocks
+        .iter().map(|bb| bb.is_cleanup).collect();
+
+    // We need a place to store the new blocks generated
+    let mut new_blocks = Vec::new();
+
+    let bbs = mir.all_basic_blocks();
+    let cur_len = mir.basic_blocks.len();
+
+    for &bb in &bbs {
+        let data = mir.basic_block_data_mut(bb);
+
+        if let Some(ref mut term) = data.terminator {
+            let is_invoke = term_is_invoke(term);
+            let term_span = term.span;
+            let term_scope = term.scope;
+            let succs = term.successors_mut();
+            if succs.len() > 1 || (succs.len() > 0 && is_invoke) {
+                for tgt in succs {
+                    let num_preds = pred_count[tgt.index()];
+                    if num_preds > 1 {
+                        // It's a critical edge, break it
+                        let goto = Terminator {
+                            span: term_span,
+                            scope: term_scope,
+                            kind: TerminatorKind::Goto { target: *tgt }
+                        };
+                        let mut data = BasicBlockData::new(Some(goto));
+                        data.is_cleanup = cleanup_map.contains(tgt.index());
+
+                        // Get the index it will be when inserted into the MIR
+                        let idx = cur_len + new_blocks.len();
+                        new_blocks.push(data);
+                        *tgt = BasicBlock::new(idx);
+                    }
+                }
+            }
+        }
+    }
+
+    debug!("Broke {} N edges", new_blocks.len());
+
+    mir.basic_blocks.extend_from_slice(&new_blocks);
+}
+
+// Returns true if the terminator would use an invoke in LLVM.
+fn term_is_invoke(term: &Terminator) -> bool {
+    match term.kind {
+        TerminatorKind::Call { cleanup: Some(_), .. } |
+        TerminatorKind::Drop { unwind: Some(_), .. } => true,
+        _ => false
+    }
+}
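
As a concrete sketch (hand-drawn, not pass output): if bb0 branches to both bb1 and bb2, and bb2 is also reached from bb1, then the bb0 -> bb2 edge is critical and a fresh goto-only block is inserted on it:

    // before:  bb0 -> {bb1, bb2},  bb1 -> bb2
    // after:   bb0 -> {bb1, bb3},  bb1 -> bb2,  bb3: goto -> bb2
    // The bb1 -> bb2 edge is left alone: bb1 has a single successor and is not
    // an invoke-style terminator, so that edge is not broken.
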
diff --git a/src/librustc_mir/transform/clear_dead_blocks.rs b/src/librustc_mir/transform/clear_dead_blocks.rs
deleted file mode 100644 (file)
index b35d8c0..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! A pass that erases the contents of dead blocks. This pass must
-//! run before any analysis passes because some of the dead blocks
-//! can be ill-typed.
-//!
-//! The main problem is that typeck lets most blocks whose end is not
-//! reachable have an arbitrary return type, rather than having the
-//! usual () return type (as a note, typeck's notion of reachability
-//! is in fact slightly weaker than MIR CFG reachability - see #31617).
-//!
-//! A standard example of the situation is:
-//! ```rust
-//!   fn example() {
-//!       let _a: char = { return; };
-//!   }
-//! ```
-//!
-//! Here the block (`{ return; }`) has the return type `char`,
-//! rather than `()`, but the MIR we naively generate still contains
-//! the `_a = ()` write in the unreachable block "after" the return.
-//!
-//! As we have to run this pass even when we want to debug the MIR,
-//! this pass just replaces the blocks with empty "return" blocks
-//! and does not renumber anything.
-
-use rustc::middle::infer;
-use rustc::mir::repr::*;
-use rustc::mir::transform::MirPass;
-
-pub struct ClearDeadBlocks;
-
-impl ClearDeadBlocks {
-    pub fn new() -> ClearDeadBlocks {
-        ClearDeadBlocks
-    }
-
-    fn clear_dead_blocks(&self, mir: &mut Mir) {
-        let mut seen = vec![false; mir.basic_blocks.len()];
-
-        // These blocks are always required.
-        seen[START_BLOCK.index()] = true;
-        seen[END_BLOCK.index()] = true;
-
-        let mut worklist = vec![START_BLOCK];
-        while let Some(bb) = worklist.pop() {
-            for succ in mir.basic_block_data(bb).terminator().successors().iter() {
-                if !seen[succ.index()] {
-                    seen[succ.index()] = true;
-                    worklist.push(*succ);
-                }
-            }
-        }
-
-        for (n, (block, seen)) in mir.basic_blocks.iter_mut().zip(seen).enumerate() {
-            if !seen {
-                info!("clearing block #{}: {:?}", n, block);
-                *block = BasicBlockData {
-                    statements: vec![],
-                    terminator: Some(Terminator::Return),
-                    is_cleanup: false
-                };
-            }
-        }
-    }
-}
-
-impl MirPass for ClearDeadBlocks {
-    fn run_on_mir<'a, 'tcx>(&mut self, mir: &mut Mir<'tcx>, _: &infer::InferCtxt<'a, 'tcx>)
-    {
-        self.clear_dead_blocks(mir);
-    }
-}
index a82d1fc53991f5855aac1b98eff04be8063651d7..12bfa3aebc2017ebf0f4c443529f7f586a483a3a 100644 (file)
 //! We want to do this once just before trans, so trans does not have to take
 //! care erasing regions all over the place.
 
-use rustc::middle::ty;
+use rustc::ty::subst::Substs;
+use rustc::ty::{Ty, TyCtxt};
 use rustc::mir::repr::*;
 use rustc::mir::visit::MutVisitor;
-use rustc::mir::mir_map::MirMap;
-
-pub fn erase_regions<'tcx>(tcx: &ty::ctxt<'tcx>, mir_map: &mut MirMap<'tcx>) {
-    for (_, mir) in &mut mir_map.map {
-        EraseRegionsVisitor::new(tcx).visit_mir(mir);
-    }
-}
+use rustc::mir::transform::{MirPass, Pass};
+use syntax::ast::NodeId;
 
 struct EraseRegionsVisitor<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
 }
 
 impl<'a, 'tcx> EraseRegionsVisitor<'a, 'tcx> {
-    pub fn new(tcx: &'a ty::ctxt<'tcx>) -> Self {
+    pub fn new(tcx: &'a TyCtxt<'tcx>) -> Self {
         EraseRegionsVisitor {
             tcx: tcx
         }
     }
-
-    fn erase_regions_return_ty(&mut self, fn_output: &mut ty::FnOutput<'tcx>) {
-        match *fn_output {
-            ty::FnConverging(ref mut ty) => {
-                *ty = self.tcx.erase_regions(ty);
-            },
-            ty::FnDiverging => {}
-        }
-    }
-
-    fn erase_regions_tys<'b, T>(&mut self, tys: T)
-        where T: Iterator<Item = &'b mut ty::Ty<'tcx>>,
-              'tcx: 'b
-    {
-        for ty in tys {
-            *ty = self.tcx.erase_regions(ty);
-        }
-    }
 }
 
 impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> {
-    fn visit_mir(&mut self, mir: &mut Mir<'tcx>) {
-        self.erase_regions_return_ty(&mut mir.return_ty);
-        self.erase_regions_tys(mir.var_decls.iter_mut().map(|d| &mut d.ty));
-        self.erase_regions_tys(mir.arg_decls.iter_mut().map(|d| &mut d.ty));
-        self.erase_regions_tys(mir.temp_decls.iter_mut().map(|d| &mut d.ty));
-        self.super_mir(mir);
+    fn visit_ty(&mut self, ty: &mut Ty<'tcx>) {
+        let old_ty = *ty;
+        *ty = self.tcx.erase_regions(&old_ty);
     }
 
-    fn visit_terminator(&mut self, bb: BasicBlock, terminator: &mut Terminator<'tcx>) {
-        match *terminator {
-            Terminator::Goto { .. } |
-            Terminator::Resume |
-            Terminator::Return |
-            Terminator::If { .. } |
-            Terminator::Switch { .. } |
-            Terminator::Drop { .. } |
-            Terminator::Call { .. } => {
-                /* nothing to do */
-            },
-            Terminator::SwitchInt { ref mut switch_ty, .. } => {
-                *switch_ty = self.tcx.erase_regions(switch_ty);
-            },
-        }
-        self.super_terminator(bb, terminator);
+    fn visit_substs(&mut self, substs: &mut &'tcx Substs<'tcx>) {
+        *substs = self.tcx.mk_substs(self.tcx.erase_regions(*substs));
     }
+}
 
-    fn visit_rvalue(&mut self, rvalue: &mut Rvalue<'tcx>) {
-        match *rvalue {
-            Rvalue::Use(_) |
-            Rvalue::Len(_) |
-            Rvalue::BinaryOp(_, _, _) |
-            Rvalue::UnaryOp(_, _) |
-            Rvalue::Slice { input: _, from_start: _, from_end: _ } |
-            Rvalue::InlineAsm(_) => {},
-
-            Rvalue::Repeat(_, ref mut value) => value.ty = self.tcx.erase_regions(&value.ty),
-            Rvalue::Ref(ref mut region, _, _) => *region = ty::ReStatic,
-            Rvalue::Cast(_, _, ref mut ty) => *ty = self.tcx.erase_regions(ty),
-            Rvalue::Box(ref mut ty) => *ty = self.tcx.erase_regions(ty),
+pub struct EraseRegions;
 
+impl Pass for EraseRegions {}
 
-            Rvalue::Aggregate(AggregateKind::Vec, _) |
-            Rvalue::Aggregate(AggregateKind::Tuple, _) => {},
-            Rvalue::Aggregate(AggregateKind::Adt(_, _, ref mut substs), _) =>
-                *substs = self.tcx.mk_substs(self.tcx.erase_regions(*substs)),
-            Rvalue::Aggregate(AggregateKind::Closure(def_id, ref mut closure_substs), _) => {
-                let cloned = Box::new(closure_substs.clone());
-                let ty = self.tcx.mk_closure_from_closure_substs(def_id, cloned);
-                let erased = self.tcx.erase_regions(&ty);
-                *closure_substs = match erased.sty {
-                    ty::TyClosure(_, ref closure_substs) => &*closure_substs,
-                    _ => unreachable!()
-                };
-            }
-        }
-        self.super_rvalue(rvalue);
-    }
-
-    fn visit_constant(&mut self, constant: &mut Constant<'tcx>) {
-        constant.ty = self.tcx.erase_regions(&constant.ty);
-        match constant.literal {
-            Literal::Item { ref mut substs, .. } => {
-                *substs = self.tcx.mk_substs(self.tcx.erase_regions(substs));
-            }
-            Literal::Value { .. } => { /* nothing to do */ }
-        }
-        self.super_constant(constant);
+impl<'tcx> MirPass<'tcx> for EraseRegions {
+    fn run_pass(&mut self, tcx: &TyCtxt<'tcx>, _: NodeId, mir: &mut Mir<'tcx>) {
+        EraseRegionsVisitor::new(tcx).visit_mir(mir);
     }
 }
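
The effect of the rewritten pass is easiest to see on a single type: every `Ty` and `Substs` reachable from the MIR is passed through `tcx.erase_regions`, the same call the old per-case code made. A one-line hedged sketch, assuming some `ty` with a free region is in scope:

    let erased = tcx.erase_regions(&ty); // what visit_ty now does for every type in the MIR
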
index adca68114fd0152e929c8719ab5077fd064f95df..a52a8edc211730eb31b5b5a6e20c11a3b8ead040 100644 (file)
@@ -8,8 +8,9 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-pub mod clear_dead_blocks;
+pub mod remove_dead_blocks;
 pub mod simplify_cfg;
 pub mod erase_regions;
 pub mod no_landing_pads;
 pub mod type_check;
+pub mod break_critical_edges;
index e2c93bd4e8751cd87b721b0b9fd2296edf5dae99..edfe75b8430d4910f33ae16616b662768715885f 100644 (file)
 //! This pass removes the unwind branch of all the terminators when the no-landing-pads option is
 //! specified.
 
-use rustc::middle::infer;
+use rustc::ty::TyCtxt;
 use rustc::mir::repr::*;
 use rustc::mir::visit::MutVisitor;
-use rustc::mir::transform::MirPass;
+use rustc::mir::transform::{Pass, MirPass};
+use syntax::ast::NodeId;
 
 pub struct NoLandingPads;
 
 impl<'tcx> MutVisitor<'tcx> for NoLandingPads {
     fn visit_terminator(&mut self, bb: BasicBlock, terminator: &mut Terminator<'tcx>) {
-        match *terminator {
-            Terminator::Goto { .. } |
-            Terminator::Resume |
-            Terminator::Return |
-            Terminator::If { .. } |
-            Terminator::Switch { .. } |
-            Terminator::SwitchInt { .. } => {
+        match terminator.kind {
+            TerminatorKind::Goto { .. } |
+            TerminatorKind::Resume |
+            TerminatorKind::Return |
+            TerminatorKind::If { .. } |
+            TerminatorKind::Switch { .. } |
+            TerminatorKind::SwitchInt { .. } => {
                 /* nothing to do */
             },
-            Terminator::Drop { ref mut unwind, .. } => {
+            TerminatorKind::Drop { ref mut unwind, .. } => {
                 unwind.take();
             },
-            Terminator::Call { ref mut cleanup, .. } => {
+            TerminatorKind::Call { ref mut cleanup, .. } => {
                 cleanup.take();
             },
         }
@@ -40,11 +41,12 @@ impl<'tcx> MutVisitor<'tcx> for NoLandingPads {
     }
 }
 
-impl MirPass for NoLandingPads {
-    fn run_on_mir<'a, 'tcx>(&mut self, mir: &mut Mir<'tcx>,
-                            infcx: &infer::InferCtxt<'a, 'tcx>) {
-        if infcx.tcx.sess.no_landing_pads() {
+impl<'tcx> MirPass<'tcx> for NoLandingPads {
+    fn run_pass(&mut self, tcx: &TyCtxt<'tcx>, _: NodeId, mir: &mut Mir<'tcx>) {
+        if tcx.sess.no_landing_pads() {
             self.visit_mir(mir);
         }
     }
 }
+
+impl Pass for NoLandingPads {}
diff --git a/src/librustc_mir/transform/remove_dead_blocks.rs b/src/librustc_mir/transform/remove_dead_blocks.rs
new file mode 100644 (file)
index 0000000..dc1ddad
--- /dev/null
@@ -0,0 +1,87 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A pass that erases the contents of dead blocks. This pass must
+//! run before any analysis passes because some of the dead blocks
+//! can be ill-typed.
+//!
+//! The main problem is that typeck lets most blocks whose end is not
+//! reachable have an arbitrary return type, rather than having the
+//! usual () return type (as a note, typeck's notion of reachability
+//! is in fact slightly weaker than MIR CFG reachability - see #31617).
+//!
+//! A standard example of the situation is:
+//! ```rust
+//!   fn example() {
+//!       let _a: char = { return; };
+//!   }
+//! ```
+//!
+//! Here the block (`{ return; }`) has the return type `char`,
+//! rather than `()`, but the MIR we naively generate still contains
+//! the `_a = ()` write in the unreachable block "after" the return.
+//!
+//! As we have to run this pass even when we want to debug the MIR,
+//! this pass just replaces the blocks with empty "return" blocks
+//! and does not renumber anything.
+
+use rustc_data_structures::bitvec::BitVector;
+use rustc::ty::TyCtxt;
+use rustc::mir::repr::*;
+use rustc::mir::transform::{Pass, MirPass};
+use syntax::ast::NodeId;
+
+pub struct RemoveDeadBlocks;
+
+impl<'tcx> MirPass<'tcx> for RemoveDeadBlocks {
+    fn run_pass(&mut self, _: &TyCtxt<'tcx>, _: NodeId, mir: &mut Mir<'tcx>) {
+        let mut seen = BitVector::new(mir.basic_blocks.len());
+        // These blocks are always required.
+        seen.insert(START_BLOCK.index());
+        seen.insert(END_BLOCK.index());
+
+        let mut worklist = Vec::with_capacity(4);
+        worklist.push(START_BLOCK);
+        while let Some(bb) = worklist.pop() {
+            for succ in mir.basic_block_data(bb).terminator().successors().iter() {
+                if seen.insert(succ.index()) {
+                    worklist.push(*succ);
+                }
+            }
+        }
+        retain_basic_blocks(mir, &seen);
+    }
+}
+
+impl Pass for RemoveDeadBlocks {}
+
+/// Mass removal of basic blocks to keep the ID-remapping cheap.
+fn retain_basic_blocks(mir: &mut Mir, keep: &BitVector) {
+    let num_blocks = mir.basic_blocks.len();
+
+    let mut replacements: Vec<_> = (0..num_blocks).map(BasicBlock::new).collect();
+    let mut used_blocks = 0;
+    for alive_index in keep.iter() {
+        replacements[alive_index] = BasicBlock::new(used_blocks);
+        if alive_index != used_blocks {
+            // Swap the next alive block data with the current available slot. Since alive_index is
+            // non-decreasing this is a valid operation.
+            mir.basic_blocks.swap(alive_index, used_blocks);
+        }
+        used_blocks += 1;
+    }
+    mir.basic_blocks.truncate(used_blocks);
+
+    for bb in mir.all_basic_blocks() {
+        for target in mir.basic_block_data_mut(bb).terminator_mut().successors_mut() {
+            *target = replacements[target.index()];
+        }
+    }
+}
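
A small worked example of the remapping done by `retain_basic_blocks` (indices hand-picked for illustration):

    // Four blocks, keep = {bb0, bb2, bb3}.  `replacements` starts as
    // [bb0, bb1, bb2, bb3] and ends as [bb0, bb1, bb1, bb2]; slot 1 is never
    // read, because no live block can target the dead bb1.  basic_blocks is
    // compacted to three entries and every terminator target is rewritten
    // through the table.
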
index 785e6db57a53843f8089deecb23414064beac26d..00b8f5c093043485c7ae3a217d531fa4a11d2541 100644 (file)
@@ -8,11 +8,14 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use rustc_data_structures::bitvec::BitVector;
-use rustc::middle::const_eval::ConstVal;
-use rustc::middle::infer;
+use rustc::middle::const_val::ConstVal;
+use rustc::ty::TyCtxt;
 use rustc::mir::repr::*;
-use rustc::mir::transform::MirPass;
+use rustc::mir::transform::{MirPass, Pass};
+use pretty;
+use syntax::ast::NodeId;
+
+use super::remove_dead_blocks::RemoveDeadBlocks;
 
 pub struct SimplifyCfg;
 
@@ -21,41 +24,29 @@ impl SimplifyCfg {
         SimplifyCfg
     }
 
-    fn remove_dead_blocks(&self, mir: &mut Mir) {
-        let mut seen = BitVector::new(mir.basic_blocks.len());
-        // These blocks are always required.
-        seen.insert(START_BLOCK.index());
-        seen.insert(END_BLOCK.index());
-
-        let mut worklist = Vec::with_capacity(4);
-        worklist.push(START_BLOCK);
-        while let Some(bb) = worklist.pop() {
-            for succ in mir.basic_block_data(bb).terminator().successors().iter() {
-                if seen.insert(succ.index()) {
-                    worklist.push(*succ);
-                }
-            }
-        }
-        retain_basic_blocks(mir, &seen);
-    }
-
     fn remove_goto_chains(&self, mir: &mut Mir) -> bool {
-
         // Find the target at the end of the jump chain, return None if there is a loop
         fn final_target(mir: &Mir, mut target: BasicBlock) -> Option<BasicBlock> {
             // Keep track of already seen blocks to detect loops
             let mut seen: Vec<BasicBlock> = Vec::with_capacity(8);
 
             while mir.basic_block_data(target).statements.is_empty() {
-                match mir.basic_block_data(target).terminator {
-                    Some(Terminator::Goto { target: next }) => {
-                        if seen.contains(&next) {
-                            return None;
+                // NB -- terminator may have been swapped with `None`
+                // below, in which case we have a cycle and just want
+                // to stop
+                if let Some(ref terminator) = mir.basic_block_data(target).terminator {
+                    match terminator.kind {
+                        TerminatorKind::Goto { target: next } => {
+                            if seen.contains(&next) {
+                                return None;
+                            }
+                            seen.push(next);
+                            target = next;
                         }
-                        seen.push(next);
-                        target = next;
+                        _ => break
                     }
-                    _ => break
+                } else {
+                    break
                 }
             }
 
@@ -68,6 +59,8 @@ impl SimplifyCfg {
             let mut terminator = mir.basic_block_data_mut(bb).terminator.take()
                                     .expect("invalid terminator state");
 
+            debug!("remove_goto_chains: bb={:?} terminator={:?}", bb, terminator);
+
             for target in terminator.successors_mut() {
                 let new_target = match final_target(mir, *target) {
                     Some(new_target) => new_target,
@@ -88,27 +81,27 @@ impl SimplifyCfg {
         for bb in mir.all_basic_blocks() {
             let basic_block = mir.basic_block_data_mut(bb);
             let mut terminator = basic_block.terminator_mut();
-            *terminator = match *terminator {
-                Terminator::If { ref targets, .. } if targets.0 == targets.1 => {
+            terminator.kind = match terminator.kind {
+                TerminatorKind::If { ref targets, .. } if targets.0 == targets.1 => {
                     changed = true;
-                    Terminator::Goto { target: targets.0 }
+                    TerminatorKind::Goto { target: targets.0 }
                 }
 
-                Terminator::If { ref targets, cond: Operand::Constant(Constant {
+                TerminatorKind::If { ref targets, cond: Operand::Constant(Constant {
                     literal: Literal::Value {
                         value: ConstVal::Bool(cond)
                     }, ..
                 }) } => {
                     changed = true;
                     if cond {
-                        Terminator::Goto { target: targets.0 }
+                        TerminatorKind::Goto { target: targets.0 }
                     } else {
-                        Terminator::Goto { target: targets.1 }
+                        TerminatorKind::Goto { target: targets.1 }
                     }
                 }
 
-                Terminator::SwitchInt { ref targets, .. }  if targets.len() == 1 => {
-                    Terminator::Goto { target: targets[0] }
+                TerminatorKind::SwitchInt { ref targets, .. } if targets.len() == 1 => {
+                    TerminatorKind::Goto { target: targets[0] }
                 }
                 _ => continue
             }
@@ -118,39 +111,20 @@ impl SimplifyCfg {
     }
 }
 
-impl MirPass for SimplifyCfg {
-    fn run_on_mir<'a, 'tcx>(&mut self, mir: &mut Mir<'tcx>, _: &infer::InferCtxt<'a, 'tcx>) {
+impl<'tcx> MirPass<'tcx> for SimplifyCfg {
+    fn run_pass(&mut self, tcx: &TyCtxt<'tcx>, id: NodeId, mir: &mut Mir<'tcx>) {
+        let mut counter = 0;
         let mut changed = true;
         while changed {
+            pretty::dump_mir(tcx, "simplify_cfg", &counter, id, mir, None);
+            counter += 1;
             changed = self.simplify_branches(mir);
             changed |= self.remove_goto_chains(mir);
-            self.remove_dead_blocks(mir);
+            RemoveDeadBlocks.run_pass(tcx, id, mir);
         }
         // FIXME: Should probably be moved into some kind of pass manager
         mir.basic_blocks.shrink_to_fit();
     }
 }
 
-/// Mass removal of basic blocks to keep the ID-remapping cheap.
-fn retain_basic_blocks(mir: &mut Mir, keep: &BitVector) {
-    let num_blocks = mir.basic_blocks.len();
-
-    let mut replacements: Vec<_> = (0..num_blocks).map(BasicBlock::new).collect();
-    let mut used_blocks = 0;
-    for alive_index in keep.iter() {
-        replacements[alive_index] = BasicBlock::new(used_blocks);
-        if alive_index != used_blocks {
-            // Swap the next alive block data with the current available slot. Since alive_index is
-            // non-decreasing this is a valid operation.
-            mir.basic_blocks.swap(alive_index, used_blocks);
-        }
-        used_blocks += 1;
-    }
-    mir.basic_blocks.truncate(used_blocks);
-
-    for bb in mir.all_basic_blocks() {
-        for target in mir.basic_block_data_mut(bb).terminator_mut().successors_mut() {
-            *target = replacements[target.index()];
-        }
-    }
-}
+impl Pass for SimplifyCfg {}
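
A hand-written sketch of the two simplifications the loop above alternates between (not captured compiler output):

    // remove_goto_chains:  bb0: goto -> bb1;   bb1: goto -> bb2;
    //                      becomes   bb0: goto -> bb2;
    // simplify_branches:   if(const true) -> [bb1, bb2]
    //                      becomes   goto -> bb1;
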
index 0e97e3629064be16abd9b6fe7a1b1fb98323554c..11ac1fa8f82a18e30a7116198afc42695a4d7838 100644 (file)
 //! This pass type-checks the MIR to ensure it is not broken.
 #![allow(unreachable_code)]
 
-use rustc::middle::infer::{self, InferCtxt};
-use rustc::middle::traits;
-use rustc::middle::ty::{self, Ty};
-use rustc::middle::ty::fold::TypeFoldable;
+use rustc::dep_graph::DepNode;
+use rustc::infer::{self, InferCtxt, InferOk};
+use rustc::traits::{self, ProjectionMode};
+use rustc::ty::fold::TypeFoldable;
+use rustc::ty::{self, Ty, TyCtxt};
 use rustc::mir::repr::*;
 use rustc::mir::tcx::LvalueTy;
-use rustc::mir::transform::MirPass;
+use rustc::mir::transform::{MirPass, Pass};
 use rustc::mir::visit::{self, Visitor};
-
-use syntax::codemap::{Span, DUMMY_SP};
 use std::fmt;
+use syntax::ast::NodeId;
+use syntax::codemap::{Span, DUMMY_SP};
 
 macro_rules! span_mirbug {
     ($context:expr, $elem:expr, $($message:tt)*) => ({
@@ -113,7 +114,7 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
         }
     }
 
-    fn tcx(&self) -> &'a ty::ctxt<'tcx> {
+    fn tcx(&self) -> &'a TyCtxt<'tcx> {
         self.cx.infcx.tcx
     }
 
@@ -337,6 +338,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
     {
         infer::mk_subty(self.infcx, false, infer::TypeOrigin::Misc(span),
                         sup, sub)
+            // FIXME(#32730) propagate obligations
+            .map(|InferOk { obligations, .. }| assert!(obligations.is_empty()))
     }
 
     fn mk_eqty(&self, span: Span, a: Ty<'tcx>, b: Ty<'tcx>)
@@ -344,9 +347,11 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
     {
         infer::mk_eqty(self.infcx, false, infer::TypeOrigin::Misc(span),
                        a, b)
+            // FIXME(#32730) propagate obligations
+            .map(|InferOk { obligations, .. }| assert!(obligations.is_empty()))
     }
 
-    fn tcx(&self) -> &'a ty::ctxt<'tcx> {
+    fn tcx(&self) -> &'a TyCtxt<'tcx> {
         self.infcx.tcx
     }
 
@@ -375,15 +380,15 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
                         term: &Terminator<'tcx>) {
         debug!("check_terminator: {:?}", term);
         let tcx = self.tcx();
-        match *term {
-            Terminator::Goto { .. } |
-            Terminator::Resume |
-            Terminator::Return |
-            Terminator::Drop { .. } => {
+        match term.kind {
+            TerminatorKind::Goto { .. } |
+            TerminatorKind::Resume |
+            TerminatorKind::Return |
+            TerminatorKind::Drop { .. } => {
                 // no checks needed for these
             }
 
-            Terminator::If { ref cond, .. } => {
+            TerminatorKind::If { ref cond, .. } => {
                 let cond_ty = mir.operand_ty(tcx, cond);
                 match cond_ty.sty {
                     ty::TyBool => {}
@@ -392,7 +397,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
                     }
                 }
             }
-            Terminator::SwitchInt { ref discr, switch_ty, .. } => {
+            TerminatorKind::SwitchInt { ref discr, switch_ty, .. } => {
                 let discr_ty = mir.lvalue_ty(tcx, discr).to_ty(tcx);
                 if let Err(terr) = self.mk_subty(self.last_span, discr_ty, switch_ty) {
                     span_mirbug!(self, term, "bad SwitchInt ({:?} on {:?}): {:?}",
@@ -405,7 +410,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
                 }
                 // FIXME: check the values
             }
-            Terminator::Switch { ref discr, adt_def, ref targets } => {
+            TerminatorKind::Switch { ref discr, adt_def, ref targets } => {
                 let discr_ty = mir.lvalue_ty(tcx, discr).to_ty(tcx);
                 match discr_ty.sty {
                     ty::TyEnum(def, _)
@@ -417,11 +422,11 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
                     }
                 }
             }
-            Terminator::Call { ref func, ref args, ref destination, .. } => {
+            TerminatorKind::Call { ref func, ref args, ref destination, .. } => {
                 let func_ty = mir.operand_ty(tcx, func);
                 debug!("check_terminator: call, func_ty={:?}", func_ty);
                 let func_ty = match func_ty.sty {
-                    ty::TyBareFn(_, func_ty) => func_ty,
+                    ty::TyFnDef(_, _, func_ty) | ty::TyFnPtr(func_ty) => func_ty,
                     _ => {
                         span_mirbug!(self, term, "call to non-function {:?}", func_ty);
                         return;
@@ -572,17 +577,21 @@ impl TypeckMir {
     }
 }
 
-impl MirPass for TypeckMir {
-    fn run_on_mir<'a, 'tcx>(&mut self, mir: &mut Mir<'tcx>, infcx: &InferCtxt<'a, 'tcx>)
-    {
-        if infcx.tcx.sess.err_count() > 0 {
+impl<'tcx> MirPass<'tcx> for TypeckMir {
+    fn run_pass(&mut self, tcx: &TyCtxt<'tcx>, id: NodeId, mir: &mut Mir<'tcx>) {
+        if tcx.sess.err_count() > 0 {
             // compiling a broken program can obviously result in a
             // broken MIR, so try not to report duplicate errors.
             return;
         }
-
-        let mut checker = TypeChecker::new(infcx);
-
+        let def_id = tcx.map.local_def_id(id);
+        let _task = tcx.dep_graph.in_task(DepNode::MirTypeck(def_id));
+        let param_env = ty::ParameterEnvironment::for_item(tcx, id);
+        let infcx = infer::new_infer_ctxt(tcx,
+                                          &tcx.tables,
+                                          Some(param_env),
+                                          ProjectionMode::AnyFinal);
+        let mut checker = TypeChecker::new(&infcx);
         {
             let mut verifier = TypeVerifier::new(&mut checker, mir);
             verifier.visit_mir(mir);
@@ -591,8 +600,9 @@ impl MirPass for TypeckMir {
                 return;
             }
         }
-
         checker.typeck_mir(mir);
         checker.verify_obligations(mir);
     }
 }
+
+impl Pass for TypeckMir {}
diff --git a/src/librustc_mir/traversal.rs b/src/librustc_mir/traversal.rs
new file mode 100644 (file)
index 0000000..8b68211
--- /dev/null
@@ -0,0 +1,276 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::vec;
+
+use rustc_data_structures::bitvec::BitVector;
+
+use rustc::mir::repr::*;
+
+/// Preorder traversal of a graph.
+///
+/// Preorder traversal is when each node is visited before any of its
+/// successors.
+///
+///         A
+///        / \
+///       /   \
+///      B     C
+///       \   /
+///        \ /
+///         D
+///
+/// A preorder traversal of this graph is either `A B D C` or `A C D B`
+#[derive(Clone)]
+pub struct Preorder<'a, 'tcx: 'a> {
+    mir: &'a Mir<'tcx>,
+    visited: BitVector,
+    worklist: Vec<BasicBlock>,
+}
+
+impl<'a, 'tcx> Preorder<'a, 'tcx> {
+    pub fn new(mir: &'a Mir<'tcx>, root: BasicBlock) -> Preorder<'a, 'tcx> {
+        let worklist = vec![root];
+
+        Preorder {
+            mir: mir,
+            visited: BitVector::new(mir.basic_blocks.len()),
+            worklist: worklist
+        }
+    }
+}
+
+pub fn preorder<'a, 'tcx>(mir: &'a Mir<'tcx>) -> Preorder<'a, 'tcx> {
+    Preorder::new(mir, START_BLOCK)
+}
+
+impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> {
+    type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+    fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+        while let Some(idx) = self.worklist.pop() {
+            if !self.visited.insert(idx.index()) {
+                continue;
+            }
+
+            let data = self.mir.basic_block_data(idx);
+
+            if let Some(ref term) = data.terminator {
+                for &succ in term.successors().iter() {
+                    self.worklist.push(succ);
+                }
+            }
+
+            return Some((idx, data));
+        }
+
+        None
+    }
+}
+
+/// Postorder traversal of a graph.
+///
+/// Postorder traversal is when each node is visited after all of its
+/// successors, except when the successor is only reachable by a back-edge.
+///
+///         A
+///        / \
+///       /   \
+///      B     C
+///       \   /
+///        \ /
+///         D
+///
+/// A Postorder traversal of this graph is `D B C A` or `D C B A`
+pub struct Postorder<'a, 'tcx: 'a> {
+    mir: &'a Mir<'tcx>,
+    visited: BitVector,
+    visit_stack: Vec<(BasicBlock, vec::IntoIter<BasicBlock>)>
+}
+
+impl<'a, 'tcx> Postorder<'a, 'tcx> {
+    pub fn new(mir: &'a Mir<'tcx>, root: BasicBlock) -> Postorder<'a, 'tcx> {
+        let mut po = Postorder {
+            mir: mir,
+            visited: BitVector::new(mir.basic_blocks.len()),
+            visit_stack: Vec::new()
+        };
+
+
+        let data = po.mir.basic_block_data(root);
+
+        if let Some(ref term) = data.terminator {
+            po.visited.insert(root.index());
+
+            let succs = term.successors().into_owned().into_iter();
+
+            po.visit_stack.push((root, succs));
+            po.traverse_successor();
+        }
+
+        po
+    }
+
+    fn traverse_successor(&mut self) {
+        // This loop is somewhat involved: partly because the borrow checker
+        // constrains how it can be written, and partly because the traversal
+        // itself takes some explaining.
+        //
+        // It does the actual traversal of the graph, while the `next` method on the iterator
+        // just pops off of the stack. `visit_stack` is a stack containing pairs of nodes and
+        // iterators over the successors of those nodes. Each iteration attempts to get the next
+        // node from the top of the stack, then pushes that node and an iterator over the
+        // successors to the top of the stack. This loop only grows `visit_stack`, stopping when
+        // we reach a child that has no children that we haven't already visited.
+        //
+        // For a graph that looks like this:
+        //
+        //         A
+        //        / \
+        //       /   \
+        //      B     C
+        //      |     |
+        //      |     |
+        //      D     |
+        //       \   /
+        //        \ /
+        //         E
+        //
+        // The state of the stack starts out with just the root node (`A` in this case);
+        //     [(A, [B, C])]
+        //
+        // When `traverse_successor` is first called, the following happens:
+        //
+        //     [(B, [D]),  // `B` taken from the successors of `A`, pushed to the
+        //                 // top of the stack along with the successors of `B`
+        //      (A, [C])]
+        //
+        //     [(D, [E]),  // `D` taken from successors of `B`, pushed to stack
+        //      (B, []),
+        //      (A, [C])]
+        //
+        //     [(E, []),   // `E` taken from successors of `D`, pushed to stack
+        //      (D, []),
+        //      (B, []),
+        //      (A, [C])]
+        //
+        // Now that the top of the stack has no successors we can traverse, each item will
+        // be popped off during iteration until we get back to `A`. This yields [E, D, B].
+        //
+        // When we yield `B` and call `traverse_successor`, we push `C` to the stack, but
+        // since we've already visited `E`, that child isn't added to the stack. The last
+        // two iterations yield `C` and finally `A` for a final traversal of [E, D, B, C, A]
+        loop {
+            let bb = if let Some(&mut (_, ref mut iter)) = self.visit_stack.last_mut() {
+                if let Some(bb) = iter.next() {
+                    bb
+                } else {
+                    break;
+                }
+            } else {
+                break;
+            };
+
+            if self.visited.insert(bb.index()) {
+                let data = self.mir.basic_block_data(bb);
+
+                if let Some(ref term) = data.terminator {
+                    let succs = term.successors().into_owned().into_iter();
+                    self.visit_stack.push((bb, succs));
+                }
+            }
+        }
+    }
+}
+
+pub fn postorder<'a, 'tcx>(mir: &'a Mir<'tcx>) -> Postorder<'a, 'tcx> {
+    Postorder::new(mir, START_BLOCK)
+}
+
+impl<'a, 'tcx> Iterator for Postorder<'a, 'tcx> {
+    type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+    fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+        let next = self.visit_stack.pop();
+        if next.is_some() {
+            self.traverse_successor();
+        }
+
+        next.map(|(bb, _)| {
+            let data = self.mir.basic_block_data(bb);
+            (bb, data)
+        })
+    }
+}
+
+/// Reverse postorder traversal of a graph
+///
+/// Reverse postorder is the reverse order of a postorder traversal.
+/// This is different from a preorder traversal and represents a natural
+/// linearisation of control-flow.
+///
+///         A
+///        / \
+///       /   \
+///      B     C
+///       \   /
+///        \ /
+///         D
+///
+/// A reverse postorder traversal of this graph is either `A B C D` or `A C B D`
+/// Note that for a graph containing no loops (i.e. a DAG), this is equivalent to
+/// a topological sort.
+///
+/// Construction of a `ReversePostorder` traversal requires doing a full
+/// postorder traversal of the graph, therefore this traversal should be
+/// constructed as few times as possible. Use the `reset` method to be able
+/// to re-use the traversal.
+#[derive(Clone)]
+pub struct ReversePostorder<'a, 'tcx: 'a> {
+    mir: &'a Mir<'tcx>,
+    blocks: Vec<BasicBlock>,
+    idx: usize
+}
+
+impl<'a, 'tcx> ReversePostorder<'a, 'tcx> {
+    pub fn new(mir: &'a Mir<'tcx>, root: BasicBlock) -> ReversePostorder<'a, 'tcx> {
+        let blocks : Vec<_> = Postorder::new(mir, root).map(|(bb, _)| bb).collect();
+
+        let len = blocks.len();
+
+        ReversePostorder {
+            mir: mir,
+            blocks: blocks,
+            idx: len
+        }
+    }
+
+    pub fn reset(&mut self) {
+        self.idx = self.blocks.len();
+    }
+}
+
+
+pub fn reverse_postorder<'a, 'tcx>(mir: &'a Mir<'tcx>) -> ReversePostorder<'a, 'tcx> {
+    ReversePostorder::new(mir, START_BLOCK)
+}
+
+impl<'a, 'tcx> Iterator for ReversePostorder<'a, 'tcx> {
+    type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+    fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+        if self.idx == 0 { return None; }
+        self.idx -= 1;
+
+        self.blocks.get(self.idx).map(|&bb| {
+            let data = self.mir.basic_block_data(bb);
+            (bb, data)
+        })
+    }
+}
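
A minimal usage sketch for the traversals above, assuming a `Mir` is in hand (the loop bodies are illustrative):

    for (bb, data) in traversal::preorder(&mir) {
        debug!("visiting {:?} ({} statements)", bb, data.statements.len());
    }
    // Reverse postorder visits a block only after all of its predecessors
    // (back-edges aside), which is the natural order for forward analyses.
    for (bb, _data) in traversal::reverse_postorder(&mir) {
        // process blocks in roughly execution order
    }
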
index 7cc6510fa1fd1ca656a70efb0d46e7bf3ce0b469..fa6bd3dfb67ddf7cc9684380a73572d89727c671 100644 (file)
@@ -11,5 +11,5 @@ crate-type = ["dylib"]
 [dependencies]
 log = { path = "../liblog" }
 rustc = { path = "../librustc" }
-rustc_front = { path = "../librustc_front" }
+rustc_const_eval = { path = "../librustc_const_eval" }
 syntax = { path = "../libsyntax" }
index 54dcda27b77a3b6823f22f6dab83ecc55d9cca46..97a4c14863d59c083b54ea56fce231dea707ede8 100644 (file)
@@ -44,13 +44,13 @@ impl<'a, 'v> Visitor<'v> for CheckBlock<'a> {
             visit::walk_expr(self, e);
         }
     }
-    fn visit_item(&mut self, _i: &'v ast::Item) { panic!("should be handled in CheckConstFn") }
+    fn visit_item(&mut self, _i: &'v ast::Item) { bug!("should be handled in CheckConstFn") }
     fn visit_fn(&mut self,
                 _fk: FnKind<'v>,
                 _fd: &'v ast::FnDecl,
                 _b: &'v ast::Block,
                 _s: Span,
-                _fn_id: ast::NodeId) { panic!("should be handled in CheckConstFn") }
+                _fn_id: ast::NodeId) { bug!("should be handled in CheckConstFn") }
 }
 
 fn check_block(sess: &Session, b: &ast::Block, kind: &'static str) {
@@ -67,7 +67,7 @@ fn check_block(sess: &Session, b: &ast::Block, kind: &'static str) {
             }
             ast::StmtKind::Expr(ref expr, _) => expr.span,
             ast::StmtKind::Semi(ref semi, _) => semi.span,
-            ast::StmtKind::Mac(..) => unreachable!(),
+            ast::StmtKind::Mac(..) => bug!(),
         };
         span_err!(sess, span, E0016,
                   "blocks in {}s are limited to items and tail expressions", kind);
index b0d459063ef29a5bccf2c5858d10ce574a3e7368..4659bb389dc6b4f7e70c15d121f6b3e6e8fbadd3 100644 (file)
 // by borrowck::gather_loans
 
 use rustc::dep_graph::DepNode;
-use rustc::middle::ty::cast::{CastKind};
-use rustc::middle::const_eval::{self, ConstEvalErr};
-use rustc::middle::const_eval::ErrKind::IndexOpFeatureGated;
-use rustc::middle::const_eval::EvalHint::ExprTypeChecked;
-use rustc::middle::def::Def;
-use rustc::middle::def_id::DefId;
+use rustc::ty::cast::{CastKind};
+use rustc_const_eval::{ConstEvalErr, lookup_const_fn_by_id, compare_lit_exprs};
+use rustc_const_eval::{eval_const_expr_partial, lookup_const_by_id};
+use rustc_const_eval::ErrKind::{IndexOpFeatureGated, UnimplementedConstVal};
+use rustc_const_eval::EvalHint::ExprTypeChecked;
+use rustc::hir::def::Def;
+use rustc::hir::def_id::DefId;
 use rustc::middle::expr_use_visitor as euv;
-use rustc::middle::infer;
+use rustc::infer;
 use rustc::middle::mem_categorization as mc;
 use rustc::middle::mem_categorization::Categorization;
-use rustc::middle::traits;
-use rustc::middle::ty::{self, Ty};
+use rustc::ty::{self, Ty, TyCtxt};
+use rustc::traits::{self, ProjectionMode};
 use rustc::util::nodemap::NodeMap;
 use rustc::middle::const_qualif::ConstQualif;
 use rustc::lint::builtin::CONST_ERR;
 
-use rustc_front::hir::{self, PatKind};
+use rustc::hir::{self, PatKind};
 use syntax::ast;
 use syntax::codemap::Span;
 use syntax::feature_gate::UnstableFeatures;
-use rustc_front::intravisit::{self, FnKind, Visitor};
+use rustc::hir::intravisit::{self, FnKind, Visitor};
 
 use std::collections::hash_map::Entry;
 use std::cmp::Ordering;
@@ -65,7 +66,7 @@ enum Mode {
 }
 
 struct CheckCrateVisitor<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     mode: Mode,
     qualif: ConstQualif,
     rvalue_borrows: NodeMap<hir::Mutability>
@@ -92,7 +93,10 @@ impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> {
             None => self.tcx.empty_parameter_environment()
         };
 
-        let infcx = infer::new_infer_ctxt(self.tcx, &self.tcx.tables, Some(param_env));
+        let infcx = infer::new_infer_ctxt(self.tcx,
+                                          &self.tcx.tables,
+                                          Some(param_env),
+                                          ProjectionMode::AnyFinal);
 
         f(&mut euv::ExprUseVisitor::new(self, &infcx))
     }
@@ -106,6 +110,16 @@ impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> {
                 entry.insert(ConstQualif::empty());
             }
         }
+        if let Err(err) = eval_const_expr_partial(self.tcx, expr, ExprTypeChecked, None) {
+            match err.kind {
+                UnimplementedConstVal(_) => {},
+                IndexOpFeatureGated => {},
+                _ => self.tcx.sess.add_lint(CONST_ERR, expr.id, expr.span,
+                                         format!("constant evaluation error: {}. This will \
+                                                 become a HARD ERROR in the future",
+                                                 err.description())),
+            }
+        }
         self.with_mode(mode, |this| {
             this.with_euv(None, |euv| euv.consume_expr(expr));
             this.visit_expr(expr);
@@ -129,10 +143,10 @@ impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> {
         }
 
         let mode = match fk {
-            FnKind::ItemFn(_, _, _, hir::Constness::Const, _, _) => {
+            FnKind::ItemFn(_, _, _, hir::Constness::Const, _, _, _) => {
                 Mode::ConstFn
             }
-            FnKind::Method(_, m, _) => {
+            FnKind::Method(_, m, _, _) => {
                 if m.constness == hir::Constness::Const {
                     Mode::ConstFn
                 } else {
@@ -166,7 +180,7 @@ impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> {
                             def_id: DefId,
                             ret_ty: Ty<'tcx>)
                             -> bool {
-        if let Some(fn_like) = const_eval::lookup_const_fn_by_id(self.tcx, def_id) {
+        if let Some(fn_like) = lookup_const_fn_by_id(self.tcx, def_id) {
             if
                 // we are in a static/const initializer
                 self.mode != Mode::Var &&
@@ -225,7 +239,7 @@ impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> {
             Mode::Const => "constant",
             Mode::ConstFn => "constant function",
             Mode::StaticMut | Mode::Static => "static",
-            Mode::Var => unreachable!(),
+            Mode::Var => bug!(),
         }
     }
 
@@ -247,7 +261,10 @@ impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> {
 
     fn check_static_type(&self, e: &hir::Expr) {
         let ty = self.tcx.node_id_to_type(e.id);
-        let infcx = infer::new_infer_ctxt(self.tcx, &self.tcx.tables, None);
+        let infcx = infer::new_infer_ctxt(self.tcx,
+                                          &self.tcx.tables,
+                                          None,
+                                          ProjectionMode::AnyFinal);
         let cause = traits::ObligationCause::new(e.span, e.id, traits::SharedStatic);
         let mut fulfillment_cx = traits::FulfillmentContext::new();
         fulfillment_cx.register_builtin_bound(&infcx, ty, ty::BoundSync, cause);
@@ -329,7 +346,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for CheckCrateVisitor<'a, 'tcx> {
                 self.global_expr(Mode::Const, &start);
                 self.global_expr(Mode::Const, &end);
 
-                match const_eval::compare_lit_exprs(self.tcx, start, end) {
+                match compare_lit_exprs(self.tcx, start, end) {
                     Some(Ordering::Less) |
                     Some(Ordering::Equal) => {}
                     Some(Ordering::Greater) => {
@@ -393,7 +410,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for CheckCrateVisitor<'a, 'tcx> {
                 // The count is checked elsewhere (typeck).
                 let count = match node_ty.sty {
                     ty::TyArray(_, n) => n,
-                    _ => unreachable!()
+                    _ => bug!()
                 };
                 // [element; 0] is always zero-sized.
                 if count == 0 {
@@ -425,9 +442,10 @@ impl<'a, 'tcx, 'v> Visitor<'v> for CheckCrateVisitor<'a, 'tcx> {
                 match node_ty.sty {
                     ty::TyUint(_) | ty::TyInt(_) if div_or_rem => {
                         if !self.qualif.intersects(ConstQualif::NOT_CONST) {
-                            match const_eval::eval_const_expr_partial(
+                            match eval_const_expr_partial(
                                     self.tcx, ex, ExprTypeChecked, None) {
                                 Ok(_) => {}
+                                Err(ConstEvalErr { kind: UnimplementedConstVal(_), ..}) |
                                 Err(ConstEvalErr { kind: IndexOpFeatureGated, ..}) => {},
                                 Err(msg) => {
                                     self.tcx.sess.add_lint(CONST_ERR, ex.id,
@@ -563,7 +581,7 @@ fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>,
         hir::ExprCast(ref from, _) => {
             debug!("Checking const cast(id={})", from.id);
             match v.tcx.cast_kinds.borrow().get(&from.id) {
-                None => v.tcx.sess.span_bug(e.span, "no kind for cast"),
+                None => span_bug!(e.span, "no kind for cast"),
                 Some(&CastKind::PtrAddrCast) | Some(&CastKind::FnPtrAddrCast) => {
                     v.add_qualif(ConstQualif::NOT_CONST);
                     if v.mode != Mode::Var {
@@ -582,7 +600,7 @@ fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>,
                     v.add_qualif(ConstQualif::NON_ZERO_SIZED);
                 }
                 Some(Def::Struct(..)) => {
-                    if let ty::TyBareFn(..) = node_ty.sty {
+                    if let ty::TyFnDef(..) = node_ty.sty {
                         // Count the function pointer.
                         v.add_qualif(ConstQualif::NON_ZERO_SIZED);
                     }
@@ -604,9 +622,8 @@ fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>,
                 }
                 Some(Def::Const(did)) |
                 Some(Def::AssociatedConst(did)) => {
-                    if let Some(expr) = const_eval::lookup_const_by_id(v.tcx, did,
-                                                                       Some(e.id),
-                                                                       None) {
+                    let substs = Some(v.tcx.node_id_item_substs(e.id).substs);
+                    if let Some((expr, _)) = lookup_const_by_id(v.tcx, did, substs) {
                         let inner = v.global_expr(Mode::Const, expr);
                         v.add_qualif(inner);
                     }
@@ -747,13 +764,10 @@ fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>,
         hir::ExprAgain(_) |
         hir::ExprRet(_) |
 
-        // Miscellaneous expressions that could be implemented.
-        hir::ExprRange(..) |
-
         // Expressions with side-effects.
         hir::ExprAssign(..) |
         hir::ExprAssignOp(..) |
-        hir::ExprInlineAsm(_) => {
+        hir::ExprInlineAsm(..) => {
             v.add_qualif(ConstQualif::NOT_CONST);
             if v.mode != Mode::Var {
                 span_err!(v.tcx.sess, e.span, E0019,
@@ -788,7 +802,7 @@ fn check_adjustments<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>, e: &hir::Exp
     }
 }
 
-pub fn check_crate(tcx: &ty::ctxt) {
+pub fn check_crate(tcx: &TyCtxt) {
     tcx.visit_all_items_in_krate(DepNode::CheckConst, &mut CheckCrateVisitor {
         tcx: tcx,
         mode: Mode::Var,
index 91bfb19aa9d60371c563703b021bf3c9d0014e86..53ae1b30f7c1796dd48c3d2d776015f50d6e22c3 100644 (file)
@@ -28,8 +28,8 @@
 #![feature(rustc_private)]
 
 extern crate core;
-extern crate rustc;
-extern crate rustc_front;
+#[macro_use] extern crate rustc;
+extern crate rustc_const_eval;
 
 #[macro_use] extern crate log;
 #[macro_use] extern crate syntax;
index a89627aea82fbc50a46af29c5f5657f0cb85ed5c..9a58a704c52ccb8f84d831617ebd6c82b8329550 100644 (file)
@@ -12,9 +12,9 @@ use self::Context::*;
 use rustc::session::Session;
 
 use rustc::dep_graph::DepNode;
-use rustc::front::map::Map;
-use rustc_front::intravisit::{self, Visitor};
-use rustc_front::hir;
+use rustc::hir::map::Map;
+use rustc::hir::intravisit::{self, Visitor};
+use rustc::hir;
 use syntax::codemap::Span;
 
 #[derive(Clone, Copy, PartialEq)]
index f5cc020932b6308087e21508fe43fde53f4ad095..754d5ef8f528dac080b1a49934e0cc1327adef38 100644 (file)
 
 use rustc::dep_graph::DepNode;
 use rustc::middle::expr_use_visitor as euv;
-use rustc::middle::infer;
+use rustc::infer;
 use rustc::middle::mem_categorization as mc;
-use rustc::middle::ty::{self, ParameterEnvironment};
+use rustc::ty::{self, TyCtxt, ParameterEnvironment};
+use rustc::traits::ProjectionMode;
 
-use rustc_front::hir;
-use rustc_front::intravisit;
+use rustc::hir;
+use rustc::hir::intravisit;
 use syntax::ast;
 use syntax::codemap::Span;
 
-pub fn check_crate(tcx: &ty::ctxt) {
+pub fn check_crate(tcx: &TyCtxt) {
     let mut rvcx = RvalueContext { tcx: tcx };
     tcx.visit_all_items_in_krate(DepNode::RvalueCheck, &mut rvcx);
 }
 
 struct RvalueContext<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
 }
 
 impl<'a, 'tcx, 'v> intravisit::Visitor<'v> for RvalueContext<'a, 'tcx> {
@@ -43,7 +44,8 @@ impl<'a, 'tcx, 'v> intravisit::Visitor<'v> for RvalueContext<'a, 'tcx> {
             let param_env = ParameterEnvironment::for_item(self.tcx, fn_id);
             let infcx = infer::new_infer_ctxt(self.tcx,
                                               &self.tcx.tables,
-                                              Some(param_env.clone()));
+                                              Some(param_env.clone()),
+                                              ProjectionMode::AnyFinal);
             let mut delegate = RvalueContextDelegate { tcx: self.tcx, param_env: &param_env };
             let mut euv = euv::ExprUseVisitor::new(&mut delegate, &infcx);
             euv.walk_fn(fd, b);
@@ -53,7 +55,7 @@ impl<'a, 'tcx, 'v> intravisit::Visitor<'v> for RvalueContext<'a, 'tcx> {
 }
 
 struct RvalueContextDelegate<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     param_env: &'a ty::ParameterEnvironment<'a,'tcx>,
 }
 
index 329ce21edbef40deaa2ed58ecb569641054c7e09..245960a04f030972917105cdded1ea60fe619df3 100644 (file)
 // recursively.
 
 use rustc::dep_graph::DepNode;
-use rustc::front::map as ast_map;
+use rustc::hir::map as ast_map;
 use rustc::session::{Session, CompileResult};
-use rustc::middle::def::{Def, DefMap};
+use rustc::hir::def::{Def, DefMap};
 use rustc::util::nodemap::NodeMap;
 
 use syntax::{ast};
 use syntax::codemap::Span;
 use syntax::feature_gate::{GateIssue, emit_feature_err};
-use rustc_front::intravisit::{self, Visitor};
-use rustc_front::hir;
+use rustc::hir::intravisit::{self, Visitor};
+use rustc::hir;
 
 use std::cell::RefCell;
 
@@ -218,9 +218,9 @@ impl<'a, 'ast: 'a> Visitor<'ast> for CheckItemRecursionVisitor<'a, 'ast> {
             // borrow fall out of scope, so that we can reborrow farther down.
             maybe_expr = (*get_expr).clone();
         } else {
-            self.sess.span_bug(variant.span,
-                               "`check_static_recursion` attempted to visit \
-                                variant with unknown discriminant")
+            span_bug!(variant.span,
+                      "`check_static_recursion` attempted to visit \
+                      variant with unknown discriminant")
         }
         // If `maybe_expr` is `None`, that's because no discriminant is
         // specified that affects this variant. Thus, no risk of recursion.
@@ -254,10 +254,10 @@ impl<'a, 'ast: 'a> Visitor<'ast> for CheckItemRecursionVisitor<'a, 'ast> {
                                     self.visit_impl_item(item),
                                 ast_map::NodeForeignItem(_) => {},
                                 _ => {
-                                    self.sess.span_bug(
+                                    span_bug!(
                                         e.span,
-                                        &format!("expected item, found {}",
-                                                 self.ast_map.node_to_string(node_id)));
+                                        "expected item, found {}",
+                                        self.ast_map.node_to_string(node_id));
                                 }
                             }
                         }
@@ -277,9 +277,9 @@ impl<'a, 'ast: 'a> Visitor<'ast> for CheckItemRecursionVisitor<'a, 'ast> {
                                 let variant = self.ast_map.expect_variant(variant_id);
                                 self.visit_variant(variant, generics, enum_id);
                             } else {
-                                self.sess.span_bug(e.span,
-                                                   "`check_static_recursion` found \
-                                                    non-enum in Def::Variant");
+                                span_bug!(e.span,
+                                          "`check_static_recursion` found \
+                                           non-enum in Def::Variant");
                             }
                         }
                     }
index 97bf4f854ccb432b17112fc13911413a2f1287a6..92f37f974efdccf5ea0623e692149da48779a95c 100644 (file)
@@ -7,7 +7,3 @@ version = "0.0.0"
 name = "rustc_platform_intrinsics"
 path = "lib.rs"
 crate-type = ["dylib"]
-
-[dependencies]
-rustc_llvm = { path = "../librustc_llvm" }
-rustc = { path = "../librustc" }
index fda65554cd2f23f4df334a0e061a636fe1e1e05f..0fb8513e138f3d9ee8fde83369bb4acbce4a0280 100644 (file)
 
 #![allow(unused_imports)]
 
-use {Intrinsic, i, i_, u, u_, f, v, v_, agg, p, void};
+use {Intrinsic, Type};
 use IntrinsicDef::Named;
-use rustc::middle::ty;
 
 // The default inlining settings trigger a pathological behaviour in
 // LLVM, which makes compilation very slow. See #28273.
 #[inline(never)]
-pub fn find<'tcx>(_tcx: &ty::ctxt<'tcx>, name: &str) -> Option<Intrinsic> {
+pub fn find(name: &str) -> Option<Intrinsic> {
     if !name.starts_with("aarch64_v") { return None }
     Some(match &name["aarch64_v".len()..] {
         "hadd_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.shadd.v8i8")
         },
         "hadd_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.uhadd.v8i8")
         },
         "hadd_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.shadd.v4i16")
         },
         "hadd_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.uhadd.v4i16")
         },
         "hadd_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.shadd.v2i32")
         },
         "hadd_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.uhadd.v2i32")
         },
         "haddq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.shadd.v16i8")
         },
         "haddq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.uhadd.v16i8")
         },
         "haddq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.shadd.v8i16")
         },
         "haddq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.uhadd.v8i16")
         },
         "haddq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.shadd.v4i32")
         },
         "haddq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.uhadd.v4i32")
         },
         "rhadd_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.srhadd.v8i8")
         },
         "rhadd_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.urhadd.v8i8")
         },
         "rhadd_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.srhadd.v4i16")
         },
         "rhadd_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.urhadd.v4i16")
         },
         "rhadd_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.srhadd.v2i32")
         },
         "rhadd_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.urhadd.v2i32")
         },
         "rhaddq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.srhadd.v16i8")
         },
         "rhaddq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.urhadd.v16i8")
         },
         "rhaddq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.srhadd.v8i16")
         },
         "rhaddq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.urhadd.v8i16")
         },
         "rhaddq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.srhadd.v4i32")
         },
         "rhaddq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.urhadd.v4i32")
         },
         "qadd_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.sqadd.v8i8")
         },
         "qadd_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.uqadd.v8i8")
         },
         "qadd_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sqadd.v4i16")
         },
         "qadd_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.uqadd.v4i16")
         },
         "qadd_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sqadd.v2i32")
         },
         "qadd_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.uqadd.v2i32")
         },
         "qadd_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.aarch64.neon.sqadd.v1i64")
         },
         "qadd_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(u(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.aarch64.neon.uqadd.v1i64")
         },
         "qaddq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.sqadd.v16i8")
         },
         "qaddq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.uqadd.v16i8")
         },
         "qaddq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.sqadd.v8i16")
         },
         "qaddq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.uqadd.v8i16")
         },
         "qaddq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.sqadd.v4i32")
         },
         "qaddq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.uqadd.v4i32")
         },
         "qaddq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.sqadd.v2i64")
         },
         "qaddq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.aarch64.neon.uqadd.v2i64")
         },
         "uqadd_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(u(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::U8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.suqadd.v16i8")
         },
         "uqadd_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(u(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.suqadd.v8i16")
         },
         "uqadd_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(u(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.suqadd.v4i32")
         },
         "uqadd_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(u(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.suqadd.v2i64")
         },
         "sqadd_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(i(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.usqadd.v16i8")
         },
         "sqadd_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(i(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.usqadd.v8i16")
         },
         "sqadd_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(i(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.usqadd.v4i32")
         },
         "sqadd_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(i(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.aarch64.neon.usqadd.v2i64")
         },
         "raddhn_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.raddhn.v8i8")
         },
         "raddhn_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.raddhn.v8i8")
         },
         "raddhn_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.raddhn.v4i16")
         },
         "raddhn_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.raddhn.v4i16")
         },
         "raddhn_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.raddhn.v2i32")
         },
         "raddhn_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.raddhn.v2i32")
         },
         "fmulx_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.aarch64.neon.fmulx.v2f32")
         },
         "fmulx_f64" => Intrinsic {
-            inputs: vec![v(f(64), 1), v(f(64), 1)],
-            output: v(f(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS },
+            output: &::F64x1,
             definition: Named("llvm.aarch64.neon.fmulx.v1f64")
         },
         "fmulxq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.aarch64.neon.fmulx.v4f32")
         },
         "fmulxq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.aarch64.neon.fmulx.v2f64")
         },
         "fma_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.fma.v2f32")
         },
         "fma_f64" => Intrinsic {
-            inputs: vec![v(f(64), 1), v(f(64), 1)],
-            output: v(f(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS },
+            output: &::F64x1,
             definition: Named("llvm.fma.v1f64")
         },
         "fmaq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.fma.v4f32")
         },
         "fmaq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.fma.v2f64")
         },
         "qdmulh_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sqdmulh.v4i16")
         },
         "qdmulh_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sqdmulh.v2i32")
         },
         "qdmulhq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.sqdmulh.v8i16")
         },
         "qdmulhq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.sqdmulh.v4i32")
         },
         "qrdmulh_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sqrdmulh.v4i16")
         },
         "qrdmulh_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sqrdmulh.v2i32")
         },
         "qrdmulhq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.sqrdmulh.v8i16")
         },
         "qrdmulhq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.sqrdmulh.v4i32")
         },
         "mull_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.smull.v8i16")
         },
         "mull_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.umull.v8i16")
         },
         "mull_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.smull.v4i32")
         },
         "mull_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.umull.v4i32")
         },
         "mull_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.smull.v2i64")
         },
         "mull_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.aarch64.neon.umull.v2i64")
         },
         "qdmullq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.sqdmull.v8i16")
         },
         "qdmullq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.sqdmull.v4i32")
         },
         "hsub_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.shsub.v8i8")
         },
         "hsub_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.uhsub.v8i8")
         },
         "hsub_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.shsub.v4i16")
         },
         "hsub_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.uhsub.v4i16")
         },
         "hsub_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.shsub.v2i32")
         },
         "hsub_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.uhsub.v2i32")
         },
         "hsubq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.shsub.v16i8")
         },
         "hsubq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.uhsub.v16i8")
         },
         "hsubq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.shsub.v8i16")
         },
         "hsubq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.uhsub.v8i16")
         },
         "hsubq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.shsub.v4i32")
         },
         "hsubq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.uhsub.v4i32")
         },
         "qsub_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.sqsub.v8i8")
         },
         "qsub_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.uqsub.v8i8")
         },
         "qsub_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sqsub.v4i16")
         },
         "qsub_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.uqsub.v4i16")
         },
         "qsub_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sqsub.v2i32")
         },
         "qsub_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.uqsub.v2i32")
         },
         "qsub_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.aarch64.neon.sqsub.v1i64")
         },
         "qsub_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(u(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.aarch64.neon.uqsub.v1i64")
         },
         "qsubq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.sqsub.v16i8")
         },
         "qsubq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.uqsub.v16i8")
         },
         "qsubq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.sqsub.v8i16")
         },
         "qsubq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.uqsub.v8i16")
         },
         "qsubq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.sqsub.v4i32")
         },
         "qsubq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.uqsub.v4i32")
         },
         "qsubq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.sqsub.v2i64")
         },
         "qsubq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.aarch64.neon.uqsub.v2i64")
         },
         "rsubhn_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.rsubhn.v8i8")
         },
         "rsubhn_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.rsubhn.v8i8")
         },
         "rsubhn_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.rsubhn.v4i16")
         },
         "rsubhn_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.rsubhn.v4i16")
         },
         "rsubhn_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.rsubhn.v2i32")
         },
         "rsubhn_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.rsubhn.v2i32")
         },
         "abd_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.sabd.v8i8")
         },
         "abd_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.uabd.v8i8")
         },
         "abd_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sabd.v4i16")
         },
         "abd_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.uabd.v4i16")
         },
         "abd_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sabd.v2i32")
         },
         "abd_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.uabd.v2i32")
         },
         "abd_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.aarch64.neon.fabd.v2f32")
         },
         "abd_f64" => Intrinsic {
-            inputs: vec![v(f(64), 1), v(f(64), 1)],
-            output: v(f(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS },
+            output: &::F64x1,
             definition: Named("llvm.aarch64.neon.fabd.v1f64")
         },
         "abdq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.sabd.v16i8")
         },
         "abdq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.uabd.v16i8")
         },
         "abdq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.sabd.v8i16")
         },
         "abdq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.uabd.v8i16")
         },
         "abdq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.sabd.v4i32")
         },
         "abdq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.uabd.v4i32")
         },
         "abdq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.aarch64.neon.fabd.v4f32")
         },
         "abdq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.aarch64.neon.fabd.v2f64")
         },
         "max_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.smax.v8i8")
         },
         "max_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.umax.v8i8")
         },
         "max_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.smax.v4i16")
         },
         "max_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.umax.v4i16")
         },
         "max_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.smax.v2i32")
         },
         "max_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.umax.v2i32")
         },
         "max_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.aarch64.neon.fmax.v2f32")
         },
         "max_f64" => Intrinsic {
-            inputs: vec![v(f(64), 1), v(f(64), 1)],
-            output: v(f(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS },
+            output: &::F64x1,
             definition: Named("llvm.aarch64.neon.fmax.v1f64")
         },
         "maxq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.smax.v16i8")
         },
         "maxq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.umax.v16i8")
         },
         "maxq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.smax.v8i16")
         },
         "maxq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.umax.v8i16")
         },
         "maxq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.smax.v4i32")
         },
         "maxq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.umax.v4i32")
         },
         "maxq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.aarch64.neon.fmax.v4f32")
         },
         "maxq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.aarch64.neon.fmax.v2f64")
         },
         "min_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.smin.v8i8")
         },
         "min_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.umin.v8i8")
         },
         "min_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.smin.v4i16")
         },
         "min_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.umin.v4i16")
         },
         "min_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.smin.v2i32")
         },
         "min_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.umin.v2i32")
         },
         "min_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.aarch64.neon.fmin.v2f32")
         },
         "min_f64" => Intrinsic {
-            inputs: vec![v(f(64), 1), v(f(64), 1)],
-            output: v(f(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS },
+            output: &::F64x1,
             definition: Named("llvm.aarch64.neon.fmin.v1f64")
         },
         "minq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.smin.v16i8")
         },
         "minq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.umin.v16i8")
         },
         "minq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.smin.v8i16")
         },
         "minq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.umin.v8i16")
         },
         "minq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.smin.v4i32")
         },
         "minq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.umin.v4i32")
         },
         "minq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.aarch64.neon.fmin.v4f32")
         },
         "minq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.aarch64.neon.fmin.v2f64")
         },
         "maxnm_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.aarch64.neon.fmaxnm.v2f32")
         },
         "maxnm_f64" => Intrinsic {
-            inputs: vec![v(f(64), 1), v(f(64), 1)],
-            output: v(f(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS },
+            output: &::F64x1,
             definition: Named("llvm.aarch64.neon.fmaxnm.v1f64")
         },
         "maxnmq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.aarch64.neon.fmaxnm.v4f32")
         },
         "maxnmq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.aarch64.neon.fmaxnm.v2f64")
         },
         "minnm_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.aarch64.neon.fminnm.v2f32")
         },
         "minnm_f64" => Intrinsic {
-            inputs: vec![v(f(64), 1), v(f(64), 1)],
-            output: v(f(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS },
+            output: &::F64x1,
             definition: Named("llvm.aarch64.neon.fminnm.v1f64")
         },
         "minnmq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.aarch64.neon.fminnm.v4f32")
         },
         "minnmq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.aarch64.neon.fminnm.v2f64")
         },
         "shl_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.sshl.v8i8")
         },
         "shl_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(i(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.ushl.v8i8")
         },
         "shl_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sshl.v4i16")
         },
         "shl_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(i(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.ushl.v4i16")
         },
         "shl_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sshl.v2i32")
         },
         "shl_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(i(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.ushl.v2i32")
         },
         "shl_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.aarch64.neon.sshl.v1i64")
         },
         "shl_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(i(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.aarch64.neon.ushl.v1i64")
         },
         "shlq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.sshl.v16i8")
         },
         "shlq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(i(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.ushl.v16i8")
         },
         "shlq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.sshl.v8i16")
         },
         "shlq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(i(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.ushl.v8i16")
         },
         "shlq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.sshl.v4i32")
         },
         "shlq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(i(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.ushl.v4i32")
         },
         "shlq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.sshl.v2i64")
         },
         "shlq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(i(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.aarch64.neon.ushl.v2i64")
         },
         "qshl_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.sqshl.v8i8")
         },
         "qshl_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(i(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.uqshl.v8i8")
         },
         "qshl_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sqshl.v4i16")
         },
         "qshl_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(i(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.uqshl.v4i16")
         },
         "qshl_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sqshl.v2i32")
         },
         "qshl_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(i(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.uqshl.v2i32")
         },
         "qshl_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.aarch64.neon.sqshl.v1i64")
         },
         "qshl_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(i(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.aarch64.neon.uqshl.v1i64")
         },
         "qshlq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.sqshl.v16i8")
         },
         "qshlq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(i(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.uqshl.v16i8")
         },
         "qshlq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.sqshl.v8i16")
         },
         "qshlq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(i(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.uqshl.v8i16")
         },
         "qshlq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.sqshl.v4i32")
         },
         "qshlq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(i(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.uqshl.v4i32")
         },
         "qshlq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.sqshl.v2i64")
         },
         "qshlq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(i(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.aarch64.neon.uqshl.v2i64")
         },
         "rshl_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.srshl.v8i8")
         },
         "rshl_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(i(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.urshl.v8i8")
         },
         "rshl_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.srshl.v4i16")
         },
         "rshl_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(i(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.urshl.v4i16")
         },
         "rshl_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.srshl.v2i32")
         },
         "rshl_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(i(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.urshl.v2i32")
         },
         "rshl_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.aarch64.neon.srshl.v1i64")
         },
         "rshl_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(i(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.aarch64.neon.urshl.v1i64")
         },
         "rshlq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.srshl.v16i8")
         },
         "rshlq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(i(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.urshl.v16i8")
         },
         "rshlq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.srshl.v8i16")
         },
         "rshlq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(i(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.urshl.v8i16")
         },
         "rshlq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.srshl.v4i32")
         },
         "rshlq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(i(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.urshl.v4i32")
         },
         "rshlq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.srshl.v2i64")
         },
         "rshlq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(i(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.aarch64.neon.urshl.v2i64")
         },
         "qrshl_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.sqrshl.v8i8")
         },
         "qrshl_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(i(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.uqrshl.v8i8")
         },
         "qrshl_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sqrshl.v4i16")
         },
         "qrshl_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(i(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.uqrshl.v4i16")
         },
         "qrshl_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sqrshl.v2i32")
         },
         "qrshl_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(i(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.uqrshl.v2i32")
         },
         "qrshl_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.aarch64.neon.sqrshl.v1i64")
         },
         "qrshl_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(i(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.aarch64.neon.uqrshl.v1i64")
         },
         "qrshlq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.sqrshl.v16i8")
         },
         "qrshlq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(i(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.uqrshl.v16i8")
         },
         "qrshlq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.sqrshl.v8i16")
         },
         "qrshlq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(i(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.uqrshl.v8i16")
         },
         "qrshlq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.sqrshl.v4i32")
         },
         "qrshlq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(i(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.uqrshl.v4i32")
         },
         "qrshlq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.sqrshl.v2i64")
         },
         "qrshlq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(i(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.aarch64.neon.uqrshl.v2i64")
         },
         "qshrun_n_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), u(32)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.sqshrun.v8i8")
         },
         "qshrun_n_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), u(32)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sqshrun.v4i16")
         },
         "qshrun_n_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), u(32)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sqshrun.v2i32")
         },
         "qrshrun_n_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), u(32)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.sqrshrun.v8i8")
         },
         "qrshrun_n_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), u(32)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sqrshrun.v4i16")
         },
         "qrshrun_n_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), u(32)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sqrshrun.v2i32")
         },
         "qshrn_n_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), u(32)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.sqshrn.v8i8")
         },
         "qshrn_n_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), u(32)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.uqshrn.v8i8")
         },
         "qshrn_n_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), u(32)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sqshrn.v4i16")
         },
         "qshrn_n_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), u(32)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.uqshrn.v4i16")
         },
         "qshrn_n_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), u(32)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sqshrn.v2i32")
         },
         "qshrn_n_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), u(32)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.uqshrn.v2i32")
         },
         "rshrn_n_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), u(32)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.rshrn.v8i8")
         },
         "rshrn_n_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), u(32)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.rshrn.v8i8")
         },
         "rshrn_n_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), u(32)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.rshrn.v4i16")
         },
         "rshrn_n_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), u(32)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.rshrn.v4i16")
         },
         "rshrn_n_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), u(32)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.rshrn.v2i32")
         },
         "rshrn_n_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), u(32)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.rshrn.v2i32")
         },
         "qrshrn_n_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), u(32)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.sqrshrn.v8i8")
         },
         "qrshrn_n_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), u(32)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.uqrshrn.v8i8")
         },
         "qrshrn_n_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), u(32)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sqrshrn.v4i16")
         },
         "qrshrn_n_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), u(32)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.uqrshrn.v4i16")
         },
         "qrshrn_n_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), u(32)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sqrshrn.v2i32")
         },
         "qrshrn_n_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), u(32)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.uqrshrn.v2i32")
         },
         "sri_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.vsri.v8i8")
         },
         "sri_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.vsri.v8i8")
         },
         "sri_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.vsri.v4i16")
         },
         "sri_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.vsri.v4i16")
         },
         "sri_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.vsri.v2i32")
         },
         "sri_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.vsri.v2i32")
         },
         "sri_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.aarch64.neon.vsri.v1i64")
         },
         "sri_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(u(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.aarch64.neon.vsri.v1i64")
         },
         "sriq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.vsri.v16i8")
         },
         "sriq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.vsri.v16i8")
         },
         "sriq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.vsri.v8i16")
         },
         "sriq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.vsri.v8i16")
         },
         "sriq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.vsri.v4i32")
         },
         "sriq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.vsri.v4i32")
         },
         "sriq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.vsri.v2i64")
         },
         "sriq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.aarch64.neon.vsri.v2i64")
         },
         "sli_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.vsli.v8i8")
         },
         "sli_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.vsli.v8i8")
         },
         "sli_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.vsli.v4i16")
         },
         "sli_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.vsli.v4i16")
         },
         "sli_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.vsli.v2i32")
         },
         "sli_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.vsli.v2i32")
         },
         "sli_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.aarch64.neon.vsli.v1i64")
         },
         "sli_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(u(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.aarch64.neon.vsli.v1i64")
         },
         "sliq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.vsli.v16i8")
         },
         "sliq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.vsli.v16i8")
         },
         "sliq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.vsli.v8i16")
         },
         "sliq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.vsli.v8i16")
         },
         "sliq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.vsli.v4i32")
         },
         "sliq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.vsli.v4i32")
         },
         "sliq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.vsli.v2i64")
         },
         "sliq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.aarch64.neon.vsli.v2i64")
         },
         "vqmovn_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.sqxtn.v8i8")
         },
         "vqmovn_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.uqxtn.v8i8")
         },
         "vqmovn_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sqxtn.v4i16")
         },
         "vqmovn_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.uqxtn.v4i16")
         },
         "vqmovn_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sqxtn.v2i32")
         },
         "vqmovn_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U64x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.uqxtn.v2i32")
         },
         "abs_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.abs.v8i8")
         },
         "abs_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.abs.v4i16")
         },
         "abs_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.abs.v2i32")
         },
         "abs_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.aarch64.neon.abs.v1i64")
         },
         "absq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.abs.v16i8")
         },
         "absq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.abs.v8i16")
         },
         "absq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.abs.v4i32")
         },
         "absq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.abs.v2i64")
         },
         "abs_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.fabs.v2f32")
         },
         "abs_f64" => Intrinsic {
-            inputs: vec![v(f(64), 1)],
-            output: v(f(64), 1),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x1]; &INPUTS },
+            output: &::F64x1,
             definition: Named("llvm.fabs.v1f64")
         },
         "absq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.fabs.v4f32")
         },
         "absq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.fabs.v2f64")
         },
         "qabs_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.sqabs.v8i8")
         },
         "qabs_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sqabs.v4i16")
         },
         "qabs_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sqabs.v2i32")
         },
         "qabs_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.aarch64.neon.sqabs.v1i64")
         },
         "qabsq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.sqabs.v16i8")
         },
         "qabsq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.sqabs.v8i16")
         },
         "qabsq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.sqabs.v4i32")
         },
         "qabsq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.sqabs.v2i64")
         },
         "qneg_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.sqneg.v8i8")
         },
         "qneg_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sqneg.v4i16")
         },
         "qneg_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sqneg.v2i32")
         },
         "qneg_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.aarch64.neon.sqneg.v1i64")
         },
         "qnegq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.sqneg.v16i8")
         },
         "qnegq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.sqneg.v8i16")
         },
         "qnegq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.sqneg.v4i32")
         },
         "qnegq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.sqneg.v2i64")
         },
         "clz_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.ctlz.v8i8")
         },
         "clz_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.ctlz.v8i8")
         },
         "clz_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.ctlz.v4i16")
         },
         "clz_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.ctlz.v4i16")
         },
         "clz_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.ctlz.v2i32")
         },
         "clz_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.ctlz.v2i32")
         },
         "clzq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.ctlz.v16i8")
         },
         "clzq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.ctlz.v16i8")
         },
         "clzq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.ctlz.v8i16")
         },
         "clzq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.ctlz.v8i16")
         },
         "clzq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.ctlz.v4i32")
         },
         "clzq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.ctlz.v4i32")
         },
         "cls_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.cls.v8i8")
         },
         "cls_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.cls.v8i8")
         },
         "cls_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.cls.v4i16")
         },
         "cls_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.cls.v4i16")
         },
         "cls_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.cls.v2i32")
         },
         "cls_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.cls.v2i32")
         },
         "clsq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.cls.v16i8")
         },
         "clsq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.cls.v16i8")
         },
         "clsq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.cls.v8i16")
         },
         "clsq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.cls.v8i16")
         },
         "clsq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.cls.v4i32")
         },
         "clsq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.cls.v4i32")
         },
         "cnt_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.ctpop.v8i8")
         },
         "cnt_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.ctpop.v8i8")
         },
         "cntq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.ctpop.v16i8")
         },
         "cntq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.ctpop.v16i8")
         },
         "recpe_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.urecpe.v2i32")
         },
         "recpe_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.aarch64.neon.frecpe.v2f32")
         },
         "recpe_f64" => Intrinsic {
-            inputs: vec![v(f(64), 1)],
-            output: v(f(64), 1),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x1]; &INPUTS },
+            output: &::F64x1,
             definition: Named("llvm.aarch64.neon.frecpe.v1f64")
         },
         "recpeq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.urecpe.v4i32")
         },
         "recpeq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.aarch64.neon.frecpe.v4f32")
         },
         "recpeq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.aarch64.neon.frecpe.v2f64")
         },
         "recps_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.aarch64.neon.frecps.v2f32")
         },
         "recps_f64" => Intrinsic {
-            inputs: vec![v(f(64), 1), v(f(64), 1)],
-            output: v(f(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS },
+            output: &::F64x1,
             definition: Named("llvm.aarch64.neon.frecps.v1f64")
         },
         "recpsq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.aarch64.neon.frecps.v4f32")
         },
         "recpsq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.aarch64.neon.frecps.v2f64")
         },
         "sqrt_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.sqrt.v2f32")
         },
         "sqrt_f64" => Intrinsic {
-            inputs: vec![v(f(64), 1)],
-            output: v(f(64), 1),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x1]; &INPUTS },
+            output: &::F64x1,
             definition: Named("llvm.sqrt.v1f64")
         },
         "sqrtq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.sqrt.v4f32")
         },
         "sqrtq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.sqrt.v2f64")
         },
         "rsqrte_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.ursqrte.v2i32")
         },
         "rsqrte_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.aarch64.neon.frsqrte.v2f32")
         },
         "rsqrte_f64" => Intrinsic {
-            inputs: vec![v(f(64), 1)],
-            output: v(f(64), 1),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x1]; &INPUTS },
+            output: &::F64x1,
             definition: Named("llvm.aarch64.neon.frsqrte.v1f64")
         },
         "rsqrteq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.ursqrte.v4i32")
         },
         "rsqrteq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.aarch64.neon.frsqrte.v4f32")
         },
         "rsqrteq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.aarch64.neon.frsqrte.v2f64")
         },
         "rsqrts_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.aarch64.neon.frsqrts.v2f32")
         },
         "rsqrts_f64" => Intrinsic {
-            inputs: vec![v(f(64), 1), v(f(64), 1)],
-            output: v(f(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS },
+            output: &::F64x1,
             definition: Named("llvm.aarch64.neon.frsqrts.v1f64")
         },
         "rsqrtsq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.aarch64.neon.frsqrts.v4f32")
         },
         "rsqrtsq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.aarch64.neon.frsqrts.v2f64")
         },
         "rbit_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.rbit.v8i8")
         },
         "rbit_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.rbit.v8i8")
         },
         "rbitq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.rbit.v16i8")
         },
         "rbitq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.rbit.v16i8")
         },
         "ld2_s8" => Intrinsic {
-            inputs: vec![p(true, i(8), Some(v(i(8), 8)))],
-            output: agg(false, vec![v(i(8), 8), v(i(8), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x8), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v8i8.p0v8i8")
         },
         "ld2_u8" => Intrinsic {
-            inputs: vec![p(true, u(8), Some(v(u(8), 8)))],
-            output: agg(false, vec![v(u(8), 8), v(u(8), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x8), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v8i8.p0v8i8")
         },
         "ld2_s16" => Intrinsic {
-            inputs: vec![p(true, i(16), Some(v(i(16), 4)))],
-            output: agg(false, vec![v(i(16), 4), v(i(16), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x4), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v4i16.p0v4i16")
         },
         "ld2_u16" => Intrinsic {
-            inputs: vec![p(true, u(16), Some(v(u(16), 4)))],
-            output: agg(false, vec![v(u(16), 4), v(u(16), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x4), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v4i16.p0v4i16")
         },
         "ld2_s32" => Intrinsic {
-            inputs: vec![p(true, i(32), Some(v(i(32), 2)))],
-            output: agg(false, vec![v(i(32), 2), v(i(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v2i32.p0v2i32")
         },
         "ld2_u32" => Intrinsic {
-            inputs: vec![p(true, u(32), Some(v(u(32), 2)))],
-            output: agg(false, vec![v(u(32), 2), v(u(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v2i32.p0v2i32")
         },
         "ld2_s64" => Intrinsic {
-            inputs: vec![p(true, i(64), Some(v(i(64), 1)))],
-            output: agg(false, vec![v(i(64), 1), v(i(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x1), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v1i64.p0v1i64")
         },
         "ld2_u64" => Intrinsic {
-            inputs: vec![p(true, u(64), Some(v(u(64), 1)))],
-            output: agg(false, vec![v(u(64), 1), v(u(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x1), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v1i64.p0v1i64")
         },
         "ld2_f32" => Intrinsic {
-            inputs: vec![p(true, f(32), Some(v(f(32), 2)))],
-            output: agg(false, vec![v(f(32), 2), v(f(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v2f32.p0v2f32")
         },
         "ld2_f64" => Intrinsic {
-            inputs: vec![p(true, f(64), Some(v(f(64), 1)))],
-            output: agg(false, vec![v(f(64), 1), v(f(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x1), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v1f64.p0v1f64")
         },
         "ld2q_s8" => Intrinsic {
-            inputs: vec![p(true, i(8), Some(v(i(8), 16)))],
-            output: agg(false, vec![v(i(8), 16), v(i(8), 16)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x16), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v16i8.p0v16i8")
         },
         "ld2q_u8" => Intrinsic {
-            inputs: vec![p(true, u(8), Some(v(u(8), 16)))],
-            output: agg(false, vec![v(u(8), 16), v(u(8), 16)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x16), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v16i8.p0v16i8")
         },
         "ld2q_s16" => Intrinsic {
-            inputs: vec![p(true, i(16), Some(v(i(16), 8)))],
-            output: agg(false, vec![v(i(16), 8), v(i(16), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x8), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v8i16.p0v8i16")
         },
         "ld2q_u16" => Intrinsic {
-            inputs: vec![p(true, u(16), Some(v(u(16), 8)))],
-            output: agg(false, vec![v(u(16), 8), v(u(16), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x8), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v8i16.p0v8i16")
         },
         "ld2q_s32" => Intrinsic {
-            inputs: vec![p(true, i(32), Some(v(i(32), 4)))],
-            output: agg(false, vec![v(i(32), 4), v(i(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x4), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v4i32.p0v4i32")
         },
         "ld2q_u32" => Intrinsic {
-            inputs: vec![p(true, u(32), Some(v(u(32), 4)))],
-            output: agg(false, vec![v(u(32), 4), v(u(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x4), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v4i32.p0v4i32")
         },
         "ld2q_s64" => Intrinsic {
-            inputs: vec![p(true, i(64), Some(v(i(64), 2)))],
-            output: agg(false, vec![v(i(64), 2), v(i(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v2i64.p0v2i64")
         },
         "ld2q_u64" => Intrinsic {
-            inputs: vec![p(true, u(64), Some(v(u(64), 2)))],
-            output: agg(false, vec![v(u(64), 2), v(u(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v2i64.p0v2i64")
         },
         "ld2q_f32" => Intrinsic {
-            inputs: vec![p(true, f(32), Some(v(f(32), 4)))],
-            output: agg(false, vec![v(f(32), 4), v(f(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x4), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v4f32.p0v4f32")
         },
         "ld2q_f64" => Intrinsic {
-            inputs: vec![p(true, f(64), Some(v(f(64), 2)))],
-            output: agg(false, vec![v(f(64), 2), v(f(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v2f64.p0v2f64")
         },
         "ld3_s8" => Intrinsic {
-            inputs: vec![p(true, i(8), Some(v(i(8), 8)))],
-            output: agg(false, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x8), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v8i8.p0v8i8")
         },
         "ld3_u8" => Intrinsic {
-            inputs: vec![p(true, u(8), Some(v(u(8), 8)))],
-            output: agg(false, vec![v(u(8), 8), v(u(8), 8), v(u(8), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x8), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v8i8.p0v8i8")
         },
         "ld3_s16" => Intrinsic {
-            inputs: vec![p(true, i(16), Some(v(i(16), 4)))],
-            output: agg(false, vec![v(i(16), 4), v(i(16), 4), v(i(16), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x4), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I16x4, &::I16x4, &::I16x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v4i16.p0v4i16")
         },
         "ld3_u16" => Intrinsic {
-            inputs: vec![p(true, u(16), Some(v(u(16), 4)))],
-            output: agg(false, vec![v(u(16), 4), v(u(16), 4), v(u(16), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x4), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U16x4, &::U16x4, &::U16x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v4i16.p0v4i16")
         },
         "ld3_s32" => Intrinsic {
-            inputs: vec![p(true, i(32), Some(v(i(32), 2)))],
-            output: agg(false, vec![v(i(32), 2), v(i(32), 2), v(i(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I32x2, &::I32x2, &::I32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v2i32.p0v2i32")
         },
         "ld3_u32" => Intrinsic {
-            inputs: vec![p(true, u(32), Some(v(u(32), 2)))],
-            output: agg(false, vec![v(u(32), 2), v(u(32), 2), v(u(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U32x2, &::U32x2, &::U32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v2i32.p0v2i32")
         },
         "ld3_s64" => Intrinsic {
-            inputs: vec![p(true, i(64), Some(v(i(64), 1)))],
-            output: agg(false, vec![v(i(64), 1), v(i(64), 1), v(i(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x1), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I64x1, &::I64x1, &::I64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v1i64.p0v1i64")
         },
         "ld3_u64" => Intrinsic {
-            inputs: vec![p(true, u(64), Some(v(u(64), 1)))],
-            output: agg(false, vec![v(u(64), 1), v(u(64), 1), v(u(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x1), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U64x1, &::U64x1, &::U64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v1i64.p0v1i64")
         },
         "ld3_f32" => Intrinsic {
-            inputs: vec![p(true, f(32), Some(v(f(32), 2)))],
-            output: agg(false, vec![v(f(32), 2), v(f(32), 2), v(f(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F32x2, &::F32x2, &::F32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v2f32.p0v2f32")
         },
         "ld3_f64" => Intrinsic {
-            inputs: vec![p(true, f(64), Some(v(f(64), 1)))],
-            output: agg(false, vec![v(f(64), 1), v(f(64), 1), v(f(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x1), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F64x1, &::F64x1, &::F64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v1f64.p0v1f64")
         },
         "ld3q_s8" => Intrinsic {
-            inputs: vec![p(true, i(8), Some(v(i(8), 16)))],
-            output: agg(false, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x16), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v16i8.p0v16i8")
         },
         "ld3q_u8" => Intrinsic {
-            inputs: vec![p(true, u(8), Some(v(u(8), 16)))],
-            output: agg(false, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x16), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v16i8.p0v16i8")
         },
         "ld3q_s16" => Intrinsic {
-            inputs: vec![p(true, i(16), Some(v(i(16), 8)))],
-            output: agg(false, vec![v(i(16), 8), v(i(16), 8), v(i(16), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x8), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I16x8, &::I16x8, &::I16x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v8i16.p0v8i16")
         },
         "ld3q_u16" => Intrinsic {
-            inputs: vec![p(true, u(16), Some(v(u(16), 8)))],
-            output: agg(false, vec![v(u(16), 8), v(u(16), 8), v(u(16), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x8), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U16x8, &::U16x8, &::U16x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v8i16.p0v8i16")
         },
         "ld3q_s32" => Intrinsic {
-            inputs: vec![p(true, i(32), Some(v(i(32), 4)))],
-            output: agg(false, vec![v(i(32), 4), v(i(32), 4), v(i(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x4), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I32x4, &::I32x4, &::I32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v4i32.p0v4i32")
         },
         "ld3q_u32" => Intrinsic {
-            inputs: vec![p(true, u(32), Some(v(u(32), 4)))],
-            output: agg(false, vec![v(u(32), 4), v(u(32), 4), v(u(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x4), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U32x4, &::U32x4, &::U32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v4i32.p0v4i32")
         },
         "ld3q_s64" => Intrinsic {
-            inputs: vec![p(true, i(64), Some(v(i(64), 2)))],
-            output: agg(false, vec![v(i(64), 2), v(i(64), 2), v(i(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I64x2, &::I64x2, &::I64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v2i64.p0v2i64")
         },
         "ld3q_u64" => Intrinsic {
-            inputs: vec![p(true, u(64), Some(v(u(64), 2)))],
-            output: agg(false, vec![v(u(64), 2), v(u(64), 2), v(u(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U64x2, &::U64x2, &::U64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v2i64.p0v2i64")
         },
         "ld3q_f32" => Intrinsic {
-            inputs: vec![p(true, f(32), Some(v(f(32), 4)))],
-            output: agg(false, vec![v(f(32), 4), v(f(32), 4), v(f(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x4), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v4f32.p0v4f32")
         },
         "ld3q_f64" => Intrinsic {
-            inputs: vec![p(true, f(64), Some(v(f(64), 2)))],
-            output: agg(false, vec![v(f(64), 2), v(f(64), 2), v(f(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v2f64.p0v2f64")
         },
         "ld4_s8" => Intrinsic {
-            inputs: vec![p(true, i(8), Some(v(i(8), 8)))],
-            output: agg(false, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8), v(i(8), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x8), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I8x8, &::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v8i8.p0v8i8")
         },
         "ld4_u8" => Intrinsic {
-            inputs: vec![p(true, u(8), Some(v(u(8), 8)))],
-            output: agg(false, vec![v(u(8), 8), v(u(8), 8), v(u(8), 8), v(u(8), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x8), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U8x8, &::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v8i8.p0v8i8")
         },
         "ld4_s16" => Intrinsic {
-            inputs: vec![p(true, i(16), Some(v(i(16), 4)))],
-            output: agg(false, vec![v(i(16), 4), v(i(16), 4), v(i(16), 4), v(i(16), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x4), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I16x4, &::I16x4, &::I16x4, &::I16x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v4i16.p0v4i16")
         },
         "ld4_u16" => Intrinsic {
-            inputs: vec![p(true, u(16), Some(v(u(16), 4)))],
-            output: agg(false, vec![v(u(16), 4), v(u(16), 4), v(u(16), 4), v(u(16), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x4), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U16x4, &::U16x4, &::U16x4, &::U16x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v4i16.p0v4i16")
         },
         "ld4_s32" => Intrinsic {
-            inputs: vec![p(true, i(32), Some(v(i(32), 2)))],
-            output: agg(false, vec![v(i(32), 2), v(i(32), 2), v(i(32), 2), v(i(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I32x2, &::I32x2, &::I32x2, &::I32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v2i32.p0v2i32")
         },
         "ld4_u32" => Intrinsic {
-            inputs: vec![p(true, u(32), Some(v(u(32), 2)))],
-            output: agg(false, vec![v(u(32), 2), v(u(32), 2), v(u(32), 2), v(u(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U32x2, &::U32x2, &::U32x2, &::U32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v2i32.p0v2i32")
         },
         "ld4_s64" => Intrinsic {
-            inputs: vec![p(true, i(64), Some(v(i(64), 1)))],
-            output: agg(false, vec![v(i(64), 1), v(i(64), 1), v(i(64), 1), v(i(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x1), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I64x1, &::I64x1, &::I64x1, &::I64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v1i64.p0v1i64")
         },
         "ld4_u64" => Intrinsic {
-            inputs: vec![p(true, u(64), Some(v(u(64), 1)))],
-            output: agg(false, vec![v(u(64), 1), v(u(64), 1), v(u(64), 1), v(u(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x1), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U64x1, &::U64x1, &::U64x1, &::U64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v1i64.p0v1i64")
         },
         "ld4_f32" => Intrinsic {
-            inputs: vec![p(true, f(32), Some(v(f(32), 2)))],
-            output: agg(false, vec![v(f(32), 2), v(f(32), 2), v(f(32), 2), v(f(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F32x2, &::F32x2, &::F32x2, &::F32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v2f32.p0v2f32")
         },
         "ld4_f64" => Intrinsic {
-            inputs: vec![p(true, f(64), Some(v(f(64), 1)))],
-            output: agg(false, vec![v(f(64), 1), v(f(64), 1), v(f(64), 1), v(f(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x1), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F64x1, &::F64x1, &::F64x1, &::F64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v1f64.p0v1f64")
         },
         "ld4q_s8" => Intrinsic {
-            inputs: vec![p(true, i(8), Some(v(i(8), 16)))],
-            output: agg(false, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16), v(i(8), 16)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x16), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v16i8.p0v16i8")
         },
         "ld4q_u8" => Intrinsic {
-            inputs: vec![p(true, u(8), Some(v(u(8), 16)))],
-            output: agg(false, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16), v(u(8), 16)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x16), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v16i8.p0v16i8")
         },
         "ld4q_s16" => Intrinsic {
-            inputs: vec![p(true, i(16), Some(v(i(16), 8)))],
-            output: agg(false, vec![v(i(16), 8), v(i(16), 8), v(i(16), 8), v(i(16), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x8), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I16x8, &::I16x8, &::I16x8, &::I16x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v8i16.p0v8i16")
         },
         "ld4q_u16" => Intrinsic {
-            inputs: vec![p(true, u(16), Some(v(u(16), 8)))],
-            output: agg(false, vec![v(u(16), 8), v(u(16), 8), v(u(16), 8), v(u(16), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x8), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U16x8, &::U16x8, &::U16x8, &::U16x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v8i16.p0v8i16")
         },
         "ld4q_s32" => Intrinsic {
-            inputs: vec![p(true, i(32), Some(v(i(32), 4)))],
-            output: agg(false, vec![v(i(32), 4), v(i(32), 4), v(i(32), 4), v(i(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x4), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I32x4, &::I32x4, &::I32x4, &::I32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v4i32.p0v4i32")
         },
         "ld4q_u32" => Intrinsic {
-            inputs: vec![p(true, u(32), Some(v(u(32), 4)))],
-            output: agg(false, vec![v(u(32), 4), v(u(32), 4), v(u(32), 4), v(u(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x4), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U32x4, &::U32x4, &::U32x4, &::U32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v4i32.p0v4i32")
         },
         "ld4q_s64" => Intrinsic {
-            inputs: vec![p(true, i(64), Some(v(i(64), 2)))],
-            output: agg(false, vec![v(i(64), 2), v(i(64), 2), v(i(64), 2), v(i(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I64x2, &::I64x2, &::I64x2, &::I64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v2i64.p0v2i64")
         },
         "ld4q_u64" => Intrinsic {
-            inputs: vec![p(true, u(64), Some(v(u(64), 2)))],
-            output: agg(false, vec![v(u(64), 2), v(u(64), 2), v(u(64), 2), v(u(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U64x2, &::U64x2, &::U64x2, &::U64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v2i64.p0v2i64")
         },
         "ld4q_f32" => Intrinsic {
-            inputs: vec![p(true, f(32), Some(v(f(32), 4)))],
-            output: agg(false, vec![v(f(32), 4), v(f(32), 4), v(f(32), 4), v(f(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x4), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F32x4, &::F32x4, &::F32x4, &::F32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v4f32.p0v4f32")
         },
         "ld4q_f64" => Intrinsic {
-            inputs: vec![p(true, f(64), Some(v(f(64), 2)))],
-            output: agg(false, vec![v(f(64), 2), v(f(64), 2), v(f(64), 2), v(f(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x2), true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F64x2, &::F64x2, &::F64x2, &::F64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v2f64.p0v2f64")
         },
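The hunks above and below all apply the same mechanical rewrite: each intrinsic's `inputs`/`output` used to be rebuilt on every lookup with the `v(...)`, `p(...)` and `agg(...)` helpers inside a `vec![...]`, and now borrows `'static` data (`&::I8x16`, the inline `static INPUTS`/`PTR`/`AGG` blocks), so resolving an intrinsic no longer heap-allocates. A minimal, self-contained sketch of that pattern, using hypothetical stand-in names rather than the compiler's actual `Type`/`Intrinsic` definitions:

    // Hypothetical stand-ins for the generated table's types; not rustc's own code.
    #[allow(dead_code)]
    #[derive(Debug)]
    enum Type {
        Vector(&'static str),                           // stand-in for e.g. the i8x16 descriptor
        Pointer(&'static Type, Option<&'static Type>, bool),
        Aggregate(bool, &'static [&'static Type]),
    }

    struct Intrinsic {
        inputs: &'static [&'static Type],
        output: &'static Type,
        definition: &'static str,                       // stand-in for Named("llvm....")
    }

    // Leaf types live in statics exactly once; every signature borrows them.
    static I8X16: Type = Type::Vector("i8x16");

    fn find(name: &str) -> Option<Intrinsic> {
        match name {
            "clzq_s8" => Some(Intrinsic {
                // The `{ static INPUTS: ...; &INPUTS }` blocks in the generated file
                // play the same role as this: a borrowed, allocation-free slice.
                inputs: { static INPUTS: [&'static Type; 1] = [&I8X16]; &INPUTS },
                output: &I8X16,
                definition: "llvm.ctlz.v16i8",
            }),
            _ => None,
        }
    }

    fn main() {
        if let Some(intr) = find("clzq_s8") {
            println!("{} -> {:?}", intr.definition, intr.output);
        }
    }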
         "ld2_dup_s8" => Intrinsic {
-            inputs: vec![p(true, i(8), None)],
-            output: agg(false, vec![v(i(8), 8), v(i(8), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v8i8.p0i8")
         },
         "ld2_dup_u8" => Intrinsic {
-            inputs: vec![p(true, u(8), None)],
-            output: agg(false, vec![v(u(8), 8), v(u(8), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v8i8.p0i8")
         },
         "ld2_dup_s16" => Intrinsic {
-            inputs: vec![p(true, i(16), None)],
-            output: agg(false, vec![v(i(16), 4), v(i(16), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v4i16.p0i16")
         },
         "ld2_dup_u16" => Intrinsic {
-            inputs: vec![p(true, u(16), None)],
-            output: agg(false, vec![v(u(16), 4), v(u(16), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v4i16.p0i16")
         },
         "ld2_dup_s32" => Intrinsic {
-            inputs: vec![p(true, i(32), None)],
-            output: agg(false, vec![v(i(32), 2), v(i(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v2i32.p0i32")
         },
         "ld2_dup_u32" => Intrinsic {
-            inputs: vec![p(true, u(32), None)],
-            output: agg(false, vec![v(u(32), 2), v(u(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v2i32.p0i32")
         },
         "ld2_dup_s64" => Intrinsic {
-            inputs: vec![p(true, i(64), None)],
-            output: agg(false, vec![v(i(64), 1), v(i(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v1i64.p0i64")
         },
         "ld2_dup_u64" => Intrinsic {
-            inputs: vec![p(true, u(64), None)],
-            output: agg(false, vec![v(u(64), 1), v(u(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v1i64.p0i64")
         },
         "ld2_dup_f32" => Intrinsic {
-            inputs: vec![p(true, f(32), None)],
-            output: agg(false, vec![v(f(32), 2), v(f(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v2f32.p0f32")
         },
         "ld2_dup_f64" => Intrinsic {
-            inputs: vec![p(true, f(64), None)],
-            output: agg(false, vec![v(f(64), 1), v(f(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v1f64.p0f64")
         },
         "ld2q_dup_s8" => Intrinsic {
-            inputs: vec![p(true, i(8), None)],
-            output: agg(false, vec![v(i(8), 16), v(i(8), 16)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v16i8.p0i8")
         },
         "ld2q_dup_u8" => Intrinsic {
-            inputs: vec![p(true, u(8), None)],
-            output: agg(false, vec![v(u(8), 16), v(u(8), 16)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v16i8.p0i8")
         },
         "ld2q_dup_s16" => Intrinsic {
-            inputs: vec![p(true, i(16), None)],
-            output: agg(false, vec![v(i(16), 8), v(i(16), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v8i16.p0i16")
         },
         "ld2q_dup_u16" => Intrinsic {
-            inputs: vec![p(true, u(16), None)],
-            output: agg(false, vec![v(u(16), 8), v(u(16), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v8i16.p0i16")
         },
         "ld2q_dup_s32" => Intrinsic {
-            inputs: vec![p(true, i(32), None)],
-            output: agg(false, vec![v(i(32), 4), v(i(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v4i32.p0i32")
         },
         "ld2q_dup_u32" => Intrinsic {
-            inputs: vec![p(true, u(32), None)],
-            output: agg(false, vec![v(u(32), 4), v(u(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v4i32.p0i32")
         },
         "ld2q_dup_s64" => Intrinsic {
-            inputs: vec![p(true, i(64), None)],
-            output: agg(false, vec![v(i(64), 2), v(i(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v2i64.p0i64")
         },
         "ld2q_dup_u64" => Intrinsic {
-            inputs: vec![p(true, u(64), None)],
-            output: agg(false, vec![v(u(64), 2), v(u(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v2i64.p0i64")
         },
         "ld2q_dup_f32" => Intrinsic {
-            inputs: vec![p(true, f(32), None)],
-            output: agg(false, vec![v(f(32), 4), v(f(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v4f32.p0f32")
         },
         "ld2q_dup_f64" => Intrinsic {
-            inputs: vec![p(true, f(64), None)],
-            output: agg(false, vec![v(f(64), 2), v(f(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld2.v2f64.p0f64")
         },
         "ld3_dup_s8" => Intrinsic {
-            inputs: vec![p(true, i(8), None)],
-            output: agg(false, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v8i8.p0i8")
         },
         "ld3_dup_u8" => Intrinsic {
-            inputs: vec![p(true, u(8), None)],
-            output: agg(false, vec![v(u(8), 8), v(u(8), 8), v(u(8), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v8i8.p0i8")
         },
         "ld3_dup_s16" => Intrinsic {
-            inputs: vec![p(true, i(16), None)],
-            output: agg(false, vec![v(i(16), 4), v(i(16), 4), v(i(16), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I16x4, &::I16x4, &::I16x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v4i16.p0i16")
         },
         "ld3_dup_u16" => Intrinsic {
-            inputs: vec![p(true, u(16), None)],
-            output: agg(false, vec![v(u(16), 4), v(u(16), 4), v(u(16), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U16x4, &::U16x4, &::U16x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v4i16.p0i16")
         },
         "ld3_dup_s32" => Intrinsic {
-            inputs: vec![p(true, i(32), None)],
-            output: agg(false, vec![v(i(32), 2), v(i(32), 2), v(i(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I32x2, &::I32x2, &::I32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v2i32.p0i32")
         },
         "ld3_dup_u32" => Intrinsic {
-            inputs: vec![p(true, u(32), None)],
-            output: agg(false, vec![v(u(32), 2), v(u(32), 2), v(u(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U32x2, &::U32x2, &::U32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v2i32.p0i32")
         },
         "ld3_dup_s64" => Intrinsic {
-            inputs: vec![p(true, i(64), None)],
-            output: agg(false, vec![v(i(64), 1), v(i(64), 1), v(i(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I64x1, &::I64x1, &::I64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v1i64.p0i64")
         },
         "ld3_dup_u64" => Intrinsic {
-            inputs: vec![p(true, u(64), None)],
-            output: agg(false, vec![v(u(64), 1), v(u(64), 1), v(u(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U64x1, &::U64x1, &::U64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v1i64.p0i64")
         },
         "ld3_dup_f32" => Intrinsic {
-            inputs: vec![p(true, f(32), None)],
-            output: agg(false, vec![v(f(32), 2), v(f(32), 2), v(f(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F32x2, &::F32x2, &::F32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v2f32.p0f32")
         },
         "ld3_dup_f64" => Intrinsic {
-            inputs: vec![p(true, f(64), None)],
-            output: agg(false, vec![v(f(64), 1), v(f(64), 1), v(f(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F64x1, &::F64x1, &::F64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v1f64.p0f64")
         },
         "ld3q_dup_s8" => Intrinsic {
-            inputs: vec![p(true, i(8), None)],
-            output: agg(false, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v16i8.p0i8")
         },
         "ld3q_dup_u8" => Intrinsic {
-            inputs: vec![p(true, u(8), None)],
-            output: agg(false, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v16i8.p0i8")
         },
         "ld3q_dup_s16" => Intrinsic {
-            inputs: vec![p(true, i(16), None)],
-            output: agg(false, vec![v(i(16), 8), v(i(16), 8), v(i(16), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I16x8, &::I16x8, &::I16x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v8i16.p0i16")
         },
         "ld3q_dup_u16" => Intrinsic {
-            inputs: vec![p(true, u(16), None)],
-            output: agg(false, vec![v(u(16), 8), v(u(16), 8), v(u(16), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U16x8, &::U16x8, &::U16x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v8i16.p0i16")
         },
         "ld3q_dup_s32" => Intrinsic {
-            inputs: vec![p(true, i(32), None)],
-            output: agg(false, vec![v(i(32), 4), v(i(32), 4), v(i(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I32x4, &::I32x4, &::I32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v4i32.p0i32")
         },
         "ld3q_dup_u32" => Intrinsic {
-            inputs: vec![p(true, u(32), None)],
-            output: agg(false, vec![v(u(32), 4), v(u(32), 4), v(u(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U32x4, &::U32x4, &::U32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v4i32.p0i32")
         },
         "ld3q_dup_s64" => Intrinsic {
-            inputs: vec![p(true, i(64), None)],
-            output: agg(false, vec![v(i(64), 2), v(i(64), 2), v(i(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I64x2, &::I64x2, &::I64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v2i64.p0i64")
         },
         "ld3q_dup_u64" => Intrinsic {
-            inputs: vec![p(true, u(64), None)],
-            output: agg(false, vec![v(u(64), 2), v(u(64), 2), v(u(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U64x2, &::U64x2, &::U64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v2i64.p0i64")
         },
         "ld3q_dup_f32" => Intrinsic {
-            inputs: vec![p(true, f(32), None)],
-            output: agg(false, vec![v(f(32), 4), v(f(32), 4), v(f(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v4f32.p0f32")
         },
         "ld3q_dup_f64" => Intrinsic {
-            inputs: vec![p(true, f(64), None)],
-            output: agg(false, vec![v(f(64), 2), v(f(64), 2), v(f(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld3.v2f64.p0f64")
         },
         "ld4_dup_s8" => Intrinsic {
-            inputs: vec![p(true, i(8), None)],
-            output: agg(false, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8), v(i(8), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I8x8, &::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v8i8.p0i8")
         },
         "ld4_dup_u8" => Intrinsic {
-            inputs: vec![p(true, u(8), None)],
-            output: agg(false, vec![v(u(8), 8), v(u(8), 8), v(u(8), 8), v(u(8), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U8x8, &::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v8i8.p0i8")
         },
         "ld4_dup_s16" => Intrinsic {
-            inputs: vec![p(true, i(16), None)],
-            output: agg(false, vec![v(i(16), 4), v(i(16), 4), v(i(16), 4), v(i(16), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I16x4, &::I16x4, &::I16x4, &::I16x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v4i16.p0i16")
         },
         "ld4_dup_u16" => Intrinsic {
-            inputs: vec![p(true, u(16), None)],
-            output: agg(false, vec![v(u(16), 4), v(u(16), 4), v(u(16), 4), v(u(16), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U16x4, &::U16x4, &::U16x4, &::U16x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v4i16.p0i16")
         },
         "ld4_dup_s32" => Intrinsic {
-            inputs: vec![p(true, i(32), None)],
-            output: agg(false, vec![v(i(32), 2), v(i(32), 2), v(i(32), 2), v(i(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I32x2, &::I32x2, &::I32x2, &::I32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v2i32.p0i32")
         },
         "ld4_dup_u32" => Intrinsic {
-            inputs: vec![p(true, u(32), None)],
-            output: agg(false, vec![v(u(32), 2), v(u(32), 2), v(u(32), 2), v(u(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U32x2, &::U32x2, &::U32x2, &::U32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v2i32.p0i32")
         },
         "ld4_dup_s64" => Intrinsic {
-            inputs: vec![p(true, i(64), None)],
-            output: agg(false, vec![v(i(64), 1), v(i(64), 1), v(i(64), 1), v(i(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I64x1, &::I64x1, &::I64x1, &::I64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v1i64.p0i64")
         },
         "ld4_dup_u64" => Intrinsic {
-            inputs: vec![p(true, u(64), None)],
-            output: agg(false, vec![v(u(64), 1), v(u(64), 1), v(u(64), 1), v(u(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U64x1, &::U64x1, &::U64x1, &::U64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v1i64.p0i64")
         },
         "ld4_dup_f32" => Intrinsic {
-            inputs: vec![p(true, f(32), None)],
-            output: agg(false, vec![v(f(32), 2), v(f(32), 2), v(f(32), 2), v(f(32), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F32x2, &::F32x2, &::F32x2, &::F32x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v2f32.p0f32")
         },
         "ld4_dup_f64" => Intrinsic {
-            inputs: vec![p(true, f(64), None)],
-            output: agg(false, vec![v(f(64), 1), v(f(64), 1), v(f(64), 1), v(f(64), 1)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F64x1, &::F64x1, &::F64x1, &::F64x1]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v1f64.p0f64")
         },
         "ld4q_dup_s8" => Intrinsic {
-            inputs: vec![p(true, i(8), None)],
-            output: agg(false, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16), v(i(8), 16)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v16i8.p0i8")
         },
         "ld4q_dup_u8" => Intrinsic {
-            inputs: vec![p(true, u(8), None)],
-            output: agg(false, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16), v(u(8), 16)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v16i8.p0i8")
         },
         "ld4q_dup_s16" => Intrinsic {
-            inputs: vec![p(true, i(16), None)],
-            output: agg(false, vec![v(i(16), 8), v(i(16), 8), v(i(16), 8), v(i(16), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I16x8, &::I16x8, &::I16x8, &::I16x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v8i16.p0i16")
         },
         "ld4q_dup_u16" => Intrinsic {
-            inputs: vec![p(true, u(16), None)],
-            output: agg(false, vec![v(u(16), 8), v(u(16), 8), v(u(16), 8), v(u(16), 8)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U16x8, &::U16x8, &::U16x8, &::U16x8]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v8i16.p0i16")
         },
         "ld4q_dup_s32" => Intrinsic {
-            inputs: vec![p(true, i(32), None)],
-            output: agg(false, vec![v(i(32), 4), v(i(32), 4), v(i(32), 4), v(i(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I32x4, &::I32x4, &::I32x4, &::I32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v4i32.p0i32")
         },
         "ld4q_dup_u32" => Intrinsic {
-            inputs: vec![p(true, u(32), None)],
-            output: agg(false, vec![v(u(32), 4), v(u(32), 4), v(u(32), 4), v(u(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U32x4, &::U32x4, &::U32x4, &::U32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v4i32.p0i32")
         },
         "ld4q_dup_s64" => Intrinsic {
-            inputs: vec![p(true, i(64), None)],
-            output: agg(false, vec![v(i(64), 2), v(i(64), 2), v(i(64), 2), v(i(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I64x2, &::I64x2, &::I64x2, &::I64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v2i64.p0i64")
         },
         "ld4q_dup_u64" => Intrinsic {
-            inputs: vec![p(true, u(64), None)],
-            output: agg(false, vec![v(u(64), 2), v(u(64), 2), v(u(64), 2), v(u(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U64x2, &::U64x2, &::U64x2, &::U64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v2i64.p0i64")
         },
         "ld4q_dup_f32" => Intrinsic {
-            inputs: vec![p(true, f(32), None)],
-            output: agg(false, vec![v(f(32), 4), v(f(32), 4), v(f(32), 4), v(f(32), 4)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F32x4, &::F32x4, &::F32x4, &::F32x4]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v4f32.p0f32")
         },
         "ld4q_dup_f64" => Intrinsic {
-            inputs: vec![p(true, f(64), None)],
-            output: agg(false, vec![v(f(64), 2), v(f(64), 2), v(f(64), 2), v(f(64), 2)]),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS },
+            output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F64x2, &::F64x2, &::F64x2, &::F64x2]; &PARTS }); &AGG },
             definition: Named("llvm.aarch64.neon.ld4.v2f64.p0f64")
         },
         "padd_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.addp.v8i8")
         },
         "padd_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.addp.v8i8")
         },
         "padd_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.addp.v4i16")
         },
         "padd_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.addp.v4i16")
         },
         "padd_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.addp.v2i32")
         },
         "padd_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.addp.v2i32")
         },
         "padd_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.aarch64.neon.addp.v2f32")
         },
         "paddq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.addp.v16i8")
         },
         "paddq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.addp.v16i8")
         },
         "paddq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.addp.v8i16")
         },
         "paddq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.addp.v8i16")
         },
         "paddq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.addp.v4i32")
         },
         "paddq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.addp.v4i32")
         },
         "paddq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.aarch64.neon.addp.v4f32")
         },
         "paddq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.addp.v2i64")
         },
         "paddq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.aarch64.neon.addp.v2i64")
         },
         "paddq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.aarch64.neon.addp.v2f64")
         },
         "paddl_s16" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.saddlp.v4i16.v8i8")
         },
         "paddl_u16" => Intrinsic {
-            inputs: vec![v(u(8), 8)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.uaddlp.v4i16.v8i8")
         },
         "paddl_s32" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.saddlp.v2i32.v4i16")
         },
         "paddl_u32" => Intrinsic {
-            inputs: vec![v(u(16), 4)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.uaddlp.v2i32.v4i16")
         },
         "paddl_s64" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.aarch64.neon.saddlp.v1i64.v2i32")
         },
         "paddl_u64" => Intrinsic {
-            inputs: vec![v(u(32), 2)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.aarch64.neon.uaddlp.v1i64.v2i32")
         },
         "paddlq_s16" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.saddlp.v8i16.v16i8")
         },
         "paddlq_u16" => Intrinsic {
-            inputs: vec![v(u(8), 16)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.uaddlp.v8i16.v16i8")
         },
         "paddlq_s32" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.saddlp.v4i32.v8i16")
         },
         "paddlq_u32" => Intrinsic {
-            inputs: vec![v(u(16), 8)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.uaddlp.v4i32.v8i16")
         },
         "paddlq_s64" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.saddlp.v2i64.v4i32")
         },
         "paddlq_u64" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.aarch64.neon.uaddlp.v2i64.v4i32")
         },
         "pmax_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.smaxp.v8i8")
         },
         "pmax_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.umaxp.v8i8")
         },
         "pmax_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.smaxp.v4i16")
         },
         "pmax_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.umaxp.v4i16")
         },
         "pmax_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.smaxp.v2i32")
         },
         "pmax_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.umaxp.v2i32")
         },
         "pmax_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.aarch64.neon.fmaxp.v2f32")
         },
         "pmaxq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.smaxp.v16i8")
         },
         "pmaxq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.umaxp.v16i8")
         },
         "pmaxq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.smaxp.v8i16")
         },
         "pmaxq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.umaxp.v8i16")
         },
         "pmaxq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.smaxp.v4i32")
         },
         "pmaxq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.umaxp.v4i32")
         },
         "pmaxq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.aarch64.neon.fmaxp.v4f32")
         },
         "pmaxq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.smaxp.v2i64")
         },
         "pmaxq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.aarch64.neon.umaxp.v2i64")
         },
         "pmaxq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.aarch64.neon.fmaxp.v2f64")
         },
         "pmin_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.sminp.v8i8")
         },
         "pmin_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.uminp.v8i8")
         },
         "pmin_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.sminp.v4i16")
         },
         "pmin_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.uminp.v4i16")
         },
         "pmin_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.sminp.v2i32")
         },
         "pmin_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.uminp.v2i32")
         },
         "pmin_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.aarch64.neon.fminp.v2f32")
         },
         "pminq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.sminp.v16i8")
         },
         "pminq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.uminp.v16i8")
         },
         "pminq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.sminp.v8i16")
         },
         "pminq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.uminp.v8i16")
         },
         "pminq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.sminp.v4i32")
         },
         "pminq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.uminp.v4i32")
         },
         "pminq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.aarch64.neon.fminp.v4f32")
         },
         "pminq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.sminp.v2i64")
         },
         "pminq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.aarch64.neon.uminp.v2i64")
         },
         "pminq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.aarch64.neon.fminp.v2f64")
         },
         "pmaxnm_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.smaxnmp.v8i8")
         },
         "pmaxnm_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.umaxnmp.v8i8")
         },
         "pmaxnm_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.aarch64.neon.smaxnmp.v4i16")
         },
         "pmaxnm_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.aarch64.neon.umaxnmp.v4i16")
         },
         "pmaxnm_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.aarch64.neon.smaxnmp.v2i32")
         },
         "pmaxnm_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.aarch64.neon.umaxnmp.v2i32")
         },
         "pmaxnm_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.aarch64.neon.fmaxnmp.v2f32")
         },
         "pmaxnmq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.smaxnmp.v16i8")
         },
         "pmaxnmq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.umaxnmp.v16i8")
         },
         "pmaxnmq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.aarch64.neon.smaxnmp.v8i16")
         },
         "pmaxnmq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.aarch64.neon.umaxnmp.v8i16")
         },
         "pmaxnmq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.aarch64.neon.smaxnmp.v4i32")
         },
         "pmaxnmq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.aarch64.neon.umaxnmp.v4i32")
         },
         "pmaxnmq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.aarch64.neon.fmaxnmp.v4f32")
         },
         "pmaxnmq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.aarch64.neon.smaxnmp.v2i64")
         },
         "pmaxnmq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.aarch64.neon.umaxnmp.v2i64")
         },
         "pmaxnmq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.aarch64.neon.fmaxnmp.v2f64")
         },
         "pminnm_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.aarch64.neon.fminnmp.v2f32")
         },
         "pminnmq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.aarch64.neon.fminnmp.v4f32")
         },
         "pminnmq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.aarch64.neon.fminnmp.v2f64")
         },
         "addv_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: i(8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8,
             definition: Named("llvm.aarch64.neon.saddv.i8.v8i8")
         },
         "addv_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8)],
-            output: u(8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS },
+            output: &::U8,
             definition: Named("llvm.aarch64.neon.uaddv.i8.v8i8")
         },
         "addv_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: i(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I16,
             definition: Named("llvm.aarch64.neon.saddv.i16.v4i16")
         },
         "addv_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4)],
-            output: u(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS },
+            output: &::U16,
             definition: Named("llvm.aarch64.neon.uaddv.i16.v4i16")
         },
         "addv_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.aarch64.neon.saddv.i32.v2i32")
         },
         "addv_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2)],
-            output: u(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
+            output: &::U32,
             definition: Named("llvm.aarch64.neon.uaddv.i32.v2i32")
         },
         "addv_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2)],
-            output: f(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
+            output: &::F32,
             definition: Named("llvm.aarch64.neon.faddv.f32.v2f32")
         },
         "addvq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: i(8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8,
             definition: Named("llvm.aarch64.neon.saddv.i8.v16i8")
         },
         "addvq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16)],
-            output: u(8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS },
+            output: &::U8,
             definition: Named("llvm.aarch64.neon.uaddv.i8.v16i8")
         },
         "addvq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: i(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I16,
             definition: Named("llvm.aarch64.neon.saddv.i16.v8i16")
         },
         "addvq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8)],
-            output: u(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
+            output: &::U16,
             definition: Named("llvm.aarch64.neon.uaddv.i16.v8i16")
         },
         "addvq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.aarch64.neon.saddv.i32.v4i32")
         },
         "addvq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: u(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U32,
             definition: Named("llvm.aarch64.neon.uaddv.i32.v4i32")
         },
         "addvq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: f(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32,
             definition: Named("llvm.aarch64.neon.faddv.f32.v4f32")
         },
         "addvq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2)],
-            output: i(64),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS },
+            output: &::I64,
             definition: Named("llvm.aarch64.neon.saddv.i64.v2i64")
         },
         "addvq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2)],
-            output: u(64),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U64x2]; &INPUTS },
+            output: &::U64,
             definition: Named("llvm.aarch64.neon.uaddv.i64.v2i64")
         },
         "addvq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2)],
-            output: f(64),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS },
+            output: &::F64,
             definition: Named("llvm.aarch64.neon.faddv.f64.v2f64")
         },
         "addlv_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: i(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I16,
             definition: Named("llvm.aarch64.neon.saddlv.i16.v8i8")
         },
         "addlv_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8)],
-            output: u(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS },
+            output: &::U16,
             definition: Named("llvm.aarch64.neon.uaddlv.i16.v8i8")
         },
         "addlv_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.aarch64.neon.saddlv.i32.v4i16")
         },
         "addlv_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4)],
-            output: u(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS },
+            output: &::U32,
             definition: Named("llvm.aarch64.neon.uaddlv.i32.v4i16")
         },
         "addlv_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: i(64),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I64,
             definition: Named("llvm.aarch64.neon.saddlv.i64.v2i32")
         },
         "addlv_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2)],
-            output: u(64),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
+            output: &::U64,
             definition: Named("llvm.aarch64.neon.uaddlv.i64.v2i32")
         },
         "addlvq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: i(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I16,
             definition: Named("llvm.aarch64.neon.saddlv.i16.v16i8")
         },
         "addlvq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16)],
-            output: u(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS },
+            output: &::U16,
             definition: Named("llvm.aarch64.neon.uaddlv.i16.v16i8")
         },
         "addlvq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.aarch64.neon.saddlv.i32.v8i16")
         },
         "addlvq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8)],
-            output: u(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
+            output: &::U32,
             definition: Named("llvm.aarch64.neon.uaddlv.i32.v8i16")
         },
         "addlvq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: i(64),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I64,
             definition: Named("llvm.aarch64.neon.saddlv.i64.v4i32")
         },
         "addlvq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: u(64),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U64,
             definition: Named("llvm.aarch64.neon.uaddlv.i64.v4i32")
         },
         "maxv_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: i(8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8,
             definition: Named("llvm.aarch64.neon.smaxv.i8.v8i8")
         },
         "maxv_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8)],
-            output: u(8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS },
+            output: &::U8,
             definition: Named("llvm.aarch64.neon.umaxv.i8.v8i8")
         },
         "maxv_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: i(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I16,
             definition: Named("llvm.aarch64.neon.smaxv.i16.v4i16")
         },
         "maxv_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4)],
-            output: u(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS },
+            output: &::U16,
             definition: Named("llvm.aarch64.neon.umaxv.i16.v4i16")
         },
         "maxv_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.aarch64.neon.smaxv.i32.v2i32")
         },
         "maxv_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2)],
-            output: u(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
+            output: &::U32,
             definition: Named("llvm.aarch64.neon.umaxv.i32.v2i32")
         },
         "maxv_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2)],
-            output: f(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
+            output: &::F32,
             definition: Named("llvm.aarch64.neon.fmaxv.f32.v2f32")
         },
         "maxvq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: i(8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8,
             definition: Named("llvm.aarch64.neon.smaxv.i8.v16i8")
         },
         "maxvq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16)],
-            output: u(8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS },
+            output: &::U8,
             definition: Named("llvm.aarch64.neon.umaxv.i8.v16i8")
         },
         "maxvq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: i(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I16,
             definition: Named("llvm.aarch64.neon.smaxv.i16.v8i16")
         },
         "maxvq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8)],
-            output: u(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
+            output: &::U16,
             definition: Named("llvm.aarch64.neon.umaxv.i16.v8i16")
         },
         "maxvq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.aarch64.neon.smaxv.i32.v4i32")
         },
         "maxvq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: u(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U32,
             definition: Named("llvm.aarch64.neon.umaxv.i32.v4i32")
         },
         "maxvq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: f(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32,
             definition: Named("llvm.aarch64.neon.fmaxv.f32.v4f32")
         },
         "maxvq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2)],
-            output: f(64),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS },
+            output: &::F64,
             definition: Named("llvm.aarch64.neon.fmaxv.f64.v2f64")
         },
         "minv_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: i(8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8,
             definition: Named("llvm.aarch64.neon.sminv.i8.v8i8")
         },
         "minv_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8)],
-            output: u(8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS },
+            output: &::U8,
             definition: Named("llvm.aarch64.neon.uminv.i8.v8i8")
         },
         "minv_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: i(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I16,
             definition: Named("llvm.aarch64.neon.sminv.i16.v4i16")
         },
         "minv_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4)],
-            output: u(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS },
+            output: &::U16,
             definition: Named("llvm.aarch64.neon.uminv.i16.v4i16")
         },
         "minv_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.aarch64.neon.sminv.i32.v2i32")
         },
         "minv_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2)],
-            output: u(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
+            output: &::U32,
             definition: Named("llvm.aarch64.neon.uminv.i32.v2i32")
         },
         "minv_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2)],
-            output: f(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
+            output: &::F32,
             definition: Named("llvm.aarch64.neon.fminv.f32.v2f32")
         },
         "minvq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: i(8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8,
             definition: Named("llvm.aarch64.neon.sminv.i8.v16i8")
         },
         "minvq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16)],
-            output: u(8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS },
+            output: &::U8,
             definition: Named("llvm.aarch64.neon.uminv.i8.v16i8")
         },
         "minvq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: i(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I16,
             definition: Named("llvm.aarch64.neon.sminv.i16.v8i16")
         },
         "minvq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8)],
-            output: u(16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
+            output: &::U16,
             definition: Named("llvm.aarch64.neon.uminv.i16.v8i16")
         },
         "minvq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.aarch64.neon.sminv.i32.v4i32")
         },
         "minvq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: u(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U32,
             definition: Named("llvm.aarch64.neon.uminv.i32.v4i32")
         },
         "minvq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: f(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32,
             definition: Named("llvm.aarch64.neon.fminv.f32.v4f32")
         },
         "minvq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2)],
-            output: f(64),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS },
+            output: &::F64,
             definition: Named("llvm.aarch64.neon.fminv.f64.v2f64")
         },
         "maxnmv_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2)],
-            output: f(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
+            output: &::F32,
             definition: Named("llvm.aarch64.neon.fmaxnmv.f32.v2f32")
         },
         "maxnmvq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: f(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32,
             definition: Named("llvm.aarch64.neon.fmaxnmv.f32.v4f32")
         },
         "maxnmvq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2)],
-            output: f(64),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS },
+            output: &::F64,
             definition: Named("llvm.aarch64.neon.fmaxnmv.f64.v2f64")
         },
         "minnmv_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2)],
-            output: f(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
+            output: &::F32,
             definition: Named("llvm.aarch64.neon.fminnmv.f32.v2f32")
         },
         "minnmvq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: f(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32,
             definition: Named("llvm.aarch64.neon.fminnmv.f32.v4f32")
         },
         "minnmvq_f64" => Intrinsic {
-            inputs: vec![v(f(64), 2)],
-            output: f(64),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS },
+            output: &::F64,
             definition: Named("llvm.aarch64.neon.fminnmv.f64.v2f64")
         },
         "qtbl1_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.tbl1.v8i8")
         },
         "qtbl1_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.tbl1.v8i8")
         },
         "qtbl1q_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(u(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::U8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.tbl1.v16i8")
         },
         "qtbl1q_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.tbl1.v16i8")
         },
         "qtbx1_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 16), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, &::I8x16, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.tbx1.v8i8")
         },
         "qtbx1_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 16), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, &::U8x16, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.tbx1.v8i8")
         },
         "qtbx1q_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16), v(u(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::U8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.tbx1.v16i8")
         },
         "qtbx1q_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.tbx1.v16i8")
         },
         "qtbl2_s8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16)]), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.tbl2.v8i8")
         },
         "qtbl2_u8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16)]), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.tbl2.v8i8")
         },
         "qtbl2q_s8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16)]), v(u(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.tbl2.v16i8")
         },
         "qtbl2q_u8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16)]), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.tbl2.v16i8")
         },
         "qtbx2_s8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16)]), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.tbx2.v8i8")
         },
         "qtbx2_u8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16)]), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.tbx2.v8i8")
         },
         "qtbx2q_s8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16)]), v(u(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.tbx2.v16i8")
         },
         "qtbx2q_u8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16)]), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.tbx2.v16i8")
         },
         "qtbl3_s8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16)]), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.tbl3.v8i8")
         },
         "qtbl3_u8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.tbl3.v8i8")
         },
         "qtbl3q_s8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16)]), v(u(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.tbl3.v16i8")
         },
         "qtbl3q_u8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.tbl3.v16i8")
         },
         "qtbx3_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), agg(true, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16)]), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.tbx3.v8i8")
         },
         "qtbx3_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.tbx3.v8i8")
         },
         "qtbx3q_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), agg(true, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16)]), v(u(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.tbx3.v16i8")
         },
         "qtbx3q_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.tbx3.v16i8")
         },
         "qtbl4_s8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16), v(i(8), 16)]), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.tbl4.v8i8")
         },
         "qtbl4_u8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.tbl4.v8i8")
         },
         "qtbl4q_s8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16), v(i(8), 16)]), v(u(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.tbl4.v16i8")
         },
         "qtbl4q_u8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.tbl4.v16i8")
         },
         "qtbx4_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), agg(true, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16), v(i(8), 16)]), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.aarch64.neon.tbx4.v8i8")
         },
         "qtbx4_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.aarch64.neon.tbx4.v8i8")
         },
         "qtbx4q_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), agg(true, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16), v(i(8), 16)]), v(u(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.aarch64.neon.tbx4.v16i8")
         },
         "qtbx4q_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.aarch64.neon.tbx4.v16i8")
         },
         _ => return None,
index 166bf66d819c77c2eb8f5608a0a4c112269d95d7..834528aaaa314ccf0f3c5eb697e65bbc431c11d7 100644 (file)
 
 #![allow(unused_imports)]
 
-use {Intrinsic, i, i_, u, u_, f, v, v_, agg, p, void};
+use {Intrinsic, Type};
 use IntrinsicDef::Named;
-use rustc::middle::ty;
 
 // The default inlining settings trigger a pathological behaviour in
 // LLVM, which makes compilation very slow. See #28273.
 #[inline(never)]
-pub fn find<'tcx>(_tcx: &ty::ctxt<'tcx>, name: &str) -> Option<Intrinsic> {
+pub fn find(name: &str) -> Option<Intrinsic> {
     if !name.starts_with("arm_v") { return None }
     Some(match &name["arm_v".len()..] {
         "hadd_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vhadds.v8i8")
         },
         "hadd_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vhaddu.v8i8")
         },
         "hadd_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vhadds.v4i16")
         },
         "hadd_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vhaddu.v4i16")
         },
         "hadd_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vhadds.v2i32")
         },
         "hadd_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vhaddu.v2i32")
         },
         "haddq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vhadds.v16i8")
         },
         "haddq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vhaddu.v16i8")
         },
         "haddq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vhadds.v8i16")
         },
         "haddq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vhaddu.v8i16")
         },
         "haddq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vhadds.v4i32")
         },
         "haddq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vhaddu.v4i32")
         },
         "rhadd_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vrhadds.v8i8")
         },
         "rhadd_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vrhaddu.v8i8")
         },
         "rhadd_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vrhadds.v4i16")
         },
         "rhadd_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vrhaddu.v4i16")
         },
         "rhadd_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vrhadds.v2i32")
         },
         "rhadd_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vrhaddu.v2i32")
         },
         "rhaddq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vrhadds.v16i8")
         },
         "rhaddq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vrhaddu.v16i8")
         },
         "rhaddq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vrhadds.v8i16")
         },
         "rhaddq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vrhaddu.v8i16")
         },
         "rhaddq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vrhadds.v4i32")
         },
         "rhaddq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vrhaddu.v4i32")
         },
         "qadd_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vqadds.v8i8")
         },
         "qadd_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vqaddu.v8i8")
         },
         "qadd_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vqadds.v4i16")
         },
         "qadd_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vqaddu.v4i16")
         },
         "qadd_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vqadds.v2i32")
         },
         "qadd_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vqaddu.v2i32")
         },
         "qadd_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.neon.vqadds.v1i64")
         },
         "qadd_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(u(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.neon.vqaddu.v1i64")
         },
         "qaddq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vqadds.v16i8")
         },
         "qaddq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vqaddu.v16i8")
         },
         "qaddq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vqadds.v8i16")
         },
         "qaddq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vqaddu.v8i16")
         },
         "qaddq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vqadds.v4i32")
         },
         "qaddq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vqaddu.v4i32")
         },
         "qaddq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.neon.vqadds.v2i64")
         },
         "qaddq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.neon.vqaddu.v2i64")
         },
         "raddhn_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vraddhn.v8i8")
         },
         "raddhn_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vraddhn.v8i8")
         },
         "raddhn_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vraddhn.v4i16")
         },
         "raddhn_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vraddhn.v4i16")
         },
         "raddhn_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vraddhn.v2i32")
         },
         "raddhn_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vraddhn.v2i32")
         },
         "fma_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.fma.v2f32")
         },
         "fmaq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.fma.v4f32")
         },
         "qdmulh_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vsqdmulh.v4i16")
         },
         "qdmulh_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vsqdmulh.v2i32")
         },
         "qdmulhq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vsqdmulh.v8i16")
         },
         "qdmulhq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vsqdmulh.v4i32")
         },
         "qrdmulh_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vsqrdmulh.v4i16")
         },
         "qrdmulh_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vsqrdmulh.v2i32")
         },
         "qrdmulhq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vsqrdmulh.v8i16")
         },
         "qrdmulhq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vsqrdmulh.v4i32")
         },
         "mull_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vmulls.v8i16")
         },
         "mull_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vmullu.v8i16")
         },
         "mull_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vmulls.v4i32")
         },
         "mull_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vmullu.v4i32")
         },
         "mull_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.neon.vmulls.v2i64")
         },
         "mull_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.neon.vmullu.v2i64")
         },
         "qdmullq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vsqdmull.v8i16")
         },
         "qdmullq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vsqdmull.v4i32")
         },
         "hsub_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vhsubs.v8i8")
         },
         "hsub_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vhsubu.v8i8")
         },
         "hsub_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vhsubs.v4i16")
         },
         "hsub_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vhsubu.v4i16")
         },
         "hsub_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vhsubs.v2i32")
         },
         "hsub_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vhsubu.v2i32")
         },
         "hsubq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vhsubs.v16i8")
         },
         "hsubq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vhsubu.v16i8")
         },
         "hsubq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vhsubs.v8i16")
         },
         "hsubq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vhsubu.v8i16")
         },
         "hsubq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vhsubs.v4i32")
         },
         "hsubq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vhsubu.v4i32")
         },
         "qsub_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vqsubs.v8i8")
         },
         "qsub_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vqsubu.v8i8")
         },
         "qsub_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vqsubs.v4i16")
         },
         "qsub_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vqsubu.v4i16")
         },
         "qsub_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vqsubs.v2i32")
         },
         "qsub_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vqsubu.v2i32")
         },
         "qsub_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.neon.vqsubs.v1i64")
         },
         "qsub_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(u(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.neon.vqsubu.v1i64")
         },
         "qsubq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vqsubs.v16i8")
         },
         "qsubq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vqsubu.v16i8")
         },
         "qsubq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vqsubs.v8i16")
         },
         "qsubq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vqsubu.v8i16")
         },
         "qsubq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vqsubs.v4i32")
         },
         "qsubq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vqsubu.v4i32")
         },
         "qsubq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.neon.vqsubs.v2i64")
         },
         "qsubq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.neon.vqsubu.v2i64")
         },
         "rsubhn_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vrsubhn.v8i8")
         },
         "rsubhn_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vrsubhn.v8i8")
         },
         "rsubhn_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vrsubhn.v4i16")
         },
         "rsubhn_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vrsubhn.v4i16")
         },
         "rsubhn_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vrsubhn.v2i32")
         },
         "rsubhn_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vrsubhn.v2i32")
         },
         "abd_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vabds.v8i8")
         },
         "abd_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vabdu.v8i8")
         },
         "abd_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vabds.v4i16")
         },
         "abd_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vabdu.v4i16")
         },
         "abd_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vabds.v2i32")
         },
         "abd_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vabdu.v2i32")
         },
         "abd_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.neon.vabdf.v2f32")
         },
         "abdq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vabds.v16i8")
         },
         "abdq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vabdu.v16i8")
         },
         "abdq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vabds.v8i16")
         },
         "abdq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vabdu.v8i16")
         },
         "abdq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vabds.v4i32")
         },
         "abdq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vabdu.v4i32")
         },
         "abdq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.neon.vabdf.v4f32")
         },
         "max_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vmaxs.v8i8")
         },
         "max_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vmaxu.v8i8")
         },
         "max_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vmaxs.v4i16")
         },
         "max_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vmaxu.v4i16")
         },
         "max_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vmaxs.v2i32")
         },
         "max_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vmaxu.v2i32")
         },
         "max_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.neon.vmaxf.v2f32")
         },
         "maxq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vmaxs.v16i8")
         },
         "maxq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vmaxu.v16i8")
         },
         "maxq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vmaxs.v8i16")
         },
         "maxq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vmaxu.v8i16")
         },
         "maxq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vmaxs.v4i32")
         },
         "maxq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vmaxu.v4i32")
         },
         "maxq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.neon.vmaxf.v4f32")
         },
         "min_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vmins.v8i8")
         },
         "min_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vminu.v8i8")
         },
         "min_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vmins.v4i16")
         },
         "min_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vminu.v4i16")
         },
         "min_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vmins.v2i32")
         },
         "min_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vminu.v2i32")
         },
         "min_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.neon.vminf.v2f32")
         },
         "minq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vmins.v16i8")
         },
         "minq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vminu.v16i8")
         },
         "minq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vmins.v8i16")
         },
         "minq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vminu.v8i16")
         },
         "minq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vmins.v4i32")
         },
         "minq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vminu.v4i32")
         },
         "minq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.neon.vminf.v4f32")
         },
         "shl_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vshls.v8i8")
         },
         "shl_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(i(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vshlu.v8i8")
         },
         "shl_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vshls.v4i16")
         },
         "shl_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(i(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vshlu.v4i16")
         },
         "shl_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vshls.v2i32")
         },
         "shl_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(i(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vshlu.v2i32")
         },
         "shl_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.neon.vshls.v1i64")
         },
         "shl_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(i(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.neon.vshlu.v1i64")
         },
         "shlq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vshls.v16i8")
         },
         "shlq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(i(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vshlu.v16i8")
         },
         "shlq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vshls.v8i16")
         },
         "shlq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(i(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vshlu.v8i16")
         },
         "shlq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vshls.v4i32")
         },
         "shlq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(i(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vshlu.v4i32")
         },
         "shlq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.neon.vshls.v2i64")
         },
         "shlq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(i(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.neon.vshlu.v2i64")
         },
         "qshl_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vqshls.v8i8")
         },
         "qshl_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(i(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vqshlu.v8i8")
         },
         "qshl_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vqshls.v4i16")
         },
         "qshl_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(i(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vqshlu.v4i16")
         },
         "qshl_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vqshls.v2i32")
         },
         "qshl_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(i(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vqshlu.v2i32")
         },
         "qshl_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.neon.vqshls.v1i64")
         },
         "qshl_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(i(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.neon.vqshlu.v1i64")
         },
         "qshlq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vqshls.v16i8")
         },
         "qshlq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(i(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vqshlu.v16i8")
         },
         "qshlq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vqshls.v8i16")
         },
         "qshlq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(i(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vqshlu.v8i16")
         },
         "qshlq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vqshls.v4i32")
         },
         "qshlq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(i(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vqshlu.v4i32")
         },
         "qshlq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.neon.vqshls.v2i64")
         },
         "qshlq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(i(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.neon.vqshlu.v2i64")
         },
         "rshl_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vrshls.v8i8")
         },
         "rshl_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(i(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vrshlu.v8i8")
         },
         "rshl_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vrshls.v4i16")
         },
         "rshl_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(i(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vrshlu.v4i16")
         },
         "rshl_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vrshls.v2i32")
         },
         "rshl_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(i(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vrshlu.v2i32")
         },
         "rshl_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.neon.vrshls.v1i64")
         },
         "rshl_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(i(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.neon.vrshlu.v1i64")
         },
         "rshlq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vrshls.v16i8")
         },
         "rshlq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(i(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vrshlu.v16i8")
         },
         "rshlq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vrshls.v8i16")
         },
         "rshlq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(i(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vrshlu.v8i16")
         },
         "rshlq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vrshls.v4i32")
         },
         "rshlq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(i(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vrshlu.v4i32")
         },
         "rshlq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.neon.vrshls.v2i64")
         },
         "rshlq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(i(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.neon.vrshlu.v2i64")
         },
         "qrshl_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vqrshls.v8i8")
         },
         "qrshl_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(i(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vqrshlu.v8i8")
         },
         "qrshl_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vqrshls.v4i16")
         },
         "qrshl_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(i(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vqrshlu.v4i16")
         },
         "qrshl_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vqrshls.v2i32")
         },
         "qrshl_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(i(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vqrshlu.v2i32")
         },
         "qrshl_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.neon.vqrshls.v1i64")
         },
         "qrshl_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(i(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.neon.vqrshlu.v1i64")
         },
         "qrshlq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vqrshls.v16i8")
         },
         "qrshlq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(i(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vqrshlu.v16i8")
         },
         "qrshlq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vqrshls.v8i16")
         },
         "qrshlq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(i(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vqrshlu.v8i16")
         },
         "qrshlq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vqrshls.v4i32")
         },
         "qrshlq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(i(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vqrshlu.v4i32")
         },
         "qrshlq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.neon.vqrshls.v2i64")
         },
         "qrshlq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(i(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.neon.vqrshlu.v2i64")
         },
         "qshrun_n_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), u(32)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vsqshrun.v8i8")
         },
         "qshrun_n_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), u(32)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vsqshrun.v4i16")
         },
         "qshrun_n_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), u(32)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vsqshrun.v2i32")
         },
         "qrshrun_n_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), u(32)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vsqrshrun.v8i8")
         },
         "qrshrun_n_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), u(32)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vsqrshrun.v4i16")
         },
         "qrshrun_n_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), u(32)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vsqrshrun.v2i32")
         },
         "qshrn_n_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), u(32)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vqshrns.v8i8")
         },
         "qshrn_n_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), u(32)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vqshrnu.v8i8")
         },
         "qshrn_n_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), u(32)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vqshrns.v4i16")
         },
         "qshrn_n_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), u(32)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vqshrnu.v4i16")
         },
         "qshrn_n_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), u(32)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vqshrns.v2i32")
         },
         "qshrn_n_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), u(32)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vqshrnu.v2i32")
         },
         "rshrn_n_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), u(32)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vrshrn.v8i8")
         },
         "rshrn_n_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), u(32)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vrshrn.v8i8")
         },
         "rshrn_n_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), u(32)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vrshrn.v4i16")
         },
         "rshrn_n_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), u(32)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vrshrn.v4i16")
         },
         "rshrn_n_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), u(32)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vrshrn.v2i32")
         },
         "rshrn_n_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), u(32)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vrshrn.v2i32")
         },
         "qrshrn_n_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), u(32)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vqrshrns.v8i8")
         },
         "qrshrn_n_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), u(32)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vqrshrnu.v8i8")
         },
         "qrshrn_n_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), u(32)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vqrshrns.v4i16")
         },
         "qrshrn_n_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), u(32)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vqrshrnu.v4i16")
         },
         "qrshrn_n_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), u(32)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vqrshrns.v2i32")
         },
         "qrshrn_n_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), u(32)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vqrshrnu.v2i32")
         },
         "sri_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vvsri.v8i8")
         },
         "sri_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vvsri.v8i8")
         },
         "sri_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vvsri.v4i16")
         },
         "sri_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vvsri.v4i16")
         },
         "sri_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vvsri.v2i32")
         },
         "sri_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vvsri.v2i32")
         },
         "sri_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.neon.vvsri.v1i64")
         },
         "sri_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(u(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.neon.vvsri.v1i64")
         },
         "sriq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vvsri.v16i8")
         },
         "sriq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vvsri.v16i8")
         },
         "sriq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vvsri.v8i16")
         },
         "sriq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vvsri.v8i16")
         },
         "sriq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vvsri.v4i32")
         },
         "sriq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vvsri.v4i32")
         },
         "sriq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.neon.vvsri.v2i64")
         },
         "sriq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.neon.vvsri.v2i64")
         },
         "sli_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vvsli.v8i8")
         },
         "sli_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vvsli.v8i8")
         },
         "sli_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vvsli.v4i16")
         },
         "sli_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vvsli.v4i16")
         },
         "sli_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vvsli.v2i32")
         },
         "sli_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vvsli.v2i32")
         },
         "sli_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.neon.vvsli.v1i64")
         },
         "sli_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(u(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.neon.vvsli.v1i64")
         },
         "sliq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vvsli.v16i8")
         },
         "sliq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vvsli.v16i8")
         },
         "sliq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vvsli.v8i16")
         },
         "sliq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vvsli.v8i16")
         },
         "sliq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vvsli.v4i32")
         },
         "sliq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vvsli.v4i32")
         },
         "sliq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.neon.vvsli.v2i64")
         },
         "sliq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.neon.vvsli.v2i64")
         },
         "vqmovn_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vqxtns.v8i8")
         },
         "vqmovn_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vqxtnu.v8i8")
         },
         "vqmovn_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vqxtns.v4i16")
         },
         "vqmovn_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vqxtnu.v4i16")
         },
         "vqmovn_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vqxtns.v2i32")
         },
         "vqmovn_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U64x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vqxtnu.v2i32")
         },
         "abs_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vabs.v8i8")
         },
         "abs_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vabs.v4i16")
         },
         "abs_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vabs.v2i32")
         },
         "absq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vabs.v16i8")
         },
         "absq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vabs.v8i16")
         },
         "absq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vabs.v4i32")
         },
         "abs_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.fabs.v2f32")
         },
         "absq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.fabs.v4f32")
         },
         "qabs_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vsqabs.v8i8")
         },
         "qabs_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vsqabs.v4i16")
         },
         "qabs_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vsqabs.v2i32")
         },
         "qabsq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vsqabs.v16i8")
         },
         "qabsq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vsqabs.v8i16")
         },
         "qabsq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vsqabs.v4i32")
         },
         "qneg_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vsqneg.v8i8")
         },
         "qneg_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vsqneg.v4i16")
         },
         "qneg_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vsqneg.v2i32")
         },
         "qnegq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vsqneg.v16i8")
         },
         "qnegq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vsqneg.v8i16")
         },
         "qnegq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vsqneg.v4i32")
         },
         "clz_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.ctlz.v8i8")
         },
         "clz_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.ctlz.v8i8")
         },
         "clz_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.ctlz.v4i16")
         },
         "clz_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.ctlz.v4i16")
         },
         "clz_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.ctlz.v2i32")
         },
         "clz_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.ctlz.v2i32")
         },
         "clzq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.ctlz.v16i8")
         },
         "clzq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.ctlz.v16i8")
         },
         "clzq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.ctlz.v8i16")
         },
         "clzq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.ctlz.v8i16")
         },
         "clzq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.ctlz.v4i32")
         },
         "clzq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.ctlz.v4i32")
         },
         "cls_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vcls.v8i8")
         },
         "cls_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vcls.v8i8")
         },
         "cls_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vcls.v4i16")
         },
         "cls_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vcls.v4i16")
         },
         "cls_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vcls.v2i32")
         },
         "cls_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vcls.v2i32")
         },
         "clsq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vcls.v16i8")
         },
         "clsq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vcls.v16i8")
         },
         "clsq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vcls.v8i16")
         },
         "clsq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vcls.v8i16")
         },
         "clsq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vcls.v4i32")
         },
         "clsq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vcls.v4i32")
         },
         "cnt_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.ctpop.v8i8")
         },
         "cnt_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.ctpop.v8i8")
         },
         "cntq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.ctpop.v16i8")
         },
         "cntq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.ctpop.v16i8")
         },
         "recpe_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vrecpe.v2i32")
         },
         "recpe_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.neon.vrecpe.v2f32")
         },
         "recpeq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vrecpe.v4i32")
         },
         "recpeq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.neon.vrecpe.v4f32")
         },
         "recps_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.neon.vfrecps.v2f32")
         },
         "recpsq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.neon.vfrecps.v4f32")
         },
         "sqrt_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.sqrt.v2f32")
         },
         "sqrtq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.sqrt.v4f32")
         },
         "rsqrte_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vrsqrte.v2i32")
         },
         "rsqrte_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.neon.vrsqrte.v2f32")
         },
         "rsqrteq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vrsqrte.v4i32")
         },
         "rsqrteq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.neon.vrsqrte.v4f32")
         },
         "rsqrts_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.neon.vrsqrts.v2f32")
         },
         "rsqrtsq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.neon.vrsqrts.v4f32")
         },
         "bsl_s8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vbsl.v8i8")
         },
         "bsl_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vbsl.v8i8")
         },
         "bsl_s16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vbsl.v4i16")
         },
         "bsl_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vbsl.v4i16")
         },
         "bsl_s32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vbsl.v2i32")
         },
         "bsl_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vbsl.v2i32")
         },
         "bsl_s64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(i(64), 1)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.neon.vbsl.v1i64")
         },
         "bsl_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(u(64), 1)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.neon.vbsl.v1i64")
         },
         "bslq_s8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vbsl.v16i8")
         },
         "bslq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vbsl.v16i8")
         },
         "bslq_s16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vbsl.v8i16")
         },
         "bslq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vbsl.v8i16")
         },
         "bslq_s32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vbsl.v4i32")
         },
         "bslq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vbsl.v4i32")
         },
         "bslq_s64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.neon.vbsl.v2i64")
         },
         "bslq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.neon.vbsl.v2i64")
         },
         "padd_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vpadd.v8i8")
         },
         "padd_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vpadd.v8i8")
         },
         "padd_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vpadd.v4i16")
         },
         "padd_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vpadd.v4i16")
         },
         "padd_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vpadd.v2i32")
         },
         "padd_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vpadd.v2i32")
         },
         "padd_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.neon.vpadd.v2f32")
         },
         "paddl_s16" => Intrinsic {
-            inputs: vec![v(i(8), 8)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vpaddls.v4i16.v8i8")
         },
         "paddl_u16" => Intrinsic {
-            inputs: vec![v(u(8), 8)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vpaddlu.v4i16.v8i8")
         },
         "paddl_s32" => Intrinsic {
-            inputs: vec![v(i(16), 4)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vpaddls.v2i32.v4i16")
         },
         "paddl_u32" => Intrinsic {
-            inputs: vec![v(u(16), 4)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vpaddlu.v2i32.v4i16")
         },
         "paddl_s64" => Intrinsic {
-            inputs: vec![v(i(32), 2)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.neon.vpaddls.v1i64.v2i32")
         },
         "paddl_u64" => Intrinsic {
-            inputs: vec![v(u(32), 2)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.neon.vpaddlu.v1i64.v2i32")
         },
         "paddlq_s16" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vpaddls.v8i16.v16i8")
         },
         "paddlq_u16" => Intrinsic {
-            inputs: vec![v(u(8), 16)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vpaddlu.v8i16.v16i8")
         },
         "paddlq_s32" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vpaddls.v4i32.v8i16")
         },
         "paddlq_u32" => Intrinsic {
-            inputs: vec![v(u(16), 8)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vpaddlu.v4i32.v8i16")
         },
         "paddlq_s64" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.neon.vpaddls.v2i64.v4i32")
         },
         "paddlq_u64" => Intrinsic {
-            inputs: vec![v(u(32), 4)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.neon.vpaddlu.v2i64.v4i32")
         },
         "padal_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(8), 8)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I8x8]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vpadals.v4i16.v4i16")
         },
         "padal_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(8), 8)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U8x8]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vpadalu.v4i16.v4i16")
         },
         "padal_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(16), 4)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I16x4]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vpadals.v2i32.v2i32")
         },
         "padal_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(16), 4)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U16x4]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vpadalu.v2i32.v2i32")
         },
         "padal_s64" => Intrinsic {
-            inputs: vec![v(i(64), 1), v(i(32), 2)],
-            output: v(i(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I32x2]; &INPUTS },
+            output: &::I64x1,
             definition: Named("llvm.neon.vpadals.v1i64.v1i64")
         },
         "padal_u64" => Intrinsic {
-            inputs: vec![v(u(64), 1), v(u(32), 2)],
-            output: v(u(64), 1),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U32x2]; &INPUTS },
+            output: &::U64x1,
             definition: Named("llvm.neon.vpadalu.v1i64.v1i64")
         },
         "padalq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(8), 16)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I8x16]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vpadals.v8i16.v8i16")
         },
         "padalq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(8), 16)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U8x16]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vpadalu.v8i16.v8i16")
         },
         "padalq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(16), 8)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I16x8]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vpadals.v4i32.v4i32")
         },
         "padalq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(16), 8)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U16x8]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vpadalu.v4i32.v4i32")
         },
         "padalq_s64" => Intrinsic {
-            inputs: vec![v(i(64), 2), v(i(32), 4)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I32x4]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.neon.vpadals.v2i64.v2i64")
         },
         "padalq_u64" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(32), 4)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32x4]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.neon.vpadalu.v2i64.v2i64")
         },
         "pmax_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vpmaxs.v8i8")
         },
         "pmax_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vpmaxu.v8i8")
         },
         "pmax_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vpmaxs.v4i16")
         },
         "pmax_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vpmaxu.v4i16")
         },
         "pmax_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vpmaxs.v2i32")
         },
         "pmax_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vpmaxu.v2i32")
         },
         "pmax_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.neon.vpmaxf.v2f32")
         },
         "pmin_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vpmins.v8i8")
         },
         "pmin_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vpminu.v8i8")
         },
         "pmin_s16" => Intrinsic {
-            inputs: vec![v(i(16), 4), v(i(16), 4)],
-            output: v(i(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
+            output: &::I16x4,
             definition: Named("llvm.neon.vpmins.v4i16")
         },
         "pmin_u16" => Intrinsic {
-            inputs: vec![v(u(16), 4), v(u(16), 4)],
-            output: v(u(16), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
+            output: &::U16x4,
             definition: Named("llvm.neon.vpminu.v4i16")
         },
         "pmin_s32" => Intrinsic {
-            inputs: vec![v(i(32), 2), v(i(32), 2)],
-            output: v(i(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
+            output: &::I32x2,
             definition: Named("llvm.neon.vpmins.v2i32")
         },
         "pmin_u32" => Intrinsic {
-            inputs: vec![v(u(32), 2), v(u(32), 2)],
-            output: v(u(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
+            output: &::U32x2,
             definition: Named("llvm.neon.vpminu.v2i32")
         },
         "pmin_f32" => Intrinsic {
-            inputs: vec![v(f(32), 2), v(f(32), 2)],
-            output: v(f(32), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
+            output: &::F32x2,
             definition: Named("llvm.neon.vpminf.v2f32")
         },
         "pminq_s8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.neon.vpmins.v16i8")
         },
         "pminq_u8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.neon.vpminu.v16i8")
         },
         "pminq_s16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.neon.vpmins.v8i16")
         },
         "pminq_u16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.neon.vpminu.v8i16")
         },
         "pminq_s32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.neon.vpmins.v4i32")
         },
         "pminq_u32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.neon.vpminu.v4i32")
         },
         "pminq_f32" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.neon.vpminf.v4f32")
         },
         "tbl1_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vtbl1")
         },
         "tbl1_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vtbl1")
         },
         "tbx1_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), v(i(8), 8), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vtbx1")
         },
         "tbx1_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), v(u(8), 8), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vtbx1")
         },
         "tbl2_s8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(i(8), 8), v(i(8), 8)]), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vtbl2")
         },
         "tbl2_u8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(u(8), 8), v(u(8), 8)]), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vtbl2")
         },
         "tbx2_s8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(i(8), 8), v(i(8), 8)]), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vtbx2")
         },
         "tbx2_u8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(u(8), 8), v(u(8), 8)]), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vtbx2")
         },
         "tbl3_s8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8)]), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vtbl3")
         },
         "tbl3_u8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(u(8), 8), v(u(8), 8), v(u(8), 8)]), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vtbl3")
         },
         "tbx3_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), agg(true, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8)]), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vtbx3")
         },
         "tbx3_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), agg(true, vec![v(u(8), 8), v(u(8), 8), v(u(8), 8)]), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vtbx3")
         },
         "tbl4_s8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8), v(i(8), 8)]), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x8, &::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vtbl4")
         },
         "tbl4_u8" => Intrinsic {
-            inputs: vec![agg(true, vec![v(u(8), 8), v(u(8), 8), v(u(8), 8), v(u(8), 8)]), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x8, &::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vtbl4")
         },
         "tbx4_s8" => Intrinsic {
-            inputs: vec![v(i(8), 8), agg(true, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8), v(i(8), 8)]), v(u(8), 8)],
-            output: v(i(8), 8),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x8, &::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::I8x8,
             definition: Named("llvm.neon.vtbx4")
         },
         "tbx4_u8" => Intrinsic {
-            inputs: vec![v(u(8), 8), agg(true, vec![v(u(8), 8), v(u(8), 8), v(u(8), 8), v(u(8), 8)]), v(u(8), 8)],
-            output: v(u(8), 8),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x8, &::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
+            output: &::U8x8,
             definition: Named("llvm.neon.vtbx4")
         },
         _ => return None,
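
The hunks above are the regenerated NEON intrinsic table: every `inputs: vec![v(i(8), 8)]` / `output: v(i(8), 8)` pair becomes a borrow of `'static` data, so building an `Intrinsic` in a match arm no longer heap-allocates. The trick that lets this happen inside an expression is a block that declares a local `static` array and immediately returns a reference to it. A minimal sketch of the same pattern follows, using hypothetical stand-in names (`Ty`, `Desc`, `find`) rather than the crate's real `Type`/`Intrinsic` definitions:

    // Sketch only: simplified stand-ins for the generated table pattern above.
    #[derive(Debug)]
    enum Ty {
        Int(u8),
        Vector(&'static Ty, u8),
    }

    struct Desc {
        inputs: &'static [&'static Ty],
        output: &'static Ty,
    }

    static I8: Ty = Ty::Int(8);
    static I8X8: Ty = Ty::Vector(&I8, 8);

    fn find(name: &str) -> Option<Desc> {
        Some(match name {
            // The block declares a private static and immediately borrows it,
            // so the arm evaluates to a &'static [&'static Ty] with no allocation.
            "qneg_s8" => Desc {
                inputs: { static INPUTS: [&'static Ty; 1] = [&I8X8]; &INPUTS },
                output: &I8X8,
            },
            _ => return None,
        })
    }

    fn main() {
        let d = find("qneg_s8").unwrap();
        println!("{} input(s), output: {:?}", d.inputs.len(), d.output);
    }

The `&INPUTS` borrow of a `[&'static Ty; 1]` coerces to the `&'static [&'static Ty]` field type at the struct-literal site, which is why the generator can keep emitting one expression per match arm.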
index 6a43ef65d6a068a55b417a033e48cd1734fe9d69..6fe1f0c2b9cc6ca08d83d58424c0e2ed8f2baace 100644 (file)
 #![unstable(feature = "rustc_private", issue = "27812")]
 #![crate_type = "dylib"]
 #![crate_type = "rlib"]
-#![feature(staged_api, rustc_private)]
+#![feature(staged_api)]
 #![cfg_attr(not(stage0), deny(warnings))]
-
-extern crate rustc_llvm as llvm;
-extern crate rustc;
-
-use rustc::middle::ty;
+#![allow(bad_style)]
 
 pub struct Intrinsic {
-    pub inputs: Vec<Type>,
-    pub output: Type,
+    pub inputs: &'static [&'static Type],
+    pub output: &'static Type,
 
     pub definition: IntrinsicDef,
 }
@@ -32,47 +28,82 @@ pub enum Type {
     Void,
     Integer(/* signed */ bool, u8, /* llvm width */ u8),
     Float(u8),
-    Pointer(Box<Type>, Option<Box<Type>>, /* const */ bool),
-    Vector(Box<Type>, Option<Box<Type>>, u8),
-    Aggregate(bool, Vec<Type>),
+    Pointer(&'static Type, Option<&'static Type>, /* const */ bool),
+    Vector(&'static Type, Option<&'static Type>, u8),
+    Aggregate(bool, &'static [&'static Type]),
 }
 
 pub enum IntrinsicDef {
     Named(&'static str),
 }
 
-fn i(width: u8) -> Type { Type::Integer(true, width, width) }
-fn i_(width: u8, llvm_width: u8) -> Type { Type::Integer(true, width, llvm_width) }
-fn u(width: u8) -> Type { Type::Integer(false, width, width) }
-#[allow(dead_code)]
-fn u_(width: u8, llvm_width: u8) -> Type { Type::Integer(false, width, llvm_width) }
-fn f(width: u8) -> Type { Type::Float(width) }
-fn v(x: Type, length: u8) -> Type { Type::Vector(Box::new(x), None, length) }
-fn v_(x: Type, bitcast: Type, length: u8) -> Type {
-    Type::Vector(Box::new(x), Some(Box::new(bitcast)), length)
-}
-fn agg(flatten: bool, types: Vec<Type>) -> Type {
-    Type::Aggregate(flatten, types)
-}
-fn p(const_: bool, elem: Type, llvm_elem: Option<Type>) -> Type {
-    Type::Pointer(Box::new(elem), llvm_elem.map(Box::new), const_)
-}
-fn void() -> Type {
-    Type::Void
-}
+static I8: Type = Type::Integer(true, 8, 8);
+static I16: Type = Type::Integer(true, 16, 16);
+static I32: Type = Type::Integer(true, 32, 32);
+static I64: Type = Type::Integer(true, 64, 64);
+static U8: Type = Type::Integer(false, 8, 8);
+static U16: Type = Type::Integer(false, 16, 16);
+static U32: Type = Type::Integer(false, 32, 32);
+static U64: Type = Type::Integer(false, 64, 64);
+static F32: Type = Type::Float(32);
+static F64: Type = Type::Float(64);
+
+static I32_8: Type = Type::Integer(true, 32, 8);
+
+static I8x8: Type = Type::Vector(&I8, None, 8);
+static U8x8: Type = Type::Vector(&U8, None, 8);
+static I8x16: Type = Type::Vector(&I8, None, 16);
+static U8x16: Type = Type::Vector(&U8, None, 16);
+static I8x32: Type = Type::Vector(&I8, None, 32);
+static U8x32: Type = Type::Vector(&U8, None, 32);
+
+static I16x4: Type = Type::Vector(&I16, None, 4);
+static U16x4: Type = Type::Vector(&U16, None, 4);
+static I16x8: Type = Type::Vector(&I16, None, 8);
+static U16x8: Type = Type::Vector(&U16, None, 8);
+static I16x16: Type = Type::Vector(&I16, None, 16);
+static U16x16: Type = Type::Vector(&U16, None, 16);
+
+static I32x2: Type = Type::Vector(&I32, None, 2);
+static U32x2: Type = Type::Vector(&U32, None, 2);
+static I32x4: Type = Type::Vector(&I32, None, 4);
+static U32x4: Type = Type::Vector(&U32, None, 4);
+static I32x8: Type = Type::Vector(&I32, None, 8);
+static U32x8: Type = Type::Vector(&U32, None, 8);
+
+static I64x1: Type = Type::Vector(&I64, None, 1);
+static U64x1: Type = Type::Vector(&U64, None, 1);
+static I64x2: Type = Type::Vector(&I64, None, 2);
+static U64x2: Type = Type::Vector(&U64, None, 2);
+static I64x4: Type = Type::Vector(&I64, None, 4);
+static U64x4: Type = Type::Vector(&U64, None, 4);
+
+static F32x2: Type = Type::Vector(&F32, None, 2);
+static F32x4: Type = Type::Vector(&F32, None, 4);
+static F32x8: Type = Type::Vector(&F32, None, 8);
+static F64x1: Type = Type::Vector(&F64, None, 1);
+static F64x2: Type = Type::Vector(&F64, None, 2);
+static F64x4: Type = Type::Vector(&F64, None, 4);
+
+static I32x4_F32: Type = Type::Vector(&I32, Some(&F32), 4);
+static I32x8_F32: Type = Type::Vector(&I32, Some(&F32), 8);
+static I64x2_F64: Type = Type::Vector(&I64, Some(&F64), 2);
+static I64x4_F64: Type = Type::Vector(&I64, Some(&F64), 4);
+
+static VOID: Type = Type::Void;
 
 mod x86;
 mod arm;
 mod aarch64;
 
 impl Intrinsic {
-    pub fn find<'tcx>(tcx: &ty::ctxt<'tcx>, name: &str) -> Option<Intrinsic> {
+    pub fn find(name: &str) -> Option<Intrinsic> {
         if name.starts_with("x86_") {
-            x86::find(tcx, name)
+            x86::find(name)
         } else if name.starts_with("arm_") {
-            arm::find(tcx, name)
+            arm::find(name)
         } else if name.starts_with("aarch64_") {
-            aarch64::find(tcx, name)
+            aarch64::find(name)
         } else {
             None
         }
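
The hunk above rewrites the `Type` enum so that `Pointer`, `Vector` and `Aggregate` hold `&'static` references and slices instead of `Box<Type>` and `Vec<Type>`, and then builds the whole type graph out of `static` items such as `static I8x8: Type = Type::Vector(&I8, None, 8);`. The point is that a `Box` needs a runtime heap allocation and therefore cannot appear in a `static` initializer, whereas a reference to another `static` is a constant; with the old representation every lookup rebuilt its type descriptions on the heap, while the new tables are baked into the crate. A small self-contained sketch of the idea, using a hypothetical `Node` enum rather than the crate's `Type`:

    // Sketch only: statics may reference other statics, so a recursive
    // &'static-based enum can be built entirely at compile time.
    enum Node {
        Leaf(u8),
        Pair(&'static Node, &'static Node),
    }

    static A: Node = Node::Leaf(1);
    static B: Node = Node::Leaf(2);
    static PAIR: Node = Node::Pair(&A, &B); // &'static to a static is a constant expression
    // A Box-based variant (as in the old enum) could not be initialized here,
    // because Box::new requires a runtime allocation.

    fn depth(n: &'static Node) -> u32 {
        match *n {
            Node::Leaf(_) => 1,
            Node::Pair(a, b) => 1 + depth(a).max(depth(b)),
        }
    }

    fn main() {
        println!("depth = {}", depth(&PAIR)); // prints "depth = 2"
    }

The signature change in the same hunk (`Intrinsic::find(name)` instead of `find(tcx, name)`) falls out of this: once the tables no longer allocate per call, the lookup needs no type context at all.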
index 144fd5f4e761b1ac53f0464cd2d5707abddca71c..54e2cb929b87a38ebab56d1aa1285941ee3199b5 100644 (file)
 
 #![allow(unused_imports)]
 
-use {Intrinsic, i, i_, u, u_, f, v, v_, agg, p, void};
+use {Intrinsic, Type};
 use IntrinsicDef::Named;
-use rustc::middle::ty;
 
 // The default inlining settings trigger a pathological behaviour in
 // LLVM, which makes compilation very slow. See #28273.
 #[inline(never)]
-pub fn find<'tcx>(_tcx: &ty::ctxt<'tcx>, name: &str) -> Option<Intrinsic> {
+pub fn find(name: &str) -> Option<Intrinsic> {
     if !name.starts_with("x86_mm") { return None }
     Some(match &name["x86_mm".len()..] {
         "_movemask_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse.movmsk.ps")
         },
         "_max_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.x86.sse.max.ps")
         },
         "_min_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.x86.sse.min.ps")
         },
         "_rsqrt_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.x86.sse.rsqrt.ps")
         },
         "_rcp_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.x86.sse.rcp.ps")
         },
         "_sqrt_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.sqrt.v4f32")
         },
         "_storeu_ps" => Intrinsic {
-            inputs: vec![p(false, f(32), Some(i(8))), v(f(32), 4)],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::I8), false); &PTR }, &::F32x4]; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.sse.storeu.ps")
         },
         "_adds_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.x86.sse2.padds.b")
         },
         "_adds_epu8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.x86.sse2.paddus.b")
         },
         "_adds_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.x86.sse2.padds.w")
         },
         "_adds_epu16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.x86.sse2.paddus.w")
         },
         "_avg_epu8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.x86.sse2.pavg.b")
         },
         "_avg_epu16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.x86.sse2.pavg.w")
         },
         "_lfence" => Intrinsic {
-            inputs: vec![],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.sse2.lfence")
         },
         "_madd_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.x86.sse2.pmadd.wd")
         },
         "_maskmoveu_si128" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16), p(false, u(8), None)],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, &::U8x16, { static PTR: Type = Type::Pointer(&::U8, None, false); &PTR }]; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.sse2.maskmov.dqu")
         },
         "_max_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.x86.sse2.pmaxs.w")
         },
         "_max_epu8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.x86.sse2.pmaxu.b")
         },
         "_max_pd" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.x86.sse2.max.pd")
         },
         "_mfence" => Intrinsic {
-            inputs: vec![],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.sse2.fence")
         },
         "_min_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.x86.sse2.pmins.w")
         },
         "_min_epu8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.x86.sse2.pminu.b")
         },
         "_min_pd" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.x86.sse2.min.pd")
         },
         "_movemask_pd" => Intrinsic {
-            inputs: vec![v(f(64), 2)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse2.movmsk.pd")
         },
         "_movemask_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse2.pmovmskb.128")
         },
         "_mul_epu32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.x86.sse2.pmulu.dq")
         },
         "_mulhi_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.x86.sse2.pmulh.w")
         },
         "_mulhi_epu16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.x86.sse2.pmulhu.w")
         },
         "_packs_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.x86.sse2.packsswb.128")
         },
         "_packs_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.x86.sse2.packssdw.128")
         },
         "_packus_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.x86.sse2.packuswb.128")
         },
         "_sad_epu8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U64x2,
             definition: Named("llvm.x86.sse2.psad.bw")
         },
         "_sfence" => Intrinsic {
-            inputs: vec![],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.sse2.sfence")
         },
         "_sqrt_pd" => Intrinsic {
-            inputs: vec![v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.sqrt.v2f64")
         },
         "_storeu_pd" => Intrinsic {
-            inputs: vec![p(false, f(64), Some(u(8))), v(f(64), 2)],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::U8), false); &PTR }, &::F64x2]; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.sse2.storeu.pd")
         },
         "_storeu_si128" => Intrinsic {
-            inputs: vec![p(false, v(u(8), 16), Some(u(8))), v(u(8), 16)],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::U8x16, Some(&::U8), false); &PTR }, &::U8x16]; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.sse2.storeu.dq")
         },
         "_subs_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.x86.sse2.psubs.b")
         },
         "_subs_epu8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16)],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.x86.sse2.psubus.b")
         },
         "_subs_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.x86.sse2.psubs.w")
         },
         "_subs_epu16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.x86.sse2.psubus.w")
         },
         "_addsub_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.x86.sse3.addsub.ps")
         },
         "_addsub_pd" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.x86.sse3.addsub.pd")
         },
         "_hadd_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.x86.sse3.hadd.ps")
         },
         "_hadd_pd" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.x86.sse3.hadd.pd")
         },
         "_hsub_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.x86.sse3.hsub.ps")
         },
         "_hsub_pd" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.x86.sse3.hsub.pd")
         },
         "_lddqu_si128" => Intrinsic {
-            inputs: vec![p(true, v(u(8), 16), Some(i(8)))],
-            output: v(u(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8x16, Some(&::I8), true); &PTR }]; &INPUTS },
+            output: &::U8x16,
             definition: Named("llvm.x86.sse3.ldu.dq")
         },
         "_abs_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.x86.ssse3.pabs.b.128")
         },
         "_abs_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.x86.ssse3.pabs.w.128")
         },
         "_abs_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.x86.ssse3.pabs.d.128")
         },
         "_hadd_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.x86.ssse3.phadd.w.128")
         },
         "_hadd_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.x86.ssse3.phadd.d.128")
         },
         "_hadds_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.x86.ssse3.phadd.sw.128")
         },
         "_hsub_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.x86.ssse3.phsub.w.128")
         },
         "_hsub_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.x86.ssse3.phsub.d.128")
         },
         "_hsubs_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.x86.ssse3.phsub.sw.128")
         },
         "_maddubs_epi16" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(i(8), 16)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.x86.ssse3.pmadd.ub.sw.128")
         },
         "_mulhrs_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.x86.ssse3.pmul.hr.sw.128")
         },
         "_shuffle_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.x86.ssse3.pshuf.b.128")
         },
         "_sign_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.x86.ssse3.psign.b.128")
         },
         "_sign_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 8), v(i(16), 8)],
-            output: v(i(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
+            output: &::I16x8,
             definition: Named("llvm.x86.ssse3.psign.w.128")
         },
         "_sign_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.x86.ssse3.psign.d.128")
         },
         "_dp_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4), i_(32, 8)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::I32_8]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.x86.sse41.dpps")
         },
         "_dp_pd" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2), i_(32, 8)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::I32_8]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.x86.sse41.dppd")
         },
         "_max_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.x86.sse41.pmaxsb")
         },
         "_max_epu16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.x86.sse41.pmaxuw")
         },
         "_max_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.x86.sse41.pmaxsd")
         },
         "_max_epu32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.x86.sse41.pmaxud")
         },
         "_min_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.x86.sse41.pminsb")
         },
         "_min_epu16" => Intrinsic {
-            inputs: vec![v(u(16), 8), v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.x86.sse41.pminuw")
         },
         "_min_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.x86.sse41.pminsd")
         },
         "_min_epu32" => Intrinsic {
-            inputs: vec![v(u(32), 4), v(u(32), 4)],
-            output: v(u(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
+            output: &::U32x4,
             definition: Named("llvm.x86.sse41.pminud")
         },
         "_minpos_epu16" => Intrinsic {
-            inputs: vec![v(u(16), 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.x86.sse41.phminposuw")
         },
         "_mpsadbw_epu8" => Intrinsic {
-            inputs: vec![v(u(8), 16), v(u(8), 16), i_(32, 8)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::I32_8]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.x86.sse41.mpsadbw")
         },
         "_mul_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.x86.sse41.pmuldq")
         },
         "_packus_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 4), v(i(32), 4)],
-            output: v(u(16), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
+            output: &::U16x8,
             definition: Named("llvm.x86.sse41.packusdw")
         },
         "_testc_si128" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse41.ptestc")
         },
         "_testnzc_si128" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse41.ptestnzc")
         },
         "_testz_si128" => Intrinsic {
-            inputs: vec![v(u(64), 2), v(u(64), 2)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse41.ptestz")
         },
         "_cmpestra" => Intrinsic {
-            inputs: vec![v(i(8), 16), i(32), v(i(8), 16), i(32), i_(32, 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse42.pcmpestria128")
         },
         "_cmpestrc" => Intrinsic {
-            inputs: vec![v(i(8), 16), i(32), v(i(8), 16), i(32), i_(32, 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse42.pcmpestric128")
         },
         "_cmpestri" => Intrinsic {
-            inputs: vec![v(i(8), 16), i(32), v(i(8), 16), i(32), i_(32, 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse42.pcmpestri128")
         },
         "_cmpestrm" => Intrinsic {
-            inputs: vec![v(i(8), 16), i(32), v(i(8), 16), i(32), i_(32, 8)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.x86.sse42.pcmpestrm128")
         },
         "_cmpestro" => Intrinsic {
-            inputs: vec![v(i(8), 16), i(32), v(i(8), 16), i(32), i_(32, 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse42.pcmpestrio128")
         },
         "_cmpestrs" => Intrinsic {
-            inputs: vec![v(i(8), 16), i(32), v(i(8), 16), i(32), i_(32, 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse42.pcmpestris128")
         },
         "_cmpestrz" => Intrinsic {
-            inputs: vec![v(i(8), 16), i(32), v(i(8), 16), i(32), i_(32, 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse42.pcmpestriz128")
         },
         "_cmpistra" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16), i_(32, 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse42.pcmpistria128")
         },
         "_cmpistrc" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16), i_(32, 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse42.pcmpistric128")
         },
         "_cmpistri" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16), i_(32, 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse42.pcmpistri128")
         },
         "_cmpistrm" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16), i_(32, 8)],
-            output: v(i(8), 16),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS },
+            output: &::I8x16,
             definition: Named("llvm.x86.sse42.pcmpistrm128")
         },
         "_cmpistro" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16), i_(32, 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse42.pcmpistrio128")
         },
         "_cmpistrs" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16), i_(32, 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse42.pcmpistris128")
         },
         "_cmpistrz" => Intrinsic {
-            inputs: vec![v(i(8), 16), v(i(8), 16), i_(32, 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.sse42.pcmpistriz128")
         },
         "256_addsub_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8), v(f(32), 8)],
-            output: v(f(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS },
+            output: &::F32x8,
             definition: Named("llvm.x86.avx.addsub.ps.256")
         },
         "256_addsub_pd" => Intrinsic {
-            inputs: vec![v(f(64), 4), v(f(64), 4)],
-            output: v(f(64), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS },
+            output: &::F64x4,
             definition: Named("llvm.x86.avx.addsub.pd.256")
         },
+        "256_blendv_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS },
+            output: &::F32x8,
+            definition: Named("llvm.x86.avx.blendv.ps.256")
+        },
+        "256_blendv_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS },
+            output: &::F64x4,
+            definition: Named("llvm.x86.avx.blendv.pd.256")
+        },
+        "256_broadcast_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS },
+            output: &::F32x8,
+            definition: Named("llvm.x86.avx.vbroadcastf128.ps.256")
+        },
+        "256_broadcast_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS },
+            output: &::F64x4,
+            definition: Named("llvm.x86.avx.vbroadcastf128.pd.256")
+        },
+        "256_cmp_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::I8]; &INPUTS },
+            output: &::F32x8,
+            definition: Named("llvm.x86.avx.cmp.ps.256")
+        },
+        "256_cmp_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::I8]; &INPUTS },
+            output: &::F64x4,
+            definition: Named("llvm.x86.avx.cmp.pd.256")
+        },
+        "256_cvtepi32_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
+            output: &::F64x4,
+            definition: Named("llvm.x86.avx.cvtdq2.pd.256")
+        },
+        "256_cvtepi32_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x8]; &INPUTS },
+            output: &::F32x8,
+            definition: Named("llvm.x86.avx.cvtdq2.ps.256")
+        },
+        "256_cvtpd_epi32" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x4]; &INPUTS },
+            output: &::I32x4,
+            definition: Named("llvm.x86.avx.cvt.pd2dq.256")
+        },
+        "256_cvtpd_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x4]; &INPUTS },
+            output: &::F32x4,
+            definition: Named("llvm.x86.avx.cvt.pd2.ps.256")
+        },
+        "256_cvtps_epi32" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS },
+            output: &::I32x8,
+            definition: Named("llvm.x86.avx.cvt.ps2dq.256")
+        },
+        "256_cvtps_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
+            output: &::F64x4,
+            definition: Named("llvm.x86.avx.cvt.ps2.pd.256")
+        },
+        "256_cvttpd_epi32" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x4]; &INPUTS },
+            output: &::I32x4,
+            definition: Named("llvm.x86.avx.cvtt.pd2dq.256")
+        },
+        "256_cvttps_epi32" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS },
+            output: &::I32x8,
+            definition: Named("llvm.x86.avx.cvtt.ps2dq.256")
+        },
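
The arms from 256_blendv_ps through 256_cvttps_epi32 above are new in this version rather than rewrites of existing entries; they cover the AVX blend, broadcast, compare, and int/float conversion intrinsics. For context, a table like this is consumed by matching on the intrinsic name and handing back the signature plus the LLVM intrinsic it lowers to. The sketch below is a simplified stand-in: Intrinsic, Type, find, and the prefix handling are assumptions for illustration, not the real rustc items.

    // Simplified stand-in for how such a lookup table might be consumed;
    // treat every identifier here as hypothetical.
    struct Intrinsic {
        inputs: &'static [&'static Type],
        output: &'static Type,
        definition: &'static str, // LLVM intrinsic name, e.g. "llvm.x86.avx.blendv.ps.256"
    }

    enum Type {
        F32x8,
    }

    static F32X8: Type = Type::F32x8;

    fn find(name: &str) -> Option<Intrinsic> {
        // Assumed prefix handling: "x86_mm256_blendv_ps" -> "256_blendv_ps".
        let name = name.strip_prefix("x86_mm")?;
        Some(match name {
            "256_blendv_ps" => Intrinsic {
                inputs: { static INPUTS: [&'static Type; 3] = [&F32X8, &F32X8, &F32X8]; &INPUTS },
                output: &F32X8,
                definition: "llvm.x86.avx.blendv.ps.256",
            },
            _ => return None,
        })
    }

    fn main() {
        let intr = find("x86_mm256_blendv_ps").expect("known intrinsic");
        assert_eq!(intr.inputs.len(), 3);
        println!("lowers to {}", intr.definition);
    }
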
         "256_dp_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8), v(f(32), 8), i_(32, 8)],
-            output: v(f(32), 8),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::I32_8]; &INPUTS },
+            output: &::F32x8,
             definition: Named("llvm.x86.avx.dp.ps.256")
         },
         "256_hadd_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8), v(f(32), 8)],
-            output: v(f(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS },
+            output: &::F32x8,
             definition: Named("llvm.x86.avx.hadd.ps.256")
         },
         "256_hadd_pd" => Intrinsic {
-            inputs: vec![v(f(64), 4), v(f(64), 4)],
-            output: v(f(64), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS },
+            output: &::F64x4,
             definition: Named("llvm.x86.avx.hadd.pd.256")
         },
         "256_hsub_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8), v(f(32), 8)],
-            output: v(f(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS },
+            output: &::F32x8,
             definition: Named("llvm.x86.avx.hsub.ps.256")
         },
         "256_hsub_pd" => Intrinsic {
-            inputs: vec![v(f(64), 4), v(f(64), 4)],
-            output: v(f(64), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS },
+            output: &::F64x4,
             definition: Named("llvm.x86.avx.hsub.pd.256")
         },
         "256_max_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8), v(f(32), 8)],
-            output: v(f(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS },
+            output: &::F32x8,
             definition: Named("llvm.x86.avx.max.ps.256")
         },
         "256_max_pd" => Intrinsic {
-            inputs: vec![v(f(64), 4), v(f(64), 4)],
-            output: v(f(64), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS },
+            output: &::F64x4,
             definition: Named("llvm.x86.avx.max.pd.256")
         },
         "_maskload_ps" => Intrinsic {
-            inputs: vec![p(true, f(32), Some(i(8))), v_(i(32), f(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I32x4_F32]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.x86.avx.maskload.ps")
         },
         "_maskload_pd" => Intrinsic {
-            inputs: vec![p(true, f(64), Some(i(8))), v_(i(64), f(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I64x2_F64]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.x86.avx.maskload.pd")
         },
         "256_maskload_ps" => Intrinsic {
-            inputs: vec![p(true, f(32), Some(i(8))), v_(i(32), f(32), 8)],
-            output: v(f(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I32x8_F32]; &INPUTS },
+            output: &::F32x8,
             definition: Named("llvm.x86.avx.maskload.ps.256")
         },
         "256_maskload_pd" => Intrinsic {
-            inputs: vec![p(true, f(64), Some(i(8))), v_(i(64), f(64), 4)],
-            output: v(f(64), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I64x4_F64]; &INPUTS },
+            output: &::F64x4,
             definition: Named("llvm.x86.avx.maskload.pd.256")
         },
         "_maskstore_ps" => Intrinsic {
-            inputs: vec![p(false, f(32), Some(i(8))), v_(i(32), f(32), 4), v(f(32), 4)],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::I8), false); &PTR }, &::I32x4_F32, &::F32x4]; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.avx.maskstore.ps")
         },
         "_maskstore_pd" => Intrinsic {
-            inputs: vec![p(false, f(64), Some(i(8))), v_(i(64), f(64), 2), v(f(64), 2)],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::I8), false); &PTR }, &::I64x2_F64, &::F64x2]; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.avx.maskstore.pd")
         },
         "256_maskstore_ps" => Intrinsic {
-            inputs: vec![p(false, f(32), Some(i(8))), v_(i(32), f(32), 8), v(f(32), 8)],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::I8), false); &PTR }, &::I32x8_F32, &::F32x8]; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.avx.maskstore.ps.256")
         },
         "256_maskstore_pd" => Intrinsic {
-            inputs: vec![p(false, f(64), Some(i(8))), v_(i(64), f(64), 4), v(f(64), 4)],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::I8), false); &PTR }, &::I64x4_F64, &::F64x4]; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.avx.maskstore.pd.256")
         },
         "256_min_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8), v(f(32), 8)],
-            output: v(f(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS },
+            output: &::F32x8,
             definition: Named("llvm.x86.avx.min.ps.256")
         },
         "256_min_pd" => Intrinsic {
-            inputs: vec![v(f(64), 4), v(f(64), 4)],
-            output: v(f(64), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS },
+            output: &::F64x4,
             definition: Named("llvm.x86.avx.min.pd.256")
         },
         "256_movemask_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.movmsk.ps.256")
         },
         "256_movemask_pd" => Intrinsic {
-            inputs: vec![v(f(64), 4)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x4]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.movmsk.pd.256")
         },
         "_permutevar_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(i(32), 4)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::I32x4]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.x86.avx.vpermilvar.ps")
         },
         "_permutevar_pd" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(i(64), 2)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::I64x2]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.x86.avx.vpermilvar.pd")
         },
         "256_permutevar_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8), v(i(32), 8)],
-            output: v(f(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::I32x8]; &INPUTS },
+            output: &::F32x8,
             definition: Named("llvm.x86.avx.vpermilvar.ps.256")
         },
         "256_permutevar_pd" => Intrinsic {
-            inputs: vec![v(f(64), 4), v(i(64), 4)],
-            output: v(f(64), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::I64x4]; &INPUTS },
+            output: &::F64x4,
             definition: Named("llvm.x86.avx.vpermilvar.pd.256")
         },
         "256_rcp_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8)],
-            output: v(f(32), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS },
+            output: &::F32x8,
             definition: Named("llvm.x86.avx.rcp.ps.256")
         },
         "256_rsqrt_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8)],
-            output: v(f(32), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS },
+            output: &::F32x8,
             definition: Named("llvm.x86.avx.rsqrt.ps.256")
         },
         "256_storeu_ps" => Intrinsic {
-            inputs: vec![p(false, v(f(32), 8), Some(u(8))), v(f(32), 8)],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F32x8, Some(&::U8), false); &PTR }, &::F32x8]; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.avx.storeu.ps.256")
         },
         "256_storeu_pd" => Intrinsic {
-            inputs: vec![p(false, v(f(64), 4), Some(u(8))), v(f(64), 4)],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F64x4, Some(&::U8), false); &PTR }, &::F64x4]; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.avx.storeu.ps.256")
         },
         "256_storeu_si256" => Intrinsic {
-            inputs: vec![p(false, v(u(8), 32), Some(u(8))), v(u(8), 32)],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::U8x32, Some(&::U8), false); &PTR }, &::U8x32]; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.avx.storeu.dq.256")
         },
         "256_sqrt_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8)],
-            output: v(f(32), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS },
+            output: &::F32x8,
             definition: Named("llvm.sqrt.v8f32")
         },
         "256_sqrt_pd" => Intrinsic {
-            inputs: vec![v(f(64), 4)],
-            output: v(f(64), 4),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::F64x4]; &INPUTS },
+            output: &::F64x4,
             definition: Named("llvm.sqrt.v4f64")
         },
         "_testc_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.vtestc.ps")
         },
         "256_testc_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8), v(f(32), 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.vtestc.ps.256")
         },
         "_testc_pd" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.vtestc.pd")
         },
         "256_testc_pd" => Intrinsic {
-            inputs: vec![v(f(64), 4), v(f(64), 4)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.vtestc.pd.256")
         },
         "256_testc_si256" => Intrinsic {
-            inputs: vec![v(u(64), 4), v(u(64), 4)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x4, &::U64x4]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.ptestc.256")
         },
         "_testnzc_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.vtestnzc.ps")
         },
         "256_testnzc_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8), v(f(32), 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.vtestnzc.ps.256")
         },
         "_testnzc_pd" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.vtestnzc.pd")
         },
         "256_testnzc_pd" => Intrinsic {
-            inputs: vec![v(f(64), 4), v(f(64), 4)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.vtestnzc.pd.256")
         },
         "256_testnzc_si256" => Intrinsic {
-            inputs: vec![v(u(64), 4), v(u(64), 4)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x4, &::U64x4]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.ptestnzc.256")
         },
         "_testz_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4), v(f(32), 4)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.vtestz.ps")
         },
         "256_testz_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8), v(f(32), 8)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.vtestz.ps.256")
         },
         "_testz_pd" => Intrinsic {
-            inputs: vec![v(f(64), 2), v(f(64), 2)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.vtestz.pd")
         },
         "256_testz_pd" => Intrinsic {
-            inputs: vec![v(f(64), 4), v(f(64), 4)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.vtestz.pd.256")
         },
         "256_testz_si256" => Intrinsic {
-            inputs: vec![v(u(64), 4), v(u(64), 4)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U64x4, &::U64x4]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx.ptestz.256")
         },
         "256_zeroall" => Intrinsic {
-            inputs: vec![],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.avx.vzeroall")
         },
         "256_zeroupper" => Intrinsic {
-            inputs: vec![],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.avx.vzeroupper")
         },
         "256_abs_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 32)],
-            output: v(i(8), 32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x32]; &INPUTS },
+            output: &::I8x32,
             definition: Named("llvm.x86.avx2.pabs.b")
         },
         "256_abs_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 16)],
-            output: v(i(16), 16),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I16x16]; &INPUTS },
+            output: &::I16x16,
             definition: Named("llvm.x86.avx2.pabs.w")
         },
         "256_abs_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 8)],
-            output: v(i(32), 8),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I32x8]; &INPUTS },
+            output: &::I32x8,
             definition: Named("llvm.x86.avx2.pabs.d")
         },
         "256_adds_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 32), v(i(8), 32)],
-            output: v(i(8), 32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS },
+            output: &::I8x32,
             definition: Named("llvm.x86.avx2.padds.b")
         },
         "256_adds_epu8" => Intrinsic {
-            inputs: vec![v(u(8), 32), v(u(8), 32)],
-            output: v(u(8), 32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS },
+            output: &::U8x32,
             definition: Named("llvm.x86.avx2.paddus.b")
         },
         "256_adds_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 16), v(i(16), 16)],
-            output: v(i(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS },
+            output: &::I16x16,
             definition: Named("llvm.x86.avx2.padds.w")
         },
         "256_adds_epu16" => Intrinsic {
-            inputs: vec![v(u(16), 16), v(u(16), 16)],
-            output: v(u(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS },
+            output: &::U16x16,
             definition: Named("llvm.x86.avx2.paddus.w")
         },
         "256_avg_epu8" => Intrinsic {
-            inputs: vec![v(u(8), 32), v(u(8), 32)],
-            output: v(u(8), 32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS },
+            output: &::U8x32,
             definition: Named("llvm.x86.avx2.pavg.b")
         },
         "256_avg_epu16" => Intrinsic {
-            inputs: vec![v(u(16), 16), v(u(16), 16)],
-            output: v(u(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS },
+            output: &::U16x16,
             definition: Named("llvm.x86.avx2.pavg.w")
         },
         "256_hadd_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 16), v(i(16), 16)],
-            output: v(i(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS },
+            output: &::I16x16,
             definition: Named("llvm.x86.avx2.phadd.w")
         },
         "256_hadd_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 8), v(i(32), 8)],
-            output: v(i(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS },
+            output: &::I32x8,
             definition: Named("llvm.x86.avx2.phadd.d")
         },
         "256_hadds_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 16), v(i(16), 16)],
-            output: v(i(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS },
+            output: &::I16x16,
             definition: Named("llvm.x86.avx2.phadd.sw")
         },
         "256_hsub_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 16), v(i(16), 16)],
-            output: v(i(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS },
+            output: &::I16x16,
             definition: Named("llvm.x86.avx2.phsub.w")
         },
         "256_hsub_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 8), v(i(32), 8)],
-            output: v(i(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS },
+            output: &::I32x8,
             definition: Named("llvm.x86.avx2.phsub.d")
         },
         "256_hsubs_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 16), v(i(16), 16)],
-            output: v(i(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS },
+            output: &::I16x16,
             definition: Named("llvm.x86.avx2.phsub.sw")
         },
         "256_madd_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 16), v(i(16), 16)],
-            output: v(i(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS },
+            output: &::I32x8,
             definition: Named("llvm.x86.avx2.pmadd.wd")
         },
         "256_maddubs_epi16" => Intrinsic {
-            inputs: vec![v(i(8), 32), v(i(8), 32)],
-            output: v(i(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS },
+            output: &::I16x16,
             definition: Named("llvm.x86.avx2.pmadd.ub.sw")
         },
         "_mask_i32gather_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 4), p(true, i(32), Some(i(8))), v(i(32), 4), v(i(32), 4), i_(32, 8)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::I32x4, { static PTR: Type = Type::Pointer(&::I32, Some(&::I8), true); &PTR }, &::I32x4, &::I32x4, &::I32_8]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.x86.avx2.gather.d.d")
         },
         "_mask_i32gather_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4), p(true, f(32), Some(i(8))), v(i(32), 4), v_(i(32), f(32), 4), i_(32, 8)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::F32x4, { static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I32x4, &::I32x4_F32, &::I32_8]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.x86.avx2.gather.d.ps")
         },
         "256_mask_i32gather_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 8), p(true, i(32), Some(i(8))), v(i(32), 8), v(i(32), 8), i_(32, 8)],
-            output: v(i(32), 8),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::I32x8, { static PTR: Type = Type::Pointer(&::I32, Some(&::I8), true); &PTR }, &::I32x8, &::I32x8, &::I32_8]; &INPUTS },
+            output: &::I32x8,
             definition: Named("llvm.x86.avx2.gather.d.d.256")
         },
         "256_mask_i32gather_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8), p(true, f(32), Some(i(8))), v(i(32), 8), v_(i(32), f(32), 8), i_(32, 8)],
-            output: v(f(32), 8),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::F32x8, { static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I32x8, &::I32x8_F32, &::I32_8]; &INPUTS },
+            output: &::F32x8,
             definition: Named("llvm.x86.avx2.gather.d.ps.256")
         },
         "_mask_i32gather_epi64" => Intrinsic {
-            inputs: vec![v(i(64), 2), p(true, i(64), Some(i(8))), v(i(32), 4), v(i(64), 2), i_(32, 8)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::I64x2, { static PTR: Type = Type::Pointer(&::I64, Some(&::I8), true); &PTR }, &::I32x4, &::I64x2, &::I32_8]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.x86.avx2.gather.d.q")
         },
         "_mask_i32gather_pd" => Intrinsic {
-            inputs: vec![v(f(64), 2), p(true, f(64), Some(i(8))), v(i(32), 4), v_(i(64), f(64), 2), i_(32, 8)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::F64x2, { static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I32x4, &::I64x2_F64, &::I32_8]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.x86.avx2.gather.d.pd")
         },
         "256_mask_i32gather_epi64" => Intrinsic {
-            inputs: vec![v(i(64), 4), p(true, i(64), Some(i(8))), v(i(32), 4), v(i(64), 4), i_(32, 8)],
-            output: v(i(64), 4),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::I64x4, { static PTR: Type = Type::Pointer(&::I64, Some(&::I8), true); &PTR }, &::I32x4, &::I64x4, &::I32_8]; &INPUTS },
+            output: &::I64x4,
             definition: Named("llvm.x86.avx2.gather.d.q.256")
         },
         "256_mask_i32gather_pd" => Intrinsic {
-            inputs: vec![v(f(64), 4), p(true, f(64), Some(i(8))), v(i(32), 4), v_(i(64), f(64), 4), i_(32, 8)],
-            output: v(f(64), 4),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::F64x4, { static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I32x4, &::I64x4_F64, &::I32_8]; &INPUTS },
+            output: &::F64x4,
             definition: Named("llvm.x86.avx2.gather.d.pd.256")
         },
         "_mask_i64gather_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 4), p(true, i(32), Some(i(8))), v(i(64), 2), v(i(32), 4), i_(32, 8)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::I32x4, { static PTR: Type = Type::Pointer(&::I32, Some(&::I8), true); &PTR }, &::I64x2, &::I32x4, &::I32_8]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.x86.avx2.gather.q.d")
         },
         "_mask_i64gather_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4), p(true, f(32), Some(i(8))), v(i(64), 2), v_(i(32), f(32), 4), i_(32, 8)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::F32x4, { static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I64x2, &::I32x4_F32, &::I32_8]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.x86.avx2.gather.q.ps")
         },
         "256_mask_i64gather_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 4), p(true, i(32), Some(i(8))), v(i(64), 4), v(i(32), 4), i_(32, 8)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::I32x4, { static PTR: Type = Type::Pointer(&::I32, Some(&::I8), true); &PTR }, &::I64x4, &::I32x4, &::I32_8]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.x86.avx2.gather.q.d")
         },
         "256_mask_i64gather_ps" => Intrinsic {
-            inputs: vec![v(f(32), 4), p(true, f(32), Some(i(8))), v(i(64), 4), v_(i(32), f(32), 4), i_(32, 8)],
-            output: v(f(32), 4),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::F32x4, { static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I64x4, &::I32x4_F32, &::I32_8]; &INPUTS },
+            output: &::F32x4,
             definition: Named("llvm.x86.avx2.gather.q.ps")
         },
         "_mask_i64gather_epi64" => Intrinsic {
-            inputs: vec![v(i(64), 2), p(true, i(64), Some(i(8))), v(i(64), 2), v(i(64), 2), i_(32, 8)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::I64x2, { static PTR: Type = Type::Pointer(&::I64, Some(&::I8), true); &PTR }, &::I64x2, &::I64x2, &::I32_8]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.x86.avx2.gather.q.q")
         },
         "_mask_i64gather_pd" => Intrinsic {
-            inputs: vec![v(f(64), 2), p(true, f(64), Some(i(8))), v(i(64), 2), v_(i(64), f(64), 2), i_(32, 8)],
-            output: v(f(64), 2),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::F64x2, { static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I64x2, &::I64x2_F64, &::I32_8]; &INPUTS },
+            output: &::F64x2,
             definition: Named("llvm.x86.avx2.gather.q.pd")
         },
         "256_mask_i64gather_epi64" => Intrinsic {
-            inputs: vec![v(i(64), 4), p(true, i(64), Some(i(8))), v(i(64), 4), v(i(64), 4), i_(32, 8)],
-            output: v(i(64), 4),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::I64x4, { static PTR: Type = Type::Pointer(&::I64, Some(&::I8), true); &PTR }, &::I64x4, &::I64x4, &::I32_8]; &INPUTS },
+            output: &::I64x4,
             definition: Named("llvm.x86.avx2.gather.q.q.256")
         },
         "256_mask_i64gather_pd" => Intrinsic {
-            inputs: vec![v(f(64), 4), p(true, f(64), Some(i(8))), v(i(64), 4), v_(i(64), f(64), 4), i_(32, 8)],
-            output: v(f(64), 4),
+            inputs: { static INPUTS: [&'static Type; 5] = [&::F64x4, { static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I64x4, &::I64x4_F64, &::I32_8]; &INPUTS },
+            output: &::F64x4,
             definition: Named("llvm.x86.avx2.gather.q.pd.256")
         },
         "_maskload_epi32" => Intrinsic {
-            inputs: vec![p(true, v(i(32), 4), Some(i(8))), v(i(32), 4)],
-            output: v(i(32), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::I32x4, Some(&::I8), true); &PTR }, &::I32x4]; &INPUTS },
+            output: &::I32x4,
             definition: Named("llvm.x86.avx2.maskload.d")
         },
         "_maskload_epi64" => Intrinsic {
-            inputs: vec![p(true, v(i(64), 2), Some(i(8))), v(i(64), 2)],
-            output: v(i(64), 2),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::I64x2, Some(&::I8), true); &PTR }, &::I64x2]; &INPUTS },
+            output: &::I64x2,
             definition: Named("llvm.x86.avx2.maskload.q")
         },
         "256_maskload_epi32" => Intrinsic {
-            inputs: vec![p(true, v(i(32), 8), Some(i(8))), v(i(32), 8)],
-            output: v(i(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::I32x8, Some(&::I8), true); &PTR }, &::I32x8]; &INPUTS },
+            output: &::I32x8,
             definition: Named("llvm.x86.avx2.maskload.d.256")
         },
         "256_maskload_epi64" => Intrinsic {
-            inputs: vec![p(true, v(i(64), 4), Some(i(8))), v(i(64), 4)],
-            output: v(i(64), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::I64x4, Some(&::I8), true); &PTR }, &::I64x4]; &INPUTS },
+            output: &::I64x4,
             definition: Named("llvm.x86.avx2.maskload.q.256")
         },
         "_maskstore_epi32" => Intrinsic {
-            inputs: vec![p(false, i(32), Some(i(8))), v(i(32), 4), v(i(32), 4)],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I8), false); &PTR }, &::I32x4, &::I32x4]; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.avx2.maskstore.d")
         },
         "_maskstore_epi64" => Intrinsic {
-            inputs: vec![p(false, i(64), Some(i(8))), v(i(64), 2), v(i(64), 2)],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I8), false); &PTR }, &::I64x2, &::I64x2]; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.avx2.maskstore.q")
         },
         "256_maskstore_epi32" => Intrinsic {
-            inputs: vec![p(false, i(32), Some(i(8))), v(i(32), 8), v(i(32), 8)],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I8), false); &PTR }, &::I32x8, &::I32x8]; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.avx2.maskstore.d.256")
         },
         "256_maskstore_epi64" => Intrinsic {
-            inputs: vec![p(false, i(64), Some(i(8))), v(i(64), 4), v(i(64), 4)],
-            output: void(),
+            inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I8), false); &PTR }, &::I64x4, &::I64x4]; &INPUTS },
+            output: &::VOID,
             definition: Named("llvm.x86.avx2.maskstore.q.256")
         },
         "256_max_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 32), v(i(8), 32)],
-            output: v(i(8), 32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS },
+            output: &::I8x32,
             definition: Named("llvm.x86.avx2.pmaxs.b")
         },
         "256_max_epu8" => Intrinsic {
-            inputs: vec![v(u(8), 32), v(u(8), 32)],
-            output: v(u(8), 32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS },
+            output: &::U8x32,
             definition: Named("llvm.x86.avx2.pmaxu.b")
         },
         "256_max_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 16), v(i(16), 16)],
-            output: v(i(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS },
+            output: &::I16x16,
             definition: Named("llvm.x86.avx2.pmaxs.w")
         },
         "256_max_epu16" => Intrinsic {
-            inputs: vec![v(u(16), 16), v(u(16), 16)],
-            output: v(u(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS },
+            output: &::U16x16,
             definition: Named("llvm.x86.avx2.pmaxu.w")
         },
         "256_max_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 8), v(i(32), 8)],
-            output: v(i(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS },
+            output: &::I32x8,
             definition: Named("llvm.x86.avx2.pmaxs.d")
         },
         "256_max_epu32" => Intrinsic {
-            inputs: vec![v(u(32), 8), v(u(32), 8)],
-            output: v(u(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x8, &::U32x8]; &INPUTS },
+            output: &::U32x8,
             definition: Named("llvm.x86.avx2.pmaxu.d")
         },
         "256_min_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 32), v(i(8), 32)],
-            output: v(i(8), 32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS },
+            output: &::I8x32,
             definition: Named("llvm.x86.avx2.pmins.b")
         },
         "256_min_epu8" => Intrinsic {
-            inputs: vec![v(u(8), 32), v(u(8), 32)],
-            output: v(u(8), 32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS },
+            output: &::U8x32,
             definition: Named("llvm.x86.avx2.pminu.b")
         },
         "256_min_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 16), v(i(16), 16)],
-            output: v(i(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS },
+            output: &::I16x16,
             definition: Named("llvm.x86.avx2.pmins.w")
         },
         "256_min_epu16" => Intrinsic {
-            inputs: vec![v(u(16), 16), v(u(16), 16)],
-            output: v(u(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS },
+            output: &::U16x16,
             definition: Named("llvm.x86.avx2.pminu.w")
         },
         "256_min_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 8), v(i(32), 8)],
-            output: v(i(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS },
+            output: &::I32x8,
             definition: Named("llvm.x86.avx2.pmins.d")
         },
         "256_min_epu32" => Intrinsic {
-            inputs: vec![v(u(32), 8), v(u(32), 8)],
-            output: v(u(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x8, &::U32x8]; &INPUTS },
+            output: &::U32x8,
             definition: Named("llvm.x86.avx2.pminu.d")
         },
         "256_movemask_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 32)],
-            output: i(32),
+            inputs: { static INPUTS: [&'static Type; 1] = [&::I8x32]; &INPUTS },
+            output: &::I32,
             definition: Named("llvm.x86.avx2.pmovmskb")
         },
         "256_mpsadbw_epu8" => Intrinsic {
-            inputs: vec![v(u(8), 32), v(u(8), 32), i_(32, 8)],
-            output: v(u(16), 16),
+            inputs: { static INPUTS: [&'static Type; 3] = [&::U8x32, &::U8x32, &::I32_8]; &INPUTS },
+            output: &::U16x16,
             definition: Named("llvm.x86.avx2.mpsadbw")
         },
         "256_mul_epi64" => Intrinsic {
-            inputs: vec![v(i(32), 8), v(i(32), 8)],
-            output: v(i(64), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS },
+            output: &::I64x4,
             definition: Named("llvm.x86.avx2.pmulq.dq")
         },
         "256_mul_epu64" => Intrinsic {
-            inputs: vec![v(u(32), 8), v(u(32), 8)],
-            output: v(u(64), 4),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U32x8, &::U32x8]; &INPUTS },
+            output: &::U64x4,
             definition: Named("llvm.x86.avx2.pmulq.dq")
         },
         "256_mulhi_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 16), v(i(16), 16)],
-            output: v(i(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS },
+            output: &::I16x16,
             definition: Named("llvm.x86.avx2.pmulhw.w")
         },
         "256_mulhi_epu16" => Intrinsic {
-            inputs: vec![v(u(16), 16), v(u(16), 16)],
-            output: v(u(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS },
+            output: &::U16x16,
             definition: Named("llvm.x86.avx2.pmulhw.w")
         },
         "256_mulhrs_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 16), v(i(16), 16)],
-            output: v(i(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS },
+            output: &::I16x16,
             definition: Named("llvm.x86.avx2.pmul.hr.sw")
         },
         "256_packs_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 16), v(i(16), 16)],
-            output: v(i(8), 32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS },
+            output: &::I8x32,
             definition: Named("llvm.x86.avx2.packsswb")
         },
         "256_packus_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 16), v(i(16), 16)],
-            output: v(u(8), 32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS },
+            output: &::U8x32,
             definition: Named("llvm.x86.avx2.packuswb")
         },
         "256_packs_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 8), v(i(32), 8)],
-            output: v(i(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS },
+            output: &::I16x16,
             definition: Named("llvm.x86.avx2.packssdw")
         },
         "256_packus_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 8), v(i(32), 8)],
-            output: v(u(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS },
+            output: &::U16x16,
             definition: Named("llvm.x86.avx2.packusdw")
         },
         "256_permutevar8x32_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 8), v(i(32), 8)],
-            output: v(i(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS },
+            output: &::I32x8,
             definition: Named("llvm.x86.avx2.permd")
         },
         "256_permutevar8x32_ps" => Intrinsic {
-            inputs: vec![v(f(32), 8), v(i(32), 8)],
-            output: v(f(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::I32x8]; &INPUTS },
+            output: &::F32x8,
             definition: Named("llvm.x86.avx2.permps")
         },
         "256_sad_epu8" => Intrinsic {
-            inputs: vec![v(u(8), 32), v(u(8), 32)],
-            output: v(u(8), 32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS },
+            output: &::U8x32,
             definition: Named("llvm.x86.avx2.psad.bw")
         },
         "256_shuffle_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 32), v(i(8), 32)],
-            output: v(i(8), 32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS },
+            output: &::I8x32,
             definition: Named("llvm.x86.avx2.pshuf.b")
         },
         "256_sign_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 32), v(i(8), 32)],
-            output: v(i(8), 32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS },
+            output: &::I8x32,
             definition: Named("llvm.x86.avx2.psign.b")
         },
         "256_sign_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 16), v(i(16), 16)],
-            output: v(i(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS },
+            output: &::I16x16,
             definition: Named("llvm.x86.avx2.psign.w")
         },
         "256_sign_epi32" => Intrinsic {
-            inputs: vec![v(i(32), 8), v(i(32), 8)],
-            output: v(i(32), 8),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS },
+            output: &::I32x8,
             definition: Named("llvm.x86.avx2.psign.d")
         },
         "256_subs_epi8" => Intrinsic {
-            inputs: vec![v(i(8), 32), v(i(8), 32)],
-            output: v(i(8), 32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS },
+            output: &::I8x32,
             definition: Named("llvm.x86.avx2.psubs.b")
         },
         "256_subs_epu8" => Intrinsic {
-            inputs: vec![v(u(8), 32), v(u(8), 32)],
-            output: v(u(8), 32),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS },
+            output: &::U8x32,
             definition: Named("llvm.x86.avx2.psubus.b")
         },
         "256_subs_epi16" => Intrinsic {
-            inputs: vec![v(i(16), 16), v(i(16), 16)],
-            output: v(i(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS },
+            output: &::I16x16,
             definition: Named("llvm.x86.avx2.psubs.w")
         },
         "256_subs_epu16" => Intrinsic {
-            inputs: vec![v(u(16), 16), v(u(16), 16)],
-            output: v(u(16), 16),
+            inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS },
+            output: &::U16x16,
             definition: Named("llvm.x86.avx2.psubus.w")
         },
+        "_fmadd_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
+            definition: Named("llvm.x86.fma.vfmadd.ps")
+        },
+        "_fmadd_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
+            definition: Named("llvm.x86.fma.vfmadd.pd")
+        },
+        "256_fmadd_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS },
+            output: &::F32x8,
+            definition: Named("llvm.x86.fma.vfmadd.ps.256")
+        },
+        "256_fmadd_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS },
+            output: &::F64x4,
+            definition: Named("llvm.x86.fma.vfmadd.pd.256")
+        },
+        "_fmaddsub_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
+            definition: Named("llvm.x86.fma.vfmaddsub.ps")
+        },
+        "_fmaddsub_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
+            definition: Named("llvm.x86.fma.vfmaddsub.pd")
+        },
+        "256_fmaddsub_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS },
+            output: &::F32x8,
+            definition: Named("llvm.x86.fma.vfmaddsub.ps.256")
+        },
+        "256_fmaddsub_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS },
+            output: &::F64x4,
+            definition: Named("llvm.x86.fma.vfmaddsub.pd.256")
+        },
+        "_fmsub_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
+            definition: Named("llvm.x86.fma.vfmsub.ps")
+        },
+        "_fmsub_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
+            definition: Named("llvm.x86.fma.vfmsub.pd")
+        },
+        "256_fmsub_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS },
+            output: &::F32x8,
+            definition: Named("llvm.x86.fma.vfmsub.ps.256")
+        },
+        "256_fmsub_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS },
+            output: &::F64x4,
+            definition: Named("llvm.x86.fma.vfmsub.pd.256")
+        },
+        "_fmsubadd_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
+            definition: Named("llvm.x86.fma.vfmsubadd.ps")
+        },
+        "_fmsubadd_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
+            definition: Named("llvm.x86.fma.vfmsubadd.pd")
+        },
+        "256_fmsubadd_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS },
+            output: &::F32x8,
+            definition: Named("llvm.x86.fma.vfmsubadd.ps.256")
+        },
+        "256_fmsubadd_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS },
+            output: &::F64x4,
+            definition: Named("llvm.x86.fma.vfmsubadd.pd.256")
+        },
+        "_fnmadd_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
+            definition: Named("llvm.x86.fma.vfnmadd.ps")
+        },
+        "_fnmadd_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
+            definition: Named("llvm.x86.fma.vfnmadd.pd")
+        },
+        "256_fnmadd_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS },
+            output: &::F32x8,
+            definition: Named("llvm.x86.fma.vfnmadd.ps.256")
+        },
+        "256_fnmadd_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS },
+            output: &::F64x4,
+            definition: Named("llvm.x86.fma.vfnmadd.pd.256")
+        },
+        "_fnmsub_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS },
+            output: &::F32x4,
+            definition: Named("llvm.x86.fma.vfnmsub.ps")
+        },
+        "_fnmsub_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS },
+            output: &::F64x2,
+            definition: Named("llvm.x86.fma.vfnmsub.pd")
+        },
+        "256_fnmsub_ps" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS },
+            output: &::F32x8,
+            definition: Named("llvm.x86.fma.vfnmsub.ps.256")
+        },
+        "256_fnmsub_pd" => Intrinsic {
+            inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS },
+            output: &::F64x4,
+            definition: Named("llvm.x86.fma.vfnmsub.pd.256")
+        },
         _ => return None,
     })
 }
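The hunks above change the generated intrinsics table in two ways: each entry's inputs now borrow from a per-entry static array instead of building a fresh vec! of Type values on every lookup, and a new FMA block is appended (vfmadd, vfmaddsub, vfmsub, vfmsubadd, vfnmadd, vfnmsub, each in 128-bit and 256-bit packed single and double forms). Below is a minimal, self-contained sketch of the table shape that results; Type, Intrinsic and find are illustrative stand-ins, not the real definitions from rustc's platform-intrinsics support code.

    // Illustrative stand-ins only; rustc's real Type/Intrinsic are richer.
    #[derive(Debug)]
    enum Type {
        F32x4,
        F64x2,
    }

    struct Intrinsic {
        inputs: &'static [&'static Type],
        output: &'static Type,
        llvm_name: &'static str,
    }

    static F32X4: Type = Type::F32x4;
    static F64X2: Type = Type::F64x2;

    // Each arm borrows from statics, so a lookup allocates nothing,
    // unlike the old per-call `vec![...]` construction.
    fn find(name: &str) -> Option<Intrinsic> {
        Some(match name {
            "_fmadd_ps" => Intrinsic {
                inputs: { static INPUTS: [&'static Type; 3] = [&F32X4, &F32X4, &F32X4]; &INPUTS },
                output: &F32X4,
                llvm_name: "llvm.x86.fma.vfmadd.ps",
            },
            "_fmadd_pd" => Intrinsic {
                inputs: { static INPUTS: [&'static Type; 3] = [&F64X2, &F64X2, &F64X2]; &INPUTS },
                output: &F64X2,
                llvm_name: "llvm.x86.fma.vfmadd.pd",
            },
            _ => return None,
        })
    }

    fn main() {
        if let Some(intr) = find("_fmadd_ps") {
            println!("{}: {} inputs, output {:?}", intr.llvm_name, intr.inputs.len(), intr.output);
        }
    }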
index 0b88f3130473c4d1ce48c2e9ef47702a4f047ce6..e9a32e53a9fe6372ad31fad1fbcabe8b0616cf4a 100644 (file)
@@ -11,8 +11,8 @@ crate-type = ["dylib"]
 [dependencies]
 log = { path = "../liblog" }
 rustc = { path = "../librustc" }
+rustc_back = { path = "../librustc_back" }
 rustc_bitflags = { path = "../librustc_bitflags" }
-rustc_front = { path = "../librustc_front" }
 rustc_metadata = { path = "../librustc_metadata" }
 rustc_mir = { path = "../librustc_mir" }
 syntax = { path = "../libsyntax" }
index fe83b609334497f1ff09e99382da95f897f21f7f..4c7755b1b0937bf8e62795104a6e9ba0a7021282 100644 (file)
@@ -15,9 +15,9 @@ use syntax::attr;
 use syntax::codemap::Span;
 use syntax::errors;
 use rustc::dep_graph::DepNode;
-use rustc::front::map::Map;
-use rustc_front::intravisit::Visitor;
-use rustc_front::hir;
+use rustc::hir::map::Map;
+use rustc::hir::intravisit::Visitor;
+use rustc::hir;
 
 struct RegistrarFinder {
     registrars: Vec<(ast::NodeId, Span)> ,
index 464899d5d4fe390e61ebf53c8509d8dc6e801f3f..5fa29771c57fbd1b423a197f81216833e6582c3d 100644 (file)
@@ -59,7 +59,6 @@
        html_root_url = "https://doc.rust-lang.org/nightly/")]
 #![cfg_attr(not(stage0), deny(warnings))]
 
-#![feature(dynamic_lib)]
 #![feature(staged_api)]
 #![feature(rustc_diagnostic_macros)]
 #![feature(rustc_private)]
@@ -69,7 +68,7 @@
 #[macro_use] #[no_link] extern crate rustc_bitflags;
 
 extern crate rustc;
-extern crate rustc_front;
+extern crate rustc_back;
 extern crate rustc_metadata;
 extern crate rustc_mir;
 
index c2f8d092b08fdb972e9d2f0342fbc4bf200dcd22..ac40215bbb1d0855ab60eaa855e634278f01a6c9 100644 (file)
@@ -44,9 +44,12 @@ fn call_malformed_plugin_attribute(a: &Session, b: Span) {
 }
 
 /// Read plugin metadata and dynamically load registrar functions.
-pub fn load_plugins(sess: &Session, cstore: &CStore, krate: &ast::Crate,
+pub fn load_plugins(sess: &Session,
+                    cstore: &CStore,
+                    krate: &ast::Crate,
+                    crate_name: &str,
                     addl_plugins: Option<Vec<String>>) -> Vec<PluginRegistrar> {
-    let mut loader = PluginLoader::new(sess, cstore);
+    let mut loader = PluginLoader::new(sess, cstore, crate_name);
 
     for attr in &krate.attrs {
         if !attr.check_name("plugin") {
@@ -82,10 +85,10 @@ pub fn load_plugins(sess: &Session, cstore: &CStore, krate: &ast::Crate,
 }
 
 impl<'a> PluginLoader<'a> {
-    fn new(sess: &'a Session, cstore: &'a CStore) -> PluginLoader<'a> {
+    fn new(sess: &'a Session, cstore: &'a CStore, crate_name: &str) -> PluginLoader<'a> {
         PluginLoader {
             sess: sess,
-            reader: CrateReader::new(sess, cstore),
+            reader: CrateReader::new(sess, cstore, crate_name),
             plugins: vec![],
         }
     }
@@ -103,12 +106,11 @@ impl<'a> PluginLoader<'a> {
     }
 
     // Dynamically link a registrar function into the compiler process.
-    #[allow(deprecated)]
     fn dylink_registrar(&mut self,
                         span: Span,
                         path: PathBuf,
                         symbol: String) -> PluginRegistrarFun {
-        use std::dynamic_lib::DynamicLibrary;
+        use rustc_back::dynamic_lib::DynamicLibrary;
 
         // Make sure the path contains a / or the linker will search for it.
         let path = env::current_dir().unwrap().join(&path);
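The plugin loader now reaches the dynamic-library helper through rustc_back instead of the deprecated std::dynamic_lib module (the #![feature(dynamic_lib)] attribute removed above). As a rough sketch of the loading pattern this hunk participates in, assuming rustc_back's copy keeps the open()/symbol() interface of the old std module; it only builds inside the compiler tree (nightly, rustc_private), and the registrar signature and error handling are simplified placeholders.

    // Sketch only: depends on rustc-internal crates (#![feature(rustc_private)]).
    use std::env;
    use std::mem;
    use std::path::PathBuf;

    use rustc_back::dynamic_lib::DynamicLibrary;

    // Placeholder signature; the real registrar takes &mut Registry.
    type PluginRegistrarFun = fn();

    fn dylink_registrar(path: PathBuf, symbol: &str) -> Result<PluginRegistrarFun, String> {
        // Prefix with the current directory so the dynamic loader treats the
        // argument as a file name instead of searching the library path.
        let path = env::current_dir().map_err(|e| e.to_string())?.join(&path);

        let lib = DynamicLibrary::open(Some(&path))?;
        unsafe {
            // symbol() yields a raw pointer to the exported registrar function.
            let raw: *mut u8 = lib.symbol(symbol)?;
            let registrar = mem::transmute::<*mut u8, PluginRegistrarFun>(raw);
            // Leak the handle: the mapped code must outlive the returned
            // function pointer, and there is no safe point to unload it.
            mem::forget(lib);
            Ok(registrar)
        }
    }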
index a51fd58db88dd1cda706cba1b62616d93b35a7e1..3cfd6a76dda63005649773a72f86d92d936d66ca 100644 (file)
@@ -13,7 +13,7 @@
 use rustc::lint::{EarlyLintPassObject, LateLintPassObject, LintId, Lint};
 use rustc::session::Session;
 
-use rustc::mir::transform::MirPass;
+use rustc::mir::transform::MirMapPass;
 
 use syntax::ext::base::{SyntaxExtension, NamedSyntaxExtension, NormalTT};
 use syntax::ext::base::{IdentTT, MultiModifier, MultiDecorator};
@@ -56,7 +56,7 @@ pub struct Registry<'a> {
     pub late_lint_passes: Vec<LateLintPassObject>,
 
     #[doc(hidden)]
-    pub mir_passes: Vec<Box<MirPass>>,
+    pub mir_passes: Vec<Box<for<'pcx> MirMapPass<'pcx>>>,
 
     #[doc(hidden)]
     pub lint_groups: HashMap<&'static str, Vec<LintId>>,
@@ -141,7 +141,7 @@ impl<'a> Registry<'a> {
     }
 
     /// Register a MIR pass
-    pub fn register_mir_pass(&mut self, pass: Box<MirPass>) {
+    pub fn register_mir_pass(&mut self, pass: Box<for<'pcx> MirMapPass<'pcx>>) {
         self.mir_passes.push(pass);
     }
 
index 56d6ca5543a0e592ae4aa05dd506b6a7bedd357f..0553e54e3aa9b145626ec09d6bcf6df224691257 100644 (file)
@@ -11,5 +11,4 @@ crate-type = ["dylib"]
 [dependencies]
 log = { path = "../liblog" }
 rustc = { path = "../librustc" }
-rustc_front = { path = "../librustc_front" }
 syntax = { path = "../libsyntax" }
index 8908dac7a36dd72c9cd355cd6e94e3da09c9a13a..a6ce4cc3ee41a443997e7cf8bf2e3823d63bf351 100644 (file)
 #[macro_use] extern crate log;
 #[macro_use] extern crate syntax;
 
-extern crate rustc;
-extern crate rustc_front;
-
-use self::PrivacyResult::*;
-use self::FieldName::*;
+#[macro_use] extern crate rustc;
 
 use std::cmp;
 use std::mem::replace;
 
-use rustc_front::hir::{self, PatKind};
-use rustc_front::intravisit::{self, Visitor};
+use rustc::hir::{self, PatKind};
+use rustc::hir::intravisit::{self, Visitor};
 
 use rustc::dep_graph::DepNode;
 use rustc::lint;
-use rustc::middle::def::{self, Def};
-use rustc::middle::def_id::DefId;
+use rustc::middle::cstore::CrateStore;
+use rustc::hir::def::{self, Def};
+use rustc::hir::def_id::DefId;
 use rustc::middle::privacy::{AccessLevel, AccessLevels};
-use rustc::middle::privacy::ImportUse::*;
-use rustc::middle::privacy::LastPrivate::*;
-use rustc::middle::privacy::PrivateDep::*;
-use rustc::middle::privacy::ExternalExports;
-use rustc::middle::ty;
-use rustc::util::nodemap::{NodeMap, NodeSet};
-use rustc::front::map as ast_map;
+use rustc::ty::{self, TyCtxt};
+use rustc::util::nodemap::NodeSet;
+use rustc::hir::map as ast_map;
 
 use syntax::ast;
 use syntax::codemap::Span;
@@ -61,104 +54,12 @@ type Context<'a, 'tcx> = (&'a ty::MethodMap<'tcx>, &'a def::ExportMap);
 /// optionally the same for a note about the error.
 type CheckResult = Option<(Span, String, Option<(Span, String)>)>;
 
-////////////////////////////////////////////////////////////////////////////////
-/// The parent visitor, used to determine what's the parent of what (node-wise)
-////////////////////////////////////////////////////////////////////////////////
-
-struct ParentVisitor<'a, 'tcx:'a> {
-    tcx: &'a ty::ctxt<'tcx>,
-    parents: NodeMap<ast::NodeId>,
-    curparent: ast::NodeId,
-}
-
-impl<'a, 'tcx, 'v> Visitor<'v> for ParentVisitor<'a, 'tcx> {
-    /// We want to visit items in the context of their containing
-    /// module and so forth, so supply a crate for doing a deep walk.
-    fn visit_nested_item(&mut self, item: hir::ItemId) {
-        self.visit_item(self.tcx.map.expect_item(item.id))
-    }
-    fn visit_item(&mut self, item: &hir::Item) {
-        self.parents.insert(item.id, self.curparent);
-
-        let prev = self.curparent;
-        match item.node {
-            hir::ItemMod(..) => { self.curparent = item.id; }
-            // Enum variants are parented to the enum definition itself because
-            // they inherit privacy
-            hir::ItemEnum(ref def, _) => {
-                for variant in &def.variants {
-                    // The parent is considered the enclosing enum because the
-                    // enum will dictate the privacy visibility of this variant
-                    // instead.
-                    self.parents.insert(variant.node.data.id(), item.id);
-                }
-            }
-
-            // Trait methods are always considered "public", but if the trait is
-            // private then we need some private item in the chain from the
-            // method to the root. In this case, if the trait is private, then
-            // parent all the methods to the trait to indicate that they're
-            // private.
-            hir::ItemTrait(_, _, _, ref trait_items) if item.vis != hir::Public => {
-                for trait_item in trait_items {
-                    self.parents.insert(trait_item.id, item.id);
-                }
-            }
-
-            _ => {}
-        }
-        intravisit::walk_item(self, item);
-        self.curparent = prev;
-    }
-
-    fn visit_foreign_item(&mut self, a: &hir::ForeignItem) {
-        self.parents.insert(a.id, self.curparent);
-        intravisit::walk_foreign_item(self, a);
-    }
-
-    fn visit_fn(&mut self, a: intravisit::FnKind<'v>, b: &'v hir::FnDecl,
-                c: &'v hir::Block, d: Span, id: ast::NodeId) {
-        // We already took care of some trait methods above, otherwise things
-        // like impl methods and pub trait methods are parented to the
-        // containing module, not the containing trait.
-        if !self.parents.contains_key(&id) {
-            self.parents.insert(id, self.curparent);
-        }
-        intravisit::walk_fn(self, a, b, c, d);
-    }
-
-    fn visit_impl_item(&mut self, ii: &'v hir::ImplItem) {
-        // visit_fn handles methods, but associated consts have to be handled
-        // here.
-        if !self.parents.contains_key(&ii.id) {
-            self.parents.insert(ii.id, self.curparent);
-        }
-        intravisit::walk_impl_item(self, ii);
-    }
-
-    fn visit_variant_data(&mut self, s: &hir::VariantData, _: ast::Name,
-                        _: &'v hir::Generics, item_id: ast::NodeId, _: Span) {
-        // Struct constructors are parented to their struct definitions because
-        // they essentially are the struct definitions.
-        if !s.is_struct() {
-            self.parents.insert(s.id(), item_id);
-        }
-
-        // While we have the id of the struct definition, go ahead and parent
-        // all the fields.
-        for field in s.fields() {
-            self.parents.insert(field.node.id, self.curparent);
-        }
-        intravisit::walk_struct_def(self, s)
-    }
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 /// The embargo visitor, used to determine the exports of the ast
 ////////////////////////////////////////////////////////////////////////////////
 
 struct EmbargoVisitor<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     export_map: &'a def::ExportMap,
 
     // Accessibility levels for reachable nodes
@@ -262,7 +163,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> {
                 for variant in &def.variants {
                     let variant_level = self.update(variant.node.data.id(), item_level);
                     for field in variant.node.data.fields() {
-                        self.update(field.node.id, variant_level);
+                        self.update(field.id, variant_level);
                     }
                 }
             }
@@ -288,8 +189,8 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> {
                     self.update(def.id(), item_level);
                 }
                 for field in def.fields() {
-                    if field.node.kind.visibility() == hir::Public {
-                        self.update(field.node.id, item_level);
+                    if field.vis == hir::Public {
+                        self.update(field.id, item_level);
                     }
                 }
             }
@@ -347,7 +248,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> {
                 if item_level.is_some() {
                     self.reach().visit_generics(generics);
                     for field in struct_def.fields() {
-                        if self.get(field.node.id).is_some() {
+                        if self.get(field.id).is_some() {
                             self.reach().visit_struct_field(field);
                         }
                     }
@@ -475,505 +376,40 @@ impl<'b, 'a, 'tcx: 'a, 'v> Visitor<'v> for ReachEverythingInTheInterfaceVisitor<
 ////////////////////////////////////////////////////////////////////////////////
 
 struct PrivacyVisitor<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     curitem: ast::NodeId,
     in_foreign: bool,
-    parents: NodeMap<ast::NodeId>,
-    external_exports: ExternalExports,
-}
-
-#[derive(Debug)]
-enum PrivacyResult {
-    Allowable,
-    ExternallyDenied,
-    DisallowedBy(ast::NodeId),
-}
-
-enum FieldName {
-    UnnamedField(usize), // index
-    NamedField(ast::Name),
 }
 
 impl<'a, 'tcx> PrivacyVisitor<'a, 'tcx> {
-    // used when debugging
-    fn nodestr(&self, id: ast::NodeId) -> String {
-        self.tcx.map.node_to_string(id).to_string()
-    }
-
-    // Determines whether the given definition is public from the point of view
-    // of the current item.
-    fn def_privacy(&self, did: DefId) -> PrivacyResult {
-        let node_id = if let Some(node_id) = self.tcx.map.as_local_node_id(did) {
-            node_id
-        } else {
-            if self.external_exports.contains(&did) {
-                debug!("privacy - {:?} was externally exported", did);
-                return Allowable;
-            }
-            debug!("privacy - is {:?} a public method", did);
-
-            return match self.tcx.impl_or_trait_items.borrow().get(&did) {
-                Some(&ty::ConstTraitItem(ref ac)) => {
-                    debug!("privacy - it's a const: {:?}", *ac);
-                    match ac.container {
-                        ty::TraitContainer(id) => {
-                            debug!("privacy - recursing on trait {:?}", id);
-                            self.def_privacy(id)
-                        }
-                        ty::ImplContainer(id) => {
-                            match self.tcx.impl_trait_ref(id) {
-                                Some(t) => {
-                                    debug!("privacy - impl of trait {:?}", id);
-                                    self.def_privacy(t.def_id)
-                                }
-                                None => {
-                                    debug!("privacy - found inherent \
-                                            associated constant {:?}",
-                                            ac.vis);
-                                    if ac.vis == hir::Public {
-                                        Allowable
-                                    } else {
-                                        ExternallyDenied
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-                Some(&ty::MethodTraitItem(ref meth)) => {
-                    debug!("privacy - well at least it's a method: {:?}",
-                           *meth);
-                    match meth.container {
-                        ty::TraitContainer(id) => {
-                            debug!("privacy - recursing on trait {:?}", id);
-                            self.def_privacy(id)
-                        }
-                        ty::ImplContainer(id) => {
-                            match self.tcx.impl_trait_ref(id) {
-                                Some(t) => {
-                                    debug!("privacy - impl of trait {:?}", id);
-                                    self.def_privacy(t.def_id)
-                                }
-                                None => {
-                                    debug!("privacy - found a method {:?}",
-                                            meth.vis);
-                                    if meth.vis == hir::Public {
-                                        Allowable
-                                    } else {
-                                        ExternallyDenied
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-                Some(&ty::TypeTraitItem(ref typedef)) => {
-                    match typedef.container {
-                        ty::TraitContainer(id) => {
-                            debug!("privacy - recursing on trait {:?}", id);
-                            self.def_privacy(id)
-                        }
-                        ty::ImplContainer(id) => {
-                            match self.tcx.impl_trait_ref(id) {
-                                Some(t) => {
-                                    debug!("privacy - impl of trait {:?}", id);
-                                    self.def_privacy(t.def_id)
-                                }
-                                None => {
-                                    debug!("privacy - found a typedef {:?}",
-                                            typedef.vis);
-                                    if typedef.vis == hir::Public {
-                                        Allowable
-                                    } else {
-                                        ExternallyDenied
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-                None => {
-                    debug!("privacy - nope, not even a method");
-                    ExternallyDenied
-                }
-            };
-        };
-
-        debug!("privacy - local {} not public all the way down",
-               self.tcx.map.node_to_string(node_id));
-        // return quickly for things in the same module
-        if self.parents.get(&node_id) == self.parents.get(&self.curitem) {
-            debug!("privacy - same parent, we're done here");
-            return Allowable;
-        }
-
-        // We now know that there is at least one private member between the
-        // destination and the root.
-        let mut closest_private_id = node_id;
-        loop {
-            debug!("privacy - examining {}", self.nodestr(closest_private_id));
-            let vis = match self.tcx.map.find(closest_private_id) {
-                // If this item is a method, then we know for sure that it's an
-                // actual method and not a static method. The reason for this is
-                // that these cases are only hit in the ExprMethodCall
-                // expression, and ExprCall will have its path checked later
-                // (the path of the trait/impl) if it's a static method.
-                //
-                // With this information, then we can completely ignore all
-                // trait methods. The privacy violation would be if the trait
-                // couldn't get imported, not if the method couldn't be used
-                // (all trait methods are public).
-                //
-                // However, if this is an impl method, then we dictate this
-                // decision solely based on the privacy of the method
-                // invocation.
-                // FIXME(#10573) is this the right behavior? Why not consider
-                //               where the method was defined?
-                Some(ast_map::NodeImplItem(ii)) => {
-                    match ii.node {
-                        hir::ImplItemKind::Const(..) |
-                        hir::ImplItemKind::Method(..) => {
-                            let imp = self.tcx.map
-                                          .get_parent_did(closest_private_id);
-                            match self.tcx.impl_trait_ref(imp) {
-                                Some(..) => return Allowable,
-                                _ if ii.vis == hir::Public => {
-                                    return Allowable
-                                }
-                                _ => ii.vis
-                            }
-                        }
-                        hir::ImplItemKind::Type(_) => return Allowable,
-                    }
-                }
-                Some(ast_map::NodeTraitItem(_)) => {
-                    return Allowable;
-                }
-
-                // This is not a method call, extract the visibility as one
-                // would normally look at it
-                Some(ast_map::NodeItem(it)) => it.vis,
-                Some(ast_map::NodeForeignItem(_)) => {
-                    self.tcx.map.get_foreign_vis(closest_private_id)
-                }
-                Some(ast_map::NodeVariant(..)) => {
-                    hir::Public // need to move up a level (to the enum)
-                }
-                _ => hir::Public,
-            };
-            if vis != hir::Public { break }
-            // if we've reached the root, then everything was allowable and this
-            // access is public.
-            if closest_private_id == ast::CRATE_NODE_ID { return Allowable }
-            closest_private_id = *self.parents.get(&closest_private_id).unwrap();
-
-            // If we reached the top, then we were public all the way down and
-            // we can allow this access.
-            if closest_private_id == ast::DUMMY_NODE_ID { return Allowable }
-        }
-        debug!("privacy - closest priv {}", self.nodestr(closest_private_id));
-        if self.private_accessible(closest_private_id) {
-            Allowable
-        } else {
-            DisallowedBy(closest_private_id)
-        }
-    }
-
-    /// True if `id` is both local and private-accessible
-    fn local_private_accessible(&self, did: DefId) -> bool {
-        if let Some(node_id) = self.tcx.map.as_local_node_id(did) {
-            self.private_accessible(node_id)
-        } else {
-            false
-        }
-    }
-
-    /// For a local private node in the AST, this function will determine
-    /// whether the node is accessible by the current module that iteration is
-    /// inside.
-    fn private_accessible(&self, id: ast::NodeId) -> bool {
-        let parent = *self.parents.get(&id).unwrap();
-        debug!("privacy - accessible parent {}", self.nodestr(parent));
-
-        // After finding `did`'s closest private member, we roll ourselves back
-        // to see if this private member's parent is anywhere in our ancestry.
-        // By the privacy rules, we can access all of our ancestor's private
-        // members, so that's why we test the parent, and not the did itself.
-        let mut cur = self.curitem;
-        loop {
-            debug!("privacy - questioning {}, {}", self.nodestr(cur), cur);
-            match cur {
-                // If the relevant parent is in our history, then we're allowed
-                // to look inside any of our ancestor's immediate private items,
-                // so this access is valid.
-                x if x == parent => return true,
-
-                // If we've reached the root, then we couldn't access this item
-                // in the first place
-                ast::DUMMY_NODE_ID => return false,
-
-                // Keep going up
-                _ => {}
-            }
-
-            cur = *self.parents.get(&cur).unwrap();
-        }
-    }
-
-    fn report_error(&self, result: CheckResult) -> bool {
-        match result {
-            None => true,
-            Some((span, msg, note)) => {
-                let mut err = self.tcx.sess.struct_span_err(span, &msg[..]);
-                if let Some((span, msg)) = note {
-                    err.span_note(span, &msg[..]);
-                }
-                err.emit();
-                false
-            },
-        }
-    }
-
-    /// Guarantee that a particular definition is public. Returns a CheckResult
-    /// which contains any errors found. These can be reported using `report_error`.
-    /// If the result is `None`, no errors were found.
-    fn ensure_public(&self,
-                     span: Span,
-                     to_check: DefId,
-                     source_did: Option<DefId>,
-                     msg: &str)
-                     -> CheckResult {
-        use rustc_front::hir::Item_::ItemExternCrate;
-        debug!("ensure_public(span={:?}, to_check={:?}, source_did={:?}, msg={:?})",
-               span, to_check, source_did, msg);
-        let def_privacy = self.def_privacy(to_check);
-        debug!("ensure_public: def_privacy={:?}", def_privacy);
-        let id = match def_privacy {
-            ExternallyDenied => {
-                return Some((span, format!("{} is private", msg), None))
-            }
-            Allowable => return None,
-            DisallowedBy(id) => id,
-        };
-
-        // If we're disallowed by a particular id, then we attempt to
-        // give a nice error message to say why it was disallowed. It
-        // was either because the item itself is private or because
-        // its parent is private and its parent isn't in our
-        // ancestry. (Both the item being checked and its parent must
-        // be local.)
-        let def_id = source_did.unwrap_or(to_check);
-        let node_id = self.tcx.map.as_local_node_id(def_id);
-
-        // Warn when using a inaccessible extern crate.
-        if let Some(node_id) = self.tcx.map.as_local_node_id(to_check) {
-            match self.tcx.map.get(node_id) {
-                ast_map::Node::NodeItem(&hir::Item { node: ItemExternCrate(_), name, .. }) => {
-                    self.tcx.sess.add_lint(lint::builtin::INACCESSIBLE_EXTERN_CRATE,
-                                           node_id,
-                                           span,
-                                           format!("extern crate `{}` is private", name));
-                    return None;
-                }
-                _ => {}
-            }
-        }
-
-        let (err_span, err_msg) = if Some(id) == node_id {
-            return Some((span, format!("{} is private", msg), None));
-        } else {
-            (span, format!("{} is inaccessible", msg))
-        };
-        let item = match self.tcx.map.find(id) {
-            Some(ast_map::NodeItem(item)) => {
-                match item.node {
-                    // If an impl disallowed this item, then this is resolve's
-                    // way of saying that a struct/enum's static method was
-                    // invoked, and the struct/enum itself is private. Crawl
-                    // back up the chains to find the relevant struct/enum that
-                    // was private.
-                    hir::ItemImpl(_, _, _, _, ref ty, _) => {
-                        match ty.node {
-                            hir::TyPath(..) => {}
-                            _ => return Some((err_span, err_msg, None)),
-                        };
-                        let def = self.tcx.def_map.borrow().get(&ty.id).unwrap().full_def();
-                        let did = def.def_id();
-                        let node_id = self.tcx.map.as_local_node_id(did).unwrap();
-                        match self.tcx.map.get(node_id) {
-                            ast_map::NodeItem(item) => item,
-                            _ => self.tcx.sess.span_bug(item.span,
-                                                        "path is not an item")
-                        }
-                    }
-                    _ => item
-                }
-            }
-            Some(..) | None => return Some((err_span, err_msg, None)),
-        };
-        let desc = match item.node {
-            hir::ItemMod(..) => "module",
-            hir::ItemTrait(..) => "trait",
-            hir::ItemStruct(..) => "struct",
-            hir::ItemEnum(..) => "enum",
-            _ => return Some((err_span, err_msg, None))
-        };
-        let msg = format!("{} `{}` is private", desc, item.name);
-        Some((err_span, err_msg, Some((span, msg))))
+    fn item_is_accessible(&self, did: DefId) -> bool {
+        match self.tcx.map.as_local_node_id(did) {
+            Some(node_id) =>
+                ty::Visibility::from_hir(&self.tcx.map.expect_item(node_id).vis, node_id, self.tcx),
+            None => self.tcx.sess.cstore.visibility(did),
+        }.is_accessible_from(self.curitem, &self.tcx.map)
     }
 
     // Checks that a field is in scope.
-    fn check_field(&mut self,
-                   span: Span,
-                   def: ty::AdtDef<'tcx>,
-                   v: ty::VariantDef<'tcx>,
-                   name: FieldName) {
-        let field = match name {
-            NamedField(f_name) => {
-                debug!("privacy - check named field {} in struct {:?}", f_name, def);
-                v.field_named(f_name)
-            }
-            UnnamedField(idx) => &v.fields[idx]
-        };
-        if field.vis == hir::Public || self.local_private_accessible(field.did) {
-            return;
-        }
-
-        let struct_desc = match def.adt_kind() {
-            ty::AdtKind::Struct =>
-                format!("struct `{}`", self.tcx.item_path_str(def.did)),
-            // struct variant fields have inherited visibility
-            ty::AdtKind::Enum => return
-        };
-        let msg = match name {
-            NamedField(name) => format!("field `{}` of {} is private",
-                                        name, struct_desc),
-            UnnamedField(idx) => format!("field #{} of {} is private",
-                                         idx, struct_desc),
-        };
-        span_err!(self.tcx.sess, span, E0451,
-                  "{}", &msg[..]);
-    }
-
-    // Given the ID of a method, checks to ensure it's in scope.
-    fn check_static_method(&mut self,
-                           span: Span,
-                           method_id: DefId,
-                           name: ast::Name) {
-        self.report_error(self.ensure_public(span,
-                                             method_id,
-                                             None,
-                                             &format!("method `{}`",
-                                                     name)));
-    }
-
-    // Checks that a path is in scope.
-    fn check_path(&mut self, span: Span, path_id: ast::NodeId, last: ast::Name) {
-        debug!("privacy - path {}", self.nodestr(path_id));
-        let path_res = *self.tcx.def_map.borrow().get(&path_id).unwrap();
-        let ck = |tyname: &str| {
-            let ck_public = |def: DefId| {
-                debug!("privacy - ck_public {:?}", def);
-                let origdid = path_res.def_id();
-                self.ensure_public(span,
-                                   def,
-                                   Some(origdid),
-                                   &format!("{} `{}`", tyname, last))
-            };
-
-            match path_res.last_private {
-                LastMod(AllPublic) => {},
-                LastMod(DependsOn(def)) => {
-                    self.report_error(ck_public(def));
-                },
-                LastImport { value_priv,
-                             value_used: check_value,
-                             type_priv,
-                             type_used: check_type } => {
-                    // This dance with found_error is because we don't want to
-                    // report a privacy error twice for the same directive.
-                    let found_error = match (type_priv, check_type) {
-                        (Some(DependsOn(def)), Used) => {
-                            !self.report_error(ck_public(def))
-                        },
-                        _ => false,
-                    };
-                    if !found_error {
-                        match (value_priv, check_value) {
-                            (Some(DependsOn(def)), Used) => {
-                                self.report_error(ck_public(def));
-                            },
-                            _ => {},
-                        }
-                    }
-                    // If an import is not used in either namespace, we still
-                    // want to check that it could be legal. Therefore we check
-                    // in both namespaces and only report an error if both would
-                    // be illegal. We only report one error, even if it is
-                    // illegal to import from both namespaces.
-                    match (value_priv, check_value, type_priv, check_type) {
-                        (Some(p), Unused, None, _) |
-                        (None, _, Some(p), Unused) => {
-                            let p = match p {
-                                AllPublic => None,
-                                DependsOn(def) => ck_public(def),
-                            };
-                            if p.is_some() {
-                                self.report_error(p);
-                            }
-                        },
-                        (Some(v), Unused, Some(t), Unused) => {
-                            let v = match v {
-                                AllPublic => None,
-                                DependsOn(def) => ck_public(def),
-                            };
-                            let t = match t {
-                                AllPublic => None,
-                                DependsOn(def) => ck_public(def),
-                            };
-                            if let (Some(_), Some(t)) = (v, t) {
-                                self.report_error(Some(t));
-                            }
-                        },
-                        _ => {},
-                    }
-                },
-            }
-        };
-        // FIXME(#12334) Imports can refer to definitions in both the type and
-        // value namespaces. The privacy information is aware of this, but the
-        // def map is not. Therefore the names we work out below will not always
-        // be accurate and we can get slightly wonky error messages (but type
-        // checking is always correct).
-        match path_res.full_def() {
-            Def::Fn(..) => ck("function"),
-            Def::Static(..) => ck("static"),
-            Def::Const(..) => ck("const"),
-            Def::AssociatedConst(..) => ck("associated const"),
-            Def::Variant(..) => ck("variant"),
-            Def::TyAlias(..) => ck("type"),
-            Def::Enum(..) => ck("enum"),
-            Def::Trait(..) => ck("trait"),
-            Def::Struct(..) => ck("struct"),
-            Def::Method(..) => ck("method"),
-            Def::Mod(..) => ck("module"),
-            _ => {}
+    fn check_field(&mut self, span: Span, def: ty::AdtDef<'tcx>, field: ty::FieldDef<'tcx>) {
+        if def.adt_kind() == ty::AdtKind::Struct &&
+           !field.vis.is_accessible_from(self.curitem, &self.tcx.map) {
+            span_err!(self.tcx.sess, span, E0451, "field `{}` of struct `{}` is private",
+                      field.name, self.tcx.item_path_str(def.did));
         }
     }
 
     // Checks that a method is in scope.
-    fn check_method(&mut self, span: Span, method_def_id: DefId,
-                    name: ast::Name) {
+    fn check_method(&mut self, span: Span, method_def_id: DefId) {
         match self.tcx.impl_or_trait_item(method_def_id).container() {
-            ty::ImplContainer(_) => {
-                self.check_static_method(span, method_def_id, name)
-            }
             // Trait methods are always all public. The only controlling factor
             // is whether the trait itself is accessible or not.
-            ty::TraitContainer(trait_def_id) => {
-                self.report_error(self.ensure_public(span, trait_def_id,
-                                                     None, "source trait"));
+            ty::TraitContainer(trait_def_id) if !self.item_is_accessible(trait_def_id) => {
+                let msg = format!("source trait `{}` is private",
+                                  self.tcx.item_path_str(trait_def_id));
+                self.tcx.sess.span_err(span, &msg);
             }
+            _ => {}
         }
     }
 }
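With the ParentVisitor and PrivacyResult machinery gone, both checks above reduce to a single question: is the item's visibility accessible from the module containing the current item? Roughly, the current module must sit inside the scope the visibility is restricted to. A small self-contained model of such a predicate follows; the Visibility and ModuleTree types are simplified stand-ins, not rustc's ty::Visibility or HIR map.

    use std::collections::HashMap;

    type NodeId = u32;

    // Simplified stand-ins for rustc's ty::Visibility and the HIR module tree.
    enum Visibility {
        Public,
        Restricted(NodeId), // visible from this module and its descendants
    }

    struct ModuleTree {
        parent: HashMap<NodeId, NodeId>, // child module -> parent module
    }

    impl ModuleTree {
        // Walk from `module` toward the crate root looking for `ancestor`.
        fn is_descendant_of(&self, mut module: NodeId, ancestor: NodeId) -> bool {
            loop {
                if module == ancestor {
                    return true;
                }
                match self.parent.get(&module) {
                    Some(&p) => module = p,
                    None => return false, // reached the crate root
                }
            }
        }
    }

    impl Visibility {
        fn is_accessible_from(&self, module: NodeId, tree: &ModuleTree) -> bool {
            match *self {
                Visibility::Public => true,
                Visibility::Restricted(root) => tree.is_descendant_of(module, root),
            }
        }
    }

    fn main() {
        // crate root = 0, mod a = 1, mod a::b = 2, mod c = 3
        let tree = ModuleTree {
            parent: vec![(1, 0), (2, 1), (3, 0)].into_iter().collect(),
        };
        let field_vis = Visibility::Restricted(1); // private to `mod a`
        assert!(field_vis.is_accessible_from(2, &tree));  // a::b may use it
        assert!(!field_vis.is_accessible_from(3, &tree)); // c may not
        assert!(Visibility::Public.is_accessible_from(3, &tree));
        println!("ok");
    }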
@@ -993,27 +429,11 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivacyVisitor<'a, 'tcx> {
 
     fn visit_expr(&mut self, expr: &hir::Expr) {
         match expr.node {
-            hir::ExprField(ref base, name) => {
-                if let ty::TyStruct(def, _) = self.tcx.expr_ty_adjusted(&base).sty {
-                    self.check_field(expr.span,
-                                     def,
-                                     def.struct_variant(),
-                                     NamedField(name.node));
-                }
-            }
-            hir::ExprTupField(ref base, idx) => {
-                if let ty::TyStruct(def, _) = self.tcx.expr_ty_adjusted(&base).sty {
-                    self.check_field(expr.span,
-                                     def,
-                                     def.struct_variant(),
-                                     UnnamedField(idx.node));
-                }
-            }
-            hir::ExprMethodCall(name, _, _) => {
+            hir::ExprMethodCall(..) => {
                 let method_call = ty::MethodCall::expr(expr.id);
                 let method = self.tcx.tables.borrow().method_map[&method_call];
                 debug!("(privacy checking) checking impl method");
-                self.check_method(expr.span, method.def_id, name.node);
+                self.check_method(expr.span, method.def_id);
             }
             hir::ExprStruct(..) => {
                 let adt = self.tcx.expr_ty(expr).ty_adt_def().unwrap();
@@ -1022,7 +442,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivacyVisitor<'a, 'tcx> {
                 // Rather than computing the set of unmentioned fields
                 // (i.e. `all_fields - fields`), just check them all.
                 for field in &variant.fields {
-                    self.check_field(expr.span, adt, variant, NamedField(field.name));
+                    self.check_field(expr.span, adt, field);
                 }
             }
             hir::ExprPath(..) => {
@@ -1030,13 +450,13 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivacyVisitor<'a, 'tcx> {
                 if let Def::Struct(..) = self.tcx.resolve_expr(expr) {
                     let expr_ty = self.tcx.expr_ty(expr);
                     let def = match expr_ty.sty {
-                        ty::TyBareFn(_, &ty::BareFnTy { sig: ty::Binder(ty::FnSig {
+                        ty::TyFnDef(_, _, &ty::BareFnTy { sig: ty::Binder(ty::FnSig {
                             output: ty::FnConverging(ty), ..
                         }), ..}) => ty,
                         _ => expr_ty
                     }.ty_adt_def().unwrap();
                     let any_priv = def.struct_variant().fields.iter().any(|f| {
-                        f.vis != hir::Public && !self.local_private_accessible(f.did)
+                        !f.vis.is_accessible_from(self.curitem, &self.tcx.map)
                     });
                     if any_priv {
                         span_err!(self.tcx.sess, expr.span, E0450,
@@ -1064,8 +484,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivacyVisitor<'a, 'tcx> {
                 let def = self.tcx.def_map.borrow().get(&pattern.id).unwrap().full_def();
                 let variant = adt.variant_of_def(def);
                 for field in fields {
-                    self.check_field(pattern.span, adt, variant,
-                                     NamedField(field.node.name));
+                    self.check_field(pattern.span, adt, variant.field_named(field.node.name));
                 }
             }
 
@@ -1078,10 +497,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivacyVisitor<'a, 'tcx> {
                             if let PatKind::Wild = field.node {
                                 continue
                             }
-                            self.check_field(field.span,
-                                             def,
-                                             def.struct_variant(),
-                                             UnnamedField(i));
+                            self.check_field(field.span, def, &def.struct_variant().fields[i]);
                         }
                     }
                     ty::TyEnum(..) => {
@@ -1102,25 +518,6 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivacyVisitor<'a, 'tcx> {
         intravisit::walk_foreign_item(self, fi);
         self.in_foreign = false;
     }
-
-    fn visit_path(&mut self, path: &hir::Path, id: ast::NodeId) {
-        if !path.segments.is_empty() {
-            self.check_path(path.span, id, path.segments.last().unwrap().identifier.name);
-            intravisit::walk_path(self, path);
-        }
-    }
-
-    fn visit_path_list_item(&mut self, prefix: &hir::Path, item: &hir::PathListItem) {
-        let name = if let hir::PathListIdent { name, .. } = item.node {
-            name
-        } else if !prefix.segments.is_empty() {
-            prefix.segments.last().unwrap().identifier.name
-        } else {
-            self.tcx.sess.bug("`self` import in an import list with empty prefix");
-        };
-        self.check_path(item.span, item.node.id(), name);
-        intravisit::walk_path_list_item(self, prefix, item);
-    }
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -1128,47 +525,23 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivacyVisitor<'a, 'tcx> {
 ////////////////////////////////////////////////////////////////////////////////
 
 struct SanePrivacyVisitor<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
-    in_block: bool,
+    tcx: &'a TyCtxt<'tcx>,
 }
 
 impl<'a, 'tcx, 'v> Visitor<'v> for SanePrivacyVisitor<'a, 'tcx> {
-    /// We want to visit items in the context of their containing
-    /// module and so forth, so supply a crate for doing a deep walk.
-    fn visit_nested_item(&mut self, item: hir::ItemId) {
-        self.visit_item(self.tcx.map.expect_item(item.id))
-    }
-
     fn visit_item(&mut self, item: &hir::Item) {
         self.check_sane_privacy(item);
-        if self.in_block {
-            self.check_all_inherited(item);
-        }
-
-        let orig_in_block = self.in_block;
-
-        // Modules turn privacy back on, otherwise we inherit
-        self.in_block = if let hir::ItemMod(..) = item.node { false } else { orig_in_block };
-
         intravisit::walk_item(self, item);
-        self.in_block = orig_in_block;
-    }
-
-    fn visit_block(&mut self, b: &'v hir::Block) {
-        let orig_in_block = replace(&mut self.in_block, true);
-        intravisit::walk_block(self, b);
-        self.in_block = orig_in_block;
     }
 }
 
 impl<'a, 'tcx> SanePrivacyVisitor<'a, 'tcx> {
-    /// Validates all of the visibility qualifiers placed on the item given. This
-    /// ensures that there are no extraneous qualifiers that don't actually do
-    /// anything. In theory these qualifiers wouldn't parse, but that may happen
-    /// later on down the road...
+    /// Validate that items that shouldn't have visibility qualifiers don't have them.
+    /// Such qualifiers can be set by syntax extensions even if the parser doesn't allow them,
+    /// so we check things like variant fields too.
     fn check_sane_privacy(&self, item: &hir::Item) {
-        let check_inherited = |sp, vis, note: &str| {
-            if vis != hir::Inherited {
+        let check_inherited = |sp, vis: &hir::Visibility, note: &str| {
+            if *vis != hir::Inherited {
                 let mut err = struct_span_err!(self.tcx.sess, sp, E0449,
                                                "unnecessary visibility qualifier");
                 if !note.is_empty() {
@@ -1179,62 +552,35 @@ impl<'a, 'tcx> SanePrivacyVisitor<'a, 'tcx> {
         };
 
         match item.node {
-            // implementations of traits don't need visibility qualifiers because
-            // that's controlled by having the trait in scope.
             hir::ItemImpl(_, _, _, Some(..), _, ref impl_items) => {
-                check_inherited(item.span, item.vis,
+                check_inherited(item.span, &item.vis,
                                 "visibility qualifiers have no effect on trait impls");
                 for impl_item in impl_items {
-                    check_inherited(impl_item.span, impl_item.vis, "");
+                    check_inherited(impl_item.span, &impl_item.vis,
+                                    "visibility qualifiers have no effect on trait impl items");
                 }
             }
             hir::ItemImpl(_, _, _, None, _, _) => {
-                check_inherited(item.span, item.vis,
+                check_inherited(item.span, &item.vis,
                                 "place qualifiers on individual methods instead");
             }
             hir::ItemDefaultImpl(..) => {
-                check_inherited(item.span, item.vis,
+                check_inherited(item.span, &item.vis,
                                 "visibility qualifiers have no effect on trait impls");
             }
             hir::ItemForeignMod(..) => {
-                check_inherited(item.span, item.vis,
+                check_inherited(item.span, &item.vis,
                                 "place qualifiers on individual functions instead");
             }
-            hir::ItemStruct(..) | hir::ItemEnum(..) | hir::ItemTrait(..) |
-            hir::ItemConst(..) | hir::ItemStatic(..) | hir::ItemFn(..) |
-            hir::ItemMod(..) | hir::ItemExternCrate(..) |
-            hir::ItemUse(..) | hir::ItemTy(..) => {}
-        }
-    }
-
-    /// When inside of something like a function or a method, visibility has no
-    /// control over anything so this forbids any mention of any visibility
-    fn check_all_inherited(&self, item: &hir::Item) {
-        let check_inherited = |sp, vis| {
-            if vis != hir::Inherited {
-                span_err!(self.tcx.sess, sp, E0447,
-                          "visibility has no effect inside functions or block expressions");
-            }
-        };
-
-        check_inherited(item.span, item.vis);
-        match item.node {
-            hir::ItemImpl(_, _, _, _, _, ref impl_items) => {
-                for impl_item in impl_items {
-                    check_inherited(impl_item.span, impl_item.vis);
-                }
-            }
-            hir::ItemForeignMod(ref fm) => {
-                for fi in &fm.items {
-                    check_inherited(fi.span, fi.vis);
-                }
-            }
-            hir::ItemStruct(ref vdata, _) => {
-                for f in vdata.fields() {
-                    check_inherited(f.span, f.node.kind.visibility());
+            hir::ItemEnum(ref def, _) => {
+                for variant in &def.variants {
+                    for field in variant.node.data.fields() {
+                        check_inherited(field.span, &field.vis,
+                                        "visibility qualifiers have no effect on variant fields");
+                    }
                 }
             }
-            hir::ItemDefaultImpl(..) | hir::ItemEnum(..) | hir::ItemTrait(..) |
+            hir::ItemStruct(..) | hir::ItemTrait(..) |
             hir::ItemConst(..) | hir::ItemStatic(..) | hir::ItemFn(..) |
             hir::ItemMod(..) | hir::ItemExternCrate(..) |
             hir::ItemUse(..) | hir::ItemTy(..) => {}
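
For orientation, the kind of source this pass rejects looks roughly like the sketch below. It is illustrative only (not taken from the commit), and whether the plain parser even accepts the qualifier or it only arrives via a syntax extension varies by release, which is exactly why the visitor re-checks it:

    trait Foo {
        fn foo(&self);
    }

    struct Bar;

    pub impl Foo for Bar {       // E0449: unnecessary visibility qualifier
        pub fn foo(&self) {}     // E0449: no effect on trait impl items
    }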
@@ -1250,7 +596,7 @@ impl<'a, 'tcx> SanePrivacyVisitor<'a, 'tcx> {
 ///////////////////////////////////////////////////////////////////////////////
 
 struct ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     access_levels: &'a AccessLevels,
     in_variant: bool,
     // set of errors produced by this obsolete visitor
@@ -1305,8 +651,8 @@ impl<'a, 'tcx> ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
         }
     }
 
-    fn item_is_public(&self, id: &ast::NodeId, vis: hir::Visibility) -> bool {
-        self.access_levels.is_reachable(*id) || vis == hir::Public
+    fn item_is_public(&self, id: &ast::NodeId, vis: &hir::Visibility) -> bool {
+        self.access_levels.is_reachable(*id) || *vis == hir::Public
     }
 }
 
@@ -1435,7 +781,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx>
                                 match impl_item.node {
                                     hir::ImplItemKind::Const(..) |
                                     hir::ImplItemKind::Method(..)
-                                        if self.item_is_public(&impl_item.id, impl_item.vis) =>
+                                        if self.item_is_public(&impl_item.id, &impl_item.vis) =>
                                     {
                                         intravisit::walk_impl_item(self, impl_item)
                                     }
@@ -1477,14 +823,14 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx>
                     for impl_item in impl_items {
                         match impl_item.node {
                             hir::ImplItemKind::Const(..) => {
-                                if self.item_is_public(&impl_item.id, impl_item.vis) {
+                                if self.item_is_public(&impl_item.id, &impl_item.vis) {
                                     found_pub_static = true;
                                     intravisit::walk_impl_item(self, impl_item);
                                 }
                             }
                             hir::ImplItemKind::Method(ref sig, _) => {
                                 if sig.explicit_self.node == hir::SelfStatic &&
-                                      self.item_is_public(&impl_item.id, impl_item.vis) {
+                                      self.item_is_public(&impl_item.id, &impl_item.vis) {
                                     found_pub_static = true;
                                     intravisit::walk_impl_item(self, impl_item);
                                 }
@@ -1504,7 +850,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx>
             hir::ItemTy(..) => return,
 
             // not at all public, so we don't care
-            _ if !self.item_is_public(&item.id, item.vis) => {
+            _ if !self.item_is_public(&item.id, &item.vis) => {
                 return;
             }
 
@@ -1565,10 +911,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx>
     }
 
     fn visit_struct_field(&mut self, s: &hir::StructField) {
-        let vis = match s.node.kind {
-            hir::NamedField(_, vis) | hir::UnnamedField(vis) => vis
-        };
-        if vis == hir::Public || self.in_variant {
+        if s.vis == hir::Public || self.in_variant {
             intravisit::walk_struct_field(self, s);
         }
     }
@@ -1592,28 +935,42 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx>
 ///////////////////////////////////////////////////////////////////////////////
 
 struct SearchInterfaceForPrivateItemsVisitor<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
-    // Do not report an error when a private type is found
-    is_quiet: bool,
-    // Is private component found?
-    is_public: bool,
+    tcx: &'a TyCtxt<'tcx>,
+    /// The visitor checks that each component type is at least this visible
+    required_visibility: ty::Visibility,
+    /// The visibility of the least visible component that has been visited
+    min_visibility: ty::Visibility,
     old_error_set: &'a NodeSet,
 }
 
 impl<'a, 'tcx: 'a> SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> {
-    // Check if the type alias contain private types when substituted
-    fn is_public_type_alias(&self, item: &hir::Item, path: &hir::Path) -> bool {
+    fn new(tcx: &'a TyCtxt<'tcx>, old_error_set: &'a NodeSet) -> Self {
+        SearchInterfaceForPrivateItemsVisitor {
+            tcx: tcx,
+            min_visibility: ty::Visibility::Public,
+            required_visibility: ty::Visibility::PrivateExternal,
+            old_error_set: old_error_set,
+        }
+    }
+}
+
+impl<'a, 'tcx: 'a> SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> {
+    // Return the visibility of the type alias's least visible component type when substituted
+    fn substituted_alias_visibility(&self, item: &hir::Item, path: &hir::Path)
+                                    -> Option<ty::Visibility> {
         // We substitute type aliases only when determining impl publicity
         // FIXME: This will probably change and all type aliases will be substituted,
         // requires an amendment to RFC 136.
-        if !self.is_quiet {
-            return false
+        if self.required_visibility != ty::Visibility::PrivateExternal {
+            return None;
         }
         // Type alias is considered public if the aliased type is
         // public, even if the type alias itself is private. So, something
         // like `type A = u8; pub fn f() -> A {...}` doesn't cause an error.
         if let hir::ItemTy(ref ty, ref generics) = item.node {
-            let mut check = SearchInterfaceForPrivateItemsVisitor { is_public: true, ..*self };
+            let mut check = SearchInterfaceForPrivateItemsVisitor {
+                min_visibility: ty::Visibility::Public, ..*self
+            };
             check.visit_ty(ty);
             // If a private type alias with default type parameters is used in public
             // interface we must ensure, that the defaults are public if they are actually used.
@@ -1627,26 +984,23 @@ impl<'a, 'tcx: 'a> SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> {
                     check.visit_ty(default_ty);
                 }
             }
-            check.is_public
+            Some(check.min_visibility)
         } else {
-            false
+            None
         }
     }
 }
 
 impl<'a, 'tcx: 'a, 'v> Visitor<'v> for SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> {
     fn visit_ty(&mut self, ty: &hir::Ty) {
-        if self.is_quiet && !self.is_public {
-            // We are in quiet mode and a private type is already found, no need to proceed
-            return
-        }
         if let hir::TyPath(_, ref path) = ty.node {
             let def = self.tcx.def_map.borrow().get(&ty.id).unwrap().full_def();
             match def {
                 Def::PrimTy(..) | Def::SelfTy(..) | Def::TyParam(..) => {
                     // Public
                 }
-                Def::AssociatedTy(..) if self.is_quiet => {
+                Def::AssociatedTy(..)
+                    if self.required_visibility == ty::Visibility::PrivateExternal => {
                     // Conservatively approximate the whole type alias as public without
                     // recursing into its components when determining impl publicity.
                     // For example, `impl <Type as Trait>::Alias {...}` may be a public impl
@@ -1660,21 +1014,24 @@ impl<'a, 'tcx: 'a, 'v> Visitor<'v> for SearchInterfaceForPrivateItemsVisitor<'a,
                     // Non-local means public (private items can't leave their crate, modulo bugs)
                     if let Some(node_id) = self.tcx.map.as_local_node_id(def_id) {
                         let item = self.tcx.map.expect_item(node_id);
-                        if item.vis != hir::Public && !self.is_public_type_alias(item, path) {
-                            if !self.is_quiet {
-                                if self.old_error_set.contains(&ty.id) {
-                                    span_err!(self.tcx.sess, ty.span, E0446,
-                                              "private type in public interface");
-                                } else {
-                                    self.tcx.sess.add_lint (
-                                        lint::builtin::PRIVATE_IN_PUBLIC,
-                                        node_id,
-                                        ty.span,
-                                        format!("private type in public interface"),
-                                    );
-                                }
+                        let vis = match self.substituted_alias_visibility(item, path) {
+                            Some(vis) => vis,
+                            None => ty::Visibility::from_hir(&item.vis, node_id, &self.tcx),
+                        };
+
+                        if !vis.is_at_least(self.min_visibility, &self.tcx.map) {
+                            self.min_visibility = vis;
+                        }
+                        if !vis.is_at_least(self.required_visibility, &self.tcx.map) {
+                            if self.old_error_set.contains(&ty.id) {
+                                span_err!(self.tcx.sess, ty.span, E0446,
+                                          "private type in public interface");
+                            } else {
+                                self.tcx.sess.add_lint(lint::builtin::PRIVATE_IN_PUBLIC,
+                                                       node_id,
+                                                       ty.span,
+                                                       format!("private type in public interface"));
                             }
-                            self.is_public = false;
                         }
                     }
                 }
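
The behaviour being encoded here can be summed up in a small sketch (illustrative, not part of the commit). A private type reaching a public signature is reported, as a hard E0446 if the node is in the old error set and as the PRIVATE_IN_PUBLIC lint otherwise, while a private alias of a public type is fine, as the `type A = u8` comment above notes:

    struct Priv;                 // private type

    pub fn leak() -> Priv {      // private type in public interface:
        Priv                     // E0446 or the PRIVATE_IN_PUBLIC lint
    }

    type A = u8;                 // private alias of a public type
    pub fn ok() -> A { 0 }       // accepted: the aliased type is public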
@@ -1686,28 +1043,26 @@ impl<'a, 'tcx: 'a, 'v> Visitor<'v> for SearchInterfaceForPrivateItemsVisitor<'a,
     }
 
     fn visit_trait_ref(&mut self, trait_ref: &hir::TraitRef) {
-        if self.is_quiet && !self.is_public {
-            // We are in quiet mode and a private type is already found, no need to proceed
-            return
-        }
         // Non-local means public (private items can't leave their crate, modulo bugs)
         let def_id = self.tcx.trait_ref_to_def_id(trait_ref);
         if let Some(node_id) = self.tcx.map.as_local_node_id(def_id) {
             let item = self.tcx.map.expect_item(node_id);
-            if item.vis != hir::Public {
-                if !self.is_quiet {
-                    if self.old_error_set.contains(&trait_ref.ref_id) {
-                        span_err!(self.tcx.sess, trait_ref.path.span, E0445,
-                                  "private trait in public interface");
-                    } else {
-                        self.tcx.sess.add_lint(lint::builtin::PRIVATE_IN_PUBLIC,
-                                               node_id,
-                                               trait_ref.path.span,
-                                               "private trait in public interface (error E0445)"
-                                                    .to_string());
-                    }
+            let vis = ty::Visibility::from_hir(&item.vis, node_id, &self.tcx);
+
+            if !vis.is_at_least(self.min_visibility, &self.tcx.map) {
+                self.min_visibility = vis;
+            }
+            if !vis.is_at_least(self.required_visibility, &self.tcx.map) {
+                if self.old_error_set.contains(&trait_ref.ref_id) {
+                    span_err!(self.tcx.sess, trait_ref.path.span, E0445,
+                              "private trait in public interface");
+                } else {
+                    self.tcx.sess.add_lint(lint::builtin::PRIVATE_IN_PUBLIC,
+                                           node_id,
+                                           trait_ref.path.span,
+                                           "private trait in public interface (error E0445)"
+                                                .to_string());
                 }
-                self.is_public = false;
             }
         }
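
The trait counterpart works the same way; a minimal sketch of what E0445 (or the corresponding lint) covers, illustrative and not from the commit:

    trait Foo {}                 // private trait

    pub trait Bar: Foo {}        // private trait in public interface: E0445
    pub fn f<T: Foo>(_: T) {}    // same, via the public bound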
 
@@ -1723,35 +1078,35 @@ impl<'a, 'tcx: 'a, 'v> Visitor<'v> for SearchInterfaceForPrivateItemsVisitor<'a,
 }
 
 struct PrivateItemsInPublicInterfacesVisitor<'a, 'tcx: 'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     old_error_set: &'a NodeSet,
 }
 
 impl<'a, 'tcx> PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> {
     // A type is considered public if it doesn't contain any private components
-    fn is_public_ty(&self, ty: &hir::Ty) -> bool {
-        let mut check = SearchInterfaceForPrivateItemsVisitor {
-            tcx: self.tcx, is_quiet: true, is_public: true, old_error_set: self.old_error_set
-        };
+    fn ty_visibility(&self, ty: &hir::Ty) -> ty::Visibility {
+        let mut check = SearchInterfaceForPrivateItemsVisitor::new(self.tcx, self.old_error_set);
         check.visit_ty(ty);
-        check.is_public
+        check.min_visibility
     }
 
     // A trait reference is considered public if it doesn't contain any private components
-    fn is_public_trait_ref(&self, trait_ref: &hir::TraitRef) -> bool {
-        let mut check = SearchInterfaceForPrivateItemsVisitor {
-            tcx: self.tcx, is_quiet: true, is_public: true, old_error_set: self.old_error_set
-        };
+    fn trait_ref_visibility(&self, trait_ref: &hir::TraitRef) -> ty::Visibility {
+        let mut check = SearchInterfaceForPrivateItemsVisitor::new(self.tcx, self.old_error_set);
         check.visit_trait_ref(trait_ref);
-        check.is_public
+        check.min_visibility
     }
 }
 
 impl<'a, 'tcx, 'v> Visitor<'v> for PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> {
     fn visit_item(&mut self, item: &hir::Item) {
-        let mut check = SearchInterfaceForPrivateItemsVisitor {
-            tcx: self.tcx, is_quiet: false, is_public: true, old_error_set: self.old_error_set
+        let min = |vis1: ty::Visibility, vis2| {
+            if vis1.is_at_least(vis2, &self.tcx.map) { vis2 } else { vis1 }
         };
+
+        let mut check = SearchInterfaceForPrivateItemsVisitor::new(self.tcx, self.old_error_set);
+        let item_visibility = ty::Visibility::from_hir(&item.vis, item.id, &self.tcx);
+
         match item.node {
             // Crates are always public
             hir::ItemExternCrate(..) => {}
@@ -1762,27 +1117,26 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivateItemsInPublicInterfacesVisitor<'a, 'tc
             // Subitems of these items have inherited publicity
             hir::ItemConst(..) | hir::ItemStatic(..) | hir::ItemFn(..) |
             hir::ItemEnum(..) | hir::ItemTrait(..) | hir::ItemTy(..) => {
-                if item.vis == hir::Public {
-                    check.visit_item(item);
-                }
+                check.required_visibility = item_visibility;
+                check.visit_item(item);
             }
             // Subitems of foreign modules have their own publicity
             hir::ItemForeignMod(ref foreign_mod) => {
                 for foreign_item in &foreign_mod.items {
-                    if foreign_item.vis == hir::Public {
-                        check.visit_foreign_item(foreign_item);
-                    }
+                    check.required_visibility =
+                        ty::Visibility::from_hir(&foreign_item.vis, item.id, &self.tcx);
+                    check.visit_foreign_item(foreign_item);
                 }
             }
             // Subitems of structs have their own publicity
             hir::ItemStruct(ref struct_def, ref generics) => {
-                if item.vis == hir::Public {
-                    check.visit_generics(generics);
-                    for field in struct_def.fields() {
-                        if field.node.kind.visibility() == hir::Public {
-                            check.visit_struct_field(field);
-                        }
-                    }
+                check.required_visibility = item_visibility;
+                check.visit_generics(generics);
+
+                for field in struct_def.fields() {
+                    let field_visibility = ty::Visibility::from_hir(&field.vis, item.id, &self.tcx);
+                    check.required_visibility = min(item_visibility, field_visibility);
+                    check.visit_struct_field(field);
                 }
             }
             // The interface is empty
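
In the struct arm above, each field is checked against the minimum of the struct's and the field's own visibility, so only fields that are actually exposed are constrained. A sketch of the distinction (illustrative, not part of the commit):

    struct Priv;

    pub struct S {
        pub exposed: Priv,       // checked against `pub`: private type in public interface
        hidden: Priv,            // checked against the private field: accepted
    }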
@@ -1790,60 +1144,45 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivateItemsInPublicInterfacesVisitor<'a, 'tc
             // An inherent impl is public when its type is public
             // Subitems of inherent impls have their own publicity
             hir::ItemImpl(_, _, ref generics, None, ref ty, ref impl_items) => {
-                if self.is_public_ty(ty) {
-                    check.visit_generics(generics);
-                    for impl_item in impl_items {
-                        if impl_item.vis == hir::Public {
-                            check.visit_impl_item(impl_item);
-                        }
-                    }
+                let ty_vis = self.ty_visibility(ty);
+                check.required_visibility = ty_vis;
+                check.visit_generics(generics);
+
+                for impl_item in impl_items {
+                    let impl_item_vis =
+                        ty::Visibility::from_hir(&impl_item.vis, item.id, &self.tcx);
+                    check.required_visibility = min(impl_item_vis, ty_vis);
+                    check.visit_impl_item(impl_item);
                 }
             }
             // A trait impl is public when both its type and its trait are public
             // Subitems of trait impls have inherited publicity
             hir::ItemImpl(_, _, ref generics, Some(ref trait_ref), ref ty, ref impl_items) => {
-                if self.is_public_ty(ty) && self.is_public_trait_ref(trait_ref) {
-                    check.visit_generics(generics);
-                    for impl_item in impl_items {
-                        check.visit_impl_item(impl_item);
-                    }
+                let vis = min(self.ty_visibility(ty), self.trait_ref_visibility(trait_ref));
+                check.required_visibility = vis;
+                check.visit_generics(generics);
+                for impl_item in impl_items {
+                    check.visit_impl_item(impl_item);
                 }
             }
         }
     }
 }
 
-pub fn check_crate(tcx: &ty::ctxt,
-                   export_map: &def::ExportMap,
-                   external_exports: ExternalExports)
-                   -> AccessLevels {
+pub fn check_crate(tcx: &TyCtxt, export_map: &def::ExportMap) -> AccessLevels {
     let _task = tcx.dep_graph.in_task(DepNode::Privacy);
 
     let krate = tcx.map.krate();
 
-    // Sanity check to make sure that all privacy usage and controls are
-    // reasonable.
-    let mut visitor = SanePrivacyVisitor {
-        tcx: tcx,
-        in_block: false,
-    };
-    intravisit::walk_crate(&mut visitor, krate);
-
-    // Figure out who everyone's parent is
-    let mut visitor = ParentVisitor {
-        tcx: tcx,
-        parents: NodeMap(),
-        curparent: ast::DUMMY_NODE_ID,
-    };
-    intravisit::walk_crate(&mut visitor, krate);
+    // Sanity check to make sure that all privacy usage is reasonable.
+    let mut visitor = SanePrivacyVisitor { tcx: tcx };
+    krate.visit_all_items(&mut visitor);
 
     // Use the parent map to check the privacy of everything
     let mut visitor = PrivacyVisitor {
         curitem: ast::DUMMY_NODE_ID,
         in_foreign: false,
         tcx: tcx,
-        parents: visitor.parents,
-        external_exports: external_exports,
     };
     intravisit::walk_crate(&mut visitor, krate);
 
index 82e952d2beef8a7fa5681541ed5c7cc4600c8333..cf477c2a956aab6d661cb1a41fb0c4a2fd6463db 100644 (file)
@@ -13,5 +13,4 @@ log = { path = "../liblog" }
 syntax = { path = "../libsyntax" }
 rustc = { path = "../librustc" }
 rustc_bitflags = { path = "../librustc_bitflags" }
-rustc_front = { path = "../librustc_front" }
 arena = { path = "../libarena" }
index 385fae46cbae8672a5dc3101736df813a50f88b6..003450cd6fd7984b81d3e1add6f333175828b371 100644 (file)
 //! any imports resolved.
 
 use DefModifiers;
-use resolve_imports::ImportDirective;
-use resolve_imports::ImportDirectiveSubclass::{self, SingleImport, GlobImport};
+use resolve_imports::ImportDirectiveSubclass::{self, GlobImport};
 use Module;
 use Namespace::{self, TypeNS, ValueNS};
 use {NameBinding, NameBindingKind};
-use module_to_string;
 use ParentLink::{ModuleParentLink, BlockParentLink};
 use Resolver;
-use resolve_imports::Shadowable;
 use {resolve_error, resolve_struct_error, ResolutionError};
 
-use rustc::middle::cstore::{CrateStore, ChildItem, DlDef, DlField, DlImpl};
-use rustc::middle::def::*;
-use rustc::middle::def_id::{CRATE_DEF_INDEX, DefId};
-use rustc::middle::ty::VariantKind;
+use rustc::middle::cstore::{CrateStore, ChildItem, DlDef};
+use rustc::lint;
+use rustc::hir::def::*;
+use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId};
+use rustc::ty::{self, VariantKind};
 
-use syntax::ast::{Name, NodeId};
+use syntax::ast::Name;
 use syntax::attr::AttrMetaMethods;
-use syntax::parse::token::special_idents;
+use syntax::parse::token::{special_idents, SELF_KEYWORD_NAME, SUPER_KEYWORD_NAME};
 use syntax::codemap::{Span, DUMMY_SP};
 
-use rustc_front::hir;
-use rustc_front::hir::{Block, DeclItem};
-use rustc_front::hir::{ForeignItem, ForeignItemFn, ForeignItemStatic};
-use rustc_front::hir::{Item, ItemConst, ItemEnum, ItemExternCrate, ItemFn};
-use rustc_front::hir::{ItemForeignMod, ItemImpl, ItemMod, ItemStatic, ItemDefaultImpl};
-use rustc_front::hir::{ItemStruct, ItemTrait, ItemTy, ItemUse};
-use rustc_front::hir::{NamedField, PathListIdent, PathListMod};
-use rustc_front::hir::StmtDecl;
-use rustc_front::hir::UnnamedField;
-use rustc_front::hir::{Variant, ViewPathGlob, ViewPathList, ViewPathSimple};
-use rustc_front::hir::Visibility;
-use rustc_front::intravisit::{self, Visitor};
-
-use std::mem::replace;
-use std::ops::{Deref, DerefMut};
-
-struct GraphBuilder<'a, 'b: 'a, 'tcx: 'b> {
-    resolver: &'a mut Resolver<'b, 'tcx>,
-}
-
-impl<'a, 'b:'a, 'tcx:'b> Deref for GraphBuilder<'a, 'b, 'tcx> {
-    type Target = Resolver<'b, 'tcx>;
-
-    fn deref(&self) -> &Resolver<'b, 'tcx> {
-        &*self.resolver
-    }
-}
-
-impl<'a, 'b:'a, 'tcx:'b> DerefMut for GraphBuilder<'a, 'b, 'tcx> {
-    fn deref_mut(&mut self) -> &mut Resolver<'b, 'tcx> {
-        &mut *self.resolver
-    }
-}
+use rustc::hir;
+use rustc::hir::{Block, DeclItem};
+use rustc::hir::{ForeignItem, ForeignItemFn, ForeignItemStatic};
+use rustc::hir::{Item, ItemConst, ItemEnum, ItemExternCrate, ItemFn};
+use rustc::hir::{ItemForeignMod, ItemImpl, ItemMod, ItemStatic, ItemDefaultImpl};
+use rustc::hir::{ItemStruct, ItemTrait, ItemTy, ItemUse};
+use rustc::hir::{PathListIdent, PathListMod, StmtDecl};
+use rustc::hir::{Variant, ViewPathGlob, ViewPathList, ViewPathSimple};
+use rustc::hir::intravisit::{self, Visitor};
 
 trait ToNameBinding<'a> {
     fn to_name_binding(self) -> NameBinding<'a>;
@@ -86,12 +60,12 @@ impl<'a> ToNameBinding<'a> for (Def, Span, DefModifiers) {
     }
 }
 
-impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
+impl<'b, 'tcx:'b> Resolver<'b, 'tcx> {
     /// Constructs the reduced graph for the entire crate.
-    fn build_reduced_graph(self, krate: &hir::Crate) {
+    pub fn build_reduced_graph(&mut self, krate: &hir::Crate) {
         let mut visitor = BuildReducedGraphVisitor {
             parent: self.graph_root,
-            builder: self,
+            resolver: self,
         };
         intravisit::walk_crate(&mut visitor, krate);
     }
@@ -100,43 +74,15 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
     fn try_define<T>(&self, parent: Module<'b>, name: Name, ns: Namespace, def: T)
         where T: ToNameBinding<'b>
     {
-        let _ = parent.try_define_child(name, ns, self.new_name_binding(def.to_name_binding()));
+        let _ = parent.try_define_child(name, ns, def.to_name_binding());
     }
 
     /// Defines `name` in namespace `ns` of module `parent` to be `def` if it is not yet defined;
     /// otherwise, reports an error.
     fn define<T: ToNameBinding<'b>>(&self, parent: Module<'b>, name: Name, ns: Namespace, def: T) {
-        let binding = self.new_name_binding(def.to_name_binding());
-        let old_binding = match parent.try_define_child(name, ns, binding) {
-            Ok(()) => return,
-            Err(old_binding) => old_binding,
-        };
-
-        let span = binding.span.unwrap_or(DUMMY_SP);
-        if !old_binding.is_extern_crate() && !binding.is_extern_crate() {
-            // Record an error here by looking up the namespace that had the duplicate
-            let ns_str = match ns { TypeNS => "type or module", ValueNS => "value" };
-            let resolution_error = ResolutionError::DuplicateDefinition(ns_str, name);
-            let mut err = resolve_struct_error(self, span, resolution_error);
-
-            if let Some(sp) = old_binding.span {
-                let note = format!("first definition of {} `{}` here", ns_str, name);
-                err.span_note(sp, &note);
-            }
-            err.emit();
-        } else if old_binding.is_extern_crate() && binding.is_extern_crate() {
-            span_err!(self.session,
-                      span,
-                      E0259,
-                      "an external crate named `{}` has already been imported into this module",
-                      name);
-        } else {
-            span_err!(self.session,
-                      span,
-                      E0260,
-                      "the name `{}` conflicts with an external crate \
-                      that has been imported into this module",
-                      name);
+        let binding = def.to_name_binding();
+        if let Err(old_binding) = parent.try_define_child(name, ns, binding.clone()) {
+            self.report_conflict(parent, name, ns, old_binding, &binding);
         }
     }
 
@@ -155,7 +101,8 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
     }
 
     /// Constructs the reduced graph for one item.
-    fn build_reduced_graph_for_item(&mut self, item: &Item, parent: Module<'b>) -> Module<'b> {
+    fn build_reduced_graph_for_item(&mut self, item: &Item, parent_ref: &mut Module<'b>) {
+        let parent = *parent_ref;
         let name = item.name;
         let sp = item.span;
         let is_public = item.vis == hir::Public;
@@ -170,8 +117,10 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
                 // Extract and intern the module part of the path. For
                 // globs and lists, the path is found directly in the AST;
                 // for simple paths we have to munge the path a little.
-                let module_path = match view_path.node {
+                let is_global;
+                let module_path: Vec<Name> = match view_path.node {
                     ViewPathSimple(_, ref full_path) => {
+                        is_global = full_path.global;
                         full_path.segments
                                  .split_last()
                                  .unwrap()
@@ -183,6 +132,7 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
 
                     ViewPathGlob(ref module_ident_path) |
                     ViewPathList(ref module_ident_path, _) => {
+                        is_global = module_ident_path.global;
                         module_ident_path.segments
                                          .iter()
                                          .map(|seg| seg.identifier.name)
@@ -190,15 +140,22 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
                     }
                 };
 
+                // Checking for special identifiers in path
+                // prevent `self` or `super` at beginning of global path
+                if is_global && (module_path.first() == Some(&SELF_KEYWORD_NAME) ||
+                                 module_path.first() == Some(&SUPER_KEYWORD_NAME)) {
+                    self.session.add_lint(
+                        lint::builtin::SUPER_OR_SELF_IN_GLOBAL_PATH,
+                        item.id,
+                        item.span,
+                        format!("expected identifier, found keyword `{}`",
+                                module_path.first().unwrap().as_str()));
+                }
+
                 // Build up the import directives.
-                let shadowable = item.attrs.iter().any(|attr| {
+                let is_prelude = item.attrs.iter().any(|attr| {
                     attr.name() == special_idents::prelude_import.name.as_str()
                 });
-                let shadowable = if shadowable {
-                    Shadowable::Always
-                } else {
-                    Shadowable::Never
-                };
 
                 match view_path.node {
                     ViewPathSimple(binding, ref full_path) => {
@@ -209,14 +166,14 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
                                           ResolutionError::SelfImportsOnlyAllowedWithin);
                         }
 
-                        let subclass = SingleImport(binding, source_name);
-                        self.build_import_directive(parent,
-                                                    module_path,
+                        let subclass = ImportDirectiveSubclass::single(binding, source_name);
+                        self.unresolved_imports += 1;
+                        parent.add_import_directive(module_path,
                                                     subclass,
                                                     view_path.span,
                                                     item.id,
                                                     is_public,
-                                                    shadowable);
+                                                    is_prelude);
                     }
                     ViewPathList(_, ref source_items) => {
                         // Make sure there's at most one `mod` import in the list.
@@ -260,26 +217,26 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
                                     (module_path.to_vec(), name, rename)
                                 }
                             };
-                            self.build_import_directive(parent,
-                                                        module_path,
-                                                        SingleImport(rename, name),
+                            let subclass = ImportDirectiveSubclass::single(rename, name);
+                            self.unresolved_imports += 1;
+                            parent.add_import_directive(module_path,
+                                                        subclass,
                                                         source_item.span,
                                                         source_item.node.id(),
                                                         is_public,
-                                                        shadowable);
+                                                        is_prelude);
                         }
                     }
                     ViewPathGlob(_) => {
-                        self.build_import_directive(parent,
-                                                    module_path,
+                        self.unresolved_imports += 1;
+                        parent.add_import_directive(module_path,
                                                     GlobImport,
                                                     view_path.span,
                                                     item.id,
                                                     is_public,
-                                                    shadowable);
+                                                    is_prelude);
                     }
                 }
-                parent
             }
 
             ItemExternCrate(_) => {
@@ -290,25 +247,13 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
                         krate: crate_id,
                         index: CRATE_DEF_INDEX,
                     };
-                    self.external_exports.insert(def_id);
                     let parent_link = ModuleParentLink(parent, name);
                     let def = Def::Mod(def_id);
-                    let local_def_id = self.ast_map.local_def_id(item.id);
-                    let external_module =
-                        self.new_extern_crate_module(parent_link, def, is_public, local_def_id);
-                    self.define(parent, name, TypeNS, (external_module, sp));
-
-                    if is_public {
-                        let export = Export { name: name, def_id: def_id };
-                        if let Some(def_id) = parent.def_id() {
-                            let node_id = self.resolver.ast_map.as_local_node_id(def_id).unwrap();
-                            self.export_map.entry(node_id).or_insert(Vec::new()).push(export);
-                        }
-                    }
+                    let module = self.new_extern_crate_module(parent_link, def, is_public, item.id);
+                    self.define(parent, name, TypeNS, (module, sp));
 
-                    self.build_reduced_graph_for_external_crate(external_module);
+                    self.build_reduced_graph_for_external_crate(module);
                 }
-                parent
             }
 
             ItemMod(..) => {
@@ -317,36 +262,30 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
                 let module = self.new_module(parent_link, Some(def), false, is_public);
                 self.define(parent, name, TypeNS, (module, sp));
                 parent.module_children.borrow_mut().insert(item.id, module);
-                module
+                *parent_ref = module;
             }
 
-            ItemForeignMod(..) => parent,
+            ItemForeignMod(..) => {}
 
             // These items live in the value namespace.
             ItemStatic(_, m, _) => {
                 let mutbl = m == hir::MutMutable;
                 let def = Def::Static(self.ast_map.local_def_id(item.id), mutbl);
                 self.define(parent, name, ValueNS, (def, sp, modifiers));
-                parent
             }
             ItemConst(_, _) => {
                 let def = Def::Const(self.ast_map.local_def_id(item.id));
                 self.define(parent, name, ValueNS, (def, sp, modifiers));
-                parent
             }
             ItemFn(_, _, _, _, _, _) => {
                 let def = Def::Fn(self.ast_map.local_def_id(item.id));
                 self.define(parent, name, ValueNS, (def, sp, modifiers));
-                parent
             }
 
             // These items live in the type namespace.
             ItemTy(..) => {
-                let parent_link = ModuleParentLink(parent, name);
                 let def = Def::TyAlias(self.ast_map.local_def_id(item.id));
-                let module = self.new_module(parent_link, Some(def), false, is_public);
-                self.define(parent, name, TypeNS, (module, sp));
-                parent
+                self.define(parent, name, TypeNS, (def, sp, modifiers));
             }
 
             ItemEnum(ref enum_definition, _) => {
@@ -365,7 +304,6 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
                     self.build_reduced_graph_for_variant(variant, item_def_id,
                                                          module, variant_modifiers);
                 }
-                parent
             }
 
             // These items live in both the type and value namespaces.
@@ -382,23 +320,15 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
                 }
 
                 // Record the def ID and fields of this struct.
-                let named_fields = struct_def.fields()
-                                             .iter()
-                                             .filter_map(|f| {
-                                                 match f.node.kind {
-                                                     NamedField(name, _) => Some(name),
-                                                     UnnamedField(_) => None,
-                                                 }
-                                             })
-                                             .collect();
+                let field_names = struct_def.fields()
+                                            .iter()
+                                            .map(|f| f.name)
+                                            .collect();
                 let item_def_id = self.ast_map.local_def_id(item.id);
-                self.structs.insert(item_def_id, named_fields);
-
-                parent
+                self.structs.insert(item_def_id, field_names);
             }
 
-            ItemDefaultImpl(_, _) |
-            ItemImpl(..) => parent,
+            ItemDefaultImpl(_, _) | ItemImpl(..) => {}
 
             ItemTrait(_, _, _, ref items) => {
                 let def_id = self.ast_map.local_def_id(item.id);
@@ -423,8 +353,6 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
 
                     self.trait_item_map.insert((item.name, def_id), item_def_id);
                 }
-
-                parent
             }
         }
     }
@@ -475,7 +403,7 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
         self.define(parent, name, ValueNS, (def, foreign_item.span, modifiers));
     }
 
-    fn build_reduced_graph_for_block(&mut self, block: &Block, parent: Module<'b>) -> Module<'b> {
+    fn build_reduced_graph_for_block(&mut self, block: &Block, parent: &mut Module<'b>) {
         if self.block_needs_anonymous_module(block) {
             let block_id = block.id;
 
@@ -486,57 +414,52 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
             let parent_link = BlockParentLink(parent, block_id);
             let new_module = self.new_module(parent_link, None, false, false);
             parent.module_children.borrow_mut().insert(block_id, new_module);
-            new_module
-        } else {
-            parent
+            *parent = new_module;
         }
     }
 
-    fn handle_external_def(&mut self,
-                           def: Def,
-                           vis: Visibility,
-                           final_ident: &str,
-                           name: Name,
-                           new_parent: Module<'b>) {
-        debug!("(building reduced graph for external crate) building external def {}, priv {:?}",
-               final_ident,
-               vis);
-        let is_public = vis == hir::Public;
+    /// Builds the reduced graph for a single item in an external crate.
+    fn build_reduced_graph_for_external_crate_def(&mut self, parent: Module<'b>, xcdef: ChildItem) {
+        let def = match xcdef.def {
+            DlDef(def) => def,
+            _ => return,
+        };
+
+        if let Def::ForeignMod(def_id) = def {
+            // Foreign modules have no names. Recur and populate eagerly.
+            for child in self.session.cstore.item_children(def_id) {
+                self.build_reduced_graph_for_external_crate_def(parent, child);
+            }
+            return;
+        }
+
+        let name = xcdef.name;
+        let is_public = xcdef.vis == ty::Visibility::Public || parent.is_trait();
 
         let mut modifiers = DefModifiers::empty();
         if is_public {
             modifiers = modifiers | DefModifiers::PUBLIC;
         }
-        if new_parent.is_normal() {
+        if parent.is_normal() {
             modifiers = modifiers | DefModifiers::IMPORTABLE;
         }
 
-        let is_exported = is_public &&
-                          match new_parent.def_id() {
-            None => true,
-            Some(did) => self.external_exports.contains(&did),
-        };
-        if is_exported {
-            self.external_exports.insert(def.def_id());
-        }
-
         match def {
-            Def::Mod(_) | Def::ForeignMod(_) | Def::Enum(..) | Def::TyAlias(..) => {
+            Def::Mod(_) | Def::ForeignMod(_) | Def::Enum(..) => {
                 debug!("(building reduced graph for external crate) building module {} {}",
-                       final_ident,
+                       name,
                        is_public);
-                let parent_link = ModuleParentLink(new_parent, name);
+                let parent_link = ModuleParentLink(parent, name);
                 let module = self.new_module(parent_link, Some(def), true, is_public);
-                self.try_define(new_parent, name, TypeNS, (module, DUMMY_SP));
+                self.try_define(parent, name, TypeNS, (module, DUMMY_SP));
             }
             Def::Variant(_, variant_id) => {
-                debug!("(building reduced graph for external crate) building variant {}",
-                       final_ident);
+                debug!("(building reduced graph for external crate) building variant {}", name);
                 // Variants are always treated as importable to allow them to be glob used.
                 // All variants are defined in both type and value namespaces as future-proofing.
                 let modifiers = DefModifiers::PUBLIC | DefModifiers::IMPORTABLE;
-                self.try_define(new_parent, name, TypeNS, (def, DUMMY_SP, modifiers));
-                self.try_define(new_parent, name, ValueNS, (def, DUMMY_SP, modifiers));
+                self.try_define(parent, name, TypeNS, (def, DUMMY_SP, modifiers));
+                self.try_define(parent, name, ValueNS, (def, DUMMY_SP, modifiers));
                 if self.session.cstore.variant_kind(variant_id) == Some(VariantKind::Struct) {
                     // Not adding fields for variants as they are not accessed with a self receiver
                     self.structs.insert(variant_id, Vec::new());
@@ -548,12 +471,11 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
             Def::AssociatedConst(..) |
             Def::Method(..) => {
                 debug!("(building reduced graph for external crate) building value (fn/static) {}",
-                       final_ident);
-                self.try_define(new_parent, name, ValueNS, (def, DUMMY_SP, modifiers));
+                       name);
+                self.try_define(parent, name, ValueNS, (def, DUMMY_SP, modifiers));
             }
             Def::Trait(def_id) => {
-                debug!("(building reduced graph for external crate) building type {}",
-                       final_ident);
+                debug!("(building reduced graph for external crate) building type {}", name);
 
                 // If this is a trait, add all the trait item names to the trait
                 // info.
@@ -568,30 +490,24 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
                            trait_item_name);
 
                     self.trait_item_map.insert((trait_item_name, def_id), trait_item_def.def_id());
-
-                    if is_exported {
-                        self.external_exports.insert(trait_item_def.def_id());
-                    }
                 }
 
-                let parent_link = ModuleParentLink(new_parent, name);
+                let parent_link = ModuleParentLink(parent, name);
                 let module = self.new_module(parent_link, Some(def), true, is_public);
-                self.try_define(new_parent, name, TypeNS, (module, DUMMY_SP));
+                self.try_define(parent, name, TypeNS, (module, DUMMY_SP));
             }
-            Def::AssociatedTy(..) => {
-                debug!("(building reduced graph for external crate) building type {}",
-                       final_ident);
-                self.try_define(new_parent, name, TypeNS, (def, DUMMY_SP, modifiers));
+            Def::TyAlias(..) | Def::AssociatedTy(..) => {
+                debug!("(building reduced graph for external crate) building type {}", name);
+                self.try_define(parent, name, TypeNS, (def, DUMMY_SP, modifiers));
             }
             Def::Struct(def_id)
                 if self.session.cstore.tuple_struct_definition_if_ctor(def_id).is_none() => {
-                debug!("(building reduced graph for external crate) building type and value for \
-                        {}",
-                       final_ident);
-                self.try_define(new_parent, name, TypeNS, (def, DUMMY_SP, modifiers));
+                debug!("(building reduced graph for external crate) building type and value for {}",
+                       name);
+                self.try_define(parent, name, TypeNS, (def, DUMMY_SP, modifiers));
                 if let Some(ctor_def_id) = self.session.cstore.struct_ctor_def_id(def_id) {
                     let def = Def::Struct(ctor_def_id);
-                    self.try_define(new_parent, name, ValueNS, (def, DUMMY_SP, modifiers));
+                    self.try_define(parent, name, ValueNS, (def, DUMMY_SP, modifiers));
                 }
 
                 // Record the def ID and fields of this struct.
@@ -606,74 +522,11 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
             Def::Label(..) |
             Def::SelfTy(..) |
             Def::Err => {
-                panic!("didn't expect `{:?}`", def);
+                bug!("didn't expect `{:?}`", def);
             }
         }
     }
 
-    /// Builds the reduced graph for a single item in an external crate.
-    fn build_reduced_graph_for_external_crate_def(&mut self,
-                                                  root: Module<'b>,
-                                                  xcdef: ChildItem) {
-        match xcdef.def {
-            DlDef(def) => {
-                // Add the new child item, if necessary.
-                match def {
-                    Def::ForeignMod(def_id) => {
-                        // Foreign modules have no names. Recur and populate
-                        // eagerly.
-                        for child in self.session.cstore.item_children(def_id) {
-                            self.build_reduced_graph_for_external_crate_def(root, child)
-                        }
-                    }
-                    _ => {
-                        self.handle_external_def(def,
-                                                 xcdef.vis,
-                                                 &xcdef.name.as_str(),
-                                                 xcdef.name,
-                                                 root);
-                    }
-                }
-            }
-            DlImpl(_) => {
-                debug!("(building reduced graph for external crate) ignoring impl");
-            }
-            DlField => {
-                debug!("(building reduced graph for external crate) ignoring field");
-            }
-        }
-    }
-
-    /// Builds the reduced graph rooted at the given external module.
-    fn populate_external_module(&mut self, module: Module<'b>) {
-        debug!("(populating external module) attempting to populate {}",
-               module_to_string(module));
-
-        let def_id = match module.def_id() {
-            None => {
-                debug!("(populating external module) ... no def ID!");
-                return;
-            }
-            Some(def_id) => def_id,
-        };
-
-        for child in self.session.cstore.item_children(def_id) {
-            debug!("(populating external module) ... found ident: {}",
-                   child.name);
-            self.build_reduced_graph_for_external_crate_def(module, child);
-        }
-        module.populated.set(true)
-    }
-
-    /// Ensures that the reduced graph rooted at the given external module
-    /// is built, building it if it is not.
-    fn populate_module_if_necessary(&mut self, module: Module<'b>) {
-        if !module.populated.get() {
-            self.populate_external_module(module)
-        }
-        assert!(module.populated.get())
-    }
-
     /// Builds the reduced graph rooted at the 'use' directive for an external
     /// crate.
     fn build_reduced_graph_for_external_crate(&mut self, root: Module<'b>) {
@@ -683,79 +536,42 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> {
         }
     }
 
-    /// Creates and adds an import directive to the given module.
-    fn build_import_directive(&mut self,
-                              module_: Module<'b>,
-                              module_path: Vec<Name>,
-                              subclass: ImportDirectiveSubclass,
-                              span: Span,
-                              id: NodeId,
-                              is_public: bool,
-                              shadowable: Shadowable) {
-        module_.unresolved_imports
-               .borrow_mut()
-               .push(ImportDirective::new(module_path, subclass, span, id, is_public, shadowable));
-        self.unresolved_imports += 1;
-
-        if is_public {
-            module_.inc_pub_count();
-        }
-
-        // Bump the reference count on the name. Or, if this is a glob, set
-        // the appropriate flag.
-
-        match subclass {
-            SingleImport(target, _) => {
-                module_.increment_outstanding_references_for(target, ValueNS);
-                module_.increment_outstanding_references_for(target, TypeNS);
-            }
-            GlobImport => {
-                // Set the glob flag. This tells us that we don't know the
-                // module's exports ahead of time.
-
-                module_.inc_glob_count();
-                if is_public {
-                    module_.inc_pub_glob_count();
-                }
-            }
+    /// Ensures that the reduced graph rooted at the given external module
+    /// is built, building it if it is not.
+    pub fn populate_module_if_necessary(&mut self, module: Module<'b>) {
+        if module.populated.get() { return }
+        for child in self.session.cstore.item_children(module.def_id().unwrap()) {
+            self.build_reduced_graph_for_external_crate_def(module, child);
         }
+        module.populated.set(true)
     }
 }
 
 struct BuildReducedGraphVisitor<'a, 'b: 'a, 'tcx: 'b> {
-    builder: GraphBuilder<'a, 'b, 'tcx>,
+    resolver: &'a mut Resolver<'b, 'tcx>,
     parent: Module<'b>,
 }
 
 impl<'a, 'b, 'v, 'tcx> Visitor<'v> for BuildReducedGraphVisitor<'a, 'b, 'tcx> {
     fn visit_nested_item(&mut self, item: hir::ItemId) {
-        self.visit_item(self.builder.resolver.ast_map.expect_item(item.id))
+        self.visit_item(self.resolver.ast_map.expect_item(item.id))
     }
 
     fn visit_item(&mut self, item: &Item) {
-        let p = self.builder.build_reduced_graph_for_item(item, &self.parent);
-        let old_parent = replace(&mut self.parent, p);
+        let old_parent = self.parent;
+        self.resolver.build_reduced_graph_for_item(item, &mut self.parent);
         intravisit::walk_item(self, item);
         self.parent = old_parent;
     }
 
     fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) {
-        self.builder.build_reduced_graph_for_foreign_item(foreign_item, &self.parent);
+        self.resolver.build_reduced_graph_for_foreign_item(foreign_item, &self.parent);
     }
 
     fn visit_block(&mut self, block: &Block) {
-        let np = self.builder.build_reduced_graph_for_block(block, &self.parent);
-        let old_parent = replace(&mut self.parent, np);
+        let old_parent = self.parent;
+        self.resolver.build_reduced_graph_for_block(block, &mut self.parent);
         intravisit::walk_block(self, block);
         self.parent = old_parent;
     }
 }
-
-pub fn build_reduced_graph(resolver: &mut Resolver, krate: &hir::Crate) {
-    GraphBuilder { resolver: resolver }.build_reduced_graph(krate);
-}
-
-pub fn populate_module_if_necessary<'a, 'tcx>(resolver: &mut Resolver<'a, 'tcx>,
-                                              module: Module<'a>) {
-    GraphBuilder { resolver: resolver }.populate_module_if_necessary(module);
-}
index 178e2a4d1bc7882d816188cebf605b148cb579d2..9135b656736a25672d97ef4c8d30a2db6f8846ce 100644 (file)
@@ -23,13 +23,12 @@ use Resolver;
 use Namespace::{TypeNS, ValueNS};
 
 use rustc::lint;
-use rustc::middle::privacy::{DependsOn, LastImport, Used, Unused};
 use syntax::ast;
 use syntax::codemap::{Span, DUMMY_SP};
 
-use rustc_front::hir;
-use rustc_front::hir::{ViewPathGlob, ViewPathList, ViewPathSimple};
-use rustc_front::intravisit::Visitor;
+use rustc::hir;
+use rustc::hir::{ViewPathGlob, ViewPathList, ViewPathSimple};
+use rustc::hir::intravisit::Visitor;
 
 struct UnusedImportCheckVisitor<'a, 'b: 'a, 'tcx: 'b> {
     resolver: &'a mut Resolver<'b, 'tcx>,
@@ -52,16 +51,8 @@ impl<'a, 'b, 'tcx:'b> DerefMut for UnusedImportCheckVisitor<'a, 'b, 'tcx> {
 
 impl<'a, 'b, 'tcx> UnusedImportCheckVisitor<'a, 'b, 'tcx> {
     // We have information about whether `use` (import) directives are actually
-    // used now. If an import is not used at all, we signal a lint error. If an
-    // import is only used for a single namespace, we remove the other namespace
-    // from the recorded privacy information. That means in privacy.rs, we will
-    // only check imports and namespaces which are used. In particular, this
-    // means that if an import could name either a public or private item, we
-    // will check the correct thing, dependent on how the import is used.
-    fn finalize_import(&mut self, id: ast::NodeId, span: Span) {
-        debug!("finalizing import uses for {:?}",
-               self.session.codemap().span_to_snippet(span));
-
+    // used now. If an import is not used at all, we signal a lint error.
+    fn check_import(&mut self, id: ast::NodeId, span: Span) {
         if !self.used_imports.contains(&(id, TypeNS)) &&
            !self.used_imports.contains(&(id, ValueNS)) {
             self.session.add_lint(lint::builtin::UNUSED_IMPORTS,
@@ -69,45 +60,6 @@ impl<'a, 'b, 'tcx> UnusedImportCheckVisitor<'a, 'b, 'tcx> {
                                   span,
                                   "unused import".to_string());
         }
-
-        let mut def_map = self.def_map.borrow_mut();
-        let path_res = if let Some(r) = def_map.get_mut(&id) {
-            r
-        } else {
-            return;
-        };
-        let (v_priv, t_priv) = match path_res.last_private {
-            LastImport { value_priv, type_priv, .. } => (value_priv, type_priv),
-            _ => {
-                panic!("we should only have LastImport for `use` directives")
-            }
-        };
-
-        let mut v_used = if self.used_imports.contains(&(id, ValueNS)) {
-            Used
-        } else {
-            Unused
-        };
-        let t_used = if self.used_imports.contains(&(id, TypeNS)) {
-            Used
-        } else {
-            Unused
-        };
-
-        match (v_priv, t_priv) {
-            // Since some items may be both in the value _and_ type namespaces (e.g., structs)
-            // we might have two LastPrivates pointing at the same thing. There is no point
-            // checking both, so lets not check the value one.
-            (Some(DependsOn(def_v)), Some(DependsOn(def_t))) if def_v == def_t => v_used = Unused,
-            _ => {}
-        }
-
-        path_res.last_private = LastImport {
-            value_priv: v_priv,
-            value_used: v_used,
-            type_priv: t_priv,
-            type_used: t_used,
-        };
     }
 }
 
@@ -135,23 +87,16 @@ impl<'a, 'b, 'v, 'tcx> Visitor<'v> for UnusedImportCheckVisitor<'a, 'b, 'tcx> {
             hir::ItemUse(ref p) => {
                 match p.node {
                     ViewPathSimple(_, _) => {
-                        self.finalize_import(item.id, p.span)
+                        self.check_import(item.id, p.span)
                     }
 
                     ViewPathList(_, ref list) => {
                         for i in list {
-                            self.finalize_import(i.node.id(), i.span);
+                            self.check_import(i.node.id(), i.span);
                         }
                     }
                     ViewPathGlob(_) => {
-                        if !self.used_imports.contains(&(item.id, TypeNS)) &&
-                           !self.used_imports.contains(&(item.id, ValueNS)) {
-                            self.session
-                                .add_lint(lint::builtin::UNUSED_IMPORTS,
-                                          item.id,
-                                          p.span,
-                                          "unused import".to_string());
-                        }
+                        self.check_import(item.id, p.span)
                     }
                 }
             }
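
For context on the hunks above: the lint fires when an import's id was never recorded as used in either namespace, and globs now go through the same `check_import` path. A rough standalone sketch of that bookkeeping, with invented integer ids and a trimmed-down `Namespace` enum rather than the real `NodeId`/`Namespace` types:

```rust
use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Namespace { TypeNS, ValueNS }

fn main() {
    // (import id, namespace) pairs that name resolution actually recorded as used.
    let mut used_imports: HashSet<(u32, Namespace)> = HashSet::new();
    used_imports.insert((1, Namespace::TypeNS));

    // All import ids seen while walking the crate.
    let all_imports = [1u32, 2, 3];

    for &id in &all_imports {
        let used = used_imports.contains(&(id, Namespace::TypeNS))
            || used_imports.contains(&(id, Namespace::ValueNS));
        if !used {
            // Stand-in for session.add_lint(UNUSED_IMPORTS, ...).
            println!("warning: unused import (id {})", id);
        }
    }
}
```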
index bfd8a6f1f61f0b385369e08967ed4ab1a375326c..8a196768ae5168ea178c4c7f1f511b787474c0c7 100644 (file)
@@ -205,51 +205,6 @@ about what constitutes an Item declaration and what does not:
 https://doc.rust-lang.org/reference.html#statements
 "##,
 
-E0317: r##"
-User-defined types or type parameters cannot shadow the primitive types.
-This error indicates you tried to define a type, struct or enum with the same
-name as an existing primitive type:
-
-```compile_fail
-struct u8 {
-    // ...
-}
-```
-
-To fix this, simply name it something else.
-
-Such an error may also occur if you define a type parameter which shadows a
-primitive type. An example would be something like:
-
-```compile_fail
-impl<u8> MyTrait for Option<u8> {
-    // ...
-}
-```
-
-In such a case, if you meant for `u8` to be a generic type parameter (i.e. any
-type can be used in its place), use something like `T` instead:
-
-```ignore
-impl<T> MyTrait for Option<T> {
-    // ...
-}
-```
-
-On the other hand, if you wished to refer to the specific type `u8`, remove it
-from the type parameter list:
-
-```ignore
-impl MyTrait for Option<u8> {
-    // ...
-}
-```
-
-See the Types section of the reference for more information about the primitive
-types:
-
-https://doc.rust-lang.org/reference.html#types
-"##,
-
 E0364: r##"
 Private items cannot be publicly re-exported.  This error indicates that you
 attempted to `pub use` a type or value that was not itself public.
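
The E0364 entry is truncated here by the hunk boundary. As a self-contained illustration of the rule it states (private items cannot be publicly re-exported), here is a small sketch; the module and function names are invented, and the commented-out line shows the shape of code this error rejects:

```rust
mod inner {
    #[allow(dead_code)]
    fn private_fn() {}    // not `pub`, so it cannot be re-exported
    pub fn public_fn() {} // making the item `pub` is the usual fix
}

// pub use inner::private_fn; // rejected: a private item in a `pub use` (the E0364 case)
pub use inner::public_fn;     // fine: the re-exported item is itself public

fn main() {
    public_fn();
}
```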
index 067d83a17e57bd29af185600731e4bff3fd013f8..a0c4d636fd38fdd8e1265549ec5b23c1cbdca2ff 100644 (file)
@@ -18,6 +18,7 @@
 #![cfg_attr(not(stage0), deny(warnings))]
 
 #![feature(associated_consts)]
+#![feature(borrow_state)]
 #![feature(rustc_diagnostic_macros)]
 #![feature(rustc_private)]
 #![feature(staged_api)]
@@ -30,7 +31,7 @@ extern crate arena;
 #[macro_use]
 #[no_link]
 extern crate rustc_bitflags;
-extern crate rustc_front;
+#[macro_use]
 extern crate rustc;
 
 use self::PatternBindingMode::*;
@@ -47,17 +48,16 @@ use self::ParentLink::*;
 use self::FallbackChecks::*;
 
 use rustc::dep_graph::DepNode;
-use rustc::front::map as hir_map;
+use rustc::hir::map as hir_map;
 use rustc::session::Session;
 use rustc::lint;
-use rustc::middle::cstore::{CrateStore, DefLike, DlDef};
-use rustc::middle::def::*;
-use rustc::middle::def_id::DefId;
-use rustc::middle::pat_util::pat_bindings;
-use rustc::middle::privacy::*;
-use rustc::middle::subst::{ParamSpace, FnSpace, TypeSpace};
-use rustc::middle::ty::{Freevar, FreevarMap, TraitMap, GlobMap};
-use rustc::util::nodemap::{NodeMap, DefIdSet, FnvHashMap};
+use rustc::middle::cstore::CrateStore;
+use rustc::hir::def::*;
+use rustc::hir::def_id::DefId;
+use rustc::hir::pat_util::pat_bindings;
+use rustc::ty::subst::{ParamSpace, FnSpace, TypeSpace};
+use rustc::hir::{Freevar, FreevarMap, TraitMap, GlobMap};
+use rustc::util::nodemap::{NodeMap, FnvHashMap, FnvHashSet};
 
 use syntax::ast::{self, FloatTy};
 use syntax::ast::{CRATE_NODE_ID, Name, NodeId, CrateNum, IntTy, UintTy};
@@ -67,24 +67,23 @@ use syntax::errors::DiagnosticBuilder;
 use syntax::parse::token::{self, special_names, special_idents};
 use syntax::util::lev_distance::find_best_match_for_name;
 
-use rustc_front::intravisit::{self, FnKind, Visitor};
-use rustc_front::hir;
-use rustc_front::hir::{Arm, BindByRef, BindByValue, BindingMode, Block};
-use rustc_front::hir::Crate;
-use rustc_front::hir::{Expr, ExprAgain, ExprBreak, ExprCall, ExprField};
-use rustc_front::hir::{ExprLoop, ExprWhile, ExprMethodCall};
-use rustc_front::hir::{ExprPath, ExprStruct, FnDecl};
-use rustc_front::hir::{ForeignItemFn, ForeignItemStatic, Generics};
-use rustc_front::hir::{ImplItem, Item, ItemConst, ItemEnum, ItemExternCrate};
-use rustc_front::hir::{ItemFn, ItemForeignMod, ItemImpl, ItemMod, ItemStatic, ItemDefaultImpl};
-use rustc_front::hir::{ItemStruct, ItemTrait, ItemTy, ItemUse};
-use rustc_front::hir::Local;
-use rustc_front::hir::{Pat, PatKind, Path, PrimTy};
-use rustc_front::hir::{PathSegment, PathParameters};
-use rustc_front::hir::HirVec;
-use rustc_front::hir::{TraitRef, Ty, TyBool, TyChar, TyFloat, TyInt};
-use rustc_front::hir::{TyRptr, TyStr, TyUint, TyPath, TyPtr};
-use rustc_front::util::walk_pat;
+use rustc::hir::intravisit::{self, FnKind, Visitor};
+use rustc::hir;
+use rustc::hir::{Arm, BindByRef, BindByValue, BindingMode, Block};
+use rustc::hir::Crate;
+use rustc::hir::{Expr, ExprAgain, ExprBreak, ExprCall, ExprField};
+use rustc::hir::{ExprLoop, ExprWhile, ExprMethodCall};
+use rustc::hir::{ExprPath, ExprStruct, FnDecl};
+use rustc::hir::{ForeignItemFn, ForeignItemStatic, Generics};
+use rustc::hir::{ImplItem, Item, ItemConst, ItemEnum, ItemExternCrate};
+use rustc::hir::{ItemFn, ItemForeignMod, ItemImpl, ItemMod, ItemStatic, ItemDefaultImpl};
+use rustc::hir::{ItemStruct, ItemTrait, ItemTy, ItemUse};
+use rustc::hir::Local;
+use rustc::hir::{Pat, PatKind, Path, PrimTy};
+use rustc::hir::{PathSegment, PathParameters};
+use rustc::hir::HirVec;
+use rustc::hir::{TraitRef, Ty, TyBool, TyChar, TyFloat, TyInt};
+use rustc::hir::{TyRptr, TyStr, TyUint, TyPath, TyPtr};
 
 use std::collections::{HashMap, HashSet};
 use std::cell::{Cell, RefCell};
@@ -95,7 +94,7 @@ use resolve_imports::{ImportDirective, NameResolution};
 
 // NB: This module needs to be declared first so diagnostics are
 // registered before they are used.
-pub mod diagnostics;
+mod diagnostics;
 
 mod check_unused;
 mod build_reduced_graph;
@@ -119,12 +118,12 @@ enum SuggestionType {
 }
 
 /// Candidates for a name resolution failure
-pub struct SuggestedCandidates {
+struct SuggestedCandidates {
     name: String,
     candidates: Vec<Path>,
 }
 
-pub enum ResolutionError<'a> {
+enum ResolutionError<'a> {
     /// error E0401: can't use type parameters from outer function
     TypeParametersFromOuterFunction,
     /// error E0402: cannot use an outer type parameter in this context
@@ -183,8 +182,6 @@ pub enum ResolutionError<'a> {
     UndeclaredLabel(&'a str),
     /// error E0427: cannot use `ref` binding mode with ...
     CannotUseRefBindingModeWith(&'a str),
-    /// error E0428: duplicate definition
-    DuplicateDefinition(&'a str, Name),
     /// error E0429: `self` imports are only allowed within a { } list
     SelfImportsOnlyAllowedWithin,
     /// error E0430: `self` import can only appear once in the list
@@ -203,7 +200,7 @@ pub enum ResolutionError<'a> {
 
 /// Context of where `ResolutionError::UnresolvedName` arose.
 #[derive(Clone, PartialEq, Eq, Debug)]
-pub enum UnresolvedNameContext {
+enum UnresolvedNameContext {
     /// `PathIsMod(id)` indicates that a given path, used in
     /// expression context, actually resolved to a module rather than
     /// a value. The `id` attached to the variant is the node id of
@@ -351,7 +348,8 @@ fn resolve_struct_error<'b, 'a: 'b, 'tcx: 'a>(resolver: &'b Resolver<'a, 'tcx>,
             if let Some(sp) = resolver.ast_map.span_if_local(did) {
                 err.span_note(sp, "constant defined here");
             }
-            if let Success(binding) = resolver.current_module.resolve_name(name, ValueNS, true) {
+            if let Some(binding) = resolver.current_module
+                                           .resolve_name_in_lexical_scope(name, ValueNS) {
                 if binding.is_import() {
                     err.span_note(binding.span.unwrap(), "constant imported here");
                 }
@@ -490,14 +488,6 @@ fn resolve_struct_error<'b, 'a: 'b, 'tcx: 'a>(resolver: &'b Resolver<'a, 'tcx>,
                              "cannot use `ref` binding mode with {}",
                              descr)
         }
-        ResolutionError::DuplicateDefinition(namespace, name) => {
-            struct_span_err!(resolver.session,
-                             span,
-                             E0428,
-                             "duplicate definition of {} `{}`",
-                             namespace,
-                             name)
-        }
         ResolutionError::SelfImportsOnlyAllowedWithin => {
             struct_span_err!(resolver.session,
                              span,
@@ -644,16 +634,16 @@ impl<'a, 'v, 'tcx> Visitor<'v> for Resolver<'a, 'tcx> {
                 _: Span,
                 node_id: NodeId) {
         let rib_kind = match function_kind {
-            FnKind::ItemFn(_, generics, _, _, _, _) => {
+            FnKind::ItemFn(_, generics, _, _, _, _, _) => {
                 self.visit_generics(generics);
                 ItemRibKind
             }
-            FnKind::Method(_, sig, _) => {
+            FnKind::Method(_, sig, _, _) => {
                 self.visit_generics(&sig.generics);
                 self.visit_explicit_self(&sig.explicit_self);
                 MethodRibKind
             }
-            FnKind::Closure => ClosureRibKind(node_id),
+            FnKind::Closure(_) => ClosureRibKind(node_id),
         };
         self.resolve_function(rib_kind, declaration, block);
     }
@@ -757,15 +747,15 @@ enum AssocItemResolveResult {
 
 #[derive(Copy, Clone)]
 enum BareIdentifierPatternResolution {
-    FoundStructOrEnumVariant(Def, LastPrivate),
-    FoundConst(Def, LastPrivate, Name),
+    FoundStructOrEnumVariant(Def),
+    FoundConst(Def, Name),
     BareIdentifierPatternUnresolved,
 }
 
 /// One local scope.
 #[derive(Debug)]
 struct Rib<'a> {
-    bindings: HashMap<Name, DefLike>,
+    bindings: HashMap<Name, Def>,
     kind: RibKind<'a>,
 }
 
@@ -793,6 +783,31 @@ impl LocalDef {
     }
 }
 
+enum LexicalScopeBinding<'a> {
+    Item(&'a NameBinding<'a>),
+    LocalDef(LocalDef),
+}
+
+impl<'a> LexicalScopeBinding<'a> {
+    fn local_def(self) -> LocalDef {
+        match self {
+            LexicalScopeBinding::LocalDef(local_def) => local_def,
+            LexicalScopeBinding::Item(binding) => LocalDef::from_def(binding.def().unwrap()),
+        }
+    }
+
+    fn def(self) -> Def {
+        self.local_def().def
+    }
+
+    fn module(self) -> Option<Module<'a>> {
+        match self {
+            LexicalScopeBinding::Item(binding) => binding.module(),
+            _ => None,
+        }
+    }
+}
+
 /// The link from a module up to its nearest parent node.
 #[derive(Clone,Debug)]
 enum ParentLink<'a> {
@@ -807,12 +822,12 @@ pub struct ModuleS<'a> {
     def: Option<Def>,
     is_public: bool,
 
-    // If the module is an extern crate, `def` is root of the external crate and `extern_crate_did`
-    // is the DefId of the local `extern crate` item (otherwise, `extern_crate_did` is None).
-    extern_crate_did: Option<DefId>,
+    // If the module is an extern crate, `def` is the root of the external crate and `extern_crate_id`
+    // is the NodeId of the local `extern crate` item (otherwise, `extern_crate_id` is None).
+    extern_crate_id: Option<NodeId>,
 
-    resolutions: RefCell<HashMap<(Name, Namespace), NameResolution<'a>>>,
-    unresolved_imports: RefCell<Vec<ImportDirective>>,
+    resolutions: RefCell<HashMap<(Name, Namespace), &'a RefCell<NameResolution<'a>>>>,
+    unresolved_imports: RefCell<Vec<&'a ImportDirective<'a>>>,
 
     // The module children of this node, including normal modules and anonymous modules.
     // Anonymous children are pseudo-modules that are implicitly created around items
@@ -830,89 +845,50 @@ pub struct ModuleS<'a> {
     // entry block for `f`.
     module_children: RefCell<NodeMap<Module<'a>>>,
 
-    shadowed_traits: RefCell<Vec<&'a NameBinding<'a>>>,
-
-    // The number of unresolved globs that this module exports.
-    glob_count: Cell<usize>,
+    prelude: RefCell<Option<Module<'a>>>,
 
-    // The number of unresolved pub imports (both regular and globs) in this module
-    pub_count: Cell<usize>,
+    glob_importers: RefCell<Vec<(Module<'a>, &'a ImportDirective<'a>)>>,
+    globs: RefCell<Vec<&'a ImportDirective<'a>>>,
 
-    // The number of unresolved pub glob imports in this module
-    pub_glob_count: Cell<usize>,
+    // Used to memoize the traits in this module for faster searches through all traits in scope.
+    traits: RefCell<Option<Box<[&'a NameBinding<'a>]>>>,
 
     // Whether this module is populated. If not populated, any attempt to
     // access the children must be preceded with a
     // `populate_module_if_necessary` call.
     populated: Cell<bool>,
+
+    arenas: &'a ResolverArenas<'a>,
 }
 
 pub type Module<'a> = &'a ModuleS<'a>;
 
 impl<'a> ModuleS<'a> {
-
-    fn new(parent_link: ParentLink<'a>, def: Option<Def>, external: bool, is_public: bool) -> Self {
+    fn new(parent_link: ParentLink<'a>,
+           def: Option<Def>,
+           external: bool,
+           is_public: bool,
+           arenas: &'a ResolverArenas<'a>) -> Self {
         ModuleS {
             parent_link: parent_link,
             def: def,
             is_public: is_public,
-            extern_crate_did: None,
+            extern_crate_id: None,
             resolutions: RefCell::new(HashMap::new()),
             unresolved_imports: RefCell::new(Vec::new()),
             module_children: RefCell::new(NodeMap()),
-            shadowed_traits: RefCell::new(Vec::new()),
-            glob_count: Cell::new(0),
-            pub_count: Cell::new(0),
-            pub_glob_count: Cell::new(0),
+            prelude: RefCell::new(None),
+            glob_importers: RefCell::new(Vec::new()),
+            globs: RefCell::new(Vec::new()),
+            traits: RefCell::new(None),
             populated: Cell::new(!external),
-        }
-    }
-
-    fn resolve_name(&self, name: Name, ns: Namespace, allow_private_imports: bool)
-                    -> ResolveResult<&'a NameBinding<'a>> {
-        let glob_count =
-            if allow_private_imports { self.glob_count.get() } else { self.pub_glob_count.get() };
-
-        self.resolutions.borrow().get(&(name, ns)).cloned().unwrap_or_default().result(glob_count)
-            .and_then(|binding| {
-                let allowed = allow_private_imports || !binding.is_import() || binding.is_public();
-                if allowed { Success(binding) } else { Failed(None) }
-            })
-    }
-
-    // Define the name or return the existing binding if there is a collision.
-    fn try_define_child(&self, name: Name, ns: Namespace, binding: &'a NameBinding<'a>)
-                        -> Result<(), &'a NameBinding<'a>> {
-        let mut children = self.resolutions.borrow_mut();
-        let resolution = children.entry((name, ns)).or_insert_with(Default::default);
-
-        // FIXME #31379: We can use methods from imported traits shadowed by non-import items
-        if let Some(old_binding) = resolution.binding {
-            if !old_binding.is_import() && binding.is_import() {
-                if let Some(Def::Trait(_)) = binding.def() {
-                    self.shadowed_traits.borrow_mut().push(binding);
-                }
-            }
-        }
-
-        resolution.try_define(binding)
-    }
-
-    fn increment_outstanding_references_for(&self, name: Name, ns: Namespace) {
-        let mut children = self.resolutions.borrow_mut();
-        children.entry((name, ns)).or_insert_with(Default::default).outstanding_references += 1;
-    }
-
-    fn decrement_outstanding_references_for(&self, name: Name, ns: Namespace) {
-        match self.resolutions.borrow_mut().get_mut(&(name, ns)).unwrap().outstanding_references {
-            0 => panic!("No more outstanding references!"),
-            ref mut outstanding_references => { *outstanding_references -= 1; }
+            arenas: arenas
         }
     }
 
     fn for_each_child<F: FnMut(Name, Namespace, &'a NameBinding<'a>)>(&self, mut f: F) {
         for (&(name, ns), name_resolution) in self.resolutions.borrow().iter() {
-            name_resolution.binding.map(|binding| f(name, ns, binding));
+            name_resolution.borrow().binding.map(|binding| f(name, ns, binding));
         }
     }
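
The new `traits` field above caches the module's trait bindings behind a `RefCell<Option<Box<[...]>>>` so the scan over all children happens at most once. A small stand-alone sketch of that compute-on-first-use pattern; `TraitCache` and the `starts_with("Trait")` predicate are invented for illustration, not the resolver's real search:

```rust
use std::cell::RefCell;

struct TraitCache {
    names: Vec<String>,
    // None until the first query, then a cached list of indices of trait-like names.
    traits: RefCell<Option<Vec<usize>>>,
}

impl TraitCache {
    fn traits(&self) -> Vec<usize> {
        if self.traits.borrow().is_none() {
            // Expensive scan, performed only once.
            let found: Vec<usize> = self.names.iter().enumerate()
                .filter(|&(_, n)| n.starts_with("Trait"))
                .map(|(i, _)| i)
                .collect();
            *self.traits.borrow_mut() = Some(found);
        }
        self.traits.borrow().as_ref().unwrap().clone()
    }
}

fn main() {
    let m = TraitCache {
        names: vec!["TraitA".into(), "foo".into(), "TraitB".into()],
        traits: RefCell::new(None),
    };
    assert_eq!(m.traits(), vec![0, 2]); // computed on the first call
    assert_eq!(m.traits(), vec![0, 2]); // served from the cache afterwards
}
```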
 
@@ -920,16 +896,6 @@ impl<'a> ModuleS<'a> {
         self.def.as_ref().map(Def::def_id)
     }
 
-    // This returns the DefId of the crate local item that controls this module's visibility.
-    // It is only used to compute `LastPrivate` data, and it differs from `def_id` only for extern
-    // crates, whose `def_id` is the external crate's root, not the local `extern crate` item.
-    fn local_def_id(&self) -> Option<DefId> {
-        match self.extern_crate_did {
-            Some(def_id) => Some(def_id),
-            None => self.def_id(),
-        }
-    }
-
     fn is_normal(&self) -> bool {
         match self.def {
             Some(Def::Mod(_)) | Some(Def::ForeignMod(_)) => true,
@@ -944,26 +910,13 @@ impl<'a> ModuleS<'a> {
         }
     }
 
-    pub fn inc_glob_count(&self) {
-        self.glob_count.set(self.glob_count.get() + 1);
-    }
-    pub fn dec_glob_count(&self) {
-        assert!(self.glob_count.get() > 0);
-        self.glob_count.set(self.glob_count.get() - 1);
-    }
-    pub fn inc_pub_count(&self) {
-        self.pub_count.set(self.pub_count.get() + 1);
-    }
-    pub fn dec_pub_count(&self) {
-        assert!(self.pub_count.get() > 0);
-        self.pub_count.set(self.pub_count.get() - 1);
-    }
-    pub fn inc_pub_glob_count(&self) {
-        self.pub_glob_count.set(self.pub_glob_count.get() + 1);
-    }
-    pub fn dec_pub_glob_count(&self) {
-        assert!(self.pub_glob_count.get() > 0);
-        self.pub_glob_count.set(self.pub_glob_count.get() - 1);
+    fn is_ancestor_of(&self, module: Module<'a>) -> bool {
+        if self.def_id() == module.def_id() { return true }
+        match module.parent_link {
+            ParentLink::BlockParentLink(parent, _) |
+            ParentLink::ModuleParentLink(parent, _) => self.is_ancestor_of(parent),
+            _ => false,
+        }
     }
 }
 
@@ -990,29 +943,33 @@ bitflags! {
         // Variants are considered `PUBLIC`, but some of them live in private enums.
         // We need to track them to prohibit reexports like `pub use PrivEnum::Variant`.
         const PRIVATE_VARIANT = 1 << 2,
-        const PRELUDE = 1 << 3,
-        const GLOB_IMPORTED = 1 << 4,
+        const GLOB_IMPORTED = 1 << 3,
     }
 }
 
 // Records a possibly-private value, type, or module definition.
-#[derive(Debug)]
+#[derive(Clone, Debug)]
 pub struct NameBinding<'a> {
     modifiers: DefModifiers,
     kind: NameBindingKind<'a>,
     span: Option<Span>,
 }
 
-#[derive(Debug)]
+#[derive(Clone, Debug)]
 enum NameBindingKind<'a> {
     Def(Def),
     Module(Module<'a>),
     Import {
         binding: &'a NameBinding<'a>,
         id: NodeId,
+        // Some(error) if using this imported name causes the import to be a privacy error
+        privacy_error: Option<Box<PrivacyError<'a>>>,
     },
 }
 
+#[derive(Clone, Debug)]
+struct PrivacyError<'a>(Span, Name, &'a NameBinding<'a>);
+
 impl<'a> NameBinding<'a> {
     fn create_from_module(module: Module<'a>, span: Option<Span>) -> Self {
         let modifiers = if module.is_public {
@@ -1040,14 +997,6 @@ impl<'a> NameBinding<'a> {
         }
     }
 
-    fn local_def_id(&self) -> Option<DefId> {
-        match self.kind {
-            NameBindingKind::Def(def) => Some(def.def_id()),
-            NameBindingKind::Module(ref module) => module.local_def_id(),
-            NameBindingKind::Import { binding, .. } => binding.local_def_id(),
-        }
-    }
-
     fn defined_with(&self, modifiers: DefModifiers) -> bool {
         self.modifiers.contains(modifiers)
     }
@@ -1056,15 +1005,8 @@ impl<'a> NameBinding<'a> {
         self.defined_with(DefModifiers::PUBLIC)
     }
 
-    fn def_and_lp(&self) -> (Def, LastPrivate) {
-        let def = self.def().unwrap();
-        if let Def::Err = def { return (def, LastMod(AllPublic)) }
-        let lp = if self.is_public() { AllPublic } else { DependsOn(self.local_def_id().unwrap()) };
-        (def, LastMod(lp))
-    }
-
     fn is_extern_crate(&self) -> bool {
-        self.module().and_then(|module| module.extern_crate_did).is_some()
+        self.module().and_then(|module| module.extern_crate_id).is_some()
     }
 
     fn is_import(&self) -> bool {
@@ -1150,7 +1092,6 @@ pub struct Resolver<'a, 'tcx: 'a> {
     freevars_seen: NodeMap<NodeMap<usize>>,
     export_map: ExportMap,
     trait_map: TraitMap,
-    external_exports: ExternalExports,
 
     // Whether or not to print error messages. Can be set to true
     // when getting additional info for error message suggestions,
@@ -1170,13 +1111,32 @@ pub struct Resolver<'a, 'tcx: 'a> {
     // The intention is that the callback modifies this flag.
     // Once set, the resolver falls out of the walk, preserving the ribs.
     resolved: bool,
+    privacy_errors: Vec<PrivacyError<'a>>,
 
     arenas: &'a ResolverArenas<'a>,
 }
 
-pub struct ResolverArenas<'a> {
+struct ResolverArenas<'a> {
     modules: arena::TypedArena<ModuleS<'a>>,
     name_bindings: arena::TypedArena<NameBinding<'a>>,
+    import_directives: arena::TypedArena<ImportDirective<'a>>,
+    name_resolutions: arena::TypedArena<RefCell<NameResolution<'a>>>,
+}
+
+impl<'a> ResolverArenas<'a> {
+    fn alloc_module(&'a self, module: ModuleS<'a>) -> Module<'a> {
+        self.modules.alloc(module)
+    }
+    fn alloc_name_binding(&'a self, name_binding: NameBinding<'a>) -> &'a NameBinding<'a> {
+        self.name_bindings.alloc(name_binding)
+    }
+    fn alloc_import_directive(&'a self, import_directive: ImportDirective<'a>)
+                              -> &'a ImportDirective {
+        self.import_directives.alloc(import_directive)
+    }
+    fn alloc_name_resolution(&'a self) -> &'a RefCell<NameResolution<'a>> {
+        self.name_resolutions.alloc(Default::default())
+    }
 }
 
 #[derive(PartialEq)]
@@ -1192,8 +1152,9 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
            arenas: &'a ResolverArenas<'a>)
            -> Resolver<'a, 'tcx> {
         let root_def_id = ast_map.local_def_id(CRATE_NODE_ID);
-        let graph_root = ModuleS::new(NoParentLink, Some(Def::Mod(root_def_id)), false, true);
-        let graph_root = arenas.modules.alloc(graph_root);
+        let graph_root =
+            ModuleS::new(NoParentLink, Some(Def::Mod(root_def_id)), false, true, arenas);
+        let graph_root = arenas.alloc_module(graph_root);
 
         Resolver {
             session: session,
@@ -1210,8 +1171,8 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             unresolved_imports: 0,
 
             current_module: graph_root,
-            value_ribs: Vec::new(),
-            type_ribs: Vec::new(),
+            value_ribs: vec![Rib::new(ModuleRibKind(graph_root))],
+            type_ribs: vec![Rib::new(ModuleRibKind(graph_root))],
             label_ribs: Vec::new(),
 
             current_trait_ref: None,
@@ -1226,14 +1187,14 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             trait_map: NodeMap(),
             used_imports: HashSet::new(),
             used_crates: HashSet::new(),
-            external_exports: DefIdSet(),
 
             emit_errors: true,
             make_glob_map: make_glob_map == MakeGlobMap::Yes,
-            glob_map: HashMap::new(),
+            glob_map: NodeMap(),
 
             callback: None,
             resolved: false,
+            privacy_errors: Vec::new(),
 
             arenas: arenas,
         }
@@ -1243,6 +1204,8 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
         ResolverArenas {
             modules: arena::TypedArena::new(),
             name_bindings: arena::TypedArena::new(),
+            import_directives: arena::TypedArena::new(),
+            name_resolutions: arena::TypedArena::new(),
         }
     }
 
@@ -1251,21 +1214,17 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                   def: Option<Def>,
                   external: bool,
                   is_public: bool) -> Module<'a> {
-        self.arenas.modules.alloc(ModuleS::new(parent_link, def, external, is_public))
-    }
-
-    fn new_name_binding(&self, name_binding: NameBinding<'a>) -> &'a NameBinding<'a> {
-        self.arenas.name_bindings.alloc(name_binding)
+        self.arenas.alloc_module(ModuleS::new(parent_link, def, external, is_public, self.arenas))
     }
 
     fn new_extern_crate_module(&self,
                                parent_link: ParentLink<'a>,
                                def: Def,
                                is_public: bool,
-                               local_def: DefId)
+                               local_node_id: NodeId)
                                -> Module<'a> {
-        let mut module = ModuleS::new(parent_link, Some(def), false, is_public);
-        module.extern_crate_did = Some(local_def);
+        let mut module = ModuleS::new(parent_link, Some(def), false, is_public, self.arenas);
+        module.extern_crate_id = Some(local_node_id);
         self.arenas.modules.alloc(module)
     }
 
@@ -1280,12 +1239,15 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             self.used_crates.insert(krate);
         }
 
-        let import_id = match binding.kind {
-            NameBindingKind::Import { id, .. } => id,
+        let (import_id, privacy_error) = match binding.kind {
+            NameBindingKind::Import { id, ref privacy_error, .. } => (id, privacy_error),
             _ => return,
         };
 
         self.used_imports.insert((import_id, ns));
+        if let Some(error) = privacy_error.as_ref() {
+            self.privacy_errors.push((**error).clone());
+        }
 
         if !self.make_glob_map {
             return;
@@ -1295,7 +1257,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             return;
         }
 
-        let mut new_set = HashSet::new();
+        let mut new_set = FnvHashSet();
         new_set.insert(name);
         self.glob_map.insert(import_id, new_set);
     }
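
`record_use` above no longer reports privacy violations on the spot; it copies the error recorded on the import binding into `privacy_errors`, to be emitted in one batch later. A minimal sketch of that collect-then-report flow, with a made-up `Checker` and a simplified error struct in place of the resolver and `PrivacyError(Span, Name, ...)`:

```rust
// Hypothetical stand-in for a privacy error: just a position and a name.
#[derive(Clone, Debug)]
struct PrivacyError { offset: usize, name: String }

struct Checker { privacy_errors: Vec<PrivacyError> }

impl Checker {
    // Called every time an imported name is used; errors are only recorded here.
    fn record_use(&mut self, offset: usize, name: &str, private: bool) {
        if private {
            self.privacy_errors.push(PrivacyError { offset, name: name.to_string() });
        }
    }

    // Emitted once at the end, like the batched report the resolver performs later.
    fn report(&self) {
        for e in &self.privacy_errors {
            println!("error at byte {}: `{}` is private", e.offset, e.name);
        }
    }
}

fn main() {
    let mut checker = Checker { privacy_errors: Vec::new() };
    checker.record_use(10, "secret", true);
    checker.record_use(42, "public_item", false);
    checker.report();
}
```

Deferring the report keeps name resolution itself infallible and lets duplicate uses of the same bad import collapse into a single diagnostic pass.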
@@ -1313,9 +1275,8 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                                      module_: Module<'a>,
                                      module_path: &[Name],
                                      index: usize,
-                                     span: Span,
-                                     lp: LastPrivate)
-                                     -> ResolveResult<(Module<'a>, LastPrivate)> {
+                                     span: Span)
+                                     -> ResolveResult<Module<'a>> {
         fn search_parent_externals(needle: Name, module: Module) -> Option<Module> {
             match module.resolve_name(needle, TypeNS, false) {
                 Success(binding) if binding.is_extern_crate() => Some(module),
@@ -1331,7 +1292,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
         let mut search_module = module_;
         let mut index = index;
         let module_path_len = module_path.len();
-        let mut closest_private = lp;
 
         // Resolve the module part of the path. This does not involve looking
         // upward though scope chains; we simply resolve names directly in
@@ -1379,15 +1339,8 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                     // Check to see whether there are type bindings, and, if
                     // so, whether there is a module within.
                     if let Some(module_def) = binding.module() {
+                        self.check_privacy(search_module, name, binding, span);
                         search_module = module_def;
-
-                        // Keep track of the closest private module used
-                        // when resolving this import chain.
-                        if !binding.is_public() {
-                            if let Some(did) = search_module.local_def_id() {
-                                closest_private = LastMod(DependsOn(did));
-                            }
-                        }
                     } else {
                         let msg = format!("Not a module `{}`", name);
                         return Failed(Some((span, msg)));
@@ -1398,52 +1351,30 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             index += 1;
         }
 
-        return Success((search_module, closest_private));
+        return Success(search_module);
     }
 
     /// Attempts to resolve the module part of an import directive or path
     /// rooted at the given module.
-    ///
-    /// On success, returns the resolved module, and the closest *private*
-    /// module found to the destination when resolving this path.
     fn resolve_module_path(&mut self,
-                           module_: Module<'a>,
                            module_path: &[Name],
                            use_lexical_scope: UseLexicalScopeFlag,
                            span: Span)
-                           -> ResolveResult<(Module<'a>, LastPrivate)> {
+                           -> ResolveResult<Module<'a>> {
         if module_path.len() == 0 {
-            return Success((self.graph_root, LastMod(AllPublic))) // Use the crate root
+            return Success(self.graph_root) // Use the crate root
         }
 
         debug!("(resolving module path for import) processing `{}` rooted at `{}`",
                names_to_string(module_path),
-               module_to_string(&module_));
+               module_to_string(self.current_module));
 
         // Resolve the module prefix, if any.
-        let module_prefix_result = self.resolve_module_prefix(module_, module_path);
+        let module_prefix_result = self.resolve_module_prefix(module_path, span);
 
         let search_module;
         let start_index;
-        let last_private;
         match module_prefix_result {
-            Failed(None) => {
-                let mpath = names_to_string(module_path);
-                let mpath = &mpath[..];
-                match mpath.rfind(':') {
-                    Some(idx) => {
-                        let msg = format!("Could not find `{}` in `{}`",
-                                          // idx +- 1 to account for the
-                                          // colons on either side
-                                          &mpath[idx + 1..],
-                                          &mpath[..idx - 1]);
-                        return Failed(Some((span, msg)));
-                    }
-                    None => {
-                        return Failed(None);
-                    }
-                }
-            }
             Failed(err) => return Failed(err),
             Indeterminate => {
                 debug!("(resolving module path for import) indeterminate; bailing");
@@ -1459,28 +1390,18 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                         // resolution process at index zero.
                         search_module = self.graph_root;
                         start_index = 0;
-                        last_private = LastMod(AllPublic);
                     }
                     UseLexicalScope => {
                         // This is not a crate-relative path. We resolve the
                         // first component of the path in the current lexical
                         // scope and then proceed to resolve below that.
-                        match self.resolve_item_in_lexical_scope(module_,
-                                                                 module_path[0],
-                                                                 TypeNS,
-                                                                 true) {
-                            Failed(err) => return Failed(err),
-                            Indeterminate => {
-                                debug!("(resolving module path for import) indeterminate; bailing");
-                                return Indeterminate;
-                            }
-                            Success(binding) => match binding.module() {
-                                Some(containing_module) => {
-                                    search_module = containing_module;
-                                    start_index = 1;
-                                    last_private = LastMod(AllPublic);
-                                }
-                                None => return Failed(None),
+                        let ident = hir::Ident::from_name(module_path[0]);
+                        match self.resolve_ident_in_lexical_scope(ident, TypeNS, true)
+                                  .and_then(LexicalScopeBinding::module) {
+                            None => return Failed(None),
+                            Some(containing_module) => {
+                                search_module = containing_module;
+                                start_index = 1;
                             }
                         }
                     }
@@ -1489,76 +1410,63 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             Success(PrefixFound(ref containing_module, index)) => {
                 search_module = containing_module;
                 start_index = index;
-                last_private = LastMod(DependsOn(containing_module.local_def_id()
-                                                                  .unwrap()));
             }
         }
 
         self.resolve_module_path_from_root(search_module,
                                            module_path,
                                            start_index,
-                                           span,
-                                           last_private)
+                                           span)
     }
 
+    /// This resolves the identifier `ident` in the namespace `ns` in the current lexical scope.
+    /// More specifically, we proceed up the hierarchy of scopes and return the binding for
+    /// `ident` in the first scope that defines it (or None if no scopes define it).
+    ///
+    /// A block's items are above its local variables in the scope hierarchy, regardless of where
+    /// the items are defined in the block. For example,
+    /// ```rust
+    /// fn f() {
+    ///    g(); // Since there are no local variables in scope yet, this resolves to the item.
+    ///    let g = || {};
+    ///    fn g() {}
+    ///    g(); // This resolves to the local variable `g` since it shadows the item.
+    /// }
+    /// ```
+    ///
     /// Invariant: This must only be called during main resolution, not during
     /// import resolution.
-    fn resolve_item_in_lexical_scope(&mut self,
-                                     module_: Module<'a>,
-                                     name: Name,
-                                     namespace: Namespace,
-                                     record_used: bool)
-                                     -> ResolveResult<&'a NameBinding<'a>> {
-        debug!("(resolving item in lexical scope) resolving `{}` in namespace {:?} in `{}`",
-               name,
-               namespace,
-               module_to_string(&module_));
-
-        // Proceed up the scope chain looking for parent modules.
-        let mut search_module = module_;
-        loop {
-            // Resolve the name in the parent module.
-            match self.resolve_name_in_module(search_module, name, namespace, true, record_used) {
-                Failed(Some((span, msg))) => {
-                    resolve_error(self, span, ResolutionError::FailedToResolve(&msg));
-                }
-                Failed(None) => (), // Continue up the search chain.
-                Indeterminate => {
-                    // We couldn't see through the higher scope because of an
-                    // unresolved import higher up. Bail.
-
-                    debug!("(resolving item in lexical scope) indeterminate higher scope; bailing");
-                    return Indeterminate;
-                }
-                Success(binding) => {
-                    // We found the module.
-                    debug!("(resolving item in lexical scope) found name in module, done");
-                    return Success(binding);
+    fn resolve_ident_in_lexical_scope(&mut self,
+                                      ident: hir::Ident,
+                                      ns: Namespace,
+                                      record_used: bool)
+                                      -> Option<LexicalScopeBinding<'a>> {
+        let name = match ns { ValueNS => ident.name, TypeNS => ident.unhygienic_name };
+
+        // Walk backwards up the ribs in scope.
+        for i in (0 .. self.get_ribs(ns).len()).rev() {
+            if let Some(def) = self.get_ribs(ns)[i].bindings.get(&name).cloned() {
+                // The ident resolves to a type parameter or local variable.
+                return Some(LexicalScopeBinding::LocalDef(LocalDef {
+                    ribs: Some((ns, i)),
+                    def: def,
+                }));
+            }
+
+            if let ModuleRibKind(module) = self.get_ribs(ns)[i].kind {
+                let name = ident.unhygienic_name;
+                let item = self.resolve_name_in_module(module, name, ns, true, record_used);
+                if let Success(binding) = item {
+                    // The ident resolves to an item.
+                    return Some(LexicalScopeBinding::Item(binding));
                 }
-            }
 
-            // Go to the next parent.
-            match search_module.parent_link {
-                NoParentLink => {
-                    // No more parents. This module was unresolved.
-                    debug!("(resolving item in lexical scope) unresolved module: no parent module");
-                    return Failed(None);
-                }
-                ModuleParentLink(parent_module_node, _) => {
-                    if search_module.is_normal() {
-                        // We stop the search here.
-                        debug!("(resolving item in lexical scope) unresolved module: not \
-                                searching through module parents");
-                            return Failed(None);
-                    } else {
-                        search_module = parent_module_node;
-                    }
-                }
-                BlockParentLink(parent_module_node, _) => {
-                    search_module = parent_module_node;
-                }
+                // We can only see through anonymous modules
+                if module.def.is_some() { return None; }
             }
         }
+
+        None
     }
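
The new `resolve_ident_in_lexical_scope` walks the ribs innermost-first and falls back to module items at a `ModuleRibKind` rib, which is what gives locals their shadowing behaviour. The core idea can be pictured as an ordinary stack of scopes searched in reverse; a toy version with plain `String` keys and integer "defs", invented purely for illustration:

```rust
use std::collections::HashMap;

// Innermost scope is the last element, mirroring the rib stacks.
fn lookup(scopes: &[HashMap<String, i32>], name: &str) -> Option<i32> {
    for scope in scopes.iter().rev() {
        if let Some(&def) = scope.get(name) {
            return Some(def); // first hit wins: inner bindings shadow outer ones
        }
    }
    None
}

fn main() {
    let mut outer = HashMap::new();
    outer.insert("g".to_string(), 1); // item-like binding in the outer scope
    let mut inner = HashMap::new();
    inner.insert("g".to_string(), 2); // local binding shadowing it

    let scopes = vec![outer, inner];
    assert_eq!(lookup(&scopes, "g"), Some(2));
    assert_eq!(lookup(&scopes, "missing"), None);
}
```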
 
     /// Returns the nearest normal module parent of the given module.
@@ -1594,9 +1502,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
     /// Resolves a "module prefix". A module prefix is one or both of (a) `self::`;
     /// (b) some chain of `super::`.
     /// grammar: (SELF MOD_SEP ) ? (SUPER MOD_SEP) *
-    fn resolve_module_prefix(&mut self,
-                             module_: Module<'a>,
-                             module_path: &[Name])
+    fn resolve_module_prefix(&mut self, module_path: &[Name], span: Span)
                              -> ResolveResult<ModulePrefixResult<'a>> {
         // Start at the current module if we see `self` or `super`, or at the
         // top of the crate otherwise.
@@ -1605,6 +1511,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             "super" => 0,
             _ => return Success(NoPrefixFound),
         };
+        let module_ = self.current_module;
         let mut containing_module = self.get_nearest_normal_module_parent_or_self(module_);
 
         // Now loop through all the `super`s we find.
@@ -1612,7 +1519,10 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             debug!("(resolving module prefix) resolving `super` at {}",
                    module_to_string(&containing_module));
             match self.get_nearest_normal_module_parent(containing_module) {
-                None => return Failed(None),
+                None => {
+                    let msg = "There are too many initial `super`s.".into();
+                    return Failed(Some((span, msg)));
+                }
                 Some(new_module) => {
                     containing_module = new_module;
                     i += 1;
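
The hunk above turns a bare `Failed(None)` into a spanned message when a path begins with more `super::` segments than there are enclosing modules. A rough standalone model of that prefix walk, counting leading `super`s against a chain of parent indices; the `parents` slice is an invented stand-in for `get_nearest_normal_module_parent`:

```rust
// parents[i] is the parent of module i, or None for the crate root.
fn resolve_super_prefix(start: usize, supers: usize, parents: &[Option<usize>])
                        -> Result<usize, String> {
    let mut module = start;
    for _ in 0..supers {
        match parents[module] {
            Some(parent) => module = parent,
            None => return Err("There are too many initial `super`s.".to_string()),
        }
    }
    Ok(module)
}

fn main() {
    // crate root (0) <- mod a (1) <- mod b (2)
    let parents = [None, Some(0), Some(1)];
    assert_eq!(resolve_super_prefix(2, 1, &parents), Ok(1)); // `super::` from b resolves to a
    assert_eq!(resolve_super_prefix(2, 2, &parents), Ok(0)); // `super::super::` reaches the root
    assert!(resolve_super_prefix(2, 3, &parents).is_err());  // one `super` too many
}
```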
@@ -1633,13 +1543,17 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                               module: Module<'a>,
                               name: Name,
                               namespace: Namespace,
-                              allow_private_imports: bool,
+                              use_lexical_scope: bool,
                               record_used: bool)
                               -> ResolveResult<&'a NameBinding<'a>> {
         debug!("(resolving name in module) resolving `{}` in `{}`", name, module_to_string(module));
 
-        build_reduced_graph::populate_module_if_necessary(self, module);
-        module.resolve_name(name, namespace, allow_private_imports).and_then(|binding| {
+        self.populate_module_if_necessary(module);
+        match use_lexical_scope {
+            true => module.resolve_name_in_lexical_scope(name, namespace)
+                          .map(Success).unwrap_or(Failed(None)),
+            false => module.resolve_name(name, namespace, false),
+        }.and_then(|binding| {
             if record_used {
                 self.record_use(name, namespace, binding);
             }
@@ -1647,18 +1561,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
         })
     }
 
-    fn report_unresolved_imports(&mut self, module_: Module<'a>) {
-        for import in module_.unresolved_imports.borrow().iter() {
-            resolve_error(self, import.span, ResolutionError::UnresolvedImport(None));
-            break;
-        }
-
-        // Descend into children and anonymous children.
-        for (_, module_) in module_.module_children.borrow().iter() {
-            self.report_unresolved_imports(module_);
-        }
-    }
-
     // AST resolution
     //
     // We maintain a list of value ribs and type ribs.
@@ -1698,7 +1600,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
 
     /// Searches the current set of local scopes for labels.
     /// Stops after meeting a closure.
-    fn search_label(&self, name: Name) -> Option<DefLike> {
+    fn search_label(&self, name: Name) -> Option<Def> {
         for rib in self.label_ribs.iter().rev() {
             match rib.kind {
                 NormalRibKind => {
@@ -1723,15 +1625,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
         intravisit::walk_crate(self, krate);
     }
 
-    fn check_if_primitive_type_name(&self, name: Name, span: Span) {
-        if let Some(_) = self.primitive_type_table.primitive_types.get(&name) {
-            span_err!(self.session,
-                      span,
-                      E0317,
-                      "user-defined types or type parameters cannot shadow the primitive types");
-        }
-    }
-
     fn resolve_item(&mut self, item: &Item) {
         let name = item.name;
 
@@ -1741,8 +1634,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             ItemEnum(_, ref generics) |
             ItemTy(_, ref generics) |
             ItemStruct(_, ref generics) => {
-                self.check_if_primitive_type_name(name, item.span);
-
                 self.with_type_parameter_rib(HasTypeParameters(generics, TypeSpace, ItemRibKind),
                                              |this| intravisit::walk_item(this, item));
             }
@@ -1763,8 +1654,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             }
 
             ItemTrait(_, ref generics, ref bounds, ref trait_items) => {
-                self.check_if_primitive_type_name(name, item.span);
-
                 // Create a new rib for the trait-wide type parameters.
                 self.with_type_parameter_rib(HasTypeParameters(generics,
                                                                TypeSpace,
@@ -1799,8 +1688,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                                     });
                                 }
                                 hir::TypeTraitItem(..) => {
-                                    this.check_if_primitive_type_name(trait_item.name,
-                                                                      trait_item.span);
                                     this.with_type_parameter_rib(NoTypeParameters, |this| {
                                         intravisit::walk_trait_item(this, trait_item)
                                     });
@@ -1824,36 +1711,17 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             }
 
             ItemUse(ref view_path) => {
-                // check for imports shadowing primitive types
-                let check_rename = |this: &Self, id, name| {
-                    match this.def_map.borrow().get(&id).map(|d| d.full_def()) {
-                        Some(Def::Enum(..)) | Some(Def::TyAlias(..)) | Some(Def::Struct(..)) |
-                        Some(Def::Trait(..)) | None => {
-                            this.check_if_primitive_type_name(name, item.span);
-                        }
-                        _ => {}
-                    }
-                };
-
                 match view_path.node {
-                    hir::ViewPathSimple(name, _) => {
-                        check_rename(self, item.id, name);
-                    }
                     hir::ViewPathList(ref prefix, ref items) => {
-                        for item in items {
-                            if let Some(name) = item.node.rename() {
-                                check_rename(self, item.node.id(), name);
-                            }
-                        }
-
                         // Resolve prefix of an import with empty braces (issue #28388)
                         if items.is_empty() && !prefix.segments.is_empty() {
                             match self.resolve_crate_relative_path(prefix.span,
                                                                    &prefix.segments,
                                                                    TypeNS) {
-                                Some((def, lp)) =>
-                                    self.record_def(item.id, PathResolution::new(def, lp, 0)),
-                                None => {
+                                Ok(def) =>
+                                    self.record_def(item.id, PathResolution::new(def, 0)),
+                                Err(true) => self.record_def(item.id, err_path_resolution()),
+                                Err(false) => {
                                     resolve_error(self,
                                                   prefix.span,
                                                   ResolutionError::FailedToResolve(
@@ -1892,13 +1760,9 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                     seen_bindings.insert(name);
 
                     // plain insert (no renaming)
-                    function_type_rib.bindings
-                                     .insert(name,
-                                             DlDef(Def::TyParam(space,
-                                                              index as u32,
-                                                              self.ast_map
-                                                                  .local_def_id(type_parameter.id),
-                                                              name)));
+                    let def_id = self.ast_map.local_def_id(type_parameter.id);
+                    let def = Def::TyParam(space, index as u32, def_id, name);
+                    function_type_rib.bindings.insert(name, def);
                 }
                 self.type_ribs.push(function_type_rib);
             }
@@ -1976,7 +1840,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                                trait_path: &Path,
                                path_depth: usize)
                                -> Result<PathResolution, ()> {
-        if let Some(path_res) = self.resolve_path(id, trait_path, path_depth, TypeNS, true) {
+        self.resolve_path(id, trait_path, path_depth, TypeNS).and_then(|path_res| {
             if let Def::Trait(_) = path_res.base_def {
                 debug!("(resolving trait) found trait def: {:?}", path_res);
                 Ok(path_res)
@@ -1988,14 +1852,18 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                                                                                       path_depth)));
 
                 // If it's a typedef, give a note
-                if let Def::TyAlias(..) = path_res.base_def {
-                    err.span_note(trait_path.span,
+                if let Def::TyAlias(did) = path_res.base_def {
+                    err.fileline_note(trait_path.span,
                                   "`type` aliases cannot be used for traits");
+                    if let Some(sp) = self.ast_map.span_if_local(did) {
+                        err.span_note(sp, "type defined here");
+                    }
                 }
                 err.emit();
-                Err(())
+                Err(true)
             }
-        } else {
+        }).map_err(|error_reported| {
+            if error_reported { return }
 
             // find possible candidates
             let trait_name = trait_path.segments.last().unwrap().identifier.name;
@@ -2018,28 +1886,27 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                 );
 
             resolve_error(self, trait_path.span, error);
-            Err(())
-        }
+        })
     }
 
     fn resolve_generics(&mut self, generics: &Generics) {
-        for type_parameter in generics.ty_params.iter() {
-            self.check_if_primitive_type_name(type_parameter.name, type_parameter.span);
-        }
         for predicate in &generics.where_clause.predicates {
             match predicate {
                 &hir::WherePredicate::BoundPredicate(_) |
                 &hir::WherePredicate::RegionPredicate(_) => {}
                 &hir::WherePredicate::EqPredicate(ref eq_pred) => {
-                    let path_res = self.resolve_path(eq_pred.id, &eq_pred.path, 0, TypeNS, true);
-                    if let Some(PathResolution { base_def: Def::TyParam(..), .. }) = path_res {
-                        self.record_def(eq_pred.id, path_res.unwrap());
-                    } else {
-                        resolve_error(self,
-                                      eq_pred.span,
-                                      ResolutionError::UndeclaredAssociatedType);
+                    self.resolve_path(eq_pred.id, &eq_pred.path, 0, TypeNS).and_then(|path_res| {
+                        if let PathResolution { base_def: Def::TyParam(..), .. } = path_res {
+                            Ok(self.record_def(eq_pred.id, path_res))
+                        } else {
+                            Err(false)
+                        }
+                    }).map_err(|error_reported| {
                         self.record_def(eq_pred.id, err_path_resolution());
-                    }
+                        if error_reported { return }
+                        let error_variant = ResolutionError::UndeclaredAssociatedType;
+                        resolve_error(self, eq_pred.span, error_variant);
+                    }).unwrap_or(());
                 }
             }
         }
@@ -2087,7 +1954,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
 
         // plain insert (no renaming, types are not currently hygienic....)
         let name = special_names::type_self;
-        self_type_rib.bindings.insert(name, DlDef(self_def));
+        self_type_rib.bindings.insert(name, self_def);
         self.type_ribs.push(self_type_rib);
         f(self);
         if !self.resolved {
@@ -2297,8 +2164,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                 let resolution = match self.resolve_possibly_assoc_item(ty.id,
                                                                         maybe_qself.as_ref(),
                                                                         path,
-                                                                        TypeNS,
-                                                                        true) {
+                                                                        TypeNS) {
                     // `<T>::a::b::c` is resolved by typeck alone.
                     TypecheckRequired => {
                         // Resolve embedded types.
@@ -2310,21 +2176,18 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
 
                 // This is a path in the type namespace. Walk through scopes
                 // looking for it.
-                match resolution {
-                    Some(def) => {
-                        // Write the result into the def map.
-                        debug!("(resolving type) writing resolution for `{}` (id {}) = {:?}",
-                               path_names_to_string(path, 0),
-                               ty.id,
-                               def);
-                        self.record_def(ty.id, def);
-                    }
-                    None => {
-                        self.record_def(ty.id, err_path_resolution());
-
-                        // Keep reporting some errors even if they're ignored above.
-                        self.resolve_path(ty.id, path, 0, TypeNS, true);
+                if let Some(def) = resolution {
+                    // Write the result into the def map.
+                    debug!("(resolving type) writing resolution for `{}` (id {}) = {:?}",
+                           path_names_to_string(path, 0), ty.id, def);
+                    self.record_def(ty.id, def);
+                } else {
+                    self.record_def(ty.id, err_path_resolution());
 
+                    // Keep reporting some errors even if they're ignored above.
+                    if let Err(true) = self.resolve_path(ty.id, path, 0, TypeNS) {
+                        // `resolve_path` already reported the error
+                    } else {
                         let kind = if maybe_qself.is_some() {
                             "associated type"
                         } else {
@@ -2385,7 +2248,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                        // pattern that binds them
                        bindings_list: &mut HashMap<Name, NodeId>) {
         let pat_id = pattern.id;
-        walk_pat(pattern, |pattern| {
+        pattern.walk(|pattern| {
             match pattern.node {
                 PatKind::Ident(binding_mode, ref path1, ref at_rhs) => {
                     // The meaning of PatKind::Ident with no type parameters
@@ -2401,9 +2264,8 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                     let ident = path1.node;
                     let renamed = ident.name;
 
-                    match self.resolve_bare_identifier_pattern(ident.unhygienic_name,
-                                                               pattern.span) {
-                        FoundStructOrEnumVariant(def, lp) if const_ok => {
+                    match self.resolve_bare_identifier_pattern(ident, pattern.span) {
+                        FoundStructOrEnumVariant(def) if const_ok => {
                             debug!("(resolving pattern) resolving `{}` to struct or enum variant",
                                    renamed);
 
@@ -2413,7 +2275,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                             self.record_def(pattern.id,
                                             PathResolution {
                                                 base_def: def,
-                                                last_private: lp,
                                                 depth: 0,
                                             });
                         }
@@ -2426,18 +2287,17 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                             );
                             self.record_def(pattern.id, err_path_resolution());
                         }
-                        FoundConst(def, lp, _) if const_ok => {
+                        FoundConst(def, _) if const_ok => {
                             debug!("(resolving pattern) resolving `{}` to constant", renamed);
 
                             self.enforce_default_binding_mode(pattern, binding_mode, "a constant");
                             self.record_def(pattern.id,
                                             PathResolution {
                                                 base_def: def,
-                                                last_private: lp,
                                                 depth: 0,
                                             });
                         }
-                        FoundConst(def, _, name) => {
+                        FoundConst(def, name) => {
                             resolve_error(
                                 self,
                                 pattern.span,
@@ -2459,7 +2319,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                             self.record_def(pattern.id,
                                             PathResolution {
                                                 base_def: def,
-                                                last_private: LastMod(AllPublic),
                                                 depth: 0,
                                             });
 
@@ -2471,7 +2330,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                             if !bindings_list.contains_key(&renamed) {
                                 let this = &mut *self;
                                 let last_rib = this.value_ribs.last_mut().unwrap();
-                                last_rib.bindings.insert(renamed, DlDef(def));
+                                last_rib.bindings.insert(renamed, def);
                                 bindings_list.insert(renamed, pat_id);
                             } else if mode == ArgumentIrrefutableMode &&
                                bindings_list.contains_key(&renamed) {
@@ -2504,16 +2363,15 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                     let resolution = match self.resolve_possibly_assoc_item(pat_id,
                                                                             None,
                                                                             path,
-                                                                            ValueNS,
-                                                                            false) {
+                                                                            ValueNS) {
                         // The below shouldn't happen because all
                         // qualified paths should be in PatKind::QPath.
                         TypecheckRequired =>
-                            self.session.span_bug(path.span,
-                                                  "resolve_possibly_assoc_item claimed that a path \
-                                                   in PatKind::Path or PatKind::TupleStruct \
-                                                   requires typecheck to resolve, but qualified \
-                                                   paths should be PatKind::QPath"),
+                            span_bug!(path.span,
+                                      "resolve_possibly_assoc_item claimed that a path \
+                                       in PatKind::Path or PatKind::TupleStruct \
+                                       requires typecheck to resolve, but qualified \
+                                       paths should be PatKind::QPath"),
                         ResolveAttempt(resolution) => resolution,
                     };
                     if let Some(path_res) = resolution {
@@ -2577,8 +2435,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                     let resolution = match self.resolve_possibly_assoc_item(pat_id,
                                                                             Some(qself),
                                                                             path,
-                                                                            ValueNS,
-                                                                            false) {
+                                                                            ValueNS) {
                         TypecheckRequired => {
                             // All `<T>::CONST` should end up here, and will
                             // require use of the trait map to resolve
@@ -2628,12 +2485,12 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                 }
 
                 PatKind::Struct(ref path, _, _) => {
-                    match self.resolve_path(pat_id, path, 0, TypeNS, false) {
-                        Some(definition) => {
+                    match self.resolve_path(pat_id, path, 0, TypeNS) {
+                        Ok(definition) => {
                             self.record_def(pattern.id, definition);
                         }
-                        result => {
-                            debug!("(resolving pattern) didn't find struct def: {:?}", result);
+                        Err(true) => self.record_def(pattern.id, err_path_resolution()),
+                        Err(false) => {
                             resolve_error(
                                 self,
                                 path.span,
@@ -2658,50 +2515,21 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
         });
     }
 
-    fn resolve_bare_identifier_pattern(&mut self,
-                                       name: Name,
-                                       span: Span)
+    fn resolve_bare_identifier_pattern(&mut self, ident: hir::Ident, span: Span)
                                        -> BareIdentifierPatternResolution {
-        let module = self.current_module;
-        match self.resolve_item_in_lexical_scope(module, name, ValueNS, true) {
-            Success(binding) => {
-                debug!("(resolve bare identifier pattern) succeeded in finding {} at {:?}",
-                       name,
-                       binding);
-                match binding.def() {
-                    None => {
-                        panic!("resolved name in the value namespace to a set of name bindings \
-                                with no def?!");
-                    }
-                    // For the two success cases, this lookup can be
-                    // considered as not having a private component because
-                    // the lookup happened only within the current module.
-                    Some(def @ Def::Variant(..)) | Some(def @ Def::Struct(..)) => {
-                        return FoundStructOrEnumVariant(def, LastMod(AllPublic));
-                    }
-                    Some(def @ Def::Const(..)) | Some(def @ Def::AssociatedConst(..)) => {
-                        return FoundConst(def, LastMod(AllPublic), name);
-                    }
-                    Some(Def::Static(..)) => {
-                        resolve_error(self, span, ResolutionError::StaticVariableReference);
-                        return BareIdentifierPatternUnresolved;
-                    }
-                    _ => return BareIdentifierPatternUnresolved
-                }
+        match self.resolve_ident_in_lexical_scope(ident, ValueNS, true)
+                  .map(LexicalScopeBinding::def) {
+            Some(def @ Def::Variant(..)) | Some(def @ Def::Struct(..)) => {
+                FoundStructOrEnumVariant(def)
             }
-
-            Indeterminate => return BareIdentifierPatternUnresolved,
-            Failed(err) => {
-                match err {
-                    Some((span, msg)) => {
-                        resolve_error(self, span, ResolutionError::FailedToResolve(&msg));
-                    }
-                    None => (),
-                }
-
-                debug!("(resolve bare identifier pattern) failed to find {}", name);
-                return BareIdentifierPatternUnresolved;
+            Some(def @ Def::Const(..)) | Some(def @ Def::AssociatedConst(..)) => {
+                FoundConst(def, ident.unhygienic_name)
+            }
+            Some(Def::Static(..)) => {
+                resolve_error(self, span, ResolutionError::StaticVariableReference);
+                BareIdentifierPatternUnresolved
             }
+            _ => BareIdentifierPatternUnresolved,
         }
     }
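The rewritten resolve_bare_identifier_pattern above boils down to classifying the def returned by a single lexical-scope lookup. The following standalone sketch shows that shape with simplified stand-in types; Def, BareIdentResolution and classify are illustrative names here, not the actual rustc_resolve items:

    // Editorial sketch, not part of the commit: simplified stand-ins only.
    #[allow(dead_code)]
    #[derive(Debug)]
    enum Def { Variant, Struct, Const, AssociatedConst, Static, Local }

    #[derive(Debug)]
    enum BareIdentResolution {
        FoundStructOrEnumVariant(Def),
        FoundConst(Def, &'static str),
        Unresolved,
    }

    // Classify whatever the lexical-scope lookup returned; `None` means the
    // identifier is a fresh binding rather than a reference to an existing item.
    fn classify(lookup: Option<Def>, name: &'static str) -> BareIdentResolution {
        match lookup {
            Some(def @ Def::Variant) | Some(def @ Def::Struct) =>
                BareIdentResolution::FoundStructOrEnumVariant(def),
            Some(def @ Def::Const) | Some(def @ Def::AssociatedConst) =>
                BareIdentResolution::FoundConst(def, name),
            // A `static` in pattern position is an error in the real resolver;
            // this toy version just falls through to "unresolved".
            _ => BareIdentResolution::Unresolved,
        }
    }

    fn main() {
        println!("{:?}", classify(Some(Def::Struct), "x"));
        println!("{:?}", classify(Some(Def::Const), "N"));
        println!("{:?}", classify(None, "y"));
    }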
 
@@ -2710,8 +2538,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                                    id: NodeId,
                                    maybe_qself: Option<&hir::QSelf>,
                                    path: &Path,
-                                   namespace: Namespace,
-                                   check_ribs: bool)
+                                   namespace: Namespace)
                                    -> AssocItemResolveResult {
         let max_assoc_types;
 
@@ -2730,14 +2557,14 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
         }
 
         let mut resolution = self.with_no_errors(|this| {
-            this.resolve_path(id, path, 0, namespace, check_ribs)
+            this.resolve_path(id, path, 0, namespace).ok()
         });
         for depth in 1..max_assoc_types {
             if resolution.is_some() {
                 break;
             }
             self.with_no_errors(|this| {
-                resolution = this.resolve_path(id, path, depth, TypeNS, true);
+                resolution = this.resolve_path(id, path, depth, TypeNS).ok();
             });
         }
         if let Some(Def::Mod(_)) = resolution.map(|r| r.base_def) {
@@ -2747,21 +2574,14 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
         ResolveAttempt(resolution)
     }
 
-    /// If `check_ribs` is true, checks the local definitions first; i.e.
-    /// doesn't skip straight to the containing module.
     /// Skips `path_depth` trailing segments, which is also reflected in the
-    /// returned value. See `middle::def::PathResolution` for more info.
-    pub fn resolve_path(&mut self,
-                        id: NodeId,
-                        path: &Path,
-                        path_depth: usize,
-                        namespace: Namespace,
-                        check_ribs: bool)
-                        -> Option<PathResolution> {
+    /// returned value. See `hir::def::PathResolution` for more info.
+    fn resolve_path(&mut self, id: NodeId, path: &Path, path_depth: usize, namespace: Namespace)
+                    -> Result<PathResolution, bool /* true if an error was reported */ > {
         let span = path.span;
         let segments = &path.segments[..path.segments.len() - path_depth];
 
-        let mk_res = |(def, lp)| PathResolution::new(def, lp, path_depth);
+        let mk_res = |def| PathResolution::new(def, path_depth);
 
         if path.global {
             let def = self.resolve_crate_relative_path(span, segments, namespace);
@@ -2770,18 +2590,40 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
 
         // Try to find a path to an item in a module.
         let last_ident = segments.last().unwrap().identifier;
-        if segments.len() <= 1 {
-            let unqualified_def = self.resolve_identifier(last_ident, namespace, check_ribs, true);
-            return unqualified_def.and_then(|def| self.adjust_local_def(def, span))
-                                  .map(|def| {
-                                      PathResolution::new(def, LastMod(AllPublic), path_depth)
-                                  });
-        }
+        // Resolve a single identifier with fallback to primitive types
+        let resolve_identifier_with_fallback = |this: &mut Self, record_used| {
+            let def = this.resolve_identifier(last_ident, namespace, record_used);
+            match def {
+                None | Some(LocalDef{def: Def::Mod(..), ..}) if namespace == TypeNS =>
+                    this.primitive_type_table
+                        .primitive_types
+                        .get(&last_ident.unhygienic_name)
+                        .map_or(def, |prim_ty| Some(LocalDef::from_def(Def::PrimTy(*prim_ty)))),
+                _ => def
+            }
+        };
 
-        let unqualified_def = self.resolve_identifier(last_ident, namespace, check_ribs, false);
+        if segments.len() == 1 {
+            // In `a(::assoc_item)*` `a` cannot be a module. If `a` does resolve to a module we
+            // don't report an error right away, but try to fallback to a primitive type.
+            // So, we are still able to successfully resolve something like
+            //
+            // use std::u8; // bring module u8 in scope
+            // fn f() -> u8 { // OK, resolves to primitive u8, not to std::u8
+            //     u8::max_value() // OK, resolves to associated function <u8>::max_value,
+            //                     // not to non-existent std::u8::max_value
+            // }
+            //
+            // Such behavior is required for backward compatibility.
+            // The same fallback is used when `a` resolves to nothing.
+            let def = resolve_identifier_with_fallback(self, true).ok_or(false);
+            return def.and_then(|def| self.adjust_local_def(def, span).ok_or(true)).map(mk_res);
+        }
+
+        let unqualified_def = resolve_identifier_with_fallback(self, false);
         let def = self.resolve_module_relative_path(span, segments, namespace);
         match (def, unqualified_def) {
-            (Some((ref d, _)), Some(ref ud)) if *d == ud.def => {
+            (Ok(d), Some(ref ud)) if d == ud.def => {
                 self.session
                     .add_lint(lint::builtin::UNUSED_QUALIFICATIONS,
                               id,
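The resolve_path changes above replace Option<PathResolution> with Result<PathResolution, bool>, where the boolean records whether a diagnostic was already emitted so callers do not report the same failure twice. A minimal sketch of that convention, using hypothetical stand-in types rather than the real resolver API:

    // Editorial sketch, not part of the commit: `Err(true)` means "an error was
    // already reported, stay quiet"; `Err(false)` means the caller must report.
    type Resolution = &'static str;

    fn resolve_path(path: &str) -> Result<Resolution, bool> {
        match path {
            "ok" => Ok("resolved"),
            "reported" => Err(true),   // a diagnostic was already emitted here
            _ => Err(false),           // nothing reported yet
        }
    }

    fn caller(path: &str) {
        match resolve_path(path) {
            Ok(res) => println!("record {}", res),
            Err(true) => println!("record Def::Err, error already reported"),
            Err(false) => println!("record Def::Err and emit our own diagnostic"),
        }
    }

    fn main() {
        for p in &["ok", "reported", "missing"] {
            caller(p);
        }
    }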
@@ -2798,40 +2640,14 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
     fn resolve_identifier(&mut self,
                           identifier: hir::Ident,
                           namespace: Namespace,
-                          check_ribs: bool,
                           record_used: bool)
                           -> Option<LocalDef> {
         if identifier.name == special_idents::invalid.name {
             return Some(LocalDef::from_def(Def::Err));
         }
 
-        // First, check to see whether the name is a primitive type.
-        if namespace == TypeNS {
-            if let Some(&prim_ty) = self.primitive_type_table
-                                        .primitive_types
-                                        .get(&identifier.unhygienic_name) {
-                return Some(LocalDef::from_def(Def::PrimTy(prim_ty)));
-            }
-        }
-
-        if check_ribs {
-            match self.resolve_identifier_in_local_ribs(identifier, namespace, record_used) {
-                Some(def) => return Some(def),
-                None => {}
-            }
-        }
-
-        // Check the items.
-        let module = self.current_module;
-        let name = identifier.unhygienic_name;
-        match self.resolve_item_in_lexical_scope(module, name, namespace, record_used) {
-            Success(binding) => binding.def().map(LocalDef::from_def),
-            Failed(Some((span, msg))) => {
-                resolve_error(self, span, ResolutionError::FailedToResolve(&msg));
-                None
-            }
-            _ => None,
-        }
+        self.resolve_ident_in_lexical_scope(identifier, namespace, record_used)
+            .map(LexicalScopeBinding::local_def)
     }
 
     // Resolve a local definition, potentially adjusting for closures.
@@ -2844,7 +2660,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
         let mut def = local_def.def;
         match def {
             Def::Upvar(..) => {
-                self.session.span_bug(span, &format!("unexpected {:?} in bindings", def))
+                span_bug!(span, "unexpected {:?} in bindings", def)
             }
             Def::Local(_, node_id) => {
                 for rib in ribs {
@@ -2928,7 +2744,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                                     span: Span,
                                     segments: &[hir::PathSegment],
                                     namespace: Namespace)
-                                    -> Option<(Def, LastPrivate)> {
+                                    -> Result<Def, bool /* true if an error was reported */> {
         let module_path = segments.split_last()
                                   .unwrap()
                                   .1
@@ -2937,9 +2753,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                                   .collect::<Vec<_>>();
 
         let containing_module;
-        let last_private;
-        let current_module = self.current_module;
-        match self.resolve_module_path(current_module, &module_path, UseLexicalScope, span) {
+        match self.resolve_module_path(&module_path, UseLexicalScope, span) {
             Failed(err) => {
                 let (span, msg) = match err {
                     Some((span, msg)) => (span, msg),
@@ -2951,25 +2765,20 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                 };
 
                 resolve_error(self, span, ResolutionError::FailedToResolve(&msg));
-                return None;
+                return Err(true);
             }
-            Indeterminate => return None,
-            Success((resulting_module, resulting_last_private)) => {
+            Indeterminate => return Err(false),
+            Success(resulting_module) => {
                 containing_module = resulting_module;
-                last_private = resulting_last_private;
             }
         }
 
         let name = segments.last().unwrap().identifier.name;
         let result = self.resolve_name_in_module(containing_module, name, namespace, false, true);
-        let def = match result {
-            Success(binding) => {
-                let (def, lp) = binding.def_and_lp();
-                (def, last_private.or(lp))
-            }
-            _ => return None,
-        };
-        return Some(def);
+        result.success().map(|binding| {
+            self.check_privacy(containing_module, name, binding, span);
+            binding.def().unwrap()
+        }).ok_or(false)
     }
 
     /// Invariant: This must be called only during main resolution, not during
@@ -2978,7 +2787,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                                    span: Span,
                                    segments: &[hir::PathSegment],
                                    namespace: Namespace)
-                                   -> Option<(Def, LastPrivate)> {
+                                   -> Result<Def, bool /* true if an error was reported */> {
         let module_path = segments.split_last()
                                   .unwrap()
                                   .1
@@ -2989,12 +2798,10 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
         let root_module = self.graph_root;
 
         let containing_module;
-        let last_private;
         match self.resolve_module_path_from_root(root_module,
                                                  &module_path,
                                                  0,
-                                                 span,
-                                                 LastMod(AllPublic)) {
+                                                 span) {
             Failed(err) => {
                 let (span, msg) = match err {
                     Some((span, msg)) => (span, msg),
@@ -3006,73 +2813,22 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                 };
 
                 resolve_error(self, span, ResolutionError::FailedToResolve(&msg));
-                return None;
+                return Err(true);
             }
 
-            Indeterminate => return None,
+            Indeterminate => return Err(false),
 
-            Success((resulting_module, resulting_last_private)) => {
+            Success(resulting_module) => {
                 containing_module = resulting_module;
-                last_private = resulting_last_private;
             }
         }
 
         let name = segments.last().unwrap().identifier.name;
-        match self.resolve_name_in_module(containing_module, name, namespace, false, true) {
-            Success(binding) => {
-                let (def, lp) = binding.def_and_lp();
-                Some((def, last_private.or(lp)))
-            }
-            _ => None,
-        }
-    }
-
-    fn resolve_identifier_in_local_ribs(&mut self,
-                                        ident: hir::Ident,
-                                        namespace: Namespace,
-                                        record_used: bool)
-                                        -> Option<LocalDef> {
-        // Check the local set of ribs.
-        let name = match namespace { ValueNS => ident.name, TypeNS => ident.unhygienic_name };
-
-        for i in (0 .. self.get_ribs(namespace).len()).rev() {
-            if let Some(def_like) = self.get_ribs(namespace)[i].bindings.get(&name).cloned() {
-                match def_like {
-                    DlDef(def) => {
-                        debug!("(resolving path in local ribs) resolved `{}` to {:?} at {}",
-                               name,
-                               def,
-                               i);
-                        return Some(LocalDef {
-                            ribs: Some((namespace, i)),
-                            def: def,
-                        });
-                    }
-                    def_like => {
-                        debug!("(resolving path in local ribs) resolved `{}` to pseudo-def {:?}",
-                               name,
-                               def_like);
-                        return None;
-                    }
-                }
-            }
-
-            if let ModuleRibKind(module) = self.get_ribs(namespace)[i].kind {
-                if let Success(binding) = self.resolve_name_in_module(module,
-                                                                      ident.unhygienic_name,
-                                                                      namespace,
-                                                                      true,
-                                                                      record_used) {
-                    if let Some(def) = binding.def() {
-                        return Some(LocalDef::from_def(def));
-                    }
-                }
-                // We can only see through anonymous modules
-                if module.def.is_some() { return None; }
-            }
-        }
-
-        None
+        let result = self.resolve_name_in_module(containing_module, name, namespace, false, true);
+        result.success().map(|binding| {
+            self.check_privacy(containing_module, name, binding, span);
+            binding.def().unwrap()
+        }).ok_or(false)
     }
 
     fn with_no_errors<T, F>(&mut self, f: F) -> T
@@ -3103,20 +2859,16 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                                 span: Span,
                                 name_path: &[ast::Name])
                                 -> Option<Module<'a>> {
-            let root = this.current_module;
             let last_name = name_path.last().unwrap();
 
             if name_path.len() == 1 {
                 match this.primitive_type_table.primitive_types.get(last_name) {
                     Some(_) => None,
-                    None => this.current_module.resolve_name(*last_name, TypeNS, true).success()
+                    None => this.current_module.resolve_name_in_lexical_scope(*last_name, TypeNS)
                                                .and_then(NameBinding::module)
                 }
             } else {
-                match this.resolve_module_path(root, &name_path, UseLexicalScope, span) {
-                    Success((module, _)) => Some(module),
-                    _ => None,
-                }
+                this.resolve_module_path(&name_path, UseLexicalScope, span).success()
             }
         }
 
@@ -3169,7 +2921,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
 
         // Look for a method in the current self type's impl module.
         if let Some(module) = get_module(self, path.span, &name_path) {
-            if let Success(binding) = module.resolve_name(name, ValueNS, true) {
+            if let Some(binding) = module.resolve_name_in_lexical_scope(name, ValueNS) {
                 if let Some(Def::Method(did)) = binding.def() {
                     if is_static_method(self, did) {
                         return StaticMethod(path_names_to_string(&path, 0));
@@ -3227,8 +2979,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                 let resolution = match self.resolve_possibly_assoc_item(expr.id,
                                                                         maybe_qself.as_ref(),
                                                                         path,
-                                                                        ValueNS,
-                                                                        true) {
+                                                                        ValueNS) {
                     // `<T>::a::b::c` is resolved by typeck alone.
                     TypecheckRequired => {
                         let method_name = path.segments.last().unwrap().identifier.name;
@@ -3288,29 +3039,30 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                     // structs, which wouldn't result in this error.)
                     let path_name = path_names_to_string(path, 0);
                     let type_res = self.with_no_errors(|this| {
-                        this.resolve_path(expr.id, path, 0, TypeNS, false)
+                        this.resolve_path(expr.id, path, 0, TypeNS)
                     });
 
                     self.record_def(expr.id, err_path_resolution());
-                    match type_res.map(|r| r.base_def) {
-                        Some(Def::Struct(..)) => {
-                            let mut err = resolve_struct_error(self,
-                                expr.span,
-                                ResolutionError::StructVariantUsedAsFunction(&path_name));
-
-                            let msg = format!("did you mean to write: `{} {{ /* fields */ }}`?",
-                                              path_name);
-                            if self.emit_errors {
-                                err.fileline_help(expr.span, &msg);
-                            } else {
-                                err.span_help(expr.span, &msg);
-                            }
-                            err.emit();
-                        }
-                        _ => {
-                            // Keep reporting some errors even if they're ignored above.
-                            self.resolve_path(expr.id, path, 0, ValueNS, true);
 
+                    if let Ok(Def::Struct(..)) = type_res.map(|r| r.base_def) {
+                        let error_variant =
+                            ResolutionError::StructVariantUsedAsFunction(&path_name);
+                        let mut err = resolve_struct_error(self, expr.span, error_variant);
+
+                        let msg = format!("did you mean to write: `{} {{ /* fields */ }}`?",
+                                          path_name);
+
+                        if self.emit_errors {
+                            err.fileline_help(expr.span, &msg);
+                        } else {
+                            err.span_help(expr.span, &msg);
+                        }
+                        err.emit();
+                    } else {
+                        // Keep reporting some errors even if they're ignored above.
+                        if let Err(true) = self.resolve_path(expr.id, path, 0, ValueNS) {
+                            // `resolve_path` already reported the error
+                        } else {
                             let mut method_scope = false;
                             self.value_ribs.iter().rev().all(|rib| {
                                 method_scope = match rib.kind {
@@ -3356,10 +3108,8 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                                     let name_path = path.segments.iter()
                                                         .map(|seg| seg.identifier.name)
                                                         .collect::<Vec<_>>();
-                                    let current_module = self.current_module;
 
-                                    match self.resolve_module_path(current_module,
-                                                                   &name_path[..],
+                                    match self.resolve_module_path(&name_path[..],
                                                                    UseLexicalScope,
                                                                    expr.span) {
                                         Success(_) => {
@@ -3385,9 +3135,10 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                 // Resolve the path to the structure it goes to. We don't
                 // check to ensure that the path is actually a structure; that
                 // is checked later during typeck.
-                match self.resolve_path(expr.id, path, 0, TypeNS, false) {
-                    Some(definition) => self.record_def(expr.id, definition),
-                    None => {
+                match self.resolve_path(expr.id, path, 0, TypeNS) {
+                    Ok(definition) => self.record_def(expr.id, definition),
+                    Err(true) => self.record_def(expr.id, err_path_resolution()),
+                    Err(false) => {
                         debug!("(resolving expression) didn't find struct def",);
 
                         resolve_error(self,
@@ -3404,11 +3155,11 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
 
             ExprLoop(_, Some(label)) | ExprWhile(_, _, Some(label)) => {
                 self.with_label_rib(|this| {
-                    let def_like = DlDef(Def::Label(expr.id));
+                    let def = Def::Label(expr.id);
 
                     {
                         let rib = this.label_ribs.last_mut().unwrap();
-                        rib.bindings.insert(label.name, def_like);
+                        rib.bindings.insert(label.name, def);
                     }
 
                     intravisit::walk_expr(this, expr);
@@ -3423,17 +3174,16 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                                       label.span,
                                       ResolutionError::UndeclaredLabel(&label.node.name.as_str()))
                     }
-                    Some(DlDef(def @ Def::Label(_))) => {
+                    Some(def @ Def::Label(_)) => {
                         // Since this def is a label, it is never read.
                         self.record_def(expr.id,
                                         PathResolution {
                                             base_def: def,
-                                            last_private: LastMod(AllPublic),
                                             depth: 0,
                                         })
                     }
                     Some(_) => {
-                        self.session.span_bug(expr.span, "label wasn't mapped to a label def!")
+                        span_bug!(expr.span, "label wasn't mapped to a label def!")
                     }
                 }
             }
@@ -3477,46 +3227,45 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
         }
 
         let mut found_traits = Vec::new();
-        let mut search_module = self.current_module;
-        loop {
-            // Look for the current trait.
-            match self.current_trait_ref {
-                Some((trait_def_id, _)) => {
-                    if self.trait_item_map.contains_key(&(name, trait_def_id)) {
-                        add_trait_info(&mut found_traits, trait_def_id, name);
-                    }
-                }
-                None => {} // Nothing to do.
+        // Look for the current trait.
+        if let Some((trait_def_id, _)) = self.current_trait_ref {
+            if self.trait_item_map.contains_key(&(name, trait_def_id)) {
+                add_trait_info(&mut found_traits, trait_def_id, name);
             }
+        }
 
+        let mut search_module = self.current_module;
+        loop {
             // Look for trait children.
-            build_reduced_graph::populate_module_if_necessary(self, &search_module);
-
-            search_module.for_each_child(|_, ns, name_binding| {
-                if ns != TypeNS { return }
-                let trait_def_id = match name_binding.def() {
-                    Some(Def::Trait(trait_def_id)) => trait_def_id,
-                    Some(..) | None => return,
-                };
-                if self.trait_item_map.contains_key(&(name, trait_def_id)) {
-                    add_trait_info(&mut found_traits, trait_def_id, name);
-                    let trait_name = self.get_trait_name(trait_def_id);
-                    self.record_use(trait_name, TypeNS, name_binding);
+            let mut search_in_module = |module: Module<'a>| {
+                let mut traits = module.traits.borrow_mut();
+                if traits.is_none() {
+                    let mut collected_traits = Vec::new();
+                    module.for_each_child(|_, ns, binding| {
+                        if ns != TypeNS { return }
+                        if let Some(Def::Trait(_)) = binding.def() {
+                            collected_traits.push(binding);
+                        }
+                    });
+                    *traits = Some(collected_traits.into_boxed_slice());
                 }
-            });
 
-            // Look for shadowed traits.
-            for binding in search_module.shadowed_traits.borrow().iter() {
-                let did = binding.def().unwrap().def_id();
-                if self.trait_item_map.contains_key(&(name, did)) {
-                    add_trait_info(&mut found_traits, did, name);
-                    let trait_name = self.get_trait_name(did);
-                    self.record_use(trait_name, TypeNS, binding);
+                for binding in traits.as_ref().unwrap().iter() {
+                    let trait_def_id = binding.def().unwrap().def_id();
+                    if self.trait_item_map.contains_key(&(name, trait_def_id)) {
+                        add_trait_info(&mut found_traits, trait_def_id, name);
+                        let trait_name = self.get_trait_name(trait_def_id);
+                        self.record_use(trait_name, TypeNS, binding);
+                    }
                 }
-            }
+            };
+            search_in_module(search_module);
 
             match search_module.parent_link {
-                NoParentLink | ModuleParentLink(..) => break,
+                NoParentLink | ModuleParentLink(..) => {
+                    search_module.prelude.borrow().map(search_in_module);
+                    break;
+                }
                 BlockParentLink(parent_module, _) => {
                     search_module = parent_module;
                 }
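The trait-search hunk above replaces the per-call scan of a module's children with a per-module cache stored in module.traits, filled lazily on first use and reused afterwards. A self-contained sketch of that caching pattern, with simplified stand-ins for the real Module and binding types:

    // Editorial sketch, not part of the commit: simplified stand-in types only.
    use std::cell::RefCell;

    struct Module {
        children: Vec<String>,                  // stand-in for the module's bindings
        traits: RefCell<Option<Box<[String]>>>, // lazily collected trait names
    }

    impl Module {
        // Collect the traits once, then reuse the boxed slice on later lookups.
        fn traits_containing(&self, item: &str) -> Vec<String> {
            let mut traits = self.traits.borrow_mut();
            if traits.is_none() {
                let collected: Vec<String> = self.children.iter()
                    .filter(|name| name.starts_with("Trait")) // pretend these are trait defs
                    .cloned()
                    .collect();
                *traits = Some(collected.into_boxed_slice());
            }
            traits.as_ref().unwrap().iter()
                .filter(|t| t.contains(item)) // stand-in for the trait_item_map check
                .cloned()
                .collect()
        }
    }

    fn main() {
        let m = Module {
            children: vec!["TraitFoo".into(), "fn_bar".into(), "TraitFooBar".into()],
            traits: RefCell::new(None),
        };
        println!("{:?}", m.traits_containing("Foo")); // computes and caches
        println!("{:?}", m.traits_containing("Bar")); // reuses the cache
    }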
@@ -3546,7 +3295,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
         while let Some((in_module,
                         path_segments,
                         in_module_is_extern)) = worklist.pop() {
-            build_reduced_graph::populate_module_if_necessary(self, &in_module);
+            self.populate_module_if_necessary(in_module);
 
             in_module.for_each_child(|name, ns, name_binding| {
 
@@ -3601,7 +3350,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                             paths.push(segm);
                             paths
                         }
-                        _ => unreachable!(),
+                        _ => bug!(),
                     };
 
                     if !in_module_is_extern || name_binding.is_public() {
@@ -3621,18 +3370,12 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
 
     fn record_def(&mut self, node_id: NodeId, resolution: PathResolution) {
         debug!("(recording def) recording {:?} for {}", resolution, node_id);
-        assert!(match resolution.last_private {
-                    LastImport{..} => false,
-                    _ => true,
-                },
-                "Import should only be used for `use` directives");
-
         if let Some(prev_res) = self.def_map.borrow_mut().insert(node_id, resolution) {
             let span = self.ast_map.opt_span(node_id).unwrap_or(codemap::DUMMY_SP);
-            self.session.span_bug(span,
-                                  &format!("path resolved multiple times ({:?} before, {:?} now)",
-                                           prev_res,
-                                           resolution));
+            span_bug!(span,
+                      "path resolved multiple times ({:?} before, {:?} now)",
+                      prev_res,
+                      resolution);
         }
     }
 
@@ -3649,8 +3392,93 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             }
         }
     }
-}
 
+    fn is_visible(&self, binding: &'a NameBinding<'a>, parent: Module<'a>) -> bool {
+        binding.is_public() || parent.is_ancestor_of(self.current_module)
+    }
+
+    fn check_privacy(&mut self,
+                     module: Module<'a>,
+                     name: Name,
+                     binding: &'a NameBinding<'a>,
+                     span: Span) {
+        if !self.is_visible(binding, module) {
+            self.privacy_errors.push(PrivacyError(span, name, binding));
+        }
+    }
+
+    fn report_privacy_errors(&self) {
+        if self.privacy_errors.len() == 0 { return }
+        let mut reported_spans = HashSet::new();
+        for &PrivacyError(span, name, binding) in &self.privacy_errors {
+            if !reported_spans.insert(span) { continue }
+            if binding.is_extern_crate() {
+                // Warn when using an inaccessible extern crate.
+                let node_id = binding.module().unwrap().extern_crate_id.unwrap();
+                let msg = format!("extern crate `{}` is private", name);
+                self.session.add_lint(lint::builtin::INACCESSIBLE_EXTERN_CRATE, node_id, span, msg);
+            } else {
+                let def = binding.def().unwrap();
+                self.session.span_err(span, &format!("{} `{}` is private", def.kind_name(), name));
+            }
+        }
+    }
+
+    fn report_conflict(&self,
+                       parent: Module,
+                       name: Name,
+                       ns: Namespace,
+                       binding: &NameBinding,
+                       old_binding: &NameBinding) {
+        // Error on the second of two conflicting names
+        if old_binding.span.unwrap().lo > binding.span.unwrap().lo {
+            return self.report_conflict(parent, name, ns, old_binding, binding);
+        }
+
+        let container = match parent.def {
+            Some(Def::Mod(_)) => "module",
+            Some(Def::Trait(_)) => "trait",
+            None => "block",
+            _ => "enum",
+        };
+
+        let (participle, noun) = match old_binding.is_import() || old_binding.is_extern_crate() {
+            true => ("imported", "import"),
+            false => ("defined", "definition"),
+        };
+
+        let span = binding.span.unwrap();
+        let msg = {
+            let kind = match (ns, old_binding.module()) {
+                (ValueNS, _) => "a value",
+                (TypeNS, Some(module)) if module.extern_crate_id.is_some() => "an extern crate",
+                (TypeNS, Some(module)) if module.is_normal() => "a module",
+                (TypeNS, Some(module)) if module.is_trait() => "a trait",
+                (TypeNS, _) => "a type",
+            };
+            format!("{} named `{}` has already been {} in this {}",
+                    kind, name, participle, container)
+        };
+
+        let mut err = match (old_binding.is_extern_crate(), binding.is_extern_crate()) {
+            (true, true) => struct_span_err!(self.session, span, E0259, "{}", msg),
+            (true, _) | (_, true) if binding.is_import() || old_binding.is_import() =>
+                struct_span_err!(self.session, span, E0254, "{}", msg),
+            (true, _) | (_, true) => struct_span_err!(self.session, span, E0260, "{}", msg),
+            _ => match (old_binding.is_import(), binding.is_import()) {
+                (false, false) => struct_span_err!(self.session, span, E0428, "{}", msg),
+                (true, true) => struct_span_err!(self.session, span, E0252, "{}", msg),
+                _ => struct_span_err!(self.session, span, E0255, "{}", msg),
+            },
+        };
+
+        let span = old_binding.span.unwrap();
+        if span != codemap::DUMMY_SP {
+            err.span_note(span, &format!("previous {} of `{}` here", noun, name));
+        }
+        err.emit();
+    }
+}
 
 fn names_to_string(names: &[Name]) -> String {
     let mut first = true;
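The additions above collect privacy violations into privacy_errors during resolution and report them in a single pass afterwards, emitting at most one diagnostic per span. A small sketch of that collect-then-report pattern, assuming simplified Span and Resolver stand-ins rather than the real Session and NameBinding types:

    // Editorial sketch, not part of the commit: simplified stand-in types only.
    use std::collections::HashSet;

    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct Span(u32);

    struct PrivacyError {
        span: Span,
        name: &'static str,
    }

    struct Resolver {
        privacy_errors: Vec<PrivacyError>,
    }

    impl Resolver {
        // Called during resolution: remember the error instead of emitting it,
        // so speculative resolutions do not produce spurious diagnostics.
        fn check_privacy(&mut self, is_visible: bool, span: Span, name: &'static str) {
            if !is_visible {
                self.privacy_errors.push(PrivacyError { span: span, name: name });
            }
        }

        // Called once after resolution: emit each error, but only once per span.
        fn report_privacy_errors(&self) {
            let mut reported_spans = HashSet::new();
            for err in &self.privacy_errors {
                if !reported_spans.insert(err.span) {
                    continue; // this span was already reported
                }
                println!("error at {:?}: `{}` is private", err.span, err.name);
            }
        }
    }

    fn main() {
        let mut r = Resolver { privacy_errors: Vec::new() };
        r.check_privacy(false, Span(1), "foo");
        r.check_privacy(false, Span(1), "foo"); // duplicate span, reported once
        r.check_privacy(true, Span(2), "bar");  // visible, nothing recorded
        r.report_privacy_errors();
    }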
@@ -3700,7 +3528,7 @@ fn show_candidates(session: &mut DiagnosticBuilder,
             if paths.len() == 1 {
                 session.fileline_help(
                     span,
-                    &format!("you can to import it into scope: `use {};`.",
+                    &format!("you can import it into scope: `use {};`.",
                         &path_strings[0]),
                 );
             } else {
@@ -3764,7 +3592,6 @@ fn module_to_string(module: Module) -> String {
 fn err_path_resolution() -> PathResolution {
     PathResolution {
         base_def: Def::Err,
-        last_private: LastMod(AllPublic),
         depth: 0,
     }
 }
@@ -3775,7 +3602,6 @@ pub struct CrateMap {
     pub freevars: FreevarMap,
     pub export_map: ExportMap,
     pub trait_map: TraitMap,
-    pub external_exports: ExternalExports,
     pub glob_map: Option<GlobMap>,
 }
 
@@ -3806,13 +3632,13 @@ pub fn resolve_crate<'a, 'tcx>(session: &'a Session,
     resolver.resolve_crate(krate);
 
     check_unused::check_crate(&mut resolver, krate);
+    resolver.report_privacy_errors();
 
     CrateMap {
         def_map: resolver.def_map,
         freevars: resolver.freevars,
         export_map: resolver.export_map,
         trait_map: resolver.trait_map,
-        external_exports: resolver.external_exports,
         glob_map: if resolver.make_glob_map {
             Some(resolver.glob_map)
         } else {
@@ -3829,18 +3655,18 @@ pub fn resolve_crate<'a, 'tcx>(session: &'a Session,
 /// preserving the ribs + current module. This allows resolve_path
 /// calls to be made with the correct scope info. The node in the
 /// callback corresponds to the current node in the walk.
-pub fn create_resolver<'a, 'tcx>(session: &'a Session,
-                                 ast_map: &'a hir_map::Map<'tcx>,
-                                 krate: &'a Crate,
-                                 make_glob_map: MakeGlobMap,
-                                 arenas: &'a ResolverArenas<'a>,
-                                 callback: Option<Box<Fn(hir_map::Node, &mut bool) -> bool>>)
-                                 -> Resolver<'a, 'tcx> {
+fn create_resolver<'a, 'tcx>(session: &'a Session,
+                             ast_map: &'a hir_map::Map<'tcx>,
+                             krate: &'a Crate,
+                             make_glob_map: MakeGlobMap,
+                             arenas: &'a ResolverArenas<'a>,
+                             callback: Option<Box<Fn(hir_map::Node, &mut bool) -> bool>>)
+                             -> Resolver<'a, 'tcx> {
     let mut resolver = Resolver::new(session, ast_map, make_glob_map, arenas);
 
     resolver.callback = callback;
 
-    build_reduced_graph::build_reduced_graph(&mut resolver, krate);
+    resolver.build_reduced_graph(krate);
 
     resolve_imports::resolve_imports(&mut resolver);
 
index c068ff258b0e7b9c783c253aee887bcce4f1d1b4..1404b8cf3adf8b422fcd61f3627f93fa6f72c43a 100644 (file)
@@ -13,19 +13,16 @@ use self::ImportDirectiveSubclass::*;
 use DefModifiers;
 use Module;
 use Namespace::{self, TypeNS, ValueNS};
-use {NameBinding, NameBindingKind};
+use {NameBinding, NameBindingKind, PrivacyError};
 use ResolveResult;
 use ResolveResult::*;
 use Resolver;
-use UseLexicalScopeFlag;
+use UseLexicalScopeFlag::DontUseLexicalScope;
 use {names_to_string, module_to_string};
 use {resolve_error, ResolutionError};
 
-use build_reduced_graph;
-
 use rustc::lint;
-use rustc::middle::def::*;
-use rustc::middle::privacy::*;
+use rustc::hir::def::*;
 
 use syntax::ast::{NodeId, Name};
 use syntax::attr::AttrMetaMethods;
@@ -33,53 +30,48 @@ use syntax::codemap::Span;
 use syntax::util::lev_distance::find_best_match_for_name;
 
 use std::mem::replace;
+use std::cell::{Cell, RefCell};
 
 /// Contains data for specific types of import directives.
-#[derive(Copy, Clone,Debug)]
+#[derive(Clone, Debug)]
 pub enum ImportDirectiveSubclass {
-    SingleImport(Name /* target */, Name /* source */),
+    SingleImport {
+        target: Name,
+        source: Name,
+        type_determined: Cell<bool>,
+        value_determined: Cell<bool>,
+    },
     GlobImport,
 }
 
-/// Whether an import can be shadowed by another import.
-#[derive(Debug,PartialEq,Clone,Copy)]
-pub enum Shadowable {
-    Always,
-    Never,
+impl ImportDirectiveSubclass {
+    pub fn single(target: Name, source: Name) -> Self {
+        SingleImport {
+            target: target,
+            source: source,
+            type_determined: Cell::new(false),
+            value_determined: Cell::new(false),
+        }
+    }
 }
 
 /// One import directive.
 #[derive(Debug,Clone)]
-pub struct ImportDirective {
-    pub module_path: Vec<Name>,
-    pub subclass: ImportDirectiveSubclass,
-    pub span: Span,
-    pub id: NodeId,
-    pub is_public: bool, // see note in ImportResolutionPerNamespace about how to use this
-    pub shadowable: Shadowable,
+pub struct ImportDirective<'a> {
+    module_path: Vec<Name>,
+    target_module: Cell<Option<Module<'a>>>, // the resolution of `module_path`
+    subclass: ImportDirectiveSubclass,
+    span: Span,
+    id: NodeId,
+    is_public: bool, // see note in ImportResolutionPerNamespace about how to use this
+    is_prelude: bool,
 }
 
-impl ImportDirective {
-    pub fn new(module_path: Vec<Name>,
-               subclass: ImportDirectiveSubclass,
-               span: Span,
-               id: NodeId,
-               is_public: bool,
-               shadowable: Shadowable)
-               -> ImportDirective {
-        ImportDirective {
-            module_path: module_path,
-            subclass: subclass,
-            span: span,
-            id: id,
-            is_public: is_public,
-            shadowable: shadowable,
-        }
-    }
-
+impl<'a> ImportDirective<'a> {
     // Given the binding to which this directive resolves in a particular namespace,
     // this returns the binding for the name this directive defines in that namespace.
-    fn import<'a>(&self, binding: &'a NameBinding<'a>) -> NameBinding<'a> {
+    fn import(&self, binding: &'a NameBinding<'a>, privacy_error: Option<Box<PrivacyError<'a>>>)
+              -> NameBinding<'a> {
         let mut modifiers = match self.is_public {
             true => DefModifiers::PUBLIC | DefModifiers::IMPORTABLE,
             false => DefModifiers::empty(),
@@ -87,12 +79,13 @@ impl ImportDirective {
         if let GlobImport = self.subclass {
             modifiers = modifiers | DefModifiers::GLOB_IMPORTED;
         }
-        if self.shadowable == Shadowable::Always {
-            modifiers = modifiers | DefModifiers::PRELUDE;
-        }
 
         NameBinding {
-            kind: NameBindingKind::Import { binding: binding, id: self.id },
+            kind: NameBindingKind::Import {
+                binding: binding,
+                id: self.id,
+                privacy_error: privacy_error,
+            },
             span: Some(self.span),
             modifiers: modifiers,
         }
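The large hunk that follows replaces the old outstanding_references counter with a small three-state tracker, SingleImports, recording whether zero, exactly one, or more than one single import may still define a name. A standalone sketch of those state transitions, with a plain u32 standing in for an import directive:

    // Editorial sketch, not part of the commit: a toy version of the tracker.
    #[derive(Debug)]
    enum SingleImports {
        None,          // no single import can define the name
        MaybeOne(u32), // exactly one candidate single import (here just an id)
        AtLeastOne,    // two or more candidates: at least one will define the name
    }

    impl SingleImports {
        fn add_directive(&mut self, id: u32) {
            match *self {
                SingleImports::None => *self = SingleImports::MaybeOne(id),
                // With two candidates we can assume the name will be defined,
                // so we stop tracking which directive it is.
                SingleImports::MaybeOne(_) => *self = SingleImports::AtLeastOne,
                SingleImports::AtLeastOne => {}
            }
        }

        fn directive_failed(&mut self) {
            if let SingleImports::MaybeOne(_) = *self {
                *self = SingleImports::None;
            }
        }
    }

    fn main() {
        let mut s = SingleImports::None;
        s.add_directive(1);
        println!("{:?}", s); // MaybeOne(1)
        s.add_directive(2);
        println!("{:?}", s); // AtLeastOne
        s.directive_failed();
        println!("{:?}", s); // still AtLeastOne
    }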
@@ -100,57 +93,261 @@ impl ImportDirective {
 }
 
 #[derive(Clone, Default)]
-/// Records information about the resolution of a name in a module.
+/// Records information about the resolution of a name in a namespace of a module.
 pub struct NameResolution<'a> {
-    /// The number of unresolved single imports that could define the name.
-    pub outstanding_references: usize,
+    /// The single imports that define the name in the namespace.
+    single_imports: SingleImports<'a>,
     /// The least shadowable known binding for this name, or None if there are no known bindings.
     pub binding: Option<&'a NameBinding<'a>>,
+    duplicate_globs: Vec<&'a NameBinding<'a>>,
+}
+
+#[derive(Clone, Debug)]
+enum SingleImports<'a> {
+    /// No single imports can define the name in the namespace.
+    None,
+    /// Only the given single import can define the name in the namespace.
+    MaybeOne(&'a ImportDirective<'a>),
+    /// At least one single import will define the name in the namespace.
+    AtLeastOne,
+}
+
+impl<'a> Default for SingleImports<'a> {
+    fn default() -> Self {
+        SingleImports::None
+    }
+}
+
+impl<'a> SingleImports<'a> {
+    fn add_directive(&mut self, directive: &'a ImportDirective<'a>) {
+        match *self {
+            SingleImports::None => *self = SingleImports::MaybeOne(directive),
+            // If two single imports can define the name in the namespace, we can assume that at
+            // least one of them will define it since otherwise both would have to define only one
+            // namespace, leading to a duplicate error.
+            SingleImports::MaybeOne(_) => *self = SingleImports::AtLeastOne,
+            SingleImports::AtLeastOne => {}
+        };
+    }
+
+    fn directive_failed(&mut self) {
+        match *self {
+            SingleImports::None => unreachable!(),
+            SingleImports::MaybeOne(_) => *self = SingleImports::None,
+            SingleImports::AtLeastOne => {}
+        }
+    }
 }
 
 impl<'a> NameResolution<'a> {
-    pub fn result(&self, outstanding_globs: usize) -> ResolveResult<&'a NameBinding<'a>> {
-        // If no unresolved imports (single or glob) can define the name, self.binding is final.
-        if self.outstanding_references == 0 && outstanding_globs == 0 {
-            return self.binding.map(Success).unwrap_or(Failed(None));
+    fn try_define(&mut self, binding: &'a NameBinding<'a>) -> Result<(), &'a NameBinding<'a>> {
+        if let Some(old_binding) = self.binding {
+            if binding.defined_with(DefModifiers::GLOB_IMPORTED) {
+                self.duplicate_globs.push(binding);
+            } else if old_binding.defined_with(DefModifiers::GLOB_IMPORTED) {
+                self.duplicate_globs.push(old_binding);
+                self.binding = Some(binding);
+            } else {
+                return Err(old_binding);
+            }
+        } else {
+            self.binding = Some(binding);
         }
 
-        if let Some(binding) = self.binding {
-            // Single imports will never be shadowable by other single or glob imports.
-            if !binding.defined_with(DefModifiers::GLOB_IMPORTED) { return Success(binding); }
-            // Non-PRELUDE glob imports will never be shadowable by other glob imports.
-            if self.outstanding_references == 0 && !binding.defined_with(DefModifiers::PRELUDE) {
-                return Success(binding);
+        Ok(())
+    }
+
+    // Returns the binding for the name if it is known or None if it is not known.
+    fn binding(&self) -> Option<&'a NameBinding<'a>> {
+        self.binding.and_then(|binding| match self.single_imports {
+            SingleImports::None => Some(binding),
+            _ if !binding.defined_with(DefModifiers::GLOB_IMPORTED) => Some(binding),
+            _ => None, // The binding could be shadowed by a single import, so it is not known.
+        })
+    }
+
+    // Returns Some(the resolution of the name), or None if the resolution depends
+    // on whether more globs can define the name.
+    fn try_result(&self, ns: Namespace, allow_private_imports: bool)
+                  -> Option<ResolveResult<&'a NameBinding<'a>>> {
+        match self.binding {
+            Some(binding) if !binding.defined_with(DefModifiers::GLOB_IMPORTED) =>
+                return Some(Success(binding)),
+            _ => {} // Items and single imports are not shadowable
+        };
+
+        // Check if a single import can still define the name.
+        match self.single_imports {
+            SingleImports::None => {},
+            SingleImports::AtLeastOne => return Some(Indeterminate),
+            SingleImports::MaybeOne(directive) => {
+                // If (1) we don't allow private imports, (2) no public single import can define
+                // the name, and (3) no public glob has defined the name, the resolution depends
+                // on whether more globs can define the name.
+                if !allow_private_imports && !directive.is_public &&
+                   !self.binding.map(NameBinding::is_public).unwrap_or(false) {
+                    return None;
+                }
+
+                let target_module = match directive.target_module.get() {
+                    Some(target_module) => target_module,
+                    None => return Some(Indeterminate),
+                };
+                let name = match directive.subclass {
+                    SingleImport { source, .. } => source,
+                    GlobImport => unreachable!(),
+                };
+                match target_module.resolve_name(name, ns, false) {
+                    Failed(_) => {}
+                    _ => return Some(Indeterminate),
+                }
             }
         }
 
-        Indeterminate
+        self.binding.map(Success)
     }
 
-    // Define the name or return the existing binding if there is a collision.
-    pub fn try_define(&mut self, binding: &'a NameBinding<'a>) -> Result<(), &'a NameBinding<'a>> {
-        let is_prelude = |binding: &NameBinding| binding.defined_with(DefModifiers::PRELUDE);
-        let old_binding = match self.binding {
-            Some(_) if is_prelude(binding) => return Ok(()),
-            Some(old_binding) if !is_prelude(old_binding) => old_binding,
-            _ => { self.binding = Some(binding); return Ok(()); }
+    fn report_conflicts<F: FnMut(&NameBinding, &NameBinding)>(&self, mut report: F) {
+        let binding = match self.binding {
+            Some(binding) => binding,
+            None => return,
+        };
+
+        for duplicate_glob in self.duplicate_globs.iter() {
+            // FIXME #31337: We currently allow items to shadow glob-imported re-exports.
+            if !binding.is_import() {
+                if let NameBindingKind::Import { binding, .. } = duplicate_glob.kind {
+                    if binding.is_import() { continue }
+                }
+            }
+
+            report(duplicate_glob, binding);
+        }
+    }
+}
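
For illustration only: a minimal, self-contained model of the determinacy rule that `binding()` above implements, using hypothetical stand-in types (`Binding`, `known_binding`) rather than the resolver's own. A glob-provided binding only counts as known once no single import could still shadow it.

    // Simplified model: items and single imports are never shadowable, but a
    // binding that came from a glob stays "unknown" while an unresolved single
    // import might still shadow it.
    #[derive(Clone, Copy)]
    enum SingleImports { None, AtLeastOne, MaybeOne }

    struct Binding { from_glob: bool }

    fn known_binding<'a>(binding: Option<&'a Binding>, singles: SingleImports)
                         -> Option<&'a Binding> {
        binding.and_then(|b| match singles {
            SingleImports::None => Some(b),   // no single imports target this name
            _ if !b.from_glob => Some(b),     // items and single imports are not shadowable
            _ => None,                        // a pending single import may still shadow the glob
        })
    }

    fn main() {
        let glob_binding = Binding { from_glob: true };
        assert!(known_binding(Some(&glob_binding), SingleImports::None).is_some());
        assert!(known_binding(Some(&glob_binding), SingleImports::MaybeOne).is_none());
        assert!(known_binding(None, SingleImports::AtLeastOne).is_none());
    }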
+
+impl<'a> ::ModuleS<'a> {
+    fn resolution(&self, name: Name, ns: Namespace) -> &'a RefCell<NameResolution<'a>> {
+        *self.resolutions.borrow_mut().entry((name, ns))
+             .or_insert_with(|| self.arenas.alloc_name_resolution())
+    }
+
+    pub fn resolve_name(&self, name: Name, ns: Namespace, allow_private_imports: bool)
+                        -> ResolveResult<&'a NameBinding<'a>> {
+        let resolution = self.resolution(name, ns);
+        let resolution = match resolution.borrow_state() {
+            ::std::cell::BorrowState::Unused => resolution.borrow_mut(),
+            _ => return Failed(None), // This happens when there is a cycle of imports
         };
 
-        // FIXME #31337: We currently allow items to shadow glob-imported re-exports.
-        if !old_binding.is_import() && binding.defined_with(DefModifiers::GLOB_IMPORTED) {
-            if let NameBindingKind::Import { binding, .. } = binding.kind {
-                if binding.is_import() { return Ok(()); }
+        if let Some(result) = resolution.try_result(ns, allow_private_imports) {
+            // If the resolution doesn't depend on glob definability, check privacy and return.
+            return result.and_then(|binding| {
+                let allowed = allow_private_imports || !binding.is_import() || binding.is_public();
+                if allowed { Success(binding) } else { Failed(None) }
+            });
+        }
+
+        // Check if the globs are determined
+        for directive in self.globs.borrow().iter() {
+            if !allow_private_imports && !directive.is_public { continue }
+            match directive.target_module.get() {
+                None => return Indeterminate,
+                Some(target_module) => match target_module.resolve_name(name, ns, false) {
+                    Indeterminate => return Indeterminate,
+                    _ => {}
+                }
+            }
+        }
+
+        Failed(None)
+    }
+
+    // Invariant: this may not be called until import resolution is complete.
+    pub fn resolve_name_in_lexical_scope(&self, name: Name, ns: Namespace)
+                                         -> Option<&'a NameBinding<'a>> {
+        self.resolution(name, ns).borrow().binding
+            .or_else(|| self.prelude.borrow().and_then(|prelude| {
+                prelude.resolve_name(name, ns, false).success()
+            }))
+    }
+
+    // Define the name or return the existing binding if there is a collision.
+    pub fn try_define_child(&self, name: Name, ns: Namespace, binding: NameBinding<'a>)
+                            -> Result<(), &'a NameBinding<'a>> {
+        self.update_resolution(name, ns, |resolution| {
+            resolution.try_define(self.arenas.alloc_name_binding(binding))
+        })
+    }
+
+    pub fn add_import_directive(&self,
+                                module_path: Vec<Name>,
+                                subclass: ImportDirectiveSubclass,
+                                span: Span,
+                                id: NodeId,
+                                is_public: bool,
+                                is_prelude: bool) {
+        let directive = self.arenas.alloc_import_directive(ImportDirective {
+            module_path: module_path,
+            target_module: Cell::new(None),
+            subclass: subclass,
+            span: span,
+            id: id,
+            is_public: is_public,
+            is_prelude: is_prelude,
+        });
+
+        self.unresolved_imports.borrow_mut().push(directive);
+        match directive.subclass {
+            SingleImport { target, .. } => {
+                for &ns in &[ValueNS, TypeNS] {
+                    self.resolution(target, ns).borrow_mut().single_imports
+                                                            .add_directive(directive);
+                }
             }
+            // We don't add prelude imports to the globs since they only affect lexical scopes,
+            // which are not relevant to import resolution.
+            GlobImport if directive.is_prelude => {}
+            GlobImport => self.globs.borrow_mut().push(directive),
         }
+    }
+
+    // Use `update` to mutate the resolution for the name.
+    // If the resolution becomes a success, define it in the module's glob importers.
+    fn update_resolution<T, F>(&self, name: Name, ns: Namespace, update: F) -> T
+        where F: FnOnce(&mut NameResolution<'a>) -> T
+    {
+        // Ensure that `resolution` isn't borrowed during `define_in_glob_importers`,
+        // where it might end up getting re-defined via a glob cycle.
+        let (new_binding, t) = {
+            let mut resolution = &mut *self.resolution(name, ns).borrow_mut();
+            let was_known = resolution.binding().is_some();
+
+            let t = update(resolution);
+
+            if was_known { return t; }
+            match resolution.binding() {
+                Some(binding) => (binding, t),
+                None => return t,
+            }
+        };
 
-        Err(old_binding)
+        self.define_in_glob_importers(name, ns, new_binding);
+        t
+    }
+
+    fn define_in_glob_importers(&self, name: Name, ns: Namespace, binding: &'a NameBinding<'a>) {
+        if !binding.defined_with(DefModifiers::PUBLIC | DefModifiers::IMPORTABLE) { return }
+        for &(importer, directive) in self.glob_importers.borrow_mut().iter() {
+            let _ = importer.try_define_child(name, ns, directive.import(binding, None));
+        }
     }
 }
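
One detail worth calling out in `update_resolution` above: the mutable borrow of the resolution is confined to an inner block so that it is released before `define_in_glob_importers` runs, which may re-enter the same cell through a glob cycle. A self-contained sketch of that borrow-then-release shape, using an ordinary `RefCell<Vec<u32>>` instead of the resolver's types:

    use std::cell::RefCell;

    // Scope the mutable borrow so it ends before we call back into code that
    // may need to borrow the same cell again.
    fn push_then_inspect(cell: &RefCell<Vec<u32>>) -> usize {
        let new_len = {
            let mut v = cell.borrow_mut();
            v.push(1);
            v.len()
        }; // mutable borrow released here
        // Borrowing again is now safe; with the borrow still live this would panic.
        assert_eq!(cell.borrow().len(), new_len);
        new_len
    }

    fn main() {
        let cell = RefCell::new(Vec::new());
        assert_eq!(push_then_inspect(&cell), 1);
    }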
 
 struct ImportResolvingError<'a> {
     /// Module where the error happened
     source_module: Module<'a>,
-    import_directive: ImportDirective,
+    import_directive: &'a ImportDirective<'a>,
     span: Span,
     help: String,
 }
@@ -184,21 +381,19 @@ impl<'a, 'b:'a, 'tcx:'b> ImportResolver<'a, 'b, 'tcx> {
 
             if self.resolver.unresolved_imports == 0 {
                 debug!("(resolving imports) success");
+                self.finalize_resolutions(self.resolver.graph_root, false);
                 break;
             }
 
             if self.resolver.unresolved_imports == prev_unresolved_imports {
                 // resolving failed
-                if errors.len() > 0 {
-                    for e in errors {
-                        self.import_resolving_error(e)
-                    }
-                } else {
-                    // Report unresolved imports only if no hard error was already reported
-                    // to avoid generating multiple errors on the same import.
-                    // Imports that are still indeterminate at this point are actually blocked
-                    // by errored imports, so there is no point reporting them.
-                    self.resolver.report_unresolved_imports(self.resolver.graph_root);
+                // Report unresolved imports only if no hard error was already reported
+                // to avoid generating multiple errors on the same import.
+                // Imports that are still indeterminate at this point are actually blocked
+                // by errored imports, so there is no point reporting them.
+                self.finalize_resolutions(self.resolver.graph_root, errors.len() == 0);
+                for e in errors {
+                    self.import_resolving_error(e)
                 }
                 break;
             }
@@ -213,21 +408,20 @@ impl<'a, 'b:'a, 'tcx:'b> ImportResolver<'a, 'b, 'tcx> {
     fn import_resolving_error(&self, e: ImportResolvingError<'b>) {
         // If it's a single failed import then create a "fake" import
         // resolution for it so that later resolve stages won't complain.
-        if let SingleImport(target, _) = e.import_directive.subclass {
-            let dummy_binding = self.resolver.new_name_binding(NameBinding {
-                modifiers: DefModifiers::PRELUDE,
+        if let SingleImport { target, .. } = e.import_directive.subclass {
+            let dummy_binding = self.resolver.arenas.alloc_name_binding(NameBinding {
+                modifiers: DefModifiers::GLOB_IMPORTED,
                 kind: NameBindingKind::Def(Def::Err),
                 span: None,
             });
-            let dummy_binding =
-                self.resolver.new_name_binding(e.import_directive.import(dummy_binding));
+            let dummy_binding = e.import_directive.import(dummy_binding, None);
 
-            let _ = e.source_module.try_define_child(target, ValueNS, dummy_binding);
+            let _ = e.source_module.try_define_child(target, ValueNS, dummy_binding.clone());
             let _ = e.source_module.try_define_child(target, TypeNS, dummy_binding);
         }
 
         let path = import_path_to_string(&e.import_directive.module_path,
-                                         e.import_directive.subclass);
+                                         &e.import_directive.subclass);
 
         resolve_error(self.resolver,
                       e.span,
@@ -242,7 +436,7 @@ impl<'a, 'b:'a, 'tcx:'b> ImportResolver<'a, 'b, 'tcx> {
         debug!("(resolving imports for module subtree) resolving {}",
                module_to_string(&module_));
         let orig_module = replace(&mut self.resolver.current_module, module_);
-        self.resolve_imports_for_module(module_, errors);
+        self.resolve_imports_in_current_module(errors);
         self.resolver.current_module = orig_module;
 
         for (_, child_module) in module_.module_children.borrow().iter() {
@@ -251,29 +445,31 @@ impl<'a, 'b:'a, 'tcx:'b> ImportResolver<'a, 'b, 'tcx> {
     }
 
     /// Attempts to resolve imports for the given module only.
-    fn resolve_imports_for_module(&mut self,
-                                  module: Module<'b>,
-                                  errors: &mut Vec<ImportResolvingError<'b>>) {
+    fn resolve_imports_in_current_module(&mut self, errors: &mut Vec<ImportResolvingError<'b>>) {
         let mut imports = Vec::new();
-        let mut unresolved_imports = module.unresolved_imports.borrow_mut();
+        let mut unresolved_imports = self.resolver.current_module.unresolved_imports.borrow_mut();
         ::std::mem::swap(&mut imports, &mut unresolved_imports);
 
         for import_directive in imports {
-            match self.resolve_import_for_module(module, &import_directive) {
+            match self.resolve_import(&import_directive) {
                 Failed(err) => {
                     let (span, help) = match err {
                         Some((span, msg)) => (span, format!(". {}", msg)),
                         None => (import_directive.span, String::new()),
                     };
                     errors.push(ImportResolvingError {
-                        source_module: module,
+                        source_module: self.resolver.current_module,
                         import_directive: import_directive,
                         span: span,
                         help: help,
                     });
                 }
                 Indeterminate => unresolved_imports.push(import_directive),
-                Success(()) => {}
+                Success(()) => {
+                    // Decrement the count of unresolved imports.
+                    assert!(self.resolver.unresolved_imports >= 1);
+                    self.resolver.unresolved_imports -= 1;
+                }
             }
         }
     }
@@ -283,83 +479,28 @@ impl<'a, 'b:'a, 'tcx:'b> ImportResolver<'a, 'b, 'tcx> {
     /// don't know whether the name exists at the moment due to other
     /// currently-unresolved imports, or success if we know the name exists.
     /// If successful, the resolved bindings are written into the module.
-    fn resolve_import_for_module(&mut self,
-                                 module_: Module<'b>,
-                                 import_directive: &ImportDirective)
-                                 -> ResolveResult<()> {
+    fn resolve_import(&mut self, directive: &'b ImportDirective<'b>) -> ResolveResult<()> {
         debug!("(resolving import for module) resolving import `{}::...` in `{}`",
-               names_to_string(&import_directive.module_path),
-               module_to_string(&module_));
-
-        self.resolver
-            .resolve_module_path(module_,
-                                 &import_directive.module_path,
-                                 UseLexicalScopeFlag::DontUseLexicalScope,
-                                 import_directive.span)
-            .and_then(|(containing_module, lp)| {
-                // We found the module that the target is contained
-                // within. Attempt to resolve the import within it.
-                if let SingleImport(target, source) = import_directive.subclass {
-                    self.resolve_single_import(module_,
-                                               containing_module,
-                                               target,
-                                               source,
-                                               import_directive,
-                                               lp)
-                } else {
-                    self.resolve_glob_import(module_, containing_module, import_directive, lp)
-                }
-            })
-            .and_then(|()| {
-                // Decrement the count of unresolved imports.
-                assert!(self.resolver.unresolved_imports >= 1);
-                self.resolver.unresolved_imports -= 1;
-
-                if let GlobImport = import_directive.subclass {
-                    module_.dec_glob_count();
-                    if import_directive.is_public {
-                        module_.dec_pub_glob_count();
-                    }
-                }
-                if import_directive.is_public {
-                    module_.dec_pub_count();
-                }
-                Success(())
-            })
-    }
-
-    fn resolve_single_import(&mut self,
-                             module_: Module<'b>,
-                             target_module: Module<'b>,
-                             target: Name,
-                             source: Name,
-                             directive: &ImportDirective,
-                             lp: LastPrivate)
-                             -> ResolveResult<()> {
-        debug!("(resolving single import) resolving `{}` = `{}::{}` from `{}` id {}, last \
-                private {:?}",
-               target,
-               module_to_string(&target_module),
-               source,
-               module_to_string(module_),
-               directive.id,
-               lp);
-
-        let lp = match lp {
-            LastMod(lp) => lp,
-            LastImport {..} => {
-                self.resolver
-                    .session
-                    .span_bug(directive.span, "not expecting Import here, must be LastMod")
-            }
+               names_to_string(&directive.module_path),
+               module_to_string(self.resolver.current_module));
+
+        let target_module = match directive.target_module.get() {
+            Some(module) => module,
+            _ => match self.resolver.resolve_module_path(&directive.module_path,
+                                                         DontUseLexicalScope,
+                                                         directive.span) {
+                Success(module) => module,
+                Indeterminate => return Indeterminate,
+                Failed(err) => return Failed(err),
+            },
         };
 
-        // If this is a circular import, we temporarily count it as determined so that
-        // it fails (as opposed to being indeterminate) when nothing else can define it.
-        if target_module.def_id() == module_.def_id() && source == target {
-            module_.decrement_outstanding_references_for(target, ValueNS);
-            module_.decrement_outstanding_references_for(target, TypeNS);
-        }
+        directive.target_module.set(Some(target_module));
+        let (source, target, value_determined, type_determined) = match directive.subclass {
+            SingleImport { source, target, ref value_determined, ref type_determined } =>
+                (source, target, value_determined, type_determined),
+            GlobImport => return self.resolve_glob_import(target_module, directive),
+        };
 
         // We need to resolve both namespaces for this to succeed.
         let value_result =
@@ -367,16 +508,49 @@ impl<'a, 'b:'a, 'tcx:'b> ImportResolver<'a, 'b, 'tcx> {
         let type_result =
             self.resolver.resolve_name_in_module(target_module, source, TypeNS, false, true);
 
-        if target_module.def_id() == module_.def_id() && source == target {
-            module_.increment_outstanding_references_for(target, ValueNS);
-            module_.increment_outstanding_references_for(target, TypeNS);
+        let module_ = self.resolver.current_module;
+        for &(ns, result, determined) in &[(ValueNS, &value_result, value_determined),
+                                           (TypeNS, &type_result, type_determined)] {
+            if determined.get() { continue }
+            if let Indeterminate = *result { continue }
+
+            determined.set(true);
+            if let Success(binding) = *result {
+                if !binding.defined_with(DefModifiers::IMPORTABLE) {
+                    let msg = format!("`{}` is not directly importable", target);
+                    span_err!(self.resolver.session, directive.span, E0253, "{}", &msg);
+                }
+
+                let privacy_error = if !self.resolver.is_visible(binding, target_module) {
+                    Some(Box::new(PrivacyError(directive.span, source, binding)))
+                } else {
+                    None
+                };
+
+                let imported_binding = directive.import(binding, privacy_error);
+                let conflict = module_.try_define_child(target, ns, imported_binding);
+                if let Err(old_binding) = conflict {
+                    let binding = &directive.import(binding, None);
+                    self.resolver.report_conflict(module_, target, ns, binding, old_binding);
+                }
+            } else {
+                module_.update_resolution(target, ns, |resolution| {
+                    resolution.single_imports.directive_failed();
+                });
+            }
         }
 
         match (&value_result, &type_result) {
             (&Indeterminate, _) | (_, &Indeterminate) => return Indeterminate,
             (&Failed(_), &Failed(_)) => {
-                let children = target_module.resolutions.borrow();
-                let names = children.keys().map(|&(ref name, _)| name);
+                let resolutions = target_module.resolutions.borrow();
+                let names = resolutions.iter().filter_map(|(&(ref name, _), resolution)| {
+                    match *resolution.borrow() {
+                        NameResolution { binding: Some(_), .. } => Some(name),
+                        NameResolution { single_imports: SingleImports::None, .. } => None,
+                        _ => Some(name),
+                    }
+                });
                 let lev_suggestion = match find_best_match_for_name(names, &source.as_str(), None) {
                     Some(name) => format!(". Did you mean to use `{}`?", name),
                     None => "".to_owned(),
@@ -401,77 +575,52 @@ impl<'a, 'b:'a, 'tcx:'b> ImportResolver<'a, 'b, 'tcx> {
                     .emit();
             }
 
-            (_, &Success(name_binding)) if !name_binding.is_import() && directive.is_public => {
-                if !name_binding.is_public() {
-                    if name_binding.is_extern_crate() {
-                        let msg = format!("extern crate `{}` is private, and cannot be reexported \
-                                           (error E0364), consider declaring with `pub`",
-                                           source);
-                        self.resolver.session.add_lint(lint::builtin::PRIVATE_IN_PUBLIC,
-                                                       directive.id,
-                                                       directive.span,
-                                                       msg);
-                    } else {
-                        let msg = format!("`{}` is private, and cannot be reexported", source);
-                        let note_msg =
-                            format!("consider declaring type or module `{}` with `pub`", source);
-                        struct_span_err!(self.resolver.session, directive.span, E0365, "{}", &msg)
-                            .span_note(directive.span, &note_msg)
-                            .emit();
-                    }
-                } else if name_binding.defined_with(DefModifiers::PRIVATE_VARIANT) {
-                    let msg = format!("variant `{}` is private, and cannot be reexported \
-                                       (error E0364), consider declaring its enum as `pub`",
+            (_, &Success(name_binding)) if !name_binding.is_import() &&
+                                           directive.is_public &&
+                                           !name_binding.is_public() => {
+                if name_binding.is_extern_crate() {
+                    let msg = format!("extern crate `{}` is private, and cannot be reexported \
+                                       (error E0364), consider declaring with `pub`",
                                        source);
                     self.resolver.session.add_lint(lint::builtin::PRIVATE_IN_PUBLIC,
                                                    directive.id,
                                                    directive.span,
                                                    msg);
+                } else {
+                    let msg = format!("`{}` is private, and cannot be reexported", source);
+                    let note_msg =
+                        format!("consider declaring type or module `{}` with `pub`", source);
+                    struct_span_err!(self.resolver.session, directive.span, E0365, "{}", &msg)
+                        .span_note(directive.span, &note_msg)
+                        .emit();
                 }
             }
 
             _ => {}
         }
 
-        for &(ns, result) in &[(ValueNS, &value_result), (TypeNS, &type_result)] {
-            if let Success(binding) = *result {
-                if !binding.defined_with(DefModifiers::IMPORTABLE) {
-                    let msg = format!("`{}` is not directly importable", target);
-                    span_err!(self.resolver.session, directive.span, E0253, "{}", &msg);
-                }
-
-                self.define(module_, target, ns, directive.import(binding));
-            }
+        // Report a privacy error here if all successful namespaces are privacy errors.
+        let mut privacy_error = None;
+        for &ns in &[ValueNS, TypeNS] {
+            privacy_error = match module_.resolve_name(target, ns, true) {
+                Success(&NameBinding {
+                    kind: NameBindingKind::Import { ref privacy_error, .. }, ..
+                }) => privacy_error.as_ref().map(|error| (**error).clone()),
+                _ => continue,
+            };
+            if privacy_error.is_none() { break }
         }
+        privacy_error.map(|error| self.resolver.privacy_errors.push(error));
 
         // Record what this import resolves to for later uses in documentation,
         // this may resolve to either a value or a type, but for documentation
         // purposes it's good enough to just favor one over the other.
-        module_.decrement_outstanding_references_for(target, ValueNS);
-        module_.decrement_outstanding_references_for(target, TypeNS);
-
-        let def_and_priv = |binding: &NameBinding| {
-            let last_private =
-                if binding.is_public() { lp } else { DependsOn(binding.local_def_id().unwrap()) };
-            (binding.def().unwrap(), last_private)
-        };
-        let value_def_and_priv = value_result.success().map(&def_and_priv);
-        let type_def_and_priv = type_result.success().map(&def_and_priv);
-
-        let import_lp = LastImport {
-            value_priv: value_def_and_priv.map(|(_, p)| p),
-            value_used: Used,
-            type_priv: type_def_and_priv.map(|(_, p)| p),
-            type_used: Used,
-        };
-
-        let write_path_resolution = |(def, _)| {
-            let path_resolution =
-                PathResolution { base_def: def, last_private: import_lp, depth: 0 };
-            self.resolver.def_map.borrow_mut().insert(directive.id, path_resolution);
+        let def = match type_result.success().and_then(NameBinding::def) {
+            Some(def) => def,
+            None => value_result.success().and_then(NameBinding::def).unwrap(),
         };
-        value_def_and_priv.map(&write_path_resolution);
-        type_def_and_priv.map(&write_path_resolution);
+        let path_resolution = PathResolution { base_def: def, depth: 0 };
+        self.resolver.def_map.borrow_mut().insert(directive.id, path_resolution);
 
         debug!("(resolving single import) successfully resolved import");
         return Success(());
@@ -481,47 +630,44 @@ impl<'a, 'b:'a, 'tcx:'b> ImportResolver<'a, 'b, 'tcx> {
     // succeeds or bails out (as importing * from an empty module or a module
     // that exports nothing is valid). target_module is the module we are
     // actually importing, i.e., `foo` in `use foo::*`.
-    fn resolve_glob_import(&mut self,
-                           module_: Module<'b>,
-                           target_module: Module<'b>,
-                           directive: &ImportDirective,
-                           lp: LastPrivate)
+    fn resolve_glob_import(&mut self, target_module: Module<'b>, directive: &'b ImportDirective<'b>)
                            -> ResolveResult<()> {
-        // We must bail out if the node has unresolved imports of any kind (including globs).
-        if target_module.pub_count.get() > 0 {
-            debug!("(resolving glob import) target module has unresolved pub imports; bailing out");
-            return Indeterminate;
+        if let Some(Def::Trait(_)) = target_module.def {
+            self.resolver.session.span_err(directive.span, "items in traits are not importable.");
         }
 
+        let module_ = self.resolver.current_module;
         if module_.def_id() == target_module.def_id() {
             // This means we are trying to glob import a module into itself, and it is a no-go
             let msg = "Cannot glob-import a module into itself.".into();
             return Failed(Some((directive.span, msg)));
         }
+        self.resolver.populate_module_if_necessary(target_module);
 
-        // Add all children from the containing module.
-        build_reduced_graph::populate_module_if_necessary(self.resolver, target_module);
-        target_module.for_each_child(|name, ns, binding| {
-            if !binding.defined_with(DefModifiers::IMPORTABLE | DefModifiers::PUBLIC) { return }
-            self.define(module_, name, ns, directive.import(binding));
-
-            if ns == TypeNS && directive.is_public &&
-               binding.defined_with(DefModifiers::PRIVATE_VARIANT) {
-                let msg = format!("variant `{}` is private, and cannot be reexported (error \
-                                   E0364), consider declaring its enum as `pub`", name);
-                self.resolver.session.add_lint(lint::builtin::PRIVATE_IN_PUBLIC,
-                                               directive.id,
-                                               directive.span,
-                                               msg);
+        if directive.is_prelude {
+            *module_.prelude.borrow_mut() = Some(target_module);
+            return Success(());
+        }
+
+        // Add to target_module's glob_importers
+        target_module.glob_importers.borrow_mut().push((module_, directive));
+
+        // Ensure that `resolutions` isn't borrowed during `try_define_child`,
+        // since it might get updated via a glob cycle.
+        let bindings = target_module.resolutions.borrow().iter().filter_map(|(name, resolution)| {
+            resolution.borrow().binding().map(|binding| (*name, binding))
+        }).collect::<Vec<_>>();
+        for ((name, ns), binding) in bindings {
+            if binding.defined_with(DefModifiers::IMPORTABLE | DefModifiers::PUBLIC) {
+                let _ = module_.try_define_child(name, ns, directive.import(binding, None));
             }
-        });
+        }
 
         // Record the destination of this import
         if let Some(did) = target_module.def_id() {
             self.resolver.def_map.borrow_mut().insert(directive.id,
                                                       PathResolution {
                                                           base_def: Def::Mod(did),
-                                                          last_private: lp,
                                                           depth: 0,
                                                       });
         }
@@ -530,86 +676,63 @@ impl<'a, 'b:'a, 'tcx:'b> ImportResolver<'a, 'b, 'tcx> {
         return Success(());
     }
 
-    fn define(&mut self,
-              parent: Module<'b>,
-              name: Name,
-              ns: Namespace,
-              binding: NameBinding<'b>) {
-        let binding = self.resolver.new_name_binding(binding);
-        if let Err(old_binding) = parent.try_define_child(name, ns, binding) {
-            self.report_conflict(name, ns, binding, old_binding);
-        } else if binding.is_public() { // Add to the export map
-            if let (Some(parent_def_id), Some(def)) = (parent.def_id(), binding.def()) {
-                let parent_node_id = self.resolver.ast_map.as_local_node_id(parent_def_id).unwrap();
-                let export = Export { name: name, def_id: def.def_id() };
-                self.resolver.export_map.entry(parent_node_id).or_insert(Vec::new()).push(export);
+    // Miscellaneous post-processing, including recording reexports, recording shadowed traits,
+    // reporting conflicts, reporting the PRIVATE_IN_PUBLIC lint, and reporting unresolved imports.
+    fn finalize_resolutions(&mut self, module: Module<'b>, report_unresolved_imports: bool) {
+        // Since import resolution is finished, globs will not define any more names.
+        *module.globs.borrow_mut() = Vec::new();
+
+        let mut reexports = Vec::new();
+        for (&(name, ns), resolution) in module.resolutions.borrow().iter() {
+            let resolution = resolution.borrow();
+            resolution.report_conflicts(|b1, b2| {
+                self.resolver.report_conflict(module, name, ns, b1, b2)
+            });
+
+            let binding = match resolution.binding {
+                Some(binding) => binding,
+                None => continue,
+            };
+
+            if binding.is_public() && (binding.is_import() || binding.is_extern_crate()) {
+                if let Some(def) = binding.def() {
+                    reexports.push(Export { name: name, def_id: def.def_id() });
+                }
+            }
+
+            if let NameBindingKind::Import { binding: orig_binding, id, .. } = binding.kind {
+                if ns == TypeNS && binding.is_public() &&
+                   orig_binding.defined_with(DefModifiers::PRIVATE_VARIANT) {
+                    let msg = format!("variant `{}` is private, and cannot be reexported \
+                                       (error E0364), consider declaring its enum as `pub`",
+                                      name);
+                    let lint = lint::builtin::PRIVATE_IN_PUBLIC;
+                    self.resolver.session.add_lint(lint, id, binding.span.unwrap(), msg);
+                }
             }
         }
-    }
 
-    fn report_conflict(&mut self,
-                       name: Name,
-                       ns: Namespace,
-                       binding: &'b NameBinding<'b>,
-                       old_binding: &'b NameBinding<'b>) {
-        // Error on the second of two conflicting imports
-        if old_binding.is_import() && binding.is_import() &&
-           old_binding.span.unwrap().lo > binding.span.unwrap().lo {
-            self.report_conflict(name, ns, old_binding, binding);
-            return;
+        if reexports.len() > 0 {
+            if let Some(def_id) = module.def_id() {
+                let node_id = self.resolver.ast_map.as_local_node_id(def_id).unwrap();
+                self.resolver.export_map.insert(node_id, reexports);
+            }
         }
 
-        if old_binding.is_extern_crate() {
-            let msg = format!("import `{0}` conflicts with imported crate \
-                               in this module (maybe you meant `use {0}::*`?)",
-                              name);
-            span_err!(self.resolver.session, binding.span.unwrap(), E0254, "{}", &msg);
-        } else if old_binding.is_import() {
-            let ns_word = match (ns, old_binding.module()) {
-                (ValueNS, _) => "value",
-                (TypeNS, Some(module)) if module.is_normal() => "module",
-                (TypeNS, Some(module)) if module.is_trait() => "trait",
-                (TypeNS, _) => "type",
-            };
-            let mut err = struct_span_err!(self.resolver.session,
-                                           binding.span.unwrap(),
-                                           E0252,
-                                           "a {} named `{}` has already been imported \
-                                            in this module",
-                                           ns_word,
-                                           name);
-            err.span_note(old_binding.span.unwrap(),
-                          &format!("previous import of `{}` here", name));
-            err.emit();
-        } else if ns == ValueNS { // Check for item conflicts in the value namespace
-            let mut err = struct_span_err!(self.resolver.session,
-                                           binding.span.unwrap(),
-                                           E0255,
-                                           "import `{}` conflicts with value in this module",
-                                           name);
-            err.span_note(old_binding.span.unwrap(), "conflicting value here");
-            err.emit();
-        } else { // Check for item conflicts in the type namespace
-            let (what, note) = match old_binding.module() {
-                Some(ref module) if module.is_normal() =>
-                    ("existing submodule", "note conflicting module here"),
-                Some(ref module) if module.is_trait() =>
-                    ("trait in this module", "note conflicting trait here"),
-                _ => ("type in this module", "note conflicting type here"),
-            };
-            let mut err = struct_span_err!(self.resolver.session,
-                                           binding.span.unwrap(),
-                                           E0256,
-                                           "import `{}` conflicts with {}",
-                                           name,
-                                           what);
-            err.span_note(old_binding.span.unwrap(), note);
-            err.emit();
+        if report_unresolved_imports {
+            for import in module.unresolved_imports.borrow().iter() {
+                resolve_error(self.resolver, import.span, ResolutionError::UnresolvedImport(None));
+                break;
+            }
+        }
+
+        for (_, child) in module.module_children.borrow().iter() {
+            self.finalize_resolutions(child, report_unresolved_imports);
         }
     }
 }
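
A note on the reexport bookkeeping in `finalize_resolutions` above: public bindings that came from imports (or extern crates) are collected per module and stored in the export map keyed by the module's local node id. A stripped-down sketch of that collection step, with hypothetical placeholder types (`NodeId`, `Export`, `Binding`) standing in for the resolver's:

    use std::collections::HashMap;

    type NodeId = u32;
    struct Export { name: String, def_id: u32 }
    struct Binding { name: String, def_id: u32, public: bool, from_import: bool }

    // Keep only public bindings that were brought in by imports, mirroring the
    // filter used when building `reexports` above.
    fn collect_reexports(module: NodeId,
                         bindings: &[Binding],
                         export_map: &mut HashMap<NodeId, Vec<Export>>) {
        let reexports: Vec<Export> = bindings.iter()
            .filter(|b| b.public && b.from_import)
            .map(|b| Export { name: b.name.clone(), def_id: b.def_id })
            .collect();
        if !reexports.is_empty() {
            export_map.insert(module, reexports);
        }
    }

    fn main() {
        let bindings = vec![
            Binding { name: "Foo".into(), def_id: 1, public: true,  from_import: true },
            Binding { name: "bar".into(), def_id: 2, public: false, from_import: true },
        ];
        let mut export_map = HashMap::new();
        collect_reexports(0, &bindings, &mut export_map);
        assert_eq!(export_map[&0].len(), 1);
    }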
 
-fn import_path_to_string(names: &[Name], subclass: ImportDirectiveSubclass) -> String {
+fn import_path_to_string(names: &[Name], subclass: &ImportDirectiveSubclass) -> String {
     if names.is_empty() {
         import_directive_subclass_to_string(subclass)
     } else {
@@ -620,9 +743,9 @@ fn import_path_to_string(names: &[Name], subclass: ImportDirectiveSubclass) -> S
     }
 }
 
-fn import_directive_subclass_to_string(subclass: ImportDirectiveSubclass) -> String {
-    match subclass {
-        SingleImport(_, source) => source.to_string(),
+fn import_directive_subclass_to_string(subclass: &ImportDirectiveSubclass) -> String {
+    match *subclass {
+        SingleImport { source, .. } => source.to_string(),
         GlobImport => "*".to_string(),
     }
 }
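
The hunk boundary above elides the else branch of `import_path_to_string`; assuming it joins the path segments with `::` before appending the rendered subclass (which is what the surrounding code suggests), a standalone illustration with plain strings in place of interned `Name`s would be:

    // Hypothetical standalone version: &str in place of Name, and the subclass
    // already rendered ("*" for a glob, the source name otherwise).
    fn import_path_to_string(names: &[&str], subclass: &str) -> String {
        if names.is_empty() {
            subclass.to_string()
        } else {
            // assumed behaviour of the elided else branch: join with "::"
            format!("{}::{}", names.join("::"), subclass)
        }
    }

    fn main() {
        assert_eq!(import_path_to_string(&["std", "collections"], "*"),
                   "std::collections::*");
        assert_eq!(import_path_to_string(&[], "HashMap"), "HashMap");
    }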
diff --git a/src/librustc_save_analysis/Cargo.toml b/src/librustc_save_analysis/Cargo.toml
new file mode 100644 (file)
index 0000000..52fa919
--- /dev/null
@@ -0,0 +1,14 @@
+[package]
+authors = ["The Rust Project Developers"]
+name = "rustc_save_analysis"
+version = "0.0.0"
+
+[lib]
+name = "rustc_save_analysis"
+path = "lib.rs"
+crate-type = ["dylib"]
+
+[dependencies]
+log = { path = "../liblog" }
+rustc = { path = "../librustc" }
+syntax = { path = "../libsyntax" }
diff --git a/src/librustc_save_analysis/csv_dumper.rs b/src/librustc_save_analysis/csv_dumper.rs
new file mode 100644 (file)
index 0000000..0e02830
--- /dev/null
@@ -0,0 +1,566 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::io::Write;
+
+use rustc::hir::def_id::{DefId, DefIndex};
+use syntax::codemap::Span;
+
+use super::data::*;
+use super::dump::Dump;
+use super::span_utils::SpanUtils;
+
+pub struct CsvDumper<'a, 'b, W: 'b> {
+    output: &'b mut W,
+    dump_spans: bool,
+    span: SpanUtils<'a>
+}
+
+impl<'a, 'b, W: Write> CsvDumper<'a, 'b, W> {
+    pub fn new(writer: &'b mut W, span: SpanUtils<'a>) -> CsvDumper<'a, 'b, W> {
+        CsvDumper { output: writer, dump_spans: false, span: span }
+    }
+
+    fn record(&mut self, kind: &str, span: Span, values: String) {
+        let span_str = self.span.extent_str(span);
+        if let Err(_) = write!(self.output, "{},{}{}\n", kind, span_str, values) {
+            error!("Error writing output");
+        }
+    }
+
+    fn record_raw(&mut self, info: &str) {
+        if let Err(_) = write!(self.output, "{}", info) {
+            error!("Error writing output '{}'", info);
+        }
+    }
+
+    pub fn dump_span(&mut self, kind: &str, span: Span) {
+        assert!(self.dump_spans);
+        let result = format!("span,kind,{},{},text,\"{}\"\n",
+                             kind,
+                             self.span.extent_str(span),
+                             escape(self.span.snippet(span)));
+        self.record_raw(&result);
+    }
+}
+
+impl<'a, 'b, W: Write + 'b> Dump for CsvDumper<'a, 'b, W> {
+    fn crate_prelude(&mut self, span: Span, data: CratePreludeData) {
+        let crate_root = data.crate_root.unwrap_or("<no source>".to_owned());
+
+        let values = make_values_str(&[
+            ("name", &data.crate_name),
+            ("crate_root", &crate_root)
+        ]);
+
+        self.record("crate", span, values);
+
+        for c in data.external_crates {
+            let num = c.num.to_string();
+            let lo_loc = self.span.sess.codemap().lookup_char_pos(span.lo);
+            let file_name = SpanUtils::make_path_string(&lo_loc.file.name);
+            let values = make_values_str(&[
+                ("name", &c.name),
+                ("crate", &num),
+                ("file_name", &file_name)
+            ]);
+
+            self.record_raw(&format!("external_crate{}\n", values));
+        }
+
+        self.record_raw("end_external_crates\n");
+    }
+
+    fn enum_data(&mut self, span: Span, data: EnumData) {
+        if self.dump_spans {
+            self.dump_span("enum", span);
+            return;
+        }
+
+        let id = data.id.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("id", &id),
+            ("qualname", &data.qualname),
+            ("scopeid", &scope),
+            ("value", &data.value)
+        ]);
+
+        self.record("enum", data.span, values);
+    }
+
+    fn extern_crate(&mut self, span: Span, data: ExternCrateData) {
+        if self.dump_spans {
+            self.dump_span("extern_crate", span);
+            return;
+        }
+
+        let id = data.id.to_string();
+        let crate_num = data.crate_num.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("id", &id),
+            ("name", &data.name),
+            ("location", &data.location),
+            ("crate", &crate_num),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("extern_crate", data.span, values);
+    }
+
+    fn impl_data(&mut self, span: Span, data: ImplData) {
+        if self.dump_spans {
+            self.dump_span("impl", span);
+            return;
+        }
+
+        let self_ref = data.self_ref.unwrap_or(null_def_id());
+        let trait_ref = data.trait_ref.unwrap_or(null_def_id());
+
+        let id = data.id.to_string();
+        let ref_id = self_ref.index.as_usize().to_string();
+        let ref_id_crate = self_ref.krate.to_string();
+        let trait_id = trait_ref.index.as_usize().to_string();
+        let trait_id_crate = trait_ref.krate.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("id", &id),
+            ("refid", &ref_id),
+            ("refidcrate", &ref_id_crate),
+            ("traitid", &trait_id),
+            ("traitidcrate", &trait_id_crate),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("impl", data.span, values);
+    }
+
+    fn inheritance(&mut self, data: InheritanceData) {
+       if self.dump_spans {
+           return;
+       }
+
+       let base_id = data.base_id.index.as_usize().to_string();
+       let base_crate = data.base_id.krate.to_string();
+       let deriv_id = data.deriv_id.to_string();
+       let deriv_crate = 0.to_string();
+       let values = make_values_str(&[
+           ("base", &base_id),
+           ("basecrate", &base_crate),
+           ("derived", &deriv_id),
+           ("derivedcrate", &deriv_crate)
+       ]);
+
+       self.record("inheritance", data.span, values);
+    }
+
+    fn function(&mut self, span: Span, data: FunctionData) {
+        if self.dump_spans {
+            self.dump_span("function", span);
+            return;
+        }
+
+        let (decl_id, decl_crate) = match data.declaration {
+            Some(id) => (id.index.as_usize().to_string(), id.krate.to_string()),
+            None => (String::new(), String::new())
+        };
+
+        let id = data.id.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("id", &id),
+            ("qualname", &data.qualname),
+            ("declid", &decl_id),
+            ("declidcrate", &decl_crate),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("function", data.span, values);
+    }
+
+    fn function_ref(&mut self, span: Span, data: FunctionRefData) {
+        if self.dump_spans {
+            self.dump_span("fn_ref", span);
+            return;
+        }
+
+        let ref_id = data.ref_id.index.as_usize().to_string();
+        let ref_crate = data.ref_id.krate.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("refid", &ref_id),
+            ("refidcrate", &ref_crate),
+            ("qualname", ""),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("fn_ref", data.span, values);
+    }
+
+    fn function_call(&mut self, span: Span, data: FunctionCallData) {
+        if self.dump_spans {
+            self.dump_span("fn_call", span);
+            return;
+        }
+
+        let ref_id = data.ref_id.index.as_usize().to_string();
+        let ref_crate = data.ref_id.krate.to_string();
+        let qualname = String::new();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("refid", &ref_id),
+            ("refidcrate", &ref_crate),
+            ("qualname", &qualname),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("fn_call", data.span, values);
+    }
+
+    fn method(&mut self, span: Span, data: MethodData) {
+        if self.dump_spans {
+            self.dump_span("method_decl", span);
+            return;
+        }
+
+        let id = data.id.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("id", &id),
+            ("qualname", &data.qualname),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("method_decl", span, values);
+    }
+
+    fn method_call(&mut self, span: Span, data: MethodCallData) {
+        if self.dump_spans {
+            self.dump_span("method_call", span);
+            return;
+        }
+
+        let (dcn, dck) = match data.decl_id {
+            Some(declid) => (declid.index.as_usize().to_string(), declid.krate.to_string()),
+            None => (String::new(), String::new()),
+        };
+
+        let ref_id = data.ref_id.unwrap_or(null_def_id());
+
+        let def_id = ref_id.index.as_usize().to_string();
+        let def_crate = ref_id.krate.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("refid", &def_id),
+            ("refidcrate", &def_crate),
+            ("declid", &dcn),
+            ("declidcrate", &dck),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("method_call", data.span, values);
+    }
+
+    fn macro_data(&mut self, span: Span, data: MacroData) {
+        if self.dump_spans {
+            self.dump_span("macro", span);
+            return;
+        }
+
+        let values = make_values_str(&[
+            ("name", &data.name),
+            ("qualname", &data.qualname)
+        ]);
+
+        self.record("macro", data.span, values);
+    }
+
+    fn macro_use(&mut self, span: Span, data: MacroUseData) {
+        if self.dump_spans {
+            self.dump_span("macro_use", span);
+            return;
+        }
+
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("callee_name", &data.name),
+            ("qualname", &data.qualname),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("macro_use", data.span, values);
+    }
+
+    fn mod_data(&mut self, data: ModData) {
+        if self.dump_spans {
+            return;
+        }
+
+        let id = data.id.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("id", &id),
+            ("qualname", &data.qualname),
+            ("scopeid", &scope),
+            ("def_file", &data.filename)
+        ]);
+
+        self.record("module", data.span, values);
+    }
+
+    fn mod_ref(&mut self, span: Span, data: ModRefData) {
+        if self.dump_spans {
+            self.dump_span("mod_ref", span);
+            return;
+        }
+
+        let (ref_id, ref_crate) = match data.ref_id {
+            Some(rid) => (rid.index.as_usize().to_string(), rid.krate.to_string()),
+            None => (0.to_string(), 0.to_string())
+        };
+
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("refid", &ref_id),
+            ("refidcrate", &ref_crate),
+            ("qualname", &data.qualname),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("mod_ref", data.span, values);
+    }
+
+    fn struct_data(&mut self, span: Span, data: StructData) {
+        if self.dump_spans {
+            self.dump_span("struct", span);
+            return;
+        }
+
+        let id = data.id.to_string();
+        let ctor_id = data.ctor_id.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("id", &id),
+            ("ctor_id", &ctor_id),
+            ("qualname", &data.qualname),
+            ("scopeid", &scope),
+            ("value", &data.value)
+        ]);
+
+        self.record("struct", data.span, values);
+    }
+
+    fn struct_variant(&mut self, span: Span, data: StructVariantData) {
+        if self.dump_spans {
+            self.dump_span("variant_struct", span);
+            return;
+        }
+
+        let id = data.id.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("id", &id),
+            ("ctor_id", &id),
+            ("qualname", &data.qualname),
+            ("type", &data.type_value),
+            ("value", &data.value),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("variant_struct", data.span, values);
+    }
+
+    fn trait_data(&mut self, span: Span, data: TraitData) {
+        if self.dump_spans {
+            self.dump_span("trait", span);
+            return;
+        }
+
+        let id = data.id.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("id", &id),
+            ("qualname", &data.qualname),
+            ("scopeid", &scope),
+            ("value", &data.value)
+        ]);
+
+        self.record("trait", data.span, values);
+    }
+
+    fn tuple_variant(&mut self, span: Span, data: TupleVariantData) {
+        if self.dump_spans {
+            self.dump_span("variant", span);
+            return;
+        }
+
+        let id = data.id.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("id", &id),
+            ("name", &data.name),
+            ("qualname", &data.qualname),
+            ("type", &data.type_value),
+            ("value", &data.value),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("variant", data.span, values);
+    }
+
+    fn type_ref(&mut self, span: Span, data: TypeRefData) {
+        if self.dump_spans {
+            self.dump_span("type_ref", span);
+            return;
+        }
+
+        let (ref_id, ref_crate) = match data.ref_id {
+            Some(id) => (id.index.as_usize().to_string(), id.krate.to_string()),
+            None => (0.to_string(), 0.to_string())
+        };
+
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("refid", &ref_id),
+            ("refidcrate", &ref_crate),
+            ("qualname", &data.qualname),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("type_ref", data.span, values);
+    }
+
+    fn typedef(&mut self, span: Span, data: TypedefData) {
+        if self.dump_spans {
+            self.dump_span("typedef", span);
+            return;
+        }
+
+        let id = data.id.to_string();
+        let values = make_values_str(&[
+            ("id", &id),
+            ("qualname", &data.qualname),
+            ("value", &data.value)
+        ]);
+
+        self.record("typedef", data.span, values);
+    }
+
+    fn use_data(&mut self, span: Span, data: UseData) {
+        if self.dump_spans {
+            self.dump_span("use_alias", span);
+            return;
+        }
+
+        let mod_id = data.mod_id.unwrap_or(null_def_id());
+
+        let id = data.id.to_string();
+        let ref_id = mod_id.index.as_usize().to_string();
+        let ref_crate = mod_id.krate.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("id", &id),
+            ("refid", &ref_id),
+            ("refidcrate", &ref_crate),
+            ("name", &data.name),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("use_alias", data.span, values);
+    }
+
+    fn use_glob(&mut self, span: Span, data: UseGlobData) {
+        if self.dump_spans {
+            self.dump_span("use_glob", span);
+            return;
+        }
+
+        let names = data.names.join(", ");
+
+        let id = data.id.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("id", &id),
+            ("value", &names),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("use_glob", data.span, values);
+    }
+
+    fn variable(&mut self, span: Span, data: VariableData) {
+        if self.dump_spans {
+            self.dump_span("variable", span);
+            return;
+        }
+
+        let id = data.id.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("id", &id),
+            ("name", &data.name),
+            ("qualname", &data.qualname),
+            ("value", &data.value),
+            ("type", &data.type_value),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("variable", data.span, values);
+    }
+
+    fn variable_ref(&mut self, span: Span, data: VariableRefData) {
+        if self.dump_spans {
+            self.dump_span("var_ref", span);
+            return;
+        }
+
+        let ref_id = data.ref_id.index.as_usize().to_string();
+        let ref_crate = data.ref_id.krate.to_string();
+        let scope = data.scope.to_string();
+        let values = make_values_str(&[
+            ("refid", &ref_id),
+            ("refidcrate", &ref_crate),
+            ("qualname", ""),
+            ("scopeid", &scope)
+        ]);
+
+        self.record("var_ref", data.span, values)
+    }
+}
+
+// Helper function to escape quotes in a string
+fn escape(s: String) -> String {
+    s.replace("\"", "\"\"")
+}
+
+fn make_values_str(pairs: &[(&'static str, &str)]) -> String {
+    let pairs = pairs.into_iter().map(|&(f, v)| {
+        // Never take more than 1020 bytes
+        if v.len() > 1020 {
+            (f, &v[..1020])
+        } else {
+            (f, v)
+        }
+    });
+
+    let strs = pairs.map(|(f, v)| format!(",{},\"{}\"", f, escape(String::from(v))));
+    strs.fold(String::new(), |mut s, ss| {
+        s.push_str(&ss[..]);
+        s
+    })
+}
+
+fn null_def_id() -> DefId {
+    DefId {
+        krate: 0,
+        index: DefIndex::new(0),
+    }
+}
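
To make the quoting rules above concrete: each value is emitted as `,<field>,"<value>"`, with embedded double quotes doubled CSV-style and values truncated to at most 1020 bytes. A small self-contained check of that behaviour, with the two helpers reimplemented in isolation (not the dumper's own code):

    // Double quotes are doubled (CSV-style) and each pair becomes `,field,"value"`.
    fn escape(s: String) -> String {
        s.replace("\"", "\"\"")
    }

    fn make_values_str(pairs: &[(&'static str, &str)]) -> String {
        pairs.iter()
             .map(|&(f, v)| (f, if v.len() > 1020 { &v[..1020] } else { v }))
             .map(|(f, v)| format!(",{},\"{}\"", f, escape(v.to_string())))
             .collect()
    }

    fn main() {
        assert_eq!(make_values_str(&[("name", "say \"hi\"")]),
                   ",name,\"say \"\"hi\"\"\"");
    }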
diff --git a/src/librustc_save_analysis/data.rs b/src/librustc_save_analysis/data.rs
new file mode 100644 (file)
index 0000000..7f2f261
--- /dev/null
@@ -0,0 +1,383 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Structs representing the analysis data from a crate.
+//!
+//! The `Dump` trait can be used together with `DumpVisitor` in order to
+//! retrieve the data from a crate.
+
+use std::hash::Hasher;
+
+use rustc::hir::def_id::DefId;
+use rustc::ty;
+use syntax::ast::{CrateNum, NodeId};
+use syntax::codemap::Span;
+
+pub struct CrateData {
+    pub name: String,
+    pub number: u32,
+}
+
+/// Data for any entity in the Rust language. The actual data contained varies
+/// with the kind of entity being queried. See the nested structs for details.
+#[derive(Debug)]
+pub enum Data {
+    /// Data for Enums.
+    EnumData(EnumData),
+    /// Data for extern crates.
+    ExternCrateData(ExternCrateData),
+    /// Data about a function call.
+    FunctionCallData(FunctionCallData),
+    /// Data for all kinds of functions and methods.
+    FunctionData(FunctionData),
+    /// Data about a function ref.
+    FunctionRefData(FunctionRefData),
+    /// Data for impls.
+    ImplData(ImplData2),
+    /// Data for trait inheritance.
+    InheritanceData(InheritanceData),
+    /// Data about a macro declaration.
+    MacroData(MacroData),
+    /// Data about a macro use.
+    MacroUseData(MacroUseData),
+    /// Data about a method call.
+    MethodCallData(MethodCallData),
+    /// Data for method declarations (methods with a body are treated as functions).
+    MethodData(MethodData),
+    /// Data for modules.
+    ModData(ModData),
+    /// Data for a reference to a module.
+    ModRefData(ModRefData),
+    /// Data for a struct declaration.
+    StructData(StructData),
+    /// Data for a struct variant.
+    StructVariantData(StructVariantData),
+    /// Data for a trait declaration.
+    TraitData(TraitData),
+    /// Data for a tuple variant.
+    TupleVariantData(TupleVariantData),
+    /// Data for a typedef.
+    TypeDefData(TypedefData),
+    /// Data for a reference to a type or trait.
+    TypeRefData(TypeRefData),
+    /// Data for a use statement.
+    UseData(UseData),
+    /// Data for a global use statement.
+    UseGlobData(UseGlobData),
+    /// Data for local and global variables (consts and statics), and fields.
+    VariableData(VariableData),
+    /// Data for the use of some variable (e.g., the use of a local variable, which
+    /// will refer to that variable's declaration).
+    VariableRefData(VariableRefData),
+}
+
+/// Data for the prelude of a crate.
+#[derive(Debug)]
+pub struct CratePreludeData {
+    pub crate_name: String,
+    pub crate_root: Option<String>,
+    pub external_crates: Vec<ExternalCrateData>
+}
+
+/// Data for external crates in the prelude of a crate.
+#[derive(Debug)]
+pub struct ExternalCrateData {
+    pub name: String,
+    pub num: CrateNum
+}
+
+/// Data for enum declarations.
+#[derive(Clone, Debug)]
+pub struct EnumData {
+    pub id: NodeId,
+    pub value: String,
+    pub qualname: String,
+    pub span: Span,
+    pub scope: NodeId,
+}
+
+/// Data for extern crates.
+#[derive(Debug)]
+pub struct ExternCrateData {
+    pub id: NodeId,
+    pub name: String,
+    pub crate_num: CrateNum,
+    pub location: String,
+    pub span: Span,
+    pub scope: NodeId,
+}
+
+/// Data about a function call.
+#[derive(Debug)]
+pub struct FunctionCallData {
+    pub span: Span,
+    pub scope: NodeId,
+    pub ref_id: DefId,
+}
+
+/// Data for all kinds of functions and methods.
+#[derive(Clone, Debug)]
+pub struct FunctionData {
+    pub id: NodeId,
+    pub name: String,
+    pub qualname: String,
+    pub declaration: Option<DefId>,
+    pub span: Span,
+    pub scope: NodeId,
+}
+
+/// Data about a function ref.
+#[derive(Debug)]
+pub struct FunctionRefData {
+    pub span: Span,
+    pub scope: NodeId,
+    pub ref_id: DefId,
+}
+
+#[derive(Debug)]
+pub struct ImplData {
+    pub id: NodeId,
+    pub span: Span,
+    pub scope: NodeId,
+    pub trait_ref: Option<DefId>,
+    pub self_ref: Option<DefId>,
+}
+
+#[derive(Debug)]
+// FIXME: this struct should not exist. However, removing it requires heavy
+// refactoring of dump_visitor.rs. See PR 31838 for more info.
+pub struct ImplData2 {
+    pub id: NodeId,
+    pub span: Span,
+    pub scope: NodeId,
+    // FIXME: I'm not really sure inline data is the best way to do this. Seems
+    // OK in this case, but generalising leads to returning chunks of AST, which
+    // feels wrong.
+    pub trait_ref: Option<TypeRefData>,
+    pub self_ref: Option<TypeRefData>,
+}
+
+#[derive(Debug)]
+pub struct InheritanceData {
+    pub span: Span,
+    pub base_id: DefId,
+    pub deriv_id: NodeId
+}
+
+/// Data about a macro declaration.
+#[derive(Debug)]
+pub struct MacroData {
+    pub span: Span,
+    pub name: String,
+    pub qualname: String,
+}
+
+/// Data about a macro use.
+#[derive(Debug)]
+pub struct MacroUseData {
+    pub span: Span,
+    pub name: String,
+    pub qualname: String,
+    // Because macro expansion happens before ref-ids are determined,
+    // we use the callee span to reference the associated macro definition.
+    pub callee_span: Span,
+    pub scope: NodeId,
+    pub imported: bool,
+}
+
+/// Data about a method call.
+#[derive(Debug)]
+pub struct MethodCallData {
+    pub span: Span,
+    pub scope: NodeId,
+    pub ref_id: Option<DefId>,
+    pub decl_id: Option<DefId>,
+}
+
+/// Data for method declarations (methods with a body are treated as functions).
+#[derive(Clone, Debug)]
+pub struct MethodData {
+    pub id: NodeId,
+    pub qualname: String,
+    pub span: Span,
+    pub scope: NodeId,
+}
+
+/// Data for modules.
+#[derive(Debug)]
+pub struct ModData {
+    pub id: NodeId,
+    pub name: String,
+    pub qualname: String,
+    pub span: Span,
+    pub scope: NodeId,
+    pub filename: String,
+}
+
+/// Data for a reference to a module.
+#[derive(Debug)]
+pub struct ModRefData {
+    pub span: Span,
+    pub scope: NodeId,
+    pub ref_id: Option<DefId>,
+    pub qualname: String
+}
+
+#[derive(Debug)]
+pub struct StructData {
+    pub span: Span,
+    pub id: NodeId,
+    pub ctor_id: NodeId,
+    pub qualname: String,
+    pub scope: NodeId,
+    pub value: String
+}
+
+#[derive(Debug)]
+pub struct StructVariantData {
+    pub span: Span,
+    pub id: NodeId,
+    pub qualname: String,
+    pub type_value: String,
+    pub value: String,
+    pub scope: NodeId
+}
+
+#[derive(Debug)]
+pub struct TraitData {
+    pub span: Span,
+    pub id: NodeId,
+    pub qualname: String,
+    pub scope: NodeId,
+    pub value: String
+}
+
+#[derive(Debug)]
+pub struct TupleVariantData {
+    pub span: Span,
+    pub id: NodeId,
+    pub name: String,
+    pub qualname: String,
+    pub type_value: String,
+    pub value: String,
+    pub scope: NodeId
+}
+
+/// Data for a typedef.
+#[derive(Debug)]
+pub struct TypedefData {
+    pub id: NodeId,
+    pub span: Span,
+    pub qualname: String,
+    pub value: String,
+}
+
+/// Data for a reference to a type or trait.
+#[derive(Clone, Debug)]
+pub struct TypeRefData {
+    pub span: Span,
+    pub scope: NodeId,
+    pub ref_id: Option<DefId>,
+    pub qualname: String,
+}
+
+#[derive(Debug)]
+pub struct UseData {
+    pub id: NodeId,
+    pub span: Span,
+    pub name: String,
+    pub mod_id: Option<DefId>,
+    pub scope: NodeId
+}
+
+#[derive(Debug)]
+pub struct UseGlobData {
+    pub id: NodeId,
+    pub span: Span,
+    pub names: Vec<String>,
+    pub scope: NodeId
+}
+
+/// Data for local and global variables (consts and statics).
+#[derive(Debug)]
+pub struct VariableData {
+    pub id: NodeId,
+    pub name: String,
+    pub qualname: String,
+    pub span: Span,
+    pub scope: NodeId,
+    pub value: String,
+    pub type_value: String,
+}
+
+/// Data for the use of some item (e.g., the use of a local variable, which
+/// will refer to that variable's declaration (by ref_id)).
+#[derive(Debug)]
+pub struct VariableRefData {
+    pub name: String,
+    pub span: Span,
+    pub scope: NodeId,
+    pub ref_id: DefId,
+}
+
+// Emitted ids are used to cross-reference items across crates. DefIds and
+// NodeIds do not usually correspond in any way. The strategy is to use the
+// index from the DefId as a crate-local id. However, within a crate, DefId
+// indices and NodeIds can overlap. So, we must adjust the NodeIds. If an
+// item can be identified by a DefId as well as a NodeId, then we use the
+// DefId index as the id. If it can't, then we have to use the NodeId, but
+// need to adjust it so it will not clash with any possible DefId index.
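+//
+// Worked example (illustrative only): if a crate has 100 local DefIds, an item
+// that has a DefId is emitted with that DefId's index, while a NodeId of 7
+// with no corresponding DefId is emitted as 7 + 100 = 107, which cannot clash
+// with any DefId index.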
+pub fn normalize_node_id<'a>(tcx: &ty::TyCtxt<'a>, id: NodeId) -> usize {
+    match tcx.map.opt_local_def_id(id) {
+        Some(id) => id.index.as_usize(),
+        None => id as usize + tcx.map.num_local_def_ids()
+    }
+}
+
+// Macro to implement a normalize() function (see below for usage)
+macro_rules! impl_normalize {
+    ($($t:ty => $($field:ident),*);*) => {
+        $(
+            impl $t {
+                pub fn normalize<'a>(mut self, tcx: &ty::TyCtxt<'a>) -> $t {
+                    $(
+                        self.$field = normalize_node_id(tcx, self.$field) as u32;
+                    )*
+                    self
+                }
+            }
+        )*
+    }
+}
+
+impl_normalize! {
+    EnumData => id, scope;
+    ExternCrateData => id, scope;
+    FunctionCallData => scope;
+    FunctionData => id, scope;
+    FunctionRefData => scope;
+    ImplData => id, scope;
+    InheritanceData => deriv_id;
+    MacroUseData => scope;
+    MethodCallData => scope;
+    MethodData => id, scope;
+    ModData => id, scope;
+    ModRefData => scope;
+    StructData => ctor_id, id, scope;
+    StructVariantData => id, scope;
+    TupleVariantData => id, scope;
+    TraitData => id, scope;
+    TypedefData => id;
+    TypeRefData => scope;
+    UseData => id, scope;
+    UseGlobData => id, scope;
+    VariableData => id;
+    VariableRefData => scope
+}
diff --git a/src/librustc_save_analysis/dump.rs b/src/librustc_save_analysis/dump.rs
new file mode 100644 (file)
index 0000000..b0cc792
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use syntax::codemap::Span;
+
+use super::data::*;
+
+pub trait Dump {
+    fn crate_prelude(&mut self, _: Span, _: CratePreludeData) {}
+    fn enum_data(&mut self, _: Span, _: EnumData) {}
+    fn extern_crate(&mut self, _: Span, _: ExternCrateData) {}
+    fn impl_data(&mut self, _: Span, _: ImplData) {}
+    fn inheritance(&mut self, _: InheritanceData) {}
+    fn function(&mut self, _: Span, _: FunctionData) {}
+    fn function_ref(&mut self, _: Span, _: FunctionRefData) {}
+    fn function_call(&mut self, _: Span, _: FunctionCallData) {}
+    fn method(&mut self, _: Span, _: MethodData) {}
+    fn method_call(&mut self, _: Span, _: MethodCallData) {}
+    fn macro_data(&mut self, _: Span, _: MacroData) {}
+    fn macro_use(&mut self, _: Span, _: MacroUseData) {}
+    fn mod_data(&mut self, _: ModData) {}
+    fn mod_ref(&mut self, _: Span, _: ModRefData) {}
+    fn struct_data(&mut self, _: Span, _: StructData) {}
+    fn struct_variant(&mut self, _: Span, _: StructVariantData) {}
+    fn trait_data(&mut self, _: Span, _: TraitData) {}
+    fn tuple_variant(&mut self, _: Span, _: TupleVariantData) {}
+    fn type_ref(&mut self, _: Span, _: TypeRefData) {}
+    fn typedef(&mut self, _: Span, _: TypedefData) {}
+    fn use_data(&mut self, _: Span, _: UseData) {}
+    fn use_glob(&mut self, _: Span, _: UseGlobData) {}
+    fn variable(&mut self, _: Span, _: VariableData) {}
+    fn variable_ref(&mut self, _: Span, _: VariableRefData) {}
+}
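+
+// Because every method has a no-op default body, the simplest possible
+// implementor is just (an illustrative sketch, not part of this file):
+//
+//     struct NoopDump;
+//     impl Dump for NoopDump {}
+//
+// A real implementor overrides only the callbacks it needs.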
diff --git a/src/librustc_save_analysis/dump_visitor.rs b/src/librustc_save_analysis/dump_visitor.rs
new file mode 100644 (file)
index 0000000..3784c95
--- /dev/null
@@ -0,0 +1,1386 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Write the output of rustc's analysis to an implementor of Dump. The data is
+//! primarily designed to be used as input to the DXR tool, specifically its
+//! Rust plugin. It could also be used by IDEs or other code browsing, search, or
+//! cross-referencing tools.
+//!
+//! Dumping the analysis is implemented by walking the AST and getting a bunch of
+//! info out from all over the place. We use Def IDs to identify objects. The
+//! tricky part is getting syntactic (span, source text) and semantic (reference
+//! Def IDs) information for parts of expressions which the compiler has discarded.
+//! E.g., in a path `foo::bar::baz`, the compiler only keeps a span for the whole
+//! path and a reference to `baz`, but we want spans and references for all three
+//! idents.
+//!
+//! SpanUtils is used to manipulate spans. In particular, to extract sub-spans
+//! from spans (e.g., the span for `bar` from the above example path).
+//! DumpVisitor walks the AST and processes it, and an implementor of Dump
+//! is used for recording the output in a format-agnostic way (see CsvDumper
+//! for an example).
+
+use rustc::hir::def::Def;
+use rustc::hir::def_id::DefId;
+use rustc::session::Session;
+use rustc::ty::{self, TyCtxt};
+
+use std::collections::HashSet;
+use std::hash::*;
+
+use syntax::ast::{self, NodeId, PatKind};
+use syntax::codemap::*;
+use syntax::parse::token::{self, keywords};
+use syntax::visit::{self, Visitor};
+use syntax::print::pprust::{path_to_string, ty_to_string};
+use syntax::ptr::P;
+
+use rustc::hir::lowering::{lower_expr, LoweringContext};
+
+use super::{escape, generated_code, SaveContext, PathCollector};
+use super::data::*;
+use super::dump::Dump;
+use super::span_utils::SpanUtils;
+use super::recorder;
+
+macro_rules! down_cast_data {
+    ($id:ident, $kind:ident, $sp:expr) => {
+        let $id = if let super::Data::$kind(data) = $id {
+            data
+        } else {
+            span_bug!($sp, "unexpected data kind: {:?}", $id);
+        }
+    };
+}
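+
+// Illustrative usage note: at a call site further down in this file,
+// `down_cast_data!(fn_data, FunctionData, item.span)` rebinds `fn_data` to the
+// inner `FunctionData`, or raises a span_bug for any other `Data` variant.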
+
+pub struct DumpVisitor<'l, 'tcx: 'l, D: 'l> {
+    save_ctxt: SaveContext<'l, 'tcx>,
+    sess: &'l Session,
+    tcx: &'l TyCtxt<'tcx>,
+    analysis: &'l ty::CrateAnalysis<'l>,
+    dumper: &'l mut D,
+
+    span: SpanUtils<'l>,
+
+    cur_scope: NodeId,
+
+    // Set of macro definition (callee) spans, and the set
+    // of macro use (callsite) spans. We store these to ensure
+    // we only write one macro def per unique macro definition, and
+    // one macro use per unique callsite span.
+    mac_defs: HashSet<Span>,
+    mac_uses: HashSet<Span>,
+
+}
+
+impl <'l, 'tcx, D> DumpVisitor<'l, 'tcx, D>
+where D: Dump
+{
+    pub fn new(tcx: &'l TyCtxt<'tcx>,
+               lcx: &'l LoweringContext<'l>,
+               analysis: &'l ty::CrateAnalysis<'l>,
+               dumper: &'l mut D)
+               -> DumpVisitor<'l, 'tcx, D> {
+        let span_utils = SpanUtils::new(&tcx.sess);
+        DumpVisitor {
+            sess: &tcx.sess,
+            tcx: tcx,
+            save_ctxt: SaveContext::from_span_utils(tcx, lcx, span_utils.clone()),
+            analysis: analysis,
+            dumper: dumper,
+            span: span_utils.clone(),
+            cur_scope: 0,
+            mac_defs: HashSet::new(),
+            mac_uses: HashSet::new(),
+        }
+    }
+
+    fn nest<F>(&mut self, scope_id: NodeId, f: F)
+        where F: FnOnce(&mut DumpVisitor<'l, 'tcx, D>)
+    {
+        let parent_scope = self.cur_scope;
+        self.cur_scope = scope_id;
+        f(self);
+        self.cur_scope = parent_scope;
+    }
+
+    pub fn dump_crate_info(&mut self, name: &str, krate: &ast::Crate) {
+        let source_file = self.tcx.sess.local_crate_source_file.as_ref();
+        let crate_root = source_file.map(|source_file| {
+            match source_file.file_name() {
+                Some(_) => source_file.parent().unwrap().display().to_string(),
+                None => source_file.display().to_string(),
+            }
+        });
+
+        // Info about all the external crates referenced from this crate.
+        let external_crates = self.save_ctxt.get_external_crates().into_iter().map(|c| {
+            ExternalCrateData {
+                name: c.name,
+                num: c.number
+            }
+        }).collect();
+
+        // The current crate.
+        let data = CratePreludeData {
+            crate_name: name.into(),
+            crate_root: crate_root,
+            external_crates: external_crates
+        };
+
+        self.dumper.crate_prelude(krate.span, data);
+    }
+
+    // Return all non-empty prefixes of a path.
+    // For each prefix, we return the span for the last segment in the prefix and
+    // a str representation of the entire prefix.
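+    // For example (illustrative only), for a path `foo::bar::baz` this yields,
+    // in order:
+    //     (span of `foo`, "foo")
+    //     (span of `bar`, "foo::bar")
+    //     (span of `baz`, "foo::bar::baz")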
+    fn process_path_prefixes(&self, path: &ast::Path) -> Vec<(Span, String)> {
+        let spans = self.span.spans_for_path_segments(path);
+
+        // Paths to enums seem to not match their spans - the span includes all the
+        // variants too. But they seem to always be at the end, so I hope we can cope with
+        // always using the first ones. So, only error out if we don't have enough spans.
+        // What could go wrong...?
+        if spans.len() < path.segments.len() {
+            if generated_code(path.span) {
+                return vec!();
+            }
+            error!("Mis-calculated spans for path '{}'. Found {} spans, expected {}. Found spans:",
+                   path_to_string(path),
+                   spans.len(),
+                   path.segments.len());
+            for s in &spans {
+                let loc = self.sess.codemap().lookup_char_pos(s.lo);
+                error!("    '{}' in {}, line {}",
+                       self.span.snippet(*s),
+                       loc.file.name,
+                       loc.line);
+            }
+            return vec!();
+        }
+
+        let mut result: Vec<(Span, String)> = vec!();
+
+        let mut segs = vec!();
+        for (i, (seg, span)) in path.segments.iter().zip(&spans).enumerate() {
+            segs.push(seg.clone());
+            let sub_path = ast::Path {
+                span: *span, // span for the last segment
+                global: path.global,
+                segments: segs,
+            };
+            let qualname = if i == 0 && path.global {
+                format!("::{}", path_to_string(&sub_path))
+            } else {
+                path_to_string(&sub_path)
+            };
+            result.push((*span, qualname));
+            segs = sub_path.segments;
+        }
+
+        result
+    }
+
+    // The global arg allows us to override the global-ness of the path (which
+    // actually means 'does the path start with `::`', rather than 'is the path
+    // semantically global). We use the override for `use` imports (etc.) where
+    // the syntax is non-global, but the semantics are global.
+    fn write_sub_paths(&mut self, path: &ast::Path, global: bool) {
+        let sub_paths = self.process_path_prefixes(path);
+        for (i, &(ref span, ref qualname)) in sub_paths.iter().enumerate() {
+            let qualname = if i == 0 && global && !path.global {
+                format!("::{}", qualname)
+            } else {
+                qualname.clone()
+            };
+            self.dumper.mod_ref(path.span, ModRefData {
+                span: *span,
+                qualname: qualname,
+                scope: self.cur_scope,
+                ref_id: None
+            }.normalize(&self.tcx));
+        }
+    }
+
+    // As write_sub_paths, but does not process the last ident in the path (assuming it
+    // will be processed elsewhere). See note on write_sub_paths about global.
+    fn write_sub_paths_truncated(&mut self, path: &ast::Path, global: bool) {
+        let sub_paths = self.process_path_prefixes(path);
+        let len = sub_paths.len();
+        if len <= 1 {
+            return;
+        }
+
+        let sub_paths = &sub_paths[..len-1];
+        for (i, &(ref span, ref qualname)) in sub_paths.iter().enumerate() {
+            let qualname = if i == 0 && global && !path.global {
+                format!("::{}", qualname)
+            } else {
+                qualname.clone()
+            };
+            self.dumper.mod_ref(path.span, ModRefData {
+                span: *span,
+                qualname: qualname,
+                scope: self.cur_scope,
+                ref_id: None
+            }.normalize(&self.tcx));
+        }
+    }
+
+    // As write_sub_paths, but expects a path of the form module_path::trait::method,
+    // where trait could actually be a struct too.
+    fn write_sub_path_trait_truncated(&mut self, path: &ast::Path) {
+        let sub_paths = self.process_path_prefixes(path);
+        let len = sub_paths.len();
+        if len <= 1 {
+            return;
+        }
+        let sub_paths = &sub_paths[.. (len-1)];
+
+        // write the trait part of the sub-path
+        let (ref span, ref qualname) = sub_paths[len-2];
+        self.dumper.type_ref(path.span, TypeRefData {
+            ref_id: None,
+            span: *span,
+            qualname: qualname.to_owned(),
+            scope: 0
+        });
+
+        // write the other sub-paths
+        if len <= 2 {
+            return;
+        }
+        let sub_paths = &sub_paths[..len-2];
+        for &(ref span, ref qualname) in sub_paths {
+            self.dumper.mod_ref(path.span, ModRefData {
+                span: *span,
+                qualname: qualname.to_owned(),
+                scope: self.cur_scope,
+                ref_id: None
+            }.normalize(&self.tcx));
+        }
+    }
+
+    // looks up anything, not just a type
+    fn lookup_type_ref(&self, ref_id: NodeId) -> Option<DefId> {
+        if !self.tcx.def_map.borrow().contains_key(&ref_id) {
+            bug!("def_map has no key for {} in lookup_type_ref", ref_id);
+        }
+        let def = self.tcx.def_map.borrow().get(&ref_id).unwrap().full_def();
+        match def {
+            Def::PrimTy(..) => None,
+            Def::SelfTy(..) => None,
+            _ => Some(def.def_id()),
+        }
+    }
+
+    fn process_def_kind(&mut self,
+                        ref_id: NodeId,
+                        span: Span,
+                        sub_span: Option<Span>,
+                        def_id: DefId,
+                        scope: NodeId) {
+        if self.span.filter_generated(sub_span, span) {
+            return;
+        }
+
+        let def_map = self.tcx.def_map.borrow();
+        if !def_map.contains_key(&ref_id) {
+            span_bug!(span,
+                      "def_map has no key for {} in lookup_def_kind",
+                      ref_id);
+        }
+        let def = def_map.get(&ref_id).unwrap().full_def();
+        match def {
+            Def::Mod(_) |
+            Def::ForeignMod(_) => {
+                self.dumper.mod_ref(span, ModRefData {
+                    span: sub_span.expect("No span found for mod ref"),
+                    ref_id: Some(def_id),
+                    scope: scope,
+                    qualname: String::new()
+                }.normalize(&self.tcx));
+            }
+            Def::Struct(..) |
+            Def::Enum(..) |
+            Def::TyAlias(..) |
+            Def::AssociatedTy(..) |
+            Def::Trait(_) => {
+                self.dumper.type_ref(span, TypeRefData {
+                    span: sub_span.expect("No span found for type ref"),
+                    ref_id: Some(def_id),
+                    scope: scope,
+                    qualname: String::new()
+                }.normalize(&self.tcx));
+            }
+            Def::Static(_, _) |
+            Def::Const(_) |
+            Def::AssociatedConst(..) |
+            Def::Local(..) |
+            Def::Variant(..) |
+            Def::Upvar(..) => {
+                self.dumper.variable_ref(span, VariableRefData {
+                    span: sub_span.expect("No span found for var ref"),
+                    ref_id: def_id,
+                    scope: scope,
+                    name: String::new()
+                }.normalize(&self.tcx));
+            }
+            Def::Fn(..) => {
+                self.dumper.function_ref(span, FunctionRefData {
+                    span: sub_span.expect("No span found for fn ref"),
+                    ref_id: def_id,
+                    scope: scope
+                }.normalize(&self.tcx));
+            }
+            Def::SelfTy(..) |
+            Def::Label(_) |
+            Def::TyParam(..) |
+            Def::Method(..) |
+            Def::PrimTy(_) |
+            Def::Err => {
+               span_bug!(span,
+                         "process_def_kind for unexpected item: {:?}",
+                         def);
+            }
+        }
+    }
+
+    fn process_formals(&mut self, formals: &Vec<ast::Arg>, qualname: &str) {
+        for arg in formals {
+            self.visit_pat(&arg.pat);
+            let mut collector = PathCollector::new();
+            collector.visit_pat(&arg.pat);
+            let span_utils = self.span.clone();
+            for &(id, ref p, _, _) in &collector.collected_paths {
+                let typ = self.tcx.node_types().get(&id).unwrap().to_string();
+                // get the span only for the name of the variable (I hope the path is only ever a
+                // variable name, but who knows?)
+                let sub_span = span_utils.span_for_last_ident(p.span);
+                if !self.span.filter_generated(sub_span, p.span) {
+                    self.dumper.variable(p.span, VariableData {
+                        id: id,
+                        span: sub_span.expect("No span found for variable"),
+                        name: path_to_string(p),
+                        qualname: format!("{}::{}", qualname, path_to_string(p)),
+                        type_value: typ,
+                        value: String::new(),
+                        scope: 0
+                    }.normalize(&self.tcx));
+                }
+            }
+        }
+    }
+
+    fn process_method(&mut self,
+                      sig: &ast::MethodSig,
+                      body: Option<&ast::Block>,
+                      id: ast::NodeId,
+                      name: ast::Name,
+                      span: Span) {
+        debug!("process_method: {}:{}", id, name);
+
+        if let Some(method_data) = self.save_ctxt.get_method_data(id, name, span) {
+
+            if body.is_some() {
+                if !self.span.filter_generated(Some(method_data.span), span) {
+                    self.dumper.function(span, method_data.clone().normalize(&self.tcx));
+                }
+                self.process_formals(&sig.decl.inputs, &method_data.qualname);
+            } else {
+                if !self.span.filter_generated(Some(method_data.span), span) {
+                    self.dumper.method(span, MethodData {
+                        id: method_data.id,
+                        span: method_data.span,
+                        scope: method_data.scope,
+                        qualname: method_data.qualname.clone(),
+                    }.normalize(&self.tcx));
+                }
+            }
+            self.process_generic_params(&sig.generics, span, &method_data.qualname, id);
+        }
+
+        // walk arg and return types
+        for arg in &sig.decl.inputs {
+            self.visit_ty(&arg.ty);
+        }
+
+        if let ast::FunctionRetTy::Ty(ref ret_ty) = sig.decl.output {
+            self.visit_ty(ret_ty);
+        }
+
+        // walk the fn body
+        if let Some(body) = body {
+            self.nest(id, |v| v.visit_block(body));
+        }
+    }
+
+    fn process_trait_ref(&mut self, trait_ref: &ast::TraitRef) {
+        let trait_ref_data = self.save_ctxt.get_trait_ref_data(trait_ref, self.cur_scope);
+        if let Some(trait_ref_data) = trait_ref_data {
+            if !self.span.filter_generated(Some(trait_ref_data.span), trait_ref.path.span) {
+                self.dumper.type_ref(trait_ref.path.span, trait_ref_data.normalize(&self.tcx));
+            }
+
+            visit::walk_path(self, &trait_ref.path);
+        }
+    }
+
+    fn process_struct_field_def(&mut self, field: &ast::StructField, parent_id: NodeId) {
+        let field_data = self.save_ctxt.get_field_data(field, parent_id);
+        if let Some(mut field_data) = field_data {
+            if !self.span.filter_generated(Some(field_data.span), field.span) {
+                field_data.scope = normalize_node_id(&self.tcx, field_data.scope) as u32;
+                field_data.value = String::new();
+                self.dumper.variable(field.span, field_data.normalize(&self.tcx));
+            }
+        }
+    }
+
+    // Dump generic param bindings, then visit_generics
+    fn process_generic_params(&mut self,
+                              generics: &ast::Generics,
+                              full_span: Span,
+                              prefix: &str,
+                              id: NodeId) {
+        // We can't only use visit_generics since we don't have spans for param
+        // bindings, so we reparse the full_span to get those sub spans.
+        // However, the full span is the entire enum/fn/struct block, so we only want
+        // the first few to match the number of generics we're looking for.
+        let param_sub_spans = self.span.spans_for_ty_params(full_span,
+                                                            (generics.ty_params.len() as isize));
+        for (param, param_ss) in generics.ty_params.iter().zip(param_sub_spans) {
+            // Append $id to name to make sure each one is unique
+            let name = format!("{}::{}${}",
+                               prefix,
+                               escape(self.span.snippet(param_ss)),
+                               id);
+            if !self.span.filter_generated(Some(param_ss), full_span) {
+                self.dumper.typedef(full_span, TypedefData {
+                    span: param_ss,
+                    id: param.id,
+                    qualname: name,
+                    value: String::new()
+                }.normalize(&self.tcx));
+            }
+        }
+        self.visit_generics(generics);
+    }
+
+    fn process_fn(&mut self,
+                  item: &ast::Item,
+                  decl: &ast::FnDecl,
+                  ty_params: &ast::Generics,
+                  body: &ast::Block) {
+        if let Some(fn_data) = self.save_ctxt.get_item_data(item) {
+            down_cast_data!(fn_data, FunctionData, item.span);
+            if !self.span.filter_generated(Some(fn_data.span), item.span) {
+                self.dumper.function(item.span, fn_data.clone().normalize(&self.tcx));
+            }
+
+            self.process_formals(&decl.inputs, &fn_data.qualname);
+            self.process_generic_params(ty_params, item.span, &fn_data.qualname, item.id);
+        }
+
+        for arg in &decl.inputs {
+            self.visit_ty(&arg.ty);
+        }
+
+        if let ast::FunctionRetTy::Ty(ref ret_ty) = decl.output {
+            self.visit_ty(&ret_ty);
+        }
+
+        self.nest(item.id, |v| v.visit_block(&body));
+    }
+
+    fn process_static_or_const_item(&mut self, item: &ast::Item, typ: &ast::Ty, expr: &ast::Expr) {
+        if let Some(var_data) = self.save_ctxt.get_item_data(item) {
+            down_cast_data!(var_data, VariableData, item.span);
+            if !self.span.filter_generated(Some(var_data.span), item.span) {
+                let mut var_data = var_data;
+                var_data.scope = normalize_node_id(&self.tcx, var_data.scope) as u32;
+                self.dumper.variable(item.span, var_data.normalize(&self.tcx));
+            }
+        }
+        self.visit_ty(&typ);
+        self.visit_expr(expr);
+    }
+
+    fn process_const(&mut self,
+                     id: ast::NodeId,
+                     name: ast::Name,
+                     span: Span,
+                     typ: &ast::Ty,
+                     expr: &ast::Expr) {
+        let qualname = format!("::{}", self.tcx.node_path_str(id));
+
+        let sub_span = self.span.sub_span_after_keyword(span, keywords::Const);
+
+        if !self.span.filter_generated(sub_span, span) {
+            self.dumper.variable(span, VariableData {
+                span: sub_span.expect("No span found for variable"),
+                id: id,
+                name: name.to_string(),
+                qualname: qualname,
+                value: self.span.snippet(expr.span),
+                type_value: ty_to_string(&typ),
+                scope: normalize_node_id(&self.tcx, self.cur_scope) as u32
+            }.normalize(&self.tcx));
+        }
+
+        // walk type and init value
+        self.visit_ty(typ);
+        self.visit_expr(expr);
+    }
+
+    fn process_struct(&mut self,
+                      item: &ast::Item,
+                      def: &ast::VariantData,
+                      ty_params: &ast::Generics) {
+        let qualname = format!("::{}", self.tcx.node_path_str(item.id));
+
+        let val = self.span.snippet(item.span);
+        let sub_span = self.span.sub_span_after_keyword(item.span, keywords::Struct);
+        if !self.span.filter_generated(sub_span, item.span) {
+            self.dumper.struct_data(item.span, StructData {
+                span: sub_span.expect("No span found for struct"),
+                id: item.id,
+                ctor_id: def.id(),
+                qualname: qualname.clone(),
+                scope: self.cur_scope,
+                value: val
+            }.normalize(&self.tcx));
+        }
+
+
+        // fields
+        for field in def.fields() {
+            self.process_struct_field_def(field, item.id);
+            self.visit_ty(&field.ty);
+        }
+
+        self.process_generic_params(ty_params, item.span, &qualname, item.id);
+    }
+
+    fn process_enum(&mut self,
+                    item: &ast::Item,
+                    enum_definition: &ast::EnumDef,
+                    ty_params: &ast::Generics) {
+        let enum_data = self.save_ctxt.get_item_data(item);
+        let enum_data = match enum_data {
+            None => return,
+            Some(data) => data,
+        };
+        down_cast_data!(enum_data, EnumData, item.span);
+        let normalized = enum_data.clone().normalize(&self.tcx);
+        if !self.span.filter_generated(Some(normalized.span), item.span) {
+            self.dumper.enum_data(item.span, normalized);
+        }
+
+        for variant in &enum_definition.variants {
+            let name = &variant.node.name.name.as_str();
+            let mut qualname = enum_data.qualname.clone();
+            qualname.push_str("::");
+            qualname.push_str(name);
+            let val = self.span.snippet(variant.span);
+
+            match variant.node.data {
+                ast::VariantData::Struct(..) => {
+                    let sub_span = self.span.span_for_first_ident(variant.span);
+                    if !self.span.filter_generated(sub_span, variant.span) {
+                        self.dumper.struct_variant(variant.span, StructVariantData {
+                            span: sub_span.expect("No span found for struct variant"),
+                            id: variant.node.data.id(),
+                            qualname: qualname,
+                            type_value: enum_data.qualname.clone(),
+                            value: val,
+                            scope: enum_data.scope
+                        }.normalize(&self.tcx));
+                    }
+                }
+                _ => {
+                    let sub_span = self.span.span_for_first_ident(variant.span);
+                    if !self.span.filter_generated(sub_span, variant.span) {
+                        self.dumper.tuple_variant(variant.span, TupleVariantData {
+                            span: sub_span.expect("No span found for tuple variant"),
+                            id: variant.node.data.id(),
+                            name: name.to_string(),
+                            qualname: qualname,
+                            type_value: enum_data.qualname.clone(),
+                            value: val,
+                            scope: enum_data.scope
+                        }.normalize(&self.tcx));
+                    }
+                }
+            }
+
+
+            for field in variant.node.data.fields() {
+                self.process_struct_field_def(field, variant.node.data.id());
+                self.visit_ty(&field.ty);
+            }
+        }
+        self.process_generic_params(ty_params, item.span, &enum_data.qualname, enum_data.id);
+    }
+
+    fn process_impl(&mut self,
+                    item: &ast::Item,
+                    type_parameters: &ast::Generics,
+                    trait_ref: &Option<ast::TraitRef>,
+                    typ: &ast::Ty,
+                    impl_items: &[ast::ImplItem]) {
+        let mut has_self_ref = false;
+        if let Some(impl_data) = self.save_ctxt.get_item_data(item) {
+            down_cast_data!(impl_data, ImplData, item.span);
+            if let Some(ref self_ref) = impl_data.self_ref {
+                has_self_ref = true;
+                if !self.span.filter_generated(Some(self_ref.span), item.span) {
+                    self.dumper.type_ref(item.span, self_ref.clone().normalize(&self.tcx));
+                }
+            }
+            if let Some(ref trait_ref_data) = impl_data.trait_ref {
+                if !self.span.filter_generated(Some(trait_ref_data.span), item.span) {
+                    self.dumper.type_ref(item.span, trait_ref_data.clone().normalize(&self.tcx));
+                }
+
+                visit::walk_path(self, &trait_ref.as_ref().unwrap().path);
+            }
+
+            if !self.span.filter_generated(Some(impl_data.span), item.span) {
+                self.dumper.impl_data(item.span, ImplData {
+                    id: impl_data.id,
+                    span: impl_data.span,
+                    scope: impl_data.scope,
+                    trait_ref: impl_data.trait_ref.map(|d| d.ref_id.unwrap()),
+                    self_ref: impl_data.self_ref.map(|d| d.ref_id.unwrap())
+                }.normalize(&self.tcx));
+            }
+        }
+        if !has_self_ref {
+            self.visit_ty(&typ);
+        }
+        self.process_generic_params(type_parameters, item.span, "", item.id);
+        for impl_item in impl_items {
+            self.visit_impl_item(impl_item);
+        }
+    }
+
+    fn process_trait(&mut self,
+                     item: &ast::Item,
+                     generics: &ast::Generics,
+                     trait_refs: &ast::TyParamBounds,
+                     methods: &[ast::TraitItem]) {
+        let qualname = format!("::{}", self.tcx.node_path_str(item.id));
+        let val = self.span.snippet(item.span);
+        let sub_span = self.span.sub_span_after_keyword(item.span, keywords::Trait);
+        if !self.span.filter_generated(sub_span, item.span) {
+            self.dumper.trait_data(item.span, TraitData {
+                span: sub_span.expect("No span found for trait"),
+                id: item.id,
+                qualname: qualname.clone(),
+                scope: self.cur_scope,
+                value: val
+            }.normalize(&self.tcx));
+        }
+
+        // super-traits
+        for super_bound in trait_refs.iter() {
+            let trait_ref = match *super_bound {
+                ast::TraitTyParamBound(ref trait_ref, _) => {
+                    trait_ref
+                }
+                ast::RegionTyParamBound(..) => {
+                    continue;
+                }
+            };
+
+            let trait_ref = &trait_ref.trait_ref;
+            if let Some(id) = self.lookup_type_ref(trait_ref.ref_id) {
+                let sub_span = self.span.sub_span_for_type_name(trait_ref.path.span);
+                if !self.span.filter_generated(sub_span, trait_ref.path.span) {
+                    self.dumper.type_ref(trait_ref.path.span, TypeRefData {
+                        span: sub_span.expect("No span found for trait ref"),
+                        ref_id: Some(id),
+                        scope: self.cur_scope,
+                        qualname: String::new()
+                    }.normalize(&self.tcx));
+                }
+
+                if !self.span.filter_generated(sub_span, trait_ref.path.span) {
+                    let sub_span = sub_span.expect("No span for inheritance");
+                    self.dumper.inheritance(InheritanceData {
+                        span: sub_span,
+                        base_id: id,
+                        deriv_id: item.id
+                    }.normalize(&self.tcx));
+                }
+            }
+        }
+
+        // walk generics and methods
+        self.process_generic_params(generics, item.span, &qualname, item.id);
+        for method in methods {
+            self.visit_trait_item(method)
+        }
+    }
+
+    // `item` is the module in question, represented as an item.
+    fn process_mod(&mut self, item: &ast::Item) {
+        if let Some(mod_data) = self.save_ctxt.get_item_data(item) {
+            down_cast_data!(mod_data, ModData, item.span);
+            if !self.span.filter_generated(Some(mod_data.span), item.span) {
+                self.dumper.mod_data(mod_data.normalize(&self.tcx));
+            }
+        }
+    }
+
+    fn process_path(&mut self, id: NodeId, path: &ast::Path, ref_kind: Option<recorder::Row>) {
+        let path_data = self.save_ctxt.get_path_data(id, path);
+        if generated_code(path.span) && path_data.is_none() {
+            return;
+        }
+
+        let path_data = match path_data {
+            Some(pd) => pd,
+            None => {
+                span_bug!(path.span,
+                          "Unexpected def kind while looking up path in `{}`",
+                          self.span.snippet(path.span))
+            }
+        };
+
+        match path_data {
+            Data::VariableRefData(vrd) => {
+                // FIXME: this whole block duplicates the code in process_def_kind
+                if !self.span.filter_generated(Some(vrd.span), path.span) {
+                    match ref_kind {
+                        Some(recorder::TypeRef) => {
+                            self.dumper.type_ref(path.span, TypeRefData {
+                                span: vrd.span,
+                                ref_id: Some(vrd.ref_id),
+                                scope: vrd.scope,
+                                qualname: String::new()
+                            }.normalize(&self.tcx));
+                        }
+                        Some(recorder::FnRef) => {
+                            self.dumper.function_ref(path.span, FunctionRefData {
+                                span: vrd.span,
+                                ref_id: vrd.ref_id,
+                                scope: vrd.scope
+                            }.normalize(&self.tcx));
+                        }
+                        Some(recorder::ModRef) => {
+                            self.dumper.mod_ref(path.span, ModRefData {
+                                span: vrd.span,
+                                ref_id: Some(vrd.ref_id),
+                                scope: vrd.scope,
+                                qualname: String::new()
+                            }.normalize(&self.tcx));
+                        }
+                        Some(recorder::VarRef) | None
+                            => self.dumper.variable_ref(path.span, vrd.normalize(&self.tcx))
+                    }
+                }
+
+            }
+            Data::TypeRefData(trd) => {
+                if !self.span.filter_generated(Some(trd.span), path.span) {
+                    self.dumper.type_ref(path.span, trd.normalize(&self.tcx));
+                }
+            }
+            Data::MethodCallData(mcd) => {
+                if !self.span.filter_generated(Some(mcd.span), path.span) {
+                    self.dumper.method_call(path.span, mcd.normalize(&self.tcx));
+                }
+            }
+            Data::FunctionCallData(fcd) => {
+                if !self.span.filter_generated(Some(fcd.span), path.span) {
+                    self.dumper.function_call(path.span, fcd.normalize(&self.tcx));
+                }
+            }
+            _ => {
+               span_bug!(path.span, "Unexpected data: {:?}", path_data);
+            }
+        }
+
+        // Modules or types in the path prefix.
+        let def_map = self.tcx.def_map.borrow();
+        let def = def_map.get(&id).unwrap().full_def();
+        match def {
+            Def::Method(did) => {
+                let ti = self.tcx.impl_or_trait_item(did);
+                if let ty::MethodTraitItem(m) = ti {
+                    if m.explicit_self == ty::ExplicitSelfCategory::Static {
+                        self.write_sub_path_trait_truncated(path);
+                    }
+                }
+            }
+            Def::Local(..) |
+            Def::Static(_,_) |
+            Def::Const(..) |
+            Def::AssociatedConst(..) |
+            Def::Struct(..) |
+            Def::Variant(..) |
+            Def::Fn(..) => self.write_sub_paths_truncated(path, false),
+            _ => {}
+        }
+    }
+
+    fn process_struct_lit(&mut self,
+                          ex: &ast::Expr,
+                          path: &ast::Path,
+                          fields: &Vec<ast::Field>,
+                          variant: ty::VariantDef,
+                          base: &Option<P<ast::Expr>>) {
+        self.write_sub_paths_truncated(path, false);
+
+        if let Some(struct_lit_data) = self.save_ctxt.get_expr_data(ex) {
+            down_cast_data!(struct_lit_data, TypeRefData, ex.span);
+            if !self.span.filter_generated(Some(struct_lit_data.span), ex.span) {
+                self.dumper.type_ref(ex.span, struct_lit_data.normalize(&self.tcx));
+            }
+
+            let scope = self.save_ctxt.enclosing_scope(ex.id);
+
+            for field in fields {
+                if let Some(field_data) = self.save_ctxt
+                                              .get_field_ref_data(field, variant, scope) {
+
+                    if !self.span.filter_generated(Some(field_data.span), field.ident.span) {
+                        self.dumper.variable_ref(field.ident.span, field_data.normalize(&self.tcx));
+                    }
+                }
+
+                self.visit_expr(&field.expr)
+            }
+        }
+
+        walk_list!(self, visit_expr, base);
+    }
+
+    fn process_method_call(&mut self, ex: &ast::Expr, args: &Vec<P<ast::Expr>>) {
+        if let Some(mcd) = self.save_ctxt.get_expr_data(ex) {
+            down_cast_data!(mcd, MethodCallData, ex.span);
+            if !self.span.filter_generated(Some(mcd.span), ex.span) {
+                self.dumper.method_call(ex.span, mcd.normalize(&self.tcx));
+            }
+        }
+
+        // walk receiver and args
+        walk_list!(self, visit_expr, args);
+    }
+
+    fn process_pat(&mut self, p: &ast::Pat) {
+        match p.node {
+            PatKind::Struct(ref path, ref fields, _) => {
+                visit::walk_path(self, path);
+                let adt = self.tcx.node_id_to_type(p.id).ty_adt_def().unwrap();
+                let def = self.tcx.def_map.borrow()[&p.id].full_def();
+                let variant = adt.variant_of_def(def);
+
+                for &Spanned { node: ref field, span } in fields {
+                    let sub_span = self.span.span_for_first_ident(span);
+                    if let Some(f) = variant.find_field_named(field.ident.name) {
+                        if !self.span.filter_generated(sub_span, span) {
+                            self.dumper.variable_ref(span, VariableRefData {
+                                span: sub_span.expect("No span found for var ref"),
+                                ref_id: f.did,
+                                scope: self.cur_scope,
+                                name: String::new()
+                            }.normalize(&self.tcx));
+                        }
+                    }
+                    self.visit_pat(&field.pat);
+                }
+            }
+            _ => visit::walk_pat(self, p),
+        }
+    }
+
+
+    fn process_var_decl(&mut self, p: &ast::Pat, value: String) {
+        // The local could declare multiple new vars, we must walk the
+        // pattern and collect them all.
+        let mut collector = PathCollector::new();
+        collector.visit_pat(&p);
+        self.visit_pat(&p);
+
+        for &(id, ref p, immut, _) in &collector.collected_paths {
+            let value = if immut == ast::Mutability::Immutable {
+                value.to_string()
+            } else {
+                "<mutable>".to_string()
+            };
+            let types = self.tcx.node_types();
+            let typ = types.get(&id).map(|t| t.to_string()).unwrap_or(String::new());
+            // Get the span only for the name of the variable (I hope the path
+            // is only ever a variable name, but who knows?).
+            let sub_span = self.span.span_for_last_ident(p.span);
+            // Rust uses the id of the pattern for var lookups, so we'll use it too.
+            if !self.span.filter_generated(sub_span, p.span) {
+                self.dumper.variable(p.span, VariableData {
+                    span: sub_span.expect("No span found for variable"),
+                    id: id,
+                    name: path_to_string(p),
+                    qualname: format!("{}${}", path_to_string(p), id),
+                    value: value,
+                    type_value: typ,
+                    scope: 0
+                }.normalize(&self.tcx));
+            }
+        }
+    }
+
+    /// Extract macro use and definition information from the AST node defined
+    /// by the given NodeId, using the expansion information from the node's
+    /// span.
+    ///
+    /// If the span is not macro-generated, do nothing, else use callee and
+    /// callsite spans to record macro definition and use data, using the
+    /// mac_uses and mac_defs sets to prevent multiples.
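+    ///
+    /// Note (illustrative, not upstream documentation): the emitted qualname
+    /// has the form `name::H`, where `H` is a SipHash of the macro's callee
+    /// span; the mac_defs and mac_uses sets then ensure one definition record
+    /// per unique callee span and one use record per unique callsite span.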
+    fn process_macro_use(&mut self, span: Span, id: NodeId) {
+        let data = match self.save_ctxt.get_macro_use_data(span, id) {
+            None => return,
+            Some(data) => data,
+        };
+        let mut hasher = SipHasher::new();
+        data.callee_span.hash(&mut hasher);
+        let hash = hasher.finish();
+        let qualname = format!("{}::{}", data.name, hash);
+        // Don't write macro definition for imported macros
+        if !self.mac_defs.contains(&data.callee_span)
+            && !data.imported {
+            self.mac_defs.insert(data.callee_span);
+            if let Some(sub_span) = self.span.span_for_macro_def_name(data.callee_span) {
+                self.dumper.macro_data(data.callee_span, MacroData {
+                    span: sub_span,
+                    name: data.name.clone(),
+                    qualname: qualname.clone()
+                });
+            }
+        }
+        if !self.mac_uses.contains(&data.span) {
+            self.mac_uses.insert(data.span);
+            if let Some(sub_span) = self.span.span_for_macro_use_name(data.span) {
+                self.dumper.macro_use(data.span, MacroUseData {
+                    span: sub_span,
+                    name: data.name,
+                    qualname: qualname,
+                    scope: data.scope,
+                    callee_span: data.callee_span,
+                    imported: data.imported
+                }.normalize(&self.tcx));
+            }
+        }
+    }
+}
+
+impl<'l, 'tcx, 'v, D: Dump + 'l> Visitor<'v> for DumpVisitor<'l, 'tcx, D> {
+    fn visit_item(&mut self, item: &ast::Item) {
+        use syntax::ast::ItemKind::*;
+        self.process_macro_use(item.span, item.id);
+        match item.node {
+            Use(ref use_item) => {
+                match use_item.node {
+                    ast::ViewPathSimple(ident, ref path) => {
+                        let sub_span = self.span.span_for_last_ident(path.span);
+                        let mod_id = match self.lookup_type_ref(item.id) {
+                            Some(def_id) => {
+                                let scope = self.cur_scope;
+                                self.process_def_kind(item.id, path.span, sub_span, def_id, scope);
+
+                                Some(def_id)
+                            }
+                            None => None,
+                        };
+
+                        // 'use' always introduces an alias; if there is not an explicit
+                        // one, there is an implicit one.
+                        let sub_span = match self.span.sub_span_after_keyword(use_item.span,
+                                                                              keywords::As) {
+                            Some(sub_span) => Some(sub_span),
+                            None => sub_span,
+                        };
+
+                        if !self.span.filter_generated(sub_span, path.span) {
+                            self.dumper.use_data(path.span, UseData {
+                                span: sub_span.expect("No span found for use"),
+                                id: item.id,
+                                mod_id: mod_id,
+                                name: ident.name.to_string(),
+                                scope: self.cur_scope
+                            }.normalize(&self.tcx));
+                        }
+                        self.write_sub_paths_truncated(path, true);
+                    }
+                    ast::ViewPathGlob(ref path) => {
+                        // Make a comma-separated list of names of imported modules.
+                        let mut names = vec![];
+                        let glob_map = &self.analysis.glob_map;
+                        let glob_map = glob_map.as_ref().unwrap();
+                        if glob_map.contains_key(&item.id) {
+                            for n in glob_map.get(&item.id).unwrap() {
+                                names.push(n.to_string());
+                            }
+                        }
+
+                        let sub_span = self.span
+                                           .sub_span_of_token(path.span, token::BinOp(token::Star));
+                        if !self.span.filter_generated(sub_span, path.span) {
+                            self.dumper.use_glob(path.span, UseGlobData {
+                                span: sub_span.expect("No span found for use glob"),
+                                id: item.id,
+                                names: names,
+                                scope: self.cur_scope
+                            }.normalize(&self.tcx));
+                        }
+                        self.write_sub_paths(path, true);
+                    }
+                    ast::ViewPathList(ref path, ref list) => {
+                        for plid in list {
+                            match plid.node {
+                                ast::PathListItemKind::Ident { id, .. } => {
+                                    let scope = self.cur_scope;
+                                    if let Some(def_id) = self.lookup_type_ref(id) {
+                                        self.process_def_kind(id,
+                                                              plid.span,
+                                                              Some(plid.span),
+                                                              def_id,
+                                                              scope);
+                                    }
+                                }
+                                ast::PathListItemKind::Mod { .. } => (),
+                            }
+                        }
+
+                        self.write_sub_paths(path, true);
+                    }
+                }
+            }
+            ExternCrate(ref s) => {
+                let location = match *s {
+                    Some(s) => s.to_string(),
+                    None => item.ident.to_string(),
+                };
+                let alias_span = self.span.span_for_last_ident(item.span);
+                let cnum = match self.sess.cstore.extern_mod_stmt_cnum(item.id) {
+                    Some(cnum) => cnum,
+                    None => 0,
+                };
+
+                if !self.span.filter_generated(alias_span, item.span) {
+                    self.dumper.extern_crate(item.span, ExternCrateData {
+                        id: item.id,
+                        name: item.ident.name.to_string(),
+                        crate_num: cnum,
+                        location: location,
+                        span: alias_span.expect("No span found for extern crate"),
+                        scope: self.cur_scope,
+                    }.normalize(&self.tcx));
+                }
+            }
+            Fn(ref decl, _, _, _, ref ty_params, ref body) =>
+                self.process_fn(item, &decl, ty_params, &body),
+            Static(ref typ, _, ref expr) =>
+                self.process_static_or_const_item(item, typ, expr),
+            Const(ref typ, ref expr) =>
+                self.process_static_or_const_item(item, &typ, &expr),
+            Struct(ref def, ref ty_params) => self.process_struct(item, def, ty_params),
+            Enum(ref def, ref ty_params) => self.process_enum(item, def, ty_params),
+            Impl(_, _,
+                 ref ty_params,
+                 ref trait_ref,
+                 ref typ,
+                 ref impl_items) => {
+                self.process_impl(item, ty_params, trait_ref, &typ, impl_items)
+            }
+            Trait(_, ref generics, ref trait_refs, ref methods) =>
+                self.process_trait(item, generics, trait_refs, methods),
+            Mod(ref m) => {
+                self.process_mod(item);
+                self.nest(item.id, |v| visit::walk_mod(v, m));
+            }
+            Ty(ref ty, ref ty_params) => {
+                let qualname = format!("::{}", self.tcx.node_path_str(item.id));
+                let value = ty_to_string(&ty);
+                let sub_span = self.span.sub_span_after_keyword(item.span, keywords::Type);
+                if !self.span.filter_generated(sub_span, item.span) {
+                    self.dumper.typedef(item.span, TypedefData {
+                        span: sub_span.expect("No span found for typedef"),
+                        id: item.id,
+                        qualname: qualname.clone(),
+                        value: value
+                    }.normalize(&self.tcx));
+                }
+
+                self.visit_ty(&ty);
+                self.process_generic_params(ty_params, item.span, &qualname, item.id);
+            }
+            Mac(_) => (),
+            _ => visit::walk_item(self, item),
+        }
+    }
+
+    fn visit_generics(&mut self, generics: &ast::Generics) {
+        for param in generics.ty_params.iter() {
+            for bound in param.bounds.iter() {
+                if let ast::TraitTyParamBound(ref trait_ref, _) = *bound {
+                    self.process_trait_ref(&trait_ref.trait_ref);
+                }
+            }
+            if let Some(ref ty) = param.default {
+                self.visit_ty(&ty);
+            }
+        }
+    }
+
+    fn visit_trait_item(&mut self, trait_item: &ast::TraitItem) {
+        self.process_macro_use(trait_item.span, trait_item.id);
+        match trait_item.node {
+            ast::TraitItemKind::Const(ref ty, Some(ref expr)) => {
+                self.process_const(trait_item.id,
+                                   trait_item.ident.name,
+                                   trait_item.span,
+                                   &ty,
+                                   &expr);
+            }
+            ast::TraitItemKind::Method(ref sig, ref body) => {
+                self.process_method(sig,
+                                    body.as_ref().map(|x| &**x),
+                                    trait_item.id,
+                                    trait_item.ident.name,
+                                    trait_item.span);
+            }
+            ast::TraitItemKind::Const(_, None) |
+            ast::TraitItemKind::Type(..) => {}
+        }
+    }
+
+    fn visit_impl_item(&mut self, impl_item: &ast::ImplItem) {
+        self.process_macro_use(impl_item.span, impl_item.id);
+        match impl_item.node {
+            ast::ImplItemKind::Const(ref ty, ref expr) => {
+                self.process_const(impl_item.id,
+                                   impl_item.ident.name,
+                                   impl_item.span,
+                                   &ty,
+                                   &expr);
+            }
+            ast::ImplItemKind::Method(ref sig, ref body) => {
+                self.process_method(sig,
+                                    Some(body),
+                                    impl_item.id,
+                                    impl_item.ident.name,
+                                    impl_item.span);
+            }
+            ast::ImplItemKind::Type(_) |
+            ast::ImplItemKind::Macro(_) => {}
+        }
+    }
+
+    fn visit_ty(&mut self, t: &ast::Ty) {
+        self.process_macro_use(t.span, t.id);
+        match t.node {
+            ast::TyKind::Path(_, ref path) => {
+                if let Some(id) = self.lookup_type_ref(t.id) {
+                    let sub_span = self.span.sub_span_for_type_name(t.span);
+                    if !self.span.filter_generated(sub_span, t.span) {
+                        self.dumper.type_ref(t.span, TypeRefData {
+                            span: sub_span.expect("No span found for type ref"),
+                            ref_id: Some(id),
+                            scope: self.cur_scope,
+                            qualname: String::new()
+                        }.normalize(&self.tcx));
+                    }
+                }
+
+                self.write_sub_paths_truncated(path, false);
+
+                visit::walk_path(self, path);
+            }
+            _ => visit::walk_ty(self, t),
+        }
+    }
+
+    fn visit_expr(&mut self, ex: &ast::Expr) {
+        self.process_macro_use(ex.span, ex.id);
+        match ex.node {
+            ast::ExprKind::Call(ref _f, ref _args) => {
+                // Don't need to do anything for function calls,
+                // because just walking the callee path does what we want.
+                visit::walk_expr(self, ex);
+            }
+            ast::ExprKind::Path(_, ref path) => {
+                self.process_path(ex.id, path, None);
+                visit::walk_expr(self, ex);
+            }
+            ast::ExprKind::Struct(ref path, ref fields, ref base) => {
+                let hir_expr = lower_expr(self.save_ctxt.lcx, ex);
+                let adt = self.tcx.expr_ty(&hir_expr).ty_adt_def().unwrap();
+                let def = self.tcx.resolve_expr(&hir_expr);
+                self.process_struct_lit(ex, path, fields, adt.variant_of_def(def), base)
+            }
+            ast::ExprKind::MethodCall(_, _, ref args) => self.process_method_call(ex, args),
+            ast::ExprKind::Field(ref sub_ex, _) => {
+                self.visit_expr(&sub_ex);
+
+                if let Some(field_data) = self.save_ctxt.get_expr_data(ex) {
+                    down_cast_data!(field_data, VariableRefData, ex.span);
+                    if !self.span.filter_generated(Some(field_data.span), ex.span) {
+                        self.dumper.variable_ref(ex.span, field_data.normalize(&self.tcx));
+                    }
+                }
+            }
+            ast::ExprKind::TupField(ref sub_ex, idx) => {
+                self.visit_expr(&sub_ex);
+
+                let hir_node = lower_expr(self.save_ctxt.lcx, sub_ex);
+                let ty = &self.tcx.expr_ty_adjusted(&hir_node).sty;
+                match *ty {
+                    ty::TyStruct(def, _) => {
+                        let sub_span = self.span.sub_span_after_token(ex.span, token::Dot);
+                        if !self.span.filter_generated(sub_span, ex.span) {
+                            self.dumper.variable_ref(ex.span, VariableRefData {
+                                span: sub_span.expect("No span found for var ref"),
+                                ref_id: def.struct_variant().fields[idx.node].did,
+                                scope: self.cur_scope,
+                                name: String::new()
+                            }.normalize(&self.tcx));
+                        }
+                    }
+                    ty::TyTuple(_) => {}
+                    _ => span_bug!(ex.span,
+                                   "Expected struct or tuple type, found {:?}",
+                                   ty),
+                }
+            }
+            ast::ExprKind::Closure(_, ref decl, ref body) => {
+                let mut id = String::from("$");
+                id.push_str(&ex.id.to_string());
+                self.process_formals(&decl.inputs, &id);
+
+                // walk arg and return types
+                for arg in &decl.inputs {
+                    self.visit_ty(&arg.ty);
+                }
+
+                if let ast::FunctionRetTy::Ty(ref ret_ty) = decl.output {
+                    self.visit_ty(&ret_ty);
+                }
+
+                // walk the body
+                self.nest(ex.id, |v| v.visit_block(&body));
+            }
+            ast::ExprKind::ForLoop(ref pattern, ref subexpression, ref block, _) |
+            ast::ExprKind::WhileLet(ref pattern, ref subexpression, ref block, _) => {
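+                // Use the snippet from the start of the expression to the end of the
+                // matched/iterated expression as the value for the bound variables.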
+                let value = self.span.snippet(mk_sp(ex.span.lo, subexpression.span.hi));
+                self.process_var_decl(pattern, value);
+                visit::walk_expr(self, subexpression);
+                visit::walk_block(self, block);
+            }
+            ast::ExprKind::IfLet(ref pattern, ref subexpression, ref block, ref opt_else) => {
+                let value = self.span.snippet(mk_sp(ex.span.lo, subexpression.span.hi));
+                self.process_var_decl(pattern, value);
+                visit::walk_expr(self, subexpression);
+                visit::walk_block(self, block);
+                opt_else.as_ref().map(|el| visit::walk_expr(self, el));
+            }
+            _ => {
+                visit::walk_expr(self, ex)
+            }
+        }
+    }
+
+    fn visit_mac(&mut self, mac: &ast::Mac) {
+        // These shouldn't exist in the AST at this point, log a span bug.
+        span_bug!(mac.span, "macro invocation should have been expanded out of AST");
+    }
+
+    fn visit_pat(&mut self, p: &ast::Pat) {
+        self.process_macro_use(p.span, p.id);
+        self.process_pat(p);
+    }
+
+    fn visit_arm(&mut self, arm: &ast::Arm) {
+        let mut collector = PathCollector::new();
+        for pattern in &arm.pats {
+            // collect paths from the arm's patterns
+            collector.visit_pat(&pattern);
+            self.visit_pat(&pattern);
+        }
+
+        // This is to get around borrow checking, because we need mut self to call process_path.
+        let mut paths_to_process = vec![];
+
+        // process collected paths
+        for &(id, ref p, immut, ref_kind) in &collector.collected_paths {
+            let def_map = self.tcx.def_map.borrow();
+            if !def_map.contains_key(&id) {
+                span_bug!(p.span, "def_map has no key for {} in visit_arm", id);
+            }
+            let def = def_map.get(&id).unwrap().full_def();
+            match def {
+                Def::Local(_, id) => {
+                    let value = if immut == ast::Mutability::Immutable {
+                        self.span.snippet(p.span).to_string()
+                    } else {
+                        "<mutable>".to_string()
+                    };
+
+                    assert!(p.segments.len() == 1,
+                            "qualified path for local variable def in arm");
+                    if !self.span.filter_generated(Some(p.span), p.span) {
+                        self.dumper.variable(p.span, VariableData {
+                            span: p.span,
+                            id: id,
+                            name: path_to_string(p),
+                            qualname: format!("{}${}", path_to_string(p), id),
+                            value: value,
+                            type_value: String::new(),
+                            scope: 0
+                        }.normalize(&self.tcx));
+                    }
+                }
+                Def::Variant(..) | Def::Enum(..) |
+                Def::TyAlias(..) | Def::Struct(..) => {
+                    paths_to_process.push((id, p.clone(), Some(ref_kind)))
+                }
+                // FIXME(nrc) what are these doing here?
+                Def::Static(_, _) |
+                Def::Const(..) |
+                Def::AssociatedConst(..) => {}
+                _ => error!("unexpected definition kind when processing collected paths: {:?}",
+                            def),
+            }
+        }
+
+        for &(id, ref path, ref_kind) in &paths_to_process {
+            self.process_path(id, path, ref_kind);
+        }
+        walk_list!(self, visit_expr, &arm.guard);
+        self.visit_expr(&arm.body);
+    }
+
+    fn visit_stmt(&mut self, s: &ast::Stmt) {
+        let id = s.node.id();
+        self.process_macro_use(s.span, id.unwrap());
+        visit::walk_stmt(self, s)
+    }
+
+    fn visit_local(&mut self, l: &ast::Local) {
+        self.process_macro_use(l.span, l.id);
+        let value = self.span.snippet(l.span);
+        self.process_var_decl(&l.pat, value);
+
+        // Just walk the initialiser and type (don't want to walk the pattern again).
+        walk_list!(self, visit_ty, &l.ty);
+        walk_list!(self, visit_expr, &l.init);
+    }
+}
diff --git a/src/librustc_save_analysis/lib.rs b/src/librustc_save_analysis/lib.rs
new file mode 100644 (file)
index 0000000..4596398
--- /dev/null
@@ -0,0 +1,749 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_name = "rustc_save_analysis"]
+#![unstable(feature = "rustc_private", issue = "27812")]
+#![crate_type = "dylib"]
+#![crate_type = "rlib"]
+#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
+      html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
+      html_root_url = "https://doc.rust-lang.org/nightly/")]
+#![cfg_attr(not(stage0), deny(warnings))]
+
+#![feature(custom_attribute)]
+#![allow(unused_attributes)]
+#![feature(rustc_private)]
+#![feature(staged_api)]
+
+#[macro_use] extern crate rustc;
+
+#[macro_use] extern crate log;
+#[macro_use] extern crate syntax;
+
+use rustc::hir::{self, lowering};
+use rustc::hir::map::NodeItem;
+use rustc::hir::def::Def;
+use rustc::hir::def_id::DefId;
+use rustc::session::config::CrateType::CrateTypeExecutable;
+use rustc::ty::{self, TyCtxt};
+
+use std::env;
+use std::fs::{self, File};
+use std::path::{Path, PathBuf};
+
+use syntax::ast::{self, NodeId, PatKind};
+use syntax::codemap::*;
+use syntax::parse::token::{self, keywords};
+use syntax::visit::{self, Visitor};
+use syntax::print::pprust::ty_to_string;
+
+mod csv_dumper;
+mod data;
+mod dump;
+mod dump_visitor;
+#[macro_use]
+pub mod span_utils;
+
+pub use self::csv_dumper::CsvDumper;
+pub use self::data::*;
+pub use self::dump::Dump;
+pub use self::dump_visitor::DumpVisitor;
+use self::span_utils::SpanUtils;
+
+// FIXME this is legacy code and should be removed
+pub mod recorder {
+    pub use self::Row::*;
+
+    #[derive(Copy, Clone, Debug, Eq, PartialEq)]
+    pub enum Row {
+        TypeRef,
+        ModRef,
+        VarRef,
+        FnRef,
+    }
+}
+
+pub struct SaveContext<'l, 'tcx: 'l> {
+    tcx: &'l TyCtxt<'tcx>,
+    lcx: &'l lowering::LoweringContext<'l>,
+    span_utils: SpanUtils<'l>,
+}
+
+macro_rules! option_try(
+    ($e:expr) => (match $e { Some(e) => e, None => return None })
+);
+
+impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> {
+    pub fn new(tcx: &'l TyCtxt<'tcx>,
+               lcx: &'l lowering::LoweringContext<'l>)
+               -> SaveContext<'l, 'tcx> {
+        let span_utils = SpanUtils::new(&tcx.sess);
+        SaveContext::from_span_utils(tcx, lcx, span_utils)
+    }
+
+    pub fn from_span_utils(tcx: &'l TyCtxt<'tcx>,
+                           lcx: &'l lowering::LoweringContext<'l>,
+                           span_utils: SpanUtils<'l>)
+                           -> SaveContext<'l, 'tcx> {
+        SaveContext {
+            tcx: tcx,
+            lcx: lcx,
+            span_utils: span_utils,
+        }
+    }
+
+    // List external crates used by the current crate.
+    pub fn get_external_crates(&self) -> Vec<CrateData> {
+        let mut result = Vec::new();
+
+        for n in self.tcx.sess.cstore.crates() {
+            result.push(CrateData {
+                name: (&self.tcx.sess.cstore.crate_name(n)[..]).to_owned(),
+                number: n,
+            });
+        }
+
+        result
+    }
+
+    pub fn get_item_data(&self, item: &ast::Item) -> Option<Data> {
+        match item.node {
+            ast::ItemKind::Fn(..) => {
+                let name = self.tcx.node_path_str(item.id);
+                let qualname = format!("::{}", name);
+                let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Fn);
+                filter!(self.span_utils, sub_span, item.span, None);
+                Some(Data::FunctionData(FunctionData {
+                    id: item.id,
+                    name: name,
+                    qualname: qualname,
+                    declaration: None,
+                    span: sub_span.unwrap(),
+                    scope: self.enclosing_scope(item.id),
+                }))
+            }
+            ast::ItemKind::Static(ref typ, mt, ref expr) => {
+                let qualname = format!("::{}", self.tcx.node_path_str(item.id));
+
+                // If the variable is immutable, save the initialising expression.
+                let (value, keyword) = match mt {
+                    ast::Mutability::Mutable => (String::from("<mutable>"), keywords::Mut),
+                    ast::Mutability::Immutable => {
+                        (self.span_utils.snippet(expr.span), keywords::Static)
+                    },
+                };
+
+                let sub_span = self.span_utils.sub_span_after_keyword(item.span, keyword);
+                filter!(self.span_utils, sub_span, item.span, None);
+                Some(Data::VariableData(VariableData {
+                    id: item.id,
+                    name: item.ident.to_string(),
+                    qualname: qualname,
+                    span: sub_span.unwrap(),
+                    scope: self.enclosing_scope(item.id),
+                    value: value,
+                    type_value: ty_to_string(&typ),
+                }))
+            }
+            ast::ItemKind::Const(ref typ, ref expr) => {
+                let qualname = format!("::{}", self.tcx.node_path_str(item.id));
+                let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Const);
+                filter!(self.span_utils, sub_span, item.span, None);
+                Some(Data::VariableData(VariableData {
+                    id: item.id,
+                    name: item.ident.to_string(),
+                    qualname: qualname,
+                    span: sub_span.unwrap(),
+                    scope: self.enclosing_scope(item.id),
+                    value: self.span_utils.snippet(expr.span),
+                    type_value: ty_to_string(&typ),
+                }))
+            }
+            ast::ItemKind::Mod(ref m) => {
+                let qualname = format!("::{}", self.tcx.node_path_str(item.id));
+
+                let cm = self.tcx.sess.codemap();
+                let filename = cm.span_to_filename(m.inner);
+
+                let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Mod);
+                filter!(self.span_utils, sub_span, item.span, None);
+                Some(Data::ModData(ModData {
+                    id: item.id,
+                    name: item.ident.to_string(),
+                    qualname: qualname,
+                    span: sub_span.unwrap(),
+                    scope: self.enclosing_scope(item.id),
+                    filename: filename,
+                }))
+            }
+            ast::ItemKind::Enum(..) => {
+                let enum_name = format!("::{}", self.tcx.node_path_str(item.id));
+                let val = self.span_utils.snippet(item.span);
+                let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Enum);
+                filter!(self.span_utils, sub_span, item.span, None);
+                Some(Data::EnumData(EnumData {
+                    id: item.id,
+                    value: val,
+                    span: sub_span.unwrap(),
+                    qualname: enum_name,
+                    scope: self.enclosing_scope(item.id),
+                }))
+            }
+            ast::ItemKind::Impl(_, _, _, ref trait_ref, ref typ, _) => {
+                let mut type_data = None;
+                let sub_span;
+
+                let parent = self.enclosing_scope(item.id);
+
+                match typ.node {
+                    // Common case impl for a struct or something basic.
+                    ast::TyKind::Path(None, ref path) => {
+                        sub_span = self.span_utils.sub_span_for_type_name(path.span);
+                        filter!(self.span_utils, sub_span, path.span, None);
+                        type_data = self.lookup_ref_id(typ.id).map(|id| {
+                            TypeRefData {
+                                span: sub_span.unwrap(),
+                                scope: parent,
+                                ref_id: Some(id),
+                                qualname: String::new() // FIXME: generate the real qualname
+                            }
+                        });
+                    }
+                    _ => {
+                        // Less useful case, impl for a compound type.
+                        let span = typ.span;
+                        sub_span = self.span_utils.sub_span_for_type_name(span).or(Some(span));
+                    }
+                }
+
+                let trait_data = trait_ref.as_ref()
+                                          .and_then(|tr| self.get_trait_ref_data(tr, parent));
+
+                filter!(self.span_utils, sub_span, typ.span, None);
+                Some(Data::ImplData(ImplData2 {
+                    id: item.id,
+                    span: sub_span.unwrap(),
+                    scope: parent,
+                    trait_ref: trait_data,
+                    self_ref: type_data,
+                }))
+            }
+            _ => {
+                // FIXME
+                bug!();
+            }
+        }
+    }
+
+    pub fn get_field_data(&self, field: &ast::StructField,
+                          scope: NodeId) -> Option<VariableData> {
+        if let Some(ident) = field.ident {
+            let qualname = format!("::{}::{}", self.tcx.node_path_str(scope), ident);
+            let typ = self.tcx.node_types().get(&field.id).unwrap().to_string();
+            let sub_span = self.span_utils.sub_span_before_token(field.span, token::Colon);
+            filter!(self.span_utils, sub_span, field.span, None);
+            Some(VariableData {
+                id: field.id,
+                name: ident.to_string(),
+                qualname: qualname,
+                span: sub_span.unwrap(),
+                scope: scope,
+                value: "".to_owned(),
+                type_value: typ,
+            })
+        } else {
+            None
+        }
+    }
+
+    // FIXME would be nice to take a MethodItem here, but the ast provides both
+    // trait and impl flavours, so the caller must do the disassembly.
+    pub fn get_method_data(&self, id: ast::NodeId,
+                           name: ast::Name, span: Span) -> Option<FunctionData> {
+        // The qualname for a method is the trait name or the name of the struct in the
+        // impl in which the method is declared, followed by the method's name.
+        let qualname = match self.tcx.impl_of_method(self.tcx.map.local_def_id(id)) {
+            Some(impl_id) => match self.tcx.map.get_if_local(impl_id) {
+                Some(NodeItem(item)) => {
+                    match item.node {
+                        hir::ItemImpl(_, _, _, _, ref ty, _) => {
+                            let mut result = String::from("<");
+                            result.push_str(&rustc::hir::print::ty_to_string(&ty));
+
+                            match self.tcx.trait_of_item(self.tcx.map.local_def_id(id)) {
+                                Some(def_id) => {
+                                    result.push_str(" as ");
+                                    result.push_str(&self.tcx.item_path_str(def_id));
+                                }
+                                None => {}
+                            }
+                            result.push_str(">");
+                            result
+                        }
+                        _ => {
+                            span_bug!(span,
+                                      "Container {:?} for method {} not an impl?",
+                                      impl_id,
+                                      id);
+                        }
+                    }
+                }
+                r => {
+                    span_bug!(span,
+                              "Container {:?} for method {} is not a node item {:?}",
+                              impl_id,
+                              id,
+                              r);
+                }
+            },
+            None => match self.tcx.trait_of_item(self.tcx.map.local_def_id(id)) {
+                Some(def_id) => {
+                    match self.tcx.map.get_if_local(def_id) {
+                        Some(NodeItem(_)) => {
+                            format!("::{}", self.tcx.item_path_str(def_id))
+                        }
+                        r => {
+                            span_bug!(span,
+                                      "Could not find container {:?} for \
+                                       method {}, got {:?}",
+                                      def_id,
+                                      id,
+                                      r);
+                        }
+                    }
+                }
+                None => {
+                    span_bug!(span, "Could not find container for method {}", id);
+                }
+            },
+        };
+
+        let qualname = format!("{}::{}", qualname, name);
+
+        let def_id = self.tcx.map.local_def_id(id);
+        let decl_id = self.tcx.trait_item_of_item(def_id).and_then(|new_id| {
+            let new_def_id = new_id.def_id();
+            if new_def_id != def_id {
+                Some(new_def_id)
+            } else {
+                None
+            }
+        });
+
+        let sub_span = self.span_utils.sub_span_after_keyword(span, keywords::Fn);
+        filter!(self.span_utils, sub_span, span, None);
+        Some(FunctionData {
+            id: id,
+            name: name.to_string(),
+            qualname: qualname,
+            declaration: decl_id,
+            span: sub_span.unwrap(),
+            scope: self.enclosing_scope(id),
+        })
+    }
+
+    pub fn get_trait_ref_data(&self,
+                              trait_ref: &ast::TraitRef,
+                              parent: NodeId)
+                              -> Option<TypeRefData> {
+        self.lookup_ref_id(trait_ref.ref_id).and_then(|def_id| {
+            let span = trait_ref.path.span;
+            let sub_span = self.span_utils.sub_span_for_type_name(span).or(Some(span));
+            filter!(self.span_utils, sub_span, span, None);
+            Some(TypeRefData {
+                span: sub_span.unwrap(),
+                scope: parent,
+                ref_id: Some(def_id),
+                qualname: String::new() // FIXME: generate the real qualname
+            })
+        })
+    }
+
+    pub fn get_expr_data(&self, expr: &ast::Expr) -> Option<Data> {
+        let hir_node = lowering::lower_expr(self.lcx, expr);
+        let ty = self.tcx.expr_ty_adjusted_opt(&hir_node);
+        if ty.is_none() || ty.unwrap().sty == ty::TyError {
+            return None;
+        }
+        match expr.node {
+            ast::ExprKind::Field(ref sub_ex, ident) => {
+                let hir_node = lowering::lower_expr(self.lcx, sub_ex);
+                match self.tcx.expr_ty_adjusted(&hir_node).sty {
+                    ty::TyStruct(def, _) => {
+                        let f = def.struct_variant().field_named(ident.node.name);
+                        let sub_span = self.span_utils.span_for_last_ident(expr.span);
+                        filter!(self.span_utils, sub_span, expr.span, None);
+                        return Some(Data::VariableRefData(VariableRefData {
+                            name: ident.node.to_string(),
+                            span: sub_span.unwrap(),
+                            scope: self.enclosing_scope(expr.id),
+                            ref_id: f.did,
+                        }));
+                    }
+                    _ => {
+                        debug!("Expected struct type, found {:?}", ty);
+                        None
+                    }
+                }
+            }
+            ast::ExprKind::Struct(ref path, _, _) => {
+                let hir_node = lowering::lower_expr(self.lcx, expr);
+                match self.tcx.expr_ty_adjusted(&hir_node).sty {
+                    ty::TyStruct(def, _) => {
+                        let sub_span = self.span_utils.span_for_last_ident(path.span);
+                        filter!(self.span_utils, sub_span, path.span, None);
+                        Some(Data::TypeRefData(TypeRefData {
+                            span: sub_span.unwrap(),
+                            scope: self.enclosing_scope(expr.id),
+                            ref_id: Some(def.did),
+                            qualname: String::new() // FIXME: generate the real qualname
+                        }))
+                    }
+                    _ => {
+                        // FIXME ty could legitimately be a TyEnum, but then we will fail
+                        // later if we try to look up the fields.
+                        debug!("expected TyStruct, found {:?}", ty);
+                        None
+                    }
+                }
+            }
+            ast::ExprKind::MethodCall(..) => {
+                let method_call = ty::MethodCall::expr(expr.id);
+                let method_id = self.tcx.tables.borrow().method_map[&method_call].def_id;
+                let (def_id, decl_id) = match self.tcx.impl_or_trait_item(method_id).container() {
+                    ty::ImplContainer(_) => (Some(method_id), None),
+                    ty::TraitContainer(_) => (None, Some(method_id)),
+                };
+                let sub_span = self.span_utils.sub_span_for_meth_name(expr.span);
+                filter!(self.span_utils, sub_span, expr.span, None);
+                let parent = self.enclosing_scope(expr.id);
+                Some(Data::MethodCallData(MethodCallData {
+                    span: sub_span.unwrap(),
+                    scope: parent,
+                    ref_id: def_id,
+                    decl_id: decl_id,
+                }))
+            }
+            ast::ExprKind::Path(_, ref path) => {
+                self.get_path_data(expr.id, path)
+            }
+            _ => {
+                // FIXME
+                bug!();
+            }
+        }
+    }
+
+    pub fn get_path_data(&self, id: NodeId, path: &ast::Path) -> Option<Data> {
+        let def_map = self.tcx.def_map.borrow();
+        if !def_map.contains_key(&id) {
+            span_bug!(path.span, "def_map has no key for {} in visit_expr", id);
+        }
+        let def = def_map.get(&id).unwrap().full_def();
+        let sub_span = self.span_utils.span_for_last_ident(path.span);
+        filter!(self.span_utils, sub_span, path.span, None);
+        match def {
+            Def::Upvar(..) |
+            Def::Local(..) |
+            Def::Static(..) |
+            Def::Const(..) |
+            Def::AssociatedConst(..) |
+            Def::Variant(..) => {
+                Some(Data::VariableRefData(VariableRefData {
+                    name: self.span_utils.snippet(sub_span.unwrap()),
+                    span: sub_span.unwrap(),
+                    scope: self.enclosing_scope(id),
+                    ref_id: def.def_id(),
+                }))
+            }
+            Def::Struct(def_id) |
+            Def::Enum(def_id) |
+            Def::TyAlias(def_id) |
+            Def::Trait(def_id) |
+            Def::TyParam(_, _, def_id, _) => {
+                Some(Data::TypeRefData(TypeRefData {
+                    span: sub_span.unwrap(),
+                    ref_id: Some(def_id),
+                    scope: self.enclosing_scope(id),
+                    qualname: String::new() // FIXME: generate the real qualname
+                }))
+            }
+            Def::Method(decl_id) => {
+                let sub_span = self.span_utils.sub_span_for_meth_name(path.span);
+                filter!(self.span_utils, sub_span, path.span, None);
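+                // If the declaration is local, resolve it to the definition that has a
+                // body: a default method in the trait, or the method in the impl.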
+                let def_id = if decl_id.is_local() {
+                    let ti = self.tcx.impl_or_trait_item(decl_id);
+                    match ti.container() {
+                        ty::TraitContainer(def_id) => {
+                            self.tcx
+                                .trait_items(def_id)
+                                .iter()
+                                .find(|mr| mr.name() == ti.name() && self.trait_method_has_body(mr))
+                                .map(|mr| mr.def_id())
+                        }
+                        ty::ImplContainer(def_id) => {
+                            let impl_items = self.tcx.impl_items.borrow();
+                            Some(impl_items.get(&def_id)
+                                           .unwrap()
+                                           .iter()
+                                           .find(|mr| {
+                                               self.tcx.impl_or_trait_item(mr.def_id()).name() ==
+                                               ti.name()
+                                           })
+                                           .unwrap()
+                                           .def_id())
+                        }
+                    }
+                } else {
+                    None
+                };
+                Some(Data::MethodCallData(MethodCallData {
+                    span: sub_span.unwrap(),
+                    scope: self.enclosing_scope(id),
+                    ref_id: def_id,
+                    decl_id: Some(decl_id),
+                }))
+            }
+            Def::Fn(def_id) => {
+                Some(Data::FunctionCallData(FunctionCallData {
+                    ref_id: def_id,
+                    span: sub_span.unwrap(),
+                    scope: self.enclosing_scope(id),
+                }))
+            }
+            Def::Mod(def_id) => {
+                Some(Data::ModRefData(ModRefData {
+                    ref_id: Some(def_id),
+                    span: sub_span.unwrap(),
+                    scope: self.enclosing_scope(id),
+                    qualname: String::new() // FIXME: generate the real qualname
+                }))
+            }
+            _ => None,
+        }
+    }
+
+    fn trait_method_has_body(&self, mr: &ty::ImplOrTraitItem) -> bool {
+        let def_id = mr.def_id();
+        if let Some(node_id) = self.tcx.map.as_local_node_id(def_id) {
+            let trait_item = self.tcx.map.expect_trait_item(node_id);
+            if let hir::TraitItem_::MethodTraitItem(_, Some(_)) = trait_item.node {
+                true
+            } else {
+                false
+            }
+        } else {
+            false
+        }
+    }
+
+    pub fn get_field_ref_data(&self,
+                              field_ref: &ast::Field,
+                              variant: ty::VariantDef,
+                              parent: NodeId)
+                              -> Option<VariableRefData> {
+        let f = variant.field_named(field_ref.ident.node.name);
+        // We don't really need a sub-span here, but no harm done
+        let sub_span = self.span_utils.span_for_last_ident(field_ref.ident.span);
+        filter!(self.span_utils, sub_span, field_ref.ident.span, None);
+        Some(VariableRefData {
+            name: field_ref.ident.node.to_string(),
+            span: sub_span.unwrap(),
+            scope: parent,
+            ref_id: f.did,
+        })
+    }
+
+    /// Attempt to return MacroUseData for any AST node.
+    ///
+    /// For a given piece of AST defined by the supplied Span and NodeId,
+    /// returns None if the node is not macro-generated or the span is malformed,
+    /// else uses the expansion callsite and callee to return some MacroUseData.
+    pub fn get_macro_use_data(&self, span: Span, id: NodeId) -> Option<MacroUseData> {
+        if !generated_code(span) {
+            return None;
+        }
+        // Note we take care to use the source callsite/callee, to handle
+        // nested expansions and ensure we only generate data for source-visible
+        // macro uses.
+        let callsite = self.tcx.sess.codemap().source_callsite(span);
+        let callee = self.tcx.sess.codemap().source_callee(span);
+        let callee = option_try!(callee);
+        let callee_span = option_try!(callee.span);
+
+        // Ignore attribute macros, their spans are usually mangled
+        if let MacroAttribute(_) = callee.format {
+            return None;
+        }
+
+        // If the callee is an imported macro from an external crate, need to get
+        // the source span and name from the session, as their spans are localized
+        // when read in, and no longer correspond to the source.
+        if let Some(mac) = self.tcx.sess.imported_macro_spans.borrow().get(&callee_span) {
+            let &(ref mac_name, mac_span) = mac;
+            return Some(MacroUseData {
+                span: callsite,
+                name: mac_name.clone(),
+                callee_span: mac_span,
+                scope: self.enclosing_scope(id),
+                imported: true,
+                qualname: String::new(), // FIXME: generate the real qualname
+            });
+        }
+
+        Some(MacroUseData {
+            span: callsite,
+            name: callee.name().to_string(),
+            callee_span: callee_span,
+            scope: self.enclosing_scope(id),
+            imported: false,
+            qualname: String::new() // FIXME: generate the real qualname
+        })
+    }
+
+    pub fn get_data_for_id(&self, _id: &NodeId) -> Data {
+        // FIXME
+        bug!();
+    }
+
+    fn lookup_ref_id(&self, ref_id: NodeId) -> Option<DefId> {
+        if !self.tcx.def_map.borrow().contains_key(&ref_id) {
+            bug!("def_map has no key for {} in lookup_type_ref", ref_id);
+        }
+        let def = self.tcx.def_map.borrow().get(&ref_id).unwrap().full_def();
+        match def {
+            Def::PrimTy(_) | Def::SelfTy(..) => None,
+            _ => Some(def.def_id()),
+        }
+    }
+
+    #[inline]
+    pub fn enclosing_scope(&self, id: NodeId) -> NodeId {
+        self.tcx.map.get_enclosing_scope(id).unwrap_or(0)
+    }
+}
+
+// An AST visitor for collecting paths from patterns.
+struct PathCollector {
+    // The Row field identifies the kind of pattern.
+    collected_paths: Vec<(NodeId, ast::Path, ast::Mutability, recorder::Row)>,
+}
+
+impl PathCollector {
+    fn new() -> PathCollector {
+        PathCollector { collected_paths: vec![] }
+    }
+}
+
+impl<'v> Visitor<'v> for PathCollector {
+    fn visit_pat(&mut self, p: &ast::Pat) {
+        match p.node {
+            PatKind::Struct(ref path, _, _) => {
+                self.collected_paths.push((p.id, path.clone(),
+                                           ast::Mutability::Mutable, recorder::TypeRef));
+            }
+            PatKind::TupleStruct(ref path, _) |
+            PatKind::Path(ref path) |
+            PatKind::QPath(_, ref path) => {
+                self.collected_paths.push((p.id, path.clone(),
+                                           ast::Mutability::Mutable, recorder::VarRef));
+            }
+            PatKind::Ident(bm, ref path1, _) => {
+                debug!("PathCollector, visit ident in pat {}: {:?} {:?}",
+                       path1.node,
+                       p.span,
+                       path1.span);
+                let immut = match bm {
+                    // Even if the ref is mut, you can't change the ref, only
+                    // the data pointed at, so showing the initialising expression
+                    // is still worthwhile.
+                    ast::BindingMode::ByRef(_) => ast::Mutability::Immutable,
+                    ast::BindingMode::ByValue(mt) => mt,
+                };
+                // collect path for either visit_local or visit_arm
+                let path = ast::Path::from_ident(path1.span, path1.node);
+                self.collected_paths.push((p.id, path, immut, recorder::VarRef));
+            }
+            _ => {}
+        }
+        visit::walk_pat(self, p);
+    }
+}
+
+pub fn process_crate<'l, 'tcx>(tcx: &'l TyCtxt<'tcx>,
+                               lcx: &'l lowering::LoweringContext<'l>,
+                               krate: &ast::Crate,
+                               analysis: &ty::CrateAnalysis,
+                               cratename: &str,
+                               odir: Option<&Path>) {
+    let _ignore = tcx.dep_graph.in_ignore();
+
+    assert!(analysis.glob_map.is_some());
+
+    info!("Dumping crate {}", cratename);
+
+    // find a path to dump our data to
+    let mut root_path = match env::var_os("DXR_RUST_TEMP_FOLDER") {
+        Some(val) => PathBuf::from(val),
+        None => match odir {
+            Some(val) => val.join("dxr"),
+            None => PathBuf::from("dxr-temp"),
+        },
+    };
+
+    if let Err(e) = fs::create_dir_all(&root_path) {
+        tcx.sess.err(&format!("Could not create directory {}: {}",
+                              root_path.display(),
+                              e));
+    }
+
+    {
+        let disp = root_path.display();
+        info!("Writing output to {}", disp);
+    }
+
+    // Create output file.
+    let executable = tcx.sess.crate_types.borrow().iter().any(|ct| *ct == CrateTypeExecutable);
+    let mut out_name = if executable {
+        "".to_owned()
+    } else {
+        "lib".to_owned()
+    };
+    out_name.push_str(&cratename);
+    out_name.push_str(&tcx.sess.opts.cg.extra_filename);
+    out_name.push_str(".csv");
+    root_path.push(&out_name);
+    let mut output_file = File::create(&root_path).unwrap_or_else(|e| {
+        let disp = root_path.display();
+        tcx.sess.fatal(&format!("Could not open {}: {}", disp, e));
+    });
+    root_path.pop();
+
+    let utils = SpanUtils::new(&tcx.sess);
+    let mut dumper = CsvDumper::new(&mut output_file, utils);
+    let mut visitor = DumpVisitor::new(tcx, lcx, analysis, &mut dumper);
+    // FIXME: we don't write anything!
+
+    visitor.dump_crate_info(cratename, krate);
+    visit::walk_crate(&mut visitor, krate);
+}
+
+// Utility functions for the module.
+
+// Helper function to escape quotes in a string
+fn escape(s: String) -> String {
+    s.replace("\"", "\"\"")
+}
+
+// Helper function to determine if a span came from a
+// macro expansion or syntax extension.
+pub fn generated_code(span: Span) -> bool {
+    span.expn_id != NO_EXPANSION || span == DUMMY_SP
+}
diff --git a/src/librustc_save_analysis/span_utils.rs b/src/librustc_save_analysis/span_utils.rs
new file mode 100644 (file)
index 0000000..f410d42
--- /dev/null
@@ -0,0 +1,461 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::session::Session;
+
+use generated_code;
+
+use std::cell::Cell;
+use std::env;
+use std::path::Path;
+
+use syntax::ast;
+use syntax::codemap::*;
+use syntax::parse::lexer;
+use syntax::parse::lexer::{Reader, StringReader};
+use syntax::parse::token;
+use syntax::parse::token::{keywords, Token};
+
+#[derive(Clone)]
+pub struct SpanUtils<'a> {
+    pub sess: &'a Session,
+    pub err_count: Cell<isize>,
+}
+
+impl<'a> SpanUtils<'a> {
+    pub fn new(sess: &'a Session) -> SpanUtils<'a> {
+        SpanUtils {
+            sess: sess,
+            err_count: Cell::new(0),
+        }
+    }
+
+    pub fn make_path_string(file_name: &str) -> String {
+        let path = Path::new(file_name);
+        if path.is_absolute() {
+            path.clone().display().to_string()
+        } else {
+            env::current_dir().unwrap().join(&path).display().to_string()
+        }
+    }
+
+    // Standard string for extents/location.
+    #[rustfmt_skip]
+    pub fn extent_str(&self, span: Span) -> String {
+        let lo_loc = self.sess.codemap().lookup_char_pos(span.lo);
+        let hi_loc = self.sess.codemap().lookup_char_pos(span.hi);
+        let lo_pos = self.sess.codemap().bytepos_to_file_charpos(span.lo);
+        let hi_pos = self.sess.codemap().bytepos_to_file_charpos(span.hi);
+        let lo_pos_byte = self.sess.codemap().lookup_byte_offset(span.lo).pos;
+        let hi_pos_byte = self.sess.codemap().lookup_byte_offset(span.hi).pos;
+
+        format!("file_name,\"{}\",file_line,{},file_col,{},extent_start,{},extent_start_bytes,{},\
+                 file_line_end,{},file_col_end,{},extent_end,{},extent_end_bytes,{}",
+                SpanUtils::make_path_string(&lo_loc.file.name),
+                lo_loc.line, lo_loc.col.to_usize(), lo_pos.to_usize(), lo_pos_byte.to_usize(),
+                hi_loc.line, hi_loc.col.to_usize(), hi_pos.to_usize(), hi_pos_byte.to_usize())
+    }
+
+    // sub_span starts at span.lo, so we need to adjust the positions etc.
+    // If sub_span is None, we don't need to adjust.
+    pub fn make_sub_span(&self, span: Span, sub_span: Option<Span>) -> Option<Span> {
+        match sub_span {
+            None => None,
+            Some(sub) => {
+                let FileMapAndBytePos {fm, pos} = self.sess.codemap().lookup_byte_offset(span.lo);
+                let base = pos + fm.start_pos;
+                Some(Span {
+                    lo: base + self.sess.codemap().lookup_byte_offset(sub.lo).pos,
+                    hi: base + self.sess.codemap().lookup_byte_offset(sub.hi).pos,
+                    expn_id: span.expn_id,
+                })
+            }
+        }
+    }
+
+    pub fn snippet(&self, span: Span) -> String {
+        match self.sess.codemap().span_to_snippet(span) {
+            Ok(s) => s,
+            Err(_) => String::new(),
+        }
+    }
+
+    pub fn retokenise_span(&self, span: Span) -> StringReader<'a> {
+        // Sadness - we don't have spans for sub-expressions nor access to the tokens,
+        // so in order to get extents for the function name itself (which dxr expects)
+        // we need to re-tokenise the fn definition.
+
+        // Note: this is a bit awful - it adds the contents of span to the end of
+        // the codemap as a new filemap. This is mostly OK, but means we should
+        // not iterate over the codemap. Also, any spans over the new filemap
+        // are incompatible with spans over other filemaps.
+        let filemap = self.sess
+                          .codemap()
+                          .new_filemap(String::from("<anon-dxr>"), self.snippet(span));
+        let s = self.sess;
+        lexer::StringReader::new(s.diagnostic(), filemap)
+    }
+
+    // Re-parses a path and returns the span for the last identifier in the path
+    pub fn span_for_last_ident(&self, span: Span) -> Option<Span> {
+        let mut result = None;
+
+        let mut toks = self.retokenise_span(span);
+        let mut bracket_count = 0;
+        loop {
+            let ts = toks.real_token();
+            if ts.tok == token::Eof {
+                return self.make_sub_span(span, result)
+            }
+            if bracket_count == 0 && (ts.tok.is_ident() || ts.tok.is_keyword(keywords::SelfValue)) {
+                result = Some(ts.sp);
+            }
+
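+            // Track angle bracket nesting; a `>>` token closes two levels at once.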
+            bracket_count += match ts.tok {
+                token::Lt => 1,
+                token::Gt => -1,
+                token::BinOp(token::Shr) => -2,
+                _ => 0,
+            }
+        }
+    }
+
+    // Return the span for the first identifier in the path.
+    pub fn span_for_first_ident(&self, span: Span) -> Option<Span> {
+        let mut toks = self.retokenise_span(span);
+        let mut bracket_count = 0;
+        loop {
+            let ts = toks.real_token();
+            if ts.tok == token::Eof {
+                return None;
+            }
+            if bracket_count == 0 && (ts.tok.is_ident() || ts.tok.is_keyword(keywords::SelfValue)) {
+                return self.make_sub_span(span, Some(ts.sp));
+            }
+
+            bracket_count += match ts.tok {
+                token::Lt => 1,
+                token::Gt => -1,
+                token::BinOp(token::Shr) => -2,
+                _ => 0,
+            }
+        }
+    }
+
+    // Return the span for the last ident before a `(`, `<`, or `::<`, and outside
+    // any brackets, or the last span.
+    pub fn sub_span_for_meth_name(&self, span: Span) -> Option<Span> {
+        let mut toks = self.retokenise_span(span);
+        let mut prev = toks.real_token();
+        let mut result = None;
+        let mut bracket_count = 0;
+        let mut last_span = None;
+        while prev.tok != token::Eof {
+            last_span = None;
+            let mut next = toks.real_token();
+
+            if (next.tok == token::OpenDelim(token::Paren) || next.tok == token::Lt) &&
+               bracket_count == 0 && prev.tok.is_ident() {
+                result = Some(prev.sp);
+            }
+
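+            // A `::<` (turbofish) means the ident before the `::` is the method name.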
+            if bracket_count == 0 && next.tok == token::ModSep {
+                let old = prev;
+                prev = next;
+                next = toks.real_token();
+                if next.tok == token::Lt && old.tok.is_ident() {
+                    result = Some(old.sp);
+                }
+            }
+
+            bracket_count += match prev.tok {
+                token::OpenDelim(token::Paren) | token::Lt => 1,
+                token::CloseDelim(token::Paren) | token::Gt => -1,
+                token::BinOp(token::Shr) => -2,
+                _ => 0,
+            };
+
+            if prev.tok.is_ident() && bracket_count == 0 {
+                last_span = Some(prev.sp);
+            }
+            prev = next;
+        }
+        if result.is_none() && last_span.is_some() {
+            return self.make_sub_span(span, last_span);
+        }
+        return self.make_sub_span(span, result);
+    }
+
+    // Return the span for the last ident before a `<` and outside any
+    // brackets, or the last span.
+    pub fn sub_span_for_type_name(&self, span: Span) -> Option<Span> {
+        let mut toks = self.retokenise_span(span);
+        let mut prev = toks.real_token();
+        let mut result = None;
+        let mut bracket_count = 0;
+        loop {
+            let next = toks.real_token();
+
+            if (next.tok == token::Lt || next.tok == token::Colon) && bracket_count == 0 &&
+               prev.tok.is_ident() {
+                result = Some(prev.sp);
+            }
+
+            bracket_count += match prev.tok {
+                token::Lt => 1,
+                token::Gt => -1,
+                token::BinOp(token::Shl) => 2,
+                token::BinOp(token::Shr) => -2,
+                _ => 0,
+            };
+
+            if next.tok == token::Eof {
+                break;
+            }
+            prev = next;
+        }
+        if bracket_count != 0 {
+            let loc = self.sess.codemap().lookup_char_pos(span.lo);
+            span_bug!(span,
+                      "Mis-counted brackets when breaking path? Parsing '{}' \
+                       in {}, line {}",
+                      self.snippet(span),
+                      loc.file.name,
+                      loc.line);
+        }
+        if result.is_none() && prev.tok.is_ident() && bracket_count == 0 {
+            return self.make_sub_span(span, Some(prev.sp));
+        }
+        self.make_sub_span(span, result)
+    }
+
+    // Reparse span and return an owned vector of the sub-spans of the first `limit`
+    // identifier tokens at the given nesting level.
+    // example with Foo<Bar<T,V>, Bar<T,V>>
+    // Nesting = 0: all idents outside of brackets: [Foo]
+    // Nesting = 1: idents within one level of brackets: [Bar, Bar]
+    pub fn spans_with_brackets(&self, span: Span, nesting: isize, limit: isize) -> Vec<Span> {
+        let mut result: Vec<Span> = vec!();
+
+        let mut toks = self.retokenise_span(span);
+        // We keep track of how many brackets we're nested in
+        let mut bracket_count: isize = 0;
+        let mut found_ufcs_sep = false;
+        loop {
+            let ts = toks.real_token();
+            if ts.tok == token::Eof {
+                if bracket_count != 0 {
+                    if generated_code(span) {
+                        return vec!();
+                    }
+                    let loc = self.sess.codemap().lookup_char_pos(span.lo);
+                    span_bug!(span,
+                              "Mis-counted brackets when breaking path? \
+                               Parsing '{}' in {}, line {}",
+                              self.snippet(span),
+                              loc.file.name,
+                              loc.line);
+                }
+                return result
+            }
+            if (result.len() as isize) == limit {
+                return result;
+            }
+            bracket_count += match ts.tok {
+                token::Lt => 1,
+                token::Gt => -1,
+                token::BinOp(token::Shl) => 2,
+                token::BinOp(token::Shr) => -2,
+                _ => 0,
+            };
+
+            // Ignore the `>::` in `<Type as Trait>::AssocTy`.
+
+            // The root cause of this hack is that the AST representation of
+            // qpaths is horrible. It treats <A as B>::C as a path with two
+            // segments, B and C, and notes that there is also a self type A at
+            // position 0. Because we don't have spans for individual idents,
+            // only the whole path, we have to iterate over the tokens in the
+            // path, trying to pull out the non-nested idents (e.g., avoiding 'a
+            // in `<A as B<'a>>::C`). So we end up with a span for `B>::C` from
+            // the start of the first ident to the end of the path.
+            if !found_ufcs_sep && bracket_count == -1 {
+                found_ufcs_sep = true;
+                bracket_count += 1;
+            }
+            if ts.tok.is_ident() && bracket_count == nesting {
+                result.push(self.make_sub_span(span, Some(ts.sp)).unwrap());
+            }
+        }
+    }
+
+    pub fn sub_span_before_token(&self, span: Span, tok: Token) -> Option<Span> {
+        let mut toks = self.retokenise_span(span);
+        let mut prev = toks.real_token();
+        loop {
+            if prev.tok == token::Eof {
+                return None;
+            }
+            let next = toks.real_token();
+            if next.tok == tok {
+                return self.make_sub_span(span, Some(prev.sp));
+            }
+            prev = next;
+        }
+    }
+
+    pub fn sub_span_of_token(&self, span: Span, tok: Token) -> Option<Span> {
+        let mut toks = self.retokenise_span(span);
+        loop {
+            let next = toks.real_token();
+            if next.tok == token::Eof {
+                return None;
+            }
+            if next.tok == tok {
+                return self.make_sub_span(span, Some(next.sp));
+            }
+        }
+    }
+
+    pub fn sub_span_after_keyword(&self, span: Span, keyword: keywords::Keyword) -> Option<Span> {
+        self.sub_span_after(span, |t| t.is_keyword(keyword))
+    }
+
+    pub fn sub_span_after_token(&self, span: Span, tok: Token) -> Option<Span> {
+        self.sub_span_after(span, |t| t == tok)
+    }
+
+    fn sub_span_after<F: Fn(Token) -> bool>(&self, span: Span, f: F) -> Option<Span> {
+        let mut toks = self.retokenise_span(span);
+        loop {
+            let ts = toks.real_token();
+            if ts.tok == token::Eof {
+                return None;
+            }
+            if f(ts.tok) {
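+                // The sub-span we want is the token immediately after the match.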
+                let ts = toks.real_token();
+                if ts.tok == token::Eof {
+                    return None
+                } else {
+                    return self.make_sub_span(span, Some(ts.sp));
+                }
+            }
+        }
+    }
+
+
+    // Returns a list of the spans of idents in a path.
+    // E.g., for foo::bar<x,t>::baz, we return [foo, bar, baz] (well, their spans).
+    pub fn spans_for_path_segments(&self, path: &ast::Path) -> Vec<Span> {
+        self.spans_with_brackets(path.span, 0, -1)
+    }
+
+    // Return an owned vector of the subspans of the param identifier
+    // tokens found in span.
+    pub fn spans_for_ty_params(&self, span: Span, number: isize) -> Vec<Span> {
+        // Type params are nested within one level of brackets:
+        // i.e. for Foo<A, B<T,U>> we want the spans of A and B, not T and U.
+        self.spans_with_brackets(span, 1, number)
+    }
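+
+    // As a rough standalone sketch of the bracket counting used by
+    // `spans_with_brackets` above (plain characters instead of tokens; the
+    // names `idents_at_depth` and `depth` are made up for the illustration,
+    // and `>>` is why `Shr` counts as -2):
+    //
+    //     fn idents_at_depth(src: &str, target: isize) -> Vec<String> {
+    //         let (mut depth, mut cur, mut out) = (0isize, String::new(), vec![]);
+    //         for c in src.chars() {
+    //             match c {
+    //                 '<' => depth += 1,
+    //                 '>' => depth -= 1,
+    //                 c if c.is_alphanumeric() || c == '_' => {
+    //                     if depth == target { cur.push(c); }
+    //                     continue;
+    //                 }
+    //                 _ => {}
+    //             }
+    //             if !cur.is_empty() { out.push(std::mem::replace(&mut cur, String::new())); }
+    //         }
+    //         if !cur.is_empty() { out.push(cur); }
+    //         out
+    //     }
+    //
+    //     // idents_at_depth("foo::bar<x,t>::baz", 0) == ["foo", "bar", "baz"]
+    //     // idents_at_depth("Foo<A, B<T, U>>", 1)    == ["A", "B"]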
+
+    pub fn report_span_err(&self, kind: &str, span: Span) {
+        let loc = self.sess.codemap().lookup_char_pos(span.lo);
+        info!("({}) Could not find sub_span in `{}` in {}, line {}",
+              kind,
+              self.snippet(span),
+              loc.file.name,
+              loc.line);
+        self.err_count.set(self.err_count.get() + 1);
+        if self.err_count.get() > 1000 {
+            bug!("span errors reached 1000, giving up");
+        }
+    }
+
+    // Return the name for a macro definition (identifier after first `!`)
+    pub fn span_for_macro_def_name(&self, span: Span) -> Option<Span> {
+        let mut toks = self.retokenise_span(span);
+        loop {
+            let ts = toks.real_token();
+            if ts.tok == token::Eof {
+                return None;
+            }
+            if ts.tok == token::Not {
+                let ts = toks.real_token();
+                if ts.tok.is_ident() {
+                    return self.make_sub_span(span, Some(ts.sp));
+                } else {
+                    return None;
+                }
+            }
+        }
+    }
+
+    // Return the name for a macro use (identifier before first `!`).
+    pub fn span_for_macro_use_name(&self, span: Span) -> Option<Span> {
+        let mut toks = self.retokenise_span(span);
+        let mut prev = toks.real_token();
+        loop {
+            if prev.tok == token::Eof {
+                return None;
+            }
+            let ts = toks.real_token();
+            if ts.tok == token::Not {
+                if prev.tok.is_ident() {
+                    return self.make_sub_span(span, Some(prev.sp));
+                } else {
+                    return None;
+                }
+            }
+            prev = ts;
+        }
+    }
+
+    /// Return true if the span is generated code, and
+    /// it is not a subspan of the root callsite.
+    ///
+    /// Used to filter out spans of minimal value,
+    /// such as references to macro internal variables.
+    pub fn filter_generated(&self, sub_span: Option<Span>, parent: Span) -> bool {
+        if !generated_code(parent) {
+            if sub_span.is_none() {
+                // Edge case - this occurs on generated code with incorrect expansion info.
+                return true;
+            }
+            return false;
+        }
+        // If sub_span is none, filter out generated code.
+        if sub_span.is_none() {
+            return true;
+        }
+
+        // If the span comes from a fake filemap, filter it.
+        if !self.sess.codemap().lookup_char_pos(parent.lo).file.is_real_file() {
+            return true;
+        }
+
+        // Otherwise, a generated span is deemed invalid if it is not a sub-span of the root
+        // callsite. This filters out macro internal variables and most malformed spans.
+        let span = self.sess.codemap().source_callsite(parent);
+        !(span.contains(parent))
+    }
+}
+
+macro_rules! filter {
+    ($util: expr, $span: ident, $parent: expr, None) => {
+        if $util.filter_generated($span, $parent) {
+            return None;
+        }
+    };
+    ($util: expr, $span: ident, $parent: expr) => {
+        if $util.filter_generated($span, $parent) {
+            return;
+        }
+    };
+}
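+
+// Purely illustrative (hypothetical call sites, not part of this change): a
+// visitor method returning `()` would write `filter!(self.span_utils, sub_span,
+// item.span)`, while a helper returning `Option<_>` uses the `None` form so
+// that the early return type-checks in both contexts.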
index 24320c4d563bc259ce81295b53998fc04b61b91a..ccb430fbb782fe3d8e777319e6645177ba610f8b 100644 (file)
@@ -11,13 +11,14 @@ crate-type = ["dylib"]
 [dependencies]
 arena = { path = "../libarena" }
 flate = { path = "../libflate" }
-getopts = { path = "../libgetopts" }
 graphviz = { path = "../libgraphviz" }
 log = { path = "../liblog" }
 rustc = { path = "../librustc" }
 rustc_back = { path = "../librustc_back" }
+rustc_const_eval = { path = "../librustc_const_eval" }
+rustc_const_math = { path = "../librustc_const_math" }
 rustc_data_structures = { path = "../librustc_data_structures" }
-rustc_front = { path = "../librustc_front" }
+rustc_incremental = { path = "../librustc_incremental" }
 rustc_llvm = { path = "../librustc_llvm" }
 rustc_mir = { path = "../librustc_mir" }
 rustc_platform_intrinsics = { path = "../librustc_platform_intrinsics" }
diff --git a/src/librustc_trans/_match.rs b/src/librustc_trans/_match.rs
new file mode 100644 (file)
index 0000000..d8b8f9f
--- /dev/null
@@ -0,0 +1,2008 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! # Compilation of match statements
+//!
+//! I will endeavor to explain the code as best I can.  I have only a loose
+//! understanding of some parts of it.
+//!
+//! ## Matching
+//!
+//! The basic state of the code is maintained in an array `m` of `Match`
+//! objects.  Each `Match` describes some list of patterns, all of which must
+//! match against the current list of values.  If those patterns match, then
+//! the arm listed in the match is the correct arm.  A given arm may have
+//! multiple corresponding match entries, one for each alternative that
+//! remains.  As we proceed, these sets of matches are adjusted by the various
+//! `enter_XXX()` functions, each of which adjusts the set of options given
+//! some information about the value which has been matched.
+//!
+//! So, initially, there is one value and N matches, each of which has one
+//! constituent pattern.  N here is usually the number of arms but may be
+//! greater, if some arms have multiple alternatives.  For example, here:
+//!
+//!     enum Foo { A, B(isize), C(usize, usize) }
+//!     match foo {
+//!         A => ...,
+//!         B(x) => ...,
+//!         C(1, 2) => ...,
+//!         C(_) => ...
+//!     }
+//!
+//! The value would be `foo`.  There would be four matches, each of which
+//! contains one pattern (and, in one case, a guard).  We could collect the
+//! various options and then compile the code for the case where `foo` is an
+//! `A`, a `B`, and a `C`.  When we generate the code for `C`, we would (1)
+//! drop the two matches that do not match a `C` and (2) expand the other two
+//! into two patterns each.  In the first case, the two patterns would be `1`
+//! and `2`, and in the second case the _ pattern would be expanded into
+//! `_` and `_`.  The two values are of course the arguments to `C`.
+//!
+//! Here is a quick guide to the various functions:
+//!
+//! - `compile_submatch()`: The main workhorse.  It takes a list of values and
+//!   a list of matches and finds the various possibilities that could occur.
+//!
+//! - `enter_XXX()`: modifies the list of matches based on some information
+//!   about the value that has been matched.  For example,
+//!   `enter_rec_or_struct()` adjusts the values given that a record or struct
+//!   has been matched.  This is an infallible pattern, so *all* of the matches
+//!   must be either wildcards or record/struct patterns.  `enter_opt()`
+//!   handles the fallible cases, and it is correspondingly more complex.
+//!
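+//! As a rough, self-contained illustration of this "matrix of matches" idea
+//! (ordinary Rust, not this module's types -- `Pat`, `specialize` and the
+//! constructor numbering are made up for the sketch), specializing the rows
+//! on one constructor drops the rows that cannot match and expands the
+//! surviving rows in place, much as the `enter_XXX()` functions do:
+//!
+//!     #[derive(Clone)]
+//!     enum Pat { Wild, Ctor(u32, Vec<Pat>) }
+//!
+//!     fn specialize(rows: Vec<Vec<Pat>>, k: u32, arity: usize) -> Vec<Vec<Pat>> {
+//!         rows.into_iter().filter_map(|mut row| {
+//!             match row.remove(0) {
+//!                 // A wildcard matches any constructor: expand it into one
+//!                 // wildcard per field of `k`.
+//!                 Pat::Wild => {
+//!                     let mut new = vec![Pat::Wild; arity];
+//!                     new.extend(row);
+//!                     Some(new)
+//!                 }
+//!                 // The same constructor: its sub-patterns replace it;
+//!                 // any other constructor means this row can no longer match.
+//!                 Pat::Ctor(c, args) => {
+//!                     if c == k {
+//!                         let mut new = args;
+//!                         new.extend(row);
+//!                         Some(new)
+//!                     } else {
+//!                         None
+//!                     }
+//!                 }
+//!             }
+//!         }).collect()
+//!     }
+//!
+//! For the example above, specializing on `C` (say constructor 2, of arity 2)
+//! keeps only the `C(1, 2)` row and the wildcard row, each expanded to two
+//! columns -- exactly steps (1) and (2) described earlier.
+//!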
+//! ## Bindings
+//!
+//! We store information about the bound variables for each arm as part of the
+//! per-arm `ArmData` struct.  There is a mapping from identifiers to
+//! `BindingInfo` structs.  These structs contain the mode/id/type of the
+//! binding, but they also contain an LLVM value which points at an alloca
+//! called `llmatch`. For by-value bindings that are Copy, we also create
+//! an extra alloca that we copy the matched value to, so that any changes
+//! we make to our copy are not reflected in the original and vice-versa.
+//! We don't do this if it's a move, since the original value can't be used
+//! anyway, which lets us cheat and skip the extra alloca.
+//!
+//! The `llmatch` binding always stores a pointer into the value being matched
+//! which points at the data for the binding.  If the value being matched has
+//! type `T`, then, `llmatch` will point at an alloca of type `T*` (and hence
+//! `llmatch` has type `T**`).  So, if you have a pattern like:
+//!
+//!    let a: A = ...;
+//!    let b: B = ...;
+//!    match (a, b) { (ref c, d) => { ... } }
+//!
+//! For `c` and `d`, we would generate allocas of type `C*` and `D*`
+//! respectively.  These are called the `llmatch`.  As we match, when we come
+//! up against an identifier, we store the current pointer into the
+//! corresponding alloca.
+//!
+//! Once a pattern is completely matched, and assuming that there is no guard
+//! pattern, we will branch to a block that leads to the body itself.  For any
+//! by-value bindings, this block will first load the ptr from `llmatch` (the
+//! one of type `D*`) and then load a second time to get the actual value (the
+//! one of type `D`). For by ref bindings, the value of the local variable is
+//! simply the first alloca.
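+//!
+//! In ordinary Rust terms, the pointer levels look roughly like this (a
+//! sketch of the indirections only -- `value`, `into_value` and `slot` are
+//! made-up names, and this is not the generated LLVM IR):
+//!
+//!    let value: i32 = 42;            // the data for the binding
+//!    let into_value: &i32 = &value;  // the pointer into the matched value (`T*`)
+//!    let slot: &&i32 = &into_value;  // plays the role of `llmatch` (`T**`)
+//!    let by_value: i32 = **slot;     // by-value binding: two loads
+//!    let by_ref: &i32 = *slot;       // by-ref binding: a single load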
+//!
+//! So, for the example above, we would generate a setup kind of like this:
+//!
+//!        +-------+
+//!        | Entry |
+//!        +-------+
+//!            |
+//!        +--------------------------------------------+
+//!        | llmatch_c = (addr of first half of tuple)  |
+//!        | llmatch_d = (addr of second half of tuple) |
+//!        +--------------------------------------------+
+//!            |
+//!        +--------------------------------------+
+//!        | *llbinding_d = **llmatch_d           |
+//!        +--------------------------------------+
+//!
+//! If there is a guard, the situation is slightly different, because we must
+//! execute the guard code.  Moreover, we need to do so once for each of the
+//! alternatives that lead to the arm, because if the guard fails, they may
+//! have different points from which to continue the search. Therefore, in that
+//! case, we generate code that looks more like:
+//!
+//!        +-------+
+//!        | Entry |
+//!        +-------+
+//!            |
+//!        +--------------------------------------------+
+//!        | llmatch_c = (addr of first half of tuple)  |
+//!        | llmatch_d = (addr of second half of tuple) |
+//!        +--------------------------------------------+
+//!            |
+//!        +-------------------------------------------------+
+//!        | *llbinding_d = **llmatch_d                      |
+//!        | check condition                                 |
+//!        | if false { goto next case }                     |
+//!        | if true { goto body }                           |
+//!        +-------------------------------------------------+
+//!
+//! The handling for the cleanups is a bit... sensitive.  Basically, the body
+//! is the one that invokes `add_clean()` for each binding.  During the guard
+//! evaluation, we add temporary cleanups and revoke them after the guard is
+//! evaluated (it could fail, after all). Note that guards and moves are
+//! just plain incompatible.
+//!
+//! Some relevant helper functions that manage bindings:
+//! - `create_bindings_map()`
+//! - `insert_lllocals()`
+//!
+//!
+//! ## Notes on vector pattern matching.
+//!
+//! Vector pattern matching is surprisingly tricky. The problem is that
+//! the structure of the vector isn't fully known, and slice matches
+//! can be done on subparts of it.
+//!
+//! The way that vector pattern matches are dealt with, then, is as
+//! follows. First, we make the actual condition associated with a
+//! vector pattern simply a vector length comparison. So the pattern
+//! [1, .. x] gets the condition "vec len >= 1", and the pattern
+//! [.. x] gets the condition "vec len >= 0". The problem here is that
+//! having the condition "vec len >= 1" hold clearly does not mean that
+//! only a pattern that has exactly that condition will match. This
+//! means that it may well be the case that a condition holds, but none
+//! of the patterns matching that condition match; to deal with this,
+//! when doing vector length matches, we have match failures proceed to
+//! the next condition to check.
+//!
+//! There are a couple more subtleties to deal with. While the "actual"
+//! condition associated with vector length tests is simply a test on
+//! the vector length, the actual vec_len Opt entry contains more
+//! information used to restrict which matches are associated with it.
+//! So that all matches in a submatch are matching against the same
+//! values from inside the vector, they are split up by how many
+//! elements they match at the front and at the back of the vector. In
+//! order to make sure that arms are properly checked in order, even
+//! with the overmatching conditions, each vec_len Opt entry is
+//! associated with a range of matches.
+//! Consider the following:
+//!
+//!   match &[1, 2, 3] {
+//!       [1, 1, .. _] => 0,
+//!       [1, 2, 2, .. _] => 1,
+//!       [1, 2, 3, .. _] => 2,
+//!       [1, 2, .. _] => 3,
+//!       _ => 4
+//!   }
+//!
+//! The proper arm to match is arm 2, but arms 0 and 3 both have the
+//! condition "len >= 2". If arm 3 was lumped in with arm 0, then the
+//! wrong branch would be taken. Instead, vec_len Opts are associated
+//! with a contiguous range of matches that have the same "shape".
+//! This is sort of ugly and requires a bunch of special handling of
+//! vec_len options.
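+//!
+//! As a hedged, desugared view of what the generated tests for the example
+//! above must amount to (ordinary Rust, not this module's code; `arm` is a
+//! made-up name), note that the `len >= 2` test effectively appears once per
+//! contiguous shape group, so failing arm 0's element checks falls through to
+//! the later tests instead of jumping straight to the wildcard:
+//!
+//!   fn arm(v: &[i32]) -> usize {
+//!       if v.len() >= 2 && v[0] == 1 && v[1] == 1 { return 0; }
+//!       if v.len() >= 3 && v[0] == 1 && v[1] == 2 && v[2] == 2 { return 1; }
+//!       if v.len() >= 3 && v[0] == 1 && v[1] == 2 && v[2] == 3 { return 2; }
+//!       if v.len() >= 2 && v[0] == 1 && v[1] == 2 { return 3; }
+//!       4
+//!   }
+//!
+//!   // arm(&[1, 2, 3]) == 2, as required.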
+
+pub use self::BranchKind::*;
+pub use self::OptResult::*;
+pub use self::TransBindingMode::*;
+use self::Opt::*;
+use self::FailureHandler::*;
+
+use llvm::{ValueRef, BasicBlockRef};
+use rustc_const_eval::check_match::{self, StaticInliner};
+use rustc_const_eval::{compare_lit_exprs, eval_const_expr};
+use rustc::hir::def::{Def, DefMap};
+use rustc::hir::def_id::DefId;
+use middle::expr_use_visitor as euv;
+use rustc::infer;
+use middle::lang_items::StrEqFnLangItem;
+use middle::mem_categorization as mc;
+use middle::mem_categorization::Categorization;
+use rustc::hir::pat_util::*;
+use rustc::ty::subst::Substs;
+use adt;
+use base::*;
+use build::{AddCase, And, Br, CondBr, GEPi, InBoundsGEP, Load, PointerCast};
+use build::{Not, Store, Sub, add_comment};
+use build;
+use callee::{Callee, ArgVals};
+use cleanup::{self, CleanupMethods, DropHintMethods};
+use common::*;
+use consts;
+use datum::*;
+use debuginfo::{self, DebugLoc, ToDebugLoc};
+use expr::{self, Dest};
+use monomorphize;
+use tvec;
+use type_of;
+use Disr;
+use value::Value;
+use rustc::ty::{self, Ty, TyCtxt};
+use rustc::traits::ProjectionMode;
+use session::config::NoDebugInfo;
+use util::common::indenter;
+use util::nodemap::FnvHashMap;
+use util::ppaux;
+
+use std;
+use std::cell::RefCell;
+use std::cmp::Ordering;
+use std::fmt;
+use std::rc::Rc;
+use rustc::hir::{self, PatKind};
+use syntax::ast::{self, DUMMY_NODE_ID, NodeId};
+use syntax::codemap::Span;
+use rustc::hir::fold::Folder;
+use syntax::ptr::P;
+
+#[derive(Copy, Clone, Debug)]
+struct ConstantExpr<'a>(&'a hir::Expr);
+
+impl<'a> ConstantExpr<'a> {
+    fn eq(self, other: ConstantExpr<'a>, tcx: &TyCtxt) -> bool {
+        match compare_lit_exprs(tcx, self.0, other.0) {
+            Some(result) => result == Ordering::Equal,
+            None => bug!("compare_lit_exprs: type mismatch"),
+        }
+    }
+}
+
+// An option identifying a branch (a literal, an enum variant, a range, or a slice length test)
+#[derive(Debug)]
+enum Opt<'a, 'tcx> {
+    ConstantValue(ConstantExpr<'a>, DebugLoc),
+    ConstantRange(ConstantExpr<'a>, ConstantExpr<'a>, DebugLoc),
+    Variant(Disr, Rc<adt::Repr<'tcx>>, DefId, DebugLoc),
+    SliceLengthEqual(usize, DebugLoc),
+    SliceLengthGreaterOrEqual(/* prefix length */ usize,
+                              /* suffix length */ usize,
+                              DebugLoc),
+}
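+
+// For orientation, an illustrative mapping from source patterns to `Opt`s
+// (the examples are made up here; see `get_branches` below for the real
+// construction):
+//   a literal such as `42`                   -> ConstantValue
+//   a range such as `0...9`                  -> ConstantRange
+//   an enum variant such as `Some(_)`        -> Variant
+//   a fixed-length slice such as `[a, b]`    -> SliceLengthEqual
+//   a slice with a subslice `[a, .. rest]`   -> SliceLengthGreaterOrEqual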
+
+impl<'a, 'tcx> Opt<'a, 'tcx> {
+    fn eq(&self, other: &Opt<'a, 'tcx>, tcx: &TyCtxt<'tcx>) -> bool {
+        match (self, other) {
+            (&ConstantValue(a, _), &ConstantValue(b, _)) => a.eq(b, tcx),
+            (&ConstantRange(a1, a2, _), &ConstantRange(b1, b2, _)) => {
+                a1.eq(b1, tcx) && a2.eq(b2, tcx)
+            }
+            (&Variant(a_disr, ref a_repr, a_def, _),
+             &Variant(b_disr, ref b_repr, b_def, _)) => {
+                a_disr == b_disr && *a_repr == *b_repr && a_def == b_def
+            }
+            (&SliceLengthEqual(a, _), &SliceLengthEqual(b, _)) => a == b,
+            (&SliceLengthGreaterOrEqual(a1, a2, _),
+             &SliceLengthGreaterOrEqual(b1, b2, _)) => {
+                a1 == b1 && a2 == b2
+            }
+            _ => false
+        }
+    }
+
+    fn trans<'blk>(&self, mut bcx: Block<'blk, 'tcx>) -> OptResult<'blk, 'tcx> {
+        use consts::TrueConst::Yes;
+        let _icx = push_ctxt("match::trans_opt");
+        let ccx = bcx.ccx();
+        match *self {
+            ConstantValue(ConstantExpr(lit_expr), _) => {
+                let lit_ty = bcx.tcx().node_id_to_type(lit_expr.id);
+                let expr = consts::const_expr(ccx, &lit_expr, bcx.fcx.param_substs, None, Yes);
+                let llval = match expr {
+                    Ok((llval, _)) => llval,
+                    Err(err) => bcx.ccx().sess().span_fatal(lit_expr.span, &err.description()),
+                };
+                let lit_datum = immediate_rvalue(llval, lit_ty);
+                let lit_datum = unpack_datum!(bcx, lit_datum.to_appropriate_datum(bcx));
+                SingleResult(Result::new(bcx, lit_datum.val))
+            }
+            ConstantRange(ConstantExpr(ref l1), ConstantExpr(ref l2), _) => {
+                let l1 = match consts::const_expr(ccx, &l1, bcx.fcx.param_substs, None, Yes) {
+                    Ok((l1, _)) => l1,
+                    Err(err) => bcx.ccx().sess().span_fatal(l1.span, &err.description()),
+                };
+                let l2 = match consts::const_expr(ccx, &l2, bcx.fcx.param_substs, None, Yes) {
+                    Ok((l2, _)) => l2,
+                    Err(err) => bcx.ccx().sess().span_fatal(l2.span, &err.description()),
+                };
+                RangeResult(Result::new(bcx, l1), Result::new(bcx, l2))
+            }
+            Variant(disr_val, ref repr, _, _) => {
+                SingleResult(Result::new(bcx, adt::trans_case(bcx, &repr, disr_val)))
+            }
+            SliceLengthEqual(length, _) => {
+                SingleResult(Result::new(bcx, C_uint(ccx, length)))
+            }
+            SliceLengthGreaterOrEqual(prefix, suffix, _) => {
+                LowerBound(Result::new(bcx, C_uint(ccx, prefix + suffix)))
+            }
+        }
+    }
+
+    fn debug_loc(&self) -> DebugLoc {
+        match *self {
+            ConstantValue(_, debug_loc)                |
+            ConstantRange(_, _, debug_loc)             |
+            Variant(_, _, _, debug_loc)                |
+            SliceLengthEqual(_, debug_loc)             |
+            SliceLengthGreaterOrEqual(_, _, debug_loc) => debug_loc
+        }
+    }
+}
+
+#[derive(Copy, Clone, PartialEq)]
+pub enum BranchKind {
+    NoBranch,
+    Single,
+    Switch,
+    Compare,
+    CompareSliceLength
+}
+
+pub enum OptResult<'blk, 'tcx: 'blk> {
+    SingleResult(Result<'blk, 'tcx>),
+    RangeResult(Result<'blk, 'tcx>, Result<'blk, 'tcx>),
+    LowerBound(Result<'blk, 'tcx>)
+}
+
+#[derive(Clone, Copy, PartialEq)]
+pub enum TransBindingMode {
+    /// By-value binding for a copy type: copies from matched data
+    /// into a fresh LLVM alloca.
+    TrByCopy(/* llbinding */ ValueRef),
+
+    /// By-value binding for a non-copy type where we copy into a
+    /// fresh LLVM alloca; this most accurately reflects the language
+    /// semantics (e.g. it properly handles overwrites of the matched
+    /// input), but potentially injects an unwanted copy.
+    TrByMoveIntoCopy(/* llbinding */ ValueRef),
+
+    /// Binding a non-copy type by reference under the hood; this is
+    /// a codegen optimization to avoid unnecessary memory traffic.
+    TrByMoveRef,
+
+    /// By-ref binding exposed in the original source input.
+    TrByRef,
+}
+
+impl TransBindingMode {
+    /// If the binding is made by creating a fresh copy, returns the alloca
+    /// that the value will be copied into; otherwise returns None.
+    fn alloca_if_copy(&self) -> Option<ValueRef> {
+        match *self {
+            TrByCopy(llbinding) | TrByMoveIntoCopy(llbinding) => Some(llbinding),
+            TrByMoveRef | TrByRef => None,
+        }
+    }
+}
+
+/// Information about a pattern binding:
+/// - `llmatch` is a pointer to a stack slot.  The stack slot contains a
+///   pointer into the value being matched.  Hence, llmatch has type `T**`
+///   where `T` is the value being matched.
+/// - `trmode` is the trans binding mode
+/// - `id` is the node id of the binding
+/// - `ty` is the Rust type of the binding
+#[derive(Clone, Copy)]
+pub struct BindingInfo<'tcx> {
+    pub llmatch: ValueRef,
+    pub trmode: TransBindingMode,
+    pub id: ast::NodeId,
+    pub span: Span,
+    pub ty: Ty<'tcx>,
+}
+
+type BindingsMap<'tcx> = FnvHashMap<ast::Name, BindingInfo<'tcx>>;
+
+struct ArmData<'p, 'blk, 'tcx: 'blk> {
+    bodycx: Block<'blk, 'tcx>,
+    arm: &'p hir::Arm,
+    bindings_map: BindingsMap<'tcx>
+}
+
+/// Info about Match.
+/// If all `pats` are matched then arm `data` will be executed.
+/// As we proceed, `bound_ptrs` is filled with pointers to the values to be bound;
+/// these pointers are stored in the llmatch allocas just before the `data` arm is executed.
+struct Match<'a, 'p: 'a, 'blk: 'a, 'tcx: 'blk> {
+    pats: Vec<&'p hir::Pat>,
+    data: &'a ArmData<'p, 'blk, 'tcx>,
+    bound_ptrs: Vec<(ast::Name, ValueRef)>,
+    // Thread along renamings done by the check_match::StaticInliner, so we can
+    // map back to original NodeIds
+    pat_renaming_map: Option<&'a FnvHashMap<(NodeId, Span), NodeId>>
+}
+
+impl<'a, 'p, 'blk, 'tcx> fmt::Debug for Match<'a, 'p, 'blk, 'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        if ppaux::verbose() {
+            // for many programs, this just takes too long to serialize
+            write!(f, "{:?}", self.pats)
+        } else {
+            write!(f, "{} pats", self.pats.len())
+        }
+    }
+}
+
+fn has_nested_bindings(m: &[Match], col: usize) -> bool {
+    for br in m {
+        match br.pats[col].node {
+            PatKind::Ident(_, _, Some(_)) => return true,
+            _ => ()
+        }
+    }
+    return false;
+}
+
+// As noted in `fn match_datum`, we should eventually pass around a
+// `Datum<Lvalue>` for the `val`; but until we get to that point, this
+// `MatchInput` struct will serve -- it has everything `Datum<Lvalue>`
+// does except for the type field.
+#[derive(Copy, Clone)]
+pub struct MatchInput { val: ValueRef, lval: Lvalue }
+
+impl<'tcx> Datum<'tcx, Lvalue> {
+    pub fn match_input(&self) -> MatchInput {
+        MatchInput {
+            val: self.val,
+            lval: self.kind,
+        }
+    }
+}
+
+impl fmt::Debug for MatchInput {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&Value(self.val), f)
+    }
+}
+
+impl MatchInput {
+    fn from_val(val: ValueRef) -> MatchInput {
+        MatchInput {
+            val: val,
+            lval: Lvalue::new("MatchInput::from_val"),
+        }
+    }
+
+    fn to_datum<'tcx>(self, ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> {
+        Datum::new(self.val, ty, self.lval)
+    }
+}
+
+fn expand_nested_bindings<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                              m: &[Match<'a, 'p, 'blk, 'tcx>],
+                                              col: usize,
+                                              val: MatchInput)
+                                              -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
+    debug!("expand_nested_bindings(bcx={}, m={:?}, col={}, val={:?})",
+           bcx.to_str(), m, col, val);
+    let _indenter = indenter();
+
+    m.iter().map(|br| {
+        let mut bound_ptrs = br.bound_ptrs.clone();
+        let mut pat = br.pats[col];
+        loop {
+            pat = match pat.node {
+                PatKind::Ident(_, ref path, Some(ref inner)) => {
+                    bound_ptrs.push((path.node.name, val.val));
+                    &inner
+                },
+                _ => break
+            }
+        }
+
+        let mut pats = br.pats.clone();
+        pats[col] = pat;
+        Match {
+            pats: pats,
+            data: &br.data,
+            bound_ptrs: bound_ptrs,
+            pat_renaming_map: br.pat_renaming_map,
+        }
+    }).collect()
+}
+
+fn enter_match<'a, 'b, 'p, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
+                                          dm: &RefCell<DefMap>,
+                                          m: &[Match<'a, 'p, 'blk, 'tcx>],
+                                          col: usize,
+                                          val: MatchInput,
+                                          mut e: F)
+                                          -> Vec<Match<'a, 'p, 'blk, 'tcx>> where
+    F: FnMut(&[&'p hir::Pat]) -> Option<Vec<&'p hir::Pat>>,
+{
+    debug!("enter_match(bcx={}, m={:?}, col={}, val={:?})",
+           bcx.to_str(), m, col, val);
+    let _indenter = indenter();
+
+    m.iter().filter_map(|br| {
+        e(&br.pats).map(|pats| {
+            let this = br.pats[col];
+            let mut bound_ptrs = br.bound_ptrs.clone();
+            match this.node {
+                PatKind::Ident(_, ref path, None) => {
+                    if pat_is_binding(&dm.borrow(), &this) {
+                        bound_ptrs.push((path.node.name, val.val));
+                    }
+                }
+                PatKind::Vec(ref before, Some(ref slice), ref after) => {
+                    if let PatKind::Ident(_, ref path, None) = slice.node {
+                        let subslice_val = bind_subslice_pat(
+                            bcx, this.id, val,
+                            before.len(), after.len());
+                        bound_ptrs.push((path.node.name, subslice_val));
+                    }
+                }
+                _ => {}
+            }
+            Match {
+                pats: pats,
+                data: br.data,
+                bound_ptrs: bound_ptrs,
+                pat_renaming_map: br.pat_renaming_map,
+            }
+        })
+    }).collect()
+}
+
+fn enter_default<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                     dm: &RefCell<DefMap>,
+                                     m: &[Match<'a, 'p, 'blk, 'tcx>],
+                                     col: usize,
+                                     val: MatchInput)
+                                     -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
+    debug!("enter_default(bcx={}, m={:?}, col={}, val={:?})",
+           bcx.to_str(), m, col, val);
+    let _indenter = indenter();
+
+    // Collect all of the matches that can match against anything.
+    enter_match(bcx, dm, m, col, val, |pats| {
+        if pat_is_binding_or_wild(&dm.borrow(), &pats[col]) {
+            let mut r = pats[..col].to_vec();
+            r.extend_from_slice(&pats[col + 1..]);
+            Some(r)
+        } else {
+            None
+        }
+    })
+}
+
+// <pcwalton> nmatsakis: what does enter_opt do?
+// <pcwalton> in trans/match
+// <pcwalton> trans/match.rs is like stumbling around in a dark cave
+// <nmatsakis> pcwalton: the enter family of functions adjust the set of
+//             patterns as needed
+// <nmatsakis> yeah, at some point I kind of achieved some level of
+//             understanding
+// <nmatsakis> anyhow, they adjust the patterns given that something of that
+//             kind has been found
+// <nmatsakis> pcwalton: ok, right, so enter_XXX() adjusts the patterns, as I
+//             said
+// <nmatsakis> enter_match() kind of embodies the generic code
+// <nmatsakis> it is provided with a function that tests each pattern to see
+//             if it might possibly apply and so forth
+// <nmatsakis> so, if you have a pattern like {a: _, b: _, _} and one like _
+// <nmatsakis> then _ would be expanded to (_, _)
+// <nmatsakis> one spot for each of the sub-patterns
+// <nmatsakis> enter_opt() is one of the more complex; it covers the fallible
+//             cases
+// <nmatsakis> enter_rec_or_struct() or enter_tuple() are simpler, since they
+//             are infallible patterns
+// <nmatsakis> so all patterns must either be records (resp. tuples) or
+//             wildcards
+
+/// The above is now somewhat outdated: enter_match() takes a function that
+/// operates on the complete row of patterns rather than just the first one.
+/// Also, most of the enter_() family functions have been unified with
+/// the check_match specialization step.
+fn enter_opt<'a, 'p, 'blk, 'tcx>(
+             bcx: Block<'blk, 'tcx>,
+             _: ast::NodeId,
+             dm: &RefCell<DefMap>,
+             m: &[Match<'a, 'p, 'blk, 'tcx>],
+             opt: &Opt,
+             col: usize,
+             variant_size: usize,
+             val: MatchInput)
+             -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
+    debug!("enter_opt(bcx={}, m={:?}, opt={:?}, col={}, val={:?})",
+           bcx.to_str(), m, *opt, col, val);
+    let _indenter = indenter();
+
+    let ctor = match opt {
+        &ConstantValue(ConstantExpr(expr), _) => check_match::ConstantValue(
+            eval_const_expr(bcx.tcx(), &expr)
+        ),
+        &ConstantRange(ConstantExpr(lo), ConstantExpr(hi), _) => check_match::ConstantRange(
+            eval_const_expr(bcx.tcx(), &lo),
+            eval_const_expr(bcx.tcx(), &hi)
+        ),
+        &SliceLengthEqual(n, _) =>
+            check_match::Slice(n),
+        &SliceLengthGreaterOrEqual(before, after, _) =>
+            check_match::SliceWithSubslice(before, after),
+        &Variant(_, _, def_id, _) =>
+            check_match::Constructor::Variant(def_id)
+    };
+
+    let param_env = bcx.tcx().empty_parameter_environment();
+    let mcx = check_match::MatchCheckCtxt {
+        tcx: bcx.tcx(),
+        param_env: param_env,
+    };
+    enter_match(bcx, dm, m, col, val, |pats|
+        check_match::specialize(&mcx, &pats[..], &ctor, col, variant_size)
+    )
+}
+
+// Returns the options in one column of matches. An option is something that
+// needs to be conditionally matched at runtime; for example, the discriminant
+// on a set of enum variants or a literal.
+fn get_branches<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                    m: &[Match<'a, 'p, 'blk, 'tcx>],
+                                    col: usize)
+                                    -> Vec<Opt<'p, 'tcx>> {
+    let tcx = bcx.tcx();
+
+    let mut found: Vec<Opt> = vec![];
+    for br in m {
+        let cur = br.pats[col];
+        let debug_loc = match br.pat_renaming_map {
+            Some(pat_renaming_map) => {
+                match pat_renaming_map.get(&(cur.id, cur.span)) {
+                    Some(&id) => DebugLoc::At(id, cur.span),
+                    None => DebugLoc::At(cur.id, cur.span),
+                }
+            }
+            None => DebugLoc::None
+        };
+
+        let opt = match cur.node {
+            PatKind::Lit(ref l) => {
+                ConstantValue(ConstantExpr(&l), debug_loc)
+            }
+            PatKind::Ident(..) | PatKind::Path(..) |
+            PatKind::TupleStruct(..) | PatKind::Struct(..) => {
+                // This is either an enum variant or a variable binding.
+                let opt_def = tcx.def_map.borrow().get(&cur.id).map(|d| d.full_def());
+                match opt_def {
+                    Some(Def::Variant(enum_id, var_id)) => {
+                        let variant = tcx.lookup_adt_def(enum_id).variant_with_id(var_id);
+                        Variant(Disr::from(variant.disr_val),
+                                adt::represent_node(bcx, cur.id),
+                                var_id,
+                                debug_loc)
+                    }
+                    _ => continue
+                }
+            }
+            PatKind::Range(ref l1, ref l2) => {
+                ConstantRange(ConstantExpr(&l1), ConstantExpr(&l2), debug_loc)
+            }
+            PatKind::Vec(ref before, None, ref after) => {
+                SliceLengthEqual(before.len() + after.len(), debug_loc)
+            }
+            PatKind::Vec(ref before, Some(_), ref after) => {
+                SliceLengthGreaterOrEqual(before.len(), after.len(), debug_loc)
+            }
+            _ => continue
+        };
+
+        if !found.iter().any(|x| x.eq(&opt, tcx)) {
+            found.push(opt);
+        }
+    }
+    found
+}
+
+struct ExtractedBlock<'blk, 'tcx: 'blk> {
+    vals: Vec<ValueRef>,
+    bcx: Block<'blk, 'tcx>,
+}
+
+fn extract_variant_args<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                    repr: &adt::Repr<'tcx>,
+                                    disr_val: Disr,
+                                    val: MatchInput)
+                                    -> ExtractedBlock<'blk, 'tcx> {
+    let _icx = push_ctxt("match::extract_variant_args");
+    // Assume enums are always sized for now.
+    let val = adt::MaybeSizedValue::sized(val.val);
+    let args = (0..adt::num_args(repr, disr_val)).map(|i| {
+        adt::trans_field_ptr(bcx, repr, val, disr_val, i)
+    }).collect();
+
+    ExtractedBlock { vals: args, bcx: bcx }
+}
+
+/// Helper for converting from the ValueRef that we pass around in the match code, which is always
+/// an lvalue, into a Datum. Eventually we should just pass around a Datum and be done with it.
+fn match_datum<'tcx>(val: MatchInput, left_ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> {
+    val.to_datum(left_ty)
+}
+
+fn bind_subslice_pat(bcx: Block,
+                     pat_id: ast::NodeId,
+                     val: MatchInput,
+                     offset_left: usize,
+                     offset_right: usize) -> ValueRef {
+    let _icx = push_ctxt("match::bind_subslice_pat");
+    let vec_ty = node_id_type(bcx, pat_id);
+    let vec_ty_contents = match vec_ty.sty {
+        ty::TyBox(ty) => ty,
+        ty::TyRef(_, mt) | ty::TyRawPtr(mt) => mt.ty,
+        _ => vec_ty
+    };
+    let unit_ty = vec_ty_contents.sequence_element_type(bcx.tcx());
+    let vec_datum = match_datum(val, vec_ty);
+    let (base, len) = vec_datum.get_vec_base_and_len(bcx);
+
+    let slice_begin = InBoundsGEP(bcx, base, &[C_uint(bcx.ccx(), offset_left)]);
+    let slice_len_offset = C_uint(bcx.ccx(), offset_left + offset_right);
+    let slice_len = Sub(bcx, len, slice_len_offset, DebugLoc::None);
+    let slice_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic),
+                                         bcx.tcx().mk_slice(unit_ty));
+    let scratch = rvalue_scratch_datum(bcx, slice_ty, "");
+    Store(bcx, slice_begin, expr::get_dataptr(bcx, scratch.val));
+    Store(bcx, slice_len, expr::get_meta(bcx, scratch.val));
+    scratch.val
+}
+
+fn extract_vec_elems<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                 left_ty: Ty<'tcx>,
+                                 before: usize,
+                                 after: usize,
+                                 val: MatchInput)
+                                 -> ExtractedBlock<'blk, 'tcx> {
+    let _icx = push_ctxt("match::extract_vec_elems");
+    let vec_datum = match_datum(val, left_ty);
+    let (base, len) = vec_datum.get_vec_base_and_len(bcx);
+    let mut elems = vec![];
+    elems.extend((0..before).map(|i| GEPi(bcx, base, &[i])));
+    elems.extend((0..after).rev().map(|i| {
+        InBoundsGEP(bcx, base, &[
+            Sub(bcx, len, C_uint(bcx.ccx(), i + 1), DebugLoc::None)
+        ])
+    }));
+    ExtractedBlock { vals: elems, bcx: bcx }
+}
+
+// Macro for deciding whether any of the remaining matches fit a given kind of
+// pattern.  Note that, because the match is well-typed, either ALL of the
+// matches should fit that sort of pattern or NONE (however, some of the
+// matches may be wildcards like _ or identifiers).
+macro_rules! any_pat {
+    ($m:expr, $col:expr, $pattern:pat) => (
+        ($m).iter().any(|br| {
+            match br.pats[$col].node {
+                $pattern => true,
+                _ => false
+            }
+        })
+    )
+}
+
+fn any_uniq_pat(m: &[Match], col: usize) -> bool {
+    any_pat!(m, col, PatKind::Box(_))
+}
+
+fn any_region_pat(m: &[Match], col: usize) -> bool {
+    any_pat!(m, col, PatKind::Ref(..))
+}
+
+fn any_irrefutable_adt_pat(tcx: &TyCtxt, m: &[Match], col: usize) -> bool {
+    m.iter().any(|br| {
+        let pat = br.pats[col];
+        match pat.node {
+            PatKind::Tup(_) => true,
+            PatKind::Struct(..) | PatKind::TupleStruct(..) |
+            PatKind::Path(..) | PatKind::Ident(_, _, None) => {
+                match tcx.def_map.borrow().get(&pat.id).unwrap().full_def() {
+                    Def::Struct(..) | Def::TyAlias(..) => true,
+                    _ => false,
+                }
+            }
+            _ => false
+        }
+    })
+}
+
+/// What to do when the pattern match fails.
+enum FailureHandler {
+    Infallible,
+    JumpToBasicBlock(BasicBlockRef),
+    Unreachable
+}
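+
+// Orientation note (summarizing `handle_fail` below, not new behaviour):
+// failing under `Infallible` is a bug, `JumpToBasicBlock` branches to the
+// supplied block, and `Unreachable` emits an unreachable terminator; only
+// `Infallible` reports itself as infallible via `is_fallible()`.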
+
+impl FailureHandler {
+    fn is_fallible(&self) -> bool {
+        match *self {
+            Infallible => false,
+            _ => true
+        }
+    }
+
+    fn is_infallible(&self) -> bool {
+        !self.is_fallible()
+    }
+
+    fn handle_fail(&self, bcx: Block) {
+        match *self {
+            Infallible =>
+                bug!("attempted to panic in a non-panicking panic handler!"),
+            JumpToBasicBlock(basic_block) =>
+                Br(bcx, basic_block, DebugLoc::None),
+            Unreachable =>
+                build::Unreachable(bcx)
+        }
+    }
+}
+
+fn pick_column_to_specialize(def_map: &RefCell<DefMap>, m: &[Match]) -> Option<usize> {
+    fn pat_score(def_map: &RefCell<DefMap>, pat: &hir::Pat) -> usize {
+        match pat.node {
+            PatKind::Ident(_, _, Some(ref inner)) => pat_score(def_map, &inner),
+            _ if pat_is_refutable(&def_map.borrow(), pat) => 1,
+            _ => 0
+        }
+    }
+
+    let column_score = |m: &[Match], col: usize| -> usize {
+        let total_score = m.iter()
+            .map(|row| row.pats[col])
+            .map(|pat| pat_score(def_map, pat))
+            .sum();
+
+        // Irrefutable columns always go first; otherwise they'd only be duplicated in the branches.
+        if total_score == 0 {
+            std::usize::MAX
+        } else {
+            total_score
+        }
+    };
+
+    let column_contains_any_nonwild_patterns = |&col: &usize| -> bool {
+        m.iter().any(|row| match row.pats[col].node {
+            PatKind::Wild => false,
+            _ => true
+        })
+    };
+
+    (0..m[0].pats.len())
+        .filter(column_contains_any_nonwild_patterns)
+        .map(|col| (col, column_score(m, col)))
+        .max_by_key(|&(_, score)| score)
+        .map(|(col, _)| col)
+}
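+
+// Illustrative examples (hypothetical patterns, not from this change): for the
+// rows `(Some(0), _)` / `(None, _)`, column 1 is all-wildcard and filtered out,
+// so column 0 (score 2) is specialized; for `(x, Some(0))` / `(y, None)`,
+// column 0 holds only irrefutable bindings, scores 0, is bumped to usize::MAX
+// and is therefore handled before the refutable column.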
+
+// Compiles a comparison between two things.
+fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+                              lhs: ValueRef,
+                              rhs: ValueRef,
+                              rhs_t: Ty<'tcx>,
+                              debug_loc: DebugLoc)
+                              -> Result<'blk, 'tcx> {
+    fn compare_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                               lhs_data: ValueRef,
+                               lhs_len: ValueRef,
+                               rhs_data: ValueRef,
+                               rhs_len: ValueRef,
+                               rhs_t: Ty<'tcx>,
+                               debug_loc: DebugLoc)
+                               -> Result<'blk, 'tcx> {
+        let did = langcall(bcx,
+                           None,
+                           &format!("comparison of `{}`", rhs_t),
+                           StrEqFnLangItem);
+        let args = [lhs_data, lhs_len, rhs_data, rhs_len];
+        Callee::def(bcx.ccx(), did, bcx.tcx().mk_substs(Substs::empty()))
+            .call(bcx, debug_loc, ArgVals(&args), None)
+    }
+
+    let _icx = push_ctxt("compare_values");
+    if rhs_t.is_scalar() {
+        let cmp = compare_scalar_types(cx, lhs, rhs, rhs_t, hir::BiEq, debug_loc);
+        return Result::new(cx, cmp);
+    }
+
+    match rhs_t.sty {
+        ty::TyRef(_, mt) => match mt.ty.sty {
+            ty::TyStr => {
+                let lhs_data = Load(cx, expr::get_dataptr(cx, lhs));
+                let lhs_len = Load(cx, expr::get_meta(cx, lhs));
+                let rhs_data = Load(cx, expr::get_dataptr(cx, rhs));
+                let rhs_len = Load(cx, expr::get_meta(cx, rhs));
+                compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc)
+            }
+            ty::TyArray(ty, _) | ty::TySlice(ty) => match ty.sty {
+                ty::TyUint(ast::UintTy::U8) => {
+                    // NOTE: cast &[u8] and &[u8; N] to &str and abuse the str_eq lang item,
+                    // which calls memcmp().
+                    let pat_len = val_ty(rhs).element_type().array_length();
+                    let ty_str_slice = cx.tcx().mk_static_str();
+
+                    let rhs_data = GEPi(cx, rhs, &[0, 0]);
+                    let rhs_len = C_uint(cx.ccx(), pat_len);
+
+                    let lhs_data;
+                    let lhs_len;
+                    if val_ty(lhs) == val_ty(rhs) {
+                        // Both the discriminant and the pattern are thin pointers
+                        lhs_data = GEPi(cx, lhs, &[0, 0]);
+                        lhs_len = C_uint(cx.ccx(), pat_len);
+                    } else {
+                        // The discriminant is a fat pointer
+                        let llty_str_slice = type_of::type_of(cx.ccx(), ty_str_slice).ptr_to();
+                        let lhs_str = PointerCast(cx, lhs, llty_str_slice);
+                        lhs_data = Load(cx, expr::get_dataptr(cx, lhs_str));
+                        lhs_len = Load(cx, expr::get_meta(cx, lhs_str));
+                    }
+
+                    compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc)
+                },
+                _ => bug!("only byte strings supported in compare_values"),
+            },
+            _ => bug!("only string and byte strings supported in compare_values"),
+        },
+        _ => bug!("only scalars, byte strings, and strings supported in compare_values"),
+    }
+}
+
+/// For each binding in `data.bindings_map`, adds an appropriate entry into the `fcx.lllocals` map
+fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+                               bindings_map: &BindingsMap<'tcx>,
+                               cs: Option<cleanup::ScopeId>)
+                               -> Block<'blk, 'tcx> {
+    for (&name, &binding_info) in bindings_map {
+        let (llval, aliases_other_state) = match binding_info.trmode {
+            // By value mut binding for a copy type: load from the ptr
+            // into the matched value and copy to our alloca
+            TrByCopy(llbinding) |
+            TrByMoveIntoCopy(llbinding) => {
+                let llval = Load(bcx, binding_info.llmatch);
+                let lvalue = match binding_info.trmode {
+                    TrByCopy(..) =>
+                        Lvalue::new("_match::insert_lllocals"),
+                    TrByMoveIntoCopy(..) => {
+                        // match_input moves from the input into a
+                        // separate stack slot.
+                        //
+                        // E.g. consider moving the value `D(A)` out
+                        // of the tuple `(D(A), D(B))` and into the
+                        // local variable `x` via the pattern `(x,_)`,
+                        // leaving the remainder of the tuple `(_,
+                        // D(B))` still to be dropped in the future.
+                        //
+                        // Thus, here we must zero the place that we
+                        // are moving *from*, because we do not yet
+                        // track drop flags for a fragmented parent
+                        // match input expression.
+                        //
+                        // Longer term we will be able to map the move
+                        // into `(x, _)` up to the parent path that
+                        // owns the whole tuple, and mark the
+                        // corresponding stack-local drop-flag
+                        // tracking the first component of the tuple.
+                        let hint_kind = HintKind::ZeroAndMaintain;
+                        Lvalue::new_with_hint("_match::insert_lllocals (match_input)",
+                                              bcx, binding_info.id, hint_kind)
+                    }
+                    _ => bug!(),
+                };
+                let datum = Datum::new(llval, binding_info.ty, lvalue);
+                call_lifetime_start(bcx, llbinding);
+                bcx = datum.store_to(bcx, llbinding);
+                if let Some(cs) = cs {
+                    bcx.fcx.schedule_lifetime_end(cs, llbinding);
+                }
+
+                (llbinding, false)
+            },
+
+            // By value move bindings: load from the ptr into the matched value
+            TrByMoveRef => (Load(bcx, binding_info.llmatch), true),
+
+            // By ref binding: use the ptr into the matched value
+            TrByRef => (binding_info.llmatch, true),
+        };
+
+
+        // A local that aliases some other state must be zeroed, since
+        // the other state (e.g. some parent data that we matched
+        // into) will still have its subcomponents (such as this
+        // local) destructed at the end of the parent's scope. Longer
+        // term, we will properly map such parents to the set of
+        // unique drop flags for its fragments.
+        let hint_kind = if aliases_other_state {
+            HintKind::ZeroAndMaintain
+        } else {
+            HintKind::DontZeroJustUse
+        };
+        let lvalue = Lvalue::new_with_hint("_match::insert_lllocals (local)",
+                                           bcx,
+                                           binding_info.id,
+                                           hint_kind);
+        let datum = Datum::new(llval, binding_info.ty, lvalue);
+        if let Some(cs) = cs {
+            let opt_datum = lvalue.dropflag_hint(bcx);
+            bcx.fcx.schedule_lifetime_end(cs, binding_info.llmatch);
+            bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty, opt_datum);
+        }
+
+        debug!("binding {} to {:?}", binding_info.id, Value(llval));
+        bcx.fcx.lllocals.borrow_mut().insert(binding_info.id, datum);
+        debuginfo::create_match_binding_metadata(bcx, name, binding_info);
+    }
+    bcx
+}
+
+fn compile_guard<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                     guard_expr: &hir::Expr,
+                                     data: &ArmData<'p, 'blk, 'tcx>,
+                                     m: &[Match<'a, 'p, 'blk, 'tcx>],
+                                     vals: &[MatchInput],
+                                     chk: &FailureHandler,
+                                     has_genuine_default: bool)
+                                     -> Block<'blk, 'tcx> {
+    debug!("compile_guard(bcx={}, guard_expr={:?}, m={:?}, vals={:?})",
+           bcx.to_str(), guard_expr, m, vals);
+    let _indenter = indenter();
+
+    let mut bcx = insert_lllocals(bcx, &data.bindings_map, None);
+
+    let val = unpack_datum!(bcx, expr::trans(bcx, guard_expr));
+    let val = val.to_llbool(bcx);
+
+    for (_, &binding_info) in &data.bindings_map {
+        if let Some(llbinding) = binding_info.trmode.alloca_if_copy() {
+            call_lifetime_end(bcx, llbinding)
+        }
+    }
+
+    for (_, &binding_info) in &data.bindings_map {
+        bcx.fcx.lllocals.borrow_mut().remove(&binding_info.id);
+    }
+
+    with_cond(bcx, Not(bcx, val, guard_expr.debug_loc()), |bcx| {
+        for (_, &binding_info) in &data.bindings_map {
+            call_lifetime_end(bcx, binding_info.llmatch);
+        }
+        match chk {
+            // If the default arm is the only one left, move on to the next
+            // condition explicitly rather than (possibly) falling back to
+            // the default arm.
+            &JumpToBasicBlock(_) if m.len() == 1 && has_genuine_default => {
+                chk.handle_fail(bcx);
+            }
+            _ => {
+                compile_submatch(bcx, m, vals, chk, has_genuine_default);
+            }
+        };
+        bcx
+    })
+}
+
+fn compile_submatch<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                        m: &[Match<'a, 'p, 'blk, 'tcx>],
+                                        vals: &[MatchInput],
+                                        chk: &FailureHandler,
+                                        has_genuine_default: bool) {
+    debug!("compile_submatch(bcx={}, m={:?}, vals=[{:?}])",
+           bcx.to_str(), m, vals);
+    let _indenter = indenter();
+    let _icx = push_ctxt("match::compile_submatch");
+    let mut bcx = bcx;
+    if m.is_empty() {
+        if chk.is_fallible() {
+            chk.handle_fail(bcx);
+        }
+        return;
+    }
+
+    let tcx = bcx.tcx();
+    let def_map = &tcx.def_map;
+    match pick_column_to_specialize(def_map, m) {
+        Some(col) => {
+            let val = vals[col];
+            if has_nested_bindings(m, col) {
+                let expanded = expand_nested_bindings(bcx, m, col, val);
+                compile_submatch_continue(bcx,
+                                          &expanded[..],
+                                          vals,
+                                          chk,
+                                          col,
+                                          val,
+                                          has_genuine_default)
+            } else {
+                compile_submatch_continue(bcx, m, vals, chk, col, val, has_genuine_default)
+            }
+        }
+        None => {
+            let data = &m[0].data;
+            for &(ref name, ref value_ptr) in &m[0].bound_ptrs {
+                let binfo = *data.bindings_map.get(name).unwrap();
+                call_lifetime_start(bcx, binfo.llmatch);
+                if binfo.trmode == TrByRef && type_is_fat_ptr(bcx.tcx(), binfo.ty) {
+                    expr::copy_fat_ptr(bcx, *value_ptr, binfo.llmatch);
+                }
+                else {
+                    Store(bcx, *value_ptr, binfo.llmatch);
+                }
+            }
+            match data.arm.guard {
+                Some(ref guard_expr) => {
+                    bcx = compile_guard(bcx,
+                                        &guard_expr,
+                                        m[0].data,
+                                        &m[1..m.len()],
+                                        vals,
+                                        chk,
+                                        has_genuine_default);
+                }
+                _ => ()
+            }
+            Br(bcx, data.bodycx.llbb, DebugLoc::None);
+        }
+    }
+}
+
+fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+                                                 m: &[Match<'a, 'p, 'blk, 'tcx>],
+                                                 vals: &[MatchInput],
+                                                 chk: &FailureHandler,
+                                                 col: usize,
+                                                 val: MatchInput,
+                                                 has_genuine_default: bool) {
+    let fcx = bcx.fcx;
+    let tcx = bcx.tcx();
+    let dm = &tcx.def_map;
+
+    let mut vals_left = vals[0..col].to_vec();
+    vals_left.extend_from_slice(&vals[col + 1..]);
+    let ccx = bcx.fcx.ccx;
+
+    // Find a real id (we're adding placeholder wildcard patterns, but
+    // each column is guaranteed to have at least one real pattern)
+    let pat_id = m.iter().map(|br| br.pats[col].id)
+                         .find(|&id| id != DUMMY_NODE_ID)
+                         .unwrap_or(DUMMY_NODE_ID);
+
+    let left_ty = if pat_id == DUMMY_NODE_ID {
+        tcx.mk_nil()
+    } else {
+        node_id_type(bcx, pat_id)
+    };
+
+    let mcx = check_match::MatchCheckCtxt {
+        tcx: bcx.tcx(),
+        param_env: bcx.tcx().empty_parameter_environment(),
+    };
+    let adt_vals = if any_irrefutable_adt_pat(bcx.tcx(), m, col) {
+        let repr = adt::represent_type(bcx.ccx(), left_ty);
+        let arg_count = adt::num_args(&repr, Disr(0));
+        let (arg_count, struct_val) = if type_is_sized(bcx.tcx(), left_ty) {
+            (arg_count, val.val)
+        } else {
+            // For an unsized ADT (i.e. DST struct), we need to treat
+            // the last field specially: instead of simply passing a
+            // ValueRef pointing to that field, as with all the others,
+            // we skip it and instead construct a 'fat ptr' below.
+            (arg_count - 1, Load(bcx, expr::get_dataptr(bcx, val.val)))
+        };
+        let mut field_vals: Vec<ValueRef> = (0..arg_count).map(|ix|
+            // By definition, these are all sized
+            adt::trans_field_ptr(bcx, &repr, adt::MaybeSizedValue::sized(struct_val), Disr(0), ix)
+        ).collect();
+
+        match left_ty.sty {
+            ty::TyStruct(def, substs) if !type_is_sized(bcx.tcx(), left_ty) => {
+                // The last field is technically unsized but
+                // since we can only ever match that field behind
+                // a reference we construct a fat ptr here.
+                let unsized_ty = def.struct_variant().fields.last().map(|field| {
+                    monomorphize::field_ty(bcx.tcx(), substs, field)
+                }).unwrap();
+                let scratch = alloc_ty(bcx, unsized_ty, "__struct_field_fat_ptr");
+
+                let meta = Load(bcx, expr::get_meta(bcx, val.val));
+                let struct_val = adt::MaybeSizedValue::unsized_(struct_val, meta);
+
+                let data = adt::trans_field_ptr(bcx, &repr, struct_val, Disr(0), arg_count);
+                Store(bcx, data, expr::get_dataptr(bcx, scratch));
+                Store(bcx, meta, expr::get_meta(bcx, scratch));
+                field_vals.push(scratch);
+            }
+            _ => {}
+        }
+        Some(field_vals)
+    } else if any_uniq_pat(m, col) || any_region_pat(m, col) {
+        Some(vec!(Load(bcx, val.val)))
+    } else {
+        match left_ty.sty {
+            ty::TyArray(_, n) => {
+                let args = extract_vec_elems(bcx, left_ty, n, 0, val);
+                Some(args.vals)
+            }
+            _ => None
+        }
+    };
+    match adt_vals {
+        Some(field_vals) => {
+            let pats = enter_match(bcx, dm, m, col, val, |pats|
+                check_match::specialize(&mcx, pats,
+                                        &check_match::Single, col,
+                                        field_vals.len())
+            );
+            let mut vals: Vec<_> = field_vals.into_iter()
+                .map(|v|MatchInput::from_val(v))
+                .collect();
+            vals.extend_from_slice(&vals_left);
+            compile_submatch(bcx, &pats, &vals, chk, has_genuine_default);
+            return;
+        }
+        _ => ()
+    }
+
+    // Decide what kind of branch we need
+    let opts = get_branches(bcx, m, col);
+    debug!("options={:?}", opts);
+    let mut kind = NoBranch;
+    let mut test_val = val.val;
+    debug!("test_val={:?}", Value(test_val));
+    if !opts.is_empty() {
+        match opts[0] {
+            ConstantValue(..) | ConstantRange(..) => {
+                test_val = load_if_immediate(bcx, val.val, left_ty);
+                kind = if left_ty.is_integral() {
+                    Switch
+                } else {
+                    Compare
+                };
+            }
+            Variant(_, ref repr, _, _) => {
+                let (the_kind, val_opt) = adt::trans_switch(bcx, &repr,
+                                                            val.val, true);
+                kind = the_kind;
+                if let Some(tval) = val_opt { test_val = tval; }
+            }
+            SliceLengthEqual(..) | SliceLengthGreaterOrEqual(..) => {
+                let (_, len) = tvec::get_base_and_len(bcx, val.val, left_ty);
+                test_val = len;
+                kind = Switch;
+            }
+        }
+    }
+    for o in &opts {
+        match *o {
+            ConstantRange(..) => { kind = Compare; break },
+            SliceLengthGreaterOrEqual(..) => { kind = CompareSliceLength; break },
+            _ => ()
+        }
+    }
+    let else_cx = match kind {
+        NoBranch | Single => bcx,
+        _ => bcx.fcx.new_temp_block("match_else")
+    };
+    let sw = if kind == Switch {
+        build::Switch(bcx, test_val, else_cx.llbb, opts.len())
+    } else {
+        C_int(ccx, 0) // Placeholder for when not using a switch
+    };
+
+    let defaults = enter_default(else_cx, dm, m, col, val);
+    let exhaustive = chk.is_infallible() && defaults.is_empty();
+    let len = opts.len();
+
+    if exhaustive && kind == Switch {
+        build::Unreachable(else_cx);
+    }
+
+    // Compile subtrees for each option
+    for (i, opt) in opts.iter().enumerate() {
+        // In some cases of range and vector pattern matching, we need to
+        // override the failure case so that instead of failing, it proceeds
+        // to try more matching. branch_chk, then, is the proper failure case
+        // for the current conditional branch.
+        let mut branch_chk = None;
+        let mut opt_cx = else_cx;
+        let debug_loc = opt.debug_loc();
+
+        if kind == Switch || !exhaustive || i + 1 < len {
+            opt_cx = bcx.fcx.new_temp_block("match_case");
+            match kind {
+                Single => Br(bcx, opt_cx.llbb, debug_loc),
+                Switch => {
+                    match opt.trans(bcx) {
+                        SingleResult(r) => {
+                            AddCase(sw, r.val, opt_cx.llbb);
+                            bcx = r.bcx;
+                        }
+                        _ => {
+                            bug!(
+                                "in compile_submatch, expected \
+                                 opt.trans() to return a SingleResult")
+                        }
+                    }
+                }
+                Compare | CompareSliceLength => {
+                    let t = if kind == Compare {
+                        left_ty
+                    } else {
+                        tcx.types.usize // vector length
+                    };
+                    let Result { bcx: after_cx, val: matches } = {
+                        match opt.trans(bcx) {
+                            SingleResult(Result { bcx, val }) => {
+                                compare_values(bcx, test_val, val, t, debug_loc)
+                            }
+                            RangeResult(Result { val: vbegin, .. },
+                                        Result { bcx, val: vend }) => {
+                                let llge = compare_scalar_types(bcx, test_val, vbegin,
+                                                                t, hir::BiGe, debug_loc);
+                                let llle = compare_scalar_types(bcx, test_val, vend,
+                                                                t, hir::BiLe, debug_loc);
+                                Result::new(bcx, And(bcx, llge, llle, DebugLoc::None))
+                            }
+                            LowerBound(Result { bcx, val }) => {
+                                Result::new(bcx, compare_scalar_types(bcx, test_val,
+                                                                      val, t, hir::BiGe,
+                                                                      debug_loc))
+                            }
+                        }
+                    };
+                    bcx = fcx.new_temp_block("compare_next");
+
+                    // If none of the sub-cases match, and the current condition
+                    // is guarded or has multiple patterns, move on to the next
+                    // condition, if there is any, rather than falling back to
+                    // the default.
+                    let guarded = m[i].data.arm.guard.is_some();
+                    let multi_pats = m[i].pats.len() > 1;
+                    if i + 1 < len && (guarded || multi_pats || kind == CompareSliceLength) {
+                        branch_chk = Some(JumpToBasicBlock(bcx.llbb));
+                    }
+                    CondBr(after_cx, matches, opt_cx.llbb, bcx.llbb, debug_loc);
+                }
+                _ => ()
+            }
+        } else if kind == Compare || kind == CompareSliceLength {
+            Br(bcx, else_cx.llbb, debug_loc);
+        }
+
+        let mut size = 0;
+        let mut unpacked = Vec::new();
+        match *opt {
+            Variant(disr_val, ref repr, _, _) => {
+                let ExtractedBlock {vals: argvals, bcx: new_bcx} =
+                    extract_variant_args(opt_cx, &repr, disr_val, val);
+                size = argvals.len();
+                unpacked = argvals;
+                opt_cx = new_bcx;
+            }
+            SliceLengthEqual(len, _) => {
+                let args = extract_vec_elems(opt_cx, left_ty, len, 0, val);
+                size = args.vals.len();
+                unpacked = args.vals.clone();
+                opt_cx = args.bcx;
+            }
+            SliceLengthGreaterOrEqual(before, after, _) => {
+                let args = extract_vec_elems(opt_cx, left_ty, before, after, val);
+                size = args.vals.len();
+                unpacked = args.vals.clone();
+                opt_cx = args.bcx;
+            }
+            ConstantValue(..) | ConstantRange(..) => ()
+        }
+        let opt_ms = enter_opt(opt_cx, pat_id, dm, m, opt, col, size, val);
+        let mut opt_vals: Vec<_> = unpacked.into_iter()
+            .map(|v|MatchInput::from_val(v))
+            .collect();
+        opt_vals.extend_from_slice(&vals_left[..]);
+        compile_submatch(opt_cx,
+                         &opt_ms[..],
+                         &opt_vals[..],
+                         branch_chk.as_ref().unwrap_or(chk),
+                         has_genuine_default);
+    }
+
+    // Compile the fall-through case, if any
+    if !exhaustive && kind != Single {
+        if kind == Compare || kind == CompareSliceLength {
+            Br(bcx, else_cx.llbb, DebugLoc::None);
+        }
+        match chk {
+            // If there is only one default arm left, move on to the next
+            // condition explicitly rather than (eventually) falling back to
+            // the last default arm.
+            &JumpToBasicBlock(_) if defaults.len() == 1 && has_genuine_default => {
+                chk.handle_fail(else_cx);
+            }
+            _ => {
+                compile_submatch(else_cx,
+                                 &defaults[..],
+                                 &vals_left[..],
+                                 chk,
+                                 has_genuine_default);
+            }
+        }
+    }
+}
+
+pub fn trans_match<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                               match_expr: &hir::Expr,
+                               discr_expr: &hir::Expr,
+                               arms: &[hir::Arm],
+                               dest: Dest)
+                               -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("match::trans_match");
+    trans_match_inner(bcx, match_expr.id, discr_expr, arms, dest)
+}
+
+/// Checks whether the binding in `discr` is assigned to anywhere in the expression `body`
+fn is_discr_reassigned(bcx: Block, discr: &hir::Expr, body: &hir::Expr) -> bool {
+    let (vid, field) = match discr.node {
+        hir::ExprPath(..) => match bcx.def(discr.id) {
+            Def::Local(_, vid) | Def::Upvar(_, vid, _, _) => (vid, None),
+            _ => return false
+        },
+        hir::ExprField(ref base, field) => {
+            let vid = match bcx.tcx().def_map.borrow().get(&base.id).map(|d| d.full_def()) {
+                Some(Def::Local(_, vid)) | Some(Def::Upvar(_, vid, _, _)) => vid,
+                _ => return false
+            };
+            (vid, Some(mc::NamedField(field.node)))
+        },
+        hir::ExprTupField(ref base, field) => {
+            let vid = match bcx.tcx().def_map.borrow().get(&base.id).map(|d| d.full_def()) {
+                Some(Def::Local(_, vid)) | Some(Def::Upvar(_, vid, _, _)) => vid,
+                _ => return false
+            };
+            (vid, Some(mc::PositionalField(field.node)))
+        },
+        _ => return false
+    };
+
+    let mut rc = ReassignmentChecker {
+        node: vid,
+        field: field,
+        reassigned: false
+    };
+    {
+        let infcx = infer::normalizing_infer_ctxt(bcx.tcx(),
+                                                  &bcx.tcx().tables,
+                                                  ProjectionMode::Any);
+        let mut visitor = euv::ExprUseVisitor::new(&mut rc, &infcx);
+        visitor.walk_expr(body);
+    }
+    rc.reassigned
+}
+
+struct ReassignmentChecker {
+    node: ast::NodeId,
+    field: Option<mc::FieldName>,
+    reassigned: bool
+}
+
+// Determine whether the expression we're matching on is reassigned within
+// the body of the match's arm.
+// We only care about the `mutate` callback, since this check only matters
+// for cases where the matched value is moved.
+impl<'tcx> euv::Delegate<'tcx> for ReassignmentChecker {
+    fn consume(&mut self, _: ast::NodeId, _: Span, _: mc::cmt, _: euv::ConsumeMode) {}
+    fn matched_pat(&mut self, _: &hir::Pat, _: mc::cmt, _: euv::MatchMode) {}
+    fn consume_pat(&mut self, _: &hir::Pat, _: mc::cmt, _: euv::ConsumeMode) {}
+    fn borrow(&mut self, _: ast::NodeId, _: Span, _: mc::cmt, _: ty::Region,
+              _: ty::BorrowKind, _: euv::LoanCause) {}
+    fn decl_without_init(&mut self, _: ast::NodeId, _: Span) {}
+
+    fn mutate(&mut self, _: ast::NodeId, _: Span, cmt: mc::cmt, _: euv::MutateMode) {
+        match cmt.cat {
+            Categorization::Upvar(mc::Upvar { id: ty::UpvarId { var_id: vid, .. }, .. }) |
+            Categorization::Local(vid) => self.reassigned |= self.node == vid,
+            Categorization::Interior(ref base_cmt, mc::InteriorField(field)) => {
+                match base_cmt.cat {
+                    Categorization::Upvar(mc::Upvar { id: ty::UpvarId { var_id: vid, .. }, .. }) |
+                    Categorization::Local(vid) => {
+                        self.reassigned |= self.node == vid &&
+                            (self.field.is_none() || Some(field) == self.field)
+                    },
+                    _ => {}
+                }
+            },
+            _ => {}
+        }
+    }
+}
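+// For example, in a sketch like
+//
+//     match foo { x => { foo = other(); use_it(x); } }
+//
+// `foo` is reassigned inside the arm body, so a by-value binding of `x`
+// must be copied into its own stack slot rather than simply reusing the
+// matched value's memory (see `create_bindings_map` below).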
+
+fn create_bindings_map<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pat: &hir::Pat,
+                                   discr: &hir::Expr, body: &hir::Expr)
+                                   -> BindingsMap<'tcx> {
+    // Create the bindings map, which is a mapping from each binding name
+    // to an alloca() that will be the value for that local variable.
+    // Note that we use the names because each binding will have many ids
+    // from the various alternatives.
+    let ccx = bcx.ccx();
+    let tcx = bcx.tcx();
+    let reassigned = is_discr_reassigned(bcx, discr, body);
+    let mut bindings_map = FnvHashMap();
+    pat_bindings(&tcx.def_map, &pat, |bm, p_id, span, path1| {
+        let name = path1.node;
+        let variable_ty = node_id_type(bcx, p_id);
+        let llvariable_ty = type_of::type_of(ccx, variable_ty);
+        let tcx = bcx.tcx();
+        let param_env = tcx.empty_parameter_environment();
+
+        let llmatch;
+        let trmode;
+        let moves_by_default = variable_ty.moves_by_default(&param_env, span);
+        match bm {
+            hir::BindByValue(_) if !moves_by_default || reassigned =>
+            {
+                llmatch = alloca(bcx, llvariable_ty.ptr_to(), "__llmatch");
+                let llcopy = alloca(bcx, llvariable_ty, &bcx.name(name));
+                trmode = if moves_by_default {
+                    TrByMoveIntoCopy(llcopy)
+                } else {
+                    TrByCopy(llcopy)
+                };
+            }
+            hir::BindByValue(_) => {
+                // in this case, the final type of the variable will be T,
+                // but during matching we need to store a *T as explained
+                // above
+                llmatch = alloca(bcx, llvariable_ty.ptr_to(), &bcx.name(name));
+                trmode = TrByMoveRef;
+            }
+            hir::BindByRef(_) => {
+                llmatch = alloca(bcx, llvariable_ty, &bcx.name(name));
+                trmode = TrByRef;
+            }
+        };
+        bindings_map.insert(name, BindingInfo {
+            llmatch: llmatch,
+            trmode: trmode,
+            id: p_id,
+            span: span,
+            ty: variable_ty
+        });
+    });
+    return bindings_map;
+}
+
+fn trans_match_inner<'blk, 'tcx>(scope_cx: Block<'blk, 'tcx>,
+                                 match_id: ast::NodeId,
+                                 discr_expr: &hir::Expr,
+                                 arms: &[hir::Arm],
+                                 dest: Dest) -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("match::trans_match_inner");
+    let fcx = scope_cx.fcx;
+    let mut bcx = scope_cx;
+    let tcx = bcx.tcx();
+
+    let discr_datum = unpack_datum!(bcx, expr::trans_to_lvalue(bcx, discr_expr,
+                                                               "match"));
+    if bcx.unreachable.get() {
+        return bcx;
+    }
+
+    let t = node_id_type(bcx, discr_expr.id);
+    let chk = if t.is_empty(tcx) {
+        Unreachable
+    } else {
+        Infallible
+    };
+
+    let arm_datas: Vec<ArmData> = arms.iter().map(|arm| ArmData {
+        bodycx: fcx.new_id_block("case_body", arm.body.id),
+        arm: arm,
+        bindings_map: create_bindings_map(bcx, &arm.pats[0], discr_expr, &arm.body)
+    }).collect();
+
+    let mut pat_renaming_map = if scope_cx.sess().opts.debuginfo != NoDebugInfo {
+        Some(FnvHashMap())
+    } else {
+        None
+    };
+
+    let arm_pats: Vec<Vec<P<hir::Pat>>> = {
+        let mut static_inliner = StaticInliner::new(scope_cx.tcx(),
+                                                    pat_renaming_map.as_mut());
+        arm_datas.iter().map(|arm_data| {
+            arm_data.arm.pats.iter().map(|p| static_inliner.fold_pat((*p).clone())).collect()
+        }).collect()
+    };
+
+    let mut matches = Vec::new();
+    for (arm_data, pats) in arm_datas.iter().zip(&arm_pats) {
+        matches.extend(pats.iter().map(|p| Match {
+            pats: vec![&p],
+            data: arm_data,
+            bound_ptrs: Vec::new(),
+            pat_renaming_map: pat_renaming_map.as_ref()
+        }));
+    }
+
+    // `compile_submatch` works one column of arm patterns at a time and
+    // then peels that column off. So as we progress, it may become
+    // impossible to tell whether we have a genuine default arm, i.e.
+    // `_ => foo`, or not. Sometimes it is important to know that in order
+    // to decide whether to move on to the next condition or to fall back
+    // to the default arm.
+    let has_default = arms.last().map_or(false, |arm| {
+        arm.pats.len() == 1
+        && arm.pats.last().unwrap().node == PatKind::Wild
+    });
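+    // For instance, in
+    //
+    //     match x { (0, _) => a(), (_, 1) => b(), _ => c() }
+    //
+    // the trailing `_ => c()` arm is a genuine default, whereas a trailing
+    // `(_, _) => c()` arm would not be counted as one here, even though it
+    // also matches every value.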
+
+    compile_submatch(bcx, &matches[..], &[discr_datum.match_input()], &chk, has_default);
+
+    let mut arm_cxs = Vec::new();
+    for arm_data in &arm_datas {
+        let mut bcx = arm_data.bodycx;
+
+        // insert bindings into the lllocals map and add cleanups
+        let cs = fcx.push_custom_cleanup_scope();
+        bcx = insert_lllocals(bcx, &arm_data.bindings_map, Some(cleanup::CustomScope(cs)));
+        bcx = expr::trans_into(bcx, &arm_data.arm.body, dest);
+        bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, cs);
+        arm_cxs.push(bcx);
+    }
+
+    bcx = scope_cx.fcx.join_blocks(match_id, &arm_cxs[..]);
+    return bcx;
+}
+
+/// Generates code for a local variable declaration like `let <pat>;` or `let <pat> =
+/// <opt_init_expr>`.
+pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                               local: &hir::Local)
+                               -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("match::store_local");
+    let mut bcx = bcx;
+    let tcx = bcx.tcx();
+    let pat = &local.pat;
+
+    fn create_dummy_locals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+                                       pat: &hir::Pat)
+                                       -> Block<'blk, 'tcx> {
+        let _icx = push_ctxt("create_dummy_locals");
+        // create dummy memory for the variables if we have no
+        // value to store into them immediately
+        let tcx = bcx.tcx();
+        pat_bindings(&tcx.def_map, pat, |_, p_id, _, path1| {
+            let scope = cleanup::var_scope(tcx, p_id);
+            bcx = mk_binding_alloca(
+                bcx, p_id, path1.node, scope, (),
+                "_match::store_local::create_dummy_locals",
+                |(), bcx, Datum { val: llval, ty, kind }| {
+                    // Dummy-locals start out uninitialized, so set their
+                    // drop-flag hints (if any) to "moved."
+                    if let Some(hint) = kind.dropflag_hint(bcx) {
+                        let moved_hint = adt::DTOR_MOVED_HINT;
+                        debug!("store moved_hint={} for hint={:?}, uninitialized dummy",
+                               moved_hint, hint);
+                        Store(bcx, C_u8(bcx.fcx.ccx, moved_hint), hint.to_value().value());
+                    }
+
+                    if kind.drop_flag_info.must_zero() {
+                        // if no drop-flag hint, or the hint requires
+                        // we maintain the embedded drop-flag, then
+                        // mark embedded drop-flag(s) as moved
+                        // (i.e. "already dropped").
+                        drop_done_fill_mem(bcx, llval, ty);
+                    }
+                    bcx
+                });
+        });
+        bcx
+    }
+
+    match local.init {
+        Some(ref init_expr) => {
+            // Optimize the "let x = expr" case. This just writes
+            // the result of evaluating `expr` directly into the alloca
+            // for `x`. Often the general path results in similar or the
+            // same code post-optimization, but not always. In particular,
+            // in unsafe code, you can have expressions like
+            //
+            //    let x = intrinsics::uninit();
+            //
+            // In such cases, the more general path is unsafe, because
+            // it assumes it is matching against a valid value.
+            match simple_name(pat) {
+                Some(name) => {
+                    let var_scope = cleanup::var_scope(tcx, local.id);
+                    return mk_binding_alloca(
+                        bcx, pat.id, name, var_scope, (),
+                        "_match::store_local",
+                        |(), bcx, Datum { val: v, .. }| expr::trans_into(bcx, &init_expr,
+                                                                         expr::SaveIn(v)));
+                }
+
+                None => {}
+            }
+
+            // General path.
+            let init_datum =
+                unpack_datum!(bcx, expr::trans_to_lvalue(bcx, &init_expr, "let"));
+            if bcx.sess().asm_comments() {
+                add_comment(bcx, "creating zeroable ref llval");
+            }
+            let var_scope = cleanup::var_scope(tcx, local.id);
+            bind_irrefutable_pat(bcx, pat, init_datum.match_input(), var_scope)
+        }
+        None => {
+            create_dummy_locals(bcx, pat)
+        }
+    }
+}
+
+fn mk_binding_alloca<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>,
+                                       p_id: ast::NodeId,
+                                       name: ast::Name,
+                                       cleanup_scope: cleanup::ScopeId,
+                                       arg: A,
+                                       caller_name: &'static str,
+                                       populate: F)
+                                       -> Block<'blk, 'tcx> where
+    F: FnOnce(A, Block<'blk, 'tcx>, Datum<'tcx, Lvalue>) -> Block<'blk, 'tcx>,
+{
+    let var_ty = node_id_type(bcx, p_id);
+
+    // Allocate memory on stack for the binding.
+    let llval = alloc_ty(bcx, var_ty, &bcx.name(name));
+    let lvalue = Lvalue::new_with_hint(caller_name, bcx, p_id, HintKind::DontZeroJustUse);
+    let datum = Datum::new(llval, var_ty, lvalue);
+
+    debug!("mk_binding_alloca cleanup_scope={:?} llval={:?} var_ty={:?}",
+           cleanup_scope, Value(llval), var_ty);
+
+    // Subtle: be sure that we *populate* the memory *before*
+    // we schedule the cleanup.
+    call_lifetime_start(bcx, llval);
+    let bcx = populate(arg, bcx, datum);
+    bcx.fcx.schedule_lifetime_end(cleanup_scope, llval);
+    bcx.fcx.schedule_drop_mem(cleanup_scope, llval, var_ty, lvalue.dropflag_hint(bcx));
+
+    // Now that memory is initialized and has cleanup scheduled,
+    // insert datum into the local variable map.
+    bcx.fcx.lllocals.borrow_mut().insert(p_id, datum);
+    bcx
+}
+
+/// A simple version of the pattern matching code that only handles
+/// irrefutable patterns. This is used in let/argument patterns,
+/// not in match statements. Unifying this code with the code above
+/// sounds nice, but in practice it produces very inefficient code,
+/// since the match code is so much more general. In most cases,
+/// LLVM is able to optimize the code, but it causes longer compile
+/// times and makes the generated code nigh impossible to read.
+///
+/// # Arguments
+/// - bcx: starting basic block context
+/// - pat: the irrefutable pattern being matched.
+/// - val: the value being matched -- must be an lvalue (by ref, with cleanup)
+pub fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                    pat: &hir::Pat,
+                                    val: MatchInput,
+                                    cleanup_scope: cleanup::ScopeId)
+                                    -> Block<'blk, 'tcx> {
+    debug!("bind_irrefutable_pat(bcx={}, pat={:?}, val={:?})",
+           bcx.to_str(), pat, val);
+
+    if bcx.sess().asm_comments() {
+        add_comment(bcx, &format!("bind_irrefutable_pat(pat={:?})",
+                                 pat));
+    }
+
+    let _indenter = indenter();
+
+    let _icx = push_ctxt("match::bind_irrefutable_pat");
+    let mut bcx = bcx;
+    let tcx = bcx.tcx();
+    let ccx = bcx.ccx();
+    match pat.node {
+        PatKind::Ident(pat_binding_mode, ref path1, ref inner) => {
+            if pat_is_binding(&tcx.def_map.borrow(), &pat) {
+                // Allocate the stack slot where the value of this
+                // binding will live and place it into the appropriate
+                // map.
+                bcx = mk_binding_alloca(
+                    bcx, pat.id, path1.node.name, cleanup_scope, (),
+                    "_match::bind_irrefutable_pat",
+                    |(), bcx, Datum { val: llval, ty, kind: _ }| {
+                        match pat_binding_mode {
+                            hir::BindByValue(_) => {
+                                // By value binding: move the value that `val`
+                                // points at into the binding's stack slot.
+                                let d = val.to_datum(ty);
+                                d.store_to(bcx, llval)
+                            }
+
+                            hir::BindByRef(_) => {
+                                // By ref binding: the value of the variable
+                                // is the pointer `val` itself or fat pointer referenced by `val`
+                                if type_is_fat_ptr(bcx.tcx(), ty) {
+                                    expr::copy_fat_ptr(bcx, val.val, llval);
+                                }
+                                else {
+                                    Store(bcx, val.val, llval);
+                                }
+
+                                bcx
+                            }
+                        }
+                    });
+            }
+
+            if let Some(ref inner_pat) = *inner {
+                bcx = bind_irrefutable_pat(bcx, &inner_pat, val, cleanup_scope);
+            }
+        }
+        PatKind::TupleStruct(_, ref sub_pats) => {
+            let opt_def = bcx.tcx().def_map.borrow().get(&pat.id).map(|d| d.full_def());
+            match opt_def {
+                Some(Def::Variant(enum_id, var_id)) => {
+                    let repr = adt::represent_node(bcx, pat.id);
+                    let vinfo = ccx.tcx().lookup_adt_def(enum_id).variant_with_id(var_id);
+                    let args = extract_variant_args(bcx,
+                                                    &repr,
+                                                    Disr::from(vinfo.disr_val),
+                                                    val);
+                    if let Some(ref sub_pat) = *sub_pats {
+                        for (i, &argval) in args.vals.iter().enumerate() {
+                            bcx = bind_irrefutable_pat(
+                                bcx,
+                                &sub_pat[i],
+                                MatchInput::from_val(argval),
+                                cleanup_scope);
+                        }
+                    }
+                }
+                Some(Def::Struct(..)) => {
+                    match *sub_pats {
+                        None => {
+                            // This is a unit-like struct. Nothing to do here.
+                        }
+                        Some(ref elems) => {
+                            // This is the tuple struct case.
+                            let repr = adt::represent_node(bcx, pat.id);
+                            let val = adt::MaybeSizedValue::sized(val.val);
+                            for (i, elem) in elems.iter().enumerate() {
+                                let fldptr = adt::trans_field_ptr(bcx, &repr,
+                                                                  val, Disr(0), i);
+                                bcx = bind_irrefutable_pat(
+                                    bcx,
+                                    &elem,
+                                    MatchInput::from_val(fldptr),
+                                    cleanup_scope);
+                            }
+                        }
+                    }
+                }
+                _ => {
+                    // Nothing to do here.
+                }
+            }
+        }
+        PatKind::Struct(_, ref fields, _) => {
+            let tcx = bcx.tcx();
+            let pat_ty = node_id_type(bcx, pat.id);
+            let pat_repr = adt::represent_type(bcx.ccx(), pat_ty);
+            let pat_v = VariantInfo::of_node(tcx, pat_ty, pat.id);
+
+            let val = if type_is_sized(tcx, pat_ty) {
+                adt::MaybeSizedValue::sized(val.val)
+            } else {
+                let data = Load(bcx, expr::get_dataptr(bcx, val.val));
+                let meta = Load(bcx, expr::get_meta(bcx, val.val));
+                adt::MaybeSizedValue::unsized_(data, meta)
+            };
+
+            for f in fields {
+                let name = f.node.name;
+                let field_idx = pat_v.field_index(name);
+                let mut fldptr = adt::trans_field_ptr(
+                    bcx,
+                    &pat_repr,
+                    val,
+                    pat_v.discr,
+                    field_idx);
+
+                let fty = pat_v.fields[field_idx].1;
+                // If it's not sized, then construct a fat pointer instead of
+                // a regular one
+                if !type_is_sized(tcx, fty) {
+                    let scratch = alloc_ty(bcx, fty, "__struct_field_fat_ptr");
+                    debug!("Creating fat pointer {:?}", Value(scratch));
+                    Store(bcx, fldptr, expr::get_dataptr(bcx, scratch));
+                    Store(bcx, val.meta, expr::get_meta(bcx, scratch));
+                    fldptr = scratch;
+                }
+                bcx = bind_irrefutable_pat(bcx,
+                                           &f.node.pat,
+                                           MatchInput::from_val(fldptr),
+                                           cleanup_scope);
+            }
+        }
+        PatKind::Tup(ref elems) => {
+            let repr = adt::represent_node(bcx, pat.id);
+            let val = adt::MaybeSizedValue::sized(val.val);
+            for (i, elem) in elems.iter().enumerate() {
+                let fldptr = adt::trans_field_ptr(bcx, &repr, val, Disr(0), i);
+                bcx = bind_irrefutable_pat(
+                    bcx,
+                    &elem,
+                    MatchInput::from_val(fldptr),
+                    cleanup_scope);
+            }
+        }
+        PatKind::Box(ref inner) => {
+            let pat_ty = node_id_type(bcx, inner.id);
+            // Pass along DSTs as fat pointers.
+            let val = if type_is_fat_ptr(tcx, pat_ty) {
+                // We need to check for this, as the pattern could be binding
+                // a fat pointer by-value.
+                if let PatKind::Ident(hir::BindByRef(_),_,_) = inner.node {
+                    val.val
+                } else {
+                    Load(bcx, val.val)
+                }
+            } else if type_is_sized(tcx, pat_ty) {
+                Load(bcx, val.val)
+            } else {
+                val.val
+            };
+            bcx = bind_irrefutable_pat(
+                bcx, &inner, MatchInput::from_val(val), cleanup_scope);
+        }
+        PatKind::Ref(ref inner, _) => {
+            let pat_ty = node_id_type(bcx, inner.id);
+            // Pass along DSTs as fat pointers.
+            let val = if type_is_fat_ptr(tcx, pat_ty) {
+                // We need to check for this, as the pattern could be binding
+                // a fat pointer by-value.
+                if let PatKind::Ident(hir::BindByRef(_),_,_) = inner.node {
+                    val.val
+                } else {
+                    Load(bcx, val.val)
+                }
+            } else if type_is_sized(tcx, pat_ty) {
+                Load(bcx, val.val)
+            } else {
+                val.val
+            };
+            bcx = bind_irrefutable_pat(
+                bcx,
+                &inner,
+                MatchInput::from_val(val),
+                cleanup_scope);
+        }
+        PatKind::Vec(ref before, ref slice, ref after) => {
+            let pat_ty = node_id_type(bcx, pat.id);
+            let mut extracted = extract_vec_elems(bcx, pat_ty, before.len(), after.len(), val);
+            match slice {
+                &Some(_) => {
+                    extracted.vals.insert(
+                        before.len(),
+                        bind_subslice_pat(bcx, pat.id, val, before.len(), after.len())
+                    );
+                }
+                &None => ()
+            }
+            bcx = before
+                .iter()
+                .chain(slice.iter())
+                .chain(after.iter())
+                .zip(extracted.vals)
+                .fold(bcx, |bcx, (inner, elem)| {
+                    bind_irrefutable_pat(
+                        bcx,
+                        &inner,
+                        MatchInput::from_val(elem),
+                        cleanup_scope)
+                });
+        }
+        PatKind::Path(..) | PatKind::QPath(..) | PatKind::Wild | PatKind::Lit(_) |
+        PatKind::Range(_, _) => ()
+    }
+    return bcx;
+}
diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs
new file mode 100644 (file)
index 0000000..9bbe0cb
--- /dev/null
@@ -0,0 +1,546 @@
+// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm::{self, ValueRef};
+use base;
+use builder::Builder;
+use common::{type_is_fat_ptr, BlockAndBuilder};
+use context::CrateContext;
+use cabi_x86;
+use cabi_x86_64;
+use cabi_x86_win64;
+use cabi_arm;
+use cabi_aarch64;
+use cabi_powerpc;
+use cabi_powerpc64;
+use cabi_mips;
+use cabi_asmjs;
+use machine::{llalign_of_min, llsize_of, llsize_of_real};
+use type_::Type;
+use type_of;
+
+use rustc::hir;
+use rustc::ty::{self, Ty};
+
+use libc::c_uint;
+
+pub use syntax::abi::Abi;
+pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
+
+#[derive(Clone, Copy, PartialEq, Debug)]
+enum ArgKind {
+    /// Pass the argument directly using the normal converted
+    /// LLVM type or by coercing to another specified type
+    Direct,
+    /// Pass the argument indirectly via a hidden pointer
+    Indirect,
+    /// Ignore the argument (useful for empty struct)
+    Ignore,
+}
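+// As a rough illustration: a `u64` argument is passed `Direct`, a struct
+// too large for the target's argument-passing conventions is passed
+// `Indirect` through a hidden pointer, and a zero-sized type such as `()`
+// is normally `Ignore`d altogether.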
+
+/// Information about how a specific C type
+/// should be passed to or returned from a function
+///
+/// This is borrowed from clang's ABIInfo.h
+#[derive(Clone, Copy, Debug)]
+pub struct ArgType {
+    kind: ArgKind,
+    /// Original LLVM type
+    pub original_ty: Type,
+    /// Sizing LLVM type (pointers are opaque).
+    /// Unlike original_ty, this is guaranteed to be complete.
+    ///
+    /// For example, while we're computing the function pointer type in
+    /// `struct Foo(fn(Foo));`, `original_ty` is still LLVM's `%Foo = {}`.
+    /// The field type will likely end up being `void(%Foo)*`, but we cannot
+    /// use `%Foo` to compute properties (e.g. size and alignment) of `Foo`,
+    /// until `%Foo` is completed by having all of its field types inserted,
+    /// so `ty` holds the "sizing type" of `Foo`, which replaces all pointers
+    /// with opaque ones, resulting in `{i8*}` for `Foo`.
+    /// ABI-specific logic can then look at the size, alignment and fields of
+    /// `{i8*}` in order to determine how the argument will be passed.
+    /// Only later will `original_ty` aka `%Foo` be used in the LLVM function
+    /// pointer type, without ever having introspected it.
+    pub ty: Type,
+    /// Signedness for integer types, None for other types
+    pub signedness: Option<bool>,
+    /// Coerced LLVM Type
+    pub cast: Option<Type>,
+    /// Dummy argument, which is emitted before the real argument
+    pub pad: Option<Type>,
+    /// LLVM attributes of argument
+    pub attrs: llvm::Attributes
+}
+
+impl ArgType {
+    fn new(original_ty: Type, ty: Type) -> ArgType {
+        ArgType {
+            kind: ArgKind::Direct,
+            original_ty: original_ty,
+            ty: ty,
+            signedness: None,
+            cast: None,
+            pad: None,
+            attrs: llvm::Attributes::default()
+        }
+    }
+
+    pub fn make_indirect(&mut self, ccx: &CrateContext) {
+        assert_eq!(self.kind, ArgKind::Direct);
+
+        // Wipe old attributes, likely not valid through indirection.
+        self.attrs = llvm::Attributes::default();
+
+        let llarg_sz = llsize_of_real(ccx, self.ty);
+
+        // For non-immediate arguments the callee gets its own copy of
+        // the value on the stack, so there are no aliases. It's also
+        // program-invisible so can't possibly capture
+        self.attrs.set(llvm::Attribute::NoAlias)
+                  .set(llvm::Attribute::NoCapture)
+                  .set_dereferenceable(llarg_sz);
+
+        self.kind = ArgKind::Indirect;
+    }
+
+    pub fn ignore(&mut self) {
+        assert_eq!(self.kind, ArgKind::Direct);
+        self.kind = ArgKind::Ignore;
+    }
+
+    pub fn extend_integer_width_to(&mut self, bits: u64) {
+        // Only integers have signedness
+        if let Some(signed) = self.signedness {
+            if self.ty.int_width() < bits {
+                self.attrs.set(if signed {
+                    llvm::Attribute::SExt
+                } else {
+                    llvm::Attribute::ZExt
+                });
+            }
+        }
+    }
+
+    pub fn is_indirect(&self) -> bool {
+        self.kind == ArgKind::Indirect
+    }
+
+    pub fn is_ignore(&self) -> bool {
+        self.kind == ArgKind::Ignore
+    }
+
+    /// Get the LLVM type for an lvalue of the original Rust type of
+    /// this argument/return, i.e. the result of `type_of::type_of`.
+    pub fn memory_ty(&self, ccx: &CrateContext) -> Type {
+        if self.original_ty == Type::i1(ccx) {
+            Type::i8(ccx)
+        } else {
+            self.original_ty
+        }
+    }
+
+    /// Store a direct/indirect value described by this ArgType into an
+    /// lvalue for the original Rust type of this argument/return.
+    /// Can be used both for storing formal arguments into Rust variables
+    /// and for storing the results of call/invoke instructions into their
+    /// destinations.
+    pub fn store(&self, b: &Builder, mut val: ValueRef, dst: ValueRef) {
+        if self.is_ignore() {
+            return;
+        }
+        if self.is_indirect() {
+            let llsz = llsize_of(b.ccx, self.ty);
+            let llalign = llalign_of_min(b.ccx, self.ty);
+            base::call_memcpy(b, dst, val, llsz, llalign as u32);
+        } else if let Some(ty) = self.cast {
+            let cast_dst = b.pointercast(dst, ty.ptr_to());
+            let store = b.store(val, cast_dst);
+            let llalign = llalign_of_min(b.ccx, self.ty);
+            unsafe {
+                llvm::LLVMSetAlignment(store, llalign);
+            }
+        } else {
+            if self.original_ty == Type::i1(b.ccx) {
+                val = b.zext(val, Type::i8(b.ccx));
+            }
+            b.store(val, dst);
+        }
+    }
+
+    pub fn store_fn_arg(&self, bcx: &BlockAndBuilder, idx: &mut usize, dst: ValueRef) {
+        if self.pad.is_some() {
+            *idx += 1;
+        }
+        if self.is_ignore() {
+            return;
+        }
+        let val = llvm::get_param(bcx.fcx().llfn, *idx as c_uint);
+        *idx += 1;
+        self.store(bcx, val, dst);
+    }
+}
+
+/// Metadata describing how the arguments to a native function
+/// should be passed in order to respect the native ABI.
+///
+/// I will do my best to describe this structure, but these
+/// comments are reverse-engineered and may be inaccurate. -NDM
+pub struct FnType {
+    /// The LLVM types of each argument.
+    pub args: Vec<ArgType>,
+
+    /// LLVM return type.
+    pub ret: ArgType,
+
+    pub variadic: bool,
+
+    pub cconv: llvm::CallConv
+}
+
+impl FnType {
+    pub fn new<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                         abi: Abi,
+                         sig: &ty::FnSig<'tcx>,
+                         extra_args: &[Ty<'tcx>]) -> FnType {
+        let mut fn_ty = FnType::unadjusted(ccx, abi, sig, extra_args);
+        fn_ty.adjust_for_abi(ccx, abi, sig);
+        fn_ty
+    }
+
+    pub fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                abi: Abi,
+                                sig: &ty::FnSig<'tcx>,
+                                extra_args: &[Ty<'tcx>]) -> FnType {
+        use self::Abi::*;
+        let cconv = match ccx.sess().target.target.adjust_abi(abi) {
+            RustIntrinsic | PlatformIntrinsic |
+            Rust | RustCall => llvm::CCallConv,
+
+            // It's the ABI's job to select this, not us.
+            System => bug!("system abi should be selected elsewhere"),
+
+            Stdcall => llvm::X86StdcallCallConv,
+            Fastcall => llvm::X86FastcallCallConv,
+            Vectorcall => llvm::X86_VectorCall,
+            C => llvm::CCallConv,
+            Win64 => llvm::X86_64_Win64,
+
+            // These API constants ought to be more specific...
+            Cdecl => llvm::CCallConv,
+            Aapcs => llvm::CCallConv,
+        };
+
+        let mut inputs = &sig.inputs[..];
+        let extra_args = if abi == RustCall {
+            assert!(!sig.variadic && extra_args.is_empty());
+
+            match inputs[inputs.len() - 1].sty {
+                ty::TyTuple(ref tupled_arguments) => {
+                    inputs = &inputs[..inputs.len() - 1];
+                    &tupled_arguments[..]
+                }
+                _ => {
+                    bug!("argument to function with \"rust-call\" ABI \
+                          is not a tuple");
+                }
+            }
+        } else {
+            assert!(sig.variadic || extra_args.is_empty());
+            extra_args
+        };
+
+        let target = &ccx.sess().target.target;
+        let win_x64_gnu = target.target_os == "windows"
+                       && target.arch == "x86_64"
+                       && target.target_env == "gnu";
+        let rust_abi = match abi {
+            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
+            _ => false
+        };
+
+        let arg_of = |ty: Ty<'tcx>, is_return: bool| {
+            if ty.is_bool() {
+                let llty = Type::i1(ccx);
+                let mut arg = ArgType::new(llty, llty);
+                arg.attrs.set(llvm::Attribute::ZExt);
+                arg
+            } else {
+                let mut arg = ArgType::new(type_of::type_of(ccx, ty),
+                                           type_of::sizing_type_of(ccx, ty));
+                if ty.is_integral() {
+                    arg.signedness = Some(ty.is_signed());
+                }
+                if llsize_of_real(ccx, arg.ty) == 0 {
+                    // For some forsaken reason, x86_64-pc-windows-gnu
+                    // doesn't ignore zero-sized struct arguments.
+                    if is_return || rust_abi || !win_x64_gnu {
+                        arg.ignore();
+                    }
+                }
+                arg
+            }
+        };
+
+        let ret_ty = match sig.output {
+            ty::FnConverging(ret_ty) => ret_ty,
+            ty::FnDiverging => ccx.tcx().mk_nil()
+        };
+        let mut ret = arg_of(ret_ty, true);
+
+        if !type_is_fat_ptr(ccx.tcx(), ret_ty) {
+            // The `noalias` attribute on the return value is useful to a
+            // function ptr caller.
+            if let ty::TyBox(_) = ret_ty.sty {
+                // `Box` pointer return values never alias because ownership
+                // is transferred
+                ret.attrs.set(llvm::Attribute::NoAlias);
+            }
+
+            // We can also mark the return value as `dereferenceable` in certain cases
+            match ret_ty.sty {
+                // These are not really pointers but pairs, (pointer, len)
+                ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
+                ty::TyBox(ty) => {
+                    let llty = type_of::sizing_type_of(ccx, ty);
+                    let llsz = llsize_of_real(ccx, llty);
+                    ret.attrs.set_dereferenceable(llsz);
+                }
+                _ => {}
+            }
+        }
+
+        let mut args = Vec::with_capacity(inputs.len() + extra_args.len());
+
+        // Handle safe Rust thin and fat pointers.
+        let rust_ptr_attrs = |ty: Ty<'tcx>, arg: &mut ArgType| match ty.sty {
+            // `Box` pointer parameters never alias because ownership is transferred
+            ty::TyBox(inner) => {
+                arg.attrs.set(llvm::Attribute::NoAlias);
+                Some(inner)
+            }
+
+            ty::TyRef(b, mt) => {
+                use rustc::ty::{BrAnon, ReLateBound};
+
+                // `&mut` pointer parameters never alias other parameters, or mutable global data
+                //
+                // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as
+                // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely
+                // on memory dependencies rather than pointer equality
+                let interior_unsafe = mt.ty.type_contents(ccx.tcx()).interior_unsafe();
+
+                if mt.mutbl != hir::MutMutable && !interior_unsafe {
+                    arg.attrs.set(llvm::Attribute::NoAlias);
+                }
+
+                if mt.mutbl == hir::MutImmutable && !interior_unsafe {
+                    arg.attrs.set(llvm::Attribute::ReadOnly);
+                }
+
+                // When a reference in an argument has no named lifetime, it's
+                // impossible for that reference to escape this function
+                // (returned or stored beyond the call by a closure).
+                if let ReLateBound(_, BrAnon(_)) = *b {
+                    arg.attrs.set(llvm::Attribute::NoCapture);
+                }
+
+                Some(mt.ty)
+            }
+            _ => None
+        };
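+        // As a sketch of the effect: a `&T` parameter whose pointee has no
+        // interior mutability is marked `noalias` and `readonly`, any
+        // reference with an anonymous (elided) lifetime additionally gets
+        // `nocapture`, and a `Box<T>` parameter is `noalias` because
+        // ownership is transferred to the callee.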
+
+        for ty in inputs.iter().chain(extra_args.iter()) {
+            let mut arg = arg_of(ty, false);
+
+            if type_is_fat_ptr(ccx.tcx(), ty) {
+                let original_tys = arg.original_ty.field_types();
+                let sizing_tys = arg.ty.field_types();
+                assert_eq!((original_tys.len(), sizing_tys.len()), (2, 2));
+
+                let mut data = ArgType::new(original_tys[0], sizing_tys[0]);
+                let mut info = ArgType::new(original_tys[1], sizing_tys[1]);
+
+                if let Some(inner) = rust_ptr_attrs(ty, &mut data) {
+                    data.attrs.set(llvm::Attribute::NonNull);
+                    if ccx.tcx().struct_tail(inner).is_trait() {
+                        info.attrs.set(llvm::Attribute::NonNull);
+                    }
+                }
+                args.push(data);
+                args.push(info);
+            } else {
+                if let Some(inner) = rust_ptr_attrs(ty, &mut arg) {
+                    let llty = type_of::sizing_type_of(ccx, inner);
+                    let llsz = llsize_of_real(ccx, llty);
+                    arg.attrs.set_dereferenceable(llsz);
+                }
+                args.push(arg);
+            }
+        }
+
+        FnType {
+            args: args,
+            ret: ret,
+            variadic: sig.variadic,
+            cconv: cconv
+        }
+    }
+
+    pub fn adjust_for_abi<'a, 'tcx>(&mut self,
+                                    ccx: &CrateContext<'a, 'tcx>,
+                                    abi: Abi,
+                                    sig: &ty::FnSig<'tcx>) {
+        if abi == Abi::Rust || abi == Abi::RustCall ||
+           abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
+            let fixup = |arg: &mut ArgType| {
+                let mut llty = arg.ty;
+
+                // Replace newtypes with their inner-most type.
+                while llty.kind() == llvm::TypeKind::Struct {
+                    let inner = llty.field_types();
+                    if inner.len() != 1 {
+                        break;
+                    }
+                    llty = inner[0];
+                }
+
+                if !llty.is_aggregate() {
+                    // Scalars and vectors, always immediate.
+                    if llty != arg.ty {
+                        // Needs a cast as we've unpacked a newtype.
+                        arg.cast = Some(llty);
+                    }
+                    return;
+                }
+
+                let size = llsize_of_real(ccx, llty);
+                if size > llsize_of_real(ccx, ccx.int_type()) {
+                    arg.make_indirect(ccx);
+                } else if size > 0 {
+                    // We want to pass small aggregates as immediates, but using
+                    // an LLVM aggregate type for this leads to bad optimizations,
+                    // so we pick an appropriately sized integer type instead.
+                    arg.cast = Some(Type::ix(ccx, size * 8));
+                }
+            };
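+            // Roughly: a newtype like `struct Wrapped(u64)` is unwrapped and
+            // passed as a plain `i64`; a small aggregate such as `(u32, u32)`
+            // is cast to an integer of the same size (`i64` on a 64-bit
+            // target); anything larger than a pointer-sized integer is made
+            // indirect.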
+            // Fat pointers are returned by-value.
+            if !self.ret.is_ignore() {
+                if !type_is_fat_ptr(ccx.tcx(), sig.output.unwrap()) {
+                    fixup(&mut self.ret);
+                }
+            }
+            for arg in &mut self.args {
+                if arg.is_ignore() { continue; }
+                fixup(arg);
+            }
+            if self.ret.is_indirect() {
+                self.ret.attrs.set(llvm::Attribute::StructRet);
+            }
+            return;
+        }
+
+        match &ccx.sess().target.target.arch[..] {
+            "x86" => cabi_x86::compute_abi_info(ccx, self),
+            "x86_64" => if ccx.sess().target.target.options.is_like_windows {
+                cabi_x86_win64::compute_abi_info(ccx, self);
+            } else {
+                cabi_x86_64::compute_abi_info(ccx, self);
+            },
+            "aarch64" => cabi_aarch64::compute_abi_info(ccx, self),
+            "arm" => {
+                let flavor = if ccx.sess().target.target.target_os == "ios" {
+                    cabi_arm::Flavor::Ios
+                } else {
+                    cabi_arm::Flavor::General
+                };
+                cabi_arm::compute_abi_info(ccx, self, flavor);
+            },
+            "mips" => cabi_mips::compute_abi_info(ccx, self),
+            "powerpc" => cabi_powerpc::compute_abi_info(ccx, self),
+            "powerpc64" => cabi_powerpc64::compute_abi_info(ccx, self),
+            "asmjs" => cabi_asmjs::compute_abi_info(ccx, self),
+            a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a))
+        }
+
+        if self.ret.is_indirect() {
+            self.ret.attrs.set(llvm::Attribute::StructRet);
+        }
+    }
+
+    pub fn llvm_type(&self, ccx: &CrateContext) -> Type {
+        let mut llargument_tys = Vec::new();
+
+        let llreturn_ty = if self.ret.is_ignore() {
+            Type::void(ccx)
+        } else if self.ret.is_indirect() {
+            llargument_tys.push(self.ret.original_ty.ptr_to());
+            Type::void(ccx)
+        } else {
+            self.ret.cast.unwrap_or(self.ret.original_ty)
+        };
+
+        for arg in &self.args {
+            if arg.is_ignore() {
+                continue;
+            }
+            // add padding
+            if let Some(ty) = arg.pad {
+                llargument_tys.push(ty);
+            }
+
+            let llarg_ty = if arg.is_indirect() {
+                arg.original_ty.ptr_to()
+            } else {
+                arg.cast.unwrap_or(arg.original_ty)
+            };
+
+            llargument_tys.push(llarg_ty);
+        }
+
+        if self.variadic {
+            Type::variadic_func(&llargument_tys, &llreturn_ty)
+        } else {
+            Type::func(&llargument_tys, &llreturn_ty)
+        }
+    }
+
+    pub fn apply_attrs_llfn(&self, llfn: ValueRef) {
+        let mut i = if self.ret.is_indirect() { 1 } else { 0 };
+        if !self.ret.is_ignore() {
+            self.ret.attrs.apply_llfn(i, llfn);
+        }
+        i += 1;
+        for arg in &self.args {
+            if !arg.is_ignore() {
+                if arg.pad.is_some() { i += 1; }
+                arg.attrs.apply_llfn(i, llfn);
+                i += 1;
+            }
+        }
+    }
+
+    pub fn apply_attrs_callsite(&self, callsite: ValueRef) {
+        let mut i = if self.ret.is_indirect() { 1 } else { 0 };
+        if !self.ret.is_ignore() {
+            self.ret.attrs.apply_callsite(i, callsite);
+        }
+        i += 1;
+        for arg in &self.args {
+            if !arg.is_ignore() {
+                if arg.pad.is_some() { i += 1; }
+                arg.attrs.apply_callsite(i, callsite);
+                i += 1;
+            }
+        }
+
+        if self.cconv != llvm::CCallConv {
+            llvm::SetInstructionCallConv(callsite, self.cconv);
+        }
+    }
+}
diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs
new file mode 100644 (file)
index 0000000..8922aa0
--- /dev/null
@@ -0,0 +1,1524 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! # Representation of Algebraic Data Types
+//!
+//! This module determines how to represent enums, structs, and tuples
+//! based on their monomorphized types; it is responsible both for
+//! choosing a representation and translating basic operations on
+//! values of those types.  (Note: exporting the representations for
+//! debuggers is handled in debuginfo.rs, not here.)
+//!
+//! Note that the interface treats everything as a general case of an
+//! enum, so structs/tuples/etc. have one pseudo-variant with
+//! discriminant 0; i.e., as if they were a univariant enum.
+//!
+//! Having everything in one place will enable improvements to data
+//! structure representation; possibilities include:
+//!
+//! - User-specified alignment (e.g., cacheline-aligning parts of
+//!   concurrently accessed data structures); LLVM can't represent this
+//!   directly, so we'd have to insert padding fields in any structure
+//!   that might contain one and adjust GEP indices accordingly.  See
+//!   issue #4578.
+//!
+//! - Store nested enums' discriminants in the same word.  Rather, if
+//!   some variants start with enums, and those enums' representations
+//!   have unused alignment padding between discriminant and body, the
+//!   outer enum's discriminant can be stored there and those variants
+//!   can start at offset 0.  Kind of fancy, and might need work to
+//!   make copies of the inner enum type cooperate, but it could help
+//!   with `Option` or `Result` wrapped around another enum.
+//!
+//! - Tagged pointers would be neat, but given that any type can be
+//!   used unboxed and any field can have pointers (including mutable)
+//!   taken to it, implementing them for Rust seems difficult.
+
+pub use self::Repr::*;
+use super::Disr;
+
+use std;
+use std::rc::Rc;
+
+use llvm::{ValueRef, True, IntEQ, IntNE};
+use rustc::ty::subst;
+use rustc::ty::{self, Ty, TyCtxt};
+use syntax::ast;
+use syntax::attr;
+use syntax::attr::IntType;
+use _match;
+use abi::FAT_PTR_ADDR;
+use base::InitAlloca;
+use build::*;
+use cleanup;
+use cleanup::CleanupMethods;
+use common::*;
+use datum;
+use debuginfo::DebugLoc;
+use glue;
+use machine;
+use monomorphize;
+use type_::Type;
+use type_of;
+use value::Value;
+
+type Hint = attr::ReprAttr;
+
+// Representation of the context surrounding an unsized type. I want
+// to be able to track the drop flags that are injected by trans.
+#[derive(Clone, Copy, PartialEq, Debug)]
+pub struct TypeContext {
+    prefix: Type,
+    needs_drop_flag: bool,
+}
+
+impl TypeContext {
+    pub fn prefix(&self) -> Type { self.prefix }
+    pub fn needs_drop_flag(&self) -> bool { self.needs_drop_flag }
+
+    fn direct(t: Type) -> TypeContext {
+        TypeContext { prefix: t, needs_drop_flag: false }
+    }
+    fn may_need_drop_flag(t: Type, needs_drop_flag: bool) -> TypeContext {
+        TypeContext { prefix: t, needs_drop_flag: needs_drop_flag }
+    }
+}
+
+/// Representations.
+#[derive(Eq, PartialEq, Debug)]
+pub enum Repr<'tcx> {
+    /// C-like enums; basically an int.
+    CEnum(IntType, Disr, Disr), // discriminant range (signedness based on the IntType)
+    /// Single-case variants, and structs/tuples/records.
+    ///
+    /// Structs with destructors need a dynamic destroyedness flag to
+    /// avoid running the destructor too many times; this is included
+    /// in the `Struct` if present.
+    /// (The flag, if nonzero, represents the initialization value to use;
+    ///  if zero, then use no flag at all.)
+    Univariant(Struct<'tcx>, u8),
+    /// General-case enums: for each case there is a struct, and they
+    /// all start with a field for the discriminant.
+    ///
+    /// Types with destructors need a dynamic destroyedness flag to
+    /// avoid running the destructor too many times; the last argument
+    /// indicates whether such a flag is present.
+    /// (The flag, if nonzero, represents the initialization value to use;
+    ///  if zero, then use no flag at all.)
+    General(IntType, Vec<Struct<'tcx>>, u8),
+    /// Two cases distinguished by a nullable pointer: the case with discriminant
+    /// `nndiscr` must have a single field which is known to be nonnull due to its type.
+    /// The other case is known to be zero sized. Hence we represent the enum
+    /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
+    /// otherwise it indicates the other case.
+    RawNullablePointer {
+        nndiscr: Disr,
+        nnty: Ty<'tcx>,
+        nullfields: Vec<Ty<'tcx>>
+    },
+    /// Two cases distinguished by a nullable pointer: the case with discriminant
+    /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
+    /// field is known to be nonnull due to its type; if that field is null, then
+    /// it represents the other case, which is inhabited by at most one value
+    /// (and all other fields are undefined/unused).
+    ///
+    /// For example, `std::option::Option` instantiated at a safe pointer type
+    /// is represented such that `None` is a null pointer and `Some` is the
+    /// identity function.
+    StructWrappedNullablePointer {
+        nonnull: Struct<'tcx>,
+        nndiscr: Disr,
+        discrfield: DiscrField,
+        nullfields: Vec<Ty<'tcx>>,
+    }
+}
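+
+// Illustrative mapping from source types to `Repr` variants, assuming the
+// default representation and a typical 64-bit target (the actual decision is
+// made by `represent_type_uncached` below):
+//
+//   enum Ordering { Less, Equal, Greater }  =>  CEnum
+//   struct Point { x: i32, y: i32 }         =>  Univariant
+//   enum E { A(u32), B(i64) }               =>  General
+//   Option<&u32>                            =>  RawNullablePointer
+//   Option<&str>                            =>  StructWrappedNullablePointer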
+
+/// For structs, and struct-like parts of anything fancier.
+#[derive(Eq, PartialEq, Debug)]
+pub struct Struct<'tcx> {
+    // If the struct is DST, then the size and alignment do not take into
+    // account the unsized fields of the struct.
+    pub size: u64,
+    pub align: u32,
+    pub sized: bool,
+    pub packed: bool,
+    pub fields: Vec<Ty<'tcx>>,
+}
+
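+/// A possibly-fat pointer to a value: `value` is the data pointer, and `meta`
+/// carries the extra word for unsized values (slice length or vtable pointer),
+/// or is null when the value is statically sized (see `has_meta`).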
+#[derive(Copy, Clone)]
+pub struct MaybeSizedValue {
+    pub value: ValueRef,
+    pub meta: ValueRef,
+}
+
+impl MaybeSizedValue {
+    pub fn sized(value: ValueRef) -> MaybeSizedValue {
+        MaybeSizedValue {
+            value: value,
+            meta: std::ptr::null_mut()
+        }
+    }
+
+    pub fn unsized_(value: ValueRef, meta: ValueRef) -> MaybeSizedValue {
+        MaybeSizedValue {
+            value: value,
+            meta: meta
+        }
+    }
+
+    pub fn has_meta(&self) -> bool {
+        !self.meta.is_null()
+    }
+}
+
+/// Convenience for `represent_type`.  There should probably be more of
+/// these, for places in trans where the `Ty` isn't directly
+/// available.
+pub fn represent_node<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                  node: ast::NodeId) -> Rc<Repr<'tcx>> {
+    represent_type(bcx.ccx(), node_id_type(bcx, node))
+}
+
+/// Decides how to represent a given type.
+pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                t: Ty<'tcx>)
+                                -> Rc<Repr<'tcx>> {
+    debug!("Representing: {}", t);
+    match cx.adt_reprs().borrow().get(&t) {
+        Some(repr) => return repr.clone(),
+        None => {}
+    }
+
+    let repr = Rc::new(represent_type_uncached(cx, t));
+    debug!("Represented as: {:?}", repr);
+    cx.adt_reprs().borrow_mut().insert(t, repr.clone());
+    repr
+}
+
+const fn repeat_u8_as_u32(val: u8) -> u32 {
+    (val as u32) << 24 | (val as u32) << 16 | (val as u32) << 8 | val as u32
+}
+
+const fn repeat_u8_as_u64(val: u8) -> u64 {
+    (repeat_u8_as_u32(val) as u64) << 32 | repeat_u8_as_u32(val) as u64
+}
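+
+// For example, `repeat_u8_as_u32(0xAB)` is `0xABABABAB` and
+// `repeat_u8_as_u64(0xAB)` is `0xABABABABABABABAB`; these are used to build
+// the `DTOR_*_U64` fill patterns below.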
+
+/// `DTOR_NEEDED_HINT` is a stack-local hint that just means
+/// "we do not know whether the destructor has run or not; check the
+/// drop-flag embedded in the value itself."
+pub const DTOR_NEEDED_HINT: u8 = 0x3d;
+
+/// `DTOR_MOVED_HINT` is a stack-local hint that means "this value has
+/// definitely been moved; you do not need to run its destructor."
+///
+/// (However, for now, such values may still end up being explicitly
+/// zeroed by the generated code; this is the distinction between
+/// `datum::DropFlagInfo::ZeroAndMaintain` versus
+/// `datum::DropFlagInfo::DontZeroJustUse`.)
+pub const DTOR_MOVED_HINT: u8 = 0x2d;
+
+pub const DTOR_NEEDED: u8 = 0xd4;
+#[allow(dead_code)]
+pub const DTOR_NEEDED_U64: u64 = repeat_u8_as_u64(DTOR_NEEDED);
+
+pub const DTOR_DONE: u8 = 0x1d;
+#[allow(dead_code)]
+pub const DTOR_DONE_U64: u64 = repeat_u8_as_u64(DTOR_DONE);
+
+fn dtor_to_init_u8(dtor: bool) -> u8 {
+    if dtor { DTOR_NEEDED } else { 0 }
+}
+
+pub trait GetDtorType<'tcx> { fn dtor_type(&self) -> Ty<'tcx>; }
+impl<'tcx> GetDtorType<'tcx> for TyCtxt<'tcx> {
+    fn dtor_type(&self) -> Ty<'tcx> { self.types.u8 }
+}
+
+fn dtor_active(flag: u8) -> bool {
+    flag != 0
+}
+
+fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                     t: Ty<'tcx>) -> Repr<'tcx> {
+    match t.sty {
+        ty::TyTuple(ref elems) => {
+            Univariant(mk_struct(cx, &elems[..], false, t), 0)
+        }
+        ty::TyStruct(def, substs) => {
+            let mut ftys = def.struct_variant().fields.iter().map(|field| {
+                monomorphize::field_ty(cx.tcx(), substs, field)
+            }).collect::<Vec<_>>();
+            let packed = cx.tcx().lookup_packed(def.did);
+            // FIXME(16758) don't add a drop flag to unsized structs, as it
+            // won't actually be in the location we say it is because it'll be after
+            // the unsized field. Several other pieces of code assume that the unsized
+            // field is definitely the last one.
+            let dtor = def.dtor_kind().has_drop_flag() && type_is_sized(cx.tcx(), t);
+            if dtor {
+                ftys.push(cx.tcx().dtor_type());
+            }
+
+            Univariant(mk_struct(cx, &ftys[..], packed, t), dtor_to_init_u8(dtor))
+        }
+        ty::TyClosure(_, ref substs) => {
+            Univariant(mk_struct(cx, &substs.upvar_tys, false, t), 0)
+        }
+        ty::TyEnum(def, substs) => {
+            let cases = get_cases(cx.tcx(), def, substs);
+            let hint = *cx.tcx().lookup_repr_hints(def.did).get(0)
+                .unwrap_or(&attr::ReprAny);
+
+            let dtor = def.dtor_kind().has_drop_flag();
+
+            if cases.is_empty() {
+                // Uninhabitable; represent as unit
+                // (Typechecking will reject discriminant-sizing attrs.)
+                assert_eq!(hint, attr::ReprAny);
+                let ftys = if dtor { vec!(cx.tcx().dtor_type()) } else { vec!() };
+                return Univariant(mk_struct(cx, &ftys[..], false, t),
+                                  dtor_to_init_u8(dtor));
+            }
+
+            if !dtor && cases.iter().all(|c| c.tys.is_empty()) {
+                // All bodies empty -> intlike
+                let discrs: Vec<_> = cases.iter().map(|c| Disr::from(c.discr)).collect();
+                let bounds = IntBounds {
+                    ulo: discrs.iter().min().unwrap().0,
+                    uhi: discrs.iter().max().unwrap().0,
+                    slo: discrs.iter().map(|n| n.0 as i64).min().unwrap(),
+                    shi: discrs.iter().map(|n| n.0 as i64).max().unwrap()
+                };
+                return mk_cenum(cx, hint, &bounds);
+            }
+
+            // Since there's at least one
+            // non-empty body, explicit discriminants should have
+            // been rejected by a checker before this point.
+            if !cases.iter().enumerate().all(|(i,c)| c.discr == Disr::from(i)) {
+                bug!("non-C-like enum {} with specified discriminants",
+                     cx.tcx().item_path_str(def.did));
+            }
+
+            if cases.len() == 1 {
+                // Equivalent to a struct/tuple/newtype.
+                // (Typechecking will reject discriminant-sizing attrs.)
+                assert_eq!(hint, attr::ReprAny);
+                let mut ftys = cases[0].tys.clone();
+                if dtor { ftys.push(cx.tcx().dtor_type()); }
+                return Univariant(mk_struct(cx, &ftys[..], false, t),
+                                  dtor_to_init_u8(dtor));
+            }
+
+            if !dtor && cases.len() == 2 && hint == attr::ReprAny {
+                // Nullable pointer optimization
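+                // For example, for `Option<&T>` the `None` variant is
+                // zero-sized and `Some` holds a single field that can never
+                // be null, so the whole enum can be represented as just that
+                // pointer, with null standing in for `None`.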
+                let mut discr = 0;
+                while discr < 2 {
+                    if cases[1 - discr].is_zerolen(cx, t) {
+                        let st = mk_struct(cx, &cases[discr].tys,
+                                           false, t);
+                        match cases[discr].find_ptr(cx) {
+                            Some(ref df) if df.len() == 1 && st.fields.len() == 1 => {
+                                return RawNullablePointer {
+                                    nndiscr: Disr::from(discr),
+                                    nnty: st.fields[0],
+                                    nullfields: cases[1 - discr].tys.clone()
+                                };
+                            }
+                            Some(mut discrfield) => {
+                                discrfield.push(0);
+                                discrfield.reverse();
+                                return StructWrappedNullablePointer {
+                                    nndiscr: Disr::from(discr),
+                                    nonnull: st,
+                                    discrfield: discrfield,
+                                    nullfields: cases[1 - discr].tys.clone()
+                                };
+                            }
+                            None => {}
+                        }
+                    }
+                    discr += 1;
+                }
+            }
+
+            // The general case.
+            assert!((cases.len() - 1) as i64 >= 0);
+            let bounds = IntBounds { ulo: 0, uhi: (cases.len() - 1) as u64,
+                                     slo: 0, shi: (cases.len() - 1) as i64 };
+            let min_ity = range_to_inttype(cx, hint, &bounds);
+
+            // Create the set of structs that represent each variant
+            // Use the minimum integer type we figured out above
+            let fields : Vec<_> = cases.iter().map(|c| {
+                let mut ftys = vec!(ty_of_inttype(cx.tcx(), min_ity));
+                ftys.extend_from_slice(&c.tys);
+                if dtor { ftys.push(cx.tcx().dtor_type()); }
+                mk_struct(cx, &ftys, false, t)
+            }).collect();
+
+
+            // Check to see if we should use a different type for the
+            // discriminant. If the overall alignment of the type is
+            // the same as the first field in each variant, we can safely use
+            // an alignment-sized type.
+            // We increase the size of the discriminant to avoid LLVM copying
+            // padding when it doesn't need to. This normally causes unaligned
+            // load/stores and excessive memcpy/memset operations. By using a
+            // bigger integer size, LLVM can be sure about its contents and
+            // won't be so conservative.
+            // This check is needed to avoid increasing the size of types when
+            // the alignment of the first field is smaller than the overall
+            // alignment of the type.
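+            // For example, on a 64-bit target, `enum E { A(u64), B }` has an
+            // overall alignment of 8 and `A`'s first field is a u64 with that
+            // same alignment, so the u8-sized discriminant chosen above is
+            // widened to u64, leaving no padding before the payload.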
+            let (_, align) = union_size_and_align(&fields);
+            let mut use_align = true;
+            for st in &fields {
+                // Get the first non-zero-sized field
+                let field = st.fields.iter().skip(1).filter(|ty| {
+                    let t = type_of::sizing_type_of(cx, **ty);
+                    machine::llsize_of_real(cx, t) != 0 ||
+                    // This case is only relevant for zero-sized types with large alignment
+                    machine::llalign_of_min(cx, t) != 1
+                }).next();
+
+                if let Some(field) = field {
+                    let field_align = type_of::align_of(cx, *field);
+                    if field_align != align {
+                        use_align = false;
+                        break;
+                    }
+                }
+            }
+
+            // If the alignment is smaller than the chosen discriminant size, don't use the
+            // alignment as the final size.
+            let min_ty = ll_inttype(&cx, min_ity);
+            let min_size = machine::llsize_of_real(cx, min_ty);
+            if (align as u64) < min_size {
+                use_align = false;
+            }
+
+            let ity = if use_align {
+                // Use the overall alignment
+                match align {
+                    1 => attr::UnsignedInt(ast::UintTy::U8),
+                    2 => attr::UnsignedInt(ast::UintTy::U16),
+                    4 => attr::UnsignedInt(ast::UintTy::U32),
+                    8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 =>
+                        attr::UnsignedInt(ast::UintTy::U64),
+                    _ => min_ity // use min_ity as a fallback
+                }
+            } else {
+                min_ity
+            };
+
+            let fields : Vec<_> = cases.iter().map(|c| {
+                let mut ftys = vec!(ty_of_inttype(cx.tcx(), ity));
+                ftys.extend_from_slice(&c.tys);
+                if dtor { ftys.push(cx.tcx().dtor_type()); }
+                mk_struct(cx, &ftys[..], false, t)
+            }).collect();
+
+            ensure_enum_fits_in_address_space(cx, &fields[..], t);
+
+            General(ity, fields, dtor_to_init_u8(dtor))
+        }
+        _ => bug!("adt::represent_type called on non-ADT type: {}", t)
+    }
+}
+
+// this should probably all be in ty
+struct Case<'tcx> {
+    discr: Disr,
+    tys: Vec<Ty<'tcx>>
+}
+
+/// This represents the (GEP) indices to follow to get to the discriminant field
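+///
+/// The leading index is always 0 (the usual GEP "deref" step); the remaining
+/// indices walk from the outermost struct inwards.  For example (illustrative
+/// only), `[0, 2, 0]` points at field 0 of field 2 of the non-null variant's
+/// struct, and the pointer stored there is null exactly when the other
+/// variant is active.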
+pub type DiscrField = Vec<usize>;
+
+fn find_discr_field_candidate<'tcx>(tcx: &TyCtxt<'tcx>,
+                                    ty: Ty<'tcx>,
+                                    mut path: DiscrField) -> Option<DiscrField> {
+    match ty.sty {
+        // Fat &T/&mut T/Box<T> i.e. T is [T], str, or Trait
+        ty::TyRef(_, ty::TypeAndMut { ty, .. }) | ty::TyBox(ty) if !type_is_sized(tcx, ty) => {
+            path.push(FAT_PTR_ADDR);
+            Some(path)
+        },
+
+        // Regular thin pointer: &T/&mut T/Box<T>
+        ty::TyRef(..) | ty::TyBox(..) => Some(path),
+
+        // Function pointer: `fn() -> i32`
+        ty::TyFnPtr(_) => Some(path),
+
+        // Is this the NonZero lang item wrapping a pointer or integer type?
+        ty::TyStruct(def, substs) if Some(def.did) == tcx.lang_items.non_zero() => {
+            let nonzero_fields = &def.struct_variant().fields;
+            assert_eq!(nonzero_fields.len(), 1);
+            let field_ty = monomorphize::field_ty(tcx, substs, &nonzero_fields[0]);
+            match field_ty.sty {
+                ty::TyRawPtr(ty::TypeAndMut { ty, .. }) if !type_is_sized(tcx, ty) => {
+                    path.extend_from_slice(&[0, FAT_PTR_ADDR]);
+                    Some(path)
+                },
+                ty::TyRawPtr(..) | ty::TyInt(..) | ty::TyUint(..) => {
+                    path.push(0);
+                    Some(path)
+                },
+                _ => None
+            }
+        },
+
+        // Perhaps one of the fields of this struct is non-zero
+        // let's recurse and find out
+        ty::TyStruct(def, substs) => {
+            for (j, field) in def.struct_variant().fields.iter().enumerate() {
+                let field_ty = monomorphize::field_ty(tcx, substs, field);
+                if let Some(mut fpath) = find_discr_field_candidate(tcx, field_ty, path.clone()) {
+                    fpath.push(j);
+                    return Some(fpath);
+                }
+            }
+            None
+        },
+
+        // Perhaps one of the upvars of this struct is non-zero
+        // Let's recurse and find out!
+        ty::TyClosure(_, ref substs) => {
+            for (j, &ty) in substs.upvar_tys.iter().enumerate() {
+                if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) {
+                    fpath.push(j);
+                    return Some(fpath);
+                }
+            }
+            None
+        },
+
+        // Can we use one of the fields in this tuple?
+        ty::TyTuple(ref tys) => {
+            for (j, &ty) in tys.iter().enumerate() {
+                if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) {
+                    fpath.push(j);
+                    return Some(fpath);
+                }
+            }
+            None
+        },
+
+        // Is this a fixed-size array of something non-zero
+        // with at least one element?
+        ty::TyArray(ety, d) if d > 0 => {
+            if let Some(mut vpath) = find_discr_field_candidate(tcx, ety, path) {
+                vpath.push(0);
+                Some(vpath)
+            } else {
+                None
+            }
+        },
+
+        // Anything else is not a pointer
+        _ => None
+    }
+}
+
+impl<'tcx> Case<'tcx> {
+    fn is_zerolen<'a>(&self, cx: &CrateContext<'a, 'tcx>, scapegoat: Ty<'tcx>) -> bool {
+        mk_struct(cx, &self.tys, false, scapegoat).size == 0
+    }
+
+    fn find_ptr<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Option<DiscrField> {
+        for (i, &ty) in self.tys.iter().enumerate() {
+            if let Some(mut path) = find_discr_field_candidate(cx.tcx(), ty, vec![]) {
+                path.push(i);
+                return Some(path);
+            }
+        }
+        None
+    }
+}
+
+fn get_cases<'tcx>(tcx: &TyCtxt<'tcx>,
+                   adt: ty::AdtDef<'tcx>,
+                   substs: &subst::Substs<'tcx>)
+                   -> Vec<Case<'tcx>> {
+    adt.variants.iter().map(|vi| {
+        let field_tys = vi.fields.iter().map(|field| {
+            monomorphize::field_ty(tcx, substs, field)
+        }).collect();
+        Case { discr: Disr::from(vi.disr_val), tys: field_tys }
+    }).collect()
+}
+
+fn mk_struct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                       tys: &[Ty<'tcx>], packed: bool,
+                       scapegoat: Ty<'tcx>)
+                       -> Struct<'tcx> {
+    let sized = tys.iter().all(|&ty| type_is_sized(cx.tcx(), ty));
+    let lltys : Vec<Type> = if sized {
+        tys.iter().map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
+    } else {
+        tys.iter().filter(|&ty| type_is_sized(cx.tcx(), *ty))
+           .map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
+    };
+
+    ensure_struct_fits_in_address_space(cx, &lltys[..], packed, scapegoat);
+
+    let llty_rec = Type::struct_(cx, &lltys[..], packed);
+    Struct {
+        size: machine::llsize_of_alloc(cx, llty_rec),
+        align: machine::llalign_of_min(cx, llty_rec),
+        sized: sized,
+        packed: packed,
+        fields: tys.to_vec(),
+    }
+}
+
+#[derive(Debug)]
+struct IntBounds {
+    slo: i64,
+    shi: i64,
+    ulo: u64,
+    uhi: u64
+}
+
+fn mk_cenum<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                      hint: Hint, bounds: &IntBounds)
+                      -> Repr<'tcx> {
+    let it = range_to_inttype(cx, hint, bounds);
+    match it {
+        attr::SignedInt(_) => CEnum(it, Disr(bounds.slo as u64), Disr(bounds.shi as u64)),
+        attr::UnsignedInt(_) => CEnum(it, Disr(bounds.ulo), Disr(bounds.uhi))
+    }
+}
+
+fn range_to_inttype(cx: &CrateContext, hint: Hint, bounds: &IntBounds) -> IntType {
+    debug!("range_to_inttype: {:?} {:?}", hint, bounds);
+    // Lists of sizes to try.  u64 is always allowed as a fallback.
+    #[allow(non_upper_case_globals)]
+    const choose_shortest: &'static [IntType] = &[
+        attr::UnsignedInt(ast::UintTy::U8), attr::SignedInt(ast::IntTy::I8),
+        attr::UnsignedInt(ast::UintTy::U16), attr::SignedInt(ast::IntTy::I16),
+        attr::UnsignedInt(ast::UintTy::U32), attr::SignedInt(ast::IntTy::I32)];
+    #[allow(non_upper_case_globals)]
+    const at_least_32: &'static [IntType] = &[
+        attr::UnsignedInt(ast::UintTy::U32), attr::SignedInt(ast::IntTy::I32)];
+
+    let attempts;
+    match hint {
+        attr::ReprInt(span, ity) => {
+            if !bounds_usable(cx, ity, bounds) {
+                span_bug!(span, "representation hint insufficient for discriminant range")
+            }
+            return ity;
+        }
+        attr::ReprExtern => {
+            attempts = match &cx.sess().target.target.arch[..] {
+                // WARNING: the ARM EABI has two variants; the one corresponding to `at_least_32`
+                // appears to be used on Linux and NetBSD, but some systems may use the variant
+                // corresponding to `choose_shortest`.  However, we don't run on those yet...?
+                "arm" => at_least_32,
+                _ => at_least_32,
+            }
+        }
+        attr::ReprAny => {
+            attempts = choose_shortest;
+        },
+        attr::ReprPacked => {
+            bug!("range_to_inttype: found ReprPacked on an enum");
+        }
+        attr::ReprSimd => {
+            bug!("range_to_inttype: found ReprSimd on an enum");
+        }
+    }
+    for &ity in attempts {
+        if bounds_usable(cx, ity, bounds) {
+            return ity;
+        }
+    }
+    return attr::UnsignedInt(ast::UintTy::U64);
+}
+
+pub fn ll_inttype(cx: &CrateContext, ity: IntType) -> Type {
+    match ity {
+        attr::SignedInt(t) => Type::int_from_ty(cx, t),
+        attr::UnsignedInt(t) => Type::uint_from_ty(cx, t)
+    }
+}
+
+fn bounds_usable(cx: &CrateContext, ity: IntType, bounds: &IntBounds) -> bool {
+    debug!("bounds_usable: {:?} {:?}", ity, bounds);
+    match ity {
+        attr::SignedInt(_) => {
+            let lllo = C_integral(ll_inttype(cx, ity), bounds.slo as u64, true);
+            let llhi = C_integral(ll_inttype(cx, ity), bounds.shi as u64, true);
+            bounds.slo == const_to_int(lllo) as i64 && bounds.shi == const_to_int(llhi) as i64
+        }
+        attr::UnsignedInt(_) => {
+            let lllo = C_integral(ll_inttype(cx, ity), bounds.ulo, false);
+            let llhi = C_integral(ll_inttype(cx, ity), bounds.uhi, false);
+            bounds.ulo == const_to_uint(lllo) as u64 && bounds.uhi == const_to_uint(llhi) as u64
+        }
+    }
+}
+
+pub fn ty_of_inttype<'tcx>(tcx: &TyCtxt<'tcx>, ity: IntType) -> Ty<'tcx> {
+    match ity {
+        attr::SignedInt(t) => tcx.mk_mach_int(t),
+        attr::UnsignedInt(t) => tcx.mk_mach_uint(t)
+    }
+}
+
+// LLVM doesn't like types that don't fit in the address space
+fn ensure_struct_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                                 fields: &[Type],
+                                                 packed: bool,
+                                                 scapegoat: Ty<'tcx>) {
+    let mut offset = 0;
+    for &llty in fields {
+        // Invariant: offset < ccx.obj_size_bound() <= 1<<61
+        if !packed {
+            let type_align = machine::llalign_of_min(ccx, llty);
+            offset = roundup(offset, type_align);
+        }
+        // type_align is a power-of-2, so still offset < ccx.obj_size_bound()
+        // llsize_of_alloc(ccx, llty) is also less than ccx.obj_size_bound()
+        // so the sum is less than 1<<62 (and therefore can't overflow).
+        offset += machine::llsize_of_alloc(ccx, llty);
+
+        if offset >= ccx.obj_size_bound() {
+            ccx.report_overbig_object(scapegoat);
+        }
+    }
+}
+
+fn union_size_and_align(sts: &[Struct]) -> (machine::llsize, machine::llalign) {
+    let size = sts.iter().map(|st| st.size).max().unwrap();
+    let align = sts.iter().map(|st| st.align).max().unwrap();
+    (roundup(size, align), align)
+}
+
+fn ensure_enum_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                               fields: &[Struct],
+                                               scapegoat: Ty<'tcx>) {
+    let (total_size, _) = union_size_and_align(fields);
+
+    if total_size >= ccx.obj_size_bound() {
+        ccx.report_overbig_object(scapegoat);
+    }
+}
+
+
+/// LLVM-level types are a little complicated.
+///
+/// C-like enums need to be actual ints, not wrapped in a struct,
+/// because that changes the ABI on some platforms (see issue #10308).
+///
+/// For nominal types, in some cases, we need to use LLVM named structs
+/// and fill in the actual contents in a second pass to prevent
+/// unbounded recursion; see also the comments in `trans::type_of`.
+pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type {
+    let c = generic_type_of(cx, r, None, false, false, false);
+    assert!(!c.needs_drop_flag);
+    c.prefix
+}
+
+
+// Pass dst=true if the type you are passing is a DST. Yes, we could figure
+// this out, but if you call this on an unsized type without realising it, you
+// are going to get the wrong type (it will not include the unsized parts of it).
+pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                r: &Repr<'tcx>, dst: bool) -> Type {
+    let c = generic_type_of(cx, r, None, true, dst, false);
+    assert!(!c.needs_drop_flag);
+    c.prefix
+}
+pub fn sizing_type_context_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                        r: &Repr<'tcx>, dst: bool) -> TypeContext {
+    generic_type_of(cx, r, None, true, dst, true)
+}
+pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                    r: &Repr<'tcx>, name: &str) -> Type {
+    let c = generic_type_of(cx, r, Some(name), false, false, false);
+    assert!(!c.needs_drop_flag);
+    c.prefix
+}
+pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                r: &Repr<'tcx>, llty: &mut Type) {
+    match *r {
+        CEnum(..) | General(..) | RawNullablePointer { .. } => { }
+        Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } =>
+            llty.set_struct_body(&struct_llfields(cx, st, false, false),
+                                 st.packed)
+    }
+}
+
+fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                             r: &Repr<'tcx>,
+                             name: Option<&str>,
+                             sizing: bool,
+                             dst: bool,
+                             delay_drop_flag: bool) -> TypeContext {
+    debug!("adt::generic_type_of r: {:?} name: {:?} sizing: {} dst: {} delay_drop_flag: {}",
+           r, name, sizing, dst, delay_drop_flag);
+    match *r {
+        CEnum(ity, _, _) => TypeContext::direct(ll_inttype(cx, ity)),
+        RawNullablePointer { nnty, .. } =>
+            TypeContext::direct(type_of::sizing_type_of(cx, nnty)),
+        StructWrappedNullablePointer { nonnull: ref st, .. } => {
+            match name {
+                None => {
+                    TypeContext::direct(
+                        Type::struct_(cx, &struct_llfields(cx, st, sizing, dst),
+                                      st.packed))
+                }
+                Some(name) => {
+                    assert_eq!(sizing, false);
+                    TypeContext::direct(Type::named_struct(cx, name))
+                }
+            }
+        }
+        Univariant(ref st, dtor_needed) => {
+            let dtor_needed = dtor_needed != 0;
+            match name {
+                None => {
+                    let mut fields = struct_llfields(cx, st, sizing, dst);
+                    if delay_drop_flag && dtor_needed {
+                        fields.pop();
+                    }
+                    TypeContext::may_need_drop_flag(
+                        Type::struct_(cx, &fields,
+                                      st.packed),
+                        delay_drop_flag && dtor_needed)
+                }
+                Some(name) => {
+                    // Hypothesis: named_struct's can never need a
+                    // drop flag. (... needs validation.)
+                    assert_eq!(sizing, false);
+                    TypeContext::direct(Type::named_struct(cx, name))
+                }
+            }
+        }
+        General(ity, ref sts, dtor_needed) => {
+            let dtor_needed = dtor_needed != 0;
+            // We need a representation that has:
+            // * The alignment of the most-aligned field
+            // * The size of the largest variant (rounded up to that alignment)
+            // * No alignment padding anywhere any variant has actual data
+            //   (currently matters only for enums small enough to be immediate)
+            // * The discriminant in an obvious place.
+            //
+            // So we start with the discriminant, pad it up to the alignment with
+            // more of its own type, then use alignment-sized ints to get the rest
+            // of the size.
+            //
+            // FIXME #10604: this breaks when vector types are present.
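+            //
+            // For example, an enum whose discriminant fits in a u8, whose
+            // most-aligned field needs 8-byte alignment, and whose largest
+            // variant occupies 24 bytes ends up as { i8, [7 x i8], [2 x i64] }:
+            // the discriminant, padding up to the alignment, then
+            // alignment-sized units filling out the size.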
+            let (size, align) = union_size_and_align(&sts[..]);
+            let align_s = align as u64;
+            let discr_ty = ll_inttype(cx, ity);
+            let discr_size = machine::llsize_of_alloc(cx, discr_ty);
+            let padded_discr_size = roundup(discr_size, align);
+            assert_eq!(size % align_s, 0); // Ensure division in align_units comes out evenly
+            let align_units = (size - padded_discr_size) / align_s;
+            let fill_ty = match align_s {
+                1 => Type::array(&Type::i8(cx), align_units),
+                2 => Type::array(&Type::i16(cx), align_units),
+                4 => Type::array(&Type::i32(cx), align_units),
+                8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 =>
+                                 Type::array(&Type::i64(cx), align_units),
+                a if a.count_ones() == 1 => Type::array(&Type::vector(&Type::i32(cx), a / 4),
+                                                              align_units),
+                _ => bug!("unsupported enum alignment: {}", align)
+            };
+            assert_eq!(machine::llalign_of_min(cx, fill_ty), align);
+            assert_eq!(padded_discr_size % discr_size, 0); // Ensure discr_ty can fill pad evenly
+            let mut fields: Vec<Type> =
+                [discr_ty,
+                 Type::array(&discr_ty, (padded_discr_size - discr_size)/discr_size),
+                 fill_ty].iter().cloned().collect();
+            if delay_drop_flag && dtor_needed {
+                fields.pop();
+            }
+            match name {
+                None => {
+                    TypeContext::may_need_drop_flag(
+                        Type::struct_(cx, &fields[..], false),
+                        delay_drop_flag && dtor_needed)
+                }
+                Some(name) => {
+                    let mut llty = Type::named_struct(cx, name);
+                    llty.set_struct_body(&fields[..], false);
+                    TypeContext::may_need_drop_flag(
+                        llty,
+                        delay_drop_flag && dtor_needed)
+                }
+            }
+        }
+    }
+}
+
+fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, st: &Struct<'tcx>,
+                             sizing: bool, dst: bool) -> Vec<Type> {
+    if sizing {
+        st.fields.iter().filter(|&ty| !dst || type_is_sized(cx.tcx(), *ty))
+            .map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
+    } else {
+        st.fields.iter().map(|&ty| type_of::in_memory_type_of(cx, ty)).collect()
+    }
+}
+
+/// Obtain a representation of the discriminant sufficient to translate
+/// destructuring; this may or may not involve the actual discriminant.
+///
+/// This should ideally be less tightly tied to `_match`.
+pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                r: &Repr<'tcx>,
+                                scrutinee: ValueRef,
+                                range_assert: bool)
+                                -> (_match::BranchKind, Option<ValueRef>) {
+    match *r {
+        CEnum(..) | General(..) |
+        RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
+            (_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None,
+                                                  range_assert)))
+        }
+        Univariant(..) => {
+            // N.B.: Univariant means <= 1 enum variants (*not* == 1 variants).
+            (_match::Single, None)
+        }
+    }
+}
+
+pub fn is_discr_signed<'tcx>(r: &Repr<'tcx>) -> bool {
+    match *r {
+        CEnum(ity, _, _) => ity.is_signed(),
+        General(ity, _, _) => ity.is_signed(),
+        Univariant(..) => false,
+        RawNullablePointer { .. } => false,
+        StructWrappedNullablePointer { .. } => false,
+    }
+}
+
+/// Obtain the actual discriminant of a value.
+pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
+                                   scrutinee: ValueRef, cast_to: Option<Type>,
+                                   range_assert: bool)
+    -> ValueRef {
+    debug!("trans_get_discr r: {:?}", r);
+    let val = match *r {
+        CEnum(ity, min, max) => {
+            load_discr(bcx, ity, scrutinee, min, max, range_assert)
+        }
+        General(ity, ref cases, _) => {
+            let ptr = StructGEP(bcx, scrutinee, 0);
+            load_discr(bcx, ity, ptr, Disr(0), Disr(cases.len() as u64 - 1),
+                       range_assert)
+        }
+        Univariant(..) => C_u8(bcx.ccx(), 0),
+        RawNullablePointer { nndiscr, nnty, .. } =>  {
+            let cmp = if nndiscr == Disr(0) { IntEQ } else { IntNE };
+            let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty);
+            ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty), DebugLoc::None)
+        }
+        StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
+            struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee)
+        }
+    };
+    match cast_to {
+        None => val,
+        Some(llty) => if is_discr_signed(r) { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) }
+    }
+}
+
+fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, discrfield: &DiscrField,
+                                    scrutinee: ValueRef) -> ValueRef {
+    let llptrptr = GEPi(bcx, scrutinee, &discrfield[..]);
+    let llptr = Load(bcx, llptrptr);
+    let cmp = if nndiscr == Disr(0) { IntEQ } else { IntNE };
+    ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)), DebugLoc::None)
+}
+
+/// Helper for cases where the discriminant is simply loaded.
+fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr,
+              range_assert: bool)
+    -> ValueRef {
+    let llty = ll_inttype(bcx.ccx(), ity);
+    assert_eq!(val_ty(ptr), llty.ptr_to());
+    let bits = machine::llbitsize_of_real(bcx.ccx(), llty);
+    assert!(bits <= 64);
+    let bits = bits as usize;
+    let mask = Disr(!0u64 >> (64 - bits));
+    // For a (max) discr of -1, max will be `-1 as usize`, which overflows.
+    // However, that is fine here (it would still represent the full range),
+    if max.wrapping_add(Disr(1)) & mask == min & mask || !range_assert {
+        // i.e., if the range is everything.  The lo==hi case would be
+        // rejected by the LLVM verifier (it would mean either an
+        // empty set, which is impossible, or the entire range of the
+        // type, which is pointless).
+        Load(bcx, ptr)
+    } else {
+        // llvm::ConstantRange can deal with ranges that wrap around,
+        // so an overflow on (max + 1) is fine.
+        LoadRangeAssert(bcx, ptr, min.0, max.0.wrapping_add(1), /* signed: */ True)
+    }
+}
+
+/// Yield information about how to dispatch a case of the
+/// discriminant-like value returned by `trans_switch`.
+///
+/// This should ideally be less tightly tied to `_match`.
+pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr)
+                              -> ValueRef {
+    match *r {
+        CEnum(ity, _, _) => {
+            C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true)
+        }
+        General(ity, _, _) => {
+            C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true)
+        }
+        Univariant(..) => {
+            bug!("no cases for univariants or structs")
+        }
+        RawNullablePointer { .. } |
+        StructWrappedNullablePointer { .. } => {
+            assert!(discr == Disr(0) || discr == Disr(1));
+            C_bool(bcx.ccx(), discr != Disr(0))
+        }
+    }
+}
+
+/// Set the discriminant for a new value of the given case of the given
+/// representation.
+pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
+                                   val: ValueRef, discr: Disr) {
+    match *r {
+        CEnum(ity, min, max) => {
+            assert_discr_in_range(ity, min, max, discr);
+            Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true),
+                  val);
+        }
+        General(ity, ref cases, dtor) => {
+            if dtor_active(dtor) {
+                let ptr = trans_field_ptr(bcx, r, MaybeSizedValue::sized(val), discr,
+                                          cases[discr.0 as usize].fields.len() - 2);
+                Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED), ptr);
+            }
+            Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true),
+                  StructGEP(bcx, val, 0));
+        }
+        Univariant(ref st, dtor) => {
+            assert_eq!(discr, Disr(0));
+            if dtor_active(dtor) {
+                Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED),
+                      StructGEP(bcx, val, st.fields.len() - 1));
+            }
+        }
+        RawNullablePointer { nndiscr, nnty, ..} => {
+            if discr != nndiscr {
+                let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty);
+                Store(bcx, C_null(llptrty), val);
+            }
+        }
+        StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
+            if discr != nndiscr {
+                let llptrptr = GEPi(bcx, val, &discrfield[..]);
+                let llptrty = val_ty(llptrptr).element_type();
+                Store(bcx, C_null(llptrty), llptrptr);
+            }
+        }
+    }
+}
+
+fn assert_discr_in_range(ity: IntType, min: Disr, max: Disr, discr: Disr) {
+    match ity {
+        attr::UnsignedInt(_) => {
+            assert!(min <= discr);
+            assert!(discr <= max);
+        },
+        attr::SignedInt(_) => {
+            assert!(min.0 as i64 <= discr.0 as i64);
+            assert!(discr.0 as i64 <= max.0 as i64);
+        },
+    }
+}
+
+/// The number of fields in a given case; for use when obtaining this
+/// information from the type or definition is less convenient.
+pub fn num_args(r: &Repr, discr: Disr) -> usize {
+    match *r {
+        CEnum(..) => 0,
+        Univariant(ref st, dtor) => {
+            assert_eq!(discr, Disr(0));
+            st.fields.len() - (if dtor_active(dtor) { 1 } else { 0 })
+        }
+        General(_, ref cases, dtor) => {
+            cases[discr.0 as usize].fields.len() - 1 - (if dtor_active(dtor) { 1 } else { 0 })
+        }
+        RawNullablePointer { nndiscr, ref nullfields, .. } => {
+            if discr == nndiscr { 1 } else { nullfields.len() }
+        }
+        StructWrappedNullablePointer { ref nonnull, nndiscr,
+                                       ref nullfields, .. } => {
+            if discr == nndiscr { nonnull.fields.len() } else { nullfields.len() }
+        }
+    }
+}
+
+/// Access a field, at a point when the value's case is known.
+pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
+                                   val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef {
+    trans_field_ptr_builder(&bcx.build(), r, val, discr, ix)
+}
+
+/// Access a field, at a point when the value's case is known.
+pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
+                                           r: &Repr<'tcx>,
+                                           val: MaybeSizedValue,
+                                           discr: Disr, ix: usize)
+                                           -> ValueRef {
+    // Note: if this ever needs to generate conditionals (e.g., if we
+    // decide to do some kind of cdr-coding-like non-unique repr
+    // someday), it will need to return a possibly-new bcx as well.
+    match *r {
+        CEnum(..) => {
+            bug!("element access in C-like enum")
+        }
+        Univariant(ref st, _dtor) => {
+            assert_eq!(discr, Disr(0));
+            struct_field_ptr(bcx, st, val, ix, false)
+        }
+        General(_, ref cases, _) => {
+            struct_field_ptr(bcx, &cases[discr.0 as usize], val, ix + 1, true)
+        }
+        RawNullablePointer { nndiscr, ref nullfields, .. } |
+        StructWrappedNullablePointer { nndiscr, ref nullfields, .. } if discr != nndiscr => {
+            // The unit-like case might have a nonzero number of unit-like fields.
+            // (e.g., `Result` or `Either` with `()` as one side.)
+            let ty = type_of::type_of(bcx.ccx(), nullfields[ix]);
+            assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0);
+            // The contents of memory at this pointer can't matter, but use
+            // the value that's "reasonable" in case of pointer comparison.
+            if bcx.is_unreachable() { return C_undef(ty.ptr_to()); }
+            bcx.pointercast(val.value, ty.ptr_to())
+        }
+        RawNullablePointer { nndiscr, nnty, .. } => {
+            assert_eq!(ix, 0);
+            assert_eq!(discr, nndiscr);
+            let ty = type_of::type_of(bcx.ccx(), nnty);
+            if bcx.is_unreachable() { return C_undef(ty.ptr_to()); }
+            bcx.pointercast(val.value, ty.ptr_to())
+        }
+        StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
+            assert_eq!(discr, nndiscr);
+            struct_field_ptr(bcx, nonnull, val, ix, false)
+        }
+    }
+}
+
+fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
+                                st: &Struct<'tcx>, val: MaybeSizedValue,
+                                ix: usize, needs_cast: bool) -> ValueRef {
+    let ccx = bcx.ccx();
+    let fty = st.fields[ix];
+    let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty);
+    if bcx.is_unreachable() {
+        return C_undef(ll_fty.ptr_to());
+    }
+
+    let ptr_val = if needs_cast {
+        let fields = st.fields.iter().map(|&ty| {
+            type_of::in_memory_type_of(ccx, ty)
+        }).collect::<Vec<_>>();
+        let real_ty = Type::struct_(ccx, &fields[..], st.packed);
+        bcx.pointercast(val.value, real_ty.ptr_to())
+    } else {
+        val.value
+    };
+
+    // Simple case - we can just GEP the field
+    //   * First field - Always aligned properly
+    //   * Packed struct - There is no alignment padding
+    //   * Field is sized - pointer is properly aligned already
+    if ix == 0 || st.packed || type_is_sized(bcx.tcx(), fty) {
+        return bcx.struct_gep(ptr_val, ix);
+    }
+
+    // If the type of the last field is [T] or str, then we don't need to do
+    // any adjustments
+    match fty.sty {
+        ty::TySlice(..) | ty::TyStr => {
+            return bcx.struct_gep(ptr_val, ix);
+        }
+        _ => ()
+    }
+
+    // There's no metadata available; log the case and just do the GEP.
+    if !val.has_meta() {
+        debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
+               ix, Value(ptr_val));
+        return bcx.struct_gep(ptr_val, ix);
+    }
+
+    let dbloc = DebugLoc::None;
+
+    // We need to get the pointer manually now.
+    // We do this by casting to a *i8, then offsetting it by the appropriate amount.
+    // We do this instead of, say, simply adjusting the pointer from the result of a GEP
+    // because the field may have an arbitrary alignment in the LLVM representation
+    // anyway.
+    //
+    // To demonstrate:
+    //   struct Foo<T: ?Sized> {
+    //      x: u16,
+    //      y: T
+    //   }
+    //
+    // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
+    // the `y` field has 16-bit alignment.
+
+    let meta = val.meta;
+
+    // Calculate the unaligned offset of the unsized field.
+    let mut offset = 0;
+    for &ty in &st.fields[0..ix] {
+        let llty = type_of::sizing_type_of(ccx, ty);
+        let type_align = type_of::align_of(ccx, ty);
+        offset = roundup(offset, type_align);
+        offset += machine::llsize_of_alloc(ccx, llty);
+    }
+    let unaligned_offset = C_uint(bcx.ccx(), offset);
+
+    // Get the alignment of the field
+    let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta);
+
+    // Bump the unaligned offset up to the appropriate alignment using the
+    // following expression:
+    //
+    //   (unaligned offset + (align - 1)) & -align
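+    //
+    // For example, with an unaligned offset of 5 and an alignment of 4:
+    //   (5 + 3) & -4  ==  8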
+
+    // Calculate offset
+    dbloc.apply(bcx.fcx());
+    let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx(), 1u64));
+    let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1),
+                         bcx.neg(align));
+
+    debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));
+
+    // Cast and adjust pointer
+    let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx()));
+    let byte_ptr = bcx.gep(byte_ptr, &[offset]);
+
+    // Finally, cast back to the type expected
+    let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty);
+    debug!("struct_field_ptr: Field type is {:?}", ll_fty);
+    bcx.pointercast(byte_ptr, ll_fty.ptr_to())
+}
+
+pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
+                                    r: &Repr<'tcx>,
+                                    value: ValueRef,
+                                    mut f: F)
+                                    -> Block<'blk, 'tcx> where
+    F: FnMut(Block<'blk, 'tcx>, &Struct<'tcx>, ValueRef) -> Block<'blk, 'tcx>,
+{
+    let fcx = bcx.fcx;
+    match *r {
+        Univariant(ref st, _) => {
+            f(bcx, st, value)
+        }
+        General(ity, ref cases, _) => {
+            let ccx = bcx.ccx();
+
+            // See the comments in trans/base.rs for more information (inside
+            // iter_structural_ty), but the gist here is that if the enum's
+            // discriminant is *not* in the range that we're expecting (in which
+            // case we'll take the fall-through branch on the switch
+            // instruction) then we can't just optimize this to an Unreachable
+            // block.
+            //
+            // Currently we still have filling drop, so this means that the drop
+            // glue for enums may be called when the enum has been paved over
+            // with the "I've been dropped" value. In this case the default
+            // branch of the switch instruction will actually be taken at
+            // runtime, so the basic block isn't actually unreachable, so we
+            // need to make it do something with defined behavior. In this case
+            // we just return early from the function.
+            //
+            // Note that this is also why the `trans_get_discr` below has
+            // `false` to indicate that loading the discriminant should
+            // not have a range assert.
+            let ret_void_cx = fcx.new_temp_block("enum-variant-iter-ret-void");
+            RetVoid(ret_void_cx, DebugLoc::None);
+
+            let discr_val = trans_get_discr(bcx, r, value, None, false);
+            let llswitch = Switch(bcx, discr_val, ret_void_cx.llbb, cases.len());
+            let bcx_next = fcx.new_temp_block("enum-variant-iter-next");
+
+            for (discr, case) in cases.iter().enumerate() {
+                let mut variant_cx = fcx.new_temp_block(
+                    &format!("enum-variant-iter-{}", &discr.to_string())
+                );
+                let rhs_val = C_integral(ll_inttype(ccx, ity), discr as u64, true);
+                AddCase(llswitch, rhs_val, variant_cx.llbb);
+
+                let fields = case.fields.iter().map(|&ty|
+                    type_of::type_of(bcx.ccx(), ty)).collect::<Vec<_>>();
+                let real_ty = Type::struct_(ccx, &fields[..], case.packed);
+                let variant_value = PointerCast(variant_cx, value, real_ty.ptr_to());
+
+                variant_cx = f(variant_cx, case, variant_value);
+                Br(variant_cx, bcx_next.llbb, DebugLoc::None);
+            }
+
+            bcx_next
+        }
+        _ => bug!()
+    }
+}
+
+/// Access the struct drop flag, if present.
+pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+                                       r: &Repr<'tcx>,
+                                       val: ValueRef)
+                                       -> datum::DatumBlock<'blk, 'tcx, datum::Expr>
+{
+    let tcx = bcx.tcx();
+    let ptr_ty = bcx.tcx().mk_imm_ptr(tcx.dtor_type());
+    match *r {
+        Univariant(ref st, dtor) if dtor_active(dtor) => {
+            let flag_ptr = StructGEP(bcx, val, st.fields.len() - 1);
+            datum::immediate_rvalue_bcx(bcx, flag_ptr, ptr_ty).to_expr_datumblock()
+        }
+        General(_, _, dtor) if dtor_active(dtor) => {
+            let fcx = bcx.fcx;
+            let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
+            let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum(
+                bcx, tcx.dtor_type(), "drop_flag",
+                InitAlloca::Uninit("drop flag itself has no dtor"),
+                cleanup::CustomScope(custom_cleanup_scope), |bcx, _| {
+                    debug!("no-op populate call for trans_drop_flag_ptr on dtor_type={:?}",
+                           tcx.dtor_type());
+                    bcx
+                }
+            ));
+            bcx = fold_variants(bcx, r, val, |variant_cx, st, value| {
+                let ptr = struct_field_ptr(&variant_cx.build(), st,
+                                           MaybeSizedValue::sized(value),
+                                           (st.fields.len() - 1), false);
+                datum::Datum::new(ptr, ptr_ty, datum::Lvalue::new("adt::trans_drop_flag_ptr"))
+                    .store_to(variant_cx, scratch.val)
+            });
+            let expr_datum = scratch.to_expr_datum();
+            fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
+            datum::DatumBlock::new(bcx, expr_datum)
+        }
+        _ => bug!("tried to get drop flag of non-droppable type")
+    }
+}
+
+/// Construct a constant value, suitable for initializing a
+/// GlobalVariable, given a case and constant values for its fields.
+/// Note that this may have a different LLVM type (and different
+/// alignment!) from the representation's `type_of`, so it needs a
+/// pointer cast before use.
+///
+/// The LLVM type system does not directly support unions, and only
+/// pointers can be bitcast, so a constant (and, by extension, the
+/// GlobalVariable initialized by it) will have a type that can vary
+/// depending on which case of an enum it is.
+///
+/// To understand the alignment situation, consider `enum E { V64(u64),
+/// V32(u32, u32) }` on Windows.  The type has 8-byte alignment to
+/// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32,
+/// i32, i32}`, which is 4-byte aligned.
+///
+/// Currently the returned value has the same size as the type, but
+/// this could be changed in the future to avoid allocating unnecessary
+/// space after values of shorter-than-maximum cases.
+pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, discr: Disr,
+                             vals: &[ValueRef]) -> ValueRef {
+    match *r {
+        CEnum(ity, min, max) => {
+            assert_eq!(vals.len(), 0);
+            assert_discr_in_range(ity, min, max, discr);
+            C_integral(ll_inttype(ccx, ity), discr.0, true)
+        }
+        General(ity, ref cases, _) => {
+            let case = &cases[discr.0 as usize];
+            let (max_sz, _) = union_size_and_align(&cases[..]);
+            let lldiscr = C_integral(ll_inttype(ccx, ity), discr.0 as u64, true);
+            let mut f = vec![lldiscr];
+            f.extend_from_slice(vals);
+            let mut contents = build_const_struct(ccx, case, &f[..]);
+            contents.extend_from_slice(&[padding(ccx, max_sz - case.size)]);
+            C_struct(ccx, &contents[..], false)
+        }
+        Univariant(ref st, _dro) => {
+            assert_eq!(discr, Disr(0));
+            let contents = build_const_struct(ccx, st, vals);
+            C_struct(ccx, &contents[..], st.packed)
+        }
+        RawNullablePointer { nndiscr, nnty, .. } => {
+            if discr == nndiscr {
+                assert_eq!(vals.len(), 1);
+                vals[0]
+            } else {
+                C_null(type_of::sizing_type_of(ccx, nnty))
+            }
+        }
+        StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
+            if discr == nndiscr {
+                C_struct(ccx, &build_const_struct(ccx,
+                                                 nonnull,
+                                                 vals),
+                         false)
+            } else {
+                let vals = nonnull.fields.iter().map(|&ty| {
+                    // Always use null even if it's not the `discrfield`th
+                    // field; see #8506.
+                    C_null(type_of::sizing_type_of(ccx, ty))
+                }).collect::<Vec<ValueRef>>();
+                C_struct(ccx, &build_const_struct(ccx,
+                                                 nonnull,
+                                                 &vals[..]),
+                         false)
+            }
+        }
+    }
+}
+
+/// Compute struct field offsets relative to struct begin.
+fn compute_struct_field_offsets<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                          st: &Struct<'tcx>) -> Vec<u64> {
+    let mut offsets = vec!();
+
+    let mut offset = 0;
+    for &ty in &st.fields {
+        let llty = type_of::sizing_type_of(ccx, ty);
+        if !st.packed {
+            let type_align = type_of::align_of(ccx, ty);
+            offset = roundup(offset, type_align);
+        }
+        offsets.push(offset);
+        offset += machine::llsize_of_alloc(ccx, llty);
+    }
+    assert_eq!(st.fields.len(), offsets.len());
+    offsets
+}
+
+/// Building structs is a little complicated, because we might need to
+/// insert padding if a field's value is less aligned than its type.
+///
+/// Continuing the example from `trans_const`, a value of type `(u32,
+/// E)` should have the `E` at offset 8, but if that field's
+/// initializer is 4-byte aligned then simply translating the tuple as
+/// a two-element struct will locate it at offset 4, and accesses to it
+/// will read the wrong memory.
+fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                st: &Struct<'tcx>, vals: &[ValueRef])
+                                -> Vec<ValueRef> {
+    assert_eq!(vals.len(), st.fields.len());
+
+    let target_offsets = compute_struct_field_offsets(ccx, st);
+
+    // offset of current value
+    let mut offset = 0;
+    let mut cfields = Vec::new();
+    for (&val, target_offset) in vals.iter().zip(target_offsets) {
+        if !st.packed {
+            let val_align = machine::llalign_of_min(ccx, val_ty(val));
+            offset = roundup(offset, val_align);
+        }
+        if offset != target_offset {
+            cfields.push(padding(ccx, target_offset - offset));
+            offset = target_offset;
+        }
+        assert!(!is_undef(val));
+        cfields.push(val);
+        offset += machine::llsize_of_alloc(ccx, val_ty(val));
+    }
+
+    assert!(st.sized && offset <= st.size);
+    if offset != st.size {
+        cfields.push(padding(ccx, st.size - offset));
+    }
+
+    cfields
+}
+
+fn padding(ccx: &CrateContext, size: u64) -> ValueRef {
+    C_undef(Type::array(&Type::i8(ccx), size))
+}
+
+// FIXME this utility routine should be somewhere more general
+#[inline]
+fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a }
+
+/// Get the discriminant of a constant value.
+pub fn const_get_discrim(r: &Repr, val: ValueRef) -> Disr {
+    match *r {
+        CEnum(ity, _, _) => {
+            match ity {
+                attr::SignedInt(..) => Disr(const_to_int(val) as u64),
+                attr::UnsignedInt(..) => Disr(const_to_uint(val)),
+            }
+        }
+        General(ity, _, _) => {
+            match ity {
+                attr::SignedInt(..) => Disr(const_to_int(const_get_elt(val, &[0])) as u64),
+                attr::UnsignedInt(..) => Disr(const_to_uint(const_get_elt(val, &[0])))
+            }
+        }
+        Univariant(..) => Disr(0),
+        RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
+            bug!("const discrim access of non c-like enum")
+        }
+    }
+}
+
+/// Extract a field of a constant value, as appropriate for its
+/// representation.
+///
+/// (Not to be confused with `common::const_get_elt`, which operates on
+/// raw LLVM-level structs and arrays.)
+pub fn const_get_field(r: &Repr, val: ValueRef, _discr: Disr,
+                       ix: usize) -> ValueRef {
+    match *r {
+        CEnum(..) => bug!("element access in C-like enum const"),
+        Univariant(..) => const_struct_field(val, ix),
+        General(..) => const_struct_field(val, ix + 1),
+        RawNullablePointer { .. } => {
+            assert_eq!(ix, 0);
+            val
+        },
+        StructWrappedNullablePointer{ .. } => const_struct_field(val, ix)
+    }
+}
+
+/// Extract field of struct-like const, skipping our alignment padding.
+fn const_struct_field(val: ValueRef, ix: usize) -> ValueRef {
+    // Get the ix-th non-undef element of the struct.
+    let mut real_ix = 0; // actual position in the struct
+    let mut ix = ix; // logical index relative to real_ix
+    let mut field;
+    loop {
+        loop {
+            field = const_get_elt(val, &[real_ix]);
+            if !is_undef(field) {
+                break;
+            }
+            real_ix = real_ix + 1;
+        }
+        if ix == 0 {
+            return field;
+        }
+        ix = ix - 1;
+        real_ix = real_ix + 1;
+    }
+}
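
The padding logic above is easiest to see with concrete numbers: `roundup` bumps the running offset up to the next multiple of the field's alignment, and any gap between the current offset and the target offset becomes an undef `i8` array that `const_struct_field` later skips. A minimal standalone sketch of that arithmetic, assuming a hypothetical `(u32, u64)` struct with natural alignment (illustration only, not part of this commit):

    fn roundup(x: u64, a: u64) -> u64 {
        // Round x up to the next multiple of a.
        ((x + (a - 1)) / a) * a
    }

    fn main() {
        let mut offset = 0u64;
        offset += 4;                     // the u32 field occupies bytes 0..4
        let target = roundup(offset, 8); // the u64 field needs 8-byte alignment
        assert_eq!(target, 8);
        assert_eq!(target - offset, 4);  // four padding bytes, i.e. C_undef([4 x i8])
    }
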
diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs
new file mode 100644 (file)
index 0000000..e27bec6
--- /dev/null
@@ -0,0 +1,120 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! # Translation of inline assembly.
+
+use llvm::{self, ValueRef};
+use base;
+use build::*;
+use common::*;
+use datum::{Datum, Lvalue};
+use type_of;
+use type_::Type;
+
+use rustc::hir as ast;
+use std::ffi::CString;
+use syntax::ast::AsmDialect;
+use libc::{c_uint, c_char};
+
+// Take an inline assembly expression and splat it out via LLVM
+pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                    ia: &ast::InlineAsm,
+                                    outputs: Vec<Datum<'tcx, Lvalue>>,
+                                    mut inputs: Vec<ValueRef>) {
+    let mut ext_constraints = vec![];
+    let mut output_types = vec![];
+
+    // Prepare the output operands
+    let mut indirect_outputs = vec![];
+    for (i, (out, out_datum)) in ia.outputs.iter().zip(&outputs).enumerate() {
+        let val = if out.is_rw || out.is_indirect {
+            Some(base::load_ty(bcx, out_datum.val, out_datum.ty))
+        } else {
+            None
+        };
+        if out.is_rw {
+            inputs.push(val.unwrap());
+            ext_constraints.push(i.to_string());
+        }
+        if out.is_indirect {
+            indirect_outputs.push(val.unwrap());
+        } else {
+            output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
+        }
+    }
+    if !indirect_outputs.is_empty() {
+        indirect_outputs.extend_from_slice(&inputs);
+        inputs = indirect_outputs;
+    }
+
+    let clobbers = ia.clobbers.iter()
+                              .map(|s| format!("~{{{}}}", &s));
+
+    // Default per-arch clobbers
+    // Basically what clang does
+    let arch_clobbers = match &bcx.sess().target.target.arch[..] {
+        "x86" | "x86_64" => vec!("~{dirflag}", "~{fpsr}", "~{flags}"),
+        _                => Vec::new()
+    };
+
+    let all_constraints =
+        ia.outputs.iter().map(|out| out.constraint.to_string())
+          .chain(ia.inputs.iter().map(|s| s.to_string()))
+          .chain(ext_constraints)
+          .chain(clobbers)
+          .chain(arch_clobbers.iter().map(|s| s.to_string()))
+          .collect::<Vec<String>>().join(",");
+
+    debug!("Asm Constraints: {}", &all_constraints[..]);
+
+    // Depending on how many outputs we have, the return type is different
+    let num_outputs = output_types.len();
+    let output_type = match num_outputs {
+        0 => Type::void(bcx.ccx()),
+        1 => output_types[0],
+        _ => Type::struct_(bcx.ccx(), &output_types[..], false)
+    };
+
+    let dialect = match ia.dialect {
+        AsmDialect::Att   => llvm::AD_ATT,
+        AsmDialect::Intel => llvm::AD_Intel
+    };
+
+    let asm = CString::new(ia.asm.as_bytes()).unwrap();
+    let constraint_cstr = CString::new(all_constraints).unwrap();
+    let r = InlineAsmCall(bcx,
+                          asm.as_ptr(),
+                          constraint_cstr.as_ptr(),
+                          &inputs,
+                          output_type,
+                          ia.volatile,
+                          ia.alignstack,
+                          dialect);
+
+    // Again, based on how many outputs we have
+    let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
+    for (i, (_, datum)) in outputs.enumerate() {
+        let v = if num_outputs == 1 { r } else { ExtractValue(bcx, r, i) };
+        Store(bcx, v, datum.val);
+    }
+
+    // Store expn_id in a metadata node so we can map LLVM errors
+    // back to source locations.  See #17552.
+    unsafe {
+        let key = "srcloc";
+        let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx().llcx(),
+            key.as_ptr() as *const c_char, key.len() as c_uint);
+
+        let val: llvm::ValueRef = C_i32(bcx.ccx(), ia.expn_id.into_u32() as i32);
+
+        llvm::LLVMSetMetadata(r, kind,
+            llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1));
+    }
+}
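
As a cross-check on the constraint string assembled in `trans_inline_asm` above: output constraints come first, then inputs, then the extra read-write constraints (output indices), then clobbers, then the per-arch clobbers. A standalone sketch under the assumption of one `"=r"` output and one `"r"` input on x86_64 (the constraint letters are illustrative, not taken from this commit):

    fn main() {
        let outputs = vec!["=r".to_string()];
        let inputs = vec!["r".to_string()];
        let arch_clobbers = ["~{dirflag}", "~{fpsr}", "~{flags}"];
        let all = outputs.into_iter()
            .chain(inputs)
            .chain(arch_clobbers.iter().map(|s| s.to_string()))
            .collect::<Vec<String>>()
            .join(",");
        assert_eq!(all, "=r,r,~{dirflag},~{fpsr},~{flags}");
    }
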
diff --git a/src/librustc_trans/attributes.rs b/src/librustc_trans/attributes.rs
new file mode 100644 (file)
index 0000000..d4930f3
--- /dev/null
@@ -0,0 +1,110 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//! Set and unset common attributes on LLVM values.
+
+use libc::c_uint;
+use llvm::{self, ValueRef};
+use session::config::NoDebugInfo;
+pub use syntax::attr::InlineAttr;
+use syntax::ast;
+use context::CrateContext;
+
+/// Mark an LLVM function to use the provided inline heuristic.
+#[inline]
+pub fn inline(val: ValueRef, inline: InlineAttr) {
+    use self::InlineAttr::*;
+    match inline {
+        Hint   => llvm::SetFunctionAttribute(val, llvm::Attribute::InlineHint),
+        Always => llvm::SetFunctionAttribute(val, llvm::Attribute::AlwaysInline),
+        Never  => llvm::SetFunctionAttribute(val, llvm::Attribute::NoInline),
+        None   => {
+            let attr = llvm::Attribute::InlineHint |
+                       llvm::Attribute::AlwaysInline |
+                       llvm::Attribute::NoInline;
+            llvm::RemoveFunctionAttributes(val, attr)
+        },
+    };
+}
+
+/// Tell LLVM whether or not to emit the information necessary to unwind the stack for the function.
+#[inline]
+pub fn emit_uwtable(val: ValueRef, emit: bool) {
+    if emit {
+        llvm::SetFunctionAttribute(val, llvm::Attribute::UWTable);
+    } else {
+        llvm::RemoveFunctionAttributes(val, llvm::Attribute::UWTable);
+    }
+}
+
+/// Tell LLVM whether the function can or cannot unwind.
+#[inline]
+pub fn unwind(val: ValueRef, can_unwind: bool) {
+    if can_unwind {
+        llvm::RemoveFunctionAttributes(val, llvm::Attribute::NoUnwind);
+    } else {
+        llvm::SetFunctionAttribute(val, llvm::Attribute::NoUnwind);
+    }
+}
+
+/// Tell LLVM whether it should optimise the function for size.
+#[inline]
+#[allow(dead_code)] // possibly useful function
+pub fn set_optimize_for_size(val: ValueRef, optimize: bool) {
+    if optimize {
+        llvm::SetFunctionAttribute(val, llvm::Attribute::OptimizeForSize);
+    } else {
+        llvm::RemoveFunctionAttributes(val, llvm::Attribute::OptimizeForSize);
+    }
+}
+
+/// Tell LLVM if this function should be 'naked', i.e. skip the prologue and epilogue.
+#[inline]
+pub fn naked(val: ValueRef, is_naked: bool) {
+    if is_naked {
+        llvm::SetFunctionAttribute(val, llvm::Attribute::Naked);
+    } else {
+        llvm::RemoveFunctionAttributes(val, llvm::Attribute::Naked);
+    }
+}
+
+/// Composite function which sets LLVM attributes for a function depending on its AST (#[attribute])
+/// attributes.
+pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRef) {
+    use syntax::attr::*;
+    inline(llfn, find_inline_attr(Some(ccx.sess().diagnostic()), attrs));
+
+    // FIXME: #11906: Omitting frame pointers breaks retrieving the value of a
+    // parameter.
+    let no_fp_elim = (ccx.sess().opts.debuginfo != NoDebugInfo) ||
+                     !ccx.sess().target.target.options.eliminate_frame_pointer;
+    if no_fp_elim {
+        unsafe {
+            let attr = "no-frame-pointer-elim\0".as_ptr() as *const _;
+            let val = "true\0".as_ptr() as *const _;
+            llvm::LLVMAddFunctionAttrStringValue(llfn,
+                                                 llvm::FunctionIndex as c_uint,
+                                                 attr, val);
+        }
+    }
+
+    for attr in attrs {
+        if attr.check_name("cold") {
+            llvm::Attributes::default().set(llvm::Attribute::Cold)
+                .apply_llfn(llvm::FunctionIndex as usize, llfn)
+        } else if attr.check_name("naked") {
+            naked(llfn, true);
+        } else if attr.check_name("allocator") {
+            llvm::Attributes::default().set(llvm::Attribute::NoAlias)
+                .apply_llfn(llvm::ReturnIndex as usize, llfn)
+        } else if attr.check_name("unwind") {
+            unwind(llfn, true);
+        }
+    }
+}
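
A brief usage sketch of the source-level attributes that `from_fn_attrs` and `inline` react to; the function names below are placeholders, not from this commit:

    // `find_inline_attr` maps #[inline(always)] to InlineAttr::Always,
    // which the `inline` helper above turns into LLVM's AlwaysInline.
    #[inline(always)]
    fn hot_helper(x: u32) -> u32 { x.wrapping_mul(3) }

    // "cold" is matched by check_name in the loop above and becomes
    // llvm::Attribute::Cold on the function.
    #[cold]
    fn unlikely_path() {}

    fn main() {
        let _ = hot_helper(7);
        unlikely_path();
    }
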
index cbdadac4dc77c5bca76e21dd0a300ad5ca7dbd24..514fc52d0085b8df8b1ab2d89f86475a0b4f8aae 100644 (file)
@@ -254,8 +254,8 @@ impl<'a> ArchiveBuilder<'a> {
         // want to modify this archive, so we use `io::copy` to not preserve
         // permission bits.
         if let Some(ref s) = self.config.src {
-            try!(io::copy(&mut try!(File::open(s)),
-                          &mut try!(File::create(&self.config.dst))));
+            io::copy(&mut File::open(s)?,
+                     &mut File::create(&self.config.dst)?)?;
         }
 
         if removals.len() > 0 {
@@ -267,12 +267,12 @@ impl<'a> ArchiveBuilder<'a> {
             match addition {
                 Addition::File { path, name_in_archive } => {
                     let dst = self.work_dir.path().join(&name_in_archive);
-                    try!(fs::copy(&path, &dst));
+                    fs::copy(&path, &dst)?;
                     members.push(PathBuf::from(name_in_archive));
                 }
                 Addition::Archive { archive, archive_name, mut skip } => {
-                    try!(self.add_archive_members(&mut members, archive,
-                                                  &archive_name, &mut *skip));
+                    self.add_archive_members(&mut members, archive,
+                                             &archive_name, &mut *skip)?;
                 }
             }
         }
@@ -334,7 +334,7 @@ impl<'a> ArchiveBuilder<'a> {
         // all SYMDEF files as these are just magical placeholders which get
         // re-created when we make a new archive anyway.
         for file in archive.iter() {
-            let file = try!(file.map_err(string_to_io_error));
+            let file = file.map_err(string_to_io_error)?;
             if !is_relevant_child(&file) {
                 continue
             }
@@ -388,7 +388,7 @@ impl<'a> ArchiveBuilder<'a> {
                 }
             }
             let dst = self.work_dir.path().join(&new_filename);
-            try!(try!(File::create(&dst)).write_all(file.data()));
+            File::create(&dst)?.write_all(file.data())?;
             members.push(PathBuf::from(new_filename));
         }
         Ok(())
@@ -455,7 +455,7 @@ impl<'a> ArchiveBuilder<'a> {
         unsafe {
             if let Some(archive) = self.src_archive() {
                 for child in archive.iter() {
-                    let child = try!(child.map_err(string_to_io_error));
+                    let child = child.map_err(string_to_io_error)?;
                     let child_name = match child.name() {
                         Some(s) => s,
                         None => continue,
@@ -464,7 +464,7 @@ impl<'a> ArchiveBuilder<'a> {
                         continue
                     }
 
-                    let name = try!(CString::new(child_name));
+                    let name = CString::new(child_name)?;
                     members.push(llvm::LLVMRustArchiveMemberNew(ptr::null(),
                                                                 name.as_ptr(),
                                                                 child.raw()));
@@ -474,8 +474,8 @@ impl<'a> ArchiveBuilder<'a> {
             for addition in mem::replace(&mut self.additions, Vec::new()) {
                 match addition {
                     Addition::File { path, name_in_archive } => {
-                        let path = try!(CString::new(path.to_str().unwrap()));
-                        let name = try!(CString::new(name_in_archive));
+                        let path = CString::new(path.to_str().unwrap())?;
+                        let name = CString::new(name_in_archive)?;
                         members.push(llvm::LLVMRustArchiveMemberNew(path.as_ptr(),
                                                                     name.as_ptr(),
                                                                     ptr::null_mut()));
@@ -484,7 +484,7 @@ impl<'a> ArchiveBuilder<'a> {
                     }
                     Addition::Archive { archive, archive_name: _, mut skip } => {
                         for child in archive.iter() {
-                            let child = try!(child.map_err(string_to_io_error));
+                            let child = child.map_err(string_to_io_error)?;
                             if !is_relevant_child(&child) {
                                 continue
                             }
@@ -502,7 +502,7 @@ impl<'a> ArchiveBuilder<'a> {
                             let child_name = Path::new(child_name)
                                                   .file_name().unwrap()
                                                   .to_str().unwrap();
-                            let name = try!(CString::new(child_name));
+                            let name = CString::new(child_name)?;
                             let m = llvm::LLVMRustArchiveMemberNew(ptr::null(),
                                                                    name.as_ptr(),
                                                                    child.raw());
@@ -515,7 +515,7 @@ impl<'a> ArchiveBuilder<'a> {
             }
 
             let dst = self.config.dst.to_str().unwrap().as_bytes();
-            let dst = try!(CString::new(dst));
+            let dst = CString::new(dst)?;
             let r = llvm::LLVMRustWriteArchive(dst.as_ptr(),
                                                members.len() as libc::size_t,
                                                members.as_ptr(),
index 76360dcc1b9725b79c15cb79b2f3877740d9600b..4e77b2bc06940b78c1312addb2edd098b07f62bd 100644 (file)
@@ -13,7 +13,6 @@ use super::linker::{Linker, GnuLinker, MsvcLinker};
 use super::rpath::RPathConfig;
 use super::rpath;
 use super::msvc;
-use super::svh::Svh;
 use session::config;
 use session::config::NoDebugInfo;
 use session::config::{OutputFilenames, Input, OutputType};
@@ -23,34 +22,28 @@ use session::Session;
 use middle::cstore::{self, CrateStore, LinkMeta};
 use middle::cstore::{LinkagePreference, NativeLibraryKind};
 use middle::dependency_format::Linkage;
-use middle::ty::{self, Ty};
-use rustc::front::map::DefPath;
-use trans::{CrateContext, CrateTranslation, gensym_name};
+use CrateTranslation;
 use util::common::time;
-use util::sha2::{Digest, Sha256};
 use util::fs::fix_windows_verbatim_for_gcc;
+use rustc::ty::TyCtxt;
 use rustc_back::tempdir::TempDir;
 
+use rustc_incremental::SvhCalculate;
 use std::ascii;
 use std::char;
 use std::env;
 use std::ffi::OsString;
 use std::fs;
 use std::io::{self, Read, Write};
-use std::iter::once;
 use std::mem;
 use std::path::{Path, PathBuf};
 use std::process::Command;
 use std::str;
 use flate;
-use serialize::hex::ToHex;
 use syntax::ast;
 use syntax::codemap::Span;
-use syntax::parse::token::{self, InternedString};
 use syntax::attr::AttrMetaMethods;
 
-use rustc_front::hir;
-
 // RLIB LLVM-BYTECODE OBJECT LAYOUT
 // Version 1
 // Bytes    Data
@@ -81,58 +74,6 @@ pub const RLIB_BYTECODE_OBJECT_V1_DATA_OFFSET: usize =
     RLIB_BYTECODE_OBJECT_V1_DATASIZE_OFFSET + 8;
 
 
-/*
- * Name mangling and its relationship to metadata. This is complex. Read
- * carefully.
- *
- * The semantic model of Rust linkage is, broadly, that "there's no global
- * namespace" between crates. Our aim is to preserve the illusion of this
- * model despite the fact that it's not *quite* possible to implement on
- * modern linkers. We initially didn't use system linkers at all, but have
- * been convinced of their utility.
- *
- * There are a few issues to handle:
- *
- *  - Linkers operate on a flat namespace, so we have to flatten names.
- *    We do this using the C++ namespace-mangling technique. Foo::bar
- *    symbols and such.
- *
- *  - Symbols with the same name but different types need to get different
- *    linkage-names. We do this by hashing a string-encoding of the type into
- *    a fixed-size (currently 16-byte hex) cryptographic hash function (CHF:
- *    we use SHA256) to "prevent collisions". This is not airtight but 16 hex
- *    digits on uniform probability means you're going to need 2**32 same-name
- *    symbols in the same process before you're even hitting birthday-paradox
- *    collision probability.
- *
- *  - Symbols in different crates but with same names "within" the crate need
- *    to get different linkage-names.
- *
- *  - The hash shown in the filename needs to be predictable and stable for
- *    build tooling integration. It also needs to be using a hash function
- *    which is easy to use from Python, make, etc.
- *
- * So here is what we do:
- *
- *  - Consider the package id; every crate has one (specified with crate_id
- *    attribute).  If a package id isn't provided explicitly, we infer a
- *    versionless one from the output name. The version will end up being 0.0
- *    in this case. CNAME and CVERS are taken from this package id. For
- *    example, github.com/mozilla/CNAME#CVERS.
- *
- *  - Define CMH as SHA256(crateid).
- *
- *  - Define CMH8 as the first 8 characters of CMH.
- *
- *  - Compile our crate to lib CNAME-CMH8-CVERS.so
- *
- *  - Define STH(sym) as SHA256(CMH, type_str(sym))
- *
- *  - Suffix a mangled sym with ::STH@CVERS, so that it is unique in the
- *    name, non-name metadata, and type sense, and versioned in the way
- *    system linkers understand.
- */
-
 pub fn find_crate_name(sess: Option<&Session>,
                        attrs: &[ast::Attribute],
                        input: &Input) -> String {
@@ -180,195 +121,20 @@ pub fn find_crate_name(sess: Option<&Session>,
     }
 
     "rust_out".to_string()
+
 }
 
-pub fn build_link_meta(sess: &Session,
-                       krate: &hir::Crate,
+pub fn build_link_meta(tcx: &TyCtxt,
                        name: &str)
                        -> LinkMeta {
     let r = LinkMeta {
         crate_name: name.to_owned(),
-        crate_hash: Svh::calculate(&sess.opts.cg.metadata, krate),
+        crate_hash: tcx.calculate_krate_hash(),
     };
     info!("{:?}", r);
     return r;
 }
 
-fn truncated_hash_result(symbol_hasher: &mut Sha256) -> String {
-    let output = symbol_hasher.result_bytes();
-    // 64 bits should be enough to avoid collisions.
-    output[.. 8].to_hex().to_string()
-}
-
-
-// This calculates STH for a symbol, as defined above
-fn symbol_hash<'tcx>(tcx: &ty::ctxt<'tcx>,
-                     symbol_hasher: &mut Sha256,
-                     t: Ty<'tcx>,
-                     link_meta: &LinkMeta)
-                     -> String {
-    // NB: do *not* use abbrevs here as we want the symbol names
-    // to be independent of one another in the crate.
-
-    symbol_hasher.reset();
-    symbol_hasher.input_str(&link_meta.crate_name);
-    symbol_hasher.input_str("-");
-    symbol_hasher.input_str(link_meta.crate_hash.as_str());
-    for meta in tcx.sess.crate_metadata.borrow().iter() {
-        symbol_hasher.input_str(&meta[..]);
-    }
-    symbol_hasher.input_str("-");
-    symbol_hasher.input(&tcx.sess.cstore.encode_type(tcx, t));
-    // Prefix with 'h' so that it never blends into adjacent digits
-    let mut hash = String::from("h");
-    hash.push_str(&truncated_hash_result(symbol_hasher));
-    hash
-}
-
-fn get_symbol_hash<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> String {
-    if let Some(h) = ccx.type_hashcodes().borrow().get(&t) {
-        return h.to_string()
-    }
-
-    let mut symbol_hasher = ccx.symbol_hasher().borrow_mut();
-    let hash = symbol_hash(ccx.tcx(), &mut *symbol_hasher, t, ccx.link_meta());
-    ccx.type_hashcodes().borrow_mut().insert(t, hash.clone());
-    hash
-}
-
-
-// Name sanitation. LLVM will happily accept identifiers with weird names, but
-// gas doesn't!
-// gas accepts the following characters in symbols: a-z, A-Z, 0-9, ., _, $
-pub fn sanitize(s: &str) -> String {
-    let mut result = String::new();
-    for c in s.chars() {
-        match c {
-            // Escape these with $ sequences
-            '@' => result.push_str("$SP$"),
-            '*' => result.push_str("$BP$"),
-            '&' => result.push_str("$RF$"),
-            '<' => result.push_str("$LT$"),
-            '>' => result.push_str("$GT$"),
-            '(' => result.push_str("$LP$"),
-            ')' => result.push_str("$RP$"),
-            ',' => result.push_str("$C$"),
-
-            // '.' doesn't occur in types and functions, so reuse it
-            // for ':' and '-'
-            '-' | ':' => result.push('.'),
-
-            // These are legal symbols
-            'a' ... 'z'
-            | 'A' ... 'Z'
-            | '0' ... '9'
-            | '_' | '.' | '$' => result.push(c),
-
-            _ => {
-                result.push('$');
-                for c in c.escape_unicode().skip(1) {
-                    match c {
-                        '{' => {},
-                        '}' => result.push('$'),
-                        c => result.push(c),
-                    }
-                }
-            }
-        }
-    }
-
-    // Underscore-qualify anything that didn't start as an ident.
-    if !result.is_empty() &&
-        result.as_bytes()[0] != '_' as u8 &&
-        ! (result.as_bytes()[0] as char).is_xid_start() {
-        return format!("_{}", &result[..]);
-    }
-
-    return result;
-}
-
-pub fn mangle<PI: Iterator<Item=InternedString>>(path: PI, hash: Option<&str>) -> String {
-    // Follow C++ namespace-mangling style, see
-    // http://en.wikipedia.org/wiki/Name_mangling for more info.
-    //
-    // It turns out that on OSX you can actually have arbitrary symbols in
-    // function names (at least when given to LLVM), but this is not possible
-    // when using unix's linker. Perhaps one day when we just use a linker from LLVM
-    // we won't need to do this name mangling. The problem with name mangling is
-    // that it seriously limits the available characters. For example we can't
-    // have things like &T in symbol names when one would theoretically
-    // want them for things like impls of traits on that type.
-    //
-    // To be able to work on all platforms and get *some* reasonable output, we
-    // use C++ name-mangling.
-
-    let mut n = String::from("_ZN"); // _Z == Begin name-sequence, N == nested
-
-    fn push(n: &mut String, s: &str) {
-        let sani = sanitize(s);
-        n.push_str(&format!("{}{}", sani.len(), sani));
-    }
-
-    // First, connect each component with <len, name> pairs.
-    for data in path {
-        push(&mut n, &data);
-    }
-
-    if let Some(s) = hash {
-        push(&mut n, s)
-    }
-
-    n.push('E'); // End name-sequence.
-    n
-}
-
-pub fn exported_name(path: DefPath, hash: &str) -> String {
-    let path = path.into_iter()
-                   .map(|e| e.data.as_interned_str());
-    mangle(path, Some(hash))
-}
-
-pub fn mangle_exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, path: DefPath,
-                                      t: Ty<'tcx>, id: ast::NodeId) -> String {
-    let mut hash = get_symbol_hash(ccx, t);
-
-    // Paths can be completely identical for different nodes,
-    // e.g. `fn foo() { { fn a() {} } { fn a() {} } }`, so we
-    // generate unique characters from the node id. For now
-    // hopefully 3 characters is enough to avoid collisions.
-    const EXTRA_CHARS: &'static str =
-        "abcdefghijklmnopqrstuvwxyz\
-         ABCDEFGHIJKLMNOPQRSTUVWXYZ\
-         0123456789";
-    let id = id as usize;
-    let extra1 = id % EXTRA_CHARS.len();
-    let id = id / EXTRA_CHARS.len();
-    let extra2 = id % EXTRA_CHARS.len();
-    let id = id / EXTRA_CHARS.len();
-    let extra3 = id % EXTRA_CHARS.len();
-    hash.push(EXTRA_CHARS.as_bytes()[extra1] as char);
-    hash.push(EXTRA_CHARS.as_bytes()[extra2] as char);
-    hash.push(EXTRA_CHARS.as_bytes()[extra3] as char);
-
-    exported_name(path, &hash[..])
-}
-
-pub fn mangle_internal_name_by_type_and_seq<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                                      t: Ty<'tcx>,
-                                                      name: &str) -> String {
-    let path = [token::intern(&t.to_string()).as_str(), gensym_name(name).as_str()];
-    let hash = get_symbol_hash(ccx, t);
-    mangle(path.iter().cloned(), Some(&hash[..]))
-}
-
-pub fn mangle_internal_name_by_path_and_seq(path: DefPath, flav: &str) -> String {
-    let names =
-        path.into_iter()
-            .map(|e| e.data.as_interned_str())
-            .chain(once(gensym_name(flav).as_str())); // append unique version of "flav"
-    mangle(names, None)
-}
-
 pub fn get_linker(sess: &Session) -> (String, Command) {
     if let Some(ref linker) = sess.opts.cg.linker {
         (linker.clone(), Command::new(linker))
@@ -420,8 +186,8 @@ pub fn link_binary(sess: &Session,
     let mut out_filenames = Vec::new();
     for &crate_type in sess.crate_types.borrow().iter() {
         if invalid_output_for_target(sess, crate_type) {
-            sess.bug(&format!("invalid output type `{:?}` for target os `{}`",
-                             crate_type, sess.opts.target_triple));
+           bug!("invalid output type `{:?}` for target os `{}`",
+                crate_type, sess.opts.target_triple);
         }
         let out_file = link_binary_output(sess, trans, crate_type, outputs,
                                           crate_name);
@@ -515,7 +281,7 @@ pub fn each_linked_rlib(sess: &Session,
     let fmts = fmts.get(&config::CrateTypeExecutable).or_else(|| {
         fmts.get(&config::CrateTypeStaticlib)
     }).unwrap_or_else(|| {
-        sess.bug("could not find formats for rlibs")
+        bug!("could not find formats for rlibs")
     });
     for (cnum, path) in crates {
         match fmts[cnum as usize - 1] {
@@ -757,9 +523,9 @@ fn write_rlib_bytecode_object_v1(writer: &mut Write,
                                  bc_data_deflated: &[u8]) -> io::Result<()> {
     let bc_data_deflated_size: u64 = bc_data_deflated.len() as u64;
 
-    try!(writer.write_all(RLIB_BYTECODE_OBJECT_MAGIC));
-    try!(writer.write_all(&[1, 0, 0, 0]));
-    try!(writer.write_all(&[
+    writer.write_all(RLIB_BYTECODE_OBJECT_MAGIC)?;
+    writer.write_all(&[1, 0, 0, 0])?;
+    writer.write_all(&[
         (bc_data_deflated_size >>  0) as u8,
         (bc_data_deflated_size >>  8) as u8,
         (bc_data_deflated_size >> 16) as u8,
@@ -768,8 +534,8 @@ fn write_rlib_bytecode_object_v1(writer: &mut Write,
         (bc_data_deflated_size >> 40) as u8,
         (bc_data_deflated_size >> 48) as u8,
         (bc_data_deflated_size >> 56) as u8,
-    ]));
-    try!(writer.write_all(&bc_data_deflated));
+    ])?;
+    writer.write_all(&bc_data_deflated)?;
 
     let number_of_bytes_written_so_far =
         RLIB_BYTECODE_OBJECT_MAGIC.len() +                // magic id
@@ -781,7 +547,7 @@ fn write_rlib_bytecode_object_v1(writer: &mut Write,
     // padding byte to make it even. This works around a crash bug in LLDB
     // (see issue #15950)
     if number_of_bytes_written_so_far % 2 == 1 {
-        try!(writer.write_all(&[0]));
+        writer.write_all(&[0])?;
     }
 
     return Ok(());
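
The size field above is written least-significant byte first (little-endian). A small standalone check of that encoding, using an arbitrary example size rather than a value from this commit:

    fn main() {
        let bc_data_deflated_size: u64 = 0x0102;
        let bytes: Vec<u8> = (0..8).map(|i| (bc_data_deflated_size >> (8 * i)) as u8).collect();
        assert_eq!(bytes, [0x02, 0x01, 0, 0, 0, 0, 0, 0]);
    }
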
@@ -1128,7 +894,7 @@ fn add_local_native_libraries(cmd: &mut Linker, sess: &Session) {
         match kind {
             NativeLibraryKind::NativeUnknown => cmd.link_dylib(l),
             NativeLibraryKind::NativeFramework => cmd.link_framework(l),
-            NativeLibraryKind::NativeStatic => unreachable!(),
+            NativeLibraryKind::NativeStatic => bug!(),
         }
     }
 }
@@ -1314,7 +1080,7 @@ fn add_upstream_native_libraries(cmd: &mut Linker, sess: &Session) {
                 NativeLibraryKind::NativeUnknown => cmd.link_dylib(lib),
                 NativeLibraryKind::NativeFramework => cmd.link_framework(lib),
                 NativeLibraryKind::NativeStatic => {
-                    sess.bug("statics shouldn't be propagated");
+                    bug!("statics shouldn't be propagated");
                 }
             }
         }
index 55192bdf74484f547223e5f0eea7e000c0838458..c02a482f81275c2248e3a796608cf7e06e1fc078 100644 (file)
@@ -22,7 +22,7 @@ use session::Session;
 use session::config::CrateTypeDylib;
 use session::config;
 use syntax::ast;
-use trans::CrateTranslation;
+use CrateTranslation;
 
 /// Linker abstraction used by back::link to build up the command to invoke a
 /// linker.
@@ -271,10 +271,10 @@ impl<'a> Linker for MsvcLinker<'a> {
     }
 
     fn framework_path(&mut self, _path: &Path) {
-        panic!("frameworks are not supported on windows")
+        bug!("frameworks are not supported on windows")
     }
     fn link_framework(&mut self, _framework: &str) {
-        panic!("frameworks are not supported on windows")
+        bug!("frameworks are not supported on windows")
     }
 
     fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) {
@@ -327,16 +327,16 @@ impl<'a> Linker for MsvcLinker<'a> {
                       tmpdir: &Path) {
         let path = tmpdir.join("lib.def");
         let res = (|| -> io::Result<()> {
-            let mut f = BufWriter::new(try!(File::create(&path)));
+            let mut f = BufWriter::new(File::create(&path)?);
 
             // Start off with the standard module name header and then go
             // straight to exports.
-            try!(writeln!(f, "LIBRARY"));
-            try!(writeln!(f, "EXPORTS"));
+            writeln!(f, "LIBRARY")?;
+            writeln!(f, "EXPORTS")?;
 
             // Write out all our local symbols
             for sym in trans.reachable.iter() {
-                try!(writeln!(f, "  {}", sym));
+                writeln!(f, "  {}", sym)?;
             }
 
             // Take a look at how all upstream crates are linked into this
@@ -357,8 +357,9 @@ impl<'a> Linker for MsvcLinker<'a> {
                 cstore.item_symbol(did)
             });
             for symbol in symbols {
-                try!(writeln!(f, "  {}", symbol));
+                writeln!(f, "  {}", symbol)?;
             }
+
             Ok(())
         })();
         if let Err(e) = res {
diff --git a/src/librustc_trans/back/symbol_names.rs b/src/librustc_trans/back/symbol_names.rs
new file mode 100644 (file)
index 0000000..8127d1c
--- /dev/null
@@ -0,0 +1,379 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The Rust Linkage Model and Symbol Names
+//! =======================================
+//!
+//! The semantic model of Rust linkage is, broadly, that "there's no global
+//! namespace" between crates. Our aim is to preserve the illusion of this
+//! model despite the fact that it's not *quite* possible to implement on
+//! modern linkers. We initially didn't use system linkers at all, but have
+//! been convinced of their utility.
+//!
+//! There are a few issues to handle:
+//!
+//!  - Linkers operate on a flat namespace, so we have to flatten names.
+//!    We do this using the C++ namespace-mangling technique. Foo::bar
+//!    symbols and such.
+//!
+//!  - Symbols for distinct items with the same *name* need to get different
+//!    linkage-names. Examples of this are monomorphizations of functions or
+//!    items within anonymous scopes that end up having the same path.
+//!
+//!  - Symbols in different crates but with same names "within" the crate need
+//!    to get different linkage-names.
+//!
+//!  - Symbol names should be deterministic: Two consecutive runs of the
+//!    compiler over the same code base should produce the same symbol names for
+//!    the same items.
+//!
+//!  - Symbol names should not depend on any global properties of the code base,
+//!    so that small modifications to the code base do not result in all symbols
+//!    changing. In previous versions of the compiler, symbol names incorporated
+//!    the SVH (Stable Version Hash) of the crate. This scheme turned out to be
+//!    infeasible when used in conjunction with incremental compilation because
+//!    small code changes would invalidate all symbols generated previously.
+//!
+//!  - Even symbols from different versions of the same crate should be able to
+//!    live next to each other without conflict.
+//!
+//! In order to fulfill the above requirements the following scheme is used by
+//! the compiler:
+//!
+//! The main tool for avoiding naming conflicts is the incorporation of a 64-bit
+//! hash value into every exported symbol name. Anything that makes a difference
+//! to the symbol being named, but does not show up in the regular path needs to
+//! be fed into this hash:
+//!
+//! - Different monomorphizations of the same item have the same path but differ
+//!   in their concrete type parameters, so these parameters are part of the
+//!   data being digested for the symbol hash.
+//!
+//! - Rust allows items to be defined in anonymous scopes, such as in
+//!   `fn foo() { { fn bar() {} } { fn bar() {} } }`. Both `bar` functions have
+//!   the path `foo::bar`, since the anonymous scopes do not contribute to the
+//!   path of an item. The compiler already handles this case via so-called
+//!   disambiguating `DefPaths` which use indices to distinguish items with the
+//!   same name. The DefPaths of the functions above are thus `foo[0]::bar[0]`
+//!   and `foo[0]::bar[1]`. In order to incorporate this disambiguation
+//!   information into the symbol name too, these indices are fed into the
+//!   symbol hash, so that the above two symbols would end up with different
+//!   hash values.
+//!
+//! The two measures described above suffice to avoid intra-crate conflicts. In
+//! order to also avoid inter-crate conflicts two more measures are taken:
+//!
+//! - The name of the crate containing the symbol is prepended to the symbol
+//!   name, i.e. symbols are "crate qualified". For example, a function `foo` in
+//!   module `bar` in crate `baz` would get a symbol name like
+//!   `baz::bar::foo::{hash}` instead of just `bar::foo::{hash}`. This avoids
+//!   simple conflicts between functions from different crates.
+//!
+//! - In order to be able to also use symbols from two versions of the same
+//!   crate (which naturally also have the same name), a stronger measure is
+//!   required: The compiler accepts an arbitrary "disambiguator" value via the
+//!   `-C metadata` commandline argument. This disambiguator is then fed into
+//!   the symbol hash of every exported item. Consequently, the symbols in two
+//!   identical crates but with different disambiguators are not in conflict
+//!   with each other. This facility is mainly intended to be used by build
+//!   tools like Cargo.
+//!
+//! A note on symbol name stability
+//! -------------------------------
+//! Previous versions of the compiler resorted to feeding NodeIds into the
+//! symbol hash in order to disambiguate between items with the same path. The
+//! current version of the name generation algorithm takes great care not to do
+//! that, since NodeIds are notoriously unstable: A small change to the
+//! code base will offset all NodeIds after the change and thus, much as using
+//! the SVH in the hash, invalidate an unbounded number of symbol names. This
+//! makes re-using previously compiled code for incremental compilation
+//! virtually impossible. Thus, symbol hash generation exclusively relies on
+//! DefPaths which are much more robust in the face of changes to the code base.
+
+use common::{CrateContext, gensym_name};
+use monomorphize::Instance;
+use util::sha2::{Digest, Sha256};
+
+use rustc::middle::cstore;
+use rustc::hir::def_id::DefId;
+use rustc::ty::{self, TypeFoldable};
+use rustc::ty::item_path::{ItemPathBuffer, RootMode};
+use rustc::hir::map::definitions::{DefPath, DefPathData};
+
+use std::fmt::Write;
+use syntax::parse::token::{self, InternedString};
+use serialize::hex::ToHex;
+
+pub fn def_id_to_string<'tcx>(tcx: &ty::TyCtxt<'tcx>, def_id: DefId) -> String {
+    let def_path = tcx.def_path(def_id);
+    def_path_to_string(tcx, &def_path)
+}
+
+pub fn def_path_to_string<'tcx>(tcx: &ty::TyCtxt<'tcx>, def_path: &DefPath) -> String {
+    let mut s = String::with_capacity(def_path.data.len() * 16);
+
+    s.push_str(&tcx.crate_name(def_path.krate));
+    s.push_str("/");
+    s.push_str(&tcx.crate_disambiguator(def_path.krate));
+
+    for component in &def_path.data {
+        write!(s,
+               "::{}[{}]",
+               component.data.as_interned_str(),
+               component.disambiguator)
+            .unwrap();
+    }
+
+    s
+}
+
+fn get_symbol_hash<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+
+                             // path to the item this name is for
+                             def_path: &DefPath,
+
+                             // type of the item, without any generic
+                             // parameters substituted; this is
+                             // included in the hash as a kind of
+                             // safeguard.
+                             item_type: ty::Ty<'tcx>,
+
+                             // values for generic type parameters,
+                             // if any.
+                             parameters: &[ty::Ty<'tcx>])
+                             -> String {
+    debug!("get_symbol_hash(def_path={:?}, parameters={:?})",
+           def_path, parameters);
+
+    let tcx = ccx.tcx();
+
+    let mut hash_state = ccx.symbol_hasher().borrow_mut();
+
+    hash_state.reset();
+
+    // the main symbol name is not necessarily unique; hash in the
+    // compiler's internal def-path, guaranteeing each symbol has a
+    // truly unique path
+    hash_state.input_str(&def_path_to_string(tcx, def_path));
+
+    // Include the main item-type. Note that, in this case, the
+    // assertions about `needs_subst` may not hold, but this item-type
+    // ought to be the same for every reference anyway.
+    assert!(!item_type.has_erasable_regions());
+    let encoded_item_type = tcx.sess.cstore.encode_type(tcx, item_type, def_id_to_string);
+    hash_state.input(&encoded_item_type[..]);
+
+    // also include any type parameters (for generic items)
+    for t in parameters {
+       assert!(!t.has_erasable_regions());
+       assert!(!t.needs_subst());
+       let encoded_type = tcx.sess.cstore.encode_type(tcx, t, def_id_to_string);
+       hash_state.input(&encoded_type[..]);
+    }
+
+    return format!("h{}", truncated_hash_result(&mut *hash_state));
+
+    fn truncated_hash_result(symbol_hasher: &mut Sha256) -> String {
+        let output = symbol_hasher.result_bytes();
+        // 64 bits should be enough to avoid collisions.
+        output[.. 8].to_hex()
+    }
+}
+
+fn exported_name_with_opt_suffix<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                           instance: &Instance<'tcx>,
+                                           suffix: Option<&str>)
+                                           -> String {
+    let &Instance { def: mut def_id, ref substs } = instance;
+
+    debug!("exported_name_with_opt_suffix(def_id={:?}, substs={:?}, suffix={:?})",
+           def_id, substs, suffix);
+
+    if let Some(node_id) = ccx.tcx().map.as_local_node_id(def_id) {
+        if let Some(&src_def_id) = ccx.external_srcs().borrow().get(&node_id) {
+            def_id = src_def_id;
+        }
+    }
+
+    let def_path = ccx.tcx().def_path(def_id);
+    assert_eq!(def_path.krate, def_id.krate);
+
+    // We want to compute the "type" of this item. Unfortunately, some
+    // kinds of items (e.g., closures) don't have an entry in the
+    // item-type array. So walk back up to find the closest parent
+    // that DOES have an entry.
+    let mut ty_def_id = def_id;
+    let instance_ty;
+    loop {
+        let key = ccx.tcx().def_key(ty_def_id);
+        match key.disambiguated_data.data {
+            DefPathData::TypeNs(_) |
+            DefPathData::ValueNs(_) => {
+                instance_ty = ccx.tcx().lookup_item_type(ty_def_id);
+                break;
+            }
+            _ => {
+                // if we're making a symbol for something, there ought
+                // to be a value or type-def or something in there
+                // *somewhere*
+                ty_def_id.index = key.parent.unwrap_or_else(|| {
+                    bug!("finding type for {:?}, encountered def-id {:?} with no \
+                         parent", def_id, ty_def_id);
+                });
+            }
+        }
+    }
+
+    // Erase regions because they may not be deterministic when hashed
+    // and should not matter anyhow.
+    let instance_ty = ccx.tcx().erase_regions(&instance_ty.ty);
+
+    let hash = get_symbol_hash(ccx, &def_path, instance_ty, substs.types.as_slice());
+
+    let mut buffer = SymbolPathBuffer {
+        names: Vec::with_capacity(def_path.data.len())
+    };
+    ccx.tcx().push_item_path(&mut buffer, def_id);
+
+    if let Some(suffix) = suffix {
+        buffer.push(suffix);
+    }
+
+    mangle(buffer.names.into_iter(), Some(&hash[..]))
+}
+
+struct SymbolPathBuffer {
+    names: Vec<InternedString>,
+}
+
+impl ItemPathBuffer for SymbolPathBuffer {
+    fn root_mode(&self) -> &RootMode {
+        const ABSOLUTE: &'static RootMode = &RootMode::Absolute;
+        ABSOLUTE
+    }
+
+    fn push(&mut self, text: &str) {
+        self.names.push(token::intern(text).as_str());
+    }
+}
+
+pub fn exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                               instance: &Instance<'tcx>)
+                               -> String {
+    exported_name_with_opt_suffix(ccx, instance, None)
+}
+
+pub fn exported_name_with_suffix<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                           instance: &Instance<'tcx>,
+                                           suffix: &str)
+                                           -> String {
+   exported_name_with_opt_suffix(ccx, instance, Some(suffix))
+}
+
+/// Only symbols that are invisible outside their compilation unit should use a
+/// name generated by this function.
+pub fn internal_name_from_type_and_suffix<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                                    t: ty::Ty<'tcx>,
+                                                    suffix: &str)
+                                                    -> String {
+    let path = [token::intern(&t.to_string()).as_str(),
+                gensym_name(suffix).as_str()];
+    let def_path = DefPath {
+        data: vec![],
+        krate: cstore::LOCAL_CRATE,
+    };
+    let hash = get_symbol_hash(ccx, &def_path, t, &[]);
+    mangle(path.iter().cloned(), Some(&hash[..]))
+}
+
+// Name sanitation. LLVM will happily accept identifiers with weird names, but
+// gas doesn't!
+// gas accepts the following characters in symbols: a-z, A-Z, 0-9, ., _, $
+pub fn sanitize(s: &str) -> String {
+    let mut result = String::new();
+    for c in s.chars() {
+        match c {
+            // Escape these with $ sequences
+            '@' => result.push_str("$SP$"),
+            '*' => result.push_str("$BP$"),
+            '&' => result.push_str("$RF$"),
+            '<' => result.push_str("$LT$"),
+            '>' => result.push_str("$GT$"),
+            '(' => result.push_str("$LP$"),
+            ')' => result.push_str("$RP$"),
+            ',' => result.push_str("$C$"),
+
+            // '.' doesn't occur in types and functions, so reuse it
+            // for ':' and '-'
+            '-' | ':' => result.push('.'),
+
+            // These are legal symbols
+            'a' ... 'z'
+            | 'A' ... 'Z'
+            | '0' ... '9'
+            | '_' | '.' | '$' => result.push(c),
+
+            _ => {
+                result.push('$');
+                for c in c.escape_unicode().skip(1) {
+                    match c {
+                        '{' => {},
+                        '}' => result.push('$'),
+                        c => result.push(c),
+                    }
+                }
+            }
+        }
+    }
+
+    // Underscore-qualify anything that didn't start as an ident.
+    if !result.is_empty() &&
+        result.as_bytes()[0] != '_' as u8 &&
+        ! (result.as_bytes()[0] as char).is_xid_start() {
+        return format!("_{}", &result[..]);
+    }
+
+    return result;
+}
+
+pub fn mangle<PI: Iterator<Item=InternedString>>(path: PI, hash: Option<&str>) -> String {
+    // Follow C++ namespace-mangling style, see
+    // http://en.wikipedia.org/wiki/Name_mangling for more info.
+    //
+    // It turns out that on OSX you can actually have arbitrary symbols in
+    // function names (at least when given to LLVM), but this is not possible
+    // when using unix's linker. Perhaps one day when we just use a linker from LLVM
+    // we won't need to do this name mangling. The problem with name mangling is
+    // that it seriously limits the available characters. For example we can't
+    // have things like &T in symbol names when one would theoretically
+    // want them for things like impls of traits on that type.
+    //
+    // To be able to work on all platforms and get *some* reasonable output, we
+    // use C++ name-mangling.
+
+    let mut n = String::from("_ZN"); // _Z == Begin name-sequence, N == nested
+
+    fn push(n: &mut String, s: &str) {
+        let sani = sanitize(s);
+        n.push_str(&format!("{}{}", sani.len(), sani));
+    }
+
+    // First, connect each component with <len, name> pairs.
+    for data in path {
+        push(&mut n, &data);
+    }
+
+    if let Some(s) = hash {
+        push(&mut n, s)
+    }
+
+    n.push('E'); // End name-sequence.
+    n
+}
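
To make the mangling and hashing scheme above concrete: each sanitized path component is emitted as `<len><name>`, the 64-bit symbol hash (prefixed with `h`) is appended as one more component, and the whole sequence is wrapped in `_ZN ... E`. A simplified standalone sketch, assuming components that are already gas-safe (so `sanitize` would be a no-op) and a made-up hash value:

    fn mangle(path: &[&str], hash: Option<&str>) -> String {
        let mut n = String::from("_ZN");
        for s in path.iter().chain(hash.iter()) {
            n.push_str(&format!("{}{}", s.len(), s));
        }
        n.push('E');
        n
    }

    fn main() {
        // e.g. fn `foo` in module `bar` of crate `baz`:
        assert_eq!(mangle(&["baz", "bar", "foo"], Some("h1234567890abcdef")),
                   "_ZN3baz3bar3foo17h1234567890abcdefE");
    }
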
index 92d8b928ef428bd8673dc25179ff32dc0ddcaa38..e6a51eb7c87e994a2f86398f40bff8c54f0f1e76 100644 (file)
@@ -16,7 +16,7 @@ use session::config::{self, OutputType};
 use llvm;
 use llvm::{ModuleRef, TargetMachineRef, PassManagerRef, DiagnosticInfoRef, ContextRef};
 use llvm::SMDiagnosticRef;
-use trans::{CrateTranslation, ModuleTranslation};
+use {CrateTranslation, ModuleTranslation};
 use util::common::time;
 use util::common::path2cstr;
 use syntax::codemap;
@@ -113,7 +113,7 @@ impl Emitter for SharedEmitter {
     }
 
     fn custom_emit(&mut self, _sp: &errors::RenderSpan, _msg: &str, _lvl: Level) {
-        panic!("SharedEmitter doesn't support custom_emit");
+        bug!("SharedEmitter doesn't support custom_emit");
     }
 }
 
@@ -159,7 +159,7 @@ pub fn create_target_machine(sess: &Session) -> TargetMachineRef {
                                  .cg
                                  .relocation_model));
             sess.abort_if_errors();
-            unreachable!();
+            bug!();
         }
     };
 
@@ -190,7 +190,7 @@ pub fn create_target_machine(sess: &Session) -> TargetMachineRef {
                                  .cg
                                  .code_model));
             sess.abort_if_errors();
-            unreachable!();
+            bug!();
         }
     };
 
diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs
new file mode 100644 (file)
index 0000000..104a74a
--- /dev/null
@@ -0,0 +1,2972 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Translate the completed AST to the LLVM IR.
+//!
+//! Some functions here, such as trans_block and trans_expr, return a value --
+//! the result of the translation to LLVM -- while others, such as trans_fn
+//! and trans_item, are called only for the side effect of adding a
+//! particular definition to the LLVM IR output we're producing.
+//!
+//! Hopefully useful general knowledge about trans:
+//!
+//!   * There's no way to find out the Ty type of a ValueRef.  Doing so
+//!     would be "trying to get the eggs out of an omelette" (credit:
+//!     pcwalton).  You can, instead, find out its TypeRef by calling val_ty,
+//!     but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int,
+//!     int) and rec(x=int, y=int, z=int) will have the same TypeRef.
+
+#![allow(non_camel_case_types)]
+
+pub use self::ValueOrigin::*;
+
+use super::CrateTranslation;
+use super::ModuleTranslation;
+
+use back::{link, symbol_names};
+use lint;
+use llvm::{BasicBlockRef, Linkage, ValueRef, Vector, get_param};
+use llvm;
+use rustc::cfg;
+use middle::cstore::CrateStore;
+use rustc::hir::def_id::DefId;
+use rustc::infer;
+use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem};
+use middle::weak_lang_items;
+use rustc::hir::pat_util::simple_name;
+use rustc::ty::subst::{self, Substs};
+use rustc::traits;
+use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc::ty::adjustment::CustomCoerceUnsized;
+use rustc::dep_graph::DepNode;
+use rustc::hir::map as hir_map;
+use rustc::util::common::time;
+use rustc::mir::mir_map::MirMap;
+use session::config::{self, NoDebugInfo, FullDebugInfo};
+use session::Session;
+use _match;
+use abi::{self, Abi, FnType};
+use adt;
+use attributes;
+use build::*;
+use builder::{Builder, noname};
+use callee::{Callee, CallArgs, ArgExprs, ArgVals};
+use cleanup::{self, CleanupMethods, DropHint};
+use closure;
+use common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_uint, C_integral};
+use collector::{self, TransItem, TransItemState, TransItemCollectionMode};
+use common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
+use common::{CrateContext, DropFlagHintsMap, Field, FunctionContext};
+use common::{Result, NodeIdAndSpan, VariantInfo};
+use common::{node_id_type, fulfill_obligation};
+use common::{type_is_immediate, type_is_zero_size, val_ty};
+use common;
+use consts;
+use context::SharedCrateContext;
+use controlflow;
+use datum;
+use debuginfo::{self, DebugLoc, ToDebugLoc};
+use declare;
+use expr;
+use glue;
+use inline;
+use machine;
+use machine::{llalign_of_min, llsize_of, llsize_of_real};
+use meth;
+use mir;
+use monomorphize::{self, Instance};
+use symbol_names_test;
+use tvec;
+use type_::Type;
+use type_of;
+use type_of::*;
+use value::Value;
+use Disr;
+use util::common::indenter;
+use util::sha2::Sha256;
+use util::nodemap::{NodeMap, NodeSet};
+
+use arena::TypedArena;
+use libc::c_uint;
+use std::ffi::{CStr, CString};
+use std::cell::{Cell, RefCell};
+use std::collections::{HashMap, HashSet};
+use std::str;
+use std::{i8, i16, i32, i64};
+use syntax::codemap::{Span, DUMMY_SP};
+use syntax::parse::token::InternedString;
+use syntax::attr::AttrMetaMethods;
+use syntax::attr;
+use rustc::hir::intravisit::{self, Visitor};
+use rustc::hir;
+use syntax::ast;
+
+thread_local! {
+    static TASK_LOCAL_INSN_KEY: RefCell<Option<Vec<&'static str>>> = {
+        RefCell::new(None)
+    }
+}
+
+pub fn with_insn_ctxt<F>(blk: F)
+    where F: FnOnce(&[&'static str])
+{
+    TASK_LOCAL_INSN_KEY.with(move |slot| {
+        slot.borrow().as_ref().map(move |s| blk(s));
+    })
+}
+
+pub fn init_insn_ctxt() {
+    TASK_LOCAL_INSN_KEY.with(|slot| {
+        *slot.borrow_mut() = Some(Vec::new());
+    });
+}
+
+pub struct _InsnCtxt {
+    _cannot_construct_outside_of_this_module: (),
+}
+
+impl Drop for _InsnCtxt {
+    fn drop(&mut self) {
+        TASK_LOCAL_INSN_KEY.with(|slot| {
+            match slot.borrow_mut().as_mut() {
+                Some(ctx) => {
+                    ctx.pop();
+                }
+                None => {}
+            }
+        })
+    }
+}
+
+pub fn push_ctxt(s: &'static str) -> _InsnCtxt {
+    debug!("new InsnCtxt: {}", s);
+    TASK_LOCAL_INSN_KEY.with(|slot| {
+        if let Some(ctx) = slot.borrow_mut().as_mut() {
+            ctx.push(s)
+        }
+    });
+    _InsnCtxt {
+        _cannot_construct_outside_of_this_module: (),
+    }
+}
+
+pub struct StatRecorder<'a, 'tcx: 'a> {
+    ccx: &'a CrateContext<'a, 'tcx>,
+    name: Option<String>,
+    istart: usize,
+}
+
+impl<'a, 'tcx> StatRecorder<'a, 'tcx> {
+    pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String) -> StatRecorder<'a, 'tcx> {
+        let istart = ccx.stats().n_llvm_insns.get();
+        StatRecorder {
+            ccx: ccx,
+            name: Some(name),
+            istart: istart,
+        }
+    }
+}
+
+impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
+    fn drop(&mut self) {
+        if self.ccx.sess().trans_stats() {
+            let iend = self.ccx.stats().n_llvm_insns.get();
+            self.ccx
+                .stats()
+                .fn_stats
+                .borrow_mut()
+                .push((self.name.take().unwrap(), iend - self.istart));
+            self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1);
+            // Reset LLVM insn count to avoid compound costs.
+            self.ccx.stats().n_llvm_insns.set(self.istart);
+        }
+    }
+}
+
+pub fn kind_for_closure(ccx: &CrateContext, closure_id: DefId) -> ty::ClosureKind {
+    *ccx.tcx().tables.borrow().closure_kinds.get(&closure_id).unwrap()
+}
+
+fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId {
+    match bcx.tcx().lang_items.require(it) {
+        Ok(id) => id,
+        Err(s) => {
+            bcx.sess().fatal(&format!("allocation of `{}` {}", info_ty, s));
+        }
+    }
+}
+
+// The following malloc_raw_dyn* functions allocate a box to contain
+// a given type, but with a potentially dynamic size.
+
+pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                  llty_ptr: Type,
+                                  info_ty: Ty<'tcx>,
+                                  size: ValueRef,
+                                  align: ValueRef,
+                                  debug_loc: DebugLoc)
+                                  -> Result<'blk, 'tcx> {
+    let _icx = push_ctxt("malloc_raw_dyn");
+
+    // Allocate space:
+    let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem);
+    let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().mk_substs(Substs::empty()))
+        .call(bcx, debug_loc, ArgVals(&[size, align]), None);
+
+    Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
+}
+
+
+pub fn bin_op_to_icmp_predicate(op: hir::BinOp_,
+                                signed: bool)
+                                -> llvm::IntPredicate {
+    match op {
+        hir::BiEq => llvm::IntEQ,
+        hir::BiNe => llvm::IntNE,
+        hir::BiLt => if signed { llvm::IntSLT } else { llvm::IntULT },
+        hir::BiLe => if signed { llvm::IntSLE } else { llvm::IntULE },
+        hir::BiGt => if signed { llvm::IntSGT } else { llvm::IntUGT },
+        hir::BiGe => if signed { llvm::IntSGE } else { llvm::IntUGE },
+        op => {
+            bug!("bin_op_to_icmp_predicate: expected comparison operator, \
+                  found {:?}",
+                 op)
+        }
+    }
+}
+
+pub fn bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate {
+    match op {
+        hir::BiEq => llvm::RealOEQ,
+        hir::BiNe => llvm::RealUNE,
+        hir::BiLt => llvm::RealOLT,
+        hir::BiLe => llvm::RealOLE,
+        hir::BiGt => llvm::RealOGT,
+        hir::BiGe => llvm::RealOGE,
+        op => {
+            bug!("bin_op_to_fcmp_predicate: expected comparison operator, \
+                  found {:?}",
+                 op);
+        }
+    }
+}
+
+pub fn compare_fat_ptrs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                    lhs_addr: ValueRef,
+                                    lhs_extra: ValueRef,
+                                    rhs_addr: ValueRef,
+                                    rhs_extra: ValueRef,
+                                    _t: Ty<'tcx>,
+                                    op: hir::BinOp_,
+                                    debug_loc: DebugLoc)
+                                    -> ValueRef {
+    match op {
+        hir::BiEq => {
+            let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc);
+            let extra_eq = ICmp(bcx, llvm::IntEQ, lhs_extra, rhs_extra, debug_loc);
+            And(bcx, addr_eq, extra_eq, debug_loc)
+        }
+        hir::BiNe => {
+            let addr_eq = ICmp(bcx, llvm::IntNE, lhs_addr, rhs_addr, debug_loc);
+            let extra_eq = ICmp(bcx, llvm::IntNE, lhs_extra, rhs_extra, debug_loc);
+            Or(bcx, addr_eq, extra_eq, debug_loc)
+        }
+        hir::BiLe | hir::BiLt | hir::BiGe | hir::BiGt => {
+            // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
+            let (op, strict_op) = match op {
+                hir::BiLt => (llvm::IntULT, llvm::IntULT),
+                hir::BiLe => (llvm::IntULE, llvm::IntULT),
+                hir::BiGt => (llvm::IntUGT, llvm::IntUGT),
+                hir::BiGe => (llvm::IntUGE, llvm::IntUGT),
+                _ => bug!(),
+            };
+
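+            // Combine the two halves of the disjunction in the comment above:
+            // either the addresses are strictly ordered, or they are equal and
+            // the extra (metadata) word decides the comparison.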
+            let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc);
+            let extra_op = ICmp(bcx, op, lhs_extra, rhs_extra, debug_loc);
+            let addr_eq_extra_op = And(bcx, addr_eq, extra_op, debug_loc);
+
+            let addr_strict = ICmp(bcx, strict_op, lhs_addr, rhs_addr, debug_loc);
+            Or(bcx, addr_strict, addr_eq_extra_op, debug_loc)
+        }
+        _ => {
+            bug!("unexpected fat ptr binop");
+        }
+    }
+}
+
+pub fn compare_scalar_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                        lhs: ValueRef,
+                                        rhs: ValueRef,
+                                        t: Ty<'tcx>,
+                                        op: hir::BinOp_,
+                                        debug_loc: DebugLoc)
+                                        -> ValueRef {
+    match t.sty {
+        ty::TyTuple(ref tys) if tys.is_empty() => {
+            // We don't need to do actual comparisons for nil.
+            // () == () holds but () < () does not.
+            match op {
+                hir::BiEq | hir::BiLe | hir::BiGe => return C_bool(bcx.ccx(), true),
+                hir::BiNe | hir::BiLt | hir::BiGt => return C_bool(bcx.ccx(), false),
+                // refinements would be nice
+                _ => bug!("compare_scalar_types: must be a comparison operator"),
+            }
+        }
+        ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyBool | ty::TyUint(_) | ty::TyChar => {
+            ICmp(bcx,
+                 bin_op_to_icmp_predicate(op, false),
+                 lhs,
+                 rhs,
+                 debug_loc)
+        }
+        ty::TyRawPtr(mt) if common::type_is_sized(bcx.tcx(), mt.ty) => {
+            ICmp(bcx,
+                 bin_op_to_icmp_predicate(op, false),
+                 lhs,
+                 rhs,
+                 debug_loc)
+        }
+        ty::TyRawPtr(_) => {
+            let lhs_addr = Load(bcx, GEPi(bcx, lhs, &[0, abi::FAT_PTR_ADDR]));
+            let lhs_extra = Load(bcx, GEPi(bcx, lhs, &[0, abi::FAT_PTR_EXTRA]));
+
+            let rhs_addr = Load(bcx, GEPi(bcx, rhs, &[0, abi::FAT_PTR_ADDR]));
+            let rhs_extra = Load(bcx, GEPi(bcx, rhs, &[0, abi::FAT_PTR_EXTRA]));
+            compare_fat_ptrs(bcx,
+                             lhs_addr,
+                             lhs_extra,
+                             rhs_addr,
+                             rhs_extra,
+                             t,
+                             op,
+                             debug_loc)
+        }
+        ty::TyInt(_) => {
+            ICmp(bcx,
+                 bin_op_to_icmp_predicate(op, true),
+                 lhs,
+                 rhs,
+                 debug_loc)
+        }
+        ty::TyFloat(_) => {
+            FCmp(bcx,
+                 bin_op_to_fcmp_predicate(op),
+                 lhs,
+                 rhs,
+                 debug_loc)
+        }
+        // Should never get here, because t is scalar.
+        _ => bug!("non-scalar type passed to compare_scalar_types"),
+    }
+}
+
+pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                      lhs: ValueRef,
+                                      rhs: ValueRef,
+                                      t: Ty<'tcx>,
+                                      ret_ty: Type,
+                                      op: hir::BinOp_,
+                                      debug_loc: DebugLoc)
+                                      -> ValueRef {
+    let signed = match t.sty {
+        ty::TyFloat(_) => {
+            let cmp = bin_op_to_fcmp_predicate(op);
+            return SExt(bcx, FCmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty);
+        },
+        ty::TyUint(_) => false,
+        ty::TyInt(_) => true,
+        _ => bug!("compare_simd_types: invalid SIMD type"),
+    };
+
+    let cmp = bin_op_to_icmp_predicate(op, signed);
+    // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
+    // to get the correctly sized type. This will compile to a single instruction
+    // once the IR is converted to assembly if the SIMD instruction is supported
+    // by the target architecture.
+    SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty)
+}
+
+// Iterates through the elements of a structural type.
+pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
+                                         av: ValueRef,
+                                         t: Ty<'tcx>,
+                                         mut f: F)
+                                         -> Block<'blk, 'tcx>
+    where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>
+{
+    let _icx = push_ctxt("iter_structural_ty");
+
+    fn iter_variant<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
+                                   repr: &adt::Repr<'tcx>,
+                                   av: adt::MaybeSizedValue,
+                                   variant: ty::VariantDef<'tcx>,
+                                   substs: &Substs<'tcx>,
+                                   f: &mut F)
+                                   -> Block<'blk, 'tcx>
+        where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>
+    {
+        let _icx = push_ctxt("iter_variant");
+        let tcx = cx.tcx();
+        let mut cx = cx;
+
+        for (i, field) in variant.fields.iter().enumerate() {
+            let arg = monomorphize::field_ty(tcx, substs, field);
+            cx = f(cx,
+                   adt::trans_field_ptr(cx, repr, av, Disr::from(variant.disr_val), i),
+                   arg);
+        }
+        return cx;
+    }
+
+    let value = if common::type_is_sized(cx.tcx(), t) {
+        adt::MaybeSizedValue::sized(av)
+    } else {
+        let data = Load(cx, expr::get_dataptr(cx, av));
+        let info = Load(cx, expr::get_meta(cx, av));
+        adt::MaybeSizedValue::unsized_(data, info)
+    };
+
+    let mut cx = cx;
+    match t.sty {
+        ty::TyStruct(..) => {
+            let repr = adt::represent_type(cx.ccx(), t);
+            let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
+            for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
+                let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr::from(discr), i);
+
+                let val = if common::type_is_sized(cx.tcx(), field_ty) {
+                    llfld_a
+                } else {
+                    let scratch = datum::rvalue_scratch_datum(cx, field_ty, "__fat_ptr_iter");
+                    Store(cx, llfld_a, expr::get_dataptr(cx, scratch.val));
+                    Store(cx, value.meta, expr::get_meta(cx, scratch.val));
+                    scratch.val
+                };
+                cx = f(cx, val, field_ty);
+            }
+        }
+        ty::TyClosure(_, ref substs) => {
+            let repr = adt::represent_type(cx.ccx(), t);
+            for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() {
+                let llupvar = adt::trans_field_ptr(cx, &repr, value, Disr(0), i);
+                cx = f(cx, llupvar, upvar_ty);
+            }
+        }
+        ty::TyArray(_, n) => {
+            let (base, len) = tvec::get_fixed_base_and_len(cx, value.value, n);
+            let unit_ty = t.sequence_element_type(cx.tcx());
+            cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
+        }
+        ty::TySlice(_) | ty::TyStr => {
+            let unit_ty = t.sequence_element_type(cx.tcx());
+            cx = tvec::iter_vec_raw(cx, value.value, unit_ty, value.meta, f);
+        }
+        ty::TyTuple(ref args) => {
+            let repr = adt::represent_type(cx.ccx(), t);
+            for (i, arg) in args.iter().enumerate() {
+                let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr(0), i);
+                cx = f(cx, llfld_a, *arg);
+            }
+        }
+        ty::TyEnum(en, substs) => {
+            let fcx = cx.fcx;
+            let ccx = fcx.ccx;
+
+            let repr = adt::represent_type(ccx, t);
+            let n_variants = en.variants.len();
+
+            // NB: we must hit the discriminant first so that structural
+            // comparisons know not to proceed when the discriminants differ.
+
+            match adt::trans_switch(cx, &repr, av, false) {
+                (_match::Single, None) => {
+                    if n_variants != 0 {
+                        assert!(n_variants == 1);
+                        cx = iter_variant(cx, &repr, adt::MaybeSizedValue::sized(av),
+                                          &en.variants[0], substs, &mut f);
+                    }
+                }
+                (_match::Switch, Some(lldiscrim_a)) => {
+                    cx = f(cx, lldiscrim_a, cx.tcx().types.isize);
+
+                    // Create a fall-through basic block for the "else" case of
+                    // the switch instruction we're about to generate. Note that
+                    // we do **not** use an Unreachable instruction here, even
+                    // though most of the time this basic block will never be hit.
+                    //
+                    // When an enum is dropped, its contents are currently
+                    // overwritten to DTOR_DONE, which means the discriminant
+                    // could have changed value to something not within the actual
+                    // range of the discriminant. Currently this function is only
+                    // used for drop glue so in this case we just return quickly
+                    // from the outer function, and any other use case will only
+                    // call this for an already-valid enum in which case the `ret
+                    // void` will never be hit.
+                    let ret_void_cx = fcx.new_temp_block("enum-iter-ret-void");
+                    RetVoid(ret_void_cx, DebugLoc::None);
+                    let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants);
+                    let next_cx = fcx.new_temp_block("enum-iter-next");
+
+                    for variant in &en.variants {
+                        let variant_cx = fcx.new_temp_block(&format!("enum-iter-variant-{}",
+                                                                     &variant.disr_val
+                                                                             .to_string()));
+                        let case_val = adt::trans_case(cx, &repr, Disr::from(variant.disr_val));
+                        AddCase(llswitch, case_val, variant_cx.llbb);
+                        let variant_cx = iter_variant(variant_cx,
+                                                      &repr,
+                                                      value,
+                                                      variant,
+                                                      substs,
+                                                      &mut f);
+                        Br(variant_cx, next_cx.llbb, DebugLoc::None);
+                    }
+                    cx = next_cx;
+                }
+                _ => ccx.sess().unimpl("value from adt::trans_switch in iter_structural_ty"),
+            }
+        }
+        _ => {
+            cx.sess().unimpl(&format!("type in iter_structural_ty: {}", t))
+        }
+    }
+    return cx;
+}
+
+
+/// Retrieve the information we are losing (making dynamic) in an unsizing
+/// adjustment.
+///
+/// The `old_info` argument is a bit funny. It is intended for use
+/// in an upcast, where the new vtable for an object will be derived
+/// from the old one.
+pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
+                                source: Ty<'tcx>,
+                                target: Ty<'tcx>,
+                                old_info: Option<ValueRef>)
+                                -> ValueRef {
+    let (source, target) = ccx.tcx().struct_lockstep_tails(source, target);
+    match (&source.sty, &target.sty) {
+        (&ty::TyArray(_, len), &ty::TySlice(_)) => C_uint(ccx, len),
+        (&ty::TyTrait(_), &ty::TyTrait(_)) => {
+            // For now, upcasts are limited to changes in marker
+            // traits, and hence never actually require a change
+            // to the vtable.
+            old_info.expect("unsized_info: missing old info for trait upcast")
+        }
+        (_, &ty::TyTrait(box ty::TraitTy { ref principal, .. })) => {
+            // Note that we preserve binding levels here:
+            let substs = principal.0.substs.with_self_ty(source).erase_regions();
+            let substs = ccx.tcx().mk_substs(substs);
+            let trait_ref = ty::Binder(ty::TraitRef {
+                def_id: principal.def_id(),
+                substs: substs,
+            });
+            consts::ptrcast(meth::get_vtable(ccx, trait_ref),
+                            Type::vtable_ptr(ccx))
+        }
+        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
+                  source,
+                  target),
+    }
+}
+
+/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
+pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                   src: ValueRef,
+                                   src_ty: Ty<'tcx>,
+                                   dst_ty: Ty<'tcx>)
+                                   -> (ValueRef, ValueRef) {
+    debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
+    match (&src_ty.sty, &dst_ty.sty) {
+        (&ty::TyBox(a), &ty::TyBox(b)) |
+        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
+         &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
+        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
+         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
+        (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
+         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
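+            // The source pointee must be sized here; build the fat pointer as
+            // (data pointer cast to the target's pointee type, freshly
+            // computed unsizing metadata).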
+            assert!(common::type_is_sized(bcx.tcx(), a));
+            let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), b).ptr_to();
+            (PointerCast(bcx, src, ptr_ty),
+             unsized_info(bcx.ccx(), a, b, None))
+        }
+        _ => bug!("unsize_thin_ptr: called on bad types"),
+    }
+}
+
+/// Coerce `src`, which is a reference to a value of type `src_ty`,
+/// to a value of type `dst_ty` and store the result in `dst`
+pub fn coerce_unsized_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                       src: ValueRef,
+                                       src_ty: Ty<'tcx>,
+                                       dst: ValueRef,
+                                       dst_ty: Ty<'tcx>) {
+    match (&src_ty.sty, &dst_ty.sty) {
+        (&ty::TyBox(..), &ty::TyBox(..)) |
+        (&ty::TyRef(..), &ty::TyRef(..)) |
+        (&ty::TyRef(..), &ty::TyRawPtr(..)) |
+        (&ty::TyRawPtr(..), &ty::TyRawPtr(..)) => {
+            let (base, info) = if common::type_is_fat_ptr(bcx.tcx(), src_ty) {
+                // fat-ptr to fat-ptr unsize preserves the vtable
+                load_fat_ptr(bcx, src, src_ty)
+            } else {
+                let base = load_ty(bcx, src, src_ty);
+                unsize_thin_ptr(bcx, base, src_ty, dst_ty)
+            };
+            store_fat_ptr(bcx, base, info, dst, dst_ty);
+        }
+
+        // This can be extended to enums and tuples in the future.
+        // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
+        (&ty::TyStruct(def_a, _), &ty::TyStruct(def_b, _)) => {
+            assert_eq!(def_a, def_b);
+
+            let src_repr = adt::represent_type(bcx.ccx(), src_ty);
+            let src_fields = match &*src_repr {
+                &adt::Repr::Univariant(ref s, _) => &s.fields,
+                _ => bug!("struct has non-univariant repr"),
+            };
+            let dst_repr = adt::represent_type(bcx.ccx(), dst_ty);
+            let dst_fields = match &*dst_repr {
+                &adt::Repr::Univariant(ref s, _) => &s.fields,
+                _ => bug!("struct has non-univariant repr"),
+            };
+
+            let src = adt::MaybeSizedValue::sized(src);
+            let dst = adt::MaybeSizedValue::sized(dst);
+
+            let iter = src_fields.iter().zip(dst_fields).enumerate();
+            for (i, (src_fty, dst_fty)) in iter {
+                if type_is_zero_size(bcx.ccx(), dst_fty) {
+                    continue;
+                }
+
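+                // Fields whose source and destination types already agree are
+                // copied bitwise; the field whose type changed is coerced
+                // recursively.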
+                let src_f = adt::trans_field_ptr(bcx, &src_repr, src, Disr(0), i);
+                let dst_f = adt::trans_field_ptr(bcx, &dst_repr, dst, Disr(0), i);
+                if src_fty == dst_fty {
+                    memcpy_ty(bcx, dst_f, src_f, src_fty);
+                } else {
+                    coerce_unsized_into(bcx, src_f, src_fty, dst_f, dst_fty);
+                }
+            }
+        }
+        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}",
+                  src_ty,
+                  dst_ty),
+    }
+}
+
+pub fn custom_coerce_unsize_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
+                                             source_ty: Ty<'tcx>,
+                                             target_ty: Ty<'tcx>)
+                                             -> CustomCoerceUnsized {
+    let trait_substs = Substs::new(subst::VecPerParamSpace::new(vec![target_ty],
+                                                                vec![source_ty],
+                                                                Vec::new()),
+                                   subst::VecPerParamSpace::empty());
+
+    let trait_ref = ty::Binder(ty::TraitRef {
+        def_id: ccx.tcx().lang_items.coerce_unsized_trait().unwrap(),
+        substs: ccx.tcx().mk_substs(trait_substs)
+    });
+
+    match fulfill_obligation(ccx, DUMMY_SP, trait_ref) {
+        traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
+            ccx.tcx().custom_coerce_unsized_kind(impl_def_id)
+        }
+        vtable => {
+            bug!("invalid CoerceUnsized vtable: {:?}", vtable);
+        }
+    }
+}
+
+pub fn cast_shift_expr_rhs(cx: Block, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+    cast_shift_rhs(op, lhs, rhs, |a, b| Trunc(cx, a, b), |a, b| ZExt(cx, a, b))
+}
+
+pub fn cast_shift_const_rhs(op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+    cast_shift_rhs(op,
+                   lhs,
+                   rhs,
+                   |a, b| unsafe { llvm::LLVMConstTrunc(a, b.to_ref()) },
+                   |a, b| unsafe { llvm::LLVMConstZExt(a, b.to_ref()) })
+}
+
+fn cast_shift_rhs<F, G>(op: hir::BinOp_,
+                        lhs: ValueRef,
+                        rhs: ValueRef,
+                        trunc: F,
+                        zext: G)
+                        -> ValueRef
+    where F: FnOnce(ValueRef, Type) -> ValueRef,
+          G: FnOnce(ValueRef, Type) -> ValueRef
+{
+    // Shifts may have an integer of any size on the rhs.
+    if op.is_shift() {
+        let mut rhs_llty = val_ty(rhs);
+        let mut lhs_llty = val_ty(lhs);
+        if rhs_llty.kind() == Vector {
+            rhs_llty = rhs_llty.element_type()
+        }
+        if lhs_llty.kind() == Vector {
+            lhs_llty = lhs_llty.element_type()
+        }
+        let rhs_sz = rhs_llty.int_width();
+        let lhs_sz = lhs_llty.int_width();
+        if lhs_sz < rhs_sz {
+            trunc(rhs, lhs_llty)
+        } else if lhs_sz > rhs_sz {
+            // FIXME (#1877): If shifting by negative values ever
+            // becomes defined behavior, then this is wrong.
+            zext(rhs, lhs_llty)
+        } else {
+            rhs
+        }
+    } else {
+        rhs
+    }
+}
+
+pub fn llty_and_min_for_signed_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+                                              val_t: Ty<'tcx>)
+                                              -> (Type, u64) {
+    match val_t.sty {
+        ty::TyInt(t) => {
+            let llty = Type::int_from_ty(cx.ccx(), t);
+            let min = match t {
+                ast::IntTy::Is if llty == Type::i32(cx.ccx()) => i32::MIN as u64,
+                ast::IntTy::Is => i64::MIN as u64,
+                ast::IntTy::I8 => i8::MIN as u64,
+                ast::IntTy::I16 => i16::MIN as u64,
+                ast::IntTy::I32 => i32::MIN as u64,
+                ast::IntTy::I64 => i64::MIN as u64,
+            };
+            (llty, min)
+        }
+        _ => bug!(),
+    }
+}
+
+pub fn fail_if_zero_or_overflows<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+                                             call_info: NodeIdAndSpan,
+                                             divrem: hir::BinOp,
+                                             lhs: ValueRef,
+                                             rhs: ValueRef,
+                                             rhs_t: Ty<'tcx>)
+                                             -> Block<'blk, 'tcx> {
+    let (zero_text, overflow_text) = if divrem.node == hir::BiDiv {
+        ("attempted to divide by zero",
+         "attempted to divide with overflow")
+    } else {
+        ("attempted remainder with a divisor of zero",
+         "attempted remainder with overflow")
+    };
+    let debug_loc = call_info.debug_loc();
+
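+    // Build an i1 that is true when the divisor is zero (for SIMD divisors,
+    // when any lane is zero), and note whether the operands are signed, since
+    // only signed division/remainder can overflow.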
+    let (is_zero, is_signed) = match rhs_t.sty {
+        ty::TyInt(t) => {
+            let zero = C_integral(Type::int_from_ty(cx.ccx(), t), 0, false);
+            (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), true)
+        }
+        ty::TyUint(t) => {
+            let zero = C_integral(Type::uint_from_ty(cx.ccx(), t), 0, false);
+            (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), false)
+        }
+        ty::TyStruct(def, _) if def.is_simd() => {
+            let mut res = C_bool(cx.ccx(), false);
+            for i in 0..rhs_t.simd_size(cx.tcx()) {
+                res = Or(cx,
+                         res,
+                         IsNull(cx, ExtractElement(cx, rhs, C_int(cx.ccx(), i as i64))),
+                         debug_loc);
+            }
+            (res, false)
+        }
+        _ => {
+            bug!("fail-if-zero on unexpected type: {}", rhs_t);
+        }
+    };
+    let bcx = with_cond(cx, is_zero, |bcx| {
+        controlflow::trans_fail(bcx, call_info, InternedString::new(zero_text))
+    });
+
+    // To quote LLVM's documentation for the sdiv instruction:
+    //
+    //      Division by zero leads to undefined behavior. Overflow also leads
+    //      to undefined behavior; this is a rare case, but can occur, for
+    //      example, by doing a 32-bit division of -2147483648 by -1.
+    //
+    // In order to avoid undefined behavior, we perform runtime checks for
+    // signed division/remainder which would trigger overflow. For unsigned
+    // integers, no action beyond checking for zero need be taken.
+    if is_signed {
+        let (llty, min) = llty_and_min_for_signed_ty(cx, rhs_t);
+        let minus_one = ICmp(bcx,
+                             llvm::IntEQ,
+                             rhs,
+                             C_integral(llty, !0, false),
+                             debug_loc);
+        with_cond(bcx, minus_one, |bcx| {
+            let is_min = ICmp(bcx,
+                              llvm::IntEQ,
+                              lhs,
+                              C_integral(llty, min, true),
+                              debug_loc);
+            with_cond(bcx, is_min, |bcx| {
+                controlflow::trans_fail(bcx, call_info, InternedString::new(overflow_text))
+            })
+        })
+    } else {
+        bcx
+    }
+}
+
+pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                          llfn: ValueRef,
+                          llargs: &[ValueRef],
+                          debug_loc: DebugLoc)
+                          -> (ValueRef, Block<'blk, 'tcx>) {
+    let _icx = push_ctxt("invoke_");
+    if bcx.unreachable.get() {
+        return (C_null(Type::i8(bcx.ccx())), bcx);
+    }
+
+    match bcx.opt_node_id {
+        None => {
+            debug!("invoke at ???");
+        }
+        Some(id) => {
+            debug!("invoke at {}", bcx.tcx().map.node_to_string(id));
+        }
+    }
+
+    if need_invoke(bcx) {
+        debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb);
+        for &llarg in llargs {
+            debug!("arg: {:?}", Value(llarg));
+        }
+        let normal_bcx = bcx.fcx.new_temp_block("normal-return");
+        let landing_pad = bcx.fcx.get_landing_pad();
+
+        let llresult = Invoke(bcx,
+                              llfn,
+                              &llargs[..],
+                              normal_bcx.llbb,
+                              landing_pad,
+                              debug_loc);
+        return (llresult, normal_bcx);
+    } else {
+        debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb);
+        for &llarg in llargs {
+            debug!("arg: {:?}", Value(llarg));
+        }
+
+        let llresult = Call(bcx, llfn, &llargs[..], debug_loc);
+        return (llresult, bcx);
+    }
+}
+
+/// Returns whether this session's target will use SEH-based unwinding.
+///
+/// This is only true for MSVC targets, and even then the 64-bit MSVC target
+/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
+/// 64-bit MinGW) instead of "full SEH".
+pub fn wants_msvc_seh(sess: &Session) -> bool {
+    sess.target.target.options.is_like_msvc
+}
+
+pub fn avoid_invoke(bcx: Block) -> bool {
+    bcx.sess().no_landing_pads() || bcx.lpad().is_some()
+}
+
+pub fn need_invoke(bcx: Block) -> bool {
+    if avoid_invoke(bcx) {
+        false
+    } else {
+        bcx.fcx.needs_invoke()
+    }
+}
+
+pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>) -> ValueRef {
+    let _icx = push_ctxt("load_if_immediate");
+    if type_is_immediate(cx.ccx(), t) {
+        return load_ty(cx, v, t);
+    }
+    return v;
+}
+
+/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
+/// differs from the type used for SSA values. Also handles various special cases where the type
+/// gives us better information about what we are loading.
+pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
+    if cx.unreachable.get() {
+        return C_undef(type_of::type_of(cx.ccx(), t));
+    }
+    load_ty_builder(&B(cx), ptr, t)
+}
+
+pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
+    let ccx = b.ccx;
+    if type_is_zero_size(ccx, t) {
+        return C_undef(type_of::type_of(ccx, t));
+    }
+
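+    // If the pointer refers to a constant global, read its initializer
+    // directly instead of emitting a load (truncating a stored bool back to
+    // i1 on the way out).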
+    unsafe {
+        let global = llvm::LLVMIsAGlobalVariable(ptr);
+        if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
+            let val = llvm::LLVMGetInitializer(global);
+            if !val.is_null() {
+                if t.is_bool() {
+                    return llvm::LLVMConstTrunc(val, Type::i1(ccx).to_ref());
+                }
+                return val;
+            }
+        }
+    }
+
+    if t.is_bool() {
+        b.trunc(b.load_range_assert(ptr, 0, 2, llvm::False), Type::i1(ccx))
+    } else if t.is_char() {
+        // a char is a Unicode codepoint, and so takes values from 0
+        // to 0x10FFFF inclusive only.
+        b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False)
+    } else if (t.is_region_ptr() || t.is_unique()) &&
+              !common::type_is_fat_ptr(ccx.tcx(), t) {
+        b.load_nonnull(ptr)
+    } else {
+        b.load(ptr)
+    }
+}
+
+/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
+/// differs from the type used for SSA values.
+pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
+    if cx.unreachable.get() {
+        return;
+    }
+
+    debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v));
+
+    if common::type_is_fat_ptr(cx.tcx(), t) {
+        Store(cx,
+              ExtractValue(cx, v, abi::FAT_PTR_ADDR),
+              expr::get_dataptr(cx, dst));
+        Store(cx,
+              ExtractValue(cx, v, abi::FAT_PTR_EXTRA),
+              expr::get_meta(cx, dst));
+    } else {
+        Store(cx, from_immediate(cx, v), dst);
+    }
+}
+
+pub fn store_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+                                 data: ValueRef,
+                                 extra: ValueRef,
+                                 dst: ValueRef,
+                                 _ty: Ty<'tcx>) {
+    // FIXME: emit metadata
+    Store(cx, data, expr::get_dataptr(cx, dst));
+    Store(cx, extra, expr::get_meta(cx, dst));
+}
+
+pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+                                src: ValueRef,
+                                _ty: Ty<'tcx>)
+                                -> (ValueRef, ValueRef) {
+    // FIXME: emit metadata
+    (Load(cx, expr::get_dataptr(cx, src)),
+     Load(cx, expr::get_meta(cx, src)))
+}
+
+pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef {
+    if val_ty(val) == Type::i1(bcx.ccx()) {
+        ZExt(bcx, val, Type::i8(bcx.ccx()))
+    } else {
+        val
+    }
+}
+
+pub fn to_immediate(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
+    if ty.is_bool() {
+        Trunc(bcx, val, Type::i1(bcx.ccx()))
+    } else {
+        val
+    }
+}
+
+pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &hir::Local) -> Block<'blk, 'tcx> {
+    debug!("init_local(bcx={}, local.id={})", bcx.to_str(), local.id);
+    let _indenter = indenter();
+    let _icx = push_ctxt("init_local");
+    _match::store_local(bcx, local)
+}
+
+pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
+                             llbb: BasicBlockRef)
+                             -> Block<'blk, 'tcx> {
+    common::BlockS::new(llbb, None, fcx)
+}
+
+pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx>
+    where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>
+{
+    let _icx = push_ctxt("with_cond");
+
+    if bcx.unreachable.get() || common::const_to_opt_uint(val) == Some(0) {
+        return bcx;
+    }
+
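+    // Conditionally branch on `val` into a fresh `cond` block, run `f` there,
+    // and (unless `f` already terminated the block) fall through to `next`,
+    // which is returned as the continuation block.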
+    let fcx = bcx.fcx;
+    let next_cx = fcx.new_temp_block("next");
+    let cond_cx = fcx.new_temp_block("cond");
+    CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None);
+    let after_cx = f(cond_cx);
+    if !after_cx.terminated.get() {
+        Br(after_cx, next_cx.llbb, DebugLoc::None);
+    }
+    next_cx
+}
+
+enum Lifetime { Start, End }
+
+// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
+// on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
+// and the intrinsic for `lt` and passes them to `emit`, which is in
+// charge of generating code to call the passed intrinsic on whatever
+// block of generated code is targeted for the intrinsic.
+//
+// If LLVM lifetime intrinsic support is disabled (i.e.  optimizations
+// off) or `ptr` is zero-sized, then no-op (does not call `emit`).
+fn core_lifetime_emit<'blk, 'tcx, F>(ccx: &'blk CrateContext<'blk, 'tcx>,
+                                     ptr: ValueRef,
+                                     lt: Lifetime,
+                                     emit: F)
+    where F: FnOnce(&'blk CrateContext<'blk, 'tcx>, machine::llsize, ValueRef)
+{
+    if ccx.sess().opts.optimize == config::OptLevel::No {
+        return;
+    }
+
+    let _icx = push_ctxt(match lt {
+        Lifetime::Start => "lifetime_start",
+        Lifetime::End => "lifetime_end"
+    });
+
+    let size = machine::llsize_of_alloc(ccx, val_ty(ptr).element_type());
+    if size == 0 {
+        return;
+    }
+
+    let lifetime_intrinsic = ccx.get_intrinsic(match lt {
+        Lifetime::Start => "llvm.lifetime.start",
+        Lifetime::End => "llvm.lifetime.end"
+    });
+    emit(ccx, size, lifetime_intrinsic)
+}
+
+pub fn call_lifetime_start(cx: Block, ptr: ValueRef) {
+    core_lifetime_emit(cx.ccx(), ptr, Lifetime::Start, |ccx, size, lifetime_start| {
+        let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
+        Call(cx,
+             lifetime_start,
+             &[C_u64(ccx, size), ptr],
+             DebugLoc::None);
+    })
+}
+
+pub fn call_lifetime_end(cx: Block, ptr: ValueRef) {
+    core_lifetime_emit(cx.ccx(), ptr, Lifetime::End, |ccx, size, lifetime_end| {
+        let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
+        Call(cx,
+             lifetime_end,
+             &[C_u64(ccx, size), ptr],
+             DebugLoc::None);
+    })
+}
+
+// Generates code for resumption of unwind at the end of a landing pad.
+pub fn trans_unwind_resume(bcx: Block, lpval: ValueRef) {
+    if !bcx.sess().target.target.options.custom_unwind_resume {
+        Resume(bcx, lpval);
+    } else {
+        let exc_ptr = ExtractValue(bcx, lpval, 0);
+        bcx.fcx.eh_unwind_resume()
+            .call(bcx, DebugLoc::None, ArgVals(&[exc_ptr]), None);
+    }
+}
+
+pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>,
+                               dst: ValueRef,
+                               src: ValueRef,
+                               n_bytes: ValueRef,
+                               align: u32) {
+    let _icx = push_ctxt("call_memcpy");
+    let ccx = b.ccx;
+    let ptr_width = &ccx.sess().target.target.target_pointer_width[..];
+    let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width);
+    let memcpy = ccx.get_intrinsic(&key);
+    let src_ptr = b.pointercast(src, Type::i8p(ccx));
+    let dst_ptr = b.pointercast(dst, Type::i8p(ccx));
+    let size = b.intcast(n_bytes, ccx.int_type());
+    let align = C_i32(ccx, align as i32);
+    let volatile = C_bool(ccx, false);
+    b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
+}
+
+pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) {
+    let _icx = push_ctxt("memcpy_ty");
+    let ccx = bcx.ccx();
+
+    if type_is_zero_size(ccx, t) || bcx.unreachable.get() {
+        return;
+    }
+
+    if t.is_structural() {
+        let llty = type_of::type_of(ccx, t);
+        let llsz = llsize_of(ccx, llty);
+        let llalign = type_of::align_of(ccx, t);
+        call_memcpy(&B(bcx), dst, src, llsz, llalign as u32);
+    } else if common::type_is_fat_ptr(bcx.tcx(), t) {
+        let (data, extra) = load_fat_ptr(bcx, src, t);
+        store_fat_ptr(bcx, data, extra, dst, t);
+    } else {
+        store_ty(bcx, load_ty(bcx, src, t), dst, t);
+    }
+}
+
+pub fn drop_done_fill_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
+    if cx.unreachable.get() {
+        return;
+    }
+    let _icx = push_ctxt("drop_done_fill_mem");
+    let bcx = cx;
+    memfill(&B(bcx), llptr, t, adt::DTOR_DONE);
+}
+
+pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
+    if cx.unreachable.get() {
+        return;
+    }
+    let _icx = push_ctxt("init_zero_mem");
+    let bcx = cx;
+    memfill(&B(bcx), llptr, t, 0);
+}
+
+// Always use this function instead of storing a constant byte to the memory
+// in question. e.g. if you store a zero constant, LLVM will drown in vreg
+// allocation for large data structures, and the generated code will be
+// awful. (A telltale sign of this is large quantities of
+// `mov [byte ptr foo],0` in the generated code.)
+fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: u8) {
+    let _icx = push_ctxt("memfill");
+    let ccx = b.ccx;
+    let llty = type_of::type_of(ccx, ty);
+    let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to());
+    let llzeroval = C_u8(ccx, byte);
+    let size = machine::llsize_of(ccx, llty);
+    let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32);
+    call_memset(b, llptr, llzeroval, size, align, false);
+}
+
+pub fn call_memset<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>,
+                               ptr: ValueRef,
+                               fill_byte: ValueRef,
+                               size: ValueRef,
+                               align: ValueRef,
+                               volatile: bool) {
+    let ccx = b.ccx;
+    let ptr_width = &ccx.sess().target.target.target_pointer_width[..];
+    let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
+    let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key);
+    let volatile = C_bool(ccx, volatile);
+    b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
+}
+
+
+/// In general, when we create a scratch value in an alloca, the
+/// creator may not know if the block (that initializes the scratch
+/// with the desired value) actually dominates the cleanup associated
+/// with the scratch value.
+///
+/// To deal with this, when we do an alloca (at the *start* of the whole
+/// function body), we can optionally also set the associated
+/// dropped-flag state of the alloca to "dropped."
+#[derive(Copy, Clone, Debug)]
+pub enum InitAlloca {
+    /// Indicates that the state should have its associated drop flag
+    /// set to "dropped" at the point of allocation.
+    Dropped,
+    /// Indicates the value of the associated drop flag is irrelevant.
+    /// The embedded string literal is a programmer provided argument
+    /// for why. This is a safeguard forcing compiler devs to
+    /// document; it might be a good idea to also emit this as a
+    /// comment with the alloca itself when emitting LLVM output.ll.
+    Uninit(&'static str),
+}
+
+
+pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                            t: Ty<'tcx>,
+                            name: &str) -> ValueRef {
+    // pnkfelix: I do not know why alloc_ty meets the assumptions for
+    // passing Uninit, but it was never needed (even back when we had
+    // the original boolean `zero` flag on `lvalue_scratch_datum`).
+    alloc_ty_init(bcx, t, InitAlloca::Uninit("all alloc_ty are uninit"), name)
+}
+
+/// This variant of `fn alloc_ty` does not necessarily assume that the
+/// alloca should be created with no initial value. Instead the caller
+/// controls that assumption via the `init` flag.
+///
+/// Note that if the alloca *is* initialized via `init`, then we will
+/// also inject an `llvm.lifetime.start` before that initialization
+/// occurs, and thus callers should not call_lifetime_start
+/// themselves.  But if `init` says "uninitialized", then callers are
+/// in charge of choosing where to call_lifetime_start and
+/// subsequently populate the alloca.
+///
+/// (See related discussion on PR #30823.)
+pub fn alloc_ty_init<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                 t: Ty<'tcx>,
+                                 init: InitAlloca,
+                                 name: &str) -> ValueRef {
+    let _icx = push_ctxt("alloc_ty");
+    let ccx = bcx.ccx();
+    let ty = type_of::type_of(ccx, t);
+    assert!(!t.has_param_types());
+    match init {
+        InitAlloca::Dropped => alloca_dropped(bcx, t, name),
+        InitAlloca::Uninit(_) => alloca(bcx, ty, name),
+    }
+}
+
+pub fn alloca_dropped<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef {
+    let _icx = push_ctxt("alloca_dropped");
+    let llty = type_of::type_of(cx.ccx(), ty);
+    if cx.unreachable.get() {
+        unsafe { return llvm::LLVMGetUndef(llty.ptr_to().to_ref()); }
+    }
+    let p = alloca(cx, llty, name);
+    let b = cx.fcx.ccx.builder();
+    b.position_before(cx.fcx.alloca_insert_pt.get().unwrap());
+
+    // This is just like `call_lifetime_start` (but the latter expects a
+    // Block, which we do not have for `alloca_insert_pt`).
+    core_lifetime_emit(cx.ccx(), p, Lifetime::Start, |ccx, size, lifetime_start| {
+        let ptr = b.pointercast(p, Type::i8p(ccx));
+        b.call(lifetime_start, &[C_u64(ccx, size), ptr], None);
+    });
+    memfill(&b, p, ty, adt::DTOR_DONE);
+    p
+}
+
+pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
+    let _icx = push_ctxt("alloca");
+    if cx.unreachable.get() {
+        unsafe {
+            return llvm::LLVMGetUndef(ty.ptr_to().to_ref());
+        }
+    }
+    debuginfo::clear_source_location(cx.fcx);
+    Alloca(cx, ty, name)
+}
+
+pub fn set_value_name(val: ValueRef, name: &str) {
+    unsafe {
+        let name = CString::new(name).unwrap();
+        llvm::LLVMSetValueName(val, name.as_ptr());
+    }
+}
+
+struct FindNestedReturn {
+    found: bool,
+}
+
+impl FindNestedReturn {
+    fn new() -> FindNestedReturn {
+        FindNestedReturn {
+            found: false,
+        }
+    }
+}
+
+impl<'v> Visitor<'v> for FindNestedReturn {
+    fn visit_expr(&mut self, e: &hir::Expr) {
+        match e.node {
+            hir::ExprRet(..) => {
+                self.found = true;
+            }
+            _ => intravisit::walk_expr(self, e),
+        }
+    }
+}
+
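+// Finds the top-level block of the fn, method or closure that `id` refers to
+// and builds its control-flow graph; constructors and compiler-generated glue
+// get a dummy id and no CFG.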
+fn build_cfg(tcx: &TyCtxt, id: ast::NodeId) -> (ast::NodeId, Option<cfg::CFG>) {
+    let blk = match tcx.map.find(id) {
+        Some(hir_map::NodeItem(i)) => {
+            match i.node {
+                hir::ItemFn(_, _, _, _, _, ref blk) => {
+                    blk
+                }
+                _ => bug!("unexpected item variant in has_nested_returns"),
+            }
+        }
+        Some(hir_map::NodeTraitItem(trait_item)) => {
+            match trait_item.node {
+                hir::MethodTraitItem(_, Some(ref body)) => body,
+                _ => {
+                    bug!("unexpected variant: trait item other than a provided method in \
+                          has_nested_returns")
+                }
+            }
+        }
+        Some(hir_map::NodeImplItem(impl_item)) => {
+            match impl_item.node {
+                hir::ImplItemKind::Method(_, ref body) => body,
+                _ => {
+                    bug!("unexpected variant: non-method impl item in has_nested_returns")
+                }
+            }
+        }
+        Some(hir_map::NodeExpr(e)) => {
+            match e.node {
+                hir::ExprClosure(_, _, ref blk) => blk,
+                _ => bug!("unexpected expr variant in has_nested_returns"),
+            }
+        }
+        Some(hir_map::NodeVariant(..)) |
+        Some(hir_map::NodeStructCtor(..)) => return (ast::DUMMY_NODE_ID, None),
+
+        // glue, shims, etc
+        None if id == ast::DUMMY_NODE_ID => return (ast::DUMMY_NODE_ID, None),
+
+        _ => bug!("unexpected variant in has_nested_returns: {}",
+                  tcx.node_path_str(id)),
+    };
+
+    (blk.id, Some(cfg::CFG::new(tcx, blk)))
+}
+
+// Checks for the presence of "nested returns" in a function.
+// Nested returns are when the inner expression of a return expression
+// (the 'expr' in 'return expr') contains a return expression. Only cases
+// where the outer return is actually reachable are considered. Implicit
+// returns from the end of blocks are considered as well.
+//
+// This check is needed to handle the case where the inner expression is
+// part of a larger expression that may have already partially-filled the
+// return slot alloca. This can cause errors related to clean-up due to
+// the clobbering of the existing value in the return slot.
+fn has_nested_returns(tcx: &TyCtxt, cfg: &cfg::CFG, blk_id: ast::NodeId) -> bool {
+    for index in cfg.graph.depth_traverse(cfg.entry) {
+        let n = cfg.graph.node_data(index);
+        match tcx.map.find(n.id()) {
+            Some(hir_map::NodeExpr(ex)) => {
+                if let hir::ExprRet(Some(ref ret_expr)) = ex.node {
+                    let mut visitor = FindNestedReturn::new();
+                    intravisit::walk_expr(&mut visitor, &ret_expr);
+                    if visitor.found {
+                        return true;
+                    }
+                }
+            }
+            Some(hir_map::NodeBlock(blk)) if blk.id == blk_id => {
+                let mut visitor = FindNestedReturn::new();
+                walk_list!(&mut visitor, visit_expr, &blk.expr);
+                if visitor.found {
+                    return true;
+                }
+            }
+            _ => {}
+        }
+    }
+
+    return false;
+}
+
+impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
+    /// Create a function context for the given function.
+    /// Beware that you must call `fcx.init` or `fcx.bind_args`
+    /// before doing anything with the returned function context.
+    pub fn new(ccx: &'blk CrateContext<'blk, 'tcx>,
+               llfndecl: ValueRef,
+               fn_ty: FnType,
+               def_id: Option<DefId>,
+               param_substs: &'tcx Substs<'tcx>,
+               block_arena: &'blk TypedArena<common::BlockS<'blk, 'tcx>>)
+               -> FunctionContext<'blk, 'tcx> {
+        common::validate_substs(param_substs);
+
+        let inlined_did = def_id.and_then(|def_id| inline::get_local_instance(ccx, def_id));
+        let inlined_id = inlined_did.and_then(|id| ccx.tcx().map.as_local_node_id(id));
+        let local_id = def_id.and_then(|id| ccx.tcx().map.as_local_node_id(id));
+
+        debug!("FunctionContext::new(path={}, def_id={:?}, param_substs={:?})",
+            inlined_id.map_or(String::new(), |id| ccx.tcx().node_path_str(id)),
+            def_id,
+            param_substs);
+
+        let debug_context = debuginfo::create_function_debug_context(ccx,
+            inlined_id.unwrap_or(ast::DUMMY_NODE_ID), param_substs, llfndecl);
+
+        let cfg = inlined_id.map(|id| build_cfg(ccx.tcx(), id));
+        let nested_returns = if let Some((blk_id, Some(ref cfg))) = cfg {
+            has_nested_returns(ccx.tcx(), cfg, blk_id)
+        } else {
+            false
+        };
+
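+        // Decide whether to translate this item through MIR: with `-Z orbit`
+        // MIR translation is the default and `#[rustc_no_mir]` opts out;
+        // otherwise `#[rustc_mir]` opts in.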
+        let check_attrs = |attrs: &[ast::Attribute]| {
+            let default_to_mir = ccx.sess().opts.debugging_opts.orbit;
+            let invert = if default_to_mir { "rustc_no_mir" } else { "rustc_mir" };
+            default_to_mir ^ attrs.iter().any(|item| item.check_name(invert))
+        };
+
+        let use_mir = if let Some(id) = local_id {
+            check_attrs(ccx.tcx().map.attrs(id))
+        } else if let Some(def_id) = def_id {
+            check_attrs(&ccx.sess().cstore.item_attrs(def_id))
+        } else {
+            check_attrs(&[])
+        };
+
+        let mir = if use_mir {
+            def_id.and_then(|id| ccx.get_mir(id))
+        } else {
+            None
+        };
+
+        FunctionContext {
+            needs_ret_allocas: nested_returns && mir.is_none(),
+            mir: mir,
+            llfn: llfndecl,
+            llretslotptr: Cell::new(None),
+            param_env: ccx.tcx().empty_parameter_environment(),
+            alloca_insert_pt: Cell::new(None),
+            llreturn: Cell::new(None),
+            landingpad_alloca: Cell::new(None),
+            lllocals: RefCell::new(NodeMap()),
+            llupvars: RefCell::new(NodeMap()),
+            lldropflag_hints: RefCell::new(DropFlagHintsMap::new()),
+            fn_ty: fn_ty,
+            param_substs: param_substs,
+            span: inlined_id.and_then(|id| ccx.tcx().map.opt_span(id)),
+            block_arena: block_arena,
+            lpad_arena: TypedArena::new(),
+            ccx: ccx,
+            debug_context: debug_context,
+            scopes: RefCell::new(Vec::new()),
+            cfg: cfg.and_then(|(_, cfg)| cfg)
+        }
+    }
+
+    /// Performs setup on a newly created function, creating the entry
+    /// scope block and allocating space for the return pointer.
+    pub fn init(&'blk self, skip_retptr: bool, fn_did: Option<DefId>)
+                -> Block<'blk, 'tcx> {
+        let entry_bcx = self.new_temp_block("entry-block");
+
+        // Use a dummy instruction as the insertion point for all allocas.
+        // This is later removed in FunctionContext::cleanup.
+        self.alloca_insert_pt.set(Some(unsafe {
+            Load(entry_bcx, C_null(Type::i8p(self.ccx)));
+            llvm::LLVMGetFirstInstruction(entry_bcx.llbb)
+        }));
+
+        if !self.fn_ty.ret.is_ignore() && !skip_retptr {
+            // We normally allocate the llretslotptr, unless we
+            // have been instructed to skip it for immediate return
+            // values, or there is nothing to return at all.
+
+            // We create an alloca to hold a pointer of type `ret.original_ty`;
+            // that pointer will point at the alloca which holds the
+            // final return value.
+            let llty = self.fn_ty.ret.memory_ty(self.ccx);
+            let slot = if self.needs_ret_allocas {
+                // Let's create the stack slot
+                let slot = AllocaFcx(self, llty.ptr_to(), "llretslotptr");
+
+                // and if we're using an out pointer, then store that in our newly made slot
+                if self.fn_ty.ret.is_indirect() {
+                    let outptr = get_param(self.llfn, 0);
+
+                    let b = self.ccx.builder();
+                    b.position_before(self.alloca_insert_pt.get().unwrap());
+                    b.store(outptr, slot);
+                }
+
+                slot
+            } else {
+                // But if there are no nested returns, we skip the indirection
+                // and have a single retslot
+                if self.fn_ty.ret.is_indirect() {
+                    get_param(self.llfn, 0)
+                } else {
+                    AllocaFcx(self, llty, "sret_slot")
+                }
+            };
+
+            self.llretslotptr.set(Some(slot));
+        }
+
+        // Create the drop-flag hints for every unfragmented path in the function.
+        let tcx = self.ccx.tcx();
+        let tables = tcx.tables.borrow();
+        let mut hints = self.lldropflag_hints.borrow_mut();
+        let fragment_infos = tcx.fragment_infos.borrow();
+
+        // Intern table for drop-flag hint datums.
+        let mut seen = HashMap::new();
+
+        let fragment_infos = fn_did.and_then(|did| fragment_infos.get(&did));
+        if let Some(fragment_infos) = fragment_infos {
+            for &info in fragment_infos {
+
+                let make_datum = |id| {
+                    let init_val = C_u8(self.ccx, adt::DTOR_NEEDED_HINT);
+                    let llname = &format!("dropflag_hint_{}", id);
+                    debug!("adding hint {}", llname);
+                    let ty = tcx.types.u8;
+                    let ptr = alloc_ty(entry_bcx, ty, llname);
+                    Store(entry_bcx, init_val, ptr);
+                    let flag = datum::Lvalue::new_dropflag_hint("FunctionContext::init");
+                    datum::Datum::new(ptr, ty, flag)
+                };
+
+                let (var, datum) = match info {
+                    ty::FragmentInfo::Moved { var, .. } |
+                    ty::FragmentInfo::Assigned { var, .. } => {
+                        let opt_datum = seen.get(&var).cloned().unwrap_or_else(|| {
+                            let ty = tables.node_types[&var];
+                            if self.type_needs_drop(ty) {
+                                let datum = make_datum(var);
+                                seen.insert(var, Some(datum.clone()));
+                                Some(datum)
+                            } else {
+                                // No drop call needed, so we don't need a dropflag hint
+                                None
+                            }
+                        });
+                        if let Some(datum) = opt_datum {
+                            (var, datum)
+                        } else {
+                            continue
+                        }
+                    }
+                };
+                match info {
+                    ty::FragmentInfo::Moved { move_expr: expr_id, .. } => {
+                        debug!("FragmentInfo::Moved insert drop hint for {}", expr_id);
+                        hints.insert(expr_id, DropHint::new(var, datum));
+                    }
+                    ty::FragmentInfo::Assigned { assignee_id: expr_id, .. } => {
+                        debug!("FragmentInfo::Assigned insert drop hint for {}", expr_id);
+                        hints.insert(expr_id, DropHint::new(var, datum));
+                    }
+                }
+            }
+        }
+
+        entry_bcx
+    }
+
+    /// Creates lvalue datums for each of the incoming function arguments,
+    /// matches all argument patterns against them to produce bindings,
+    /// and returns the entry block (see FunctionContext::init).
+    fn bind_args(&'blk self,
+                 args: &[hir::Arg],
+                 abi: Abi,
+                 id: ast::NodeId,
+                 closure_env: closure::ClosureEnv,
+                 arg_scope: cleanup::CustomScopeIndex)
+                 -> Block<'blk, 'tcx> {
+        let _icx = push_ctxt("FunctionContext::bind_args");
+        let fn_did = self.ccx.tcx().map.local_def_id(id);
+        let mut bcx = self.init(false, Some(fn_did));
+        let arg_scope_id = cleanup::CustomScope(arg_scope);
+
+        let mut idx = 0;
+        let mut llarg_idx = self.fn_ty.ret.is_indirect() as usize;
+
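+        // For the "rust-call" ABI the last formal argument is a tuple that
+        // packs the caller's arguments, so remember its id in order to untuple
+        // it below.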
+        let has_tupled_arg = match closure_env {
+            closure::ClosureEnv::NotClosure => abi == Abi::RustCall,
+            closure::ClosureEnv::Closure(..) => {
+                closure_env.load(bcx, arg_scope_id);
+                let env_arg = &self.fn_ty.args[idx];
+                idx += 1;
+                if env_arg.pad.is_some() {
+                    llarg_idx += 1;
+                }
+                if !env_arg.is_ignore() {
+                    llarg_idx += 1;
+                }
+                false
+            }
+        };
+        let tupled_arg_id = if has_tupled_arg {
+            args[args.len() - 1].id
+        } else {
+            ast::DUMMY_NODE_ID
+        };
+
+        // Wrap the ValueRefs that we get from `get_param` for each argument
+        // into lvalue datums.
+        //
+        // For certain mode/type combinations, the raw llarg values are passed
+        // by value.  However, within the fn body itself, we want to always
+        // have all locals and arguments be by-ref so that we can cancel the
+        // cleanup and for better interaction with LLVM's debug info.  So, if
+        // the argument would be passed by value, we store it into an alloca.
+        // This alloca should be optimized away by LLVM's mem-to-reg pass in
+        // the event it's not truly needed.
+        let uninit_reason = InitAlloca::Uninit("fn_arg populate dominates dtor");
+        for hir_arg in args {
+            let arg_ty = node_id_type(bcx, hir_arg.id);
+            let arg_datum = if hir_arg.id != tupled_arg_id {
+                let arg = &self.fn_ty.args[idx];
+                idx += 1;
+                if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo {
+                    // Don't copy an indirect argument to an alloca, the caller
+                    // already put it in a temporary alloca and gave it up, unless
+                    // we emit extra-debug-info, which requires local allocas :(.
+                    let llarg = get_param(self.llfn, llarg_idx as c_uint);
+                    llarg_idx += 1;
+                    self.schedule_lifetime_end(arg_scope_id, llarg);
+                    self.schedule_drop_mem(arg_scope_id, llarg, arg_ty, None);
+
+                    datum::Datum::new(llarg,
+                                    arg_ty,
+                                    datum::Lvalue::new("FunctionContext::bind_args"))
+                } else {
+                    unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "",
+                                                                   uninit_reason,
+                                                                   arg_scope_id, |bcx, dst| {
+                        debug!("FunctionContext::bind_args: {:?}: {:?}", hir_arg, arg_ty);
+                        let b = &bcx.build();
+                        if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
+                            let meta = &self.fn_ty.args[idx];
+                            idx += 1;
+                            arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, dst));
+                            meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, dst));
+                        } else {
+                            arg.store_fn_arg(b, &mut llarg_idx, dst);
+                        }
+                        bcx
+                    }))
+                }
+            } else {
+                // FIXME(pcwalton): Reduce the amount of code bloat this is responsible for.
+                let tupled_arg_tys = match arg_ty.sty {
+                    ty::TyTuple(ref tys) => tys,
+                    _ => bug!("last argument of `rust-call` fn isn't a tuple?!")
+                };
+
+                unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx,
+                                                            arg_ty,
+                                                            "tupled_args",
+                                                            uninit_reason,
+                                                            arg_scope_id,
+                                                            |bcx, llval| {
+                    debug!("FunctionContext::bind_args: tupled {:?}: {:?}", hir_arg, arg_ty);
+                    for (j, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
+                        let dst = StructGEP(bcx, llval, j);
+                        let arg = &self.fn_ty.args[idx];
+                        idx += 1;
+                        let b = &bcx.build();
+                        if common::type_is_fat_ptr(bcx.tcx(), tupled_arg_ty) {
+                            let meta = &self.fn_ty.args[idx];
+                            idx += 1;
+                            arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, dst));
+                            meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, dst));
+                        } else {
+                            arg.store_fn_arg(b, &mut llarg_idx, dst);
+                        }
+                    }
+                    bcx
+                }))
+            };
+
+            let pat = &hir_arg.pat;
+            bcx = if let Some(name) = simple_name(pat) {
+                // Generate nicer LLVM for the common case of a fn argument
+                // pattern like `x: T`
+                set_value_name(arg_datum.val, &bcx.name(name));
+                self.lllocals.borrow_mut().insert(pat.id, arg_datum);
+                bcx
+            } else {
+                // General path. Copy out the values that are used in the
+                // pattern.
+                _match::bind_irrefutable_pat(bcx, pat, arg_datum.match_input(), arg_scope_id)
+            };
+            debuginfo::create_argument_metadata(bcx, hir_arg);
+        }
+
+        bcx
+    }
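As a rough illustration of the `rust-call` tupling that the loop above unpacks (illustrative sketch, not part of this commit; the feature-gate names are from roughly this era and may differ), a manual `FnOnce` impl receives all of its arguments packed into a single tuple:

    #![feature(unboxed_closures, fn_traits)]

    struct Adder;

    impl FnOnce<(i32, i32)> for Adder {
        type Output = i32;
        // The "rust-call" ABI passes both arguments in one tuple;
        // bind_args is what splits such a tuple back into per-field datums.
        extern "rust-call" fn call_once(self, args: (i32, i32)) -> i32 {
            args.0 + args.1
        }
    }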
+
+    /// Ties up the llstaticallocas -> llloadenv -> lltop edges,
+    /// and builds the return block.
+    pub fn finish(&'blk self, last_bcx: Block<'blk, 'tcx>,
+                  ret_debug_loc: DebugLoc) {
+        let _icx = push_ctxt("FunctionContext::finish");
+
+        let ret_cx = match self.llreturn.get() {
+            Some(llreturn) => {
+                if !last_bcx.terminated.get() {
+                    Br(last_bcx, llreturn, DebugLoc::None);
+                }
+                raw_block(self, llreturn)
+            }
+            None => last_bcx,
+        };
+
+        self.build_return_block(ret_cx, ret_debug_loc);
+
+        debuginfo::clear_source_location(self);
+        self.cleanup();
+    }
+
+    // Builds the return block for a function.
+    pub fn build_return_block(&self, ret_cx: Block<'blk, 'tcx>,
+                              ret_debug_location: DebugLoc) {
+        if self.llretslotptr.get().is_none() ||
+           ret_cx.unreachable.get() ||
+           (!self.needs_ret_allocas && self.fn_ty.ret.is_indirect()) {
+            return RetVoid(ret_cx, ret_debug_location);
+        }
+
+        let retslot = if self.needs_ret_allocas {
+            Load(ret_cx, self.llretslotptr.get().unwrap())
+        } else {
+            self.llretslotptr.get().unwrap()
+        };
+        let retptr = Value(retslot);
+        let llty = self.fn_ty.ret.original_ty;
+        match (retptr.get_dominating_store(ret_cx), self.fn_ty.ret.cast) {
+            // If there's only a single store to the ret slot, we can directly return
+            // the value that was stored and omit the store and the alloca.
+            // However, we only want to do this when there is no cast needed.
+            (Some(s), None) => {
+                let mut retval = s.get_operand(0).unwrap().get();
+                s.erase_from_parent();
+
+                if retptr.has_no_uses() {
+                    retptr.erase_from_parent();
+                }
+
+                if self.fn_ty.ret.is_indirect() {
+                    Store(ret_cx, retval, get_param(self.llfn, 0));
+                    RetVoid(ret_cx, ret_debug_location)
+                } else {
+                    if llty == Type::i1(self.ccx) {
+                        retval = Trunc(ret_cx, retval, llty);
+                    }
+                    Ret(ret_cx, retval, ret_debug_location)
+                }
+            }
+            (_, cast_ty) if self.fn_ty.ret.is_indirect() => {
+                // Otherwise, copy the return value to the ret slot.
+                assert_eq!(cast_ty, None);
+                let llsz = llsize_of(self.ccx, self.fn_ty.ret.ty);
+                let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty);
+                call_memcpy(&B(ret_cx), get_param(self.llfn, 0),
+                            retslot, llsz, llalign as u32);
+                RetVoid(ret_cx, ret_debug_location)
+            }
+            (_, Some(cast_ty)) => {
+                let load = Load(ret_cx, PointerCast(ret_cx, retslot, cast_ty.ptr_to()));
+                let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty);
+                unsafe {
+                    llvm::LLVMSetAlignment(load, llalign);
+                }
+                Ret(ret_cx, load, ret_debug_location)
+            }
+            (_, None) => {
+                let retval = if llty == Type::i1(self.ccx) {
+                    let val = LoadRangeAssert(ret_cx, retslot, 0, 2, llvm::False);
+                    Trunc(ret_cx, val, llty)
+                } else {
+                    Load(ret_cx, retslot)
+                };
+                Ret(ret_cx, retval, ret_debug_location)
+            }
+        }
+    }
+}
+
+/// Builds an LLVM function out of a source function.
+///
+/// If the function closes over its environment a closure will be returned.
+pub fn trans_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                               decl: &hir::FnDecl,
+                               body: &hir::Block,
+                               llfndecl: ValueRef,
+                               param_substs: &'tcx Substs<'tcx>,
+                               def_id: DefId,
+                               inlined_id: ast::NodeId,
+                               fn_ty: FnType,
+                               abi: Abi,
+                               closure_env: closure::ClosureEnv) {
+    ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);
+
+    if collector::collecting_debug_information(ccx) {
+        ccx.record_translation_item_as_generated(
+            TransItem::Fn(Instance::new(def_id, param_substs)));
+    }
+
+    let _icx = push_ctxt("trans_closure");
+    attributes::emit_uwtable(llfndecl, true);
+
+    debug!("trans_closure(..., param_substs={:?})", param_substs);
+
+    let (arena, fcx): (TypedArena<_>, FunctionContext);
+    arena = TypedArena::new();
+    fcx = FunctionContext::new(ccx, llfndecl, fn_ty, Some(def_id), param_substs, &arena);
+
+    if fcx.mir.is_some() {
+        return mir::trans_mir(&fcx);
+    }
+
+    // cleanup scope for the incoming arguments
+    let fn_cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(
+        ccx, inlined_id, body.span, true);
+    let arg_scope = fcx.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc);
+
+    // Set up arguments to the function.
+    debug!("trans_closure: function: {:?}", Value(fcx.llfn));
+    let bcx = fcx.bind_args(&decl.inputs, abi, inlined_id, closure_env, arg_scope);
+
+    // Up until here, IR instructions for this function have explicitly not been annotated with
+    // source code location, so we don't step into call setup code. From here on, source location
+    // emitting should be enabled.
+    debuginfo::start_emitting_source_locations(&fcx);
+
+    let dest = if fcx.fn_ty.ret.is_ignore() {
+        expr::Ignore
+    } else {
+        expr::SaveIn(fcx.get_ret_slot(bcx, "iret_slot"))
+    };
+
+    // This call to trans_block is the place where we bridge between
+    // translation calls that don't have a return value (trans_crate,
+    // trans_mod, trans_item, et cetera) and those that do
+    // (trans_block, trans_expr, et cetera).
+    let mut bcx = controlflow::trans_block(bcx, body, dest);
+
+    match dest {
+        expr::SaveIn(slot) if fcx.needs_ret_allocas => {
+            Store(bcx, slot, fcx.llretslotptr.get().unwrap());
+        }
+        _ => {}
+    }
+
+    match fcx.llreturn.get() {
+        Some(_) => {
+            Br(bcx, fcx.return_exit_block(), DebugLoc::None);
+            fcx.pop_custom_cleanup_scope(arg_scope);
+        }
+        None => {
+            // Microoptimization writ large: avoid creating a separate
+            // llreturn basic block
+            bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_scope);
+        }
+    };
+
+    // Put the return block after all other blocks.
+    // This somewhat improves the single-stepping experience in a debugger.
+    unsafe {
+        let llreturn = fcx.llreturn.get();
+        if let Some(llreturn) = llreturn {
+            llvm::LLVMMoveBasicBlockAfter(llreturn, bcx.llbb);
+        }
+    }
+
+    let ret_debug_loc = DebugLoc::At(fn_cleanup_debug_loc.id, fn_cleanup_debug_loc.span);
+
+    // Insert the mandatory first few basic blocks before lltop.
+    fcx.finish(bcx, ret_debug_loc);
+}
+
+/// Creates an LLVM function corresponding to a source language function.
+pub fn trans_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                          decl: &hir::FnDecl,
+                          body: &hir::Block,
+                          llfndecl: ValueRef,
+                          param_substs: &'tcx Substs<'tcx>,
+                          id: ast::NodeId) {
+    let _s = StatRecorder::new(ccx, ccx.tcx().node_path_str(id));
+    debug!("trans_fn(param_substs={:?})", param_substs);
+    let _icx = push_ctxt("trans_fn");
+    let fn_ty = ccx.tcx().node_id_to_type(id);
+    let fn_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &fn_ty);
+    let sig = ccx.tcx().erase_late_bound_regions(fn_ty.fn_sig());
+    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+    let abi = fn_ty.fn_abi();
+    let fn_ty = FnType::new(ccx, abi, &sig, &[]);
+    let def_id = if let Some(&def_id) = ccx.external_srcs().borrow().get(&id) {
+        def_id
+    } else {
+        ccx.tcx().map.local_def_id(id)
+    };
+    trans_closure(ccx,
+                  decl,
+                  body,
+                  llfndecl,
+                  param_substs,
+                  def_id,
+                  id,
+                  fn_ty,
+                  abi,
+                  closure::ClosureEnv::NotClosure);
+}
+
+pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+                                                 ctor_ty: Ty<'tcx>,
+                                                 disr: Disr,
+                                                 args: CallArgs,
+                                                 dest: expr::Dest,
+                                                 debug_loc: DebugLoc)
+                                                 -> Result<'blk, 'tcx> {
+
+    let ccx = bcx.fcx.ccx;
+
+    let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig());
+    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+    let result_ty = sig.output.unwrap();
+
+    // Get location to store the result. If the user does not care about
+    // the result, just make a stack slot
+    let llresult = match dest {
+        expr::SaveIn(d) => d,
+        expr::Ignore => {
+            if !type_is_zero_size(ccx, result_ty) {
+                let llresult = alloc_ty(bcx, result_ty, "constructor_result");
+                call_lifetime_start(bcx, llresult);
+                llresult
+            } else {
+                C_undef(type_of::type_of(ccx, result_ty).ptr_to())
+            }
+        }
+    };
+
+    if !type_is_zero_size(ccx, result_ty) {
+        match args {
+            ArgExprs(exprs) => {
+                let fields = exprs.iter().map(|x| &**x).enumerate().collect::<Vec<_>>();
+                bcx = expr::trans_adt(bcx,
+                                      result_ty,
+                                      disr,
+                                      &fields[..],
+                                      None,
+                                      expr::SaveIn(llresult),
+                                      debug_loc);
+            }
+            _ => bug!("expected expr as arguments for variant/struct tuple constructor"),
+        }
+    } else {
+        // Just eval all the expressions (if any). Since expressions in Rust can have arbitrary
+        // contents, there could be side-effects we need from them.
+        match args {
+            ArgExprs(exprs) => {
+                for expr in exprs {
+                    bcx = expr::trans_into(bcx, expr, expr::Ignore);
+                }
+            }
+            _ => (),
+        }
+    }
+
+    // If the caller doesn't care about the result,
+    // drop the temporary we made.
+    let bcx = match dest {
+        expr::SaveIn(_) => bcx,
+        expr::Ignore => {
+            let bcx = glue::drop_ty(bcx, llresult, result_ty, debug_loc);
+            if !type_is_zero_size(ccx, result_ty) {
+                call_lifetime_end(bcx, llresult);
+            }
+            bcx
+        }
+    };
+
+    Result::new(bcx, llresult)
+}
+
+pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                 ctor_id: ast::NodeId,
+                                 disr: Disr,
+                                 param_substs: &'tcx Substs<'tcx>,
+                                 llfndecl: ValueRef) {
+    let ctor_ty = ccx.tcx().node_id_to_type(ctor_id);
+    let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &ctor_ty);
+
+    let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig());
+    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+    let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
+
+    let (arena, fcx): (TypedArena<_>, FunctionContext);
+    arena = TypedArena::new();
+    fcx = FunctionContext::new(ccx, llfndecl, fn_ty,
+                               Some(ccx.tcx().map.local_def_id(ctor_id)),
+                               param_substs, &arena);
+    let bcx = fcx.init(false, None);
+
+    assert!(!fcx.needs_ret_allocas);
+
+    if !fcx.fn_ty.ret.is_ignore() {
+        let dest = fcx.get_ret_slot(bcx, "eret_slot");
+        let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value
+        let repr = adt::represent_type(ccx, sig.output.unwrap());
+        let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize;
+        let mut arg_idx = 0;
+        for (i, arg_ty) in sig.inputs.into_iter().enumerate() {
+            let lldestptr = adt::trans_field_ptr(bcx, &repr, dest_val, Disr::from(disr), i);
+            let arg = &fcx.fn_ty.args[arg_idx];
+            arg_idx += 1;
+            let b = &bcx.build();
+            if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
+                let meta = &fcx.fn_ty.args[arg_idx];
+                arg_idx += 1;
+                arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, lldestptr));
+                meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, lldestptr));
+            } else {
+                arg.store_fn_arg(b, &mut llarg_idx, lldestptr);
+            }
+        }
+        adt::trans_set_discr(bcx, &repr, dest, disr);
+    }
+
+    fcx.finish(bcx, DebugLoc::None);
+}
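For orientation (illustrative code, not from the patch), the shim above is what backs a tuple-struct or tuple-variant constructor when it is used as an ordinary function value:

    struct Wrapper(u32);

    fn reify_ctor() -> Wrapper {
        // The constructor coerces to a plain fn pointer; calling it goes
        // through a generated ctor shim rather than user-written code.
        let make: fn(u32) -> Wrapper = Wrapper;
        make(7)
    }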
+
+fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &hir::EnumDef, sp: Span, id: ast::NodeId) {
+    let mut sizes = Vec::new(); // does no allocation if no pushes, thankfully
+
+    let print_info = ccx.sess().print_enum_sizes();
+
+    let levels = ccx.tcx().node_lint_levels.borrow();
+    let lint_id = lint::LintId::of(lint::builtin::VARIANT_SIZE_DIFFERENCES);
+    let lvlsrc = levels.get(&(id, lint_id));
+    let is_allow = lvlsrc.map_or(true, |&(lvl, _)| lvl == lint::Allow);
+
+    if is_allow && !print_info {
+        // we're not interested in anything here
+        return;
+    }
+
+    let ty = ccx.tcx().node_id_to_type(id);
+    let avar = adt::represent_type(ccx, ty);
+    match *avar {
+        adt::General(_, ref variants, _) => {
+            for var in variants {
+                let mut size = 0;
+                for field in var.fields.iter().skip(1) {
+                    // skip the discriminant
+                    size += llsize_of_real(ccx, sizing_type_of(ccx, *field));
+                }
+                sizes.push(size);
+            }
+        },
+        _ => { /* its size is either constant or unimportant */ }
+    }
+
+    let (largest, slargest, largest_index) = sizes.iter().enumerate().fold((0, 0, 0),
+        |(l, s, li), (idx, &size)|
+            if size > l {
+                (size, l, idx)
+            } else if size > s {
+                (l, size, li)
+            } else {
+                (l, s, li)
+            }
+    );
+
+    // FIXME(#30505) Should use logging for this.
+    if print_info {
+        let llty = type_of::sizing_type_of(ccx, ty);
+
+        let sess = &ccx.tcx().sess;
+        sess.span_note_without_error(sp,
+                                     &format!("total size: {} bytes", llsize_of_real(ccx, llty)));
+        match *avar {
+            adt::General(..) => {
+                for (i, var) in enum_def.variants.iter().enumerate() {
+                    ccx.tcx()
+                       .sess
+                       .span_note_without_error(var.span,
+                                                &format!("variant data: {} bytes", sizes[i]));
+                }
+            }
+            _ => {}
+        }
+    }
+
+    // we only warn if the largest variant is at least thrice as large as
+    // the second-largest.
+    if !is_allow && largest > slargest * 3 && slargest > 0 {
+        // Use lint::raw_emit_lint rather than sess.add_lint because the lint-printing
+        // pass for the latter already ran.
+        lint::raw_struct_lint(&ccx.tcx().sess,
+                              &ccx.tcx().sess.lint_store.borrow(),
+                              lint::builtin::VARIANT_SIZE_DIFFERENCES,
+                              *lvlsrc.unwrap(),
+                              Some(sp),
+                              &format!("enum variant is more than three times larger ({} bytes) \
+                                        than the next largest (ignoring padding)",
+                                       largest))
+            .span_note(enum_def.variants[largest_index].span,
+                       "this variant is the largest")
+            .emit();
+    }
+}
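A minimal example (not part of the diff) of an enum that trips the `variant_size_differences` check implemented above, assuming the lint is switched on:

    #![warn(variant_size_differences)]

    enum Payload {
        Small(u8),          // 1 byte of variant data
        Large([u8; 1024]),  // well over three times the next largest variant
    }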
+
+pub fn llvm_linkage_by_name(name: &str) -> Option<Linkage> {
+    // Use the names from src/llvm/docs/LangRef.rst here. Most types are only
+    // applicable to variable declarations and may not really make sense for
+    // Rust code in the first place but whitelist them anyway and trust that
+    // the user knows what s/he's doing. Who knows, unanticipated use cases
+    // may pop up in the future.
+    //
+    // ghost, dllimport, dllexport and linkonce_odr_autohide are not supported
+    // and don't have to be, LLVM treats them as no-ops.
+    match name {
+        "appending" => Some(llvm::AppendingLinkage),
+        "available_externally" => Some(llvm::AvailableExternallyLinkage),
+        "common" => Some(llvm::CommonLinkage),
+        "extern_weak" => Some(llvm::ExternalWeakLinkage),
+        "external" => Some(llvm::ExternalLinkage),
+        "internal" => Some(llvm::InternalLinkage),
+        "linkonce" => Some(llvm::LinkOnceAnyLinkage),
+        "linkonce_odr" => Some(llvm::LinkOnceODRLinkage),
+        "private" => Some(llvm::PrivateLinkage),
+        "weak" => Some(llvm::WeakAnyLinkage),
+        "weak_odr" => Some(llvm::WeakODRLinkage),
+        _ => None,
+    }
+}
+
+
+/// Enum describing the origin of an LLVM `Value`, for linkage purposes.
+#[derive(Copy, Clone)]
+pub enum ValueOrigin {
+    /// The LLVM `Value` is in this context because the corresponding item was
+    /// assigned to the current compilation unit.
+    OriginalTranslation,
+    /// The `Value`'s corresponding item was assigned to some other compilation
+    /// unit, but the `Value` was translated in this context anyway because the
+    /// item is marked `#[inline]`.
+    InlinedCopy,
+}
+
+/// Set the appropriate linkage for an LLVM `ValueRef` (function or global).
+/// If the `llval` is the direct translation of a specific Rust item, `id`
+/// should be set to the `NodeId` of that item.  (This mapping should be
+/// 1-to-1, so monomorphizations and drop/visit glue should have `id` set to
+/// `None`.)  `llval_origin` indicates whether `llval` is the translation of an
+/// item assigned to `ccx`'s compilation unit or an inlined copy of an item
+/// assigned to a different compilation unit.
+pub fn update_linkage(ccx: &CrateContext,
+                      llval: ValueRef,
+                      id: Option<ast::NodeId>,
+                      llval_origin: ValueOrigin) {
+    match llval_origin {
+        InlinedCopy => {
+            // `llval` is a translation of an item defined in a separate
+            // compilation unit.  This only makes sense if there are at least
+            // two compilation units.
+            assert!(ccx.sess().opts.cg.codegen_units > 1);
+            // `llval` is a copy of something defined elsewhere, so use
+            // `AvailableExternallyLinkage` to avoid duplicating code in the
+            // output.
+            llvm::SetLinkage(llval, llvm::AvailableExternallyLinkage);
+            return;
+        },
+        OriginalTranslation => {},
+    }
+
+    if let Some(id) = id {
+        let item = ccx.tcx().map.get(id);
+        if let hir_map::NodeItem(i) = item {
+            if let Some(name) = attr::first_attr_value_str_by_name(&i.attrs, "linkage") {
+                if let Some(linkage) = llvm_linkage_by_name(&name) {
+                    llvm::SetLinkage(llval, linkage);
+                } else {
+                    ccx.sess().span_fatal(i.span, "invalid linkage specified");
+                }
+                return;
+            }
+        }
+    }
+
+    let (is_reachable, is_generic) = if let Some(id) = id {
+        (ccx.reachable().contains(&id), false)
+    } else {
+        (false, true)
+    };
+
+    // We need external linkage for items reachable from other translation units; this includes
+    // other codegen units in the case of parallel compilation.
+    if is_reachable || ccx.sess().opts.cg.codegen_units > 1 {
+        if is_generic {
+            // This only happens with multiple codegen units, in which case we need to use weak_odr
+            // linkage because other crates might expose the same symbol. We cannot use
+            // linkonce_odr here because the symbol might then get dropped before the other codegen
+            // units get to link it.
+            llvm::SetUniqueComdat(ccx.llmod(), llval);
+            llvm::SetLinkage(llval, llvm::WeakODRLinkage);
+        } else {
+            llvm::SetLinkage(llval, llvm::ExternalLinkage);
+        }
+    } else {
+        llvm::SetLinkage(llval, llvm::InternalLinkage);
+    }
+}
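A sketch of the user-facing `linkage` attribute whose string values the table in `llvm_linkage_by_name` recognizes (unstable feature; the symbol name here is invented for the example):

    #![feature(linkage)]

    extern {
        // Resolves to null at runtime if the symbol is absent at link time.
        #[linkage = "extern_weak"]
        static OPTIONAL_SYMBOL: *const u8;
    }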
+
+fn set_global_section(ccx: &CrateContext, llval: ValueRef, i: &hir::Item) {
+    match attr::first_attr_value_str_by_name(&i.attrs, "link_section") {
+        Some(sect) => {
+            if contains_null(&sect) {
+                ccx.sess().fatal(&format!("Illegal null byte in link_section value: `{}`", &sect));
+            }
+            unsafe {
+                let buf = CString::new(sect.as_bytes()).unwrap();
+                llvm::LLVMSetSection(llval, buf.as_ptr());
+            }
+        },
+        None => ()
+    }
+}
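Illustrative use of the `link_section` attribute handled by `set_global_section` (section name invented for the example):

    // Placed verbatim into the named object-file section; a NUL byte in the
    // name would hit the fatal error above.
    #[link_section = ".my_custom_section"]
    pub static CONFIG_BLOB: [u8; 3] = [1, 2, 3];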
+
+pub fn trans_item(ccx: &CrateContext, item: &hir::Item) {
+    let _icx = push_ctxt("trans_item");
+
+    let tcx = ccx.tcx();
+    let from_external = ccx.external_srcs().borrow().contains_key(&item.id);
+
+    match item.node {
+        hir::ItemFn(ref decl, _, _, _, ref generics, ref body) => {
+            if !generics.is_type_parameterized() {
+                let trans_everywhere = attr::requests_inline(&item.attrs);
+                // Ignore `trans_everywhere` for cross-crate inlined items
+                // (`from_external`).  `trans_item` will be called once for each
+                // compilation unit that references the item, so it will still get
+                // translated everywhere it's needed.
+                for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
+                    let def_id = tcx.map.local_def_id(item.id);
+                    let empty_substs = ccx.empty_substs_for_def_id(def_id);
+                    let llfn = Callee::def(ccx, def_id, empty_substs).reify(ccx).val;
+                    trans_fn(ccx, &decl, &body, llfn, empty_substs, item.id);
+                    set_global_section(ccx, llfn, item);
+                    update_linkage(ccx,
+                                   llfn,
+                                   Some(item.id),
+                                   if is_origin {
+                                       OriginalTranslation
+                                   } else {
+                                       InlinedCopy
+                                   });
+
+                    if is_entry_fn(ccx.sess(), item.id) {
+                        create_entry_wrapper(ccx, item.span, llfn);
+                        // check for the #[rustc_error] annotation, which forces an
+                        // error in trans. This is used to write compile-fail tests
+                        // that actually test that compilation succeeds without
+                        // reporting an error.
+                        if tcx.has_attr(def_id, "rustc_error") {
+                            tcx.sess.span_fatal(item.span, "compilation successful");
+                        }
+                    }
+                }
+            }
+        }
+        hir::ItemImpl(_, _, ref generics, _, _, ref impl_items) => {
+            // Both here and below with generic methods, be sure to recurse and look for
+            // items that we need to translate.
+            if !generics.ty_params.is_empty() {
+                return;
+            }
+
+            for impl_item in impl_items {
+                if let hir::ImplItemKind::Method(ref sig, ref body) = impl_item.node {
+                    if sig.generics.ty_params.is_empty() {
+                        let trans_everywhere = attr::requests_inline(&impl_item.attrs);
+                        for (ref ccx, is_origin) in ccx.maybe_iter(trans_everywhere) {
+                            let def_id = tcx.map.local_def_id(impl_item.id);
+                            let empty_substs = ccx.empty_substs_for_def_id(def_id);
+                            let llfn = Callee::def(ccx, def_id, empty_substs).reify(ccx).val;
+                            trans_fn(ccx, &sig.decl, body, llfn, empty_substs, impl_item.id);
+                            update_linkage(ccx, llfn, Some(impl_item.id),
+                                if is_origin {
+                                    OriginalTranslation
+                                } else {
+                                    InlinedCopy
+                                });
+                        }
+                    }
+                }
+            }
+        }
+        hir::ItemEnum(ref enum_definition, ref gens) => {
+            if gens.ty_params.is_empty() {
+                // sizes only make sense for non-generic types
+                enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
+            }
+        }
+        hir::ItemStatic(_, m, ref expr) => {
+            let g = match consts::trans_static(ccx, m, expr, item.id, &item.attrs) {
+                Ok(g) => g,
+                Err(err) => ccx.tcx().sess.span_fatal(expr.span, &err.description()),
+            };
+            set_global_section(ccx, g, item);
+            update_linkage(ccx, g, Some(item.id), OriginalTranslation);
+        }
+        hir::ItemForeignMod(ref m) => {
+            if m.abi == Abi::RustIntrinsic || m.abi == Abi::PlatformIntrinsic {
+                return;
+            }
+            for fi in &m.items {
+                let lname = imported_name(fi.name, &fi.attrs).to_string();
+                ccx.item_symbols().borrow_mut().insert(fi.id, lname);
+            }
+        }
+        _ => {}
+    }
+}
+
+pub fn is_entry_fn(sess: &Session, node_id: ast::NodeId) -> bool {
+    match *sess.entry_fn.borrow() {
+        Some((entry_id, _)) => node_id == entry_id,
+        None => false,
+    }
+}
+
+/// Create the `main` function which will initialise the Rust runtime and call the user’s main
+/// function.
+pub fn create_entry_wrapper(ccx: &CrateContext, sp: Span, main_llfn: ValueRef) {
+    let et = ccx.sess().entry_type.get().unwrap();
+    match et {
+        config::EntryMain => {
+            create_entry_fn(ccx, sp, main_llfn, true);
+        }
+        config::EntryStart => create_entry_fn(ccx, sp, main_llfn, false),
+        config::EntryNone => {}    // Do nothing.
+    }
+
+    fn create_entry_fn(ccx: &CrateContext,
+                       sp: Span,
+                       rust_main: ValueRef,
+                       use_start_lang_item: bool) {
+        let llfty = Type::func(&[ccx.int_type(), Type::i8p(ccx).ptr_to()], &ccx.int_type());
+
+        if declare::get_defined_value(ccx, "main").is_some() {
+            // FIXME: We should be smart and show a better diagnostic here.
+            ccx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times")
+                      .help("did you use #[no_mangle] on `fn main`? Use #[start] instead")
+                      .emit();
+            ccx.sess().abort_if_errors();
+            bug!();
+        }
+        let llfn = declare::declare_cfn(ccx, "main", llfty);
+
+        let llbb = unsafe {
+            llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, "top\0".as_ptr() as *const _)
+        };
+        let bld = ccx.raw_builder();
+        unsafe {
+            llvm::LLVMPositionBuilderAtEnd(bld, llbb);
+
+            debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx);
+
+            let (start_fn, args) = if use_start_lang_item {
+                let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) {
+                    Ok(id) => id,
+                    Err(s) => ccx.sess().fatal(&s)
+                };
+                let empty_substs = ccx.tcx().mk_substs(Substs::empty());
+                let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx).val;
+                let args = {
+                    let opaque_rust_main =
+                        llvm::LLVMBuildPointerCast(bld,
+                                                   rust_main,
+                                                   Type::i8p(ccx).to_ref(),
+                                                   "rust_main\0".as_ptr() as *const _);
+
+                    vec![opaque_rust_main, get_param(llfn, 0), get_param(llfn, 1)]
+                };
+                (start_fn, args)
+            } else {
+                debug!("using user-defined start fn");
+                let args = vec![get_param(llfn, 0 as c_uint), get_param(llfn, 1 as c_uint)];
+
+                (rust_main, args)
+            };
+
+            let result = llvm::LLVMRustBuildCall(bld,
+                                                 start_fn,
+                                                 args.as_ptr(),
+                                                 args.len() as c_uint,
+                                                 0 as *mut _,
+                                                 noname());
+
+            llvm::LLVMBuildRet(bld, result);
+        }
+    }
+}
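For reference (a sketch, not from the commit): `config::EntryStart` corresponds to a user-defined `#[start]` function, which the generated `main` shim above calls directly with the raw `argc`/`argv`:

    #![feature(start)]

    #[start]
    fn entry(argc: isize, argv: *const *const u8) -> isize {
        // No `lang_start` runtime setup is inserted on this path; the C-level
        // `main` created by create_entry_fn forwards argc/argv straight here.
        let _ = (argc, argv);
        0
    }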
+
+pub fn exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                               instance: Instance<'tcx>,
+                               attrs: &[ast::Attribute])
+                               -> String {
+    let id = ccx.tcx().map.as_local_node_id(instance.def).unwrap();
+
+    match ccx.external_srcs().borrow().get(&id) {
+        Some(&did) => {
+            let sym = ccx.sess().cstore.item_symbol(did);
+            debug!("found item {} in other crate...", sym);
+            return sym;
+        }
+        None => {}
+    }
+
+    match attr::find_export_name_attr(ccx.sess().diagnostic(), attrs) {
+        // Use provided name
+        Some(name) => name.to_string(),
+        _ => {
+            if attr::contains_name(attrs, "no_mangle") {
+                // Don't mangle
+                ccx.tcx().map.name(id).as_str().to_string()
+            } else {
+                match weak_lang_items::link_name(attrs) {
+                    Some(name) => name.to_string(),
+                    None => {
+                        // Usual name mangling
+                        symbol_names::exported_name(ccx, &instance)
+                    }
+                }
+            }
+        }
+    }
+}
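The attribute branches above correspond to the following user-facing spellings (illustrative symbol names):

    // `export_name` overrides the exported symbol completely.
    #[export_name = "my_ffi_entry_point"]
    pub extern "C" fn entry_point() {}

    // `no_mangle` keeps the item's own name instead of the mangled one.
    #[no_mangle]
    pub extern "C" fn plain_name() {}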
+
+pub fn imported_name(name: ast::Name, attrs: &[ast::Attribute]) -> InternedString {
+    match attr::first_attr_value_str_by_name(attrs, "link_name") {
+        Some(ln) => ln.clone(),
+        None => match weak_lang_items::link_name(attrs) {
+            Some(name) => name,
+            None => name.as_str(),
+        }
+    }
+}
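On the import side, `link_name` (checked first above) binds a foreign declaration to a differently named symbol, for example:

    extern "C" {
        // The Rust identifier differs from the C symbol actually linked.
        #[link_name = "strlen"]
        fn c_strlen(s: *const u8) -> usize;
    }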
+
+fn contains_null(s: &str) -> bool {
+    s.bytes().any(|b| b == 0)
+}
+
+pub fn write_metadata<'a, 'tcx>(cx: &SharedCrateContext<'a, 'tcx>,
+                                krate: &hir::Crate,
+                                reachable: &NodeSet,
+                                mir_map: &MirMap<'tcx>)
+                                -> Vec<u8> {
+    use flate;
+
+    let any_library = cx.sess()
+                        .crate_types
+                        .borrow()
+                        .iter()
+                        .any(|ty| *ty != config::CrateTypeExecutable);
+    if !any_library {
+        return Vec::new();
+    }
+
+    let cstore = &cx.tcx().sess.cstore;
+    let metadata = cstore.encode_metadata(cx.tcx(),
+                                          cx.export_map(),
+                                          cx.item_symbols(),
+                                          cx.link_meta(),
+                                          reachable,
+                                          mir_map,
+                                          krate);
+    let mut compressed = cstore.metadata_encoding_version().to_vec();
+    compressed.extend_from_slice(&flate::deflate_bytes(&metadata));
+
+    let llmeta = C_bytes_in_context(cx.metadata_llcx(), &compressed[..]);
+    let llconst = C_struct_in_context(cx.metadata_llcx(), &[llmeta], false);
+    let name = format!("rust_metadata_{}_{}",
+                       cx.link_meta().crate_name,
+                       cx.link_meta().crate_hash);
+    let buf = CString::new(name).unwrap();
+    let llglobal = unsafe {
+        llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(), buf.as_ptr())
+    };
+    unsafe {
+        llvm::LLVMSetInitializer(llglobal, llconst);
+        let name =
+            cx.tcx().sess.cstore.metadata_section_name(&cx.sess().target.target);
+        let name = CString::new(name).unwrap();
+        llvm::LLVMSetSection(llglobal, name.as_ptr())
+    }
+    return metadata;
+}
+
+/// Find any symbols that are defined in one compilation unit, but not declared
+/// in any other compilation unit.  Give these symbols internal linkage.
+fn internalize_symbols(cx: &SharedCrateContext, reachable: &HashSet<&str>) {
+    unsafe {
+        let mut declared = HashSet::new();
+
+        // Collect all external declarations in all compilation units.
+        for ccx in cx.iter() {
+            for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
+                let linkage = llvm::LLVMGetLinkage(val);
+                // We only care about external declarations (not definitions)
+                // and available_externally definitions.
+                if !(linkage == llvm::ExternalLinkage as c_uint &&
+                     llvm::LLVMIsDeclaration(val) != 0) &&
+                   !(linkage == llvm::AvailableExternallyLinkage as c_uint) {
+                    continue;
+                }
+
+                let name = CStr::from_ptr(llvm::LLVMGetValueName(val))
+                               .to_bytes()
+                               .to_vec();
+                declared.insert(name);
+            }
+        }
+
+        // Examine each external definition.  If the definition is not used in
+        // any other compilation unit, and is not reachable from other crates,
+        // then give it internal linkage.
+        for ccx in cx.iter() {
+            for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
+                let linkage = llvm::LLVMGetLinkage(val);
+                // We only care about external definitions.
+                if !((linkage == llvm::ExternalLinkage as c_uint ||
+                      linkage == llvm::WeakODRLinkage as c_uint) &&
+                     llvm::LLVMIsDeclaration(val) == 0) {
+                    continue;
+                }
+
+                let name = CStr::from_ptr(llvm::LLVMGetValueName(val))
+                               .to_bytes()
+                               .to_vec();
+                if !declared.contains(&name) &&
+                   !reachable.contains(str::from_utf8(&name).unwrap()) {
+                    llvm::SetLinkage(val, llvm::InternalLinkage);
+                    llvm::SetDLLStorageClass(val, llvm::DefaultStorageClass);
+                    llvm::UnsetComdat(val);
+                }
+            }
+        }
+    }
+}
+
+// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
+// This is required to satisfy `dllimport` references to static data in .rlibs
+// when using the MSVC linker. We do this only for data, as the linker can fix up
+// code references on its own.
+// See #26591, #27438
+fn create_imps(cx: &SharedCrateContext) {
+    // The x86 ABI seems to require that leading underscores are added to symbol
+    // names, so we need an extra underscore on 32-bit. There's also a leading
+    // '\x01' here which disables LLVM's symbol mangling (e.g. no extra
+    // underscores added in front).
+    let prefix = if cx.sess().target.target.target_pointer_width == "32" {
+        "\x01__imp__"
+    } else {
+        "\x01__imp_"
+    };
+    unsafe {
+        for ccx in cx.iter() {
+            let exported: Vec<_> = iter_globals(ccx.llmod())
+                                       .filter(|&val| {
+                                           llvm::LLVMGetLinkage(val) ==
+                                           llvm::ExternalLinkage as c_uint &&
+                                           llvm::LLVMIsDeclaration(val) == 0
+                                       })
+                                       .collect();
+
+            let i8p_ty = Type::i8p(&ccx);
+            for val in exported {
+                let name = CStr::from_ptr(llvm::LLVMGetValueName(val));
+                let mut imp_name = prefix.as_bytes().to_vec();
+                imp_name.extend(name.to_bytes());
+                let imp_name = CString::new(imp_name).unwrap();
+                let imp = llvm::LLVMAddGlobal(ccx.llmod(),
+                                              i8p_ty.to_ref(),
+                                              imp_name.as_ptr() as *const _);
+                let init = llvm::LLVMConstBitCast(val, i8p_ty.to_ref());
+                llvm::LLVMSetInitializer(imp, init);
+                llvm::SetLinkage(imp, llvm::ExternalLinkage);
+            }
+        }
+    }
+}
+
+struct ValueIter {
+    cur: ValueRef,
+    step: unsafe extern "C" fn(ValueRef) -> ValueRef,
+}
+
+impl Iterator for ValueIter {
+    type Item = ValueRef;
+
+    fn next(&mut self) -> Option<ValueRef> {
+        let old = self.cur;
+        if !old.is_null() {
+            self.cur = unsafe { (self.step)(old) };
+            Some(old)
+        } else {
+            None
+        }
+    }
+}
+
+fn iter_globals(llmod: llvm::ModuleRef) -> ValueIter {
+    unsafe {
+        ValueIter {
+            cur: llvm::LLVMGetFirstGlobal(llmod),
+            step: llvm::LLVMGetNextGlobal,
+        }
+    }
+}
+
+fn iter_functions(llmod: llvm::ModuleRef) -> ValueIter {
+    unsafe {
+        ValueIter {
+            cur: llvm::LLVMGetFirstFunction(llmod),
+            step: llvm::LLVMGetNextFunction,
+        }
+    }
+}
+
+/// The context provided lists a set of reachable ids as calculated by
+/// middle::reachable, but this contains far more ids and symbols than we're
+/// actually exposing from the object file. This function will filter the set in
+/// the context to the set of ids which correspond to symbols that are exposed
+/// from the object file being generated.
+///
+/// This list is later used by linkers to determine the set of symbols needed to
+/// be exposed from a dynamic library and it's also encoded into the metadata.
+pub fn filter_reachable_ids(ccx: &SharedCrateContext) -> NodeSet {
+    ccx.reachable().iter().map(|x| *x).filter(|id| {
+        // First, only worry about nodes which have a symbol name
+        ccx.item_symbols().borrow().contains_key(id)
+    }).filter(|&id| {
+        // Next, we want to ignore some FFI functions that are not exposed from
+        // this crate. Reachable FFI functions can be lumped into two
+        // categories:
+        //
+        // 1. Those that are included statically via a static library
+        // 2. Those included otherwise (e.g. dynamically or via a framework)
+        //
+        // Although our LLVM module is not literally emitting code for the
+        // statically included symbols, it's an export of our library which
+        // needs to be passed on to the linker and encoded in the metadata.
+        //
+        // As a result, if this id is an FFI item (foreign item) then we only
+        // let it through if it's included statically.
+        match ccx.tcx().map.get(id) {
+            hir_map::NodeForeignItem(..) => {
+                ccx.sess().cstore.is_statically_included_foreign_item(id)
+            }
+            _ => true,
+        }
+    }).collect()
+}
+
+pub fn trans_crate<'tcx>(tcx: &TyCtxt<'tcx>,
+                         mir_map: &MirMap<'tcx>,
+                         analysis: ty::CrateAnalysis)
+                         -> CrateTranslation {
+    let _task = tcx.dep_graph.in_task(DepNode::TransCrate);
+
+    // Be careful with this krate: obviously it gives access to the
+    // entire contents of the krate. So if you push any subtasks of
+    // `TransCrate`, you need to be careful to register "reads" of the
+    // particular items that will be processed.
+    let krate = tcx.map.krate();
+
+    let ty::CrateAnalysis { export_map, reachable, name, .. } = analysis;
+
+    let check_overflow = if let Some(v) = tcx.sess.opts.debugging_opts.force_overflow_checks {
+        v
+    } else {
+        tcx.sess.opts.debug_assertions
+    };
+
+    let check_dropflag = if let Some(v) = tcx.sess.opts.debugging_opts.force_dropflag_checks {
+        v
+    } else {
+        tcx.sess.opts.debug_assertions
+    };
+
+    // Before we touch LLVM, make sure that multithreading is enabled.
+    unsafe {
+        use std::sync::Once;
+        static INIT: Once = Once::new();
+        static mut POISONED: bool = false;
+        INIT.call_once(|| {
+            if llvm::LLVMStartMultithreaded() != 1 {
+                // use an extra bool to make sure that all future usage of LLVM
+                // still fails, even though the Once will never run again.
+                POISONED = true;
+            }
+
+            ::back::write::configure_llvm(&tcx.sess);
+        });
+
+        if POISONED {
+            bug!("couldn't enable multi-threaded LLVM");
+        }
+    }
+
+    let link_meta = link::build_link_meta(&tcx, name);
+
+    let codegen_units = tcx.sess.opts.cg.codegen_units;
+    let shared_ccx = SharedCrateContext::new(&link_meta.crate_name,
+                                             codegen_units,
+                                             tcx,
+                                             &mir_map,
+                                             export_map,
+                                             Sha256::new(),
+                                             link_meta.clone(),
+                                             reachable,
+                                             check_overflow,
+                                             check_dropflag);
+
+    {
+        let ccx = shared_ccx.get_ccx(0);
+        collect_translation_items(&ccx);
+
+        // Translate all items. See `TransModVisitor` for
+        // details on why we walk in this particular way.
+        {
+            let _icx = push_ctxt("text");
+            intravisit::walk_mod(&mut TransItemsWithinModVisitor { ccx: &ccx }, &krate.module);
+            krate.visit_all_items(&mut TransModVisitor { ccx: &ccx });
+        }
+
+        collector::print_collection_results(&ccx);
+
+        symbol_names_test::report_symbol_names(&ccx);
+    }
+
+    for ccx in shared_ccx.iter() {
+        if ccx.sess().opts.debuginfo != NoDebugInfo {
+            debuginfo::finalize(&ccx);
+        }
+        for &(old_g, new_g) in ccx.statics_to_rauw().borrow().iter() {
+            unsafe {
+                let bitcast = llvm::LLVMConstPointerCast(new_g, llvm::LLVMTypeOf(old_g));
+                llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
+                llvm::LLVMDeleteGlobal(old_g);
+            }
+        }
+    }
+
+    let reachable_symbol_ids = filter_reachable_ids(&shared_ccx);
+
+    // Translate the metadata.
+    let metadata = time(tcx.sess.time_passes(), "write metadata", || {
+        write_metadata(&shared_ccx, krate, &reachable_symbol_ids, mir_map)
+    });
+
+    if shared_ccx.sess().trans_stats() {
+        let stats = shared_ccx.stats();
+        println!("--- trans stats ---");
+        println!("n_glues_created: {}", stats.n_glues_created.get());
+        println!("n_null_glues: {}", stats.n_null_glues.get());
+        println!("n_real_glues: {}", stats.n_real_glues.get());
+
+        println!("n_fns: {}", stats.n_fns.get());
+        println!("n_monos: {}", stats.n_monos.get());
+        println!("n_inlines: {}", stats.n_inlines.get());
+        println!("n_closures: {}", stats.n_closures.get());
+        println!("fn stats:");
+        stats.fn_stats.borrow_mut().sort_by(|&(_, insns_a), &(_, insns_b)| {
+            insns_b.cmp(&insns_a)
+        });
+        for tuple in stats.fn_stats.borrow().iter() {
+            match *tuple {
+                (ref name, insns) => {
+                    println!("{} insns, {}", insns, *name);
+                }
+            }
+        }
+    }
+    if shared_ccx.sess().count_llvm_insns() {
+        for (k, v) in shared_ccx.stats().llvm_insns.borrow().iter() {
+            println!("{:7} {}", *v, *k);
+        }
+    }
+
+    let modules = shared_ccx.iter()
+        .map(|ccx| ModuleTranslation { llcx: ccx.llcx(), llmod: ccx.llmod() })
+        .collect();
+
+    let sess = shared_ccx.sess();
+    let mut reachable_symbols = reachable_symbol_ids.iter().map(|id| {
+        shared_ccx.item_symbols().borrow()[id].to_string()
+    }).collect::<Vec<_>>();
+    if sess.entry_fn.borrow().is_some() {
+        reachable_symbols.push("main".to_string());
+    }
+
+    // For the purposes of LTO, we add to the reachable set all of the upstream
+    // reachable extern fns. These functions are all part of the public ABI of
+    // the final product, so LTO needs to preserve them.
+    if sess.lto() {
+        for cnum in sess.cstore.crates() {
+            let syms = sess.cstore.reachable_ids(cnum);
+            reachable_symbols.extend(syms.into_iter().filter(|did| {
+                sess.cstore.is_extern_item(shared_ccx.tcx(), *did)
+            }).map(|did| {
+                sess.cstore.item_symbol(did)
+            }));
+        }
+    }
+
+    if codegen_units > 1 {
+        internalize_symbols(&shared_ccx,
+                            &reachable_symbols.iter().map(|x| &x[..]).collect());
+    }
+
+    if sess.target.target.options.is_like_msvc &&
+       sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib) {
+        create_imps(&shared_ccx);
+    }
+
+    let metadata_module = ModuleTranslation {
+        llcx: shared_ccx.metadata_llcx(),
+        llmod: shared_ccx.metadata_llmod(),
+    };
+    let no_builtins = attr::contains_name(&krate.attrs, "no_builtins");
+
+    CrateTranslation {
+        modules: modules,
+        metadata_module: metadata_module,
+        link: link_meta,
+        metadata: metadata,
+        reachable: reachable_symbols,
+        no_builtins: no_builtins,
+    }
+}
+
+/// We visit all the items in the krate and translate them.  We do
+/// this in two walks. The first walk just finds module items. It then
+/// walks the full contents of those module items and translates all
+/// the items within. Note that this entire process is O(n). The
+/// reason for this two-phase walk is that each module is
+/// (potentially) placed into a distinct codegen-unit. This walk also
+/// ensures that the immediate contents of each module are processed
+/// entirely before we proceed to find more modules, helping to ensure
+/// an equitable distribution amongst codegen-units.
+pub struct TransModVisitor<'a, 'tcx: 'a> {
+    pub ccx: &'a CrateContext<'a, 'tcx>,
+}
+
+impl<'a, 'tcx, 'v> Visitor<'v> for TransModVisitor<'a, 'tcx> {
+    fn visit_item(&mut self, i: &hir::Item) {
+        match i.node {
+            hir::ItemMod(_) => {
+                let item_ccx = self.ccx.rotate();
+                intravisit::walk_item(&mut TransItemsWithinModVisitor { ccx: &item_ccx }, i);
+            }
+            _ => { }
+        }
+    }
+}
+
+/// Translates all the items within a given module. Expects the owner to
+/// invoke `walk_item` on a module item. Ignores nested modules.
+pub struct TransItemsWithinModVisitor<'a, 'tcx: 'a> {
+    pub ccx: &'a CrateContext<'a, 'tcx>,
+}
+
+impl<'a, 'tcx, 'v> Visitor<'v> for TransItemsWithinModVisitor<'a, 'tcx> {
+    fn visit_nested_item(&mut self, item_id: hir::ItemId) {
+        self.visit_item(self.ccx.tcx().map.expect_item(item_id.id));
+    }
+
+    fn visit_item(&mut self, i: &hir::Item) {
+        match i.node {
+            hir::ItemMod(..) => {
+                // skip modules, they will be uncovered by the TransModVisitor
+            }
+            _ => {
+                let def_id = self.ccx.tcx().map.local_def_id(i.id);
+                let tcx = self.ccx.tcx();
+
+                // Create a subtask for trans'ing a particular item. We are
+                // giving `trans_item` access to this item, so also record a read.
+                tcx.dep_graph.with_task(DepNode::TransCrateItem(def_id), || {
+                    tcx.dep_graph.read(DepNode::Hir(def_id));
+
+                    // We are going to be accessing various tables
+                    // generated by TypeckItemBody; we also assume
+                    // that the body passes type check. These tables
+                    // are not individually tracked, so just register
+                    // a read here.
+                    tcx.dep_graph.read(DepNode::TypeckItemBody(def_id));
+
+                    trans_item(self.ccx, i);
+                });
+
+                intravisit::walk_item(self, i);
+            }
+        }
+    }
+}
+
+fn collect_translation_items<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>) {
+    let time_passes = ccx.sess().time_passes();
+
+    let collection_mode = match ccx.sess().opts.debugging_opts.print_trans_items {
+        Some(ref s) => {
+            let mode_string = s.to_lowercase();
+            let mode_string = mode_string.trim();
+            if mode_string == "eager" {
+                TransItemCollectionMode::Eager
+            } else {
+                if mode_string != "lazy" {
+                    let message = format!("Unknown codegen-item collection mode '{}'. \
+                                           Falling back to 'lazy' mode.",
+                                           mode_string);
+                    ccx.sess().warn(&message);
+                }
+
+                TransItemCollectionMode::Lazy
+            }
+        }
+        None => TransItemCollectionMode::Lazy
+    };
+
+    let items = time(time_passes, "translation item collection", || {
+        collector::collect_crate_translation_items(&ccx, collection_mode)
+    });
+
+    if ccx.sess().opts.debugging_opts.print_trans_items.is_some() {
+        let mut item_keys: Vec<_> = items.iter()
+                                         .map(|i| i.to_string(ccx))
+                                         .collect();
+        item_keys.sort();
+
+        for item in item_keys {
+            println!("TRANS_ITEM {}", item);
+        }
+
+        let mut ccx_map = ccx.translation_items().borrow_mut();
+
+        for cgi in items {
+            ccx_map.insert(cgi, TransItemState::PredictedButNotGenerated);
+        }
+    }
+}
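+
+// The listing above is only produced when the corresponding debugging option is
+// set (the `print_trans_items` field, normally reached via something like
+// `-Z print-trans-items=eager` or `=lazy` on the command line); each predicted
+// item is printed as a `TRANS_ITEM <name>` line and recorded as
+// `PredictedButNotGenerated`, so that actual code generation can later be
+// compared against the prediction.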
diff --git a/src/librustc_trans/basic_block.rs b/src/librustc_trans/basic_block.rs
new file mode 100644 (file)
index 0000000..60bd3fb
--- /dev/null
@@ -0,0 +1,58 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm;
+use llvm::BasicBlockRef;
+use value::{Users, Value};
+use std::iter::{Filter, Map};
+
+#[derive(Copy, Clone)]
+pub struct BasicBlock(pub BasicBlockRef);
+
+pub type Preds = Map<Filter<Users, fn(&Value) -> bool>, fn(Value) -> BasicBlock>;
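+
+// `Preds` is spelled out with plain `fn` pointers (rather than closures) so the
+// adapter type returned by `pred_iter` below can be named here; the local
+// `let f: fn(..) = f;` rebindings in `pred_iter` coerce the named functions to
+// those pointer types.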
+
+/// Wrapper for LLVM BasicBlockRef
+impl BasicBlock {
+    pub fn get(&self) -> BasicBlockRef {
+        let BasicBlock(v) = *self; v
+    }
+
+    pub fn as_value(self) -> Value {
+        unsafe {
+            Value(llvm::LLVMBasicBlockAsValue(self.get()))
+        }
+    }
+
+    pub fn pred_iter(self) -> Preds {
+        fn is_a_terminator_inst(user: &Value) -> bool { user.is_a_terminator_inst() }
+        let is_a_terminator_inst: fn(&Value) -> bool = is_a_terminator_inst;
+
+        fn get_parent(user: Value) -> BasicBlock { user.get_parent().unwrap() }
+        let get_parent: fn(Value) -> BasicBlock = get_parent;
+
+        self.as_value().user_iter()
+            .filter(is_a_terminator_inst)
+            .map(get_parent)
+    }
+
+    pub fn get_single_predecessor(self) -> Option<BasicBlock> {
+        let mut iter = self.pred_iter();
+        match (iter.next(), iter.next()) {
+            (Some(first), None) => Some(first),
+            _ => None
+        }
+    }
+
+    pub fn delete(self) {
+        unsafe {
+            llvm::LLVMDeleteBasicBlock(self.0);
+        }
+    }
+}
diff --git a/src/librustc_trans/build.rs b/src/librustc_trans/build.rs
new file mode 100644 (file)
index 0000000..0185d15
--- /dev/null
@@ -0,0 +1,1167 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)] // FFI wrappers
+#![allow(non_snake_case)]
+
+use llvm;
+use llvm::{AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
+use llvm::{Opcode, IntPredicate, RealPredicate};
+use llvm::{ValueRef, BasicBlockRef};
+use common::*;
+use syntax::codemap::Span;
+
+use builder::Builder;
+use type_::Type;
+use value::Value;
+use debuginfo::DebugLoc;
+
+use libc::{c_uint, c_char};
+
+pub fn terminate(cx: Block, _: &str) {
+    debug!("terminate({})", cx.to_str());
+    cx.terminated.set(true);
+}
+
+pub fn check_not_terminated(cx: Block) {
+    if cx.terminated.get() {
+        bug!("already terminated!");
+    }
+}
+
+pub fn B<'blk, 'tcx>(cx: Block<'blk, 'tcx>) -> Builder<'blk, 'tcx> {
+    let b = cx.fcx.ccx.builder();
+    b.position_at_end(cx.llbb);
+    b
+}
+
+// The difference between a block being unreachable and being terminated is
+// somewhat obscure, and has to do with error checking. When a block is
+// terminated, we're saying that trying to add any further statements in the
+// block is an error. On the other hand, if something is unreachable, that
+// means that the block was terminated in some way that we don't want to check
+// for (panic/break/return statements, calls to diverging functions, etc), and
+// further instructions added to the block should simply be ignored.
+
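+// Every wrapper below follows the same shape: bail out early if the block is
+// already unreachable (returning an undef value where a value is expected),
+// apply the debug location, and forward to the `Builder` method of the same
+// name; terminator wrappers additionally assert the block is not yet terminated
+// and then mark it so. For example, a caller emitting `lhs + rhs` followed by a
+// return would write (sketch; assumes `bcx: Block`, `lhs`/`rhs: ValueRef` and
+// `loc: DebugLoc` are in scope):
+//
+//     let sum = Add(bcx, lhs, rhs, loc);
+//     Ret(bcx, sum, loc);
+//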
+pub fn RetVoid(cx: Block, debug_loc: DebugLoc) {
+    if cx.unreachable.get() {
+        return;
+    }
+    check_not_terminated(cx);
+    terminate(cx, "RetVoid");
+    debug_loc.apply(cx.fcx);
+    B(cx).ret_void();
+}
+
+pub fn Ret(cx: Block, v: ValueRef, debug_loc: DebugLoc) {
+    if cx.unreachable.get() {
+        return;
+    }
+    check_not_terminated(cx);
+    terminate(cx, "Ret");
+    debug_loc.apply(cx.fcx);
+    B(cx).ret(v);
+}
+
+pub fn AggregateRet(cx: Block,
+                    ret_vals: &[ValueRef],
+                    debug_loc: DebugLoc) {
+    if cx.unreachable.get() {
+        return;
+    }
+    check_not_terminated(cx);
+    terminate(cx, "AggregateRet");
+    debug_loc.apply(cx.fcx);
+    B(cx).aggregate_ret(ret_vals);
+}
+
+pub fn Br(cx: Block, dest: BasicBlockRef, debug_loc: DebugLoc) {
+    if cx.unreachable.get() {
+        return;
+    }
+    check_not_terminated(cx);
+    terminate(cx, "Br");
+    debug_loc.apply(cx.fcx);
+    B(cx).br(dest);
+}
+
+pub fn CondBr(cx: Block,
+              if_: ValueRef,
+              then: BasicBlockRef,
+              else_: BasicBlockRef,
+              debug_loc: DebugLoc) {
+    if cx.unreachable.get() {
+        return;
+    }
+    check_not_terminated(cx);
+    terminate(cx, "CondBr");
+    debug_loc.apply(cx.fcx);
+    B(cx).cond_br(if_, then, else_);
+}
+
+pub fn Switch(cx: Block, v: ValueRef, else_: BasicBlockRef, num_cases: usize)
+    -> ValueRef {
+    if cx.unreachable.get() { return _Undef(v); }
+    check_not_terminated(cx);
+    terminate(cx, "Switch");
+    B(cx).switch(v, else_, num_cases)
+}
+
+pub fn AddCase(s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) {
+    unsafe {
+        if llvm::LLVMIsUndef(s) == llvm::True { return; }
+        llvm::LLVMAddCase(s, on_val, dest);
+    }
+}
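+
+// `AddCase` tolerates an undef switch value because `Switch` above returns
+// `_Undef(v)` instead of emitting a real `switch` instruction when the block is
+// unreachable; in that case there is nothing to attach the case to.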
+
+pub fn IndirectBr(cx: Block,
+                  addr: ValueRef,
+                  num_dests: usize,
+                  debug_loc: DebugLoc) {
+    if cx.unreachable.get() {
+        return;
+    }
+    check_not_terminated(cx);
+    terminate(cx, "IndirectBr");
+    debug_loc.apply(cx.fcx);
+    B(cx).indirect_br(addr, num_dests);
+}
+
+pub fn Invoke(cx: Block,
+              fn_: ValueRef,
+              args: &[ValueRef],
+              then: BasicBlockRef,
+              catch: BasicBlockRef,
+              debug_loc: DebugLoc)
+              -> ValueRef {
+    if cx.unreachable.get() {
+        return C_null(Type::i8(cx.ccx()));
+    }
+    check_not_terminated(cx);
+    terminate(cx, "Invoke");
+    debug!("Invoke({:?} with arguments ({}))",
+           Value(fn_),
+           args.iter().map(|a| {
+                format!("{:?}", Value(*a))
+           }).collect::<Vec<String>>().join(", "));
+    debug_loc.apply(cx.fcx);
+    let bundle = cx.lpad().and_then(|b| b.bundle());
+    B(cx).invoke(fn_, args, then, catch, bundle)
+}
+
+pub fn Unreachable(cx: Block) {
+    if cx.unreachable.get() {
+        return
+    }
+    cx.unreachable.set(true);
+    if !cx.terminated.get() {
+        B(cx).unreachable();
+    }
+}
+
+pub fn _Undef(val: ValueRef) -> ValueRef {
+    unsafe {
+        return llvm::LLVMGetUndef(val_ty(val).to_ref());
+    }
+}
+
+/* Arithmetic */
+pub fn Add(cx: Block,
+           lhs: ValueRef,
+           rhs: ValueRef,
+           debug_loc: DebugLoc)
+           -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).add(lhs, rhs)
+}
+
+pub fn NSWAdd(cx: Block,
+              lhs: ValueRef,
+              rhs: ValueRef,
+              debug_loc: DebugLoc)
+              -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).nswadd(lhs, rhs)
+}
+
+pub fn NUWAdd(cx: Block,
+              lhs: ValueRef,
+              rhs: ValueRef,
+              debug_loc: DebugLoc)
+              -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).nuwadd(lhs, rhs)
+}
+
+pub fn FAdd(cx: Block,
+            lhs: ValueRef,
+            rhs: ValueRef,
+            debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).fadd(lhs, rhs)
+}
+
+pub fn FAddFast(cx: Block,
+                lhs: ValueRef,
+                rhs: ValueRef,
+                debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).fadd_fast(lhs, rhs)
+}
+
+pub fn Sub(cx: Block,
+           lhs: ValueRef,
+           rhs: ValueRef,
+           debug_loc: DebugLoc)
+           -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).sub(lhs, rhs)
+}
+
+pub fn NSWSub(cx: Block,
+              lhs: ValueRef,
+              rhs: ValueRef,
+              debug_loc: DebugLoc)
+              -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).nswsub(lhs, rhs)
+}
+
+pub fn NUWSub(cx: Block,
+              lhs: ValueRef,
+              rhs: ValueRef,
+              debug_loc: DebugLoc)
+              -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).nuwsub(lhs, rhs)
+}
+
+pub fn FSub(cx: Block,
+            lhs: ValueRef,
+            rhs: ValueRef,
+            debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).fsub(lhs, rhs)
+}
+
+pub fn FSubFast(cx: Block,
+                lhs: ValueRef,
+                rhs: ValueRef,
+                debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).fsub_fast(lhs, rhs)
+}
+
+pub fn Mul(cx: Block,
+           lhs: ValueRef,
+           rhs: ValueRef,
+           debug_loc: DebugLoc)
+           -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).mul(lhs, rhs)
+}
+
+pub fn NSWMul(cx: Block,
+              lhs: ValueRef,
+              rhs: ValueRef,
+              debug_loc: DebugLoc)
+              -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).nswmul(lhs, rhs)
+}
+
+pub fn NUWMul(cx: Block,
+              lhs: ValueRef,
+              rhs: ValueRef,
+              debug_loc: DebugLoc)
+              -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).nuwmul(lhs, rhs)
+}
+
+pub fn FMul(cx: Block,
+            lhs: ValueRef,
+            rhs: ValueRef,
+            debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).fmul(lhs, rhs)
+}
+
+pub fn FMulFast(cx: Block,
+                lhs: ValueRef,
+                rhs: ValueRef,
+                debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).fmul_fast(lhs, rhs)
+}
+
+pub fn UDiv(cx: Block,
+            lhs: ValueRef,
+            rhs: ValueRef,
+            debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).udiv(lhs, rhs)
+}
+
+pub fn SDiv(cx: Block,
+            lhs: ValueRef,
+            rhs: ValueRef,
+            debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).sdiv(lhs, rhs)
+}
+
+pub fn ExactSDiv(cx: Block,
+                 lhs: ValueRef,
+                 rhs: ValueRef,
+                 debug_loc: DebugLoc)
+                 -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).exactsdiv(lhs, rhs)
+}
+
+pub fn FDiv(cx: Block,
+            lhs: ValueRef,
+            rhs: ValueRef,
+            debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).fdiv(lhs, rhs)
+}
+
+pub fn FDivFast(cx: Block,
+                lhs: ValueRef,
+                rhs: ValueRef,
+                debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).fdiv_fast(lhs, rhs)
+}
+
+pub fn URem(cx: Block,
+            lhs: ValueRef,
+            rhs: ValueRef,
+            debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).urem(lhs, rhs)
+}
+
+pub fn SRem(cx: Block,
+            lhs: ValueRef,
+            rhs: ValueRef,
+            debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).srem(lhs, rhs)
+}
+
+pub fn FRem(cx: Block,
+            lhs: ValueRef,
+            rhs: ValueRef,
+            debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).frem(lhs, rhs)
+}
+
+pub fn FRemFast(cx: Block,
+                lhs: ValueRef,
+                rhs: ValueRef,
+                debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).frem_fast(lhs, rhs)
+}
+
+pub fn Shl(cx: Block,
+           lhs: ValueRef,
+           rhs: ValueRef,
+           debug_loc: DebugLoc)
+           -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).shl(lhs, rhs)
+}
+
+pub fn LShr(cx: Block,
+            lhs: ValueRef,
+            rhs: ValueRef,
+            debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).lshr(lhs, rhs)
+}
+
+pub fn AShr(cx: Block,
+            lhs: ValueRef,
+            rhs: ValueRef,
+            debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).ashr(lhs, rhs)
+}
+
+pub fn And(cx: Block,
+           lhs: ValueRef,
+           rhs: ValueRef,
+           debug_loc: DebugLoc)
+           -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).and(lhs, rhs)
+}
+
+pub fn Or(cx: Block,
+          lhs: ValueRef,
+          rhs: ValueRef,
+          debug_loc: DebugLoc)
+          -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).or(lhs, rhs)
+}
+
+pub fn Xor(cx: Block,
+           lhs: ValueRef,
+           rhs: ValueRef,
+           debug_loc: DebugLoc)
+           -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).xor(lhs, rhs)
+}
+
+pub fn BinOp(cx: Block,
+             op: Opcode,
+             lhs: ValueRef,
+             rhs: ValueRef,
+             debug_loc: DebugLoc)
+          -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(lhs);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).binop(op, lhs, rhs)
+}
+
+pub fn Neg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(v);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).neg(v)
+}
+
+pub fn NSWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(v);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).nswneg(v)
+}
+
+pub fn NUWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(v);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).nuwneg(v)
+}
+
+pub fn FNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(v);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).fneg(v)
+}
+
+pub fn Not(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
+    if cx.unreachable.get() {
+        return _Undef(v);
+    }
+    debug_loc.apply(cx.fcx);
+    B(cx).not(v)
+}
+
+pub fn Alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); }
+        AllocaFcx(cx.fcx, ty, name)
+    }
+}
+
+pub fn AllocaFcx(fcx: &FunctionContext, ty: Type, name: &str) -> ValueRef {
+    let b = fcx.ccx.builder();
+    b.position_before(fcx.alloca_insert_pt.get().unwrap());
+    DebugLoc::None.apply(fcx);
+    b.alloca(ty, name)
+}
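+
+// `AllocaFcx` positions the builder just before the function's alloca insertion
+// point rather than at the current block, so every stack slot is emitted at that
+// single insertion point (conventionally in the entry block) regardless of where
+// `Alloca` is called from, and is deliberately given no source location
+// (`DebugLoc::None`).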
+
+pub fn Free(cx: Block, pointer_val: ValueRef) {
+    if cx.unreachable.get() { return; }
+    B(cx).free(pointer_val)
+}
+
+pub fn Load(cx: Block, pointer_val: ValueRef) -> ValueRef {
+    unsafe {
+        let ccx = cx.fcx.ccx;
+        if cx.unreachable.get() {
+            let ty = val_ty(pointer_val);
+            let eltty = if ty.kind() == llvm::Array {
+                ty.element_type()
+            } else {
+                ccx.int_type()
+            };
+            return llvm::LLVMGetUndef(eltty.to_ref());
+        }
+        B(cx).load(pointer_val)
+    }
+}
+
+pub fn VolatileLoad(cx: Block, pointer_val: ValueRef) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
+        }
+        B(cx).volatile_load(pointer_val)
+    }
+}
+
+pub fn AtomicLoad(cx: Block, pointer_val: ValueRef, order: AtomicOrdering) -> ValueRef {
+    unsafe {
+        let ccx = cx.fcx.ccx;
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(ccx.int_type().to_ref());
+        }
+        B(cx).atomic_load(pointer_val, order)
+    }
+}
+
+
+pub fn LoadRangeAssert(cx: Block, pointer_val: ValueRef, lo: u64,
+                       hi: u64, signed: llvm::Bool) -> ValueRef {
+    if cx.unreachable.get() {
+        let ccx = cx.fcx.ccx;
+        let ty = val_ty(pointer_val);
+        let eltty = if ty.kind() == llvm::Array {
+            ty.element_type()
+        } else {
+            ccx.int_type()
+        };
+        unsafe {
+            llvm::LLVMGetUndef(eltty.to_ref())
+        }
+    } else {
+        B(cx).load_range_assert(pointer_val, lo, hi, signed)
+    }
+}
+
+pub fn LoadNonNull(cx: Block, ptr: ValueRef) -> ValueRef {
+    if cx.unreachable.get() {
+        let ccx = cx.fcx.ccx;
+        let ty = val_ty(ptr);
+        let eltty = if ty.kind() == llvm::Array {
+            ty.element_type()
+        } else {
+            ccx.int_type()
+        };
+        unsafe {
+            llvm::LLVMGetUndef(eltty.to_ref())
+        }
+    } else {
+        B(cx).load_nonnull(ptr)
+    }
+}
+
+pub fn Store(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef {
+    if cx.unreachable.get() { return C_nil(cx.ccx()); }
+    B(cx).store(val, ptr)
+}
+
+pub fn VolatileStore(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef {
+    if cx.unreachable.get() { return C_nil(cx.ccx()); }
+    B(cx).volatile_store(val, ptr)
+}
+
+pub fn AtomicStore(cx: Block, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
+    if cx.unreachable.get() { return; }
+    B(cx).atomic_store(val, ptr, order)
+}
+
+pub fn GEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
+        }
+        B(cx).gep(pointer, indices)
+    }
+}
+
+// Simple wrapper around GEP that takes an array of ints and wraps them
+// in C_i32()
+#[inline]
+pub fn GEPi(cx: Block, base: ValueRef, ixs: &[usize]) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
+        }
+        B(cx).gepi(base, ixs)
+    }
+}
+
+pub fn InBoundsGEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
+        }
+        B(cx).inbounds_gep(pointer, indices)
+    }
+}
+
+pub fn StructGEP(cx: Block, pointer: ValueRef, idx: usize) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
+        }
+        B(cx).struct_gep(pointer, idx)
+    }
+}
+
+pub fn GlobalString(cx: Block, _str: *const c_char) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref());
+        }
+        B(cx).global_string(_str)
+    }
+}
+
+pub fn GlobalStringPtr(cx: Block, _str: *const c_char) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref());
+        }
+        B(cx).global_string_ptr(_str)
+    }
+}
+
+/* Casts */
+pub fn Trunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).trunc(val, dest_ty)
+    }
+}
+
+pub fn ZExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).zext(val, dest_ty)
+    }
+}
+
+pub fn SExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).sext(val, dest_ty)
+    }
+}
+
+pub fn FPToUI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).fptoui(val, dest_ty)
+    }
+}
+
+pub fn FPToSI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).fptosi(val, dest_ty)
+    }
+}
+
+pub fn UIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).uitofp(val, dest_ty)
+    }
+}
+
+pub fn SIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).sitofp(val, dest_ty)
+    }
+}
+
+pub fn FPTrunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).fptrunc(val, dest_ty)
+    }
+}
+
+pub fn FPExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).fpext(val, dest_ty)
+    }
+}
+
+pub fn PtrToInt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).ptrtoint(val, dest_ty)
+    }
+}
+
+pub fn IntToPtr(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).inttoptr(val, dest_ty)
+    }
+}
+
+pub fn BitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).bitcast(val, dest_ty)
+    }
+}
+
+pub fn ZExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).zext_or_bitcast(val, dest_ty)
+    }
+}
+
+pub fn SExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).sext_or_bitcast(val, dest_ty)
+    }
+}
+
+pub fn TruncOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).trunc_or_bitcast(val, dest_ty)
+    }
+}
+
+pub fn Cast(cx: Block, op: Opcode, val: ValueRef, dest_ty: Type,
+            _: *const u8)
+     -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).cast(op, val, dest_ty)
+    }
+}
+
+pub fn PointerCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).pointercast(val, dest_ty)
+    }
+}
+
+pub fn IntCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).intcast(val, dest_ty)
+    }
+}
+
+pub fn FPCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
+        B(cx).fpcast(val, dest_ty)
+    }
+}
+
+
+/* Comparisons */
+pub fn ICmp(cx: Block,
+            op: IntPredicate,
+            lhs: ValueRef,
+            rhs: ValueRef,
+            debug_loc: DebugLoc)
+            -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref());
+        }
+        debug_loc.apply(cx.fcx);
+        B(cx).icmp(op, lhs, rhs)
+    }
+}
+
+pub fn FCmp(cx: Block,
+            op: RealPredicate,
+            lhs: ValueRef,
+            rhs: ValueRef,
+            debug_loc: DebugLoc)
+            -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref());
+        }
+        debug_loc.apply(cx.fcx);
+        B(cx).fcmp(op, lhs, rhs)
+    }
+}
+
+/* Miscellaneous instructions */
+pub fn EmptyPhi(cx: Block, ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); }
+        B(cx).empty_phi(ty)
+    }
+}
+
+pub fn Phi(cx: Block, ty: Type, vals: &[ValueRef],
+           bbs: &[BasicBlockRef]) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); }
+        B(cx).phi(ty, vals, bbs)
+    }
+}
+
+pub fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) {
+    unsafe {
+        if llvm::LLVMIsUndef(phi) == llvm::True { return; }
+        llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
+    }
+}
+
+pub fn _UndefReturn(cx: Block, fn_: ValueRef) -> ValueRef {
+    unsafe {
+        let ccx = cx.fcx.ccx;
+        let ty = val_ty(fn_);
+        let retty = if ty.kind() == llvm::Function {
+            ty.return_type()
+        } else {
+            ccx.int_type()
+        };
+        B(cx).count_insn("ret_undef");
+        llvm::LLVMGetUndef(retty.to_ref())
+    }
+}
+
+pub fn add_span_comment(cx: Block, sp: Span, text: &str) {
+    B(cx).add_span_comment(sp, text)
+}
+
+pub fn add_comment(cx: Block, text: &str) {
+    B(cx).add_comment(text)
+}
+
+pub fn InlineAsmCall(cx: Block, asm: *const c_char, cons: *const c_char,
+                     inputs: &[ValueRef], output: Type,
+                     volatile: bool, alignstack: bool,
+                     dia: AsmDialect) -> ValueRef {
+    B(cx).inline_asm_call(asm, cons, inputs, output, volatile, alignstack, dia)
+}
+
+pub fn Call(cx: Block,
+            fn_: ValueRef,
+            args: &[ValueRef],
+            debug_loc: DebugLoc)
+            -> ValueRef {
+    if cx.unreachable.get() {
+        return _UndefReturn(cx, fn_);
+    }
+    debug_loc.apply(cx.fcx);
+    let bundle = cx.lpad.get().and_then(|b| b.bundle());
+    B(cx).call(fn_, args, bundle)
+}
+
+pub fn AtomicFence(cx: Block, order: AtomicOrdering, scope: SynchronizationScope) {
+    if cx.unreachable.get() { return; }
+    B(cx).atomic_fence(order, scope)
+}
+
+pub fn Select(cx: Block, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef {
+    if cx.unreachable.get() { return _Undef(then); }
+    B(cx).select(if_, then, else_)
+}
+
+pub fn VAArg(cx: Block, list: ValueRef, ty: Type) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); }
+        B(cx).va_arg(list, ty)
+    }
+}
+
+pub fn ExtractElement(cx: Block, vec_val: ValueRef, index: ValueRef) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
+        }
+        B(cx).extract_element(vec_val, index)
+    }
+}
+
+pub fn InsertElement(cx: Block, vec_val: ValueRef, elt_val: ValueRef,
+                     index: ValueRef) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
+        }
+        B(cx).insert_element(vec_val, elt_val, index)
+    }
+}
+
+pub fn ShuffleVector(cx: Block, v1: ValueRef, v2: ValueRef,
+                     mask: ValueRef) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
+        }
+        B(cx).shuffle_vector(v1, v2, mask)
+    }
+}
+
+pub fn VectorSplat(cx: Block, num_elts: usize, elt_val: ValueRef) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
+        }
+        B(cx).vector_splat(num_elts, elt_val)
+    }
+}
+
+pub fn ExtractValue(cx: Block, agg_val: ValueRef, index: usize) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
+        }
+        B(cx).extract_value(agg_val, index)
+    }
+}
+
+pub fn InsertValue(cx: Block, agg_val: ValueRef, elt_val: ValueRef, index: usize) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
+        }
+        B(cx).insert_value(agg_val, elt_val, index)
+    }
+}
+
+pub fn IsNull(cx: Block, val: ValueRef) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref());
+        }
+        B(cx).is_null(val)
+    }
+}
+
+pub fn IsNotNull(cx: Block, val: ValueRef) -> ValueRef {
+    unsafe {
+        if cx.unreachable.get() {
+            return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref());
+        }
+        B(cx).is_not_null(val)
+    }
+}
+
+pub fn PtrDiff(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+    unsafe {
+        let ccx = cx.fcx.ccx;
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type().to_ref()); }
+        B(cx).ptrdiff(lhs, rhs)
+    }
+}
+
+pub fn Trap(cx: Block) {
+    if cx.unreachable.get() { return; }
+    B(cx).trap();
+}
+
+pub fn LandingPad(cx: Block, ty: Type, pers_fn: ValueRef,
+                  num_clauses: usize) -> ValueRef {
+    check_not_terminated(cx);
+    assert!(!cx.unreachable.get());
+    B(cx).landing_pad(ty, pers_fn, num_clauses, cx.fcx.llfn)
+}
+
+pub fn AddClause(cx: Block, landing_pad: ValueRef, clause: ValueRef) {
+    B(cx).add_clause(landing_pad, clause)
+}
+
+pub fn SetCleanup(cx: Block, landing_pad: ValueRef) {
+    B(cx).set_cleanup(landing_pad)
+}
+
+pub fn SetPersonalityFn(cx: Block, f: ValueRef) {
+    B(cx).set_personality_fn(f)
+}
+
+pub fn Resume(cx: Block, exn: ValueRef) -> ValueRef {
+    check_not_terminated(cx);
+    terminate(cx, "Resume");
+    B(cx).resume(exn)
+}
+
+// Atomic Operations
+pub fn AtomicCmpXchg(cx: Block, dst: ValueRef,
+                     cmp: ValueRef, src: ValueRef,
+                     order: AtomicOrdering,
+                     failure_order: AtomicOrdering,
+                     weak: llvm::Bool) -> ValueRef {
+    B(cx).atomic_cmpxchg(dst, cmp, src, order, failure_order, weak)
+}
+
+pub fn AtomicRMW(cx: Block, op: AtomicBinOp,
+                 dst: ValueRef, src: ValueRef,
+                 order: AtomicOrdering) -> ValueRef {
+    B(cx).atomic_rmw(op, dst, src, order)
+}
+
+pub fn CleanupPad(cx: Block,
+                  parent: Option<ValueRef>,
+                  args: &[ValueRef]) -> ValueRef {
+    check_not_terminated(cx);
+    assert!(!cx.unreachable.get());
+    B(cx).cleanup_pad(parent, args)
+}
+
+pub fn CleanupRet(cx: Block,
+                  cleanup: ValueRef,
+                  unwind: Option<BasicBlockRef>) -> ValueRef {
+    check_not_terminated(cx);
+    terminate(cx, "CleanupRet");
+    B(cx).cleanup_ret(cleanup, unwind)
+}
+
+pub fn CatchPad(cx: Block,
+                parent: ValueRef,
+                args: &[ValueRef]) -> ValueRef {
+    check_not_terminated(cx);
+    assert!(!cx.unreachable.get());
+    B(cx).catch_pad(parent, args)
+}
+
+pub fn CatchRet(cx: Block, pad: ValueRef, unwind: BasicBlockRef) -> ValueRef {
+    check_not_terminated(cx);
+    terminate(cx, "CatchRet");
+    B(cx).catch_ret(pad, unwind)
+}
+
+pub fn CatchSwitch(cx: Block,
+                   parent: Option<ValueRef>,
+                   unwind: Option<BasicBlockRef>,
+                   num_handlers: usize) -> ValueRef {
+    check_not_terminated(cx);
+    terminate(cx, "CatchSwitch");
+    B(cx).catch_switch(parent, unwind, num_handlers)
+}
+
+pub fn AddHandler(cx: Block, catch_switch: ValueRef, handler: BasicBlockRef) {
+    B(cx).add_handler(catch_switch, handler)
+}
diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs
new file mode 100644 (file)
index 0000000..92fb342
--- /dev/null
@@ -0,0 +1,1123 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)] // FFI wrappers
+
+use llvm;
+use llvm::{AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
+use llvm::{Opcode, IntPredicate, RealPredicate, False, OperandBundleDef};
+use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef};
+use base;
+use common::*;
+use machine::llalign_of_pref;
+use type_::Type;
+use value::Value;
+use util::nodemap::FnvHashMap;
+use libc::{c_uint, c_char};
+
+use std::ffi::CString;
+use std::ptr;
+use syntax::codemap::Span;
+
+pub struct Builder<'a, 'tcx: 'a> {
+    pub llbuilder: BuilderRef,
+    pub ccx: &'a CrateContext<'a, 'tcx>,
+}
+
+// This is a really awful way to get a zero-length c-string, but better (and a
+// lot more efficient) than doing str::as_c_str("", ...) every time.
+pub fn noname() -> *const c_char {
+    static CNULL: c_char = 0;
+    &CNULL
+}
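+
+// The pointer returned by `noname()` is what gets passed as the name argument of
+// the various `LLVMBuild*` calls below whenever an instruction does not need an
+// explicit name; pointing at a static NUL byte gives LLVM an empty C string
+// without allocating a fresh `CString` per instruction.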
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+    pub fn new(ccx: &'a CrateContext<'a, 'tcx>) -> Builder<'a, 'tcx> {
+        Builder {
+            llbuilder: ccx.raw_builder(),
+            ccx: ccx,
+        }
+    }
+
+    pub fn count_insn(&self, category: &str) {
+        if self.ccx.sess().trans_stats() {
+            self.ccx.stats().n_llvm_insns.set(self.ccx
+                                                .stats()
+                                                .n_llvm_insns
+                                                .get() + 1);
+        }
+        self.ccx.count_llvm_insn();
+        if self.ccx.sess().count_llvm_insns() {
+            base::with_insn_ctxt(|v| {
+                let mut h = self.ccx.stats().llvm_insns.borrow_mut();
+
+                // Build version of path with cycles removed.
+
+                // Pass 1: scan table mapping str -> rightmost pos.
+                let mut mm = FnvHashMap();
+                let len = v.len();
+                let mut i = 0;
+                while i < len {
+                    mm.insert(v[i], i);
+                    i += 1;
+                }
+
+                // Pass 2: concat strings for each elt, skipping
+                // forwards over any cycles by advancing to rightmost
+                // occurrence of each element in path.
+                let mut s = String::from(".");
+                i = 0;
+                while i < len {
+                    i = mm[v[i]];
+                    s.push('/');
+                    s.push_str(v[i]);
+                    i += 1;
+                }
+
+                s.push('/');
+                s.push_str(category);
+
+                let n = match h.get(&s) {
+                    Some(&n) => n,
+                    _ => 0
+                };
+                h.insert(s, n+1);
+            })
+        }
+    }
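+
+    // Worked example for the cycle removal above: with an instruction-context
+    // path of ["a", "b", "a", "c"], pass 1 records the rightmost position of
+    // each entry ({a: 2, b: 1, c: 3}); pass 2 starts at index 0, jumps straight
+    // to the rightmost "a" (index 2) and then to "c", producing "./a/c/<category>"
+    // and skipping the "b" that only occurred inside the a..a cycle.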
+
+    pub fn position_before(&self, insn: ValueRef) {
+        unsafe {
+            llvm::LLVMPositionBuilderBefore(self.llbuilder, insn);
+        }
+    }
+
+    pub fn position_at_end(&self, llbb: BasicBlockRef) {
+        unsafe {
+            llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
+        }
+    }
+
+    pub fn position_at_start(&self, llbb: BasicBlockRef) {
+        unsafe {
+            llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
+        }
+    }
+
+    pub fn ret_void(&self) {
+        self.count_insn("retvoid");
+        unsafe {
+            llvm::LLVMBuildRetVoid(self.llbuilder);
+        }
+    }
+
+    pub fn ret(&self, v: ValueRef) {
+        self.count_insn("ret");
+        unsafe {
+            llvm::LLVMBuildRet(self.llbuilder, v);
+        }
+    }
+
+    pub fn aggregate_ret(&self, ret_vals: &[ValueRef]) {
+        unsafe {
+            llvm::LLVMBuildAggregateRet(self.llbuilder,
+                                        ret_vals.as_ptr(),
+                                        ret_vals.len() as c_uint);
+        }
+    }
+
+    pub fn br(&self, dest: BasicBlockRef) {
+        self.count_insn("br");
+        unsafe {
+            llvm::LLVMBuildBr(self.llbuilder, dest);
+        }
+    }
+
+    pub fn cond_br(&self, cond: ValueRef, then_llbb: BasicBlockRef, else_llbb: BasicBlockRef) {
+        self.count_insn("condbr");
+        unsafe {
+            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
+        }
+    }
+
+    pub fn switch(&self, v: ValueRef, else_llbb: BasicBlockRef, num_cases: usize) -> ValueRef {
+        unsafe {
+            llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint)
+        }
+    }
+
+    pub fn indirect_br(&self, addr: ValueRef, num_dests: usize) {
+        self.count_insn("indirectbr");
+        unsafe {
+            llvm::LLVMBuildIndirectBr(self.llbuilder, addr, num_dests as c_uint);
+        }
+    }
+
+    pub fn invoke(&self,
+                  llfn: ValueRef,
+                  args: &[ValueRef],
+                  then: BasicBlockRef,
+                  catch: BasicBlockRef,
+                  bundle: Option<&OperandBundleDef>)
+                  -> ValueRef {
+        self.count_insn("invoke");
+
+        debug!("Invoke {:?} with args ({})",
+               Value(llfn),
+               args.iter()
+                   .map(|&v| format!("{:?}", Value(v)))
+                   .collect::<Vec<String>>()
+                   .join(", "));
+
+        let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(0 as *mut _);
+
+        unsafe {
+            llvm::LLVMRustBuildInvoke(self.llbuilder,
+                                      llfn,
+                                      args.as_ptr(),
+                                      args.len() as c_uint,
+                                      then,
+                                      catch,
+                                      bundle,
+                                      noname())
+        }
+    }
+
+    pub fn unreachable(&self) {
+        self.count_insn("unreachable");
+        unsafe {
+            llvm::LLVMBuildUnreachable(self.llbuilder);
+        }
+    }
+
+    /* Arithmetic */
+    pub fn add(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("add");
+        unsafe {
+            llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn nswadd(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("nswadd");
+        unsafe {
+            llvm::LLVMBuildNSWAdd(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn nuwadd(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("nuwadd");
+        unsafe {
+            llvm::LLVMBuildNUWAdd(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn fadd(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("fadd");
+        unsafe {
+            llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn fadd_fast(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("fadd");
+        unsafe {
+            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname());
+            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            instr
+        }
+    }
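+
+    // The `*_fast` variants build the same instruction as their plain
+    // counterparts and then flag it via `LLVMRustSetHasUnsafeAlgebra`, i.e. they
+    // opt that single float operation into LLVM's fast-math ("unsafe algebra")
+    // relaxations.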
+
+    pub fn sub(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("sub");
+        unsafe {
+            llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn nswsub(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("nwsub");
+        unsafe {
+            llvm::LLVMBuildNSWSub(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn nuwsub(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("nuwsub");
+        unsafe {
+            llvm::LLVMBuildNUWSub(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn fsub(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("sub");
+        unsafe {
+            llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn fsub_fast(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("sub");
+        unsafe {
+            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname());
+            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            instr
+        }
+    }
+
+    pub fn mul(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("mul");
+        unsafe {
+            llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn nswmul(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("nswmul");
+        unsafe {
+            llvm::LLVMBuildNSWMul(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn nuwmul(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("nuwmul");
+        unsafe {
+            llvm::LLVMBuildNUWMul(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn fmul(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("fmul");
+        unsafe {
+            llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn fmul_fast(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("fmul");
+        unsafe {
+            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname());
+            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            instr
+        }
+    }
+
+
+    pub fn udiv(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("udiv");
+        unsafe {
+            llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn sdiv(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("sdiv");
+        unsafe {
+            llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn exactsdiv(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("exactsdiv");
+        unsafe {
+            llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn fdiv(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("fdiv");
+        unsafe {
+            llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn fdiv_fast(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("fdiv");
+        unsafe {
+            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname());
+            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            instr
+        }
+    }
+
+    pub fn urem(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("urem");
+        unsafe {
+            llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn srem(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("srem");
+        unsafe {
+            llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn frem(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("frem");
+        unsafe {
+            llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn frem_fast(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("frem");
+        unsafe {
+            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname());
+            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
+            instr
+        }
+    }
+
+    pub fn shl(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("shl");
+        unsafe {
+            llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn lshr(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("lshr");
+        unsafe {
+            llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn ashr(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("ashr");
+        unsafe {
+            llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn and(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("and");
+        unsafe {
+            llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn or(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("or");
+        unsafe {
+            llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn xor(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("xor");
+        unsafe {
+            llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
+    pub fn binop(&self, op: Opcode, lhs: ValueRef, rhs: ValueRef)
+              -> ValueRef {
+        self.count_insn("binop");
+        unsafe {
+            llvm::LLVMBuildBinOp(self.llbuilder, op, lhs, rhs, noname())
+        }
+    }
+
+    pub fn neg(&self, v: ValueRef) -> ValueRef {
+        self.count_insn("neg");
+        unsafe {
+            llvm::LLVMBuildNeg(self.llbuilder, v, noname())
+        }
+    }
+
+    pub fn nswneg(&self, v: ValueRef) -> ValueRef {
+        self.count_insn("nswneg");
+        unsafe {
+            llvm::LLVMBuildNSWNeg(self.llbuilder, v, noname())
+        }
+    }
+
+    pub fn nuwneg(&self, v: ValueRef) -> ValueRef {
+        self.count_insn("nuwneg");
+        unsafe {
+            llvm::LLVMBuildNUWNeg(self.llbuilder, v, noname())
+        }
+    }
+
+    pub fn fneg(&self, v: ValueRef) -> ValueRef {
+        self.count_insn("fneg");
+        unsafe {
+            llvm::LLVMBuildFNeg(self.llbuilder, v, noname())
+        }
+    }
+
+    pub fn not(&self, v: ValueRef) -> ValueRef {
+        self.count_insn("not");
+        unsafe {
+            llvm::LLVMBuildNot(self.llbuilder, v, noname())
+        }
+    }
+
+    pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
+        self.count_insn("alloca");
+        unsafe {
+            if name.is_empty() {
+                llvm::LLVMBuildAlloca(self.llbuilder, ty.to_ref(), noname())
+            } else {
+                let name = CString::new(name).unwrap();
+                llvm::LLVMBuildAlloca(self.llbuilder, ty.to_ref(),
+                                      name.as_ptr())
+            }
+        }
+    }
+
+    pub fn free(&self, ptr: ValueRef) {
+        self.count_insn("free");
+        unsafe {
+            llvm::LLVMBuildFree(self.llbuilder, ptr);
+        }
+    }
+
+    pub fn load(&self, ptr: ValueRef) -> ValueRef {
+        self.count_insn("load");
+        unsafe {
+            llvm::LLVMBuildLoad(self.llbuilder, ptr, noname())
+        }
+    }
+
+    pub fn volatile_load(&self, ptr: ValueRef) -> ValueRef {
+        self.count_insn("load.volatile");
+        unsafe {
+            let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
+            llvm::LLVMSetVolatile(insn, llvm::True);
+            insn
+        }
+    }
+
+    pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering) -> ValueRef {
+        self.count_insn("load.atomic");
+        unsafe {
+            let ty = Type::from_ref(llvm::LLVMTypeOf(ptr));
+            let align = llalign_of_pref(self.ccx, ty.element_type());
+            llvm::LLVMBuildAtomicLoad(self.llbuilder, ptr, noname(), order,
+                                      align as c_uint)
+        }
+    }
+
+
+    pub fn load_range_assert(&self, ptr: ValueRef, lo: u64,
+                             hi: u64, signed: llvm::Bool) -> ValueRef {
+        let value = self.load(ptr);
+
+        unsafe {
+            let t = llvm::LLVMGetElementType(llvm::LLVMTypeOf(ptr));
+            let min = llvm::LLVMConstInt(t, lo, signed);
+            let max = llvm::LLVMConstInt(t, hi, signed);
+
+            let v = [min, max];
+
+            llvm::LLVMSetMetadata(value, llvm::MD_range as c_uint,
+                                  llvm::LLVMMDNodeInContext(self.ccx.llcx(),
+                                                            v.as_ptr(),
+                                                            v.len() as c_uint));
+        }
+
+        value
+    }
+
+    pub fn load_nonnull(&self, ptr: ValueRef) -> ValueRef {
+        let value = self.load(ptr);
+        unsafe {
+            llvm::LLVMSetMetadata(value, llvm::MD_nonnull as c_uint,
+                                  llvm::LLVMMDNodeInContext(self.ccx.llcx(), ptr::null(), 0));
+        }
+
+        value
+    }
+
+    pub fn store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef {
+        debug!("Store {:?} -> {:?}", Value(val), Value(ptr));
+        assert!(!self.llbuilder.is_null());
+        self.count_insn("store");
+        unsafe {
+            llvm::LLVMBuildStore(self.llbuilder, val, ptr)
+        }
+    }
+
+    pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef {
+        debug!("Store {:?} -> {:?}", Value(val), Value(ptr));
+        assert!(!self.llbuilder.is_null());
+        self.count_insn("store.volatile");
+        unsafe {
+            let insn = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
+            llvm::LLVMSetVolatile(insn, llvm::True);
+            insn
+        }
+    }
+
+    pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
+        debug!("Store {:?} -> {:?}", Value(val), Value(ptr));
+        self.count_insn("store.atomic");
+        unsafe {
+            let ty = Type::from_ref(llvm::LLVMTypeOf(ptr));
+            let align = llalign_of_pref(self.ccx, ty.element_type());
+            llvm::LLVMBuildAtomicStore(self.llbuilder, val, ptr, order, align as c_uint);
+        }
+    }
+
+    pub fn gep(&self, ptr: ValueRef, indices: &[ValueRef]) -> ValueRef {
+        self.count_insn("gep");
+        unsafe {
+            llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(),
+                               indices.len() as c_uint, noname())
+        }
+    }
+
+    // Simple wrapper around GEP that takes an array of ints and wraps them
+    // in C_i32()
+    #[inline]
+    pub fn gepi(&self, base: ValueRef, ixs: &[usize]) -> ValueRef {
+        // Small vector optimization. This should catch 100% of the cases that
+        // we care about.
+        if ixs.len() < 16 {
+            let mut small_vec = [ C_i32(self.ccx, 0); 16 ];
+            for (small_vec_e, &ix) in small_vec.iter_mut().zip(ixs) {
+                *small_vec_e = C_i32(self.ccx, ix as i32);
+            }
+            self.inbounds_gep(base, &small_vec[..ixs.len()])
+        } else {
+            let v = ixs.iter().map(|i| C_i32(self.ccx, *i as i32)).collect::<Vec<ValueRef>>();
+            self.count_insn("gepi");
+            self.inbounds_gep(base, &v[..])
+        }
+    }
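+
+    // `gepi` keeps a fixed 16-element buffer of `C_i32` constants on the stack so
+    // the common case (fewer than 16 indices) never touches the heap; only the
+    // rare long-index path allocates a `Vec` (and, as written, only that path
+    // bumps the "gepi" instruction counter).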
+
+    pub fn inbounds_gep(&self, ptr: ValueRef, indices: &[ValueRef]) -> ValueRef {
+        self.count_insn("inboundsgep");
+        unsafe {
+            llvm::LLVMBuildInBoundsGEP(
+                self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname())
+        }
+    }
+
+    pub fn struct_gep(&self, ptr: ValueRef, idx: usize) -> ValueRef {
+        self.count_insn("structgep");
+        unsafe {
+            llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname())
+        }
+    }
+
+    pub fn global_string(&self, _str: *const c_char) -> ValueRef {
+        self.count_insn("globalstring");
+        unsafe {
+            llvm::LLVMBuildGlobalString(self.llbuilder, _str, noname())
+        }
+    }
+
+    pub fn global_string_ptr(&self, _str: *const c_char) -> ValueRef {
+        self.count_insn("globalstringptr");
+        unsafe {
+            llvm::LLVMBuildGlobalStringPtr(self.llbuilder, _str, noname())
+        }
+    }
+
+    /* Casts */
+    pub fn trunc(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("trunc");
+        unsafe {
+            llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn zext(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("zext");
+        unsafe {
+            llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn sext(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("sext");
+        unsafe {
+            llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn fptoui(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("fptoui");
+        unsafe {
+            llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn fptosi(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("fptosi");
+        unsafe {
+            llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty.to_ref(),noname())
+        }
+    }
+
+    pub fn uitofp(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("uitofp");
+        unsafe {
+            llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn sitofp(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("sitofp");
+        unsafe {
+            llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn fptrunc(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("fptrunc");
+        unsafe {
+            llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn fpext(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("fpext");
+        unsafe {
+            llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn ptrtoint(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("ptrtoint");
+        unsafe {
+            llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn inttoptr(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("inttoptr");
+        unsafe {
+            llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn bitcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("bitcast");
+        unsafe {
+            llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn zext_or_bitcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("zextorbitcast");
+        unsafe {
+            llvm::LLVMBuildZExtOrBitCast(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn sext_or_bitcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("sextorbitcast");
+        unsafe {
+            llvm::LLVMBuildSExtOrBitCast(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn trunc_or_bitcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("truncorbitcast");
+        unsafe {
+            llvm::LLVMBuildTruncOrBitCast(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn cast(&self, op: Opcode, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("cast");
+        unsafe {
+            llvm::LLVMBuildCast(self.llbuilder, op, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn pointercast(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("pointercast");
+        unsafe {
+            llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn intcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("intcast");
+        unsafe {
+            llvm::LLVMBuildIntCast(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+    pub fn fpcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
+        self.count_insn("fpcast");
+        unsafe {
+            llvm::LLVMBuildFPCast(self.llbuilder, val, dest_ty.to_ref(), noname())
+        }
+    }
+
+
+    /* Comparisons */
+    pub fn icmp(&self, op: IntPredicate, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("icmp");
+        unsafe {
+            llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
+        }
+    }
+
+    pub fn fcmp(&self, op: RealPredicate, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("fcmp");
+        unsafe {
+            llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
+        }
+    }
+
+    /* Miscellaneous instructions */
+    pub fn empty_phi(&self, ty: Type) -> ValueRef {
+        self.count_insn("emptyphi");
+        unsafe {
+            llvm::LLVMBuildPhi(self.llbuilder, ty.to_ref(), noname())
+        }
+    }
+
+    pub fn phi(&self, ty: Type, vals: &[ValueRef], bbs: &[BasicBlockRef]) -> ValueRef {
+        assert_eq!(vals.len(), bbs.len());
+        let phi = self.empty_phi(ty);
+        self.count_insn("addincoming");
+        unsafe {
+            llvm::LLVMAddIncoming(phi, vals.as_ptr(),
+                                  bbs.as_ptr(),
+                                  vals.len() as c_uint);
+            phi
+        }
+    }
+
+    pub fn add_span_comment(&self, sp: Span, text: &str) {
+        if self.ccx.sess().asm_comments() {
+            let s = format!("{} ({})",
+                            text,
+                            self.ccx.sess().codemap().span_to_string(sp));
+            debug!("{}", &s[..]);
+            self.add_comment(&s[..]);
+        }
+    }
+
+    pub fn add_comment(&self, text: &str) {
+        if self.ccx.sess().asm_comments() {
+            let sanitized = text.replace("$", "");
+            let comment_text = format!("{} {}", "#",
+                                       sanitized.replace("\n", "\n\t# "));
+            self.count_insn("inlineasm");
+            let comment_text = CString::new(comment_text).unwrap();
+            let asm = unsafe {
+                llvm::LLVMConstInlineAsm(Type::func(&[], &Type::void(self.ccx)).to_ref(),
+                                         comment_text.as_ptr(), noname(), False,
+                                         False)
+            };
+            self.call(asm, &[], None);
+        }
+    }
+
+    pub fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char,
+                         inputs: &[ValueRef], output: Type,
+                         volatile: bool, alignstack: bool,
+                         dia: AsmDialect) -> ValueRef {
+        self.count_insn("inlineasm");
+
+        let volatile = if volatile { llvm::True }
+                       else        { llvm::False };
+        let alignstack = if alignstack { llvm::True }
+                         else          { llvm::False };
+
+        let argtys = inputs.iter().map(|v| {
+            debug!("Asm Input Type: {:?}", Value(*v));
+            val_ty(*v)
+        }).collect::<Vec<_>>();
+
+        debug!("Asm Output Type: {:?}", output);
+        let fty = Type::func(&argtys[..], &output);
+        unsafe {
+            let v = llvm::LLVMInlineAsm(
+                fty.to_ref(), asm, cons, volatile, alignstack, dia as c_uint);
+            self.call(v, inputs, None)
+        }
+    }
+
+    pub fn call(&self, llfn: ValueRef, args: &[ValueRef],
+                bundle: Option<&OperandBundleDef>) -> ValueRef {
+        self.count_insn("call");
+
+        debug!("Call {:?} with args ({})",
+               Value(llfn),
+               args.iter()
+                   .map(|&v| format!("{:?}", Value(v)))
+                   .collect::<Vec<String>>()
+                   .join(", "));
+
+        let mut fn_ty = val_ty(llfn);
+        // Strip off pointers
+        while fn_ty.kind() == llvm::TypeKind::Pointer {
+            fn_ty = fn_ty.element_type();
+        }
+
+        assert!(fn_ty.kind() == llvm::TypeKind::Function,
+                "builder::call not passed a function");
+
+        let param_tys = fn_ty.func_params();
+
+        let iter = param_tys.into_iter()
+            .zip(args.iter().map(|&v| val_ty(v)));
+        for (i, (expected_ty, actual_ty)) in iter.enumerate() {
+            if expected_ty != actual_ty {
+                bug!("Type mismatch in function call of {:?}. \
+                      Expected {:?} for param {}, got {:?}",
+                     Value(llfn),
+                     expected_ty, i, actual_ty);
+
+            }
+        }
+
+        let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(0 as *mut _);
+
+        unsafe {
+            llvm::LLVMRustBuildCall(self.llbuilder, llfn, args.as_ptr(),
+                                    args.len() as c_uint, bundle, noname())
+        }
+    }
+
+    pub fn select(&self, cond: ValueRef, then_val: ValueRef, else_val: ValueRef) -> ValueRef {
+        self.count_insn("select");
+        unsafe {
+            llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname())
+        }
+    }
+
+    pub fn va_arg(&self, list: ValueRef, ty: Type) -> ValueRef {
+        self.count_insn("vaarg");
+        unsafe {
+            llvm::LLVMBuildVAArg(self.llbuilder, list, ty.to_ref(), noname())
+        }
+    }
+
+    pub fn extract_element(&self, vec: ValueRef, idx: ValueRef) -> ValueRef {
+        self.count_insn("extractelement");
+        unsafe {
+            llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname())
+        }
+    }
+
+    pub fn insert_element(&self, vec: ValueRef, elt: ValueRef, idx: ValueRef) -> ValueRef {
+        self.count_insn("insertelement");
+        unsafe {
+            llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname())
+        }
+    }
+
+    pub fn shuffle_vector(&self, v1: ValueRef, v2: ValueRef, mask: ValueRef) -> ValueRef {
+        self.count_insn("shufflevector");
+        unsafe {
+            llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname())
+        }
+    }
+
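+    // Splat `elt` across a vector of `num_elts` lanes: insert it at lane 0 of an
+    // undef vector, then shuffle with an all-zero mask so every lane copies lane 0.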
+    pub fn vector_splat(&self, num_elts: usize, elt: ValueRef) -> ValueRef {
+        unsafe {
+            let elt_ty = val_ty(elt);
+            let undef = llvm::LLVMGetUndef(Type::vector(&elt_ty, num_elts as u64).to_ref());
+            let vec = self.insert_element(undef, elt, C_i32(self.ccx, 0));
+            let vec_i32_ty = Type::vector(&Type::i32(self.ccx), num_elts as u64);
+            self.shuffle_vector(vec, undef, C_null(vec_i32_ty))
+        }
+    }
+
+    pub fn extract_value(&self, agg_val: ValueRef, idx: usize) -> ValueRef {
+        self.count_insn("extractvalue");
+        unsafe {
+            llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname())
+        }
+    }
+
+    pub fn insert_value(&self, agg_val: ValueRef, elt: ValueRef,
+                       idx: usize) -> ValueRef {
+        self.count_insn("insertvalue");
+        unsafe {
+            llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint,
+                                       noname())
+        }
+    }
+
+    pub fn is_null(&self, val: ValueRef) -> ValueRef {
+        self.count_insn("isnull");
+        unsafe {
+            llvm::LLVMBuildIsNull(self.llbuilder, val, noname())
+        }
+    }
+
+    pub fn is_not_null(&self, val: ValueRef) -> ValueRef {
+        self.count_insn("isnotnull");
+        unsafe {
+            llvm::LLVMBuildIsNotNull(self.llbuilder, val, noname())
+        }
+    }
+
+    pub fn ptrdiff(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+        self.count_insn("ptrdiff");
+        unsafe {
+            llvm::LLVMBuildPtrDiff(self.llbuilder, lhs, rhs, noname())
+        }
+    }
+
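+    // Emit a call to the `llvm.trap` intrinsic, which is expected to already be
+    // declared in the current module.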
+    pub fn trap(&self) {
+        unsafe {
+            let bb: BasicBlockRef = llvm::LLVMGetInsertBlock(self.llbuilder);
+            let fn_: ValueRef = llvm::LLVMGetBasicBlockParent(bb);
+            let m: ModuleRef = llvm::LLVMGetGlobalParent(fn_);
+            let p = "llvm.trap\0".as_ptr();
+            let t: ValueRef = llvm::LLVMGetNamedFunction(m, p as *const _);
+            assert!(t as isize != 0);
+            let args: &[ValueRef] = &[];
+            self.count_insn("trap");
+            llvm::LLVMRustBuildCall(self.llbuilder, t,
+                                    args.as_ptr(), args.len() as c_uint,
+                                    0 as *mut _,
+                                    noname());
+        }
+    }
+
+    pub fn landing_pad(&self, ty: Type, pers_fn: ValueRef,
+                       num_clauses: usize,
+                       llfn: ValueRef) -> ValueRef {
+        self.count_insn("landingpad");
+        unsafe {
+            llvm::LLVMRustBuildLandingPad(self.llbuilder, ty.to_ref(), pers_fn,
+                                          num_clauses as c_uint, noname(), llfn)
+        }
+    }
+
+    pub fn add_clause(&self, landing_pad: ValueRef, clause: ValueRef) {
+        unsafe {
+            llvm::LLVMAddClause(landing_pad, clause);
+        }
+    }
+
+    pub fn set_cleanup(&self, landing_pad: ValueRef) {
+        self.count_insn("setcleanup");
+        unsafe {
+            llvm::LLVMSetCleanup(landing_pad, llvm::True);
+        }
+    }
+
+    pub fn resume(&self, exn: ValueRef) -> ValueRef {
+        self.count_insn("resume");
+        unsafe {
+            llvm::LLVMBuildResume(self.llbuilder, exn)
+        }
+    }
+
+    pub fn cleanup_pad(&self,
+                       parent: Option<ValueRef>,
+                       args: &[ValueRef]) -> ValueRef {
+        self.count_insn("cleanuppad");
+        let parent = parent.unwrap_or(0 as *mut _);
+        let name = CString::new("cleanuppad").unwrap();
+        let ret = unsafe {
+            llvm::LLVMRustBuildCleanupPad(self.llbuilder,
+                                          parent,
+                                          args.len() as c_uint,
+                                          args.as_ptr(),
+                                          name.as_ptr())
+        };
+        assert!(!ret.is_null(), "LLVM does not have support for cleanuppad");
+        return ret
+    }
+
+    pub fn cleanup_ret(&self, cleanup: ValueRef,
+                       unwind: Option<BasicBlockRef>) -> ValueRef {
+        self.count_insn("cleanupret");
+        let unwind = unwind.unwrap_or(0 as *mut _);
+        let ret = unsafe {
+            llvm::LLVMRustBuildCleanupRet(self.llbuilder, cleanup, unwind)
+        };
+        assert!(!ret.is_null(), "LLVM does not have support for cleanupret");
+        return ret
+    }
+
+    pub fn catch_pad(&self,
+                     parent: ValueRef,
+                     args: &[ValueRef]) -> ValueRef {
+        self.count_insn("catchpad");
+        let name = CString::new("catchpad").unwrap();
+        let ret = unsafe {
+            llvm::LLVMRustBuildCatchPad(self.llbuilder, parent,
+                                        args.len() as c_uint, args.as_ptr(),
+                                        name.as_ptr())
+        };
+        assert!(!ret.is_null(), "LLVM does not have support for catchpad");
+        return ret
+    }
+
+    pub fn catch_ret(&self, pad: ValueRef, unwind: BasicBlockRef) -> ValueRef {
+        self.count_insn("catchret");
+        let ret = unsafe {
+            llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind)
+        };
+        assert!(!ret.is_null(), "LLVM does not have support for catchret");
+        return ret
+    }
+
+    pub fn catch_switch(&self,
+                        parent: Option<ValueRef>,
+                        unwind: Option<BasicBlockRef>,
+                        num_handlers: usize) -> ValueRef {
+        self.count_insn("catchswitch");
+        let parent = parent.unwrap_or(0 as *mut _);
+        let unwind = unwind.unwrap_or(0 as *mut _);
+        let name = CString::new("catchswitch").unwrap();
+        let ret = unsafe {
+            llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind,
+                                           num_handlers as c_uint,
+                                           name.as_ptr())
+        };
+        assert!(!ret.is_null(), "LLVM does not have support for catchswitch");
+        return ret
+    }
+
+    pub fn add_handler(&self, catch_switch: ValueRef, handler: BasicBlockRef) {
+        unsafe {
+            llvm::LLVMRustAddHandler(catch_switch, handler);
+        }
+    }
+
+    pub fn set_personality_fn(&self, personality: ValueRef) {
+        unsafe {
+            llvm::LLVMRustSetPersonalityFn(self.llbuilder, personality);
+        }
+    }
+
+    // Atomic Operations
+    pub fn atomic_cmpxchg(&self, dst: ValueRef,
+                         cmp: ValueRef, src: ValueRef,
+                         order: AtomicOrdering,
+                         failure_order: AtomicOrdering,
+                         weak: llvm::Bool) -> ValueRef {
+        unsafe {
+            llvm::LLVMBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src,
+                                         order, failure_order, weak)
+        }
+    }
+    pub fn atomic_rmw(&self, op: AtomicBinOp,
+                     dst: ValueRef, src: ValueRef,
+                     order: AtomicOrdering) -> ValueRef {
+        unsafe {
+            llvm::LLVMBuildAtomicRMW(self.llbuilder, op, dst, src, order, False)
+        }
+    }
+
+    pub fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) {
+        unsafe {
+            llvm::LLVMBuildAtomicFence(self.llbuilder, order, scope);
+        }
+    }
+}
diff --git a/src/librustc_trans/cabi_aarch64.rs b/src/librustc_trans/cabi_aarch64.rs
new file mode 100644 (file)
index 0000000..fc11e38
--- /dev/null
+++ b/src/librustc_trans/cabi_aarch64.rs
@@ -0,0 +1,242 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(non_upper_case_globals)]
+
+use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
+use abi::{FnType, ArgType};
+use context::CrateContext;
+use type_::Type;
+
+use std::cmp;
+
+fn align_up_to(off: usize, a: usize) -> usize {
+    return (off + a - 1) / a * a;
+}
+
+fn align(off: usize, ty: Type) -> usize {
+    let a = ty_align(ty);
+    return align_up_to(off, a);
+}
+
+fn ty_align(ty: Type) -> usize {
+    match ty.kind() {
+        Integer => ((ty.int_width() as usize) + 7) / 8,
+        Pointer => 8,
+        Float => 4,
+        Double => 8,
+        Struct => {
+            if ty.is_packed() {
+                1
+            } else {
+                let str_tys = ty.field_types();
+                str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
+            }
+        }
+        Array => {
+            let elt = ty.element_type();
+            ty_align(elt)
+        }
+        Vector => {
+            let len = ty.vector_length();
+            let elt = ty.element_type();
+            ty_align(elt) * len
+        }
+        _ => bug!("ty_align: unhandled type")
+    }
+}
+
+fn ty_size(ty: Type) -> usize {
+    match ty.kind() {
+        Integer => ((ty.int_width() as usize) + 7) / 8,
+        Pointer => 8,
+        Float => 4,
+        Double => 8,
+        Struct => {
+            if ty.is_packed() {
+                let str_tys = ty.field_types();
+                str_tys.iter().fold(0, |s, t| s + ty_size(*t))
+            } else {
+                let str_tys = ty.field_types();
+                let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
+                align(size, ty)
+            }
+        }
+        Array => {
+            let len = ty.array_length();
+            let elt = ty.element_type();
+            let eltsz = ty_size(elt);
+            len * eltsz
+        }
+        Vector => {
+            let len = ty.vector_length();
+            let elt = ty.element_type();
+            let eltsz = ty_size(elt);
+            len * eltsz
+        }
+        _ => bug!("ty_size: unhandled type")
+    }
+}
+
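+// Detect a homogeneous floating-point/short-vector aggregate (HFA/HVA): every leaf
+// field must share one base type, the aggregate must contain no padding, and it may
+// have at most four uniquely addressable members.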
+fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> {
+    fn check_array(ty: Type) -> Option<(Type, u64)> {
+        let len = ty.array_length() as u64;
+        if len == 0 {
+            return None
+        }
+        let elt = ty.element_type();
+
+        // if our element is an HFA/HVA, so are we; multiply members by our len
+        is_homogenous_aggregate_ty(elt).map(|(base_ty, members)| (base_ty, len * members))
+    }
+
+    fn check_struct(ty: Type) -> Option<(Type, u64)> {
+        let str_tys = ty.field_types();
+        if str_tys.len() == 0 {
+            return None
+        }
+
+        let mut prev_base_ty = None;
+        let mut members = 0;
+        for opt_homog_agg in str_tys.iter().map(|t| is_homogenous_aggregate_ty(*t)) {
+            match (prev_base_ty, opt_homog_agg) {
+                // field isn't itself an HFA, so we aren't either
+                (_, None) => return None,
+
+                // first field - store its type and number of members
+                (None, Some((field_ty, field_members))) => {
+                    prev_base_ty = Some(field_ty);
+                    members = field_members;
+                },
+
+                // 2nd or later field - give up if it's a different type; otherwise incr. members
+                (Some(prev_ty), Some((field_ty, field_members))) => {
+                    if prev_ty != field_ty {
+                        return None;
+                    }
+                    members += field_members;
+                }
+            }
+        }
+
+        // At this point prev_base_ty must be Some(...) because:
+        //   1. str_tys has at least one element; and
+        //   2. prev_base_ty was filled in (or we would've returned early)
+        let (base_ty, members) = (prev_base_ty.unwrap(), members);
+
+        // Ensure there is no padding.
+        if ty_size(ty) == ty_size(base_ty) * (members as usize) {
+            Some((base_ty, members))
+        } else {
+            None
+        }
+    }
+
+    let homog_agg = match ty.kind() {
+        Float  => Some((ty, 1)),
+        Double => Some((ty, 1)),
+        Array  => check_array(ty),
+        Struct => check_struct(ty),
+        Vector => match ty_size(ty) {
+            4|8 => Some((ty, 1)),
+            _   => None
+        },
+        _ => None
+    };
+
+    // Ensure we have at most four uniquely addressable members
+    homog_agg.and_then(|(base_ty, members)| {
+        if members > 0 && members <= 4 {
+            Some((base_ty, members))
+        } else {
+            None
+        }
+    })
+}
+
+fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
+    if is_reg_ty(ret.ty) {
+        ret.extend_integer_width_to(32);
+        return;
+    }
+    if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ret.ty) {
+        ret.cast = Some(Type::array(&base_ty, members));
+        return;
+    }
+    let size = ty_size(ret.ty);
+    if size <= 16 {
+        let llty = if size <= 1 {
+            Type::i8(ccx)
+        } else if size <= 2 {
+            Type::i16(ccx)
+        } else if size <= 4 {
+            Type::i32(ccx)
+        } else if size <= 8 {
+            Type::i64(ccx)
+        } else {
+            Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64)
+        };
+        ret.cast = Some(llty);
+        return;
+    }
+    ret.make_indirect(ccx);
+}
+
+fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
+    if is_reg_ty(arg.ty) {
+        arg.extend_integer_width_to(32);
+        return;
+    }
+    if let Some((base_ty, members)) = is_homogenous_aggregate_ty(arg.ty) {
+        arg.cast = Some(Type::array(&base_ty, members));
+        return;
+    }
+    let size = ty_size(arg.ty);
+    if size <= 16 {
+        let llty = if size == 0 {
+            Type::array(&Type::i64(ccx), 0)
+        } else if size == 1 {
+            Type::i8(ccx)
+        } else if size == 2 {
+            Type::i16(ccx)
+        } else if size <= 4 {
+            Type::i32(ccx)
+        } else if size <= 8 {
+            Type::i64(ccx)
+        } else {
+            Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64)
+        };
+        arg.cast = Some(llty);
+        return;
+    }
+    arg.make_indirect(ccx);
+}
+
+fn is_reg_ty(ty: Type) -> bool {
+    match ty.kind() {
+        Integer
+        | Pointer
+        | Float
+        | Double
+        | Vector => true,
+        _ => false
+    }
+}
+
+pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+    if !fty.ret.is_ignore() {
+        classify_ret_ty(ccx, &mut fty.ret);
+    }
+
+    for arg in &mut fty.args {
+        if arg.is_ignore() { continue; }
+        classify_arg_ty(ccx, arg);
+    }
+}
diff --git a/src/librustc_trans/cabi_arm.rs b/src/librustc_trans/cabi_arm.rs
new file mode 100644 (file)
index 0000000..68a2e8a
--- /dev/null
+++ b/src/librustc_trans/cabi_arm.rs
@@ -0,0 +1,192 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(non_upper_case_globals)]
+
+use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
+use abi::{FnType, ArgType};
+use context::CrateContext;
+use type_::Type;
+
+use std::cmp;
+
+pub enum Flavor {
+    General,
+    Ios
+}
+
+type TyAlignFn = fn(ty: Type) -> usize;
+
+fn align_up_to(off: usize, a: usize) -> usize {
+    return (off + a - 1) / a * a;
+}
+
+fn align(off: usize, ty: Type, align_fn: TyAlignFn) -> usize {
+    let a = align_fn(ty);
+    return align_up_to(off, a);
+}
+
+fn general_ty_align(ty: Type) -> usize {
+    match ty.kind() {
+        Integer => ((ty.int_width() as usize) + 7) / 8,
+        Pointer => 4,
+        Float => 4,
+        Double => 8,
+        Struct => {
+            if ty.is_packed() {
+                1
+            } else {
+                let str_tys = ty.field_types();
+                str_tys.iter().fold(1, |a, t| cmp::max(a, general_ty_align(*t)))
+            }
+        }
+        Array => {
+            let elt = ty.element_type();
+            general_ty_align(elt)
+        }
+        Vector => {
+            let len = ty.vector_length();
+            let elt = ty.element_type();
+            general_ty_align(elt) * len
+        }
+        _ => bug!("ty_align: unhandled type")
+    }
+}
+
+// For more information see:
+// ARMv7
+// https://developer.apple.com/library/ios/documentation/Xcode/Conceptual
+//    /iPhoneOSABIReference/Articles/ARMv7FunctionCallingConventions.html
+// ARMv6
+// https://developer.apple.com/library/ios/documentation/Xcode/Conceptual
+//    /iPhoneOSABIReference/Articles/ARMv6FunctionCallingConventions.html
+fn ios_ty_align(ty: Type) -> usize {
+    match ty.kind() {
+        Integer => cmp::min(4, ((ty.int_width() as usize) + 7) / 8),
+        Pointer => 4,
+        Float => 4,
+        Double => 4,
+        Struct => {
+            if ty.is_packed() {
+                1
+            } else {
+                let str_tys = ty.field_types();
+                str_tys.iter().fold(1, |a, t| cmp::max(a, ios_ty_align(*t)))
+            }
+        }
+        Array => {
+            let elt = ty.element_type();
+            ios_ty_align(elt)
+        }
+        Vector => {
+            let len = ty.vector_length();
+            let elt = ty.element_type();
+            ios_ty_align(elt) * len
+        }
+        _ => bug!("ty_align: unhandled type")
+    }
+}
+
+fn ty_size(ty: Type, align_fn: TyAlignFn) -> usize {
+    match ty.kind() {
+        Integer => ((ty.int_width() as usize) + 7) / 8,
+        Pointer => 4,
+        Float => 4,
+        Double => 8,
+        Struct => {
+            if ty.is_packed() {
+                let str_tys = ty.field_types();
+                str_tys.iter().fold(0, |s, t| s + ty_size(*t, align_fn))
+            } else {
+                let str_tys = ty.field_types();
+                let size = str_tys.iter()
+                                  .fold(0, |s, t| {
+                                      align(s, *t, align_fn) + ty_size(*t, align_fn)
+                                  });
+                align(size, ty, align_fn)
+            }
+        }
+        Array => {
+            let len = ty.array_length();
+            let elt = ty.element_type();
+            let eltsz = ty_size(elt, align_fn);
+            len * eltsz
+        }
+        Vector => {
+            let len = ty.vector_length();
+            let elt = ty.element_type();
+            let eltsz = ty_size(elt, align_fn);
+            len * eltsz
+        }
+        _ => bug!("ty_size: unhandled type")
+    }
+}
+
+fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType, align_fn: TyAlignFn) {
+    if is_reg_ty(ret.ty) {
+        ret.extend_integer_width_to(32);
+        return;
+    }
+    let size = ty_size(ret.ty, align_fn);
+    if size <= 4 {
+        let llty = if size <= 1 {
+            Type::i8(ccx)
+        } else if size <= 2 {
+            Type::i16(ccx)
+        } else {
+            Type::i32(ccx)
+        };
+        ret.cast = Some(llty);
+        return;
+    }
+    ret.make_indirect(ccx);
+}
+
+fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, align_fn: TyAlignFn) {
+    if is_reg_ty(arg.ty) {
+        arg.extend_integer_width_to(32);
+        return;
+    }
+    let align = align_fn(arg.ty);
+    let size = ty_size(arg.ty, align_fn);
+    let llty = if align <= 4 {
+        Type::array(&Type::i32(ccx), ((size + 3) / 4) as u64)
+    } else {
+        Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64)
+    };
+    arg.cast = Some(llty);
+}
+
+fn is_reg_ty(ty: Type) -> bool {
+    match ty.kind() {
+        Integer
+        | Pointer
+        | Float
+        | Double
+        | Vector => true,
+        _ => false
+    }
+}
+
+pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) {
+    let align_fn = match flavor {
+        Flavor::General => general_ty_align as TyAlignFn,
+        Flavor::Ios => ios_ty_align as TyAlignFn,
+    };
+
+    if !fty.ret.is_ignore() {
+        classify_ret_ty(ccx, &mut fty.ret, align_fn);
+    }
+
+    for arg in &mut fty.args {
+        if arg.is_ignore() { continue; }
+        classify_arg_ty(ccx, arg, align_fn);
+    }
+}
diff --git a/src/librustc_trans/cabi_asmjs.rs b/src/librustc_trans/cabi_asmjs.rs
new file mode 100644 (file)
index 0000000..3cbc378
--- /dev/null
+++ b/src/librustc_trans/cabi_asmjs.rs
@@ -0,0 +1,55 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(non_upper_case_globals)]
+
+use llvm::{Struct, Array, Attribute};
+use abi::{FnType, ArgType};
+use context::CrateContext;
+
+// Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128
+
+// See the https://github.com/kripken/emscripten-fastcomp-clang repository.
+// The class `EmscriptenABIInfo` in `/lib/CodeGen/TargetInfo.cpp` contains the ABI definitions.
+
+fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
+    match ret.ty.kind() {
+        Struct => {
+            let field_types = ret.ty.field_types();
+            if field_types.len() == 1 {
+                ret.cast = Some(field_types[0]);
+            } else {
+                ret.make_indirect(ccx);
+            }
+        }
+        Array => {
+            ret.make_indirect(ccx);
+        }
+        _ => {}
+    }
+}
+
+fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
+    if arg.ty.is_aggregate() {
+        arg.make_indirect(ccx);
+        arg.attrs.set(Attribute::ByVal);
+    }
+}
+
+pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+    if !fty.ret.is_ignore() {
+        classify_ret_ty(ccx, &mut fty.ret);
+    }
+
+    for arg in &mut fty.args {
+        if arg.is_ignore() { continue; }
+        classify_arg_ty(ccx, arg);
+    }
+}
diff --git a/src/librustc_trans/cabi_mips.rs b/src/librustc_trans/cabi_mips.rs
new file mode 100644 (file)
index 0000000..680310e
--- /dev/null
+++ b/src/librustc_trans/cabi_mips.rs
@@ -0,0 +1,168 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(non_upper_case_globals)]
+
+use libc::c_uint;
+use std::cmp;
+use llvm;
+use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
+use abi::{ArgType, FnType};
+use context::CrateContext;
+use type_::Type;
+
+fn align_up_to(off: usize, a: usize) -> usize {
+    return (off + a - 1) / a * a;
+}
+
+fn align(off: usize, ty: Type) -> usize {
+    let a = ty_align(ty);
+    return align_up_to(off, a);
+}
+
+fn ty_align(ty: Type) -> usize {
+    match ty.kind() {
+        Integer => ((ty.int_width() as usize) + 7) / 8,
+        Pointer => 4,
+        Float => 4,
+        Double => 8,
+        Struct => {
+            if ty.is_packed() {
+                1
+            } else {
+                let str_tys = ty.field_types();
+                str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
+            }
+        }
+        Array => {
+            let elt = ty.element_type();
+            ty_align(elt)
+        }
+        Vector => {
+            let len = ty.vector_length();
+            let elt = ty.element_type();
+            ty_align(elt) * len
+        }
+        _ => bug!("ty_align: unhandled type")
+    }
+}
+
+fn ty_size(ty: Type) -> usize {
+    match ty.kind() {
+        Integer => ((ty.int_width() as usize) + 7) / 8,
+        Pointer => 4,
+        Float => 4,
+        Double => 8,
+        Struct => {
+            if ty.is_packed() {
+                let str_tys = ty.field_types();
+                str_tys.iter().fold(0, |s, t| s + ty_size(*t))
+            } else {
+                let str_tys = ty.field_types();
+                let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
+                align(size, ty)
+            }
+        }
+        Array => {
+            let len = ty.array_length();
+            let elt = ty.element_type();
+            let eltsz = ty_size(elt);
+            len * eltsz
+        }
+        Vector => {
+            let len = ty.vector_length();
+            let elt = ty.element_type();
+            let eltsz = ty_size(elt);
+            len * eltsz
+        }
+        _ => bug!("ty_size: unhandled type")
+    }
+}
+
+fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
+    if is_reg_ty(ret.ty) {
+        ret.extend_integer_width_to(32);
+    } else {
+        ret.make_indirect(ccx);
+    }
+}
+
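+// Classify one argument while tracking the running word offset so that a padding
+// `i32` can be inserted whenever an argument requiring 8-byte alignment would
+// otherwise start at a misaligned offset.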
+fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) {
+    let orig_offset = *offset;
+    let size = ty_size(arg.ty) * 8;
+    let mut align = ty_align(arg.ty);
+
+    align = cmp::min(cmp::max(align, 4), 8);
+    *offset = align_up_to(*offset, align);
+    *offset += align_up_to(size, align * 8) / 8;
+
+    if !is_reg_ty(arg.ty) {
+        arg.cast = Some(struct_ty(ccx, arg.ty));
+        arg.pad = padding_ty(ccx, align, orig_offset);
+    } else {
+        arg.extend_integer_width_to(32);
+    }
+}
+
+fn is_reg_ty(ty: Type) -> bool {
+    return match ty.kind() {
+        Integer
+        | Pointer
+        | Float
+        | Double
+        | Vector => true,
+        _ => false
+    };
+}
+
+fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option<Type> {
+    if ((align - 1) & offset) > 0 {
+        Some(Type::i32(ccx))
+    } else {
+        None
+    }
+}
+
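+// Build the list of integer pieces used to pass an aggregate by value: as many
+// `i32`s as fit, plus one narrower integer covering any remaining bits.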
+fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec<Type> {
+    let int_ty = Type::i32(ccx);
+    let mut args = Vec::new();
+
+    let mut n = size / 32;
+    while n > 0 {
+        args.push(int_ty);
+        n -= 1;
+    }
+
+    let r = size % 32;
+    if r > 0 {
+        unsafe {
+            args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint)));
+        }
+    }
+
+    args
+}
+
+fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
+    let size = ty_size(ty) * 8;
+    Type::struct_(ccx, &coerce_to_int(ccx, size), false)
+}
+
+pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+    if !fty.ret.is_ignore() {
+        classify_ret_ty(ccx, &mut fty.ret);
+    }
+
+    let mut offset = if fty.ret.is_indirect() { 4 } else { 0 };
+    for arg in &mut fty.args {
+        if arg.is_ignore() { continue; }
+        classify_arg_ty(ccx, arg, &mut offset);
+    }
+}
diff --git a/src/librustc_trans/cabi_powerpc.rs b/src/librustc_trans/cabi_powerpc.rs
new file mode 100644 (file)
index 0000000..efbdce6
--- /dev/null
+++ b/src/librustc_trans/cabi_powerpc.rs
@@ -0,0 +1,163 @@
+// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use libc::c_uint;
+use llvm;
+use llvm::{Integer, Pointer, Float, Double, Struct, Array};
+use abi::{FnType, ArgType};
+use context::CrateContext;
+use type_::Type;
+
+use std::cmp;
+
+fn align_up_to(off: usize, a: usize) -> usize {
+    return (off + a - 1) / a * a;
+}
+
+fn align(off: usize, ty: Type) -> usize {
+    let a = ty_align(ty);
+    return align_up_to(off, a);
+}
+
+fn ty_align(ty: Type) -> usize {
+    match ty.kind() {
+        Integer => {
+            unsafe {
+                ((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as usize) + 7) / 8
+            }
+        }
+        Pointer => 4,
+        Float => 4,
+        Double => 8,
+        Struct => {
+            if ty.is_packed() {
+                1
+            } else {
+                let str_tys = ty.field_types();
+                str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
+            }
+        }
+        Array => {
+            let elt = ty.element_type();
+            ty_align(elt)
+        }
+        _ => bug!("ty_align: unhandled type")
+    }
+}
+
+fn ty_size(ty: Type) -> usize {
+    match ty.kind() {
+        Integer => {
+            unsafe {
+                ((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as usize) + 7) / 8
+            }
+        }
+        Pointer => 4,
+        Float => 4,
+        Double => 8,
+        Struct => {
+            if ty.is_packed() {
+                let str_tys = ty.field_types();
+                str_tys.iter().fold(0, |s, t| s + ty_size(*t))
+            } else {
+                let str_tys = ty.field_types();
+                let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
+                align(size, ty)
+            }
+        }
+        Array => {
+            let len = ty.array_length();
+            let elt = ty.element_type();
+            let eltsz = ty_size(elt);
+            len * eltsz
+        }
+        _ => bug!("ty_size: unhandled type")
+    }
+}
+
+fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
+    if is_reg_ty(ret.ty) {
+        ret.extend_integer_width_to(32);
+    } else {
+        ret.make_indirect(ccx);
+    }
+}
+
+fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) {
+    let orig_offset = *offset;
+    let size = ty_size(arg.ty) * 8;
+    let mut align = ty_align(arg.ty);
+
+    align = cmp::min(cmp::max(align, 4), 8);
+    *offset = align_up_to(*offset, align);
+    *offset += align_up_to(size, align * 8) / 8;
+
+    if !is_reg_ty(arg.ty) {
+        arg.cast = Some(struct_ty(ccx, arg.ty));
+        arg.pad = padding_ty(ccx, align, orig_offset);
+    } else {
+        arg.extend_integer_width_to(32);
+    }
+}
+
+fn is_reg_ty(ty: Type) -> bool {
+    return match ty.kind() {
+        Integer
+        | Pointer
+        | Float
+        | Double => true,
+        _ => false
+    };
+}
+
+fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option<Type> {
+    if ((align - 1) & offset) > 0 {
+        Some(Type::i32(ccx))
+    } else {
+        None
+    }
+}
+
+fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec<Type> {
+    let int_ty = Type::i32(ccx);
+    let mut args = Vec::new();
+
+    let mut n = size / 32;
+    while n > 0 {
+        args.push(int_ty);
+        n -= 1;
+    }
+
+    let r = size % 32;
+    if r > 0 {
+        unsafe {
+            args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint)));
+        }
+    }
+
+    args
+}
+
+fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
+    let size = ty_size(ty) * 8;
+    Type::struct_(ccx, &coerce_to_int(ccx, size), false)
+}
+
+pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+    if !fty.ret.is_ignore() {
+        classify_ret_ty(ccx, &mut fty.ret);
+    }
+
+    let mut offset = if fty.ret.is_indirect() { 4 } else { 0 };
+    for arg in &mut fty.args {
+        if arg.is_ignore() { continue; }
+        classify_arg_ty(ccx, arg, &mut offset);
+    }
+}
diff --git a/src/librustc_trans/cabi_powerpc64.rs b/src/librustc_trans/cabi_powerpc64.rs
new file mode 100644 (file)
index 0000000..ba54e36
--- /dev/null
+++ b/src/librustc_trans/cabi_powerpc64.rs
@@ -0,0 +1,245 @@
+// Copyright 2014-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// FIXME: The PowerPC64 ABI needs to zero or sign extend function
+// call parameters, but compute_abi_info() is passed LLVM types
+// which have no sign information.
+//
+// Alignment of 128 bit types is not currently handled, this will
+// need to be fixed when PowerPC vector support is added.
+
+use llvm::{Integer, Pointer, Float, Double, Struct, Array};
+use abi::{FnType, ArgType};
+use context::CrateContext;
+use type_::Type;
+
+use std::cmp;
+
+fn align_up_to(off: usize, a: usize) -> usize {
+    return (off + a - 1) / a * a;
+}
+
+fn align(off: usize, ty: Type) -> usize {
+    let a = ty_align(ty);
+    return align_up_to(off, a);
+}
+
+fn ty_align(ty: Type) -> usize {
+    match ty.kind() {
+        Integer => ((ty.int_width() as usize) + 7) / 8,
+        Pointer => 8,
+        Float => 4,
+        Double => 8,
+        Struct => {
+            if ty.is_packed() {
+                1
+            } else {
+                let str_tys = ty.field_types();
+                str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
+            }
+        }
+        Array => {
+            let elt = ty.element_type();
+            ty_align(elt)
+        }
+        _ => bug!("ty_align: unhandled type")
+    }
+}
+
+fn ty_size(ty: Type) -> usize {
+    match ty.kind() {
+        Integer => ((ty.int_width() as usize) + 7) / 8,
+        Pointer => 8,
+        Float => 4,
+        Double => 8,
+        Struct => {
+            if ty.is_packed() {
+                let str_tys = ty.field_types();
+                str_tys.iter().fold(0, |s, t| s + ty_size(*t))
+            } else {
+                let str_tys = ty.field_types();
+                let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
+                align(size, ty)
+            }
+        }
+        Array => {
+            let len = ty.array_length();
+            let elt = ty.element_type();
+            let eltsz = ty_size(elt);
+            len * eltsz
+        }
+        _ => bug!("ty_size: unhandled type")
+    }
+}
+
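+// Same homogeneous-aggregate check as in cabi_aarch64.rs, but PowerPC64 allows up to
+// eight members and does not treat vector types as candidates.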
+fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> {
+    fn check_array(ty: Type) -> Option<(Type, u64)> {
+        let len = ty.array_length() as u64;
+        if len == 0 {
+            return None
+        }
+        let elt = ty.element_type();
+
+        // if our element is an HFA/HVA, so are we; multiply members by our len
+        is_homogenous_aggregate_ty(elt).map(|(base_ty, members)| (base_ty, len * members))
+    }
+
+    fn check_struct(ty: Type) -> Option<(Type, u64)> {
+        let str_tys = ty.field_types();
+        if str_tys.len() == 0 {
+            return None
+        }
+
+        let mut prev_base_ty = None;
+        let mut members = 0;
+        for opt_homog_agg in str_tys.iter().map(|t| is_homogenous_aggregate_ty(*t)) {
+            match (prev_base_ty, opt_homog_agg) {
+                // field isn't itself an HFA, so we aren't either
+                (_, None) => return None,
+
+                // first field - store its type and number of members
+                (None, Some((field_ty, field_members))) => {
+                    prev_base_ty = Some(field_ty);
+                    members = field_members;
+                },
+
+                // 2nd or later field - give up if it's a different type; otherwise incr. members
+                (Some(prev_ty), Some((field_ty, field_members))) => {
+                    if prev_ty != field_ty {
+                        return None;
+                    }
+                    members += field_members;
+                }
+            }
+        }
+
+        // At this point prev_base_ty must be Some(...) because:
+        //   1. str_tys has at least one element; and
+        //   2. prev_base_ty was filled in (or we would've returned early)
+        let (base_ty, members) = (prev_base_ty.unwrap(), members);
+
+        // Ensure there is no padding.
+        if ty_size(ty) == ty_size(base_ty) * (members as usize) {
+            Some((base_ty, members))
+        } else {
+            None
+        }
+    }
+
+    let homog_agg = match ty.kind() {
+        Float  => Some((ty, 1)),
+        Double => Some((ty, 1)),
+        Array  => check_array(ty),
+        Struct => check_struct(ty),
+        _ => None
+    };
+
+    // Ensure we have at most eight uniquely addressable members
+    homog_agg.and_then(|(base_ty, members)| {
+        if members > 0 && members <= 8 {
+            Some((base_ty, members))
+        } else {
+            None
+        }
+    })
+}
+
+fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
+    if is_reg_ty(ret.ty) {
+        ret.extend_integer_width_to(64);
+        return;
+    }
+
+    // The PowerPC64 big endian ABI doesn't return aggregates in registers
+    if ccx.sess().target.target.target_endian == "big" {
+        ret.make_indirect(ccx);
+    }
+
+    if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ret.ty) {
+        ret.cast = Some(Type::array(&base_ty, members));
+        return;
+    }
+    let size = ty_size(ret.ty);
+    if size <= 16 {
+        let llty = if size <= 1 {
+            Type::i8(ccx)
+        } else if size <= 2 {
+            Type::i16(ccx)
+        } else if size <= 4 {
+            Type::i32(ccx)
+        } else if size <= 8 {
+            Type::i64(ccx)
+        } else {
+            Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64)
+        };
+        ret.cast = Some(llty);
+        return;
+    }
+
+    ret.make_indirect(ccx);
+}
+
+fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
+    if is_reg_ty(arg.ty) {
+        arg.extend_integer_width_to(64);
+        return;
+    }
+
+    if let Some((base_ty, members)) = is_homogenous_aggregate_ty(arg.ty) {
+        arg.cast = Some(Type::array(&base_ty, members));
+        return;
+    }
+
+    arg.cast = Some(struct_ty(ccx, arg.ty));
+}
+
+fn is_reg_ty(ty: Type) -> bool {
+    match ty.kind() {
+        Integer
+        | Pointer
+        | Float
+        | Double => true,
+        _ => false
+    }
+}
+
+fn coerce_to_long(ccx: &CrateContext, size: usize) -> Vec<Type> {
+    let long_ty = Type::i64(ccx);
+    let mut args = Vec::new();
+
+    let mut n = size / 64;
+    while n > 0 {
+        args.push(long_ty);
+        n -= 1;
+    }
+
+    let r = size % 64;
+    if r > 0 {
+        args.push(Type::ix(ccx, r as u64));
+    }
+
+    args
+}
+
+fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
+    let size = ty_size(ty) * 8;
+    Type::struct_(ccx, &coerce_to_long(ccx, size), false)
+}
+
+pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+    if !fty.ret.is_ignore() {
+        classify_ret_ty(ccx, &mut fty.ret);
+    }
+
+    for arg in &mut fty.args {
+        if arg.is_ignore() { continue; }
+        classify_arg_ty(ccx, arg);
+    }
+}
diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs
new file mode 100644 (file)
index 0000000..b52231f
--- /dev/null
+++ b/src/librustc_trans/cabi_x86.rs
@@ -0,0 +1,53 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm::*;
+use abi::FnType;
+use type_::Type;
+use super::common::*;
+use super::machine::*;
+
+pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+    if !fty.ret.is_ignore() {
+        if fty.ret.ty.kind() == Struct {
+            // Returning a structure. Most often, this will use
+            // a hidden first argument. On some platforms, though,
+            // small structs are returned as integers.
+            //
+            // Some links:
+            // http://www.angelcode.com/dev/callconv/callconv.html
+            // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
+            let t = &ccx.sess().target.target;
+            if t.options.is_like_osx || t.options.is_like_windows {
+                match llsize_of_alloc(ccx, fty.ret.ty) {
+                    1 => fty.ret.cast = Some(Type::i8(ccx)),
+                    2 => fty.ret.cast = Some(Type::i16(ccx)),
+                    4 => fty.ret.cast = Some(Type::i32(ccx)),
+                    8 => fty.ret.cast = Some(Type::i64(ccx)),
+                    _ => fty.ret.make_indirect(ccx)
+                }
+            } else {
+                fty.ret.make_indirect(ccx);
+            }
+        } else {
+            fty.ret.extend_integer_width_to(32);
+        }
+    }
+
+    for arg in &mut fty.args {
+        if arg.is_ignore() { continue; }
+        if arg.ty.kind() == Struct {
+            arg.make_indirect(ccx);
+            arg.attrs.set(Attribute::ByVal);
+        } else {
+            arg.extend_integer_width_to(32);
+        }
+    }
+}
diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs
new file mode 100644 (file)
index 0000000..805c7d3
--- /dev/null
+++ b/src/librustc_trans/cabi_x86_64.rs
@@ -0,0 +1,451 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// The classification code for the x86_64 ABI is taken from the clay language
+// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
+
+#![allow(non_upper_case_globals)]
+use self::RegClass::*;
+
+use llvm::{Integer, Pointer, Float, Double};
+use llvm::{Struct, Array, Attribute, Vector};
+use abi::{ArgType, FnType};
+use context::CrateContext;
+use type_::Type;
+
+use std::cmp;
+
+#[derive(Clone, Copy, PartialEq)]
+enum RegClass {
+    NoClass,
+    Int,
+    SSEFs,
+    SSEFv,
+    SSEDs,
+    SSEDv,
+    SSEInt(/* bitwidth */ u64),
+    /// Data that can appear in the upper half of an SSE register.
+    SSEUp,
+    X87,
+    X87Up,
+    ComplexX87,
+    Memory
+}
+
+trait TypeMethods {
+    fn is_reg_ty(&self) -> bool;
+}
+
+impl TypeMethods for Type {
+    fn is_reg_ty(&self) -> bool {
+        match self.kind() {
+            Integer | Pointer | Float | Double => true,
+            _ => false
+        }
+    }
+}
+
+impl RegClass {
+    fn is_sse(&self) -> bool {
+        match *self {
+            SSEFs | SSEFv | SSEDs | SSEDv | SSEInt(_) => true,
+            _ => false
+        }
+    }
+}
+
+trait ClassList {
+    fn is_pass_byval(&self) -> bool;
+    fn is_ret_bysret(&self) -> bool;
+}
+
+impl ClassList for [RegClass] {
+    fn is_pass_byval(&self) -> bool {
+        if self.is_empty() { return false; }
+
+        let class = self[0];
+        class == Memory
+            || class == X87
+            || class == ComplexX87
+    }
+
+    fn is_ret_bysret(&self) -> bool {
+        if self.is_empty() { return false; }
+
+        self[0] == Memory
+    }
+}
+
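+// Assign one register class per eightbyte (8-byte chunk) of the value, following the
+// System V x86-64 classification algorithm; anything larger than four eightbytes, or
+// containing unaligned or x87 data, ends up classified as Memory.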
+fn classify_ty(ty: Type) -> Vec<RegClass> {
+    fn align(off: usize, ty: Type) -> usize {
+        let a = ty_align(ty);
+        return (off + a - 1) / a * a;
+    }
+
+    fn ty_align(ty: Type) -> usize {
+        match ty.kind() {
+            Integer => ((ty.int_width() as usize) + 7) / 8,
+            Pointer => 8,
+            Float => 4,
+            Double => 8,
+            Struct => {
+                if ty.is_packed() {
+                    1
+                } else {
+                    let str_tys = ty.field_types();
+                    str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
+                }
+            }
+            Array => {
+                let elt = ty.element_type();
+                ty_align(elt)
+            }
+            Vector => {
+                let len = ty.vector_length();
+                let elt = ty.element_type();
+                ty_align(elt) * len
+            }
+            _ => bug!("ty_align: unhandled type")
+        }
+    }
+
+    fn ty_size(ty: Type) -> usize {
+        match ty.kind() {
+            Integer => (ty.int_width() as usize + 7) / 8,
+            Pointer => 8,
+            Float => 4,
+            Double => 8,
+            Struct => {
+                let str_tys = ty.field_types();
+                if ty.is_packed() {
+                    str_tys.iter().fold(0, |s, t| s + ty_size(*t))
+                } else {
+                    let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
+                    align(size, ty)
+                }
+            }
+            Array => {
+                let len = ty.array_length();
+                let elt = ty.element_type();
+                let eltsz = ty_size(elt);
+                len * eltsz
+            }
+            Vector => {
+                let len = ty.vector_length();
+                let elt = ty.element_type();
+                let eltsz = ty_size(elt);
+                len * eltsz
+            }
+
+            _ => bug!("ty_size: unhandled type")
+        }
+    }
+
+    fn all_mem(cls: &mut [RegClass]) {
+        for elt in cls {
+            *elt = Memory;
+        }
+    }
+
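+    // Merge a newly computed class into slot `i`: NoClass is the identity, Memory
+    // dominates, any x87 class also forces Memory, Int wins over the SSE classes,
+    // and SSEUp keeps an existing SSE class.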
+    fn unify(cls: &mut [RegClass],
+             i: usize,
+             newv: RegClass) {
+        if cls[i] == newv { return }
+
+        let to_write = match (cls[i], newv) {
+            (NoClass,     _) => newv,
+            (_,           NoClass) => return,
+
+            (Memory,      _) |
+            (_,           Memory) => Memory,
+
+            (Int,         _) |
+            (_,           Int) => Int,
+
+            (X87,         _) |
+            (X87Up,       _) |
+            (ComplexX87,  _) |
+            (_,           X87) |
+            (_,           X87Up) |
+            (_,           ComplexX87) => Memory,
+
+            (SSEFv,       SSEUp) |
+            (SSEFs,       SSEUp) |
+            (SSEDv,       SSEUp) |
+            (SSEDs,       SSEUp) |
+            (SSEInt(_),   SSEUp) => return,
+
+            (_,           _) => newv
+        };
+        cls[i] = to_write;
+    }
+
+    fn classify_struct(tys: &[Type],
+                       cls: &mut [RegClass],
+                       i: usize,
+                       off: usize,
+                       packed: bool) {
+        let mut field_off = off;
+        for ty in tys {
+            if !packed {
+                field_off = align(field_off, *ty);
+            }
+            classify(*ty, cls, i, field_off);
+            field_off += ty_size(*ty);
+        }
+    }
+
+    fn classify(ty: Type,
+                cls: &mut [RegClass], ix: usize,
+                off: usize) {
+        let t_align = ty_align(ty);
+        let t_size = ty_size(ty);
+
+        let misalign = off % t_align;
+        if misalign != 0 {
+            let mut i = off / 8;
+            let e = (off + t_size + 7) / 8;
+            while i < e {
+                unify(cls, ix + i, Memory);
+                i += 1;
+            }
+            return;
+        }
+
+        match ty.kind() {
+            Integer |
+            Pointer => {
+                unify(cls, ix + off / 8, Int);
+            }
+            Float => {
+                if off % 8 == 4 {
+                    unify(cls, ix + off / 8, SSEFv);
+                } else {
+                    unify(cls, ix + off / 8, SSEFs);
+                }
+            }
+            Double => {
+                unify(cls, ix + off / 8, SSEDs);
+            }
+            Struct => {
+                classify_struct(&ty.field_types(), cls, ix, off, ty.is_packed());
+            }
+            Array => {
+                let len = ty.array_length();
+                let elt = ty.element_type();
+                let eltsz = ty_size(elt);
+                let mut i = 0;
+                while i < len {
+                    classify(elt, cls, ix, off + i * eltsz);
+                    i += 1;
+                }
+            }
+            Vector => {
+                let len = ty.vector_length();
+                let elt = ty.element_type();
+                let eltsz = ty_size(elt);
+                let mut reg = match elt.kind() {
+                    Integer => SSEInt(elt.int_width()),
+                    Float => SSEFv,
+                    Double => SSEDv,
+                    _ => bug!("classify: unhandled vector element type")
+                };
+
+                let mut i = 0;
+                while i < len {
+                    unify(cls, ix + (off + i * eltsz) / 8, reg);
+
+                    // everything after the first one is the upper
+                    // half of a register.
+                    reg = SSEUp;
+                    i += 1;
+                }
+            }
+            _ => bug!("classify: unhandled type")
+        }
+    }
+
+    fn fixup(ty: Type, cls: &mut [RegClass]) {
+        let mut i = 0;
+        let ty_kind = ty.kind();
+        let e = cls.len();
+        if cls.len() > 2 && (ty_kind == Struct || ty_kind == Array || ty_kind == Vector) {
+            if cls[i].is_sse() {
+                i += 1;
+                while i < e {
+                    if cls[i] != SSEUp {
+                        all_mem(cls);
+                        return;
+                    }
+                    i += 1;
+                }
+            } else {
+                all_mem(cls);
+                return
+            }
+        } else {
+            while i < e {
+                if cls[i] == Memory {
+                    all_mem(cls);
+                    return;
+                }
+                if cls[i] == X87Up {
+                    // for darwin
+                    // cls[i] = SSEDs;
+                    all_mem(cls);
+                    return;
+                }
+                if cls[i] == SSEUp {
+                    cls[i] = SSEDv;
+                } else if cls[i].is_sse() {
+                    i += 1;
+                    while i != e && cls[i] == SSEUp { i += 1; }
+                } else if cls[i] == X87 {
+                    i += 1;
+                    while i != e && cls[i] == X87Up { i += 1; }
+                } else {
+                    i += 1;
+                }
+            }
+        }
+    }
+
+    let words = (ty_size(ty) + 7) / 8;
+    let mut cls = vec![NoClass; words];
+    if words > 4 {
+        all_mem(&mut cls);
+        return cls;
+    }
+    classify(ty, &mut cls, 0, 0);
+    fixup(ty, &mut cls);
+    return cls;
+}
+
+fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type {
+    fn llvec_len(cls: &[RegClass]) -> usize {
+        let mut len = 1;
+        for c in cls {
+            if *c != SSEUp {
+                break;
+            }
+            len += 1;
+        }
+        return len;
+    }
+
+    let mut tys = Vec::new();
+    let mut i = 0;
+    let e = cls.len();
+    while i < e {
+        match cls[i] {
+            Int => {
+                tys.push(Type::i64(ccx));
+            }
+            SSEFv | SSEDv | SSEInt(_) => {
+                let (elts_per_word, elt_ty) = match cls[i] {
+                    SSEFv => (2, Type::f32(ccx)),
+                    SSEDv => (1, Type::f64(ccx)),
+                    SSEInt(bits) => {
+                        assert!(bits == 8 || bits == 16 || bits == 32 || bits == 64,
+                                "llreg_ty: unsupported SSEInt width {}", bits);
+                        (64 / bits, Type::ix(ccx, bits))
+                    }
+                    _ => bug!(),
+                };
+                let vec_len = llvec_len(&cls[i + 1..]);
+                let vec_ty = Type::vector(&elt_ty, vec_len as u64 * elts_per_word);
+                tys.push(vec_ty);
+                i += vec_len;
+                continue;
+            }
+            SSEFs => {
+                tys.push(Type::f32(ccx));
+            }
+            SSEDs => {
+                tys.push(Type::f64(ccx));
+            }
+            _ => bug!("llreg_ty: unhandled class")
+        }
+        i += 1;
+    }
+    if tys.len() == 1 && tys[0].kind() == Vector {
+        // if the type contains only a vector, pass it as that vector.
+        tys[0]
+    } else {
+        Type::struct_(ccx, &tys, false)
+    }
+}
+
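+// For illustration, roughly how the classification above plays out for two
+// simple (made-up) aggregates:
+//
+//   struct Pair { a: u64, b: f64 }    // 16 bytes = two eightbytes
+//     classify_ty => [Int, SSEDs]
+//     llreg_ty    => { i64, double }  (one integer + one SSE register)
+//
+//   struct TwoF { a: f32, b: f32 }    // 8 bytes = one eightbyte
+//     classify_ty => [SSEFv]
+//     llreg_ty    => <2 x float>      (a single SSE register)
+//
+// Aggregates larger than four eightbytes (32 bytes) are classified as
+// all-Memory and passed indirectly.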
+pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+    fn x86_64_ty<F>(ccx: &CrateContext,
+                    arg: &mut ArgType,
+                    is_mem_cls: F,
+                    ind_attr: Option<Attribute>)
+        where F: FnOnce(&[RegClass]) -> bool
+    {
+        if !arg.ty.is_reg_ty() {
+            let cls = classify_ty(arg.ty);
+            if is_mem_cls(&cls) {
+                arg.make_indirect(ccx);
+                if let Some(attr) = ind_attr {
+                    arg.attrs.set(attr);
+                }
+            } else {
+                arg.cast = Some(llreg_ty(ccx, &cls));
+            }
+        } else {
+            arg.extend_integer_width_to(32);
+        }
+    }
+
+    let mut int_regs = 6; // RDI, RSI, RDX, RCX, R8, R9
+    let mut sse_regs = 8; // XMM0-7
+
+    if !fty.ret.is_ignore() {
+        x86_64_ty(ccx, &mut fty.ret, |cls| {
+            if cls.is_ret_bysret() {
+                // The `sret` return pointer occupies an integer register,
+                // so one less is available for arguments.
+                int_regs -= 1;
+                true
+            } else {
+                false
+            }
+        }, None);
+    }
+
+    for arg in &mut fty.args {
+        if arg.is_ignore() { continue; }
+        x86_64_ty(ccx, arg, |cls| {
+            let needed_int = cls.iter().filter(|&&c| c == Int).count() as isize;
+            let needed_sse = cls.iter().filter(|c| c.is_sse()).count() as isize;
+            let in_mem = cls.is_pass_byval() ||
+                         int_regs < needed_int ||
+                         sse_regs < needed_sse;
+            if in_mem {
+                // `byval` parameter thus one less integer register available
+                int_regs -= 1;
+            } else {
+                // split into sized chunks passed individually
+                int_regs -= needed_int;
+                sse_regs -= needed_sse;
+            }
+            in_mem
+        }, Some(Attribute::ByVal));
+
+        // An integer, pointer, double or float parameter is a register
+        // type, so the closure passed to `x86_64_ty` above was not
+        // called for it; account for the register it uses here.
+        match arg.ty.kind() {
+            Integer | Pointer => int_regs -= 1,
+            Double | Float => sse_regs -= 1,
+            _ => {}
+        }
+    }
+}
diff --git a/src/librustc_trans/cabi_x86_win64.rs b/src/librustc_trans/cabi_x86_win64.rs
new file mode 100644 (file)
index 0000000..71ecb6e
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm::*;
+use super::common::*;
+use super::machine::*;
+use abi::{ArgType, FnType};
+use type_::Type;
+
+// Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
+
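+// Roughly: an aggregate whose size is exactly 1, 2, 4 or 8 bytes is passed
+// directly, cast to the integer of that width (e.g. a two-field `(u8, u8)`
+// struct becomes an i16); any other aggregate, such as a 16-byte struct, is
+// passed indirectly by pointer. Integer arguments narrower than 32 bits are
+// extended to 32 bits.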
+pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+    let fixup = |a: &mut ArgType| {
+        if a.ty.kind() == Struct {
+            match llsize_of_alloc(ccx, a.ty) {
+                1 => a.cast = Some(Type::i8(ccx)),
+                2 => a.cast = Some(Type::i16(ccx)),
+                4 => a.cast = Some(Type::i32(ccx)),
+                8 => a.cast = Some(Type::i64(ccx)),
+                _ => a.make_indirect(ccx)
+            }
+        } else {
+            a.extend_integer_width_to(32);
+        }
+    };
+
+    if !fty.ret.is_ignore() {
+        fixup(&mut fty.ret);
+    }
+    for arg in &mut fty.args {
+        if arg.is_ignore() { continue; }
+        fixup(arg);
+    }
+}
diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs
new file mode 100644 (file)
index 0000000..f86dd2d
--- /dev/null
@@ -0,0 +1,1039 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Handles translation of callees as well as other call-related
+//! things.  Callees are a superset of normal Rust values and sometimes
+//! have different representations.  In particular, top-level fn items
+//! and methods are represented as just a fn ptr and not a full
+//! closure.
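+//!
+//! For example, at the language level (a sketch with made-up names):
+//!
+//! ```
+//! fn double(x: i32) -> i32 { x * 2 }
+//!
+//! let f: fn(i32) -> i32 = double; // fn item: just a code pointer, no environment
+//! let y = 10;
+//! let g = move |x: i32| x + y;    // closure: code pointer plus the captured `y`
+//! ```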
+
+pub use self::CalleeData::*;
+pub use self::CallArgs::*;
+
+use arena::TypedArena;
+use back::symbol_names;
+use llvm::{self, ValueRef, get_params};
+use middle::cstore::LOCAL_CRATE;
+use rustc::hir::def_id::DefId;
+use rustc::infer;
+use rustc::ty::subst;
+use rustc::ty::subst::{Substs};
+use rustc::traits;
+use rustc::hir::map as hir_map;
+use abi::{Abi, FnType};
+use adt;
+use attributes;
+use base;
+use base::*;
+use build::*;
+use cleanup;
+use cleanup::CleanupMethods;
+use closure;
+use common::{self, Block, Result, CrateContext, FunctionContext};
+use common::{C_uint, C_undef};
+use consts;
+use datum::*;
+use debuginfo::DebugLoc;
+use declare;
+use expr;
+use glue;
+use inline;
+use intrinsic;
+use machine::{llalign_of_min, llsize_of_store};
+use meth;
+use monomorphize::{self, Instance};
+use type_::Type;
+use type_of;
+use value::Value;
+use Disr;
+use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc::hir;
+
+use syntax::codemap::DUMMY_SP;
+use syntax::errors;
+use syntax::ptr::P;
+
+use std::cmp;
+
+#[derive(Debug)]
+pub enum CalleeData {
+    /// Constructor for enum variant/tuple-like-struct.
+    NamedTupleConstructor(Disr),
+
+    /// Function pointer.
+    Fn(ValueRef),
+
+    Intrinsic,
+
+    /// Virtual method call: the fn pointer is found in the vtable at the given index.
+    Virtual(usize)
+}
+
+#[derive(Debug)]
+pub struct Callee<'tcx> {
+    pub data: CalleeData,
+    pub ty: Ty<'tcx>
+}
+
+impl<'tcx> Callee<'tcx> {
+    /// Function pointer.
+    pub fn ptr(datum: Datum<'tcx, Rvalue>) -> Callee<'tcx> {
+        Callee {
+            data: Fn(datum.val),
+            ty: datum.ty
+        }
+    }
+
+    /// Trait or impl method call.
+    pub fn method_call<'blk>(bcx: Block<'blk, 'tcx>,
+                             method_call: ty::MethodCall)
+                             -> Callee<'tcx> {
+        let method = bcx.tcx().tables.borrow().method_map[&method_call];
+        Callee::method(bcx, method)
+    }
+
+    /// Trait or impl method.
+    pub fn method<'blk>(bcx: Block<'blk, 'tcx>,
+                        method: ty::MethodCallee<'tcx>) -> Callee<'tcx> {
+        let substs = bcx.tcx().mk_substs(bcx.fcx.monomorphize(&method.substs));
+        Callee::def(bcx.ccx(), method.def_id, substs)
+    }
+
+    /// Function or method definition.
+    pub fn def<'a>(ccx: &CrateContext<'a, 'tcx>,
+                   def_id: DefId,
+                   substs: &'tcx subst::Substs<'tcx>)
+                   -> Callee<'tcx> {
+        let tcx = ccx.tcx();
+
+        if substs.self_ty().is_some() {
+            // Only trait methods can have a Self parameter.
+            return Callee::trait_method(ccx, def_id, substs);
+        }
+
+        let maybe_node_id = inline::get_local_instance(ccx, def_id)
+            .and_then(|def_id| tcx.map.as_local_node_id(def_id));
+        let maybe_ast_node = maybe_node_id.and_then(|node_id| {
+            tcx.map.find(node_id)
+        });
+
+        let data = match maybe_ast_node {
+            Some(hir_map::NodeStructCtor(_)) => {
+                NamedTupleConstructor(Disr(0))
+            }
+            Some(hir_map::NodeVariant(_)) => {
+                let vinfo = common::inlined_variant_def(ccx, maybe_node_id.unwrap());
+                NamedTupleConstructor(Disr::from(vinfo.disr_val))
+            }
+            Some(hir_map::NodeForeignItem(fi)) if {
+                let abi = tcx.map.get_foreign_abi(fi.id);
+                abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic
+            } => Intrinsic,
+
+            _ => return Callee::ptr(get_fn(ccx, def_id, substs))
+        };
+
+        Callee {
+            data: data,
+            ty: def_ty(tcx, def_id, substs)
+        }
+    }
+
+    /// Trait method, which has to be resolved to an impl method.
+    pub fn trait_method<'a>(ccx: &CrateContext<'a, 'tcx>,
+                            def_id: DefId,
+                            substs: &'tcx subst::Substs<'tcx>)
+                            -> Callee<'tcx> {
+        let tcx = ccx.tcx();
+
+        let method_item = tcx.impl_or_trait_item(def_id);
+        let trait_id = method_item.container().id();
+        let trait_ref = ty::Binder(substs.to_trait_ref(tcx, trait_id));
+        let trait_ref = infer::normalize_associated_type(tcx, &trait_ref);
+        match common::fulfill_obligation(ccx, DUMMY_SP, trait_ref) {
+            traits::VtableImpl(vtable_impl) => {
+                let impl_did = vtable_impl.impl_def_id;
+                let mname = tcx.item_name(def_id);
+                // create a concatenated set of substitutions which includes
+                // those from the impl and those from the method:
+                let impl_substs = vtable_impl.substs.with_method_from(&substs);
+                let substs = tcx.mk_substs(impl_substs);
+                let mth = meth::get_impl_method(tcx, impl_did, substs, mname);
+
+                // Translate the function, bypassing Callee::def.
+                // That is because default methods have the same ID as the
+                // trait method used to look up the impl method that ended
+                // up here, so calling Callee::def would infinitely recurse.
+                Callee::ptr(get_fn(ccx, mth.method.def_id, mth.substs))
+            }
+            traits::VtableClosure(vtable_closure) => {
+                // The substitutions should have no type parameters remaining
+                // after passing through fulfill_obligation
+                let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap();
+                let llfn = closure::trans_closure_method(ccx,
+                                                         vtable_closure.closure_def_id,
+                                                         vtable_closure.substs,
+                                                         trait_closure_kind);
+
+                let method_ty = def_ty(tcx, def_id, substs);
+                let fn_ptr_ty = match method_ty.sty {
+                    ty::TyFnDef(_, _, fty) => tcx.mk_ty(ty::TyFnPtr(fty)),
+                    _ => bug!("expected fn item type, found {}",
+                              method_ty)
+                };
+                Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty))
+            }
+            traits::VtableFnPointer(fn_ty) => {
+                let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap();
+                let llfn = trans_fn_pointer_shim(ccx, trait_closure_kind, fn_ty);
+
+                let method_ty = def_ty(tcx, def_id, substs);
+                let fn_ptr_ty = match method_ty.sty {
+                    ty::TyFnDef(_, _, fty) => tcx.mk_ty(ty::TyFnPtr(fty)),
+                    _ => bug!("expected fn item type, found {}",
+                              method_ty)
+                };
+                Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty))
+            }
+            traits::VtableObject(ref data) => {
+                Callee {
+                    data: Virtual(traits::get_vtable_index_of_object_method(
+                        tcx, data, def_id)),
+                    ty: def_ty(tcx, def_id, substs)
+                }
+            }
+            vtable => {
+                bug!("resolved vtable bad vtable {:?} in trans", vtable);
+            }
+        }
+    }
+
+    /// Get the abi::FnType for a direct call. Mainly deals with the fact
+    /// that a Virtual call doesn't take the vtable argument, unlike its shim (which does).
+    /// The extra argument types are for variadic (extern "C") functions.
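+    ///
+    /// For example, for a variadic C call such as `printf("%d\n", x)`,
+    /// `extra_args` holds the types of the arguments passed beyond the
+    /// declared parameters (here, the type of `x`).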
+    pub fn direct_fn_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
+                              extra_args: &[Ty<'tcx>]) -> FnType {
+        let abi = self.ty.fn_abi();
+        let sig = ccx.tcx().erase_late_bound_regions(self.ty.fn_sig());
+        let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+        let mut fn_ty = FnType::unadjusted(ccx, abi, &sig, extra_args);
+        if let Virtual(_) = self.data {
+            // Don't pass the vtable, it's not an argument of the virtual fn.
+            fn_ty.args[1].ignore();
+        }
+        fn_ty.adjust_for_abi(ccx, abi, &sig);
+        fn_ty
+    }
+
+    /// This behemoth of a function translates function calls. Unfortunately, in
+    /// order to generate more efficient LLVM output at -O0, it has quite a complex
+    /// signature (refactoring this into two functions seems like a good idea).
+    ///
+    /// In particular, for lang items, it is invoked with a dest of None, and in
+    /// that case the return value contains the result of the fn. The lang item must
+    /// not return a structural type or else all heck breaks loose.
+    ///
+    /// For non-lang items, `dest` is always Some, and hence the result is written
+    /// into memory somewhere. Nonetheless we return the actual return value of the
+    /// function.
+    pub fn call<'a, 'blk>(self, bcx: Block<'blk, 'tcx>,
+                          debug_loc: DebugLoc,
+                          args: CallArgs<'a, 'tcx>,
+                          dest: Option<expr::Dest>)
+                          -> Result<'blk, 'tcx> {
+        trans_call_inner(bcx, debug_loc, self, args, dest)
+    }
+
+    /// Turn the callee into a function pointer.
+    pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>)
+                     -> Datum<'tcx, Rvalue> {
+        let fn_ptr_ty = match self.ty.sty {
+            ty::TyFnDef(_, _, f) => ccx.tcx().mk_ty(ty::TyFnPtr(f)),
+            _ => self.ty
+        };
+        match self.data {
+            Fn(llfn) => {
+                immediate_rvalue(llfn, fn_ptr_ty)
+            }
+            Virtual(idx) => {
+                let llfn = meth::trans_object_shim(ccx, self.ty, idx);
+                immediate_rvalue(llfn, fn_ptr_ty)
+            }
+            NamedTupleConstructor(_) => match self.ty.sty {
+                ty::TyFnDef(def_id, substs, _) => {
+                    return get_fn(ccx, def_id, substs);
+                }
+                _ => bug!("expected fn item type, found {}", self.ty)
+            },
+            Intrinsic => bug!("intrinsic {} getting reified", self.ty)
+        }
+    }
+}
+
+/// Given a DefId and some Substs, produces the monomorphic item type.
+fn def_ty<'tcx>(tcx: &TyCtxt<'tcx>,
+                def_id: DefId,
+                substs: &'tcx subst::Substs<'tcx>)
+                -> Ty<'tcx> {
+    let ty = tcx.lookup_item_type(def_id).ty;
+    monomorphize::apply_param_substs(tcx, substs, &ty)
+}
+
+/// Translates an adapter that implements the `Fn` trait for a fn
+/// pointer. This is basically the equivalent of something like:
+///
+/// ```
+/// impl<'a> Fn(&'a int) -> &'a int for fn(&int) -> &int {
+///     extern "rust-abi" fn call(&self, args: (&'a int,)) -> &'a int {
+///         (*self)(args.0)
+///     }
+/// }
+/// ```
+///
+/// but for the bare function type given.
+pub fn trans_fn_pointer_shim<'a, 'tcx>(
+    ccx: &'a CrateContext<'a, 'tcx>,
+    closure_kind: ty::ClosureKind,
+    bare_fn_ty: Ty<'tcx>)
+    -> ValueRef
+{
+    let _icx = push_ctxt("trans_fn_pointer_shim");
+    let tcx = ccx.tcx();
+
+    // Normalize the type for better caching.
+    let bare_fn_ty = tcx.erase_regions(&bare_fn_ty);
+
+    // If this is an impl of `Fn` or `FnMut` trait, the receiver is `&self`.
+    let is_by_ref = match closure_kind {
+        ty::ClosureKind::Fn | ty::ClosureKind::FnMut => true,
+        ty::ClosureKind::FnOnce => false,
+    };
+
+    let llfnpointer = match bare_fn_ty.sty {
+        ty::TyFnDef(def_id, substs, _) => {
+            // Function definitions have to be turned into a pointer.
+            let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
+            if !is_by_ref {
+                // A by-value fn item is ignored, so the shim has
+                // the same signature as the original function.
+                return llfn;
+            }
+            Some(llfn)
+        }
+        _ => None
+    };
+
+    let bare_fn_ty_maybe_ref = if is_by_ref {
+        tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), bare_fn_ty)
+    } else {
+        bare_fn_ty
+    };
+
+    // Check if we already trans'd this shim.
+    match ccx.fn_pointer_shims().borrow().get(&bare_fn_ty_maybe_ref) {
+        Some(&llval) => { return llval; }
+        None => { }
+    }
+
+    debug!("trans_fn_pointer_shim(bare_fn_ty={:?})",
+           bare_fn_ty);
+
+    // Construct the "tuply" version of `bare_fn_ty`. It takes two arguments: `self`,
+    // which is the fn pointer, and `args`, which is the arguments tuple.
+    let sig = match bare_fn_ty.sty {
+        ty::TyFnDef(_, _,
+                    &ty::BareFnTy { unsafety: hir::Unsafety::Normal,
+                                    abi: Abi::Rust,
+                                    ref sig }) |
+        ty::TyFnPtr(&ty::BareFnTy { unsafety: hir::Unsafety::Normal,
+                                    abi: Abi::Rust,
+                                    ref sig }) => sig,
+
+        _ => {
+            bug!("trans_fn_pointer_shim invoked on invalid type: {}",
+                 bare_fn_ty);
+        }
+    };
+    let sig = tcx.erase_late_bound_regions(sig);
+    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+    let tuple_input_ty = tcx.mk_tup(sig.inputs.to_vec());
+    let sig = ty::FnSig {
+        inputs: vec![bare_fn_ty_maybe_ref,
+                     tuple_input_ty],
+        output: sig.output,
+        variadic: false
+    };
+    let fn_ty = FnType::new(ccx, Abi::RustCall, &sig, &[]);
+    let tuple_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
+        unsafety: hir::Unsafety::Normal,
+        abi: Abi::RustCall,
+        sig: ty::Binder(sig)
+    });
+    debug!("tuple_fn_ty: {:?}", tuple_fn_ty);
+
+    // Declare the shim with an internal symbol name derived from the fn type.
+    let function_name =
+        symbol_names::internal_name_from_type_and_suffix(ccx,
+                                                         bare_fn_ty,
+                                                         "fn_pointer_shim");
+    let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty);
+
+    // Set up a FunctionContext and translate the body of the shim.
+    let empty_substs = tcx.mk_substs(Substs::empty());
+    let (block_arena, fcx): (TypedArena<_>, FunctionContext);
+    block_arena = TypedArena::new();
+    fcx = FunctionContext::new(ccx, llfn, fn_ty, None, empty_substs, &block_arena);
+    let mut bcx = fcx.init(false, None);
+
+    let llargs = get_params(fcx.llfn);
+
+    let self_idx = fcx.fn_ty.ret.is_indirect() as usize;
+    let llfnpointer = llfnpointer.unwrap_or_else(|| {
+        // The first argument (`self`) is the fn pointer itself, or a
+        // pointer to it when the receiver is by-ref.
+        if is_by_ref {
+            Load(bcx, llargs[self_idx])
+        } else {
+            llargs[self_idx]
+        }
+    });
+
+    assert!(!fcx.needs_ret_allocas);
+
+    let dest = fcx.llretslotptr.get().map(|_|
+        expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))
+    );
+
+    let callee = Callee {
+        data: Fn(llfnpointer),
+        ty: bare_fn_ty
+    };
+    bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[(self_idx + 1)..]), dest).bcx;
+
+    fcx.finish(bcx, DebugLoc::None);
+
+    ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn);
+
+    llfn
+}
+
+/// Translates a reference to a fn/method item, monomorphizing and
+/// inlining as it goes.
+///
+/// # Parameters
+///
+/// - `ccx`: the crate context
+/// - `def_id`: def id of the fn or method item being referenced
+/// - `substs`: values for each of the fn/method's parameters
+fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                    def_id: DefId,
+                    substs: &'tcx subst::Substs<'tcx>)
+                    -> Datum<'tcx, Rvalue> {
+    let tcx = ccx.tcx();
+
+    debug!("get_fn(def_id={:?}, substs={:?})", def_id, substs);
+
+    assert!(!substs.types.needs_infer());
+    assert!(!substs.types.has_escaping_regions());
+
+    // Check whether this fn has an inlined copy and, if so, redirect
+    // def_id to the local id of the inlined copy.
+    let def_id = inline::maybe_instantiate_inline(ccx, def_id);
+
+    fn is_named_tuple_constructor(tcx: &TyCtxt, def_id: DefId) -> bool {
+        let node_id = match tcx.map.as_local_node_id(def_id) {
+            Some(n) => n,
+            None => { return false; }
+        };
+        let map_node = errors::expect(
+            &tcx.sess.diagnostic(),
+            tcx.map.find(node_id),
+            || "local item should be in ast map".to_string());
+
+        match map_node {
+            hir_map::NodeVariant(v) => {
+                v.node.data.is_tuple()
+            }
+            hir_map::NodeStructCtor(_) => true,
+            _ => false
+        }
+    }
+    let must_monomorphise =
+        !substs.types.is_empty() || is_named_tuple_constructor(tcx, def_id);
+
+    debug!("get_fn({:?}) must_monomorphise: {}",
+           def_id, must_monomorphise);
+
+    // Create a monomorphic version of generic functions
+    if must_monomorphise {
+        // Should be either intra-crate or inlined.
+        assert_eq!(def_id.krate, LOCAL_CRATE);
+
+        let substs = tcx.mk_substs(substs.clone().erase_regions());
+        let (val, fn_ty) = monomorphize::monomorphic_fn(ccx, def_id, substs);
+        let fn_ptr_ty = match fn_ty.sty {
+            ty::TyFnDef(_, _, fty) => {
+                // Create a fn pointer with the substituted signature.
+                tcx.mk_ty(ty::TyFnPtr(fty))
+            }
+            _ => bug!("expected fn item type, found {}", fn_ty)
+        };
+        assert_eq!(type_of::type_of(ccx, fn_ptr_ty), common::val_ty(val));
+        return immediate_rvalue(val, fn_ptr_ty);
+    }
+
+    // Find the actual function pointer.
+    let ty = ccx.tcx().lookup_item_type(def_id).ty;
+    let fn_ptr_ty = match ty.sty {
+        ty::TyFnDef(_, _, fty) => {
+            // Create a fn pointer with the normalized signature.
+            tcx.mk_fn_ptr(infer::normalize_associated_type(tcx, fty))
+        }
+        _ => bug!("expected fn item type, found {}", ty)
+    };
+
+    let instance = Instance::mono(ccx.tcx(), def_id);
+    if let Some(&llfn) = ccx.instances().borrow().get(&instance) {
+        return immediate_rvalue(llfn, fn_ptr_ty);
+    }
+
+    let attrs;
+    let local_id = ccx.tcx().map.as_local_node_id(def_id);
+    let maybe_node = local_id.and_then(|id| tcx.map.find(id));
+    let (sym, attrs, local_item) = match maybe_node {
+        Some(hir_map::NodeItem(&hir::Item {
+            ref attrs, id, span, node: hir::ItemFn(..), ..
+        })) |
+        Some(hir_map::NodeTraitItem(&hir::TraitItem {
+            ref attrs, id, span, node: hir::MethodTraitItem(_, Some(_)), ..
+        })) |
+        Some(hir_map::NodeImplItem(&hir::ImplItem {
+            ref attrs, id, span, node: hir::ImplItemKind::Method(..), ..
+        })) => {
+            let sym = exported_name(ccx, instance, attrs);
+
+            if declare::get_defined_value(ccx, &sym).is_some() {
+                ccx.sess().span_fatal(span,
+                    &format!("symbol `{}` is already defined", sym));
+            }
+
+            (sym, &attrs[..], Some(id))
+        }
+
+        Some(hir_map::NodeForeignItem(&hir::ForeignItem {
+            ref attrs, name, node: hir::ForeignItemFn(..), ..
+        })) => {
+            (imported_name(name, attrs).to_string(), &attrs[..], None)
+        }
+
+        None => {
+            attrs = ccx.sess().cstore.item_attrs(def_id);
+            (ccx.sess().cstore.item_symbol(def_id), &attrs[..], None)
+        }
+
+        ref variant => {
+            bug!("get_fn: unexpected variant: {:?}", variant)
+        }
+    };
+
+    // This is subtle and surprising, but sometimes we have to bitcast
+    // the resulting fn pointer.  The reason has to do with external
+    // functions.  If you have two crates that both bind the same C
+    // library, they may not use precisely the same types: for
+    // example, they will probably each declare their own structs,
+    // which are distinct types from LLVM's point of view (nominal
+    // types).
+    //
+    // Now, if those two crates are linked into an application, and
+    // they contain inlined code, you can wind up with a situation
+    // where both of those functions wind up being loaded into this
+    // application simultaneously. In that case, the same function
+    // (from LLVM's point of view) requires two types. But of course
+    // LLVM won't allow one function to have two types.
+    //
+    // What we currently do, therefore, is declare the function with
+    // one of the two types (whichever happens to come first) and then
+    // bitcast as needed when the function is referenced to make sure
+    // it has the type we expect.
+    //
+    // This can occur on either a crate-local or crate-external
+    // reference. It also occurs when testing libcore and in some
+    // other weird situations. Annoying.
+
+    let llptrty = type_of::type_of(ccx, fn_ptr_ty);
+    let llfn = if let Some(llfn) = declare::get_declared_value(ccx, &sym) {
+        if common::val_ty(llfn) != llptrty {
+            if local_item.is_some() {
+                bug!("symbol `{}` previously declared as {:?}, now wanted as {:?}",
+                     sym, Value(llfn), llptrty);
+            }
+            debug!("get_fn: casting {:?} to {:?}", llfn, llptrty);
+            consts::ptrcast(llfn, llptrty)
+        } else {
+            debug!("get_fn: not casting pointer!");
+            llfn
+        }
+    } else {
+        let llfn = declare::declare_fn(ccx, &sym, ty);
+        assert_eq!(common::val_ty(llfn), llptrty);
+        debug!("get_fn: not casting pointer!");
+
+        attributes::from_fn_attrs(ccx, attrs, llfn);
+        if local_item.is_some() {
+            // FIXME(eddyb) Doubt all extern fn should allow unwinding.
+            attributes::unwind(llfn, true);
+        }
+
+        llfn
+    };
+
+    // Always insert into item_symbols, in case this item is exported.
+    if let Some(id) = local_item {
+        ccx.item_symbols().borrow_mut().insert(id, sym);
+    }
+
+    ccx.instances().borrow_mut().insert(instance, llfn);
+
+    immediate_rvalue(llfn, fn_ptr_ty)
+}
+
+// ______________________________________________________________________
+// Translating calls
+
+fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+                                    debug_loc: DebugLoc,
+                                    callee: Callee<'tcx>,
+                                    args: CallArgs<'a, 'tcx>,
+                                    dest: Option<expr::Dest>)
+                                    -> Result<'blk, 'tcx> {
+    // Introduce a temporary cleanup scope that will contain cleanups
+    // for the arguments while they are being evaluated. The purpose of
+    // this cleanup scope is to ensure that, should a panic occur while
+    // evaluating argument N, the values for arguments 0...N-1 are all
+    // cleaned up. If no panic occurs, the values are handed off to
+    // the callee, and hence none of the cleanups in this temporary
+    // scope will ever execute.
+    let fcx = bcx.fcx;
+    let ccx = fcx.ccx;
+
+    let abi = callee.ty.fn_abi();
+    let sig = callee.ty.fn_sig();
+    let output = bcx.tcx().erase_late_bound_regions(&sig.output());
+    let output = infer::normalize_associated_type(bcx.tcx(), &output);
+
+    let extra_args = match args {
+        ArgExprs(args) if abi != Abi::RustCall => {
+            args[sig.0.inputs.len()..].iter().map(|expr| {
+                common::expr_ty_adjusted(bcx, expr)
+            }).collect()
+        }
+        _ => vec![]
+    };
+    let fn_ty = callee.direct_fn_type(ccx, &extra_args);
+
+    let mut callee = match callee.data {
+        Intrinsic => {
+            assert!(abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic);
+            assert!(dest.is_some());
+
+            return intrinsic::trans_intrinsic_call(bcx, callee.ty, &fn_ty,
+                                                   args, dest.unwrap(),
+                                                   debug_loc);
+        }
+        NamedTupleConstructor(disr) => {
+            assert!(dest.is_some());
+
+            return base::trans_named_tuple_constructor(bcx,
+                                                       callee.ty,
+                                                       disr,
+                                                       args,
+                                                       dest.unwrap(),
+                                                       debug_loc);
+        }
+        f => f
+    };
+
+    // Generate a location to store the result. If the user does
+    // not care about the result, just make a stack slot.
+    let opt_llretslot = dest.and_then(|dest| match dest {
+        expr::SaveIn(dst) => Some(dst),
+        expr::Ignore => {
+            let needs_drop = || match output {
+                ty::FnConverging(ret_ty) => bcx.fcx.type_needs_drop(ret_ty),
+                ty::FnDiverging => false
+            };
+            if fn_ty.ret.is_indirect() || fn_ty.ret.cast.is_some() || needs_drop() {
+                // Push the out-pointer if we use an out-pointer for this
+                // return type, otherwise push "undef".
+                if fn_ty.ret.is_ignore() {
+                    Some(C_undef(fn_ty.ret.original_ty.ptr_to()))
+                } else {
+                    let llresult = alloca(bcx, fn_ty.ret.original_ty, "__llret");
+                    call_lifetime_start(bcx, llresult);
+                    Some(llresult)
+                }
+            } else {
+                None
+            }
+        }
+    });
+
+    // If there is no destination, the return must be direct, with no cast.
+    if opt_llretslot.is_none() {
+        assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none());
+    }
+
+    let mut llargs = Vec::new();
+
+    if fn_ty.ret.is_indirect() {
+        let mut llretslot = opt_llretslot.unwrap();
+        if let Some(ty) = fn_ty.ret.cast {
+            llretslot = PointerCast(bcx, llretslot, ty.ptr_to());
+        }
+        llargs.push(llretslot);
+    }
+
+    let arg_cleanup_scope = fcx.push_custom_cleanup_scope();
+    bcx = trans_args(bcx, abi, &fn_ty, &mut callee, args, &mut llargs,
+                     cleanup::CustomScope(arg_cleanup_scope));
+    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
+
+    let llfn = match callee {
+        Fn(f) => f,
+        _ => bug!("expected fn pointer callee, found {:?}", callee)
+    };
+
+    let (llret, mut bcx) = base::invoke(bcx, llfn, &llargs, debug_loc);
+    if !bcx.unreachable.get() {
+        fn_ty.apply_attrs_callsite(llret);
+    }
+
+    // If the function we just called does not return via an out-pointer,
+    // store its direct return value into the Rust out-pointer (the
+    // destination slot). Cast the out-pointer type to match if needed,
+    // because some ABIs will use a different type than the Rust type;
+    // e.g., a {u32,u32} struct could be returned as u64.
+    if !fn_ty.ret.is_ignore() && !fn_ty.ret.is_indirect() {
+        if let Some(llforeign_ret_ty) = fn_ty.ret.cast {
+            let llrust_ret_ty = fn_ty.ret.original_ty;
+            let llretslot = opt_llretslot.unwrap();
+
+            // The actual return type is a struct, but the ABI
+            // adaptation code has cast it into some scalar type.  The
+            // code that follows is the only reliable way I have
+            // found to do a transform like i64 -> {i32,i32}.
+            // Basically we dump the data onto the stack then memcpy it.
+            //
+            // Other approaches I tried:
+            // - Casting rust ret pointer to the foreign type and using Store
+            //   is (a) unsafe if size of foreign type > size of rust type and
+            //   (b) runs afoul of strict aliasing rules, yielding invalid
+            //   assembly under -O (specifically, the store gets removed).
+            // - Truncating foreign type to correct integral type and then
+            //   bitcasting to the struct type yields invalid cast errors.
+            let llscratch = base::alloca(bcx, llforeign_ret_ty, "__cast");
+            base::call_lifetime_start(bcx, llscratch);
+            Store(bcx, llret, llscratch);
+            let llscratch_i8 = PointerCast(bcx, llscratch, Type::i8(ccx).ptr_to());
+            let llretptr_i8 = PointerCast(bcx, llretslot, Type::i8(ccx).ptr_to());
+            let llrust_size = llsize_of_store(ccx, llrust_ret_ty);
+            let llforeign_align = llalign_of_min(ccx, llforeign_ret_ty);
+            let llrust_align = llalign_of_min(ccx, llrust_ret_ty);
+            let llalign = cmp::min(llforeign_align, llrust_align);
+            debug!("llrust_size={}", llrust_size);
+
+            if !bcx.unreachable.get() {
+                base::call_memcpy(&B(bcx), llretptr_i8, llscratch_i8,
+                                  C_uint(ccx, llrust_size), llalign as u32);
+            }
+            base::call_lifetime_end(bcx, llscratch);
+        } else if let Some(llretslot) = opt_llretslot {
+            base::store_ty(bcx, llret, llretslot, output.unwrap());
+        }
+    }
+
+    fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_cleanup_scope);
+
+    // If the caller doesn't care about the result of this fn call,
+    // drop the temporary slot we made.
+    match (dest, opt_llretslot, output) {
+        (Some(expr::Ignore), Some(llretslot), ty::FnConverging(ret_ty)) => {
+            // drop the value if it is not being saved.
+            bcx = glue::drop_ty(bcx, llretslot, ret_ty, debug_loc);
+            call_lifetime_end(bcx, llretslot);
+        }
+        _ => {}
+    }
+
+    if output == ty::FnDiverging {
+        Unreachable(bcx);
+    }
+
+    Result::new(bcx, llret)
+}
+
+pub enum CallArgs<'a, 'tcx> {
+    /// Supply the argument values as a list of expressions that must be
+    /// translated. This is used in the common case of `foo(bar, qux)`.
+    ArgExprs(&'a [P<hir::Expr>]),
+
+    /// Supply the argument values as a list of LLVM value refs; frequently
+    /// used with lang items and so forth, when the argument is an internal
+    /// value.
+    ArgVals(&'a [ValueRef]),
+
+    /// For overloaded operators: `(lhs, Option(rhs))`.
+    /// `lhs` is the left-hand-side and `rhs` is the datum
+    /// of the right-hand-side argument (if any).
+    ArgOverloadedOp(Datum<'tcx, Expr>, Option<Datum<'tcx, Expr>>),
+
+    /// Supply the argument values as a list of expressions that must be
+    /// translated, for overloaded call operators.
+    ArgOverloadedCall(Vec<&'a hir::Expr>),
+}
+
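+/// Handles arguments under the "rust-call" ABI used by the `Fn*` traits:
+/// a call like `f(a, b)` through `Fn::call` is really `call(&f, (a, b))`,
+/// so the first expression is translated as the receiver and the second,
+/// a tuple, is then flattened into one argument per element.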
+fn trans_args_under_call_abi<'blk, 'tcx>(
+                             mut bcx: Block<'blk, 'tcx>,
+                             arg_exprs: &[P<hir::Expr>],
+                             callee: &mut CalleeData,
+                             fn_ty: &FnType,
+                             llargs: &mut Vec<ValueRef>,
+                             arg_cleanup_scope: cleanup::ScopeId)
+                             -> Block<'blk, 'tcx>
+{
+    let mut arg_idx = 0;
+
+    // Translate the `self` argument first.
+    let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
+    bcx = trans_arg_datum(bcx,
+                          arg_datum,
+                          callee, fn_ty, &mut arg_idx,
+                          arg_cleanup_scope,
+                          llargs);
+
+    // Now untuple the rest of the arguments.
+    let tuple_expr = &arg_exprs[1];
+    let tuple_type = common::node_id_type(bcx, tuple_expr.id);
+
+    match tuple_type.sty {
+        ty::TyTuple(ref field_types) => {
+            let tuple_datum = unpack_datum!(bcx,
+                                            expr::trans(bcx, &tuple_expr));
+            let tuple_lvalue_datum =
+                unpack_datum!(bcx,
+                              tuple_datum.to_lvalue_datum(bcx,
+                                                          "args",
+                                                          tuple_expr.id));
+            let repr = adt::represent_type(bcx.ccx(), tuple_type);
+            let repr_ptr = &repr;
+            for (i, field_type) in field_types.iter().enumerate() {
+                let arg_datum = tuple_lvalue_datum.get_element(
+                    bcx,
+                    field_type,
+                    |srcval| {
+                        adt::trans_field_ptr(bcx, repr_ptr, srcval, Disr(0), i)
+                    }).to_expr_datum();
+                bcx = trans_arg_datum(bcx,
+                                      arg_datum,
+                                      callee, fn_ty, &mut arg_idx,
+                                      arg_cleanup_scope,
+                                      llargs);
+            }
+        }
+        _ => {
+            span_bug!(tuple_expr.span,
+                      "argument to `.call()` wasn't a tuple?!")
+        }
+    };
+
+    bcx
+}
+
+pub fn trans_args<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                  abi: Abi,
+                                  fn_ty: &FnType,
+                                  callee: &mut CalleeData,
+                                  args: CallArgs<'a, 'tcx>,
+                                  llargs: &mut Vec<ValueRef>,
+                                  arg_cleanup_scope: cleanup::ScopeId)
+                                  -> Block<'blk, 'tcx> {
+    debug!("trans_args(abi={})", abi);
+
+    let _icx = push_ctxt("trans_args");
+
+    let mut bcx = bcx;
+    let mut arg_idx = 0;
+
+    // First we figure out the caller's view of the types of the arguments.
+    // This will be needed if this is a generic call, because the callee has
+    // to cast her view of the arguments to the caller's view.
+    match args {
+        ArgExprs(arg_exprs) => {
+            if abi == Abi::RustCall {
+                // This is only used for direct calls to the `call`,
+                // `call_mut` or `call_once` functions.
+                return trans_args_under_call_abi(bcx,
+                                                 arg_exprs, callee, fn_ty,
+                                                 llargs,
+                                                 arg_cleanup_scope)
+            }
+
+            for arg_expr in arg_exprs {
+                let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_expr));
+                bcx = trans_arg_datum(bcx,
+                                      arg_datum,
+                                      callee, fn_ty, &mut arg_idx,
+                                      arg_cleanup_scope,
+                                      llargs);
+            }
+        }
+        ArgOverloadedCall(arg_exprs) => {
+            for expr in arg_exprs {
+                let arg_datum =
+                    unpack_datum!(bcx, expr::trans(bcx, expr));
+                bcx = trans_arg_datum(bcx,
+                                      arg_datum,
+                                      callee, fn_ty, &mut arg_idx,
+                                      arg_cleanup_scope,
+                                      llargs);
+            }
+        }
+        ArgOverloadedOp(lhs, rhs) => {
+            bcx = trans_arg_datum(bcx, lhs,
+                                  callee, fn_ty, &mut arg_idx,
+                                  arg_cleanup_scope,
+                                  llargs);
+
+            if let Some(rhs) = rhs {
+                bcx = trans_arg_datum(bcx, rhs,
+                                      callee, fn_ty, &mut arg_idx,
+                                      arg_cleanup_scope,
+                                      llargs);
+            }
+        }
+        ArgVals(vs) => {
+            match *callee {
+                Virtual(idx) => {
+                    llargs.push(vs[0]);
+
+                    let fn_ptr = meth::get_virtual_method(bcx, vs[1], idx);
+                    let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
+                    *callee = Fn(PointerCast(bcx, fn_ptr, llty));
+                    llargs.extend_from_slice(&vs[2..]);
+                }
+                _ => llargs.extend_from_slice(vs)
+            }
+        }
+    }
+
+    bcx
+}
+
+fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                               arg_datum: Datum<'tcx, Expr>,
+                               callee: &mut CalleeData,
+                               fn_ty: &FnType,
+                               next_idx: &mut usize,
+                               arg_cleanup_scope: cleanup::ScopeId,
+                               llargs: &mut Vec<ValueRef>)
+                               -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("trans_arg_datum");
+    let mut bcx = bcx;
+
+    debug!("trans_arg_datum({:?})", arg_datum);
+
+    let arg = &fn_ty.args[*next_idx];
+    *next_idx += 1;
+
+    // Fill padding with undef value, where applicable.
+    if let Some(ty) = arg.pad {
+        llargs.push(C_undef(ty));
+    }
+
+    // Determine whether we want a by-ref datum even when the value would
+    // normally be passed by value (i.e. the ABI passes it indirectly or
+    // via a cast).
+    let want_by_ref = arg.is_indirect() || arg.cast.is_some();
+
+    let fat_ptr = common::type_is_fat_ptr(bcx.tcx(), arg_datum.ty);
+    let (by_ref, val) = if fat_ptr && !bcx.fcx.type_needs_drop(arg_datum.ty) {
+        (true, arg_datum.val)
+    } else {
+        // Make this an rvalue, since we are going to be
+        // passing ownership.
+        let arg_datum = unpack_datum!(
+            bcx, arg_datum.to_rvalue_datum(bcx, "arg"));
+
+        // Now that arg_datum is owned, get it into the appropriate
+        // mode (ref vs value).
+        let arg_datum = unpack_datum!(bcx, if want_by_ref {
+            arg_datum.to_ref_datum(bcx)
+        } else {
+            arg_datum.to_appropriate_datum(bcx)
+        });
+
+        // Technically, ownership of val passes to the callee.
+        // However, we must cleanup should we panic before the
+        // callee is actually invoked.
+        (arg_datum.kind.is_by_ref(),
+         arg_datum.add_clean(bcx.fcx, arg_cleanup_scope))
+    };
+
+    if arg.is_ignore() {
+        return bcx;
+    }
+
+    debug!("--- trans_arg_datum passing {:?}", Value(val));
+
+    if fat_ptr {
+        // Fat pointers should be passed without any transformations.
+        assert!(!arg.is_indirect() && arg.cast.is_none());
+        llargs.push(Load(bcx, expr::get_dataptr(bcx, val)));
+
+        let info_arg = &fn_ty.args[*next_idx];
+        *next_idx += 1;
+        assert!(!info_arg.is_indirect() && info_arg.cast.is_none());
+        let info = Load(bcx, expr::get_meta(bcx, val));
+
+        if let Virtual(idx) = *callee {
+            // We have to grab the fn pointer from the vtable when
+            // handling the first argument, ensure that here.
+            assert_eq!(*next_idx, 2);
+            assert!(info_arg.is_ignore());
+            let fn_ptr = meth::get_virtual_method(bcx, info, idx);
+            let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
+            *callee = Fn(PointerCast(bcx, fn_ptr, llty));
+        } else {
+            assert!(!info_arg.is_ignore());
+            llargs.push(info);
+        }
+        return bcx;
+    }
+
+    let mut val = val;
+    if by_ref && !arg.is_indirect() {
+        // Have to load the argument, maybe while casting it.
+        if arg.original_ty == Type::i1(bcx.ccx()) {
+            // We store bools as i8 so we need to truncate to i1.
+            val = LoadRangeAssert(bcx, val, 0, 2, llvm::False);
+            val = Trunc(bcx, val, arg.original_ty);
+        } else if let Some(ty) = arg.cast {
+            val = Load(bcx, PointerCast(bcx, val, ty.ptr_to()));
+            if !bcx.unreachable.get() {
+                let llalign = llalign_of_min(bcx.ccx(), arg.ty);
+                unsafe {
+                    llvm::LLVMSetAlignment(val, llalign);
+                }
+            }
+        } else {
+            val = Load(bcx, val);
+        }
+    }
+
+    llargs.push(val);
+    bcx
+}
diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs
new file mode 100644 (file)
index 0000000..514e6bd
--- /dev/null
@@ -0,0 +1,1286 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! ## The Cleanup module
+//!
+//! The cleanup module tracks what values need to be cleaned up as scopes
+//! are exited, either via panic or just normal control flow. The basic
+//! idea is that the function context maintains a stack of cleanup scopes
+//! that are pushed/popped as we traverse the AST tree. There is typically
+//! at least one cleanup scope per AST node; some AST nodes may introduce
+//! additional temporary scopes.
+//!
+//! Cleanup items can be scheduled into any of the scopes on the stack.
+//! Typically, when a scope is popped, we will also generate the code for
+//! each of its cleanups at that time. This corresponds to a normal exit
+//! from a block (for example, an expression completing evaluation
+//! successfully without panic). However, it is also possible to pop a
+//! block *without* executing its cleanups; this is typically used to
+//! guard intermediate values that must be cleaned up on panic, but not
+//! if everything goes right. See the section on custom scopes below for
+//! more details.
+//!
+//! Cleanup scopes come in three kinds:
+//!
+//! - **AST scopes:** each AST node in a function body has a corresponding
+//!   AST scope. We push the AST scope when we start generating code for an AST
+//!   node and pop it once the AST node has been fully generated.
+//! - **Loop scopes:** loops have an additional cleanup scope. Cleanups are
+//!   never scheduled into loop scopes; instead, they are used to record the
+//!   basic blocks that we should branch to when a `continue` or `break` statement
+//!   is encountered.
+//! - **Custom scopes:** custom scopes are typically used to ensure cleanup
+//!   of intermediate values.
+//!
+//! ### When to schedule cleanup
+//!
+//! Although the cleanup system is intended to *feel* fairly declarative,
+//! it's still important to time calls to `schedule_clean()` correctly.
+//! Basically, you should not schedule cleanup for memory until it has
+//! been initialized, because if an unwind should occur before the memory
+//! is fully initialized, then the cleanup will run and try to free or
+//! drop uninitialized memory. If the initialization itself produces
+//! byproducts that need to be freed, then you should use temporary custom
+//! scopes to ensure that those byproducts will get freed on unwind.  For
+//! example, an expression like `box foo()` will first allocate a box in the
+//! heap and then call `foo()` -- if `foo()` should panic, this box needs
+//! to be *shallowly* freed.
+//!
+//! ### Long-distance jumps
+//!
+//! In addition to popping a scope, which corresponds to normal control
+//! flow exiting the scope, we may also *jump out* of a scope into some
+//! earlier scope on the stack. This can occur in response to a `return`,
+//! `break`, or `continue` statement, but also in response to panic. In
+//! any of these cases, we will generate a series of cleanup blocks for
+//! each of the scopes that is exited. So, if the stack contains scopes A
+//! ... Z, and we break out of a loop whose corresponding cleanup scope is
+//! X, we would generate cleanup blocks for the cleanups in X, Y, and Z.
+//! After cleanup is done we would branch to the exit point for scope X.
+//! But if panic should occur, we would generate cleanups for all the
+//! scopes from A to Z and then resume the unwind process afterwards.
+//!
+//! To avoid generating tons of code, we cache the cleanup blocks that we
+//! create for breaks, returns, unwinds, and other jumps. Whenever a new
+//! cleanup is scheduled, though, we must clear these cached blocks. A
+//! possible improvement would be to keep the cached blocks but simply
+//! generate a new block which performs the additional cleanup and then
+//! branches to the existing cached blocks.
+//!
+//! ### AST and loop cleanup scopes
+//!
+//! AST cleanup scopes are pushed when we begin and end processing an AST
+//! node. They are used to house cleanups related to rvalue temporaries that
+//! get referenced (e.g., due to an expression like `&Foo()`). Whenever an
+//! AST scope is popped, we always trans all the cleanups, adding the cleanup
+//! code after the postdominator of the AST node.
+//!
+//! AST nodes that represent breakable loops also push a loop scope; the
+//! loop scope never has any actual cleanups, it's just used to point to
+//! the basic blocks where control should flow after a "continue" or
+//! "break" statement. Popping a loop scope never generates code.
+//!
+//! ### Custom cleanup scopes
+//!
+//! Custom cleanup scopes are used for a variety of purposes. The most
+//! common though is to handle temporary byproducts, where cleanup only
+//! needs to occur on panic. The general strategy is to push a custom
+//! cleanup scope, schedule *shallow* cleanups into the custom scope, and
+//! then pop the custom scope (without transing the cleanups) when
+//! execution succeeds normally. This way the cleanups are only trans'd on
+//! unwind, and only up until the point where execution succeeded, at
+//! which time the complete value should be stored in an lvalue or some
+//! other place where normal cleanup applies.
+//!
+//! To spell it out, here is an example. Imagine an expression `box expr`.
+//! We would basically:
+//!
+//! 1. Push a custom cleanup scope C.
+//! 2. Allocate the box.
+//! 3. Schedule a shallow free in the scope C.
+//! 4. Trans `expr` into the box.
+//! 5. Pop the scope C.
+//! 6. Return the box as an rvalue.
+//!
+//! This way, if a panic occurs while evaluating `expr`, the custom
+//! cleanup scope C is still on the stack and hence the box will be freed. The trans
+//! code for `expr` itself is responsible for freeing any other byproducts
+//! that may be in play.
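+//!
+//! In code, the pattern looks roughly like this (a sketch only; the exact
+//! scheduling call depends on what needs to be freed):
+//!
+//! ```
+//! let custom_scope = fcx.push_custom_cleanup_scope();
+//! // ... allocate the box ...
+//! // ... schedule a shallow free of the box into CustomScope(custom_scope)
+//! //     via one of the `schedule_*` methods ...
+//! // ... trans `expr` into the box ...
+//! fcx.pop_custom_cleanup_scope(custom_scope);
+//! ```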
+
+pub use self::ScopeId::*;
+pub use self::CleanupScopeKind::*;
+pub use self::EarlyExitLabel::*;
+pub use self::Heap::*;
+
+use llvm::{BasicBlockRef, ValueRef};
+use base;
+use build;
+use common;
+use common::{Block, FunctionContext, NodeIdAndSpan, LandingPad};
+use datum::{Datum, Lvalue};
+use debuginfo::{DebugLoc, ToDebugLoc};
+use glue;
+use middle::region;
+use type_::Type;
+use value::Value;
+use rustc::ty::{Ty, TyCtxt};
+
+use std::fmt;
+use syntax::ast;
+
+pub struct CleanupScope<'blk, 'tcx: 'blk> {
+    // The kind of this cleanup scope. A custom scope is a *temporary
+    // scope* that is pushed during trans to clean up miscellaneous
+    // garbage that trans may generate whose lifetime is a subset of
+    // some expression.  See module doc for more details.
+    kind: CleanupScopeKind<'blk, 'tcx>,
+
+    // Cleanups to run upon scope exit.
+    cleanups: Vec<CleanupObj<'tcx>>,
+
+    // The debug location any drop calls generated for this scope will be
+    // associated with.
+    debug_loc: DebugLoc,
+
+    cached_early_exits: Vec<CachedEarlyExit>,
+    cached_landing_pad: Option<BasicBlockRef>,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct CustomScopeIndex {
+    index: usize
+}
+
+pub const EXIT_BREAK: usize = 0;
+pub const EXIT_LOOP: usize = 1;
+pub const EXIT_MAX: usize = 2;
+
+pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
+    CustomScopeKind,
+    AstScopeKind(ast::NodeId),
+    LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX])
+}
+
+impl<'blk, 'tcx: 'blk> fmt::Debug for CleanupScopeKind<'blk, 'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            CustomScopeKind => write!(f, "CustomScopeKind"),
+            AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
+            LoopScopeKind(nid, ref blks) => {
+                write!(f, "LoopScopeKind({}, [", nid)?;
+                for blk in blks {
+                    write!(f, "{:p}, ", blk)?;
+                }
+                write!(f, "])")
+            }
+        }
+    }
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum EarlyExitLabel {
+    UnwindExit(UnwindKind),
+    ReturnExit,
+    LoopExit(ast::NodeId, usize)
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum UnwindKind {
+    LandingPad,
+    CleanupPad(ValueRef),
+}
+
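+// A cached early-exit block for a scope: branching to `cleanup_block` runs
+// the first `last_cleanup` cleanups scheduled in the scope and then continues
+// toward the exit identified by `label`.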
+#[derive(Copy, Clone)]
+pub struct CachedEarlyExit {
+    label: EarlyExitLabel,
+    cleanup_block: BasicBlockRef,
+    last_cleanup: usize,
+}
+
+pub trait Cleanup<'tcx> {
+    fn must_unwind(&self) -> bool;
+    fn is_lifetime_end(&self) -> bool;
+    fn trans<'blk>(&self,
+                   bcx: Block<'blk, 'tcx>,
+                   debug_loc: DebugLoc)
+                   -> Block<'blk, 'tcx>;
+}
+
+pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;
+
+#[derive(Copy, Clone, Debug)]
+pub enum ScopeId {
+    AstScope(ast::NodeId),
+    CustomScope(CustomScopeIndex)
+}
+
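+// A drop "hint" pairs the `NodeId` of a value with a location holding its
+// drop-flag hint, which scheduled drops can consult at runtime to decide
+// whether the value still needs to be dropped.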
+#[derive(Copy, Clone, Debug)]
+pub struct DropHint<K>(pub ast::NodeId, pub K);
+
+pub type DropHintDatum<'tcx> = DropHint<Datum<'tcx, Lvalue>>;
+pub type DropHintValue = DropHint<ValueRef>;
+
+impl<K> DropHint<K> {
+    pub fn new(id: ast::NodeId, k: K) -> DropHint<K> { DropHint(id, k) }
+}
+
+impl DropHint<ValueRef> {
+    pub fn value(&self) -> ValueRef { self.1 }
+}
+
+pub trait DropHintMethods {
+    type ValueKind;
+    fn to_value(&self) -> Self::ValueKind;
+}
+impl<'tcx> DropHintMethods for DropHintDatum<'tcx> {
+    type ValueKind = DropHintValue;
+    fn to_value(&self) -> DropHintValue { DropHint(self.0, self.1.val) }
+}
+
+impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
+    /// Invoked when we start to trans the code contained within a new cleanup scope.
+    fn push_ast_cleanup_scope(&self, debug_loc: NodeIdAndSpan) {
+        debug!("push_ast_cleanup_scope({})",
+               self.ccx.tcx().map.node_to_string(debug_loc.id));
+
+        // FIXME(#2202) -- currently closure bodies have a parent
+        // region, which messes up the assertion below, since there
+        // are no cleanup scopes on the stack at the start of
+        // trans'ing a closure body.  I think though that this should
+        // eventually be fixed by closure bodies not having a parent
+        // region, though that's a touch unclear, and it might also be
+        // better just to narrow this assertion more (i.e., by
+        // excluding ids that correspond to closure bodies only). For
+        // now we just say that if there is already an AST scope on the stack,
+        // this new AST scope had better be its immediate child.
+        let top_scope = self.top_ast_scope();
+        let region_maps = &self.ccx.tcx().region_maps;
+        if top_scope.is_some() {
+            assert!((region_maps
+                     .opt_encl_scope(region_maps.node_extent(debug_loc.id))
+                     .map(|s|s.node_id(region_maps)) == top_scope)
+                    ||
+                    (region_maps
+                     .opt_encl_scope(region_maps.lookup_code_extent(
+                         region::CodeExtentData::DestructionScope(debug_loc.id)))
+                     .map(|s|s.node_id(region_maps)) == top_scope));
+        }
+
+        self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
+                                          debug_loc.debug_loc()));
+    }
+
+    fn push_loop_cleanup_scope(&self,
+                               id: ast::NodeId,
+                               exits: [Block<'blk, 'tcx>; EXIT_MAX]) {
+        debug!("push_loop_cleanup_scope({})",
+               self.ccx.tcx().map.node_to_string(id));
+        assert_eq!(Some(id), self.top_ast_scope());
+
+        // Just copy the debuginfo source location from the enclosing scope
+        let debug_loc = self.scopes
+                            .borrow()
+                            .last()
+                            .unwrap()
+                            .debug_loc;
+
+        self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
+    }
+
+    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
+        let index = self.scopes_len();
+        debug!("push_custom_cleanup_scope(): {}", index);
+
+        // Just copy the debuginfo source location from the enclosing scope
+        let debug_loc = self.scopes
+                            .borrow()
+                            .last()
+                            .map(|opt_scope| opt_scope.debug_loc)
+                            .unwrap_or(DebugLoc::None);
+
+        self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
+        CustomScopeIndex { index: index }
+    }
+
+    fn push_custom_cleanup_scope_with_debug_loc(&self,
+                                                debug_loc: NodeIdAndSpan)
+                                                -> CustomScopeIndex {
+        let index = self.scopes_len();
+        debug!("push_custom_cleanup_scope(): {}", index);
+
+        self.push_scope(CleanupScope::new(CustomScopeKind,
+                                          debug_loc.debug_loc()));
+        CustomScopeIndex { index: index }
+    }
+
+    /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
+    /// stack, and generates the code to do its cleanups for normal exit.
+    fn pop_and_trans_ast_cleanup_scope(&self,
+                                       bcx: Block<'blk, 'tcx>,
+                                       cleanup_scope: ast::NodeId)
+                                       -> Block<'blk, 'tcx> {
+        debug!("pop_and_trans_ast_cleanup_scope({})",
+               self.ccx.tcx().map.node_to_string(cleanup_scope));
+
+        assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
+
+        let scope = self.pop_scope();
+        self.trans_scope_cleanups(bcx, &scope)
+    }
+
+    /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
+    /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
+    /// branching to a block generated by `normal_exit_block`.
+    fn pop_loop_cleanup_scope(&self,
+                              cleanup_scope: ast::NodeId) {
+        debug!("pop_loop_cleanup_scope({})",
+               self.ccx.tcx().map.node_to_string(cleanup_scope));
+
+        assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
+
+        let _ = self.pop_scope();
+    }
+
+    /// Removes the top cleanup scope from the stack without executing its cleanups. The top
+    /// cleanup scope must be the temporary scope `custom_scope`.
+    fn pop_custom_cleanup_scope(&self,
+                                custom_scope: CustomScopeIndex) {
+        debug!("pop_custom_cleanup_scope({})", custom_scope.index);
+        assert!(self.is_valid_to_pop_custom_scope(custom_scope));
+        let _ = self.pop_scope();
+    }
+
+    /// Removes the top cleanup scope from the stack, which must be a temporary scope, and
+    /// generates the code to do its cleanups for normal exit.
+    fn pop_and_trans_custom_cleanup_scope(&self,
+                                          bcx: Block<'blk, 'tcx>,
+                                          custom_scope: CustomScopeIndex)
+                                          -> Block<'blk, 'tcx> {
+        debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
+        assert!(self.is_valid_to_pop_custom_scope(custom_scope));
+
+        let scope = self.pop_scope();
+        self.trans_scope_cleanups(bcx, &scope)
+    }
+
+    /// Returns the id of the top-most loop scope
+    fn top_loop_scope(&self) -> ast::NodeId {
+        for scope in self.scopes.borrow().iter().rev() {
+            if let LoopScopeKind(id, _) = scope.kind {
+                return id;
+            }
+        }
+        bug!("no loop scope found");
+    }
+
+    /// Returns a block to branch to which will perform all pending cleanups and
+    /// then break/continue (depending on `exit`) out of the loop with id
+    /// `cleanup_scope`
+    fn normal_exit_block(&'blk self,
+                         cleanup_scope: ast::NodeId,
+                         exit: usize) -> BasicBlockRef {
+        self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
+    }
+
+    /// Returns a block to branch to which will perform all pending cleanups and
+    /// then return from this function
+    fn return_exit_block(&'blk self) -> BasicBlockRef {
+        self.trans_cleanups_to_exit_scope(ReturnExit)
+    }
+
+    fn schedule_lifetime_end(&self,
+                             cleanup_scope: ScopeId,
+                             val: ValueRef) {
+        let drop = box LifetimeEnd {
+            ptr: val,
+        };
+
+        debug!("schedule_lifetime_end({:?}, val={:?})",
+               cleanup_scope, Value(val));
+
+        self.schedule_clean(cleanup_scope, drop as CleanupObj);
+    }
+
+    /// Schedules a (deep) drop of `val`, which is a pointer to an instance of
+    /// `ty`
+    fn schedule_drop_mem(&self,
+                         cleanup_scope: ScopeId,
+                         val: ValueRef,
+                         ty: Ty<'tcx>,
+                         drop_hint: Option<DropHintDatum<'tcx>>) {
+        if !self.type_needs_drop(ty) { return; }
+        let drop_hint = drop_hint.map(|hint|hint.to_value());
+        let drop = box DropValue {
+            is_immediate: false,
+            val: val,
+            ty: ty,
+            fill_on_drop: false,
+            skip_dtor: false,
+            drop_hint: drop_hint,
+        };
+
+        debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}",
+               cleanup_scope,
+               Value(val),
+               ty,
+               drop.fill_on_drop,
+               drop.skip_dtor);
+
+        self.schedule_clean(cleanup_scope, drop as CleanupObj);
+    }
+
+    /// Schedules a (deep) drop and filling of `val`, which is a pointer to an instance of `ty`
+    fn schedule_drop_and_fill_mem(&self,
+                                  cleanup_scope: ScopeId,
+                                  val: ValueRef,
+                                  ty: Ty<'tcx>,
+                                  drop_hint: Option<DropHintDatum<'tcx>>) {
+        if !self.type_needs_drop(ty) { return; }
+
+        let drop_hint = drop_hint.map(|datum|datum.to_value());
+        let drop = box DropValue {
+            is_immediate: false,
+            val: val,
+            ty: ty,
+            fill_on_drop: true,
+            skip_dtor: false,
+            drop_hint: drop_hint,
+        };
+
+        debug!("schedule_drop_and_fill_mem({:?}, val={:?}, ty={:?},
+                fill_on_drop={}, skip_dtor={}, has_drop_hint={})",
+               cleanup_scope,
+               Value(val),
+               ty,
+               drop.fill_on_drop,
+               drop.skip_dtor,
+               drop_hint.is_some());
+
+        self.schedule_clean(cleanup_scope, drop as CleanupObj);
+    }
+
+    /// Issue #23611: Schedules a (deep) drop of the contents of
+    /// `val`, which is a pointer to an instance of struct/enum type
+    /// `ty`. The scheduled code handles extracting the discriminant
+    /// and dropping the contents associated with that variant
+    /// *without* executing any associated drop implementation.
+    fn schedule_drop_adt_contents(&self,
+                                  cleanup_scope: ScopeId,
+                                  val: ValueRef,
+                                  ty: Ty<'tcx>) {
+        // The `if` below could test `!contents_needs_drop`; skipping the
+        // drop is just an optimization, so it is sound to be conservative.
+        if !self.type_needs_drop(ty) { return; }
+
+        let drop = box DropValue {
+            is_immediate: false,
+            val: val,
+            ty: ty,
+            fill_on_drop: false,
+            skip_dtor: true,
+            drop_hint: None,
+        };
+
+        debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}",
+               cleanup_scope,
+               Value(val),
+               ty,
+               drop.fill_on_drop,
+               drop.skip_dtor);
+
+        self.schedule_clean(cleanup_scope, drop as CleanupObj);
+    }
+
+    /// Schedules a (deep) drop of `val`, which is an instance of `ty`
+    fn schedule_drop_immediate(&self,
+                               cleanup_scope: ScopeId,
+                               val: ValueRef,
+                               ty: Ty<'tcx>) {
+
+        if !self.type_needs_drop(ty) { return; }
+        let drop = Box::new(DropValue {
+            is_immediate: true,
+            val: val,
+            ty: ty,
+            fill_on_drop: false,
+            skip_dtor: false,
+            drop_hint: None,
+        });
+
+        debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}",
+               cleanup_scope,
+               Value(val),
+               ty,
+               drop.fill_on_drop,
+               drop.skip_dtor);
+
+        self.schedule_clean(cleanup_scope, drop as CleanupObj);
+    }
+
+    /// Schedules a call to `free(val)`. Note that this is a shallow operation.
+    fn schedule_free_value(&self,
+                           cleanup_scope: ScopeId,
+                           val: ValueRef,
+                           heap: Heap,
+                           content_ty: Ty<'tcx>) {
+        let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
+
+        debug!("schedule_free_value({:?}, val={:?}, heap={:?})",
+               cleanup_scope, Value(val), heap);
+
+        self.schedule_clean(cleanup_scope, drop as CleanupObj);
+    }
+
+    fn schedule_clean(&self,
+                      cleanup_scope: ScopeId,
+                      cleanup: CleanupObj<'tcx>) {
+        match cleanup_scope {
+            AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
+            CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
+        }
+    }
+
+    /// Schedules a cleanup to occur upon exit from the AST scope `cleanup_scope`, which must
+    /// currently be on the cleanup stack.
+    fn schedule_clean_in_ast_scope(&self,
+                                   cleanup_scope: ast::NodeId,
+                                   cleanup: CleanupObj<'tcx>) {
+        debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
+               cleanup_scope);
+
+        for scope in self.scopes.borrow_mut().iter_mut().rev() {
+            if scope.kind.is_ast_with_id(cleanup_scope) {
+                scope.cleanups.push(cleanup);
+                scope.cached_landing_pad = None;
+                return;
+            } else {
+                // will be adding a cleanup to some enclosing scope
+                scope.clear_cached_exits();
+            }
+        }
+
+        bug!("no cleanup scope {} found",
+             self.ccx.tcx().map.node_to_string(cleanup_scope));
+    }
+
+    /// Schedules a cleanup to occur in the custom scope `custom_scope`, which must be a
+    /// temporary scope.
+    fn schedule_clean_in_custom_scope(&self,
+                                      custom_scope: CustomScopeIndex,
+                                      cleanup: CleanupObj<'tcx>) {
+        debug!("schedule_clean_in_custom_scope(custom_scope={})",
+               custom_scope.index);
+
+        assert!(self.is_valid_custom_scope(custom_scope));
+
+        let mut scopes = self.scopes.borrow_mut();
+        let scope = &mut (*scopes)[custom_scope.index];
+        scope.cleanups.push(cleanup);
+        scope.cached_landing_pad = None;
+    }
+
+    /// Returns true if there are pending cleanups that should execute on panic.
+    fn needs_invoke(&self) -> bool {
+        self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
+    }
+
+    /// Returns a basic block to branch to in the event of a panic. This block
+    /// will run the panic cleanups and eventually resume the exception that
+    /// caused the landing pad to be run.
+    fn get_landing_pad(&'blk self) -> BasicBlockRef {
+        let _icx = base::push_ctxt("get_landing_pad");
+
+        debug!("get_landing_pad");
+
+        let orig_scopes_len = self.scopes_len();
+        assert!(orig_scopes_len > 0);
+
+        // Remove any scopes that do not have cleanups on panic:
+        let mut popped_scopes = vec!();
+        while !self.top_scope(|s| s.needs_invoke()) {
+            debug!("top scope does not need invoke");
+            popped_scopes.push(self.pop_scope());
+        }
+
+        // Check for an existing landing pad in the new topmost scope:
+        let llbb = self.get_or_create_landing_pad();
+
+        // Push the scopes we removed back on:
+        while let Some(scope) = popped_scopes.pop() {
+            self.push_scope(scope);
+        }
+
+        assert_eq!(self.scopes_len(), orig_scopes_len);
+
+        return llbb;
+    }
+}
+
+impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
+    /// Returns the id of the current top-most AST scope, if any.
+    fn top_ast_scope(&self) -> Option<ast::NodeId> {
+        for scope in self.scopes.borrow().iter().rev() {
+            match scope.kind {
+                CustomScopeKind | LoopScopeKind(..) => {}
+                AstScopeKind(i) => {
+                    return Some(i);
+                }
+            }
+        }
+        None
+    }
+
+    fn top_nonempty_cleanup_scope(&self) -> Option<usize> {
+        self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
+    }
+
+    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
+        self.is_valid_custom_scope(custom_scope) &&
+            custom_scope.index == self.scopes.borrow().len() - 1
+    }
+
+    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
+        let scopes = self.scopes.borrow();
+        custom_scope.index < scopes.len() &&
+            (*scopes)[custom_scope.index].kind.is_temp()
+    }
+
+    /// Generates the cleanups for `scope` into `bcx`
+    fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
+                            bcx: Block<'blk, 'tcx>,
+                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
+
+        let mut bcx = bcx;
+        if !bcx.unreachable.get() {
+            for cleanup in scope.cleanups.iter().rev() {
+                bcx = cleanup.trans(bcx, scope.debug_loc);
+            }
+        }
+        bcx
+    }
+
+    fn scopes_len(&self) -> usize {
+        self.scopes.borrow().len()
+    }
+
+    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
+        self.scopes.borrow_mut().push(scope)
+    }
+
+    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
+        debug!("popping cleanup scope {}, {} scopes remaining",
+               self.top_scope(|s| s.block_name("")),
+               self.scopes_len() - 1);
+
+        self.scopes.borrow_mut().pop().unwrap()
+    }
+
+    fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R {
+        f(self.scopes.borrow().last().unwrap())
+    }
+
+    /// Used when the caller wishes to jump to an early exit, such as a return,
+    /// break, continue, or unwind. This function will generate all cleanups
+    /// between the top of the stack and the exit `label` and return a basic
+    /// block that the caller can branch to.
+    ///
+    /// For example, if the current stack of cleanups were as follows:
+    ///
+    ///      AST 22
+    ///      Custom 1
+    ///      AST 23
+    ///      Loop 23
+    ///      Custom 2
+    ///      AST 24
+    ///
+    /// and the `label` specifies a break from `Loop 23`, then this function
+    /// would generate a series of basic blocks as follows:
+    ///
+    ///      Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
+    ///
+    /// where `break_blk` is the block specified in `Loop 23` as the target for
+    /// breaks. The return value would be the first basic block in that sequence
+    /// (`Cleanup(AST 24)`). The caller could then branch to `Cleanup(AST 24)`
+    /// and it will perform all cleanups and finally branch to the `break_blk`.
+    fn trans_cleanups_to_exit_scope(&'blk self,
+                                    label: EarlyExitLabel)
+                                    -> BasicBlockRef {
+        debug!("trans_cleanups_to_exit_scope label={:?} scopes={}",
+               label, self.scopes_len());
+
+        let orig_scopes_len = self.scopes_len();
+        let mut prev_llbb;
+        let mut popped_scopes = vec!();
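+        // Number of cleanups in the scope that supplied a cached early exit
+        // which are already compiled into that cached block and so must not
+        // be generated again when the scopes are pushed back on.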
+        let mut skip = 0;
+
+        // First we pop off all the cleanup stacks that are
+        // traversed until the exit is reached, pushing them
+        // onto the side vector `popped_scopes`. No code is
+        // generated at this time.
+        //
+        // So, continuing the example from above, we would wind up
+        // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
+        // (Presuming that there are no cached exits)
+        loop {
+            if self.scopes_len() == 0 {
+                match label {
+                    UnwindExit(val) => {
+                        // Generate a block that will resume unwinding to the
+                        // calling function
+                        let bcx = self.new_block("resume", None);
+                        match val {
+                            UnwindKind::LandingPad => {
+                                let addr = self.landingpad_alloca.get()
+                                               .unwrap();
+                                let lp = build::Load(bcx, addr);
+                                base::call_lifetime_end(bcx, addr);
+                                base::trans_unwind_resume(bcx, lp);
+                            }
+                            UnwindKind::CleanupPad(_) => {
+                                let pad = build::CleanupPad(bcx, None, &[]);
+                                build::CleanupRet(bcx, pad, None);
+                            }
+                        }
+                        prev_llbb = bcx.llbb;
+                        break;
+                    }
+
+                    ReturnExit => {
+                        prev_llbb = self.get_llreturn();
+                        break
+                    }
+
+                    LoopExit(id, _) => {
+                        bug!("cannot exit from scope {}, not in scope", id);
+                    }
+                }
+            }
+
+            // Pop off the scope, since we may be generating
+            // unwinding code for it.
+            let top_scope = self.pop_scope();
+            let cached_exit = top_scope.cached_early_exit(label);
+            popped_scopes.push(top_scope);
+
+            // Check if we have already cached the unwinding of this
+            // scope for this label. If so, we can stop popping scopes
+            // and branch to the cached label, since it contains the
+            // cleanups for any subsequent scopes.
+            if let Some((exit, last_cleanup)) = cached_exit {
+                prev_llbb = exit;
+                skip = last_cleanup;
+                break;
+            }
+
+            // If we are searching for a loop exit,
+            // and this scope is that loop, then stop popping and set
+            // `prev_llbb` to the appropriate exit block from the loop.
+            let scope = popped_scopes.last().unwrap();
+            match label {
+                UnwindExit(..) | ReturnExit => { }
+                LoopExit(id, exit) => {
+                    if let Some(exit) = scope.kind.early_exit_block(id, exit) {
+                        prev_llbb = exit;
+                        break
+                    }
+                }
+            }
+        }
+
+        debug!("trans_cleanups_to_exit_scope: popped {} scopes",
+               popped_scopes.len());
+
+        // Now push the popped scopes back on. As we go,
+        // we track in `prev_llbb` the exit to which this scope
+        // should branch when it's done.
+        //
+        // So, continuing with our example, we will start out with
+        // `prev_llbb` being set to `break_blk` (or possibly a cached
+        // early exit). We will then pop the scopes from `popped_scopes`
+        // and generate a basic block for each one, prepending it in the
+        // series and updating `prev_llbb`. So we begin by popping `Custom 2`
+        // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
+        // branch to `prev_llbb == break_blk`, giving us a sequence like:
+        //
+        //     Cleanup(Custom 2) -> prev_llbb
+        //
+        // We then pop `AST 24` and repeat the process, giving us the sequence:
+        //
+        //     Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
+        //
+        // At this point, `popped_scopes` is empty, and so the final block
+        // that we return to the user is `Cleanup(AST 24)`.
+        while let Some(mut scope) = popped_scopes.pop() {
+            if !scope.cleanups.is_empty() {
+                let name = scope.block_name("clean");
+                debug!("generating cleanups for {}", name);
+
+                let bcx_in = self.new_block(&name[..], None);
+                let exit_label = label.start(bcx_in);
+                let mut bcx_out = bcx_in;
+                let len = scope.cleanups.len();
+                for cleanup in scope.cleanups.iter().rev().take(len - skip) {
+                    bcx_out = cleanup.trans(bcx_out, scope.debug_loc);
+                }
+                skip = 0;
+                exit_label.branch(bcx_out, prev_llbb);
+                prev_llbb = bcx_in.llbb;
+
+                scope.add_cached_early_exit(exit_label, prev_llbb, len);
+            }
+            self.push_scope(scope);
+        }
+
+        debug!("trans_cleanups_to_exit_scope: prev_llbb={:?}", prev_llbb);
+
+        assert_eq!(self.scopes_len(), orig_scopes_len);
+        prev_llbb
+    }
+
+    /// Creates a landing pad for the top scope, if one does not exist.  The
+    /// landing pad will perform all cleanups necessary for an unwind and then
+    /// `resume` to continue error propagation:
+    ///
+    ///     landing_pad -> ... cleanups ... -> [resume]
+    ///
+    /// (The cleanups and resume instruction are created by
+    /// `trans_cleanups_to_exit_scope()`, not in this function itself.)
+    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
+        let pad_bcx;
+
+        debug!("get_or_create_landing_pad");
+
+        // Check if a landing pad block exists; if not, create one.
+        {
+            let mut scopes = self.scopes.borrow_mut();
+            let last_scope = scopes.last_mut().unwrap();
+            match last_scope.cached_landing_pad {
+                Some(llbb) => return llbb,
+                None => {
+                    let name = last_scope.block_name("unwind");
+                    pad_bcx = self.new_block(&name[..], None);
+                    last_scope.cached_landing_pad = Some(pad_bcx.llbb);
+                }
+            }
+        };
+
+        let llpersonality = pad_bcx.fcx.eh_personality();
+
+        let val = if base::wants_msvc_seh(self.ccx.sess()) {
+            // A cleanup pad requires a personality function to be specified, so
+            // we do that here explicitly (happens implicitly below through
+            // creation of the landingpad instruction). We then create a
+            // cleanuppad instruction which has no filters to run cleanup on all
+            // exceptions.
+            build::SetPersonalityFn(pad_bcx, llpersonality);
+            let llretval = build::CleanupPad(pad_bcx, None, &[]);
+            UnwindKind::CleanupPad(llretval)
+        } else {
+            // The landing pad return type (the type being propagated): a pair
+            // of the exception object pointer and the selector value. Its
+            // exact meaning is determined by the personality function; this is
+            // the layout the EH proposal example uses.
+            let llretty = Type::struct_(self.ccx,
+                                        &[Type::i8p(self.ccx), Type::i32(self.ccx)],
+                                        false);
+
+            // The only landing pad clause will be 'cleanup'
+            let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1);
+
+            // The landing pad block is a cleanup
+            build::SetCleanup(pad_bcx, llretval);
+
+            let addr = match self.landingpad_alloca.get() {
+                Some(addr) => addr,
+                None => {
+                    let addr = base::alloca(pad_bcx, common::val_ty(llretval),
+                                            "");
+                    base::call_lifetime_start(pad_bcx, addr);
+                    self.landingpad_alloca.set(Some(addr));
+                    addr
+                }
+            };
+            build::Store(pad_bcx, llretval, addr);
+            UnwindKind::LandingPad
+        };
+
+        // Generate the cleanup block and branch to it.
+        let label = UnwindExit(val);
+        let cleanup_llbb = self.trans_cleanups_to_exit_scope(label);
+        label.branch(pad_bcx, cleanup_llbb);
+
+        return pad_bcx.llbb;
+    }
+}
+
+impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
+    fn new(kind: CleanupScopeKind<'blk, 'tcx>,
+           debug_loc: DebugLoc)
+        -> CleanupScope<'blk, 'tcx> {
+        CleanupScope {
+            kind: kind,
+            debug_loc: debug_loc,
+            cleanups: vec!(),
+            cached_early_exits: vec!(),
+            cached_landing_pad: None,
+        }
+    }
+
+    fn clear_cached_exits(&mut self) {
+        self.cached_early_exits = vec!();
+        self.cached_landing_pad = None;
+    }
+
+    fn cached_early_exit(&self,
+                         label: EarlyExitLabel)
+                         -> Option<(BasicBlockRef, usize)> {
+        self.cached_early_exits.iter().rev().
+            find(|e| e.label == label).
+            map(|e| (e.cleanup_block, e.last_cleanup))
+    }
+
+    fn add_cached_early_exit(&mut self,
+                             label: EarlyExitLabel,
+                             blk: BasicBlockRef,
+                             last_cleanup: usize) {
+        self.cached_early_exits.push(
+            CachedEarlyExit { label: label,
+                              cleanup_block: blk,
+                              last_cleanup: last_cleanup});
+    }
+
+    /// True if this scope has cleanups that need unwinding
+    fn needs_invoke(&self) -> bool {
+        self.cached_landing_pad.is_some() ||
+            self.cleanups.iter().any(|c| c.must_unwind())
+    }
+
+    /// Returns a suitable name to use for the basic block that handles this cleanup scope
+    fn block_name(&self, prefix: &str) -> String {
+        match self.kind {
+            CustomScopeKind => format!("{}_custom_", prefix),
+            AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
+            LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
+        }
+    }
+
+    /// Manipulate cleanup scope for call arguments. Conceptually, each
+    /// argument to a call is an lvalue, and performing the call moves each
+    /// of the arguments into a new rvalue (which gets cleaned up by the
+    /// callee). As an optimization, instead of actually performing all of
+    /// those moves, trans just manipulates the cleanup scope to obtain the
+    /// same effect.
+    pub fn drop_non_lifetime_clean(&mut self) {
+        self.cleanups.retain(|c| c.is_lifetime_end());
+        self.clear_cached_exits();
+    }
+}
+
+impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
+    fn is_temp(&self) -> bool {
+        match *self {
+            CustomScopeKind => true,
+            LoopScopeKind(..) | AstScopeKind(..) => false,
+        }
+    }
+
+    fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
+        match *self {
+            CustomScopeKind | LoopScopeKind(..) => false,
+            AstScopeKind(i) => i == id
+        }
+    }
+
+    fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
+        match *self {
+            CustomScopeKind | AstScopeKind(..) => false,
+            LoopScopeKind(i, _) => i == id
+        }
+    }
+
+    /// If this is a loop scope with id `id`, returns the exit block at index `exit`, else `None`
+    fn early_exit_block(&self,
+                        id: ast::NodeId,
+                        exit: usize) -> Option<BasicBlockRef> {
+        match *self {
+            LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
+            _ => None,
+        }
+    }
+}
+
+impl EarlyExitLabel {
+    /// Generates a branch going from `from_bcx` to `to_llbb` where `self` is
+    /// the exit label attached to the start of `from_bcx`.
+    ///
+    /// Transitions from an exit label to other exit labels depend on the type
+    /// of label. For example, with MSVC exceptions, unwind exit labels use
+    /// the `cleanupret` instruction instead of the `br` instruction.
+    fn branch(&self, from_bcx: Block, to_llbb: BasicBlockRef) {
+        if let UnwindExit(UnwindKind::CleanupPad(pad)) = *self {
+            build::CleanupRet(from_bcx, pad, Some(to_llbb));
+        } else {
+            build::Br(from_bcx, to_llbb, DebugLoc::None);
+        }
+    }
+
+    /// Generates the necessary instructions at the start of `bcx` to prepare
+    /// for the same kind of early exit label that `self` is.
+    ///
+    /// This function will appropriately configure `bcx` based on the kind of
+    /// label this is. For UnwindExit labels, the `lpad` field of the block will
+    /// be set to `Some`, and for MSVC exceptions this function will generate a
+    /// `cleanuppad` instruction at the start of the block so it may be jumped
+    /// to in the future (e.g. so this block can be cached as an early exit).
+    ///
+    /// Returns a new label which can be used to cache `bcx` in the list of
+    /// early exits.
+    fn start(&self, bcx: Block) -> EarlyExitLabel {
+        match *self {
+            UnwindExit(UnwindKind::CleanupPad(..)) => {
+                let pad = build::CleanupPad(bcx, None, &[]);
+                bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::msvc(pad))));
+                UnwindExit(UnwindKind::CleanupPad(pad))
+            }
+            UnwindExit(UnwindKind::LandingPad) => {
+                bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::gnu())));
+                *self
+            }
+            label => label,
+        }
+    }
+}
+
+impl PartialEq for UnwindKind {
+    fn eq(&self, val: &UnwindKind) -> bool {
+        match (*self, *val) {
+            (UnwindKind::LandingPad, UnwindKind::LandingPad) |
+            (UnwindKind::CleanupPad(..), UnwindKind::CleanupPad(..)) => true,
+            _ => false,
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Cleanup types
+
+#[derive(Copy, Clone)]
+pub struct DropValue<'tcx> {
+    is_immediate: bool,
+    val: ValueRef,
+    ty: Ty<'tcx>,
+    fill_on_drop: bool,
+    skip_dtor: bool,
+    drop_hint: Option<DropHintValue>,
+}
+
+impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
+    fn must_unwind(&self) -> bool {
+        true
+    }
+
+    fn is_lifetime_end(&self) -> bool {
+        false
+    }
+
+    fn trans<'blk>(&self,
+                   bcx: Block<'blk, 'tcx>,
+                   debug_loc: DebugLoc)
+                   -> Block<'blk, 'tcx> {
+        let skip_dtor = self.skip_dtor;
+        let _icx = if skip_dtor {
+            base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=true")
+        } else {
+            base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=false")
+        };
+        let bcx = if self.is_immediate {
+            glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
+        } else {
+            glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor, self.drop_hint)
+        };
+        if self.fill_on_drop {
+            base::drop_done_fill_mem(bcx, self.val, self.ty);
+        }
+        bcx
+    }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum Heap {
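+    // The exchange heap: `Box` allocations, freed via
+    // `glue::trans_exchange_free_ty`.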
+    HeapExchange
+}
+
+#[derive(Copy, Clone)]
+pub struct FreeValue<'tcx> {
+    ptr: ValueRef,
+    heap: Heap,
+    content_ty: Ty<'tcx>
+}
+
+impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
+    fn must_unwind(&self) -> bool {
+        true
+    }
+
+    fn is_lifetime_end(&self) -> bool {
+        false
+    }
+
+    fn trans<'blk>(&self,
+                   bcx: Block<'blk, 'tcx>,
+                   debug_loc: DebugLoc)
+                   -> Block<'blk, 'tcx> {
+        match self.heap {
+            HeapExchange => {
+                glue::trans_exchange_free_ty(bcx,
+                                             self.ptr,
+                                             self.content_ty,
+                                             debug_loc)
+            }
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct LifetimeEnd {
+    ptr: ValueRef,
+}
+
+impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
+    fn must_unwind(&self) -> bool {
+        false
+    }
+
+    fn is_lifetime_end(&self) -> bool {
+        true
+    }
+
+    fn trans<'blk>(&self,
+                   bcx: Block<'blk, 'tcx>,
+                   debug_loc: DebugLoc)
+                   -> Block<'blk, 'tcx> {
+        debug_loc.apply(bcx.fcx);
+        base::call_lifetime_end(bcx, self.ptr);
+        bcx
+    }
+}
+
+pub fn temporary_scope(tcx: &TyCtxt,
+                       id: ast::NodeId)
+                       -> ScopeId {
+    match tcx.region_maps.temporary_scope(id) {
+        Some(scope) => {
+            let r = AstScope(scope.node_id(&tcx.region_maps));
+            debug!("temporary_scope({}) = {:?}", id, r);
+            r
+        }
+        None => {
+            bug!("no temporary scope available for expr {}", id)
+        }
+    }
+}
+
+pub fn var_scope(tcx: &TyCtxt,
+                 id: ast::NodeId)
+                 -> ScopeId {
+    let r = AstScope(tcx.region_maps.var_scope(id).node_id(&tcx.region_maps));
+    debug!("var_scope({}) = {:?}", id, r);
+    r
+}
+
+///////////////////////////////////////////////////////////////////////////
+// These traits just exist to put the methods into this file.
+
+pub trait CleanupMethods<'blk, 'tcx> {
+    fn push_ast_cleanup_scope(&self, id: NodeIdAndSpan);
+    fn push_loop_cleanup_scope(&self,
+                               id: ast::NodeId,
+                               exits: [Block<'blk, 'tcx>; EXIT_MAX]);
+    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
+    fn push_custom_cleanup_scope_with_debug_loc(&self,
+                                                debug_loc: NodeIdAndSpan)
+                                                -> CustomScopeIndex;
+    fn pop_and_trans_ast_cleanup_scope(&self,
+                                       bcx: Block<'blk, 'tcx>,
+                                       cleanup_scope: ast::NodeId)
+                                       -> Block<'blk, 'tcx>;
+    fn pop_loop_cleanup_scope(&self,
+                              cleanup_scope: ast::NodeId);
+    fn pop_custom_cleanup_scope(&self,
+                                custom_scope: CustomScopeIndex);
+    fn pop_and_trans_custom_cleanup_scope(&self,
+                                          bcx: Block<'blk, 'tcx>,
+                                          custom_scope: CustomScopeIndex)
+                                          -> Block<'blk, 'tcx>;
+    fn top_loop_scope(&self) -> ast::NodeId;
+    fn normal_exit_block(&'blk self,
+                         cleanup_scope: ast::NodeId,
+                         exit: usize) -> BasicBlockRef;
+    fn return_exit_block(&'blk self) -> BasicBlockRef;
+    fn schedule_lifetime_end(&self,
+                             cleanup_scope: ScopeId,
+                             val: ValueRef);
+    fn schedule_drop_mem(&self,
+                         cleanup_scope: ScopeId,
+                         val: ValueRef,
+                         ty: Ty<'tcx>,
+                         drop_hint: Option<DropHintDatum<'tcx>>);
+    fn schedule_drop_and_fill_mem(&self,
+                                  cleanup_scope: ScopeId,
+                                  val: ValueRef,
+                                  ty: Ty<'tcx>,
+                                  drop_hint: Option<DropHintDatum<'tcx>>);
+    fn schedule_drop_adt_contents(&self,
+                                  cleanup_scope: ScopeId,
+                                  val: ValueRef,
+                                  ty: Ty<'tcx>);
+    fn schedule_drop_immediate(&self,
+                               cleanup_scope: ScopeId,
+                               val: ValueRef,
+                               ty: Ty<'tcx>);
+    fn schedule_free_value(&self,
+                           cleanup_scope: ScopeId,
+                           val: ValueRef,
+                           heap: Heap,
+                           content_ty: Ty<'tcx>);
+    fn schedule_clean(&self,
+                      cleanup_scope: ScopeId,
+                      cleanup: CleanupObj<'tcx>);
+    fn schedule_clean_in_ast_scope(&self,
+                                   cleanup_scope: ast::NodeId,
+                                   cleanup: CleanupObj<'tcx>);
+    fn schedule_clean_in_custom_scope(&self,
+                                      custom_scope: CustomScopeIndex,
+                                      cleanup: CleanupObj<'tcx>);
+    fn needs_invoke(&self) -> bool;
+    fn get_landing_pad(&'blk self) -> BasicBlockRef;
+}
+
+trait CleanupHelperMethods<'blk, 'tcx> {
+    fn top_ast_scope(&self) -> Option<ast::NodeId>;
+    fn top_nonempty_cleanup_scope(&self) -> Option<usize>;
+    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
+    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
+    fn trans_scope_cleanups(&self,
+                            bcx: Block<'blk, 'tcx>,
+                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
+    fn trans_cleanups_to_exit_scope(&'blk self,
+                                    label: EarlyExitLabel)
+                                    -> BasicBlockRef;
+    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
+    fn scopes_len(&self) -> usize;
+    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
+    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
+    fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R;
+}
diff --git a/src/librustc_trans/closure.rs b/src/librustc_trans/closure.rs
new file mode 100644 (file)
index 0000000..6bd5fd3
--- /dev/null
@@ -0,0 +1,444 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use arena::TypedArena;
+use back::symbol_names;
+use llvm::{ValueRef, get_param, get_params};
+use rustc::hir::def_id::DefId;
+use rustc::infer;
+use rustc::traits::ProjectionMode;
+use abi::{Abi, FnType};
+use adt;
+use attributes;
+use base::*;
+use build::*;
+use callee::{self, ArgVals, Callee};
+use cleanup::{CleanupMethods, CustomScope, ScopeId};
+use common::*;
+use datum::{ByRef, Datum, lvalue_scratch_datum};
+use datum::{rvalue_scratch_datum, Rvalue};
+use debuginfo::{self, DebugLoc};
+use declare;
+use expr;
+use monomorphize::{Instance};
+use value::Value;
+use Disr;
+use rustc::ty::{self, Ty, TyCtxt};
+use session::config::FullDebugInfo;
+
+use syntax::ast;
+
+use rustc::hir;
+
+use libc::c_uint;
+
+fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                        closure_def_id: DefId,
+                                        arg_scope_id: ScopeId,
+                                        id: ast::NodeId) {
+    let _icx = push_ctxt("closure::load_closure_environment");
+    let kind = kind_for_closure(bcx.ccx(), closure_def_id);
+
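+    // The closure environment is the first formal argument. If the return
+    // value is passed indirectly, the out-pointer occupies LLVM argument 0,
+    // so the environment starts one slot later.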
+    let env_arg = &bcx.fcx.fn_ty.args[0];
+    let mut env_idx = bcx.fcx.fn_ty.ret.is_indirect() as usize;
+
+    // Special case for small by-value closure environments.
+    let llenv = if kind == ty::ClosureKind::FnOnce && !env_arg.is_indirect() {
+        let closure_ty = node_id_type(bcx, id);
+        let llenv = rvalue_scratch_datum(bcx, closure_ty, "closure_env").val;
+        env_arg.store_fn_arg(&bcx.build(), &mut env_idx, llenv);
+        llenv
+    } else {
+        get_param(bcx.fcx.llfn, env_idx as c_uint)
+    };
+
+    // Store the pointer to closure data in an alloca for debug info because that's what the
+    // llvm.dbg.declare intrinsic expects
+    let env_pointer_alloca = if bcx.sess().opts.debuginfo == FullDebugInfo {
+        let alloc = alloca(bcx, val_ty(llenv), "__debuginfo_env_ptr");
+        Store(bcx, llenv, alloc);
+        Some(alloc)
+    } else {
+        None
+    };
+
+    bcx.tcx().with_freevars(id, |fv| {
+        for (i, freevar) in fv.iter().enumerate() {
+            let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(),
+                                        closure_expr_id: id };
+            let upvar_capture = bcx.tcx().upvar_capture(upvar_id).unwrap();
+            let mut upvar_ptr = StructGEP(bcx, llenv, i);
+            let captured_by_ref = match upvar_capture {
+                ty::UpvarCapture::ByValue => false,
+                ty::UpvarCapture::ByRef(..) => {
+                    upvar_ptr = Load(bcx, upvar_ptr);
+                    true
+                }
+            };
+            let node_id = freevar.def.var_id();
+            bcx.fcx.llupvars.borrow_mut().insert(node_id, upvar_ptr);
+
+            if kind == ty::ClosureKind::FnOnce && !captured_by_ref {
+                let hint = bcx.fcx.lldropflag_hints.borrow().hint_datum(upvar_id.var_id);
+                bcx.fcx.schedule_drop_mem(arg_scope_id,
+                                        upvar_ptr,
+                                        node_id_type(bcx, node_id),
+                                        hint)
+            }
+
+            if let Some(env_pointer_alloca) = env_pointer_alloca {
+                debuginfo::create_captured_var_metadata(
+                    bcx,
+                    node_id,
+                    env_pointer_alloca,
+                    i,
+                    captured_by_ref,
+                    freevar.span);
+            }
+        }
+    })
+}
+
+pub enum ClosureEnv {
+    NotClosure,
+    Closure(DefId, ast::NodeId),
+}
+
+impl ClosureEnv {
+    pub fn load<'blk,'tcx>(self, bcx: Block<'blk, 'tcx>, arg_scope: ScopeId) {
+        if let ClosureEnv::Closure(def_id, id) = self {
+            load_closure_environment(bcx, def_id, arg_scope, id);
+        }
+    }
+}
+
+fn get_self_type<'tcx>(tcx: &TyCtxt<'tcx>,
+                       closure_id: DefId,
+                       fn_ty: Ty<'tcx>)
+                       -> Ty<'tcx> {
+    match tcx.closure_kind(closure_id) {
+        ty::ClosureKind::Fn => {
+            tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), fn_ty)
+        }
+        ty::ClosureKind::FnMut => {
+            tcx.mk_mut_ref(tcx.mk_region(ty::ReStatic), fn_ty)
+        }
+        ty::ClosureKind::FnOnce => fn_ty,
+    }
+}
+
+/// Returns the LLVM function declaration for a closure, creating it if
+/// necessary. `closure_id` must be the `DefId` of a closure.
+fn get_or_create_closure_declaration<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                               closure_id: DefId,
+                                               substs: &ty::ClosureSubsts<'tcx>)
+                                               -> ValueRef {
+    // Normalize type so differences in regions and typedefs don't cause
+    // duplicate declarations
+    let tcx = ccx.tcx();
+    let substs = tcx.erase_regions(substs);
+    let instance = Instance::new(closure_id, &substs.func_substs);
+
+    if let Some(&llfn) = ccx.instances().borrow().get(&instance) {
+        debug!("get_or_create_closure_declaration(): found closure {:?}: {:?}",
+               instance, Value(llfn));
+        return llfn;
+    }
+
+    let symbol = symbol_names::exported_name(ccx, &instance);
+
+    // Compute the rust-call form of the closure call method.
+    let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables, ProjectionMode::Any);
+    let sig = &infcx.closure_type(closure_id, &substs).sig;
+    let sig = tcx.erase_late_bound_regions(sig);
+    let sig = infer::normalize_associated_type(tcx, &sig);
+    let closure_type = tcx.mk_closure_from_closure_substs(closure_id, Box::new(substs));
+    let function_type = tcx.mk_fn_ptr(ty::BareFnTy {
+        unsafety: hir::Unsafety::Normal,
+        abi: Abi::RustCall,
+        sig: ty::Binder(ty::FnSig {
+            inputs: Some(get_self_type(tcx, closure_id, closure_type))
+                        .into_iter().chain(sig.inputs).collect(),
+            output: sig.output,
+            variadic: false
+        })
+    });
+    let llfn = declare::define_internal_fn(ccx, &symbol, function_type);
+
+    // set an inline hint for all closures
+    attributes::inline(llfn, attributes::InlineAttr::Hint);
+
+    debug!("get_or_create_declaration_if_closure(): inserting new \
+            closure {:?}: {:?}",
+           instance, Value(llfn));
+    ccx.instances().borrow_mut().insert(instance, llfn);
+
+    llfn
+}
+
+pub enum Dest<'a, 'tcx: 'a> {
+    SaveIn(Block<'a, 'tcx>, ValueRef),
+    Ignore(&'a CrateContext<'a, 'tcx>)
+}
+
+pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>,
+                                    decl: &hir::FnDecl,
+                                    body: &hir::Block,
+                                    id: ast::NodeId,
+                                    closure_def_id: DefId, // (*)
+                                    closure_substs: &ty::ClosureSubsts<'tcx>)
+                                    -> Option<Block<'a, 'tcx>>
+{
+    // (*) Note that in the case of inlined functions, the `closure_def_id` will be the
+    // defid of the closure in its original crate, whereas `id` will be the id of the local
+    // inlined copy.
+
+    let param_substs = closure_substs.func_substs;
+
+    let ccx = match dest {
+        Dest::SaveIn(bcx, _) => bcx.ccx(),
+        Dest::Ignore(ccx) => ccx
+    };
+    let tcx = ccx.tcx();
+    let _icx = push_ctxt("closure::trans_closure_expr");
+
+    debug!("trans_closure_expr(id={:?}, closure_def_id={:?}, closure_substs={:?})",
+           id, closure_def_id, closure_substs);
+
+    let llfn = get_or_create_closure_declaration(ccx, closure_def_id, closure_substs);
+
+    // Get the type of this closure. Use the current `param_substs` as
+    // the closure substitutions. This makes sense because the closure
+    // takes the same set of type arguments as the enclosing fn, and
+    // this function (`trans_closure`) is invoked at the point
+    // of the closure expression.
+
+    let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables, ProjectionMode::Any);
+    let function_type = infcx.closure_type(closure_def_id, closure_substs);
+
+    let sig = tcx.erase_late_bound_regions(&function_type.sig);
+    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+
+    let closure_type = tcx.mk_closure_from_closure_substs(closure_def_id,
+        Box::new(closure_substs.clone()));
+    let sig = ty::FnSig {
+        inputs: Some(get_self_type(tcx, closure_def_id, closure_type))
+                    .into_iter().chain(sig.inputs).collect(),
+        output: sig.output,
+        variadic: false
+    };
+    let fn_ty = FnType::new(ccx, Abi::RustCall, &sig, &[]);
+
+    trans_closure(ccx,
+                  decl,
+                  body,
+                  llfn,
+                  param_substs,
+                  closure_def_id,
+                  id,
+                  fn_ty,
+                  Abi::RustCall,
+                  ClosureEnv::Closure(closure_def_id, id));
+
+    // Don't hoist this to the top of the function. It's perfectly legitimate
+    // to have a zero-size closure (in which case dest will be `Ignore`) and
+    // we must still generate the closure body.
+    let (mut bcx, dest_addr) = match dest {
+        Dest::SaveIn(bcx, p) => (bcx, p),
+        Dest::Ignore(_) => {
+            debug!("trans_closure_expr() ignoring result");
+            return None;
+        }
+    };
+
+    let repr = adt::represent_type(ccx, node_id_type(bcx, id));
+
+    // Create the closure.
+    tcx.with_freevars(id, |fv| {
+        for (i, freevar) in fv.iter().enumerate() {
+            let datum = expr::trans_var(bcx, freevar.def);
+            let upvar_slot_dest = adt::trans_field_ptr(
+                bcx, &repr, adt::MaybeSizedValue::sized(dest_addr), Disr(0), i);
+            let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(),
+                                        closure_expr_id: id };
+            match tcx.upvar_capture(upvar_id).unwrap() {
+                ty::UpvarCapture::ByValue => {
+                    bcx = datum.store_to(bcx, upvar_slot_dest);
+                }
+                ty::UpvarCapture::ByRef(..) => {
+                    Store(bcx, datum.to_llref(), upvar_slot_dest);
+                }
+            }
+        }
+    });
+    adt::trans_set_discr(bcx, &repr, dest_addr, Disr(0));
+
+    Some(bcx)
+}
+
+pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
+                                      closure_def_id: DefId,
+                                      substs: ty::ClosureSubsts<'tcx>,
+                                      trait_closure_kind: ty::ClosureKind)
+                                      -> ValueRef
+{
+    // Get (or create) the function declaration for this closure.
+    let llfn = get_or_create_closure_declaration(ccx, closure_def_id, &substs);
+
+    // If the closure is a Fn closure, but a FnOnce is needed (etc),
+    // then adapt the self type
+    let llfn_closure_kind = ccx.tcx().closure_kind(closure_def_id);
+
+    let _icx = push_ctxt("trans_closure_adapter_shim");
+
+    debug!("trans_closure_adapter_shim(llfn_closure_kind={:?}, \
+           trait_closure_kind={:?}, llfn={:?})",
+           llfn_closure_kind, trait_closure_kind, Value(llfn));
+
+    match (llfn_closure_kind, trait_closure_kind) {
+        (ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
+        (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
+        (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => {
+            // No adapter needed.
+            llfn
+        }
+        (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => {
+            // The closure fn `llfn` is a `fn(&self, ...)`.  We want a
+            // `fn(&mut self, ...)`. In fact, at trans time, these are
+            // basically the same thing, so we can just return llfn.
+            llfn
+        }
+        (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
+        (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
+            // The closure fn `llfn` is a `fn(&self, ...)` or `fn(&mut
+            // self, ...)`.  We want a `fn(self, ...)`. We can produce
+            // this by doing something like:
+            //
+            //     fn call_once(self, ...) { call_mut(&self, ...) }
+            //     fn call_once(mut self, ...) { call_mut(&mut self, ...) }
+            //
+            // These are both the same at trans time.
+            trans_fn_once_adapter_shim(ccx, closure_def_id, substs, llfn)
+        }
+        _ => {
+            bug!("trans_closure_adapter_shim: cannot convert {:?} to {:?}",
+                 llfn_closure_kind,
+                 trait_closure_kind);
+        }
+    }
+}
+
+fn trans_fn_once_adapter_shim<'a, 'tcx>(
+    ccx: &'a CrateContext<'a, 'tcx>,
+    closure_def_id: DefId,
+    substs: ty::ClosureSubsts<'tcx>,
+    llreffn: ValueRef)
+    -> ValueRef
+{
+    debug!("trans_fn_once_adapter_shim(closure_def_id={:?}, substs={:?}, llreffn={:?})",
+           closure_def_id, substs, Value(llreffn));
+
+    let tcx = ccx.tcx();
+    let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables, ProjectionMode::Any);
+
+    // Find a version of the closure type. Substitute static for the
+    // region since it doesn't really matter.
+    let closure_ty = tcx.mk_closure_from_closure_substs(closure_def_id, Box::new(substs.clone()));
+    let ref_closure_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), closure_ty);
+
+    // Make a version with the type of by-ref closure.
+    let ty::ClosureTy { unsafety, abi, mut sig } = infcx.closure_type(closure_def_id, &substs);
+    sig.0.inputs.insert(0, ref_closure_ty); // sig has no self type as of yet
+    let llref_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
+        unsafety: unsafety,
+        abi: abi,
+        sig: sig.clone()
+    });
+    debug!("trans_fn_once_adapter_shim: llref_fn_ty={:?}",
+           llref_fn_ty);
+
+
+    // Make a version of the closure type with the same arguments, but
+    // with argument #0 being by value.
+    assert_eq!(abi, Abi::RustCall);
+    sig.0.inputs[0] = closure_ty;
+
+    let sig = tcx.erase_late_bound_regions(&sig);
+    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+    let fn_ty = FnType::new(ccx, abi, &sig, &[]);
+
+    let llonce_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
+        unsafety: unsafety,
+        abi: abi,
+        sig: ty::Binder(sig)
+    });
+
+    // Create the by-value helper.
+    let function_name =
+        symbol_names::internal_name_from_type_and_suffix(ccx, llonce_fn_ty, "once_shim");
+    let lloncefn = declare::define_internal_fn(ccx, &function_name, llonce_fn_ty);
+
+    let (block_arena, fcx): (TypedArena<_>, FunctionContext);
+    block_arena = TypedArena::new();
+    fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, substs.func_substs, &block_arena);
+    let mut bcx = fcx.init(false, None);
+
+
+    // the first argument (`self`) will be the (by value) closure env.
+    let self_scope = fcx.push_custom_cleanup_scope();
+    let self_scope_id = CustomScope(self_scope);
+
+    let mut llargs = get_params(fcx.llfn);
+    let mut self_idx = fcx.fn_ty.ret.is_indirect() as usize;
+    let env_arg = &fcx.fn_ty.args[0];
+    let llenv = if env_arg.is_indirect() {
+        Datum::new(llargs[self_idx], closure_ty, Rvalue::new(ByRef))
+            .add_clean(&fcx, self_scope_id)
+    } else {
+        unpack_datum!(bcx, lvalue_scratch_datum(bcx, closure_ty, "self",
+                                                InitAlloca::Dropped,
+                                                self_scope_id, |bcx, llval| {
+            let mut llarg_idx = self_idx;
+            env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, llval);
+            bcx.fcx.schedule_lifetime_end(self_scope_id, llval);
+            bcx
+        })).val
+    };
+
+    debug!("trans_fn_once_adapter_shim: env={:?}", Value(llenv));
+    // Adjust llargs such that llargs[self_idx..] has the call arguments.
+    // For zero-sized closures that means sneaking in a new argument.
+    if env_arg.is_ignore() {
+        if self_idx > 0 {
+            self_idx -= 1;
+            llargs[self_idx] = llenv;
+        } else {
+            llargs.insert(0, llenv);
+        }
+    } else {
+        llargs[self_idx] = llenv;
+    }
+
+    let dest =
+        fcx.llretslotptr.get().map(
+            |_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")));
+
+    let callee = Callee {
+        data: callee::Fn(llreffn),
+        ty: llref_fn_ty
+    };
+    bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[self_idx..]), dest).bcx;
+
+    fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope);
+
+    fcx.finish(bcx, DebugLoc::None);
+
+    lloncefn
+}
diff --git a/src/librustc_trans/collector.rs b/src/librustc_trans/collector.rs
new file mode 100644 (file)
index 0000000..c72a1a6
--- /dev/null
@@ -0,0 +1,1489 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Translation Item Collection
+//! ===========================
+//!
+//! This module is responsible for discovering all items that will contribute
+//! to code generation of the crate. The important part here is that it not only
+//! needs to find syntax-level items (functions, structs, etc) but also all
+//! their monomorphized instantiations. Every non-generic, non-const function
+//! maps to one LLVM artifact. Every generic function can produce
+//! from zero to N artifacts, depending on the sets of type arguments it
+//! is instantiated with.
+//! This also applies to generic items from other crates: A generic definition
+//! in crate X might produce monomorphizations that are compiled into crate Y.
+//! We also have to collect these here.
+//!
+//! The following kinds of "translation items" are handled here:
+//!
+//! - Functions
+//! - Methods
+//! - Closures
+//! - Statics
+//! - Drop glue
+//!
+//! The following things also result in LLVM artifacts, but are not collected
+//! here, since we instantiate them locally on demand when needed in a given
+//! codegen unit:
+//!
+//! - Constants
+//! - Vtables
+//! - Object Shims
+//!
+//!
+//! General Algorithm
+//! -----------------
+//! Let's define some terms first:
+//!
+//! - A "translation item" is something that results in a function or global in
+//!   the LLVM IR of a codegen unit. Translation items do not stand on their
+//!   own; they can reference other translation items. For example, if function
+//!   `foo()` calls function `bar()` then the translation item for `foo()`
+//!   references the translation item for function `bar()`. In general, the
+//!   definition for translation item A referencing a translation item B is that
+//!   the LLVM artifact produced for A references the LLVM artifact produced
+//!   for B.
+//!
+//! - Translation items and the references between them form a directed graph,
+//!   where the translation items are the nodes and references form the edges.
+//!   Let's call this graph the "translation item graph".
+//!
+//! - The translation item graph for a program contains all translation items
+//!   that are needed in order to produce the complete LLVM IR of the program.
+//!
+//! The purpose of the algorithm implemented in this module is to build the
+//! translation item graph for the current crate. It runs in two phases:
+//!
+//! 1. Discover the roots of the graph by traversing the HIR of the crate.
+//! 2. Starting from the roots, find neighboring nodes by inspecting the MIR
+//!    representation of the item corresponding to a given node, until no more
+//!    new nodes are found.
+//!
+//! ### Discovering roots
+//!
+//! The roots of the translation item graph correspond to the non-generic
+//! syntactic items in the source code. We find them by walking the HIR of the
+//! crate, and whenever we hit upon a function, method, or static item, we
+//! create a translation item consisting of the item's `DefId` and, since we only
+//! consider non-generic items, an empty type-substitution set.
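+//!
+//! For illustration, a minimal sketch with hypothetical item names: `COUNTER`
+//! and `main` below become roots, while the generic `id` does not; it only
+//! enters the graph once a monomorphic use of it is found.
+//!
+//! ```rust
+//! static COUNTER: u32 = 0;       // root: TransItem::Static
+//!
+//! fn id<T>(x: T) -> T { x }      // generic: not a root by itself
+//!
+//! fn main() {                    // root: TransItem::Fn
+//!     // id::<u32> is later discovered as a neighbor of main's MIR
+//!     let _ = id::<u32>(COUNTER);
+//! }
+//! ```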
+//!
+//! ### Finding neighbor nodes
+//!
+//! Given a translation item node, we can discover neighbors by inspecting its
+//! MIR. We walk the MIR and any time we hit upon something that signifies a
+//! reference to another translation item, we have found a neighbor. Since the
+//! translation item we are currently at is always monomorphic, we also know the
+//! concrete type arguments of its neighbors, and so all neighbors again will be
+//! monomorphic. The specific forms a reference to a neighboring node can take
+//! in MIR are quite diverse. Here is an overview:
+//!
+//! #### Calling Functions/Methods
+//! The most obvious form of one translation item referencing another is a
+//! function or method call (represented by a CALL terminator in MIR). But
+//! calls are not the only thing that might introduce a reference between two
+//! function translation items, and as we will see below, they are just a
+//! specialization of the form described next, and consequently don't get any
+//! special treatment in the algorithm.
+//!
+//! #### Taking a reference to a function or method
+//! A function does not need to actually be called in order to be a neighbor of
+//! another function. It suffices to just take a reference in order to introduce
+//! an edge. Consider the following example:
+//!
+//! ```rust
+//! use std::fmt::Display;
+//!
+//! fn print_val<T: Display>(x: T) {
+//!     println!("{}", x);
+//! }
+//!
+//! fn call_fn(f: &Fn(i32), x: i32) {
+//!     f(x);
+//! }
+//!
+//! fn main() {
+//!     let print_i32 = print_val::<i32>;
+//!     call_fn(&print_i32, 0);
+//! }
+//! ```
+//! The MIR of none of these functions will contain an explicit call to
+//! `print_val::<i32>`. Nonetheless, in order to translate this program, we need
+//! an instance of this function. Thus, whenever we encounter a function or
+//! method in operand position, we treat it as a neighbor of the current
+//! translation item. Calls are just a special case of that.
+//!
+//! #### Closures
+//! In a way, closures are a simple case. Since every closure object needs to be
+//! constructed somewhere, we can reliably discover them by observing
+//! `RValue::Aggregate` expressions with `AggregateKind::Closure`. This is also
+//! true for closures inlined from other crates.
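+//!
+//! For illustration, a small sketch (hypothetical names): constructing `add`
+//! produces an `Rvalue::Aggregate` with `AggregateKind::Closure`, which is the
+//! point at which the collector records a translation item for the closure.
+//!
+//! ```rust
+//! fn main() {
+//!     let offset = 10;
+//!     let add = |x: i32| x + offset; // closure construction observed here
+//!     assert_eq!(add(1), 11);
+//! }
+//! ```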
+//!
+//! #### Drop glue
+//! Drop glue translation items are introduced by MIR drop-statements. The
+//! generated translation item will again have drop-glue item neighbors if the
+//! type to be dropped contains nested values that also need to be dropped. It
+//! might also have a function item neighbor for the explicit `Drop::drop`
+//! implementation of its type.
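+//!
+//! For illustration (hypothetical type): dropping a `Wrapper` value needs drop
+//! glue for `Wrapper`, which in turn references drop glue for the nested
+//! `String` field and the explicit `Drop::drop` implementation below.
+//!
+//! ```rust
+//! struct Wrapper {
+//!     name: String, // nested value that also needs to be dropped
+//! }
+//!
+//! impl Drop for Wrapper {
+//!     fn drop(&mut self) {
+//!         println!("dropping {}", self.name);
+//!     }
+//! }
+//!
+//! fn main() {
+//!     let _w = Wrapper { name: String::from("w") }; // dropped at end of scope
+//! }
+//! ```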
+//!
+//! #### Unsizing Casts
+//! A subtle way of introducing neighbor edges is by casting to a trait object.
+//! Since the resulting fat-pointer contains a reference to a vtable, we need to
+//! instantiate all object-safe methods of the trait, as we need to store
+//! pointers to these functions even if they never get called anywhere. This can
+//! be seen as a special case of taking a function reference.
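+//!
+//! For illustration (hypothetical trait and impl): the cast below never calls
+//! `describe`, yet building the vtable for `&Describe` requires an instance of
+//! `<u32 as Describe>::describe` to be collected.
+//!
+//! ```rust
+//! trait Describe {
+//!     fn describe(&self) -> String;
+//! }
+//!
+//! impl Describe for u32 {
+//!     fn describe(&self) -> String {
+//!         format!("the number {}", self)
+//!     }
+//! }
+//!
+//! fn main() {
+//!     let x = 42u32;
+//!     let _obj: &Describe = &x; // unsizing cast: needs the (u32, Describe) vtable
+//! }
+//! ```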
+//!
+//! #### Boxes
+//! Since `Box` expressions have special compiler support, no explicit calls to
+//! `exchange_malloc()` and `exchange_free()` show up in MIR, even though the
+//! compiler will generate them. We have to observe `Rvalue::Box` expressions
+//! and Box-typed drop-statements for that purpose.
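+//!
+//! For illustration: `Box::new` below is an ordinary function that wraps a
+//! `box` expression inside liballoc, so no call to `exchange_malloc()` is
+//! visible here; the allocation and the later Box-typed drop are what the
+//! collector observes.
+//!
+//! ```rust
+//! fn main() {
+//!     let boxed = Box::new(42u32); // heap allocation via the box machinery
+//!     drop(boxed);                 // Box-typed drop: exchange_free is needed
+//! }
+//! ```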
+//!
+//!
+//! Interaction with Cross-Crate Inlining
+//! -------------------------------------
+//! The binary of a crate will not only contain machine code for the items
+//! defined in the source code of that crate. It will also contain monomorphic
+//! instantiations of generic functions defined in other crates and of functions
+//! marked with `#[inline]`.
+//! The collection algorithm handles this more or less transparently. If it is
+//! about to create a translation item for something with an external `DefId`,
+//! it will check whether the MIR for that item is available, and if so just
+//! proceed normally. If the MIR is not available, it assumes the item is only
+//! linked to and creates no node for it; this is exactly what we want, since
+//! no machine code should be generated in the current crate for such an item.
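+//!
+//! For illustration (hypothetical crate `util`): if `util` defines the generic
+//! function below and the current crate calls `smallest::<u8>`, the MIR for
+//! `smallest` is available from `util`'s metadata, so the monomorphization is
+//! collected and compiled into the current crate.
+//!
+//! ```rust
+//! // Imagine this living in crate `util`:
+//! pub fn smallest<T: Ord>(a: T, b: T) -> T {
+//!     if a < b { a } else { b }
+//! }
+//!
+//! // ... and this in the current crate:
+//! fn main() {
+//!     assert_eq!(smallest(1u8, 2u8), 1); // instantiates smallest::<u8> locally
+//! }
+//! ```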
+//!
+//! Eager and Lazy Collection Mode
+//! ------------------------------
+//! Translation item collection can be performed in one of two modes:
+//!
+//! - Lazy mode means that items will only be instantiated when actually
+//!   referenced. The goal is to produce the least amount of machine code
+//!   possible.
+//!
+//! - Eager mode is meant to be used in conjunction with incremental compilation
+//!   where a stable set of translation items is more important than a minimal
+//!   one. Thus, eager mode will instantiate drop-glue for every drop-able type
+//!   in the crate, even if no drop call for that type exists (yet). It will
+//!   also instantiate default implementations of trait methods, something that
+//!   otherwise is only done on demand.
+//!
+//!
+//! Open Issues
+//! -----------
+//! Some things are not yet fully implemented in the current version of this
+//! module.
+//!
+//! ### Initializers of Constants and Statics
+//! Since no MIR is constructed yet for initializer expressions of constants and
+//! statics, we cannot inspect these properly.
+//!
+//! ### Const Fns
+//! Ideally, no translation item should be generated for const fns unless there
+//! is a call to them that cannot be evaluated at compile time. At the moment,
+//! however, this is not implemented: a translation item will be produced
+//! regardless of whether it is actually needed.
+
+use rustc::hir;
+use rustc::hir::intravisit as hir_visit;
+
+use rustc::hir::map as hir_map;
+use rustc::hir::def_id::DefId;
+use rustc::middle::lang_items::{ExchangeFreeFnLangItem, ExchangeMallocFnLangItem};
+use rustc::traits;
+use rustc::ty::subst::{self, Substs, Subst};
+use rustc::ty::{self, Ty, TypeFoldable};
+use rustc::ty::adjustment::CustomCoerceUnsized;
+use rustc::mir::repr as mir;
+use rustc::mir::visit as mir_visit;
+use rustc::mir::visit::Visitor as MirVisitor;
+
+use syntax::ast::{self, NodeId};
+use syntax::codemap::DUMMY_SP;
+use syntax::errors;
+use syntax::parse::token;
+
+use base::custom_coerce_unsize_info;
+use context::CrateContext;
+use common::{fulfill_obligation, normalize_and_test_predicates,
+                    type_is_sized};
+use glue;
+use meth;
+use monomorphize::{self, Instance};
+use util::nodemap::{FnvHashSet, FnvHashMap, DefIdMap};
+
+use std::hash::{Hash, Hasher};
+
+#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
+pub enum TransItemCollectionMode {
+    Eager,
+    Lazy
+}
+
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub enum TransItem<'tcx> {
+    DropGlue(Ty<'tcx>),
+    Fn(Instance<'tcx>),
+    Static(NodeId)
+}
+
+impl<'tcx> Hash for TransItem<'tcx> {
+    fn hash<H: Hasher>(&self, s: &mut H) {
+        match *self {
+            TransItem::DropGlue(t) => {
+                0u8.hash(s);
+                t.hash(s);
+            },
+            TransItem::Fn(instance) => {
+                1u8.hash(s);
+                instance.def.hash(s);
+                (instance.substs as *const _ as usize).hash(s);
+            }
+            TransItem::Static(node_id) => {
+                2u8.hash(s);
+                node_id.hash(s);
+            }
+        };
+    }
+}
+
+pub fn collect_crate_translation_items<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                                 mode: TransItemCollectionMode)
+                                                 -> FnvHashSet<TransItem<'tcx>> {
+    // We are not tracking dependencies of this pass as it has to be re-executed
+    // every time no matter what.
+    ccx.tcx().dep_graph.with_ignore(|| {
+        let roots = collect_roots(ccx, mode);
+
+        debug!("Building translation item graph, beginning at roots");
+        let mut visited = FnvHashSet();
+        let mut recursion_depths = DefIdMap();
+
+        for root in roots {
+            collect_items_rec(ccx, root, &mut visited, &mut recursion_depths);
+        }
+
+        visited
+    })
+}
+
+// Find all non-generic items by walking the HIR. These items serve as roots to
+// start monomorphizing from.
+fn collect_roots<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                           mode: TransItemCollectionMode)
+                           -> Vec<TransItem<'tcx>> {
+    debug!("Collecting roots");
+    let mut roots = Vec::new();
+
+    {
+        let mut visitor = RootCollector {
+            ccx: ccx,
+            mode: mode,
+            output: &mut roots,
+            enclosing_item: None,
+        };
+
+        ccx.tcx().map.krate().visit_all_items(&mut visitor);
+    }
+
+    roots
+}
+
+// Collect all monomorphized translation items reachable from `starting_point`
+fn collect_items_rec<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>,
+                                   starting_point: TransItem<'tcx>,
+                                   visited: &mut FnvHashSet<TransItem<'tcx>>,
+                                   recursion_depths: &mut DefIdMap<usize>) {
+    if !visited.insert(starting_point.clone()) {
+        // We've been here already, no need to search again.
+        return;
+    }
+    debug!("BEGIN collect_items_rec({})", starting_point.to_string(ccx));
+
+    let mut neighbors = Vec::new();
+    let recursion_depth_reset;
+
+    match starting_point {
+        TransItem::DropGlue(t) => {
+            find_drop_glue_neighbors(ccx, t, &mut neighbors);
+            recursion_depth_reset = None;
+        }
+        TransItem::Static(_) => {
+            recursion_depth_reset = None;
+        }
+        TransItem::Fn(instance) => {
+            // Keep track of the monomorphization recursion depth
+            recursion_depth_reset = Some(check_recursion_limit(ccx,
+                                                               instance,
+                                                               recursion_depths));
+
+            // Scan the MIR in order to find function calls, closures, and
+            // drop-glue
+            let mir = errors::expect(ccx.sess().diagnostic(), ccx.get_mir(instance.def),
+                || format!("Could not find MIR for function: {}", instance));
+
+            let mut visitor = MirNeighborCollector {
+                ccx: ccx,
+                mir: &mir,
+                output: &mut neighbors,
+                param_substs: instance.substs
+            };
+
+            visitor.visit_mir(&mir);
+        }
+    }
+
+    for neighbour in neighbors {
+        collect_items_rec(ccx, neighbour, visited, recursion_depths);
+    }
+
+    if let Some((def_id, depth)) = recursion_depth_reset {
+        recursion_depths.insert(def_id, depth);
+    }
+
+    debug!("END collect_items_rec({})", starting_point.to_string(ccx));
+}
+
+fn check_recursion_limit<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>,
+                                       instance: Instance<'tcx>,
+                                       recursion_depths: &mut DefIdMap<usize>)
+                                       -> (DefId, usize) {
+    let recursion_depth = recursion_depths.get(&instance.def)
+                                          .map(|x| *x)
+                                          .unwrap_or(0);
+    debug!(" => recursion depth={}", recursion_depth);
+
+    // Code that needs to instantiate the same function recursively
+    // more than the recursion limit is assumed to be causing an
+    // infinite expansion.
+    if recursion_depth > ccx.sess().recursion_limit.get() {
+        let error = format!("reached the recursion limit while instantiating `{}`",
+                            instance);
+        if let Some(node_id) = ccx.tcx().map.as_local_node_id(instance.def) {
+            ccx.sess().span_fatal(ccx.tcx().map.span(node_id), &error);
+        } else {
+            ccx.sess().fatal(&error);
+        }
+    }
+
+    recursion_depths.insert(instance.def, recursion_depth + 1);
+
+    (instance.def, recursion_depth)
+}
+
+struct MirNeighborCollector<'a, 'tcx: 'a> {
+    ccx: &'a CrateContext<'a, 'tcx>,
+    mir: &'a mir::Mir<'tcx>,
+    output: &'a mut Vec<TransItem<'tcx>>,
+    param_substs: &'tcx Substs<'tcx>
+}
+
+impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
+
+    fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>) {
+        debug!("visiting rvalue {:?}", *rvalue);
+
+        match *rvalue {
+            mir::Rvalue::Aggregate(mir::AggregateKind::Closure(def_id,
+                                                               ref substs), _) => {
+                assert!(can_have_local_instance(self.ccx, def_id));
+                let trans_item = create_fn_trans_item(self.ccx,
+                                                      def_id,
+                                                      substs.func_substs,
+                                                      self.param_substs);
+                self.output.push(trans_item);
+            }
+            // When doing a cast from a regular pointer to a fat pointer, we
+            // have to instantiate all methods of the trait being cast to, so we
+            // can build the appropriate vtable.
+            mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, target_ty) => {
+                let target_ty = monomorphize::apply_param_substs(self.ccx.tcx(),
+                                                                 self.param_substs,
+                                                                 &target_ty);
+                let source_ty = self.mir.operand_ty(self.ccx.tcx(), operand);
+                let source_ty = monomorphize::apply_param_substs(self.ccx.tcx(),
+                                                                 self.param_substs,
+                                                                 &source_ty);
+                let (source_ty, target_ty) = find_vtable_types_for_unsizing(self.ccx,
+                                                                            source_ty,
+                                                                            target_ty);
+                // This could also be a different Unsize instruction, like
+                // from a fixed-size array to a slice. But we are only
+                // interested in things that produce a vtable.
+                if target_ty.is_trait() && !source_ty.is_trait() {
+                    create_trans_items_for_vtable_methods(self.ccx,
+                                                          target_ty,
+                                                          source_ty,
+                                                          self.output);
+                }
+            }
+            mir::Rvalue::Box(_) => {
+                let exchange_malloc_fn_def_id =
+                    self.ccx
+                        .tcx()
+                        .lang_items
+                        .require(ExchangeMallocFnLangItem)
+                        .unwrap_or_else(|e| self.ccx.sess().fatal(&e));
+
+                assert!(can_have_local_instance(self.ccx, exchange_malloc_fn_def_id));
+                let exchange_malloc_fn_trans_item =
+                    create_fn_trans_item(self.ccx,
+                                         exchange_malloc_fn_def_id,
+                                         &Substs::empty(),
+                                         self.param_substs);
+
+                self.output.push(exchange_malloc_fn_trans_item);
+            }
+            _ => { /* not interesting */ }
+        }
+
+        self.super_rvalue(rvalue);
+    }
+
+    fn visit_lvalue(&mut self,
+                    lvalue: &mir::Lvalue<'tcx>,
+                    context: mir_visit::LvalueContext) {
+        debug!("visiting lvalue {:?}", *lvalue);
+
+        if let mir_visit::LvalueContext::Drop = context {
+            let ty = self.mir.lvalue_ty(self.ccx.tcx(), lvalue)
+                             .to_ty(self.ccx.tcx());
+
+            let ty = monomorphize::apply_param_substs(self.ccx.tcx(),
+                                                      self.param_substs,
+                                                      &ty);
+            let ty = self.ccx.tcx().erase_regions(&ty);
+            let ty = glue::get_drop_glue_type(self.ccx, ty);
+            self.output.push(TransItem::DropGlue(ty));
+        }
+
+        self.super_lvalue(lvalue, context);
+    }
+
+    fn visit_operand(&mut self, operand: &mir::Operand<'tcx>) {
+        debug!("visiting operand {:?}", *operand);
+
+        let callee = match *operand {
+            mir::Operand::Constant(mir::Constant { ty: &ty::TyS {
+                sty: ty::TyFnDef(def_id, substs, _), ..
+            }, .. }) => Some((def_id, substs)),
+            _ => None
+        };
+
+        if let Some((callee_def_id, callee_substs)) = callee {
+            debug!(" => operand is callable");
+
+            // `callee_def_id` might refer to a trait method instead of a
+            // concrete implementation, so we have to find the actual
+            // implementation. For example, the call might look like
+            //
+            // std::cmp::partial_cmp(0i32, 1i32)
+            //
+            // Calling do_static_dispatch() here will map the def_id of
+            // `std::cmp::partial_cmp` to the def_id of `i32::partial_cmp<i32>`
+            let dispatched = do_static_dispatch(self.ccx,
+                                                callee_def_id,
+                                                callee_substs,
+                                                self.param_substs);
+
+            if let Some((callee_def_id, callee_substs)) = dispatched {
+                // if we have a concrete impl (which we might not have
+                // in the case of something compiler generated like an
+                // object shim or a closure that is handled differently),
+                // we check if the callee is something that will actually
+                // result in a translation item ...
+                if can_result_in_trans_item(self.ccx, callee_def_id) {
+                    // ... and create one if it does.
+                    let trans_item = create_fn_trans_item(self.ccx,
+                                                          callee_def_id,
+                                                          callee_substs,
+                                                          self.param_substs);
+                    self.output.push(trans_item);
+                }
+            }
+        }
+
+        self.super_operand(operand);
+
+        fn can_result_in_trans_item<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                              def_id: DefId)
+                                              -> bool {
+            if !match ccx.tcx().lookup_item_type(def_id).ty.sty {
+                ty::TyFnDef(def_id, _, _) => {
+                    // Some constructors also have type TyFnDef but they are
+                    // always instantiated inline and don't result in
+                    // a translation item. Same for FFI functions.
+                    match ccx.tcx().map.get_if_local(def_id) {
+                        Some(hir_map::NodeVariant(_))    |
+                        Some(hir_map::NodeStructCtor(_)) |
+                        Some(hir_map::NodeForeignItem(_)) => false,
+                        Some(_) => true,
+                        None => {
+                            ccx.sess().cstore.variant_kind(def_id).is_none()
+                        }
+                    }
+                }
+                ty::TyClosure(..) => true,
+                _ => false
+            } {
+                return false;
+            }
+
+            can_have_local_instance(ccx, def_id)
+        }
+    }
+}
+
+fn can_have_local_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                     def_id: DefId)
+                                     -> bool {
+    // Take a look if we have the definition available. If not, we
+    // will not emit code for this item in the local crate, and thus
+    // don't create a translation item for it.
+    def_id.is_local() || ccx.sess().cstore.is_item_mir_available(def_id)
+}
+
+fn find_drop_glue_neighbors<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                      ty: ty::Ty<'tcx>,
+                                      output: &mut Vec<TransItem<'tcx>>)
+{
+    debug!("find_drop_glue_neighbors: {}", type_to_string(ccx, ty));
+
+    // Make sure the exchange_free_fn() lang-item gets translated if
+    // there is a boxed value.
+    if let ty::TyBox(_) = ty.sty {
+        let exchange_free_fn_def_id = ccx.tcx()
+                                         .lang_items
+                                         .require(ExchangeFreeFnLangItem)
+                                         .unwrap_or_else(|e| ccx.sess().fatal(&e));
+
+        assert!(can_have_local_instance(ccx, exchange_free_fn_def_id));
+        let exchange_free_fn_trans_item =
+            create_fn_trans_item(ccx,
+                                 exchange_free_fn_def_id,
+                                 &Substs::empty(),
+                                 &Substs::empty());
+
+        output.push(exchange_free_fn_trans_item);
+    }
+
+    // If the type implements Drop, also add a translation item for the
+    // monomorphized Drop::drop() implementation.
+    let destructor_did = match ty.sty {
+        ty::TyStruct(def, _) |
+        ty::TyEnum(def, _)   => def.destructor(),
+        _ => None
+    };
+
+    if let Some(destructor_did) = destructor_did {
+        use rustc::ty::ToPolyTraitRef;
+
+        let drop_trait_def_id = ccx.tcx()
+                                   .lang_items
+                                   .drop_trait()
+                                   .unwrap();
+
+        let self_type_substs = ccx.tcx().mk_substs(
+            Substs::empty().with_self_ty(ty));
+
+        let trait_ref = ty::TraitRef {
+            def_id: drop_trait_def_id,
+            substs: self_type_substs,
+        }.to_poly_trait_ref();
+
+        let substs = match fulfill_obligation(ccx, DUMMY_SP, trait_ref) {
+            traits::VtableImpl(data) => data.substs,
+            _ => bug!()
+        };
+
+        if can_have_local_instance(ccx, destructor_did) {
+            let trans_item = create_fn_trans_item(ccx,
+                                                  destructor_did,
+                                                  substs,
+                                                  &Substs::empty());
+            output.push(trans_item);
+        }
+    }
+
+    // Finally add the types of nested values
+    match ty.sty {
+        ty::TyBool      |
+        ty::TyChar      |
+        ty::TyInt(_)    |
+        ty::TyUint(_)   |
+        ty::TyStr       |
+        ty::TyFloat(_)  |
+        ty::TyRawPtr(_) |
+        ty::TyRef(..)   |
+        ty::TyFnDef(..) |
+        ty::TyFnPtr(_)  |
+        ty::TySlice(_)  |
+        ty::TyTrait(_)  => {
+            /* nothing to do */
+        }
+        ty::TyStruct(ref adt_def, substs) |
+        ty::TyEnum(ref adt_def, substs) => {
+            for field in adt_def.all_fields() {
+                let field_type = monomorphize::apply_param_substs(ccx.tcx(),
+                                                                  substs,
+                                                                  &field.unsubst_ty());
+                let field_type = glue::get_drop_glue_type(ccx, field_type);
+
+                if glue::type_needs_drop(ccx.tcx(), field_type) {
+                    output.push(TransItem::DropGlue(field_type));
+                }
+            }
+        }
+        ty::TyClosure(_, ref substs) => {
+            for upvar_ty in &substs.upvar_tys {
+                let upvar_ty = glue::get_drop_glue_type(ccx, upvar_ty);
+                if glue::type_needs_drop(ccx.tcx(), upvar_ty) {
+                    output.push(TransItem::DropGlue(upvar_ty));
+                }
+            }
+        }
+        ty::TyBox(inner_type)      |
+        ty::TyArray(inner_type, _) => {
+            let inner_type = glue::get_drop_glue_type(ccx, inner_type);
+            if glue::type_needs_drop(ccx.tcx(), inner_type) {
+                output.push(TransItem::DropGlue(inner_type));
+            }
+        }
+        ty::TyTuple(ref args) => {
+            for arg in args {
+                let arg = glue::get_drop_glue_type(ccx, arg);
+                if glue::type_needs_drop(ccx.tcx(), arg) {
+                    output.push(TransItem::DropGlue(arg));
+                }
+            }
+        }
+        ty::TyProjection(_) |
+        ty::TyParam(_)      |
+        ty::TyInfer(_)      |
+        ty::TyError         => {
+            bug!("encountered unexpected type");
+        }
+    }
+}
+
+fn do_static_dispatch<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                fn_def_id: DefId,
+                                fn_substs: &'tcx Substs<'tcx>,
+                                param_substs: &'tcx Substs<'tcx>)
+                                -> Option<(DefId, &'tcx Substs<'tcx>)> {
+    debug!("do_static_dispatch(fn_def_id={}, fn_substs={:?}, param_substs={:?})",
+           def_id_to_string(ccx, fn_def_id),
+           fn_substs,
+           param_substs);
+
+    let is_trait_method = ccx.tcx().trait_of_item(fn_def_id).is_some();
+
+    if is_trait_method {
+        match ccx.tcx().impl_or_trait_item(fn_def_id) {
+            ty::MethodTraitItem(ref method) => {
+                match method.container {
+                    ty::TraitContainer(trait_def_id) => {
+                        debug!(" => trait method, attempting to find impl");
+                        do_static_trait_method_dispatch(ccx,
+                                                        method,
+                                                        trait_def_id,
+                                                        fn_substs,
+                                                        param_substs)
+                    }
+                    ty::ImplContainer(_) => {
+                        // This is already a concrete implementation
+                        debug!(" => impl method");
+                        Some((fn_def_id, fn_substs))
+                    }
+                }
+            }
+            _ => bug!()
+        }
+    } else {
+        debug!(" => regular function");
+        // The function is not part of an impl or trait, no dispatching
+        // to be done
+        Some((fn_def_id, fn_substs))
+    }
+}
+
+// Given a trait-method and substitution information, find out the actual
+// implementation of the trait method.
+fn do_static_trait_method_dispatch<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                             trait_method: &ty::Method,
+                                             trait_id: DefId,
+                                             callee_substs: &'tcx Substs<'tcx>,
+                                             param_substs: &'tcx Substs<'tcx>)
+                                             -> Option<(DefId, &'tcx Substs<'tcx>)> {
+    let tcx = ccx.tcx();
+    debug!("do_static_trait_method_dispatch(trait_method={}, \
+                                            trait_id={}, \
+                                            callee_substs={:?}, \
+                                            param_substs={:?}",
+           def_id_to_string(ccx, trait_method.def_id),
+           def_id_to_string(ccx, trait_id),
+           callee_substs,
+           param_substs);
+
+    let rcvr_substs = monomorphize::apply_param_substs(tcx,
+                                                       param_substs,
+                                                       callee_substs);
+
+    let trait_ref = ty::Binder(rcvr_substs.to_trait_ref(tcx, trait_id));
+    let vtbl = fulfill_obligation(ccx, DUMMY_SP, trait_ref);
+
+    // Now that we know which impl is being used, we can dispatch to
+    // the actual function:
+    match vtbl {
+        traits::VtableImpl(traits::VtableImplData {
+            impl_def_id: impl_did,
+            substs: impl_substs,
+            nested: _ }) =>
+        {
+            let callee_substs = impl_substs.with_method_from(&rcvr_substs);
+            let impl_method = meth::get_impl_method(tcx,
+                                                    impl_did,
+                                                    tcx.mk_substs(callee_substs),
+                                                    trait_method.name);
+            Some((impl_method.method.def_id, &impl_method.substs))
+        }
+        // If we have a closure or a function pointer, we will also encounter
+        // the concrete closure/function somewhere else (during closure or fn
+        // pointer construction). That's where we track those things.
+        traits::VtableClosure(..) |
+        traits::VtableFnPointer(..) |
+        traits::VtableObject(..) => {
+            None
+        }
+        _ => {
+            bug!("static call to invalid vtable: {:?}", vtbl)
+        }
+    }
+}
+
+/// For a given pair of source and target types that occur in an unsizing coercion,
+/// this function finds the pair of types that determines the vtable linking
+/// them.
+///
+/// For example, the source type might be `&SomeStruct` and the target type
+/// might be `&SomeTrait` in a cast like:
+///
+/// let src: &SomeStruct = ...;
+/// let target = src as &SomeTrait;
+///
+/// Then the output of this function would be (SomeStruct, SomeTrait) since for
+/// constructing the `target` fat-pointer we need the vtable for that pair.
+///
+/// Things can get more complicated though because there's also the case where
+/// the unsized type occurs as a field:
+///
+/// ```rust
+/// struct ComplexStruct<T: ?Sized> {
+///    a: u32,
+///    b: f64,
+///    c: T
+/// }
+/// ```
+///
+/// In this case, if `T` is sized, `&ComplexStruct<T>` is a thin pointer. If `T`
+/// is unsized, `&ComplexStruct<T>` is a fat pointer, and the vtable it points to is
+/// for the pair of `T` (which is a trait) and the concrete type that `T` was
+/// originally coerced from:
+///
+/// let src: &ComplexStruct<SomeStruct> = ...;
+/// let target = src as &ComplexStruct<SomeTrait>;
+///
+/// Again, we want this `find_vtable_types_for_unsizing()` to provide the pair
+/// `(SomeStruct, SomeTrait)`.
+///
+/// Finally, there is also the case of custom unsizing coercions, e.g. for
+/// smart pointers such as `Rc` and `Arc`.
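+///
+/// For illustration (assuming the standard library's `CoerceUnsized` impl for
+/// `Rc`, and the hypothetical types above), such a coercion might look like:
+///
+/// let src: Rc<ComplexStruct<SomeStruct>> = ...;
+/// let target: Rc<ComplexStruct<SomeTrait>> = src;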
+fn find_vtable_types_for_unsizing<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                            source_ty: ty::Ty<'tcx>,
+                                            target_ty: ty::Ty<'tcx>)
+                                            -> (ty::Ty<'tcx>, ty::Ty<'tcx>) {
+    match (&source_ty.sty, &target_ty.sty) {
+        (&ty::TyBox(a), &ty::TyBox(b)) |
+        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
+         &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
+        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
+         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
+        (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
+         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
+            let (inner_source, inner_target) = (a, b);
+
+            if !type_is_sized(ccx.tcx(), inner_source) {
+                (inner_source, inner_target)
+            } else {
+                ccx.tcx().struct_lockstep_tails(inner_source, inner_target)
+            }
+        }
+
+        (&ty::TyStruct(source_adt_def, source_substs),
+         &ty::TyStruct(target_adt_def, target_substs)) => {
+            assert_eq!(source_adt_def, target_adt_def);
+
+            let kind = custom_coerce_unsize_info(ccx, source_ty, target_ty);
+
+            let coerce_index = match kind {
+                CustomCoerceUnsized::Struct(i) => i
+            };
+
+            let source_fields = &source_adt_def.struct_variant().fields;
+            let target_fields = &target_adt_def.struct_variant().fields;
+
+            assert!(coerce_index < source_fields.len() &&
+                    source_fields.len() == target_fields.len());
+
+            find_vtable_types_for_unsizing(ccx,
+                                           source_fields[coerce_index].ty(ccx.tcx(),
+                                                                          source_substs),
+                                           target_fields[coerce_index].ty(ccx.tcx(),
+                                                                          target_substs))
+        }
+        _ => bug!("find_vtable_types_for_unsizing: invalid coercion {:?} -> {:?}",
+                  source_ty,
+                  target_ty)
+    }
+}
+
+fn create_fn_trans_item<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                  def_id: DefId,
+                                  fn_substs: &Substs<'tcx>,
+                                  param_substs: &Substs<'tcx>)
+                                  -> TransItem<'tcx>
+{
+    debug!("create_fn_trans_item(def_id={}, fn_substs={:?}, param_substs={:?})",
+            def_id_to_string(ccx, def_id),
+            fn_substs,
+            param_substs);
+
+    // We only get here if def_id either designates a local item or
+    // an inlineable external item. Non-inlineable external items are
+    // ignored because we don't want to generate any code for them.
+    let concrete_substs = monomorphize::apply_param_substs(ccx.tcx(),
+                                                           param_substs,
+                                                           fn_substs);
+    let concrete_substs = ccx.tcx().erase_regions(&concrete_substs);
+
+    let trans_item =
+        TransItem::Fn(Instance::new(def_id,
+                                    &ccx.tcx().mk_substs(concrete_substs)));
+
+    return trans_item;
+}
+
+/// Creates a `TransItem` for each method that is referenced by the vtable for
+/// the given trait/impl pair.
+fn create_trans_items_for_vtable_methods<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                                   trait_ty: ty::Ty<'tcx>,
+                                                   impl_ty: ty::Ty<'tcx>,
+                                                   output: &mut Vec<TransItem<'tcx>>) {
+    assert!(!trait_ty.needs_subst() && !impl_ty.needs_subst());
+
+    if let ty::TyTrait(ref trait_ty) = trait_ty.sty {
+        let poly_trait_ref = trait_ty.principal_trait_ref_with_self_ty(ccx.tcx(),
+                                                                       impl_ty);
+
+        // Walk all methods of the trait, including those of its supertraits
+        for trait_ref in traits::supertraits(ccx.tcx(), poly_trait_ref) {
+            let vtable = fulfill_obligation(ccx, DUMMY_SP, trait_ref);
+            match vtable {
+                traits::VtableImpl(
+                    traits::VtableImplData {
+                        impl_def_id,
+                        substs,
+                        nested: _ }) => {
+                    let items = meth::get_vtable_methods(ccx, impl_def_id, substs)
+                        .into_iter()
+                        // filter out None values
+                        .filter_map(|opt_impl_method| opt_impl_method)
+                        // create translation items
+                        .filter_map(|impl_method| {
+                            if can_have_local_instance(ccx, impl_method.method.def_id) {
+                                Some(create_fn_trans_item(ccx,
+                                                          impl_method.method.def_id,
+                                                          &impl_method.substs,
+                                                          &Substs::empty()))
+                            } else {
+                                None
+                            }
+                        })
+                        .collect::<Vec<_>>();
+
+                    output.extend(items.into_iter());
+                }
+                _ => { /* */ }
+            }
+        }
+    }
+}
+
+//=-----------------------------------------------------------------------------
+// Root Collection
+//=-----------------------------------------------------------------------------
+
+struct RootCollector<'b, 'a: 'b, 'tcx: 'a + 'b> {
+    ccx: &'b CrateContext<'a, 'tcx>,
+    mode: TransItemCollectionMode,
+    output: &'b mut Vec<TransItem<'tcx>>,
+    enclosing_item: Option<&'tcx hir::Item>,
+}
+
+impl<'b, 'a, 'v> hir_visit::Visitor<'v> for RootCollector<'b, 'a, 'v> {
+    fn visit_item(&mut self, item: &'v hir::Item) {
+        let old_enclosing_item = self.enclosing_item;
+        self.enclosing_item = Some(item);
+
+        match item.node {
+            hir::ItemExternCrate(..) |
+            hir::ItemUse(..)         |
+            hir::ItemForeignMod(..)  |
+            hir::ItemTy(..)          |
+            hir::ItemDefaultImpl(..) |
+            hir::ItemTrait(..)       |
+            hir::ItemConst(..)       |
+            hir::ItemMod(..)         => {
+                // Nothing to do, just keep recursing...
+            }
+
+            hir::ItemImpl(..) => {
+                if self.mode == TransItemCollectionMode::Eager {
+                    create_trans_items_for_default_impls(self.ccx,
+                                                         item,
+                                                         self.output);
+                }
+            }
+
+            hir::ItemEnum(_, ref generics)        |
+            hir::ItemStruct(_, ref generics)      => {
+                if !generics.is_parameterized() {
+                    let ty = {
+                        let tables = self.ccx.tcx().tables.borrow();
+                        tables.node_types[&item.id]
+                    };
+
+                    if self.mode == TransItemCollectionMode::Eager {
+                        debug!("RootCollector: ADT drop-glue for {}",
+                               def_id_to_string(self.ccx,
+                                                self.ccx.tcx().map.local_def_id(item.id)));
+
+                        let ty = glue::get_drop_glue_type(self.ccx, ty);
+                        self.output.push(TransItem::DropGlue(ty));
+                    }
+                }
+            }
+            hir::ItemStatic(..) => {
+                debug!("RootCollector: ItemStatic({})",
+                       def_id_to_string(self.ccx,
+                                        self.ccx.tcx().map.local_def_id(item.id)));
+                self.output.push(TransItem::Static(item.id));
+            }
+            hir::ItemFn(_, _, constness, _, ref generics, _) => {
+                if !generics.is_type_parameterized() &&
+                   constness == hir::Constness::NotConst {
+                    let def_id = self.ccx.tcx().map.local_def_id(item.id);
+
+                    debug!("RootCollector: ItemFn({})",
+                           def_id_to_string(self.ccx, def_id));
+
+                    let instance = Instance::mono(self.ccx.tcx(), def_id);
+                    self.output.push(TransItem::Fn(instance));
+                }
+            }
+        }
+
+        hir_visit::walk_item(self, item);
+        self.enclosing_item = old_enclosing_item;
+    }
+
+    fn visit_impl_item(&mut self, ii: &'v hir::ImplItem) {
+        match ii.node {
+            hir::ImplItemKind::Method(hir::MethodSig {
+                ref generics,
+                constness,
+                ..
+            }, _) if constness == hir::Constness::NotConst => {
+                let hir_map = &self.ccx.tcx().map;
+                let parent_node_id = hir_map.get_parent_node(ii.id);
+                let is_impl_generic = match hir_map.expect_item(parent_node_id) {
+                    &hir::Item {
+                        node: hir::ItemImpl(_, _, ref generics, _, _, _),
+                        ..
+                    } => {
+                        generics.is_type_parameterized()
+                    }
+                    _ => {
+                        bug!()
+                    }
+                };
+
+                if !generics.is_type_parameterized() && !is_impl_generic {
+                    let def_id = self.ccx.tcx().map.local_def_id(ii.id);
+
+                    debug!("RootCollector: MethodImplItem({})",
+                           def_id_to_string(self.ccx, def_id));
+
+                    let instance = Instance::mono(self.ccx.tcx(), def_id);
+                    self.output.push(TransItem::Fn(instance));
+                }
+            }
+            _ => { /* Nothing to do here */ }
+        }
+
+        hir_visit::walk_impl_item(self, ii)
+    }
+}
+
+fn create_trans_items_for_default_impls<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                                  item: &'tcx hir::Item,
+                                                  output: &mut Vec<TransItem<'tcx>>) {
+    match item.node {
+        hir::ItemImpl(_,
+                      _,
+                      ref generics,
+                      _,
+                      _,
+                      ref items) => {
+            if generics.is_type_parameterized() {
+                return
+            }
+
+            let tcx = ccx.tcx();
+            let impl_def_id = tcx.map.local_def_id(item.id);
+
+            debug!("create_trans_items_for_default_impls(item={})",
+                   def_id_to_string(ccx, impl_def_id));
+
+            if let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) {
+                let default_impls = tcx.provided_trait_methods(trait_ref.def_id);
+                let callee_substs = tcx.mk_substs(tcx.erase_regions(trait_ref.substs));
+                let overridden_methods: FnvHashSet<_> = items.iter()
+                                                             .map(|item| item.name)
+                                                             .collect();
+                for default_impl in default_impls {
+                    if overridden_methods.contains(&default_impl.name) {
+                        continue;
+                    }
+
+                    if default_impl.generics.has_type_params(subst::FnSpace) {
+                        continue;
+                    }
+
+                    // The substitutions we have are on the impl, so we grab
+                    // the method type from the impl to substitute into.
+                    let mth = meth::get_impl_method(tcx,
+                                                    impl_def_id,
+                                                    callee_substs,
+                                                    default_impl.name);
+
+                    assert!(mth.is_provided);
+
+                    let predicates = mth.method.predicates.predicates.subst(tcx, &mth.substs);
+                    if !normalize_and_test_predicates(ccx, predicates.into_vec()) {
+                        continue;
+                    }
+
+                    if can_have_local_instance(ccx, default_impl.def_id) {
+                        let empty_substs = ccx.tcx().mk_substs(ccx.tcx().erase_regions(mth.substs));
+                        let item = create_fn_trans_item(ccx,
+                                                        default_impl.def_id,
+                                                        callee_substs,
+                                                        empty_substs);
+                        output.push(item);
+                    }
+                }
+            }
+        }
+        _ => {
+            bug!()
+        }
+    }
+}
+
+//=-----------------------------------------------------------------------------
+// TransItem String Keys
+//=-----------------------------------------------------------------------------
+
+// The code below allows for producing a unique string key for a trans item.
+// These keys are used by the handwritten auto-tests, so they need to be
+// predictable and human-readable.
+//
+// Note: A lot of this code looks very similar to what's already in the
+//       ppaux module. It would be good to refactor things so we only have one
+//       parameterizable implementation for printing types.
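+//
+// For illustration (hypothetical crate and item names), a key produced by
+// TransItem::to_string() via the helpers below would look something like:
+//
+//     fn my_crate::module[0]::some_generic_fn[0]<u32>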
+
+/// Same as `unique_type_name()` but with the result pushed onto the given
+/// `output` parameter.
+pub fn push_unique_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                       t: ty::Ty<'tcx>,
+                                       output: &mut String) {
+    match t.sty {
+        ty::TyBool              => output.push_str("bool"),
+        ty::TyChar              => output.push_str("char"),
+        ty::TyStr               => output.push_str("str"),
+        ty::TyInt(ast::IntTy::Is)    => output.push_str("isize"),
+        ty::TyInt(ast::IntTy::I8)    => output.push_str("i8"),
+        ty::TyInt(ast::IntTy::I16)   => output.push_str("i16"),
+        ty::TyInt(ast::IntTy::I32)   => output.push_str("i32"),
+        ty::TyInt(ast::IntTy::I64)   => output.push_str("i64"),
+        ty::TyUint(ast::UintTy::Us)   => output.push_str("usize"),
+        ty::TyUint(ast::UintTy::U8)   => output.push_str("u8"),
+        ty::TyUint(ast::UintTy::U16)  => output.push_str("u16"),
+        ty::TyUint(ast::UintTy::U32)  => output.push_str("u32"),
+        ty::TyUint(ast::UintTy::U64)  => output.push_str("u64"),
+        ty::TyFloat(ast::FloatTy::F32) => output.push_str("f32"),
+        ty::TyFloat(ast::FloatTy::F64) => output.push_str("f64"),
+        ty::TyStruct(adt_def, substs) |
+        ty::TyEnum(adt_def, substs) => {
+            push_item_name(cx, adt_def.did, output);
+            push_type_params(cx, &substs.types, &[], output);
+        },
+        ty::TyTuple(ref component_types) => {
+            output.push('(');
+            for &component_type in component_types {
+                push_unique_type_name(cx, component_type, output);
+                output.push_str(", ");
+            }
+            if !component_types.is_empty() {
+                output.pop();
+                output.pop();
+            }
+            output.push(')');
+        },
+        ty::TyBox(inner_type) => {
+            output.push_str("Box<");
+            push_unique_type_name(cx, inner_type, output);
+            output.push('>');
+        },
+        ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
+            output.push('*');
+            match mutbl {
+                hir::MutImmutable => output.push_str("const "),
+                hir::MutMutable => output.push_str("mut "),
+            }
+
+            push_unique_type_name(cx, inner_type, output);
+        },
+        ty::TyRef(_, ty::TypeAndMut { ty: inner_type, mutbl }) => {
+            output.push('&');
+            if mutbl == hir::MutMutable {
+                output.push_str("mut ");
+            }
+
+            push_unique_type_name(cx, inner_type, output);
+        },
+        ty::TyArray(inner_type, len) => {
+            output.push('[');
+            push_unique_type_name(cx, inner_type, output);
+            output.push_str(&format!("; {}", len));
+            output.push(']');
+        },
+        ty::TySlice(inner_type) => {
+            output.push('[');
+            push_unique_type_name(cx, inner_type, output);
+            output.push(']');
+        },
+        ty::TyTrait(ref trait_data) => {
+            push_item_name(cx, trait_data.principal.skip_binder().def_id, output);
+            push_type_params(cx,
+                             &trait_data.principal.skip_binder().substs.types,
+                             &trait_data.bounds.projection_bounds,
+                             output);
+        },
+        ty::TyFnDef(_, _, &ty::BareFnTy{ unsafety, abi, ref sig } ) |
+        ty::TyFnPtr(&ty::BareFnTy{ unsafety, abi, ref sig } ) => {
+            if unsafety == hir::Unsafety::Unsafe {
+                output.push_str("unsafe ");
+            }
+
+            if abi != ::abi::Abi::Rust {
+                output.push_str("extern \"");
+                output.push_str(abi.name());
+                output.push_str("\" ");
+            }
+
+            output.push_str("fn(");
+
+            let sig = cx.tcx().erase_late_bound_regions(sig);
+            if !sig.inputs.is_empty() {
+                for &parameter_type in &sig.inputs {
+                    push_unique_type_name(cx, parameter_type, output);
+                    output.push_str(", ");
+                }
+                output.pop();
+                output.pop();
+            }
+
+            if sig.variadic {
+                if !sig.inputs.is_empty() {
+                    output.push_str(", ...");
+                } else {
+                    output.push_str("...");
+                }
+            }
+
+            output.push(')');
+
+            match sig.output {
+                ty::FnConverging(result_type) if result_type.is_nil() => {}
+                ty::FnConverging(result_type) => {
+                    output.push_str(" -> ");
+                    push_unique_type_name(cx, result_type, output);
+                }
+                ty::FnDiverging => {
+                    output.push_str(" -> !");
+                }
+            }
+        },
+        ty::TyClosure(def_id, ref closure_substs) => {
+            push_item_name(cx, def_id, output);
+            output.push_str("{");
+            output.push_str(&format!("{}:{}", def_id.krate, def_id.index.as_usize()));
+            output.push_str("}");
+            push_type_params(cx, &closure_substs.func_substs.types, &[], output);
+        }
+        ty::TyError |
+        ty::TyInfer(_) |
+        ty::TyProjection(..) |
+        ty::TyParam(_) => {
+            bug!("debuginfo: Trying to create type name for \
+                  unexpected type: {:?}", t);
+        }
+    }
+}
+
+fn push_item_name(ccx: &CrateContext,
+                  def_id: DefId,
+                  output: &mut String) {
+    let def_path = ccx.tcx().def_path(def_id);
+
+    // some_crate::
+    output.push_str(&ccx.tcx().crate_name(def_path.krate));
+    output.push_str("::");
+
+    // foo::bar::ItemName::
+    for part in ccx.tcx().def_path(def_id).data {
+        output.push_str(&format!("{}[{}]::",
+                        part.data.as_interned_str(),
+                        part.disambiguator));
+    }
+
+    // remove final "::"
+    output.pop();
+    output.pop();
+}
+
+fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                              types: &'tcx subst::VecPerParamSpace<Ty<'tcx>>,
+                              projections: &[ty::PolyProjectionPredicate<'tcx>],
+                              output: &mut String) {
+    if types.is_empty() && projections.is_empty() {
+        return;
+    }
+
+    output.push('<');
+
+    for &type_parameter in types {
+        push_unique_type_name(cx, type_parameter, output);
+        output.push_str(", ");
+    }
+
+    for projection in projections {
+        let projection = projection.skip_binder();
+        let name = token::get_ident_interner().get(projection.projection_ty.item_name);
+        output.push_str(&name[..]);
+        output.push_str("=");
+        push_unique_type_name(cx, projection.ty, output);
+        output.push_str(", ");
+    }
+
+    output.pop();
+    output.pop();
+
+    output.push('>');
+}
+
+fn push_instance_as_string<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                     instance: Instance<'tcx>,
+                                     output: &mut String) {
+    push_item_name(ccx, instance.def, output);
+    push_type_params(ccx, &instance.substs.types, &[], output);
+}
+
+fn def_id_to_string(ccx: &CrateContext, def_id: DefId) -> String {
+    let mut output = String::new();
+    push_item_name(ccx, def_id, &mut output);
+    output
+}
+
+fn type_to_string<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                            ty: ty::Ty<'tcx>)
+                            -> String {
+    let mut output = String::new();
+    push_unique_type_name(ccx, ty, &mut output);
+    output
+}
+
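+// As a rough illustration (the names here are purely hypothetical), these
+// helpers might render a monomorphized type as something along the lines of
+// "Box<mycrate::node[0]::Node[0]<u32>>", where the bracketed numbers are the
+// def-path disambiguators appended by push_item_name.
+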
+impl<'tcx> TransItem<'tcx> {
+
+    pub fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String {
+        let hir_map = &ccx.tcx().map;
+
+        return match *self {
+            TransItem::DropGlue(t) => {
+                let mut s = String::with_capacity(32);
+                s.push_str("drop-glue ");
+                push_unique_type_name(ccx, t, &mut s);
+                s
+            }
+            TransItem::Fn(instance) => {
+                to_string_internal(ccx, "fn ", instance)
+            },
+            TransItem::Static(node_id) => {
+                let def_id = hir_map.local_def_id(node_id);
+                let instance = Instance::mono(ccx.tcx(), def_id);
+                to_string_internal(ccx, "static ", instance)
+            },
+        };
+
+        fn to_string_internal<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                        prefix: &str,
+                                        instance: Instance<'tcx>)
+                                        -> String {
+            let mut result = String::with_capacity(32);
+            result.push_str(prefix);
+            push_instance_as_string(ccx, instance, &mut result);
+            result
+        }
+    }
+
+    fn to_raw_string(&self) -> String {
+        match *self {
+            TransItem::DropGlue(t) => {
+                format!("DropGlue({})", t as *const _ as usize)
+            }
+            TransItem::Fn(instance) => {
+                format!("Fn({:?}, {})",
+                         instance.def,
+                         instance.substs as *const _ as usize)
+            }
+            TransItem::Static(id) => {
+                format!("Static({:?})", id)
+            }
+        }
+    }
+}
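+
+// A collected item therefore renders as, e.g. (hypothetical names),
+// "fn mycrate::foo[0]<u32>", "static mycrate::BAR[0]", or
+// "drop-glue mycrate::Baz[0]".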
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub enum TransItemState {
+    PredictedAndGenerated,
+    PredictedButNotGenerated,
+    NotPredictedButGenerated,
+}
+
+pub fn collecting_debug_information(ccx: &CrateContext) -> bool {
+    return cfg!(debug_assertions) &&
+           ccx.sess().opts.debugging_opts.print_trans_items.is_some();
+}
+
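+// In other words, the statistics below are only gathered in builds with debug
+// assertions enabled and with the `print_trans_items` debugging option set.
+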
+pub fn print_collection_results<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>) {
+    use std::hash::{Hash, SipHasher, Hasher};
+
+    if !collecting_debug_information(ccx) {
+        return;
+    }
+
+    fn hash<T: Hash>(t: &T) -> u64 {
+        let mut s = SipHasher::new();
+        t.hash(&mut s);
+        s.finish()
+    }
+
+    let trans_items = ccx.translation_items().borrow();
+
+    {
+        // Check for duplicate item keys
+        let mut item_keys = FnvHashMap();
+
+        for (item, item_state) in trans_items.iter() {
+            let k = item.to_string(&ccx);
+
+            if item_keys.contains_key(&k) {
+                let prev: (TransItem, TransItemState) = item_keys[&k];
+                debug!("DUPLICATE KEY: {}", k);
+                debug!(" (1) {:?}, {:?}, hash: {}, raw: {}",
+                       prev.0,
+                       prev.1,
+                       hash(&prev.0),
+                       prev.0.to_raw_string());
+
+                debug!(" (2) {:?}, {:?}, hash: {}, raw: {}",
+                       *item,
+                       *item_state,
+                       hash(item),
+                       item.to_raw_string());
+            } else {
+                item_keys.insert(k, (*item, *item_state));
+            }
+        }
+    }
+
+    let mut predicted_but_not_generated = FnvHashSet();
+    let mut not_predicted_but_generated = FnvHashSet();
+    let mut predicted = FnvHashSet();
+    let mut generated = FnvHashSet();
+
+    for (item, item_state) in trans_items.iter() {
+        let item_key = item.to_string(&ccx);
+
+        match *item_state {
+            TransItemState::PredictedAndGenerated => {
+                predicted.insert(item_key.clone());
+                generated.insert(item_key);
+            }
+            TransItemState::PredictedButNotGenerated => {
+                predicted_but_not_generated.insert(item_key.clone());
+                predicted.insert(item_key);
+            }
+            TransItemState::NotPredictedButGenerated => {
+                not_predicted_but_generated.insert(item_key.clone());
+                generated.insert(item_key);
+            }
+        }
+    }
+
+    debug!("Total number of translation items predicted: {}", predicted.len());
+    debug!("Total number of translation items generated: {}", generated.len());
+    debug!("Total number of translation items predicted but not generated: {}",
+           predicted_but_not_generated.len());
+    debug!("Total number of translation items not predicted but generated: {}",
+           not_predicted_but_generated.len());
+
+    if generated.len() > 0 {
+        debug!("Failed to predict {}% of translation items",
+               (100 * not_predicted_but_generated.len()) / generated.len());
+        debug!("Predicted {}% too many translation items",
+               (100 * predicted_but_not_generated.len()) / generated.len());
+    }
+
+    debug!("");
+    debug!("Not predicted but generated:");
+    debug!("============================");
+    for item in not_predicted_but_generated {
+        debug!(" - {}", item);
+    }
+
+    debug!("");
+    debug!("Predicted but not generated:");
+    debug!("============================");
+    for item in predicted_but_not_generated {
+        debug!(" - {}", item);
+    }
+}
diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
new file mode 100644 (file)
index 0000000..5ce7caf
--- /dev/null
+++ b/src/librustc_trans/common.rs
@@ -0,0 +1,1257 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(non_camel_case_types, non_snake_case)]
+
+//! Code that is useful in various trans modules.
+
+use session::Session;
+use llvm;
+use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind};
+use llvm::{True, False, Bool, OperandBundleDef};
+use rustc::cfg;
+use rustc::hir::def::Def;
+use rustc::hir::def_id::DefId;
+use rustc::infer;
+use middle::lang_items::LangItem;
+use rustc::ty::subst::Substs;
+use abi::{Abi, FnType};
+use base;
+use build;
+use builder::Builder;
+use callee::Callee;
+use cleanup;
+use consts;
+use datum;
+use debuginfo::{self, DebugLoc};
+use declare;
+use machine;
+use mir::CachedMir;
+use monomorphize;
+use type_::Type;
+use value::Value;
+use rustc::ty::{self, Ty, TyCtxt};
+use rustc::traits::{self, SelectionContext, ProjectionMode};
+use rustc::ty::fold::{TypeFolder, TypeFoldable};
+use rustc::hir;
+use util::nodemap::NodeMap;
+
+use arena::TypedArena;
+use libc::{c_uint, c_char};
+use std::ops::Deref;
+use std::ffi::CString;
+use std::cell::{Cell, RefCell};
+
+use syntax::ast;
+use syntax::codemap::{DUMMY_SP, Span};
+use syntax::parse::token::InternedString;
+use syntax::parse::token;
+
+pub use context::CrateContext;
+
+/// Is the type's representation size known at compile time?
+pub fn type_is_sized<'tcx>(tcx: &TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
+    ty.is_sized(&tcx.empty_parameter_environment(), DUMMY_SP)
+}
+
+pub fn type_is_fat_ptr<'tcx>(cx: &TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
+    match ty.sty {
+        ty::TyRawPtr(ty::TypeAndMut{ty, ..}) |
+        ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
+        ty::TyBox(ty) => {
+            !type_is_sized(cx, ty)
+        }
+        _ => {
+            false
+        }
+    }
+}
+
+pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
+    use machine::llsize_of_alloc;
+    use type_of::sizing_type_of;
+
+    let tcx = ccx.tcx();
+    let simple = ty.is_scalar() ||
+        ty.is_unique() || ty.is_region_ptr() ||
+        ty.is_simd();
+    if simple && !type_is_fat_ptr(tcx, ty) {
+        return true;
+    }
+    if !type_is_sized(tcx, ty) {
+        return false;
+    }
+    match ty.sty {
+        ty::TyStruct(..) | ty::TyEnum(..) | ty::TyTuple(..) | ty::TyArray(_, _) |
+        ty::TyClosure(..) => {
+            let llty = sizing_type_of(ccx, ty);
+            llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type())
+        }
+        _ => type_is_zero_size(ccx, ty)
+    }
+}
+
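+// For example, scalars such as `u64`, thin references like `&i32`, and small
+// aggregates no larger than the native integer type are typically immediate,
+// while fat pointers (`&[T]`, `&str`, trait objects) and larger aggregates
+// are not.
+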
+/// Identify types which have size zero at runtime.
+pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
+    use machine::llsize_of_alloc;
+    use type_of::sizing_type_of;
+    let llty = sizing_type_of(ccx, ty);
+    llsize_of_alloc(ccx, llty) == 0
+}
+
+/// Generates a unique symbol based off the name given. This is used to create
+/// unique symbols for things like closures.
+pub fn gensym_name(name: &str) -> ast::Name {
+    let num = token::gensym(name).0;
+    // use one colon which will get translated to a period by the mangler, and
+    // we're guaranteed that `num` is globally unique for this crate.
+    token::gensym(&format!("{}:{}", name, num))
+}
+
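+// A hypothetical call such as `gensym_name("closure")` therefore produces a
+// name along the lines of "closure:1234", where 1234 is the gensym index and
+// the ':' is later rewritten to '.' by the symbol mangler.
+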
+/*
+* A note on nomenclature of linking: "extern", "foreign", and "upcall".
+*
+* An "extern" is an LLVM symbol we wind up emitting an undefined external
+* reference to. This means "we don't have the thing in this compilation unit,
+* please make sure you link it in at runtime". This could be a reference to
+* C code found in a C library, or rust code found in a rust crate.
+*
+* Most "externs" are implicitly declared (automatically) as a result of a
+* user declaring an extern _module_ dependency; this causes the rust driver
+* to locate an extern crate, scan its compilation metadata, and emit extern
+* declarations for any symbols used by the declaring crate.
+*
+* A "foreign" is an extern that references C (or other non-rust ABI) code.
+* There is no metadata to scan for extern references so in these cases either
+* a header-digester like bindgen, or manual function prototypes, have to
+* serve as declarators. So these are usually given explicitly as prototype
+* declarations, in rust code, with ABI attributes on them noting which ABI to
+* link via.
+*
+* An "upcall" is a foreign call generated by the compiler (not corresponding
+* to any user-written call in the code) into the runtime library, to perform
+* some helper task such as bringing a task to life, allocating memory, etc.
+*
+*/
+
+use Disr;
+
+#[derive(Copy, Clone)]
+pub struct NodeIdAndSpan {
+    pub id: ast::NodeId,
+    pub span: Span,
+}
+
+pub fn expr_info(expr: &hir::Expr) -> NodeIdAndSpan {
+    NodeIdAndSpan { id: expr.id, span: expr.span }
+}
+
+/// The concrete version of ty::FieldDef. The name is the field index if
+/// the field is numeric.
+pub struct Field<'tcx>(pub ast::Name, pub Ty<'tcx>);
+
+/// The concrete version of ty::VariantDef
+pub struct VariantInfo<'tcx> {
+    pub discr: Disr,
+    pub fields: Vec<Field<'tcx>>
+}
+
+impl<'tcx> VariantInfo<'tcx> {
+    pub fn from_ty(tcx: &TyCtxt<'tcx>,
+                   ty: Ty<'tcx>,
+                   opt_def: Option<Def>)
+                   -> Self
+    {
+        match ty.sty {
+            ty::TyStruct(adt, substs) | ty::TyEnum(adt, substs) => {
+                let variant = match opt_def {
+                    None => adt.struct_variant(),
+                    Some(def) => adt.variant_of_def(def)
+                };
+
+                VariantInfo {
+                    discr: Disr::from(variant.disr_val),
+                    fields: variant.fields.iter().map(|f| {
+                        Field(f.name, monomorphize::field_ty(tcx, substs, f))
+                    }).collect()
+                }
+            }
+
+            ty::TyTuple(ref v) => {
+                VariantInfo {
+                    discr: Disr(0),
+                    fields: v.iter().enumerate().map(|(i, &t)| {
+                        Field(token::intern(&i.to_string()), t)
+                    }).collect()
+                }
+            }
+
+            _ => {
+                bug!("cannot get field types from the type {:?}", ty);
+            }
+        }
+    }
+
+    /// Return the variant corresponding to a given node (e.g. expr)
+    pub fn of_node(tcx: &TyCtxt<'tcx>, ty: Ty<'tcx>, id: ast::NodeId) -> Self {
+        let node_def = tcx.def_map.borrow().get(&id).map(|v| v.full_def());
+        Self::from_ty(tcx, ty, node_def)
+    }
+
+    pub fn field_index(&self, name: ast::Name) -> usize {
+        self.fields.iter().position(|&Field(n,_)| n == name).unwrap_or_else(|| {
+            bug!("unknown field `{}`", name)
+        })
+    }
+}
+
+pub struct BuilderRef_res {
+    pub b: BuilderRef,
+}
+
+impl Drop for BuilderRef_res {
+    fn drop(&mut self) {
+        unsafe {
+            llvm::LLVMDisposeBuilder(self.b);
+        }
+    }
+}
+
+pub fn BuilderRef_res(b: BuilderRef) -> BuilderRef_res {
+    BuilderRef_res {
+        b: b
+    }
+}
+
+pub fn validate_substs(substs: &Substs) {
+    assert!(!substs.types.needs_infer());
+}
+
+// work around bizarre resolve errors
+type RvalueDatum<'tcx> = datum::Datum<'tcx, datum::Rvalue>;
+pub type LvalueDatum<'tcx> = datum::Datum<'tcx, datum::Lvalue>;
+
+#[derive(Clone, Debug)]
+struct HintEntry<'tcx> {
+    // The datum for the dropflag-hint itself; note that many
+    // source-level Lvalues will be associated with the same
+    // dropflag-hint datum.
+    datum: cleanup::DropHintDatum<'tcx>,
+}
+
+pub struct DropFlagHintsMap<'tcx> {
+    // Maps NodeId for expressions that read/write unfragmented state
+    // to that state's drop-flag "hint." (A stack-local hint
+    // indicates either that (1.) it is certain that no drop is
+    // needed, or (2.) the inline drop-flag must be consulted.)
+    node_map: NodeMap<HintEntry<'tcx>>,
+}
+
+impl<'tcx> DropFlagHintsMap<'tcx> {
+    pub fn new() -> DropFlagHintsMap<'tcx> { DropFlagHintsMap { node_map: NodeMap() } }
+    pub fn has_hint(&self, id: ast::NodeId) -> bool { self.node_map.contains_key(&id) }
+    pub fn insert(&mut self, id: ast::NodeId, datum: cleanup::DropHintDatum<'tcx>) {
+        self.node_map.insert(id, HintEntry { datum: datum });
+    }
+    pub fn hint_datum(&self, id: ast::NodeId) -> Option<cleanup::DropHintDatum<'tcx>> {
+        self.node_map.get(&id).map(|t|t.datum)
+    }
+}
+
+// Function context.  Every LLVM function we create will have one of
+// these.
+pub struct FunctionContext<'a, 'tcx: 'a> {
+    // The MIR for this function. At present, this is optional because
+    // we only have MIR available for things that are local to the
+    // crate.
+    pub mir: Option<CachedMir<'a, 'tcx>>,
+
+    // The ValueRef returned from a call to llvm::LLVMAddFunction; the
+    // address of the first instruction in the sequence of
+    // instructions for this function that will go in the .text
+    // section of the executable we're generating.
+    pub llfn: ValueRef,
+
+    // Always an empty parameter environment. NOTE(@jroesch): another use of ParamEnv.
+    pub param_env: ty::ParameterEnvironment<'a, 'tcx>,
+
+    // A pointer to where to store the return value. If the return type is
+    // immediate, this points to an alloca in the function. Otherwise, it's a
+    // pointer to the hidden first parameter of the function. After function
+    // construction, this should always be Some.
+    pub llretslotptr: Cell<Option<ValueRef>>,
+
+    // The next fields are "hoisted basic blocks": blocks containing
+    // administrative activities that have to happen in only one place in
+    // the function, due to LLVM's quirks.
+    // A marker for the place where we want to insert the function's static
+    // allocas, so that LLVM will coalesce them into a single alloca call.
+    pub alloca_insert_pt: Cell<Option<ValueRef>>,
+    pub llreturn: Cell<Option<BasicBlockRef>>,
+
+    // If the function has any nested returns, including something like:
+    // fn foo() -> Option<Foo> { Some(Foo { x: return None }) }, then
+    // we use a separate alloca for each return
+    pub needs_ret_allocas: bool,
+
+    // When working with landingpad-based exceptions this value is alloca'd and
+    // later loaded when using the resume instruction. This ends up being
+    // critical to chaining landing pads and reusing already-translated
+    // cleanups.
+    //
+    // Note that for cleanuppad-based exceptions this is not used.
+    pub landingpad_alloca: Cell<Option<ValueRef>>,
+
+    // Maps the DefId's for local variables to the allocas created for
+    // them in llallocas.
+    pub lllocals: RefCell<NodeMap<LvalueDatum<'tcx>>>,
+
+    // Same as above, but for closure upvars
+    pub llupvars: RefCell<NodeMap<ValueRef>>,
+
+    // Carries info about drop-flags for local bindings (longer term,
+    // paths) for the code being compiled.
+    pub lldropflag_hints: RefCell<DropFlagHintsMap<'tcx>>,
+
+    // Describes the return/argument LLVM types and their ABI handling.
+    pub fn_ty: FnType,
+
+    // If this function is being monomorphized, this contains the type
+    // substitutions used.
+    pub param_substs: &'tcx Substs<'tcx>,
+
+    // The source span and nesting context where this function comes from, for
+    // error reporting and symbol generation.
+    pub span: Option<Span>,
+
+    // The arena that blocks are allocated from.
+    pub block_arena: &'a TypedArena<BlockS<'a, 'tcx>>,
+
+    // The arena that landing pads are allocated from.
+    pub lpad_arena: TypedArena<LandingPad>,
+
+    // This function's enclosing crate context.
+    pub ccx: &'a CrateContext<'a, 'tcx>,
+
+    // Used and maintained by the debuginfo module.
+    pub debug_context: debuginfo::FunctionDebugContext,
+
+    // Cleanup scopes.
+    pub scopes: RefCell<Vec<cleanup::CleanupScope<'a, 'tcx>>>,
+
+    pub cfg: Option<cfg::CFG>,
+}
+
+impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
+    pub fn mir(&self) -> CachedMir<'a, 'tcx> {
+        self.mir.clone().expect("fcx.mir was empty")
+    }
+
+    pub fn cleanup(&self) {
+        unsafe {
+            llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt
+                                                     .get()
+                                                     .unwrap());
+        }
+    }
+
+    pub fn get_llreturn(&self) -> BasicBlockRef {
+        if self.llreturn.get().is_none() {
+
+            self.llreturn.set(Some(unsafe {
+                llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), self.llfn,
+                                                    "return\0".as_ptr() as *const _)
+            }))
+        }
+
+        self.llreturn.get().unwrap()
+    }
+
+    pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>, name: &str) -> ValueRef {
+        if self.needs_ret_allocas {
+            base::alloca(bcx, self.fn_ty.ret.memory_ty(self.ccx), name)
+        } else {
+            self.llretslotptr.get().unwrap()
+        }
+    }
+
+    pub fn new_block(&'a self,
+                     name: &str,
+                     opt_node_id: Option<ast::NodeId>)
+                     -> Block<'a, 'tcx> {
+        unsafe {
+            let name = CString::new(name).unwrap();
+            let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
+                                                           self.llfn,
+                                                           name.as_ptr());
+            BlockS::new(llbb, opt_node_id, self)
+        }
+    }
+
+    pub fn new_id_block(&'a self,
+                        name: &str,
+                        node_id: ast::NodeId)
+                        -> Block<'a, 'tcx> {
+        self.new_block(name, Some(node_id))
+    }
+
+    pub fn new_temp_block(&'a self,
+                          name: &str)
+                          -> Block<'a, 'tcx> {
+        self.new_block(name, None)
+    }
+
+    pub fn join_blocks(&'a self,
+                       id: ast::NodeId,
+                       in_cxs: &[Block<'a, 'tcx>])
+                       -> Block<'a, 'tcx> {
+        let out = self.new_id_block("join", id);
+        let mut reachable = false;
+        for bcx in in_cxs {
+            if !bcx.unreachable.get() {
+                build::Br(*bcx, out.llbb, DebugLoc::None);
+                reachable = true;
+            }
+        }
+        if !reachable {
+            build::Unreachable(out);
+        }
+        return out;
+    }
+
+    pub fn monomorphize<T>(&self, value: &T) -> T
+        where T : TypeFoldable<'tcx>
+    {
+        monomorphize::apply_param_substs(self.ccx.tcx(),
+                                         self.param_substs,
+                                         value)
+    }
+
+    /// This is the same as `common::type_needs_drop`, except that it
+    /// may use or update caches within this `FunctionContext`.
+    pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
+        self.ccx.tcx().type_needs_drop_given_env(ty, &self.param_env)
+    }
+
+    pub fn eh_personality(&self) -> ValueRef {
+        // The exception handling personality function.
+        //
+        // If our compilation unit has the `eh_personality` lang item somewhere
+        // within it, then we just need to translate that. Otherwise, we're
+        // building an rlib which will depend on some upstream implementation of
+        // this function, so we just codegen a generic reference to it. We don't
+        // specify any of the types for the function, we just make it a symbol
+        // that LLVM can later use.
+        //
+        // Note that MSVC is a little special here in that we don't use the
+        // `eh_personality` lang item at all. Currently LLVM has support for
+        // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
+        // *name of the personality function* to decide what kind of unwind side
+        // tables/landing pads to emit. It looks like Dwarf is used by default,
+        // injecting a dependency on the `_Unwind_Resume` symbol for resuming
+        // an "exception", but for MSVC we want to force SEH. This means that we
+        // can't actually have the personality function be our standard
+        // `rust_eh_personality` function, but rather we wired it up to the
+        // CRT's custom personality function, which forces LLVM to consider
+        // landing pads as "landing pads for SEH".
+        let ccx = self.ccx;
+        let tcx = ccx.tcx();
+        let target = &ccx.sess().target.target;
+        match tcx.lang_items.eh_personality() {
+            Some(def_id) if !base::wants_msvc_seh(ccx.sess()) => {
+                Callee::def(ccx, def_id, tcx.mk_substs(Substs::empty())).reify(ccx).val
+            }
+            _ => if let Some(llpersonality) = ccx.eh_personality().get() {
+                llpersonality
+            } else {
+                let name = if !base::wants_msvc_seh(ccx.sess()) {
+                    "rust_eh_personality"
+                } else if target.arch == "x86" {
+                    "_except_handler3"
+                } else {
+                    "__C_specific_handler"
+                };
+                let fty = Type::variadic_func(&[], &Type::i32(ccx));
+                let f = declare::declare_cfn(ccx, name, fty);
+                ccx.eh_personality().set(Some(f));
+                f
+            }
+        }
+    }
+
+    // Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined,
+    // otherwise declares it as an external function.
+    pub fn eh_unwind_resume(&self) -> Callee<'tcx> {
+        use attributes;
+        let ccx = self.ccx;
+        let tcx = ccx.tcx();
+        assert!(ccx.sess().target.target.options.custom_unwind_resume);
+        if let Some(def_id) = tcx.lang_items.eh_unwind_resume() {
+            return Callee::def(ccx, def_id, tcx.mk_substs(Substs::empty()));
+        }
+
+        let ty = tcx.mk_fn_ptr(ty::BareFnTy {
+            unsafety: hir::Unsafety::Unsafe,
+            abi: Abi::C,
+            sig: ty::Binder(ty::FnSig {
+                inputs: vec![tcx.mk_mut_ptr(tcx.types.u8)],
+                output: ty::FnDiverging,
+                variadic: false
+            }),
+        });
+
+        let unwresume = ccx.eh_unwind_resume();
+        if let Some(llfn) = unwresume.get() {
+            return Callee::ptr(datum::immediate_rvalue(llfn, ty));
+        }
+        let llfn = declare::declare_fn(ccx, "rust_eh_unwind_resume", ty);
+        attributes::unwind(llfn, true);
+        unwresume.set(Some(llfn));
+        Callee::ptr(datum::immediate_rvalue(llfn, ty))
+    }
+}
+
+// Basic block context.  We create a block context for each basic block
+// (single-entry, single-exit sequence of instructions) we generate from Rust
+// code.  Each basic block we generate is attached to a function, typically
+// with many basic blocks per function.  All the basic blocks attached to a
+// function are organized as a directed graph.
+pub struct BlockS<'blk, 'tcx: 'blk> {
+    // The BasicBlockRef returned from a call to
+    // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
+    // block to the function pointed to by llfn.  We insert
+    // instructions into that block by way of this block context.
+    // The block pointing to this one in the function's digraph.
+    pub llbb: BasicBlockRef,
+    pub terminated: Cell<bool>,
+    pub unreachable: Cell<bool>,
+
+    // If this block is part of a landing pad, then this is `Some`, indicating
+    // what kind of landing pad it's in; otherwise this is `None`.
+    pub lpad: Cell<Option<&'blk LandingPad>>,
+
+    // AST node-id associated with this block, if any. Used for
+    // debugging purposes only.
+    pub opt_node_id: Option<ast::NodeId>,
+
+    // The function context for the function to which this block is
+    // attached.
+    pub fcx: &'blk FunctionContext<'blk, 'tcx>,
+}
+
+pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>;
+
+impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
+    pub fn new(llbb: BasicBlockRef,
+               opt_node_id: Option<ast::NodeId>,
+               fcx: &'blk FunctionContext<'blk, 'tcx>)
+               -> Block<'blk, 'tcx> {
+        fcx.block_arena.alloc(BlockS {
+            llbb: llbb,
+            terminated: Cell::new(false),
+            unreachable: Cell::new(false),
+            lpad: Cell::new(None),
+            opt_node_id: opt_node_id,
+            fcx: fcx
+        })
+    }
+
+    pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
+        self.fcx.ccx
+    }
+    pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> {
+        self.fcx
+    }
+    pub fn tcx(&self) -> &'blk TyCtxt<'tcx> {
+        self.fcx.ccx.tcx()
+    }
+    pub fn sess(&self) -> &'blk Session { self.fcx.ccx.sess() }
+
+    pub fn lpad(&self) -> Option<&'blk LandingPad> {
+        self.lpad.get()
+    }
+
+    pub fn mir(&self) -> CachedMir<'blk, 'tcx> {
+        self.fcx.mir()
+    }
+
+    pub fn name(&self, name: ast::Name) -> String {
+        name.to_string()
+    }
+
+    pub fn node_id_to_string(&self, id: ast::NodeId) -> String {
+        self.tcx().map.node_to_string(id).to_string()
+    }
+
+    pub fn def(&self, nid: ast::NodeId) -> Def {
+        match self.tcx().def_map.borrow().get(&nid) {
+            Some(v) => v.full_def(),
+            None => {
+                bug!("no def associated with node id {}", nid);
+            }
+        }
+    }
+
+    pub fn to_str(&self) -> String {
+        format!("[block {:p}]", self)
+    }
+
+    pub fn monomorphize<T>(&self, value: &T) -> T
+        where T : TypeFoldable<'tcx>
+    {
+        monomorphize::apply_param_substs(self.tcx(),
+                                         self.fcx.param_substs,
+                                         value)
+    }
+
+    pub fn build(&'blk self) -> BlockAndBuilder<'blk, 'tcx> {
+        BlockAndBuilder::new(self, OwnedBuilder::new_with_ccx(self.ccx()))
+    }
+}
+
+pub struct OwnedBuilder<'blk, 'tcx: 'blk> {
+    builder: Builder<'blk, 'tcx>
+}
+
+impl<'blk, 'tcx> OwnedBuilder<'blk, 'tcx> {
+    pub fn new_with_ccx(ccx: &'blk CrateContext<'blk, 'tcx>) -> Self {
+        // Create a fresh builder from the crate context.
+        let llbuilder = unsafe {
+            llvm::LLVMCreateBuilderInContext(ccx.llcx())
+        };
+        OwnedBuilder {
+            builder: Builder {
+                llbuilder: llbuilder,
+                ccx: ccx,
+            }
+        }
+    }
+}
+
+impl<'blk, 'tcx> Drop for OwnedBuilder<'blk, 'tcx> {
+    fn drop(&mut self) {
+        unsafe {
+            llvm::LLVMDisposeBuilder(self.builder.llbuilder);
+        }
+    }
+}
+
+pub struct BlockAndBuilder<'blk, 'tcx: 'blk> {
+    bcx: Block<'blk, 'tcx>,
+    owned_builder: OwnedBuilder<'blk, 'tcx>,
+}
+
+impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> {
+    pub fn new(bcx: Block<'blk, 'tcx>, owned_builder: OwnedBuilder<'blk, 'tcx>) -> Self {
+        // Set the builder's position to this block's end.
+        owned_builder.builder.position_at_end(bcx.llbb);
+        BlockAndBuilder {
+            bcx: bcx,
+            owned_builder: owned_builder,
+        }
+    }
+
+    pub fn with_block<F, R>(&self, f: F) -> R
+        where F: FnOnce(Block<'blk, 'tcx>) -> R
+    {
+        let result = f(self.bcx);
+        self.position_at_end(self.bcx.llbb);
+        result
+    }
+
+    pub fn map_block<F>(self, f: F) -> Self
+        where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>
+    {
+        let BlockAndBuilder { bcx, owned_builder } = self;
+        let bcx = f(bcx);
+        BlockAndBuilder::new(bcx, owned_builder)
+    }
+
+    pub fn at_start<F, R>(&self, f: F) -> R
+        where F: FnOnce(&BlockAndBuilder<'blk, 'tcx>) -> R
+    {
+        self.position_at_start(self.bcx.llbb);
+        let r = f(self);
+        self.position_at_end(self.bcx.llbb);
+        r
+    }
+
+    // Methods delegated to bcx
+
+    pub fn is_unreachable(&self) -> bool {
+        self.bcx.unreachable.get()
+    }
+
+    pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
+        self.bcx.ccx()
+    }
+    pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> {
+        self.bcx.fcx()
+    }
+    pub fn tcx(&self) -> &'blk TyCtxt<'tcx> {
+        self.bcx.tcx()
+    }
+    pub fn sess(&self) -> &'blk Session {
+        self.bcx.sess()
+    }
+
+    pub fn llbb(&self) -> BasicBlockRef {
+        self.bcx.llbb
+    }
+
+    pub fn mir(&self) -> CachedMir<'blk, 'tcx> {
+        self.bcx.mir()
+    }
+
+    pub fn monomorphize<T>(&self, value: &T) -> T
+        where T: TypeFoldable<'tcx>
+    {
+        self.bcx.monomorphize(value)
+    }
+
+    pub fn set_lpad(&self, lpad: Option<LandingPad>) {
+        self.bcx.lpad.set(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p)))
+    }
+}
+
+impl<'blk, 'tcx> Deref for BlockAndBuilder<'blk, 'tcx> {
+    type Target = Builder<'blk, 'tcx>;
+    fn deref(&self) -> &Self::Target {
+        &self.owned_builder.builder
+    }
+}
+
+/// A structure representing an active landing pad for the duration of a basic
+/// block.
+///
+/// Each `Block` may contain an instance of this, indicating whether the block
+/// is part of a landing pad or not. This is used to make decisions about whether
+/// to emit `invoke` instructions (e.g. in a landing pad we don't continue to
+/// use `invoke`) and also about various function call metadata.
+///
+/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is
+/// just a bunch of `None` instances (not too interesting), but for MSVC
+/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data.
+/// When inside of a landing pad, each function call in LLVM IR needs to be
+/// annotated with which landing pad it's a part of. This is accomplished via
+/// the `OperandBundleDef` value created for MSVC landing pads.
+pub struct LandingPad {
+    cleanuppad: Option<ValueRef>,
+    operand: Option<OperandBundleDef>,
+}
+
+impl LandingPad {
+    pub fn gnu() -> LandingPad {
+        LandingPad { cleanuppad: None, operand: None }
+    }
+
+    pub fn msvc(cleanuppad: ValueRef) -> LandingPad {
+        LandingPad {
+            cleanuppad: Some(cleanuppad),
+            operand: Some(OperandBundleDef::new("funclet", &[cleanuppad])),
+        }
+    }
+
+    pub fn bundle(&self) -> Option<&OperandBundleDef> {
+        self.operand.as_ref()
+    }
+}
+
+impl Clone for LandingPad {
+    fn clone(&self) -> LandingPad {
+        LandingPad {
+            cleanuppad: self.cleanuppad,
+            operand: self.cleanuppad.map(|p| {
+                OperandBundleDef::new("funclet", &[p])
+            }),
+        }
+    }
+}
+
+pub struct Result<'blk, 'tcx: 'blk> {
+    pub bcx: Block<'blk, 'tcx>,
+    pub val: ValueRef
+}
+
+impl<'b, 'tcx> Result<'b, 'tcx> {
+    pub fn new(bcx: Block<'b, 'tcx>, val: ValueRef) -> Result<'b, 'tcx> {
+        Result {
+            bcx: bcx,
+            val: val,
+        }
+    }
+}
+
+pub fn val_ty(v: ValueRef) -> Type {
+    unsafe {
+        Type::from_ref(llvm::LLVMTypeOf(v))
+    }
+}
+
+// LLVM constant constructors.
+pub fn C_null(t: Type) -> ValueRef {
+    unsafe {
+        llvm::LLVMConstNull(t.to_ref())
+    }
+}
+
+pub fn C_undef(t: Type) -> ValueRef {
+    unsafe {
+        llvm::LLVMGetUndef(t.to_ref())
+    }
+}
+
+pub fn C_integral(t: Type, u: u64, sign_extend: bool) -> ValueRef {
+    unsafe {
+        llvm::LLVMConstInt(t.to_ref(), u, sign_extend as Bool)
+    }
+}
+
+pub fn C_floating(s: &str, t: Type) -> ValueRef {
+    unsafe {
+        let s = CString::new(s).unwrap();
+        llvm::LLVMConstRealOfString(t.to_ref(), s.as_ptr())
+    }
+}
+
+pub fn C_floating_f64(f: f64, t: Type) -> ValueRef {
+    unsafe {
+        llvm::LLVMConstReal(t.to_ref(), f)
+    }
+}
+
+pub fn C_nil(ccx: &CrateContext) -> ValueRef {
+    C_struct(ccx, &[], false)
+}
+
+pub fn C_bool(ccx: &CrateContext, val: bool) -> ValueRef {
+    C_integral(Type::i1(ccx), val as u64, false)
+}
+
+pub fn C_i32(ccx: &CrateContext, i: i32) -> ValueRef {
+    C_integral(Type::i32(ccx), i as u64, true)
+}
+
+pub fn C_u32(ccx: &CrateContext, i: u32) -> ValueRef {
+    C_integral(Type::i32(ccx), i as u64, false)
+}
+
+pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef {
+    C_integral(Type::i64(ccx), i, false)
+}
+
+pub fn C_int<I: AsI64>(ccx: &CrateContext, i: I) -> ValueRef {
+    let v = i.as_i64();
+
+    let bit_size = machine::llbitsize_of_real(ccx, ccx.int_type());
+
+    if bit_size < 64 {
+        // make sure it doesn't overflow
+        assert!(v < (1<<(bit_size-1)) && v >= -(1<<(bit_size-1)));
+    }
+
+    C_integral(ccx.int_type(), v as u64, true)
+}
+
+pub fn C_uint<I: AsU64>(ccx: &CrateContext, i: I) -> ValueRef {
+    let v = i.as_u64();
+
+    let bit_size = machine::llbitsize_of_real(ccx, ccx.int_type());
+
+    if bit_size < 64 {
+        // make sure it doesn't overflow
+        assert!(v < (1<<bit_size));
+    }
+
+    C_integral(ccx.int_type(), v, false)
+}
+
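+// For instance, on a target whose native `int_type` is 32 bits wide, C_int
+// accepts values in [-2^31, 2^31 - 1] and C_uint accepts values up to
+// 2^32 - 1; anything outside that range trips the assertions above.
+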
+pub trait AsI64 { fn as_i64(self) -> i64; }
+pub trait AsU64 { fn as_u64(self) -> u64; }
+
+// FIXME: remove the intptr conversions, because they
+// are host-architecture-dependent
+impl AsI64 for i64 { fn as_i64(self) -> i64 { self as i64 }}
+impl AsI64 for i32 { fn as_i64(self) -> i64 { self as i64 }}
+impl AsI64 for isize { fn as_i64(self) -> i64 { self as i64 }}
+
+impl AsU64 for u64  { fn as_u64(self) -> u64 { self as u64 }}
+impl AsU64 for u32  { fn as_u64(self) -> u64 { self as u64 }}
+impl AsU64 for usize { fn as_u64(self) -> u64 { self as u64 }}
+
+pub fn C_u8(ccx: &CrateContext, i: u8) -> ValueRef {
+    C_integral(Type::i8(ccx), i as u64, false)
+}
+
+
+// This is a 'c-like' raw string, which differs from
+// our boxed-and-length-annotated strings.
+pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> ValueRef {
+    unsafe {
+        if let Some(&llval) = cx.const_cstr_cache().borrow().get(&s) {
+            return llval;
+        }
+
+        let sc = llvm::LLVMConstStringInContext(cx.llcx(),
+                                                s.as_ptr() as *const c_char,
+                                                s.len() as c_uint,
+                                                !null_terminated as Bool);
+
+        let gsym = token::gensym("str");
+        let sym = format!("str{}", gsym.0);
+        let g = declare::define_global(cx, &sym[..], val_ty(sc)).unwrap_or_else(||{
+            bug!("symbol `{}` is already defined", sym);
+        });
+        llvm::LLVMSetInitializer(g, sc);
+        llvm::LLVMSetGlobalConstant(g, True);
+        llvm::SetLinkage(g, llvm::InternalLinkage);
+
+        cx.const_cstr_cache().borrow_mut().insert(s, g);
+        g
+    }
+}
+
+// NB: Do not use `do_spill_noroot` to make this into a constant string, or
+// you will be kicked off fast isel. See issue #4352 for an example of this.
+pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef {
+    let len = s.len();
+    let cs = consts::ptrcast(C_cstr(cx, s, false), Type::i8p(cx));
+    C_named_struct(cx.tn().find_type("str_slice").unwrap(), &[cs, C_uint(cx, len)])
+}
+
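+// So a string literal such as "abc" is represented as an internal byte-array
+// global plus a `str_slice` struct of the form { ptr-to-bytes, 3 } -- a
+// (pointer, length) pair rather than a NUL-terminated C string.
+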
+pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
+    C_struct_in_context(cx.llcx(), elts, packed)
+}
+
+pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) -> ValueRef {
+    unsafe {
+        llvm::LLVMConstStructInContext(llcx,
+                                       elts.as_ptr(), elts.len() as c_uint,
+                                       packed as Bool)
+    }
+}
+
+pub fn C_named_struct(t: Type, elts: &[ValueRef]) -> ValueRef {
+    unsafe {
+        llvm::LLVMConstNamedStruct(t.to_ref(), elts.as_ptr(), elts.len() as c_uint)
+    }
+}
+
+pub fn C_array(ty: Type, elts: &[ValueRef]) -> ValueRef {
+    unsafe {
+        return llvm::LLVMConstArray(ty.to_ref(), elts.as_ptr(), elts.len() as c_uint);
+    }
+}
+
+pub fn C_vector(elts: &[ValueRef]) -> ValueRef {
+    unsafe {
+        return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint);
+    }
+}
+
+pub fn C_bytes(cx: &CrateContext, bytes: &[u8]) -> ValueRef {
+    C_bytes_in_context(cx.llcx(), bytes)
+}
+
+pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef {
+    unsafe {
+        let ptr = bytes.as_ptr() as *const c_char;
+        return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True);
+    }
+}
+
+pub fn const_get_elt(v: ValueRef, us: &[c_uint])
+              -> ValueRef {
+    unsafe {
+        let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
+
+        debug!("const_get_elt(v={:?}, us={:?}, r={:?})",
+               Value(v), us, Value(r));
+
+        r
+    }
+}
+
+pub fn const_to_int(v: ValueRef) -> i64 {
+    unsafe {
+        llvm::LLVMConstIntGetSExtValue(v)
+    }
+}
+
+pub fn const_to_uint(v: ValueRef) -> u64 {
+    unsafe {
+        llvm::LLVMConstIntGetZExtValue(v)
+    }
+}
+
+fn is_const_integral(v: ValueRef) -> bool {
+    unsafe {
+        !llvm::LLVMIsAConstantInt(v).is_null()
+    }
+}
+
+pub fn const_to_opt_int(v: ValueRef) -> Option<i64> {
+    unsafe {
+        if is_const_integral(v) {
+            Some(llvm::LLVMConstIntGetSExtValue(v))
+        } else {
+            None
+        }
+    }
+}
+
+pub fn const_to_opt_uint(v: ValueRef) -> Option<u64> {
+    unsafe {
+        if is_const_integral(v) {
+            Some(llvm::LLVMConstIntGetZExtValue(v))
+        } else {
+            None
+        }
+    }
+}
+
+pub fn is_undef(val: ValueRef) -> bool {
+    unsafe {
+        llvm::LLVMIsUndef(val) != False
+    }
+}
+
+#[allow(dead_code)] // potentially useful
+pub fn is_null(val: ValueRef) -> bool {
+    unsafe {
+        llvm::LLVMIsNull(val) != False
+    }
+}
+
+pub fn monomorphize_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> {
+    bcx.fcx.monomorphize(&t)
+}
+
+pub fn node_id_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, id: ast::NodeId) -> Ty<'tcx> {
+    let tcx = bcx.tcx();
+    let t = tcx.node_id_to_type(id);
+    monomorphize_type(bcx, t)
+}
+
+pub fn expr_ty<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> {
+    node_id_type(bcx, ex.id)
+}
+
+pub fn expr_ty_adjusted<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> {
+    monomorphize_type(bcx, bcx.tcx().expr_ty_adjusted(ex))
+}
+
+/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
+/// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
+/// guarantee to us that all nested obligations *could be* resolved if we wanted to.
+pub fn fulfill_obligation<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                    span: Span,
+                                    trait_ref: ty::PolyTraitRef<'tcx>)
+                                    -> traits::Vtable<'tcx, ()>
+{
+    let tcx = ccx.tcx();
+
+    // Remove any references to regions; this helps improve caching.
+    let trait_ref = tcx.erase_regions(&trait_ref);
+
+    // First check the cache.
+    match ccx.trait_cache().borrow().get(&trait_ref) {
+        Some(vtable) => {
+            info!("Cache hit: {:?}", trait_ref);
+            return (*vtable).clone();
+        }
+        None => { }
+    }
+
+    debug!("trans fulfill_obligation: trait_ref={:?} def_id={:?}",
+           trait_ref, trait_ref.def_id());
+
+
+    // Do the initial selection for the obligation. This yields the
+    // shallow result we are looking for -- that is, what specific impl.
+    let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables, ProjectionMode::Any);
+    let mut selcx = SelectionContext::new(&infcx);
+
+    let obligation =
+        traits::Obligation::new(traits::ObligationCause::misc(span, ast::DUMMY_NODE_ID),
+                                trait_ref.to_poly_trait_predicate());
+    let selection = match selcx.select(&obligation) {
+        Ok(Some(selection)) => selection,
+        Ok(None) => {
+            // Ambiguity can happen when monomorphizing during trans
+            // expands to some humongo type that never occurred
+            // statically -- this humongo type can then overflow,
+            // leading to an ambiguous result. So report this as an
+            // overflow bug, since I believe this is the only case
+            // where ambiguity can result.
+            debug!("Encountered ambiguity selecting `{:?}` during trans, \
+                    presuming due to overflow",
+                   trait_ref);
+            ccx.sess().span_fatal(
+                span,
+                "reached the recursion limit during monomorphization (selection ambiguity)");
+        }
+        Err(e) => {
+            span_bug!(
+                span,
+                "Encountered error `{:?}` selecting `{:?}` during trans",
+                e,
+                trait_ref)
+        }
+    };
+
+    // Currently, we use a fulfillment context to completely resolve
+    // all nested obligations. This is because they can inform the
+    // inference of the impl's type parameters.
+    let mut fulfill_cx = traits::FulfillmentContext::new();
+    let vtable = selection.map(|predicate| {
+        fulfill_cx.register_predicate_obligation(&infcx, predicate);
+    });
+    let vtable = infer::drain_fulfillment_cx_or_panic(
+        span, &infcx, &mut fulfill_cx, &vtable
+    );
+
+    info!("Cache miss: {:?} => {:?}", trait_ref, vtable);
+
+    ccx.trait_cache().borrow_mut().insert(trait_ref, vtable.clone());
+
+    vtable
+}
+
+/// Normalizes the predicates and checks whether they hold.  If this
+/// returns false, then either normalize encountered an error or one
+/// of the predicates did not hold. Used when creating vtables to
+/// check for unsatisfiable methods.
+pub fn normalize_and_test_predicates<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                               predicates: Vec<ty::Predicate<'tcx>>)
+                                               -> bool
+{
+    debug!("normalize_and_test_predicates(predicates={:?})",
+           predicates);
+
+    let tcx = ccx.tcx();
+    let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables, ProjectionMode::Any);
+    let mut selcx = SelectionContext::new(&infcx);
+    let mut fulfill_cx = traits::FulfillmentContext::new();
+    let cause = traits::ObligationCause::dummy();
+    let traits::Normalized { value: predicates, obligations } =
+        traits::normalize(&mut selcx, cause.clone(), &predicates);
+    for obligation in obligations {
+        fulfill_cx.register_predicate_obligation(&infcx, obligation);
+    }
+    for predicate in predicates {
+        let obligation = traits::Obligation::new(cause.clone(), predicate);
+        fulfill_cx.register_predicate_obligation(&infcx, obligation);
+    }
+
+    infer::drain_fulfillment_cx(&infcx, &mut fulfill_cx, &()).is_ok()
+}
+
+pub fn langcall(bcx: Block,
+                span: Option<Span>,
+                msg: &str,
+                li: LangItem)
+                -> DefId {
+    match bcx.tcx().lang_items.require(li) {
+        Ok(id) => id,
+        Err(s) => {
+            let msg = format!("{} {}", msg, s);
+            match span {
+                Some(span) => bcx.tcx().sess.span_fatal(span, &msg[..]),
+                None => bcx.tcx().sess.fatal(&msg[..]),
+            }
+        }
+    }
+}
+
+/// Return the VariantDef corresponding to an inlined variant node
+pub fn inlined_variant_def<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                     inlined_vid: ast::NodeId)
+                                     -> ty::VariantDef<'tcx>
+{
+
+    let ctor_ty = ccx.tcx().node_id_to_type(inlined_vid);
+    debug!("inlined_variant_def: ctor_ty={:?} inlined_vid={:?}", ctor_ty,
+           inlined_vid);
+    let adt_def = match ctor_ty.sty {
+        ty::TyFnDef(_, _, &ty::BareFnTy { sig: ty::Binder(ty::FnSig {
+            output: ty::FnConverging(ty), ..
+        }), ..}) => ty,
+        _ => ctor_ty
+    }.ty_adt_def().unwrap();
+    let inlined_vid_def_id = ccx.tcx().map.local_def_id(inlined_vid);
+    adt_def.variants.iter().find(|v| {
+        inlined_vid_def_id == v.did ||
+            ccx.external().borrow().get(&v.did) == Some(&Some(inlined_vid))
+    }).unwrap_or_else(|| {
+        bug!("no variant for {:?}::{}", adt_def, inlined_vid)
+    })
+}
+
+// To avoid UB from LLVM, these two functions mask RHS with an
+// appropriate mask unconditionally (i.e. the fallback behavior for
+// all shifts). For 32- and 64-bit types, this matches the semantics
+// of Java. (See related discussion on #1877 and #10183.)
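+// For example, with a 32-bit integer type the mask is 31, so a shift amount
+// of 37 is quietly reduced to 37 & 31 == 5 instead of an out-of-range shift,
+// which LLVM leaves undefined.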
+
+pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                          lhs: ValueRef,
+                                          rhs: ValueRef,
+                                          binop_debug_loc: DebugLoc) -> ValueRef {
+    let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShl, lhs, rhs);
+    // #1877, #10183: Ensure that input is always valid
+    let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
+    build::Shl(bcx, lhs, rhs, binop_debug_loc)
+}
+
+pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                          lhs_t: Ty<'tcx>,
+                                          lhs: ValueRef,
+                                          rhs: ValueRef,
+                                          binop_debug_loc: DebugLoc) -> ValueRef {
+    let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs);
+    // #1877, #10183: Ensure that input is always valid
+    let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
+    let is_signed = lhs_t.is_signed();
+    if is_signed {
+        build::AShr(bcx, lhs, rhs, binop_debug_loc)
+    } else {
+        build::LShr(bcx, lhs, rhs, binop_debug_loc)
+    }
+}
+
+fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                              rhs: ValueRef,
+                              debug_loc: DebugLoc) -> ValueRef {
+    let rhs_llty = val_ty(rhs);
+    build::And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc)
+}
+
+pub fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                              llty: Type,
+                              mask_llty: Type,
+                              invert: bool) -> ValueRef {
+    let kind = llty.kind();
+    match kind {
+        TypeKind::Integer => {
+            // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
+            let val = llty.int_width() - 1;
+            if invert {
+                C_integral(mask_llty, !val, true)
+            } else {
+                C_integral(mask_llty, val, false)
+            }
+        },
+        TypeKind::Vector => {
+            let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert);
+            build::VectorSplat(bcx, mask_llty.vector_length(), mask)
+        },
+        _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
+    }
+}
diff --git a/src/librustc_trans/consts.rs b/src/librustc_trans/consts.rs
new file mode 100644 (file)
index 0000000..89f3b29
--- /dev/null
+++ b/src/librustc_trans/consts.rs
@@ -0,0 +1,1207 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+use llvm;
+use llvm::{ConstFCmp, ConstICmp, SetLinkage, SetUnnamedAddr};
+use llvm::{InternalLinkage, ValueRef, Bool, True};
+use middle::const_qualif::ConstQualif;
+use rustc_const_eval::{ConstEvalErr, lookup_const_fn_by_id, lookup_const_by_id, ErrKind};
+use rustc_const_eval::eval_repeat_count;
+use rustc::hir::def::Def;
+use rustc::hir::def_id::DefId;
+use rustc::hir::map as hir_map;
+use {abi, adt, closure, debuginfo, expr, machine};
+use base::{self, exported_name, imported_name, push_ctxt};
+use callee::Callee;
+use collector::{self, TransItem};
+use common::{type_is_sized, C_nil, const_get_elt};
+use common::{CrateContext, C_integral, C_floating, C_bool, C_str_slice, C_bytes, val_ty};
+use common::{C_struct, C_undef, const_to_opt_int, const_to_opt_uint, VariantInfo, C_uint};
+use common::{type_is_fat_ptr, Field, C_vector, C_array, C_null};
+use datum::{Datum, Lvalue};
+use declare;
+use monomorphize::{self, Instance};
+use type_::Type;
+use type_of;
+use value::Value;
+use Disr;
+use rustc::ty::subst::Substs;
+use rustc::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer};
+use rustc::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
+use rustc::ty::{self, Ty, TyCtxt};
+use rustc::ty::cast::{CastTy,IntTy};
+use util::nodemap::NodeMap;
+use rustc_const_math::{ConstInt, ConstMathErr, ConstUsize, ConstIsize};
+
+use rustc::hir;
+
+use std::ffi::{CStr, CString};
+use std::borrow::Cow;
+use libc::c_uint;
+use syntax::ast::{self, LitKind};
+use syntax::attr::{self, AttrMetaMethods};
+use syntax::parse::token;
+use syntax::ptr::P;
+
+pub type FnArgMap<'a> = Option<&'a NodeMap<ValueRef>>;
+
+pub fn const_lit(cx: &CrateContext, e: &hir::Expr, lit: &ast::Lit)
+    -> ValueRef {
+    let _icx = push_ctxt("trans_lit");
+    debug!("const_lit: {:?}", lit);
+    match lit.node {
+        LitKind::Byte(b) => C_integral(Type::uint_from_ty(cx, ast::UintTy::U8), b as u64, false),
+        LitKind::Char(i) => C_integral(Type::char(cx), i as u64, false),
+        LitKind::Int(i, ast::LitIntType::Signed(t)) => {
+            C_integral(Type::int_from_ty(cx, t), i, true)
+        }
+        LitKind::Int(u, ast::LitIntType::Unsigned(t)) => {
+            C_integral(Type::uint_from_ty(cx, t), u, false)
+        }
+        LitKind::Int(i, ast::LitIntType::Unsuffixed) => {
+            let lit_int_ty = cx.tcx().node_id_to_type(e.id);
+            match lit_int_ty.sty {
+                ty::TyInt(t) => {
+                    C_integral(Type::int_from_ty(cx, t), i as u64, true)
+                }
+                ty::TyUint(t) => {
+                    C_integral(Type::uint_from_ty(cx, t), i as u64, false)
+                }
+                _ => span_bug!(lit.span,
+                        "integer literal has type {:?} (expected int \
+                         or usize)",
+                        lit_int_ty)
+            }
+        }
+        LitKind::Float(ref fs, t) => {
+            C_floating(&fs, Type::float_from_ty(cx, t))
+        }
+        LitKind::FloatUnsuffixed(ref fs) => {
+            let lit_float_ty = cx.tcx().node_id_to_type(e.id);
+            match lit_float_ty.sty {
+                ty::TyFloat(t) => {
+                    C_floating(&fs, Type::float_from_ty(cx, t))
+                }
+                _ => {
+                    span_bug!(lit.span,
+                        "floating point literal doesn't have the right type");
+                }
+            }
+        }
+        LitKind::Bool(b) => C_bool(cx, b),
+        LitKind::Str(ref s, _) => C_str_slice(cx, (*s).clone()),
+        LitKind::ByteStr(ref data) => {
+            addr_of(cx, C_bytes(cx, &data[..]), 1, "byte_str")
+        }
+    }
+}
+
+pub fn ptrcast(val: ValueRef, ty: Type) -> ValueRef {
+    unsafe {
+        llvm::LLVMConstPointerCast(val, ty.to_ref())
+    }
+}
+
+fn addr_of_mut(ccx: &CrateContext,
+               cv: ValueRef,
+               align: machine::llalign,
+               kind: &str)
+               -> ValueRef {
+    unsafe {
+        // FIXME: this totally needs a better name generation scheme, perhaps a simple global
+        // counter? The same goes for most other uses of gensym in trans.
+        let gsym = token::gensym("_");
+        let name = format!("{}{}", kind, gsym.0);
+        let gv = declare::define_global(ccx, &name[..], val_ty(cv)).unwrap_or_else(||{
+            bug!("symbol `{}` is already defined", name);
+        });
+        llvm::LLVMSetInitializer(gv, cv);
+        llvm::LLVMSetAlignment(gv, align);
+        SetLinkage(gv, InternalLinkage);
+        SetUnnamedAddr(gv, true);
+        gv
+    }
+}
+
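+/// Returns a read-only global containing `cv`, reusing a cached global for the
+/// same value when possible and upgrading its alignment if necessary.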
+pub fn addr_of(ccx: &CrateContext,
+               cv: ValueRef,
+               align: machine::llalign,
+               kind: &str)
+               -> ValueRef {
+    match ccx.const_globals().borrow().get(&cv) {
+        Some(&gv) => {
+            unsafe {
+                // Upgrade the alignment in cases where the same constant is used with different
+                // alignment requirements
+                if align > llvm::LLVMGetAlignment(gv) {
+                    llvm::LLVMSetAlignment(gv, align);
+                }
+            }
+            return gv;
+        }
+        None => {}
+    }
+    let gv = addr_of_mut(ccx, cv, align, kind);
+    unsafe {
+        llvm::LLVMSetGlobalConstant(gv, True);
+    }
+    ccx.const_globals().borrow_mut().insert(cv, gv);
+    gv
+}
+
+/// Deref a constant pointer
+fn load_const(cx: &CrateContext, v: ValueRef, t: Ty) -> ValueRef {
+    let v = match cx.const_unsized().borrow().get(&v) {
+        Some(&v) => v,
+        None => v
+    };
+    let d = unsafe { llvm::LLVMGetInitializer(v) };
+    if t.is_bool() {
+        unsafe { llvm::LLVMConstTrunc(d, Type::i1(cx).to_ref()) }
+    } else {
+        d
+    }
+}
+
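+/// Dereferences the constant pointer `v` of type `ty`, returning the pointee value
+/// and its type. Fat pointers keep their representation and only change type.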
+fn const_deref<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                         v: ValueRef,
+                         ty: Ty<'tcx>)
+                         -> (ValueRef, Ty<'tcx>) {
+    match ty.builtin_deref(true, ty::NoPreference) {
+        Some(mt) => {
+            if type_is_sized(cx.tcx(), mt.ty) {
+                (load_const(cx, v, mt.ty), mt.ty)
+            } else {
+                // Dereferencing a fat pointer does not change the representation;
+                // it only changes the type to that of the unsized contents.
+                (v, mt.ty)
+            }
+        }
+        None => {
+            bug!("unexpected dereferenceable type {:?}", ty)
+        }
+    }
+}
+
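+/// Evaluates a call to a `const fn` at translation time by binding the argument
+/// values to the function's parameters and const-evaluating its body expression.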
+fn const_fn_call<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                           def_id: DefId,
+                           substs: Substs<'tcx>,
+                           arg_vals: &[ValueRef],
+                           param_substs: &'tcx Substs<'tcx>,
+                           trueconst: TrueConst) -> Result<ValueRef, ConstEvalFailure> {
+    let fn_like = lookup_const_fn_by_id(ccx.tcx(), def_id);
+    let fn_like = fn_like.expect("lookup_const_fn_by_id failed in const_fn_call");
+
+    let body = match fn_like.body().expr {
+        Some(ref expr) => expr,
+        None => return Ok(C_nil(ccx))
+    };
+
+    let args = &fn_like.decl().inputs;
+    assert_eq!(args.len(), arg_vals.len());
+
+    let arg_ids = args.iter().map(|arg| arg.pat.id);
+    let fn_args = arg_ids.zip(arg_vals.iter().cloned()).collect();
+
+    let substs = monomorphize::apply_param_substs(ccx.tcx(),
+                                                  param_substs,
+                                                  &substs.erase_regions());
+    let substs = ccx.tcx().mk_substs(substs);
+
+    const_expr(ccx, body, substs, Some(&fn_args), trueconst).map(|(res, _)| res)
+}
+
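+/// Looks up the HIR expression defining the constant item `def_id`, as referenced
+/// by `ref_expr`, with the appropriate substitutions applied.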
+pub fn get_const_expr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                def_id: DefId,
+                                ref_expr: &hir::Expr,
+                                param_substs: &'tcx Substs<'tcx>)
+                                -> &'tcx hir::Expr {
+    let substs = ccx.tcx().node_id_item_substs(ref_expr.id).substs;
+    let substs = monomorphize::apply_param_substs(ccx.tcx(),
+                                                  param_substs,
+                                                  &substs.erase_regions());
+    match lookup_const_by_id(ccx.tcx(), def_id, Some(substs)) {
+        Some((ref expr, _ty)) => expr,
+        None => {
+            span_bug!(ref_expr.span, "constant item not found")
+        }
+    }
+}
+
+pub enum ConstEvalFailure {
+    /// In case the const evaluator failed on something that would panic at
+    /// runtime, as defined in RFC 1229.
+    Runtime(ConstEvalErr),
+    /// In case we found a true constant.
+    Compiletime(ConstEvalErr),
+}
+
+impl ConstEvalFailure {
+    fn into_inner(self) -> ConstEvalErr {
+        match self {
+            Runtime(e) => e,
+            Compiletime(e) => e,
+        }
+    }
+    pub fn description(&self) -> Cow<str> {
+        match self {
+            &Runtime(ref e) => e.description(),
+            &Compiletime(ref e) => e.description(),
+        }
+    }
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum TrueConst {
+    Yes, No
+}
+
+use self::ConstEvalFailure::*;
+
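+/// Translates the constant item `def_id` into an LLVM global, turning any
+/// runtime const-evaluation failure into a hard compile-time error.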
+fn get_const_val<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                           def_id: DefId,
+                           ref_expr: &hir::Expr,
+                           param_substs: &'tcx Substs<'tcx>)
+                           -> Result<ValueRef, ConstEvalFailure> {
+    let expr = get_const_expr(ccx, def_id, ref_expr, param_substs);
+    let empty_substs = ccx.tcx().mk_substs(Substs::empty());
+    match get_const_expr_as_global(ccx, expr, ConstQualif::empty(), empty_substs, TrueConst::Yes) {
+        Err(Runtime(err)) => {
+            ccx.tcx().sess.span_err(expr.span, &err.description());
+            Err(Compiletime(err))
+        },
+        other => other,
+    }
+}
+
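+/// Translates a constant expression into an addressable LLVM global, caching the
+/// result by `(expr.id, param_substs)` so that repeated uses share one global.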
+pub fn get_const_expr_as_global<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                          expr: &hir::Expr,
+                                          qualif: ConstQualif,
+                                          param_substs: &'tcx Substs<'tcx>,
+                                          trueconst: TrueConst)
+                                          -> Result<ValueRef, ConstEvalFailure> {
+    debug!("get_const_expr_as_global: {:?}", expr.id);
+    // Special-case constants to cache a common global for all uses.
+    if let hir::ExprPath(..) = expr.node {
+        // `def` must be its own statement and cannot be inside the `match`;
+        // otherwise the `def_map` would stay borrowed for the entire match
+        // instead of just long enough to get the `def` value.
+        let def = ccx.tcx().def_map.borrow().get(&expr.id).unwrap().full_def();
+        match def {
+            Def::Const(def_id) | Def::AssociatedConst(def_id) => {
+                if !ccx.tcx().tables.borrow().adjustments.contains_key(&expr.id) {
+                    debug!("get_const_expr_as_global ({:?}): found const {:?}",
+                           expr.id, def_id);
+                    return get_const_val(ccx, def_id, expr, param_substs);
+                }
+            },
+            _ => {},
+        }
+    }
+
+    let key = (expr.id, param_substs);
+    if let Some(&val) = ccx.const_values().borrow().get(&key) {
+        return Ok(val);
+    }
+    let ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs,
+                                              &ccx.tcx().expr_ty(expr));
+    let val = if qualif.intersects(ConstQualif::NON_STATIC_BORROWS) {
+        // Avoid autorefs, as they would create global references instead of
+        // stack references, even when only the latter are correct.
+        const_expr_unadjusted(ccx, expr, ty, param_substs, None, trueconst)?
+    } else {
+        const_expr(ccx, expr, param_substs, None, trueconst)?.0
+    };
+
+    // boolean SSA values are i1, but they have to be stored in i8 slots,
+    // otherwise some LLVM optimization passes don't work as expected
+    let val = unsafe {
+        if llvm::LLVMTypeOf(val) == Type::i1(ccx).to_ref() {
+            llvm::LLVMConstZExt(val, Type::i8(ccx).to_ref())
+        } else {
+            val
+        }
+    };
+
+    let lvalue = addr_of(ccx, val, type_of::align_of(ccx, ty), "const");
+    ccx.const_values().borrow_mut().insert(key, lvalue);
+    Ok(lvalue)
+}
+
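+/// Translates a constant expression and applies any recorded adjustments
+/// (autoderefs, autorefs, unsizing, fn-pointer reification), returning the LLVM
+/// constant together with its adjusted type.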
+pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                            e: &hir::Expr,
+                            param_substs: &'tcx Substs<'tcx>,
+                            fn_args: FnArgMap,
+                            trueconst: TrueConst)
+                            -> Result<(ValueRef, Ty<'tcx>), ConstEvalFailure> {
+    let ety = monomorphize::apply_param_substs(cx.tcx(), param_substs,
+                                               &cx.tcx().expr_ty(e));
+    let llconst = const_expr_unadjusted(cx, e, ety, param_substs, fn_args, trueconst)?;
+    let mut llconst = llconst;
+    let mut ety_adjusted = monomorphize::apply_param_substs(cx.tcx(), param_substs,
+                                                            &cx.tcx().expr_ty_adjusted(e));
+    let opt_adj = cx.tcx().tables.borrow().adjustments.get(&e.id).cloned();
+    match opt_adj {
+        Some(AdjustReifyFnPointer) => {
+            match ety.sty {
+                ty::TyFnDef(def_id, substs, _) => {
+                    llconst = Callee::def(cx, def_id, substs).reify(cx).val;
+                }
+                _ => {
+                    bug!("{} cannot be reified to a fn ptr", ety)
+                }
+            }
+        }
+        Some(AdjustUnsafeFnPointer) | Some(AdjustMutToConstPointer) => {
+            // purely a type-level thing
+        }
+        Some(AdjustDerefRef(adj)) => {
+            let mut ty = ety;
+            // Save the last autoderef in case we can avoid it.
+            if adj.autoderefs > 0 {
+                for _ in 0..adj.autoderefs-1 {
+                    let (dv, dt) = const_deref(cx, llconst, ty);
+                    llconst = dv;
+                    ty = dt;
+                }
+            }
+
+            if adj.autoref.is_some() {
+                if adj.autoderefs == 0 {
+                    // Don't copy data to do a deref+ref
+                    // (i.e., skip the last auto-deref).
+                    llconst = addr_of(cx, llconst, type_of::align_of(cx, ty), "autoref");
+                    ty = cx.tcx().mk_imm_ref(cx.tcx().mk_region(ty::ReStatic), ty);
+                }
+            } else if adj.autoderefs > 0 {
+                let (dv, dt) = const_deref(cx, llconst, ty);
+                llconst = dv;
+
+                // If we dereferenced a fat pointer then we will have an
+                // open type here, so we need to update the type with the
+                // one returned from const_deref.
+                ety_adjusted = dt;
+            }
+
+            if let Some(target) = adj.unsize {
+                let target = monomorphize::apply_param_substs(cx.tcx(),
+                                                              param_substs,
+                                                              &target);
+
+                let pointee_ty = ty.builtin_deref(true, ty::NoPreference)
+                    .expect("consts: unsizing got non-pointer type").ty;
+                let (base, old_info) = if !type_is_sized(cx.tcx(), pointee_ty) {
+                    // Normally, the source is a thin pointer and we are
+                    // adding extra info to make a fat pointer. The exception
+                    // is when we are upcasting an existing object fat pointer
+                    // to use a different vtable. In that case, we want to
+                    // load out the original data pointer so we can repackage
+                    // it.
+                    (const_get_elt(llconst, &[abi::FAT_PTR_ADDR as u32]),
+                     Some(const_get_elt(llconst, &[abi::FAT_PTR_EXTRA as u32])))
+                } else {
+                    (llconst, None)
+                };
+
+                let unsized_ty = target.builtin_deref(true, ty::NoPreference)
+                    .expect("consts: unsizing got non-pointer target type").ty;
+                let ptr_ty = type_of::in_memory_type_of(cx, unsized_ty).ptr_to();
+                let base = ptrcast(base, ptr_ty);
+                let info = base::unsized_info(cx, pointee_ty, unsized_ty, old_info);
+
+                if old_info.is_none() {
+                    let prev_const = cx.const_unsized().borrow_mut()
+                                       .insert(base, llconst);
+                    assert!(prev_const.is_none() || prev_const == Some(llconst));
+                }
+                assert_eq!(abi::FAT_PTR_ADDR, 0);
+                assert_eq!(abi::FAT_PTR_EXTRA, 1);
+                llconst = C_struct(cx, &[base, info], false);
+            }
+        }
+        None => {}
+    };
+
+    let llty = type_of::sizing_type_of(cx, ety_adjusted);
+    let csize = machine::llsize_of_alloc(cx, val_ty(llconst));
+    let tsize = machine::llsize_of_alloc(cx, llty);
+    if csize != tsize {
+        cx.sess().abort_if_errors();
+        unsafe {
+            // FIXME these values could use some context
+            llvm::LLVMDumpValue(llconst);
+            llvm::LLVMDumpValue(C_undef(llty));
+        }
+        bug!("const {:?} of type {:?} has size {} instead of {}",
+             e, ety_adjusted,
+             csize, tsize);
+    }
+    Ok((llconst, ety_adjusted))
+}
+
+fn check_unary_expr_validity(cx: &CrateContext, e: &hir::Expr, t: Ty,
+                             te: ValueRef, trueconst: TrueConst) -> Result<(), ConstEvalFailure> {
+    // The only kind of unary expression that we check for validity
+    // here is `-expr`, to check if it "overflows" (e.g. `-i32::MIN`).
+    if let hir::ExprUnary(hir::UnNeg, ref inner_e) = e.node {
+
+        // An unfortunate special case: we parse e.g. -128 as a
+        // negation of the literal 128, which means if we're expecting
+        // an i8 (or if it was already suffixed, e.g. `-128_i8`), then
+        // 128 will have already overflowed to -128, and so the
+        // constant evaluator thinks we're trying to negate -128.
+        //
+        // Catch this up front by looking for ExprLit directly,
+        // and just accepting it.
+        if let hir::ExprLit(_) = inner_e.node { return Ok(()); }
+        let cval = match to_const_int(te, t, cx.tcx()) {
+            Some(v) => v,
+            None => return Ok(()),
+        };
+        match -cval {
+            Ok(_) => return Ok(()),
+            Err(err) => const_err(cx, e, Err(err), trueconst),
+        }
+    } else {
+        Ok(())
+    }
+}
+
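+/// Converts an LLVM integer constant of type `t` into a `ConstInt`, returning
+/// `None` if `t` is not an integer type or the value is not a constant integer.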
+fn to_const_int(value: ValueRef, t: Ty, tcx: &TyCtxt) -> Option<ConstInt> {
+    match t.sty {
+        ty::TyInt(int_type) => const_to_opt_int(value).and_then(|input| match int_type {
+            ast::IntTy::I8 => {
+                assert_eq!(input as i8 as i64, input);
+                Some(ConstInt::I8(input as i8))
+            },
+            ast::IntTy::I16 => {
+                assert_eq!(input as i16 as i64, input);
+                Some(ConstInt::I16(input as i16))
+            },
+            ast::IntTy::I32 => {
+                assert_eq!(input as i32 as i64, input);
+                Some(ConstInt::I32(input as i32))
+            },
+            ast::IntTy::I64 => {
+                Some(ConstInt::I64(input))
+            },
+            ast::IntTy::Is => {
+                ConstIsize::new(input, tcx.sess.target.int_type)
+                    .ok().map(ConstInt::Isize)
+            },
+        }),
+        ty::TyUint(uint_type) => const_to_opt_uint(value).and_then(|input| match uint_type {
+            ast::UintTy::U8 => {
+                assert_eq!(input as u8 as u64, input);
+                Some(ConstInt::U8(input as u8))
+            },
+            ast::UintTy::U16 => {
+                assert_eq!(input as u16 as u64, input);
+                Some(ConstInt::U16(input as u16))
+            },
+            ast::UintTy::U32 => {
+                assert_eq!(input as u32 as u64, input);
+                Some(ConstInt::U32(input as u32))
+            },
+            ast::UintTy::U64 => {
+                Some(ConstInt::U64(input))
+            },
+            ast::UintTy::Us => {
+                ConstUsize::new(input, tcx.sess.target.uint_type)
+                    .ok().map(ConstInt::Usize)
+            },
+        }),
+        _ => None,
+    }
+}
+
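+/// Turns a constant-math result into a `ConstEvalFailure`: errors are reported as
+/// hard errors for true constants and as warnings otherwise; successes are ignored.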
+fn const_err(cx: &CrateContext,
+             e: &hir::Expr,
+             result: Result<ConstInt, ConstMathErr>,
+             trueconst: TrueConst)
+             -> Result<(), ConstEvalFailure> {
+    match (result, trueconst) {
+        (Ok(_), _) => {
+            // We do not actually care about a successful result.
+            Ok(())
+        },
+        (Err(err), TrueConst::Yes) => {
+            let err = ConstEvalErr{ span: e.span, kind: ErrKind::Math(err) };
+            cx.tcx().sess.span_err(e.span, &err.description());
+            Err(Compiletime(err))
+        },
+        (Err(err), TrueConst::No) => {
+            let err = ConstEvalErr{ span: e.span, kind: ErrKind::Math(err) };
+            cx.tcx().sess.span_warn(e.span, &err.description());
+            Err(Runtime(err))
+        },
+    }
+}
+
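+/// Checks a constant binary arithmetic expression for overflow and similar errors
+/// by redoing the operation with checked `ConstInt` arithmetic.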
+fn check_binary_expr_validity(cx: &CrateContext, e: &hir::Expr, t: Ty,
+                              te1: ValueRef, te2: ValueRef,
+                              trueconst: TrueConst) -> Result<(), ConstEvalFailure> {
+    let b = if let hir::ExprBinary(b, _, _) = e.node { b } else { bug!() };
+    let (lhs, rhs) = match (to_const_int(te1, t, cx.tcx()), to_const_int(te2, t, cx.tcx())) {
+        (Some(v1), Some(v2)) => (v1, v2),
+        _ => return Ok(()),
+    };
+    let result = match b.node {
+        hir::BiAdd => lhs + rhs,
+        hir::BiSub => lhs - rhs,
+        hir::BiMul => lhs * rhs,
+        hir::BiDiv => lhs / rhs,
+        hir::BiRem => lhs % rhs,
+        hir::BiShl => lhs << rhs,
+        hir::BiShr => lhs >> rhs,
+        _ => return Ok(()),
+    };
+    const_err(cx, e, result, trueconst)
+}
+
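+/// Translates a constant expression without applying adjustments, handling each
+/// supported expression kind (literals, operators, aggregates, paths, calls, ...).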
+fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                   e: &hir::Expr,
+                                   ety: Ty<'tcx>,
+                                   param_substs: &'tcx Substs<'tcx>,
+                                   fn_args: FnArgMap,
+                                   trueconst: TrueConst)
+                                   -> Result<ValueRef, ConstEvalFailure>
+{
+    debug!("const_expr_unadjusted(e={:?}, ety={:?}, param_substs={:?})",
+           e,
+           ety,
+           param_substs);
+
+    let map_list = |exprs: &[P<hir::Expr>]| -> Result<Vec<ValueRef>, ConstEvalFailure> {
+        exprs.iter()
+             .map(|e| const_expr(cx, &e, param_substs, fn_args, trueconst).map(|(l, _)| l))
+             .collect::<Vec<Result<ValueRef, ConstEvalFailure>>>()
+             .into_iter()
+             .collect()
+         // this dance is necessary to eagerly run const_expr so all errors are reported
+    };
+    let _icx = push_ctxt("const_expr");
+    Ok(match e.node {
+        hir::ExprLit(ref lit) => const_lit(cx, e, &lit),
+        hir::ExprBinary(b, ref e1, ref e2) => {
+            /* Neither type is bottom, and we expect them to be unified
+             * already, so the following is safe. */
+            let (te1, ty) = const_expr(cx, &e1, param_substs, fn_args, trueconst)?;
+            debug!("const_expr_unadjusted: te1={:?}, ty={:?}",
+                   Value(te1), ty);
+            assert!(!ty.is_simd());
+            let is_float = ty.is_fp();
+            let signed = ty.is_signed();
+
+            let (te2, ty2) = const_expr(cx, &e2, param_substs, fn_args, trueconst)?;
+            debug!("const_expr_unadjusted: te2={:?}, ty={:?}",
+                   Value(te2), ty2);
+
+            check_binary_expr_validity(cx, e, ty, te1, te2, trueconst)?;
+
+            unsafe { match b.node {
+                hir::BiAdd if is_float => llvm::LLVMConstFAdd(te1, te2),
+                hir::BiAdd             => llvm::LLVMConstAdd(te1, te2),
+
+                hir::BiSub if is_float => llvm::LLVMConstFSub(te1, te2),
+                hir::BiSub             => llvm::LLVMConstSub(te1, te2),
+
+                hir::BiMul if is_float => llvm::LLVMConstFMul(te1, te2),
+                hir::BiMul             => llvm::LLVMConstMul(te1, te2),
+
+                hir::BiDiv if is_float => llvm::LLVMConstFDiv(te1, te2),
+                hir::BiDiv if signed   => llvm::LLVMConstSDiv(te1, te2),
+                hir::BiDiv             => llvm::LLVMConstUDiv(te1, te2),
+
+                hir::BiRem if is_float => llvm::LLVMConstFRem(te1, te2),
+                hir::BiRem if signed   => llvm::LLVMConstSRem(te1, te2),
+                hir::BiRem             => llvm::LLVMConstURem(te1, te2),
+
+                hir::BiAnd    => llvm::LLVMConstAnd(te1, te2),
+                hir::BiOr     => llvm::LLVMConstOr(te1, te2),
+                hir::BiBitXor => llvm::LLVMConstXor(te1, te2),
+                hir::BiBitAnd => llvm::LLVMConstAnd(te1, te2),
+                hir::BiBitOr  => llvm::LLVMConstOr(te1, te2),
+                hir::BiShl    => {
+                    let te2 = base::cast_shift_const_rhs(b.node, te1, te2);
+                    llvm::LLVMConstShl(te1, te2)
+                },
+                hir::BiShr    => {
+                    let te2 = base::cast_shift_const_rhs(b.node, te1, te2);
+                    if signed { llvm::LLVMConstAShr(te1, te2) }
+                    else      { llvm::LLVMConstLShr(te1, te2) }
+                },
+                hir::BiEq | hir::BiNe | hir::BiLt | hir::BiLe | hir::BiGt | hir::BiGe => {
+                    if is_float {
+                        let cmp = base::bin_op_to_fcmp_predicate(b.node);
+                        ConstFCmp(cmp, te1, te2)
+                    } else {
+                        let cmp = base::bin_op_to_icmp_predicate(b.node, signed);
+                        ConstICmp(cmp, te1, te2)
+                    }
+                },
+            } } // unsafe { match b.node {
+        },
+        hir::ExprUnary(u, ref inner_e) => {
+            let (te, ty) = const_expr(cx, &inner_e, param_substs, fn_args, trueconst)?;
+
+            check_unary_expr_validity(cx, e, ty, te, trueconst)?;
+
+            let is_float = ty.is_fp();
+            unsafe { match u {
+                hir::UnDeref           => const_deref(cx, te, ty).0,
+                hir::UnNot             => llvm::LLVMConstNot(te),
+                hir::UnNeg if is_float => llvm::LLVMConstFNeg(te),
+                hir::UnNeg             => llvm::LLVMConstNeg(te),
+            } }
+        },
+        hir::ExprField(ref base, field) => {
+            let (bv, bt) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
+            let brepr = adt::represent_type(cx, bt);
+            let vinfo = VariantInfo::from_ty(cx.tcx(), bt, None);
+            let ix = vinfo.field_index(field.node);
+            adt::const_get_field(&brepr, bv, vinfo.discr, ix)
+        },
+        hir::ExprTupField(ref base, idx) => {
+            let (bv, bt) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
+            let brepr = adt::represent_type(cx, bt);
+            let vinfo = VariantInfo::from_ty(cx.tcx(), bt, None);
+            adt::const_get_field(&brepr, bv, vinfo.discr, idx.node)
+        },
+        hir::ExprIndex(ref base, ref index) => {
+            let (bv, bt) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
+            let iv = const_expr(cx, &index, param_substs, fn_args, TrueConst::Yes)?.0;
+            let iv = if let Some(iv) = const_to_opt_uint(iv) {
+                iv
+            } else {
+                span_bug!(index.span, "index is not an integer-constant expression");
+            };
+            let (arr, len) = match bt.sty {
+                ty::TyArray(_, u) => (bv, C_uint(cx, u)),
+                ty::TySlice(..) | ty::TyStr => {
+                    let e1 = const_get_elt(bv, &[0]);
+                    (load_const(cx, e1, bt), const_get_elt(bv, &[1]))
+                },
+                ty::TyRef(_, mt) => match mt.ty.sty {
+                    ty::TyArray(_, u) => {
+                        (load_const(cx, bv, mt.ty), C_uint(cx, u))
+                    },
+                    _ => span_bug!(base.span,
+                                   "index-expr base must be a vector \
+                                    or string type, found {:?}",
+                                   bt),
+                },
+                _ => span_bug!(base.span,
+                               "index-expr base must be a vector \
+                                or string type, found {:?}",
+                               bt),
+            };
+
+            let len = unsafe { llvm::LLVMConstIntGetZExtValue(len) as u64 };
+            let len = match bt.sty {
+                ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => match ty.sty {
+                    ty::TyStr => {
+                        assert!(len > 0);
+                        len - 1
+                    },
+                    _ => len,
+                },
+                _ => len,
+            };
+            if iv >= len {
+                // FIXME #3170: report this earlier on in the const-eval
+                // pass. Reporting here is a bit late.
+                span_err!(cx.sess(), e.span, E0515,
+                          "const index-expr is out of bounds");
+                C_undef(val_ty(arr).element_type())
+            } else {
+                const_get_elt(arr, &[iv as c_uint])
+            }
+        },
+        hir::ExprCast(ref base, _) => {
+            let t_cast = ety;
+            let llty = type_of::type_of(cx, t_cast);
+            let (v, t_expr) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
+            debug!("trans_const_cast({:?} as {:?})", t_expr, t_cast);
+            if expr::cast_is_noop(cx.tcx(), base, t_expr, t_cast) {
+                return Ok(v);
+            }
+            if type_is_fat_ptr(cx.tcx(), t_expr) {
+                // Fat pointer casts.
+                let t_cast_inner =
+                    t_cast.builtin_deref(true, ty::NoPreference).expect("cast to non-pointer").ty;
+                let ptr_ty = type_of::in_memory_type_of(cx, t_cast_inner).ptr_to();
+                let addr = ptrcast(const_get_elt(v, &[abi::FAT_PTR_ADDR as u32]),
+                                   ptr_ty);
+                if type_is_fat_ptr(cx.tcx(), t_cast) {
+                    let info = const_get_elt(v, &[abi::FAT_PTR_EXTRA as u32]);
+                    return Ok(C_struct(cx, &[addr, info], false))
+                } else {
+                    return Ok(addr);
+                }
+            }
+            unsafe { match (
+                CastTy::from_ty(t_expr).expect("bad input type for cast"),
+                CastTy::from_ty(t_cast).expect("bad output type for cast"),
+            ) {
+                (CastTy::Int(IntTy::CEnum), CastTy::Int(_)) => {
+                    let repr = adt::represent_type(cx, t_expr);
+                    let discr = adt::const_get_discrim(&repr, v);
+                    let iv = C_integral(cx.int_type(), discr.0, false);
+                    let s = adt::is_discr_signed(&repr) as Bool;
+                    llvm::LLVMConstIntCast(iv, llty.to_ref(), s)
+                },
+                (CastTy::Int(_), CastTy::Int(_)) => {
+                    let s = t_expr.is_signed() as Bool;
+                    llvm::LLVMConstIntCast(v, llty.to_ref(), s)
+                },
+                (CastTy::Int(_), CastTy::Float) => {
+                    if t_expr.is_signed() {
+                        llvm::LLVMConstSIToFP(v, llty.to_ref())
+                    } else {
+                        llvm::LLVMConstUIToFP(v, llty.to_ref())
+                    }
+                },
+                (CastTy::Float, CastTy::Float) => llvm::LLVMConstFPCast(v, llty.to_ref()),
+                (CastTy::Float, CastTy::Int(IntTy::I)) => llvm::LLVMConstFPToSI(v, llty.to_ref()),
+                (CastTy::Float, CastTy::Int(_)) => llvm::LLVMConstFPToUI(v, llty.to_ref()),
+                (CastTy::Ptr(_), CastTy::Ptr(_)) | (CastTy::FnPtr, CastTy::Ptr(_))
+                | (CastTy::RPtr(_), CastTy::Ptr(_)) => {
+                    ptrcast(v, llty)
+                },
+                (CastTy::FnPtr, CastTy::FnPtr) => ptrcast(v, llty), // isn't this a coercion?
+                (CastTy::Int(_), CastTy::Ptr(_)) => llvm::LLVMConstIntToPtr(v, llty.to_ref()),
+                (CastTy::Ptr(_), CastTy::Int(_)) | (CastTy::FnPtr, CastTy::Int(_)) => {
+                  llvm::LLVMConstPtrToInt(v, llty.to_ref())
+                },
+                _ => {
+                  span_bug!(e.span, "bad combination of types for cast")
+                },
+            } } // unsafe { match ( ... ) {
+        },
+        hir::ExprAddrOf(hir::MutImmutable, ref sub) => {
+            // If this is the address of some static, then we need to return
+            // the actual address of the static itself (short circuit the rest
+            // of const eval).
+            let mut cur = sub;
+            loop {
+                match cur.node {
+                    hir::ExprBlock(ref blk) => {
+                        if let Some(ref sub) = blk.expr {
+                            cur = sub;
+                        } else {
+                            break;
+                        }
+                    },
+                    _ => break,
+                }
+            }
+            let opt_def = cx.tcx().def_map.borrow().get(&cur.id).map(|d| d.full_def());
+            if let Some(Def::Static(def_id, _)) = opt_def {
+                get_static(cx, def_id).val
+            } else {
+                // If this isn't the address of a static, then keep going through
+                // normal constant evaluation.
+                let (v, ty) = const_expr(cx, &sub, param_substs, fn_args, trueconst)?;
+                addr_of(cx, v, type_of::align_of(cx, ty), "ref")
+            }
+        },
+        hir::ExprAddrOf(hir::MutMutable, ref sub) => {
+            let (v, ty) = const_expr(cx, &sub, param_substs, fn_args, trueconst)?;
+            addr_of_mut(cx, v, type_of::align_of(cx, ty), "ref_mut_slice")
+        },
+        hir::ExprTup(ref es) => {
+            let repr = adt::represent_type(cx, ety);
+            let vals = map_list(&es[..])?;
+            adt::trans_const(cx, &repr, Disr(0), &vals[..])
+        },
+        hir::ExprStruct(_, ref fs, ref base_opt) => {
+            let repr = adt::represent_type(cx, ety);
+
+            let base_val = match *base_opt {
+                Some(ref base) => Some(const_expr(
+                    cx,
+                    &base,
+                    param_substs,
+                    fn_args,
+                    trueconst,
+                )?),
+                None => None
+            };
+
+            let VariantInfo { discr, fields } = VariantInfo::of_node(cx.tcx(), ety, e.id);
+            let cs = fields.iter().enumerate().map(|(ix, &Field(f_name, _))| {
+                match (fs.iter().find(|f| f_name == f.name.node), base_val) {
+                    (Some(ref f), _) => {
+                        const_expr(cx, &f.expr, param_substs, fn_args, trueconst).map(|(l, _)| l)
+                    },
+                    (_, Some((bv, _))) => Ok(adt::const_get_field(&repr, bv, discr, ix)),
+                    (_, None) => span_bug!(e.span, "missing struct field"),
+                }
+            })
+            .collect::<Vec<Result<_, ConstEvalFailure>>>()
+            .into_iter()
+            .collect::<Result<Vec<_>,ConstEvalFailure>>();
+            let cs = cs?;
+            if ety.is_simd() {
+                C_vector(&cs[..])
+            } else {
+                adt::trans_const(cx, &repr, discr, &cs[..])
+            }
+        },
+        hir::ExprVec(ref es) => {
+            let unit_ty = ety.sequence_element_type(cx.tcx());
+            let llunitty = type_of::type_of(cx, unit_ty);
+            let vs = es.iter()
+                       .map(|e| const_expr(
+                           cx,
+                           &e,
+                           param_substs,
+                           fn_args,
+                           trueconst,
+                       ).map(|(l, _)| l))
+                       .collect::<Vec<Result<_, ConstEvalFailure>>>()
+                       .into_iter()
+                       .collect::<Result<Vec<_>, ConstEvalFailure>>();
+            let vs = vs?;
+            // If the vector contains enums, an LLVM array won't work.
+            if vs.iter().any(|vi| val_ty(*vi) != llunitty) {
+                C_struct(cx, &vs[..], false)
+            } else {
+                C_array(llunitty, &vs[..])
+            }
+        },
+        hir::ExprRepeat(ref elem, ref count) => {
+            let unit_ty = ety.sequence_element_type(cx.tcx());
+            let llunitty = type_of::type_of(cx, unit_ty);
+            let n = eval_repeat_count(cx.tcx(), count);
+            let unit_val = const_expr(cx, &elem, param_substs, fn_args, trueconst)?.0;
+            let vs = vec![unit_val; n];
+            if val_ty(unit_val) != llunitty {
+                C_struct(cx, &vs[..], false)
+            } else {
+                C_array(llunitty, &vs[..])
+            }
+        },
+        hir::ExprPath(..) => {
+            let def = cx.tcx().def_map.borrow().get(&e.id).unwrap().full_def();
+            match def {
+                Def::Local(_, id) => {
+                    if let Some(val) = fn_args.and_then(|args| args.get(&id).cloned()) {
+                        val
+                    } else {
+                        span_bug!(e.span, "const fn argument not found")
+                    }
+                }
+                Def::Fn(..) | Def::Method(..) => C_nil(cx),
+                Def::Const(def_id) | Def::AssociatedConst(def_id) => {
+                    load_const(cx, get_const_val(cx, def_id, e, param_substs)?,
+                               ety)
+                }
+                Def::Variant(enum_did, variant_did) => {
+                    let vinfo = cx.tcx().lookup_adt_def(enum_did).variant_with_id(variant_did);
+                    match vinfo.kind() {
+                        ty::VariantKind::Unit => {
+                            let repr = adt::represent_type(cx, ety);
+                            adt::trans_const(cx, &repr, Disr::from(vinfo.disr_val), &[])
+                        }
+                        ty::VariantKind::Tuple => C_nil(cx),
+                        ty::VariantKind::Struct => {
+                            span_bug!(e.span, "path-expr refers to a dict variant!")
+                        }
+                    }
+                }
+                // Unit struct or ctor.
+                Def::Struct(..) => C_null(type_of::type_of(cx, ety)),
+                _ => {
+                    span_bug!(e.span, "expected a const, fn, struct, \
+                                       or variant def")
+                }
+            }
+        },
+        hir::ExprCall(ref callee, ref args) => {
+            let mut callee = &**callee;
+            loop {
+                callee = match callee.node {
+                    hir::ExprBlock(ref block) => match block.expr {
+                        Some(ref tail) => &tail,
+                        None => break,
+                    },
+                    _ => break,
+                };
+            }
+            let def = cx.tcx().def_map.borrow()[&callee.id].full_def();
+            let arg_vals = map_list(args)?;
+            match def {
+                Def::Fn(did) | Def::Method(did) => {
+                    const_fn_call(
+                        cx,
+                        did,
+                        cx.tcx().node_id_item_substs(callee.id).substs,
+                        &arg_vals,
+                        param_substs,
+                        trueconst,
+                    )?
+                }
+                Def::Struct(..) => {
+                    if ety.is_simd() {
+                        C_vector(&arg_vals[..])
+                    } else {
+                        let repr = adt::represent_type(cx, ety);
+                        adt::trans_const(cx, &repr, Disr(0), &arg_vals[..])
+                    }
+                }
+                Def::Variant(enum_did, variant_did) => {
+                    let repr = adt::represent_type(cx, ety);
+                    let vinfo = cx.tcx().lookup_adt_def(enum_did).variant_with_id(variant_did);
+                    adt::trans_const(cx,
+                                     &repr,
+                                     Disr::from(vinfo.disr_val),
+                                     &arg_vals[..])
+                }
+                _ => span_bug!(e.span, "expected a struct, variant, or const fn def"),
+            }
+        },
+        hir::ExprMethodCall(_, _, ref args) => {
+            let arg_vals = map_list(args)?;
+            let method_call = ty::MethodCall::expr(e.id);
+            let method = cx.tcx().tables.borrow().method_map[&method_call];
+            const_fn_call(cx, method.def_id, method.substs.clone(),
+                          &arg_vals, param_substs, trueconst)?
+        },
+        hir::ExprType(ref e, _) => const_expr(cx, &e, param_substs, fn_args, trueconst)?.0,
+        hir::ExprBlock(ref block) => {
+            match block.expr {
+                Some(ref expr) => const_expr(
+                    cx,
+                    &expr,
+                    param_substs,
+                    fn_args,
+                    trueconst,
+                )?.0,
+                None => C_nil(cx),
+            }
+        },
+        hir::ExprClosure(_, ref decl, ref body) => {
+            match ety.sty {
+                ty::TyClosure(def_id, ref substs) => {
+                    closure::trans_closure_expr(closure::Dest::Ignore(cx),
+                                                decl,
+                                                body,
+                                                e.id,
+                                                def_id,
+                                                substs);
+                }
+                _ =>
+                    span_bug!(
+                        e.span,
+                        "bad type for closure expr: {:?}", ety)
+            }
+            C_null(type_of::type_of(cx, ety))
+        },
+        _ => span_bug!(e.span,
+                       "bad constant expression type in consts::const_expr"),
+    })
+}
+
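+/// Returns the LLVM global for the static `def_id`, declaring or defining it on
+/// first use (including foreign statics and statics from other crates) and
+/// caching it in the instance map.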
+pub fn get_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId)
+                            -> Datum<'tcx, Lvalue> {
+    let ty = ccx.tcx().lookup_item_type(def_id).ty;
+
+    let instance = Instance::mono(ccx.tcx(), def_id);
+    if let Some(&g) = ccx.instances().borrow().get(&instance) {
+        return Datum::new(g, ty, Lvalue::new("static"));
+    }
+
+    let g = if let Some(id) = ccx.tcx().map.as_local_node_id(def_id) {
+        let llty = type_of::type_of(ccx, ty);
+        match ccx.tcx().map.get(id) {
+            hir_map::NodeItem(&hir::Item {
+                ref attrs, span, node: hir::ItemStatic(..), ..
+            }) => {
+                // If this static came from an external crate, then
+                // we need to get the symbol from metadata instead of
+                // using the current crate's name/version
+                // information in the hash of the symbol
+                let sym = exported_name(ccx, instance, attrs);
+                debug!("making {}", sym);
+
+                // Create the global before evaluating the initializer;
+                // this is necessary to allow recursive statics.
+                let g = declare::define_global(ccx, &sym, llty).unwrap_or_else(|| {
+                    ccx.sess().span_fatal(span,
+                        &format!("symbol `{}` is already defined", sym))
+                });
+
+                ccx.item_symbols().borrow_mut().insert(id, sym);
+                g
+            }
+
+            hir_map::NodeForeignItem(&hir::ForeignItem {
+                ref attrs, name, span, node: hir::ForeignItemStatic(..), ..
+            }) => {
+                let ident = imported_name(name, attrs);
+                let g = if let Some(name) =
+                        attr::first_attr_value_str_by_name(&attrs, "linkage") {
+                    // If this is a static with a linkage specified, then we need to handle
+                    // it a little specially. The typesystem prevents things like &T and
+                    // extern "C" fn() from being non-null, so we can't just declare a
+                    // static and call it a day. Some linkages (like weak) will make it such
+                    // that the static actually has a null value.
+                    let linkage = match base::llvm_linkage_by_name(&name) {
+                        Some(linkage) => linkage,
+                        None => {
+                            ccx.sess().span_fatal(span, "invalid linkage specified");
+                        }
+                    };
+                    let llty2 = match ty.sty {
+                        ty::TyRawPtr(ref mt) => type_of::type_of(ccx, mt.ty),
+                        _ => {
+                            ccx.sess().span_fatal(span, "must have type `*const T` or `*mut T`");
+                        }
+                    };
+                    unsafe {
+                        // Declare a symbol `foo` with the desired linkage.
+                        let g1 = declare::declare_global(ccx, &ident, llty2);
+                        llvm::SetLinkage(g1, linkage);
+
+                        // Declare an internal global `extern_with_linkage_foo` which
+                        // is initialized with the address of `foo`.  If `foo` is
+                        // discarded during linking (for example, if `foo` has weak
+                        // linkage and there are no definitions), then
+                        // `extern_with_linkage_foo` will instead be initialized to
+                        // zero.
+                        let mut real_name = "_rust_extern_with_linkage_".to_string();
+                        real_name.push_str(&ident);
+                        let g2 = declare::define_global(ccx, &real_name, llty).unwrap_or_else(||{
+                            ccx.sess().span_fatal(span,
+                                &format!("symbol `{}` is already defined", ident))
+                        });
+                        llvm::SetLinkage(g2, llvm::InternalLinkage);
+                        llvm::LLVMSetInitializer(g2, g1);
+                        g2
+                    }
+                } else {
+                    // Generate an external declaration.
+                    declare::declare_global(ccx, &ident, llty)
+                };
+
+                for attr in attrs {
+                    if attr.check_name("thread_local") {
+                        llvm::set_thread_local(g, true);
+                    }
+                }
+
+                g
+            }
+
+            item => bug!("get_static: expected static, found {:?}", item)
+        }
+    } else {
+        // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
+        // FIXME(nagisa): investigate whether it can be changed into define_global
+        let name = ccx.sess().cstore.item_symbol(def_id);
+        let g = declare::declare_global(ccx, &name, type_of::type_of(ccx, ty));
+        // Thread-local statics in some other crate need to *always* be linked
+        // against in a thread-local fashion, so we need to be sure to apply the
+        // thread-local attribute locally if it was present remotely. If we
+        // don't do this then linker errors can be generated where the linker
+        // complains that one object file has a thread local version of the
+        // symbol and another one doesn't.
+        for attr in ccx.tcx().get_attrs(def_id).iter() {
+            if attr.check_name("thread_local") {
+                llvm::set_thread_local(g, true);
+            }
+        }
+        if ccx.use_dll_storage_attrs() {
+            llvm::SetDLLStorageClass(g, llvm::DLLImportStorageClass);
+        }
+        g
+    };
+
+    ccx.instances().borrow_mut().insert(instance, g);
+    Datum::new(g, ty, Lvalue::new("static"))
+}
+
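+/// Translates the initializer of a static item into its LLVM global, setting the
+/// alignment, constness, and `#[thread_local]` attributes as appropriate.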
+pub fn trans_static(ccx: &CrateContext,
+                    m: hir::Mutability,
+                    expr: &hir::Expr,
+                    id: ast::NodeId,
+                    attrs: &[ast::Attribute])
+                    -> Result<ValueRef, ConstEvalErr> {
+
+    if collector::collecting_debug_information(ccx) {
+        ccx.record_translation_item_as_generated(TransItem::Static(id));
+    }
+
+    unsafe {
+        let _icx = push_ctxt("trans_static");
+        let def_id = ccx.tcx().map.local_def_id(id);
+        let datum = get_static(ccx, def_id);
+
+        let empty_substs = ccx.tcx().mk_substs(Substs::empty());
+        let (v, _) = const_expr(
+            ccx,
+            expr,
+            empty_substs,
+            None,
+            TrueConst::Yes,
+        ).map_err(|e| e.into_inner())?;
+
+        // boolean SSA values are i1, but they have to be stored in i8 slots,
+        // otherwise some LLVM optimization passes don't work as expected
+        let mut val_llty = val_ty(v);
+        let v = if val_llty == Type::i1(ccx) {
+            val_llty = Type::i8(ccx);
+            llvm::LLVMConstZExt(v, val_llty.to_ref())
+        } else {
+            v
+        };
+
+        let llty = type_of::type_of(ccx, datum.ty);
+        let g = if val_llty == llty {
+            datum.val
+        } else {
+            // If we created the global with the wrong type,
+            // correct the type.
+            let empty_string = CString::new("").unwrap();
+            let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(datum.val));
+            let name_string = CString::new(name_str_ref.to_bytes()).unwrap();
+            llvm::LLVMSetValueName(datum.val, empty_string.as_ptr());
+            let new_g = llvm::LLVMGetOrInsertGlobal(
+                ccx.llmod(), name_string.as_ptr(), val_llty.to_ref());
+            // To avoid breaking any invariants, we leave around the old
+            // global for the moment; we'll replace all references to it
+            // with the new global later. (See base::trans_crate.)
+            ccx.statics_to_rauw().borrow_mut().push((datum.val, new_g));
+            new_g
+        };
+        llvm::LLVMSetAlignment(g, type_of::align_of(ccx, datum.ty));
+        llvm::LLVMSetInitializer(g, v);
+
+        // As an optimization, all shared statics which do not have interior
+        // mutability are placed into read-only memory.
+        if m != hir::MutMutable {
+            let tcontents = datum.ty.type_contents(ccx.tcx());
+            if !tcontents.interior_unsafe() {
+                llvm::LLVMSetGlobalConstant(g, llvm::True);
+            }
+        }
+
+        debuginfo::create_global_var_metadata(ccx, id, g);
+
+        if attr::contains_name(attrs,
+                               "thread_local") {
+            llvm::set_thread_local(g, true);
+        }
+        Ok(g)
+    }
+}
diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs
new file mode 100644 (file)
index 0000000..1217b2b
--- /dev/null
@@ -0,0 +1,1087 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm;
+use llvm::{ContextRef, ModuleRef, ValueRef, BuilderRef};
+use rustc::dep_graph::{DepNode, DepTrackingMap, DepTrackingMapConfig};
+use middle::cstore::LinkMeta;
+use rustc::hir::def::ExportMap;
+use rustc::hir::def_id::DefId;
+use rustc::traits;
+use rustc::mir::mir_map::MirMap;
+use rustc::mir::repr as mir;
+use adt;
+use base;
+use builder::Builder;
+use common::BuilderRef_res;
+use debuginfo;
+use declare;
+use glue::DropGlueKind;
+use mir::CachedMir;
+use monomorphize::Instance;
+
+use collector::{TransItem, TransItemState};
+use type_::{Type, TypeNames};
+use rustc::ty::subst::{Substs, VecPerParamSpace};
+use rustc::ty::{self, Ty, TyCtxt};
+use session::config::NoDebugInfo;
+use session::Session;
+use util::sha2::Sha256;
+use util::nodemap::{NodeMap, NodeSet, DefIdMap, FnvHashMap, FnvHashSet};
+
+use std::ffi::{CStr, CString};
+use std::cell::{Cell, RefCell};
+use std::marker::PhantomData;
+use std::ptr;
+use std::rc::Rc;
+use std::str;
+use syntax::ast;
+use syntax::parse::token::InternedString;
+
+pub struct Stats {
+    pub n_glues_created: Cell<usize>,
+    pub n_null_glues: Cell<usize>,
+    pub n_real_glues: Cell<usize>,
+    pub n_fns: Cell<usize>,
+    pub n_monos: Cell<usize>,
+    pub n_inlines: Cell<usize>,
+    pub n_closures: Cell<usize>,
+    pub n_llvm_insns: Cell<usize>,
+    pub llvm_insns: RefCell<FnvHashMap<String, usize>>,
+    // (ident, llvm-instructions)
+    pub fn_stats: RefCell<Vec<(String, usize)>>,
+}
+
+/// The shared portion of a `CrateContext`.  There is one `SharedCrateContext`
+/// per crate.  The data here is shared between all compilation units of the
+/// crate, so it must not contain references to any LLVM data structures
+/// (aside from metadata-related ones).
+pub struct SharedCrateContext<'a, 'tcx: 'a> {
+    local_ccxs: Vec<LocalCrateContext<'tcx>>,
+
+    metadata_llmod: ModuleRef,
+    metadata_llcx: ContextRef,
+
+    export_map: ExportMap,
+    reachable: NodeSet,
+    item_symbols: RefCell<NodeMap<String>>,
+    link_meta: LinkMeta,
+    symbol_hasher: RefCell<Sha256>,
+    tcx: &'a TyCtxt<'tcx>,
+    stats: Stats,
+    check_overflow: bool,
+    check_drop_flag_for_sanity: bool,
+    mir_map: &'a MirMap<'tcx>,
+    mir_cache: RefCell<DefIdMap<Rc<mir::Mir<'tcx>>>>,
+
+    available_monomorphizations: RefCell<FnvHashSet<String>>,
+    available_drop_glues: RefCell<FnvHashMap<DropGlueKind<'tcx>, String>>,
+    use_dll_storage_attrs: bool,
+
+    translation_items: RefCell<FnvHashMap<TransItem<'tcx>, TransItemState>>,
+}
+
+/// The local portion of a `CrateContext`.  There is one `LocalCrateContext`
+/// per compilation unit.  Each one has its own LLVM `ContextRef` so that
+/// several compilation units may be optimized in parallel.  All other LLVM
+/// data structures in the `LocalCrateContext` are tied to that `ContextRef`.
+pub struct LocalCrateContext<'tcx> {
+    llmod: ModuleRef,
+    llcx: ContextRef,
+    tn: TypeNames,
+    needs_unwind_cleanup_cache: RefCell<FnvHashMap<Ty<'tcx>, bool>>,
+    fn_pointer_shims: RefCell<FnvHashMap<Ty<'tcx>, ValueRef>>,
+    drop_glues: RefCell<FnvHashMap<DropGlueKind<'tcx>, ValueRef>>,
+    /// Track mapping of external ids to local items imported for inlining
+    external: RefCell<DefIdMap<Option<ast::NodeId>>>,
+    /// Backwards version of the `external` map (inlined items to where they
+    /// came from)
+    external_srcs: RefCell<NodeMap<DefId>>,
+    /// Cache instances of monomorphic and polymorphic items
+    instances: RefCell<FnvHashMap<Instance<'tcx>, ValueRef>>,
+    monomorphizing: RefCell<DefIdMap<usize>>,
+    /// Cache generated vtables
+    vtables: RefCell<FnvHashMap<ty::PolyTraitRef<'tcx>, ValueRef>>,
+    /// Cache of constant strings.
+    const_cstr_cache: RefCell<FnvHashMap<InternedString, ValueRef>>,
+
+    /// Reverse-direction for const ptrs cast from globals.
+    /// Key is a ValueRef holding a *T,
+    /// Val is a ValueRef holding a *[T].
+    ///
+    /// Needed because LLVM loses pointer->pointee association
+    /// when we ptrcast, and we have to ptrcast during translation
+    /// of a [T] const because we form a slice, a (*T,usize) pair, not
+    /// a pointer to an LLVM array type. Similar for trait objects.
+    const_unsized: RefCell<FnvHashMap<ValueRef, ValueRef>>,
+
+    /// Cache of emitted const globals (value -> global)
+    const_globals: RefCell<FnvHashMap<ValueRef, ValueRef>>,
+
+    /// Cache of emitted const values
+    const_values: RefCell<FnvHashMap<(ast::NodeId, &'tcx Substs<'tcx>), ValueRef>>,
+
+    /// Cache of external const values
+    extern_const_values: RefCell<DefIdMap<ValueRef>>,
+
+    impl_method_cache: RefCell<FnvHashMap<(DefId, ast::Name), DefId>>,
+
+    /// Cache of closure wrappers for bare fn's.
+    closure_bare_wrapper_cache: RefCell<FnvHashMap<ValueRef, ValueRef>>,
+
+    /// List of globals for static variables which need to be passed to the
+    /// LLVM function ReplaceAllUsesWith (RAUW) when translation is complete.
+    /// (We have to make sure we don't invalidate any ValueRefs referring
+    /// to constants.)
+    statics_to_rauw: RefCell<Vec<(ValueRef, ValueRef)>>,
+
+    lltypes: RefCell<FnvHashMap<Ty<'tcx>, Type>>,
+    llsizingtypes: RefCell<FnvHashMap<Ty<'tcx>, Type>>,
+    adt_reprs: RefCell<FnvHashMap<Ty<'tcx>, Rc<adt::Repr<'tcx>>>>,
+    type_hashcodes: RefCell<FnvHashMap<Ty<'tcx>, String>>,
+    int_type: Type,
+    opaque_vec_type: Type,
+    builder: BuilderRef_res,
+
+    /// Holds the LLVM values for closure IDs.
+    closure_vals: RefCell<FnvHashMap<Instance<'tcx>, ValueRef>>,
+
+    dbg_cx: Option<debuginfo::CrateDebugContext<'tcx>>,
+
+    eh_personality: Cell<Option<ValueRef>>,
+    eh_unwind_resume: Cell<Option<ValueRef>>,
+    rust_try_fn: Cell<Option<ValueRef>>,
+
+    intrinsics: RefCell<FnvHashMap<&'static str, ValueRef>>,
+
+    /// Number of LLVM instructions translated into this `LocalCrateContext`.
+    /// This is used to perform some basic load-balancing to keep all LLVM
+    /// contexts around the same size.
+    n_llvm_insns: Cell<usize>,
+
+    /// Depth of the current type-of computation - used to bail out
+    type_of_depth: Cell<usize>,
+
+    trait_cache: RefCell<DepTrackingMap<TraitSelectionCache<'tcx>>>,
+}
+
+// Implement DepTrackingMapConfig for `trait_cache`
+pub struct TraitSelectionCache<'tcx> {
+    data: PhantomData<&'tcx ()>
+}
+
+impl<'tcx> DepTrackingMapConfig for TraitSelectionCache<'tcx> {
+    type Key = ty::PolyTraitRef<'tcx>;
+    type Value = traits::Vtable<'tcx, ()>;
+    fn to_dep_node(key: &ty::PolyTraitRef<'tcx>) -> DepNode<DefId> {
+        ty::tls::with(|tcx| {
+            let lifted_key = tcx.lift(key).unwrap();
+            lifted_key.to_poly_trait_predicate().dep_node()
+        })
+    }
+}
+
+pub struct CrateContext<'a, 'tcx: 'a> {
+    shared: &'a SharedCrateContext<'a, 'tcx>,
+    local: &'a LocalCrateContext<'tcx>,
+    /// The index of `local` in `shared.local_ccxs`.  This is used in
+    /// `maybe_iter(true)` to identify the original `LocalCrateContext`.
+    index: usize,
+}
+
+pub struct CrateContextIterator<'a, 'tcx: 'a> {
+    shared: &'a SharedCrateContext<'a, 'tcx>,
+    index: usize,
+}
+
+impl<'a, 'tcx> Iterator for CrateContextIterator<'a,'tcx> {
+    type Item = CrateContext<'a, 'tcx>;
+
+    fn next(&mut self) -> Option<CrateContext<'a, 'tcx>> {
+        if self.index >= self.shared.local_ccxs.len() {
+            return None;
+        }
+
+        let index = self.index;
+        self.index += 1;
+
+        Some(CrateContext {
+            shared: self.shared,
+            local: &self.shared.local_ccxs[index],
+            index: index,
+        })
+    }
+}
+
+/// The iterator produced by `CrateContext::maybe_iter`.
+pub struct CrateContextMaybeIterator<'a, 'tcx: 'a> {
+    shared: &'a SharedCrateContext<'a, 'tcx>,
+    index: usize,
+    single: bool,
+    origin: usize,
+}
+
+impl<'a, 'tcx> Iterator for CrateContextMaybeIterator<'a, 'tcx> {
+    type Item = (CrateContext<'a, 'tcx>, bool);
+
+    fn next(&mut self) -> Option<(CrateContext<'a, 'tcx>, bool)> {
+        if self.index >= self.shared.local_ccxs.len() {
+            return None;
+        }
+
+        let index = self.index;
+        self.index += 1;
+        if self.single {
+            self.index = self.shared.local_ccxs.len();
+        }
+
+        let ccx = CrateContext {
+            shared: self.shared,
+            local: &self.shared.local_ccxs[index],
+            index: index,
+        };
+        Some((ccx, index == self.origin))
+    }
+}
+
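+/// Creates an LLVM context and module with the given name, configuring the
+/// module's data layout and target triple from the session's target.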
+unsafe fn create_context_and_module(sess: &Session, mod_name: &str) -> (ContextRef, ModuleRef) {
+    let llcx = llvm::LLVMContextCreate();
+    let mod_name = CString::new(mod_name).unwrap();
+    let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);
+
+    // Ensure that the hardcoded data-layout values remain the defaults.
+    if sess.target.target.options.is_builtin {
+        let tm = ::back::write::create_target_machine(sess);
+        llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
+        llvm::LLVMRustDisposeTargetMachine(tm);
+
+        let data_layout = llvm::LLVMGetDataLayout(llmod);
+        let data_layout = str::from_utf8(CStr::from_ptr(data_layout).to_bytes())
+            .ok().expect("got a non-UTF8 data-layout from LLVM");
+
+        if sess.target.target.data_layout != data_layout {
+            bug!("data-layout for builtin `{}` target, `{}`, \
+                  differs from LLVM default, `{}`",
+                 sess.target.target.llvm_target,
+                 sess.target.target.data_layout,
+                 data_layout);
+        }
+    }
+
+    let data_layout = CString::new(&sess.target.target.data_layout[..]).unwrap();
+    llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
+
+    let llvm_target = sess.target.target.llvm_target.as_bytes();
+    let llvm_target = CString::new(llvm_target).unwrap();
+    llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());
+    (llcx, llmod)
+}
+
+impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> {
+    pub fn new(crate_name: &str,
+               local_count: usize,
+               tcx: &'b TyCtxt<'tcx>,
+               mir_map: &'b MirMap<'tcx>,
+               export_map: ExportMap,
+               symbol_hasher: Sha256,
+               link_meta: LinkMeta,
+               reachable: NodeSet,
+               check_overflow: bool,
+               check_drop_flag_for_sanity: bool)
+               -> SharedCrateContext<'b, 'tcx> {
+        let (metadata_llcx, metadata_llmod) = unsafe {
+            create_context_and_module(&tcx.sess, "metadata")
+        };
+
+        // An interesting aspect of Windows that MSVC forces our hand on (and
+        // that MinGW apparently did not) is the use of `dllimport` and
+        // `dllexport` attributes in LLVM IR, as well as on native
+        // dependencies (in C these correspond to `__declspec(dllimport)` and
+        // `__declspec(dllexport)`).
+        //
+        // Whenever a dynamic library is built by MSVC it must have its public
+        // interface specified by functions tagged with `dllexport` or otherwise
+        // they're not available to be linked against. This poses a few problems
+        // for the compiler, some of which are somewhat fundamental, but we use
+        // the `use_dll_storage_attrs` variable below to attach the `dllexport`
+        // attribute to all LLVM functions that are reachable (e.g. they're
+        // already tagged with external linkage). This is suboptimal for a few
+        // reasons:
+        //
+        // * If an object file will never be included in a dynamic library,
+        //   there's no need to attach the dllexport attribute. Most object
+        //   files in Rust are not destined to become part of a dll as binaries
+        //   are statically linked by default.
+        // * If the compiler is emitting both an rlib and a dylib, the same
+        //   source object file is currently used for both, but with MSVC this
+        //   may be less feasible. The compiler may be able to get around
+        //   this, but doing so would likely require some invasive changes.
+        //
+        // The flipside of this situation is that whenever you link to a dll and
+        // you import a function from it, the import should be tagged with
+        // `dllimport`. At this time, however, the compiler does not emit
+        // `dllimport` for any declarations other than constants (where it is
+        // required), which is again suboptimal for even more reasons!
+        //
+        // * Calling a function imported from another dll without using
+        //   `dllimport` causes the linker/compiler to have extra overhead (one
+        //   `jmp` instruction on x86) when calling the function.
+        // * The same object file may be used in different circumstances, so a
+        //   function may be imported from a dll if the object is linked into a
+        //   dll, but it may be just linked against if linked into an rlib.
+        // * The compiler has no knowledge about whether native functions should
+        //   be tagged dllimport or not.
+        //
+        // For now the compiler takes the perf hit (I do not have any numbers to
+        // this effect) by marking very little as `dllimport` and praying the
+        // linker will take care of everything. Fixing this problem will likely
+        // require adding a few attributes to Rust itself (feature gated at the
+        // start) and then strongly recommending static linkage on MSVC!
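+        //
+        // For illustration only (not part of the original comment), the C
+        // equivalents of these attributes look roughly like:
+        //
+        //     __declspec(dllexport) int exported_fn(void);  // in the dll
+        //     __declspec(dllimport) int imported_fn(void);  // in its users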
+        let use_dll_storage_attrs = tcx.sess.target.target.options.is_like_msvc;
+
+        let mut shared_ccx = SharedCrateContext {
+            local_ccxs: Vec::with_capacity(local_count),
+            metadata_llmod: metadata_llmod,
+            metadata_llcx: metadata_llcx,
+            export_map: export_map,
+            reachable: reachable,
+            item_symbols: RefCell::new(NodeMap()),
+            link_meta: link_meta,
+            symbol_hasher: RefCell::new(symbol_hasher),
+            tcx: tcx,
+            mir_map: mir_map,
+            mir_cache: RefCell::new(DefIdMap()),
+            stats: Stats {
+                n_glues_created: Cell::new(0),
+                n_null_glues: Cell::new(0),
+                n_real_glues: Cell::new(0),
+                n_fns: Cell::new(0),
+                n_monos: Cell::new(0),
+                n_inlines: Cell::new(0),
+                n_closures: Cell::new(0),
+                n_llvm_insns: Cell::new(0),
+                llvm_insns: RefCell::new(FnvHashMap()),
+                fn_stats: RefCell::new(Vec::new()),
+            },
+            check_overflow: check_overflow,
+            check_drop_flag_for_sanity: check_drop_flag_for_sanity,
+            available_monomorphizations: RefCell::new(FnvHashSet()),
+            available_drop_glues: RefCell::new(FnvHashMap()),
+            use_dll_storage_attrs: use_dll_storage_attrs,
+            translation_items: RefCell::new(FnvHashMap()),
+        };
+
+        for i in 0..local_count {
+            // Append ".rs" to crate name as LLVM module identifier.
+            //
+            // The LLVM code generator emits a ".file filename" directive
+            // for ELF backends. The value of "filename" is set to the
+            // LLVM module identifier. Due to an LLVM MC bug[1], LLVM
+            // crashes if the module identifier is the same as another
+            // symbol in the module, such as a function name.
+            // 1. http://llvm.org/bugs/show_bug.cgi?id=11479
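+            //
+            // Illustrative example (not in the original comment): for a
+            // crate named `foo` compiled with two codegen units, the
+            // identifiers would be `foo.0.rs` and `foo.1.rs`.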
+            let llmod_id = format!("{}.{}.rs", crate_name, i);
+            let local_ccx = LocalCrateContext::new(&shared_ccx, &llmod_id[..]);
+            shared_ccx.local_ccxs.push(local_ccx);
+        }
+
+        shared_ccx
+    }
+
+    pub fn iter<'a>(&'a self) -> CrateContextIterator<'a, 'tcx> {
+        CrateContextIterator {
+            shared: self,
+            index: 0,
+        }
+    }
+
+    pub fn get_ccx<'a>(&'a self, index: usize) -> CrateContext<'a, 'tcx> {
+        CrateContext {
+            shared: self,
+            local: &self.local_ccxs[index],
+            index: index,
+        }
+    }
+
+    fn get_smallest_ccx<'a>(&'a self) -> CrateContext<'a, 'tcx> {
+        let (local_ccx, index) =
+            self.local_ccxs
+                .iter()
+                .zip(0..self.local_ccxs.len())
+                .min_by_key(|&(local_ccx, _idx)| local_ccx.n_llvm_insns.get())
+                .unwrap();
+        CrateContext {
+            shared: self,
+            local: local_ccx,
+            index: index,
+        }
+    }
+
+
+    pub fn metadata_llmod(&self) -> ModuleRef {
+        self.metadata_llmod
+    }
+
+    pub fn metadata_llcx(&self) -> ContextRef {
+        self.metadata_llcx
+    }
+
+    pub fn export_map<'a>(&'a self) -> &'a ExportMap {
+        &self.export_map
+    }
+
+    pub fn reachable<'a>(&'a self) -> &'a NodeSet {
+        &self.reachable
+    }
+
+    pub fn item_symbols<'a>(&'a self) -> &'a RefCell<NodeMap<String>> {
+        &self.item_symbols
+    }
+
+    pub fn link_meta<'a>(&'a self) -> &'a LinkMeta {
+        &self.link_meta
+    }
+
+    pub fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    pub fn sess<'a>(&'a self) -> &'a Session {
+        &self.tcx.sess
+    }
+
+    pub fn stats<'a>(&'a self) -> &'a Stats {
+        &self.stats
+    }
+
+    pub fn use_dll_storage_attrs(&self) -> bool {
+        self.use_dll_storage_attrs
+    }
+}
+
+impl<'tcx> LocalCrateContext<'tcx> {
+    fn new<'a>(shared: &SharedCrateContext<'a, 'tcx>,
+           name: &str)
+           -> LocalCrateContext<'tcx> {
+        unsafe {
+            let (llcx, llmod) = create_context_and_module(&shared.tcx.sess, name);
+
+            let dbg_cx = if shared.tcx.sess.opts.debuginfo != NoDebugInfo {
+                Some(debuginfo::CrateDebugContext::new(llmod))
+            } else {
+                None
+            };
+
+            let mut local_ccx = LocalCrateContext {
+                llmod: llmod,
+                llcx: llcx,
+                tn: TypeNames::new(),
+                needs_unwind_cleanup_cache: RefCell::new(FnvHashMap()),
+                fn_pointer_shims: RefCell::new(FnvHashMap()),
+                drop_glues: RefCell::new(FnvHashMap()),
+                external: RefCell::new(DefIdMap()),
+                external_srcs: RefCell::new(NodeMap()),
+                instances: RefCell::new(FnvHashMap()),
+                monomorphizing: RefCell::new(DefIdMap()),
+                vtables: RefCell::new(FnvHashMap()),
+                const_cstr_cache: RefCell::new(FnvHashMap()),
+                const_unsized: RefCell::new(FnvHashMap()),
+                const_globals: RefCell::new(FnvHashMap()),
+                const_values: RefCell::new(FnvHashMap()),
+                extern_const_values: RefCell::new(DefIdMap()),
+                impl_method_cache: RefCell::new(FnvHashMap()),
+                closure_bare_wrapper_cache: RefCell::new(FnvHashMap()),
+                statics_to_rauw: RefCell::new(Vec::new()),
+                lltypes: RefCell::new(FnvHashMap()),
+                llsizingtypes: RefCell::new(FnvHashMap()),
+                adt_reprs: RefCell::new(FnvHashMap()),
+                type_hashcodes: RefCell::new(FnvHashMap()),
+                int_type: Type::from_ref(ptr::null_mut()),
+                opaque_vec_type: Type::from_ref(ptr::null_mut()),
+                builder: BuilderRef_res(llvm::LLVMCreateBuilderInContext(llcx)),
+                closure_vals: RefCell::new(FnvHashMap()),
+                dbg_cx: dbg_cx,
+                eh_personality: Cell::new(None),
+                eh_unwind_resume: Cell::new(None),
+                rust_try_fn: Cell::new(None),
+                intrinsics: RefCell::new(FnvHashMap()),
+                n_llvm_insns: Cell::new(0),
+                type_of_depth: Cell::new(0),
+                trait_cache: RefCell::new(DepTrackingMap::new(shared.tcx
+                                                                    .dep_graph
+                                                                    .clone())),
+            };
+
+            local_ccx.int_type = Type::int(&local_ccx.dummy_ccx(shared));
+            local_ccx.opaque_vec_type = Type::opaque_vec(&local_ccx.dummy_ccx(shared));
+
+            // Done mutating local_ccx directly.  (The rest of the
+            // initialization goes through RefCell.)
+            {
+                let ccx = local_ccx.dummy_ccx(shared);
+
+                let mut str_slice_ty = Type::named_struct(&ccx, "str_slice");
+                str_slice_ty.set_struct_body(&[Type::i8p(&ccx), ccx.int_type()], false);
+                ccx.tn().associate_type("str_slice", &str_slice_ty);
+
+                if ccx.sess().count_llvm_insns() {
+                    base::init_insn_ctxt()
+                }
+            }
+
+            local_ccx
+        }
+    }
+
+    /// Create a dummy `CrateContext` from `self` and  the provided
+    /// `SharedCrateContext`.  This is somewhat dangerous because `self` may
+    /// not actually be an element of `shared.local_ccxs`, which can cause some
+    /// operations to panic unexpectedly.
+    ///
+    /// This is used in the `LocalCrateContext` constructor to allow calling
+    /// functions that expect a complete `CrateContext`, even before the local
+    /// portion is fully initialized and attached to the `SharedCrateContext`.
+    fn dummy_ccx<'a>(&'a self, shared: &'a SharedCrateContext<'a, 'tcx>)
+                     -> CrateContext<'a, 'tcx> {
+        CrateContext {
+            shared: shared,
+            local: self,
+            index: !0 as usize,
+        }
+    }
+}
+
+impl<'b, 'tcx> CrateContext<'b, 'tcx> {
+    pub fn shared(&self) -> &'b SharedCrateContext<'b, 'tcx> {
+        self.shared
+    }
+
+    pub fn local(&self) -> &'b LocalCrateContext<'tcx> {
+        self.local
+    }
+
+    /// Get a (possibly) different `CrateContext` from the same
+    /// `SharedCrateContext`: the one with the fewest LLVM instructions
+    /// translated so far, to keep the contexts roughly balanced.
+    pub fn rotate(&self) -> CrateContext<'b, 'tcx> {
+        self.shared.get_smallest_ccx()
+    }
+
+    /// Either iterate over only `self`, or iterate over all `CrateContext`s in
+    /// the `SharedCrateContext`.  The iterator produces `(ccx, is_origin)`
+    /// pairs, where `is_origin` is `true` if `ccx` is `self` and `false`
+    /// otherwise.  This method is useful for avoiding code duplication in
+    /// cases where it may or may not be necessary to translate code into every
+    /// context.
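+    ///
+    /// Illustrative sketch (not from the original docs):
+    ///
+    /// ```ignore
+    /// // Translate something into every codegen context, remembering
+    /// // which one we started from.
+    /// for (other_ccx, is_origin) in ccx.maybe_iter(true) {
+    ///     // `is_origin` is true exactly once, when `other_ccx` is `ccx`.
+    /// }
+    /// ```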
+    pub fn maybe_iter(&self, iter_all: bool) -> CrateContextMaybeIterator<'b, 'tcx> {
+        CrateContextMaybeIterator {
+            shared: self.shared,
+            index: if iter_all { 0 } else { self.index },
+            single: !iter_all,
+            origin: self.index,
+        }
+    }
+
+
+    pub fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx> {
+        self.shared.tcx
+    }
+
+    pub fn sess<'a>(&'a self) -> &'a Session {
+        &self.shared.tcx.sess
+    }
+
+    pub fn builder<'a>(&'a self) -> Builder<'a, 'tcx> {
+        Builder::new(self)
+    }
+
+    pub fn raw_builder<'a>(&'a self) -> BuilderRef {
+        self.local.builder.b
+    }
+
+    pub fn get_intrinsic(&self, key: &str) -> ValueRef {
+        if let Some(v) = self.intrinsics().borrow().get(key).cloned() {
+            return v;
+        }
+        match declare_intrinsic(self, key) {
+            Some(v) => return v,
+            None => bug!("unknown intrinsic '{}'", key)
+        }
+    }
+
+    pub fn llmod(&self) -> ModuleRef {
+        self.local.llmod
+    }
+
+    pub fn llcx(&self) -> ContextRef {
+        self.local.llcx
+    }
+
+    pub fn td(&self) -> llvm::TargetDataRef {
+        unsafe { llvm::LLVMRustGetModuleDataLayout(self.llmod()) }
+    }
+
+    pub fn tn<'a>(&'a self) -> &'a TypeNames {
+        &self.local.tn
+    }
+
+    pub fn export_map<'a>(&'a self) -> &'a ExportMap {
+        &self.shared.export_map
+    }
+
+    pub fn reachable<'a>(&'a self) -> &'a NodeSet {
+        &self.shared.reachable
+    }
+
+    pub fn item_symbols<'a>(&'a self) -> &'a RefCell<NodeMap<String>> {
+        &self.shared.item_symbols
+    }
+
+    pub fn link_meta<'a>(&'a self) -> &'a LinkMeta {
+        &self.shared.link_meta
+    }
+
+    pub fn needs_unwind_cleanup_cache(&self) -> &RefCell<FnvHashMap<Ty<'tcx>, bool>> {
+        &self.local.needs_unwind_cleanup_cache
+    }
+
+    pub fn fn_pointer_shims(&self) -> &RefCell<FnvHashMap<Ty<'tcx>, ValueRef>> {
+        &self.local.fn_pointer_shims
+    }
+
+    pub fn drop_glues<'a>(&'a self) -> &'a RefCell<FnvHashMap<DropGlueKind<'tcx>, ValueRef>> {
+        &self.local.drop_glues
+    }
+
+    pub fn external<'a>(&'a self) -> &'a RefCell<DefIdMap<Option<ast::NodeId>>> {
+        &self.local.external
+    }
+
+    pub fn external_srcs<'a>(&'a self) -> &'a RefCell<NodeMap<DefId>> {
+        &self.local.external_srcs
+    }
+
+    pub fn instances<'a>(&'a self) -> &'a RefCell<FnvHashMap<Instance<'tcx>, ValueRef>> {
+        &self.local.instances
+    }
+
+    pub fn monomorphizing<'a>(&'a self) -> &'a RefCell<DefIdMap<usize>> {
+        &self.local.monomorphizing
+    }
+
+    pub fn vtables<'a>(&'a self) -> &'a RefCell<FnvHashMap<ty::PolyTraitRef<'tcx>, ValueRef>> {
+        &self.local.vtables
+    }
+
+    pub fn const_cstr_cache<'a>(&'a self) -> &'a RefCell<FnvHashMap<InternedString, ValueRef>> {
+        &self.local.const_cstr_cache
+    }
+
+    pub fn const_unsized<'a>(&'a self) -> &'a RefCell<FnvHashMap<ValueRef, ValueRef>> {
+        &self.local.const_unsized
+    }
+
+    pub fn const_globals<'a>(&'a self) -> &'a RefCell<FnvHashMap<ValueRef, ValueRef>> {
+        &self.local.const_globals
+    }
+
+    pub fn const_values<'a>(&'a self) -> &'a RefCell<FnvHashMap<(ast::NodeId, &'tcx Substs<'tcx>),
+                                                                ValueRef>> {
+        &self.local.const_values
+    }
+
+    pub fn extern_const_values<'a>(&'a self) -> &'a RefCell<DefIdMap<ValueRef>> {
+        &self.local.extern_const_values
+    }
+
+    pub fn impl_method_cache<'a>(&'a self)
+            -> &'a RefCell<FnvHashMap<(DefId, ast::Name), DefId>> {
+        &self.local.impl_method_cache
+    }
+
+    pub fn closure_bare_wrapper_cache<'a>(&'a self) -> &'a RefCell<FnvHashMap<ValueRef, ValueRef>> {
+        &self.local.closure_bare_wrapper_cache
+    }
+
+    pub fn statics_to_rauw<'a>(&'a self) -> &'a RefCell<Vec<(ValueRef, ValueRef)>> {
+        &self.local.statics_to_rauw
+    }
+
+    pub fn lltypes<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, Type>> {
+        &self.local.lltypes
+    }
+
+    pub fn llsizingtypes<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, Type>> {
+        &self.local.llsizingtypes
+    }
+
+    pub fn adt_reprs<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, Rc<adt::Repr<'tcx>>>> {
+        &self.local.adt_reprs
+    }
+
+    pub fn symbol_hasher<'a>(&'a self) -> &'a RefCell<Sha256> {
+        &self.shared.symbol_hasher
+    }
+
+    pub fn type_hashcodes<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, String>> {
+        &self.local.type_hashcodes
+    }
+
+    pub fn stats<'a>(&'a self) -> &'a Stats {
+        &self.shared.stats
+    }
+
+    pub fn available_monomorphizations<'a>(&'a self) -> &'a RefCell<FnvHashSet<String>> {
+        &self.shared.available_monomorphizations
+    }
+
+    pub fn available_drop_glues(&self) -> &RefCell<FnvHashMap<DropGlueKind<'tcx>, String>> {
+        &self.shared.available_drop_glues
+    }
+
+    pub fn int_type(&self) -> Type {
+        self.local.int_type
+    }
+
+    pub fn opaque_vec_type(&self) -> Type {
+        self.local.opaque_vec_type
+    }
+
+    pub fn closure_vals<'a>(&'a self) -> &'a RefCell<FnvHashMap<Instance<'tcx>, ValueRef>> {
+        &self.local.closure_vals
+    }
+
+    pub fn dbg_cx<'a>(&'a self) -> &'a Option<debuginfo::CrateDebugContext<'tcx>> {
+        &self.local.dbg_cx
+    }
+
+    pub fn eh_personality<'a>(&'a self) -> &'a Cell<Option<ValueRef>> {
+        &self.local.eh_personality
+    }
+
+    pub fn eh_unwind_resume<'a>(&'a self) -> &'a Cell<Option<ValueRef>> {
+        &self.local.eh_unwind_resume
+    }
+
+    pub fn rust_try_fn<'a>(&'a self) -> &'a Cell<Option<ValueRef>> {
+        &self.local.rust_try_fn
+    }
+
+    fn intrinsics<'a>(&'a self) -> &'a RefCell<FnvHashMap<&'static str, ValueRef>> {
+        &self.local.intrinsics
+    }
+
+    pub fn count_llvm_insn(&self) {
+        self.local.n_llvm_insns.set(self.local.n_llvm_insns.get() + 1);
+    }
+
+    pub fn trait_cache(&self) -> &RefCell<DepTrackingMap<TraitSelectionCache<'tcx>>> {
+        &self.local.trait_cache
+    }
+
+    pub fn obj_size_bound(&self) -> u64 {
+        self.tcx().data_layout.obj_size_bound()
+    }
+
+    pub fn report_overbig_object(&self, obj: Ty<'tcx>) -> ! {
+        self.sess().fatal(
+            &format!("the type `{:?}` is too big for the current architecture",
+                    obj))
+    }
+
+    pub fn enter_type_of(&self, ty: Ty<'tcx>) -> TypeOfDepthLock<'b, 'tcx> {
+        let current_depth = self.local.type_of_depth.get();
+        debug!("enter_type_of({:?}) at depth {:?}", ty, current_depth);
+        if current_depth > self.sess().recursion_limit.get() {
+            self.sess().fatal(
+                &format!("overflow representing the type `{}`", ty))
+        }
+        self.local.type_of_depth.set(current_depth + 1);
+        TypeOfDepthLock(self.local)
+    }
+
+    pub fn check_overflow(&self) -> bool {
+        self.shared.check_overflow
+    }
+
+    pub fn check_drop_flag_for_sanity(&self) -> bool {
+        // This controls whether we emit a conditional llvm.debugtrap
+        // guarded on whether the dropflag is one of its (two) valid
+        // values.
+        self.shared.check_drop_flag_for_sanity
+    }
+
+    pub fn use_dll_storage_attrs(&self) -> bool {
+        self.shared.use_dll_storage_attrs()
+    }
+
+    pub fn get_mir(&self, def_id: DefId) -> Option<CachedMir<'b, 'tcx>> {
+        if def_id.is_local() {
+            let node_id = self.tcx().map.as_local_node_id(def_id).unwrap();
+            self.shared.mir_map.map.get(&node_id).map(CachedMir::Ref)
+        } else {
+            if let Some(mir) = self.shared.mir_cache.borrow().get(&def_id).cloned() {
+                return Some(CachedMir::Owned(mir));
+            }
+
+            let mir = self.sess().cstore.maybe_get_item_mir(self.tcx(), def_id);
+            let cached = mir.map(Rc::new);
+            if let Some(ref mir) = cached {
+                self.shared.mir_cache.borrow_mut().insert(def_id, mir.clone());
+            }
+            cached.map(CachedMir::Owned)
+        }
+    }
+
+    pub fn translation_items(&self) -> &RefCell<FnvHashMap<TransItem<'tcx>, TransItemState>> {
+        &self.shared.translation_items
+    }
+
+    pub fn record_translation_item_as_generated(&self, cgi: TransItem<'tcx>) {
+        if self.sess().opts.debugging_opts.print_trans_items.is_none() {
+            return;
+        }
+
+        let mut codegen_items = self.translation_items().borrow_mut();
+
+        if codegen_items.contains_key(&cgi) {
+            codegen_items.insert(cgi, TransItemState::PredictedAndGenerated);
+        } else {
+            codegen_items.insert(cgi, TransItemState::NotPredictedButGenerated);
+        }
+    }
+
+    /// Given the def-id of some item that has no type parameters, make
+    /// a suitable "empty substs" for it.
+    pub fn empty_substs_for_def_id(&self, item_def_id: DefId) -> &'tcx Substs<'tcx> {
+        let scheme = self.tcx().lookup_item_type(item_def_id);
+        self.empty_substs_for_scheme(&scheme)
+    }
+
+    pub fn empty_substs_for_scheme(&self, scheme: &ty::TypeScheme<'tcx>)
+                                   -> &'tcx Substs<'tcx> {
+        assert!(scheme.generics.types.is_empty());
+        self.tcx().mk_substs(
+            Substs::new(VecPerParamSpace::empty(),
+                        scheme.generics.regions.map(|_| ty::ReStatic)))
+    }
+}
+
+pub struct TypeOfDepthLock<'a, 'tcx: 'a>(&'a LocalCrateContext<'tcx>);
+
+impl<'a, 'tcx> Drop for TypeOfDepthLock<'a, 'tcx> {
+    fn drop(&mut self) {
+        self.0.type_of_depth.set(self.0.type_of_depth.get() - 1);
+    }
+}
+
+/// Declare the LLVM intrinsic identified by `key`, returning `None` if the
+/// name is not recognized.
+fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option<ValueRef> {
+    macro_rules! ifn {
+        ($name:expr, fn() -> $ret:expr) => (
+            if key == $name {
+                let f = declare::declare_cfn(ccx, $name, Type::func(&[], &$ret));
+                llvm::SetUnnamedAddr(f, false);
+                ccx.intrinsics().borrow_mut().insert($name, f.clone());
+                return Some(f);
+            }
+        );
+        ($name:expr, fn(...) -> $ret:expr) => (
+            if key == $name {
+                let f = declare::declare_cfn(ccx, $name, Type::variadic_func(&[], &$ret));
+                llvm::SetUnnamedAddr(f, false);
+                ccx.intrinsics().borrow_mut().insert($name, f.clone());
+                return Some(f);
+            }
+        );
+        ($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
+            if key == $name {
+                let f = declare::declare_cfn(ccx, $name, Type::func(&[$($arg),*], &$ret));
+                llvm::SetUnnamedAddr(f, false);
+                ccx.intrinsics().borrow_mut().insert($name, f.clone());
+                return Some(f);
+            }
+        );
+    }
+    macro_rules! mk_struct {
+        ($($field_ty:expr),*) => (Type::struct_(ccx, &[$($field_ty),*], false))
+    }
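+
+    // Illustrative note (not part of the original source): a line such as
+    //     ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
+    // compares `key` against "llvm.sqrt.f32" and, on a match, declares the
+    // intrinsic via `declare_cfn`, caches the resulting `ValueRef` in
+    // `ccx.intrinsics()`, and returns `Some(f)`.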
+
+    let i8p = Type::i8p(ccx);
+    let void = Type::void(ccx);
+    let i1 = Type::i1(ccx);
+    let t_i8 = Type::i8(ccx);
+    let t_i16 = Type::i16(ccx);
+    let t_i32 = Type::i32(ccx);
+    let t_i64 = Type::i64(ccx);
+    let t_f32 = Type::f32(ccx);
+    let t_f64 = Type::f64(ccx);
+
+    ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void);
+    ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void);
+    ifn!("llvm.memcpy.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void);
+    ifn!("llvm.memmove.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void);
+    ifn!("llvm.memmove.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void);
+    ifn!("llvm.memmove.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void);
+    ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void);
+    ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void);
+    ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void);
+
+    ifn!("llvm.trap", fn() -> void);
+    ifn!("llvm.debugtrap", fn() -> void);
+    ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
+
+    ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
+    ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
+    ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
+    ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);
+
+    ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);
+    ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);
+    ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);
+    ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);
+    ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);
+    ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.log.f64", fn(t_f64) -> t_f64);
+    ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);
+    ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);
+
+    ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
+    ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
+
+    ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);
+
+    ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);
+    ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);
+    ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);
+
+    ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
+    ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);
+    ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.round.f64", fn(t_f64) -> t_f64);
+
+    ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
+    ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
+    ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64);
+
+    ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8);
+    ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16);
+    ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32);
+    ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64);
+
+    ifn!("llvm.ctlz.i8", fn(t_i8 , i1) -> t_i8);
+    ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16);
+    ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32);
+    ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64);
+
+    ifn!("llvm.cttz.i8", fn(t_i8 , i1) -> t_i8);
+    ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16);
+    ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32);
+    ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64);
+
+    ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16);
+    ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32);
+    ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64);
+
+    ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
+    ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
+    ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
+    ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
+
+    ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
+    ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
+    ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
+    ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
+
+    ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
+    ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
+    ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
+    ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
+
+    ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
+    ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
+    ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
+    ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
+
+    ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
+    ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
+    ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
+    ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
+
+    ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
+    ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
+    ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
+    ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
+
+    ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void);
+    ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void);
+
+    ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
+    ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
+    ifn!("llvm.localescape", fn(...) -> void);
+    ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
+    ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);
+
+    // Some intrinsics were introduced in later versions of LLVM, but they
+    // have fallbacks in libc or libm, so older LLVM versions get a small
+    // compatibility shim instead.
+    macro_rules! compatible_ifn {
+        ($name:expr, noop($cname:ident ($($arg:expr),*) -> void), $llvm_version:expr) => (
+            if unsafe { llvm::LLVMVersionMinor() >= $llvm_version } {
+                // The `if key == $name` is already in ifn!
+                ifn!($name, fn($($arg),*) -> void);
+            } else if key == $name {
+                let f = declare::declare_cfn(ccx, stringify!($cname),
+                                             Type::func(&[$($arg),*], &void));
+                llvm::SetLinkage(f, llvm::InternalLinkage);
+
+                let bld = ccx.builder();
+                let llbb = unsafe {
+                    llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), f,
+                                                        "entry-block\0".as_ptr() as *const _)
+                };
+
+                bld.position_at_end(llbb);
+                bld.ret_void();
+
+                ccx.intrinsics().borrow_mut().insert($name, f.clone());
+                return Some(f);
+            }
+        );
+        ($name:expr, $cname:ident ($($arg:expr),*) -> $ret:expr, $llvm_version:expr) => (
+            if unsafe { llvm::LLVMVersionMinor() >= $llvm_version } {
+                // The `if key == $name` is already in ifn!
+                ifn!($name, fn($($arg),*) -> $ret);
+            } else if key == $name {
+                let f = declare::declare_cfn(ccx, stringify!($cname),
+                                             Type::func(&[$($arg),*], &$ret));
+                ccx.intrinsics().borrow_mut().insert($name, f.clone());
+                return Some(f);
+            }
+        )
+    }
+
+    compatible_ifn!("llvm.assume", noop(llvmcompat_assume(i1) -> void), 6);
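+    // Illustrative note (not in the original source): on LLVM with minor
+    // version >= 6 (LLVM 3.6 at the time) the line above declares
+    // `llvm.assume` directly; on older LLVM it instead declares an internal
+    // no-op function (named `llvmcompat_assume` above) whose body simply
+    // returns, so callers can emit the same call either way.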
+
+    if ccx.sess().opts.debuginfo != NoDebugInfo {
+        ifn!("llvm.dbg.declare", fn(Type::metadata(ccx), Type::metadata(ccx)) -> void);
+        ifn!("llvm.dbg.value", fn(Type::metadata(ccx), t_i64, Type::metadata(ccx)) -> void);
+    }
+    return None;
+}
diff --git a/src/librustc_trans/controlflow.rs b/src/librustc_trans/controlflow.rs
new file mode 100644 (file)
index 0000000..58971de
--- /dev/null
+++ b/src/librustc_trans/controlflow.rs
@@ -0,0 +1,434 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm::ValueRef;
+use rustc::hir::def::Def;
+use middle::lang_items::{PanicFnLangItem, PanicBoundsCheckFnLangItem};
+use rustc::ty::subst::Substs;
+use base::*;
+use basic_block::BasicBlock;
+use build::*;
+use callee::{Callee, ArgVals};
+use cleanup::CleanupMethods;
+use cleanup;
+use common::*;
+use consts;
+use debuginfo;
+use debuginfo::{DebugLoc, ToDebugLoc};
+use expr;
+use machine;
+
+use rustc::hir;
+
+use syntax::ast;
+use syntax::parse::token::InternedString;
+use syntax::parse::token;
+
+pub fn trans_stmt<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+                              s: &hir::Stmt)
+                              -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("trans_stmt");
+    let fcx = cx.fcx;
+    debug!("trans_stmt({:?})", s);
+
+    if cx.unreachable.get() {
+        return cx;
+    }
+
+    if cx.sess().asm_comments() {
+        add_span_comment(cx, s.span, &format!("{:?}", s));
+    }
+
+    let mut bcx = cx;
+
+    let id = s.node.id();
+    let cleanup_debug_loc =
+        debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), id, s.span, false);
+    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
+
+    match s.node {
+        hir::StmtExpr(ref e, _) | hir::StmtSemi(ref e, _) => {
+            bcx = trans_stmt_semi(bcx, &e);
+        }
+        hir::StmtDecl(ref d, _) => {
+            match d.node {
+                hir::DeclLocal(ref local) => {
+                    bcx = init_local(bcx, &local);
+                    debuginfo::create_local_var_metadata(bcx, &local);
+                }
+                // Inner items are visited by `trans_item`/`trans_meth`.
+                hir::DeclItem(_) => {},
+            }
+        }
+    }
+
+    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, s.node.id());
+
+    return bcx;
+}
+
+pub fn trans_stmt_semi<'blk, 'tcx>(cx: Block<'blk, 'tcx>, e: &hir::Expr)
+                                   -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("trans_stmt_semi");
+
+    if cx.unreachable.get() {
+        return cx;
+    }
+
+    let ty = expr_ty(cx, e);
+    if cx.fcx.type_needs_drop(ty) {
+        expr::trans_to_lvalue(cx, e, "stmt").bcx
+    } else {
+        expr::trans_into(cx, e, expr::Ignore)
+    }
+}
+
+pub fn trans_block<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                               b: &hir::Block,
+                               mut dest: expr::Dest)
+                               -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("trans_block");
+
+    if bcx.unreachable.get() {
+        return bcx;
+    }
+
+    let fcx = bcx.fcx;
+    let mut bcx = bcx;
+
+    let cleanup_debug_loc =
+        debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), b.id, b.span, true);
+    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
+
+    for s in &b.stmts {
+        bcx = trans_stmt(bcx, s);
+    }
+
+    if dest != expr::Ignore {
+        let block_ty = node_id_type(bcx, b.id);
+
+        if b.expr.is_none() || type_is_zero_size(bcx.ccx(), block_ty) {
+            dest = expr::Ignore;
+        } else if b.expr.is_some() {
+            // If the block has an expression but that expression isn't
+            // reachable, don't store into the given destination; ignore it.
+            if let Some(ref cfg) = bcx.fcx.cfg {
+                if !cfg.node_is_reachable(b.expr.as_ref().unwrap().id) {
+                    dest = expr::Ignore;
+                }
+            }
+        }
+    }
+
+    match b.expr {
+        Some(ref e) => {
+            if !bcx.unreachable.get() {
+                bcx = expr::trans_into(bcx, &e, dest);
+            }
+        }
+        None => {
+            assert!(dest == expr::Ignore || bcx.unreachable.get());
+        }
+    }
+
+    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, b.id);
+
+    return bcx;
+}
+
+pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                            if_id: ast::NodeId,
+                            cond: &hir::Expr,
+                            thn: &hir::Block,
+                            els: Option<&hir::Expr>,
+                            dest: expr::Dest)
+                            -> Block<'blk, 'tcx> {
+    debug!("trans_if(bcx={}, if_id={}, cond={:?}, thn={}, dest={:?})",
+           bcx.to_str(), if_id, cond, thn.id, dest);
+    let _icx = push_ctxt("trans_if");
+
+    if bcx.unreachable.get() {
+        return bcx;
+    }
+
+    let mut bcx = bcx;
+
+    let cond_val = unpack_result!(bcx, expr::trans(bcx, cond).to_llbool());
+
+    // Drop branches that are known to be impossible
+    if let Some(cv) = const_to_opt_uint(cond_val) {
+        if cv == 1 {
+            // if true { .. } [else { .. }]
+            bcx = trans_block(bcx, &thn, dest);
+            debuginfo::clear_source_location(bcx.fcx);
+        } else {
+            if let Some(elexpr) = els {
+                bcx = expr::trans_into(bcx, &elexpr, dest);
+                debuginfo::clear_source_location(bcx.fcx);
+            }
+        }
+
+        return bcx;
+    }
+
+    let name = format!("then-block-{}-", thn.id);
+    let then_bcx_in = bcx.fcx.new_id_block(&name[..], thn.id);
+    let then_bcx_out = trans_block(then_bcx_in, &thn, dest);
+    debuginfo::clear_source_location(bcx.fcx);
+
+    let cond_source_loc = cond.debug_loc();
+
+    let next_bcx;
+    match els {
+        Some(elexpr) => {
+            let else_bcx_in = bcx.fcx.new_id_block("else-block", elexpr.id);
+            let else_bcx_out = expr::trans_into(else_bcx_in, &elexpr, dest);
+            next_bcx = bcx.fcx.join_blocks(if_id,
+                                           &[then_bcx_out, else_bcx_out]);
+            CondBr(bcx, cond_val, then_bcx_in.llbb, else_bcx_in.llbb, cond_source_loc);
+        }
+
+        None => {
+            next_bcx = bcx.fcx.new_id_block("next-block", if_id);
+            Br(then_bcx_out, next_bcx.llbb, DebugLoc::None);
+            CondBr(bcx, cond_val, then_bcx_in.llbb, next_bcx.llbb, cond_source_loc);
+        }
+    }
+
+    // Clear the source location because it is still set to whatever was
+    // translated immediately before this point.
+    debuginfo::clear_source_location(next_bcx.fcx);
+
+    next_bcx
+}
+
+pub fn trans_while<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                               loop_expr: &hir::Expr,
+                               cond: &hir::Expr,
+                               body: &hir::Block)
+                               -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("trans_while");
+
+    if bcx.unreachable.get() {
+        return bcx;
+    }
+
+    let fcx = bcx.fcx;
+
+    //            bcx
+    //             |
+    //         cond_bcx_in  <--------+
+    //             |                 |
+    //         cond_bcx_out          |
+    //           |      |            |
+    //           |    body_bcx_in    |
+    // cleanup_blk      |            |
+    //    |           body_bcx_out --+
+    // next_bcx_in
+
+    let next_bcx_in = fcx.new_id_block("while_exit", loop_expr.id);
+    let cond_bcx_in = fcx.new_id_block("while_cond", cond.id);
+    let body_bcx_in = fcx.new_id_block("while_body", body.id);
+
+    fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, cond_bcx_in]);
+
+    Br(bcx, cond_bcx_in.llbb, loop_expr.debug_loc());
+
+    // compile the block where we will handle loop cleanups
+    let cleanup_llbb = fcx.normal_exit_block(loop_expr.id, cleanup::EXIT_BREAK);
+
+    // compile the condition
+    let Result {bcx: cond_bcx_out, val: cond_val} =
+        expr::trans(cond_bcx_in, cond).to_llbool();
+
+    CondBr(cond_bcx_out, cond_val, body_bcx_in.llbb, cleanup_llbb, cond.debug_loc());
+
+    // loop body:
+    let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
+    Br(body_bcx_out, cond_bcx_in.llbb, DebugLoc::None);
+
+    fcx.pop_loop_cleanup_scope(loop_expr.id);
+    return next_bcx_in;
+}
+
+pub fn trans_loop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                              loop_expr: &hir::Expr,
+                              body: &hir::Block)
+                              -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("trans_loop");
+
+    if bcx.unreachable.get() {
+        return bcx;
+    }
+
+    let fcx = bcx.fcx;
+
+    //            bcx
+    //             |
+    //         body_bcx_in
+    //             |
+    //         body_bcx_out
+    //
+    // next_bcx
+    //
+    // Links between body_bcx_in and next_bcx are created by
+    // break statements.
+
+    let next_bcx_in = bcx.fcx.new_id_block("loop_exit", loop_expr.id);
+    let body_bcx_in = bcx.fcx.new_id_block("loop_body", body.id);
+
+    fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, body_bcx_in]);
+
+    Br(bcx, body_bcx_in.llbb, loop_expr.debug_loc());
+    let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
+    Br(body_bcx_out, body_bcx_in.llbb, DebugLoc::None);
+
+    fcx.pop_loop_cleanup_scope(loop_expr.id);
+
+    // If there are no predecessors for the next block, we just translated
+    // an endless loop and the next block is unreachable.
+    if BasicBlock(next_bcx_in.llbb).pred_iter().next().is_none() {
+        Unreachable(next_bcx_in);
+    }
+
+    return next_bcx_in;
+}
+
+pub fn trans_break_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                    expr: &hir::Expr,
+                                    opt_label: Option<ast::Name>,
+                                    exit: usize)
+                                    -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("trans_break_cont");
+
+    if bcx.unreachable.get() {
+        return bcx;
+    }
+
+    let fcx = bcx.fcx;
+
+    // Locate loop that we will break to
+    let loop_id = match opt_label {
+        None => fcx.top_loop_scope(),
+        Some(_) => {
+            match bcx.tcx().def_map.borrow().get(&expr.id).map(|d| d.full_def())  {
+                Some(Def::Label(loop_id)) => loop_id,
+                r => {
+                    bug!("{:?} in def-map for label", r)
+                }
+            }
+        }
+    };
+
+    // Generate appropriate cleanup code and branch
+    let cleanup_llbb = fcx.normal_exit_block(loop_id, exit);
+    Br(bcx, cleanup_llbb, expr.debug_loc());
+    Unreachable(bcx); // anything afterwards should be ignored
+    return bcx;
+}
+
+pub fn trans_break<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                               expr: &hir::Expr,
+                               label_opt: Option<ast::Name>)
+                               -> Block<'blk, 'tcx> {
+    return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_BREAK);
+}
+
+pub fn trans_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                              expr: &hir::Expr,
+                              label_opt: Option<ast::Name>)
+                              -> Block<'blk, 'tcx> {
+    return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_LOOP);
+}
+
+pub fn trans_ret<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                             return_expr: &hir::Expr,
+                             retval_expr: Option<&hir::Expr>)
+                             -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("trans_ret");
+
+    if bcx.unreachable.get() {
+        return bcx;
+    }
+
+    let fcx = bcx.fcx;
+    let mut bcx = bcx;
+    if let Some(x) = retval_expr {
+        let dest = if fcx.llretslotptr.get().is_some() {
+            expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))
+        } else {
+            expr::Ignore
+        };
+        bcx = expr::trans_into(bcx, &x, dest);
+        match dest {
+            expr::SaveIn(slot) if fcx.needs_ret_allocas => {
+                Store(bcx, slot, fcx.llretslotptr.get().unwrap());
+            }
+            _ => {}
+        }
+    }
+    let cleanup_llbb = fcx.return_exit_block();
+    Br(bcx, cleanup_llbb, return_expr.debug_loc());
+    Unreachable(bcx);
+    return bcx;
+}
+
+pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                              call_info: NodeIdAndSpan,
+                              fail_str: InternedString)
+                              -> Block<'blk, 'tcx> {
+    let ccx = bcx.ccx();
+    let _icx = push_ctxt("trans_fail_value");
+
+    if bcx.unreachable.get() {
+        return bcx;
+    }
+
+    let v_str = C_str_slice(ccx, fail_str);
+    let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo);
+    let filename = token::intern_and_get_ident(&loc.file.name);
+    let filename = C_str_slice(ccx, filename);
+    let line = C_u32(ccx, loc.line as u32);
+    let expr_file_line_const = C_struct(ccx, &[v_str, filename, line], false);
+    let align = machine::llalign_of_min(ccx, val_ty(expr_file_line_const));
+    let expr_file_line = consts::addr_of(ccx, expr_file_line_const, align, "panic_loc");
+    let args = vec!(expr_file_line);
+    let did = langcall(bcx, Some(call_info.span), "", PanicFnLangItem);
+    Callee::def(ccx, did, ccx.tcx().mk_substs(Substs::empty()))
+        .call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx
+}
+
+pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                           call_info: NodeIdAndSpan,
+                                           index: ValueRef,
+                                           len: ValueRef)
+                                           -> Block<'blk, 'tcx> {
+    let ccx = bcx.ccx();
+    let _icx = push_ctxt("trans_fail_bounds_check");
+
+    if bcx.unreachable.get() {
+        return bcx;
+    }
+
+    // Extract the file/line from the span
+    let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo);
+    let filename = token::intern_and_get_ident(&loc.file.name);
+
+    // Invoke the lang item
+    let filename = C_str_slice(ccx,  filename);
+    let line = C_u32(ccx, loc.line as u32);
+    let file_line_const = C_struct(ccx, &[filename, line], false);
+    let align = machine::llalign_of_min(ccx, val_ty(file_line_const));
+    let file_line = consts::addr_of(ccx, file_line_const, align, "panic_bounds_check_loc");
+    let args = vec!(file_line, index, len);
+    let did = langcall(bcx, Some(call_info.span), "", PanicBoundsCheckFnLangItem);
+    Callee::def(ccx, did, ccx.tcx().mk_substs(Substs::empty()))
+        .call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx
+}
diff --git a/src/librustc_trans/datum.rs b/src/librustc_trans/datum.rs
new file mode 100644 (file)
index 0000000..0ed38f0
--- /dev/null
+++ b/src/librustc_trans/datum.rs
@@ -0,0 +1,828 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! ## The Datum module
+//!
+//! A `Datum` encapsulates the result of evaluating a Rust expression.  It
+//! contains a `ValueRef` indicating the result, a `Ty` describing
+//! the Rust type, and also a *kind*. The kind indicates whether the datum
+//! has cleanup scheduled (lvalue) or not (rvalue) and -- in the case of
+//! rvalues -- whether the value is "by ref" or "by value".
+//!
+//! The datum API is designed to try to help you avoid memory errors like
+//! forgetting to arrange cleanup or duplicating a value. The type of the
+//! datum incorporates the kind, and thus reflects whether it has cleanup
+//! scheduled:
+//!
+//! - `Datum<Lvalue>` -- by ref, cleanup scheduled
+//! - `Datum<Rvalue>` -- by value or by ref, no cleanup scheduled
+//! - `Datum<Expr>` -- either `Datum<Lvalue>` or `Datum<Rvalue>`
+//!
+//! Rvalue and expr datums are noncopyable, and most of the methods on
+//! datums consume the datum itself (with some notable exceptions). This
+//! reflects the fact that datums may represent affine values which ought
+//! to be consumed exactly once, and if you were to try to (for example)
+//! store an affine value multiple times, you would be duplicating it,
+//! which would certainly be a bug.
+//!
+//! Some of the datum methods, however, are designed to work only on
+//! copyable values such as ints or pointers. Those methods may borrow the
+//! datum (`&self`) rather than consume it, but they always include
+//! assertions on the type of the value represented to check that this
+//! makes sense. An example is `shallow_copy()`, which duplicates
+//! a datum value.
+//!
+//! Translating an expression always yields a `Datum<Expr>` result, but
+//! the methods `to_[lr]value_datum()` can be used to coerce a
+//! `Datum<Expr>` into a `Datum<Lvalue>` or `Datum<Rvalue>` as
+//! needed. Coercing to an lvalue is fairly common, and generally occurs
+//! whenever it is necessary to inspect a value and pull out its
+//! subcomponents (for example, a match, or indexing expression). Coercing
+//! to an rvalue is more unusual; it occurs when moving values from place
+//! to place, such as in an assignment expression or parameter passing.
+//!
+//! ### Lvalues in detail
+//!
+//! An lvalue datum is one for which cleanup has been scheduled. Lvalue
+//! datums are always located in memory, and thus the `ValueRef` for an
+//! LLVM value is always a pointer to the actual Rust value. This means
+//! that if the Datum has a Rust type of `int`, then the LLVM type of the
+//! `ValueRef` will be `int*` (pointer to int).
+//!
+//! Because lvalues already have cleanups scheduled, the memory must be
+//! zeroed when the value is moved out, to prevent the cleanup from running
+//! on the old location (presuming that the Rust type needs drop in the
+//! first place; otherwise it doesn't matter). The Datum code automatically
+//! performs this zeroing when the value is stored to a new location, for
+//! example.
+//!
+//! Lvalues usually result from evaluating lvalue expressions. For
+//! example, evaluating a local variable `x` yields an lvalue, as does a
+//! reference to a field like `x.f` or an index `x[i]`.
+//!
+//! Lvalue datums can also arise by *converting* an rvalue into an lvalue.
+//! This is done with the `to_lvalue_datum` method defined on
+//! `Datum<Expr>`. Basically this method just schedules cleanup if the
+//! datum is an rvalue, possibly storing the value into a stack slot first
+//! if needed. Converting rvalues into lvalues occurs in constructs like
+//! `&foo()` or `match foo() { ref x => ... }`, where the user is
+//! implicitly requesting a temporary.
+//!
+//! ### Rvalues in detail
+//!
+//! Rvalue datums are values with no cleanup scheduled. One must be
+//! careful with rvalue datums to ensure that cleanup is properly
+//! arranged, usually by converting to an lvalue datum or by invoking the
+//! `add_clean` method.
+//!
+//! ### Scratch datums
+//!
+//! Sometimes you need some temporary scratch space.  The functions
+//! `[lr]value_scratch_datum()` can be used to get temporary stack
+//! space. As their name suggests, they yield lvalues and rvalues
+//! respectively. That is, the slot from `lvalue_scratch_datum` will have
+//! cleanup arranged, and the slot from `rvalue_scratch_datum` does not.
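+//!
+//! An illustrative sketch (not from the original docs) of the common flow,
+//! assuming the `unpack_datum!` helper is used to thread the block through:
+//!
+//! ```ignore
+//! // Translate `e`, then coerce the result into an lvalue so that its
+//! // subcomponents can be inspected; cleanup is scheduled if needed.
+//! let datum = unpack_datum!(bcx, expr::trans(bcx, e));
+//! let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "example", e.id));
+//! ```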
+
+pub use self::Expr::*;
+pub use self::RvalueMode::*;
+
+use llvm::ValueRef;
+use adt;
+use base::*;
+use build::{Load, Store};
+use common::*;
+use cleanup;
+use cleanup::{CleanupMethods, DropHintDatum, DropHintMethods};
+use expr;
+use tvec;
+use value::Value;
+use rustc::ty::Ty;
+
+use std::fmt;
+use syntax::ast;
+use syntax::codemap::DUMMY_SP;
+
+/// A `Datum` encapsulates the result of evaluating an expression.  It
+/// describes where the value is stored, what Rust type the value has,
+/// whether it is addressed by reference, and so forth. Please refer
+/// the section on datums in `README.md` for more details.
+#[derive(Clone, Copy)]
+pub struct Datum<'tcx, K> {
+    /// The llvm value.  This is either a pointer to the Rust value or
+    /// the value itself, depending on `kind` below.
+    pub val: ValueRef,
+
+    /// The rust type of the value.
+    pub ty: Ty<'tcx>,
+
+    /// Indicates whether this is by-ref or by-value.
+    pub kind: K,
+}
+
+impl<'tcx, K: fmt::Debug> fmt::Debug for Datum<'tcx, K> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "Datum({:?}, {:?}, {:?})",
+               Value(self.val), self.ty, self.kind)
+    }
+}
+
+pub struct DatumBlock<'blk, 'tcx: 'blk, K> {
+    pub bcx: Block<'blk, 'tcx>,
+    pub datum: Datum<'tcx, K>,
+}
+
+#[derive(Debug)]
+pub enum Expr {
+    /// a fresh value that was produced and which has no cleanup yet
+    /// because it has not yet "landed" into its permanent home
+    RvalueExpr(Rvalue),
+
+    /// `val` is a pointer into memory for which a cleanup is scheduled
+    /// (and thus has type *T). If you move out of an Lvalue, you must
+    /// zero out the memory (FIXME #5016).
+    LvalueExpr(Lvalue),
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum DropFlagInfo {
+    DontZeroJustUse(ast::NodeId),
+    ZeroAndMaintain(ast::NodeId),
+    None,
+}
+
+impl DropFlagInfo {
+    pub fn must_zero(&self) -> bool {
+        match *self {
+            DropFlagInfo::DontZeroJustUse(..) => false,
+            DropFlagInfo::ZeroAndMaintain(..) => true,
+            DropFlagInfo::None => true,
+        }
+    }
+
+    pub fn hint_datum<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>)
+                              -> Option<DropHintDatum<'tcx>> {
+        let id = match *self {
+            DropFlagInfo::None => return None,
+            DropFlagInfo::DontZeroJustUse(id) |
+            DropFlagInfo::ZeroAndMaintain(id) => id,
+        };
+
+        let hints = bcx.fcx.lldropflag_hints.borrow();
+        let retval = hints.hint_datum(id);
+        assert!(retval.is_some(), "An id (={}) implies a hint must be present", id);
+        retval
+    }
+}
+
+// FIXME: having Lvalue be `Copy` is a bit of a footgun, since clients
+// may not realize that subparts of an Lvalue can have a subset of
+// drop-flags associated with them, while this as written will just
+// memcpy the drop_flag_info. But, it is an easier way to get `_match`
+// off the ground to just let this be `Copy` for now.
+#[derive(Copy, Clone, Debug)]
+pub struct Lvalue {
+    pub source: &'static str,
+    pub drop_flag_info: DropFlagInfo
+}
+
+#[derive(Debug)]
+pub struct Rvalue {
+    pub mode: RvalueMode
+}
+
+/// Classifies what action we should take when a value is moved away
+/// with respect to its drop-flag.
+///
+/// Long term there will be no need for this classification: all flags
+/// (which will be stored on the stack frame) will have the same
+/// interpretation and maintenance code associated with them.
+#[derive(Copy, Clone, Debug)]
+pub enum HintKind {
+    /// When the value is moved, set the drop-flag to "dropped"
+    /// (i.e. "zero the flag", even when the specific representation
+    /// is not literally 0) and when it is reinitialized, set the
+    /// drop-flag back to "initialized".
+    ZeroAndMaintain,
+
+    /// When the value is moved, do not set the drop-flag to "dropped".
+    /// However, continue to read the drop-flag when deciding whether to
+    /// drop. (In essence, the path/fragment in question will never need to
+    /// be dropped at the points where it is moved away by this code, but we
+    /// are defending against the scenario where some *other* code could
+    /// move away (or drop) the value and thus zero the flag, which is why
+    /// we still read from it.)
+    DontZeroJustUse,
+}
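+
+// Illustrative note (not from the original source): with `ZeroAndMaintain`,
+// translating a move such as `let y = x;` also emits a store that resets
+// x's drop-flag to its "dropped" value, whereas `DontZeroJustUse` emits no
+// such store and only reads the flag when deciding whether to run the
+// destructor.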
+
+impl Lvalue { // Constructors for various Lvalues.
+    pub fn new<'blk, 'tcx>(source: &'static str) -> Lvalue {
+        debug!("Lvalue at {} no drop flag info", source);
+        Lvalue { source: source, drop_flag_info: DropFlagInfo::None }
+    }
+
+    pub fn new_dropflag_hint(source: &'static str) -> Lvalue {
+        debug!("Lvalue at {} is drop flag hint", source);
+        Lvalue { source: source, drop_flag_info: DropFlagInfo::None }
+    }
+
+    pub fn new_with_hint<'blk, 'tcx>(source: &'static str,
+                                     bcx: Block<'blk, 'tcx>,
+                                     id: ast::NodeId,
+                                     k: HintKind) -> Lvalue {
+        let (opt_id, info) = {
+            let hint_available = Lvalue::has_dropflag_hint(bcx, id) &&
+                bcx.tcx().sess.nonzeroing_move_hints();
+            let info = match k {
+                HintKind::ZeroAndMaintain if hint_available =>
+                    DropFlagInfo::ZeroAndMaintain(id),
+                HintKind::DontZeroJustUse if hint_available =>
+                    DropFlagInfo::DontZeroJustUse(id),
+                _ =>
+                    DropFlagInfo::None,
+            };
+            (Some(id), info)
+        };
+        debug!("Lvalue at {}, id: {:?} info: {:?}", source, opt_id, info);
+        Lvalue { source: source, drop_flag_info: info }
+    }
+} // end Lvalue constructor methods.
+
+impl Lvalue {
+    fn has_dropflag_hint<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                     id: ast::NodeId) -> bool {
+        let hints = bcx.fcx.lldropflag_hints.borrow();
+        hints.has_hint(id)
+    }
+    pub fn dropflag_hint<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>)
+                                 -> Option<DropHintDatum<'tcx>> {
+        self.drop_flag_info.hint_datum(bcx)
+    }
+}
+
+impl Rvalue {
+    pub fn new(m: RvalueMode) -> Rvalue {
+        Rvalue { mode: m }
+    }
+}
+
+// Make Datum linear for more type safety.
+impl Drop for Rvalue {
+    fn drop(&mut self) { }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub enum RvalueMode {
+    /// `val` is a pointer to the actual value (and thus has type *T)
+    ByRef,
+
+    /// `val` is the actual value (*only used for immediates* like ints, ptrs)
+    ByValue,
+}
+
+pub fn immediate_rvalue<'tcx>(val: ValueRef, ty: Ty<'tcx>) -> Datum<'tcx, Rvalue> {
+    return Datum::new(val, ty, Rvalue::new(ByValue));
+}
+
+pub fn immediate_rvalue_bcx<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                        val: ValueRef,
+                                        ty: Ty<'tcx>)
+                                        -> DatumBlock<'blk, 'tcx, Rvalue> {
+    return DatumBlock::new(bcx, immediate_rvalue(val, ty))
+}
+
+/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to
+/// it. The memory will be dropped upon exit from `scope`. The callback `populate` should
+/// initialize the memory.
+///
+/// The flag `zero` indicates how the temporary space itself should be
+/// initialized at the outset of the function; the only time that
+/// `InitAlloca::Uninit` is a valid value for `zero` is when the
+/// caller can prove that either (1.) the code injected by `populate`
+/// onto `bcx` always dominates the end of `scope`, or (2.) the data
+/// being allocated has no associated destructor.
+pub fn lvalue_scratch_datum<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
+                                           ty: Ty<'tcx>,
+                                           name: &str,
+                                           zero: InitAlloca,
+                                           scope: cleanup::ScopeId,
+                                           populate: F)
+                                           -> DatumBlock<'blk, 'tcx, Lvalue> where
+    F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>,
+{
+    // Very subtle: potentially initialize the scratch memory at point where it is alloca'ed.
+    // (See discussion at Issue 30530.)
+    let scratch = alloc_ty_init(bcx, ty, zero, name);
+    debug!("lvalue_scratch_datum scope={:?} scratch={:?} ty={:?}",
+           scope, Value(scratch), ty);
+
+    // Subtle. Populate the scratch memory *before* scheduling cleanup.
+    let bcx = populate(bcx, scratch);
+    bcx.fcx.schedule_drop_mem(scope, scratch, ty, None);
+
+    DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue::new("datum::lvalue_scratch_datum")))
+}
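+
+// A hedged usage sketch (types as declared in this module; the closure body is
+// a placeholder rather than a real call site): callers hand this function a
+// closure that fills the fresh alloca, and the drop is only scheduled after
+// that closure has run:
+//
+//     let DatumBlock { bcx, datum } =
+//         lvalue_scratch_datum(bcx, ty, "tmp", InitAlloca::Dropped, scope,
+//                              |bcx, llval| {
+//                                  // ... emit the stores that initialize `llval` ...
+//                                  bcx
+//                              });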
+
+/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to
+/// it. The memory is not zeroed and no cleanup is scheduled: if the value placed here needs to
+/// be dropped, you must arrange any cleanups etc yourself!
+pub fn rvalue_scratch_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                        ty: Ty<'tcx>,
+                                        name: &str)
+                                        -> Datum<'tcx, Rvalue> {
+    let scratch = alloc_ty(bcx, ty, name);
+    call_lifetime_start(bcx, scratch);
+    Datum::new(scratch, ty, Rvalue::new(ByRef))
+}
+
+/// Indicates the "appropriate" mode for this value, which is either by ref or by value, depending
+/// on whether type is immediate or not.
+pub fn appropriate_rvalue_mode<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                         ty: Ty<'tcx>) -> RvalueMode {
+    if type_is_immediate(ccx, ty) {
+        ByValue
+    } else {
+        ByRef
+    }
+}
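+
+// For intuition (illustrative, not an exhaustive rule): a scalar such as `i32`
+// or a thin raw pointer is immediate and is handled `ByValue`, while an
+// aggregate such as `String` or `[u8; 1024]` is handled `ByRef`.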
+
+fn add_rvalue_clean<'a, 'tcx>(mode: RvalueMode,
+                              fcx: &FunctionContext<'a, 'tcx>,
+                              scope: cleanup::ScopeId,
+                              val: ValueRef,
+                              ty: Ty<'tcx>) {
+    debug!("add_rvalue_clean scope={:?} val={:?} ty={:?}",
+           scope, Value(val), ty);
+    match mode {
+        ByValue => { fcx.schedule_drop_immediate(scope, val, ty); }
+        ByRef => {
+            fcx.schedule_lifetime_end(scope, val);
+            fcx.schedule_drop_mem(scope, val, ty, None);
+        }
+    }
+}
+
+pub trait KindOps {
+
+    /// Take appropriate action after the value in `datum` has been
+    /// stored to a new location.
+    fn post_store<'blk, 'tcx>(&self,
+                              bcx: Block<'blk, 'tcx>,
+                              val: ValueRef,
+                              ty: Ty<'tcx>)
+                              -> Block<'blk, 'tcx>;
+
+    /// True if this mode is a reference mode, meaning that the datum's
+    /// val field is a pointer to the actual value
+    fn is_by_ref(&self) -> bool;
+
+    /// Converts to an Expr kind
+    fn to_expr_kind(self) -> Expr;
+
+}
+
+impl KindOps for Rvalue {
+    fn post_store<'blk, 'tcx>(&self,
+                              bcx: Block<'blk, 'tcx>,
+                              _val: ValueRef,
+                              _ty: Ty<'tcx>)
+                              -> Block<'blk, 'tcx> {
+        // No cleanup is scheduled for an rvalue, so we don't have
+        // to do anything after a move to cancel or duplicate it.
+        if self.is_by_ref() {
+            call_lifetime_end(bcx, _val);
+        }
+        bcx
+    }
+
+    fn is_by_ref(&self) -> bool {
+        self.mode == ByRef
+    }
+
+    fn to_expr_kind(self) -> Expr {
+        RvalueExpr(self)
+    }
+}
+
+impl KindOps for Lvalue {
+    /// If an lvalue is moved, we must zero out the memory in which it resides so as to cancel
+    /// cleanup. If an @T lvalue is copied, we must increment the reference count.
+    fn post_store<'blk, 'tcx>(&self,
+                              bcx: Block<'blk, 'tcx>,
+                              val: ValueRef,
+                              ty: Ty<'tcx>)
+                              -> Block<'blk, 'tcx> {
+        let _icx = push_ctxt("<Lvalue as KindOps>::post_store");
+        if bcx.fcx.type_needs_drop(ty) {
+            // cancel cleanup of affine values:
+            // 1. if it has drop-hint, mark as moved; then code
+            //    aware of drop-hint won't bother calling the
+            //    drop-glue itself.
+            if let Some(hint_datum) = self.drop_flag_info.hint_datum(bcx) {
+                let moved_hint_byte = adt::DTOR_MOVED_HINT;
+                let hint_llval = hint_datum.to_value().value();
+                Store(bcx, C_u8(bcx.fcx.ccx, moved_hint_byte), hint_llval);
+            }
+            // 2. if the drop info says it's necessary, drop-fill the memory.
+            if self.drop_flag_info.must_zero() {
+                let () = drop_done_fill_mem(bcx, val, ty);
+            }
+            bcx
+        } else {
+            // FIXME (#5016) would be nice to assert this, but we have
+            // to allow for e.g. DontZeroJustUse flags, for now.
+            //
+            // (The dropflag hint construction should be taking
+            // !type_needs_drop into account; earlier analysis phases
+            // may not have all the info they need to include such
+            // information properly, I think; in particular the
+            // fragments analysis works on a non-monomorphized view of
+            // the code.)
+            //
+            // assert_eq!(self.drop_flag_info, DropFlagInfo::None);
+            bcx
+        }
+    }
+
+    fn is_by_ref(&self) -> bool {
+        true
+    }
+
+    fn to_expr_kind(self) -> Expr {
+        LvalueExpr(self)
+    }
+}
+
+impl KindOps for Expr {
+    fn post_store<'blk, 'tcx>(&self,
+                              bcx: Block<'blk, 'tcx>,
+                              val: ValueRef,
+                              ty: Ty<'tcx>)
+                              -> Block<'blk, 'tcx> {
+        match *self {
+            LvalueExpr(ref l) => l.post_store(bcx, val, ty),
+            RvalueExpr(ref r) => r.post_store(bcx, val, ty),
+        }
+    }
+
+    fn is_by_ref(&self) -> bool {
+        match *self {
+            LvalueExpr(ref l) => l.is_by_ref(),
+            RvalueExpr(ref r) => r.is_by_ref()
+        }
+    }
+
+    fn to_expr_kind(self) -> Expr {
+        self
+    }
+}
+
+impl<'tcx> Datum<'tcx, Rvalue> {
+    /// Schedules a cleanup for this datum in the given scope. That means that this datum is no
+    /// longer an rvalue datum; hence, this function consumes the datum and returns the contained
+    /// ValueRef.
+    pub fn add_clean<'a>(self,
+                         fcx: &FunctionContext<'a, 'tcx>,
+                         scope: cleanup::ScopeId)
+                         -> ValueRef {
+        add_rvalue_clean(self.kind.mode, fcx, scope, self.val, self.ty);
+        self.val
+    }
+
+    /// Returns an lvalue datum (that is, a by ref datum with cleanup scheduled). If `self` is not
+    /// already an lvalue, cleanup will be scheduled in the temporary scope for `expr_id`.
+    pub fn to_lvalue_datum_in_scope<'blk>(self,
+                                          bcx: Block<'blk, 'tcx>,
+                                          name: &str,
+                                          scope: cleanup::ScopeId)
+                                          -> DatumBlock<'blk, 'tcx, Lvalue> {
+        let fcx = bcx.fcx;
+
+        match self.kind.mode {
+            ByRef => {
+                add_rvalue_clean(ByRef, fcx, scope, self.val, self.ty);
+                DatumBlock::new(bcx, Datum::new(
+                    self.val,
+                    self.ty,
+                    Lvalue::new("datum::to_lvalue_datum_in_scope")))
+            }
+
+            ByValue => {
+                lvalue_scratch_datum(
+                    bcx, self.ty, name, InitAlloca::Dropped, scope,
+                    |bcx, llval| {
+                        debug!("populate call for Datum::to_lvalue_datum_in_scope \
+                                self.ty={:?}", self.ty);
+                        // do not call_lifetime_start here; the
+                        // `InitAlloca::Dropped` will start the scratch
+                        // value's lifetime at the open of the function body.
+                        let bcx = self.store_to(bcx, llval);
+                        bcx.fcx.schedule_lifetime_end(scope, llval);
+                        bcx
+                    })
+            }
+        }
+    }
+
+    pub fn to_ref_datum<'blk>(self, bcx: Block<'blk, 'tcx>)
+                              -> DatumBlock<'blk, 'tcx, Rvalue> {
+        let mut bcx = bcx;
+        match self.kind.mode {
+            ByRef => DatumBlock::new(bcx, self),
+            ByValue => {
+                let scratch = rvalue_scratch_datum(bcx, self.ty, "to_ref");
+                bcx = self.store_to(bcx, scratch.val);
+                DatumBlock::new(bcx, scratch)
+            }
+        }
+    }
+
+    pub fn to_appropriate_datum<'blk>(self, bcx: Block<'blk, 'tcx>)
+                                      -> DatumBlock<'blk, 'tcx, Rvalue> {
+        match self.appropriate_rvalue_mode(bcx.ccx()) {
+            ByRef => {
+                self.to_ref_datum(bcx)
+            }
+            ByValue => {
+                match self.kind.mode {
+                    ByValue => DatumBlock::new(bcx, self),
+                    ByRef => {
+                        let llval = load_ty(bcx, self.val, self.ty);
+                        call_lifetime_end(bcx, self.val);
+                        DatumBlock::new(bcx, Datum::new(llval, self.ty, Rvalue::new(ByValue)))
+                    }
+                }
+            }
+        }
+    }
+}
+
+/// Methods suitable for "expr" datums that could be either lvalues or
+/// rvalues. These include coercions into lvalues/rvalues but also a number
+/// of more general operations. (Some of those operations could be moved to
+/// the more general `impl<K> Datum<K>`, but it's convenient to have them
+/// here since we can `match self.kind` rather than having to implement
+/// generic methods in `KindOps`.)
+impl<'tcx> Datum<'tcx, Expr> {
+    fn match_kind<R, F, G>(self, if_lvalue: F, if_rvalue: G) -> R where
+        F: FnOnce(Datum<'tcx, Lvalue>) -> R,
+        G: FnOnce(Datum<'tcx, Rvalue>) -> R,
+    {
+        let Datum { val, ty, kind } = self;
+        match kind {
+            LvalueExpr(l) => if_lvalue(Datum::new(val, ty, l)),
+            RvalueExpr(r) => if_rvalue(Datum::new(val, ty, r)),
+        }
+    }
+
+    /// Asserts that this datum *is* an lvalue and returns it.
+    #[allow(dead_code)] // potentially useful
+    pub fn assert_lvalue(self) -> Datum<'tcx, Lvalue> {
+        self.match_kind(
+            |d| d,
+            |_| bug!("assert_lvalue given rvalue"))
+    }
+
+    pub fn store_to_dest<'blk>(self,
+                               bcx: Block<'blk, 'tcx>,
+                               dest: expr::Dest,
+                               expr_id: ast::NodeId)
+                               -> Block<'blk, 'tcx> {
+        match dest {
+            expr::Ignore => {
+                self.add_clean_if_rvalue(bcx, expr_id);
+                bcx
+            }
+            expr::SaveIn(addr) => {
+                self.store_to(bcx, addr)
+            }
+        }
+    }
+
+    /// Arranges cleanup for `self` if it is an rvalue. Use when you are done working with a value
+    /// that may need drop.
+    pub fn add_clean_if_rvalue<'blk>(self,
+                                     bcx: Block<'blk, 'tcx>,
+                                     expr_id: ast::NodeId) {
+        self.match_kind(
+            |_| { /* Nothing to do, cleanup already arranged */ },
+            |r| {
+                let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
+                r.add_clean(bcx.fcx, scope);
+            })
+    }
+
+    pub fn to_lvalue_datum<'blk>(self,
+                                 bcx: Block<'blk, 'tcx>,
+                                 name: &str,
+                                 expr_id: ast::NodeId)
+                                 -> DatumBlock<'blk, 'tcx, Lvalue> {
+        debug!("to_lvalue_datum self: {:?}", self);
+
+        self.match_kind(
+            |l| DatumBlock::new(bcx, l),
+            |r| {
+                let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
+                r.to_lvalue_datum_in_scope(bcx, name, scope)
+            })
+    }
+
+    /// Ensures that we have an rvalue datum (that is, a datum with no cleanup scheduled).
+    pub fn to_rvalue_datum<'blk>(self,
+                                 bcx: Block<'blk, 'tcx>,
+                                 name: &'static str)
+                                 -> DatumBlock<'blk, 'tcx, Rvalue> {
+        self.match_kind(
+            |l| {
+                let mut bcx = bcx;
+                match l.appropriate_rvalue_mode(bcx.ccx()) {
+                    ByRef => {
+                        let scratch = rvalue_scratch_datum(bcx, l.ty, name);
+                        bcx = l.store_to(bcx, scratch.val);
+                        DatumBlock::new(bcx, scratch)
+                    }
+                    ByValue => {
+                        let v = load_ty(bcx, l.val, l.ty);
+                        bcx = l.kind.post_store(bcx, l.val, l.ty);
+                        DatumBlock::new(bcx, Datum::new(v, l.ty, Rvalue::new(ByValue)))
+                    }
+                }
+            },
+            |r| DatumBlock::new(bcx, r))
+    }
+
+}
+
+/// Methods suitable only for lvalues. These include the various
+/// operations to extract components out of compound data structures,
+/// such as extracting the field from a struct or a particular element
+/// from an array.
+impl<'tcx> Datum<'tcx, Lvalue> {
+    /// Converts a datum into a by-ref value. The datum type must be one which is always passed by
+    /// reference.
+    pub fn to_llref(self) -> ValueRef {
+        self.val
+    }
+
+    // Extracts a component of a compound data structure (e.g., a field from a
+    // struct). Note that if self is an opened, unsized type then the returned
+    // datum may also be unsized _without the size information_. It is the
+    // caller's responsibility to package the result in some way to make a valid
+    // datum in that case (e.g., by making a fat pointer or opened pair).
+    pub fn get_element<'blk, F>(&self, bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>,
+                                gep: F)
+                                -> Datum<'tcx, Lvalue> where
+        F: FnOnce(adt::MaybeSizedValue) -> ValueRef,
+    {
+        let val = if type_is_sized(bcx.tcx(), self.ty) {
+            let val = adt::MaybeSizedValue::sized(self.val);
+            gep(val)
+        } else {
+            let val = adt::MaybeSizedValue::unsized_(
+                Load(bcx, expr::get_dataptr(bcx, self.val)),
+                Load(bcx, expr::get_meta(bcx, self.val)));
+            gep(val)
+        };
+        Datum {
+            val: val,
+            kind: Lvalue::new("Datum::get_element"),
+            ty: ty,
+        }
+    }
+
+    pub fn get_vec_base_and_len<'blk>(&self, bcx: Block<'blk, 'tcx>)
+                                      -> (ValueRef, ValueRef) {
+        //! Converts a vector into the slice pair.
+
+        tvec::get_base_and_len(bcx, self.val, self.ty)
+    }
+}
+
+/// Generic methods applicable to any sort of datum.
+impl<'tcx, K: KindOps + fmt::Debug> Datum<'tcx, K> {
+    pub fn new(val: ValueRef, ty: Ty<'tcx>, kind: K) -> Datum<'tcx, K> {
+        Datum { val: val, ty: ty, kind: kind }
+    }
+
+    pub fn to_expr_datum(self) -> Datum<'tcx, Expr> {
+        let Datum { val, ty, kind } = self;
+        Datum { val: val, ty: ty, kind: kind.to_expr_kind() }
+    }
+
+    /// Moves or copies this value into a new home, as appropriate depending on the type of the
+    /// datum. This method consumes the datum, since it would be incorrect to go on using the datum
+    /// if the value represented is affine (and hence the value is moved).
+    pub fn store_to<'blk>(self,
+                          bcx: Block<'blk, 'tcx>,
+                          dst: ValueRef)
+                          -> Block<'blk, 'tcx> {
+        self.shallow_copy_raw(bcx, dst);
+
+        self.kind.post_store(bcx, self.val, self.ty)
+    }
+
+    /// Helper function that performs a shallow copy of this value into `dst`, which should be a
+    /// pointer to a memory location suitable for `self.ty`. `dst` should contain uninitialized
+    /// memory (either newly allocated, zeroed, or dropped).
+    ///
+    /// This function is private to datums because it leaves memory in an unstable state, where the
+    /// source value has been copied but not zeroed. Public methods are `store_to` (if you no
+    /// longer need the source value) or `shallow_copy` (if you wish the source value to remain
+    /// valid).
+    fn shallow_copy_raw<'blk>(&self,
+                              bcx: Block<'blk, 'tcx>,
+                              dst: ValueRef)
+                              -> Block<'blk, 'tcx> {
+        let _icx = push_ctxt("copy_to_no_check");
+
+        if type_is_zero_size(bcx.ccx(), self.ty) {
+            return bcx;
+        }
+
+        if self.kind.is_by_ref() {
+            memcpy_ty(bcx, dst, self.val, self.ty);
+        } else {
+            store_ty(bcx, self.val, dst, self.ty);
+        }
+
+        return bcx;
+    }
+
+    /// Copies the value into a new location. This function always preserves the existing datum as
+    /// a valid value. Therefore, it does not consume `self` and, also, cannot be applied to affine
+    /// values (since they must never be duplicated).
+    pub fn shallow_copy<'blk>(&self,
+                              bcx: Block<'blk, 'tcx>,
+                              dst: ValueRef)
+                              -> Block<'blk, 'tcx> {
+        /*!
+         * Copies the value into a new location. This function always
+         * preserves the existing datum as a valid value. Therefore,
+         * it does not consume `self` and, also, cannot be applied to
+         * affine values (since they must never be duplicated).
+         */
+
+        assert!(!self.ty
+                     .moves_by_default(&bcx.tcx().empty_parameter_environment(), DUMMY_SP));
+        self.shallow_copy_raw(bcx, dst)
+    }
+
+    /// See the `appropriate_rvalue_mode()` function
+    pub fn appropriate_rvalue_mode<'a>(&self, ccx: &CrateContext<'a, 'tcx>)
+                                       -> RvalueMode {
+        appropriate_rvalue_mode(ccx, self.ty)
+    }
+
+    /// Converts `self` into a by-value `ValueRef`. Consumes this datum (i.e., absolves you of
+    /// responsibility to cleanup the value). For this to work, the value must be something
+    /// scalar-ish (like an int or a pointer) which (1) does not require drop glue and (2) is
+    /// naturally passed around by value, and not by reference.
+    pub fn to_llscalarish<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef {
+        assert!(!bcx.fcx.type_needs_drop(self.ty));
+        assert!(self.appropriate_rvalue_mode(bcx.ccx()) == ByValue);
+        if self.kind.is_by_ref() {
+            load_ty(bcx, self.val, self.ty)
+        } else {
+            self.val
+        }
+    }
+
+    pub fn to_llbool<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef {
+        assert!(self.ty.is_bool());
+        self.to_llscalarish(bcx)
+    }
+}
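+
+// At the source level the `store_to`/`shallow_copy` split mirrors ordinary
+// move versus copy semantics (purely illustrative, plain Rust):
+//
+//     let a = String::from("x");
+//     let b = a;   // affine value: `a` must not be used again  -> `store_to`
+//     let c = 5i32;
+//     let d = c;   // `Copy` value: both `c` and `d` stay valid -> `shallow_copy`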
+
+impl<'blk, 'tcx, K> DatumBlock<'blk, 'tcx, K> {
+    pub fn new(bcx: Block<'blk, 'tcx>, datum: Datum<'tcx, K>)
+               -> DatumBlock<'blk, 'tcx, K> {
+        DatumBlock { bcx: bcx, datum: datum }
+    }
+}
+
+impl<'blk, 'tcx, K: KindOps + fmt::Debug> DatumBlock<'blk, 'tcx, K> {
+    pub fn to_expr_datumblock(self) -> DatumBlock<'blk, 'tcx, Expr> {
+        DatumBlock::new(self.bcx, self.datum.to_expr_datum())
+    }
+}
+
+impl<'blk, 'tcx> DatumBlock<'blk, 'tcx, Expr> {
+    pub fn store_to_dest(self,
+                         dest: expr::Dest,
+                         expr_id: ast::NodeId) -> Block<'blk, 'tcx> {
+        let DatumBlock { bcx, datum } = self;
+        datum.store_to_dest(bcx, dest, expr_id)
+    }
+
+    pub fn to_llbool(self) -> Result<'blk, 'tcx> {
+        let DatumBlock { datum, bcx } = self;
+        Result::new(bcx, datum.to_llbool(bcx))
+    }
+}
diff --git a/src/librustc_trans/debuginfo/create_scope_map.rs b/src/librustc_trans/debuginfo/create_scope_map.rs
new file mode 100644 (file)
index 0000000..b1cfeb8
--- /dev/null
@@ -0,0 +1,480 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::metadata::file_metadata;
+use super::utils::DIB;
+
+use llvm;
+use llvm::debuginfo::{DIScope, DISubprogram};
+use common::CrateContext;
+use rustc::hir::pat_util;
+use rustc::util::nodemap::NodeMap;
+
+use libc::c_uint;
+use syntax::codemap::{Span, Pos};
+use syntax::{ast, codemap};
+
+use rustc::hir::{self, PatKind};
+
+// This procedure builds the *scope map* for a given function, which maps any
+// given ast::NodeId in the function's AST to the correct DIScope metadata instance.
+//
+// This builder procedure walks the AST in execution order and keeps track of
+// what belongs to which scope, creating DIScope DIEs along the way, and
+// introducing *artificial* lexical scope descriptors where necessary. These
+// artificial scopes allow GDB to correctly handle name shadowing.
+pub fn create_scope_map(cx: &CrateContext,
+                        args: &[hir::Arg],
+                        fn_entry_block: &hir::Block,
+                        fn_metadata: DISubprogram,
+                        fn_ast_id: ast::NodeId)
+                        -> NodeMap<DIScope> {
+    let mut scope_map = NodeMap();
+
+    let def_map = &cx.tcx().def_map;
+
+    let mut scope_stack = vec!(ScopeStackEntry { scope_metadata: fn_metadata, name: None });
+    scope_map.insert(fn_ast_id, fn_metadata);
+
+    // Push argument identifiers onto the stack so arguments integrate nicely
+    // with variable shadowing.
+    for arg in args {
+        pat_util::pat_bindings_ident(def_map, &arg.pat, |_, node_id, _, path1| {
+            scope_stack.push(ScopeStackEntry { scope_metadata: fn_metadata,
+                                               name: Some(path1.node.unhygienic_name) });
+            scope_map.insert(node_id, fn_metadata);
+        })
+    }
+
+    // Clang creates a separate scope for function bodies, so let's do this too.
+    with_new_scope(cx,
+                   fn_entry_block.span,
+                   &mut scope_stack,
+                   &mut scope_map,
+                   |cx, scope_stack, scope_map| {
+        walk_block(cx, fn_entry_block, scope_stack, scope_map);
+    });
+
+    return scope_map;
+}
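+
+// A rough sketch of the result (node ids omitted): for
+//
+//     fn f(x: i32) { let y = x + 1; }
+//
+// the entries for the body block, the `let` statement and the expressions
+// `x + 1`, `x` and `1` all point at the function-body scope, while a
+// shadowing `let x = ...;` in a nested block gets a fresh artificial scope
+// (see walk_pattern below).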
+
+// local helper functions for walking the AST.
+fn with_new_scope<F>(cx: &CrateContext,
+                     scope_span: Span,
+                     scope_stack: &mut Vec<ScopeStackEntry> ,
+                     scope_map: &mut NodeMap<DIScope>,
+                     inner_walk: F) where
+    F: FnOnce(&CrateContext, &mut Vec<ScopeStackEntry>, &mut NodeMap<DIScope>),
+{
+    // Create a new lexical scope and push it onto the stack
+    let loc = cx.sess().codemap().lookup_char_pos(scope_span.lo);
+    let file_metadata = file_metadata(cx, &loc.file.name);
+    let parent_scope = scope_stack.last().unwrap().scope_metadata;
+
+    let scope_metadata = unsafe {
+        llvm::LLVMDIBuilderCreateLexicalBlock(
+            DIB(cx),
+            parent_scope,
+            file_metadata,
+            loc.line as c_uint,
+            loc.col.to_usize() as c_uint)
+    };
+
+    scope_stack.push(ScopeStackEntry { scope_metadata: scope_metadata, name: None });
+
+    inner_walk(cx, scope_stack, scope_map);
+
+    // pop artificial scopes
+    while scope_stack.last().unwrap().name.is_some() {
+        scope_stack.pop();
+    }
+
+    if scope_stack.last().unwrap().scope_metadata != scope_metadata {
+        span_bug!(scope_span, "debuginfo: Inconsistency in scope management.");
+    }
+
+    scope_stack.pop();
+}
+
+struct ScopeStackEntry {
+    scope_metadata: DIScope,
+    name: Option<ast::Name>
+}
+
+fn walk_block(cx: &CrateContext,
+              block: &hir::Block,
+              scope_stack: &mut Vec<ScopeStackEntry> ,
+              scope_map: &mut NodeMap<DIScope>) {
+    scope_map.insert(block.id, scope_stack.last().unwrap().scope_metadata);
+
+    // The interesting things here are statements and the concluding expression.
+    for statement in &block.stmts {
+        scope_map.insert(statement.node.id(),
+                         scope_stack.last().unwrap().scope_metadata);
+
+        match statement.node {
+            hir::StmtDecl(ref decl, _) =>
+                walk_decl(cx, &decl, scope_stack, scope_map),
+            hir::StmtExpr(ref exp, _) |
+            hir::StmtSemi(ref exp, _) =>
+                walk_expr(cx, &exp, scope_stack, scope_map),
+        }
+    }
+
+    if let Some(ref exp) = block.expr {
+        walk_expr(cx, &exp, scope_stack, scope_map);
+    }
+}
+
+fn walk_decl(cx: &CrateContext,
+             decl: &hir::Decl,
+             scope_stack: &mut Vec<ScopeStackEntry> ,
+             scope_map: &mut NodeMap<DIScope>) {
+    match *decl {
+        codemap::Spanned { node: hir::DeclLocal(ref local), .. } => {
+            scope_map.insert(local.id, scope_stack.last().unwrap().scope_metadata);
+
+            walk_pattern(cx, &local.pat, scope_stack, scope_map);
+
+            if let Some(ref exp) = local.init {
+                walk_expr(cx, &exp, scope_stack, scope_map);
+            }
+        }
+        _ => ()
+    }
+}
+
+fn walk_pattern(cx: &CrateContext,
+                pat: &hir::Pat,
+                scope_stack: &mut Vec<ScopeStackEntry> ,
+                scope_map: &mut NodeMap<DIScope>) {
+
+    let def_map = &cx.tcx().def_map;
+
+    // Unfortunately, we cannot just use pat_util::pat_bindings() or
+    // ast_util::walk_pat() here because we have to visit *all* nodes in
+    // order to put them into the scope map. The above functions don't do that.
+    match pat.node {
+        PatKind::Ident(_, ref path1, ref sub_pat_opt) => {
+
+            // Check if this is a binding. If so we need to put it on the
+            // scope stack and maybe introduce an artificial scope
+            if pat_util::pat_is_binding(&def_map.borrow(), &pat) {
+
+                let name = path1.node.unhygienic_name;
+
+                // LLVM does not properly generate 'DW_AT_start_scope' fields
+                // for variable DIEs. For this reason we have to introduce
+                // an artificial scope at bindings whenever a variable with
+                // the same name is declared in *any* parent scope.
+                //
+                // Otherwise the following error occurs:
+                //
+                // let x = 10;
+                //
+                // do_something(); // 'gdb print x' correctly prints 10
+                //
+                // {
+                //     do_something(); // 'gdb print x' prints 0, because it
+                //                     // already reads the uninitialized 'x'
+                //                     // from the next line...
+                //     let x = 100;
+                //     do_something(); // 'gdb print x' correctly prints 100
+                // }
+
+                // Is there already a binding with that name?
+                // N.B.: this comparison must be UNhygienic... because
+                // gdb knows nothing about the context, so any two
+                // variables with the same name will cause the problem.
+                let need_new_scope = scope_stack
+                    .iter()
+                    .any(|entry| entry.name == Some(name));
+
+                if need_new_scope {
+                    // Create a new lexical scope and push it onto the stack
+                    let loc = cx.sess().codemap().lookup_char_pos(pat.span.lo);
+                    let file_metadata = file_metadata(cx, &loc.file.name);
+                    let parent_scope = scope_stack.last().unwrap().scope_metadata;
+
+                    let scope_metadata = unsafe {
+                        llvm::LLVMDIBuilderCreateLexicalBlock(
+                            DIB(cx),
+                            parent_scope,
+                            file_metadata,
+                            loc.line as c_uint,
+                            loc.col.to_usize() as c_uint)
+                    };
+
+                    scope_stack.push(ScopeStackEntry {
+                        scope_metadata: scope_metadata,
+                        name: Some(name)
+                    });
+
+                } else {
+                    // Push a new entry anyway so the name can be found
+                    let prev_metadata = scope_stack.last().unwrap().scope_metadata;
+                    scope_stack.push(ScopeStackEntry {
+                        scope_metadata: prev_metadata,
+                        name: Some(name)
+                    });
+                }
+            }
+
+            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
+
+            if let Some(ref sub_pat) = *sub_pat_opt {
+                walk_pattern(cx, &sub_pat, scope_stack, scope_map);
+            }
+        }
+
+        PatKind::Wild => {
+            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
+        }
+
+        PatKind::TupleStruct(_, ref sub_pats_opt) => {
+            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
+
+            if let Some(ref sub_pats) = *sub_pats_opt {
+                for p in sub_pats {
+                    walk_pattern(cx, &p, scope_stack, scope_map);
+                }
+            }
+        }
+
+        PatKind::Path(..) | PatKind::QPath(..) => {
+            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
+        }
+
+        PatKind::Struct(_, ref field_pats, _) => {
+            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
+
+            for &codemap::Spanned {
+                node: hir::FieldPat { pat: ref sub_pat, .. },
+                ..
+            } in field_pats {
+                walk_pattern(cx, &sub_pat, scope_stack, scope_map);
+            }
+        }
+
+        PatKind::Tup(ref sub_pats) => {
+            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
+
+            for sub_pat in sub_pats {
+                walk_pattern(cx, &sub_pat, scope_stack, scope_map);
+            }
+        }
+
+        PatKind::Box(ref sub_pat) | PatKind::Ref(ref sub_pat, _) => {
+            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
+            walk_pattern(cx, &sub_pat, scope_stack, scope_map);
+        }
+
+        PatKind::Lit(ref exp) => {
+            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
+            walk_expr(cx, &exp, scope_stack, scope_map);
+        }
+
+        PatKind::Range(ref exp1, ref exp2) => {
+            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
+            walk_expr(cx, &exp1, scope_stack, scope_map);
+            walk_expr(cx, &exp2, scope_stack, scope_map);
+        }
+
+        PatKind::Vec(ref front_sub_pats, ref middle_sub_pats, ref back_sub_pats) => {
+            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
+
+            for sub_pat in front_sub_pats {
+                walk_pattern(cx, &sub_pat, scope_stack, scope_map);
+            }
+
+            if let Some(ref sub_pat) = *middle_sub_pats {
+                walk_pattern(cx, &sub_pat, scope_stack, scope_map);
+            }
+
+            for sub_pat in back_sub_pats {
+                walk_pattern(cx, &sub_pat, scope_stack, scope_map);
+            }
+        }
+    }
+}
+
+fn walk_expr(cx: &CrateContext,
+             exp: &hir::Expr,
+             scope_stack: &mut Vec<ScopeStackEntry> ,
+             scope_map: &mut NodeMap<DIScope>) {
+
+    scope_map.insert(exp.id, scope_stack.last().unwrap().scope_metadata);
+
+    match exp.node {
+        hir::ExprLit(_)   |
+        hir::ExprBreak(_) |
+        hir::ExprAgain(_) |
+        hir::ExprPath(..) => {}
+
+        hir::ExprCast(ref sub_exp, _)     |
+        hir::ExprType(ref sub_exp, _) |
+        hir::ExprAddrOf(_, ref sub_exp)  |
+        hir::ExprField(ref sub_exp, _) |
+        hir::ExprTupField(ref sub_exp, _) =>
+            walk_expr(cx, &sub_exp, scope_stack, scope_map),
+
+        hir::ExprBox(ref sub_expr) => {
+            walk_expr(cx, &sub_expr, scope_stack, scope_map);
+        }
+
+        hir::ExprRet(ref exp_opt) => match *exp_opt {
+            Some(ref sub_exp) => walk_expr(cx, &sub_exp, scope_stack, scope_map),
+            None => ()
+        },
+
+        hir::ExprUnary(_, ref sub_exp) => {
+            walk_expr(cx, &sub_exp, scope_stack, scope_map);
+        }
+
+        hir::ExprAssignOp(_, ref lhs, ref rhs) |
+        hir::ExprIndex(ref lhs, ref rhs) |
+        hir::ExprBinary(_, ref lhs, ref rhs)    => {
+            walk_expr(cx, &lhs, scope_stack, scope_map);
+            walk_expr(cx, &rhs, scope_stack, scope_map);
+        }
+
+        hir::ExprVec(ref init_expressions) |
+        hir::ExprTup(ref init_expressions) => {
+            for ie in init_expressions {
+                walk_expr(cx, &ie, scope_stack, scope_map);
+            }
+        }
+
+        hir::ExprAssign(ref sub_exp1, ref sub_exp2) |
+        hir::ExprRepeat(ref sub_exp1, ref sub_exp2) => {
+            walk_expr(cx, &sub_exp1, scope_stack, scope_map);
+            walk_expr(cx, &sub_exp2, scope_stack, scope_map);
+        }
+
+        hir::ExprIf(ref cond_exp, ref then_block, ref opt_else_exp) => {
+            walk_expr(cx, &cond_exp, scope_stack, scope_map);
+
+            with_new_scope(cx,
+                           then_block.span,
+                           scope_stack,
+                           scope_map,
+                           |cx, scope_stack, scope_map| {
+                walk_block(cx, &then_block, scope_stack, scope_map);
+            });
+
+            match *opt_else_exp {
+                Some(ref else_exp) =>
+                    walk_expr(cx, &else_exp, scope_stack, scope_map),
+                _ => ()
+            }
+        }
+
+        hir::ExprWhile(ref cond_exp, ref loop_body, _) => {
+            walk_expr(cx, &cond_exp, scope_stack, scope_map);
+
+            with_new_scope(cx,
+                           loop_body.span,
+                           scope_stack,
+                           scope_map,
+                           |cx, scope_stack, scope_map| {
+                walk_block(cx, &loop_body, scope_stack, scope_map);
+            })
+        }
+
+        hir::ExprLoop(ref block, _) |
+        hir::ExprBlock(ref block)   => {
+            with_new_scope(cx,
+                           block.span,
+                           scope_stack,
+                           scope_map,
+                           |cx, scope_stack, scope_map| {
+                walk_block(cx, &block, scope_stack, scope_map);
+            })
+        }
+
+        hir::ExprClosure(_, ref decl, ref block) => {
+            with_new_scope(cx,
+                           block.span,
+                           scope_stack,
+                           scope_map,
+                           |cx, scope_stack, scope_map| {
+                for &hir::Arg { pat: ref pattern, .. } in &decl.inputs {
+                    walk_pattern(cx, &pattern, scope_stack, scope_map);
+                }
+
+                walk_block(cx, &block, scope_stack, scope_map);
+            })
+        }
+
+        hir::ExprCall(ref fn_exp, ref args) => {
+            walk_expr(cx, &fn_exp, scope_stack, scope_map);
+
+            for arg_exp in args {
+                walk_expr(cx, &arg_exp, scope_stack, scope_map);
+            }
+        }
+
+        hir::ExprMethodCall(_, _, ref args) => {
+            for arg_exp in args {
+                walk_expr(cx, &arg_exp, scope_stack, scope_map);
+            }
+        }
+
+        hir::ExprMatch(ref discriminant_exp, ref arms, _) => {
+            walk_expr(cx, &discriminant_exp, scope_stack, scope_map);
+
+            // For each arm we have to first walk the pattern as these might
+            // introduce new artificial scopes. It should be sufficient to
+            // walk only one pattern per arm, as they all must contain the
+            // same binding names.
+
+            for arm_ref in arms {
+                let arm_span = arm_ref.pats[0].span;
+
+                with_new_scope(cx,
+                               arm_span,
+                               scope_stack,
+                               scope_map,
+                               |cx, scope_stack, scope_map| {
+                    for pat in &arm_ref.pats {
+                        walk_pattern(cx, &pat, scope_stack, scope_map);
+                    }
+
+                    if let Some(ref guard_exp) = arm_ref.guard {
+                        walk_expr(cx, &guard_exp, scope_stack, scope_map)
+                    }
+
+                    walk_expr(cx, &arm_ref.body, scope_stack, scope_map);
+                })
+            }
+        }
+
+        hir::ExprStruct(_, ref fields, ref base_exp) => {
+            for &hir::Field { expr: ref exp, .. } in fields {
+                walk_expr(cx, &exp, scope_stack, scope_map);
+            }
+
+            match *base_exp {
+                Some(ref exp) => walk_expr(cx, &exp, scope_stack, scope_map),
+                None => ()
+            }
+        }
+
+        hir::ExprInlineAsm(_, ref outputs, ref inputs) => {
+            for output in outputs {
+                walk_expr(cx, output, scope_stack, scope_map);
+            }
+
+            for input in inputs {
+                walk_expr(cx, input, scope_stack, scope_map);
+            }
+        }
+    }
+}
diff --git a/src/librustc_trans/debuginfo/doc.rs b/src/librustc_trans/debuginfo/doc.rs
new file mode 100644 (file)
index 0000000..bcf5eb9
--- /dev/null
@@ -0,0 +1,189 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! # Debug Info Module
+//!
+//! This module serves the purpose of generating debug symbols. We use LLVM's
+//! [source level debugging](http://llvm.org/docs/SourceLevelDebugging.html)
+//! features for generating the debug information. The general principle is
+//! this:
+//!
+//! Given the right metadata in the LLVM IR, the LLVM code generator is able to
+//! create DWARF debug symbols for the given code. The
+//! [metadata](http://llvm.org/docs/LangRef.html#metadata-type) is structured
+//! much like DWARF *debugging information entries* (DIE), representing type
+//! information such as datatype layout, function signatures, block layout,
+//! variable location and scope information, etc. It is the purpose of this
+//! module to generate correct metadata and insert it into the LLVM IR.
+//!
+//! As the exact format of metadata trees may change between different LLVM
+//! versions, we now use LLVM
+//! [DIBuilder](http://llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html)
+//! to create metadata where possible. This will hopefully ease the adaptation of
+//! this module to future LLVM versions.
+//!
+//! The public API of the module is a set of functions that will insert the
+//! correct metadata into the LLVM IR when called with the right parameters.
+//! The module is thus driven from an outside client with functions like
+//! `debuginfo::create_local_var_metadata(bcx: block, local: &ast::local)`.
+//!
+//! Internally the module will try to reuse already created metadata by
+//! utilizing a cache. The way to get a shared metadata node when needed is
+//! thus to just call the corresponding function in this module:
+//!
+//!     let file_metadata = file_metadata(crate_context, path);
+//!
+//! The function will take care of probing the cache for an existing node for
+//! that exact file path.
+//!
+//! All private state used by the module is stored within either the
+//! CrateDebugContext struct (owned by the CrateContext) or the
+//! FunctionDebugContext (owned by the FunctionContext).
+//!
+//! This file consists of three conceptual sections:
+//! 1. The public interface of the module
+//! 2. Module-internal metadata creation functions
+//! 3. Minor utility functions
+//!
+//!
+//! ## Recursive Types
+//!
+//! Some kinds of types, such as structs and enums, can be recursive. That means
+//! that the type definition of some type X refers to some other type which in
+//! turn (transitively) refers to X. This introduces cycles into the type
+//! referral graph. A naive algorithm doing an on-demand, depth-first traversal
+//! of this graph when describing types can get trapped in an endless loop
+//! when it reaches such a cycle.
+//!
+//! For example, the following simple type for a singly-linked list...
+//!
+//! ```
+//! struct List {
+//!     value: i32,
+//!     tail: Option<Box<List>>,
+//! }
+//! ```
+//!
+//! will generate the following callstack with a naive DFS algorithm:
+//!
+//! ```
+//! describe(t = List)
+//!   describe(t = i32)
+//!   describe(t = Option<Box<List>>)
+//!     describe(t = Box<List>)
+//!       describe(t = List) // at the beginning again...
+//!       ...
+//! ```
+//!
+//! To break cycles like these, we use "forward declarations". That is, when
+//! the algorithm encounters a possibly recursive type (any struct or enum), it
+//! immediately creates a type description node and inserts it into the cache
+//! *before* describing the members of the type. This type description is just
+//! a stub (as type members are not described and added to it yet) but it
+//! allows the algorithm to already refer to the type. After the stub is
+//! inserted into the cache, the algorithm continues as before. If it now
+//! encounters a recursive reference, it will hit the cache and will not try to
+//! describe the type anew.
+//!
+//! This behaviour is encapsulated in the 'RecursiveTypeDescription' enum,
+//! which represents a kind of continuation, storing all state needed to
+//! continue traversal at the type members after the type has been registered
+//! with the cache. (This implementation approach might be a tad over-
+//! engineered and may change in the future)
+//!
+//!
+//! ## Source Locations and Line Information
+//!
+//! In addition to data type descriptions, the debugging information must also
+//! allow machine code locations to be mapped back to source code locations in
+//! order to be useful. This functionality is also handled in this module. The
+//! following functions control the source mappings:
+//!
+//! + set_source_location()
+//! + clear_source_location()
+//! + start_emitting_source_locations()
+//!
+//! `set_source_location()` sets the current source location. All IR
+//! instructions created after a call to this function will be linked to the
+//! given source location, until another location is specified with
+//! `set_source_location()` or the source location is cleared with
+//! `clear_source_location()`. In the latter case, subsequent IR instructions
+//! will not be linked to any source location. As you can see, this is a
+//! stateful API (mimicking the one in LLVM), so be careful with source
+//! locations set by previous calls. It's probably best to not rely on any
+//! specific state being present at a given point in code.
+//!
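+//! A rough sketch of the intended order of calls while translating a function
+//! (function names as listed above, arguments elided):
+//!
+//!     start_emitting_source_locations(..) // right before the top-level block
+//!     set_source_location(..)             // before each statement/expression
+//!     clear_source_location(..)           // before compiler-introduced IR
+//!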
+//! One topic that deserves some extra attention is *function prologues*. At
+//! the beginning of a function's machine code there are typically a few
+//! instructions for loading argument values into allocas and checking if
+//! there's enough stack space for the function to execute. This *prologue* is
+//! not visible in the source code and LLVM puts a special PROLOGUE END marker
+//! into the line table at the first non-prologue instruction of the function.
+//! In order to find out where the prologue ends, LLVM looks for the first
+//! instruction in the function body that is linked to a source location. So,
+//! when generating prologue instructions we have to make sure that we don't
+//! emit source location information until the 'real' function body begins. For
+//! this reason, source location emission is disabled by default for any new
+//! function being translated and is only activated after a call to the third
+//! function from the list above, `start_emitting_source_locations()`. This
+//! function should be called right before regularly starting to translate the
+//! top-level block of the given function.
+//!
+//! There is one exception to the above rule: `llvm.dbg.declare` instruction
+//! must be linked to the source location of the variable being declared. For
+//! function parameters these `llvm.dbg.declare` instructions typically occur
+//! in the middle of the prologue, however, they are ignored by LLVM's prologue
+//! detection. The `create_argument_metadata()` and related functions take care
+//! of linking the `llvm.dbg.declare` instructions to the correct source
+//! locations even while source location emission is still disabled, so there
+//! is no need to do anything special with source location handling here.
+//!
+//! ## Unique Type Identification
+//!
+//! In order for link-time optimization to work properly, LLVM needs a unique
+//! type identifier that tells it across compilation units which types are the
+//! same as others. This type identifier is created by
+//! TypeMap::get_unique_type_id_of_type() using the following algorithm:
+//!
+//! (1) Primitive types have their name as ID
+//! (2) Structs, enums and traits have a multipart identifier
+//!
+//!     (1) The first part is the SVH (strict version hash) of the crate they
+//!          were originally defined in
+//!
+//!     (2) The second part is the ast::NodeId of the definition in their
+//!          original crate
+//!
+//!     (3) The final part is a concatenation of the type IDs of their concrete
+//!          type arguments if they are generic types.
+//!
+//! (3) Tuple-, pointer and function types are structurally identified, which
+//!     means that they are equivalent if their component types are equivalent
+//!     (i.e. (i32, i32) is the same regardless of which crate it is used in).
+//!
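+//! For intuition (the concrete encoding is an internal detail and the values
+//! below are made up), a struct defined in a crate with SVH `1a2b3c` at
+//! NodeId 17 and instantiated as `Foo<i32>` would get an identifier along
+//! the lines of
+//!
+//!     1a2b3c/17<i32>
+//!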
+//! This algorithm also provides a stable ID for types that are defined in one
+//! crate but instantiated from metadata within another crate. We just have to
+//! take care to always map crate and node IDs back to the original crate
+//! context.
+//!
+//! As a side-effect these unique type IDs also help to solve a problem arising
+//! from lifetime parameters. Since lifetime parameters are completely omitted
+//! in debuginfo, more than one `Ty` instance may map to the same debuginfo
+//! type metadata, that is, some struct `Struct<'a>` may have N instantiations
+//! with different concrete substitutions for `'a`, and thus there will be N
+//! `Ty` instances for the type `Struct<'a>` even though it is not generic
+//! otherwise. Unfortunately this means that we cannot use `ty::type_id()` as
+//! cheap identifier for type metadata---we have done this in the past, but it
+//! led to unnecessary metadata duplication in the best case and LLVM
+//! assertions in the worst. However, the unique type ID as described above
+//! *can* be used as identifier. Since it is comparatively expensive to
+//! construct, though, `ty::type_id()` is still used additionally as an
+//! optimization for cases where the exact same type has been seen before
+//! (which is most of the time).
diff --git a/src/librustc_trans/debuginfo/gdb.rs b/src/librustc_trans/debuginfo/gdb.rs
new file mode 100644 (file)
index 0000000..cf31285
--- /dev/null
@@ -0,0 +1,100 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// .debug_gdb_scripts binary section.
+
+use llvm;
+
+use common::{C_bytes, CrateContext, C_i32};
+use declare;
+use type_::Type;
+use session::config::NoDebugInfo;
+
+use std::ffi::CString;
+use std::ptr;
+use syntax::attr;
+
+
+/// Inserts a side-effect free instruction sequence that makes sure that the
+/// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
+pub fn insert_reference_to_gdb_debug_scripts_section_global(ccx: &CrateContext) {
+    if needs_gdb_debug_scripts_section(ccx) {
+        let empty = CString::new("").unwrap();
+        let gdb_debug_scripts_section_global =
+            get_or_insert_gdb_debug_scripts_section_global(ccx);
+        unsafe {
+            // Load just the first byte as that's all that's necessary to force
+            // LLVM to keep around the reference to the global.
+            let indices = [C_i32(ccx, 0), C_i32(ccx, 0)];
+            let element =
+                llvm::LLVMBuildInBoundsGEP(ccx.raw_builder(),
+                                           gdb_debug_scripts_section_global,
+                                           indices.as_ptr(),
+                                           indices.len() as ::libc::c_uint,
+                                           empty.as_ptr());
+            let volatile_load_instruction =
+                llvm::LLVMBuildLoad(ccx.raw_builder(),
+                                    element,
+                                    empty.as_ptr());
+            llvm::LLVMSetVolatile(volatile_load_instruction, llvm::True);
+            llvm::LLVMSetAlignment(volatile_load_instruction, 1);
+        }
+    }
+}
+
+/// Allocates the global variable responsible for the .debug_gdb_scripts binary
+/// section.
+pub fn get_or_insert_gdb_debug_scripts_section_global(ccx: &CrateContext)
+                                                  -> llvm::ValueRef {
+    let c_section_var_name = "__rustc_debug_gdb_scripts_section__\0";
+    let section_var_name = &c_section_var_name[..c_section_var_name.len()-1];
+
+    let section_var = unsafe {
+        llvm::LLVMGetNamedGlobal(ccx.llmod(),
+                                 c_section_var_name.as_ptr() as *const _)
+    };
+
+    if section_var == ptr::null_mut() {
+        let section_name = b".debug_gdb_scripts\0";
+        let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0";
+
+        unsafe {
+            let llvm_type = Type::array(&Type::i8(ccx),
+                                        section_contents.len() as u64);
+
+            let section_var = declare::define_global(ccx, section_var_name,
+                                                     llvm_type).unwrap_or_else(||{
+                bug!("symbol `{}` is already defined", section_var_name)
+            });
+            llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _);
+            llvm::LLVMSetInitializer(section_var, C_bytes(ccx, section_contents));
+            llvm::LLVMSetGlobalConstant(section_var, llvm::True);
+            llvm::LLVMSetUnnamedAddr(section_var, llvm::True);
+            llvm::SetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage);
+            // This should make sure that the whole section is not larger than
+            // the string it contains. Otherwise we get a warning from GDB.
+            llvm::LLVMSetAlignment(section_var, 1);
+            section_var
+        }
+    } else {
+        section_var
+    }
+}
+
+pub fn needs_gdb_debug_scripts_section(ccx: &CrateContext) -> bool {
+    let omit_gdb_pretty_printer_section =
+        attr::contains_name(&ccx.tcx().map.krate_attrs(),
+                            "omit_gdb_pretty_printer_section");
+
+    !omit_gdb_pretty_printer_section &&
+    !ccx.sess().target.target.options.is_like_osx &&
+    !ccx.sess().target.target.options.is_like_windows &&
+    ccx.sess().opts.debuginfo != NoDebugInfo
+}
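+
+// For reference, a crate can opt out of the section with the (unstable at the
+// time of writing) attribute checked above, e.g.:
+//
+//     #![feature(omit_gdb_pretty_printer_section)]
+//     #![omit_gdb_pretty_printer_section]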
diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs
new file mode 100644 (file)
index 0000000..8471b6a
--- /dev/null
+++ b/src/librustc_trans/debuginfo/metadata.rs
@@ -0,0 +1,2130 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use self::RecursiveTypeDescription::*;
+use self::MemberOffset::*;
+use self::MemberDescriptionFactory::*;
+use self::EnumDiscriminantInfo::*;
+
+use super::utils::{debug_context, DIB, span_start, bytes_to_bits, size_and_align_of,
+                   get_namespace_and_span_for_item, create_DIArray,
+                   fn_should_be_ignored, is_node_local_to_unit};
+use super::namespace::namespace_for_item;
+use super::type_names::{compute_debuginfo_type_name, push_debuginfo_type_name};
+use super::{declare_local, VariableKind, VariableAccess};
+
+use llvm::{self, ValueRef};
+use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType};
+
+use rustc::hir::def_id::DefId;
+use rustc::infer;
+use rustc::hir::pat_util;
+use rustc::ty::subst;
+use rustc::hir::map as hir_map;
+use rustc::hir::{self, PatKind};
+use {type_of, adt, machine, monomorphize};
+use common::{self, CrateContext, FunctionContext, Block};
+use _match::{BindingInfo, TransBindingMode};
+use type_::Type;
+use rustc::ty::{self, Ty};
+use session::config::{self, FullDebugInfo};
+use util::nodemap::FnvHashMap;
+use util::common::path2cstr;
+
+use libc::{c_uint, c_longlong};
+use std::ffi::CString;
+use std::path::Path;
+use std::ptr;
+use std::rc::Rc;
+use syntax;
+use syntax::util::interner::Interner;
+use syntax::codemap::Span;
+use syntax::{ast, codemap};
+use syntax::parse::token;
+
+
+// From DWARF 5.
+// See http://www.dwarfstd.org/ShowIssue.php?issue=140129.1
+const DW_LANG_RUST: c_uint = 0x1c;
+#[allow(non_upper_case_globals)]
+const DW_ATE_boolean: c_uint = 0x02;
+#[allow(non_upper_case_globals)]
+const DW_ATE_float: c_uint = 0x04;
+#[allow(non_upper_case_globals)]
+const DW_ATE_signed: c_uint = 0x05;
+#[allow(non_upper_case_globals)]
+const DW_ATE_unsigned: c_uint = 0x07;
+#[allow(non_upper_case_globals)]
+const DW_ATE_unsigned_char: c_uint = 0x08;
+
+pub const UNKNOWN_LINE_NUMBER: c_uint = 0;
+pub const UNKNOWN_COLUMN_NUMBER: c_uint = 0;
+
+// ptr::null() doesn't work :(
+const NO_FILE_METADATA: DIFile = (0 as DIFile);
+const NO_SCOPE_METADATA: DIScope = (0 as DIScope);
+
+const FLAGS_NONE: c_uint = 0;
+
+#[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)]
+pub struct UniqueTypeId(ast::Name);
+
+// The TypeMap is where the CrateDebugContext holds the type metadata nodes
+// created so far. The metadata nodes are indexed by UniqueTypeId, and, for
+// faster lookup, also by Ty. The TypeMap is responsible for creating
+// UniqueTypeIds.
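+// (Lookup order, see type_metadata() below: first Ty -> metadata directly; on
+// a miss, Ty -> UniqueTypeId -> metadata, so that types differing only in
+// their region arguments can share a single metadata node.)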
+pub struct TypeMap<'tcx> {
+    // The UniqueTypeIds created so far
+    unique_id_interner: Interner<Rc<String>>,
+    // A map from UniqueTypeId to debuginfo metadata for that type. This is a 1:1 mapping.
+    unique_id_to_metadata: FnvHashMap<UniqueTypeId, DIType>,
+    // A map from types to debuginfo metadata. This is a N:1 mapping.
+    type_to_metadata: FnvHashMap<Ty<'tcx>, DIType>,
+    // A map from types to UniqueTypeId. This is a N:1 mapping.
+    type_to_unique_id: FnvHashMap<Ty<'tcx>, UniqueTypeId>
+}
+
+impl<'tcx> TypeMap<'tcx> {
+    pub fn new() -> TypeMap<'tcx> {
+        TypeMap {
+            unique_id_interner: Interner::new(),
+            type_to_metadata: FnvHashMap(),
+            unique_id_to_metadata: FnvHashMap(),
+            type_to_unique_id: FnvHashMap(),
+        }
+    }
+
+    // Adds a Ty-to-metadata mapping to the TypeMap. The method will fail if
+    // the mapping already exists.
+    fn register_type_with_metadata<'a>(&mut self,
+                                       type_: Ty<'tcx>,
+                                       metadata: DIType) {
+        if self.type_to_metadata.insert(type_, metadata).is_some() {
+            bug!("Type metadata for Ty '{}' is already in the TypeMap!", type_);
+        }
+    }
+
+    // Adds a UniqueTypeId-to-metadata mapping to the TypeMap. The method will
+    // fail if the mapping already exists.
+    fn register_unique_id_with_metadata(&mut self,
+                                        unique_type_id: UniqueTypeId,
+                                        metadata: DIType) {
+        if self.unique_id_to_metadata.insert(unique_type_id, metadata).is_some() {
+            let unique_type_id_str = self.get_unique_type_id_as_string(unique_type_id);
+            bug!("Type metadata for unique id '{}' is already in the TypeMap!",
+                 &unique_type_id_str[..]);
+        }
+    }
+
+    fn find_metadata_for_type(&self, type_: Ty<'tcx>) -> Option<DIType> {
+        self.type_to_metadata.get(&type_).cloned()
+    }
+
+    fn find_metadata_for_unique_id(&self, unique_type_id: UniqueTypeId) -> Option<DIType> {
+        self.unique_id_to_metadata.get(&unique_type_id).cloned()
+    }
+
+    // Get the string representation of a UniqueTypeId. This method will fail if
+    // the id is unknown.
+    fn get_unique_type_id_as_string(&self, unique_type_id: UniqueTypeId) -> Rc<String> {
+        let UniqueTypeId(interner_key) = unique_type_id;
+        self.unique_id_interner.get(interner_key)
+    }
+
+    // Get the UniqueTypeId for the given type. If the UniqueTypeId for the given
+    // type has been requested before, this is just a table lookup. Otherwise an
+    // ID will be generated and stored for later lookup.
+    fn get_unique_type_id_of_type<'a>(&mut self, cx: &CrateContext<'a, 'tcx>,
+                                      type_: Ty<'tcx>) -> UniqueTypeId {
+
+        // basic type             -> {:name of the type:}
+        // tuple                  -> {tuple_(:param-uid:)*}
+        // struct                 -> {struct_:svh: / :node-id:_<(:param-uid:),*> }
+        // enum                   -> {enum_:svh: / :node-id:_<(:param-uid:),*> }
+        // enum variant           -> {variant_:variant-name:_:enum-uid:}
+        // reference (&)          -> {& :pointee-uid:}
+        // mut reference (&mut)   -> {&mut :pointee-uid:}
+        // ptr (*)                -> {* :pointee-uid:}
+        // mut ptr (*mut)         -> {*mut :pointee-uid:}
+        // unique ptr (box)       -> {box :pointee-uid:}
+        // @-ptr (@)              -> {@ :pointee-uid:}
+        // sized vec ([T; x])     -> {[:size:] :element-uid:}
+        // unsized vec ([T])      -> {[] :element-uid:}
+        // trait (T)              -> {trait_:svh: / :node-id:_<(:param-uid:),*> }
+        // closure                -> {<unsafe_> <once_> :store-sigil: |(:param-uid:),* <,_...>| -> \
+        //                             :return-type-uid: : (:bounds:)*}
+        // function               -> {<unsafe_> <abi_> fn( (:param-uid:)* <,_...> ) -> \
+        //                             :return-type-uid:}
+
+        match self.type_to_unique_id.get(&type_).cloned() {
+            Some(unique_type_id) => return unique_type_id,
+            None => { /* generate one */}
+        };
+
+        let mut unique_type_id = String::with_capacity(256);
+        unique_type_id.push('{');
+
+        match type_.sty {
+            ty::TyBool     |
+            ty::TyChar     |
+            ty::TyStr      |
+            ty::TyInt(_)   |
+            ty::TyUint(_)  |
+            ty::TyFloat(_) => {
+                push_debuginfo_type_name(cx, type_, false, &mut unique_type_id);
+            },
+            ty::TyEnum(def, substs) => {
+                unique_type_id.push_str("enum ");
+                from_def_id_and_substs(self, cx, def.did, substs, &mut unique_type_id);
+            },
+            ty::TyStruct(def, substs) => {
+                unique_type_id.push_str("struct ");
+                from_def_id_and_substs(self, cx, def.did, substs, &mut unique_type_id);
+            },
+            ty::TyTuple(ref component_types) if component_types.is_empty() => {
+                push_debuginfo_type_name(cx, type_, false, &mut unique_type_id);
+            },
+            ty::TyTuple(ref component_types) => {
+                unique_type_id.push_str("tuple ");
+                for &component_type in component_types {
+                    let component_type_id =
+                        self.get_unique_type_id_of_type(cx, component_type);
+                    let component_type_id =
+                        self.get_unique_type_id_as_string(component_type_id);
+                    unique_type_id.push_str(&component_type_id[..]);
+                }
+            },
+            ty::TyBox(inner_type) => {
+                unique_type_id.push_str("box ");
+                let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type);
+                let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
+                unique_type_id.push_str(&inner_type_id[..]);
+            },
+            ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
+                unique_type_id.push('*');
+                if mutbl == hir::MutMutable {
+                    unique_type_id.push_str("mut");
+                }
+
+                let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type);
+                let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
+                unique_type_id.push_str(&inner_type_id[..]);
+            },
+            ty::TyRef(_, ty::TypeAndMut { ty: inner_type, mutbl }) => {
+                unique_type_id.push('&');
+                if mutbl == hir::MutMutable {
+                    unique_type_id.push_str("mut");
+                }
+
+                let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type);
+                let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
+                unique_type_id.push_str(&inner_type_id[..]);
+            },
+            ty::TyArray(inner_type, len) => {
+                unique_type_id.push_str(&format!("[{}]", len));
+
+                let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type);
+                let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
+                unique_type_id.push_str(&inner_type_id[..]);
+            },
+            ty::TySlice(inner_type) => {
+                unique_type_id.push_str("[]");
+
+                let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type);
+                let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
+                unique_type_id.push_str(&inner_type_id[..]);
+            },
+            ty::TyTrait(ref trait_data) => {
+                unique_type_id.push_str("trait ");
+
+                let principal = cx.tcx().erase_late_bound_regions(&trait_data.principal);
+
+                from_def_id_and_substs(self,
+                                       cx,
+                                       principal.def_id,
+                                       principal.substs,
+                                       &mut unique_type_id);
+            },
+            ty::TyFnDef(_, _, &ty::BareFnTy{ unsafety, abi, ref sig } ) |
+            ty::TyFnPtr(&ty::BareFnTy{ unsafety, abi, ref sig } ) => {
+                if unsafety == hir::Unsafety::Unsafe {
+                    unique_type_id.push_str("unsafe ");
+                }
+
+                unique_type_id.push_str(abi.name());
+
+                unique_type_id.push_str(" fn(");
+
+                let sig = cx.tcx().erase_late_bound_regions(sig);
+                let sig = infer::normalize_associated_type(cx.tcx(), &sig);
+
+                for &parameter_type in &sig.inputs {
+                    let parameter_type_id =
+                        self.get_unique_type_id_of_type(cx, parameter_type);
+                    let parameter_type_id =
+                        self.get_unique_type_id_as_string(parameter_type_id);
+                    unique_type_id.push_str(&parameter_type_id[..]);
+                    unique_type_id.push(',');
+                }
+
+                if sig.variadic {
+                    unique_type_id.push_str("...");
+                }
+
+                unique_type_id.push_str(")->");
+                match sig.output {
+                    ty::FnConverging(ret_ty) => {
+                        let return_type_id = self.get_unique_type_id_of_type(cx, ret_ty);
+                        let return_type_id = self.get_unique_type_id_as_string(return_type_id);
+                        unique_type_id.push_str(&return_type_id[..]);
+                    }
+                    ty::FnDiverging => {
+                        unique_type_id.push_str("!");
+                    }
+                }
+            },
+            ty::TyClosure(_, ref substs) if substs.upvar_tys.is_empty() => {
+                push_debuginfo_type_name(cx, type_, false, &mut unique_type_id);
+            },
+            ty::TyClosure(_, ref substs) => {
+                unique_type_id.push_str("closure ");
+                for upvar_type in &substs.upvar_tys {
+                    let upvar_type_id =
+                        self.get_unique_type_id_of_type(cx, upvar_type);
+                    let upvar_type_id =
+                        self.get_unique_type_id_as_string(upvar_type_id);
+                    unique_type_id.push_str(&upvar_type_id[..]);
+                }
+            },
+            _ => {
+                bug!("get_unique_type_id_of_type() - unexpected type: {:?}",
+                     type_)
+            }
+        };
+
+        unique_type_id.push('}');
+
+        // Trim to size before storing permanently
+        unique_type_id.shrink_to_fit();
+
+        let key = self.unique_id_interner.intern(Rc::new(unique_type_id));
+        self.type_to_unique_id.insert(type_, UniqueTypeId(key));
+
+        return UniqueTypeId(key);
+
+        fn from_def_id_and_substs<'a, 'tcx>(type_map: &mut TypeMap<'tcx>,
+                                            cx: &CrateContext<'a, 'tcx>,
+                                            def_id: DefId,
+                                            substs: &subst::Substs<'tcx>,
+                                            output: &mut String) {
+            // First, find out the 'real' def_id of the type. Items inlined from
+            // other crates have to be mapped back to their source.
+            let def_id = if let Some(node_id) = cx.tcx().map.as_local_node_id(def_id) {
+                match cx.external_srcs().borrow().get(&node_id).cloned() {
+                    Some(source_def_id) => {
+                        // The given def_id identifies the inlined copy of a
+                        // type definition, let's take the source of the copy.
+                        source_def_id
+                    }
+                    None => def_id
+                }
+            } else {
+                def_id
+            };
+
+            // Get the crate name/disambiguator as first part of the identifier.
+            let crate_name = if def_id.is_local() {
+                cx.tcx().crate_name.clone()
+            } else {
+                cx.sess().cstore.original_crate_name(def_id.krate)
+            };
+            let crate_disambiguator = cx.tcx().crate_disambiguator(def_id.krate);
+
+            output.push_str(&crate_name[..]);
+            output.push_str("/");
+            output.push_str(&crate_disambiguator[..]);
+            output.push_str("/");
+            // Add the def-index as the second part
+            output.push_str(&format!("{:x}", def_id.index.as_usize()));
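+            // At this point the identifier looks roughly like
+            // "crate_name/crate_disambiguator/1f" (def-index in hex);
+            // any type parameters are appended in angle brackets below.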
+
+            let tps = substs.types.get_slice(subst::TypeSpace);
+            if !tps.is_empty() {
+                output.push('<');
+
+                for &type_parameter in tps {
+                    let param_type_id =
+                        type_map.get_unique_type_id_of_type(cx, type_parameter);
+                    let param_type_id =
+                        type_map.get_unique_type_id_as_string(param_type_id);
+                    output.push_str(&param_type_id[..]);
+                    output.push(',');
+                }
+
+                output.push('>');
+            }
+        }
+    }
+
+    // Get the UniqueTypeId for an enum variant. Enum variants are not really
+    // types of their own, so they need special handling. We still need a
+    // UniqueTypeId for them, since to debuginfo they *are* real types.
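+    // The resulting id is simply the enum's own unique id string with
+    // "::<variant name>" appended.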
+    fn get_unique_type_id_of_enum_variant<'a>(&mut self,
+                                              cx: &CrateContext<'a, 'tcx>,
+                                              enum_type: Ty<'tcx>,
+                                              variant_name: &str)
+                                              -> UniqueTypeId {
+        let enum_type_id = self.get_unique_type_id_of_type(cx, enum_type);
+        let enum_variant_type_id = format!("{}::{}",
+                                           &self.get_unique_type_id_as_string(enum_type_id),
+                                           variant_name);
+        let interner_key = self.unique_id_interner.intern(Rc::new(enum_variant_type_id));
+        UniqueTypeId(interner_key)
+    }
+}
+
+// A description of some recursive type. It is either already finished (the
+// FinalMetadata case) or not yet finished, but contains all the information
+// needed to generate the missing parts of the description. See the
+// documentation section on Recursive Types at the top of this file for more
+// information.
+enum RecursiveTypeDescription<'tcx> {
+    UnfinishedMetadata {
+        unfinished_type: Ty<'tcx>,
+        unique_type_id: UniqueTypeId,
+        metadata_stub: DICompositeType,
+        llvm_type: Type,
+        member_description_factory: MemberDescriptionFactory<'tcx>,
+    },
+    FinalMetadata(DICompositeType)
+}
+
+fn create_and_register_recursive_type_forward_declaration<'a, 'tcx>(
+    cx: &CrateContext<'a, 'tcx>,
+    unfinished_type: Ty<'tcx>,
+    unique_type_id: UniqueTypeId,
+    metadata_stub: DICompositeType,
+    llvm_type: Type,
+    member_description_factory: MemberDescriptionFactory<'tcx>)
+ -> RecursiveTypeDescription<'tcx> {
+
+    // Insert the stub into the TypeMap in order to allow for recursive references
+    let mut type_map = debug_context(cx).type_map.borrow_mut();
+    type_map.register_unique_id_with_metadata(unique_type_id, metadata_stub);
+    type_map.register_type_with_metadata(unfinished_type, metadata_stub);
+
+    UnfinishedMetadata {
+        unfinished_type: unfinished_type,
+        unique_type_id: unique_type_id,
+        metadata_stub: metadata_stub,
+        llvm_type: llvm_type,
+        member_description_factory: member_description_factory,
+    }
+}
+
+impl<'tcx> RecursiveTypeDescription<'tcx> {
+    // Finishes up the description of the type in question (mostly by providing
+    // descriptions of the fields of the given type) and returns the final type
+    // metadata.
+    fn finalize<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> MetadataCreationResult {
+        match *self {
+            FinalMetadata(metadata) => MetadataCreationResult::new(metadata, false),
+            UnfinishedMetadata {
+                unfinished_type,
+                unique_type_id,
+                metadata_stub,
+                llvm_type,
+                ref member_description_factory,
+                ..
+            } => {
+                // Make sure that we have a forward declaration of the type in
+                // the TypeMap so that recursive references are possible. This
+                // will always be the case if the RecursiveTypeDescription has
+                // been properly created through the
+                // create_and_register_recursive_type_forward_declaration()
+                // function.
+                {
+                    let type_map = debug_context(cx).type_map.borrow();
+                    if type_map.find_metadata_for_unique_id(unique_type_id).is_none() ||
+                       type_map.find_metadata_for_type(unfinished_type).is_none() {
+                        bug!("Forward declaration of potentially recursive type \
+                              '{:?}' was not found in TypeMap!",
+                             unfinished_type);
+                    }
+                }
+
+                // ... then create the member descriptions ...
+                let member_descriptions =
+                    member_description_factory.create_member_descriptions(cx);
+
+                // ... and attach them to the stub to complete it.
+                set_members_of_composite_type(cx,
+                                              metadata_stub,
+                                              llvm_type,
+                                              &member_descriptions[..]);
+                return MetadataCreationResult::new(metadata_stub, true);
+            }
+        }
+    }
+}
+
+// Returns from the enclosing function if the type metadata with the given
+// unique id can be found in the type map
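+// (The callers below first create metadata for element or pointee types; doing
+// so can recursively create the very metadata they are about to build, which
+// this macro detects and returns early.)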
+macro_rules! return_if_metadata_created_in_meantime {
+    ($cx: expr, $unique_type_id: expr) => (
+        match debug_context($cx).type_map
+                                .borrow()
+                                .find_metadata_for_unique_id($unique_type_id) {
+            Some(metadata) => return MetadataCreationResult::new(metadata, true),
+            None => { /* proceed normally */ }
+        }
+    )
+}
+
+fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                unique_type_id: UniqueTypeId,
+                                element_type: Ty<'tcx>,
+                                len: Option<u64>,
+                                span: Span)
+                                -> MetadataCreationResult {
+    let element_type_metadata = type_metadata(cx, element_type, span);
+
+    return_if_metadata_created_in_meantime!(cx, unique_type_id);
+
+    let element_llvm_type = type_of::type_of(cx, element_type);
+    let (element_type_size, element_type_align) = size_and_align_of(cx, element_llvm_type);
+
+    let (array_size_in_bytes, upper_bound) = match len {
+        Some(len) => (element_type_size * len, len as c_longlong),
+        None => (0, -1)
+    };
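+    // A missing length (an unsized [T] or str) is encoded as size 0 with an
+    // upper bound of -1, marking the element count as unknown.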
+
+    let subrange = unsafe {
+        llvm::LLVMDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound)
+    };
+
+    let subscripts = create_DIArray(DIB(cx), &[subrange]);
+    let metadata = unsafe {
+        llvm::LLVMDIBuilderCreateArrayType(
+            DIB(cx),
+            bytes_to_bits(array_size_in_bytes),
+            bytes_to_bits(element_type_align),
+            element_type_metadata,
+            subscripts)
+    };
+
+    return MetadataCreationResult::new(metadata, false);
+}
+
+fn vec_slice_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                vec_type: Ty<'tcx>,
+                                element_type: Ty<'tcx>,
+                                unique_type_id: UniqueTypeId,
+                                span: Span)
+                                -> MetadataCreationResult {
+    let data_ptr_type = cx.tcx().mk_ptr(ty::TypeAndMut {
+        ty: element_type,
+        mutbl: hir::MutImmutable
+    });
+
+    let element_type_metadata = type_metadata(cx, data_ptr_type, span);
+
+    return_if_metadata_created_in_meantime!(cx, unique_type_id);
+
+    let slice_llvm_type = type_of::type_of(cx, vec_type);
+    let slice_type_name = compute_debuginfo_type_name(cx, vec_type, true);
+
+    let member_llvm_types = slice_llvm_type.field_types();
+    assert!(slice_layout_is_correct(cx,
+                                    &member_llvm_types[..],
+                                    element_type));
+    let member_descriptions = [
+        MemberDescription {
+            name: "data_ptr".to_string(),
+            llvm_type: member_llvm_types[0],
+            type_metadata: element_type_metadata,
+            offset: ComputedMemberOffset,
+            flags: FLAGS_NONE
+        },
+        MemberDescription {
+            name: "length".to_string(),
+            llvm_type: member_llvm_types[1],
+            type_metadata: type_metadata(cx, cx.tcx().types.usize, span),
+            offset: ComputedMemberOffset,
+            flags: FLAGS_NONE
+        },
+    ];
+
+    assert!(member_descriptions.len() == member_llvm_types.len());
+
+    let loc = span_start(cx, span);
+    let file_metadata = file_metadata(cx, &loc.file.name);
+
+    let metadata = composite_type_metadata(cx,
+                                           slice_llvm_type,
+                                           &slice_type_name[..],
+                                           unique_type_id,
+                                           &member_descriptions,
+                                           NO_SCOPE_METADATA,
+                                           file_metadata,
+                                           span);
+    return MetadataCreationResult::new(metadata, false);
+
+    fn slice_layout_is_correct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                         member_llvm_types: &[Type],
+                                         element_type: Ty<'tcx>)
+                                         -> bool {
+        member_llvm_types.len() == 2 &&
+        member_llvm_types[0] == type_of::type_of(cx, element_type).ptr_to() &&
+        member_llvm_types[1] == cx.int_type()
+    }
+}
+
+fn subroutine_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                      unique_type_id: UniqueTypeId,
+                                      signature: &ty::PolyFnSig<'tcx>,
+                                      span: Span)
+                                      -> MetadataCreationResult
+{
+    let signature = cx.tcx().erase_late_bound_regions(signature);
+
+    let mut signature_metadata: Vec<DIType> = Vec::with_capacity(signature.inputs.len() + 1);
+
+    // return type
+    signature_metadata.push(match signature.output {
+        ty::FnConverging(ret_ty) => match ret_ty.sty {
+            ty::TyTuple(ref tys) if tys.is_empty() => ptr::null_mut(),
+            _ => type_metadata(cx, ret_ty, span)
+        },
+        ty::FnDiverging => diverging_type_metadata(cx)
+    });
+
+    // regular arguments
+    for &argument_type in &signature.inputs {
+        signature_metadata.push(type_metadata(cx, argument_type, span));
+    }
+
+    return_if_metadata_created_in_meantime!(cx, unique_type_id);
+
+    return MetadataCreationResult::new(
+        unsafe {
+            llvm::LLVMDIBuilderCreateSubroutineType(
+                DIB(cx),
+                NO_FILE_METADATA,
+                create_DIArray(DIB(cx), &signature_metadata[..]))
+        },
+        false);
+}
+
+// FIXME(1563) This is all a bit of a hack because 'trait pointer' is an ill-
+// defined concept. For the case of an actual trait pointer (i.e., Box<Trait>,
+// &Trait), trait_object_type should be the whole thing (e.g., Box<Trait>) and
+// trait_type should be the actual trait (e.g., Trait). Where the trait is part
+// of a DST struct, there is no trait_object_type and the results of this
+// function will be a little bit weird.
+fn trait_pointer_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                    trait_type: Ty<'tcx>,
+                                    trait_object_type: Option<Ty<'tcx>>,
+                                    unique_type_id: UniqueTypeId)
+                                    -> DIType {
+    // The implementation provided here is a stub. It makes sure that the trait
+    // type is assigned the correct name, size, namespace, and source location.
+    // But it does not describe the trait's methods.
+
+    let def_id = match trait_type.sty {
+        ty::TyTrait(ref data) => data.principal_def_id(),
+        _ => {
+            bug!("debuginfo: Unexpected trait-object type in \
+                  trait_pointer_metadata(): {:?}",
+                 trait_type);
+        }
+    };
+
+    let trait_object_type = trait_object_type.unwrap_or(trait_type);
+    let trait_type_name =
+        compute_debuginfo_type_name(cx, trait_object_type, false);
+
+    let (containing_scope, _) = get_namespace_and_span_for_item(cx, def_id);
+
+    let trait_llvm_type = type_of::type_of(cx, trait_object_type);
+
+    composite_type_metadata(cx,
+                            trait_llvm_type,
+                            &trait_type_name[..],
+                            unique_type_id,
+                            &[],
+                            containing_scope,
+                            NO_FILE_METADATA,
+                            codemap::DUMMY_SP)
+}
+
+pub fn type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                               t: Ty<'tcx>,
+                               usage_site_span: Span)
+                               -> DIType {
+    // Get the unique type id of this type.
+    let unique_type_id = {
+        let mut type_map = debug_context(cx).type_map.borrow_mut();
+        // First, try to find the type in TypeMap. If we have seen it before, we
+        // can exit early here.
+        match type_map.find_metadata_for_type(t) {
+            Some(metadata) => {
+                return metadata;
+            },
+            None => {
+                // The Ty is not in the TypeMap but maybe we have already seen
+                // an equivalent type (e.g. only differing in region arguments).
+                // In order to find out, generate the unique type id and look
+                // that up.
+                let unique_type_id = type_map.get_unique_type_id_of_type(cx, t);
+                match type_map.find_metadata_for_unique_id(unique_type_id) {
+                    Some(metadata) => {
+                        // There is already an equivalent type in the TypeMap.
+                        // Register this Ty as an alias in the cache and
+                        // return the cached metadata.
+                        type_map.register_type_with_metadata(t, metadata);
+                        return metadata;
+                    },
+                    None => {
+                        // There really is no type metadata for this type, so
+                        // proceed by creating it.
+                        unique_type_id
+                    }
+                }
+            }
+        }
+    };
+
+    debug!("type_metadata: {:?}", t);
+
+    let sty = &t.sty;
+    let MetadataCreationResult { metadata, already_stored_in_typemap } = match *sty {
+        ty::TyBool     |
+        ty::TyChar     |
+        ty::TyInt(_)   |
+        ty::TyUint(_)  |
+        ty::TyFloat(_) => {
+            MetadataCreationResult::new(basic_type_metadata(cx, t), false)
+        }
+        ty::TyTuple(ref elements) if elements.is_empty() => {
+            MetadataCreationResult::new(basic_type_metadata(cx, t), false)
+        }
+        ty::TyEnum(def, _) => {
+            prepare_enum_metadata(cx,
+                                  t,
+                                  def.did,
+                                  unique_type_id,
+                                  usage_site_span).finalize(cx)
+        }
+        ty::TyArray(typ, len) => {
+            fixed_vec_metadata(cx, unique_type_id, typ, Some(len as u64), usage_site_span)
+        }
+        ty::TySlice(typ) => {
+            fixed_vec_metadata(cx, unique_type_id, typ, None, usage_site_span)
+        }
+        ty::TyStr => {
+            fixed_vec_metadata(cx, unique_type_id, cx.tcx().types.i8, None, usage_site_span)
+        }
+        ty::TyTrait(..) => {
+            MetadataCreationResult::new(
+                trait_pointer_metadata(cx, t, None, unique_type_id),
+                false)
+        }
+        ty::TyBox(ty) |
+        ty::TyRawPtr(ty::TypeAndMut{ty, ..}) |
+        ty::TyRef(_, ty::TypeAndMut{ty, ..}) => {
+            match ty.sty {
+                ty::TySlice(typ) => {
+                    vec_slice_metadata(cx, t, typ, unique_type_id, usage_site_span)
+                }
+                ty::TyStr => {
+                    vec_slice_metadata(cx, t, cx.tcx().types.u8, unique_type_id, usage_site_span)
+                }
+                ty::TyTrait(..) => {
+                    MetadataCreationResult::new(
+                        trait_pointer_metadata(cx, ty, Some(t), unique_type_id),
+                        false)
+                }
+                _ => {
+                    let pointee_metadata = type_metadata(cx, ty, usage_site_span);
+
+                    match debug_context(cx).type_map
+                                           .borrow()
+                                           .find_metadata_for_unique_id(unique_type_id) {
+                        Some(metadata) => return metadata,
+                        None => { /* proceed normally */ }
+                    };
+
+                    MetadataCreationResult::new(pointer_type_metadata(cx, t, pointee_metadata),
+                                                false)
+                }
+            }
+        }
+        ty::TyFnDef(_, _, ref barefnty) | ty::TyFnPtr(ref barefnty) => {
+            let fn_metadata = subroutine_type_metadata(cx,
+                                                       unique_type_id,
+                                                       &barefnty.sig,
+                                                       usage_site_span).metadata;
+            match debug_context(cx).type_map
+                                   .borrow()
+                                   .find_metadata_for_unique_id(unique_type_id) {
+                Some(metadata) => return metadata,
+                None => { /* proceed normally */ }
+            };
+
+            // This is actually a function pointer, so wrap it in pointer DI
+            MetadataCreationResult::new(pointer_type_metadata(cx, t, fn_metadata), false)
+        }
+        ty::TyClosure(_, ref substs) => {
+            prepare_tuple_metadata(cx,
+                                   t,
+                                   &substs.upvar_tys,
+                                   unique_type_id,
+                                   usage_site_span).finalize(cx)
+        }
+        ty::TyStruct(..) => {
+            prepare_struct_metadata(cx,
+                                    t,
+                                    unique_type_id,
+                                    usage_site_span).finalize(cx)
+        }
+        ty::TyTuple(ref elements) => {
+            prepare_tuple_metadata(cx,
+                                   t,
+                                   &elements[..],
+                                   unique_type_id,
+                                   usage_site_span).finalize(cx)
+        }
+        _ => {
+            bug!("debuginfo: unexpected type in type_metadata: {:?}", sty)
+        }
+    };
+
+    {
+        let mut type_map = debug_context(cx).type_map.borrow_mut();
+
+        if already_stored_in_typemap {
+            // Also make sure that we already have a TypeMap entry for the unique type id.
+            let metadata_for_uid = match type_map.find_metadata_for_unique_id(unique_type_id) {
+                Some(metadata) => metadata,
+                None => {
+                    let unique_type_id_str =
+                        type_map.get_unique_type_id_as_string(unique_type_id);
+                    span_bug!(usage_site_span,
+                              "Expected type metadata for unique \
+                               type id '{}' to already be in \
+                               the debuginfo::TypeMap but it \
+                               was not. (Ty = {})",
+                              &unique_type_id_str[..],
+                              t);
+                }
+            };
+
+            match type_map.find_metadata_for_type(t) {
+                Some(metadata) => {
+                    if metadata != metadata_for_uid {
+                        let unique_type_id_str =
+                            type_map.get_unique_type_id_as_string(unique_type_id);
+                        span_bug!(usage_site_span,
+                                  "Mismatch between Ty and \
+                                   UniqueTypeId maps in \
+                                   debuginfo::TypeMap. \
+                                   UniqueTypeId={}, Ty={}",
+                                  &unique_type_id_str[..],
+                                  t);
+                    }
+                }
+                None => {
+                    type_map.register_type_with_metadata(t, metadata);
+                }
+            }
+        } else {
+            type_map.register_type_with_metadata(t, metadata);
+            type_map.register_unique_id_with_metadata(unique_type_id, metadata);
+        }
+    }
+
+    metadata
+}
+
+pub fn file_metadata(cx: &CrateContext, full_path: &str) -> DIFile {
+    // FIXME (#9639): This needs to handle non-utf8 paths
+    let work_dir = cx.sess().working_dir.to_str().unwrap();
+    let file_name =
+        if full_path.starts_with(work_dir) {
+            &full_path[work_dir.len() + 1..full_path.len()]
+        } else {
+            full_path
+        };
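+    // For example (illustrative paths): with work_dir "/home/alice/proj" and
+    // full_path "/home/alice/proj/src/lib.rs", file_name becomes "src/lib.rs".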
+
+    file_metadata_(cx, full_path, file_name, &work_dir)
+}
+
+pub fn unknown_file_metadata(cx: &CrateContext) -> DIFile {
+    // Regular filenames should not be empty, so we abuse an empty name as the
+    // key for the special unknown-file metadata.
+    file_metadata_(cx, "", "<unknown>", "")
+}
+
+fn file_metadata_(cx: &CrateContext, key: &str, file_name: &str, work_dir: &str) -> DIFile {
+    match debug_context(cx).created_files.borrow().get(key) {
+        Some(file_metadata) => return *file_metadata,
+        None => ()
+    }
+
+    debug!("file_metadata: file_name: {}, work_dir: {}", file_name, work_dir);
+
+    let file_name = CString::new(file_name).unwrap();
+    let work_dir = CString::new(work_dir).unwrap();
+    let file_metadata = unsafe {
+        llvm::LLVMDIBuilderCreateFile(DIB(cx), file_name.as_ptr(),
+                                      work_dir.as_ptr())
+    };
+
+    let mut created_files = debug_context(cx).created_files.borrow_mut();
+    created_files.insert(key.to_string(), file_metadata);
+    file_metadata
+}
+
+/// Finds the scope metadata node for the given AST node.
+pub fn scope_metadata(fcx: &FunctionContext,
+                      node_id: ast::NodeId,
+                      error_reporting_span: Span)
+                      -> DIScope {
+    let scope_map = &fcx.debug_context
+                        .get_ref(error_reporting_span)
+                        .scope_map;
+    match scope_map.borrow().get(&node_id).cloned() {
+        Some(scope_metadata) => scope_metadata,
+        None => {
+            let node = fcx.ccx.tcx().map.get(node_id);
+
+            span_bug!(error_reporting_span,
+                      "debuginfo: Could not find scope info for node {:?}",
+                      node);
+        }
+    }
+}
+
+pub fn diverging_type_metadata(cx: &CrateContext) -> DIType {
+    unsafe {
+        llvm::LLVMDIBuilderCreateBasicType(
+            DIB(cx),
+            "!\0".as_ptr() as *const _,
+            bytes_to_bits(0),
+            bytes_to_bits(0),
+            DW_ATE_unsigned)
+    }
+}
+
+fn basic_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                 t: Ty<'tcx>) -> DIType {
+
+    debug!("basic_type_metadata: {:?}", t);
+
+    let (name, encoding) = match t.sty {
+        ty::TyTuple(ref elements) if elements.is_empty() =>
+            ("()", DW_ATE_unsigned),
+        ty::TyBool => ("bool", DW_ATE_boolean),
+        ty::TyChar => ("char", DW_ATE_unsigned_char),
+        ty::TyInt(int_ty) => {
+            (int_ty.ty_to_string(), DW_ATE_signed)
+        },
+        ty::TyUint(uint_ty) => {
+            (uint_ty.ty_to_string(), DW_ATE_unsigned)
+        },
+        ty::TyFloat(float_ty) => {
+            (float_ty.ty_to_string(), DW_ATE_float)
+        },
+        _ => bug!("debuginfo::basic_type_metadata - t is invalid type")
+    };
+
+    let llvm_type = type_of::type_of(cx, t);
+    let (size, align) = size_and_align_of(cx, llvm_type);
+    let name = CString::new(name).unwrap();
+    let ty_metadata = unsafe {
+        llvm::LLVMDIBuilderCreateBasicType(
+            DIB(cx),
+            name.as_ptr(),
+            bytes_to_bits(size),
+            bytes_to_bits(align),
+            encoding)
+    };
+
+    return ty_metadata;
+}
+
+fn pointer_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                   pointer_type: Ty<'tcx>,
+                                   pointee_type_metadata: DIType)
+                                   -> DIType {
+    let pointer_llvm_type = type_of::type_of(cx, pointer_type);
+    let (pointer_size, pointer_align) = size_and_align_of(cx, pointer_llvm_type);
+    let name = compute_debuginfo_type_name(cx, pointer_type, false);
+    let name = CString::new(name).unwrap();
+    let ptr_metadata = unsafe {
+        llvm::LLVMDIBuilderCreatePointerType(
+            DIB(cx),
+            pointee_type_metadata,
+            bytes_to_bits(pointer_size),
+            bytes_to_bits(pointer_align),
+            name.as_ptr())
+    };
+    return ptr_metadata;
+}
+
+pub fn compile_unit_metadata(cx: &CrateContext) -> DIDescriptor {
+    let work_dir = &cx.sess().working_dir;
+    let compile_unit_name = match cx.sess().local_crate_source_file {
+        None => fallback_path(cx),
+        Some(ref abs_path) => {
+            if abs_path.is_relative() {
+                cx.sess().warn("debuginfo: Invalid path to crate's local root source file!");
+                fallback_path(cx)
+            } else {
+                match abs_path.strip_prefix(work_dir) {
+                    Ok(ref p) if p.is_relative() => {
+                        if p.starts_with(Path::new("./")) {
+                            path2cstr(p)
+                        } else {
+                            path2cstr(&Path::new(".").join(p))
+                        }
+                    }
+                    _ => fallback_path(cx)
+                }
+            }
+        }
+    };
+
+    debug!("compile_unit_metadata: {:?}", compile_unit_name);
+    let producer = format!("rustc version {}",
+                           (option_env!("CFG_VERSION")).expect("CFG_VERSION"));
+
+    let compile_unit_name = compile_unit_name.as_ptr();
+    let work_dir = path2cstr(&work_dir);
+    let producer = CString::new(producer).unwrap();
+    let flags = "\0";
+    let split_name = "\0";
+    return unsafe {
+        llvm::LLVMDIBuilderCreateCompileUnit(
+            debug_context(cx).builder,
+            DW_LANG_RUST,
+            compile_unit_name,
+            work_dir.as_ptr(),
+            producer.as_ptr(),
+            cx.sess().opts.optimize != config::OptLevel::No,
+            flags.as_ptr() as *const _,
+            0,
+            split_name.as_ptr() as *const _)
+    };
+
+    fn fallback_path(cx: &CrateContext) -> CString {
+        CString::new(cx.link_meta().crate_name.clone()).unwrap()
+    }
+}
+
+struct MetadataCreationResult {
+    metadata: DIType,
+    already_stored_in_typemap: bool
+}
+
+impl MetadataCreationResult {
+    fn new(metadata: DIType, already_stored_in_typemap: bool) -> MetadataCreationResult {
+        MetadataCreationResult {
+            metadata: metadata,
+            already_stored_in_typemap: already_stored_in_typemap
+        }
+    }
+}
+
+#[derive(Debug)]
+enum MemberOffset {
+    FixedMemberOffset { bytes: usize },
+    // For ComputedMemberOffset, the offset is read from the llvm type definition.
+    ComputedMemberOffset
+}
+
+// Description of a type member, which can either be a regular field (as in
+// structs or tuples) or an enum variant.
+#[derive(Debug)]
+struct MemberDescription {
+    name: String,
+    llvm_type: Type,
+    type_metadata: DIType,
+    offset: MemberOffset,
+    flags: c_uint
+}
+
+// A factory for MemberDescriptions. It produces a list of member descriptions
+// for some record-like type. MemberDescriptionFactories are used to defer the
+// creation of type member descriptions in order to break cycles arising from
+// recursive type definitions.
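+// (For example, a recursive type such as `struct List { next: Option<Box<List>> }`
+// needs a forward-declared stub before the types of its fields can be described.)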
+enum MemberDescriptionFactory<'tcx> {
+    StructMDF(StructMemberDescriptionFactory<'tcx>),
+    TupleMDF(TupleMemberDescriptionFactory<'tcx>),
+    EnumMDF(EnumMemberDescriptionFactory<'tcx>),
+    VariantMDF(VariantMemberDescriptionFactory<'tcx>)
+}
+
+impl<'tcx> MemberDescriptionFactory<'tcx> {
+    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
+                                      -> Vec<MemberDescription> {
+        match *self {
+            StructMDF(ref this) => {
+                this.create_member_descriptions(cx)
+            }
+            TupleMDF(ref this) => {
+                this.create_member_descriptions(cx)
+            }
+            EnumMDF(ref this) => {
+                this.create_member_descriptions(cx)
+            }
+            VariantMDF(ref this) => {
+                this.create_member_descriptions(cx)
+            }
+        }
+    }
+}
+
+//=-----------------------------------------------------------------------------
+// Structs
+//=-----------------------------------------------------------------------------
+
+// Creates MemberDescriptions for the fields of a struct
+struct StructMemberDescriptionFactory<'tcx> {
+    variant: ty::VariantDef<'tcx>,
+    substs: &'tcx subst::Substs<'tcx>,
+    is_simd: bool,
+    span: Span,
+}
+
+impl<'tcx> StructMemberDescriptionFactory<'tcx> {
+    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
+                                      -> Vec<MemberDescription> {
+        if let ty::VariantKind::Unit = self.variant.kind() {
+            return Vec::new();
+        }
+
+        let field_size = if self.is_simd {
+            let fty = monomorphize::field_ty(cx.tcx(),
+                                             self.substs,
+                                             &self.variant.fields[0]);
+            Some(machine::llsize_of_alloc(
+                cx,
+                type_of::type_of(cx, fty)
+            ) as usize)
+        } else {
+            None
+        };
+
+        self.variant.fields.iter().enumerate().map(|(i, f)| {
+            let name = if let ty::VariantKind::Tuple = self.variant.kind() {
+                format!("__{}", i)
+            } else {
+                f.name.to_string()
+            };
+            let fty = monomorphize::field_ty(cx.tcx(), self.substs, f);
+
+            let offset = if self.is_simd {
+                FixedMemberOffset { bytes: i * field_size.unwrap() }
+            } else {
+                ComputedMemberOffset
+            };
+
+            MemberDescription {
+                name: name,
+                llvm_type: type_of::type_of(cx, fty),
+                type_metadata: type_metadata(cx, fty, self.span),
+                offset: offset,
+                flags: FLAGS_NONE,
+            }
+        }).collect()
+    }
+}
+
+
+fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                     struct_type: Ty<'tcx>,
+                                     unique_type_id: UniqueTypeId,
+                                     span: Span)
+                                     -> RecursiveTypeDescription<'tcx> {
+    let struct_name = compute_debuginfo_type_name(cx, struct_type, false);
+    let struct_llvm_type = type_of::in_memory_type_of(cx, struct_type);
+
+    let (variant, substs) = match struct_type.sty {
+        ty::TyStruct(def, substs) => (def.struct_variant(), substs),
+        _ => bug!("prepare_struct_metadata on a non-struct")
+    };
+
+    let (containing_scope, _) = get_namespace_and_span_for_item(cx, variant.did);
+
+    let struct_metadata_stub = create_struct_stub(cx,
+                                                  struct_llvm_type,
+                                                  &struct_name,
+                                                  unique_type_id,
+                                                  containing_scope);
+
+    create_and_register_recursive_type_forward_declaration(
+        cx,
+        struct_type,
+        unique_type_id,
+        struct_metadata_stub,
+        struct_llvm_type,
+        StructMDF(StructMemberDescriptionFactory {
+            variant: variant,
+            substs: substs,
+            is_simd: struct_type.is_simd(),
+            span: span,
+        })
+    )
+}
+
+
+//=-----------------------------------------------------------------------------
+// Tuples
+//=-----------------------------------------------------------------------------
+
+// Creates MemberDescriptions for the fields of a tuple
+struct TupleMemberDescriptionFactory<'tcx> {
+    component_types: Vec<Ty<'tcx>>,
+    span: Span,
+}
+
+impl<'tcx> TupleMemberDescriptionFactory<'tcx> {
+    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
+                                      -> Vec<MemberDescription> {
+        self.component_types
+            .iter()
+            .enumerate()
+            .map(|(i, &component_type)| {
+            MemberDescription {
+                name: format!("__{}", i),
+                llvm_type: type_of::type_of(cx, component_type),
+                type_metadata: type_metadata(cx, component_type, self.span),
+                offset: ComputedMemberOffset,
+                flags: FLAGS_NONE,
+            }
+        }).collect()
+    }
+}
+
+fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                    tuple_type: Ty<'tcx>,
+                                    component_types: &[Ty<'tcx>],
+                                    unique_type_id: UniqueTypeId,
+                                    span: Span)
+                                    -> RecursiveTypeDescription<'tcx> {
+    let tuple_name = compute_debuginfo_type_name(cx, tuple_type, false);
+    let tuple_llvm_type = type_of::type_of(cx, tuple_type);
+
+    create_and_register_recursive_type_forward_declaration(
+        cx,
+        tuple_type,
+        unique_type_id,
+        create_struct_stub(cx,
+                           tuple_llvm_type,
+                           &tuple_name[..],
+                           unique_type_id,
+                           NO_SCOPE_METADATA),
+        tuple_llvm_type,
+        TupleMDF(TupleMemberDescriptionFactory {
+            component_types: component_types.to_vec(),
+            span: span,
+        })
+    )
+}
+
+
+//=-----------------------------------------------------------------------------
+// Enums
+//=-----------------------------------------------------------------------------
+
+// Describes the members of an enum value: An enum is described as a union of
+// structs in DWARF. This MemberDescriptionFactory provides the description for
+// the members of this union; so for every variant of the given enum, this
+// factory will produce one MemberDescription (all with no name and a fixed
+// offset of zero bytes).
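+// (Illustratively, `enum E { A(u32), B }` is described as a union with two
+// unnamed struct members, one per variant, both at offset 0.)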
+struct EnumMemberDescriptionFactory<'tcx> {
+    enum_type: Ty<'tcx>,
+    type_rep: Rc<adt::Repr<'tcx>>,
+    discriminant_type_metadata: Option<DIType>,
+    containing_scope: DIScope,
+    file_metadata: DIFile,
+    span: Span,
+}
+
+impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
+    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
+                                      -> Vec<MemberDescription> {
+        let adt = &self.enum_type.ty_adt_def().unwrap();
+        match *self.type_rep {
+            adt::General(_, ref struct_defs, _) => {
+                let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata
+                    .expect("discriminant type metadata should be set for general enums"));
+                struct_defs
+                    .iter()
+                    .enumerate()
+                    .map(|(i, struct_def)| {
+                        let (variant_type_metadata,
+                             variant_llvm_type,
+                             member_desc_factory) =
+                            describe_enum_variant(cx,
+                                                  self.enum_type,
+                                                  struct_def,
+                                                  &adt.variants[i],
+                                                  discriminant_info,
+                                                  self.containing_scope,
+                                                  self.span);
+
+                        let member_descriptions = member_desc_factory
+                            .create_member_descriptions(cx);
+
+                        set_members_of_composite_type(cx,
+                                                      variant_type_metadata,
+                                                      variant_llvm_type,
+                                                      &member_descriptions);
+                        MemberDescription {
+                            name: "".to_string(),
+                            llvm_type: variant_llvm_type,
+                            type_metadata: variant_type_metadata,
+                            offset: FixedMemberOffset { bytes: 0 },
+                            flags: FLAGS_NONE
+                        }
+                    }).collect()
+            },
+            adt::Univariant(ref struct_def, _) => {
+                assert!(adt.variants.len() <= 1);
+
+                if adt.variants.is_empty() {
+                    vec![]
+                } else {
+                    let (variant_type_metadata,
+                         variant_llvm_type,
+                         member_description_factory) =
+                        describe_enum_variant(cx,
+                                              self.enum_type,
+                                              struct_def,
+                                              &adt.variants[0],
+                                              NoDiscriminant,
+                                              self.containing_scope,
+                                              self.span);
+
+                    let member_descriptions =
+                        member_description_factory.create_member_descriptions(cx);
+
+                    set_members_of_composite_type(cx,
+                                                  variant_type_metadata,
+                                                  variant_llvm_type,
+                                                  &member_descriptions[..]);
+                    vec![
+                        MemberDescription {
+                            name: "".to_string(),
+                            llvm_type: variant_llvm_type,
+                            type_metadata: variant_type_metadata,
+                            offset: FixedMemberOffset { bytes: 0 },
+                            flags: FLAGS_NONE
+                        }
+                    ]
+                }
+            }
+            adt::RawNullablePointer { nndiscr: non_null_variant_index, nnty, .. } => {
+                // As far as debuginfo is concerned, the pointer this enum
+                // represents is still wrapped in a struct. This is to make the
+                // DWARF representation of enums uniform.
+
+                // First create a description of the artificial wrapper struct:
+                let non_null_variant = &adt.variants[non_null_variant_index.0 as usize];
+                let non_null_variant_name = non_null_variant.name.as_str();
+
+                // The llvm type and metadata of the pointer
+                let non_null_llvm_type = type_of::type_of(cx, nnty);
+                let non_null_type_metadata = type_metadata(cx, nnty, self.span);
+
+                // The type of the artificial struct wrapping the pointer
+                let artificial_struct_llvm_type = Type::struct_(cx,
+                                                                &[non_null_llvm_type],
+                                                                false);
+
+                // For the metadata of the wrapper struct, we need to create a
+                // MemberDescription of the struct's single field.
+                let sole_struct_member_description = MemberDescription {
+                    name: match non_null_variant.kind() {
+                        ty::VariantKind::Tuple => "__0".to_string(),
+                        ty::VariantKind::Struct => {
+                            non_null_variant.fields[0].name.to_string()
+                        }
+                        ty::VariantKind::Unit => bug!()
+                    },
+                    llvm_type: non_null_llvm_type,
+                    type_metadata: non_null_type_metadata,
+                    offset: FixedMemberOffset { bytes: 0 },
+                    flags: FLAGS_NONE
+                };
+
+                let unique_type_id = debug_context(cx).type_map
+                                                      .borrow_mut()
+                                                      .get_unique_type_id_of_enum_variant(
+                                                          cx,
+                                                          self.enum_type,
+                                                          &non_null_variant_name);
+
+                // Now we can create the metadata of the artificial struct
+                let artificial_struct_metadata =
+                    composite_type_metadata(cx,
+                                            artificial_struct_llvm_type,
+                                            &non_null_variant_name,
+                                            unique_type_id,
+                                            &[sole_struct_member_description],
+                                            self.containing_scope,
+                                            self.file_metadata,
+                                            codemap::DUMMY_SP);
+
+                // Encode the information about the null variant in the union
+                // member's name.
+                let null_variant_index = (1 - non_null_variant_index.0) as usize;
+                let null_variant_name = adt.variants[null_variant_index].name;
+                let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
+                                                0,
+                                                null_variant_name);
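+                // For example, for `Option<&T>` the null variant is `None`, so
+                // the member is named `RUST$ENCODED$ENUM$0$None`; debug
+                // visualizers can decode this name to recover the
+                // optimized-away discriminant.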
+
+                // Finally create the (singleton) list of descriptions of union
+                // members.
+                vec![
+                    MemberDescription {
+                        name: union_member_name,
+                        llvm_type: artificial_struct_llvm_type,
+                        type_metadata: artificial_struct_metadata,
+                        offset: FixedMemberOffset { bytes: 0 },
+                        flags: FLAGS_NONE
+                    }
+                ]
+            },
+            adt::StructWrappedNullablePointer { nonnull: ref struct_def,
+                                                nndiscr,
+                                                ref discrfield, ..} => {
+                // Create a description of the non-null variant
+                let (variant_type_metadata, variant_llvm_type, member_description_factory) =
+                    describe_enum_variant(cx,
+                                          self.enum_type,
+                                          struct_def,
+                                          &adt.variants[nndiscr.0 as usize],
+                                          OptimizedDiscriminant,
+                                          self.containing_scope,
+                                          self.span);
+
+                let variant_member_descriptions =
+                    member_description_factory.create_member_descriptions(cx);
+
+                set_members_of_composite_type(cx,
+                                              variant_type_metadata,
+                                              variant_llvm_type,
+                                              &variant_member_descriptions[..]);
+
+                // Encode the information about the null variant in the union
+                // member's name.
+                let null_variant_index = (1 - nndiscr.0) as usize;
+                let null_variant_name = adt.variants[null_variant_index].name;
+                let discrfield = discrfield.iter()
+                                           .skip(1)
+                                           .map(|x| x.to_string())
+                                           .collect::<Vec<_>>().join("$");
+                let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
+                                                discrfield,
+                                                null_variant_name);
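+                // Here the encoded prefix is the discriminant's field path with
+                // its leading element dropped and the rest joined by `$`, e.g. a
+                // path of [0, 1, 3] yields `RUST$ENCODED$ENUM$1$3$<null variant>`.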
+
+                // Create the (singleton) list of descriptions of union members.
+                vec![
+                    MemberDescription {
+                        name: union_member_name,
+                        llvm_type: variant_llvm_type,
+                        type_metadata: variant_type_metadata,
+                        offset: FixedMemberOffset { bytes: 0 },
+                        flags: FLAGS_NONE
+                    }
+                ]
+            },
+            adt::CEnum(..) => span_bug!(self.span, "This should be unreachable.")
+        }
+    }
+}
+
+// Creates MemberDescriptions for the fields of a single enum variant.
+struct VariantMemberDescriptionFactory<'tcx> {
+    args: Vec<(String, Ty<'tcx>)>,
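+    // When present, this metadata is used as the type of the variant's first
+    // field (the implicit `RUST$ENUM$DISR` discriminant member).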
+    discriminant_type_metadata: Option<DIType>,
+    span: Span,
+}
+
+impl<'tcx> VariantMemberDescriptionFactory<'tcx> {
+    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
+                                      -> Vec<MemberDescription> {
+        self.args.iter().enumerate().map(|(i, &(ref name, ty))| {
+            MemberDescription {
+                name: name.to_string(),
+                llvm_type: type_of::type_of(cx, ty),
+                type_metadata: match self.discriminant_type_metadata {
+                    Some(metadata) if i == 0 => metadata,
+                    _ => type_metadata(cx, ty, self.span)
+                },
+                offset: ComputedMemberOffset,
+                flags: FLAGS_NONE
+            }
+        }).collect()
+    }
+}
+
+#[derive(Copy, Clone)]
+enum EnumDiscriminantInfo {
+    RegularDiscriminant(DIType),
+    OptimizedDiscriminant,
+    NoDiscriminant
+}
+
+// Returns a tuple of (1) type_metadata_stub of the variant, (2) the llvm_type
+// of the variant, and (3) a MemberDescriptionFactory for producing the
+// descriptions of the fields of the variant. This is a rudimentary version of a
+// full RecursiveTypeDescription.
+fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                   enum_type: Ty<'tcx>,
+                                   struct_def: &adt::Struct<'tcx>,
+                                   variant: ty::VariantDef<'tcx>,
+                                   discriminant_info: EnumDiscriminantInfo,
+                                   containing_scope: DIScope,
+                                   span: Span)
+                                   -> (DICompositeType, Type, MemberDescriptionFactory<'tcx>) {
+    let variant_llvm_type =
+        Type::struct_(cx, &struct_def.fields
+                                    .iter()
+                                    .map(|&t| type_of::type_of(cx, t))
+                                    .collect::<Vec<_>>(),
+                      struct_def.packed);
+    // Could do some consistency checks here: size, align, field count, discr type
+
+    let variant_name = variant.name.as_str();
+    let unique_type_id = debug_context(cx).type_map
+                                          .borrow_mut()
+                                          .get_unique_type_id_of_enum_variant(
+                                              cx,
+                                              enum_type,
+                                              &variant_name);
+
+    let metadata_stub = create_struct_stub(cx,
+                                           variant_llvm_type,
+                                           &variant_name,
+                                           unique_type_id,
+                                           containing_scope);
+
+    // Get the argument names from the enum variant info
+    let mut arg_names: Vec<_> = match variant.kind() {
+        ty::VariantKind::Unit => vec![],
+        ty::VariantKind::Tuple => {
+            variant.fields
+                   .iter()
+                   .enumerate()
+                   .map(|(i, _)| format!("__{}", i))
+                   .collect()
+        }
+        ty::VariantKind::Struct => {
+            variant.fields
+                   .iter()
+                   .map(|f| f.name.to_string())
+                   .collect()
+        }
+    };
+
+    // If this is not a univariant enum, there is also the discriminant field.
+    match discriminant_info {
+        RegularDiscriminant(_) => arg_names.insert(0, "RUST$ENUM$DISR".to_string()),
+        _ => { /* do nothing */ }
+    };
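+    // Debug visualizers (e.g. the GDB pretty printers shipped with Rust) key
+    // off the `RUST$ENUM$DISR` name to determine which variant is active.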
+
+    // Build an array of (field name, field type) pairs to be stored in the factory.
+    let args: Vec<(String, Ty)> = arg_names.iter()
+        .zip(&struct_def.fields)
+        .map(|(s, &t)| (s.to_string(), t))
+        .collect();
+
+    let member_description_factory =
+        VariantMDF(VariantMemberDescriptionFactory {
+            args: args,
+            discriminant_type_metadata: match discriminant_info {
+                RegularDiscriminant(discriminant_type_metadata) => {
+                    Some(discriminant_type_metadata)
+                }
+                _ => None
+            },
+            span: span,
+        });
+
+    (metadata_stub, variant_llvm_type, member_description_factory)
+}
+
+fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                   enum_type: Ty<'tcx>,
+                                   enum_def_id: DefId,
+                                   unique_type_id: UniqueTypeId,
+                                   span: Span)
+                                   -> RecursiveTypeDescription<'tcx> {
+    let enum_name = compute_debuginfo_type_name(cx, enum_type, false);
+
+    let (containing_scope, _) = get_namespace_and_span_for_item(cx, enum_def_id);
+    // FIXME: This should emit actual file metadata for the enum, but we
+    // currently can't get the necessary information when it comes to types
+    // imported from other crates. Formerly we violated the ODR when performing
+    // LTO because we emitted debuginfo for the same type with varying file
+    // metadata, so as a workaround we pretend that the type comes from
+    // <unknown>
+    let file_metadata = unknown_file_metadata(cx);
+
+    let variants = &enum_type.ty_adt_def().unwrap().variants;
+
+    let enumerators_metadata: Vec<DIDescriptor> = variants
+        .iter()
+        .map(|v| {
+            let token = v.name.as_str();
+            let name = CString::new(token.as_bytes()).unwrap();
+            unsafe {
+                llvm::LLVMDIBuilderCreateEnumerator(
+                    DIB(cx),
+                    name.as_ptr(),
+                    v.disr_val.to_u64_unchecked())
+            }
+        })
+        .collect();
+
+    let discriminant_type_metadata = |inttype: syntax::attr::IntType| {
+        let disr_type_key = (enum_def_id, inttype);
+        let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types
+                                                                 .borrow()
+                                                                 .get(&disr_type_key).cloned();
+        match cached_discriminant_type_metadata {
+            Some(discriminant_type_metadata) => discriminant_type_metadata,
+            None => {
+                let discriminant_llvm_type = adt::ll_inttype(cx, inttype);
+                let (discriminant_size, discriminant_align) =
+                    size_and_align_of(cx, discriminant_llvm_type);
+                let discriminant_base_type_metadata =
+                    type_metadata(cx,
+                                  adt::ty_of_inttype(cx.tcx(), inttype),
+                                  codemap::DUMMY_SP);
+                let discriminant_name = get_enum_discriminant_name(cx, enum_def_id);
+
+                let name = CString::new(discriminant_name.as_bytes()).unwrap();
+                let discriminant_type_metadata = unsafe {
+                    llvm::LLVMDIBuilderCreateEnumerationType(
+                        DIB(cx),
+                        containing_scope,
+                        name.as_ptr(),
+                        NO_FILE_METADATA,
+                        UNKNOWN_LINE_NUMBER,
+                        bytes_to_bits(discriminant_size),
+                        bytes_to_bits(discriminant_align),
+                        create_DIArray(DIB(cx), &enumerators_metadata),
+                        discriminant_base_type_metadata)
+                };
+
+                debug_context(cx).created_enum_disr_types
+                                 .borrow_mut()
+                                 .insert(disr_type_key, discriminant_type_metadata);
+
+                discriminant_type_metadata
+            }
+        }
+    };
+
+    let type_rep = adt::represent_type(cx, enum_type);
+
+    let discriminant_type_metadata = match *type_rep {
+        adt::CEnum(inttype, _, _) => {
+            return FinalMetadata(discriminant_type_metadata(inttype))
+        },
+        adt::RawNullablePointer { .. }           |
+        adt::StructWrappedNullablePointer { .. } |
+        adt::Univariant(..)                      => None,
+        adt::General(inttype, _, _) => Some(discriminant_type_metadata(inttype)),
+    };
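+    // C-like enums were handled above by returning a plain DWARF enumeration
+    // type; all other enums are described as a union whose members are filled
+    // in lazily via the EnumMemberDescriptionFactory created below.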
+
+    let enum_llvm_type = type_of::type_of(cx, enum_type);
+    let (enum_type_size, enum_type_align) = size_and_align_of(cx, enum_llvm_type);
+
+    let unique_type_id_str = debug_context(cx)
+                             .type_map
+                             .borrow()
+                             .get_unique_type_id_as_string(unique_type_id);
+
+    let enum_name = CString::new(enum_name).unwrap();
+    let unique_type_id_str = CString::new(unique_type_id_str.as_bytes()).unwrap();
+    let enum_metadata = unsafe {
+        llvm::LLVMDIBuilderCreateUnionType(
+        DIB(cx),
+        containing_scope,
+        enum_name.as_ptr(),
+        file_metadata,
+        UNKNOWN_LINE_NUMBER,
+        bytes_to_bits(enum_type_size),
+        bytes_to_bits(enum_type_align),
+        0, // Flags
+        ptr::null_mut(),
+        0, // RuntimeLang
+        unique_type_id_str.as_ptr())
+    };
+
+    return create_and_register_recursive_type_forward_declaration(
+        cx,
+        enum_type,
+        unique_type_id,
+        enum_metadata,
+        enum_llvm_type,
+        EnumMDF(EnumMemberDescriptionFactory {
+            enum_type: enum_type,
+            type_rep: type_rep.clone(),
+            discriminant_type_metadata: discriminant_type_metadata,
+            containing_scope: containing_scope,
+            file_metadata: file_metadata,
+            span: span,
+        }),
+    );
+
+    fn get_enum_discriminant_name(cx: &CrateContext,
+                                  def_id: DefId)
+                                  -> token::InternedString {
+        cx.tcx().item_name(def_id).as_str()
+    }
+}
+
+/// Creates debug information for a composite type, that is, anything that
+/// results in an LLVM struct.
+///
+/// Examples of Rust types that use this are structs, tuples, boxes, vecs, and enums.
+fn composite_type_metadata(cx: &CrateContext,
+                           composite_llvm_type: Type,
+                           composite_type_name: &str,
+                           composite_type_unique_id: UniqueTypeId,
+                           member_descriptions: &[MemberDescription],
+                           containing_scope: DIScope,
+
+                           // Ignore source location information as long as it
+                           // can't be reconstructed for non-local crates.
+                           _file_metadata: DIFile,
+                           _definition_span: Span)
+                           -> DICompositeType {
+    // Create the (empty) struct metadata node ...
+    let composite_type_metadata = create_struct_stub(cx,
+                                                     composite_llvm_type,
+                                                     composite_type_name,
+                                                     composite_type_unique_id,
+                                                     containing_scope);
+    // ... and immediately create and add the member descriptions.
+    set_members_of_composite_type(cx,
+                                  composite_type_metadata,
+                                  composite_llvm_type,
+                                  member_descriptions);
+
+    return composite_type_metadata;
+}
+
+fn set_members_of_composite_type(cx: &CrateContext,
+                                 composite_type_metadata: DICompositeType,
+                                 composite_llvm_type: Type,
+                                 member_descriptions: &[MemberDescription]) {
+    // In some rare cases LLVM metadata uniquing would lead to an existing type
+    // description being used instead of a new one created in
+    // create_struct_stub. This would cause a hard to trace assertion in
+    // DICompositeType::SetTypeArray(). The following check makes sure that we
+    // get a better error message if this should happen again due to some
+    // regression.
+    {
+        let mut composite_types_completed =
+            debug_context(cx).composite_types_completed.borrow_mut();
+        if composite_types_completed.contains(&composite_type_metadata) {
+            bug!("debuginfo::set_members_of_composite_type() - \
+                  Already completed forward declaration re-encountered.");
+        } else {
+            composite_types_completed.insert(composite_type_metadata);
+        }
+    }
+
+    let member_metadata: Vec<DIDescriptor> = member_descriptions
+        .iter()
+        .enumerate()
+        .map(|(i, member_description)| {
+            let (member_size, member_align) = size_and_align_of(cx, member_description.llvm_type);
+            let member_offset = match member_description.offset {
+                FixedMemberOffset { bytes } => bytes as u64,
+                ComputedMemberOffset => machine::llelement_offset(cx, composite_llvm_type, i)
+            };
+
+            let member_name = member_description.name.as_bytes();
+            let member_name = CString::new(member_name).unwrap();
+            unsafe {
+                llvm::LLVMDIBuilderCreateMemberType(
+                    DIB(cx),
+                    composite_type_metadata,
+                    member_name.as_ptr(),
+                    NO_FILE_METADATA,
+                    UNKNOWN_LINE_NUMBER,
+                    bytes_to_bits(member_size),
+                    bytes_to_bits(member_align),
+                    bytes_to_bits(member_offset),
+                    member_description.flags,
+                    member_description.type_metadata)
+            }
+        })
+        .collect();
+
+    unsafe {
+        let type_array = create_DIArray(DIB(cx), &member_metadata[..]);
+        llvm::LLVMDICompositeTypeSetTypeArray(DIB(cx), composite_type_metadata, type_array);
+    }
+}
+
+// A convenience wrapper around LLVMDIBuilderCreateStructType(). Does not do any
+// caching, does not add any fields to the struct. This can be done later with
+// set_members_of_composite_type().
+fn create_struct_stub(cx: &CrateContext,
+                      struct_llvm_type: Type,
+                      struct_type_name: &str,
+                      unique_type_id: UniqueTypeId,
+                      containing_scope: DIScope)
+                   -> DICompositeType {
+    let (struct_size, struct_align) = size_and_align_of(cx, struct_llvm_type);
+
+    let unique_type_id_str = debug_context(cx).type_map
+                                              .borrow()
+                                              .get_unique_type_id_as_string(unique_type_id);
+    let name = CString::new(struct_type_name).unwrap();
+    let unique_type_id = CString::new(unique_type_id_str.as_bytes()).unwrap();
+    let metadata_stub = unsafe {
+        // LLVMDIBuilderCreateStructType() wants an empty array. A null
+        // pointer will lead to hard to trace and debug LLVM assertions
+        // later on in llvm/lib/IR/Value.cpp.
+        let empty_array = create_DIArray(DIB(cx), &[]);
+
+        llvm::LLVMDIBuilderCreateStructType(
+            DIB(cx),
+            containing_scope,
+            name.as_ptr(),
+            NO_FILE_METADATA,
+            UNKNOWN_LINE_NUMBER,
+            bytes_to_bits(struct_size),
+            bytes_to_bits(struct_align),
+            0,
+            ptr::null_mut(),
+            empty_array,
+            0,
+            ptr::null_mut(),
+            unique_type_id.as_ptr())
+    };
+
+    return metadata_stub;
+}
+
+/// Creates debug information for the given global variable.
+///
+/// Adds the created metadata nodes directly to the crate's IR.
+pub fn create_global_var_metadata(cx: &CrateContext,
+                                  node_id: ast::NodeId,
+                                  global: ValueRef) {
+    if cx.dbg_cx().is_none() {
+        return;
+    }
+
+    // Don't create debuginfo for globals inlined from other crates. The other
+    // crate should already contain debuginfo for it. More importantly, the
+    // global might not even exist in un-inlined form anywhere, which would
+    // lead to linker errors.
+    if cx.external_srcs().borrow().contains_key(&node_id) {
+        return;
+    }
+
+    let var_item = cx.tcx().map.get(node_id);
+
+    let (name, span) = match var_item {
+        hir_map::NodeItem(item) => {
+            match item.node {
+                hir::ItemStatic(..) => (item.name, item.span),
+                hir::ItemConst(..) => (item.name, item.span),
+                _ => {
+                    span_bug!(item.span,
+                              "debuginfo::\
+                               create_global_var_metadata() - \
+                               Captured var-id refers to \
+                               unexpected ast_item variant: {:?}",
+                              var_item)
+                }
+            }
+        },
+        _ => bug!("debuginfo::create_global_var_metadata() \
+                   - Captured var-id refers to unexpected \
+                   hir_map variant: {:?}",
+                  var_item)
+    };
+
+    let (file_metadata, line_number) = if span != codemap::DUMMY_SP {
+        let loc = span_start(cx, span);
+        (file_metadata(cx, &loc.file.name), loc.line as c_uint)
+    } else {
+        (NO_FILE_METADATA, UNKNOWN_LINE_NUMBER)
+    };
+
+    let is_local_to_unit = is_node_local_to_unit(cx, node_id);
+    let variable_type = cx.tcx().node_id_to_type(node_id);
+    let type_metadata = type_metadata(cx, variable_type, span);
+    let node_def_id = cx.tcx().map.local_def_id(node_id);
+    let namespace_node = namespace_for_item(cx, node_def_id);
+    let var_name = name.to_string();
+    let linkage_name =
+        namespace_node.mangled_name_of_contained_item(&var_name[..]);
+    let var_scope = namespace_node.scope;
+
+    let var_name = CString::new(var_name).unwrap();
+    let linkage_name = CString::new(linkage_name).unwrap();
+    unsafe {
+        llvm::LLVMDIBuilderCreateStaticVariable(DIB(cx),
+                                                var_scope,
+                                                var_name.as_ptr(),
+                                                linkage_name.as_ptr(),
+                                                file_metadata,
+                                                line_number,
+                                                type_metadata,
+                                                is_local_to_unit,
+                                                global,
+                                                ptr::null_mut());
+    }
+}
+
+/// Creates debug information for the given local variable.
+///
+/// This function assumes that there's a datum for each pattern component of the
+/// local in `bcx.fcx.lllocals`.
+/// Adds the created metadata nodes directly to the crate's IR.
+pub fn create_local_var_metadata(bcx: Block, local: &hir::Local) {
+    if bcx.unreachable.get() ||
+       fn_should_be_ignored(bcx.fcx) ||
+       bcx.sess().opts.debuginfo != FullDebugInfo  {
+        return;
+    }
+
+    let cx = bcx.ccx();
+    let def_map = &cx.tcx().def_map;
+    let locals = bcx.fcx.lllocals.borrow();
+
+    pat_util::pat_bindings(def_map, &local.pat, |_, node_id, span, var_name| {
+        let datum = match locals.get(&node_id) {
+            Some(datum) => datum,
+            None => {
+                span_bug!(span,
+                          "no entry in lllocals table for {}",
+                          node_id);
+            }
+        };
+
+        if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() {
+            span_bug!(span, "debuginfo::create_local_var_metadata() - \
+                             Referenced variable location is not an alloca!");
+        }
+
+        let scope_metadata = scope_metadata(bcx.fcx, node_id, span);
+
+        declare_local(bcx,
+                      var_name.node,
+                      datum.ty,
+                      scope_metadata,
+                      VariableAccess::DirectVariable { alloca: datum.val },
+                      VariableKind::LocalVariable,
+                      span);
+    })
+}
+
+/// Creates debug information for a variable captured in a closure.
+///
+/// Adds the created metadata nodes directly to the crate's IR.
+pub fn create_captured_var_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                                node_id: ast::NodeId,
+                                                env_pointer: ValueRef,
+                                                env_index: usize,
+                                                captured_by_ref: bool,
+                                                span: Span) {
+    if bcx.unreachable.get() ||
+       fn_should_be_ignored(bcx.fcx) ||
+       bcx.sess().opts.debuginfo != FullDebugInfo {
+        return;
+    }
+
+    let cx = bcx.ccx();
+
+    let ast_item = cx.tcx().map.find(node_id);
+
+    let variable_name = match ast_item {
+        None => {
+            span_bug!(span, "debuginfo::create_captured_var_metadata: node not found");
+        }
+        Some(hir_map::NodeLocal(pat)) => {
+            match pat.node {
+                PatKind::Ident(_, ref path1, _) => {
+                    path1.node.name
+                }
+                _ => {
+                    span_bug!(span,
+                              "debuginfo::create_captured_var_metadata() - \
+                               Captured var-id refers to unexpected \
+                               hir_map variant: {:?}",
+                              ast_item);
+                }
+            }
+        }
+        _ => {
+            span_bug!(span,
+                      "debuginfo::create_captured_var_metadata() - \
+                       Captured var-id refers to unexpected \
+                       hir_map variant: {:?}",
+                      ast_item);
+        }
+    };
+
+    let variable_type = common::node_id_type(bcx, node_id);
+    let scope_metadata = bcx.fcx.debug_context.get_ref(span).fn_metadata;
+
+    // env_pointer is the alloca containing the pointer to the environment,
+    // so its type is **EnvironmentType. In order to find out the type of
+    // the environment we have to "dereference" twice.
+    let llvm_env_data_type = common::val_ty(env_pointer).element_type()
+                                                        .element_type();
+    let byte_offset_of_var_in_env = machine::llelement_offset(cx,
+                                                              llvm_env_data_type,
+                                                              env_index);
+
+    let address_operations = unsafe {
+        [llvm::LLVMDIBuilderCreateOpDeref(),
+         llvm::LLVMDIBuilderCreateOpPlus(),
+         byte_offset_of_var_in_env as i64,
+         llvm::LLVMDIBuilderCreateOpDeref()]
+    };
+
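+    // For by-reference captures the environment slot holds a pointer to the
+    // variable, so the trailing OpDeref is needed; for by-value captures the
+    // slot holds the value itself and the final dereference is dropped.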
+    let address_op_count = if captured_by_ref {
+        address_operations.len()
+    } else {
+        address_operations.len() - 1
+    };
+
+    let variable_access = VariableAccess::IndirectVariable {
+        alloca: env_pointer,
+        address_operations: &address_operations[..address_op_count]
+    };
+
+    declare_local(bcx,
+                  variable_name,
+                  variable_type,
+                  scope_metadata,
+                  variable_access,
+                  VariableKind::CapturedVariable,
+                  span);
+}
+
+/// Creates debug information for a local variable introduced in the head of a
+/// match-statement arm.
+///
+/// Adds the created metadata nodes directly to the crate's IR.
+pub fn create_match_binding_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                                 variable_name: ast::Name,
+                                                 binding: BindingInfo<'tcx>) {
+    if bcx.unreachable.get() ||
+       fn_should_be_ignored(bcx.fcx) ||
+       bcx.sess().opts.debuginfo != FullDebugInfo {
+        return;
+    }
+
+    let scope_metadata = scope_metadata(bcx.fcx, binding.id, binding.span);
+    let aops = unsafe {
+        [llvm::LLVMDIBuilderCreateOpDeref()]
+    };
+    // Regardless of the actual type (`T`) we're always passed the stack slot
+    // (alloca) for the binding. For ByRef bindings that's a `T*` but for ByMove
+    // bindings we actually have `T**`. So to get the actual variable we need to
+    // dereference once more. For ByCopy we just use the stack slot we created
+    // for the binding.
+    let var_access = match binding.trmode {
+        TransBindingMode::TrByCopy(llbinding) |
+        TransBindingMode::TrByMoveIntoCopy(llbinding) => VariableAccess::DirectVariable {
+            alloca: llbinding
+        },
+        TransBindingMode::TrByMoveRef => VariableAccess::IndirectVariable {
+            alloca: binding.llmatch,
+            address_operations: &aops
+        },
+        TransBindingMode::TrByRef => VariableAccess::DirectVariable {
+            alloca: binding.llmatch
+        }
+    };
+
+    declare_local(bcx,
+                  variable_name,
+                  binding.ty,
+                  scope_metadata,
+                  var_access,
+                  VariableKind::LocalVariable,
+                  binding.span);
+}
+
+/// Creates debug information for the given function argument.
+///
+/// This function assumes that there's a datum for each pattern component of the
+/// argument in `bcx.fcx.lllocals`.
+/// Adds the created metadata nodes directly to the crate's IR.
+pub fn create_argument_metadata(bcx: Block, arg: &hir::Arg) {
+    if bcx.unreachable.get() ||
+       fn_should_be_ignored(bcx.fcx) ||
+       bcx.sess().opts.debuginfo != FullDebugInfo {
+        return;
+    }
+
+    let def_map = &bcx.tcx().def_map;
+    let scope_metadata = bcx
+                         .fcx
+                         .debug_context
+                         .get_ref(arg.pat.span)
+                         .fn_metadata;
+    let locals = bcx.fcx.lllocals.borrow();
+
+    pat_util::pat_bindings(def_map, &arg.pat, |_, node_id, span, var_name| {
+        let datum = match locals.get(&node_id) {
+            Some(v) => v,
+            None => {
+                span_bug!(span, "no entry in lllocals table for {}", node_id);
+            }
+        };
+
+        if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() {
+            span_bug!(span, "debuginfo::create_argument_metadata() - \
+                             Referenced variable location is not an alloca!");
+        }
+
+        let argument_index = {
+            let counter = &bcx
+                          .fcx
+                          .debug_context
+                          .get_ref(span)
+                          .argument_counter;
+            let argument_index = counter.get();
+            counter.set(argument_index + 1);
+            argument_index
+        };
+
+        declare_local(bcx,
+                      var_name.node,
+                      datum.ty,
+                      scope_metadata,
+                      VariableAccess::DirectVariable { alloca: datum.val },
+                      VariableKind::ArgumentVariable(argument_index),
+                      span);
+    })
+}
diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs
new file mode 100644 (file)
index 0000000..bb999c3
--- /dev/null
@@ -0,0 +1,693 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// See doc.rs for documentation.
+mod doc;
+
+use self::VariableAccess::*;
+use self::VariableKind::*;
+
+use self::utils::{DIB, span_start, assert_type_for_node_id, contains_nodebug_attribute,
+                  create_DIArray, is_node_local_to_unit};
+use self::namespace::{namespace_for_item, NamespaceTreeNode};
+use self::type_names::compute_debuginfo_type_name;
+use self::metadata::{type_metadata, diverging_type_metadata};
+use self::metadata::{file_metadata, scope_metadata, TypeMap, compile_unit_metadata};
+use self::source_loc::InternalDebugLocation;
+
+use llvm;
+use llvm::{ModuleRef, ContextRef, ValueRef};
+use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilderRef, DISubprogram, DIArray,
+                      DIDescriptor, FlagPrototyped};
+use rustc::hir::def_id::DefId;
+use rustc::infer::normalize_associated_type;
+use rustc::ty::subst::{self, Substs};
+use rustc::hir;
+
+use abi::Abi;
+use common::{NodeIdAndSpan, CrateContext, FunctionContext, Block};
+use monomorphize;
+use rustc::infer;
+use rustc::ty::{self, Ty};
+use session::config::{self, FullDebugInfo, LimitedDebugInfo, NoDebugInfo};
+use util::nodemap::{NodeMap, FnvHashMap, FnvHashSet};
+use rustc::hir::map as hir_map;
+
+use libc::c_uint;
+use std::cell::{Cell, RefCell};
+use std::ffi::CString;
+use std::ptr;
+use std::rc::Rc;
+
+use syntax::codemap::{Span, Pos};
+use syntax::{ast, codemap};
+use syntax::attr::IntType;
+use syntax::parse::token::{self, special_idents};
+
+pub mod gdb;
+mod utils;
+mod namespace;
+mod type_names;
+mod metadata;
+mod create_scope_map;
+mod source_loc;
+
+pub use self::source_loc::set_source_location;
+pub use self::source_loc::clear_source_location;
+pub use self::source_loc::start_emitting_source_locations;
+pub use self::source_loc::get_cleanup_debug_loc_for_ast_node;
+pub use self::source_loc::with_source_location_override;
+pub use self::metadata::create_match_binding_metadata;
+pub use self::metadata::create_argument_metadata;
+pub use self::metadata::create_captured_var_metadata;
+pub use self::metadata::create_global_var_metadata;
+pub use self::metadata::create_local_var_metadata;
+
+#[allow(non_upper_case_globals)]
+const DW_TAG_auto_variable: c_uint = 0x100;
+#[allow(non_upper_case_globals)]
+const DW_TAG_arg_variable: c_uint = 0x101;
+
+/// A context object for maintaining all state needed by the debuginfo module.
+pub struct CrateDebugContext<'tcx> {
+    llcontext: ContextRef,
+    builder: DIBuilderRef,
+    current_debug_location: Cell<InternalDebugLocation>,
+    created_files: RefCell<FnvHashMap<String, DIFile>>,
+    created_enum_disr_types: RefCell<FnvHashMap<(DefId, IntType), DIType>>,
+
+    type_map: RefCell<TypeMap<'tcx>>,
+    namespace_map: RefCell<FnvHashMap<Vec<ast::Name>, Rc<NamespaceTreeNode>>>,
+
+    // This collection is used to assert that composite types (structs, enums,
+    // ...) have their members set only once:
+    composite_types_completed: RefCell<FnvHashSet<DIType>>,
+}
+
+impl<'tcx> CrateDebugContext<'tcx> {
+    pub fn new(llmod: ModuleRef) -> CrateDebugContext<'tcx> {
+        debug!("CrateDebugContext::new");
+        let builder = unsafe { llvm::LLVMDIBuilderCreate(llmod) };
+        // DIBuilder inherits context from the module, so we'd better use the same one
+        let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) };
+        return CrateDebugContext {
+            llcontext: llcontext,
+            builder: builder,
+            current_debug_location: Cell::new(InternalDebugLocation::UnknownLocation),
+            created_files: RefCell::new(FnvHashMap()),
+            created_enum_disr_types: RefCell::new(FnvHashMap()),
+            type_map: RefCell::new(TypeMap::new()),
+            namespace_map: RefCell::new(FnvHashMap()),
+            composite_types_completed: RefCell::new(FnvHashSet()),
+        };
+    }
+}
+
+pub enum FunctionDebugContext {
+    RegularContext(Box<FunctionDebugContextData>),
+    DebugInfoDisabled,
+    FunctionWithoutDebugInfo,
+}
+
+impl FunctionDebugContext {
+    fn get_ref<'a>(&'a self,
+                   span: Span)
+                   -> &'a FunctionDebugContextData {
+        match *self {
+            FunctionDebugContext::RegularContext(box ref data) => data,
+            FunctionDebugContext::DebugInfoDisabled => {
+                span_bug!(span,
+                          "{}",
+                          FunctionDebugContext::debuginfo_disabled_message());
+            }
+            FunctionDebugContext::FunctionWithoutDebugInfo => {
+                span_bug!(span,
+                          "{}",
+                          FunctionDebugContext::should_be_ignored_message());
+            }
+        }
+    }
+
+    fn debuginfo_disabled_message() -> &'static str {
+        "debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!"
+    }
+
+    fn should_be_ignored_message() -> &'static str {
+        "debuginfo: Error trying to access FunctionDebugContext for function that should be \
+         ignored by debug info!"
+    }
+}
+
+pub struct FunctionDebugContextData {
+    scope_map: RefCell<NodeMap<DIScope>>,
+    fn_metadata: DISubprogram,
+    argument_counter: Cell<usize>,
+    source_locations_enabled: Cell<bool>,
+    source_location_override: Cell<bool>,
+}
+
+pub enum VariableAccess<'a> {
+    // The llptr given is an alloca containing the variable's value
+    DirectVariable { alloca: ValueRef },
+    // The llptr given is an alloca containing the start of some pointer chain
+    // leading to the variable's content.
+    IndirectVariable { alloca: ValueRef, address_operations: &'a [i64] }
+}
+
+pub enum VariableKind {
+    ArgumentVariable(usize /*index*/),
+    LocalVariable,
+    CapturedVariable,
+}
+
+/// Create any deferred debug metadata nodes
+pub fn finalize(cx: &CrateContext) {
+    if cx.dbg_cx().is_none() {
+        return;
+    }
+
+    debug!("finalize");
+    let _ = compile_unit_metadata(cx);
+
+    if gdb::needs_gdb_debug_scripts_section(cx) {
+        // Add a .debug_gdb_scripts section to this compile-unit. This will
+        // cause GDB to try and load the gdb_load_rust_pretty_printers.py file,
+        // which activates the Rust pretty printers for the binary this section is
+        // contained in.
+        gdb::get_or_insert_gdb_debug_scripts_section_global(cx);
+    }
+
+    unsafe {
+        llvm::LLVMDIBuilderFinalize(DIB(cx));
+        llvm::LLVMDIBuilderDispose(DIB(cx));
+        // Debuginfo generation in LLVM by default uses a higher
+        // version of dwarf than OS X currently understands. We can
+        // instruct LLVM to emit an older version of dwarf, however,
+        // for OS X to understand. For more info see #11352
+        // This can be overridden using --llvm-opts -dwarf-version,N.
+        // Android has the same issue (#22398)
+        if cx.sess().target.target.options.is_like_osx ||
+           cx.sess().target.target.options.is_like_android {
+            llvm::LLVMRustAddModuleFlag(cx.llmod(),
+                                        "Dwarf Version\0".as_ptr() as *const _,
+                                        2)
+        }
+
+        // Indicate that we want CodeView debug information on MSVC
+        if cx.sess().target.target.options.is_like_msvc {
+            llvm::LLVMRustAddModuleFlag(cx.llmod(),
+                                        "CodeView\0".as_ptr() as *const _,
+                                        1)
+        }
+
+        // Prevent bitcode readers from deleting the debug info.
+        let ptr = "Debug Info Version\0".as_ptr();
+        llvm::LLVMRustAddModuleFlag(cx.llmod(), ptr as *const _,
+                                    llvm::LLVMRustDebugMetadataVersion());
+    };
+}
+
+/// Creates the function-specific debug context.
+///
+/// Returns the FunctionDebugContext for the function which holds state needed
+/// for debug info creation. The function may also return another variant of the
+/// FunctionDebugContext enum which indicates why no debuginfo should be created
+/// for the function.
+pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                               fn_ast_id: ast::NodeId,
+                                               param_substs: &Substs<'tcx>,
+                                               llfn: ValueRef) -> FunctionDebugContext {
+    if cx.sess().opts.debuginfo == NoDebugInfo {
+        return FunctionDebugContext::DebugInfoDisabled;
+    }
+
+    // Clear the debug location so we don't assign one in the function prelude.
+    // Do this here already, in case we do an early exit from this function.
+    source_loc::set_debug_location(cx, InternalDebugLocation::UnknownLocation);
+
+    if fn_ast_id == ast::DUMMY_NODE_ID {
+        // This is a function not linked to any source location, so don't
+        // generate debuginfo for it.
+        return FunctionDebugContext::FunctionWithoutDebugInfo;
+    }
+
+    let empty_generics = hir::Generics::empty();
+
+    let fnitem = cx.tcx().map.get(fn_ast_id);
+
+    let (name, fn_decl, generics, top_level_block, span, has_path) = match fnitem {
+        hir_map::NodeItem(ref item) => {
+            if contains_nodebug_attribute(&item.attrs) {
+                return FunctionDebugContext::FunctionWithoutDebugInfo;
+            }
+
+            match item.node {
+                hir::ItemFn(ref fn_decl, _, _, _, ref generics, ref top_level_block) => {
+                    (item.name, fn_decl, generics, top_level_block, item.span, true)
+                }
+                _ => {
+                    span_bug!(item.span,
+                        "create_function_debug_context: item bound to non-function");
+                }
+            }
+        }
+        hir_map::NodeImplItem(impl_item) => {
+            match impl_item.node {
+                hir::ImplItemKind::Method(ref sig, ref body) => {
+                    if contains_nodebug_attribute(&impl_item.attrs) {
+                        return FunctionDebugContext::FunctionWithoutDebugInfo;
+                    }
+
+                    (impl_item.name,
+                     &sig.decl,
+                     &sig.generics,
+                     body,
+                     impl_item.span,
+                     true)
+                }
+                _ => {
+                    span_bug!(impl_item.span,
+                              "create_function_debug_context() \
+                               called on non-method impl item?!")
+                }
+            }
+        }
+        hir_map::NodeExpr(ref expr) => {
+            match expr.node {
+                hir::ExprClosure(_, ref fn_decl, ref top_level_block) => {
+                    let name = format!("fn{}", token::gensym("fn"));
+                    let name = token::intern(&name[..]);
+                    (name, fn_decl,
+                        // This is not quite right. It should actually inherit
+                        // the generics of the enclosing function.
+                        &empty_generics,
+                        top_level_block,
+                        expr.span,
+                        // Don't try to lookup the item path:
+                        false)
+                }
+                _ => span_bug!(expr.span,
+                        "create_function_debug_context: expected an expr_fn_block here")
+            }
+        }
+        hir_map::NodeTraitItem(trait_item) => {
+            match trait_item.node {
+                hir::MethodTraitItem(ref sig, Some(ref body)) => {
+                    if contains_nodebug_attribute(&trait_item.attrs) {
+                        return FunctionDebugContext::FunctionWithoutDebugInfo;
+                    }
+
+                    (trait_item.name,
+                     &sig.decl,
+                     &sig.generics,
+                     body,
+                     trait_item.span,
+                     true)
+                }
+                _ => {
+                    bug!("create_function_debug_context: \
+                          unexpected sort of node: {:?}",
+                         fnitem)
+                }
+            }
+        }
+        hir_map::NodeForeignItem(..) |
+        hir_map::NodeVariant(..) |
+        hir_map::NodeStructCtor(..) => {
+            return FunctionDebugContext::FunctionWithoutDebugInfo;
+        }
+        _ => bug!("create_function_debug_context: \
+                   unexpected sort of node: {:?}",
+                  fnitem)
+    };
+
+    // This can be the case for functions inlined from another crate
+    if span == codemap::DUMMY_SP {
+        return FunctionDebugContext::FunctionWithoutDebugInfo;
+    }
+
+    let loc = span_start(cx, span);
+    let file_metadata = file_metadata(cx, &loc.file.name);
+
+    let function_type_metadata = unsafe {
+        let fn_signature = get_function_signature(cx,
+                                                  fn_ast_id,
+                                                  param_substs,
+                                                  span);
+        llvm::LLVMDIBuilderCreateSubroutineType(DIB(cx), file_metadata, fn_signature)
+    };
+
+    // get_template_parameters() will append a `<...>` clause to the function
+    // name if necessary.
+    let mut function_name = name.to_string();
+    let template_parameters = get_template_parameters(cx,
+                                                      generics,
+                                                      param_substs,
+                                                      file_metadata,
+                                                      &mut function_name);
+
+    // There is no hir_map::Path for hir::ExprClosure-type functions. For now,
+    // just don't put them into a namespace. In the future this could be improved
+    // somehow (storing a path in the hir_map, or constructing a path using the
+    // enclosing function).
+    let (linkage_name, containing_scope) = if has_path {
+        let fn_ast_def_id = cx.tcx().map.local_def_id(fn_ast_id);
+        let namespace_node = namespace_for_item(cx, fn_ast_def_id);
+        let linkage_name = namespace_node.mangled_name_of_contained_item(
+            &function_name[..]);
+        let containing_scope = namespace_node.scope;
+        (linkage_name, containing_scope)
+    } else {
+        (function_name.clone(), file_metadata)
+    };
+
+    // Clang sets this parameter to the opening brace of the function's block,
+    // so let's do this too.
+    let scope_line = span_start(cx, top_level_block.span).line;
+
+    let is_local_to_unit = is_node_local_to_unit(cx, fn_ast_id);
+
+    let function_name = CString::new(function_name).unwrap();
+    let linkage_name = CString::new(linkage_name).unwrap();
+    let fn_metadata = unsafe {
+        llvm::LLVMDIBuilderCreateFunction(
+            DIB(cx),
+            containing_scope,
+            function_name.as_ptr(),
+            linkage_name.as_ptr(),
+            file_metadata,
+            loc.line as c_uint,
+            function_type_metadata,
+            is_local_to_unit,
+            true,
+            scope_line as c_uint,
+            FlagPrototyped as c_uint,
+            cx.sess().opts.optimize != config::OptLevel::No,
+            llfn,
+            template_parameters,
+            ptr::null_mut())
+    };
+
+    let scope_map = create_scope_map::create_scope_map(cx,
+                                                       &fn_decl.inputs,
+                                                       &top_level_block,
+                                                       fn_metadata,
+                                                       fn_ast_id);
+
+    // Initialize fn debug context (including the scope map)
+    let fn_debug_context = box FunctionDebugContextData {
+        scope_map: RefCell::new(scope_map),
+        fn_metadata: fn_metadata,
+        argument_counter: Cell::new(1),
+        source_locations_enabled: Cell::new(false),
+        source_location_override: Cell::new(false),
+    };
+
+
+    return FunctionDebugContext::RegularContext(fn_debug_context);
+
+    fn get_function_signature<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                        fn_ast_id: ast::NodeId,
+                                        param_substs: &Substs<'tcx>,
+                                        error_reporting_span: Span) -> DIArray {
+        if cx.sess().opts.debuginfo == LimitedDebugInfo {
+            return create_DIArray(DIB(cx), &[]);
+        }
+
+        // Look up the function's monomorphized type.
+        assert_type_for_node_id(cx, fn_ast_id, error_reporting_span);
+        let fn_type = cx.tcx().node_id_to_type(fn_ast_id);
+        let fn_type = monomorphize::apply_param_substs(cx.tcx(), param_substs, &fn_type);
+
+        let (sig, abi) = match fn_type.sty {
+            ty::TyFnDef(_, _, ref barefnty) | ty::TyFnPtr(ref barefnty) => {
+                let sig = cx.tcx().erase_late_bound_regions(&barefnty.sig);
+                let sig = infer::normalize_associated_type(cx.tcx(), &sig);
+                (sig, barefnty.abi)
+            }
+            ty::TyClosure(def_id, ref substs) => {
+                let closure_type = cx.tcx().closure_type(def_id, substs);
+                let sig = cx.tcx().erase_late_bound_regions(&closure_type.sig);
+                let sig = infer::normalize_associated_type(cx.tcx(), &sig);
+                (sig, closure_type.abi)
+            }
+
+            _ => bug!("get_function_metadata: Expected a function type!")
+        };
+
+        let mut signature = Vec::with_capacity(sig.inputs.len() + 1);
+
+        // Return type -- llvm::DIBuilder wants this at index 0
+        signature.push(match sig.output {
+            ty::FnConverging(ret_ty) => match ret_ty.sty {
+                ty::TyTuple(ref tys) if tys.is_empty() => ptr::null_mut(),
+                _ => type_metadata(cx, ret_ty, codemap::DUMMY_SP)
+            },
+            ty::FnDiverging => diverging_type_metadata(cx)
+        });
+
+        let inputs = if abi == Abi::RustCall {
+            &sig.inputs[..sig.inputs.len()-1]
+        } else {
+            &sig.inputs[..]
+        };
+
+        // Argument types
+        for &argument_type in inputs {
+            signature.push(type_metadata(cx, argument_type, codemap::DUMMY_SP));
+        }
+
+        if abi == Abi::RustCall && !sig.inputs.is_empty() {
+            if let ty::TyTuple(ref args) = sig.inputs[sig.inputs.len() - 1].sty {
+                for &argument_type in args {
+                    signature.push(type_metadata(cx, argument_type, codemap::DUMMY_SP));
+                }
+            }
+        }
+
+        return create_DIArray(DIB(cx), &signature[..]);
+    }
+
+    fn get_template_parameters<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                         generics: &hir::Generics,
+                                         param_substs: &Substs<'tcx>,
+                                         file_metadata: DIFile,
+                                         name_to_append_suffix_to: &mut String)
+                                         -> DIArray
+    {
+        let self_type = param_substs.self_ty();
+        let self_type = normalize_associated_type(cx.tcx(), &self_type);
+
+        // Only true for static default methods:
+        let has_self_type = self_type.is_some();
+
+        if !generics.is_type_parameterized() && !has_self_type {
+            return create_DIArray(DIB(cx), &[]);
+        }
+
+        name_to_append_suffix_to.push('<');
+
+        // The list to be filled with template parameters:
+        let mut template_params: Vec<DIDescriptor> =
+            Vec::with_capacity(generics.ty_params.len() + 1);
+
+        // Handle self type
+        if has_self_type {
+            let actual_self_type = self_type.unwrap();
+            // Add self type name to <...> clause of function name
+            let actual_self_type_name = compute_debuginfo_type_name(
+                cx,
+                actual_self_type,
+                true);
+
+            name_to_append_suffix_to.push_str(&actual_self_type_name[..]);
+
+            if generics.is_type_parameterized() {
+                name_to_append_suffix_to.push_str(",");
+            }
+
+            // Only create type information if full debuginfo is enabled
+            if cx.sess().opts.debuginfo == FullDebugInfo {
+                let actual_self_type_metadata = type_metadata(cx,
+                                                              actual_self_type,
+                                                              codemap::DUMMY_SP);
+
+                let name = special_idents::type_self.name.as_str();
+
+                let name = CString::new(name.as_bytes()).unwrap();
+                let param_metadata = unsafe {
+                    llvm::LLVMDIBuilderCreateTemplateTypeParameter(
+                        DIB(cx),
+                        ptr::null_mut(),
+                        name.as_ptr(),
+                        actual_self_type_metadata,
+                        file_metadata,
+                        0,
+                        0)
+                };
+
+                template_params.push(param_metadata);
+            }
+        }
+
+        // Handle other generic parameters
+        let actual_types = param_substs.types.get_slice(subst::FnSpace);
+        for (index, &hir::TyParam{ name, .. }) in generics.ty_params.iter().enumerate() {
+            let actual_type = actual_types[index];
+            // Add actual type name to <...> clause of function name
+            let actual_type_name = compute_debuginfo_type_name(cx,
+                                                               actual_type,
+                                                               true);
+            name_to_append_suffix_to.push_str(&actual_type_name[..]);
+
+            if index != generics.ty_params.len() - 1 {
+                name_to_append_suffix_to.push_str(",");
+            }
+
+            // Again, only create type information if full debuginfo is enabled
+            if cx.sess().opts.debuginfo == FullDebugInfo {
+                let actual_type_metadata = type_metadata(cx, actual_type, codemap::DUMMY_SP);
+                let name = CString::new(name.as_str().as_bytes()).unwrap();
+                let param_metadata = unsafe {
+                    llvm::LLVMDIBuilderCreateTemplateTypeParameter(
+                        DIB(cx),
+                        ptr::null_mut(),
+                        name.as_ptr(),
+                        actual_type_metadata,
+                        file_metadata,
+                        0,
+                        0)
+                };
+                template_params.push(param_metadata);
+            }
+        }
+
+        name_to_append_suffix_to.push('>');
+
+        return create_DIArray(DIB(cx), &template_params[..]);
+    }
+}
+
+fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                             variable_name: ast::Name,
+                             variable_type: Ty<'tcx>,
+                             scope_metadata: DIScope,
+                             variable_access: VariableAccess,
+                             variable_kind: VariableKind,
+                             span: Span) {
+    let cx: &CrateContext = bcx.ccx();
+
+    let filename = span_start(cx, span).file.name.clone();
+    let file_metadata = file_metadata(cx, &filename[..]);
+
+    let loc = span_start(cx, span);
+    let type_metadata = type_metadata(cx, variable_type, span);
+
+    let (argument_index, dwarf_tag) = match variable_kind {
+        ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable),
+        LocalVariable    |
+        CapturedVariable => (0, DW_TAG_auto_variable)
+    };
+
+    let name = CString::new(variable_name.as_str().as_bytes()).unwrap();
+    match (variable_access, &[][..]) {
+        (DirectVariable { alloca }, address_operations) |
+        (IndirectVariable {alloca, address_operations}, _) => {
+            let metadata = unsafe {
+                llvm::LLVMDIBuilderCreateVariable(
+                    DIB(cx),
+                    dwarf_tag,
+                    scope_metadata,
+                    name.as_ptr(),
+                    file_metadata,
+                    loc.line as c_uint,
+                    type_metadata,
+                    cx.sess().opts.optimize != config::OptLevel::No,
+                    0,
+                    address_operations.as_ptr(),
+                    address_operations.len() as c_uint,
+                    argument_index)
+            };
+            source_loc::set_debug_location(cx, InternalDebugLocation::new(scope_metadata,
+                                                                          loc.line,
+                                                                          loc.col.to_usize()));
+            unsafe {
+                let debug_loc = llvm::LLVMGetCurrentDebugLocation(cx.raw_builder());
+                let instr = llvm::LLVMDIBuilderInsertDeclareAtEnd(
+                    DIB(cx),
+                    alloca,
+                    metadata,
+                    address_operations.as_ptr(),
+                    address_operations.len() as c_uint,
+                    debug_loc,
+                    bcx.llbb);
+
+                llvm::LLVMSetInstDebugLocation(::build::B(bcx).llbuilder, instr);
+            }
+        }
+    }
+
+    match variable_kind {
+        ArgumentVariable(_) | CapturedVariable => {
+            assert!(!bcx.fcx
+                        .debug_context
+                        .get_ref(span)
+                        .source_locations_enabled
+                        .get());
+            source_loc::set_debug_location(cx, InternalDebugLocation::UnknownLocation);
+        }
+        _ => { /* nothing to do */ }
+    }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum DebugLoc {
+    At(ast::NodeId, Span),
+    None
+}
+
+impl DebugLoc {
+    pub fn apply(&self, fcx: &FunctionContext) {
+        match *self {
+            DebugLoc::At(node_id, span) => {
+                source_loc::set_source_location(fcx, node_id, span);
+            }
+            DebugLoc::None => {
+                source_loc::clear_source_location(fcx);
+            }
+        }
+    }
+}
+
+pub trait ToDebugLoc {
+    fn debug_loc(&self) -> DebugLoc;
+}
+
+impl ToDebugLoc for hir::Expr {
+    fn debug_loc(&self) -> DebugLoc {
+        DebugLoc::At(self.id, self.span)
+    }
+}
+
+impl ToDebugLoc for NodeIdAndSpan {
+    fn debug_loc(&self) -> DebugLoc {
+        DebugLoc::At(self.id, self.span)
+    }
+}
+
+impl ToDebugLoc for Option<NodeIdAndSpan> {
+    fn debug_loc(&self) -> DebugLoc {
+        match *self {
+            Some(NodeIdAndSpan { id, span }) => DebugLoc::At(id, span),
+            None => DebugLoc::None
+        }
+    }
+}
diff --git a/src/librustc_trans/debuginfo/namespace.rs b/src/librustc_trans/debuginfo/namespace.rs
new file mode 100644 (file)
index 0000000..5272a4f
--- /dev/null
@@ -0,0 +1,127 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Namespace Handling.
+
+use super::utils::{DIB, debug_context};
+
+use llvm;
+use llvm::debuginfo::DIScope;
+use rustc::hir::def_id::DefId;
+use rustc::hir::map as hir_map;
+use common::CrateContext;
+
+use std::ffi::CString;
+use std::iter::once;
+use std::ptr;
+use std::rc::{Rc, Weak};
+use syntax::ast;
+use syntax::parse::token;
+
+pub struct NamespaceTreeNode {
+    pub name: ast::Name,
+    pub scope: DIScope,
+    pub parent: Option<Weak<NamespaceTreeNode>>,
+}
+
+impl NamespaceTreeNode {
+    pub fn mangled_name_of_contained_item(&self, item_name: &str) -> String {
+        fn fill_nested(node: &NamespaceTreeNode, output: &mut String) {
+            match node.parent {
+                Some(ref parent) => fill_nested(&parent.upgrade().unwrap(), output),
+                None => {}
+            }
+            let string = node.name.as_str();
+            output.push_str(&string.len().to_string());
+            output.push_str(&string);
+        }
+
+        let mut name = String::from("_ZN");
+        fill_nested(self, &mut name);
+        name.push_str(&item_name.len().to_string());
+        name.push_str(item_name);
+        name.push('E');
+        name
+    }
+}
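+
+// For example (hypothetical path): for a namespace tree `mycrate::mymod` and an
+// item named `do_work`, `mangled_name_of_contained_item` produces
+// "_ZN7mycrate5mymod7do_workE": each segment is emitted as its byte length
+// followed by the segment text, wrapped between "_ZN" and "E".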
+
+pub fn namespace_for_item(cx: &CrateContext, def_id: DefId) -> Rc<NamespaceTreeNode> {
+    // prepend crate name.
+    // This shouldn't need a roundtrip through InternedString.
+    let krate = token::intern(&cx.tcx().crate_name(def_id.krate));
+    let krate = hir_map::DefPathData::TypeNs(krate);
+    let path = cx.tcx().def_path(def_id).data;
+    let mut path = once(krate).chain(path.into_iter().map(|e| e.data)).peekable();
+
+    let mut current_key = Vec::new();
+    let mut parent_node: Option<Rc<NamespaceTreeNode>> = None;
+
+    // Create/Lookup namespace for each element of the path.
+    loop {
+        // Emulate a for loop so we can use peek below.
+        let path_element = match path.next() {
+            Some(e) => e,
+            None => break
+        };
+        // Ignore the name of the item (the last path element).
+        if path.peek().is_none() {
+            break;
+        }
+
+        // This shouldn't need a roundtrip through InternedString.
+        let namespace_name = path_element.as_interned_str();
+        let name = token::intern(&namespace_name);
+        current_key.push(name);
+
+        let existing_node = debug_context(cx).namespace_map.borrow()
+                                             .get(&current_key).cloned();
+        let current_node = match existing_node {
+            Some(existing_node) => existing_node,
+            None => {
+                // create and insert
+                let parent_scope = match parent_node {
+                    Some(ref node) => node.scope,
+                    None => ptr::null_mut()
+                };
+                let namespace_name = CString::new(namespace_name.as_bytes()).unwrap();
+                let scope = unsafe {
+                    llvm::LLVMDIBuilderCreateNameSpace(
+                        DIB(cx),
+                        parent_scope,
+                        namespace_name.as_ptr(),
+                        // cannot reconstruct file ...
+                        ptr::null_mut(),
+                        // ... or line information, but that's not so important.
+                        0)
+                };
+
+                let node = Rc::new(NamespaceTreeNode {
+                    name: name,
+                    scope: scope,
+                    parent: parent_node.map(|parent| Rc::downgrade(&parent)),
+                });
+
+                debug_context(cx).namespace_map.borrow_mut()
+                                 .insert(current_key.clone(), node.clone());
+
+                node
+            }
+        };
+
+        parent_node = Some(current_node);
+    }
+
+    match parent_node {
+        Some(node) => node,
+        None => {
+            bug!("debuginfo::namespace_for_item: path too short for {:?}", def_id);
+        }
+    }
+}
diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs
new file mode 100644 (file)
index 0000000..2879da7
--- /dev/null
@@ -0,0 +1,231 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use self::InternalDebugLocation::*;
+
+use super::utils::{debug_context, span_start, fn_should_be_ignored};
+use super::metadata::{scope_metadata,UNKNOWN_COLUMN_NUMBER};
+use super::{FunctionDebugContext, DebugLoc};
+
+use llvm;
+use llvm::debuginfo::DIScope;
+use common::{NodeIdAndSpan, CrateContext, FunctionContext};
+
+use libc::c_uint;
+use std::ptr;
+use syntax::codemap::{Span, Pos};
+use syntax::{ast, codemap};
+
+pub fn get_cleanup_debug_loc_for_ast_node<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                                    node_id: ast::NodeId,
+                                                    node_span: Span,
+                                                    is_block: bool)
+                                                 -> NodeIdAndSpan {
+    // A debug location needs two things:
+    // (1) A span (of which only the beginning will actually be used)
+    // (2) An AST node-id which will be used to look up the lexical scope
+    //     for the location in the function's scope-map
+    //
+    // This function will calculate the debug location for compiler-generated
+    // cleanup calls that are executed when control-flow leaves the
+    // scope identified by `node_id`.
+    //
+    // For everything but block-like things we can simply take id and span of
+    // the given expression, meaning that from a debugger's view cleanup code is
+    // executed at the same source location as the statement/expr itself.
+    //
+    // Blocks are a special case. Here we want the cleanup to be linked to the
+    // closing curly brace of the block. The *scope* the cleanup is executed in
+    // is up for debate: it could either still be *within* the block being
+    // cleaned up, meaning that locals from the block are still visible in the
+    // debugger.
+    // Or it could be in the scope that the block is contained in, so any locals
+    // from within the block are already considered out-of-scope and thus not
+    // accessible in the debugger anymore.
+    //
+    // The current implementation opts for the second option: cleanup of a block
+    // already happens in the parent scope of the block. The main reason for
+    // this decision is that scoping becomes control-flow dependent when variable
+    // shadowing is involved and it's impossible to decide statically which
+    // scope is actually left when the cleanup code is executed.
+    // In practice it shouldn't make much of a difference.
+
+    let mut cleanup_span = node_span;
+
+    if is_block {
+        // Not all blocks actually have curly braces (e.g. simple closure
+        // bodies), in which case we also just want to return the span of the
+        // whole expression.
+        let code_snippet = cx.sess().codemap().span_to_snippet(node_span);
+        if let Ok(code_snippet) = code_snippet {
+            let bytes = code_snippet.as_bytes();
+
+            if !bytes.is_empty() && &bytes[bytes.len()-1..] == b"}" {
+                cleanup_span = Span {
+                    lo: node_span.hi - codemap::BytePos(1),
+                    hi: node_span.hi,
+                    expn_id: node_span.expn_id
+                };
+            }
+        }
+    }
+
+    NodeIdAndSpan {
+        id: node_id,
+        span: cleanup_span
+    }
+}
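+
+// For example (hypothetical snippet): for a block `{ let _guard = lock(); work() }`
+// the returned cleanup location is a one-byte span covering the closing `}`, so a
+// debugger shows the drop of `_guard` as running on the line of the closing brace
+// rather than somewhere inside the block.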
+
+
+/// Sets the current debug location at the beginning of the span.
+///
+/// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). The node_id
+/// parameter is used to reliably find the correct visibility scope for the code
+/// position.
+pub fn set_source_location(fcx: &FunctionContext,
+                           node_id: ast::NodeId,
+                           span: Span) {
+    match fcx.debug_context {
+        FunctionDebugContext::DebugInfoDisabled => return,
+        FunctionDebugContext::FunctionWithoutDebugInfo => {
+            set_debug_location(fcx.ccx, UnknownLocation);
+            return;
+        }
+        FunctionDebugContext::RegularContext(box ref function_debug_context) => {
+            if function_debug_context.source_location_override.get() {
+                // Just ignore any attempts to set a new debug location while
+                // the override is active.
+                return;
+            }
+
+            let cx = fcx.ccx;
+
+            debug!("set_source_location: {}", cx.sess().codemap().span_to_string(span));
+
+            if function_debug_context.source_locations_enabled.get() {
+                let loc = span_start(cx, span);
+                let scope = scope_metadata(fcx, node_id, span);
+
+                set_debug_location(cx, InternalDebugLocation::new(scope,
+                                                                  loc.line,
+                                                                  loc.col.to_usize()));
+            } else {
+                set_debug_location(cx, UnknownLocation);
+            }
+        }
+    }
+}
+
+/// This function makes sure that all debug locations emitted while executing
+/// `wrapped_function` are set to the given `debug_loc`.
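+///
+/// A typical use (see `trans_into` in expr.rs) is translating a reference to a
+/// `const`: the constant's body is expanded inline, and every instruction it
+/// produces is attributed to the location of the referencing expression rather
+/// than to the constant's own definition.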
+pub fn with_source_location_override<F, R>(fcx: &FunctionContext,
+                                           debug_loc: DebugLoc,
+                                           wrapped_function: F) -> R
+    where F: FnOnce() -> R
+{
+    match fcx.debug_context {
+        FunctionDebugContext::DebugInfoDisabled => {
+            wrapped_function()
+        }
+        FunctionDebugContext::FunctionWithoutDebugInfo => {
+            set_debug_location(fcx.ccx, UnknownLocation);
+            wrapped_function()
+        }
+        FunctionDebugContext::RegularContext(box ref function_debug_context) => {
+            if function_debug_context.source_location_override.get() {
+                wrapped_function()
+            } else {
+                debug_loc.apply(fcx);
+                function_debug_context.source_location_override.set(true);
+                let result = wrapped_function();
+                function_debug_context.source_location_override.set(false);
+                result
+            }
+        }
+    }
+}
+
+/// Clears the current debug location.
+///
+/// Instructions generated hereafter won't be assigned a source location.
+pub fn clear_source_location(fcx: &FunctionContext) {
+    if fn_should_be_ignored(fcx) {
+        return;
+    }
+
+    set_debug_location(fcx.ccx, UnknownLocation);
+}
+
+/// Enables emitting source locations for the given function.
+///
+/// Since we don't want source locations to be emitted for the function prelude,
+/// they are disabled when beginning to translate a new function. This function
+/// switches source location emitting on and must therefore be called before the
+/// first real statement/expression of the function is translated.
+pub fn start_emitting_source_locations(fcx: &FunctionContext) {
+    match fcx.debug_context {
+        FunctionDebugContext::RegularContext(box ref data) => {
+            data.source_locations_enabled.set(true)
+        },
+        _ => { /* safe to ignore */ }
+    }
+}
+
+
+#[derive(Copy, Clone, PartialEq)]
+pub enum InternalDebugLocation {
+    KnownLocation { scope: DIScope, line: usize, col: usize },
+    UnknownLocation
+}
+
+impl InternalDebugLocation {
+    pub fn new(scope: DIScope, line: usize, col: usize) -> InternalDebugLocation {
+        KnownLocation {
+            scope: scope,
+            line: line,
+            col: col,
+        }
+    }
+}
+
+pub fn set_debug_location(cx: &CrateContext, debug_location: InternalDebugLocation) {
+    if debug_location == debug_context(cx).current_debug_location.get() {
+        return;
+    }
+
+    let metadata_node;
+
+    match debug_location {
+        KnownLocation { scope, line, .. } => {
+            // Always set the column to zero like Clang and GCC
+            let col = UNKNOWN_COLUMN_NUMBER;
+            debug!("setting debug location to {} {}", line, col);
+
+            unsafe {
+                metadata_node = llvm::LLVMDIBuilderCreateDebugLocation(
+                    debug_context(cx).llcontext,
+                    line as c_uint,
+                    col as c_uint,
+                    scope,
+                    ptr::null_mut());
+            }
+        }
+        UnknownLocation => {
+            debug!("clearing debug location ");
+            metadata_node = ptr::null_mut();
+        }
+    };
+
+    unsafe {
+        llvm::LLVMSetCurrentDebugLocation(cx.raw_builder(), metadata_node);
+    }
+
+    debug_context(cx).current_debug_location.set(debug_location);
+}
diff --git a/src/librustc_trans/debuginfo/type_names.rs b/src/librustc_trans/debuginfo/type_names.rs
new file mode 100644 (file)
index 0000000..6fdd6a2
--- /dev/null
@@ -0,0 +1,199 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Type Names for Debug Info.
+
+use common::CrateContext;
+use rustc::hir::def_id::DefId;
+use rustc::infer;
+use rustc::ty::subst;
+use rustc::ty::{self, Ty};
+
+use rustc::hir;
+
+// Compute the name of the type as it should be stored in debuginfo. Does not do
+// any caching, i.e. calling the function twice with the same type will also do
+// the work twice. The `qualified` parameter only affects the first level of the
+// type name, further levels (i.e. type parameters) are always fully qualified.
+pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                             t: Ty<'tcx>,
+                                             qualified: bool)
+                                             -> String {
+    let mut result = String::with_capacity(64);
+    push_debuginfo_type_name(cx, t, qualified, &mut result);
+    result
+}
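+
+// For example (hypothetical type): for `Option<u32>` this returns roughly
+// "core::option::Option<u32>" with `qualified == true` and "Option<u32>" with
+// `qualified == false`; the `u32` parameter is printed identically in both
+// cases, as described above.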
+
+// Pushes the name of the type as it should be stored in debuginfo on the
+// `output` String. See also compute_debuginfo_type_name().
+pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                          t: Ty<'tcx>,
+                                          qualified: bool,
+                                          output: &mut String) {
+    match t.sty {
+        ty::TyBool => output.push_str("bool"),
+        ty::TyChar => output.push_str("char"),
+        ty::TyStr => output.push_str("str"),
+        ty::TyInt(int_ty) => output.push_str(int_ty.ty_to_string()),
+        ty::TyUint(uint_ty) => output.push_str(uint_ty.ty_to_string()),
+        ty::TyFloat(float_ty) => output.push_str(float_ty.ty_to_string()),
+        ty::TyStruct(def, substs) |
+        ty::TyEnum(def, substs) => {
+            push_item_name(cx, def.did, qualified, output);
+            push_type_params(cx, substs, output);
+        },
+        ty::TyTuple(ref component_types) => {
+            output.push('(');
+            for &component_type in component_types {
+                push_debuginfo_type_name(cx, component_type, true, output);
+                output.push_str(", ");
+            }
+            if !component_types.is_empty() {
+                output.pop();
+                output.pop();
+            }
+            output.push(')');
+        },
+        ty::TyBox(inner_type) => {
+            output.push_str("Box<");
+            push_debuginfo_type_name(cx, inner_type, true, output);
+            output.push('>');
+        },
+        ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
+            output.push('*');
+            match mutbl {
+                hir::MutImmutable => output.push_str("const "),
+                hir::MutMutable => output.push_str("mut "),
+            }
+
+            push_debuginfo_type_name(cx, inner_type, true, output);
+        },
+        ty::TyRef(_, ty::TypeAndMut { ty: inner_type, mutbl }) => {
+            output.push('&');
+            if mutbl == hir::MutMutable {
+                output.push_str("mut ");
+            }
+
+            push_debuginfo_type_name(cx, inner_type, true, output);
+        },
+        ty::TyArray(inner_type, len) => {
+            output.push('[');
+            push_debuginfo_type_name(cx, inner_type, true, output);
+            output.push_str(&format!("; {}", len));
+            output.push(']');
+        },
+        ty::TySlice(inner_type) => {
+            output.push('[');
+            push_debuginfo_type_name(cx, inner_type, true, output);
+            output.push(']');
+        },
+        ty::TyTrait(ref trait_data) => {
+            let principal = cx.tcx().erase_late_bound_regions(&trait_data.principal);
+            push_item_name(cx, principal.def_id, false, output);
+            push_type_params(cx, principal.substs, output);
+        },
+        ty::TyFnDef(_, _, &ty::BareFnTy{ unsafety, abi, ref sig } ) |
+        ty::TyFnPtr(&ty::BareFnTy{ unsafety, abi, ref sig } ) => {
+            if unsafety == hir::Unsafety::Unsafe {
+                output.push_str("unsafe ");
+            }
+
+            if abi != ::abi::Abi::Rust {
+                output.push_str("extern \"");
+                output.push_str(abi.name());
+                output.push_str("\" ");
+            }
+
+            output.push_str("fn(");
+
+            let sig = cx.tcx().erase_late_bound_regions(sig);
+            let sig = infer::normalize_associated_type(cx.tcx(), &sig);
+            if !sig.inputs.is_empty() {
+                for &parameter_type in &sig.inputs {
+                    push_debuginfo_type_name(cx, parameter_type, true, output);
+                    output.push_str(", ");
+                }
+                output.pop();
+                output.pop();
+            }
+
+            if sig.variadic {
+                if !sig.inputs.is_empty() {
+                    output.push_str(", ...");
+                } else {
+                    output.push_str("...");
+                }
+            }
+
+            output.push(')');
+
+            match sig.output {
+                ty::FnConverging(result_type) if result_type.is_nil() => {}
+                ty::FnConverging(result_type) => {
+                    output.push_str(" -> ");
+                    push_debuginfo_type_name(cx, result_type, true, output);
+                }
+                ty::FnDiverging => {
+                    output.push_str(" -> !");
+                }
+            }
+        },
+        ty::TyClosure(..) => {
+            output.push_str("closure");
+        }
+        ty::TyError |
+        ty::TyInfer(_) |
+        ty::TyProjection(..) |
+        ty::TyParam(_) => {
+            bug!("debuginfo: Trying to create type name for \
+                unexpected type: {:?}", t);
+        }
+    }
+
+    fn push_item_name(cx: &CrateContext,
+                      def_id: DefId,
+                      qualified: bool,
+                      output: &mut String) {
+        if qualified {
+            output.push_str(&cx.tcx().crate_name(def_id.krate));
+            for path_element in cx.tcx().def_path(def_id).data {
+                output.push_str("::");
+                output.push_str(&path_element.data.as_interned_str());
+            }
+        } else {
+            output.push_str(&cx.tcx().item_name(def_id).as_str());
+        }
+    }
+
+    // Pushes the type parameters in the given `Substs` to the output string.
+    // This ignores region parameters, since they can't reliably be
+    // reconstructed for items from non-local crates. For local crates, this
+    // would be possible but with inlining and LTO we have to use the least
+    // common denominator - otherwise we would run into conflicts.
+    fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                  substs: &subst::Substs<'tcx>,
+                                  output: &mut String) {
+        if substs.types.is_empty() {
+            return;
+        }
+
+        output.push('<');
+
+        for &type_parameter in &substs.types {
+            push_debuginfo_type_name(cx, type_parameter, true, output);
+            output.push_str(", ");
+        }
+
+        output.pop();
+        output.pop();
+
+        output.push('>');
+    }
+}
diff --git a/src/librustc_trans/debuginfo/utils.rs b/src/librustc_trans/debuginfo/utils.rs
new file mode 100644 (file)
index 0000000..bef7af3
--- /dev/null
@@ -0,0 +1,107 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Utility Functions.
+
+use super::{FunctionDebugContext, CrateDebugContext};
+use super::namespace::namespace_for_item;
+
+use rustc::hir::def_id::DefId;
+
+use llvm;
+use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray};
+use machine;
+use common::{CrateContext, FunctionContext};
+use type_::Type;
+
+use syntax::codemap::Span;
+use syntax::{ast, codemap};
+
+pub fn is_node_local_to_unit(cx: &CrateContext, node_id: ast::NodeId) -> bool
+{
+    // The is_local_to_unit flag indicates whether a function is local to the
+    // current compilation unit (i.e. if it is *static* in the C-sense). The
+    // *reachable* set should provide a good approximation of this, as it
+    // contains everything that might leak out of the current crate (by being
+    // externally visible or by being inlined into something externally
+    // visible). It might be better to use the `exported_items` set from
+    // `driver::CrateAnalysis` in the future, but (atm) this set is not
+    // available in the translation pass.
+    !cx.reachable().contains(&node_id)
+}
+
+#[allow(non_snake_case)]
+pub fn create_DIArray(builder: DIBuilderRef, arr: &[DIDescriptor]) -> DIArray {
+    return unsafe {
+        llvm::LLVMDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32)
+    };
+}
+
+pub fn contains_nodebug_attribute(attributes: &[ast::Attribute]) -> bool {
+    attributes.iter().any(|attr| {
+        let meta_item: &ast::MetaItem = &attr.node.value;
+        match meta_item.node {
+            ast::MetaItemKind::Word(ref value) => &value[..] == "no_debug",
+            _ => false
+        }
+    })
+}
+
+/// Return codemap::Loc corresponding to the beginning of the span
+pub fn span_start(cx: &CrateContext, span: Span) -> codemap::Loc {
+    cx.sess().codemap().lookup_char_pos(span.lo)
+}
+
+pub fn size_and_align_of(cx: &CrateContext, llvm_type: Type) -> (u64, u64) {
+    (machine::llsize_of_alloc(cx, llvm_type), machine::llalign_of_min(cx, llvm_type) as u64)
+}
+
+pub fn bytes_to_bits(bytes: u64) -> u64 {
+    bytes * 8
+}
+
+#[inline]
+pub fn debug_context<'a, 'tcx>(cx: &'a CrateContext<'a, 'tcx>)
+                           -> &'a CrateDebugContext<'tcx> {
+    let debug_context: &'a CrateDebugContext<'tcx> = cx.dbg_cx().as_ref().unwrap();
+    debug_context
+}
+
+#[inline]
+#[allow(non_snake_case)]
+pub fn DIB(cx: &CrateContext) -> DIBuilderRef {
+    cx.dbg_cx().as_ref().unwrap().builder
+}
+
+pub fn fn_should_be_ignored(fcx: &FunctionContext) -> bool {
+    match fcx.debug_context {
+        FunctionDebugContext::RegularContext(_) => false,
+        _ => true
+    }
+}
+
+pub fn assert_type_for_node_id(cx: &CrateContext,
+                           node_id: ast::NodeId,
+                           error_reporting_span: Span) {
+    if !cx.tcx().node_types().contains_key(&node_id) {
+        span_bug!(error_reporting_span,
+                  "debuginfo: Could not find type for node id!");
+    }
+}
+
+pub fn get_namespace_and_span_for_item(cx: &CrateContext, def_id: DefId)
+                                   -> (DIScope, Span) {
+    let containing_scope = namespace_for_item(cx, def_id).scope;
+    let definition_span = cx.tcx().map.def_id_span(def_id, codemap::DUMMY_SP /* (1) */ );
+
+    // (1) For external items there is no span information
+
+    (containing_scope, definition_span)
+}
diff --git a/src/librustc_trans/declare.rs b/src/librustc_trans/declare.rs
new file mode 100644 (file)
index 0000000..eb520fe
--- /dev/null
@@ -0,0 +1,179 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//! Declare various LLVM values.
+//!
+//! Prefer using functions and methods from this module rather than calling LLVM
+//! functions directly. These functions do some additional work to ensure we do
+//! the right thing given the preconceptions of trans.
+//!
+//! Some useful guidelines:
+//!
+//! * Use declare_* family of methods if you are declaring, but are not
+//!   interested in defining the ValueRef they return.
+//! * Use define_* family of methods when you might be defining the ValueRef.
+//! * When in doubt, define.
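+//!
+//! A minimal sketch (hypothetical names and types) of how the two families are
+//! meant to be used:
+//!
+//! ```ignore
+//! // Declaring: we only need a handle; redeclaring returns the existing value.
+//! let llglobal = declare::declare_global(ccx, "my_static", llty);
+//! // Defining: we are about to emit a body, so a name clash is a hard error.
+//! let llfn = declare::define_internal_fn(ccx, "my_shim", fn_ty);
+//! ```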
+use llvm::{self, ValueRef};
+use rustc::ty;
+use rustc::infer;
+use abi::{Abi, FnType};
+use attributes;
+use context::CrateContext;
+use type_::Type;
+use value::Value;
+
+use std::ffi::CString;
+
+
+/// Declare a global value.
+///
+/// If there’s a value with the same name already declared, the function will
+/// return its ValueRef instead.
+pub fn declare_global(ccx: &CrateContext, name: &str, ty: Type) -> llvm::ValueRef {
+    debug!("declare_global(name={:?})", name);
+    let namebuf = CString::new(name).unwrap_or_else(|_|{
+        bug!("name {:?} contains an interior null byte", name)
+    });
+    unsafe {
+        llvm::LLVMGetOrInsertGlobal(ccx.llmod(), namebuf.as_ptr(), ty.to_ref())
+    }
+}
+
+
+/// Declare a function.
+///
+/// If there’s a value with the same name already declared, the function will
+/// update the declaration and return existing ValueRef instead.
+fn declare_raw_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv, ty: Type) -> ValueRef {
+    debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
+    let namebuf = CString::new(name).unwrap_or_else(|_|{
+        bug!("name {:?} contains an interior null byte", name)
+    });
+    let llfn = unsafe {
+        llvm::LLVMGetOrInsertFunction(ccx.llmod(), namebuf.as_ptr(), ty.to_ref())
+    };
+
+    llvm::SetFunctionCallConv(llfn, callconv);
+    // Function addresses in Rust are never significant, allowing functions to
+    // be merged.
+    llvm::SetUnnamedAddr(llfn, true);
+
+    if ccx.tcx().sess.opts.cg.no_redzone
+        .unwrap_or(ccx.tcx().sess.target.target.options.disable_redzone) {
+        llvm::SetFunctionAttribute(llfn, llvm::Attribute::NoRedZone)
+    }
+
+    llfn
+}
+
+
+/// Declare a C ABI function.
+///
+/// Only use this for foreign function ABIs and glue. For Rust functions use
+/// `declare_fn` instead.
+///
+/// If there’s a value with the same name already declared, the function will
+/// update the declaration and return existing ValueRef instead.
+pub fn declare_cfn(ccx: &CrateContext, name: &str, fn_type: Type) -> ValueRef {
+    declare_raw_fn(ccx, name, llvm::CCallConv, fn_type)
+}
+
+
+/// Declare a Rust function.
+///
+/// If there’s a value with the same name already declared, the function will
+/// update the declaration and return existing ValueRef instead.
+pub fn declare_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str,
+                            fn_type: ty::Ty<'tcx>) -> ValueRef {
+    debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type);
+    let abi = fn_type.fn_abi();
+    let sig = ccx.tcx().erase_late_bound_regions(fn_type.fn_sig());
+    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+    debug!("declare_rust_fn (after region erasure) sig={:?}", sig);
+
+    let fty = FnType::new(ccx, abi, &sig, &[]);
+    let llfn = declare_raw_fn(ccx, name, fty.cconv, fty.llvm_type(ccx));
+
+    if sig.output == ty::FnDiverging {
+        llvm::SetFunctionAttribute(llfn, llvm::Attribute::NoReturn);
+    }
+
+    if abi != Abi::Rust && abi != Abi::RustCall {
+        attributes::unwind(llfn, false);
+    }
+
+    fty.apply_attrs_llfn(llfn);
+
+    llfn
+}
+
+
+/// Declare a global with an intention to define it.
+///
+/// Use this function when you intend to define a global. This function will
+/// return None if the name already has a definition associated with it. In that
+/// case an error should be reported to the user, because it usually happens due
+/// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes).
+pub fn define_global(ccx: &CrateContext, name: &str, ty: Type) -> Option<ValueRef> {
+    if get_defined_value(ccx, name).is_some() {
+        None
+    } else {
+        Some(declare_global(ccx, name, ty))
+    }
+}
+
+
+/// Declare a Rust function with an intention to define it.
+///
+/// Use this function when you intend to define a function. This function will
+/// panic if the name already has a definition associated with it. This
+/// can happen with #[no_mangle] or #[export_name], for example.
+pub fn define_internal_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                    name: &str,
+                                    fn_type: ty::Ty<'tcx>) -> ValueRef {
+    if get_defined_value(ccx, name).is_some() {
+        ccx.sess().fatal(&format!("symbol `{}` already defined", name))
+    } else {
+        let llfn = declare_fn(ccx, name, fn_type);
+        llvm::SetLinkage(llfn, llvm::InternalLinkage);
+        llfn
+    }
+}
+
+
+/// Get declared value by name.
+pub fn get_declared_value(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
+    debug!("get_declared_value(name={:?})", name);
+    let namebuf = CString::new(name).unwrap_or_else(|_|{
+        bug!("name {:?} contains an interior null byte", name)
+    });
+    let val = unsafe { llvm::LLVMGetNamedValue(ccx.llmod(), namebuf.as_ptr()) };
+    if val.is_null() {
+        debug!("get_declared_value: {:?} value is null", name);
+        None
+    } else {
+        debug!("get_declared_value: {:?} => {:?}", name, Value(val));
+        Some(val)
+    }
+}
+
+/// Get defined or externally defined (AvailableExternally linkage) value by
+/// name.
+pub fn get_defined_value(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
+    get_declared_value(ccx, name).and_then(|val|{
+        let declaration = unsafe {
+            llvm::LLVMIsDeclaration(val) != 0
+        };
+        if !declaration {
+            Some(val)
+        } else {
+            None
+        }
+    })
+}
index 5ae60d182407f8e815649f973eb8a8e849ee549b..5e4902cf3caf24108ad1eed7c68b544dd4e4ab2f 100644 (file)
@@ -83,32 +83,6 @@ unsafe { simd_add(i32x1(0), i32x1(1)); } // ok!
 ```
 "##,
 
-E0512: r##"
-Transmute with two differently sized types was attempted. Erroneous code
-example:
-
-```compile_fail
-fn takes_u8(_: u8) {}
-
-fn main() {
-    unsafe { takes_u8(::std::mem::transmute(0u16)); }
-    // error: transmute called with differently sized types
-}
-```
-
-Please use types with same size or use the expected type directly. Example:
-
-```
-fn takes_u8(_: u8) {}
-
-fn main() {
-    unsafe { takes_u8(::std::mem::transmute(0i8)); } // ok!
-    // or:
-    unsafe { takes_u8(0u8); } // ok!
-}
-```
-"##,
-
 E0515: r##"
 A constant index expression was out of bounds. Erroneous code example:
 
diff --git a/src/librustc_trans/disr.rs b/src/librustc_trans/disr.rs
new file mode 100644 (file)
index 0000000..fc79fa8
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub struct Disr(pub u64);
+
+impl Disr {
+    pub fn wrapping_add(self, other: Self) -> Self {
+        Disr(self.0.wrapping_add(other.0))
+    }
+}
+
+impl ::std::ops::BitAnd for Disr {
+    type Output = Disr;
+    fn bitand(self, other: Self) -> Self {
+        Disr(self.0 & other.0)
+    }
+}
+
+impl From<::rustc::ty::Disr> for Disr {
+    fn from(i: ::rustc::ty::Disr) -> Disr {
+        Disr(i.to_u64_unchecked())
+    }
+}
+
+impl From<usize> for Disr {
+    fn from(i: usize) -> Disr {
+        Disr(i as u64)
+    }
+}
+
+impl PartialOrd for Disr {
+    fn partial_cmp(&self, other: &Disr) -> Option<::std::cmp::Ordering> {
+        self.0.partial_cmp(&other.0)
+    }
+}
+
+impl Ord for Disr {
+    fn cmp(&self, other: &Disr) -> ::std::cmp::Ordering {
+        self.0.cmp(&other.0)
+    }
+}
diff --git a/src/librustc_trans/expr.rs b/src/librustc_trans/expr.rs
new file mode 100644 (file)
index 0000000..beca81d
--- /dev/null
@@ -0,0 +1,2475 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! # Translation of Expressions
+//!
+//! The expr module handles translation of expressions. The most general
+//! translation routine is `trans()`, which will translate an expression
+//! into a datum. `trans_into()` is also available, which will translate
+//! an expression and write the result directly into memory, sometimes
+//! avoiding the need for a temporary stack slot. Finally,
+//! `trans_to_lvalue()` is available if you'd like to ensure that the
+//! result has cleanup scheduled.
+//!
+//! Internally, each of these functions dispatches to various other
+//! expression functions depending on the kind of expression. We divide
+//! up expressions into:
+//!
+//! - **Datum expressions:** Those that most naturally yield values.
+//!   Examples would be `22`, `box x`, or `a + b` (when not overloaded).
+//! - **DPS expressions:** Those that most naturally write into a location
+//!   in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
+//! - **Statement expressions:** Those that do not generate a meaningful
+//!   result. Examples would be `while { ... }` or `return 44`.
+//!
+//! Public entry points:
+//!
+//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
+//!   storing the result into `dest`. This is the preferred form, if you
+//!   can manage it.
+//!
+//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
+//!   `Datum` with the result. You can then store the datum, inspect
+//!   the value, etc. This may introduce temporaries if the datum is a
+//!   structural type.
+//!
+//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
+//!   expression and ensures that the result has a cleanup associated with it,
+//!   creating a temporary stack slot if necessary.
+//!
+//! - `trans_var -> Datum`: looks up a local variable, upvar or static.
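+//!
+//! A minimal sketch (hypothetical caller) of the two main entry points:
+//!
+//! ```ignore
+//! // Evaluate `expr` and write the result straight into a destination slot:
+//! bcx = expr::trans_into(bcx, &expr, SaveIn(lldest));
+//! // Or produce a datum first and decide how to store or inspect it afterwards:
+//! let datum_block = expr::trans(bcx, &expr);
+//! ```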
+
+#![allow(non_camel_case_types)]
+
+pub use self::Dest::*;
+use self::lazy_binop_ty::*;
+
+use llvm::{self, ValueRef, TypeKind};
+use middle::const_qualif::ConstQualif;
+use rustc::hir::def::Def;
+use rustc::ty::subst::Substs;
+use {_match, abi, adt, asm, base, closure, consts, controlflow};
+use base::*;
+use build::*;
+use callee::{Callee, ArgExprs, ArgOverloadedCall, ArgOverloadedOp};
+use cleanup::{self, CleanupMethods, DropHintMethods};
+use common::*;
+use datum::*;
+use debuginfo::{self, DebugLoc, ToDebugLoc};
+use declare;
+use glue;
+use machine;
+use tvec;
+use type_of;
+use value::Value;
+use Disr;
+use rustc::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer};
+use rustc::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
+use rustc::ty::adjustment::CustomCoerceUnsized;
+use rustc::ty::{self, Ty, TyCtxt};
+use rustc::ty::MethodCall;
+use rustc::ty::cast::{CastKind, CastTy};
+use util::common::indenter;
+use machine::{llsize_of, llsize_of_alloc};
+use type_::Type;
+
+use rustc::hir;
+
+use syntax::{ast, codemap};
+use syntax::parse::token::InternedString;
+use std::fmt;
+use std::mem;
+
+// Destinations
+
+// These are passed around by the code generating functions to track the
+// destination of a computation's value.
+
+#[derive(Copy, Clone, PartialEq)]
+pub enum Dest {
+    SaveIn(ValueRef),
+    Ignore,
+}
+
+impl fmt::Debug for Dest {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            SaveIn(v) => write!(f, "SaveIn({:?})", Value(v)),
+            Ignore => f.write_str("Ignore")
+        }
+    }
+}
+
+/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
+/// better optimized LLVM code.
+pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                              expr: &hir::Expr,
+                              dest: Dest)
+                              -> Block<'blk, 'tcx> {
+    let mut bcx = bcx;
+
+    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
+
+    if adjustment_required(bcx, expr) {
+        // use trans, which may be less efficient but
+        // which will perform the adjustments:
+        let datum = unpack_datum!(bcx, trans(bcx, expr));
+        return datum.store_to_dest(bcx, dest, expr.id);
+    }
+
+    let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
+    if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
+        if !qualif.intersects(ConstQualif::PREFER_IN_PLACE) {
+            if let SaveIn(lldest) = dest {
+                match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
+                                                       bcx.fcx.param_substs,
+                                                       consts::TrueConst::No) {
+                    Ok(global) => {
+                        // Cast pointer to destination, because constants
+                        // have different types.
+                        let lldest = PointerCast(bcx, lldest, val_ty(global));
+                        memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
+                        return bcx;
+                    },
+                    Err(consts::ConstEvalFailure::Runtime(_)) => {
+                        // in case const evaluation errors, translate normally
+                        // debug assertions catch the same errors
+                        // see RFC 1229
+                    },
+                    Err(consts::ConstEvalFailure::Compiletime(_)) => {
+                        return bcx;
+                    },
+                }
+            }
+
+            // If we see a const here, that's because it evaluates to a type with zero size. We
+            // should be able to just discard it, since const expressions are guaranteed not to
+            // have side effects. This seems to be reached through tuple struct constructors being
+            // passed zero-size constants.
+            if let hir::ExprPath(..) = expr.node {
+                match bcx.def(expr.id) {
+                    Def::Const(_) | Def::AssociatedConst(_) => {
+                        assert!(type_is_zero_size(bcx.ccx(), bcx.tcx().node_id_to_type(expr.id)));
+                        return bcx;
+                    }
+                    _ => {}
+                }
+            }
+
+            // Even if we don't have a value to emit, and the expression
+            // doesn't have any side-effects, we still have to translate the
+            // body of any closures.
+            // FIXME: Find a better way of handling this case.
+        } else {
+            // The only way we're going to see a `const` at this point is if
+            // it prefers in-place instantiation, likely because it contains
+            // `[x; N]` somewhere within.
+            match expr.node {
+                hir::ExprPath(..) => {
+                    match bcx.def(expr.id) {
+                        Def::Const(did) | Def::AssociatedConst(did) => {
+                            let empty_substs = bcx.tcx().mk_substs(Substs::empty());
+                            let const_expr = consts::get_const_expr(bcx.ccx(), did, expr,
+                                                                    empty_substs);
+                            // Temporarily get cleanup scopes out of the way,
+                            // as they require sub-expressions to be contained
+                            // inside the current AST scope.
+                            // These should record no cleanups anyways, `const`
+                            // can't have destructors.
+                            let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
+                                                      vec![]);
+                            // Lock emitted debug locations to the location of
+                            // the constant reference expression.
+                            debuginfo::with_source_location_override(bcx.fcx,
+                                                                     expr.debug_loc(),
+                                                                     || {
+                                bcx = trans_into(bcx, const_expr, dest)
+                            });
+                            let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
+                                                      scopes);
+                            assert!(scopes.is_empty());
+                            return bcx;
+                        }
+                        _ => {}
+                    }
+                }
+                _ => {}
+            }
+        }
+    }
+
+    debug!("trans_into() expr={:?}", expr);
+
+    let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
+                                                                          expr.id,
+                                                                          expr.span,
+                                                                          false);
+    bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);
+
+    let kind = expr_kind(bcx.tcx(), expr);
+    bcx = match kind {
+        ExprKind::Lvalue | ExprKind::RvalueDatum => {
+            trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id)
+        }
+        ExprKind::RvalueDps => {
+            trans_rvalue_dps_unadjusted(bcx, expr, dest)
+        }
+        ExprKind::RvalueStmt => {
+            trans_rvalue_stmt_unadjusted(bcx, expr)
+        }
+    };
+
+    bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
+}
+
+/// Translates an expression, returning a datum (and new block) encapsulating the result. When
+/// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
+/// stack.
+pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                         expr: &hir::Expr)
+                         -> DatumBlock<'blk, 'tcx, Expr> {
+    debug!("trans(expr={:?})", expr);
+
+    let mut bcx = bcx;
+    let fcx = bcx.fcx;
+    let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
+    let adjusted_global = !qualif.intersects(ConstQualif::NON_STATIC_BORROWS);
+    let global = if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
+        match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
+                                                            bcx.fcx.param_substs,
+                                                            consts::TrueConst::No) {
+            Ok(global) => {
+                if qualif.intersects(ConstQualif::HAS_STATIC_BORROWS) {
+                    // Is borrowed as 'static, must return lvalue.
+
+                    // Cast pointer to global, because constants have different types.
+                    let const_ty = expr_ty_adjusted(bcx, expr);
+                    let llty = type_of::type_of(bcx.ccx(), const_ty);
+                    let global = PointerCast(bcx, global, llty.ptr_to());
+                    let datum = Datum::new(global, const_ty, Lvalue::new("expr::trans"));
+                    return DatumBlock::new(bcx, datum.to_expr_datum());
+                }
+
+                // Otherwise, keep around and perform adjustments, if needed.
+                let const_ty = if adjusted_global {
+                    expr_ty_adjusted(bcx, expr)
+                } else {
+                    expr_ty(bcx, expr)
+                };
+
+                // This could use a better heuristic.
+                Some(if type_is_immediate(bcx.ccx(), const_ty) {
+                    // Cast pointer to global, because constants have different types.
+                    let llty = type_of::type_of(bcx.ccx(), const_ty);
+                    let global = PointerCast(bcx, global, llty.ptr_to());
+                    // Maybe just get the value directly, instead of loading it?
+                    immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
+                } else {
+                    let scratch = alloc_ty(bcx, const_ty, "const");
+                    call_lifetime_start(bcx, scratch);
+                    let lldest = if !const_ty.is_structural() {
+                        // Cast pointer to slot, because constants have different types.
+                        PointerCast(bcx, scratch, val_ty(global))
+                    } else {
+                        // In this case, memcpy_ty calls llvm.memcpy after casting both
+                        // source and destination to i8*, so we don't need any casts.
+                        scratch
+                    };
+                    memcpy_ty(bcx, lldest, global, const_ty);
+                    Datum::new(scratch, const_ty, Rvalue::new(ByRef))
+                })
+            },
+            Err(consts::ConstEvalFailure::Runtime(_)) => {
+                // in case const evaluation errors, translate normally
+                // debug assertions catch the same errors
+                // see RFC 1229
+                None
+            },
+            Err(consts::ConstEvalFailure::Compiletime(_)) => {
+                // generate a dummy llvm value
+                let const_ty = expr_ty(bcx, expr);
+                let llty = type_of::type_of(bcx.ccx(), const_ty);
+                let dummy = C_undef(llty.ptr_to());
+                Some(Datum::new(dummy, const_ty, Rvalue::new(ByRef)))
+            },
+        }
+    } else {
+        None
+    };
+
+    let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
+                                                                          expr.id,
+                                                                          expr.span,
+                                                                          false);
+    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
+    let datum = match global {
+        Some(rvalue) => rvalue.to_expr_datum(),
+        None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
+    };
+    let datum = if adjusted_global {
+        datum // trans::consts already performed adjustments.
+    } else {
+        unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
+    };
+    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
+    return DatumBlock::new(bcx, datum);
+}
+
+pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
+    StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
+}
+
+pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
+    StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
+}
+
+pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
+    Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
+    Store(bcx, Load(bcx, get_meta(bcx, src_ptr)), get_meta(bcx, dst_ptr));
+}
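+
+// For reference: a fat pointer such as `&[T]` or `&Trait` is a two-word value,
+// with the data pointer at index `abi::FAT_PTR_ADDR` and the extra word (the
+// slice length or the vtable pointer) at index `abi::FAT_PTR_EXTRA`; these are
+// the fields that `get_dataptr` and `get_meta` index into.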
+
+fn adjustment_required<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                   expr: &hir::Expr) -> bool {
+    let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
+        None => { return false; }
+        Some(adj) => adj
+    };
+
+    // Don't skip a conversion from Box<T> to &T, etc.
+    if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
+        return true;
+    }
+
+    match adjustment {
+        AdjustReifyFnPointer => true,
+        AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
+            // purely a type-level thing
+            false
+        }
+        AdjustDerefRef(ref adj) => {
+            // We are a bit paranoid about adjustments and thus might have a re-
+            // borrow here which merely derefs and then refs again (it might have
+            // a different region or mutability, but we don't care here).
+            !(adj.autoderefs == 1 && adj.autoref.is_some() && adj.unsize.is_none())
+        }
+    }
+}
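+
+// For example, a plain re-borrow such as `&*r` with `r: &T` shows up as one
+// autoderef plus an autoref and no unsizing; by the rule above it needs no
+// generated code, so `adjustment_required` returns false for it (unless the
+// deref is overloaded, which is checked first).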
+
+/// Helper for trans that apply adjustments from `expr` to `datum`, which should be the unadjusted
+/// translation of `expr`.
+fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                 expr: &hir::Expr,
+                                 datum: Datum<'tcx, Expr>)
+                                 -> DatumBlock<'blk, 'tcx, Expr>
+{
+    let mut bcx = bcx;
+    let mut datum = datum;
+    let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
+        None => {
+            return DatumBlock::new(bcx, datum);
+        }
+        Some(adj) => { adj }
+    };
+    debug!("unadjusted datum for expr {:?}: {:?} adjustment={:?}",
+           expr, datum, adjustment);
+    match adjustment {
+        AdjustReifyFnPointer => {
+            match datum.ty.sty {
+                ty::TyFnDef(def_id, substs, _) => {
+                    datum = Callee::def(bcx.ccx(), def_id, substs)
+                        .reify(bcx.ccx()).to_expr_datum();
+                }
+                _ => {
+                    bug!("{} cannot be reified to a fn ptr", datum.ty)
+                }
+            }
+        }
+        AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
+            // purely a type-level thing
+        }
+        AdjustDerefRef(ref adj) => {
+            let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() {
+                // We are a bit paranoid about adjustments and thus might have a re-
+                // borrow here which merely derefs and then refs again (it might have
+                // a different region or mutability, but we don't care here).
+                match datum.ty.sty {
+                    // Don't skip a conversion from Box<T> to &T, etc.
+                    ty::TyRef(..) => {
+                        if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
+                            // Don't skip an overloaded deref.
+                            0
+                        } else {
+                            1
+                        }
+                    }
+                    _ => 0
+                }
+            } else {
+                0
+            };
+
+            if adj.autoderefs > skip_reborrows {
+                // Schedule cleanup.
+                let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
+                datum = unpack_datum!(bcx, deref_multiple(bcx, expr,
+                                                          lval.to_expr_datum(),
+                                                          adj.autoderefs - skip_reborrows));
+            }
+
+            // (You might think there is a more elegant way to do this than a
+            // skip_reborrows counter, but then you remember that the borrow checker exists.)
+            if skip_reborrows == 0 && adj.autoref.is_some() {
+                datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr));
+            }
+
+            if let Some(target) = adj.unsize {
+                // We do not arrange cleanup ourselves; if we already are an
+                // L-value, then cleanup will have already been scheduled (and
+                // the `datum.to_rvalue_datum` call below will emit code to zero
+                // the drop flag when moving out of the L-value). If we are an
+                // R-value, then we do not need to schedule cleanup.
+                let source_datum = unpack_datum!(bcx,
+                    datum.to_rvalue_datum(bcx, "__coerce_source"));
+
+                let target = bcx.monomorphize(&target);
+
+                let scratch = alloc_ty(bcx, target, "__coerce_target");
+                call_lifetime_start(bcx, scratch);
+                let target_datum = Datum::new(scratch, target,
+                                              Rvalue::new(ByRef));
+                bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum);
+                datum = Datum::new(scratch, target,
+                                   RvalueExpr(Rvalue::new(ByRef)));
+            }
+        }
+    }
+    debug!("after adjustments, datum={:?}", datum);
+    DatumBlock::new(bcx, datum)
+}
+
+fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                              span: codemap::Span,
+                              source: Datum<'tcx, Rvalue>,
+                              target: Datum<'tcx, Rvalue>)
+                              -> Block<'blk, 'tcx> {
+    let mut bcx = bcx;
+    debug!("coerce_unsized({:?} -> {:?})", source, target);
+
+    match (&source.ty.sty, &target.ty.sty) {
+        (&ty::TyBox(a), &ty::TyBox(b)) |
+        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
+         &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
+        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
+         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
+        (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
+         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
+            let (inner_source, inner_target) = (a, b);
+
+            let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) {
+                // Normally, the source is a thin pointer and we are
+                // adding extra info to make a fat pointer. The exception
+                // is when we are upcasting an existing object fat pointer
+                // to use a different vtable. In that case, we want to
+                // load out the original data pointer so we can repackage
+                // it.
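+                // (E.g., coercing `&[i32; 4]` to `&[i32]` turns a thin
+                // pointer into a fat one by attaching the length, and
+                // coercing a reference to a concrete type into a
+                // trait-object reference attaches a vtable pointer.)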
+                (Load(bcx, get_dataptr(bcx, source.val)),
+                Some(Load(bcx, get_meta(bcx, source.val))))
+            } else {
+                let val = if source.kind.is_by_ref() {
+                    load_ty(bcx, source.val, source.ty)
+                } else {
+                    source.val
+                };
+                (val, None)
+            };
+
+            let info = unsized_info(bcx.ccx(), inner_source, inner_target, old_info);
+
+            // Compute the base pointer. This doesn't change the pointer value,
+            // but merely its type.
+            let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to();
+            let base = PointerCast(bcx, base, ptr_ty);
+
+            Store(bcx, base, get_dataptr(bcx, target.val));
+            Store(bcx, info, get_meta(bcx, target.val));
+        }
+
+        // This can be extended to enums and tuples in the future.
+        // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
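+        // E.g., coercing `Rc<[i32; 2]>` to `Rc<[i32]>` lands here: both sides
+        // are the same struct, the field holding the pointer is coerced
+        // recursively, and any other fields are copied as-is.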
+        (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => {
+            assert_eq!(def_id_a, def_id_b);
+
+            // The target is already by-ref because it's to be written to.
+            let source = unpack_datum!(bcx, source.to_ref_datum(bcx));
+            assert!(target.kind.is_by_ref());
+
+            let kind = custom_coerce_unsize_info(bcx.ccx(), source.ty, target.ty);
+
+            let repr_source = adt::represent_type(bcx.ccx(), source.ty);
+            let src_fields = match &*repr_source {
+                &adt::Repr::Univariant(ref s, _) => &s.fields,
+                _ => span_bug!(span,
+                               "Non univariant struct? (repr_source: {:?})",
+                               repr_source),
+            };
+            let repr_target = adt::represent_type(bcx.ccx(), target.ty);
+            let target_fields = match &*repr_target {
+                &adt::Repr::Univariant(ref s, _) => &s.fields,
+                _ => span_bug!(span,
+                               "Non univariant struct? (repr_target: {:?})",
+                               repr_target),
+            };
+
+            let coerce_index = match kind {
+                CustomCoerceUnsized::Struct(i) => i
+            };
+            assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len());
+
+            let source_val = adt::MaybeSizedValue::sized(source.val);
+            let target_val = adt::MaybeSizedValue::sized(target.val);
+
+            let iter = src_fields.iter().zip(target_fields).enumerate();
+            for (i, (src_ty, target_ty)) in iter {
+                let ll_source = adt::trans_field_ptr(bcx, &repr_source, source_val, Disr(0), i);
+                let ll_target = adt::trans_field_ptr(bcx, &repr_target, target_val, Disr(0), i);
+
+                // If this is the field we need to coerce, recurse on it.
+                if i == coerce_index {
+                    coerce_unsized(bcx, span,
+                                   Datum::new(ll_source, src_ty,
+                                              Rvalue::new(ByRef)),
+                                   Datum::new(ll_target, target_ty,
+                                              Rvalue::new(ByRef)));
+                } else {
+                    // Otherwise, simply copy the data from the source.
+                    assert!(src_ty.is_phantom_data() || src_ty == target_ty);
+                    memcpy_ty(bcx, ll_target, ll_source, src_ty);
+                }
+            }
+        }
+        _ => bug!("coerce_unsized: invalid coercion {:?} -> {:?}",
+                  source.ty,
+                  target.ty)
+    }
+    bcx
+}
+
+/// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
+/// that the expr represents.
+///
+/// If this expression is an rvalue, this implies introducing a temporary.  In other words,
+/// something like `x().f` is translated into roughly the equivalent of
+///
+///   { tmp = x(); tmp.f }
+pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                   expr: &hir::Expr,
+                                   name: &str)
+                                   -> DatumBlock<'blk, 'tcx, Lvalue> {
+    let mut bcx = bcx;
+    let datum = unpack_datum!(bcx, trans(bcx, expr));
+    return datum.to_lvalue_datum(bcx, name, expr.id);
+}
+
+/// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
+/// directly.
+fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                expr: &hir::Expr)
+                                -> DatumBlock<'blk, 'tcx, Expr> {
+    let mut bcx = bcx;
+
+    debug!("trans_unadjusted(expr={:?})", expr);
+    let _indenter = indenter();
+
+    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
+
+    return match expr_kind(bcx.tcx(), expr) {
+        ExprKind::Lvalue | ExprKind::RvalueDatum => {
+            let datum = unpack_datum!(bcx, {
+                trans_datum_unadjusted(bcx, expr)
+            });
+
+            DatumBlock {bcx: bcx, datum: datum}
+        }
+
+        ExprKind::RvalueStmt => {
+            bcx = trans_rvalue_stmt_unadjusted(bcx, expr);
+            nil(bcx, expr_ty(bcx, expr))
+        }
+
+        ExprKind::RvalueDps => {
+            let ty = expr_ty(bcx, expr);
+            if type_is_zero_size(bcx.ccx(), ty) {
+                bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
+                nil(bcx, ty)
+            } else {
+                let scratch = rvalue_scratch_datum(bcx, ty, "");
+                bcx = trans_rvalue_dps_unadjusted(
+                    bcx, expr, SaveIn(scratch.val));
+
+                // Note: this is not obviously a good idea.  It causes
+                // immediate values to be loaded immediately after a
+                // return from a call or other similar expression,
+                // which in turn leads to allocas having shorter
+                // lifetimes and hence smaller stack frames.  On the
+                // other hand, it can lead to more register pressure.
+                // Still, in practice it seems to increase
+                // performance, since we have fewer problems with
+                // morestack churn.
+                let scratch = unpack_datum!(
+                    bcx, scratch.to_appropriate_datum(bcx));
+
+                DatumBlock::new(bcx, scratch.to_expr_datum())
+            }
+        }
+    };
+
+    fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>)
+                       -> DatumBlock<'blk, 'tcx, Expr> {
+        let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
+        let datum = immediate_rvalue(llval, ty);
+        DatumBlock::new(bcx, datum.to_expr_datum())
+    }
+}
+
+fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                      expr: &hir::Expr)
+                                      -> DatumBlock<'blk, 'tcx, Expr> {
+    let mut bcx = bcx;
+    let fcx = bcx.fcx;
+    let _icx = push_ctxt("trans_datum_unadjusted");
+
+    match expr.node {
+        hir::ExprType(ref e, _) => {
+            trans(bcx, &e)
+        }
+        hir::ExprPath(..) => {
+            let var = trans_var(bcx, bcx.def(expr.id));
+            DatumBlock::new(bcx, var.to_expr_datum())
+        }
+        hir::ExprField(ref base, name) => {
+            trans_rec_field(bcx, &base, name.node)
+        }
+        hir::ExprTupField(ref base, idx) => {
+            trans_rec_tup_field(bcx, &base, idx.node)
+        }
+        hir::ExprIndex(ref base, ref idx) => {
+            trans_index(bcx, expr, &base, &idx, MethodCall::expr(expr.id))
+        }
+        hir::ExprBox(ref contents) => {
+            // Special case for `Box<T>`
+            let box_ty = expr_ty(bcx, expr);
+            let contents_ty = expr_ty(bcx, &contents);
+            match box_ty.sty {
+                ty::TyBox(..) => {
+                    trans_uniq_expr(bcx, expr, box_ty, &contents, contents_ty)
+                }
+                _ => span_bug!(expr.span,
+                               "expected unique box")
+            }
+
+        }
+        hir::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &lit),
+        hir::ExprBinary(op, ref lhs, ref rhs) => {
+            trans_binary(bcx, expr, op, &lhs, &rhs)
+        }
+        hir::ExprUnary(op, ref x) => {
+            trans_unary(bcx, expr, op, &x)
+        }
+        hir::ExprAddrOf(_, ref x) => {
+            match x.node {
+                hir::ExprRepeat(..) | hir::ExprVec(..) => {
+                    // Special case for slices.
+                    let cleanup_debug_loc =
+                        debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
+                                                                      x.id,
+                                                                      x.span,
+                                                                      false);
+                    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
+                    let datum = unpack_datum!(
+                        bcx, tvec::trans_slice_vec(bcx, expr, &x));
+                    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id);
+                    DatumBlock::new(bcx, datum)
+                }
+                _ => {
+                    trans_addr_of(bcx, expr, &x)
+                }
+            }
+        }
+        hir::ExprCast(ref val, _) => {
+            // Datum output mode means this is a scalar cast:
+            trans_imm_cast(bcx, &val, expr.id)
+        }
+        _ => {
+            span_bug!(
+                expr.span,
+                "trans_rvalue_datum_unadjusted reached \
+                 fall-through case: {:?}",
+                expr.node);
+        }
+    }
+}
+
+fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
+                              base: &hir::Expr,
+                              get_idx: F)
+                              -> DatumBlock<'blk, 'tcx, Expr> where
+    F: FnOnce(&'blk TyCtxt<'tcx>, &VariantInfo<'tcx>) -> usize,
+{
+    let mut bcx = bcx;
+    let _icx = push_ctxt("trans_rec_field");
+
+    let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field"));
+    let bare_ty = base_datum.ty;
+    let repr = adt::represent_type(bcx.ccx(), bare_ty);
+    let vinfo = VariantInfo::from_ty(bcx.tcx(), bare_ty, None);
+
+    let ix = get_idx(bcx.tcx(), &vinfo);
+    let d = base_datum.get_element(
+        bcx,
+        vinfo.fields[ix].1,
+        |srcval| {
+            adt::trans_field_ptr(bcx, &repr, srcval, vinfo.discr, ix)
+        });
+
+    if type_is_sized(bcx.tcx(), d.ty) {
+        DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
+    } else {
+        let scratch = rvalue_scratch_datum(bcx, d.ty, "");
+        Store(bcx, d.val, get_dataptr(bcx, scratch.val));
+        let info = Load(bcx, get_meta(bcx, base_datum.val));
+        Store(bcx, info, get_meta(bcx, scratch.val));
+
+        // Always generate an lvalue datum, because this pointer doesn't own
+        // the data and cleanup is scheduled elsewhere.
+        DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr(d.kind)))
+    }
+}
+
+/// Translates `base.field`.
+fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                               base: &hir::Expr,
+                               field: ast::Name)
+                               -> DatumBlock<'blk, 'tcx, Expr> {
+    trans_field(bcx, base, |_, vinfo| vinfo.field_index(field))
+}
+
+/// Translates `base.<idx>`.
+fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                   base: &hir::Expr,
+                                   idx: usize)
+                                   -> DatumBlock<'blk, 'tcx, Expr> {
+    trans_field(bcx, base, |_, _| idx)
+}
+
+fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                           index_expr: &hir::Expr,
+                           base: &hir::Expr,
+                           idx: &hir::Expr,
+                           method_call: MethodCall)
+                           -> DatumBlock<'blk, 'tcx, Expr> {
+    //! Translates `base[idx]`.
+
+    let _icx = push_ctxt("trans_index");
+    let ccx = bcx.ccx();
+    let mut bcx = bcx;
+
+    let index_expr_debug_loc = index_expr.debug_loc();
+
+    // Check for overloaded index.
+    let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned();
+    let elt_datum = match method {
+        Some(method) => {
+            let method_ty = monomorphize_type(bcx, method.ty);
+
+            let base_datum = unpack_datum!(bcx, trans(bcx, base));
+
+            // Translate index expression.
+            let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
+
+            let ref_ty = // invoked methods have LB regions instantiated:
+                bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
+            let elt_ty = match ref_ty.builtin_deref(true, ty::NoPreference) {
+                None => {
+                    span_bug!(index_expr.span,
+                              "index method didn't return a \
+                              dereferenceable type?!")
+                }
+                Some(elt_tm) => elt_tm.ty,
+            };
+
+            // Overloaded. Invoke the index() method, which basically
+            // yields a `&T` pointer.  We can then proceed down the
+            // normal path (below) to dereference that `&T`.
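+            // (E.g., `map[&key]` on a `HashMap` calls `Index::index` and
+            // yields a `&V`, which is what gets dereferenced further down.)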
+            let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt");
+
+            bcx = Callee::method(bcx, method)
+                .call(bcx, index_expr_debug_loc,
+                      ArgOverloadedOp(base_datum, Some(ix_datum)),
+                      Some(SaveIn(scratch.val))).bcx;
+
+            let datum = scratch.to_expr_datum();
+            let lval = Lvalue::new("expr::trans_index overload");
+            if type_is_sized(bcx.tcx(), elt_ty) {
+                Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr(lval))
+            } else {
+                Datum::new(datum.val, elt_ty, LvalueExpr(lval))
+            }
+        }
+        None => {
+            let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx,
+                                                                base,
+                                                                "index"));
+
+            // Translate index expression and cast to a suitable LLVM integer.
+            // Rust is less strict than LLVM in this regard.
+            let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
+            let ix_val = ix_datum.to_llscalarish(bcx);
+            let ix_size = machine::llbitsize_of_real(bcx.ccx(),
+                                                     val_ty(ix_val));
+            let int_size = machine::llbitsize_of_real(bcx.ccx(),
+                                                      ccx.int_type());
+            let ix_val = {
+                if ix_size < int_size {
+                    if expr_ty(bcx, idx).is_signed() {
+                        SExt(bcx, ix_val, ccx.int_type())
+                    } else { ZExt(bcx, ix_val, ccx.int_type()) }
+                } else if ix_size > int_size {
+                    Trunc(bcx, ix_val, ccx.int_type())
+                } else {
+                    ix_val
+                }
+            };
+
+            let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx());
+
+            let (base, len) = base_datum.get_vec_base_and_len(bcx);
+
+            debug!("trans_index: base {:?}", Value(base));
+            debug!("trans_index: len {:?}", Value(len));
+
+            let bounds_check = ICmp(bcx,
+                                    llvm::IntUGE,
+                                    ix_val,
+                                    len,
+                                    index_expr_debug_loc);
+            let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
+            let expected = Call(bcx,
+                                expect,
+                                &[bounds_check, C_bool(ccx, false)],
+                                index_expr_debug_loc);
+            bcx = with_cond(bcx, expected, |bcx| {
+                controlflow::trans_fail_bounds_check(bcx,
+                                                     expr_info(index_expr),
+                                                     ix_val,
+                                                     len)
+            });
+            let elt = InBoundsGEP(bcx, base, &[ix_val]);
+            let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
+            let lval = Lvalue::new("expr::trans_index fallback");
+            Datum::new(elt, unit_ty, LvalueExpr(lval))
+        }
+    };
+
+    DatumBlock::new(bcx, elt_datum)
+}
+
+/// Translates a reference to a variable.
+pub fn trans_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, def: Def)
+                             -> Datum<'tcx, Lvalue> {
+
+    match def {
+        Def::Static(did, _) => consts::get_static(bcx.ccx(), did),
+        Def::Upvar(_, nid, _, _) => {
+            // Can't move upvars, so this is never a ZeroMemLastUse.
+            let local_ty = node_id_type(bcx, nid);
+            let lval = Lvalue::new_with_hint("expr::trans_var (upvar)",
+                                             bcx, nid, HintKind::ZeroAndMaintain);
+            match bcx.fcx.llupvars.borrow().get(&nid) {
+                Some(&val) => Datum::new(val, local_ty, lval),
+                None => {
+                    bug!("trans_var: no llval for upvar {} found", nid);
+                }
+            }
+        }
+        Def::Local(_, nid) => {
+            let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
+                Some(&v) => v,
+                None => {
+                    bug!("trans_var: no datum for local/arg {} found", nid);
+                }
+            };
+            debug!("take_local(nid={}, v={:?}, ty={})",
+                   nid, Value(datum.val), datum.ty);
+            datum
+        }
+        _ => bug!("{:?} should not reach expr::trans_var", def)
+    }
+}
+
+fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                            expr: &hir::Expr)
+                                            -> Block<'blk, 'tcx> {
+    let mut bcx = bcx;
+    let _icx = push_ctxt("trans_rvalue_stmt");
+
+    if bcx.unreachable.get() {
+        return bcx;
+    }
+
+    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
+
+    match expr.node {
+        hir::ExprBreak(label_opt) => {
+            controlflow::trans_break(bcx, expr, label_opt.map(|l| l.node.name))
+        }
+        hir::ExprType(ref e, _) => {
+            trans_into(bcx, &e, Ignore)
+        }
+        hir::ExprAgain(label_opt) => {
+            controlflow::trans_cont(bcx, expr, label_opt.map(|l| l.node.name))
+        }
+        hir::ExprRet(ref ex) => {
+            // Check whether the return expression itself is reachable; it
+            // may not be when the inner expression itself contains a return.
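+            // (A contrived example: in `return (return x)` the outer `return`
+            // is never reached, because the inner one fires first.)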
+            let reachable = if let Some(ref cfg) = bcx.fcx.cfg {
+                cfg.node_is_reachable(expr.id)
+            } else {
+                true
+            };
+
+            if reachable {
+                controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
+            } else {
+                // If it's not reachable, just translate the inner expression
+                // directly. This avoids having to manage a return slot when
+                // it won't actually be used anyway.
+                if let &Some(ref x) = ex {
+                    bcx = trans_into(bcx, &x, Ignore);
+                }
+                // Mark the end of the block as unreachable. Once we get to
+                // a return expression, there's no more we should be doing
+                // after this.
+                Unreachable(bcx);
+                bcx
+            }
+        }
+        hir::ExprWhile(ref cond, ref body, _) => {
+            controlflow::trans_while(bcx, expr, &cond, &body)
+        }
+        hir::ExprLoop(ref body, _) => {
+            controlflow::trans_loop(bcx, expr, &body)
+        }
+        hir::ExprAssign(ref dst, ref src) => {
+            let src_datum = unpack_datum!(bcx, trans(bcx, &src));
+            let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &dst, "assign"));
+
+            if bcx.fcx.type_needs_drop(dst_datum.ty) {
+                // If there are destructors involved, make sure we
+                // are copying from an rvalue, since that cannot possibly
+                // alias an lvalue. We are concerned about code like:
+                //
+                //   a = a
+                //
+                // but also
+                //
+                //   a = a.b
+                //
+                // where e.g. a : Option<Foo> and a.b :
+                // Option<Foo>. In that case, freeing `a` before the
+                // assignment may also free `a.b`!
+                //
+                // We could avoid this intermediary with some analysis
+                // to determine whether `dst` may possibly own `src`.
+                debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
+                let src_datum = unpack_datum!(
+                    bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
+                let opt_hint_datum = dst_datum.kind.drop_flag_info.hint_datum(bcx);
+                let opt_hint_val = opt_hint_datum.map(|d|d.to_value());
+
+                // 1. Drop the data at the destination, passing the
+                //    drop-hint in case the lvalue has already been
+                //    dropped or moved.
+                bcx = glue::drop_ty_core(bcx,
+                                         dst_datum.val,
+                                         dst_datum.ty,
+                                         expr.debug_loc(),
+                                         false,
+                                         opt_hint_val);
+
+                // 2. We are overwriting the destination; ensure that
+                //    its drop-hint (if any) says "initialized."
+                if let Some(hint_val) = opt_hint_val {
+                    let hint_llval = hint_val.value();
+                    let drop_needed = C_u8(bcx.fcx.ccx, adt::DTOR_NEEDED_HINT);
+                    Store(bcx, drop_needed, hint_llval);
+                }
+                src_datum.store_to(bcx, dst_datum.val)
+            } else {
+                src_datum.store_to(bcx, dst_datum.val)
+            }
+        }
+        hir::ExprAssignOp(op, ref dst, ref src) => {
+            let method = bcx.tcx().tables
+                                  .borrow()
+                                  .method_map
+                                  .get(&MethodCall::expr(expr.id)).cloned();
+
+            if let Some(method) = method {
+                let dst = unpack_datum!(bcx, trans(bcx, &dst));
+                let src_datum = unpack_datum!(bcx, trans(bcx, &src));
+
+                Callee::method(bcx, method)
+                    .call(bcx, expr.debug_loc(),
+                          ArgOverloadedOp(dst, Some(src_datum)), None).bcx
+            } else {
+                trans_assign_op(bcx, expr, op, &dst, &src)
+            }
+        }
+        hir::ExprInlineAsm(ref a, ref outputs, ref inputs) => {
+            let outputs = outputs.iter().map(|output| {
+                let out_datum = unpack_datum!(bcx, trans(bcx, output));
+                unpack_datum!(bcx, out_datum.to_lvalue_datum(bcx, "out", expr.id))
+            }).collect();
+            let inputs = inputs.iter().map(|input| {
+                let input = unpack_datum!(bcx, trans(bcx, input));
+                let input = unpack_datum!(bcx, input.to_rvalue_datum(bcx, "in"));
+                input.to_llscalarish(bcx)
+            }).collect();
+            asm::trans_inline_asm(bcx, a, outputs, inputs);
+            bcx
+        }
+        _ => {
+            span_bug!(
+                expr.span,
+                "trans_rvalue_stmt_unadjusted reached \
+                 fall-through case: {:?}",
+                expr.node);
+        }
+    }
+}
+
+fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                           expr: &hir::Expr,
+                                           dest: Dest)
+                                           -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
+    let mut bcx = bcx;
+
+    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
+
+    // Entry into the method table if this is an overloaded call/op.
+    let method_call = MethodCall::expr(expr.id);
+
+    match expr.node {
+        hir::ExprType(ref e, _) => {
+            trans_into(bcx, &e, dest)
+        }
+        hir::ExprPath(..) => {
+            trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest)
+        }
+        hir::ExprIf(ref cond, ref thn, ref els) => {
+            controlflow::trans_if(bcx, expr.id, &cond, &thn, els.as_ref().map(|e| &**e), dest)
+        }
+        hir::ExprMatch(ref discr, ref arms, _) => {
+            _match::trans_match(bcx, expr, &discr, &arms[..], dest)
+        }
+        hir::ExprBlock(ref blk) => {
+            controlflow::trans_block(bcx, &blk, dest)
+        }
+        hir::ExprStruct(_, ref fields, ref base) => {
+            trans_struct(bcx,
+                         &fields[..],
+                         base.as_ref().map(|e| &**e),
+                         expr.span,
+                         expr.id,
+                         node_id_type(bcx, expr.id),
+                         dest)
+        }
+        hir::ExprTup(ref args) => {
+            let numbered_fields: Vec<(usize, &hir::Expr)> =
+                args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect();
+            trans_adt(bcx,
+                      expr_ty(bcx, expr),
+                      Disr(0),
+                      &numbered_fields[..],
+                      None,
+                      dest,
+                      expr.debug_loc())
+        }
+        hir::ExprLit(ref lit) => {
+            match lit.node {
+                ast::LitKind::Str(ref s, _) => {
+                    tvec::trans_lit_str(bcx, expr, (*s).clone(), dest)
+                }
+                _ => {
+                    span_bug!(expr.span,
+                              "trans_rvalue_dps_unadjusted shouldn't be \
+                              translating this type of literal")
+                }
+            }
+        }
+        hir::ExprVec(..) | hir::ExprRepeat(..) => {
+            tvec::trans_fixed_vstore(bcx, expr, dest)
+        }
+        hir::ExprClosure(_, ref decl, ref body) => {
+            let dest = match dest {
+                SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
+                Ignore => closure::Dest::Ignore(bcx.ccx())
+            };
+
+            // NB. To get the id of the closure, we don't use
+            // `local_def_id(id)`, but rather we extract the closure
+            // def-id from the expr's type. This is because this may
+            // be an inlined expression from another crate, and we
+            // want to get the ORIGINAL closure def-id, since that is
+            // the key we need to find the closure-kind and
+            // closure-type etc.
+            let (def_id, substs) = match expr_ty(bcx, expr).sty {
+                ty::TyClosure(def_id, ref substs) => (def_id, substs),
+                ref t =>
+                    span_bug!(
+                        expr.span,
+                        "closure expr without closure type: {:?}", t),
+            };
+
+            closure::trans_closure_expr(dest,
+                                        decl,
+                                        body,
+                                        expr.id,
+                                        def_id,
+                                        substs).unwrap_or(bcx)
+        }
+        hir::ExprCall(ref f, ref args) => {
+            let method = bcx.tcx().tables.borrow().method_map.get(&method_call).cloned();
+            let (callee, args) = if let Some(method) = method {
+                let mut all_args = vec![&**f];
+                all_args.extend(args.iter().map(|e| &**e));
+
+                (Callee::method(bcx, method), ArgOverloadedCall(all_args))
+            } else {
+                let f = unpack_datum!(bcx, trans(bcx, f));
+                (match f.ty.sty {
+                    ty::TyFnDef(def_id, substs, _) => {
+                        Callee::def(bcx.ccx(), def_id, substs)
+                    }
+                    ty::TyFnPtr(_) => {
+                        let f = unpack_datum!(bcx,
+                            f.to_rvalue_datum(bcx, "callee"));
+                        Callee::ptr(f)
+                    }
+                    _ => {
+                        span_bug!(expr.span,
+                            "type of callee is not a fn: {}", f.ty);
+                    }
+                }, ArgExprs(&args))
+            };
+            callee.call(bcx, expr.debug_loc(), args, Some(dest)).bcx
+        }
+        hir::ExprMethodCall(_, _, ref args) => {
+            Callee::method_call(bcx, method_call)
+                .call(bcx, expr.debug_loc(), ArgExprs(&args), Some(dest)).bcx
+        }
+        hir::ExprBinary(op, ref lhs, ref rhs_expr) => {
+            // if not overloaded, would be RvalueDatumExpr
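+            // (E.g., `a + b` where the operands' type overloads `Add` lands
+            // here and is compiled as a call to that `Add::add` impl.)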
+            let lhs = unpack_datum!(bcx, trans(bcx, &lhs));
+            let mut rhs = unpack_datum!(bcx, trans(bcx, &rhs_expr));
+            if !op.node.is_by_value() {
+                rhs = unpack_datum!(bcx, auto_ref(bcx, rhs, rhs_expr));
+            }
+
+            Callee::method_call(bcx, method_call)
+                .call(bcx, expr.debug_loc(),
+                      ArgOverloadedOp(lhs, Some(rhs)), Some(dest)).bcx
+        }
+        hir::ExprUnary(_, ref subexpr) => {
+            // if not overloaded, would be RvalueDatumExpr
+            let arg = unpack_datum!(bcx, trans(bcx, &subexpr));
+
+            Callee::method_call(bcx, method_call)
+                .call(bcx, expr.debug_loc(),
+                      ArgOverloadedOp(arg, None), Some(dest)).bcx
+        }
+        hir::ExprCast(..) => {
+            // Trait casts used to come this way; now they should be coercions.
+            span_bug!(expr.span, "DPS expr_cast (residual trait cast?)")
+        }
+        hir::ExprAssignOp(op, _, _) => {
+            span_bug!(
+                expr.span,
+                "augmented assignment `{}=` should always be an rvalue_stmt",
+                op.node.as_str())
+        }
+        _ => {
+            span_bug!(
+                expr.span,
+                "trans_rvalue_dps_unadjusted reached fall-through \
+                 case: {:?}",
+                expr.node);
+        }
+    }
+}
+
+fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                        ref_expr: &hir::Expr,
+                                        def: Def,
+                                        dest: Dest)
+                                        -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("trans_def_dps_unadjusted");
+
+    let lldest = match dest {
+        SaveIn(lldest) => lldest,
+        Ignore => { return bcx; }
+    };
+
+    let ty = expr_ty(bcx, ref_expr);
+    if let ty::TyFnDef(..) = ty.sty {
+        // Zero-sized function or ctor.
+        return bcx;
+    }
+
+    match def {
+        Def::Variant(tid, vid) => {
+            let variant = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid);
+            // Nullary variant.
+            let ty = expr_ty(bcx, ref_expr);
+            let repr = adt::represent_type(bcx.ccx(), ty);
+            adt::trans_set_discr(bcx, &repr, lldest, Disr::from(variant.disr_val));
+            bcx
+        }
+        Def::Struct(..) => {
+            match ty.sty {
+                ty::TyStruct(def, _) if def.has_dtor() => {
+                    let repr = adt::represent_type(bcx.ccx(), ty);
+                    adt::trans_set_discr(bcx, &repr, lldest, Disr(0));
+                }
+                _ => {}
+            }
+            bcx
+        }
+        _ => {
+            span_bug!(ref_expr.span,
+                      "Non-DPS def {:?} referenced by {}",
+                      def, bcx.node_id_to_string(ref_expr.id));
+        }
+    }
+}
+
+fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                            fields: &[hir::Field],
+                            base: Option<&hir::Expr>,
+                            expr_span: codemap::Span,
+                            expr_id: ast::NodeId,
+                            ty: Ty<'tcx>,
+                            dest: Dest) -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("trans_rec");
+
+    let tcx = bcx.tcx();
+    let vinfo = VariantInfo::of_node(tcx, ty, expr_id);
+
+    let mut need_base = vec![true; vinfo.fields.len()];
+
+    let numbered_fields = fields.iter().map(|field| {
+        let pos = vinfo.field_index(field.name.node);
+        need_base[pos] = false;
+        (pos, &*field.expr)
+    }).collect::<Vec<_>>();
+
+    let optbase = match base {
+        Some(base_expr) => {
+            let mut leftovers = Vec::new();
+            for (i, b) in need_base.iter().enumerate() {
+                if *b {
+                    leftovers.push((i, vinfo.fields[i].1));
+                }
+            }
+            Some(StructBaseInfo {expr: base_expr,
+                                 fields: leftovers })
+        }
+        None => {
+            if need_base.iter().any(|b| *b) {
+                span_bug!(expr_span, "missing fields and no base expr")
+            }
+            None
+        }
+    };
+
+    trans_adt(bcx,
+              ty,
+              vinfo.discr,
+              &numbered_fields,
+              optbase,
+              dest,
+              DebugLoc::At(expr_id, expr_span))
+}
+
+/// Information that `trans_adt` needs in order to fill in the fields
+/// of a struct copied from a base struct (e.g., from an expression
+/// like `Foo { a: b, ..base }`).
+///
+/// Note that `fields` may be empty; the base expression must always be
+/// evaluated for side-effects.
+pub struct StructBaseInfo<'a, 'tcx> {
+    /// The base expression; will be evaluated after all explicit fields.
+    expr: &'a hir::Expr,
+    /// The indices of fields to copy paired with their types.
+    fields: Vec<(usize, Ty<'tcx>)>
+}
+
+/// Constructs an ADT instance:
+///
+/// - `fields` should be a list of field indices paired with the
+/// expression to store into that field.  The initializers will be
+/// evaluated in the order specified by `fields`.
+///
+/// - `optbase` contains information on the base struct (if any) from
+/// which remaining fields are copied; see comments on `StructBaseInfo`.
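+///
+/// Informally, an expression like `Foo { a: x, ..base }` reaches here with
+/// `fields` holding the index of `a` paired with the expression `x`, and
+/// `optbase` describing `base` together with the indices of the fields that
+/// still need to be copied from it.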
+pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+                                 ty: Ty<'tcx>,
+                                 discr: Disr,
+                                 fields: &[(usize, &hir::Expr)],
+                                 optbase: Option<StructBaseInfo<'a, 'tcx>>,
+                                 dest: Dest,
+                                 debug_location: DebugLoc)
+                                 -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("trans_adt");
+    let fcx = bcx.fcx;
+    let repr = adt::represent_type(bcx.ccx(), ty);
+
+    debug_location.apply(bcx.fcx);
+
+    // If we don't care about the result, just make a
+    // temporary stack slot
+    let addr = match dest {
+        SaveIn(pos) => pos,
+        Ignore => {
+            let llresult = alloc_ty(bcx, ty, "temp");
+            call_lifetime_start(bcx, llresult);
+            llresult
+        }
+    };
+
+    debug!("trans_adt");
+
+    // This scope holds intermediates that must be cleaned up should a
+    // panic occur before the ADT as a whole is ready.
+    let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
+
+    if ty.is_simd() {
+        // Issue 23112: The original logic appeared vulnerable to the same
+        // order-of-eval bug. But SIMD values are tuple structs, so
+        // functional record update (FRU) syntax is unavailable for them.
+        //
+        // To be safe, double-check that we did not get here via FRU.
+        assert!(optbase.is_none());
+
+        // This is the constructor of a SIMD type; such types are
+        // always primitive machine types and so do not have a
+        // destructor or require any clean-up.
+        let llty = type_of::type_of(bcx.ccx(), ty);
+
+        // Keep the vector in a register, running through the fields and
+        // `insertelement`ing them directly into that register
+        // (i.e. avoiding GEPi and `store`s to an alloca).
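+        // (E.g., constructing a `#[repr(simd)]` tuple struct, say
+        // `f32x4(a, b, c, d)`, becomes four `insertelement`s into an undef
+        // vector value followed by a single store to `addr`.)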
+        let mut vec_val = C_undef(llty);
+
+        for &(i, ref e) in fields {
+            let block_datum = trans(bcx, &e);
+            bcx = block_datum.bcx;
+            let position = C_uint(bcx.ccx(), i);
+            let value = block_datum.datum.to_llscalarish(bcx);
+            vec_val = InsertElement(bcx, vec_val, value, position);
+        }
+        Store(bcx, vec_val, addr);
+    } else if let Some(base) = optbase {
+        // Issue 23112: If there is a base, then order-of-eval
+        // requires field expressions eval'ed before base expression.
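+        // (E.g., in `Foo { a: f(), ..g() }` the call `f()` must run before
+        // `g()`, so field values are computed into scratch temporaries first
+        // and only moved into place after the base has been translated.)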
+
+        // First, trans field expressions to temporary scratch values.
+        let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
+            let datum = unpack_datum!(bcx, trans(bcx, &e));
+            (i, datum)
+        }).collect();
+
+        debug_location.apply(bcx.fcx);
+
+        // Second, trans the base to the dest.
+        assert_eq!(discr, Disr(0));
+
+        let addr = adt::MaybeSizedValue::sized(addr);
+        match expr_kind(bcx.tcx(), &base.expr) {
+            ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => {
+                bcx = trans_into(bcx, &base.expr, SaveIn(addr.value));
+            },
+            ExprKind::RvalueStmt => {
+                bug!("unexpected expr kind for struct base expr")
+            }
+            _ => {
+                let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &base.expr, "base"));
+                for &(i, t) in &base.fields {
+                    let datum = base_datum.get_element(
+                            bcx, t, |srcval| adt::trans_field_ptr(bcx, &repr, srcval, discr, i));
+                    assert!(type_is_sized(bcx.tcx(), datum.ty));
+                    let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
+                    bcx = datum.store_to(bcx, dest);
+                }
+            }
+        }
+
+        // Finally, move scratch field values into actual field locations
+        for (i, datum) in scratch_vals {
+            let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
+            bcx = datum.store_to(bcx, dest);
+        }
+    } else {
+        // No base means we can write all fields directly in place.
+        let addr = adt::MaybeSizedValue::sized(addr);
+        for &(i, ref e) in fields {
+            let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
+            let e_ty = expr_ty_adjusted(bcx, &e);
+            bcx = trans_into(bcx, &e, SaveIn(dest));
+            let scope = cleanup::CustomScope(custom_cleanup_scope);
+            fcx.schedule_lifetime_end(scope, dest);
+            // FIXME: nonzeroing move should generalize to fields
+            fcx.schedule_drop_mem(scope, dest, e_ty, None);
+        }
+    }
+
+    adt::trans_set_discr(bcx, &repr, addr, discr);
+
+    fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
+
+    // If we don't care about the result, drop the temporary we made.
+    match dest {
+        SaveIn(_) => bcx,
+        Ignore => {
+            bcx = glue::drop_ty(bcx, addr, ty, debug_location);
+            base::call_lifetime_end(bcx, addr);
+            bcx
+        }
+    }
+}
+
+
+fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                   expr: &hir::Expr,
+                                   lit: &ast::Lit)
+                                   -> DatumBlock<'blk, 'tcx, Expr> {
+    // must not be a string constant; that is an RvalueDpsExpr
+    let _icx = push_ctxt("trans_immediate_lit");
+    let ty = expr_ty(bcx, expr);
+    let v = consts::const_lit(bcx.ccx(), expr, lit);
+    immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
+}
+
+fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                           expr: &hir::Expr,
+                           op: hir::UnOp,
+                           sub_expr: &hir::Expr)
+                           -> DatumBlock<'blk, 'tcx, Expr> {
+    let ccx = bcx.ccx();
+    let mut bcx = bcx;
+    let _icx = push_ctxt("trans_unary_datum");
+
+    let method_call = MethodCall::expr(expr.id);
+
+    // The only overloaded operator that is translated to a datum
+    // is an overloaded deref, since it always yields a `&T`.
+    // Otherwise, we should be in the RvalueDpsExpr path.
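+    // (E.g., `*x` for `x: Rc<T>` first invokes `Deref::deref` to get a `&T`,
+    // and it is that reference which `deref_once` below actually follows.)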
+    assert!(op == hir::UnDeref || !ccx.tcx().is_method_call(expr.id));
+
+    let un_ty = expr_ty(bcx, expr);
+
+    let debug_loc = expr.debug_loc();
+
+    match op {
+        hir::UnNot => {
+            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
+            let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
+            immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
+        }
+        hir::UnNeg => {
+            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
+            let val = datum.to_llscalarish(bcx);
+            let (bcx, llneg) = {
+                if un_ty.is_fp() {
+                    let result = FNeg(bcx, val, debug_loc);
+                    (bcx, result)
+                } else {
+                    let is_signed = un_ty.is_signed();
+                    let result = Neg(bcx, val, debug_loc);
+                    let bcx = if bcx.ccx().check_overflow() && is_signed {
+                        let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty);
+                        let is_min = ICmp(bcx, llvm::IntEQ, val,
+                                          C_integral(llty, min, true), debug_loc);
+                        with_cond(bcx, is_min, |bcx| {
+                            let msg = InternedString::new(
+                                "attempted to negate with overflow");
+                            controlflow::trans_fail(bcx, expr_info(expr), msg)
+                        })
+                    } else {
+                        bcx
+                    };
+                    (bcx, result)
+                }
+            };
+            immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
+        }
+        hir::UnDeref => {
+            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
+            deref_once(bcx, expr, datum, method_call)
+        }
+    }
+}
+
+fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                               box_expr: &hir::Expr,
+                               box_ty: Ty<'tcx>,
+                               contents: &hir::Expr,
+                               contents_ty: Ty<'tcx>)
+                               -> DatumBlock<'blk, 'tcx, Expr> {
+    let _icx = push_ctxt("trans_uniq_expr");
+    let fcx = bcx.fcx;
+    assert!(type_is_sized(bcx.tcx(), contents_ty));
+    let llty = type_of::type_of(bcx.ccx(), contents_ty);
+    let size = llsize_of(bcx.ccx(), llty);
+    let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
+    let llty_ptr = llty.ptr_to();
+    let Result { bcx, val } = malloc_raw_dyn(bcx,
+                                             llty_ptr,
+                                             box_ty,
+                                             size,
+                                             align,
+                                             box_expr.debug_loc());
+    // Unique boxes do not allocate for zero-size types. The standard library
+    // may assume that `free` is never called on the pointer returned for
+    // `Box<ZeroSizeType>`.
+    let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
+        trans_into(bcx, contents, SaveIn(val))
+    } else {
+        let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
+        fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
+                                val, cleanup::HeapExchange, contents_ty);
+        let bcx = trans_into(bcx, contents, SaveIn(val));
+        fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
+        bcx
+    };
+    immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
+}
+
+fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                             expr: &hir::Expr,
+                             subexpr: &hir::Expr)
+                             -> DatumBlock<'blk, 'tcx, Expr> {
+    let _icx = push_ctxt("trans_addr_of");
+    let mut bcx = bcx;
+    let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
+    let ty = expr_ty(bcx, expr);
+    if !type_is_sized(bcx.tcx(), sub_datum.ty) {
+        // Always generate an lvalue datum, because this pointer doesn't own
+        // the data and cleanup is scheduled elsewhere.
+        DatumBlock::new(bcx, Datum::new(sub_datum.val, ty, LvalueExpr(sub_datum.kind)))
+    } else {
+        // Sized value, ref to a thin pointer
+        immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock()
+    }
+}
+
+fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                  binop_expr: &hir::Expr,
+                                  binop_ty: Ty<'tcx>,
+                                  op: hir::BinOp,
+                                  lhs: Datum<'tcx, Rvalue>,
+                                  rhs: Datum<'tcx, Rvalue>)
+                                  -> DatumBlock<'blk, 'tcx, Expr>
+{
+    let _icx = push_ctxt("trans_scalar_binop");
+
+    let tcx = bcx.tcx();
+    let lhs_t = lhs.ty;
+    assert!(!lhs_t.is_simd());
+    let is_float = lhs_t.is_fp();
+    let is_signed = lhs_t.is_signed();
+    let info = expr_info(binop_expr);
+
+    let binop_debug_loc = binop_expr.debug_loc();
+
+    let mut bcx = bcx;
+    let lhs = lhs.to_llscalarish(bcx);
+    let rhs = rhs.to_llscalarish(bcx);
+    let val = match op.node {
+      hir::BiAdd => {
+        if is_float {
+            FAdd(bcx, lhs, rhs, binop_debug_loc)
+        } else {
+            let (newbcx, res) = with_overflow_check(
+                bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
+            bcx = newbcx;
+            res
+        }
+      }
+      hir::BiSub => {
+        if is_float {
+            FSub(bcx, lhs, rhs, binop_debug_loc)
+        } else {
+            let (newbcx, res) = with_overflow_check(
+                bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
+            bcx = newbcx;
+            res
+        }
+      }
+      hir::BiMul => {
+        if is_float {
+            FMul(bcx, lhs, rhs, binop_debug_loc)
+        } else {
+            let (newbcx, res) = with_overflow_check(
+                bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
+            bcx = newbcx;
+            res
+        }
+      }
+      hir::BiDiv => {
+        if is_float {
+            FDiv(bcx, lhs, rhs, binop_debug_loc)
+        } else {
+            // Only zero-check integers; fp /0 is NaN
+            bcx = base::fail_if_zero_or_overflows(bcx,
+                                                  expr_info(binop_expr),
+                                                  op,
+                                                  lhs,
+                                                  rhs,
+                                                  lhs_t);
+            if is_signed {
+                SDiv(bcx, lhs, rhs, binop_debug_loc)
+            } else {
+                UDiv(bcx, lhs, rhs, binop_debug_loc)
+            }
+        }
+      }
+      hir::BiRem => {
+        if is_float {
+            // LLVM currently always lowers `frem` instructions to the
+            // appropriate library calls typically found in libm. Notably, f64
+            // gets wired up to `fmod` and f32 gets wired up to `fmodf`.
+            // Inconveniently for us, 32-bit MSVC does not actually have an
+            // `fmodf` symbol; it's instead just an inline function in a header
+            // that goes up to an f64, uses `fmod`, and then comes back down to
+            // an f32.
+            //
+            // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
+            // still unconditionally lower frem instructions over 32-bit floats
+            // to a call to `fmodf`. To work around this we special case MSVC
+            // 32-bit float rem instructions and instead do the call out to
+            // `fmod` ourselves.
+            //
+            // Note that this is currently duplicated with src/libcore/ops.rs
+            // which does the same thing, and it would be nice to perhaps unify
+            // these two implementations one day! Also note that we call `fmod`
+            // for both 32 and 64-bit floats because if we emit any FRem
+            // instruction at all then LLVM is capable of optimizing it into a
+            // 32-bit FRem (which we're trying to avoid).
+            let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
+                           tcx.sess.target.target.arch == "x86";
+            if use_fmod {
+                let f64t = Type::f64(bcx.ccx());
+                let fty = Type::func(&[f64t, f64t], &f64t);
+                let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty);
+                if lhs_t == tcx.types.f32 {
+                    let lhs = FPExt(bcx, lhs, f64t);
+                    let rhs = FPExt(bcx, rhs, f64t);
+                    let res = Call(bcx, llfn, &[lhs, rhs], binop_debug_loc);
+                    FPTrunc(bcx, res, Type::f32(bcx.ccx()))
+                } else {
+                    Call(bcx, llfn, &[lhs, rhs], binop_debug_loc)
+                }
+            } else {
+                FRem(bcx, lhs, rhs, binop_debug_loc)
+            }
+        } else {
+            // Only zero-check integers; fp %0 is NaN
+            bcx = base::fail_if_zero_or_overflows(bcx,
+                                                  expr_info(binop_expr),
+                                                  op, lhs, rhs, lhs_t);
+            if is_signed {
+                SRem(bcx, lhs, rhs, binop_debug_loc)
+            } else {
+                URem(bcx, lhs, rhs, binop_debug_loc)
+            }
+        }
+      }
+      hir::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
+      hir::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
+      hir::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
+      hir::BiShl => {
+          let (newbcx, res) = with_overflow_check(
+              bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc);
+          bcx = newbcx;
+          res
+      }
+      hir::BiShr => {
+          let (newbcx, res) = with_overflow_check(
+              bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc);
+          bcx = newbcx;
+          res
+      }
+      hir::BiEq | hir::BiNe | hir::BiLt | hir::BiGe | hir::BiLe | hir::BiGt => {
+          base::compare_scalar_types(bcx, lhs, rhs, lhs_t, op.node, binop_debug_loc)
+      }
+      _ => {
+        span_bug!(binop_expr.span, "unexpected binop");
+      }
+    };
+
+    immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
+}
+
+// refinement types would obviate the need for this
+enum lazy_binop_ty {
+    lazy_and,
+    lazy_or,
+}
+
+fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                binop_expr: &hir::Expr,
+                                op: lazy_binop_ty,
+                                a: &hir::Expr,
+                                b: &hir::Expr)
+                                -> DatumBlock<'blk, 'tcx, Expr> {
+    let _icx = push_ctxt("trans_lazy_binop");
+    let binop_ty = expr_ty(bcx, binop_expr);
+    let fcx = bcx.fcx;
+
+    let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a);
+    let lhs = lhs.to_llscalarish(past_lhs);
+
+    if past_lhs.unreachable.get() {
+        return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
+    }
+
+    let join = fcx.new_id_block("join", binop_expr.id);
+    let before_rhs = fcx.new_id_block("before_rhs", b.id);
+
+    match op {
+      lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
+      lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
+    }
+
+    let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
+    let rhs = rhs.to_llscalarish(past_rhs);
+
+    if past_rhs.unreachable.get() {
+        return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
+    }
+
+    Br(past_rhs, join.llbb, DebugLoc::None);
+    let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
+                  &[past_lhs.llbb, past_rhs.llbb]);
+
+    return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
+}
+
+fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                            expr: &hir::Expr,
+                            op: hir::BinOp,
+                            lhs: &hir::Expr,
+                            rhs: &hir::Expr)
+                            -> DatumBlock<'blk, 'tcx, Expr> {
+    let _icx = push_ctxt("trans_binary");
+    let ccx = bcx.ccx();
+
+    // if overloaded, would be RvalueDpsExpr
+    assert!(!ccx.tcx().is_method_call(expr.id));
+
+    match op.node {
+        hir::BiAnd => {
+            trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs)
+        }
+        hir::BiOr => {
+            trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs)
+        }
+        _ => {
+            let mut bcx = bcx;
+            let binop_ty = expr_ty(bcx, expr);
+
+            let lhs = unpack_datum!(bcx, trans(bcx, lhs));
+            let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs"));
+            debug!("trans_binary (expr {}): lhs={:?}", expr.id, lhs);
+            let rhs = unpack_datum!(bcx, trans(bcx, rhs));
+            let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs"));
+            debug!("trans_binary (expr {}): rhs={:?}", expr.id, rhs);
+
+            if type_is_fat_ptr(ccx.tcx(), lhs.ty) {
+                assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty),
+                        "built-in binary operators on fat pointers are homogeneous");
+                assert_eq!(binop_ty, bcx.tcx().types.bool);
+                let val = base::compare_scalar_types(
+                    bcx,
+                    lhs.val,
+                    rhs.val,
+                    lhs.ty,
+                    op.node,
+                    expr.debug_loc());
+                immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
+            } else {
+                assert!(!type_is_fat_ptr(ccx.tcx(), rhs.ty),
+                        "built-in binary operators on fat pointers are homogeneous");
+                trans_scalar_binop(bcx, expr, binop_ty, op, lhs, rhs)
+            }
+        }
+    }
+}
+
+pub fn cast_is_noop<'tcx>(tcx: &TyCtxt<'tcx>,
+                          expr: &hir::Expr,
+                          t_in: Ty<'tcx>,
+                          t_out: Ty<'tcx>)
+                          -> bool {
+    if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) {
+        return true;
+    }
+
+    match (t_in.builtin_deref(true, ty::NoPreference),
+           t_out.builtin_deref(true, ty::NoPreference)) {
+        (Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => {
+            t_in == t_out
+        }
+        _ => {
+            // This condition isn't redundant with the check for CoercionCast:
+            // different types can be substituted into the same type, and
+            // == equality can be overconservative if there are regions.
+            t_in == t_out
+        }
+    }
+}
+
+fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                              expr: &hir::Expr,
+                              id: ast::NodeId)
+                              -> DatumBlock<'blk, 'tcx, Expr>
+{
+    use rustc::ty::cast::CastTy::*;
+    use rustc::ty::cast::IntTy::*;
+
+    fn int_cast(bcx: Block,
+                lldsttype: Type,
+                llsrctype: Type,
+                llsrc: ValueRef,
+                signed: bool)
+                -> ValueRef
+    {
+        let _icx = push_ctxt("int_cast");
+        let srcsz = llsrctype.int_width();
+        let dstsz = lldsttype.int_width();
+        return if dstsz == srcsz {
+            BitCast(bcx, llsrc, lldsttype)
+        } else if srcsz > dstsz {
+            TruncOrBitCast(bcx, llsrc, lldsttype)
+        } else if signed {
+            SExtOrBitCast(bcx, llsrc, lldsttype)
+        } else {
+            ZExtOrBitCast(bcx, llsrc, lldsttype)
+        }
+    }
+
+    fn float_cast(bcx: Block,
+                  lldsttype: Type,
+                  llsrctype: Type,
+                  llsrc: ValueRef)
+                  -> ValueRef
+    {
+        let _icx = push_ctxt("float_cast");
+        let srcsz = llsrctype.float_width();
+        let dstsz = lldsttype.float_width();
+        return if dstsz > srcsz {
+            FPExt(bcx, llsrc, lldsttype)
+        } else if srcsz > dstsz {
+            FPTrunc(bcx, llsrc, lldsttype)
+        } else { llsrc };
+    }
+
+    let _icx = push_ctxt("trans_cast");
+    let mut bcx = bcx;
+    let ccx = bcx.ccx();
+
+    let t_in = expr_ty_adjusted(bcx, expr);
+    let t_out = node_id_type(bcx, id);
+
+    debug!("trans_cast({:?} as {:?})", t_in, t_out);
+    let mut ll_t_in = type_of::immediate_type_of(ccx, t_in);
+    let ll_t_out = type_of::immediate_type_of(ccx, t_out);
+    // Convert the value to be cast into a ValueRef, either by-ref or
+    // by-value as appropriate given its type:
+    let mut datum = unpack_datum!(bcx, trans(bcx, expr));
+
+    let datum_ty = monomorphize_type(bcx, datum.ty);
+
+    if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) {
+        datum.ty = t_out;
+        return DatumBlock::new(bcx, datum);
+    }
+
+    if type_is_fat_ptr(bcx.tcx(), t_in) {
+        assert!(datum.kind.is_by_ref());
+        if type_is_fat_ptr(bcx.tcx(), t_out) {
+            return DatumBlock::new(bcx, Datum::new(
+                PointerCast(bcx, datum.val, ll_t_out.ptr_to()),
+                t_out,
+                Rvalue::new(ByRef)
+            )).to_expr_datumblock();
+        } else {
+            // Return the address
+            return immediate_rvalue_bcx(bcx,
+                                        PointerCast(bcx,
+                                                    Load(bcx, get_dataptr(bcx, datum.val)),
+                                                    ll_t_out),
+                                        t_out).to_expr_datumblock();
+        }
+    }
+
+    let r_t_in = CastTy::from_ty(t_in).expect("bad input type for cast");
+    let r_t_out = CastTy::from_ty(t_out).expect("bad output type for cast");
+
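+    // A C-like enum is cast by first loading its discriminant; the cast then
+    // proceeds as an ordinary integer conversion from the discriminant type.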
+    let (llexpr, signed) = if let Int(CEnum) = r_t_in {
+        let repr = adt::represent_type(ccx, t_in);
+        let datum = unpack_datum!(
+            bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
+        let llexpr_ptr = datum.to_llref();
+        let discr = adt::trans_get_discr(bcx, &repr, llexpr_ptr,
+                                         Some(Type::i64(ccx)), true);
+        ll_t_in = val_ty(discr);
+        (discr, adt::is_discr_signed(&repr))
+    } else {
+        (datum.to_llscalarish(bcx), t_in.is_signed())
+    };
+
+    let newval = match (r_t_in, r_t_out) {
+        (Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => {
+            PointerCast(bcx, llexpr, ll_t_out)
+        }
+        (Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out),
+        (Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out),
+
+        (Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed),
+        (Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr),
+        (Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out),
+        (Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out),
+        (Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out),
+        (Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out),
+
+        _ => span_bug!(expr.span,
+                       "translating unsupported cast: \
+                        {:?} -> {:?}",
+                       t_in,
+                       t_out)
+    };
+    return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
+}
+
+fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                               expr: &hir::Expr,
+                               op: hir::BinOp,
+                               dst: &hir::Expr,
+                               src: &hir::Expr)
+                               -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("trans_assign_op");
+    let mut bcx = bcx;
+
+    debug!("trans_assign_op(expr={:?})", expr);
+
+    // User-defined operator methods cannot be used with `+=` etc right now
+    assert!(!bcx.tcx().is_method_call(expr.id));
+
+    // Evaluate LHS (destination), which should be an lvalue
+    let dst = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"));
+    assert!(!bcx.fcx.type_needs_drop(dst.ty));
+    let lhs = load_ty(bcx, dst.val, dst.ty);
+    let lhs = immediate_rvalue(lhs, dst.ty);
+
+    // Evaluate RHS - FIXME(#28160) this sucks
+    let rhs = unpack_datum!(bcx, trans(bcx, &src));
+    let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "assign_op_rhs"));
+
+    // Perform computation and store the result
+    let result_datum = unpack_datum!(
+        bcx, trans_scalar_binop(bcx, expr, dst.ty, op, lhs, rhs));
+    return result_datum.store_to(bcx, dst.val);
+}
+
+fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                        datum: Datum<'tcx, Expr>,
+                        expr: &hir::Expr)
+                        -> DatumBlock<'blk, 'tcx, Expr> {
+    let mut bcx = bcx;
+
+    // Ensure cleanup of `datum` if not already scheduled and obtain
+    // a "by ref" pointer.
+    let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id));
+
+    // Compute final type. Note that we are loose with the region and
+    // mutability, since those things don't matter in trans.
+    let referent_ty = lv_datum.ty;
+    let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), referent_ty);
+
+    // Construct the resulting datum. The right datum to return here would be an Lvalue datum,
+    // because there is cleanup scheduled and the datum doesn't own the data; but for thin
+    // pointers we micro-optimize it to be an Rvalue datum, which avoids the extra alloca and
+    // level of indirection and has no ill effects.
+    let kind = if type_is_sized(bcx.tcx(), referent_ty) {
+        RvalueExpr(Rvalue::new(ByValue))
+    } else {
+        LvalueExpr(lv_datum.kind)
+    };
+
+    // Get the pointer.
+    let llref = lv_datum.to_llref();
+    DatumBlock::new(bcx, Datum::new(llref, ptr_ty, kind))
+}
+
+fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                              expr: &hir::Expr,
+                              datum: Datum<'tcx, Expr>,
+                              times: usize)
+                              -> DatumBlock<'blk, 'tcx, Expr> {
+    let mut bcx = bcx;
+    let mut datum = datum;
+    for i in 0..times {
+        let method_call = MethodCall::autoderef(expr.id, i as u32);
+        datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call));
+    }
+    DatumBlock { bcx: bcx, datum: datum }
+}
+
+fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                          expr: &hir::Expr,
+                          datum: Datum<'tcx, Expr>,
+                          method_call: MethodCall)
+                          -> DatumBlock<'blk, 'tcx, Expr> {
+    let ccx = bcx.ccx();
+
+    debug!("deref_once(expr={:?}, datum={:?}, method_call={:?})",
+           expr, datum, method_call);
+
+    let mut bcx = bcx;
+
+    // Check for overloaded deref.
+    let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned();
+    let datum = match method {
+        Some(method) => {
+            let method_ty = monomorphize_type(bcx, method.ty);
+
+            // Overloaded. Invoke the deref() method, which basically
+            // converts from the `Smaht<T>` pointer that we have into
+            // a `&T` pointer.  We can then proceed down the normal
+            // path (below) to dereference that `&T`.
+            let datum = if method_call.autoderef == 0 {
+                datum
+            } else {
+                // Always perform an AutoPtr when applying an overloaded auto-deref
+                unpack_datum!(bcx, auto_ref(bcx, datum, expr))
+            };
+
+            let ref_ty = // invoked methods have their LB regions instantiated
+                ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
+            let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
+
+            bcx = Callee::method(bcx, method)
+                .call(bcx, expr.debug_loc(),
+                      ArgOverloadedOp(datum, None),
+                      Some(SaveIn(scratch.val))).bcx;
+            scratch.to_expr_datum()
+        }
+        None => {
+            // Not overloaded. We already have a pointer we know how to deref.
+            datum
+        }
+    };
+
+    let r = match datum.ty.sty {
+        ty::TyBox(content_ty) => {
+            // Make sure we have an lvalue datum here to get the
+            // proper cleanups scheduled
+            let datum = unpack_datum!(
+                bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));
+
+            if type_is_sized(bcx.tcx(), content_ty) {
+                let ptr = load_ty(bcx, datum.val, datum.ty);
+                DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(datum.kind)))
+            } else {
+                // A fat pointer and a DST lvalue have the same representation,
+                // just different types. Since there is no temporary for `*e`
+                // here (because it is unsized), we cannot emulate the sized
+                // object code path for running drop glue and free. Instead,
+                // we schedule cleanup for `e`, turning it into an lvalue.
+
+                let lval = Lvalue::new("expr::deref_once ty_uniq");
+                let datum = Datum::new(datum.val, content_ty, LvalueExpr(lval));
+                DatumBlock::new(bcx, datum)
+            }
+        }
+
+        ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) |
+        ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => {
+            let lval = Lvalue::new("expr::deref_once ptr");
+            if type_is_sized(bcx.tcx(), content_ty) {
+                let ptr = datum.to_llscalarish(bcx);
+
+                // Always generate an lvalue datum, even if datum.mode is
+                // an rvalue.  This is because datum.mode is only an
+                // rvalue for non-owning pointers like &T or *T, in which
+                // case cleanup *is* scheduled elsewhere, by the true
+                // owner (or, in the case of *T, by the user).
+                DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(lval)))
+            } else {
+                // A fat pointer and a DST lvalue have the same representation,
+                // just different types.
+                DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr(lval)))
+            }
+        }
+
+        _ => {
+            span_bug!(
+                expr.span,
+                "deref invoked on expr of invalid type {:?}",
+                datum.ty);
+        }
+    };
+
+    debug!("deref_once(expr={}, method_call={:?}, result={:?})",
+           expr.id, method_call, r.datum);
+
+    return r;
+}
+
+#[derive(Debug)]
+enum OverflowOp {
+    Add,
+    Sub,
+    Mul,
+    Shl,
+    Shr,
+}
+
+impl OverflowOp {
+    fn codegen_strategy(&self) -> OverflowCodegen {
+        use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck};
+        match *self {
+            OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add),
+            OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub),
+            OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul),
+
+            OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl),
+            OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr),
+        }
+    }
+}
+
+enum OverflowCodegen {
+    ViaIntrinsic(OverflowOpViaIntrinsic),
+    ViaInputCheck(OverflowOpViaInputCheck),
+}
+
+enum OverflowOpViaInputCheck { Shl, Shr, }
+
+#[derive(Debug)]
+enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
+
+impl OverflowOpViaIntrinsic {
+    fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef {
+        let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
+        bcx.ccx().get_intrinsic(&name)
+    }
+    fn to_intrinsic_name(&self, tcx: &TyCtxt, ty: Ty) -> &'static str {
+        use syntax::ast::IntTy::*;
+        use syntax::ast::UintTy::*;
+        use rustc::ty::{TyInt, TyUint};
+
+        let new_sty = match ty.sty {
+            TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
+                "32" => TyInt(I32),
+                "64" => TyInt(I64),
+                _ => bug!("unsupported target word size")
+            },
+            TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
+                "32" => TyUint(U32),
+                "64" => TyUint(U64),
+                _ => bug!("unsupported target word size")
+            },
+            ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
+            _ => bug!("tried to get overflow intrinsic for {:?} applied to non-int type",
+                      *self)
+        };
+
+        match *self {
+            OverflowOpViaIntrinsic::Add => match new_sty {
+                TyInt(I8) => "llvm.sadd.with.overflow.i8",
+                TyInt(I16) => "llvm.sadd.with.overflow.i16",
+                TyInt(I32) => "llvm.sadd.with.overflow.i32",
+                TyInt(I64) => "llvm.sadd.with.overflow.i64",
+
+                TyUint(U8) => "llvm.uadd.with.overflow.i8",
+                TyUint(U16) => "llvm.uadd.with.overflow.i16",
+                TyUint(U32) => "llvm.uadd.with.overflow.i32",
+                TyUint(U64) => "llvm.uadd.with.overflow.i64",
+
+                _ => bug!(),
+            },
+            OverflowOpViaIntrinsic::Sub => match new_sty {
+                TyInt(I8) => "llvm.ssub.with.overflow.i8",
+                TyInt(I16) => "llvm.ssub.with.overflow.i16",
+                TyInt(I32) => "llvm.ssub.with.overflow.i32",
+                TyInt(I64) => "llvm.ssub.with.overflow.i64",
+
+                TyUint(U8) => "llvm.usub.with.overflow.i8",
+                TyUint(U16) => "llvm.usub.with.overflow.i16",
+                TyUint(U32) => "llvm.usub.with.overflow.i32",
+                TyUint(U64) => "llvm.usub.with.overflow.i64",
+
+                _ => bug!(),
+            },
+            OverflowOpViaIntrinsic::Mul => match new_sty {
+                TyInt(I8) => "llvm.smul.with.overflow.i8",
+                TyInt(I16) => "llvm.smul.with.overflow.i16",
+                TyInt(I32) => "llvm.smul.with.overflow.i32",
+                TyInt(I64) => "llvm.smul.with.overflow.i64",
+
+                TyUint(U8) => "llvm.umul.with.overflow.i8",
+                TyUint(U16) => "llvm.umul.with.overflow.i16",
+                TyUint(U32) => "llvm.umul.with.overflow.i32",
+                TyUint(U64) => "llvm.umul.with.overflow.i64",
+
+                _ => bug!(),
+            },
+        }
+    }
+
+    fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
+                                        info: NodeIdAndSpan,
+                                        lhs_t: Ty<'tcx>, lhs: ValueRef,
+                                        rhs: ValueRef,
+                                        binop_debug_loc: DebugLoc)
+                                        -> (Block<'blk, 'tcx>, ValueRef) {
+        let llfn = self.to_intrinsic(bcx, lhs_t);
+
+        let val = Call(bcx, llfn, &[lhs, rhs], binop_debug_loc);
+        let result = ExtractValue(bcx, val, 0); // iN operation result
+        let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
+
+        let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
+                        binop_debug_loc);
+
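+        // Tell LLVM via `llvm.expect` that the overflow flag is expected to
+        // be false, so the panic branch below is treated as unlikely.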
+        let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
+        Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)],
+             binop_debug_loc);
+
+        let bcx =
+            base::with_cond(bcx, cond, |bcx|
+                controlflow::trans_fail(bcx, info,
+                    InternedString::new("arithmetic operation overflowed")));
+
+        (bcx, result)
+    }
+}
+
+impl OverflowOpViaInputCheck {
+    fn build_with_input_check<'blk, 'tcx>(&self,
+                                          bcx: Block<'blk, 'tcx>,
+                                          info: NodeIdAndSpan,
+                                          lhs_t: Ty<'tcx>,
+                                          lhs: ValueRef,
+                                          rhs: ValueRef,
+                                          binop_debug_loc: DebugLoc)
+                                          -> (Block<'blk, 'tcx>, ValueRef)
+    {
+        let lhs_llty = val_ty(lhs);
+        let rhs_llty = val_ty(rhs);
+
+        // Panic if any bits are set outside of bits that we always
+        // mask in.
+        //
+        // Note that the mask's value is derived from the LHS type
+        // (since that is where the 32/64 distinction is relevant) but
+        // the mask's type must match the RHS type (since they will
+        // both be fed into an and-binop)
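+        //
+        // For example, with a 64-bit LHS the legal shift amounts are 0 through
+        // 63, so the inverted mask is !63 and any bit set in `rhs & !63` means
+        // the shift amount is out of range.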
+        let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true);
+
+        let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
+        let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc);
+        let result = match *self {
+            OverflowOpViaInputCheck::Shl =>
+                build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
+            OverflowOpViaInputCheck::Shr =>
+                build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
+        };
+        let bcx =
+            base::with_cond(bcx, cond, |bcx|
+                controlflow::trans_fail(bcx, info,
+                    InternedString::new("shift operation overflowed")));
+
+        (bcx, result)
+    }
+}
+
+// Check if an integer or vector contains a nonzero element.
+fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                   value: ValueRef,
+                                   binop_debug_loc: DebugLoc) -> ValueRef {
+    let llty = val_ty(value);
+    let kind = llty.kind();
+    match kind {
+        TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc),
+        TypeKind::Vector => {
+            // Check if any elements of the vector are nonzero by treating
+            // it as a wide integer and checking if the integer is nonzero.
+            let width = llty.vector_length() as u64 * llty.element_type().int_width();
+            let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width));
+            build_nonzero_check(bcx, int_value, binop_debug_loc)
+        },
+        _ => bug!("build_nonzero_check: expected Integer or Vector, found {:?}", kind),
+    }
+}
+
+fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
+                                   lhs_t: Ty<'tcx>, lhs: ValueRef,
+                                   rhs: ValueRef,
+                                   binop_debug_loc: DebugLoc)
+                                   -> (Block<'blk, 'tcx>, ValueRef) {
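+    // In unreachable code there is nothing to compute; just hand back an
+    // undef value of the operand's type.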
+    if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
+    if bcx.ccx().check_overflow() {
+
+        match oop.codegen_strategy() {
+            OverflowCodegen::ViaIntrinsic(oop) =>
+                oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
+            OverflowCodegen::ViaInputCheck(oop) =>
+                oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
+        }
+    } else {
+        let res = match oop {
+            OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
+            OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
+            OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
+
+            OverflowOp::Shl =>
+                build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
+            OverflowOp::Shr =>
+                build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
+        };
+        (bcx, res)
+    }
+}
+
+/// We categorize expressions into four kinds.  The distinction between
+/// lvalue/rvalue is fundamental to the language.  The distinction between the
+/// three kinds of rvalues is an artifact of trans which reflects how we will
+/// generate code for that kind of expression.  See trans/expr.rs for more
+/// information.
+#[derive(Copy, Clone)]
+enum ExprKind {
+    /// A place expression: a path, deref, field access, or index.
+    Lvalue,
+    /// An rvalue constructed into caller-provided memory (destination-passing style).
+    RvalueDps,
+    /// An rvalue that can be represented as an immediate value (a datum).
+    RvalueDatum,
+    /// An rvalue executed only for its effect; its result is unit.
+    RvalueStmt
+}
+
+fn expr_kind(tcx: &TyCtxt, expr: &hir::Expr) -> ExprKind {
+    if tcx.is_method_call(expr.id) {
+        // Overloaded operations are generally calls, and hence they are
+        // generated via DPS, but there are a few exceptions:
+        return match expr.node {
+            // `a += b` has a unit result.
+            hir::ExprAssignOp(..) => ExprKind::RvalueStmt,
+
+            // the deref method invoked for `*a` always yields an `&T`
+            hir::ExprUnary(hir::UnDeref, _) => ExprKind::Lvalue,
+
+            // the index method invoked for `a[i]` always yields an `&T`
+            hir::ExprIndex(..) => ExprKind::Lvalue,
+
+            // in the general case, result could be any type, use DPS
+            _ => ExprKind::RvalueDps
+        };
+    }
+
+    match expr.node {
+        hir::ExprPath(..) => {
+            match tcx.resolve_expr(expr) {
+                // Put functions and ctors with the ADTs, as they
+                // are zero-sized, so DPS is the cheapest option.
+                Def::Struct(..) | Def::Variant(..) |
+                Def::Fn(..) | Def::Method(..) => {
+                    ExprKind::RvalueDps
+                }
+
+                // Note: there is actually a good case to be made that
+                // DefArgs, particularly those of immediate type, ought to
+                // be considered rvalues.
+                Def::Static(..) |
+                Def::Upvar(..) |
+                Def::Local(..) => ExprKind::Lvalue,
+
+                Def::Const(..) |
+                Def::AssociatedConst(..) => ExprKind::RvalueDatum,
+
+                def => {
+                    span_bug!(
+                        expr.span,
+                        "uncategorized def for expr {}: {:?}",
+                        expr.id,
+                        def);
+                }
+            }
+        }
+
+        hir::ExprType(ref expr, _) => {
+            expr_kind(tcx, expr)
+        }
+
+        hir::ExprUnary(hir::UnDeref, _) |
+        hir::ExprField(..) |
+        hir::ExprTupField(..) |
+        hir::ExprIndex(..) => {
+            ExprKind::Lvalue
+        }
+
+        hir::ExprCall(..) |
+        hir::ExprMethodCall(..) |
+        hir::ExprStruct(..) |
+        hir::ExprTup(..) |
+        hir::ExprIf(..) |
+        hir::ExprMatch(..) |
+        hir::ExprClosure(..) |
+        hir::ExprBlock(..) |
+        hir::ExprRepeat(..) |
+        hir::ExprVec(..) => {
+            ExprKind::RvalueDps
+        }
+
+        hir::ExprLit(ref lit) if lit.node.is_str() => {
+            ExprKind::RvalueDps
+        }
+
+        hir::ExprBreak(..) |
+        hir::ExprAgain(..) |
+        hir::ExprRet(..) |
+        hir::ExprWhile(..) |
+        hir::ExprLoop(..) |
+        hir::ExprAssign(..) |
+        hir::ExprInlineAsm(..) |
+        hir::ExprAssignOp(..) => {
+            ExprKind::RvalueStmt
+        }
+
+        hir::ExprLit(_) | // Note: LitStr is carved out above
+        hir::ExprUnary(..) |
+        hir::ExprBox(_) |
+        hir::ExprAddrOf(..) |
+        hir::ExprBinary(..) |
+        hir::ExprCast(..) => {
+            ExprKind::RvalueDatum
+        }
+    }
+}
diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs
new file mode 100644 (file)
index 0000000..5676024
--- /dev/null
@@ -0,0 +1,604 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Code relating to drop glue.
+
+use std;
+
+use back::symbol_names;
+use llvm;
+use llvm::{ValueRef, get_param};
+use middle::lang_items::ExchangeFreeFnLangItem;
+use rustc::ty::subst::{Substs};
+use rustc::traits;
+use rustc::ty::{self, Ty, TyCtxt};
+use abi::{Abi, FnType};
+use adt;
+use adt::GetDtorType; // for tcx.dtor_type()
+use base::*;
+use build::*;
+use callee::{Callee, ArgVals};
+use cleanup;
+use cleanup::CleanupMethods;
+use collector::{self, TransItem};
+use common::*;
+use debuginfo::DebugLoc;
+use declare;
+use expr;
+use machine::*;
+use monomorphize;
+use type_of::{type_of, sizing_type_of, align_of};
+use type_::Type;
+use value::Value;
+
+use arena::TypedArena;
+use syntax::codemap::DUMMY_SP;
+
+pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                           v: ValueRef,
+                                           size: ValueRef,
+                                           align: ValueRef,
+                                           debug_loc: DebugLoc)
+                                           -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("trans_exchange_free");
+
+    let def_id = langcall(bcx, None, "", ExchangeFreeFnLangItem);
+    let args = [PointerCast(bcx, v, Type::i8p(bcx.ccx())), size, align];
+    Callee::def(bcx.ccx(), def_id, bcx.tcx().mk_substs(Substs::empty()))
+        .call(bcx, debug_loc, ArgVals(&args), None).bcx
+}
+
+pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+                                       v: ValueRef,
+                                       size: u64,
+                                       align: u32,
+                                       debug_loc: DebugLoc)
+                                       -> Block<'blk, 'tcx> {
+    trans_exchange_free_dyn(cx,
+                            v,
+                            C_uint(cx.ccx(), size),
+                            C_uint(cx.ccx(), align),
+                            debug_loc)
+}
+
+pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                          ptr: ValueRef,
+                                          content_ty: Ty<'tcx>,
+                                          debug_loc: DebugLoc)
+                                          -> Block<'blk, 'tcx> {
+    assert!(type_is_sized(bcx.ccx().tcx(), content_ty));
+    let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
+    let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
+
+    // `Box<ZeroSizeType>` does not allocate.
+    if content_size != 0 {
+        let content_align = align_of(bcx.ccx(), content_ty);
+        trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc)
+    } else {
+        bcx
+    }
+}
+
+pub fn type_needs_drop<'tcx>(tcx: &TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
+    tcx.type_needs_drop_given_env(ty, &tcx.empty_parameter_environment())
+}
+
+pub fn get_drop_glue_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                    t: Ty<'tcx>) -> Ty<'tcx> {
+    let tcx = ccx.tcx();
+    // Even if there is no dtor for t, there might be one deeper down and we
+    // might need to pass in the vtable ptr.
+    if !type_is_sized(tcx, t) {
+        return t
+    }
+
+    // FIXME (#22815): note that type_needs_drop conservatively
+    // approximates in some cases and may say a type expression
+    // requires drop glue when it actually does not.
+    //
+    // (In this case it is not clear whether any harm is done, i.e.
+    // erroneously returning `t` in some cases where we could have
+    // returned `tcx.types.i8` does not appear unsound. The impact on
+    // code quality is unknown at this time.)
+
+    if !type_needs_drop(&tcx, t) {
+        return tcx.types.i8;
+    }
+    match t.sty {
+        ty::TyBox(typ) if !type_needs_drop(&tcx, typ)
+                         && type_is_sized(tcx, typ) => {
+            let llty = sizing_type_of(ccx, typ);
+            // `Box<ZeroSizeType>` does not allocate.
+            if llsize_of_alloc(ccx, llty) == 0 {
+                tcx.types.i8
+            } else {
+                t
+            }
+        }
+        _ => t
+    }
+}
+
+pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                           v: ValueRef,
+                           t: Ty<'tcx>,
+                           debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
+    drop_ty_core(bcx, v, t, debug_loc, false, None)
+}
+
+pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                v: ValueRef,
+                                t: Ty<'tcx>,
+                                debug_loc: DebugLoc,
+                                skip_dtor: bool,
+                                drop_hint: Option<cleanup::DropHintValue>)
+                                -> Block<'blk, 'tcx> {
+    // NB: v is an *alias* of type t here, not a direct value.
+    debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint);
+    let _icx = push_ctxt("drop_ty");
+    let mut bcx = bcx;
+    if bcx.fcx.type_needs_drop(t) {
+        let ccx = bcx.ccx();
+        let g = if skip_dtor {
+            DropGlueKind::TyContents(t)
+        } else {
+            DropGlueKind::Ty(t)
+        };
+        let glue = get_drop_glue_core(ccx, g);
+        let glue_type = get_drop_glue_type(ccx, t);
+        let ptr = if glue_type != t {
+            PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to())
+        } else {
+            v
+        };
+
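+        // When a drop-hint flag is available, only call the drop glue if the
+        // hint records that the value has not already been moved out.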
+        match drop_hint {
+            Some(drop_hint) => {
+                let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8);
+                let moved_val =
+                    C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false);
+                let may_need_drop =
+                    ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None);
+                bcx = with_cond(bcx, may_need_drop, |cx| {
+                    Call(cx, glue, &[ptr], debug_loc);
+                    cx
+                })
+            }
+            None => {
+                // No drop-hint ==> call standard drop glue
+                Call(bcx, glue, &[ptr], debug_loc);
+            }
+        }
+    }
+    bcx
+}
+
+pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                     v: ValueRef,
+                                     t: Ty<'tcx>,
+                                     debug_loc: DebugLoc,
+                                     skip_dtor: bool)
+                                     -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("drop_ty_immediate");
+    let vp = alloc_ty(bcx, t, "");
+    call_lifetime_start(bcx, vp);
+    store_ty(bcx, v, vp, t);
+    let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None);
+    call_lifetime_end(bcx, vp);
+    bcx
+}
+
+pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
+    get_drop_glue_core(ccx, DropGlueKind::Ty(t))
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub enum DropGlueKind<'tcx> {
+    /// The normal path; runs the dtor, and then recurs on the contents
+    Ty(Ty<'tcx>),
+    /// Skips the dtor, if any, for ty; drops the contents directly.
+    /// Note that the dtor is only skipped at the most *shallow*
+    /// level, namely, an `impl Drop for Ty` itself. So, for example,
+    /// if Ty is Newtype(S) then only the Drop impl for Newtype itself
+    /// will be skipped, while the Drop impl for S, if any, will be
+    /// invoked.
+    TyContents(Ty<'tcx>),
+}
+
+impl<'tcx> DropGlueKind<'tcx> {
+    fn ty(&self) -> Ty<'tcx> {
+        match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t }
+    }
+
+    fn map_ty<F>(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
+    {
+        match *self {
+            DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)),
+            DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)),
+        }
+    }
+}
+
+fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                g: DropGlueKind<'tcx>) -> ValueRef {
+    debug!("make drop glue for {:?}", g);
+    let g = g.map_ty(|t| get_drop_glue_type(ccx, t));
+    debug!("drop glue type {:?}", g);
+    match ccx.drop_glues().borrow().get(&g) {
+        Some(&glue) => return glue,
+        _ => { }
+    }
+    let t = g.ty();
+
+    let tcx = ccx.tcx();
+    let sig = ty::FnSig {
+        inputs: vec![tcx.mk_mut_ptr(tcx.types.i8)],
+        output: ty::FnOutput::FnConverging(tcx.mk_nil()),
+        variadic: false,
+    };
+    // Create a FnType for fn(*mut i8) and substitute the real type in
+    // later - that prevents FnType from splitting fat pointers up.
+    let mut fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
+    fn_ty.args[0].original_ty = type_of(ccx, t).ptr_to();
+    let llfnty = fn_ty.llvm_type(ccx);
+
+    // To avoid infinite recursion, don't `make_drop_glue` until after we've
+    // added the entry to the `drop_glues` cache.
+    if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) {
+        let llfn = declare::declare_cfn(ccx, &old_sym, llfnty);
+        ccx.drop_glues().borrow_mut().insert(g, llfn);
+        return llfn;
+    };
+
+    let suffix = match g {
+        DropGlueKind::Ty(_) => "drop",
+        DropGlueKind::TyContents(_) => "drop_contents",
+    };
+
+    let fn_nm = symbol_names::internal_name_from_type_and_suffix(ccx, t, suffix);
+    assert!(declare::get_defined_value(ccx, &fn_nm).is_none());
+    let llfn = declare::declare_cfn(ccx, &fn_nm, llfnty);
+    ccx.available_drop_glues().borrow_mut().insert(g, fn_nm);
+    ccx.drop_glues().borrow_mut().insert(g, llfn);
+
+    let _s = StatRecorder::new(ccx, format!("drop {:?}", t));
+
+    let empty_substs = ccx.tcx().mk_substs(Substs::empty());
+    let (arena, fcx): (TypedArena<_>, FunctionContext);
+    arena = TypedArena::new();
+    fcx = FunctionContext::new(ccx, llfn, fn_ty, None, empty_substs, &arena);
+
+    let bcx = fcx.init(false, None);
+
+    update_linkage(ccx, llfn, None, OriginalTranslation);
+
+    ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
+    // All glue functions take values passed *by alias*; this is a
+    // requirement since in many contexts glue is invoked indirectly and
+    // the caller has no idea if it's dealing with something that can be
+    // passed by value.
+    //
+    // llfn is expected to be declared to take a parameter of the appropriate
+    // type, so we don't need to explicitly cast the function parameter.
+
+    let bcx = make_drop_glue(bcx, get_param(llfn, 0), g);
+    fcx.finish(bcx, DebugLoc::None);
+
+    llfn
+}
+
+fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+                                      t: Ty<'tcx>,
+                                      struct_data: ValueRef)
+                                      -> Block<'blk, 'tcx> {
+    assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized");
+
+    let repr = adt::represent_type(bcx.ccx(), t);
+    let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &repr, struct_data));
+    let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type());
+    let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
+    let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false);
+
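+    // With drop-flag sanity checking enabled, trap (via llvm.debugtrap) if the
+    // flag holds neither the "needed" nor the "done" value, which would
+    // indicate a corrupted drop flag.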
+    let bcx = if !bcx.ccx().check_drop_flag_for_sanity() {
+        bcx
+    } else {
+        let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
+        let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false);
+        let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None);
+        let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None);
+        let drop_flag_neither_initialized_nor_cleared =
+            And(bcx, not_init, not_done, DebugLoc::None);
+        with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
+            let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
+            Call(cx, llfn, &[], DebugLoc::None);
+            cx
+        })
+    };
+
+    let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None);
+    with_cond(bcx, drop_flag_dtor_needed, |cx| {
+        trans_struct_drop(cx, t, struct_data)
+    })
+}
+fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                 t: Ty<'tcx>,
+                                 v0: ValueRef)
+                                 -> Block<'blk, 'tcx>
+{
+    debug!("trans_struct_drop t: {}", t);
+    let tcx = bcx.tcx();
+    let mut bcx = bcx;
+
+    let def = t.ty_adt_def().unwrap();
+
+    // Be sure to put the contents into a scope so we can use an invoke
+    // instruction to call the user destructor but still call the field
+    // destructors if the user destructor panics.
+    //
+    // FIXME (#14875) panic-in-drop semantics might be unsupported; we
+    // might well consider changing below to more direct code.
+    let contents_scope = bcx.fcx.push_custom_cleanup_scope();
+
+    // Issue #23611: schedule cleanup of contents, re-inspecting the
+    // discriminant (if any) in case of variant swap in drop code.
+    bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t);
+
+    let (sized_args, unsized_args);
+    let args: &[ValueRef] = if type_is_sized(tcx, t) {
+        sized_args = [v0];
+        &sized_args
+    } else {
+        unsized_args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_meta(bcx, v0))];
+        &unsized_args
+    };
+
+    let trait_ref = ty::Binder(ty::TraitRef {
+        def_id: tcx.lang_items.drop_trait().unwrap(),
+        substs: tcx.mk_substs(Substs::empty().with_self_ty(t))
+    });
+    let vtbl = match fulfill_obligation(bcx.ccx(), DUMMY_SP, trait_ref) {
+        traits::VtableImpl(data) => data,
+        _ => bug!("dtor for {:?} is not an impl???", t)
+    };
+    let dtor_did = def.destructor().unwrap();
+    bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs)
+        .call(bcx, DebugLoc::None, ArgVals(args), None).bcx;
+
+    bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
+}
+
+pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
+                                         t: Ty<'tcx>, info: ValueRef)
+                                         -> (ValueRef, ValueRef) {
+    debug!("calculate size of DST: {}; with lost info: {:?}",
+           t, Value(info));
+    if type_is_sized(bcx.tcx(), t) {
+        let sizing_type = sizing_type_of(bcx.ccx(), t);
+        let size = llsize_of_alloc(bcx.ccx(), sizing_type);
+        let align = align_of(bcx.ccx(), t);
+        debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}",
+               t, Value(info), size, align);
+        let size = C_uint(bcx.ccx(), size);
+        let align = C_uint(bcx.ccx(), align);
+        return (size, align);
+    }
+    if bcx.is_unreachable() {
+        let llty = Type::int(bcx.ccx());
+        return (C_undef(llty), C_undef(llty));
+    }
+    match t.sty {
+        ty::TyStruct(def, substs) => {
+            let ccx = bcx.ccx();
+            // First get the size of all statically known fields.
+            // Don't use type_of::sizing_type_of because that expects t to be sized.
+            assert!(!t.is_simd());
+            let repr = adt::represent_type(ccx, t);
+            let sizing_type = adt::sizing_type_context_of(ccx, &repr, true);
+            debug!("DST {} sizing_type: {:?}", t, sizing_type);
+            let sized_size = llsize_of_alloc(ccx, sizing_type.prefix());
+            let sized_align = llalign_of_min(ccx, sizing_type.prefix());
+            debug!("DST {} statically sized prefix size: {} align: {}",
+                   t, sized_size, sized_align);
+            let sized_size = C_uint(ccx, sized_size);
+            let sized_align = C_uint(ccx, sized_align);
+
+            // Recurse to get the size of the dynamically sized field (must be
+            // the last field).
+            let last_field = def.struct_variant().fields.last().unwrap();
+            let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
+            let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
+
+            // FIXME (#26403, #27023): We should be adding padding
+            // to `sized_size` (to accommodate the `unsized_align`
+            // required of the unsized field that follows) before
+            // summing it with `sized_size`. (Note that since #26403
+            // is unfixed, we do not yet add the necessary padding
+            // here. But this is where the add would go.)
+
+            // Return the sum of sizes and max of aligns.
+            let mut size = bcx.add(sized_size, unsized_size);
+
+            // Issue #27023: If there is a drop flag, *now* we add 1
+            // to the size.  (We can do this without adding any
+            // padding because drop flags do not have any alignment
+            // constraints.)
+            if sizing_type.needs_drop_flag() {
+                size = bcx.add(size, C_uint(bcx.ccx(), 1_u64));
+            }
+
+            // Choose max of two known alignments (combined value must
+            // be aligned according to more restrictive of the two).
+            let align = match (const_to_opt_uint(sized_align), const_to_opt_uint(unsized_align)) {
+                (Some(sized_align), Some(unsized_align)) => {
+                    // If both alignments are constant (the sized_align should always be), then
+                    // pick the correct alignment statically.
+                    C_uint(ccx, std::cmp::max(sized_align, unsized_align))
+                }
+                _ => bcx.select(bcx.icmp(llvm::IntUGT, sized_align, unsized_align),
+                                sized_align,
+                                unsized_align)
+            };
+
+            // Issue #27023: must add any necessary padding to `size`
+            // (to make it a multiple of `align`) before returning it.
+            //
+            // Namely, the returned size should be, in C notation:
+            //
+            //   `size + ((size & (align-1)) ? align : 0)`
+            //
+            // emulated via the semi-standard fast bit trick:
+            //
+            //   `(size + (align-1)) & -align`
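+            //
+            //   e.g. size = 5, align = 4:  (5 + 3) & -4 = 8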
+
+            let addend = bcx.sub(align, C_uint(bcx.ccx(), 1_u64));
+            let size = bcx.and(bcx.add(size, addend), bcx.neg(align));
+
+            (size, align)
+        }
+        ty::TyTrait(..) => {
+            // info points to the vtable and the second entry in the vtable is the
+            // dynamic size of the object.
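+            // (The vtable is laid out as [drop glue, size, align, methods...],
+            // so indices 1 and 2 hold the size and alignment.)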
+            let info = bcx.pointercast(info, Type::int(bcx.ccx()).ptr_to());
+            let size_ptr = bcx.gepi(info, &[1]);
+            let align_ptr = bcx.gepi(info, &[2]);
+            (bcx.load(size_ptr), bcx.load(align_ptr))
+        }
+        ty::TySlice(_) | ty::TyStr => {
+            let unit_ty = t.sequence_element_type(bcx.tcx());
+            // The info in this case is the length of the str, so the size is that
+            // times the unit size.
+            let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
+            let unit_align = llalign_of_min(bcx.ccx(), llunit_ty);
+            let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
+            (bcx.mul(info, C_uint(bcx.ccx(), unit_size)),
+             C_uint(bcx.ccx(), unit_align))
+        }
+        _ => bug!("Unexpected unsized type, found {}", t)
+    }
+}
+
+fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueKind<'tcx>)
+                              -> Block<'blk, 'tcx> {
+    let t = g.ty();
+
+    if collector::collecting_debug_information(bcx.ccx()) {
+        bcx.ccx()
+           .record_translation_item_as_generated(TransItem::DropGlue(bcx.tcx()
+                                                                        .erase_regions(&t)));
+    }
+
+    let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true };
+    // NB: v0 is an *alias* of type t here, not a direct value.
+    let _icx = push_ctxt("make_drop_glue");
+
+    // Only drop the value when it ... well, we used to check for
+    // non-null, (and maybe we need to continue doing so), but we now
+    // must definitely check for special bit-patterns corresponding to
+    // the special dtor markings.
+
+    let inttype = Type::int(bcx.ccx());
+    let dropped_pattern = C_integral(inttype, adt::DTOR_DONE_U64, false);
+
+    match t.sty {
+        ty::TyBox(content_ty) => {
+            // Support for TyBox is built-in and its drop glue is
+            // special. It may move to library and have Drop impl. As
+            // a safe-guard, assert TyBox not used with TyContents.
+            assert!(!skip_dtor);
+            if !type_is_sized(bcx.tcx(), content_ty) {
+                let llval = expr::get_dataptr(bcx, v0);
+                let llbox = Load(bcx, llval);
+                let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx()));
+                let drop_flag_not_dropped_already =
+                    ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
+                with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
+                    let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
+                    let info = expr::get_meta(bcx, v0);
+                    let info = Load(bcx, info);
+                    let (llsize, llalign) =
+                        size_and_align_of_dst(&bcx.build(), content_ty, info);
+
+                    // `Box<ZeroSizeType>` does not allocate.
+                    let needs_free = ICmp(bcx,
+                                          llvm::IntNE,
+                                          llsize,
+                                          C_uint(bcx.ccx(), 0u64),
+                                          DebugLoc::None);
+                    with_cond(bcx, needs_free, |bcx| {
+                        trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
+                    })
+                })
+            } else {
+                let llval = v0;
+                let llbox = Load(bcx, llval);
+                let llbox_as_usize = PtrToInt(bcx, llbox, inttype);
+                let drop_flag_not_dropped_already =
+                    ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
+                with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
+                    let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
+                    trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
+                })
+            }
+        }
+        ty::TyStruct(def, _) | ty::TyEnum(def, _) => {
+            match (def.dtor_kind(), skip_dtor) {
+                (ty::TraitDtor(true), false) => {
+                    // FIXME(16758) Since the struct is unsized, it is hard to
+                    // find the drop flag (which is at the end of the struct).
+                    // Let's just ignore the flag and pretend everything will be
+                    // OK.
+                    if type_is_sized(bcx.tcx(), t) {
+                        trans_struct_drop_flag(bcx, t, v0)
+                    } else {
+                        // Give the user a heads up that we are doing something
+                        // stupid and dangerous.
+                        bcx.sess().warn(&format!("Ignoring drop flag in destructor for {} \
+                                                 because the struct is unsized. See issue \
+                                                 #16758", t));
+                        trans_struct_drop(bcx, t, v0)
+                    }
+                }
+                (ty::TraitDtor(false), false) => {
+                    trans_struct_drop(bcx, t, v0)
+                }
+                (ty::NoDtor, _) | (_, true) => {
+                    // No dtor? Just the default case
+                    iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
+                }
+            }
+        }
+        ty::TyTrait(..) => {
+            // No support in vtable for distinguishing destroying with
+            // versus without calling Drop::drop. Assert caller is
+            // okay with always calling the Drop impl, if any.
+            assert!(!skip_dtor);
+            let data_ptr = expr::get_dataptr(bcx, v0);
+            let vtable_ptr = Load(bcx, expr::get_meta(bcx, v0));
+            let dtor = Load(bcx, vtable_ptr);
+            Call(bcx,
+                 dtor,
+                 &[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))],
+                 DebugLoc::None);
+            bcx
+        }
+        _ => {
+            if bcx.fcx.type_needs_drop(t) {
+                iter_structural_ty(bcx,
+                                   v0,
+                                   t,
+                                   |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
+            } else {
+                bcx
+            }
+        }
+    }
+}
diff --git a/src/librustc_trans/inline.rs b/src/librustc_trans/inline.rs
new file mode 100644 (file)
index 0000000..1eff09d
--- /dev/null
@@ -0,0 +1,186 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm::{AvailableExternallyLinkage, InternalLinkage, SetLinkage};
+use middle::cstore::{CrateStore, FoundAst, InlinedItem};
+use rustc::hir::def_id::DefId;
+use rustc::ty::subst::Substs;
+use base::{push_ctxt, trans_item, trans_fn};
+use callee::Callee;
+use common::*;
+
+use rustc::dep_graph::DepNode;
+use rustc::hir;
+
+fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option<DefId> {
+    debug!("instantiate_inline({:?})", fn_id);
+    let _icx = push_ctxt("instantiate_inline");
+    let tcx = ccx.tcx();
+    let _task = tcx.dep_graph.in_task(DepNode::TransInlinedItem(fn_id));
+
+    match ccx.external().borrow().get(&fn_id) {
+        Some(&Some(node_id)) => {
+            // Already inline
+            debug!("instantiate_inline({}): already inline as node id {}",
+                   tcx.item_path_str(fn_id), node_id);
+            let node_def_id = tcx.map.local_def_id(node_id);
+            return Some(node_def_id);
+        }
+        Some(&None) => {
+            return None; // Not inlinable
+        }
+        None => {
+            // Not seen yet
+        }
+    }
+
+    let inlined = tcx.sess.cstore.maybe_get_item_ast(tcx, fn_id);
+    let inline_id = match inlined {
+        FoundAst::NotFound => {
+            ccx.external().borrow_mut().insert(fn_id, None);
+            return None;
+        }
+        FoundAst::Found(&InlinedItem::Item(ref item)) => {
+            ccx.external().borrow_mut().insert(fn_id, Some(item.id));
+            ccx.external_srcs().borrow_mut().insert(item.id, fn_id);
+
+            ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
+            trans_item(ccx, item);
+
+            if let hir::ItemFn(_, _, _, _, ref generics, _) = item.node {
+                // Generics have no symbol, so they can't be given any linkage.
+                if !generics.is_type_parameterized() {
+                    let linkage = if ccx.sess().opts.cg.codegen_units == 1 {
+                        // We could use AvailableExternallyLinkage here,
+                        // but InternalLinkage allows LLVM to optimize more
+                        // aggressively (at the cost of sometimes
+                        // duplicating code).
+                        InternalLinkage
+                    } else {
+                        // With multiple compilation units, duplicated code
+                        // is more of a problem.  Also, `codegen_units > 1`
+                        // means the user is okay with losing some
+                        // performance.
+                        AvailableExternallyLinkage
+                    };
+                    let empty_substs = tcx.mk_substs(Substs::empty());
+                    let def_id = tcx.map.local_def_id(item.id);
+                    let llfn = Callee::def(ccx, def_id, empty_substs).reify(ccx).val;
+                    SetLinkage(llfn, linkage);
+                }
+            }
+
+            item.id
+        }
+        FoundAst::Found(&InlinedItem::Foreign(ref item)) => {
+            ccx.external().borrow_mut().insert(fn_id, Some(item.id));
+            ccx.external_srcs().borrow_mut().insert(item.id, fn_id);
+            item.id
+        }
+        FoundAst::FoundParent(parent_id, item) => {
+            ccx.external().borrow_mut().insert(parent_id, Some(item.id));
+            ccx.external_srcs().borrow_mut().insert(item.id, parent_id);
+
+            let mut my_id = 0;
+            match item.node {
+                hir::ItemEnum(ref ast_def, _) => {
+                    let ast_vs = &ast_def.variants;
+                    let ty_vs = &tcx.lookup_adt_def(parent_id).variants;
+                    assert_eq!(ast_vs.len(), ty_vs.len());
+                    for (ast_v, ty_v) in ast_vs.iter().zip(ty_vs.iter()) {
+                        if ty_v.did == fn_id { my_id = ast_v.node.data.id(); }
+                        ccx.external().borrow_mut().insert(ty_v.did, Some(ast_v.node.data.id()));
+                        ccx.external_srcs().borrow_mut().insert(ast_v.node.data.id(), ty_v.did);
+                    }
+                }
+                hir::ItemStruct(ref struct_def, _) => {
+                    if struct_def.is_struct() {
+                        bug!("instantiate_inline: called on a \
+                              non-tuple struct")
+                    } else {
+                        ccx.external().borrow_mut().insert(fn_id, Some(struct_def.id()));
+                        ccx.external_srcs().borrow_mut().insert(struct_def.id(), fn_id);
+                        my_id = struct_def.id();
+                    }
+                }
+                _ => bug!("instantiate_inline: item has a \
+                           non-enum, non-struct parent")
+            }
+            my_id
+        }
+        FoundAst::Found(&InlinedItem::TraitItem(_, ref trait_item)) => {
+            ccx.external().borrow_mut().insert(fn_id, Some(trait_item.id));
+            ccx.external_srcs().borrow_mut().insert(trait_item.id, fn_id);
+
+            ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
+
+            // Associated consts already have to be evaluated in `typeck`, so
+            // the logic to do that already exists in `middle`. In order to
+            // reuse that code, it needs to be able to look up the traits for
+            // inlined items.
+            let ty_trait_item = tcx.impl_or_trait_item(fn_id).clone();
+            let trait_item_def_id = tcx.map.local_def_id(trait_item.id);
+            tcx.impl_or_trait_items.borrow_mut()
+               .insert(trait_item_def_id, ty_trait_item);
+
+            // If this is a default method, we can't look up the
+            // impl type. But we aren't going to translate it anyway,
+            // so don't.
+            trait_item.id
+        }
+        FoundAst::Found(&InlinedItem::ImplItem(impl_did, ref impl_item)) => {
+            ccx.external().borrow_mut().insert(fn_id, Some(impl_item.id));
+            ccx.external_srcs().borrow_mut().insert(impl_item.id, fn_id);
+
+            ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
+
+            // Translate monomorphic impl methods immediately.
+            if let hir::ImplItemKind::Method(ref sig, ref body) = impl_item.node {
+                let impl_tpt = tcx.lookup_item_type(impl_did);
+                if impl_tpt.generics.types.is_empty() &&
+                        sig.generics.ty_params.is_empty() {
+                    let def_id = tcx.map.local_def_id(impl_item.id);
+                    let empty_substs = ccx.empty_substs_for_def_id(def_id);
+                    let llfn = Callee::def(ccx, def_id, empty_substs).reify(ccx).val;
+                    trans_fn(ccx,
+                             &sig.decl,
+                             body,
+                             llfn,
+                             empty_substs,
+                             impl_item.id);
+                    // See linkage comments on items.
+                    if ccx.sess().opts.cg.codegen_units == 1 {
+                        SetLinkage(llfn, InternalLinkage);
+                    } else {
+                        SetLinkage(llfn, AvailableExternallyLinkage);
+                    }
+                }
+            }
+
+            impl_item.id
+        }
+    };
+
+    let inline_def_id = tcx.map.local_def_id(inline_id);
+    Some(inline_def_id)
+}
+
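+/// Return a locally translatable `DefId` for `fn_id`: the id itself if it is
+/// already local, otherwise the id of an inlined copy, if one can be made.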
+pub fn get_local_instance(ccx: &CrateContext, fn_id: DefId)
+    -> Option<DefId> {
+    if let Some(_) = ccx.tcx().map.as_local_node_id(fn_id) {
+        Some(fn_id)
+    } else {
+        instantiate_inline(ccx, fn_id)
+    }
+}
+
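+/// Like `get_local_instance`, but falls back to the original external `fn_id`
+/// when no local copy can be instantiated.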
+pub fn maybe_instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> DefId {
+    get_local_instance(ccx, fn_id).unwrap_or(fn_id)
+}
diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs
new file mode 100644
index 0000000..0f9b04c
--- /dev/null
+++ b/src/librustc_trans/intrinsic.rs
@@ -0,0 +1,1690 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(non_upper_case_globals)]
+
+use arena::TypedArena;
+use intrinsics::{self, Intrinsic};
+use libc;
+use llvm;
+use llvm::{ValueRef, TypeKind};
+use rustc::infer;
+use rustc::ty::subst;
+use rustc::ty::subst::FnSpace;
+use abi::{Abi, FnType};
+use adt;
+use attributes;
+use base::*;
+use build::*;
+use callee::{self, Callee};
+use cleanup;
+use cleanup::CleanupMethods;
+use common::*;
+use consts;
+use datum::*;
+use debuginfo::DebugLoc;
+use declare;
+use expr;
+use glue;
+use type_of;
+use machine;
+use type_::Type;
+use rustc::ty::{self, Ty};
+use Disr;
+use rustc::ty::subst::Substs;
+use rustc::hir;
+use syntax::ast;
+use syntax::ptr::P;
+use syntax::parse::token;
+
+use rustc::session::Session;
+use syntax::codemap::{Span, DUMMY_SP};
+
+use std::cmp::Ordering;
+
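+/// Map a "simple" intrinsic, one that lowers to a plain call of a single LLVM
+/// intrinsic, to its LLVM counterpart. Returns `None` for anything else.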
+fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
+    let llvm_name = match name {
+        "sqrtf32" => "llvm.sqrt.f32",
+        "sqrtf64" => "llvm.sqrt.f64",
+        "powif32" => "llvm.powi.f32",
+        "powif64" => "llvm.powi.f64",
+        "sinf32" => "llvm.sin.f32",
+        "sinf64" => "llvm.sin.f64",
+        "cosf32" => "llvm.cos.f32",
+        "cosf64" => "llvm.cos.f64",
+        "powf32" => "llvm.pow.f32",
+        "powf64" => "llvm.pow.f64",
+        "expf32" => "llvm.exp.f32",
+        "expf64" => "llvm.exp.f64",
+        "exp2f32" => "llvm.exp2.f32",
+        "exp2f64" => "llvm.exp2.f64",
+        "logf32" => "llvm.log.f32",
+        "logf64" => "llvm.log.f64",
+        "log10f32" => "llvm.log10.f32",
+        "log10f64" => "llvm.log10.f64",
+        "log2f32" => "llvm.log2.f32",
+        "log2f64" => "llvm.log2.f64",
+        "fmaf32" => "llvm.fma.f32",
+        "fmaf64" => "llvm.fma.f64",
+        "fabsf32" => "llvm.fabs.f32",
+        "fabsf64" => "llvm.fabs.f64",
+        "copysignf32" => "llvm.copysign.f32",
+        "copysignf64" => "llvm.copysign.f64",
+        "floorf32" => "llvm.floor.f32",
+        "floorf64" => "llvm.floor.f64",
+        "ceilf32" => "llvm.ceil.f32",
+        "ceilf64" => "llvm.ceil.f64",
+        "truncf32" => "llvm.trunc.f32",
+        "truncf64" => "llvm.trunc.f64",
+        "rintf32" => "llvm.rint.f32",
+        "rintf64" => "llvm.rint.f64",
+        "nearbyintf32" => "llvm.nearbyint.f32",
+        "nearbyintf64" => "llvm.nearbyint.f64",
+        "roundf32" => "llvm.round.f32",
+        "roundf64" => "llvm.round.f64",
+        "assume" => "llvm.assume",
+        _ => return None
+    };
+    Some(ccx.get_intrinsic(&llvm_name))
+}
+
+/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
+/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
+/// add them to librustc_trans/trans/context.rs
+pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+                                            callee_ty: Ty<'tcx>,
+                                            fn_ty: &FnType,
+                                            args: callee::CallArgs<'a, 'tcx>,
+                                            dest: expr::Dest,
+                                            call_debug_location: DebugLoc)
+                                            -> Result<'blk, 'tcx> {
+    let fcx = bcx.fcx;
+    let ccx = fcx.ccx;
+    let tcx = bcx.tcx();
+
+    let _icx = push_ctxt("trans_intrinsic_call");
+
+    let (def_id, substs, sig) = match callee_ty.sty {
+        ty::TyFnDef(def_id, substs, fty) => {
+            let sig = tcx.erase_late_bound_regions(&fty.sig);
+            (def_id, substs, infer::normalize_associated_type(tcx, &sig))
+        }
+        _ => bug!("expected fn item type, found {}", callee_ty)
+    };
+    let arg_tys = sig.inputs;
+    let ret_ty = sig.output;
+    let name = tcx.item_name(def_id).as_str();
+
+    let span = match call_debug_location {
+        DebugLoc::At(_, span) => span,
+        DebugLoc::None => fcx.span.unwrap_or(DUMMY_SP)
+    };
+
+    let cleanup_scope = fcx.push_custom_cleanup_scope();
+
+    // For `transmute` we can just trans the input expr directly into dest
+    if name == "transmute" {
+        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
+        match args {
+            callee::ArgExprs(arg_exprs) => {
+                assert_eq!(arg_exprs.len(), 1);
+
+                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
+                                           *substs.types.get(FnSpace, 1));
+                let llintype = type_of::type_of(ccx, in_type);
+                let llouttype = type_of::type_of(ccx, out_type);
+
+                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
+                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
+
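+                // Special case: transmuting a (zero-sized) fn item type to a
+                // non-zero-sized type reifies the item to its function pointer
+                // and stores that pointer into the destination.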
+                if let ty::TyFnDef(def_id, substs, _) = in_type.sty {
+                    if out_type_size != 0 {
+                        // FIXME #19925 Remove this hack after a release cycle.
+                        let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
+                        let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
+                        let llfnty = val_ty(llfn);
+                        let llresult = match dest {
+                            expr::SaveIn(d) => d,
+                            expr::Ignore => alloc_ty(bcx, out_type, "ret")
+                        };
+                        Store(bcx, llfn, PointerCast(bcx, llresult, llfnty.ptr_to()));
+                        if dest == expr::Ignore {
+                            bcx = glue::drop_ty(bcx, llresult, out_type,
+                                                call_debug_location);
+                        }
+                        fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
+                        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
+                        return Result::new(bcx, llresult);
+                    }
+                }
+
+                // This should be caught by the intrinsicck pass
+                assert_eq!(in_type_size, out_type_size);
+
+                let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
+                    use llvm::TypeKind::*;
+                    match llkind {
+                        Half | Float | Double | X86_FP80 | FP128 |
+                            PPC_FP128 | Integer | Vector | X86_MMX => true,
+                        _ => false
+                    }
+                };
+
+                // An approximation to which types can be directly cast via
+                // LLVM's bitcast.  This doesn't cover pointer -> pointer casts,
+                // but does, importantly, cover SIMD types.
+                let in_kind = llintype.kind();
+                let ret_kind = llret_ty.kind();
+                let bitcast_compatible =
+                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
+                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
+                    };
+
+                let dest = if bitcast_compatible {
+                    // if we're here, the type is scalar-like (a primitive, a
+                    // SIMD type or a pointer), and so can be handled as a
+                    // by-value ValueRef and can also be directly bitcast to the
+                    // target type.  Doing this special case makes conversions
+                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
+                    // efficient (these are done efficiently implicitly in C
+                    // with the `__m128i` type and so this means Rust doesn't
+                    // lose out there).
+                    let expr = &arg_exprs[0];
+                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
+                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
+                    let val = if datum.kind.is_by_ref() {
+                        load_ty(bcx, datum.val, datum.ty)
+                    } else {
+                        from_immediate(bcx, datum.val)
+                    };
+
+                    let cast_val = BitCast(bcx, val, llret_ty);
+
+                    match dest {
+                        expr::SaveIn(d) => {
+                            // this often occurs in a sequence like `Store(val,
+                            // d); val2 = Load(d)`, so disappears easily.
+                            Store(bcx, cast_val, d);
+                        }
+                        expr::Ignore => {}
+                    }
+                    dest
+                } else {
+                    // The types are too complicated to do with a by-value
+                    // bitcast, so pointer cast instead. We need to cast the
+                    // dest so the types work out.
+                    let dest = match dest {
+                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
+                        expr::Ignore => expr::Ignore
+                    };
+                    bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
+                    dest
+                };
+
+                fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
+                fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
+
+                return match dest {
+                    expr::SaveIn(d) => Result::new(bcx, d),
+                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
+                };
+
+            }
+
+            _ => {
+                bug!("expected expr as argument for transmute");
+            }
+        }
+    }
+
+    // For `move_val_init` we can evaluate the destination address
+    // (the first argument) and then trans the source value (the
+    // second argument) directly into the resulting destination
+    // address.
+    if name == "move_val_init" {
+        if let callee::ArgExprs(ref exprs) = args {
+            let (dest_expr, source_expr) = if exprs.len() != 2 {
+                bug!("expected two exprs as arguments for `move_val_init` intrinsic");
+            } else {
+                (&exprs[0], &exprs[1])
+            };
+
+            // evaluate destination address
+            let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
+            let dest_datum = unpack_datum!(
+                bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
+            let dest_datum = unpack_datum!(
+                bcx, dest_datum.to_appropriate_datum(bcx));
+
+            // `expr::trans_into(bcx, expr, dest)` is equiv to
+            //
+            //    `trans(bcx, expr).store_to_dest(dest)`,
+            //
+            // which for `dest == expr::SaveIn(addr)`, is equivalent to:
+            //
+            //    `trans(bcx, expr).store_to(bcx, addr)`.
+            let lldest = expr::Dest::SaveIn(dest_datum.val);
+            bcx = expr::trans_into(bcx, source_expr, lldest);
+
+            let llresult = C_nil(ccx);
+            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
+
+            return Result::new(bcx, llresult);
+        } else {
+            bug!("expected two exprs as arguments for `move_val_init` intrinsic");
+        }
+    }
+
+    // save the actual AST arguments for later (some places need to do
+    // const-evaluation on them)
+    let expr_arguments = match args {
+        callee::ArgExprs(args) => Some(args),
+        _ => None,
+    };
+
+    // Push the arguments.
+    let mut llargs = Vec::new();
+    bcx = callee::trans_args(bcx,
+                             Abi::RustIntrinsic,
+                             fn_ty,
+                             &mut callee::Intrinsic,
+                             args,
+                             &mut llargs,
+                             cleanup::CustomScope(cleanup_scope));
+
+    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
+
+    // These are the only intrinsic functions that diverge.
+    if name == "abort" {
+        let llfn = ccx.get_intrinsic(&("llvm.trap"));
+        Call(bcx, llfn, &[], call_debug_location);
+        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
+        Unreachable(bcx);
+        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
+    } else if &name[..] == "unreachable" {
+        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
+        Unreachable(bcx);
+        return Result::new(bcx, C_nil(ccx));
+    }
+
+    let ret_ty = match ret_ty {
+        ty::FnConverging(ret_ty) => ret_ty,
+        ty::FnDiverging => bug!()
+    };
+
+    let llret_ty = type_of::type_of(ccx, ret_ty);
+
+    // Get location to store the result. If the user does
+    // not care about the result, just make a stack slot
+    let llresult = match dest {
+        expr::SaveIn(d) => d,
+        expr::Ignore => {
+            if !type_is_zero_size(ccx, ret_ty) {
+                let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
+                call_lifetime_start(bcx, llresult);
+                llresult
+            } else {
+                C_undef(llret_ty.ptr_to())
+            }
+        }
+    };
+
+    let simple = get_simple_intrinsic(ccx, &name);
+    let llval = match (simple, &name[..]) {
+        (Some(llfn), _) => {
+            Call(bcx, llfn, &llargs, call_debug_location)
+        }
+        (_, "try") => {
+            bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult,
+                                call_debug_location);
+            C_nil(ccx)
+        }
+        (_, "breakpoint") => {
+            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
+            Call(bcx, llfn, &[], call_debug_location)
+        }
+        (_, "size_of") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            let lltp_ty = type_of::type_of(ccx, tp_ty);
+            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
+        }
+        (_, "size_of_val") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            if !type_is_sized(tcx, tp_ty) {
+                let (llsize, _) =
+                    glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
+                llsize
+            } else {
+                let lltp_ty = type_of::type_of(ccx, tp_ty);
+                C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
+            }
+        }
+        (_, "min_align_of") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            C_uint(ccx, type_of::align_of(ccx, tp_ty))
+        }
+        (_, "min_align_of_val") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            if !type_is_sized(tcx, tp_ty) {
+                let (_, llalign) =
+                    glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
+                llalign
+            } else {
+                C_uint(ccx, type_of::align_of(ccx, tp_ty))
+            }
+        }
+        (_, "pref_align_of") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            let lltp_ty = type_of::type_of(ccx, tp_ty);
+            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
+        }
+        (_, "drop_in_place") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            let ptr = if type_is_sized(tcx, tp_ty) {
+                llargs[0]
+            } else {
+                let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
+                Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
+                Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
+                fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
+                scratch.val
+            };
+            glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
+            C_nil(ccx)
+        }
+        (_, "type_name") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
+            C_str_slice(ccx, ty_name)
+        }
+        (_, "type_id") => {
+            let hash = ccx.tcx().hash_crate_independent(*substs.types.get(FnSpace, 0),
+                                                        &ccx.link_meta().crate_hash);
+            C_u64(ccx, hash)
+        }
+        (_, "init_dropped") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            if !type_is_zero_size(ccx, tp_ty) {
+                drop_done_fill_mem(bcx, llresult, tp_ty);
+            }
+            C_nil(ccx)
+        }
+        (_, "init") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            if !type_is_zero_size(ccx, tp_ty) {
+                // Just zero out the stack slot. (See comment on base::memzero for explanation)
+                init_zero_mem(bcx, llresult, tp_ty);
+            }
+            C_nil(ccx)
+        }
+        // Effectively no-ops
+        (_, "uninit") | (_, "forget") => {
+            C_nil(ccx)
+        }
+        (_, "needs_drop") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+
+            C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
+        }
+        (_, "offset") => {
+            let ptr = llargs[0];
+            let offset = llargs[1];
+            InBoundsGEP(bcx, ptr, &[offset])
+        }
+        (_, "arith_offset") => {
+            let ptr = llargs[0];
+            let offset = llargs[1];
+            GEP(bcx, ptr, &[offset])
+        }
+
+        (_, "copy_nonoverlapping") => {
+            copy_intrinsic(bcx,
+                           false,
+                           false,
+                           *substs.types.get(FnSpace, 0),
+                           llargs[1],
+                           llargs[0],
+                           llargs[2],
+                           call_debug_location)
+        }
+        (_, "copy") => {
+            copy_intrinsic(bcx,
+                           true,
+                           false,
+                           *substs.types.get(FnSpace, 0),
+                           llargs[1],
+                           llargs[0],
+                           llargs[2],
+                           call_debug_location)
+        }
+        (_, "write_bytes") => {
+            memset_intrinsic(bcx,
+                             false,
+                             *substs.types.get(FnSpace, 0),
+                             llargs[0],
+                             llargs[1],
+                             llargs[2],
+                             call_debug_location)
+        }
+
+        (_, "volatile_copy_nonoverlapping_memory") => {
+            copy_intrinsic(bcx,
+                           false,
+                           true,
+                           *substs.types.get(FnSpace, 0),
+                           llargs[0],
+                           llargs[1],
+                           llargs[2],
+                           call_debug_location)
+        }
+        (_, "volatile_copy_memory") => {
+            copy_intrinsic(bcx,
+                           true,
+                           true,
+                           *substs.types.get(FnSpace, 0),
+                           llargs[0],
+                           llargs[1],
+                           llargs[2],
+                           call_debug_location)
+        }
+        (_, "volatile_set_memory") => {
+            memset_intrinsic(bcx,
+                             true,
+                             *substs.types.get(FnSpace, 0),
+                             llargs[0],
+                             llargs[1],
+                             llargs[2],
+                             call_debug_location)
+        }
+        (_, "volatile_load") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            let mut ptr = llargs[0];
+            if let Some(ty) = fn_ty.ret.cast {
+                ptr = PointerCast(bcx, ptr, ty.ptr_to());
+            }
+            let load = VolatileLoad(bcx, ptr);
+            unsafe {
+                llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
+            }
+            to_immediate(bcx, load, tp_ty)
+        },
+        (_, "volatile_store") => {
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            if type_is_fat_ptr(bcx.tcx(), tp_ty) {
+                VolatileStore(bcx, llargs[1], expr::get_dataptr(bcx, llargs[0]));
+                VolatileStore(bcx, llargs[2], expr::get_meta(bcx, llargs[0]));
+            } else {
+                let val = if fn_ty.args[1].is_indirect() {
+                    Load(bcx, llargs[1])
+                } else {
+                    from_immediate(bcx, llargs[1])
+                };
+                let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
+                let store = VolatileStore(bcx, val, ptr);
+                unsafe {
+                    llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
+                }
+            }
+            C_nil(ccx)
+        },
+
+        (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") |
+        (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") |
+        (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") |
+        (_, "unchecked_div") | (_, "unchecked_rem") => {
+            let sty = &arg_tys[0].sty;
+            match int_type_width_signed(sty, ccx) {
+                Some((width, signed)) =>
+                    match &*name {
+                        "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
+                                                        llargs[0], call_debug_location),
+                        "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
+                                                        llargs[0], call_debug_location),
+                        "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
+                                        &llargs, call_debug_location),
+                        "bswap" => {
+                            if width == 8 {
+                                llargs[0] // byte-swapping a u8/i8 is a no-op
+                            } else {
+                                Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
+                                        &llargs, call_debug_location)
+                            }
+                        }
+                        "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
+                            let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
+                                                    if signed { 's' } else { 'u' },
+                                                    &name[..3], width);
+                            with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
+                                                    call_debug_location)
+                        },
+                        "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
+                        "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
+                        "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
+                        "unchecked_div" =>
+                            if signed {
+                                SDiv(bcx, llargs[0], llargs[1], call_debug_location)
+                            } else {
+                                UDiv(bcx, llargs[0], llargs[1], call_debug_location)
+                            },
+                        "unchecked_rem" =>
+                            if signed {
+                                SRem(bcx, llargs[0], llargs[1], call_debug_location)
+                            } else {
+                                URem(bcx, llargs[0], llargs[1], call_debug_location)
+                            },
+                        _ => bug!(),
+                    },
+                None => {
+                    span_invalid_monomorphization_error(
+                        tcx.sess, span,
+                        &format!("invalid monomorphization of `{}` intrinsic: \
+                                  expected basic integer type, found `{}`", name, sty));
+                    C_nil(ccx)
+                }
+            }
+
+        },
+        (_, "fadd_fast") | (_, "fsub_fast") | (_, "fmul_fast") | (_, "fdiv_fast") |
+        (_, "frem_fast") => {
+            let sty = &arg_tys[0].sty;
+            match float_type_width(sty) {
+                Some(_width) =>
+                    match &*name {
+                        "fadd_fast" => FAddFast(bcx, llargs[0], llargs[1], call_debug_location),
+                        "fsub_fast" => FSubFast(bcx, llargs[0], llargs[1], call_debug_location),
+                        "fmul_fast" => FMulFast(bcx, llargs[0], llargs[1], call_debug_location),
+                        "fdiv_fast" => FDivFast(bcx, llargs[0], llargs[1], call_debug_location),
+                        "frem_fast" => FRemFast(bcx, llargs[0], llargs[1], call_debug_location),
+                        _ => bug!(),
+                    },
+                None => {
+                    span_invalid_monomorphization_error(
+                        tcx.sess, span,
+                        &format!("invalid monomorphization of `{}` intrinsic: \
+                                  expected basic float type, found `{}`", name, sty));
+                    C_nil(ccx)
+                }
+            }
+
+        },
+
+
+        (_, "return_address") => {
+            if !fcx.fn_ty.ret.is_indirect() {
+                span_err!(tcx.sess, span, E0510,
+                          "invalid use of `return_address` intrinsic: function \
+                           does not use out pointer");
+                C_null(Type::i8p(ccx))
+            } else {
+                PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
+            }
+        }
+
+        (_, "discriminant_value") => {
+            let val_ty = substs.types.get(FnSpace, 0);
+            match val_ty.sty {
+                ty::TyEnum(..) => {
+                    let repr = adt::represent_type(ccx, *val_ty);
+                    adt::trans_get_discr(bcx, &repr, llargs[0],
+                                         Some(llret_ty), true)
+                }
+                _ => C_null(llret_ty)
+            }
+        }
+        (_, name) if name.starts_with("simd_") => {
+            generic_simd_intrinsic(bcx, name,
+                                   substs,
+                                   callee_ty,
+                                   expr_arguments,
+                                   &llargs,
+                                   ret_ty, llret_ty,
+                                   call_debug_location,
+                                   span)
+        }
+        // This requires that atomic intrinsics follow a specific naming pattern:
+        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
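+        // For example, `atomic_xadd_relaxed` splits into ["atomic", "xadd",
+        // "relaxed"] and becomes an AtomicAdd RMW with Monotonic ordering,
+        // while a bare `atomic_load` has no suffix and so uses SeqCst.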
+        (_, name) if name.starts_with("atomic_") => {
+            let split: Vec<&str> = name.split('_').collect();
+
+            let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
+            let (order, failorder) = match split.len() {
+                2 => (llvm::SequentiallyConsistent, llvm::SequentiallyConsistent),
+                3 => match split[2] {
+                    "unordered" => (llvm::Unordered, llvm::Unordered),
+                    "relaxed" => (llvm::Monotonic, llvm::Monotonic),
+                    "acq"     => (llvm::Acquire, llvm::Acquire),
+                    "rel"     => (llvm::Release, llvm::Monotonic),
+                    "acqrel"  => (llvm::AcquireRelease, llvm::Acquire),
+                    "failrelaxed" if is_cxchg =>
+                        (llvm::SequentiallyConsistent, llvm::Monotonic),
+                    "failacq" if is_cxchg =>
+                        (llvm::SequentiallyConsistent, llvm::Acquire),
+                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
+                },
+                4 => match (split[2], split[3]) {
+                    ("acq", "failrelaxed") if is_cxchg =>
+                        (llvm::Acquire, llvm::Monotonic),
+                    ("acqrel", "failrelaxed") if is_cxchg =>
+                        (llvm::AcquireRelease, llvm::Monotonic),
+                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
+                },
+                _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
+            };
+
+            match split[1] {
+                "cxchg" | "cxchgweak" => {
+                    let sty = &substs.types.get(FnSpace, 0).sty;
+                    if int_type_width_signed(sty, ccx).is_some() {
+                        let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
+                        let val = AtomicCmpXchg(bcx, llargs[0], llargs[1], llargs[2],
+                                                order, failorder, weak);
+                        let result = ExtractValue(bcx, val, 0);
+                        let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
+                        Store(bcx, result, StructGEP(bcx, llresult, 0));
+                        Store(bcx, success, StructGEP(bcx, llresult, 1));
+                    } else {
+                        span_invalid_monomorphization_error(
+                            tcx.sess, span,
+                            &format!("invalid monomorphization of `{}` intrinsic: \
+                                      expected basic integer type, found `{}`", name, sty));
+                    }
+                    C_nil(ccx)
+                }
+
+                "load" => {
+                    let sty = &substs.types.get(FnSpace, 0).sty;
+                    if int_type_width_signed(sty, ccx).is_some() {
+                        AtomicLoad(bcx, llargs[0], order)
+                    } else {
+                        span_invalid_monomorphization_error(
+                            tcx.sess, span,
+                            &format!("invalid monomorphization of `{}` intrinsic: \
+                                      expected basic integer type, found `{}`", name, sty));
+                        C_nil(ccx)
+                    }
+                }
+
+                "store" => {
+                    let sty = &substs.types.get(FnSpace, 0).sty;
+                    if int_type_width_signed(sty, ccx).is_some() {
+                        AtomicStore(bcx, llargs[1], llargs[0], order);
+                    } else {
+                        span_invalid_monomorphization_error(
+                            tcx.sess, span,
+                            &format!("invalid monomorphization of `{}` intrinsic: \
+                                      expected basic integer type, found `{}`", name, sty));
+                    }
+                    C_nil(ccx)
+                }
+
+                "fence" => {
+                    AtomicFence(bcx, order, llvm::CrossThread);
+                    C_nil(ccx)
+                }
+
+                "singlethreadfence" => {
+                    AtomicFence(bcx, order, llvm::SingleThread);
+                    C_nil(ccx)
+                }
+
+                // These are all AtomicRMW ops
+                op => {
+                    let atom_op = match op {
+                        "xchg"  => llvm::AtomicXchg,
+                        "xadd"  => llvm::AtomicAdd,
+                        "xsub"  => llvm::AtomicSub,
+                        "and"   => llvm::AtomicAnd,
+                        "nand"  => llvm::AtomicNand,
+                        "or"    => llvm::AtomicOr,
+                        "xor"   => llvm::AtomicXor,
+                        "max"   => llvm::AtomicMax,
+                        "min"   => llvm::AtomicMin,
+                        "umax"  => llvm::AtomicUMax,
+                        "umin"  => llvm::AtomicUMin,
+                        _ => ccx.sess().fatal("unknown atomic operation")
+                    };
+
+                    let sty = &substs.types.get(FnSpace, 0).sty;
+                    if int_type_width_signed(sty, ccx).is_some() {
+                        AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
+                    } else {
+                        span_invalid_monomorphization_error(
+                            tcx.sess, span,
+                            &format!("invalid monomorphization of `{}` intrinsic: \
+                                      expected basic integer type, found `{}`", name, sty));
+                        C_nil(ccx)
+                    }
+                }
+            }
+
+        }
+
+        (_, _) => {
+            let intr = match Intrinsic::find(&name) {
+                Some(intr) => intr,
+                None => bug!("unknown intrinsic '{}'", name),
+            };
+            fn one<T>(x: Vec<T>) -> T {
+                assert_eq!(x.len(), 1);
+                x.into_iter().next().unwrap()
+            }
+            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
+                          any_changes_needed: &mut bool) -> Vec<Type> {
+                use intrinsics::Type::*;
+                match *t {
+                    Void => vec![Type::void(ccx)],
+                    Integer(_signed, width, llvm_width) => {
+                        *any_changes_needed |= width != llvm_width;
+                        vec![Type::ix(ccx, llvm_width as u64)]
+                    }
+                    Float(x) => {
+                        match x {
+                            32 => vec![Type::f32(ccx)],
+                            64 => vec![Type::f64(ccx)],
+                            _ => bug!()
+                        }
+                    }
+                    Pointer(ref t, ref llvm_elem, _const) => {
+                        *any_changes_needed |= llvm_elem.is_some();
+
+                        let t = llvm_elem.as_ref().unwrap_or(t);
+                        let elem = one(ty_to_type(ccx, t,
+                                                  any_changes_needed));
+                        vec![elem.ptr_to()]
+                    }
+                    Vector(ref t, ref llvm_elem, length) => {
+                        *any_changes_needed |= llvm_elem.is_some();
+
+                        let t = llvm_elem.as_ref().unwrap_or(t);
+                        let elem = one(ty_to_type(ccx, t,
+                                                  any_changes_needed));
+                        vec![Type::vector(&elem,
+                                          length as u64)]
+                    }
+                    Aggregate(false, ref contents) => {
+                        let elems = contents.iter()
+                                            .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
+                                            .collect::<Vec<_>>();
+                        vec![Type::struct_(ccx, &elems, false)]
+                    }
+                    Aggregate(true, ref contents) => {
+                        *any_changes_needed = true;
+                        contents.iter()
+                                .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
+                                .collect()
+                    }
+                }
+            }
+
+            // This allows an argument list like `foo, (bar, baz),
+            // qux` to be converted into `foo, bar, baz, qux`, integer
+            // arguments to be truncated as needed and pointers to be
+            // cast.
+            fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                            t: &intrinsics::Type,
+                                            arg_type: Ty<'tcx>,
+                                            llarg: ValueRef)
+                                            -> Vec<ValueRef>
+            {
+                match *t {
+                    intrinsics::Type::Aggregate(true, ref contents) => {
+                        // We found a tuple that needs squishing! So
+                        // run over the tuple and load each field.
+                        //
+                        // This assumes the type is "simple", i.e. no
+                        // destructors, and the contents are SIMD
+                        // etc.
+                        assert!(!bcx.fcx.type_needs_drop(arg_type));
+
+                        let repr = adt::represent_type(bcx.ccx(), arg_type);
+                        let repr_ptr = &repr;
+                        let arg = adt::MaybeSizedValue::sized(llarg);
+                        (0..contents.len())
+                            .map(|i| {
+                                Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i))
+                            })
+                            .collect()
+                    }
+                    intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
+                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
+                        vec![PointerCast(bcx, llarg,
+                                         llvm_elem.ptr_to())]
+                    }
+                    intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
+                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
+                        vec![BitCast(bcx, llarg,
+                                     Type::vector(&llvm_elem, length as u64))]
+                    }
+                    intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
+                        // the LLVM intrinsic uses a smaller integer
+                        // size than the C intrinsic's signature, so
+                        // we have to trim it down here.
+                        vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
+                    }
+                    _ => vec![llarg],
+                }
+            }
+
+
+            let mut any_changes_needed = false;
+            let inputs = intr.inputs.iter()
+                                    .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
+                                    .collect::<Vec<_>>();
+
+            let mut out_changes = false;
+            let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
+            // outputting a flattened aggregate is nonsense
+            assert!(!out_changes);
+
+            let llargs = if !any_changes_needed {
+                // no aggregates to flatten, so no change needed
+                llargs
+            } else {
+                // there are some aggregates that need to be flattened
+                // in the LLVM call, so we need to run over the types
+                // again to find them and extract the arguments
+                intr.inputs.iter()
+                           .zip(&llargs)
+                           .zip(&arg_tys)
+                           .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
+                           .collect()
+            };
+            assert_eq!(inputs.len(), llargs.len());
+
+            let val = match intr.definition {
+                intrinsics::IntrinsicDef::Named(name) => {
+                    let f = declare::declare_cfn(ccx,
+                                                 name,
+                                                 Type::func(&inputs, &outputs));
+                    Call(bcx, f, &llargs, call_debug_location)
+                }
+            };
+
+            match *intr.output {
+                intrinsics::Type::Aggregate(flatten, ref elems) => {
+                    // the output is a tuple so we need to munge it properly
+                    assert!(!flatten);
+
+                    for i in 0..elems.len() {
+                        let val = ExtractValue(bcx, val, i);
+                        Store(bcx, val, StructGEP(bcx, llresult, i));
+                    }
+                    C_nil(ccx)
+                }
+                _ => val,
+            }
+        }
+    };
+
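+    // Store the intrinsic's return value (if any) into the result slot,
+    // respecting any ABI cast required for the return type.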
+    if val_ty(llval) != Type::void(ccx) &&
+       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
+        if let Some(ty) = fn_ty.ret.cast {
+            let ptr = PointerCast(bcx, llresult, ty.ptr_to());
+            let store = Store(bcx, llval, ptr);
+            unsafe {
+                llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
+            }
+        } else {
+            store_ty(bcx, llval, llresult, ret_ty);
+        }
+    }
+
+    // If we made a temporary stack slot, let's clean it up
+    match dest {
+        expr::Ignore => {
+            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
+            call_lifetime_end(bcx, llresult);
+        }
+        expr::SaveIn(_) => {}
+    }
+
+    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
+
+    Result::new(bcx, llresult)
+}
+
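+/// Emit a call to `llvm.memcpy` (or `llvm.memmove` when overlap is allowed),
+/// copying `count` values of type `tp_ty` from `src` to `dst`.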
+fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                              allow_overlap: bool,
+                              volatile: bool,
+                              tp_ty: Ty<'tcx>,
+                              dst: ValueRef,
+                              src: ValueRef,
+                              count: ValueRef,
+                              call_debug_location: DebugLoc)
+                              -> ValueRef {
+    let ccx = bcx.ccx();
+    let lltp_ty = type_of::type_of(ccx, tp_ty);
+    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
+    let size = machine::llsize_of(ccx, lltp_ty);
+    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
+
+    let operation = if allow_overlap {
+        "memmove"
+    } else {
+        "memcpy"
+    };
+
+    let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);
+
+    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
+    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
+    let llfn = ccx.get_intrinsic(&name);
+
+    Call(bcx,
+         llfn,
+         &[dst_ptr,
+           src_ptr,
+           Mul(bcx, size, count, DebugLoc::None),
+           align,
+           C_bool(ccx, volatile)],
+         call_debug_location)
+}
+
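+/// Emit a call to `llvm.memset`, filling `count` values of type `tp_ty` at
+/// `dst` with the byte `val`.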
+fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                volatile: bool,
+                                tp_ty: Ty<'tcx>,
+                                dst: ValueRef,
+                                val: ValueRef,
+                                count: ValueRef,
+                                call_debug_location: DebugLoc)
+                                -> ValueRef {
+    let ccx = bcx.ccx();
+    let lltp_ty = type_of::type_of(ccx, tp_ty);
+    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
+    let size = machine::llsize_of(ccx, lltp_ty);
+    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
+
+    let name = format!("llvm.memset.p0i8.i{}", int_size);
+
+    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
+    let llfn = ccx.get_intrinsic(&name);
+
+    Call(bcx,
+         llfn,
+         &[dst_ptr,
+           val,
+           Mul(bcx, size, count, DebugLoc::None),
+           align,
+           C_bool(ccx, volatile)],
+         call_debug_location)
+}
+
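+/// Emit `llvm.ctlz`/`llvm.cttz`; the constant `false` second argument tells
+/// LLVM that a zero input still produces a defined result.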
+fn count_zeros_intrinsic(bcx: Block,
+                         name: &str,
+                         val: ValueRef,
+                         call_debug_location: DebugLoc)
+                         -> ValueRef {
+    let y = C_bool(bcx.ccx(), false);
+    let llfn = bcx.ccx().get_intrinsic(&name);
+    Call(bcx, llfn, &[val, y], call_debug_location)
+}
+
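+/// Call an `llvm.*.with.overflow.*` intrinsic and store the resulting
+/// `(value, overflowed)` pair through the out pointer `out`.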
+fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                       name: &str,
+                                       a: ValueRef,
+                                       b: ValueRef,
+                                       out: ValueRef,
+                                       call_debug_location: DebugLoc)
+                                       -> ValueRef {
+    let llfn = bcx.ccx().get_intrinsic(&name);
+
+    // Convert `i1` to a `bool`, and write it to the out parameter
+    let val = Call(bcx, llfn, &[a, b], call_debug_location);
+    let result = ExtractValue(bcx, val, 0);
+    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
+    Store(bcx, result, StructGEP(bcx, out, 0));
+    Store(bcx, overflow, StructGEP(bcx, out, 1));
+
+    C_nil(bcx.ccx())
+}
+
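+/// Lower the `try` intrinsic: with landing pads disabled, just call
+/// `func(data)`; otherwise dispatch to the MSVC (SEH) or GNU (landing pad)
+/// shim below.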
+fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                             func: ValueRef,
+                             data: ValueRef,
+                             local_ptr: ValueRef,
+                             dest: ValueRef,
+                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
+    if bcx.sess().no_landing_pads() {
+        Call(bcx, func, &[data], dloc);
+        Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
+        bcx
+    } else if wants_msvc_seh(bcx.sess()) {
+        trans_msvc_try(bcx, func, data, local_ptr, dest, dloc)
+    } else {
+        trans_gnu_try(bcx, func, data, local_ptr, dest, dloc)
+    }
+}
+
+// MSVC's definition of the `rust_try` function.
+//
+// This implementation uses the new exception handling instructions in LLVM,
+// which add support for SEH on MSVC targets. Although these instructions are
+// meant to work for all targets, as of this writing LLVM does not recommend
+// using them, as the old ones are still better optimized.
+fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                              func: ValueRef,
+                              data: ValueRef,
+                              local_ptr: ValueRef,
+                              dest: ValueRef,
+                              dloc: DebugLoc) -> Block<'blk, 'tcx> {
+    let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
+        let ccx = bcx.ccx();
+        let dloc = DebugLoc::None;
+
+        SetPersonalityFn(bcx, bcx.fcx.eh_personality());
+
+        let normal = bcx.fcx.new_temp_block("normal");
+        let catchswitch = bcx.fcx.new_temp_block("catchswitch");
+        let catchpad = bcx.fcx.new_temp_block("catchpad");
+        let caught = bcx.fcx.new_temp_block("caught");
+
+        let func = llvm::get_param(bcx.fcx.llfn, 0);
+        let data = llvm::get_param(bcx.fcx.llfn, 1);
+        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
+
+        // We're generating an IR snippet that looks like:
+        //
+        //   declare i32 @rust_try(%func, %data, %ptr) {
+        //      %slot = alloca i8*
+        //      call @llvm.localescape(%slot)
+        //      store %ptr, %slot
+        //      invoke %func(%data) to label %normal unwind label %catchswitch
+        //
+        //   normal:
+        //      ret i32 0
+        //
+        //   catchswitch:
+        //      %cs = catchswitch within none [%catchpad] unwind to caller
+        //
+        //   catchpad:
+        //      %tok = catchpad within %cs [%rust_try_filter]
+        //      catchret from %tok to label %caught
+        //
+        //   caught:
+        //      ret i32 1
+        //   }
+        //
+        // This structure follows the basic usage of the instructions in LLVM
+        // (see their documentation/test cases for examples), but a
+        // perhaps-surprising part here is the usage of the `localescape`
+        // intrinsic. This is used to allow the filter function (also generated
+        // here) to access variables on the stack of this intrinsic. This
+        // ability enables us to transfer information about the exception being
+        // thrown to this point, where we're catching the exception.
+        //
+        // More information can be found in libstd's seh.rs implementation.
+        let slot = Alloca(bcx, Type::i8p(ccx), "slot");
+        let localescape = ccx.get_intrinsic(&"llvm.localescape");
+        Call(bcx, localescape, &[slot], dloc);
+        Store(bcx, local_ptr, slot);
+        Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc);
+
+        Ret(normal, C_i32(ccx, 0), dloc);
+
+        let cs = CatchSwitch(catchswitch, None, None, 1);
+        AddHandler(catchswitch, cs, catchpad.llbb);
+
+        let filter = generate_filter_fn(bcx.fcx, bcx.fcx.llfn);
+        let filter = BitCast(catchpad, filter, Type::i8p(ccx));
+        let tok = CatchPad(catchpad, cs, &[filter]);
+        CatchRet(catchpad, tok, caught.llbb);
+
+        Ret(caught, C_i32(ccx, 1), dloc);
+    });
+
+    // Note that no invoke is used here because by definition this function
+    // can't panic (that's what it's catching).
+    let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
+    Store(bcx, ret, dest);
+    return bcx
+}
+
+// Definition of the standard "try" function for Rust using the GNU-like model
+// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
+// instructions).
+//
+// This translation is a little surprising because we always call a shim
+// function instead of inlining the call to `invoke` manually here. This is done
+// because in LLVM we're only allowed to have one personality per function
+// definition. The call to the `try` intrinsic is being inlined into the
+// function calling it, and that function may already have other personality
+// functions in play. By calling a shim we're guaranteed that our shim will have
+// the right personality function.
+fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                             func: ValueRef,
+                             data: ValueRef,
+                             local_ptr: ValueRef,
+                             dest: ValueRef,
+                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
+    let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
+        let ccx = bcx.ccx();
+        let tcx = ccx.tcx();
+        let dloc = DebugLoc::None;
+
+        // Translates the shims described above:
+        //
+        //   bcx:
+        //      invoke %func(%args...) normal %normal unwind %catch
+        //
+        //   normal:
+        //      ret 0
+        //
+        //   catch:
+        //      (ptr, _) = landingpad
+        //      store ptr, %local_ptr
+        //      ret 1
+        //
+        // Note that the `local_ptr` data passed into the `try` intrinsic is
+        // expected to be `*mut *mut u8` for this to actually work, but that's
+        // managed by the standard library.
+
+        attributes::emit_uwtable(bcx.fcx.llfn, true);
+        let catch_pers = match tcx.lang_items.eh_personality_catch() {
+            Some(did) => {
+                Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val
+            }
+            None => bug!("eh_personality_catch not defined"),
+        };
+
+        let then = bcx.fcx.new_temp_block("then");
+        let catch = bcx.fcx.new_temp_block("catch");
+
+        let func = llvm::get_param(bcx.fcx.llfn, 0);
+        let data = llvm::get_param(bcx.fcx.llfn, 1);
+        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
+        Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc);
+        Ret(then, C_i32(ccx, 0), dloc);
+
+        // Type indicator for the exception being thrown.
+        //
+        // The first value in this tuple is a pointer to the exception object
+        // being thrown.  The second value is a "selector" indicating which of
+        // the landing pad clauses the exception's type had been matched to.
+        // rust_try ignores the selector.
+        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
+                                    false);
+        let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
+        AddClause(catch, vals, C_null(Type::i8p(ccx)));
+        let ptr = ExtractValue(catch, vals, 0);
+        Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to()));
+        Ret(catch, C_i32(ccx, 1), dloc);
+    });
+
+    // Note that no invoke is used here because by definition this function
+    // can't panic (that's what it's catching).
+    let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
+    Store(bcx, ret, dest);
+    return bcx;
+}
+
+// Helper function to give a Block to a closure to translate a shim function.
+// This is currently primarily used for the `try` intrinsic functions above.
+fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
+                    name: &str,
+                    inputs: Vec<Ty<'tcx>>,
+                    output: ty::FnOutput<'tcx>,
+                    trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
+                    -> ValueRef {
+    let ccx = fcx.ccx;
+    let sig = ty::FnSig {
+        inputs: inputs,
+        output: output,
+        variadic: false,
+    };
+    let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
+
+    let rust_fn_ty = ccx.tcx().mk_fn_ptr(ty::BareFnTy {
+        unsafety: hir::Unsafety::Unsafe,
+        abi: Abi::Rust,
+        sig: ty::Binder(sig)
+    });
+    let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
+    let empty_substs = ccx.tcx().mk_substs(Substs::empty());
+    let (fcx, block_arena);
+    block_arena = TypedArena::new();
+    fcx = FunctionContext::new(ccx, llfn, fn_ty, None, empty_substs, &block_arena);
+    let bcx = fcx.init(true, None);
+    trans(bcx);
+    fcx.cleanup();
+    llfn
+}
+
+// Helper function used to get a handle to the `__rust_try` function used to
+// catch exceptions.
+//
+// This function is only generated once and is then cached.
+fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
+                             trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
+                             -> ValueRef {
+    let ccx = fcx.ccx;
+    if let Some(llfn) = ccx.rust_try_fn().get() {
+        return llfn;
+    }
+
+    // Define the type up front for the signature of the rust_try function.
+    let tcx = ccx.tcx();
+    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
+    let fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
+        unsafety: hir::Unsafety::Unsafe,
+        abi: Abi::Rust,
+        sig: ty::Binder(ty::FnSig {
+            inputs: vec![i8p],
+            output: ty::FnOutput::FnConverging(tcx.mk_nil()),
+            variadic: false,
+        }),
+    });
+    let output = ty::FnOutput::FnConverging(tcx.types.i32);
+    let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
+    ccx.rust_try_fn().set(Some(rust_try));
+    return rust_try
+}
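+
+// Editorial usage sketch (not part of this commit; names are hypothetical):
+// the standard library drives this shim roughly as follows, with `local_ptr`
+// really being a `*mut *mut u8` slot that receives the exception pointer:
+//
+//     unsafe fn catch(f: fn(*mut u8), data: *mut u8) -> Result<(), *mut u8> {
+//         let mut payload: *mut u8 = ptr::null_mut();
+//         match intrinsics::try(f, data, &mut payload as *mut _ as *mut u8) {
+//             0 => Ok(()),
+//             _ => Err(payload),
+//         }
+//     }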
+
+// For MSVC-style exceptions (SEH), the compiler generates a filter function
+// which is used to determine whether an exception is being caught (e.g. whether
+// it's a Rust exception or some other kind).
+//
+// This function is used to generate said filter function. The shim generated
+// here is actually just a thin wrapper to call the real implementation in the
+// standard library itself. For reasons as to why, see seh.rs in the standard
+// library.
+fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
+                                rust_try_fn: ValueRef)
+                                -> ValueRef {
+    let ccx = fcx.ccx;
+    let tcx = ccx.tcx();
+    let dloc = DebugLoc::None;
+
+    let rust_try_filter = match tcx.lang_items.msvc_try_filter() {
+        Some(did) => {
+            Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val
+        }
+        None => bug!("msvc_try_filter not defined"),
+    };
+
+    let output = ty::FnOutput::FnConverging(tcx.types.i32);
+    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
+
+    let frameaddress = ccx.get_intrinsic(&"llvm.frameaddress");
+    let recoverfp = ccx.get_intrinsic(&"llvm.x86.seh.recoverfp");
+    let localrecover = ccx.get_intrinsic(&"llvm.localrecover");
+
+    // On all platforms, once we have the EXCEPTION_POINTERS handle as well as
+    // the base pointer, we follow the standard layout of:
+    //
+    //      block:
+    //          %parentfp = call i8* llvm.x86.seh.recoverfp(@rust_try_fn, %bp)
+    //          %arg = call i8* llvm.localrecover(@rust_try_fn, %parentfp, 0)
+    //          %ret = call i32 @the_real_filter_function(%ehptrs, %arg)
+    //          ret i32 %ret
+    //
+    // The recoverfp intrinsic is used to recover the frame pointer of the
+    // `rust_try_fn` function, which is then in turn passed to the
+    // `localrecover` intrinsic (pairing with the `localescape` intrinsic
+    // mentioned above). Putting all this together means that we now have a
+    // handle to the arguments passed into the `try` function, allowing writing
+    // to the stack over there.
+    //
+    // For more info, see seh.rs in the standard library.
+    let do_trans = |bcx: Block, ehptrs, base_pointer| {
+        let rust_try_fn = BitCast(bcx, rust_try_fn, Type::i8p(ccx));
+        let parentfp = Call(bcx, recoverfp, &[rust_try_fn, base_pointer], dloc);
+        let arg = Call(bcx, localrecover,
+                       &[rust_try_fn, parentfp, C_i32(ccx, 0)], dloc);
+        let ret = Call(bcx, rust_try_filter, &[ehptrs, arg], dloc);
+        Ret(bcx, ret, dloc);
+    };
+
+    if ccx.tcx().sess.target.target.arch == "x86" {
+        // On x86 the filter function doesn't actually receive any arguments.
+        // Instead the %ebp register contains some contextual information.
+        //
+        // Unfortunately I don't know of any great documentation as to what's
+        // going on here; all I can say is that there are a few test cases in
+        // LLVM's test suite which follow this pattern of instructions, so we
+        // just do the same.
+        gen_fn(fcx, "__rustc_try_filter", vec![], output, &mut |bcx| {
+            let ebp = Call(bcx, frameaddress, &[C_i32(ccx, 1)], dloc);
+            let exn = InBoundsGEP(bcx, ebp, &[C_i32(ccx, -20)]);
+            let exn = Load(bcx, BitCast(bcx, exn, Type::i8p(ccx).ptr_to()));
+            do_trans(bcx, exn, ebp);
+        })
+    } else if ccx.tcx().sess.target.target.arch == "x86_64" {
+        // Conveniently on x86_64 the EXCEPTION_POINTERS handle and base pointer
+        // are passed in as arguments to the filter function, so we just pass
+        // those along.
+        gen_fn(fcx, "__rustc_try_filter", vec![i8p, i8p], output, &mut |bcx| {
+            let exn = llvm::get_param(bcx.fcx.llfn, 0);
+            let rbp = llvm::get_param(bcx.fcx.llfn, 1);
+            do_trans(bcx, exn, rbp);
+        })
+    } else {
+        bug!("unknown target to generate a filter function")
+    }
+}
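+
+// Editorial sketch (not part of this commit; names are illustrative): on
+// x86_64 the generated filter behaves roughly like
+//
+//     unsafe extern fn __rustc_try_filter(ehptrs: *mut u8, rbp: *mut u8) -> i32 {
+//         let parentfp = llvm_recoverfp(rust_try_fn, rbp);
+//         let arg = llvm_localrecover(rust_try_fn, parentfp, 0);
+//         msvc_try_filter(ehptrs, arg)   // the real filter in std's seh.rs
+//     }
+//
+// where `llvm_recoverfp`/`llvm_localrecover` stand in for the intrinsics used
+// above, and on x86 the two arguments are instead dug out of %ebp.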
+
+fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
+    span_err!(a, b, E0511, "{}", c);
+}
+
+fn generic_simd_intrinsic<'blk, 'tcx, 'a>
+    (bcx: Block<'blk, 'tcx>,
+     name: &str,
+     substs: &'tcx subst::Substs<'tcx>,
+     callee_ty: Ty<'tcx>,
+     args: Option<&[P<hir::Expr>]>,
+     llargs: &[ValueRef],
+     ret_ty: Ty<'tcx>,
+     llret_ty: Type,
+     call_debug_location: DebugLoc,
+     span: Span) -> ValueRef
+{
+    // macros for error handling:
+    macro_rules! emit_error {
+        ($msg: tt) => {
+            emit_error!($msg, )
+        };
+        ($msg: tt, $($fmt: tt)*) => {
+            span_invalid_monomorphization_error(
+                bcx.sess(), span,
+                &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
+                                 $msg),
+                         name, $($fmt)*));
+        }
+    }
+    macro_rules! require {
+        ($cond: expr, $($fmt: tt)*) => {
+            if !$cond {
+                emit_error!($($fmt)*);
+                return C_nil(bcx.ccx())
+            }
+        }
+    }
+    macro_rules! require_simd {
+        ($ty: expr, $position: expr) => {
+            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
+        }
+    }
+
+    let tcx = bcx.tcx();
+    let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig());
+    let sig = infer::normalize_associated_type(tcx, &sig);
+    let arg_tys = sig.inputs;
+
+    // every intrinsic takes a SIMD vector as its first argument
+    require_simd!(arg_tys[0], "input");
+    let in_ty = arg_tys[0];
+    let in_elem = arg_tys[0].simd_type(tcx);
+    let in_len = arg_tys[0].simd_size(tcx);
+
+    let comparison = match name {
+        "simd_eq" => Some(hir::BiEq),
+        "simd_ne" => Some(hir::BiNe),
+        "simd_lt" => Some(hir::BiLt),
+        "simd_le" => Some(hir::BiLe),
+        "simd_gt" => Some(hir::BiGt),
+        "simd_ge" => Some(hir::BiGe),
+        _ => None
+    };
+
+    if let Some(cmp_op) = comparison {
+        require_simd!(ret_ty, "return");
+
+        let out_len = ret_ty.simd_size(tcx);
+        require!(in_len == out_len,
+                 "expected return type with length {} (same as input type `{}`), \
+                  found `{}` with length {}",
+                 in_len, in_ty,
+                 ret_ty, out_len);
+        require!(llret_ty.element_type().kind() == llvm::Integer,
+                 "expected return type with integer elements, found `{}` with non-integer `{}`",
+                 ret_ty,
+                 ret_ty.simd_type(tcx));
+
+        return compare_simd_types(bcx,
+                                  llargs[0],
+                                  llargs[1],
+                                  in_elem,
+                                  llret_ty,
+                                  cmp_op,
+                                  call_debug_location)
+    }
+
+    if name.starts_with("simd_shuffle") {
+        let n: usize = match name["simd_shuffle".len()..].parse() {
+            Ok(n) => n,
+            Err(_) => span_bug!(span,
+                                "bad `simd_shuffle` instruction only caught in trans?")
+        };
+
+        require_simd!(ret_ty, "return");
+
+        let out_len = ret_ty.simd_size(tcx);
+        require!(out_len == n,
+                 "expected return type of length {}, found `{}` with length {}",
+                 n, ret_ty, out_len);
+        require!(in_elem == ret_ty.simd_type(tcx),
+                 "expected return element type `{}` (element of input `{}`), \
+                  found `{}` with element type `{}`",
+                 in_elem, in_ty,
+                 ret_ty, ret_ty.simd_type(tcx));
+
+        let total_len = in_len as u64 * 2;
+
+        let (vector, indirect) = match args {
+            Some(args) => {
+                match consts::const_expr(bcx.ccx(), &args[2], substs, None,
+                                         // this should probably help simd error reporting
+                                         consts::TrueConst::Yes) {
+                    Ok((vector, _)) => (vector, false),
+                    Err(err) => bcx.sess().span_fatal(span, &err.description()),
+                }
+            }
+            None => (llargs[2], !type_is_immediate(bcx.ccx(), arg_tys[2]))
+        };
+
+        let indices: Option<Vec<_>> = (0..n)
+            .map(|i| {
+                let arg_idx = i;
+                let val = if indirect {
+                    Load(bcx, StructGEP(bcx, vector, i))
+                } else {
+                    const_get_elt(vector, &[i as libc::c_uint])
+                };
+                let c = const_to_opt_uint(val);
+                match c {
+                    None => {
+                        emit_error!("shuffle index #{} is not a constant", arg_idx);
+                        None
+                    }
+                    Some(idx) if idx >= total_len => {
+                        emit_error!("shuffle index #{} is out of bounds (limit {})",
+                                    arg_idx, total_len);
+                        None
+                    }
+                    Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)),
+                }
+            })
+            .collect();
+        let indices = match indices {
+            Some(i) => i,
+            None => return C_null(llret_ty)
+        };
+
+        return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
+    }
+
+    if name == "simd_insert" {
+        require!(in_elem == arg_tys[2],
+                 "expected inserted type `{}` (element of input `{}`), found `{}`",
+                 in_elem, in_ty, arg_tys[2]);
+        return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
+    }
+    if name == "simd_extract" {
+        require!(ret_ty == in_elem,
+                 "expected return type `{}` (element of input `{}`), found `{}`",
+                 in_elem, in_ty, ret_ty);
+        return ExtractElement(bcx, llargs[0], llargs[1])
+    }
+
+    if name == "simd_cast" {
+        require_simd!(ret_ty, "return");
+        let out_len = ret_ty.simd_size(tcx);
+        require!(in_len == out_len,
+                 "expected return type with length {} (same as input type `{}`), \
+                  found `{}` with length {}",
+                 in_len, in_ty,
+                 ret_ty, out_len);
+        // casting cares about nominal type, not just structural type
+        let out_elem = ret_ty.simd_type(tcx);
+
+        if in_elem == out_elem { return llargs[0]; }
+
+        enum Style { Float, Int(/* is signed? */ bool), Unsupported }
+
+        let (in_style, in_width) = match in_elem.sty {
+            // vectors of pointer-sized integers should've been
+            // disallowed before here, so this unwrap is safe.
+            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
+            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
+            ty::TyFloat(f) => (Style::Float, f.bit_width()),
+            _ => (Style::Unsupported, 0)
+        };
+        let (out_style, out_width) = match out_elem.sty {
+            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
+            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
+            ty::TyFloat(f) => (Style::Float, f.bit_width()),
+            _ => (Style::Unsupported, 0)
+        };
+
+        match (in_style, out_style) {
+            (Style::Int(in_is_signed), Style::Int(_)) => {
+                return match in_width.cmp(&out_width) {
+                    Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
+                    Ordering::Equal => llargs[0],
+                    Ordering::Less => if in_is_signed {
+                        SExt(bcx, llargs[0], llret_ty)
+                    } else {
+                        ZExt(bcx, llargs[0], llret_ty)
+                    }
+                }
+            }
+            (Style::Int(in_is_signed), Style::Float) => {
+                return if in_is_signed {
+                    SIToFP(bcx, llargs[0], llret_ty)
+                } else {
+                    UIToFP(bcx, llargs[0], llret_ty)
+                }
+            }
+            (Style::Float, Style::Int(out_is_signed)) => {
+                return if out_is_signed {
+                    FPToSI(bcx, llargs[0], llret_ty)
+                } else {
+                    FPToUI(bcx, llargs[0], llret_ty)
+                }
+            }
+            (Style::Float, Style::Float) => {
+                return match in_width.cmp(&out_width) {
+                    Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
+                    Ordering::Equal => llargs[0],
+                    Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
+                }
+            }
+            _ => {/* Unsupported. Fallthrough. */}
+        }
+        require!(false,
+                 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
+                 in_ty, in_elem,
+                 ret_ty, out_elem);
+    }
+    macro_rules! arith {
+        ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
+            $(
+                if name == stringify!($name) {
+                    match in_elem.sty {
+                        $(
+                            $(ty::$p(_))|* => {
+                                return $call(bcx, llargs[0], llargs[1], call_debug_location)
+                            }
+                            )*
+                        _ => {},
+                    }
+                    require!(false,
+                             "unsupported operation on `{}` with element `{}`",
+                             in_ty,
+                             in_elem)
+                })*
+        }
+    }
+    arith! {
+        simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
+        simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
+        simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
+        simd_div: TyFloat => FDiv;
+        simd_shl: TyUint, TyInt => Shl;
+        simd_shr: TyUint => LShr, TyInt => AShr;
+        simd_and: TyUint, TyInt => And;
+        simd_or: TyUint, TyInt => Or;
+        simd_xor: TyUint, TyInt => Xor;
+    }
+    span_bug!(span, "unknown SIMD intrinsic");
+}
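+
+// Editorial usage sketch (not part of this commit): the intrinsics handled
+// above are declared and invoked from Rust roughly like this, assuming the
+// `repr_simd` and `platform_intrinsics` nightly features of this era:
+//
+//     #[repr(simd)]
+//     #[derive(Copy, Clone)]
+//     struct i32x4(i32, i32, i32, i32);
+//
+//     extern "platform-intrinsic" {
+//         fn simd_add<T>(x: T, y: T) -> T;
+//         fn simd_eq<T, U>(x: T, y: T) -> U;
+//         fn simd_shuffle4<T, U>(x: T, y: T, idx: [u32; 4]) -> U;
+//     }
+//
+//     fn demo(a: i32x4, b: i32x4) -> i32x4 {
+//         unsafe { simd_add(a, b) }
+//     }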
+
+// Returns the width of an int TypeVariant, and if it's signed or not
+// Returns None if the type is not an integer
+fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
+        -> Option<(u64, bool)> {
+    use rustc::ty::{TyInt, TyUint};
+    match *sty {
+        TyInt(t) => Some((match t {
+            ast::IntTy::Is => {
+                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
+                    "32" => 32,
+                    "64" => 64,
+                    tws => bug!("Unsupported target word size for isize: {}", tws),
+                }
+            },
+            ast::IntTy::I8 => 8,
+            ast::IntTy::I16 => 16,
+            ast::IntTy::I32 => 32,
+            ast::IntTy::I64 => 64,
+        }, true)),
+        TyUint(t) => Some((match t {
+            ast::UintTy::Us => {
+                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
+                    "32" => 32,
+                    "64" => 64,
+                    tws => bug!("Unsupported target word size for usize: {}", tws),
+                }
+            },
+            ast::UintTy::U8 => 8,
+            ast::UintTy::U16 => 16,
+            ast::UintTy::U32 => 32,
+            ast::UintTy::U64 => 64,
+        }, false)),
+        _ => None,
+    }
+}
+
+// Returns the width of a float TypeVariant
+// Returns None if the type is not a float
+fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>)
+        -> Option<u64> {
+    use rustc::ty::TyFloat;
+    match *sty {
+        TyFloat(t) => Some(match t {
+            ast::FloatTy::F32 => 32,
+            ast::FloatTy::F64 => 64,
+        }),
+        _ => None,
+    }
+}
diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs
index 6f596b15b9214fd29b4832f76314021760aa6b54..cb421b6be472b077a3be091b2d1030cb6a002f84 100644
 #![feature(slice_patterns)]
 #![feature(staged_api)]
 #![feature(unicode)]
+#![feature(question_mark)]
 
 extern crate arena;
 extern crate flate;
 extern crate getopts;
 extern crate graphviz;
 extern crate libc;
-extern crate rustc;
+#[macro_use] extern crate rustc;
 extern crate rustc_back;
 extern crate rustc_data_structures;
-extern crate rustc_front;
+extern crate rustc_incremental;
 pub extern crate rustc_llvm as llvm;
 extern crate rustc_mir;
 extern crate rustc_platform_intrinsics as intrinsics;
 extern crate serialize;
+extern crate rustc_const_math;
+extern crate rustc_const_eval;
 
 #[macro_use] extern crate log;
 #[macro_use] extern crate syntax;
@@ -59,26 +62,87 @@ pub use rustc::middle;
 pub use rustc::lint;
 pub use rustc::util;
 
+pub use base::trans_crate;
+pub use disr::Disr;
+
 pub mod back {
-    pub use rustc_back::abi;
     pub use rustc_back::rpath;
-    pub use rustc_back::svh;
+    pub use rustc::hir::svh;
 
     pub mod archive;
     pub mod linker;
     pub mod link;
     pub mod lto;
+    pub mod symbol_names;
     pub mod write;
     pub mod msvc;
 }
 
 pub mod diagnostics;
 
-pub mod trans;
-pub mod save;
+#[macro_use]
+mod macros;
+
+mod abi;
+mod adt;
+mod asm;
+mod attributes;
+mod base;
+mod basic_block;
+mod build;
+mod builder;
+mod cabi_aarch64;
+mod cabi_arm;
+mod cabi_asmjs;
+mod cabi_mips;
+mod cabi_powerpc;
+mod cabi_powerpc64;
+mod cabi_x86;
+mod cabi_x86_64;
+mod cabi_x86_win64;
+mod callee;
+mod cleanup;
+mod closure;
+mod common;
+mod consts;
+mod context;
+mod controlflow;
+mod datum;
+mod debuginfo;
+mod declare;
+mod disr;
+mod expr;
+mod glue;
+mod inline;
+mod intrinsic;
+mod machine;
+mod _match;
+mod meth;
+mod mir;
+mod monomorphize;
+mod collector;
+mod symbol_names_test;
+mod tvec;
+mod type_;
+mod type_of;
+mod value;
+
+#[derive(Copy, Clone)]
+pub struct ModuleTranslation {
+    pub llcx: llvm::ContextRef,
+    pub llmod: llvm::ModuleRef,
+}
+
+unsafe impl Send for ModuleTranslation { }
+unsafe impl Sync for ModuleTranslation { }
 
-pub mod lib {
-    pub use llvm;
+pub struct CrateTranslation {
+    pub modules: Vec<ModuleTranslation>,
+    pub metadata_module: ModuleTranslation,
+    pub link: middle::cstore::LinkMeta,
+    pub metadata: Vec<u8>,
+    pub reachable: Vec<String>,
+    pub no_builtins: bool,
 }
 
 __build_diagnostic_array! { librustc_trans, DIAGNOSTICS }
diff --git a/src/librustc_trans/machine.rs b/src/librustc_trans/machine.rs
new file mode 100644 (file)
index 0000000..59020b3
--- /dev/null
@@ -0,0 +1,108 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Information concerning the machine representation of various types.
+
+#![allow(non_camel_case_types)]
+
+use llvm::{self, ValueRef};
+use common::*;
+
+use type_::Type;
+
+pub type llbits = u64;
+pub type llsize = u64;
+pub type llalign = u32;
+
+// ______________________________________________________________________
+// compute sizeof / alignof
+
+// Returns the number of bytes clobbered by a Store to this type.
+pub fn llsize_of_store(cx: &CrateContext, ty: Type) -> llsize {
+    unsafe {
+        return llvm::LLVMStoreSizeOfType(cx.td(), ty.to_ref());
+    }
+}
+
+// Returns the number of bytes between successive elements of type T in an
+// array of T. This is the "ABI" size. It includes any ABI-mandated padding.
+pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> llsize {
+    unsafe {
+        return llvm::LLVMABISizeOfType(cx.td(), ty.to_ref());
+    }
+}
+
+// Returns, as near as we can figure, the "real" size of a type. As in, the
+// bits in this number of bytes actually carry data related to the datum
+// with the type. Not junk, accidentally-damaged words, or whatever.
+// Note that padding of the type will be included for structs, but not for the
+// other types (i.e. SIMD types).
+// Rounds up to the nearest byte though, so if you have a 1-bit
+// value, we return 1 here, not 0. Most of rustc works in bytes. Be warned
+// that LLVM *does* distinguish between e.g. a 1-bit value and an 8-bit value
+// at the codegen level! In general you should prefer `llbitsize_of_real`
+// below.
+pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> llsize {
+    unsafe {
+        let nbits = llvm::LLVMSizeOfTypeInBits(cx.td(), ty.to_ref());
+        if nbits & 7 != 0 {
+            // Not an even number of bytes, spills into "next" byte.
+            1 + (nbits >> 3)
+        } else {
+            nbits >> 3
+        }
+    }
+}
+
+/// Returns the "real" size of the type in bits.
+pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> llbits {
+    unsafe {
+        llvm::LLVMSizeOfTypeInBits(cx.td(), ty.to_ref())
+    }
+}
+
+/// Returns the size of the type as an LLVM constant integer value.
+pub fn llsize_of(cx: &CrateContext, ty: Type) -> ValueRef {
+    // Once upon a time, this called LLVMSizeOf, which does a
+    // getelementptr(1) on a null pointer and casts to an int, in
+    // order to obtain the type size as a value without requiring the
+    // target data layout.  But we have the target data layout, so
+    // there's no need for that contrivance.  The instruction
+    // selection DAG generator would flatten that GEP(1) node into a
+    // constant of the type's alloc size, so let's save it some work.
+    return C_uint(cx, llsize_of_alloc(cx, ty));
+}
+
+// Returns the preferred alignment of the given type for the current target.
+// The preferred alignment may be larger than the alignment used when
+// packing the type into structs. This will be used for things like
+// allocations inside a stack frame, which LLVM has a free hand in.
+pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> llalign {
+    unsafe {
+        return llvm::LLVMPreferredAlignmentOfType(cx.td(), ty.to_ref());
+    }
+}
+
+// Returns the minimum alignment of a type required by the platform.
+// This is the alignment that will be used for struct fields, arrays,
+// and similar ABI-mandated things.
+pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> llalign {
+    unsafe {
+        return llvm::LLVMABIAlignmentOfType(cx.td(), ty.to_ref());
+    }
+}
+
+pub fn llelement_offset(cx: &CrateContext, struct_ty: Type, element: usize) -> u64 {
+    unsafe {
+        return llvm::LLVMOffsetOfElement(cx.td(),
+                                         struct_ty.to_ref(),
+                                         element as u32);
+    }
+}
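+
+// Editorial example (assumption, not part of this commit): on a typical x86_64
+// target, LLVM's `i1` has llbitsize_of_real == 1 but llsize_of_real rounds up
+// to 1 byte, while a struct like `{ i32, i8 }` has llsize_of_alloc == 8 (its
+// 3 bytes of tail padding count toward the ABI size) and llalign_of_min == 4.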
diff --git a/src/librustc_trans/macros.rs b/src/librustc_trans/macros.rs
new file mode 100644 (file)
index 0000000..77efcc6
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+macro_rules! unpack_datum {
+    ($bcx: ident, $inp: expr) => (
+        {
+            let db = $inp;
+            $bcx = db.bcx;
+            db.datum
+        }
+    )
+}
+
+macro_rules! unpack_result {
+    ($bcx: ident, $inp: expr) => (
+        {
+            let db = $inp;
+            $bcx = db.bcx;
+            db.val
+        }
+    )
+}
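+
+// Editorial usage sketch (not part of this commit): callers thread the current
+// block through these macros, e.g.
+//
+//     let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
+//
+// rebinds `bcx` to the `.bcx` of the returned DatumBlock and evaluates to its
+// `.datum` field; unpack_result! does the same for the `.val` of a Result.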
diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs
new file mode 100644 (file)
index 0000000..478094c
--- /dev/null
@@ -0,0 +1,334 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::rc::Rc;
+
+use arena::TypedArena;
+use back::symbol_names;
+use llvm::{ValueRef, get_params};
+use rustc::hir::def_id::DefId;
+use rustc::infer;
+use rustc::ty::subst::{FnSpace, Subst, Substs};
+use rustc::ty::subst;
+use rustc::traits::{self, ProjectionMode};
+use abi::FnType;
+use base::*;
+use build::*;
+use callee::{Callee, Virtual, ArgVals, trans_fn_pointer_shim};
+use closure;
+use common::*;
+use consts;
+use debuginfo::DebugLoc;
+use declare;
+use expr;
+use glue;
+use machine;
+use type_::Type;
+use type_of::*;
+use value::Value;
+use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
+
+use syntax::ast::Name;
+use syntax::codemap::DUMMY_SP;
+
+// drop_glue pointer, size, align.
+const VTABLE_OFFSET: usize = 3;
+
+/// Extracts a method from a trait object's vtable, at the specified index.
+pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                      llvtable: ValueRef,
+                                      vtable_index: usize)
+                                      -> ValueRef {
+    // Load the data pointer from the object.
+    debug!("get_virtual_method(vtable_index={}, llvtable={:?})",
+           vtable_index, Value(llvtable));
+
+    Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET]))
+}
+
+/// Generates a shim function that allows an object type like `SomeTrait` to
+/// implement the trait `SomeTrait`. Imagine a trait definition:
+///
+///    trait SomeTrait { fn get(&self) -> i32; ... }
+///
+/// And a generic bit of code:
+///
+///    fn foo<T:SomeTrait>(t: &T) {
+///        let x = SomeTrait::get;
+///        x(t)
+///    }
+///
+/// What is the value of `x` when `foo` is invoked with `T=SomeTrait`?
+/// The answer is that it is a shim function generated by this routine:
+///
+///    fn shim(t: &SomeTrait) -> i32 {
+///        // ... call t.get() virtually ...
+///    }
+///
+/// In fact, all virtual calls can be thought of as normal trait calls
+/// that go through this shim function.
+pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
+                                   method_ty: Ty<'tcx>,
+                                   vtable_index: usize)
+                                   -> ValueRef {
+    let _icx = push_ctxt("trans_object_shim");
+    let tcx = ccx.tcx();
+
+    debug!("trans_object_shim(vtable_index={}, method_ty={:?})",
+           vtable_index,
+           method_ty);
+
+    let sig = tcx.erase_late_bound_regions(&method_ty.fn_sig());
+    let sig = infer::normalize_associated_type(tcx, &sig);
+    let fn_ty = FnType::new(ccx, method_ty.fn_abi(), &sig, &[]);
+
+    let function_name =
+        symbol_names::internal_name_from_type_and_suffix(ccx, method_ty, "object_shim");
+    let llfn = declare::define_internal_fn(ccx, &function_name, method_ty);
+
+    let empty_substs = tcx.mk_substs(Substs::empty());
+    let (block_arena, fcx): (TypedArena<_>, FunctionContext);
+    block_arena = TypedArena::new();
+    fcx = FunctionContext::new(ccx, llfn, fn_ty, None, empty_substs, &block_arena);
+    let mut bcx = fcx.init(false, None);
+    assert!(!fcx.needs_ret_allocas);
+
+    let dest =
+        fcx.llretslotptr.get().map(
+            |_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")));
+
+    debug!("trans_object_shim: method_offset_in_vtable={}",
+           vtable_index);
+
+    let llargs = get_params(fcx.llfn);
+    let args = ArgVals(&llargs[fcx.fn_ty.ret.is_indirect() as usize..]);
+
+    let callee = Callee {
+        data: Virtual(vtable_index),
+        ty: method_ty
+    };
+    bcx = callee.call(bcx, DebugLoc::None, args, dest).bcx;
+
+    fcx.finish(bcx, DebugLoc::None);
+
+    llfn
+}
+
+/// Creates and returns a dynamic vtable for the given type and vtable origin.
+/// This is used only for objects.
+///
+/// The `trait_ref` encodes the erased self type. Hence if we are
+/// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
+/// `trait_ref` would map `T:Trait`.
+pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                            trait_ref: ty::PolyTraitRef<'tcx>)
+                            -> ValueRef
+{
+    let tcx = ccx.tcx();
+    let _icx = push_ctxt("meth::get_vtable");
+
+    debug!("get_vtable(trait_ref={:?})", trait_ref);
+
+    // Check the cache.
+    match ccx.vtables().borrow().get(&trait_ref) {
+        Some(&val) => { return val }
+        None => { }
+    }
+
+    // Not in the cache. Build it.
+    let methods = traits::supertraits(tcx, trait_ref.clone()).flat_map(|trait_ref| {
+        let vtable = fulfill_obligation(ccx, DUMMY_SP, trait_ref.clone());
+        match vtable {
+            // Should default trait error here?
+            traits::VtableDefaultImpl(_) |
+            traits::VtableBuiltin(_) => {
+                Vec::new().into_iter()
+            }
+            traits::VtableImpl(
+                traits::VtableImplData {
+                    impl_def_id: id,
+                    substs,
+                    nested: _ }) => {
+                let nullptr = C_null(Type::nil(ccx).ptr_to());
+                get_vtable_methods(ccx, id, substs)
+                    .into_iter()
+                    .map(|opt_mth| opt_mth.map_or(nullptr, |mth| {
+                        Callee::def(ccx, mth.method.def_id, &mth.substs).reify(ccx).val
+                    }))
+                    .collect::<Vec<_>>()
+                    .into_iter()
+            }
+            traits::VtableClosure(
+                traits::VtableClosureData {
+                    closure_def_id,
+                    substs,
+                    nested: _ }) => {
+                let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_ref.def_id()).unwrap();
+                let llfn = closure::trans_closure_method(ccx,
+                                                         closure_def_id,
+                                                         substs,
+                                                         trait_closure_kind);
+                vec![llfn].into_iter()
+            }
+            traits::VtableFnPointer(bare_fn_ty) => {
+                let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_ref.def_id()).unwrap();
+                vec![trans_fn_pointer_shim(ccx, trait_closure_kind, bare_fn_ty)].into_iter()
+            }
+            traits::VtableObject(ref data) => {
+                // this would imply that the Self type being erased is
+                // an object type; this cannot happen because we
+                // cannot cast an unsized type into a trait object
+                bug!("cannot get vtable for an object type: {:?}",
+                     data);
+            }
+            traits::VtableParam(..) => {
+                bug!("resolved vtable for {:?} to bad vtable {:?} in trans",
+                     trait_ref,
+                     vtable);
+            }
+        }
+    });
+
+    let size_ty = sizing_type_of(ccx, trait_ref.self_ty());
+    let size = machine::llsize_of_alloc(ccx, size_ty);
+    let align = align_of(ccx, trait_ref.self_ty());
+
+    let components: Vec<_> = vec![
+        // Generate a destructor for the vtable.
+        glue::get_drop_glue(ccx, trait_ref.self_ty()),
+        C_uint(ccx, size),
+        C_uint(ccx, align)
+    ].into_iter().chain(methods).collect();
+
+    let vtable_const = C_struct(ccx, &components, false);
+    let align = machine::llalign_of_pref(ccx, val_ty(vtable_const));
+    let vtable = consts::addr_of(ccx, vtable_const, align, "vtable");
+
+    ccx.vtables().borrow_mut().insert(trait_ref, vtable);
+    vtable
+}
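+
+// Editorial sketch (not part of this commit): for a concrete `Foo` coerced to
+// a `SomeTrait` object, the constant built above is laid out as
+//
+//     [ drop_glue::<Foo>, size_of::<Foo>(), align_of::<Foo>(),
+//       method 0, method 1, ... ]
+//
+// which is why get_virtual_method above indexes at vtable_index + VTABLE_OFFSET.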
+
+pub fn get_vtable_methods<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                    impl_id: DefId,
+                                    substs: &'tcx subst::Substs<'tcx>)
+                                    -> Vec<Option<ImplMethod<'tcx>>>
+{
+    let tcx = ccx.tcx();
+
+    debug!("get_vtable_methods(impl_id={:?}, substs={:?}", impl_id, substs);
+
+    let trt_id = match tcx.impl_trait_ref(impl_id) {
+        Some(t_id) => t_id.def_id,
+        None       => bug!("make_impl_vtable: don't know how to \
+                            make a vtable for a type impl!")
+    };
+
+    tcx.populate_implementations_for_trait_if_necessary(trt_id);
+
+    let trait_item_def_ids = tcx.trait_item_def_ids(trt_id);
+    trait_item_def_ids
+        .iter()
+
+        // Filter out non-method items.
+        .filter_map(|item_def_id| {
+            match *item_def_id {
+                ty::MethodTraitItemId(def_id) => Some(def_id),
+                _ => None,
+            }
+        })
+
+        // Now produce pointers for each remaining method. If the
+        // method could never be called from this object, just supply
+        // null.
+        .map(|trait_method_def_id| {
+            debug!("get_vtable_methods: trait_method_def_id={:?}",
+                   trait_method_def_id);
+
+            let trait_method_type = match tcx.impl_or_trait_item(trait_method_def_id) {
+                ty::MethodTraitItem(m) => m,
+                _ => bug!("should be a method, not other assoc item"),
+            };
+            let name = trait_method_type.name;
+
+            // Some methods cannot be called on an object; skip those.
+            if !traits::is_vtable_safe_method(tcx, trt_id, &trait_method_type) {
+                debug!("get_vtable_methods: not vtable safe");
+                return None;
+            }
+
+            debug!("get_vtable_methods: trait_method_type={:?}",
+                   trait_method_type);
+
+            // the method may have some early-bound lifetimes, add
+            // regions for those
+            let num_dummy_regions = trait_method_type.generics.regions.len(FnSpace);
+            let dummy_regions = vec![ty::ReStatic; num_dummy_regions];
+            let method_substs = substs.clone()
+                                      .with_method(vec![], dummy_regions);
+            let method_substs = tcx.mk_substs(method_substs);
+
+            // The substitutions we have are on the impl, so we grab
+            // the method type from the impl to substitute into.
+            let mth = get_impl_method(tcx, impl_id, method_substs, name);
+
+            debug!("get_vtable_methods: mth={:?}", mth);
+
+            // If this is a default method, it's possible that it
+            // relies on where clauses that do not hold for this
+            // particular set of type parameters. Note that this
+            // method could then never be called, so we do not want to
+            // try and trans it, in that case. Issue #23435.
+            if mth.is_provided {
+                let predicates = mth.method.predicates.predicates.subst(tcx, &mth.substs);
+                if !normalize_and_test_predicates(ccx, predicates.into_vec()) {
+                    debug!("get_vtable_methods: predicates do not hold");
+                    return None;
+                }
+            }
+
+            Some(mth)
+        })
+        .collect()
+}
+
+#[derive(Debug)]
+pub struct ImplMethod<'tcx> {
+    pub method: Rc<ty::Method<'tcx>>,
+    pub substs: &'tcx Substs<'tcx>,
+    pub is_provided: bool
+}
+
+/// Locates the applicable definition of a method, given its name.
+pub fn get_impl_method<'tcx>(tcx: &TyCtxt<'tcx>,
+                             impl_def_id: DefId,
+                             substs: &'tcx Substs<'tcx>,
+                             name: Name)
+                             -> ImplMethod<'tcx>
+{
+    assert!(!substs.types.needs_infer());
+
+    let trait_def_id = tcx.trait_id_of_impl(impl_def_id).unwrap();
+    let trait_def = tcx.lookup_trait_def(trait_def_id);
+    let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables, ProjectionMode::Any);
+
+    match trait_def.ancestors(impl_def_id).fn_defs(tcx, name).next() {
+        Some(node_item) => {
+            ImplMethod {
+                method: node_item.item,
+                substs: traits::translate_substs(&infcx, impl_def_id, substs, node_item.node),
+                is_provided: node_item.node.is_from_trait(),
+            }
+        }
+        None => {
+            bug!("method {:?} not found in {:?}", name, impl_def_id)
+        }
+    }
+}
diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs
new file mode 100644 (file)
index 0000000..f721e88
--- /dev/null
@@ -0,0 +1,129 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! An analysis to determine which temporaries require allocas and
+//! which do not.
+
+use rustc_data_structures::bitvec::BitVector;
+use rustc::mir::repr as mir;
+use rustc::mir::visit::{Visitor, LvalueContext};
+use common::{self, Block};
+use super::rvalue;
+
+pub fn lvalue_temps<'bcx,'tcx>(bcx: Block<'bcx,'tcx>,
+                               mir: &mir::Mir<'tcx>)
+                               -> BitVector {
+    let mut analyzer = TempAnalyzer::new(mir.temp_decls.len());
+
+    analyzer.visit_mir(mir);
+
+    for (index, temp_decl) in mir.temp_decls.iter().enumerate() {
+        let ty = bcx.monomorphize(&temp_decl.ty);
+        debug!("temp {:?} has type {:?}", index, ty);
+        if ty.is_scalar() ||
+            ty.is_unique() ||
+            ty.is_region_ptr() ||
+            ty.is_simd()
+        {
+            // These sorts of types are immediates that we can store
+            // in a ValueRef without an alloca.
+            assert!(common::type_is_immediate(bcx.ccx(), ty) ||
+                    common::type_is_fat_ptr(bcx.tcx(), ty));
+        } else {
+            // These sorts of types require an alloca. Note that
+            // type_is_immediate() may *still* be true, particularly
+            // for newtypes, but we currently force some types
+            // (e.g. structs) into an alloca unconditionally, just so
+            // that we don't have to deal with having two pathways
+            // (gep vs extractvalue etc).
+            analyzer.mark_as_lvalue(index);
+        }
+    }
+
+    analyzer.lvalue_temps
+}
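+
+// Editorial example (assumption, not part of this commit): for MIR along the
+// lines of
+//
+//     tmp0 = const 1i32 + const 2i32   // scalar, assigned once: stays immediate
+//     tmp1 = Pair { x: ..., y: ... }   // aggregate: forced into an alloca
+//     tmp2 = ...; ...; tmp2 = ...      // assigned twice: marked by the visitor
+//
+// only tmp1 and tmp2 end up in the returned BitVector and get stack slots.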
+
+struct TempAnalyzer {
+    lvalue_temps: BitVector,
+    seen_assigned: BitVector
+}
+
+impl TempAnalyzer {
+    fn new(temp_count: usize) -> TempAnalyzer {
+        TempAnalyzer {
+            lvalue_temps: BitVector::new(temp_count),
+            seen_assigned: BitVector::new(temp_count)
+        }
+    }
+
+    fn mark_as_lvalue(&mut self, temp: usize) {
+        debug!("marking temp {} as lvalue", temp);
+        self.lvalue_temps.insert(temp);
+    }
+
+    fn mark_assigned(&mut self, temp: usize) {
+        if !self.seen_assigned.insert(temp) {
+            self.mark_as_lvalue(temp);
+        }
+    }
+}
+
+impl<'tcx> Visitor<'tcx> for TempAnalyzer {
+    fn visit_assign(&mut self,
+                    block: mir::BasicBlock,
+                    lvalue: &mir::Lvalue<'tcx>,
+                    rvalue: &mir::Rvalue<'tcx>) {
+        debug!("visit_assign(block={:?}, lvalue={:?}, rvalue={:?})", block, lvalue, rvalue);
+
+        match *lvalue {
+            mir::Lvalue::Temp(index) => {
+                self.mark_assigned(index as usize);
+                if !rvalue::rvalue_creates_operand(rvalue) {
+                    self.mark_as_lvalue(index as usize);
+                }
+            }
+            _ => {
+                self.visit_lvalue(lvalue, LvalueContext::Store);
+            }
+        }
+
+        self.visit_rvalue(rvalue);
+    }
+
+    fn visit_lvalue(&mut self,
+                    lvalue: &mir::Lvalue<'tcx>,
+                    context: LvalueContext) {
+        debug!("visit_lvalue(lvalue={:?}, context={:?})", lvalue, context);
+
+        match *lvalue {
+            mir::Lvalue::Temp(index) => {
+                match context {
+                    LvalueContext::Call => {
+                        self.mark_assigned(index as usize);
+                    }
+                    LvalueContext::Consume => {
+                    }
+                    LvalueContext::Store |
+                    LvalueContext::Drop |
+                    LvalueContext::Inspect |
+                    LvalueContext::Borrow { .. } |
+                    LvalueContext::Slice { .. } |
+                    LvalueContext::Projection => {
+                        self.mark_as_lvalue(index as usize);
+                    }
+                }
+            }
+            _ => {
+            }
+        }
+
+        self.super_lvalue(lvalue, context);
+    }
+}
diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs
new file mode 100644 (file)
index 0000000..303cf61
--- /dev/null
@@ -0,0 +1,666 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm::{self, BasicBlockRef, ValueRef, OperandBundleDef};
+use rustc::ty;
+use rustc::mir::repr as mir;
+use abi::{Abi, FnType, ArgType};
+use adt;
+use base;
+use build;
+use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual};
+use common::{self, type_is_fat_ptr, Block, BlockAndBuilder, C_undef};
+use debuginfo::DebugLoc;
+use Disr;
+use machine::{llalign_of_min, llbitsize_of_real};
+use meth;
+use type_of;
+use glue;
+use type_::Type;
+
+use super::{MirContext, TempRef, drop};
+use super::lvalue::{LvalueRef, load_fat_ptr};
+use super::operand::OperandRef;
+use super::operand::OperandValue::{self, FatPtr, Immediate, Ref};
+
+impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
+    pub fn trans_block(&mut self, bb: mir::BasicBlock) {
+        debug!("trans_block({:?})", bb);
+
+        let mut bcx = self.bcx(bb);
+        let mir = self.mir.clone();
+        let data = mir.basic_block_data(bb);
+
+        // MSVC SEH bits
+        let (cleanup_pad, cleanup_bundle) = if let Some((cp, cb)) = self.make_cleanup_pad(bb) {
+            (Some(cp), Some(cb))
+        } else {
+            (None, None)
+        };
+        let funclet_br = |bcx: BlockAndBuilder, llbb: BasicBlockRef| if let Some(cp) = cleanup_pad {
+            bcx.cleanup_ret(cp, Some(llbb));
+        } else {
+            bcx.br(llbb);
+        };
+
+        for statement in &data.statements {
+            bcx = self.trans_statement(bcx, statement);
+        }
+
+        debug!("trans_block: terminator: {:?}", data.terminator());
+
+        match data.terminator().kind {
+            mir::TerminatorKind::Resume => {
+                if let Some(cleanup_pad) = cleanup_pad {
+                    bcx.cleanup_ret(cleanup_pad, None);
+                } else {
+                    let ps = self.get_personality_slot(&bcx);
+                    let lp = bcx.load(ps);
+                    bcx.with_block(|bcx| {
+                        base::call_lifetime_end(bcx, ps);
+                        base::trans_unwind_resume(bcx, lp);
+                    });
+                }
+            }
+
+            mir::TerminatorKind::Goto { target } => {
+                funclet_br(bcx, self.llblock(target));
+            }
+
+            mir::TerminatorKind::If { ref cond, targets: (true_bb, false_bb) } => {
+                let cond = self.trans_operand(&bcx, cond);
+                let lltrue = self.llblock(true_bb);
+                let llfalse = self.llblock(false_bb);
+                bcx.cond_br(cond.immediate(), lltrue, llfalse);
+            }
+
+            mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => {
+                let discr_lvalue = self.trans_lvalue(&bcx, discr);
+                let ty = discr_lvalue.ty.to_ty(bcx.tcx());
+                let repr = adt::represent_type(bcx.ccx(), ty);
+                let discr = bcx.with_block(|bcx|
+                    adt::trans_get_discr(bcx, &repr, discr_lvalue.llval, None, true)
+                );
+
+                // The else branch of the Switch can't be hit, so branch to an unreachable
+                // instruction so LLVM knows that
+                let unreachable_blk = self.unreachable_block();
+                let switch = bcx.switch(discr, unreachable_blk.llbb, targets.len());
+                assert_eq!(adt_def.variants.len(), targets.len());
+                for (adt_variant, target) in adt_def.variants.iter().zip(targets) {
+                    let llval = bcx.with_block(|bcx|
+                        adt::trans_case(bcx, &repr, Disr::from(adt_variant.disr_val))
+                    );
+                    let llbb = self.llblock(*target);
+                    build::AddCase(switch, llval, llbb)
+                }
+            }
+
+            mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
+                let (otherwise, targets) = targets.split_last().unwrap();
+                let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
+                let discr = bcx.with_block(|bcx| base::to_immediate(bcx, discr, switch_ty));
+                let switch = bcx.switch(discr, self.llblock(*otherwise), values.len());
+                for (value, target) in values.iter().zip(targets) {
+                    let llval = self.trans_constval(&bcx, value, switch_ty).immediate();
+                    let llbb = self.llblock(*target);
+                    build::AddCase(switch, llval, llbb)
+                }
+            }
+
+            mir::TerminatorKind::Return => {
+                bcx.with_block(|bcx| {
+                    self.fcx.build_return_block(bcx, DebugLoc::None);
+                })
+            }
+
+            mir::TerminatorKind::Drop { ref value, target, unwind } => {
+                let lvalue = self.trans_lvalue(&bcx, value);
+                let ty = lvalue.ty.to_ty(bcx.tcx());
+                // Double check for necessity to drop
+                if !glue::type_needs_drop(bcx.tcx(), ty) {
+                    funclet_br(bcx, self.llblock(target));
+                    return;
+                }
+                let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
+                let drop_ty = glue::get_drop_glue_type(bcx.ccx(), ty);
+                let llvalue = if drop_ty != ty {
+                    bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to())
+                } else {
+                    lvalue.llval
+                };
+                if let Some(unwind) = unwind {
+                    let uwbcx = self.bcx(unwind);
+                    let unwind = self.make_landing_pad(uwbcx);
+                    bcx.invoke(drop_fn,
+                               &[llvalue],
+                               self.llblock(target),
+                               unwind.llbb(),
+                               cleanup_bundle.as_ref());
+                    self.bcx(target).at_start(|bcx| drop::drop_fill(bcx, lvalue.llval, ty));
+                } else {
+                    bcx.call(drop_fn, &[llvalue], cleanup_bundle.as_ref());
+                    drop::drop_fill(&bcx, lvalue.llval, ty);
+                    funclet_br(bcx, self.llblock(target));
+                }
+            }
+
+            mir::TerminatorKind::Call { ref func, ref args, ref destination, ref cleanup } => {
+                // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
+                let callee = self.trans_operand(&bcx, func);
+
+                let (mut callee, abi, sig) = match callee.ty.sty {
+                    ty::TyFnDef(def_id, substs, f) => {
+                        (Callee::def(bcx.ccx(), def_id, substs), f.abi, &f.sig)
+                    }
+                    ty::TyFnPtr(f) => {
+                        (Callee {
+                            data: Fn(callee.immediate()),
+                            ty: callee.ty
+                        }, f.abi, &f.sig)
+                    }
+                    _ => bug!("{} is not callable", callee.ty)
+                };
+
+                let sig = bcx.tcx().erase_late_bound_regions(sig);
+
+                // Handle intrinsics old trans wants Expr's for, ourselves.
+                let intrinsic = match (&callee.ty.sty, &callee.data) {
+                    (&ty::TyFnDef(def_id, _, _), &Intrinsic) => {
+                        Some(bcx.tcx().item_name(def_id).as_str())
+                    }
+                    _ => None
+                };
+                let intrinsic = intrinsic.as_ref().map(|s| &s[..]);
+
+                if intrinsic == Some("move_val_init") {
+                    let &(_, target) = destination.as_ref().unwrap();
+                    // The first argument is a thin destination pointer.
+                    let llptr = self.trans_operand(&bcx, &args[0]).immediate();
+                    let val = self.trans_operand(&bcx, &args[1]);
+                    self.store_operand(&bcx, llptr, val);
+                    self.set_operand_dropped(&bcx, &args[1]);
+                    funclet_br(bcx, self.llblock(target));
+                    return;
+                }
+
+                if intrinsic == Some("transmute") {
+                    let &(ref dest, target) = destination.as_ref().unwrap();
+                    self.with_lvalue_ref(&bcx, dest, |this, dest| {
+                        this.trans_transmute(&bcx, &args[0], dest);
+                    });
+
+                    self.set_operand_dropped(&bcx, &args[0]);
+                    funclet_br(bcx, self.llblock(target));
+                    return;
+                }
+
+                let extra_args = &args[sig.inputs.len()..];
+                let extra_args = extra_args.iter().map(|op_arg| {
+                    self.mir.operand_ty(bcx.tcx(), op_arg)
+                }).collect::<Vec<_>>();
+                let fn_ty = callee.direct_fn_type(bcx.ccx(), &extra_args);
+
+                // The arguments we'll be passing. Plus one to account for outptr, if used.
+                let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize;
+                let mut llargs = Vec::with_capacity(arg_count);
+
+                // Prepare the return value destination
+                let ret_dest = if let Some((ref dest, _)) = *destination {
+                    let is_intrinsic = if let Intrinsic = callee.data {
+                        true
+                    } else {
+                        false
+                    };
+                    self.make_return_dest(&bcx, dest, &fn_ty.ret, &mut llargs, is_intrinsic)
+                } else {
+                    ReturnDest::Nothing
+                };
+
+                // Split the rust-call tupled arguments off.
+                let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
+                    let (tup, args) = args.split_last().unwrap();
+                    (args, Some(tup))
+                } else {
+                    (&args[..], None)
+                };
+
+                let mut idx = 0;
+                for arg in first_args {
+                    let val = self.trans_operand(&bcx, arg).val;
+                    self.trans_argument(&bcx, val, &mut llargs, &fn_ty,
+                                        &mut idx, &mut callee.data);
+                }
+                if let Some(tup) = untuple {
+                    self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty,
+                                                  &mut idx, &mut callee.data)
+                }
+
+                let fn_ptr = match callee.data {
+                    NamedTupleConstructor(_) => {
+                        // FIXME translate this like mir::Rvalue::Aggregate.
+                        callee.reify(bcx.ccx()).val
+                    }
+                    Intrinsic => {
+                        use callee::ArgVals;
+                        use expr::{Ignore, SaveIn};
+                        use intrinsic::trans_intrinsic_call;
+
+                        let (dest, llargs) = match ret_dest {
+                            _ if fn_ty.ret.is_indirect() => {
+                                (SaveIn(llargs[0]), &llargs[1..])
+                            }
+                            ReturnDest::Nothing => (Ignore, &llargs[..]),
+                            ReturnDest::IndirectOperand(dst, _) |
+                            ReturnDest::Store(dst) => (SaveIn(dst), &llargs[..]),
+                            ReturnDest::DirectOperand(_) =>
+                                bug!("Cannot use direct operand with an intrinsic call")
+                        };
+
+                        bcx.with_block(|bcx| {
+                            trans_intrinsic_call(bcx, callee.ty, &fn_ty,
+                                                           ArgVals(llargs), dest,
+                                                           DebugLoc::None);
+                        });
+
+                        if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
+                            // Make a fake operand for store_return
+                            let op = OperandRef {
+                                val: OperandValue::Ref(dst),
+                                ty: sig.output.unwrap()
+                            };
+                            self.store_return(&bcx, ret_dest, fn_ty.ret, op);
+                        }
+
+                        if let Some((_, target)) = *destination {
+                            for op in args {
+                                self.set_operand_dropped(&bcx, op);
+                            }
+                            funclet_br(bcx, self.llblock(target));
+                        } else {
+                            // trans_intrinsic_call already used Unreachable.
+                            // bcx.unreachable();
+                        }
+
+                        return;
+                    }
+                    Fn(f) => f,
+                    Virtual(_) => bug!("Virtual fn ptr not extracted")
+                };
+
+                // Many different ways of calling a function are handled here
+                if let Some(cleanup) = cleanup.map(|bb| self.bcx(bb)) {
+                    let ret_bcx = if let Some((_, target)) = *destination {
+                        self.blocks[target.index()]
+                    } else {
+                        self.unreachable_block()
+                    };
+                    let landingpad = self.make_landing_pad(cleanup);
+
+                    let invokeret = bcx.invoke(fn_ptr,
+                                               &llargs,
+                                               ret_bcx.llbb,
+                                               landingpad.llbb(),
+                                               cleanup_bundle.as_ref());
+                    fn_ty.apply_attrs_callsite(invokeret);
+
+                    landingpad.at_start(|bcx| for op in args {
+                        self.set_operand_dropped(bcx, op);
+                    });
+
+                    if destination.is_some() {
+                        let ret_bcx = ret_bcx.build();
+                        ret_bcx.at_start(|ret_bcx| {
+                            let op = OperandRef {
+                                val: OperandValue::Immediate(invokeret),
+                                ty: sig.output.unwrap()
+                            };
+                            self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op);
+                            for op in args {
+                                self.set_operand_dropped(&ret_bcx, op);
+                            }
+                        });
+                    }
+                } else {
+                    let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle.as_ref());
+                    fn_ty.apply_attrs_callsite(llret);
+                    if let Some((_, target)) = *destination {
+                        let op = OperandRef {
+                            val: OperandValue::Immediate(llret),
+                            ty: sig.output.unwrap()
+                        };
+                        self.store_return(&bcx, ret_dest, fn_ty.ret, op);
+                        for op in args {
+                            self.set_operand_dropped(&bcx, op);
+                        }
+                        funclet_br(bcx, self.llblock(target));
+                    } else {
+                        // no need to drop args, because the call never returns
+                        bcx.unreachable();
+                    }
+                }
+            }
+        }
+    }
+
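+    /// Translate one call argument into the form required by the callee's ABI and
+    /// push the resulting LLVM value(s) onto `llargs`: fat pointers are split into
+    /// their data and metadata words, padding is filled with `undef`, ignored
+    /// arguments are skipped, and values are spilled to or loaded from the stack
+    /// as the argument's `ArgType` demands.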
+    fn trans_argument(&mut self,
+                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                      val: OperandValue,
+                      llargs: &mut Vec<ValueRef>,
+                      fn_ty: &FnType,
+                      next_idx: &mut usize,
+                      callee: &mut CalleeData) {
+        // Treat the values in a fat pointer separately.
+        if let FatPtr(ptr, meta) = val {
+            if *next_idx == 0 {
+                if let Virtual(idx) = *callee {
+                    let llfn = bcx.with_block(|bcx| {
+                        meth::get_virtual_method(bcx, meta, idx)
+                    });
+                    let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
+                    *callee = Fn(bcx.pointercast(llfn, llty));
+                }
+            }
+            self.trans_argument(bcx, Immediate(ptr), llargs, fn_ty, next_idx, callee);
+            self.trans_argument(bcx, Immediate(meta), llargs, fn_ty, next_idx, callee);
+            return;
+        }
+
+        let arg = &fn_ty.args[*next_idx];
+        *next_idx += 1;
+
+        // Fill padding with undef value, where applicable.
+        if let Some(ty) = arg.pad {
+            llargs.push(C_undef(ty));
+        }
+
+        if arg.is_ignore() {
+            return;
+        }
+
+        // Force by-ref if we have to load through a cast pointer.
+        let (mut llval, by_ref) = match val {
+            Immediate(llval) if arg.is_indirect() || arg.cast.is_some() => {
+                let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg");
+                bcx.store(llval, llscratch);
+                (llscratch, true)
+            }
+            Immediate(llval) => (llval, false),
+            Ref(llval) => (llval, true),
+            FatPtr(_, _) => bug!("fat pointers handled above")
+        };
+
+        if by_ref && !arg.is_indirect() {
+            // Have to load the argument, maybe while casting it.
+            if arg.original_ty == Type::i1(bcx.ccx()) {
+                // We store bools as i8 so we need to truncate to i1.
+                llval = bcx.load_range_assert(llval, 0, 2, llvm::False);
+                llval = bcx.trunc(llval, arg.original_ty);
+            } else if let Some(ty) = arg.cast {
+                llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()));
+                let llalign = llalign_of_min(bcx.ccx(), arg.ty);
+                unsafe {
+                    llvm::LLVMSetAlignment(llval, llalign);
+                }
+            } else {
+                llval = bcx.load(llval);
+            }
+        }
+
+        llargs.push(llval);
+    }
+
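+    /// Pass the fields of the tupled operand (the trailing argument of a
+    /// "rust-call" ABI function) as individual arguments.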
+    fn trans_arguments_untupled(&mut self,
+                                bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                                operand: &mir::Operand<'tcx>,
+                                llargs: &mut Vec<ValueRef>,
+                                fn_ty: &FnType,
+                                next_idx: &mut usize,
+                                callee: &mut CalleeData) {
+        // FIXME: consider having some optimization to avoid tupling/untupling
+        // (and storing/loading in the case of immediates)
+
+        // Avoid going through trans_operand, which would force a pointless copy
+        let lv = match *operand {
+            mir::Operand::Consume(ref lvalue) => self.trans_lvalue(bcx, lvalue),
+            mir::Operand::Constant(ref constant) => {
+                // FIXME: consider being less pessimized
+                if constant.ty.is_nil() {
+                    return;
+                }
+
+                let ty = bcx.monomorphize(&constant.ty);
+                let lv = LvalueRef::alloca(bcx, ty, "__untuple_alloca");
+                let constant = self.trans_constant(bcx, constant);
+                self.store_operand(bcx, lv.llval, constant);
+                lv
+            }
+        };
+
+        let lv_ty = lv.ty.to_ty(bcx.tcx());
+        let result_types = match lv_ty.sty {
+            ty::TyTuple(ref tys) => tys,
+            _ => span_bug!(
+                self.mir.span,
+                "bad final argument to \"rust-call\" fn {:?}", lv_ty)
+        };
+
+        let base_repr = adt::represent_type(bcx.ccx(), lv_ty);
+        let base = adt::MaybeSizedValue::sized(lv.llval);
+        for (n, &ty) in result_types.iter().enumerate() {
+            let ptr = adt::trans_field_ptr_builder(bcx, &base_repr, base, Disr(0), n);
+            let val = if common::type_is_fat_ptr(bcx.tcx(), ty) {
+                let (lldata, llextra) = load_fat_ptr(bcx, ptr);
+                FatPtr(lldata, llextra)
+            } else {
+                // Don't bother loading the value, trans_argument will.
+                Ref(ptr)
+            };
+            self.trans_argument(bcx, val, llargs, fn_ty, next_idx, callee);
+        }
+    }
+
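+    /// Return the alloca used to stash the personality value during unwinding,
+    /// creating it lazily on first use.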
+    fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRef {
+        let ccx = bcx.ccx();
+        if let Some(slot) = self.llpersonalityslot {
+            slot
+        } else {
+            let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
+            bcx.with_block(|bcx| {
+                let slot = base::alloca(bcx, llretty, "personalityslot");
+                self.llpersonalityslot = Some(slot);
+                base::call_lifetime_start(bcx, slot);
+                slot
+            })
+        }
+    }
+
+    /// Create a landingpad wrapper around the given Block.
+    ///
+    /// No-op in MSVC SEH scheme.
+    fn make_landing_pad(&mut self,
+                        cleanup: BlockAndBuilder<'bcx, 'tcx>)
+                        -> BlockAndBuilder<'bcx, 'tcx>
+    {
+        if base::wants_msvc_seh(cleanup.sess()) {
+            return cleanup;
+        }
+        let bcx = self.fcx.new_block("cleanup", None).build();
+        let ccx = bcx.ccx();
+        let llpersonality = self.fcx.eh_personality();
+        let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
+        let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn);
+        bcx.set_cleanup(llretval);
+        let slot = self.get_personality_slot(&bcx);
+        bcx.store(llretval, slot);
+        bcx.br(cleanup.llbb());
+        bcx
+    }
+
+    /// Create a prologue `cleanuppad` instruction under the MSVC SEH handling scheme.
+    ///
+    /// Also handles setting some state for the original trans and creating an operand bundle for
+    /// function calls.
+    fn make_cleanup_pad(&mut self, bb: mir::BasicBlock) -> Option<(ValueRef, OperandBundleDef)> {
+        let bcx = self.bcx(bb);
+        let data = self.mir.basic_block_data(bb);
+        let use_funclets = base::wants_msvc_seh(bcx.sess()) && data.is_cleanup;
+        let cleanup_pad = if use_funclets {
+            bcx.set_personality_fn(self.fcx.eh_personality());
+            bcx.at_start(|bcx| Some(bcx.cleanup_pad(None, &[])))
+        } else {
+            None
+        };
+        // Set the landingpad global-state for old translator, so it knows about the SEH used.
+        bcx.set_lpad(if let Some(cleanup_pad) = cleanup_pad {
+            Some(common::LandingPad::msvc(cleanup_pad))
+        } else if data.is_cleanup {
+            Some(common::LandingPad::gnu())
+        } else {
+            None
+        });
+        cleanup_pad.map(|f| (f, OperandBundleDef::new("funclet", &[f])))
+    }
+
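+    /// Return a block containing only an `unreachable` terminator, creating
+    /// and caching it on first request.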
+    fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> {
+        self.unreachable_block.unwrap_or_else(|| {
+            let bl = self.fcx.new_block("unreachable", None);
+            bl.build().unreachable();
+            self.unreachable_block = Some(bl);
+            bl
+        })
+    }
+
+    fn bcx(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'bcx, 'tcx> {
+        self.blocks[bb.index()].build()
+    }
+
+    pub fn llblock(&self, bb: mir::BasicBlock) -> BasicBlockRef {
+        self.blocks[bb.index()].llbb
+    }
+
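+    /// Decide where the return value of the call currently being translated
+    /// should be stored, pushing an out-pointer onto `llargs` when the ABI
+    /// returns indirectly.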
+    fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                        dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType,
+                        llargs: &mut Vec<ValueRef>, is_intrinsic: bool) -> ReturnDest {
+        // If the return is ignored, we can just return a do-nothing ReturnDest
+        if fn_ret_ty.is_ignore() {
+            return ReturnDest::Nothing;
+        }
+        let dest = match *dest {
+            mir::Lvalue::Temp(idx) => {
+                let lvalue_ty = self.mir.lvalue_ty(bcx.tcx(), dest);
+                let lvalue_ty = bcx.monomorphize(&lvalue_ty);
+                let ret_ty = lvalue_ty.to_ty(bcx.tcx());
+                match self.temps[idx as usize] {
+                    TempRef::Lvalue(dest) => dest,
+                    TempRef::Operand(None) => {
+                        // Handle temporary lvalues, specifically Operand ones, as
+                        // they don't have allocas
+                        return if fn_ret_ty.is_indirect() {
+                            // Odd, but possible, case: we have an operand temporary,
+                            // but the calling convention has an indirect return.
+                            let tmp = bcx.with_block(|bcx| {
+                                base::alloc_ty(bcx, ret_ty, "tmp_ret")
+                            });
+                            llargs.push(tmp);
+                            ReturnDest::IndirectOperand(tmp, idx)
+                        } else if is_intrinsic {
+                            // Currently, intrinsics always need a location to store
+                            // the result, so we create a temporary alloca for it.
+                            let tmp = bcx.with_block(|bcx| {
+                                base::alloc_ty(bcx, ret_ty, "tmp_ret")
+                            });
+                            ReturnDest::IndirectOperand(tmp, idx)
+                        } else {
+                            ReturnDest::DirectOperand(idx)
+                        };
+                    }
+                    TempRef::Operand(Some(_)) => {
+                        bug!("lvalue temp already assigned to");
+                    }
+                }
+            }
+            _ => self.trans_lvalue(bcx, dest)
+        };
+        if fn_ret_ty.is_indirect() {
+            llargs.push(dest.llval);
+            ReturnDest::Nothing
+        } else {
+            ReturnDest::Store(dest.llval)
+        }
+    }
+
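+    /// Translate a `transmute` call by storing the source operand through a
+    /// pointer cast to the destination's LLVM type.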
+    fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                       src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) {
+        let mut val = self.trans_operand(bcx, src);
+        if let ty::TyFnDef(def_id, substs, _) = val.ty.sty {
+            let llouttype = type_of::type_of(bcx.ccx(), dst.ty.to_ty(bcx.tcx()));
+            let out_type_size = llbitsize_of_real(bcx.ccx(), llouttype);
+            if out_type_size != 0 {
+                // FIXME #19925 Remove this hack after a release cycle.
+                let f = Callee::def(bcx.ccx(), def_id, substs);
+                let datum = f.reify(bcx.ccx());
+                val = OperandRef {
+                    val: OperandValue::Immediate(datum.val),
+                    ty: datum.ty
+                };
+            }
+        }
+
+        let llty = type_of::type_of(bcx.ccx(), val.ty);
+        let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
+        self.store_operand(bcx, cast_ptr, val);
+    }
+
+    // Stores the return value of a function call into its final location.
+    fn store_return(&mut self,
+                    bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                    dest: ReturnDest,
+                    ret_ty: ArgType,
+                    op: OperandRef<'tcx>) {
+        use self::ReturnDest::*;
+
+        match dest {
+            Nothing => (),
+            Store(dst) => ret_ty.store(bcx, op.immediate(), dst),
+            IndirectOperand(tmp, idx) => {
+                let op = self.trans_load(bcx, tmp, op.ty);
+                self.temps[idx as usize] = TempRef::Operand(Some(op));
+            }
+            DirectOperand(idx) => {
+                let op = if type_is_fat_ptr(bcx.tcx(), op.ty) {
+                    let llval = op.immediate();
+                    let ptr = bcx.extract_value(llval, 0);
+                    let meta = bcx.extract_value(llval, 1);
+
+                    OperandRef {
+                        val: OperandValue::FatPtr(ptr, meta),
+                        ty: op.ty
+                    }
+                } else {
+                    op
+                };
+                self.temps[idx as usize] = TempRef::Operand(Some(op));
+            }
+        }
+    }
+}
+
+enum ReturnDest {
+    // Do nothing; the return value is indirect or ignored
+    Nothing,
+    // Store the return value to the pointer
+    Store(ValueRef),
+    // Stores an indirect return value to an operand temporary lvalue
+    IndirectOperand(ValueRef, u32),
+    // Stores a direct return value to an operand temporary lvalue
+    DirectOperand(u32)
+}
diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs
new file mode 100644 (file)
index 0000000..cf85595
--- /dev/null
@@ -0,0 +1,143 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm::ValueRef;
+use rustc::ty::{Ty, TypeFoldable};
+use rustc::middle::const_val::ConstVal;
+use rustc_const_math::ConstInt::*;
+use rustc_const_eval::lookup_const_by_id;
+use rustc::mir::repr as mir;
+use abi;
+use common::{self, BlockAndBuilder, C_bool, C_bytes, C_floating_f64, C_integral,
+                    C_str_slice, C_undef};
+use consts;
+use datum;
+use expr;
+use type_of;
+use type_::Type;
+
+use super::operand::{OperandRef, OperandValue};
+use super::MirContext;
+
+
+impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
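+    /// Translate a `ConstVal` into an `OperandRef`, picking the immediate,
+    /// fat-pointer or by-ref representation based on the constant's type.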
+    pub fn trans_constval(&mut self,
+                          bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                          cv: &ConstVal,
+                          ty: Ty<'tcx>)
+                          -> OperandRef<'tcx>
+    {
+        let ccx = bcx.ccx();
+        let val = self.trans_constval_inner(bcx, cv, ty);
+        let val = if common::type_is_immediate(ccx, ty) {
+            OperandValue::Immediate(val)
+        } else if common::type_is_fat_ptr(bcx.tcx(), ty) {
+            let data = common::const_get_elt(val, &[abi::FAT_PTR_ADDR as u32]);
+            let extra = common::const_get_elt(val, &[abi::FAT_PTR_EXTRA as u32]);
+            OperandValue::FatPtr(data, extra)
+        } else {
+            OperandValue::Ref(val)
+        };
+
+        assert!(!ty.has_erasable_regions());
+
+        OperandRef {
+            ty: ty,
+            val: val
+        }
+    }
+
+    /// Translate ConstVal into a bare LLVM ValueRef.
+    fn trans_constval_inner(&mut self,
+                            bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                            cv: &ConstVal,
+                            ty: Ty<'tcx>)
+                            -> ValueRef
+    {
+        let ccx = bcx.ccx();
+        let llty = type_of::type_of(ccx, ty);
+        match *cv {
+            ConstVal::Float(v) => C_floating_f64(v, llty),
+            ConstVal::Bool(v) => C_bool(ccx, v),
+            ConstVal::Integral(I8(v)) => C_integral(Type::i8(ccx), v as u64, true),
+            ConstVal::Integral(I16(v)) => C_integral(Type::i16(ccx), v as u64, true),
+            ConstVal::Integral(I32(v)) => C_integral(Type::i32(ccx), v as u64, true),
+            ConstVal::Integral(I64(v)) => C_integral(Type::i64(ccx), v as u64, true),
+            ConstVal::Integral(Isize(v)) => {
+                let i = v.as_i64(ccx.tcx().sess.target.int_type);
+                C_integral(Type::int(ccx), i as u64, true)
+            },
+            ConstVal::Integral(U8(v)) => C_integral(Type::i8(ccx), v as u64, false),
+            ConstVal::Integral(U16(v)) => C_integral(Type::i16(ccx), v as u64, false),
+            ConstVal::Integral(U32(v)) => C_integral(Type::i32(ccx), v as u64, false),
+            ConstVal::Integral(U64(v)) => C_integral(Type::i64(ccx), v, false),
+            ConstVal::Integral(Usize(v)) => {
+                let u = v.as_u64(ccx.tcx().sess.target.uint_type);
+                C_integral(Type::int(ccx), u, false)
+            },
+            ConstVal::Integral(Infer(v)) => C_integral(llty, v as u64, false),
+            ConstVal::Integral(InferSigned(v)) => C_integral(llty, v as u64, true),
+            ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()),
+            ConstVal::ByteStr(ref v) => consts::addr_of(ccx, C_bytes(ccx, v), 1, "byte_str"),
+            ConstVal::Struct(_) | ConstVal::Tuple(_) |
+            ConstVal::Array(..) | ConstVal::Repeat(..) |
+            ConstVal::Function(_) => {
+                bug!("MIR must not use {:?} (which refers to a local ID)", cv)
+            }
+            ConstVal::Char(c) => C_integral(Type::char(ccx), c as u64, false),
+            ConstVal::Dummy => bug!(),
+        }
+    }
+
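+    /// Translate a MIR constant: item constants fall back to HIR constant
+    /// evaluation and translation, while literal values go through
+    /// `trans_constval`.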
+    pub fn trans_constant(&mut self,
+                          bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                          constant: &mir::Constant<'tcx>)
+                          -> OperandRef<'tcx>
+    {
+        let ty = bcx.monomorphize(&constant.ty);
+        match constant.literal {
+            mir::Literal::Item { def_id, substs } => {
+                // Shortcut for zero-sized types, including function item
+                // types, which would not work with lookup_const_by_id.
+                if common::type_is_zero_size(bcx.ccx(), ty) {
+                    let llty = type_of::type_of(bcx.ccx(), ty);
+                    return OperandRef {
+                        val: OperandValue::Immediate(C_undef(llty)),
+                        ty: ty
+                    };
+                }
+
+                let substs = Some(bcx.monomorphize(substs));
+                let expr = lookup_const_by_id(bcx.tcx(), def_id, substs)
+                            .expect("def was const, but lookup_const_by_id failed").0;
+                // FIXME: this is falling back to translating from HIR. This is not easy to fix,
+                // because we would have to somehow adapt const_eval to work on MIR rather than HIR.
+                let d = bcx.with_block(|bcx| {
+                    expr::trans(bcx, expr)
+                });
+
+                let datum = d.datum.to_rvalue_datum(d.bcx, "").datum;
+
+                match datum.kind.mode {
+                    datum::RvalueMode::ByValue => {
+                        OperandRef {
+                            ty: datum.ty,
+                            val: OperandValue::Immediate(datum.val)
+                        }
+                    }
+                    datum::RvalueMode::ByRef => self.trans_load(bcx, datum.val, datum.ty)
+                }
+            }
+            mir::Literal::Value { ref value } => {
+                self.trans_constval(bcx, value, ty)
+            }
+        }
+    }
+}
diff --git a/src/librustc_trans/mir/drop.rs b/src/librustc_trans/mir/drop.rs
new file mode 100644 (file)
index 0000000..623cd5a
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm::ValueRef;
+use rustc::ty::Ty;
+use adt;
+use base;
+use common::{self, BlockAndBuilder};
+use machine;
+use type_of;
+use type_::Type;
+
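+/// Overwrite the memory behind `value` with the "dropped" filler byte
+/// (`adt::DTOR_DONE`) so that the value is treated as already dropped.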
+pub fn drop_fill<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, value: ValueRef, ty: Ty<'tcx>) {
+    let llty = type_of::type_of(bcx.ccx(), ty);
+    let llptr = bcx.pointercast(value, Type::i8(bcx.ccx()).ptr_to());
+    let filling = common::C_u8(bcx.ccx(), adt::DTOR_DONE);
+    let size = machine::llsize_of(bcx.ccx(), llty);
+    let align = common::C_u32(bcx.ccx(), machine::llalign_of_min(bcx.ccx(), llty));
+    base::call_memset(&bcx, llptr, filling, size, align, false);
+}
diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs
new file mode 100644 (file)
index 0000000..695806a
--- /dev/null
@@ -0,0 +1,264 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm::ValueRef;
+use rustc::ty::{self, Ty, TypeFoldable};
+use rustc::mir::repr as mir;
+use rustc::mir::tcx::LvalueTy;
+use abi;
+use adt;
+use base;
+use builder::Builder;
+use common::{self, BlockAndBuilder, C_uint};
+use consts;
+use machine;
+use mir::drop;
+use llvm;
+use Disr;
+
+use std::ptr;
+
+use super::{MirContext, TempRef};
+
+#[derive(Copy, Clone)]
+pub struct LvalueRef<'tcx> {
+    /// Pointer to the contents of the lvalue
+    pub llval: ValueRef,
+
+    /// This lvalue's extra data if it is unsized, or null
+    pub llextra: ValueRef,
+
+    /// Monomorphized type of this lvalue, including variant information
+    pub ty: LvalueTy<'tcx>,
+}
+
+impl<'tcx> LvalueRef<'tcx> {
+    pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> {
+        LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
+    }
+
+    pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                        ty: Ty<'tcx>,
+                        name: &str)
+                        -> LvalueRef<'tcx>
+    {
+        assert!(!ty.has_erasable_regions());
+        let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name));
+        if bcx.fcx().type_needs_drop(ty) {
+            drop::drop_fill(bcx, lltemp, ty);
+        }
+        LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))
+    }
+}
+
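+// Helpers for addressing and loading the data pointer and the extra (metadata)
+// word of a fat pointer.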
+pub fn get_meta(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
+    b.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA)
+}
+
+pub fn get_dataptr(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
+    b.struct_gep(fat_ptr, abi::FAT_PTR_ADDR)
+}
+
+pub fn load_fat_ptr(b: &Builder, fat_ptr: ValueRef) -> (ValueRef, ValueRef) {
+    (b.load(get_dataptr(b, fat_ptr)), b.load(get_meta(b, fat_ptr)))
+}
+
+impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
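+    /// Return the dynamic length of an lvalue: a constant for fixed-size
+    /// arrays, or the `llextra` metadata word for slices and `str`.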
+    pub fn lvalue_len(&mut self,
+                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                      lvalue: LvalueRef<'tcx>)
+                      -> ValueRef {
+        match lvalue.ty.to_ty(bcx.tcx()).sty {
+            ty::TyArray(_, n) => common::C_uint(bcx.ccx(), n),
+            ty::TySlice(_) | ty::TyStr => {
+                assert!(lvalue.llextra != ptr::null_mut());
+                lvalue.llextra
+            }
+            _ => bug!("unexpected type in lvalue_len"),
+        }
+    }
+
+    pub fn trans_lvalue(&mut self,
+                        bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                        lvalue: &mir::Lvalue<'tcx>)
+                        -> LvalueRef<'tcx> {
+        debug!("trans_lvalue(lvalue={:?})", lvalue);
+
+        let fcx = bcx.fcx();
+        let ccx = bcx.ccx();
+        let tcx = bcx.tcx();
+        match *lvalue {
+            mir::Lvalue::Var(index) => self.vars[index as usize],
+            mir::Lvalue::Temp(index) => match self.temps[index as usize] {
+                TempRef::Lvalue(lvalue) =>
+                    lvalue,
+                TempRef::Operand(..) =>
+                    bug!("using operand temp {:?} as lvalue", lvalue),
+            },
+            mir::Lvalue::Arg(index) => self.args[index as usize],
+            mir::Lvalue::Static(def_id) => {
+                let const_ty = self.mir.lvalue_ty(tcx, lvalue);
+                LvalueRef::new_sized(consts::get_static(ccx, def_id).val, const_ty)
+            },
+            mir::Lvalue::ReturnPointer => {
+                let llval = if !fcx.fn_ty.ret.is_ignore() {
+                    bcx.with_block(|bcx| {
+                        fcx.get_ret_slot(bcx, "")
+                    })
+                } else {
+                    // This is a void return; that is, there’s no place to store the value and
+                    // there cannot really be one (or storing into it doesn’t make sense, anyway).
+                    // Ergo, we return an undef ValueRef, so we do not have to special-case every
+                    // place using lvalues, and could use it the same way you use a regular
+                    // ReturnPointer Lvalue (i.e. store into it, load from it, etc.).
+                    let llty = fcx.fn_ty.ret.original_ty.ptr_to();
+                    unsafe {
+                        llvm::LLVMGetUndef(llty.to_ref())
+                    }
+                };
+                let fn_return_ty = bcx.monomorphize(&self.mir.return_ty);
+                let return_ty = fn_return_ty.unwrap();
+                LvalueRef::new_sized(llval, LvalueTy::from_ty(return_ty))
+            },
+            mir::Lvalue::Projection(ref projection) => {
+                let tr_base = self.trans_lvalue(bcx, &projection.base);
+                let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
+                let projected_ty = bcx.monomorphize(&projected_ty);
+
+                let project_index = |llindex| {
+                    let element = if let ty::TySlice(_) = tr_base.ty.to_ty(tcx).sty {
+                        // Slices already point to the array element type.
+                        bcx.inbounds_gep(tr_base.llval, &[llindex])
+                    } else {
+                        let zero = common::C_uint(bcx.ccx(), 0u64);
+                        bcx.inbounds_gep(tr_base.llval, &[zero, llindex])
+                    };
+                    (element, ptr::null_mut())
+                };
+
+                let (llprojected, llextra) = match projection.elem {
+                    mir::ProjectionElem::Deref => {
+                        let base_ty = tr_base.ty.to_ty(tcx);
+                        if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) {
+                            (base::load_ty_builder(bcx, tr_base.llval, base_ty),
+                             ptr::null_mut())
+                        } else {
+                            load_fat_ptr(bcx, tr_base.llval)
+                        }
+                    }
+                    mir::ProjectionElem::Field(ref field, _) => {
+                        let base_ty = tr_base.ty.to_ty(tcx);
+                        let base_repr = adt::represent_type(ccx, base_ty);
+                        let discr = match tr_base.ty {
+                            LvalueTy::Ty { .. } => 0,
+                            LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v,
+                        };
+                        let discr = discr as u64;
+                        let is_sized = common::type_is_sized(tcx, projected_ty.to_ty(tcx));
+                        let base = if is_sized {
+                            adt::MaybeSizedValue::sized(tr_base.llval)
+                        } else {
+                            adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra)
+                        };
+                        let llprojected = adt::trans_field_ptr_builder(bcx, &base_repr, base,
+                                                                       Disr(discr), field.index());
+                        let llextra = if is_sized {
+                            ptr::null_mut()
+                        } else {
+                            tr_base.llextra
+                        };
+                        (llprojected, llextra)
+                    }
+                    mir::ProjectionElem::Index(ref index) => {
+                        let index = self.trans_operand(bcx, index);
+                        project_index(self.prepare_index(bcx, index.immediate()))
+                    }
+                    mir::ProjectionElem::ConstantIndex { offset,
+                                                         from_end: false,
+                                                         min_length: _ } => {
+                        let lloffset = C_uint(bcx.ccx(), offset);
+                        project_index(self.prepare_index(bcx, lloffset))
+                    }
+                    mir::ProjectionElem::ConstantIndex { offset,
+                                                         from_end: true,
+                                                         min_length: _ } => {
+                        let lloffset = C_uint(bcx.ccx(), offset);
+                        let lllen = self.lvalue_len(bcx, tr_base);
+                        let llindex = bcx.sub(lllen, lloffset);
+                        project_index(self.prepare_index(bcx, llindex))
+                    }
+                    mir::ProjectionElem::Downcast(..) => {
+                        (tr_base.llval, tr_base.llextra)
+                    }
+                };
+                LvalueRef {
+                    llval: llprojected,
+                    llextra: llextra,
+                    ty: projected_ty,
+                }
+            }
+        }
+    }
+
+    // Perform an action using the given Lvalue.
+    // If the Lvalue is an empty TempRef::Operand, then a temporary stack slot
+    // is created first, then used as an operand to update the Lvalue.
+    pub fn with_lvalue_ref<F, U>(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                                 lvalue: &mir::Lvalue<'tcx>, f: F) -> U
+    where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U
+    {
+        match *lvalue {
+            mir::Lvalue::Temp(idx) => {
+                match self.temps[idx as usize] {
+                    TempRef::Lvalue(lvalue) => f(self, lvalue),
+                    TempRef::Operand(None) => {
+                        let lvalue_ty = self.mir.lvalue_ty(bcx.tcx(), lvalue);
+                        let lvalue_ty = bcx.monomorphize(&lvalue_ty);
+                        let lvalue = LvalueRef::alloca(bcx,
+                                                       lvalue_ty.to_ty(bcx.tcx()),
+                                                       "lvalue_temp");
+                        let ret = f(self, lvalue);
+                        let op = self.trans_load(bcx, lvalue.llval, lvalue_ty.to_ty(bcx.tcx()));
+                        self.temps[idx as usize] = TempRef::Operand(Some(op));
+                        ret
+                    }
+                    TempRef::Operand(Some(_)) => {
+                        bug!("Lvalue temp already set");
+                    }
+                }
+            }
+            _ => {
+                let lvalue = self.trans_lvalue(bcx, lvalue);
+                f(self, lvalue)
+            }
+        }
+    }
+
+    /// Adjust the bitwidth of an index since LLVM is less forgiving
+    /// than we are.
+    ///
+    /// nmatsakis: is this still necessary? Not sure.
+    fn prepare_index(&mut self,
+                     bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                     llindex: ValueRef)
+                     -> ValueRef
+    {
+        let ccx = bcx.ccx();
+        let index_size = machine::llbitsize_of_real(bcx.ccx(), common::val_ty(llindex));
+        let int_size = machine::llbitsize_of_real(bcx.ccx(), ccx.int_type());
+        if index_size < int_size {
+            bcx.zext(llindex, ccx.int_type())
+        } else if index_size > int_size {
+            bcx.trunc(llindex, ccx.int_type())
+        } else {
+            llindex
+        }
+    }
+}
diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs
new file mode 100644 (file)
index 0000000..7e44b72
--- /dev/null
@@ -0,0 +1,275 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use libc::c_uint;
+use llvm::{self, ValueRef};
+use rustc::ty;
+use rustc::mir::repr as mir;
+use rustc::mir::tcx::LvalueTy;
+use base;
+use common::{self, Block, BlockAndBuilder, FunctionContext};
+
+use std::ops::Deref;
+use std::rc::Rc;
+
+use basic_block::BasicBlock;
+
+use rustc_data_structures::bitvec::BitVector;
+
+use self::lvalue::{LvalueRef, get_dataptr, get_meta};
+use rustc_mir::traversal;
+
+use self::operand::OperandRef;
+
+#[derive(Clone)]
+pub enum CachedMir<'mir, 'tcx: 'mir> {
+    Ref(&'mir mir::Mir<'tcx>),
+    Owned(Rc<mir::Mir<'tcx>>)
+}
+
+impl<'mir, 'tcx: 'mir> Deref for CachedMir<'mir, 'tcx> {
+    type Target = mir::Mir<'tcx>;
+    fn deref(&self) -> &mir::Mir<'tcx> {
+        match *self {
+            CachedMir::Ref(r) => r,
+            CachedMir::Owned(ref rc) => rc
+        }
+    }
+}
+
+// FIXME DebugLoc is always None right now
+
+/// Master context for translating MIR.
+pub struct MirContext<'bcx, 'tcx:'bcx> {
+    mir: CachedMir<'bcx, 'tcx>,
+
+    /// Function context
+    fcx: &'bcx common::FunctionContext<'bcx, 'tcx>,
+
+    /// When unwinding is initiated, we have to store this personality
+    /// value somewhere so that we can load it and re-use it in the
+    /// resume instruction. The personality is (afaik) some kind of
+    /// value used for C++ unwinding, which must filter by type: we
+    /// don't really care about it very much. Anyway, this value
+    /// contains an alloca into which the personality is stored and
+    /// then later loaded when generating the DIVERGE_BLOCK.
+    llpersonalityslot: Option<ValueRef>,
+
+    /// A `Block` for each MIR `BasicBlock`
+    blocks: Vec<Block<'bcx, 'tcx>>,
+
+    /// Cached unreachable block
+    unreachable_block: Option<Block<'bcx, 'tcx>>,
+
+    /// An LLVM alloca for each MIR `VarDecl`
+    vars: Vec<LvalueRef<'tcx>>,
+
+    /// The location where each MIR `TempDecl` is stored. This is
+    /// usually an `LvalueRef` representing an alloca, but not always:
+    /// sometimes we can skip the alloca and just store the value
+    /// directly using an `OperandRef`, which makes for tighter LLVM
+    /// IR. The conditions for using an `OperandRef` are as follows:
+    ///
+    /// - the type of the temporary must be judged "immediate" by `type_is_immediate`
+    /// - the operand must never be referenced indirectly
+    ///     - we should not take its address using the `&` operator
+    ///     - nor should it appear in an lvalue path like `tmp.a`
+    /// - the operand must be defined by an rvalue that can generate immediate
+    ///   values
+    ///
+    /// Avoiding allocs can also be important for certain intrinsics,
+    /// notably `expect`.
+    temps: Vec<TempRef<'tcx>>,
+
+    /// The arguments to the function; as args are lvalues, these are
+    /// always indirect, though we try to avoid creating an alloca
+    /// when we can (and just reuse the pointer the caller provided).
+    args: Vec<LvalueRef<'tcx>>,
+}
+
+enum TempRef<'tcx> {
+    Lvalue(LvalueRef<'tcx>),
+    Operand(Option<OperandRef<'tcx>>),
+}
+
+///////////////////////////////////////////////////////////////////////////
+
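+/// Translate the MIR for a single function: allocate stack slots for the
+/// variables, temporaries and arguments, create one LLVM block per MIR basic
+/// block, translate every reachable block in reverse postorder, and finally
+/// delete the blocks that turned out to be unreachable or unused.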
+pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
+    let bcx = fcx.init(false, None).build();
+    let mir = bcx.mir();
+
+    let mir_blocks = mir.all_basic_blocks();
+
+    // Analyze the temps to determine which must be lvalues
+    // FIXME
+    let lvalue_temps = bcx.with_block(|bcx| {
+        analyze::lvalue_temps(bcx, &mir)
+    });
+
+    // Allocate variable and temp allocas
+    let vars = mir.var_decls.iter()
+                            .map(|decl| (bcx.monomorphize(&decl.ty), decl.name))
+                            .map(|(mty, name)| LvalueRef::alloca(&bcx, mty, &name.as_str()))
+                            .collect();
+    let temps = mir.temp_decls.iter()
+                              .map(|decl| bcx.monomorphize(&decl.ty))
+                              .enumerate()
+                              .map(|(i, mty)| if lvalue_temps.contains(i) {
+                                  TempRef::Lvalue(LvalueRef::alloca(&bcx,
+                                                                    mty,
+                                                                    &format!("temp{:?}", i)))
+                              } else {
+                                  // If this is an immediate temp, we do not create an
+                                  // alloca in advance. Instead we wait until we see the
+                                  // definition and update the operand there.
+                                  TempRef::Operand(None)
+                              })
+                              .collect();
+    let args = arg_value_refs(&bcx, &mir);
+
+    // Allocate a `Block` for every basic block
+    let block_bcxs: Vec<Block<'blk,'tcx>> =
+        mir_blocks.iter()
+                  .map(|&bb|{
+                      if bb == mir::START_BLOCK {
+                          fcx.new_block("start", None)
+                      } else if bb == mir::END_BLOCK {
+                          fcx.new_block("end", None)
+                      } else {
+                          fcx.new_block(&format!("{:?}", bb), None)
+                      }
+                  })
+                  .collect();
+
+    // Branch to the START block
+    let start_bcx = block_bcxs[mir::START_BLOCK.index()];
+    bcx.br(start_bcx.llbb);
+
+    let mut mircx = MirContext {
+        mir: mir.clone(),
+        fcx: fcx,
+        llpersonalityslot: None,
+        blocks: block_bcxs,
+        unreachable_block: None,
+        vars: vars,
+        temps: temps,
+        args: args,
+    };
+
+    let mut visited = BitVector::new(mir_blocks.len());
+
+    let rpo = traversal::reverse_postorder(&mir);
+    // Translate the body of each block using reverse postorder
+    for (bb, _) in rpo {
+        visited.insert(bb.index());
+        mircx.trans_block(bb);
+    }
+
+    // Remove blocks that haven't been visited, or have no
+    // predecessors.
+    for &bb in &mir_blocks {
+        let block = mircx.blocks[bb.index()];
+        let block = BasicBlock(block.llbb);
+        // Unreachable block
+        if !visited.contains(bb.index()) {
+            block.delete();
+        } else if block.pred_iter().count() == 0 {
+            block.delete();
+        }
+    }
+
+    fcx.cleanup();
+}
+
+/// Produce, for each argument, a `ValueRef` pointing at the
+/// argument's value. As arguments are lvalues, these are always
+/// indirect.
+fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                              mir: &mir::Mir<'tcx>)
+                              -> Vec<LvalueRef<'tcx>> {
+    let fcx = bcx.fcx();
+    let tcx = bcx.tcx();
+    let mut idx = 0;
+    let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize;
+    mir.arg_decls.iter().enumerate().map(|(arg_index, arg_decl)| {
+        let arg_ty = bcx.monomorphize(&arg_decl.ty);
+        if arg_decl.spread {
+            // This argument (e.g. the last argument in the "rust-call" ABI)
+            // is a tuple that was spread at the ABI level and now we have
+            // to reconstruct it into a tuple local variable, from multiple
+            // individual LLVM function arguments.
+
+            let tupled_arg_tys = match arg_ty.sty {
+                ty::TyTuple(ref tys) => tys,
+                _ => bug!("spread argument isn't a tuple?!")
+            };
+
+            let lltemp = bcx.with_block(|bcx| {
+                base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index))
+            });
+            for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
+                let dst = bcx.struct_gep(lltemp, i);
+                let arg = &fcx.fn_ty.args[idx];
+                idx += 1;
+                if common::type_is_fat_ptr(tcx, tupled_arg_ty) {
+                    // We pass fat pointers as two words, but inside the tuple
+                    // they are the two sub-fields of a single aggregate field.
+                    let meta = &fcx.fn_ty.args[idx];
+                    idx += 1;
+                    arg.store_fn_arg(bcx, &mut llarg_idx, get_dataptr(bcx, dst));
+                    meta.store_fn_arg(bcx, &mut llarg_idx, get_meta(bcx, dst));
+                } else {
+                    arg.store_fn_arg(bcx, &mut llarg_idx, dst);
+                }
+            }
+            return LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty));
+        }
+
+        let arg = &fcx.fn_ty.args[idx];
+        idx += 1;
+        let llval = if arg.is_indirect() {
+            // Don't copy an indirect argument to an alloca; the caller
+            // already put it in a temporary alloca and gave it up, unless
+            // we emit extra-debug-info, which requires local allocas :(.
+            // FIXME: lifetimes, debug info
+            let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint);
+            llarg_idx += 1;
+            llarg
+        } else {
+            let lltemp = bcx.with_block(|bcx| {
+                base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index))
+            });
+            if common::type_is_fat_ptr(tcx, arg_ty) {
+                // we pass fat pointers as two words, but we want to
+                // represent them internally as a pointer to two words,
+                // so make an alloca to store them in.
+                let meta = &fcx.fn_ty.args[idx];
+                idx += 1;
+                arg.store_fn_arg(bcx, &mut llarg_idx, get_dataptr(bcx, lltemp));
+                meta.store_fn_arg(bcx, &mut llarg_idx, get_meta(bcx, lltemp));
+            } else {
+                // otherwise, arg is passed by value, so make a
+                // temporary and store it there
+                arg.store_fn_arg(bcx, &mut llarg_idx, lltemp);
+            }
+            lltemp
+        };
+        LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty))
+    }).collect()
+}
+
+mod analyze;
+mod block;
+mod constant;
+mod drop;
+mod lvalue;
+mod operand;
+mod rvalue;
+mod statement;
diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs
new file mode 100644 (file)
index 0000000..c15d6cd
--- /dev/null
@@ -0,0 +1,200 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm::ValueRef;
+use rustc::ty::Ty;
+use rustc::mir::repr as mir;
+use base;
+use common::{self, Block, BlockAndBuilder};
+use datum;
+use value::Value;
+use glue;
+
+use std::fmt;
+
+use super::lvalue::load_fat_ptr;
+use super::{MirContext, TempRef, drop};
+
+/// The representation of a Rust value. The enum variant is in fact
+/// uniquely determined by the value's type, but is kept as a
+/// safety check.
+#[derive(Copy, Clone)]
+pub enum OperandValue {
+    /// A reference to the actual operand. The data is guaranteed
+    /// to be valid for the operand's lifetime.
+    Ref(ValueRef),
+    /// A single LLVM value.
+    Immediate(ValueRef),
+    /// A fat pointer. The first ValueRef is the data and the second
+    /// is the extra.
+    FatPtr(ValueRef, ValueRef)
+}
+
+/// An `OperandRef` is an "SSA" reference to a Rust value, along with
+/// its type.
+///
+/// NOTE: unless you know a value's type exactly, you should not
+/// generate LLVM opcodes acting on it and instead act via methods,
+/// to avoid nasty edge cases. In particular, using `Builder.store`
+/// directly is sure to cause problems -- use `MirContext.store_operand`
+/// instead.
+#[derive(Copy, Clone)]
+pub struct OperandRef<'tcx> {
+    // The value.
+    pub val: OperandValue,
+
+    // The type of value being returned.
+    pub ty: Ty<'tcx>
+}
+
+impl<'tcx> fmt::Debug for OperandRef<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.val {
+            OperandValue::Ref(r) => {
+                write!(f, "OperandRef(Ref({:?}) @ {:?})",
+                       Value(r), self.ty)
+            }
+            OperandValue::Immediate(i) => {
+                write!(f, "OperandRef(Immediate({:?}) @ {:?})",
+                       Value(i), self.ty)
+            }
+            OperandValue::FatPtr(a, d) => {
+                write!(f, "OperandRef(FatPtr({:?}, {:?}) @ {:?})",
+                       Value(a), Value(d), self.ty)
+            }
+        }
+    }
+}
+
+impl<'tcx> OperandRef<'tcx> {
+    /// Asserts that this operand refers to a scalar and returns
+    /// a reference to its value.
+    pub fn immediate(self) -> ValueRef {
+        match self.val {
+            OperandValue::Immediate(s) => s,
+            _ => bug!()
+        }
+    }
+}
+
+impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
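+    /// Load a value of type `ty` from `llval`, producing an `OperandRef` in
+    /// the representation appropriate for that type.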
+    pub fn trans_load(&mut self,
+                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                      llval: ValueRef,
+                      ty: Ty<'tcx>)
+                      -> OperandRef<'tcx>
+    {
+        debug!("trans_load: {:?} @ {:?}", Value(llval), ty);
+
+        let val = match datum::appropriate_rvalue_mode(bcx.ccx(), ty) {
+            datum::ByValue => {
+                OperandValue::Immediate(base::load_ty_builder(bcx, llval, ty))
+            }
+            datum::ByRef if common::type_is_fat_ptr(bcx.tcx(), ty) => {
+                let (lldata, llextra) = load_fat_ptr(bcx, llval);
+                OperandValue::FatPtr(lldata, llextra)
+            }
+            datum::ByRef => OperandValue::Ref(llval)
+        };
+
+        OperandRef { val: val, ty: ty }
+    }
+
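+    /// Translate a MIR operand into an `OperandRef`: temporaries kept as
+    /// operands are returned directly, other lvalues are loaded from their
+    /// home, and constants go through `trans_constant`.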
+    pub fn trans_operand(&mut self,
+                         bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                         operand: &mir::Operand<'tcx>)
+                         -> OperandRef<'tcx>
+    {
+        debug!("trans_operand(operand={:?})", operand);
+
+        match *operand {
+            mir::Operand::Consume(ref lvalue) => {
+                // watch out for temporaries that do not have an
+                // alloca; they are handled somewhat differently
+                if let &mir::Lvalue::Temp(index) = lvalue {
+                    match self.temps[index as usize] {
+                        TempRef::Operand(Some(o)) => {
+                            return o;
+                        }
+                        TempRef::Operand(None) => {
+                            bug!("use of {:?} before def", lvalue);
+                        }
+                        TempRef::Lvalue(..) => {
+                            // use path below
+                        }
+                    }
+                }
+
+                // for most lvalues, to consume them we just load them
+                // out from their home
+                let tr_lvalue = self.trans_lvalue(bcx, lvalue);
+                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
+                self.trans_load(bcx, tr_lvalue.llval, ty)
+            }
+
+            mir::Operand::Constant(ref constant) => {
+                self.trans_constant(bcx, constant)
+            }
+        }
+    }
+
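+    /// Store `operand` into the memory pointed to by `lldest`.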
+    pub fn store_operand(&mut self,
+                         bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                         lldest: ValueRef,
+                         operand: OperandRef<'tcx>)
+    {
+        debug!("store_operand: operand={:?}", operand);
+        bcx.with_block(|bcx| self.store_operand_direct(bcx, lldest, operand))
+    }
+
+    pub fn store_operand_direct(&mut self,
+                                bcx: Block<'bcx, 'tcx>,
+                                lldest: ValueRef,
+                                operand: OperandRef<'tcx>)
+    {
+        // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
+        // value is through `undef`, and the store itself would be useless.
+        if common::type_is_zero_size(bcx.ccx(), operand.ty) {
+            return;
+        }
+        match operand.val {
+            OperandValue::Ref(r) => base::memcpy_ty(bcx, lldest, r, operand.ty),
+            OperandValue::Immediate(s) => base::store_ty(bcx, s, lldest, operand.ty),
+            OperandValue::FatPtr(data, extra) => {
+                base::store_fat_ptr(bcx, data, extra, lldest, operand.ty);
+            }
+        }
+    }
+
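+    /// After a consuming use, mark the operand's storage as dropped (via
+    /// `drop_fill`) so that it is not dropped again; constants and operand
+    /// temporaries need no action.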
+    pub fn set_operand_dropped(&mut self,
+                               bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                               operand: &mir::Operand<'tcx>) {
+        match *operand {
+            mir::Operand::Constant(_) => return,
+            mir::Operand::Consume(ref lvalue) => {
+                if let mir::Lvalue::Temp(idx) = *lvalue {
+                    if let TempRef::Operand(..) = self.temps[idx as usize] {
+                        // All lvalues which have an associated drop are promoted to an alloca
+                        // beforehand. If this is an operand, it is safe to say this is never
+                        // dropped and there’s no reason for us to zero this out at all.
+                        return
+                    }
+                }
+                let lvalue = self.trans_lvalue(bcx, lvalue);
+                let ty = lvalue.ty.to_ty(bcx.tcx());
+                if !glue::type_needs_drop(bcx.tcx(), ty) {
+                    return
+                } else {
+                    drop::drop_fill(bcx, lvalue.llval, ty);
+                }
+            }
+        }
+    }
+}
diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs
new file mode 100644 (file)
index 0000000..8e5d220
--- /dev/null
@@ -0,0 +1,621 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm::ValueRef;
+use rustc::ty::{self, Ty};
+use rustc::ty::cast::{CastTy, IntTy};
+use middle::const_val::ConstVal;
+use rustc_const_math::ConstInt;
+use rustc::mir::repr as mir;
+
+use asm;
+use base;
+use callee::Callee;
+use common::{self, C_uint, BlockAndBuilder, Result};
+use datum::{Datum, Lvalue};
+use debuginfo::DebugLoc;
+use declare;
+use adt;
+use machine;
+use type_::Type;
+use type_of;
+use tvec;
+use value::Value;
+use Disr;
+
+use super::MirContext;
+use super::operand::{OperandRef, OperandValue};
+use super::lvalue::{LvalueRef, get_dataptr, get_meta};
+
+impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
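+    /// Translate an rvalue whose result is stored directly into `dest`,
+    /// returning the block in which translation should continue.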
+    pub fn trans_rvalue(&mut self,
+                        bcx: BlockAndBuilder<'bcx, 'tcx>,
+                        dest: LvalueRef<'tcx>,
+                        rvalue: &mir::Rvalue<'tcx>)
+                        -> BlockAndBuilder<'bcx, 'tcx>
+    {
+        debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
+               Value(dest.llval), rvalue);
+
+        match *rvalue {
+            mir::Rvalue::Use(ref operand) => {
+                let tr_operand = self.trans_operand(&bcx, operand);
+                // FIXME: consider not copying constants through the stack (fixable by translating
+                // constants into OperandValue::Ref; why don’t we do that yet if we don’t?)
+                self.store_operand(&bcx, dest.llval, tr_operand);
+                self.set_operand_dropped(&bcx, operand);
+                bcx
+            }
+
+            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
+                if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
+                    // into-coerce of a thin pointer to a fat pointer - just
+                    // use the operand path.
+                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
+                    self.store_operand(&bcx, dest.llval, temp);
+                    return bcx;
+                }
+
+                // Unsize of a nontrivial struct. I would prefer for
+                // this to be eliminated by MIR translation, but
+                // `CoerceUnsized` can be passed by a where-clause,
+                // so the (generic) MIR may not be able to expand it.
+                let operand = self.trans_operand(&bcx, source);
+                bcx.with_block(|bcx| {
+                    match operand.val {
+                        OperandValue::FatPtr(..) => bug!(),
+                        OperandValue::Immediate(llval) => {
+                            // unsize from an immediate structure. We don't
+                            // really need a temporary alloca here, but
+                            // avoiding it would require us to have
+                            // `coerce_unsized_into` use extractvalue to
+                            // index into the struct, and this case isn't
+                            // important enough for it.
+                            debug!("trans_rvalue: creating ugly alloca");
+                            let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
+                            base::store_ty(bcx, llval, lltemp, operand.ty);
+                            base::coerce_unsized_into(bcx,
+                                                      lltemp, operand.ty,
+                                                      dest.llval, cast_ty);
+                        }
+                        OperandValue::Ref(llref) => {
+                            base::coerce_unsized_into(bcx,
+                                                      llref, operand.ty,
+                                                      dest.llval, cast_ty);
+                        }
+                    }
+                });
+                self.set_operand_dropped(&bcx, source);
+                bcx
+            }
+
+            mir::Rvalue::Repeat(ref elem, ref count) => {
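+                // `[elem; count]`: evaluate the element once, then iterate the
+                // destination array with `iter_vec_raw`, storing a copy of the
+                // element into every slot.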
+                let tr_elem = self.trans_operand(&bcx, elem);
+                let count = ConstVal::Integral(ConstInt::Usize(count.value));
+                let size = self.trans_constval(&bcx, &count, bcx.tcx().types.usize).immediate();
+                let base = get_dataptr(&bcx, dest.llval);
+                let bcx = bcx.map_block(|block| {
+                    tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| {
+                        self.store_operand_direct(block, llslot, tr_elem);
+                        block
+                    })
+                });
+                self.set_operand_dropped(&bcx, elem);
+                bcx
+            }
+
+            mir::Rvalue::Aggregate(ref kind, ref operands) => {
+                match *kind {
+                    mir::AggregateKind::Adt(adt_def, index, _) => {
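+                        // Write the discriminant of the chosen variant first,
+                        // then translate and store each field in place,
+                        // skipping zero-sized fields.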
+                        let repr = adt::represent_type(bcx.ccx(), dest.ty.to_ty(bcx.tcx()));
+                        let disr = Disr::from(adt_def.variants[index].disr_val);
+                        bcx.with_block(|bcx| {
+                            adt::trans_set_discr(bcx, &repr, dest.llval, disr);
+                        });
+                        for (i, operand) in operands.iter().enumerate() {
+                            let op = self.trans_operand(&bcx, operand);
+                            // Do not generate stores and GEPis for zero-sized fields.
+                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
+                                let val = adt::MaybeSizedValue::sized(dest.llval);
+                                let lldest_i = adt::trans_field_ptr_builder(&bcx, &repr,
+                                                                            val, disr, i);
+                                self.store_operand(&bcx, lldest_i, op);
+                            }
+                            self.set_operand_dropped(&bcx, operand);
+                        }
+                    },
+                    _ => {
+                        // FIXME Shouldn't need to manually trigger closure instantiations.
+                        if let mir::AggregateKind::Closure(def_id, substs) = *kind {
+                            use rustc::hir;
+                            use syntax::ast::DUMMY_NODE_ID;
+                            use syntax::codemap::DUMMY_SP;
+                            use syntax::ptr::P;
+                            use closure;
+
+                            closure::trans_closure_expr(closure::Dest::Ignore(bcx.ccx()),
+                                                        &hir::FnDecl {
+                                                            inputs: P::new(),
+                                                            output: hir::NoReturn(DUMMY_SP),
+                                                            variadic: false
+                                                        },
+                                                        &hir::Block {
+                                                            stmts: P::new(),
+                                                            expr: None,
+                                                            id: DUMMY_NODE_ID,
+                                                            rules: hir::DefaultBlock,
+                                                            span: DUMMY_SP
+                                                        },
+                                                        DUMMY_NODE_ID, def_id,
+                                                        &bcx.monomorphize(substs));
+                        }
+
+                        for (i, operand) in operands.iter().enumerate() {
+                            let op = self.trans_operand(&bcx, operand);
+                            // Do not generate stores and GEPis for zero-sized fields.
+                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
+                                // Note: perhaps this should be StructGep, but
+                                // note that in some cases the values here will
+                                // not be structs but arrays.
+                                let dest = bcx.gepi(dest.llval, &[0, i]);
+                                self.store_operand(&bcx, dest, op);
+                            }
+                            self.set_operand_dropped(&bcx, operand);
+                        }
+                    }
+                }
+                bcx
+            }
+
+            mir::Rvalue::Slice { ref input, from_start, from_end } => {
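+                // A slice drops `from_start` elements from the front and
+                // `from_end` from the back: the data pointer is advanced by
+                // `from_start` and the length shrinks by both offsets combined.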
+                let ccx = bcx.ccx();
+                let input = self.trans_lvalue(&bcx, input);
+                let ty = input.ty.to_ty(bcx.tcx());
+                let (llbase1, lllen) = match ty.sty {
+                    ty::TyArray(_, n) => {
+                        (bcx.gepi(input.llval, &[0, from_start]), C_uint(ccx, n))
+                    }
+                    ty::TySlice(_) | ty::TyStr => {
+                        (bcx.gepi(input.llval, &[from_start]), input.llextra)
+                    }
+                    _ => bug!("cannot slice {}", ty)
+                };
+                let adj = C_uint(ccx, from_start + from_end);
+                let lllen1 = bcx.sub(lllen, adj);
+                bcx.store(llbase1, get_dataptr(&bcx, dest.llval));
+                bcx.store(lllen1, get_meta(&bcx, dest.llval));
+                bcx
+            }
+
+            mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
+                let outputs = outputs.iter().map(|output| {
+                    let lvalue = self.trans_lvalue(&bcx, output);
+                    Datum::new(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()),
+                               Lvalue::new("out"))
+                }).collect();
+
+                let input_vals = inputs.iter().map(|input| {
+                    self.trans_operand(&bcx, input).immediate()
+                }).collect();
+
+                bcx.with_block(|bcx| {
+                    asm::trans_inline_asm(bcx, asm, outputs, input_vals);
+                });
+
+                for input in inputs {
+                    self.set_operand_dropped(&bcx, input);
+                }
+                bcx
+            }
+
+            _ => {
+                assert!(rvalue_creates_operand(rvalue));
+                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
+                self.store_operand(&bcx, dest.llval, temp);
+                bcx
+            }
+        }
+    }
+
+    pub fn trans_rvalue_operand(&mut self,
+                                bcx: BlockAndBuilder<'bcx, 'tcx>,
+                                rvalue: &mir::Rvalue<'tcx>)
+                                -> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
+    {
+        assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
+
+        match *rvalue {
+            mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
+                let operand = self.trans_operand(&bcx, source);
+                debug!("cast operand is {:?}", operand);
+                let cast_ty = bcx.monomorphize(&cast_ty);
+
+                let val = match *kind {
+                    mir::CastKind::ReifyFnPointer => {
+                        match operand.ty.sty {
+                            ty::TyFnDef(def_id, substs, _) => {
+                                OperandValue::Immediate(
+                                    Callee::def(bcx.ccx(), def_id, substs)
+                                        .reify(bcx.ccx()).val)
+                            }
+                            _ => {
+                                bug!("{} cannot be reified to a fn ptr", operand.ty)
+                            }
+                        }
+                    }
+                    mir::CastKind::UnsafeFnPointer => {
+                        // this is a no-op at the LLVM level
+                        operand.val
+                    }
+                    mir::CastKind::Unsize => {
+                        // unsize targets other than to a fat pointer currently
+                        // can't be operands.
+                        assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));
+
+                        match operand.val {
+                            OperandValue::FatPtr(..) => {
+                                // unsize from a fat pointer - this is a
+                                // "trait-object-to-supertrait" coercion, for
+                                // example,
+                                //   &'a fmt::Debug+Send => &'a fmt::Debug,
+                                // and is a no-op at the LLVM level
+                                self.set_operand_dropped(&bcx, source);
+                                operand.val
+                            }
+                            OperandValue::Immediate(lldata) => {
+                                // "standard" unsize
+                                let (lldata, llextra) = bcx.with_block(|bcx| {
+                                    base::unsize_thin_ptr(bcx, lldata,
+                                                          operand.ty, cast_ty)
+                                });
+                                self.set_operand_dropped(&bcx, source);
+                                OperandValue::FatPtr(lldata, llextra)
+                            }
+                            OperandValue::Ref(_) => {
+                                bug!("by-ref operand {:?} in trans_rvalue_operand",
+                                     operand);
+                            }
+                        }
+                    }
+                    mir::CastKind::Misc if common::type_is_immediate(bcx.ccx(), operand.ty) => {
+                        debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
+                        let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
+                        let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
+                        let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty);
+                        let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty);
+                        let llval = operand.immediate();
+                        let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in {
+                            let repr = adt::represent_type(bcx.ccx(), operand.ty);
+                            adt::is_discr_signed(&repr)
+                        } else {
+                            operand.ty.is_signed()
+                        };
+
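+                        // Pick the LLVM cast instruction based on the
+                        // (input, output) cast-type pair: integer widths decide
+                        // between bitcast/trunc/ext, and signedness decides
+                        // sext vs. zext and sitofp vs. uitofp.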
+                        let newval = match (r_t_in, r_t_out) {
+                            (CastTy::Int(_), CastTy::Int(_)) => {
+                                let srcsz = ll_t_in.int_width();
+                                let dstsz = ll_t_out.int_width();
+                                if srcsz == dstsz {
+                                    bcx.bitcast(llval, ll_t_out)
+                                } else if srcsz > dstsz {
+                                    bcx.trunc(llval, ll_t_out)
+                                } else if signed {
+                                    bcx.sext(llval, ll_t_out)
+                                } else {
+                                    bcx.zext(llval, ll_t_out)
+                                }
+                            }
+                            (CastTy::Float, CastTy::Float) => {
+                                let srcsz = ll_t_in.float_width();
+                                let dstsz = ll_t_out.float_width();
+                                if dstsz > srcsz {
+                                    bcx.fpext(llval, ll_t_out)
+                                } else if srcsz > dstsz {
+                                    bcx.fptrunc(llval, ll_t_out)
+                                } else {
+                                    llval
+                                }
+                            }
+                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
+                            (CastTy::FnPtr, CastTy::Ptr(_)) |
+                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
+                                bcx.pointercast(llval, ll_t_out),
+                            (CastTy::Ptr(_), CastTy::Int(_)) |
+                            (CastTy::FnPtr, CastTy::Int(_)) =>
+                                bcx.ptrtoint(llval, ll_t_out),
+                            (CastTy::Int(_), CastTy::Ptr(_)) =>
+                                bcx.inttoptr(llval, ll_t_out),
+                            (CastTy::Int(_), CastTy::Float) if signed =>
+                                bcx.sitofp(llval, ll_t_out),
+                            (CastTy::Int(_), CastTy::Float) =>
+                                bcx.uitofp(llval, ll_t_out),
+                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
+                                bcx.fptosi(llval, ll_t_out),
+                            (CastTy::Float, CastTy::Int(_)) =>
+                                bcx.fptoui(llval, ll_t_out),
+                            _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
+                        };
+                        OperandValue::Immediate(newval)
+                    }
+                    mir::CastKind::Misc => { // Casts from a fat-ptr.
+                        let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty);
+                        let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty);
+                        if let OperandValue::FatPtr(data_ptr, meta_ptr) = operand.val {
+                            if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
+                                let ll_cft = ll_cast_ty.field_types();
+                                let ll_fft = ll_from_ty.field_types();
+                                let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
+                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
+                                OperandValue::FatPtr(data_cast, meta_ptr)
+                            } else { // cast to thin-ptr
+                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
+                                // pointer-cast of that pointer to desired pointer type.
+                                let llval = bcx.pointercast(data_ptr, ll_cast_ty);
+                                OperandValue::Immediate(llval)
+                            }
+                        } else {
+                            bug!("Unexpected non-FatPtr operand")
+                        }
+                    }
+                };
+                let operand = OperandRef {
+                    val: val,
+                    ty: cast_ty
+                };
+                (bcx, operand)
+            }
+
+            mir::Rvalue::Ref(_, bk, ref lvalue) => {
+                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
+
+                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
+                let ref_ty = bcx.tcx().mk_ref(
+                    bcx.tcx().mk_region(ty::ReStatic),
+                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
+                );
+
+                // Note: lvalues are indirect, so storing the `llval` into the
+                // destination effectively creates a reference.
+                let operand = if common::type_is_sized(bcx.tcx(), ty) {
+                    OperandRef {
+                        val: OperandValue::Immediate(tr_lvalue.llval),
+                        ty: ref_ty,
+                    }
+                } else {
+                    OperandRef {
+                        val: OperandValue::FatPtr(tr_lvalue.llval,
+                                                  tr_lvalue.llextra),
+                        ty: ref_ty,
+                    }
+                };
+                (bcx, operand)
+            }
+
+            mir::Rvalue::Len(ref lvalue) => {
+                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
+                let operand = OperandRef {
+                    val: OperandValue::Immediate(self.lvalue_len(&bcx, tr_lvalue)),
+                    ty: bcx.tcx().types.usize,
+                };
+                (bcx, operand)
+            }
+
+            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
+                let lhs = self.trans_operand(&bcx, lhs);
+                let rhs = self.trans_operand(&bcx, rhs);
+                let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
+                    match (lhs.val, rhs.val) {
+                        (OperandValue::FatPtr(lhs_addr, lhs_extra),
+                         OperandValue::FatPtr(rhs_addr, rhs_extra)) => {
+                            bcx.with_block(|bcx| {
+                                base::compare_fat_ptrs(bcx,
+                                                       lhs_addr, lhs_extra,
+                                                       rhs_addr, rhs_extra,
+                                                       lhs.ty, op.to_hir_binop(),
+                                                       DebugLoc::None)
+                            })
+                        }
+                        _ => bug!()
+                    }
+                } else {
+                    self.trans_scalar_binop(&bcx, op,
+                                            lhs.immediate(), rhs.immediate(),
+                                            lhs.ty)
+                };
+                let operand = OperandRef {
+                    val: OperandValue::Immediate(llresult),
+                    ty: self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty),
+                };
+                (bcx, operand)
+            }
+
+            mir::Rvalue::UnaryOp(op, ref operand) => {
+                let operand = self.trans_operand(&bcx, operand);
+                let lloperand = operand.immediate();
+                let is_float = operand.ty.is_fp();
+                let llval = match op {
+                    mir::UnOp::Not => bcx.not(lloperand),
+                    mir::UnOp::Neg => if is_float {
+                        bcx.fneg(lloperand)
+                    } else {
+                        bcx.neg(lloperand)
+                    }
+                };
+                (bcx, OperandRef {
+                    val: OperandValue::Immediate(llval),
+                    ty: operand.ty,
+                })
+            }
+
+            mir::Rvalue::Box(content_ty) => {
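+                // Compute the size and alignment of the boxed content, then
+                // allocate it with `malloc_raw_dyn`; the resulting pointer is
+                // the immediate value of type `Box<content_ty>`.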
+                let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
+                let llty = type_of::type_of(bcx.ccx(), content_ty);
+                let llsize = machine::llsize_of(bcx.ccx(), llty);
+                let align = type_of::align_of(bcx.ccx(), content_ty);
+                let llalign = C_uint(bcx.ccx(), align);
+                let llty_ptr = llty.ptr_to();
+                let box_ty = bcx.tcx().mk_box(content_ty);
+                let mut llval = None;
+                let bcx = bcx.map_block(|bcx| {
+                    let Result { bcx, val } = base::malloc_raw_dyn(bcx,
+                                                                   llty_ptr,
+                                                                   box_ty,
+                                                                   llsize,
+                                                                   llalign,
+                                                                   DebugLoc::None);
+                    llval = Some(val);
+                    bcx
+                });
+                let operand = OperandRef {
+                    val: OperandValue::Immediate(llval.unwrap()),
+                    ty: box_ty,
+                };
+                (bcx, operand)
+            }
+
+            mir::Rvalue::Use(..) |
+            mir::Rvalue::Repeat(..) |
+            mir::Rvalue::Aggregate(..) |
+            mir::Rvalue::Slice { .. } |
+            mir::Rvalue::InlineAsm { .. } => {
+                bug!("cannot generate operand from rvalue {:?}", rvalue);
+            }
+        }
+    }
+
+    pub fn trans_scalar_binop(&mut self,
+                              bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                              op: mir::BinOp,
+                              lhs: ValueRef,
+                              rhs: ValueRef,
+                              input_ty: Ty<'tcx>) -> ValueRef {
+        let is_float = input_ty.is_fp();
+        let is_signed = input_ty.is_signed();
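+        // Scalar binops map almost directly onto single LLVM instructions;
+        // the main choice is between the float, signed-integer and
+        // unsigned-integer variant of each operation.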
+        match op {
+            mir::BinOp::Add => if is_float {
+                bcx.fadd(lhs, rhs)
+            } else {
+                bcx.add(lhs, rhs)
+            },
+            mir::BinOp::Sub => if is_float {
+                bcx.fsub(lhs, rhs)
+            } else {
+                bcx.sub(lhs, rhs)
+            },
+            mir::BinOp::Mul => if is_float {
+                bcx.fmul(lhs, rhs)
+            } else {
+                bcx.mul(lhs, rhs)
+            },
+            mir::BinOp::Div => if is_float {
+                bcx.fdiv(lhs, rhs)
+            } else if is_signed {
+                bcx.sdiv(lhs, rhs)
+            } else {
+                bcx.udiv(lhs, rhs)
+            },
+            mir::BinOp::Rem => if is_float {
+                // LLVM currently always lowers `frem` instructions to the
+                // appropriate library calls typically found in libm. Notably,
+                // f64 gets wired up to `fmod` and f32 gets wired up to `fmodf`.
+                // Inconveniently for us, 32-bit MSVC does not actually have an
+                // `fmodf` symbol; it's instead just an inline function in a
+                // header that widens to f64, uses `fmod`, and then truncates
+                // back down to f32.
+                //
+                // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
+                // still unconditionally lower frem instructions over 32-bit floats
+                // to a call to `fmodf`. To work around this we special case MSVC
+                // 32-bit float rem instructions and instead do the call out to
+                // `fmod` ourselves.
+                //
+                // Note that this is currently duplicated with src/libcore/ops.rs
+                // which does the same thing, and it would be nice to perhaps unify
+                // these two implementations one day! Also note that we call `fmod`
+                // for both 32 and 64-bit floats because if we emit any FRem
+                // instruction at all then LLVM is capable of optimizing it into a
+                // 32-bit FRem (which we're trying to avoid).
+                let tcx = bcx.tcx();
+                let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
+                    tcx.sess.target.target.arch == "x86";
+                if use_fmod {
+                    let f64t = Type::f64(bcx.ccx());
+                    let fty = Type::func(&[f64t, f64t], &f64t);
+                    let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty);
+                    if input_ty == tcx.types.f32 {
+                        let lllhs = bcx.fpext(lhs, f64t);
+                        let llrhs = bcx.fpext(rhs, f64t);
+                        let llres = bcx.call(llfn, &[lllhs, llrhs], None);
+                        bcx.fptrunc(llres, Type::f32(bcx.ccx()))
+                    } else {
+                        bcx.call(llfn, &[lhs, rhs], None)
+                    }
+                } else {
+                    bcx.frem(lhs, rhs)
+                }
+            } else if is_signed {
+                bcx.srem(lhs, rhs)
+            } else {
+                bcx.urem(lhs, rhs)
+            },
+            mir::BinOp::BitOr => bcx.or(lhs, rhs),
+            mir::BinOp::BitAnd => bcx.and(lhs, rhs),
+            mir::BinOp::BitXor => bcx.xor(lhs, rhs),
+            mir::BinOp::Shl => {
+                bcx.with_block(|bcx| {
+                    common::build_unchecked_lshift(bcx,
+                                                   lhs,
+                                                   rhs,
+                                                   DebugLoc::None)
+                })
+            }
+            mir::BinOp::Shr => {
+                bcx.with_block(|bcx| {
+                    common::build_unchecked_rshift(bcx,
+                                                   input_ty,
+                                                   lhs,
+                                                   rhs,
+                                                   DebugLoc::None)
+                })
+            }
+            mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt |
+            mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => {
+                bcx.with_block(|bcx| {
+                    base::compare_scalar_types(bcx, lhs, rhs, input_ty,
+                                               op.to_hir_binop(), DebugLoc::None)
+                })
+            }
+        }
+    }
+}
+
+pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool {
+    match *rvalue {
+        mir::Rvalue::Ref(..) |
+        mir::Rvalue::Len(..) |
+        mir::Rvalue::Cast(..) | // (*)
+        mir::Rvalue::BinaryOp(..) |
+        mir::Rvalue::UnaryOp(..) |
+        mir::Rvalue::Box(..) =>
+            true,
+        mir::Rvalue::Use(..) | // (**)
+        mir::Rvalue::Repeat(..) |
+        mir::Rvalue::Aggregate(..) |
+        mir::Rvalue::Slice { .. } |
+        mir::Rvalue::InlineAsm { .. } =>
+            false,
+    }
+
+    // (*) this is only true if the type is suitable
+    // (**) we need to zero out the source operand after moving, so we are restricted to either
+    // ensuring that all users of `Use` zero it out themselves or not allowing an operand to be
+    // “created” for it.
+}
diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs
new file mode 100644
index 0000000..1d85ac6
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::mir::repr as mir;
+use common::BlockAndBuilder;
+
+use super::MirContext;
+use super::TempRef;
+
+impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
+    pub fn trans_statement(&mut self,
+                           bcx: BlockAndBuilder<'bcx, 'tcx>,
+                           statement: &mir::Statement<'tcx>)
+                           -> BlockAndBuilder<'bcx, 'tcx> {
+        debug!("trans_statement(statement={:?})", statement);
+
+        match statement.kind {
+            mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
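+                // Temporaries are translated either to stack slots
+                // (`TempRef::Lvalue`) or to SSA operands that are materialized
+                // on their single assignment (`TempRef::Operand`); every other
+                // lvalue refers to memory and is written through directly.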
+                match *lvalue {
+                    mir::Lvalue::Temp(index) => {
+                        let index = index as usize;
+                        match self.temps[index] {
+                            TempRef::Lvalue(tr_dest) => {
+                                self.trans_rvalue(bcx, tr_dest, rvalue)
+                            }
+                            TempRef::Operand(None) => {
+                                let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue);
+                                self.temps[index] = TempRef::Operand(Some(operand));
+                                bcx
+                            }
+                            TempRef::Operand(Some(_)) => {
+                                span_bug!(statement.span,
+                                          "operand {:?} already assigned",
+                                          rvalue);
+                            }
+                        }
+                    }
+                    _ => {
+                        let tr_dest = self.trans_lvalue(&bcx, lvalue);
+                        self.trans_rvalue(bcx, tr_dest, rvalue)
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/src/librustc_trans/monomorphize.rs b/src/librustc_trans/monomorphize.rs
new file mode 100644
index 0000000..ef0da37
--- /dev/null
@@ -0,0 +1,211 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use back::symbol_names;
+use llvm::ValueRef;
+use llvm;
+use rustc::hir::def_id::DefId;
+use rustc::infer::normalize_associated_type;
+use rustc::ty::subst;
+use rustc::ty::subst::{Subst, Substs};
+use rustc::ty::{self, Ty, TypeFoldable, TyCtxt};
+use attributes;
+use base::{push_ctxt};
+use base::trans_fn;
+use base;
+use common::*;
+use declare;
+use Disr;
+use rustc::hir::map as hir_map;
+use rustc::util::ppaux;
+
+use rustc::hir;
+
+use syntax::attr;
+use syntax::errors;
+
+use std::fmt;
+
+pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                fn_id: DefId,
+                                psubsts: &'tcx subst::Substs<'tcx>)
+                                -> (ValueRef, Ty<'tcx>) {
+    debug!("monomorphic_fn(fn_id={:?}, real_substs={:?})", fn_id, psubsts);
+
+    assert!(!psubsts.types.needs_infer() && !psubsts.types.has_param_types());
+
+    let _icx = push_ctxt("monomorphic_fn");
+
+    let instance = Instance::new(fn_id, psubsts);
+
+    let item_ty = ccx.tcx().lookup_item_type(fn_id).ty;
+
+    debug!("monomorphic_fn about to subst into {:?}", item_ty);
+    let mono_ty = apply_param_substs(ccx.tcx(), psubsts, &item_ty);
+    debug!("mono_ty = {:?} (post-substitution)", mono_ty);
+
+    match ccx.instances().borrow().get(&instance) {
+        Some(&val) => {
+            debug!("leaving monomorphic fn {:?}", instance);
+            return (val, mono_ty);
+        }
+        None => ()
+    }
+
+    debug!("monomorphic_fn({:?})", instance);
+
+    ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1);
+
+    let depth;
+    {
+        let mut monomorphizing = ccx.monomorphizing().borrow_mut();
+        depth = match monomorphizing.get(&fn_id) {
+            Some(&d) => d, None => 0
+        };
+
+        debug!("monomorphic_fn: depth for fn_id={:?} is {:?}", fn_id, depth+1);
+
+        // Random cut-off -- code that needs to instantiate the same function
+        // recursively more times than the recursion limit allows can probably
+        // safely be assumed to be causing an infinite expansion.
+        if depth > ccx.sess().recursion_limit.get() {
+            let error = format!("reached the recursion limit while instantiating `{}`",
+                                instance);
+            if let Some(id) = ccx.tcx().map.as_local_node_id(fn_id) {
+                ccx.sess().span_fatal(ccx.tcx().map.span(id), &error);
+            } else {
+                ccx.sess().fatal(&error);
+            }
+        }
+
+        monomorphizing.insert(fn_id, depth + 1);
+    }
+
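+    // The symbol name is derived from the instance (item path plus
+    // substitutions), so each distinct monomorphization gets its own symbol.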
+    let symbol = symbol_names::exported_name(ccx, &instance);
+
+    debug!("monomorphize_fn mangled to {}", symbol);
+    assert!(declare::get_defined_value(ccx, &symbol).is_none());
+
+    // FIXME(nagisa): perhaps needs a more fine-grained selection?
+    let lldecl = declare::define_internal_fn(ccx, &symbol, mono_ty);
+    // FIXME(eddyb): it is doubtful that all extern fns should allow unwinding.
+    attributes::unwind(lldecl, true);
+
+    ccx.instances().borrow_mut().insert(instance, lldecl);
+
+    // we can only monomorphize things in this crate (or inlined into it)
+    let fn_node_id = ccx.tcx().map.as_local_node_id(fn_id).unwrap();
+    let map_node = errors::expect(
+        ccx.sess().diagnostic(),
+        ccx.tcx().map.find(fn_node_id),
+        || {
+            format!("while instantiating `{}`, couldn't find it in \
+                     the item map (may have attempted to monomorphize \
+                     an item defined in a different crate?)",
+                    instance)
+        });
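+    // Dispatch on the kind of HIR node: plain functions and methods have
+    // their bodies translated by `trans_fn`, while enum-variant and
+    // tuple-struct constructors get a small constructor shim instead.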
+    match map_node {
+        hir_map::NodeItem(&hir::Item {
+            ref attrs, node: hir::ItemFn(ref decl, _, _, _, _, ref body), ..
+        }) |
+        hir_map::NodeTraitItem(&hir::TraitItem {
+            ref attrs, node: hir::MethodTraitItem(
+                hir::MethodSig { ref decl, .. }, Some(ref body)), ..
+        }) |
+        hir_map::NodeImplItem(&hir::ImplItem {
+            ref attrs, node: hir::ImplItemKind::Method(
+                hir::MethodSig { ref decl, .. }, ref body), ..
+        }) => {
+            attributes::from_fn_attrs(ccx, attrs, lldecl);
+
+            let is_first = !ccx.available_monomorphizations().borrow()
+                                                             .contains(&symbol);
+            if is_first {
+                ccx.available_monomorphizations().borrow_mut().insert(symbol.clone());
+            }
+
+            let trans_everywhere = attr::requests_inline(attrs);
+            if trans_everywhere || is_first {
+                let origin = if is_first { base::OriginalTranslation } else { base::InlinedCopy };
+                base::update_linkage(ccx, lldecl, None, origin);
+                trans_fn(ccx, decl, body, lldecl, psubsts, fn_node_id);
+            } else {
+                // We marked the value as using internal linkage earlier, but that is illegal for
+                // declarations, so switch back to external linkage.
+                llvm::SetLinkage(lldecl, llvm::ExternalLinkage);
+            }
+        }
+
+        hir_map::NodeVariant(_) | hir_map::NodeStructCtor(_) => {
+            let disr = match map_node {
+                hir_map::NodeVariant(_) => {
+                    Disr::from(inlined_variant_def(ccx, fn_node_id).disr_val)
+                }
+                hir_map::NodeStructCtor(_) => Disr(0),
+                _ => bug!()
+            };
+            attributes::inline(lldecl, attributes::InlineAttr::Hint);
+            base::trans_ctor_shim(ccx, fn_node_id, disr, psubsts, lldecl);
+        }
+
+        _ => bug!("can't monomorphize a {:?}", map_node)
+    };
+
+    ccx.monomorphizing().borrow_mut().insert(fn_id, depth);
+
+    debug!("leaving monomorphic fn {}", ccx.tcx().item_path_str(fn_id));
+    (lldecl, mono_ty)
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct Instance<'tcx> {
+    pub def: DefId,
+    pub substs: &'tcx Substs<'tcx>,
+}
+
+impl<'tcx> fmt::Display for Instance<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        ppaux::parameterized(f, &self.substs, self.def, ppaux::Ns::Value, &[],
+                             |tcx| tcx.lookup_item_type(self.def).generics)
+    }
+}
+
+impl<'tcx> Instance<'tcx> {
+    pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>)
+               -> Instance<'tcx> {
+        assert!(substs.regions.iter().all(|&r| r == ty::ReStatic));
+        Instance { def: def_id, substs: substs }
+    }
+    pub fn mono(tcx: &TyCtxt<'tcx>, def_id: DefId) -> Instance<'tcx> {
+        Instance::new(def_id, &tcx.mk_substs(Substs::empty()))
+    }
+}
+
+/// Monomorphizes a type from the AST by first applying the in-scope
+/// substitutions and then normalizing any associated types.
+pub fn apply_param_substs<'tcx,T>(tcx: &TyCtxt<'tcx>,
+                                  param_substs: &Substs<'tcx>,
+                                  value: &T)
+                                  -> T
+    where T : TypeFoldable<'tcx>
+{
+    let substituted = value.subst(tcx, param_substs);
+    normalize_associated_type(tcx, &substituted)
+}
+
+/// Returns the normalized type of a struct field.
+pub fn field_ty<'tcx>(tcx: &TyCtxt<'tcx>,
+                      param_substs: &Substs<'tcx>,
+                      f: ty::FieldDef<'tcx>)
+                      -> Ty<'tcx>
+{
+    normalize_associated_type(tcx, &f.ty(tcx, param_substs))
+}
diff --git a/src/librustc_trans/save/dump_csv.rs b/src/librustc_trans/save/dump_csv.rs
deleted file mode 100644
index d79f284..0000000
+++ /dev/null
@@ -1,1248 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Output a CSV file containing the output from rustc's analysis. The data is
-//! primarily designed to be used as input to the DXR tool, specifically its
-//! Rust plugin. It could also be used by IDEs or other code browsing, search, or
-//! cross-referencing tools.
-//!
-//! Dumping the analysis is implemented by walking the AST and getting a bunch of
-//! info out from all over the place. We use Def IDs to identify objects. The
-//! tricky part is getting syntactic (span, source text) and semantic (reference
-//! Def IDs) information for parts of expressions which the compiler has discarded.
-//! E.g., in a path `foo::bar::baz`, the compiler only keeps a span for the whole
-//! path and a reference to `baz`, but we want spans and references for all three
-//! idents.
-//!
-//! SpanUtils is used to manipulate spans. In particular, to extract sub-spans
-//! from spans (e.g., the span for `bar` from the above example path).
-//! Recorder is used for recording the output in csv format. FmtStrs separates
-//! the format of the output away from extracting it from the compiler.
-//! DumpCsvVisitor walks the AST and processes it.
-
-
-use super::{escape, generated_code, recorder, SaveContext, PathCollector, Data};
-
-use session::Session;
-
-use middle::def::Def;
-use middle::def_id::DefId;
-use middle::ty;
-
-use std::fs::File;
-use std::hash::*;
-use std::collections::HashSet;
-
-use syntax::ast::{self, NodeId, PatKind};
-use syntax::codemap::*;
-use syntax::parse::token::{self, keywords};
-use syntax::visit::{self, Visitor};
-use syntax::print::pprust::{path_to_string, ty_to_string};
-use syntax::ptr::P;
-
-use rustc_front::lowering::{lower_expr, LoweringContext};
-
-use super::span_utils::SpanUtils;
-use super::recorder::{Recorder, FmtStrs};
-
-macro_rules! down_cast_data {
-    ($id:ident, $kind:ident, $this:ident, $sp:expr) => {
-        let $id = if let super::Data::$kind(data) = $id {
-            data
-        } else {
-            $this.sess.span_bug($sp, &format!("unexpected data kind: {:?}", $id));
-        }
-    };
-}
-
-pub struct DumpCsvVisitor<'l, 'tcx: 'l> {
-    save_ctxt: SaveContext<'l, 'tcx>,
-    sess: &'l Session,
-    tcx: &'l ty::ctxt<'tcx>,
-    analysis: &'l ty::CrateAnalysis<'l>,
-
-    span: SpanUtils<'l>,
-    fmt: FmtStrs<'l, 'tcx>,
-
-    cur_scope: NodeId,
-
-    // Set of macro definition (callee) spans, and the set
-    // of macro use (callsite) spans. We store these to ensure
-    // we only write one macro def per unique macro definition, and
-    // one macro use per unique callsite span.
-    mac_defs: HashSet<Span>,
-    mac_uses: HashSet<Span>,
-
-}
-
-impl <'l, 'tcx> DumpCsvVisitor<'l, 'tcx> {
-    pub fn new(tcx: &'l ty::ctxt<'tcx>,
-               lcx: &'l LoweringContext<'l>,
-               analysis: &'l ty::CrateAnalysis<'l>,
-               output_file: Box<File>)
-               -> DumpCsvVisitor<'l, 'tcx> {
-        let span_utils = SpanUtils::new(&tcx.sess);
-        DumpCsvVisitor {
-            sess: &tcx.sess,
-            tcx: tcx,
-            save_ctxt: SaveContext::from_span_utils(tcx, lcx, span_utils.clone()),
-            analysis: analysis,
-            span: span_utils.clone(),
-            fmt: FmtStrs::new(box Recorder {
-                                  out: output_file,
-                                  dump_spans: false,
-                              },
-                              span_utils,
-                              tcx),
-            cur_scope: 0,
-            mac_defs: HashSet::new(),
-            mac_uses: HashSet::new(),
-        }
-    }
-
-    fn nest<F>(&mut self, scope_id: NodeId, f: F)
-        where F: FnOnce(&mut DumpCsvVisitor<'l, 'tcx>)
-    {
-        let parent_scope = self.cur_scope;
-        self.cur_scope = scope_id;
-        f(self);
-        self.cur_scope = parent_scope;
-    }
-
-    pub fn dump_crate_info(&mut self, name: &str, krate: &ast::Crate) {
-        let source_file = self.tcx.sess.local_crate_source_file.as_ref();
-        let crate_root = match source_file {
-            Some(source_file) => match source_file.file_name() {
-                Some(_) => source_file.parent().unwrap().display().to_string(),
-                None => source_file.display().to_string(),
-            },
-            None => "<no source>".to_owned(),
-        };
-
-        // The current crate.
-        self.fmt.crate_str(krate.span, name, &crate_root);
-
-        // Dump info about all the external crates referenced from this crate.
-        for c in &self.save_ctxt.get_external_crates() {
-            self.fmt.external_crate_str(krate.span, &c.name, c.number);
-        }
-        self.fmt.recorder.record("end_external_crates\n");
-    }
-
-    // Return all non-empty prefixes of a path.
-    // For each prefix, we return the span for the last segment in the prefix and
-    // a str representation of the entire prefix.
-    fn process_path_prefixes(&self, path: &ast::Path) -> Vec<(Span, String)> {
-        let spans = self.span.spans_for_path_segments(path);
-
-        // Paths to enums seem to not match their spans - the span includes all the
-        // variants too. But they seem to always be at the end, so I hope we can cope with
-        // always using the first ones. So, only error out if we don't have enough spans.
-        // What could go wrong...?
-        if spans.len() < path.segments.len() {
-            if generated_code(path.span) {
-                return vec!();
-            }
-            error!("Mis-calculated spans for path '{}'. Found {} spans, expected {}. Found spans:",
-                   path_to_string(path),
-                   spans.len(),
-                   path.segments.len());
-            for s in &spans {
-                let loc = self.sess.codemap().lookup_char_pos(s.lo);
-                error!("    '{}' in {}, line {}",
-                       self.span.snippet(*s),
-                       loc.file.name,
-                       loc.line);
-            }
-            return vec!();
-        }
-
-        let mut result: Vec<(Span, String)> = vec!();
-
-        let mut segs = vec!();
-        for (i, (seg, span)) in path.segments.iter().zip(&spans).enumerate() {
-            segs.push(seg.clone());
-            let sub_path = ast::Path {
-                span: *span, // span for the last segment
-                global: path.global,
-                segments: segs,
-            };
-            let qualname = if i == 0 && path.global {
-                format!("::{}", path_to_string(&sub_path))
-            } else {
-                path_to_string(&sub_path)
-            };
-            result.push((*span, qualname));
-            segs = sub_path.segments;
-        }
-
-        result
-    }
-
-    // The global arg allows us to override the global-ness of the path (which
-    // actually means 'does the path start with `::`', rather than 'is the path
-    // semantically global). We use the override for `use` imports (etc.) where
-    // the syntax is non-global, but the semantics are global.
-    fn write_sub_paths(&mut self, path: &ast::Path, global: bool) {
-        let sub_paths = self.process_path_prefixes(path);
-        for (i, &(ref span, ref qualname)) in sub_paths.iter().enumerate() {
-            let qualname = if i == 0 && global && !path.global {
-                format!("::{}", qualname)
-            } else {
-                qualname.clone()
-            };
-            self.fmt.sub_mod_ref_str(path.span, *span, &qualname, self.cur_scope);
-        }
-    }
-
-    // As write_sub_paths, but does not process the last ident in the path (assuming it
-    // will be processed elsewhere). See note on write_sub_paths about global.
-    fn write_sub_paths_truncated(&mut self, path: &ast::Path, global: bool) {
-        let sub_paths = self.process_path_prefixes(path);
-        let len = sub_paths.len();
-        if len <= 1 {
-            return;
-        }
-
-        let sub_paths = &sub_paths[..len-1];
-        for (i, &(ref span, ref qualname)) in sub_paths.iter().enumerate() {
-            let qualname = if i == 0 && global && !path.global {
-                format!("::{}", qualname)
-            } else {
-                qualname.clone()
-            };
-            self.fmt.sub_mod_ref_str(path.span, *span, &qualname, self.cur_scope);
-        }
-    }
-
-    // As write_sub_paths, but expects a path of the form module_path::trait::method
-    // Where trait could actually be a struct too.
-    fn write_sub_path_trait_truncated(&mut self, path: &ast::Path) {
-        let sub_paths = self.process_path_prefixes(path);
-        let len = sub_paths.len();
-        if len <= 1 {
-            return;
-        }
-        let sub_paths = &sub_paths[.. (len-1)];
-
-        // write the trait part of the sub-path
-        let (ref span, ref qualname) = sub_paths[len-2];
-        self.fmt.sub_type_ref_str(path.span, *span, &qualname);
-
-        // write the other sub-paths
-        if len <= 2 {
-            return;
-        }
-        let sub_paths = &sub_paths[..len-2];
-        for &(ref span, ref qualname) in sub_paths {
-            self.fmt.sub_mod_ref_str(path.span, *span, &qualname, self.cur_scope);
-        }
-    }
-
-    // looks up anything, not just a type
-    fn lookup_type_ref(&self, ref_id: NodeId) -> Option<DefId> {
-        if !self.tcx.def_map.borrow().contains_key(&ref_id) {
-            self.sess.bug(&format!("def_map has no key for {} in lookup_type_ref",
-                                   ref_id));
-        }
-        let def = self.tcx.def_map.borrow().get(&ref_id).unwrap().full_def();
-        match def {
-            Def::PrimTy(..) => None,
-            Def::SelfTy(..) => None,
-            _ => Some(def.def_id()),
-        }
-    }
-
-    fn lookup_def_kind(&self, ref_id: NodeId, span: Span) -> Option<recorder::Row> {
-        let def_map = self.tcx.def_map.borrow();
-        if !def_map.contains_key(&ref_id) {
-            self.sess.span_bug(span,
-                               &format!("def_map has no key for {} in lookup_def_kind",
-                                        ref_id));
-        }
-        let def = def_map.get(&ref_id).unwrap().full_def();
-        match def {
-            Def::Mod(_) |
-            Def::ForeignMod(_) => Some(recorder::ModRef),
-            Def::Struct(..) => Some(recorder::TypeRef),
-            Def::Enum(..) |
-            Def::TyAlias(..) |
-            Def::AssociatedTy(..) |
-            Def::Trait(_) => Some(recorder::TypeRef),
-            Def::Static(_, _) |
-            Def::Const(_) |
-            Def::AssociatedConst(..) |
-            Def::Local(..) |
-            Def::Variant(..) |
-            Def::Upvar(..) => Some(recorder::VarRef),
-
-            Def::Fn(..) => Some(recorder::FnRef),
-
-            Def::SelfTy(..) |
-            Def::Label(_) |
-            Def::TyParam(..) |
-            Def::Method(..) |
-            Def::PrimTy(_) |
-            Def::Err => {
-                self.sess.span_bug(span,
-                                   &format!("lookup_def_kind for unexpected item: {:?}", def));
-            }
-        }
-    }
-
-    fn process_formals(&mut self, formals: &Vec<ast::Arg>, qualname: &str) {
-        for arg in formals {
-            self.visit_pat(&arg.pat);
-            let mut collector = PathCollector::new();
-            collector.visit_pat(&arg.pat);
-            let span_utils = self.span.clone();
-            for &(id, ref p, _, _) in &collector.collected_paths {
-                let typ = self.tcx.node_types().get(&id).unwrap().to_string();
-                // get the span only for the name of the variable (I hope the path is only ever a
-                // variable name, but who knows?)
-                self.fmt.formal_str(p.span,
-                                    span_utils.span_for_last_ident(p.span),
-                                    id,
-                                    qualname,
-                                    &path_to_string(p),
-                                    &typ);
-            }
-        }
-    }
-
-    fn process_method(&mut self,
-                      sig: &ast::MethodSig,
-                      body: Option<&ast::Block>,
-                      id: ast::NodeId,
-                      name: ast::Name,
-                      span: Span) {
-        debug!("process_method: {}:{}", id, name);
-
-        if let Some(method_data) = self.save_ctxt.get_method_data(id, name, span) {
-
-            if body.is_some() {
-                self.fmt.method_str(span,
-                                    Some(method_data.span),
-                                    method_data.id,
-                                    &method_data.qualname,
-                                    method_data.declaration,
-                                    method_data.scope);
-                self.process_formals(&sig.decl.inputs, &method_data.qualname);
-            } else {
-                self.fmt.method_decl_str(span,
-                                         Some(method_data.span),
-                                         method_data.id,
-                                         &method_data.qualname,
-                                         method_data.scope);
-            }
-            self.process_generic_params(&sig.generics, span, &method_data.qualname, id);
-        }
-
-        // walk arg and return types
-        for arg in &sig.decl.inputs {
-            self.visit_ty(&arg.ty);
-        }
-
-        if let ast::FunctionRetTy::Ty(ref ret_ty) = sig.decl.output {
-            self.visit_ty(ret_ty);
-        }
-
-        // walk the fn body
-        if let Some(body) = body {
-            self.nest(id, |v| v.visit_block(body));
-        }
-    }
-
-    fn process_trait_ref(&mut self, trait_ref: &ast::TraitRef) {
-        let trait_ref_data = self.save_ctxt.get_trait_ref_data(trait_ref, self.cur_scope);
-        if let Some(trait_ref_data) = trait_ref_data {
-            self.fmt.ref_str(recorder::TypeRef,
-                             trait_ref.path.span,
-                             Some(trait_ref_data.span),
-                             trait_ref_data.ref_id,
-                             trait_ref_data.scope);
-            visit::walk_path(self, &trait_ref.path);
-        }
-    }
-
-    fn process_struct_field_def(&mut self, field: &ast::StructField, parent_id: NodeId) {
-        let field_data = self.save_ctxt.get_field_data(field, parent_id);
-        if let Some(field_data) = field_data {
-            self.fmt.field_str(field.span,
-                               Some(field_data.span),
-                               field_data.id,
-                               &field_data.name,
-                               &field_data.qualname,
-                               &field_data.type_value,
-                               field_data.scope);
-        }
-    }
-
-    // Dump generic params bindings, then visit_generics
-    fn process_generic_params(&mut self,
-                              generics: &ast::Generics,
-                              full_span: Span,
-                              prefix: &str,
-                              id: NodeId) {
-        // We can't only use visit_generics since we don't have spans for param
-        // bindings, so we reparse the full_span to get those sub spans.
-        // However full span is the entire enum/fn/struct block, so we only want
-        // the first few to match the number of generics we're looking for.
-        let param_sub_spans = self.span.spans_for_ty_params(full_span,
-                                                            (generics.ty_params.len() as isize));
-        for (param, param_ss) in generics.ty_params.iter().zip(param_sub_spans) {
-            // Append $id to name to make sure each one is unique
-            let name = format!("{}::{}${}",
-                               prefix,
-                               escape(self.span.snippet(param_ss)),
-                               id);
-            self.fmt.typedef_str(full_span, Some(param_ss), param.id, &name, "");
-        }
-        self.visit_generics(generics);
-    }
-
-    fn process_fn(&mut self,
-                  item: &ast::Item,
-                  decl: &ast::FnDecl,
-                  ty_params: &ast::Generics,
-                  body: &ast::Block) {
-        if let Some(fn_data) = self.save_ctxt.get_item_data(item) {
-            down_cast_data!(fn_data, FunctionData, self, item.span);
-            self.fmt.fn_str(item.span,
-                            Some(fn_data.span),
-                            fn_data.id,
-                            &fn_data.qualname,
-                            fn_data.scope);
-
-            self.process_formals(&decl.inputs, &fn_data.qualname);
-            self.process_generic_params(ty_params, item.span, &fn_data.qualname, item.id);
-        }
-
-        for arg in &decl.inputs {
-            self.visit_ty(&arg.ty);
-        }
-
-        if let ast::FunctionRetTy::Ty(ref ret_ty) = decl.output {
-            self.visit_ty(&ret_ty);
-        }
-
-        self.nest(item.id, |v| v.visit_block(&body));
-    }
-
-    fn process_static_or_const_item(&mut self, item: &ast::Item, typ: &ast::Ty, expr: &ast::Expr) {
-        if let Some(var_data) = self.save_ctxt.get_item_data(item) {
-            down_cast_data!(var_data, VariableData, self, item.span);
-            self.fmt.static_str(item.span,
-                                Some(var_data.span),
-                                var_data.id,
-                                &var_data.name,
-                                &var_data.qualname,
-                                &var_data.value,
-                                &var_data.type_value,
-                                var_data.scope);
-        }
-        self.visit_ty(&typ);
-        self.visit_expr(expr);
-    }
-
-    fn process_const(&mut self,
-                     id: ast::NodeId,
-                     name: ast::Name,
-                     span: Span,
-                     typ: &ast::Ty,
-                     expr: &ast::Expr) {
-        let qualname = format!("::{}", self.tcx.map.path_to_string(id));
-
-        let sub_span = self.span.sub_span_after_keyword(span, keywords::Const);
-
-        self.fmt.static_str(span,
-                            sub_span,
-                            id,
-                            &name.as_str(),
-                            &qualname,
-                            &self.span.snippet(expr.span),
-                            &ty_to_string(&typ),
-                            self.cur_scope);
-
-        // walk type and init value
-        self.visit_ty(typ);
-        self.visit_expr(expr);
-    }
-
-    fn process_struct(&mut self,
-                      item: &ast::Item,
-                      def: &ast::VariantData,
-                      ty_params: &ast::Generics) {
-        let qualname = format!("::{}", self.tcx.map.path_to_string(item.id));
-
-        let val = self.span.snippet(item.span);
-        let sub_span = self.span.sub_span_after_keyword(item.span, keywords::Struct);
-        self.fmt.struct_str(item.span,
-                            sub_span,
-                            item.id,
-                            def.id(),
-                            &qualname,
-                            self.cur_scope,
-                            &val);
-
-        // fields
-        for field in def.fields() {
-            self.process_struct_field_def(field, item.id);
-            self.visit_ty(&field.node.ty);
-        }
-
-        self.process_generic_params(ty_params, item.span, &qualname, item.id);
-    }
-
-    fn process_enum(&mut self,
-                    item: &ast::Item,
-                    enum_definition: &ast::EnumDef,
-                    ty_params: &ast::Generics) {
-        let enum_data = self.save_ctxt.get_item_data(item);
-        let enum_data = match enum_data {
-            None => return,
-            Some(data) => data,
-        };
-        down_cast_data!(enum_data, EnumData, self, item.span);
-        self.fmt.enum_str(item.span,
-                          Some(enum_data.span),
-                          enum_data.id,
-                          &enum_data.qualname,
-                          enum_data.scope,
-                          &enum_data.value);
-
-        for variant in &enum_definition.variants {
-            let name = &variant.node.name.name.as_str();
-            let mut qualname = enum_data.qualname.clone();
-            qualname.push_str("::");
-            qualname.push_str(name);
-            let val = self.span.snippet(variant.span);
-
-            match variant.node.data {
-                ast::VariantData::Struct(..) => {
-                    self.fmt.struct_variant_str(variant.span,
-                                                self.span.span_for_first_ident(variant.span),
-                                                variant.node.data.id(),
-                                                &qualname,
-                                                &enum_data.qualname,
-                                                &val,
-                                                enum_data.scope);
-                }
-                _ => {
-                    self.fmt.tuple_variant_str(variant.span,
-                                               self.span.span_for_first_ident(variant.span),
-                                               variant.node.data.id(),
-                                               name,
-                                               &qualname,
-                                               &enum_data.qualname,
-                                               &val,
-                                               enum_data.scope);
-                }
-            }
-
-
-            for field in variant.node.data.fields() {
-                self.process_struct_field_def(field, variant.node.data.id());
-                self.visit_ty(&field.node.ty);
-            }
-        }
-        self.process_generic_params(ty_params, item.span, &enum_data.qualname, enum_data.id);
-    }
-
-    fn process_impl(&mut self,
-                    item: &ast::Item,
-                    type_parameters: &ast::Generics,
-                    trait_ref: &Option<ast::TraitRef>,
-                    typ: &ast::Ty,
-                    impl_items: &[ast::ImplItem]) {
-        let mut has_self_ref = false;
-        if let Some(impl_data) = self.save_ctxt.get_item_data(item) {
-            down_cast_data!(impl_data, ImplData, self, item.span);
-            if let Some(ref self_ref) = impl_data.self_ref {
-                has_self_ref = true;
-                self.fmt.ref_str(recorder::TypeRef,
-                                 item.span,
-                                 Some(self_ref.span),
-                                 self_ref.ref_id,
-                                 self_ref.scope);
-            }
-            if let Some(ref trait_ref_data) = impl_data.trait_ref {
-                self.fmt.ref_str(recorder::TypeRef,
-                                 item.span,
-                                 Some(trait_ref_data.span),
-                                 trait_ref_data.ref_id,
-                                 trait_ref_data.scope);
-                visit::walk_path(self, &trait_ref.as_ref().unwrap().path);
-            }
-
-            self.fmt.impl_str(item.span,
-                              Some(impl_data.span),
-                              impl_data.id,
-                              impl_data.self_ref.map(|data| data.ref_id),
-                              impl_data.trait_ref.map(|data| data.ref_id),
-                              impl_data.scope);
-        }
-        if !has_self_ref {
-            self.visit_ty(&typ);
-        }
-        self.process_generic_params(type_parameters, item.span, "", item.id);
-        for impl_item in impl_items {
-            self.visit_impl_item(impl_item);
-        }
-    }
-
-    fn process_trait(&mut self,
-                     item: &ast::Item,
-                     generics: &ast::Generics,
-                     trait_refs: &ast::TyParamBounds,
-                     methods: &[ast::TraitItem]) {
-        let qualname = format!("::{}", self.tcx.map.path_to_string(item.id));
-        let val = self.span.snippet(item.span);
-        let sub_span = self.span.sub_span_after_keyword(item.span, keywords::Trait);
-        self.fmt.trait_str(item.span,
-                           sub_span,
-                           item.id,
-                           &qualname,
-                           self.cur_scope,
-                           &val);
-
-        // super-traits
-        for super_bound in trait_refs.iter() {
-            let trait_ref = match *super_bound {
-                ast::TraitTyParamBound(ref trait_ref, _) => {
-                    trait_ref
-                }
-                ast::RegionTyParamBound(..) => {
-                    continue;
-                }
-            };
-
-            let trait_ref = &trait_ref.trait_ref;
-            match self.lookup_type_ref(trait_ref.ref_id) {
-                Some(id) => {
-                    let sub_span = self.span.sub_span_for_type_name(trait_ref.path.span);
-                    self.fmt.ref_str(recorder::TypeRef,
-                                     trait_ref.path.span,
-                                     sub_span,
-                                     id,
-                                     self.cur_scope);
-                    self.fmt.inherit_str(trait_ref.path.span, sub_span, id, item.id);
-                }
-                None => (),
-            }
-        }
-
-        // walk generics and methods
-        self.process_generic_params(generics, item.span, &qualname, item.id);
-        for method in methods {
-            self.visit_trait_item(method)
-        }
-    }
-
-    // `item` is the module in question, represented as an item.
-    fn process_mod(&mut self, item: &ast::Item) {
-        if let Some(mod_data) = self.save_ctxt.get_item_data(item) {
-            down_cast_data!(mod_data, ModData, self, item.span);
-            self.fmt.mod_str(item.span,
-                             Some(mod_data.span),
-                             mod_data.id,
-                             &mod_data.qualname,
-                             mod_data.scope,
-                             &mod_data.filename);
-        }
-    }
-
-    fn process_path(&mut self, id: NodeId, path: &ast::Path, ref_kind: Option<recorder::Row>) {
-        let path_data = self.save_ctxt.get_path_data(id, path);
-        if generated_code(path.span) && path_data.is_none() {
-            return;
-        }
-
-        let path_data = match path_data {
-            Some(pd) => pd,
-            None => {
-                self.tcx.sess.span_bug(path.span,
-                                       &format!("Unexpected def kind while looking up path in \
-                                                 `{}`",
-                                                self.span.snippet(path.span)))
-            }
-        };
-        match path_data {
-            Data::VariableRefData(ref vrd) => {
-                self.fmt.ref_str(ref_kind.unwrap_or(recorder::VarRef),
-                                 path.span,
-                                 Some(vrd.span),
-                                 vrd.ref_id,
-                                 vrd.scope);
-
-            }
-            Data::TypeRefData(ref trd) => {
-                self.fmt.ref_str(recorder::TypeRef,
-                                 path.span,
-                                 Some(trd.span),
-                                 trd.ref_id,
-                                 trd.scope);
-            }
-            Data::MethodCallData(ref mcd) => {
-                self.fmt.meth_call_str(path.span,
-                                       Some(mcd.span),
-                                       mcd.ref_id,
-                                       mcd.decl_id,
-                                       mcd.scope);
-            }
-            Data::FunctionCallData(fcd) => {
-                self.fmt.fn_call_str(path.span, Some(fcd.span), fcd.ref_id, fcd.scope);
-            }
-            _ => {
-                self.sess.span_bug(path.span,
-                                   &format!("Unexpected data: {:?}", path_data));
-            }
-        }
-
-        // Modules or types in the path prefix.
-        let def_map = self.tcx.def_map.borrow();
-        let def = def_map.get(&id).unwrap().full_def();
-        match def {
-            Def::Method(did) => {
-                let ti = self.tcx.impl_or_trait_item(did);
-                if let ty::MethodTraitItem(m) = ti {
-                    if m.explicit_self == ty::ExplicitSelfCategory::Static {
-                        self.write_sub_path_trait_truncated(path);
-                    }
-                }
-            }
-            Def::Local(..) |
-            Def::Static(_,_) |
-            Def::Const(..) |
-            Def::AssociatedConst(..) |
-            Def::Struct(..) |
-            Def::Variant(..) |
-            Def::Fn(..) => self.write_sub_paths_truncated(path, false),
-            _ => {}
-        }
-    }
-
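-    // Record a type ref for the struct literal's path and a var ref for each field it
-    // can resolve, then walk the field initialisers and the base expression.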
-    fn process_struct_lit(&mut self,
-                          ex: &ast::Expr,
-                          path: &ast::Path,
-                          fields: &Vec<ast::Field>,
-                          variant: ty::VariantDef,
-                          base: &Option<P<ast::Expr>>) {
-        self.write_sub_paths_truncated(path, false);
-
-        if let Some(struct_lit_data) = self.save_ctxt.get_expr_data(ex) {
-            down_cast_data!(struct_lit_data, TypeRefData, self, ex.span);
-            self.fmt.ref_str(recorder::TypeRef,
-                             ex.span,
-                             Some(struct_lit_data.span),
-                             struct_lit_data.ref_id,
-                             struct_lit_data.scope);
-            let scope = self.save_ctxt.enclosing_scope(ex.id);
-
-            for field in fields {
-                if let Some(field_data) = self.save_ctxt
-                                              .get_field_ref_data(field, variant, scope) {
-
-                    self.fmt.ref_str(recorder::VarRef,
-                                     field.ident.span,
-                                     Some(field_data.span),
-                                     field_data.ref_id,
-                                     field_data.scope);
-                }
-
-                self.visit_expr(&field.expr)
-            }
-        }
-
-        walk_list!(self, visit_expr, base);
-    }
-
-    fn process_method_call(&mut self, ex: &ast::Expr, args: &Vec<P<ast::Expr>>) {
-        if let Some(call_data) = self.save_ctxt.get_expr_data(ex) {
-            down_cast_data!(call_data, MethodCallData, self, ex.span);
-            self.fmt.meth_call_str(ex.span,
-                                   Some(call_data.span),
-                                   call_data.ref_id,
-                                   call_data.decl_id,
-                                   call_data.scope);
-        }
-
-        // walk receiver and args
-        walk_list!(self, visit_expr, args);
-    }
-
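-    // Record var refs for the fields of struct patterns; other patterns are walked normally.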
-    fn process_pat(&mut self, p: &ast::Pat) {
-        match p.node {
-            PatKind::Struct(ref path, ref fields, _) => {
-                visit::walk_path(self, path);
-                let adt = self.tcx.node_id_to_type(p.id).ty_adt_def().unwrap();
-                let def = self.tcx.def_map.borrow()[&p.id].full_def();
-                let variant = adt.variant_of_def(def);
-
-                for &Spanned { node: ref field, span } in fields {
-                    let sub_span = self.span.span_for_first_ident(span);
-                    if let Some(f) = variant.find_field_named(field.ident.name) {
-                        self.fmt.ref_str(recorder::VarRef, span, sub_span, f.did, self.cur_scope);
-                    }
-                    self.visit_pat(&field.pat);
-                }
-            }
-            _ => visit::walk_pat(self, p),
-        }
-    }
-
-
-    fn process_var_decl(&mut self, p: &ast::Pat, value: String) {
-        // The local could declare multiple new vars; we must walk the
-        // pattern and collect them all.
-        let mut collector = PathCollector::new();
-        collector.visit_pat(&p);
-        self.visit_pat(&p);
-
-        for &(id, ref p, immut, _) in &collector.collected_paths {
-            let value = if immut == ast::Mutability::Immutable {
-                value.to_string()
-            } else {
-                "<mutable>".to_string()
-            };
-            let types = self.tcx.node_types();
-            let typ = types.get(&id).map(|t| t.to_string()).unwrap_or(String::new());
-            // Get the span only for the name of the variable (I hope the path
-            // is only ever a variable name, but who knows?).
-            let sub_span = self.span.span_for_last_ident(p.span);
-            // Rust uses the id of the pattern for var lookups, so we'll use it too.
-            self.fmt.variable_str(p.span,
-                                  sub_span,
-                                  id,
-                                  &path_to_string(p),
-                                  &value,
-                                  &typ);
-        }
-    }
-
-    /// Extract macro use and definition information from the AST node defined
-    /// by the given NodeId, using the expansion information from the node's
-    /// span.
-    ///
-    /// If the span is not macro-generated, do nothing; otherwise use the callee and
-    /// callsite spans to record macro definition and use data, using the
-    /// mac_uses and mac_defs sets to prevent duplicates.
-    fn process_macro_use(&mut self, span: Span, id: NodeId) {
-        let data = match self.save_ctxt.get_macro_use_data(span, id) {
-            None => return,
-            Some(data) => data,
-        };
-        let mut hasher = SipHasher::new();
-        data.callee_span.hash(&mut hasher);
-        let hash = hasher.finish();
-        let qualname = format!("{}::{}", data.name, hash);
-        // Don't write macro definition for imported macros
-        if !self.mac_defs.contains(&data.callee_span)
-            && !data.imported {
-            self.mac_defs.insert(data.callee_span);
-            if let Some(sub_span) = self.span.span_for_macro_def_name(data.callee_span) {
-                self.fmt.macro_str(data.callee_span, sub_span,
-                                   data.name.clone(), qualname.clone());
-            }
-        }
-        if !self.mac_uses.contains(&data.span) {
-            self.mac_uses.insert(data.span);
-            if let Some(sub_span) = self.span.span_for_macro_use_name(data.span) {
-                self.fmt.macro_use_str(data.span, sub_span, data.name,
-                                       qualname, data.scope);
-            }
-        }
-    }
-}
-
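-// The Visitor impl drives the dump: each visit_* method records any data for the node
-// (via the process_* helpers above) and then walks the node's children.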
-impl<'l, 'tcx, 'v> Visitor<'v> for DumpCsvVisitor<'l, 'tcx> {
-    fn visit_item(&mut self, item: &ast::Item) {
-        use syntax::ast::ItemKind::*;
-        self.process_macro_use(item.span, item.id);
-        match item.node {
-            Use(ref use_item) => {
-                match use_item.node {
-                    ast::ViewPathSimple(ident, ref path) => {
-                        let sub_span = self.span.span_for_last_ident(path.span);
-                        let mod_id = match self.lookup_type_ref(item.id) {
-                            Some(def_id) => {
-                                match self.lookup_def_kind(item.id, path.span) {
-                                    Some(kind) => self.fmt.ref_str(kind,
-                                                                   path.span,
-                                                                   sub_span,
-                                                                   def_id,
-                                                                   self.cur_scope),
-                                    None => {}
-                                }
-                                Some(def_id)
-                            }
-                            None => None,
-                        };
-
-                        // 'use' always introduces an alias: if there is not an explicit
-                        // one, there is an implicit one.
-                        let sub_span = match self.span.sub_span_after_keyword(use_item.span,
-                                                                              keywords::As) {
-                            Some(sub_span) => Some(sub_span),
-                            None => sub_span,
-                        };
-
-                        self.fmt.use_alias_str(path.span,
-                                               sub_span,
-                                               item.id,
-                                               mod_id,
-                                               &ident.name.as_str(),
-                                               self.cur_scope);
-                        self.write_sub_paths_truncated(path, true);
-                    }
-                    ast::ViewPathGlob(ref path) => {
-                        // Make a comma-separated list of names of imported modules.
-                        let mut name_string = String::new();
-                        let glob_map = &self.analysis.glob_map;
-                        let glob_map = glob_map.as_ref().unwrap();
-                        if glob_map.contains_key(&item.id) {
-                            for n in glob_map.get(&item.id).unwrap() {
-                                if !name_string.is_empty() {
-                                    name_string.push_str(", ");
-                                }
-                                name_string.push_str(&n.as_str());
-                            }
-                        }
-
-                        let sub_span = self.span
-                                           .sub_span_of_token(path.span, token::BinOp(token::Star));
-                        self.fmt.use_glob_str(path.span,
-                                              sub_span,
-                                              item.id,
-                                              &name_string,
-                                              self.cur_scope);
-                        self.write_sub_paths(path, true);
-                    }
-                    ast::ViewPathList(ref path, ref list) => {
-                        for plid in list {
-                            match plid.node {
-                                ast::PathListItemKind::Ident { id, .. } => {
-                                    match self.lookup_type_ref(id) {
-                                        Some(def_id) => match self.lookup_def_kind(id, plid.span) {
-                                            Some(kind) => {
-                                                self.fmt.ref_str(kind,
-                                                                 plid.span,
-                                                                 Some(plid.span),
-                                                                 def_id,
-                                                                 self.cur_scope);
-                                            }
-                                            None => (),
-                                        },
-                                        None => (),
-                                    }
-                                }
-                                ast::PathListItemKind::Mod { .. } => (),
-                            }
-                        }
-
-                        self.write_sub_paths(path, true);
-                    }
-                }
-            }
-            ExternCrate(ref s) => {
-                let location = match *s {
-                    Some(s) => s.to_string(),
-                    None => item.ident.to_string(),
-                };
-                let alias_span = self.span.span_for_last_ident(item.span);
-                let cnum = match self.sess.cstore.extern_mod_stmt_cnum(item.id) {
-                    Some(cnum) => cnum,
-                    None => 0,
-                };
-                self.fmt.extern_crate_str(item.span,
-                                          alias_span,
-                                          item.id,
-                                          cnum,
-                                          &item.ident.name.as_str(),
-                                          &location,
-                                          self.cur_scope);
-            }
-            Fn(ref decl, _, _, _, ref ty_params, ref body) =>
-                self.process_fn(item, &decl, ty_params, &body),
-            Static(ref typ, _, ref expr) =>
-                self.process_static_or_const_item(item, typ, expr),
-            Const(ref typ, ref expr) =>
-                self.process_static_or_const_item(item, &typ, &expr),
-            Struct(ref def, ref ty_params) => self.process_struct(item, def, ty_params),
-            Enum(ref def, ref ty_params) => self.process_enum(item, def, ty_params),
-            Impl(_, _,
-                          ref ty_params,
-                          ref trait_ref,
-                          ref typ,
-                          ref impl_items) => {
-                self.process_impl(item, ty_params, trait_ref, &typ, impl_items)
-            }
-            Trait(_, ref generics, ref trait_refs, ref methods) =>
-                self.process_trait(item, generics, trait_refs, methods),
-            Mod(ref m) => {
-                self.process_mod(item);
-                self.nest(item.id, |v| visit::walk_mod(v, m));
-            }
-            Ty(ref ty, ref ty_params) => {
-                let qualname = format!("::{}", self.tcx.map.path_to_string(item.id));
-                let value = ty_to_string(&ty);
-                let sub_span = self.span.sub_span_after_keyword(item.span, keywords::Type);
-                self.fmt.typedef_str(item.span, sub_span, item.id, &qualname, &value);
-
-                self.visit_ty(&ty);
-                self.process_generic_params(ty_params, item.span, &qualname, item.id);
-            }
-            Mac(_) => (),
-            _ => visit::walk_item(self, item),
-        }
-    }
-
-    fn visit_generics(&mut self, generics: &ast::Generics) {
-        for param in generics.ty_params.iter() {
-            for bound in param.bounds.iter() {
-                if let ast::TraitTyParamBound(ref trait_ref, _) = *bound {
-                    self.process_trait_ref(&trait_ref.trait_ref);
-                }
-            }
-            if let Some(ref ty) = param.default {
-                self.visit_ty(&ty);
-            }
-        }
-    }
-
-    fn visit_trait_item(&mut self, trait_item: &ast::TraitItem) {
-        self.process_macro_use(trait_item.span, trait_item.id);
-        match trait_item.node {
-            ast::TraitItemKind::Const(ref ty, Some(ref expr)) => {
-                self.process_const(trait_item.id,
-                                   trait_item.ident.name,
-                                   trait_item.span,
-                                   &ty,
-                                   &expr);
-            }
-            ast::TraitItemKind::Method(ref sig, ref body) => {
-                self.process_method(sig,
-                                    body.as_ref().map(|x| &**x),
-                                    trait_item.id,
-                                    trait_item.ident.name,
-                                    trait_item.span);
-            }
-            ast::TraitItemKind::Const(_, None) |
-            ast::TraitItemKind::Type(..) => {}
-        }
-    }
-
-    fn visit_impl_item(&mut self, impl_item: &ast::ImplItem) {
-        self.process_macro_use(impl_item.span, impl_item.id);
-        match impl_item.node {
-            ast::ImplItemKind::Const(ref ty, ref expr) => {
-                self.process_const(impl_item.id,
-                                   impl_item.ident.name,
-                                   impl_item.span,
-                                   &ty,
-                                   &expr);
-            }
-            ast::ImplItemKind::Method(ref sig, ref body) => {
-                self.process_method(sig,
-                                    Some(body),
-                                    impl_item.id,
-                                    impl_item.ident.name,
-                                    impl_item.span);
-            }
-            ast::ImplItemKind::Type(_) |
-            ast::ImplItemKind::Macro(_) => {}
-        }
-    }
-
-    fn visit_ty(&mut self, t: &ast::Ty) {
-        self.process_macro_use(t.span, t.id);
-        match t.node {
-            ast::TyKind::Path(_, ref path) => {
-                match self.lookup_type_ref(t.id) {
-                    Some(id) => {
-                        let sub_span = self.span.sub_span_for_type_name(t.span);
-                        self.fmt.ref_str(recorder::TypeRef, t.span, sub_span, id, self.cur_scope);
-                    }
-                    None => (),
-                }
-
-                self.write_sub_paths_truncated(path, false);
-
-                visit::walk_path(self, path);
-            }
-            _ => visit::walk_ty(self, t),
-        }
-    }
-
-    fn visit_expr(&mut self, ex: &ast::Expr) {
-        self.process_macro_use(ex.span, ex.id);
-        match ex.node {
-            ast::ExprKind::Call(ref _f, ref _args) => {
-                // Don't need to do anything for function calls,
-                // because just walking the callee path does what we want.
-                visit::walk_expr(self, ex);
-            }
-            ast::ExprKind::Path(_, ref path) => {
-                self.process_path(ex.id, path, None);
-                visit::walk_expr(self, ex);
-            }
-            ast::ExprKind::Struct(ref path, ref fields, ref base) => {
-                let hir_expr = lower_expr(self.save_ctxt.lcx, ex);
-                let adt = self.tcx.expr_ty(&hir_expr).ty_adt_def().unwrap();
-                let def = self.tcx.resolve_expr(&hir_expr);
-                self.process_struct_lit(ex, path, fields, adt.variant_of_def(def), base)
-            }
-            ast::ExprKind::MethodCall(_, _, ref args) => self.process_method_call(ex, args),
-            ast::ExprKind::Field(ref sub_ex, _) => {
-                self.visit_expr(&sub_ex);
-
-                if let Some(field_data) = self.save_ctxt.get_expr_data(ex) {
-                    down_cast_data!(field_data, VariableRefData, self, ex.span);
-                    self.fmt.ref_str(recorder::VarRef,
-                                     ex.span,
-                                     Some(field_data.span),
-                                     field_data.ref_id,
-                                     field_data.scope);
-                }
-            }
-            ast::ExprKind::TupField(ref sub_ex, idx) => {
-                self.visit_expr(&sub_ex);
-
-                let hir_node = lower_expr(self.save_ctxt.lcx, sub_ex);
-                let ty = &self.tcx.expr_ty_adjusted(&hir_node).sty;
-                match *ty {
-                    ty::TyStruct(def, _) => {
-                        let sub_span = self.span.sub_span_after_token(ex.span, token::Dot);
-                        self.fmt.ref_str(recorder::VarRef,
-                                         ex.span,
-                                         sub_span,
-                                         def.struct_variant().fields[idx.node].did,
-                                         self.cur_scope);
-                    }
-                    ty::TyTuple(_) => {}
-                    _ => self.sess.span_bug(ex.span,
-                                            &format!("Expected struct or tuple type, found {:?}",
-                                                     ty)),
-                }
-            }
-            ast::ExprKind::Closure(_, ref decl, ref body) => {
-                let mut id = String::from("$");
-                id.push_str(&ex.id.to_string());
-                self.process_formals(&decl.inputs, &id);
-
-                // walk arg and return types
-                for arg in &decl.inputs {
-                    self.visit_ty(&arg.ty);
-                }
-
-                if let ast::FunctionRetTy::Ty(ref ret_ty) = decl.output {
-                    self.visit_ty(&ret_ty);
-                }
-
-                // walk the body
-                self.nest(ex.id, |v| v.visit_block(&body));
-            }
-            ast::ExprKind::ForLoop(ref pattern, ref subexpression, ref block, _) |
-            ast::ExprKind::WhileLet(ref pattern, ref subexpression, ref block, _) => {
-                let value = self.span.snippet(mk_sp(ex.span.lo, subexpression.span.hi));
-                self.process_var_decl(pattern, value);
-                visit::walk_expr(self, subexpression);
-                visit::walk_block(self, block);
-            }
-            ast::ExprKind::IfLet(ref pattern, ref subexpression, ref block, ref opt_else) => {
-                let value = self.span.snippet(mk_sp(ex.span.lo, subexpression.span.hi));
-                self.process_var_decl(pattern, value);
-                visit::walk_expr(self, subexpression);
-                visit::walk_block(self, block);
-                opt_else.as_ref().map(|el| visit::walk_expr(self, el));
-            }
-            _ => {
-                visit::walk_expr(self, ex)
-            }
-        }
-    }
-
-    fn visit_mac(&mut self, mac: &ast::Mac) {
-        // These shouldn't exist in the AST at this point; log a span bug.
-        self.sess.span_bug(mac.span, "macro invocation should have been expanded out of AST");
-    }
-
-    fn visit_pat(&mut self, p: &ast::Pat) {
-        self.process_macro_use(p.span, p.id);
-        self.process_pat(p);
-    }
-
-    fn visit_arm(&mut self, arm: &ast::Arm) {
-        let mut collector = PathCollector::new();
-        for pattern in &arm.pats {
-            // collect paths from the arm's patterns
-            collector.visit_pat(&pattern);
-            self.visit_pat(&pattern);
-        }
-
-        // This is to get around borrow checking, because we need mut self to call process_path.
-        let mut paths_to_process = vec![];
-
-        // process collected paths
-        for &(id, ref p, immut, ref_kind) in &collector.collected_paths {
-            let def_map = self.tcx.def_map.borrow();
-            if !def_map.contains_key(&id) {
-                self.sess.span_bug(p.span,
-                                   &format!("def_map has no key for {} in visit_arm", id));
-            }
-            let def = def_map.get(&id).unwrap().full_def();
-            match def {
-                Def::Local(_, id) => {
-                    let value = if immut == ast::Mutability::Immutable {
-                        self.span.snippet(p.span).to_string()
-                    } else {
-                        "<mutable>".to_string()
-                    };
-
-                    assert!(p.segments.len() == 1,
-                            "qualified path for local variable def in arm");
-                    self.fmt.variable_str(p.span, Some(p.span), id, &path_to_string(p), &value, "")
-                }
-                Def::Variant(..) | Def::Enum(..) |
-                Def::TyAlias(..) | Def::Struct(..) => {
-                    paths_to_process.push((id, p.clone(), Some(ref_kind)))
-                }
-                // FIXME(nrc) what are these doing here?
-                Def::Static(_, _) |
-                Def::Const(..) |
-                Def::AssociatedConst(..) => {}
-                _ => error!("unexpected definition kind when processing collected paths: {:?}",
-                            def),
-            }
-        }
-
-        for &(id, ref path, ref_kind) in &paths_to_process {
-            self.process_path(id, path, ref_kind);
-        }
-        walk_list!(self, visit_expr, &arm.guard);
-        self.visit_expr(&arm.body);
-    }
-
-    fn visit_stmt(&mut self, s: &ast::Stmt) {
-        let id = s.node.id();
-        self.process_macro_use(s.span, id.unwrap());
-        visit::walk_stmt(self, s)
-    }
-
-    fn visit_local(&mut self, l: &ast::Local) {
-        self.process_macro_use(l.span, l.id);
-        let value = self.span.snippet(l.span);
-        self.process_var_decl(&l.pat, value);
-
-        // Just walk the initialiser and type (don't want to walk the pattern again).
-        walk_list!(self, visit_ty, &l.ty);
-        walk_list!(self, visit_expr, &l.init);
-    }
-}
diff --git a/src/librustc_trans/save/mod.rs b/src/librustc_trans/save/mod.rs
deleted file mode 100644 (file)
index 9c529cc..0000000
+++ /dev/null
@@ -1,862 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use middle::ty;
-use middle::def::Def;
-use middle::def_id::DefId;
-
-use std::env;
-use std::fs::{self, File};
-use std::path::{Path, PathBuf};
-
-use rustc_front;
-use rustc_front::{hir, lowering};
-use rustc::front::map::NodeItem;
-use rustc::session::config::CrateType::CrateTypeExecutable;
-
-use syntax::ast::{self, NodeId, PatKind};
-use syntax::ast_util;
-use syntax::codemap::*;
-use syntax::parse::token::{self, keywords};
-use syntax::visit::{self, Visitor};
-use syntax::print::pprust::ty_to_string;
-
-use self::span_utils::SpanUtils;
-
-#[macro_use]
-pub mod span_utils;
-pub mod recorder;
-
-mod dump_csv;
-
-pub struct SaveContext<'l, 'tcx: 'l> {
-    tcx: &'l ty::ctxt<'tcx>,
-    lcx: &'l lowering::LoweringContext<'l>,
-    span_utils: SpanUtils<'l>,
-}
-
-pub struct CrateData {
-    pub name: String,
-    pub number: u32,
-}
-
-/// Data for any entity in the Rust language. The actual data contained varies
-/// with the kind of entity being queried. See the nested structs for details.
-#[derive(Debug)]
-pub enum Data {
-    /// Data for all kinds of functions and methods.
-    FunctionData(FunctionData),
-    /// Data for local and global variables (consts and statics), and fields.
-    VariableData(VariableData),
-    /// Data for modules.
-    ModData(ModData),
-    /// Data for Enums.
-    EnumData(EnumData),
-    /// Data for impls.
-    ImplData(ImplData),
-
-    /// Data for the use of some variable (e.g., the use of a local variable, which
-    /// will refer to that variable's declaration).
-    VariableRefData(VariableRefData),
-    /// Data for a reference to a type or trait.
-    TypeRefData(TypeRefData),
-    /// Data for a reference to a module.
-    ModRefData(ModRefData),
-    /// Data about a function call.
-    FunctionCallData(FunctionCallData),
-    /// Data about a method call.
-    MethodCallData(MethodCallData),
-    /// Data about a macro use.
-    MacroUseData(MacroUseData),
-}
-
-/// Data for all kinds of functions and methods.
-#[derive(Debug)]
-pub struct FunctionData {
-    pub id: NodeId,
-    pub name: String,
-    pub qualname: String,
-    pub declaration: Option<DefId>,
-    pub span: Span,
-    pub scope: NodeId,
-}
-
-/// Data for local and global variables (consts and statics).
-#[derive(Debug)]
-pub struct VariableData {
-    pub id: NodeId,
-    pub name: String,
-    pub qualname: String,
-    pub span: Span,
-    pub scope: NodeId,
-    pub value: String,
-    pub type_value: String,
-}
-
-/// Data for modules.
-#[derive(Debug)]
-pub struct ModData {
-    pub id: NodeId,
-    pub name: String,
-    pub qualname: String,
-    pub span: Span,
-    pub scope: NodeId,
-    pub filename: String,
-}
-
-/// Data for enum declarations.
-#[derive(Debug)]
-pub struct EnumData {
-    pub id: NodeId,
-    pub value: String,
-    pub qualname: String,
-    pub span: Span,
-    pub scope: NodeId,
-}
-
-#[derive(Debug)]
-pub struct ImplData {
-    pub id: NodeId,
-    pub span: Span,
-    pub scope: NodeId,
-    // FIXME: I'm not really sure inline data is the best way to do this. Seems
-    // OK in this case, but generalising leads to returning chunks of AST, which
-    // feels wrong.
-    pub trait_ref: Option<TypeRefData>,
-    pub self_ref: Option<TypeRefData>,
-}
-
-/// Data for the use of some item (e.g., the use of a local variable, which
-/// will refer to that variable's declaration (by ref_id)).
-#[derive(Debug)]
-pub struct VariableRefData {
-    pub name: String,
-    pub span: Span,
-    pub scope: NodeId,
-    pub ref_id: DefId,
-}
-
-/// Data for a reference to a type or trait.
-#[derive(Debug)]
-pub struct TypeRefData {
-    pub span: Span,
-    pub scope: NodeId,
-    pub ref_id: DefId,
-}
-
-/// Data for a reference to a module.
-#[derive(Debug)]
-pub struct ModRefData {
-    pub span: Span,
-    pub scope: NodeId,
-    pub ref_id: DefId,
-}
-
-/// Data about a function call.
-#[derive(Debug)]
-pub struct FunctionCallData {
-    pub span: Span,
-    pub scope: NodeId,
-    pub ref_id: DefId,
-}
-
-/// Data about a method call.
-#[derive(Debug)]
-pub struct MethodCallData {
-    pub span: Span,
-    pub scope: NodeId,
-    pub ref_id: Option<DefId>,
-    pub decl_id: Option<DefId>,
-}
-
-/// Data about a macro use.
-#[derive(Debug)]
-pub struct MacroUseData {
-    pub span: Span,
-    pub name: String,
-    // Because macro expansion happens before ref-ids are determined,
-    // we use the callee span to reference the associated macro definition.
-    pub callee_span: Span,
-    pub scope: NodeId,
-    pub imported: bool,
-}
-
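-// Unwrap a `Some` value or return `None` from the enclosing function.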
-macro_rules! option_try(
-    ($e:expr) => (match $e { Some(e) => e, None => return None })
-);
-
-
-
-impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> {
-    pub fn new(tcx: &'l ty::ctxt<'tcx>,
-               lcx: &'l lowering::LoweringContext<'l>)
-               -> SaveContext<'l, 'tcx> {
-        let span_utils = SpanUtils::new(&tcx.sess);
-        SaveContext::from_span_utils(tcx, lcx, span_utils)
-    }
-
-    pub fn from_span_utils(tcx: &'l ty::ctxt<'tcx>,
-                           lcx: &'l lowering::LoweringContext<'l>,
-                           span_utils: SpanUtils<'l>)
-                           -> SaveContext<'l, 'tcx> {
-        SaveContext {
-            tcx: tcx,
-            lcx: lcx,
-            span_utils: span_utils,
-        }
-    }
-
-    // List external crates used by the current crate.
-    pub fn get_external_crates(&self) -> Vec<CrateData> {
-        let mut result = Vec::new();
-
-        for n in self.tcx.sess.cstore.crates() {
-            result.push(CrateData {
-                name: self.tcx.sess.cstore.crate_name(n),
-                number: n,
-            });
-        }
-
-        result
-    }
-
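-    // Build `Data` for a top-level item (fn, static, const, mod, enum, or impl).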
-    pub fn get_item_data(&self, item: &ast::Item) -> Option<Data> {
-        match item.node {
-            ast::ItemKind::Fn(..) => {
-                let name = self.tcx.map.path_to_string(item.id);
-                let qualname = format!("::{}", name);
-                let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Fn);
-                filter!(self.span_utils, sub_span, item.span, None);
-                Some(Data::FunctionData(FunctionData {
-                    id: item.id,
-                    name: name,
-                    qualname: qualname,
-                    declaration: None,
-                    span: sub_span.unwrap(),
-                    scope: self.enclosing_scope(item.id),
-                }))
-            }
-            ast::ItemKind::Static(ref typ, mt, ref expr) => {
-                let qualname = format!("::{}", self.tcx.map.path_to_string(item.id));
-
-                // If the variable is immutable, save the initialising expression.
-                let (value, keyword) = match mt {
-                    ast::Mutability::Mutable => (String::from("<mutable>"), keywords::Mut),
-                    ast::Mutability::Immutable => {
-                        (self.span_utils.snippet(expr.span), keywords::Static)
-                    },
-                };
-
-                let sub_span = self.span_utils.sub_span_after_keyword(item.span, keyword);
-                filter!(self.span_utils, sub_span, item.span, None);
-                Some(Data::VariableData(VariableData {
-                    id: item.id,
-                    name: item.ident.to_string(),
-                    qualname: qualname,
-                    span: sub_span.unwrap(),
-                    scope: self.enclosing_scope(item.id),
-                    value: value,
-                    type_value: ty_to_string(&typ),
-                }))
-            }
-            ast::ItemKind::Const(ref typ, ref expr) => {
-                let qualname = format!("::{}", self.tcx.map.path_to_string(item.id));
-                let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Const);
-                filter!(self.span_utils, sub_span, item.span, None);
-                Some(Data::VariableData(VariableData {
-                    id: item.id,
-                    name: item.ident.to_string(),
-                    qualname: qualname,
-                    span: sub_span.unwrap(),
-                    scope: self.enclosing_scope(item.id),
-                    value: self.span_utils.snippet(expr.span),
-                    type_value: ty_to_string(&typ),
-                }))
-            }
-            ast::ItemKind::Mod(ref m) => {
-                let qualname = format!("::{}", self.tcx.map.path_to_string(item.id));
-
-                let cm = self.tcx.sess.codemap();
-                let filename = cm.span_to_filename(m.inner);
-
-                let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Mod);
-                filter!(self.span_utils, sub_span, item.span, None);
-                Some(Data::ModData(ModData {
-                    id: item.id,
-                    name: item.ident.to_string(),
-                    qualname: qualname,
-                    span: sub_span.unwrap(),
-                    scope: self.enclosing_scope(item.id),
-                    filename: filename,
-                }))
-            }
-            ast::ItemKind::Enum(..) => {
-                let enum_name = format!("::{}", self.tcx.map.path_to_string(item.id));
-                let val = self.span_utils.snippet(item.span);
-                let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Enum);
-                filter!(self.span_utils, sub_span, item.span, None);
-                Some(Data::EnumData(EnumData {
-                    id: item.id,
-                    value: val,
-                    span: sub_span.unwrap(),
-                    qualname: enum_name,
-                    scope: self.enclosing_scope(item.id),
-                }))
-            }
-            ast::ItemKind::Impl(_, _, _, ref trait_ref, ref typ, _) => {
-                let mut type_data = None;
-                let sub_span;
-
-                let parent = self.enclosing_scope(item.id);
-
-                match typ.node {
-                    // Common case impl for a struct or something basic.
-                    ast::TyKind::Path(None, ref path) => {
-                        sub_span = self.span_utils.sub_span_for_type_name(path.span);
-                        filter!(self.span_utils, sub_span, path.span, None);
-                        type_data = self.lookup_ref_id(typ.id).map(|id| {
-                            TypeRefData {
-                                span: sub_span.unwrap(),
-                                scope: parent,
-                                ref_id: id,
-                            }
-                        });
-                    }
-                    _ => {
-                        // Less useful case, impl for a compound type.
-                        let span = typ.span;
-                        sub_span = self.span_utils.sub_span_for_type_name(span).or(Some(span));
-                    }
-                }
-
-                let trait_data = trait_ref.as_ref()
-                                          .and_then(|tr| self.get_trait_ref_data(tr, parent));
-
-                filter!(self.span_utils, sub_span, typ.span, None);
-                Some(Data::ImplData(ImplData {
-                    id: item.id,
-                    span: sub_span.unwrap(),
-                    scope: parent,
-                    trait_ref: trait_data,
-                    self_ref: type_data,
-                }))
-            }
-            _ => {
-                // FIXME
-                unimplemented!();
-            }
-        }
-    }
-
-    pub fn get_field_data(&self, field: &ast::StructField,
-                          scope: NodeId) -> Option<VariableData> {
-        match field.node.kind {
-            ast::NamedField(ident, _) => {
-                let qualname = format!("::{}::{}", self.tcx.map.path_to_string(scope), ident);
-                let typ = self.tcx.node_types().get(&field.node.id).unwrap().to_string();
-                let sub_span = self.span_utils.sub_span_before_token(field.span, token::Colon);
-                filter!(self.span_utils, sub_span, field.span, None);
-                Some(VariableData {
-                    id: field.node.id,
-                    name: ident.to_string(),
-                    qualname: qualname,
-                    span: sub_span.unwrap(),
-                    scope: scope,
-                    value: "".to_owned(),
-                    type_value: typ,
-                })
-            }
-            _ => None,
-        }
-    }
-
-    // FIXME would be nice to take a MethodItem here, but the ast provides both
-    // trait and impl flavours, so the caller must do the disassembly.
-    pub fn get_method_data(&self, id: ast::NodeId,
-                           name: ast::Name, span: Span) -> Option<FunctionData> {
-        // The qualname for a method is the trait name, or the name of the struct in the
-        // impl in which the method is declared, followed by the method's name.
-        let qualname = match self.tcx.impl_of_method(self.tcx.map.local_def_id(id)) {
-            Some(impl_id) => match self.tcx.map.get_if_local(impl_id) {
-                Some(NodeItem(item)) => {
-                    match item.node {
-                        hir::ItemImpl(_, _, _, _, ref ty, _) => {
-                            let mut result = String::from("<");
-                            result.push_str(&rustc_front::print::pprust::ty_to_string(&ty));
-
-                            match self.tcx.trait_of_item(self.tcx.map.local_def_id(id)) {
-                                Some(def_id) => {
-                                    result.push_str(" as ");
-                                    result.push_str(&self.tcx.item_path_str(def_id));
-                                }
-                                None => {}
-                            }
-                            result.push_str(">");
-                            result
-                        }
-                        _ => {
-                            self.tcx.sess.span_bug(span,
-                                                   &format!("Container {:?} for method {} not \
-                                                             an impl?",
-                                                            impl_id,
-                                                            id));
-                        }
-                    }
-                }
-                r => {
-                    self.tcx.sess.span_bug(span,
-                                           &format!("Container {:?} for method {} is not a node \
-                                                     item {:?}",
-                                                    impl_id,
-                                                    id,
-                                                    r));
-                }
-            },
-            None => match self.tcx.trait_of_item(self.tcx.map.local_def_id(id)) {
-                Some(def_id) => {
-                    match self.tcx.map.get_if_local(def_id) {
-                        Some(NodeItem(_)) => {
-                            format!("::{}", self.tcx.item_path_str(def_id))
-                        }
-                        r => {
-                            self.tcx.sess.span_bug(span,
-                                                   &format!("Could not find container {:?} for \
-                                                             method {}, got {:?}",
-                                                            def_id,
-                                                            id,
-                                                            r));
-                        }
-                    }
-                }
-                None => {
-                    self.tcx.sess.span_bug(span,
-                                           &format!("Could not find container for method {}", id));
-                }
-            },
-        };
-
-        let qualname = format!("{}::{}", qualname, name);
-
-        let def_id = self.tcx.map.local_def_id(id);
-        let decl_id = self.tcx.trait_item_of_item(def_id).and_then(|new_id| {
-            let new_def_id = new_id.def_id();
-            if new_def_id != def_id {
-                Some(new_def_id)
-            } else {
-                None
-            }
-        });
-
-        let sub_span = self.span_utils.sub_span_after_keyword(span, keywords::Fn);
-        filter!(self.span_utils, sub_span, span, None);
-        Some(FunctionData {
-            id: id,
-            name: name.to_string(),
-            qualname: qualname,
-            declaration: decl_id,
-            span: sub_span.unwrap(),
-            scope: self.enclosing_scope(id),
-        })
-    }
-
-    pub fn get_trait_ref_data(&self,
-                              trait_ref: &ast::TraitRef,
-                              parent: NodeId)
-                              -> Option<TypeRefData> {
-        self.lookup_ref_id(trait_ref.ref_id).and_then(|def_id| {
-            let span = trait_ref.path.span;
-            let sub_span = self.span_utils.sub_span_for_type_name(span).or(Some(span));
-            filter!(self.span_utils, sub_span, span, None);
-            Some(TypeRefData {
-                span: sub_span.unwrap(),
-                scope: parent,
-                ref_id: def_id,
-            })
-        })
-    }
-
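-    // Build `Data` for an expression that refers to an item: field accesses, struct
-    // literals, method calls, and paths.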
-    pub fn get_expr_data(&self, expr: &ast::Expr) -> Option<Data> {
-        let hir_node = lowering::lower_expr(self.lcx, expr);
-        let ty = self.tcx.expr_ty_adjusted_opt(&hir_node);
-        if ty.is_none() || ty.unwrap().sty == ty::TyError {
-            return None;
-        }
-        match expr.node {
-            ast::ExprKind::Field(ref sub_ex, ident) => {
-                let hir_node = lowering::lower_expr(self.lcx, sub_ex);
-                match self.tcx.expr_ty_adjusted(&hir_node).sty {
-                    ty::TyStruct(def, _) => {
-                        let f = def.struct_variant().field_named(ident.node.name);
-                        let sub_span = self.span_utils.span_for_last_ident(expr.span);
-                        filter!(self.span_utils, sub_span, expr.span, None);
-                        return Some(Data::VariableRefData(VariableRefData {
-                            name: ident.node.to_string(),
-                            span: sub_span.unwrap(),
-                            scope: self.enclosing_scope(expr.id),
-                            ref_id: f.did,
-                        }));
-                    }
-                    _ => {
-                        debug!("Expected struct type, found {:?}", ty);
-                        None
-                    }
-                }
-            }
-            ast::ExprKind::Struct(ref path, _, _) => {
-                let hir_node = lowering::lower_expr(self.lcx, expr);
-                match self.tcx.expr_ty_adjusted(&hir_node).sty {
-                    ty::TyStruct(def, _) => {
-                        let sub_span = self.span_utils.span_for_last_ident(path.span);
-                        filter!(self.span_utils, sub_span, path.span, None);
-                        Some(Data::TypeRefData(TypeRefData {
-                            span: sub_span.unwrap(),
-                            scope: self.enclosing_scope(expr.id),
-                            ref_id: def.did,
-                        }))
-                    }
-                    _ => {
-                        // FIXME ty could legitimately be a TyEnum, but then we will fail
-                        // later if we try to look up the fields.
-                        debug!("expected TyStruct, found {:?}", ty);
-                        None
-                    }
-                }
-            }
-            ast::ExprKind::MethodCall(..) => {
-                let method_call = ty::MethodCall::expr(expr.id);
-                let method_id = self.tcx.tables.borrow().method_map[&method_call].def_id;
-                let (def_id, decl_id) = match self.tcx.impl_or_trait_item(method_id).container() {
-                    ty::ImplContainer(_) => (Some(method_id), None),
-                    ty::TraitContainer(_) => (None, Some(method_id)),
-                };
-                let sub_span = self.span_utils.sub_span_for_meth_name(expr.span);
-                filter!(self.span_utils, sub_span, expr.span, None);
-                let parent = self.enclosing_scope(expr.id);
-                Some(Data::MethodCallData(MethodCallData {
-                    span: sub_span.unwrap(),
-                    scope: parent,
-                    ref_id: def_id,
-                    decl_id: decl_id,
-                }))
-            }
-            ast::ExprKind::Path(_, ref path) => {
-                self.get_path_data(expr.id, path)
-            }
-            _ => {
-                // FIXME
-                unimplemented!();
-            }
-        }
-    }
-
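-    // Resolve the path's definition and build the matching reference data (variable,
-    // type, method, function, or module reference).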
-    pub fn get_path_data(&self, id: NodeId, path: &ast::Path) -> Option<Data> {
-        let def_map = self.tcx.def_map.borrow();
-        if !def_map.contains_key(&id) {
-            self.tcx.sess.span_bug(path.span,
-                                   &format!("def_map has no key for {} in visit_expr", id));
-        }
-        let def = def_map.get(&id).unwrap().full_def();
-        let sub_span = self.span_utils.span_for_last_ident(path.span);
-        filter!(self.span_utils, sub_span, path.span, None);
-        match def {
-            Def::Upvar(..) |
-            Def::Local(..) |
-            Def::Static(..) |
-            Def::Const(..) |
-            Def::AssociatedConst(..) |
-            Def::Variant(..) => {
-                Some(Data::VariableRefData(VariableRefData {
-                    name: self.span_utils.snippet(sub_span.unwrap()),
-                    span: sub_span.unwrap(),
-                    scope: self.enclosing_scope(id),
-                    ref_id: def.def_id(),
-                }))
-            }
-            Def::Struct(def_id) |
-            Def::Enum(def_id) |
-            Def::TyAlias(def_id) |
-            Def::Trait(def_id) |
-            Def::TyParam(_, _, def_id, _) => {
-                Some(Data::TypeRefData(TypeRefData {
-                    span: sub_span.unwrap(),
-                    ref_id: def_id,
-                    scope: self.enclosing_scope(id),
-                }))
-            }
-            Def::Method(decl_id) => {
-                let sub_span = self.span_utils.sub_span_for_meth_name(path.span);
-                filter!(self.span_utils, sub_span, path.span, None);
-                let def_id = if decl_id.is_local() {
-                    let ti = self.tcx.impl_or_trait_item(decl_id);
-                    match ti.container() {
-                        ty::TraitContainer(def_id) => {
-                            self.tcx
-                                .trait_items(def_id)
-                                .iter()
-                                .find(|mr| mr.name() == ti.name() && self.trait_method_has_body(mr))
-                                .map(|mr| mr.def_id())
-                        }
-                        ty::ImplContainer(def_id) => {
-                            let impl_items = self.tcx.impl_items.borrow();
-                            Some(impl_items.get(&def_id)
-                                           .unwrap()
-                                           .iter()
-                                           .find(|mr| {
-                                               self.tcx.impl_or_trait_item(mr.def_id()).name() ==
-                                               ti.name()
-                                           })
-                                           .unwrap()
-                                           .def_id())
-                        }
-                    }
-                } else {
-                    None
-                };
-                Some(Data::MethodCallData(MethodCallData {
-                    span: sub_span.unwrap(),
-                    scope: self.enclosing_scope(id),
-                    ref_id: def_id,
-                    decl_id: Some(decl_id),
-                }))
-            }
-            Def::Fn(def_id) => {
-                Some(Data::FunctionCallData(FunctionCallData {
-                    ref_id: def_id,
-                    span: sub_span.unwrap(),
-                    scope: self.enclosing_scope(id),
-                }))
-            }
-            Def::Mod(def_id) => {
-                Some(Data::ModRefData(ModRefData {
-                    ref_id: def_id,
-                    span: sub_span.unwrap(),
-                    scope: self.enclosing_scope(id),
-                }))
-            }
-            _ => None,
-        }
-    }
-
-    fn trait_method_has_body(&self, mr: &ty::ImplOrTraitItem) -> bool {
-        let def_id = mr.def_id();
-        if let Some(node_id) = self.tcx.map.as_local_node_id(def_id) {
-            let trait_item = self.tcx.map.expect_trait_item(node_id);
-            if let hir::TraitItem_::MethodTraitItem(_, Some(_)) = trait_item.node {
-                true
-            } else {
-                false
-            }
-        } else {
-            false
-        }
-    }
-
-    pub fn get_field_ref_data(&self,
-                              field_ref: &ast::Field,
-                              variant: ty::VariantDef,
-                              parent: NodeId)
-                              -> Option<VariableRefData> {
-        let f = variant.field_named(field_ref.ident.node.name);
-        // We don't really need a sub-span here, but no harm done
-        let sub_span = self.span_utils.span_for_last_ident(field_ref.ident.span);
-        filter!(self.span_utils, sub_span, field_ref.ident.span, None);
-        Some(VariableRefData {
-            name: field_ref.ident.node.to_string(),
-            span: sub_span.unwrap(),
-            scope: parent,
-            ref_id: f.did,
-        })
-    }
-
-    /// Attempt to return MacroUseData for any AST node.
-    ///
-    /// For a given piece of AST defined by the supplied Span and NodeId,
-    /// returns None if the node is not macro-generated or the span is malformed,
-    /// else uses the expansion callsite and callee to return some MacroUseData.
-    pub fn get_macro_use_data(&self, span: Span, id: NodeId) -> Option<MacroUseData> {
-        if !generated_code(span) {
-            return None;
-        }
-        // Note we take care to use the source callsite/callee, to handle
-        // nested expansions and ensure we only generate data for source-visible
-        // macro uses.
-        let callsite = self.tcx.sess.codemap().source_callsite(span);
-        let callee = self.tcx.sess.codemap().source_callee(span);
-        let callee = option_try!(callee);
-        let callee_span = option_try!(callee.span);
-
-        // Ignore attribute macros, their spans are usually mangled
-        if let MacroAttribute(_) = callee.format {
-            return None;
-        }
-
-        // If the callee is an imported macro from an external crate, we need to get
-        // the source span and name from the session, as their spans are localized
-        // when read in, and no longer correspond to the source.
-        if let Some(mac) = self.tcx.sess.imported_macro_spans.borrow().get(&callee_span) {
-            let &(ref mac_name, mac_span) = mac;
-            return Some(MacroUseData {
-                                        span: callsite,
-                                        name: mac_name.clone(),
-                                        callee_span: mac_span,
-                                        scope: self.enclosing_scope(id),
-                                        imported: true,
-                                    });
-        }
-
-        Some(MacroUseData {
-            span: callsite,
-            name: callee.name().to_string(),
-            callee_span: callee_span,
-            scope: self.enclosing_scope(id),
-            imported: false,
-        })
-    }
-
-    pub fn get_data_for_id(&self, _id: &NodeId) -> Data {
-        // FIXME
-        unimplemented!();
-    }
-
-    fn lookup_ref_id(&self, ref_id: NodeId) -> Option<DefId> {
-        if !self.tcx.def_map.borrow().contains_key(&ref_id) {
-            self.tcx.sess.bug(&format!("def_map has no key for {} in lookup_type_ref",
-                                       ref_id));
-        }
-        let def = self.tcx.def_map.borrow().get(&ref_id).unwrap().full_def();
-        match def {
-            Def::PrimTy(_) | Def::SelfTy(..) => None,
-            _ => Some(def.def_id()),
-        }
-    }
-
-    #[inline]
-    pub fn enclosing_scope(&self, id: NodeId) -> NodeId {
-        self.tcx.map.get_enclosing_scope(id).unwrap_or(0)
-    }
-}
-
-// An AST visitor for collecting paths from patterns.
-struct PathCollector {
-    // The Row field identifies the kind of pattern.
-    collected_paths: Vec<(NodeId, ast::Path, ast::Mutability, recorder::Row)>,
-}
-
-impl PathCollector {
-    fn new() -> PathCollector {
-        PathCollector { collected_paths: vec![] }
-    }
-}
-
-impl<'v> Visitor<'v> for PathCollector {
-    fn visit_pat(&mut self, p: &ast::Pat) {
-        match p.node {
-            PatKind::Struct(ref path, _, _) => {
-                self.collected_paths.push((p.id, path.clone(),
-                                           ast::Mutability::Mutable, recorder::TypeRef));
-            }
-            PatKind::TupleStruct(ref path, _) |
-            PatKind::Path(ref path) |
-            PatKind::QPath(_, ref path) => {
-                self.collected_paths.push((p.id, path.clone(),
-                                           ast::Mutability::Mutable, recorder::VarRef));
-            }
-            PatKind::Ident(bm, ref path1, _) => {
-                debug!("PathCollector, visit ident in pat {}: {:?} {:?}",
-                       path1.node,
-                       p.span,
-                       path1.span);
-                let immut = match bm {
-                    // Even if the ref is mut, you can't change the ref, only
-                    // the data pointed at, so showing the initialising expression
-                    // is still worthwhile.
-                    ast::BindingMode::ByRef(_) => ast::Mutability::Immutable,
-                    ast::BindingMode::ByValue(mt) => mt,
-                };
-                // collect path for either visit_local or visit_arm
-                let path = ast_util::ident_to_path(path1.span, path1.node);
-                self.collected_paths.push((p.id, path, immut, recorder::VarRef));
-            }
-            _ => {}
-        }
-        visit::walk_pat(self, p);
-    }
-}
-
-pub fn process_crate<'l, 'tcx>(tcx: &'l ty::ctxt<'tcx>,
-                               lcx: &'l lowering::LoweringContext<'l>,
-                               krate: &ast::Crate,
-                               analysis: &ty::CrateAnalysis,
-                               cratename: &str,
-                               odir: Option<&Path>) {
-    let _ignore = tcx.dep_graph.in_ignore();
-
-    assert!(analysis.glob_map.is_some());
-
-    info!("Dumping crate {}", cratename);
-
-    // find a path to dump our data to
-    let mut root_path = match env::var_os("DXR_RUST_TEMP_FOLDER") {
-        Some(val) => PathBuf::from(val),
-        None => match odir {
-            Some(val) => val.join("dxr"),
-            None => PathBuf::from("dxr-temp"),
-        },
-    };
-
-    if let Err(e) = fs::create_dir_all(&root_path) {
-        tcx.sess.err(&format!("Could not create directory {}: {}",
-                              root_path.display(),
-                              e));
-    }
-
-    {
-        let disp = root_path.display();
-        info!("Writing output to {}", disp);
-    }
-
-    // Create output file.
-    let executable = tcx.sess.crate_types.borrow().iter().any(|ct| *ct == CrateTypeExecutable);
-    let mut out_name = if executable {
-        "".to_owned()
-    } else {
-        "lib".to_owned()
-    };
-    out_name.push_str(&cratename);
-    out_name.push_str(&tcx.sess.opts.cg.extra_filename);
-    out_name.push_str(".csv");
-    root_path.push(&out_name);
-    let output_file = match File::create(&root_path) {
-        Ok(f) => box f,
-        Err(e) => {
-            let disp = root_path.display();
-            tcx.sess.fatal(&format!("Could not open {}: {}", disp, e));
-        }
-    };
-    root_path.pop();
-
-    let mut visitor = dump_csv::DumpCsvVisitor::new(tcx, lcx, analysis, output_file);
-
-    visitor.dump_crate_info(cratename, krate);
-    visit::walk_crate(&mut visitor, krate);
-}
-
-// Utility functions for the module.
-
-// Helper function to escape quotes in a string
-fn escape(s: String) -> String {
-    s.replace("\"", "\"\"")
-}
-
-// Helper function to determine if a span came from a
-// macro expansion or syntax extension.
-pub fn generated_code(span: Span) -> bool {
-    span.expn_id != NO_EXPANSION || span == DUMMY_SP
-}
diff --git a/src/librustc_trans/save/recorder.rs b/src/librustc_trans/save/recorder.rs
deleted file mode 100644 (file)
index c0083bb..0000000
+++ /dev/null
@@ -1,714 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-pub use self::Row::*;
-
-use super::escape;
-use super::span_utils::SpanUtils;
-
-use middle::cstore::LOCAL_CRATE;
-use middle::def_id::{CRATE_DEF_INDEX, DefId};
-use middle::ty;
-
-use std::io::Write;
-
-use syntax::ast;
-use syntax::ast::NodeId;
-use syntax::codemap::*;
-
-const CRATE_ROOT_DEF_ID: DefId = DefId {
-    krate: LOCAL_CRATE,
-    index: CRATE_DEF_INDEX,
-};
-
-pub struct Recorder {
-    // output file
-    pub out: Box<Write + 'static>,
-    pub dump_spans: bool,
-}
-
-impl Recorder {
-    pub fn record(&mut self, info: &str) {
-        match write!(self.out, "{}", info) {
-            Err(_) => error!("Error writing output '{}'", info),
-            _ => (),
-        }
-    }
-
-    pub fn dump_span(&mut self, su: SpanUtils, kind: &str, span: Span, _sub_span: Option<Span>) {
-        assert!(self.dump_spans);
-        let result = format!("span,kind,{},{},text,\"{}\"\n",
-                             kind,
-                             su.extent_str(span),
-                             escape(su.snippet(span)));
-        self.record(&result[..]);
-    }
-}
-
-pub struct FmtStrs<'a, 'tcx: 'a> {
-    pub recorder: Box<Recorder>,
-    span: SpanUtils<'a>,
-    tcx: &'a ty::ctxt<'tcx>,
-}
-
-macro_rules! s { ($e:expr) => { format!("{}", $e) }}
-macro_rules! svec {
-    ($($e:expr),*) => ({
-        // leading _ to allow empty construction without a warning.
-        let mut _temp = ::std::vec::Vec::new();
-        $(_temp.push(s!($e));)*
-        _temp
-    })
-}
-
-// FIXME recorder should operate on super::Data, rather than lots of ad hoc
-// data.
-
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-pub enum Row {
-    Variable,
-    Enum,
-    Variant,
-    VariantStruct,
-    Function,
-    MethodDecl,
-    Struct,
-    Trait,
-    Impl,
-    Module,
-    UseAlias,
-    UseGlob,
-    ExternCrate,
-    Inheritance,
-    MethodCall,
-    Typedef,
-    ExternalCrate,
-    Crate,
-    FnCall,
-    ModRef,
-    VarRef,
-    TypeRef,
-    FnRef,
-    Macro,
-    MacroUse,
-}
-
-impl<'a, 'tcx: 'a> FmtStrs<'a, 'tcx> {
-    pub fn new(rec: Box<Recorder>,
-               span: SpanUtils<'a>,
-               tcx: &'a ty::ctxt<'tcx>)
-               -> FmtStrs<'a, 'tcx> {
-        FmtStrs {
-            recorder: rec,
-            span: span,
-            tcx: tcx,
-        }
-    }
-
-    // Emitted ids are used to cross-reference items across crates. DefIds and
-    // NodeIds do not usually correspond in any way. The strategy is to use the
-    // index from the DefId as a crate-local id. However, within a crate, DefId
-    // indices and NodeIds can overlap. So, we must adjust the NodeIds. If an
-    // item can be identified by a DefId as well as a NodeId, then we use the
-    // DefId index as the id. If it can't, then we have to use the NodeId, but
-    // need to adjust it so it will not clash with any possible DefId index.
-    fn normalize_node_id(&self, id: NodeId) -> usize {
-        match self.tcx.map.opt_local_def_id(id) {
-            Some(id) => id.index.as_usize(),
-            None => id as usize + self.tcx.map.num_local_def_ids()
-        }
-    }
-
-    // A map from kind of item to a tuple of
-    //   a string representation of the name
-    //   a vector of field names
-    //   whether this kind requires a span
-    //   whether dump_spans should dump for this kind
-    fn lookup_row(r: Row) -> (&'static str, Vec<&'static str>, bool, bool) {
-        match r {
-            Variable => ("variable",
-                         vec!("id", "name", "qualname", "value", "type", "scopeid"),
-                         true,
-                         true),
-            Enum => ("enum",
-                     vec!("id", "qualname", "scopeid", "value"),
-                     true,
-                     true),
-            Variant => ("variant",
-                        vec!("id", "name", "qualname", "type", "value", "scopeid"),
-                        true,
-                        true),
-            VariantStruct => ("variant_struct",
-                              vec!("id", "ctor_id", "qualname", "type", "value", "scopeid"),
-                              true,
-                              true),
-            Function => ("function",
-                         vec!("id", "qualname", "declid", "declidcrate", "scopeid"),
-                         true,
-                         true),
-            MethodDecl => ("method_decl",
-                           vec!("id", "qualname", "scopeid"),
-                           true,
-                           true),
-            Struct => ("struct",
-                       vec!("id", "ctor_id", "qualname", "scopeid", "value"),
-                       true,
-                       true),
-            Trait => ("trait",
-                      vec!("id", "qualname", "scopeid", "value"),
-                      true,
-                      true),
-            Impl => ("impl",
-                     vec!("id",
-                          "refid",
-                          "refidcrate",
-                          "traitid",
-                          "traitidcrate",
-                          "scopeid"),
-                     true,
-                     true),
-            Module => ("module",
-                       vec!("id", "qualname", "scopeid", "def_file"),
-                       true,
-                       false),
-            UseAlias => ("use_alias",
-                         vec!("id", "refid", "refidcrate", "name", "scopeid"),
-                         true,
-                         true),
-            UseGlob => ("use_glob", vec!("id", "value", "scopeid"), true, true),
-            ExternCrate => ("extern_crate",
-                            vec!("id", "name", "location", "crate", "scopeid"),
-                            true,
-                            true),
-            Inheritance => ("inheritance",
-                            vec!("base", "basecrate", "derived", "derivedcrate"),
-                            true,
-                            false),
-            MethodCall => ("method_call",
-                           vec!("refid", "refidcrate", "declid", "declidcrate", "scopeid"),
-                           true,
-                           true),
-            Typedef => ("typedef", vec!("id", "qualname", "value"), true, true),
-            ExternalCrate => ("external_crate",
-                              vec!("name", "crate", "file_name"),
-                              false,
-                              false),
-            Crate => ("crate", vec!("name", "crate_root"), true, false),
-            FnCall => ("fn_call",
-                       vec!("refid", "refidcrate", "qualname", "scopeid"),
-                       true,
-                       true),
-            ModRef => ("mod_ref",
-                       vec!("refid", "refidcrate", "qualname", "scopeid"),
-                       true,
-                       true),
-            VarRef => ("var_ref",
-                       vec!("refid", "refidcrate", "qualname", "scopeid"),
-                       true,
-                       true),
-            TypeRef => ("type_ref",
-                        vec!("refid", "refidcrate", "qualname", "scopeid"),
-                        true,
-                        true),
-            FnRef => ("fn_ref",
-                      vec!("refid", "refidcrate", "qualname", "scopeid"),
-                      true,
-                      true),
-            Macro => ("macro",
-                         vec!("name", "qualname"),
-                         true,
-                         true),
-            MacroUse => ("macro_use",
-                         vec!("callee_name", "qualname", "scopeid"),
-                         true,
-                         true),
-        }
-    }
-
-    pub fn make_values_str(&self,
-                           kind: &'static str,
-                           fields: &Vec<&'static str>,
-                           values: Vec<String>,
-                           span: Span)
-                           -> Option<String> {
-        if values.len() != fields.len() {
-            self.span.sess.span_bug(span,
-                                    &format!("Mismatch between length of fields for '{}', \
-                                              expected '{}', found '{}'",
-                                             kind,
-                                             fields.len(),
-                                             values.len()));
-        }
-
-        let values = values.iter().map(|s| {
-            // Never take more than 1020 chars
-            if s.len() > 1020 {
-                &s[..1020]
-            } else {
-                &s[..]
-            }
-        });
-
-        let pairs = fields.iter().zip(values);
-        let strs = pairs.map(|(f, v)| format!(",{},\"{}\"", f, escape(String::from(v))));
-        Some(strs.fold(String::new(),
-                       |mut s, ss| {
-                           s.push_str(&ss[..]);
-                           s
-                       }))
-    }
-
-    pub fn record_without_span(&mut self, kind: Row, values: Vec<String>, span: Span) {
-        let (label, ref fields, needs_span, dump_spans) = FmtStrs::lookup_row(kind);
-
-        if needs_span {
-            self.span.sess.span_bug(span,
-                                    &format!("Called record_without_span for '{}' which does \
-                                              requires a span",
-                                             label));
-        }
-        assert!(!dump_spans);
-
-        if self.recorder.dump_spans {
-            return;
-        }
-
-        let values_str = match self.make_values_str(label, fields, values, span) {
-            Some(vs) => vs,
-            None => return,
-        };
-
-        let mut result = String::from(label);
-        result.push_str(&values_str[..]);
-        result.push_str("\n");
-        self.recorder.record(&result[..]);
-    }
-
-    pub fn record_with_span(&mut self,
-                            kind: Row,
-                            span: Span,
-                            sub_span: Span,
-                            values: Vec<String>) {
-        let (label, ref fields, needs_span, dump_spans) = FmtStrs::lookup_row(kind);
-
-        if self.recorder.dump_spans {
-            if dump_spans {
-                self.recorder.dump_span(self.span.clone(), label, span, Some(sub_span));
-            }
-            return;
-        }
-
-        if !needs_span {
-            self.span.sess.span_bug(span,
-                                    &format!("Called record_with_span for '{}' which does not \
-                                              require a span",
-                                             label));
-        }
-
-        let values_str = match self.make_values_str(label, fields, values, span) {
-            Some(vs) => vs,
-            None => return,
-        };
-        let result = format!("{},{}{}\n",
-                             label,
-                             self.span.extent_str(sub_span),
-                             values_str);
-        self.recorder.record(&result[..]);
-    }
-
-    pub fn check_and_record(&mut self,
-                            kind: Row,
-                            span: Span,
-                            sub_span: Option<Span>,
-                            values: Vec<String>) {
-        filter!(self.span, sub_span, span);
-        match sub_span {
-            Some(sub_span) => self.record_with_span(kind, span, sub_span, values),
-            None => {
-                let (label, _, _, _) = FmtStrs::lookup_row(kind);
-                self.span.report_span_err(label, span);
-            }
-        }
-    }
-
-    pub fn variable_str(&mut self,
-                        span: Span,
-                        sub_span: Option<Span>,
-                        id: NodeId,
-                        name: &str,
-                        value: &str,
-                        typ: &str) {
-        // Getting a fully qualified name for a variable is hard because in
-        // the local case they can be overridden in one block and there is no nice way
-        // to refer to such a scope in English, so we just hack it by appending the
-        // variable def's node id
-        let mut qualname = String::from(name);
-        qualname.push_str("$");
-        qualname.push_str(&id.to_string());
-        let id = self.normalize_node_id(id);
-        self.check_and_record(Variable,
-                              span,
-                              sub_span,
-                              svec!(id, name, qualname, value, typ, 0));
-    }
-
-    // formal parameters
-    pub fn formal_str(&mut self,
-                      span: Span,
-                      sub_span: Option<Span>,
-                      id: NodeId,
-                      fn_name: &str,
-                      name: &str,
-                      typ: &str) {
-        let mut qualname = String::from(fn_name);
-        qualname.push_str("::");
-        qualname.push_str(name);
-        let id = self.normalize_node_id(id);
-        self.check_and_record(Variable,
-                              span,
-                              sub_span,
-                              svec!(id, name, qualname, "", typ, 0));
-    }
-
-    // value is the initialising expression of the static if it is not mut, otherwise "".
-    pub fn static_str(&mut self,
-                      span: Span,
-                      sub_span: Option<Span>,
-                      id: NodeId,
-                      name: &str,
-                      qualname: &str,
-                      value: &str,
-                      typ: &str,
-                      scope_id: NodeId) {
-        let id = self.normalize_node_id(id);
-        let scope_id = self.normalize_node_id(scope_id);
-        self.check_and_record(Variable,
-                              span,
-                              sub_span,
-                              svec!(id, name, qualname, value, typ, scope_id));
-    }
-
-    pub fn field_str(&mut self,
-                     span: Span,
-                     sub_span: Option<Span>,
-                     id: NodeId,
-                     name: &str,
-                     qualname: &str,
-                     typ: &str,
-                     scope_id: NodeId) {
-        let id = self.normalize_node_id(id);
-        let scope_id = self.normalize_node_id(scope_id);
-        self.check_and_record(Variable,
-                              span,
-                              sub_span,
-                              svec!(id, name, qualname, "", typ, scope_id));
-    }
-
-    pub fn enum_str(&mut self,
-                    span: Span,
-                    sub_span: Option<Span>,
-                    id: NodeId,
-                    name: &str,
-                    scope_id: NodeId,
-                    value: &str) {
-        let id = self.normalize_node_id(id);
-        let scope_id = self.normalize_node_id(scope_id);
-        self.check_and_record(Enum, span, sub_span, svec!(id, name, scope_id, value));
-    }
-
-    pub fn tuple_variant_str(&mut self,
-                             span: Span,
-                             sub_span: Option<Span>,
-                             id: NodeId,
-                             name: &str,
-                             qualname: &str,
-                             typ: &str,
-                             val: &str,
-                             scope_id: NodeId) {
-        let id = self.normalize_node_id(id);
-        let scope_id = self.normalize_node_id(scope_id);
-        self.check_and_record(Variant,
-                              span,
-                              sub_span,
-                              svec!(id, name, qualname, typ, val, scope_id));
-    }
-
-    pub fn struct_variant_str(&mut self,
-                              span: Span,
-                              sub_span: Option<Span>,
-                              id: NodeId,
-                              name: &str,
-                              typ: &str,
-                              val: &str,
-                              scope_id: NodeId) {
-        let id = self.normalize_node_id(id);
-        let ctor_id = id;
-        let scope_id = self.normalize_node_id(scope_id);
-        self.check_and_record(VariantStruct,
-                              span,
-                              sub_span,
-                              svec!(id, ctor_id, name, typ, val, scope_id));
-    }
-
-    pub fn fn_str(&mut self,
-                  span: Span,
-                  sub_span: Option<Span>,
-                  id: NodeId,
-                  name: &str,
-                  scope_id: NodeId) {
-        let id = self.normalize_node_id(id);
-        let scope_id = self.normalize_node_id(scope_id);
-        self.check_and_record(Function,
-                              span,
-                              sub_span,
-                              svec!(id, name, "", "", scope_id));
-    }
-
-    pub fn method_str(&mut self,
-                      span: Span,
-                      sub_span: Option<Span>,
-                      id: NodeId,
-                      name: &str,
-                      decl_id: Option<DefId>,
-                      scope_id: NodeId) {
-        let id = self.normalize_node_id(id);
-        let scope_id = self.normalize_node_id(scope_id);
-        let values = match decl_id {
-            Some(decl_id) => svec!(id,
-                                   name,
-                                   decl_id.index.as_usize(),
-                                   decl_id.krate,
-                                   scope_id),
-            None => svec!(id, name, "", "", scope_id),
-        };
-        self.check_and_record(Function, span, sub_span, values);
-    }
-
-    pub fn method_decl_str(&mut self,
-                           span: Span,
-                           sub_span: Option<Span>,
-                           id: NodeId,
-                           name: &str,
-                           scope_id: NodeId) {
-        let id = self.normalize_node_id(id);
-        let scope_id = self.normalize_node_id(scope_id);
-        self.check_and_record(MethodDecl, span, sub_span, svec!(id, name, scope_id));
-    }
-
-    pub fn struct_str(&mut self,
-                      span: Span,
-                      sub_span: Option<Span>,
-                      id: NodeId,
-                      ctor_id: NodeId,
-                      name: &str,
-                      scope_id: NodeId,
-                      value: &str) {
-        let id = self.normalize_node_id(id);
-        let scope_id = self.normalize_node_id(scope_id);
-        let ctor_id = self.normalize_node_id(ctor_id);
-        self.check_and_record(Struct,
-                              span,
-                              sub_span,
-                              svec!(id, ctor_id, name, scope_id, value));
-    }
-
-    pub fn trait_str(&mut self,
-                     span: Span,
-                     sub_span: Option<Span>,
-                     id: NodeId,
-                     name: &str,
-                     scope_id: NodeId,
-                     value: &str) {
-        let id = self.normalize_node_id(id);
-        let scope_id = self.normalize_node_id(scope_id);
-        self.check_and_record(Trait, span, sub_span, svec!(id, name, scope_id, value));
-    }
-
-    pub fn impl_str(&mut self,
-                    span: Span,
-                    sub_span: Option<Span>,
-                    id: NodeId,
-                    ref_id: Option<DefId>,
-                    trait_id: Option<DefId>,
-                    scope_id: NodeId) {
-        let id = self.normalize_node_id(id);
-        let scope_id = self.normalize_node_id(scope_id);
-        let ref_id = ref_id.unwrap_or(CRATE_ROOT_DEF_ID);
-        let trait_id = trait_id.unwrap_or(CRATE_ROOT_DEF_ID);
-        self.check_and_record(Impl,
-                              span,
-                              sub_span,
-                              svec!(id,
-                                    ref_id.index.as_usize(),
-                                    ref_id.krate,
-                                    trait_id.index.as_usize(),
-                                    trait_id.krate,
-                                    scope_id));
-    }
-
-    pub fn mod_str(&mut self,
-                   span: Span,
-                   sub_span: Option<Span>,
-                   id: NodeId,
-                   name: &str,
-                   parent: NodeId,
-                   filename: &str) {
-        let id = self.normalize_node_id(id);
-        let parent = self.normalize_node_id(parent);
-        self.check_and_record(Module,
-                              span,
-                              sub_span,
-                              svec!(id, name, parent, filename));
-    }
-
-    pub fn use_alias_str(&mut self,
-                         span: Span,
-                         sub_span: Option<Span>,
-                         id: NodeId,
-                         mod_id: Option<DefId>,
-                         name: &str,
-                         parent: NodeId) {
-        let id = self.normalize_node_id(id);
-        let parent = self.normalize_node_id(parent);
-        let mod_id = mod_id.unwrap_or(CRATE_ROOT_DEF_ID);
-        self.check_and_record(UseAlias,
-                              span,
-                              sub_span,
-                              svec!(id, mod_id.index.as_usize(), mod_id.krate, name, parent));
-    }
-
-    pub fn use_glob_str(&mut self,
-                        span: Span,
-                        sub_span: Option<Span>,
-                        id: NodeId,
-                        values: &str,
-                        parent: NodeId) {
-        let id = self.normalize_node_id(id);
-        let parent = self.normalize_node_id(parent);
-        self.check_and_record(UseGlob, span, sub_span, svec!(id, values, parent));
-    }
-
-    pub fn extern_crate_str(&mut self,
-                            span: Span,
-                            sub_span: Option<Span>,
-                            id: NodeId,
-                            cnum: ast::CrateNum,
-                            name: &str,
-                            loc: &str,
-                            parent: NodeId) {
-        let id = self.normalize_node_id(id);
-        let parent = self.normalize_node_id(parent);
-        self.check_and_record(ExternCrate,
-                              span,
-                              sub_span,
-                              svec!(id, name, loc, cnum, parent));
-    }
-
-    pub fn inherit_str(&mut self,
-                       span: Span,
-                       sub_span: Option<Span>,
-                       base_id: DefId,
-                       deriv_id: NodeId) {
-        let deriv_id = self.normalize_node_id(deriv_id);
-        self.check_and_record(Inheritance,
-                              span,
-                              sub_span,
-                              svec!(base_id.index.as_usize(), base_id.krate, deriv_id, 0));
-    }
-
-    pub fn fn_call_str(&mut self,
-                       span: Span,
-                       sub_span: Option<Span>,
-                       id: DefId,
-                       scope_id: NodeId) {
-        let scope_id = self.normalize_node_id(scope_id);
-        self.check_and_record(FnCall,
-                              span,
-                              sub_span,
-                              svec!(id.index.as_usize(), id.krate, "", scope_id));
-    }
-
-    pub fn meth_call_str(&mut self,
-                         span: Span,
-                         sub_span: Option<Span>,
-                         defid: Option<DefId>,
-                         declid: Option<DefId>,
-                         scope_id: NodeId) {
-        let scope_id = self.normalize_node_id(scope_id);
-        let defid = defid.unwrap_or(CRATE_ROOT_DEF_ID);
-        let (dcn, dck) = match declid {
-            Some(declid) => (s!(declid.index.as_usize()), s!(declid.krate)),
-            None => ("".to_string(), "".to_string()),
-        };
-        self.check_and_record(MethodCall,
-                              span,
-                              sub_span,
-                              svec!(defid.index.as_usize(), defid.krate, dcn, dck, scope_id));
-    }
-
-    pub fn sub_mod_ref_str(&mut self, span: Span, sub_span: Span, qualname: &str, parent: NodeId) {
-        let parent = self.normalize_node_id(parent);
-        self.record_with_span(ModRef, span, sub_span, svec!(0, 0, qualname, parent));
-    }
-
-    pub fn typedef_str(&mut self,
-                       span: Span,
-                       sub_span: Option<Span>,
-                       id: NodeId,
-                       qualname: &str,
-                       value: &str) {
-        let id = self.normalize_node_id(id);
-        self.check_and_record(Typedef, span, sub_span, svec!(id, qualname, value));
-    }
-
-    pub fn crate_str(&mut self, span: Span, name: &str, crate_root: &str) {
-        self.record_with_span(Crate, span, span, svec!(name, crate_root));
-    }
-
-    pub fn external_crate_str(&mut self, span: Span, name: &str, num: ast::CrateNum) {
-        let lo_loc = self.span.sess.codemap().lookup_char_pos(span.lo);
-        self.record_without_span(ExternalCrate,
-                                 svec!(name, num, SpanUtils::make_path_string(&lo_loc.file.name)),
-                                 span);
-    }
-
-    pub fn sub_type_ref_str(&mut self, span: Span, sub_span: Span, qualname: &str) {
-        self.record_with_span(TypeRef, span, sub_span, svec!(0, 0, qualname, 0));
-    }
-
-    // A slightly generic function for a reference to an item of any kind.
-    pub fn ref_str(&mut self,
-                   kind: Row,
-                   span: Span,
-                   sub_span: Option<Span>,
-                   id: DefId,
-                   scope_id: NodeId) {
-        let scope_id = self.normalize_node_id(scope_id);
-        self.check_and_record(kind,
-                              span,
-                              sub_span,
-                              svec!(id.index.as_usize(), id.krate, "", scope_id));
-    }
-
-    pub fn macro_str(&mut self, span: Span, sub_span: Span, name: String, qualname: String) {
-        self.record_with_span(Macro, span, sub_span, svec!(name, qualname));
-    }
-
-    pub fn macro_use_str(&mut self,
-                         span: Span,
-                         sub_span: Span,
-                         name: String,
-                         qualname: String,
-                         scope_id: NodeId) {
-        let scope_id = self.normalize_node_id(scope_id);
-        self.record_with_span(MacroUse, span, sub_span,
-                              svec!(name, qualname, scope_id));
-    }
-}
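The id scheme described in `normalize_node_id` above keeps DefId indices and NodeIds from colliding by shifting NodeIds past the number of local DefIds. A minimal sketch of that disambiguation with plain integers, using hypothetical names rather than the compiler's API:

// Hypothetical stand-in for the normalization in `normalize_node_id`:
// ids that have a def-index use it directly; the rest are shifted past
// the def-index range so the two id spaces cannot overlap.
fn normalize(node_id: usize, def_index: Option<usize>, num_local_def_ids: usize) -> usize {
    match def_index {
        Some(index) => index,
        None => node_id + num_local_def_ids,
    }
}

fn main() {
    let num_local_def_ids = 100;
    assert_eq!(normalize(7, Some(3), num_local_def_ids), 3);  // DefId index wins
    assert_eq!(normalize(7, None, num_local_def_ids), 107);   // NodeId shifted clear of 0..100
}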
diff --git a/src/librustc_trans/save/span_utils.rs b/src/librustc_trans/save/span_utils.rs
deleted file mode 100644 (file)
index 2a5c61f..0000000
+++ /dev/null
@@ -1,461 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use rustc::session::Session;
-
-use save::generated_code;
-
-use std::cell::Cell;
-use std::env;
-use std::path::Path;
-
-use syntax::ast;
-use syntax::codemap::*;
-use syntax::parse::lexer;
-use syntax::parse::lexer::{Reader, StringReader};
-use syntax::parse::token;
-use syntax::parse::token::{keywords, Token};
-
-#[derive(Clone)]
-pub struct SpanUtils<'a> {
-    pub sess: &'a Session,
-    pub err_count: Cell<isize>,
-}
-
-impl<'a> SpanUtils<'a> {
-    pub fn new(sess: &'a Session) -> SpanUtils<'a> {
-        SpanUtils {
-            sess: sess,
-            err_count: Cell::new(0),
-        }
-    }
-
-    pub fn make_path_string(file_name: &str) -> String {
-        let path = Path::new(file_name);
-        if path.is_absolute() {
-            path.clone().display().to_string()
-        } else {
-            env::current_dir().unwrap().join(&path).display().to_string()
-        }
-    }
-
-    // Standard string for extents/location.
-    #[rustfmt_skip]
-    pub fn extent_str(&self, span: Span) -> String {
-        let lo_loc = self.sess.codemap().lookup_char_pos(span.lo);
-        let hi_loc = self.sess.codemap().lookup_char_pos(span.hi);
-        let lo_pos = self.sess.codemap().bytepos_to_file_charpos(span.lo);
-        let hi_pos = self.sess.codemap().bytepos_to_file_charpos(span.hi);
-        let lo_pos_byte = self.sess.codemap().lookup_byte_offset(span.lo).pos;
-        let hi_pos_byte = self.sess.codemap().lookup_byte_offset(span.hi).pos;
-
-        format!("file_name,\"{}\",file_line,{},file_col,{},extent_start,{},extent_start_bytes,{},\
-                 file_line_end,{},file_col_end,{},extent_end,{},extent_end_bytes,{}",
-                SpanUtils::make_path_string(&lo_loc.file.name),
-                lo_loc.line, lo_loc.col.to_usize(), lo_pos.to_usize(), lo_pos_byte.to_usize(),
-                hi_loc.line, hi_loc.col.to_usize(), hi_pos.to_usize(), hi_pos_byte.to_usize())
-    }
-
-    // sub_span starts at span.lo, so we need to adjust the positions etc.
-    // If sub_span is None, we don't need to adjust.
-    pub fn make_sub_span(&self, span: Span, sub_span: Option<Span>) -> Option<Span> {
-        match sub_span {
-            None => None,
-            Some(sub) => {
-                let FileMapAndBytePos {fm, pos} = self.sess.codemap().lookup_byte_offset(span.lo);
-                let base = pos + fm.start_pos;
-                Some(Span {
-                    lo: base + self.sess.codemap().lookup_byte_offset(sub.lo).pos,
-                    hi: base + self.sess.codemap().lookup_byte_offset(sub.hi).pos,
-                    expn_id: span.expn_id,
-                })
-            }
-        }
-    }
-
-    pub fn snippet(&self, span: Span) -> String {
-        match self.sess.codemap().span_to_snippet(span) {
-            Ok(s) => s,
-            Err(_) => String::new(),
-        }
-    }
-
-    pub fn retokenise_span(&self, span: Span) -> StringReader<'a> {
-        // sadness - we don't have spans for sub-expressions nor access to the tokens
-        // so in order to get extents for the function name itself (which dxr expects)
-        // we need to re-tokenise the fn definition
-
-        // Note: this is a bit awful - it adds the contents of span to the end of
-        // the codemap as a new filemap. This is mostly OK, but means we should
-        // not iterate over the codemap. Also, any spans over the new filemap
-        // are incompatible with spans over other filemaps.
-        let filemap = self.sess
-                          .codemap()
-                          .new_filemap(String::from("<anon-dxr>"), self.snippet(span));
-        let s = self.sess;
-        lexer::StringReader::new(s.diagnostic(), filemap)
-    }
-
-    // Re-parses a path and returns the span for the last identifier in the path
-    pub fn span_for_last_ident(&self, span: Span) -> Option<Span> {
-        let mut result = None;
-
-        let mut toks = self.retokenise_span(span);
-        let mut bracket_count = 0;
-        loop {
-            let ts = toks.real_token();
-            if ts.tok == token::Eof {
-                return self.make_sub_span(span, result)
-            }
-            if bracket_count == 0 && (ts.tok.is_ident() || ts.tok.is_keyword(keywords::SelfValue)) {
-                result = Some(ts.sp);
-            }
-
-            bracket_count += match ts.tok {
-                token::Lt => 1,
-                token::Gt => -1,
-                token::BinOp(token::Shr) => -2,
-                _ => 0,
-            }
-        }
-    }
-
-    // Return the span for the first identifier in the path.
-    pub fn span_for_first_ident(&self, span: Span) -> Option<Span> {
-        let mut toks = self.retokenise_span(span);
-        let mut bracket_count = 0;
-        loop {
-            let ts = toks.real_token();
-            if ts.tok == token::Eof {
-                return None;
-            }
-            if bracket_count == 0 && (ts.tok.is_ident() || ts.tok.is_keyword(keywords::SelfValue)) {
-                return self.make_sub_span(span, Some(ts.sp));
-            }
-
-            bracket_count += match ts.tok {
-                token::Lt => 1,
-                token::Gt => -1,
-                token::BinOp(token::Shr) => -2,
-                _ => 0,
-            }
-        }
-    }
-
-    // Return the span for the last ident before a `(` or `<` or '::<' and outside
-    // any brackets, or the last span.
-    pub fn sub_span_for_meth_name(&self, span: Span) -> Option<Span> {
-        let mut toks = self.retokenise_span(span);
-        let mut prev = toks.real_token();
-        let mut result = None;
-        let mut bracket_count = 0;
-        let mut last_span = None;
-        while prev.tok != token::Eof {
-            last_span = None;
-            let mut next = toks.real_token();
-
-            if (next.tok == token::OpenDelim(token::Paren) || next.tok == token::Lt) &&
-               bracket_count == 0 && prev.tok.is_ident() {
-                result = Some(prev.sp);
-            }
-
-            if bracket_count == 0 && next.tok == token::ModSep {
-                let old = prev;
-                prev = next;
-                next = toks.real_token();
-                if next.tok == token::Lt && old.tok.is_ident() {
-                    result = Some(old.sp);
-                }
-            }
-
-            bracket_count += match prev.tok {
-                token::OpenDelim(token::Paren) | token::Lt => 1,
-                token::CloseDelim(token::Paren) | token::Gt => -1,
-                token::BinOp(token::Shr) => -2,
-                _ => 0,
-            };
-
-            if prev.tok.is_ident() && bracket_count == 0 {
-                last_span = Some(prev.sp);
-            }
-            prev = next;
-        }
-        if result.is_none() && last_span.is_some() {
-            return self.make_sub_span(span, last_span);
-        }
-        return self.make_sub_span(span, result);
-    }
-
-    // Return the span for the last ident before a `<` and outside any
-    // brackets, or the last span.
-    pub fn sub_span_for_type_name(&self, span: Span) -> Option<Span> {
-        let mut toks = self.retokenise_span(span);
-        let mut prev = toks.real_token();
-        let mut result = None;
-        let mut bracket_count = 0;
-        loop {
-            let next = toks.real_token();
-
-            if (next.tok == token::Lt || next.tok == token::Colon) && bracket_count == 0 &&
-               prev.tok.is_ident() {
-                result = Some(prev.sp);
-            }
-
-            bracket_count += match prev.tok {
-                token::Lt => 1,
-                token::Gt => -1,
-                token::BinOp(token::Shl) => 2,
-                token::BinOp(token::Shr) => -2,
-                _ => 0,
-            };
-
-            if next.tok == token::Eof {
-                break;
-            }
-            prev = next;
-        }
-        if bracket_count != 0 {
-            let loc = self.sess.codemap().lookup_char_pos(span.lo);
-            self.sess.span_bug(span,
-                               &format!("Mis-counted brackets when breaking path? Parsing '{}' \
-                                         in {}, line {}",
-                                        self.snippet(span),
-                                        loc.file.name,
-                                        loc.line));
-        }
-        if result.is_none() && prev.tok.is_ident() && bracket_count == 0 {
-            return self.make_sub_span(span, Some(prev.sp));
-        }
-        self.make_sub_span(span, result)
-    }
-
-    // Reparse span and return an owned vector of sub spans of the first limit
-    // identifier tokens in the given nesting level.
-    // example with Foo<Bar<T,V>, Bar<T,V>>
-    // Nesting = 0: all idents outside of brackets: [Foo]
-    // Nesting = 1: idents within one level of brackets: [Bar, Bar]
-    pub fn spans_with_brackets(&self, span: Span, nesting: isize, limit: isize) -> Vec<Span> {
-        let mut result: Vec<Span> = vec!();
-
-        let mut toks = self.retokenise_span(span);
-        // We keep track of how many brackets we're nested in
-        let mut bracket_count: isize = 0;
-        let mut found_ufcs_sep = false;
-        loop {
-            let ts = toks.real_token();
-            if ts.tok == token::Eof {
-                if bracket_count != 0 {
-                    if generated_code(span) {
-                        return vec!();
-                    }
-                    let loc = self.sess.codemap().lookup_char_pos(span.lo);
-                    self.sess.span_bug(span,
-                                       &format!("Mis-counted brackets when breaking path? \
-                                                 Parsing '{}' in {}, line {}",
-                                                self.snippet(span),
-                                                loc.file.name,
-                                                loc.line));
-                }
-                return result
-            }
-            if (result.len() as isize) == limit {
-                return result;
-            }
-            bracket_count += match ts.tok {
-                token::Lt => 1,
-                token::Gt => -1,
-                token::BinOp(token::Shl) => 2,
-                token::BinOp(token::Shr) => -2,
-                _ => 0,
-            };
-
-            // Ignore the `>::` in `<Type as Trait>::AssocTy`.
-
-            // The root cause of this hack is that the AST representation of
-            // qpaths is horrible. It treats <A as B>::C as a path with two
-            // segments, B and C and notes that there is also a self type A at
-            // position 0. Because we don't have spans for individual idents,
-            // only the whole path, we have to iterate over the tokens in the
-            // path, trying to pull out the non-nested idents (e.g., avoiding 'a
-            // in `<A as B<'a>>::C`). So we end up with a span for `B>::C` from
-            // the start of the first ident to the end of the path.
-            if !found_ufcs_sep && bracket_count == -1 {
-                found_ufcs_sep = true;
-                bracket_count += 1;
-            }
-            if ts.tok.is_ident() && bracket_count == nesting {
-                result.push(self.make_sub_span(span, Some(ts.sp)).unwrap());
-            }
-        }
-    }
-
-    pub fn sub_span_before_token(&self, span: Span, tok: Token) -> Option<Span> {
-        let mut toks = self.retokenise_span(span);
-        let mut prev = toks.real_token();
-        loop {
-            if prev.tok == token::Eof {
-                return None;
-            }
-            let next = toks.real_token();
-            if next.tok == tok {
-                return self.make_sub_span(span, Some(prev.sp));
-            }
-            prev = next;
-        }
-    }
-
-    pub fn sub_span_of_token(&self, span: Span, tok: Token) -> Option<Span> {
-        let mut toks = self.retokenise_span(span);
-        loop {
-            let next = toks.real_token();
-            if next.tok == token::Eof {
-                return None;
-            }
-            if next.tok == tok {
-                return self.make_sub_span(span, Some(next.sp));
-            }
-        }
-    }
-
-    pub fn sub_span_after_keyword(&self, span: Span, keyword: keywords::Keyword) -> Option<Span> {
-        self.sub_span_after(span, |t| t.is_keyword(keyword))
-    }
-
-    pub fn sub_span_after_token(&self, span: Span, tok: Token) -> Option<Span> {
-        self.sub_span_after(span, |t| t == tok)
-    }
-
-    fn sub_span_after<F: Fn(Token) -> bool>(&self, span: Span, f: F) -> Option<Span> {
-        let mut toks = self.retokenise_span(span);
-        loop {
-            let ts = toks.real_token();
-            if ts.tok == token::Eof {
-                return None;
-            }
-            if f(ts.tok) {
-                let ts = toks.real_token();
-                if ts.tok == token::Eof {
-                    return None
-                } else {
-                    return self.make_sub_span(span, Some(ts.sp));
-                }
-            }
-        }
-    }
-
-
-    // Returns a list of the spans of idents in a path.
-    // E.g., For foo::bar<x,t>::baz, we return [foo, bar, baz] (well, their spans)
-    pub fn spans_for_path_segments(&self, path: &ast::Path) -> Vec<Span> {
-        self.spans_with_brackets(path.span, 0, -1)
-    }
-
-    // Return an owned vector of the subspans of the param identifier
-    // tokens found in span.
-    pub fn spans_for_ty_params(&self, span: Span, number: isize) -> Vec<Span> {
-        // Type params are nested within one level of brackets:
-        // i.e. we want Vec<A, B> from Foo<A, B<T,U>>
-        self.spans_with_brackets(span, 1, number)
-    }
-
-    pub fn report_span_err(&self, kind: &str, span: Span) {
-        let loc = self.sess.codemap().lookup_char_pos(span.lo);
-        info!("({}) Could not find sub_span in `{}` in {}, line {}",
-              kind,
-              self.snippet(span),
-              loc.file.name,
-              loc.line);
-        self.err_count.set(self.err_count.get() + 1);
-        if self.err_count.get() > 1000 {
-            self.sess.bug("span errors reached 1000, giving up");
-        }
-    }
-
-    // Return the name for a macro definition (identifier after first `!`)
-    pub fn span_for_macro_def_name(&self, span: Span) -> Option<Span> {
-        let mut toks = self.retokenise_span(span);
-        loop {
-            let ts = toks.real_token();
-            if ts.tok == token::Eof {
-                return None;
-            }
-            if ts.tok == token::Not {
-                let ts = toks.real_token();
-                if ts.tok.is_ident() {
-                    return self.make_sub_span(span, Some(ts.sp));
-                } else {
-                    return None;
-                }
-            }
-        }
-    }
-
-    // Return the name for a macro use (identifier before first `!`).
-    pub fn span_for_macro_use_name(&self, span:Span) -> Option<Span> {
-        let mut toks = self.retokenise_span(span);
-        let mut prev = toks.real_token();
-        loop {
-            if prev.tok == token::Eof {
-                return None;
-            }
-            let ts = toks.real_token();
-            if ts.tok == token::Not {
-                if prev.tok.is_ident() {
-                    return self.make_sub_span(span, Some(prev.sp));
-                } else {
-                    return None;
-                }
-            }
-            prev = ts;
-        }
-    }
-
-    /// Return true if the span is generated code, and
-    /// it is not a subspan of the root callsite.
-    ///
-    /// Used to filter out spans of minimal value,
-    /// such as references to macro internal variables.
-    pub fn filter_generated(&self, sub_span: Option<Span>, parent: Span) -> bool {
-        if !generated_code(parent) {
-            if sub_span.is_none() {
-                // Edge case - this occurs on generated code with incorrect expansion info.
-                return true;
-            }
-            return false;
-        }
-        // If sub_span is none, filter out generated code.
-        if sub_span.is_none() {
-            return true;
-        }
-
-        // If the span comes from a fake filemap, filter it.
-        if !self.sess.codemap().lookup_char_pos(parent.lo).file.is_real_file() {
-            return true;
-        }
-
-        // Otherwise, a generated span is deemed invalid if it is not a sub-span of the root
-        // callsite. This filters out macro internal variables and most malformed spans.
-        let span = self.sess.codemap().source_callsite(parent);
-        !(span.contains(parent))
-    }
-}
-
-macro_rules! filter {
-    ($util: expr, $span: ident, $parent: expr, None) => {
-        if $util.filter_generated($span, $parent) {
-            return None;
-        }
-    };
-    ($util: expr, $span: ident, $parent: expr) => {
-        if $util.filter_generated($span, $parent) {
-            return;
-        }
-    };
-}
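// A minimal, self-contained sketch of the early-return shape the `filter!`
// macro above expands to; the predicate here is a hypothetical stand-in for
// `SpanUtils::filter_generated` and is not part of the diff itself.
macro_rules! filter_sketch {
    ($pred:expr, None) => {
        if $pred {
            return None;
        }
    };
    ($pred:expr) => {
        if $pred {
            return;
        }
    };
}

fn lookup(is_generated: bool) -> Option<u32> {
    // Expands to `if is_generated { return None; }`.
    filter_sketch!(is_generated, None);
    Some(42)
}

fn main() {
    assert_eq!(lookup(true), None);
    assert_eq!(lookup(false), Some(42));
}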
diff --git a/src/librustc_trans/symbol_names_test.rs b/src/librustc_trans/symbol_names_test.rs
new file mode 100644 (file)
index 0000000..2e33559
--- /dev/null
@@ -0,0 +1,86 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Walks the crate looking for items/impl-items/trait-items that have
+//! either a `rustc_symbol_name` or `rustc_item_path` attribute and
+//! generates an error giving, respectively, the symbol name or
+//! item-path. This is used for unit testing the code that generates
+//! paths etc in all kinds of annoying scenarios.
+
+use back::symbol_names;
+use rustc::hir;
+use rustc::hir::intravisit::{self, Visitor};
+use syntax::ast;
+use syntax::attr::AttrMetaMethods;
+
+use common::CrateContext;
+use monomorphize::Instance;
+
+const SYMBOL_NAME: &'static str = "rustc_symbol_name";
+const ITEM_PATH: &'static str = "rustc_item_path";
+
+pub fn report_symbol_names(ccx: &CrateContext) {
+    // if the `rustc_attrs` feature is not enabled, then the
+    // attributes we are interested in cannot be present anyway, so
+    // skip the walk.
+    let tcx = ccx.tcx();
+    if !tcx.sess.features.borrow().rustc_attrs {
+        return;
+    }
+
+    let _ignore = tcx.dep_graph.in_ignore();
+    let mut visitor = SymbolNamesTest { ccx: ccx };
+    tcx.map.krate().visit_all_items(&mut visitor);
+}
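// A rough sketch, under stated assumptions, of how a compile-fail test file
// might exercise this pass: with `rustc_attrs` enabled, each annotated item
// makes the compiler report its symbol name or item path as an error, which
// the test then matches against.  The exact error text and mangled symbol
// are elided here rather than taken from this diff.
//
// #![feature(rustc_attrs)]
//
// #[rustc_symbol_name] //~ ERROR symbol-name
// #[rustc_item_path]   //~ ERROR item-path
// fn foo() {}
//
// fn main() {}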
+
+struct SymbolNamesTest<'a, 'tcx:'a> {
+    ccx: &'a CrateContext<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> SymbolNamesTest<'a, 'tcx> {
+    fn process_attrs(&mut self,
+                     node_id: ast::NodeId) {
+        let tcx = self.ccx.tcx();
+        let def_id = tcx.map.local_def_id(node_id);
+        for attr in tcx.get_attrs(def_id).iter() {
+            if attr.check_name(SYMBOL_NAME) {
+                // for now, this can only be used on monomorphic names
+                let instance = Instance::mono(tcx, def_id);
+                let name = symbol_names::exported_name(self.ccx, &instance);
+                tcx.sess.span_err(attr.span, &format!("symbol-name({})", name));
+            } else if attr.check_name(ITEM_PATH) {
+                let path = tcx.item_path_str(def_id);
+                tcx.sess.span_err(attr.span, &format!("item-path({})", path));
+            }
+
+            // The formatting of `symbol-name({})` / `item-path({})` is chosen so
+            // that tests can elect to check the entirety of the string, if they
+            // choose, or else just some subset.
+        }
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for SymbolNamesTest<'a, 'tcx> {
+    fn visit_item(&mut self, item: &'tcx hir::Item) {
+        self.process_attrs(item.id);
+        intravisit::walk_item(self, item);
+    }
+
+    fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem) {
+        self.process_attrs(ti.id);
+        intravisit::walk_trait_item(self, ti)
+    }
+
+    fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem) {
+        self.process_attrs(ii.id);
+        intravisit::walk_impl_item(self, ii)
+    }
+}
+
diff --git a/src/librustc_trans/trans/_match.rs b/src/librustc_trans/trans/_match.rs
deleted file mode 100644 (file)
index c5efc9b..0000000
+++ /dev/null
@@ -1,2016 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! # Compilation of match statements
-//!
-//! I will endeavor to explain the code as best I can.  I have only a loose
-//! understanding of some parts of it.
-//!
-//! ## Matching
-//!
-//! The basic state of the code is maintained in an array `m` of `Match`
-//! objects.  Each `Match` describes some list of patterns, all of which must
-//! match against the current list of values.  If those patterns match, then
-//! the arm listed in the match is the correct arm.  A given arm may have
-//! multiple corresponding match entries, one for each alternative that
-//! remains.  As we proceed these sets of matches are adjusted by the various
-//! `enter_XXX()` functions, each of which adjusts the set of options given
-//! some information about the value which has been matched.
-//!
-//! So, initially, there is one value and N matches, each of which have one
-//! constituent pattern.  N here is usually the number of arms but may be
-//! greater, if some arms have multiple alternatives.  For example, here:
-//!
-//!     enum Foo { A, B(isize), C(usize, usize) }
-//!     match foo {
-//!         A => ...,
-//!         B(x) => ...,
-//!         C(1, 2) => ...,
-//!         C(_) => ...
-//!     }
-//!
-//! The value would be `foo`.  There would be four matches, each of which
-//! contains one pattern (and, in one case, a guard).  We could collect the
-//! various options and then compile the code for the case where `foo` is an
-//! `A`, a `B`, and a `C`.  When we generate the code for `C`, we would (1)
-//! drop the two matches that do not match a `C` and (2) expand the other two
-//! into two patterns each.  In the first case, the two patterns would be `1`
-//! and `2`, and in the second case the _ pattern would be expanded into
-//! `_` and `_`.  The two values are of course the arguments to `C`.
-//!
-//! Here is a quick guide to the various functions:
-//!
-//! - `compile_submatch()`: The main workhorse.  It takes a list of values and
-//!   a list of matches and finds the various possibilities that could occur.
-//!
-//! - `enter_XXX()`: modifies the list of matches based on some information
-//!   about the value that has been matched.  For example,
-//!   `enter_rec_or_struct()` adjusts the values given that a record or struct
-//!   has been matched.  This is an infallible pattern, so *all* of the matches
-//!   must be either wildcards or record/struct patterns.  `enter_opt()`
-//!   handles the fallible cases, and it is correspondingly more complex.
-//!
-//! ## Bindings
-//!
-//! We store information about the bound variables for each arm as part of the
-//! per-arm `ArmData` struct.  There is a mapping from identifiers to
-//! `BindingInfo` structs.  These structs contain the mode/id/type of the
-//! binding, but they also contain an LLVM value which points at an alloca
-//! called `llmatch`. For by value bindings that are Copy, we also create
-//! an extra alloca that we copy the matched value to, so that any changes
-//! we make to our copy are not reflected in the original and vice-versa.
-//! We don't do this if it's a move, since the original value can't be used
-//! anyway, which lets us cheat and not create the extra alloca.
-//!
-//! The `llmatch` binding always stores a pointer into the value being matched
-//! which points at the data for the binding.  If the value being matched has
-//! type `T`, then, `llmatch` will point at an alloca of type `T*` (and hence
-//! `llmatch` has type `T**`).  So, if you have a pattern like:
-//!
-//!    let a: A = ...;
-//!    let b: B = ...;
-//!    match (a, b) { (ref c, d) => { ... } }
-//!
-//! For `c` and `d`, we would generate allocas of type `C*` and `D*`
-//! respectively.  These are the `llmatch` allocas.  As we match, when we come
-//! up against an identifier, we store the current pointer into the
-//! corresponding alloca.
-//!
-//! Once a pattern is completely matched, and assuming that there is no guard
-//! pattern, we will branch to a block that leads to the body itself.  For any
-//! by-value bindings, this block will first load the ptr from `llmatch` (the
-//! one of type `D*`) and then load a second time to get the actual value (the
-//! one of type `D`). For by ref bindings, the value of the local variable is
-//! simply the first alloca.
-//!
-//! So, for the example above, we would generate a setup kind of like this:
-//!
-//!        +-------+
-//!        | Entry |
-//!        +-------+
-//!            |
-//!        +--------------------------------------------+
-//!        | llmatch_c = (addr of first half of tuple)  |
-//!        | llmatch_d = (addr of second half of tuple) |
-//!        +--------------------------------------------+
-//!            |
-//!        +--------------------------------------+
-//!        | *llbinding_d = **llmatch_d           |
-//!        +--------------------------------------+
-//!
-//! If there is a guard, the situation is slightly different, because we must
-//! execute the guard code.  Moreover, we need to do so once for each of the
-//! alternatives that lead to the arm, because if the guard fails, they may
-//! have different points from which to continue the search. Therefore, in that
-//! case, we generate code that looks more like:
-//!
-//!        +-------+
-//!        | Entry |
-//!        +-------+
-//!            |
-//!        +--------------------------------------------+
-//!        | llmatch_c = (addr of first half of tuple)  |
-//!        | llmatch_d = (addr of second half of tuple) |
-//!        +--------------------------------------------+
-//!            |
-//!        +-------------------------------------------------+
-//!        | *llbinding_d = **llmatch_d                      |
-//!        | check condition                                 |
-//!        | if false { goto next case }                     |
-//!        | if true { goto body }                           |
-//!        +-------------------------------------------------+
-//!
-//! The handling for the cleanups is a bit... sensitive.  Basically, the body
-//! is the one that invokes `add_clean()` for each binding.  During the guard
-//! evaluation, we add temporary cleanups and revoke them after the guard is
-//! evaluated (it could fail, after all). Note that guards and moves are
-//! just plain incompatible.
-//!
-//! Some relevant helper functions that manage bindings:
-//! - `create_bindings_map()`
-//! - `insert_lllocals()`
-//!
-//!
-//! ## Notes on vector pattern matching.
-//!
-//! Vector pattern matching is surprisingly tricky. The problem is that
-//! the structure of the vector isn't fully known, and slice matches
-//! can be done on subparts of it.
-//!
-//! The way that vector pattern matches are dealt with, then, is as
-//! follows. First, we make the actual condition associated with a
-//! vector pattern simply a vector length comparison. So the pattern
-//! [1, .. x] gets the condition "vec len >= 1", and the pattern
-//! [.. x] gets the condition "vec len >= 0". The problem here is that
-//! having the condition "vec len >= 1" hold clearly does not mean that
-//! only a pattern that has exactly that condition will match. This
-//! means that it may well be the case that a condition holds, but none
-//! of the patterns matching that condition match; to deal with this,
-//! when doing vector length matches, we have match failures proceed to
-//! the next condition to check.
-//!
-//! There are a couple more subtleties to deal with. While the "actual"
-//! condition associated with vector length tests is simply a test on
-//! the vector length, the actual vec_len Opt entry contains more
-//! information used to restrict which matches are associated with it.
-//! So that all matches in a submatch are matching against the same
-//! values from inside the vector, they are split up by how many
-//! elements they match at the front and at the back of the vector. In
-//! order to make sure that arms are properly checked in order, even
-//! with the overmatching conditions, each vec_len Opt entry is
-//! associated with a range of matches.
-//! Consider the following:
-//!
-//!   match &[1, 2, 3] {
-//!       [1, 1, .. _] => 0,
-//!       [1, 2, 2, .. _] => 1,
-//!       [1, 2, 3, .. _] => 2,
-//!       [1, 2, .. _] => 3,
-//!       _ => 4
-//!   }
-//! The proper arm to match is arm 2, but arms 0 and 3 both have the
-//! condition "len >= 2". If arm 3 was lumped in with arm 0, then the
-//! wrong branch would be taken. Instead, vec_len Opts are associated
-//! with a contiguous range of matches that have the same "shape".
-//! This is sort of ugly and requires a bunch of special handling of
-//! vec_len options.
-
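// A standalone sketch of the "specialize the matrix on a constructor" idea that
// the module comment above describes for the `enter_XXX()` family.  The names
// here (PatSketch, specialize_on_pair) are illustrative, not rustc internals.
#[derive(Clone, Debug)]
enum PatSketch {
    Wild,                                 // `_`
    Lit(i32),                             // a literal pattern such as `1`
    Pair(Box<PatSketch>, Box<PatSketch>), // a two-field constructor
}

// Entering the `Pair` constructor: rows whose first pattern is a `Pair` expose
// its two fields, wildcard rows expand into two wildcards, and rows headed by
// any other constructor drop out of the submatch.
fn specialize_on_pair(rows: &[Vec<PatSketch>]) -> Vec<Vec<PatSketch>> {
    rows.iter()
        .filter_map(|row| {
            let (head, rest) = row.split_first()?;
            let mut new_row = match head {
                PatSketch::Pair(a, b) => vec![(**a).clone(), (**b).clone()],
                PatSketch::Wild => vec![PatSketch::Wild, PatSketch::Wild],
                _ => return None,
            };
            new_row.extend_from_slice(rest);
            Some(new_row)
        })
        .collect()
}

fn main() {
    let rows = vec![
        vec![PatSketch::Pair(Box::new(PatSketch::Lit(1)), Box::new(PatSketch::Lit(2)))],
        vec![PatSketch::Wild],
        vec![PatSketch::Lit(3)], // does not match `Pair`, so it is dropped
    ];
    for row in specialize_on_pair(&rows) {
        println!("{:?}", row);
    }
}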
-pub use self::BranchKind::*;
-pub use self::OptResult::*;
-pub use self::TransBindingMode::*;
-use self::Opt::*;
-use self::FailureHandler::*;
-
-use llvm::{ValueRef, BasicBlockRef};
-use middle::check_match::StaticInliner;
-use middle::check_match;
-use middle::const_eval;
-use middle::def::{Def, DefMap};
-use middle::def_id::DefId;
-use middle::expr_use_visitor as euv;
-use middle::infer;
-use middle::lang_items::StrEqFnLangItem;
-use middle::mem_categorization as mc;
-use middle::mem_categorization::Categorization;
-use middle::pat_util::*;
-use trans::adt;
-use trans::base::*;
-use trans::build::{AddCase, And, Br, CondBr, GEPi, InBoundsGEP, Load, PointerCast};
-use trans::build::{Not, Store, Sub, add_comment};
-use trans::build;
-use trans::callee;
-use trans::cleanup::{self, CleanupMethods, DropHintMethods};
-use trans::common::*;
-use trans::consts;
-use trans::datum::*;
-use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
-use trans::expr::{self, Dest};
-use trans::monomorphize;
-use trans::tvec;
-use trans::type_of;
-use trans::Disr;
-use middle::ty::{self, Ty};
-use session::config::NoDebugInfo;
-use util::common::indenter;
-use util::nodemap::FnvHashMap;
-use util::ppaux;
-
-use std;
-use std::cell::RefCell;
-use std::cmp::Ordering;
-use std::fmt;
-use std::rc::Rc;
-use rustc_front::hir::{self, PatKind};
-use syntax::ast::{self, DUMMY_NODE_ID, NodeId};
-use syntax::codemap::Span;
-use rustc_front::fold::Folder;
-use syntax::ptr::P;
-
-#[derive(Copy, Clone, Debug)]
-struct ConstantExpr<'a>(&'a hir::Expr);
-
-impl<'a> ConstantExpr<'a> {
-    fn eq(self, other: ConstantExpr<'a>, tcx: &ty::ctxt) -> bool {
-        match const_eval::compare_lit_exprs(tcx, self.0, other.0) {
-            Some(result) => result == Ordering::Equal,
-            None => panic!("compare_lit_exprs: type mismatch"),
-        }
-    }
-}
-
-// An option identifying a branch (a literal, an enum variant, a range, or a slice length)
-#[derive(Debug)]
-enum Opt<'a, 'tcx> {
-    ConstantValue(ConstantExpr<'a>, DebugLoc),
-    ConstantRange(ConstantExpr<'a>, ConstantExpr<'a>, DebugLoc),
-    Variant(Disr, Rc<adt::Repr<'tcx>>, DefId, DebugLoc),
-    SliceLengthEqual(usize, DebugLoc),
-    SliceLengthGreaterOrEqual(/* prefix length */ usize,
-                              /* suffix length */ usize,
-                              DebugLoc),
-}
-
-impl<'a, 'tcx> Opt<'a, 'tcx> {
-    fn eq(&self, other: &Opt<'a, 'tcx>, tcx: &ty::ctxt<'tcx>) -> bool {
-        match (self, other) {
-            (&ConstantValue(a, _), &ConstantValue(b, _)) => a.eq(b, tcx),
-            (&ConstantRange(a1, a2, _), &ConstantRange(b1, b2, _)) => {
-                a1.eq(b1, tcx) && a2.eq(b2, tcx)
-            }
-            (&Variant(a_disr, ref a_repr, a_def, _),
-             &Variant(b_disr, ref b_repr, b_def, _)) => {
-                a_disr == b_disr && *a_repr == *b_repr && a_def == b_def
-            }
-            (&SliceLengthEqual(a, _), &SliceLengthEqual(b, _)) => a == b,
-            (&SliceLengthGreaterOrEqual(a1, a2, _),
-             &SliceLengthGreaterOrEqual(b1, b2, _)) => {
-                a1 == b1 && a2 == b2
-            }
-            _ => false
-        }
-    }
-
-    fn trans<'blk>(&self, mut bcx: Block<'blk, 'tcx>) -> OptResult<'blk, 'tcx> {
-        use trans::consts::TrueConst::Yes;
-        let _icx = push_ctxt("match::trans_opt");
-        let ccx = bcx.ccx();
-        match *self {
-            ConstantValue(ConstantExpr(lit_expr), _) => {
-                let lit_ty = bcx.tcx().node_id_to_type(lit_expr.id);
-                let expr = consts::const_expr(ccx, &lit_expr, bcx.fcx.param_substs, None, Yes);
-                let llval = match expr {
-                    Ok((llval, _)) => llval,
-                    Err(err) => bcx.ccx().sess().span_fatal(lit_expr.span, &err.description()),
-                };
-                let lit_datum = immediate_rvalue(llval, lit_ty);
-                let lit_datum = unpack_datum!(bcx, lit_datum.to_appropriate_datum(bcx));
-                SingleResult(Result::new(bcx, lit_datum.val))
-            }
-            ConstantRange(ConstantExpr(ref l1), ConstantExpr(ref l2), _) => {
-                let l1 = match consts::const_expr(ccx, &l1, bcx.fcx.param_substs, None, Yes) {
-                    Ok((l1, _)) => l1,
-                    Err(err) => bcx.ccx().sess().span_fatal(l1.span, &err.description()),
-                };
-                let l2 = match consts::const_expr(ccx, &l2, bcx.fcx.param_substs, None, Yes) {
-                    Ok((l2, _)) => l2,
-                    Err(err) => bcx.ccx().sess().span_fatal(l2.span, &err.description()),
-                };
-                RangeResult(Result::new(bcx, l1), Result::new(bcx, l2))
-            }
-            Variant(disr_val, ref repr, _, _) => {
-                SingleResult(Result::new(bcx, adt::trans_case(bcx, &repr, disr_val)))
-            }
-            SliceLengthEqual(length, _) => {
-                SingleResult(Result::new(bcx, C_uint(ccx, length)))
-            }
-            SliceLengthGreaterOrEqual(prefix, suffix, _) => {
-                LowerBound(Result::new(bcx, C_uint(ccx, prefix + suffix)))
-            }
-        }
-    }
-
-    fn debug_loc(&self) -> DebugLoc {
-        match *self {
-            ConstantValue(_,debug_loc)                 |
-            ConstantRange(_, _, debug_loc)             |
-            Variant(_, _, _, debug_loc)                |
-            SliceLengthEqual(_, debug_loc)             |
-            SliceLengthGreaterOrEqual(_, _, debug_loc) => debug_loc
-        }
-    }
-}
-
-#[derive(Copy, Clone, PartialEq)]
-pub enum BranchKind {
-    NoBranch,
-    Single,
-    Switch,
-    Compare,
-    CompareSliceLength
-}
-
-pub enum OptResult<'blk, 'tcx: 'blk> {
-    SingleResult(Result<'blk, 'tcx>),
-    RangeResult(Result<'blk, 'tcx>, Result<'blk, 'tcx>),
-    LowerBound(Result<'blk, 'tcx>)
-}
-
-#[derive(Clone, Copy, PartialEq)]
-pub enum TransBindingMode {
-    /// By-value binding for a copy type: copies from matched data
-    /// into a fresh LLVM alloca.
-    TrByCopy(/* llbinding */ ValueRef),
-
-    /// By-value binding for a non-copy type where we copy into a
-    /// fresh LLVM alloca; this most accurately reflects the language
-    /// semantics (e.g. it properly handles overwrites of the matched
-    /// input), but potentially injects an unwanted copy.
-    TrByMoveIntoCopy(/* llbinding */ ValueRef),
-
-    /// Binding a non-copy type by reference under the hood; this is
-    /// a codegen optimization to avoid unnecessary memory traffic.
-    TrByMoveRef,
-
-    /// By-ref binding exposed in the original source input.
-    TrByRef,
-}
-
-impl TransBindingMode {
-    /// If binding by making a fresh copy, returns the alloca that it
-    /// will copy into; otherwise None.
-    fn alloca_if_copy(&self) -> Option<ValueRef> {
-        match *self {
-            TrByCopy(llbinding) | TrByMoveIntoCopy(llbinding) => Some(llbinding),
-            TrByMoveRef | TrByRef => None,
-        }
-    }
-}
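// A source-level illustration of the distinctions the enum above draws.  This
// is ordinary Rust semantics, not rustc-internal code.
fn main() {
    let pair = (1u32, String::from("guard"));
    match pair {
        // `n` binds the u32 by value (a copy, the TrByCopy-style case) while
        // `ref s` borrows the String in place (the TrByRef-style case), so
        // nothing is moved out of `pair` here.
        (n, ref s) => println!("{} {}", n, s),
    }

    let owned = Some(String::from("moved"));
    match owned {
        // `s` binds a non-Copy value by value, so the String is moved out of
        // `owned` (the by-move cases distinguished above).
        Some(s) => println!("{}", s),
        None => {}
    }
}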
-
-/// Information about a pattern binding:
-/// - `llmatch` is a pointer to a stack slot.  The stack slot contains a
-///   pointer into the value being matched.  Hence, llmatch has type `T**`
-///   where `T` is the value being matched.
-/// - `trmode` is the trans binding mode
-/// - `id` is the node id of the binding
-/// - `ty` is the Rust type of the binding
-#[derive(Clone, Copy)]
-pub struct BindingInfo<'tcx> {
-    pub llmatch: ValueRef,
-    pub trmode: TransBindingMode,
-    pub id: ast::NodeId,
-    pub span: Span,
-    pub ty: Ty<'tcx>,
-}
-
-type BindingsMap<'tcx> = FnvHashMap<ast::Name, BindingInfo<'tcx>>;
-
-struct ArmData<'p, 'blk, 'tcx: 'blk> {
-    bodycx: Block<'blk, 'tcx>,
-    arm: &'p hir::Arm,
-    bindings_map: BindingsMap<'tcx>
-}
-
-/// Info about a Match.
-/// If all `pats` are matched then arm `data` will be executed.
-/// As we proceed, `bound_ptrs` is filled with pointers to the values to be bound;
-/// these pointers are stored in the llmatch variables just before the `data` arm is executed.
-struct Match<'a, 'p: 'a, 'blk: 'a, 'tcx: 'blk> {
-    pats: Vec<&'p hir::Pat>,
-    data: &'a ArmData<'p, 'blk, 'tcx>,
-    bound_ptrs: Vec<(ast::Name, ValueRef)>,
-    // Thread along renamings done by the check_match::StaticInliner, so we can
-    // map back to original NodeIds
-    pat_renaming_map: Option<&'a FnvHashMap<(NodeId, Span), NodeId>>
-}
-
-impl<'a, 'p, 'blk, 'tcx> fmt::Debug for Match<'a, 'p, 'blk, 'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        if ppaux::verbose() {
-            // for many programs, this just takes too long to serialize
-            write!(f, "{:?}", self.pats)
-        } else {
-            write!(f, "{} pats", self.pats.len())
-        }
-    }
-}
-
-fn has_nested_bindings(m: &[Match], col: usize) -> bool {
-    for br in m {
-        match br.pats[col].node {
-            PatKind::Ident(_, _, Some(_)) => return true,
-            _ => ()
-        }
-    }
-    return false;
-}
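// What a "nested binding" looks like at the source level: a name bound with
// `@` in front of a sub-pattern.  `expand_nested_bindings()` below peels these
// off so the inner pattern can continue to be matched.  Plain Rust, not
// rustc-internal code.
fn main() {
    let v = Some(5);
    match v {
        // `n @ Some(_)` binds `n` to the whole value while still requiring
        // the inner `Some(_)` pattern to match.
        n @ Some(_) => println!("bound {:?}", n),
        None => println!("no binding"),
    }
}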
-
-// As noted in `fn match_datum`, we should eventually pass around a
-// `Datum<Lvalue>` for the `val`; but until we get to that point, this
-// `MatchInput` struct will serve -- it has everything `Datum<Lvalue>`
-// does except for the type field.
-#[derive(Copy, Clone)]
-pub struct MatchInput { val: ValueRef, lval: Lvalue }
-
-impl<'tcx> Datum<'tcx, Lvalue> {
-    pub fn match_input(&self) -> MatchInput {
-        MatchInput {
-            val: self.val,
-            lval: self.kind,
-        }
-    }
-}
-
-impl MatchInput {
-    fn from_val(val: ValueRef) -> MatchInput {
-        MatchInput {
-            val: val,
-            lval: Lvalue::new("MatchInput::from_val"),
-        }
-    }
-
-    fn to_datum<'tcx>(self, ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> {
-        Datum::new(self.val, ty, self.lval)
-    }
-}
-
-fn expand_nested_bindings<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                              m: &[Match<'a, 'p, 'blk, 'tcx>],
-                                              col: usize,
-                                              val: MatchInput)
-                                              -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
-    debug!("expand_nested_bindings(bcx={}, m={:?}, col={}, val={})",
-           bcx.to_str(),
-           m,
-           col,
-           bcx.val_to_string(val.val));
-    let _indenter = indenter();
-
-    m.iter().map(|br| {
-        let mut bound_ptrs = br.bound_ptrs.clone();
-        let mut pat = br.pats[col];
-        loop {
-            pat = match pat.node {
-                PatKind::Ident(_, ref path, Some(ref inner)) => {
-                    bound_ptrs.push((path.node.name, val.val));
-                    &inner
-                },
-                _ => break
-            }
-        }
-
-        let mut pats = br.pats.clone();
-        pats[col] = pat;
-        Match {
-            pats: pats,
-            data: &br.data,
-            bound_ptrs: bound_ptrs,
-            pat_renaming_map: br.pat_renaming_map,
-        }
-    }).collect()
-}
-
-fn enter_match<'a, 'b, 'p, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
-                                          dm: &RefCell<DefMap>,
-                                          m: &[Match<'a, 'p, 'blk, 'tcx>],
-                                          col: usize,
-                                          val: MatchInput,
-                                          mut e: F)
-                                          -> Vec<Match<'a, 'p, 'blk, 'tcx>> where
-    F: FnMut(&[&'p hir::Pat]) -> Option<Vec<&'p hir::Pat>>,
-{
-    debug!("enter_match(bcx={}, m={:?}, col={}, val={})",
-           bcx.to_str(),
-           m,
-           col,
-           bcx.val_to_string(val.val));
-    let _indenter = indenter();
-
-    m.iter().filter_map(|br| {
-        e(&br.pats).map(|pats| {
-            let this = br.pats[col];
-            let mut bound_ptrs = br.bound_ptrs.clone();
-            match this.node {
-                PatKind::Ident(_, ref path, None) => {
-                    if pat_is_binding(&dm.borrow(), &this) {
-                        bound_ptrs.push((path.node.name, val.val));
-                    }
-                }
-                PatKind::Vec(ref before, Some(ref slice), ref after) => {
-                    if let PatKind::Ident(_, ref path, None) = slice.node {
-                        let subslice_val = bind_subslice_pat(
-                            bcx, this.id, val,
-                            before.len(), after.len());
-                        bound_ptrs.push((path.node.name, subslice_val));
-                    }
-                }
-                _ => {}
-            }
-            Match {
-                pats: pats,
-                data: br.data,
-                bound_ptrs: bound_ptrs,
-                pat_renaming_map: br.pat_renaming_map,
-            }
-        })
-    }).collect()
-}
-
-fn enter_default<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                     dm: &RefCell<DefMap>,
-                                     m: &[Match<'a, 'p, 'blk, 'tcx>],
-                                     col: usize,
-                                     val: MatchInput)
-                                     -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
-    debug!("enter_default(bcx={}, m={:?}, col={}, val={})",
-           bcx.to_str(),
-           m,
-           col,
-           bcx.val_to_string(val.val));
-    let _indenter = indenter();
-
-    // Collect all of the matches that can match against anything.
-    enter_match(bcx, dm, m, col, val, |pats| {
-        if pat_is_binding_or_wild(&dm.borrow(), &pats[col]) {
-            let mut r = pats[..col].to_vec();
-            r.extend_from_slice(&pats[col + 1..]);
-            Some(r)
-        } else {
-            None
-        }
-    })
-}
-
-// <pcwalton> nmatsakis: what does enter_opt do?
-// <pcwalton> in trans/match
-// <pcwalton> trans/match.rs is like stumbling around in a dark cave
-// <nmatsakis> pcwalton: the enter family of functions adjust the set of
-//             patterns as needed
-// <nmatsakis> yeah, at some point I kind of achieved some level of
-//             understanding
-// <nmatsakis> anyhow, they adjust the patterns given that something of that
-//             kind has been found
-// <nmatsakis> pcwalton: ok, right, so enter_XXX() adjusts the patterns, as I
-//             said
-// <nmatsakis> enter_match() kind of embodies the generic code
-// <nmatsakis> it is provided with a function that tests each pattern to see
-//             if it might possibly apply and so forth
-// <nmatsakis> so, if you have a pattern like {a: _, b: _, _} and one like _
-// <nmatsakis> then _ would be expanded to (_, _)
-// <nmatsakis> one spot for each of the sub-patterns
-// <nmatsakis> enter_opt() is one of the more complex; it covers the fallible
-//             cases
-// <nmatsakis> enter_rec_or_struct() or enter_tuple() are simpler, since they
-//             are infallible patterns
-// <nmatsakis> so all patterns must either be records (resp. tuples) or
-//             wildcards
-
-/// The above is now outdated in that enter_match() now takes a function that
-/// takes the complete row of patterns rather than just the first one.
-/// Also, most of the enter_() family functions have been unified with
-/// the check_match specialization step.
-fn enter_opt<'a, 'p, 'blk, 'tcx>(
-             bcx: Block<'blk, 'tcx>,
-             _: ast::NodeId,
-             dm: &RefCell<DefMap>,
-             m: &[Match<'a, 'p, 'blk, 'tcx>],
-             opt: &Opt,
-             col: usize,
-             variant_size: usize,
-             val: MatchInput)
-             -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
-    debug!("enter_opt(bcx={}, m={:?}, opt={:?}, col={}, val={})",
-           bcx.to_str(),
-           m,
-           *opt,
-           col,
-           bcx.val_to_string(val.val));
-    let _indenter = indenter();
-
-    let ctor = match opt {
-        &ConstantValue(ConstantExpr(expr), _) => check_match::ConstantValue(
-            const_eval::eval_const_expr(bcx.tcx(), &expr)
-        ),
-        &ConstantRange(ConstantExpr(lo), ConstantExpr(hi), _) => check_match::ConstantRange(
-            const_eval::eval_const_expr(bcx.tcx(), &lo),
-            const_eval::eval_const_expr(bcx.tcx(), &hi)
-        ),
-        &SliceLengthEqual(n, _) =>
-            check_match::Slice(n),
-        &SliceLengthGreaterOrEqual(before, after, _) =>
-            check_match::SliceWithSubslice(before, after),
-        &Variant(_, _, def_id, _) =>
-            check_match::Constructor::Variant(def_id)
-    };
-
-    let param_env = bcx.tcx().empty_parameter_environment();
-    let mcx = check_match::MatchCheckCtxt {
-        tcx: bcx.tcx(),
-        param_env: param_env,
-    };
-    enter_match(bcx, dm, m, col, val, |pats|
-        check_match::specialize(&mcx, &pats[..], &ctor, col, variant_size)
-    )
-}
-
-// Returns the options in one column of matches. An option is something that
-// needs to be conditionally matched at runtime; for example, the discriminant
-// on a set of enum variants or a literal.
-fn get_branches<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                    m: &[Match<'a, 'p, 'blk, 'tcx>],
-                                    col: usize)
-                                    -> Vec<Opt<'p, 'tcx>> {
-    let tcx = bcx.tcx();
-
-    let mut found: Vec<Opt> = vec![];
-    for br in m {
-        let cur = br.pats[col];
-        let debug_loc = match br.pat_renaming_map {
-            Some(pat_renaming_map) => {
-                match pat_renaming_map.get(&(cur.id, cur.span)) {
-                    Some(&id) => DebugLoc::At(id, cur.span),
-                    None => DebugLoc::At(cur.id, cur.span),
-                }
-            }
-            None => DebugLoc::None
-        };
-
-        let opt = match cur.node {
-            PatKind::Lit(ref l) => {
-                ConstantValue(ConstantExpr(&l), debug_loc)
-            }
-            PatKind::Ident(..) | PatKind::Path(..) |
-            PatKind::TupleStruct(..) | PatKind::Struct(..) => {
-                // This is either an enum variant or a variable binding.
-                let opt_def = tcx.def_map.borrow().get(&cur.id).map(|d| d.full_def());
-                match opt_def {
-                    Some(Def::Variant(enum_id, var_id)) => {
-                        let variant = tcx.lookup_adt_def(enum_id).variant_with_id(var_id);
-                        Variant(Disr::from(variant.disr_val),
-                                adt::represent_node(bcx, cur.id),
-                                var_id,
-                                debug_loc)
-                    }
-                    _ => continue
-                }
-            }
-            PatKind::Range(ref l1, ref l2) => {
-                ConstantRange(ConstantExpr(&l1), ConstantExpr(&l2), debug_loc)
-            }
-            PatKind::Vec(ref before, None, ref after) => {
-                SliceLengthEqual(before.len() + after.len(), debug_loc)
-            }
-            PatKind::Vec(ref before, Some(_), ref after) => {
-                SliceLengthGreaterOrEqual(before.len(), after.len(), debug_loc)
-            }
-            _ => continue
-        };
-
-        if !found.iter().any(|x| x.eq(&opt, tcx)) {
-            found.push(opt);
-        }
-    }
-    found
-}
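// A standalone sketch of the length test that `get_branches()` above derives
// from a slice pattern: a pattern with a "rest" part only bounds the length
// from below (by the number of explicit elements), while a pattern without
// one fixes the length exactly.  Illustrative names, not rustc internals.
#[derive(Debug, PartialEq)]
enum LengthTest {
    Equal(usize),   // e.g. `[a, b]`  => len == 2
    AtLeast(usize), // e.g. `[a, ..]` => len >= 1
}

fn length_test(before: usize, has_rest: bool, after: usize) -> LengthTest {
    if has_rest {
        LengthTest::AtLeast(before + after)
    } else {
        LengthTest::Equal(before + after)
    }
}

fn main() {
    // `[1, .., x]`: one element before the rest pattern and one after it.
    assert_eq!(length_test(1, true, 1), LengthTest::AtLeast(2));
    // `[..]`: matches a slice of any length.
    assert_eq!(length_test(0, true, 0), LengthTest::AtLeast(0));
    // `[a, b, c]`: exactly three elements.
    assert_eq!(length_test(3, false, 0), LengthTest::Equal(3));
}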
-
-struct ExtractedBlock<'blk, 'tcx: 'blk> {
-    vals: Vec<ValueRef>,
-    bcx: Block<'blk, 'tcx>,
-}
-
-fn extract_variant_args<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                    repr: &adt::Repr<'tcx>,
-                                    disr_val: Disr,
-                                    val: MatchInput)
-                                    -> ExtractedBlock<'blk, 'tcx> {
-    let _icx = push_ctxt("match::extract_variant_args");
-    // Assume enums are always sized for now.
-    let val = adt::MaybeSizedValue::sized(val.val);
-    let args = (0..adt::num_args(repr, disr_val)).map(|i| {
-        adt::trans_field_ptr(bcx, repr, val, disr_val, i)
-    }).collect();
-
-    ExtractedBlock { vals: args, bcx: bcx }
-}
-
-/// Helper for converting from the ValueRef that we pass around in the match code, which is always
-/// an lvalue, into a Datum. Eventually we should just pass around a Datum and be done with it.
-fn match_datum<'tcx>(val: MatchInput, left_ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> {
-    val.to_datum(left_ty)
-}
-
-fn bind_subslice_pat(bcx: Block,
-                     pat_id: ast::NodeId,
-                     val: MatchInput,
-                     offset_left: usize,
-                     offset_right: usize) -> ValueRef {
-    let _icx = push_ctxt("match::bind_subslice_pat");
-    let vec_ty = node_id_type(bcx, pat_id);
-    let vec_ty_contents = match vec_ty.sty {
-        ty::TyBox(ty) => ty,
-        ty::TyRef(_, mt) | ty::TyRawPtr(mt) => mt.ty,
-        _ => vec_ty
-    };
-    let unit_ty = vec_ty_contents.sequence_element_type(bcx.tcx());
-    let vec_datum = match_datum(val, vec_ty);
-    let (base, len) = vec_datum.get_vec_base_and_len(bcx);
-
-    let slice_begin = InBoundsGEP(bcx, base, &[C_uint(bcx.ccx(), offset_left)]);
-    let slice_len_offset = C_uint(bcx.ccx(), offset_left + offset_right);
-    let slice_len = Sub(bcx, len, slice_len_offset, DebugLoc::None);
-    let slice_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic),
-                                         bcx.tcx().mk_slice(unit_ty));
-    let scratch = rvalue_scratch_datum(bcx, slice_ty, "");
-    Store(bcx, slice_begin, expr::get_dataptr(bcx, scratch.val));
-    Store(bcx, slice_len, expr::get_meta(bcx, scratch.val));
-    scratch.val
-}
-
-fn extract_vec_elems<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                 left_ty: Ty<'tcx>,
-                                 before: usize,
-                                 after: usize,
-                                 val: MatchInput)
-                                 -> ExtractedBlock<'blk, 'tcx> {
-    let _icx = push_ctxt("match::extract_vec_elems");
-    let vec_datum = match_datum(val, left_ty);
-    let (base, len) = vec_datum.get_vec_base_and_len(bcx);
-    let mut elems = vec![];
-    elems.extend((0..before).map(|i| GEPi(bcx, base, &[i])));
-    elems.extend((0..after).rev().map(|i| {
-        InBoundsGEP(bcx, base, &[
-            Sub(bcx, len, C_uint(bcx.ccx(), i + 1), DebugLoc::None)
-        ])
-    }));
-    ExtractedBlock { vals: elems, bcx: bcx }
-}
-
-// Macro for deciding whether any of the remaining matches fit a given kind of
-// pattern.  Note that, because the match is well-typed, either ALL of the
-// matches should fit that sort of pattern or NONE (however, some of the
-// matches may be wildcards like _ or identifiers).
-macro_rules! any_pat {
-    ($m:expr, $col:expr, $pattern:pat) => (
-        ($m).iter().any(|br| {
-            match br.pats[$col].node {
-                $pattern => true,
-                _ => false
-            }
-        })
-    )
-}
-
-fn any_uniq_pat(m: &[Match], col: usize) -> bool {
-    any_pat!(m, col, PatKind::Box(_))
-}
-
-fn any_region_pat(m: &[Match], col: usize) -> bool {
-    any_pat!(m, col, PatKind::Ref(..))
-}
-
-fn any_irrefutable_adt_pat(tcx: &ty::ctxt, m: &[Match], col: usize) -> bool {
-    m.iter().any(|br| {
-        let pat = br.pats[col];
-        match pat.node {
-            PatKind::Tup(_) => true,
-            PatKind::Struct(..) | PatKind::TupleStruct(..) |
-            PatKind::Path(..) | PatKind::Ident(_, _, None) => {
-                match tcx.def_map.borrow().get(&pat.id).unwrap().full_def() {
-                    Def::Struct(..) | Def::TyAlias(..) => true,
-                    _ => false,
-                }
-            }
-            _ => false
-        }
-    })
-}
-
-/// What to do when the pattern match fails.
-enum FailureHandler {
-    Infallible,
-    JumpToBasicBlock(BasicBlockRef),
-    Unreachable
-}
-
-impl FailureHandler {
-    fn is_fallible(&self) -> bool {
-        match *self {
-            Infallible => false,
-            _ => true
-        }
-    }
-
-    fn is_infallible(&self) -> bool {
-        !self.is_fallible()
-    }
-
-    fn handle_fail(&self, bcx: Block) {
-        match *self {
-            Infallible =>
-                panic!("attempted to panic in a non-panicking panic handler!"),
-            JumpToBasicBlock(basic_block) =>
-                Br(bcx, basic_block, DebugLoc::None),
-            Unreachable =>
-                build::Unreachable(bcx)
-        }
-    }
-}
-
-fn pick_column_to_specialize(def_map: &RefCell<DefMap>, m: &[Match]) -> Option<usize> {
-    fn pat_score(def_map: &RefCell<DefMap>, pat: &hir::Pat) -> usize {
-        match pat.node {
-            PatKind::Ident(_, _, Some(ref inner)) => pat_score(def_map, &inner),
-            _ if pat_is_refutable(&def_map.borrow(), pat) => 1,
-            _ => 0
-        }
-    }
-
-    let column_score = |m: &[Match], col: usize| -> usize {
-        let total_score = m.iter()
-            .map(|row| row.pats[col])
-            .map(|pat| pat_score(def_map, pat))
-            .sum();
-
-        // Irrefutable columns always go first; they'd only be duplicated in the branches.
-        if total_score == 0 {
-            std::usize::MAX
-        } else {
-            total_score
-        }
-    };
-
-    let column_contains_any_nonwild_patterns = |&col: &usize| -> bool {
-        m.iter().any(|row| match row.pats[col].node {
-            PatKind::Wild => false,
-            _ => true
-        })
-    };
-
-    (0..m[0].pats.len())
-        .filter(column_contains_any_nonwild_patterns)
-        .map(|col| (col, column_score(m, col)))
-        .max_by_key(|&(_, score)| score)
-        .map(|(col, _)| col)
-}
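// A self-contained sketch of the scoring heuristic above: each refutable
// pattern in a column contributes 1, and a column with no refutable patterns
// is given the maximum score so that irrefutable columns are handled first.
// Plain Rust with an illustrative input shape, not rustc-internal code.
fn column_score(refutable: &[bool]) -> usize {
    let total: usize = refutable.iter().map(|&r| if r { 1 } else { 0 }).sum();
    if total == 0 {
        std::usize::MAX
    } else {
        total
    }
}

fn main() {
    // A column consisting only of wildcards / plain bindings.
    assert_eq!(column_score(&[false, false, false]), std::usize::MAX);
    // A column with two refutable patterns and one wildcard.
    assert_eq!(column_score(&[true, false, true]), 2);
}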
-
-// Compiles a comparison between two things.
-fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
-                              lhs: ValueRef,
-                              rhs: ValueRef,
-                              rhs_t: Ty<'tcx>,
-                              debug_loc: DebugLoc)
-                              -> Result<'blk, 'tcx> {
-    fn compare_str<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
-                               lhs_data: ValueRef,
-                               lhs_len: ValueRef,
-                               rhs_data: ValueRef,
-                               rhs_len: ValueRef,
-                               rhs_t: Ty<'tcx>,
-                               debug_loc: DebugLoc)
-                               -> Result<'blk, 'tcx> {
-        let did = langcall(cx,
-                           None,
-                           &format!("comparison of `{}`", rhs_t),
-                           StrEqFnLangItem);
-        callee::trans_lang_call(cx, did, &[lhs_data, lhs_len, rhs_data, rhs_len], None, debug_loc)
-    }
-
-    let _icx = push_ctxt("compare_values");
-    if rhs_t.is_scalar() {
-        let cmp = compare_scalar_types(cx, lhs, rhs, rhs_t, hir::BiEq, debug_loc);
-        return Result::new(cx, cmp);
-    }
-
-    match rhs_t.sty {
-        ty::TyRef(_, mt) => match mt.ty.sty {
-            ty::TyStr => {
-                let lhs_data = Load(cx, expr::get_dataptr(cx, lhs));
-                let lhs_len = Load(cx, expr::get_meta(cx, lhs));
-                let rhs_data = Load(cx, expr::get_dataptr(cx, rhs));
-                let rhs_len = Load(cx, expr::get_meta(cx, rhs));
-                compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc)
-            }
-            ty::TyArray(ty, _) | ty::TySlice(ty) => match ty.sty {
-                ty::TyUint(ast::UintTy::U8) => {
-                    // NOTE: cast &[u8] and &[u8; N] to &str and abuse the str_eq lang item,
-                    // which calls memcmp().
-                    let pat_len = val_ty(rhs).element_type().array_length();
-                    let ty_str_slice = cx.tcx().mk_static_str();
-
-                    let rhs_data = GEPi(cx, rhs, &[0, 0]);
-                    let rhs_len = C_uint(cx.ccx(), pat_len);
-
-                    let lhs_data;
-                    let lhs_len;
-                    if val_ty(lhs) == val_ty(rhs) {
-                        // Both the discriminant and the pattern are thin pointers
-                        lhs_data = GEPi(cx, lhs, &[0, 0]);
-                        lhs_len = C_uint(cx.ccx(), pat_len);
-                    } else {
-                        // The discriminant is a fat pointer
-                        let llty_str_slice = type_of::type_of(cx.ccx(), ty_str_slice).ptr_to();
-                        let lhs_str = PointerCast(cx, lhs, llty_str_slice);
-                        lhs_data = Load(cx, expr::get_dataptr(cx, lhs_str));
-                        lhs_len = Load(cx, expr::get_meta(cx, lhs_str));
-                    }
-
-                    compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc)
-                },
-                _ => cx.sess().bug("only byte strings supported in compare_values"),
-            },
-            _ => cx.sess().bug("only string and byte strings supported in compare_values"),
-        },
-        _ => cx.sess().bug("only scalars, byte strings, and strings supported in compare_values"),
-    }
-}
-
-/// For each binding in `data.bindings_map`, adds an appropriate entry into the `fcx.lllocals` map
-fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                               bindings_map: &BindingsMap<'tcx>,
-                               cs: Option<cleanup::ScopeId>)
-                               -> Block<'blk, 'tcx> {
-    for (&name, &binding_info) in bindings_map {
-        let (llval, aliases_other_state) = match binding_info.trmode {
-            // By value mut binding for a copy type: load from the ptr
-            // into the matched value and copy to our alloca
-            TrByCopy(llbinding) |
-            TrByMoveIntoCopy(llbinding) => {
-                let llval = Load(bcx, binding_info.llmatch);
-                let lvalue = match binding_info.trmode {
-                    TrByCopy(..) =>
-                        Lvalue::new("_match::insert_lllocals"),
-                    TrByMoveIntoCopy(..) => {
-                        // match_input moves from the input into a
-                        // separate stack slot.
-                        //
-                        // E.g. consider moving the value `D(A)` out
-                        // of the tuple `(D(A), D(B))` and into the
-                        // local variable `x` via the pattern `(x,_)`,
-                        // leaving the remainder of the tuple `(_,
-                        // D(B))` still to be dropped in the future.
-                        //
-                        // Thus, here we must zero the place that we
-                        // are moving *from*, because we do not yet
-                        // track drop flags for a fragmented parent
-                        // match input expression.
-                        //
-                        // Longer term we will be able to map the move
-                        // into `(x, _)` up to the parent path that
-                        // owns the whole tuple, and mark the
-                        // corresponding stack-local drop-flag
-                        // tracking the first component of the tuple.
-                        let hint_kind = HintKind::ZeroAndMaintain;
-                        Lvalue::new_with_hint("_match::insert_lllocals (match_input)",
-                                              bcx, binding_info.id, hint_kind)
-                    }
-                    _ => unreachable!(),
-                };
-                let datum = Datum::new(llval, binding_info.ty, lvalue);
-                call_lifetime_start(bcx, llbinding);
-                bcx = datum.store_to(bcx, llbinding);
-                if let Some(cs) = cs {
-                    bcx.fcx.schedule_lifetime_end(cs, llbinding);
-                }
-
-                (llbinding, false)
-            },
-
-            // By value move bindings: load from the ptr into the matched value
-            TrByMoveRef => (Load(bcx, binding_info.llmatch), true),
-
-            // By ref binding: use the ptr into the matched value
-            TrByRef => (binding_info.llmatch, true),
-        };
-
-
-        // A local that aliases some other state must be zeroed, since
-        // the other state (e.g. some parent data that we matched
-        // into) will still have its subcomponents (such as this
-        // local) destructed at the end of the parent's scope. Longer
-        // term, we will properly map such parents to the set of
-        // unique drop flags for its fragments.
-        let hint_kind = if aliases_other_state {
-            HintKind::ZeroAndMaintain
-        } else {
-            HintKind::DontZeroJustUse
-        };
-        let lvalue = Lvalue::new_with_hint("_match::insert_lllocals (local)",
-                                           bcx,
-                                           binding_info.id,
-                                           hint_kind);
-        let datum = Datum::new(llval, binding_info.ty, lvalue);
-        if let Some(cs) = cs {
-            let opt_datum = lvalue.dropflag_hint(bcx);
-            bcx.fcx.schedule_lifetime_end(cs, binding_info.llmatch);
-            bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty, opt_datum);
-        }
-
-        debug!("binding {} to {}", binding_info.id, bcx.val_to_string(llval));
-        bcx.fcx.lllocals.borrow_mut().insert(binding_info.id, datum);
-        debuginfo::create_match_binding_metadata(bcx, name, binding_info);
-    }
-    bcx
-}
-
-fn compile_guard<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                     guard_expr: &hir::Expr,
-                                     data: &ArmData<'p, 'blk, 'tcx>,
-                                     m: &[Match<'a, 'p, 'blk, 'tcx>],
-                                     vals: &[MatchInput],
-                                     chk: &FailureHandler,
-                                     has_genuine_default: bool)
-                                     -> Block<'blk, 'tcx> {
-    debug!("compile_guard(bcx={}, guard_expr={:?}, m={:?}, vals=[{}])",
-           bcx.to_str(),
-           guard_expr,
-           m,
-           vals.iter().map(|v| bcx.val_to_string(v.val)).collect::<Vec<_>>().join(", "));
-    let _indenter = indenter();
-
-    let mut bcx = insert_lllocals(bcx, &data.bindings_map, None);
-
-    let val = unpack_datum!(bcx, expr::trans(bcx, guard_expr));
-    let val = val.to_llbool(bcx);
-
-    for (_, &binding_info) in &data.bindings_map {
-        if let Some(llbinding) = binding_info.trmode.alloca_if_copy() {
-            call_lifetime_end(bcx, llbinding)
-        }
-    }
-
-    for (_, &binding_info) in &data.bindings_map {
-        bcx.fcx.lllocals.borrow_mut().remove(&binding_info.id);
-    }
-
-    with_cond(bcx, Not(bcx, val, guard_expr.debug_loc()), |bcx| {
-        for (_, &binding_info) in &data.bindings_map {
-            call_lifetime_end(bcx, binding_info.llmatch);
-        }
-        match chk {
-            // If the default arm is the only one left, move on to the next
-            // condition explicitly rather than (possibly) falling back to
-            // the default arm.
-            &JumpToBasicBlock(_) if m.len() == 1 && has_genuine_default => {
-                chk.handle_fail(bcx);
-            }
-            _ => {
-                compile_submatch(bcx, m, vals, chk, has_genuine_default);
-            }
-        };
-        bcx
-    })
-}
-
-fn compile_submatch<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                        m: &[Match<'a, 'p, 'blk, 'tcx>],
-                                        vals: &[MatchInput],
-                                        chk: &FailureHandler,
-                                        has_genuine_default: bool) {
-    debug!("compile_submatch(bcx={}, m={:?}, vals=[{}])",
-           bcx.to_str(),
-           m,
-           vals.iter().map(|v| bcx.val_to_string(v.val)).collect::<Vec<_>>().join(", "));
-    let _indenter = indenter();
-    let _icx = push_ctxt("match::compile_submatch");
-    let mut bcx = bcx;
-    if m.is_empty() {
-        if chk.is_fallible() {
-            chk.handle_fail(bcx);
-        }
-        return;
-    }
-
-    let tcx = bcx.tcx();
-    let def_map = &tcx.def_map;
-    match pick_column_to_specialize(def_map, m) {
-        Some(col) => {
-            let val = vals[col];
-            if has_nested_bindings(m, col) {
-                let expanded = expand_nested_bindings(bcx, m, col, val);
-                compile_submatch_continue(bcx,
-                                          &expanded[..],
-                                          vals,
-                                          chk,
-                                          col,
-                                          val,
-                                          has_genuine_default)
-            } else {
-                compile_submatch_continue(bcx, m, vals, chk, col, val, has_genuine_default)
-            }
-        }
-        None => {
-            let data = &m[0].data;
-            for &(ref name, ref value_ptr) in &m[0].bound_ptrs {
-                let binfo = *data.bindings_map.get(name).unwrap();
-                call_lifetime_start(bcx, binfo.llmatch);
-                if binfo.trmode == TrByRef && type_is_fat_ptr(bcx.tcx(), binfo.ty) {
-                    expr::copy_fat_ptr(bcx, *value_ptr, binfo.llmatch);
-                }
-                else {
-                    Store(bcx, *value_ptr, binfo.llmatch);
-                }
-            }
-            match data.arm.guard {
-                Some(ref guard_expr) => {
-                    bcx = compile_guard(bcx,
-                                        &guard_expr,
-                                        m[0].data,
-                                        &m[1..m.len()],
-                                        vals,
-                                        chk,
-                                        has_genuine_default);
-                }
-                _ => ()
-            }
-            Br(bcx, data.bodycx.llbb, DebugLoc::None);
-        }
-    }
-}
-
-fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                                                 m: &[Match<'a, 'p, 'blk, 'tcx>],
-                                                 vals: &[MatchInput],
-                                                 chk: &FailureHandler,
-                                                 col: usize,
-                                                 val: MatchInput,
-                                                 has_genuine_default: bool) {
-    let fcx = bcx.fcx;
-    let tcx = bcx.tcx();
-    let dm = &tcx.def_map;
-
-    let mut vals_left = vals[0..col].to_vec();
-    vals_left.extend_from_slice(&vals[col + 1..]);
-    let ccx = bcx.fcx.ccx;
-
-    // Find a real id (we're adding placeholder wildcard patterns, but
-    // each column is guaranteed to have at least one real pattern)
-    let pat_id = m.iter().map(|br| br.pats[col].id)
-                         .find(|&id| id != DUMMY_NODE_ID)
-                         .unwrap_or(DUMMY_NODE_ID);
-
-    let left_ty = if pat_id == DUMMY_NODE_ID {
-        tcx.mk_nil()
-    } else {
-        node_id_type(bcx, pat_id)
-    };
-
-    let mcx = check_match::MatchCheckCtxt {
-        tcx: bcx.tcx(),
-        param_env: bcx.tcx().empty_parameter_environment(),
-    };
-    let adt_vals = if any_irrefutable_adt_pat(bcx.tcx(), m, col) {
-        let repr = adt::represent_type(bcx.ccx(), left_ty);
-        let arg_count = adt::num_args(&repr, Disr(0));
-        let (arg_count, struct_val) = if type_is_sized(bcx.tcx(), left_ty) {
-            (arg_count, val.val)
-        } else {
-            // For an unsized ADT (i.e. DST struct), we need to treat
-            // the last field specially: instead of simply passing a
-            // ValueRef pointing to that field, as with all the others,
-            // we skip it and instead construct a 'fat ptr' below.
-            (arg_count - 1, Load(bcx, expr::get_dataptr(bcx, val.val)))
-        };
-        let mut field_vals: Vec<ValueRef> = (0..arg_count).map(|ix|
-            // By definition, these are all sized
-            adt::trans_field_ptr(bcx, &repr, adt::MaybeSizedValue::sized(struct_val), Disr(0), ix)
-        ).collect();
-
-        match left_ty.sty {
-            ty::TyStruct(def, substs) if !type_is_sized(bcx.tcx(), left_ty) => {
-                // The last field is technically unsized but
-                // since we can only ever match that field behind
-                // a reference we construct a fat ptr here.
-                let unsized_ty = def.struct_variant().fields.last().map(|field| {
-                    monomorphize::field_ty(bcx.tcx(), substs, field)
-                }).unwrap();
-                let scratch = alloc_ty(bcx, unsized_ty, "__struct_field_fat_ptr");
-
-                let meta = Load(bcx, expr::get_meta(bcx, val.val));
-                let struct_val = adt::MaybeSizedValue::unsized_(struct_val, meta);
-
-                let data = adt::trans_field_ptr(bcx, &repr, struct_val, Disr(0), arg_count);
-                Store(bcx, data, expr::get_dataptr(bcx, scratch));
-                Store(bcx, meta, expr::get_meta(bcx, scratch));
-                field_vals.push(scratch);
-            }
-            _ => {}
-        }
-        Some(field_vals)
-    } else if any_uniq_pat(m, col) || any_region_pat(m, col) {
-        Some(vec!(Load(bcx, val.val)))
-    } else {
-        match left_ty.sty {
-            ty::TyArray(_, n) => {
-                let args = extract_vec_elems(bcx, left_ty, n, 0, val);
-                Some(args.vals)
-            }
-            _ => None
-        }
-    };
-    match adt_vals {
-        Some(field_vals) => {
-            let pats = enter_match(bcx, dm, m, col, val, |pats|
-                check_match::specialize(&mcx, pats,
-                                        &check_match::Single, col,
-                                        field_vals.len())
-            );
-            let mut vals: Vec<_> = field_vals.into_iter()
-                .map(|v|MatchInput::from_val(v))
-                .collect();
-            vals.extend_from_slice(&vals_left);
-            compile_submatch(bcx, &pats, &vals, chk, has_genuine_default);
-            return;
-        }
-        _ => ()
-    }
-
-    // Decide what kind of branch we need
-    let opts = get_branches(bcx, m, col);
-    debug!("options={:?}", opts);
-    let mut kind = NoBranch;
-    let mut test_val = val.val;
-    debug!("test_val={}", bcx.val_to_string(test_val));
-    if !opts.is_empty() {
-        match opts[0] {
-            ConstantValue(..) | ConstantRange(..) => {
-                test_val = load_if_immediate(bcx, val.val, left_ty);
-                kind = if left_ty.is_integral() {
-                    Switch
-                } else {
-                    Compare
-                };
-            }
-            Variant(_, ref repr, _, _) => {
-                let (the_kind, val_opt) = adt::trans_switch(bcx, &repr,
-                                                            val.val, true);
-                kind = the_kind;
-                if let Some(tval) = val_opt { test_val = tval; }
-            }
-            SliceLengthEqual(..) | SliceLengthGreaterOrEqual(..) => {
-                let (_, len) = tvec::get_base_and_len(bcx, val.val, left_ty);
-                test_val = len;
-                kind = Switch;
-            }
-        }
-    }
-    for o in &opts {
-        match *o {
-            ConstantRange(..) => { kind = Compare; break },
-            SliceLengthGreaterOrEqual(..) => { kind = CompareSliceLength; break },
-            _ => ()
-        }
-    }
-    let else_cx = match kind {
-        NoBranch | Single => bcx,
-        _ => bcx.fcx.new_temp_block("match_else")
-    };
-    let sw = if kind == Switch {
-        build::Switch(bcx, test_val, else_cx.llbb, opts.len())
-    } else {
-        C_int(ccx, 0) // Placeholder for when not using a switch
-    };
-
-    let defaults = enter_default(else_cx, dm, m, col, val);
-    let exhaustive = chk.is_infallible() && defaults.is_empty();
-    let len = opts.len();
-
-    if exhaustive && kind == Switch {
-        build::Unreachable(else_cx);
-    }
-
-    // Compile subtrees for each option
-    for (i, opt) in opts.iter().enumerate() {
-        // In some cases of range and vector pattern matching, we need to
-        // override the failure case so that instead of failing, it proceeds
-        // to try more matching. branch_chk, then, is the proper failure case
-        // for the current conditional branch.
-        let mut branch_chk = None;
-        let mut opt_cx = else_cx;
-        let debug_loc = opt.debug_loc();
-
-        if kind == Switch || !exhaustive || i + 1 < len {
-            opt_cx = bcx.fcx.new_temp_block("match_case");
-            match kind {
-                Single => Br(bcx, opt_cx.llbb, debug_loc),
-                Switch => {
-                    match opt.trans(bcx) {
-                        SingleResult(r) => {
-                            AddCase(sw, r.val, opt_cx.llbb);
-                            bcx = r.bcx;
-                        }
-                        _ => {
-                            bcx.sess().bug(
-                                "in compile_submatch, expected \
-                                 opt.trans() to return a SingleResult")
-                        }
-                    }
-                }
-                Compare | CompareSliceLength => {
-                    let t = if kind == Compare {
-                        left_ty
-                    } else {
-                        tcx.types.usize // vector length
-                    };
-                    let Result { bcx: after_cx, val: matches } = {
-                        match opt.trans(bcx) {
-                            SingleResult(Result { bcx, val }) => {
-                                compare_values(bcx, test_val, val, t, debug_loc)
-                            }
-                            RangeResult(Result { val: vbegin, .. },
-                                        Result { bcx, val: vend }) => {
-                                let llge = compare_scalar_types(bcx, test_val, vbegin,
-                                                                t, hir::BiGe, debug_loc);
-                                let llle = compare_scalar_types(bcx, test_val, vend,
-                                                                t, hir::BiLe, debug_loc);
-                                Result::new(bcx, And(bcx, llge, llle, DebugLoc::None))
-                            }
-                            LowerBound(Result { bcx, val }) => {
-                                Result::new(bcx, compare_scalar_types(bcx, test_val,
-                                                                      val, t, hir::BiGe,
-                                                                      debug_loc))
-                            }
-                        }
-                    };
-                    bcx = fcx.new_temp_block("compare_next");
-
-                    // If none of the sub-cases match, and the current condition
-                    // is guarded or has multiple patterns, move on to the next
-                    // condition, if there is any, rather than falling back to
-                    // the default.
-                    let guarded = m[i].data.arm.guard.is_some();
-                    let multi_pats = m[i].pats.len() > 1;
-                    if i + 1 < len && (guarded || multi_pats || kind == CompareSliceLength) {
-                        branch_chk = Some(JumpToBasicBlock(bcx.llbb));
-                    }
-                    CondBr(after_cx, matches, opt_cx.llbb, bcx.llbb, debug_loc);
-                }
-                _ => ()
-            }
-        } else if kind == Compare || kind == CompareSliceLength {
-            Br(bcx, else_cx.llbb, debug_loc);
-        }
-
-        let mut size = 0;
-        let mut unpacked = Vec::new();
-        match *opt {
-            Variant(disr_val, ref repr, _, _) => {
-                let ExtractedBlock {vals: argvals, bcx: new_bcx} =
-                    extract_variant_args(opt_cx, &repr, disr_val, val);
-                size = argvals.len();
-                unpacked = argvals;
-                opt_cx = new_bcx;
-            }
-            SliceLengthEqual(len, _) => {
-                let args = extract_vec_elems(opt_cx, left_ty, len, 0, val);
-                size = args.vals.len();
-                unpacked = args.vals.clone();
-                opt_cx = args.bcx;
-            }
-            SliceLengthGreaterOrEqual(before, after, _) => {
-                let args = extract_vec_elems(opt_cx, left_ty, before, after, val);
-                size = args.vals.len();
-                unpacked = args.vals.clone();
-                opt_cx = args.bcx;
-            }
-            ConstantValue(..) | ConstantRange(..) => ()
-        }
-        let opt_ms = enter_opt(opt_cx, pat_id, dm, m, opt, col, size, val);
-        let mut opt_vals: Vec<_> = unpacked.into_iter()
-            .map(|v|MatchInput::from_val(v))
-            .collect();
-        opt_vals.extend_from_slice(&vals_left[..]);
-        compile_submatch(opt_cx,
-                         &opt_ms[..],
-                         &opt_vals[..],
-                         branch_chk.as_ref().unwrap_or(chk),
-                         has_genuine_default);
-    }
-
-    // Compile the fall-through case, if any
-    if !exhaustive && kind != Single {
-        if kind == Compare || kind == CompareSliceLength {
-            Br(bcx, else_cx.llbb, DebugLoc::None);
-        }
-        match chk {
-            // If there is only one default arm left, move on to the next
-            // condition explicitly rather than (eventually) falling back to
-            // the last default arm.
-            &JumpToBasicBlock(_) if defaults.len() == 1 && has_genuine_default => {
-                chk.handle_fail(else_cx);
-            }
-            _ => {
-                compile_submatch(else_cx,
-                                 &defaults[..],
-                                 &vals_left[..],
-                                 chk,
-                                 has_genuine_default);
-            }
-        }
-    }
-}
-
-pub fn trans_match<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                               match_expr: &hir::Expr,
-                               discr_expr: &hir::Expr,
-                               arms: &[hir::Arm],
-                               dest: Dest)
-                               -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("match::trans_match");
-    trans_match_inner(bcx, match_expr.id, discr_expr, arms, dest)
-}
-
-/// Checks whether the binding in `discr` is assigned to anywhere in the expression `body`
-fn is_discr_reassigned(bcx: Block, discr: &hir::Expr, body: &hir::Expr) -> bool {
-    let (vid, field) = match discr.node {
-        hir::ExprPath(..) => match bcx.def(discr.id) {
-            Def::Local(_, vid) | Def::Upvar(_, vid, _, _) => (vid, None),
-            _ => return false
-        },
-        hir::ExprField(ref base, field) => {
-            let vid = match bcx.tcx().def_map.borrow().get(&base.id).map(|d| d.full_def()) {
-                Some(Def::Local(_, vid)) | Some(Def::Upvar(_, vid, _, _)) => vid,
-                _ => return false
-            };
-            (vid, Some(mc::NamedField(field.node)))
-        },
-        hir::ExprTupField(ref base, field) => {
-            let vid = match bcx.tcx().def_map.borrow().get(&base.id).map(|d| d.full_def()) {
-                Some(Def::Local(_, vid)) | Some(Def::Upvar(_, vid, _, _)) => vid,
-                _ => return false
-            };
-            (vid, Some(mc::PositionalField(field.node)))
-        },
-        _ => return false
-    };
-
-    let mut rc = ReassignmentChecker {
-        node: vid,
-        field: field,
-        reassigned: false
-    };
-    {
-        let infcx = infer::normalizing_infer_ctxt(bcx.tcx(), &bcx.tcx().tables);
-        let mut visitor = euv::ExprUseVisitor::new(&mut rc, &infcx);
-        visitor.walk_expr(body);
-    }
-    rc.reassigned
-}
-
-struct ReassignmentChecker {
-    node: ast::NodeId,
-    field: Option<mc::FieldName>,
-    reassigned: bool
-}
-
-// Determine if the expression we're matching on is reassigned to within
-// the body of the match's arm.
-// We only care about the `mutate` callback since this check only matters
-// for cases where the matched value is moved.
-impl<'tcx> euv::Delegate<'tcx> for ReassignmentChecker {
-    fn consume(&mut self, _: ast::NodeId, _: Span, _: mc::cmt, _: euv::ConsumeMode) {}
-    fn matched_pat(&mut self, _: &hir::Pat, _: mc::cmt, _: euv::MatchMode) {}
-    fn consume_pat(&mut self, _: &hir::Pat, _: mc::cmt, _: euv::ConsumeMode) {}
-    fn borrow(&mut self, _: ast::NodeId, _: Span, _: mc::cmt, _: ty::Region,
-              _: ty::BorrowKind, _: euv::LoanCause) {}
-    fn decl_without_init(&mut self, _: ast::NodeId, _: Span) {}
-
-    fn mutate(&mut self, _: ast::NodeId, _: Span, cmt: mc::cmt, _: euv::MutateMode) {
-        match cmt.cat {
-            Categorization::Upvar(mc::Upvar { id: ty::UpvarId { var_id: vid, .. }, .. }) |
-            Categorization::Local(vid) => self.reassigned |= self.node == vid,
-            Categorization::Interior(ref base_cmt, mc::InteriorField(field)) => {
-                match base_cmt.cat {
-                    Categorization::Upvar(mc::Upvar { id: ty::UpvarId { var_id: vid, .. }, .. }) |
-                    Categorization::Local(vid) => {
-                        self.reassigned |= self.node == vid &&
-                            (self.field.is_none() || Some(field) == self.field)
-                    },
-                    _ => {}
-                }
-            },
-            _ => {}
-        }
-    }
-}
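// An illustrative, user-level sketch of the situation the checker above exists
// for (the names are only for the example): when the matched value is
// reassigned inside the arm body, a by-value binding cannot alias the
// discriminant's memory and needs its own copy, e.g.
//
//     let mut x = String::from("old");
//     match x {
//         s => {
//             x = String::from("new");   // reassigns the matched discriminant
//             assert_eq!(s, "old");      // `s` must still hold the old value
//         }
//     }
//
// Detecting the `x = ...` mutation lets create_bindings_map (below) choose a
// by-copy translation mode instead of binding by reference into `x`.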
-
-fn create_bindings_map<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pat: &hir::Pat,
-                                   discr: &hir::Expr, body: &hir::Expr)
-                                   -> BindingsMap<'tcx> {
-    // Create the bindings map, which is a mapping from each binding name
-    // to an alloca() that will be the value for that local variable.
-    // Note that we use the names because each binding will have many ids
-    // from the various alternatives.
-    let ccx = bcx.ccx();
-    let tcx = bcx.tcx();
-    let reassigned = is_discr_reassigned(bcx, discr, body);
-    let mut bindings_map = FnvHashMap();
-    pat_bindings(&tcx.def_map, &pat, |bm, p_id, span, path1| {
-        let name = path1.node;
-        let variable_ty = node_id_type(bcx, p_id);
-        let llvariable_ty = type_of::type_of(ccx, variable_ty);
-        let tcx = bcx.tcx();
-        let param_env = tcx.empty_parameter_environment();
-
-        let llmatch;
-        let trmode;
-        let moves_by_default = variable_ty.moves_by_default(&param_env, span);
-        match bm {
-            hir::BindByValue(_) if !moves_by_default || reassigned =>
-            {
-                llmatch = alloca(bcx, llvariable_ty.ptr_to(), "__llmatch");
-                let llcopy = alloca(bcx, llvariable_ty, &bcx.name(name));
-                trmode = if moves_by_default {
-                    TrByMoveIntoCopy(llcopy)
-                } else {
-                    TrByCopy(llcopy)
-                };
-            }
-            hir::BindByValue(_) => {
-                // in this case, the final type of the variable will be T,
-                // but during matching we need to store a *T as explained
-                // above
-                llmatch = alloca(bcx, llvariable_ty.ptr_to(), &bcx.name(name));
-                trmode = TrByMoveRef;
-            }
-            hir::BindByRef(_) => {
-                llmatch = alloca(bcx, llvariable_ty, &bcx.name(name));
-                trmode = TrByRef;
-            }
-        };
-        bindings_map.insert(name, BindingInfo {
-            llmatch: llmatch,
-            trmode: trmode,
-            id: p_id,
-            span: span,
-            ty: variable_ty
-        });
-    });
-    return bindings_map;
-}
-
-fn trans_match_inner<'blk, 'tcx>(scope_cx: Block<'blk, 'tcx>,
-                                 match_id: ast::NodeId,
-                                 discr_expr: &hir::Expr,
-                                 arms: &[hir::Arm],
-                                 dest: Dest) -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("match::trans_match_inner");
-    let fcx = scope_cx.fcx;
-    let mut bcx = scope_cx;
-    let tcx = bcx.tcx();
-
-    let discr_datum = unpack_datum!(bcx, expr::trans_to_lvalue(bcx, discr_expr,
-                                                               "match"));
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let t = node_id_type(bcx, discr_expr.id);
-    let chk = if t.is_empty(tcx) {
-        Unreachable
-    } else {
-        Infallible
-    };
-
-    let arm_datas: Vec<ArmData> = arms.iter().map(|arm| ArmData {
-        bodycx: fcx.new_id_block("case_body", arm.body.id),
-        arm: arm,
-        bindings_map: create_bindings_map(bcx, &arm.pats[0], discr_expr, &arm.body)
-    }).collect();
-
-    let mut pat_renaming_map = if scope_cx.sess().opts.debuginfo != NoDebugInfo {
-        Some(FnvHashMap())
-    } else {
-        None
-    };
-
-    let arm_pats: Vec<Vec<P<hir::Pat>>> = {
-        let mut static_inliner = StaticInliner::new(scope_cx.tcx(),
-                                                    pat_renaming_map.as_mut());
-        arm_datas.iter().map(|arm_data| {
-            arm_data.arm.pats.iter().map(|p| static_inliner.fold_pat((*p).clone())).collect()
-        }).collect()
-    };
-
-    let mut matches = Vec::new();
-    for (arm_data, pats) in arm_datas.iter().zip(&arm_pats) {
-        matches.extend(pats.iter().map(|p| Match {
-            pats: vec![&p],
-            data: arm_data,
-            bound_ptrs: Vec::new(),
-            pat_renaming_map: pat_renaming_map.as_ref()
-        }));
-    }
-
-    // `compile_submatch` works on one column of arm patterns at a time and
-    // then peels that column off. So as we progress, it may become
-    // impossible to tell whether we have a genuine default arm, i.e.
-    // `_ => foo`, or not. Sometimes it is important to know that in order
-    // to decide whether to move on to the next condition or to fall back
-    // to the default arm.
-    let has_default = arms.last().map_or(false, |arm| {
-        arm.pats.len() == 1
-        && arm.pats.last().unwrap().node == PatKind::Wild
-    });
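    // For illustration (a sketch, with made-up arms): the last arm is a
    // "genuine default" only when it is a single wildcard pattern.
    //
    //     match n {              // has_default == true
    //         0 => a(),
    //         _ => b(),
    //     }
    //
    //     match n {              // has_default == false: last arm has two patterns
    //         0 => a(),
    //         1 | _ => b(),
    //     }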
-
-    compile_submatch(bcx, &matches[..], &[discr_datum.match_input()], &chk, has_default);
-
-    let mut arm_cxs = Vec::new();
-    for arm_data in &arm_datas {
-        let mut bcx = arm_data.bodycx;
-
-        // insert bindings into the lllocals map and add cleanups
-        let cs = fcx.push_custom_cleanup_scope();
-        bcx = insert_lllocals(bcx, &arm_data.bindings_map, Some(cleanup::CustomScope(cs)));
-        bcx = expr::trans_into(bcx, &arm_data.arm.body, dest);
-        bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, cs);
-        arm_cxs.push(bcx);
-    }
-
-    bcx = scope_cx.fcx.join_blocks(match_id, &arm_cxs[..]);
-    return bcx;
-}
-
-/// Generates code for a local variable declaration like `let <pat>;` or `let <pat> =
-/// <opt_init_expr>`.
-pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                               local: &hir::Local)
-                               -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("match::store_local");
-    let mut bcx = bcx;
-    let tcx = bcx.tcx();
-    let pat = &local.pat;
-
-    fn create_dummy_locals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                                       pat: &hir::Pat)
-                                       -> Block<'blk, 'tcx> {
-        let _icx = push_ctxt("create_dummy_locals");
-        // create dummy memory for the variables if we have no
-        // value to store into them immediately
-        let tcx = bcx.tcx();
-        pat_bindings(&tcx.def_map, pat, |_, p_id, _, path1| {
-            let scope = cleanup::var_scope(tcx, p_id);
-            bcx = mk_binding_alloca(
-                bcx, p_id, path1.node, scope, (),
-                "_match::store_local::create_dummy_locals",
-                |(), bcx, Datum { val: llval, ty, kind }| {
-                    // Dummy-locals start out uninitialized, so set their
-                    // drop-flag hints (if any) to "moved."
-                    if let Some(hint) = kind.dropflag_hint(bcx) {
-                        let moved_hint = adt::DTOR_MOVED_HINT;
-                        debug!("store moved_hint={} for hint={:?}, uninitialized dummy",
-                               moved_hint, hint);
-                        Store(bcx, C_u8(bcx.fcx.ccx, moved_hint), hint.to_value().value());
-                    }
-
-                    if kind.drop_flag_info.must_zero() {
-                        // if no drop-flag hint, or the hint requires
-                        // we maintain the embedded drop-flag, then
-                        // mark embedded drop-flag(s) as moved
-                        // (i.e. "already dropped").
-                        drop_done_fill_mem(bcx, llval, ty);
-                    }
-                    bcx
-                });
-        });
-        bcx
-    }
-
-    match local.init {
-        Some(ref init_expr) => {
-            // Optimize the "let x = expr" case. This just writes
-            // the result of evaluating `expr` directly into the alloca
-            // for `x`. Often the general path results in similar or the
-            // same code post-optimization, but not always. In particular,
-            // in unsafe code, you can have expressions like
-            //
-            //    let x = intrinsics::uninit();
-            //
-            // In such cases, the more general path is unsafe, because
-            // it assumes it is matching against a valid value.
-            match simple_name(pat) {
-                Some(name) => {
-                    let var_scope = cleanup::var_scope(tcx, local.id);
-                    return mk_binding_alloca(
-                        bcx, pat.id, name, var_scope, (),
-                        "_match::store_local",
-                        |(), bcx, Datum { val: v, .. }| expr::trans_into(bcx, &init_expr,
-                                                                         expr::SaveIn(v)));
-                }
-
-                None => {}
-            }
-
-            // General path.
-            let init_datum =
-                unpack_datum!(bcx, expr::trans_to_lvalue(bcx, &init_expr, "let"));
-            if bcx.sess().asm_comments() {
-                add_comment(bcx, "creating zeroable ref llval");
-            }
-            let var_scope = cleanup::var_scope(tcx, local.id);
-            bind_irrefutable_pat(bcx, pat, init_datum.match_input(), var_scope)
-        }
-        None => {
-            create_dummy_locals(bcx, pat)
-        }
-    }
-}
-
-fn mk_binding_alloca<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>,
-                                       p_id: ast::NodeId,
-                                       name: ast::Name,
-                                       cleanup_scope: cleanup::ScopeId,
-                                       arg: A,
-                                       caller_name: &'static str,
-                                       populate: F)
-                                       -> Block<'blk, 'tcx> where
-    F: FnOnce(A, Block<'blk, 'tcx>, Datum<'tcx, Lvalue>) -> Block<'blk, 'tcx>,
-{
-    let var_ty = node_id_type(bcx, p_id);
-
-    // Allocate memory on stack for the binding.
-    let llval = alloc_ty(bcx, var_ty, &bcx.name(name));
-    let lvalue = Lvalue::new_with_hint(caller_name, bcx, p_id, HintKind::DontZeroJustUse);
-    let datum = Datum::new(llval, var_ty, lvalue);
-
-    debug!("mk_binding_alloca cleanup_scope={:?} llval={} var_ty={:?}",
-           cleanup_scope, bcx.ccx().tn().val_to_string(llval), var_ty);
-
-    // Subtle: be sure that we *populate* the memory *before*
-    // we schedule the cleanup.
-    call_lifetime_start(bcx, llval);
-    let bcx = populate(arg, bcx, datum);
-    bcx.fcx.schedule_lifetime_end(cleanup_scope, llval);
-    bcx.fcx.schedule_drop_mem(cleanup_scope, llval, var_ty, lvalue.dropflag_hint(bcx));
-
-    // Now that memory is initialized and has cleanup scheduled,
-    // insert datum into the local variable map.
-    bcx.fcx.lllocals.borrow_mut().insert(p_id, datum);
-    bcx
-}
-
-/// A simple version of the pattern matching code that only handles
-/// irrefutable patterns. This is used in let/argument patterns,
-/// not in match statements. Unifying this code with the code above
-/// sounds nice, but in practice it produces very inefficient code,
-/// since the match code is so much more general. In most cases,
-/// LLVM is able to optimize the code, but it causes longer compile
-/// times and makes the generated code nigh impossible to read.
-///
-/// # Arguments
-/// - bcx: starting basic block context
-/// - pat: the irrefutable pattern being matched.
-/// - val: the value being matched -- must be an lvalue (by ref, with cleanup)
-pub fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                    pat: &hir::Pat,
-                                    val: MatchInput,
-                                    cleanup_scope: cleanup::ScopeId)
-                                    -> Block<'blk, 'tcx> {
-    debug!("bind_irrefutable_pat(bcx={}, pat={:?}, val={})",
-           bcx.to_str(),
-           pat,
-           bcx.val_to_string(val.val));
-
-    if bcx.sess().asm_comments() {
-        add_comment(bcx, &format!("bind_irrefutable_pat(pat={:?})",
-                                 pat));
-    }
-
-    let _indenter = indenter();
-
-    let _icx = push_ctxt("match::bind_irrefutable_pat");
-    let mut bcx = bcx;
-    let tcx = bcx.tcx();
-    let ccx = bcx.ccx();
-    match pat.node {
-        PatKind::Ident(pat_binding_mode, ref path1, ref inner) => {
-            if pat_is_binding(&tcx.def_map.borrow(), &pat) {
-                // Allocate the stack slot where the value of this
-                // binding will live and place it into the appropriate
-                // map.
-                bcx = mk_binding_alloca(
-                    bcx, pat.id, path1.node.name, cleanup_scope, (),
-                    "_match::bind_irrefutable_pat",
-                    |(), bcx, Datum { val: llval, ty, kind: _ }| {
-                        match pat_binding_mode {
-                            hir::BindByValue(_) => {
-                                // By value binding: move the value that `val`
-                                // points at into the binding's stack slot.
-                                let d = val.to_datum(ty);
-                                d.store_to(bcx, llval)
-                            }
-
-                            hir::BindByRef(_) => {
-                                // By ref binding: the value of the variable
-                                // is the pointer `val` itself or fat pointer referenced by `val`
-                                if type_is_fat_ptr(bcx.tcx(), ty) {
-                                    expr::copy_fat_ptr(bcx, val.val, llval);
-                                }
-                                else {
-                                    Store(bcx, val.val, llval);
-                                }
-
-                                bcx
-                            }
-                        }
-                    });
-            }
-
-            if let Some(ref inner_pat) = *inner {
-                bcx = bind_irrefutable_pat(bcx, &inner_pat, val, cleanup_scope);
-            }
-        }
-        PatKind::TupleStruct(_, ref sub_pats) => {
-            let opt_def = bcx.tcx().def_map.borrow().get(&pat.id).map(|d| d.full_def());
-            match opt_def {
-                Some(Def::Variant(enum_id, var_id)) => {
-                    let repr = adt::represent_node(bcx, pat.id);
-                    let vinfo = ccx.tcx().lookup_adt_def(enum_id).variant_with_id(var_id);
-                    let args = extract_variant_args(bcx,
-                                                    &repr,
-                                                    Disr::from(vinfo.disr_val),
-                                                    val);
-                    if let Some(ref sub_pat) = *sub_pats {
-                        for (i, &argval) in args.vals.iter().enumerate() {
-                            bcx = bind_irrefutable_pat(
-                                bcx,
-                                &sub_pat[i],
-                                MatchInput::from_val(argval),
-                                cleanup_scope);
-                        }
-                    }
-                }
-                Some(Def::Struct(..)) => {
-                    match *sub_pats {
-                        None => {
-                            // This is a unit-like struct. Nothing to do here.
-                        }
-                        Some(ref elems) => {
-                            // This is the tuple struct case.
-                            let repr = adt::represent_node(bcx, pat.id);
-                            let val = adt::MaybeSizedValue::sized(val.val);
-                            for (i, elem) in elems.iter().enumerate() {
-                                let fldptr = adt::trans_field_ptr(bcx, &repr,
-                                                                  val, Disr(0), i);
-                                bcx = bind_irrefutable_pat(
-                                    bcx,
-                                    &elem,
-                                    MatchInput::from_val(fldptr),
-                                    cleanup_scope);
-                            }
-                        }
-                    }
-                }
-                _ => {
-                    // Nothing to do here.
-                }
-            }
-        }
-        PatKind::Struct(_, ref fields, _) => {
-            let tcx = bcx.tcx();
-            let pat_ty = node_id_type(bcx, pat.id);
-            let pat_repr = adt::represent_type(bcx.ccx(), pat_ty);
-            let pat_v = VariantInfo::of_node(tcx, pat_ty, pat.id);
-
-            let val = if type_is_sized(tcx, pat_ty) {
-                adt::MaybeSizedValue::sized(val.val)
-            } else {
-                let data = Load(bcx, expr::get_dataptr(bcx, val.val));
-                let meta = Load(bcx, expr::get_meta(bcx, val.val));
-                adt::MaybeSizedValue::unsized_(data, meta)
-            };
-
-            for f in fields {
-                let name = f.node.name;
-                let field_idx = pat_v.field_index(name);
-                let mut fldptr = adt::trans_field_ptr(
-                    bcx,
-                    &pat_repr,
-                    val,
-                    pat_v.discr,
-                    field_idx);
-
-                let fty = pat_v.fields[field_idx].1;
-                // If it's not sized, then construct a fat pointer instead of
-                // a regular one
-                if !type_is_sized(tcx, fty) {
-                    let scratch = alloc_ty(bcx, fty, "__struct_field_fat_ptr");
-                    debug!("Creating fat pointer {}", bcx.val_to_string(scratch));
-                    Store(bcx, fldptr, expr::get_dataptr(bcx, scratch));
-                    Store(bcx, val.meta, expr::get_meta(bcx, scratch));
-                    fldptr = scratch;
-                }
-                bcx = bind_irrefutable_pat(bcx,
-                                           &f.node.pat,
-                                           MatchInput::from_val(fldptr),
-                                           cleanup_scope);
-            }
-        }
-        PatKind::Tup(ref elems) => {
-            let repr = adt::represent_node(bcx, pat.id);
-            let val = adt::MaybeSizedValue::sized(val.val);
-            for (i, elem) in elems.iter().enumerate() {
-                let fldptr = adt::trans_field_ptr(bcx, &repr, val, Disr(0), i);
-                bcx = bind_irrefutable_pat(
-                    bcx,
-                    &elem,
-                    MatchInput::from_val(fldptr),
-                    cleanup_scope);
-            }
-        }
-        PatKind::Box(ref inner) => {
-            let pat_ty = node_id_type(bcx, inner.id);
-            // Pass along DSTs as fat pointers.
-            let val = if type_is_fat_ptr(tcx, pat_ty) {
-                // We need to check for this, as the pattern could be binding
-                // a fat pointer by-value.
-                if let PatKind::Ident(hir::BindByRef(_),_,_) = inner.node {
-                    val.val
-                } else {
-                    Load(bcx, val.val)
-                }
-            } else if type_is_sized(tcx, pat_ty) {
-                Load(bcx, val.val)
-            } else {
-                val.val
-            };
-            bcx = bind_irrefutable_pat(
-                bcx, &inner, MatchInput::from_val(val), cleanup_scope);
-        }
-        PatKind::Ref(ref inner, _) => {
-            let pat_ty = node_id_type(bcx, inner.id);
-            // Pass along DSTs as fat pointers.
-            let val = if type_is_fat_ptr(tcx, pat_ty) {
-                // We need to check for this, as the pattern could be binding
-                // a fat pointer by-value.
-                if let PatKind::Ident(hir::BindByRef(_),_,_) = inner.node {
-                    val.val
-                } else {
-                    Load(bcx, val.val)
-                }
-            } else if type_is_sized(tcx, pat_ty) {
-                Load(bcx, val.val)
-            } else {
-                val.val
-            };
-            bcx = bind_irrefutable_pat(
-                bcx,
-                &inner,
-                MatchInput::from_val(val),
-                cleanup_scope);
-        }
-        PatKind::Vec(ref before, ref slice, ref after) => {
-            let pat_ty = node_id_type(bcx, pat.id);
-            let mut extracted = extract_vec_elems(bcx, pat_ty, before.len(), after.len(), val);
-            match slice {
-                &Some(_) => {
-                    extracted.vals.insert(
-                        before.len(),
-                        bind_subslice_pat(bcx, pat.id, val, before.len(), after.len())
-                    );
-                }
-                &None => ()
-            }
-            bcx = before
-                .iter()
-                .chain(slice.iter())
-                .chain(after.iter())
-                .zip(extracted.vals)
-                .fold(bcx, |bcx, (inner, elem)| {
-                    bind_irrefutable_pat(
-                        bcx,
-                        &inner,
-                        MatchInput::from_val(elem),
-                        cleanup_scope)
-                });
-        }
-        PatKind::Path(..) | PatKind::QPath(..) | PatKind::Wild | PatKind::Lit(_) |
-        PatKind::Range(_, _) => ()
-    }
-    return bcx;
-}
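// Illustrative examples (a sketch; the identifiers are invented) of the
// irrefutable patterns that bind_irrefutable_pat above handles outside of
// `match`:
//
//     let (a, b) = pair;            // PatKind::Tup
//     let Point { x, y } = p;       // PatKind::Struct
//     let Wrapper(inner) = w;       // PatKind::TupleStruct on a tuple struct
//     fn f(&v: &i32) {}             // PatKind::Ref in argument position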
diff --git a/src/librustc_trans/trans/adt.rs b/src/librustc_trans/trans/adt.rs
deleted file mode 100644 (file)
index ba227d6..0000000
+++ /dev/null
@@ -1,1505 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! # Representation of Algebraic Data Types
-//!
-//! This module determines how to represent enums, structs, and tuples
-//! based on their monomorphized types; it is responsible both for
-//! choosing a representation and translating basic operations on
-//! values of those types.  (Note: exporting the representations for
-//! debuggers is handled in debuginfo.rs, not here.)
-//!
-//! Note that the interface treats everything as a general case of an
-//! enum, so structs/tuples/etc. have one pseudo-variant with
-//! discriminant 0; i.e., as if they were a univariant enum.
-//!
-//! Having everything in one place will enable improvements to data
-//! structure representation; possibilities include:
-//!
-//! - User-specified alignment (e.g., cacheline-aligning parts of
-//!   concurrently accessed data structures); LLVM can't represent this
-//!   directly, so we'd have to insert padding fields in any structure
-//!   that might contain one and adjust GEP indices accordingly.  See
-//!   issue #4578.
-//!
-//! - Store nested enums' discriminants in the same word.  Rather, if
-//!   some variants start with enums, and those enums' representations
-//!   have unused alignment padding between discriminant and body, the
-//!   outer enum's discriminant can be stored there and those variants
-//!   can start at offset 0.  Kind of fancy, and might need work to
-//!   make copies of the inner enum type cooperate, but it could help
-//!   with `Option` or `Result` wrapped around another enum.
-//!
-//! - Tagged pointers would be neat, but given that any type can be
-//!   used unboxed and any field can have pointers (including mutable)
-//!   taken to it, implementing them for Rust seems difficult.
-
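// A rough illustration of the "everything is an enum" view described above:
// all three of the following get a single pseudo-variant with discriminant 0,
// i.e. a `Univariant` representation (the types are made up for the example).
//
//     struct Point { x: i32, y: i32 }     // a struct
//     (i32, i32)                          // a tuple
//     enum OneCase { Only(i32, i32) }     // a single-variant enum
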
-pub use self::Repr::*;
-use super::Disr;
-
-use std;
-use std::rc::Rc;
-
-use llvm::{ValueRef, True, IntEQ, IntNE};
-use back::abi::FAT_PTR_ADDR;
-use middle::subst;
-use middle::ty::{self, Ty};
-use syntax::ast;
-use syntax::attr;
-use syntax::attr::IntType;
-use trans::_match;
-use trans::base::InitAlloca;
-use trans::build::*;
-use trans::cleanup;
-use trans::cleanup::CleanupMethods;
-use trans::common::*;
-use trans::datum;
-use trans::debuginfo::DebugLoc;
-use trans::glue;
-use trans::machine;
-use trans::monomorphize;
-use trans::type_::Type;
-use trans::type_of;
-
-type Hint = attr::ReprAttr;
-
-// Representation of the context surrounding an unsized type. I want
-// to be able to track the drop flags that are injected by trans.
-#[derive(Clone, Copy, PartialEq, Debug)]
-pub struct TypeContext {
-    prefix: Type,
-    needs_drop_flag: bool,
-}
-
-impl TypeContext {
-    pub fn prefix(&self) -> Type { self.prefix }
-    pub fn needs_drop_flag(&self) -> bool { self.needs_drop_flag }
-
-    fn direct(t: Type) -> TypeContext {
-        TypeContext { prefix: t, needs_drop_flag: false }
-    }
-    fn may_need_drop_flag(t: Type, needs_drop_flag: bool) -> TypeContext {
-        TypeContext { prefix: t, needs_drop_flag: needs_drop_flag }
-    }
-    pub fn to_string(self) -> String {
-        let TypeContext { prefix, needs_drop_flag } = self;
-        format!("TypeContext {{ prefix: {}, needs_drop_flag: {} }}",
-                prefix.to_string(), needs_drop_flag)
-    }
-}
-
-/// Representations.
-#[derive(Eq, PartialEq, Debug)]
-pub enum Repr<'tcx> {
-    /// C-like enums; basically an int.
-    CEnum(IntType, Disr, Disr), // discriminant range (signedness based on the IntType)
-    /// Single-case variants, and structs/tuples/records.
-    ///
-    /// Structs with destructors need a dynamic destroyedness flag to
-    /// avoid running the destructor too many times; this is included
-    /// in the `Struct` if present.
-    /// (The flag, if nonzero, represents the initialization value to use;
-    ///  if zero, then use no flag at all.)
-    Univariant(Struct<'tcx>, u8),
-    /// General-case enums: for each case there is a struct, and they
-    /// all start with a field for the discriminant.
-    ///
-    /// Types with destructors need a dynamic destroyedness flag to
-    /// avoid running the destructor too many times; the last argument
-    /// indicates whether such a flag is present.
-    /// (The flag, if nonzero, represents the initialization value to use;
-    ///  if zero, then use no flag at all.)
-    General(IntType, Vec<Struct<'tcx>>, u8),
-    /// Two cases distinguished by a nullable pointer: the case with discriminant
-    /// `nndiscr` must have a single field which is known to be nonnull due to its type.
-    /// The other case is known to be zero sized. Hence we represent the enum
-    /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
-    /// otherwise it indicates the other case.
-    RawNullablePointer {
-        nndiscr: Disr,
-        nnty: Ty<'tcx>,
-        nullfields: Vec<Ty<'tcx>>
-    },
-    /// Two cases distinguished by a nullable pointer: the case with discriminant
-    /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
-    /// field is known to be nonnull due to its type; if that field is null, then
-    /// it represents the other case, which is inhabited by at most one value
-    /// (and all other fields are undefined/unused).
-    ///
-    /// For example, `std::option::Option` instantiated at a safe pointer type
-    /// is represented such that `None` is a null pointer and `Some` is the
-    /// identity function.
-    StructWrappedNullablePointer {
-        nonnull: Struct<'tcx>,
-        nndiscr: Disr,
-        discrfield: DiscrField,
-        nullfields: Vec<Ty<'tcx>>,
-    }
-}
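// An illustrative (non-exhaustive) mapping from source types to the variants
// above; the concrete types here are only examples:
//
//     enum Ordering { Less, Equal, Greater }              // CEnum
//     struct Point { x: i32, y: i32 }                     // Univariant
//     enum Shape { Circle(f64), Rect(f64, f64), Empty }   // General
//     Option<&u32>           // RawNullablePointer: `None` is the null pointer
//     Option<(bool, &u32)>   // StructWrappedNullablePointer: the inner `&u32`
//                            // doubles as the discriminant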
-
-/// For structs, and struct-like parts of anything fancier.
-#[derive(Eq, PartialEq, Debug)]
-pub struct Struct<'tcx> {
-    // If the struct is a DST, then the size and alignment do not take into
-    // account the unsized fields of the struct.
-    pub size: u64,
-    pub align: u32,
-    pub sized: bool,
-    pub packed: bool,
-    pub fields: Vec<Ty<'tcx>>,
-}
-
-#[derive(Copy, Clone)]
-pub struct MaybeSizedValue {
-    pub value: ValueRef,
-    pub meta: ValueRef,
-}
-
-impl MaybeSizedValue {
-    pub fn sized(value: ValueRef) -> MaybeSizedValue {
-        MaybeSizedValue {
-            value: value,
-            meta: std::ptr::null_mut()
-        }
-    }
-
-    pub fn unsized_(value: ValueRef, meta: ValueRef) -> MaybeSizedValue {
-        MaybeSizedValue {
-            value: value,
-            meta: meta
-        }
-    }
-
-    pub fn has_meta(&self) -> bool {
-        !self.meta.is_null()
-    }
-}
-
-/// Convenience for `represent_type`.  There should probably be more of
-/// these, for places in trans where the `Ty` isn't directly
-/// available.
-pub fn represent_node<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                  node: ast::NodeId) -> Rc<Repr<'tcx>> {
-    represent_type(bcx.ccx(), node_id_type(bcx, node))
-}
-
-/// Decides how to represent a given type.
-pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                t: Ty<'tcx>)
-                                -> Rc<Repr<'tcx>> {
-    debug!("Representing: {}", t);
-    match cx.adt_reprs().borrow().get(&t) {
-        Some(repr) => return repr.clone(),
-        None => {}
-    }
-
-    let repr = Rc::new(represent_type_uncached(cx, t));
-    debug!("Represented as: {:?}", repr);
-    cx.adt_reprs().borrow_mut().insert(t, repr.clone());
-    repr
-}
-
-const fn repeat_u8_as_u32(val: u8) -> u32 {
-    (val as u32) << 24 | (val as u32) << 16 | (val as u32) << 8 | val as u32
-}
-
-const fn repeat_u8_as_u64(val: u8) -> u64 {
-    (repeat_u8_as_u32(val) as u64) << 32 | repeat_u8_as_u32(val) as u64
-}
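// For example, repeat_u8_as_u32(0xd4) == 0xd4d4_d4d4 and
// repeat_u8_as_u64(0xd4) == 0xd4d4_d4d4_d4d4_d4d4; this is how the *_U64
// constants below are derived from their one-byte drop-flag values.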
-
-/// `DTOR_NEEDED_HINT` is a stack-local hint that just means
-/// "we do not know whether the destructor has run or not; check the
-/// drop-flag embedded in the value itself."
-pub const DTOR_NEEDED_HINT: u8 = 0x3d;
-
-/// `DTOR_MOVED_HINT` is a stack-local hint that means "this value has
-/// definitely been moved; you do not need to run its destructor."
-///
-/// (However, for now, such values may still end up being explicitly
-/// zeroed by the generated code; this is the distinction between
-/// `datum::DropFlagInfo::ZeroAndMaintain` versus
-/// `datum::DropFlagInfo::DontZeroJustUse`.)
-pub const DTOR_MOVED_HINT: u8 = 0x2d;
-
-pub const DTOR_NEEDED: u8 = 0xd4;
-#[allow(dead_code)]
-pub const DTOR_NEEDED_U64: u64 = repeat_u8_as_u64(DTOR_NEEDED);
-
-pub const DTOR_DONE: u8 = 0x1d;
-#[allow(dead_code)]
-pub const DTOR_DONE_U64: u64 = repeat_u8_as_u64(DTOR_DONE);
-
-fn dtor_to_init_u8(dtor: bool) -> u8 {
-    if dtor { DTOR_NEEDED } else { 0 }
-}
-
-pub trait GetDtorType<'tcx> { fn dtor_type(&self) -> Ty<'tcx>; }
-impl<'tcx> GetDtorType<'tcx> for ty::ctxt<'tcx> {
-    fn dtor_type(&self) -> Ty<'tcx> { self.types.u8 }
-}
-
-fn dtor_active(flag: u8) -> bool {
-    flag != 0
-}
-
-fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                     t: Ty<'tcx>) -> Repr<'tcx> {
-    match t.sty {
-        ty::TyTuple(ref elems) => {
-            Univariant(mk_struct(cx, &elems[..], false, t), 0)
-        }
-        ty::TyStruct(def, substs) => {
-            let mut ftys = def.struct_variant().fields.iter().map(|field| {
-                monomorphize::field_ty(cx.tcx(), substs, field)
-            }).collect::<Vec<_>>();
-            let packed = cx.tcx().lookup_packed(def.did);
-            // FIXME(16758) don't add a drop flag to unsized structs, as it
-            // won't actually be in the location we say it is because it'll be after
-            // the unsized field. Several other pieces of code assume that the unsized
-            // field is definitely the last one.
-            let dtor = def.dtor_kind().has_drop_flag() && type_is_sized(cx.tcx(), t);
-            if dtor {
-                ftys.push(cx.tcx().dtor_type());
-            }
-
-            Univariant(mk_struct(cx, &ftys[..], packed, t), dtor_to_init_u8(dtor))
-        }
-        ty::TyClosure(_, ref substs) => {
-            Univariant(mk_struct(cx, &substs.upvar_tys, false, t), 0)
-        }
-        ty::TyEnum(def, substs) => {
-            let cases = get_cases(cx.tcx(), def, substs);
-            let hint = *cx.tcx().lookup_repr_hints(def.did).get(0)
-                .unwrap_or(&attr::ReprAny);
-
-            let dtor = def.dtor_kind().has_drop_flag();
-
-            if cases.is_empty() {
-                // Uninhabitable; represent as unit
-                // (Typechecking will reject discriminant-sizing attrs.)
-                assert_eq!(hint, attr::ReprAny);
-                let ftys = if dtor { vec!(cx.tcx().dtor_type()) } else { vec!() };
-                return Univariant(mk_struct(cx, &ftys[..], false, t),
-                                  dtor_to_init_u8(dtor));
-            }
-
-            if !dtor && cases.iter().all(|c| c.tys.is_empty()) {
-                // All bodies empty -> intlike
-                let discrs: Vec<_> = cases.iter().map(|c| Disr::from(c.discr)).collect();
-                let bounds = IntBounds {
-                    ulo: discrs.iter().min().unwrap().0,
-                    uhi: discrs.iter().max().unwrap().0,
-                    slo: discrs.iter().map(|n| n.0 as i64).min().unwrap(),
-                    shi: discrs.iter().map(|n| n.0 as i64).max().unwrap()
-                };
-                return mk_cenum(cx, hint, &bounds);
-            }
-
-            // Since there's at least one
-            // non-empty body, explicit discriminants should have
-            // been rejected by a checker before this point.
-            if !cases.iter().enumerate().all(|(i,c)| c.discr == Disr::from(i)) {
-                cx.sess().bug(&format!("non-C-like enum {} with specified \
-                                        discriminants",
-                                       cx.tcx().item_path_str(def.did)));
-            }
-
-            if cases.len() == 1 {
-                // Equivalent to a struct/tuple/newtype.
-                // (Typechecking will reject discriminant-sizing attrs.)
-                assert_eq!(hint, attr::ReprAny);
-                let mut ftys = cases[0].tys.clone();
-                if dtor { ftys.push(cx.tcx().dtor_type()); }
-                return Univariant(mk_struct(cx, &ftys[..], false, t),
-                                  dtor_to_init_u8(dtor));
-            }
-
-            if !dtor && cases.len() == 2 && hint == attr::ReprAny {
-                // Nullable pointer optimization
-                let mut discr = 0;
-                while discr < 2 {
-                    if cases[1 - discr].is_zerolen(cx, t) {
-                        let st = mk_struct(cx, &cases[discr].tys,
-                                           false, t);
-                        match cases[discr].find_ptr(cx) {
-                            Some(ref df) if df.len() == 1 && st.fields.len() == 1 => {
-                                return RawNullablePointer {
-                                    nndiscr: Disr::from(discr),
-                                    nnty: st.fields[0],
-                                    nullfields: cases[1 - discr].tys.clone()
-                                };
-                            }
-                            Some(mut discrfield) => {
-                                discrfield.push(0);
-                                discrfield.reverse();
-                                return StructWrappedNullablePointer {
-                                    nndiscr: Disr::from(discr),
-                                    nonnull: st,
-                                    discrfield: discrfield,
-                                    nullfields: cases[1 - discr].tys.clone()
-                                };
-                            }
-                            None => {}
-                        }
-                    }
-                    discr += 1;
-                }
-            }
-
-            // The general case.
-            assert!((cases.len() - 1) as i64 >= 0);
-            let bounds = IntBounds { ulo: 0, uhi: (cases.len() - 1) as u64,
-                                     slo: 0, shi: (cases.len() - 1) as i64 };
-            let min_ity = range_to_inttype(cx, hint, &bounds);
-
-            // Create the set of structs that represent each variant
-            // Use the minimum integer type we figured out above
-            let fields : Vec<_> = cases.iter().map(|c| {
-                let mut ftys = vec!(ty_of_inttype(cx.tcx(), min_ity));
-                ftys.extend_from_slice(&c.tys);
-                if dtor { ftys.push(cx.tcx().dtor_type()); }
-                mk_struct(cx, &ftys, false, t)
-            }).collect();
-
-
-            // Check to see if we should use a different type for the
-            // discriminant. If the overall alignment of the type is
-            // the same as the first field in each variant, we can safely use
-            // an alignment-sized type.
-            // We increase the size of the discriminant to avoid LLVM copying
-            // padding when it doesn't need to. This normally causes unaligned
-            // load/stores and excessive memcpy/memset operations. By using a
-            // bigger integer size, LLVM can be sure about its contents and
-            // won't be so conservative.
-            // This check is needed to avoid increasing the size of types when
-            // the alignment of the first field is smaller than the overall
-            // alignment of the type.
-            let (_, align) = union_size_and_align(&fields);
-            let mut use_align = true;
-            for st in &fields {
-                // Get the first non-zero-sized field
-                let field = st.fields.iter().skip(1).filter(|ty| {
-                    let t = type_of::sizing_type_of(cx, **ty);
-                    machine::llsize_of_real(cx, t) != 0 ||
-                    // This case is only relevant for zero-sized types with large alignment
-                    machine::llalign_of_min(cx, t) != 1
-                }).next();
-
-                if let Some(field) = field {
-                    let field_align = type_of::align_of(cx, *field);
-                    if field_align != align {
-                        use_align = false;
-                        break;
-                    }
-                }
-            }
-            let ity = if use_align {
-                // Use the overall alignment
-                match align {
-                    1 => attr::UnsignedInt(ast::UintTy::U8),
-                    2 => attr::UnsignedInt(ast::UintTy::U16),
-                    4 => attr::UnsignedInt(ast::UintTy::U32),
-                    8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 =>
-                        attr::UnsignedInt(ast::UintTy::U64),
-                    _ => min_ity // use min_ity as a fallback
-                }
-            } else {
-                min_ity
-            };
-
-            let fields : Vec<_> = cases.iter().map(|c| {
-                let mut ftys = vec!(ty_of_inttype(cx.tcx(), ity));
-                ftys.extend_from_slice(&c.tys);
-                if dtor { ftys.push(cx.tcx().dtor_type()); }
-                mk_struct(cx, &ftys[..], false, t)
-            }).collect();
-
-            ensure_enum_fits_in_address_space(cx, &fields[..], t);
-
-            General(ity, fields, dtor_to_init_u8(dtor))
-        }
-        _ => cx.sess().bug(&format!("adt::represent_type called on non-ADT type: {}", t))
-    }
-}
-
-// this should probably all be in ty
-struct Case<'tcx> {
-    discr: Disr,
-    tys: Vec<Ty<'tcx>>
-}
-
-/// This represents the (GEP) indices to follow to get to the discriminant field
-pub type DiscrField = Vec<usize>;
-
-fn find_discr_field_candidate<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                    ty: Ty<'tcx>,
-                                    mut path: DiscrField) -> Option<DiscrField> {
-    match ty.sty {
-        // Fat &T/&mut T/Box<T> i.e. T is [T], str, or Trait
-        ty::TyRef(_, ty::TypeAndMut { ty, .. }) | ty::TyBox(ty) if !type_is_sized(tcx, ty) => {
-            path.push(FAT_PTR_ADDR);
-            Some(path)
-        },
-
-        // Regular thin pointer: &T/&mut T/Box<T>
-        ty::TyRef(..) | ty::TyBox(..) => Some(path),
-
-        // Functions are just pointers
-        ty::TyBareFn(..) => Some(path),
-
-        // Is this the NonZero lang item wrapping a pointer or integer type?
-        ty::TyStruct(def, substs) if Some(def.did) == tcx.lang_items.non_zero() => {
-            let nonzero_fields = &def.struct_variant().fields;
-            assert_eq!(nonzero_fields.len(), 1);
-            let field_ty = monomorphize::field_ty(tcx, substs, &nonzero_fields[0]);
-            match field_ty.sty {
-                ty::TyRawPtr(ty::TypeAndMut { ty, .. }) if !type_is_sized(tcx, ty) => {
-                    path.extend_from_slice(&[0, FAT_PTR_ADDR]);
-                    Some(path)
-                },
-                ty::TyRawPtr(..) | ty::TyInt(..) | ty::TyUint(..) => {
-                    path.push(0);
-                    Some(path)
-                },
-                _ => None
-            }
-        },
-
-        // Perhaps one of the fields of this struct is non-zero
-        // let's recurse and find out
-        ty::TyStruct(def, substs) => {
-            for (j, field) in def.struct_variant().fields.iter().enumerate() {
-                let field_ty = monomorphize::field_ty(tcx, substs, field);
-                if let Some(mut fpath) = find_discr_field_candidate(tcx, field_ty, path.clone()) {
-                    fpath.push(j);
-                    return Some(fpath);
-                }
-            }
-            None
-        },
-
-        // Perhaps one of the upvars of this struct is non-zero
-        // Let's recurse and find out!
-        ty::TyClosure(_, ref substs) => {
-            for (j, &ty) in substs.upvar_tys.iter().enumerate() {
-                if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) {
-                    fpath.push(j);
-                    return Some(fpath);
-                }
-            }
-            None
-        },
-
-        // Can we use one of the fields in this tuple?
-        ty::TyTuple(ref tys) => {
-            for (j, &ty) in tys.iter().enumerate() {
-                if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) {
-                    fpath.push(j);
-                    return Some(fpath);
-                }
-            }
-            None
-        },
-
-        // Is this a fixed-size array of something non-zero
-        // with at least one element?
-        ty::TyArray(ety, d) if d > 0 => {
-            if let Some(mut vpath) = find_discr_field_candidate(tcx, ety, path) {
-                vpath.push(0);
-                Some(vpath)
-            } else {
-                None
-            }
-        },
-
-        // Anything else is not a pointer
-        _ => None
-    }
-}
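// Sketch of what the search above yields (illustrative): for
// Option<(bool, &u32)>, the non-null candidate is the `&u32` inside the
// tuple, so the path is built innermost-first and identifies "field 1 of the
// tuple"; `find_ptr` then appends the outer field index, and
// `represent_type_uncached` pushes a trailing 0 and reverses the whole path
// into outer-to-inner GEP order.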
-
-impl<'tcx> Case<'tcx> {
-    fn is_zerolen<'a>(&self, cx: &CrateContext<'a, 'tcx>, scapegoat: Ty<'tcx>) -> bool {
-        mk_struct(cx, &self.tys, false, scapegoat).size == 0
-    }
-
-    fn find_ptr<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Option<DiscrField> {
-        for (i, &ty) in self.tys.iter().enumerate() {
-            if let Some(mut path) = find_discr_field_candidate(cx.tcx(), ty, vec![]) {
-                path.push(i);
-                return Some(path);
-            }
-        }
-        None
-    }
-}
-
-fn get_cases<'tcx>(tcx: &ty::ctxt<'tcx>,
-                   adt: ty::AdtDef<'tcx>,
-                   substs: &subst::Substs<'tcx>)
-                   -> Vec<Case<'tcx>> {
-    adt.variants.iter().map(|vi| {
-        let field_tys = vi.fields.iter().map(|field| {
-            monomorphize::field_ty(tcx, substs, field)
-        }).collect();
-        Case { discr: Disr::from(vi.disr_val), tys: field_tys }
-    }).collect()
-}
-
-fn mk_struct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                       tys: &[Ty<'tcx>], packed: bool,
-                       scapegoat: Ty<'tcx>)
-                       -> Struct<'tcx> {
-    let sized = tys.iter().all(|&ty| type_is_sized(cx.tcx(), ty));
-    let lltys : Vec<Type> = if sized {
-        tys.iter().map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
-    } else {
-        tys.iter().filter(|&ty| type_is_sized(cx.tcx(), *ty))
-           .map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
-    };
-
-    ensure_struct_fits_in_address_space(cx, &lltys[..], packed, scapegoat);
-
-    let llty_rec = Type::struct_(cx, &lltys[..], packed);
-    Struct {
-        size: machine::llsize_of_alloc(cx, llty_rec),
-        align: machine::llalign_of_min(cx, llty_rec),
-        sized: sized,
-        packed: packed,
-        fields: tys.to_vec(),
-    }
-}
-
-#[derive(Debug)]
-struct IntBounds {
-    slo: i64,
-    shi: i64,
-    ulo: u64,
-    uhi: u64
-}
-
-fn mk_cenum<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                      hint: Hint, bounds: &IntBounds)
-                      -> Repr<'tcx> {
-    let it = range_to_inttype(cx, hint, bounds);
-    match it {
-        attr::SignedInt(_) => CEnum(it, Disr(bounds.slo as u64), Disr(bounds.shi as u64)),
-        attr::UnsignedInt(_) => CEnum(it, Disr(bounds.ulo), Disr(bounds.uhi))
-    }
-}
-
-fn range_to_inttype(cx: &CrateContext, hint: Hint, bounds: &IntBounds) -> IntType {
-    debug!("range_to_inttype: {:?} {:?}", hint, bounds);
-    // Lists of sizes to try.  u64 is always allowed as a fallback.
-    #[allow(non_upper_case_globals)]
-    const choose_shortest: &'static [IntType] = &[
-        attr::UnsignedInt(ast::UintTy::U8), attr::SignedInt(ast::IntTy::I8),
-        attr::UnsignedInt(ast::UintTy::U16), attr::SignedInt(ast::IntTy::I16),
-        attr::UnsignedInt(ast::UintTy::U32), attr::SignedInt(ast::IntTy::I32)];
-    #[allow(non_upper_case_globals)]
-    const at_least_32: &'static [IntType] = &[
-        attr::UnsignedInt(ast::UintTy::U32), attr::SignedInt(ast::IntTy::I32)];
-
-    let attempts;
-    match hint {
-        attr::ReprInt(span, ity) => {
-            if !bounds_usable(cx, ity, bounds) {
-                cx.sess().span_bug(span, "representation hint insufficient for discriminant range")
-            }
-            return ity;
-        }
-        attr::ReprExtern => {
-            attempts = match &cx.sess().target.target.arch[..] {
-                // WARNING: the ARM EABI has two variants; the one corresponding to `at_least_32`
-                // appears to be used on Linux and NetBSD, but some systems may use the variant
-                // corresponding to `choose_shortest`.  However, we don't run on those yet...?
-                "arm" => at_least_32,
-                _ => at_least_32,
-            }
-        }
-        attr::ReprAny => {
-            attempts = choose_shortest;
-        },
-        attr::ReprPacked => {
-            cx.tcx().sess.bug("range_to_inttype: found ReprPacked on an enum");
-        }
-        attr::ReprSimd => {
-            cx.tcx().sess.bug("range_to_inttype: found ReprSimd on an enum");
-        }
-    }
-    for &ity in attempts {
-        if bounds_usable(cx, ity, bounds) {
-            return ity;
-        }
-    }
-    return attr::UnsignedInt(ast::UintTy::U64);
-}
-
-pub fn ll_inttype(cx: &CrateContext, ity: IntType) -> Type {
-    match ity {
-        attr::SignedInt(t) => Type::int_from_ty(cx, t),
-        attr::UnsignedInt(t) => Type::uint_from_ty(cx, t)
-    }
-}
-
-fn bounds_usable(cx: &CrateContext, ity: IntType, bounds: &IntBounds) -> bool {
-    debug!("bounds_usable: {:?} {:?}", ity, bounds);
-    match ity {
-        attr::SignedInt(_) => {
-            let lllo = C_integral(ll_inttype(cx, ity), bounds.slo as u64, true);
-            let llhi = C_integral(ll_inttype(cx, ity), bounds.shi as u64, true);
-            bounds.slo == const_to_int(lllo) as i64 && bounds.shi == const_to_int(llhi) as i64
-        }
-        attr::UnsignedInt(_) => {
-            let lllo = C_integral(ll_inttype(cx, ity), bounds.ulo, false);
-            let llhi = C_integral(ll_inttype(cx, ity), bounds.uhi, false);
-            bounds.ulo == const_to_uint(lllo) as u64 && bounds.uhi == const_to_uint(llhi) as u64
-        }
-    }
-}
-
-pub fn ty_of_inttype<'tcx>(tcx: &ty::ctxt<'tcx>, ity: IntType) -> Ty<'tcx> {
-    match ity {
-        attr::SignedInt(t) => tcx.mk_mach_int(t),
-        attr::UnsignedInt(t) => tcx.mk_mach_uint(t)
-    }
-}
-
-// LLVM doesn't like types that don't fit in the address space
-fn ensure_struct_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                                 fields: &[Type],
-                                                 packed: bool,
-                                                 scapegoat: Ty<'tcx>) {
-    let mut offset = 0;
-    for &llty in fields {
-        // Invariant: offset < ccx.obj_size_bound() <= 1<<61
-        if !packed {
-            let type_align = machine::llalign_of_min(ccx, llty);
-            offset = roundup(offset, type_align);
-        }
-        // type_align is a power-of-2, so still offset < ccx.obj_size_bound()
-        // llsize_of_alloc(ccx, llty) is also less than ccx.obj_size_bound()
-        // so the sum is less than 1<<62 (and therefore can't overflow).
-        offset += machine::llsize_of_alloc(ccx, llty);
-
-        if offset >= ccx.obj_size_bound() {
-            ccx.report_overbig_object(scapegoat);
-        }
-    }
-}
-
-fn union_size_and_align(sts: &[Struct]) -> (machine::llsize, machine::llalign) {
-    let size = sts.iter().map(|st| st.size).max().unwrap();
-    let align = sts.iter().map(|st| st.align).max().unwrap();
-    (roundup(size, align), align)
-}
-
-fn ensure_enum_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                               fields: &[Struct],
-                                               scapegoat: Ty<'tcx>) {
-    let (total_size, _) = union_size_and_align(fields);
-
-    if total_size >= ccx.obj_size_bound() {
-        ccx.report_overbig_object(scapegoat);
-    }
-}
-
-
-/// LLVM-level types are a little complicated.
-///
-/// C-like enums need to be actual ints, not wrapped in a struct,
-/// because that changes the ABI on some platforms (see issue #10308).
-///
-/// For nominal types, in some cases, we need to use LLVM named structs
-/// and fill in the actual contents in a second pass to prevent
-/// unbounded recursion; see also the comments in `trans::type_of`.
-pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type {
-    let c = generic_type_of(cx, r, None, false, false, false);
-    assert!(!c.needs_drop_flag);
-    c.prefix
-}
-
-
-// Pass dst=true if the type you are passing is a DST. Yes, we could figure
-// this out, but if you call this on an unsized type without realising it, you
-// are going to get the wrong type (it will not include the unsized parts of it).
-pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                r: &Repr<'tcx>, dst: bool) -> Type {
-    let c = generic_type_of(cx, r, None, true, dst, false);
-    assert!(!c.needs_drop_flag);
-    c.prefix
-}
-pub fn sizing_type_context_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                        r: &Repr<'tcx>, dst: bool) -> TypeContext {
-    generic_type_of(cx, r, None, true, dst, true)
-}
-pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                    r: &Repr<'tcx>, name: &str) -> Type {
-    let c = generic_type_of(cx, r, Some(name), false, false, false);
-    assert!(!c.needs_drop_flag);
-    c.prefix
-}
-pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                r: &Repr<'tcx>, llty: &mut Type) {
-    match *r {
-        CEnum(..) | General(..) | RawNullablePointer { .. } => { }
-        Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } =>
-            llty.set_struct_body(&struct_llfields(cx, st, false, false),
-                                 st.packed)
-    }
-}
-
-fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                             r: &Repr<'tcx>,
-                             name: Option<&str>,
-                             sizing: bool,
-                             dst: bool,
-                             delay_drop_flag: bool) -> TypeContext {
-    debug!("adt::generic_type_of r: {:?} name: {:?} sizing: {} dst: {} delay_drop_flag: {}",
-           r, name, sizing, dst, delay_drop_flag);
-    match *r {
-        CEnum(ity, _, _) => TypeContext::direct(ll_inttype(cx, ity)),
-        RawNullablePointer { nnty, .. } =>
-            TypeContext::direct(type_of::sizing_type_of(cx, nnty)),
-        StructWrappedNullablePointer { nonnull: ref st, .. } => {
-            match name {
-                None => {
-                    TypeContext::direct(
-                        Type::struct_(cx, &struct_llfields(cx, st, sizing, dst),
-                                      st.packed))
-                }
-                Some(name) => {
-                    assert_eq!(sizing, false);
-                    TypeContext::direct(Type::named_struct(cx, name))
-                }
-            }
-        }
-        Univariant(ref st, dtor_needed) => {
-            let dtor_needed = dtor_needed != 0;
-            match name {
-                None => {
-                    let mut fields = struct_llfields(cx, st, sizing, dst);
-                    if delay_drop_flag && dtor_needed {
-                        fields.pop();
-                    }
-                    TypeContext::may_need_drop_flag(
-                        Type::struct_(cx, &fields,
-                                      st.packed),
-                        delay_drop_flag && dtor_needed)
-                }
-                Some(name) => {
-                    // Hypothesis: named structs can never need a
-                    // drop flag. (... needs validation.)
-                    assert_eq!(sizing, false);
-                    TypeContext::direct(Type::named_struct(cx, name))
-                }
-            }
-        }
-        General(ity, ref sts, dtor_needed) => {
-            let dtor_needed = dtor_needed != 0;
-            // We need a representation that has:
-            // * The alignment of the most-aligned field
-            // * The size of the largest variant (rounded up to that alignment)
-            // * No alignment padding anywhere any variant has actual data
-            //   (currently matters only for enums small enough to be immediate)
-            // * The discriminant in an obvious place.
-            //
-            // So we start with the discriminant, pad it up to the alignment with
-            // more of its own type, then use alignment-sized ints to get the rest
-            // of the size.
-            //
-            // FIXME #10604: this breaks when vector types are present.
-            let (size, align) = union_size_and_align(&sts[..]);
-            let align_s = align as u64;
-            assert_eq!(size % align_s, 0);
-            let align_units = size / align_s - 1;
-
-            let discr_ty = ll_inttype(cx, ity);
-            let discr_size = machine::llsize_of_alloc(cx, discr_ty);
-            let fill_ty = match align_s {
-                1 => Type::array(&Type::i8(cx), align_units),
-                2 => Type::array(&Type::i16(cx), align_units),
-                4 => Type::array(&Type::i32(cx), align_units),
-                8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 =>
-                                 Type::array(&Type::i64(cx), align_units),
-                a if a.count_ones() == 1 => Type::array(&Type::vector(&Type::i32(cx), a / 4),
-                                                              align_units),
-                _ => panic!("unsupported enum alignment: {}", align)
-            };
-            assert_eq!(machine::llalign_of_min(cx, fill_ty), align);
-            assert_eq!(align_s % discr_size, 0);
-            let mut fields: Vec<Type> =
-                [discr_ty,
-                 Type::array(&discr_ty, align_s / discr_size - 1),
-                 fill_ty].iter().cloned().collect();
-            if delay_drop_flag && dtor_needed {
-                fields.pop();
-            }
-            match name {
-                None => {
-                    TypeContext::may_need_drop_flag(
-                        Type::struct_(cx, &fields[..], false),
-                        delay_drop_flag && dtor_needed)
-                }
-                Some(name) => {
-                    let mut llty = Type::named_struct(cx, name);
-                    llty.set_struct_body(&fields[..], false);
-                    TypeContext::may_need_drop_flag(
-                        llty,
-                        delay_drop_flag && dtor_needed)
-                }
-            }
-        }
-    }
-}
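
The layout arithmetic in the General arm above is easier to follow with concrete numbers. The sketch below is an editor's illustration in Rust, not part of the deleted adt.rs, and the sizes are hypothetical; it mirrors the same computation: one discriminant, padding of the discriminant's own type up to the alignment, then alignment-sized fill units covering the largest variant.

// Editor's illustrative sketch; mirrors the General-case layout arithmetic
// above with hypothetical numbers. Not part of the deleted file.
fn general_layout(size: u64, align: u64, discr_size: u64) -> (u64, u64, u64) {
    // `size` is the size of the largest variant rounded up to `align`,
    // exactly what union_size_and_align returns.
    assert_eq!(size % align, 0);
    assert_eq!(align % discr_size, 0);
    let discr_pad_units = align / discr_size - 1; // trailing copies of the discriminant type
    let fill_units = size / align - 1;            // alignment-sized ints for the payload
    (1, discr_pad_units, fill_units)
}

fn main() {
    // Largest variant 24 bytes, 8-byte alignment, 4-byte (i32) discriminant:
    // { i32, [1 x i32], [2 x i64] } in the notation used above.
    assert_eq!(general_layout(24, 8, 4), (1, 1, 2));
}
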
-
-fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, st: &Struct<'tcx>,
-                             sizing: bool, dst: bool) -> Vec<Type> {
-    if sizing {
-        st.fields.iter().filter(|&ty| !dst || type_is_sized(cx.tcx(), *ty))
-            .map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
-    } else {
-        st.fields.iter().map(|&ty| type_of::in_memory_type_of(cx, ty)).collect()
-    }
-}
-
-/// Obtain a representation of the discriminant sufficient to translate
-/// destructuring; this may or may not involve the actual discriminant.
-///
-/// This should ideally be less tightly tied to `_match`.
-pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                r: &Repr<'tcx>,
-                                scrutinee: ValueRef,
-                                range_assert: bool)
-                                -> (_match::BranchKind, Option<ValueRef>) {
-    match *r {
-        CEnum(..) | General(..) |
-        RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
-            (_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None,
-                                                  range_assert)))
-        }
-        Univariant(..) => {
-            // N.B.: Univariant means <= 1 enum variants (*not* == 1 variants).
-            (_match::Single, None)
-        }
-    }
-}
-
-pub fn is_discr_signed<'tcx>(r: &Repr<'tcx>) -> bool {
-    match *r {
-        CEnum(ity, _, _) => ity.is_signed(),
-        General(ity, _, _) => ity.is_signed(),
-        Univariant(..) => false,
-        RawNullablePointer { .. } => false,
-        StructWrappedNullablePointer { .. } => false,
-    }
-}
-
-/// Obtain the actual discriminant of a value.
-pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
-                                   scrutinee: ValueRef, cast_to: Option<Type>,
-                                   range_assert: bool)
-    -> ValueRef {
-    debug!("trans_get_discr r: {:?}", r);
-    let val = match *r {
-        CEnum(ity, min, max) => {
-            load_discr(bcx, ity, scrutinee, min, max, range_assert)
-        }
-        General(ity, ref cases, _) => {
-            let ptr = StructGEP(bcx, scrutinee, 0);
-            load_discr(bcx, ity, ptr, Disr(0), Disr(cases.len() as u64 - 1),
-                       range_assert)
-        }
-        Univariant(..) => C_u8(bcx.ccx(), 0),
-        RawNullablePointer { nndiscr, nnty, .. } =>  {
-            let cmp = if nndiscr == Disr(0) { IntEQ } else { IntNE };
-            let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty);
-            ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty), DebugLoc::None)
-        }
-        StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
-            struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee)
-        }
-    };
-    match cast_to {
-        None => val,
-        Some(llty) => if is_discr_signed(r) { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) }
-    }
-}
-
-fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, discrfield: &DiscrField,
-                                    scrutinee: ValueRef) -> ValueRef {
-    let llptrptr = GEPi(bcx, scrutinee, &discrfield[..]);
-    let llptr = Load(bcx, llptrptr);
-    let cmp = if nndiscr == Disr(0) { IntEQ } else { IntNE };
-    ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)), DebugLoc::None)
-}
-
-/// Helper for cases where the discriminant is simply loaded.
-fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr,
-              range_assert: bool)
-    -> ValueRef {
-    let llty = ll_inttype(bcx.ccx(), ity);
-    assert_eq!(val_ty(ptr), llty.ptr_to());
-    let bits = machine::llbitsize_of_real(bcx.ccx(), llty);
-    assert!(bits <= 64);
-    let bits = bits as usize;
-    let mask = Disr(!0u64 >> (64 - bits));
-    // For a (max) discr of -1, max will be `-1 as usize`, which overflows.
-    // However, that is fine here (it would still represent the full range),
-    if max.wrapping_add(Disr(1)) & mask == min & mask || !range_assert {
-        // i.e., if the range is everything.  The lo==hi case would be
-        // rejected by the LLVM verifier (it would mean either an
-        // empty set, which is impossible, or the entire range of the
-        // type, which is pointless).
-        Load(bcx, ptr)
-    } else {
-        // llvm::ConstantRange can deal with ranges that wrap around,
-        // so an overflow on (max + 1) is fine.
-        LoadRangeAssert(bcx, ptr, min.0, max.0.wrapping_add(1), /* signed: */ True)
-    }
-}
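
As a worked example of the wrap-around check above, this small Rust program (an editor's sketch with hypothetical values, not part of the deleted file) evaluates the `max + 1 == min` comparison modulo the discriminant width:

// Editor's illustrative sketch of the load_discr range test; not from the original file.
fn range_is_everything(min: u64, max: u64, bits: u32) -> bool {
    let mask = !0u64 >> (64 - bits);
    // If max + 1 wraps around to min (modulo the discriminant width),
    // the range covers the whole type and no range assert is emitted.
    max.wrapping_add(1) & mask == min & mask
}

fn main() {
    // An 8-bit discriminant spanning 0..=255 covers the whole type: plain Load.
    assert!(range_is_everything(0, 255, 8));
    // 0..=2 does not, so a LoadRangeAssert over [0, 3) would be emitted instead.
    assert!(!range_is_everything(0, 2, 8));
}
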
-
-/// Yield information about how to dispatch a case of the
-/// discriminant-like value returned by `trans_switch`.
-///
-/// This should ideally be less tightly tied to `_match`.
-pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr)
-                              -> ValueRef {
-    match *r {
-        CEnum(ity, _, _) => {
-            C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true)
-        }
-        General(ity, _, _) => {
-            C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true)
-        }
-        Univariant(..) => {
-            bcx.ccx().sess().bug("no cases for univariants or structs")
-        }
-        RawNullablePointer { .. } |
-        StructWrappedNullablePointer { .. } => {
-            assert!(discr == Disr(0) || discr == Disr(1));
-            C_bool(bcx.ccx(), discr != Disr(0))
-        }
-    }
-}
-
-/// Set the discriminant for a new value of the given case of the given
-/// representation.
-pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
-                                   val: ValueRef, discr: Disr) {
-    match *r {
-        CEnum(ity, min, max) => {
-            assert_discr_in_range(ity, min, max, discr);
-            Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true),
-                  val);
-        }
-        General(ity, ref cases, dtor) => {
-            if dtor_active(dtor) {
-                let ptr = trans_field_ptr(bcx, r, MaybeSizedValue::sized(val), discr,
-                                          cases[discr.0 as usize].fields.len() - 2);
-                Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED), ptr);
-            }
-            Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true),
-                  StructGEP(bcx, val, 0));
-        }
-        Univariant(ref st, dtor) => {
-            assert_eq!(discr, Disr(0));
-            if dtor_active(dtor) {
-                Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED),
-                      StructGEP(bcx, val, st.fields.len() - 1));
-            }
-        }
-        RawNullablePointer { nndiscr, nnty, ..} => {
-            if discr != nndiscr {
-                let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty);
-                Store(bcx, C_null(llptrty), val);
-            }
-        }
-        StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
-            if discr != nndiscr {
-                let llptrptr = GEPi(bcx, val, &discrfield[..]);
-                let llptrty = val_ty(llptrptr).element_type();
-                Store(bcx, C_null(llptrty), llptrptr);
-            }
-        }
-    }
-}
-
-fn assert_discr_in_range(ity: IntType, min: Disr, max: Disr, discr: Disr) {
-    match ity {
-        attr::UnsignedInt(_) => {
-            assert!(min <= discr);
-            assert!(discr <= max)
-        },
-        attr::SignedInt(_) => {
-            assert!(min.0 as i64 <= discr.0 as i64);
-            assert!(discr.0 as i64 <= max.0 as i64);
-        },
-    }
-}
-
-/// The number of fields in a given case; for use when obtaining this
-/// information from the type or definition is less convenient.
-pub fn num_args(r: &Repr, discr: Disr) -> usize {
-    match *r {
-        CEnum(..) => 0,
-        Univariant(ref st, dtor) => {
-            assert_eq!(discr, Disr(0));
-            st.fields.len() - (if dtor_active(dtor) { 1 } else { 0 })
-        }
-        General(_, ref cases, dtor) => {
-            cases[discr.0 as usize].fields.len() - 1 - (if dtor_active(dtor) { 1 } else { 0 })
-        }
-        RawNullablePointer { nndiscr, ref nullfields, .. } => {
-            if discr == nndiscr { 1 } else { nullfields.len() }
-        }
-        StructWrappedNullablePointer { ref nonnull, nndiscr,
-                                       ref nullfields, .. } => {
-            if discr == nndiscr { nonnull.fields.len() } else { nullfields.len() }
-        }
-    }
-}
-
-/// Access a field, at a point when the value's case is known.
-pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
-                                   val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef {
-    // Note: if this ever needs to generate conditionals (e.g., if we
-    // decide to do some kind of cdr-coding-like non-unique repr
-    // someday), it will need to return a possibly-new bcx as well.
-    match *r {
-        CEnum(..) => {
-            bcx.ccx().sess().bug("element access in C-like enum")
-        }
-        Univariant(ref st, _dtor) => {
-            assert_eq!(discr, Disr(0));
-            struct_field_ptr(bcx, st, val, ix, false)
-        }
-        General(_, ref cases, _) => {
-            struct_field_ptr(bcx, &cases[discr.0 as usize], val, ix + 1, true)
-        }
-        RawNullablePointer { nndiscr, ref nullfields, .. } |
-        StructWrappedNullablePointer { nndiscr, ref nullfields, .. } if discr != nndiscr => {
-            // The unit-like case might have a nonzero number of unit-like fields.
-            // (e.g., Result or Either with () as one side.)
-            let ty = type_of::type_of(bcx.ccx(), nullfields[ix]);
-            assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0);
-            // The contents of memory at this pointer can't matter, but use
-            // the value that's "reasonable" in case of pointer comparison.
-            PointerCast(bcx, val.value, ty.ptr_to())
-        }
-        RawNullablePointer { nndiscr, nnty, .. } => {
-            assert_eq!(ix, 0);
-            assert_eq!(discr, nndiscr);
-            let ty = type_of::type_of(bcx.ccx(), nnty);
-            PointerCast(bcx, val.value, ty.ptr_to())
-        }
-        StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
-            assert_eq!(discr, nndiscr);
-            struct_field_ptr(bcx, nonnull, val, ix, false)
-        }
-    }
-}
-
-pub fn struct_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, st: &Struct<'tcx>, val: MaybeSizedValue,
-                                    ix: usize, needs_cast: bool) -> ValueRef {
-    let ccx = bcx.ccx();
-    let ptr_val = if needs_cast {
-        let fields = st.fields.iter().map(|&ty| {
-            type_of::in_memory_type_of(ccx, ty)
-        }).collect::<Vec<_>>();
-        let real_ty = Type::struct_(ccx, &fields[..], st.packed);
-        PointerCast(bcx, val.value, real_ty.ptr_to())
-    } else {
-        val.value
-    };
-
-    let fty = st.fields[ix];
-    // Simple case - we can just GEP the field
-    //   * First field - Always aligned properly
-    //   * Packed struct - There is no alignment padding
-    //   * Field is sized - pointer is properly aligned already
-    if ix == 0 || st.packed || type_is_sized(bcx.tcx(), fty) {
-        return StructGEP(bcx, ptr_val, ix);
-    }
-
-    // If the type of the last field is [T] or str, then we don't need to do
-    // any adjustments
-    match fty.sty {
-        ty::TySlice(..) | ty::TyStr => {
-            return StructGEP(bcx, ptr_val, ix);
-        }
-        _ => ()
-    }
-
-    // There's no metadata available; log the case and just do the GEP.
-    if !val.has_meta() {
-        debug!("Unsized field `{}`, of `{}` has no metadata for adjustment",
-               ix,
-               bcx.val_to_string(ptr_val));
-        return StructGEP(bcx, ptr_val, ix);
-    }
-
-    let dbloc = DebugLoc::None;
-
-    // We need to get the pointer manually now.
-    // We do this by casting to a *i8, then offsetting it by the appropriate amount.
-    // We do this instead of, say, simply adjusting the pointer from the result of a GEP
-    // because the field may have an arbitrary alignment in the LLVM representation
-    // anyway.
-    //
-    // To demonstrate:
-    //   struct Foo<T: ?Sized> {
-    //      x: u16,
-    //      y: T
-    //   }
-    //
-    // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
-    // the `y` field has 16-bit alignment.
-
-    let meta = val.meta;
-
-    // Calculate the unaligned offset of the unsized field.
-    let mut offset = 0;
-    for &ty in &st.fields[0..ix] {
-        let llty = type_of::sizing_type_of(ccx, ty);
-        let type_align = type_of::align_of(ccx, ty);
-        offset = roundup(offset, type_align);
-        offset += machine::llsize_of_alloc(ccx, llty);
-    }
-    let unaligned_offset = C_uint(bcx.ccx(), offset);
-
-    // Get the alignment of the field
-    let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta);
-
-    // Bump the unaligned offset up to the appropriate alignment using the
-    // following expression:
-    //
-    //   (unaligned offset + (align - 1)) & -align
-
-    // Calculate offset
-    let align_sub_1 = Sub(bcx, align, C_uint(bcx.ccx(), 1u64), dbloc);
-    let offset = And(bcx,
-                     Add(bcx, unaligned_offset, align_sub_1, dbloc),
-                     Neg(bcx, align, dbloc),
-                     dbloc);
-
-    debug!("struct_field_ptr: DST field offset: {}",
-           bcx.val_to_string(offset));
-
-    // Cast and adjust pointer
-    let byte_ptr = PointerCast(bcx, ptr_val, Type::i8p(bcx.ccx()));
-    let byte_ptr = GEP(bcx, byte_ptr, &[offset]);
-
-    // Finally, cast back to the type expected
-    let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty);
-    debug!("struct_field_ptr: Field type is {}", ll_fty.to_string());
-    PointerCast(bcx, byte_ptr, ll_fty.ptr_to())
-}
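
The bit-twiddling offset bump used above is the usual power-of-two rounding trick; the sketch below (an editor's illustration, not part of the deleted file) checks that it agrees with the plain `roundup` helper defined later in this file:

// Editor's illustrative sketch: the two round-up-to-alignment forms used in
// this file agree whenever the alignment is a power of two.
fn roundup_div(x: u64, a: u64) -> u64 {
    ((x + (a - 1)) / a) * a // the form used by `roundup`
}

fn roundup_mask(x: u64, a: u64) -> u64 {
    (x + (a - 1)) & a.wrapping_neg() // (offset + (align - 1)) & -align, as emitted above
}

fn main() {
    for &a in &[1u64, 2, 4, 8, 16] {
        for x in 0..64u64 {
            assert_eq!(roundup_div(x, a), roundup_mask(x, a));
        }
    }
}
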
-
-pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
-                                    r: &Repr<'tcx>,
-                                    value: ValueRef,
-                                    mut f: F)
-                                    -> Block<'blk, 'tcx> where
-    F: FnMut(Block<'blk, 'tcx>, &Struct<'tcx>, ValueRef) -> Block<'blk, 'tcx>,
-{
-    let fcx = bcx.fcx;
-    match *r {
-        Univariant(ref st, _) => {
-            f(bcx, st, value)
-        }
-        General(ity, ref cases, _) => {
-            let ccx = bcx.ccx();
-
-            // See the comments in trans/base.rs for more information (inside
-            // iter_structural_ty), but the gist here is that if the enum's
-            // discriminant is *not* in the range that we're expecting (in which
-            // case we'll take the fall-through branch on the switch
-            // instruction) then we can't just optimize this to an Unreachable
-            // block.
-            //
-            // Currently we still have filling drop, so this means that the drop
-            // glue for enums may be called when the enum has been paved over
-            // with the "I've been dropped" value. In this case the default
-            // branch of the switch instruction will actually be taken at
-            // runtime, so the basic block isn't actually unreachable, so we
-            // need to make it do something with defined behavior. In this case
-            // we just return early from the function.
-            //
-            // Note that this is also why the `trans_get_discr` below has
-            // `false` to indicate that loading the discriminant should
-            // not have a range assert.
-            let ret_void_cx = fcx.new_temp_block("enum-variant-iter-ret-void");
-            RetVoid(ret_void_cx, DebugLoc::None);
-
-            let discr_val = trans_get_discr(bcx, r, value, None, false);
-            let llswitch = Switch(bcx, discr_val, ret_void_cx.llbb, cases.len());
-            let bcx_next = fcx.new_temp_block("enum-variant-iter-next");
-
-            for (discr, case) in cases.iter().enumerate() {
-                let mut variant_cx = fcx.new_temp_block(
-                    &format!("enum-variant-iter-{}", &discr.to_string())
-                );
-                let rhs_val = C_integral(ll_inttype(ccx, ity), discr as u64, true);
-                AddCase(llswitch, rhs_val, variant_cx.llbb);
-
-                let fields = case.fields.iter().map(|&ty|
-                    type_of::type_of(bcx.ccx(), ty)).collect::<Vec<_>>();
-                let real_ty = Type::struct_(ccx, &fields[..], case.packed);
-                let variant_value = PointerCast(variant_cx, value, real_ty.ptr_to());
-
-                variant_cx = f(variant_cx, case, variant_value);
-                Br(variant_cx, bcx_next.llbb, DebugLoc::None);
-            }
-
-            bcx_next
-        }
-        _ => unreachable!()
-    }
-}
-
-/// Access the struct drop flag, if present.
-pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                                       r: &Repr<'tcx>,
-                                       val: ValueRef)
-                                       -> datum::DatumBlock<'blk, 'tcx, datum::Expr>
-{
-    let tcx = bcx.tcx();
-    let ptr_ty = bcx.tcx().mk_imm_ptr(tcx.dtor_type());
-    match *r {
-        Univariant(ref st, dtor) if dtor_active(dtor) => {
-            let flag_ptr = StructGEP(bcx, val, st.fields.len() - 1);
-            datum::immediate_rvalue_bcx(bcx, flag_ptr, ptr_ty).to_expr_datumblock()
-        }
-        General(_, _, dtor) if dtor_active(dtor) => {
-            let fcx = bcx.fcx;
-            let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
-            let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum(
-                bcx, tcx.dtor_type(), "drop_flag",
-                InitAlloca::Uninit("drop flag itself has no dtor"),
-                cleanup::CustomScope(custom_cleanup_scope), (), |_, bcx, _| {
-                    debug!("no-op populate call for trans_drop_flag_ptr on dtor_type={:?}",
-                           tcx.dtor_type());
-                    bcx
-                }
-            ));
-            bcx = fold_variants(bcx, r, val, |variant_cx, st, value| {
-                let ptr = struct_field_ptr(variant_cx, st, MaybeSizedValue::sized(value),
-                                           (st.fields.len() - 1), false);
-                datum::Datum::new(ptr, ptr_ty, datum::Lvalue::new("adt::trans_drop_flag_ptr"))
-                    .store_to(variant_cx, scratch.val)
-            });
-            let expr_datum = scratch.to_expr_datum();
-            fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
-            datum::DatumBlock::new(bcx, expr_datum)
-        }
-        _ => bcx.ccx().sess().bug("tried to get drop flag of non-droppable type")
-    }
-}
-
-/// Construct a constant value, suitable for initializing a
-/// GlobalVariable, given a case and constant values for its fields.
-/// Note that this may have a different LLVM type (and different
-/// alignment!) from the representation's `type_of`, so it needs a
-/// pointer cast before use.
-///
-/// The LLVM type system does not directly support unions, and only
-/// pointers can be bitcast, so a constant (and, by extension, the
-/// GlobalVariable initialized by it) will have a type that can vary
-/// depending on which case of an enum it is.
-///
-/// To understand the alignment situation, consider `enum E { V64(u64),
-/// V32(u32, u32) }` on Windows.  The type has 8-byte alignment to
-/// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32,
-/// i32, i32}`, which is 4-byte aligned.
-///
-/// Currently the returned value has the same size as the type, but
-/// this could be changed in the future to avoid allocating unnecessary
-/// space after values of shorter-than-maximum cases.
-pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, discr: Disr,
-                             vals: &[ValueRef]) -> ValueRef {
-    match *r {
-        CEnum(ity, min, max) => {
-            assert_eq!(vals.len(), 0);
-            assert_discr_in_range(ity, min, max, discr);
-            C_integral(ll_inttype(ccx, ity), discr.0, true)
-        }
-        General(ity, ref cases, _) => {
-            let case = &cases[discr.0 as usize];
-            let (max_sz, _) = union_size_and_align(&cases[..]);
-            let lldiscr = C_integral(ll_inttype(ccx, ity), discr.0 as u64, true);
-            let mut f = vec![lldiscr];
-            f.extend_from_slice(vals);
-            let mut contents = build_const_struct(ccx, case, &f[..]);
-            contents.extend_from_slice(&[padding(ccx, max_sz - case.size)]);
-            C_struct(ccx, &contents[..], false)
-        }
-        Univariant(ref st, _dro) => {
-            assert_eq!(discr, Disr(0));
-            let contents = build_const_struct(ccx, st, vals);
-            C_struct(ccx, &contents[..], st.packed)
-        }
-        RawNullablePointer { nndiscr, nnty, .. } => {
-            if discr == nndiscr {
-                assert_eq!(vals.len(), 1);
-                vals[0]
-            } else {
-                C_null(type_of::sizing_type_of(ccx, nnty))
-            }
-        }
-        StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
-            if discr == nndiscr {
-                C_struct(ccx, &build_const_struct(ccx,
-                                                 nonnull,
-                                                 vals),
-                         false)
-            } else {
-                let vals = nonnull.fields.iter().map(|&ty| {
-                    // Always use null even if it's not the `discrfield`th
-                    // field; see #8506.
-                    C_null(type_of::sizing_type_of(ccx, ty))
-                }).collect::<Vec<ValueRef>>();
-                C_struct(ccx, &build_const_struct(ccx,
-                                                 nonnull,
-                                                 &vals[..]),
-                         false)
-            }
-        }
-    }
-}
-
-/// Compute struct field offsets relative to struct begin.
-fn compute_struct_field_offsets<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                          st: &Struct<'tcx>) -> Vec<u64> {
-    let mut offsets = vec!();
-
-    let mut offset = 0;
-    for &ty in &st.fields {
-        let llty = type_of::sizing_type_of(ccx, ty);
-        if !st.packed {
-            let type_align = type_of::align_of(ccx, ty);
-            offset = roundup(offset, type_align);
-        }
-        offsets.push(offset);
-        offset += machine::llsize_of_alloc(ccx, llty);
-    }
-    assert_eq!(st.fields.len(), offsets.len());
-    offsets
-}
-
-/// Building structs is a little complicated, because we might need to
-/// insert padding if a field's value is less aligned than its type.
-///
-/// Continuing the example from `trans_const`, a value of type `(u32,
-/// E)` should have the `E` at offset 8, but if that field's
-/// initializer is 4-byte aligned then simply translating the tuple as
-/// a two-element struct will locate it at offset 4, and accesses to it
-/// will read the wrong memory.
-fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                st: &Struct<'tcx>, vals: &[ValueRef])
-                                -> Vec<ValueRef> {
-    assert_eq!(vals.len(), st.fields.len());
-
-    let target_offsets = compute_struct_field_offsets(ccx, st);
-
-    // offset of current value
-    let mut offset = 0;
-    let mut cfields = Vec::new();
-    for (&val, target_offset) in vals.iter().zip(target_offsets) {
-        if !st.packed {
-            let val_align = machine::llalign_of_min(ccx, val_ty(val));
-            offset = roundup(offset, val_align);
-        }
-        if offset != target_offset {
-            cfields.push(padding(ccx, target_offset - offset));
-            offset = target_offset;
-        }
-        assert!(!is_undef(val));
-        cfields.push(val);
-        offset += machine::llsize_of_alloc(ccx, val_ty(val));
-    }
-
-    assert!(st.sized && offset <= st.size);
-    if offset != st.size {
-        cfields.push(padding(ccx, st.size - offset));
-    }
-
-    cfields
-}
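
Continuing the `(u32, E)` example from the comment above, the sketch below (an editor's illustration with hypothetical sizes and alignments, not part of the deleted file) shows where padding is inserted when a field's constant value is less aligned than its target offset requires:

// Editor's illustrative sketch of the padding logic in build_const_struct;
// offsets, sizes and alignments are hypothetical.
fn const_fields(target_offsets: &[u64], val_sizes: &[u64], val_aligns: &[u64]) -> Vec<(u64, u64)> {
    // Returns (offset, length) pairs; an entry emitted before a value whose
    // running offset lags its target offset stands for an undef padding array.
    let mut out = Vec::new();
    let mut offset = 0u64;
    for ((&target, &size), &align) in target_offsets.iter().zip(val_sizes).zip(val_aligns) {
        offset = ((offset + (align - 1)) / align) * align;
        if offset != target {
            out.push((offset, target - offset)); // padding
            offset = target;
        }
        out.push((offset, size)); // the value itself
        offset += size;
    }
    out
}

fn main() {
    // A u32 at offset 0, then an 8-byte `E` constant that is only 4-byte
    // aligned but must land at offset 8: 4 bytes of padding are inserted.
    assert_eq!(const_fields(&[0, 8], &[4, 8], &[4, 4]),
               vec![(0, 4), (4, 4), (8, 8)]);
}
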
-
-fn padding(ccx: &CrateContext, size: u64) -> ValueRef {
-    C_undef(Type::array(&Type::i8(ccx), size))
-}
-
-// FIXME this utility routine should be somewhere more general
-#[inline]
-fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a }
-
-/// Get the discriminant of a constant value.
-pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef) -> Disr {
-    match *r {
-        CEnum(ity, _, _) => {
-            match ity {
-                attr::SignedInt(..) => Disr(const_to_int(val) as u64),
-                attr::UnsignedInt(..) => Disr(const_to_uint(val)),
-            }
-        }
-        General(ity, _, _) => {
-            match ity {
-                attr::SignedInt(..) => Disr(const_to_int(const_get_elt(ccx, val, &[0])) as u64),
-                attr::UnsignedInt(..) => Disr(const_to_uint(const_get_elt(ccx, val, &[0])))
-            }
-        }
-        Univariant(..) => Disr(0),
-        RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
-            ccx.sess().bug("const discrim access of non c-like enum")
-        }
-    }
-}
-
-/// Extract a field of a constant value, as appropriate for its
-/// representation.
-///
-/// (Not to be confused with `common::const_get_elt`, which operates on
-/// raw LLVM-level structs and arrays.)
-pub fn const_get_field(ccx: &CrateContext, r: &Repr, val: ValueRef,
-                       _discr: Disr, ix: usize) -> ValueRef {
-    match *r {
-        CEnum(..) => ccx.sess().bug("element access in C-like enum const"),
-        Univariant(..) => const_struct_field(ccx, val, ix),
-        General(..) => const_struct_field(ccx, val, ix + 1),
-        RawNullablePointer { .. } => {
-            assert_eq!(ix, 0);
-            val
-        },
-        StructWrappedNullablePointer{ .. } => const_struct_field(ccx, val, ix)
-    }
-}
-
-/// Extract field of struct-like const, skipping our alignment padding.
-fn const_struct_field(ccx: &CrateContext, val: ValueRef, ix: usize) -> ValueRef {
-    // Get the ix-th non-undef element of the struct.
-    let mut real_ix = 0; // actual position in the struct
-    let mut ix = ix; // logical index relative to real_ix
-    let mut field;
-    loop {
-        loop {
-            field = const_get_elt(ccx, val, &[real_ix]);
-            if !is_undef(field) {
-                break;
-            }
-            real_ix = real_ix + 1;
-        }
-        if ix == 0 {
-            return field;
-        }
-        ix = ix - 1;
-        real_ix = real_ix + 1;
-    }
-}
diff --git a/src/librustc_trans/trans/asm.rs b/src/librustc_trans/trans/asm.rs
deleted file mode 100644 (file)
index 33370ab..0000000
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! # Translation of inline assembly.
-
-use llvm;
-use trans::build::*;
-use trans::callee;
-use trans::common::*;
-use trans::cleanup;
-use trans::cleanup::CleanupMethods;
-use trans::expr;
-use trans::type_of;
-use trans::type_::Type;
-
-use rustc_front::hir as ast;
-use std::ffi::CString;
-use syntax::ast::AsmDialect;
-use libc::{c_uint, c_char};
-
-// Take an inline assembly expression and splat it out via LLVM
-pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
-                                    -> Block<'blk, 'tcx> {
-    let fcx = bcx.fcx;
-    let mut bcx = bcx;
-    let mut constraints = Vec::new();
-    let mut output_types = Vec::new();
-
-    let temp_scope = fcx.push_custom_cleanup_scope();
-
-    let mut ext_inputs = Vec::new();
-    let mut ext_constraints = Vec::new();
-
-    // Prepare the output operands
-    let mut outputs = Vec::new();
-    let mut inputs = Vec::new();
-    for (i, out) in ia.outputs.iter().enumerate() {
-        constraints.push(out.constraint.clone());
-
-        let out_datum = unpack_datum!(bcx, expr::trans(bcx, &out.expr));
-        if out.is_indirect {
-            bcx = callee::trans_arg_datum(bcx,
-                                          expr_ty(bcx, &out.expr),
-                                          out_datum,
-                                          cleanup::CustomScope(temp_scope),
-                                          callee::DontAutorefArg,
-                                          &mut inputs);
-            if out.is_rw {
-                ext_inputs.push(*inputs.last().unwrap());
-                ext_constraints.push(i.to_string());
-            }
-        } else {
-            output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
-            outputs.push(out_datum.val);
-            if out.is_rw {
-                bcx = callee::trans_arg_datum(bcx,
-                                              expr_ty(bcx, &out.expr),
-                                              out_datum,
-                                              cleanup::CustomScope(temp_scope),
-                                              callee::DontAutorefArg,
-                                              &mut ext_inputs);
-                ext_constraints.push(i.to_string());
-            }
-        }
-    }
-
-    // Now the input operands
-    for &(ref c, ref input) in &ia.inputs {
-        constraints.push((*c).clone());
-
-        let in_datum = unpack_datum!(bcx, expr::trans(bcx, &input));
-        bcx = callee::trans_arg_datum(bcx,
-                                    expr_ty(bcx, &input),
-                                    in_datum,
-                                    cleanup::CustomScope(temp_scope),
-                                    callee::DontAutorefArg,
-                                    &mut inputs);
-    }
-    inputs.extend_from_slice(&ext_inputs[..]);
-
-    // no failure occurred preparing operands, so there is no need to clean up
-    fcx.pop_custom_cleanup_scope(temp_scope);
-
-    let clobbers = ia.clobbers.iter()
-                              .map(|s| format!("~{{{}}}", &s));
-
-    // Default per-arch clobbers
-    // Basically what clang does
-    let arch_clobbers = match &bcx.sess().target.target.arch[..] {
-        "x86" | "x86_64" => vec!("~{dirflag}", "~{fpsr}", "~{flags}"),
-        _                => Vec::new()
-    };
-
-    let all_constraints= constraints.iter()
-                                    .map(|s| s.to_string())
-                                    .chain(ext_constraints)
-                                    .chain(clobbers)
-                                    .chain(arch_clobbers.iter()
-                                               .map(|s| s.to_string()))
-                                    .collect::<Vec<String>>()
-                                    .join(",");
-
-    debug!("Asm Constraints: {}", &all_constraints[..]);
-
-    // Depending on how many outputs we have, the return type is different
-    let num_outputs = outputs.len();
-    let output_type = match num_outputs {
-        0 => Type::void(bcx.ccx()),
-        1 => output_types[0],
-        _ => Type::struct_(bcx.ccx(), &output_types[..], false)
-    };
-
-    let dialect = match ia.dialect {
-        AsmDialect::Att   => llvm::AD_ATT,
-        AsmDialect::Intel => llvm::AD_Intel
-    };
-
-    let asm = CString::new(ia.asm.as_bytes()).unwrap();
-    let constraint_cstr = CString::new(all_constraints).unwrap();
-    let r = InlineAsmCall(bcx,
-                          asm.as_ptr(),
-                          constraint_cstr.as_ptr(),
-                          &inputs,
-                          output_type,
-                          ia.volatile,
-                          ia.alignstack,
-                          dialect);
-
-    // Again, based on how many outputs we have
-    if num_outputs == 1 {
-        Store(bcx, r, outputs[0]);
-    } else {
-        for (i, o) in outputs.iter().enumerate() {
-            let v = ExtractValue(bcx, r, i);
-            Store(bcx, v, *o);
-        }
-    }
-
-    // Store expn_id in a metadata node so we can map LLVM errors
-    // back to source locations.  See #17552.
-    unsafe {
-        let key = "srcloc";
-        let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx().llcx(),
-            key.as_ptr() as *const c_char, key.len() as c_uint);
-
-        let val: llvm::ValueRef = C_i32(bcx.ccx(), ia.expn_id.into_u32() as i32);
-
-        llvm::LLVMSetMetadata(r, kind,
-            llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1));
-    }
-
-    return bcx;
-
-}
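
To make the constraint-string assembly above concrete, here is a short Rust sketch (an editor's illustration; the operands and registers are hypothetical, not taken from the deleted file) showing the string produced for one read-write "=r" output and one "r" input on x86_64:

// Editor's illustrative sketch of the constraint string built by
// trans_inline_asm; the operand constraints and clobbers are hypothetical.
fn main() {
    let constraints = vec!["=r".to_string(), "r".to_string()]; // outputs first, then inputs
    let ext_constraints = vec!["0".to_string()];               // ties the read-write output to operand 0
    let clobbers = vec!["~{eax}".to_string()];                 // user clobbers, already ~{..}-wrapped
    let arch_clobbers = vec!["~{dirflag}", "~{fpsr}", "~{flags}"]; // x86 defaults, as above

    let all_constraints = constraints.iter()
        .map(|s| s.to_string())
        .chain(ext_constraints)
        .chain(clobbers)
        .chain(arch_clobbers.iter().map(|s| s.to_string()))
        .collect::<Vec<String>>()
        .join(",");

    assert_eq!(all_constraints, "=r,r,0,~{eax},~{dirflag},~{fpsr},~{flags}");
}
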
diff --git a/src/librustc_trans/trans/assert_dep_graph.rs b/src/librustc_trans/trans/assert_dep_graph.rs
deleted file mode 100644 (file)
index 6171d05..0000000
+++ /dev/null
@@ -1,430 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! This pass is only used for the UNIT TESTS and DEBUGGING NEEDS
-//! around dependency graph construction. It serves two purposes; it
-//! will dump graphs in graphviz form to disk, and it searches for
-//! `#[rustc_if_this_changed]` and `#[rustc_then_this_would_need]`
-//! annotations. These annotations can be used to test whether paths
-//! exist in the graph. We report errors on each
-//! `rustc_if_this_changed` annotation. If a path exists in all
-//! cases, then we would report "all path(s) exist". Otherwise, we
-//! report: "no path to `foo`" for each case where no path exists.
-//! `compile-fail` tests can then be used to check when paths exist or
-//! do not.
-//!
-//! The full form of the `rustc_if_this_changed` annotation is
-//! `#[rustc_if_this_changed(id)]`. The `id` argument is optional and
-//! defaults to `"id"` if omitted.
-//!
-//! Example:
-//!
-//! ```
-//! #[rustc_if_this_changed]
-//! fn foo() { }
-//!
-//! #[rustc_then_this_would_need("trans")] //~ ERROR no path from `foo`
-//! fn bar() { }
-//!
-//! #[rustc_then_this_would_need("trans")] //~ ERROR OK
-//! fn baz() { foo(); }
-//! ```
-
-use graphviz as dot;
-use rustc::dep_graph::{DepGraphQuery, DepNode};
-use rustc::middle::def_id::DefId;
-use rustc::middle::ty;
-use rustc_data_structures::fnv::{FnvHashMap, FnvHashSet};
-use rustc_data_structures::graph::{Direction, INCOMING, OUTGOING, NodeIndex};
-use rustc_front::hir;
-use rustc_front::intravisit::Visitor;
-use graphviz::IntoCow;
-use std::env;
-use std::fs::File;
-use std::io::Write;
-use syntax::ast;
-use syntax::attr::AttrMetaMethods;
-use syntax::codemap::Span;
-use syntax::parse::token::InternedString;
-
-const IF_THIS_CHANGED: &'static str = "rustc_if_this_changed";
-const THEN_THIS_WOULD_NEED: &'static str = "rustc_then_this_would_need";
-const ID: &'static str = "id";
-
-pub fn assert_dep_graph(tcx: &ty::ctxt) {
-    let _ignore = tcx.dep_graph.in_ignore();
-
-    if tcx.sess.opts.dump_dep_graph {
-        dump_graph(tcx);
-    }
-
-    // Find annotations supplied by user (if any).
-    let (if_this_changed, then_this_would_need) = {
-        let mut visitor = IfThisChanged { tcx: tcx,
-                                          if_this_changed: FnvHashMap(),
-                                          then_this_would_need: FnvHashMap() };
-        tcx.map.krate().visit_all_items(&mut visitor);
-        (visitor.if_this_changed, visitor.then_this_would_need)
-    };
-
-    // Check paths.
-    check_paths(tcx, &if_this_changed, &then_this_would_need);
-}
-
-type SourceHashMap = FnvHashMap<InternedString,
-                                FnvHashSet<(Span, DefId, DepNode)>>;
-type TargetHashMap = FnvHashMap<InternedString,
-                                FnvHashSet<(Span, InternedString, ast::NodeId, DepNode)>>;
-
-struct IfThisChanged<'a, 'tcx:'a> {
-    tcx: &'a ty::ctxt<'tcx>,
-    if_this_changed: SourceHashMap,
-    then_this_would_need: TargetHashMap,
-}
-
-impl<'a, 'tcx> IfThisChanged<'a, 'tcx> {
-    fn process_attrs(&mut self, node_id: ast::NodeId, def_id: DefId) {
-        for attr in self.tcx.get_attrs(def_id).iter() {
-            if attr.check_name(IF_THIS_CHANGED) {
-                let mut id = None;
-                for meta_item in attr.meta_item_list().unwrap_or_default() {
-                    match meta_item.node {
-                        ast::MetaItemKind::Word(ref s) if id.is_none() => id = Some(s.clone()),
-                        _ => {
-                            self.tcx.sess.span_err(
-                                meta_item.span,
-                                &format!("unexpected meta-item {:?}", meta_item.node));
-                        }
-                    }
-                }
-                let id = id.unwrap_or(InternedString::new(ID));
-                self.if_this_changed.entry(id)
-                                    .or_insert(FnvHashSet())
-                                    .insert((attr.span, def_id, DepNode::Hir(def_id)));
-            } else if attr.check_name(THEN_THIS_WOULD_NEED) {
-                let mut dep_node_interned = None;
-                let mut id = None;
-                for meta_item in attr.meta_item_list().unwrap_or_default() {
-                    match meta_item.node {
-                        ast::MetaItemKind::Word(ref s) if dep_node_interned.is_none() =>
-                            dep_node_interned = Some(s.clone()),
-                        ast::MetaItemKind::Word(ref s) if id.is_none() =>
-                            id = Some(s.clone()),
-                        _ => {
-                            self.tcx.sess.span_err(
-                                meta_item.span,
-                                &format!("unexpected meta-item {:?}", meta_item.node));
-                        }
-                    }
-                }
-                let dep_node_str = dep_node_interned.as_ref().map(|s| &**s);
-                macro_rules! match_depnode_name {
-                    ($input:expr, $def_id:expr, match { $($variant:ident,)* } else $y:expr) => {
-                        match $input {
-                            $(Some(stringify!($variant)) => DepNode::$variant($def_id),)*
-                            _ => $y
-                        }
-                    }
-                }
-                let dep_node = match_depnode_name! {
-                    dep_node_str, def_id, match {
-                        CollectItem,
-                        BorrowCheck,
-                        TransCrateItem,
-                        TypeckItemType,
-                        TypeckItemBody,
-                        ImplOrTraitItems,
-                        ItemSignature,
-                        FieldTy,
-                        TraitItemDefIds,
-                        InherentImpls,
-                        ImplItems,
-                        TraitImpls,
-                        ReprHints,
-                    } else {
-                        self.tcx.sess.span_fatal(
-                            attr.span,
-                            &format!("unrecognized DepNode variant {:?}", dep_node_str));
-                    }
-                };
-                let id = id.unwrap_or(InternedString::new(ID));
-                self.then_this_would_need
-                    .entry(id)
-                    .or_insert(FnvHashSet())
-                    .insert((attr.span, dep_node_interned.clone().unwrap(), node_id, dep_node));
-            }
-        }
-    }
-}
-
-impl<'a, 'tcx> Visitor<'tcx> for IfThisChanged<'a, 'tcx> {
-    fn visit_item(&mut self, item: &'tcx hir::Item) {
-        let def_id = self.tcx.map.local_def_id(item.id);
-        self.process_attrs(item.id, def_id);
-    }
-}
-
-fn check_paths(tcx: &ty::ctxt,
-               if_this_changed: &SourceHashMap,
-               then_this_would_need: &TargetHashMap)
-{
-    // Return early here so as not to construct the query, which is not cheap.
-    if if_this_changed.is_empty() {
-        return;
-    }
-    let query = tcx.dep_graph.query();
-    for (id, sources) in if_this_changed {
-        let targets = match then_this_would_need.get(id) {
-            Some(targets) => targets,
-            None => {
-                for &(source_span, _, _) in sources.iter().take(1) {
-                    tcx.sess.span_err(
-                        source_span,
-                        &format!("no targets for id `{}`", id));
-                }
-                continue;
-            }
-        };
-
-        for &(_, source_def_id, source_dep_node) in sources {
-            let dependents = query.dependents(source_dep_node);
-            for &(target_span, ref target_pass, _, ref target_dep_node) in targets {
-                if !dependents.contains(&target_dep_node) {
-                    tcx.sess.span_err(
-                        target_span,
-                        &format!("no path from `{}` to `{}`",
-                                 tcx.item_path_str(source_def_id),
-                                 target_pass));
-                } else {
-                    tcx.sess.span_err(
-                        target_span,
-                        &format!("OK"));
-                }
-            }
-        }
-    }
-}
-
-fn dump_graph(tcx: &ty::ctxt) {
-    let path: String = env::var("RUST_DEP_GRAPH").unwrap_or_else(|_| format!("dep_graph"));
-    let query = tcx.dep_graph.query();
-
-    let nodes = match env::var("RUST_DEP_GRAPH_FILTER") {
-        Ok(string) => {
-            // Expect one of: "-> target", "source -> target", or "source ->".
-            let parts: Vec<_> = string.split("->").collect();
-            if parts.len() > 2 {
-                panic!("Invalid RUST_DEP_GRAPH_FILTER: expected '[source] -> [target]'");
-            }
-            let sources = node_set(&query, &parts[0]);
-            let targets = node_set(&query, &parts[1]);
-            filter_nodes(&query, &sources, &targets)
-        }
-        Err(_) => {
-            query.nodes()
-                 .into_iter()
-                 .collect()
-        }
-    };
-    let edges = filter_edges(&query, &nodes);
-
-    { // dump a .txt file with just the edges:
-        let txt_path = format!("{}.txt", path);
-        let mut file = File::create(&txt_path).unwrap();
-        for &(source, target) in &edges {
-            write!(file, "{:?} -> {:?}\n", source, target).unwrap();
-        }
-    }
-
-    { // dump a .dot file in graphviz format:
-        let dot_path = format!("{}.dot", path);
-        let mut v = Vec::new();
-        dot::render(&GraphvizDepGraph(nodes, edges), &mut v).unwrap();
-        File::create(&dot_path).and_then(|mut f| f.write_all(&v)).unwrap();
-    }
-}
-
-pub struct GraphvizDepGraph(FnvHashSet<DepNode>, Vec<(DepNode, DepNode)>);
-
-impl<'a, 'tcx> dot::GraphWalk<'a, DepNode, (DepNode, DepNode)> for GraphvizDepGraph {
-    fn nodes(&self) -> dot::Nodes<DepNode> {
-        let nodes: Vec<_> = self.0.iter().cloned().collect();
-        nodes.into_cow()
-    }
-    fn edges(&self) -> dot::Edges<(DepNode, DepNode)> {
-        self.1[..].into_cow()
-    }
-    fn source(&self, edge: &(DepNode, DepNode)) -> DepNode {
-        edge.0
-    }
-    fn target(&self, edge: &(DepNode, DepNode)) -> DepNode {
-        edge.1
-    }
-}
-
-impl<'a, 'tcx> dot::Labeller<'a, DepNode, (DepNode, DepNode)> for GraphvizDepGraph {
-    fn graph_id(&self) -> dot::Id {
-        dot::Id::new("DependencyGraph").unwrap()
-    }
-    fn node_id(&self, n: &DepNode) -> dot::Id {
-        let s: String =
-            format!("{:?}", n).chars()
-                              .map(|c| if c == '_' || c.is_alphanumeric() { c } else { '_' })
-                              .collect();
-        debug!("n={:?} s={:?}", n, s);
-        dot::Id::new(s).unwrap()
-    }
-    fn node_label(&self, n: &DepNode) -> dot::LabelText {
-        dot::LabelText::label(format!("{:?}", n))
-    }
-}
-
-// Given an optional filter like `"x&y&z"`, returns either `None` (no
-// filter) or the set of nodes whose labels contain all of those
-// substrings.
-fn node_set(query: &DepGraphQuery, filter: &str) -> Option<FnvHashSet<DepNode>> {
-    debug!("node_set(filter={:?})", filter);
-
-    if filter.trim().is_empty() {
-        return None;
-    }
-
-    let filters: Vec<&str> = filter.split("&").map(|s| s.trim()).collect();
-
-    debug!("node_set: filters={:?}", filters);
-
-    Some(query.nodes()
-         .into_iter()
-         .filter(|n| {
-             let s = format!("{:?}", n);
-             filters.iter().all(|f| s.contains(f))
-         })
-        .collect())
-}
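
The `RUST_DEP_GRAPH_FILTER` syntax handled by `dump_graph` and `node_set` can be summarised with a small sketch (an editor's illustration, not part of the deleted file; it only models the happy path of the parsing, not the graph walk):

// Editor's illustrative sketch of the filter syntax: "[source] -> [target]",
// where each side is a '&'-separated list of substrings a node label must contain.
fn parse_filter(filter: &str) -> (Vec<String>, Vec<String>) {
    let parts: Vec<&str> = filter.split("->").collect();
    assert!(parts.len() <= 2, "expected '[source] -> [target]'");
    let split_side = |s: &str| -> Vec<String> {
        if s.trim().is_empty() {
            Vec::new()
        } else {
            s.split('&').map(|f| f.trim().to_string()).collect()
        }
    };
    (split_side(parts[0]), split_side(parts.get(1).copied().unwrap_or("")))
}

fn main() {
    assert_eq!(parse_filter("Hir&foo -> TransCrateItem"),
               (vec!["Hir".to_string(), "foo".to_string()],
                vec!["TransCrateItem".to_string()]));
}
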
-
-fn filter_nodes(query: &DepGraphQuery,
-                sources: &Option<FnvHashSet<DepNode>>,
-                targets: &Option<FnvHashSet<DepNode>>)
-                -> FnvHashSet<DepNode>
-{
-    if let &Some(ref sources) = sources {
-        if let &Some(ref targets) = targets {
-            walk_between(query, sources, targets)
-        } else {
-            walk_nodes(query, sources, OUTGOING)
-        }
-    } else if let &Some(ref targets) = targets {
-        walk_nodes(query, targets, INCOMING)
-    } else {
-        query.nodes().into_iter().collect()
-    }
-}
-
-fn walk_nodes(query: &DepGraphQuery,
-              starts: &FnvHashSet<DepNode>,
-              direction: Direction)
-              -> FnvHashSet<DepNode>
-{
-    let mut set = FnvHashSet();
-    for start in starts {
-        debug!("walk_nodes: start={:?} outgoing?={:?}", start, direction == OUTGOING);
-        if set.insert(*start) {
-            let mut stack = vec![query.indices[start]];
-            while let Some(index) = stack.pop() {
-                for (_, edge) in query.graph.adjacent_edges(index, direction) {
-                    let neighbor_index = edge.source_or_target(direction);
-                    let neighbor = query.graph.node_data(neighbor_index);
-                    if set.insert(*neighbor) {
-                        stack.push(neighbor_index);
-                    }
-                }
-            }
-        }
-    }
-    set
-}
-
-fn walk_between(query: &DepGraphQuery,
-                sources: &FnvHashSet<DepNode>,
-                targets: &FnvHashSet<DepNode>)
-                -> FnvHashSet<DepNode>
-{
-    // This is a bit tricky. We want to include a node only if it is
-    // (a) reachable from a source and (b) able to reach a target. We
-    // also have to be careful about cycles etc. Luckily, efficiency is
-    // not a big concern!
-
-    #[derive(Copy, Clone, PartialEq)]
-    enum State { Undecided, Deciding, Included, Excluded }
-
-    let mut node_states = vec![State::Undecided; query.graph.len_nodes()];
-
-    for &target in targets {
-        node_states[query.indices[&target].0] = State::Included;
-    }
-
-    for source in sources.iter().map(|n| query.indices[n]) {
-        recurse(query, &mut node_states, source);
-    }
-
-    return query.nodes()
-                .into_iter()
-                .filter(|n| {
-                    let index = query.indices[n];
-                    node_states[index.0] == State::Included
-                })
-                .collect();
-
-    fn recurse(query: &DepGraphQuery,
-               node_states: &mut [State],
-               node: NodeIndex)
-               -> bool
-    {
-        match node_states[node.0] {
-            // known to reach a target
-            State::Included => return true,
-
-            // known not to reach a target
-            State::Excluded => return false,
-
-            // backedge, not yet known, say false
-            State::Deciding => return false,
-
-            State::Undecided => { }
-        }
-
-        node_states[node.0] = State::Deciding;
-
-        for neighbor_index in query.graph.successor_nodes(node) {
-            if recurse(query, node_states, neighbor_index) {
-                node_states[node.0] = State::Included;
-            }
-        }
-
-        // if we didn't find a path to target, then set to excluded
-        if node_states[node.0] == State::Deciding {
-            node_states[node.0] = State::Excluded;
-            false
-        } else {
-            assert!(node_states[node.0] == State::Included);
-            true
-        }
-    }
-}
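
The inclusion criterion above (reachable from a source and able to reach a target) can also be computed by intersecting two plain reachability sets. The following self-contained sketch does exactly that on a toy edge list; it is only an illustration of the criterion, not the recursive state-machine implementation used by the compiler.

    use std::collections::HashSet;

    fn reachable(edges: &[(usize, usize)], starts: &[usize], forward: bool) -> HashSet<usize> {
        let mut seen: HashSet<usize> = starts.iter().cloned().collect();
        let mut stack: Vec<usize> = starts.to_vec();
        while let Some(n) = stack.pop() {
            for &(s, t) in edges {
                let (from, to) = if forward { (s, t) } else { (t, s) };
                if from == n && seen.insert(to) {
                    stack.push(to);
                }
            }
        }
        seen
    }

    fn main() {
        // 0 -> 1 -> 2 -> 3, plus a dead end 1 -> 4
        let edges = [(0, 1), (1, 2), (2, 3), (1, 4)];
        let from_sources = reachable(&edges, &[0], true);
        let to_targets = reachable(&edges, &[3], false);
        let kept: HashSet<_> = from_sources.intersection(&to_targets).cloned().collect();
        println!("{:?}", kept); // 0, 1, 2, 3 in some order; node 4 cannot reach the target
    }
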
-
-fn filter_edges(query: &DepGraphQuery,
-                nodes: &FnvHashSet<DepNode>)
-                -> Vec<(DepNode, DepNode)>
-{
-    query.edges()
-         .into_iter()
-         .filter(|&(source, target)| nodes.contains(&source) && nodes.contains(&target))
-         .collect()
-}
diff --git a/src/librustc_trans/trans/attributes.rs b/src/librustc_trans/trans/attributes.rs
deleted file mode 100644 (file)
index 8f9648b..0000000
+++ /dev/null
@@ -1,307 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-//! Set and unset common attributes on LLVM values.
-
-use libc::{c_uint, c_ulonglong};
-use llvm::{self, ValueRef, AttrHelper};
-use middle::ty;
-use middle::infer;
-use session::config::NoDebugInfo;
-use syntax::abi::Abi;
-pub use syntax::attr::InlineAttr;
-use syntax::ast;
-use rustc_front::hir;
-use trans::base;
-use trans::common;
-use trans::context::CrateContext;
-use trans::machine;
-use trans::type_of;
-
-/// Mark an LLVM function to use the provided inline heuristic.
-#[inline]
-pub fn inline(val: ValueRef, inline: InlineAttr) {
-    use self::InlineAttr::*;
-    match inline {
-        Hint   => llvm::SetFunctionAttribute(val, llvm::Attribute::InlineHint),
-        Always => llvm::SetFunctionAttribute(val, llvm::Attribute::AlwaysInline),
-        Never  => llvm::SetFunctionAttribute(val, llvm::Attribute::NoInline),
-        None   => {
-            let attr = llvm::Attribute::InlineHint |
-                       llvm::Attribute::AlwaysInline |
-                       llvm::Attribute::NoInline;
-            unsafe {
-                llvm::LLVMRemoveFunctionAttr(val, attr.bits() as c_ulonglong)
-            }
-        },
-    };
-}
-
-/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
-#[inline]
-pub fn emit_uwtable(val: ValueRef, emit: bool) {
-    if emit {
-        llvm::SetFunctionAttribute(val, llvm::Attribute::UWTable);
-    } else {
-        unsafe {
-            llvm::LLVMRemoveFunctionAttr(
-                val,
-                llvm::Attribute::UWTable.bits() as c_ulonglong,
-            );
-        }
-    }
-}
-
-/// Tell LLVM whether the function can or cannot unwind.
-#[inline]
-pub fn unwind(val: ValueRef, can_unwind: bool) {
-    if can_unwind {
-        unsafe {
-            llvm::LLVMRemoveFunctionAttr(
-                val,
-                llvm::Attribute::NoUnwind.bits() as c_ulonglong,
-            );
-        }
-    } else {
-        llvm::SetFunctionAttribute(val, llvm::Attribute::NoUnwind);
-    }
-}
-
-/// Tell LLVM whether it should optimise the function for size.
-#[inline]
-#[allow(dead_code)] // possibly useful function
-pub fn set_optimize_for_size(val: ValueRef, optimize: bool) {
-    if optimize {
-        llvm::SetFunctionAttribute(val, llvm::Attribute::OptimizeForSize);
-    } else {
-        unsafe {
-            llvm::LLVMRemoveFunctionAttr(
-                val,
-                llvm::Attribute::OptimizeForSize.bits() as c_ulonglong,
-            );
-        }
-    }
-}
-
-/// Composite function which sets LLVM attributes for a function depending on its AST (#[attribute])
-/// attributes.
-pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRef) {
-    use syntax::attr::*;
-    inline(llfn, find_inline_attr(Some(ccx.sess().diagnostic()), attrs));
-
-    // FIXME: #11906: Omitting frame pointers breaks retrieving the value of a
-    // parameter.
-    let no_fp_elim = (ccx.sess().opts.debuginfo != NoDebugInfo) ||
-                     !ccx.sess().target.target.options.eliminate_frame_pointer;
-    if no_fp_elim {
-        unsafe {
-            let attr = "no-frame-pointer-elim\0".as_ptr() as *const _;
-            let val = "true\0".as_ptr() as *const _;
-            llvm::LLVMAddFunctionAttrStringValue(llfn,
-                                                 llvm::FunctionIndex as c_uint,
-                                                 attr, val);
-        }
-    }
-
-    for attr in attrs {
-        if attr.check_name("cold") {
-            unsafe {
-                llvm::LLVMAddFunctionAttribute(llfn,
-                                               llvm::FunctionIndex as c_uint,
-                                               llvm::ColdAttribute as u64)
-            }
-        } else if attr.check_name("allocator") {
-            llvm::Attribute::NoAlias.apply_llfn(llvm::ReturnIndex as c_uint, llfn);
-        } else if attr.check_name("unwind") {
-            unwind(llfn, true);
-        }
-    }
-}
-
-/// Composite function which converts a function type into LLVM attributes for the function.
-pub fn from_fn_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_type: ty::Ty<'tcx>)
-                              -> llvm::AttrBuilder {
-    use middle::ty::{BrAnon, ReLateBound};
-
-    let function_type;
-    let (fn_sig, abi, env_ty) = match fn_type.sty {
-        ty::TyBareFn(_, ref f) => (&f.sig, f.abi, None),
-        ty::TyClosure(closure_did, ref substs) => {
-            let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables);
-            function_type = infcx.closure_type(closure_did, substs);
-            let self_type = base::self_type_for_closure(ccx, closure_did, fn_type);
-            (&function_type.sig, Abi::RustCall, Some(self_type))
-        }
-        _ => ccx.sess().bug("expected closure or function.")
-    };
-
-    let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig);
-    let fn_sig = infer::normalize_associated_type(ccx.tcx(), &fn_sig);
-
-    let mut attrs = llvm::AttrBuilder::new();
-    let ret_ty = fn_sig.output;
-
-    // These have an odd calling convention, so we need to manually
-    // unpack the input tys
-    let input_tys = match fn_type.sty {
-        ty::TyClosure(..) => {
-            assert!(abi == Abi::RustCall);
-
-            match fn_sig.inputs[0].sty {
-                ty::TyTuple(ref inputs) => {
-                    let mut full_inputs = vec![env_ty.expect("Missing closure environment")];
-                    full_inputs.extend_from_slice(inputs);
-                    full_inputs
-                }
-                _ => ccx.sess().bug("expected tuple'd inputs")
-            }
-        },
-        ty::TyBareFn(..) if abi == Abi::RustCall => {
-            let mut inputs = vec![fn_sig.inputs[0]];
-
-            match fn_sig.inputs[1].sty {
-                ty::TyTuple(ref t_in) => {
-                    inputs.extend_from_slice(&t_in[..]);
-                    inputs
-                }
-                _ => ccx.sess().bug("expected tuple'd inputs")
-            }
-        }
-        _ => fn_sig.inputs.clone()
-    };
-
-    // Index 0 is the return value of the llvm func, so we start at 1
-    let mut idx = 1;
-    if let ty::FnConverging(ret_ty) = ret_ty {
-        // A function pointer is called without the declaration
-        // available, so we have to apply any attributes with ABI
-        // implications directly to the call instruction. Right now,
-        // the only attribute we need to worry about is `sret`.
-        if type_of::return_uses_outptr(ccx, ret_ty) {
-            let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, ret_ty));
-
-            // The outptr can be noalias and nocapture because it's entirely
-            // invisible to the program. We also know it's nonnull as well
-            // as how many bytes we can dereference
-            attrs.arg(1, llvm::Attribute::StructRet)
-                 .arg(1, llvm::Attribute::NoAlias)
-                 .arg(1, llvm::Attribute::NoCapture)
-                 .arg(1, llvm::DereferenceableAttribute(llret_sz));
-
-            // Add one more since there's an outptr
-            idx += 1;
-        } else {
-            // The `noalias` attribute on the return value is useful to a
-            // function ptr caller.
-            match ret_ty.sty {
-                // `Box` pointer return values never alias because ownership
-                // is transferred
-                ty::TyBox(it) if common::type_is_sized(ccx.tcx(), it) => {
-                    attrs.ret(llvm::Attribute::NoAlias);
-                }
-                _ => {}
-            }
-
-            // We can also mark the return value as `dereferenceable` in certain cases
-            match ret_ty.sty {
-                // These are not really pointers but pairs, (pointer, len)
-                ty::TyRef(_, ty::TypeAndMut { ty: inner, .. })
-                | ty::TyBox(inner) if common::type_is_sized(ccx.tcx(), inner) => {
-                    let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner));
-                    attrs.ret(llvm::DereferenceableAttribute(llret_sz));
-                }
-                _ => {}
-            }
-
-            if let ty::TyBool = ret_ty.sty {
-                attrs.ret(llvm::Attribute::ZExt);
-            }
-        }
-    }
-
-    for &t in input_tys.iter() {
-        match t.sty {
-            _ if type_of::arg_is_indirect(ccx, t) => {
-                let llarg_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, t));
-
-                // For non-immediate arguments the callee gets its own copy of
-                // the value on the stack, so there are no aliases. It's also
-                // program-invisible so can't possibly capture
-                attrs.arg(idx, llvm::Attribute::NoAlias)
-                     .arg(idx, llvm::Attribute::NoCapture)
-                     .arg(idx, llvm::DereferenceableAttribute(llarg_sz));
-            }
-
-            ty::TyBool => {
-                attrs.arg(idx, llvm::Attribute::ZExt);
-            }
-
-            // `Box` pointer parameters never alias because ownership is transferred
-            ty::TyBox(inner) => {
-                attrs.arg(idx, llvm::Attribute::NoAlias);
-
-                if common::type_is_sized(ccx.tcx(), inner) {
-                    let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner));
-                    attrs.arg(idx, llvm::DereferenceableAttribute(llsz));
-                } else {
-                    attrs.arg(idx, llvm::NonNullAttribute);
-                    if inner.is_trait() {
-                        attrs.arg(idx + 1, llvm::NonNullAttribute);
-                    }
-                }
-            }
-
-            ty::TyRef(b, mt) => {
-                // `&mut` pointer parameters never alias other parameters, or mutable global data
-                //
-                // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as
-                // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely
-                // on memory dependencies rather than pointer equality
-                let interior_unsafe = mt.ty.type_contents(ccx.tcx()).interior_unsafe();
-
-                if mt.mutbl != hir::MutMutable && !interior_unsafe {
-                    attrs.arg(idx, llvm::Attribute::NoAlias);
-                }
-
-                if mt.mutbl == hir::MutImmutable && !interior_unsafe {
-                    attrs.arg(idx, llvm::Attribute::ReadOnly);
-                }
-
-                // & pointer parameters are also never null and for sized types we also know
-                // exactly how many bytes we can dereference
-                if common::type_is_sized(ccx.tcx(), mt.ty) {
-                    let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
-                    attrs.arg(idx, llvm::DereferenceableAttribute(llsz));
-                } else {
-                    attrs.arg(idx, llvm::NonNullAttribute);
-                    if mt.ty.is_trait() {
-                        attrs.arg(idx + 1, llvm::NonNullAttribute);
-                    }
-                }
-
-                // When a reference in an argument has no named lifetime, it's
-                // impossible for that reference to escape this function
-                // (returned or stored beyond the call by a closure).
-                if let ReLateBound(_, BrAnon(_)) = *b {
-                    attrs.arg(idx, llvm::Attribute::NoCapture);
-                }
-            }
-
-            _ => ()
-        }
-
-        if common::type_is_fat_ptr(ccx.tcx(), t) {
-            idx += 2;
-        } else {
-            idx += 1;
-        }
-    }
-
-    attrs
-}
diff --git a/src/librustc_trans/trans/base.rs b/src/librustc_trans/trans/base.rs
deleted file mode 100644 (file)
index 161ab90..0000000
+++ /dev/null
@@ -1,3424 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-//! Translate the completed AST to the LLVM IR.
-//!
-//! Some functions here, such as trans_block and trans_expr, return a value --
-//! the result of the translation to LLVM -- while others, such as trans_fn,
-//! trans_impl, and trans_item, are called only for the side effect of adding a
-//! particular definition to the LLVM IR output we're producing.
-//!
-//! Hopefully useful general knowledge about trans:
-//!
-//!   * There's no way to find out the Ty type of a ValueRef.  Doing so
-//!     would be "trying to get the eggs out of an omelette" (credit:
-//!     pcwalton).  You can, instead, find out its TypeRef by calling val_ty,
-//!     but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int,
-//!     int) and rec(x=int, y=int, z=int) will have the same TypeRef.
-
-#![allow(non_camel_case_types)]
-
-pub use self::ValueOrigin::*;
-
-use super::CrateTranslation;
-use super::ModuleTranslation;
-
-use back::link::mangle_exported_name;
-use back::{link, abi};
-use lint;
-use llvm::{BasicBlockRef, Linkage, ValueRef, Vector, get_param};
-use llvm;
-use middle::cfg;
-use middle::cstore::CrateStore;
-use middle::def_id::DefId;
-use middle::infer;
-use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem};
-use middle::weak_lang_items;
-use middle::pat_util::simple_name;
-use middle::subst::{self, Substs};
-use middle::traits;
-use middle::ty::{self, Ty, TypeFoldable};
-use middle::ty::adjustment::CustomCoerceUnsized;
-use rustc::dep_graph::DepNode;
-use rustc::front::map as hir_map;
-use rustc::util::common::time;
-use rustc::mir::mir_map::MirMap;
-use session::config::{self, NoDebugInfo, FullDebugInfo};
-use session::Session;
-use trans::_match;
-use trans::adt;
-use trans::assert_dep_graph;
-use trans::attributes;
-use trans::build::*;
-use trans::builder::{Builder, noname};
-use trans::callee;
-use trans::cleanup::{self, CleanupMethods, DropHint};
-use trans::closure;
-use trans::common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_uint, C_integral};
-use trans::collector::{self, TransItem, TransItemState, TransItemCollectionMode};
-use trans::common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
-use trans::common::{CrateContext, DropFlagHintsMap, Field, FunctionContext};
-use trans::common::{Result, NodeIdAndSpan, VariantInfo};
-use trans::common::{node_id_type, return_type_is_void, fulfill_obligation};
-use trans::common::{type_is_immediate, type_is_zero_size, val_ty};
-use trans::common;
-use trans::consts;
-use trans::context::SharedCrateContext;
-use trans::controlflow;
-use trans::datum;
-use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
-use trans::declare;
-use trans::expr;
-use trans::foreign;
-use trans::glue;
-use trans::intrinsic;
-use trans::machine;
-use trans::machine::{llsize_of, llsize_of_real};
-use trans::meth;
-use trans::mir;
-use trans::monomorphize;
-use trans::tvec;
-use trans::type_::Type;
-use trans::type_of;
-use trans::type_of::*;
-use trans::value::Value;
-use trans::Disr;
-use util::common::indenter;
-use util::sha2::Sha256;
-use util::nodemap::{NodeMap, NodeSet};
-
-use arena::TypedArena;
-use libc::c_uint;
-use std::ffi::{CStr, CString};
-use std::cell::{Cell, RefCell};
-use std::collections::{HashMap, HashSet};
-use std::str;
-use std::{i8, i16, i32, i64};
-use syntax::abi::Abi;
-use syntax::codemap::{Span, DUMMY_SP};
-use syntax::parse::token::InternedString;
-use syntax::attr::AttrMetaMethods;
-use syntax::attr;
-use rustc_front;
-use rustc_front::intravisit::{self, Visitor};
-use rustc_front::hir;
-use syntax::ast;
-
-thread_local! {
-    static TASK_LOCAL_INSN_KEY: RefCell<Option<Vec<&'static str>>> = {
-        RefCell::new(None)
-    }
-}
-
-pub fn with_insn_ctxt<F>(blk: F)
-    where F: FnOnce(&[&'static str])
-{
-    TASK_LOCAL_INSN_KEY.with(move |slot| {
-        slot.borrow().as_ref().map(move |s| blk(s));
-    })
-}
-
-pub fn init_insn_ctxt() {
-    TASK_LOCAL_INSN_KEY.with(|slot| {
-        *slot.borrow_mut() = Some(Vec::new());
-    });
-}
-
-pub struct _InsnCtxt {
-    _cannot_construct_outside_of_this_module: (),
-}
-
-impl Drop for _InsnCtxt {
-    fn drop(&mut self) {
-        TASK_LOCAL_INSN_KEY.with(|slot| {
-            match slot.borrow_mut().as_mut() {
-                Some(ctx) => {
-                    ctx.pop();
-                }
-                None => {}
-            }
-        })
-    }
-}
-
-pub fn push_ctxt(s: &'static str) -> _InsnCtxt {
-    debug!("new InsnCtxt: {}", s);
-    TASK_LOCAL_INSN_KEY.with(|slot| {
-        if let Some(ctx) = slot.borrow_mut().as_mut() {
-            ctx.push(s)
-        }
-    });
-    _InsnCtxt {
-        _cannot_construct_outside_of_this_module: (),
-    }
-}
-
-pub struct StatRecorder<'a, 'tcx: 'a> {
-    ccx: &'a CrateContext<'a, 'tcx>,
-    name: Option<String>,
-    istart: usize,
-}
-
-impl<'a, 'tcx> StatRecorder<'a, 'tcx> {
-    pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String) -> StatRecorder<'a, 'tcx> {
-        let istart = ccx.stats().n_llvm_insns.get();
-        StatRecorder {
-            ccx: ccx,
-            name: Some(name),
-            istart: istart,
-        }
-    }
-}
-
-impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
-    fn drop(&mut self) {
-        if self.ccx.sess().trans_stats() {
-            let iend = self.ccx.stats().n_llvm_insns.get();
-            self.ccx
-                .stats()
-                .fn_stats
-                .borrow_mut()
-                .push((self.name.take().unwrap(), iend - self.istart));
-            self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1);
-            // Reset LLVM insn count to avoid compound costs.
-            self.ccx.stats().n_llvm_insns.set(self.istart);
-        }
-    }
-}
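
StatRecorder relies on the guard-object pattern: the constructor snapshots a counter and Drop records the delta. Below is a stripped-down, compiler-free sketch of that pattern; the Guard type and its fields are invented for illustration only.

    use std::cell::Cell;

    struct Guard<'a> {
        counter: &'a Cell<u64>,
        start: u64,
        name: &'static str,
    }

    impl<'a> Guard<'a> {
        fn new(counter: &'a Cell<u64>, name: &'static str) -> Guard<'a> {
            Guard { counter: counter, start: counter.get(), name: name }
        }
    }

    impl<'a> Drop for Guard<'a> {
        fn drop(&mut self) {
            // report how much the counter grew while the guard was alive
            println!("{}: {} units", self.name, self.counter.get() - self.start);
        }
    }

    fn main() {
        let insns = Cell::new(0u64);
        {
            let _g = Guard::new(&insns, "foo");
            insns.set(insns.get() + 42); // pretend we emitted 42 instructions
        } // guard drops here and prints "foo: 42 units"
    }
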
-
-fn get_extern_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                fn_ty: Ty<'tcx>,
-                                name: &str,
-                                did: DefId)
-                                -> ValueRef {
-    if let Some(n) = ccx.externs().borrow().get(name) {
-        return *n;
-    }
-
-    let f = declare::declare_rust_fn(ccx, name, fn_ty);
-
-    let attrs = ccx.sess().cstore.item_attrs(did);
-    attributes::from_fn_attrs(ccx, &attrs[..], f);
-
-    ccx.externs().borrow_mut().insert(name.to_string(), f);
-    f
-}
-
-pub fn self_type_for_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                       closure_id: DefId,
-                                       fn_ty: Ty<'tcx>)
-                                       -> Ty<'tcx> {
-    let closure_kind = ccx.tcx().closure_kind(closure_id);
-    match closure_kind {
-        ty::FnClosureKind => {
-            ccx.tcx().mk_imm_ref(ccx.tcx().mk_region(ty::ReStatic), fn_ty)
-        }
-        ty::FnMutClosureKind => {
-            ccx.tcx().mk_mut_ref(ccx.tcx().mk_region(ty::ReStatic), fn_ty)
-        }
-        ty::FnOnceClosureKind => fn_ty,
-    }
-}
-
-pub fn kind_for_closure(ccx: &CrateContext, closure_id: DefId) -> ty::ClosureKind {
-    *ccx.tcx().tables.borrow().closure_kinds.get(&closure_id).unwrap()
-}
-
-pub fn get_extern_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                  did: DefId,
-                                  t: Ty<'tcx>)
-                                  -> ValueRef {
-    let name = ccx.sess().cstore.item_symbol(did);
-    let ty = type_of(ccx, t);
-    if let Some(n) = ccx.externs().borrow_mut().get(&name) {
-        return *n;
-    }
-    // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
-    // FIXME(nagisa): investigate whether it can be changed into define_global
-    let c = declare::declare_global(ccx, &name[..], ty);
-    // Thread-local statics in some other crate need to *always* be linked
-    // against in a thread-local fashion, so we need to be sure to apply the
-    // thread-local attribute locally if it was present remotely. If we
-    // don't do this then linker errors can be generated where the linker
-    // complains that one object file has a thread local version of the
-    // symbol and another one doesn't.
-    for attr in ccx.tcx().get_attrs(did).iter() {
-        if attr.check_name("thread_local") {
-            llvm::set_thread_local(c, true);
-        }
-    }
-    if ccx.use_dll_storage_attrs() {
-        llvm::SetDLLStorageClass(c, llvm::DLLImportStorageClass);
-    }
-    ccx.externs().borrow_mut().insert(name.to_string(), c);
-    return c;
-}
-
-fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId {
-    match bcx.tcx().lang_items.require(it) {
-        Ok(id) => id,
-        Err(s) => {
-            bcx.sess().fatal(&format!("allocation of `{}` {}", info_ty, s));
-        }
-    }
-}
-
-// The following malloc_raw_dyn* functions allocate a box to contain
-// a given type, but with a potentially dynamic size.
-
-pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                  llty_ptr: Type,
-                                  info_ty: Ty<'tcx>,
-                                  size: ValueRef,
-                                  align: ValueRef,
-                                  debug_loc: DebugLoc)
-                                  -> Result<'blk, 'tcx> {
-    let _icx = push_ctxt("malloc_raw_exchange");
-
-    // Allocate space:
-    let r = callee::trans_lang_call(bcx,
-                                    require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem),
-                                    &[size, align],
-                                    None,
-                                    debug_loc);
-
-    Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
-}
-
-
-pub fn bin_op_to_icmp_predicate(ccx: &CrateContext,
-                                op: hir::BinOp_,
-                                signed: bool)
-                                -> llvm::IntPredicate {
-    match op {
-        hir::BiEq => llvm::IntEQ,
-        hir::BiNe => llvm::IntNE,
-        hir::BiLt => if signed { llvm::IntSLT } else { llvm::IntULT },
-        hir::BiLe => if signed { llvm::IntSLE } else { llvm::IntULE },
-        hir::BiGt => if signed { llvm::IntSGT } else { llvm::IntUGT },
-        hir::BiGe => if signed { llvm::IntSGE } else { llvm::IntUGE },
-        op => {
-            ccx.sess()
-               .bug(&format!("comparison_op_to_icmp_predicate: expected comparison operator, \
-                              found {:?}",
-                             op));
-        }
-    }
-}
-
-pub fn bin_op_to_fcmp_predicate(ccx: &CrateContext, op: hir::BinOp_) -> llvm::RealPredicate {
-    match op {
-        hir::BiEq => llvm::RealOEQ,
-        hir::BiNe => llvm::RealUNE,
-        hir::BiLt => llvm::RealOLT,
-        hir::BiLe => llvm::RealOLE,
-        hir::BiGt => llvm::RealOGT,
-        hir::BiGe => llvm::RealOGE,
-        op => {
-            ccx.sess()
-               .bug(&format!("comparison_op_to_fcmp_predicate: expected comparison operator, \
-                              found {:?}",
-                             op));
-        }
-    }
-}
-
-pub fn compare_fat_ptrs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                    lhs_addr: ValueRef,
-                                    lhs_extra: ValueRef,
-                                    rhs_addr: ValueRef,
-                                    rhs_extra: ValueRef,
-                                    _t: Ty<'tcx>,
-                                    op: hir::BinOp_,
-                                    debug_loc: DebugLoc)
-                                    -> ValueRef {
-    match op {
-        hir::BiEq => {
-            let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc);
-            let extra_eq = ICmp(bcx, llvm::IntEQ, lhs_extra, rhs_extra, debug_loc);
-            And(bcx, addr_eq, extra_eq, debug_loc)
-        }
-        hir::BiNe => {
-            let addr_eq = ICmp(bcx, llvm::IntNE, lhs_addr, rhs_addr, debug_loc);
-            let extra_eq = ICmp(bcx, llvm::IntNE, lhs_extra, rhs_extra, debug_loc);
-            Or(bcx, addr_eq, extra_eq, debug_loc)
-        }
-        hir::BiLe | hir::BiLt | hir::BiGe | hir::BiGt => {
-            // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
-            let (op, strict_op) = match op {
-                hir::BiLt => (llvm::IntULT, llvm::IntULT),
-                hir::BiLe => (llvm::IntULE, llvm::IntULT),
-                hir::BiGt => (llvm::IntUGT, llvm::IntUGT),
-                hir::BiGe => (llvm::IntUGE, llvm::IntUGT),
-                _ => unreachable!(),
-            };
-
-            let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc);
-            let extra_op = ICmp(bcx, op, lhs_extra, rhs_extra, debug_loc);
-            let addr_eq_extra_op = And(bcx, addr_eq, extra_op, debug_loc);
-
-            let addr_strict = ICmp(bcx, strict_op, lhs_addr, rhs_addr, debug_loc);
-            Or(bcx, addr_strict, addr_eq_extra_op, debug_loc)
-        }
-        _ => {
-            bcx.tcx().sess.bug("unexpected fat ptr binop");
-        }
-    }
-}
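
The ordering arms above implement the lexicographic formula from the comment: a OP b holds iff the addresses compare strictly, or the addresses are equal and the extras compare with the (possibly non-strict) operator. A plain-Rust sketch of the `<` case, with tuples standing in for (address, extra) fat-pointer pairs and a made-up name `fat_lt`:

    fn fat_lt(a: (usize, usize), b: (usize, usize)) -> bool {
        a.0 < b.0 || (a.0 == b.0 && a.1 < b.1)
    }

    fn main() {
        assert!(fat_lt((0x1000, 3), (0x1000, 4)));  // same address, smaller extra
        assert!(fat_lt((0x1000, 9), (0x2000, 0)));  // strictly smaller address wins
        assert!(!fat_lt((0x1000, 4), (0x1000, 4))); // equal pairs are not less
        println!("ok");
    }
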
-
-pub fn compare_scalar_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                        lhs: ValueRef,
-                                        rhs: ValueRef,
-                                        t: Ty<'tcx>,
-                                        op: hir::BinOp_,
-                                        debug_loc: DebugLoc)
-                                        -> ValueRef {
-    match t.sty {
-        ty::TyTuple(ref tys) if tys.is_empty() => {
-            // We don't need to do actual comparisons for nil.
-            // () == () holds but () < () does not.
-            match op {
-                hir::BiEq | hir::BiLe | hir::BiGe => return C_bool(bcx.ccx(), true),
-                hir::BiNe | hir::BiLt | hir::BiGt => return C_bool(bcx.ccx(), false),
-                // refinements would be nice
-                _ => bcx.sess().bug("compare_scalar_types: must be a comparison operator"),
-            }
-        }
-        ty::TyBareFn(..) | ty::TyBool | ty::TyUint(_) | ty::TyChar => {
-            ICmp(bcx,
-                 bin_op_to_icmp_predicate(bcx.ccx(), op, false),
-                 lhs,
-                 rhs,
-                 debug_loc)
-        }
-        ty::TyRawPtr(mt) if common::type_is_sized(bcx.tcx(), mt.ty) => {
-            ICmp(bcx,
-                 bin_op_to_icmp_predicate(bcx.ccx(), op, false),
-                 lhs,
-                 rhs,
-                 debug_loc)
-        }
-        ty::TyRawPtr(_) => {
-            let lhs_addr = Load(bcx, GEPi(bcx, lhs, &[0, abi::FAT_PTR_ADDR]));
-            let lhs_extra = Load(bcx, GEPi(bcx, lhs, &[0, abi::FAT_PTR_EXTRA]));
-
-            let rhs_addr = Load(bcx, GEPi(bcx, rhs, &[0, abi::FAT_PTR_ADDR]));
-            let rhs_extra = Load(bcx, GEPi(bcx, rhs, &[0, abi::FAT_PTR_EXTRA]));
-            compare_fat_ptrs(bcx,
-                             lhs_addr,
-                             lhs_extra,
-                             rhs_addr,
-                             rhs_extra,
-                             t,
-                             op,
-                             debug_loc)
-        }
-        ty::TyInt(_) => {
-            ICmp(bcx,
-                 bin_op_to_icmp_predicate(bcx.ccx(), op, true),
-                 lhs,
-                 rhs,
-                 debug_loc)
-        }
-        ty::TyFloat(_) => {
-            FCmp(bcx,
-                 bin_op_to_fcmp_predicate(bcx.ccx(), op),
-                 lhs,
-                 rhs,
-                 debug_loc)
-        }
-        // Should never get here, because t is scalar.
-        _ => bcx.sess().bug("non-scalar type passed to compare_scalar_types"),
-    }
-}
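
The nil arm above notes that `() == ()` holds while `() < ()` does not, which matches the behavior of the standard comparison operators on the unit type; a one-line check:

    fn main() {
        assert!(() == ());
        assert!(() <= ());
        assert!(!(() < ()));
        println!("ok");
    }
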
-
-pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                      lhs: ValueRef,
-                                      rhs: ValueRef,
-                                      t: Ty<'tcx>,
-                                      ret_ty: Type,
-                                      op: hir::BinOp_,
-                                      debug_loc: DebugLoc)
-                                      -> ValueRef {
-    let signed = match t.sty {
-        ty::TyFloat(_) => {
-            let cmp = bin_op_to_fcmp_predicate(bcx.ccx(), op);
-            return SExt(bcx, FCmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty);
-        },
-        ty::TyUint(_) => false,
-        ty::TyInt(_) => true,
-        _ => bcx.sess().bug("compare_simd_types: invalid SIMD type"),
-    };
-
-    let cmp = bin_op_to_icmp_predicate(bcx.ccx(), op, signed);
-    // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
-    // to get the correctly sized type. This will compile to a single instruction
-    // once the IR is converted to assembly if the SIMD instruction is supported
-    // by the target architecture.
-    SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty)
-}
-
-// Iterates through the elements of a structural type.
-pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
-                                         av: ValueRef,
-                                         t: Ty<'tcx>,
-                                         mut f: F)
-                                         -> Block<'blk, 'tcx>
-    where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>
-{
-    let _icx = push_ctxt("iter_structural_ty");
-
-    fn iter_variant<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
-                                   repr: &adt::Repr<'tcx>,
-                                   av: adt::MaybeSizedValue,
-                                   variant: ty::VariantDef<'tcx>,
-                                   substs: &Substs<'tcx>,
-                                   f: &mut F)
-                                   -> Block<'blk, 'tcx>
-        where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>
-    {
-        let _icx = push_ctxt("iter_variant");
-        let tcx = cx.tcx();
-        let mut cx = cx;
-
-        for (i, field) in variant.fields.iter().enumerate() {
-            let arg = monomorphize::field_ty(tcx, substs, field);
-            cx = f(cx,
-                   adt::trans_field_ptr(cx, repr, av, Disr::from(variant.disr_val), i),
-                   arg);
-        }
-        return cx;
-    }
-
-    let value = if common::type_is_sized(cx.tcx(), t) {
-        adt::MaybeSizedValue::sized(av)
-    } else {
-        let data = Load(cx, expr::get_dataptr(cx, av));
-        let info = Load(cx, expr::get_meta(cx, av));
-        adt::MaybeSizedValue::unsized_(data, info)
-    };
-
-    let mut cx = cx;
-    match t.sty {
-        ty::TyStruct(..) => {
-            let repr = adt::represent_type(cx.ccx(), t);
-            let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
-            for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
-                let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr::from(discr), i);
-
-                let val = if common::type_is_sized(cx.tcx(), field_ty) {
-                    llfld_a
-                } else {
-                    let scratch = datum::rvalue_scratch_datum(cx, field_ty, "__fat_ptr_iter");
-                    Store(cx, llfld_a, expr::get_dataptr(cx, scratch.val));
-                    Store(cx, value.meta, expr::get_meta(cx, scratch.val));
-                    scratch.val
-                };
-                cx = f(cx, val, field_ty);
-            }
-        }
-        ty::TyClosure(_, ref substs) => {
-            let repr = adt::represent_type(cx.ccx(), t);
-            for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() {
-                let llupvar = adt::trans_field_ptr(cx, &repr, value, Disr(0), i);
-                cx = f(cx, llupvar, upvar_ty);
-            }
-        }
-        ty::TyArray(_, n) => {
-            let (base, len) = tvec::get_fixed_base_and_len(cx, value.value, n);
-            let unit_ty = t.sequence_element_type(cx.tcx());
-            cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
-        }
-        ty::TySlice(_) | ty::TyStr => {
-            let unit_ty = t.sequence_element_type(cx.tcx());
-            cx = tvec::iter_vec_raw(cx, value.value, unit_ty, value.meta, f);
-        }
-        ty::TyTuple(ref args) => {
-            let repr = adt::represent_type(cx.ccx(), t);
-            for (i, arg) in args.iter().enumerate() {
-                let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr(0), i);
-                cx = f(cx, llfld_a, *arg);
-            }
-        }
-        ty::TyEnum(en, substs) => {
-            let fcx = cx.fcx;
-            let ccx = fcx.ccx;
-
-            let repr = adt::represent_type(ccx, t);
-            let n_variants = en.variants.len();
-
-            // NB: we must hit the discriminant first so that structural
-            // comparison knows not to proceed when the discriminants differ.
-
-            match adt::trans_switch(cx, &repr, av, false) {
-                (_match::Single, None) => {
-                    if n_variants != 0 {
-                        assert!(n_variants == 1);
-                        cx = iter_variant(cx, &repr, adt::MaybeSizedValue::sized(av),
-                                          &en.variants[0], substs, &mut f);
-                    }
-                }
-                (_match::Switch, Some(lldiscrim_a)) => {
-                    cx = f(cx, lldiscrim_a, cx.tcx().types.isize);
-
-                    // Create a fall-through basic block for the "else" case of
-                    // the switch instruction we're about to generate. Note that
-                    // we do **not** use an Unreachable instruction here, even
-                    // though most of the time this basic block will never be hit.
-                    //
-                    // When an enum is dropped, its contents are currently
-                    // overwritten to DTOR_DONE, which means the discriminant
-                    // could have changed value to something not within the actual
-                    // range of the discriminant. Currently this function is only
-                    // used for drop glue so in this case we just return quickly
-                    // from the outer function, and any other use case will only
-                    // call this for an already-valid enum in which case the `ret
-                    // void` will never be hit.
-                    let ret_void_cx = fcx.new_temp_block("enum-iter-ret-void");
-                    RetVoid(ret_void_cx, DebugLoc::None);
-                    let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants);
-                    let next_cx = fcx.new_temp_block("enum-iter-next");
-
-                    for variant in &en.variants {
-                        let variant_cx = fcx.new_temp_block(&format!("enum-iter-variant-{}",
-                                                                     &variant.disr_val
-                                                                             .to_string()));
-                        let case_val = adt::trans_case(cx, &repr, Disr::from(variant.disr_val));
-                        AddCase(llswitch, case_val, variant_cx.llbb);
-                        let variant_cx = iter_variant(variant_cx,
-                                                      &repr,
-                                                      value,
-                                                      variant,
-                                                      substs,
-                                                      &mut f);
-                        Br(variant_cx, next_cx.llbb, DebugLoc::None);
-                    }
-                    cx = next_cx;
-                }
-                _ => ccx.sess().unimpl("value from adt::trans_switch in iter_structural_ty"),
-            }
-        }
-        _ => {
-            cx.sess().unimpl(&format!("type in iter_structural_ty: {}", t))
-        }
-    }
-    return cx;
-}
-
-
-/// Retrieve the information we are losing (making dynamic) in an unsizing
-/// adjustment.
-///
-/// The `old_info` argument is a bit funny. It is intended for use
-/// in an upcast, where the new vtable for an object will be derived
-/// from the old one.
-pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
-                                source: Ty<'tcx>,
-                                target: Ty<'tcx>,
-                                old_info: Option<ValueRef>,
-                                param_substs: &'tcx Substs<'tcx>)
-                                -> ValueRef {
-    let (source, target) = ccx.tcx().struct_lockstep_tails(source, target);
-    match (&source.sty, &target.sty) {
-        (&ty::TyArray(_, len), &ty::TySlice(_)) => C_uint(ccx, len),
-        (&ty::TyTrait(_), &ty::TyTrait(_)) => {
-            // For now, upcasts are limited to changes in marker
-            // traits, and hence never actually require a
-            // change to the vtable.
-            old_info.expect("unsized_info: missing old info for trait upcast")
-        }
-        (_, &ty::TyTrait(box ty::TraitTy { ref principal, .. })) => {
-            // Note that we preserve binding levels here:
-            let substs = principal.0.substs.with_self_ty(source).erase_regions();
-            let substs = ccx.tcx().mk_substs(substs);
-            let trait_ref = ty::Binder(ty::TraitRef {
-                def_id: principal.def_id(),
-                substs: substs,
-            });
-            consts::ptrcast(meth::get_vtable(ccx, trait_ref, param_substs),
-                            Type::vtable_ptr(ccx))
-        }
-        _ => ccx.sess().bug(&format!("unsized_info: invalid unsizing {:?} -> {:?}",
-                                     source,
-                                     target)),
-    }
-}
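
For the array-to-slice case above, the "info" is simply the compile-time length, which surfaces as the second word of the resulting fat pointer. A small demonstration from safe Rust:

    fn main() {
        let arr: [u8; 4] = [1, 2, 3, 4];
        let slice: &[u8] = &arr;    // unsizing coercion: &[u8; 4] -> &[u8]
        assert_eq!(slice.len(), 4); // the length is the fat pointer's extra word
        println!("ok");
    }
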
-
-/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
-pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                   src: ValueRef,
-                                   src_ty: Ty<'tcx>,
-                                   dst_ty: Ty<'tcx>)
-                                   -> (ValueRef, ValueRef) {
-    debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
-    match (&src_ty.sty, &dst_ty.sty) {
-        (&ty::TyBox(a), &ty::TyBox(b)) |
-        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
-         &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
-        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
-         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
-        (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
-         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
-            assert!(common::type_is_sized(bcx.tcx(), a));
-            let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), b).ptr_to();
-            (PointerCast(bcx, src, ptr_ty),
-             unsized_info(bcx.ccx(), a, b, None, bcx.fcx.param_substs))
-        }
-        _ => bcx.sess().bug("unsize_thin_ptr: called on bad types"),
-    }
-}
-
-/// Coerce `src`, which is a reference to a value of type `src_ty`,
-/// to a value of type `dst_ty` and store the result in `dst`
-pub fn coerce_unsized_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                       src: ValueRef,
-                                       src_ty: Ty<'tcx>,
-                                       dst: ValueRef,
-                                       dst_ty: Ty<'tcx>) {
-    match (&src_ty.sty, &dst_ty.sty) {
-        (&ty::TyBox(..), &ty::TyBox(..)) |
-        (&ty::TyRef(..), &ty::TyRef(..)) |
-        (&ty::TyRef(..), &ty::TyRawPtr(..)) |
-        (&ty::TyRawPtr(..), &ty::TyRawPtr(..)) => {
-            let (base, info) = if common::type_is_fat_ptr(bcx.tcx(), src_ty) {
-                // fat-ptr to fat-ptr unsize preserves the vtable
-                load_fat_ptr(bcx, src, src_ty)
-            } else {
-                let base = load_ty(bcx, src, src_ty);
-                unsize_thin_ptr(bcx, base, src_ty, dst_ty)
-            };
-            store_fat_ptr(bcx, base, info, dst, dst_ty);
-        }
-
-        // This can be extended to enums and tuples in the future.
-        // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
-        (&ty::TyStruct(def_a, _), &ty::TyStruct(def_b, _)) => {
-            assert_eq!(def_a, def_b);
-
-            let src_repr = adt::represent_type(bcx.ccx(), src_ty);
-            let src_fields = match &*src_repr {
-                &adt::Repr::Univariant(ref s, _) => &s.fields,
-                _ => bcx.sess().bug("struct has non-univariant repr"),
-            };
-            let dst_repr = adt::represent_type(bcx.ccx(), dst_ty);
-            let dst_fields = match &*dst_repr {
-                &adt::Repr::Univariant(ref s, _) => &s.fields,
-                _ => bcx.sess().bug("struct has non-univariant repr"),
-            };
-
-            let src = adt::MaybeSizedValue::sized(src);
-            let dst = adt::MaybeSizedValue::sized(dst);
-
-            let iter = src_fields.iter().zip(dst_fields).enumerate();
-            for (i, (src_fty, dst_fty)) in iter {
-                if type_is_zero_size(bcx.ccx(), dst_fty) {
-                    continue;
-                }
-
-                let src_f = adt::trans_field_ptr(bcx, &src_repr, src, Disr(0), i);
-                let dst_f = adt::trans_field_ptr(bcx, &dst_repr, dst, Disr(0), i);
-                if src_fty == dst_fty {
-                    memcpy_ty(bcx, dst_f, src_f, src_fty);
-                } else {
-                    coerce_unsized_into(bcx, src_f, src_fty, dst_f, dst_fty);
-                }
-            }
-        }
-        _ => bcx.sess().bug(&format!("coerce_unsized_into: invalid coercion {:?} -> {:?}",
-                                     src_ty,
-                                     dst_ty)),
-    }
-}
-
-pub fn custom_coerce_unsize_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
-                                             source_ty: Ty<'tcx>,
-                                             target_ty: Ty<'tcx>)
-                                             -> CustomCoerceUnsized {
-    let trait_substs = Substs::erased(subst::VecPerParamSpace::new(vec![target_ty],
-                                                                   vec![source_ty],
-                                                                   Vec::new()));
-    let trait_ref = ty::Binder(ty::TraitRef {
-        def_id: ccx.tcx().lang_items.coerce_unsized_trait().unwrap(),
-        substs: ccx.tcx().mk_substs(trait_substs)
-    });
-
-    match fulfill_obligation(ccx, DUMMY_SP, trait_ref) {
-        traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
-            ccx.tcx().custom_coerce_unsized_kind(impl_def_id)
-        }
-        vtable => {
-            ccx.sess().bug(&format!("invalid CoerceUnsized vtable: {:?}",
-                                    vtable));
-        }
-    }
-}
-
-pub fn cast_shift_expr_rhs(cx: Block, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-    cast_shift_rhs(op, lhs, rhs, |a, b| Trunc(cx, a, b), |a, b| ZExt(cx, a, b))
-}
-
-pub fn cast_shift_const_rhs(op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-    cast_shift_rhs(op,
-                   lhs,
-                   rhs,
-                   |a, b| unsafe { llvm::LLVMConstTrunc(a, b.to_ref()) },
-                   |a, b| unsafe { llvm::LLVMConstZExt(a, b.to_ref()) })
-}
-
-fn cast_shift_rhs<F, G>(op: hir::BinOp_,
-                        lhs: ValueRef,
-                        rhs: ValueRef,
-                        trunc: F,
-                        zext: G)
-                        -> ValueRef
-    where F: FnOnce(ValueRef, Type) -> ValueRef,
-          G: FnOnce(ValueRef, Type) -> ValueRef
-{
-    // Shifts may have any size int on the rhs
-    if rustc_front::util::is_shift_binop(op) {
-        let mut rhs_llty = val_ty(rhs);
-        let mut lhs_llty = val_ty(lhs);
-        if rhs_llty.kind() == Vector {
-            rhs_llty = rhs_llty.element_type()
-        }
-        if lhs_llty.kind() == Vector {
-            lhs_llty = lhs_llty.element_type()
-        }
-        let rhs_sz = rhs_llty.int_width();
-        let lhs_sz = lhs_llty.int_width();
-        if lhs_sz < rhs_sz {
-            trunc(rhs, lhs_llty)
-        } else if lhs_sz > rhs_sz {
-            // FIXME (#1877): If shifting by negative
-            // values ever becomes defined behavior, then this is wrong.
-            zext(rhs, lhs_llty)
-        } else {
-            rhs
-        }
-    } else {
-        rhs
-    }
-}
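
The width adjustment above can be pictured with concrete integer types: a shift amount wider than the left-hand side is truncated to that width, while a narrower one is zero-extended (which leaves the value unchanged). The helper `fix_rhs_width` below is a hypothetical stand-in for the trunc/zext closures passed into cast_shift_rhs.

    fn fix_rhs_width(lhs_bits: u32, rhs: u64, rhs_bits: u32) -> u64 {
        if lhs_bits < rhs_bits {
            rhs & ((1u64 << lhs_bits) - 1) // truncate the shift amount to the lhs width
        } else {
            rhs // zero-extension does not change the value
        }
    }

    fn main() {
        // shifting a 16-bit lhs by a 64-bit amount: only the low 16 bits survive
        assert_eq!(fix_rhs_width(16, 0xffff_0003, 64), 0x0003);
        // shifting a 64-bit lhs by an 8-bit amount: the value is unchanged
        assert_eq!(fix_rhs_width(64, 7, 8), 7);
        println!("ok");
    }
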
-
-pub fn llty_and_min_for_signed_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
-                                              val_t: Ty<'tcx>)
-                                              -> (Type, u64) {
-    match val_t.sty {
-        ty::TyInt(t) => {
-            let llty = Type::int_from_ty(cx.ccx(), t);
-            let min = match t {
-                ast::IntTy::Is if llty == Type::i32(cx.ccx()) => i32::MIN as u64,
-                ast::IntTy::Is => i64::MIN as u64,
-                ast::IntTy::I8 => i8::MIN as u64,
-                ast::IntTy::I16 => i16::MIN as u64,
-                ast::IntTy::I32 => i32::MIN as u64,
-                ast::IntTy::I64 => i64::MIN as u64,
-            };
-            (llty, min)
-        }
-        _ => unreachable!(),
-    }
-}
-
-pub fn fail_if_zero_or_overflows<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
-                                             call_info: NodeIdAndSpan,
-                                             divrem: hir::BinOp,
-                                             lhs: ValueRef,
-                                             rhs: ValueRef,
-                                             rhs_t: Ty<'tcx>)
-                                             -> Block<'blk, 'tcx> {
-    let (zero_text, overflow_text) = if divrem.node == hir::BiDiv {
-        ("attempted to divide by zero",
-         "attempted to divide with overflow")
-    } else {
-        ("attempted remainder with a divisor of zero",
-         "attempted remainder with overflow")
-    };
-    let debug_loc = call_info.debug_loc();
-
-    let (is_zero, is_signed) = match rhs_t.sty {
-        ty::TyInt(t) => {
-            let zero = C_integral(Type::int_from_ty(cx.ccx(), t), 0, false);
-            (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), true)
-        }
-        ty::TyUint(t) => {
-            let zero = C_integral(Type::uint_from_ty(cx.ccx(), t), 0, false);
-            (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), false)
-        }
-        ty::TyStruct(def, _) if def.is_simd() => {
-            let mut res = C_bool(cx.ccx(), false);
-            for i in 0..rhs_t.simd_size(cx.tcx()) {
-                res = Or(cx,
-                         res,
-                         IsNull(cx, ExtractElement(cx, rhs, C_int(cx.ccx(), i as i64))),
-                         debug_loc);
-            }
-            (res, false)
-        }
-        _ => {
-            cx.sess().bug(&format!("fail-if-zero on unexpected type: {}", rhs_t));
-        }
-    };
-    let bcx = with_cond(cx, is_zero, |bcx| {
-        controlflow::trans_fail(bcx, call_info, InternedString::new(zero_text))
-    });
-
-    // To quote LLVM's documentation for the sdiv instruction:
-    //
-    //      Division by zero leads to undefined behavior. Overflow also leads
-    //      to undefined behavior; this is a rare case, but can occur, for
-    //      example, by doing a 32-bit division of -2147483648 by -1.
-    //
-    // In order to avoid undefined behavior, we perform runtime checks for
-    // signed division/remainder which would trigger overflow. For unsigned
-    // integers, no action beyond checking for zero need be taken.
-    if is_signed {
-        let (llty, min) = llty_and_min_for_signed_ty(cx, rhs_t);
-        let minus_one = ICmp(bcx,
-                             llvm::IntEQ,
-                             rhs,
-                             C_integral(llty, !0, false),
-                             debug_loc);
-        with_cond(bcx, minus_one, |bcx| {
-            let is_min = ICmp(bcx,
-                              llvm::IntEQ,
-                              lhs,
-                              C_integral(llty, min, true),
-                              debug_loc);
-            with_cond(bcx, is_min, |bcx| {
-                controlflow::trans_fail(bcx, call_info, InternedString::new(overflow_text))
-            })
-        })
-    } else {
-        bcx
-    }
-}
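
In plain Rust, the two guards described above amount to rejecting a zero divisor and, for signed integers, the single overflowing combination MIN / -1 before the division happens. The function `checked_signed_div` is invented for this sketch and is not the compiler's API.

    fn checked_signed_div(lhs: i32, rhs: i32) -> i32 {
        if rhs == 0 {
            panic!("attempted to divide by zero");
        }
        if rhs == -1 && lhs == i32::min_value() {
            panic!("attempted to divide with overflow"); // would be undefined in LLVM's sdiv
        }
        lhs / rhs
    }

    fn main() {
        assert_eq!(checked_signed_div(-8, 2), -4);
        // checked_signed_div(i32::min_value(), -1) would panic instead of overflowing
        println!("ok");
    }
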
-
-pub fn trans_external_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                     did: DefId,
-                                     t: Ty<'tcx>)
-                                     -> ValueRef {
-    let name = ccx.sess().cstore.item_symbol(did);
-    match t.sty {
-        ty::TyBareFn(_, ref fn_ty) => {
-            match ccx.sess().target.target.adjust_abi(fn_ty.abi) {
-                Abi::Rust | Abi::RustCall => {
-                    get_extern_rust_fn(ccx, t, &name[..], did)
-                }
-                Abi::RustIntrinsic | Abi::PlatformIntrinsic => {
-                    ccx.sess().bug("unexpected intrinsic in trans_external_path")
-                }
-                _ => {
-                    let attrs = ccx.sess().cstore.item_attrs(did);
-                    foreign::register_foreign_item_fn(ccx, fn_ty.abi, t, &name, &attrs)
-                }
-            }
-        }
-        _ => {
-            get_extern_const(ccx, did, t)
-        }
-    }
-}
-
-pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                          llfn: ValueRef,
-                          llargs: &[ValueRef],
-                          fn_ty: Ty<'tcx>,
-                          debug_loc: DebugLoc)
-                          -> (ValueRef, Block<'blk, 'tcx>) {
-    let _icx = push_ctxt("invoke_");
-    if bcx.unreachable.get() {
-        return (C_null(Type::i8(bcx.ccx())), bcx);
-    }
-
-    let attributes = attributes::from_fn_type(bcx.ccx(), fn_ty);
-
-    match bcx.opt_node_id {
-        None => {
-            debug!("invoke at ???");
-        }
-        Some(id) => {
-            debug!("invoke at {}", bcx.tcx().map.node_to_string(id));
-        }
-    }
-
-    if need_invoke(bcx) {
-        debug!("invoking {} at {:?}", bcx.val_to_string(llfn), bcx.llbb);
-        for &llarg in llargs {
-            debug!("arg: {}", bcx.val_to_string(llarg));
-        }
-        let normal_bcx = bcx.fcx.new_temp_block("normal-return");
-        let landing_pad = bcx.fcx.get_landing_pad();
-
-        let llresult = Invoke(bcx,
-                              llfn,
-                              &llargs[..],
-                              normal_bcx.llbb,
-                              landing_pad,
-                              Some(attributes),
-                              debug_loc);
-        return (llresult, normal_bcx);
-    } else {
-        debug!("calling {} at {:?}", bcx.val_to_string(llfn), bcx.llbb);
-        for &llarg in llargs {
-            debug!("arg: {}", bcx.val_to_string(llarg));
-        }
-
-        let llresult = Call(bcx, llfn, &llargs[..], Some(attributes), debug_loc);
-        return (llresult, bcx);
-    }
-}
-
-/// Returns whether this session's target will use SEH-based unwinding.
-///
-/// This is only true for MSVC targets, and even then the 64-bit MSVC target
-/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
-/// 64-bit MinGW) instead of "full SEH".
-pub fn wants_msvc_seh(sess: &Session) -> bool {
-    sess.target.target.options.is_like_msvc
-}
-
-pub fn avoid_invoke(bcx: Block) -> bool {
-    bcx.sess().no_landing_pads() || bcx.lpad().is_some()
-}
-
-pub fn need_invoke(bcx: Block) -> bool {
-    if avoid_invoke(bcx) {
-        false
-    } else {
-        bcx.fcx.needs_invoke()
-    }
-}
-
-pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>) -> ValueRef {
-    let _icx = push_ctxt("load_if_immediate");
-    if type_is_immediate(cx.ccx(), t) {
-        return load_ty(cx, v, t);
-    }
-    return v;
-}
-
-/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
-/// differs from the type used for SSA values. Also handles various special cases where the type
-/// gives us better information about what we are loading.
-pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
-    if cx.unreachable.get() || type_is_zero_size(cx.ccx(), t) {
-        return C_undef(type_of::type_of(cx.ccx(), t));
-    }
-
-    let ptr = to_arg_ty_ptr(cx, ptr, t);
-    let align = type_of::align_of(cx.ccx(), t);
-
-    if type_is_immediate(cx.ccx(), t) && type_of::type_of(cx.ccx(), t).is_aggregate() {
-        let load = Load(cx, ptr);
-        unsafe {
-            llvm::LLVMSetAlignment(load, align);
-        }
-        return load;
-    }
-
-    unsafe {
-        let global = llvm::LLVMIsAGlobalVariable(ptr);
-        if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
-            let val = llvm::LLVMGetInitializer(global);
-            if !val.is_null() {
-                return to_arg_ty(cx, val, t);
-            }
-        }
-    }
-
-    let val = if t.is_bool() {
-        LoadRangeAssert(cx, ptr, 0, 2, llvm::False)
-    } else if t.is_char() {
-        // a char is a Unicode codepoint, and so takes values from 0
-        // to 0x10FFFF inclusive only.
-        LoadRangeAssert(cx, ptr, 0, 0x10FFFF + 1, llvm::False)
-    } else if (t.is_region_ptr() || t.is_unique()) && !common::type_is_fat_ptr(cx.tcx(), t) {
-        LoadNonNull(cx, ptr)
-    } else {
-        Load(cx, ptr)
-    };
-
-    unsafe {
-        llvm::LLVMSetAlignment(val, align);
-    }
-
-    to_arg_ty(cx, val, t)
-}
-
-/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
-/// differs from the type used for SSA values.
-pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
-    if cx.unreachable.get() {
-        return;
-    }
-
-    debug!("store_ty: {} : {:?} <- {}",
-           cx.val_to_string(dst),
-           t,
-           cx.val_to_string(v));
-
-    if common::type_is_fat_ptr(cx.tcx(), t) {
-        Store(cx,
-              ExtractValue(cx, v, abi::FAT_PTR_ADDR),
-              expr::get_dataptr(cx, dst));
-        Store(cx,
-              ExtractValue(cx, v, abi::FAT_PTR_EXTRA),
-              expr::get_meta(cx, dst));
-    } else {
-        let store = Store(cx, from_arg_ty(cx, v, t), to_arg_ty_ptr(cx, dst, t));
-        unsafe {
-            llvm::LLVMSetAlignment(store, type_of::align_of(cx.ccx(), t));
-        }
-    }
-}
-
-pub fn store_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
-                                 data: ValueRef,
-                                 extra: ValueRef,
-                                 dst: ValueRef,
-                                 _ty: Ty<'tcx>) {
-    // FIXME: emit metadata
-    Store(cx, data, expr::get_dataptr(cx, dst));
-    Store(cx, extra, expr::get_meta(cx, dst));
-}
-
-pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
-                                src: ValueRef,
-                                _ty: Ty<'tcx>)
-                                -> (ValueRef, ValueRef) {
-    // FIXME: emit metadata
-    (Load(cx, expr::get_dataptr(cx, src)),
-     Load(cx, expr::get_meta(cx, src)))
-}
-
-pub fn from_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
-    if ty.is_bool() {
-        ZExt(bcx, val, Type::i8(bcx.ccx()))
-    } else {
-        val
-    }
-}
-
-pub fn to_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
-    if ty.is_bool() {
-        Trunc(bcx, val, Type::i1(bcx.ccx()))
-    } else {
-        val
-    }
-}
-
-pub fn to_arg_ty_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ptr: ValueRef, ty: Ty<'tcx>) -> ValueRef {
-    if type_is_immediate(bcx.ccx(), ty) && type_of::type_of(bcx.ccx(), ty).is_aggregate() {
-        // We want to pass small aggregates as immediate values, but using an aggregate LLVM type
-        // for this leads to bad optimizations, so its argument type is an appropriately sized
-        // integer and we have to convert it here.
-        BitCast(bcx, ptr, type_of::arg_type_of(bcx.ccx(), ty).ptr_to())
-    } else {
-        ptr
-    }
-}
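// A minimal, stand-alone sketch (plain Rust, stable std only; nothing here is
// part of the trans code above) of the layout rules that from_arg_ty/to_arg_ty
// paper over: bool is an i1 as an SSA value but a full byte in memory, and
// char never exceeds the Unicode scalar-value bound asserted in load_ty.
fn immediate_layout_demo() {
    assert_eq!(std::mem::size_of::<bool>(), 1);   // stored in memory as one byte (i8)
    assert_eq!(true as u8, 1);                    // only the values 0 and 1 are valid
    assert_eq!(std::char::MAX as u32, 0x10FFFF);  // matches the LoadRangeAssert bound
}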
-
-pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &hir::Local) -> Block<'blk, 'tcx> {
-    debug!("init_local(bcx={}, local.id={})", bcx.to_str(), local.id);
-    let _indenter = indenter();
-    let _icx = push_ctxt("init_local");
-    _match::store_local(bcx, local)
-}
-
-pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
-                             llbb: BasicBlockRef)
-                             -> Block<'blk, 'tcx> {
-    common::BlockS::new(llbb, None, fcx)
-}
-
-pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx>
-    where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>
-{
-    let _icx = push_ctxt("with_cond");
-
-    if bcx.unreachable.get() || common::const_to_opt_uint(val) == Some(0) {
-        return bcx;
-    }
-
-    let fcx = bcx.fcx;
-    let next_cx = fcx.new_temp_block("next");
-    let cond_cx = fcx.new_temp_block("cond");
-    CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None);
-    let after_cx = f(cond_cx);
-    if !after_cx.terminated.get() {
-        Br(after_cx, next_cx.llbb, DebugLoc::None);
-    }
-    next_cx
-}
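// A surface-level analogy (hypothetical helper, not used by the code above) for
// the block structure with_cond builds: run `f` only when `cond` holds, then
// fall through to the shared continuation, like branching to "cond" vs. "next".
fn with_cond_demo<F: FnOnce()>(cond: bool, f: F) {
    if cond {
        f(); // the "cond" block
    }
    // control rejoins here, the "next" block
}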
-
-enum Lifetime { Start, End }
-
-// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
-// on), and `ptr` is nonzero-sized, then this extracts the size of `ptr`
-// and the intrinsic for `lt` and passes them to `emit`, which is in
-// charge of generating code to call the passed intrinsic on whatever
-// block of generated code is targeted for the intrinsic.
-//
-// If LLVM lifetime intrinsic support is disabled (i.e. optimizations
-// off) or `ptr` is zero-sized, then this is a no-op (`emit` is not called).
-fn core_lifetime_emit<'blk, 'tcx, F>(ccx: &'blk CrateContext<'blk, 'tcx>,
-                                     ptr: ValueRef,
-                                     lt: Lifetime,
-                                     emit: F)
-    where F: FnOnce(&'blk CrateContext<'blk, 'tcx>, machine::llsize, ValueRef)
-{
-    if ccx.sess().opts.optimize == config::OptLevel::No {
-        return;
-    }
-
-    let _icx = push_ctxt(match lt {
-        Lifetime::Start => "lifetime_start",
-        Lifetime::End => "lifetime_end"
-    });
-
-    let size = machine::llsize_of_alloc(ccx, val_ty(ptr).element_type());
-    if size == 0 {
-        return;
-    }
-
-    let lifetime_intrinsic = ccx.get_intrinsic(match lt {
-        Lifetime::Start => "llvm.lifetime.start",
-        Lifetime::End => "llvm.lifetime.end"
-    });
-    emit(ccx, size, lifetime_intrinsic)
-}
-
-pub fn call_lifetime_start(cx: Block, ptr: ValueRef) {
-    core_lifetime_emit(cx.ccx(), ptr, Lifetime::Start, |ccx, size, lifetime_start| {
-        let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
-        Call(cx,
-             lifetime_start,
-             &[C_u64(ccx, size), ptr],
-             None,
-             DebugLoc::None);
-    })
-}
-
-pub fn call_lifetime_end(cx: Block, ptr: ValueRef) {
-    core_lifetime_emit(cx.ccx(), ptr, Lifetime::End, |ccx, size, lifetime_end| {
-        let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
-        Call(cx,
-             lifetime_end,
-             &[C_u64(ccx, size), ptr],
-             None,
-             DebugLoc::None);
-    })
-}
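// A small sketch (ordinary Rust, no rustc internals) of why the lifetime
// markers emitted above matter: with optimizations on, LLVM may let the two
// buffers below share a single stack slot, because llvm.lifetime.start/end
// show that they are never live at the same time.
fn lifetime_demo() -> u8 {
    let first = {
        let a = [1u8; 4096];
        a[0]
    };
    let second = {
        let b = [2u8; 4096];
        b[0]
    };
    first.wrapping_add(second)
}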
-
-// Generates code for resumption of unwind at the end of a landing pad.
-pub fn trans_unwind_resume(bcx: Block, lpval: ValueRef) {
-    if !bcx.sess().target.target.options.custom_unwind_resume {
-        Resume(bcx, lpval);
-    } else {
-        let exc_ptr = ExtractValue(bcx, lpval, 0);
-        let llunwresume = bcx.fcx.eh_unwind_resume();
-        Call(bcx, llunwresume, &[exc_ptr], None, DebugLoc::None);
-        Unreachable(bcx);
-    }
-}
-
-
-pub fn call_memcpy(cx: Block, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, align: u32) {
-    let _icx = push_ctxt("call_memcpy");
-    let ccx = cx.ccx();
-    let ptr_width = &ccx.sess().target.target.target_pointer_width[..];
-    let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width);
-    let memcpy = ccx.get_intrinsic(&key);
-    let src_ptr = PointerCast(cx, src, Type::i8p(ccx));
-    let dst_ptr = PointerCast(cx, dst, Type::i8p(ccx));
-    let size = IntCast(cx, n_bytes, ccx.int_type());
-    let align = C_i32(ccx, align as i32);
-    let volatile = C_bool(ccx, false);
-    Call(cx,
-         memcpy,
-         &[dst_ptr, src_ptr, size, align, volatile],
-         None,
-         DebugLoc::None);
-}
-
-pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) {
-    let _icx = push_ctxt("memcpy_ty");
-    let ccx = bcx.ccx();
-
-    if type_is_zero_size(ccx, t) {
-        return;
-    }
-
-    if t.is_structural() {
-        let llty = type_of::type_of(ccx, t);
-        let llsz = llsize_of(ccx, llty);
-        let llalign = type_of::align_of(ccx, t);
-        call_memcpy(bcx, dst, src, llsz, llalign as u32);
-    } else if common::type_is_fat_ptr(bcx.tcx(), t) {
-        let (data, extra) = load_fat_ptr(bcx, src, t);
-        store_fat_ptr(bcx, data, extra, dst, t);
-    } else {
-        store_ty(bcx, load_ty(bcx, src, t), dst, t);
-    }
-}
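// A library-level sketch of the typed-memcpy idea (assumes only std::ptr and a
// throwaway `Pair` type): structural values are copied by size and alignment
// rather than field by field.
#[derive(Clone, Copy, PartialEq, Debug)]
struct Pair {
    a: u32,
    b: u32,
}

fn memcpy_demo() {
    let src = Pair { a: 1, b: 2 };
    let mut dst = Pair { a: 0, b: 0 };
    unsafe {
        // copies size_of::<Pair>() bytes, like the llvm.memcpy call above
        std::ptr::copy_nonoverlapping(&src, &mut dst, 1);
    }
    assert_eq!(dst, src);
}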
-
-pub fn drop_done_fill_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
-    if cx.unreachable.get() {
-        return;
-    }
-    let _icx = push_ctxt("drop_done_fill_mem");
-    let bcx = cx;
-    memfill(&B(bcx), llptr, t, adt::DTOR_DONE);
-}
-
-pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
-    if cx.unreachable.get() {
-        return;
-    }
-    let _icx = push_ctxt("init_zero_mem");
-    let bcx = cx;
-    memfill(&B(bcx), llptr, t, 0);
-}
-
-// Always use this function instead of storing a constant byte to the memory
-// in question. For example, if you store a zero constant, LLVM will drown in
-// vreg allocation for large data structures, and the generated code will be
-// awful. (A telltale sign of this is large quantities of
-// `mov [byte ptr foo],0` in the generated code.)
-fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: u8) {
-    let _icx = push_ctxt("memfill");
-    let ccx = b.ccx;
-
-    let llty = type_of::type_of(ccx, ty);
-    let ptr_width = &ccx.sess().target.target.target_pointer_width[..];
-    let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
-
-    let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key);
-    let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to());
-    let llzeroval = C_u8(ccx, byte);
-    let size = machine::llsize_of(ccx, llty);
-    let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32);
-    let volatile = C_bool(ccx, false);
-    b.call(llintrinsicfn,
-           &[llptr, llzeroval, size, align, volatile],
-           None, None);
-}
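// The same idea at the library level (a sketch; only std::ptr is assumed):
// filling a buffer through ptr::write_bytes typically lowers to a single
// llvm.memset call instead of the run of per-byte stores warned about above.
fn memfill_demo() {
    let mut buf = [0u8; 64];
    unsafe {
        std::ptr::write_bytes(buf.as_mut_ptr(), 0xAB, buf.len());
    }
    assert!(buf.iter().all(|&b| b == 0xAB));
}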
-
-/// In general, when we create a scratch value in an alloca, the
-/// creator may not know if the block (that initializes the scratch
-/// with the desired value) actually dominates the cleanup associated
-/// with the scratch value.
-///
-/// To deal with this, when we do an alloca (at the *start* of the whole
-/// function body), we can optionally also set the associated
-/// drop-flag state of the alloca to "dropped."
-#[derive(Copy, Clone, Debug)]
-pub enum InitAlloca {
-    /// Indicates that the state should have its associated drop flag
-    /// set to "dropped" at the point of allocation.
-    Dropped,
-    /// Indicates that the value of the associated drop flag is irrelevant.
-    /// The embedded string literal is a programmer-provided explanation of
-    /// why. This is a safeguard forcing compiler devs to document it; it
-    /// might be a good idea to also emit this as a comment with the alloca
-    /// itself when emitting LLVM output.ll.
-    Uninit(&'static str),
-}
-
-
-pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                            t: Ty<'tcx>,
-                            name: &str) -> ValueRef {
-    // pnkfelix: I do not know why alloc_ty meets the assumptions for
-    // passing Uninit, but it was never needed (even back when we had
-    // the original boolean `zero` flag on `lvalue_scratch_datum`).
-    alloc_ty_init(bcx, t, InitAlloca::Uninit("all alloc_ty are uninit"), name)
-}
-
-/// This variant of `fn alloc_ty` does not necessarily assume that the
-/// alloca should be created with no initial value. Instead the caller
-/// controls that assumption via the `init` flag.
-///
-/// Note that if the alloca *is* initialized via `init`, then we will
-/// also inject an `llvm.lifetime.start` before that initialization
-/// occurs, and thus callers should not call_lifetime_start
-/// themselves.  But if `init` says "uninitialized", then callers are
-/// in charge of choosing where to call_lifetime_start and
-/// subsequently populate the alloca.
-///
-/// (See related discussion on PR #30823.)
-pub fn alloc_ty_init<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                             t: Ty<'tcx>,
-                             init: InitAlloca,
-                             name: &str) -> ValueRef {
-    let _icx = push_ctxt("alloc_ty");
-    let ccx = bcx.ccx();
-    let ty = type_of::type_of(ccx, t);
-    assert!(!t.has_param_types());
-    match init {
-        InitAlloca::Dropped => alloca_dropped(bcx, t, name),
-        InitAlloca::Uninit(_) => alloca(bcx, ty, name),
-    }
-}
-
-pub fn alloca_dropped<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef {
-    let _icx = push_ctxt("alloca_dropped");
-    let llty = type_of::type_of(cx.ccx(), ty);
-    if cx.unreachable.get() {
-        unsafe { return llvm::LLVMGetUndef(llty.ptr_to().to_ref()); }
-    }
-    let p = alloca(cx, llty, name);
-    let b = cx.fcx.ccx.builder();
-    b.position_before(cx.fcx.alloca_insert_pt.get().unwrap());
-
-    // This is just like `call_lifetime_start` (but the latter expects a
-    // Block, which we do not have for `alloca_insert_pt`).
-    core_lifetime_emit(cx.ccx(), p, Lifetime::Start, |ccx, size, lifetime_start| {
-        let ptr = b.pointercast(p, Type::i8p(ccx));
-        b.call(lifetime_start, &[C_u64(ccx, size), ptr], None, None);
-    });
-    memfill(&b, p, ty, adt::DTOR_DONE);
-    p
-}
-
-pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
-    let _icx = push_ctxt("alloca");
-    if cx.unreachable.get() {
-        unsafe {
-            return llvm::LLVMGetUndef(ty.ptr_to().to_ref());
-        }
-    }
-    debuginfo::clear_source_location(cx.fcx);
-    Alloca(cx, ty, name)
-}
-
-pub fn set_value_name(val: ValueRef, name: &str) {
-    unsafe {
-        let name = CString::new(name).unwrap();
-        llvm::LLVMSetValueName(val, name.as_ptr());
-    }
-}
-
-// Creates the alloca slot which holds the pointer to the slot for the final return value
-pub fn make_return_slot_pointer<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
-                                          output_type: Ty<'tcx>)
-                                          -> ValueRef {
-    let lloutputtype = type_of::type_of(fcx.ccx, output_type);
-
-    // We create an alloca that holds a pointer to `output_type`; it
-    // will point at whichever alloca actually holds the final return
-    // value.
-    if fcx.needs_ret_allocas {
-        // Let's create the stack slot
-        let slot = AllocaFcx(fcx, lloutputtype.ptr_to(), "llretslotptr");
-
-        // and if we're using an out pointer, then store that in our newly made slot
-        if type_of::return_uses_outptr(fcx.ccx, output_type) {
-            let outptr = get_param(fcx.llfn, 0);
-
-            let b = fcx.ccx.builder();
-            b.position_before(fcx.alloca_insert_pt.get().unwrap());
-            b.store(outptr, slot);
-        }
-
-        slot
-
-    // But if there are no nested returns, we skip the indirection and have a single
-    // retslot
-    } else {
-        if type_of::return_uses_outptr(fcx.ccx, output_type) {
-            get_param(fcx.llfn, 0)
-        } else {
-            AllocaFcx(fcx, lloutputtype, "sret_slot")
-        }
-    }
-}
-
-struct FindNestedReturn {
-    found: bool,
-}
-
-impl FindNestedReturn {
-    fn new() -> FindNestedReturn {
-        FindNestedReturn {
-            found: false,
-        }
-    }
-}
-
-impl<'v> Visitor<'v> for FindNestedReturn {
-    fn visit_expr(&mut self, e: &hir::Expr) {
-        match e.node {
-            hir::ExprRet(..) => {
-                self.found = true;
-            }
-            _ => intravisit::walk_expr(self, e),
-        }
-    }
-}
-
-fn build_cfg(tcx: &ty::ctxt, id: ast::NodeId) -> (ast::NodeId, Option<cfg::CFG>) {
-    let blk = match tcx.map.find(id) {
-        Some(hir_map::NodeItem(i)) => {
-            match i.node {
-                hir::ItemFn(_, _, _, _, _, ref blk) => {
-                    blk
-                }
-                _ => tcx.sess.bug("unexpected item variant in has_nested_returns"),
-            }
-        }
-        Some(hir_map::NodeTraitItem(trait_item)) => {
-            match trait_item.node {
-                hir::MethodTraitItem(_, Some(ref body)) => body,
-                _ => {
-                    tcx.sess.bug("unexpected variant: trait item other than a provided method in \
-                                  has_nested_returns")
-                }
-            }
-        }
-        Some(hir_map::NodeImplItem(impl_item)) => {
-            match impl_item.node {
-                hir::ImplItemKind::Method(_, ref body) => body,
-                _ => {
-                    tcx.sess.bug("unexpected variant: non-method impl item in has_nested_returns")
-                }
-            }
-        }
-        Some(hir_map::NodeExpr(e)) => {
-            match e.node {
-                hir::ExprClosure(_, _, ref blk) => blk,
-                _ => tcx.sess.bug("unexpected expr variant in has_nested_returns"),
-            }
-        }
-        Some(hir_map::NodeVariant(..)) |
-        Some(hir_map::NodeStructCtor(..)) => return (ast::DUMMY_NODE_ID, None),
-
-        // glue, shims, etc
-        None if id == ast::DUMMY_NODE_ID => return (ast::DUMMY_NODE_ID, None),
-
-        _ => tcx.sess.bug(&format!("unexpected variant in has_nested_returns: {}",
-                                   tcx.map.path_to_string(id))),
-    };
-
-    (blk.id, Some(cfg::CFG::new(tcx, blk)))
-}
-
-// Checks for the presence of "nested returns" in a function.
-// Nested returns are when the inner expression of a return expression
-// (the 'expr' in 'return expr') contains a return expression. Only cases
-// where the outer return is actually reachable are considered. Implicit
-// returns from the end of blocks are considered as well.
-//
-// This check is needed to handle the case where the inner expression is
-// part of a larger expression that may have already partially-filled the
-// return slot alloca. This can cause errors related to clean-up due to
-// the clobbering of the existing value in the return slot.
-fn has_nested_returns(tcx: &ty::ctxt, cfg: &cfg::CFG, blk_id: ast::NodeId) -> bool {
-    for index in cfg.graph.depth_traverse(cfg.entry) {
-        let n = cfg.graph.node_data(index);
-        match tcx.map.find(n.id()) {
-            Some(hir_map::NodeExpr(ex)) => {
-                if let hir::ExprRet(Some(ref ret_expr)) = ex.node {
-                    let mut visitor = FindNestedReturn::new();
-                    intravisit::walk_expr(&mut visitor, &ret_expr);
-                    if visitor.found {
-                        return true;
-                    }
-                }
-            }
-            Some(hir_map::NodeBlock(blk)) if blk.id == blk_id => {
-                let mut visitor = FindNestedReturn::new();
-                walk_list!(&mut visitor, visit_expr, &blk.expr);
-                if visitor.found {
-                    return true;
-                }
-            }
-            _ => {}
-        }
-    }
-
-    return false;
-}
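// A stand-alone example of the shape this check looks for: the operand of the
// outer `return` itself contains a `return`, so translating the outer value
// could touch the return slot before the inner return fires.
fn nested_return_demo(x: Option<i32>) -> i32 {
    return match x {
        Some(v) => v + 1,
        None => return -1, // nested return inside the outer return's operand
    };
}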
-
-// NB: must keep 4 fns in sync:
-//
-//  - type_of_fn
-//  - create_datums_for_fn_args
-//  - new_fn_ctxt
-//  - trans_args
-//
-// Be warned! You must call `init_function` before doing anything with the
-// returned function context.
-pub fn new_fn_ctxt<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
-                             llfndecl: ValueRef,
-                             id: ast::NodeId,
-                             has_env: bool,
-                             output_type: ty::FnOutput<'tcx>,
-                             param_substs: &'tcx Substs<'tcx>,
-                             sp: Option<Span>,
-                             block_arena: &'a TypedArena<common::BlockS<'a, 'tcx>>)
-                             -> FunctionContext<'a, 'tcx> {
-    common::validate_substs(param_substs);
-
-    debug!("new_fn_ctxt(path={}, id={}, param_substs={:?})",
-           if id == !0 {
-               "".to_string()
-           } else {
-               ccx.tcx().map.path_to_string(id).to_string()
-           },
-           id,
-           param_substs);
-
-    let uses_outptr = match output_type {
-        ty::FnConverging(output_type) => {
-            let substd_output_type = monomorphize::apply_param_substs(ccx.tcx(),
-                                                                      param_substs,
-                                                                      &output_type);
-            type_of::return_uses_outptr(ccx, substd_output_type)
-        }
-        ty::FnDiverging => false,
-    };
-    let debug_context = debuginfo::create_function_debug_context(ccx, id, param_substs, llfndecl);
-    let (blk_id, cfg) = build_cfg(ccx.tcx(), id);
-    let nested_returns = if let Some(ref cfg) = cfg {
-        has_nested_returns(ccx.tcx(), cfg, blk_id)
-    } else {
-        false
-    };
-
-    let mir = ccx.mir_map().map.get(&id);
-
-    let mut fcx = FunctionContext {
-        mir: mir,
-        llfn: llfndecl,
-        llenv: None,
-        llretslotptr: Cell::new(None),
-        param_env: ccx.tcx().empty_parameter_environment(),
-        alloca_insert_pt: Cell::new(None),
-        llreturn: Cell::new(None),
-        needs_ret_allocas: nested_returns,
-        landingpad_alloca: Cell::new(None),
-        caller_expects_out_pointer: uses_outptr,
-        lllocals: RefCell::new(NodeMap()),
-        llupvars: RefCell::new(NodeMap()),
-        lldropflag_hints: RefCell::new(DropFlagHintsMap::new()),
-        id: id,
-        param_substs: param_substs,
-        span: sp,
-        block_arena: block_arena,
-        lpad_arena: TypedArena::new(),
-        ccx: ccx,
-        debug_context: debug_context,
-        scopes: RefCell::new(Vec::new()),
-        cfg: cfg,
-    };
-
-    if has_env {
-        fcx.llenv = Some(get_param(fcx.llfn, fcx.env_arg_pos() as c_uint))
-    }
-
-    fcx
-}
-
-/// Performs setup on a newly created function, creating the entry scope block
-/// and allocating space for the return pointer.
-pub fn init_function<'a, 'tcx>(fcx: &'a FunctionContext<'a, 'tcx>,
-                               skip_retptr: bool,
-                               output: ty::FnOutput<'tcx>)
-                               -> Block<'a, 'tcx> {
-    let entry_bcx = fcx.new_temp_block("entry-block");
-
-    // Use a dummy instruction as the insertion point for all allocas.
-    // This is later removed in FunctionContext::cleanup.
-    fcx.alloca_insert_pt.set(Some(unsafe {
-        Load(entry_bcx, C_null(Type::i8p(fcx.ccx)));
-        llvm::LLVMGetFirstInstruction(entry_bcx.llbb)
-    }));
-
-    if let ty::FnConverging(output_type) = output {
-        // This shouldn't need to recompute the return type,
-        // as new_fn_ctxt did it already.
-        let substd_output_type = fcx.monomorphize(&output_type);
-        if !return_type_is_void(fcx.ccx, substd_output_type) {
-            // If the function returns nil/bot, there is no real return
-            // value, so do not set `llretslotptr`.
-            if !skip_retptr || fcx.caller_expects_out_pointer {
-                // Otherwise, we normally allocate the llretslotptr, unless we
-                // have been instructed to skip it for immediate return
-                // values.
-                fcx.llretslotptr.set(Some(make_return_slot_pointer(fcx, substd_output_type)));
-            }
-        }
-    }
-
-    // Create the drop-flag hints for every unfragmented path in the function.
-    let tcx = fcx.ccx.tcx();
-    let fn_did = tcx.map.local_def_id(fcx.id);
-    let tables = tcx.tables.borrow();
-    let mut hints = fcx.lldropflag_hints.borrow_mut();
-    let fragment_infos = tcx.fragment_infos.borrow();
-
-    // Intern table for drop-flag hint datums.
-    let mut seen = HashMap::new();
-
-    if let Some(fragment_infos) = fragment_infos.get(&fn_did) {
-        for &info in fragment_infos {
-
-            let make_datum = |id| {
-                let init_val = C_u8(fcx.ccx, adt::DTOR_NEEDED_HINT);
-                let llname = &format!("dropflag_hint_{}", id);
-                debug!("adding hint {}", llname);
-                let ty = tcx.types.u8;
-                let ptr = alloc_ty(entry_bcx, ty, llname);
-                Store(entry_bcx, init_val, ptr);
-                let flag = datum::Lvalue::new_dropflag_hint("base::init_function");
-                datum::Datum::new(ptr, ty, flag)
-            };
-
-            let (var, datum) = match info {
-                ty::FragmentInfo::Moved { var, .. } |
-                ty::FragmentInfo::Assigned { var, .. } => {
-                    let opt_datum = seen.get(&var).cloned().unwrap_or_else(|| {
-                        let ty = tables.node_types[&var];
-                        if fcx.type_needs_drop(ty) {
-                            let datum = make_datum(var);
-                            seen.insert(var, Some(datum.clone()));
-                            Some(datum)
-                        } else {
-                            // No drop call needed, so we don't need a dropflag hint
-                            None
-                        }
-                    });
-                    if let Some(datum) = opt_datum {
-                        (var, datum)
-                    } else {
-                        continue
-                    }
-                }
-            };
-            match info {
-                ty::FragmentInfo::Moved { move_expr: expr_id, .. } => {
-                    debug!("FragmentInfo::Moved insert drop hint for {}", expr_id);
-                    hints.insert(expr_id, DropHint::new(var, datum));
-                }
-                ty::FragmentInfo::Assigned { assignee_id: expr_id, .. } => {
-                    debug!("FragmentInfo::Assigned insert drop hint for {}", expr_id);
-                    hints.insert(expr_id, DropHint::new(var, datum));
-                }
-            }
-        }
-    }
-
-    entry_bcx
-}
-
-// NB: must keep 4 fns in sync:
-//
-//  - type_of_fn
-//  - create_datums_for_fn_args
-//  - new_fn_ctxt
-//  - trans_args
-
-pub fn arg_kind<'a, 'tcx>(cx: &FunctionContext<'a, 'tcx>, t: Ty<'tcx>) -> datum::Rvalue {
-    use trans::datum::{ByRef, ByValue};
-
-    datum::Rvalue {
-        mode: if arg_is_indirect(cx.ccx, t) { ByRef } else { ByValue }
-    }
-}
-
-// create_datums_for_fn_args: creates lvalue datums for each of the
-// incoming function arguments.
-pub fn create_datums_for_fn_args<'a, 'tcx>(mut bcx: Block<'a, 'tcx>,
-                                           args: &[hir::Arg],
-                                           arg_tys: &[Ty<'tcx>],
-                                           has_tupled_arg: bool,
-                                           arg_scope: cleanup::CustomScopeIndex)
-                                           -> Block<'a, 'tcx> {
-    let _icx = push_ctxt("create_datums_for_fn_args");
-    let fcx = bcx.fcx;
-    let arg_scope_id = cleanup::CustomScope(arg_scope);
-
-    debug!("create_datums_for_fn_args");
-
-    // Wrap the ValueRef that we get from `get_param` for each argument
-    // in a datum.
-    //
-    // For certain mode/type combinations, the raw llarg values are passed
-    // by value.  However, within the fn body itself, we want to always
-    // have all locals and arguments be by-ref so that we can cancel the
-    // cleanup and interact better with LLVM's debug info.  So, if the
-    // argument would be passed by value, we store it into an alloca.
-    // This alloca should be optimized away by LLVM's mem-to-reg pass in
-    // the event it's not truly needed.
-    let mut idx = fcx.arg_offset() as c_uint;
-    let uninit_reason = InitAlloca::Uninit("fn_arg populate dominates dtor");
-    for (i, &arg_ty) in arg_tys.iter().enumerate() {
-        let arg_datum = if !has_tupled_arg || i < arg_tys.len() - 1 {
-            if type_of::arg_is_indirect(bcx.ccx(), arg_ty) &&
-               bcx.sess().opts.debuginfo != FullDebugInfo {
-                // Don't copy an indirect argument to an alloca: the caller
-                // already put it in a temporary alloca and gave it up. The
-                // exception is full debug info, which requires local allocas :(.
-                let llarg = get_param(fcx.llfn, idx);
-                idx += 1;
-                bcx.fcx.schedule_lifetime_end(arg_scope_id, llarg);
-                bcx.fcx.schedule_drop_mem(arg_scope_id, llarg, arg_ty, None);
-
-                datum::Datum::new(llarg,
-                                  arg_ty,
-                                  datum::Lvalue::new("create_datum_for_fn_args"))
-            } else if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
-                let data = get_param(fcx.llfn, idx);
-                let extra = get_param(fcx.llfn, idx + 1);
-                idx += 2;
-                unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "", uninit_reason,
-                                                        arg_scope_id, (data, extra),
-                                                        |(data, extra), bcx, dst| {
-                    debug!("populate call for create_datum_for_fn_args \
-                            early fat arg, on arg[{}] ty={:?}", i, arg_ty);
-
-                    Store(bcx, data, expr::get_dataptr(bcx, dst));
-                    Store(bcx, extra, expr::get_meta(bcx, dst));
-                    bcx
-                }))
-            } else {
-                let llarg = get_param(fcx.llfn, idx);
-                idx += 1;
-                let tmp = datum::Datum::new(llarg, arg_ty, arg_kind(fcx, arg_ty));
-                unpack_datum!(bcx,
-                              datum::lvalue_scratch_datum(bcx,
-                                                          arg_ty,
-                                                          "",
-                                                          uninit_reason,
-                                                          arg_scope_id,
-                                                          tmp,
-                                                          |tmp, bcx, dst| {
-
-                        debug!("populate call for create_datum_for_fn_args \
-                                early thin arg, on arg[{}] ty={:?}", i, arg_ty);
-
-                                                              tmp.store_to(bcx, dst)
-                                                          }))
-            }
-        } else {
-            // FIXME(pcwalton): Reduce the amount of code bloat this is responsible for.
-            match arg_ty.sty {
-                ty::TyTuple(ref tupled_arg_tys) => {
-                    unpack_datum!(bcx,
-                                  datum::lvalue_scratch_datum(bcx,
-                                                              arg_ty,
-                                                              "tupled_args",
-                                                              uninit_reason,
-                                                              arg_scope_id,
-                                                              (),
-                                                              |(),
-                                                               mut bcx,
-                                                              llval| {
-                        debug!("populate call for create_datum_for_fn_args \
-                                tupled_args, on arg[{}] ty={:?}", i, arg_ty);
-                        for (j, &tupled_arg_ty) in
-                                    tupled_arg_tys.iter().enumerate() {
-                            let lldest = StructGEP(bcx, llval, j);
-                            if common::type_is_fat_ptr(bcx.tcx(), tupled_arg_ty) {
-                                let data = get_param(bcx.fcx.llfn, idx);
-                                let extra = get_param(bcx.fcx.llfn, idx + 1);
-                                Store(bcx, data, expr::get_dataptr(bcx, lldest));
-                                Store(bcx, extra, expr::get_meta(bcx, lldest));
-                                idx += 2;
-                            } else {
-                                let datum = datum::Datum::new(
-                                    get_param(bcx.fcx.llfn, idx),
-                                    tupled_arg_ty,
-                                    arg_kind(bcx.fcx, tupled_arg_ty));
-                                idx += 1;
-                                bcx = datum.store_to(bcx, lldest);
-                            };
-                        }
-                        bcx
-                    }))
-                }
-                _ => {
-                    bcx.tcx()
-                       .sess
-                       .bug("last argument of a function with `rust-call` ABI isn't a tuple?!")
-                }
-            }
-        };
-
-        let pat = &*args[i].pat;
-        bcx = if let Some(name) = simple_name(pat) {
-            // Generate nicer LLVM for the common case of a simple fn argument
-            // pattern like `x: T`.
-            set_value_name(arg_datum.val, &bcx.name(name));
-            bcx.fcx.lllocals.borrow_mut().insert(pat.id, arg_datum);
-            bcx
-        } else {
-            // General path. Copy out the values that are used in the
-            // pattern.
-            _match::bind_irrefutable_pat(bcx, pat, arg_datum.match_input(), arg_scope_id)
-        };
-        debuginfo::create_argument_metadata(bcx, &args[i]);
-    }
-
-    bcx
-}
-
-// Ties up the llstaticallocas -> llloadenv -> lltop edges,
-// and builds the return block.
-pub fn finish_fn<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
-                             last_bcx: Block<'blk, 'tcx>,
-                             retty: ty::FnOutput<'tcx>,
-                             ret_debug_loc: DebugLoc) {
-    let _icx = push_ctxt("finish_fn");
-
-    let ret_cx = match fcx.llreturn.get() {
-        Some(llreturn) => {
-            if !last_bcx.terminated.get() {
-                Br(last_bcx, llreturn, DebugLoc::None);
-            }
-            raw_block(fcx, llreturn)
-        }
-        None => last_bcx,
-    };
-
-    // This shouldn't need to recompute the return type,
-    // as new_fn_ctxt did it already.
-    let substd_retty = fcx.monomorphize(&retty);
-    build_return_block(fcx, ret_cx, substd_retty, ret_debug_loc);
-
-    debuginfo::clear_source_location(fcx);
-    fcx.cleanup();
-}
-
-// Builds the return block for a function.
-pub fn build_return_block<'blk, 'tcx>(fcx: &FunctionContext<'blk, 'tcx>,
-                                      ret_cx: Block<'blk, 'tcx>,
-                                      retty: ty::FnOutput<'tcx>,
-                                      ret_debug_location: DebugLoc) {
-    if fcx.llretslotptr.get().is_none() ||
-       (!fcx.needs_ret_allocas && fcx.caller_expects_out_pointer) {
-        return RetVoid(ret_cx, ret_debug_location);
-    }
-
-    let retslot = if fcx.needs_ret_allocas {
-        Load(ret_cx, fcx.llretslotptr.get().unwrap())
-    } else {
-        fcx.llretslotptr.get().unwrap()
-    };
-    let retptr = Value(retslot);
-    match retptr.get_dominating_store(ret_cx) {
-        // If there's only a single store to the ret slot, we can directly return
-        // the value that was stored and omit the store and the alloca
-        Some(s) => {
-            let retval = s.get_operand(0).unwrap().get();
-            s.erase_from_parent();
-
-            if retptr.has_no_uses() {
-                retptr.erase_from_parent();
-            }
-
-            let retval = if retty == ty::FnConverging(fcx.ccx.tcx().types.bool) {
-                Trunc(ret_cx, retval, Type::i1(fcx.ccx))
-            } else {
-                retval
-            };
-
-            if fcx.caller_expects_out_pointer {
-                if let ty::FnConverging(retty) = retty {
-                    store_ty(ret_cx, retval, get_param(fcx.llfn, 0), retty);
-                }
-                RetVoid(ret_cx, ret_debug_location)
-            } else {
-                Ret(ret_cx, retval, ret_debug_location)
-            }
-        }
-        // Otherwise, copy the return value to the ret slot
-        None => match retty {
-            ty::FnConverging(retty) => {
-                if fcx.caller_expects_out_pointer {
-                    memcpy_ty(ret_cx, get_param(fcx.llfn, 0), retslot, retty);
-                    RetVoid(ret_cx, ret_debug_location)
-                } else {
-                    Ret(ret_cx, load_ty(ret_cx, retslot, retty), ret_debug_location)
-                }
-            }
-            ty::FnDiverging => {
-                if fcx.caller_expects_out_pointer {
-                    RetVoid(ret_cx, ret_debug_location)
-                } else {
-                    Ret(ret_cx, C_undef(Type::nil(fcx.ccx)), ret_debug_location)
-                }
-            }
-        },
-    }
-}
-
-/// Builds an LLVM function out of a source function.
-///
-/// If the function closes over its environment, a closure will be returned.
-pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                   decl: &hir::FnDecl,
-                                   body: &hir::Block,
-                                   llfndecl: ValueRef,
-                                   param_substs: &'tcx Substs<'tcx>,
-                                   fn_ast_id: ast::NodeId,
-                                   attributes: &[ast::Attribute],
-                                   output_type: ty::FnOutput<'tcx>,
-                                   abi: Abi,
-                                   closure_env: closure::ClosureEnv<'b>) {
-    ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);
-
-    record_translation_item_as_generated(ccx, fn_ast_id, param_substs);
-
-    let _icx = push_ctxt("trans_closure");
-    attributes::emit_uwtable(llfndecl, true);
-
-    debug!("trans_closure(..., param_substs={:?})", param_substs);
-
-    let has_env = match closure_env {
-        closure::ClosureEnv::Closure(..) => true,
-        closure::ClosureEnv::NotClosure => false,
-    };
-
-    let (arena, fcx): (TypedArena<_>, FunctionContext);
-    arena = TypedArena::new();
-    fcx = new_fn_ctxt(ccx,
-                      llfndecl,
-                      fn_ast_id,
-                      has_env,
-                      output_type,
-                      param_substs,
-                      Some(body.span),
-                      &arena);
-    let mut bcx = init_function(&fcx, false, output_type);
-
-    if attributes.iter().any(|item| item.check_name("rustc_mir")) {
-        mir::trans_mir(bcx.build());
-        fcx.cleanup();
-        return;
-    }
-
-    // cleanup scope for the incoming arguments
-    let fn_cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(ccx,
-                                                                             fn_ast_id,
-                                                                             body.span,
-                                                                             true);
-    let arg_scope = fcx.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc);
-
-    let block_ty = node_id_type(bcx, body.id);
-
-    // Set up arguments to the function.
-    let monomorphized_arg_types = decl.inputs
-                                      .iter()
-                                      .map(|arg| node_id_type(bcx, arg.id))
-                                      .collect::<Vec<_>>();
-    for monomorphized_arg_type in &monomorphized_arg_types {
-        debug!("trans_closure: monomorphized_arg_type: {:?}",
-               monomorphized_arg_type);
-    }
-    debug!("trans_closure: function lltype: {}",
-           bcx.fcx.ccx.tn().val_to_string(bcx.fcx.llfn));
-
-    let has_tupled_arg = match closure_env {
-        closure::ClosureEnv::NotClosure => abi == Abi::RustCall,
-        _ => false,
-    };
-
-    bcx = create_datums_for_fn_args(bcx,
-                                    &decl.inputs,
-                                    &monomorphized_arg_types,
-                                    has_tupled_arg,
-                                    arg_scope);
-
-    bcx = closure_env.load(bcx, cleanup::CustomScope(arg_scope));
-
-    // Up until here, IR instructions for this function have explicitly not been annotated with
-    // source code location, so we don't step into call setup code. From here on, source location
-    // emitting should be enabled.
-    debuginfo::start_emitting_source_locations(&fcx);
-
-    let dest = match fcx.llretslotptr.get() {
-        Some(_) => expr::SaveIn(fcx.get_ret_slot(bcx, ty::FnConverging(block_ty), "iret_slot")),
-        None => {
-            assert!(type_is_zero_size(bcx.ccx(), block_ty));
-            expr::Ignore
-        }
-    };
-
-    // This call to trans_block is the place where we bridge between
-    // translation calls that don't have a return value (trans_crate,
-    // trans_mod, trans_item, et cetera) and those that do
-    // (trans_block, trans_expr, et cetera).
-    bcx = controlflow::trans_block(bcx, body, dest);
-
-    match dest {
-        expr::SaveIn(slot) if fcx.needs_ret_allocas => {
-            Store(bcx, slot, fcx.llretslotptr.get().unwrap());
-        }
-        _ => {}
-    }
-
-    match fcx.llreturn.get() {
-        Some(_) => {
-            Br(bcx, fcx.return_exit_block(), DebugLoc::None);
-            fcx.pop_custom_cleanup_scope(arg_scope);
-        }
-        None => {
-            // Microoptimization writ large: avoid creating a separate
-            // llreturn basic block
-            bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_scope);
-        }
-    };
-
-    // Put return block after all other blocks.
-    // This somewhat improves single-stepping experience in debugger.
-    unsafe {
-        let llreturn = fcx.llreturn.get();
-        if let Some(llreturn) = llreturn {
-            llvm::LLVMMoveBasicBlockAfter(llreturn, bcx.llbb);
-        }
-    }
-
-    let ret_debug_loc = DebugLoc::At(fn_cleanup_debug_loc.id, fn_cleanup_debug_loc.span);
-
-    // Insert the mandatory first few basic blocks before lltop.
-    finish_fn(&fcx, bcx, output_type, ret_debug_loc);
-
-    fn record_translation_item_as_generated<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                                      node_id: ast::NodeId,
-                                                      param_substs: &'tcx Substs<'tcx>) {
-        if !collector::collecting_debug_information(ccx) {
-            return;
-        }
-
-        let def_id = match ccx.tcx().node_id_to_type(node_id).sty {
-            ty::TyClosure(def_id, _) => def_id,
-            _ => ccx.external_srcs()
-                    .borrow()
-                    .get(&node_id)
-                    .map(|did| *did)
-                    .unwrap_or_else(|| ccx.tcx().map.local_def_id(node_id)),
-        };
-
-        ccx.record_translation_item_as_generated(TransItem::Fn{
-            def_id: def_id,
-            substs: ccx.tcx().mk_substs(ccx.tcx().erase_regions(param_substs)),
-        });
-    }
-}
-
-/// Creates an LLVM function corresponding to a source language function.
-pub fn trans_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                          decl: &hir::FnDecl,
-                          body: &hir::Block,
-                          llfndecl: ValueRef,
-                          param_substs: &'tcx Substs<'tcx>,
-                          id: ast::NodeId,
-                          attrs: &[ast::Attribute]) {
-    let _s = StatRecorder::new(ccx, ccx.tcx().map.path_to_string(id).to_string());
-    debug!("trans_fn(param_substs={:?})", param_substs);
-    let _icx = push_ctxt("trans_fn");
-    let fn_ty = ccx.tcx().node_id_to_type(id);
-    let fn_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &fn_ty);
-    let sig = fn_ty.fn_sig();
-    let sig = ccx.tcx().erase_late_bound_regions(&sig);
-    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
-    let output_type = sig.output;
-    let abi = fn_ty.fn_abi();
-    trans_closure(ccx,
-                  decl,
-                  body,
-                  llfndecl,
-                  param_substs,
-                  id,
-                  attrs,
-                  output_type,
-                  abi,
-                  closure::ClosureEnv::NotClosure);
-}
-
-pub fn trans_enum_variant<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                    ctor_id: ast::NodeId,
-                                    disr: Disr,
-                                    param_substs: &'tcx Substs<'tcx>,
-                                    llfndecl: ValueRef) {
-    let _icx = push_ctxt("trans_enum_variant");
-
-    trans_enum_variant_or_tuple_like_struct(ccx, ctor_id, disr, param_substs, llfndecl);
-}
-
-pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                                                 ctor_ty: Ty<'tcx>,
-                                                 disr: Disr,
-                                                 args: callee::CallArgs,
-                                                 dest: expr::Dest,
-                                                 debug_loc: DebugLoc)
-                                                 -> Result<'blk, 'tcx> {
-
-    let ccx = bcx.fcx.ccx;
-
-    let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig());
-    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
-    let result_ty = sig.output.unwrap();
-
-    // Get the location to store the result. If the user does not care about
-    // the result, just make a temporary stack slot.
-    let llresult = match dest {
-        expr::SaveIn(d) => d,
-        expr::Ignore => {
-            if !type_is_zero_size(ccx, result_ty) {
-                let llresult = alloc_ty(bcx, result_ty, "constructor_result");
-                call_lifetime_start(bcx, llresult);
-                llresult
-            } else {
-                C_undef(type_of::type_of(ccx, result_ty).ptr_to())
-            }
-        }
-    };
-
-    if !type_is_zero_size(ccx, result_ty) {
-        match args {
-            callee::ArgExprs(exprs) => {
-                let fields = exprs.iter().map(|x| &**x).enumerate().collect::<Vec<_>>();
-                bcx = expr::trans_adt(bcx,
-                                      result_ty,
-                                      disr,
-                                      &fields[..],
-                                      None,
-                                      expr::SaveIn(llresult),
-                                      debug_loc);
-            }
-            _ => ccx.sess().bug("expected expr as arguments for variant/struct tuple constructor"),
-        }
-    } else {
-        // Just eval all the expressions (if any). Since expressions in Rust can have arbitrary
-        // contents, there could be side-effects we need from them.
-        match args {
-            callee::ArgExprs(exprs) => {
-                for expr in exprs {
-                    bcx = expr::trans_into(bcx, expr, expr::Ignore);
-                }
-            }
-            _ => (),
-        }
-    }
-
-    // If the caller doesn't care about the result,
-    // drop the temporary we made.
-    let bcx = match dest {
-        expr::SaveIn(_) => bcx,
-        expr::Ignore => {
-            let bcx = glue::drop_ty(bcx, llresult, result_ty, debug_loc);
-            if !type_is_zero_size(ccx, result_ty) {
-                call_lifetime_end(bcx, llresult);
-            }
-            bcx
-        }
-    };
-
-    Result::new(bcx, llresult)
-}
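// A surface-level reminder (ordinary Rust, throwaway `Celsius` type) of what
// this path translates: tuple-struct and tuple-variant constructors are plain
// functions, so they can be called normally or passed around as fn values.
struct Celsius(f64);

fn constructor_demo() -> usize {
    let temps: Vec<Celsius> = vec![1.0f64, 2.0].into_iter().map(Celsius).collect();
    temps.len()
}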
-
-pub fn trans_tuple_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                    ctor_id: ast::NodeId,
-                                    param_substs: &'tcx Substs<'tcx>,
-                                    llfndecl: ValueRef) {
-    let _icx = push_ctxt("trans_tuple_struct");
-
-    trans_enum_variant_or_tuple_like_struct(ccx, ctor_id, Disr(0), param_substs, llfndecl);
-}
-
-fn trans_enum_variant_or_tuple_like_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                                     ctor_id: ast::NodeId,
-                                                     disr: Disr,
-                                                     param_substs: &'tcx Substs<'tcx>,
-                                                     llfndecl: ValueRef) {
-    let ctor_ty = ccx.tcx().node_id_to_type(ctor_id);
-    let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &ctor_ty);
-
-    let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig());
-    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
-    let arg_tys = sig.inputs;
-    let result_ty = sig.output;
-
-    let (arena, fcx): (TypedArena<_>, FunctionContext);
-    arena = TypedArena::new();
-    fcx = new_fn_ctxt(ccx,
-                      llfndecl,
-                      ctor_id,
-                      false,
-                      result_ty,
-                      param_substs,
-                      None,
-                      &arena);
-    let bcx = init_function(&fcx, false, result_ty);
-
-    assert!(!fcx.needs_ret_allocas);
-
-    if !type_is_zero_size(fcx.ccx, result_ty.unwrap()) {
-        let dest = fcx.get_ret_slot(bcx, result_ty, "eret_slot");
-        let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value
-        let repr = adt::represent_type(ccx, result_ty.unwrap());
-        let mut llarg_idx = fcx.arg_offset() as c_uint;
-        for (i, arg_ty) in arg_tys.into_iter().enumerate() {
-            let lldestptr = adt::trans_field_ptr(bcx, &repr, dest_val, Disr::from(disr), i);
-            if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
-                Store(bcx,
-                      get_param(fcx.llfn, llarg_idx),
-                      expr::get_dataptr(bcx, lldestptr));
-                Store(bcx,
-                      get_param(fcx.llfn, llarg_idx + 1),
-                      expr::get_meta(bcx, lldestptr));
-                llarg_idx += 2;
-            } else {
-                let arg = get_param(fcx.llfn, llarg_idx);
-                llarg_idx += 1;
-
-                if arg_is_indirect(ccx, arg_ty) {
-                    memcpy_ty(bcx, lldestptr, arg, arg_ty);
-                } else {
-                    store_ty(bcx, arg, lldestptr, arg_ty);
-                }
-            }
-        }
-        adt::trans_set_discr(bcx, &repr, dest, disr);
-    }
-
-    finish_fn(&fcx, bcx, result_ty, DebugLoc::None);
-}
-
-fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &hir::EnumDef, sp: Span, id: ast::NodeId) {
-    let mut sizes = Vec::new(); // does no allocation if no pushes, thankfully
-
-    let print_info = ccx.sess().print_enum_sizes();
-
-    let levels = ccx.tcx().node_lint_levels.borrow();
-    let lint_id = lint::LintId::of(lint::builtin::VARIANT_SIZE_DIFFERENCES);
-    let lvlsrc = levels.get(&(id, lint_id));
-    let is_allow = lvlsrc.map_or(true, |&(lvl, _)| lvl == lint::Allow);
-
-    if is_allow && !print_info {
-        // we're not interested in anything here
-        return;
-    }
-
-    let ty = ccx.tcx().node_id_to_type(id);
-    let avar = adt::represent_type(ccx, ty);
-    match *avar {
-        adt::General(_, ref variants, _) => {
-            for var in variants {
-                let mut size = 0;
-                for field in var.fields.iter().skip(1) {
-                    // skip the discriminant
-                    size += llsize_of_real(ccx, sizing_type_of(ccx, *field));
-                }
-                sizes.push(size);
-            }
-        },
-        _ => { /* its size is either constant or unimportant */ }
-    }
-
-    let (largest, slargest, largest_index) = sizes.iter().enumerate().fold((0, 0, 0),
-        |(l, s, li), (idx, &size)|
-            if size > l {
-                (size, l, idx)
-            } else if size > s {
-                (l, size, li)
-            } else {
-                (l, s, li)
-            }
-    );
-
-    // FIXME(#30505) Should use logging for this.
-    if print_info {
-        let llty = type_of::sizing_type_of(ccx, ty);
-
-        let sess = &ccx.tcx().sess;
-        sess.span_note_without_error(sp,
-                                     &format!("total size: {} bytes", llsize_of_real(ccx, llty)));
-        match *avar {
-            adt::General(..) => {
-                for (i, var) in enum_def.variants.iter().enumerate() {
-                    ccx.tcx()
-                       .sess
-                       .span_note_without_error(var.span,
-                                                &format!("variant data: {} bytes", sizes[i]));
-                }
-            }
-            _ => {}
-        }
-    }
-
-    // we only warn if the largest variant is at least thrice as large as
-    // the second-largest.
-    if !is_allow && largest > slargest * 3 && slargest > 0 {
-        // Use lint::raw_emit_lint rather than sess.add_lint because the lint-printing
-        // pass for the latter already ran.
-        lint::raw_struct_lint(&ccx.tcx().sess,
-                              &ccx.tcx().sess.lint_store.borrow(),
-                              lint::builtin::VARIANT_SIZE_DIFFERENCES,
-                              *lvlsrc.unwrap(),
-                              Some(sp),
-                              &format!("enum variant is more than three times larger ({} bytes) \
-                                        than the next largest (ignoring padding)",
-                                       largest))
-            .span_note(enum_def.variants[largest_index].span,
-                       "this variant is the largest")
-            .emit();
-    }
-}
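// A small illustration (assumed `Packet` type, checked only with size_of) of
// the situation the allow-by-default variant_size_differences lint flags: one
// variant dwarfs the rest, so every value of the enum pays for the big payload.
enum Packet {
    Ack,              // no payload
    Nack(u8),         // one byte of payload
    Data([u8; 1024]), // 1024 bytes; dominates the size of the whole enum
}

fn packet_size_demo() {
    assert!(std::mem::size_of::<Packet>() >= 1024);
}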
-
-pub fn llvm_linkage_by_name(name: &str) -> Option<Linkage> {
-    // Use the names from src/llvm/docs/LangRef.rst here. Most types are only
-    // applicable to variable declarations and may not really make sense for
-    // Rust code in the first place, but whitelist them anyway and trust that
-    // the user knows what s/he's doing. Who knows, unanticipated use cases
-    // may pop up in the future.
-    //
-    // ghost, dllimport, dllexport and linkonce_odr_autohide are not supported
-    // and don't have to be, LLVM treats them as no-ops.
-    match name {
-        "appending" => Some(llvm::AppendingLinkage),
-        "available_externally" => Some(llvm::AvailableExternallyLinkage),
-        "common" => Some(llvm::CommonLinkage),
-        "extern_weak" => Some(llvm::ExternalWeakLinkage),
-        "external" => Some(llvm::ExternalLinkage),
-        "internal" => Some(llvm::InternalLinkage),
-        "linkonce" => Some(llvm::LinkOnceAnyLinkage),
-        "linkonce_odr" => Some(llvm::LinkOnceODRLinkage),
-        "private" => Some(llvm::PrivateLinkage),
-        "weak" => Some(llvm::WeakAnyLinkage),
-        "weak_odr" => Some(llvm::WeakODRLinkage),
-        _ => None,
-    }
-}
-
-
-/// Enum describing the origin of an LLVM `Value`, for linkage purposes.
-#[derive(Copy, Clone)]
-pub enum ValueOrigin {
-    /// The LLVM `Value` is in this context because the corresponding item was
-    /// assigned to the current compilation unit.
-    OriginalTranslation,
-    /// The `Value`'s corresponding item was assigned to some other compilation
-    /// unit, but the `Value` was translated in this context anyway because the
-    /// item is marked `#[inline]`.
-    InlinedCopy,
-}
-
-/// Set the appropriate linkage for an LLVM `ValueRef` (function or global).
-/// If the `llval` is the direct translation of a specific Rust item, `id`
-/// should be set to the `NodeId` of that item.  (This mapping should be
-/// 1-to-1, so monomorphizations and drop/visit glue should have `id` set to
-/// `None`.)  `llval_origin` indicates whether `llval` is the translation of an
-/// item assigned to `ccx`'s compilation unit or an inlined copy of an item
-/// assigned to a different compilation unit.
-pub fn update_linkage(ccx: &CrateContext,
-                      llval: ValueRef,
-                      id: Option<ast::NodeId>,
-                      llval_origin: ValueOrigin) {
-    match llval_origin {
-        InlinedCopy => {
-            // `llval` is a translation of an item defined in a separate
-            // compilation unit.  This only makes sense if there are at least
-            // two compilation units.
-            assert!(ccx.sess().opts.cg.codegen_units > 1);
-            // `llval` is a copy of something defined elsewhere, so use
-            // `AvailableExternallyLinkage` to avoid duplicating code in the
-            // output.
-            llvm::SetLinkage(llval, llvm::AvailableExternallyLinkage);
-            return;
-        },
-        OriginalTranslation => {},
-    }
-
-    if let Some(id) = id {
-        let item = ccx.tcx().map.get(id);
-        if let hir_map::NodeItem(i) = item {
-            if let Some(name) = attr::first_attr_value_str_by_name(&i.attrs, "linkage") {
-                if let Some(linkage) = llvm_linkage_by_name(&name) {
-                    llvm::SetLinkage(llval, linkage);
-                } else {
-                    ccx.sess().span_fatal(i.span, "invalid linkage specified");
-                }
-                return;
-            }
-        }
-    }
-
-    match id {
-        Some(id) if ccx.reachable().contains(&id) => {
-            llvm::SetLinkage(llval, llvm::ExternalLinkage);
-        },
-        _ => {
-            // `id` does not refer to an item in `ccx.reachable`.
-            if ccx.sess().opts.cg.codegen_units > 1 {
-                llvm::SetLinkage(llval, llvm::ExternalLinkage);
-            } else {
-                llvm::SetLinkage(llval, llvm::InternalLinkage);
-            }
-        },
-    }
-}
-
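// A standalone sketch (not part of the diff) of the policy documented above,
// with the crate context reduced to plain values; `ValueKind`, `LinkageChoice`,
// and `choose_linkage` are illustrative names, and the explicit `#[linkage]`
// attribute override is omitted for brevity.
#[derive(Debug, PartialEq)]
enum LinkageChoice { AvailableExternally, External, Internal }

enum ValueKind { OriginalTranslation, InlinedCopy }

fn choose_linkage(kind: ValueKind, is_reachable: bool, codegen_units: usize) -> LinkageChoice {
    match kind {
        // A copy of an item owned by another codegen unit (only possible when
        // there is more than one): keep it available_externally so the
        // definition is not duplicated in the output.
        ValueKind::InlinedCopy => LinkageChoice::AvailableExternally,
        // Items reachable from outside the crate must stay external.
        ValueKind::OriginalTranslation if is_reachable => LinkageChoice::External,
        // Otherwise the item can be internalized, unless other codegen units
        // may still need to link against it.
        ValueKind::OriginalTranslation if codegen_units > 1 => LinkageChoice::External,
        ValueKind::OriginalTranslation => LinkageChoice::Internal,
    }
}

fn main() {
    assert_eq!(choose_linkage(ValueKind::InlinedCopy, false, 2),
               LinkageChoice::AvailableExternally);
    assert_eq!(choose_linkage(ValueKind::OriginalTranslation, false, 1),
               LinkageChoice::Internal);
}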
-fn set_global_section(ccx: &CrateContext, llval: ValueRef, i: &hir::Item) {
-    match attr::first_attr_value_str_by_name(&i.attrs, "link_section") {
-        Some(sect) => {
-            if contains_null(&sect) {
-                ccx.sess().fatal(&format!("Illegal null byte in link_section value: `{}`", &sect));
-            }
-            unsafe {
-                let buf = CString::new(sect.as_bytes()).unwrap();
-                llvm::LLVMSetSection(llval, buf.as_ptr());
-            }
-        },
-        None => ()
-    }
-}
-
-pub fn trans_item(ccx: &CrateContext, item: &hir::Item) {
-    let _icx = push_ctxt("trans_item");
-
-    let from_external = ccx.external_srcs().borrow().contains_key(&item.id);
-
-    match item.node {
-        hir::ItemFn(ref decl, _, _, abi, ref generics, ref body) => {
-            if !generics.is_type_parameterized() {
-                let trans_everywhere = attr::requests_inline(&item.attrs);
-                // Ignore `trans_everywhere` for cross-crate inlined items
-                // (`from_external`).  `trans_item` will be called once for each
-                // compilation unit that references the item, so it will still get
-                // translated everywhere it's needed.
-                for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
-                    let llfn = get_item_val(ccx, item.id);
-                    let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
-                    if abi != Abi::Rust {
-                        foreign::trans_rust_fn_with_foreign_abi(ccx,
-                                                                &decl,
-                                                                &body,
-                                                                &item.attrs,
-                                                                llfn,
-                                                                empty_substs,
-                                                                item.id,
-                                                                None);
-                    } else {
-                        trans_fn(ccx,
-                                 &decl,
-                                 &body,
-                                 llfn,
-                                 empty_substs,
-                                 item.id,
-                                 &item.attrs);
-                    }
-                    set_global_section(ccx, llfn, item);
-                    update_linkage(ccx,
-                                   llfn,
-                                   Some(item.id),
-                                   if is_origin {
-                                       OriginalTranslation
-                                   } else {
-                                       InlinedCopy
-                                   });
-
-                    if is_entry_fn(ccx.sess(), item.id) {
-                        create_entry_wrapper(ccx, item.span, llfn);
-                        // check for the #[rustc_error] annotation, which forces an
-                        // error in trans. This is used to write compile-fail tests
-                        // that actually test that compilation succeeds without
-                        // reporting an error.
-                        let item_def_id = ccx.tcx().map.local_def_id(item.id);
-                        if ccx.tcx().has_attr(item_def_id, "rustc_error") {
-                            ccx.tcx().sess.span_fatal(item.span, "compilation successful");
-                        }
-                    }
-                }
-            }
-        }
-        hir::ItemImpl(_, _, ref generics, _, _, ref impl_items) => {
-            meth::trans_impl(ccx, item.name, impl_items, generics, item.id);
-        }
-        hir::ItemMod(_) => {
-            // modules have no equivalent at runtime, they just affect
-            // the mangled names of things contained within
-        }
-        hir::ItemEnum(ref enum_definition, ref gens) => {
-            if gens.ty_params.is_empty() {
-                // sizes only make sense for non-generic types
-
-                enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
-            }
-        }
-        hir::ItemConst(..) => {}
-        hir::ItemStatic(_, m, ref expr) => {
-            let g = match consts::trans_static(ccx, m, expr, item.id, &item.attrs) {
-                Ok(g) => g,
-                Err(err) => ccx.tcx().sess.span_fatal(expr.span, &err.description()),
-            };
-            set_global_section(ccx, g, item);
-            update_linkage(ccx, g, Some(item.id), OriginalTranslation);
-        }
-        hir::ItemForeignMod(ref foreign_mod) => {
-            foreign::trans_foreign_mod(ccx, foreign_mod);
-        }
-        hir::ItemTrait(..) => {}
-        _ => {
-            // fall through
-        }
-    }
-}
-
-// only use this for foreign function ABIs and glue, use `register_fn` for Rust functions
-pub fn register_fn_llvmty(ccx: &CrateContext,
-                          sp: Span,
-                          sym: String,
-                          node_id: ast::NodeId,
-                          cc: llvm::CallConv,
-                          llfty: Type)
-                          -> ValueRef {
-    debug!("register_fn_llvmty id={} sym={}", node_id, sym);
-
-    let llfn = declare::define_fn(ccx, &sym[..], cc, llfty,
-                                   ty::FnConverging(ccx.tcx().mk_nil())).unwrap_or_else(||{
-        ccx.sess().span_fatal(sp, &format!("symbol `{}` is already defined", sym));
-    });
-    finish_register_fn(ccx, sym, node_id);
-    llfn
-}
-
-fn finish_register_fn(ccx: &CrateContext, sym: String, node_id: ast::NodeId) {
-    ccx.item_symbols().borrow_mut().insert(node_id, sym);
-}
-
-fn register_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                         sp: Span,
-                         sym: String,
-                         node_id: ast::NodeId,
-                         node_type: Ty<'tcx>)
-                         -> ValueRef {
-    if let ty::TyBareFn(_, ref f) = node_type.sty {
-        if f.abi != Abi::Rust && f.abi != Abi::RustCall {
-            ccx.sess().span_bug(sp,
-                                &format!("only the `{}` or `{}` calling conventions are valid \
-                                          for this function; `{}` was specified",
-                                         Abi::Rust.name(),
-                                         Abi::RustCall.name(),
-                                         f.abi.name()));
-        }
-    } else {
-        ccx.sess().span_bug(sp, "expected bare rust function")
-    }
-
-    let llfn = declare::define_rust_fn(ccx, &sym[..], node_type).unwrap_or_else(|| {
-        ccx.sess().span_fatal(sp, &format!("symbol `{}` is already defined", sym));
-    });
-    finish_register_fn(ccx, sym, node_id);
-    llfn
-}
-
-pub fn is_entry_fn(sess: &Session, node_id: ast::NodeId) -> bool {
-    match *sess.entry_fn.borrow() {
-        Some((entry_id, _)) => node_id == entry_id,
-        None => false,
-    }
-}
-
-/// Create the `main` function which will initialise the rust runtime and call users’ main
-/// function.
-pub fn create_entry_wrapper(ccx: &CrateContext, sp: Span, main_llfn: ValueRef) {
-    let et = ccx.sess().entry_type.get().unwrap();
-    match et {
-        config::EntryMain => {
-            create_entry_fn(ccx, sp, main_llfn, true);
-        }
-        config::EntryStart => create_entry_fn(ccx, sp, main_llfn, false),
-        config::EntryNone => {}    // Do nothing.
-    }
-
-    fn create_entry_fn(ccx: &CrateContext,
-                       sp: Span,
-                       rust_main: ValueRef,
-                       use_start_lang_item: bool) {
-        let llfty = Type::func(&[ccx.int_type(), Type::i8p(ccx).ptr_to()], &ccx.int_type());
-
-        let llfn = declare::define_cfn(ccx, "main", llfty, ccx.tcx().mk_nil()).unwrap_or_else(|| {
-            // FIXME: We should be smart and show a better diagnostic here.
-            ccx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times")
-                      .help("did you use #[no_mangle] on `fn main`? Use #[start] instead")
-                      .emit();
-            ccx.sess().abort_if_errors();
-            panic!();
-        });
-
-        let llbb = unsafe {
-            llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, "top\0".as_ptr() as *const _)
-        };
-        let bld = ccx.raw_builder();
-        unsafe {
-            llvm::LLVMPositionBuilderAtEnd(bld, llbb);
-
-            debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx);
-
-            let (start_fn, args) = if use_start_lang_item {
-                let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) {
-                    Ok(id) => id,
-                    Err(s) => {
-                        ccx.sess().fatal(&s[..]);
-                    }
-                };
-                let start_fn = if let Some(start_node_id) = ccx.tcx()
-                                                               .map
-                                                               .as_local_node_id(start_def_id) {
-                    get_item_val(ccx, start_node_id)
-                } else {
-                    let start_fn_type = ccx.tcx().lookup_item_type(start_def_id).ty;
-                    trans_external_path(ccx, start_def_id, start_fn_type)
-                };
-                let args = {
-                    let opaque_rust_main =
-                        llvm::LLVMBuildPointerCast(bld,
-                                                   rust_main,
-                                                   Type::i8p(ccx).to_ref(),
-                                                   "rust_main\0".as_ptr() as *const _);
-
-                    vec![opaque_rust_main, get_param(llfn, 0), get_param(llfn, 1)]
-                };
-                (start_fn, args)
-            } else {
-                debug!("using user-defined start fn");
-                let args = vec![get_param(llfn, 0 as c_uint), get_param(llfn, 1 as c_uint)];
-
-                (rust_main, args)
-            };
-
-            let result = llvm::LLVMRustBuildCall(bld,
-                                                 start_fn,
-                                                 args.as_ptr(),
-                                                 args.len() as c_uint,
-                                                 0 as *mut _,
-                                                 noname());
-
-            llvm::LLVMBuildRet(bld, result);
-        }
-    }
-}
-
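// A rough sketch (not part of the diff) of the wrapper that `create_entry_fn`
// emits, written as ordinary Rust for readability; the real wrapper is built
// directly with the LLVM builder. `lang_start` stands in for the
// `#[lang = "start"]` item that `StartFnLangItem` resolves to, and all names
// here are illustrative.
fn user_main() {
    println!("hello from the user's main");
}

// The start lang item receives the user's main as an opaque pointer plus the
// C-style argc/argv, sets up the runtime, and returns the process exit code.
fn lang_start(main: fn(), _argc: isize, _argv: *const *const u8) -> isize {
    main();
    0
}

// Shape of the synthesized `main` symbol when `EntryMain` is selected; with
// `EntryStart`, the user's `#[start]` function is called directly instead.
fn synthesized_main(argc: isize, argv: *const *const u8) -> isize {
    lang_start(user_main, argc, argv)
}

fn main() {
    std::process::exit(synthesized_main(0, std::ptr::null()) as i32);
}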
-fn exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                           id: ast::NodeId,
-                           ty: Ty<'tcx>,
-                           attrs: &[ast::Attribute])
-                           -> String {
-    match ccx.external_srcs().borrow().get(&id) {
-        Some(&did) => {
-            let sym = ccx.sess().cstore.item_symbol(did);
-            debug!("found item {} in other crate...", sym);
-            return sym;
-        }
-        None => {}
-    }
-
-    match attr::find_export_name_attr(ccx.sess().diagnostic(), attrs) {
-        // Use provided name
-        Some(name) => name.to_string(),
-        _ => {
-            let path = ccx.tcx().map.def_path_from_id(id);
-            if attr::contains_name(attrs, "no_mangle") {
-                // Don't mangle
-                path.last().unwrap().data.to_string()
-            } else {
-                match weak_lang_items::link_name(attrs) {
-                    Some(name) => name.to_string(),
-                    None => {
-                        // Usual name mangling
-                        mangle_exported_name(ccx, path, ty, id)
-                    }
-                }
-            }
-        }
-    }
-}
-
-fn contains_null(s: &str) -> bool {
-    s.bytes().any(|b| b == 0)
-}
-
-pub fn get_item_val(ccx: &CrateContext, id: ast::NodeId) -> ValueRef {
-    debug!("get_item_val(id=`{}`)", id);
-
-    if let Some(v) = ccx.item_vals().borrow().get(&id).cloned() {
-        return v;
-    }
-
-    let item = ccx.tcx().map.get(id);
-    debug!("get_item_val: id={} item={:?}", id, item);
-    let val = match item {
-        hir_map::NodeItem(i) => {
-            let ty = ccx.tcx().node_id_to_type(i.id);
-            let sym = || exported_name(ccx, id, ty, &i.attrs);
-
-            let v = match i.node {
-                hir::ItemStatic(..) => {
-                    // If this static came from an external crate, then
-                    // we need to get the symbol from metadata instead of
-                    // using the current crate's name/version
-                    // information in the hash of the symbol
-                    let sym = sym();
-                    debug!("making {}", sym);
-
-                    // Create the global before evaluating the initializer;
-                    // this is necessary to allow recursive statics.
-                    let llty = type_of(ccx, ty);
-                    let g = declare::define_global(ccx, &sym[..], llty).unwrap_or_else(|| {
-                        ccx.sess()
-                           .span_fatal(i.span, &format!("symbol `{}` is already defined", sym))
-                    });
-
-                    ccx.item_symbols().borrow_mut().insert(i.id, sym);
-                    g
-                }
-
-                hir::ItemFn(_, _, _, abi, _, _) => {
-                    let sym = sym();
-                    let llfn = if abi == Abi::Rust {
-                        register_fn(ccx, i.span, sym, i.id, ty)
-                    } else {
-                        foreign::register_rust_fn_with_foreign_abi(ccx, i.span, sym, i.id)
-                    };
-                    attributes::from_fn_attrs(ccx, &i.attrs, llfn);
-                    llfn
-                }
-
-                _ => ccx.sess().bug("get_item_val: weird result in table"),
-            };
-
-            v
-        }
-
-        hir_map::NodeTraitItem(trait_item) => {
-            debug!("get_item_val(): processing a NodeTraitItem");
-            match trait_item.node {
-                hir::MethodTraitItem(_, Some(_)) => {
-                    register_method(ccx, id, &trait_item.attrs, trait_item.span)
-                }
-                _ => {
-                    ccx.sess().span_bug(trait_item.span,
-                                        "unexpected variant: trait item other than a provided \
-                                         method in get_item_val()");
-                }
-            }
-        }
-
-        hir_map::NodeImplItem(impl_item) => {
-            match impl_item.node {
-                hir::ImplItemKind::Method(..) => {
-                    register_method(ccx, id, &impl_item.attrs, impl_item.span)
-                }
-                _ => {
-                    ccx.sess().span_bug(impl_item.span,
-                                        "unexpected variant: non-method impl item in \
-                                         get_item_val()");
-                }
-            }
-        }
-
-        hir_map::NodeForeignItem(ni) => {
-            match ni.node {
-                hir::ForeignItemFn(..) => {
-                    let abi = ccx.tcx().map.get_foreign_abi(id);
-                    let ty = ccx.tcx().node_id_to_type(ni.id);
-                    let name = foreign::link_name(&ni);
-                    foreign::register_foreign_item_fn(ccx, abi, ty, &name, &ni.attrs)
-                }
-                hir::ForeignItemStatic(..) => {
-                    foreign::register_static(ccx, &ni)
-                }
-            }
-        }
-
-        hir_map::NodeVariant(ref v) => {
-            let llfn;
-            let fields = if v.node.data.is_struct() {
-                ccx.sess().bug("struct variant kind unexpected in get_item_val")
-            } else {
-                v.node.data.fields()
-            };
-            assert!(!fields.is_empty());
-            let ty = ccx.tcx().node_id_to_type(id);
-            let parent = ccx.tcx().map.get_parent(id);
-            let enm = ccx.tcx().map.expect_item(parent);
-            let sym = exported_name(ccx, id, ty, &enm.attrs);
-
-            llfn = match enm.node {
-                hir::ItemEnum(_, _) => {
-                    register_fn(ccx, (*v).span, sym, id, ty)
-                }
-                _ => ccx.sess().bug("NodeVariant, shouldn't happen"),
-            };
-            attributes::inline(llfn, attributes::InlineAttr::Hint);
-            llfn
-        }
-
-        hir_map::NodeStructCtor(struct_def) => {
-            // Only register the constructor if this is a tuple-like struct.
-            let ctor_id = if struct_def.is_struct() {
-                ccx.sess().bug("attempt to register a constructor of a non-tuple-like struct")
-            } else {
-                struct_def.id()
-            };
-            let parent = ccx.tcx().map.get_parent(id);
-            let struct_item = ccx.tcx().map.expect_item(parent);
-            let ty = ccx.tcx().node_id_to_type(ctor_id);
-            let sym = exported_name(ccx, id, ty, &struct_item.attrs);
-            let llfn = register_fn(ccx, struct_item.span, sym, ctor_id, ty);
-            attributes::inline(llfn, attributes::InlineAttr::Hint);
-            llfn
-        }
-
-        ref variant => {
-            ccx.sess().bug(&format!("get_item_val(): unexpected variant: {:?}", variant))
-        }
-    };
-
-    // All LLVM globals and functions are initially created as external-linkage
-    // declarations.  If `trans_item`/`trans_fn` later turns the declaration
-    // into a definition, it adjusts the linkage then (using `update_linkage`).
-    //
-    // The exception is foreign items, which have their linkage set inside the
-    // call to `foreign::register_*` above.  We don't touch the linkage after
-    // that (`foreign::trans_foreign_mod` doesn't adjust the linkage like the
-    // other item translation functions do).
-
-    ccx.item_vals().borrow_mut().insert(id, val);
-    val
-}
-
-fn register_method(ccx: &CrateContext,
-                   id: ast::NodeId,
-                   attrs: &[ast::Attribute],
-                   span: Span)
-                   -> ValueRef {
-    let mty = ccx.tcx().node_id_to_type(id);
-
-    let sym = exported_name(ccx, id, mty, &attrs);
-
-    if let ty::TyBareFn(_, ref f) = mty.sty {
-        let llfn = if f.abi == Abi::Rust || f.abi == Abi::RustCall {
-            register_fn(ccx, span, sym, id, mty)
-        } else {
-            foreign::register_rust_fn_with_foreign_abi(ccx, span, sym, id)
-        };
-        attributes::from_fn_attrs(ccx, &attrs, llfn);
-        return llfn;
-    } else {
-        ccx.sess().span_bug(span, "expected bare rust function");
-    }
-}
-
-pub fn write_metadata<'a, 'tcx>(cx: &SharedCrateContext<'a, 'tcx>,
-                                krate: &hir::Crate,
-                                reachable: &NodeSet,
-                                mir_map: &MirMap<'tcx>)
-                                -> Vec<u8> {
-    use flate;
-
-    let any_library = cx.sess()
-                        .crate_types
-                        .borrow()
-                        .iter()
-                        .any(|ty| *ty != config::CrateTypeExecutable);
-    if !any_library {
-        return Vec::new();
-    }
-
-    let cstore = &cx.tcx().sess.cstore;
-    let metadata = cstore.encode_metadata(cx.tcx(),
-                                          cx.export_map(),
-                                          cx.item_symbols(),
-                                          cx.link_meta(),
-                                          reachable,
-                                          mir_map,
-                                          krate);
-    let mut compressed = cstore.metadata_encoding_version().to_vec();
-    compressed.extend_from_slice(&flate::deflate_bytes(&metadata));
-
-    let llmeta = C_bytes_in_context(cx.metadata_llcx(), &compressed[..]);
-    let llconst = C_struct_in_context(cx.metadata_llcx(), &[llmeta], false);
-    let name = format!("rust_metadata_{}_{}",
-                       cx.link_meta().crate_name,
-                       cx.link_meta().crate_hash);
-    let buf = CString::new(name).unwrap();
-    let llglobal = unsafe {
-        llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(), buf.as_ptr())
-    };
-    unsafe {
-        llvm::LLVMSetInitializer(llglobal, llconst);
-        let name =
-            cx.tcx().sess.cstore.metadata_section_name(&cx.sess().target.target);
-        let name = CString::new(name).unwrap();
-        llvm::LLVMSetSection(llglobal, name.as_ptr())
-    }
-    return metadata;
-}
-
-/// Find any symbols that are defined in one compilation unit, but not declared
-/// in any other compilation unit.  Give these symbols internal linkage.
-fn internalize_symbols(cx: &SharedCrateContext, reachable: &HashSet<&str>) {
-    unsafe {
-        let mut declared = HashSet::new();
-
-        // Collect all external declarations in all compilation units.
-        for ccx in cx.iter() {
-            for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
-                let linkage = llvm::LLVMGetLinkage(val);
-                // We only care about external declarations (not definitions)
-                // and available_externally definitions.
-                if !(linkage == llvm::ExternalLinkage as c_uint &&
-                     llvm::LLVMIsDeclaration(val) != 0) &&
-                   !(linkage == llvm::AvailableExternallyLinkage as c_uint) {
-                    continue;
-                }
-
-                let name = CStr::from_ptr(llvm::LLVMGetValueName(val))
-                               .to_bytes()
-                               .to_vec();
-                declared.insert(name);
-            }
-        }
-
-        // Examine each external definition.  If the definition is not used in
-        // any other compilation unit, and is not reachable from other crates,
-        // then give it internal linkage.
-        for ccx in cx.iter() {
-            for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
-                // We only care about external definitions.
-                if !(llvm::LLVMGetLinkage(val) == llvm::ExternalLinkage as c_uint &&
-                     llvm::LLVMIsDeclaration(val) == 0) {
-                    continue;
-                }
-
-                let name = CStr::from_ptr(llvm::LLVMGetValueName(val))
-                               .to_bytes()
-                               .to_vec();
-                if !declared.contains(&name) &&
-                   !reachable.contains(str::from_utf8(&name).unwrap()) {
-                    llvm::SetLinkage(val, llvm::InternalLinkage);
-                    llvm::SetDLLStorageClass(val, llvm::DefaultStorageClass);
-                }
-            }
-        }
-    }
-}
-
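// The two-pass internalization above, reduced to plain string sets (not part
// of the diff) so the algorithm is visible without the LLVM FFI; `Module` and
// its fields are illustrative names rather than compiler types.
use std::collections::HashSet;

struct Module {
    // external declarations (and available_externally definitions) in this unit
    external_declarations: HashSet<String>,
    // symbols this unit defines with external linkage
    external_definitions: HashSet<String>,
}

fn symbols_to_internalize(modules: &[Module], reachable: &HashSet<String>) -> HashSet<String> {
    // Pass 1: every symbol that some compilation unit still refers to from outside.
    let mut declared = HashSet::new();
    for m in modules {
        declared.extend(m.external_declarations.iter().cloned());
    }
    // Pass 2: an external definition that no other unit declares, and that is
    // not reachable from other crates, can safely be made internal.
    let mut internal = HashSet::new();
    for m in modules {
        for sym in &m.external_definitions {
            if !declared.contains(sym) && !reachable.contains(sym) {
                internal.insert(sym.clone());
            }
        }
    }
    internal
}

fn main() {
    let a = Module {
        external_declarations: ["helper".to_string()].iter().cloned().collect(),
        external_definitions: ["entry".to_string()].iter().cloned().collect(),
    };
    let b = Module {
        external_declarations: HashSet::new(),
        external_definitions: ["helper".to_string(), "scratch".to_string()]
            .iter().cloned().collect(),
    };
    let reachable: HashSet<String> = ["entry".to_string()].iter().cloned().collect();
    let expected: HashSet<String> = ["scratch".to_string()].iter().cloned().collect();
    // Only `scratch` is neither declared by another unit nor reachable from outside.
    assert_eq!(symbols_to_internalize(&[a, b], &reachable), expected);
}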
-// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
-// This is required to satisfy `dllimport` references to static data in .rlibs
-// when using MSVC linker.  We do this only for data, as linker can fix up
-// code references on its own.
-// See #26591, #27438
-fn create_imps(cx: &SharedCrateContext) {
-    // The x86 ABI seems to require that leading underscores are added to symbol
-    // names, so we need an extra underscore on 32-bit. There's also a leading
-    // '\x01' here which disables LLVM's symbol mangling (e.g. no extra
-    // underscores added in front).
-    let prefix = if cx.sess().target.target.target_pointer_width == "32" {
-        "\x01__imp__"
-    } else {
-        "\x01__imp_"
-    };
-    unsafe {
-        for ccx in cx.iter() {
-            let exported: Vec<_> = iter_globals(ccx.llmod())
-                                       .filter(|&val| {
-                                           llvm::LLVMGetLinkage(val) ==
-                                           llvm::ExternalLinkage as c_uint &&
-                                           llvm::LLVMIsDeclaration(val) == 0
-                                       })
-                                       .collect();
-
-            let i8p_ty = Type::i8p(&ccx);
-            for val in exported {
-                let name = CStr::from_ptr(llvm::LLVMGetValueName(val));
-                let mut imp_name = prefix.as_bytes().to_vec();
-                imp_name.extend(name.to_bytes());
-                let imp_name = CString::new(imp_name).unwrap();
-                let imp = llvm::LLVMAddGlobal(ccx.llmod(),
-                                              i8p_ty.to_ref(),
-                                              imp_name.as_ptr() as *const _);
-                let init = llvm::LLVMConstBitCast(val, i8p_ty.to_ref());
-                llvm::LLVMSetInitializer(imp, init);
-                llvm::SetLinkage(imp, llvm::ExternalLinkage);
-            }
-        }
-    }
-}
-
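// A tiny sketch (not part of the diff) of the stub naming described above: on
// 32-bit x86 an extra underscore is prepended, and the leading '\x01' tells
// LLVM not to mangle the name any further.
fn imp_symbol_name(pointer_width: &str, sym: &str) -> String {
    let prefix = if pointer_width == "32" { "\x01__imp__" } else { "\x01__imp_" };
    format!("{}{}", prefix, sym)
}

fn main() {
    assert_eq!(imp_symbol_name("64", "SOME_STATIC"), "\x01__imp_SOME_STATIC");
    assert_eq!(imp_symbol_name("32", "SOME_STATIC"), "\x01__imp__SOME_STATIC");
}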
-struct ValueIter {
-    cur: ValueRef,
-    step: unsafe extern "C" fn(ValueRef) -> ValueRef,
-}
-
-impl Iterator for ValueIter {
-    type Item = ValueRef;
-
-    fn next(&mut self) -> Option<ValueRef> {
-        let old = self.cur;
-        if !old.is_null() {
-            self.cur = unsafe { (self.step)(old) };
-            Some(old)
-        } else {
-            None
-        }
-    }
-}
-
-fn iter_globals(llmod: llvm::ModuleRef) -> ValueIter {
-    unsafe {
-        ValueIter {
-            cur: llvm::LLVMGetFirstGlobal(llmod),
-            step: llvm::LLVMGetNextGlobal,
-        }
-    }
-}
-
-fn iter_functions(llmod: llvm::ModuleRef) -> ValueIter {
-    unsafe {
-        ValueIter {
-            cur: llvm::LLVMGetFirstFunction(llmod),
-            step: llvm::LLVMGetNextFunction,
-        }
-    }
-}
-
-/// The context provided lists a set of reachable ids as calculated by
-/// middle::reachable, but this contains far more ids and symbols than we're
-/// actually exposing from the object file. This function will filter the set in
-/// the context to the set of ids which correspond to symbols that are exposed
-/// from the object file being generated.
-///
-/// This list is later used by linkers to determine the set of symbols needed to
-/// be exposed from a dynamic library and it's also encoded into the metadata.
-pub fn filter_reachable_ids(ccx: &SharedCrateContext) -> NodeSet {
-    ccx.reachable().iter().map(|x| *x).filter(|id| {
-        // First, only worry about nodes which have a symbol name
-        ccx.item_symbols().borrow().contains_key(id)
-    }).filter(|&id| {
-        // Next, we want to ignore some FFI functions that are not exposed from
-        // this crate. Reachable FFI functions can be lumped into two
-        // categories:
-        //
-        // 1. Those that are included statically via a static library
-        // 2. Those included otherwise (e.g. dynamically or via a framework)
-        //
-        // Although our LLVM module is not literally emitting code for the
-        // statically included symbols, it's an export of our library which
-        // needs to be passed on to the linker and encoded in the metadata.
-        //
-        // As a result, if this id is an FFI item (foreign item) then we only
-        // let it through if it's included statically.
-        match ccx.tcx().map.get(id) {
-            hir_map::NodeForeignItem(..) => {
-                ccx.sess().cstore.is_statically_included_foreign_item(id)
-            }
-            _ => true,
-        }
-    }).collect()
-}
-
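// The filtering rule above in miniature (not part of the diff): ids without a
// symbol are dropped, and foreign (FFI) items survive only when they are
// statically included in this crate. The types are simplified stand-ins for
// the compiler's tables.
use std::collections::{HashMap, HashSet};

#[derive(Clone, Copy)]
enum ItemKind { Ordinary, Foreign { statically_included: bool } }

fn filter_reachable(reachable: &HashSet<u32>,
                    symbols: &HashMap<u32, String>,
                    kinds: &HashMap<u32, ItemKind>) -> HashSet<u32> {
    reachable.iter().cloned()
        .filter(|id| symbols.contains_key(id))
        .filter(|id| match kinds[id] {
            ItemKind::Foreign { statically_included } => statically_included,
            ItemKind::Ordinary => true,
        })
        .collect()
}

fn main() {
    let reachable: HashSet<u32> = [1, 2, 3].iter().cloned().collect();
    let symbols: HashMap<u32, String> =
        [(1, "a".to_string()), (2, "b".to_string())].iter().cloned().collect();
    let mut kinds = HashMap::new();
    kinds.insert(1, ItemKind::Ordinary);
    kinds.insert(2, ItemKind::Foreign { statically_included: false });
    // Id 3 has no symbol and id 2 is a dynamically linked FFI item, so only
    // id 1 makes it into the exported set.
    assert_eq!(filter_reachable(&reachable, &symbols, &kinds).len(), 1);
}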
-pub fn trans_crate<'tcx>(tcx: &ty::ctxt<'tcx>,
-                         mir_map: &MirMap<'tcx>,
-                         analysis: ty::CrateAnalysis)
-                         -> CrateTranslation {
-    let _task = tcx.dep_graph.in_task(DepNode::TransCrate);
-
-    // Be careful with this krate: obviously it gives access to the
-    // entire contents of the krate. So if you push any subtasks of
-    // `TransCrate`, you need to be careful to register "reads" of the
-    // particular items that will be processed.
-    let krate = tcx.map.krate();
-
-    let ty::CrateAnalysis { export_map, reachable, name, .. } = analysis;
-
-    let check_overflow = if let Some(v) = tcx.sess.opts.debugging_opts.force_overflow_checks {
-        v
-    } else {
-        tcx.sess.opts.debug_assertions
-    };
-
-    let check_dropflag = if let Some(v) = tcx.sess.opts.debugging_opts.force_dropflag_checks {
-        v
-    } else {
-        tcx.sess.opts.debug_assertions
-    };
-
-    // Before we touch LLVM, make sure that multithreading is enabled.
-    unsafe {
-        use std::sync::Once;
-        static INIT: Once = Once::new();
-        static mut POISONED: bool = false;
-        INIT.call_once(|| {
-            if llvm::LLVMStartMultithreaded() != 1 {
-                // use an extra bool to make sure that all future usage of LLVM
-                // cannot proceed despite the Once not running more than once.
-                POISONED = true;
-            }
-
-            ::back::write::configure_llvm(&tcx.sess);
-        });
-
-        if POISONED {
-            tcx.sess.bug("couldn't enable multi-threaded LLVM");
-        }
-    }
-
-    let link_meta = link::build_link_meta(&tcx.sess, krate, name);
-
-    let codegen_units = tcx.sess.opts.cg.codegen_units;
-    let shared_ccx = SharedCrateContext::new(&link_meta.crate_name,
-                                             codegen_units,
-                                             tcx,
-                                             &mir_map,
-                                             export_map,
-                                             Sha256::new(),
-                                             link_meta.clone(),
-                                             reachable,
-                                             check_overflow,
-                                             check_dropflag);
-
-    {
-        let ccx = shared_ccx.get_ccx(0);
-
-        // First, verify intrinsics.
-        intrinsic::check_intrinsics(&ccx);
-
-        collect_translation_items(&ccx);
-
-        // Next, translate all items. See `TransModVisitor` for
-        // details on why we walk in this particular way.
-        {
-            let _icx = push_ctxt("text");
-            intravisit::walk_mod(&mut TransItemsWithinModVisitor { ccx: &ccx }, &krate.module);
-            krate.visit_all_items(&mut TransModVisitor { ccx: &ccx });
-        }
-
-        collector::print_collection_results(&ccx);
-    }
-
-    for ccx in shared_ccx.iter() {
-        if ccx.sess().opts.debuginfo != NoDebugInfo {
-            debuginfo::finalize(&ccx);
-        }
-        for &(old_g, new_g) in ccx.statics_to_rauw().borrow().iter() {
-            unsafe {
-                let bitcast = llvm::LLVMConstPointerCast(new_g, llvm::LLVMTypeOf(old_g));
-                llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
-                llvm::LLVMDeleteGlobal(old_g);
-            }
-        }
-    }
-
-    let reachable_symbol_ids = filter_reachable_ids(&shared_ccx);
-
-    // Translate the metadata.
-    let metadata = time(tcx.sess.time_passes(), "write metadata", || {
-        write_metadata(&shared_ccx, krate, &reachable_symbol_ids, mir_map)
-    });
-
-    if shared_ccx.sess().trans_stats() {
-        let stats = shared_ccx.stats();
-        println!("--- trans stats ---");
-        println!("n_glues_created: {}", stats.n_glues_created.get());
-        println!("n_null_glues: {}", stats.n_null_glues.get());
-        println!("n_real_glues: {}", stats.n_real_glues.get());
-
-        println!("n_fns: {}", stats.n_fns.get());
-        println!("n_monos: {}", stats.n_monos.get());
-        println!("n_inlines: {}", stats.n_inlines.get());
-        println!("n_closures: {}", stats.n_closures.get());
-        println!("fn stats:");
-        stats.fn_stats.borrow_mut().sort_by(|&(_, insns_a), &(_, insns_b)| {
-            insns_b.cmp(&insns_a)
-        });
-        for tuple in stats.fn_stats.borrow().iter() {
-            match *tuple {
-                (ref name, insns) => {
-                    println!("{} insns, {}", insns, *name);
-                }
-            }
-        }
-    }
-    if shared_ccx.sess().count_llvm_insns() {
-        for (k, v) in shared_ccx.stats().llvm_insns.borrow().iter() {
-            println!("{:7} {}", *v, *k);
-        }
-    }
-
-    let modules = shared_ccx.iter()
-        .map(|ccx| ModuleTranslation { llcx: ccx.llcx(), llmod: ccx.llmod() })
-        .collect();
-
-    let sess = shared_ccx.sess();
-    let mut reachable_symbols = reachable_symbol_ids.iter().map(|id| {
-        shared_ccx.item_symbols().borrow()[id].to_string()
-    }).collect::<Vec<_>>();
-    if sess.entry_fn.borrow().is_some() {
-        reachable_symbols.push("main".to_string());
-    }
-
-    // For the purposes of LTO, we add to the reachable set all of the upstream
-    // reachable extern fns. These functions are all part of the public ABI of
-    // the final product, so LTO needs to preserve them.
-    if sess.lto() {
-        for cnum in sess.cstore.crates() {
-            let syms = sess.cstore.reachable_ids(cnum);
-            reachable_symbols.extend(syms.into_iter().filter(|did| {
-                sess.cstore.is_extern_item(shared_ccx.tcx(), *did)
-            }).map(|did| {
-                sess.cstore.item_symbol(did)
-            }));
-        }
-    }
-
-    if codegen_units > 1 {
-        internalize_symbols(&shared_ccx,
-                            &reachable_symbols.iter().map(|x| &x[..]).collect());
-    }
-
-    if sess.target.target.options.is_like_msvc &&
-       sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib) {
-        create_imps(&shared_ccx);
-    }
-
-    let metadata_module = ModuleTranslation {
-        llcx: shared_ccx.metadata_llcx(),
-        llmod: shared_ccx.metadata_llmod(),
-    };
-    let no_builtins = attr::contains_name(&krate.attrs, "no_builtins");
-
-    assert_dep_graph::assert_dep_graph(tcx);
-
-    CrateTranslation {
-        modules: modules,
-        metadata_module: metadata_module,
-        link: link_meta,
-        metadata: metadata,
-        reachable: reachable_symbols,
-        no_builtins: no_builtins,
-    }
-}
-
-/// We visit all the items in the krate and translate them.  We do
-/// this in two walks. The first walk just finds module items. It then
-/// walks the full contents of those module items and translates all
-/// the items within. Note that this entire process is O(n). The
-/// reason for this two phased walk is that each module is
-/// (potentially) placed into a distinct codegen-unit. This walk also
-/// ensures that the immediate contents of each module is processed
-/// entirely before we proceed to find more modules, helping to ensure
-/// an equitable distribution amongst codegen-units.
-pub struct TransModVisitor<'a, 'tcx: 'a> {
-    pub ccx: &'a CrateContext<'a, 'tcx>,
-}
-
-impl<'a, 'tcx, 'v> Visitor<'v> for TransModVisitor<'a, 'tcx> {
-    fn visit_item(&mut self, i: &hir::Item) {
-        match i.node {
-            hir::ItemMod(_) => {
-                let item_ccx = self.ccx.rotate();
-                intravisit::walk_item(&mut TransItemsWithinModVisitor { ccx: &item_ccx }, i);
-            }
-            _ => { }
-        }
-    }
-}
-
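// A toy model (not part of the diff) of the two-phase walk described above:
// the outer phase finds modules, the inner phase translates the items directly
// inside one module (one "codegen unit" here) and deliberately skips nested
// modules, which the outer phase reaches on its own. Names are illustrative.
enum Item { Fn(&'static str), Mod(Vec<Item>) }

fn walk_mod(items: &[Item], units: &mut Vec<Vec<&'static str>>) {
    // Inner phase: this module's own items go into a fresh unit.
    let mut unit = Vec::new();
    for item in items {
        if let Item::Fn(name) = *item {
            unit.push(name);
        }
    }
    units.push(unit);
    // Outer phase: only now recurse into nested modules.
    for item in items {
        if let Item::Mod(ref inner) = *item {
            walk_mod(inner, units);
        }
    }
}

fn main() {
    let krate = vec![Item::Fn("a"), Item::Mod(vec![Item::Fn("b"), Item::Fn("c")])];
    let mut units = Vec::new();
    walk_mod(&krate, &mut units);
    assert_eq!(units, vec![vec!["a"], vec!["b", "c"]]);
}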
-/// Translates all the items within a given module. Expects owner to
-/// invoke `walk_item` on a module item. Ignores nested modules.
-pub struct TransItemsWithinModVisitor<'a, 'tcx: 'a> {
-    pub ccx: &'a CrateContext<'a, 'tcx>,
-}
-
-impl<'a, 'tcx, 'v> Visitor<'v> for TransItemsWithinModVisitor<'a, 'tcx> {
-    fn visit_nested_item(&mut self, item_id: hir::ItemId) {
-        self.visit_item(self.ccx.tcx().map.expect_item(item_id.id));
-    }
-
-    fn visit_item(&mut self, i: &hir::Item) {
-        match i.node {
-            hir::ItemMod(..) => {
-                // skip modules, they will be uncovered by the TransModVisitor
-            }
-            _ => {
-                let def_id = self.ccx.tcx().map.local_def_id(i.id);
-                let tcx = self.ccx.tcx();
-
-                // Create a subtask for trans'ing a particular item. We are
-                // giving `trans_item` access to this item, so also record a read.
-                tcx.dep_graph.with_task(DepNode::TransCrateItem(def_id), || {
-                    tcx.dep_graph.read(DepNode::Hir(def_id));
-
-                    // We are going to be accessing various tables
-                    // generated by TypeckItemBody; we also assume
-                    // that the body passes type check. These tables
-                    // are not individually tracked, so just register
-                    // a read here.
-                    tcx.dep_graph.read(DepNode::TypeckItemBody(def_id));
-
-                    trans_item(self.ccx, i);
-                });
-
-                intravisit::walk_item(self, i);
-            }
-        }
-    }
-}
-
-fn collect_translation_items<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>) {
-    let time_passes = ccx.sess().time_passes();
-
-    let collection_mode = match ccx.sess().opts.debugging_opts.print_trans_items {
-        Some(ref s) => {
-            let mode_string = s.to_lowercase();
-            let mode_string = mode_string.trim();
-            if mode_string == "eager" {
-                TransItemCollectionMode::Eager
-            } else {
-                if mode_string != "lazy" {
-                    let message = format!("Unknown codegen-item collection mode '{}'. \
-                                           Falling back to 'lazy' mode.",
-                                           mode_string);
-                    ccx.sess().warn(&message);
-                }
-
-                TransItemCollectionMode::Lazy
-            }
-        }
-        None => TransItemCollectionMode::Lazy
-    };
-
-    let items = time(time_passes, "translation item collection", || {
-        collector::collect_crate_translation_items(&ccx, collection_mode)
-    });
-
-    if ccx.sess().opts.debugging_opts.print_trans_items.is_some() {
-        let mut item_keys: Vec<_> = items.iter()
-                                         .map(|i| i.to_string(ccx))
-                                         .collect();
-        item_keys.sort();
-
-        for item in item_keys {
-            println!("TRANS_ITEM {}", item);
-        }
-
-        let mut ccx_map = ccx.translation_items().borrow_mut();
-
-        for cgi in items {
-            ccx_map.insert(cgi, TransItemState::PredictedButNotGenerated);
-        }
-    }
-}
diff --git a/src/librustc_trans/trans/basic_block.rs b/src/librustc_trans/trans/basic_block.rs

deleted file mode 100644 (file)
index d3d055c..0000000
--- a/src/librustc_trans/trans/basic_block.rs
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use llvm;
-use llvm::BasicBlockRef;
-use trans::value::{Users, Value};
-use std::iter::{Filter, Map};
-
-#[derive(Copy, Clone)]
-pub struct BasicBlock(pub BasicBlockRef);
-
-pub type Preds = Map<Filter<Users, fn(&Value) -> bool>, fn(Value) -> BasicBlock>;
-
-/// Wrapper for LLVM BasicBlockRef
-impl BasicBlock {
-    pub fn get(&self) -> BasicBlockRef {
-        let BasicBlock(v) = *self; v
-    }
-
-    pub fn as_value(self) -> Value {
-        unsafe {
-            Value(llvm::LLVMBasicBlockAsValue(self.get()))
-        }
-    }
-
-    pub fn pred_iter(self) -> Preds {
-        fn is_a_terminator_inst(user: &Value) -> bool { user.is_a_terminator_inst() }
-        let is_a_terminator_inst: fn(&Value) -> bool = is_a_terminator_inst;
-
-        fn get_parent(user: Value) -> BasicBlock { user.get_parent().unwrap() }
-        let get_parent: fn(Value) -> BasicBlock = get_parent;
-
-        self.as_value().user_iter()
-            .filter(is_a_terminator_inst)
-            .map(get_parent)
-    }
-
-    pub fn get_single_predecessor(self) -> Option<BasicBlock> {
-        let mut iter = self.pred_iter();
-        match (iter.next(), iter.next()) {
-            (Some(first), None) => Some(first),
-            _ => None
-        }
-    }
-}
diff --git a/src/librustc_trans/trans/build.rs b/src/librustc_trans/trans/build.rs
deleted file mode 100644 (file)
index 2b4fcf4..0000000
--- a/src/librustc_trans/trans/build.rs
+++ /dev/null
@@ -1,1121 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(dead_code)] // FFI wrappers
-#![allow(non_snake_case)]
-
-use llvm;
-use llvm::{CallConv, AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect, AttrBuilder};
-use llvm::{Opcode, IntPredicate, RealPredicate};
-use llvm::{ValueRef, BasicBlockRef};
-use trans::common::*;
-use syntax::codemap::Span;
-
-use trans::builder::Builder;
-use trans::type_::Type;
-use trans::debuginfo::DebugLoc;
-
-use libc::{c_uint, c_char};
-
-pub fn terminate(cx: Block, _: &str) {
-    debug!("terminate({})", cx.to_str());
-    cx.terminated.set(true);
-}
-
-pub fn check_not_terminated(cx: Block) {
-    if cx.terminated.get() {
-        panic!("already terminated!");
-    }
-}
-
-pub fn B<'blk, 'tcx>(cx: Block<'blk, 'tcx>) -> Builder<'blk, 'tcx> {
-    let b = cx.fcx.ccx.builder();
-    b.position_at_end(cx.llbb);
-    b
-}
-
-// The difference between a block being unreachable and being terminated is
-// somewhat obscure, and has to do with error checking. When a block is
-// terminated, we're saying that trying to add any further statements in the
-// block is an error. On the other hand, if something is unreachable, that
-// means that the block was terminated in some way that we don't want to check
-// for (panic/break/return statements, call to diverging functions, etc), and
-// further instructions to the block should simply be ignored.
-
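// A minimal model (not part of the diff) of the two flags discussed above,
// following the guard pattern the wrappers below all use; names are illustrative.
use std::cell::Cell;

struct BlockFlags {
    terminated: Cell<bool>,  // adding further statements is a compiler bug
    unreachable: Cell<bool>, // further instructions are silently ignored
}

impl BlockFlags {
    fn emit_ret(&self) {
        if self.unreachable.get() {
            return; // ignore code emitted after a diverging construct
        }
        assert!(!self.terminated.get(), "already terminated!");
        self.terminated.set(true);
        // ... the real wrappers would build the `ret` instruction here ...
    }
}

fn main() {
    let bb = BlockFlags { terminated: Cell::new(false), unreachable: Cell::new(false) };
    bb.emit_ret();
}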
-pub fn RetVoid(cx: Block, debug_loc: DebugLoc) {
-    if cx.unreachable.get() {
-        return;
-    }
-    check_not_terminated(cx);
-    terminate(cx, "RetVoid");
-    debug_loc.apply(cx.fcx);
-    B(cx).ret_void();
-}
-
-pub fn Ret(cx: Block, v: ValueRef, debug_loc: DebugLoc) {
-    if cx.unreachable.get() {
-        return;
-    }
-    check_not_terminated(cx);
-    terminate(cx, "Ret");
-    debug_loc.apply(cx.fcx);
-    B(cx).ret(v);
-}
-
-pub fn AggregateRet(cx: Block,
-                    ret_vals: &[ValueRef],
-                    debug_loc: DebugLoc) {
-    if cx.unreachable.get() {
-        return;
-    }
-    check_not_terminated(cx);
-    terminate(cx, "AggregateRet");
-    debug_loc.apply(cx.fcx);
-    B(cx).aggregate_ret(ret_vals);
-}
-
-pub fn Br(cx: Block, dest: BasicBlockRef, debug_loc: DebugLoc) {
-    if cx.unreachable.get() {
-        return;
-    }
-    check_not_terminated(cx);
-    terminate(cx, "Br");
-    debug_loc.apply(cx.fcx);
-    B(cx).br(dest);
-}
-
-pub fn CondBr(cx: Block,
-              if_: ValueRef,
-              then: BasicBlockRef,
-              else_: BasicBlockRef,
-              debug_loc: DebugLoc) {
-    if cx.unreachable.get() {
-        return;
-    }
-    check_not_terminated(cx);
-    terminate(cx, "CondBr");
-    debug_loc.apply(cx.fcx);
-    B(cx).cond_br(if_, then, else_);
-}
-
-pub fn Switch(cx: Block, v: ValueRef, else_: BasicBlockRef, num_cases: usize)
-    -> ValueRef {
-    if cx.unreachable.get() { return _Undef(v); }
-    check_not_terminated(cx);
-    terminate(cx, "Switch");
-    B(cx).switch(v, else_, num_cases)
-}
-
-pub fn AddCase(s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) {
-    unsafe {
-        if llvm::LLVMIsUndef(s) == llvm::True { return; }
-        llvm::LLVMAddCase(s, on_val, dest);
-    }
-}
-
-pub fn IndirectBr(cx: Block,
-                  addr: ValueRef,
-                  num_dests: usize,
-                  debug_loc: DebugLoc) {
-    if cx.unreachable.get() {
-        return;
-    }
-    check_not_terminated(cx);
-    terminate(cx, "IndirectBr");
-    debug_loc.apply(cx.fcx);
-    B(cx).indirect_br(addr, num_dests);
-}
-
-pub fn Invoke(cx: Block,
-              fn_: ValueRef,
-              args: &[ValueRef],
-              then: BasicBlockRef,
-              catch: BasicBlockRef,
-              attributes: Option<AttrBuilder>,
-              debug_loc: DebugLoc)
-              -> ValueRef {
-    if cx.unreachable.get() {
-        return C_null(Type::i8(cx.ccx()));
-    }
-    check_not_terminated(cx);
-    terminate(cx, "Invoke");
-    debug!("Invoke({} with arguments ({}))",
-           cx.val_to_string(fn_),
-           args.iter().map(|a| cx.val_to_string(*a)).collect::<Vec<String>>().join(", "));
-    debug_loc.apply(cx.fcx);
-    let bundle = cx.lpad().and_then(|b| b.bundle());
-    B(cx).invoke(fn_, args, then, catch, bundle, attributes)
-}
-
-pub fn Unreachable(cx: Block) {
-    if cx.unreachable.get() {
-        return
-    }
-    cx.unreachable.set(true);
-    if !cx.terminated.get() {
-        B(cx).unreachable();
-    }
-}
-
-pub fn _Undef(val: ValueRef) -> ValueRef {
-    unsafe {
-        return llvm::LLVMGetUndef(val_ty(val).to_ref());
-    }
-}
-
-/* Arithmetic */
-pub fn Add(cx: Block,
-           lhs: ValueRef,
-           rhs: ValueRef,
-           debug_loc: DebugLoc)
-           -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).add(lhs, rhs)
-}
-
-pub fn NSWAdd(cx: Block,
-              lhs: ValueRef,
-              rhs: ValueRef,
-              debug_loc: DebugLoc)
-              -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).nswadd(lhs, rhs)
-}
-
-pub fn NUWAdd(cx: Block,
-              lhs: ValueRef,
-              rhs: ValueRef,
-              debug_loc: DebugLoc)
-              -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).nuwadd(lhs, rhs)
-}
-
-pub fn FAdd(cx: Block,
-            lhs: ValueRef,
-            rhs: ValueRef,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).fadd(lhs, rhs)
-}
-
-pub fn Sub(cx: Block,
-           lhs: ValueRef,
-           rhs: ValueRef,
-           debug_loc: DebugLoc)
-           -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).sub(lhs, rhs)
-}
-
-pub fn NSWSub(cx: Block,
-              lhs: ValueRef,
-              rhs: ValueRef,
-              debug_loc: DebugLoc)
-              -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).nswsub(lhs, rhs)
-}
-
-pub fn NUWSub(cx: Block,
-              lhs: ValueRef,
-              rhs: ValueRef,
-              debug_loc: DebugLoc)
-              -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).nuwsub(lhs, rhs)
-}
-
-pub fn FSub(cx: Block,
-            lhs: ValueRef,
-            rhs: ValueRef,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).fsub(lhs, rhs)
-}
-
-pub fn Mul(cx: Block,
-           lhs: ValueRef,
-           rhs: ValueRef,
-           debug_loc: DebugLoc)
-           -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).mul(lhs, rhs)
-}
-
-pub fn NSWMul(cx: Block,
-              lhs: ValueRef,
-              rhs: ValueRef,
-              debug_loc: DebugLoc)
-              -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).nswmul(lhs, rhs)
-}
-
-pub fn NUWMul(cx: Block,
-              lhs: ValueRef,
-              rhs: ValueRef,
-              debug_loc: DebugLoc)
-              -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).nuwmul(lhs, rhs)
-}
-
-pub fn FMul(cx: Block,
-            lhs: ValueRef,
-            rhs: ValueRef,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).fmul(lhs, rhs)
-}
-
-pub fn UDiv(cx: Block,
-            lhs: ValueRef,
-            rhs: ValueRef,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).udiv(lhs, rhs)
-}
-
-pub fn SDiv(cx: Block,
-            lhs: ValueRef,
-            rhs: ValueRef,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).sdiv(lhs, rhs)
-}
-
-pub fn ExactSDiv(cx: Block,
-                 lhs: ValueRef,
-                 rhs: ValueRef,
-                 debug_loc: DebugLoc)
-                 -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).exactsdiv(lhs, rhs)
-}
-
-pub fn FDiv(cx: Block,
-            lhs: ValueRef,
-            rhs: ValueRef,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).fdiv(lhs, rhs)
-}
-
-pub fn URem(cx: Block,
-            lhs: ValueRef,
-            rhs: ValueRef,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).urem(lhs, rhs)
-}
-
-pub fn SRem(cx: Block,
-            lhs: ValueRef,
-            rhs: ValueRef,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).srem(lhs, rhs)
-}
-
-pub fn FRem(cx: Block,
-            lhs: ValueRef,
-            rhs: ValueRef,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).frem(lhs, rhs)
-}
-
-pub fn Shl(cx: Block,
-           lhs: ValueRef,
-           rhs: ValueRef,
-           debug_loc: DebugLoc)
-           -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).shl(lhs, rhs)
-}
-
-pub fn LShr(cx: Block,
-            lhs: ValueRef,
-            rhs: ValueRef,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).lshr(lhs, rhs)
-}
-
-pub fn AShr(cx: Block,
-            lhs: ValueRef,
-            rhs: ValueRef,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).ashr(lhs, rhs)
-}
-
-pub fn And(cx: Block,
-           lhs: ValueRef,
-           rhs: ValueRef,
-           debug_loc: DebugLoc)
-           -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).and(lhs, rhs)
-}
-
-pub fn Or(cx: Block,
-          lhs: ValueRef,
-          rhs: ValueRef,
-          debug_loc: DebugLoc)
-          -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).or(lhs, rhs)
-}
-
-pub fn Xor(cx: Block,
-           lhs: ValueRef,
-           rhs: ValueRef,
-           debug_loc: DebugLoc)
-           -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).xor(lhs, rhs)
-}
-
-pub fn BinOp(cx: Block,
-             op: Opcode,
-             lhs: ValueRef,
-             rhs: ValueRef,
-             debug_loc: DebugLoc)
-          -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(lhs);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).binop(op, lhs, rhs)
-}
-
-pub fn Neg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(v);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).neg(v)
-}
-
-pub fn NSWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(v);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).nswneg(v)
-}
-
-pub fn NUWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(v);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).nuwneg(v)
-}
-pub fn FNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(v);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).fneg(v)
-}
-
-pub fn Not(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
-    if cx.unreachable.get() {
-        return _Undef(v);
-    }
-    debug_loc.apply(cx.fcx);
-    B(cx).not(v)
-}
-
-pub fn Alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); }
-        AllocaFcx(cx.fcx, ty, name)
-    }
-}
-
-pub fn AllocaFcx(fcx: &FunctionContext, ty: Type, name: &str) -> ValueRef {
-    let b = fcx.ccx.builder();
-    b.position_before(fcx.alloca_insert_pt.get().unwrap());
-    DebugLoc::None.apply(fcx);
-    b.alloca(ty, name)
-}
-
-pub fn Free(cx: Block, pointer_val: ValueRef) {
-    if cx.unreachable.get() { return; }
-    B(cx).free(pointer_val)
-}
-
-pub fn Load(cx: Block, pointer_val: ValueRef) -> ValueRef {
-    unsafe {
-        let ccx = cx.fcx.ccx;
-        if cx.unreachable.get() {
-            let ty = val_ty(pointer_val);
-            let eltty = if ty.kind() == llvm::Array {
-                ty.element_type()
-            } else {
-                ccx.int_type()
-            };
-            return llvm::LLVMGetUndef(eltty.to_ref());
-        }
-        B(cx).load(pointer_val)
-    }
-}
-
-pub fn VolatileLoad(cx: Block, pointer_val: ValueRef) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
-        }
-        B(cx).volatile_load(pointer_val)
-    }
-}
-
-pub fn AtomicLoad(cx: Block, pointer_val: ValueRef, order: AtomicOrdering) -> ValueRef {
-    unsafe {
-        let ccx = cx.fcx.ccx;
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(ccx.int_type().to_ref());
-        }
-        B(cx).atomic_load(pointer_val, order)
-    }
-}
-
-
-pub fn LoadRangeAssert(cx: Block, pointer_val: ValueRef, lo: u64,
-                       hi: u64, signed: llvm::Bool) -> ValueRef {
-    if cx.unreachable.get() {
-        let ccx = cx.fcx.ccx;
-        let ty = val_ty(pointer_val);
-        let eltty = if ty.kind() == llvm::Array {
-            ty.element_type()
-        } else {
-            ccx.int_type()
-        };
-        unsafe {
-            llvm::LLVMGetUndef(eltty.to_ref())
-        }
-    } else {
-        B(cx).load_range_assert(pointer_val, lo, hi, signed)
-    }
-}
-
-pub fn LoadNonNull(cx: Block, ptr: ValueRef) -> ValueRef {
-    if cx.unreachable.get() {
-        let ccx = cx.fcx.ccx;
-        let ty = val_ty(ptr);
-        let eltty = if ty.kind() == llvm::Array {
-            ty.element_type()
-        } else {
-            ccx.int_type()
-        };
-        unsafe {
-            llvm::LLVMGetUndef(eltty.to_ref())
-        }
-    } else {
-        B(cx).load_nonnull(ptr)
-    }
-}
-
-pub fn Store(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef {
-    if cx.unreachable.get() { return C_nil(cx.ccx()); }
-    B(cx).store(val, ptr)
-}
-
-pub fn VolatileStore(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef {
-    if cx.unreachable.get() { return C_nil(cx.ccx()); }
-    B(cx).volatile_store(val, ptr)
-}
-
-pub fn AtomicStore(cx: Block, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
-    if cx.unreachable.get() { return; }
-    B(cx).atomic_store(val, ptr, order)
-}
-
-pub fn GEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
-        }
-        B(cx).gep(pointer, indices)
-    }
-}
-
-// Simple wrapper around GEP that takes an array of ints and wraps them
-// in C_i32()
-#[inline]
-pub fn GEPi(cx: Block, base: ValueRef, ixs: &[usize]) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
-        }
-        B(cx).gepi(base, ixs)
-    }
-}
-
-pub fn InBoundsGEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
-        }
-        B(cx).inbounds_gep(pointer, indices)
-    }
-}
-
-pub fn StructGEP(cx: Block, pointer: ValueRef, idx: usize) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
-        }
-        B(cx).struct_gep(pointer, idx)
-    }
-}
-
-pub fn GlobalString(cx: Block, _str: *const c_char) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref());
-        }
-        B(cx).global_string(_str)
-    }
-}
-
-pub fn GlobalStringPtr(cx: Block, _str: *const c_char) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref());
-        }
-        B(cx).global_string_ptr(_str)
-    }
-}
-
-/* Casts */
-pub fn Trunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).trunc(val, dest_ty)
-    }
-}
-
-pub fn ZExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).zext(val, dest_ty)
-    }
-}
-
-pub fn SExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).sext(val, dest_ty)
-    }
-}
-
-pub fn FPToUI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).fptoui(val, dest_ty)
-    }
-}
-
-pub fn FPToSI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).fptosi(val, dest_ty)
-    }
-}
-
-pub fn UIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).uitofp(val, dest_ty)
-    }
-}
-
-pub fn SIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).sitofp(val, dest_ty)
-    }
-}
-
-pub fn FPTrunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).fptrunc(val, dest_ty)
-    }
-}
-
-pub fn FPExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).fpext(val, dest_ty)
-    }
-}
-
-pub fn PtrToInt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).ptrtoint(val, dest_ty)
-    }
-}
-
-pub fn IntToPtr(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).inttoptr(val, dest_ty)
-    }
-}
-
-pub fn BitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).bitcast(val, dest_ty)
-    }
-}
-
-pub fn ZExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).zext_or_bitcast(val, dest_ty)
-    }
-}
-
-pub fn SExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).sext_or_bitcast(val, dest_ty)
-    }
-}
-
-pub fn TruncOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).trunc_or_bitcast(val, dest_ty)
-    }
-}
-
-pub fn Cast(cx: Block, op: Opcode, val: ValueRef, dest_ty: Type,
-            _: *const u8)
-     -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).cast(op, val, dest_ty)
-    }
-}
-
-pub fn PointerCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).pointercast(val, dest_ty)
-    }
-}
-
-pub fn IntCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).intcast(val, dest_ty)
-    }
-}
-
-pub fn FPCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
-        B(cx).fpcast(val, dest_ty)
-    }
-}
-
-
-/* Comparisons */
-pub fn ICmp(cx: Block,
-            op: IntPredicate,
-            lhs: ValueRef,
-            rhs: ValueRef,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref());
-        }
-        debug_loc.apply(cx.fcx);
-        B(cx).icmp(op, lhs, rhs)
-    }
-}
-
-pub fn FCmp(cx: Block,
-            op: RealPredicate,
-            lhs: ValueRef,
-            rhs: ValueRef,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref());
-        }
-        debug_loc.apply(cx.fcx);
-        B(cx).fcmp(op, lhs, rhs)
-    }
-}
-
-/* Miscellaneous instructions */
-pub fn EmptyPhi(cx: Block, ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); }
-        B(cx).empty_phi(ty)
-    }
-}
-
-pub fn Phi(cx: Block, ty: Type, vals: &[ValueRef],
-           bbs: &[BasicBlockRef]) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); }
-        B(cx).phi(ty, vals, bbs)
-    }
-}
-
-pub fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) {
-    unsafe {
-        if llvm::LLVMIsUndef(phi) == llvm::True { return; }
-        llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
-    }
-}
-
-pub fn _UndefReturn(cx: Block, fn_: ValueRef) -> ValueRef {
-    unsafe {
-        let ccx = cx.fcx.ccx;
-        let ty = val_ty(fn_);
-        let retty = if ty.kind() == llvm::Function {
-            ty.return_type()
-        } else {
-            ccx.int_type()
-        };
-        B(cx).count_insn("ret_undef");
-        llvm::LLVMGetUndef(retty.to_ref())
-    }
-}
-
-pub fn add_span_comment(cx: Block, sp: Span, text: &str) {
-    B(cx).add_span_comment(sp, text)
-}
-
-pub fn add_comment(cx: Block, text: &str) {
-    B(cx).add_comment(text)
-}
-
-pub fn InlineAsmCall(cx: Block, asm: *const c_char, cons: *const c_char,
-                     inputs: &[ValueRef], output: Type,
-                     volatile: bool, alignstack: bool,
-                     dia: AsmDialect) -> ValueRef {
-    B(cx).inline_asm_call(asm, cons, inputs, output, volatile, alignstack, dia)
-}
-
-pub fn Call(cx: Block,
-            fn_: ValueRef,
-            args: &[ValueRef],
-            attributes: Option<AttrBuilder>,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    if cx.unreachable.get() {
-        return _UndefReturn(cx, fn_);
-    }
-    debug_loc.apply(cx.fcx);
-    let bundle = cx.lpad.get().and_then(|b| b.bundle());
-    B(cx).call(fn_, args, bundle, attributes)
-}
-
-pub fn CallWithConv(cx: Block,
-                    fn_: ValueRef,
-                    args: &[ValueRef],
-                    conv: CallConv,
-                    attributes: Option<AttrBuilder>,
-                    debug_loc: DebugLoc)
-                    -> ValueRef {
-    if cx.unreachable.get() {
-        return _UndefReturn(cx, fn_);
-    }
-    debug_loc.apply(cx.fcx);
-    let bundle = cx.lpad.get().and_then(|b| b.bundle());
-    B(cx).call_with_conv(fn_, args, conv, bundle, attributes)
-}
-
-pub fn AtomicFence(cx: Block, order: AtomicOrdering, scope: SynchronizationScope) {
-    if cx.unreachable.get() { return; }
-    B(cx).atomic_fence(order, scope)
-}
-
-pub fn Select(cx: Block, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef {
-    if cx.unreachable.get() { return _Undef(then); }
-    B(cx).select(if_, then, else_)
-}
-
-pub fn VAArg(cx: Block, list: ValueRef, ty: Type) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); }
-        B(cx).va_arg(list, ty)
-    }
-}
-
-pub fn ExtractElement(cx: Block, vec_val: ValueRef, index: ValueRef) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
-        }
-        B(cx).extract_element(vec_val, index)
-    }
-}
-
-pub fn InsertElement(cx: Block, vec_val: ValueRef, elt_val: ValueRef,
-                     index: ValueRef) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
-        }
-        B(cx).insert_element(vec_val, elt_val, index)
-    }
-}
-
-pub fn ShuffleVector(cx: Block, v1: ValueRef, v2: ValueRef,
-                     mask: ValueRef) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
-        }
-        B(cx).shuffle_vector(v1, v2, mask)
-    }
-}
-
-pub fn VectorSplat(cx: Block, num_elts: usize, elt_val: ValueRef) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
-        }
-        B(cx).vector_splat(num_elts, elt_val)
-    }
-}
-
-pub fn ExtractValue(cx: Block, agg_val: ValueRef, index: usize) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
-        }
-        B(cx).extract_value(agg_val, index)
-    }
-}
-
-pub fn InsertValue(cx: Block, agg_val: ValueRef, elt_val: ValueRef, index: usize) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
-        }
-        B(cx).insert_value(agg_val, elt_val, index)
-    }
-}
-
-pub fn IsNull(cx: Block, val: ValueRef) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref());
-        }
-        B(cx).is_null(val)
-    }
-}
-
-pub fn IsNotNull(cx: Block, val: ValueRef) -> ValueRef {
-    unsafe {
-        if cx.unreachable.get() {
-            return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref());
-        }
-        B(cx).is_not_null(val)
-    }
-}
-
-pub fn PtrDiff(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-    unsafe {
-        let ccx = cx.fcx.ccx;
-        if cx.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type().to_ref()); }
-        B(cx).ptrdiff(lhs, rhs)
-    }
-}
-
-pub fn Trap(cx: Block) {
-    if cx.unreachable.get() { return; }
-    B(cx).trap();
-}
-
-pub fn LandingPad(cx: Block, ty: Type, pers_fn: ValueRef,
-                  num_clauses: usize) -> ValueRef {
-    check_not_terminated(cx);
-    assert!(!cx.unreachable.get());
-    B(cx).landing_pad(ty, pers_fn, num_clauses, cx.fcx.llfn)
-}
-
-pub fn AddClause(cx: Block, landing_pad: ValueRef, clause: ValueRef) {
-    B(cx).add_clause(landing_pad, clause)
-}
-
-pub fn SetCleanup(cx: Block, landing_pad: ValueRef) {
-    B(cx).set_cleanup(landing_pad)
-}
-
-pub fn SetPersonalityFn(cx: Block, f: ValueRef) {
-    B(cx).set_personality_fn(f)
-}
-
-pub fn Resume(cx: Block, exn: ValueRef) -> ValueRef {
-    check_not_terminated(cx);
-    terminate(cx, "Resume");
-    B(cx).resume(exn)
-}
-
-// Atomic Operations
-pub fn AtomicCmpXchg(cx: Block, dst: ValueRef,
-                     cmp: ValueRef, src: ValueRef,
-                     order: AtomicOrdering,
-                     failure_order: AtomicOrdering,
-                     weak: llvm::Bool) -> ValueRef {
-    B(cx).atomic_cmpxchg(dst, cmp, src, order, failure_order, weak)
-}
-pub fn AtomicRMW(cx: Block, op: AtomicBinOp,
-                 dst: ValueRef, src: ValueRef,
-                 order: AtomicOrdering) -> ValueRef {
-    B(cx).atomic_rmw(op, dst, src, order)
-}
-
-pub fn CleanupPad(cx: Block,
-                  parent: Option<ValueRef>,
-                  args: &[ValueRef]) -> ValueRef {
-    check_not_terminated(cx);
-    assert!(!cx.unreachable.get());
-    B(cx).cleanup_pad(parent, args)
-}
-
-pub fn CleanupRet(cx: Block,
-                  cleanup: ValueRef,
-                  unwind: Option<BasicBlockRef>) -> ValueRef {
-    check_not_terminated(cx);
-    terminate(cx, "CleanupRet");
-    B(cx).cleanup_ret(cleanup, unwind)
-}
-
-pub fn CatchPad(cx: Block,
-                parent: ValueRef,
-                args: &[ValueRef]) -> ValueRef {
-    check_not_terminated(cx);
-    assert!(!cx.unreachable.get());
-    B(cx).catch_pad(parent, args)
-}
-
-pub fn CatchRet(cx: Block, pad: ValueRef, unwind: BasicBlockRef) -> ValueRef {
-    check_not_terminated(cx);
-    terminate(cx, "CatchRet");
-    B(cx).catch_ret(pad, unwind)
-}
-
-pub fn CatchSwitch(cx: Block,
-                   parent: Option<ValueRef>,
-                   unwind: Option<BasicBlockRef>,
-                   num_handlers: usize) -> ValueRef {
-    check_not_terminated(cx);
-    terminate(cx, "CatchSwitch");
-    B(cx).catch_switch(parent, unwind, num_handlers)
-}
-
-pub fn AddHandler(cx: Block, catch_switch: ValueRef, handler: BasicBlockRef) {
-    B(cx).add_handler(catch_switch, handler)
-}
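
The deleted free functions above all follow one pattern: check whether the current block has already been marked unreachable, hand back an undef (or nil) placeholder instead of emitting anything, otherwise apply the debug location and forward to the block's Builder. The stand-alone sketch below illustrates only that guard-then-delegate shape; MiniBlock, MiniBuilder and Value are illustrative stand-ins invented for this example, not the actual rustc or LLVM types, and in the real wrappers the placeholder comes from llvm::LLVMGetUndef on a suitable type rather than an enum variant.

    use std::cell::Cell;

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Value {
        Undef,          // placeholder result for a dead (unreachable) block
        Add(i64, i64),  // a pretend "instruction"
    }

    struct MiniBuilder;

    impl MiniBuilder {
        fn add(&self, lhs: i64, rhs: i64) -> Value {
            Value::Add(lhs, rhs)
        }
    }

    struct MiniBlock {
        unreachable: Cell<bool>,
        builder: MiniBuilder,
    }

    // Mirrors the shape of the wrappers above: if the block is already known
    // to be unreachable, return an undef value and emit nothing.
    fn add(cx: &MiniBlock, lhs: i64, rhs: i64) -> Value {
        if cx.unreachable.get() {
            return Value::Undef;
        }
        cx.builder.add(lhs, rhs)
    }

    fn main() {
        let live = MiniBlock { unreachable: Cell::new(false), builder: MiniBuilder };
        let dead = MiniBlock { unreachable: Cell::new(true), builder: MiniBuilder };
        assert_eq!(add(&live, 2, 3), Value::Add(2, 3));
        assert_eq!(add(&dead, 2, 3), Value::Undef);
        println!("dead blocks yield undef, live blocks emit the op");
    }
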
diff --git a/src/librustc_trans/trans/builder.rs b/src/librustc_trans/trans/builder.rs
deleted file mode 100644 (file)
index 434fca4..0000000
+++ /dev/null
@@ -1,1100 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(dead_code)] // FFI wrappers
-
-use llvm;
-use llvm::{CallConv, AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect, AttrBuilder};
-use llvm::{Opcode, IntPredicate, RealPredicate, False, OperandBundleDef};
-use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef};
-use trans::base;
-use trans::common::*;
-use trans::machine::llalign_of_pref;
-use trans::type_::Type;
-use util::nodemap::FnvHashMap;
-use libc::{c_uint, c_char};
-
-use std::ffi::CString;
-use std::ptr;
-use syntax::codemap::Span;
-
-pub struct Builder<'a, 'tcx: 'a> {
-    pub llbuilder: BuilderRef,
-    pub ccx: &'a CrateContext<'a, 'tcx>,
-}
-
-// This is a really awful way to get a zero-length c-string, but better (and a
-// lot more efficient) than doing str::as_c_str("", ...) every time.
-pub fn noname() -> *const c_char {
-    static CNULL: c_char = 0;
-    &CNULL
-}
-
-impl<'a, 'tcx> Builder<'a, 'tcx> {
-    pub fn new(ccx: &'a CrateContext<'a, 'tcx>) -> Builder<'a, 'tcx> {
-        Builder {
-            llbuilder: ccx.raw_builder(),
-            ccx: ccx,
-        }
-    }
-
-    pub fn count_insn(&self, category: &str) {
-        if self.ccx.sess().trans_stats() {
-            self.ccx.stats().n_llvm_insns.set(self.ccx
-                                                .stats()
-                                                .n_llvm_insns
-                                                .get() + 1);
-        }
-        self.ccx.count_llvm_insn();
-        if self.ccx.sess().count_llvm_insns() {
-            base::with_insn_ctxt(|v| {
-                let mut h = self.ccx.stats().llvm_insns.borrow_mut();
-
-                // Build version of path with cycles removed.
-
-                // Pass 1: scan table mapping str -> rightmost pos.
-                let mut mm = FnvHashMap();
-                let len = v.len();
-                let mut i = 0;
-                while i < len {
-                    mm.insert(v[i], i);
-                    i += 1;
-                }
-
-                // Pass 2: concat strings for each elt, skipping
-                // forwards over any cycles by advancing to rightmost
-                // occurrence of each element in path.
-                let mut s = String::from(".");
-                i = 0;
-                while i < len {
-                    i = mm[v[i]];
-                    s.push('/');
-                    s.push_str(v[i]);
-                    i += 1;
-                }
-
-                s.push('/');
-                s.push_str(category);
-
-                let n = match h.get(&s) {
-                    Some(&n) => n,
-                    _ => 0
-                };
-                h.insert(s, n+1);
-            })
-        }
-    }
-
-    pub fn position_before(&self, insn: ValueRef) {
-        unsafe {
-            llvm::LLVMPositionBuilderBefore(self.llbuilder, insn);
-        }
-    }
-
-    pub fn position_at_end(&self, llbb: BasicBlockRef) {
-        unsafe {
-            llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
-        }
-    }
-
-    pub fn ret_void(&self) {
-        self.count_insn("retvoid");
-        unsafe {
-            llvm::LLVMBuildRetVoid(self.llbuilder);
-        }
-    }
-
-    pub fn ret(&self, v: ValueRef) {
-        self.count_insn("ret");
-        unsafe {
-            llvm::LLVMBuildRet(self.llbuilder, v);
-        }
-    }
-
-    pub fn aggregate_ret(&self, ret_vals: &[ValueRef]) {
-        unsafe {
-            llvm::LLVMBuildAggregateRet(self.llbuilder,
-                                        ret_vals.as_ptr(),
-                                        ret_vals.len() as c_uint);
-        }
-    }
-
-    pub fn br(&self, dest: BasicBlockRef) {
-        self.count_insn("br");
-        unsafe {
-            llvm::LLVMBuildBr(self.llbuilder, dest);
-        }
-    }
-
-    pub fn cond_br(&self, cond: ValueRef, then_llbb: BasicBlockRef, else_llbb: BasicBlockRef) {
-        self.count_insn("condbr");
-        unsafe {
-            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
-        }
-    }
-
-    pub fn switch(&self, v: ValueRef, else_llbb: BasicBlockRef, num_cases: usize) -> ValueRef {
-        unsafe {
-            llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint)
-        }
-    }
-
-    pub fn indirect_br(&self, addr: ValueRef, num_dests: usize) {
-        self.count_insn("indirectbr");
-        unsafe {
-            llvm::LLVMBuildIndirectBr(self.llbuilder, addr, num_dests as c_uint);
-        }
-    }
-
-    pub fn invoke(&self,
-                  llfn: ValueRef,
-                  args: &[ValueRef],
-                  then: BasicBlockRef,
-                  catch: BasicBlockRef,
-                  bundle: Option<&OperandBundleDef>,
-                  attributes: Option<AttrBuilder>)
-                  -> ValueRef {
-        self.count_insn("invoke");
-
-        debug!("Invoke {} with args ({})",
-               self.ccx.tn().val_to_string(llfn),
-               args.iter()
-                   .map(|&v| self.ccx.tn().val_to_string(v))
-                   .collect::<Vec<String>>()
-                   .join(", "));
-
-        let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(0 as *mut _);
-
-        unsafe {
-            let v = llvm::LLVMRustBuildInvoke(self.llbuilder,
-                                              llfn,
-                                              args.as_ptr(),
-                                              args.len() as c_uint,
-                                              then,
-                                              catch,
-                                              bundle,
-                                              noname());
-            if let Some(a) = attributes {
-                a.apply_callsite(v);
-            }
-            v
-        }
-    }
-
-    pub fn unreachable(&self) {
-        self.count_insn("unreachable");
-        unsafe {
-            llvm::LLVMBuildUnreachable(self.llbuilder);
-        }
-    }
-
-    /* Arithmetic */
-    pub fn add(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("add");
-        unsafe {
-            llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn nswadd(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("nswadd");
-        unsafe {
-            llvm::LLVMBuildNSWAdd(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn nuwadd(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("nuwadd");
-        unsafe {
-            llvm::LLVMBuildNUWAdd(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn fadd(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("fadd");
-        unsafe {
-            llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn sub(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("sub");
-        unsafe {
-            llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn nswsub(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("nwsub");
-        unsafe {
-            llvm::LLVMBuildNSWSub(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn nuwsub(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("nuwsub");
-        unsafe {
-            llvm::LLVMBuildNUWSub(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn fsub(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("sub");
-        unsafe {
-            llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn mul(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("mul");
-        unsafe {
-            llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn nswmul(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("nswmul");
-        unsafe {
-            llvm::LLVMBuildNSWMul(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn nuwmul(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("nuwmul");
-        unsafe {
-            llvm::LLVMBuildNUWMul(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn fmul(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("fmul");
-        unsafe {
-            llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn udiv(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("udiv");
-        unsafe {
-            llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn sdiv(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("sdiv");
-        unsafe {
-            llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn exactsdiv(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("exactsdiv");
-        unsafe {
-            llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn fdiv(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("fdiv");
-        unsafe {
-            llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn urem(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("urem");
-        unsafe {
-            llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn srem(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("srem");
-        unsafe {
-            llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn frem(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("frem");
-        unsafe {
-            llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn shl(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("shl");
-        unsafe {
-            llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn lshr(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("lshr");
-        unsafe {
-            llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn ashr(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("ashr");
-        unsafe {
-            llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn and(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("and");
-        unsafe {
-            llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn or(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("or");
-        unsafe {
-            llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn xor(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("xor");
-        unsafe {
-            llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn binop(&self, op: Opcode, lhs: ValueRef, rhs: ValueRef)
-              -> ValueRef {
-        self.count_insn("binop");
-        unsafe {
-            llvm::LLVMBuildBinOp(self.llbuilder, op, lhs, rhs, noname())
-        }
-    }
-
-    pub fn neg(&self, v: ValueRef) -> ValueRef {
-        self.count_insn("neg");
-        unsafe {
-            llvm::LLVMBuildNeg(self.llbuilder, v, noname())
-        }
-    }
-
-    pub fn nswneg(&self, v: ValueRef) -> ValueRef {
-        self.count_insn("nswneg");
-        unsafe {
-            llvm::LLVMBuildNSWNeg(self.llbuilder, v, noname())
-        }
-    }
-
-    pub fn nuwneg(&self, v: ValueRef) -> ValueRef {
-        self.count_insn("nuwneg");
-        unsafe {
-            llvm::LLVMBuildNUWNeg(self.llbuilder, v, noname())
-        }
-    }
-    pub fn fneg(&self, v: ValueRef) -> ValueRef {
-        self.count_insn("fneg");
-        unsafe {
-            llvm::LLVMBuildFNeg(self.llbuilder, v, noname())
-        }
-    }
-
-    pub fn not(&self, v: ValueRef) -> ValueRef {
-        self.count_insn("not");
-        unsafe {
-            llvm::LLVMBuildNot(self.llbuilder, v, noname())
-        }
-    }
-
-    pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
-        self.count_insn("alloca");
-        unsafe {
-            if name.is_empty() {
-                llvm::LLVMBuildAlloca(self.llbuilder, ty.to_ref(), noname())
-            } else {
-                let name = CString::new(name).unwrap();
-                llvm::LLVMBuildAlloca(self.llbuilder, ty.to_ref(),
-                                      name.as_ptr())
-            }
-        }
-    }
-
-    pub fn free(&self, ptr: ValueRef) {
-        self.count_insn("free");
-        unsafe {
-            llvm::LLVMBuildFree(self.llbuilder, ptr);
-        }
-    }
-
-    pub fn load(&self, ptr: ValueRef) -> ValueRef {
-        self.count_insn("load");
-        unsafe {
-            llvm::LLVMBuildLoad(self.llbuilder, ptr, noname())
-        }
-    }
-
-    pub fn volatile_load(&self, ptr: ValueRef) -> ValueRef {
-        self.count_insn("load.volatile");
-        unsafe {
-            let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
-            llvm::LLVMSetVolatile(insn, llvm::True);
-            insn
-        }
-    }
-
-    pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering) -> ValueRef {
-        self.count_insn("load.atomic");
-        unsafe {
-            let ty = Type::from_ref(llvm::LLVMTypeOf(ptr));
-            let align = llalign_of_pref(self.ccx, ty.element_type());
-            llvm::LLVMBuildAtomicLoad(self.llbuilder, ptr, noname(), order,
-                                      align as c_uint)
-        }
-    }
-
-
-    pub fn load_range_assert(&self, ptr: ValueRef, lo: u64,
-                             hi: u64, signed: llvm::Bool) -> ValueRef {
-        let value = self.load(ptr);
-
-        unsafe {
-            let t = llvm::LLVMGetElementType(llvm::LLVMTypeOf(ptr));
-            let min = llvm::LLVMConstInt(t, lo, signed);
-            let max = llvm::LLVMConstInt(t, hi, signed);
-
-            let v = [min, max];
-
-            llvm::LLVMSetMetadata(value, llvm::MD_range as c_uint,
-                                  llvm::LLVMMDNodeInContext(self.ccx.llcx(),
-                                                            v.as_ptr(),
-                                                            v.len() as c_uint));
-        }
-
-        value
-    }
-
-    pub fn load_nonnull(&self, ptr: ValueRef) -> ValueRef {
-        let value = self.load(ptr);
-        unsafe {
-            llvm::LLVMSetMetadata(value, llvm::MD_nonnull as c_uint,
-                                  llvm::LLVMMDNodeInContext(self.ccx.llcx(), ptr::null(), 0));
-        }
-
-        value
-    }
-
-    pub fn store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef {
-        debug!("Store {} -> {}",
-               self.ccx.tn().val_to_string(val),
-               self.ccx.tn().val_to_string(ptr));
-        assert!(!self.llbuilder.is_null());
-        self.count_insn("store");
-        unsafe {
-            llvm::LLVMBuildStore(self.llbuilder, val, ptr)
-        }
-    }
-
-    pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef {
-        debug!("Store {} -> {}",
-               self.ccx.tn().val_to_string(val),
-               self.ccx.tn().val_to_string(ptr));
-        assert!(!self.llbuilder.is_null());
-        self.count_insn("store.volatile");
-        unsafe {
-            let insn = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
-            llvm::LLVMSetVolatile(insn, llvm::True);
-            insn
-        }
-    }
-
-    pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
-        debug!("Store {} -> {}",
-               self.ccx.tn().val_to_string(val),
-               self.ccx.tn().val_to_string(ptr));
-        self.count_insn("store.atomic");
-        unsafe {
-            let ty = Type::from_ref(llvm::LLVMTypeOf(ptr));
-            let align = llalign_of_pref(self.ccx, ty.element_type());
-            llvm::LLVMBuildAtomicStore(self.llbuilder, val, ptr, order, align as c_uint);
-        }
-    }
-
-    pub fn gep(&self, ptr: ValueRef, indices: &[ValueRef]) -> ValueRef {
-        self.count_insn("gep");
-        unsafe {
-            llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(),
-                               indices.len() as c_uint, noname())
-        }
-    }
-
-    // Simple wrapper around GEP that takes an array of ints and wraps them
-    // in C_i32()
-    #[inline]
-    pub fn gepi(&self, base: ValueRef, ixs: &[usize]) -> ValueRef {
-        // Small vector optimization. This should catch 100% of the cases that
-        // we care about.
-        if ixs.len() < 16 {
-            let mut small_vec = [ C_i32(self.ccx, 0); 16 ];
-            for (small_vec_e, &ix) in small_vec.iter_mut().zip(ixs) {
-                *small_vec_e = C_i32(self.ccx, ix as i32);
-            }
-            self.inbounds_gep(base, &small_vec[..ixs.len()])
-        } else {
-            let v = ixs.iter().map(|i| C_i32(self.ccx, *i as i32)).collect::<Vec<ValueRef>>();
-            self.count_insn("gepi");
-            self.inbounds_gep(base, &v[..])
-        }
-    }
-
-    pub fn inbounds_gep(&self, ptr: ValueRef, indices: &[ValueRef]) -> ValueRef {
-        self.count_insn("inboundsgep");
-        unsafe {
-            llvm::LLVMBuildInBoundsGEP(
-                self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname())
-        }
-    }
-
-    pub fn struct_gep(&self, ptr: ValueRef, idx: usize) -> ValueRef {
-        self.count_insn("structgep");
-        unsafe {
-            llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname())
-        }
-    }
-
-    pub fn global_string(&self, _str: *const c_char) -> ValueRef {
-        self.count_insn("globalstring");
-        unsafe {
-            llvm::LLVMBuildGlobalString(self.llbuilder, _str, noname())
-        }
-    }
-
-    pub fn global_string_ptr(&self, _str: *const c_char) -> ValueRef {
-        self.count_insn("globalstringptr");
-        unsafe {
-            llvm::LLVMBuildGlobalStringPtr(self.llbuilder, _str, noname())
-        }
-    }
-
-    /* Casts */
-    pub fn trunc(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("trunc");
-        unsafe {
-            llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn zext(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("zext");
-        unsafe {
-            llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn sext(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("sext");
-        unsafe {
-            llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn fptoui(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("fptoui");
-        unsafe {
-            llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn fptosi(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("fptosi");
-        unsafe {
-            llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty.to_ref(),noname())
-        }
-    }
-
-    pub fn uitofp(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("uitofp");
-        unsafe {
-            llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn sitofp(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("sitofp");
-        unsafe {
-            llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn fptrunc(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("fptrunc");
-        unsafe {
-            llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn fpext(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("fpext");
-        unsafe {
-            llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn ptrtoint(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("ptrtoint");
-        unsafe {
-            llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn inttoptr(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("inttoptr");
-        unsafe {
-            llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn bitcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("bitcast");
-        unsafe {
-            llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn zext_or_bitcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("zextorbitcast");
-        unsafe {
-            llvm::LLVMBuildZExtOrBitCast(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn sext_or_bitcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("sextorbitcast");
-        unsafe {
-            llvm::LLVMBuildSExtOrBitCast(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn trunc_or_bitcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("truncorbitcast");
-        unsafe {
-            llvm::LLVMBuildTruncOrBitCast(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn cast(&self, op: Opcode, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("cast");
-        unsafe {
-            llvm::LLVMBuildCast(self.llbuilder, op, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn pointercast(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("pointercast");
-        unsafe {
-            llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn intcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("intcast");
-        unsafe {
-            llvm::LLVMBuildIntCast(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-    pub fn fpcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef {
-        self.count_insn("fpcast");
-        unsafe {
-            llvm::LLVMBuildFPCast(self.llbuilder, val, dest_ty.to_ref(), noname())
-        }
-    }
-
-
-    /* Comparisons */
-    pub fn icmp(&self, op: IntPredicate, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("icmp");
-        unsafe {
-            llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
-        }
-    }
-
-    pub fn fcmp(&self, op: RealPredicate, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("fcmp");
-        unsafe {
-            llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
-        }
-    }
-
-    /* Miscellaneous instructions */
-    pub fn empty_phi(&self, ty: Type) -> ValueRef {
-        self.count_insn("emptyphi");
-        unsafe {
-            llvm::LLVMBuildPhi(self.llbuilder, ty.to_ref(), noname())
-        }
-    }
-
-    pub fn phi(&self, ty: Type, vals: &[ValueRef], bbs: &[BasicBlockRef]) -> ValueRef {
-        assert_eq!(vals.len(), bbs.len());
-        let phi = self.empty_phi(ty);
-        self.count_insn("addincoming");
-        unsafe {
-            llvm::LLVMAddIncoming(phi, vals.as_ptr(),
-                                  bbs.as_ptr(),
-                                  vals.len() as c_uint);
-            phi
-        }
-    }
-
-    pub fn add_span_comment(&self, sp: Span, text: &str) {
-        if self.ccx.sess().asm_comments() {
-            let s = format!("{} ({})",
-                            text,
-                            self.ccx.sess().codemap().span_to_string(sp));
-            debug!("{}", &s[..]);
-            self.add_comment(&s[..]);
-        }
-    }
-
-    pub fn add_comment(&self, text: &str) {
-        if self.ccx.sess().asm_comments() {
-            let sanitized = text.replace("$", "");
-            let comment_text = format!("{} {}", "#",
-                                       sanitized.replace("\n", "\n\t# "));
-            self.count_insn("inlineasm");
-            let comment_text = CString::new(comment_text).unwrap();
-            let asm = unsafe {
-                llvm::LLVMConstInlineAsm(Type::func(&[], &Type::void(self.ccx)).to_ref(),
-                                         comment_text.as_ptr(), noname(), False,
-                                         False)
-            };
-            self.call(asm, &[], None, None);
-        }
-    }
-
-    pub fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char,
-                         inputs: &[ValueRef], output: Type,
-                         volatile: bool, alignstack: bool,
-                         dia: AsmDialect) -> ValueRef {
-        self.count_insn("inlineasm");
-
-        let volatile = if volatile { llvm::True }
-                       else        { llvm::False };
-        let alignstack = if alignstack { llvm::True }
-                         else          { llvm::False };
-
-        let argtys = inputs.iter().map(|v| {
-            debug!("Asm Input Type: {}", self.ccx.tn().val_to_string(*v));
-            val_ty(*v)
-        }).collect::<Vec<_>>();
-
-        debug!("Asm Output Type: {}", self.ccx.tn().type_to_string(output));
-        let fty = Type::func(&argtys[..], &output);
-        unsafe {
-            let v = llvm::LLVMInlineAsm(
-                fty.to_ref(), asm, cons, volatile, alignstack, dia as c_uint);
-            self.call(v, inputs, None, None)
-        }
-    }
-
-    pub fn call(&self, llfn: ValueRef, args: &[ValueRef],
-                bundle: Option<&OperandBundleDef>,
-                attributes: Option<AttrBuilder>) -> ValueRef {
-        self.count_insn("call");
-
-        debug!("Call {} with args ({})",
-               self.ccx.tn().val_to_string(llfn),
-               args.iter()
-                   .map(|&v| self.ccx.tn().val_to_string(v))
-                   .collect::<Vec<String>>()
-                   .join(", "));
-
-        let mut fn_ty = val_ty(llfn);
-        // Strip off pointers
-        while fn_ty.kind() == llvm::TypeKind::Pointer {
-            fn_ty = fn_ty.element_type();
-        }
-
-        assert!(fn_ty.kind() == llvm::TypeKind::Function,
-                "builder::call not passed a function");
-
-        let param_tys = fn_ty.func_params();
-
-        let iter = param_tys.into_iter()
-            .zip(args.iter().map(|&v| val_ty(v)));
-        for (i, (expected_ty, actual_ty)) in iter.enumerate() {
-            if expected_ty != actual_ty {
-                self.ccx.sess().bug(
-                    &format!(
-                        "Type mismatch in function call of {}.  Expected {} for param {}, got {}",
-                        self.ccx.tn().val_to_string(llfn),
-                        self.ccx.tn().type_to_string(expected_ty),
-                        i,
-                        self.ccx.tn().type_to_string(actual_ty)));
-
-            }
-        }
-
-        let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(0 as *mut _);
-
-        unsafe {
-            let v = llvm::LLVMRustBuildCall(self.llbuilder, llfn, args.as_ptr(),
-                                            args.len() as c_uint, bundle,
-                                            noname());
-            if let Some(a) = attributes {
-                a.apply_callsite(v);
-            }
-            v
-        }
-    }
-
-    pub fn call_with_conv(&self, llfn: ValueRef, args: &[ValueRef],
-                          conv: CallConv,
-                          bundle: Option<&OperandBundleDef>,
-                          attributes: Option<AttrBuilder>) -> ValueRef {
-        self.count_insn("callwithconv");
-        let v = self.call(llfn, args, bundle, attributes);
-        llvm::SetInstructionCallConv(v, conv);
-        v
-    }
-
-    pub fn select(&self, cond: ValueRef, then_val: ValueRef, else_val: ValueRef) -> ValueRef {
-        self.count_insn("select");
-        unsafe {
-            llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname())
-        }
-    }
-
-    pub fn va_arg(&self, list: ValueRef, ty: Type) -> ValueRef {
-        self.count_insn("vaarg");
-        unsafe {
-            llvm::LLVMBuildVAArg(self.llbuilder, list, ty.to_ref(), noname())
-        }
-    }
-
-    pub fn extract_element(&self, vec: ValueRef, idx: ValueRef) -> ValueRef {
-        self.count_insn("extractelement");
-        unsafe {
-            llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname())
-        }
-    }
-
-    pub fn insert_element(&self, vec: ValueRef, elt: ValueRef, idx: ValueRef) -> ValueRef {
-        self.count_insn("insertelement");
-        unsafe {
-            llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname())
-        }
-    }
-
-    pub fn shuffle_vector(&self, v1: ValueRef, v2: ValueRef, mask: ValueRef) -> ValueRef {
-        self.count_insn("shufflevector");
-        unsafe {
-            llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname())
-        }
-    }
-
-    pub fn vector_splat(&self, num_elts: usize, elt: ValueRef) -> ValueRef {
-        unsafe {
-            let elt_ty = val_ty(elt);
-            let undef = llvm::LLVMGetUndef(Type::vector(&elt_ty, num_elts as u64).to_ref());
-            let vec = self.insert_element(undef, elt, C_i32(self.ccx, 0));
-            let vec_i32_ty = Type::vector(&Type::i32(self.ccx), num_elts as u64);
-            self.shuffle_vector(vec, undef, C_null(vec_i32_ty))
-        }
-    }
-
-    pub fn extract_value(&self, agg_val: ValueRef, idx: usize) -> ValueRef {
-        self.count_insn("extractvalue");
-        unsafe {
-            llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname())
-        }
-    }
-
-    pub fn insert_value(&self, agg_val: ValueRef, elt: ValueRef,
-                       idx: usize) -> ValueRef {
-        self.count_insn("insertvalue");
-        unsafe {
-            llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint,
-                                       noname())
-        }
-    }
-
-    pub fn is_null(&self, val: ValueRef) -> ValueRef {
-        self.count_insn("isnull");
-        unsafe {
-            llvm::LLVMBuildIsNull(self.llbuilder, val, noname())
-        }
-    }
-
-    pub fn is_not_null(&self, val: ValueRef) -> ValueRef {
-        self.count_insn("isnotnull");
-        unsafe {
-            llvm::LLVMBuildIsNotNull(self.llbuilder, val, noname())
-        }
-    }
-
-    pub fn ptrdiff(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-        self.count_insn("ptrdiff");
-        unsafe {
-            llvm::LLVMBuildPtrDiff(self.llbuilder, lhs, rhs, noname())
-        }
-    }
-
-    pub fn trap(&self) {
-        unsafe {
-            let bb: BasicBlockRef = llvm::LLVMGetInsertBlock(self.llbuilder);
-            let fn_: ValueRef = llvm::LLVMGetBasicBlockParent(bb);
-            let m: ModuleRef = llvm::LLVMGetGlobalParent(fn_);
-            let p = "llvm.trap\0".as_ptr();
-            let t: ValueRef = llvm::LLVMGetNamedFunction(m, p as *const _);
-            assert!((t as isize != 0));
-            let args: &[ValueRef] = &[];
-            self.count_insn("trap");
-            llvm::LLVMRustBuildCall(self.llbuilder, t,
-                                    args.as_ptr(), args.len() as c_uint,
-                                    0 as *mut _,
-                                    noname());
-        }
-    }
-
-    pub fn landing_pad(&self, ty: Type, pers_fn: ValueRef,
-                       num_clauses: usize,
-                       llfn: ValueRef) -> ValueRef {
-        self.count_insn("landingpad");
-        unsafe {
-            llvm::LLVMRustBuildLandingPad(self.llbuilder, ty.to_ref(), pers_fn,
-                                          num_clauses as c_uint, noname(), llfn)
-        }
-    }
-
-    pub fn add_clause(&self, landing_pad: ValueRef, clause: ValueRef) {
-        unsafe {
-            llvm::LLVMAddClause(landing_pad, clause);
-        }
-    }
-
-    pub fn set_cleanup(&self, landing_pad: ValueRef) {
-        self.count_insn("setcleanup");
-        unsafe {
-            llvm::LLVMSetCleanup(landing_pad, llvm::True);
-        }
-    }
-
-    pub fn resume(&self, exn: ValueRef) -> ValueRef {
-        self.count_insn("resume");
-        unsafe {
-            llvm::LLVMBuildResume(self.llbuilder, exn)
-        }
-    }
-
-    pub fn cleanup_pad(&self,
-                       parent: Option<ValueRef>,
-                       args: &[ValueRef]) -> ValueRef {
-        self.count_insn("cleanuppad");
-        let parent = parent.unwrap_or(0 as *mut _);
-        let name = CString::new("cleanuppad").unwrap();
-        let ret = unsafe {
-            llvm::LLVMRustBuildCleanupPad(self.llbuilder,
-                                          parent,
-                                          args.len() as c_uint,
-                                          args.as_ptr(),
-                                          name.as_ptr())
-        };
-        assert!(!ret.is_null(), "LLVM does not have support for cleanuppad");
-        return ret
-    }
-
-    pub fn cleanup_ret(&self, cleanup: ValueRef,
-                       unwind: Option<BasicBlockRef>) -> ValueRef {
-        self.count_insn("cleanupret");
-        let unwind = unwind.unwrap_or(0 as *mut _);
-        let ret = unsafe {
-            llvm::LLVMRustBuildCleanupRet(self.llbuilder, cleanup, unwind)
-        };
-        assert!(!ret.is_null(), "LLVM does not have support for cleanupret");
-        return ret
-    }
-
-    pub fn catch_pad(&self,
-                     parent: ValueRef,
-                     args: &[ValueRef]) -> ValueRef {
-        self.count_insn("catchpad");
-        let name = CString::new("catchpad").unwrap();
-        let ret = unsafe {
-            llvm::LLVMRustBuildCatchPad(self.llbuilder, parent,
-                                        args.len() as c_uint, args.as_ptr(),
-                                        name.as_ptr())
-        };
-        assert!(!ret.is_null(), "LLVM does not have support for catchpad");
-        return ret
-    }
-
-    pub fn catch_ret(&self, pad: ValueRef, unwind: BasicBlockRef) -> ValueRef {
-        self.count_insn("catchret");
-        let ret = unsafe {
-            llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind)
-        };
-        assert!(!ret.is_null(), "LLVM does not have support for catchret");
-        return ret
-    }
-
-    pub fn catch_switch(&self,
-                        parent: Option<ValueRef>,
-                        unwind: Option<BasicBlockRef>,
-                        num_handlers: usize) -> ValueRef {
-        self.count_insn("catchswitch");
-        let parent = parent.unwrap_or(0 as *mut _);
-        let unwind = unwind.unwrap_or(0 as *mut _);
-        let name = CString::new("catchswitch").unwrap();
-        let ret = unsafe {
-            llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind,
-                                           num_handlers as c_uint,
-                                           name.as_ptr())
-        };
-        assert!(!ret.is_null(), "LLVM does not have support for catchswitch");
-        return ret
-    }
-
-    pub fn add_handler(&self, catch_switch: ValueRef, handler: BasicBlockRef) {
-        unsafe {
-            llvm::LLVMRustAddHandler(catch_switch, handler);
-        }
-    }
-
-    pub fn set_personality_fn(&self, personality: ValueRef) {
-        unsafe {
-            llvm::LLVMRustSetPersonalityFn(self.llbuilder, personality);
-        }
-    }
-
-    // Atomic Operations
-    pub fn atomic_cmpxchg(&self, dst: ValueRef,
-                         cmp: ValueRef, src: ValueRef,
-                         order: AtomicOrdering,
-                         failure_order: AtomicOrdering,
-                         weak: llvm::Bool) -> ValueRef {
-        unsafe {
-            llvm::LLVMBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src,
-                                         order, failure_order, weak)
-        }
-    }
-    pub fn atomic_rmw(&self, op: AtomicBinOp,
-                     dst: ValueRef, src: ValueRef,
-                     order: AtomicOrdering) -> ValueRef {
-        unsafe {
-            llvm::LLVMBuildAtomicRMW(self.llbuilder, op, dst, src, order, False)
-        }
-    }
-
-    pub fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) {
-        unsafe {
-            llvm::LLVMBuildAtomicFence(self.llbuilder, order, scope);
-        }
-    }
-}
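
Each Builder method above wraps a raw llvm::LLVMBuild* FFI call and, when the session is collecting LLVM instruction statistics, records the instruction under a category via count_insn. Below is a minimal, self-contained sketch of that per-category bookkeeping; InsnStats is an invented name and the real counters live in the crate context's statistics, so treat this purely as an illustration of the idea.

    use std::collections::HashMap;

    #[derive(Default)]
    struct InsnStats {
        total: usize,
        by_category: HashMap<String, usize>,
    }

    impl InsnStats {
        // One call per emitted instruction, keyed by a short category name
        // such as "add", "load" or "store".
        fn count(&mut self, category: &str) {
            self.total += 1;
            *self.by_category.entry(category.to_string()).or_insert(0) += 1;
        }
    }

    fn main() {
        let mut stats = InsnStats::default();
        for category in ["add", "load", "store", "load"] {
            stats.count(category);
        }
        assert_eq!(stats.total, 4);
        assert_eq!(stats.by_category["load"], 2);
        println!("{} insns across {} categories", stats.total, stats.by_category.len());
    }
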
diff --git a/src/librustc_trans/trans/cabi.rs b/src/librustc_trans/trans/cabi.rs
deleted file mode 100644 (file)
index 8c10be4..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-pub use self::ArgKind::*;
-
-use llvm::Attribute;
-use std::option;
-use trans::context::CrateContext;
-use trans::cabi_x86;
-use trans::cabi_x86_64;
-use trans::cabi_x86_win64;
-use trans::cabi_arm;
-use trans::cabi_aarch64;
-use trans::cabi_powerpc;
-use trans::cabi_powerpc64;
-use trans::cabi_mips;
-use trans::cabi_asmjs;
-use trans::type_::Type;
-
-#[derive(Clone, Copy, PartialEq)]
-pub enum ArgKind {
-    /// Pass the argument directly using the normal converted
-    /// LLVM type or by coercing to another specified type
-    Direct,
-    /// Pass the argument indirectly via a hidden pointer
-    Indirect,
-    /// Ignore the argument (useful for empty struct)
-    Ignore,
-}
-
-/// Information about how a specific C type
-/// should be passed to or returned from a function
-///
-/// This is borrowed from clang's ABIInfo.h
-#[derive(Clone, Copy)]
-pub struct ArgType {
-    pub kind: ArgKind,
-    /// Original LLVM type
-    pub ty: Type,
-    /// Coerced LLVM Type
-    pub cast: option::Option<Type>,
-    /// Dummy argument, which is emitted before the real argument
-    pub pad: option::Option<Type>,
-    /// LLVM attribute of argument
-    pub attr: option::Option<Attribute>
-}
-
-impl ArgType {
-    pub fn direct(ty: Type, cast: option::Option<Type>,
-                            pad: option::Option<Type>,
-                            attr: option::Option<Attribute>) -> ArgType {
-        ArgType {
-            kind: Direct,
-            ty: ty,
-            cast: cast,
-            pad: pad,
-            attr: attr
-        }
-    }
-
-    pub fn indirect(ty: Type, attr: option::Option<Attribute>) -> ArgType {
-        ArgType {
-            kind: Indirect,
-            ty: ty,
-            cast: option::Option::None,
-            pad: option::Option::None,
-            attr: attr
-        }
-    }
-
-    pub fn ignore(ty: Type) -> ArgType {
-        ArgType {
-            kind: Ignore,
-            ty: ty,
-            cast: None,
-            pad: None,
-            attr: None,
-        }
-    }
-
-    pub fn is_indirect(&self) -> bool {
-        return self.kind == Indirect;
-    }
-
-    pub fn is_ignore(&self) -> bool {
-        return self.kind == Ignore;
-    }
-}
-
-/// Metadata describing how the arguments to a native function
-/// should be passed in order to respect the native ABI.
-///
-/// I will do my best to describe this structure, but these
-/// comments are reverse-engineered and may be inaccurate. -NDM
-pub struct FnType {
-    /// The LLVM types of each argument.
-    pub arg_tys: Vec<ArgType> ,
-
-    /// LLVM return type.
-    pub ret_ty: ArgType,
-}
-
-pub fn compute_abi_info(ccx: &CrateContext,
-                        atys: &[Type],
-                        rty: Type,
-                        ret_def: bool) -> FnType {
-    match &ccx.sess().target.target.arch[..] {
-        "x86" => cabi_x86::compute_abi_info(ccx, atys, rty, ret_def),
-        "x86_64" => if ccx.sess().target.target.options.is_like_windows {
-            cabi_x86_win64::compute_abi_info(ccx, atys, rty, ret_def)
-        } else {
-            cabi_x86_64::compute_abi_info(ccx, atys, rty, ret_def)
-        },
-        "aarch64" => cabi_aarch64::compute_abi_info(ccx, atys, rty, ret_def),
-        "arm" => {
-            let flavor = if ccx.sess().target.target.target_os == "ios" {
-                cabi_arm::Flavor::Ios
-            } else {
-                cabi_arm::Flavor::General
-            };
-            cabi_arm::compute_abi_info(ccx, atys, rty, ret_def, flavor)
-        },
-        "mips" => cabi_mips::compute_abi_info(ccx, atys, rty, ret_def),
-        "powerpc" => cabi_powerpc::compute_abi_info(ccx, atys, rty, ret_def),
-        "powerpc64" => cabi_powerpc64::compute_abi_info(ccx, atys, rty, ret_def),
-        "asmjs" => cabi_asmjs::compute_abi_info(ccx, atys, rty, ret_def),
-        a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a)
-                              ),
-    }
-}
diff --git a/src/librustc_trans/trans/cabi_aarch64.rs b/src/librustc_trans/trans/cabi_aarch64.rs
deleted file mode 100644 (file)
index f2434ce..0000000
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(non_upper_case_globals)]
-
-use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector, Attribute};
-use trans::cabi::{FnType, ArgType};
-use trans::context::CrateContext;
-use trans::type_::Type;
-
-use std::cmp;
-
-fn align_up_to(off: usize, a: usize) -> usize {
-    return (off + a - 1) / a * a;
-}
-
-fn align(off: usize, ty: Type) -> usize {
-    let a = ty_align(ty);
-    return align_up_to(off, a);
-}
-
-fn ty_align(ty: Type) -> usize {
-    match ty.kind() {
-        Integer => ((ty.int_width() as usize) + 7) / 8,
-        Pointer => 8,
-        Float => 4,
-        Double => 8,
-        Struct => {
-            if ty.is_packed() {
-                1
-            } else {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
-            }
-        }
-        Array => {
-            let elt = ty.element_type();
-            ty_align(elt)
-        }
-        Vector => {
-            let len = ty.vector_length();
-            let elt = ty.element_type();
-            ty_align(elt) * len
-        }
-        _ => panic!("ty_align: unhandled type")
-    }
-}
-
-fn ty_size(ty: Type) -> usize {
-    match ty.kind() {
-        Integer => ((ty.int_width() as usize) + 7) / 8,
-        Pointer => 8,
-        Float => 4,
-        Double => 8,
-        Struct => {
-            if ty.is_packed() {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(0, |s, t| s + ty_size(*t))
-            } else {
-                let str_tys = ty.field_types();
-                let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
-                align(size, ty)
-            }
-        }
-        Array => {
-            let len = ty.array_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt);
-            len * eltsz
-        }
-        Vector => {
-            let len = ty.vector_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt);
-            len * eltsz
-        }
-        _ => panic!("ty_size: unhandled type")
-    }
-}
-
-fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> {
-    fn check_array(ty: Type) -> Option<(Type, u64)> {
-        let len = ty.array_length() as u64;
-        if len == 0 {
-            return None
-        }
-        let elt = ty.element_type();
-
-        // if our element is an HFA/HVA, so are we; multiply members by our len
-        is_homogenous_aggregate_ty(elt).map(|(base_ty, members)| (base_ty, len * members))
-    }
-
-    fn check_struct(ty: Type) -> Option<(Type, u64)> {
-        let str_tys = ty.field_types();
-        if str_tys.len() == 0 {
-            return None
-        }
-
-        let mut prev_base_ty = None;
-        let mut members = 0;
-        for opt_homog_agg in str_tys.iter().map(|t| is_homogenous_aggregate_ty(*t)) {
-            match (prev_base_ty, opt_homog_agg) {
-                // field isn't itself an HFA, so we aren't either
-                (_, None) => return None,
-
-                // first field - store its type and number of members
-                (None, Some((field_ty, field_members))) => {
-                    prev_base_ty = Some(field_ty);
-                    members = field_members;
-                },
-
-                // 2nd or later field - give up if it's a different type; otherwise incr. members
-                (Some(prev_ty), Some((field_ty, field_members))) => {
-                    if prev_ty != field_ty {
-                        return None;
-                    }
-                    members += field_members;
-                }
-            }
-        }
-
-        // Because of previous checks, we know prev_base_ty is Some(...) because
-        //   1. str_tys has at least one element; and
-        //   2. prev_base_ty was filled in (or we would've returned early)
-        let (base_ty, members) = (prev_base_ty.unwrap(), members);
-
-        // Ensure there is no padding.
-        if ty_size(ty) == ty_size(base_ty) * (members as usize) {
-            Some((base_ty, members))
-        } else {
-            None
-        }
-    }
-
-    let homog_agg = match ty.kind() {
-        Float  => Some((ty, 1)),
-        Double => Some((ty, 1)),
-        Array  => check_array(ty),
-        Struct => check_struct(ty),
-        Vector => match ty_size(ty) {
-            4|8 => Some((ty, 1)),
-            _   => None
-        },
-        _ => None
-    };
-
-    // Ensure we have at most four uniquely addressable members
-    homog_agg.and_then(|(base_ty, members)| {
-        if members > 0 && members <= 4 {
-            Some((base_ty, members))
-        } else {
-            None
-        }
-    })
-}
-
-fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType {
-    if is_reg_ty(ty) {
-        let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-        return ArgType::direct(ty, None, None, attr);
-    }
-    if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) {
-        let llty = Type::array(&base_ty, members);
-        return ArgType::direct(ty, Some(llty), None, None);
-    }
-    let size = ty_size(ty);
-    if size <= 16 {
-        let llty = if size <= 1 {
-            Type::i8(ccx)
-        } else if size <= 2 {
-            Type::i16(ccx)
-        } else if size <= 4 {
-            Type::i32(ccx)
-        } else if size <= 8 {
-            Type::i64(ccx)
-        } else {
-            Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64)
-        };
-        return ArgType::direct(ty, Some(llty), None, None);
-    }
-    ArgType::indirect(ty, Some(Attribute::StructRet))
-}
-
-fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType {
-    if is_reg_ty(ty) {
-        let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-        return ArgType::direct(ty, None, None, attr);
-    }
-    if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) {
-        let llty = Type::array(&base_ty, members);
-        return ArgType::direct(ty, Some(llty), None, None);
-    }
-    let size = ty_size(ty);
-    if size <= 16 {
-        let llty = if size == 0 {
-            Type::array(&Type::i64(ccx), 0)
-        } else if size == 1 {
-            Type::i8(ccx)
-        } else if size == 2 {
-            Type::i16(ccx)
-        } else if size <= 4 {
-            Type::i32(ccx)
-        } else if size <= 8 {
-            Type::i64(ccx)
-        } else {
-            Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64)
-        };
-        return ArgType::direct(ty, Some(llty), None, None);
-    }
-    ArgType::indirect(ty, None)
-}
-
-fn is_reg_ty(ty: Type) -> bool {
-    match ty.kind() {
-        Integer
-        | Pointer
-        | Float
-        | Double
-        | Vector => true,
-        _ => false
-    }
-}
-
-pub fn compute_abi_info(ccx: &CrateContext,
-                        atys: &[Type],
-                        rty: Type,
-                        ret_def: bool) -> FnType {
-    let mut arg_tys = Vec::new();
-    for &aty in atys {
-        let ty = classify_arg_ty(ccx, aty);
-        arg_tys.push(ty);
-    }
-
-    let ret_ty = if ret_def {
-        classify_ret_ty(ccx, rty)
-    } else {
-        ArgType::direct(Type::void(ccx), None, None, None)
-    };
-
-    return FnType {
-        arg_tys: arg_tys,
-        ret_ty: ret_ty,
-    };
-}
diff --git a/src/librustc_trans/trans/cabi_arm.rs b/src/librustc_trans/trans/cabi_arm.rs
deleted file mode 100644 (file)
index c5116e7..0000000
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(non_upper_case_globals)]
-
-use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector, Attribute};
-use trans::cabi::{FnType, ArgType};
-use trans::context::CrateContext;
-use trans::type_::Type;
-
-use std::cmp;
-
-pub enum Flavor {
-    General,
-    Ios
-}
-
-type TyAlignFn = fn(ty: Type) -> usize;
-
-fn align_up_to(off: usize, a: usize) -> usize {
-    return (off + a - 1) / a * a;
-}
-
-fn align(off: usize, ty: Type, align_fn: TyAlignFn) -> usize {
-    let a = align_fn(ty);
-    return align_up_to(off, a);
-}
-
-fn general_ty_align(ty: Type) -> usize {
-    match ty.kind() {
-        Integer => ((ty.int_width() as usize) + 7) / 8,
-        Pointer => 4,
-        Float => 4,
-        Double => 8,
-        Struct => {
-            if ty.is_packed() {
-                1
-            } else {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(1, |a, t| cmp::max(a, general_ty_align(*t)))
-            }
-        }
-        Array => {
-            let elt = ty.element_type();
-            general_ty_align(elt)
-        }
-        Vector => {
-            let len = ty.vector_length();
-            let elt = ty.element_type();
-            general_ty_align(elt) * len
-        }
-        _ => panic!("ty_align: unhandled type")
-    }
-}
-
-// For more information see:
-// ARMv7
-// https://developer.apple.com/library/ios/documentation/Xcode/Conceptual
-//    /iPhoneOSABIReference/Articles/ARMv7FunctionCallingConventions.html
-// ARMv6
-// https://developer.apple.com/library/ios/documentation/Xcode/Conceptual
-//    /iPhoneOSABIReference/Articles/ARMv6FunctionCallingConventions.html
-fn ios_ty_align(ty: Type) -> usize {
-    match ty.kind() {
-        Integer => cmp::min(4, ((ty.int_width() as usize) + 7) / 8),
-        Pointer => 4,
-        Float => 4,
-        Double => 4,
-        Struct => {
-            if ty.is_packed() {
-                1
-            } else {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(1, |a, t| cmp::max(a, ios_ty_align(*t)))
-            }
-        }
-        Array => {
-            let elt = ty.element_type();
-            ios_ty_align(elt)
-        }
-        Vector => {
-            let len = ty.vector_length();
-            let elt = ty.element_type();
-            ios_ty_align(elt) * len
-        }
-        _ => panic!("ty_align: unhandled type")
-    }
-}
-
-fn ty_size(ty: Type, align_fn: TyAlignFn) -> usize {
-    match ty.kind() {
-        Integer => ((ty.int_width() as usize) + 7) / 8,
-        Pointer => 4,
-        Float => 4,
-        Double => 8,
-        Struct => {
-            if ty.is_packed() {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(0, |s, t| s + ty_size(*t, align_fn))
-            } else {
-                let str_tys = ty.field_types();
-                let size = str_tys.iter()
-                                  .fold(0, |s, t| {
-                                      align(s, *t, align_fn) + ty_size(*t, align_fn)
-                                  });
-                align(size, ty, align_fn)
-            }
-        }
-        Array => {
-            let len = ty.array_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt, align_fn);
-            len * eltsz
-        }
-        Vector => {
-            let len = ty.vector_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt, align_fn);
-            len * eltsz
-        }
-        _ => panic!("ty_size: unhandled type")
-    }
-}
-
-fn classify_ret_ty(ccx: &CrateContext, ty: Type, align_fn: TyAlignFn) -> ArgType {
-    if is_reg_ty(ty) {
-        let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-        return ArgType::direct(ty, None, None, attr);
-    }
-    let size = ty_size(ty, align_fn);
-    if size <= 4 {
-        let llty = if size <= 1 {
-            Type::i8(ccx)
-        } else if size <= 2 {
-            Type::i16(ccx)
-        } else {
-            Type::i32(ccx)
-        };
-        return ArgType::direct(ty, Some(llty), None, None);
-    }
-    ArgType::indirect(ty, Some(Attribute::StructRet))
-}
-
-fn classify_arg_ty(ccx: &CrateContext, ty: Type, align_fn: TyAlignFn) -> ArgType {
-    if is_reg_ty(ty) {
-        let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-        return ArgType::direct(ty, None, None, attr);
-    }
-    let align = align_fn(ty);
-    let size = ty_size(ty, align_fn);
-    let llty = if align <= 4 {
-        Type::array(&Type::i32(ccx), ((size + 3) / 4) as u64)
-    } else {
-        Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64)
-    };
-    ArgType::direct(ty, Some(llty), None, None)
-}
-
-fn is_reg_ty(ty: Type) -> bool {
-    match ty.kind() {
-        Integer
-        | Pointer
-        | Float
-        | Double
-        | Vector => true,
-        _ => false
-    }
-}
-
-pub fn compute_abi_info(ccx: &CrateContext,
-                        atys: &[Type],
-                        rty: Type,
-                        ret_def: bool,
-                        flavor: Flavor) -> FnType {
-    let align_fn = match flavor {
-        Flavor::General => general_ty_align as TyAlignFn,
-        Flavor::Ios => ios_ty_align as TyAlignFn,
-    };
-
-    let mut arg_tys = Vec::new();
-    for &aty in atys {
-        let ty = classify_arg_ty(ccx, aty, align_fn);
-        arg_tys.push(ty);
-    }
-
-    let ret_ty = if ret_def {
-        classify_ret_ty(ccx, rty, align_fn)
-    } else {
-        ArgType::direct(Type::void(ccx), None, None, None)
-    };
-
-    return FnType {
-        arg_tys: arg_tys,
-        ret_ty: ret_ty,
-    };
-}
diff --git a/src/librustc_trans/trans/cabi_asmjs.rs b/src/librustc_trans/trans/cabi_asmjs.rs
deleted file mode 100644 (file)
index 3a4a6b9..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(non_upper_case_globals)]
-
-use llvm::{Struct, Array, Attribute};
-use trans::cabi::{FnType, ArgType};
-use trans::context::CrateContext;
-use trans::type_::Type;
-
-// Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128
-
-// See the https://github.com/kripken/emscripten-fastcomp-clang repository.
-// The class `EmscriptenABIInfo` in `/lib/CodeGen/TargetInfo.cpp` contains the ABI definitions.
-
-fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType {
-    match ty.kind() {
-        Struct => {
-            let field_types = ty.field_types();
-            if field_types.len() == 1 {
-                ArgType::direct(ty, Some(field_types[0]), None, None)
-            } else {
-                ArgType::indirect(ty, Some(Attribute::StructRet))
-            }
-        },
-        Array => {
-            ArgType::indirect(ty, Some(Attribute::StructRet))
-        },
-        _ => {
-            let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-            ArgType::direct(ty, None, None, attr)
-        }
-    }
-}
-
-fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType {
-    if ty.is_aggregate() {
-        ArgType::indirect(ty, Some(Attribute::ByVal))
-    } else {
-        let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-        ArgType::direct(ty, None, None, attr)
-    }
-}
-
-pub fn compute_abi_info(ccx: &CrateContext,
-                        atys: &[Type],
-                        rty: Type,
-                        ret_def: bool) -> FnType {
-    let mut arg_tys = Vec::new();
-    for &aty in atys {
-        let ty = classify_arg_ty(ccx, aty);
-        arg_tys.push(ty);
-    }
-
-    let ret_ty = if ret_def {
-        classify_ret_ty(ccx, rty)
-    } else {
-        ArgType::direct(Type::void(ccx), None, None, None)
-    };
-
-    return FnType {
-        arg_tys: arg_tys,
-        ret_ty: ret_ty,
-    };
-}
diff --git a/src/librustc_trans/trans/cabi_mips.rs b/src/librustc_trans/trans/cabi_mips.rs
deleted file mode 100644 (file)
index bcffb23..0000000
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(non_upper_case_globals)]
-
-use libc::c_uint;
-use std::cmp;
-use llvm;
-use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector, Attribute};
-use trans::cabi::{ArgType, FnType};
-use trans::context::CrateContext;
-use trans::type_::Type;
-
-fn align_up_to(off: usize, a: usize) -> usize {
-    return (off + a - 1) / a * a;
-}
-
-fn align(off: usize, ty: Type) -> usize {
-    let a = ty_align(ty);
-    return align_up_to(off, a);
-}
-
-fn ty_align(ty: Type) -> usize {
-    match ty.kind() {
-        Integer => ((ty.int_width() as usize) + 7) / 8,
-        Pointer => 4,
-        Float => 4,
-        Double => 8,
-        Struct => {
-          if ty.is_packed() {
-            1
-          } else {
-            let str_tys = ty.field_types();
-            str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
-          }
-        }
-        Array => {
-            let elt = ty.element_type();
-            ty_align(elt)
-        }
-        Vector => {
-            let len = ty.vector_length();
-            let elt = ty.element_type();
-            ty_align(elt) * len
-        }
-        _ => panic!("ty_align: unhandled type")
-    }
-}
-
-fn ty_size(ty: Type) -> usize {
-    match ty.kind() {
-        Integer => ((ty.int_width() as usize) + 7) / 8,
-        Pointer => 4,
-        Float => 4,
-        Double => 8,
-        Struct => {
-            if ty.is_packed() {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(0, |s, t| s + ty_size(*t))
-            } else {
-                let str_tys = ty.field_types();
-                let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
-                align(size, ty)
-            }
-        }
-        Array => {
-            let len = ty.array_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt);
-            len * eltsz
-        }
-        Vector => {
-            let len = ty.vector_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt);
-            len * eltsz
-        }
-        _ => panic!("ty_size: unhandled type")
-    }
-}
-
-fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType {
-    if is_reg_ty(ty) {
-        let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-        ArgType::direct(ty, None, None, attr)
-    } else {
-        ArgType::indirect(ty, Some(Attribute::StructRet))
-    }
-}
-
-fn classify_arg_ty(ccx: &CrateContext, ty: Type, offset: &mut usize) -> ArgType {
-    let orig_offset = *offset;
-    let size = ty_size(ty) * 8;
-    let mut align = ty_align(ty);
-
-    align = cmp::min(cmp::max(align, 4), 8);
-    *offset = align_up_to(*offset, align);
-    *offset += align_up_to(size, align * 8) / 8;
-
-    if is_reg_ty(ty) {
-        let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-        ArgType::direct(ty, None, None, attr)
-    } else {
-        ArgType::direct(
-            ty,
-            Some(struct_ty(ccx, ty)),
-            padding_ty(ccx, align, orig_offset),
-            None
-        )
-    }
-}
-
-fn is_reg_ty(ty: Type) -> bool {
-    return match ty.kind() {
-        Integer
-        | Pointer
-        | Float
-        | Double
-        | Vector => true,
-        _ => false
-    };
-}
-
-fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option<Type> {
-    if ((align - 1 ) & offset) > 0 {
-        Some(Type::i32(ccx))
-    } else {
-        None
-    }
-}
-
-fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec<Type> {
-    let int_ty = Type::i32(ccx);
-    let mut args = Vec::new();
-
-    let mut n = size / 32;
-    while n > 0 {
-        args.push(int_ty);
-        n -= 1;
-    }
-
-    let r = size % 32;
-    if r > 0 {
-        unsafe {
-            args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint)));
-        }
-    }
-
-    args
-}
-
-fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
-    let size = ty_size(ty) * 8;
-    Type::struct_(ccx, &coerce_to_int(ccx, size), false)
-}
-
-pub fn compute_abi_info(ccx: &CrateContext,
-                        atys: &[Type],
-                        rty: Type,
-                        ret_def: bool) -> FnType {
-    let ret_ty = if ret_def {
-        classify_ret_ty(ccx, rty)
-    } else {
-        ArgType::direct(Type::void(ccx), None, None, None)
-    };
-
-    let sret = ret_ty.is_indirect();
-    let mut arg_tys = Vec::new();
-    let mut offset = if sret { 4 } else { 0 };
-
-    for aty in atys {
-        let ty = classify_arg_ty(ccx, *aty, &mut offset);
-        arg_tys.push(ty);
-    };
-
-    return FnType {
-        arg_tys: arg_tys,
-        ret_ty: ret_ty,
-    };
-}
diff --git a/src/librustc_trans/trans/cabi_powerpc.rs b/src/librustc_trans/trans/cabi_powerpc.rs
deleted file mode 100644 (file)
index 1bcc8fd..0000000
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use libc::c_uint;
-use llvm;
-use llvm::{Integer, Pointer, Float, Double, Struct, Array, Attribute};
-use trans::cabi::{FnType, ArgType};
-use trans::context::CrateContext;
-use trans::type_::Type;
-
-use std::cmp;
-
-fn align_up_to(off: usize, a: usize) -> usize {
-    return (off + a - 1) / a * a;
-}
-
-fn align(off: usize, ty: Type) -> usize {
-    let a = ty_align(ty);
-    return align_up_to(off, a);
-}
-
-fn ty_align(ty: Type) -> usize {
-    match ty.kind() {
-        Integer => {
-            unsafe {
-                ((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as usize) + 7) / 8
-            }
-        }
-        Pointer => 4,
-        Float => 4,
-        Double => 8,
-        Struct => {
-          if ty.is_packed() {
-            1
-          } else {
-            let str_tys = ty.field_types();
-            str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
-          }
-        }
-        Array => {
-            let elt = ty.element_type();
-            ty_align(elt)
-        }
-        _ => panic!("ty_size: unhandled type")
-    }
-}
-
-fn ty_size(ty: Type) -> usize {
-    match ty.kind() {
-        Integer => {
-            unsafe {
-                ((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as usize) + 7) / 8
-            }
-        }
-        Pointer => 4,
-        Float => 4,
-        Double => 8,
-        Struct => {
-            if ty.is_packed() {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(0, |s, t| s + ty_size(*t))
-            } else {
-                let str_tys = ty.field_types();
-                let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
-                align(size, ty)
-            }
-        }
-        Array => {
-            let len = ty.array_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt);
-            len * eltsz
-        }
-        _ => panic!("ty_size: unhandled type")
-    }
-}
-
-fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType {
-    if is_reg_ty(ty) {
-        let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-        ArgType::direct(ty, None, None, attr)
-    } else {
-        ArgType::indirect(ty, Some(Attribute::StructRet))
-    }
-}
-
-fn classify_arg_ty(ccx: &CrateContext, ty: Type, offset: &mut usize) -> ArgType {
-    let orig_offset = *offset;
-    let size = ty_size(ty) * 8;
-    let mut align = ty_align(ty);
-
-    align = cmp::min(cmp::max(align, 4), 8);
-    *offset = align_up_to(*offset, align);
-    *offset += align_up_to(size, align * 8) / 8;
-
-    if is_reg_ty(ty) {
-        let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-        ArgType::direct(ty, None, None, attr)
-    } else {
-        ArgType::direct(
-            ty,
-            Some(struct_ty(ccx, ty)),
-            padding_ty(ccx, align, orig_offset),
-            None
-        )
-    }
-}
-
-fn is_reg_ty(ty: Type) -> bool {
-    return match ty.kind() {
-        Integer
-        | Pointer
-        | Float
-        | Double => true,
-        _ => false
-    };
-}
-
-fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option<Type> {
-    if ((align - 1 ) & offset) > 0 {
-        Some(Type::i32(ccx))
-    } else {
-        None
-    }
-}
-
-fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec<Type> {
-    let int_ty = Type::i32(ccx);
-    let mut args = Vec::new();
-
-    let mut n = size / 32;
-    while n > 0 {
-        args.push(int_ty);
-        n -= 1;
-    }
-
-    let r = size % 32;
-    if r > 0 {
-        unsafe {
-            args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint)));
-        }
-    }
-
-    args
-}
-
-fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
-    let size = ty_size(ty) * 8;
-    Type::struct_(ccx, &coerce_to_int(ccx, size), false)
-}
-
-pub fn compute_abi_info(ccx: &CrateContext,
-                        atys: &[Type],
-                        rty: Type,
-                        ret_def: bool) -> FnType {
-    let ret_ty = if ret_def {
-        classify_ret_ty(ccx, rty)
-    } else {
-        ArgType::direct(Type::void(ccx), None, None, None)
-    };
-
-    let sret = ret_ty.is_indirect();
-    let mut arg_tys = Vec::new();
-    let mut offset = if sret { 4 } else { 0 };
-
-    for aty in atys {
-        let ty = classify_arg_ty(ccx, *aty, &mut offset);
-        arg_tys.push(ty);
-    };
-
-    return FnType {
-        arg_tys: arg_tys,
-        ret_ty: ret_ty,
-    };
-}
diff --git a/src/librustc_trans/trans/cabi_powerpc64.rs b/src/librustc_trans/trans/cabi_powerpc64.rs
deleted file mode 100644 (file)
index f76bb4f..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2014-2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// FIXME: The PowerPC64 ABI needs to zero or sign extend function
-// call parameters, but compute_abi_info() is passed LLVM types
-// which have no sign information.
-//
-// Alignment of 128 bit types is not currently handled, this will
-// need to be fixed when PowerPC vector support is added.
-
-use llvm::{Integer, Pointer, Float, Double, Struct, Array, Attribute};
-use trans::cabi::{FnType, ArgType};
-use trans::context::CrateContext;
-use trans::type_::Type;
-
-use std::cmp;
-
-fn align_up_to(off: usize, a: usize) -> usize {
-    return (off + a - 1) / a * a;
-}
-
-fn align(off: usize, ty: Type) -> usize {
-    let a = ty_align(ty);
-    return align_up_to(off, a);
-}
-
-fn ty_align(ty: Type) -> usize {
-    match ty.kind() {
-        Integer => ((ty.int_width() as usize) + 7) / 8,
-        Pointer => 8,
-        Float => 4,
-        Double => 8,
-        Struct => {
-            if ty.is_packed() {
-                1
-            } else {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
-            }
-        }
-        Array => {
-            let elt = ty.element_type();
-            ty_align(elt)
-        }
-        _ => panic!("ty_align: unhandled type")
-    }
-}
-
-fn ty_size(ty: Type) -> usize {
-    match ty.kind() {
-        Integer => ((ty.int_width() as usize) + 7) / 8,
-        Pointer => 8,
-        Float => 4,
-        Double => 8,
-        Struct => {
-            if ty.is_packed() {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(0, |s, t| s + ty_size(*t))
-            } else {
-                let str_tys = ty.field_types();
-                let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
-                align(size, ty)
-            }
-        }
-        Array => {
-            let len = ty.array_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt);
-            len * eltsz
-        }
-        _ => panic!("ty_size: unhandled type")
-    }
-}
-
-fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> {
-    fn check_array(ty: Type) -> Option<(Type, u64)> {
-        let len = ty.array_length() as u64;
-        if len == 0 {
-            return None
-        }
-        let elt = ty.element_type();
-
-        // if our element is an HFA/HVA, so are we; multiply members by our len
-        is_homogenous_aggregate_ty(elt).map(|(base_ty, members)| (base_ty, len * members))
-    }
-
-    fn check_struct(ty: Type) -> Option<(Type, u64)> {
-        let str_tys = ty.field_types();
-        if str_tys.len() == 0 {
-            return None
-        }
-
-        let mut prev_base_ty = None;
-        let mut members = 0;
-        for opt_homog_agg in str_tys.iter().map(|t| is_homogenous_aggregate_ty(*t)) {
-            match (prev_base_ty, opt_homog_agg) {
-                // field isn't itself an HFA, so we aren't either
-                (_, None) => return None,
-
-                // first field - store its type and number of members
-                (None, Some((field_ty, field_members))) => {
-                    prev_base_ty = Some(field_ty);
-                    members = field_members;
-                },
-
-                // 2nd or later field - give up if it's a different type; otherwise incr. members
-                (Some(prev_ty), Some((field_ty, field_members))) => {
-                    if prev_ty != field_ty {
-                        return None;
-                    }
-                    members += field_members;
-                }
-            }
-        }
-
-        // Because of previous checks, we know prev_base_ty is Some(...) because
-        //   1. str_tys has at least one element; and
-        //   2. prev_base_ty was filled in (or we would've returned early)
-        let (base_ty, members) = (prev_base_ty.unwrap(), members);
-
-        // Ensure there is no padding.
-        if ty_size(ty) == ty_size(base_ty) * (members as usize) {
-            Some((base_ty, members))
-        } else {
-            None
-        }
-    }
-
-    let homog_agg = match ty.kind() {
-        Float  => Some((ty, 1)),
-        Double => Some((ty, 1)),
-        Array  => check_array(ty),
-        Struct => check_struct(ty),
-        _ => None
-    };
-
-    // Ensure we have at most eight uniquely addressable members
-    homog_agg.and_then(|(base_ty, members)| {
-        if members > 0 && members <= 8 {
-            Some((base_ty, members))
-        } else {
-            None
-        }
-    })
-}
-
-fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType {
-    if is_reg_ty(ty) {
-        let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-        return ArgType::direct(ty, None, None, attr);
-    }
-
-    // The PowerPC64 big endian ABI doesn't return aggregates in registers
-    if ccx.sess().target.target.target_endian == "big" {
-        return ArgType::indirect(ty, Some(Attribute::StructRet))
-    }
-
-    if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) {
-        let llty = Type::array(&base_ty, members);
-        return ArgType::direct(ty, Some(llty), None, None);
-    }
-    let size = ty_size(ty);
-    if size <= 16 {
-        let llty = if size <= 1 {
-            Type::i8(ccx)
-        } else if size <= 2 {
-            Type::i16(ccx)
-        } else if size <= 4 {
-            Type::i32(ccx)
-        } else if size <= 8 {
-            Type::i64(ccx)
-        } else {
-            Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64)
-        };
-        return ArgType::direct(ty, Some(llty), None, None);
-    }
-
-    ArgType::indirect(ty, Some(Attribute::StructRet))
-}
-
-fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType {
-    if is_reg_ty(ty) {
-        let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-        return ArgType::direct(ty, None, None, attr);
-    }
-    if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) {
-        let llty = Type::array(&base_ty, members);
-        return ArgType::direct(ty, Some(llty), None, None);
-    }
-
-    ArgType::direct(
-        ty,
-        Some(struct_ty(ccx, ty)),
-        None,
-        None
-    )
-}
-
-fn is_reg_ty(ty: Type) -> bool {
-    match ty.kind() {
-        Integer
-        | Pointer
-        | Float
-        | Double => true,
-        _ => false
-    }
-}
-
-fn coerce_to_long(ccx: &CrateContext, size: usize) -> Vec<Type> {
-    let long_ty = Type::i64(ccx);
-    let mut args = Vec::new();
-
-    let mut n = size / 64;
-    while n > 0 {
-        args.push(long_ty);
-        n -= 1;
-    }
-
-    let r = size % 64;
-    if r > 0 {
-        args.push(Type::ix(ccx, r as u64));
-    }
-
-    args
-}
-
-fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
-    let size = ty_size(ty) * 8;
-    Type::struct_(ccx, &coerce_to_long(ccx, size), false)
-}
-
-pub fn compute_abi_info(ccx: &CrateContext,
-                        atys: &[Type],
-                        rty: Type,
-                        ret_def: bool) -> FnType {
-    let ret_ty = if ret_def {
-        classify_ret_ty(ccx, rty)
-    } else {
-        ArgType::direct(Type::void(ccx), None, None, None)
-    };
-
-    let mut arg_tys = Vec::new();
-    for &aty in atys {
-        let ty = classify_arg_ty(ccx, aty);
-        arg_tys.push(ty);
-    };
-
-    return FnType {
-        arg_tys: arg_tys,
-        ret_ty: ret_ty,
-    };
-}
diff --git a/src/librustc_trans/trans/cabi_x86.rs b/src/librustc_trans/trans/cabi_x86.rs
deleted file mode 100644 (file)
index 50a3095..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use self::Strategy::*;
-use llvm::*;
-use trans::cabi::{ArgType, FnType};
-use trans::type_::Type;
-use super::common::*;
-use super::machine::*;
-
-enum Strategy { RetValue(Type), RetPointer }
-pub fn compute_abi_info(ccx: &CrateContext,
-                        atys: &[Type],
-                        rty: Type,
-                        ret_def: bool) -> FnType {
-    let mut arg_tys = Vec::new();
-
-    let ret_ty;
-    if !ret_def {
-        ret_ty = ArgType::direct(Type::void(ccx), None, None, None);
-    } else if rty.kind() == Struct {
-        // Returning a structure. Most often, this will use
-        // a hidden first argument. On some platforms, though,
-        // small structs are returned as integers.
-        //
-        // Some links:
-        // http://www.angelcode.com/dev/callconv/callconv.html
-        // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
-
-        let t = &ccx.sess().target.target;
-        let strategy = if t.options.is_like_osx || t.options.is_like_windows {
-            match llsize_of_alloc(ccx, rty) {
-                1 => RetValue(Type::i8(ccx)),
-                2 => RetValue(Type::i16(ccx)),
-                4 => RetValue(Type::i32(ccx)),
-                8 => RetValue(Type::i64(ccx)),
-                _ => RetPointer
-            }
-        } else {
-            RetPointer
-        };
-
-        match strategy {
-            RetValue(t) => {
-                ret_ty = ArgType::direct(rty, Some(t), None, None);
-            }
-            RetPointer => {
-                ret_ty = ArgType::indirect(rty, Some(Attribute::StructRet));
-            }
-        }
-    } else {
-        let attr = if rty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-        ret_ty = ArgType::direct(rty, None, None, attr);
-    }
-
-    for &t in atys {
-        let ty = match t.kind() {
-            Struct => {
-                let size = llsize_of_alloc(ccx, t);
-                if size == 0 {
-                    ArgType::ignore(t)
-                } else {
-                    ArgType::indirect(t, Some(Attribute::ByVal))
-                }
-            }
-            _ => {
-                let attr = if t == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-                ArgType::direct(t, None, None, attr)
-            }
-        };
-        arg_tys.push(ty);
-    }
-
-    return FnType {
-        arg_tys: arg_tys,
-        ret_ty: ret_ty,
-    };
-}
diff --git a/src/librustc_trans/trans/cabi_x86_64.rs b/src/librustc_trans/trans/cabi_x86_64.rs
deleted file mode 100644 (file)
index 00d8fda..0000000
+++ /dev/null
@@ -1,464 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// The classification code for the x86_64 ABI is taken from the clay language
-// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
-
-#![allow(non_upper_case_globals)]
-use self::RegClass::*;
-
-use llvm::{Integer, Pointer, Float, Double};
-use llvm::{Struct, Array, Attribute, Vector};
-use trans::cabi::{ArgType, FnType};
-use trans::context::CrateContext;
-use trans::type_::Type;
-
-use std::cmp;
-
-#[derive(Clone, Copy, PartialEq)]
-enum RegClass {
-    NoClass,
-    Int,
-    SSEFs,
-    SSEFv,
-    SSEDs,
-    SSEDv,
-    SSEInt(/* bitwidth */ u64),
-    /// Data that can appear in the upper half of an SSE register.
-    SSEUp,
-    X87,
-    X87Up,
-    ComplexX87,
-    Memory
-}
-
-trait TypeMethods {
-    fn is_reg_ty(&self) -> bool;
-}
-
-impl TypeMethods for Type {
-    fn is_reg_ty(&self) -> bool {
-        match self.kind() {
-            Integer | Pointer | Float | Double => true,
-            _ => false
-        }
-    }
-}
-
-impl RegClass {
-    fn is_sse(&self) -> bool {
-        match *self {
-            SSEFs | SSEFv | SSEDs | SSEDv | SSEInt(_) => true,
-            _ => false
-        }
-    }
-}
-
-trait ClassList {
-    fn is_pass_byval(&self) -> bool;
-    fn is_ret_bysret(&self) -> bool;
-}
-
-impl ClassList for [RegClass] {
-    fn is_pass_byval(&self) -> bool {
-        if self.is_empty() { return false; }
-
-        let class = self[0];
-           class == Memory
-        || class == X87
-        || class == ComplexX87
-    }
-
-    fn is_ret_bysret(&self) -> bool {
-        if self.is_empty() { return false; }
-
-        self[0] == Memory
-    }
-}
-
-fn classify_ty(ty: Type) -> Vec<RegClass> {
-    fn align(off: usize, ty: Type) -> usize {
-        let a = ty_align(ty);
-        return (off + a - 1) / a * a;
-    }
-
-    fn ty_align(ty: Type) -> usize {
-        match ty.kind() {
-            Integer => ((ty.int_width() as usize) + 7) / 8,
-            Pointer => 8,
-            Float => 4,
-            Double => 8,
-            Struct => {
-              if ty.is_packed() {
-                1
-              } else {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
-              }
-            }
-            Array => {
-                let elt = ty.element_type();
-                ty_align(elt)
-            }
-            Vector => {
-                let len = ty.vector_length();
-                let elt = ty.element_type();
-                ty_align(elt) * len
-            }
-            _ => panic!("ty_align: unhandled type")
-        }
-    }
-
-    fn ty_size(ty: Type) -> usize {
-        match ty.kind() {
-            Integer => (ty.int_width() as usize + 7) / 8,
-            Pointer => 8,
-            Float => 4,
-            Double => 8,
-            Struct => {
-                let str_tys = ty.field_types();
-                if ty.is_packed() {
-                    str_tys.iter().fold(0, |s, t| s + ty_size(*t))
-                } else {
-                    let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
-                    align(size, ty)
-                }
-            }
-            Array => {
-                let len = ty.array_length();
-                let elt = ty.element_type();
-                let eltsz = ty_size(elt);
-                len * eltsz
-            }
-            Vector => {
-                let len = ty.vector_length();
-                let elt = ty.element_type();
-                let eltsz = ty_size(elt);
-                len * eltsz
-            }
-
-            _ => panic!("ty_size: unhandled type")
-        }
-    }
-
-    fn all_mem(cls: &mut [RegClass]) {
-        for elt in cls {
-            *elt = Memory;
-        }
-    }
-
-    fn unify(cls: &mut [RegClass],
-             i: usize,
-             newv: RegClass) {
-        if cls[i] == newv { return }
-
-        let to_write = match (cls[i], newv) {
-            (NoClass,     _) => newv,
-            (_,           NoClass) => return,
-
-            (Memory,      _) |
-            (_,           Memory) => Memory,
-
-            (Int,         _) |
-            (_,           Int) => Int,
-
-            (X87,         _) |
-            (X87Up,       _) |
-            (ComplexX87,  _) |
-            (_,           X87) |
-            (_,           X87Up) |
-            (_,           ComplexX87) => Memory,
-
-            (SSEFv,       SSEUp) |
-            (SSEFs,       SSEUp) |
-            (SSEDv,       SSEUp) |
-            (SSEDs,       SSEUp) |
-            (SSEInt(_),   SSEUp) => return,
-
-            (_,           _) => newv
-        };
-        cls[i] = to_write;
-    }
-
-    fn classify_struct(tys: &[Type],
-                       cls: &mut [RegClass],
-                       i: usize,
-                       off: usize,
-                       packed: bool) {
-        let mut field_off = off;
-        for ty in tys {
-            if !packed {
-                field_off = align(field_off, *ty);
-            }
-            classify(*ty, cls, i, field_off);
-            field_off += ty_size(*ty);
-        }
-    }
-
-    fn classify(ty: Type,
-                cls: &mut [RegClass], ix: usize,
-                off: usize) {
-        let t_align = ty_align(ty);
-        let t_size = ty_size(ty);
-
-        let misalign = off % t_align;
-        if misalign != 0 {
-            let mut i = off / 8;
-            let e = (off + t_size + 7) / 8;
-            while i < e {
-                unify(cls, ix + i, Memory);
-                i += 1;
-            }
-            return;
-        }
-
-        match ty.kind() {
-            Integer |
-            Pointer => {
-                unify(cls, ix + off / 8, Int);
-            }
-            Float => {
-                if off % 8 == 4 {
-                    unify(cls, ix + off / 8, SSEFv);
-                } else {
-                    unify(cls, ix + off / 8, SSEFs);
-                }
-            }
-            Double => {
-                unify(cls, ix + off / 8, SSEDs);
-            }
-            Struct => {
-                classify_struct(&ty.field_types(), cls, ix, off, ty.is_packed());
-            }
-            Array => {
-                let len = ty.array_length();
-                let elt = ty.element_type();
-                let eltsz = ty_size(elt);
-                let mut i = 0;
-                while i < len {
-                    classify(elt, cls, ix, off + i * eltsz);
-                    i += 1;
-                }
-            }
-            Vector => {
-                let len = ty.vector_length();
-                let elt = ty.element_type();
-                let eltsz = ty_size(elt);
-                let mut reg = match elt.kind() {
-                    Integer => SSEInt(elt.int_width()),
-                    Float => SSEFv,
-                    Double => SSEDv,
-                    _ => panic!("classify: unhandled vector element type")
-                };
-
-                let mut i = 0;
-                while i < len {
-                    unify(cls, ix + (off + i * eltsz) / 8, reg);
-
-                    // everything after the first one is the upper
-                    // half of a register.
-                    reg = SSEUp;
-                    i += 1;
-                }
-            }
-            _ => panic!("classify: unhandled type")
-        }
-    }
-
-    fn fixup(ty: Type, cls: &mut [RegClass]) {
-        let mut i = 0;
-        let ty_kind = ty.kind();
-        let e = cls.len();
-        if cls.len() > 2 && (ty_kind == Struct || ty_kind == Array || ty_kind == Vector) {
-            if cls[i].is_sse() {
-                i += 1;
-                while i < e {
-                    if cls[i] != SSEUp {
-                        all_mem(cls);
-                        return;
-                    }
-                    i += 1;
-                }
-            } else {
-                all_mem(cls);
-                return
-            }
-        } else {
-            while i < e {
-                if cls[i] == Memory {
-                    all_mem(cls);
-                    return;
-                }
-                if cls[i] == X87Up {
-                    // for darwin
-                    // cls[i] = SSEDs;
-                    all_mem(cls);
-                    return;
-                }
-                if cls[i] == SSEUp {
-                    cls[i] = SSEDv;
-                } else if cls[i].is_sse() {
-                    i += 1;
-                    while i != e && cls[i] == SSEUp { i += 1; }
-                } else if cls[i] == X87 {
-                    i += 1;
-                    while i != e && cls[i] == X87Up { i += 1; }
-                } else {
-                    i += 1;
-                }
-            }
-        }
-    }
-
-    let words = (ty_size(ty) + 7) / 8;
-    let mut cls = vec![NoClass; words];
-    if words > 4 {
-        all_mem(&mut cls);
-        return cls;
-    }
-    classify(ty, &mut cls, 0, 0);
-    fixup(ty, &mut cls);
-    return cls;
-}
-
-fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type {
-    fn llvec_len(cls: &[RegClass]) -> usize {
-        let mut len = 1;
-        for c in cls {
-            if *c != SSEUp {
-                break;
-            }
-            len += 1;
-        }
-        return len;
-    }
-
-    let mut tys = Vec::new();
-    let mut i = 0;
-    let e = cls.len();
-    while i < e {
-        match cls[i] {
-            Int => {
-                tys.push(Type::i64(ccx));
-            }
-            SSEFv | SSEDv | SSEInt(_) => {
-                let (elts_per_word, elt_ty) = match cls[i] {
-                    SSEFv => (2, Type::f32(ccx)),
-                    SSEDv => (1, Type::f64(ccx)),
-                    SSEInt(bits) => {
-                        assert!(bits == 8 || bits == 16 || bits == 32 || bits == 64,
-                                "llreg_ty: unsupported SSEInt width {}", bits);
-                        (64 / bits, Type::ix(ccx, bits))
-                    }
-                    _ => unreachable!(),
-                };
-                let vec_len = llvec_len(&cls[i + 1..]);
-                let vec_ty = Type::vector(&elt_ty, vec_len as u64 * elts_per_word);
-                tys.push(vec_ty);
-                i += vec_len;
-                continue;
-            }
-            SSEFs => {
-                tys.push(Type::f32(ccx));
-            }
-            SSEDs => {
-                tys.push(Type::f64(ccx));
-            }
-            _ => panic!("llregtype: unhandled class")
-        }
-        i += 1;
-    }
-    if tys.len() == 1 && tys[0].kind() == Vector {
-        // if the type contains only a vector, pass it as that vector.
-        tys[0]
-    } else {
-        Type::struct_(ccx, &tys, false)
-    }
-}
-
-pub fn compute_abi_info(ccx: &CrateContext,
-                        atys: &[Type],
-                        rty: Type,
-                        ret_def: bool) -> FnType {
-    fn x86_64_ty<F>(ccx: &CrateContext,
-                    ty: Type,
-                    is_mem_cls: F,
-                    ind_attr: Attribute)
-                    -> ArgType where
-        F: FnOnce(&[RegClass]) -> bool,
-    {
-        if !ty.is_reg_ty() {
-            let cls = classify_ty(ty);
-            if is_mem_cls(&cls) {
-                ArgType::indirect(ty, Some(ind_attr))
-            } else {
-                ArgType::direct(ty,
-                                Some(llreg_ty(ccx, &cls)),
-                                None,
-                                None)
-            }
-        } else {
-            let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-            ArgType::direct(ty, None, None, attr)
-        }
-    }
-
-    let mut int_regs = 6; // RDI, RSI, RDX, RCX, R8, R9
-    let mut sse_regs = 8; // XMM0-7
-
-    let ret_ty = if ret_def {
-        x86_64_ty(ccx, rty, |cls| {
-            if cls.is_ret_bysret() {
-                // `sret` parameter thus one less register available
-                int_regs -= 1;
-                true
-            } else {
-                false
-            }
-        }, Attribute::StructRet)
-    } else {
-        ArgType::direct(Type::void(ccx), None, None, None)
-    };
-
-    let mut arg_tys = Vec::new();
-    for t in atys {
-        let ty = x86_64_ty(ccx, *t, |cls| {
-            let needed_int = cls.iter().filter(|&&c| c == Int).count() as isize;
-            let needed_sse = cls.iter().filter(|c| c.is_sse()).count() as isize;
-            let in_mem = cls.is_pass_byval() ||
-                         int_regs < needed_int ||
-                         sse_regs < needed_sse;
-            if in_mem {
-                // `byval` parameter thus one less integer register available
-                int_regs -= 1;
-            } else {
-                // split into sized chunks passed individually
-                int_regs -= needed_int;
-                sse_regs -= needed_sse;
-            }
-            in_mem
-        }, Attribute::ByVal);
-        arg_tys.push(ty);
-
-        // An integer, pointer, double or float parameter
-        // thus the above closure passed to `x86_64_ty` won't
-        // get called.
-        if t.kind() == Integer || t.kind() == Pointer {
-            int_regs -= 1;
-        } else if t.kind() == Double || t.kind() == Float {
-            sse_regs -= 1;
-        }
-    }
-
-    return FnType {
-        arg_tys: arg_tys,
-        ret_ty: ret_ty,
-    };
-}
diff --git a/src/librustc_trans/trans/cabi_x86_win64.rs b/src/librustc_trans/trans/cabi_x86_win64.rs
deleted file mode 100644 (file)
index 120c8dc..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use llvm::*;
-use super::common::*;
-use super::machine::*;
-use trans::cabi::{ArgType, FnType};
-use trans::type_::Type;
-
-// Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
-
-pub fn compute_abi_info(ccx: &CrateContext,
-                          atys: &[Type],
-                          rty: Type,
-                          ret_def: bool) -> FnType {
-    let mut arg_tys = Vec::new();
-
-    let ret_ty;
-    if !ret_def {
-        ret_ty = ArgType::direct(Type::void(ccx), None, None, None);
-    } else if rty.kind() == Struct {
-        ret_ty = match llsize_of_alloc(ccx, rty) {
-            1 => ArgType::direct(rty, Some(Type::i8(ccx)), None, None),
-            2 => ArgType::direct(rty, Some(Type::i16(ccx)), None, None),
-            4 => ArgType::direct(rty, Some(Type::i32(ccx)), None, None),
-            8 => ArgType::direct(rty, Some(Type::i64(ccx)), None, None),
-            _ => ArgType::indirect(rty, Some(Attribute::StructRet))
-        };
-    } else {
-        let attr = if rty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-        ret_ty = ArgType::direct(rty, None, None, attr);
-    }
-
-    for &t in atys {
-        let ty = match t.kind() {
-            Struct => {
-                match llsize_of_alloc(ccx, t) {
-                    1 => ArgType::direct(t, Some(Type::i8(ccx)), None, None),
-                    2 => ArgType::direct(t, Some(Type::i16(ccx)), None, None),
-                    4 => ArgType::direct(t, Some(Type::i32(ccx)), None, None),
-                    8 => ArgType::direct(t, Some(Type::i64(ccx)), None, None),
-                    _ => ArgType::indirect(t, None)
-                }
-            }
-            _ => {
-                let attr = if t == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
-                ArgType::direct(t, None, None, attr)
-            }
-        };
-        arg_tys.push(ty);
-    }
-
-    return FnType {
-        arg_tys: arg_tys,
-        ret_ty: ret_ty,
-    };
-}
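// Illustrative sketch only, not part of the deleted file: the Win64 rule
// encoded by the two `match llsize_of_alloc` expressions above. An aggregate
// whose size is exactly 1, 2, 4 or 8 bytes is coerced to the integer of the
// same width and passed in a register; any other size is passed indirectly
// through a pointer. `PassStyle` and `classify_win64_aggregate` are
// hypothetical names introduced here.
#[derive(Debug, PartialEq)]
enum PassStyle {
    AsInt { bytes: u64 }, // passed directly as iN where N = bytes * 8
    Indirect,             // passed by pointer (plus sret for returns)
}

fn classify_win64_aggregate(size_in_bytes: u64) -> PassStyle {
    match size_in_bytes {
        1 | 2 | 4 | 8 => PassStyle::AsInt { bytes: size_in_bytes },
        _ => PassStyle::Indirect,
    }
}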
diff --git a/src/librustc_trans/trans/callee.rs b/src/librustc_trans/trans/callee.rs
deleted file mode 100644 (file)
index e8dd984..0000000
+++ /dev/null
@@ -1,1096 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Handles translation of callees as well as other call-related
-//! things.  Callees are a superset of normal rust values and sometimes
-//! have different representations.  In particular, top-level fn items
-//! and methods are represented as just a fn ptr and not a full
-//! closure.
-
-pub use self::AutorefArg::*;
-pub use self::CalleeData::*;
-pub use self::CallArgs::*;
-
-use arena::TypedArena;
-use back::link;
-use llvm::{self, ValueRef, get_params};
-use middle::cstore::LOCAL_CRATE;
-use middle::def::Def;
-use middle::def_id::DefId;
-use middle::infer;
-use middle::subst;
-use middle::subst::{Substs};
-use rustc::front::map as hir_map;
-use trans::adt;
-use trans::base;
-use trans::base::*;
-use trans::build::*;
-use trans::callee;
-use trans::cleanup;
-use trans::cleanup::CleanupMethods;
-use trans::common::{self, Block, Result, NodeIdAndSpan, ExprId, CrateContext,
-                    ExprOrMethodCall, FunctionContext, MethodCallKey};
-use trans::consts;
-use trans::datum::*;
-use trans::debuginfo::{DebugLoc, ToDebugLoc};
-use trans::declare;
-use trans::expr;
-use trans::glue;
-use trans::inline;
-use trans::foreign;
-use trans::intrinsic;
-use trans::meth;
-use trans::monomorphize;
-use trans::type_::Type;
-use trans::type_of;
-use trans::Disr;
-use middle::ty::{self, Ty, TypeFoldable};
-use middle::ty::MethodCall;
-use rustc_front::hir;
-
-use syntax::abi::Abi;
-use syntax::ast;
-use syntax::errors;
-use syntax::ptr::P;
-
-#[derive(Copy, Clone)]
-pub struct MethodData {
-    pub llfn: ValueRef,
-    pub llself: ValueRef,
-}
-
-pub enum CalleeData<'tcx> {
-    // Constructor for an enum variant or tuple-like struct,
-    // e.g. Some or Ok
-    NamedTupleConstructor(Disr),
-
-    // Represents a (possibly monomorphized) top-level fn item or method
-    // item. Note that this is just the fn-ptr and is not a Rust closure
-    // value (which is a pair).
-    Fn(/* llfn */ ValueRef),
-
-    Intrinsic(ast::NodeId, subst::Substs<'tcx>),
-
-    TraitItem(MethodData)
-}
-
-pub struct Callee<'blk, 'tcx: 'blk> {
-    pub bcx: Block<'blk, 'tcx>,
-    pub data: CalleeData<'tcx>,
-    pub ty: Ty<'tcx>
-}
-
-fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &hir::Expr)
-                     -> Callee<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_callee");
-    debug!("callee::trans(expr={:?})", expr);
-
-    // pick out special kinds of expressions that can be called:
-    match expr.node {
-        hir::ExprPath(..) => {
-            return trans_def(bcx, bcx.def(expr.id), expr);
-        }
-        _ => {}
-    }
-
-    // any other expressions are closures:
-    return datum_callee(bcx, expr);
-
-    fn datum_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &hir::Expr)
-                                -> Callee<'blk, 'tcx> {
-        let DatumBlock { bcx, datum, .. } = expr::trans(bcx, expr);
-        match datum.ty.sty {
-            ty::TyBareFn(..) => {
-                Callee {
-                    bcx: bcx,
-                    ty: datum.ty,
-                    data: Fn(datum.to_llscalarish(bcx))
-                }
-            }
-            _ => {
-                bcx.tcx().sess.span_bug(
-                    expr.span,
-                    &format!("type of callee is neither bare-fn nor closure: {}",
-                             datum.ty));
-            }
-        }
-    }
-
-    fn fn_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, datum: Datum<'tcx, Rvalue>)
-                             -> Callee<'blk, 'tcx> {
-        Callee {
-            bcx: bcx,
-            data: Fn(datum.val),
-            ty: datum.ty
-        }
-    }
-
-    fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                             def: Def,
-                             ref_expr: &hir::Expr)
-                             -> Callee<'blk, 'tcx> {
-        debug!("trans_def(def={:?}, ref_expr={:?})", def, ref_expr);
-        let expr_ty = common::node_id_type(bcx, ref_expr.id);
-        match def {
-            Def::Fn(did) if {
-                let maybe_def_id = inline::get_local_instance(bcx.ccx(), did);
-                let maybe_ast_node = maybe_def_id.and_then(|def_id| {
-                    let node_id = bcx.tcx().map.as_local_node_id(def_id).unwrap();
-                    bcx.tcx().map.find(node_id)
-                });
-                match maybe_ast_node {
-                    Some(hir_map::NodeStructCtor(_)) => true,
-                    _ => false
-                }
-            } => {
-                Callee {
-                    bcx: bcx,
-                    data: NamedTupleConstructor(Disr(0)),
-                    ty: expr_ty
-                }
-            }
-            Def::Fn(did) if match expr_ty.sty {
-                ty::TyBareFn(_, ref f) => f.abi == Abi::RustIntrinsic ||
-                                          f.abi == Abi::PlatformIntrinsic,
-                _ => false
-            } => {
-                let substs = common::node_id_substs(bcx.ccx(),
-                                                    ExprId(ref_expr.id),
-                                                    bcx.fcx.param_substs);
-                let def_id = inline::maybe_instantiate_inline(bcx.ccx(), did);
-                let node_id = bcx.tcx().map.as_local_node_id(def_id).unwrap();
-                Callee { bcx: bcx, data: Intrinsic(node_id, substs), ty: expr_ty }
-            }
-            Def::Fn(did) => {
-                fn_callee(bcx, trans_fn_ref(bcx.ccx(), did, ExprId(ref_expr.id),
-                                            bcx.fcx.param_substs))
-            }
-            Def::Method(meth_did) => {
-                let method_item = bcx.tcx().impl_or_trait_item(meth_did);
-                let fn_datum = match method_item.container() {
-                    ty::ImplContainer(_) => {
-                        trans_fn_ref(bcx.ccx(), meth_did,
-                                     ExprId(ref_expr.id),
-                                     bcx.fcx.param_substs)
-                    }
-                    ty::TraitContainer(trait_did) => {
-                        meth::trans_static_method_callee(bcx.ccx(),
-                                                         meth_did,
-                                                         trait_did,
-                                                         ref_expr.id,
-                                                         bcx.fcx.param_substs)
-                    }
-                };
-                fn_callee(bcx, fn_datum)
-            }
-            Def::Variant(tid, vid) => {
-                let vinfo = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid);
-                assert_eq!(vinfo.kind(), ty::VariantKind::Tuple);
-
-                Callee {
-                    bcx: bcx,
-                    data: NamedTupleConstructor(Disr::from(vinfo.disr_val)),
-                    ty: expr_ty
-                }
-            }
-            Def::Struct(..) => {
-                Callee {
-                    bcx: bcx,
-                    data: NamedTupleConstructor(Disr(0)),
-                    ty: expr_ty
-                }
-            }
-            Def::Static(..) |
-            Def::Const(..) |
-            Def::AssociatedConst(..) |
-            Def::Local(..) |
-            Def::Upvar(..) => {
-                datum_callee(bcx, ref_expr)
-            }
-            Def::Mod(..) | Def::ForeignMod(..) | Def::Trait(..) |
-            Def::Enum(..) | Def::TyAlias(..) | Def::PrimTy(..) |
-            Def::AssociatedTy(..) | Def::Label(..) | Def::TyParam(..) |
-            Def::SelfTy(..) | Def::Err => {
-                bcx.tcx().sess.span_bug(
-                    ref_expr.span,
-                    &format!("cannot translate def {:?} \
-                             to a callable thing!", def));
-            }
-        }
-    }
-}
-
-/// Translates a reference (with id `ref_id`) to the fn/method with id `def_id` into a function
-/// pointer. This may require monomorphization or inlining.
-pub fn trans_fn_ref<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                              def_id: DefId,
-                              node: ExprOrMethodCall,
-                              param_substs: &'tcx subst::Substs<'tcx>)
-                              -> Datum<'tcx, Rvalue> {
-    let _icx = push_ctxt("trans_fn_ref");
-
-    let substs = common::node_id_substs(ccx, node, param_substs);
-    debug!("trans_fn_ref(def_id={:?}, node={:?}, substs={:?})",
-           def_id,
-           node,
-           substs);
-    trans_fn_ref_with_substs(ccx, def_id, node, param_substs, substs)
-}
-
-/// Translates an adapter that implements the `Fn` trait for a fn
-/// pointer. This is basically the equivalent of something like:
-///
-/// ```
-/// impl<'a> Fn(&'a int) -> &'a int for fn(&int) -> &int {
-///     extern "rust-abi" fn call(&self, args: (&'a int,)) -> &'a int {
-///         (*self)(args.0)
-///     }
-/// }
-/// ```
-///
-/// but for the bare function type given.
-pub fn trans_fn_pointer_shim<'a, 'tcx>(
-    ccx: &'a CrateContext<'a, 'tcx>,
-    closure_kind: ty::ClosureKind,
-    bare_fn_ty: Ty<'tcx>)
-    -> ValueRef
-{
-    let _icx = push_ctxt("trans_fn_pointer_shim");
-    let tcx = ccx.tcx();
-
-    // Normalize the type for better caching.
-    let bare_fn_ty = tcx.erase_regions(&bare_fn_ty);
-
-    // If this is an impl of `Fn` or `FnMut` trait, the receiver is `&self`.
-    let is_by_ref = match closure_kind {
-        ty::FnClosureKind | ty::FnMutClosureKind => true,
-        ty::FnOnceClosureKind => false,
-    };
-    let bare_fn_ty_maybe_ref = if is_by_ref {
-        tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), bare_fn_ty)
-    } else {
-        bare_fn_ty
-    };
-
-    // Check if we already trans'd this shim.
-    match ccx.fn_pointer_shims().borrow().get(&bare_fn_ty_maybe_ref) {
-        Some(&llval) => { return llval; }
-        None => { }
-    }
-
-    debug!("trans_fn_pointer_shim(bare_fn_ty={:?})",
-           bare_fn_ty);
-
-    // Construct the "tuply" version of `bare_fn_ty`. It takes two arguments: `self`,
-    // which is the fn pointer, and `args`, which is the arguments tuple.
-    let (opt_def_id, sig) =
-        match bare_fn_ty.sty {
-            ty::TyBareFn(opt_def_id,
-                           &ty::BareFnTy { unsafety: hir::Unsafety::Normal,
-                                           abi: Abi::Rust,
-                                           ref sig }) => {
-                (opt_def_id, sig)
-            }
-
-            _ => {
-                tcx.sess.bug(&format!("trans_fn_pointer_shim invoked on invalid type: {}",
-                                      bare_fn_ty));
-            }
-        };
-    let sig = tcx.erase_late_bound_regions(sig);
-    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
-    let tuple_input_ty = tcx.mk_tup(sig.inputs.to_vec());
-    let tuple_fn_ty = tcx.mk_fn(opt_def_id,
-        tcx.mk_bare_fn(ty::BareFnTy {
-            unsafety: hir::Unsafety::Normal,
-            abi: Abi::RustCall,
-            sig: ty::Binder(ty::FnSig {
-                inputs: vec![bare_fn_ty_maybe_ref,
-                             tuple_input_ty],
-                output: sig.output,
-                variadic: false
-            })}));
-    debug!("tuple_fn_ty: {:?}", tuple_fn_ty);
-
-    // Make up an internal symbol name for the shim and declare it as a Rust fn.
-    let function_name = link::mangle_internal_name_by_type_and_seq(ccx, bare_fn_ty,
-                                                                   "fn_pointer_shim");
-    let llfn = declare::declare_internal_rust_fn(ccx, &function_name[..], tuple_fn_ty);
-
-    // Build a fresh function context (with empty substs) in which to trans the shim body.
-    let empty_substs = tcx.mk_substs(Substs::trans_empty());
-    let (block_arena, fcx): (TypedArena<_>, FunctionContext);
-    block_arena = TypedArena::new();
-    fcx = new_fn_ctxt(ccx,
-                      llfn,
-                      ast::DUMMY_NODE_ID,
-                      false,
-                      sig.output,
-                      empty_substs,
-                      None,
-                      &block_arena);
-    let mut bcx = init_function(&fcx, false, sig.output);
-
-    let llargs = get_params(fcx.llfn);
-
-    let self_idx = fcx.arg_offset();
-    // the first argument (`self`) is the fn pointer, or a pointer to it when passed by-ref
-    let llfnpointer = if is_by_ref {
-        Load(bcx, llargs[self_idx])
-    } else {
-        llargs[self_idx]
-    };
-
-    assert!(!fcx.needs_ret_allocas);
-
-    let dest = fcx.llretslotptr.get().map(|_|
-        expr::SaveIn(fcx.get_ret_slot(bcx, sig.output, "ret_slot"))
-    );
-
-    bcx = trans_call_inner(bcx, DebugLoc::None, |bcx, _| {
-        Callee {
-            bcx: bcx,
-            data: Fn(llfnpointer),
-            ty: bare_fn_ty
-        }
-    }, ArgVals(&llargs[(self_idx + 1)..]), dest).bcx;
-
-    finish_fn(&fcx, bcx, sig.output, DebugLoc::None);
-
-    ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn);
-
-    llfn
-}
-
-/// Translates a reference to a fn/method item, monomorphizing and
-/// inlining as it goes.
-///
-/// # Parameters
-///
-/// - `ccx`: the crate context
-/// - `def_id`: def id of the fn or method item being referenced
-/// - `node`: node id of the reference to the fn/method, if applicable.
-///   This parameter may be zero; if so, the resulting value may not
-///   have the right type, so it must be cast before being used.
-/// - `param_substs`: if the `node` is in a polymorphic function, these
-///   are the substitutions required to monomorphize its type
-/// - `substs`: values for each of the fn/method's parameters
-pub fn trans_fn_ref_with_substs<'a, 'tcx>(
-    ccx: &CrateContext<'a, 'tcx>,
-    def_id: DefId,
-    node: ExprOrMethodCall,
-    param_substs: &'tcx subst::Substs<'tcx>,
-    substs: subst::Substs<'tcx>)
-    -> Datum<'tcx, Rvalue>
-{
-    let _icx = push_ctxt("trans_fn_ref_with_substs");
-    let tcx = ccx.tcx();
-
-    debug!("trans_fn_ref_with_substs(def_id={:?}, node={:?}, \
-            param_substs={:?}, substs={:?})",
-           def_id,
-           node,
-           param_substs,
-           substs);
-
-    assert!(!substs.types.needs_infer());
-    assert!(!substs.types.has_escaping_regions());
-    let substs = substs.erase_regions();
-
-    // Check whether this fn has an inlined copy and, if so, redirect
-    // def_id to the local id of the inlined copy.
-    let def_id = inline::maybe_instantiate_inline(ccx, def_id);
-
-    fn is_named_tuple_constructor(tcx: &ty::ctxt, def_id: DefId) -> bool {
-        let node_id = match tcx.map.as_local_node_id(def_id) {
-            Some(n) => n,
-            None => { return false; }
-        };
-        let map_node = errors::expect(
-            &tcx.sess.diagnostic(),
-            tcx.map.find(node_id),
-            || "local item should be in ast map".to_string());
-
-        match map_node {
-            hir_map::NodeVariant(v) => {
-                v.node.data.is_tuple()
-            }
-            hir_map::NodeStructCtor(_) => true,
-            _ => false
-        }
-    }
-    let must_monomorphise =
-        !substs.types.is_empty() || is_named_tuple_constructor(tcx, def_id);
-
-    debug!("trans_fn_ref_with_substs({:?}) must_monomorphise: {}",
-           def_id, must_monomorphise);
-
-    // Create a monomorphic version of generic functions
-    if must_monomorphise {
-        // Should be either intra-crate or inlined.
-        assert_eq!(def_id.krate, LOCAL_CRATE);
-
-        let opt_ref_id = match node {
-            ExprId(id) => if id != 0 { Some(id) } else { None },
-            MethodCallKey(_) => None,
-        };
-
-        let substs = tcx.mk_substs(substs);
-        let (val, fn_ty, must_cast) =
-            monomorphize::monomorphic_fn(ccx, def_id, substs, opt_ref_id);
-        if must_cast && node != ExprId(0) {
-            // Monotype of the REFERENCE to the function (type params
-            // are subst'd)
-            let ref_ty = match node {
-                ExprId(id) => tcx.node_id_to_type(id),
-                MethodCallKey(method_call) => {
-                    tcx.tables.borrow().method_map[&method_call].ty
-                }
-            };
-            let ref_ty = monomorphize::apply_param_substs(tcx,
-                                                          param_substs,
-                                                          &ref_ty);
-            let llptrty = type_of::type_of_fn_from_ty(ccx, ref_ty).ptr_to();
-            if llptrty != common::val_ty(val) {
-                let val = consts::ptrcast(val, llptrty);
-                return Datum::new(val, ref_ty, Rvalue::new(ByValue));
-            }
-        }
-        return Datum::new(val, fn_ty, Rvalue::new(ByValue));
-    }
-
-    // Type scheme of the function item (may have type params)
-    let fn_type_scheme = tcx.lookup_item_type(def_id);
-    let fn_type = infer::normalize_associated_type(tcx, &fn_type_scheme.ty);
-
-    // Find the actual function pointer.
-    let mut val = {
-        if let Some(node_id) = ccx.tcx().map.as_local_node_id(def_id) {
-            // Internal reference.
-            get_item_val(ccx, node_id)
-        } else {
-            // External reference.
-            trans_external_path(ccx, def_id, fn_type)
-        }
-    };
-
-    // This is subtle and surprising, but sometimes we have to bitcast
-    // the resulting fn pointer.  The reason has to do with external
-    // functions.  If you have two crates that both bind the same C
-    // library, they may not use precisely the same types: for
-    // example, they will probably each declare their own structs,
-    // which are distinct types from LLVM's point of view (nominal
-    // types).
-    //
-    // Now, if those two crates are linked into an application, and
-    // they contain inlined code, you can wind up with a situation
-    // where both of those functions wind up being loaded into this
-    // application simultaneously. In that case, the same function
-    // (from LLVM's point of view) requires two types. But of course
-    // LLVM won't allow one function to have two types.
-    //
-    // What we currently do, therefore, is declare the function with
-    // one of the two types (whichever happens to come first) and then
-    // bitcast as needed when the function is referenced to make sure
-    // it has the type we expect.
-    //
-    // This can occur on either a crate-local or crate-external
-    // reference. It also occurs when testing libcore and in some
-    // other weird situations. Annoying.
-    let llty = type_of::type_of_fn_from_ty(ccx, fn_type);
-    let llptrty = llty.ptr_to();
-    if common::val_ty(val) != llptrty {
-        debug!("trans_fn_ref_with_substs(): casting pointer!");
-        val = consts::ptrcast(val, llptrty);
-    } else {
-        debug!("trans_fn_ref_with_substs(): not casting pointer!");
-    }
-
-    Datum::new(val, fn_type, Rvalue::new(ByValue))
-}
-
-// ______________________________________________________________________
-// Translating calls
-
-pub fn trans_call<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                  call_expr: &hir::Expr,
-                                  f: &hir::Expr,
-                                  args: CallArgs<'a, 'tcx>,
-                                  dest: expr::Dest)
-                                  -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_call");
-    trans_call_inner(bcx,
-                     call_expr.debug_loc(),
-                     |bcx, _| trans(bcx, f),
-                     args,
-                     Some(dest)).bcx
-}
-
-pub fn trans_method_call<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                         call_expr: &hir::Expr,
-                                         rcvr: &hir::Expr,
-                                         args: CallArgs<'a, 'tcx>,
-                                         dest: expr::Dest)
-                                         -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_method_call");
-    debug!("trans_method_call(call_expr={:?})", call_expr);
-    let method_call = MethodCall::expr(call_expr.id);
-    trans_call_inner(
-        bcx,
-        call_expr.debug_loc(),
-        |cx, arg_cleanup_scope| {
-            meth::trans_method_callee(cx, method_call, Some(rcvr), arg_cleanup_scope)
-        },
-        args,
-        Some(dest)).bcx
-}
-
-pub fn trans_lang_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                   did: DefId,
-                                   args: &[ValueRef],
-                                   dest: Option<expr::Dest>,
-                                   debug_loc: DebugLoc)
-                                   -> Result<'blk, 'tcx> {
-    callee::trans_call_inner(bcx, debug_loc, |bcx, _| {
-        let datum = trans_fn_ref_with_substs(bcx.ccx(),
-                                             did,
-                                             ExprId(0),
-                                             bcx.fcx.param_substs,
-                                             subst::Substs::trans_empty());
-        Callee {
-            bcx: bcx,
-            data: Fn(datum.val),
-            ty: datum.ty
-        }
-    }, ArgVals(args), dest)
-}
-
-/// This behemoth of a function translates function calls. Unfortunately, in
-/// order to generate more efficient LLVM output at -O0, it has quite a complex
-/// signature (refactoring this into two functions seems like a good idea).
-///
-/// In particular, for lang items, it is invoked with a dest of None, and in
-/// that case the return value contains the result of the fn. The lang item must
-/// not return a structural type or else all heck breaks loose.
-///
-/// For non-lang items, `dest` is always Some, and hence the result is written
-/// into memory somewhere. Nonetheless we return the actual return value of the
-/// function.
-pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
-                                           debug_loc: DebugLoc,
-                                           get_callee: F,
-                                           args: CallArgs<'a, 'tcx>,
-                                           dest: Option<expr::Dest>)
-                                           -> Result<'blk, 'tcx> where
-    F: FnOnce(Block<'blk, 'tcx>, cleanup::ScopeId) -> Callee<'blk, 'tcx>,
-{
-    // Introduce a temporary cleanup scope that will contain cleanups
-    // for the arguments while they are being evaluated. The purpose
-    // of this cleanup is to ensure that, should a panic occur while
-    // evaluating argument N, the values for arguments 0...N-1 are all
-    // cleaned up. If no panic occurs, the values are handed off to
-    // the callee, and hence none of the cleanups in this temporary
-    // scope will ever execute.
-    let fcx = bcx.fcx;
-    let ccx = fcx.ccx;
-    let arg_cleanup_scope = fcx.push_custom_cleanup_scope();
-
-    let callee = get_callee(bcx, cleanup::CustomScope(arg_cleanup_scope));
-    let mut bcx = callee.bcx;
-
-    let (abi, ret_ty) = match callee.ty.sty {
-        ty::TyBareFn(_, ref f) => {
-            let sig = bcx.tcx().erase_late_bound_regions(&f.sig);
-            let sig = infer::normalize_associated_type(bcx.tcx(), &sig);
-            (f.abi, sig.output)
-        }
-        _ => panic!("expected bare rust fn or closure in trans_call_inner")
-    };
-
-    let (llfn, llself) = match callee.data {
-        Fn(llfn) => {
-            (llfn, None)
-        }
-        TraitItem(d) => {
-            (d.llfn, Some(d.llself))
-        }
-        Intrinsic(node, substs) => {
-            assert!(abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic);
-            assert!(dest.is_some());
-
-            let call_info = match debug_loc {
-                DebugLoc::At(id, span) => NodeIdAndSpan { id: id, span: span },
-                DebugLoc::None => {
-                    bcx.sess().bug("No call info for intrinsic call?")
-                }
-            };
-
-            return intrinsic::trans_intrinsic_call(bcx, node, callee.ty,
-                                                   arg_cleanup_scope, args,
-                                                   dest.unwrap(), substs,
-                                                   call_info);
-        }
-        NamedTupleConstructor(disr) => {
-            assert!(dest.is_some());
-            fcx.pop_custom_cleanup_scope(arg_cleanup_scope);
-
-            return base::trans_named_tuple_constructor(bcx,
-                                                       callee.ty,
-                                                       disr,
-                                                       args,
-                                                       dest.unwrap(),
-                                                       debug_loc);
-        }
-    };
-
-    // Intrinsics should not become actual functions.
-    // We trans them in place in `trans_intrinsic_call`
-    assert!(abi != Abi::RustIntrinsic && abi != Abi::PlatformIntrinsic);
-
-    let is_rust_fn = abi == Abi::Rust || abi == Abi::RustCall;
-
-    // Generate a location to store the result. If the user does
-    // not care about the result, just make a stack slot.
-    let opt_llretslot = dest.and_then(|dest| match dest {
-        expr::SaveIn(dst) => Some(dst),
-        expr::Ignore => {
-            let ret_ty = match ret_ty {
-                ty::FnConverging(ret_ty) => ret_ty,
-                ty::FnDiverging => ccx.tcx().mk_nil()
-            };
-            if !is_rust_fn ||
-              type_of::return_uses_outptr(ccx, ret_ty) ||
-              bcx.fcx.type_needs_drop(ret_ty) {
-                // Push the out-pointer if we use an out-pointer for this
-                // return type, otherwise push "undef".
-                if common::type_is_zero_size(ccx, ret_ty) {
-                    let llty = type_of::type_of(ccx, ret_ty);
-                    Some(common::C_undef(llty.ptr_to()))
-                } else {
-                    let llresult = alloc_ty(bcx, ret_ty, "__llret");
-                    call_lifetime_start(bcx, llresult);
-                    Some(llresult)
-                }
-            } else {
-                None
-            }
-        }
-    });
-
-    let mut llresult = unsafe {
-        llvm::LLVMGetUndef(Type::nil(ccx).ptr_to().to_ref())
-    };
-
-    // The code below invokes the function, using either the Rust
-    // conventions (if it is a rust fn) or the native conventions
-    // (otherwise).  The important part is that, when all is said
-    // and done, either the return value of the function will have been
-    // written in opt_llretslot (if it is Some) or `llresult` will be
-    // set appropriately (otherwise).
-    if is_rust_fn {
-        let mut llargs = Vec::new();
-
-        if let (ty::FnConverging(ret_ty), Some(mut llretslot)) = (ret_ty, opt_llretslot) {
-            if type_of::return_uses_outptr(ccx, ret_ty) {
-                let llformal_ret_ty = type_of::type_of(ccx, ret_ty).ptr_to();
-                let llret_ty = common::val_ty(llretslot);
-                if llformal_ret_ty != llret_ty {
-                    // this could happen due to e.g. subtyping
-                    debug!("casting actual return type ({}) to match formal ({})",
-                        bcx.llty_str(llret_ty), bcx.llty_str(llformal_ret_ty));
-                    llretslot = PointerCast(bcx, llretslot, llformal_ret_ty);
-                }
-                llargs.push(llretslot);
-            }
-        }
-
-        // Push a trait object's self.
-        if let Some(llself) = llself {
-            llargs.push(llself);
-        }
-
-        // Push the arguments.
-        bcx = trans_args(bcx,
-                         args,
-                         callee.ty,
-                         &mut llargs,
-                         cleanup::CustomScope(arg_cleanup_scope),
-                         llself.is_some(),
-                         abi);
-
-        fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
-
-        // Invoke the actual rust fn and update bcx/llresult.
-        let (llret, b) = base::invoke(bcx,
-                                      llfn,
-                                      &llargs[..],
-                                      callee.ty,
-                                      debug_loc);
-        bcx = b;
-        llresult = llret;
-
-        // If the Rust convention for this type is to return via
-        // the return value, copy it into llretslot.
-        match (opt_llretslot, ret_ty) {
-            (Some(llretslot), ty::FnConverging(ret_ty)) => {
-                if !type_of::return_uses_outptr(bcx.ccx(), ret_ty) &&
-                    !common::type_is_zero_size(bcx.ccx(), ret_ty)
-                {
-                    store_ty(bcx, llret, llretslot, ret_ty)
-                }
-            }
-            (_, _) => {}
-        }
-    } else {
-        // Lang items are the only case where dest is None, and
-        // they are always Rust fns.
-        assert!(dest.is_some());
-
-        let mut llargs = Vec::new();
-        let arg_tys = match args {
-            ArgExprs(a) => a.iter().map(|x| common::expr_ty_adjusted(bcx, &x)).collect(),
-            _ => panic!("expected arg exprs.")
-        };
-        bcx = trans_args(bcx,
-                         args,
-                         callee.ty,
-                         &mut llargs,
-                         cleanup::CustomScope(arg_cleanup_scope),
-                         false,
-                         abi);
-        fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
-
-        bcx = foreign::trans_native_call(bcx,
-                                         callee.ty,
-                                         llfn,
-                                         opt_llretslot.unwrap(),
-                                         &llargs[..],
-                                         arg_tys,
-                                         debug_loc);
-    }
-
-    fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_cleanup_scope);
-
-    // If the caller doesn't care about the result of this fn call,
-    // drop the temporary slot we made.
-    match (dest, opt_llretslot, ret_ty) {
-        (Some(expr::Ignore), Some(llretslot), ty::FnConverging(ret_ty)) => {
-            // drop the value if it is not being saved.
-            bcx = glue::drop_ty(bcx,
-                                llretslot,
-                                ret_ty,
-                                debug_loc);
-            call_lifetime_end(bcx, llretslot);
-        }
-        _ => {}
-    }
-
-    if ret_ty == ty::FnDiverging {
-        Unreachable(bcx);
-    }
-
-    Result::new(bcx, llresult)
-}
-
-pub enum CallArgs<'a, 'tcx> {
-    // Supply value of arguments as a list of expressions that must be
-    // translated. This is used in the common case of `foo(bar, qux)`.
-    ArgExprs(&'a [P<hir::Expr>]),
-
-    // Supply value of arguments as a list of LLVM value refs; frequently
-    // used with lang items and so forth, when the argument is an internal
-    // value.
-    ArgVals(&'a [ValueRef]),
-
-    // For overloaded operators: `(lhs, Option(rhs, rhs_id), autoref)`. `lhs`
-    // is the left-hand-side and `rhs/rhs_id` is the datum/expr-id of
-    // the right-hand-side argument (if any). `autoref` indicates whether the `rhs`
-    // argument should be auto-referenced
-    ArgOverloadedOp(Datum<'tcx, Expr>, Option<(Datum<'tcx, Expr>, ast::NodeId)>, bool),
-
-    // Supply value of arguments as a list of expressions that must be
-    // translated, for overloaded call operators.
-    ArgOverloadedCall(Vec<&'a hir::Expr>),
-}
-
-fn trans_args_under_call_abi<'blk, 'tcx>(
-                             mut bcx: Block<'blk, 'tcx>,
-                             arg_exprs: &[P<hir::Expr>],
-                             fn_ty: Ty<'tcx>,
-                             llargs: &mut Vec<ValueRef>,
-                             arg_cleanup_scope: cleanup::ScopeId,
-                             ignore_self: bool)
-                             -> Block<'blk, 'tcx>
-{
-    let sig = bcx.tcx().erase_late_bound_regions(&fn_ty.fn_sig());
-    let sig = infer::normalize_associated_type(bcx.tcx(), &sig);
-    let args = sig.inputs;
-
-    // Translate the `self` argument first.
-    if !ignore_self {
-        let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
-        bcx = trans_arg_datum(bcx,
-                              args[0],
-                              arg_datum,
-                              arg_cleanup_scope,
-                              DontAutorefArg,
-                              llargs);
-    }
-
-    // Now untuple the rest of the arguments.
-    let tuple_expr = &arg_exprs[1];
-    let tuple_type = common::node_id_type(bcx, tuple_expr.id);
-
-    match tuple_type.sty {
-        ty::TyTuple(ref field_types) => {
-            let tuple_datum = unpack_datum!(bcx,
-                                            expr::trans(bcx, &tuple_expr));
-            let tuple_lvalue_datum =
-                unpack_datum!(bcx,
-                              tuple_datum.to_lvalue_datum(bcx,
-                                                          "args",
-                                                          tuple_expr.id));
-            let repr = adt::represent_type(bcx.ccx(), tuple_type);
-            let repr_ptr = &repr;
-            for (i, field_type) in field_types.iter().enumerate() {
-                let arg_datum = tuple_lvalue_datum.get_element(
-                    bcx,
-                    field_type,
-                    |srcval| {
-                        adt::trans_field_ptr(bcx, repr_ptr, srcval, Disr(0), i)
-                    }).to_expr_datum();
-                bcx = trans_arg_datum(bcx,
-                                      field_type,
-                                      arg_datum,
-                                      arg_cleanup_scope,
-                                      DontAutorefArg,
-                                      llargs);
-            }
-        }
-        _ => {
-            bcx.sess().span_bug(tuple_expr.span,
-                                "argument to `.call()` wasn't a tuple?!")
-        }
-    };
-
-    bcx
-}
-
-fn trans_overloaded_call_args<'blk, 'tcx>(
-                              mut bcx: Block<'blk, 'tcx>,
-                              arg_exprs: Vec<&hir::Expr>,
-                              fn_ty: Ty<'tcx>,
-                              llargs: &mut Vec<ValueRef>,
-                              arg_cleanup_scope: cleanup::ScopeId,
-                              ignore_self: bool)
-                              -> Block<'blk, 'tcx> {
-    // Translate the `self` argument first.
-    let sig = bcx.tcx().erase_late_bound_regions(&fn_ty.fn_sig());
-    let sig = infer::normalize_associated_type(bcx.tcx(), &sig);
-    let arg_tys = sig.inputs;
-
-    if !ignore_self {
-        let arg_datum = unpack_datum!(bcx, expr::trans(bcx, arg_exprs[0]));
-        bcx = trans_arg_datum(bcx,
-                              arg_tys[0],
-                              arg_datum,
-                              arg_cleanup_scope,
-                              DontAutorefArg,
-                              llargs);
-    }
-
-    // Now untuple the rest of the arguments.
-    let tuple_type = arg_tys[1];
-    match tuple_type.sty {
-        ty::TyTuple(ref field_types) => {
-            for (i, &field_type) in field_types.iter().enumerate() {
-                let arg_datum =
-                    unpack_datum!(bcx, expr::trans(bcx, arg_exprs[i + 1]));
-                bcx = trans_arg_datum(bcx,
-                                      field_type,
-                                      arg_datum,
-                                      arg_cleanup_scope,
-                                      DontAutorefArg,
-                                      llargs);
-            }
-        }
-        _ => {
-            bcx.sess().span_bug(arg_exprs[0].span,
-                                "argument to `.call()` wasn't a tuple?!")
-        }
-    };
-
-    bcx
-}
-
-pub fn trans_args<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>,
-                                  args: CallArgs<'a, 'tcx>,
-                                  fn_ty: Ty<'tcx>,
-                                  llargs: &mut Vec<ValueRef>,
-                                  arg_cleanup_scope: cleanup::ScopeId,
-                                  ignore_self: bool,
-                                  abi: Abi)
-                                  -> Block<'blk, 'tcx> {
-    debug!("trans_args(abi={})", abi);
-
-    let _icx = push_ctxt("trans_args");
-    let sig = cx.tcx().erase_late_bound_regions(&fn_ty.fn_sig());
-    let sig = infer::normalize_associated_type(cx.tcx(), &sig);
-    let arg_tys = sig.inputs;
-    let variadic = sig.variadic;
-
-    let mut bcx = cx;
-
-    // First we figure out the caller's view of the types of the arguments.
-    // This will be needed if this is a generic call, because the callee has
-    // to cast her view of the arguments to the caller's view.
-    match args {
-        ArgExprs(arg_exprs) => {
-            if abi == Abi::RustCall {
-                // This is only used for direct calls to the `call`,
-                // `call_mut` or `call_once` functions.
-                return trans_args_under_call_abi(cx,
-                                                 arg_exprs,
-                                                 fn_ty,
-                                                 llargs,
-                                                 arg_cleanup_scope,
-                                                 ignore_self)
-            }
-
-            let num_formal_args = arg_tys.len();
-            for (i, arg_expr) in arg_exprs.iter().enumerate() {
-                if i == 0 && ignore_self {
-                    continue;
-                }
-                let arg_ty = if i >= num_formal_args {
-                    assert!(variadic);
-                    common::expr_ty_adjusted(cx, &arg_expr)
-                } else {
-                    arg_tys[i]
-                };
-
-                let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_expr));
-                bcx = trans_arg_datum(bcx, arg_ty, arg_datum,
-                                      arg_cleanup_scope,
-                                      DontAutorefArg,
-                                      llargs);
-            }
-        }
-        ArgOverloadedCall(arg_exprs) => {
-            return trans_overloaded_call_args(cx,
-                                              arg_exprs,
-                                              fn_ty,
-                                              llargs,
-                                              arg_cleanup_scope,
-                                              ignore_self)
-        }
-        ArgOverloadedOp(lhs, rhs, autoref) => {
-            assert!(!variadic);
-
-            bcx = trans_arg_datum(bcx, arg_tys[0], lhs,
-                                  arg_cleanup_scope,
-                                  DontAutorefArg,
-                                  llargs);
-
-            if let Some((rhs, rhs_id)) = rhs {
-                assert_eq!(arg_tys.len(), 2);
-                bcx = trans_arg_datum(bcx, arg_tys[1], rhs,
-                                      arg_cleanup_scope,
-                                      if autoref { DoAutorefArg(rhs_id) } else { DontAutorefArg },
-                                      llargs);
-            } else {
-                assert_eq!(arg_tys.len(), 1);
-            }
-        }
-        ArgVals(vs) => {
-            llargs.extend_from_slice(vs);
-        }
-    }
-
-    bcx
-}
-
-#[derive(Copy, Clone)]
-pub enum AutorefArg {
-    DontAutorefArg,
-    DoAutorefArg(ast::NodeId)
-}
-
-pub fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                   formal_arg_ty: Ty<'tcx>,
-                                   arg_datum: Datum<'tcx, Expr>,
-                                   arg_cleanup_scope: cleanup::ScopeId,
-                                   autoref_arg: AutorefArg,
-                                   llargs: &mut Vec<ValueRef>)
-                                   -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_arg_datum");
-    let mut bcx = bcx;
-    let ccx = bcx.ccx();
-
-    debug!("trans_arg_datum({:?})",
-           formal_arg_ty);
-
-    let arg_datum_ty = arg_datum.ty;
-
-    debug!("   arg datum: {}", arg_datum.to_string(bcx.ccx()));
-
-    let mut val;
-    // FIXME(#3548) use the adjustments table
-    match autoref_arg {
-        DoAutorefArg(arg_id) => {
-            // We will pass argument by reference
-            // We want an lvalue, so that we can pass it by reference.
-            let arg_datum = unpack_datum!(
-                bcx, arg_datum.to_lvalue_datum(bcx, "arg", arg_id));
-            val = arg_datum.val;
-        }
-        DontAutorefArg if common::type_is_fat_ptr(bcx.tcx(), arg_datum_ty) &&
-                !bcx.fcx.type_needs_drop(arg_datum_ty) => {
-            val = arg_datum.val
-        }
-        DontAutorefArg => {
-            // Make this an rvalue, since we are going to be
-            // passing ownership.
-            let arg_datum = unpack_datum!(
-                bcx, arg_datum.to_rvalue_datum(bcx, "arg"));
-
-            // Now that arg_datum is owned, get it into the appropriate
-            // mode (ref vs value).
-            let arg_datum = unpack_datum!(
-                bcx, arg_datum.to_appropriate_datum(bcx));
-
-            // Technically, ownership of val passes to the callee.
-            // However, we must clean up should we panic before the
-            // callee is actually invoked.
-            val = arg_datum.add_clean(bcx.fcx, arg_cleanup_scope);
-        }
-    }
-
-    if type_of::arg_is_indirect(ccx, formal_arg_ty) && formal_arg_ty != arg_datum_ty {
-        // this could happen due to e.g. subtyping
-        let llformal_arg_ty = type_of::type_of_explicit_arg(ccx, formal_arg_ty);
-        debug!("casting actual type ({}) to match formal ({})",
-               bcx.val_to_string(val), bcx.llty_str(llformal_arg_ty));
-        debug!("Rust types: {:?}; {:?}", arg_datum_ty,
-                                     formal_arg_ty);
-        val = PointerCast(bcx, val, llformal_arg_ty);
-    }
-
-    debug!("--- trans_arg_datum passing {}", bcx.val_to_string(val));
-
-    if common::type_is_fat_ptr(bcx.tcx(), formal_arg_ty) {
-        llargs.push(Load(bcx, expr::get_dataptr(bcx, val)));
-        llargs.push(Load(bcx, expr::get_meta(bcx, val)));
-    } else {
-        llargs.push(val);
-    }
-
-    bcx
-}
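// Illustrative sketch only, not part of the deleted file: the argument
// splitting rule at the end of `trans_arg_datum` above. A fat pointer such
// as `&[T]` or a trait object is forwarded to the callee as two LLVM
// arguments (data pointer plus metadata), while any other value occupies a
// single argument slot. `LlvmValue`, `FatPtr` and `Arg` are hypothetical
// stand-ins for the real LLVM value refs.
struct LlvmValue;

struct FatPtr {
    data: LlvmValue, // pointer to the first element / the object itself
    meta: LlvmValue, // length or vtable pointer
}

enum Arg {
    Thin(LlvmValue),
    Fat(FatPtr),
}

fn push_arg(llargs: &mut Vec<LlvmValue>, arg: Arg) {
    match arg {
        Arg::Fat(p) => {
            llargs.push(p.data); // data pointer first...
            llargs.push(p.meta); // ...then the metadata, as a second argument
        }
        Arg::Thin(v) => llargs.push(v),
    }
}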
diff --git a/src/librustc_trans/trans/cleanup.rs b/src/librustc_trans/trans/cleanup.rs
deleted file mode 100644 (file)
index 683d5e0..0000000
+++ /dev/null
@@ -1,1291 +0,0 @@
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! ## The Cleanup module
-//!
-//! The cleanup module tracks what values need to be cleaned up as scopes
-//! are exited, either via panic or just normal control flow. The basic
-//! idea is that the function context maintains a stack of cleanup scopes
-//! that are pushed/popped as we traverse the AST tree. There is typically
-//! at least one cleanup scope per AST node; some AST nodes may introduce
-//! additional temporary scopes.
-//!
-//! Cleanup items can be scheduled into any of the scopes on the stack.
-//! Typically, when a scope is popped, we will also generate the code for
-//! each of its cleanups at that time. This corresponds to a normal exit
-//! from a block (for example, an expression completing evaluation
-//! successfully without panic). However, it is also possible to pop a
-//! block *without* executing its cleanups; this is typically used to
-//! guard intermediate values that must be cleaned up on panic, but not
-//! if everything goes right. See the section on custom scopes below for
-//! more details.
-//!
-//! Cleanup scopes come in three kinds:
-//!
-//! - **AST scopes:** each AST node in a function body has a corresponding
-//!   AST scope. We push the AST scope when we start generating code for an AST
-//!   node and pop it once the AST node has been fully generated.
-//! - **Loop scopes:** loops have an additional cleanup scope. Cleanups are
-//!   never scheduled into loop scopes; instead, they are used to record the
-//!   basic blocks that we should branch to when a `continue` or `break` statement
-//!   is encountered.
-//! - **Custom scopes:** custom scopes are typically used to ensure cleanup
-//!   of intermediate values.
-//!
-//! ### When to schedule cleanup
-//!
-//! Although the cleanup system is intended to *feel* fairly declarative,
-//! it's still important to time calls to `schedule_clean()` correctly.
-//! Basically, you should not schedule cleanup for memory until it has
-//! been initialized, because if an unwind should occur before the memory
-//! is fully initialized, then the cleanup will run and try to free or
-//! drop uninitialized memory. If the initialization itself produces
-//! byproducts that need to be freed, then you should use temporary custom
-//! scopes to ensure that those byproducts will get freed on unwind.  For
-//! example, an expression like `box foo()` will first allocate a box in the
-//! heap and then call `foo()` -- if `foo()` should panic, this box needs
-//! to be *shallowly* freed.
-//!
-//! ### Long-distance jumps
-//!
-//! In addition to popping a scope, which corresponds to normal control
-//! flow exiting the scope, we may also *jump out* of a scope into some
-//! earlier scope on the stack. This can occur in response to a `return`,
-//! `break`, or `continue` statement, but also in response to panic. In
-//! any of these cases, we will generate a series of cleanup blocks for
-//! each of the scopes that is exited. So, if the stack contains scopes A
-//! ... Z, and we break out of a loop whose corresponding cleanup scope is
-//! X, we would generate cleanup blocks for the cleanups in X, Y, and Z.
-//! After cleanup is done we would branch to the exit point for scope X.
-//! But if panic should occur, we would generate cleanups for all the
-//! scopes from A to Z and then resume the unwind process afterwards.
-//!
-//! To avoid generating tons of code, we cache the cleanup blocks that we
-//! create for breaks, returns, unwinds, and other jumps. Whenever a new
-//! cleanup is scheduled, though, we must clear these cached blocks. A
-//! possible improvement would be to keep the cached blocks but simply
-//! generate a new block which performs the additional cleanup and then
-//! branches to the existing cached blocks.
-//!
-//! ### AST and loop cleanup scopes
-//!
-//! AST cleanup scopes are pushed when we begin and end processing an AST
-//! node. They are used to house cleanups related to rvalue temporaries that
-//! get referenced (e.g., due to an expression like `&Foo()`). Whenever an
-//! AST scope is popped, we always trans all the cleanups, adding the cleanup
-//! code after the postdominator of the AST node.
-//!
-//! AST nodes that represent breakable loops also push a loop scope; the
-//! loop scope never has any actual cleanups; it's just used to point to
-//! the basic blocks where control should flow after a "continue" or
-//! "break" statement. Popping a loop scope never generates code.
-//!
-//! ### Custom cleanup scopes
-//!
-//! Custom cleanup scopes are used for a variety of purposes. The most
-//! common though is to handle temporary byproducts, where cleanup only
-//! needs to occur on panic. The general strategy is to push a custom
-//! cleanup scope, schedule *shallow* cleanups into the custom scope, and
-//! then pop the custom scope (without transing the cleanups) when
-//! execution succeeds normally. This way the cleanups are only trans'd on
-//! unwind, and only up until the point where execution succeeded, at
-//! which time the complete value should be stored in an lvalue or some
-//! other place where normal cleanup applies.
-//!
-//! To spell it out, here is an example. Imagine an expression `box expr`.
-//! We would basically:
-//!
-//! 1. Push a custom cleanup scope C.
-//! 2. Allocate the box.
-//! 3. Schedule a shallow free in the scope C.
-//! 4. Trans `expr` into the box.
-//! 5. Pop the scope C.
-//! 6. Return the box as an rvalue.
-//!
-//! This way, if a panic occurs while transing `expr`, the shallow free
-//! scheduled in scope C runs and hence the box will be freed. The trans
-//! code for `expr` itself is responsible for freeing any other byproducts
-//! that may be in play.
-
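// Illustrative toy model only, not part of the deleted file: the custom
// scope discipline described in the module comment above, with cleanups
// represented as strings for simplicity. Scheduled cleanups are emitted
// only when a scope is popped on the unwind / pop-and-trans path; the
// normal success path pops the scope without running them.
// `ScopeStack` and its methods are hypothetical names introduced here.
struct ScopeStack {
    stack: Vec<Vec<String>>,
}

impl ScopeStack {
    fn push_custom(&mut self) -> usize {
        self.stack.push(Vec::new());
        self.stack.len() - 1
    }
    fn schedule(&mut self, idx: usize, cleanup: &str) {
        self.stack[idx].push(cleanup.to_string());
    }
    // Success path: discard the scope; its cleanups never run.
    fn pop_without_running(&mut self, idx: usize) {
        assert_eq!(idx + 1, self.stack.len());
        self.stack.pop();
    }
    // Unwind / pop-and-trans path: return the cleanups that must be emitted.
    fn pop_and_run(&mut self, idx: usize) -> Vec<String> {
        assert_eq!(idx + 1, self.stack.len());
        self.stack.pop().unwrap()
    }
}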
-pub use self::ScopeId::*;
-pub use self::CleanupScopeKind::*;
-pub use self::EarlyExitLabel::*;
-pub use self::Heap::*;
-
-use llvm::{BasicBlockRef, ValueRef};
-use trans::base;
-use trans::build;
-use trans::common;
-use trans::common::{Block, FunctionContext, NodeIdAndSpan, LandingPad};
-use trans::datum::{Datum, Lvalue};
-use trans::debuginfo::{DebugLoc, ToDebugLoc};
-use trans::glue;
-use middle::region;
-use trans::type_::Type;
-use middle::ty::{self, Ty};
-use std::fmt;
-use syntax::ast;
-
-pub struct CleanupScope<'blk, 'tcx: 'blk> {
-    // The kind of this cleanup scope. A `CustomScopeKind` scope is
-    // a *temporary scope* that is pushed during trans to
-    // clean up miscellaneous garbage that trans may generate whose
-    // lifetime is a subset of some expression.  See module doc for
-    // more details.
-    kind: CleanupScopeKind<'blk, 'tcx>,
-
-    // Cleanups to run upon scope exit.
-    cleanups: Vec<CleanupObj<'tcx>>,
-
-    // The debug location any drop calls generated for this scope will be
-    // associated with.
-    debug_loc: DebugLoc,
-
-    cached_early_exits: Vec<CachedEarlyExit>,
-    cached_landing_pad: Option<BasicBlockRef>,
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct CustomScopeIndex {
-    index: usize
-}
-
-pub const EXIT_BREAK: usize = 0;
-pub const EXIT_LOOP: usize = 1;
-pub const EXIT_MAX: usize = 2;
-
-pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
-    CustomScopeKind,
-    AstScopeKind(ast::NodeId),
-    LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX])
-}
-
-impl<'blk, 'tcx: 'blk> fmt::Debug for CleanupScopeKind<'blk, 'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match *self {
-            CustomScopeKind => write!(f, "CustomScopeKind"),
-            AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
-            LoopScopeKind(nid, ref blks) => {
-                try!(write!(f, "LoopScopeKind({}, [", nid));
-                for blk in blks {
-                    try!(write!(f, "{:p}, ", blk));
-                }
-                write!(f, "])")
-            }
-        }
-    }
-}
-
-#[derive(Copy, Clone, PartialEq, Debug)]
-pub enum EarlyExitLabel {
-    UnwindExit(UnwindKind),
-    ReturnExit,
-    LoopExit(ast::NodeId, usize)
-}
-
-#[derive(Copy, Clone, Debug)]
-pub enum UnwindKind {
-    LandingPad,
-    CleanupPad(ValueRef),
-}
-
-#[derive(Copy, Clone)]
-pub struct CachedEarlyExit {
-    label: EarlyExitLabel,
-    cleanup_block: BasicBlockRef,
-    last_cleanup: usize,
-}
-
-pub trait Cleanup<'tcx> {
-    fn must_unwind(&self) -> bool;
-    fn is_lifetime_end(&self) -> bool;
-    fn trans<'blk>(&self,
-                   bcx: Block<'blk, 'tcx>,
-                   debug_loc: DebugLoc)
-                   -> Block<'blk, 'tcx>;
-}
-
-pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;
-
-#[derive(Copy, Clone, Debug)]
-pub enum ScopeId {
-    AstScope(ast::NodeId),
-    CustomScope(CustomScopeIndex)
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct DropHint<K>(pub ast::NodeId, pub K);
-
-pub type DropHintDatum<'tcx> = DropHint<Datum<'tcx, Lvalue>>;
-pub type DropHintValue = DropHint<ValueRef>;
-
-impl<K> DropHint<K> {
-    pub fn new(id: ast::NodeId, k: K) -> DropHint<K> { DropHint(id, k) }
-}
-
-impl DropHint<ValueRef> {
-    pub fn value(&self) -> ValueRef { self.1 }
-}
-
-pub trait DropHintMethods {
-    type ValueKind;
-    fn to_value(&self) -> Self::ValueKind;
-}
-impl<'tcx> DropHintMethods for DropHintDatum<'tcx> {
-    type ValueKind = DropHintValue;
-    fn to_value(&self) -> DropHintValue { DropHint(self.0, self.1.val) }
-}
-
-impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
-    /// Invoked when we start to trans the code contained within a new cleanup scope.
-    fn push_ast_cleanup_scope(&self, debug_loc: NodeIdAndSpan) {
-        debug!("push_ast_cleanup_scope({})",
-               self.ccx.tcx().map.node_to_string(debug_loc.id));
-
-        // FIXME(#2202) -- currently closure bodies have a parent
-        // region, which messes up the assertion below, since there
-        // are no cleanup scopes on the stack at the start of
-        // trans'ing a closure body.  I think though that this should
-        // eventually be fixed by closure bodies not having a parent
-        // region, though that's a touch unclear, and it might also be
-        // better just to narrow this assertion more (i.e., by
-        // excluding id's that correspond to closure bodies only). For
-        // now we just say that if there is already an AST scope on the stack,
-        // this new AST scope had better be its immediate child.
-        let top_scope = self.top_ast_scope();
-        let region_maps = &self.ccx.tcx().region_maps;
-        if top_scope.is_some() {
-            assert!((region_maps
-                     .opt_encl_scope(region_maps.node_extent(debug_loc.id))
-                     .map(|s|s.node_id(region_maps)) == top_scope)
-                    ||
-                    (region_maps
-                     .opt_encl_scope(region_maps.lookup_code_extent(
-                         region::CodeExtentData::DestructionScope(debug_loc.id)))
-                     .map(|s|s.node_id(region_maps)) == top_scope));
-        }
-
-        self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
-                                          debug_loc.debug_loc()));
-    }
-
-    fn push_loop_cleanup_scope(&self,
-                               id: ast::NodeId,
-                               exits: [Block<'blk, 'tcx>; EXIT_MAX]) {
-        debug!("push_loop_cleanup_scope({})",
-               self.ccx.tcx().map.node_to_string(id));
-        assert_eq!(Some(id), self.top_ast_scope());
-
-        // Just copy the debuginfo source location from the enclosing scope
-        let debug_loc = self.scopes
-                            .borrow()
-                            .last()
-                            .unwrap()
-                            .debug_loc;
-
-        self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
-    }
-
-    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
-        let index = self.scopes_len();
-        debug!("push_custom_cleanup_scope(): {}", index);
-
-        // Just copy the debuginfo source location from the enclosing scope
-        let debug_loc = self.scopes
-                            .borrow()
-                            .last()
-                            .map(|opt_scope| opt_scope.debug_loc)
-                            .unwrap_or(DebugLoc::None);
-
-        self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
-        CustomScopeIndex { index: index }
-    }
-
-    fn push_custom_cleanup_scope_with_debug_loc(&self,
-                                                debug_loc: NodeIdAndSpan)
-                                                -> CustomScopeIndex {
-        let index = self.scopes_len();
-        debug!("push_custom_cleanup_scope(): {}", index);
-
-        self.push_scope(CleanupScope::new(CustomScopeKind,
-                                          debug_loc.debug_loc()));
-        CustomScopeIndex { index: index }
-    }
-
-    /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
-    /// stack, and generates the code to do its cleanups for normal exit.
-    fn pop_and_trans_ast_cleanup_scope(&self,
-                                       bcx: Block<'blk, 'tcx>,
-                                       cleanup_scope: ast::NodeId)
-                                       -> Block<'blk, 'tcx> {
-        debug!("pop_and_trans_ast_cleanup_scope({})",
-               self.ccx.tcx().map.node_to_string(cleanup_scope));
-
-        assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
-
-        let scope = self.pop_scope();
-        self.trans_scope_cleanups(bcx, &scope)
-    }
-
-    /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
-    /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
-    /// branching to a block generated by `normal_exit_block`.
-    fn pop_loop_cleanup_scope(&self,
-                              cleanup_scope: ast::NodeId) {
-        debug!("pop_loop_cleanup_scope({})",
-               self.ccx.tcx().map.node_to_string(cleanup_scope));
-
-        assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
-
-        let _ = self.pop_scope();
-    }
-
-    /// Removes the top cleanup scope from the stack without executing its cleanups. The top
-    /// cleanup scope must be the temporary scope `custom_scope`.
-    fn pop_custom_cleanup_scope(&self,
-                                custom_scope: CustomScopeIndex) {
-        debug!("pop_custom_cleanup_scope({})", custom_scope.index);
-        assert!(self.is_valid_to_pop_custom_scope(custom_scope));
-        let _ = self.pop_scope();
-    }
-
-    /// Removes the top cleanup scope from the stack, which must be a temporary scope, and
-    /// generates the code to do its cleanups for normal exit.
-    fn pop_and_trans_custom_cleanup_scope(&self,
-                                          bcx: Block<'blk, 'tcx>,
-                                          custom_scope: CustomScopeIndex)
-                                          -> Block<'blk, 'tcx> {
-        debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
-        assert!(self.is_valid_to_pop_custom_scope(custom_scope));
-
-        let scope = self.pop_scope();
-        self.trans_scope_cleanups(bcx, &scope)
-    }
-
-    /// Returns the id of the top-most loop scope
-    fn top_loop_scope(&self) -> ast::NodeId {
-        for scope in self.scopes.borrow().iter().rev() {
-            if let LoopScopeKind(id, _) = scope.kind {
-                return id;
-            }
-        }
-        self.ccx.sess().bug("no loop scope found");
-    }
-
-    /// Returns a block to branch to which will perform all pending cleanups and
-    /// then break/continue (depending on `exit`) out of the loop with id
-    /// `cleanup_scope`
-    fn normal_exit_block(&'blk self,
-                         cleanup_scope: ast::NodeId,
-                         exit: usize) -> BasicBlockRef {
-        self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
-    }
-
-    /// Returns a block to branch to which will perform all pending cleanups and
-    /// then return from this function
-    fn return_exit_block(&'blk self) -> BasicBlockRef {
-        self.trans_cleanups_to_exit_scope(ReturnExit)
-    }
-
-    fn schedule_lifetime_end(&self,
-                             cleanup_scope: ScopeId,
-                             val: ValueRef) {
-        let drop = box LifetimeEnd {
-            ptr: val,
-        };
-
-        debug!("schedule_lifetime_end({:?}, val={})",
-               cleanup_scope,
-               self.ccx.tn().val_to_string(val));
-
-        self.schedule_clean(cleanup_scope, drop as CleanupObj);
-    }
-
-    /// Schedules a (deep) drop of `val`, which is a pointer to an instance of
-    /// `ty`
-    fn schedule_drop_mem(&self,
-                         cleanup_scope: ScopeId,
-                         val: ValueRef,
-                         ty: Ty<'tcx>,
-                         drop_hint: Option<DropHintDatum<'tcx>>) {
-        if !self.type_needs_drop(ty) { return; }
-        let drop_hint = drop_hint.map(|hint|hint.to_value());
-        let drop = box DropValue {
-            is_immediate: false,
-            val: val,
-            ty: ty,
-            fill_on_drop: false,
-            skip_dtor: false,
-            drop_hint: drop_hint,
-        };
-
-        debug!("schedule_drop_mem({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}",
-               cleanup_scope,
-               self.ccx.tn().val_to_string(val),
-               ty,
-               drop.fill_on_drop,
-               drop.skip_dtor);
-
-        self.schedule_clean(cleanup_scope, drop as CleanupObj);
-    }
-
-    /// Schedules a (deep) drop and filling of `val`, which is a pointer to an instance of `ty`
-    fn schedule_drop_and_fill_mem(&self,
-                                  cleanup_scope: ScopeId,
-                                  val: ValueRef,
-                                  ty: Ty<'tcx>,
-                                  drop_hint: Option<DropHintDatum<'tcx>>) {
-        if !self.type_needs_drop(ty) { return; }
-
-        let drop_hint = drop_hint.map(|datum|datum.to_value());
-        let drop = box DropValue {
-            is_immediate: false,
-            val: val,
-            ty: ty,
-            fill_on_drop: true,
-            skip_dtor: false,
-            drop_hint: drop_hint,
-        };
-
-        debug!("schedule_drop_and_fill_mem({:?}, val={}, ty={:?}, \
-                fill_on_drop={}, skip_dtor={}, has_drop_hint={})",
-               cleanup_scope,
-               self.ccx.tn().val_to_string(val),
-               ty,
-               drop.fill_on_drop,
-               drop.skip_dtor,
-               drop_hint.is_some());
-
-        self.schedule_clean(cleanup_scope, drop as CleanupObj);
-    }
-
-    /// Issue #23611: Schedules a (deep) drop of the contents of
-    /// `val`, which is a pointer to an instance of struct/enum type
-    /// `ty`. The scheduled code handles extracting the discriminant
-    /// and dropping the contents associated with that variant
-    /// *without* executing any associated drop implementation.
-    fn schedule_drop_adt_contents(&self,
-                                  cleanup_scope: ScopeId,
-                                  val: ValueRef,
-                                  ty: Ty<'tcx>) {
-        // The `if` below could be `!contents_needs_drop`; skipping the drop
-        // is just an optimization, so it is sound to be conservative.
-        if !self.type_needs_drop(ty) { return; }
-
-        let drop = box DropValue {
-            is_immediate: false,
-            val: val,
-            ty: ty,
-            fill_on_drop: false,
-            skip_dtor: true,
-            drop_hint: None,
-        };
-
-        debug!("schedule_drop_adt_contents({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}",
-               cleanup_scope,
-               self.ccx.tn().val_to_string(val),
-               ty,
-               drop.fill_on_drop,
-               drop.skip_dtor);
-
-        self.schedule_clean(cleanup_scope, drop as CleanupObj);
-    }
-
-    /// Schedules a (deep) drop of `val`, which is an instance of `ty`
-    fn schedule_drop_immediate(&self,
-                               cleanup_scope: ScopeId,
-                               val: ValueRef,
-                               ty: Ty<'tcx>) {
-
-        if !self.type_needs_drop(ty) { return; }
-        let drop = Box::new(DropValue {
-            is_immediate: true,
-            val: val,
-            ty: ty,
-            fill_on_drop: false,
-            skip_dtor: false,
-            drop_hint: None,
-        });
-
-        debug!("schedule_drop_immediate({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}",
-               cleanup_scope,
-               self.ccx.tn().val_to_string(val),
-               ty,
-               drop.fill_on_drop,
-               drop.skip_dtor);
-
-        self.schedule_clean(cleanup_scope, drop as CleanupObj);
-    }
-
-    /// Schedules a call to `free(val)`. Note that this is a shallow operation.
-    fn schedule_free_value(&self,
-                           cleanup_scope: ScopeId,
-                           val: ValueRef,
-                           heap: Heap,
-                           content_ty: Ty<'tcx>) {
-        let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
-
-        debug!("schedule_free_value({:?}, val={}, heap={:?})",
-               cleanup_scope,
-               self.ccx.tn().val_to_string(val),
-               heap);
-
-        self.schedule_clean(cleanup_scope, drop as CleanupObj);
-    }
-
-    fn schedule_clean(&self,
-                      cleanup_scope: ScopeId,
-                      cleanup: CleanupObj<'tcx>) {
-        match cleanup_scope {
-            AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
-            CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
-        }
-    }
-
-    /// Schedules a cleanup to occur upon exit from the AST scope with id `cleanup_scope`,
-    /// which must be somewhere on the cleanup stack.
-    fn schedule_clean_in_ast_scope(&self,
-                                   cleanup_scope: ast::NodeId,
-                                   cleanup: CleanupObj<'tcx>) {
-        debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
-               cleanup_scope);
-
-        for scope in self.scopes.borrow_mut().iter_mut().rev() {
-            if scope.kind.is_ast_with_id(cleanup_scope) {
-                scope.cleanups.push(cleanup);
-                scope.cached_landing_pad = None;
-                return;
-            } else {
-                // will be adding a cleanup to some enclosing scope
-                scope.clear_cached_exits();
-            }
-        }
-
-        self.ccx.sess().bug(
-            &format!("no cleanup scope {} found",
-                    self.ccx.tcx().map.node_to_string(cleanup_scope)));
-    }
-
-    /// Schedules a cleanup to occur in the custom (temporary) scope `custom_scope`.
-    fn schedule_clean_in_custom_scope(&self,
-                                      custom_scope: CustomScopeIndex,
-                                      cleanup: CleanupObj<'tcx>) {
-        debug!("schedule_clean_in_custom_scope(custom_scope={})",
-               custom_scope.index);
-
-        assert!(self.is_valid_custom_scope(custom_scope));
-
-        let mut scopes = self.scopes.borrow_mut();
-        let scope = &mut (*scopes)[custom_scope.index];
-        scope.cleanups.push(cleanup);
-        scope.cached_landing_pad = None;
-    }
-
-    /// Returns true if there are pending cleanups that should execute on panic.
-    fn needs_invoke(&self) -> bool {
-        self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
-    }
-
-    /// Returns a basic block to branch to in the event of a panic. This block
-    /// will run the panic cleanups and eventually resume the exception that
-    /// caused the landing pad to be run.
-    fn get_landing_pad(&'blk self) -> BasicBlockRef {
-        let _icx = base::push_ctxt("get_landing_pad");
-
-        debug!("get_landing_pad");
-
-        let orig_scopes_len = self.scopes_len();
-        assert!(orig_scopes_len > 0);
-
-        // Remove any scopes that do not have cleanups on panic:
-        let mut popped_scopes = vec!();
-        while !self.top_scope(|s| s.needs_invoke()) {
-            debug!("top scope does not need invoke");
-            popped_scopes.push(self.pop_scope());
-        }
-
-        // Check for an existing landing pad in the new topmost scope:
-        let llbb = self.get_or_create_landing_pad();
-
-        // Push the scopes we removed back on:
-        while let Some(scope) = popped_scopes.pop() {
-            self.push_scope(scope);
-        }
-
-        assert_eq!(self.scopes_len(), orig_scopes_len);
-
-        return llbb;
-    }
-}
-
-impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
-    /// Returns the id of the current top-most AST scope, if any.
-    fn top_ast_scope(&self) -> Option<ast::NodeId> {
-        for scope in self.scopes.borrow().iter().rev() {
-            match scope.kind {
-                CustomScopeKind | LoopScopeKind(..) => {}
-                AstScopeKind(i) => {
-                    return Some(i);
-                }
-            }
-        }
-        None
-    }
-
-    fn top_nonempty_cleanup_scope(&self) -> Option<usize> {
-        self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
-    }
-
-    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
-        self.is_valid_custom_scope(custom_scope) &&
-            custom_scope.index == self.scopes.borrow().len() - 1
-    }
-
-    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
-        let scopes = self.scopes.borrow();
-        custom_scope.index < scopes.len() &&
-            (*scopes)[custom_scope.index].kind.is_temp()
-    }
-
-    /// Generates the cleanups for `scope` into `bcx`
-    fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
-                            bcx: Block<'blk, 'tcx>,
-                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
-
-        let mut bcx = bcx;
-        if !bcx.unreachable.get() {
-            for cleanup in scope.cleanups.iter().rev() {
-                bcx = cleanup.trans(bcx, scope.debug_loc);
-            }
-        }
-        bcx
-    }
-
-    fn scopes_len(&self) -> usize {
-        self.scopes.borrow().len()
-    }
-
-    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
-        self.scopes.borrow_mut().push(scope)
-    }
-
-    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
-        debug!("popping cleanup scope {}, {} scopes remaining",
-               self.top_scope(|s| s.block_name("")),
-               self.scopes_len() - 1);
-
-        self.scopes.borrow_mut().pop().unwrap()
-    }
-
-    fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R {
-        f(self.scopes.borrow().last().unwrap())
-    }
-
-    /// Used when the caller wishes to jump to an early exit, such as a return,
-    /// break, continue, or unwind. This function will generate all cleanups
-    /// between the top of the stack and the exit `label` and return a basic
-    /// block that the caller can branch to.
-    ///
-    /// For example, if the current stack of cleanups were as follows:
-    ///
-    ///      AST 22
-    ///      Custom 1
-    ///      AST 23
-    ///      Loop 23
-    ///      Custom 2
-    ///      AST 24
-    ///
-    /// and the `label` specifies a break from `Loop 23`, then this function
-    /// would generate a series of basic blocks as follows:
-    ///
-    ///      Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
-    ///
-    /// where `break_blk` is the block specified in `Loop 23` as the target for
-    /// breaks. The return value would be the first basic block in that sequence
-    /// (`Cleanup(AST 24)`). The caller could then branch to `Cleanup(AST 24)`
-    /// and it will perform all cleanups and finally branch to the `break_blk`.
-    fn trans_cleanups_to_exit_scope(&'blk self,
-                                    label: EarlyExitLabel)
-                                    -> BasicBlockRef {
-        debug!("trans_cleanups_to_exit_scope label={:?} scopes={}",
-               label, self.scopes_len());
-
-        let orig_scopes_len = self.scopes_len();
-        let mut prev_llbb;
-        let mut popped_scopes = vec!();
-        let mut skip = 0;
-
-        // First we pop off all the cleanup scopes that are
-        // traversed until the exit is reached, pushing them
-        // onto the side vector `popped_scopes`. No code is
-        // generated at this time.
-        //
-        // So, continuing the example from above, we would wind up
-        // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
-        // (Presuming that there are no cached exits)
-        loop {
-            if self.scopes_len() == 0 {
-                match label {
-                    UnwindExit(val) => {
-                        // Generate a block that will resume unwinding to the
-                        // calling function
-                        let bcx = self.new_block("resume", None);
-                        match val {
-                            UnwindKind::LandingPad => {
-                                let addr = self.landingpad_alloca.get()
-                                               .unwrap();
-                                let lp = build::Load(bcx, addr);
-                                base::call_lifetime_end(bcx, addr);
-                                base::trans_unwind_resume(bcx, lp);
-                            }
-                            UnwindKind::CleanupPad(_) => {
-                                let pad = build::CleanupPad(bcx, None, &[]);
-                                build::CleanupRet(bcx, pad, None);
-                            }
-                        }
-                        prev_llbb = bcx.llbb;
-                        break;
-                    }
-
-                    ReturnExit => {
-                        prev_llbb = self.get_llreturn();
-                        break
-                    }
-
-                    LoopExit(id, _) => {
-                        self.ccx.sess().bug(&format!(
-                                "cannot exit from scope {}, \
-                                not in scope", id));
-                    }
-                }
-            }
-
-            // Pop off the scope, since we may be generating
-            // unwinding code for it.
-            let top_scope = self.pop_scope();
-            let cached_exit = top_scope.cached_early_exit(label);
-            popped_scopes.push(top_scope);
-
-            // Check if we have already cached the unwinding of this
-            // scope for this label. If so, we can stop popping scopes
-            // and branch to the cached label, since it contains the
-            // cleanups for any subsequent scopes.
-            if let Some((exit, last_cleanup)) = cached_exit {
-                prev_llbb = exit;
-                skip = last_cleanup;
-                break;
-            }
-
-            // If we are searching for a loop exit,
-            // and this scope is that loop, then stop popping and set
-            // `prev_llbb` to the appropriate exit block from the loop.
-            let scope = popped_scopes.last().unwrap();
-            match label {
-                UnwindExit(..) | ReturnExit => { }
-                LoopExit(id, exit) => {
-                    if let Some(exit) = scope.kind.early_exit_block(id, exit) {
-                        prev_llbb = exit;
-                        break
-                    }
-                }
-            }
-        }
-
-        debug!("trans_cleanups_to_exit_scope: popped {} scopes",
-               popped_scopes.len());
-
-        // Now push the popped scopes back on. As we go,
-        // we track in `prev_llbb` the exit to which this scope
-        // should branch when it's done.
-        //
-        // So, continuing with our example, we will start out with
-        // `prev_llbb` being set to `break_blk` (or possibly a cached
-        // early exit). We will then pop the scopes from `popped_scopes`
-        // and generate a basic block for each one, prepending it in the
-        // series and updating `prev_llbb`. So we begin by popping `Custom 2`
-        // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
-        // branch to `prev_llbb == break_blk`, giving us a sequence like:
-        //
-        //     Cleanup(Custom 2) -> prev_llbb
-        //
-        // We then pop `AST 24` and repeat the process, giving us the sequence:
-        //
-        //     Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
-        //
-        // At this point, `popped_scopes` is empty, and so the final block
-        // that we return to the user is `Cleanup(AST 24)`.
-        while let Some(mut scope) = popped_scopes.pop() {
-            if !scope.cleanups.is_empty() {
-                let name = scope.block_name("clean");
-                debug!("generating cleanups for {}", name);
-
-                let bcx_in = self.new_block(&name[..], None);
-                let exit_label = label.start(bcx_in);
-                let mut bcx_out = bcx_in;
-                let len = scope.cleanups.len();
-                for cleanup in scope.cleanups.iter().rev().take(len - skip) {
-                    bcx_out = cleanup.trans(bcx_out, scope.debug_loc);
-                }
-                skip = 0;
-                exit_label.branch(bcx_out, prev_llbb);
-                prev_llbb = bcx_in.llbb;
-
-                scope.add_cached_early_exit(exit_label, prev_llbb, len);
-            }
-            self.push_scope(scope);
-        }
-
-        debug!("trans_cleanups_to_exit_scope: prev_llbb={:?}", prev_llbb);
-
-        assert_eq!(self.scopes_len(), orig_scopes_len);
-        prev_llbb
-    }
-
-    /// Creates a landing pad for the top scope, if one does not exist.  The
-    /// landing pad will perform all cleanups necessary for an unwind and then
-    /// `resume` to continue error propagation:
-    ///
-    ///     landing_pad -> ... cleanups ... -> [resume]
-    ///
-    /// (The cleanups and resume instruction are created by
-    /// `trans_cleanups_to_exit_scope()`, not in this function itself.)
-    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
-        let pad_bcx;
-
-        debug!("get_or_create_landing_pad");
-
-        // Check if a landing pad block exists; if not, create one.
-        {
-            let mut scopes = self.scopes.borrow_mut();
-            let last_scope = scopes.last_mut().unwrap();
-            match last_scope.cached_landing_pad {
-                Some(llbb) => return llbb,
-                None => {
-                    let name = last_scope.block_name("unwind");
-                    pad_bcx = self.new_block(&name[..], None);
-                    last_scope.cached_landing_pad = Some(pad_bcx.llbb);
-                }
-            }
-        };
-
-        let llpersonality = pad_bcx.fcx.eh_personality();
-
-        let val = if base::wants_msvc_seh(self.ccx.sess()) {
-            // A cleanup pad requires a personality function to be specified, so
-            // we do that here explicitly (in the non-MSVC branch below this happens
-            // implicitly through creation of the landingpad instruction). We then
-            // create a cleanuppad instruction with no filters, so that cleanups run
-            // on all exceptions.
-            build::SetPersonalityFn(pad_bcx, llpersonality);
-            let llretval = build::CleanupPad(pad_bcx, None, &[]);
-            UnwindKind::CleanupPad(llretval)
-        } else {
-            // The landing pad return type (the type being propagated). Not sure
-            // what this represents but it's determined by the personality
-            // function and this is what the EH proposal example uses.
-            let llretty = Type::struct_(self.ccx,
-                                        &[Type::i8p(self.ccx), Type::i32(self.ccx)],
-                                        false);
-
-            // The only landing pad clause will be 'cleanup'
-            let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1);
-
-            // The landing pad block is a cleanup
-            build::SetCleanup(pad_bcx, llretval);
-
-            let addr = match self.landingpad_alloca.get() {
-                Some(addr) => addr,
-                None => {
-                    let addr = base::alloca(pad_bcx, common::val_ty(llretval),
-                                            "");
-                    base::call_lifetime_start(pad_bcx, addr);
-                    self.landingpad_alloca.set(Some(addr));
-                    addr
-                }
-            };
-            build::Store(pad_bcx, llretval, addr);
-            UnwindKind::LandingPad
-        };
-
-        // Generate the cleanup block and branch to it.
-        let label = UnwindExit(val);
-        let cleanup_llbb = self.trans_cleanups_to_exit_scope(label);
-        label.branch(pad_bcx, cleanup_llbb);
-
-        return pad_bcx.llbb;
-    }
-}
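-
-// Editor's note: a minimal, self-contained sketch of the early-exit chaining that
-// `trans_cleanups_to_exit_scope` documents above. It is illustrative only: scopes
-// are plain strings and "generating a cleanup block" is just collecting a label,
-// so none of the real FunctionContext/CleanupScope machinery is involved.
-#[allow(dead_code)]
-fn early_exit_chain_sketch(scope_stack: &[&str], loop_scope: &str) -> Vec<String> {
-    // Walk the scope stack from the innermost scope down to the loop being
-    // exited, emitting one cleanup label per scope. For the stack used in the
-    // doc comment this yields: Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk.
-    let mut chain = Vec::new();
-    for scope in scope_stack.iter().rev() {
-        if *scope == loop_scope {
-            break;
-        }
-        chain.push(format!("Cleanup({})", scope));
-    }
-    chain.push("break_blk".to_string());
-    chain
-}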
-
-impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
-    fn new(kind: CleanupScopeKind<'blk, 'tcx>,
-           debug_loc: DebugLoc)
-        -> CleanupScope<'blk, 'tcx> {
-        CleanupScope {
-            kind: kind,
-            debug_loc: debug_loc,
-            cleanups: vec!(),
-            cached_early_exits: vec!(),
-            cached_landing_pad: None,
-        }
-    }
-
-    fn clear_cached_exits(&mut self) {
-        self.cached_early_exits = vec!();
-        self.cached_landing_pad = None;
-    }
-
-    fn cached_early_exit(&self,
-                         label: EarlyExitLabel)
-                         -> Option<(BasicBlockRef, usize)> {
-        self.cached_early_exits.iter().rev().
-            find(|e| e.label == label).
-            map(|e| (e.cleanup_block, e.last_cleanup))
-    }
-
-    fn add_cached_early_exit(&mut self,
-                             label: EarlyExitLabel,
-                             blk: BasicBlockRef,
-                             last_cleanup: usize) {
-        self.cached_early_exits.push(
-            CachedEarlyExit { label: label,
-                              cleanup_block: blk,
-                              last_cleanup: last_cleanup});
-    }
-
-    /// True if this scope has cleanups that need unwinding
-    fn needs_invoke(&self) -> bool {
-
-        self.cached_landing_pad.is_some() ||
-            self.cleanups.iter().any(|c| c.must_unwind())
-    }
-
-    /// Returns a suitable name to use for the basic block that handles this cleanup scope
-    fn block_name(&self, prefix: &str) -> String {
-        match self.kind {
-            CustomScopeKind => format!("{}_custom_", prefix),
-            AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
-            LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
-        }
-    }
-
-    /// Manipulate cleanup scope for call arguments. Conceptually, each
-    /// argument to a call is an lvalue, and performing the call moves each
-    /// of the arguments into a new rvalue (which gets cleaned up by the
-    /// callee). As an optimization, instead of actually performing all of
-    /// those moves, trans just manipulates the cleanup scope to obtain the
-    /// same effect.
-    pub fn drop_non_lifetime_clean(&mut self) {
-        self.cleanups.retain(|c| c.is_lifetime_end());
-        self.clear_cached_exits();
-    }
-}
-
-impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
-    fn is_temp(&self) -> bool {
-        match *self {
-            CustomScopeKind => true,
-            LoopScopeKind(..) | AstScopeKind(..) => false,
-        }
-    }
-
-    fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
-        match *self {
-            CustomScopeKind | LoopScopeKind(..) => false,
-            AstScopeKind(i) => i == id
-        }
-    }
-
-    fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
-        match *self {
-            CustomScopeKind | AstScopeKind(..) => false,
-            LoopScopeKind(i, _) => i == id
-        }
-    }
-
-    /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
-    fn early_exit_block(&self,
-                        id: ast::NodeId,
-                        exit: usize) -> Option<BasicBlockRef> {
-        match *self {
-            LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
-            _ => None,
-        }
-    }
-}
-
-impl EarlyExitLabel {
-    /// Generates a branch going from `from_bcx` to `to_llbb` where `self` is
-    /// the exit label attached to the start of `from_bcx`.
-    ///
-    /// Transitions from an exit label to other exit labels depend on the type
-    /// of label. For example with MSVC exceptions unwind exit labels will use
-    /// the `cleanupret` instruction instead of the `br` instruction.
-    fn branch(&self, from_bcx: Block, to_llbb: BasicBlockRef) {
-        if let UnwindExit(UnwindKind::CleanupPad(pad)) = *self {
-            build::CleanupRet(from_bcx, pad, Some(to_llbb));
-        } else {
-            build::Br(from_bcx, to_llbb, DebugLoc::None);
-        }
-    }
-
-    /// Generates the necessary instructions at the start of `bcx` to prepare
-    /// for the same kind of early exit label that `self` is.
-    ///
-    /// This function will appropriately configure `bcx` based on the kind of
-    /// label this is. For UnwindExit labels, the `lpad` field of the block will
-    /// be set to `Some`, and for MSVC exceptions this function will generate a
-    /// `cleanuppad` instruction at the start of the block so it may be jumped
-    /// to in the future (e.g. so this block can be cached as an early exit).
-    ///
-    /// Returns a new label which can be used to cache `bcx` in the list of
-    /// early exits.
-    fn start(&self, bcx: Block) -> EarlyExitLabel {
-        match *self {
-            UnwindExit(UnwindKind::CleanupPad(..)) => {
-                let pad = build::CleanupPad(bcx, None, &[]);
-                bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::msvc(pad))));
-                UnwindExit(UnwindKind::CleanupPad(pad))
-            }
-            UnwindExit(UnwindKind::LandingPad) => {
-                bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::gnu())));
-                *self
-            }
-            label => label,
-        }
-    }
-}
-
-impl PartialEq for UnwindKind {
-    fn eq(&self, val: &UnwindKind) -> bool {
-        match (*self, *val) {
-            (UnwindKind::LandingPad, UnwindKind::LandingPad) |
-            (UnwindKind::CleanupPad(..), UnwindKind::CleanupPad(..)) => true,
-            _ => false,
-        }
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Cleanup types
-
-#[derive(Copy, Clone)]
-pub struct DropValue<'tcx> {
-    is_immediate: bool,
-    val: ValueRef,
-    ty: Ty<'tcx>,
-    fill_on_drop: bool,
-    skip_dtor: bool,
-    drop_hint: Option<DropHintValue>,
-}
-
-impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
-    fn must_unwind(&self) -> bool {
-        true
-    }
-
-    fn is_lifetime_end(&self) -> bool {
-        false
-    }
-
-    fn trans<'blk>(&self,
-                   bcx: Block<'blk, 'tcx>,
-                   debug_loc: DebugLoc)
-                   -> Block<'blk, 'tcx> {
-        let skip_dtor = self.skip_dtor;
-        let _icx = if skip_dtor {
-            base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=true")
-        } else {
-            base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=false")
-        };
-        let bcx = if self.is_immediate {
-            glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
-        } else {
-            glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor, self.drop_hint)
-        };
-        if self.fill_on_drop {
-            base::drop_done_fill_mem(bcx, self.val, self.ty);
-        }
-        bcx
-    }
-}
-
-#[derive(Copy, Clone, Debug)]
-pub enum Heap {
-    HeapExchange
-}
-
-#[derive(Copy, Clone)]
-pub struct FreeValue<'tcx> {
-    ptr: ValueRef,
-    heap: Heap,
-    content_ty: Ty<'tcx>
-}
-
-impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
-    fn must_unwind(&self) -> bool {
-        true
-    }
-
-    fn is_lifetime_end(&self) -> bool {
-        false
-    }
-
-    fn trans<'blk>(&self,
-                   bcx: Block<'blk, 'tcx>,
-                   debug_loc: DebugLoc)
-                   -> Block<'blk, 'tcx> {
-        match self.heap {
-            HeapExchange => {
-                glue::trans_exchange_free_ty(bcx,
-                                             self.ptr,
-                                             self.content_ty,
-                                             debug_loc)
-            }
-        }
-    }
-}
-
-#[derive(Copy, Clone)]
-pub struct LifetimeEnd {
-    ptr: ValueRef,
-}
-
-impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
-    fn must_unwind(&self) -> bool {
-        false
-    }
-
-    fn is_lifetime_end(&self) -> bool {
-        true
-    }
-
-    fn trans<'blk>(&self,
-                   bcx: Block<'blk, 'tcx>,
-                   debug_loc: DebugLoc)
-                   -> Block<'blk, 'tcx> {
-        debug_loc.apply(bcx.fcx);
-        base::call_lifetime_end(bcx, self.ptr);
-        bcx
-    }
-}
-
-pub fn temporary_scope(tcx: &ty::ctxt,
-                       id: ast::NodeId)
-                       -> ScopeId {
-    match tcx.region_maps.temporary_scope(id) {
-        Some(scope) => {
-            let r = AstScope(scope.node_id(&tcx.region_maps));
-            debug!("temporary_scope({}) = {:?}", id, r);
-            r
-        }
-        None => {
-            tcx.sess.bug(&format!("no temporary scope available for expr {}",
-                                 id))
-        }
-    }
-}
-
-pub fn var_scope(tcx: &ty::ctxt,
-                 id: ast::NodeId)
-                 -> ScopeId {
-    let r = AstScope(tcx.region_maps.var_scope(id).node_id(&tcx.region_maps));
-    debug!("var_scope({}) = {:?}", id, r);
-    r
-}
-
-///////////////////////////////////////////////////////////////////////////
-// These traits just exist to put the methods into this file.
-
-pub trait CleanupMethods<'blk, 'tcx> {
-    fn push_ast_cleanup_scope(&self, id: NodeIdAndSpan);
-    fn push_loop_cleanup_scope(&self,
-                               id: ast::NodeId,
-                               exits: [Block<'blk, 'tcx>; EXIT_MAX]);
-    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
-    fn push_custom_cleanup_scope_with_debug_loc(&self,
-                                                debug_loc: NodeIdAndSpan)
-                                                -> CustomScopeIndex;
-    fn pop_and_trans_ast_cleanup_scope(&self,
-                                       bcx: Block<'blk, 'tcx>,
-                                       cleanup_scope: ast::NodeId)
-                                       -> Block<'blk, 'tcx>;
-    fn pop_loop_cleanup_scope(&self,
-                              cleanup_scope: ast::NodeId);
-    fn pop_custom_cleanup_scope(&self,
-                                custom_scope: CustomScopeIndex);
-    fn pop_and_trans_custom_cleanup_scope(&self,
-                                          bcx: Block<'blk, 'tcx>,
-                                          custom_scope: CustomScopeIndex)
-                                          -> Block<'blk, 'tcx>;
-    fn top_loop_scope(&self) -> ast::NodeId;
-    fn normal_exit_block(&'blk self,
-                         cleanup_scope: ast::NodeId,
-                         exit: usize) -> BasicBlockRef;
-    fn return_exit_block(&'blk self) -> BasicBlockRef;
-    fn schedule_lifetime_end(&self,
-                         cleanup_scope: ScopeId,
-                         val: ValueRef);
-    fn schedule_drop_mem(&self,
-                         cleanup_scope: ScopeId,
-                         val: ValueRef,
-                         ty: Ty<'tcx>,
-                         drop_hint: Option<DropHintDatum<'tcx>>);
-    fn schedule_drop_and_fill_mem(&self,
-                                  cleanup_scope: ScopeId,
-                                  val: ValueRef,
-                                  ty: Ty<'tcx>,
-                                  drop_hint: Option<DropHintDatum<'tcx>>);
-    fn schedule_drop_adt_contents(&self,
-                                  cleanup_scope: ScopeId,
-                                  val: ValueRef,
-                                  ty: Ty<'tcx>);
-    fn schedule_drop_immediate(&self,
-                               cleanup_scope: ScopeId,
-                               val: ValueRef,
-                               ty: Ty<'tcx>);
-    fn schedule_free_value(&self,
-                           cleanup_scope: ScopeId,
-                           val: ValueRef,
-                           heap: Heap,
-                           content_ty: Ty<'tcx>);
-    fn schedule_clean(&self,
-                      cleanup_scope: ScopeId,
-                      cleanup: CleanupObj<'tcx>);
-    fn schedule_clean_in_ast_scope(&self,
-                                   cleanup_scope: ast::NodeId,
-                                   cleanup: CleanupObj<'tcx>);
-    fn schedule_clean_in_custom_scope(&self,
-                                    custom_scope: CustomScopeIndex,
-                                    cleanup: CleanupObj<'tcx>);
-    fn needs_invoke(&self) -> bool;
-    fn get_landing_pad(&'blk self) -> BasicBlockRef;
-}
-
-trait CleanupHelperMethods<'blk, 'tcx> {
-    fn top_ast_scope(&self) -> Option<ast::NodeId>;
-    fn top_nonempty_cleanup_scope(&self) -> Option<usize>;
-    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
-    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
-    fn trans_scope_cleanups(&self,
-                            bcx: Block<'blk, 'tcx>,
-                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
-    fn trans_cleanups_to_exit_scope(&'blk self,
-                                    label: EarlyExitLabel)
-                                    -> BasicBlockRef;
-    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
-    fn scopes_len(&self) -> usize;
-    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
-    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
-    fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R;
-}
diff --git a/src/librustc_trans/trans/closure.rs b/src/librustc_trans/trans/closure.rs
deleted file mode 100644 (file)
index b1db196..0000000
+++ /dev/null
@@ -1,429 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use arena::TypedArena;
-use back::link::{self, mangle_internal_name_by_path_and_seq};
-use llvm::{ValueRef, get_params};
-use middle::def_id::DefId;
-use middle::infer;
-use trans::adt;
-use trans::attributes;
-use trans::base::*;
-use trans::build::*;
-use trans::callee::{self, ArgVals, Callee, TraitItem, MethodData};
-use trans::cleanup::{CleanupMethods, CustomScope, ScopeId};
-use trans::common::*;
-use trans::datum::{self, Datum, rvalue_scratch_datum, Rvalue};
-use trans::debuginfo::{self, DebugLoc};
-use trans::declare;
-use trans::expr;
-use trans::monomorphize::{MonoId};
-use trans::type_of::*;
-use trans::Disr;
-use middle::ty;
-use session::config::FullDebugInfo;
-
-use syntax::abi::Abi::RustCall;
-use syntax::ast;
-use syntax::attr::{ThinAttributes, ThinAttributesExt};
-
-use rustc_front::hir;
-
-
-fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                        closure_def_id: DefId,
-                                        arg_scope_id: ScopeId,
-                                        freevars: &[ty::Freevar])
-                                        -> Block<'blk, 'tcx>
-{
-    let _icx = push_ctxt("closure::load_closure_environment");
-
-    // Special case for small by-value selfs.
-    let closure_ty = node_id_type(bcx, bcx.fcx.id);
-    let self_type = self_type_for_closure(bcx.ccx(), closure_def_id, closure_ty);
-    let kind = kind_for_closure(bcx.ccx(), closure_def_id);
-    let llenv = if kind == ty::FnOnceClosureKind &&
-            !arg_is_indirect(bcx.ccx(), self_type) {
-        let datum = rvalue_scratch_datum(bcx,
-                                         self_type,
-                                         "closure_env");
-        store_ty(bcx, bcx.fcx.llenv.unwrap(), datum.val, self_type);
-        datum.val
-    } else {
-        bcx.fcx.llenv.unwrap()
-    };
-
-    // Store the pointer to closure data in an alloca for debug info because that's what the
-    // llvm.dbg.declare intrinsic expects
-    let env_pointer_alloca = if bcx.sess().opts.debuginfo == FullDebugInfo {
-        let alloc = alloca(bcx, val_ty(llenv), "__debuginfo_env_ptr");
-        Store(bcx, llenv, alloc);
-        Some(alloc)
-    } else {
-        None
-    };
-
-    for (i, freevar) in freevars.iter().enumerate() {
-        let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(),
-                                     closure_expr_id: bcx.fcx.id };
-        let upvar_capture = bcx.tcx().upvar_capture(upvar_id).unwrap();
-        let mut upvar_ptr = StructGEP(bcx, llenv, i);
-        let captured_by_ref = match upvar_capture {
-            ty::UpvarCapture::ByValue => false,
-            ty::UpvarCapture::ByRef(..) => {
-                upvar_ptr = Load(bcx, upvar_ptr);
-                true
-            }
-        };
-        let node_id = freevar.def.var_id();
-        bcx.fcx.llupvars.borrow_mut().insert(node_id, upvar_ptr);
-
-        if kind == ty::FnOnceClosureKind && !captured_by_ref {
-            let hint = bcx.fcx.lldropflag_hints.borrow().hint_datum(upvar_id.var_id);
-            bcx.fcx.schedule_drop_mem(arg_scope_id,
-                                      upvar_ptr,
-                                      node_id_type(bcx, node_id),
-                                      hint)
-        }
-
-        if let Some(env_pointer_alloca) = env_pointer_alloca {
-            debuginfo::create_captured_var_metadata(
-                bcx,
-                node_id,
-                env_pointer_alloca,
-                i,
-                captured_by_ref,
-                freevar.span);
-        }
-    }
-
-    bcx
-}
-
-pub enum ClosureEnv<'a> {
-    NotClosure,
-    Closure(DefId, &'a [ty::Freevar]),
-}
-
-impl<'a> ClosureEnv<'a> {
-    pub fn load<'blk,'tcx>(self, bcx: Block<'blk, 'tcx>, arg_scope: ScopeId)
-                           -> Block<'blk, 'tcx>
-    {
-        match self {
-            ClosureEnv::NotClosure => bcx,
-            ClosureEnv::Closure(def_id, freevars) => {
-                if freevars.is_empty() {
-                    bcx
-                } else {
-                    load_closure_environment(bcx, def_id, arg_scope, freevars)
-                }
-            }
-        }
-    }
-}
-
-/// Returns the LLVM function declaration for a closure, creating it if
-/// necessary. The given `closure_id` must refer to a closure.
-pub fn get_or_create_closure_declaration<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                                   closure_id: DefId,
-                                                   substs: &ty::ClosureSubsts<'tcx>)
-                                                   -> ValueRef {
-    // Normalize type so differences in regions and typedefs don't cause
-    // duplicate declarations
-    let substs = ccx.tcx().erase_regions(substs);
-    let mono_id = MonoId {
-        def: closure_id,
-        params: &substs.func_substs.types
-    };
-
-    if let Some(&llfn) = ccx.closure_vals().borrow().get(&mono_id) {
-        debug!("get_or_create_closure_declaration(): found closure {:?}: {:?}",
-               mono_id, ccx.tn().val_to_string(llfn));
-        return llfn;
-    }
-
-    let path = ccx.tcx().def_path(closure_id);
-    let symbol = mangle_internal_name_by_path_and_seq(path, "closure");
-
-    let function_type = ccx.tcx().mk_closure_from_closure_substs(closure_id, Box::new(substs));
-    let llfn = declare::define_internal_rust_fn(ccx, &symbol[..], function_type);
-
-    // set an inline hint for all closures
-    attributes::inline(llfn, attributes::InlineAttr::Hint);
-
-    debug!("get_or_create_closure_declaration(): inserting new \
-            closure {:?} (type {}): {:?}",
-           mono_id,
-           ccx.tn().type_to_string(val_ty(llfn)),
-           ccx.tn().val_to_string(llfn));
-    ccx.closure_vals().borrow_mut().insert(mono_id, llfn);
-
-    llfn
-}
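-
-// Editor's note: the lookup-then-insert pattern above (consult `closure_vals`,
-// otherwise declare the function and cache it) is plain get-or-create memoization.
-// A standalone sketch of the same shape, using a HashMap in place of the
-// CrateContext cache and a string in place of the LLVM ValueRef:
-#[allow(dead_code)]
-fn get_or_create_sketch(cache: &mut ::std::collections::HashMap<u64, String>,
-                        key: u64) -> String {
-    if let Some(existing) = cache.get(&key) {
-        return existing.clone();
-    }
-    // Stands in for mangling a symbol and declaring the function.
-    let created = format!("closure_decl_{}", key);
-    cache.insert(key, created.clone());
-    created
-}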
-
-pub enum Dest<'a, 'tcx: 'a> {
-    SaveIn(Block<'a, 'tcx>, ValueRef),
-    Ignore(&'a CrateContext<'a, 'tcx>)
-}
-
-pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>,
-                                    decl: &hir::FnDecl,
-                                    body: &hir::Block,
-                                    id: ast::NodeId,
-                                    closure_def_id: DefId, // (*)
-                                    closure_substs: &'tcx ty::ClosureSubsts<'tcx>,
-                                    closure_expr_attrs: &ThinAttributes)
-                                    -> Option<Block<'a, 'tcx>>
-{
-    // (*) Note that in the case of inlined functions, the `closure_def_id` will be the
-    // defid of the closure in its original crate, whereas `id` will be the id of the local
-    // inlined copy.
-
-    let param_substs = closure_substs.func_substs;
-
-    let ccx = match dest {
-        Dest::SaveIn(bcx, _) => bcx.ccx(),
-        Dest::Ignore(ccx) => ccx
-    };
-    let tcx = ccx.tcx();
-    let _icx = push_ctxt("closure::trans_closure_expr");
-
-    debug!("trans_closure_expr(id={:?}, closure_def_id={:?}, closure_substs={:?})",
-           id, closure_def_id, closure_substs);
-
-    let llfn = get_or_create_closure_declaration(ccx, closure_def_id, closure_substs);
-
-    // Get the type of this closure. Use the current `param_substs` as
-    // the closure substitutions. This makes sense because the closure
-    // takes the same set of type arguments as the enclosing fn, and
-    // this function (`trans_closure`) is invoked at the point
-    // of the closure expression.
-
-    let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables);
-    let function_type = infcx.closure_type(closure_def_id, closure_substs);
-
-    let freevars: Vec<ty::Freevar> =
-        tcx.with_freevars(id, |fv| fv.iter().cloned().collect());
-
-    let sig = tcx.erase_late_bound_regions(&function_type.sig);
-    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
-
-    trans_closure(ccx,
-                  decl,
-                  body,
-                  llfn,
-                  param_substs,
-                  id,
-                  closure_expr_attrs.as_attr_slice(),
-                  sig.output,
-                  function_type.abi,
-                  ClosureEnv::Closure(closure_def_id, &freevars));
-
-    // Don't hoist this to the top of the function. It's perfectly legitimate
-    // to have a zero-size closure (in which case dest will be `Ignore`) and
-    // we must still generate the closure body.
-    let (mut bcx, dest_addr) = match dest {
-        Dest::SaveIn(bcx, p) => (bcx, p),
-        Dest::Ignore(_) => {
-            debug!("trans_closure_expr() ignoring result");
-            return None;
-        }
-    };
-
-    let repr = adt::represent_type(ccx, node_id_type(bcx, id));
-
-    // Create the closure.
-    for (i, freevar) in freevars.iter().enumerate() {
-        let datum = expr::trans_local_var(bcx, freevar.def);
-        let upvar_slot_dest = adt::trans_field_ptr(
-            bcx, &repr, adt::MaybeSizedValue::sized(dest_addr), Disr(0), i);
-        let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(),
-                                     closure_expr_id: id };
-        match tcx.upvar_capture(upvar_id).unwrap() {
-            ty::UpvarCapture::ByValue => {
-                bcx = datum.store_to(bcx, upvar_slot_dest);
-            }
-            ty::UpvarCapture::ByRef(..) => {
-                Store(bcx, datum.to_llref(), upvar_slot_dest);
-            }
-        }
-    }
-    adt::trans_set_discr(bcx, &repr, dest_addr, Disr(0));
-
-    Some(bcx)
-}
-
-pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
-                                      closure_def_id: DefId,
-                                      substs: ty::ClosureSubsts<'tcx>,
-                                      trait_closure_kind: ty::ClosureKind)
-                                      -> ValueRef
-{
-    // If this is a closure, redirect to it.
-    let llfn = get_or_create_closure_declaration(ccx, closure_def_id, &substs);
-
-    // If the closure is a Fn closure, but a FnOnce is needed (etc),
-    // then adapt the self type
-    let closure_kind = ccx.tcx().closure_kind(closure_def_id);
-    trans_closure_adapter_shim(ccx,
-                               closure_def_id,
-                               substs,
-                               closure_kind,
-                               trait_closure_kind,
-                               llfn)
-}
-
-fn trans_closure_adapter_shim<'a, 'tcx>(
-    ccx: &'a CrateContext<'a, 'tcx>,
-    closure_def_id: DefId,
-    substs: ty::ClosureSubsts<'tcx>,
-    llfn_closure_kind: ty::ClosureKind,
-    trait_closure_kind: ty::ClosureKind,
-    llfn: ValueRef)
-    -> ValueRef
-{
-    let _icx = push_ctxt("trans_closure_adapter_shim");
-    let tcx = ccx.tcx();
-
-    debug!("trans_closure_adapter_shim(llfn_closure_kind={:?}, \
-           trait_closure_kind={:?}, \
-           llfn={})",
-           llfn_closure_kind,
-           trait_closure_kind,
-           ccx.tn().val_to_string(llfn));
-
-    match (llfn_closure_kind, trait_closure_kind) {
-        (ty::FnClosureKind, ty::FnClosureKind) |
-        (ty::FnMutClosureKind, ty::FnMutClosureKind) |
-        (ty::FnOnceClosureKind, ty::FnOnceClosureKind) => {
-            // No adapter needed.
-            llfn
-        }
-        (ty::FnClosureKind, ty::FnMutClosureKind) => {
-            // The closure fn `llfn` is a `fn(&self, ...)`.  We want a
-            // `fn(&mut self, ...)`. In fact, at trans time, these are
-            // basically the same thing, so we can just return llfn.
-            llfn
-        }
-        (ty::FnClosureKind, ty::FnOnceClosureKind) |
-        (ty::FnMutClosureKind, ty::FnOnceClosureKind) => {
-            // The closure fn `llfn` is a `fn(&self, ...)` or `fn(&mut
-            // self, ...)`.  We want a `fn(self, ...)`. We can produce
-            // this by doing something like:
-            //
-            //     fn call_once(self, ...) { call_mut(&self, ...) }
-            //     fn call_once(mut self, ...) { call_mut(&mut self, ...) }
-            //
-            // These are both the same at trans time.
-            trans_fn_once_adapter_shim(ccx, closure_def_id, substs, llfn)
-        }
-        _ => {
-            tcx.sess.bug(&format!("trans_closure_adapter_shim: cannot convert {:?} to {:?}",
-                                  llfn_closure_kind,
-                                  trait_closure_kind));
-        }
-    }
-}
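-
-// Editor's note: a source-level sketch of the adapter described in the
-// Fn/FnMut -> FnOnce arm above. It is illustrative only and uses ordinary
-// closures instead of the compiler-internal shims: the adapter owns the
-// environment ("by-value self") and forwards to the by-ref (`FnMut`) call,
-// mirroring `fn call_once(mut self, ...) { call_mut(&mut self, ...) }`.
-#[allow(dead_code)]
-fn call_once_via_call_mut<F, R>(mut env: F) -> R
-    where F: FnMut() -> R
-{
-    // The call below desugars to FnMut::call_mut(&mut env), even though `env`
-    // was moved into this function by value.
-    env()
-}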
-
-fn trans_fn_once_adapter_shim<'a, 'tcx>(
-    ccx: &'a CrateContext<'a, 'tcx>,
-    closure_def_id: DefId,
-    substs: ty::ClosureSubsts<'tcx>,
-    llreffn: ValueRef)
-    -> ValueRef
-{
-    debug!("trans_fn_once_adapter_shim(closure_def_id={:?}, substs={:?}, llreffn={})",
-           closure_def_id,
-           substs,
-           ccx.tn().val_to_string(llreffn));
-
-    let tcx = ccx.tcx();
-    let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables);
-
-    // Find a version of the closure type. Substitute static for the
-    // region since it doesn't really matter.
-    let closure_ty = tcx.mk_closure_from_closure_substs(closure_def_id, Box::new(substs.clone()));
-    let ref_closure_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), closure_ty);
-
-    // Make a version of the fn signature whose self argument is the by-ref closure type.
-    let ty::ClosureTy { unsafety, abi, mut sig } = infcx.closure_type(closure_def_id, &substs);
-    sig.0.inputs.insert(0, ref_closure_ty); // sig has no self type as of yet
-    let llref_bare_fn_ty = tcx.mk_bare_fn(ty::BareFnTy { unsafety: unsafety,
-                                                               abi: abi,
-                                                               sig: sig.clone() });
-    let llref_fn_ty = tcx.mk_fn(None, llref_bare_fn_ty);
-    debug!("trans_fn_once_adapter_shim: llref_fn_ty={:?}",
-           llref_fn_ty);
-
-    // Make a version of the closure type with the same arguments, but
-    // with argument #0 being by value.
-    assert_eq!(abi, RustCall);
-    sig.0.inputs[0] = closure_ty;
-    let llonce_bare_fn_ty = tcx.mk_bare_fn(ty::BareFnTy { unsafety: unsafety,
-                                                                abi: abi,
-                                                                sig: sig });
-    let llonce_fn_ty = tcx.mk_fn(None, llonce_bare_fn_ty);
-
-    // Create the by-value helper.
-    let function_name = link::mangle_internal_name_by_type_and_seq(ccx, llonce_fn_ty, "once_shim");
-    let lloncefn = declare::define_internal_rust_fn(ccx, &function_name,
-                                                    llonce_fn_ty);
-    let sig = tcx.erase_late_bound_regions(&llonce_bare_fn_ty.sig);
-    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
-
-    let (block_arena, fcx): (TypedArena<_>, FunctionContext);
-    block_arena = TypedArena::new();
-    fcx = new_fn_ctxt(ccx,
-                      lloncefn,
-                      ast::DUMMY_NODE_ID,
-                      false,
-                      sig.output,
-                      substs.func_substs,
-                      None,
-                      &block_arena);
-    let mut bcx = init_function(&fcx, false, sig.output);
-
-    let llargs = get_params(fcx.llfn);
-
-    // the first argument (`self`) will be the (by value) closure env.
-    let self_scope = fcx.push_custom_cleanup_scope();
-    let self_scope_id = CustomScope(self_scope);
-    let rvalue_mode = datum::appropriate_rvalue_mode(ccx, closure_ty);
-    let self_idx = fcx.arg_offset();
-    let llself = llargs[self_idx];
-    let env_datum = Datum::new(llself, closure_ty, Rvalue::new(rvalue_mode));
-    let env_datum = unpack_datum!(bcx,
-                                  env_datum.to_lvalue_datum_in_scope(bcx, "self",
-                                                                     self_scope_id));
-
-    debug!("trans_fn_once_adapter_shim: env_datum={}",
-           bcx.val_to_string(env_datum.val));
-
-    let dest =
-        fcx.llretslotptr.get().map(
-            |_| expr::SaveIn(fcx.get_ret_slot(bcx, sig.output, "ret_slot")));
-
-    let callee_data = TraitItem(MethodData { llfn: llreffn,
-                                             llself: env_datum.val });
-
-    bcx = callee::trans_call_inner(bcx, DebugLoc::None, |bcx, _| {
-        Callee {
-            bcx: bcx,
-            data: callee_data,
-            ty: llref_fn_ty
-        }
-    }, ArgVals(&llargs[(self_idx + 1)..]), dest).bcx;
-
-    fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope);
-
-    finish_fn(&fcx, bcx, sig.output, DebugLoc::None);
-
-    lloncefn
-}
diff --git a/src/librustc_trans/trans/collector.rs b/src/librustc_trans/trans/collector.rs
deleted file mode 100644 (file)
index b5b0f0a..0000000
+++ /dev/null
@@ -1,1597 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Translation Item Collection
-//! ===========================
-//!
-//! This module is responsible for discovering all items that will contribute to
-//! code generation of the crate. The important part here is that it not only
-//! needs to find syntax-level items (functions, structs, etc) but also all
-//! their monomorphized instantiations. Every non-generic, non-const function
-//! maps to one LLVM artifact. Every generic function can produce
-//! from zero to N artifacts, depending on the sets of type arguments it
-//! is instantiated with.
-//! This also applies to generic items from other crates: A generic definition
-//! in crate X might produce monomorphizations that are compiled into crate Y.
-//! We also have to collect these here.
-//!
-//! The following kinds of "translation items" are handled here:
-//!
-//! - Functions
-//! - Methods
-//! - Closures
-//! - Statics
-//! - Drop glue
-//!
-//! The following things also result in LLVM artifacts, but are not collected
-//! here, since we instantiate them locally on demand when needed in a given
-//! codegen unit:
-//!
-//! - Constants
-//! - Vtables
-//! - Object Shims
-//!
-//!
-//! General Algorithm
-//! -----------------
-//! Let's define some terms first:
-//!
-//! - A "translation item" is something that results in a function or global in
-//!   the LLVM IR of a codegen unit. Translation items do not stand on their
-//!   own, they can reference other translation items. For example, if function
-//!   `foo()` calls function `bar()` then the translation item for `foo()`
-//!   references the translation item for function `bar()`. In general, the
-//!   definition for translation item A referencing a translation item B is that
-//!   the LLVM artifact produced for A references the LLVM artifact produced
-//!   for B.
-//!
-//! - Translation items and the references between them form a directed graph,
-//!   where the translation items are the nodes and references form the edges.
-//!   Let's call this graph the "translation item graph".
-//!
-//! - The translation item graph for a program contains all translation items
-//!   that are needed in order to produce the complete LLVM IR of the program.
-//!
-//! The purpose of the algorithm implemented in this module is to build the
-//! translation item graph for the current crate. It runs in two phases:
-//!
-//! 1. Discover the roots of the graph by traversing the HIR of the crate.
-//! 2. Starting from the roots, find neighboring nodes by inspecting the MIR
-//!    representation of the item corresponding to a given node, until no more
-//!    new nodes are found.
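-//!
-//! For illustration, here is a small example program (an invented sketch, not
-//! taken from the original text) together with what the two phases would,
-//! roughly, find for it:
-//!
-//! ```rust
-//! // Non-generic functions like `square` and `main` become roots in phase 1.
-//! fn square(x: i32) -> i32 {
-//!     x * x
-//! }
-//!
-//! // Generic functions produce no roots; only their monomorphizations show up.
-//! fn twice<T, F: Fn(T) -> T>(f: F, x: T) -> T {
-//!     f(f(x))
-//! }
-//!
-//! fn main() {
-//!     // Phase 2 walks the MIR of `main` and discovers (roughly) an
-//!     // instantiation of `twice` for `i32` as well as a reference to
-//!     // `square`, which are then visited in turn.
-//!     println!("{}", twice(square, 3));
-//! }
-//! ```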
-//!
-//! ### Discovering roots
-//!
-//! The roots of the translation item graph correspond to the non-generic
-//! syntactic items in the source code. We find them by walking the HIR of the
-//! crate, and whenever we hit upon a function, method, or static item, we
-//! create a translation item consisting of the item's `DefId` and, since we only
-//! consider non-generic items, an empty type-substitution set.
-//!
-//! ### Finding neighbor nodes
-//!
-//! Given a translation item node, we can discover neighbors by inspecting its
-//! MIR. We walk the MIR and any time we hit upon something that signifies a
-//! reference to another translation item, we have found a neighbor. Since the
-//! translation item we are currently at is always monomorphic, we also know the
-//! concrete type arguments of its neighbors, and so all neighbors again will be
-//! monomorphic. The specific forms a reference to a neighboring node can take
-//! in MIR are quite diverse. Here is an overview:
-//!
-//! #### Calling Functions/Methods
-//! The most obvious form of one translation item referencing another is a
-//! function or method call (represented by a CALL terminator in MIR). But
-//! calls are not the only thing that might introduce a reference between two
-//! function translation items, and as we will see below, they are just a
-//! special case of the form described next, and consequently don't get any
-//! special treatment in the algorithm.
-//!
-//! #### Taking a reference to a function or method
-//! A function does not need to actually be called in order to be a neighbor of
-//! another function. It suffices to just take a reference in order to introduce
-//! an edge. Consider the following example:
-//!
-//! ```rust
-//! use std::fmt::Display;
-//!
-//! fn print_val<T: Display>(x: T) {
-//!     println!("{}", x);
-//! }
-//!
-//! fn call_fn(f: &Fn(i32), x: i32) {
-//!     f(x);
-//! }
-//!
-//! fn main() {
-//!     let print_i32 = print_val::<i32>;
-//!     call_fn(&print_i32, 0);
-//! }
-//! ```
-//! None of these functions' MIR contains an explicit call to
-//! `print_val::<i32>`. Nonetheless, in order to translate this program, we need
-//! an instance of this function. Thus, whenever we encounter a function or
-//! method in operand position, we treat it as a neighbor of the current
-//! translation item. Calls are just a special case of that.
-//!
-//! #### Closures
-//! In a way, closures are a simple case. Since every closure object needs to be
-//! constructed somewhere, we can reliably discover them by observing
-//! `RValue::Aggregate` expressions with `AggregateKind::Closure`. This is also
-//! true for closures inlined from other crates.
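-//!
-//! As a minimal illustration (an invented example, not from the original
-//! text), constructing the closure below is what shows up in MIR as an
-//! `Rvalue::Aggregate` with `AggregateKind::Closure`:
-//!
-//! ```rust
-//! fn main() {
-//!     let offset = 10;
-//!     // Building this closure introduces the aggregate the collector
-//!     // observes; calling it is then an ordinary neighbor edge.
-//!     let add_offset = |x: i32| x + offset;
-//!     println!("{}", add_offset(32));
-//! }
-//! ```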
-//!
-//! #### Drop glue
-//! Drop glue translation items are introduced by MIR drop-statements. The
-//! generated translation item will again have drop-glue item neighbors if the
-//! type to be dropped contains nested values that also need to be dropped. It
-//! might also have a function item neighbor for the explicit `Drop::drop`
-//! implementation of its type.
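-//!
-//! For example (an invented sketch), dropping a value of the following type
-//! yields a drop-glue item that has both a function item neighbor (the
-//! explicit `Drop::drop`) and a further drop-glue neighbor for the nested
-//! `Vec<u8>`:
-//!
-//! ```rust
-//! struct Logger {
-//!     buffer: Vec<u8>, // nested value with drop glue of its own
-//! }
-//!
-//! impl Drop for Logger {
-//!     fn drop(&mut self) {
-//!         // explicit Drop implementation: a function item neighbor
-//!         println!("flushed {} bytes", self.buffer.len());
-//!     }
-//! }
-//!
-//! fn main() {
-//!     let _logger = Logger { buffer: vec![1, 2, 3] };
-//! } // drop glue for `Logger` runs here
-//! ```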
-//!
-//! #### Unsizing Casts
-//! A subtle way of introducing neighbor edges is by casting to a trait object.
-//! Since the resulting fat-pointer contains a reference to a vtable, we need to
-//! instantiate all object-safe methods of the trait, as we need to store
-//! pointers to these functions even if they never get called anywhere. This can
-//! be seen as a special case of taking a function reference.
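-//!
-//! A small invented example of such a cast:
-//!
-//! ```rust
-//! use std::fmt::Display;
-//!
-//! fn main() {
-//!     let x: i32 = 42;
-//!     // The unsizing cast to `&Display` needs a vtable for `(i32, Display)`,
-//!     // so the object-safe methods of `Display` for `i32` become neighbors
-//!     // even though none of them is called by name here.
-//!     let obj: &Display = &x;
-//!     println!("{}", obj);
-//! }
-//! ```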
-//!
-//! #### Boxes
-//! Since `Box` expressions have special compiler support, no explicit calls to
-//! `exchange_malloc()` and `exchange_free()` may show up in MIR, even if the
-//! compiler will generate them. We have to observe `Rvalue::Box` expressions
-//! and Box-typed drop-statements for that purpose.
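-//!
-//! For instance (an invented example), this program contains no call to
-//! `exchange_malloc()` or `exchange_free()` in its source, yet both become
-//! neighbors through the `Box` expression and the corresponding drop:
-//!
-//! ```rust
-//! fn main() {
-//!     // Lowers to `Rvalue::Box` in MIR; dropping `b` at the end of `main`
-//!     // is what pulls in the deallocation path.
-//!     let b = Box::new([0u8; 16]);
-//!     println!("{}", b.len());
-//! }
-//! ```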
-//!
-//!
-//! Interaction with Cross-Crate Inlining
-//! -------------------------------------
-//! The binary of a crate will not only contain machine code for the items
-//! defined in the source code of that crate. It will also contain monomorphic
-//! instantiations of any extern generic functions and of functions marked with
-//! #[inline].
-//! The collection algorithm handles this more or less transparently. If it is
-//! about to create a translation item for something with an external `DefId`,
-//! it checks whether the MIR for that item is available, and if so just
-//! proceeds normally. If the MIR is not available, it assumes that the item is
-//! only linked against and creates no node, which is exactly what we want, since
-//! no machine code should be generated in the current crate for such an item.
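-//!
-//! For example (an invented case), the following program ends up with local
-//! translation items for `Vec::<u32>::new` and `Vec::<u32>::push`, even though
-//! both are defined in another crate:
-//!
-//! ```rust
-//! fn main() {
-//!     // `Vec::<u32>::push` is generic and defined in libstd; its `u32`
-//!     // instantiation has no machine code there, so a translation item
-//!     // for it is created in the current crate.
-//!     let mut v: Vec<u32> = Vec::new();
-//!     v.push(7);
-//!     println!("{}", v.len());
-//! }
-//! ```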
-//!
-//! Eager and Lazy Collection Mode
-//! ------------------------------
-//! Translation item collection can be performed in one of two modes:
-//!
-//! - Lazy mode means that items will only be instantiated when actually
-//!   referenced. The goal is to produce the least amount of machine code
-//!   possible.
-//!
-//! - Eager mode is meant to be used in conjunction with incremental compilation
-//!   where a stable set of translation items is more important than a minimal
-//!   one. Thus, eager mode will instantiate drop-glue for every drop-able type
-//!   in the crate, even if no drop call for that type exists (yet). It will
-//!   also instantiate default implementations of trait methods, something that
-//!   otherwise is only done on demand.
-//!
-//!
-//! Open Issues
-//! -----------
-//! Some things are not yet fully implemented in the current version of this
-//! module.
-//!
-//! ### Initializers of Constants and Statics
-//! Since no MIR is constructed yet for initializer expressions of constants and
-//! statics, we cannot inspect these properly.
-//!
-//! ### Const Fns
-//! Ideally, no translation item should be generated for const fns unless there
-//! is a call to them that cannot be evaluated at compile time. At the moment,
-//! however, this is not implemented: a translation item will be produced
-//! regardless of whether it is actually needed or not.
-
-use rustc_front::hir;
-use rustc_front::intravisit as hir_visit;
-
-use rustc::front::map as hir_map;
-use rustc::middle::def_id::DefId;
-use rustc::middle::lang_items::{ExchangeFreeFnLangItem, ExchangeMallocFnLangItem};
-use rustc::middle::{ty, traits};
-use rustc::middle::subst::{self, Substs, Subst};
-use rustc::middle::ty::adjustment::CustomCoerceUnsized;
-use rustc::middle::ty::fold::TypeFoldable;
-use rustc::mir::repr as mir;
-use rustc::mir::visit as mir_visit;
-use rustc::mir::visit::Visitor as MirVisitor;
-
-use syntax::ast::{self, NodeId};
-use syntax::codemap::DUMMY_SP;
-use syntax::errors;
-use syntax::parse::token;
-
-use trans::base::custom_coerce_unsize_info;
-use trans::context::CrateContext;
-use trans::common::{fulfill_obligation, normalize_and_test_predicates,
-                    type_is_sized};
-use trans::glue;
-use trans::meth;
-use trans::monomorphize;
-use util::nodemap::{FnvHashSet, FnvHashMap, DefIdMap};
-
-use std::hash::{Hash, Hasher};
-use std::rc::Rc;
-
-#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
-pub enum TransItemCollectionMode {
-    Eager,
-    Lazy
-}
-
-#[derive(Eq, Clone, Copy, Debug)]
-pub enum TransItem<'tcx> {
-    DropGlue(ty::Ty<'tcx>),
-    Fn {
-        def_id: DefId,
-        substs: &'tcx Substs<'tcx>
-    },
-    Static(NodeId)
-}
-
-impl<'tcx> Hash for TransItem<'tcx> {
-    fn hash<H: Hasher>(&self, s: &mut H) {
-        match *self {
-            TransItem::DropGlue(t) => {
-                0u8.hash(s);
-                t.hash(s);
-            },
-            TransItem::Fn { def_id, substs } => {
-                1u8.hash(s);
-                def_id.hash(s);
-                (substs as *const Substs<'tcx> as usize).hash(s);
-            }
-            TransItem::Static(node_id) => {
-                3u8.hash(s);
-                node_id.hash(s);
-            }
-        };
-    }
-}
-
-impl<'tcx> PartialEq for TransItem<'tcx> {
-    fn eq(&self, other: &Self) -> bool {
-        match (*self, *other) {
-            (TransItem::DropGlue(t1), TransItem::DropGlue(t2)) => t1 == t2,
-            (TransItem::Fn { def_id: def_id1, substs: substs1 },
-             TransItem::Fn { def_id: def_id2, substs: substs2 }) => {
-                def_id1 == def_id2 && substs1 == substs2
-            },
-            (TransItem::Static(node_id1), TransItem::Static(node_id2)) => {
-                node_id1 == node_id2
-            },
-            _ => false
-        }
-    }
-}
-
-pub fn collect_crate_translation_items<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                                 mode: TransItemCollectionMode)
-                                                 -> FnvHashSet<TransItem<'tcx>> {
-    // We are not tracking dependencies of this pass as it has to be re-executed
-    // every time no matter what.
-    ccx.tcx().dep_graph.with_ignore(|| {
-        let roots = collect_roots(ccx, mode);
-
-        debug!("Building translation item graph, beginning at roots");
-        let mut visited = FnvHashSet();
-        let mut recursion_depths = DefIdMap();
-        let mut mir_cache = DefIdMap();
-
-        for root in roots {
-            collect_items_rec(ccx,
-                              root,
-                              &mut visited,
-                              &mut recursion_depths,
-                              &mut mir_cache);
-        }
-
-        visited
-    })
-}
-
-// Find all non-generic items by walking the HIR. These items serve as roots to
-// start monomorphizing from.
-fn collect_roots<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                           mode: TransItemCollectionMode)
-                           -> Vec<TransItem<'tcx>> {
-    debug!("Collecting roots");
-    let mut roots = Vec::new();
-
-    {
-        let mut visitor = RootCollector {
-            ccx: ccx,
-            mode: mode,
-            output: &mut roots,
-            enclosing_item: None,
-            trans_empty_substs: ccx.tcx().mk_substs(Substs::trans_empty()),
-        };
-
-        ccx.tcx().map.krate().visit_all_items(&mut visitor);
-    }
-
-    roots
-}
-
-#[derive(Clone)]
-enum CachedMir<'mir, 'tcx: 'mir> {
-    Ref(&'mir mir::Mir<'tcx>),
-    Owned(Rc<mir::Mir<'tcx>>)
-}
-
-impl<'mir, 'tcx: 'mir> CachedMir<'mir, 'tcx> {
-    fn get_ref<'a>(&'a self) -> &'a mir::Mir<'tcx> {
-        match *self {
-            CachedMir::Ref(r) => r,
-            CachedMir::Owned(ref rc) => &rc,
-        }
-    }
-}
-
-// Collect all monomorphized translation items reachable from `starting_point`
-fn collect_items_rec<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>,
-                                   starting_point: TransItem<'tcx>,
-                                   visited: &mut FnvHashSet<TransItem<'tcx>>,
-                                   recursion_depths: &mut DefIdMap<usize>,
-                                   mir_cache: &mut DefIdMap<CachedMir<'a, 'tcx>>) {
-    if !visited.insert(starting_point.clone()) {
-        // We've been here already, no need to search again.
-        return;
-    }
-    debug!("BEGIN collect_items_rec({})", starting_point.to_string(ccx));
-
-    let mut neighbors = Vec::new();
-    let recursion_depth_reset;
-
-    match starting_point {
-        TransItem::DropGlue(t) => {
-            find_drop_glue_neighbors(ccx, t, &mut neighbors);
-            recursion_depth_reset = None;
-        }
-        TransItem::Static(_) => {
-            recursion_depth_reset = None;
-        }
-        TransItem::Fn { def_id, substs: ref param_substs } => {
-            // Keep track of the monomorphization recursion depth
-            recursion_depth_reset = Some(check_recursion_limit(ccx,
-                                                               def_id,
-                                                               recursion_depths));
-
-            // Scan the MIR in order to find function calls, closures, and
-            // drop-glue
-            let mir = load_mir(ccx, def_id, mir_cache);
-
-            let mut visitor = MirNeighborCollector {
-                ccx: ccx,
-                mir: mir.get_ref(),
-                output: &mut neighbors,
-                param_substs: param_substs
-            };
-
-            visitor.visit_mir(mir.get_ref());
-        }
-    }
-
-    for neighbour in neighbors {
-        collect_items_rec(ccx, neighbour, visited, recursion_depths, mir_cache);
-    }
-
-    if let Some((def_id, depth)) = recursion_depth_reset {
-        recursion_depths.insert(def_id, depth);
-    }
-
-    debug!("END collect_items_rec({})", starting_point.to_string(ccx));
-}
-
-fn load_mir<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>,
-                          def_id: DefId,
-                          mir_cache: &mut DefIdMap<CachedMir<'a, 'tcx>>)
-                          -> CachedMir<'a, 'tcx> {
-    let mir_not_found_error_message = || {
-        format!("Could not find MIR for function: {}",
-                ccx.tcx().item_path_str(def_id))
-    };
-
-    if def_id.is_local() {
-        let node_id = ccx.tcx().map.as_local_node_id(def_id).unwrap();
-        let mir_opt = ccx.mir_map().map.get(&node_id);
-        let mir = errors::expect(ccx.sess().diagnostic(),
-                             mir_opt,
-                             mir_not_found_error_message);
-        CachedMir::Ref(mir)
-    } else {
-        if let Some(mir) = mir_cache.get(&def_id) {
-            return mir.clone();
-        }
-
-        let mir_opt = ccx.sess().cstore.maybe_get_item_mir(ccx.tcx(), def_id);
-        let mir = errors::expect(ccx.sess().diagnostic(),
-                                 mir_opt,
-                                 mir_not_found_error_message);
-        let cached = CachedMir::Owned(Rc::new(mir));
-        mir_cache.insert(def_id, cached.clone());
-        cached
-    }
-}
-
-fn check_recursion_limit<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>,
-                                       def_id: DefId,
-                                       recursion_depths: &mut DefIdMap<usize>)
-                                       -> (DefId, usize) {
-    let recursion_depth = recursion_depths.get(&def_id)
-                                          .map(|x| *x)
-                                          .unwrap_or(0);
-    debug!(" => recursion depth={}", recursion_depth);
-
-    // Code that needs to instantiate the same function recursively
-    // more times than the recursion limit allows is assumed to be
-    // causing an infinite expansion.
-    if recursion_depth > ccx.sess().recursion_limit.get() {
-        if let Some(node_id) = ccx.tcx().map.as_local_node_id(def_id) {
-            ccx.sess().span_fatal(ccx.tcx().map.span(node_id),
-                "reached the recursion limit during monomorphization");
-        } else {
-            let error = format!("reached the recursion limit during \
-                                monomorphization of '{}'",
-                                ccx.tcx().item_path_str(def_id));
-            ccx.sess().fatal(&error[..]);
-        }
-    }
-
-    recursion_depths.insert(def_id, recursion_depth + 1);
-
-    (def_id, recursion_depth)
-}
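-
-// For illustration (an invented example, not part of the original source):
-// the kind of code this limit guards against is unbounded polymorphic
-// recursion, where every level instantiates the function at a new type, e.g.
-//
-//     fn rec<T>(t: T) {
-//         rec((t,)); // each level wraps `T` in one more tuple
-//     }
-//
-//     fn main() {
-//         rec(0u8);
-//     }
-//
-// Such a program is rejected with the "reached the recursion limit during
-// monomorphization" error emitted above instead of expanding forever.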
-
-struct MirNeighborCollector<'a, 'tcx: 'a> {
-    ccx: &'a CrateContext<'a, 'tcx>,
-    mir: &'a mir::Mir<'tcx>,
-    output: &'a mut Vec<TransItem<'tcx>>,
-    param_substs: &'tcx Substs<'tcx>
-}
-
-impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
-
-    fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>) {
-        debug!("visiting rvalue {:?}", *rvalue);
-
-        match *rvalue {
-            mir::Rvalue::Aggregate(mir::AggregateKind::Closure(def_id,
-                                                               ref substs), _) => {
-                assert!(can_have_local_instance(self.ccx, def_id));
-                let trans_item = create_fn_trans_item(self.ccx,
-                                                      def_id,
-                                                      substs.func_substs,
-                                                      self.param_substs);
-                self.output.push(trans_item);
-            }
-            // When doing a cast from a regular pointer to a fat pointer, we
-            // have to instantiate all methods of the trait being cast to, so we
-            // can build the appropriate vtable.
-            mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, target_ty) => {
-                let target_ty = monomorphize::apply_param_substs(self.ccx.tcx(),
-                                                                 self.param_substs,
-                                                                 &target_ty);
-                let source_ty = self.mir.operand_ty(self.ccx.tcx(), operand);
-                let source_ty = monomorphize::apply_param_substs(self.ccx.tcx(),
-                                                                 self.param_substs,
-                                                                 &source_ty);
-                let (source_ty, target_ty) = find_vtable_types_for_unsizing(self.ccx,
-                                                                            source_ty,
-                                                                            target_ty);
-                // This could also be a different Unsize instruction, like
-                // from a fixed-size array to a slice. But we are only
-                // interested in things that produce a vtable.
-                if target_ty.is_trait() && !source_ty.is_trait() {
-                    create_trans_items_for_vtable_methods(self.ccx,
-                                                          target_ty,
-                                                          source_ty,
-                                                          self.output);
-                }
-            }
-            mir::Rvalue::Box(_) => {
-                let exchange_malloc_fn_def_id =
-                    self.ccx
-                        .tcx()
-                        .lang_items
-                        .require(ExchangeMallocFnLangItem)
-                        .unwrap_or_else(|e| self.ccx.sess().fatal(&e));
-
-                assert!(can_have_local_instance(self.ccx, exchange_malloc_fn_def_id));
-                let exchange_malloc_fn_trans_item =
-                    create_fn_trans_item(self.ccx,
-                                         exchange_malloc_fn_def_id,
-                                         &Substs::trans_empty(),
-                                         self.param_substs);
-
-                self.output.push(exchange_malloc_fn_trans_item);
-            }
-            _ => { /* not interesting */ }
-        }
-
-        self.super_rvalue(rvalue);
-    }
-
-    fn visit_lvalue(&mut self,
-                    lvalue: &mir::Lvalue<'tcx>,
-                    context: mir_visit::LvalueContext) {
-        debug!("visiting lvalue {:?}", *lvalue);
-
-        if let mir_visit::LvalueContext::Drop = context {
-            let ty = self.mir.lvalue_ty(self.ccx.tcx(), lvalue)
-                             .to_ty(self.ccx.tcx());
-
-            let ty = monomorphize::apply_param_substs(self.ccx.tcx(),
-                                                      self.param_substs,
-                                                      &ty);
-            let ty = self.ccx.tcx().erase_regions(&ty);
-            let ty = glue::get_drop_glue_type(self.ccx, ty);
-            self.output.push(TransItem::DropGlue(ty));
-        }
-
-        self.super_lvalue(lvalue, context);
-    }
-
-    fn visit_operand(&mut self, operand: &mir::Operand<'tcx>) {
-        debug!("visiting operand {:?}", *operand);
-
-        let callee = match *operand {
-            mir::Operand::Constant(mir::Constant {
-                literal: mir::Literal::Item {
-                    def_id,
-                    kind,
-                    substs
-                },
-                ..
-            }) if is_function_or_method(kind) => Some((def_id, substs)),
-            _ => None
-        };
-
-        if let Some((callee_def_id, callee_substs)) = callee {
-            debug!(" => operand is callable");
-
-            // `callee_def_id` might refer to a trait method instead of a
-            // concrete implementation, so we have to find the actual
-            // implementation. For example, the call might look like
-            //
-            // std::cmp::partial_cmp(0i32, 1i32)
-            //
-            // Calling do_static_dispatch() here will map the def_id of
-            // `std::cmp::partial_cmp` to the def_id of `i32::partial_cmp<i32>`
-            let dispatched = do_static_dispatch(self.ccx,
-                                                callee_def_id,
-                                                callee_substs,
-                                                self.param_substs);
-
-            if let Some((callee_def_id, callee_substs)) = dispatched {
-                // if we have a concrete impl (which we might not have
-                // in the case of something compiler generated like an
-                // object shim or a closure that is handled differently),
-                // we check if the callee is something that will actually
-                // result in a translation item ...
-                if can_result_in_trans_item(self.ccx, callee_def_id) {
-                    // ... and create one if it does.
-                    let trans_item = create_fn_trans_item(self.ccx,
-                                                          callee_def_id,
-                                                          callee_substs,
-                                                          self.param_substs);
-                    self.output.push(trans_item);
-                }
-            }
-        }
-
-        self.super_operand(operand);
-
-        fn is_function_or_method(item_kind: mir::ItemKind) -> bool {
-            match item_kind {
-                mir::ItemKind::Constant => false,
-                mir::ItemKind::Function |
-                mir::ItemKind::Method   => true
-            }
-        }
-
-        fn can_result_in_trans_item<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                              def_id: DefId)
-                                              -> bool {
-            if !match ccx.tcx().lookup_item_type(def_id).ty.sty {
-                ty::TyBareFn(Some(def_id), _) => {
-                    // Some constructors also have type TyBareFn but they are
-                    // always instantiated inline and don't result in a
-                    // translation item.
-                    match ccx.tcx().map.get_if_local(def_id) {
-                        Some(hir_map::NodeVariant(_))    |
-                        Some(hir_map::NodeStructCtor(_)) => false,
-                        Some(_) => true,
-                        None => {
-                            ccx.sess().cstore.variant_kind(def_id).is_none()
-                        }
-                    }
-                }
-                ty::TyClosure(..) => true,
-                _ => false
-            } {
-                return false;
-            }
-
-            can_have_local_instance(ccx, def_id)
-        }
-    }
-}
-
-fn can_have_local_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                     def_id: DefId)
-                                     -> bool {
-    // Check whether we have the definition available. If not, we
-    // will not emit code for this item in the local crate, and thus
-    // don't create a translation item for it.
-    def_id.is_local() || ccx.sess().cstore.is_item_mir_available(def_id)
-}
-
-fn find_drop_glue_neighbors<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                      ty: ty::Ty<'tcx>,
-                                      output: &mut Vec<TransItem<'tcx>>)
-{
-    debug!("find_drop_glue_neighbors: {}", type_to_string(ccx, ty));
-
-    // Make sure the exchange_free_fn() lang-item gets translated if
-    // there is a boxed value.
-    if let ty::TyBox(_) = ty.sty {
-        let exchange_free_fn_def_id = ccx.tcx()
-                                         .lang_items
-                                         .require(ExchangeFreeFnLangItem)
-                                         .unwrap_or_else(|e| ccx.sess().fatal(&e));
-
-        assert!(can_have_local_instance(ccx, exchange_free_fn_def_id));
-        let exchange_free_fn_trans_item =
-            create_fn_trans_item(ccx,
-                                 exchange_free_fn_def_id,
-                                 &Substs::trans_empty(),
-                                 &Substs::trans_empty());
-
-        output.push(exchange_free_fn_trans_item);
-    }
-
-    // If the type implements Drop, also add a translation item for the
-    // monomorphized Drop::drop() implementation.
-    let destructor_did = match ty.sty {
-        ty::TyStruct(def, _) |
-        ty::TyEnum(def, _)   => def.destructor(),
-        _ => None
-    };
-
-    if let Some(destructor_did) = destructor_did {
-        use rustc::middle::ty::ToPolyTraitRef;
-
-        let drop_trait_def_id = ccx.tcx()
-                                   .lang_items
-                                   .drop_trait()
-                                   .unwrap();
-
-        let self_type_substs = ccx.tcx().mk_substs(
-            Substs::trans_empty().with_self_ty(ty));
-
-        let trait_ref = ty::TraitRef {
-            def_id: drop_trait_def_id,
-            substs: self_type_substs,
-        }.to_poly_trait_ref();
-
-        let substs = match fulfill_obligation(ccx, DUMMY_SP, trait_ref) {
-            traits::VtableImpl(data) => data.substs,
-            _ => unreachable!()
-        };
-
-        if can_have_local_instance(ccx, destructor_did) {
-            let trans_item = create_fn_trans_item(ccx,
-                                                  destructor_did,
-                                                  ccx.tcx().mk_substs(substs),
-                                                  &Substs::trans_empty());
-            output.push(trans_item);
-        }
-    }
-
-    // Finally add the types of nested values
-    match ty.sty {
-        ty::TyBool       |
-        ty::TyChar       |
-        ty::TyInt(_)     |
-        ty::TyUint(_)    |
-        ty::TyStr        |
-        ty::TyFloat(_)   |
-        ty::TyRawPtr(_)  |
-        ty::TyRef(..)    |
-        ty::TyBareFn(..) |
-        ty::TySlice(_)   |
-        ty::TyTrait(_)   => {
-            /* nothing to do */
-        }
-        ty::TyStruct(ref adt_def, substs) |
-        ty::TyEnum(ref adt_def, substs) => {
-            for field in adt_def.all_fields() {
-                let field_type = monomorphize::apply_param_substs(ccx.tcx(),
-                                                                  substs,
-                                                                  &field.unsubst_ty());
-                let field_type = glue::get_drop_glue_type(ccx, field_type);
-
-                if glue::type_needs_drop(ccx.tcx(), field_type) {
-                    output.push(TransItem::DropGlue(field_type));
-                }
-            }
-        }
-        ty::TyClosure(_, ref substs) => {
-            for upvar_ty in &substs.upvar_tys {
-                let upvar_ty = glue::get_drop_glue_type(ccx, upvar_ty);
-                if glue::type_needs_drop(ccx.tcx(), upvar_ty) {
-                    output.push(TransItem::DropGlue(upvar_ty));
-                }
-            }
-        }
-        ty::TyBox(inner_type)      |
-        ty::TyArray(inner_type, _) => {
-            let inner_type = glue::get_drop_glue_type(ccx, inner_type);
-            if glue::type_needs_drop(ccx.tcx(), inner_type) {
-                output.push(TransItem::DropGlue(inner_type));
-            }
-        }
-        ty::TyTuple(ref args) => {
-            for arg in args {
-                let arg = glue::get_drop_glue_type(ccx, arg);
-                if glue::type_needs_drop(ccx.tcx(), arg) {
-                    output.push(TransItem::DropGlue(arg));
-                }
-            }
-        }
-        ty::TyProjection(_) |
-        ty::TyParam(_)      |
-        ty::TyInfer(_)      |
-        ty::TyError         => {
-            ccx.sess().bug("encountered unexpected type");
-        }
-    }
-}
-
-fn do_static_dispatch<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                fn_def_id: DefId,
-                                fn_substs: &'tcx Substs<'tcx>,
-                                param_substs: &'tcx Substs<'tcx>)
-                                -> Option<(DefId, &'tcx Substs<'tcx>)> {
-    debug!("do_static_dispatch(fn_def_id={}, fn_substs={:?}, param_substs={:?})",
-           def_id_to_string(ccx, fn_def_id, None),
-           fn_substs,
-           param_substs);
-
-    let is_trait_method = ccx.tcx().trait_of_item(fn_def_id).is_some();
-
-    if is_trait_method {
-        match ccx.tcx().impl_or_trait_item(fn_def_id) {
-            ty::MethodTraitItem(ref method) => {
-                match method.container {
-                    ty::TraitContainer(trait_def_id) => {
-                        debug!(" => trait method, attempting to find impl");
-                        do_static_trait_method_dispatch(ccx,
-                                                        method,
-                                                        trait_def_id,
-                                                        fn_substs,
-                                                        param_substs)
-                    }
-                    ty::ImplContainer(_) => {
-                        // This is already a concrete implementation
-                        debug!(" => impl method");
-                        Some((fn_def_id, fn_substs))
-                    }
-                }
-            }
-            _ => unreachable!()
-        }
-    } else {
-        debug!(" => regular function");
-        // The function is not part of an impl or trait, no dispatching
-        // to be done
-        Some((fn_def_id, fn_substs))
-    }
-}
-
-// Given a trait-method and substitution information, find out the actual
-// implementation of the trait method.
-fn do_static_trait_method_dispatch<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                             trait_method: &ty::Method,
-                                             trait_id: DefId,
-                                             callee_substs: &'tcx Substs<'tcx>,
-                                             param_substs: &'tcx Substs<'tcx>)
-                                             -> Option<(DefId, &'tcx Substs<'tcx>)> {
-    let tcx = ccx.tcx();
-    debug!("do_static_trait_method_dispatch(trait_method={}, \
-                                            trait_id={}, \
-                                            callee_substs={:?}, \
-                                            param_substs={:?}",
-           def_id_to_string(ccx, trait_method.def_id, None),
-           def_id_to_string(ccx, trait_id, None),
-           callee_substs,
-           param_substs);
-
-    let rcvr_substs = monomorphize::apply_param_substs(tcx,
-                                                       param_substs,
-                                                       callee_substs);
-
-    let trait_ref = ty::Binder(rcvr_substs.to_trait_ref(tcx, trait_id));
-    let vtbl = fulfill_obligation(ccx, DUMMY_SP, trait_ref);
-
-    // Now that we know which impl is being used, we can dispatch to
-    // the actual function:
-    match vtbl {
-        traits::VtableImpl(traits::VtableImplData {
-            impl_def_id: impl_did,
-            substs: impl_substs,
-            nested: _ }) =>
-        {
-            let callee_substs = impl_substs.with_method_from(&rcvr_substs);
-            let impl_method = tcx.get_impl_method(impl_did,
-                                                  callee_substs,
-                                                  trait_method.name);
-            Some((impl_method.method.def_id, tcx.mk_substs(impl_method.substs)))
-        }
-        // If we have a closure or a function pointer, we will also encounter
-        // the concrete closure/function somewhere else (during closure or fn
-        // pointer construction). That's where we track those things.
-        traits::VtableClosure(..) |
-        traits::VtableFnPointer(..) |
-        traits::VtableObject(..) => {
-            None
-        }
-        _ => {
-            tcx.sess.bug(&format!("static call to invalid vtable: {:?}", vtbl))
-        }
-    }
-}
-
-/// For given pair of source and target type that occur in an unsizing coercion,
-/// this function finds the pair of types that determines the vtable linking
-/// them.
-///
-/// For example, the source type might be `&SomeStruct` and the target type
-/// might be `&SomeTrait` in a cast like:
-///
-/// let src: &SomeStruct = ...;
-/// let target = src as &SomeTrait;
-///
-/// Then the output of this function would be (SomeStruct, SomeTrait) since for
-/// constructing the `target` fat-pointer we need the vtable for that pair.
-///
-/// Things can get more complicated though because there's also the case where
-/// the unsized type occurs as a field:
-///
-/// ```rust
-/// struct ComplexStruct<T: ?Sized> {
-///    a: u32,
-///    b: f64,
-///    c: T
-/// }
-/// ```
-///
-/// In this case, if `T` is sized, `&ComplexStruct<T>` is a thin pointer. If `T`
-/// is unsized, `&ComplexStruct<T>` is a fat pointer, and the vtable it points to is
-/// for the pair of `T` (which is a trait) and the concrete type that `T` was
-/// originally coerced from:
-///
-/// let src: &ComplexStruct<SomeStruct> = ...;
-/// let target = src as &ComplexStruct<SomeTrait>;
-///
-/// Again, we want this `find_vtable_types_for_unsizing()` to provide the pair
-/// `(SomeStruct, SomeTrait)`.
-///
-/// Finally, there is also the case of custom unsizing coercions, e.g. for
-/// smart pointers such as `Rc` and `Arc`.
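-///
-/// A minimal invented example of such a custom coercion:
-///
-/// ```rust
-/// use std::fmt::Debug;
-/// use std::rc::Rc;
-///
-/// fn main() {
-///     let src: Rc<Vec<i32>> = Rc::new(vec![1, 2, 3]);
-///     // The unsizing goes through `Rc`'s pointer field; the vtable pair
-///     // here is again (Vec<i32>, Debug).
-///     let target: Rc<Debug> = src;
-///     println!("{:?}", target);
-/// }
-/// ```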
-fn find_vtable_types_for_unsizing<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                            source_ty: ty::Ty<'tcx>,
-                                            target_ty: ty::Ty<'tcx>)
-                                            -> (ty::Ty<'tcx>, ty::Ty<'tcx>) {
-    match (&source_ty.sty, &target_ty.sty) {
-        (&ty::TyBox(a), &ty::TyBox(b)) |
-        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
-         &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
-        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
-         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
-        (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
-         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
-            let (inner_source, inner_target) = (a, b);
-
-            if !type_is_sized(ccx.tcx(), inner_source) {
-                (inner_source, inner_target)
-            } else {
-                ccx.tcx().struct_lockstep_tails(inner_source, inner_target)
-            }
-        }
-
-        (&ty::TyStruct(source_adt_def, source_substs),
-         &ty::TyStruct(target_adt_def, target_substs)) => {
-            assert_eq!(source_adt_def, target_adt_def);
-
-            let kind = custom_coerce_unsize_info(ccx, source_ty, target_ty);
-
-            let coerce_index = match kind {
-                CustomCoerceUnsized::Struct(i) => i
-            };
-
-            let source_fields = &source_adt_def.struct_variant().fields;
-            let target_fields = &target_adt_def.struct_variant().fields;
-
-            assert!(coerce_index < source_fields.len() &&
-                    source_fields.len() == target_fields.len());
-
-            find_vtable_types_for_unsizing(ccx,
-                                           source_fields[coerce_index].ty(ccx.tcx(),
-                                                                          source_substs),
-                                           target_fields[coerce_index].ty(ccx.tcx(),
-                                                                          target_substs))
-        }
-        _ => ccx.sess()
-                .bug(&format!("find_vtable_types_for_unsizing: invalid coercion {:?} -> {:?}",
-                               source_ty,
-                               target_ty))
-    }
-}
-
-fn create_fn_trans_item<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                  def_id: DefId,
-                                  fn_substs: &Substs<'tcx>,
-                                  param_substs: &Substs<'tcx>)
-                                  -> TransItem<'tcx>
-{
-    debug!("create_fn_trans_item(def_id={}, fn_substs={:?}, param_substs={:?})",
-            def_id_to_string(ccx, def_id, None),
-            fn_substs,
-            param_substs);
-
-    // We only get here if `def_id` either designates a local item or
-    // an inlineable external item. Non-inlineable external items are
-    // ignored because we don't want to generate any code for them.
-    let concrete_substs = monomorphize::apply_param_substs(ccx.tcx(),
-                                                           param_substs,
-                                                           fn_substs);
-    let concrete_substs = ccx.tcx().erase_regions(&concrete_substs);
-
-    let trans_item = TransItem::Fn {
-        def_id: def_id,
-        substs: ccx.tcx().mk_substs(concrete_substs),
-    };
-
-    return trans_item;
-}
-
-/// Creates a `TransItem` for each method that is referenced by the vtable for
-/// the given trait/impl pair.
-fn create_trans_items_for_vtable_methods<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                                   trait_ty: ty::Ty<'tcx>,
-                                                   impl_ty: ty::Ty<'tcx>,
-                                                   output: &mut Vec<TransItem<'tcx>>) {
-    assert!(!trait_ty.needs_subst() && !impl_ty.needs_subst());
-
-    if let ty::TyTrait(ref trait_ty) = trait_ty.sty {
-        let poly_trait_ref = trait_ty.principal_trait_ref_with_self_ty(ccx.tcx(),
-                                                                       impl_ty);
-
-        // Walk all methods of the trait, including those of its supertraits
-        for trait_ref in traits::supertraits(ccx.tcx(), poly_trait_ref) {
-            let vtable = fulfill_obligation(ccx, DUMMY_SP, trait_ref);
-            match vtable {
-                traits::VtableImpl(
-                    traits::VtableImplData {
-                        impl_def_id,
-                        substs,
-                        nested: _ }) => {
-                    let items = meth::get_vtable_methods(ccx, impl_def_id, substs)
-                        .into_iter()
-                        // filter out None values
-                        .filter_map(|opt_impl_method| opt_impl_method)
-                        // create translation items
-                        .filter_map(|impl_method| {
-                            if can_have_local_instance(ccx, impl_method.method.def_id) {
-                                let substs = ccx.tcx().mk_substs(impl_method.substs);
-                                Some(create_fn_trans_item(ccx,
-                                                          impl_method.method.def_id,
-                                                          substs,
-                                                          &Substs::trans_empty()))
-                            } else {
-                                None
-                            }
-                        })
-                        .collect::<Vec<_>>();
-
-                    output.extend(items.into_iter());
-                }
-                _ => { /* */ }
-            }
-        }
-    }
-}
-
-//=-----------------------------------------------------------------------------
-// Root Collection
-//=-----------------------------------------------------------------------------
-
-struct RootCollector<'b, 'a: 'b, 'tcx: 'a + 'b> {
-    ccx: &'b CrateContext<'a, 'tcx>,
-    mode: TransItemCollectionMode,
-    output: &'b mut Vec<TransItem<'tcx>>,
-    enclosing_item: Option<&'tcx hir::Item>,
-    trans_empty_substs: &'tcx Substs<'tcx>
-}
-
-impl<'b, 'a, 'v> hir_visit::Visitor<'v> for RootCollector<'b, 'a, 'v> {
-    fn visit_item(&mut self, item: &'v hir::Item) {
-        let old_enclosing_item = self.enclosing_item;
-        self.enclosing_item = Some(item);
-
-        match item.node {
-            hir::ItemExternCrate(..) |
-            hir::ItemUse(..)         |
-            hir::ItemForeignMod(..)  |
-            hir::ItemTy(..)          |
-            hir::ItemDefaultImpl(..) |
-            hir::ItemTrait(..)       |
-            hir::ItemConst(..)       |
-            hir::ItemMod(..)         => {
-                // Nothing to do, just keep recursing...
-            }
-
-            hir::ItemImpl(..) => {
-                if self.mode == TransItemCollectionMode::Eager {
-                    create_trans_items_for_default_impls(self.ccx,
-                                                         item,
-                                                         self.trans_empty_substs,
-                                                         self.output);
-                }
-            }
-
-            hir::ItemEnum(_, ref generics)        |
-            hir::ItemStruct(_, ref generics)      => {
-                if !generics.is_parameterized() {
-                    let ty = {
-                        let tables = self.ccx.tcx().tables.borrow();
-                        tables.node_types[&item.id]
-                    };
-
-                    if self.mode == TransItemCollectionMode::Eager {
-                        debug!("RootCollector: ADT drop-glue for {}",
-                               def_id_to_string(self.ccx,
-                                                self.ccx.tcx().map.local_def_id(item.id),
-                                                None));
-
-                        let ty = glue::get_drop_glue_type(self.ccx, ty);
-                        self.output.push(TransItem::DropGlue(ty));
-                    }
-                }
-            }
-            hir::ItemStatic(..) => {
-                debug!("RootCollector: ItemStatic({})",
-                       def_id_to_string(self.ccx,
-                                        self.ccx.tcx().map.local_def_id(item.id),
-                                        None));
-                self.output.push(TransItem::Static(item.id));
-            }
-            hir::ItemFn(_, _, constness, _, ref generics, _) => {
-                if !generics.is_type_parameterized() &&
-                   constness == hir::Constness::NotConst {
-                    let def_id = self.ccx.tcx().map.local_def_id(item.id);
-
-                    debug!("RootCollector: ItemFn({})",
-                           def_id_to_string(self.ccx, def_id, None));
-
-                    self.output.push(TransItem::Fn {
-                        def_id: def_id,
-                        substs: self.trans_empty_substs
-                    });
-                }
-            }
-        }
-
-        hir_visit::walk_item(self, item);
-        self.enclosing_item = old_enclosing_item;
-    }
-
-    fn visit_impl_item(&mut self, ii: &'v hir::ImplItem) {
-        match ii.node {
-            hir::ImplItemKind::Method(hir::MethodSig {
-                ref generics,
-                constness,
-                ..
-            }, _) if constness == hir::Constness::NotConst => {
-                let hir_map = &self.ccx.tcx().map;
-                let parent_node_id = hir_map.get_parent_node(ii.id);
-                let is_impl_generic = match hir_map.expect_item(parent_node_id) {
-                    &hir::Item {
-                        node: hir::ItemImpl(_, _, ref generics, _, _, _),
-                        ..
-                    } => {
-                        generics.is_type_parameterized()
-                    }
-                    _ => {
-                        unreachable!()
-                    }
-                };
-
-                if !generics.is_type_parameterized() && !is_impl_generic {
-                    let def_id = self.ccx.tcx().map.local_def_id(ii.id);
-
-                    debug!("RootCollector: MethodImplItem({})",
-                           def_id_to_string(self.ccx, def_id, None));
-
-                    self.output.push(TransItem::Fn {
-                        def_id: def_id,
-                        substs: self.trans_empty_substs
-                    });
-                }
-            }
-            _ => { /* Nothing to do here */ }
-        }
-
-        hir_visit::walk_impl_item(self, ii)
-    }
-}
-
-fn create_trans_items_for_default_impls<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                                  item: &'tcx hir::Item,
-                                                  trans_empty_substs: &'tcx Substs<'tcx>,
-                                                  output: &mut Vec<TransItem<'tcx>>) {
-    match item.node {
-        hir::ItemImpl(_,
-                      _,
-                      ref generics,
-                      _,
-                      _,
-                      ref items) => {
-            if generics.is_type_parameterized() {
-                return
-            }
-
-            let tcx = ccx.tcx();
-            let impl_def_id = tcx.map.local_def_id(item.id);
-
-            debug!("create_trans_items_for_default_impls(item={})",
-                   def_id_to_string(ccx, impl_def_id, None));
-
-            if let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) {
-                let default_impls = tcx.provided_trait_methods(trait_ref.def_id);
-                let callee_substs = tcx.mk_substs(tcx.erase_regions(trait_ref.substs));
-                let overridden_methods: FnvHashSet<_> = items.iter()
-                                                             .map(|item| item.name)
-                                                             .collect();
-                for default_impl in default_impls {
-                    if overridden_methods.contains(&default_impl.name) {
-                        continue;
-                    }
-
-                    if default_impl.generics.has_type_params(subst::FnSpace) {
-                        continue;
-                    }
-
-                    // The substitutions we have are on the impl, so we grab
-                    // the method type from the impl to substitute into.
-                    let mth = tcx.get_impl_method(impl_def_id,
-                                                  callee_substs.clone(),
-                                                  default_impl.name);
-
-                    assert!(mth.is_provided);
-
-                    let predicates = mth.method.predicates.predicates.subst(tcx, &mth.substs);
-                    if !normalize_and_test_predicates(ccx, predicates.into_vec()) {
-                        continue;
-                    }
-
-                    if can_have_local_instance(ccx, default_impl.def_id) {
-                        let item = create_fn_trans_item(ccx,
-                                                        default_impl.def_id,
-                                                        callee_substs,
-                                                        trans_empty_substs);
-                        output.push(item);
-                    }
-                }
-            }
-        }
-        _ => {
-            unreachable!()
-        }
-    }
-}
-
-//=-----------------------------------------------------------------------------
-// TransItem String Keys
-//=-----------------------------------------------------------------------------
-
-// The code below allows for producing a unique string key for a trans item.
-// These keys are used by the handwritten auto-tests, so they need to be
-// predictable and human-readable.
-//
-// Note: A lot of this code looks very similar to what's already in the
-//       ppaux module. It would be good to refactor things so we only have one
-//       parameterizable implementation for printing types.
-
-/// Same as `unique_type_name()` but with the result pushed onto the given
-/// `output` parameter.
-pub fn push_unique_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                       t: ty::Ty<'tcx>,
-                                       output: &mut String) {
-    match t.sty {
-        ty::TyBool                     => output.push_str("bool"),
-        ty::TyChar                     => output.push_str("char"),
-        ty::TyStr                      => output.push_str("str"),
-        ty::TyInt(ast::IntTy::Is)      => output.push_str("isize"),
-        ty::TyInt(ast::IntTy::I8)      => output.push_str("i8"),
-        ty::TyInt(ast::IntTy::I16)     => output.push_str("i16"),
-        ty::TyInt(ast::IntTy::I32)     => output.push_str("i32"),
-        ty::TyInt(ast::IntTy::I64)     => output.push_str("i64"),
-        ty::TyUint(ast::UintTy::Us)    => output.push_str("usize"),
-        ty::TyUint(ast::UintTy::U8)    => output.push_str("u8"),
-        ty::TyUint(ast::UintTy::U16)   => output.push_str("u16"),
-        ty::TyUint(ast::UintTy::U32)   => output.push_str("u32"),
-        ty::TyUint(ast::UintTy::U64)   => output.push_str("u64"),
-        ty::TyFloat(ast::FloatTy::F32) => output.push_str("f32"),
-        ty::TyFloat(ast::FloatTy::F64) => output.push_str("f64"),
-        ty::TyStruct(adt_def, substs) |
-        ty::TyEnum(adt_def, substs) => {
-            push_item_name(cx, adt_def.did, output);
-            push_type_params(cx, substs, &[], output);
-        },
-        ty::TyTuple(ref component_types) => {
-            output.push('(');
-            for &component_type in component_types {
-                push_unique_type_name(cx, component_type, output);
-                output.push_str(", ");
-            }
-            if !component_types.is_empty() {
-                output.pop();
-                output.pop();
-            }
-            output.push(')');
-        },
-        ty::TyBox(inner_type) => {
-            output.push_str("Box<");
-            push_unique_type_name(cx, inner_type, output);
-            output.push('>');
-        },
-        ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
-            output.push('*');
-            match mutbl {
-                hir::MutImmutable => output.push_str("const "),
-                hir::MutMutable => output.push_str("mut "),
-            }
-
-            push_unique_type_name(cx, inner_type, output);
-        },
-        ty::TyRef(_, ty::TypeAndMut { ty: inner_type, mutbl }) => {
-            output.push('&');
-            if mutbl == hir::MutMutable {
-                output.push_str("mut ");
-            }
-
-            push_unique_type_name(cx, inner_type, output);
-        },
-        ty::TyArray(inner_type, len) => {
-            output.push('[');
-            push_unique_type_name(cx, inner_type, output);
-            output.push_str(&format!("; {}", len));
-            output.push(']');
-        },
-        ty::TySlice(inner_type) => {
-            output.push('[');
-            push_unique_type_name(cx, inner_type, output);
-            output.push(']');
-        },
-        ty::TyTrait(ref trait_data) => {
-            push_item_name(cx, trait_data.principal.skip_binder().def_id, output);
-            push_type_params(cx,
-                             &trait_data.principal.skip_binder().substs,
-                             &trait_data.bounds.projection_bounds,
-                             output);
-        },
-        ty::TyBareFn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => {
-            if unsafety == hir::Unsafety::Unsafe {
-                output.push_str("unsafe ");
-            }
-
-            if abi != ::syntax::abi::Abi::Rust {
-                output.push_str("extern \"");
-                output.push_str(abi.name());
-                output.push_str("\" ");
-            }
-
-            output.push_str("fn(");
-
-            let sig = cx.tcx().erase_late_bound_regions(sig);
-            if !sig.inputs.is_empty() {
-                for &parameter_type in &sig.inputs {
-                    push_unique_type_name(cx, parameter_type, output);
-                    output.push_str(", ");
-                }
-                output.pop();
-                output.pop();
-            }
-
-            if sig.variadic {
-                if !sig.inputs.is_empty() {
-                    output.push_str(", ...");
-                } else {
-                    output.push_str("...");
-                }
-            }
-
-            output.push(')');
-
-            match sig.output {
-                ty::FnConverging(result_type) if result_type.is_nil() => {}
-                ty::FnConverging(result_type) => {
-                    output.push_str(" -> ");
-                    push_unique_type_name(cx, result_type, output);
-                }
-                ty::FnDiverging => {
-                    output.push_str(" -> !");
-                }
-            }
-        },
-        ty::TyClosure(def_id, ref closure_substs) => {
-            push_item_name(cx, def_id, output);
-            output.push_str("{");
-            output.push_str(&format!("{}:{}", def_id.krate, def_id.index.as_usize()));
-            output.push_str("}");
-            push_type_params(cx, closure_substs.func_substs, &[], output);
-        }
-        ty::TyError |
-        ty::TyInfer(_) |
-        ty::TyProjection(..) |
-        ty::TyParam(_) => {
-            cx.sess().bug(&format!("debuginfo: Trying to create type name for \
-                unexpected type: {:?}", t));
-        }
-    }
-}
-
-fn push_item_name(ccx: &CrateContext,
-                  def_id: DefId,
-                  output: &mut String) {
-    if def_id.is_local() {
-        let node_id = ccx.tcx().map.as_local_node_id(def_id).unwrap();
-        let inlined_from = ccx.external_srcs()
-                              .borrow()
-                              .get(&node_id)
-                              .map(|def_id| *def_id);
-
-        if let Some(extern_def_id) = inlined_from {
-            push_item_name(ccx, extern_def_id, output);
-            return;
-        }
-
-        output.push_str(&ccx.link_meta().crate_name);
-        output.push_str("::");
-    }
-
-    for part in ccx.tcx().def_path(def_id) {
-        output.push_str(&format!("{}[{}]::",
-                        part.data.as_interned_str(),
-                        part.disambiguator));
-    }
-
-    output.pop();
-    output.pop();
-}
-
-fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                              substs: &Substs<'tcx>,
-                              projections: &[ty::PolyProjectionPredicate<'tcx>],
-                              output: &mut String) {
-    if substs.types.is_empty() && projections.is_empty() {
-        return;
-    }
-
-    output.push('<');
-
-    for &type_parameter in &substs.types {
-        push_unique_type_name(cx, type_parameter, output);
-        output.push_str(", ");
-    }
-
-    for projection in projections {
-        let projection = projection.skip_binder();
-        let name = token::get_ident_interner().get(projection.projection_ty.item_name);
-        output.push_str(&name[..]);
-        output.push_str("=");
-        push_unique_type_name(cx, projection.ty, output);
-        output.push_str(", ");
-    }
-
-    output.pop();
-    output.pop();
-
-    output.push('>');
-}
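-// A minimal, standalone sketch of the "trim the trailing separator" pattern used
-// above (the helper `angle_list` is hypothetical and only for illustration):
-fn angle_list(parts: &[&str]) -> String {
-    // Hypothetical helper; assumes `parts` is non-empty, as the early return
-    // above guarantees for the real code.
-    let mut out = String::from("<");
-    for p in parts {
-        out.push_str(p);
-        out.push_str(", ");
-    }
-    out.pop(); // drop the trailing ' '
-    out.pop(); // drop the trailing ','
-    out.push('>');
-    out // angle_list(&["u32", "String"]) == "<u32, String>"
-}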
-
-fn push_def_id_as_string<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                              def_id: DefId,
-                              substs: Option<&Substs<'tcx>>,
-                              output: &mut String) {
-    push_item_name(ccx, def_id, output);
-
-    if let Some(substs) = substs {
-        push_type_params(ccx, substs, &[], output);
-    }
-}
-
-fn def_id_to_string<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                              def_id: DefId,
-                              substs: Option<&Substs<'tcx>>)
-                              -> String {
-    let mut output = String::new();
-    push_def_id_as_string(ccx, def_id, substs, &mut output);
-    output
-}
-
-fn type_to_string<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                            ty: ty::Ty<'tcx>)
-                            -> String {
-    let mut output = String::new();
-    push_unique_type_name(ccx, ty, &mut output);
-    output
-}
-
-impl<'tcx> TransItem<'tcx> {
-
-    pub fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String {
-        let hir_map = &ccx.tcx().map;
-
-        return match *self {
-            TransItem::DropGlue(t) => {
-                let mut s = String::with_capacity(32);
-                s.push_str("drop-glue ");
-                push_unique_type_name(ccx, t, &mut s);
-                s
-            }
-            TransItem::Fn { def_id, ref substs } => {
-                to_string_internal(ccx, "fn ", def_id, Some(substs))
-            },
-            TransItem::Static(node_id) => {
-                let def_id = hir_map.local_def_id(node_id);
-                to_string_internal(ccx, "static ", def_id, None)
-            },
-        };
-
-        fn to_string_internal<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                        prefix: &str,
-                                        def_id: DefId,
-                                        substs: Option<&Substs<'tcx>>)
-                                        -> String {
-            let mut result = String::with_capacity(32);
-            result.push_str(prefix);
-            push_def_id_as_string(ccx, def_id, substs, &mut result);
-            result
-        }
-    }
-
-    fn to_raw_string(&self) -> String {
-        match *self {
-            TransItem::DropGlue(t) => {
-                format!("DropGlue({})", t as *const _ as usize)
-            }
-            TransItem::Fn { def_id, substs } => {
-                format!("Fn({:?}, {})",
-                         def_id,
-                         substs as *const _ as usize)
-            }
-            TransItem::Static(id) => {
-                format!("Static({:?})", id)
-            }
-        }
-    }
-}
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
-pub enum TransItemState {
-    PredictedAndGenerated,
-    PredictedButNotGenerated,
-    NotPredictedButGenerated,
-}
-
-pub fn collecting_debug_information(ccx: &CrateContext) -> bool {
-    return cfg!(debug_assertions) &&
-           ccx.sess().opts.debugging_opts.print_trans_items.is_some();
-}
-
-pub fn print_collection_results<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>) {
-    use std::hash::{Hash, SipHasher, Hasher};
-
-    if !collecting_debug_information(ccx) {
-        return;
-    }
-
-    fn hash<T: Hash>(t: &T) -> u64 {
-        let mut s = SipHasher::new();
-        t.hash(&mut s);
-        s.finish()
-    }
-
-    let trans_items = ccx.translation_items().borrow();
-
-    {
-        // Check for duplicate item keys
-        let mut item_keys = FnvHashMap();
-
-        for (item, item_state) in trans_items.iter() {
-            let k = item.to_string(&ccx);
-
-            if item_keys.contains_key(&k) {
-                let prev: (TransItem, TransItemState) = item_keys[&k];
-                debug!("DUPLICATE KEY: {}", k);
-                debug!(" (1) {:?}, {:?}, hash: {}, raw: {}",
-                       prev.0,
-                       prev.1,
-                       hash(&prev.0),
-                       prev.0.to_raw_string());
-
-                debug!(" (2) {:?}, {:?}, hash: {}, raw: {}",
-                       *item,
-                       *item_state,
-                       hash(item),
-                       item.to_raw_string());
-            } else {
-                item_keys.insert(k, (*item, *item_state));
-            }
-        }
-    }
-
-    let mut predicted_but_not_generated = FnvHashSet();
-    let mut not_predicted_but_generated = FnvHashSet();
-    let mut predicted = FnvHashSet();
-    let mut generated = FnvHashSet();
-
-    for (item, item_state) in trans_items.iter() {
-        let item_key = item.to_string(&ccx);
-
-        match *item_state {
-            TransItemState::PredictedAndGenerated => {
-                predicted.insert(item_key.clone());
-                generated.insert(item_key);
-            }
-            TransItemState::PredictedButNotGenerated => {
-                predicted_but_not_generated.insert(item_key.clone());
-                predicted.insert(item_key);
-            }
-            TransItemState::NotPredictedButGenerated => {
-                not_predicted_but_generated.insert(item_key.clone());
-                generated.insert(item_key);
-            }
-        }
-    }
-
-    debug!("Total number of translation items predicted: {}", predicted.len());
-    debug!("Total number of translation items generated: {}", generated.len());
-    debug!("Total number of translation items predicted but not generated: {}",
-           predicted_but_not_generated.len());
-    debug!("Total number of translation items not predicted but generated: {}",
-           not_predicted_but_generated.len());
-
-    if generated.len() > 0 {
-        debug!("Failed to predict {}% of translation items",
-               (100 * not_predicted_but_generated.len()) / generated.len());
-    }
-    if generated.len() > 0 {
-        debug!("Predict {}% too many translation items",
-               (100 * predicted_but_not_generated.len()) / generated.len());
-    }
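-    // Worked example of the integer arithmetic above: with 200 items generated
-    // and 10 of them not predicted, the first message reports
-    // (100 * 10) / 200 = 5 percent; the division truncates, so the reported
-    // percentage is a floor.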
-
-    debug!("");
-    debug!("Not predicted but generated:");
-    debug!("============================");
-    for item in not_predicted_but_generated {
-        debug!(" - {}", item);
-    }
-
-    debug!("");
-    debug!("Predicted but not generated:");
-    debug!("============================");
-    for item in predicted_but_not_generated {
-        debug!(" - {}", item);
-    }
-}
diff --git a/src/librustc_trans/trans/common.rs b/src/librustc_trans/trans/common.rs
deleted file mode 100644 (file)
index 7f7de0e..0000000
+++ /dev/null
@@ -1,1355 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(non_camel_case_types, non_snake_case)]
-
-//! Code that is useful in various trans modules.
-
-pub use self::ExprOrMethodCall::*;
-
-use session::Session;
-use llvm;
-use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind};
-use llvm::{True, False, Bool, OperandBundleDef};
-use middle::cfg;
-use middle::def::Def;
-use middle::def_id::DefId;
-use middle::infer;
-use middle::lang_items::LangItem;
-use middle::subst::{self, Substs};
-use trans::base;
-use trans::build;
-use trans::builder::Builder;
-use trans::callee;
-use trans::cleanup;
-use trans::consts;
-use trans::datum;
-use trans::debuginfo::{self, DebugLoc};
-use trans::declare;
-use trans::machine;
-use trans::monomorphize;
-use trans::type_::Type;
-use trans::type_of;
-use middle::traits;
-use middle::ty::{self, Ty};
-use middle::ty::fold::{TypeFolder, TypeFoldable};
-use rustc_front::hir;
-use rustc::mir::repr::Mir;
-use util::nodemap::{FnvHashMap, NodeMap};
-
-use arena::TypedArena;
-use libc::{c_uint, c_char};
-use std::ops::Deref;
-use std::ffi::CString;
-use std::cell::{Cell, RefCell};
-use std::vec::Vec;
-use syntax::ast;
-use syntax::codemap::{DUMMY_SP, Span};
-use syntax::parse::token::InternedString;
-use syntax::parse::token;
-
-pub use trans::context::CrateContext;
-
-/// Is the type's representation size known at compile time?
-pub fn type_is_sized<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool {
-    ty.is_sized(&tcx.empty_parameter_environment(), DUMMY_SP)
-}
-
-pub fn type_is_fat_ptr<'tcx>(cx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool {
-    match ty.sty {
-        ty::TyRawPtr(ty::TypeAndMut{ty, ..}) |
-        ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
-        ty::TyBox(ty) => {
-            !type_is_sized(cx, ty)
-        }
-        _ => {
-            false
-        }
-    }
-}
-
-fn type_is_newtype_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
-    match ty.sty {
-        ty::TyStruct(def, substs) => {
-            let fields = &def.struct_variant().fields;
-            fields.len() == 1 && {
-                type_is_immediate(ccx, monomorphize::field_ty(ccx.tcx(), substs, &fields[0]))
-            }
-        }
-        _ => false
-    }
-}
-
-pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
-    use trans::machine::llsize_of_alloc;
-    use trans::type_of::sizing_type_of;
-
-    let tcx = ccx.tcx();
-    let simple = ty.is_scalar() ||
-        ty.is_unique() || ty.is_region_ptr() ||
-        type_is_newtype_immediate(ccx, ty) ||
-        ty.is_simd();
-    if simple && !type_is_fat_ptr(tcx, ty) {
-        return true;
-    }
-    if !type_is_sized(tcx, ty) {
-        return false;
-    }
-    match ty.sty {
-        ty::TyStruct(..) | ty::TyEnum(..) | ty::TyTuple(..) | ty::TyArray(_, _) |
-        ty::TyClosure(..) => {
-            let llty = sizing_type_of(ccx, ty);
-            llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type())
-        }
-        _ => type_is_zero_size(ccx, ty)
-    }
-}
-
-/// Identify types which have size zero at runtime.
-pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
-    use trans::machine::llsize_of_alloc;
-    use trans::type_of::sizing_type_of;
-    let llty = sizing_type_of(ccx, ty);
-    llsize_of_alloc(ccx, llty) == 0
-}
-
-/// Identifies types which we declare to be equivalent to `void` in C for the purpose of function
-/// return types. These are `()`, the bottom type `!`, uninhabited enums, and all
-/// other zero-sized types.
-pub fn return_type_is_void<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
-    ty.is_nil() || ty.is_empty(ccx.tcx()) || type_is_zero_size(ccx, ty)
-}
-
-/// Generates a unique symbol based off the name given. This is used to create
-/// unique symbols for things like closures.
-pub fn gensym_name(name: &str) -> ast::Name {
-    let num = token::gensym(name).0;
-    // Use one colon, which will get translated to a period by the mangler;
-    // `num` is guaranteed to be globally unique for this crate.
-    token::gensym(&format!("{}:{}", name, num))
-}
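-// A minimal sketch of the same idea -- making a name unique by appending a
-// monotonically increasing number -- using only the standard library (the
-// helper `unique_name` is hypothetical; the real code above goes through the
-// string interner instead of a plain counter):
-fn unique_name(base: &str, counter: &std::sync::atomic::AtomicUsize) -> String {
-    // Hypothetical helper for illustration only.
-    let n = counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
-    format!("{}:{}", base, n)
-}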
-
-/*
-* A note on nomenclature of linking: "extern", "foreign", and "upcall".
-*
-* An "extern" is an LLVM symbol we wind up emitting an undefined external
-* reference to. This means "we don't have the thing in this compilation unit,
-* please make sure you link it in at runtime". This could be a reference to
-* C code found in a C library, or Rust code found in a Rust crate.
-*
-* Most "externs" are implicitly declared (automatically) as a result of a
-* user declaring an extern _module_ dependency; this causes the Rust driver
-* to locate an extern crate, scan its compilation metadata, and emit extern
-* declarations for any symbols used by the declaring crate.
-*
-* A "foreign" is an extern that references C (or other non-rust ABI) code.
-* There is no metadata to scan for extern references so in these cases either
-* a header-digester like bindgen, or manual function prototypes, have to
-* serve as declarators. So these are usually given explicitly as prototype
-* declarations, in rust code, with ABI attributes on them noting which ABI to
-* link via.
-*
-* An "upcall" is a foreign call generated by the compiler (not corresponding
-* to any user-written call in the code) into the runtime library, to perform
-* some helper task such as bringing a task to life, allocating memory, etc.
-*
-*/
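-// For instance, the "foreign" case above corresponds to an explicit prototype
-// declaration with an ABI attribute in Rust source, roughly like the following
-// (illustrative sketch only; this block is not otherwise used by this module,
-// and it reuses the `c_char` imported at the top of the file):
-extern "C" {
-    // The C library's strlen, declared manually since there is no metadata to scan.
-    fn strlen(s: *const c_char) -> usize;
-}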
-
-use trans::Disr;
-
-#[derive(Copy, Clone)]
-pub struct NodeIdAndSpan {
-    pub id: ast::NodeId,
-    pub span: Span,
-}
-
-pub fn expr_info(expr: &hir::Expr) -> NodeIdAndSpan {
-    NodeIdAndSpan { id: expr.id, span: expr.span }
-}
-
-/// The concrete version of ty::FieldDef. The name is the field index if
-/// the field is numeric.
-pub struct Field<'tcx>(pub ast::Name, pub Ty<'tcx>);
-
-/// The concrete version of ty::VariantDef
-pub struct VariantInfo<'tcx> {
-    pub discr: Disr,
-    pub fields: Vec<Field<'tcx>>
-}
-
-impl<'tcx> VariantInfo<'tcx> {
-    pub fn from_ty(tcx: &ty::ctxt<'tcx>,
-                   ty: Ty<'tcx>,
-                   opt_def: Option<Def>)
-                   -> Self
-    {
-        match ty.sty {
-            ty::TyStruct(adt, substs) | ty::TyEnum(adt, substs) => {
-                let variant = match opt_def {
-                    None => adt.struct_variant(),
-                    Some(def) => adt.variant_of_def(def)
-                };
-
-                VariantInfo {
-                    discr: Disr::from(variant.disr_val),
-                    fields: variant.fields.iter().map(|f| {
-                        Field(f.name, monomorphize::field_ty(tcx, substs, f))
-                    }).collect()
-                }
-            }
-
-            ty::TyTuple(ref v) => {
-                VariantInfo {
-                    discr: Disr(0),
-                    fields: v.iter().enumerate().map(|(i, &t)| {
-                        Field(token::intern(&i.to_string()), t)
-                    }).collect()
-                }
-            }
-
-            _ => {
-                tcx.sess.bug(&format!(
-                    "cannot get field types from the type {:?}",
-                    ty));
-            }
-        }
-    }
-
-    /// Return the variant corresponding to a given node (e.g. expr)
-    pub fn of_node(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, id: ast::NodeId) -> Self {
-        let node_def = tcx.def_map.borrow().get(&id).map(|v| v.full_def());
-        Self::from_ty(tcx, ty, node_def)
-    }
-
-    pub fn field_index(&self, name: ast::Name) -> usize {
-        self.fields.iter().position(|&Field(n,_)| n == name).unwrap_or_else(|| {
-            panic!("unknown field `{}`", name)
-        })
-    }
-}
-
-pub struct BuilderRef_res {
-    pub b: BuilderRef,
-}
-
-impl Drop for BuilderRef_res {
-    fn drop(&mut self) {
-        unsafe {
-            llvm::LLVMDisposeBuilder(self.b);
-        }
-    }
-}
-
-pub fn BuilderRef_res(b: BuilderRef) -> BuilderRef_res {
-    BuilderRef_res {
-        b: b
-    }
-}
-
-pub type ExternMap = FnvHashMap<String, ValueRef>;
-
-pub fn validate_substs(substs: &Substs) {
-    assert!(!substs.types.needs_infer());
-}
-
-// work around bizarre resolve errors
-type RvalueDatum<'tcx> = datum::Datum<'tcx, datum::Rvalue>;
-pub type LvalueDatum<'tcx> = datum::Datum<'tcx, datum::Lvalue>;
-
-#[derive(Clone, Debug)]
-struct HintEntry<'tcx> {
-    // The datum for the dropflag-hint itself; note that many
-    // source-level Lvalues will be associated with the same
-    // dropflag-hint datum.
-    datum: cleanup::DropHintDatum<'tcx>,
-}
-
-pub struct DropFlagHintsMap<'tcx> {
-    // Maps the NodeId of expressions that read/write unfragmented state
-    // to that state's drop-flag "hint." (A stack-local hint indicates
-    // either that (1) it is certain that no drop is needed, or (2) the
-    // inline drop-flag must be consulted.)
-    node_map: NodeMap<HintEntry<'tcx>>,
-}
-
-impl<'tcx> DropFlagHintsMap<'tcx> {
-    pub fn new() -> DropFlagHintsMap<'tcx> { DropFlagHintsMap { node_map: NodeMap() } }
-    pub fn has_hint(&self, id: ast::NodeId) -> bool { self.node_map.contains_key(&id) }
-    pub fn insert(&mut self, id: ast::NodeId, datum: cleanup::DropHintDatum<'tcx>) {
-        self.node_map.insert(id, HintEntry { datum: datum });
-    }
-    pub fn hint_datum(&self, id: ast::NodeId) -> Option<cleanup::DropHintDatum<'tcx>> {
-        self.node_map.get(&id).map(|t|t.datum)
-    }
-}
-
-// Function context.  Every LLVM function we create will have one of
-// these.
-pub struct FunctionContext<'a, 'tcx: 'a> {
-    // The MIR for this function. At present, this is optional because
-    // we only have MIR available for things that are local to the
-    // crate.
-    pub mir: Option<&'a Mir<'tcx>>,
-
-    // The ValueRef returned from a call to llvm::LLVMAddFunction; the
-    // address of the first instruction in the sequence of
-    // instructions for this function that will go in the .text
-    // section of the executable we're generating.
-    pub llfn: ValueRef,
-
-    // Always an empty parameter environment. NOTE (@jroesch): another use of ParamEnv.
-    pub param_env: ty::ParameterEnvironment<'a, 'tcx>,
-
-    // The environment argument in a closure.
-    pub llenv: Option<ValueRef>,
-
-    // A pointer to where to store the return value. If the return type is
-    // immediate, this points to an alloca in the function. Otherwise, it's a
-    // pointer to the hidden first parameter of the function. After function
-    // construction, this should always be Some.
-    pub llretslotptr: Cell<Option<ValueRef>>,
-
-    // These pub elements are "hoisted basic blocks": blocks containing
-    // administrative activities that have to happen in only one place in
-    // the function, due to LLVM's quirks.
-    // A marker for the place where we want to insert the function's static
-    // allocas, so that LLVM will coalesce them into a single alloca call.
-    pub alloca_insert_pt: Cell<Option<ValueRef>>,
-    pub llreturn: Cell<Option<BasicBlockRef>>,
-
-    // If the function has any nested returns, including something like:
-    // fn foo() -> Option<Foo> { Some(Foo { x: return None }) }, then
-    // we use a separate alloca for each return.
-    pub needs_ret_allocas: bool,
-
-    // When working with landingpad-based exceptions this value is alloca'd and
-    // later loaded when using the resume instruction. This ends up being
-    // critical to chaining landing pads and reusing already-translated
-    // cleanups.
-    //
-    // Note that for cleanuppad-based exceptions this is not used.
-    pub landingpad_alloca: Cell<Option<ValueRef>>,
-
-    // True if the caller expects this fn to use the out pointer to
-    // return. Either way, your code should write into the slot llretslotptr
-    // points to, but if this value is false, that slot will be a local alloca.
-    pub caller_expects_out_pointer: bool,
-
-    // Maps the DefId's for local variables to the allocas created for
-    // them in llallocas.
-    pub lllocals: RefCell<NodeMap<LvalueDatum<'tcx>>>,
-
-    // Same as above, but for closure upvars
-    pub llupvars: RefCell<NodeMap<ValueRef>>,
-
-    // Carries info about drop-flags for local bindings (longer term,
-    // paths) for the code being compiled.
-    pub lldropflag_hints: RefCell<DropFlagHintsMap<'tcx>>,
-
-    // The NodeId of the function, or -1 if it doesn't correspond to
-    // a user-defined function.
-    pub id: ast::NodeId,
-
-    // If this function is being monomorphized, this contains the type
-    // substitutions used.
-    pub param_substs: &'tcx Substs<'tcx>,
-
-    // The source span and nesting context where this function comes from, for
-    // error reporting and symbol generation.
-    pub span: Option<Span>,
-
-    // The arena that blocks are allocated from.
-    pub block_arena: &'a TypedArena<BlockS<'a, 'tcx>>,
-
-    // The arena that landing pads are allocated from.
-    pub lpad_arena: TypedArena<LandingPad>,
-
-    // This function's enclosing crate context.
-    pub ccx: &'a CrateContext<'a, 'tcx>,
-
-    // Used and maintained by the debuginfo module.
-    pub debug_context: debuginfo::FunctionDebugContext,
-
-    // Cleanup scopes.
-    pub scopes: RefCell<Vec<cleanup::CleanupScope<'a, 'tcx>>>,
-
-    pub cfg: Option<cfg::CFG>,
-}
-
-impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
-    pub fn mir(&self) -> &'a Mir<'tcx> {
-        self.mir.unwrap()
-    }
-
-    pub fn arg_offset(&self) -> usize {
-        self.env_arg_pos() + if self.llenv.is_some() { 1 } else { 0 }
-    }
-
-    pub fn env_arg_pos(&self) -> usize {
-        if self.caller_expects_out_pointer {
-            1
-        } else {
-            0
-        }
-    }
-
-    pub fn cleanup(&self) {
-        unsafe {
-            llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt
-                                                     .get()
-                                                     .unwrap());
-        }
-    }
-
-    pub fn get_llreturn(&self) -> BasicBlockRef {
-        if self.llreturn.get().is_none() {
-
-            self.llreturn.set(Some(unsafe {
-                llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), self.llfn,
-                                                    "return\0".as_ptr() as *const _)
-            }))
-        }
-
-        self.llreturn.get().unwrap()
-    }
-
-    pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>,
-                        output: ty::FnOutput<'tcx>,
-                        name: &str) -> ValueRef {
-        if self.needs_ret_allocas {
-            base::alloca(bcx, match output {
-                ty::FnConverging(output_type) => type_of::type_of(bcx.ccx(), output_type),
-                ty::FnDiverging => Type::void(bcx.ccx())
-            }, name)
-        } else {
-            self.llretslotptr.get().unwrap()
-        }
-    }
-
-    pub fn new_block(&'a self,
-                     name: &str,
-                     opt_node_id: Option<ast::NodeId>)
-                     -> Block<'a, 'tcx> {
-        unsafe {
-            let name = CString::new(name).unwrap();
-            let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
-                                                           self.llfn,
-                                                           name.as_ptr());
-            BlockS::new(llbb, opt_node_id, self)
-        }
-    }
-
-    pub fn new_id_block(&'a self,
-                        name: &str,
-                        node_id: ast::NodeId)
-                        -> Block<'a, 'tcx> {
-        self.new_block(name, Some(node_id))
-    }
-
-    pub fn new_temp_block(&'a self,
-                          name: &str)
-                          -> Block<'a, 'tcx> {
-        self.new_block(name, None)
-    }
-
-    pub fn join_blocks(&'a self,
-                       id: ast::NodeId,
-                       in_cxs: &[Block<'a, 'tcx>])
-                       -> Block<'a, 'tcx> {
-        let out = self.new_id_block("join", id);
-        let mut reachable = false;
-        for bcx in in_cxs {
-            if !bcx.unreachable.get() {
-                build::Br(*bcx, out.llbb, DebugLoc::None);
-                reachable = true;
-            }
-        }
-        if !reachable {
-            build::Unreachable(out);
-        }
-        return out;
-    }
-
-    pub fn monomorphize<T>(&self, value: &T) -> T
-        where T : TypeFoldable<'tcx>
-    {
-        monomorphize::apply_param_substs(self.ccx.tcx(),
-                                         self.param_substs,
-                                         value)
-    }
-
-    /// This is the same as `common::type_needs_drop`, except that it
-    /// may use or update caches within this `FunctionContext`.
-    pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
-        self.ccx.tcx().type_needs_drop_given_env(ty, &self.param_env)
-    }
-
-    pub fn eh_personality(&self) -> ValueRef {
-        // The exception handling personality function.
-        //
-        // If our compilation unit has the `eh_personality` lang item somewhere
-        // within it, then we just need to translate that. Otherwise, we're
-        // building an rlib which will depend on some upstream implementation of
-        // this function, so we just codegen a generic reference to it. We don't
-        // specify any of the types for the function, we just make it a symbol
-        // that LLVM can later use.
-        //
-        // Note that MSVC is a little special here in that we don't use the
-        // `eh_personality` lang item at all. Currently LLVM has support for
-        // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
-        // *name of the personality function* to decide what kind of unwind side
-        // tables/landing pads to emit. It looks like Dwarf is used by default,
-        // injecting a dependency on the `_Unwind_Resume` symbol for resuming
-        // an "exception", but for MSVC we want to force SEH. This means that we
-        // can't actually have the personality function be our standard
-        // `rust_eh_personality` function, but rather we wire it up to the
-        // CRT's custom personality function, which forces LLVM to consider
-        // landing pads as "landing pads for SEH".
-        let target = &self.ccx.sess().target.target;
-        match self.ccx.tcx().lang_items.eh_personality() {
-            Some(def_id) if !base::wants_msvc_seh(self.ccx.sess()) => {
-                callee::trans_fn_ref(self.ccx, def_id, ExprId(0),
-                                     self.param_substs).val
-            }
-            _ => {
-                let mut personality = self.ccx.eh_personality().borrow_mut();
-                match *personality {
-                    Some(llpersonality) => llpersonality,
-                    None => {
-                        let name = if !base::wants_msvc_seh(self.ccx.sess()) {
-                            "rust_eh_personality"
-                        } else if target.arch == "x86" {
-                            "_except_handler3"
-                        } else {
-                            "__C_specific_handler"
-                        };
-                        let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
-                        let f = declare::declare_cfn(self.ccx, name, fty,
-                                                     self.ccx.tcx().types.i32);
-                        *personality = Some(f);
-                        f
-                    }
-                }
-            }
-        }
-    }
-
-    // Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined,
-    // otherwise declares it as an external function.
-    pub fn eh_unwind_resume(&self) -> ValueRef {
-        use trans::attributes;
-        assert!(self.ccx.sess().target.target.options.custom_unwind_resume);
-        match self.ccx.tcx().lang_items.eh_unwind_resume() {
-            Some(def_id) => {
-                callee::trans_fn_ref(self.ccx, def_id, ExprId(0),
-                                     self.param_substs).val
-            }
-            None => {
-                let mut unwresume = self.ccx.eh_unwind_resume().borrow_mut();
-                match *unwresume {
-                    Some(llfn) => llfn,
-                    None => {
-                        let fty = Type::func(&[Type::i8p(self.ccx)], &Type::void(self.ccx));
-                        let llfn = declare::declare_fn(self.ccx,
-                                                       "rust_eh_unwind_resume",
-                                                       llvm::CCallConv,
-                                                       fty, ty::FnDiverging);
-                        attributes::unwind(llfn, true);
-                        *unwresume = Some(llfn);
-                        llfn
-                    }
-                }
-            }
-        }
-    }
-}
-
-// Basic block context.  We create a block context for each basic block
-// (single-entry, single-exit sequence of instructions) we generate from Rust
-// code.  Each basic block we generate is attached to a function, typically
-// with many basic blocks per function.  All the basic blocks attached to a
-// function are organized as a directed graph.
-pub struct BlockS<'blk, 'tcx: 'blk> {
-    // The BasicBlockRef returned from a call to
-    // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
-    // block to the function pointed to by llfn.  We insert
-    // instructions into that block by way of this block context.
-    // The block pointing to this one in the function's digraph.
-    pub llbb: BasicBlockRef,
-    pub terminated: Cell<bool>,
-    pub unreachable: Cell<bool>,
-
-    // If this block is part of a landing pad, then this is `Some`, indicating
-    // what kind of landing pad it's in; otherwise this is `None`.
-    pub lpad: Cell<Option<&'blk LandingPad>>,
-
-    // AST node-id associated with this block, if any. Used for
-    // debugging purposes only.
-    pub opt_node_id: Option<ast::NodeId>,
-
-    // The function context for the function to which this block is
-    // attached.
-    pub fcx: &'blk FunctionContext<'blk, 'tcx>,
-}
-
-pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>;
-
-impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
-    pub fn new(llbb: BasicBlockRef,
-               opt_node_id: Option<ast::NodeId>,
-               fcx: &'blk FunctionContext<'blk, 'tcx>)
-               -> Block<'blk, 'tcx> {
-        fcx.block_arena.alloc(BlockS {
-            llbb: llbb,
-            terminated: Cell::new(false),
-            unreachable: Cell::new(false),
-            lpad: Cell::new(None),
-            opt_node_id: opt_node_id,
-            fcx: fcx
-        })
-    }
-
-    pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
-        self.fcx.ccx
-    }
-    pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> {
-        self.fcx
-    }
-    pub fn tcx(&self) -> &'blk ty::ctxt<'tcx> {
-        self.fcx.ccx.tcx()
-    }
-    pub fn sess(&self) -> &'blk Session { self.fcx.ccx.sess() }
-
-    pub fn lpad(&self) -> Option<&'blk LandingPad> {
-        self.lpad.get()
-    }
-
-    pub fn mir(&self) -> &'blk Mir<'tcx> {
-        self.fcx.mir()
-    }
-
-    pub fn name(&self, name: ast::Name) -> String {
-        name.to_string()
-    }
-
-    pub fn node_id_to_string(&self, id: ast::NodeId) -> String {
-        self.tcx().map.node_to_string(id).to_string()
-    }
-
-    pub fn def(&self, nid: ast::NodeId) -> Def {
-        match self.tcx().def_map.borrow().get(&nid) {
-            Some(v) => v.full_def(),
-            None => {
-                self.tcx().sess.bug(&format!(
-                    "no def associated with node id {}", nid));
-            }
-        }
-    }
-
-    pub fn val_to_string(&self, val: ValueRef) -> String {
-        self.ccx().tn().val_to_string(val)
-    }
-
-    pub fn llty_str(&self, ty: Type) -> String {
-        self.ccx().tn().type_to_string(ty)
-    }
-
-    pub fn to_str(&self) -> String {
-        format!("[block {:p}]", self)
-    }
-
-    pub fn monomorphize<T>(&self, value: &T) -> T
-        where T : TypeFoldable<'tcx>
-    {
-        monomorphize::apply_param_substs(self.tcx(),
-                                         self.fcx.param_substs,
-                                         value)
-    }
-
-    pub fn build(&'blk self) -> BlockAndBuilder<'blk, 'tcx> {
-        BlockAndBuilder::new(self, OwnedBuilder::new_with_ccx(self.ccx()))
-    }
-}
-
-pub struct OwnedBuilder<'blk, 'tcx: 'blk> {
-    builder: Builder<'blk, 'tcx>
-}
-
-impl<'blk, 'tcx> OwnedBuilder<'blk, 'tcx> {
-    pub fn new_with_ccx(ccx: &'blk CrateContext<'blk, 'tcx>) -> Self {
-        // Create a fresh builder from the crate context.
-        let llbuilder = unsafe {
-            llvm::LLVMCreateBuilderInContext(ccx.llcx())
-        };
-        OwnedBuilder {
-            builder: Builder {
-                llbuilder: llbuilder,
-                ccx: ccx,
-            }
-        }
-    }
-}
-
-impl<'blk, 'tcx> Drop for OwnedBuilder<'blk, 'tcx> {
-    fn drop(&mut self) {
-        unsafe {
-            llvm::LLVMDisposeBuilder(self.builder.llbuilder);
-        }
-    }
-}
-
-pub struct BlockAndBuilder<'blk, 'tcx: 'blk> {
-    bcx: Block<'blk, 'tcx>,
-    owned_builder: OwnedBuilder<'blk, 'tcx>,
-}
-
-impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> {
-    pub fn new(bcx: Block<'blk, 'tcx>, owned_builder: OwnedBuilder<'blk, 'tcx>) -> Self {
-        // Set the builder's position to this block's end.
-        owned_builder.builder.position_at_end(bcx.llbb);
-        BlockAndBuilder {
-            bcx: bcx,
-            owned_builder: owned_builder,
-        }
-    }
-
-    pub fn with_block<F, R>(&self, f: F) -> R
-        where F: FnOnce(Block<'blk, 'tcx>) -> R
-    {
-        let result = f(self.bcx);
-        self.position_at_end(self.bcx.llbb);
-        result
-    }
-
-    pub fn map_block<F>(self, f: F) -> Self
-        where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>
-    {
-        let BlockAndBuilder { bcx, owned_builder } = self;
-        let bcx = f(bcx);
-        BlockAndBuilder::new(bcx, owned_builder)
-    }
-
-    // Methods delegated to bcx
-
-    pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
-        self.bcx.ccx()
-    }
-    pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> {
-        self.bcx.fcx()
-    }
-    pub fn tcx(&self) -> &'blk ty::ctxt<'tcx> {
-        self.bcx.tcx()
-    }
-    pub fn sess(&self) -> &'blk Session {
-        self.bcx.sess()
-    }
-
-    pub fn llbb(&self) -> BasicBlockRef {
-        self.bcx.llbb
-    }
-
-    pub fn mir(&self) -> &'blk Mir<'tcx> {
-        self.bcx.mir()
-    }
-
-    pub fn val_to_string(&self, val: ValueRef) -> String {
-        self.bcx.val_to_string(val)
-    }
-
-    pub fn monomorphize<T>(&self, value: &T) -> T
-        where T: TypeFoldable<'tcx>
-    {
-        self.bcx.monomorphize(value)
-    }
-
-    pub fn set_lpad(&self, lpad: Option<LandingPad>) {
-        self.bcx.lpad.set(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p)))
-    }
-}
-
-impl<'blk, 'tcx> Deref for BlockAndBuilder<'blk, 'tcx> {
-    type Target = Builder<'blk, 'tcx>;
-    fn deref(&self) -> &Self::Target {
-        &self.owned_builder.builder
-    }
-}
-
-/// A structure representing an active landing pad for the duration of a basic
-/// block.
-///
-/// Each `Block` may contain an instance of this, indicating whether the block
-/// is part of a landing pad or not. This is used to make decisions about whether
-/// to emit `invoke` instructions (e.g. in a landing pad we don't continue to
-/// use `invoke`) and also about various function call metadata.
-///
-/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is
-/// just a bunch of `None` instances (not too interesting), but for MSVC
-/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data.
-/// When inside of a landing pad, each function call in LLVM IR needs to be
-/// annotated with which landing pad it's a part of. This is accomplished via
-/// the `OperandBundleDef` value created for MSVC landing pads.
-pub struct LandingPad {
-    cleanuppad: Option<ValueRef>,
-    operand: Option<OperandBundleDef>,
-}
-
-impl LandingPad {
-    pub fn gnu() -> LandingPad {
-        LandingPad { cleanuppad: None, operand: None }
-    }
-
-    pub fn msvc(cleanuppad: ValueRef) -> LandingPad {
-        LandingPad {
-            cleanuppad: Some(cleanuppad),
-            operand: Some(OperandBundleDef::new("funclet", &[cleanuppad])),
-        }
-    }
-
-    pub fn bundle(&self) -> Option<&OperandBundleDef> {
-        self.operand.as_ref()
-    }
-}
-
-impl Clone for LandingPad {
-    fn clone(&self) -> LandingPad {
-        LandingPad {
-            cleanuppad: self.cleanuppad,
-            operand: self.cleanuppad.map(|p| {
-                OperandBundleDef::new("funclet", &[p])
-            }),
-        }
-    }
-}
-
-pub struct Result<'blk, 'tcx: 'blk> {
-    pub bcx: Block<'blk, 'tcx>,
-    pub val: ValueRef
-}
-
-impl<'b, 'tcx> Result<'b, 'tcx> {
-    pub fn new(bcx: Block<'b, 'tcx>, val: ValueRef) -> Result<'b, 'tcx> {
-        Result {
-            bcx: bcx,
-            val: val,
-        }
-    }
-}
-
-pub fn val_ty(v: ValueRef) -> Type {
-    unsafe {
-        Type::from_ref(llvm::LLVMTypeOf(v))
-    }
-}
-
-// LLVM constant constructors.
-pub fn C_null(t: Type) -> ValueRef {
-    unsafe {
-        llvm::LLVMConstNull(t.to_ref())
-    }
-}
-
-pub fn C_undef(t: Type) -> ValueRef {
-    unsafe {
-        llvm::LLVMGetUndef(t.to_ref())
-    }
-}
-
-pub fn C_integral(t: Type, u: u64, sign_extend: bool) -> ValueRef {
-    unsafe {
-        llvm::LLVMConstInt(t.to_ref(), u, sign_extend as Bool)
-    }
-}
-
-pub fn C_floating(s: &str, t: Type) -> ValueRef {
-    unsafe {
-        let s = CString::new(s).unwrap();
-        llvm::LLVMConstRealOfString(t.to_ref(), s.as_ptr())
-    }
-}
-
-pub fn C_floating_f64(f: f64, t: Type) -> ValueRef {
-    unsafe {
-        llvm::LLVMConstReal(t.to_ref(), f)
-    }
-}
-
-pub fn C_nil(ccx: &CrateContext) -> ValueRef {
-    C_struct(ccx, &[], false)
-}
-
-pub fn C_bool(ccx: &CrateContext, val: bool) -> ValueRef {
-    C_integral(Type::i1(ccx), val as u64, false)
-}
-
-pub fn C_i32(ccx: &CrateContext, i: i32) -> ValueRef {
-    C_integral(Type::i32(ccx), i as u64, true)
-}
-
-pub fn C_u32(ccx: &CrateContext, i: u32) -> ValueRef {
-    C_integral(Type::i32(ccx), i as u64, false)
-}
-
-pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef {
-    C_integral(Type::i64(ccx), i, false)
-}
-
-pub fn C_int<I: AsI64>(ccx: &CrateContext, i: I) -> ValueRef {
-    let v = i.as_i64();
-
-    let bit_size = machine::llbitsize_of_real(ccx, ccx.int_type());
-
-    if bit_size < 64 {
-        // make sure it doesn't overflow
-        assert!(v < (1<<(bit_size-1)) && v >= -(1<<(bit_size-1)));
-    }
-
-    C_integral(ccx.int_type(), v as u64, true)
-}
-
-pub fn C_uint<I: AsU64>(ccx: &CrateContext, i: I) -> ValueRef {
-    let v = i.as_u64();
-
-    let bit_size = machine::llbitsize_of_real(ccx, ccx.int_type());
-
-    if bit_size < 64 {
-        // make sure it doesn't overflow
-        assert!(v < (1<<bit_size));
-    }
-
-    C_integral(ccx.int_type(), v, false)
-}
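-// Standalone sketches of the two overflow checks asserted above (the helpers
-// `fits_in_signed` and `fits_in_unsigned` are hypothetical, for illustration):
-fn fits_in_signed(v: i64, bit_size: u32) -> bool {
-    // Hypothetical helper; mirrors the assert in C_int for bit_size in 1..64.
-    debug_assert!(bit_size > 0 && bit_size < 64);
-    // Signed range is [-2^(bit_size-1), 2^(bit_size-1) - 1].
-    v < (1i64 << (bit_size - 1)) && v >= -(1i64 << (bit_size - 1))
-}
-fn fits_in_unsigned(v: u64, bit_size: u32) -> bool {
-    // Hypothetical helper; mirrors the assert in C_uint for bit_size in 1..64.
-    debug_assert!(bit_size > 0 && bit_size < 64);
-    // Unsigned range is [0, 2^bit_size - 1].
-    v < (1u64 << bit_size)
-}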
-
-pub trait AsI64 { fn as_i64(self) -> i64; }
-pub trait AsU64 { fn as_u64(self) -> u64; }
-
-// FIXME: remove the intptr conversions, because they
-// are host-architecture-dependent
-impl AsI64 for i64 { fn as_i64(self) -> i64 { self as i64 }}
-impl AsI64 for i32 { fn as_i64(self) -> i64 { self as i64 }}
-impl AsI64 for isize { fn as_i64(self) -> i64 { self as i64 }}
-
-impl AsU64 for u64  { fn as_u64(self) -> u64 { self as u64 }}
-impl AsU64 for u32  { fn as_u64(self) -> u64 { self as u64 }}
-impl AsU64 for usize { fn as_u64(self) -> u64 { self as u64 }}
-
-pub fn C_u8(ccx: &CrateContext, i: u8) -> ValueRef {
-    C_integral(Type::i8(ccx), i as u64, false)
-}
-
-
-// This is a 'C-like' raw string, which differs from
-// our boxed-and-length-annotated strings.
-pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> ValueRef {
-    unsafe {
-        if let Some(&llval) = cx.const_cstr_cache().borrow().get(&s) {
-            return llval;
-        }
-
-        let sc = llvm::LLVMConstStringInContext(cx.llcx(),
-                                                s.as_ptr() as *const c_char,
-                                                s.len() as c_uint,
-                                                !null_terminated as Bool);
-
-        let gsym = token::gensym("str");
-        let sym = format!("str{}", gsym.0);
-        let g = declare::define_global(cx, &sym[..], val_ty(sc)).unwrap_or_else(||{
-            cx.sess().bug(&format!("symbol `{}` is already defined", sym));
-        });
-        llvm::LLVMSetInitializer(g, sc);
-        llvm::LLVMSetGlobalConstant(g, True);
-        llvm::SetLinkage(g, llvm::InternalLinkage);
-
-        cx.const_cstr_cache().borrow_mut().insert(s, g);
-        g
-    }
-}
-
-// NB: Do not use `do_spill_noroot` to make this into a constant string, or
-// you will be kicked off fast isel. See issue #4352 for an example of this.
-pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef {
-    let len = s.len();
-    let cs = consts::ptrcast(C_cstr(cx, s, false), Type::i8p(cx));
-    C_named_struct(cx.tn().find_type("str_slice").unwrap(), &[cs, C_uint(cx, len)])
-}
-
-pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
-    C_struct_in_context(cx.llcx(), elts, packed)
-}
-
-pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) -> ValueRef {
-    unsafe {
-        llvm::LLVMConstStructInContext(llcx,
-                                       elts.as_ptr(), elts.len() as c_uint,
-                                       packed as Bool)
-    }
-}
-
-pub fn C_named_struct(t: Type, elts: &[ValueRef]) -> ValueRef {
-    unsafe {
-        llvm::LLVMConstNamedStruct(t.to_ref(), elts.as_ptr(), elts.len() as c_uint)
-    }
-}
-
-pub fn C_array(ty: Type, elts: &[ValueRef]) -> ValueRef {
-    unsafe {
-        return llvm::LLVMConstArray(ty.to_ref(), elts.as_ptr(), elts.len() as c_uint);
-    }
-}
-
-pub fn C_vector(elts: &[ValueRef]) -> ValueRef {
-    unsafe {
-        return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint);
-    }
-}
-
-pub fn C_bytes(cx: &CrateContext, bytes: &[u8]) -> ValueRef {
-    C_bytes_in_context(cx.llcx(), bytes)
-}
-
-pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef {
-    unsafe {
-        let ptr = bytes.as_ptr() as *const c_char;
-        return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True);
-    }
-}
-
-pub fn const_get_elt(cx: &CrateContext, v: ValueRef, us: &[c_uint])
-              -> ValueRef {
-    unsafe {
-        let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
-
-        debug!("const_get_elt(v={}, us={:?}, r={})",
-               cx.tn().val_to_string(v), us, cx.tn().val_to_string(r));
-
-        return r;
-    }
-}
-
-pub fn const_to_int(v: ValueRef) -> i64 {
-    unsafe {
-        llvm::LLVMConstIntGetSExtValue(v)
-    }
-}
-
-pub fn const_to_uint(v: ValueRef) -> u64 {
-    unsafe {
-        llvm::LLVMConstIntGetZExtValue(v)
-    }
-}
-
-fn is_const_integral(v: ValueRef) -> bool {
-    unsafe {
-        !llvm::LLVMIsAConstantInt(v).is_null()
-    }
-}
-
-pub fn const_to_opt_int(v: ValueRef) -> Option<i64> {
-    unsafe {
-        if is_const_integral(v) {
-            Some(llvm::LLVMConstIntGetSExtValue(v))
-        } else {
-            None
-        }
-    }
-}
-
-pub fn const_to_opt_uint(v: ValueRef) -> Option<u64> {
-    unsafe {
-        if is_const_integral(v) {
-            Some(llvm::LLVMConstIntGetZExtValue(v))
-        } else {
-            None
-        }
-    }
-}
-
-pub fn is_undef(val: ValueRef) -> bool {
-    unsafe {
-        llvm::LLVMIsUndef(val) != False
-    }
-}
-
-#[allow(dead_code)] // potentially useful
-pub fn is_null(val: ValueRef) -> bool {
-    unsafe {
-        llvm::LLVMIsNull(val) != False
-    }
-}
-
-pub fn monomorphize_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> {
-    bcx.fcx.monomorphize(&t)
-}
-
-pub fn node_id_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, id: ast::NodeId) -> Ty<'tcx> {
-    let tcx = bcx.tcx();
-    let t = tcx.node_id_to_type(id);
-    monomorphize_type(bcx, t)
-}
-
-pub fn expr_ty<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> {
-    node_id_type(bcx, ex.id)
-}
-
-pub fn expr_ty_adjusted<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> {
-    monomorphize_type(bcx, bcx.tcx().expr_ty_adjusted(ex))
-}
-
-/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
-/// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
-/// guarantee to us that all nested obligations *could be* resolved if we wanted to.
-pub fn fulfill_obligation<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                    span: Span,
-                                    trait_ref: ty::PolyTraitRef<'tcx>)
-                                    -> traits::Vtable<'tcx, ()>
-{
-    let tcx = ccx.tcx();
-
-    // Remove any references to regions; this helps improve caching.
-    let trait_ref = tcx.erase_regions(&trait_ref);
-
-    // First check the cache.
-    match ccx.trait_cache().borrow().get(&trait_ref) {
-        Some(vtable) => {
-            info!("Cache hit: {:?}", trait_ref);
-            return (*vtable).clone();
-        }
-        None => { }
-    }
-
-    debug!("trans fulfill_obligation: trait_ref={:?} def_id={:?}",
-           trait_ref, trait_ref.def_id());
-
-
-    // Do the initial selection for the obligation. This yields the
-    // shallow result we are looking for -- that is, what specific impl.
-    let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables);
-    let mut selcx = traits::SelectionContext::new(&infcx);
-
-    let obligation =
-        traits::Obligation::new(traits::ObligationCause::misc(span, ast::DUMMY_NODE_ID),
-                                trait_ref.to_poly_trait_predicate());
-    let selection = match selcx.select(&obligation) {
-        Ok(Some(selection)) => selection,
-        Ok(None) => {
-            // Ambiguity can happen when monomorphizing during trans
-            // expands to some humongo type that never occurred
-            // statically -- this humongo type can then overflow,
-            // leading to an ambiguous result. So report this as an
-            // overflow bug, since I believe this is the only case
-            // where ambiguity can result.
-            debug!("Encountered ambiguity selecting `{:?}` during trans, \
-                    presuming due to overflow",
-                   trait_ref);
-            ccx.sess().span_fatal(
-                span,
-                "reached the recursion limit during monomorphization (selection ambiguity)");
-        }
-        Err(e) => {
-            tcx.sess.span_bug(
-                span,
-                &format!("Encountered error `{:?}` selecting `{:?}` during trans",
-                        e,
-                        trait_ref))
-        }
-    };
-
-    // Currently, we use a fulfillment context to completely resolve
-    // all nested obligations. This is because they can inform the
-    // inference of the impl's type parameters.
-    let mut fulfill_cx = traits::FulfillmentContext::new();
-    let vtable = selection.map(|predicate| {
-        fulfill_cx.register_predicate_obligation(&infcx, predicate);
-    });
-    let vtable = infer::drain_fulfillment_cx_or_panic(
-        span, &infcx, &mut fulfill_cx, &vtable
-    );
-
-    info!("Cache miss: {:?} => {:?}", trait_ref, vtable);
-
-    ccx.trait_cache().borrow_mut().insert(trait_ref, vtable.clone());
-
-    vtable
-}
-
-/// Normalizes the predicates and checks whether they hold.  If this
-/// returns false, then either normalization encountered an error or one
-/// of the predicates did not hold. Used when creating vtables to
-/// check for unsatisfiable methods.
-pub fn normalize_and_test_predicates<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                               predicates: Vec<ty::Predicate<'tcx>>)
-                                               -> bool
-{
-    debug!("normalize_and_test_predicates(predicates={:?})",
-           predicates);
-
-    let tcx = ccx.tcx();
-    let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables);
-    let mut selcx = traits::SelectionContext::new(&infcx);
-    let mut fulfill_cx = traits::FulfillmentContext::new();
-    let cause = traits::ObligationCause::dummy();
-    let traits::Normalized { value: predicates, obligations } =
-        traits::normalize(&mut selcx, cause.clone(), &predicates);
-    for obligation in obligations {
-        fulfill_cx.register_predicate_obligation(&infcx, obligation);
-    }
-    for predicate in predicates {
-        let obligation = traits::Obligation::new(cause.clone(), predicate);
-        fulfill_cx.register_predicate_obligation(&infcx, obligation);
-    }
-
-    infer::drain_fulfillment_cx(&infcx, &mut fulfill_cx, &()).is_ok()
-}
-
-// Key used to look up values supplied for type parameters in an expr.
-#[derive(Copy, Clone, PartialEq, Debug)]
-pub enum ExprOrMethodCall {
-    // Type parameters for a path like `None::<int>`
-    ExprId(ast::NodeId),
-
-    // Type parameters for a method call like `a.foo::<int>()`
-    MethodCallKey(ty::MethodCall)
-}
-
-pub fn node_id_substs<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                node: ExprOrMethodCall,
-                                param_substs: &subst::Substs<'tcx>)
-                                -> subst::Substs<'tcx> {
-    let tcx = ccx.tcx();
-
-    let substs = match node {
-        ExprId(id) => {
-            tcx.node_id_item_substs(id).substs
-        }
-        MethodCallKey(method_call) => {
-            tcx.tables.borrow().method_map[&method_call].substs.clone()
-        }
-    };
-
-    if substs.types.needs_infer() {
-        tcx.sess.bug(&format!("type parameters for node {:?} include inference types: {:?}",
-                              node, substs));
-    }
-
-    monomorphize::apply_param_substs(tcx,
-                                     param_substs,
-                                     &substs.erase_regions())
-}
-
-pub fn langcall(bcx: Block,
-                span: Option<Span>,
-                msg: &str,
-                li: LangItem)
-                -> DefId {
-    match bcx.tcx().lang_items.require(li) {
-        Ok(id) => id,
-        Err(s) => {
-            let msg = format!("{} {}", msg, s);
-            match span {
-                Some(span) => bcx.tcx().sess.span_fatal(span, &msg[..]),
-                None => bcx.tcx().sess.fatal(&msg[..]),
-            }
-        }
-    }
-}
-
-/// Return the VariantDef corresponding to an inlined variant node
-pub fn inlined_variant_def<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                     inlined_vid: ast::NodeId)
-                                     -> ty::VariantDef<'tcx>
-{
-
-    let ctor_ty = ccx.tcx().node_id_to_type(inlined_vid);
-    debug!("inlined_variant_def: ctor_ty={:?} inlined_vid={:?}", ctor_ty,
-           inlined_vid);
-    let adt_def = match ctor_ty.sty {
-        ty::TyBareFn(_, &ty::BareFnTy { sig: ty::Binder(ty::FnSig {
-            output: ty::FnConverging(ty), ..
-        }), ..}) => ty,
-        _ => ctor_ty
-    }.ty_adt_def().unwrap();
-    let inlined_vid_def_id = ccx.tcx().map.local_def_id(inlined_vid);
-    adt_def.variants.iter().find(|v| {
-        inlined_vid_def_id == v.did ||
-            ccx.external().borrow().get(&v.did) == Some(&Some(inlined_vid))
-    }).unwrap_or_else(|| {
-        ccx.sess().bug(&format!("no variant for {:?}::{}", adt_def, inlined_vid))
-    })
-}
-
-// To avoid UB from LLVM, these two functions mask RHS with an
-// appropriate mask unconditionally (i.e. the fallback behavior for
-// all shifts). For 32- and 64-bit types, this matches the semantics
-// of Java. (See related discussion on #1877 and #10183.)
-
-pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                          lhs: ValueRef,
-                                          rhs: ValueRef,
-                                          binop_debug_loc: DebugLoc) -> ValueRef {
-    let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShl, lhs, rhs);
-    // #1877, #10183: Ensure that input is always valid
-    let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
-    build::Shl(bcx, lhs, rhs, binop_debug_loc)
-}
-
-pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                          lhs_t: Ty<'tcx>,
-                                          lhs: ValueRef,
-                                          rhs: ValueRef,
-                                          binop_debug_loc: DebugLoc) -> ValueRef {
-    let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs);
-    // #1877, #10183: Ensure that input is always valid
-    let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
-    let is_signed = lhs_t.is_signed();
-    if is_signed {
-        build::AShr(bcx, lhs, rhs, binop_debug_loc)
-    } else {
-        build::LShr(bcx, lhs, rhs, binop_debug_loc)
-    }
-}
-
-fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                              rhs: ValueRef,
-                              debug_loc: DebugLoc) -> ValueRef {
-    let rhs_llty = val_ty(rhs);
-    build::And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc)
-}
-
-pub fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                              llty: Type,
-                              mask_llty: Type,
-                              invert: bool) -> ValueRef {
-    let kind = llty.kind();
-    match kind {
-        TypeKind::Integer => {
-            // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
-            let val = llty.int_width() - 1;
-            if invert {
-                C_integral(mask_llty, !val, true)
-            } else {
-                C_integral(mask_llty, val, false)
-            }
-        },
-        TypeKind::Vector => {
-            let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert);
-            build::VectorSplat(bcx, mask_llty.vector_length(), mask)
-        },
-        _ => panic!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
-    }
-}
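-// The effect of the masking above, for a scalar integer type, is that the shift
-// amount is reduced modulo the bit width before the LLVM shift is emitted, which
-// matches Java semantics for 32- and 64-bit types. A standalone sketch (the
-// helpers are hypothetical, for illustration only):
-fn masked_shl_u32(x: u32, shift: u32) -> u32 {
-    // 31 is the mask shift_mask_val builds for a 32-bit integer type.
-    x << (shift & 31)
-}
-fn masked_shr_u32(x: u32, shift: u32) -> u32 {
-    // An unsigned (logical) right shift; signed types use the arithmetic shift above.
-    x >> (shift & 31)
-}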
-
-pub fn get_static_val<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                            did: DefId,
-                            ty: Ty<'tcx>)
-                            -> ValueRef {
-    if let Some(node_id) = ccx.tcx().map.as_local_node_id(did) {
-        base::get_item_val(ccx, node_id)
-    } else {
-        base::get_extern_const(ccx, did, ty)
-    }
-}
diff --git a/src/librustc_trans/trans/consts.rs b/src/librustc_trans/trans/consts.rs
deleted file mode 100644 (file)
index 20da158..0000000
+++ /dev/null
@@ -1,1093 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-
-use back::abi;
-use llvm;
-use llvm::{ConstFCmp, ConstICmp, SetLinkage, SetUnnamedAddr};
-use llvm::{InternalLinkage, ValueRef, Bool, True};
-use middle::const_qualif::ConstQualif;
-use middle::cstore::LOCAL_CRATE;
-use middle::const_eval::{self, ConstVal, ConstEvalErr};
-use middle::const_eval::{const_int_checked_neg, const_uint_checked_neg};
-use middle::const_eval::{const_int_checked_add, const_uint_checked_add};
-use middle::const_eval::{const_int_checked_sub, const_uint_checked_sub};
-use middle::const_eval::{const_int_checked_mul, const_uint_checked_mul};
-use middle::const_eval::{const_int_checked_div, const_uint_checked_div};
-use middle::const_eval::{const_int_checked_rem, const_uint_checked_rem};
-use middle::const_eval::{const_int_checked_shl, const_uint_checked_shl};
-use middle::const_eval::{const_int_checked_shr, const_uint_checked_shr};
-use middle::def::Def;
-use middle::def_id::DefId;
-use trans::{adt, closure, debuginfo, expr, inline, machine};
-use trans::base::{self, push_ctxt};
-use trans::collector::{self, TransItem};
-use trans::common::{self, type_is_sized, ExprOrMethodCall, node_id_substs, C_nil, const_get_elt};
-use trans::common::{CrateContext, C_integral, C_floating, C_bool, C_str_slice, C_bytes, val_ty};
-use trans::common::{C_struct, C_undef, const_to_opt_int, const_to_opt_uint, VariantInfo, C_uint};
-use trans::common::{type_is_fat_ptr, Field, C_vector, C_array, C_null, ExprId, MethodCallKey};
-use trans::declare;
-use trans::monomorphize;
-use trans::type_::Type;
-use trans::type_of;
-use trans::Disr;
-use middle::subst::Substs;
-use middle::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer};
-use middle::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
-use middle::ty::{self, Ty};
-use middle::ty::cast::{CastTy,IntTy};
-use util::nodemap::NodeMap;
-
-use rustc_front::hir;
-
-use std::ffi::{CStr, CString};
-use std::borrow::Cow;
-use libc::c_uint;
-use syntax::ast::{self, LitKind};
-use syntax::attr;
-use syntax::parse::token;
-use syntax::ptr::P;
-
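-/// Optional map from `const fn` parameter pattern ids to the translated
-/// argument values, used when evaluating a `const fn` call.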
-pub type FnArgMap<'a> = Option<&'a NodeMap<ValueRef>>;
-
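-/// Translate a literal to an LLVM constant, consulting the type that type
-/// checking assigned to `e` for unsuffixed integer and float literals.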
-pub fn const_lit(cx: &CrateContext, e: &hir::Expr, lit: &ast::Lit)
-    -> ValueRef {
-    let _icx = push_ctxt("trans_lit");
-    debug!("const_lit: {:?}", lit);
-    match lit.node {
-        LitKind::Byte(b) => C_integral(Type::uint_from_ty(cx, ast::UintTy::U8), b as u64, false),
-        LitKind::Char(i) => C_integral(Type::char(cx), i as u64, false),
-        LitKind::Int(i, ast::LitIntType::Signed(t)) => {
-            C_integral(Type::int_from_ty(cx, t), i, true)
-        }
-        LitKind::Int(u, ast::LitIntType::Unsigned(t)) => {
-            C_integral(Type::uint_from_ty(cx, t), u, false)
-        }
-        LitKind::Int(i, ast::LitIntType::Unsuffixed) => {
-            let lit_int_ty = cx.tcx().node_id_to_type(e.id);
-            match lit_int_ty.sty {
-                ty::TyInt(t) => {
-                    C_integral(Type::int_from_ty(cx, t), i as u64, true)
-                }
-                ty::TyUint(t) => {
-                    C_integral(Type::uint_from_ty(cx, t), i as u64, false)
-                }
-                _ => cx.sess().span_bug(lit.span,
-                        &format!("integer literal has type {:?} (expected int \
-                                 or usize)",
-                                lit_int_ty))
-            }
-        }
-        LitKind::Float(ref fs, t) => {
-            C_floating(&fs, Type::float_from_ty(cx, t))
-        }
-        LitKind::FloatUnsuffixed(ref fs) => {
-            let lit_float_ty = cx.tcx().node_id_to_type(e.id);
-            match lit_float_ty.sty {
-                ty::TyFloat(t) => {
-                    C_floating(&fs, Type::float_from_ty(cx, t))
-                }
-                _ => {
-                    cx.sess().span_bug(lit.span,
-                        "floating point literal doesn't have the right type");
-                }
-            }
-        }
-        LitKind::Bool(b) => C_bool(cx, b),
-        LitKind::Str(ref s, _) => C_str_slice(cx, (*s).clone()),
-        LitKind::ByteStr(ref data) => {
-            addr_of(cx, C_bytes(cx, &data[..]), 1, "byte_str")
-        }
-    }
-}
-
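-/// Constant pointer cast of `val` to the LLVM type `ty`.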
-pub fn ptrcast(val: ValueRef, ty: Type) -> ValueRef {
-    unsafe {
-        llvm::LLVMConstPointerCast(val, ty.to_ref())
-    }
-}
-
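-/// Define a fresh internal global with the given alignment, initialized
-/// with `cv`, and return it.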
-fn addr_of_mut(ccx: &CrateContext,
-               cv: ValueRef,
-               align: machine::llalign,
-               kind: &str)
-               -> ValueRef {
-    unsafe {
-        // FIXME: this needs a better name-generation scheme, perhaps a simple global
-        // counter? The same goes for most other uses of gensym in trans.
-        let gsym = token::gensym("_");
-        let name = format!("{}{}", kind, gsym.0);
-        let gv = declare::define_global(ccx, &name[..], val_ty(cv)).unwrap_or_else(||{
-            ccx.sess().bug(&format!("symbol `{}` is already defined", name));
-        });
-        llvm::LLVMSetInitializer(gv, cv);
-        llvm::LLVMSetAlignment(gv, align);
-        SetLinkage(gv, InternalLinkage);
-        SetUnnamedAddr(gv, true);
-        gv
-    }
-}
-
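-/// Like `addr_of_mut`, but marks the global as constant and caches it in
-/// `const_globals`, so each constant value is only materialized once
-/// (upgrading the alignment of an existing global if necessary).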
-pub fn addr_of(ccx: &CrateContext,
-               cv: ValueRef,
-               align: machine::llalign,
-               kind: &str)
-               -> ValueRef {
-    match ccx.const_globals().borrow().get(&cv) {
-        Some(&gv) => {
-            unsafe {
-                // Upgrade the alignment in cases where the same constant is used with different
-                // alignment requirements
-                if align > llvm::LLVMGetAlignment(gv) {
-                    llvm::LLVMSetAlignment(gv, align);
-                }
-            }
-            return gv;
-        }
-        None => {}
-    }
-    let gv = addr_of_mut(ccx, cv, align, kind);
-    unsafe {
-        llvm::LLVMSetGlobalConstant(gv, True);
-    }
-    ccx.const_globals().borrow_mut().insert(cv, gv);
-    gv
-}
-
-/// Deref a constant pointer
-fn load_const(cx: &CrateContext, v: ValueRef, t: Ty) -> ValueRef {
-    let v = match cx.const_unsized().borrow().get(&v) {
-        Some(&v) => v,
-        None => v
-    };
-    let d = unsafe { llvm::LLVMGetInitializer(v) };
-    if t.is_bool() {
-        unsafe { llvm::LLVMConstTrunc(d, Type::i1(cx).to_ref()) }
-    } else {
-        d
-    }
-}
-
-fn const_deref<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                         v: ValueRef,
-                         ty: Ty<'tcx>)
-                         -> (ValueRef, Ty<'tcx>) {
-    match ty.builtin_deref(true, ty::NoPreference) {
-        Some(mt) => {
-            if type_is_sized(cx.tcx(), mt.ty) {
-                (load_const(cx, v, mt.ty), mt.ty)
-            } else {
-                // Dereferencing a fat pointer does not change the representation,
-                // only the type, which becomes the unsized pointee type.
-                (v, mt.ty)
-            }
-        }
-        None => {
-            cx.sess().bug(&format!("unexpected dereferenceable type {:?}",
-                                   ty))
-        }
-    }
-}
-
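-/// Evaluate a call to a `const fn` at translation time by translating its
-/// body expression with the argument values bound to the parameters.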
-fn const_fn_call<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                           node: ExprOrMethodCall,
-                           def_id: DefId,
-                           arg_vals: &[ValueRef],
-                           param_substs: &'tcx Substs<'tcx>,
-                           trueconst: TrueConst) -> Result<ValueRef, ConstEvalFailure> {
-    let fn_like = const_eval::lookup_const_fn_by_id(ccx.tcx(), def_id);
-    let fn_like = fn_like.expect("lookup_const_fn_by_id failed in const_fn_call");
-
-    let args = &fn_like.decl().inputs;
-    assert_eq!(args.len(), arg_vals.len());
-
-    let arg_ids = args.iter().map(|arg| arg.pat.id);
-    let fn_args = arg_ids.zip(arg_vals.iter().cloned()).collect();
-
-    let substs = ccx.tcx().mk_substs(node_id_substs(ccx, node, param_substs));
-    match fn_like.body().expr {
-        Some(ref expr) => {
-            const_expr(ccx, &expr, substs, Some(&fn_args), trueconst).map(|(res, _)| res)
-        },
-        None => Ok(C_nil(ccx)),
-    }
-}
-
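-/// Look up the HIR expression defining the constant `def_id`, instantiating
-/// it into the local crate first if it comes from another crate.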
-pub fn get_const_expr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                def_id: DefId,
-                                ref_expr: &hir::Expr,
-                                param_substs: &'tcx Substs<'tcx>)
-                                -> &'tcx hir::Expr {
-    let def_id = inline::maybe_instantiate_inline(ccx, def_id);
-
-    if def_id.krate != LOCAL_CRATE {
-        ccx.sess().span_bug(ref_expr.span,
-                            "cross crate constant could not be inlined");
-    }
-
-    match const_eval::lookup_const_by_id(ccx.tcx(), def_id, Some(ref_expr.id), Some(param_substs)) {
-        Some(ref expr) => expr,
-        None => {
-            ccx.sess().span_bug(ref_expr.span, "constant item not found")
-        }
-    }
-}
-
-pub enum ConstEvalFailure {
-    /// The const evaluator failed on something that would panic at runtime,
-    /// as defined in RFC 1229.
-    Runtime(ConstEvalErr),
-    /// The const evaluator failed while evaluating a true constant.
-    Compiletime(ConstEvalErr),
-}
-
-impl ConstEvalFailure {
-    fn into_inner(self) -> ConstEvalErr {
-        match self {
-            Runtime(e) => e,
-            Compiletime(e) => e,
-        }
-    }
-    pub fn description(&self) -> Cow<str> {
-        match self {
-            &Runtime(ref e) => e.description(),
-            &Compiletime(ref e) => e.description(),
-        }
-    }
-}
-
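-/// Whether the expression being translated must be a true compile-time
-/// constant; if `No`, const-eval failures are reported as warnings
-/// (`Runtime`) rather than errors (`Compiletime`).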
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-pub enum TrueConst {
-    Yes, No
-}
-
-use self::ConstEvalFailure::*;
-
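-/// Translate the constant `def_id` into a cached global, promoting runtime
-/// const-eval errors to compile-time errors.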
-fn get_const_val<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                           def_id: DefId,
-                           ref_expr: &hir::Expr,
-                           param_substs: &'tcx Substs<'tcx>)
-                           -> Result<ValueRef, ConstEvalFailure> {
-    let expr = get_const_expr(ccx, def_id, ref_expr, param_substs);
-    let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
-    match get_const_expr_as_global(ccx, expr, ConstQualif::empty(), empty_substs, TrueConst::Yes) {
-        Err(Runtime(err)) => {
-            ccx.tcx().sess.span_err(expr.span, &err.description());
-            Err(Compiletime(err))
-        },
-        other => other,
-    }
-}
-
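-/// Translate `expr` as a constant, store it in an aligned, cached global,
-/// and return that global's address.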
-pub fn get_const_expr_as_global<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                          expr: &hir::Expr,
-                                          qualif: ConstQualif,
-                                          param_substs: &'tcx Substs<'tcx>,
-                                          trueconst: TrueConst)
-                                          -> Result<ValueRef, ConstEvalFailure> {
-    debug!("get_const_expr_as_global: {:?}", expr.id);
-    // Special-case constants to cache a common global for all uses.
-    if let hir::ExprPath(..) = expr.node {
-        // `def` must be computed in its own statement and cannot live inside the `match`,
-        // otherwise the `def_map` would stay borrowed for the entire match instead
-        // of just long enough to get the `def` value.
-        let def = ccx.tcx().def_map.borrow().get(&expr.id).unwrap().full_def();
-        match def {
-            Def::Const(def_id) | Def::AssociatedConst(def_id) => {
-                if !ccx.tcx().tables.borrow().adjustments.contains_key(&expr.id) {
-                    debug!("get_const_expr_as_global ({:?}): found const {:?}",
-                           expr.id, def_id);
-                    return get_const_val(ccx, def_id, expr, param_substs);
-                }
-            },
-            _ => {},
-        }
-    }
-
-    let key = (expr.id, param_substs);
-    if let Some(&val) = ccx.const_values().borrow().get(&key) {
-        return Ok(val);
-    }
-    let ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs,
-                                              &ccx.tcx().expr_ty(expr));
-    let val = if qualif.intersects(ConstQualif::NON_STATIC_BORROWS) {
-        // Avoid autorefs, as they would create global references instead of
-        // stack references, even when only the latter are correct.
-        try!(const_expr_unadjusted(ccx, expr, ty, param_substs, None, trueconst))
-    } else {
-        try!(const_expr(ccx, expr, param_substs, None, trueconst)).0
-    };
-
-    // boolean SSA values are i1, but they have to be stored in i8 slots,
-    // otherwise some LLVM optimization passes don't work as expected
-    let val = unsafe {
-        if llvm::LLVMTypeOf(val) == Type::i1(ccx).to_ref() {
-            llvm::LLVMConstZExt(val, Type::i8(ccx).to_ref())
-        } else {
-            val
-        }
-    };
-
-    let lvalue = addr_of(ccx, val, type_of::align_of(ccx, ty), "const");
-    ccx.const_values().borrow_mut().insert(key, lvalue);
-    Ok(lvalue)
-}
-
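-/// Translate a constant expression and apply any adjustments (autoderef,
-/// autoref, unsizing) recorded for it, returning the LLVM constant together
-/// with its adjusted type.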
-pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                            e: &hir::Expr,
-                            param_substs: &'tcx Substs<'tcx>,
-                            fn_args: FnArgMap,
-                            trueconst: TrueConst)
-                            -> Result<(ValueRef, Ty<'tcx>), ConstEvalFailure> {
-    let ety = monomorphize::apply_param_substs(cx.tcx(), param_substs,
-                                               &cx.tcx().expr_ty(e));
-    let llconst = try!(const_expr_unadjusted(cx, e, ety, param_substs, fn_args, trueconst));
-    let mut llconst = llconst;
-    let mut ety_adjusted = monomorphize::apply_param_substs(cx.tcx(), param_substs,
-                                                            &cx.tcx().expr_ty_adjusted(e));
-    let opt_adj = cx.tcx().tables.borrow().adjustments.get(&e.id).cloned();
-    match opt_adj {
-        Some(AdjustReifyFnPointer) => {
-            // FIXME(#19925) once fn item types are
-            // zero-sized, we'll need to do something here
-        }
-        Some(AdjustUnsafeFnPointer) | Some(AdjustMutToConstPointer) => {
-            // purely a type-level thing
-        }
-        Some(AdjustDerefRef(adj)) => {
-            let mut ty = ety;
-            // Save the last autoderef in case we can avoid it.
-            if adj.autoderefs > 0 {
-                for _ in 0..adj.autoderefs-1 {
-                    let (dv, dt) = const_deref(cx, llconst, ty);
-                    llconst = dv;
-                    ty = dt;
-                }
-            }
-
-            if adj.autoref.is_some() {
-                if adj.autoderefs == 0 {
-                    // Don't copy data to do a deref+ref
-                    // (i.e., skip the last auto-deref).
-                    llconst = addr_of(cx, llconst, type_of::align_of(cx, ty), "autoref");
-                    ty = cx.tcx().mk_imm_ref(cx.tcx().mk_region(ty::ReStatic), ty);
-                }
-            } else {
-                let (dv, dt) = const_deref(cx, llconst, ty);
-                llconst = dv;
-
-                // If we derefed a fat pointer then we will have an
-                // open type here. So we need to update the type with
-                // the one returned from const_deref.
-                ety_adjusted = dt;
-            }
-
-            if let Some(target) = adj.unsize {
-                let target = monomorphize::apply_param_substs(cx.tcx(),
-                                                              param_substs,
-                                                              &target);
-
-                let pointee_ty = ty.builtin_deref(true, ty::NoPreference)
-                    .expect("consts: unsizing got non-pointer type").ty;
-                let (base, old_info) = if !type_is_sized(cx.tcx(), pointee_ty) {
-                    // Normally, the source is a thin pointer and we are
-                    // adding extra info to make a fat pointer. The exception
-                    // is when we are upcasting an existing object fat pointer
-                    // to use a different vtable. In that case, we want to
-                    // load out the original data pointer so we can repackage
-                    // it.
-                    (const_get_elt(cx, llconst, &[abi::FAT_PTR_ADDR as u32]),
-                     Some(const_get_elt(cx, llconst, &[abi::FAT_PTR_EXTRA as u32])))
-                } else {
-                    (llconst, None)
-                };
-
-                let unsized_ty = target.builtin_deref(true, ty::NoPreference)
-                    .expect("consts: unsizing got non-pointer target type").ty;
-                let ptr_ty = type_of::in_memory_type_of(cx, unsized_ty).ptr_to();
-                let base = ptrcast(base, ptr_ty);
-                let info = base::unsized_info(cx, pointee_ty, unsized_ty,
-                                              old_info, param_substs);
-
-                if old_info.is_none() {
-                    let prev_const = cx.const_unsized().borrow_mut()
-                                       .insert(base, llconst);
-                    assert!(prev_const.is_none() || prev_const == Some(llconst));
-                }
-                assert_eq!(abi::FAT_PTR_ADDR, 0);
-                assert_eq!(abi::FAT_PTR_EXTRA, 1);
-                llconst = C_struct(cx, &[base, info], false);
-            }
-        }
-        None => {}
-    };
-
-    let llty = type_of::sizing_type_of(cx, ety_adjusted);
-    let csize = machine::llsize_of_alloc(cx, val_ty(llconst));
-    let tsize = machine::llsize_of_alloc(cx, llty);
-    if csize != tsize {
-        cx.sess().abort_if_errors();
-        unsafe {
-            // FIXME these values could use some context
-            llvm::LLVMDumpValue(llconst);
-            llvm::LLVMDumpValue(C_undef(llty));
-        }
-        cx.sess().bug(&format!("const {:?} of type {:?} has size {} instead of {}",
-                         e, ety_adjusted,
-                         csize, tsize));
-    }
-    Ok((llconst, ety_adjusted))
-}
-
-fn check_unary_expr_validity(cx: &CrateContext, e: &hir::Expr, t: Ty,
-                             te: ValueRef, trueconst: TrueConst) -> Result<(), ConstEvalFailure> {
-    // The only kind of unary expression that we check for validity
-    // here is `-expr`, to check if it "overflows" (e.g. `-i32::MIN`).
-    if let hir::ExprUnary(hir::UnNeg, ref inner_e) = e.node {
-
-        // An unfortunate special case: we parse e.g. -128 as a
-        // negation of the literal 128, which means if we're expecting
-        // an i8 (or if it was already suffixed, e.g. `-128_i8`), then
-        // 128 will have already overflowed to -128, so the
-        // constant evaluator thinks we're trying to negate -128.
-        //
-        // Catch this up front by looking for ExprLit directly,
-        // and just accepting it.
-        if let hir::ExprLit(_) = inner_e.node { return Ok(()); }
-
-        let result = match t.sty {
-            ty::TyInt(int_type) => {
-                let input = match const_to_opt_int(te) {
-                    Some(v) => v,
-                    None => return Ok(()),
-                };
-                const_int_checked_neg(
-                    input, e, Some(const_eval::IntTy::from(cx.tcx(), int_type)))
-            }
-            ty::TyUint(uint_type) => {
-                let input = match const_to_opt_uint(te) {
-                    Some(v) => v,
-                    None => return Ok(()),
-                };
-                const_uint_checked_neg(
-                    input, e, Some(const_eval::UintTy::from(cx.tcx(), uint_type)))
-            }
-            _ => return Ok(()),
-        };
-        const_err(cx, e, result, trueconst)
-    } else {
-        Ok(())
-    }
-}
-
-fn const_err(cx: &CrateContext,
-             e: &hir::Expr,
-             result: Result<ConstVal, ConstEvalErr>,
-             trueconst: TrueConst)
-             -> Result<(), ConstEvalFailure> {
-    match (result, trueconst) {
-        (Ok(_), _) => {
-            // We do not actually care about a successful result.
-            Ok(())
-        },
-        (Err(err), TrueConst::Yes) => {
-            cx.tcx().sess.span_err(e.span, &err.description());
-            Err(Compiletime(err))
-        },
-        (Err(err), TrueConst::No) => {
-            cx.tcx().sess.span_warn(e.span, &err.description());
-            Err(Runtime(err))
-        },
-    }
-}
-
-fn check_binary_expr_validity(cx: &CrateContext, e: &hir::Expr, t: Ty,
-                              te1: ValueRef, te2: ValueRef,
-                              trueconst: TrueConst) -> Result<(), ConstEvalFailure> {
-    let b = if let hir::ExprBinary(b, _, _) = e.node { b } else { unreachable!() };
-
-    let result = match t.sty {
-        ty::TyInt(int_type) => {
-            let (lhs, rhs) = match (const_to_opt_int(te1),
-                                    const_to_opt_int(te2)) {
-                (Some(v1), Some(v2)) => (v1, v2),
-                _ => return Ok(()),
-            };
-
-            let opt_ety = Some(const_eval::IntTy::from(cx.tcx(), int_type));
-            match b.node {
-                hir::BiAdd => const_int_checked_add(lhs, rhs, e, opt_ety),
-                hir::BiSub => const_int_checked_sub(lhs, rhs, e, opt_ety),
-                hir::BiMul => const_int_checked_mul(lhs, rhs, e, opt_ety),
-                hir::BiDiv => const_int_checked_div(lhs, rhs, e, opt_ety),
-                hir::BiRem => const_int_checked_rem(lhs, rhs, e, opt_ety),
-                hir::BiShl => const_int_checked_shl(lhs, rhs, e, opt_ety),
-                hir::BiShr => const_int_checked_shr(lhs, rhs, e, opt_ety),
-                _ => return Ok(()),
-            }
-        }
-        ty::TyUint(uint_type) => {
-            let (lhs, rhs) = match (const_to_opt_uint(te1),
-                                    const_to_opt_uint(te2)) {
-                (Some(v1), Some(v2)) => (v1, v2),
-                _ => return Ok(()),
-            };
-
-            let opt_ety = Some(const_eval::UintTy::from(cx.tcx(), uint_type));
-            match b.node {
-                hir::BiAdd => const_uint_checked_add(lhs, rhs, e, opt_ety),
-                hir::BiSub => const_uint_checked_sub(lhs, rhs, e, opt_ety),
-                hir::BiMul => const_uint_checked_mul(lhs, rhs, e, opt_ety),
-                hir::BiDiv => const_uint_checked_div(lhs, rhs, e, opt_ety),
-                hir::BiRem => const_uint_checked_rem(lhs, rhs, e, opt_ety),
-                hir::BiShl => const_uint_checked_shl(lhs, rhs, e, opt_ety),
-                hir::BiShr => const_uint_checked_shr(lhs, rhs, e, opt_ety),
-                _ => return Ok(()),
-            }
-        }
-        _ => return Ok(()),
-    };
-    const_err(cx, e, result, trueconst)
-}
-
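-/// Translate a constant expression to an LLVM constant, ignoring any
-/// adjustments recorded for the expression.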
-fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                   e: &hir::Expr,
-                                   ety: Ty<'tcx>,
-                                   param_substs: &'tcx Substs<'tcx>,
-                                   fn_args: FnArgMap,
-                                   trueconst: TrueConst)
-                                   -> Result<ValueRef, ConstEvalFailure>
-{
-    debug!("const_expr_unadjusted(e={:?}, ety={:?}, param_substs={:?})",
-           e,
-           ety,
-           param_substs);
-
-    let map_list = |exprs: &[P<hir::Expr>]| -> Result<Vec<ValueRef>, ConstEvalFailure> {
-        exprs.iter()
-             .map(|e| const_expr(cx, &e, param_substs, fn_args, trueconst).map(|(l, _)| l))
-             .collect::<Vec<Result<ValueRef, ConstEvalFailure>>>()
-             .into_iter()
-             .collect()
-         // this dance is necessary to eagerly run const_expr so all errors are reported
-    };
-    let _icx = push_ctxt("const_expr");
-    Ok(match e.node {
-        hir::ExprLit(ref lit) => const_lit(cx, e, &lit),
-        hir::ExprBinary(b, ref e1, ref e2) => {
-            /* Neither type is bottom, and we expect them to be unified
-             * already, so the following is safe. */
-            let (te1, ty) = try!(const_expr(cx, &e1, param_substs, fn_args, trueconst));
-            debug!("const_expr_unadjusted: te1={}, ty={:?}",
-                   cx.tn().val_to_string(te1),
-                   ty);
-            assert!(!ty.is_simd());
-            let is_float = ty.is_fp();
-            let signed = ty.is_signed();
-
-            let (te2, ty2) = try!(const_expr(cx, &e2, param_substs, fn_args, trueconst));
-            debug!("const_expr_unadjusted: te2={}, ty={:?}",
-                   cx.tn().val_to_string(te2),
-                   ty2);
-
-            try!(check_binary_expr_validity(cx, e, ty, te1, te2, trueconst));
-
-            unsafe { match b.node {
-                hir::BiAdd if is_float => llvm::LLVMConstFAdd(te1, te2),
-                hir::BiAdd             => llvm::LLVMConstAdd(te1, te2),
-
-                hir::BiSub if is_float => llvm::LLVMConstFSub(te1, te2),
-                hir::BiSub             => llvm::LLVMConstSub(te1, te2),
-
-                hir::BiMul if is_float => llvm::LLVMConstFMul(te1, te2),
-                hir::BiMul             => llvm::LLVMConstMul(te1, te2),
-
-                hir::BiDiv if is_float => llvm::LLVMConstFDiv(te1, te2),
-                hir::BiDiv if signed   => llvm::LLVMConstSDiv(te1, te2),
-                hir::BiDiv             => llvm::LLVMConstUDiv(te1, te2),
-
-                hir::BiRem if is_float => llvm::LLVMConstFRem(te1, te2),
-                hir::BiRem if signed   => llvm::LLVMConstSRem(te1, te2),
-                hir::BiRem             => llvm::LLVMConstURem(te1, te2),
-
-                hir::BiAnd    => llvm::LLVMConstAnd(te1, te2),
-                hir::BiOr     => llvm::LLVMConstOr(te1, te2),
-                hir::BiBitXor => llvm::LLVMConstXor(te1, te2),
-                hir::BiBitAnd => llvm::LLVMConstAnd(te1, te2),
-                hir::BiBitOr  => llvm::LLVMConstOr(te1, te2),
-                hir::BiShl    => {
-                    let te2 = base::cast_shift_const_rhs(b.node, te1, te2);
-                    llvm::LLVMConstShl(te1, te2)
-                },
-                hir::BiShr    => {
-                    let te2 = base::cast_shift_const_rhs(b.node, te1, te2);
-                    if signed { llvm::LLVMConstAShr(te1, te2) }
-                    else      { llvm::LLVMConstLShr(te1, te2) }
-                },
-                hir::BiEq | hir::BiNe | hir::BiLt | hir::BiLe | hir::BiGt | hir::BiGe => {
-                    if is_float {
-                        let cmp = base::bin_op_to_fcmp_predicate(cx, b.node);
-                        ConstFCmp(cmp, te1, te2)
-                    } else {
-                        let cmp = base::bin_op_to_icmp_predicate(cx, b.node, signed);
-                        ConstICmp(cmp, te1, te2)
-                    }
-                },
-            } } // unsafe { match b.node {
-        },
-        hir::ExprUnary(u, ref inner_e) => {
-            let (te, ty) = try!(const_expr(cx, &inner_e, param_substs, fn_args, trueconst));
-
-            try!(check_unary_expr_validity(cx, e, ty, te, trueconst));
-
-            let is_float = ty.is_fp();
-            unsafe { match u {
-                hir::UnDeref           => const_deref(cx, te, ty).0,
-                hir::UnNot             => llvm::LLVMConstNot(te),
-                hir::UnNeg if is_float => llvm::LLVMConstFNeg(te),
-                hir::UnNeg             => llvm::LLVMConstNeg(te),
-            } }
-        },
-        hir::ExprField(ref base, field) => {
-            let (bv, bt) = try!(const_expr(cx, &base, param_substs, fn_args, trueconst));
-            let brepr = adt::represent_type(cx, bt);
-            let vinfo = VariantInfo::from_ty(cx.tcx(), bt, None);
-            let ix = vinfo.field_index(field.node);
-            adt::const_get_field(cx, &brepr, bv, vinfo.discr, ix)
-        },
-        hir::ExprTupField(ref base, idx) => {
-            let (bv, bt) = try!(const_expr(cx, &base, param_substs, fn_args, trueconst));
-            let brepr = adt::represent_type(cx, bt);
-            let vinfo = VariantInfo::from_ty(cx.tcx(), bt, None);
-            adt::const_get_field(cx, &brepr, bv, vinfo.discr, idx.node)
-        },
-        hir::ExprIndex(ref base, ref index) => {
-            let (bv, bt) = try!(const_expr(cx, &base, param_substs, fn_args, trueconst));
-            let iv = try!(const_expr(cx, &index, param_substs, fn_args, TrueConst::Yes)).0;
-            let iv = if let Some(iv) = const_to_opt_uint(iv) {
-                iv
-            } else {
-                cx.sess().span_bug(index.span, "index is not an integer-constant expression");
-            };
-            let (arr, len) = match bt.sty {
-                ty::TyArray(_, u) => (bv, C_uint(cx, u)),
-                ty::TySlice(..) | ty::TyStr => {
-                    let e1 = const_get_elt(cx, bv, &[0]);
-                    (load_const(cx, e1, bt), const_get_elt(cx, bv, &[1]))
-                },
-                ty::TyRef(_, mt) => match mt.ty.sty {
-                    ty::TyArray(_, u) => {
-                        (load_const(cx, bv, mt.ty), C_uint(cx, u))
-                    },
-                    _ => cx.sess().span_bug(base.span,
-                                            &format!("index-expr base must be a vector \
-                                                      or string type, found {:?}",
-                                                     bt)),
-                },
-                _ => cx.sess().span_bug(base.span,
-                                        &format!("index-expr base must be a vector \
-                                                  or string type, found {:?}",
-                                                 bt)),
-            };
-
-            let len = unsafe { llvm::LLVMConstIntGetZExtValue(len) as u64 };
-            let len = match bt.sty {
-                ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => match ty.sty {
-                    ty::TyStr => {
-                        assert!(len > 0);
-                        len - 1
-                    },
-                    _ => len,
-                },
-                _ => len,
-            };
-            if iv >= len {
-                // FIXME #3170: report this earlier on in the const-eval
-                // pass. Reporting here is a bit late.
-                span_err!(cx.sess(), e.span, E0515,
-                          "const index-expr is out of bounds");
-                C_undef(val_ty(arr).element_type())
-            } else {
-                const_get_elt(cx, arr, &[iv as c_uint])
-            }
-        },
-        hir::ExprCast(ref base, _) => {
-            let t_cast = ety;
-            let llty = type_of::type_of(cx, t_cast);
-            let (v, t_expr) = try!(const_expr(cx, &base, param_substs, fn_args, trueconst));
-            debug!("trans_const_cast({:?} as {:?})", t_expr, t_cast);
-            if expr::cast_is_noop(cx.tcx(), base, t_expr, t_cast) {
-                return Ok(v);
-            }
-            if type_is_fat_ptr(cx.tcx(), t_expr) {
-                // Fat pointer casts.
-                let t_cast_inner =
-                    t_cast.builtin_deref(true, ty::NoPreference).expect("cast to non-pointer").ty;
-                let ptr_ty = type_of::in_memory_type_of(cx, t_cast_inner).ptr_to();
-                let addr = ptrcast(const_get_elt(cx, v, &[abi::FAT_PTR_ADDR as u32]),
-                                   ptr_ty);
-                if type_is_fat_ptr(cx.tcx(), t_cast) {
-                    let info = const_get_elt(cx, v, &[abi::FAT_PTR_EXTRA as u32]);
-                    return Ok(C_struct(cx, &[addr, info], false))
-                } else {
-                    return Ok(addr);
-                }
-            }
-            unsafe { match (
-                CastTy::from_ty(t_expr).expect("bad input type for cast"),
-                CastTy::from_ty(t_cast).expect("bad output type for cast"),
-            ) {
-                (CastTy::Int(IntTy::CEnum), CastTy::Int(_)) => {
-                    let repr = adt::represent_type(cx, t_expr);
-                    let discr = adt::const_get_discrim(cx, &repr, v);
-                    let iv = C_integral(cx.int_type(), discr.0, false);
-                    let s = adt::is_discr_signed(&repr) as Bool;
-                    llvm::LLVMConstIntCast(iv, llty.to_ref(), s)
-                },
-                (CastTy::Int(_), CastTy::Int(_)) => {
-                    let s = t_expr.is_signed() as Bool;
-                    llvm::LLVMConstIntCast(v, llty.to_ref(), s)
-                },
-                (CastTy::Int(_), CastTy::Float) => {
-                    if t_expr.is_signed() {
-                        llvm::LLVMConstSIToFP(v, llty.to_ref())
-                    } else {
-                        llvm::LLVMConstUIToFP(v, llty.to_ref())
-                    }
-                },
-                (CastTy::Float, CastTy::Float) => llvm::LLVMConstFPCast(v, llty.to_ref()),
-                (CastTy::Float, CastTy::Int(IntTy::I)) => llvm::LLVMConstFPToSI(v, llty.to_ref()),
-                (CastTy::Float, CastTy::Int(_)) => llvm::LLVMConstFPToUI(v, llty.to_ref()),
-                (CastTy::Ptr(_), CastTy::Ptr(_)) | (CastTy::FnPtr, CastTy::Ptr(_))
-                | (CastTy::RPtr(_), CastTy::Ptr(_)) => {
-                    ptrcast(v, llty)
-                },
-                (CastTy::FnPtr, CastTy::FnPtr) => ptrcast(v, llty), // isn't this a coercion?
-                (CastTy::Int(_), CastTy::Ptr(_)) => llvm::LLVMConstIntToPtr(v, llty.to_ref()),
-                (CastTy::Ptr(_), CastTy::Int(_)) | (CastTy::FnPtr, CastTy::Int(_)) => {
-                  llvm::LLVMConstPtrToInt(v, llty.to_ref())
-                },
-                _ => {
-                  cx.sess().impossible_case(e.span,
-                                            "bad combination of types for cast")
-                },
-            } } // unsafe { match ( ... ) {
-        },
-        hir::ExprAddrOf(hir::MutImmutable, ref sub) => {
-            // If this is the address of some static, then we need to return
-            // the actual address of the static itself (short circuit the rest
-            // of const eval).
-            let mut cur = sub;
-            loop {
-                match cur.node {
-                    hir::ExprBlock(ref blk) => {
-                        if let Some(ref sub) = blk.expr {
-                            cur = sub;
-                        } else {
-                            break;
-                        }
-                    },
-                    _ => break,
-                }
-            }
-            let opt_def = cx.tcx().def_map.borrow().get(&cur.id).map(|d| d.full_def());
-            if let Some(Def::Static(def_id, _)) = opt_def {
-                common::get_static_val(cx, def_id, ety)
-            } else {
-                // If this isn't the address of a static, then keep going through
-                // normal constant evaluation.
-                let (v, ty) = try!(const_expr(cx, &sub, param_substs, fn_args, trueconst));
-                addr_of(cx, v, type_of::align_of(cx, ty), "ref")
-            }
-        },
-        hir::ExprAddrOf(hir::MutMutable, ref sub) => {
-            let (v, ty) = try!(const_expr(cx, &sub, param_substs, fn_args, trueconst));
-            addr_of_mut(cx, v, type_of::align_of(cx, ty), "ref_mut_slice")
-        },
-        hir::ExprTup(ref es) => {
-            let repr = adt::represent_type(cx, ety);
-            let vals = try!(map_list(&es[..]));
-            adt::trans_const(cx, &repr, Disr(0), &vals[..])
-        },
-        hir::ExprStruct(_, ref fs, ref base_opt) => {
-            let repr = adt::represent_type(cx, ety);
-
-            let base_val = match *base_opt {
-                Some(ref base) => Some(try!(const_expr(
-                    cx,
-                    &base,
-                    param_substs,
-                    fn_args,
-                    trueconst,
-                ))),
-                None => None
-            };
-
-            let VariantInfo { discr, fields } = VariantInfo::of_node(cx.tcx(), ety, e.id);
-            let cs = fields.iter().enumerate().map(|(ix, &Field(f_name, _))| {
-                match (fs.iter().find(|f| f_name == f.name.node), base_val) {
-                    (Some(ref f), _) => {
-                        const_expr(cx, &f.expr, param_substs, fn_args, trueconst).map(|(l, _)| l)
-                    },
-                    (_, Some((bv, _))) => Ok(adt::const_get_field(cx, &repr, bv, discr, ix)),
-                    (_, None) => cx.sess().span_bug(e.span, "missing struct field"),
-                }
-            })
-            .collect::<Vec<Result<_, ConstEvalFailure>>>()
-            .into_iter()
-            .collect::<Result<Vec<_>,ConstEvalFailure>>();
-            let cs = try!(cs);
-            if ety.is_simd() {
-                C_vector(&cs[..])
-            } else {
-                adt::trans_const(cx, &repr, discr, &cs[..])
-            }
-        },
-        hir::ExprVec(ref es) => {
-            let unit_ty = ety.sequence_element_type(cx.tcx());
-            let llunitty = type_of::type_of(cx, unit_ty);
-            let vs = es.iter()
-                       .map(|e| const_expr(
-                           cx,
-                           &e,
-                           param_substs,
-                           fn_args,
-                           trueconst,
-                       ).map(|(l, _)| l))
-                       .collect::<Vec<Result<_, ConstEvalFailure>>>()
-                       .into_iter()
-                       .collect::<Result<Vec<_>, ConstEvalFailure>>();
-            let vs = try!(vs);
-            // If the vector contains enums, an LLVM array won't work.
-            if vs.iter().any(|vi| val_ty(*vi) != llunitty) {
-                C_struct(cx, &vs[..], false)
-            } else {
-                C_array(llunitty, &vs[..])
-            }
-        },
-        hir::ExprRepeat(ref elem, ref count) => {
-            let unit_ty = ety.sequence_element_type(cx.tcx());
-            let llunitty = type_of::type_of(cx, unit_ty);
-            let n = cx.tcx().eval_repeat_count(count);
-            let unit_val = try!(const_expr(cx, &elem, param_substs, fn_args, trueconst)).0;
-            let vs = vec![unit_val; n];
-            if val_ty(unit_val) != llunitty {
-                C_struct(cx, &vs[..], false)
-            } else {
-                C_array(llunitty, &vs[..])
-            }
-        },
-        hir::ExprPath(..) => {
-            let def = cx.tcx().def_map.borrow().get(&e.id).unwrap().full_def();
-            match def {
-                Def::Local(_, id) => {
-                    if let Some(val) = fn_args.and_then(|args| args.get(&id).cloned()) {
-                        val
-                    } else {
-                        cx.sess().span_bug(e.span, "const fn argument not found")
-                    }
-                }
-                Def::Fn(..) | Def::Method(..) => {
-                    expr::trans_def_fn_unadjusted(cx, e, def, param_substs).val
-                }
-                Def::Const(def_id) | Def::AssociatedConst(def_id) => {
-                    load_const(cx, try!(get_const_val(cx, def_id, e, param_substs)),
-                               ety)
-                }
-                Def::Variant(enum_did, variant_did) => {
-                    let vinfo = cx.tcx().lookup_adt_def(enum_did).variant_with_id(variant_did);
-                    match vinfo.kind() {
-                        ty::VariantKind::Unit => {
-                            let repr = adt::represent_type(cx, ety);
-                            adt::trans_const(cx, &repr, Disr::from(vinfo.disr_val), &[])
-                        }
-                        ty::VariantKind::Tuple => {
-                            expr::trans_def_fn_unadjusted(cx, e, def, param_substs).val
-                        }
-                        ty::VariantKind::Struct => {
-                            cx.sess().span_bug(e.span, "path-expr refers to a dict variant!")
-                        }
-                    }
-                }
-                Def::Struct(..) => {
-                    if let ty::TyBareFn(..) = ety.sty {
-                        // Tuple struct.
-                        expr::trans_def_fn_unadjusted(cx, e, def, param_substs).val
-                    } else {
-                        // Unit struct.
-                        C_null(type_of::type_of(cx, ety))
-                    }
-                }
-                _ => {
-                    cx.sess().span_bug(e.span, "expected a const, fn, struct, \
-                                                or variant def")
-                }
-            }
-        },
-        hir::ExprCall(ref callee, ref args) => {
-            let mut callee = &**callee;
-            loop {
-                callee = match callee.node {
-                    hir::ExprBlock(ref block) => match block.expr {
-                        Some(ref tail) => &tail,
-                        None => break,
-                    },
-                    _ => break,
-                };
-            }
-            let def = cx.tcx().def_map.borrow()[&callee.id].full_def();
-            let arg_vals = try!(map_list(args));
-            match def {
-                Def::Fn(did) | Def::Method(did) => {
-                    try!(const_fn_call(
-                        cx,
-                        ExprId(callee.id),
-                        did,
-                        &arg_vals,
-                        param_substs,
-                        trueconst,
-                    ))
-                }
-                Def::Struct(..) => {
-                    if ety.is_simd() {
-                        C_vector(&arg_vals[..])
-                    } else {
-                        let repr = adt::represent_type(cx, ety);
-                        adt::trans_const(cx, &repr, Disr(0), &arg_vals[..])
-                    }
-                }
-                Def::Variant(enum_did, variant_did) => {
-                    let repr = adt::represent_type(cx, ety);
-                    let vinfo = cx.tcx().lookup_adt_def(enum_did).variant_with_id(variant_did);
-                    adt::trans_const(cx,
-                                     &repr,
-                                     Disr::from(vinfo.disr_val),
-                                     &arg_vals[..])
-                }
-                _ => cx.sess().span_bug(e.span, "expected a struct, variant, or const fn def"),
-            }
-        },
-        hir::ExprMethodCall(_, _, ref args) => {
-            let arg_vals = try!(map_list(args));
-            let method_call = ty::MethodCall::expr(e.id);
-            let method_did = cx.tcx().tables.borrow().method_map[&method_call].def_id;
-            try!(const_fn_call(cx, MethodCallKey(method_call),
-                               method_did, &arg_vals, param_substs, trueconst))
-        },
-        hir::ExprType(ref e, _) => try!(const_expr(cx, &e, param_substs, fn_args, trueconst)).0,
-        hir::ExprBlock(ref block) => {
-            match block.expr {
-                Some(ref expr) => try!(const_expr(
-                    cx,
-                    &expr,
-                    param_substs,
-                    fn_args,
-                    trueconst,
-                )).0,
-                None => C_nil(cx),
-            }
-        },
-        hir::ExprClosure(_, ref decl, ref body) => {
-            match ety.sty {
-                ty::TyClosure(def_id, ref substs) => {
-                    closure::trans_closure_expr(closure::Dest::Ignore(cx),
-                                                decl,
-                                                body,
-                                                e.id,
-                                                def_id,
-                                                substs,
-                                                &e.attrs);
-                }
-                _ =>
-                    cx.sess().span_bug(
-                        e.span,
-                        &format!("bad type for closure expr: {:?}", ety))
-            }
-            C_null(type_of::type_of(cx, ety))
-        },
-        _ => cx.sess().span_bug(e.span,
-                                "bad constant expression type in consts::const_expr"),
-    })
-}
-
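-/// Translate a `static` item: evaluate its initializer as a true constant,
-/// give the LLVM global the right type and alignment, and mark statics
-/// without interior mutability as read-only.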
-pub fn trans_static(ccx: &CrateContext,
-                    m: hir::Mutability,
-                    expr: &hir::Expr,
-                    id: ast::NodeId,
-                    attrs: &[ast::Attribute])
-                    -> Result<ValueRef, ConstEvalErr> {
-
-    if collector::collecting_debug_information(ccx) {
-        ccx.record_translation_item_as_generated(TransItem::Static(id));
-    }
-
-    unsafe {
-        let _icx = push_ctxt("trans_static");
-        let g = base::get_item_val(ccx, id);
-
-        let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
-        let (v, _) = try!(const_expr(
-            ccx,
-            expr,
-            empty_substs,
-            None,
-            TrueConst::Yes,
-        ).map_err(|e| e.into_inner()));
-
-        // boolean SSA values are i1, but they have to be stored in i8 slots,
-        // otherwise some LLVM optimization passes don't work as expected
-        let mut val_llty = llvm::LLVMTypeOf(v);
-        let v = if val_llty == Type::i1(ccx).to_ref() {
-            val_llty = Type::i8(ccx).to_ref();
-            llvm::LLVMConstZExt(v, val_llty)
-        } else {
-            v
-        };
-
-        let ty = ccx.tcx().node_id_to_type(id);
-        let llty = type_of::type_of(ccx, ty);
-        let g = if val_llty == llty.to_ref() {
-            g
-        } else {
-            // If we created the global with the wrong type,
-            // correct the type.
-            let empty_string = CString::new("").unwrap();
-            let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g));
-            let name_string = CString::new(name_str_ref.to_bytes()).unwrap();
-            llvm::LLVMSetValueName(g, empty_string.as_ptr());
-            let new_g = llvm::LLVMGetOrInsertGlobal(
-                ccx.llmod(), name_string.as_ptr(), val_llty);
-            // To avoid breaking any invariants, we leave around the old
-            // global for the moment; we'll replace all references to it
-            // with the new global later. (See base::trans_crate.)
-            ccx.statics_to_rauw().borrow_mut().push((g, new_g));
-            new_g
-        };
-        llvm::LLVMSetAlignment(g, type_of::align_of(ccx, ty));
-        llvm::LLVMSetInitializer(g, v);
-
-        // As an optimization, all shared statics which do not have interior
-        // mutability are placed into read-only memory.
-        if m != hir::MutMutable {
-            let tcontents = ty.type_contents(ccx.tcx());
-            if !tcontents.interior_unsafe() {
-                llvm::LLVMSetGlobalConstant(g, llvm::True);
-            }
-        }
-
-        debuginfo::create_global_var_metadata(ccx, id, g);
-
-        if attr::contains_name(attrs,
-                               "thread_local") {
-            llvm::set_thread_local(g, true);
-        }
-        Ok(g)
-    }
-}
diff --git a/src/librustc_trans/trans/context.rs b/src/librustc_trans/trans/context.rs
deleted file mode 100644 (file)
index b2ebaac..0000000
+++ /dev/null
@@ -1,1073 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use llvm;
-use llvm::{ContextRef, ModuleRef, ValueRef, BuilderRef};
-use rustc::dep_graph::{DepNode, DepTrackingMap, DepTrackingMapConfig};
-use middle::cstore::LinkMeta;
-use middle::def::ExportMap;
-use middle::def_id::DefId;
-use middle::traits;
-use rustc::mir::mir_map::MirMap;
-use trans::adt;
-use trans::base;
-use trans::builder::Builder;
-use trans::common::{ExternMap,BuilderRef_res};
-use trans::debuginfo;
-use trans::declare;
-use trans::glue::DropGlueKind;
-use trans::monomorphize::MonoId;
-use trans::collector::{TransItem, TransItemState};
-use trans::type_::{Type, TypeNames};
-use middle::subst::Substs;
-use middle::ty::{self, Ty};
-use session::config::NoDebugInfo;
-use session::Session;
-use util::sha2::Sha256;
-use util::nodemap::{NodeMap, NodeSet, DefIdMap, FnvHashMap, FnvHashSet};
-
-use std::ffi::CString;
-use std::cell::{Cell, RefCell};
-use std::marker::PhantomData;
-use std::ptr;
-use std::rc::Rc;
-use syntax::ast;
-use syntax::parse::token::InternedString;
-
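-/// Counters collected while translating a crate (glues, monomorphizations,
-/// inlined items, LLVM instructions, ...).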
-pub struct Stats {
-    pub n_glues_created: Cell<usize>,
-    pub n_null_glues: Cell<usize>,
-    pub n_real_glues: Cell<usize>,
-    pub n_fns: Cell<usize>,
-    pub n_monos: Cell<usize>,
-    pub n_inlines: Cell<usize>,
-    pub n_closures: Cell<usize>,
-    pub n_llvm_insns: Cell<usize>,
-    pub llvm_insns: RefCell<FnvHashMap<String, usize>>,
-    // (ident, llvm-instructions)
-    pub fn_stats: RefCell<Vec<(String, usize)> >,
-}
-
-/// The shared portion of a `CrateContext`.  There is one `SharedCrateContext`
-/// per crate.  The data here is shared between all compilation units of the
-/// crate, so it must not contain references to any LLVM data structures
-/// (aside from metadata-related ones).
-pub struct SharedCrateContext<'a, 'tcx: 'a> {
-    local_ccxs: Vec<LocalCrateContext<'tcx>>,
-
-    metadata_llmod: ModuleRef,
-    metadata_llcx: ContextRef,
-
-    export_map: ExportMap,
-    reachable: NodeSet,
-    item_symbols: RefCell<NodeMap<String>>,
-    link_meta: LinkMeta,
-    symbol_hasher: RefCell<Sha256>,
-    tcx: &'a ty::ctxt<'tcx>,
-    stats: Stats,
-    check_overflow: bool,
-    check_drop_flag_for_sanity: bool,
-    mir_map: &'a MirMap<'tcx>,
-
-    available_drop_glues: RefCell<FnvHashMap<DropGlueKind<'tcx>, String>>,
-    use_dll_storage_attrs: bool,
-
-    translation_items: RefCell<FnvHashMap<TransItem<'tcx>, TransItemState>>,
-}
-
-/// The local portion of a `CrateContext`.  There is one `LocalCrateContext`
-/// per compilation unit.  Each one has its own LLVM `ContextRef` so that
-/// several compilation units may be optimized in parallel.  All other LLVM
-/// data structures in the `LocalCrateContext` are tied to that `ContextRef`.
-pub struct LocalCrateContext<'tcx> {
-    llmod: ModuleRef,
-    llcx: ContextRef,
-    tn: TypeNames,
-    externs: RefCell<ExternMap>,
-    item_vals: RefCell<NodeMap<ValueRef>>,
-    needs_unwind_cleanup_cache: RefCell<FnvHashMap<Ty<'tcx>, bool>>,
-    fn_pointer_shims: RefCell<FnvHashMap<Ty<'tcx>, ValueRef>>,
-    drop_glues: RefCell<FnvHashMap<DropGlueKind<'tcx>, ValueRef>>,
-    /// Track mapping of external ids to local items imported for inlining
-    external: RefCell<DefIdMap<Option<ast::NodeId>>>,
-    /// Backwards version of the `external` map (inlined items to where they
-    /// came from)
-    external_srcs: RefCell<NodeMap<DefId>>,
-    /// Cache instances of monomorphized functions
-    monomorphized: RefCell<FnvHashMap<MonoId<'tcx>, ValueRef>>,
-    monomorphizing: RefCell<DefIdMap<usize>>,
-    available_monomorphizations: RefCell<FnvHashSet<String>>,
-    /// Cache generated vtables
-    vtables: RefCell<FnvHashMap<ty::PolyTraitRef<'tcx>, ValueRef>>,
-    /// Cache of constant strings.
-    const_cstr_cache: RefCell<FnvHashMap<InternedString, ValueRef>>,
-
-    /// Reverse-direction for const ptrs cast from globals.
-    /// Key is a ValueRef holding a *T,
-    /// Val is a ValueRef holding a *[T].
-    ///
-    /// Needed because LLVM loses pointer->pointee association
-    /// when we ptrcast, and we have to ptrcast during translation
-    /// of a [T] const because we form a slice, a (*T,usize) pair, not
-    /// a pointer to an LLVM array type. Similar for trait objects.
-    const_unsized: RefCell<FnvHashMap<ValueRef, ValueRef>>,
-
-    /// Cache of emitted const globals (value -> global)
-    const_globals: RefCell<FnvHashMap<ValueRef, ValueRef>>,
-
-    /// Cache of emitted const values
-    const_values: RefCell<FnvHashMap<(ast::NodeId, &'tcx Substs<'tcx>), ValueRef>>,
-
-    /// Cache of external const values
-    extern_const_values: RefCell<DefIdMap<ValueRef>>,
-
-    impl_method_cache: RefCell<FnvHashMap<(DefId, ast::Name), DefId>>,
-
-    /// Cache of closure wrappers for bare fn's.
-    closure_bare_wrapper_cache: RefCell<FnvHashMap<ValueRef, ValueRef>>,
-
-    /// List of globals for static variables which need to be passed to the
-    /// LLVM function ReplaceAllUsesWith (RAUW) when translation is complete.
-    /// (We have to make sure we don't invalidate any ValueRefs referring
-    /// to constants.)
-    statics_to_rauw: RefCell<Vec<(ValueRef, ValueRef)>>,
-
-    lltypes: RefCell<FnvHashMap<Ty<'tcx>, Type>>,
-    llsizingtypes: RefCell<FnvHashMap<Ty<'tcx>, Type>>,
-    adt_reprs: RefCell<FnvHashMap<Ty<'tcx>, Rc<adt::Repr<'tcx>>>>,
-    type_hashcodes: RefCell<FnvHashMap<Ty<'tcx>, String>>,
-    int_type: Type,
-    opaque_vec_type: Type,
-    builder: BuilderRef_res,
-
-    /// Holds the LLVM values for closure IDs.
-    closure_vals: RefCell<FnvHashMap<MonoId<'tcx>, ValueRef>>,
-
-    dbg_cx: Option<debuginfo::CrateDebugContext<'tcx>>,
-
-    eh_personality: RefCell<Option<ValueRef>>,
-    eh_unwind_resume: RefCell<Option<ValueRef>>,
-    rust_try_fn: RefCell<Option<ValueRef>>,
-
-    intrinsics: RefCell<FnvHashMap<&'static str, ValueRef>>,
-
-    /// Number of LLVM instructions translated into this `LocalCrateContext`.
-    /// This is used to perform some basic load-balancing to keep all LLVM
-    /// contexts around the same size.
-    n_llvm_insns: Cell<usize>,
-
-    /// Depth of the current type-of computation, used to bail out of overly deep recursion.
-    type_of_depth: Cell<usize>,
-
-    trait_cache: RefCell<DepTrackingMap<TraitSelectionCache<'tcx>>>,
-}
-
-// Implement DepTrackingMapConfig for `trait_cache`
-pub struct TraitSelectionCache<'tcx> {
-    data: PhantomData<&'tcx ()>
-}
-
-impl<'tcx> DepTrackingMapConfig for TraitSelectionCache<'tcx> {
-    type Key = ty::PolyTraitRef<'tcx>;
-    type Value = traits::Vtable<'tcx, ()>;
-    fn to_dep_node(key: &ty::PolyTraitRef<'tcx>) -> DepNode {
-        ty::tls::with(|tcx| {
-            let lifted_key = tcx.lift(key).unwrap();
-            lifted_key.to_poly_trait_predicate().dep_node()
-        })
-    }
-}
-
-pub struct CrateContext<'a, 'tcx: 'a> {
-    shared: &'a SharedCrateContext<'a, 'tcx>,
-    local: &'a LocalCrateContext<'tcx>,
-    /// The index of `local` in `shared.local_ccxs`.  This is used in
-    /// `maybe_iter(true)` to identify the original `LocalCrateContext`.
-    index: usize,
-}
-
-pub struct CrateContextIterator<'a, 'tcx: 'a> {
-    shared: &'a SharedCrateContext<'a, 'tcx>,
-    index: usize,
-}
-
-impl<'a, 'tcx> Iterator for CrateContextIterator<'a,'tcx> {
-    type Item = CrateContext<'a, 'tcx>;
-
-    fn next(&mut self) -> Option<CrateContext<'a, 'tcx>> {
-        if self.index >= self.shared.local_ccxs.len() {
-            return None;
-        }
-
-        let index = self.index;
-        self.index += 1;
-
-        Some(CrateContext {
-            shared: self.shared,
-            local: &self.shared.local_ccxs[index],
-            index: index,
-        })
-    }
-}
-
-/// The iterator produced by `CrateContext::maybe_iter`.
-pub struct CrateContextMaybeIterator<'a, 'tcx: 'a> {
-    shared: &'a SharedCrateContext<'a, 'tcx>,
-    index: usize,
-    single: bool,
-    origin: usize,
-}
-
-impl<'a, 'tcx> Iterator for CrateContextMaybeIterator<'a, 'tcx> {
-    type Item = (CrateContext<'a, 'tcx>, bool);
-
-    fn next(&mut self) -> Option<(CrateContext<'a, 'tcx>, bool)> {
-        if self.index >= self.shared.local_ccxs.len() {
-            return None;
-        }
-
-        let index = self.index;
-        self.index += 1;
-        if self.single {
-            self.index = self.shared.local_ccxs.len();
-        }
-
-        let ccx = CrateContext {
-            shared: self.shared,
-            local: &self.shared.local_ccxs[index],
-            index: index,
-        };
-        Some((ccx, index == self.origin))
-    }
-}
-
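-/// Create an LLVM context and module named `mod_name`, with the data layout
-/// and target triple configured from the session's target.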
-unsafe fn create_context_and_module(sess: &Session, mod_name: &str) -> (ContextRef, ModuleRef) {
-    let llcx = llvm::LLVMContextCreate();
-    let mod_name = CString::new(mod_name).unwrap();
-    let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);
-
-    if let Some(ref custom_data_layout) = sess.target.target.options.data_layout {
-        let data_layout = CString::new(&custom_data_layout[..]).unwrap();
-        llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
-    } else {
-        let tm = ::back::write::create_target_machine(sess);
-        llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
-        llvm::LLVMRustDisposeTargetMachine(tm);
-    }
-
-    let llvm_target = sess.target.target.llvm_target.as_bytes();
-    let llvm_target = CString::new(llvm_target).unwrap();
-    llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());
-    (llcx, llmod)
-}
-
-impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> {
-    pub fn new(crate_name: &str,
-               local_count: usize,
-               tcx: &'b ty::ctxt<'tcx>,
-               mir_map: &'b MirMap<'tcx>,
-               export_map: ExportMap,
-               symbol_hasher: Sha256,
-               link_meta: LinkMeta,
-               reachable: NodeSet,
-               check_overflow: bool,
-               check_drop_flag_for_sanity: bool)
-               -> SharedCrateContext<'b, 'tcx> {
-        let (metadata_llcx, metadata_llmod) = unsafe {
-            create_context_and_module(&tcx.sess, "metadata")
-        };
-
-        // An interesting aspect of Windows where MSVC forces our hand (and
-        // MinGW apparently does not) is the use of `dllimport` and `dllexport`
-        // attributes in LLVM IR, as well as on native dependencies (in C these
-        // correspond to `__declspec(dllimport)`).
-        //
-        // Whenever a dynamic library is built with MSVC, its public interface
-        // must be specified by functions tagged with `dllexport`; otherwise
-        // they're not available to be linked against. This poses a few problems
-        // for the compiler, some of which are somewhat fundamental, but we use
-        // the `use_dll_storage_attrs` variable below to attach the `dllexport`
-        // attribute to all LLVM functions that are reachable (e.g. they're
-        // already tagged with external linkage). This is suboptimal for a few
-        // reasons:
-        //
-        // * If an object file will never be included in a dynamic library,
-        //   there's no need to attach the dllexport attribute. Most object
-        //   files in Rust are not destined to become part of a dll as binaries
-        //   are statically linked by default.
-        // * If the compiler is emitting both an rlib and a dylib, the same
-        //   source object file is currently used but with MSVC this may be less
-        //   feasible. The compiler may be able to get around this, but it may
-        //   involve some invasive changes to deal with this.
-        //
-        // The flipside of this situation is that whenever you link to a dll and
-        // you import a function from it, the import should be tagged with
-        // `dllimport`. At this time, however, the compiler does not emit
-        // `dllimport` for any declarations other than constants (where it is
-        // required), which is again suboptimal for even more reasons!
-        //
-        // * Calling a function imported from another dll without using
-        //   `dllimport` causes the linker/compiler to have extra overhead (one
-        //   `jmp` instruction on x86) when calling the function.
-        // * The same object file may be used in different circumstances, so a
-        //   function may be imported from a dll if the object is linked into a
-        //   dll, but it may be just linked against if linked into an rlib.
-        // * The compiler has no knowledge about whether native functions should
-        //   be tagged dllimport or not.
-        //
-        // For now the compiler takes the perf hit (I do not have any numbers to
-        // this effect) by marking very little as `dllimport` and praying the
-        // linker will take care of everything. Fixing this problem will likely
-        // require adding a few attributes to Rust itself (feature gated at the
-        // start) and then strongly recommending static linkage on MSVC!
-        let use_dll_storage_attrs = tcx.sess.target.target.options.is_like_msvc;
-
-        let mut shared_ccx = SharedCrateContext {
-            local_ccxs: Vec::with_capacity(local_count),
-            metadata_llmod: metadata_llmod,
-            metadata_llcx: metadata_llcx,
-            export_map: export_map,
-            reachable: reachable,
-            item_symbols: RefCell::new(NodeMap()),
-            link_meta: link_meta,
-            symbol_hasher: RefCell::new(symbol_hasher),
-            tcx: tcx,
-            mir_map: mir_map,
-            stats: Stats {
-                n_glues_created: Cell::new(0),
-                n_null_glues: Cell::new(0),
-                n_real_glues: Cell::new(0),
-                n_fns: Cell::new(0),
-                n_monos: Cell::new(0),
-                n_inlines: Cell::new(0),
-                n_closures: Cell::new(0),
-                n_llvm_insns: Cell::new(0),
-                llvm_insns: RefCell::new(FnvHashMap()),
-                fn_stats: RefCell::new(Vec::new()),
-            },
-            check_overflow: check_overflow,
-            check_drop_flag_for_sanity: check_drop_flag_for_sanity,
-            available_drop_glues: RefCell::new(FnvHashMap()),
-            use_dll_storage_attrs: use_dll_storage_attrs,
-            translation_items: RefCell::new(FnvHashMap()),
-        };
-
-        for i in 0..local_count {
-            // Append ".rs" to crate name as LLVM module identifier.
-            //
-            // The LLVM code generator emits a ".file filename" directive
-            // for ELF backends. The value of "filename" is set as the
-            // LLVM module identifier. Due to an LLVM MC bug[1], LLVM
-            // crashes if the module identifier is the same as another
-            // symbol in the module, such as a function name.
-            // 1. http://llvm.org/bugs/show_bug.cgi?id=11479
-            let llmod_id = format!("{}.{}.rs", crate_name, i);
-            let local_ccx = LocalCrateContext::new(&shared_ccx, &llmod_id[..]);
-            shared_ccx.local_ccxs.push(local_ccx);
-        }
-
-        shared_ccx
-    }
-
-    pub fn iter<'a>(&'a self) -> CrateContextIterator<'a, 'tcx> {
-        CrateContextIterator {
-            shared: self,
-            index: 0,
-        }
-    }
-
-    pub fn get_ccx<'a>(&'a self, index: usize) -> CrateContext<'a, 'tcx> {
-        CrateContext {
-            shared: self,
-            local: &self.local_ccxs[index],
-            index: index,
-        }
-    }
-
-    fn get_smallest_ccx<'a>(&'a self) -> CrateContext<'a, 'tcx> {
-        let (local_ccx, index) =
-            self.local_ccxs
-                .iter()
-                .zip(0..self.local_ccxs.len())
-                .min_by_key(|&(local_ccx, _idx)| local_ccx.n_llvm_insns.get())
-                .unwrap();
-        CrateContext {
-            shared: self,
-            local: local_ccx,
-            index: index,
-        }
-    }
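`get_smallest_ccx` picks the codegen unit that has emitted the fewest LLVM instructions so far, so new work lands in the least-loaded module. A small self-contained sketch of the same selection over plain counters, written with `enumerate` rather than `zip(0..len)`:

    fn main() {
        // Instruction counts per codegen unit; the index of the smallest one is
        // the unit that should receive the next item.
        let insn_counts = vec![120u64, 35, 78];
        let (index, count) = insn_counts
            .iter()
            .enumerate()
            .min_by_key(|&(_idx, n)| *n)
            .expect("at least one codegen unit");
        assert_eq!((index, *count), (1, 35));
    }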
-
-
-    pub fn metadata_llmod(&self) -> ModuleRef {
-        self.metadata_llmod
-    }
-
-    pub fn metadata_llcx(&self) -> ContextRef {
-        self.metadata_llcx
-    }
-
-    pub fn export_map<'a>(&'a self) -> &'a ExportMap {
-        &self.export_map
-    }
-
-    pub fn reachable<'a>(&'a self) -> &'a NodeSet {
-        &self.reachable
-    }
-
-    pub fn item_symbols<'a>(&'a self) -> &'a RefCell<NodeMap<String>> {
-        &self.item_symbols
-    }
-
-    pub fn link_meta<'a>(&'a self) -> &'a LinkMeta {
-        &self.link_meta
-    }
-
-    pub fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
-        self.tcx
-    }
-
-    pub fn sess<'a>(&'a self) -> &'a Session {
-        &self.tcx.sess
-    }
-
-    pub fn stats<'a>(&'a self) -> &'a Stats {
-        &self.stats
-    }
-
-    pub fn use_dll_storage_attrs(&self) -> bool {
-        self.use_dll_storage_attrs
-    }
-}
-
-impl<'tcx> LocalCrateContext<'tcx> {
-    fn new<'a>(shared: &SharedCrateContext<'a, 'tcx>,
-           name: &str)
-           -> LocalCrateContext<'tcx> {
-        unsafe {
-            let (llcx, llmod) = create_context_and_module(&shared.tcx.sess, name);
-
-            let dbg_cx = if shared.tcx.sess.opts.debuginfo != NoDebugInfo {
-                Some(debuginfo::CrateDebugContext::new(llmod))
-            } else {
-                None
-            };
-
-            let mut local_ccx = LocalCrateContext {
-                llmod: llmod,
-                llcx: llcx,
-                tn: TypeNames::new(),
-                externs: RefCell::new(FnvHashMap()),
-                item_vals: RefCell::new(NodeMap()),
-                needs_unwind_cleanup_cache: RefCell::new(FnvHashMap()),
-                fn_pointer_shims: RefCell::new(FnvHashMap()),
-                drop_glues: RefCell::new(FnvHashMap()),
-                external: RefCell::new(DefIdMap()),
-                external_srcs: RefCell::new(NodeMap()),
-                monomorphized: RefCell::new(FnvHashMap()),
-                monomorphizing: RefCell::new(DefIdMap()),
-                available_monomorphizations: RefCell::new(FnvHashSet()),
-                vtables: RefCell::new(FnvHashMap()),
-                const_cstr_cache: RefCell::new(FnvHashMap()),
-                const_unsized: RefCell::new(FnvHashMap()),
-                const_globals: RefCell::new(FnvHashMap()),
-                const_values: RefCell::new(FnvHashMap()),
-                extern_const_values: RefCell::new(DefIdMap()),
-                impl_method_cache: RefCell::new(FnvHashMap()),
-                closure_bare_wrapper_cache: RefCell::new(FnvHashMap()),
-                statics_to_rauw: RefCell::new(Vec::new()),
-                lltypes: RefCell::new(FnvHashMap()),
-                llsizingtypes: RefCell::new(FnvHashMap()),
-                adt_reprs: RefCell::new(FnvHashMap()),
-                type_hashcodes: RefCell::new(FnvHashMap()),
-                int_type: Type::from_ref(ptr::null_mut()),
-                opaque_vec_type: Type::from_ref(ptr::null_mut()),
-                builder: BuilderRef_res(llvm::LLVMCreateBuilderInContext(llcx)),
-                closure_vals: RefCell::new(FnvHashMap()),
-                dbg_cx: dbg_cx,
-                eh_personality: RefCell::new(None),
-                eh_unwind_resume: RefCell::new(None),
-                rust_try_fn: RefCell::new(None),
-                intrinsics: RefCell::new(FnvHashMap()),
-                n_llvm_insns: Cell::new(0),
-                type_of_depth: Cell::new(0),
-                trait_cache: RefCell::new(DepTrackingMap::new(shared.tcx
-                                                                    .dep_graph
-                                                                    .clone())),
-            };
-
-            local_ccx.int_type = Type::int(&local_ccx.dummy_ccx(shared));
-            local_ccx.opaque_vec_type = Type::opaque_vec(&local_ccx.dummy_ccx(shared));
-
-            // Done mutating local_ccx directly.  (The rest of the
-            // initialization goes through RefCell.)
-            {
-                let ccx = local_ccx.dummy_ccx(shared);
-
-                let mut str_slice_ty = Type::named_struct(&ccx, "str_slice");
-                str_slice_ty.set_struct_body(&[Type::i8p(&ccx), ccx.int_type()], false);
-                ccx.tn().associate_type("str_slice", &str_slice_ty);
-
-                if ccx.sess().count_llvm_insns() {
-                    base::init_insn_ctxt()
-                }
-            }
-
-            local_ccx
-        }
-    }
-
-    /// Create a dummy `CrateContext` from `self` and the provided
-    /// `SharedCrateContext`.  This is somewhat dangerous because `self` may
-    /// not actually be an element of `shared.local_ccxs`, which can cause some
-    /// operations to panic unexpectedly.
-    ///
-    /// This is used in the `LocalCrateContext` constructor to allow calling
-    /// functions that expect a complete `CrateContext`, even before the local
-    /// portion is fully initialized and attached to the `SharedCrateContext`.
-    fn dummy_ccx<'a>(&'a self, shared: &'a SharedCrateContext<'a, 'tcx>)
-                     -> CrateContext<'a, 'tcx> {
-        CrateContext {
-            shared: shared,
-            local: self,
-            index: !0 as usize,
-        }
-    }
-}
-
-impl<'b, 'tcx> CrateContext<'b, 'tcx> {
-    pub fn shared(&self) -> &'b SharedCrateContext<'b, 'tcx> {
-        self.shared
-    }
-
-    pub fn local(&self) -> &'b LocalCrateContext<'tcx> {
-        self.local
-    }
-
-
-    /// Get a (possibly) different `CrateContext` from the same
-    /// `SharedCrateContext`.
-    pub fn rotate(&self) -> CrateContext<'b, 'tcx> {
-        self.shared.get_smallest_ccx()
-    }
-
-    /// Either iterate over only `self`, or iterate over all `CrateContext`s in
-    /// the `SharedCrateContext`.  The iterator produces `(ccx, is_origin)`
-    /// pairs, where `is_origin` is `true` if `ccx` is `self` and `false`
-    /// otherwise.  This method is useful for avoiding code duplication in
-    /// cases where it may or may not be necessary to translate code into every
-    /// context.
-    pub fn maybe_iter(&self, iter_all: bool) -> CrateContextMaybeIterator<'b, 'tcx> {
-        CrateContextMaybeIterator {
-            shared: self.shared,
-            index: if iter_all { 0 } else { self.index },
-            single: !iter_all,
-            origin: self.index,
-        }
-    }
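A minimal standalone sketch of the one-or-all iteration contract described above, over a plain slice instead of `local_ccxs` (the free function `maybe_iter` here is illustrative, not the compiler's API):

    // Yields either every slot or only the slot at `origin`, tagging each item
    // with whether it is the originating slot -- mirroring the `(ccx, is_origin)`
    // pairs described above, but over an ordinary slice.
    fn maybe_iter<T>(items: &[T], origin: usize, iter_all: bool)
                     -> impl Iterator<Item = (&T, bool)> {
        let range = if iter_all { 0..items.len() } else { origin..origin + 1 };
        range.map(move |i| (&items[i], i == origin))
    }

    fn main() {
        let units = ["a", "b", "c"];
        let all: Vec<_> = maybe_iter(&units, 1, true).collect();
        assert_eq!(all, vec![(&"a", false), (&"b", true), (&"c", false)]);
        let single: Vec<_> = maybe_iter(&units, 1, false).collect();
        assert_eq!(single, vec![(&"b", true)]);
    }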
-
-
-    pub fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
-        self.shared.tcx
-    }
-
-    pub fn sess<'a>(&'a self) -> &'a Session {
-        &self.shared.tcx.sess
-    }
-
-    pub fn builder<'a>(&'a self) -> Builder<'a, 'tcx> {
-        Builder::new(self)
-    }
-
-    pub fn raw_builder<'a>(&'a self) -> BuilderRef {
-        self.local.builder.b
-    }
-
-    pub fn get_intrinsic(&self, key: &str) -> ValueRef {
-        if let Some(v) = self.intrinsics().borrow().get(key).cloned() {
-            return v;
-        }
-        match declare_intrinsic(self, key) {
-            Some(v) => return v,
-            None => panic!("unknown intrinsic '{}'", key)
-        }
-    }
-
-    pub fn llmod(&self) -> ModuleRef {
-        self.local.llmod
-    }
-
-    pub fn llcx(&self) -> ContextRef {
-        self.local.llcx
-    }
-
-    pub fn td(&self) -> llvm::TargetDataRef {
-        unsafe { llvm::LLVMRustGetModuleDataLayout(self.llmod()) }
-    }
-
-    pub fn tn<'a>(&'a self) -> &'a TypeNames {
-        &self.local.tn
-    }
-
-    pub fn externs<'a>(&'a self) -> &'a RefCell<ExternMap> {
-        &self.local.externs
-    }
-
-    pub fn item_vals<'a>(&'a self) -> &'a RefCell<NodeMap<ValueRef>> {
-        &self.local.item_vals
-    }
-
-    pub fn export_map<'a>(&'a self) -> &'a ExportMap {
-        &self.shared.export_map
-    }
-
-    pub fn reachable<'a>(&'a self) -> &'a NodeSet {
-        &self.shared.reachable
-    }
-
-    pub fn item_symbols<'a>(&'a self) -> &'a RefCell<NodeMap<String>> {
-        &self.shared.item_symbols
-    }
-
-    pub fn link_meta<'a>(&'a self) -> &'a LinkMeta {
-        &self.shared.link_meta
-    }
-
-    pub fn needs_unwind_cleanup_cache(&self) -> &RefCell<FnvHashMap<Ty<'tcx>, bool>> {
-        &self.local.needs_unwind_cleanup_cache
-    }
-
-    pub fn fn_pointer_shims(&self) -> &RefCell<FnvHashMap<Ty<'tcx>, ValueRef>> {
-        &self.local.fn_pointer_shims
-    }
-
-    pub fn drop_glues<'a>(&'a self) -> &'a RefCell<FnvHashMap<DropGlueKind<'tcx>, ValueRef>> {
-        &self.local.drop_glues
-    }
-
-    pub fn external<'a>(&'a self) -> &'a RefCell<DefIdMap<Option<ast::NodeId>>> {
-        &self.local.external
-    }
-
-    pub fn external_srcs<'a>(&'a self) -> &'a RefCell<NodeMap<DefId>> {
-        &self.local.external_srcs
-    }
-
-    pub fn monomorphized<'a>(&'a self) -> &'a RefCell<FnvHashMap<MonoId<'tcx>, ValueRef>> {
-        &self.local.monomorphized
-    }
-
-    pub fn monomorphizing<'a>(&'a self) -> &'a RefCell<DefIdMap<usize>> {
-        &self.local.monomorphizing
-    }
-
-    pub fn vtables<'a>(&'a self) -> &'a RefCell<FnvHashMap<ty::PolyTraitRef<'tcx>, ValueRef>> {
-        &self.local.vtables
-    }
-
-    pub fn const_cstr_cache<'a>(&'a self) -> &'a RefCell<FnvHashMap<InternedString, ValueRef>> {
-        &self.local.const_cstr_cache
-    }
-
-    pub fn const_unsized<'a>(&'a self) -> &'a RefCell<FnvHashMap<ValueRef, ValueRef>> {
-        &self.local.const_unsized
-    }
-
-    pub fn const_globals<'a>(&'a self) -> &'a RefCell<FnvHashMap<ValueRef, ValueRef>> {
-        &self.local.const_globals
-    }
-
-    pub fn const_values<'a>(&'a self) -> &'a RefCell<FnvHashMap<(ast::NodeId, &'tcx Substs<'tcx>),
-                                                                ValueRef>> {
-        &self.local.const_values
-    }
-
-    pub fn extern_const_values<'a>(&'a self) -> &'a RefCell<DefIdMap<ValueRef>> {
-        &self.local.extern_const_values
-    }
-
-    pub fn impl_method_cache<'a>(&'a self)
-            -> &'a RefCell<FnvHashMap<(DefId, ast::Name), DefId>> {
-        &self.local.impl_method_cache
-    }
-
-    pub fn closure_bare_wrapper_cache<'a>(&'a self) -> &'a RefCell<FnvHashMap<ValueRef, ValueRef>> {
-        &self.local.closure_bare_wrapper_cache
-    }
-
-    pub fn statics_to_rauw<'a>(&'a self) -> &'a RefCell<Vec<(ValueRef, ValueRef)>> {
-        &self.local.statics_to_rauw
-    }
-
-    pub fn lltypes<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, Type>> {
-        &self.local.lltypes
-    }
-
-    pub fn llsizingtypes<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, Type>> {
-        &self.local.llsizingtypes
-    }
-
-    pub fn adt_reprs<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, Rc<adt::Repr<'tcx>>>> {
-        &self.local.adt_reprs
-    }
-
-    pub fn symbol_hasher<'a>(&'a self) -> &'a RefCell<Sha256> {
-        &self.shared.symbol_hasher
-    }
-
-    pub fn type_hashcodes<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, String>> {
-        &self.local.type_hashcodes
-    }
-
-    pub fn stats<'a>(&'a self) -> &'a Stats {
-        &self.shared.stats
-    }
-
-    pub fn available_monomorphizations<'a>(&'a self) -> &'a RefCell<FnvHashSet<String>> {
-        &self.local.available_monomorphizations
-    }
-
-    pub fn available_drop_glues(&self) -> &RefCell<FnvHashMap<DropGlueKind<'tcx>, String>> {
-        &self.shared.available_drop_glues
-    }
-
-    pub fn int_type(&self) -> Type {
-        self.local.int_type
-    }
-
-    pub fn opaque_vec_type(&self) -> Type {
-        self.local.opaque_vec_type
-    }
-
-    pub fn closure_vals<'a>(&'a self) -> &'a RefCell<FnvHashMap<MonoId<'tcx>, ValueRef>> {
-        &self.local.closure_vals
-    }
-
-    pub fn dbg_cx<'a>(&'a self) -> &'a Option<debuginfo::CrateDebugContext<'tcx>> {
-        &self.local.dbg_cx
-    }
-
-    pub fn eh_personality<'a>(&'a self) -> &'a RefCell<Option<ValueRef>> {
-        &self.local.eh_personality
-    }
-
-    pub fn eh_unwind_resume<'a>(&'a self) -> &'a RefCell<Option<ValueRef>> {
-        &self.local.eh_unwind_resume
-    }
-
-    pub fn rust_try_fn<'a>(&'a self) -> &'a RefCell<Option<ValueRef>> {
-        &self.local.rust_try_fn
-    }
-
-    fn intrinsics<'a>(&'a self) -> &'a RefCell<FnvHashMap<&'static str, ValueRef>> {
-        &self.local.intrinsics
-    }
-
-    pub fn count_llvm_insn(&self) {
-        self.local.n_llvm_insns.set(self.local.n_llvm_insns.get() + 1);
-    }
-
-    pub fn trait_cache(&self) -> &RefCell<DepTrackingMap<TraitSelectionCache<'tcx>>> {
-        &self.local.trait_cache
-    }
-
-    /// Return exclusive upper bound on object size.
-    ///
-    /// The theoretical maximum object size is defined as the maximum positive `int` value. This
-    /// ensures that the `offset` semantics remain well-defined by allowing it to correctly index
-    /// every address within an object, as well as one byte past the end, while still allowing
-    /// `int` to store the difference between any two pointers into an object.
-    ///
-    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer to
-    /// represent object size in bits. It would need to be 1 << 61 to account for this, but is
-    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
-    /// address space on 64-bit ARMv8 and x86_64.
-    pub fn obj_size_bound(&self) -> u64 {
-        match &self.sess().target.target.target_pointer_width[..] {
-            "32" => 1 << 31,
-            "64" => 1 << 47,
-            _ => unreachable!() // error handled by config::build_target_config
-        }
-    }
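A quick standalone check of the two bounds quoted above (the values here are computed directly, not taken from the compiler):

    fn main() {
        // On 32-bit targets the bound is the largest positive i32 plus one byte
        // past the end; on 64-bit it is conservatively capped at 2^47.
        let bound_32: u64 = 1 << 31;
        let bound_64: u64 = 1 << 47;
        assert_eq!(bound_32, i32::MAX as u64 + 1);
        assert_eq!(bound_64, 140_737_488_355_328);
        // The "theoretical" 64-bit bound mentioned above would be 2^61.
        assert_eq!(1u64 << 61, 2_305_843_009_213_693_952);
    }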
-
-    pub fn report_overbig_object(&self, obj: Ty<'tcx>) -> ! {
-        self.sess().fatal(
-            &format!("the type `{:?}` is too big for the current architecture",
-                    obj))
-    }
-
-    pub fn enter_type_of(&self, ty: Ty<'tcx>) -> TypeOfDepthLock<'b, 'tcx> {
-        let current_depth = self.local.type_of_depth.get();
-        debug!("enter_type_of({:?}) at depth {:?}", ty, current_depth);
-        if current_depth > self.sess().recursion_limit.get() {
-            self.sess().fatal(
-                &format!("overflow representing the type `{}`", ty))
-        }
-        self.local.type_of_depth.set(current_depth + 1);
-        TypeOfDepthLock(self.local)
-    }
-
-    pub fn check_overflow(&self) -> bool {
-        self.shared.check_overflow
-    }
-
-    pub fn check_drop_flag_for_sanity(&self) -> bool {
-        // This controls whether we emit a conditional llvm.debugtrap
-        // guarded on whether the dropflag is one of its (two) valid
-        // values.
-        self.shared.check_drop_flag_for_sanity
-    }
-
-    pub fn use_dll_storage_attrs(&self) -> bool {
-        self.shared.use_dll_storage_attrs()
-    }
-
-    pub fn mir_map(&self) -> &'b MirMap<'tcx> {
-        self.shared.mir_map
-    }
-
-    pub fn translation_items(&self) -> &RefCell<FnvHashMap<TransItem<'tcx>, TransItemState>> {
-        &self.shared.translation_items
-    }
-
-    pub fn record_translation_item_as_generated(&self, cgi: TransItem<'tcx>) {
-        if self.sess().opts.debugging_opts.print_trans_items.is_none() {
-            return;
-        }
-
-        let mut codegen_items = self.translation_items().borrow_mut();
-
-        if codegen_items.contains_key(&cgi) {
-            codegen_items.insert(cgi, TransItemState::PredictedAndGenerated);
-        } else {
-            codegen_items.insert(cgi, TransItemState::NotPredictedButGenerated);
-        }
-    }
-}
-
-pub struct TypeOfDepthLock<'a, 'tcx: 'a>(&'a LocalCrateContext<'tcx>);
-
-impl<'a, 'tcx> Drop for TypeOfDepthLock<'a, 'tcx> {
-    fn drop(&mut self) {
-        self.0.type_of_depth.set(self.0.type_of_depth.get() - 1);
-    }
-}
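`TypeOfDepthLock` is an RAII guard: `enter_type_of` bumps the depth counter and the `Drop` impl above restores it, so the counter stays balanced even on early exits. A minimal standalone version of the same idea over a `Cell<usize>`:

    use std::cell::Cell;

    // Increments a shared depth counter on creation and decrements it again when
    // dropped, so early returns and panics still restore the previous depth.
    struct DepthGuard<'a>(&'a Cell<usize>);

    impl<'a> DepthGuard<'a> {
        fn enter(depth: &'a Cell<usize>) -> DepthGuard<'a> {
            depth.set(depth.get() + 1);
            DepthGuard(depth)
        }
    }

    impl<'a> Drop for DepthGuard<'a> {
        fn drop(&mut self) {
            self.0.set(self.0.get() - 1);
        }
    }

    fn main() {
        let depth = Cell::new(0);
        {
            let _guard = DepthGuard::enter(&depth);
            assert_eq!(depth.get(), 1);
        }
        assert_eq!(depth.get(), 0);
    }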
-
-/// Declare any llvm intrinsics that you might need
-fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option<ValueRef> {
-    macro_rules! ifn {
-        ($name:expr, fn() -> $ret:expr) => (
-            if key == $name {
-                let f = declare::declare_cfn(ccx, $name, Type::func(&[], &$ret),
-                                             ccx.tcx().mk_nil());
-                llvm::SetUnnamedAddr(f, false);
-                ccx.intrinsics().borrow_mut().insert($name, f.clone());
-                return Some(f);
-            }
-        );
-        ($name:expr, fn(...) -> $ret:expr) => (
-            if key == $name {
-                let f = declare::declare_cfn(ccx, $name,
-                                             Type::variadic_func(&[], &$ret),
-                                             ccx.tcx().mk_nil());
-                llvm::SetUnnamedAddr(f, false);
-                ccx.intrinsics().borrow_mut().insert($name, f.clone());
-                return Some(f);
-            }
-        );
-        ($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
-            if key == $name {
-                let f = declare::declare_cfn(ccx, $name, Type::func(&[$($arg),*], &$ret),
-                                             ccx.tcx().mk_nil());
-                llvm::SetUnnamedAddr(f, false);
-                ccx.intrinsics().borrow_mut().insert($name, f.clone());
-                return Some(f);
-            }
-        );
-    }
-    macro_rules! mk_struct {
-        ($($field_ty:expr),*) => (Type::struct_(ccx, &[$($field_ty),*], false))
-    }
-
-    let i8p = Type::i8p(ccx);
-    let void = Type::void(ccx);
-    let i1 = Type::i1(ccx);
-    let t_i8 = Type::i8(ccx);
-    let t_i16 = Type::i16(ccx);
-    let t_i32 = Type::i32(ccx);
-    let t_i64 = Type::i64(ccx);
-    let t_f32 = Type::f32(ccx);
-    let t_f64 = Type::f64(ccx);
-
-    ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void);
-    ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void);
-    ifn!("llvm.memcpy.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void);
-    ifn!("llvm.memmove.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void);
-    ifn!("llvm.memmove.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void);
-    ifn!("llvm.memmove.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void);
-    ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void);
-    ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void);
-    ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void);
-
-    ifn!("llvm.trap", fn() -> void);
-    ifn!("llvm.debugtrap", fn() -> void);
-    ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
-
-    ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
-    ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
-    ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
-    ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);
-
-    ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
-    ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);
-    ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
-    ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);
-    ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
-    ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);
-    ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
-    ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);
-    ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
-    ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);
-    ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
-    ifn!("llvm.log.f64", fn(t_f64) -> t_f64);
-    ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
-    ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);
-    ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
-    ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);
-
-    ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
-    ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
-
-    ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
-    ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);
-
-    ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
-    ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);
-    ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
-    ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);
-    ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
-    ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);
-
-    ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
-    ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);
-    ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
-    ifn!("llvm.round.f64", fn(t_f64) -> t_f64);
-
-    ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
-    ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
-    ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
-    ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64);
-
-    ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8);
-    ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16);
-    ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32);
-    ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64);
-
-    ifn!("llvm.ctlz.i8", fn(t_i8 , i1) -> t_i8);
-    ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16);
-    ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32);
-    ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64);
-
-    ifn!("llvm.cttz.i8", fn(t_i8 , i1) -> t_i8);
-    ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16);
-    ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32);
-    ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64);
-
-    ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16);
-    ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32);
-    ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64);
-
-    ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
-    ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
-    ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
-    ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
-
-    ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
-    ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
-    ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
-    ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
-
-    ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
-    ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
-    ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
-    ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
-
-    ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
-    ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
-    ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
-    ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
-
-    ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
-    ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
-    ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
-    ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
-
-    ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
-    ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
-    ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
-    ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
-
-    ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void);
-    ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void);
-
-    ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
-    ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
-    ifn!("llvm.localescape", fn(...) -> void);
-    ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
-    ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);
-
-    // Some intrinsics were introduced in later versions of LLVM, but they have
-    // fallbacks in libc or libm and such.
-    macro_rules! compatible_ifn {
-        ($name:expr, noop($cname:ident ($($arg:expr),*) -> void), $llvm_version:expr) => (
-            if unsafe { llvm::LLVMVersionMinor() >= $llvm_version } {
-                // The `if key == $name` is already in ifn!
-                ifn!($name, fn($($arg),*) -> void);
-            } else if key == $name {
-                let f = declare::declare_cfn(ccx, stringify!($cname),
-                                             Type::func(&[$($arg),*], &void),
-                                             ccx.tcx().mk_nil());
-                llvm::SetLinkage(f, llvm::InternalLinkage);
-
-                let bld = ccx.builder();
-                let llbb = unsafe {
-                    llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), f,
-                                                        "entry-block\0".as_ptr() as *const _)
-                };
-
-                bld.position_at_end(llbb);
-                bld.ret_void();
-
-                ccx.intrinsics().borrow_mut().insert($name, f.clone());
-                return Some(f);
-            }
-        );
-        ($name:expr, $cname:ident ($($arg:expr),*) -> $ret:expr, $llvm_version:expr) => (
-            if unsafe { llvm::LLVMVersionMinor() >= $llvm_version } {
-                // The `if key == $name` is already in ifn!
-                ifn!($name, fn($($arg),*) -> $ret);
-            } else if key == $name {
-                let f = declare::declare_cfn(ccx, stringify!($cname),
-                                             Type::func(&[$($arg),*], &$ret),
-                                             ccx.tcx().mk_nil());
-                ccx.intrinsics().borrow_mut().insert($name, f.clone());
-                return Some(f);
-            }
-        )
-    }
-
-    compatible_ifn!("llvm.assume", noop(llvmcompat_assume(i1) -> void), 6);
-
-    if ccx.sess().opts.debuginfo != NoDebugInfo {
-        ifn!("llvm.dbg.declare", fn(Type::metadata(ccx), Type::metadata(ccx)) -> void);
-        ifn!("llvm.dbg.value", fn(Type::metadata(ccx), t_i64, Type::metadata(ccx)) -> void);
-    }
-    return None;
-}
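`get_intrinsic` consults a per-context cache and only falls back to `declare_intrinsic`, whose `ifn!` arms populate the cache on first use. A small self-contained sketch of that declare-on-first-use caching, with strings standing in for LLVM value handles:

    use std::cell::RefCell;
    use std::collections::HashMap;

    // Caches "declarations" by name and creates them only on first use, in the
    // spirit of `get_intrinsic`/`declare_intrinsic` above.
    struct Intrinsics {
        cache: RefCell<HashMap<&'static str, String>>,
    }

    impl Intrinsics {
        fn get(&self, key: &'static str) -> String {
            if let Some(v) = self.cache.borrow().get(key).cloned() {
                return v;
            }
            let declared = format!("declare {}", key);
            self.cache.borrow_mut().insert(key, declared.clone());
            declared
        }
    }

    fn main() {
        let intrinsics = Intrinsics { cache: RefCell::new(HashMap::new()) };
        assert_eq!(intrinsics.get("llvm.trap"), "declare llvm.trap");
        // The second lookup hits the cache instead of re-declaring.
        assert_eq!(intrinsics.get("llvm.trap"), "declare llvm.trap");
    }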
diff --git a/src/librustc_trans/trans/controlflow.rs b/src/librustc_trans/trans/controlflow.rs
deleted file mode 100644 (file)
index e84e1b4..0000000
+++ /dev/null
@@ -1,449 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use llvm::ValueRef;
-use middle::def::Def;
-use middle::lang_items::{PanicFnLangItem, PanicBoundsCheckFnLangItem};
-use trans::base::*;
-use trans::basic_block::BasicBlock;
-use trans::build::*;
-use trans::callee;
-use trans::cleanup::CleanupMethods;
-use trans::cleanup;
-use trans::common::*;
-use trans::consts;
-use trans::debuginfo;
-use trans::debuginfo::{DebugLoc, ToDebugLoc};
-use trans::expr;
-use trans::machine;
-use trans;
-use middle::ty;
-
-use rustc_front::hir;
-use rustc_front::util as ast_util;
-
-use syntax::ast;
-use syntax::parse::token::InternedString;
-use syntax::parse::token;
-
-pub fn trans_stmt<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
-                              s: &hir::Stmt)
-                              -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_stmt");
-    let fcx = cx.fcx;
-    debug!("trans_stmt({:?})", s);
-
-    if cx.unreachable.get() {
-        return cx;
-    }
-
-    if cx.sess().asm_comments() {
-        add_span_comment(cx, s.span, &format!("{:?}", s));
-    }
-
-    let mut bcx = cx;
-
-    let id = ast_util::stmt_id(s);
-    let cleanup_debug_loc =
-        debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), id, s.span, false);
-    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
-
-    match s.node {
-        hir::StmtExpr(ref e, _) | hir::StmtSemi(ref e, _) => {
-            bcx = trans_stmt_semi(bcx, &e);
-        }
-        hir::StmtDecl(ref d, _) => {
-            match d.node {
-                hir::DeclLocal(ref local) => {
-                    bcx = init_local(bcx, &local);
-                    debuginfo::create_local_var_metadata(bcx, &local);
-                }
-                // Inner items are visited by `trans_item`/`trans_meth`.
-                hir::DeclItem(_) => {},
-            }
-        }
-    }
-
-    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, ast_util::stmt_id(s));
-
-    return bcx;
-}
-
-pub fn trans_stmt_semi<'blk, 'tcx>(cx: Block<'blk, 'tcx>, e: &hir::Expr)
-                                   -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_stmt_semi");
-
-    if cx.unreachable.get() {
-        return cx;
-    }
-
-    let ty = expr_ty(cx, e);
-    if cx.fcx.type_needs_drop(ty) {
-        expr::trans_to_lvalue(cx, e, "stmt").bcx
-    } else {
-        expr::trans_into(cx, e, expr::Ignore)
-    }
-}
-
-pub fn trans_block<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                               b: &hir::Block,
-                               mut dest: expr::Dest)
-                               -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_block");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let fcx = bcx.fcx;
-    let mut bcx = bcx;
-
-    let cleanup_debug_loc =
-        debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), b.id, b.span, true);
-    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
-
-    for s in &b.stmts {
-        bcx = trans_stmt(bcx, s);
-    }
-
-    if dest != expr::Ignore {
-        let block_ty = node_id_type(bcx, b.id);
-
-        if b.expr.is_none() || type_is_zero_size(bcx.ccx(), block_ty) {
-            dest = expr::Ignore;
-        } else if b.expr.is_some() {
-            // If the block has an expression, but that expression isn't reachable,
-            // don't store into the given destination; ignore it.
-            if let Some(ref cfg) = bcx.fcx.cfg {
-                if !cfg.node_is_reachable(b.expr.as_ref().unwrap().id) {
-                    dest = expr::Ignore;
-                }
-            }
-        }
-    }
-
-    match b.expr {
-        Some(ref e) => {
-            if !bcx.unreachable.get() {
-                bcx = expr::trans_into(bcx, &e, dest);
-            }
-        }
-        None => {
-            assert!(dest == expr::Ignore || bcx.unreachable.get());
-        }
-    }
-
-    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, b.id);
-
-    return bcx;
-}
-
-pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                            if_id: ast::NodeId,
-                            cond: &hir::Expr,
-                            thn: &hir::Block,
-                            els: Option<&hir::Expr>,
-                            dest: expr::Dest)
-                            -> Block<'blk, 'tcx> {
-    debug!("trans_if(bcx={}, if_id={}, cond={:?}, thn={}, dest={})",
-           bcx.to_str(), if_id, cond, thn.id,
-           dest.to_string(bcx.ccx()));
-    let _icx = push_ctxt("trans_if");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let mut bcx = bcx;
-
-    let cond_val = unpack_result!(bcx, expr::trans(bcx, cond).to_llbool());
-
-    // Drop branches that are known to be impossible
-    if let Some(cv) = const_to_opt_uint(cond_val) {
-        if cv == 1 {
-            // if true { .. } [else { .. }]
-            bcx = trans_block(bcx, &thn, dest);
-            trans::debuginfo::clear_source_location(bcx.fcx);
-        } else {
-            if let Some(elexpr) = els {
-                bcx = expr::trans_into(bcx, &elexpr, dest);
-                trans::debuginfo::clear_source_location(bcx.fcx);
-            }
-        }
-
-        return bcx;
-    }
-
-    let name = format!("then-block-{}-", thn.id);
-    let then_bcx_in = bcx.fcx.new_id_block(&name[..], thn.id);
-    let then_bcx_out = trans_block(then_bcx_in, &thn, dest);
-    trans::debuginfo::clear_source_location(bcx.fcx);
-
-    let cond_source_loc = cond.debug_loc();
-
-    let next_bcx;
-    match els {
-        Some(elexpr) => {
-            let else_bcx_in = bcx.fcx.new_id_block("else-block", elexpr.id);
-            let else_bcx_out = expr::trans_into(else_bcx_in, &elexpr, dest);
-            next_bcx = bcx.fcx.join_blocks(if_id,
-                                           &[then_bcx_out, else_bcx_out]);
-            CondBr(bcx, cond_val, then_bcx_in.llbb, else_bcx_in.llbb, cond_source_loc);
-        }
-
-        None => {
-            next_bcx = bcx.fcx.new_id_block("next-block", if_id);
-            Br(then_bcx_out, next_bcx.llbb, DebugLoc::None);
-            CondBr(bcx, cond_val, then_bcx_in.llbb, next_bcx.llbb, cond_source_loc);
-        }
-    }
-
-    // Clear the source location, because it is still set to whatever was
-    // translated right before.
-    trans::debuginfo::clear_source_location(next_bcx.fcx);
-
-    next_bcx
-}
-
-pub fn trans_while<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                               loop_expr: &hir::Expr,
-                               cond: &hir::Expr,
-                               body: &hir::Block)
-                               -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_while");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let fcx = bcx.fcx;
-
-    //            bcx
-    //             |
-    //         cond_bcx_in  <--------+
-    //             |                 |
-    //         cond_bcx_out          |
-    //           |      |            |
-    //           |    body_bcx_in    |
-    // cleanup_blk      |            |
-    //    |           body_bcx_out --+
-    // next_bcx_in
-
-    let next_bcx_in = fcx.new_id_block("while_exit", loop_expr.id);
-    let cond_bcx_in = fcx.new_id_block("while_cond", cond.id);
-    let body_bcx_in = fcx.new_id_block("while_body", body.id);
-
-    fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, cond_bcx_in]);
-
-    Br(bcx, cond_bcx_in.llbb, loop_expr.debug_loc());
-
-    // compile the block where we will handle loop cleanups
-    let cleanup_llbb = fcx.normal_exit_block(loop_expr.id, cleanup::EXIT_BREAK);
-
-    // compile the condition
-    let Result {bcx: cond_bcx_out, val: cond_val} =
-        expr::trans(cond_bcx_in, cond).to_llbool();
-
-    CondBr(cond_bcx_out, cond_val, body_bcx_in.llbb, cleanup_llbb, cond.debug_loc());
-
-    // loop body:
-    let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
-    Br(body_bcx_out, cond_bcx_in.llbb, DebugLoc::None);
-
-    fcx.pop_loop_cleanup_scope(loop_expr.id);
-    return next_bcx_in;
-}
-
-pub fn trans_loop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                              loop_expr: &hir::Expr,
-                              body: &hir::Block)
-                              -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_loop");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let fcx = bcx.fcx;
-
-    //            bcx
-    //             |
-    //         body_bcx_in
-    //             |
-    //         body_bcx_out
-    //
-    // next_bcx
-    //
-    // Links between body_bcx_in and next_bcx are created by
-    // break statements.
-
-    let next_bcx_in = bcx.fcx.new_id_block("loop_exit", loop_expr.id);
-    let body_bcx_in = bcx.fcx.new_id_block("loop_body", body.id);
-
-    fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, body_bcx_in]);
-
-    Br(bcx, body_bcx_in.llbb, loop_expr.debug_loc());
-    let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
-    Br(body_bcx_out, body_bcx_in.llbb, DebugLoc::None);
-
-    fcx.pop_loop_cleanup_scope(loop_expr.id);
-
-    // If there are no predecessors for the next block, we just translated an endless loop and the
-    // next block is unreachable.
-    if BasicBlock(next_bcx_in.llbb).pred_iter().next().is_none() {
-        Unreachable(next_bcx_in);
-    }
-
-    return next_bcx_in;
-}
-
-pub fn trans_break_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                    expr: &hir::Expr,
-                                    opt_label: Option<ast::Name>,
-                                    exit: usize)
-                                    -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_break_cont");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let fcx = bcx.fcx;
-
-    // Locate loop that we will break to
-    let loop_id = match opt_label {
-        None => fcx.top_loop_scope(),
-        Some(_) => {
-            match bcx.tcx().def_map.borrow().get(&expr.id).map(|d| d.full_def())  {
-                Some(Def::Label(loop_id)) => loop_id,
-                r => {
-                    bcx.tcx().sess.bug(&format!("{:?} in def-map for label", r))
-                }
-            }
-        }
-    };
-
-    // Generate appropriate cleanup code and branch
-    let cleanup_llbb = fcx.normal_exit_block(loop_id, exit);
-    Br(bcx, cleanup_llbb, expr.debug_loc());
-    Unreachable(bcx); // anything afterwards should be ignored
-    return bcx;
-}
-
-pub fn trans_break<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                               expr: &hir::Expr,
-                               label_opt: Option<ast::Name>)
-                               -> Block<'blk, 'tcx> {
-    return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_BREAK);
-}
-
-pub fn trans_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                              expr: &hir::Expr,
-                              label_opt: Option<ast::Name>)
-                              -> Block<'blk, 'tcx> {
-    return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_LOOP);
-}
-
-pub fn trans_ret<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                             return_expr: &hir::Expr,
-                             retval_expr: Option<&hir::Expr>)
-                             -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_ret");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let fcx = bcx.fcx;
-    let mut bcx = bcx;
-    let dest = match (fcx.llretslotptr.get(), retval_expr) {
-        (Some(_), Some(retval_expr)) => {
-            let ret_ty = expr_ty_adjusted(bcx, &retval_expr);
-            expr::SaveIn(fcx.get_ret_slot(bcx, ty::FnConverging(ret_ty), "ret_slot"))
-        }
-        _ => expr::Ignore,
-    };
-    if let Some(x) = retval_expr {
-        bcx = expr::trans_into(bcx, &x, dest);
-        match dest {
-            expr::SaveIn(slot) if fcx.needs_ret_allocas => {
-                Store(bcx, slot, fcx.llretslotptr.get().unwrap());
-            }
-            _ => {}
-        }
-    }
-    let cleanup_llbb = fcx.return_exit_block();
-    Br(bcx, cleanup_llbb, return_expr.debug_loc());
-    Unreachable(bcx);
-    return bcx;
-}
-
-pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                              call_info: NodeIdAndSpan,
-                              fail_str: InternedString)
-                              -> Block<'blk, 'tcx> {
-    let ccx = bcx.ccx();
-    let _icx = push_ctxt("trans_fail_value");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let v_str = C_str_slice(ccx, fail_str);
-    let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo);
-    let filename = token::intern_and_get_ident(&loc.file.name);
-    let filename = C_str_slice(ccx, filename);
-    let line = C_u32(ccx, loc.line as u32);
-    let expr_file_line_const = C_struct(ccx, &[v_str, filename, line], false);
-    let align = machine::llalign_of_min(ccx, val_ty(expr_file_line_const));
-    let expr_file_line = consts::addr_of(ccx, expr_file_line_const, align, "panic_loc");
-    let args = vec!(expr_file_line);
-    let did = langcall(bcx, Some(call_info.span), "", PanicFnLangItem);
-    let bcx = callee::trans_lang_call(bcx,
-                                      did,
-                                      &args[..],
-                                      Some(expr::Ignore),
-                                      call_info.debug_loc()).bcx;
-    Unreachable(bcx);
-    return bcx;
-}
-
-pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                           call_info: NodeIdAndSpan,
-                                           index: ValueRef,
-                                           len: ValueRef)
-                                           -> Block<'blk, 'tcx> {
-    let ccx = bcx.ccx();
-    let _icx = push_ctxt("trans_fail_bounds_check");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    // Extract the file/line from the span
-    let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo);
-    let filename = token::intern_and_get_ident(&loc.file.name);
-
-    // Invoke the lang item
-    let filename = C_str_slice(ccx,  filename);
-    let line = C_u32(ccx, loc.line as u32);
-    let file_line_const = C_struct(ccx, &[filename, line], false);
-    let align = machine::llalign_of_min(ccx, val_ty(file_line_const));
-    let file_line = consts::addr_of(ccx, file_line_const, align, "panic_bounds_check_loc");
-    let args = vec!(file_line, index, len);
-    let did = langcall(bcx, Some(call_info.span), "", PanicBoundsCheckFnLangItem);
-    let bcx = callee::trans_lang_call(bcx,
-                                      did,
-                                      &args[..],
-                                      Some(expr::Ignore),
-                                      call_info.debug_loc()).bcx;
-    Unreachable(bcx);
-    return bcx;
-}
diff --git a/src/librustc_trans/trans/datum.rs b/src/librustc_trans/trans/datum.rs
deleted file mode 100644 (file)
index 32f2637..0000000
+++ /dev/null
@@ -1,829 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! ## The Datum module
-//!
-//! A `Datum` encapsulates the result of evaluating a Rust expression.  It
-//! contains a `ValueRef` indicating the result, a `Ty` describing
-//! the Rust type, but also a *kind*. The kind indicates whether the datum
-//! has cleanup scheduled (lvalue) or not (rvalue) and -- in the case of
-//! rvalues -- whether or not the value is "by ref" or "by value".
-//!
-//! The datum API is designed to help you avoid memory errors like
-//! forgetting to arrange cleanup or duplicating a value. The type of the
-//! datum incorporates the kind, and thus reflects whether it has cleanup
-//! scheduled:
-//!
-//! - `Datum<Lvalue>` -- by ref, cleanup scheduled
-//! - `Datum<Rvalue>` -- by value or by ref, no cleanup scheduled
-//! - `Datum<Expr>` -- either `Datum<Lvalue>` or `Datum<Rvalue>`
-//!
-//! Rvalue and expr datums are noncopyable, and most of the methods on
-//! datums consume the datum itself (with some notable exceptions). This
-//! reflects the fact that datums may represent affine values which ought
-//! to be consumed exactly once, and if you were to try to (for example)
-//! store an affine value multiple times, you would be duplicating it,
-//! which would certainly be a bug.
-//!
-//! Some of the datum methods, however, are designed to work only on
-//! copyable values such as ints or pointers. Those methods may borrow the
-//! datum (`&self`) rather than consume it, but they always include
-//! assertions on the type of the value represented to check that this
-//! makes sense. An example is `shallow_copy()`, which duplicates
-//! a datum value.
-//!
-//! Translating an expression always yields a `Datum<Expr>` result, but
-//! the methods `to_[lr]value_datum()` can be used to coerce a
-//! `Datum<Expr>` into a `Datum<Lvalue>` or `Datum<Rvalue>` as
-//! needed. Coercing to an lvalue is fairly common, and generally occurs
-//! whenever it is necessary to inspect a value and pull out its
-//! subcomponents (for example, a match, or indexing expression). Coercing
-//! to an rvalue is more unusual; it occurs when moving values from place
-//! to place, such as in an assignment expression or parameter passing.
-//!
-//! ### Lvalues in detail
-//!
-//! An lvalue datum is one for which cleanup has been scheduled. Lvalue
-//! datums are always located in memory, and thus the `ValueRef` for an
-//! LLVM value is always a pointer to the actual Rust value. This means
-//! that if the Datum has a Rust type of `int`, then the LLVM type of the
-//! `ValueRef` will be `int*` (pointer to int).
-//!
-//! Because lvalues already have cleanups scheduled, the memory must be
-//! zeroed to prevent the cleanup from taking place (presuming that the
-//! Rust type needs drop in the first place, otherwise it doesn't
-//! matter). The Datum code automatically performs this zeroing when the
-//! value is stored to a new location, for example.
-//!
-//! Lvalues usually result from evaluating lvalue expressions. For
-//! example, evaluating a local variable `x` yields an lvalue, as does a
-//! reference to a field like `x.f` or an index `x[i]`.
-//!
-//! Lvalue datums can also arise by *converting* an rvalue into an lvalue.
-//! This is done with the `to_lvalue_datum` method defined on
-//! `Datum<Expr>`. Basically this method just schedules cleanup if the
-//! datum is an rvalue, possibly storing the value into a stack slot first
-//! if needed. Converting rvalues into lvalues occurs in constructs like
-//! `&foo()` or `match foo() { ref x => ... }`, where the user is
-//! implicitly requesting a temporary.
-//!
-//! ### Rvalues in detail
-//!
-//! Rvalue datums are values with no cleanup scheduled. One must be
-//! careful with rvalue datums to ensure that cleanup is properly
-//! arranged, usually by converting to an lvalue datum or by invoking the
-//! `add_clean` method.
-//!
-//! ### Scratch datums
-//!
-//! Sometimes you need some temporary scratch space.  The functions
-//! `[lr]value_scratch_datum()` can be used to get temporary stack
-//! space. As their name suggests, they yield lvalues and rvalues
-//! respectively. That is, the slot from `lvalue_scratch_datum` will have
-//! cleanup arranged, and the slot from `rvalue_scratch_datum` does not.
-
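A minimal standalone sketch of the kind-parameterised datum described above, with marker types standing in for `Lvalue` and `Rvalue` (the field types are placeholders, not the compiler's):

    // Marker types playing the role of the datum kinds: the kind is carried in
    // the type, so whether cleanup has been scheduled is visible to the type
    // checker rather than being a runtime flag.
    struct LvalueKind;   // by ref, cleanup scheduled
    struct RvalueKind;   // by value or by ref, no cleanup scheduled

    struct Datum<K> {
        val: i32,        // stand-in for the LLVM ValueRef
        kind: K,
    }

    // Converting an rvalue into an lvalue consumes the rvalue datum, matching
    // the "consume exactly once" discipline the doc comment describes.
    fn to_lvalue(d: Datum<RvalueKind>) -> Datum<LvalueKind> {
        Datum { val: d.val, kind: LvalueKind }
    }

    fn main() {
        let rv = Datum { val: 7, kind: RvalueKind };
        let lv = to_lvalue(rv);
        assert_eq!(lv.val, 7);
        // `rv` has been moved and can no longer be used here.
    }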
-pub use self::Expr::*;
-pub use self::RvalueMode::*;
-
-use llvm::ValueRef;
-use trans::adt;
-use trans::base::*;
-use trans::build::{Load, Store};
-use trans::common::*;
-use trans::cleanup;
-use trans::cleanup::{CleanupMethods, DropHintDatum, DropHintMethods};
-use trans::expr;
-use trans::tvec;
-use middle::ty::Ty;
-
-use std::fmt;
-use syntax::ast;
-use syntax::codemap::DUMMY_SP;
-
-/// A `Datum` encapsulates the result of evaluating an expression.  It
-/// describes where the value is stored, what Rust type the value has,
-/// whether it is addressed by reference, and so forth. Please refer to
-/// the section on datums in `README.md` for more details.
-#[derive(Clone, Copy, Debug)]
-pub struct Datum<'tcx, K> {
-    /// The llvm value.  This is either a pointer to the Rust value or
-    /// the value itself, depending on `kind` below.
-    pub val: ValueRef,
-
-    /// The rust type of the value.
-    pub ty: Ty<'tcx>,
-
-    /// Indicates whether this is by-ref or by-value.
-    pub kind: K,
-}
-
-pub struct DatumBlock<'blk, 'tcx: 'blk, K> {
-    pub bcx: Block<'blk, 'tcx>,
-    pub datum: Datum<'tcx, K>,
-}
-
-#[derive(Debug)]
-pub enum Expr {
-    /// a fresh value that was produced and which has no cleanup yet
-    /// because it has not yet "landed" into its permanent home
-    RvalueExpr(Rvalue),
-
-    /// `val` is a pointer into memory for which a cleanup is scheduled
-    /// (and thus has type *T). If you move out of an Lvalue, you must
-    /// zero out the memory (FIXME #5016).
-    LvalueExpr(Lvalue),
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum DropFlagInfo {
-    DontZeroJustUse(ast::NodeId),
-    ZeroAndMaintain(ast::NodeId),
-    None,
-}
-
-impl DropFlagInfo {
-    pub fn must_zero(&self) -> bool {
-        match *self {
-            DropFlagInfo::DontZeroJustUse(..) => false,
-            DropFlagInfo::ZeroAndMaintain(..) => true,
-            DropFlagInfo::None => true,
-        }
-    }
-
-    pub fn hint_datum<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>)
-                              -> Option<DropHintDatum<'tcx>> {
-        let id = match *self {
-            DropFlagInfo::None => return None,
-            DropFlagInfo::DontZeroJustUse(id) |
-            DropFlagInfo::ZeroAndMaintain(id) => id,
-        };
-
-        let hints = bcx.fcx.lldropflag_hints.borrow();
-        let retval = hints.hint_datum(id);
-        assert!(retval.is_some(), "An id (={}) means must have a hint", id);
-        retval
-    }
-}
-
-// FIXME: having Lvalue be `Copy` is a bit of a footgun, since clients
-// may not realize that subparts of an Lvalue can have a subset of
-// drop-flags associated with them, while this as written will just
-// memcpy the drop_flag_info. But, it is an easier way to get `_match`
-// off the ground to just let this be `Copy` for now.
-#[derive(Copy, Clone, Debug)]
-pub struct Lvalue {
-    pub source: &'static str,
-    pub drop_flag_info: DropFlagInfo
-}
-
-#[derive(Debug)]
-pub struct Rvalue {
-    pub mode: RvalueMode
-}
-
-/// Classifies what action we should take when a value is moved away
-/// with respect to its drop-flag.
-///
-/// Long term there will be no need for this classification: all flags
-/// (which will be stored on the stack frame) will have the same
-/// interpretation and maintenance code associated with them.
-#[derive(Copy, Clone, Debug)]
-pub enum HintKind {
-    /// When the value is moved, set the drop-flag to "dropped"
-    /// (i.e. "zero the flag", even when the specific representation
-    /// is not literally 0) and when it is reinitialized, set the
-    /// drop-flag back to "initialized".
-    ZeroAndMaintain,
-
-    /// When the value is moved, do not set the drop-flag to "dropped".
-    /// However, continue to read the drop-flag in deciding whether to
-    /// drop. (In essence, the path/fragment in question will never
-    /// need to be dropped at the points where it is moved away by
-    /// this code, but we are defending against the scenario where
-    /// some *other* code could move away (or drop) the value and thus
-    /// zero the flag, which is why we will still read from it.)
-    DontZeroJustUse,
-}
-
-impl Lvalue { // Constructors for various Lvalues.
-    pub fn new<'blk, 'tcx>(source: &'static str) -> Lvalue {
-        debug!("Lvalue at {} no drop flag info", source);
-        Lvalue { source: source, drop_flag_info: DropFlagInfo::None }
-    }
-
-    pub fn new_dropflag_hint(source: &'static str) -> Lvalue {
-        debug!("Lvalue at {} is drop flag hint", source);
-        Lvalue { source: source, drop_flag_info: DropFlagInfo::None }
-    }
-
-    pub fn new_with_hint<'blk, 'tcx>(source: &'static str,
-                                     bcx: Block<'blk, 'tcx>,
-                                     id: ast::NodeId,
-                                     k: HintKind) -> Lvalue {
-        let (opt_id, info) = {
-            let hint_available = Lvalue::has_dropflag_hint(bcx, id) &&
-                bcx.tcx().sess.nonzeroing_move_hints();
-            let info = match k {
-                HintKind::ZeroAndMaintain if hint_available =>
-                    DropFlagInfo::ZeroAndMaintain(id),
-                HintKind::DontZeroJustUse if hint_available =>
-                    DropFlagInfo::DontZeroJustUse(id),
-                _ =>
-                    DropFlagInfo::None,
-            };
-            (Some(id), info)
-        };
-        debug!("Lvalue at {}, id: {:?} info: {:?}", source, opt_id, info);
-        Lvalue { source: source, drop_flag_info: info }
-    }
-} // end Lvalue constructor methods.
-
-impl Lvalue {
-    fn has_dropflag_hint<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                     id: ast::NodeId) -> bool {
-        let hints = bcx.fcx.lldropflag_hints.borrow();
-        hints.has_hint(id)
-    }
-    pub fn dropflag_hint<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>)
-                                 -> Option<DropHintDatum<'tcx>> {
-        self.drop_flag_info.hint_datum(bcx)
-    }
-}
-
-impl Rvalue {
-    pub fn new(m: RvalueMode) -> Rvalue {
-        Rvalue { mode: m }
-    }
-}
-
-// Make Datum linear for more type safety.
-impl Drop for Rvalue {
-    fn drop(&mut self) { }
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-pub enum RvalueMode {
-    /// `val` is a pointer to the actual value (and thus has type *T)
-    ByRef,
-
-    /// `val` is the actual value (*only used for immediates* like ints, ptrs)
-    ByValue,
-}
-
-pub fn immediate_rvalue<'tcx>(val: ValueRef, ty: Ty<'tcx>) -> Datum<'tcx, Rvalue> {
-    return Datum::new(val, ty, Rvalue::new(ByValue));
-}
-
-pub fn immediate_rvalue_bcx<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                        val: ValueRef,
-                                        ty: Ty<'tcx>)
-                                        -> DatumBlock<'blk, 'tcx, Rvalue> {
-    return DatumBlock::new(bcx, immediate_rvalue(val, ty))
-}
-
-/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to
-/// it. The memory will be dropped upon exit from `scope`. The callback `populate` should
-/// initialize the memory.
-///
-/// The flag `zero` indicates how the temporary space itself should be
-/// initialized at the outset of the function; the only time that
-/// `InitAlloca::Uninit` is a valid value for `zero` is when the
-/// caller can prove that either (1.) the code injected by `populate`
-/// onto `bcx` always dominates the end of `scope`, or (2.) the data
-/// being allocated has no associated destructor.
-pub fn lvalue_scratch_datum<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>,
-                                              ty: Ty<'tcx>,
-                                              name: &str,
-                                              zero: InitAlloca,
-                                              scope: cleanup::ScopeId,
-                                              arg: A,
-                                              populate: F)
-                                              -> DatumBlock<'blk, 'tcx, Lvalue> where
-    F: FnOnce(A, Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>,
-{
-    // Very subtle: potentially initialize the scratch memory at point where it is alloca'ed.
-    // (See discussion at Issue 30530.)
-    let scratch = alloc_ty_init(bcx, ty, zero, name);
-    debug!("lvalue_scratch_datum scope={:?} scratch={} ty={:?}",
-           scope, bcx.ccx().tn().val_to_string(scratch), ty);
-
-    // Subtle. Populate the scratch memory *before* scheduling cleanup.
-    let bcx = populate(arg, bcx, scratch);
-    bcx.fcx.schedule_drop_mem(scope, scratch, ty, None);
-
-    DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue::new("datum::lvalue_scratch_datum")))
-}
-
-/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to
-/// it. The space is not zeroed and no cleanup is scheduled for the returned datum; you must
-/// arrange any cleanups etc. yourself!
-pub fn rvalue_scratch_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                        ty: Ty<'tcx>,
-                                        name: &str)
-                                        -> Datum<'tcx, Rvalue> {
-    let scratch = alloc_ty(bcx, ty, name);
-    call_lifetime_start(bcx, scratch);
-    Datum::new(scratch, ty, Rvalue::new(ByRef))
-}
-
-/// Indicates the "appropriate" mode for this value, which is either by ref or by value, depending
-/// on whether the type is immediate or not.
-pub fn appropriate_rvalue_mode<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                         ty: Ty<'tcx>) -> RvalueMode {
-    if type_is_immediate(ccx, ty) {
-        ByValue
-    } else {
-        ByRef
-    }
-}
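In other words, the mode answers "can this value travel in an SSA register, or must it live behind a pointer?". A hedged, standalone sketch of the same decision, with a simplified `is_immediate` predicate standing in for `type_is_immediate`:

```
// Simplified stand-in for the real decision: scalars and thin pointers are
// "immediate" and travel ByValue; aggregates travel behind a pointer (ByRef).
#[derive(Debug, PartialEq)]
enum RvalueMode {
    ByRef,
    ByValue,
}

enum SimpleTy {
    Int,
    Ptr,
    Aggregate { size: usize }, // size is illustrative only
}

fn is_immediate(ty: &SimpleTy) -> bool {
    match *ty {
        SimpleTy::Int | SimpleTy::Ptr => true,
        SimpleTy::Aggregate { .. } => false,
    }
}

fn appropriate_rvalue_mode(ty: &SimpleTy) -> RvalueMode {
    if is_immediate(ty) {
        RvalueMode::ByValue
    } else {
        RvalueMode::ByRef
    }
}

fn main() {
    assert_eq!(appropriate_rvalue_mode(&SimpleTy::Int), RvalueMode::ByValue);
    let agg = SimpleTy::Aggregate { size: 24 };
    assert_eq!(appropriate_rvalue_mode(&agg), RvalueMode::ByRef);
}
```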
-
-fn add_rvalue_clean<'a, 'tcx>(mode: RvalueMode,
-                              fcx: &FunctionContext<'a, 'tcx>,
-                              scope: cleanup::ScopeId,
-                              val: ValueRef,
-                              ty: Ty<'tcx>) {
-    debug!("add_rvalue_clean scope={:?} val={} ty={:?}",
-           scope, fcx.ccx.tn().val_to_string(val), ty);
-    match mode {
-        ByValue => { fcx.schedule_drop_immediate(scope, val, ty); }
-        ByRef => {
-            fcx.schedule_lifetime_end(scope, val);
-            fcx.schedule_drop_mem(scope, val, ty, None);
-        }
-    }
-}
-
-pub trait KindOps {
-
-    /// Take appropriate action after the value in `datum` has been
-    /// stored to a new location.
-    fn post_store<'blk, 'tcx>(&self,
-                              bcx: Block<'blk, 'tcx>,
-                              val: ValueRef,
-                              ty: Ty<'tcx>)
-                              -> Block<'blk, 'tcx>;
-
-    /// True if this mode is a reference mode, meaning that the datum's
-    /// val field is a pointer to the actual value
-    fn is_by_ref(&self) -> bool;
-
-    /// Converts to an Expr kind
-    fn to_expr_kind(self) -> Expr;
-
-}
-
-impl KindOps for Rvalue {
-    fn post_store<'blk, 'tcx>(&self,
-                              bcx: Block<'blk, 'tcx>,
-                              _val: ValueRef,
-                              _ty: Ty<'tcx>)
-                              -> Block<'blk, 'tcx> {
-        // No cleanup is scheduled for an rvalue, so we don't have
-        // to do anything after a move to cancel or duplicate it.
-        if self.is_by_ref() {
-            call_lifetime_end(bcx, _val);
-        }
-        bcx
-    }
-
-    fn is_by_ref(&self) -> bool {
-        self.mode == ByRef
-    }
-
-    fn to_expr_kind(self) -> Expr {
-        RvalueExpr(self)
-    }
-}
-
-impl KindOps for Lvalue {
-    /// If an lvalue is moved, we must zero out the memory in which it resides so as to cancel
-    /// cleanup. If an @T lvalue is copied, we must increment the reference count.
-    fn post_store<'blk, 'tcx>(&self,
-                              bcx: Block<'blk, 'tcx>,
-                              val: ValueRef,
-                              ty: Ty<'tcx>)
-                              -> Block<'blk, 'tcx> {
-        let _icx = push_ctxt("<Lvalue as KindOps>::post_store");
-        if bcx.fcx.type_needs_drop(ty) {
-            // cancel cleanup of affine values:
-            // 1. if it has a drop-hint, mark it as moved; then code
-            //    aware of drop-hint won't bother calling the
-            //    drop-glue itself.
-            if let Some(hint_datum) = self.drop_flag_info.hint_datum(bcx) {
-                let moved_hint_byte = adt::DTOR_MOVED_HINT;
-                let hint_llval = hint_datum.to_value().value();
-                Store(bcx, C_u8(bcx.fcx.ccx, moved_hint_byte), hint_llval);
-            }
-            // 2. if the drop info says it's necessary, drop-fill the memory.
-            if self.drop_flag_info.must_zero() {
-                let () = drop_done_fill_mem(bcx, val, ty);
-            }
-            bcx
-        } else {
-            // FIXME (#5016) would be nice to assert this, but we have
-            // to allow for e.g. DontZeroJustUse flags, for now.
-            //
-            // (The dropflag hint construction should be taking
-            // !type_needs_drop into account; earlier analysis phases
-            // may not have all the info they need to include such
-            // information properly, I think; in particular the
-            // fragments analysis works on a non-monomorphized view of
-            // the code.)
-            //
-            // assert_eq!(self.drop_flag_info, DropFlagInfo::None);
-            bcx
-        }
-    }
-
-    fn is_by_ref(&self) -> bool {
-        true
-    }
-
-    fn to_expr_kind(self) -> Expr {
-        LvalueExpr(self)
-    }
-}
-
-impl KindOps for Expr {
-    fn post_store<'blk, 'tcx>(&self,
-                              bcx: Block<'blk, 'tcx>,
-                              val: ValueRef,
-                              ty: Ty<'tcx>)
-                              -> Block<'blk, 'tcx> {
-        match *self {
-            LvalueExpr(ref l) => l.post_store(bcx, val, ty),
-            RvalueExpr(ref r) => r.post_store(bcx, val, ty),
-        }
-    }
-
-    fn is_by_ref(&self) -> bool {
-        match *self {
-            LvalueExpr(ref l) => l.is_by_ref(),
-            RvalueExpr(ref r) => r.is_by_ref()
-        }
-    }
-
-    fn to_expr_kind(self) -> Expr {
-        self
-    }
-}
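The shape of `KindOps` is worth isolating: two statically-known kinds (`Lvalue`, `Rvalue`) plus an `Expr` enum that erases the distinction and forwards every call back through the same trait, so generic code over `Datum<K>` never has to care which kind it holds. A minimal sketch of that pattern (illustrative only, with a single method):

```
// Trimmed-down version of the KindOps pattern: two concrete kinds, one
// "either" enum, and a trait implemented by all three.
trait KindOps {
    fn is_by_ref(&self) -> bool;
}

struct Lvalue;
struct Rvalue;

enum Expr {
    LvalueExpr(Lvalue),
    RvalueExpr(Rvalue),
}

impl KindOps for Lvalue {
    fn is_by_ref(&self) -> bool {
        true // lvalues always live in memory
    }
}

impl KindOps for Rvalue {
    fn is_by_ref(&self) -> bool {
        false // pretend rvalues are always immediates in this sketch
    }
}

impl KindOps for Expr {
    // The enum just delegates to whichever kind it wraps.
    fn is_by_ref(&self) -> bool {
        match *self {
            Expr::LvalueExpr(ref l) => l.is_by_ref(),
            Expr::RvalueExpr(ref r) => r.is_by_ref(),
        }
    }
}

struct Datum<K: KindOps> {
    kind: K,
}

fn describe<K: KindOps>(d: &Datum<K>) -> &'static str {
    if d.kind.is_by_ref() { "by ref" } else { "by value" }
}

fn main() {
    // Statically-known kind...
    println!("{}", describe(&Datum { kind: Lvalue }));
    // ...and the erased form, dispatching through the same trait.
    println!("{}", describe(&Datum { kind: Expr::RvalueExpr(Rvalue) }));
}
```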
-
-impl<'tcx> Datum<'tcx, Rvalue> {
-    /// Schedules a cleanup for this datum in the given scope. That means that this datum is no
-    /// longer an rvalue datum; hence, this function consumes the datum and returns the contained
-    /// ValueRef.
-    pub fn add_clean<'a>(self,
-                         fcx: &FunctionContext<'a, 'tcx>,
-                         scope: cleanup::ScopeId)
-                         -> ValueRef {
-        add_rvalue_clean(self.kind.mode, fcx, scope, self.val, self.ty);
-        self.val
-    }
-
-    /// Returns an lvalue datum (that is, a by ref datum with cleanup scheduled). If `self` is not
-    /// already an lvalue, cleanup will be scheduled in the temporary scope for `expr_id`.
-    pub fn to_lvalue_datum_in_scope<'blk>(self,
-                                          bcx: Block<'blk, 'tcx>,
-                                          name: &str,
-                                          scope: cleanup::ScopeId)
-                                          -> DatumBlock<'blk, 'tcx, Lvalue> {
-        let fcx = bcx.fcx;
-
-        match self.kind.mode {
-            ByRef => {
-                add_rvalue_clean(ByRef, fcx, scope, self.val, self.ty);
-                DatumBlock::new(bcx, Datum::new(
-                    self.val,
-                    self.ty,
-                    Lvalue::new("datum::to_lvalue_datum_in_scope")))
-            }
-
-            ByValue => {
-                lvalue_scratch_datum(
-                    bcx, self.ty, name, InitAlloca::Dropped, scope, self,
-                    |this, bcx, llval| {
-                        debug!("populate call for Datum::to_lvalue_datum_in_scope \
-                                self.ty={:?}", this.ty);
-                        // do not call_lifetime_start here; the
-                        // `InitAlloc::Dropped` will start scratch
-                        // value's lifetime at open of function body.
-                        let bcx = this.store_to(bcx, llval);
-                        bcx.fcx.schedule_lifetime_end(scope, llval);
-                        bcx
-                    })
-            }
-        }
-    }
-
-    pub fn to_ref_datum<'blk>(self, bcx: Block<'blk, 'tcx>)
-                              -> DatumBlock<'blk, 'tcx, Rvalue> {
-        let mut bcx = bcx;
-        match self.kind.mode {
-            ByRef => DatumBlock::new(bcx, self),
-            ByValue => {
-                let scratch = rvalue_scratch_datum(bcx, self.ty, "to_ref");
-                bcx = self.store_to(bcx, scratch.val);
-                DatumBlock::new(bcx, scratch)
-            }
-        }
-    }
-
-    pub fn to_appropriate_datum<'blk>(self, bcx: Block<'blk, 'tcx>)
-                                      -> DatumBlock<'blk, 'tcx, Rvalue> {
-        match self.appropriate_rvalue_mode(bcx.ccx()) {
-            ByRef => {
-                self.to_ref_datum(bcx)
-            }
-            ByValue => {
-                match self.kind.mode {
-                    ByValue => DatumBlock::new(bcx, self),
-                    ByRef => {
-                        let llval = load_ty(bcx, self.val, self.ty);
-                        call_lifetime_end(bcx, self.val);
-                        DatumBlock::new(bcx, Datum::new(llval, self.ty, Rvalue::new(ByValue)))
-                    }
-                }
-            }
-        }
-    }
-}
-
-/// Methods suitable for "expr" datums that could be either lvalues or
-/// rvalues. These include coercions into lvalues/rvalues but also a number
-/// of more general operations. (Some of those operations could be moved to
-/// the more general `impl<K> Datum<K>`, but it's convenient to have them
-/// here since we can `match self.kind` rather than having to implement
-/// generic methods in `KindOps`.)
-impl<'tcx> Datum<'tcx, Expr> {
-    fn match_kind<R, F, G>(self, if_lvalue: F, if_rvalue: G) -> R where
-        F: FnOnce(Datum<'tcx, Lvalue>) -> R,
-        G: FnOnce(Datum<'tcx, Rvalue>) -> R,
-    {
-        let Datum { val, ty, kind } = self;
-        match kind {
-            LvalueExpr(l) => if_lvalue(Datum::new(val, ty, l)),
-            RvalueExpr(r) => if_rvalue(Datum::new(val, ty, r)),
-        }
-    }
-
-    /// Asserts that this datum *is* an lvalue and returns it.
-    #[allow(dead_code)] // potentially useful
-    pub fn assert_lvalue(self, bcx: Block) -> Datum<'tcx, Lvalue> {
-        self.match_kind(
-            |d| d,
-            |_| bcx.sess().bug("assert_lvalue given rvalue"))
-    }
-
-    pub fn store_to_dest<'blk>(self,
-                               bcx: Block<'blk, 'tcx>,
-                               dest: expr::Dest,
-                               expr_id: ast::NodeId)
-                               -> Block<'blk, 'tcx> {
-        match dest {
-            expr::Ignore => {
-                self.add_clean_if_rvalue(bcx, expr_id);
-                bcx
-            }
-            expr::SaveIn(addr) => {
-                self.store_to(bcx, addr)
-            }
-        }
-    }
-
-    /// Arranges cleanup for `self` if it is an rvalue. Use when you are done working with a value
-    /// that may need drop.
-    pub fn add_clean_if_rvalue<'blk>(self,
-                                     bcx: Block<'blk, 'tcx>,
-                                     expr_id: ast::NodeId) {
-        self.match_kind(
-            |_| { /* Nothing to do, cleanup already arranged */ },
-            |r| {
-                let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
-                r.add_clean(bcx.fcx, scope);
-            })
-    }
-
-    pub fn to_lvalue_datum<'blk>(self,
-                                 bcx: Block<'blk, 'tcx>,
-                                 name: &str,
-                                 expr_id: ast::NodeId)
-                                 -> DatumBlock<'blk, 'tcx, Lvalue> {
-        debug!("to_lvalue_datum self: {}", self.to_string(bcx.ccx()));
-
-        self.match_kind(
-            |l| DatumBlock::new(bcx, l),
-            |r| {
-                let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
-                r.to_lvalue_datum_in_scope(bcx, name, scope)
-            })
-    }
-
-    /// Ensures that we have an rvalue datum (that is, a datum with no cleanup scheduled).
-    pub fn to_rvalue_datum<'blk>(self,
-                                 bcx: Block<'blk, 'tcx>,
-                                 name: &'static str)
-                                 -> DatumBlock<'blk, 'tcx, Rvalue> {
-        self.match_kind(
-            |l| {
-                let mut bcx = bcx;
-                match l.appropriate_rvalue_mode(bcx.ccx()) {
-                    ByRef => {
-                        let scratch = rvalue_scratch_datum(bcx, l.ty, name);
-                        bcx = l.store_to(bcx, scratch.val);
-                        DatumBlock::new(bcx, scratch)
-                    }
-                    ByValue => {
-                        let v = load_ty(bcx, l.val, l.ty);
-                        bcx = l.kind.post_store(bcx, l.val, l.ty);
-                        DatumBlock::new(bcx, Datum::new(v, l.ty, Rvalue::new(ByValue)))
-                    }
-                }
-            },
-            |r| DatumBlock::new(bcx, r))
-    }
-
-}
-
-/// Methods suitable only for lvalues. These include the various
-/// operations to extract components out of compound data structures,
-/// such as extracting the field from a struct or a particular element
-/// from an array.
-impl<'tcx> Datum<'tcx, Lvalue> {
-    /// Converts a datum into a by-ref value. The datum type must be one which is always passed by
-    /// reference.
-    pub fn to_llref(self) -> ValueRef {
-        self.val
-    }
-
-    // Extracts a component of a compound data structure (e.g., a field from a
-    // struct). Note that if self is an opened, unsized type then the returned
-    // datum may also be unsized _without the size information_. It is the
-    // caller's responsibility to package the result in some way to make a valid
-    // datum in that case (e.g., by making a fat pointer or opened pair).
-    pub fn get_element<'blk, F>(&self, bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>,
-                                gep: F)
-                                -> Datum<'tcx, Lvalue> where
-        F: FnOnce(adt::MaybeSizedValue) -> ValueRef,
-    {
-        let val = if type_is_sized(bcx.tcx(), self.ty) {
-            let val = adt::MaybeSizedValue::sized(self.val);
-            gep(val)
-        } else {
-            let val = adt::MaybeSizedValue::unsized_(
-                Load(bcx, expr::get_dataptr(bcx, self.val)),
-                Load(bcx, expr::get_meta(bcx, self.val)));
-            gep(val)
-        };
-        Datum {
-            val: val,
-            kind: Lvalue::new("Datum::get_element"),
-            ty: ty,
-        }
-    }
-
-    pub fn get_vec_base_and_len<'blk>(&self, bcx: Block<'blk, 'tcx>)
-                                      -> (ValueRef, ValueRef) {
-        //! Converts a vector into the slice pair.
-
-        tvec::get_base_and_len(bcx, self.val, self.ty)
-    }
-}
-
-/// Generic methods applicable to any sort of datum.
-impl<'tcx, K: KindOps + fmt::Debug> Datum<'tcx, K> {
-    pub fn new(val: ValueRef, ty: Ty<'tcx>, kind: K) -> Datum<'tcx, K> {
-        Datum { val: val, ty: ty, kind: kind }
-    }
-
-    pub fn to_expr_datum(self) -> Datum<'tcx, Expr> {
-        let Datum { val, ty, kind } = self;
-        Datum { val: val, ty: ty, kind: kind.to_expr_kind() }
-    }
-
-    /// Moves or copies this value into a new home, as appropriate depending on the type of the
-    /// datum. This method consumes the datum, since it would be incorrect to go on using the datum
-    /// if the value represented is affine (and hence the value is moved).
-    pub fn store_to<'blk>(self,
-                          bcx: Block<'blk, 'tcx>,
-                          dst: ValueRef)
-                          -> Block<'blk, 'tcx> {
-        self.shallow_copy_raw(bcx, dst);
-
-        self.kind.post_store(bcx, self.val, self.ty)
-    }
-
-    /// Helper function that performs a shallow copy of this value into `dst`, which should be a
-    /// pointer to a memory location suitable for `self.ty`. `dst` should contain uninitialized
-    /// memory (either newly allocated, zeroed, or dropped).
-    ///
-    /// This function is private to datums because it leaves memory in an unstable state, where the
-    /// source value has been copied but not zeroed. Public methods are `store_to` (if you no
-    /// longer need the source value) or `shallow_copy` (if you wish the source value to remain
-    /// valid).
-    fn shallow_copy_raw<'blk>(&self,
-                              bcx: Block<'blk, 'tcx>,
-                              dst: ValueRef)
-                              -> Block<'blk, 'tcx> {
-        let _icx = push_ctxt("copy_to_no_check");
-
-        if type_is_zero_size(bcx.ccx(), self.ty) {
-            return bcx;
-        }
-
-        if self.kind.is_by_ref() {
-            memcpy_ty(bcx, dst, self.val, self.ty);
-        } else {
-            store_ty(bcx, self.val, dst, self.ty);
-        }
-
-        return bcx;
-    }
-
-    /// Copies the value into a new location. This function always preserves the existing datum as
-    /// a valid value. Therefore, it does not consume `self` and, also, cannot be applied to affine
-    /// values (since they must never be duplicated).
-    pub fn shallow_copy<'blk>(&self,
-                              bcx: Block<'blk, 'tcx>,
-                              dst: ValueRef)
-                              -> Block<'blk, 'tcx> {
-        /*!
-         * Copies the value into a new location. This function always
-         * preserves the existing datum as a valid value. Therefore,
-         * it does not consume `self` and, also, cannot be applied to
-         * affine values (since they must never be duplicated).
-         */
-
-        assert!(!self.ty
-                     .moves_by_default(&bcx.tcx().empty_parameter_environment(), DUMMY_SP));
-        self.shallow_copy_raw(bcx, dst)
-    }
-
-    #[allow(dead_code)] // useful for debugging
-    pub fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String {
-        format!("Datum({}, {:?}, {:?})",
-                ccx.tn().val_to_string(self.val),
-                self.ty,
-                self.kind)
-    }
-
-    /// See the `appropriate_rvalue_mode()` function
-    pub fn appropriate_rvalue_mode<'a>(&self, ccx: &CrateContext<'a, 'tcx>)
-                                       -> RvalueMode {
-        appropriate_rvalue_mode(ccx, self.ty)
-    }
-
-    /// Converts `self` into a by-value `ValueRef`. Consumes this datum (i.e., absolves you of
-    /// responsibility to cleanup the value). For this to work, the value must be something
-    /// scalar-ish (like an int or a pointer) which (1) does not require drop glue and (2) is
-    /// naturally passed around by value, and not by reference.
-    pub fn to_llscalarish<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef {
-        assert!(!bcx.fcx.type_needs_drop(self.ty));
-        assert!(self.appropriate_rvalue_mode(bcx.ccx()) == ByValue);
-        if self.kind.is_by_ref() {
-            load_ty(bcx, self.val, self.ty)
-        } else {
-            self.val
-        }
-    }
-
-    pub fn to_llbool<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef {
-        assert!(self.ty.is_bool());
-        self.to_llscalarish(bcx)
-    }
-}
-
-impl<'blk, 'tcx, K> DatumBlock<'blk, 'tcx, K> {
-    pub fn new(bcx: Block<'blk, 'tcx>, datum: Datum<'tcx, K>)
-               -> DatumBlock<'blk, 'tcx, K> {
-        DatumBlock { bcx: bcx, datum: datum }
-    }
-}
-
-impl<'blk, 'tcx, K: KindOps + fmt::Debug> DatumBlock<'blk, 'tcx, K> {
-    pub fn to_expr_datumblock(self) -> DatumBlock<'blk, 'tcx, Expr> {
-        DatumBlock::new(self.bcx, self.datum.to_expr_datum())
-    }
-}
-
-impl<'blk, 'tcx> DatumBlock<'blk, 'tcx, Expr> {
-    pub fn store_to_dest(self,
-                         dest: expr::Dest,
-                         expr_id: ast::NodeId) -> Block<'blk, 'tcx> {
-        let DatumBlock { bcx, datum } = self;
-        datum.store_to_dest(bcx, dest, expr_id)
-    }
-
-    pub fn to_llbool(self) -> Result<'blk, 'tcx> {
-        let DatumBlock { datum, bcx } = self;
-        Result::new(bcx, datum.to_llbool(bcx))
-    }
-}
diff --git a/src/librustc_trans/trans/debuginfo/create_scope_map.rs b/src/librustc_trans/trans/debuginfo/create_scope_map.rs
deleted file mode 100644 (file)
index 73fdbd5..0000000
+++ /dev/null
@@ -1,489 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::metadata::file_metadata;
-use super::utils::DIB;
-
-use llvm;
-use llvm::debuginfo::{DIScope, DISubprogram};
-use trans::common::CrateContext;
-use middle::pat_util;
-use rustc::util::nodemap::NodeMap;
-
-use libc::c_uint;
-use syntax::codemap::{Span, Pos};
-use syntax::{ast, codemap};
-
-use rustc_front;
-use rustc_front::hir::{self, PatKind};
-
-// This procedure builds the *scope map* for a given function, which maps any
-// given ast::NodeId in the function's AST to the correct DIScope metadata instance.
-//
-// This builder procedure walks the AST in execution order and keeps track of
-// what belongs to which scope, creating DIScope DIEs along the way, and
-// introducing *artificial* lexical scope descriptors where necessary. These
-// artificial scopes allow GDB to correctly handle name shadowing.
-pub fn create_scope_map(cx: &CrateContext,
-                        args: &[hir::Arg],
-                        fn_entry_block: &hir::Block,
-                        fn_metadata: DISubprogram,
-                        fn_ast_id: ast::NodeId)
-                        -> NodeMap<DIScope> {
-    let mut scope_map = NodeMap();
-
-    let def_map = &cx.tcx().def_map;
-
-    let mut scope_stack = vec!(ScopeStackEntry { scope_metadata: fn_metadata, name: None });
-    scope_map.insert(fn_ast_id, fn_metadata);
-
-    // Push argument identifiers onto the stack so arguments integrate nicely
-    // with variable shadowing.
-    for arg in args {
-        pat_util::pat_bindings_ident(def_map, &arg.pat, |_, node_id, _, path1| {
-            scope_stack.push(ScopeStackEntry { scope_metadata: fn_metadata,
-                                               name: Some(path1.node.unhygienic_name) });
-            scope_map.insert(node_id, fn_metadata);
-        })
-    }
-
-    // Clang creates a separate scope for function bodies, so let's do this too.
-    with_new_scope(cx,
-                   fn_entry_block.span,
-                   &mut scope_stack,
-                   &mut scope_map,
-                   |cx, scope_stack, scope_map| {
-        walk_block(cx, fn_entry_block, scope_stack, scope_map);
-    });
-
-    return scope_map;
-}
-
-// local helper functions for walking the AST.
-fn with_new_scope<F>(cx: &CrateContext,
-                     scope_span: Span,
-                     scope_stack: &mut Vec<ScopeStackEntry> ,
-                     scope_map: &mut NodeMap<DIScope>,
-                     inner_walk: F) where
-    F: FnOnce(&CrateContext, &mut Vec<ScopeStackEntry>, &mut NodeMap<DIScope>),
-{
-    // Create a new lexical scope and push it onto the stack
-    let loc = cx.sess().codemap().lookup_char_pos(scope_span.lo);
-    let file_metadata = file_metadata(cx, &loc.file.name);
-    let parent_scope = scope_stack.last().unwrap().scope_metadata;
-
-    let scope_metadata = unsafe {
-        llvm::LLVMDIBuilderCreateLexicalBlock(
-            DIB(cx),
-            parent_scope,
-            file_metadata,
-            loc.line as c_uint,
-            loc.col.to_usize() as c_uint)
-    };
-
-    scope_stack.push(ScopeStackEntry { scope_metadata: scope_metadata, name: None });
-
-    inner_walk(cx, scope_stack, scope_map);
-
-    // pop artificial scopes
-    while scope_stack.last().unwrap().name.is_some() {
-        scope_stack.pop();
-    }
-
-    if scope_stack.last().unwrap().scope_metadata != scope_metadata {
-        cx.sess().span_bug(scope_span, "debuginfo: Inconsistency in scope management.");
-    }
-
-    scope_stack.pop();
-}
-
-struct ScopeStackEntry {
-    scope_metadata: DIScope,
-    name: Option<ast::Name>
-}
-
-fn walk_block(cx: &CrateContext,
-              block: &hir::Block,
-              scope_stack: &mut Vec<ScopeStackEntry> ,
-              scope_map: &mut NodeMap<DIScope>) {
-    scope_map.insert(block.id, scope_stack.last().unwrap().scope_metadata);
-
-    // The interesting things here are statements and the concluding expression.
-    for statement in &block.stmts {
-        scope_map.insert(rustc_front::util::stmt_id(statement),
-                         scope_stack.last().unwrap().scope_metadata);
-
-        match statement.node {
-            hir::StmtDecl(ref decl, _) =>
-                walk_decl(cx, &decl, scope_stack, scope_map),
-            hir::StmtExpr(ref exp, _) |
-            hir::StmtSemi(ref exp, _) =>
-                walk_expr(cx, &exp, scope_stack, scope_map),
-        }
-    }
-
-    if let Some(ref exp) = block.expr {
-        walk_expr(cx, &exp, scope_stack, scope_map);
-    }
-}
-
-fn walk_decl(cx: &CrateContext,
-             decl: &hir::Decl,
-             scope_stack: &mut Vec<ScopeStackEntry> ,
-             scope_map: &mut NodeMap<DIScope>) {
-    match *decl {
-        codemap::Spanned { node: hir::DeclLocal(ref local), .. } => {
-            scope_map.insert(local.id, scope_stack.last().unwrap().scope_metadata);
-
-            walk_pattern(cx, &local.pat, scope_stack, scope_map);
-
-            if let Some(ref exp) = local.init {
-                walk_expr(cx, &exp, scope_stack, scope_map);
-            }
-        }
-        _ => ()
-    }
-}
-
-fn walk_pattern(cx: &CrateContext,
-                pat: &hir::Pat,
-                scope_stack: &mut Vec<ScopeStackEntry> ,
-                scope_map: &mut NodeMap<DIScope>) {
-
-    let def_map = &cx.tcx().def_map;
-
-    // Unfortunately, we cannot just use pat_util::pat_bindings() or
-    // ast_util::walk_pat() here because we have to visit *all* nodes in
-    // order to put them into the scope map. The above functions don't do that.
-    match pat.node {
-        PatKind::Ident(_, ref path1, ref sub_pat_opt) => {
-
-            // Check if this is a binding. If so we need to put it on the
-            // scope stack and maybe introduce an artificial scope
-            if pat_util::pat_is_binding(&def_map.borrow(), &pat) {
-
-                let name = path1.node.unhygienic_name;
-
-                // LLVM does not properly generate 'DW_AT_start_scope' fields
-                // for variable DIEs. For this reason we have to introduce
-                // an artificial scope at bindings whenever a variable with
-                // the same name is declared in *any* parent scope.
-                //
-                // Otherwise the following error occurs:
-                //
-                // let x = 10;
-                //
-                // do_something(); // 'gdb print x' correctly prints 10
-                //
-                // {
-                //     do_something(); // 'gdb print x' prints 0, because it
-                //                     // already reads the uninitialized 'x'
-                //                     // from the next line...
-                //     let x = 100;
-                //     do_something(); // 'gdb print x' correctly prints 100
-                // }
-
-                // Is there already a binding with that name?
-                // N.B.: this comparison must be UNhygienic... because
-                // gdb knows nothing about the context, so any two
-                // variables with the same name will cause the problem.
-                let need_new_scope = scope_stack
-                    .iter()
-                    .any(|entry| entry.name == Some(name));
-
-                if need_new_scope {
-                    // Create a new lexical scope and push it onto the stack
-                    let loc = cx.sess().codemap().lookup_char_pos(pat.span.lo);
-                    let file_metadata = file_metadata(cx, &loc.file.name);
-                    let parent_scope = scope_stack.last().unwrap().scope_metadata;
-
-                    let scope_metadata = unsafe {
-                        llvm::LLVMDIBuilderCreateLexicalBlock(
-                            DIB(cx),
-                            parent_scope,
-                            file_metadata,
-                            loc.line as c_uint,
-                            loc.col.to_usize() as c_uint)
-                    };
-
-                    scope_stack.push(ScopeStackEntry {
-                        scope_metadata: scope_metadata,
-                        name: Some(name)
-                    });
-
-                } else {
-                    // Push a new entry anyway so the name can be found
-                    let prev_metadata = scope_stack.last().unwrap().scope_metadata;
-                    scope_stack.push(ScopeStackEntry {
-                        scope_metadata: prev_metadata,
-                        name: Some(name)
-                    });
-                }
-            }
-
-            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-
-            if let Some(ref sub_pat) = *sub_pat_opt {
-                walk_pattern(cx, &sub_pat, scope_stack, scope_map);
-            }
-        }
-
-        PatKind::Wild => {
-            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-        }
-
-        PatKind::TupleStruct(_, ref sub_pats_opt) => {
-            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-
-            if let Some(ref sub_pats) = *sub_pats_opt {
-                for p in sub_pats {
-                    walk_pattern(cx, &p, scope_stack, scope_map);
-                }
-            }
-        }
-
-        PatKind::Path(..) | PatKind::QPath(..) => {
-            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-        }
-
-        PatKind::Struct(_, ref field_pats, _) => {
-            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-
-            for &codemap::Spanned {
-                node: hir::FieldPat { pat: ref sub_pat, .. },
-                ..
-            } in field_pats {
-                walk_pattern(cx, &sub_pat, scope_stack, scope_map);
-            }
-        }
-
-        PatKind::Tup(ref sub_pats) => {
-            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-
-            for sub_pat in sub_pats {
-                walk_pattern(cx, &sub_pat, scope_stack, scope_map);
-            }
-        }
-
-        PatKind::Box(ref sub_pat) | PatKind::Ref(ref sub_pat, _) => {
-            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-            walk_pattern(cx, &sub_pat, scope_stack, scope_map);
-        }
-
-        PatKind::Lit(ref exp) => {
-            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-            walk_expr(cx, &exp, scope_stack, scope_map);
-        }
-
-        PatKind::Range(ref exp1, ref exp2) => {
-            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-            walk_expr(cx, &exp1, scope_stack, scope_map);
-            walk_expr(cx, &exp2, scope_stack, scope_map);
-        }
-
-        PatKind::Vec(ref front_sub_pats, ref middle_sub_pats, ref back_sub_pats) => {
-            scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-
-            for sub_pat in front_sub_pats {
-                walk_pattern(cx, &sub_pat, scope_stack, scope_map);
-            }
-
-            if let Some(ref sub_pat) = *middle_sub_pats {
-                walk_pattern(cx, &sub_pat, scope_stack, scope_map);
-            }
-
-            for sub_pat in back_sub_pats {
-                walk_pattern(cx, &sub_pat, scope_stack, scope_map);
-            }
-        }
-    }
-}
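The heart of the shadowing workaround in `walk_pattern` is the scan of the scope stack for an earlier binding with the same unhygienic name; only then is an artificial lexical block created. A standalone sketch of just that decision, with an integer standing in for the `DIScope` handle:

```
// Placeholder for the real DIScope metadata handle.
type ScopeId = u32;

struct ScopeStackEntry {
    scope: ScopeId,
    name: Option<String>,
}

// Returns the scope the new binding should live in: a fresh artificial scope
// if any enclosing entry already uses the same name (so the debugger cannot
// confuse the two), otherwise the current innermost scope.
fn scope_for_binding(
    stack: &mut Vec<ScopeStackEntry>,
    name: &str,
    next_scope: &mut ScopeId,
) -> ScopeId {
    let shadows = stack
        .iter()
        .any(|e| e.name.as_ref().map_or(false, |n| n.as_str() == name));
    let parent = stack.last().expect("stack never empty").scope;
    let scope = if shadows {
        *next_scope += 1; // pretend this creates a new lexical block under `parent`
        *next_scope
    } else {
        parent
    };
    stack.push(ScopeStackEntry { scope, name: Some(name.to_string()) });
    scope
}

fn main() {
    let mut stack = vec![ScopeStackEntry { scope: 0, name: None }];
    let mut next_scope = 100;

    let first_x = scope_for_binding(&mut stack, "x", &mut next_scope);
    let second_x = scope_for_binding(&mut stack, "x", &mut next_scope);
    assert_eq!(first_x, 0);    // no shadowing: reuse the parent scope
    assert_eq!(second_x, 101); // shadowed: a fresh artificial scope
}
```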
-
-fn walk_expr(cx: &CrateContext,
-             exp: &hir::Expr,
-             scope_stack: &mut Vec<ScopeStackEntry> ,
-             scope_map: &mut NodeMap<DIScope>) {
-
-    scope_map.insert(exp.id, scope_stack.last().unwrap().scope_metadata);
-
-    match exp.node {
-        hir::ExprLit(_)   |
-        hir::ExprBreak(_) |
-        hir::ExprAgain(_) |
-        hir::ExprPath(..) => {}
-
-        hir::ExprCast(ref sub_exp, _)     |
-        hir::ExprType(ref sub_exp, _) |
-        hir::ExprAddrOf(_, ref sub_exp)  |
-        hir::ExprField(ref sub_exp, _) |
-        hir::ExprTupField(ref sub_exp, _) =>
-            walk_expr(cx, &sub_exp, scope_stack, scope_map),
-
-        hir::ExprBox(ref sub_expr) => {
-            walk_expr(cx, &sub_expr, scope_stack, scope_map);
-        }
-
-        hir::ExprRet(ref exp_opt) => match *exp_opt {
-            Some(ref sub_exp) => walk_expr(cx, &sub_exp, scope_stack, scope_map),
-            None => ()
-        },
-
-        hir::ExprUnary(_, ref sub_exp) => {
-            walk_expr(cx, &sub_exp, scope_stack, scope_map);
-        }
-
-        hir::ExprAssignOp(_, ref lhs, ref rhs) |
-        hir::ExprIndex(ref lhs, ref rhs) |
-        hir::ExprBinary(_, ref lhs, ref rhs)    => {
-            walk_expr(cx, &lhs, scope_stack, scope_map);
-            walk_expr(cx, &rhs, scope_stack, scope_map);
-        }
-
-        hir::ExprRange(ref start, ref end) => {
-            start.as_ref().map(|e| walk_expr(cx, &e, scope_stack, scope_map));
-            end.as_ref().map(|e| walk_expr(cx, &e, scope_stack, scope_map));
-        }
-
-        hir::ExprVec(ref init_expressions) |
-        hir::ExprTup(ref init_expressions) => {
-            for ie in init_expressions {
-                walk_expr(cx, &ie, scope_stack, scope_map);
-            }
-        }
-
-        hir::ExprAssign(ref sub_exp1, ref sub_exp2) |
-        hir::ExprRepeat(ref sub_exp1, ref sub_exp2) => {
-            walk_expr(cx, &sub_exp1, scope_stack, scope_map);
-            walk_expr(cx, &sub_exp2, scope_stack, scope_map);
-        }
-
-        hir::ExprIf(ref cond_exp, ref then_block, ref opt_else_exp) => {
-            walk_expr(cx, &cond_exp, scope_stack, scope_map);
-
-            with_new_scope(cx,
-                           then_block.span,
-                           scope_stack,
-                           scope_map,
-                           |cx, scope_stack, scope_map| {
-                walk_block(cx, &then_block, scope_stack, scope_map);
-            });
-
-            match *opt_else_exp {
-                Some(ref else_exp) =>
-                    walk_expr(cx, &else_exp, scope_stack, scope_map),
-                _ => ()
-            }
-        }
-
-        hir::ExprWhile(ref cond_exp, ref loop_body, _) => {
-            walk_expr(cx, &cond_exp, scope_stack, scope_map);
-
-            with_new_scope(cx,
-                           loop_body.span,
-                           scope_stack,
-                           scope_map,
-                           |cx, scope_stack, scope_map| {
-                walk_block(cx, &loop_body, scope_stack, scope_map);
-            })
-        }
-
-        hir::ExprLoop(ref block, _) |
-        hir::ExprBlock(ref block)   => {
-            with_new_scope(cx,
-                           block.span,
-                           scope_stack,
-                           scope_map,
-                           |cx, scope_stack, scope_map| {
-                walk_block(cx, &block, scope_stack, scope_map);
-            })
-        }
-
-        hir::ExprClosure(_, ref decl, ref block) => {
-            with_new_scope(cx,
-                           block.span,
-                           scope_stack,
-                           scope_map,
-                           |cx, scope_stack, scope_map| {
-                for &hir::Arg { pat: ref pattern, .. } in &decl.inputs {
-                    walk_pattern(cx, &pattern, scope_stack, scope_map);
-                }
-
-                walk_block(cx, &block, scope_stack, scope_map);
-            })
-        }
-
-        hir::ExprCall(ref fn_exp, ref args) => {
-            walk_expr(cx, &fn_exp, scope_stack, scope_map);
-
-            for arg_exp in args {
-                walk_expr(cx, &arg_exp, scope_stack, scope_map);
-            }
-        }
-
-        hir::ExprMethodCall(_, _, ref args) => {
-            for arg_exp in args {
-                walk_expr(cx, &arg_exp, scope_stack, scope_map);
-            }
-        }
-
-        hir::ExprMatch(ref discriminant_exp, ref arms, _) => {
-            walk_expr(cx, &discriminant_exp, scope_stack, scope_map);
-
-            // For each arm we have to first walk the pattern as these might
-            // introduce new artificial scopes. It should be sufficient to
-            // walk only one pattern per arm, as they all must contain the
-            // same binding names.
-
-            for arm_ref in arms {
-                let arm_span = arm_ref.pats[0].span;
-
-                with_new_scope(cx,
-                               arm_span,
-                               scope_stack,
-                               scope_map,
-                               |cx, scope_stack, scope_map| {
-                    for pat in &arm_ref.pats {
-                        walk_pattern(cx, &pat, scope_stack, scope_map);
-                    }
-
-                    if let Some(ref guard_exp) = arm_ref.guard {
-                        walk_expr(cx, &guard_exp, scope_stack, scope_map)
-                    }
-
-                    walk_expr(cx, &arm_ref.body, scope_stack, scope_map);
-                })
-            }
-        }
-
-        hir::ExprStruct(_, ref fields, ref base_exp) => {
-            for &hir::Field { expr: ref exp, .. } in fields {
-                walk_expr(cx, &exp, scope_stack, scope_map);
-            }
-
-            match *base_exp {
-                Some(ref exp) => walk_expr(cx, &exp, scope_stack, scope_map),
-                None => ()
-            }
-        }
-
-        hir::ExprInlineAsm(hir::InlineAsm { ref inputs,
-                                            ref outputs,
-                                            .. }) => {
-            // inputs, outputs: Vec<(String, P<Expr>)>
-            for &(_, ref exp) in inputs {
-                walk_expr(cx, &exp, scope_stack, scope_map);
-            }
-
-            for out in outputs {
-                walk_expr(cx, &out.expr, scope_stack, scope_map);
-            }
-        }
-    }
-}
diff --git a/src/librustc_trans/trans/debuginfo/doc.rs b/src/librustc_trans/trans/debuginfo/doc.rs
deleted file mode 100644 (file)
index bcf5eb9..0000000
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! # Debug Info Module
-//!
-//! This module serves the purpose of generating debug symbols. We use LLVM's
-//! [source level debugging](http://llvm.org/docs/SourceLevelDebugging.html)
-//! features for generating the debug information. The general principle is
-//! this:
-//!
-//! Given the right metadata in the LLVM IR, the LLVM code generator is able to
-//! create DWARF debug symbols for the given code. The
-//! [metadata](http://llvm.org/docs/LangRef.html#metadata-type) is structured
-//! much like DWARF *debugging information entries* (DIE), representing type
-//! information such as datatype layout, function signatures, block layout,
-//! variable location and scope information, etc. It is the purpose of this
-//! module to generate correct metadata and insert it into the LLVM IR.
-//!
-//! As the exact format of metadata trees may change between different LLVM
-//! versions, we now use LLVM
-//! [DIBuilder](http://llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html)
-//! to create metadata where possible. This will hopefully ease the adaptation of
-//! this module to future LLVM versions.
-//!
-//! The public API of the module is a set of functions that will insert the
-//! correct metadata into the LLVM IR when called with the right parameters.
-//! The module is thus driven from an outside client with functions like
-//! `debuginfo::create_local_var_metadata(bcx: block, local: &ast::local)`.
-//!
-//! Internally the module will try to reuse already created metadata by
-//! utilizing a cache. The way to get a shared metadata node when needed is
-//! thus to just call the corresponding function in this module:
-//!
-//!     let file_metadata = file_metadata(crate_context, path);
-//!
-//! The function will take care of probing the cache for an existing node for
-//! that exact file path.
-//!
-//! All private state used by the module is stored within either the
-//! CrateDebugContext struct (owned by the CrateContext) or the
-//! FunctionDebugContext (owned by the FunctionContext).
-//!
-//! This file consists of three conceptual sections:
-//! 1. The public interface of the module
-//! 2. Module-internal metadata creation functions
-//! 3. Minor utility functions
-//!
-//!
-//! ## Recursive Types
-//!
-//! Some kinds of types, such as structs and enums can be recursive. That means
-//! that the type definition of some type X refers to some other type which in
-//! turn (transitively) refers to X. This introduces cycles into the type
-//! referral graph. A naive algorithm doing an on-demand, depth-first traversal
-//! of this graph when describing types, can get trapped in an endless loop
-//! when it reaches such a cycle.
-//!
-//! For example, the following simple type for a singly-linked list...
-//!
-//! ```
-//! struct List {
-//!     value: i32,
-//!     tail: Option<Box<List>>,
-//! }
-//! ```
-//!
-//! will generate the following callstack with a naive DFS algorithm:
-//!
-//! ```
-//! describe(t = List)
-//!   describe(t = i32)
-//!   describe(t = Option<Box<List>>)
-//!     describe(t = Box<List>)
-//!       describe(t = List) // at the beginning again...
-//!       ...
-//! ```
-//!
-//! To break cycles like these, we use "forward declarations". That is, when
-//! the algorithm encounters a possibly recursive type (any struct or enum), it
-//! immediately creates a type description node and inserts it into the cache
-//! *before* describing the members of the type. This type description is just
-//! a stub (as type members are not described and added to it yet) but it
-//! allows the algorithm to already refer to the type. After the stub is
-//! inserted into the cache, the algorithm continues as before. If it now
-//! encounters a recursive reference, it will hit the cache and does not try to
-//! describe the type anew.
-//!
-//! This behaviour is encapsulated in the 'RecursiveTypeDescription' enum,
-//! which represents a kind of continuation, storing all state needed to
-//! continue traversal at the type members after the type has been registered
-//! with the cache. (This implementation approach might be a tad over-
-//! engineered and may change in the future.)
-//!
-//!
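A small sketch of the stub-first strategy just described, with a string cache standing in for the real metadata cache (the types and names here are made up; nothing mirrors the actual `TypeMap` API):

```
use std::collections::HashMap;

// Toy recursive type shape. Recursion is modelled by name identity rather
// than by actual self-referential values, to keep the example small.
enum Ty {
    Int,
    Named { name: String, fields: Vec<Ty> },
}

// Describe a type, inserting a stub into the cache *before* recursing so a
// cycle hits the cache instead of looping forever.
fn describe(ty: &Ty, cache: &mut HashMap<String, String>) -> String {
    match ty {
        Ty::Int => "i32".to_string(),
        Ty::Named { name, fields } => {
            if let Some(done) = cache.get(name) {
                return done.clone();
            }
            // Forward declaration: register the stub first.
            cache.insert(name.clone(), format!("stub {}", name));
            let mut parts = Vec::new();
            for f in fields {
                parts.push(describe(f, cache));
            }
            let full = format!("struct {} {{ {} }}", name, parts.join(", "));
            cache.insert(name.clone(), full.clone());
            full
        }
    }
}

fn main() {
    // List { value: i32, tail: List } -- directly recursive by name.
    let list = Ty::Named {
        name: "List".to_string(),
        fields: vec![
            Ty::Int,
            Ty::Named { name: "List".to_string(), fields: vec![] },
        ],
    };
    let mut cache = HashMap::new();
    println!("{}", describe(&list, &mut cache));
    // Prints: struct List { i32, stub List }
}
```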
-//! ## Source Locations and Line Information
-//!
-//! In addition to data type descriptions, the debugging information must also
-//! make it possible to map machine code locations back to source code
-//! locations in order to be useful. This functionality is also handled in
-//! this module. The following functions control source mappings:
-//!
-//! + set_source_location()
-//! + clear_source_location()
-//! + start_emitting_source_locations()
-//!
-//! `set_source_location()` sets the current source location. All IR
-//! instructions created after a call to this function will be linked to the
-//! given source location, until another location is specified with
-//! `set_source_location()` or the source location is cleared with
-//! `clear_source_location()`. In the latter case, subsequent IR instructions
-//! will not be linked to any source location. As you can see, this is a
-//! stateful API (mimicking the one in LLVM), so be careful with source
-//! locations set by previous calls. It's probably best to not rely on any
-//! specific state being present at a given point in code.
-//!
-//! One topic that deserves some extra attention is *function prologues*. At
-//! the beginning of a function's machine code there are typically a few
-//! instructions for loading argument values into allocas and checking if
-//! there's enough stack space for the function to execute. This *prologue* is
-//! not visible in the source code and LLVM puts a special PROLOGUE END marker
-//! into the line table at the first non-prologue instruction of the function.
-//! In order to find out where the prologue ends, LLVM looks for the first
-//! instruction in the function body that is linked to a source location. So,
-//! when generating prologue instructions we have to make sure that we don't
-//! emit source location information until the 'real' function body begins. For
-//! this reason, source location emission is disabled by default for any new
-//! function being translated and is only activated after a call to the third
-//! function from the list above, `start_emitting_source_locations()`. This
-//! function should be called right before regularly starting to translate the
-//! top-level block of the given function.
-//!
-//! There is one exception to the above rule: `llvm.dbg.declare` instructions
-//! must be linked to the source location of the variable being declared. For
-//! function parameters these `llvm.dbg.declare` instructions typically occur
-//! in the middle of the prologue, however, they are ignored by LLVM's prologue
-//! detection. The `create_argument_metadata()` and related functions take care
-//! of linking the `llvm.dbg.declare` instructions to the correct source
-//! locations even while source location emission is still disabled, so there
-//! is no need to do anything special with source location handling here.
-//!
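Because the API is stateful, the emitting side typically looks like the sketch below: a current-location slot that is set, cleared, or simply left alone between instructions, plus a switch that stays off until the prologue is done (hypothetical types, not the real builder interface):

```
// Toy instruction emitter with a stateful "current source location", in the
// spirit of set_source_location()/clear_source_location() described above.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Loc {
    line: u32,
    col: u32,
}

struct Emitter {
    current_loc: Option<Loc>,
    emitting_enabled: bool, // stays false until the prologue is finished
    instructions: Vec<(String, Option<Loc>)>,
}

impl Emitter {
    fn new() -> Emitter {
        Emitter {
            current_loc: None,
            emitting_enabled: false,
            instructions: Vec::new(),
        }
    }

    fn set_source_location(&mut self, loc: Loc) {
        self.current_loc = Some(loc);
    }

    fn clear_source_location(&mut self) {
        self.current_loc = None;
    }

    fn start_emitting_source_locations(&mut self) {
        self.emitting_enabled = true;
    }

    fn emit(&mut self, op: &str) {
        // Every instruction picks up whatever location happens to be current.
        let loc = if self.emitting_enabled { self.current_loc } else { None };
        self.instructions.push((op.to_string(), loc));
    }
}

fn main() {
    let mut e = Emitter::new();
    e.emit("alloca"); // prologue: no location attached
    e.start_emitting_source_locations();
    e.set_source_location(Loc { line: 3, col: 5 });
    e.emit("store");  // linked to 3:5
    e.clear_source_location();
    e.emit("br");     // no location again
    assert_eq!(e.instructions[1].1, Some(Loc { line: 3, col: 5 }));
    assert_eq!(e.instructions[2].1, None);
}
```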
-//! ## Unique Type Identification
-//!
-//! In order for link-time optimization to work properly, LLVM needs a unique
-//! type identifier that tells it across compilation units which types are the
-//! same as others. This type identifier is created by
-//! TypeMap::get_unique_type_id_of_type() using the following algorithm:
-//!
-//! (1) Primitive types have their name as ID
-//! (2) Structs, enums and traits have a multipart identifier
-//!
-//!     (1) The first part is the SVH (strict version hash) of the crate they
-//!         were originally defined in
-//!
-//!     (2) The second part is the ast::NodeId of the definition in their
-//!         original crate
-//!
-//!     (3) The final part is a concatenation of the type IDs of their concrete
-//!         type arguments if they are generic types.
-//!
-//! (3) Tuple-, pointer and function types are structurally identified, which
-//!     means that they are equivalent if their component types are equivalent
-//!     (i.e. (i32, i32) is the same regardless in which crate it is used).
-//!
-//! This algorithm also provides a stable ID for types that are defined in one
-//! crate but instantiated from metadata within another crate. We just have to
-//! take care to always map crate and node IDs back to the original crate
-//! context.
-//!
-//! As a side-effect these unique type IDs also help to solve a problem arising
-//! from lifetime parameters. Since lifetime parameters are completely omitted
-//! in debuginfo, more than one `Ty` instance may map to the same debuginfo
-//! type metadata, that is, some struct `Struct<'a>` may have N instantiations
-//! with different concrete substitutions for `'a`, and thus there will be N
-//! `Ty` instances for the type `Struct<'a>` even though it is not generic
-//! otherwise. Unfortunately this means that we cannot use `ty::type_id()` as a
-//! cheap identifier for type metadata---we have done this in the past, but it
-//! led to unnecessary metadata duplication in the best case and LLVM
-//! assertions in the worst. However, the unique type ID as described above
-//! *can* be used as identifier. Since it is comparatively expensive to
-//! construct, though, `ty::type_id()` is still used additionally as an
-//! optimization for cases where the exact same type has been seen before
-//! (which is most of the time).
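A rough sketch of the multipart identifier described in (2): the SVH of the defining crate, then the definition's node id, then the ids of any concrete type arguments (assumption: plain strings stand in for the interned `UniqueTypeId`):

```
// Illustrative construction of a multipart unique type id. The real code
// interns the resulting string; here we simply return it.
fn unique_type_id(
    crate_svh: &str,
    def_node_id: u64,
    type_argument_ids: &[String],
) -> String {
    let mut id = format!("{}/{}", crate_svh, def_node_id);
    for arg in type_argument_ids {
        id.push('<');
        id.push_str(arg);
        id.push('>');
    }
    id
}

fn main() {
    // A hypothetical `Struct<'a, i32>` defined at node 42 of a crate whose SVH is "abc123".
    let i32_id = "i32".to_string(); // primitive types use their name as id
    let id = unique_type_id("abc123", 42, &[i32_id]);
    assert_eq!(id, "abc123/42<i32>");
    // Lifetime parameters are omitted entirely, so `Struct<'a, i32>` and
    // `Struct<'b, i32>` get the same id.
}
```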
diff --git a/src/librustc_trans/trans/debuginfo/gdb.rs b/src/librustc_trans/trans/debuginfo/gdb.rs
deleted file mode 100644 (file)
index 4e3fadd..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// .debug_gdb_scripts binary section.
-
-use llvm;
-
-use trans::common::{C_bytes, CrateContext, C_i32};
-use trans::declare;
-use trans::type_::Type;
-use session::config::NoDebugInfo;
-
-use std::ffi::CString;
-use std::ptr;
-use syntax::attr;
-
-
-/// Inserts a side-effect free instruction sequence that makes sure that the
-/// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
-pub fn insert_reference_to_gdb_debug_scripts_section_global(ccx: &CrateContext) {
-    if needs_gdb_debug_scripts_section(ccx) {
-        let empty = CString::new("").unwrap();
-        let gdb_debug_scripts_section_global =
-            get_or_insert_gdb_debug_scripts_section_global(ccx);
-        unsafe {
-            // Load just the first byte as that's all that's necessary to force
-            // LLVM to keep around the reference to the global.
-            let indices = [C_i32(ccx, 0), C_i32(ccx, 0)];
-            let element =
-                llvm::LLVMBuildInBoundsGEP(ccx.raw_builder(),
-                                           gdb_debug_scripts_section_global,
-                                           indices.as_ptr(),
-                                           indices.len() as ::libc::c_uint,
-                                           empty.as_ptr());
-            let volative_load_instruction =
-                llvm::LLVMBuildLoad(ccx.raw_builder(),
-                                    element,
-                                    empty.as_ptr());
-            llvm::LLVMSetVolatile(volative_load_instruction, llvm::True);
-            llvm::LLVMSetAlignment(volative_load_instruction, 1);
-        }
-    }
-}
-
-/// Allocates the global variable responsible for the .debug_gdb_scripts binary
-/// section.
-pub fn get_or_insert_gdb_debug_scripts_section_global(ccx: &CrateContext)
-                                                  -> llvm::ValueRef {
-    let c_section_var_name = "__rustc_debug_gdb_scripts_section__\0";
-    let section_var_name = &c_section_var_name[..c_section_var_name.len()-1];
-
-    let section_var = unsafe {
-        llvm::LLVMGetNamedGlobal(ccx.llmod(),
-                                 c_section_var_name.as_ptr() as *const _)
-    };
-
-    if section_var == ptr::null_mut() {
-        let section_name = b".debug_gdb_scripts\0";
-        let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0";
-
-        unsafe {
-            let llvm_type = Type::array(&Type::i8(ccx),
-                                        section_contents.len() as u64);
-
-            let section_var = declare::define_global(ccx, section_var_name,
-                                                     llvm_type).unwrap_or_else(||{
-                ccx.sess().bug(&format!("symbol `{}` is already defined", section_var_name))
-            });
-            llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _);
-            llvm::LLVMSetInitializer(section_var, C_bytes(ccx, section_contents));
-            llvm::LLVMSetGlobalConstant(section_var, llvm::True);
-            llvm::LLVMSetUnnamedAddr(section_var, llvm::True);
-            llvm::SetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage);
-            // This should make sure that the whole section is not larger than
-            // the string it contains. Otherwise we get a warning from GDB.
-            llvm::LLVMSetAlignment(section_var, 1);
-            section_var
-        }
-    } else {
-        section_var
-    }
-}
-
-pub fn needs_gdb_debug_scripts_section(ccx: &CrateContext) -> bool {
-    let omit_gdb_pretty_printer_section =
-        attr::contains_name(&ccx.tcx()
-                                .map
-                                .krate()
-                                .attrs,
-                            "omit_gdb_pretty_printer_section");
-
-    !omit_gdb_pretty_printer_section &&
-    !ccx.sess().target.target.options.is_like_osx &&
-    !ccx.sess().target.target.options.is_like_windows &&
-    ccx.sess().opts.debuginfo != NoDebugInfo
-}
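A minimal sketch of the opt-out path checked by needs_gdb_debug_scripts_section() above: a crate-level omit_gdb_pretty_printer_section attribute makes the check return false, so no __rustc_debug_gdb_scripts_section__ global (and no volatile load keeping it alive) is emitted. The feature-gate line is an assumption about the nightly compilers of this era, not something taken from this diff.

#![feature(omit_gdb_pretty_printer_section)] // assumed feature gate, not shown in this diff
#![omit_gdb_pretty_printer_section]

fn main() {
    // With the crate attribute present, the compiler skips the .debug_gdb_scripts
    // section entirely.
    println!("built without a .debug_gdb_scripts section");
}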
diff --git a/src/librustc_trans/trans/debuginfo/metadata.rs b/src/librustc_trans/trans/debuginfo/metadata.rs
deleted file mode 100644 (file)
index 330d407..0000000
+++ /dev/null
@@ -1,2136 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use self::RecursiveTypeDescription::*;
-use self::MemberOffset::*;
-use self::MemberDescriptionFactory::*;
-use self::EnumDiscriminantInfo::*;
-
-use super::utils::{debug_context, DIB, span_start, bytes_to_bits, size_and_align_of,
-                   get_namespace_and_span_for_item, create_DIArray,
-                   fn_should_be_ignored, is_node_local_to_unit};
-use super::namespace::namespace_for_item;
-use super::type_names::{compute_debuginfo_type_name, push_debuginfo_type_name};
-use super::{declare_local, VariableKind, VariableAccess};
-
-use llvm::{self, ValueRef};
-use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType};
-
-use middle::def_id::DefId;
-use middle::infer;
-use middle::pat_util;
-use middle::subst;
-use rustc::front::map as hir_map;
-use rustc_front::hir::{self, PatKind};
-use trans::{type_of, adt, machine, monomorphize};
-use trans::common::{self, CrateContext, FunctionContext, Block};
-use trans::_match::{BindingInfo, TransBindingMode};
-use trans::type_::Type;
-use middle::ty::{self, Ty};
-use session::config::{self, FullDebugInfo};
-use util::nodemap::FnvHashMap;
-use util::common::path2cstr;
-
-use libc::{c_uint, c_longlong};
-use std::ffi::CString;
-use std::path::Path;
-use std::ptr;
-use std::rc::Rc;
-use syntax;
-use syntax::util::interner::Interner;
-use syntax::codemap::Span;
-use syntax::{ast, codemap};
-use syntax::parse::token;
-
-
-const DW_LANG_RUST: c_uint = 0x9000;
-#[allow(non_upper_case_globals)]
-const DW_ATE_boolean: c_uint = 0x02;
-#[allow(non_upper_case_globals)]
-const DW_ATE_float: c_uint = 0x04;
-#[allow(non_upper_case_globals)]
-const DW_ATE_signed: c_uint = 0x05;
-#[allow(non_upper_case_globals)]
-const DW_ATE_unsigned: c_uint = 0x07;
-#[allow(non_upper_case_globals)]
-const DW_ATE_unsigned_char: c_uint = 0x08;
-
-pub const UNKNOWN_LINE_NUMBER: c_uint = 0;
-pub const UNKNOWN_COLUMN_NUMBER: c_uint = 0;
-
-// ptr::null() doesn't work :(
-const NO_FILE_METADATA: DIFile = (0 as DIFile);
-const NO_SCOPE_METADATA: DIScope = (0 as DIScope);
-
-const FLAGS_NONE: c_uint = 0;
-
-#[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)]
-pub struct UniqueTypeId(ast::Name);
-
-// The TypeMap is where the CrateDebugContext holds the type metadata nodes
-// created so far. The metadata nodes are indexed by UniqueTypeId, and, for
-// faster lookup, also by Ty. The TypeMap is responsible for creating
-// UniqueTypeIds.
-pub struct TypeMap<'tcx> {
-    // The UniqueTypeIds created so far
-    unique_id_interner: Interner<Rc<String>>,
-    // A map from UniqueTypeId to debuginfo metadata for that type. This is a 1:1 mapping.
-    unique_id_to_metadata: FnvHashMap<UniqueTypeId, DIType>,
-    // A map from types to debuginfo metadata. This is a N:1 mapping.
-    type_to_metadata: FnvHashMap<Ty<'tcx>, DIType>,
-    // A map from types to UniqueTypeId. This is a N:1 mapping.
-    type_to_unique_id: FnvHashMap<Ty<'tcx>, UniqueTypeId>
-}
-
-impl<'tcx> TypeMap<'tcx> {
-    pub fn new() -> TypeMap<'tcx> {
-        TypeMap {
-            unique_id_interner: Interner::new(),
-            type_to_metadata: FnvHashMap(),
-            unique_id_to_metadata: FnvHashMap(),
-            type_to_unique_id: FnvHashMap(),
-        }
-    }
-
-    // Adds a Ty to metadata mapping to the TypeMap. The method will fail if
-    // the mapping already exists.
-    fn register_type_with_metadata<'a>(&mut self,
-                                       cx: &CrateContext<'a, 'tcx>,
-                                       type_: Ty<'tcx>,
-                                       metadata: DIType) {
-        if self.type_to_metadata.insert(type_, metadata).is_some() {
-            cx.sess().bug(&format!("Type metadata for Ty '{}' is already in the TypeMap!",
-                                   type_));
-        }
-    }
-
-    // Adds a UniqueTypeId to metadata mapping to the TypeMap. The method will
-    // fail if the mapping already exists.
-    fn register_unique_id_with_metadata(&mut self,
-                                        cx: &CrateContext,
-                                        unique_type_id: UniqueTypeId,
-                                        metadata: DIType) {
-        if self.unique_id_to_metadata.insert(unique_type_id, metadata).is_some() {
-            let unique_type_id_str = self.get_unique_type_id_as_string(unique_type_id);
-            cx.sess().bug(&format!("Type metadata for unique id '{}' is already in the TypeMap!",
-                                  &unique_type_id_str[..]));
-        }
-    }
-
-    fn find_metadata_for_type(&self, type_: Ty<'tcx>) -> Option<DIType> {
-        self.type_to_metadata.get(&type_).cloned()
-    }
-
-    fn find_metadata_for_unique_id(&self, unique_type_id: UniqueTypeId) -> Option<DIType> {
-        self.unique_id_to_metadata.get(&unique_type_id).cloned()
-    }
-
-    // Get the string representation of a UniqueTypeId. This method will fail if
-    // the id is unknown.
-    fn get_unique_type_id_as_string(&self, unique_type_id: UniqueTypeId) -> Rc<String> {
-        let UniqueTypeId(interner_key) = unique_type_id;
-        self.unique_id_interner.get(interner_key)
-    }
-
-    // Get the UniqueTypeId for the given type. If the UniqueTypeId for the given
-    // type has been requested before, this is just a table lookup. Otherwise an
-    // ID will be generated and stored for later lookup.
-    fn get_unique_type_id_of_type<'a>(&mut self, cx: &CrateContext<'a, 'tcx>,
-                                      type_: Ty<'tcx>) -> UniqueTypeId {
-
-        // basic type             -> {:name of the type:}
-        // tuple                  -> {tuple_(:param-uid:)*}
-        // struct                 -> {struct_:svh: / :node-id:_<(:param-uid:),*> }
-        // enum                   -> {enum_:svh: / :node-id:_<(:param-uid:),*> }
-        // enum variant           -> {variant_:variant-name:_:enum-uid:}
-        // reference (&)          -> {& :pointee-uid:}
-        // mut reference (&mut)   -> {&mut :pointee-uid:}
-        // ptr (*)                -> {* :pointee-uid:}
-        // mut ptr (*mut)         -> {*mut :pointee-uid:}
-        // unique ptr (box)       -> {box :pointee-uid:}
-        // @-ptr (@)              -> {@ :pointee-uid:}
-        // sized vec ([T; x])     -> {[:size:] :element-uid:}
-        // unsized vec ([T])      -> {[] :element-uid:}
-        // trait (T)              -> {trait_:svh: / :node-id:_<(:param-uid:),*> }
-        // closure                -> {<unsafe_> <once_> :store-sigil: |(:param-uid:),* <,_...>| -> \
-        //                             :return-type-uid: : (:bounds:)*}
-        // function               -> {<unsafe_> <abi_> fn( (:param-uid:)* <,_...> ) -> \
-        //                             :return-type-uid:}
-
-        match self.type_to_unique_id.get(&type_).cloned() {
-            Some(unique_type_id) => return unique_type_id,
-            None => { /* generate one */}
-        };
-
-        let mut unique_type_id = String::with_capacity(256);
-        unique_type_id.push('{');
-
-        match type_.sty {
-            ty::TyBool     |
-            ty::TyChar     |
-            ty::TyStr      |
-            ty::TyInt(_)   |
-            ty::TyUint(_)  |
-            ty::TyFloat(_) => {
-                push_debuginfo_type_name(cx, type_, false, &mut unique_type_id);
-            },
-            ty::TyEnum(def, substs) => {
-                unique_type_id.push_str("enum ");
-                from_def_id_and_substs(self, cx, def.did, substs, &mut unique_type_id);
-            },
-            ty::TyStruct(def, substs) => {
-                unique_type_id.push_str("struct ");
-                from_def_id_and_substs(self, cx, def.did, substs, &mut unique_type_id);
-            },
-            ty::TyTuple(ref component_types) if component_types.is_empty() => {
-                push_debuginfo_type_name(cx, type_, false, &mut unique_type_id);
-            },
-            ty::TyTuple(ref component_types) => {
-                unique_type_id.push_str("tuple ");
-                for &component_type in component_types {
-                    let component_type_id =
-                        self.get_unique_type_id_of_type(cx, component_type);
-                    let component_type_id =
-                        self.get_unique_type_id_as_string(component_type_id);
-                    unique_type_id.push_str(&component_type_id[..]);
-                }
-            },
-            ty::TyBox(inner_type) => {
-                unique_type_id.push_str("box ");
-                let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type);
-                let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
-                unique_type_id.push_str(&inner_type_id[..]);
-            },
-            ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
-                unique_type_id.push('*');
-                if mutbl == hir::MutMutable {
-                    unique_type_id.push_str("mut");
-                }
-
-                let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type);
-                let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
-                unique_type_id.push_str(&inner_type_id[..]);
-            },
-            ty::TyRef(_, ty::TypeAndMut { ty: inner_type, mutbl }) => {
-                unique_type_id.push('&');
-                if mutbl == hir::MutMutable {
-                    unique_type_id.push_str("mut");
-                }
-
-                let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type);
-                let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
-                unique_type_id.push_str(&inner_type_id[..]);
-            },
-            ty::TyArray(inner_type, len) => {
-                unique_type_id.push_str(&format!("[{}]", len));
-
-                let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type);
-                let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
-                unique_type_id.push_str(&inner_type_id[..]);
-            },
-            ty::TySlice(inner_type) => {
-                unique_type_id.push_str("[]");
-
-                let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type);
-                let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
-                unique_type_id.push_str(&inner_type_id[..]);
-            },
-            ty::TyTrait(ref trait_data) => {
-                unique_type_id.push_str("trait ");
-
-                let principal = cx.tcx().erase_late_bound_regions(&trait_data.principal);
-
-                from_def_id_and_substs(self,
-                                       cx,
-                                       principal.def_id,
-                                       principal.substs,
-                                       &mut unique_type_id);
-            },
-            ty::TyBareFn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => {
-                if unsafety == hir::Unsafety::Unsafe {
-                    unique_type_id.push_str("unsafe ");
-                }
-
-                unique_type_id.push_str(abi.name());
-
-                unique_type_id.push_str(" fn(");
-
-                let sig = cx.tcx().erase_late_bound_regions(sig);
-                let sig = infer::normalize_associated_type(cx.tcx(), &sig);
-
-                for &parameter_type in &sig.inputs {
-                    let parameter_type_id =
-                        self.get_unique_type_id_of_type(cx, parameter_type);
-                    let parameter_type_id =
-                        self.get_unique_type_id_as_string(parameter_type_id);
-                    unique_type_id.push_str(&parameter_type_id[..]);
-                    unique_type_id.push(',');
-                }
-
-                if sig.variadic {
-                    unique_type_id.push_str("...");
-                }
-
-                unique_type_id.push_str(")->");
-                match sig.output {
-                    ty::FnConverging(ret_ty) => {
-                        let return_type_id = self.get_unique_type_id_of_type(cx, ret_ty);
-                        let return_type_id = self.get_unique_type_id_as_string(return_type_id);
-                        unique_type_id.push_str(&return_type_id[..]);
-                    }
-                    ty::FnDiverging => {
-                        unique_type_id.push_str("!");
-                    }
-                }
-            },
-            ty::TyClosure(_, ref substs) if substs.upvar_tys.is_empty() => {
-                push_debuginfo_type_name(cx, type_, false, &mut unique_type_id);
-            },
-            ty::TyClosure(_, ref substs) => {
-                unique_type_id.push_str("closure ");
-                for upvar_type in &substs.upvar_tys {
-                    let upvar_type_id =
-                        self.get_unique_type_id_of_type(cx, upvar_type);
-                    let upvar_type_id =
-                        self.get_unique_type_id_as_string(upvar_type_id);
-                    unique_type_id.push_str(&upvar_type_id[..]);
-                }
-            },
-            _ => {
-                cx.sess().bug(&format!("get_unique_type_id_of_type() - unexpected type: {:?}",
-                                       type_))
-            }
-        };
-
-        unique_type_id.push('}');
-
-        // Trim to size before storing permanently
-        unique_type_id.shrink_to_fit();
-
-        let key = self.unique_id_interner.intern(Rc::new(unique_type_id));
-        self.type_to_unique_id.insert(type_, UniqueTypeId(key));
-
-        return UniqueTypeId(key);
-
-        fn from_def_id_and_substs<'a, 'tcx>(type_map: &mut TypeMap<'tcx>,
-                                            cx: &CrateContext<'a, 'tcx>,
-                                            def_id: DefId,
-                                            substs: &subst::Substs<'tcx>,
-                                            output: &mut String) {
-            // First, find out the 'real' def_id of the type. Items inlined from
-            // other crates have to be mapped back to their source.
-            let source_def_id = if let Some(node_id) = cx.tcx().map.as_local_node_id(def_id) {
-                match cx.external_srcs().borrow().get(&node_id).cloned() {
-                    Some(source_def_id) => {
-                        // The given def_id identifies the inlined copy of a
-                        // type definition, let's take the source of the copy.
-                        source_def_id
-                    }
-                    None => def_id
-                }
-            } else {
-                def_id
-            };
-
-            // Get the crate hash as first part of the identifier.
-            let crate_hash = if source_def_id.is_local() {
-                cx.link_meta().crate_hash.clone()
-            } else {
-                cx.sess().cstore.crate_hash(source_def_id.krate)
-            };
-
-            output.push_str(crate_hash.as_str());
-            output.push_str("/");
-            output.push_str(&format!("{:x}", def_id.index.as_usize()));
-
-            // Maybe check that there is no self type here.
-
-            let tps = substs.types.get_slice(subst::TypeSpace);
-            if !tps.is_empty() {
-                output.push('<');
-
-                for &type_parameter in tps {
-                    let param_type_id =
-                        type_map.get_unique_type_id_of_type(cx, type_parameter);
-                    let param_type_id =
-                        type_map.get_unique_type_id_as_string(param_type_id);
-                    output.push_str(&param_type_id[..]);
-                    output.push(',');
-                }
-
-                output.push('>');
-            }
-        }
-    }
-
-    // Get the UniqueTypeId for an enum variant. Enum variants are not really
-    // types of their own, so they need special handling. We still need a
-    // UniqueTypeId for them, since to debuginfo they *are* real types.
-    fn get_unique_type_id_of_enum_variant<'a>(&mut self,
-                                              cx: &CrateContext<'a, 'tcx>,
-                                              enum_type: Ty<'tcx>,
-                                              variant_name: &str)
-                                              -> UniqueTypeId {
-        let enum_type_id = self.get_unique_type_id_of_type(cx, enum_type);
-        let enum_variant_type_id = format!("{}::{}",
-                                           &self.get_unique_type_id_as_string(enum_type_id),
-                                           variant_name);
-        let interner_key = self.unique_id_interner.intern(Rc::new(enum_variant_type_id));
-        UniqueTypeId(interner_key)
-    }
-}
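A self-contained sketch of the mapping discipline documented above, using plain HashMaps in place of FnvHashMap and strings in place of DIType (illustration only; the "{&{i32}}" id string is modeled on the encoding scheme in get_unique_type_id_of_type()): many Ty keys may share one unique id, but each unique id owns exactly one metadata node.

use std::collections::HashMap;

fn main() {
    // Two references that differ only in their region produce the same id string,
    // so they end up as aliases of a single metadata node (the N:1 mapping above).
    let mut type_to_unique_id: HashMap<&str, String> = HashMap::new();
    let mut unique_id_to_metadata: HashMap<String, &str> = HashMap::new();

    for &ty in ["&'a i32", "&'b i32"].iter() {
        let id = "{&{i32}}".to_string(); // modeled on the encoding scheme above
        type_to_unique_id.insert(ty, id.clone());
        // Only the first registration creates metadata; later types reuse it.
        unique_id_to_metadata.entry(id).or_insert("DIType for &i32");
    }

    assert_eq!(type_to_unique_id.len(), 2);
    assert_eq!(unique_id_to_metadata.len(), 1);
    println!("two region-differing types, one metadata node");
}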
-
-// A description of some recursive type. It can either be already finished (as
-// with FinalMetadata) or it is not yet finished, but contains all information
-// needed to generate the missing parts of the description. See the
-// documentation section on Recursive Types at the top of this file for more
-// information.
-enum RecursiveTypeDescription<'tcx> {
-    UnfinishedMetadata {
-        unfinished_type: Ty<'tcx>,
-        unique_type_id: UniqueTypeId,
-        metadata_stub: DICompositeType,
-        llvm_type: Type,
-        member_description_factory: MemberDescriptionFactory<'tcx>,
-    },
-    FinalMetadata(DICompositeType)
-}
-
-fn create_and_register_recursive_type_forward_declaration<'a, 'tcx>(
-    cx: &CrateContext<'a, 'tcx>,
-    unfinished_type: Ty<'tcx>,
-    unique_type_id: UniqueTypeId,
-    metadata_stub: DICompositeType,
-    llvm_type: Type,
-    member_description_factory: MemberDescriptionFactory<'tcx>)
- -> RecursiveTypeDescription<'tcx> {
-
-    // Insert the stub into the TypeMap in order to allow for recursive references
-    let mut type_map = debug_context(cx).type_map.borrow_mut();
-    type_map.register_unique_id_with_metadata(cx, unique_type_id, metadata_stub);
-    type_map.register_type_with_metadata(cx, unfinished_type, metadata_stub);
-
-    UnfinishedMetadata {
-        unfinished_type: unfinished_type,
-        unique_type_id: unique_type_id,
-        metadata_stub: metadata_stub,
-        llvm_type: llvm_type,
-        member_description_factory: member_description_factory,
-    }
-}
-
-impl<'tcx> RecursiveTypeDescription<'tcx> {
-    // Finishes up the description of the type in question (mostly by providing
-    // descriptions of the fields of the given type) and returns the final type
-    // metadata.
-    fn finalize<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> MetadataCreationResult {
-        match *self {
-            FinalMetadata(metadata) => MetadataCreationResult::new(metadata, false),
-            UnfinishedMetadata {
-                unfinished_type,
-                unique_type_id,
-                metadata_stub,
-                llvm_type,
-                ref member_description_factory,
-                ..
-            } => {
-                // Make sure that we have a forward declaration of the type in
-                // the TypeMap so that recursive references are possible. This
-                // will always be the case if the RecursiveTypeDescription has
-                // been properly created through the
-                // create_and_register_recursive_type_forward_declaration()
-                // function.
-                {
-                    let type_map = debug_context(cx).type_map.borrow();
-                    if type_map.find_metadata_for_unique_id(unique_type_id).is_none() ||
-                       type_map.find_metadata_for_type(unfinished_type).is_none() {
-                        cx.sess().bug(&format!("Forward declaration of potentially recursive type \
-                                              '{:?}' was not found in TypeMap!",
-                                              unfinished_type)
-                                      );
-                    }
-                }
-
-                // ... then create the member descriptions ...
-                let member_descriptions =
-                    member_description_factory.create_member_descriptions(cx);
-
-                // ... and attach them to the stub to complete it.
-                set_members_of_composite_type(cx,
-                                              metadata_stub,
-                                              llvm_type,
-                                              &member_descriptions[..]);
-                return MetadataCreationResult::new(metadata_stub, true);
-            }
-        }
-    }
-}
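A simplified, runnable sketch of the two-phase scheme that finalize() relies on (illustration only; a RefCell<HashMap> stands in for the TypeMap and plain strings stand in for DITypes): register a forward declaration first, then compute member descriptions, so a member that refers back to the type under construction finds the stub instead of recursing forever.

use std::cell::RefCell;
use std::collections::HashMap;

#[derive(Debug)]
struct Metadata {
    members: Vec<String>,
}

fn describe(map: &RefCell<HashMap<String, Metadata>>, name: &str, member_tys: &[&str]) {
    // Phase 1: insert a stub so recursive references can find something.
    map.borrow_mut()
       .insert(name.to_string(), Metadata { members: vec![] });

    // Phase 2: build member descriptions; self-references hit the stub.
    let members: Vec<String> = member_tys.iter().map(|ty| {
        if map.borrow().contains_key(*ty) {
            format!("{} (stub already registered)", ty)
        } else {
            format!("{} (described separately)", ty)
        }
    }).collect();

    // Attach the members to the stub, completing it.
    map.borrow_mut().get_mut(name).unwrap().members = members;
}

fn main() {
    let map = RefCell::new(HashMap::new());
    // Roughly: struct List { next: Option<Box<List>> }
    describe(&map, "List", &["List"]);
    println!("{:?}", map.borrow()["List"]);
}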
-
-// Returns from the enclosing function if the type metadata with the given
-// unique id can be found in the type map
-macro_rules! return_if_metadata_created_in_meantime {
-    ($cx: expr, $unique_type_id: expr) => (
-        match debug_context($cx).type_map
-                                .borrow()
-                                .find_metadata_for_unique_id($unique_type_id) {
-            Some(metadata) => return MetadataCreationResult::new(metadata, true),
-            None => { /* proceed normally */ }
-        }
-    )
-}
-
-fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                unique_type_id: UniqueTypeId,
-                                element_type: Ty<'tcx>,
-                                len: Option<u64>,
-                                span: Span)
-                                -> MetadataCreationResult {
-    let element_type_metadata = type_metadata(cx, element_type, span);
-
-    return_if_metadata_created_in_meantime!(cx, unique_type_id);
-
-    let element_llvm_type = type_of::type_of(cx, element_type);
-    let (element_type_size, element_type_align) = size_and_align_of(cx, element_llvm_type);
-
-    let (array_size_in_bytes, upper_bound) = match len {
-        Some(len) => (element_type_size * len, len as c_longlong),
-        None => (0, -1)
-    };
-
-    let subrange = unsafe {
-        llvm::LLVMDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound)
-    };
-
-    let subscripts = create_DIArray(DIB(cx), &[subrange]);
-    let metadata = unsafe {
-        llvm::LLVMDIBuilderCreateArrayType(
-            DIB(cx),
-            bytes_to_bits(array_size_in_bytes),
-            bytes_to_bits(element_type_align),
-            element_type_metadata,
-            subscripts)
-    };
-
-    return MetadataCreationResult::new(metadata, false);
-}
-
-fn vec_slice_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                vec_type: Ty<'tcx>,
-                                element_type: Ty<'tcx>,
-                                unique_type_id: UniqueTypeId,
-                                span: Span)
-                                -> MetadataCreationResult {
-    let data_ptr_type = cx.tcx().mk_ptr(ty::TypeAndMut {
-        ty: element_type,
-        mutbl: hir::MutImmutable
-    });
-
-    let element_type_metadata = type_metadata(cx, data_ptr_type, span);
-
-    return_if_metadata_created_in_meantime!(cx, unique_type_id);
-
-    let slice_llvm_type = type_of::type_of(cx, vec_type);
-    let slice_type_name = compute_debuginfo_type_name(cx, vec_type, true);
-
-    let member_llvm_types = slice_llvm_type.field_types();
-    assert!(slice_layout_is_correct(cx,
-                                    &member_llvm_types[..],
-                                    element_type));
-    let member_descriptions = [
-        MemberDescription {
-            name: "data_ptr".to_string(),
-            llvm_type: member_llvm_types[0],
-            type_metadata: element_type_metadata,
-            offset: ComputedMemberOffset,
-            flags: FLAGS_NONE
-        },
-        MemberDescription {
-            name: "length".to_string(),
-            llvm_type: member_llvm_types[1],
-            type_metadata: type_metadata(cx, cx.tcx().types.usize, span),
-            offset: ComputedMemberOffset,
-            flags: FLAGS_NONE
-        },
-    ];
-
-    assert!(member_descriptions.len() == member_llvm_types.len());
-
-    let loc = span_start(cx, span);
-    let file_metadata = file_metadata(cx, &loc.file.name);
-
-    let metadata = composite_type_metadata(cx,
-                                           slice_llvm_type,
-                                           &slice_type_name[..],
-                                           unique_type_id,
-                                           &member_descriptions,
-                                           NO_SCOPE_METADATA,
-                                           file_metadata,
-                                           span);
-    return MetadataCreationResult::new(metadata, false);
-
-    fn slice_layout_is_correct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                         member_llvm_types: &[Type],
-                                         element_type: Ty<'tcx>)
-                                         -> bool {
-        member_llvm_types.len() == 2 &&
-        member_llvm_types[0] == type_of::type_of(cx, element_type).ptr_to() &&
-        member_llvm_types[1] == cx.int_type()
-    }
-}
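A quick, runnable illustration (not from the deleted file) of the layout the two member descriptions above describe: a slice reference is a data pointer plus a length, which is exactly what slice_layout_is_correct() asserts against the LLVM type.

use std::mem::size_of;

fn main() {
    let bytes = [1u8, 2, 3];
    let slice: &[u8] = &bytes;

    // &[T] is a fat pointer: one word of data pointer and one word of length,
    // matching the "data_ptr" and "length" members registered above.
    assert_eq!(size_of::<&[u8]>(), size_of::<*const u8>() + size_of::<usize>());

    println!("data_ptr = {:p}, length = {}", slice.as_ptr(), slice.len());
}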
-
-fn subroutine_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                      unique_type_id: UniqueTypeId,
-                                      signature: &ty::PolyFnSig<'tcx>,
-                                      span: Span)
-                                      -> MetadataCreationResult
-{
-    let signature = cx.tcx().erase_late_bound_regions(signature);
-
-    let mut signature_metadata: Vec<DIType> = Vec::with_capacity(signature.inputs.len() + 1);
-
-    // return type
-    signature_metadata.push(match signature.output {
-        ty::FnConverging(ret_ty) => match ret_ty.sty {
-            ty::TyTuple(ref tys) if tys.is_empty() => ptr::null_mut(),
-            _ => type_metadata(cx, ret_ty, span)
-        },
-        ty::FnDiverging => diverging_type_metadata(cx)
-    });
-
-    // regular arguments
-    for &argument_type in &signature.inputs {
-        signature_metadata.push(type_metadata(cx, argument_type, span));
-    }
-
-    return_if_metadata_created_in_meantime!(cx, unique_type_id);
-
-    return MetadataCreationResult::new(
-        unsafe {
-            llvm::LLVMDIBuilderCreateSubroutineType(
-                DIB(cx),
-                NO_FILE_METADATA,
-                create_DIArray(DIB(cx), &signature_metadata[..]))
-        },
-        false);
-}
-
-// FIXME(1563) This is all a bit of a hack because 'trait pointer' is an ill-
-// defined concept. For the case of an actual trait pointer (i.e., Box<Trait>,
-// &Trait), trait_object_type should be the whole thing (e.g, Box<Trait>) and
-// trait_type should be the actual trait (e.g., Trait). Where the trait is part
-// of a DST struct, there is no trait_object_type and the results of this
-// function will be a little bit weird.
-fn trait_pointer_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                    trait_type: Ty<'tcx>,
-                                    trait_object_type: Option<Ty<'tcx>>,
-                                    unique_type_id: UniqueTypeId)
-                                    -> DIType {
-    // The implementation provided here is a stub. It makes sure that the trait
-    // type is assigned the correct name, size, namespace, and source location.
-    // But it does not describe the trait's methods.
-
-    let def_id = match trait_type.sty {
-        ty::TyTrait(ref data) => data.principal_def_id(),
-        _ => {
-            cx.sess().bug(&format!("debuginfo: Unexpected trait-object type in \
-                                   trait_pointer_metadata(): {:?}",
-                                   trait_type));
-        }
-    };
-
-    let trait_object_type = trait_object_type.unwrap_or(trait_type);
-    let trait_type_name =
-        compute_debuginfo_type_name(cx, trait_object_type, false);
-
-    let (containing_scope, _) = get_namespace_and_span_for_item(cx, def_id);
-
-    let trait_llvm_type = type_of::type_of(cx, trait_object_type);
-
-    composite_type_metadata(cx,
-                            trait_llvm_type,
-                            &trait_type_name[..],
-                            unique_type_id,
-                            &[],
-                            containing_scope,
-                            NO_FILE_METADATA,
-                            codemap::DUMMY_SP)
-}
-
-pub fn type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                               t: Ty<'tcx>,
-                               usage_site_span: Span)
-                               -> DIType {
-    // Get the unique type id of this type.
-    let unique_type_id = {
-        let mut type_map = debug_context(cx).type_map.borrow_mut();
-        // First, try to find the type in TypeMap. If we have seen it before, we
-        // can exit early here.
-        match type_map.find_metadata_for_type(t) {
-            Some(metadata) => {
-                return metadata;
-            },
-            None => {
-                // The Ty is not in the TypeMap but maybe we have already seen
-                // an equivalent type (e.g. only differing in region arguments).
-                // In order to find out, generate the unique type id and look
-                // that up.
-                let unique_type_id = type_map.get_unique_type_id_of_type(cx, t);
-                match type_map.find_metadata_for_unique_id(unique_type_id) {
-                    Some(metadata) => {
-                        // There is already an equivalent type in the TypeMap.
-                        // Register this Ty as an alias in the cache and
-                        // return the cached metadata.
-                        type_map.register_type_with_metadata(cx, t, metadata);
-                        return metadata;
-                    },
-                    None => {
-                        // There really is no type metadata for this type, so
-                        // proceed by creating it.
-                        unique_type_id
-                    }
-                }
-            }
-        }
-    };
-
-    debug!("type_metadata: {:?}", t);
-
-    let sty = &t.sty;
-    let MetadataCreationResult { metadata, already_stored_in_typemap } = match *sty {
-        ty::TyBool     |
-        ty::TyChar     |
-        ty::TyInt(_)   |
-        ty::TyUint(_)  |
-        ty::TyFloat(_) => {
-            MetadataCreationResult::new(basic_type_metadata(cx, t), false)
-        }
-        ty::TyTuple(ref elements) if elements.is_empty() => {
-            MetadataCreationResult::new(basic_type_metadata(cx, t), false)
-        }
-        ty::TyEnum(def, _) => {
-            prepare_enum_metadata(cx,
-                                  t,
-                                  def.did,
-                                  unique_type_id,
-                                  usage_site_span).finalize(cx)
-        }
-        ty::TyArray(typ, len) => {
-            fixed_vec_metadata(cx, unique_type_id, typ, Some(len as u64), usage_site_span)
-        }
-        ty::TySlice(typ) => {
-            fixed_vec_metadata(cx, unique_type_id, typ, None, usage_site_span)
-        }
-        ty::TyStr => {
-            fixed_vec_metadata(cx, unique_type_id, cx.tcx().types.i8, None, usage_site_span)
-        }
-        ty::TyTrait(..) => {
-            MetadataCreationResult::new(
-                        trait_pointer_metadata(cx, t, None, unique_type_id),
-            false)
-        }
-        ty::TyBox(ty) |
-        ty::TyRawPtr(ty::TypeAndMut{ty, ..}) |
-        ty::TyRef(_, ty::TypeAndMut{ty, ..}) => {
-            match ty.sty {
-                ty::TySlice(typ) => {
-                    vec_slice_metadata(cx, t, typ, unique_type_id, usage_site_span)
-                }
-                ty::TyStr => {
-                    vec_slice_metadata(cx, t, cx.tcx().types.u8, unique_type_id, usage_site_span)
-                }
-                ty::TyTrait(..) => {
-                    MetadataCreationResult::new(
-                        trait_pointer_metadata(cx, ty, Some(t), unique_type_id),
-                        false)
-                }
-                _ => {
-                    let pointee_metadata = type_metadata(cx, ty, usage_site_span);
-
-                    match debug_context(cx).type_map
-                                           .borrow()
-                                           .find_metadata_for_unique_id(unique_type_id) {
-                        Some(metadata) => return metadata,
-                        None => { /* proceed normally */ }
-                    };
-
-                    MetadataCreationResult::new(pointer_type_metadata(cx, t, pointee_metadata),
-                                                false)
-                }
-            }
-        }
-        ty::TyBareFn(_, ref barefnty) => {
-            let fn_metadata = subroutine_type_metadata(cx,
-                                                       unique_type_id,
-                                                       &barefnty.sig,
-                                                       usage_site_span).metadata;
-            match debug_context(cx).type_map
-                                   .borrow()
-                                   .find_metadata_for_unique_id(unique_type_id) {
-                Some(metadata) => return metadata,
-                None => { /* proceed normally */ }
-            };
-
-            // This is actually a function pointer, so wrap it in pointer DI
-            MetadataCreationResult::new(pointer_type_metadata(cx, t, fn_metadata), false)
-
-        }
-        ty::TyClosure(_, ref substs) => {
-            prepare_tuple_metadata(cx,
-                                   t,
-                                   &substs.upvar_tys,
-                                   unique_type_id,
-                                   usage_site_span).finalize(cx)
-        }
-        ty::TyStruct(..) => {
-            prepare_struct_metadata(cx,
-                                    t,
-                                    unique_type_id,
-                                    usage_site_span).finalize(cx)
-        }
-        ty::TyTuple(ref elements) => {
-            prepare_tuple_metadata(cx,
-                                   t,
-                                   &elements[..],
-                                   unique_type_id,
-                                   usage_site_span).finalize(cx)
-        }
-        _ => {
-            cx.sess().bug(&format!("debuginfo: unexpected type in type_metadata: {:?}",
-                                  sty))
-        }
-    };
-
-    {
-        let mut type_map = debug_context(cx).type_map.borrow_mut();
-
-        if already_stored_in_typemap {
-            // Also make sure that we already have a TypeMap entry for the unique type id.
-            let metadata_for_uid = match type_map.find_metadata_for_unique_id(unique_type_id) {
-                Some(metadata) => metadata,
-                None => {
-                    let unique_type_id_str =
-                        type_map.get_unique_type_id_as_string(unique_type_id);
-                    let error_message = format!("Expected type metadata for unique \
-                                                 type id '{}' to already be in \
-                                                 the debuginfo::TypeMap but it \
-                                                 was not. (Ty = {})",
-                                                &unique_type_id_str[..],
-                                                t);
-                    cx.sess().span_bug(usage_site_span, &error_message[..]);
-                }
-            };
-
-            match type_map.find_metadata_for_type(t) {
-                Some(metadata) => {
-                    if metadata != metadata_for_uid {
-                        let unique_type_id_str =
-                            type_map.get_unique_type_id_as_string(unique_type_id);
-                        let error_message = format!("Mismatch between Ty and \
-                                                     UniqueTypeId maps in \
-                                                     debuginfo::TypeMap. \
-                                                     UniqueTypeId={}, Ty={}",
-                            &unique_type_id_str[..],
-                            t);
-                        cx.sess().span_bug(usage_site_span, &error_message[..]);
-                    }
-                }
-                None => {
-                    type_map.register_type_with_metadata(cx, t, metadata);
-                }
-            }
-        } else {
-            type_map.register_type_with_metadata(cx, t, metadata);
-            type_map.register_unique_id_with_metadata(cx, unique_type_id, metadata);
-        }
-    }
-
-    metadata
-}
-
-pub fn file_metadata(cx: &CrateContext, full_path: &str) -> DIFile {
-    // FIXME (#9639): This needs to handle non-utf8 paths
-    let work_dir = cx.sess().working_dir.to_str().unwrap();
-    let file_name =
-        if full_path.starts_with(work_dir) {
-            &full_path[work_dir.len() + 1..full_path.len()]
-        } else {
-            full_path
-        };
-
-    file_metadata_(cx, full_path, file_name, &work_dir)
-}
-
-pub fn unknown_file_metadata(cx: &CrateContext) -> DIFile {
-    // Regular filenames should not be empty, so we abuse an empty name as the
-    // key for the special unknown file metadata
-    file_metadata_(cx, "", "<unknown>", "")
-}
-
-fn file_metadata_(cx: &CrateContext, key: &str, file_name: &str, work_dir: &str) -> DIFile {
-    match debug_context(cx).created_files.borrow().get(key) {
-        Some(file_metadata) => return *file_metadata,
-        None => ()
-    }
-
-    debug!("file_metadata: file_name: {}, work_dir: {}", file_name, work_dir);
-
-    let file_name = CString::new(file_name).unwrap();
-    let work_dir = CString::new(work_dir).unwrap();
-    let file_metadata = unsafe {
-        llvm::LLVMDIBuilderCreateFile(DIB(cx), file_name.as_ptr(),
-                                      work_dir.as_ptr())
-    };
-
-    let mut created_files = debug_context(cx).created_files.borrow_mut();
-    created_files.insert(key.to_string(), file_metadata);
-    file_metadata
-}
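A small standalone sketch of the path handling in file_metadata() above (the helper name is made up for the example): paths under the compiler's working directory are recorded relative to it, and anything else is kept as given.

// Hypothetical helper mirroring the prefix-stripping logic in file_metadata().
fn relativize<'a>(full_path: &'a str, work_dir: &str) -> &'a str {
    if full_path.starts_with(work_dir) {
        &full_path[work_dir.len() + 1..]
    } else {
        full_path
    }
}

fn main() {
    assert_eq!(relativize("/build/crate/src/lib.rs", "/build/crate"), "src/lib.rs");
    assert_eq!(relativize("/usr/src/other.rs", "/build/crate"), "/usr/src/other.rs");
    println!("paths under the working directory become relative");
}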
-
-/// Finds the scope metadata node for the given AST node.
-pub fn scope_metadata(fcx: &FunctionContext,
-                  node_id: ast::NodeId,
-                  error_reporting_span: Span)
-               -> DIScope {
-    let scope_map = &fcx.debug_context
-                        .get_ref(fcx.ccx, error_reporting_span)
-                        .scope_map;
-    match scope_map.borrow().get(&node_id).cloned() {
-        Some(scope_metadata) => scope_metadata,
-        None => {
-            let node = fcx.ccx.tcx().map.get(node_id);
-
-            fcx.ccx.sess().span_bug(error_reporting_span,
-                &format!("debuginfo: Could not find scope info for node {:?}",
-                        node));
-        }
-    }
-}
-
-pub fn diverging_type_metadata(cx: &CrateContext) -> DIType {
-    unsafe {
-        llvm::LLVMDIBuilderCreateBasicType(
-            DIB(cx),
-            "!\0".as_ptr() as *const _,
-            bytes_to_bits(0),
-            bytes_to_bits(0),
-            DW_ATE_unsigned)
-    }
-}
-
-fn basic_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                 t: Ty<'tcx>) -> DIType {
-
-    debug!("basic_type_metadata: {:?}", t);
-
-    let (name, encoding) = match t.sty {
-        ty::TyTuple(ref elements) if elements.is_empty() =>
-            ("()", DW_ATE_unsigned),
-        ty::TyBool => ("bool", DW_ATE_boolean),
-        ty::TyChar => ("char", DW_ATE_unsigned_char),
-        ty::TyInt(int_ty) => {
-            (int_ty.ty_to_string(), DW_ATE_signed)
-        },
-        ty::TyUint(uint_ty) => {
-            (uint_ty.ty_to_string(), DW_ATE_unsigned)
-        },
-        ty::TyFloat(float_ty) => {
-            (float_ty.ty_to_string(), DW_ATE_float)
-        },
-        _ => cx.sess().bug("debuginfo::basic_type_metadata - t is invalid type")
-    };
-
-    let llvm_type = type_of::type_of(cx, t);
-    let (size, align) = size_and_align_of(cx, llvm_type);
-    let name = CString::new(name).unwrap();
-    let ty_metadata = unsafe {
-        llvm::LLVMDIBuilderCreateBasicType(
-            DIB(cx),
-            name.as_ptr(),
-            bytes_to_bits(size),
-            bytes_to_bits(align),
-            encoding)
-    };
-
-    return ty_metadata;
-}
-
-fn pointer_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                   pointer_type: Ty<'tcx>,
-                                   pointee_type_metadata: DIType)
-                                   -> DIType {
-    let pointer_llvm_type = type_of::type_of(cx, pointer_type);
-    let (pointer_size, pointer_align) = size_and_align_of(cx, pointer_llvm_type);
-    let name = compute_debuginfo_type_name(cx, pointer_type, false);
-    let name = CString::new(name).unwrap();
-    let ptr_metadata = unsafe {
-        llvm::LLVMDIBuilderCreatePointerType(
-            DIB(cx),
-            pointee_type_metadata,
-            bytes_to_bits(pointer_size),
-            bytes_to_bits(pointer_align),
-            name.as_ptr())
-    };
-    return ptr_metadata;
-}
-
-pub fn compile_unit_metadata(cx: &CrateContext) -> DIDescriptor {
-    let work_dir = &cx.sess().working_dir;
-    let compile_unit_name = match cx.sess().local_crate_source_file {
-        None => fallback_path(cx),
-        Some(ref abs_path) => {
-            if abs_path.is_relative() {
-                cx.sess().warn("debuginfo: Invalid path to crate's local root source file!");
-                fallback_path(cx)
-            } else {
-                match abs_path.strip_prefix(work_dir) {
-                    Ok(ref p) if p.is_relative() => {
-                        if p.starts_with(Path::new("./")) {
-                            path2cstr(p)
-                        } else {
-                            path2cstr(&Path::new(".").join(p))
-                        }
-                    }
-                    _ => fallback_path(cx)
-                }
-            }
-        }
-    };
-
-    debug!("compile_unit_metadata: {:?}", compile_unit_name);
-    let producer = format!("rustc version {}",
-                           (option_env!("CFG_VERSION")).expect("CFG_VERSION"));
-
-    let compile_unit_name = compile_unit_name.as_ptr();
-    let work_dir = path2cstr(&work_dir);
-    let producer = CString::new(producer).unwrap();
-    let flags = "\0";
-    let split_name = "\0";
-    return unsafe {
-        llvm::LLVMDIBuilderCreateCompileUnit(
-            debug_context(cx).builder,
-            DW_LANG_RUST,
-            compile_unit_name,
-            work_dir.as_ptr(),
-            producer.as_ptr(),
-            cx.sess().opts.optimize != config::OptLevel::No,
-            flags.as_ptr() as *const _,
-            0,
-            split_name.as_ptr() as *const _)
-    };
-
-    fn fallback_path(cx: &CrateContext) -> CString {
-        CString::new(cx.link_meta().crate_name.clone()).unwrap()
-    }
-}
-
-struct MetadataCreationResult {
-    metadata: DIType,
-    already_stored_in_typemap: bool
-}
-
-impl MetadataCreationResult {
-    fn new(metadata: DIType, already_stored_in_typemap: bool) -> MetadataCreationResult {
-        MetadataCreationResult {
-            metadata: metadata,
-            already_stored_in_typemap: already_stored_in_typemap
-        }
-    }
-}
-
-#[derive(Debug)]
-enum MemberOffset {
-    FixedMemberOffset { bytes: usize },
-    // For ComputedMemberOffset, the offset is read from the llvm type definition.
-    ComputedMemberOffset
-}
-
-// Description of a type member, which can either be a regular field (as in
-// structs or tuples) or an enum variant.
-#[derive(Debug)]
-struct MemberDescription {
-    name: String,
-    llvm_type: Type,
-    type_metadata: DIType,
-    offset: MemberOffset,
-    flags: c_uint
-}
-
-// A factory for MemberDescriptions. It produces a list of member descriptions
-// for some record-like type. MemberDescriptionFactories are used to defer the
-// creation of type member descriptions in order to break cycles arising from
-// recursive type definitions.
-enum MemberDescriptionFactory<'tcx> {
-    StructMDF(StructMemberDescriptionFactory<'tcx>),
-    TupleMDF(TupleMemberDescriptionFactory<'tcx>),
-    EnumMDF(EnumMemberDescriptionFactory<'tcx>),
-    VariantMDF(VariantMemberDescriptionFactory<'tcx>)
-}
-
-impl<'tcx> MemberDescriptionFactory<'tcx> {
-    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-                                      -> Vec<MemberDescription> {
-        match *self {
-            StructMDF(ref this) => {
-                this.create_member_descriptions(cx)
-            }
-            TupleMDF(ref this) => {
-                this.create_member_descriptions(cx)
-            }
-            EnumMDF(ref this) => {
-                this.create_member_descriptions(cx)
-            }
-            VariantMDF(ref this) => {
-                this.create_member_descriptions(cx)
-            }
-        }
-    }
-}
-
-//=-----------------------------------------------------------------------------
-// Structs
-//=-----------------------------------------------------------------------------
-
-// Creates MemberDescriptions for the fields of a struct
-struct StructMemberDescriptionFactory<'tcx> {
-    variant: ty::VariantDef<'tcx>,
-    substs: &'tcx subst::Substs<'tcx>,
-    is_simd: bool,
-    span: Span,
-}
-
-impl<'tcx> StructMemberDescriptionFactory<'tcx> {
-    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-                                      -> Vec<MemberDescription> {
-        if let ty::VariantKind::Unit = self.variant.kind() {
-            return Vec::new();
-        }
-
-        let field_size = if self.is_simd {
-            let fty = monomorphize::field_ty(cx.tcx(),
-                                             self.substs,
-                                             &self.variant.fields[0]);
-            Some(machine::llsize_of_alloc(
-                cx,
-                type_of::type_of(cx, fty)
-            ) as usize)
-        } else {
-            None
-        };
-
-        self.variant.fields.iter().enumerate().map(|(i, f)| {
-            let name = if let ty::VariantKind::Tuple = self.variant.kind() {
-                format!("__{}", i)
-            } else {
-                f.name.to_string()
-            };
-            let fty = monomorphize::field_ty(cx.tcx(), self.substs, f);
-
-            let offset = if self.is_simd {
-                FixedMemberOffset { bytes: i * field_size.unwrap() }
-            } else {
-                ComputedMemberOffset
-            };
-
-            MemberDescription {
-                name: name,
-                llvm_type: type_of::type_of(cx, fty),
-                type_metadata: type_metadata(cx, fty, self.span),
-                offset: offset,
-                flags: FLAGS_NONE,
-            }
-        }).collect()
-    }
-}
-
-
-fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                     struct_type: Ty<'tcx>,
-                                     unique_type_id: UniqueTypeId,
-                                     span: Span)
-                                     -> RecursiveTypeDescription<'tcx> {
-    let struct_name = compute_debuginfo_type_name(cx, struct_type, false);
-    let struct_llvm_type = type_of::in_memory_type_of(cx, struct_type);
-
-    let (variant, substs) = match struct_type.sty {
-        ty::TyStruct(def, substs) => (def.struct_variant(), substs),
-        _ => cx.tcx().sess.bug("prepare_struct_metadata on a non-struct")
-    };
-
-    let (containing_scope, _) = get_namespace_and_span_for_item(cx, variant.did);
-
-    let struct_metadata_stub = create_struct_stub(cx,
-                                                  struct_llvm_type,
-                                                  &struct_name,
-                                                  unique_type_id,
-                                                  containing_scope);
-
-    create_and_register_recursive_type_forward_declaration(
-        cx,
-        struct_type,
-        unique_type_id,
-        struct_metadata_stub,
-        struct_llvm_type,
-        StructMDF(StructMemberDescriptionFactory {
-            variant: variant,
-            substs: substs,
-            is_simd: struct_type.is_simd(),
-            span: span,
-        })
-    )
-}
-
-
-//=-----------------------------------------------------------------------------
-// Tuples
-//=-----------------------------------------------------------------------------
-
-// Creates MemberDescriptions for the fields of a tuple
-struct TupleMemberDescriptionFactory<'tcx> {
-    component_types: Vec<Ty<'tcx>>,
-    span: Span,
-}
-
-impl<'tcx> TupleMemberDescriptionFactory<'tcx> {
-    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-                                      -> Vec<MemberDescription> {
-        self.component_types
-            .iter()
-            .enumerate()
-            .map(|(i, &component_type)| {
-            MemberDescription {
-                name: format!("__{}", i),
-                llvm_type: type_of::type_of(cx, component_type),
-                type_metadata: type_metadata(cx, component_type, self.span),
-                offset: ComputedMemberOffset,
-                flags: FLAGS_NONE,
-            }
-        }).collect()
-    }
-}
-
-fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                    tuple_type: Ty<'tcx>,
-                                    component_types: &[Ty<'tcx>],
-                                    unique_type_id: UniqueTypeId,
-                                    span: Span)
-                                    -> RecursiveTypeDescription<'tcx> {
-    let tuple_name = compute_debuginfo_type_name(cx, tuple_type, false);
-    let tuple_llvm_type = type_of::type_of(cx, tuple_type);
-
-    create_and_register_recursive_type_forward_declaration(
-        cx,
-        tuple_type,
-        unique_type_id,
-        create_struct_stub(cx,
-                           tuple_llvm_type,
-                           &tuple_name[..],
-                           unique_type_id,
-                           NO_SCOPE_METADATA),
-        tuple_llvm_type,
-        TupleMDF(TupleMemberDescriptionFactory {
-            component_types: component_types.to_vec(),
-            span: span,
-        })
-    )
-}
-
-
-//=-----------------------------------------------------------------------------
-// Enums
-//=-----------------------------------------------------------------------------
-
-// Describes the members of an enum value: An enum is described as a union of
-// structs in DWARF. This MemberDescriptionFactory provides the description for
-// the members of this union; so for every variant of the given enum, this
-// factory will produce one MemberDescription (all with no name and a fixed
-// offset of zero bytes).
-struct EnumMemberDescriptionFactory<'tcx> {
-    enum_type: Ty<'tcx>,
-    type_rep: Rc<adt::Repr<'tcx>>,
-    discriminant_type_metadata: Option<DIType>,
-    containing_scope: DIScope,
-    file_metadata: DIFile,
-    span: Span,
-}
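To make the comment above concrete, here is a small runnable example (illustrative only) of the kind of enum this factory handles; conceptually its debuginfo becomes a union with one struct member per variant, every member at offset zero, distinguished by the discriminant.

// Illustrative enum; the DWARF view produced for the General case is roughly a union of
// { struct Circle { <discriminant>, radius }, struct Square { <discriminant>, side } },
// with every union member at FixedMemberOffset { bytes: 0 }.
enum Shape {
    Circle { radius: f64 },
    Square { side: f64 },
}

fn main() {
    let shapes = [Shape::Circle { radius: 1.0 }, Shape::Square { side: 2.0 }];
    for s in &shapes {
        match s {
            Shape::Circle { radius } => println!("circle, radius = {}", radius),
            Shape::Square { side } => println!("square, side = {}", side),
        }
    }
}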
-
-impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
-    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-                                      -> Vec<MemberDescription> {
-        let adt = &self.enum_type.ty_adt_def().unwrap();
-        match *self.type_rep {
-            adt::General(_, ref struct_defs, _) => {
-                let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata
-                    .expect("discriminant type metadata must exist for general enums"));
-                struct_defs
-                    .iter()
-                    .enumerate()
-                    .map(|(i, struct_def)| {
-                        let (variant_type_metadata,
-                             variant_llvm_type,
-                             member_desc_factory) =
-                            describe_enum_variant(cx,
-                                                  self.enum_type,
-                                                  struct_def,
-                                                  &adt.variants[i],
-                                                  discriminant_info,
-                                                  self.containing_scope,
-                                                  self.span);
-
-                        let member_descriptions = member_desc_factory
-                            .create_member_descriptions(cx);
-
-                        set_members_of_composite_type(cx,
-                                                      variant_type_metadata,
-                                                      variant_llvm_type,
-                                                      &member_descriptions);
-                        MemberDescription {
-                            name: "".to_string(),
-                            llvm_type: variant_llvm_type,
-                            type_metadata: variant_type_metadata,
-                            offset: FixedMemberOffset { bytes: 0 },
-                            flags: FLAGS_NONE
-                        }
-                    }).collect()
-            },
-            adt::Univariant(ref struct_def, _) => {
-                assert!(adt.variants.len() <= 1);
-
-                if adt.variants.is_empty() {
-                    vec![]
-                } else {
-                    let (variant_type_metadata,
-                         variant_llvm_type,
-                         member_description_factory) =
-                        describe_enum_variant(cx,
-                                              self.enum_type,
-                                              struct_def,
-                                              &adt.variants[0],
-                                              NoDiscriminant,
-                                              self.containing_scope,
-                                              self.span);
-
-                    let member_descriptions =
-                        member_description_factory.create_member_descriptions(cx);
-
-                    set_members_of_composite_type(cx,
-                                                  variant_type_metadata,
-                                                  variant_llvm_type,
-                                                  &member_descriptions[..]);
-                    vec![
-                        MemberDescription {
-                            name: "".to_string(),
-                            llvm_type: variant_llvm_type,
-                            type_metadata: variant_type_metadata,
-                            offset: FixedMemberOffset { bytes: 0 },
-                            flags: FLAGS_NONE
-                        }
-                    ]
-                }
-            }
-            adt::RawNullablePointer { nndiscr: non_null_variant_index, nnty, .. } => {
-                // As far as debuginfo is concerned, the pointer this enum
-                // represents is still wrapped in a struct. This is to make the
-                // DWARF representation of enums uniform.
-
-                // First create a description of the artificial wrapper struct:
-                let non_null_variant = &adt.variants[non_null_variant_index.0 as usize];
-                let non_null_variant_name = non_null_variant.name.as_str();
-
-                // The llvm type and metadata of the pointer
-                let non_null_llvm_type = type_of::type_of(cx, nnty);
-                let non_null_type_metadata = type_metadata(cx, nnty, self.span);
-
-                // The type of the artificial struct wrapping the pointer
-                let artificial_struct_llvm_type = Type::struct_(cx,
-                                                                &[non_null_llvm_type],
-                                                                false);
-
-                // For the metadata of the wrapper struct, we need to create a
-                // MemberDescription of the struct's single field.
-                let sole_struct_member_description = MemberDescription {
-                    name: match non_null_variant.kind() {
-                        ty::VariantKind::Tuple => "__0".to_string(),
-                        ty::VariantKind::Struct => {
-                            non_null_variant.fields[0].name.to_string()
-                        }
-                        ty::VariantKind::Unit => unreachable!()
-                    },
-                    llvm_type: non_null_llvm_type,
-                    type_metadata: non_null_type_metadata,
-                    offset: FixedMemberOffset { bytes: 0 },
-                    flags: FLAGS_NONE
-                };
-
-                let unique_type_id = debug_context(cx).type_map
-                                                      .borrow_mut()
-                                                      .get_unique_type_id_of_enum_variant(
-                                                          cx,
-                                                          self.enum_type,
-                                                          &non_null_variant_name);
-
-                // Now we can create the metadata of the artificial struct
-                let artificial_struct_metadata =
-                    composite_type_metadata(cx,
-                                            artificial_struct_llvm_type,
-                                            &non_null_variant_name,
-                                            unique_type_id,
-                                            &[sole_struct_member_description],
-                                            self.containing_scope,
-                                            self.file_metadata,
-                                            codemap::DUMMY_SP);
-
-                // Encode the information about the null variant in the union
-                // member's name.
-                let null_variant_index = (1 - non_null_variant_index.0) as usize;
-                let null_variant_name = adt.variants[null_variant_index].name;
-                let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
-                                                0,
-                                                null_variant_name);
-
-                // Finally create the (singleton) list of descriptions of union
-                // members.
-                vec![
-                    MemberDescription {
-                        name: union_member_name,
-                        llvm_type: artificial_struct_llvm_type,
-                        type_metadata: artificial_struct_metadata,
-                        offset: FixedMemberOffset { bytes: 0 },
-                        flags: FLAGS_NONE
-                    }
-                ]
-            },
-            adt::StructWrappedNullablePointer { nonnull: ref struct_def,
-                                                nndiscr,
-                                                ref discrfield, ..} => {
-                // Create a description of the non-null variant
-                let (variant_type_metadata, variant_llvm_type, member_description_factory) =
-                    describe_enum_variant(cx,
-                                          self.enum_type,
-                                          struct_def,
-                                          &adt.variants[nndiscr.0 as usize],
-                                          OptimizedDiscriminant,
-                                          self.containing_scope,
-                                          self.span);
-
-                let variant_member_descriptions =
-                    member_description_factory.create_member_descriptions(cx);
-
-                set_members_of_composite_type(cx,
-                                              variant_type_metadata,
-                                              variant_llvm_type,
-                                              &variant_member_descriptions[..]);
-
-                // Encode the information about the null variant in the union
-                // member's name.
-                let null_variant_index = (1 - nndiscr.0) as usize;
-                let null_variant_name = adt.variants[null_variant_index].name;
-                let discrfield = discrfield.iter()
-                                           .skip(1)
-                                           .map(|x| x.to_string())
-                                           .collect::<Vec<_>>().join("$");
-                let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
-                                                discrfield,
-                                                null_variant_name);
-
-                // Create the (singleton) list of descriptions of union members.
-                vec![
-                    MemberDescription {
-                        name: union_member_name,
-                        llvm_type: variant_llvm_type,
-                        type_metadata: variant_type_metadata,
-                        offset: FixedMemberOffset { bytes: 0 },
-                        flags: FLAGS_NONE
-                    }
-                ]
-            },
-            adt::CEnum(..) => cx.sess().span_bug(self.span, "This should be unreachable.")
-        }
-    }
-}
-
-// Creates MemberDescriptions for the fields of a single enum variant.
-struct VariantMemberDescriptionFactory<'tcx> {
-    args: Vec<(String, Ty<'tcx>)>,
-    discriminant_type_metadata: Option<DIType>,
-    span: Span,
-}
-
-impl<'tcx> VariantMemberDescriptionFactory<'tcx> {
-    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-                                      -> Vec<MemberDescription> {
-        self.args.iter().enumerate().map(|(i, &(ref name, ty))| {
-            MemberDescription {
-                name: name.to_string(),
-                llvm_type: type_of::type_of(cx, ty),
-                type_metadata: match self.discriminant_type_metadata {
-                    Some(metadata) if i == 0 => metadata,
-                    _ => type_metadata(cx, ty, self.span)
-                },
-                offset: ComputedMemberOffset,
-                flags: FLAGS_NONE
-            }
-        }).collect()
-    }
-}
-
-#[derive(Copy, Clone)]
-enum EnumDiscriminantInfo {
-    RegularDiscriminant(DIType),
-    OptimizedDiscriminant,
-    NoDiscriminant
-}
-
-// Returns a tuple of (1) type_metadata_stub of the variant, (2) the llvm_type
-// of the variant, and (3) a MemberDescriptionFactory for producing the
-// descriptions of the fields of the variant. This is a rudimentary version of a
-// full RecursiveTypeDescription.
-fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                   enum_type: Ty<'tcx>,
-                                   struct_def: &adt::Struct<'tcx>,
-                                   variant: ty::VariantDef<'tcx>,
-                                   discriminant_info: EnumDiscriminantInfo,
-                                   containing_scope: DIScope,
-                                   span: Span)
-                                   -> (DICompositeType, Type, MemberDescriptionFactory<'tcx>) {
-    let variant_llvm_type =
-        Type::struct_(cx,
-                      &struct_def.fields
-                                 .iter()
-                                 .map(|&t| type_of::type_of(cx, t))
-                                 .collect::<Vec<_>>(),
-                      struct_def.packed);
-    // Could do some consistency checks here: size, align, field count, discr type
-
-    let variant_name = variant.name.as_str();
-    let unique_type_id = debug_context(cx).type_map
-                                          .borrow_mut()
-                                          .get_unique_type_id_of_enum_variant(
-                                              cx,
-                                              enum_type,
-                                              &variant_name);
-
-    let metadata_stub = create_struct_stub(cx,
-                                           variant_llvm_type,
-                                           &variant_name,
-                                           unique_type_id,
-                                           containing_scope);
-
-    // Get the argument names from the enum variant info
-    let mut arg_names: Vec<_> = match variant.kind() {
-        ty::VariantKind::Unit => vec![],
-        ty::VariantKind::Tuple => {
-            variant.fields
-                   .iter()
-                   .enumerate()
-                   .map(|(i, _)| format!("__{}", i))
-                   .collect()
-        }
-        ty::VariantKind::Struct => {
-            variant.fields
-                   .iter()
-                   .map(|f| f.name.to_string())
-                   .collect()
-        }
-    };
-
-    // If this is not a univariant enum, there is also the discriminant field.
-    match discriminant_info {
-        RegularDiscriminant(_) => arg_names.insert(0, "RUST$ENUM$DISR".to_string()),
-        _ => { /* do nothing */ }
-    };
-
-    // Build an array of (field name, field type) pairs to be captured in the factory closure.
-    let args: Vec<(String, Ty)> = arg_names.iter()
-        .zip(&struct_def.fields)
-        .map(|(s, &t)| (s.to_string(), t))
-        .collect();
-
-    let member_description_factory =
-        VariantMDF(VariantMemberDescriptionFactory {
-            args: args,
-            discriminant_type_metadata: match discriminant_info {
-                RegularDiscriminant(discriminant_type_metadata) => {
-                    Some(discriminant_type_metadata)
-                }
-                _ => None
-            },
-            span: span,
-        });
-
-    (metadata_stub, variant_llvm_type, member_description_factory)
-}
-
-fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                   enum_type: Ty<'tcx>,
-                                   enum_def_id: DefId,
-                                   unique_type_id: UniqueTypeId,
-                                   span: Span)
-                                   -> RecursiveTypeDescription<'tcx> {
-    let enum_name = compute_debuginfo_type_name(cx, enum_type, false);
-
-    let (containing_scope, _) = get_namespace_and_span_for_item(cx, enum_def_id);
-    // FIXME: This should emit actual file metadata for the enum, but we
-    // currently can't get the necessary information when it comes to types
-    // imported from other crates. Formerly we violated the ODR when performing
-    // LTO because we emitted debuginfo for the same type with varying file
-    // metadata, so as a workaround we pretend that the type comes from
-    // <unknown>
-    let file_metadata = unknown_file_metadata(cx);
-
-    let variants = &enum_type.ty_adt_def().unwrap().variants;
-
-    let enumerators_metadata: Vec<DIDescriptor> = variants
-        .iter()
-        .map(|v| {
-            let token = v.name.as_str();
-            let name = CString::new(token.as_bytes()).unwrap();
-            unsafe {
-                llvm::LLVMDIBuilderCreateEnumerator(
-                    DIB(cx),
-                    name.as_ptr(),
-                    v.disr_val as u64)
-            }
-        })
-        .collect();
-
-    let discriminant_type_metadata = |inttype: syntax::attr::IntType| {
-        let disr_type_key = (enum_def_id, inttype);
-        let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types
-                                                                 .borrow()
-                                                                 .get(&disr_type_key).cloned();
-        match cached_discriminant_type_metadata {
-            Some(discriminant_type_metadata) => discriminant_type_metadata,
-            None => {
-                let discriminant_llvm_type = adt::ll_inttype(cx, inttype);
-                let (discriminant_size, discriminant_align) =
-                    size_and_align_of(cx, discriminant_llvm_type);
-                let discriminant_base_type_metadata =
-                    type_metadata(cx,
-                                  adt::ty_of_inttype(cx.tcx(), inttype),
-                                  codemap::DUMMY_SP);
-                let discriminant_name = get_enum_discriminant_name(cx, enum_def_id);
-
-                let name = CString::new(discriminant_name.as_bytes()).unwrap();
-                let discriminant_type_metadata = unsafe {
-                    llvm::LLVMDIBuilderCreateEnumerationType(
-                        DIB(cx),
-                        containing_scope,
-                        name.as_ptr(),
-                        NO_FILE_METADATA,
-                        UNKNOWN_LINE_NUMBER,
-                        bytes_to_bits(discriminant_size),
-                        bytes_to_bits(discriminant_align),
-                        create_DIArray(DIB(cx), &enumerators_metadata),
-                        discriminant_base_type_metadata)
-                };
-
-                debug_context(cx).created_enum_disr_types
-                                 .borrow_mut()
-                                 .insert(disr_type_key, discriminant_type_metadata);
-
-                discriminant_type_metadata
-            }
-        }
-    };
-
-    let type_rep = adt::represent_type(cx, enum_type);
-
-    let discriminant_type_metadata = match *type_rep {
-        adt::CEnum(inttype, _, _) => {
-            return FinalMetadata(discriminant_type_metadata(inttype))
-        },
-        adt::RawNullablePointer { .. }           |
-        adt::StructWrappedNullablePointer { .. } |
-        adt::Univariant(..)                      => None,
-        adt::General(inttype, _, _) => Some(discriminant_type_metadata(inttype)),
-    };
-
-    let enum_llvm_type = type_of::type_of(cx, enum_type);
-    let (enum_type_size, enum_type_align) = size_and_align_of(cx, enum_llvm_type);
-
-    let unique_type_id_str = debug_context(cx)
-                             .type_map
-                             .borrow()
-                             .get_unique_type_id_as_string(unique_type_id);
-
-    let enum_name = CString::new(enum_name).unwrap();
-    let unique_type_id_str = CString::new(unique_type_id_str.as_bytes()).unwrap();
-    let enum_metadata = unsafe {
-        llvm::LLVMDIBuilderCreateUnionType(
-            DIB(cx),
-            containing_scope,
-            enum_name.as_ptr(),
-            file_metadata,
-            UNKNOWN_LINE_NUMBER,
-            bytes_to_bits(enum_type_size),
-            bytes_to_bits(enum_type_align),
-            0, // Flags
-            ptr::null_mut(),
-            0, // RuntimeLang
-            unique_type_id_str.as_ptr())
-    };
-
-    return create_and_register_recursive_type_forward_declaration(
-        cx,
-        enum_type,
-        unique_type_id,
-        enum_metadata,
-        enum_llvm_type,
-        EnumMDF(EnumMemberDescriptionFactory {
-            enum_type: enum_type,
-            type_rep: type_rep.clone(),
-            discriminant_type_metadata: discriminant_type_metadata,
-            containing_scope: containing_scope,
-            file_metadata: file_metadata,
-            span: span,
-        }),
-    );
-
-    fn get_enum_discriminant_name(cx: &CrateContext,
-                                  def_id: DefId)
-                                  -> token::InternedString {
-        cx.tcx().item_name(def_id).as_str()
-    }
-}
-
-/// Creates debug information for a composite type, that is, anything that
-/// results in a LLVM struct.
-///
-/// Examples of Rust types to use this are: structs, tuples, boxes, vecs, and enums.
-fn composite_type_metadata(cx: &CrateContext,
-                           composite_llvm_type: Type,
-                           composite_type_name: &str,
-                           composite_type_unique_id: UniqueTypeId,
-                           member_descriptions: &[MemberDescription],
-                           containing_scope: DIScope,
-
-                           // Ignore source location information as long as it
-                           // can't be reconstructed for non-local crates.
-                           _file_metadata: DIFile,
-                           _definition_span: Span)
-                           -> DICompositeType {
-    // Create the (empty) struct metadata node ...
-    let composite_type_metadata = create_struct_stub(cx,
-                                                     composite_llvm_type,
-                                                     composite_type_name,
-                                                     composite_type_unique_id,
-                                                     containing_scope);
-    // ... and immediately create and add the member descriptions.
-    set_members_of_composite_type(cx,
-                                  composite_type_metadata,
-                                  composite_llvm_type,
-                                  member_descriptions);
-
-    return composite_type_metadata;
-}
-
-fn set_members_of_composite_type(cx: &CrateContext,
-                                 composite_type_metadata: DICompositeType,
-                                 composite_llvm_type: Type,
-                                 member_descriptions: &[MemberDescription]) {
-    // In some rare cases LLVM metadata uniquing would lead to an existing type
-    // description being used instead of a new one created in
-    // create_struct_stub. This would cause a hard-to-trace assertion in
-    // DICompositeType::SetTypeArray(). The following check makes sure that we
-    // get a better error message if this should happen again due to some
-    // regression.
-    {
-        let mut composite_types_completed =
-            debug_context(cx).composite_types_completed.borrow_mut();
-        if composite_types_completed.contains(&composite_type_metadata) {
-            cx.sess().bug("debuginfo::set_members_of_composite_type() - \
-                           Already completed forward declaration re-encountered.");
-        } else {
-            composite_types_completed.insert(composite_type_metadata);
-        }
-    }
-
-    let member_metadata: Vec<DIDescriptor> = member_descriptions
-        .iter()
-        .enumerate()
-        .map(|(i, member_description)| {
-            let (member_size, member_align) = size_and_align_of(cx, member_description.llvm_type);
-            let member_offset = match member_description.offset {
-                FixedMemberOffset { bytes } => bytes as u64,
-                ComputedMemberOffset => machine::llelement_offset(cx, composite_llvm_type, i)
-            };
-
-            let member_name = member_description.name.as_bytes();
-            let member_name = CString::new(member_name).unwrap();
-            unsafe {
-                llvm::LLVMDIBuilderCreateMemberType(
-                    DIB(cx),
-                    composite_type_metadata,
-                    member_name.as_ptr(),
-                    NO_FILE_METADATA,
-                    UNKNOWN_LINE_NUMBER,
-                    bytes_to_bits(member_size),
-                    bytes_to_bits(member_align),
-                    bytes_to_bits(member_offset),
-                    member_description.flags,
-                    member_description.type_metadata)
-            }
-        })
-        .collect();
-
-    unsafe {
-        let type_array = create_DIArray(DIB(cx), &member_metadata[..]);
-        llvm::LLVMDICompositeTypeSetTypeArray(DIB(cx), composite_type_metadata, type_array);
-    }
-}
-
-// A convenience wrapper around LLVMDIBuilderCreateStructType(). Does not do any
-// caching, does not add any fields to the struct. This can be done later with
-// set_members_of_composite_type().
-fn create_struct_stub(cx: &CrateContext,
-                      struct_llvm_type: Type,
-                      struct_type_name: &str,
-                      unique_type_id: UniqueTypeId,
-                      containing_scope: DIScope)
-                   -> DICompositeType {
-    let (struct_size, struct_align) = size_and_align_of(cx, struct_llvm_type);
-
-    let unique_type_id_str = debug_context(cx).type_map
-                                              .borrow()
-                                              .get_unique_type_id_as_string(unique_type_id);
-    let name = CString::new(struct_type_name).unwrap();
-    let unique_type_id = CString::new(unique_type_id_str.as_bytes()).unwrap();
-    let metadata_stub = unsafe {
-        // LLVMDIBuilderCreateStructType() wants an empty array. A null
-        // pointer will lead to hard-to-debug LLVM assertions later on in
-        // llvm/lib/IR/Value.cpp.
-        let empty_array = create_DIArray(DIB(cx), &[]);
-
-        llvm::LLVMDIBuilderCreateStructType(
-            DIB(cx),
-            containing_scope,
-            name.as_ptr(),
-            NO_FILE_METADATA,
-            UNKNOWN_LINE_NUMBER,
-            bytes_to_bits(struct_size),
-            bytes_to_bits(struct_align),
-            0,
-            ptr::null_mut(),
-            empty_array,
-            0,
-            ptr::null_mut(),
-            unique_type_id.as_ptr())
-    };
-
-    return metadata_stub;
-}
-
-/// Creates debug information for the given global variable.
-///
-/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_global_var_metadata(cx: &CrateContext,
-                                  node_id: ast::NodeId,
-                                  global: ValueRef) {
-    if cx.dbg_cx().is_none() {
-        return;
-    }
-
-    // Don't create debuginfo for globals inlined from other crates. The other
-    // crate should already contain debuginfo for it. More importantly, the
-    // global might not even exist in un-inlined form anywhere, which would
-    // lead to linker errors.
-    if cx.external_srcs().borrow().contains_key(&node_id) {
-        return;
-    }
-
-    let var_item = cx.tcx().map.get(node_id);
-
-    let (name, span) = match var_item {
-        hir_map::NodeItem(item) => {
-            match item.node {
-                hir::ItemStatic(..) => (item.name, item.span),
-                hir::ItemConst(..) => (item.name, item.span),
-                _ => {
-                    cx.sess()
-                      .span_bug(item.span,
-                                &format!("debuginfo::\
-                                         create_global_var_metadata() - \
-                                         Captured var-id refers to \
-                                         unexpected ast_item variant: {:?}",
-                                        var_item))
-                }
-            }
-        },
-        _ => cx.sess().bug(&format!("debuginfo::create_global_var_metadata() \
-                                    - Captured var-id refers to unexpected \
-                                    hir_map variant: {:?}",
-                                   var_item))
-    };
-
-    let (file_metadata, line_number) = if span != codemap::DUMMY_SP {
-        let loc = span_start(cx, span);
-        (file_metadata(cx, &loc.file.name), loc.line as c_uint)
-    } else {
-        (NO_FILE_METADATA, UNKNOWN_LINE_NUMBER)
-    };
-
-    let is_local_to_unit = is_node_local_to_unit(cx, node_id);
-    let variable_type = cx.tcx().node_id_to_type(node_id);
-    let type_metadata = type_metadata(cx, variable_type, span);
-    let node_def_id = cx.tcx().map.local_def_id(node_id);
-    let namespace_node = namespace_for_item(cx, node_def_id);
-    let var_name = name.to_string();
-    let linkage_name =
-        namespace_node.mangled_name_of_contained_item(&var_name[..]);
-    let var_scope = namespace_node.scope;
-
-    let var_name = CString::new(var_name).unwrap();
-    let linkage_name = CString::new(linkage_name).unwrap();
-    unsafe {
-        llvm::LLVMDIBuilderCreateStaticVariable(DIB(cx),
-                                                var_scope,
-                                                var_name.as_ptr(),
-                                                linkage_name.as_ptr(),
-                                                file_metadata,
-                                                line_number,
-                                                type_metadata,
-                                                is_local_to_unit,
-                                                global,
-                                                ptr::null_mut());
-    }
-}
-
-/// Creates debug information for the given local variable.
-///
-/// This function assumes that there's a datum for each pattern component of the
-/// local in `bcx.fcx.lllocals`.
-/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_local_var_metadata(bcx: Block, local: &hir::Local) {
-    if bcx.unreachable.get() ||
-       fn_should_be_ignored(bcx.fcx) ||
-       bcx.sess().opts.debuginfo != FullDebugInfo {
-        return;
-    }
-
-    let cx = bcx.ccx();
-    let def_map = &cx.tcx().def_map;
-    let locals = bcx.fcx.lllocals.borrow();
-
-    pat_util::pat_bindings(def_map, &local.pat, |_, node_id, span, var_name| {
-        let datum = match locals.get(&node_id) {
-            Some(datum) => datum,
-            None => {
-                bcx.sess().span_bug(span,
-                    &format!("no entry in lllocals table for {}",
-                            node_id));
-            }
-        };
-
-        if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() {
-            cx.sess().span_bug(span, "debuginfo::create_local_var_metadata() - \
-                                      Referenced variable location is not an alloca!");
-        }
-
-        let scope_metadata = scope_metadata(bcx.fcx, node_id, span);
-
-        declare_local(bcx,
-                      var_name.node,
-                      datum.ty,
-                      scope_metadata,
-                      VariableAccess::DirectVariable { alloca: datum.val },
-                      VariableKind::LocalVariable,
-                      span);
-    })
-}
-
-/// Creates debug information for a variable captured in a closure.
-///
-/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_captured_var_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                                node_id: ast::NodeId,
-                                                env_pointer: ValueRef,
-                                                env_index: usize,
-                                                captured_by_ref: bool,
-                                                span: Span) {
-    if bcx.unreachable.get() ||
-       fn_should_be_ignored(bcx.fcx) ||
-       bcx.sess().opts.debuginfo != FullDebugInfo {
-        return;
-    }
-
-    let cx = bcx.ccx();
-
-    let ast_item = cx.tcx().map.find(node_id);
-
-    let variable_name = match ast_item {
-        None => {
-            cx.sess().span_bug(span, "debuginfo::create_captured_var_metadata: node not found");
-        }
-        Some(hir_map::NodeLocal(pat)) => {
-            match pat.node {
-                PatKind::Ident(_, ref path1, _) => {
-                    path1.node.name
-                }
-                _ => {
-                    cx.sess()
-                      .span_bug(span,
-                                &format!(
-                                "debuginfo::create_captured_var_metadata() - \
-                                 Captured var-id refers to unexpected \
-                                 hir_map variant: {:?}",
-                                 ast_item));
-                }
-            }
-        }
-        _ => {
-            cx.sess()
-              .span_bug(span,
-                        &format!("debuginfo::create_captured_var_metadata() - \
-                                 Captured var-id refers to unexpected \
-                                 hir_map variant: {:?}",
-                                ast_item));
-        }
-    };
-
-    let variable_type = common::node_id_type(bcx, node_id);
-    let scope_metadata = bcx.fcx.debug_context.get_ref(cx, span).fn_metadata;
-
-    // env_pointer is the alloca containing the pointer to the environment,
-    // so its type is **EnvironmentType. To find out the type of the
-    // environment we have to "dereference" it twice.
-    let llvm_env_data_type = common::val_ty(env_pointer).element_type()
-                                                        .element_type();
-    let byte_offset_of_var_in_env = machine::llelement_offset(cx,
-                                                              llvm_env_data_type,
-                                                              env_index);
-
-    let address_operations = unsafe {
-        [llvm::LLVMDIBuilderCreateOpDeref(),
-         llvm::LLVMDIBuilderCreateOpPlus(),
-         byte_offset_of_var_in_env as i64,
-         llvm::LLVMDIBuilderCreateOpDeref()]
-    };
-
-    let address_op_count = if captured_by_ref {
-        address_operations.len()
-    } else {
-        address_operations.len() - 1
-    };
-
-    let variable_access = VariableAccess::IndirectVariable {
-        alloca: env_pointer,
-        address_operations: &address_operations[..address_op_count]
-    };
-
-    declare_local(bcx,
-                  variable_name,
-                  variable_type,
-                  scope_metadata,
-                  variable_access,
-                  VariableKind::CapturedVariable,
-                  span);
-}
-
-/// Creates debug information for a local variable introduced in the head of a
-/// match-statement arm.
-///
-/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_match_binding_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                                 variable_name: ast::Name,
-                                                 binding: BindingInfo<'tcx>) {
-    if bcx.unreachable.get() ||
-       fn_should_be_ignored(bcx.fcx) ||
-       bcx.sess().opts.debuginfo != FullDebugInfo {
-        return;
-    }
-
-    let scope_metadata = scope_metadata(bcx.fcx, binding.id, binding.span);
-    let aops = unsafe {
-        [llvm::LLVMDIBuilderCreateOpDeref()]
-    };
-    // Regardless of the actual type (`T`) we're always passed the stack slot
-    // (alloca) for the binding. For ByRef bindings that's a `T*` but for ByMove
-    // bindings we actually have `T**`. So to get the actual variable we need to
-    // dereference once more. For ByCopy we just use the stack slot we created
-    // for the binding.
-    let var_access = match binding.trmode {
-        TransBindingMode::TrByCopy(llbinding) |
-        TransBindingMode::TrByMoveIntoCopy(llbinding) => VariableAccess::DirectVariable {
-            alloca: llbinding
-        },
-        TransBindingMode::TrByMoveRef => VariableAccess::IndirectVariable {
-            alloca: binding.llmatch,
-            address_operations: &aops
-        },
-        TransBindingMode::TrByRef => VariableAccess::DirectVariable {
-            alloca: binding.llmatch
-        }
-    };
-
-    declare_local(bcx,
-                  variable_name,
-                  binding.ty,
-                  scope_metadata,
-                  var_access,
-                  VariableKind::LocalVariable,
-                  binding.span);
-}
-
-/// Creates debug information for the given function argument.
-///
-/// This function assumes that there's a datum for each pattern component of the
-/// argument in `bcx.fcx.lllocals`.
-/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_argument_metadata(bcx: Block, arg: &hir::Arg) {
-    if bcx.unreachable.get() ||
-       fn_should_be_ignored(bcx.fcx) ||
-       bcx.sess().opts.debuginfo != FullDebugInfo {
-        return;
-    }
-
-    let def_map = &bcx.tcx().def_map;
-    let scope_metadata = bcx
-                         .fcx
-                         .debug_context
-                         .get_ref(bcx.ccx(), arg.pat.span)
-                         .fn_metadata;
-    let locals = bcx.fcx.lllocals.borrow();
-
-    pat_util::pat_bindings(def_map, &arg.pat, |_, node_id, span, var_name| {
-        let datum = match locals.get(&node_id) {
-            Some(v) => v,
-            None => {
-                bcx.sess().span_bug(span,
-                    &format!("no entry in lllocals table for {}",
-                            node_id));
-            }
-        };
-
-        if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() {
-            bcx.sess().span_bug(span, "debuginfo::create_argument_metadata() - \
-                                       Referenced variable location is not an alloca!");
-        }
-
-        let argument_index = {
-            let counter = &bcx
-                          .fcx
-                          .debug_context
-                          .get_ref(bcx.ccx(), span)
-                          .argument_counter;
-            let argument_index = counter.get();
-            counter.set(argument_index + 1);
-            argument_index
-        };
-
-        declare_local(bcx,
-                      var_name.node,
-                      datum.ty,
-                      scope_metadata,
-                      VariableAccess::DirectVariable { alloca: datum.val },
-                      VariableKind::ArgumentVariable(argument_index),
-                      span);
-    })
-}
diff --git a/src/librustc_trans/trans/debuginfo/mod.rs b/src/librustc_trans/trans/debuginfo/mod.rs
deleted file mode 100644 (file)
index 11dd631..0000000
+++ /dev/null
@@ -1,687 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// See doc.rs for documentation.
-mod doc;
-
-use self::VariableAccess::*;
-use self::VariableKind::*;
-
-use self::utils::{DIB, span_start, assert_type_for_node_id, contains_nodebug_attribute,
-                  create_DIArray, is_node_local_to_unit};
-use self::namespace::{namespace_for_item, NamespaceTreeNode};
-use self::type_names::compute_debuginfo_type_name;
-use self::metadata::{type_metadata, diverging_type_metadata};
-use self::metadata::{file_metadata, scope_metadata, TypeMap, compile_unit_metadata};
-use self::source_loc::InternalDebugLocation;
-
-use llvm;
-use llvm::{ModuleRef, ContextRef, ValueRef};
-use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilderRef, DISubprogram, DIArray,
-                      DIDescriptor, FlagPrototyped};
-use middle::def_id::DefId;
-use middle::infer::normalize_associated_type;
-use middle::subst::{self, Substs};
-use rustc_front;
-use rustc_front::hir;
-
-use trans::common::{NodeIdAndSpan, CrateContext, FunctionContext, Block};
-use trans;
-use trans::{monomorphize, type_of};
-use middle::infer;
-use middle::ty::{self, Ty};
-use session::config::{self, FullDebugInfo, LimitedDebugInfo, NoDebugInfo};
-use util::nodemap::{NodeMap, FnvHashMap, FnvHashSet};
-use rustc::front::map as hir_map;
-
-use libc::c_uint;
-use std::cell::{Cell, RefCell};
-use std::ffi::CString;
-use std::ptr;
-use std::rc::Rc;
-
-use syntax::codemap::{Span, Pos};
-use syntax::{ast, codemap};
-use syntax::abi::Abi;
-use syntax::attr::IntType;
-use syntax::parse::token::{self, special_idents};
-
-pub mod gdb;
-mod utils;
-mod namespace;
-mod type_names;
-mod metadata;
-mod create_scope_map;
-mod source_loc;
-
-pub use self::source_loc::set_source_location;
-pub use self::source_loc::clear_source_location;
-pub use self::source_loc::start_emitting_source_locations;
-pub use self::source_loc::get_cleanup_debug_loc_for_ast_node;
-pub use self::source_loc::with_source_location_override;
-pub use self::metadata::create_match_binding_metadata;
-pub use self::metadata::create_argument_metadata;
-pub use self::metadata::create_captured_var_metadata;
-pub use self::metadata::create_global_var_metadata;
-pub use self::metadata::create_local_var_metadata;
-
-#[allow(non_upper_case_globals)]
-const DW_TAG_auto_variable: c_uint = 0x100;
-#[allow(non_upper_case_globals)]
-const DW_TAG_arg_variable: c_uint = 0x101;
-
-/// A context object for maintaining all state needed by the debuginfo module.
-pub struct CrateDebugContext<'tcx> {
-    llcontext: ContextRef,
-    builder: DIBuilderRef,
-    current_debug_location: Cell<InternalDebugLocation>,
-    created_files: RefCell<FnvHashMap<String, DIFile>>,
-    created_enum_disr_types: RefCell<FnvHashMap<(DefId, IntType), DIType>>,
-
-    type_map: RefCell<TypeMap<'tcx>>,
-    namespace_map: RefCell<FnvHashMap<Vec<ast::Name>, Rc<NamespaceTreeNode>>>,
-
-    // This collection is used to assert that composite types (structs, enums,
-    // ...) have their members only set once:
-    composite_types_completed: RefCell<FnvHashSet<DIType>>,
-}
-
-impl<'tcx> CrateDebugContext<'tcx> {
-    pub fn new(llmod: ModuleRef) -> CrateDebugContext<'tcx> {
-        debug!("CrateDebugContext::new");
-        let builder = unsafe { llvm::LLVMDIBuilderCreate(llmod) };
-        // DIBuilder inherits context from the module, so we'd better use the same one
-        let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) };
-        return CrateDebugContext {
-            llcontext: llcontext,
-            builder: builder,
-            current_debug_location: Cell::new(InternalDebugLocation::UnknownLocation),
-            created_files: RefCell::new(FnvHashMap()),
-            created_enum_disr_types: RefCell::new(FnvHashMap()),
-            type_map: RefCell::new(TypeMap::new()),
-            namespace_map: RefCell::new(FnvHashMap()),
-            composite_types_completed: RefCell::new(FnvHashSet()),
-        };
-    }
-}
-
-pub enum FunctionDebugContext {
-    RegularContext(Box<FunctionDebugContextData>),
-    DebugInfoDisabled,
-    FunctionWithoutDebugInfo,
-}
-
-impl FunctionDebugContext {
-    fn get_ref<'a>(&'a self,
-                   cx: &CrateContext,
-                   span: Span)
-                   -> &'a FunctionDebugContextData {
-        match *self {
-            FunctionDebugContext::RegularContext(box ref data) => data,
-            FunctionDebugContext::DebugInfoDisabled => {
-                cx.sess().span_bug(span,
-                                   FunctionDebugContext::debuginfo_disabled_message());
-            }
-            FunctionDebugContext::FunctionWithoutDebugInfo => {
-                cx.sess().span_bug(span,
-                                   FunctionDebugContext::should_be_ignored_message());
-            }
-        }
-    }
-
-    fn debuginfo_disabled_message() -> &'static str {
-        "debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!"
-    }
-
-    fn should_be_ignored_message() -> &'static str {
-        "debuginfo: Error trying to access FunctionDebugContext for function that should be \
-         ignored by debug info!"
-    }
-}
-
-pub struct FunctionDebugContextData {
-    scope_map: RefCell<NodeMap<DIScope>>,
-    fn_metadata: DISubprogram,
-    argument_counter: Cell<usize>,
-    source_locations_enabled: Cell<bool>,
-    source_location_override: Cell<bool>,
-}
-
-pub enum VariableAccess<'a> {
-    // The llptr given is an alloca containing the variable's value
-    DirectVariable { alloca: ValueRef },
-    // The llptr given is an alloca containing the start of some pointer chain
-    // leading to the variable's content.
-    IndirectVariable { alloca: ValueRef, address_operations: &'a [i64] }
-}
-
-pub enum VariableKind {
-    ArgumentVariable(usize /*index*/),
-    LocalVariable,
-    CapturedVariable,
-}
-
-/// Create any deferred debug metadata nodes
-pub fn finalize(cx: &CrateContext) {
-    if cx.dbg_cx().is_none() {
-        return;
-    }
-
-    debug!("finalize");
-    let _ = compile_unit_metadata(cx);
-
-    if gdb::needs_gdb_debug_scripts_section(cx) {
-        // Add a .debug_gdb_scripts section to this compile-unit. This will
-        // cause GDB to try to load the gdb_load_rust_pretty_printers.py file,
-        // which activates the Rust pretty printers for the binary this
-        // section is contained in.
-        gdb::get_or_insert_gdb_debug_scripts_section_global(cx);
-    }
-
-    unsafe {
-        llvm::LLVMDIBuilderFinalize(DIB(cx));
-        llvm::LLVMDIBuilderDispose(DIB(cx));
-        // By default, debuginfo generation in LLVM uses a higher version of
-        // DWARF than OS X currently understands. We can, however, instruct
-        // LLVM to emit an older version that OS X does understand. For more
-        // info see #11352.
-        // This can be overridden using --llvm-opts -dwarf-version,N.
-        // Android has the same issue (#22398).
-        if cx.sess().target.target.options.is_like_osx ||
-           cx.sess().target.target.options.is_like_android {
-            llvm::LLVMRustAddModuleFlag(cx.llmod(),
-                                        "Dwarf Version\0".as_ptr() as *const _,
-                                        2)
-        }
-
-        // Indicate that we want CodeView debug information on MSVC
-        if cx.sess().target.target.options.is_like_msvc {
-            llvm::LLVMRustAddModuleFlag(cx.llmod(),
-                                        "CodeView\0".as_ptr() as *const _,
-                                        1)
-        }
-
-        // Prevent bitcode readers from deleting the debug info.
-        let ptr = "Debug Info Version\0".as_ptr();
-        llvm::LLVMRustAddModuleFlag(cx.llmod(), ptr as *const _,
-                                    llvm::LLVMRustDebugMetadataVersion());
-    };
-}
-
-/// Creates the function-specific debug context.
-///
-/// Returns the FunctionDebugContext for the function which holds state needed
-/// for debug info creation. The function may also return another variant of the
-/// FunctionDebugContext enum which indicates why no debuginfo should be created
-/// for the function.
-pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                               fn_ast_id: ast::NodeId,
-                                               param_substs: &Substs<'tcx>,
-                                               llfn: ValueRef) -> FunctionDebugContext {
-    if cx.sess().opts.debuginfo == NoDebugInfo {
-        return FunctionDebugContext::DebugInfoDisabled;
-    }
-
-    // Clear the debug location so that no stale source location gets assigned
-    // to code in the function prelude.
-    // Do this here already, in case we do an early exit from this function.
-    source_loc::set_debug_location(cx, InternalDebugLocation::UnknownLocation);
-
-    if fn_ast_id == ast::DUMMY_NODE_ID {
-        // This is a function not linked to any source location, so don't
-        // generate debuginfo for it.
-        return FunctionDebugContext::FunctionWithoutDebugInfo;
-    }
-
-    let empty_generics = rustc_front::util::empty_generics();
-
-    let fnitem = cx.tcx().map.get(fn_ast_id);
-
-    let (name, fn_decl, generics, top_level_block, span, has_path) = match fnitem {
-        hir_map::NodeItem(ref item) => {
-            if contains_nodebug_attribute(&item.attrs) {
-                return FunctionDebugContext::FunctionWithoutDebugInfo;
-            }
-
-            match item.node {
-                hir::ItemFn(ref fn_decl, _, _, _, ref generics, ref top_level_block) => {
-                    (item.name, fn_decl, generics, top_level_block, item.span, true)
-                }
-                _ => {
-                    cx.sess().span_bug(item.span,
-                        "create_function_debug_context: item bound to non-function");
-                }
-            }
-        }
-        hir_map::NodeImplItem(impl_item) => {
-            match impl_item.node {
-                hir::ImplItemKind::Method(ref sig, ref body) => {
-                    if contains_nodebug_attribute(&impl_item.attrs) {
-                        return FunctionDebugContext::FunctionWithoutDebugInfo;
-                    }
-
-                    (impl_item.name,
-                     &sig.decl,
-                     &sig.generics,
-                     body,
-                     impl_item.span,
-                     true)
-                }
-                _ => {
-                    cx.sess().span_bug(impl_item.span,
-                                       "create_function_debug_context() \
-                                        called on non-method impl item?!")
-                }
-            }
-        }
-        hir_map::NodeExpr(ref expr) => {
-            match expr.node {
-                hir::ExprClosure(_, ref fn_decl, ref top_level_block) => {
-                    let name = format!("fn{}", token::gensym("fn"));
-                    let name = token::intern(&name[..]);
-                    (name, fn_decl,
-                        // This is not quite right. It should actually inherit
-                        // the generics of the enclosing function.
-                        &empty_generics,
-                        top_level_block,
-                        expr.span,
-                        // Don't try to lookup the item path:
-                        false)
-                }
-                _ => cx.sess().span_bug(expr.span,
-                        "create_function_debug_context: expected an expr_fn_block here")
-            }
-        }
-        hir_map::NodeTraitItem(trait_item) => {
-            match trait_item.node {
-                hir::MethodTraitItem(ref sig, Some(ref body)) => {
-                    if contains_nodebug_attribute(&trait_item.attrs) {
-                        return FunctionDebugContext::FunctionWithoutDebugInfo;
-                    }
-
-                    (trait_item.name,
-                     &sig.decl,
-                     &sig.generics,
-                     body,
-                     trait_item.span,
-                     true)
-                }
-                _ => {
-                    cx.sess()
-                      .bug(&format!("create_function_debug_context: \
-                                    unexpected sort of node: {:?}",
-                                    fnitem))
-                }
-            }
-        }
-        hir_map::NodeForeignItem(..) |
-        hir_map::NodeVariant(..) |
-        hir_map::NodeStructCtor(..) => {
-            return FunctionDebugContext::FunctionWithoutDebugInfo;
-        }
-        _ => cx.sess().bug(&format!("create_function_debug_context: \
-                                    unexpected sort of node: {:?}",
-                                   fnitem))
-    };
-
-    // This can be the case for functions inlined from another crate
-    if span == codemap::DUMMY_SP {
-        return FunctionDebugContext::FunctionWithoutDebugInfo;
-    }
-
-    let loc = span_start(cx, span);
-    let file_metadata = file_metadata(cx, &loc.file.name);
-
-    let function_type_metadata = unsafe {
-        let fn_signature = get_function_signature(cx,
-                                                  fn_ast_id,
-                                                  param_substs,
-                                                  span);
-        llvm::LLVMDIBuilderCreateSubroutineType(DIB(cx), file_metadata, fn_signature)
-    };
-
-    // get_template_parameters() will append a `<...>` clause to the function
-    // name if necessary.
-    let mut function_name = name.to_string();
-    let template_parameters = get_template_parameters(cx,
-                                                      generics,
-                                                      param_substs,
-                                                      file_metadata,
-                                                      &mut function_name);
-
-    // There is no hir_map::Path for hir::ExprClosure-type functions. For now,
-    // just don't put them into a namespace. In the future this could be improved
-    // somehow (by storing a path in the hir_map, or by constructing a path
-    // from the enclosing function).
-    let (linkage_name, containing_scope) = if has_path {
-        let fn_ast_def_id = cx.tcx().map.local_def_id(fn_ast_id);
-        let namespace_node = namespace_for_item(cx, fn_ast_def_id);
-        let linkage_name = namespace_node.mangled_name_of_contained_item(
-            &function_name[..]);
-        let containing_scope = namespace_node.scope;
-        (linkage_name, containing_scope)
-    } else {
-        (function_name.clone(), file_metadata)
-    };
-
-    // Clang sets this parameter to the opening brace of the function's block,
-    // so let's do this too.
-    let scope_line = span_start(cx, top_level_block.span).line;
-
-    let is_local_to_unit = is_node_local_to_unit(cx, fn_ast_id);
-
-    let function_name = CString::new(function_name).unwrap();
-    let linkage_name = CString::new(linkage_name).unwrap();
-    let fn_metadata = unsafe {
-        llvm::LLVMDIBuilderCreateFunction(
-            DIB(cx),
-            containing_scope,
-            function_name.as_ptr(),
-            linkage_name.as_ptr(),
-            file_metadata,
-            loc.line as c_uint,
-            function_type_metadata,
-            is_local_to_unit,
-            true,
-            scope_line as c_uint,
-            FlagPrototyped as c_uint,
-            cx.sess().opts.optimize != config::OptLevel::No,
-            llfn,
-            template_parameters,
-            ptr::null_mut())
-    };
-
-    let scope_map = create_scope_map::create_scope_map(cx,
-                                                       &fn_decl.inputs,
-                                                       &top_level_block,
-                                                       fn_metadata,
-                                                       fn_ast_id);
-
-    // Initialize fn debug context (including scope map and namespace map)
-    let fn_debug_context = box FunctionDebugContextData {
-        scope_map: RefCell::new(scope_map),
-        fn_metadata: fn_metadata,
-        argument_counter: Cell::new(1),
-        source_locations_enabled: Cell::new(false),
-        source_location_override: Cell::new(false),
-    };
-
-    return FunctionDebugContext::RegularContext(fn_debug_context);
-
-    fn get_function_signature<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                        fn_ast_id: ast::NodeId,
-                                        param_substs: &Substs<'tcx>,
-                                        error_reporting_span: Span) -> DIArray {
-        if cx.sess().opts.debuginfo == LimitedDebugInfo {
-            return create_DIArray(DIB(cx), &[]);
-        }
-
-        // Look up the (monomorphized) type of this function
-        assert_type_for_node_id(cx, fn_ast_id, error_reporting_span);
-        let fn_type = cx.tcx().node_id_to_type(fn_ast_id);
-        let fn_type = monomorphize::apply_param_substs(cx.tcx(), param_substs, &fn_type);
-
-        let (sig, abi) = match fn_type.sty {
-            ty::TyBareFn(_, ref barefnty) => {
-                let sig = cx.tcx().erase_late_bound_regions(&barefnty.sig);
-                let sig = infer::normalize_associated_type(cx.tcx(), &sig);
-                (sig, barefnty.abi)
-            }
-            ty::TyClosure(def_id, ref substs) => {
-                let closure_type = cx.tcx().closure_type(def_id, substs);
-                let sig = cx.tcx().erase_late_bound_regions(&closure_type.sig);
-                let sig = infer::normalize_associated_type(cx.tcx(), &sig);
-                (sig, closure_type.abi)
-            }
-
-            _ => cx.sess().bug("get_function_signature: Expected a function type!")
-        };
-
-        let mut signature = Vec::with_capacity(sig.inputs.len() + 1);
-
-        // Return type -- llvm::DIBuilder wants this at index 0
-        signature.push(match sig.output {
-            ty::FnConverging(ret_ty) => match ret_ty.sty {
-                ty::TyTuple(ref tys) if tys.is_empty() => ptr::null_mut(),
-                _ => type_metadata(cx, ret_ty, codemap::DUMMY_SP)
-            },
-            ty::FnDiverging => diverging_type_metadata(cx)
-        });
-
-        let inputs = &if abi == Abi::RustCall {
-            type_of::untuple_arguments(cx, &sig.inputs)
-        } else {
-            sig.inputs
-        };
-
-        // Argument types
-        for &argument_type in inputs {
-            signature.push(type_metadata(cx, argument_type, codemap::DUMMY_SP));
-        }
-
-        return create_DIArray(DIB(cx), &signature[..]);
-    }
-
-    fn get_template_parameters<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                         generics: &hir::Generics,
-                                         param_substs: &Substs<'tcx>,
-                                         file_metadata: DIFile,
-                                         name_to_append_suffix_to: &mut String)
-                                         -> DIArray
-    {
-        let self_type = param_substs.self_ty();
-        let self_type = normalize_associated_type(cx.tcx(), &self_type);
-
-        // Only true for static default methods:
-        let has_self_type = self_type.is_some();
-
-        if !generics.is_type_parameterized() && !has_self_type {
-            return create_DIArray(DIB(cx), &[]);
-        }
-
-        name_to_append_suffix_to.push('<');
-
-        // The list to be filled with template parameters:
-        let mut template_params: Vec<DIDescriptor> =
-            Vec::with_capacity(generics.ty_params.len() + 1);
-
-        // Handle self type
-        if has_self_type {
-            let actual_self_type = self_type.unwrap();
-            // Add self type name to <...> clause of function name
-            let actual_self_type_name = compute_debuginfo_type_name(
-                cx,
-                actual_self_type,
-                true);
-
-            name_to_append_suffix_to.push_str(&actual_self_type_name[..]);
-
-            if generics.is_type_parameterized() {
-                name_to_append_suffix_to.push_str(",");
-            }
-
-            // Only create type information if full debuginfo is enabled
-            if cx.sess().opts.debuginfo == FullDebugInfo {
-                let actual_self_type_metadata = type_metadata(cx,
-                                                              actual_self_type,
-                                                              codemap::DUMMY_SP);
-
-                let name = special_idents::type_self.name.as_str();
-
-                let name = CString::new(name.as_bytes()).unwrap();
-                let param_metadata = unsafe {
-                    llvm::LLVMDIBuilderCreateTemplateTypeParameter(
-                        DIB(cx),
-                        ptr::null_mut(),
-                        name.as_ptr(),
-                        actual_self_type_metadata,
-                        file_metadata,
-                        0,
-                        0)
-                };
-
-                template_params.push(param_metadata);
-            }
-        }
-
-        // Handle other generic parameters
-        let actual_types = param_substs.types.get_slice(subst::FnSpace);
-        for (index, &hir::TyParam{ name, .. }) in generics.ty_params.iter().enumerate() {
-            let actual_type = actual_types[index];
-            // Add actual type name to <...> clause of function name
-            let actual_type_name = compute_debuginfo_type_name(cx,
-                                                               actual_type,
-                                                               true);
-            name_to_append_suffix_to.push_str(&actual_type_name[..]);
-
-            if index != generics.ty_params.len() - 1 {
-                name_to_append_suffix_to.push_str(",");
-            }
-
-            // Again, only create type information if full debuginfo is enabled
-            if cx.sess().opts.debuginfo == FullDebugInfo {
-                let actual_type_metadata = type_metadata(cx, actual_type, codemap::DUMMY_SP);
-                let name = CString::new(name.as_str().as_bytes()).unwrap();
-                let param_metadata = unsafe {
-                    llvm::LLVMDIBuilderCreateTemplateTypeParameter(
-                        DIB(cx),
-                        ptr::null_mut(),
-                        name.as_ptr(),
-                        actual_type_metadata,
-                        file_metadata,
-                        0,
-                        0)
-                };
-                template_params.push(param_metadata);
-            }
-        }
-
-        name_to_append_suffix_to.push('>');
-
-        return create_DIArray(DIB(cx), &template_params[..]);
-    }
-}
-
-fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                             variable_name: ast::Name,
-                             variable_type: Ty<'tcx>,
-                             scope_metadata: DIScope,
-                             variable_access: VariableAccess,
-                             variable_kind: VariableKind,
-                             span: Span) {
-    let cx: &CrateContext = bcx.ccx();
-
-    let filename = span_start(cx, span).file.name.clone();
-    let file_metadata = file_metadata(cx, &filename[..]);
-
-    let loc = span_start(cx, span);
-    let type_metadata = type_metadata(cx, variable_type, span);
-
-    let (argument_index, dwarf_tag) = match variable_kind {
-        ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable),
-        LocalVariable    |
-        CapturedVariable => (0, DW_TAG_auto_variable)
-    };
-
-    let name = CString::new(variable_name.as_str().as_bytes()).unwrap();
-    match (variable_access, &[][..]) {
-        (DirectVariable { alloca }, address_operations) |
-        (IndirectVariable {alloca, address_operations}, _) => {
-            let metadata = unsafe {
-                llvm::LLVMDIBuilderCreateVariable(
-                    DIB(cx),
-                    dwarf_tag,
-                    scope_metadata,
-                    name.as_ptr(),
-                    file_metadata,
-                    loc.line as c_uint,
-                    type_metadata,
-                    cx.sess().opts.optimize != config::OptLevel::No,
-                    0,
-                    address_operations.as_ptr(),
-                    address_operations.len() as c_uint,
-                    argument_index)
-            };
-            source_loc::set_debug_location(cx, InternalDebugLocation::new(scope_metadata,
-                                                                          loc.line,
-                                                                          loc.col.to_usize()));
-            unsafe {
-                let debug_loc = llvm::LLVMGetCurrentDebugLocation(cx.raw_builder());
-                let instr = llvm::LLVMDIBuilderInsertDeclareAtEnd(
-                    DIB(cx),
-                    alloca,
-                    metadata,
-                    address_operations.as_ptr(),
-                    address_operations.len() as c_uint,
-                    debug_loc,
-                    bcx.llbb);
-
-                llvm::LLVMSetInstDebugLocation(trans::build::B(bcx).llbuilder, instr);
-            }
-        }
-    }
-
-    match variable_kind {
-        ArgumentVariable(_) | CapturedVariable => {
-            assert!(!bcx.fcx
-                        .debug_context
-                        .get_ref(cx, span)
-                        .source_locations_enabled
-                        .get());
-            source_loc::set_debug_location(cx, InternalDebugLocation::UnknownLocation);
-        }
-        _ => { /* nothing to do */ }
-    }
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum DebugLoc {
-    At(ast::NodeId, Span),
-    None
-}
-
-impl DebugLoc {
-    pub fn apply(&self, fcx: &FunctionContext) {
-        match *self {
-            DebugLoc::At(node_id, span) => {
-                source_loc::set_source_location(fcx, node_id, span);
-            }
-            DebugLoc::None => {
-                source_loc::clear_source_location(fcx);
-            }
-        }
-    }
-}
-
-pub trait ToDebugLoc {
-    fn debug_loc(&self) -> DebugLoc;
-}
-
-impl ToDebugLoc for hir::Expr {
-    fn debug_loc(&self) -> DebugLoc {
-        DebugLoc::At(self.id, self.span)
-    }
-}
-
-impl ToDebugLoc for NodeIdAndSpan {
-    fn debug_loc(&self) -> DebugLoc {
-        DebugLoc::At(self.id, self.span)
-    }
-}
-
-impl ToDebugLoc for Option<NodeIdAndSpan> {
-    fn debug_loc(&self) -> DebugLoc {
-        match *self {
-            Some(NodeIdAndSpan { id, span }) => DebugLoc::At(id, span),
-            None => DebugLoc::None
-        }
-    }
-}
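
The deleted get_template_parameters above appends the concrete type arguments of a monomorphized function to its debuginfo name as a "<...>" suffix, comma-separating the entries and emitting no suffix at all when there is neither a Self type nor any type parameter. A minimal standalone sketch of just that string-building step (std only; the function and type names are made up for illustration):

// Sketch: mirrors the suffix handling of the deleted get_template_parameters.
fn append_type_suffix(name: &mut String, type_names: &[&str]) {
    if type_names.is_empty() {
        return; // no generics and no Self type: the name stays as-is
    }
    name.push('<');
    name.push_str(&type_names.join(","));
    name.push('>');
}

fn main() {
    let mut name = String::from("get");
    append_type_suffix(&mut name, &["u32", "alloc::string::String"]);
    assert_eq!(name, "get<u32,alloc::string::String>");
}
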
diff --git a/src/librustc_trans/trans/debuginfo/namespace.rs b/src/librustc_trans/trans/debuginfo/namespace.rs
deleted file mode 100644 (file)
index 0c2c297..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Namespace Handling.
-
-use super::utils::{DIB, debug_context};
-
-use llvm;
-use llvm::debuginfo::DIScope;
-use rustc::middle::def_id::DefId;
-use rustc::front::map as hir_map;
-use trans::common::CrateContext;
-
-use std::ffi::CString;
-use std::ptr;
-use std::rc::{Rc, Weak};
-use syntax::ast;
-use syntax::parse::token;
-
-pub struct NamespaceTreeNode {
-    pub name: ast::Name,
-    pub scope: DIScope,
-    pub parent: Option<Weak<NamespaceTreeNode>>,
-}
-
-impl NamespaceTreeNode {
-    pub fn mangled_name_of_contained_item(&self, item_name: &str) -> String {
-        fn fill_nested(node: &NamespaceTreeNode, output: &mut String) {
-            match node.parent {
-                Some(ref parent) => fill_nested(&parent.upgrade().unwrap(), output),
-                None => {}
-            }
-            let string = node.name.as_str();
-            output.push_str(&string.len().to_string());
-            output.push_str(&string);
-        }
-
-        let mut name = String::from("_ZN");
-        fill_nested(self, &mut name);
-        name.push_str(&item_name.len().to_string());
-        name.push_str(item_name);
-        name.push('E');
-        name
-    }
-}
-
-pub fn crate_root_namespace<'a>(cx: &'a CrateContext) -> &'a str {
-    &cx.link_meta().crate_name
-}
-
-pub fn namespace_for_item(cx: &CrateContext, def_id: DefId) -> Rc<NamespaceTreeNode> {
-    cx.tcx().with_path(def_id, |path| {
-        // prepend crate name if not already present
-        let krate = if def_id.is_local() {
-            let crate_namespace_name = token::intern(crate_root_namespace(cx));
-            Some(hir_map::PathMod(crate_namespace_name))
-        } else {
-            None
-        };
-        let mut path = krate.into_iter().chain(path).peekable();
-
-        let mut current_key = Vec::new();
-        let mut parent_node: Option<Rc<NamespaceTreeNode>> = None;
-
-        // Create/Lookup namespace for each element of the path.
-        loop {
-            // Emulate a for loop so we can use peek below.
-            let path_element = match path.next() {
-                Some(e) => e,
-                None => break
-            };
-            // Ignore the name of the item (the last path element).
-            if path.peek().is_none() {
-                break;
-            }
-
-            let name = path_element.name();
-            current_key.push(name);
-
-            let existing_node = debug_context(cx).namespace_map.borrow()
-                                                 .get(&current_key).cloned();
-            let current_node = match existing_node {
-                Some(existing_node) => existing_node,
-                None => {
-                    // create and insert
-                    let parent_scope = match parent_node {
-                        Some(ref node) => node.scope,
-                        None => ptr::null_mut()
-                    };
-                    let namespace_name = name.as_str();
-                    let namespace_name = CString::new(namespace_name.as_bytes()).unwrap();
-                    let scope = unsafe {
-                        llvm::LLVMDIBuilderCreateNameSpace(
-                            DIB(cx),
-                            parent_scope,
-                            namespace_name.as_ptr(),
-                            // cannot reconstruct file ...
-                            ptr::null_mut(),
-                            // ... or line information, but that's not so important.
-                            0)
-                    };
-
-                    let node = Rc::new(NamespaceTreeNode {
-                        name: name,
-                        scope: scope,
-                        parent: parent_node.map(|parent| Rc::downgrade(&parent)),
-                    });
-
-                    debug_context(cx).namespace_map.borrow_mut()
-                                     .insert(current_key.clone(), node.clone());
-
-                    node
-                }
-            };
-
-            parent_node = Some(current_node);
-        }
-
-        match parent_node {
-            Some(node) => node,
-            None => {
-                cx.sess().bug(&format!("debuginfo::namespace_for_item(): \
-                                       path too short for {:?}",
-                                      def_id));
-            }
-        }
-    })
-}
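
The removed NamespaceTreeNode::mangled_name_of_contained_item walks from the crate root down to the item and emits an Itanium-C++-ABI-shaped symbol: "_ZN", then every enclosing path segment prefixed with its decimal length, then the item name, then a trailing "E". A self-contained sketch of the same scheme (the crate and module names here are hypothetical):

// Sketch: the length-prefixed mangling produced by the deleted namespace code.
fn mangle(path: &[&str], item: &str) -> String {
    let mut out = String::from("_ZN");
    for segment in path.iter().chain(std::iter::once(&item)) {
        out.push_str(&segment.len().to_string());
        out.push_str(segment);
    }
    out.push('E');
    out
}

fn main() {
    // e.g. item `bar` inside crate `foo`, module `baz`
    assert_eq!(mangle(&["foo", "baz"], "bar"), "_ZN3foo3baz3barE");
}
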
diff --git a/src/librustc_trans/trans/debuginfo/source_loc.rs b/src/librustc_trans/trans/debuginfo/source_loc.rs
deleted file mode 100644 (file)
index 981a23f..0000000
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use self::InternalDebugLocation::*;
-
-use super::utils::{debug_context, span_start, fn_should_be_ignored};
-use super::metadata::{scope_metadata,UNKNOWN_COLUMN_NUMBER};
-use super::{FunctionDebugContext, DebugLoc};
-
-use llvm;
-use llvm::debuginfo::DIScope;
-use trans::common::{NodeIdAndSpan, CrateContext, FunctionContext};
-
-use libc::c_uint;
-use std::ptr;
-use syntax::codemap::{Span, Pos};
-use syntax::{ast, codemap};
-
-pub fn get_cleanup_debug_loc_for_ast_node<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                                    node_id: ast::NodeId,
-                                                    node_span: Span,
-                                                    is_block: bool)
-                                                 -> NodeIdAndSpan {
-    // A debug location needs two things:
-    // (1) A span (of which only the beginning will actually be used)
-    // (2) An AST node-id which will be used to look up the lexical scope
-    //     for the location in the function's scope-map
-    //
-    // This function will calculate the debug location for compiler-generated
-    // cleanup calls that are executed when control-flow leaves the
-    // scope identified by `node_id`.
-    //
-    // For everything but block-like things we can simply take the id and span of
-    // the given expression, meaning that from a debugger's view cleanup code is
-    // executed at the same source location as the statement/expr itself.
-    //
-    // Blocks are a special case. Here we want the cleanup to be linked to the
-    // closing curly brace of the block. The *scope* the cleanup is executed in
-    // is up for debate: it could either still be *within* the block being
-    // cleaned up, meaning that locals from the block are still visible in the
-    // debugger.
-    // Or it could be in the scope that the block is contained in, so any locals
-    // from within the block are already considered out-of-scope and thus not
-    // accessible in the debugger anymore.
-    //
-    // The current implementation opts for the second option: cleanup of a block
-    // already happens in the parent scope of the block. The main reason for
-    // this decision is that scoping becomes control-flow dependent when variable
-    // shadowing is involved and it's impossible to decide statically which
-    // scope is actually left when the cleanup code is executed.
-    // In practice it shouldn't make much of a difference.
-
-    let mut cleanup_span = node_span;
-
-    if is_block {
-        // Not all blocks actually have curly braces (e.g. simple closure
-        // bodies), in which case we also just want to return the span of the
-        // whole expression.
-        let code_snippet = cx.sess().codemap().span_to_snippet(node_span);
-        if let Ok(code_snippet) = code_snippet {
-            let bytes = code_snippet.as_bytes();
-
-            if !bytes.is_empty() && &bytes[bytes.len()-1..] == b"}" {
-                cleanup_span = Span {
-                    lo: node_span.hi - codemap::BytePos(1),
-                    hi: node_span.hi,
-                    expn_id: node_span.expn_id
-                };
-            }
-        }
-    }
-
-    NodeIdAndSpan {
-        id: node_id,
-        span: cleanup_span
-    }
-}
-
-
-/// Sets the current debug location at the beginning of the span.
-///
-/// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). The node_id
-/// parameter is used to reliably find the correct visibility scope for the code
-/// position.
-pub fn set_source_location(fcx: &FunctionContext,
-                           node_id: ast::NodeId,
-                           span: Span) {
-    match fcx.debug_context {
-        FunctionDebugContext::DebugInfoDisabled => return,
-        FunctionDebugContext::FunctionWithoutDebugInfo => {
-            set_debug_location(fcx.ccx, UnknownLocation);
-            return;
-        }
-        FunctionDebugContext::RegularContext(box ref function_debug_context) => {
-            if function_debug_context.source_location_override.get() {
-                // Just ignore any attempts to set a new debug location while
-                // the override is active.
-                return;
-            }
-
-            let cx = fcx.ccx;
-
-            debug!("set_source_location: {}", cx.sess().codemap().span_to_string(span));
-
-            if function_debug_context.source_locations_enabled.get() {
-                let loc = span_start(cx, span);
-                let scope = scope_metadata(fcx, node_id, span);
-
-                set_debug_location(cx, InternalDebugLocation::new(scope,
-                                                                  loc.line,
-                                                                  loc.col.to_usize()));
-            } else {
-                set_debug_location(cx, UnknownLocation);
-            }
-        }
-    }
-}
-
-/// This function makes sure that all debug locations emitted while executing
-/// `wrapped_function` are set to the given `debug_loc`.
-pub fn with_source_location_override<F, R>(fcx: &FunctionContext,
-                                           debug_loc: DebugLoc,
-                                           wrapped_function: F) -> R
-    where F: FnOnce() -> R
-{
-    match fcx.debug_context {
-        FunctionDebugContext::DebugInfoDisabled => {
-            wrapped_function()
-        }
-        FunctionDebugContext::FunctionWithoutDebugInfo => {
-            set_debug_location(fcx.ccx, UnknownLocation);
-            wrapped_function()
-        }
-        FunctionDebugContext::RegularContext(box ref function_debug_context) => {
-            if function_debug_context.source_location_override.get() {
-                wrapped_function()
-            } else {
-                debug_loc.apply(fcx);
-                function_debug_context.source_location_override.set(true);
-                let result = wrapped_function();
-                function_debug_context.source_location_override.set(false);
-                result
-            }
-        }
-    }
-}
-
-/// Clears the current debug location.
-///
-/// Instructions generated hereafter won't be assigned a source location.
-pub fn clear_source_location(fcx: &FunctionContext) {
-    if fn_should_be_ignored(fcx) {
-        return;
-    }
-
-    set_debug_location(fcx.ccx, UnknownLocation);
-}
-
-/// Enables emitting source locations for the given function.
-///
-/// Since we don't want source locations to be emitted for the function prelude,
-/// they are disabled when beginning to translate a new function. This function
-/// switches source location emitting on and must therefore be called before the
-/// first real statement/expression of the function is translated.
-pub fn start_emitting_source_locations(fcx: &FunctionContext) {
-    match fcx.debug_context {
-        FunctionDebugContext::RegularContext(box ref data) => {
-            data.source_locations_enabled.set(true)
-        },
-        _ => { /* safe to ignore */ }
-    }
-}
-
-
-#[derive(Copy, Clone, PartialEq)]
-pub enum InternalDebugLocation {
-    KnownLocation { scope: DIScope, line: usize, col: usize },
-    UnknownLocation
-}
-
-impl InternalDebugLocation {
-    pub fn new(scope: DIScope, line: usize, col: usize) -> InternalDebugLocation {
-        KnownLocation {
-            scope: scope,
-            line: line,
-            col: col,
-        }
-    }
-}
-
-pub fn set_debug_location(cx: &CrateContext, debug_location: InternalDebugLocation) {
-    if debug_location == debug_context(cx).current_debug_location.get() {
-        return;
-    }
-
-    let metadata_node;
-
-    match debug_location {
-        KnownLocation { scope, line, .. } => {
-            // Always set the column to zero like Clang and GCC
-            let col = UNKNOWN_COLUMN_NUMBER;
-            debug!("setting debug location to {} {}", line, col);
-
-            unsafe {
-                metadata_node = llvm::LLVMDIBuilderCreateDebugLocation(
-                    debug_context(cx).llcontext,
-                    line as c_uint,
-                    col as c_uint,
-                    scope,
-                    ptr::null_mut());
-            }
-        }
-        UnknownLocation => {
-            debug!("clearing debug location ");
-            metadata_node = ptr::null_mut();
-        }
-    };
-
-    unsafe {
-        llvm::LLVMSetCurrentDebugLocation(cx.raw_builder(), metadata_node);
-    }
-
-    debug_context(cx).current_debug_location.set(debug_location);
-}
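
The removed with_source_location_override pins the emitted debug location for the duration of a closure by raising a Cell<bool> flag that set_source_location consults; while the flag is up, attempts to set a new location are ignored and nested overrides simply run straight through. A stripped-down sketch of that flag pattern, with a hypothetical DebugContext standing in for the real per-function debug context:

use std::cell::Cell;

struct DebugContext {
    source_location_override: Cell<bool>,
}

impl DebugContext {
    // Runs `wrapped` with the override flag raised, restoring it afterwards.
    fn with_override<F, R>(&self, wrapped: F) -> R
        where F: FnOnce() -> R
    {
        if self.source_location_override.get() {
            // An outer override is already active: just run the closure.
            wrapped()
        } else {
            self.source_location_override.set(true);
            let result = wrapped();
            self.source_location_override.set(false);
            result
        }
    }
}

fn main() {
    let ctx = DebugContext { source_location_override: Cell::new(false) };
    let answer = ctx.with_override(|| 42);
    assert_eq!(answer, 42);
    assert!(!ctx.source_location_override.get());
}
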
diff --git a/src/librustc_trans/trans/debuginfo/type_names.rs b/src/librustc_trans/trans/debuginfo/type_names.rs
deleted file mode 100644 (file)
index f243b1e..0000000
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Type Names for Debug Info.
-
-use super::namespace::crate_root_namespace;
-
-use trans::common::CrateContext;
-use middle::def_id::DefId;
-use middle::infer;
-use middle::subst;
-use middle::ty::{self, Ty};
-
-use rustc_front::hir;
-
-// Compute the name of the type as it should be stored in debuginfo. Does not do
-// any caching, i.e. calling the function twice with the same type will also do
-// the work twice. The `qualified` parameter only affects the first level of the
-// type name, further levels (i.e. type parameters) are always fully qualified.
-pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                             t: Ty<'tcx>,
-                                             qualified: bool)
-                                             -> String {
-    let mut result = String::with_capacity(64);
-    push_debuginfo_type_name(cx, t, qualified, &mut result);
-    result
-}
-
-// Pushes the name of the type as it should be stored in debuginfo on the
-// `output` String. See also compute_debuginfo_type_name().
-pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                          t: Ty<'tcx>,
-                                          qualified: bool,
-                                          output: &mut String) {
-    match t.sty {
-        ty::TyBool => output.push_str("bool"),
-        ty::TyChar => output.push_str("char"),
-        ty::TyStr => output.push_str("str"),
-        ty::TyInt(int_ty) => output.push_str(int_ty.ty_to_string()),
-        ty::TyUint(uint_ty) => output.push_str(uint_ty.ty_to_string()),
-        ty::TyFloat(float_ty) => output.push_str(float_ty.ty_to_string()),
-        ty::TyStruct(def, substs) |
-        ty::TyEnum(def, substs) => {
-            push_item_name(cx, def.did, qualified, output);
-            push_type_params(cx, substs, output);
-        },
-        ty::TyTuple(ref component_types) => {
-            output.push('(');
-            for &component_type in component_types {
-                push_debuginfo_type_name(cx, component_type, true, output);
-                output.push_str(", ");
-            }
-            if !component_types.is_empty() {
-                output.pop();
-                output.pop();
-            }
-            output.push(')');
-        },
-        ty::TyBox(inner_type) => {
-            output.push_str("Box<");
-            push_debuginfo_type_name(cx, inner_type, true, output);
-            output.push('>');
-        },
-        ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
-            output.push('*');
-            match mutbl {
-                hir::MutImmutable => output.push_str("const "),
-                hir::MutMutable => output.push_str("mut "),
-            }
-
-            push_debuginfo_type_name(cx, inner_type, true, output);
-        },
-        ty::TyRef(_, ty::TypeAndMut { ty: inner_type, mutbl }) => {
-            output.push('&');
-            if mutbl == hir::MutMutable {
-                output.push_str("mut ");
-            }
-
-            push_debuginfo_type_name(cx, inner_type, true, output);
-        },
-        ty::TyArray(inner_type, len) => {
-            output.push('[');
-            push_debuginfo_type_name(cx, inner_type, true, output);
-            output.push_str(&format!("; {}", len));
-            output.push(']');
-        },
-        ty::TySlice(inner_type) => {
-            output.push('[');
-            push_debuginfo_type_name(cx, inner_type, true, output);
-            output.push(']');
-        },
-        ty::TyTrait(ref trait_data) => {
-            let principal = cx.tcx().erase_late_bound_regions(&trait_data.principal);
-            push_item_name(cx, principal.def_id, false, output);
-            push_type_params(cx, principal.substs, output);
-        },
-        ty::TyBareFn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => {
-            if unsafety == hir::Unsafety::Unsafe {
-                output.push_str("unsafe ");
-            }
-
-            if abi != ::syntax::abi::Abi::Rust {
-                output.push_str("extern \"");
-                output.push_str(abi.name());
-                output.push_str("\" ");
-            }
-
-            output.push_str("fn(");
-
-            let sig = cx.tcx().erase_late_bound_regions(sig);
-            let sig = infer::normalize_associated_type(cx.tcx(), &sig);
-            if !sig.inputs.is_empty() {
-                for &parameter_type in &sig.inputs {
-                    push_debuginfo_type_name(cx, parameter_type, true, output);
-                    output.push_str(", ");
-                }
-                output.pop();
-                output.pop();
-            }
-
-            if sig.variadic {
-                if !sig.inputs.is_empty() {
-                    output.push_str(", ...");
-                } else {
-                    output.push_str("...");
-                }
-            }
-
-            output.push(')');
-
-            match sig.output {
-                ty::FnConverging(result_type) if result_type.is_nil() => {}
-                ty::FnConverging(result_type) => {
-                    output.push_str(" -> ");
-                    push_debuginfo_type_name(cx, result_type, true, output);
-                }
-                ty::FnDiverging => {
-                    output.push_str(" -> !");
-                }
-            }
-        },
-        ty::TyClosure(..) => {
-            output.push_str("closure");
-        }
-        ty::TyError |
-        ty::TyInfer(_) |
-        ty::TyProjection(..) |
-        ty::TyParam(_) => {
-            cx.sess().bug(&format!("debuginfo: Trying to create type name for \
-                unexpected type: {:?}", t));
-        }
-    }
-
-    fn push_item_name(cx: &CrateContext,
-                      def_id: DefId,
-                      qualified: bool,
-                      output: &mut String) {
-        cx.tcx().with_path(def_id, |path| {
-            if qualified {
-                if def_id.is_local() {
-                    output.push_str(crate_root_namespace(cx));
-                    output.push_str("::");
-                }
-
-                let mut path_element_count = 0;
-                for path_element in path {
-                    output.push_str(&path_element.name().as_str());
-                    output.push_str("::");
-                    path_element_count += 1;
-                }
-
-                if path_element_count == 0 {
-                    cx.sess().bug("debuginfo: Encountered empty item path!");
-                }
-
-                output.pop();
-                output.pop();
-            } else {
-                let name = path.last().expect("debuginfo: Empty item path?").name();
-                output.push_str(&name.as_str());
-            }
-        });
-    }
-
-    // Pushes the type parameters in the given `Substs` to the output string.
-    // This ignores region parameters, since they can't reliably be
-    // reconstructed for items from non-local crates. For local crates, this
-    // would be possible but with inlining and LTO we have to use the least
-    // common denominator - otherwise we would run into conflicts.
-    fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                  substs: &subst::Substs<'tcx>,
-                                  output: &mut String) {
-        if substs.types.is_empty() {
-            return;
-        }
-
-        output.push('<');
-
-        for &type_parameter in &substs.types {
-            push_debuginfo_type_name(cx, type_parameter, true, output);
-            output.push_str(", ");
-        }
-
-        output.pop();
-        output.pop();
-
-        output.push('>');
-    }
-}
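
push_debuginfo_type_name above assembles tuple, fn-pointer and generic-argument lists the same way throughout: push the element, push ", ", and pop the trailing separator once the loop finishes. A tiny standalone sketch of that trick for the tuple case:

// Sketch: the push-then-pop separator handling used by the deleted type_names code.
fn push_tuple_name(output: &mut String, components: &[&str]) {
    output.push('(');
    for component in components {
        output.push_str(component);
        output.push_str(", ");
    }
    if !components.is_empty() {
        output.pop(); // drop the trailing ' '
        output.pop(); // drop the trailing ','
    }
    output.push(')');
}

fn main() {
    let mut name = String::new();
    push_tuple_name(&mut name, &["u32", "bool"]);
    assert_eq!(name, "(u32, bool)");
}
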
diff --git a/src/librustc_trans/trans/debuginfo/utils.rs b/src/librustc_trans/trans/debuginfo/utils.rs
deleted file mode 100644 (file)
index c1d7bb4..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Utility Functions.
-
-use super::{FunctionDebugContext, CrateDebugContext};
-use super::namespace::namespace_for_item;
-
-use middle::def_id::DefId;
-
-use llvm;
-use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray};
-use trans::machine;
-use trans::common::{CrateContext, FunctionContext};
-use trans::type_::Type;
-
-use syntax::codemap::Span;
-use syntax::{ast, codemap};
-
-pub fn is_node_local_to_unit(cx: &CrateContext, node_id: ast::NodeId) -> bool
-{
-    // The is_local_to_unit flag indicates whether a function is local to the
-    // current compilation unit (i.e. if it is *static* in the C-sense). The
-    // *reachable* set should provide a good approximation of this, as it
-    // contains everything that might leak out of the current crate (by being
-    // externally visible or by being inlined into something externally
-    // visible). It might be better to use the `exported_items` set from
-    // `driver::CrateAnalysis` in the future, but (atm) this set is not
-    // available in the translation pass.
-    !cx.reachable().contains(&node_id)
-}
-
-#[allow(non_snake_case)]
-pub fn create_DIArray(builder: DIBuilderRef, arr: &[DIDescriptor]) -> DIArray {
-    return unsafe {
-        llvm::LLVMDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32)
-    };
-}
-
-pub fn contains_nodebug_attribute(attributes: &[ast::Attribute]) -> bool {
-    attributes.iter().any(|attr| {
-        let meta_item: &ast::MetaItem = &attr.node.value;
-        match meta_item.node {
-            ast::MetaItemKind::Word(ref value) => &value[..] == "no_debug",
-            _ => false
-        }
-    })
-}
-
-/// Return codemap::Loc corresponding to the beginning of the span
-pub fn span_start(cx: &CrateContext, span: Span) -> codemap::Loc {
-    cx.sess().codemap().lookup_char_pos(span.lo)
-}
-
-pub fn size_and_align_of(cx: &CrateContext, llvm_type: Type) -> (u64, u64) {
-    (machine::llsize_of_alloc(cx, llvm_type), machine::llalign_of_min(cx, llvm_type) as u64)
-}
-
-pub fn bytes_to_bits(bytes: u64) -> u64 {
-    bytes * 8
-}
-
-#[inline]
-pub fn debug_context<'a, 'tcx>(cx: &'a CrateContext<'a, 'tcx>)
-                           -> &'a CrateDebugContext<'tcx> {
-    let debug_context: &'a CrateDebugContext<'tcx> = cx.dbg_cx().as_ref().unwrap();
-    debug_context
-}
-
-#[inline]
-#[allow(non_snake_case)]
-pub fn DIB(cx: &CrateContext) -> DIBuilderRef {
-    cx.dbg_cx().as_ref().unwrap().builder
-}
-
-pub fn fn_should_be_ignored(fcx: &FunctionContext) -> bool {
-    match fcx.debug_context {
-        FunctionDebugContext::RegularContext(_) => false,
-        _ => true
-    }
-}
-
-pub fn assert_type_for_node_id(cx: &CrateContext,
-                           node_id: ast::NodeId,
-                           error_reporting_span: Span) {
-    if !cx.tcx().node_types().contains_key(&node_id) {
-        cx.sess().span_bug(error_reporting_span,
-                           "debuginfo: Could not find type for node id!");
-    }
-}
-
-pub fn get_namespace_and_span_for_item(cx: &CrateContext, def_id: DefId)
-                                   -> (DIScope, Span) {
-    let containing_scope = namespace_for_item(cx, def_id).scope;
-    let definition_span = cx.tcx().map.def_id_span(def_id, codemap::DUMMY_SP /* (1) */ );
-
-    // (1) For external items there is no span information
-
-    (containing_scope, definition_span)
-}
diff --git a/src/librustc_trans/trans/declare.rs b/src/librustc_trans/trans/declare.rs
deleted file mode 100644 (file)
index 75b60be..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-//! Declare various LLVM values.
-//!
-//! Prefer using functions and methods from this module rather than calling LLVM
-//! functions directly. These functions do some additional work to ensure we do
-//! the right thing given the preconceptions of trans.
-//!
-//! Some useful guidelines:
-//!
-//! * Use declare_* family of methods if you are declaring, but are not
-//!   interested in defining the ValueRef they return.
-//! * Use define_* family of methods when you might be defining the ValueRef.
-//! * When in doubt, define.
-use llvm::{self, ValueRef};
-use middle::ty;
-use middle::infer;
-use syntax::abi::Abi;
-use trans::attributes;
-use trans::base;
-use trans::context::CrateContext;
-use trans::type_::Type;
-use trans::type_of;
-
-use std::ffi::CString;
-use libc::c_uint;
-
-
-/// Declare a global value.
-///
-/// If there’s a value with the same name already declared, the function will
-/// return its ValueRef instead.
-pub fn declare_global(ccx: &CrateContext, name: &str, ty: Type) -> llvm::ValueRef {
-    debug!("declare_global(name={:?})", name);
-    let namebuf = CString::new(name).unwrap_or_else(|_|{
-        ccx.sess().bug(&format!("name {:?} contains an interior null byte", name))
-    });
-    unsafe {
-        llvm::LLVMGetOrInsertGlobal(ccx.llmod(), namebuf.as_ptr(), ty.to_ref())
-    }
-}
-
-
-/// Declare a function.
-///
-/// For Rust functions use `declare_rust_fn` instead.
-///
-/// If there’s a value with the same name already declared, the function will
-/// update the declaration and return existing ValueRef instead.
-pub fn declare_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv,
-                  ty: Type, output: ty::FnOutput) -> ValueRef {
-    debug!("declare_fn(name={:?})", name);
-    let namebuf = CString::new(name).unwrap_or_else(|_|{
-        ccx.sess().bug(&format!("name {:?} contains an interior null byte", name))
-    });
-    let llfn = unsafe {
-        llvm::LLVMGetOrInsertFunction(ccx.llmod(), namebuf.as_ptr(), ty.to_ref())
-    };
-
-    llvm::SetFunctionCallConv(llfn, callconv);
-    // Function addresses in Rust are never significant, allowing functions to
-    // be merged.
-    llvm::SetUnnamedAddr(llfn, true);
-
-    if output == ty::FnDiverging {
-        llvm::SetFunctionAttribute(llfn, llvm::Attribute::NoReturn);
-    }
-
-    if ccx.tcx().sess.opts.cg.no_redzone
-        .unwrap_or(ccx.tcx().sess.target.target.options.disable_redzone) {
-        llvm::SetFunctionAttribute(llfn, llvm::Attribute::NoRedZone)
-    }
-
-    llfn
-}
-
-
-/// Declare a C ABI function.
-///
-/// Only use this for foreign function ABIs and glue. For Rust functions use
-/// `declare_rust_fn` instead.
-///
-/// If there’s a value with the same name already declared, the function will
-/// update the declaration and return existing ValueRef instead.
-pub fn declare_cfn(ccx: &CrateContext, name: &str, fn_type: Type,
-                   output: ty::Ty) -> ValueRef {
-    declare_fn(ccx, name, llvm::CCallConv, fn_type, ty::FnConverging(output))
-}
-
-
-/// Declare a Rust function.
-///
-/// If there’s a value with the same name already declared, the function will
-/// update the declaration and return existing ValueRef instead.
-pub fn declare_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str,
-                                 fn_type: ty::Ty<'tcx>) -> ValueRef {
-    debug!("declare_rust_fn(name={:?}, fn_type={:?})", name,
-           fn_type);
-
-    let function_type; // placeholder so that the memory ownership works out ok
-    let (sig, abi, env) = match fn_type.sty {
-        ty::TyBareFn(_, ref f) => {
-            (&f.sig, f.abi, None)
-        }
-        ty::TyClosure(closure_did, ref substs) => {
-            let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables);
-            function_type = infcx.closure_type(closure_did, substs);
-            let self_type = base::self_type_for_closure(ccx, closure_did, fn_type);
-            let llenvironment_type = type_of::type_of_explicit_arg(ccx, self_type);
-            debug!("declare_rust_fn function_type={:?} self_type={:?}",
-                   function_type, self_type);
-            (&function_type.sig, Abi::RustCall, Some(llenvironment_type))
-        }
-        _ => ccx.sess().bug("expected closure or fn")
-    };
-
-    let sig = ccx.tcx().erase_late_bound_regions(sig);
-    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
-    debug!("declare_rust_fn (after region erasure) sig={:?}", sig);
-    let llfty = type_of::type_of_rust_fn(ccx, env, &sig, abi);
-    debug!("declare_rust_fn llfty={}", ccx.tn().type_to_string(llfty));
-
-    // it is ok to directly access sig.0.output because we erased all
-    // late-bound-regions above
-    let llfn = declare_fn(ccx, name, llvm::CCallConv, llfty, sig.output);
-    attributes::from_fn_type(ccx, fn_type).apply_llfn(llfn);
-    llfn
-}
-
-
-/// Declare a Rust function with internal linkage.
-///
-/// If there’s a value with the same name already declared, the function will
-/// update the declaration and return existing ValueRef instead.
-pub fn declare_internal_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str,
-                                          fn_type: ty::Ty<'tcx>) -> ValueRef {
-    let llfn = declare_rust_fn(ccx, name, fn_type);
-    llvm::SetLinkage(llfn, llvm::InternalLinkage);
-    llfn
-}
-
-
-/// Declare a global with an intention to define it.
-///
-/// Use this function when you intend to define a global. This function will
-/// return None if the name already has a definition associated with it. In that
-/// case an error should be reported to the user, because it usually happens due
-/// to a user error (e.g. misuse of #[no_mangle] or #[export_name] attributes).
-pub fn define_global(ccx: &CrateContext, name: &str, ty: Type) -> Option<ValueRef> {
-    if get_defined_value(ccx, name).is_some() {
-        None
-    } else {
-        Some(declare_global(ccx, name, ty))
-    }
-}
-
-
-/// Declare a function with an intention to define it.
-///
-/// For Rust functions use `define_rust_fn` instead.
-///
-/// Use this function when you intend to define a function. This function will
-/// return None if the name already has a definition associated with it. In that
-/// case an error should be reported to the user, because it usually happens due
-/// to a user error (e.g. misuse of #[no_mangle] or #[export_name] attributes).
-pub fn define_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv,
-                 fn_type: Type, output: ty::FnOutput) -> Option<ValueRef> {
-    if get_defined_value(ccx, name).is_some() {
-        None
-    } else {
-        Some(declare_fn(ccx, name, callconv, fn_type, output))
-    }
-}
-
-
-/// Declare a C ABI function with an intention to define it.
-///
-/// Use this function when you intend to define a function. This function will
-/// return None if the name already has a definition associated with it. In that
-/// case an error should be reported to the user, because it usually happens due
-/// to a user error (e.g. misuse of #[no_mangle] or #[export_name] attributes).
-///
-/// Only use this for foreign function ABIs and glue. For Rust functions use
-/// `declare_rust_fn` instead.
-pub fn define_cfn(ccx: &CrateContext, name: &str, fn_type: Type,
-                  output: ty::Ty) -> Option<ValueRef> {
-    if get_defined_value(ccx, name).is_some() {
-        None
-    } else {
-        Some(declare_cfn(ccx, name, fn_type, output))
-    }
-}
-
-
-/// Declare a Rust function with an intention to define it.
-///
-/// Use this function when you intend to define a function. This function will
-/// return None if the name already has a definition associated with it. In that
-/// case an error should be reported to the user, because it usually happens due
-/// to a user error (e.g. misuse of #[no_mangle] or #[export_name] attributes).
-pub fn define_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str,
-                                fn_type: ty::Ty<'tcx>) -> Option<ValueRef> {
-    if get_defined_value(ccx, name).is_some() {
-        None
-    } else {
-        Some(declare_rust_fn(ccx, name, fn_type))
-    }
-}
-
-
-/// Declare a Rust function with an intention to define it.
-///
-/// Use this function when you intend to define a function. This function will
-/// panic if the name already has a definition associated with it. This
-/// can happen with #[no_mangle] or #[export_name], for example.
-pub fn define_internal_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                         name: &str,
-                                         fn_type: ty::Ty<'tcx>) -> ValueRef {
-    if get_defined_value(ccx, name).is_some() {
-        ccx.sess().fatal(&format!("symbol `{}` already defined", name))
-    } else {
-        declare_internal_rust_fn(ccx, name, fn_type)
-    }
-}
-
-
-/// Get defined or externally defined (AvailableExternally linkage) value by
-/// name.
-fn get_defined_value(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
-    debug!("get_defined_value(name={:?})", name);
-    let namebuf = CString::new(name).unwrap_or_else(|_|{
-        ccx.sess().bug(&format!("name {:?} contains an interior null byte", name))
-    });
-    let val = unsafe { llvm::LLVMGetNamedValue(ccx.llmod(), namebuf.as_ptr()) };
-    if val.is_null() {
-        debug!("get_defined_value: {:?} value is null", name);
-        None
-    } else {
-        let (declaration, aext_link) = unsafe {
-            let linkage = llvm::LLVMGetLinkage(val);
-            (llvm::LLVMIsDeclaration(val) != 0,
-             linkage == llvm::AvailableExternallyLinkage as c_uint)
-        };
-        debug!("get_defined_value: found {:?} value (declaration: {}, \
-                aext_link: {})", name, declaration, aext_link);
-        if !declaration || aext_link {
-            Some(val)
-        } else {
-            None
-        }
-    }
-}
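
The deleted declare.rs keeps a consistent contract between the declare_* helpers, which may be called repeatedly and hand back the already-existing value, and the define_* helpers, which return None (or fail outright) when the requested name already has a definition so that a duplicate-symbol error can be reported. A toy model of that contract, with a hypothetical Module type standing in for the LLVM module:

use std::collections::HashSet;

struct Module {
    defined: HashSet<String>,
}

impl Module {
    // "Declaring" is idempotent: the same name always yields the same handle.
    fn declare(&self, name: &str) -> String {
        name.to_string()
    }

    // "Defining" rejects a name that already has a definition, mirroring
    // define_global/define_fn returning None on clashes.
    fn define(&mut self, name: &str) -> Option<String> {
        if self.defined.contains(name) {
            None
        } else {
            self.defined.insert(name.to_string());
            Some(self.declare(name))
        }
    }
}

fn main() {
    let mut module = Module { defined: HashSet::new() };
    assert!(module.define("my_symbol").is_some());
    assert!(module.define("my_symbol").is_none()); // duplicate definition rejected
}
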
diff --git a/src/librustc_trans/trans/disr.rs b/src/librustc_trans/trans/disr.rs
deleted file mode 100644 (file)
index 7cb10a8..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#[derive(Debug, Eq, PartialEq, Copy, Clone)]
-pub struct Disr(pub u64);
-
-impl Disr {
-    pub fn wrapping_add(self, other: Self) -> Self {
-        Disr(self.0.wrapping_add(other.0))
-    }
-}
-
-impl ::std::ops::BitAnd for Disr {
-    type Output = Disr;
-    fn bitand(self, other: Self) -> Self {
-        Disr(self.0 & other.0)
-    }
-}
-
-impl From<::middle::ty::Disr> for Disr {
-    fn from(i: ::middle::ty::Disr) -> Disr {
-        Disr(i)
-    }
-}
-
-impl From<usize> for Disr {
-    fn from(i: usize) -> Disr {
-        Disr(i as u64)
-    }
-}
-
-impl PartialOrd for Disr {
-    fn partial_cmp(&self, other: &Disr) -> Option<::std::cmp::Ordering> {
-        self.0.partial_cmp(&other.0)
-    }
-}
-
-impl Ord for Disr {
-    fn cmp(&self, other: &Disr) -> ::std::cmp::Ordering {
-        self.0.cmp(&other.0)
-    }
-}
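
disr.rs wraps enum discriminant values in a u64 newtype whose addition deliberately wraps; a compilable miniature showing why wrapping_add rather than plain + matters once an explicit discriminant sits at the top of the range:

// Sketch: wrapping discriminant arithmetic, as in the deleted Disr type.
#[derive(Debug, PartialEq, Copy, Clone)]
struct Disr(u64);

impl Disr {
    fn wrapping_add(self, other: Self) -> Self {
        Disr(self.0.wrapping_add(other.0))
    }
}

fn main() {
    let max = Disr(u64::MAX);
    assert_eq!(max.wrapping_add(Disr(1)), Disr(0)); // wraps instead of overflowing
}
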
diff --git a/src/librustc_trans/trans/expr.rs b/src/librustc_trans/trans/expr.rs
deleted file mode 100644 (file)
index 782e38d..0000000
+++ /dev/null
@@ -1,2663 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! # Translation of Expressions
-//!
-//! The expr module handles translation of expressions. The most general
-//! translation routine is `trans()`, which will translate an expression
-//! into a datum. `trans_into()` is also available, which will translate
-//! an expression and write the result directly into memory, sometimes
-//! avoiding the need for a temporary stack slot. Finally,
-//! `trans_to_lvalue()` is available if you'd like to ensure that the
-//! result has cleanup scheduled.
-//!
-//! Internally, each of these functions dispatches to various other
-//! expression functions depending on the kind of expression. We divide
-//! up expressions into:
-//!
-//! - **Datum expressions:** Those that most naturally yield values.
-//!   Examples would be `22`, `box x`, or `a + b` (when not overloaded).
-//! - **DPS expressions:** Those that most naturally write into a location
-//!   in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
-//! - **Statement expressions:** Those that do not generate a meaningful
-//!   result. Examples would be `while { ... }` or `return 44`.
-//!
-//! Public entry points:
-//!
-//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
-//!   storing the result into `dest`. This is the preferred form, if you
-//!   can manage it.
-//!
-//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
-//!   `Datum` with the result. You can then store the datum, inspect
-//!   the value, etc. This may introduce temporaries if the datum is a
-//!   structural type.
-//!
-//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
-//!   expression and ensures that the result has a cleanup associated with it,
-//!   creating a temporary stack slot if necessary.
-//!
-//! - `trans_local_var -> Datum`: looks up a local variable or upvar.
-
-#![allow(non_camel_case_types)]
-
-pub use self::Dest::*;
-use self::lazy_binop_ty::*;
-
-use back::abi;
-use llvm::{self, ValueRef, TypeKind};
-use middle::const_qualif::ConstQualif;
-use middle::def::Def;
-use middle::subst::Substs;
-use trans::{_match, adt, asm, base, callee, closure, consts, controlflow};
-use trans::base::*;
-use trans::build::*;
-use trans::cleanup::{self, CleanupMethods, DropHintMethods};
-use trans::common::*;
-use trans::datum::*;
-use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
-use trans::declare;
-use trans::glue;
-use trans::machine;
-use trans::meth;
-use trans::tvec;
-use trans::type_of;
-use trans::Disr;
-use middle::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer};
-use middle::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
-use middle::ty::adjustment::CustomCoerceUnsized;
-use middle::ty::{self, Ty};
-use middle::ty::MethodCall;
-use middle::ty::cast::{CastKind, CastTy};
-use util::common::indenter;
-use trans::machine::{llsize_of, llsize_of_alloc};
-use trans::type_::Type;
-
-use rustc_front;
-use rustc_front::hir;
-
-use syntax::{ast, codemap};
-use syntax::parse::token::InternedString;
-use syntax::ptr::P;
-use syntax::parse::token;
-use std::mem;
-
-// Destinations
-
-// These are passed around by the code generating functions to track the
-// destination of a computation's value.
-
-#[derive(Copy, Clone, PartialEq)]
-pub enum Dest {
-    SaveIn(ValueRef),
-    Ignore,
-}
-
-impl Dest {
-    pub fn to_string(&self, ccx: &CrateContext) -> String {
-        match *self {
-            SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)),
-            Ignore => "Ignore".to_string()
-        }
-    }
-}
-
-/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
-/// better optimized LLVM code.
-pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                              expr: &hir::Expr,
-                              dest: Dest)
-                              -> Block<'blk, 'tcx> {
-    let mut bcx = bcx;
-
-    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
-
-    if adjustment_required(bcx, expr) {
-        // use trans, which may be less efficient but
-        // which will perform the adjustments:
-        let datum = unpack_datum!(bcx, trans(bcx, expr));
-        return datum.store_to_dest(bcx, dest, expr.id);
-    }
-
-    let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
-    if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
-        if !qualif.intersects(ConstQualif::PREFER_IN_PLACE) {
-            if let SaveIn(lldest) = dest {
-                match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
-                                                       bcx.fcx.param_substs,
-                                                       consts::TrueConst::No) {
-                    Ok(global) => {
-                        // Cast pointer to destination, because constants
-                        // have different types.
-                        let lldest = PointerCast(bcx, lldest, val_ty(global));
-                        memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
-                        return bcx;
-                    },
-                    Err(consts::ConstEvalFailure::Runtime(_)) => {
-                        // in case const evaluation errors, translate normally
-                        // debug assertions catch the same errors
-                        // see RFC 1229
-                    },
-                    Err(consts::ConstEvalFailure::Compiletime(_)) => {
-                        return bcx;
-                    },
-                }
-            }
-
-            // If we see a const here, that's because it evaluates to a type with zero size. We
-            // should be able to just discard it, since const expressions are guaranteed not to
-            // have side effects. This seems to be reached through tuple struct constructors being
-            // passed zero-size constants.
-            if let hir::ExprPath(..) = expr.node {
-                match bcx.def(expr.id) {
-                    Def::Const(_) | Def::AssociatedConst(_) => {
-                        assert!(type_is_zero_size(bcx.ccx(), bcx.tcx().node_id_to_type(expr.id)));
-                        return bcx;
-                    }
-                    _ => {}
-                }
-            }
-
-            // Even if we don't have a value to emit, and the expression
-            // doesn't have any side-effects, we still have to translate the
-            // body of any closures.
-            // FIXME: Find a better way of handling this case.
-        } else {
-            // The only way we're going to see a `const` at this point is if
-            // it prefers in-place instantiation, likely because it contains
-            // `[x; N]` somewhere within.
-            match expr.node {
-                hir::ExprPath(..) => {
-                    match bcx.def(expr.id) {
-                        Def::Const(did) | Def::AssociatedConst(did) => {
-                            let empty_substs = bcx.tcx().mk_substs(Substs::trans_empty());
-                            let const_expr = consts::get_const_expr(bcx.ccx(), did, expr,
-                                                                    empty_substs);
-                            // Temporarily get cleanup scopes out of the way,
-                            // as they require sub-expressions to be contained
-                            // inside the current AST scope.
-                            // These should record no cleanups anyways, `const`
-                            // can't have destructors.
-                            let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
-                                                      vec![]);
-                            // Lock emitted debug locations to the location of
-                            // the constant reference expression.
-                            debuginfo::with_source_location_override(bcx.fcx,
-                                                                     expr.debug_loc(),
-                                                                     || {
-                                bcx = trans_into(bcx, const_expr, dest)
-                            });
-                            let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
-                                                      scopes);
-                            assert!(scopes.is_empty());
-                            return bcx;
-                        }
-                        _ => {}
-                    }
-                }
-                _ => {}
-            }
-        }
-    }
-
-    debug!("trans_into() expr={:?}", expr);
-
-    let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
-                                                                          expr.id,
-                                                                          expr.span,
-                                                                          false);
-    bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);
-
-    let kind = expr_kind(bcx.tcx(), expr);
-    bcx = match kind {
-        ExprKind::Lvalue | ExprKind::RvalueDatum => {
-            trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id)
-        }
-        ExprKind::RvalueDps => {
-            trans_rvalue_dps_unadjusted(bcx, expr, dest)
-        }
-        ExprKind::RvalueStmt => {
-            trans_rvalue_stmt_unadjusted(bcx, expr)
-        }
-    };
-
-    bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
-}
-
-/// Translates an expression, returning a datum (and new block) encapsulating the result. When
-/// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
-/// stack.
-pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                         expr: &hir::Expr)
-                         -> DatumBlock<'blk, 'tcx, Expr> {
-    debug!("trans(expr={:?})", expr);
-
-    let mut bcx = bcx;
-    let fcx = bcx.fcx;
-    let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
-    let adjusted_global = !qualif.intersects(ConstQualif::NON_STATIC_BORROWS);
-    let global = if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
-        match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
-                                                            bcx.fcx.param_substs,
-                                                            consts::TrueConst::No) {
-            Ok(global) => {
-                if qualif.intersects(ConstQualif::HAS_STATIC_BORROWS) {
-                    // Is borrowed as 'static, must return lvalue.
-
-                    // Cast pointer to global, because constants have different types.
-                    let const_ty = expr_ty_adjusted(bcx, expr);
-                    let llty = type_of::type_of(bcx.ccx(), const_ty);
-                    let global = PointerCast(bcx, global, llty.ptr_to());
-                    let datum = Datum::new(global, const_ty, Lvalue::new("expr::trans"));
-                    return DatumBlock::new(bcx, datum.to_expr_datum());
-                }
-
-                // Otherwise, keep around and perform adjustments, if needed.
-                let const_ty = if adjusted_global {
-                    expr_ty_adjusted(bcx, expr)
-                } else {
-                    expr_ty(bcx, expr)
-                };
-
-                // This could use a better heuristic.
-                Some(if type_is_immediate(bcx.ccx(), const_ty) {
-                    // Cast pointer to global, because constants have different types.
-                    let llty = type_of::type_of(bcx.ccx(), const_ty);
-                    let global = PointerCast(bcx, global, llty.ptr_to());
-                    // Maybe just get the value directly, instead of loading it?
-                    immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
-                } else {
-                    let scratch = alloc_ty(bcx, const_ty, "const");
-                    call_lifetime_start(bcx, scratch);
-                    let lldest = if !const_ty.is_structural() {
-                        // Cast pointer to slot, because constants have different types.
-                        PointerCast(bcx, scratch, val_ty(global))
-                    } else {
-                        // In this case, memcpy_ty calls llvm.memcpy after casting both
-                        // source and destination to i8*, so we don't need any casts.
-                        scratch
-                    };
-                    memcpy_ty(bcx, lldest, global, const_ty);
-                    Datum::new(scratch, const_ty, Rvalue::new(ByRef))
-                })
-            },
-            Err(consts::ConstEvalFailure::Runtime(_)) => {
-                // in case const evaluation errors, translate normally
-                // debug assertions catch the same errors
-                // see RFC 1229
-                None
-            },
-            Err(consts::ConstEvalFailure::Compiletime(_)) => {
-                // generate a dummy llvm value
-                let const_ty = expr_ty(bcx, expr);
-                let llty = type_of::type_of(bcx.ccx(), const_ty);
-                let dummy = C_undef(llty.ptr_to());
-                Some(Datum::new(dummy, const_ty, Rvalue::new(ByRef)))
-            },
-        }
-    } else {
-        None
-    };
-
-    let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
-                                                                          expr.id,
-                                                                          expr.span,
-                                                                          false);
-    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
-    let datum = match global {
-        Some(rvalue) => rvalue.to_expr_datum(),
-        None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
-    };
-    let datum = if adjusted_global {
-        datum // trans::consts already performed adjustments.
-    } else {
-        unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
-    };
-    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
-    return DatumBlock::new(bcx, datum);
-}
-
-pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
-    StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
-}
-
-pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
-    StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
-}
-
-pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
-    Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
-    Store(bcx, Load(bcx, get_meta(bcx, src_ptr)), get_meta(bcx, dst_ptr));
-}
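(Illustrative aside, not part of the removed file.) The fat pointers that `get_dataptr`, `get_meta` and `copy_fat_ptr` pick apart are the source-level slice and trait-object pointers: a data pointer plus metadata (a length or a vtable pointer), i.e. two machine words on typical targets. A minimal sketch:

use std::mem::size_of;

fn main() {
    // Thin pointer: one word.
    println!("&u8:    {} bytes", size_of::<&u8>());
    // Fat pointers: data pointer + metadata (length or vtable), two words
    // on typical targets.
    println!("&[u8]:  {} bytes", size_of::<&[u8]>());
    println!("&Debug: {} bytes", size_of::<&std::fmt::Debug>());
}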
-
-fn adjustment_required<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                   expr: &hir::Expr) -> bool {
-    let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
-        None => { return false; }
-        Some(adj) => adj
-    };
-
-    // Don't skip a conversion from Box<T> to &T, etc.
-    if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
-        return true;
-    }
-
-    match adjustment {
-        AdjustReifyFnPointer => {
-            // FIXME(#19925) once fn item types are
-            // zero-sized, we'll need to return true here
-            false
-        }
-        AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
-            // purely a type-level thing
-            false
-        }
-        AdjustDerefRef(ref adj) => {
-            // We are a bit paranoid about adjustments and thus might have a re-
-            // borrow here which merely derefs and then refs again (it might have
-            // a different region or mutability, but we don't care here).
-            !(adj.autoderefs == 1 && adj.autoref.is_some() && adj.unsize.is_none())
-        }
-    }
-}
-
-/// Helper for `trans` that applies adjustments from `expr` to `datum`, which should be the
-/// unadjusted translation of `expr`.
-fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                 expr: &hir::Expr,
-                                 datum: Datum<'tcx, Expr>)
-                                 -> DatumBlock<'blk, 'tcx, Expr>
-{
-    let mut bcx = bcx;
-    let mut datum = datum;
-    let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
-        None => {
-            return DatumBlock::new(bcx, datum);
-        }
-        Some(adj) => { adj }
-    };
-    debug!("unadjusted datum for expr {:?}: {} adjustment={:?}",
-           expr,
-           datum.to_string(bcx.ccx()),
-           adjustment);
-    match adjustment {
-        AdjustReifyFnPointer => {
-            // FIXME(#19925) once fn item types are
-            // zero-sized, we'll need to do something here
-        }
-        AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
-            // purely a type-level thing
-        }
-        AdjustDerefRef(ref adj) => {
-            let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() {
-                // We are a bit paranoid about adjustments and thus might have a re-
-                // borrow here which merely derefs and then refs again (it might have
-                // a different region or mutability, but we don't care here).
-                match datum.ty.sty {
-                    // Don't skip a conversion from Box<T> to &T, etc.
-                    ty::TyRef(..) => {
-                        if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
-                            // Don't skip an overloaded deref.
-                            0
-                        } else {
-                            1
-                        }
-                    }
-                    _ => 0
-                }
-            } else {
-                0
-            };
-
-            if adj.autoderefs > skip_reborrows {
-                // Schedule cleanup.
-                let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
-                datum = unpack_datum!(bcx, deref_multiple(bcx, expr,
-                                                          lval.to_expr_datum(),
-                                                          adj.autoderefs - skip_reborrows));
-            }
-
-            // (You might think there is a more elegant way to do this than a
-            // skip_reborrows bool, but then you remember that the borrow checker exists).
-            if skip_reborrows == 0 && adj.autoref.is_some() {
-                datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr));
-            }
-
-            if let Some(target) = adj.unsize {
-                // We do not arrange cleanup ourselves; if we already are an
-                // L-value, then cleanup will have already been scheduled (and
-                // the `datum.to_rvalue_datum` call below will emit code to zero
-                // the drop flag when moving out of the L-value). If we are an
-                // R-value, then we do not need to schedule cleanup.
-                let source_datum = unpack_datum!(bcx,
-                    datum.to_rvalue_datum(bcx, "__coerce_source"));
-
-                let target = bcx.monomorphize(&target);
-
-                let scratch = alloc_ty(bcx, target, "__coerce_target");
-                call_lifetime_start(bcx, scratch);
-                let target_datum = Datum::new(scratch, target,
-                                              Rvalue::new(ByRef));
-                bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum);
-                datum = Datum::new(scratch, target,
-                                   RvalueExpr(Rvalue::new(ByRef)));
-            }
-        }
-    }
-    debug!("after adjustments, datum={}", datum.to_string(bcx.ccx()));
-    DatumBlock::new(bcx, datum)
-}
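(Orientation only; `takes_slice` below is a hypothetical helper, not from the removed source.) The adjustments that `adjustment_required` and `apply_adjustments` deal with correspond to implicit conversions in user code, e.g.:

fn takes_slice(_s: &[i32]) {}

fn main() {
    let a = [1, 2, 3];
    // `&a` is adjusted from &[i32; 3] to &[i32]: a deref of the reference,
    // a re-borrow, and an unsize step -- the AdjustDerefRef case above.
    takes_slice(&a);

    let v: Vec<i32> = vec![1, 2, 3];
    // `&v` goes through Vec's Deref impl (an overloaded autoderef, which the
    // code above never skips) plus a re-borrow to become &[i32].
    takes_slice(&v);
}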
-
-fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                              span: codemap::Span,
-                              source: Datum<'tcx, Rvalue>,
-                              target: Datum<'tcx, Rvalue>)
-                              -> Block<'blk, 'tcx> {
-    let mut bcx = bcx;
-    debug!("coerce_unsized({} -> {})",
-           source.to_string(bcx.ccx()),
-           target.to_string(bcx.ccx()));
-
-    match (&source.ty.sty, &target.ty.sty) {
-        (&ty::TyBox(a), &ty::TyBox(b)) |
-        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
-         &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
-        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
-         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
-        (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
-         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
-            let (inner_source, inner_target) = (a, b);
-
-            let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) {
-                // Normally, the source is a thin pointer and we are
-                // adding extra info to make a fat pointer. The exception
-                // is when we are upcasting an existing object fat pointer
-                // to use a different vtable. In that case, we want to
-                // load out the original data pointer so we can repackage
-                // it.
-                (Load(bcx, get_dataptr(bcx, source.val)),
-                Some(Load(bcx, get_meta(bcx, source.val))))
-            } else {
-                let val = if source.kind.is_by_ref() {
-                    load_ty(bcx, source.val, source.ty)
-                } else {
-                    source.val
-                };
-                (val, None)
-            };
-
-            let info = unsized_info(bcx.ccx(), inner_source, inner_target,
-                                    old_info, bcx.fcx.param_substs);
-
-            // Compute the base pointer. This doesn't change the pointer value,
-            // but merely its type.
-            let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to();
-            let base = PointerCast(bcx, base, ptr_ty);
-
-            Store(bcx, base, get_dataptr(bcx, target.val));
-            Store(bcx, info, get_meta(bcx, target.val));
-        }
-
-        // This can be extended to enums and tuples in the future.
-        // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
-        (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => {
-            assert_eq!(def_id_a, def_id_b);
-
-            // The target is already by-ref because it's to be written to.
-            let source = unpack_datum!(bcx, source.to_ref_datum(bcx));
-            assert!(target.kind.is_by_ref());
-
-            let kind = custom_coerce_unsize_info(bcx.ccx(), source.ty, target.ty);
-
-            let repr_source = adt::represent_type(bcx.ccx(), source.ty);
-            let src_fields = match &*repr_source {
-                &adt::Repr::Univariant(ref s, _) => &s.fields,
-                _ => bcx.sess().span_bug(span,
-                                         &format!("Non univariant struct? (repr_source: {:?})",
-                                                  repr_source)),
-            };
-            let repr_target = adt::represent_type(bcx.ccx(), target.ty);
-            let target_fields = match &*repr_target {
-                &adt::Repr::Univariant(ref s, _) => &s.fields,
-                _ => bcx.sess().span_bug(span,
-                                         &format!("Non univariant struct? (repr_target: {:?})",
-                                                  repr_target)),
-            };
-
-            let coerce_index = match kind {
-                CustomCoerceUnsized::Struct(i) => i
-            };
-            assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len());
-
-            let source_val = adt::MaybeSizedValue::sized(source.val);
-            let target_val = adt::MaybeSizedValue::sized(target.val);
-
-            let iter = src_fields.iter().zip(target_fields).enumerate();
-            for (i, (src_ty, target_ty)) in iter {
-                let ll_source = adt::trans_field_ptr(bcx, &repr_source, source_val, Disr(0), i);
-                let ll_target = adt::trans_field_ptr(bcx, &repr_target, target_val, Disr(0), i);
-
-                // If this is the field we need to coerce, recurse on it.
-                if i == coerce_index {
-                    coerce_unsized(bcx, span,
-                                   Datum::new(ll_source, src_ty,
-                                              Rvalue::new(ByRef)),
-                                   Datum::new(ll_target, target_ty,
-                                              Rvalue::new(ByRef)));
-                } else {
-                    // Otherwise, simply copy the data from the source.
-                    assert!(src_ty.is_phantom_data() || src_ty == target_ty);
-                    memcpy_ty(bcx, ll_target, ll_source, src_ty);
-                }
-            }
-        }
-        _ => bcx.sess().bug(&format!("coerce_unsized: invalid coercion {:?} -> {:?}",
-                                     source.ty,
-                                     target.ty))
-    }
-    bcx
-}
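(Again purely for orientation, not from the removed file.) The coercions that reach `coerce_unsized` look like this in user code; the metadata stored through `get_meta` is a length for slices and a vtable pointer (computed by `unsized_info`) for trait objects:

use std::fmt::Debug;

fn main() {
    let arr = [1i32, 2, 3];
    let slice: &[i32] = &arr;                // &[i32; 3] -> &[i32], metadata = length
    let obj: &Debug = &arr[0];               // &i32 -> &Debug, metadata = vtable
    let boxed: Box<Debug> = Box::new(7i32);  // Box<i32> -> Box<Debug>, same shape
    println!("{} {:?} {:?}", slice.len(), obj, boxed);
}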
-
-/// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
-/// that the expr represents.
-///
-/// If this expression is an rvalue, this implies introducing a temporary.  In other words,
-/// something like `x().f` is translated into roughly the equivalent of
-///
-///   { tmp = x(); tmp.f }
-pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                   expr: &hir::Expr,
-                                   name: &str)
-                                   -> DatumBlock<'blk, 'tcx, Lvalue> {
-    let mut bcx = bcx;
-    let datum = unpack_datum!(bcx, trans(bcx, expr));
-    return datum.to_lvalue_datum(bcx, name, expr.id);
-}
-
-/// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
-/// directly.
-fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                expr: &hir::Expr)
-                                -> DatumBlock<'blk, 'tcx, Expr> {
-    let mut bcx = bcx;
-
-    debug!("trans_unadjusted(expr={:?})", expr);
-    let _indenter = indenter();
-
-    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
-
-    return match expr_kind(bcx.tcx(), expr) {
-        ExprKind::Lvalue | ExprKind::RvalueDatum => {
-            let datum = unpack_datum!(bcx, {
-                trans_datum_unadjusted(bcx, expr)
-            });
-
-            DatumBlock {bcx: bcx, datum: datum}
-        }
-
-        ExprKind::RvalueStmt => {
-            bcx = trans_rvalue_stmt_unadjusted(bcx, expr);
-            nil(bcx, expr_ty(bcx, expr))
-        }
-
-        ExprKind::RvalueDps => {
-            let ty = expr_ty(bcx, expr);
-            if type_is_zero_size(bcx.ccx(), ty) {
-                bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
-                nil(bcx, ty)
-            } else {
-                let scratch = rvalue_scratch_datum(bcx, ty, "");
-                bcx = trans_rvalue_dps_unadjusted(
-                    bcx, expr, SaveIn(scratch.val));
-
-                // Note: this is not obviously a good idea.  It causes
-                // immediate values to be loaded immediately after a
-                // return from a call or other similar expression,
-                // which in turn leads to allocas having shorter
-                // lifetimes and hence larger stack frames.  However,
-                // it can also lead to more register pressure.
-                // Still, in practice it seems to increase
-                // performance, since we have fewer problems with
-                // morestack churn.
-                let scratch = unpack_datum!(
-                    bcx, scratch.to_appropriate_datum(bcx));
-
-                DatumBlock::new(bcx, scratch.to_expr_datum())
-            }
-        }
-    };
-
-    fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>)
-                       -> DatumBlock<'blk, 'tcx, Expr> {
-        let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
-        let datum = immediate_rvalue(llval, ty);
-        DatumBlock::new(bcx, datum.to_expr_datum())
-    }
-}
-
-fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                      expr: &hir::Expr)
-                                      -> DatumBlock<'blk, 'tcx, Expr> {
-    let mut bcx = bcx;
-    let fcx = bcx.fcx;
-    let _icx = push_ctxt("trans_datum_unadjusted");
-
-    match expr.node {
-        hir::ExprType(ref e, _) => {
-            trans(bcx, &e)
-        }
-        hir::ExprPath(..) => {
-            trans_def(bcx, expr, bcx.def(expr.id))
-        }
-        hir::ExprField(ref base, name) => {
-            trans_rec_field(bcx, &base, name.node)
-        }
-        hir::ExprTupField(ref base, idx) => {
-            trans_rec_tup_field(bcx, &base, idx.node)
-        }
-        hir::ExprIndex(ref base, ref idx) => {
-            trans_index(bcx, expr, &base, &idx, MethodCall::expr(expr.id))
-        }
-        hir::ExprBox(ref contents) => {
-            // Special case for `Box<T>`
-            let box_ty = expr_ty(bcx, expr);
-            let contents_ty = expr_ty(bcx, &contents);
-            match box_ty.sty {
-                ty::TyBox(..) => {
-                    trans_uniq_expr(bcx, expr, box_ty, &contents, contents_ty)
-                }
-                _ => bcx.sess().span_bug(expr.span,
-                                         "expected unique box")
-            }
-
-        }
-        hir::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &lit),
-        hir::ExprBinary(op, ref lhs, ref rhs) => {
-            trans_binary(bcx, expr, op, &lhs, &rhs)
-        }
-        hir::ExprUnary(op, ref x) => {
-            trans_unary(bcx, expr, op, &x)
-        }
-        hir::ExprAddrOf(_, ref x) => {
-            match x.node {
-                hir::ExprRepeat(..) | hir::ExprVec(..) => {
-                    // Special case for slices.
-                    let cleanup_debug_loc =
-                        debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
-                                                                      x.id,
-                                                                      x.span,
-                                                                      false);
-                    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
-                    let datum = unpack_datum!(
-                        bcx, tvec::trans_slice_vec(bcx, expr, &x));
-                    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id);
-                    DatumBlock::new(bcx, datum)
-                }
-                _ => {
-                    trans_addr_of(bcx, expr, &x)
-                }
-            }
-        }
-        hir::ExprCast(ref val, _) => {
-            // Datum output mode means this is a scalar cast:
-            trans_imm_cast(bcx, &val, expr.id)
-        }
-        _ => {
-            bcx.tcx().sess.span_bug(
-                expr.span,
-                &format!("trans_rvalue_datum_unadjusted reached \
-                         fall-through case: {:?}",
-                        expr.node));
-        }
-    }
-}
-
-fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
-                              base: &hir::Expr,
-                              get_idx: F)
-                              -> DatumBlock<'blk, 'tcx, Expr> where
-    F: FnOnce(&'blk ty::ctxt<'tcx>, &VariantInfo<'tcx>) -> usize,
-{
-    let mut bcx = bcx;
-    let _icx = push_ctxt("trans_rec_field");
-
-    let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field"));
-    let bare_ty = base_datum.ty;
-    let repr = adt::represent_type(bcx.ccx(), bare_ty);
-    let vinfo = VariantInfo::from_ty(bcx.tcx(), bare_ty, None);
-
-    let ix = get_idx(bcx.tcx(), &vinfo);
-    let d = base_datum.get_element(
-        bcx,
-        vinfo.fields[ix].1,
-        |srcval| {
-            adt::trans_field_ptr(bcx, &repr, srcval, vinfo.discr, ix)
-        });
-
-    if type_is_sized(bcx.tcx(), d.ty) {
-        DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
-    } else {
-        let scratch = rvalue_scratch_datum(bcx, d.ty, "");
-        Store(bcx, d.val, get_dataptr(bcx, scratch.val));
-        let info = Load(bcx, get_meta(bcx, base_datum.val));
-        Store(bcx, info, get_meta(bcx, scratch.val));
-
-        // Always generate an lvalue datum, because this pointer doesn't own
-        // the data and cleanup is scheduled elsewhere.
-        DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr(d.kind)))
-    }
-}
-
-/// Translates `base.field`.
-fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                               base: &hir::Expr,
-                               field: ast::Name)
-                               -> DatumBlock<'blk, 'tcx, Expr> {
-    trans_field(bcx, base, |_, vinfo| vinfo.field_index(field))
-}
-
-/// Translates `base.<idx>`.
-fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                   base: &hir::Expr,
-                                   idx: usize)
-                                   -> DatumBlock<'blk, 'tcx, Expr> {
-    trans_field(bcx, base, |_, _| idx)
-}
-
-fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                           index_expr: &hir::Expr,
-                           base: &hir::Expr,
-                           idx: &hir::Expr,
-                           method_call: MethodCall)
-                           -> DatumBlock<'blk, 'tcx, Expr> {
-    //! Translates `base[idx]`.
-
-    let _icx = push_ctxt("trans_index");
-    let ccx = bcx.ccx();
-    let mut bcx = bcx;
-
-    let index_expr_debug_loc = index_expr.debug_loc();
-
-    // Check for overloaded index.
-    let method_ty = ccx.tcx()
-                       .tables
-                       .borrow()
-                       .method_map
-                       .get(&method_call)
-                       .map(|method| method.ty);
-    let elt_datum = match method_ty {
-        Some(method_ty) => {
-            let method_ty = monomorphize_type(bcx, method_ty);
-
-            let base_datum = unpack_datum!(bcx, trans(bcx, base));
-
-            // Translate index expression.
-            let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
-
-            let ref_ty = // invoked methods have LB regions instantiated:
-                bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
-            let elt_ty = match ref_ty.builtin_deref(true, ty::NoPreference) {
-                None => {
-                    bcx.tcx().sess.span_bug(index_expr.span,
-                                            "index method didn't return a \
-                                             dereferenceable type?!")
-                }
-                Some(elt_tm) => elt_tm.ty,
-            };
-
-            // Overloaded. Evaluate `trans_overloaded_op`, which will
-            // invoke the user's index() method, which basically yields
-            // a `&T` pointer.  We can then proceed down the normal
-            // path (below) to dereference that `&T`.
-            let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt");
-            unpack_result!(bcx,
-                           trans_overloaded_op(bcx,
-                                               index_expr,
-                                               method_call,
-                                               base_datum,
-                                               Some((ix_datum, idx.id)),
-                                               Some(SaveIn(scratch.val)),
-                                               false));
-            let datum = scratch.to_expr_datum();
-            let lval = Lvalue::new("expr::trans_index overload");
-            if type_is_sized(bcx.tcx(), elt_ty) {
-                Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr(lval))
-            } else {
-                Datum::new(datum.val, elt_ty, LvalueExpr(lval))
-            }
-        }
-        None => {
-            let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx,
-                                                                base,
-                                                                "index"));
-
-            // Translate index expression and cast to a suitable LLVM integer.
-            // Rust is less strict than LLVM in this regard.
-            let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
-            let ix_val = ix_datum.to_llscalarish(bcx);
-            let ix_size = machine::llbitsize_of_real(bcx.ccx(),
-                                                     val_ty(ix_val));
-            let int_size = machine::llbitsize_of_real(bcx.ccx(),
-                                                      ccx.int_type());
-            let ix_val = {
-                if ix_size < int_size {
-                    if expr_ty(bcx, idx).is_signed() {
-                        SExt(bcx, ix_val, ccx.int_type())
-                    } else { ZExt(bcx, ix_val, ccx.int_type()) }
-                } else if ix_size > int_size {
-                    Trunc(bcx, ix_val, ccx.int_type())
-                } else {
-                    ix_val
-                }
-            };
-
-            let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx());
-
-            let (base, len) = base_datum.get_vec_base_and_len(bcx);
-
-            debug!("trans_index: base {}", bcx.val_to_string(base));
-            debug!("trans_index: len {}", bcx.val_to_string(len));
-
-            let bounds_check = ICmp(bcx,
-                                    llvm::IntUGE,
-                                    ix_val,
-                                    len,
-                                    index_expr_debug_loc);
-            let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
-            let expected = Call(bcx,
-                                expect,
-                                &[bounds_check, C_bool(ccx, false)],
-                                None,
-                                index_expr_debug_loc);
-            bcx = with_cond(bcx, expected, |bcx| {
-                controlflow::trans_fail_bounds_check(bcx,
-                                                     expr_info(index_expr),
-                                                     ix_val,
-                                                     len)
-            });
-            let elt = InBoundsGEP(bcx, base, &[ix_val]);
-            let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
-            let lval = Lvalue::new("expr::trans_index fallback");
-            Datum::new(elt, unit_ty, LvalueExpr(lval))
-        }
-    };
-
-    DatumBlock::new(bcx, elt_datum)
-}
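(Small illustrative note, not part of the removed source.) The two arms above are, respectively, overloaded indexing through the `Index` trait, which yields a reference, and built-in array/slice indexing, which gets the `llvm.expect`-guarded bounds check and the `InBoundsGEP`:

use std::collections::HashMap;

fn main() {
    let mut m = HashMap::new();
    m.insert("answer", 42);
    // Overloaded path: desugars to a call of Index::index, yielding a &i32
    // that the deref machinery then loads from.
    let x = m["answer"];

    // Built-in path: bounds-checked indexing of a fixed-size array.
    let a = [10, 20, 30];
    let y = a[1];

    println!("{} {}", x, y);
}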
-
-fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                         ref_expr: &hir::Expr,
-                         def: Def)
-                         -> DatumBlock<'blk, 'tcx, Expr> {
-    //! Translates a reference to a path.
-
-    let _icx = push_ctxt("trans_def_lvalue");
-    match def {
-        Def::Fn(..) | Def::Method(..) |
-        Def::Struct(..) | Def::Variant(..) => {
-            let datum = trans_def_fn_unadjusted(bcx.ccx(), ref_expr, def,
-                                                bcx.fcx.param_substs);
-            DatumBlock::new(bcx, datum.to_expr_datum())
-        }
-        Def::Static(did, _) => {
-            let const_ty = expr_ty(bcx, ref_expr);
-            let val = get_static_val(bcx.ccx(), did, const_ty);
-            let lval = Lvalue::new("expr::trans_def");
-            DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr(lval)))
-        }
-        Def::Const(_) | Def::AssociatedConst(_) => {
-            bcx.sess().span_bug(ref_expr.span,
-                "constant expression should not reach expr::trans_def")
-        }
-        _ => {
-            DatumBlock::new(bcx, trans_local_var(bcx, def).to_expr_datum())
-        }
-    }
-}
-
-fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                            expr: &hir::Expr)
-                                            -> Block<'blk, 'tcx> {
-    let mut bcx = bcx;
-    let _icx = push_ctxt("trans_rvalue_stmt");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
-
-    match expr.node {
-        hir::ExprBreak(label_opt) => {
-            controlflow::trans_break(bcx, expr, label_opt.map(|l| l.node.name))
-        }
-        hir::ExprType(ref e, _) => {
-            trans_into(bcx, &e, Ignore)
-        }
-        hir::ExprAgain(label_opt) => {
-            controlflow::trans_cont(bcx, expr, label_opt.map(|l| l.node.name))
-        }
-        hir::ExprRet(ref ex) => {
-            // Check to see if the return expression itself is reachable.
-            // This can occur when the inner expression contains a return
-            let reachable = if let Some(ref cfg) = bcx.fcx.cfg {
-                cfg.node_is_reachable(expr.id)
-            } else {
-                true
-            };
-
-            if reachable {
-                controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
-            } else {
-                // If it's not reachable, just translate the inner expression
-                // directly. This avoids having to manage a return slot when
-                // it won't actually be used anyway.
-                if let &Some(ref x) = ex {
-                    bcx = trans_into(bcx, &x, Ignore);
-                }
-                // Mark the end of the block as unreachable. Once we get to
-                // a return expression, there's no more we should be doing
-                // after this.
-                Unreachable(bcx);
-                bcx
-            }
-        }
-        hir::ExprWhile(ref cond, ref body, _) => {
-            controlflow::trans_while(bcx, expr, &cond, &body)
-        }
-        hir::ExprLoop(ref body, _) => {
-            controlflow::trans_loop(bcx, expr, &body)
-        }
-        hir::ExprAssign(ref dst, ref src) => {
-            let src_datum = unpack_datum!(bcx, trans(bcx, &src));
-            let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &dst, "assign"));
-
-            if bcx.fcx.type_needs_drop(dst_datum.ty) {
-                // If there are destructors involved, make sure we
-                // are copying from an rvalue, since that cannot possibly
-                // alias an lvalue. We are concerned about code like:
-                //
-                //   a = a
-                //
-                // but also
-                //
-                //   a = a.b
-                //
-                // where e.g. a : Option<Foo> and a.b :
-                // Option<Foo>. In that case, freeing `a` before the
-                // assignment may also free `a.b`!
-                //
-                // We could avoid this intermediary with some analysis
-                // to determine whether `dst` may possibly own `src`.
-                debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
-                let src_datum = unpack_datum!(
-                    bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
-                let opt_hint_datum = dst_datum.kind.drop_flag_info.hint_datum(bcx);
-                let opt_hint_val = opt_hint_datum.map(|d|d.to_value());
-
-                // 1. Drop the data at the destination, passing the
-                //    drop-hint in case the lvalue has already been
-                //    dropped or moved.
-                bcx = glue::drop_ty_core(bcx,
-                                         dst_datum.val,
-                                         dst_datum.ty,
-                                         expr.debug_loc(),
-                                         false,
-                                         opt_hint_val);
-
-                // 2. We are overwriting the destination; ensure that
-                //    its drop-hint (if any) says "initialized."
-                if let Some(hint_val) = opt_hint_val {
-                    let hint_llval = hint_val.value();
-                    let drop_needed = C_u8(bcx.fcx.ccx, adt::DTOR_NEEDED_HINT);
-                    Store(bcx, drop_needed, hint_llval);
-                }
-                src_datum.store_to(bcx, dst_datum.val)
-            } else {
-                src_datum.store_to(bcx, dst_datum.val)
-            }
-        }
-        hir::ExprAssignOp(op, ref dst, ref src) => {
-            let has_method_map = bcx.tcx()
-                                    .tables
-                                    .borrow()
-                                    .method_map
-                                    .contains_key(&MethodCall::expr(expr.id));
-
-            if has_method_map {
-                let dst = unpack_datum!(bcx, trans(bcx, &dst));
-                let src_datum = unpack_datum!(bcx, trans(bcx, &src));
-                trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), dst,
-                                    Some((src_datum, src.id)), None, false).bcx
-            } else {
-                trans_assign_op(bcx, expr, op, &dst, &src)
-            }
-        }
-        hir::ExprInlineAsm(ref a) => {
-            asm::trans_inline_asm(bcx, a)
-        }
-        _ => {
-            bcx.tcx().sess.span_bug(
-                expr.span,
-                &format!("trans_rvalue_stmt_unadjusted reached \
-                         fall-through case: {:?}",
-                        expr.node));
-        }
-    }
-}
-
-fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                           expr: &hir::Expr,
-                                           dest: Dest)
-                                           -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
-    let mut bcx = bcx;
-    let tcx = bcx.tcx();
-
-    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
-
-    match expr.node {
-        hir::ExprType(ref e, _) => {
-            trans_into(bcx, &e, dest)
-        }
-        hir::ExprPath(..) => {
-            trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest)
-        }
-        hir::ExprIf(ref cond, ref thn, ref els) => {
-            controlflow::trans_if(bcx, expr.id, &cond, &thn, els.as_ref().map(|e| &**e), dest)
-        }
-        hir::ExprMatch(ref discr, ref arms, _) => {
-            _match::trans_match(bcx, expr, &discr, &arms[..], dest)
-        }
-        hir::ExprBlock(ref blk) => {
-            controlflow::trans_block(bcx, &blk, dest)
-        }
-        hir::ExprStruct(_, ref fields, ref base) => {
-            trans_struct(bcx,
-                         &fields[..],
-                         base.as_ref().map(|e| &**e),
-                         expr.span,
-                         expr.id,
-                         node_id_type(bcx, expr.id),
-                         dest)
-        }
-        hir::ExprRange(ref start, ref end) => {
-            // FIXME it is just not right that we are synthesising ast nodes in
-            // trans. Shudder.
-            fn make_field(field_name: &str, expr: P<hir::Expr>) -> hir::Field {
-                hir::Field {
-                    name: codemap::dummy_spanned(token::intern(field_name)),
-                    expr: expr,
-                    span: codemap::DUMMY_SP,
-                }
-            }
-
-            // A range just desugars into a struct.
-            // Note that the type of the start and end may not be the same, but
-            // they should only differ in their lifetime, which should not matter
-            // in trans.
-            let (did, fields, ty_params) = match (start, end) {
-                (&Some(ref start), &Some(ref end)) => {
-                    // Desugar to Range
-                    let fields = vec![make_field("start", start.clone()),
-                                      make_field("end", end.clone())];
-                    (tcx.lang_items.range_struct(), fields, vec![node_id_type(bcx, start.id)])
-                }
-                (&Some(ref start), &None) => {
-                    // Desugar to RangeFrom
-                    let fields = vec![make_field("start", start.clone())];
-                    (tcx.lang_items.range_from_struct(), fields, vec![node_id_type(bcx, start.id)])
-                }
-                (&None, &Some(ref end)) => {
-                    // Desugar to RangeTo
-                    let fields = vec![make_field("end", end.clone())];
-                    (tcx.lang_items.range_to_struct(), fields, vec![node_id_type(bcx, end.id)])
-                }
-                _ => {
-                    // Desugar to RangeFull
-                    (tcx.lang_items.range_full_struct(), vec![], vec![])
-                }
-            };
-
-            if let Some(did) = did {
-                let substs = Substs::new_type(ty_params, vec![]);
-                trans_struct(bcx,
-                             &fields,
-                             None,
-                             expr.span,
-                             expr.id,
-                             tcx.mk_struct(tcx.lookup_adt_def(did),
-                                           tcx.mk_substs(substs)),
-                             dest)
-            } else {
-                tcx.sess.span_bug(expr.span,
-                                  "No lang item for ranges (how did we get this far?)")
-            }
-        }
-        hir::ExprTup(ref args) => {
-            let numbered_fields: Vec<(usize, &hir::Expr)> =
-                args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect();
-            trans_adt(bcx,
-                      expr_ty(bcx, expr),
-                      Disr(0),
-                      &numbered_fields[..],
-                      None,
-                      dest,
-                      expr.debug_loc())
-        }
-        hir::ExprLit(ref lit) => {
-            match lit.node {
-                ast::LitKind::Str(ref s, _) => {
-                    tvec::trans_lit_str(bcx, expr, (*s).clone(), dest)
-                }
-                _ => {
-                    bcx.tcx()
-                       .sess
-                       .span_bug(expr.span,
-                                 "trans_rvalue_dps_unadjusted shouldn't be \
-                                  translating this type of literal")
-                }
-            }
-        }
-        hir::ExprVec(..) | hir::ExprRepeat(..) => {
-            tvec::trans_fixed_vstore(bcx, expr, dest)
-        }
-        hir::ExprClosure(_, ref decl, ref body) => {
-            let dest = match dest {
-                SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
-                Ignore => closure::Dest::Ignore(bcx.ccx())
-            };
-
-            // NB. To get the id of the closure, we don't use
-            // `local_def_id(id)`, but rather we extract the closure
-            // def-id from the expr's type. This is because this may
-            // be an inlined expression from another crate, and we
-            // want to get the ORIGINAL closure def-id, since that is
-            // the key we need to find the closure-kind and
-            // closure-type etc.
-            let (def_id, substs) = match expr_ty(bcx, expr).sty {
-                ty::TyClosure(def_id, ref substs) => (def_id, substs),
-                ref t =>
-                    bcx.tcx().sess.span_bug(
-                        expr.span,
-                        &format!("closure expr without closure type: {:?}", t)),
-            };
-
-            closure::trans_closure_expr(dest,
-                                        decl,
-                                        body,
-                                        expr.id,
-                                        def_id,
-                                        substs,
-                                        &expr.attrs).unwrap_or(bcx)
-        }
-        hir::ExprCall(ref f, ref args) => {
-            if bcx.tcx().is_method_call(expr.id) {
-                trans_overloaded_call(bcx,
-                                      expr,
-                                      &f,
-                                      &args[..],
-                                      Some(dest))
-            } else {
-                callee::trans_call(bcx,
-                                   expr,
-                                   &f,
-                                   callee::ArgExprs(&args[..]),
-                                   dest)
-            }
-        }
-        hir::ExprMethodCall(_, _, ref args) => {
-            callee::trans_method_call(bcx,
-                                      expr,
-                                      &args[0],
-                                      callee::ArgExprs(&args[..]),
-                                      dest)
-        }
-        hir::ExprBinary(op, ref lhs, ref rhs) => {
-            // if not overloaded, would be RvalueDatumExpr
-            let lhs = unpack_datum!(bcx, trans(bcx, &lhs));
-            let rhs_datum = unpack_datum!(bcx, trans(bcx, &rhs));
-            trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), lhs,
-                                Some((rhs_datum, rhs.id)), Some(dest),
-                                !rustc_front::util::is_by_value_binop(op.node)).bcx
-        }
-        hir::ExprUnary(op, ref subexpr) => {
-            // if not overloaded, would be RvalueDatumExpr
-            let arg = unpack_datum!(bcx, trans(bcx, &subexpr));
-            trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id),
-                                arg, None, Some(dest), !rustc_front::util::is_by_value_unop(op)).bcx
-        }
-        hir::ExprIndex(ref base, ref idx) => {
-            // if not overloaded, would be RvalueDatumExpr
-            let base = unpack_datum!(bcx, trans(bcx, &base));
-            let idx_datum = unpack_datum!(bcx, trans(bcx, &idx));
-            trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), base,
-                                Some((idx_datum, idx.id)), Some(dest), true).bcx
-        }
-        hir::ExprCast(..) => {
-            // Trait casts used to come this way, now they should be coercions.
-            bcx.tcx().sess.span_bug(expr.span, "DPS expr_cast (residual trait cast?)")
-        }
-        hir::ExprAssignOp(op, _, _) => {
-            bcx.tcx().sess.span_bug(
-                expr.span,
-                &format!("augmented assignment `{}=` should always be a rvalue_stmt",
-                         rustc_front::util::binop_to_string(op.node)))
-        }
-        _ => {
-            bcx.tcx().sess.span_bug(
-                expr.span,
-                &format!("trans_rvalue_dps_unadjusted reached fall-through \
-                         case: {:?}",
-                        expr.node));
-        }
-    }
-}
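(For reference only, not part of the removed file.) The `hir::ExprRange` arm above synthesises the same lang-item structs that range syntax denotes in user code:

use std::ops::{Range, RangeFrom, RangeTo, RangeFull};

fn main() {
    let r1: Range<i32> = 1..4;        // Range { start: 1, end: 4 }
    let r2: RangeFrom<i32> = 1..;     // RangeFrom { start: 1 }
    let r3: RangeTo<i32> = ..4;       // RangeTo { end: 4 }
    let r4: RangeFull = ..;           // RangeFull
    println!("{:?} {:?} {:?} {:?}", r1, r2, r3, r4);
}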
-
-fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                        ref_expr: &hir::Expr,
-                                        def: Def,
-                                        dest: Dest)
-                                        -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_def_dps_unadjusted");
-
-    let lldest = match dest {
-        SaveIn(lldest) => lldest,
-        Ignore => { return bcx; }
-    };
-
-    match def {
-        Def::Variant(tid, vid) => {
-            let variant = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid);
-            if let ty::VariantKind::Tuple = variant.kind() {
-                // N-ary variant.
-                let llfn = callee::trans_fn_ref(bcx.ccx(), vid,
-                                                ExprId(ref_expr.id),
-                                                bcx.fcx.param_substs).val;
-                Store(bcx, llfn, lldest);
-                return bcx;
-            } else {
-                // Nullary variant.
-                let ty = expr_ty(bcx, ref_expr);
-                let repr = adt::represent_type(bcx.ccx(), ty);
-                adt::trans_set_discr(bcx, &repr, lldest, Disr::from(variant.disr_val));
-                return bcx;
-            }
-        }
-        Def::Struct(..) => {
-            let ty = expr_ty(bcx, ref_expr);
-            match ty.sty {
-                ty::TyStruct(def, _) if def.has_dtor() => {
-                    let repr = adt::represent_type(bcx.ccx(), ty);
-                    adt::trans_set_discr(bcx, &repr, lldest, Disr(0));
-                }
-                _ => {}
-            }
-            bcx
-        }
-        _ => {
-            bcx.tcx().sess.span_bug(ref_expr.span, &format!(
-                "Non-DPS def {:?} referened by {}",
-                def, bcx.node_id_to_string(ref_expr.id)));
-        }
-    }
-}
-
-pub fn trans_def_fn_unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                         ref_expr: &hir::Expr,
-                                         def: Def,
-                                         param_substs: &'tcx Substs<'tcx>)
-                                         -> Datum<'tcx, Rvalue> {
-    let _icx = push_ctxt("trans_def_datum_unadjusted");
-
-    match def {
-        Def::Fn(did) |
-        Def::Struct(did) | Def::Variant(_, did) => {
-            callee::trans_fn_ref(ccx, did, ExprId(ref_expr.id), param_substs)
-        }
-        Def::Method(method_did) => {
-            match ccx.tcx().impl_or_trait_item(method_did).container() {
-                ty::ImplContainer(_) => {
-                    callee::trans_fn_ref(ccx, method_did,
-                                         ExprId(ref_expr.id),
-                                         param_substs)
-                }
-                ty::TraitContainer(trait_did) => {
-                    meth::trans_static_method_callee(ccx, method_did,
-                                                     trait_did, ref_expr.id,
-                                                     param_substs)
-                }
-            }
-        }
-        _ => {
-            ccx.tcx().sess.span_bug(ref_expr.span, &format!(
-                    "trans_def_fn_unadjusted invoked on: {:?} for {:?}",
-                    def,
-                    ref_expr));
-        }
-    }
-}
-
-/// Translates a reference to a local variable or argument. This always results in an lvalue datum.
-pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                   def: Def)
-                                   -> Datum<'tcx, Lvalue> {
-    let _icx = push_ctxt("trans_local_var");
-
-    match def {
-        Def::Upvar(_, nid, _, _) => {
-            // Can't move upvars, so this is never a ZeroMemLastUse.
-            let local_ty = node_id_type(bcx, nid);
-            let lval = Lvalue::new_with_hint("expr::trans_local_var (upvar)",
-                                             bcx, nid, HintKind::ZeroAndMaintain);
-            match bcx.fcx.llupvars.borrow().get(&nid) {
-                Some(&val) => Datum::new(val, local_ty, lval),
-                None => {
-                    bcx.sess().bug(&format!(
-                        "trans_local_var: no llval for upvar {} found",
-                        nid));
-                }
-            }
-        }
-        Def::Local(_, nid) => {
-            let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
-                Some(&v) => v,
-                None => {
-                    bcx.sess().bug(&format!(
-                        "trans_local_var: no datum for local/arg {} found",
-                        nid));
-                }
-            };
-            debug!("take_local(nid={}, v={}, ty={})",
-                   nid, bcx.val_to_string(datum.val), datum.ty);
-            datum
-        }
-        _ => {
-            bcx.sess().unimpl(&format!(
-                "unsupported def type in trans_local_var: {:?}",
-                def));
-        }
-    }
-}
-
-fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                            fields: &[hir::Field],
-                            base: Option<&hir::Expr>,
-                            expr_span: codemap::Span,
-                            expr_id: ast::NodeId,
-                            ty: Ty<'tcx>,
-                            dest: Dest) -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_rec");
-
-    let tcx = bcx.tcx();
-    let vinfo = VariantInfo::of_node(tcx, ty, expr_id);
-
-    let mut need_base = vec![true; vinfo.fields.len()];
-
-    let numbered_fields = fields.iter().map(|field| {
-        let pos = vinfo.field_index(field.name.node);
-        need_base[pos] = false;
-        (pos, &*field.expr)
-    }).collect::<Vec<_>>();
-
-    let optbase = match base {
-        Some(base_expr) => {
-            let mut leftovers = Vec::new();
-            for (i, b) in need_base.iter().enumerate() {
-                if *b {
-                    leftovers.push((i, vinfo.fields[i].1));
-                }
-            }
-            Some(StructBaseInfo {expr: base_expr,
-                                 fields: leftovers })
-        }
-        None => {
-            if need_base.iter().any(|b| *b) {
-                tcx.sess.span_bug(expr_span, "missing fields and no base expr")
-            }
-            None
-        }
-    };
-
-    trans_adt(bcx,
-              ty,
-              vinfo.discr,
-              &numbered_fields,
-              optbase,
-              dest,
-              DebugLoc::At(expr_id, expr_span))
-}
-
-/// Information that `trans_adt` needs in order to fill in the fields
-/// of a struct copied from a base struct (e.g., from an expression
-/// like `Foo { a: b, ..base }`).
-///
-/// Note that `fields` may be empty; the base expression must always be
-/// evaluated for side-effects.
-pub struct StructBaseInfo<'a, 'tcx> {
-    /// The base expression; will be evaluated after all explicit fields.
-    expr: &'a hir::Expr,
-    /// The indices of fields to copy paired with their types.
-    fields: Vec<(usize, Ty<'tcx>)>
-}
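(Illustrative sketch with a hypothetical `Config` type, not from the removed source.) `StructBaseInfo` describes the `..base` part of functional record update; per the issue 23112 note in `trans_adt` below, the explicit fields are evaluated before the base expression:

#[derive(Debug)]
struct Config {
    threads: usize,
    verbose: bool,
}

fn main() {
    let base = Config { threads: 1, verbose: true };
    // `threads: 8` is evaluated first; `base` is evaluated afterwards and its
    // remaining fields (here `verbose`) are copied into place.
    let tuned = Config { threads: 8, ..base };
    println!("{:?}", tuned);
}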
-
-/// Constructs an ADT instance:
-///
-/// - `fields` should be a list of field indices paired with the
-/// expression to store into that field.  The initializers will be
-/// evaluated in the order specified by `fields`.
-///
-/// - `optbase` contains information on the base struct (if any) from
-/// which remaining fields are copied; see comments on `StructBaseInfo`.
-pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                                 ty: Ty<'tcx>,
-                                 discr: Disr,
-                                 fields: &[(usize, &hir::Expr)],
-                                 optbase: Option<StructBaseInfo<'a, 'tcx>>,
-                                 dest: Dest,
-                                 debug_location: DebugLoc)
-                                 -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_adt");
-    let fcx = bcx.fcx;
-    let repr = adt::represent_type(bcx.ccx(), ty);
-
-    debug_location.apply(bcx.fcx);
-
-    // If we don't care about the result, just make a
-    // temporary stack slot
-    let addr = match dest {
-        SaveIn(pos) => pos,
-        Ignore => {
-            let llresult = alloc_ty(bcx, ty, "temp");
-            call_lifetime_start(bcx, llresult);
-            llresult
-        }
-    };
-
-    debug!("trans_adt");
-
-    // This scope holds intermediates that must be cleaned up should
-    // a panic occur before the ADT as a whole is ready.
-    let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
-
-    if ty.is_simd() {
-        // Issue 23112: The original logic appeared vulnerable to the same
-        // order-of-eval bug. But SIMD values are tuple-structs;
-        // i.e. functional record update (FRU) syntax is unavailable.
-        //
-        // To be safe, double-check that we did not get here via FRU.
-        assert!(optbase.is_none());
-
-        // This is the constructor of a SIMD type, such types are
-        // always primitive machine types and so do not have a
-        // destructor or require any clean-up.
-        let llty = type_of::type_of(bcx.ccx(), ty);
-
-        // Keep the vector in a register, running through the fields and
-        // `insertelement`ing them directly into that register
-        // (i.e. avoid GEPi and `store`s to an alloca).
-        let mut vec_val = C_undef(llty);
-
-        for &(i, ref e) in fields {
-            let block_datum = trans(bcx, &e);
-            bcx = block_datum.bcx;
-            let position = C_uint(bcx.ccx(), i);
-            let value = block_datum.datum.to_llscalarish(bcx);
-            vec_val = InsertElement(bcx, vec_val, value, position);
-        }
-        Store(bcx, vec_val, addr);
-    } else if let Some(base) = optbase {
-        // Issue 23112: If there is a base, then order-of-eval
-        // requires field expressions eval'ed before base expression.
-
-        // First, trans field expressions to temporary scratch values.
-        let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
-            let datum = unpack_datum!(bcx, trans(bcx, &e));
-            (i, datum)
-        }).collect();
-
-        debug_location.apply(bcx.fcx);
-
-        // Second, trans the base to the dest.
-        assert_eq!(discr, Disr(0));
-
-        let addr = adt::MaybeSizedValue::sized(addr);
-        match expr_kind(bcx.tcx(), &base.expr) {
-            ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => {
-                bcx = trans_into(bcx, &base.expr, SaveIn(addr.value));
-            },
-            ExprKind::RvalueStmt => {
-                bcx.tcx().sess.bug("unexpected expr kind for struct base expr")
-            }
-            _ => {
-                let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &base.expr, "base"));
-                for &(i, t) in &base.fields {
-                    let datum = base_datum.get_element(
-                            bcx, t, |srcval| adt::trans_field_ptr(bcx, &repr, srcval, discr, i));
-                    assert!(type_is_sized(bcx.tcx(), datum.ty));
-                    let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
-                    bcx = datum.store_to(bcx, dest);
-                }
-            }
-        }
-
-        // Finally, move scratch field values into actual field locations
-        for (i, datum) in scratch_vals {
-            let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
-            bcx = datum.store_to(bcx, dest);
-        }
-    } else {
-        // No base means we can write all fields directly in place.
-        let addr = adt::MaybeSizedValue::sized(addr);
-        for &(i, ref e) in fields {
-            let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
-            let e_ty = expr_ty_adjusted(bcx, &e);
-            bcx = trans_into(bcx, &e, SaveIn(dest));
-            let scope = cleanup::CustomScope(custom_cleanup_scope);
-            fcx.schedule_lifetime_end(scope, dest);
-            // FIXME: nonzeroing move should generalize to fields
-            fcx.schedule_drop_mem(scope, dest, e_ty, None);
-        }
-    }
-
-    adt::trans_set_discr(bcx, &repr, addr, discr);
-
-    fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
-
-    // If we don't care about the result, drop the temporary we made
-    match dest {
-        SaveIn(_) => bcx,
-        Ignore => {
-            bcx = glue::drop_ty(bcx, addr, ty, debug_location);
-            base::call_lifetime_end(bcx, addr);
-            bcx
-        }
-    }
-}
-
-
-fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                   expr: &hir::Expr,
-                                   lit: &ast::Lit)
-                                   -> DatumBlock<'blk, 'tcx, Expr> {
-    // must not be a string constant; that is an RvalueDpsExpr
-    let _icx = push_ctxt("trans_immediate_lit");
-    let ty = expr_ty(bcx, expr);
-    let v = consts::const_lit(bcx.ccx(), expr, lit);
-    immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
-}
-
-fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                           expr: &hir::Expr,
-                           op: hir::UnOp,
-                           sub_expr: &hir::Expr)
-                           -> DatumBlock<'blk, 'tcx, Expr> {
-    let ccx = bcx.ccx();
-    let mut bcx = bcx;
-    let _icx = push_ctxt("trans_unary_datum");
-
-    let method_call = MethodCall::expr(expr.id);
-
-    // The only overloaded operator that is translated to a datum
-    // is an overloaded deref, since it always yields a `&T`.
-    // Otherwise, we should be in the RvalueDpsExpr path.
-    assert!(op == hir::UnDeref || !ccx.tcx().is_method_call(expr.id));
-
-    let un_ty = expr_ty(bcx, expr);
-
-    let debug_loc = expr.debug_loc();
-
-    match op {
-        hir::UnNot => {
-            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
-            let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
-            immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
-        }
-        hir::UnNeg => {
-            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
-            let val = datum.to_llscalarish(bcx);
-            let (bcx, llneg) = {
-                if un_ty.is_fp() {
-                    let result = FNeg(bcx, val, debug_loc);
-                    (bcx, result)
-                } else {
-                    let is_signed = un_ty.is_signed();
-                    let result = Neg(bcx, val, debug_loc);
-                    let bcx = if bcx.ccx().check_overflow() && is_signed {
-                        let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty);
-                        let is_min = ICmp(bcx, llvm::IntEQ, val,
-                                          C_integral(llty, min, true), debug_loc);
-                        with_cond(bcx, is_min, |bcx| {
-                            let msg = InternedString::new(
-                                "attempted to negate with overflow");
-                            controlflow::trans_fail(bcx, expr_info(expr), msg)
-                        })
-                    } else {
-                        bcx
-                    };
-                    (bcx, result)
-                }
-            };
-            immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
-        }
-        hir::UnDeref => {
-            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
-            deref_once(bcx, expr, datum, method_call)
-        }
-    }
-}
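
A minimal standalone sketch (outside this module, using only the standard library) of the case the `is_min` guard above protects against: negating the minimum value of a signed type is not representable, so with overflow checks enabled it must trap.

    fn main() {
        let min = std::i32::MIN;
        // checked_neg returns None exactly when the guarded overflow would occur.
        assert_eq!(min.checked_neg(), None);
        assert_eq!(5i32.checked_neg(), Some(-5));
    }
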
-
-fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                               box_expr: &hir::Expr,
-                               box_ty: Ty<'tcx>,
-                               contents: &hir::Expr,
-                               contents_ty: Ty<'tcx>)
-                               -> DatumBlock<'blk, 'tcx, Expr> {
-    let _icx = push_ctxt("trans_uniq_expr");
-    let fcx = bcx.fcx;
-    assert!(type_is_sized(bcx.tcx(), contents_ty));
-    let llty = type_of::type_of(bcx.ccx(), contents_ty);
-    let size = llsize_of(bcx.ccx(), llty);
-    let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
-    let llty_ptr = llty.ptr_to();
-    let Result { bcx, val } = malloc_raw_dyn(bcx,
-                                             llty_ptr,
-                                             box_ty,
-                                             size,
-                                             align,
-                                             box_expr.debug_loc());
-    // Unique boxes do not allocate for zero-size types. The standard library
-    // may assume that `free` is never called on the pointer returned for
-    // `Box<ZeroSizeType>`.
-    let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
-        trans_into(bcx, contents, SaveIn(val))
-    } else {
-        let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
-        fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
-                                val, cleanup::HeapExchange, contents_ty);
-        let bcx = trans_into(bcx, contents, SaveIn(val));
-        fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
-        bcx
-    };
-    immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
-}
-
-fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                             expr: &hir::Expr,
-                             subexpr: &hir::Expr)
-                             -> DatumBlock<'blk, 'tcx, Expr> {
-    let _icx = push_ctxt("trans_addr_of");
-    let mut bcx = bcx;
-    let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
-    let ty = expr_ty(bcx, expr);
-    if !type_is_sized(bcx.tcx(), sub_datum.ty) {
-        // Always generate an lvalue datum, because this pointer doesn't own
-        // the data and cleanup is scheduled elsewhere.
-        DatumBlock::new(bcx, Datum::new(sub_datum.val, ty, LvalueExpr(sub_datum.kind)))
-    } else {
-        // Sized value, ref to a thin pointer
-        immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock()
-    }
-}
-
-fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                  binop_expr: &hir::Expr,
-                                  binop_ty: Ty<'tcx>,
-                                  op: hir::BinOp,
-                                  lhs: Datum<'tcx, Rvalue>,
-                                  rhs: Datum<'tcx, Rvalue>)
-                                  -> DatumBlock<'blk, 'tcx, Expr>
-{
-    let _icx = push_ctxt("trans_scalar_binop");
-
-    let tcx = bcx.tcx();
-    let lhs_t = lhs.ty;
-    assert!(!lhs_t.is_simd());
-    let is_float = lhs_t.is_fp();
-    let is_signed = lhs_t.is_signed();
-    let info = expr_info(binop_expr);
-
-    let binop_debug_loc = binop_expr.debug_loc();
-
-    let mut bcx = bcx;
-    let lhs = lhs.to_llscalarish(bcx);
-    let rhs = rhs.to_llscalarish(bcx);
-    let val = match op.node {
-      hir::BiAdd => {
-        if is_float {
-            FAdd(bcx, lhs, rhs, binop_debug_loc)
-        } else {
-            let (newbcx, res) = with_overflow_check(
-                bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
-            bcx = newbcx;
-            res
-        }
-      }
-      hir::BiSub => {
-        if is_float {
-            FSub(bcx, lhs, rhs, binop_debug_loc)
-        } else {
-            let (newbcx, res) = with_overflow_check(
-                bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
-            bcx = newbcx;
-            res
-        }
-      }
-      hir::BiMul => {
-        if is_float {
-            FMul(bcx, lhs, rhs, binop_debug_loc)
-        } else {
-            let (newbcx, res) = with_overflow_check(
-                bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
-            bcx = newbcx;
-            res
-        }
-      }
-      hir::BiDiv => {
-        if is_float {
-            FDiv(bcx, lhs, rhs, binop_debug_loc)
-        } else {
-            // Only zero-check integers; fp / 0 yields inf or NaN rather than trapping
-            bcx = base::fail_if_zero_or_overflows(bcx,
-                                                  expr_info(binop_expr),
-                                                  op,
-                                                  lhs,
-                                                  rhs,
-                                                  lhs_t);
-            if is_signed {
-                SDiv(bcx, lhs, rhs, binop_debug_loc)
-            } else {
-                UDiv(bcx, lhs, rhs, binop_debug_loc)
-            }
-        }
-      }
-      hir::BiRem => {
-        if is_float {
-            // LLVM currently always lowers `frem` instructions to the
-            // appropriate library calls typically found in libm. Notably, f64
-            // gets wired up to `fmod` and f32 gets wired up to `fmodf`.
-            // Inconveniently for us, 32-bit MSVC does not actually have an
-            // `fmodf` symbol; it's instead just an inline function in a header
-            // that goes up to an f64, uses `fmod`, and comes back down to an f32.
-            //
-            // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
-            // still unconditionally lower frem instructions over 32-bit floats
-            // to a call to `fmodf`. To work around this we special case MSVC
-            // 32-bit float rem instructions and instead do the call out to
-            // `fmod` ourselves.
-            //
-            // Note that this is currently duplicated with src/libcore/ops.rs
-            // which does the same thing, and it would be nice to perhaps unify
-            // these two implementations one day! Also note that we call `fmod`
-            // for both 32 and 64-bit floats because if we emit any FRem
-            // instruction at all then LLVM is capable of optimizing it into a
-            // 32-bit FRem (which we're trying to avoid).
-            let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
-                           tcx.sess.target.target.arch == "x86";
-            if use_fmod {
-                let f64t = Type::f64(bcx.ccx());
-                let fty = Type::func(&[f64t, f64t], &f64t);
-                let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty,
-                                                tcx.types.f64);
-                if lhs_t == tcx.types.f32 {
-                    let lhs = FPExt(bcx, lhs, f64t);
-                    let rhs = FPExt(bcx, rhs, f64t);
-                    let res = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
-                    FPTrunc(bcx, res, Type::f32(bcx.ccx()))
-                } else {
-                    Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc)
-                }
-            } else {
-                FRem(bcx, lhs, rhs, binop_debug_loc)
-            }
-        } else {
-            // Only zero-check integers; fp % 0 is NaN rather than a trap
-            bcx = base::fail_if_zero_or_overflows(bcx,
-                                                  expr_info(binop_expr),
-                                                  op, lhs, rhs, lhs_t);
-            if is_signed {
-                SRem(bcx, lhs, rhs, binop_debug_loc)
-            } else {
-                URem(bcx, lhs, rhs, binop_debug_loc)
-            }
-        }
-      }
-      hir::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
-      hir::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
-      hir::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
-      hir::BiShl => {
-          let (newbcx, res) = with_overflow_check(
-              bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc);
-          bcx = newbcx;
-          res
-      }
-      hir::BiShr => {
-          let (newbcx, res) = with_overflow_check(
-              bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc);
-          bcx = newbcx;
-          res
-      }
-      hir::BiEq | hir::BiNe | hir::BiLt | hir::BiGe | hir::BiLe | hir::BiGt => {
-          base::compare_scalar_types(bcx, lhs, rhs, lhs_t, op.node, binop_debug_loc)
-      }
-      _ => {
-        bcx.tcx().sess.span_bug(binop_expr.span, "unexpected binop");
-      }
-    };
-
-    immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
-}
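
A minimal sketch of the `fmod` workaround described in the comment above, assuming only that a C `fmod` symbol is available from libm or the C runtime: the f32 operands are widened to f64, `fmod` is called, and the result is truncated back, mirroring the FPExt / Call / FPTrunc sequence emitted here.

    extern "C" {
        fn fmod(a: f64, b: f64) -> f64; // provided by libm / the C runtime
    }

    fn rem_f32_via_fmod(lhs: f32, rhs: f32) -> f32 {
        // Widen, take the remainder at f64 precision, then narrow again.
        unsafe { fmod(lhs as f64, rhs as f64) as f32 }
    }

    fn main() {
        assert_eq!(rem_f32_via_fmod(7.5, 2.0), 1.5);
    }
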
-
-// refinement types would obviate the need for this
-enum lazy_binop_ty {
-    lazy_and,
-    lazy_or,
-}
-
-fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                binop_expr: &hir::Expr,
-                                op: lazy_binop_ty,
-                                a: &hir::Expr,
-                                b: &hir::Expr)
-                                -> DatumBlock<'blk, 'tcx, Expr> {
-    let _icx = push_ctxt("trans_lazy_binop");
-    let binop_ty = expr_ty(bcx, binop_expr);
-    let fcx = bcx.fcx;
-
-    let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a);
-    let lhs = lhs.to_llscalarish(past_lhs);
-
-    if past_lhs.unreachable.get() {
-        return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
-    }
-
-    let join = fcx.new_id_block("join", binop_expr.id);
-    let before_rhs = fcx.new_id_block("before_rhs", b.id);
-
-    match op {
-      lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
-      lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
-    }
-
-    let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
-    let rhs = rhs.to_llscalarish(past_rhs);
-
-    if past_rhs.unreachable.get() {
-        return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
-    }
-
-    Br(past_rhs, join.llbb, DebugLoc::None);
-    let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
-                  &[past_lhs.llbb, past_rhs.llbb]);
-
-    return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
-}
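
A small source-level illustration of the behaviour this lowering implements: the right-hand side only runs when the left-hand side does not already decide the result, which is why the separate `before_rhs` block and the phi at `join` exist.

    fn main() {
        fn rhs(ran: &mut bool) -> bool {
            *ran = true;
            true
        }
        let mut ran = false;
        let _ = false && rhs(&mut ran); // lhs decides the result
        assert!(!ran);                  // the rhs block is never reached
        let _ = true && rhs(&mut ran);  // lhs does not decide the result
        assert!(ran);                   // rhs runs; the phi merges both values
    }
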
-
-fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                            expr: &hir::Expr,
-                            op: hir::BinOp,
-                            lhs: &hir::Expr,
-                            rhs: &hir::Expr)
-                            -> DatumBlock<'blk, 'tcx, Expr> {
-    let _icx = push_ctxt("trans_binary");
-    let ccx = bcx.ccx();
-
-    // if overloaded, would be RvalueDpsExpr
-    assert!(!ccx.tcx().is_method_call(expr.id));
-
-    match op.node {
-        hir::BiAnd => {
-            trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs)
-        }
-        hir::BiOr => {
-            trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs)
-        }
-        _ => {
-            let mut bcx = bcx;
-            let binop_ty = expr_ty(bcx, expr);
-
-            let lhs = unpack_datum!(bcx, trans(bcx, lhs));
-            let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs"));
-            debug!("trans_binary (expr {}): lhs={}",
-                   expr.id, lhs.to_string(ccx));
-            let rhs = unpack_datum!(bcx, trans(bcx, rhs));
-            let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs"));
-            debug!("trans_binary (expr {}): rhs={}",
-                   expr.id, rhs.to_string(ccx));
-
-            if type_is_fat_ptr(ccx.tcx(), lhs.ty) {
-                assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty),
-                        "built-in binary operators on fat pointers are homogeneous");
-                assert_eq!(binop_ty, bcx.tcx().types.bool);
-                let val = base::compare_scalar_types(
-                    bcx,
-                    lhs.val,
-                    rhs.val,
-                    lhs.ty,
-                    op.node,
-                    expr.debug_loc());
-                immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
-            } else {
-                assert!(!type_is_fat_ptr(ccx.tcx(), rhs.ty),
-                        "built-in binary operators on fat pointers are homogeneous");
-                trans_scalar_binop(bcx, expr, binop_ty, op, lhs, rhs)
-            }
-        }
-    }
-}
-
-fn trans_overloaded_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                   expr: &hir::Expr,
-                                   method_call: MethodCall,
-                                   lhs: Datum<'tcx, Expr>,
-                                   rhs: Option<(Datum<'tcx, Expr>, ast::NodeId)>,
-                                   dest: Option<Dest>,
-                                   autoref: bool)
-                                   -> Result<'blk, 'tcx> {
-    callee::trans_call_inner(bcx,
-                             expr.debug_loc(),
-                             |bcx, arg_cleanup_scope| {
-                                meth::trans_method_callee(bcx,
-                                                          method_call,
-                                                          None,
-                                                          arg_cleanup_scope)
-                             },
-                             callee::ArgOverloadedOp(lhs, rhs, autoref),
-                             dest)
-}
-
-fn trans_overloaded_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                                         expr: &hir::Expr,
-                                         callee: &'a hir::Expr,
-                                         args: &'a [P<hir::Expr>],
-                                         dest: Option<Dest>)
-                                         -> Block<'blk, 'tcx> {
-    debug!("trans_overloaded_call {}", expr.id);
-    let method_call = MethodCall::expr(expr.id);
-    let mut all_args = vec!(callee);
-    all_args.extend(args.iter().map(|e| &**e));
-    unpack_result!(bcx,
-                   callee::trans_call_inner(bcx,
-                                            expr.debug_loc(),
-                                            |bcx, arg_cleanup_scope| {
-                                                meth::trans_method_callee(
-                                                    bcx,
-                                                    method_call,
-                                                    None,
-                                                    arg_cleanup_scope)
-                                            },
-                                            callee::ArgOverloadedCall(all_args),
-                                            dest));
-    bcx
-}
-
-pub fn cast_is_noop<'tcx>(tcx: &ty::ctxt<'tcx>,
-                          expr: &hir::Expr,
-                          t_in: Ty<'tcx>,
-                          t_out: Ty<'tcx>)
-                          -> bool {
-    if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) {
-        return true;
-    }
-
-    match (t_in.builtin_deref(true, ty::NoPreference),
-           t_out.builtin_deref(true, ty::NoPreference)) {
-        (Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => {
-            t_in == t_out
-        }
-        _ => {
-            // This condition isn't redundant with the check for CoercionCast:
-            // different types can be substituted into the same type, and
-            // == equality can be overconservative if there are regions.
-            t_in == t_out
-        }
-    }
-}
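
A small example of a cast that this check classifies as a no-op: both sides dereference to the same pointee type, so no code needs to be emitted for it.

    fn main() {
        let x = 5i32;
        let r: &i32 = &x;
        let p = r as *const i32; // &i32 -> *const i32: same pointee, no-op cast
        assert_eq!(unsafe { *p }, 5);
    }
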
-
-fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                              expr: &hir::Expr,
-                              id: ast::NodeId)
-                              -> DatumBlock<'blk, 'tcx, Expr>
-{
-    use middle::ty::cast::CastTy::*;
-    use middle::ty::cast::IntTy::*;
-
-    fn int_cast(bcx: Block,
-                lldsttype: Type,
-                llsrctype: Type,
-                llsrc: ValueRef,
-                signed: bool)
-                -> ValueRef
-    {
-        let _icx = push_ctxt("int_cast");
-        let srcsz = llsrctype.int_width();
-        let dstsz = lldsttype.int_width();
-        return if dstsz == srcsz {
-            BitCast(bcx, llsrc, lldsttype)
-        } else if srcsz > dstsz {
-            TruncOrBitCast(bcx, llsrc, lldsttype)
-        } else if signed {
-            SExtOrBitCast(bcx, llsrc, lldsttype)
-        } else {
-            ZExtOrBitCast(bcx, llsrc, lldsttype)
-        }
-    }
-
-    fn float_cast(bcx: Block,
-                  lldsttype: Type,
-                  llsrctype: Type,
-                  llsrc: ValueRef)
-                  -> ValueRef
-    {
-        let _icx = push_ctxt("float_cast");
-        let srcsz = llsrctype.float_width();
-        let dstsz = lldsttype.float_width();
-        return if dstsz > srcsz {
-            FPExt(bcx, llsrc, lldsttype)
-        } else if srcsz > dstsz {
-            FPTrunc(bcx, llsrc, lldsttype)
-        } else { llsrc };
-    }
-
-    let _icx = push_ctxt("trans_cast");
-    let mut bcx = bcx;
-    let ccx = bcx.ccx();
-
-    let t_in = expr_ty_adjusted(bcx, expr);
-    let t_out = node_id_type(bcx, id);
-
-    debug!("trans_cast({:?} as {:?})", t_in, t_out);
-    let mut ll_t_in = type_of::arg_type_of(ccx, t_in);
-    let ll_t_out = type_of::arg_type_of(ccx, t_out);
-    // Convert the value to be cast into a ValueRef, either by-ref or
-    // by-value as appropriate given its type:
-    let mut datum = unpack_datum!(bcx, trans(bcx, expr));
-
-    let datum_ty = monomorphize_type(bcx, datum.ty);
-
-    if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) {
-        datum.ty = t_out;
-        return DatumBlock::new(bcx, datum);
-    }
-
-    if type_is_fat_ptr(bcx.tcx(), t_in) {
-        assert!(datum.kind.is_by_ref());
-        if type_is_fat_ptr(bcx.tcx(), t_out) {
-            return DatumBlock::new(bcx, Datum::new(
-                PointerCast(bcx, datum.val, ll_t_out.ptr_to()),
-                t_out,
-                Rvalue::new(ByRef)
-            )).to_expr_datumblock();
-        } else {
-            // Return the address
-            return immediate_rvalue_bcx(bcx,
-                                        PointerCast(bcx,
-                                                    Load(bcx, get_dataptr(bcx, datum.val)),
-                                                    ll_t_out),
-                                        t_out).to_expr_datumblock();
-        }
-    }
-
-    let r_t_in = CastTy::from_ty(t_in).expect("bad input type for cast");
-    let r_t_out = CastTy::from_ty(t_out).expect("bad output type for cast");
-
-    let (llexpr, signed) = if let Int(CEnum) = r_t_in {
-        let repr = adt::represent_type(ccx, t_in);
-        let datum = unpack_datum!(
-            bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
-        let llexpr_ptr = datum.to_llref();
-        let discr = adt::trans_get_discr(bcx, &repr, llexpr_ptr,
-                                         Some(Type::i64(ccx)), true);
-        ll_t_in = val_ty(discr);
-        (discr, adt::is_discr_signed(&repr))
-    } else {
-        (datum.to_llscalarish(bcx), t_in.is_signed())
-    };
-
-    let newval = match (r_t_in, r_t_out) {
-        (Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => {
-            PointerCast(bcx, llexpr, ll_t_out)
-        }
-        (Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out),
-        (Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out),
-
-        (Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed),
-        (Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr),
-        (Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out),
-        (Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out),
-        (Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out),
-        (Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out),
-
-        _ => ccx.sess().span_bug(expr.span,
-                                  &format!("translating unsupported cast: \
-                                            {:?} -> {:?}",
-                                           t_in,
-                                           t_out)
-                                 )
-    };
-    return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
-}
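
A standalone illustration of the `int_cast` rules above: equal widths are a bitcast, narrowing truncates, widening sign-extends for signed sources and zero-extends for unsigned ones, and signed integer-to-float goes through SIToFP.

    fn main() {
        assert_eq!(0x1234_5678u32 as u16, 0x5678); // narrowing: truncate
        assert_eq!((-1i8) as i32, -1);             // signed widening: sign-extend
        assert_eq!((-1i8) as u8 as u32, 255);      // unsigned widening: zero-extend
        assert_eq!((-3i32) as f64, -3.0);          // Int -> Float, signed: SIToFP
    }
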
-
-fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                               expr: &hir::Expr,
-                               op: hir::BinOp,
-                               dst: &hir::Expr,
-                               src: &hir::Expr)
-                               -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_assign_op");
-    let mut bcx = bcx;
-
-    debug!("trans_assign_op(expr={:?})", expr);
-
-    // User-defined operator methods cannot be used with `+=` etc right now
-    assert!(!bcx.tcx().is_method_call(expr.id));
-
-    // Evaluate LHS (destination), which should be an lvalue
-    let dst = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"));
-    assert!(!bcx.fcx.type_needs_drop(dst.ty));
-    let lhs = load_ty(bcx, dst.val, dst.ty);
-    let lhs = immediate_rvalue(lhs, dst.ty);
-
-    // Evaluate RHS - FIXME(#28160) this sucks
-    let rhs = unpack_datum!(bcx, trans(bcx, &src));
-    let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "assign_op_rhs"));
-
-    // Perform computation and store the result
-    let result_datum = unpack_datum!(
-        bcx, trans_scalar_binop(bcx, expr, dst.ty, op, lhs, rhs));
-    return result_datum.store_to(bcx, dst.val);
-}
-
-fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                        datum: Datum<'tcx, Expr>,
-                        expr: &hir::Expr)
-                        -> DatumBlock<'blk, 'tcx, Expr> {
-    let mut bcx = bcx;
-
-    // Ensure cleanup of `datum` if not already scheduled and obtain
-    // a "by ref" pointer.
-    let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id));
-
-    // Compute final type. Note that we are loose with the region and
-    // mutability, since those things don't matter in trans.
-    let referent_ty = lv_datum.ty;
-    let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), referent_ty);
-
-    // Construct the resulting datum. The right datum to return here would be an Lvalue datum,
-    // because there is cleanup scheduled and the datum doesn't own the data. For thin pointers,
-    // however, we micro-optimize it to be an Rvalue datum, avoiding the extra alloca and level
-    // of indirection; for thin pointers this has no ill effects.
-    let kind  = if type_is_sized(bcx.tcx(), referent_ty) {
-        RvalueExpr(Rvalue::new(ByValue))
-    } else {
-        LvalueExpr(lv_datum.kind)
-    };
-
-    // Get the pointer.
-    let llref = lv_datum.to_llref();
-    DatumBlock::new(bcx, Datum::new(llref, ptr_ty, kind))
-}
-
-fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                              expr: &hir::Expr,
-                              datum: Datum<'tcx, Expr>,
-                              times: usize)
-                              -> DatumBlock<'blk, 'tcx, Expr> {
-    let mut bcx = bcx;
-    let mut datum = datum;
-    for i in 0..times {
-        let method_call = MethodCall::autoderef(expr.id, i as u32);
-        datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call));
-    }
-    DatumBlock { bcx: bcx, datum: datum }
-}
-
-fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                          expr: &hir::Expr,
-                          datum: Datum<'tcx, Expr>,
-                          method_call: MethodCall)
-                          -> DatumBlock<'blk, 'tcx, Expr> {
-    let ccx = bcx.ccx();
-
-    debug!("deref_once(expr={:?}, datum={}, method_call={:?})",
-           expr,
-           datum.to_string(ccx),
-           method_call);
-
-    let mut bcx = bcx;
-
-    // Check for overloaded deref.
-    let method_ty = ccx.tcx()
-                       .tables
-                       .borrow()
-                       .method_map
-                       .get(&method_call).map(|method| method.ty);
-
-    let datum = match method_ty {
-        Some(method_ty) => {
-            let method_ty = monomorphize_type(bcx, method_ty);
-
-            // Overloaded. Evaluate `trans_overloaded_op`, which will
-            // invoke the user's deref() method, which basically
-            // converts from the `Smaht<T>` pointer that we have into
-            // a `&T` pointer.  We can then proceed down the normal
-            // path (below) to dereference that `&T`.
-            let datum = if method_call.autoderef == 0 {
-                datum
-            } else {
-                // Always perform an AutoPtr when applying an overloaded auto-deref
-                unpack_datum!(bcx, auto_ref(bcx, datum, expr))
-            };
-
-            let ref_ty = // invoked methods have their LB regions instantiated
-                ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
-            let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
-
-            unpack_result!(bcx, trans_overloaded_op(bcx, expr, method_call,
-                                                    datum, None, Some(SaveIn(scratch.val)),
-                                                    false));
-            scratch.to_expr_datum()
-        }
-        None => {
-            // Not overloaded. We already have a pointer we know how to deref.
-            datum
-        }
-    };
-
-    let r = match datum.ty.sty {
-        ty::TyBox(content_ty) => {
-            // Make sure we have an lvalue datum here to get the
-            // proper cleanups scheduled
-            let datum = unpack_datum!(
-                bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));
-
-            if type_is_sized(bcx.tcx(), content_ty) {
-                let ptr = load_ty(bcx, datum.val, datum.ty);
-                DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(datum.kind)))
-            } else {
-                // A fat pointer and a DST lvalue have the same representation,
-                // just different types. Since there is no temporary for `*e`
-                // here (because it is unsized), we cannot emulate the sized
-                // object code path for running drop glue and free. Instead,
-                // we schedule cleanup for `e`, turning it into an lvalue.
-
-                let lval = Lvalue::new("expr::deref_once ty_uniq");
-                let datum = Datum::new(datum.val, content_ty, LvalueExpr(lval));
-                DatumBlock::new(bcx, datum)
-            }
-        }
-
-        ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) |
-        ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => {
-            let lval = Lvalue::new("expr::deref_once ptr");
-            if type_is_sized(bcx.tcx(), content_ty) {
-                let ptr = datum.to_llscalarish(bcx);
-
-                // Always generate an lvalue datum, even if datum.mode is
-                // an rvalue.  This is because datum.mode is only an
-                // rvalue for non-owning pointers like &T or *T, in which
-                // case cleanup *is* scheduled elsewhere, by the true
-                // owner (or, in the case of *T, by the user).
-                DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(lval)))
-            } else {
-                // A fat pointer and a DST lvalue have the same representation,
-                // just different types.
-                DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr(lval)))
-            }
-        }
-
-        _ => {
-            bcx.tcx().sess.span_bug(
-                expr.span,
-                &format!("deref invoked on expr of invalid type {:?}",
-                        datum.ty));
-        }
-    };
-
-    debug!("deref_once(expr={}, method_call={:?}, result={})",
-           expr.id, method_call, r.datum.to_string(ccx));
-
-    return r;
-}
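
A user-level sketch of the overloaded-deref case handled here: the `Deref` impl turns the smart pointer into an ordinary `&T`, and `*smart` then takes the normal dereference path described above. `Smart` is a made-up wrapper used only for illustration.

    use std::ops::Deref;

    struct Smart<T>(T);

    impl<T> Deref for Smart<T> {
        type Target = T;
        fn deref(&self) -> &T {
            &self.0
        }
    }

    fn main() {
        let s = Smart(42i32);
        assert_eq!(*s, 42); // desugars to *Deref::deref(&s)
    }
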
-
-#[derive(Debug)]
-enum OverflowOp {
-    Add,
-    Sub,
-    Mul,
-    Shl,
-    Shr,
-}
-
-impl OverflowOp {
-    fn codegen_strategy(&self) -> OverflowCodegen {
-        use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck};
-        match *self {
-            OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add),
-            OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub),
-            OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul),
-
-            OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl),
-            OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr),
-        }
-    }
-}
-
-enum OverflowCodegen {
-    ViaIntrinsic(OverflowOpViaIntrinsic),
-    ViaInputCheck(OverflowOpViaInputCheck),
-}
-
-enum OverflowOpViaInputCheck { Shl, Shr, }
-
-#[derive(Debug)]
-enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
-
-impl OverflowOpViaIntrinsic {
-    fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef {
-        let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
-        bcx.ccx().get_intrinsic(&name)
-    }
-    fn to_intrinsic_name(&self, tcx: &ty::ctxt, ty: Ty) -> &'static str {
-        use syntax::ast::IntTy::*;
-        use syntax::ast::UintTy::*;
-        use middle::ty::{TyInt, TyUint};
-
-        let new_sty = match ty.sty {
-            TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
-                "32" => TyInt(I32),
-                "64" => TyInt(I64),
-                _ => panic!("unsupported target word size")
-            },
-            TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
-                "32" => TyUint(U32),
-                "64" => TyUint(U64),
-                _ => panic!("unsupported target word size")
-            },
-            ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
-            _ => panic!("tried to get overflow intrinsic for {:?} applied to non-int type",
-                        *self)
-        };
-
-        match *self {
-            OverflowOpViaIntrinsic::Add => match new_sty {
-                TyInt(I8) => "llvm.sadd.with.overflow.i8",
-                TyInt(I16) => "llvm.sadd.with.overflow.i16",
-                TyInt(I32) => "llvm.sadd.with.overflow.i32",
-                TyInt(I64) => "llvm.sadd.with.overflow.i64",
-
-                TyUint(U8) => "llvm.uadd.with.overflow.i8",
-                TyUint(U16) => "llvm.uadd.with.overflow.i16",
-                TyUint(U32) => "llvm.uadd.with.overflow.i32",
-                TyUint(U64) => "llvm.uadd.with.overflow.i64",
-
-                _ => unreachable!(),
-            },
-            OverflowOpViaIntrinsic::Sub => match new_sty {
-                TyInt(I8) => "llvm.ssub.with.overflow.i8",
-                TyInt(I16) => "llvm.ssub.with.overflow.i16",
-                TyInt(I32) => "llvm.ssub.with.overflow.i32",
-                TyInt(I64) => "llvm.ssub.with.overflow.i64",
-
-                TyUint(U8) => "llvm.usub.with.overflow.i8",
-                TyUint(U16) => "llvm.usub.with.overflow.i16",
-                TyUint(U32) => "llvm.usub.with.overflow.i32",
-                TyUint(U64) => "llvm.usub.with.overflow.i64",
-
-                _ => unreachable!(),
-            },
-            OverflowOpViaIntrinsic::Mul => match new_sty {
-                TyInt(I8) => "llvm.smul.with.overflow.i8",
-                TyInt(I16) => "llvm.smul.with.overflow.i16",
-                TyInt(I32) => "llvm.smul.with.overflow.i32",
-                TyInt(I64) => "llvm.smul.with.overflow.i64",
-
-                TyUint(U8) => "llvm.umul.with.overflow.i8",
-                TyUint(U16) => "llvm.umul.with.overflow.i16",
-                TyUint(U32) => "llvm.umul.with.overflow.i32",
-                TyUint(U64) => "llvm.umul.with.overflow.i64",
-
-                _ => unreachable!(),
-            },
-        }
-    }
-
-    fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
-                                        info: NodeIdAndSpan,
-                                        lhs_t: Ty<'tcx>, lhs: ValueRef,
-                                        rhs: ValueRef,
-                                        binop_debug_loc: DebugLoc)
-                                        -> (Block<'blk, 'tcx>, ValueRef) {
-        let llfn = self.to_intrinsic(bcx, lhs_t);
-
-        let val = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
-        let result = ExtractValue(bcx, val, 0); // iN operation result
-        let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
-
-        let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
-                        binop_debug_loc);
-
-        let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
-        Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)],
-             None, binop_debug_loc);
-
-        let bcx =
-            base::with_cond(bcx, cond, |bcx|
-                controlflow::trans_fail(bcx, info,
-                    InternedString::new("arithmetic operation overflowed")));
-
-        (bcx, result)
-    }
-}
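
A standalone sketch of the value/flag pair that the `*.with.overflow` intrinsics return: `overflowing_add` exposes the same (wrapped result, did-it-overflow) shape that the two `ExtractValue` calls above pull out of the intrinsic's result.

    fn main() {
        let (res, overflowed) = 200u8.overflowing_add(100);
        assert_eq!((res, overflowed), (44, true)); // wrapped result plus overflow flag
        let (res, overflowed) = 1u8.overflowing_add(2);
        assert_eq!((res, overflowed), (3, false));
    }
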
-
-impl OverflowOpViaInputCheck {
-    fn build_with_input_check<'blk, 'tcx>(&self,
-                                          bcx: Block<'blk, 'tcx>,
-                                          info: NodeIdAndSpan,
-                                          lhs_t: Ty<'tcx>,
-                                          lhs: ValueRef,
-                                          rhs: ValueRef,
-                                          binop_debug_loc: DebugLoc)
-                                          -> (Block<'blk, 'tcx>, ValueRef)
-    {
-        let lhs_llty = val_ty(lhs);
-        let rhs_llty = val_ty(rhs);
-
-        // Panic if any bits are set outside of bits that we always
-        // mask in.
-        //
-        // Note that the mask's value is derived from the LHS type
-        // (since that is where the 32/64 distinction is relevant) but
-        // the mask's type must match the RHS type (since they will
-        // both be fed into an and-binop)
-        let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true);
-
-        let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
-        let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc);
-        let result = match *self {
-            OverflowOpViaInputCheck::Shl =>
-                build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
-            OverflowOpViaInputCheck::Shr =>
-                build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
-        };
-        let bcx =
-            base::with_cond(bcx, cond, |bcx|
-                controlflow::trans_fail(bcx, info,
-                    InternedString::new("shift operation overflowed")));
-
-        (bcx, result)
-    }
-}
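
A minimal sketch of the input check above for shifts: any shift-amount bit set outside the mask derived from the LHS width means the shift would overflow, which is what the checked shift operations report.

    fn main() {
        let width_mask: u32 = 32 - 1;                 // valid shift amounts for a 32-bit LHS
        let amount: u32 = 40;
        assert!((amount & !width_mask) != 0);         // "outer bits" are nonzero...
        assert_eq!(1u32.checked_shl(amount), None);   // ...so the checked shift fails
        assert_eq!(1u32.checked_shl(3), Some(8));
    }
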
-
-// Check if an integer or vector contains a nonzero element.
-fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                   value: ValueRef,
-                                   binop_debug_loc: DebugLoc) -> ValueRef {
-    let llty = val_ty(value);
-    let kind = llty.kind();
-    match kind {
-        TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc),
-        TypeKind::Vector => {
-            // Check if any elements of the vector are nonzero by treating
-            // it as a wide integer and checking if the integer is nonzero.
-            let width = llty.vector_length() as u64 * llty.element_type().int_width();
-            let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width));
-            build_nonzero_check(bcx, int_value, binop_debug_loc)
-        },
-        _ => panic!("build_nonzero_check: expected Integer or Vector, found {:?}", kind),
-    }
-}
-
-fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
-                                   lhs_t: Ty<'tcx>, lhs: ValueRef,
-                                   rhs: ValueRef,
-                                   binop_debug_loc: DebugLoc)
-                                   -> (Block<'blk, 'tcx>, ValueRef) {
-    if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
-    if bcx.ccx().check_overflow() {
-
-        match oop.codegen_strategy() {
-            OverflowCodegen::ViaIntrinsic(oop) =>
-                oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
-            OverflowCodegen::ViaInputCheck(oop) =>
-                oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
-        }
-    } else {
-        let res = match oop {
-            OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
-            OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
-            OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
-
-            OverflowOp::Shl =>
-                build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
-            OverflowOp::Shr =>
-                build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
-        };
-        (bcx, res)
-    }
-}
-
-/// We categorize expressions into three kinds.  The distinction between
-/// lvalue/rvalue is fundamental to the language.  The distinction between the
-/// two kinds of rvalues is an artifact of trans which reflects how we will
-/// generate code for that kind of expression.  See trans/expr.rs for more
-/// information.
-#[derive(Copy, Clone)]
-enum ExprKind {
-    Lvalue,
-    RvalueDps,
-    RvalueDatum,
-    RvalueStmt
-}
-
-fn expr_kind(tcx: &ty::ctxt, expr: &hir::Expr) -> ExprKind {
-    if tcx.is_method_call(expr.id) {
-        // Overloaded operations are generally calls, and hence they are
-        // generated via DPS, but there are a few exceptions:
-        return match expr.node {
-            // `a += b` has a unit result.
-            hir::ExprAssignOp(..) => ExprKind::RvalueStmt,
-
-            // the deref method invoked for `*a` always yields an `&T`
-            hir::ExprUnary(hir::UnDeref, _) => ExprKind::Lvalue,
-
-            // the index method invoked for `a[i]` always yields an `&T`
-            hir::ExprIndex(..) => ExprKind::Lvalue,
-
-            // in the general case, result could be any type, use DPS
-            _ => ExprKind::RvalueDps
-        };
-    }
-
-    match expr.node {
-        hir::ExprPath(..) => {
-            match tcx.resolve_expr(expr) {
-                Def::Struct(..) | Def::Variant(..) => {
-                    if let ty::TyBareFn(..) = tcx.node_id_to_type(expr.id).sty {
-                        // ctor function
-                        ExprKind::RvalueDatum
-                    } else {
-                        ExprKind::RvalueDps
-                    }
-                }
-
-                // Fn pointers are just scalar values.
-                Def::Fn(..) | Def::Method(..) => ExprKind::RvalueDatum,
-
-                // Note: there is actually a good case to be made that
-                // DefArg's, particularly those of immediate type, ought to be
-                // considered rvalues.
-                Def::Static(..) |
-                Def::Upvar(..) |
-                Def::Local(..) => ExprKind::Lvalue,
-
-                Def::Const(..) |
-                Def::AssociatedConst(..) => ExprKind::RvalueDatum,
-
-                def => {
-                    tcx.sess.span_bug(
-                        expr.span,
-                        &format!("uncategorized def for expr {}: {:?}",
-                                expr.id,
-                                def));
-                }
-            }
-        }
-
-        hir::ExprType(ref expr, _) => {
-            expr_kind(tcx, expr)
-        }
-
-        hir::ExprUnary(hir::UnDeref, _) |
-        hir::ExprField(..) |
-        hir::ExprTupField(..) |
-        hir::ExprIndex(..) => {
-            ExprKind::Lvalue
-        }
-
-        hir::ExprCall(..) |
-        hir::ExprMethodCall(..) |
-        hir::ExprStruct(..) |
-        hir::ExprRange(..) |
-        hir::ExprTup(..) |
-        hir::ExprIf(..) |
-        hir::ExprMatch(..) |
-        hir::ExprClosure(..) |
-        hir::ExprBlock(..) |
-        hir::ExprRepeat(..) |
-        hir::ExprVec(..) => {
-            ExprKind::RvalueDps
-        }
-
-        hir::ExprLit(ref lit) if lit.node.is_str() => {
-            ExprKind::RvalueDps
-        }
-
-        hir::ExprBreak(..) |
-        hir::ExprAgain(..) |
-        hir::ExprRet(..) |
-        hir::ExprWhile(..) |
-        hir::ExprLoop(..) |
-        hir::ExprAssign(..) |
-        hir::ExprInlineAsm(..) |
-        hir::ExprAssignOp(..) => {
-            ExprKind::RvalueStmt
-        }
-
-        hir::ExprLit(_) | // Note: LitStr is carved out above
-        hir::ExprUnary(..) |
-        hir::ExprBox(_) |
-        hir::ExprAddrOf(..) |
-        hir::ExprBinary(..) |
-        hir::ExprCast(..) => {
-            ExprKind::RvalueDatum
-        }
-    }
-}
diff --git a/src/librustc_trans/trans/foreign.rs b/src/librustc_trans/trans/foreign.rs
deleted file mode 100644 (file)
index 06a4d3f..0000000
+++ /dev/null
@@ -1,1070 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-
-use back::{abi, link};
-use llvm::{ValueRef, CallConv, get_param};
-use llvm;
-use middle::weak_lang_items;
-use trans::attributes;
-use trans::base::{llvm_linkage_by_name, push_ctxt};
-use trans::base;
-use trans::build::*;
-use trans::cabi;
-use trans::common::*;
-use trans::debuginfo::DebugLoc;
-use trans::declare;
-use trans::expr;
-use trans::machine;
-use trans::monomorphize;
-use trans::type_::Type;
-use trans::type_of::*;
-use trans::type_of;
-use middle::infer;
-use middle::ty::{self, Ty};
-use middle::subst::Substs;
-
-use std::cmp;
-use std::iter::once;
-use libc::c_uint;
-use syntax::abi::Abi;
-use syntax::attr;
-use syntax::codemap::Span;
-use syntax::parse::token::{InternedString, special_idents};
-use syntax::ast;
-use syntax::attr::AttrMetaMethods;
-
-use rustc_front::print::pprust;
-use rustc_front::hir;
-
-///////////////////////////////////////////////////////////////////////////
-// Type definitions
-
-struct ForeignTypes<'tcx> {
-    /// Rust signature of the function
-    fn_sig: ty::FnSig<'tcx>,
-
-    /// Adapter object for handling native ABI rules (trust me, you
-    /// don't want to know)
-    fn_ty: cabi::FnType,
-
-    /// LLVM types that will appear on the foreign function
-    llsig: LlvmSignature,
-}
-
-struct LlvmSignature {
-    // LLVM versions of the types of this function's arguments.
-    llarg_tys: Vec<Type> ,
-
-    // LLVM version of the type that this function returns.  Note that
-    // this *may not be* the declared return type of the foreign
-    // function, because the foreign function may opt to return via an
-    // out pointer.
-    llret_ty: Type,
-
-    /// True if there is a return value (not bottom, not unit)
-    ret_def: bool,
-}
-
-
-///////////////////////////////////////////////////////////////////////////
-// Calls to external functions
-
-pub fn llvm_calling_convention(ccx: &CrateContext,
-                               abi: Abi) -> CallConv {
-    use syntax::abi::Abi::*;
-    match ccx.sess().target.target.adjust_abi(abi) {
-        RustIntrinsic => {
-            // Intrinsics are emitted at the call site
-            ccx.sess().bug("asked to register intrinsic fn");
-        }
-        PlatformIntrinsic => {
-            // Intrinsics are emitted at the call site
-            ccx.sess().bug("asked to register platform intrinsic fn");
-        }
-
-        Rust => {
-            // FIXME(#3678) Implement linking to foreign fns with Rust ABI
-            ccx.sess().unimpl("foreign functions with Rust ABI");
-        }
-
-        RustCall => {
-            // FIXME(#3678) Implement linking to foreign fns with Rust ABI
-            ccx.sess().unimpl("foreign functions with RustCall ABI");
-        }
-
-        // It's the ABI's job to select this, not us.
-        System => ccx.sess().bug("system abi should be selected elsewhere"),
-
-        Stdcall => llvm::X86StdcallCallConv,
-        Fastcall => llvm::X86FastcallCallConv,
-        Vectorcall => llvm::X86_VectorCall,
-        C => llvm::CCallConv,
-        Win64 => llvm::X86_64_Win64,
-
-        // These API constants ought to be more specific...
-        Cdecl => llvm::CCallConv,
-        Aapcs => llvm::CCallConv,
-    }
-}
-
-pub fn register_static(ccx: &CrateContext,
-                       foreign_item: &hir::ForeignItem) -> ValueRef {
-    let ty = ccx.tcx().node_id_to_type(foreign_item.id);
-    let llty = type_of::type_of(ccx, ty);
-
-    let ident = link_name(foreign_item);
-    let c = match attr::first_attr_value_str_by_name(&foreign_item.attrs,
-                                                     "linkage") {
-        // If this is a static with a linkage specified, then we need to handle
-        // it a little specially. The typesystem prevents things like &T and
-        // extern "C" fn() from being non-null, so we can't just declare a
-        // static and call it a day. Some linkages (like weak) will make it such
-        // that the static actually has a null value.
-        Some(name) => {
-            let linkage = match llvm_linkage_by_name(&name) {
-                Some(linkage) => linkage,
-                None => {
-                    ccx.sess().span_fatal(foreign_item.span,
-                                          "invalid linkage specified");
-                }
-            };
-            let llty2 = match ty.sty {
-                ty::TyRawPtr(ref mt) => type_of::type_of(ccx, mt.ty),
-                _ => {
-                    ccx.sess().span_fatal(foreign_item.span,
-                                          "must have type `*T` or `*mut T`");
-                }
-            };
-            unsafe {
-                // Declare a symbol `foo` with the desired linkage.
-                let g1 = declare::declare_global(ccx, &ident[..], llty2);
-                llvm::SetLinkage(g1, linkage);
-
-                // Declare an internal global `extern_with_linkage_foo` which
-                // is initialized with the address of `foo`.  If `foo` is
-                // discarded during linking (for example, if `foo` has weak
-                // linkage and there are no definitions), then
-                // `extern_with_linkage_foo` will instead be initialized to
-                // zero.
-                let mut real_name = "_rust_extern_with_linkage_".to_string();
-                real_name.push_str(&ident);
-                let g2 = declare::define_global(ccx, &real_name[..], llty).unwrap_or_else(||{
-                    ccx.sess().span_fatal(foreign_item.span,
-                                          &format!("symbol `{}` is already defined", ident))
-                });
-                llvm::SetLinkage(g2, llvm::InternalLinkage);
-                llvm::LLVMSetInitializer(g2, g1);
-                g2
-            }
-        }
-        None => // Generate an external declaration.
-            declare::declare_global(ccx, &ident[..], llty),
-    };
-
-    // Handle thread-local external statics.
-    for attr in foreign_item.attrs.iter() {
-        if attr.check_name("thread_local") {
-            llvm::set_thread_local(c, true);
-        }
-    }
-
-    return c;
-}
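
A hedged, nightly-only sketch of the case this function special-cases (the `linkage` feature gate and the symbol name below are illustrative): a weak external static has to be declared as a raw pointer, precisely because weak linkage means it may resolve to null.

    #![feature(linkage)] // unstable; shown only to illustrate the case above

    extern "C" {
        #[linkage = "extern_weak"]
        static OPTIONAL_SYMBOL: *const u8; // hypothetical weak symbol
    }

    fn main() {
        // With weak linkage the pointer may legitimately be null.
        let present = unsafe { !OPTIONAL_SYMBOL.is_null() };
        println!("symbol present: {}", present);
    }
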
-
-// only use this for foreign function ABIs and glue, use `get_extern_rust_fn` for Rust functions
-pub fn get_extern_fn(ccx: &CrateContext,
-                     externs: &mut ExternMap,
-                     name: &str,
-                     cc: llvm::CallConv,
-                     ty: Type,
-                     output: Ty)
-                     -> ValueRef {
-    match externs.get(name) {
-        Some(n) => return *n,
-        None => {}
-    }
-    let f = declare::declare_fn(ccx, name, cc, ty, ty::FnConverging(output));
-    externs.insert(name.to_string(), f);
-    f
-}
-
-/// Registers a foreign function found in a library. Just adds an LLVM global.
-pub fn register_foreign_item_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                          abi: Abi, fty: Ty<'tcx>,
-                                          name: &str,
-                                          attrs: &[ast::Attribute])-> ValueRef {
-    debug!("register_foreign_item_fn(abi={:?}, \
-            ty={:?}, \
-            name={})",
-           abi,
-           fty,
-           name);
-
-    let cc = llvm_calling_convention(ccx, abi);
-
-    // Register the function as a C extern fn
-    let tys = foreign_types_for_fn_ty(ccx, fty);
-
-    // Make sure the calling convention is right for variadic functions
-    // (should've been caught if not in typeck)
-    if tys.fn_sig.variadic {
-        assert!(cc == llvm::CCallConv);
-    }
-
-    // Create the LLVM value for the C extern fn
-    let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys);
-
-    let llfn = get_extern_fn(ccx, &mut *ccx.externs().borrow_mut(), name, cc, llfn_ty, fty);
-    attributes::unwind(llfn, false);
-    add_argument_attributes(&tys, llfn);
-    attributes::from_fn_attrs(ccx, attrs, llfn);
-    llfn
-}
-
-/// Prepares a call to a native function. This requires adapting
-/// from the Rust argument passing rules to the native rules.
-///
-/// # Parameters
-///
-/// - `callee_ty`: Rust type for the function we are calling
-/// - `llfn`: the function pointer we are calling
-/// - `llretptr`: where to store the return value of the function
-/// - `llargs_rust`: a list of the argument values, prepared
-///   as they would be if calling a Rust function
-/// - `passed_arg_tys`: Rust types for the arguments. Normally we
-///   can derive these from `callee_ty`, but in the case of variadic
-///   functions `passed_arg_tys` will include the Rust types of all
-///   the arguments, including the ones not specified in the fn's signature.
-pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                     callee_ty: Ty<'tcx>,
-                                     llfn: ValueRef,
-                                     llretptr: ValueRef,
-                                     llargs_rust: &[ValueRef],
-                                     passed_arg_tys: Vec<Ty<'tcx>>,
-                                     call_debug_loc: DebugLoc)
-                                     -> Block<'blk, 'tcx>
-{
-    let ccx = bcx.ccx();
-
-    debug!("trans_native_call(callee_ty={:?}, \
-            llfn={}, \
-            llretptr={})",
-           callee_ty,
-           ccx.tn().val_to_string(llfn),
-           ccx.tn().val_to_string(llretptr));
-
-    let (fn_abi, fn_sig) = match callee_ty.sty {
-        ty::TyBareFn(_, ref fn_ty) => (fn_ty.abi, &fn_ty.sig),
-        _ => ccx.sess().bug("trans_native_call called on non-function type")
-    };
-    let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig);
-    let fn_sig = infer::normalize_associated_type(ccx.tcx(), &fn_sig);
-    let llsig = foreign_signature(ccx, &fn_sig, &passed_arg_tys[..]);
-    let fn_type = cabi::compute_abi_info(ccx,
-                                         &llsig.llarg_tys,
-                                         llsig.llret_ty,
-                                         llsig.ret_def);
-
-    let arg_tys: &[cabi::ArgType] = &fn_type.arg_tys;
-
-    let mut llargs_foreign = Vec::new();
-
-    // If the foreign ABI expects return value by pointer, supply the
-    // pointer that Rust gave us. Sometimes we have to bitcast
-    // because foreign fns return slightly different (but equivalent)
-    // views on the same type (e.g., i64 in place of {i32,i32}).
-    if fn_type.ret_ty.is_indirect() {
-        match fn_type.ret_ty.cast {
-            Some(ty) => {
-                let llcastedretptr =
-                    BitCast(bcx, llretptr, ty.ptr_to());
-                llargs_foreign.push(llcastedretptr);
-            }
-            None => {
-                llargs_foreign.push(llretptr);
-            }
-        }
-    }
-
-    let mut offset = 0;
-    for (i, arg_ty) in arg_tys.iter().enumerate() {
-        let mut llarg_rust = llargs_rust[i + offset];
-
-        if arg_ty.is_ignore() {
-            continue;
-        }
-
-        // Does Rust pass this argument by pointer?
-        let rust_indirect = type_of::arg_is_indirect(ccx, passed_arg_tys[i]);
-
-        debug!("argument {}, llarg_rust={}, rust_indirect={}, arg_ty={}",
-               i,
-               ccx.tn().val_to_string(llarg_rust),
-               rust_indirect,
-               ccx.tn().type_to_string(arg_ty.ty));
-
-        // Ensure that we always have the Rust value indirectly,
-        // because it makes bitcasting easier.
-        if !rust_indirect {
-            let scratch = base::alloc_ty(bcx, passed_arg_tys[i], "__arg");
-            if type_is_fat_ptr(ccx.tcx(), passed_arg_tys[i]) {
-                Store(bcx, llargs_rust[i + offset], expr::get_dataptr(bcx, scratch));
-                Store(bcx, llargs_rust[i + offset + 1], expr::get_meta(bcx, scratch));
-                offset += 1;
-            } else {
-                base::store_ty(bcx, llarg_rust, scratch, passed_arg_tys[i]);
-            }
-            llarg_rust = scratch;
-        }
-
-        debug!("llarg_rust={} (after indirection)",
-               ccx.tn().val_to_string(llarg_rust));
-
-        // Check whether we need to do any casting
-        match arg_ty.cast {
-            Some(ty) => llarg_rust = BitCast(bcx, llarg_rust, ty.ptr_to()),
-            None => ()
-        }
-
-        debug!("llarg_rust={} (after casting)",
-               ccx.tn().val_to_string(llarg_rust));
-
-        // Finally, load the value if needed for the foreign ABI
-        let foreign_indirect = arg_ty.is_indirect();
-        let llarg_foreign = if foreign_indirect {
-            llarg_rust
-        } else {
-            if passed_arg_tys[i].is_bool() {
-                let val = LoadRangeAssert(bcx, llarg_rust, 0, 2, llvm::False);
-                Trunc(bcx, val, Type::i1(bcx.ccx()))
-            } else {
-                Load(bcx, llarg_rust)
-            }
-        };
-
-        debug!("argument {}, llarg_foreign={}",
-               i, ccx.tn().val_to_string(llarg_foreign));
-
-        // fill padding with undef value
-        match arg_ty.pad {
-            Some(ty) => llargs_foreign.push(C_undef(ty)),
-            None => ()
-        }
-        llargs_foreign.push(llarg_foreign);
-    }
-
-    let cc = llvm_calling_convention(ccx, fn_abi);
-
-    // A function pointer is called without the declaration available, so we have to apply
-    // any attributes with ABI implications directly to the call instruction.
-    let mut attrs = llvm::AttrBuilder::new();
-
-    // Add attributes that are always applicable, independent of the concrete foreign ABI
-    if fn_type.ret_ty.is_indirect() {
-        let llret_sz = machine::llsize_of_real(ccx, fn_type.ret_ty.ty);
-
-        // The outptr can be noalias and nocapture because it's entirely
-        // invisible to the program. We also know it's nonnull as well
-        // as how many bytes we can dereference
-        attrs.arg(1, llvm::Attribute::NoAlias)
-             .arg(1, llvm::Attribute::NoCapture)
-             .arg(1, llvm::DereferenceableAttribute(llret_sz));
-    };
-
-    // Add attributes that depend on the concrete foreign ABI
-    let mut arg_idx = if fn_type.ret_ty.is_indirect() { 1 } else { 0 };
-    match fn_type.ret_ty.attr {
-        Some(attr) => { attrs.arg(arg_idx, attr); },
-        _ => ()
-    }
-
-    arg_idx += 1;
-    for arg_ty in &fn_type.arg_tys {
-        if arg_ty.is_ignore() {
-            continue;
-        }
-        // skip padding
-        if arg_ty.pad.is_some() { arg_idx += 1; }
-
-        if let Some(attr) = arg_ty.attr {
-            attrs.arg(arg_idx, attr);
-        }
-
-        arg_idx += 1;
-    }
-
-    let llforeign_retval = CallWithConv(bcx,
-                                        llfn,
-                                        &llargs_foreign[..],
-                                        cc,
-                                        Some(attrs),
-                                        call_debug_loc);
-
-    // If the function we just called does not use an outpointer,
-    // store the result into the rust outpointer. Cast the outpointer
-    // type to match because some ABIs will use a different type than
-    // the Rust type, e.g. a {u32,u32} struct could be returned as
-    // a u64.
-    if llsig.ret_def && !fn_type.ret_ty.is_indirect() {
-        let llrust_ret_ty = llsig.llret_ty;
-        let llforeign_ret_ty = match fn_type.ret_ty.cast {
-            Some(ty) => ty,
-            None => fn_type.ret_ty.ty
-        };
-
-        debug!("llretptr={}", ccx.tn().val_to_string(llretptr));
-        debug!("llforeign_retval={}", ccx.tn().val_to_string(llforeign_retval));
-        debug!("llrust_ret_ty={}", ccx.tn().type_to_string(llrust_ret_ty));
-        debug!("llforeign_ret_ty={}", ccx.tn().type_to_string(llforeign_ret_ty));
-
-        if llrust_ret_ty == llforeign_ret_ty {
-            match fn_sig.output {
-                ty::FnConverging(result_ty) => {
-                    base::store_ty(bcx, llforeign_retval, llretptr, result_ty)
-                }
-                ty::FnDiverging => {}
-            }
-        } else {
-            // The actual return type is a struct, but the ABI
-            // adaptation code has cast it into some scalar type.  The
-            // code that follows is the only reliable way I have
-            // found to do a transform like i64 -> {i32,i32}.
-            // Basically we dump the data onto the stack then memcpy it.
-            //
-            // Other approaches I tried:
-            // - Casting rust ret pointer to the foreign type and using Store
-            //   is (a) unsafe if size of foreign type > size of rust type and
-            //   (b) runs afoul of strict aliasing rules, yielding invalid
-            //   assembly under -O (specifically, the store gets removed).
-            // - Truncating foreign type to correct integral type and then
-            //   bitcasting to the struct type yields invalid cast errors.
-            let llscratch = base::alloca(bcx, llforeign_ret_ty, "__cast");
-            base::call_lifetime_start(bcx, llscratch);
-            Store(bcx, llforeign_retval, llscratch);
-            let llscratch_i8 = BitCast(bcx, llscratch, Type::i8(ccx).ptr_to());
-            let llretptr_i8 = BitCast(bcx, llretptr, Type::i8(ccx).ptr_to());
-            let llrust_size = machine::llsize_of_store(ccx, llrust_ret_ty);
-            let llforeign_align = machine::llalign_of_min(ccx, llforeign_ret_ty);
-            let llrust_align = machine::llalign_of_min(ccx, llrust_ret_ty);
-            let llalign = cmp::min(llforeign_align, llrust_align);
-            debug!("llrust_size={}", llrust_size);
-            base::call_memcpy(bcx, llretptr_i8, llscratch_i8,
-                              C_uint(ccx, llrust_size), llalign as u32);
-            base::call_lifetime_end(bcx, llscratch);
-        }
-    }
-
-    return bcx;
-}
-
-// feature gate SIMD types in FFI, since I (huonw) am not sure the
-// ABIs are handled at all correctly.
-fn gate_simd_ffi(tcx: &ty::ctxt, decl: &hir::FnDecl, ty: &ty::BareFnTy) {
-    if !tcx.sess.features.borrow().simd_ffi {
-        let check = |ast_ty: &hir::Ty, ty: ty::Ty| {
-            if ty.is_simd() {
-                tcx.sess.struct_span_err(ast_ty.span,
-                              &format!("use of SIMD type `{}` in FFI is highly experimental and \
-                                        may result in invalid code",
-                                       pprust::ty_to_string(ast_ty)))
-                    .fileline_help(ast_ty.span,
-                                   "add #![feature(simd_ffi)] to the crate attributes to enable")
-                    .emit();
-            }
-        };
-        let sig = &ty.sig.0;
-        for (input, ty) in decl.inputs.iter().zip(&sig.inputs) {
-            check(&input.ty, *ty)
-        }
-        if let hir::Return(ref ty) = decl.output {
-            check(&ty, sig.output.unwrap())
-        }
-    }
-}
-
-pub fn trans_foreign_mod(ccx: &CrateContext, foreign_mod: &hir::ForeignMod) {
-    let _icx = push_ctxt("foreign::trans_foreign_mod");
-    for foreign_item in &foreign_mod.items {
-        let lname = link_name(foreign_item);
-
-        if let hir::ForeignItemFn(ref decl, _) = foreign_item.node {
-            match foreign_mod.abi {
-                Abi::Rust | Abi::RustIntrinsic | Abi::PlatformIntrinsic => {}
-                abi => {
-                    let ty = ccx.tcx().node_id_to_type(foreign_item.id);
-                    match ty.sty {
-                        ty::TyBareFn(_, bft) => gate_simd_ffi(ccx.tcx(), &decl, bft),
-                        _ => ccx.tcx().sess.span_bug(foreign_item.span,
-                                                     "foreign fn's sty isn't a bare_fn_ty?")
-                    }
-
-                    register_foreign_item_fn(ccx, abi, ty, &lname, &foreign_item.attrs);
-                    // Unlike for other items, we shouldn't call
-                    // `base::update_linkage` here.  Foreign items have
-                    // special linkage requirements, which are handled
-                    // inside `foreign::register_*`.
-                }
-            }
-        }
-
-        ccx.item_symbols().borrow_mut().insert(foreign_item.id,
-                                             lname.to_string());
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Rust functions with foreign ABIs
-//
-// These are normal Rust functions defined with foreign ABIs.  For
-// now, and perhaps forever, we translate these using a "layer of
-// indirection". That is, given a Rust declaration like:
-//
-//     extern "C" fn foo(i: u32) -> u32 { ... }
-//
-// we will generate a function like:
-//
-//     S foo(T i) {
-//         S r;
-//         foo0(&r, NULL, i);
-//         return r;
-//     }
-//
-//     #[inline_always]
-//     void foo0(uint32_t *r, void *env, uint32_t i) { ... }
-//
-// Here the (internal) `foo0` function follows the Rust ABI as normal,
-// where the `foo` function follows the C ABI. We rely on LLVM to
-// inline the one into the other. Of course we could just generate the
-// correct code in the first place, but this is much simpler.
-
-pub fn decl_rust_fn_with_foreign_abi<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                               t: Ty<'tcx>,
-                                               name: &str)
-                                               -> ValueRef {
-    let tys = foreign_types_for_fn_ty(ccx, t);
-    let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys);
-    let cconv = match t.sty {
-        ty::TyBareFn(_, ref fn_ty) => {
-            llvm_calling_convention(ccx, fn_ty.abi)
-        }
-        _ => panic!("expected bare fn in decl_rust_fn_with_foreign_abi")
-    };
-    let llfn = declare::declare_fn(ccx, name, cconv, llfn_ty,
-                                   ty::FnConverging(ccx.tcx().mk_nil()));
-    add_argument_attributes(&tys, llfn);
-    debug!("decl_rust_fn_with_foreign_abi(llfn_ty={}, llfn={})",
-           ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn));
-    llfn
-}
-
-pub fn register_rust_fn_with_foreign_abi(ccx: &CrateContext,
-                                         sp: Span,
-                                         sym: String,
-                                         node_id: ast::NodeId)
-                                         -> ValueRef {
-    let _icx = push_ctxt("foreign::register_foreign_fn");
-
-    let t = ccx.tcx().node_id_to_type(node_id);
-    let cconv = match t.sty {
-        ty::TyBareFn(_, ref fn_ty) => {
-            llvm_calling_convention(ccx, fn_ty.abi)
-        }
-        _ => panic!("expected bare fn in register_rust_fn_with_foreign_abi")
-    };
-    let tys = foreign_types_for_fn_ty(ccx, t);
-    let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys);
-    let llfn = base::register_fn_llvmty(ccx, sp, sym, node_id, cconv, llfn_ty);
-    add_argument_attributes(&tys, llfn);
-    debug!("register_rust_fn_with_foreign_abi(node_id={}, llfn_ty={}, llfn={})",
-           node_id, ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn));
-    llfn
-}
-
-pub fn trans_rust_fn_with_foreign_abi<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                                decl: &hir::FnDecl,
-                                                body: &hir::Block,
-                                                attrs: &[ast::Attribute],
-                                                llwrapfn: ValueRef,
-                                                param_substs: &'tcx Substs<'tcx>,
-                                                id: ast::NodeId,
-                                                hash: Option<&str>) {
-    let _icx = push_ctxt("foreign::build_foreign_fn");
-
-    let fnty = ccx.tcx().node_id_to_type(id);
-    let mty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &fnty);
-    let tys = foreign_types_for_fn_ty(ccx, mty);
-
-    unsafe { // unsafe because we call LLVM operations
-        // Build up the Rust function (`foo0` above).
-        let llrustfn = build_rust_fn(ccx, decl, body, param_substs, attrs, id, hash);
-
-        // Build up the foreign wrapper (`foo` above).
-        return build_wrap_fn(ccx, llrustfn, llwrapfn, &tys, mty);
-    }
-
-    fn build_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                               decl: &hir::FnDecl,
-                               body: &hir::Block,
-                               param_substs: &'tcx Substs<'tcx>,
-                               attrs: &[ast::Attribute],
-                               id: ast::NodeId,
-                               hash: Option<&str>)
-                               -> ValueRef
-    {
-        let _icx = push_ctxt("foreign::foreign::build_rust_fn");
-        let tcx = ccx.tcx();
-        let t = tcx.node_id_to_type(id);
-        let t = monomorphize::apply_param_substs(tcx, param_substs, &t);
-
-        let path =
-            tcx.map.def_path_from_id(id)
-                   .into_iter()
-                   .map(|e| e.data.as_interned_str())
-                   .chain(once(special_idents::clownshoe_abi.name.as_str()));
-        let ps = link::mangle(path, hash);
-
-        // Compute the type that the function would have if it were just a
-        // normal Rust function. This will be the type of the wrappee fn.
-        match t.sty {
-            ty::TyBareFn(_, ref f) => {
-                assert!(f.abi != Abi::Rust);
-                assert!(f.abi != Abi::RustIntrinsic);
-                assert!(f.abi != Abi::PlatformIntrinsic);
-            }
-            _ => {
-                ccx.sess().bug(&format!("build_rust_fn: extern fn {} has ty {:?}, \
-                                        expected a bare fn ty",
-                                       ccx.tcx().map.path_to_string(id),
-                                       t));
-            }
-        };
-
-        debug!("build_rust_fn: path={} id={} t={:?}",
-               ccx.tcx().map.path_to_string(id),
-               id, t);
-
-        let llfn = declare::define_internal_rust_fn(ccx, &ps, t);
-        attributes::from_fn_attrs(ccx, attrs, llfn);
-        base::trans_fn(ccx, decl, body, llfn, param_substs, id, attrs);
-        llfn
-    }
-
-    unsafe fn build_wrap_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                      llrustfn: ValueRef,
-                                      llwrapfn: ValueRef,
-                                      tys: &ForeignTypes<'tcx>,
-                                      t: Ty<'tcx>) {
-        let _icx = push_ctxt(
-            "foreign::trans_rust_fn_with_foreign_abi::build_wrap_fn");
-
-        debug!("build_wrap_fn(llrustfn={}, llwrapfn={}, t={:?})",
-               ccx.tn().val_to_string(llrustfn),
-               ccx.tn().val_to_string(llwrapfn),
-               t);
-
-        // Avoid all the Rust generation stuff and just generate raw
-        // LLVM here.
-        //
-        // We want to generate code like this:
-        //
-        //     S foo(T i) {
-        //         S r;
-        //         foo0(&r, NULL, i);
-        //         return r;
-        //     }
-
-        if llvm::LLVMCountBasicBlocks(llwrapfn) != 0 {
-            ccx.sess().bug("wrapping a function inside non-empty wrapper, most likely cause is \
-                           multiple functions being wrapped");
-        }
-
-        let ptr = "the block\0".as_ptr();
-        let the_block = llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llwrapfn,
-                                                            ptr as *const _);
-
-        let builder = ccx.builder();
-        builder.position_at_end(the_block);
-
-        // Array for the arguments we will pass to the rust function.
-        let mut llrust_args = Vec::new();
-        let mut next_foreign_arg_counter: c_uint = 0;
-        let mut next_foreign_arg = |pad: bool| -> c_uint {
-            next_foreign_arg_counter += if pad {
-                2
-            } else {
-                1
-            };
-            next_foreign_arg_counter - 1
-        };
-
-        // If there is an out pointer on the foreign function
-        let foreign_outptr = {
-            if tys.fn_ty.ret_ty.is_indirect() {
-                Some(get_param(llwrapfn, next_foreign_arg(false)))
-            } else {
-                None
-            }
-        };
-
-        let rustfn_ty = Type::from_ref(llvm::LLVMTypeOf(llrustfn)).element_type();
-        let mut rust_param_tys = rustfn_ty.func_params().into_iter();
-        // Push Rust return pointer, using null if it will be unused.
-        let rust_uses_outptr = match tys.fn_sig.output {
-            ty::FnConverging(ret_ty) => type_of::return_uses_outptr(ccx, ret_ty),
-            ty::FnDiverging => false
-        };
-        let return_alloca: Option<ValueRef>;
-        let llrust_ret_ty = if rust_uses_outptr {
-            rust_param_tys.next().expect("Missing return type!").element_type()
-        } else {
-            rustfn_ty.return_type()
-        };
-        if rust_uses_outptr {
-            // Rust expects to use an outpointer. If the foreign fn
-            // also uses an outpointer, we can reuse it, but the types
-            // may vary, so cast first to the Rust type. If the
-            // foreign fn does NOT use an outpointer, we will have to
-            // alloca some scratch space on the stack.
-            match foreign_outptr {
-                Some(llforeign_outptr) => {
-                    debug!("out pointer, foreign={}",
-                           ccx.tn().val_to_string(llforeign_outptr));
-                    let llrust_retptr =
-                        builder.bitcast(llforeign_outptr, llrust_ret_ty.ptr_to());
-                    debug!("out pointer, foreign={} (casted)",
-                           ccx.tn().val_to_string(llrust_retptr));
-                    llrust_args.push(llrust_retptr);
-                    return_alloca = None;
-                }
-
-                None => {
-                    let slot = builder.alloca(llrust_ret_ty, "return_alloca");
-                    debug!("out pointer, \
-                            allocad={}, \
-                            llrust_ret_ty={}, \
-                            return_ty={:?}",
-                           ccx.tn().val_to_string(slot),
-                           ccx.tn().type_to_string(llrust_ret_ty),
-                           tys.fn_sig.output);
-                    llrust_args.push(slot);
-                    return_alloca = Some(slot);
-                }
-            }
-        } else {
-            // Rust does not expect an outpointer. If the foreign fn
-            // does use an outpointer, then we will do a store of the
-            // value that the Rust fn returns.
-            return_alloca = None;
-        };
-
-        // Build up the arguments to the call to the rust function.
-        // Careful to adapt for cases where the native convention uses
-        // a pointer and Rust does not or vice versa.
-        for i in 0..tys.fn_sig.inputs.len() {
-            let rust_ty = tys.fn_sig.inputs[i];
-            let rust_indirect = type_of::arg_is_indirect(ccx, rust_ty);
-            let llty = rust_param_tys.next().expect("Not enough parameter types!");
-            let llrust_ty = if rust_indirect {
-                llty.element_type()
-            } else {
-                llty
-            };
-            let llforeign_arg_ty = tys.fn_ty.arg_tys[i];
-            let foreign_indirect = llforeign_arg_ty.is_indirect();
-
-            if llforeign_arg_ty.is_ignore() {
-                debug!("skipping ignored arg #{}", i);
-                llrust_args.push(C_undef(llrust_ty));
-                continue;
-            }
-
-            // skip padding
-            let foreign_index = next_foreign_arg(llforeign_arg_ty.pad.is_some());
-            let mut llforeign_arg = get_param(llwrapfn, foreign_index);
-
-            debug!("llforeign_arg {}{}: {}", "#",
-                   i, ccx.tn().val_to_string(llforeign_arg));
-            debug!("rust_indirect = {}, foreign_indirect = {}",
-                   rust_indirect, foreign_indirect);
-
-            // Ensure that the foreign argument is indirect (by
-            // pointer).  It makes adapting types easier, since we can
-            // always just bitcast pointers.
-            if !foreign_indirect {
-                llforeign_arg = if rust_ty.is_bool() {
-                    let lltemp = builder.alloca(Type::bool(ccx), "");
-                    builder.store(builder.zext(llforeign_arg, Type::bool(ccx)), lltemp);
-                    lltemp
-                } else {
-                    let lltemp = builder.alloca(val_ty(llforeign_arg), "");
-                    builder.store(llforeign_arg, lltemp);
-                    lltemp
-                }
-            }
-
-            // If the types in the ABI and the Rust types don't match,
-            // bitcast the llforeign_arg pointer so it matches the types
-            // Rust expects.
-            if llforeign_arg_ty.cast.is_some() && !type_is_fat_ptr(ccx.tcx(), rust_ty){
-                assert!(!foreign_indirect);
-                llforeign_arg = builder.bitcast(llforeign_arg, llrust_ty.ptr_to());
-            }
-
-            let llrust_arg = if rust_indirect || type_is_fat_ptr(ccx.tcx(), rust_ty) {
-                llforeign_arg
-            } else {
-                if rust_ty.is_bool() {
-                    let tmp = builder.load_range_assert(llforeign_arg, 0, 2, llvm::False);
-                    builder.trunc(tmp, Type::i1(ccx))
-                } else if type_of::type_of(ccx, rust_ty).is_aggregate() {
-                    // We want to pass small aggregates as immediate values, but using an aggregate
-                    // LLVM type for this leads to bad optimizations, so its arg type is an
-                    // appropriately sized integer and we have to convert it
-                    let tmp = builder.bitcast(llforeign_arg,
-                                              type_of::arg_type_of(ccx, rust_ty).ptr_to());
-                    let load = builder.load(tmp);
-                    llvm::LLVMSetAlignment(load, type_of::align_of(ccx, rust_ty));
-                    load
-                } else {
-                    builder.load(llforeign_arg)
-                }
-            };
-
-            debug!("llrust_arg {}{}: {}", "#",
-                   i, ccx.tn().val_to_string(llrust_arg));
-            if type_is_fat_ptr(ccx.tcx(), rust_ty) {
-                let next_llrust_ty = rust_param_tys.next().expect("Not enough parameter types!");
-                llrust_args.push(builder.load(builder.bitcast(builder.struct_gep(
-                                llrust_arg, abi::FAT_PTR_ADDR), llrust_ty.ptr_to())));
-                llrust_args.push(builder.load(builder.bitcast(builder.struct_gep(
-                                llrust_arg, abi::FAT_PTR_EXTRA), next_llrust_ty.ptr_to())));
-            } else {
-                llrust_args.push(llrust_arg);
-            }
-        }
-
-        // Perform the call itself
-        debug!("calling llrustfn = {}, t = {:?}",
-               ccx.tn().val_to_string(llrustfn), t);
-        let attributes = attributes::from_fn_type(ccx, t);
-        let llrust_ret_val = builder.call(llrustfn, &llrust_args,
-                                          None, Some(attributes));
-
-        // Get the return value where the foreign fn expects it.
-        let llforeign_ret_ty = match tys.fn_ty.ret_ty.cast {
-            Some(ty) => ty,
-            None => tys.fn_ty.ret_ty.ty
-        };
-        match foreign_outptr {
-            None if !tys.llsig.ret_def => {
-                // Function returns `()` or `bot`, which in Rust is the LLVM
-                // type "{}" but in foreign ABIs is "Void".
-                builder.ret_void();
-            }
-
-            None if rust_uses_outptr => {
-                // Rust uses an outpointer, but the foreign ABI does not. Load.
-                let llrust_outptr = return_alloca.unwrap();
-                let llforeign_outptr_casted =
-                    builder.bitcast(llrust_outptr, llforeign_ret_ty.ptr_to());
-                let llforeign_retval = builder.load(llforeign_outptr_casted);
-                builder.ret(llforeign_retval);
-            }
-
-            None if llforeign_ret_ty != llrust_ret_ty => {
-                // Neither ABI uses an outpointer, but the types don't
-                // quite match. Must cast. Probably we should try and
-                // examine the types and use a concrete llvm cast, but
-                // right now we just use a temp memory location and
-                // bitcast the pointer, which is the same thing the
-                // old wrappers used to do.
-                let lltemp = builder.alloca(llforeign_ret_ty, "");
-                let lltemp_casted = builder.bitcast(lltemp, llrust_ret_ty.ptr_to());
-                builder.store(llrust_ret_val, lltemp_casted);
-                let llforeign_retval = builder.load(lltemp);
-                builder.ret(llforeign_retval);
-            }
-
-            None => {
-                // Neither ABI uses an outpointer, and the types
-                // match. Easy peasy.
-                builder.ret(llrust_ret_val);
-            }
-
-            Some(llforeign_outptr) if !rust_uses_outptr => {
-                // Foreign ABI requires an out pointer, but Rust doesn't.
-                // Store Rust return value.
-                let llforeign_outptr_casted =
-                    builder.bitcast(llforeign_outptr, llrust_ret_ty.ptr_to());
-                builder.store(llrust_ret_val, llforeign_outptr_casted);
-                builder.ret_void();
-            }
-
-            Some(_) => {
-                // Both ABIs use outpointers. Easy peasy.
-                builder.ret_void();
-            }
-        }
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// General ABI Support
-//
-// This code is kind of a confused mess and needs to be reworked given
-// the massive simplifications that have occurred.
-
-pub fn link_name(i: &hir::ForeignItem) -> InternedString {
-    match attr::first_attr_value_str_by_name(&i.attrs, "link_name") {
-        Some(ln) => ln.clone(),
-        None => match weak_lang_items::link_name(&i.attrs) {
-            Some(name) => name,
-            None => i.name.as_str(),
-        }
-    }
-}
-
-/// The ForeignSignature holds the LLVM types of the arguments and return type of a function. Note that
-/// these LLVM types are not quite the same as the LLVM types would be for a native Rust function
-/// because foreign functions just plain ignore modes. They also don't pass aggregate values by
-/// pointer like we do.
-fn foreign_signature<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                               fn_sig: &ty::FnSig<'tcx>,
-                               arg_tys: &[Ty<'tcx>])
-                               -> LlvmSignature {
-    let llarg_tys = arg_tys.iter().map(|&arg| foreign_arg_type_of(ccx, arg)).collect();
-    let (llret_ty, ret_def) = match fn_sig.output {
-        ty::FnConverging(ret_ty) =>
-            (type_of::foreign_arg_type_of(ccx, ret_ty), !return_type_is_void(ccx, ret_ty)),
-        ty::FnDiverging =>
-            (Type::nil(ccx), false)
-    };
-    LlvmSignature {
-        llarg_tys: llarg_tys,
-        llret_ty: llret_ty,
-        ret_def: ret_def
-    }
-}
-
-fn foreign_types_for_fn_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                     ty: Ty<'tcx>) -> ForeignTypes<'tcx> {
-    let fn_sig = match ty.sty {
-        ty::TyBareFn(_, ref fn_ty) => &fn_ty.sig,
-        _ => ccx.sess().bug("foreign_types_for_fn_ty called on non-function type")
-    };
-    let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig);
-    let fn_sig = infer::normalize_associated_type(ccx.tcx(), &fn_sig);
-    let llsig = foreign_signature(ccx, &fn_sig, &fn_sig.inputs);
-    let fn_ty = cabi::compute_abi_info(ccx,
-                                       &llsig.llarg_tys,
-                                       llsig.llret_ty,
-                                       llsig.ret_def);
-    debug!("foreign_types_for_fn_ty(\
-           ty={:?}, \
-           llsig={} -> {}, \
-           fn_ty={} -> {}, \
-           ret_def={}",
-           ty,
-           ccx.tn().types_to_str(&llsig.llarg_tys),
-           ccx.tn().type_to_string(llsig.llret_ty),
-           ccx.tn().types_to_str(&fn_ty.arg_tys.iter().map(|t| t.ty).collect::<Vec<_>>()),
-           ccx.tn().type_to_string(fn_ty.ret_ty.ty),
-           llsig.ret_def);
-
-    ForeignTypes {
-        fn_sig: fn_sig,
-        llsig: llsig,
-        fn_ty: fn_ty
-    }
-}
-
-fn lltype_for_fn_from_foreign_types(ccx: &CrateContext, tys: &ForeignTypes) -> Type {
-    let mut llargument_tys = Vec::new();
-
-    let ret_ty = tys.fn_ty.ret_ty;
-    let llreturn_ty = if ret_ty.is_indirect() {
-        llargument_tys.push(ret_ty.ty.ptr_to());
-        Type::void(ccx)
-    } else {
-        match ret_ty.cast {
-            Some(ty) => ty,
-            None => ret_ty.ty
-        }
-    };
-
-    for &arg_ty in &tys.fn_ty.arg_tys {
-        if arg_ty.is_ignore() {
-            continue;
-        }
-        // add padding
-        match arg_ty.pad {
-            Some(ty) => llargument_tys.push(ty),
-            None => ()
-        }
-
-        let llarg_ty = if arg_ty.is_indirect() {
-            arg_ty.ty.ptr_to()
-        } else {
-            match arg_ty.cast {
-                Some(ty) => ty,
-                None => arg_ty.ty
-            }
-        };
-
-        llargument_tys.push(llarg_ty);
-    }
-
-    if tys.fn_sig.variadic {
-        Type::variadic_func(&llargument_tys, &llreturn_ty)
-    } else {
-        Type::func(&llargument_tys[..], &llreturn_ty)
-    }
-}
-
-pub fn lltype_for_foreign_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                       ty: Ty<'tcx>) -> Type {
-    lltype_for_fn_from_foreign_types(ccx, &foreign_types_for_fn_ty(ccx, ty))
-}
-
-fn add_argument_attributes(tys: &ForeignTypes,
-                           llfn: ValueRef) {
-    let mut i = if tys.fn_ty.ret_ty.is_indirect() {
-        1
-    } else {
-        0
-    };
-
-    match tys.fn_ty.ret_ty.attr {
-        Some(attr) => unsafe {
-            llvm::LLVMAddFunctionAttribute(llfn, i as c_uint, attr.bits() as u64);
-        },
-        None => {}
-    }
-
-    i += 1;
-
-    for &arg_ty in &tys.fn_ty.arg_tys {
-        if arg_ty.is_ignore() {
-            continue;
-        }
-        // skip padding
-        if arg_ty.pad.is_some() { i += 1; }
-
-        match arg_ty.attr {
-            Some(attr) => unsafe {
-                llvm::LLVMAddFunctionAttribute(llfn, i as c_uint, attr.bits() as u64);
-            },
-            None => ()
-        }
-
-        i += 1;
-    }
-}
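Editorial note (not part of the diff): the removed foreign.rs above translated foreign items and Rust functions declared with foreign ABIs. Below is a minimal, hypothetical sketch of the source-level shape it handled; `add` is an invented name, and `abs` is assumed to come from the C standard library linked by default. Per the comments in the removed code, an `extern "C" fn` was lowered to a C-ABI wrapper forwarding to an internal Rust-ABI function, with LLVM expected to inline one into the other.

// A Rust function exposed with the C ABI; the deleted trans code generated
// the wrapper/wrappee pair described in the "Rust functions with foreign
// ABIs" comment above for definitions like this.
extern "C" fn add(a: u32, b: u32) -> u32 {
    a + b
}

// A foreign item, of the kind handled by trans_foreign_mod/link_name above.
extern "C" {
    fn abs(i: i32) -> i32;
}

fn main() {
    assert_eq!(add(2, 3), 5);
    assert_eq!(unsafe { abs(-4) }, 4);
}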
diff --git a/src/librustc_trans/trans/glue.rs b/src/librustc_trans/trans/glue.rs
deleted file mode 100644 (file)
index 5cb6a73..0000000
+++ /dev/null
@@ -1,612 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Code relating to drop glue.
-
-use std;
-
-use back::link::*;
-use llvm;
-use llvm::{ValueRef, get_param};
-use middle::lang_items::ExchangeFreeFnLangItem;
-use middle::subst::{Substs};
-use middle::traits;
-use middle::ty::{self, Ty};
-use trans::adt;
-use trans::adt::GetDtorType; // for tcx.dtor_type()
-use trans::base::*;
-use trans::build::*;
-use trans::callee;
-use trans::cleanup;
-use trans::cleanup::CleanupMethods;
-use trans::collector::{self, TransItem};
-use trans::common::*;
-use trans::debuginfo::DebugLoc;
-use trans::declare;
-use trans::expr;
-use trans::machine::*;
-use trans::monomorphize;
-use trans::type_of::{type_of, sizing_type_of, align_of};
-use trans::type_::Type;
-
-use arena::TypedArena;
-use libc::c_uint;
-use syntax::ast;
-use syntax::codemap::DUMMY_SP;
-
-pub fn trans_exchange_free_dyn<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
-                                           v: ValueRef,
-                                           size: ValueRef,
-                                           align: ValueRef,
-                                           debug_loc: DebugLoc)
-                                           -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_exchange_free");
-    let ccx = cx.ccx();
-    callee::trans_lang_call(cx,
-        langcall(cx, None, "", ExchangeFreeFnLangItem),
-        &[PointerCast(cx, v, Type::i8p(ccx)), size, align],
-        Some(expr::Ignore),
-        debug_loc).bcx
-}
-
-pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
-                                       v: ValueRef,
-                                       size: u64,
-                                       align: u32,
-                                       debug_loc: DebugLoc)
-                                       -> Block<'blk, 'tcx> {
-    trans_exchange_free_dyn(cx,
-                            v,
-                            C_uint(cx.ccx(), size),
-                            C_uint(cx.ccx(), align),
-                            debug_loc)
-}
-
-pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                          ptr: ValueRef,
-                                          content_ty: Ty<'tcx>,
-                                          debug_loc: DebugLoc)
-                                          -> Block<'blk, 'tcx> {
-    assert!(type_is_sized(bcx.ccx().tcx(), content_ty));
-    let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
-    let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
-
-    // `Box<ZeroSizeType>` does not allocate.
-    if content_size != 0 {
-        let content_align = align_of(bcx.ccx(), content_ty);
-        trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc)
-    } else {
-        bcx
-    }
-}
-
-pub fn type_needs_drop<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool {
-    tcx.type_needs_drop_given_env(ty, &tcx.empty_parameter_environment())
-}
-
-pub fn get_drop_glue_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                    t: Ty<'tcx>) -> Ty<'tcx> {
-    let tcx = ccx.tcx();
-    // Even if there is no dtor for t, there might be one deeper down and we
-    // might need to pass in the vtable ptr.
-    if !type_is_sized(tcx, t) {
-        return t
-    }
-
-    // FIXME (#22815): note that type_needs_drop conservatively
-    // approximates in some cases and may say a type expression
-    // requires drop glue when it actually does not.
-    //
-    // (In this case it is not clear whether any harm is done, i.e.
-    // erroneously returning `t` in some cases where we could have
-    // returned `tcx.types.i8` does not appear unsound. The impact on
-    // code quality is unknown at this time.)
-
-    if !type_needs_drop(&tcx, t) {
-        return tcx.types.i8;
-    }
-    match t.sty {
-        ty::TyBox(typ) if !type_needs_drop(&tcx, typ)
-                         && type_is_sized(tcx, typ) => {
-            let llty = sizing_type_of(ccx, typ);
-            // `Box<ZeroSizeType>` does not allocate.
-            if llsize_of_alloc(ccx, llty) == 0 {
-                tcx.types.i8
-            } else {
-                t
-            }
-        }
-        _ => t
-    }
-}
-
-pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                           v: ValueRef,
-                           t: Ty<'tcx>,
-                           debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
-    drop_ty_core(bcx, v, t, debug_loc, false, None)
-}
-
-pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                v: ValueRef,
-                                t: Ty<'tcx>,
-                                debug_loc: DebugLoc,
-                                skip_dtor: bool,
-                                drop_hint: Option<cleanup::DropHintValue>)
-                                -> Block<'blk, 'tcx> {
-    // NB: v is an *alias* of type t here, not a direct value.
-    debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint);
-    let _icx = push_ctxt("drop_ty");
-    let mut bcx = bcx;
-    if bcx.fcx.type_needs_drop(t) {
-        let ccx = bcx.ccx();
-        let g = if skip_dtor {
-            DropGlueKind::TyContents(t)
-        } else {
-            DropGlueKind::Ty(t)
-        };
-        let glue = get_drop_glue_core(ccx, g);
-        let glue_type = get_drop_glue_type(ccx, t);
-        let ptr = if glue_type != t {
-            PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to())
-        } else {
-            v
-        };
-
-        match drop_hint {
-            Some(drop_hint) => {
-                let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8);
-                let moved_val =
-                    C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false);
-                let may_need_drop =
-                    ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None);
-                bcx = with_cond(bcx, may_need_drop, |cx| {
-                    Call(cx, glue, &[ptr], None, debug_loc);
-                    cx
-                })
-            }
-            None => {
-                // No drop-hint ==> call standard drop glue
-                Call(bcx, glue, &[ptr], None, debug_loc);
-            }
-        }
-    }
-    bcx
-}
-
-pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                     v: ValueRef,
-                                     t: Ty<'tcx>,
-                                     debug_loc: DebugLoc,
-                                     skip_dtor: bool)
-                                     -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("drop_ty_immediate");
-    let vp = alloc_ty(bcx, t, "");
-    call_lifetime_start(bcx, vp);
-    store_ty(bcx, v, vp, t);
-    let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None);
-    call_lifetime_end(bcx, vp);
-    bcx
-}
-
-pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
-    get_drop_glue_core(ccx, DropGlueKind::Ty(t))
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-pub enum DropGlueKind<'tcx> {
-    /// The normal path; runs the dtor, and then recurs on the contents
-    Ty(Ty<'tcx>),
-    /// Skips the dtor, if any, for ty; drops the contents directly.
-    /// Note that the dtor is only skipped at the most *shallow*
-    /// level, namely, an `impl Drop for Ty` itself. So, for example,
-    /// if Ty is Newtype(S) then only the Drop impl for Newtype itself
-    /// will be skipped, while the Drop impl for S, if any, will be
-    /// invoked.
-    TyContents(Ty<'tcx>),
-}
-
-impl<'tcx> DropGlueKind<'tcx> {
-    fn ty(&self) -> Ty<'tcx> {
-        match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t }
-    }
-
-    fn map_ty<F>(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
-    {
-        match *self {
-            DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)),
-            DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)),
-        }
-    }
-}
-
-fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                g: DropGlueKind<'tcx>) -> ValueRef {
-    debug!("make drop glue for {:?}", g);
-    let g = g.map_ty(|t| get_drop_glue_type(ccx, t));
-    debug!("drop glue type {:?}", g);
-    match ccx.drop_glues().borrow().get(&g) {
-        Some(&glue) => return glue,
-        _ => { }
-    }
-    let t = g.ty();
-
-    let llty = if type_is_sized(ccx.tcx(), t) {
-        type_of(ccx, t).ptr_to()
-    } else {
-        type_of(ccx, ccx.tcx().mk_box(t)).ptr_to()
-    };
-
-    let llfnty = Type::glue_fn(ccx, llty);
-
-    // To avoid infinite recursion, don't `make_drop_glue` until after we've
-    // added the entry to the `drop_glues` cache.
-    if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) {
-        let llfn = declare::declare_cfn(ccx, &old_sym, llfnty, ccx.tcx().mk_nil());
-        ccx.drop_glues().borrow_mut().insert(g, llfn);
-        return llfn;
-    };
-
-    let fn_nm = mangle_internal_name_by_type_and_seq(ccx, t, "drop");
-    let llfn = declare::define_cfn(ccx, &fn_nm, llfnty, ccx.tcx().mk_nil()).unwrap_or_else(||{
-       ccx.sess().bug(&format!("symbol `{}` already defined", fn_nm));
-    });
-    ccx.available_drop_glues().borrow_mut().insert(g, fn_nm);
-
-    let _s = StatRecorder::new(ccx, format!("drop {:?}", t));
-
-    let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
-    let (arena, fcx): (TypedArena<_>, FunctionContext);
-    arena = TypedArena::new();
-    fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
-                      ty::FnConverging(ccx.tcx().mk_nil()),
-                      empty_substs, None, &arena);
-
-    let bcx = init_function(&fcx, false, ty::FnConverging(ccx.tcx().mk_nil()));
-
-    update_linkage(ccx, llfn, None, OriginalTranslation);
-
-    ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
-    // All glue functions take values passed *by alias*; this is a
-    // requirement since in many contexts glue is invoked indirectly and
-    // the caller has no idea if it's dealing with something that can be
-    // passed by value.
-    //
-    // llfn is expected to be declared to take a parameter of the appropriate
-    // type, so we don't need to explicitly cast the function parameter.
-
-    let llrawptr0 = get_param(llfn, fcx.arg_offset() as c_uint);
-    let bcx = make_drop_glue(bcx, llrawptr0, g);
-    finish_fn(&fcx, bcx, ty::FnConverging(ccx.tcx().mk_nil()), DebugLoc::None);
-
-    llfn
-}
-
-fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                                      t: Ty<'tcx>,
-                                      struct_data: ValueRef)
-                                      -> Block<'blk, 'tcx> {
-    assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized");
-
-    let repr = adt::represent_type(bcx.ccx(), t);
-    let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &repr, struct_data));
-    let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type());
-    let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
-    let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false);
-
-    let bcx = if !bcx.ccx().check_drop_flag_for_sanity() {
-        bcx
-    } else {
-        let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
-        let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false);
-        let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None);
-        let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None);
-        let drop_flag_neither_initialized_nor_cleared =
-            And(bcx, not_init, not_done, DebugLoc::None);
-        with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
-            let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
-            Call(cx, llfn, &[], None, DebugLoc::None);
-            cx
-        })
-    };
-
-    let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None);
-    with_cond(bcx, drop_flag_dtor_needed, |cx| {
-        trans_struct_drop(cx, t, struct_data)
-    })
-}
-fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                 t: Ty<'tcx>,
-                                 v0: ValueRef)
-                                 -> Block<'blk, 'tcx>
-{
-    debug!("trans_struct_drop t: {}", t);
-    let tcx = bcx.tcx();
-    let mut bcx = bcx;
-
-    let def = t.ty_adt_def().unwrap();
-
-    // Be sure to put the contents into a scope so we can use an invoke
-    // instruction to call the user destructor but still call the field
-    // destructors if the user destructor panics.
-    //
-    // FIXME (#14875) panic-in-drop semantics might be unsupported; we
-    // might well consider changing below to more direct code.
-    let contents_scope = bcx.fcx.push_custom_cleanup_scope();
-
-    // Issue #23611: schedule cleanup of contents, re-inspecting the
-    // discriminant (if any) in case of variant swap in drop code.
-    bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t);
-
-    let (sized_args, unsized_args);
-    let args: &[ValueRef] = if type_is_sized(tcx, t) {
-        sized_args = [v0];
-        &sized_args
-    } else {
-        unsized_args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_meta(bcx, v0))];
-        &unsized_args
-    };
-
-    bcx = callee::trans_call_inner(bcx, DebugLoc::None, |bcx, _| {
-        let trait_ref = ty::Binder(ty::TraitRef {
-            def_id: tcx.lang_items.drop_trait().unwrap(),
-            substs: tcx.mk_substs(Substs::trans_empty().with_self_ty(t))
-        });
-        let vtbl = match fulfill_obligation(bcx.ccx(), DUMMY_SP, trait_ref) {
-            traits::VtableImpl(data) => data,
-            _ => tcx.sess.bug(&format!("dtor for {:?} is not an impl???", t))
-        };
-        let dtor_did = def.destructor().unwrap();
-        let datum = callee::trans_fn_ref_with_substs(bcx.ccx(),
-                                                     dtor_did,
-                                                     ExprId(0),
-                                                     bcx.fcx.param_substs,
-                                                     vtbl.substs);
-        callee::Callee {
-            bcx: bcx,
-            data: callee::Fn(datum.val),
-            ty: datum.ty
-        }
-    }, callee::ArgVals(args), Some(expr::Ignore)).bcx;
-
-    bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
-}
-
-pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info: ValueRef)
-                                         -> (ValueRef, ValueRef) {
-    debug!("calculate size of DST: {}; with lost info: {}",
-           t, bcx.val_to_string(info));
-    if type_is_sized(bcx.tcx(), t) {
-        let sizing_type = sizing_type_of(bcx.ccx(), t);
-        let size = llsize_of_alloc(bcx.ccx(), sizing_type);
-        let align = align_of(bcx.ccx(), t);
-        debug!("size_and_align_of_dst t={} info={} size: {} align: {}",
-               t, bcx.val_to_string(info), size, align);
-        let size = C_uint(bcx.ccx(), size);
-        let align = C_uint(bcx.ccx(), align);
-        return (size, align);
-    }
-    match t.sty {
-        ty::TyStruct(def, substs) => {
-            let ccx = bcx.ccx();
-            // First get the size of all statically known fields.
-            // Don't use type_of::sizing_type_of because that expects t to be sized.
-            assert!(!t.is_simd());
-            let repr = adt::represent_type(ccx, t);
-            let sizing_type = adt::sizing_type_context_of(ccx, &repr, true);
-            debug!("DST {} sizing_type: {}", t, sizing_type.to_string());
-            let sized_size = llsize_of_alloc(ccx, sizing_type.prefix());
-            let sized_align = llalign_of_min(ccx, sizing_type.prefix());
-            debug!("DST {} statically sized prefix size: {} align: {}",
-                   t, sized_size, sized_align);
-            let sized_size = C_uint(ccx, sized_size);
-            let sized_align = C_uint(ccx, sized_align);
-
-            // Recurse to get the size of the dynamically sized field (must be
-            // the last field).
-            let last_field = def.struct_variant().fields.last().unwrap();
-            let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
-            let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
-
-            let dbloc = DebugLoc::None;
-
-            // FIXME (#26403, #27023): We should be adding padding
-            // to `sized_size` (to accommodate the `unsized_align`
-            // required of the unsized field that follows) before
-            // summing it with `sized_size`. (Note that since #26403
-            // is unfixed, we do not yet add the necessary padding
-            // here. But this is where the add would go.)
-
-            // Return the sum of sizes and max of aligns.
-            let mut size = Add(bcx, sized_size, unsized_size, dbloc);
-
-            // Issue #27023: If there is a drop flag, *now* we add 1
-            // to the size.  (We can do this without adding any
-            // padding because drop flags do not have any alignment
-            // constraints.)
-            if sizing_type.needs_drop_flag() {
-                size = Add(bcx, size, C_uint(bcx.ccx(), 1_u64), dbloc);
-            }
-
-            // Choose max of two known alignments (combined value must
-            // be aligned according to more restrictive of the two).
-            let align = match (const_to_opt_uint(sized_align), const_to_opt_uint(unsized_align)) {
-                (Some(sized_align), Some(unsized_align)) => {
-                    // If both alignments are constant (sized_align always should be),
-                    // pick the correct alignment statically.
-                    C_uint(ccx, std::cmp::max(sized_align, unsized_align))
-                }
-                _ => Select(bcx,
-                            ICmp(bcx,
-                                 llvm::IntUGT,
-                                 sized_align,
-                                 unsized_align,
-                                 dbloc),
-                            sized_align,
-                            unsized_align)
-            };
-
-            // Issue #27023: must add any necessary padding to `size`
-            // (to make it a multiple of `align`) before returning it.
-            //
-            // Namely, the returned size should be, in C notation:
-            //
-            //   `size + ((size & (align-1)) ? align : 0)`
-            //
-            // emulated via the semi-standard fast bit trick:
-            //
-            //   `(size + (align-1)) & -align`
-
-            let addend = Sub(bcx, align, C_uint(bcx.ccx(), 1_u64), dbloc);
-            let size = And(
-                bcx, Add(bcx, size, addend, dbloc), Neg(bcx, align, dbloc), dbloc);
-
-            (size, align)
-        }
-        ty::TyTrait(..) => {
-            // info points to the vtable and the second entry in the vtable is the
-            // dynamic size of the object.
-            let info = PointerCast(bcx, info, Type::int(bcx.ccx()).ptr_to());
-            let size_ptr = GEPi(bcx, info, &[1]);
-            let align_ptr = GEPi(bcx, info, &[2]);
-            (Load(bcx, size_ptr), Load(bcx, align_ptr))
-        }
-        ty::TySlice(_) | ty::TyStr => {
-            let unit_ty = t.sequence_element_type(bcx.tcx());
-            // The info in this case is the length of the str, so the size is that
-            // times the unit size.
-            let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
-            let unit_align = llalign_of_min(bcx.ccx(), llunit_ty);
-            let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
-            (Mul(bcx, info, C_uint(bcx.ccx(), unit_size), DebugLoc::None),
-             C_uint(bcx.ccx(), unit_align))
-        }
-        _ => bcx.sess().bug(&format!("Unexpected unsized type, found {}", t))
-    }
-}
-
-fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueKind<'tcx>)
-                              -> Block<'blk, 'tcx> {
-    let t = g.ty();
-
-    if collector::collecting_debug_information(bcx.ccx()) {
-        bcx.ccx()
-           .record_translation_item_as_generated(TransItem::DropGlue(bcx.tcx()
-                                                                        .erase_regions(&t)));
-    }
-
-    let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true };
-    // NB: v0 is an *alias* of type t here, not a direct value.
-    let _icx = push_ctxt("make_drop_glue");
-
-    // Only drop the value when it ... well, we used to check for
-    // non-null, (and maybe we need to continue doing so), but we now
-    // must definitely check for special bit-patterns corresponding to
-    // the special dtor markings.
-
-    let inttype = Type::int(bcx.ccx());
-    let dropped_pattern = C_integral(inttype, adt::DTOR_DONE_U64, false);
-
-    match t.sty {
-        ty::TyBox(content_ty) => {
-            // Support for TyBox is built-in and its drop glue is
-            // special. It may move into the library and get a Drop impl. As
-            // a safeguard, assert that TyBox is not used with TyContents.
-            assert!(!skip_dtor);
-            if !type_is_sized(bcx.tcx(), content_ty) {
-                let llval = expr::get_dataptr(bcx, v0);
-                let llbox = Load(bcx, llval);
-                let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx()));
-                let drop_flag_not_dropped_already =
-                    ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
-                with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
-                    let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
-                    let info = expr::get_meta(bcx, v0);
-                    let info = Load(bcx, info);
-                    let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info);
-
-                    // `Box<ZeroSizeType>` does not allocate.
-                    let needs_free = ICmp(bcx,
-                                          llvm::IntNE,
-                                          llsize,
-                                          C_uint(bcx.ccx(), 0u64),
-                                          DebugLoc::None);
-                    with_cond(bcx, needs_free, |bcx| {
-                        trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
-                    })
-                })
-            } else {
-                let llval = v0;
-                let llbox = Load(bcx, llval);
-                let llbox_as_usize = PtrToInt(bcx, llbox, inttype);
-                let drop_flag_not_dropped_already =
-                    ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
-                with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
-                    let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
-                    trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
-                })
-            }
-        }
-        ty::TyStruct(def, _) | ty::TyEnum(def, _) => {
-            match (def.dtor_kind(), skip_dtor) {
-                (ty::TraitDtor(true), false) => {
-                    // FIXME(16758) Since the struct is unsized, it is hard to
-                    // find the drop flag (which is at the end of the struct).
-                    // Let's just ignore the flag and pretend everything will be
-                    // OK.
-                    if type_is_sized(bcx.tcx(), t) {
-                        trans_struct_drop_flag(bcx, t, v0)
-                    } else {
-                        // Give the user a heads up that we are doing something
-                        // stupid and dangerous.
-                        bcx.sess().warn(&format!("Ignoring drop flag in destructor for {}\
-                                                 because the struct is unsized. See issue\
-                                                 #16758", t));
-                        trans_struct_drop(bcx, t, v0)
-                    }
-                }
-                (ty::TraitDtor(false), false) => {
-                    trans_struct_drop(bcx, t, v0)
-                }
-                (ty::NoDtor, _) | (_, true) => {
-                    // No dtor? Just the default case
-                    iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
-                }
-            }
-        }
-        ty::TyTrait(..) => {
-            // No support in vtable for distinguishing destroying with
-            // versus without calling Drop::drop. Assert caller is
-            // okay with always calling the Drop impl, if any.
-            assert!(!skip_dtor);
-            let data_ptr = expr::get_dataptr(bcx, v0);
-            let vtable_ptr = Load(bcx, expr::get_meta(bcx, v0));
-            let dtor = Load(bcx, vtable_ptr);
-            Call(bcx,
-                 dtor,
-                 &[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))],
-                 None,
-                 DebugLoc::None);
-            bcx
-        }
-        _ => {
-            if bcx.fcx.type_needs_drop(t) {
-                iter_structural_ty(bcx,
-                                   v0,
-                                   t,
-                                   |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
-            } else {
-                bcx
-            }
-        }
-    }
-}
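Editorial note (not part of the diff): the removed glue.rs computed sizes and alignments for dynamically sized types, rounding the size up to the alignment with the bit trick quoted in its comments, `(size + (align-1)) & -align` (two's-complement negation in the IR). A small sketch of that rule in plain Rust, with invented names, valid for power-of-two alignments:

// Round `size` up to the next multiple of `align`; for power-of-two `align`,
// `!(align - 1)` is the same mask as the `-align` used in the deleted code.
fn round_up_to_align(size: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two());
    (size + (align - 1)) & !(align - 1)
}

fn main() {
    assert_eq!(round_up_to_align(13, 8), 16); // padded up to the next multiple of 8
    assert_eq!(round_up_to_align(16, 8), 16); // already aligned, unchanged
    assert_eq!(round_up_to_align(0, 4), 0);
}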
diff --git a/src/librustc_trans/trans/inline.rs b/src/librustc_trans/trans/inline.rs
deleted file mode 100644 (file)
index 4c64715..0000000
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use llvm::{AvailableExternallyLinkage, InternalLinkage, SetLinkage};
-use middle::cstore::{CrateStore, FoundAst, InlinedItem};
-use middle::def_id::DefId;
-use middle::subst::Substs;
-use trans::base::{push_ctxt, trans_item, get_item_val, trans_fn};
-use trans::common::*;
-
-use rustc::dep_graph::DepNode;
-use rustc_front::hir;
-
-fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option<DefId> {
-    debug!("instantiate_inline({:?})", fn_id);
-    let _icx = push_ctxt("instantiate_inline");
-    let _task = ccx.tcx().dep_graph.in_task(DepNode::TransInlinedItem(fn_id));
-
-    match ccx.external().borrow().get(&fn_id) {
-        Some(&Some(node_id)) => {
-            // Already inline
-            debug!("instantiate_inline({}): already inline as node id {}",
-                   ccx.tcx().item_path_str(fn_id), node_id);
-            let node_def_id = ccx.tcx().map.local_def_id(node_id);
-            return Some(node_def_id);
-        }
-        Some(&None) => {
-            return None; // Not inlinable
-        }
-        None => {
-            // Not seen yet
-        }
-    }
-
-    let inlined = ccx.tcx().sess.cstore.maybe_get_item_ast(ccx.tcx(), fn_id);
-    let inline_id = match inlined {
-        FoundAst::NotFound => {
-            ccx.external().borrow_mut().insert(fn_id, None);
-            return None;
-        }
-        FoundAst::Found(&InlinedItem::Item(ref item)) => {
-            ccx.external().borrow_mut().insert(fn_id, Some(item.id));
-            ccx.external_srcs().borrow_mut().insert(item.id, fn_id);
-
-            ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
-            trans_item(ccx, item);
-
-            let linkage = match item.node {
-                hir::ItemFn(_, _, _, _, ref generics, _) => {
-                    if generics.is_type_parameterized() {
-                        // Generics have no symbol, so they can't be given any
-                        // linkage.
-                        None
-                    } else {
-                        if ccx.sess().opts.cg.codegen_units == 1 {
-                            // We could use AvailableExternallyLinkage here,
-                            // but InternalLinkage allows LLVM to optimize more
-                            // aggressively (at the cost of sometimes
-                            // duplicating code).
-                            Some(InternalLinkage)
-                        } else {
-                            // With multiple compilation units, duplicated code
-                            // is more of a problem.  Also, `codegen_units > 1`
-                            // means the user is okay with losing some
-                            // performance.
-                            Some(AvailableExternallyLinkage)
-                        }
-                    }
-                }
-                hir::ItemConst(..) => None,
-                _ => unreachable!(),
-            };
-
-            match linkage {
-                Some(linkage) => {
-                    let g = get_item_val(ccx, item.id);
-                    SetLinkage(g, linkage);
-                }
-                None => {}
-            }
-
-            item.id
-        }
-        FoundAst::Found(&InlinedItem::Foreign(ref item)) => {
-            ccx.external().borrow_mut().insert(fn_id, Some(item.id));
-            ccx.external_srcs().borrow_mut().insert(item.id, fn_id);
-            item.id
-        }
-        FoundAst::FoundParent(parent_id, &InlinedItem::Item(ref item)) => {
-            ccx.external().borrow_mut().insert(parent_id, Some(item.id));
-            ccx.external_srcs().borrow_mut().insert(item.id, parent_id);
-
-            let mut my_id = 0;
-            match item.node {
-                hir::ItemEnum(ref ast_def, _) => {
-                    let ast_vs = &ast_def.variants;
-                    let ty_vs = &ccx.tcx().lookup_adt_def(parent_id).variants;
-                    assert_eq!(ast_vs.len(), ty_vs.len());
-                    for (ast_v, ty_v) in ast_vs.iter().zip(ty_vs.iter()) {
-                        if ty_v.did == fn_id { my_id = ast_v.node.data.id(); }
-                        ccx.external().borrow_mut().insert(ty_v.did, Some(ast_v.node.data.id()));
-                    }
-                }
-                hir::ItemStruct(ref struct_def, _) => {
-                    if struct_def.is_struct() {
-                        ccx.sess().bug("instantiate_inline: called on a \
-                                                                 non-tuple struct")
-                    } else {
-                        ccx.external().borrow_mut().insert(fn_id, Some(struct_def.id()));
-                        my_id = struct_def.id();
-                    }
-                }
-                _ => ccx.sess().bug("instantiate_inline: item has a \
-                                 non-enum, non-struct parent")
-            }
-            trans_item(ccx, &item);
-            my_id
-        }
-        FoundAst::FoundParent(_, _) => {
-            ccx.sess().bug("maybe_get_item_ast returned a FoundParent \
-                            with a non-item parent");
-        }
-        FoundAst::Found(&InlinedItem::TraitItem(_, ref trait_item)) => {
-            ccx.external().borrow_mut().insert(fn_id, Some(trait_item.id));
-            ccx.external_srcs().borrow_mut().insert(trait_item.id, fn_id);
-
-            ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
-
-            // Associated consts already have to be evaluated in `typeck`, so
-            // the logic to do that already exists in `middle`. In order to
-            // reuse that code, it needs to be able to look up the traits for
-            // inlined items.
-            let ty_trait_item = ccx.tcx().impl_or_trait_item(fn_id).clone();
-            let trait_item_def_id = ccx.tcx().map.local_def_id(trait_item.id);
-            ccx.tcx().impl_or_trait_items.borrow_mut()
-                     .insert(trait_item_def_id, ty_trait_item);
-
-            // If this is a default method, we can't look up the
-            // impl type. But we aren't going to translate it anyway,
-            // so don't.
-            trait_item.id
-        }
-        FoundAst::Found(&InlinedItem::ImplItem(impl_did, ref impl_item)) => {
-            ccx.external().borrow_mut().insert(fn_id, Some(impl_item.id));
-            ccx.external_srcs().borrow_mut().insert(impl_item.id, fn_id);
-
-            ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
-
-            // Translate monomorphic impl methods immediately.
-            if let hir::ImplItemKind::Method(ref sig, ref body) = impl_item.node {
-                let impl_tpt = ccx.tcx().lookup_item_type(impl_did);
-                if impl_tpt.generics.types.is_empty() &&
-                        sig.generics.ty_params.is_empty() {
-                    let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
-                    let llfn = get_item_val(ccx, impl_item.id);
-                    trans_fn(ccx,
-                             &sig.decl,
-                             body,
-                             llfn,
-                             empty_substs,
-                             impl_item.id,
-                             &impl_item.attrs);
-                    // See linkage comments on items.
-                    if ccx.sess().opts.cg.codegen_units == 1 {
-                        SetLinkage(llfn, InternalLinkage);
-                    } else {
-                        SetLinkage(llfn, AvailableExternallyLinkage);
-                    }
-                }
-            }
-
-            impl_item.id
-        }
-    };
-
-    let inline_def_id = ccx.tcx().map.local_def_id(inline_id);
-    Some(inline_def_id)
-}
-
-pub fn get_local_instance(ccx: &CrateContext, fn_id: DefId)
-    -> Option<DefId> {
-    if let Some(_) = ccx.tcx().map.as_local_node_id(fn_id) {
-        Some(fn_id)
-    } else {
-        instantiate_inline(ccx, fn_id)
-    }
-}
-
-pub fn maybe_instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> DefId {
-    get_local_instance(ccx, fn_id).unwrap_or(fn_id)
-}
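
instantiate_inline memoizes its answer in ccx.external(): a missing entry means the item has not been examined yet, Some(None) means it is known not to be inlinable, and Some(Some(node_id)) means it has already been pulled in as a local item. A small hedged sketch of that three-state cache, with u64 standing in for the rustc-internal DefId/NodeId types:

    use std::collections::HashMap;

    // Hedged sketch of the cache shape consulted at the top of
    // instantiate_inline (illustrative only):
    //   missing key    -> not seen yet, do the work
    //   Some(None)     -> known not inlinable, bail out quickly
    //   Some(Some(id)) -> already inlined as local id `id`
    fn lookup_or_inline(cache: &mut HashMap<u64, Option<u64>>, fn_id: u64) -> Option<u64> {
        match cache.get(&fn_id) {
            Some(&Some(node_id)) => return Some(node_id), // already inlined
            Some(&None) => return None,                   // known not inlinable
            None => {}                                    // not seen yet
        }
        // Pretend only even ids have an AST available to translate.
        let result = if fn_id % 2 == 0 { Some(fn_id + 1) } else { None };
        cache.insert(fn_id, result);
        result
    }

    fn main() {
        let mut cache = HashMap::new();
        assert_eq!(lookup_or_inline(&mut cache, 4), Some(5));
        assert_eq!(lookup_or_inline(&mut cache, 4), Some(5)); // served from the cache
        assert_eq!(lookup_or_inline(&mut cache, 3), None);
    }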
diff --git a/src/librustc_trans/trans/intrinsic.rs b/src/librustc_trans/trans/intrinsic.rs
deleted file mode 100644 (file)
index b7b520f..0000000
+++ /dev/null
@@ -1,1699 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(non_upper_case_globals)]
-
-use arena::TypedArena;
-use intrinsics::{self, Intrinsic};
-use libc;
-use llvm;
-use llvm::{ValueRef, TypeKind};
-use middle::infer;
-use middle::subst;
-use middle::subst::FnSpace;
-use trans::adt;
-use trans::attributes;
-use trans::base::*;
-use trans::build::*;
-use trans::callee;
-use trans::cleanup;
-use trans::cleanup::CleanupMethods;
-use trans::common::*;
-use trans::consts;
-use trans::datum::*;
-use trans::debuginfo::DebugLoc;
-use trans::declare;
-use trans::expr;
-use trans::glue;
-use trans::type_of;
-use trans::machine;
-use trans::type_::Type;
-use middle::ty::{self, Ty, TypeFoldable};
-use trans::Disr;
-use middle::subst::Substs;
-use rustc::dep_graph::DepNode;
-use rustc_front::hir;
-use syntax::abi::Abi;
-use syntax::ast;
-use syntax::ptr::P;
-use syntax::parse::token;
-
-use rustc::session::Session;
-use syntax::codemap::Span;
-
-use std::cmp::Ordering;
-
-pub fn get_simple_intrinsic(ccx: &CrateContext, item: &hir::ForeignItem) -> Option<ValueRef> {
-    let name = match &*item.name.as_str() {
-        "sqrtf32" => "llvm.sqrt.f32",
-        "sqrtf64" => "llvm.sqrt.f64",
-        "powif32" => "llvm.powi.f32",
-        "powif64" => "llvm.powi.f64",
-        "sinf32" => "llvm.sin.f32",
-        "sinf64" => "llvm.sin.f64",
-        "cosf32" => "llvm.cos.f32",
-        "cosf64" => "llvm.cos.f64",
-        "powf32" => "llvm.pow.f32",
-        "powf64" => "llvm.pow.f64",
-        "expf32" => "llvm.exp.f32",
-        "expf64" => "llvm.exp.f64",
-        "exp2f32" => "llvm.exp2.f32",
-        "exp2f64" => "llvm.exp2.f64",
-        "logf32" => "llvm.log.f32",
-        "logf64" => "llvm.log.f64",
-        "log10f32" => "llvm.log10.f32",
-        "log10f64" => "llvm.log10.f64",
-        "log2f32" => "llvm.log2.f32",
-        "log2f64" => "llvm.log2.f64",
-        "fmaf32" => "llvm.fma.f32",
-        "fmaf64" => "llvm.fma.f64",
-        "fabsf32" => "llvm.fabs.f32",
-        "fabsf64" => "llvm.fabs.f64",
-        "copysignf32" => "llvm.copysign.f32",
-        "copysignf64" => "llvm.copysign.f64",
-        "floorf32" => "llvm.floor.f32",
-        "floorf64" => "llvm.floor.f64",
-        "ceilf32" => "llvm.ceil.f32",
-        "ceilf64" => "llvm.ceil.f64",
-        "truncf32" => "llvm.trunc.f32",
-        "truncf64" => "llvm.trunc.f64",
-        "rintf32" => "llvm.rint.f32",
-        "rintf64" => "llvm.rint.f64",
-        "nearbyintf32" => "llvm.nearbyint.f32",
-        "nearbyintf64" => "llvm.nearbyint.f64",
-        "roundf32" => "llvm.round.f32",
-        "roundf64" => "llvm.round.f64",
-        "assume" => "llvm.assume",
-        _ => return None
-    };
-    Some(ccx.get_intrinsic(&name))
-}
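
Each name in the table above is a compiler intrinsic that the standard library's float methods forward to, so calls like f32::sqrt end up as the corresponding llvm.* intrinsic. A hedged illustration of that user-visible surface (the exact lowering is an implementation detail):

    // Hedged illustration: stable float methods map onto the intrinsics named
    // in the table above (sqrtf32 -> llvm.sqrt.f32, powif32 -> llvm.powi.f32, ...).
    fn main() {
        assert_eq!(4.0f32.sqrt(), 2.0);      // sqrtf32
        assert_eq!(2.0f32.powi(10), 1024.0); // powif32
        assert_eq!(3.7f64.floor(), 3.0);     // floorf64
    }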
-
-pub fn span_transmute_size_error(a: &Session, b: Span, msg: &str) {
-    span_err!(a, b, E0512, "{}", msg);
-}
-
-/// Performs late verification that intrinsics are used correctly. At present,
-/// the only intrinsic that needs such verification is `transmute`.
-pub fn check_intrinsics(ccx: &CrateContext) {
-    let _task = ccx.tcx().dep_graph.in_task(DepNode::IntrinsicUseCheck);
-    let mut last_failing_id = None;
-    for transmute_restriction in ccx.tcx().transmute_restrictions.borrow().iter() {
-        // Sometimes a single call to transmute will push multiple
-        // type pairs to test, in order to exhaustively cover the
-        // possibilities around a type parameter. If one of those fails,
-        // there is no sense in reporting errors on the others.
-        if last_failing_id == Some(transmute_restriction.id) {
-            continue;
-        }
-
-        debug!("transmute_restriction: {:?}", transmute_restriction);
-
-        assert!(!transmute_restriction.substituted_from.has_param_types());
-        assert!(!transmute_restriction.substituted_to.has_param_types());
-
-        let llfromtype = type_of::sizing_type_of(ccx,
-                                                 transmute_restriction.substituted_from);
-        let lltotype = type_of::sizing_type_of(ccx,
-                                               transmute_restriction.substituted_to);
-        let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
-        let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
-        if from_type_size != to_type_size {
-            last_failing_id = Some(transmute_restriction.id);
-
-            if transmute_restriction.original_from != transmute_restriction.substituted_from {
-                span_transmute_size_error(ccx.sess(), transmute_restriction.span,
-                    &format!("transmute called with differently sized types: \
-                              {} (could be {} bit{}) to {} (could be {} bit{})",
-                             transmute_restriction.original_from,
-                             from_type_size as usize,
-                             if from_type_size == 1 {""} else {"s"},
-                             transmute_restriction.original_to,
-                             to_type_size as usize,
-                             if to_type_size == 1 {""} else {"s"}));
-            } else {
-                span_transmute_size_error(ccx.sess(), transmute_restriction.span,
-                    &format!("transmute called with differently sized types: \
-                              {} ({} bit{}) to {} ({} bit{})",
-                             transmute_restriction.original_from,
-                             from_type_size as usize,
-                             if from_type_size == 1 {""} else {"s"},
-                             transmute_restriction.original_to,
-                             to_type_size as usize,
-                             if to_type_size == 1 {""} else {"s"}));
-            }
-        }
-    }
-    ccx.sess().abort_if_errors();
-}
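
The check above rejects any transmute whose source and target have different sizes, reporting E0512. The same invariant can be observed from ordinary code with std::mem::size_of; this is a hedged illustration of the rule, not the compiler's own check:

    use std::mem::size_of;

    // Hedged illustration: transmute is only accepted between types of equal
    // size, mirroring the from/to bit-size comparison above.
    fn transmute_would_be_rejected<Src, Dst>() -> bool {
        size_of::<Src>() != size_of::<Dst>()
    }

    fn main() {
        // u32 -> [u8; 4]: same size, accepted.
        assert!(!transmute_would_be_rejected::<u32, [u8; 4]>());
        // u32 -> u64: differently sized types, rejected with E0512.
        assert!(transmute_would_be_rejected::<u32, u64>());
    }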
-
-/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
-/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
-/// add them to librustc_trans/trans/context.rs
-pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                                            node: ast::NodeId,
-                                            callee_ty: Ty<'tcx>,
-                                            cleanup_scope: cleanup::CustomScopeIndex,
-                                            args: callee::CallArgs<'a, 'tcx>,
-                                            dest: expr::Dest,
-                                            substs: subst::Substs<'tcx>,
-                                            call_info: NodeIdAndSpan)
-                                            -> Result<'blk, 'tcx> {
-    let fcx = bcx.fcx;
-    let ccx = fcx.ccx;
-    let tcx = bcx.tcx();
-
-    let _icx = push_ctxt("trans_intrinsic_call");
-
-    let sig = ccx.tcx().erase_late_bound_regions(callee_ty.fn_sig());
-    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
-    let arg_tys = sig.inputs;
-    let ret_ty = sig.output;
-    let foreign_item = tcx.map.expect_foreign_item(node);
-    let name = foreign_item.name.as_str();
-
-    // For `transmute` we can just trans the input expr directly into dest
-    if name == "transmute" {
-        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
-        match args {
-            callee::ArgExprs(arg_exprs) => {
-                assert_eq!(arg_exprs.len(), 1);
-
-                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
-                                           *substs.types.get(FnSpace, 1));
-                let llintype = type_of::type_of(ccx, in_type);
-                let llouttype = type_of::type_of(ccx, out_type);
-
-                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
-                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
-
-                // This should be caught by the intrinsicck pass
-                assert_eq!(in_type_size, out_type_size);
-
-                let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
-                    use llvm::TypeKind::*;
-                    match llkind {
-                        Half | Float | Double | X86_FP80 | FP128 |
-                            PPC_FP128 | Integer | Vector | X86_MMX => true,
-                        _ => false
-                    }
-                };
-
-                // An approximation to which types can be directly cast via
-                // LLVM's bitcast.  This doesn't cover pointer -> pointer casts,
-                // but does, importantly, cover SIMD types.
-                let in_kind = llintype.kind();
-                let ret_kind = llret_ty.kind();
-                let bitcast_compatible =
-                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
-                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
-                    };
-
-                let dest = if bitcast_compatible {
-                    // if we're here, the type is scalar-like (a primitive, a
-                    // SIMD type or a pointer), and so can be handled as a
-                    // by-value ValueRef and can also be directly bitcast to the
-                    // target type.  Doing this special case makes conversions
-                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
-                    // efficient (these are done efficiently implicitly in C
-                    // with the `__m128i` type and so this means Rust doesn't
-                    // lose out there).
-                    let expr = &arg_exprs[0];
-                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
-                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
-                    let val = if datum.kind.is_by_ref() {
-                        load_ty(bcx, datum.val, datum.ty)
-                    } else {
-                        from_arg_ty(bcx, datum.val, datum.ty)
-                    };
-
-                    let cast_val = BitCast(bcx, val, llret_ty);
-
-                    match dest {
-                        expr::SaveIn(d) => {
-                            // this often occurs in a sequence like `Store(val,
-                            // d); val2 = Load(d)`, so disappears easily.
-                            Store(bcx, cast_val, d);
-                        }
-                        expr::Ignore => {}
-                    }
-                    dest
-                } else {
-                    // The types are too complicated to do with a by-value
-                    // bitcast, so pointer cast instead. We need to cast the
-                    // dest so the types work out.
-                    let dest = match dest {
-                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
-                        expr::Ignore => expr::Ignore
-                    };
-                    bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
-                    dest
-                };
-
-                fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
-                fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
-
-                return match dest {
-                    expr::SaveIn(d) => Result::new(bcx, d),
-                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
-                };
-
-            }
-
-            _ => {
-                ccx.sess().bug("expected expr as argument for transmute");
-            }
-        }
-    }
-
-    // For `move_val_init` we can evaluate the destination address
-    // (the first argument) and then trans the source value (the
-    // second argument) directly into the resulting destination
-    // address.
-    if name == "move_val_init" {
-        if let callee::ArgExprs(ref exprs) = args {
-            let (dest_expr, source_expr) = if exprs.len() != 2 {
-                ccx.sess().bug("expected two exprs as arguments for `move_val_init` intrinsic");
-            } else {
-                (&exprs[0], &exprs[1])
-            };
-
-            // evaluate destination address
-            let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
-            let dest_datum = unpack_datum!(
-                bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
-            let dest_datum = unpack_datum!(
-                bcx, dest_datum.to_appropriate_datum(bcx));
-
-            // `expr::trans_into(bcx, expr, dest)` is equiv to
-            //
-            //    `trans(bcx, expr).store_to_dest(dest)`,
-            //
-            // which for `dest == expr::SaveIn(addr)`, is equivalent to:
-            //
-            //    `trans(bcx, expr).store_to(bcx, addr)`.
-            let lldest = expr::Dest::SaveIn(dest_datum.val);
-            bcx = expr::trans_into(bcx, source_expr, lldest);
-
-            let llresult = C_nil(ccx);
-            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
-
-            return Result::new(bcx, llresult);
-        } else {
-            ccx.sess().bug("expected two exprs as arguments for `move_val_init` intrinsic");
-        }
-    }
-
-    let call_debug_location = DebugLoc::At(call_info.id, call_info.span);
-
-    // For `try` we need some custom control flow
-    if &name[..] == "try" {
-        if let callee::ArgExprs(ref exprs) = args {
-            let (func, data, local_ptr) = if exprs.len() != 3 {
-                ccx.sess().bug("expected three exprs as arguments for \
-                                `try` intrinsic");
-            } else {
-                (&exprs[0], &exprs[1], &exprs[2])
-            };
-
-            // translate arguments
-            let func = unpack_datum!(bcx, expr::trans(bcx, func));
-            let func = unpack_datum!(bcx, func.to_rvalue_datum(bcx, "func"));
-            let data = unpack_datum!(bcx, expr::trans(bcx, data));
-            let data = unpack_datum!(bcx, data.to_rvalue_datum(bcx, "data"));
-            let local_ptr = unpack_datum!(bcx, expr::trans(bcx, local_ptr));
-            let local_ptr = local_ptr.to_rvalue_datum(bcx, "local_ptr");
-            let local_ptr = unpack_datum!(bcx, local_ptr);
-
-            let dest = match dest {
-                expr::SaveIn(d) => d,
-                expr::Ignore => alloc_ty(bcx, tcx.mk_mut_ptr(tcx.types.i8),
-                                         "try_result"),
-            };
-
-            // do the invoke
-            bcx = try_intrinsic(bcx, func.val, data.val, local_ptr.val, dest,
-                                call_debug_location);
-
-            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
-            return Result::new(bcx, dest);
-        } else {
-            ccx.sess().bug("expected three exprs as arguments for \
-                            `try` intrinsic");
-        }
-    }
-
-    // save the actual AST arguments for later (some places need to do
-    // const-evaluation on them)
-    let expr_arguments = match args {
-        callee::ArgExprs(args) => Some(args),
-        _ => None,
-    };
-
-    // Push the arguments.
-    let mut llargs = Vec::new();
-    bcx = callee::trans_args(bcx,
-                             args,
-                             callee_ty,
-                             &mut llargs,
-                             cleanup::CustomScope(cleanup_scope),
-                             false,
-                             Abi::RustIntrinsic);
-
-    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
-
-    // These are the only intrinsic functions that diverge.
-    if name == "abort" {
-        let llfn = ccx.get_intrinsic(&("llvm.trap"));
-        Call(bcx, llfn, &[], None, call_debug_location);
-        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
-        Unreachable(bcx);
-        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
-    } else if &name[..] == "unreachable" {
-        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
-        Unreachable(bcx);
-        return Result::new(bcx, C_nil(ccx));
-    }
-
-    let ret_ty = match ret_ty {
-        ty::FnConverging(ret_ty) => ret_ty,
-        ty::FnDiverging => unreachable!()
-    };
-
-    let llret_ty = type_of::type_of(ccx, ret_ty);
-
-    // Get location to store the result. If the user does
-    // not care about the result, just make a stack slot
-    let llresult = match dest {
-        expr::SaveIn(d) => d,
-        expr::Ignore => {
-            if !type_is_zero_size(ccx, ret_ty) {
-                let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
-                call_lifetime_start(bcx, llresult);
-                llresult
-            } else {
-                C_undef(llret_ty.ptr_to())
-            }
-        }
-    };
-
-    let simple = get_simple_intrinsic(ccx, &foreign_item);
-    let llval = match (simple, &*name) {
-        (Some(llfn), _) => {
-            Call(bcx, llfn, &llargs, None, call_debug_location)
-        }
-        (_, "breakpoint") => {
-            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
-            Call(bcx, llfn, &[], None, call_debug_location)
-        }
-        (_, "size_of") => {
-            let tp_ty = *substs.types.get(FnSpace, 0);
-            let lltp_ty = type_of::type_of(ccx, tp_ty);
-            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
-        }
-        (_, "size_of_val") => {
-            let tp_ty = *substs.types.get(FnSpace, 0);
-            if !type_is_sized(tcx, tp_ty) {
-                let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
-                llsize
-            } else {
-                let lltp_ty = type_of::type_of(ccx, tp_ty);
-                C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
-            }
-        }
-        (_, "min_align_of") => {
-            let tp_ty = *substs.types.get(FnSpace, 0);
-            C_uint(ccx, type_of::align_of(ccx, tp_ty))
-        }
-        (_, "min_align_of_val") => {
-            let tp_ty = *substs.types.get(FnSpace, 0);
-            if !type_is_sized(tcx, tp_ty) {
-                let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
-                llalign
-            } else {
-                C_uint(ccx, type_of::align_of(ccx, tp_ty))
-            }
-        }
-        (_, "pref_align_of") => {
-            let tp_ty = *substs.types.get(FnSpace, 0);
-            let lltp_ty = type_of::type_of(ccx, tp_ty);
-            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
-        }
-        (_, "drop_in_place") => {
-            let tp_ty = *substs.types.get(FnSpace, 0);
-            let ptr = if type_is_sized(tcx, tp_ty) {
-                llargs[0]
-            } else {
-                let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
-                Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
-                Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
-                fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
-                scratch.val
-            };
-            glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
-            C_nil(ccx)
-        }
-        (_, "type_name") => {
-            let tp_ty = *substs.types.get(FnSpace, 0);
-            let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
-            C_str_slice(ccx, ty_name)
-        }
-        (_, "type_id") => {
-            let hash = ccx.tcx().hash_crate_independent(*substs.types.get(FnSpace, 0),
-                                                        &ccx.link_meta().crate_hash);
-            C_u64(ccx, hash)
-        }
-        (_, "init_dropped") => {
-            let tp_ty = *substs.types.get(FnSpace, 0);
-            if !return_type_is_void(ccx, tp_ty) {
-                drop_done_fill_mem(bcx, llresult, tp_ty);
-            }
-            C_nil(ccx)
-        }
-        (_, "init") => {
-            let tp_ty = *substs.types.get(FnSpace, 0);
-            if !return_type_is_void(ccx, tp_ty) {
-                // Just zero out the stack slot. (See comment on base::memzero for explanation)
-                init_zero_mem(bcx, llresult, tp_ty);
-            }
-            C_nil(ccx)
-        }
-        // Effectively no-ops
-        (_, "uninit") | (_, "forget") => {
-            C_nil(ccx)
-        }
-        (_, "needs_drop") => {
-            let tp_ty = *substs.types.get(FnSpace, 0);
-
-            C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
-        }
-        (_, "offset") => {
-            let ptr = llargs[0];
-            let offset = llargs[1];
-            InBoundsGEP(bcx, ptr, &[offset])
-        }
-        (_, "arith_offset") => {
-            let ptr = llargs[0];
-            let offset = llargs[1];
-            GEP(bcx, ptr, &[offset])
-        }
-
-        (_, "copy_nonoverlapping") => {
-            copy_intrinsic(bcx,
-                           false,
-                           false,
-                           *substs.types.get(FnSpace, 0),
-                           llargs[1],
-                           llargs[0],
-                           llargs[2],
-                           call_debug_location)
-        }
-        (_, "copy") => {
-            copy_intrinsic(bcx,
-                           true,
-                           false,
-                           *substs.types.get(FnSpace, 0),
-                           llargs[1],
-                           llargs[0],
-                           llargs[2],
-                           call_debug_location)
-        }
-        (_, "write_bytes") => {
-            memset_intrinsic(bcx,
-                             false,
-                             *substs.types.get(FnSpace, 0),
-                             llargs[0],
-                             llargs[1],
-                             llargs[2],
-                             call_debug_location)
-        }
-
-        (_, "volatile_copy_nonoverlapping_memory") => {
-            copy_intrinsic(bcx,
-                           false,
-                           true,
-                           *substs.types.get(FnSpace, 0),
-                           llargs[0],
-                           llargs[1],
-                           llargs[2],
-                           call_debug_location)
-        }
-        (_, "volatile_copy_memory") => {
-            copy_intrinsic(bcx,
-                           true,
-                           true,
-                           *substs.types.get(FnSpace, 0),
-                           llargs[0],
-                           llargs[1],
-                           llargs[2],
-                           call_debug_location)
-        }
-        (_, "volatile_set_memory") => {
-            memset_intrinsic(bcx,
-                             true,
-                             *substs.types.get(FnSpace, 0),
-                             llargs[0],
-                             llargs[1],
-                             llargs[2],
-                             call_debug_location)
-        }
-        (_, "volatile_load") => {
-            let tp_ty = *substs.types.get(FnSpace, 0);
-            let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
-            let load = VolatileLoad(bcx, ptr);
-            unsafe {
-                llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
-            }
-            to_arg_ty(bcx, load, tp_ty)
-        },
-        (_, "volatile_store") => {
-            let tp_ty = *substs.types.get(FnSpace, 0);
-            let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
-            let val = from_arg_ty(bcx, llargs[1], tp_ty);
-            let store = VolatileStore(bcx, val, ptr);
-            unsafe {
-                llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
-            }
-            C_nil(ccx)
-        },
-
-        (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") |
-        (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") |
-        (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") |
-        (_, "unchecked_div") | (_, "unchecked_rem") => {
-            let sty = &arg_tys[0].sty;
-            match int_type_width_signed(sty, ccx) {
-                Some((width, signed)) =>
-                    match &*name {
-                        "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
-                                                        llargs[0], call_debug_location),
-                        "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
-                                                        llargs[0], call_debug_location),
-                        "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
-                                        &llargs, None, call_debug_location),
-                        "bswap" => {
-                            if width == 8 {
-                                llargs[0] // byte swap a u8/i8 is just a no-op
-                            } else {
-                                Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
-                                        &llargs, None, call_debug_location)
-                            }
-                        }
-                        "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
-                            let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
-                                                    if signed { 's' } else { 'u' },
-                                                    &name[..3], width);
-                            with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
-                                                    call_debug_location)
-                        },
-                        "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
-                        "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
-                        "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
-                        "unchecked_div" =>
-                            if signed {
-                                SDiv(bcx, llargs[0], llargs[1], call_debug_location)
-                            } else {
-                                UDiv(bcx, llargs[0], llargs[1], call_debug_location)
-                            },
-                        "unchecked_rem" =>
-                            if signed {
-                                SRem(bcx, llargs[0], llargs[1], call_debug_location)
-                            } else {
-                                URem(bcx, llargs[0], llargs[1], call_debug_location)
-                            },
-                        _ => unreachable!(),
-                    },
-                None => {
-                    span_invalid_monomorphization_error(
-                        tcx.sess, call_info.span,
-                        &format!("invalid monomorphization of `{}` intrinsic: \
-                                  expected basic integer type, found `{}`", name, sty));
-                        C_null(llret_ty)
-                }
-            }
-
-        },
-
-
-        (_, "return_address") => {
-            if !fcx.caller_expects_out_pointer {
-                span_err!(tcx.sess, call_info.span, E0510,
-                          "invalid use of `return_address` intrinsic: function \
-                           does not use out pointer");
-                C_null(Type::i8p(ccx))
-            } else {
-                PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
-            }
-        }
-
-        (_, "discriminant_value") => {
-            let val_ty = substs.types.get(FnSpace, 0);
-            match val_ty.sty {
-                ty::TyEnum(..) => {
-                    let repr = adt::represent_type(ccx, *val_ty);
-                    adt::trans_get_discr(bcx, &repr, llargs[0],
-                                         Some(llret_ty), true)
-                }
-                _ => C_null(llret_ty)
-            }
-        }
-        (_, name) if name.starts_with("simd_") => {
-            generic_simd_intrinsic(bcx, name,
-                                   substs,
-                                   callee_ty,
-                                   expr_arguments,
-                                   &llargs,
-                                   ret_ty, llret_ty,
-                                   call_debug_location,
-                                   call_info)
-        }
-        // This requires that atomic intrinsics follow a specific naming pattern:
-        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
-        (_, name) if name.starts_with("atomic_") => {
-            let split: Vec<&str> = name.split('_').collect();
-
-            let (order, failorder) = match split.len() {
-                2 => (llvm::SequentiallyConsistent, llvm::SequentiallyConsistent),
-                3 => match split[2] {
-                    "unordered" => (llvm::Unordered, llvm::Unordered),
-                    "relaxed" => (llvm::Monotonic, llvm::Monotonic),
-                    "acq"     => (llvm::Acquire, llvm::Acquire),
-                    "rel"     => (llvm::Release, llvm::Monotonic),
-                    "acqrel"  => (llvm::AcquireRelease, llvm::Acquire),
-                    "failrelaxed" if split[1] == "cxchg" || split[1] == "cxchgweak" =>
-                        (llvm::SequentiallyConsistent, llvm::Monotonic),
-                    "failacq" if split[1] == "cxchg" || split[1] == "cxchgweak" =>
-                        (llvm::SequentiallyConsistent, llvm::Acquire),
-                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
-                },
-                4 => match (split[2], split[3]) {
-                    ("acq", "failrelaxed") if split[1] == "cxchg" || split[1] == "cxchgweak" =>
-                        (llvm::Acquire, llvm::Monotonic),
-                    ("acqrel", "failrelaxed") if split[1] == "cxchg" || split[1] == "cxchgweak" =>
-                        (llvm::AcquireRelease, llvm::Monotonic),
-                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
-                },
-                _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
-            };
-
-            match split[1] {
-                "cxchg" => {
-                    let tp_ty = *substs.types.get(FnSpace, 0);
-                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
-                    let cmp = from_arg_ty(bcx, llargs[1], tp_ty);
-                    let src = from_arg_ty(bcx, llargs[2], tp_ty);
-                    let res = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::False);
-                    ExtractValue(bcx, res, 0)
-                }
-
-                "cxchgweak" => {
-                    let tp_ty = *substs.types.get(FnSpace, 0);
-                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
-                    let cmp = from_arg_ty(bcx, llargs[1], tp_ty);
-                    let src = from_arg_ty(bcx, llargs[2], tp_ty);
-                    let val = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::True);
-                    let result = ExtractValue(bcx, val, 0);
-                    let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
-                    Store(bcx, result, StructGEP(bcx, llresult, 0));
-                    Store(bcx, success, StructGEP(bcx, llresult, 1));
-                    C_nil(ccx)
-                }
-
-                "load" => {
-                    let tp_ty = *substs.types.get(FnSpace, 0);
-                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
-                    to_arg_ty(bcx, AtomicLoad(bcx, ptr, order), tp_ty)
-                }
-                "store" => {
-                    let tp_ty = *substs.types.get(FnSpace, 0);
-                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
-                    let val = from_arg_ty(bcx, llargs[1], tp_ty);
-                    AtomicStore(bcx, val, ptr, order);
-                    C_nil(ccx)
-                }
-
-                "fence" => {
-                    AtomicFence(bcx, order, llvm::CrossThread);
-                    C_nil(ccx)
-                }
-
-                "singlethreadfence" => {
-                    AtomicFence(bcx, order, llvm::SingleThread);
-                    C_nil(ccx)
-                }
-
-                // These are all AtomicRMW ops
-                op => {
-                    let atom_op = match op {
-                        "xchg"  => llvm::AtomicXchg,
-                        "xadd"  => llvm::AtomicAdd,
-                        "xsub"  => llvm::AtomicSub,
-                        "and"   => llvm::AtomicAnd,
-                        "nand"  => llvm::AtomicNand,
-                        "or"    => llvm::AtomicOr,
-                        "xor"   => llvm::AtomicXor,
-                        "max"   => llvm::AtomicMax,
-                        "min"   => llvm::AtomicMin,
-                        "umax"  => llvm::AtomicUMax,
-                        "umin"  => llvm::AtomicUMin,
-                        _ => ccx.sess().fatal("unknown atomic operation")
-                    };
-
-                    let tp_ty = *substs.types.get(FnSpace, 0);
-                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
-                    let val = from_arg_ty(bcx, llargs[1], tp_ty);
-                    AtomicRMW(bcx, atom_op, ptr, val, order)
-                }
-            }
-
-        }
-
-        (_, _) => {
-            let intr = match Intrinsic::find(tcx, &name) {
-                Some(intr) => intr,
-                None => ccx.sess().span_bug(foreign_item.span,
-                                            &format!("unknown intrinsic '{}'", name)),
-            };
-            fn one<T>(x: Vec<T>) -> T {
-                assert_eq!(x.len(), 1);
-                x.into_iter().next().unwrap()
-            }
-            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
-                          any_changes_needed: &mut bool) -> Vec<Type> {
-                use intrinsics::Type::*;
-                match *t {
-                    Void => vec![Type::void(ccx)],
-                    Integer(_signed, width, llvm_width) => {
-                        *any_changes_needed |= width != llvm_width;
-                        vec![Type::ix(ccx, llvm_width as u64)]
-                    }
-                    Float(x) => {
-                        match x {
-                            32 => vec![Type::f32(ccx)],
-                            64 => vec![Type::f64(ccx)],
-                            _ => unreachable!()
-                        }
-                    }
-                    Pointer(ref t, ref llvm_elem, _const) => {
-                        *any_changes_needed |= llvm_elem.is_some();
-
-                        let t = llvm_elem.as_ref().unwrap_or(t);
-                        let elem = one(ty_to_type(ccx, t,
-                                                  any_changes_needed));
-                        vec![elem.ptr_to()]
-                    }
-                    Vector(ref t, ref llvm_elem, length) => {
-                        *any_changes_needed |= llvm_elem.is_some();
-
-                        let t = llvm_elem.as_ref().unwrap_or(t);
-                        let elem = one(ty_to_type(ccx, t,
-                                                  any_changes_needed));
-                        vec![Type::vector(&elem,
-                                          length as u64)]
-                    }
-                    Aggregate(false, ref contents) => {
-                        let elems = contents.iter()
-                                            .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
-                                            .collect::<Vec<_>>();
-                        vec![Type::struct_(ccx, &elems, false)]
-                    }
-                    Aggregate(true, ref contents) => {
-                        *any_changes_needed = true;
-                        contents.iter()
-                                .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
-                                .collect()
-                    }
-                }
-            }
-
-            // This allows an argument list like `foo, (bar, baz),
-            // qux` to be converted into `foo, bar, baz, qux`, integer
-            // arguments to be truncated as needed and pointers to be
-            // cast.
-            fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                            t: &intrinsics::Type,
-                                            arg_type: Ty<'tcx>,
-                                            llarg: ValueRef)
-                                            -> Vec<ValueRef>
-            {
-                match *t {
-                    intrinsics::Type::Aggregate(true, ref contents) => {
-                        // We found a tuple that needs squishing! So
-                        // run over the tuple and load each field.
-                        //
-                        // This assumes the type is "simple", i.e. no
-                        // destructors, and the contents are SIMD
-                        // etc.
-                        assert!(!bcx.fcx.type_needs_drop(arg_type));
-
-                        let repr = adt::represent_type(bcx.ccx(), arg_type);
-                        let repr_ptr = &repr;
-                        let arg = adt::MaybeSizedValue::sized(llarg);
-                        (0..contents.len())
-                            .map(|i| {
-                                Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i))
-                            })
-                            .collect()
-                    }
-                    intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
-                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
-                        vec![PointerCast(bcx, llarg,
-                                         llvm_elem.ptr_to())]
-                    }
-                    intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
-                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
-                        vec![BitCast(bcx, llarg,
-                                     Type::vector(&llvm_elem, length as u64))]
-                    }
-                    intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
-                        // the LLVM intrinsic uses a smaller integer
-                        // size than the C intrinsic's signature, so
-                        // we have to trim it down here.
-                        vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
-                    }
-                    _ => vec![llarg],
-                }
-            }
-
-
-            let mut any_changes_needed = false;
-            let inputs = intr.inputs.iter()
-                                    .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
-                                    .collect::<Vec<_>>();
-
-            let mut out_changes = false;
-            let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
-            // outputting a flattened aggregate is nonsense
-            assert!(!out_changes);
-
-            let llargs = if !any_changes_needed {
-                // no aggregates to flatten, so no change needed
-                llargs
-            } else {
-                // there are some aggregates that need to be flattened
-                // in the LLVM call, so we need to run over the types
-                // again to find them and extract the arguments
-                intr.inputs.iter()
-                           .zip(&llargs)
-                           .zip(&arg_tys)
-                           .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
-                           .collect()
-            };
-            assert_eq!(inputs.len(), llargs.len());
-
-            let val = match intr.definition {
-                intrinsics::IntrinsicDef::Named(name) => {
-                    let f = declare::declare_cfn(ccx,
-                                                 name,
-                                                 Type::func(&inputs, &outputs),
-                                                 tcx.mk_nil());
-                    Call(bcx, f, &llargs, None, call_debug_location)
-                }
-            };
-
-            match intr.output {
-                intrinsics::Type::Aggregate(flatten, ref elems) => {
-                    // the output is a tuple so we need to munge it properly
-                    assert!(!flatten);
-
-                    for i in 0..elems.len() {
-                        let val = ExtractValue(bcx, val, i);
-                        Store(bcx, val, StructGEP(bcx, llresult, i));
-                    }
-                    C_nil(ccx)
-                }
-                _ => val,
-            }
-        }
-    };
-
-    if val_ty(llval) != Type::void(ccx) &&
-       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
-        store_ty(bcx, llval, llresult, ret_ty);
-    }
-
-    // If we made a temporary stack slot, let's clean it up
-    match dest {
-        expr::Ignore => {
-            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
-            call_lifetime_end(bcx, llresult);
-        }
-        expr::SaveIn(_) => {}
-    }
-
-    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
-
-    Result::new(bcx, llresult)
-}
-
-fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                              allow_overlap: bool,
-                              volatile: bool,
-                              tp_ty: Ty<'tcx>,
-                              dst: ValueRef,
-                              src: ValueRef,
-                              count: ValueRef,
-                              call_debug_location: DebugLoc)
-                              -> ValueRef {
-    let ccx = bcx.ccx();
-    let lltp_ty = type_of::type_of(ccx, tp_ty);
-    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
-    let size = machine::llsize_of(ccx, lltp_ty);
-    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
-
-    let operation = if allow_overlap {
-        "memmove"
-    } else {
-        "memcpy"
-    };
-
-    let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);
-
-    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
-    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
-    let llfn = ccx.get_intrinsic(&name);
-
-    Call(bcx,
-         llfn,
-         &[dst_ptr,
-           src_ptr,
-           Mul(bcx, size, count, DebugLoc::None),
-           align,
-           C_bool(ccx, volatile)],
-         None,
-         call_debug_location)
-}
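
copy_intrinsic scales the element count by the element size and then calls either llvm.memmove (when overlap is allowed) or llvm.memcpy. On the library side that split surfaces as ptr::copy versus ptr::copy_nonoverlapping; a hedged usage example:

    // Hedged illustration of the allow_overlap split above: ptr::copy tolerates
    // overlapping regions (memmove-style), ptr::copy_nonoverlapping does not
    // (memcpy-style), and both take a count in elements rather than bytes.
    fn main() {
        let mut buf = [1u8, 2, 3, 4, 5];
        unsafe {
            let p = buf.as_mut_ptr();
            // Overlapping source and destination: shift the first four bytes right.
            std::ptr::copy(p, p.add(1), 4);
        }
        assert_eq!(buf, [1, 1, 2, 3, 4]);

        let src = [9u8, 9];
        let mut dst = [0u8; 2];
        unsafe {
            // Disjoint regions: the non-overlapping variant is fine.
            std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), 2);
        }
        assert_eq!(dst, [9, 9]);
    }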
-
-fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                volatile: bool,
-                                tp_ty: Ty<'tcx>,
-                                dst: ValueRef,
-                                val: ValueRef,
-                                count: ValueRef,
-                                call_debug_location: DebugLoc)
-                                -> ValueRef {
-    let ccx = bcx.ccx();
-    let lltp_ty = type_of::type_of(ccx, tp_ty);
-    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
-    let size = machine::llsize_of(ccx, lltp_ty);
-    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
-
-    let name = format!("llvm.memset.p0i8.i{}", int_size);
-
-    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
-    let llfn = ccx.get_intrinsic(&name);
-
-    Call(bcx,
-         llfn,
-         &[dst_ptr,
-           val,
-           Mul(bcx, size, count, DebugLoc::None),
-           align,
-           C_bool(ccx, volatile)],
-         None,
-         call_debug_location)
-}
-
-fn count_zeros_intrinsic(bcx: Block,
-                         name: &str,
-                         val: ValueRef,
-                         call_debug_location: DebugLoc)
-                         -> ValueRef {
-    let y = C_bool(bcx.ccx(), false);
-    let llfn = bcx.ccx().get_intrinsic(&name);
-    Call(bcx, llfn, &[val, y], None, call_debug_location)
-}
-
-fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                       name: &str,
-                                       a: ValueRef,
-                                       b: ValueRef,
-                                       out: ValueRef,
-                                       call_debug_location: DebugLoc)
-                                       -> ValueRef {
-    let llfn = bcx.ccx().get_intrinsic(&name);
-
-    // Convert `i1` to a `bool`, and write it to the out parameter
-    let val = Call(bcx, llfn, &[a, b], None, call_debug_location);
-    let result = ExtractValue(bcx, val, 0);
-    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
-    Store(bcx, result, StructGEP(bcx, out, 0));
-    Store(bcx, overflow, StructGEP(bcx, out, 1));
-
-    C_nil(bcx.ccx())
-}
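
The llvm.*.with.overflow.* intrinsics called here return a (result, i1) pair, and the i1 overflow flag is widened to a bool before being stored into the out parameter. At the language level the same pair shows up through the overflowing_* and checked_* methods; a hedged illustration:

    // Hedged illustration of what the *.with.overflow lowering produces:
    // a wrapped result plus a did-it-overflow flag.
    fn main() {
        let (sum, overflowed) = u8::overflowing_add(250, 10);
        assert_eq!((sum, overflowed), (4, true)); // 260 wraps to 4, flag is set

        let (sum, overflowed) = u8::overflowing_add(250, 5);
        assert_eq!((sum, overflowed), (255, false));

        // checked_* folds the flag into an Option instead.
        assert_eq!(u8::checked_add(250, 10), None);
    }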
-
-fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                             func: ValueRef,
-                             data: ValueRef,
-                             local_ptr: ValueRef,
-                             dest: ValueRef,
-                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
-    if bcx.sess().no_landing_pads() {
-        Call(bcx, func, &[data], None, dloc);
-        Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
-        bcx
-    } else if wants_msvc_seh(bcx.sess()) {
-        trans_msvc_try(bcx, func, data, local_ptr, dest, dloc)
-    } else {
-        trans_gnu_try(bcx, func, data, local_ptr, dest, dloc)
-    }
-}
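
try_intrinsic picks one of three lowerings: a plain call when landing pads are disabled, the MSVC SEH path, or the GNU landing-pad path below. The behavior all three implement is what the standard library exposes as panic::catch_unwind: run a closure and report either its result or the captured panic. A hedged usage example of that surface:

    use std::panic;

    // Hedged illustration of the behavior the `try` intrinsic provides to the
    // standard library: run a function and report success or the panic payload.
    fn main() {
        let ok = panic::catch_unwind(|| 2 + 2);
        assert_eq!(ok.ok(), Some(4));

        let err = panic::catch_unwind(|| -> i32 { panic!("boom") });
        assert!(err.is_err()); // the Err side carries the panic payload
    }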
-
-// MSVC's definition of the `rust_try` function.
-//
-// This implementation uses LLVM's new exception handling instructions, which
-// have SEH support on MSVC targets. Although these instructions are meant to
-// work for all targets, as of this writing LLVM does not yet recommend using
-// them, since the old ones are still better optimized.
-fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                              func: ValueRef,
-                              data: ValueRef,
-                              local_ptr: ValueRef,
-                              dest: ValueRef,
-                              dloc: DebugLoc) -> Block<'blk, 'tcx> {
-    let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
-        let ccx = bcx.ccx();
-        let dloc = DebugLoc::None;
-
-        SetPersonalityFn(bcx, bcx.fcx.eh_personality());
-
-        let normal = bcx.fcx.new_temp_block("normal");
-        let catchswitch = bcx.fcx.new_temp_block("catchswitch");
-        let catchpad = bcx.fcx.new_temp_block("catchpad");
-        let caught = bcx.fcx.new_temp_block("caught");
-
-        let func = llvm::get_param(bcx.fcx.llfn, 0);
-        let data = llvm::get_param(bcx.fcx.llfn, 1);
-        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
-
-        // We're generating an IR snippet that looks like:
-        //
-        //   declare i32 @rust_try(%func, %data, %ptr) {
-        //      %slot = alloca i8*
-        //      call @llvm.localescape(%slot)
-        //      store %ptr, %slot
-        //      invoke %func(%data) to label %normal unwind label %catchswitch
-        //
-        //   normal:
-        //      ret i32 0
-        //
-        //   catchswitch:
-        //      %cs = catchswitch within none [%catchpad] unwind to caller
-        //
-        //   catchpad:
-        //      %tok = catchpad within %cs [%rust_try_filter]
-        //      catchret from %tok to label %caught
-        //
-        //   caught:
-        //      ret i32 1
-        //   }
-        //
-        // This structure follows the basic usage of the instructions in LLVM
-        // (see their documentation/test cases for examples), but a
-        // perhaps-surprising part here is the usage of the `localescape`
-        // intrinsic. This is used to allow the filter function (also generated
-        // here) to access variables on the stack of this intrinsic. This
-        // ability enables us to transfer information about the exception being
-        // thrown to this point, where we're catching the exception.
-        //
-        // More information can be found in libstd's seh.rs implementation.
-        let slot = Alloca(bcx, Type::i8p(ccx), "slot");
-        let localescape = ccx.get_intrinsic(&"llvm.localescape");
-        Call(bcx, localescape, &[slot], None, dloc);
-        Store(bcx, local_ptr, slot);
-        Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, None, dloc);
-
-        Ret(normal, C_i32(ccx, 0), dloc);
-
-        let cs = CatchSwitch(catchswitch, None, None, 1);
-        AddHandler(catchswitch, cs, catchpad.llbb);
-
-        let filter = generate_filter_fn(bcx.fcx, bcx.fcx.llfn);
-        let filter = BitCast(catchpad, filter, Type::i8p(ccx));
-        let tok = CatchPad(catchpad, cs, &[filter]);
-        CatchRet(catchpad, tok, caught.llbb);
-
-        Ret(caught, C_i32(ccx, 1), dloc);
-    });
-
-    // Note that no invoke is used here because by definition this function
-    // can't panic (that's what it's catching).
-    let ret = Call(bcx, llfn, &[func, data, local_ptr], None, dloc);
-    Store(bcx, ret, dest);
-    return bcx
-}
-
-// Definition of the standard "try" function for Rust using the GNU-like model
-// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
-// instructions).
-//
-// This translation is a little surprising because we always call a shim
-// function instead of inlining the call to `invoke` manually here. This is done
-// because in LLVM we're only allowed to have one personality per function
-// definition. The call to the `try` intrinsic is being inlined into the
-// function calling it, and that function may already have other personality
-// functions in play. By calling a shim we're guaranteed that our shim will have
-// the right personality function.
-fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                             func: ValueRef,
-                             data: ValueRef,
-                             local_ptr: ValueRef,
-                             dest: ValueRef,
-                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
-    let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
-        let ccx = bcx.ccx();
-        let dloc = DebugLoc::None;
-
-        // Translates the shims described above:
-        //
-        //   bcx:
-        //      invoke %func(%args...) normal %normal unwind %catch
-        //
-        //   normal:
-        //      ret 0
-        //
-        //   catch:
-        //      (ptr, _) = landingpad
-        //      store ptr, %local_ptr
-        //      ret 1
-        //
-        // Note that the `local_ptr` data passed into the `try` intrinsic is
-        // expected to be `*mut *mut u8` for this to actually work, but that's
-        // managed by the standard library.
-
-        attributes::emit_uwtable(bcx.fcx.llfn, true);
-        let catch_pers = match bcx.tcx().lang_items.eh_personality_catch() {
-            Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
-                                              bcx.fcx.param_substs).val,
-            None => bcx.tcx().sess.bug("eh_personality_catch not defined"),
-        };
-
-        let then = bcx.fcx.new_temp_block("then");
-        let catch = bcx.fcx.new_temp_block("catch");
-
-        let func = llvm::get_param(bcx.fcx.llfn, 0);
-        let data = llvm::get_param(bcx.fcx.llfn, 1);
-        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
-        Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc);
-        Ret(then, C_i32(ccx, 0), dloc);
-
-        // Type indicator for the exception being thrown.
-        //
-        // The first value in this tuple is a pointer to the exception object
-        // being thrown.  The second value is a "selector" indicating which of
-        // the landing pad clauses the exception's type had been matched to.
-        // rust_try ignores the selector.
-        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
-                                    false);
-        let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
-        AddClause(catch, vals, C_null(Type::i8p(ccx)));
-        let ptr = ExtractValue(catch, vals, 0);
-        Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to()));
-        Ret(catch, C_i32(ccx, 1), dloc);
-    });
-
-    // Note that no invoke is used here because by definition this function
-    // can't panic (that's what it's catching).
-    let ret = Call(bcx, llfn, &[func, data, local_ptr], None, dloc);
-    Store(bcx, ret, dest);
-    return bcx;
-}
-
-// Helper function to give a Block to a closure to translate a shim function.
-// This is currently primarily used for the `try` intrinsic functions above.
-fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
-                    name: &str,
-                    ty: Ty<'tcx>,
-                    output: ty::FnOutput<'tcx>,
-                    trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
-                    -> ValueRef {
-    let ccx = fcx.ccx;
-    let llfn = declare::define_internal_rust_fn(ccx, name, ty);
-    let (fcx, block_arena);
-    block_arena = TypedArena::new();
-    fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
-                      output, ccx.tcx().mk_substs(Substs::trans_empty()),
-                      None, &block_arena);
-    let bcx = init_function(&fcx, true, output);
-    trans(bcx);
-    fcx.cleanup();
-    return llfn
-}
-
-// Helper function used to get a handle to the `__rust_try` function used to
-// catch exceptions.
-//
-// This function is only generated once and is then cached.
-fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
-                             trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
-                             -> ValueRef {
-    let ccx = fcx.ccx;
-    if let Some(llfn) = *ccx.rust_try_fn().borrow() {
-        return llfn
-    }
-
-    // Define the type up front for the signature of the rust_try function.
-    let tcx = ccx.tcx();
-    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
-    let fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
-        unsafety: hir::Unsafety::Unsafe,
-        abi: Abi::Rust,
-        sig: ty::Binder(ty::FnSig {
-            inputs: vec![i8p],
-            output: ty::FnOutput::FnConverging(tcx.mk_nil()),
-            variadic: false,
-        }),
-    });
-    let fn_ty = tcx.mk_fn(None, fn_ty);
-    let output = ty::FnOutput::FnConverging(tcx.types.i32);
-    let try_fn_ty  = tcx.mk_bare_fn(ty::BareFnTy {
-        unsafety: hir::Unsafety::Unsafe,
-        abi: Abi::Rust,
-        sig: ty::Binder(ty::FnSig {
-            inputs: vec![fn_ty, i8p, i8p],
-            output: output,
-            variadic: false,
-        }),
-    });
-    let rust_try = gen_fn(fcx, "__rust_try", tcx.mk_fn(None, try_fn_ty), output,
-                          trans);
-    *ccx.rust_try_fn().borrow_mut() = Some(rust_try);
-    return rust_try
-}
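
For orientation, the middle::ty types assembled above describe a shim with roughly
this Rust-level shape (a sketch for illustration; no such declaration appears in
this diff):

    unsafe fn __rust_try(f: unsafe fn(*mut i8), data: *mut i8, local_ptr: *mut i8) -> i32;

The i32 result is what trans_msvc_try and trans_gnu_try above store into `dest`:
0 on a normal return, 1 when a panic was caught.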
-
-// For MSVC-style exceptions (SEH), the compiler generates a filter function
-// which is used to determine whether an exception is being caught (e.g. whether
-// it's a Rust exception or some other kind).
-//
-// This function is used to generate said filter function. The shim generated
-// here is actually just a thin wrapper to call the real implementation in the
-// standard library itself. For reasons as to why, see seh.rs in the standard
-// library.
-fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
-                                rust_try_fn: ValueRef)
-                                -> ValueRef {
-    let ccx = fcx.ccx;
-    let tcx = ccx.tcx();
-    let dloc = DebugLoc::None;
-
-    let rust_try_filter = match ccx.tcx().lang_items.msvc_try_filter() {
-        Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
-                                          fcx.param_substs).val,
-        None => ccx.sess().bug("msvc_try_filter not defined"),
-    };
-
-    let output = ty::FnOutput::FnConverging(tcx.types.i32);
-    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
-
-    let frameaddress = ccx.get_intrinsic(&"llvm.frameaddress");
-    let recoverfp = ccx.get_intrinsic(&"llvm.x86.seh.recoverfp");
-    let localrecover = ccx.get_intrinsic(&"llvm.localrecover");
-
-    // On all platforms, once we have the EXCEPTION_POINTERS handle as well as
-    // the base pointer, we follow the standard layout of:
-    //
-    //      block:
-    //          %parentfp = call i8* llvm.x86.seh.recoverfp(@rust_try_fn, %bp)
-    //          %arg = call i8* llvm.localrecover(@rust_try_fn, %parentfp, 0)
-    //          %ret = call i32 @the_real_filter_function(%ehptrs, %arg)
-    //          ret i32 %ret
-    //
-    // The recoverfp intrinsic is used to recover the frame pointer of the
-    // `rust_try_fn` function, which is in turn passed to the `localrecover`
-    // intrinsic (pairing with the `localescape` intrinsic mentioned above).
-    // Putting all this together means that we now have a handle to the
-    // arguments passed into the `try` function, allowing us to write to that
-    // function's stack.
-    //
-    // For more info, see seh.rs in the standard library.
-    let do_trans = |bcx: Block, ehptrs, base_pointer| {
-        let rust_try_fn = BitCast(bcx, rust_try_fn, Type::i8p(ccx));
-        let parentfp = Call(bcx, recoverfp, &[rust_try_fn, base_pointer],
-                            None, dloc);
-        let arg = Call(bcx, localrecover,
-                       &[rust_try_fn, parentfp, C_i32(ccx, 0)], None, dloc);
-        let ret = Call(bcx, rust_try_filter, &[ehptrs, arg], None, dloc);
-        Ret(bcx, ret, dloc);
-    };
-
-    if ccx.tcx().sess.target.target.arch == "x86" {
-        // On x86 the filter function doesn't actually receive any arguments.
-        // Instead the %ebp register contains some contextual information.
-        //
-        // Unfortunately I don't know of any great documentation as to what's
-        // going on here; all I can say is that there are a few test cases in
-        // LLVM's test suite which follow this pattern of instructions, so we
-        // just do the same.
-        let filter_fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
-            unsafety: hir::Unsafety::Unsafe,
-            abi: Abi::Rust,
-            sig: ty::Binder(ty::FnSig {
-                inputs: vec![],
-                output: output,
-                variadic: false,
-            }),
-        });
-        let filter_fn_ty = tcx.mk_fn(None, filter_fn_ty);
-        gen_fn(fcx, "__rustc_try_filter", filter_fn_ty, output, &mut |bcx| {
-            let ebp = Call(bcx, frameaddress, &[C_i32(ccx, 1)], None, dloc);
-            let exn = InBoundsGEP(bcx, ebp, &[C_i32(ccx, -20)]);
-            let exn = Load(bcx, BitCast(bcx, exn, Type::i8p(ccx).ptr_to()));
-            do_trans(bcx, exn, ebp);
-        })
-    } else if ccx.tcx().sess.target.target.arch == "x86_64" {
-        // Conveniently on x86_64 the EXCEPTION_POINTERS handle and base pointer
-        // are passed in as arguments to the filter function, so we just pass
-        // those along.
-        let filter_fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
-            unsafety: hir::Unsafety::Unsafe,
-            abi: Abi::Rust,
-            sig: ty::Binder(ty::FnSig {
-                inputs: vec![i8p, i8p],
-                output: output,
-                variadic: false,
-            }),
-        });
-        let filter_fn_ty = tcx.mk_fn(None, filter_fn_ty);
-        gen_fn(fcx, "__rustc_try_filter", filter_fn_ty, output, &mut |bcx| {
-            let exn = llvm::get_param(bcx.fcx.llfn, 0);
-            let rbp = llvm::get_param(bcx.fcx.llfn, 1);
-            do_trans(bcx, exn, rbp);
-        })
-    } else {
-        panic!("unknown target to generate a filter function")
-    }
-}
-
-fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
-    span_err!(a, b, E0511, "{}", c);
-}
-
-fn generic_simd_intrinsic<'blk, 'tcx, 'a>
-    (bcx: Block<'blk, 'tcx>,
-     name: &str,
-     substs: subst::Substs<'tcx>,
-     callee_ty: Ty<'tcx>,
-     args: Option<&[P<hir::Expr>]>,
-     llargs: &[ValueRef],
-     ret_ty: Ty<'tcx>,
-     llret_ty: Type,
-     call_debug_location: DebugLoc,
-     call_info: NodeIdAndSpan) -> ValueRef
-{
-    // macros for error handling:
-    macro_rules! emit_error {
-        ($msg: tt) => {
-            emit_error!($msg, )
-        };
-        ($msg: tt, $($fmt: tt)*) => {
-            span_invalid_monomorphization_error(
-                bcx.sess(), call_info.span,
-                &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
-                                 $msg),
-                         name, $($fmt)*));
-        }
-    }
-    macro_rules! require {
-        ($cond: expr, $($fmt: tt)*) => {
-            if !$cond {
-                emit_error!($($fmt)*);
-                return C_null(llret_ty)
-            }
-        }
-    }
-    macro_rules! require_simd {
-        ($ty: expr, $position: expr) => {
-            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
-        }
-    }
-
-    let tcx = bcx.tcx();
-    let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig());
-    let sig = infer::normalize_associated_type(tcx, &sig);
-    let arg_tys = sig.inputs;
-
-    // every intrinsic takes a SIMD vector as its first argument
-    require_simd!(arg_tys[0], "input");
-    let in_ty = arg_tys[0];
-    let in_elem = arg_tys[0].simd_type(tcx);
-    let in_len = arg_tys[0].simd_size(tcx);
-
-    let comparison = match name {
-        "simd_eq" => Some(hir::BiEq),
-        "simd_ne" => Some(hir::BiNe),
-        "simd_lt" => Some(hir::BiLt),
-        "simd_le" => Some(hir::BiLe),
-        "simd_gt" => Some(hir::BiGt),
-        "simd_ge" => Some(hir::BiGe),
-        _ => None
-    };
-
-    if let Some(cmp_op) = comparison {
-        require_simd!(ret_ty, "return");
-
-        let out_len = ret_ty.simd_size(tcx);
-        require!(in_len == out_len,
-                 "expected return type with length {} (same as input type `{}`), \
-                  found `{}` with length {}",
-                 in_len, in_ty,
-                 ret_ty, out_len);
-        require!(llret_ty.element_type().kind() == llvm::Integer,
-                 "expected return type with integer elements, found `{}` with non-integer `{}`",
-                 ret_ty,
-                 ret_ty.simd_type(tcx));
-
-        return compare_simd_types(bcx,
-                                  llargs[0],
-                                  llargs[1],
-                                  in_elem,
-                                  llret_ty,
-                                  cmp_op,
-                                  call_debug_location)
-    }
-
-    if name.starts_with("simd_shuffle") {
-        let n: usize = match name["simd_shuffle".len()..].parse() {
-            Ok(n) => n,
-            Err(_) => tcx.sess.span_bug(call_info.span,
-                                        "bad `simd_shuffle` instruction only caught in trans?")
-        };
-
-        require_simd!(ret_ty, "return");
-
-        let out_len = ret_ty.simd_size(tcx);
-        require!(out_len == n,
-                 "expected return type of length {}, found `{}` with length {}",
-                 n, ret_ty, out_len);
-        require!(in_elem == ret_ty.simd_type(tcx),
-                 "expected return element type `{}` (element of input `{}`), \
-                  found `{}` with element type `{}`",
-                 in_elem, in_ty,
-                 ret_ty, ret_ty.simd_type(tcx));
-
-        let total_len = in_len as u64 * 2;
-
-        let vector = match args {
-            Some(args) => &args[2],
-            None => bcx.sess().span_bug(call_info.span,
-                                        "intrinsic call with unexpected argument shape"),
-        };
-        let vector = match consts::const_expr(
-            bcx.ccx(),
-            vector,
-            tcx.mk_substs(substs),
-            None,
-            consts::TrueConst::Yes, // this should probably help simd error reporting
-        ) {
-            Ok((vector, _)) => vector,
-            Err(err) => bcx.sess().span_fatal(call_info.span, &err.description()),
-        };
-
-        let indices: Option<Vec<_>> = (0..n)
-            .map(|i| {
-                let arg_idx = i;
-                let val = const_get_elt(bcx.ccx(), vector, &[i as libc::c_uint]);
-                let c = const_to_opt_uint(val);
-                match c {
-                    None => {
-                        emit_error!("shuffle index #{} is not a constant", arg_idx);
-                        None
-                    }
-                    Some(idx) if idx >= total_len => {
-                        emit_error!("shuffle index #{} is out of bounds (limit {})",
-                                    arg_idx, total_len);
-                        None
-                    }
-                    Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)),
-                }
-            })
-            .collect();
-        let indices = match indices {
-            Some(i) => i,
-            None => return C_null(llret_ty)
-        };
-
-        return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
-    }
-
-    if name == "simd_insert" {
-        require!(in_elem == arg_tys[2],
-                 "expected inserted type `{}` (element of input `{}`), found `{}`",
-                 in_elem, in_ty, arg_tys[2]);
-        return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
-    }
-    if name == "simd_extract" {
-        require!(ret_ty == in_elem,
-                 "expected return type `{}` (element of input `{}`), found `{}`",
-                 in_elem, in_ty, ret_ty);
-        return ExtractElement(bcx, llargs[0], llargs[1])
-    }
-
-    if name == "simd_cast" {
-        require_simd!(ret_ty, "return");
-        let out_len = ret_ty.simd_size(tcx);
-        require!(in_len == out_len,
-                 "expected return type with length {} (same as input type `{}`), \
-                  found `{}` with length {}",
-                 in_len, in_ty,
-                 ret_ty, out_len);
-        // casting cares about nominal type, not just structural type
-        let out_elem = ret_ty.simd_type(tcx);
-
-        if in_elem == out_elem { return llargs[0]; }
-
-        enum Style { Float, Int(/* is signed? */ bool), Unsupported }
-
-        let (in_style, in_width) = match in_elem.sty {
-            // vectors of pointer-sized integers should've been
-            // disallowed before here, so this unwrap is safe.
-            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
-            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
-            ty::TyFloat(f) => (Style::Float, f.bit_width()),
-            _ => (Style::Unsupported, 0)
-        };
-        let (out_style, out_width) = match out_elem.sty {
-            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
-            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
-            ty::TyFloat(f) => (Style::Float, f.bit_width()),
-            _ => (Style::Unsupported, 0)
-        };
-
-        match (in_style, out_style) {
-            (Style::Int(in_is_signed), Style::Int(_)) => {
-                return match in_width.cmp(&out_width) {
-                    Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
-                    Ordering::Equal => llargs[0],
-                    Ordering::Less => if in_is_signed {
-                        SExt(bcx, llargs[0], llret_ty)
-                    } else {
-                        ZExt(bcx, llargs[0], llret_ty)
-                    }
-                }
-            }
-            (Style::Int(in_is_signed), Style::Float) => {
-                return if in_is_signed {
-                    SIToFP(bcx, llargs[0], llret_ty)
-                } else {
-                    UIToFP(bcx, llargs[0], llret_ty)
-                }
-            }
-            (Style::Float, Style::Int(out_is_signed)) => {
-                return if out_is_signed {
-                    FPToSI(bcx, llargs[0], llret_ty)
-                } else {
-                    FPToUI(bcx, llargs[0], llret_ty)
-                }
-            }
-            (Style::Float, Style::Float) => {
-                return match in_width.cmp(&out_width) {
-                    Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
-                    Ordering::Equal => llargs[0],
-                    Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
-                }
-            }
-            _ => {/* Unsupported. Fallthrough. */}
-        }
-        require!(false,
-                 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
-                 in_ty, in_elem,
-                 ret_ty, out_elem);
-    }
-    macro_rules! arith {
-        ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
-            $(
-                if name == stringify!($name) {
-                    match in_elem.sty {
-                        $(
-                            $(ty::$p(_))|* => {
-                                return $call(bcx, llargs[0], llargs[1], call_debug_location)
-                            }
-                            )*
-                        _ => {},
-                    }
-                    require!(false,
-                             "unsupported operation on `{}` with element `{}`",
-                             in_ty,
-                             in_elem)
-                })*
-        }
-    }
-    arith! {
-        simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
-        simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
-        simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
-        simd_div: TyFloat => FDiv;
-        simd_shl: TyUint, TyInt => Shl;
-        simd_shr: TyUint => LShr, TyInt => AShr;
-        simd_and: TyUint, TyInt => And;
-        simd_or: TyUint, TyInt => Or;
-        simd_xor: TyUint, TyInt => Xor;
-    }
-    bcx.sess().span_bug(call_info.span, "unknown SIMD intrinsic");
-}
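
For context, intrinsics like `simd_add` handled above are reached from Rust source
through `extern "platform-intrinsic"` declarations on `#[repr(simd)]` types. A
minimal sketch, assuming the unstable `repr_simd` and `platform_intrinsics`
features of this era (the vector type and values are illustrative):

    #![feature(repr_simd, platform_intrinsics)]
    #![allow(non_camel_case_types)]

    #[repr(simd)]
    #[derive(Copy, Clone)]
    struct i32x4(i32, i32, i32, i32);

    extern "platform-intrinsic" {
        // Dispatched to the `simd_add` arm of the `arith!` macro above.
        fn simd_add<T>(x: T, y: T) -> T;
    }

    fn main() {
        let a = i32x4(1, 2, 3, 4);
        let b = i32x4(10, 20, 30, 40);
        // The intrinsic is only defined for SIMD vector types, hence `unsafe`.
        let c = unsafe { simd_add(a, b) };
        assert_eq!((c.0, c.1, c.2, c.3), (11, 22, 33, 44));
    }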
-
-// Returns the width of an integer TypeVariant and whether it is signed.
-// Returns None if the type is not an integer.
-fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
-        -> Option<(u64, bool)> {
-    use rustc::middle::ty::{TyInt, TyUint};
-    match *sty {
-        TyInt(t) => Some((match t {
-            ast::IntTy::Is => {
-                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
-                    "32" => 32,
-                    "64" => 64,
-                    tws => panic!("Unsupported target word size for isize: {}", tws),
-                }
-            },
-            ast::IntTy::I8 => 8,
-            ast::IntTy::I16 => 16,
-            ast::IntTy::I32 => 32,
-            ast::IntTy::I64 => 64,
-        }, true)),
-        TyUint(t) => Some((match t {
-            ast::UintTy::Us => {
-                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
-                    "32" => 32,
-                    "64" => 64,
-                    tws => panic!("Unsupported target word size for usize: {}", tws),
-                }
-            },
-            ast::UintTy::U8 => 8,
-            ast::UintTy::U16 => 16,
-            ast::UintTy::U32 => 32,
-            ast::UintTy::U64 => 64,
-        }, false)),
-        _ => None,
-    }
-}
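
Spelled out, the mapping this helper implements is (derived from the match arms
above, illustrative values only):

    TyInt(I32)   -> Some((32, true))
    TyUint(U8)   -> Some((8, false))
    TyInt(Is)    -> Some((32, true)) or Some((64, true)), per target_pointer_width
    TyFloat(_)   -> None (not an integer type)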
diff --git a/src/librustc_trans/trans/llrepr.rs b/src/librustc_trans/trans/llrepr.rs
deleted file mode 100644 (file)
index 6b785e7..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use trans::context::CrateContext;
-use trans::type_::Type;
-use llvm::ValueRef;
-
-pub trait LlvmRepr {
-    fn llrepr(&self, ccx: &CrateContext) -> String;
-}
-
-impl<T:LlvmRepr> LlvmRepr for [T] {
-    fn llrepr(&self, ccx: &CrateContext) -> String {
-        let reprs: Vec<String> = self.iter().map(|t| t.llrepr(ccx)).collect();
-        format!("[{}]", reprs.join(","))
-    }
-}
-
-impl LlvmRepr for Type {
-    fn llrepr(&self, ccx: &CrateContext) -> String {
-        ccx.tn().type_to_string(*self)
-    }
-}
-
-impl LlvmRepr for ValueRef {
-    fn llrepr(&self, ccx: &CrateContext) -> String {
-        ccx.tn().val_to_string(*self)
-    }
-}
diff --git a/src/librustc_trans/trans/machine.rs b/src/librustc_trans/trans/machine.rs
deleted file mode 100644 (file)
index 691fba4..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Information concerning the machine representation of various types.
-
-#![allow(non_camel_case_types)]
-
-use llvm::{self, ValueRef};
-use trans::common::*;
-
-use trans::type_::Type;
-
-pub type llbits = u64;
-pub type llsize = u64;
-pub type llalign = u32;
-
-// ______________________________________________________________________
-// compute sizeof / alignof
-
-// Returns the number of bytes clobbered by a Store to this type.
-pub fn llsize_of_store(cx: &CrateContext, ty: Type) -> llsize {
-    unsafe {
-        return llvm::LLVMStoreSizeOfType(cx.td(), ty.to_ref());
-    }
-}
-
-// Returns the number of bytes between successive elements of type T in an
-// array of T. This is the "ABI" size. It includes any ABI-mandated padding.
-pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> llsize {
-    unsafe {
-        return llvm::LLVMABISizeOfType(cx.td(), ty.to_ref());
-    }
-}
-
-// Returns, as near as we can figure, the "real" size of a type. As in, the
-// bits in this number of bytes actually carry data related to the datum
-// with the type. Not junk, accidentally-damaged words, or whatever.
-// Note that padding of the type will be included for structs, but not for the
-// other types (i.e. SIMD types).
-// Rounds up to the nearest byte though, so if you have a 1-bit
-// value, we return 1 here, not 0. Most of rustc works in bytes. Be warned
-// that LLVM *does* distinguish between e.g. a 1-bit value and an 8-bit value
-// at the codegen level! In general you should prefer `llbitsize_of_real`
-// below.
-pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> llsize {
-    unsafe {
-        let nbits = llvm::LLVMSizeOfTypeInBits(cx.td(), ty.to_ref());
-        if nbits & 7 != 0 {
-            // Not an even number of bytes, spills into "next" byte.
-            1 + (nbits >> 3)
-        } else {
-            nbits >> 3
-        }
-    }
-}
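
As a quick check of the rounding above (arithmetic only): a 36-bit LLVM type
yields 1 + (36 >> 3) = 5 bytes here, and an i1 yields 1 byte rather than 0,
whereas `llbitsize_of_real` below would still report 36 and 1 bits respectively.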
-
-/// Returns the "real" size of the type in bits.
-pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> llbits {
-    unsafe {
-        llvm::LLVMSizeOfTypeInBits(cx.td(), ty.to_ref())
-    }
-}
-
-/// Returns the size of the type as an LLVM constant integer value.
-pub fn llsize_of(cx: &CrateContext, ty: Type) -> ValueRef {
-    // Once upon a time, this called LLVMSizeOf, which does a
-    // getelementptr(1) on a null pointer and casts to an int, in
-    // order to obtain the type size as a value without requiring the
-    // target data layout.  But we have the target data layout, so
-    // there's no need for that contrivance.  The instruction
-    // selection DAG generator would flatten that GEP(1) node into a
-    // constant of the type's alloc size, so let's save it some work.
-    return C_uint(cx, llsize_of_alloc(cx, ty));
-}
-
-// Returns the preferred alignment of the given type for the current target.
-// The preferred alignment may be larger than the alignment used when
-// packing the type into structs. This will be used for things like
-// allocations inside a stack frame, which LLVM has a free hand in.
-pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> llalign {
-    unsafe {
-        return llvm::LLVMPreferredAlignmentOfType(cx.td(), ty.to_ref());
-    }
-}
-
-// Returns the minimum alignment of a type required by the platform.
-// This is the alignment that will be used for struct fields, arrays,
-// and similar ABI-mandated things.
-pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> llalign {
-    unsafe {
-        return llvm::LLVMABIAlignmentOfType(cx.td(), ty.to_ref());
-    }
-}
-
-pub fn llelement_offset(cx: &CrateContext, struct_ty: Type, element: usize) -> u64 {
-    unsafe {
-        return llvm::LLVMOffsetOfElement(cx.td(),
-                                         struct_ty.to_ref(),
-                                         element as u32);
-    }
-}
diff --git a/src/librustc_trans/trans/macros.rs b/src/librustc_trans/trans/macros.rs
deleted file mode 100644 (file)
index 77efcc6..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-macro_rules! unpack_datum {
-    ($bcx: ident, $inp: expr) => (
-        {
-            let db = $inp;
-            $bcx = db.bcx;
-            db.datum
-        }
-    )
-}
-
-macro_rules! unpack_result {
-    ($bcx: ident, $inp: expr) => (
-        {
-            let db = $inp;
-            $bcx = db.bcx;
-            db.val
-        }
-    )
-}
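
Call sites re-bind a mutable local `bcx`, exactly as the meth.rs hunk below does
with `unpack_datum!(bcx, expr::trans(bcx, self_expr))`. A self-contained imitation
of the pattern (types and names here are hypothetical, not rustc's):

    struct Block(u32);
    struct DatumBlock { bcx: Block, datum: &'static str }

    macro_rules! unpack_datum {
        ($bcx:ident, $inp:expr) => ({
            let db = $inp;
            $bcx = db.bcx;   // re-bind the caller's block variable
            db.datum         // yield the datum itself
        })
    }

    // Stand-in for a translation step that may produce a new block.
    fn translate(b: Block) -> DatumBlock {
        DatumBlock { bcx: Block(b.0 + 1), datum: "translated" }
    }

    fn main() {
        let mut bcx = Block(0);
        let datum = unpack_datum!(bcx, translate(bcx));
        assert_eq!(bcx.0, 1);
        assert_eq!(datum, "translated");
    }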
diff --git a/src/librustc_trans/trans/meth.rs b/src/librustc_trans/trans/meth.rs
deleted file mode 100644 (file)
index ab43861..0000000
+++ /dev/null
@@ -1,658 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use arena::TypedArena;
-use back::link;
-use llvm::{ValueRef, get_params};
-use middle::def_id::DefId;
-use middle::infer;
-use middle::subst::{Subst, Substs};
-use middle::subst;
-use middle::traits;
-use trans::base::*;
-use trans::build::*;
-use trans::callee::*;
-use trans::callee;
-use trans::cleanup;
-use trans::closure;
-use trans::common::*;
-use trans::consts;
-use trans::datum::*;
-use trans::debuginfo::DebugLoc;
-use trans::declare;
-use trans::expr;
-use trans::glue;
-use trans::machine;
-use trans::monomorphize;
-use trans::type_::Type;
-use trans::type_of::*;
-use middle::ty::{self, Ty};
-use middle::ty::MethodCall;
-
-use syntax::ast;
-use syntax::attr;
-use syntax::codemap::DUMMY_SP;
-
-use rustc_front::hir;
-
-// drop_glue pointer, size, align.
-const VTABLE_OFFSET: usize = 3;
-
-/// The main "translation" pass for methods.  Generates code
-/// for non-monomorphized methods only.  Other methods will
-/// be generated once they are invoked with specific type parameters,
-/// see `trans::base::lval_static_fn()` or `trans::base::monomorphic_fn()`.
-pub fn trans_impl(ccx: &CrateContext,
-                  name: ast::Name,
-                  impl_items: &[hir::ImplItem],
-                  generics: &hir::Generics,
-                  id: ast::NodeId) {
-    let _icx = push_ctxt("meth::trans_impl");
-    let tcx = ccx.tcx();
-
-    debug!("trans_impl(name={}, id={})", name, id);
-
-    // Both here and below with generic methods, be sure to recurse and look for
-    // items that we need to translate.
-    if !generics.ty_params.is_empty() {
-        return;
-    }
-
-    for impl_item in impl_items {
-        match impl_item.node {
-            hir::ImplItemKind::Method(ref sig, ref body) => {
-                if sig.generics.ty_params.is_empty() {
-                    let trans_everywhere = attr::requests_inline(&impl_item.attrs);
-                    for (ref ccx, is_origin) in ccx.maybe_iter(trans_everywhere) {
-                        let llfn = get_item_val(ccx, impl_item.id);
-                        let empty_substs = tcx.mk_substs(Substs::trans_empty());
-                        trans_fn(ccx,
-                                 &sig.decl,
-                                 body,
-                                 llfn,
-                                 empty_substs,
-                                 impl_item.id,
-                                 &impl_item.attrs);
-                        update_linkage(ccx,
-                                       llfn,
-                                       Some(impl_item.id),
-                                       if is_origin { OriginalTranslation } else { InlinedCopy });
-                    }
-                }
-            }
-            _ => {}
-        }
-    }
-}
-
-pub fn trans_method_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                       method_call: MethodCall,
-                                       self_expr: Option<&hir::Expr>,
-                                       arg_cleanup_scope: cleanup::ScopeId)
-                                       -> Callee<'blk, 'tcx> {
-    let _icx = push_ctxt("meth::trans_method_callee");
-
-    let method = bcx.tcx().tables.borrow().method_map[&method_call];
-
-    match bcx.tcx().impl_or_trait_item(method.def_id).container() {
-        ty::ImplContainer(_) => {
-            debug!("trans_method_callee: static, {:?}", method.def_id);
-            let datum = callee::trans_fn_ref(bcx.ccx(),
-                                             method.def_id,
-                                             MethodCallKey(method_call),
-                                             bcx.fcx.param_substs);
-            Callee {
-                bcx: bcx,
-                data: Fn(datum.val),
-                ty: datum.ty
-            }
-        }
-
-        ty::TraitContainer(trait_def_id) => {
-            let trait_ref = method.substs.to_trait_ref(bcx.tcx(), trait_def_id);
-            let trait_ref = ty::Binder(bcx.monomorphize(&trait_ref));
-            let span = bcx.tcx().map.span(method_call.expr_id);
-            debug!("method_call={:?} trait_ref={:?} trait_ref id={:?} substs={:?}",
-                   method_call,
-                   trait_ref,
-                   trait_ref.0.def_id,
-                   trait_ref.0.substs);
-            let origin = fulfill_obligation(bcx.ccx(), span, trait_ref);
-            debug!("origin = {:?}", origin);
-            trans_monomorphized_callee(bcx,
-                                       method_call,
-                                       self_expr,
-                                       trait_def_id,
-                                       method.def_id,
-                                       method.ty,
-                                       origin,
-                                       arg_cleanup_scope)
-        }
-    }
-}
-
-pub fn trans_static_method_callee<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                            method_id: DefId,
-                                            trait_id: DefId,
-                                            expr_id: ast::NodeId,
-                                            param_substs: &'tcx subst::Substs<'tcx>)
-                                            -> Datum<'tcx, Rvalue>
-{
-    let _icx = push_ctxt("meth::trans_static_method_callee");
-    let tcx = ccx.tcx();
-
-    debug!("trans_static_method_callee(method_id={:?}, trait_id={}, \
-            expr_id={})",
-           method_id,
-           tcx.item_path_str(trait_id),
-           expr_id);
-
-    let mname = tcx.item_name(method_id);
-
-    debug!("trans_static_method_callee: method_id={:?}, expr_id={}, \
-            name={}", method_id, expr_id, mname);
-
-    // Find the substitutions for the fn itself. This includes
-    // type parameters that belong to the trait but also some that
-    // belong to the method:
-    let rcvr_substs = node_id_substs(ccx, ExprId(expr_id), param_substs);
-    debug!("rcvr_substs={:?}", rcvr_substs);
-    let trait_ref = ty::Binder(rcvr_substs.to_trait_ref(tcx, trait_id));
-    let vtbl = fulfill_obligation(ccx, DUMMY_SP, trait_ref);
-
-    // Now that we know which impl is being used, we can dispatch to
-    // the actual function:
-    match vtbl {
-        traits::VtableImpl(traits::VtableImplData {
-            impl_def_id: impl_did,
-            substs: impl_substs,
-            nested: _ }) =>
-        {
-            let callee_substs = impl_substs.with_method_from(&rcvr_substs);
-            let mth = tcx.get_impl_method(impl_did, callee_substs, mname);
-            trans_fn_ref_with_substs(ccx, mth.method.def_id, ExprId(expr_id),
-                                     param_substs,
-                                     mth.substs)
-        }
-        traits::VtableObject(ref data) => {
-            let idx = traits::get_vtable_index_of_object_method(tcx, data, method_id);
-            trans_object_shim(ccx,
-                              data.upcast_trait_ref.clone(),
-                              method_id,
-                              idx)
-        }
-        _ => {
-            // FIXME(#20847): handle at least VtableFnPointer
-            tcx.sess.bug(&format!("static call to invalid vtable: {:?}",
-                                 vtbl));
-        }
-    }
-}
-
-fn trans_monomorphized_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                          method_call: MethodCall,
-                                          self_expr: Option<&hir::Expr>,
-                                          trait_id: DefId,
-                                          method_id: DefId,
-                                          method_ty: Ty<'tcx>,
-                                          vtable: traits::Vtable<'tcx, ()>,
-                                          arg_cleanup_scope: cleanup::ScopeId)
-                                          -> Callee<'blk, 'tcx> {
-    let _icx = push_ctxt("meth::trans_monomorphized_callee");
-    match vtable {
-        traits::VtableImpl(vtable_impl) => {
-            let ccx = bcx.ccx();
-            let impl_did = vtable_impl.impl_def_id;
-            let mname = match ccx.tcx().impl_or_trait_item(method_id) {
-                ty::MethodTraitItem(method) => method.name,
-                _ => {
-                    bcx.tcx().sess.bug("can't monomorphize a non-method trait \
-                                        item")
-                }
-            };
-            // create a concatenated set of substitutions which includes
-            // those from the impl and those from the method:
-            let meth_substs = node_id_substs(ccx,
-                                             MethodCallKey(method_call),
-                                             bcx.fcx.param_substs);
-            let impl_substs = vtable_impl.substs.with_method_from(&meth_substs);
-            let mth = bcx.tcx().get_impl_method(impl_did, impl_substs, mname);
-            // translate the function
-            let datum = trans_fn_ref_with_substs(bcx.ccx(),
-                                                 mth.method.def_id,
-                                                 MethodCallKey(method_call),
-                                                 bcx.fcx.param_substs,
-                                                 mth.substs);
-
-            Callee { bcx: bcx, data: Fn(datum.val), ty: datum.ty }
-        }
-        traits::VtableClosure(vtable_closure) => {
-            // The substitutions should have no type parameters remaining
-            // after passing through fulfill_obligation
-            let trait_closure_kind = bcx.tcx().lang_items.fn_trait_kind(trait_id).unwrap();
-            let llfn = closure::trans_closure_method(bcx.ccx(),
-                                                     vtable_closure.closure_def_id,
-                                                     vtable_closure.substs,
-                                                     trait_closure_kind);
-            Callee {
-                bcx: bcx,
-                data: Fn(llfn),
-                ty: monomorphize_type(bcx, method_ty)
-            }
-        }
-        traits::VtableFnPointer(fn_ty) => {
-            let trait_closure_kind = bcx.tcx().lang_items.fn_trait_kind(trait_id).unwrap();
-            let llfn = trans_fn_pointer_shim(bcx.ccx(), trait_closure_kind, fn_ty);
-            Callee {
-                bcx: bcx,
-                data: Fn(llfn),
-                ty: monomorphize_type(bcx, method_ty)
-            }
-        }
-        traits::VtableObject(ref data) => {
-            let idx = traits::get_vtable_index_of_object_method(bcx.tcx(), data, method_id);
-            if let Some(self_expr) = self_expr {
-                if let ty::TyBareFn(_, ref fty) = monomorphize_type(bcx, method_ty).sty {
-                    let ty = bcx.tcx().mk_fn(None, opaque_method_ty(bcx.tcx(), fty));
-                    return trans_trait_callee(bcx, ty, idx, self_expr, arg_cleanup_scope);
-                }
-            }
-            let datum = trans_object_shim(bcx.ccx(),
-                                          data.upcast_trait_ref.clone(),
-                                          method_id,
-                                          idx);
-            Callee { bcx: bcx, data: Fn(datum.val), ty: datum.ty }
-        }
-        traits::VtableBuiltin(..) |
-        traits::VtableDefaultImpl(..) |
-        traits::VtableParam(..) => {
-            bcx.sess().bug(
-                &format!("resolved vtable bad vtable {:?} in trans",
-                        vtable));
-        }
-    }
-}
-
-/// Create a method callee where the method is coming from a trait object (e.g., Box<Trait> type).
-/// In this case, we must pull the fn pointer out of the vtable that is packaged up with the
-/// object. Objects are represented as a pair, so we first evaluate the self expression and then
-/// extract the self data and vtable out of the pair.
-fn trans_trait_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                  opaque_fn_ty: Ty<'tcx>,
-                                  vtable_index: usize,
-                                  self_expr: &hir::Expr,
-                                  arg_cleanup_scope: cleanup::ScopeId)
-                                  -> Callee<'blk, 'tcx> {
-    let _icx = push_ctxt("meth::trans_trait_callee");
-    let mut bcx = bcx;
-
-    // Translate self_datum and take ownership of the value by
-    // converting to an rvalue.
-    let self_datum = unpack_datum!(
-        bcx, expr::trans(bcx, self_expr));
-
-    let llval = if bcx.fcx.type_needs_drop(self_datum.ty) {
-        let self_datum = unpack_datum!(
-            bcx, self_datum.to_rvalue_datum(bcx, "trait_callee"));
-
-        // Convert to by-ref since `trans_trait_callee_from_llval` wants it
-        // that way.
-        let self_datum = unpack_datum!(
-            bcx, self_datum.to_ref_datum(bcx));
-
-        // Arrange cleanup in case something should go wrong before the
-        // actual call occurs.
-        self_datum.add_clean(bcx.fcx, arg_cleanup_scope)
-    } else {
-        // We don't have to do anything about cleanups for &Trait and &mut Trait.
-        assert!(self_datum.kind.is_by_ref());
-        self_datum.val
-    };
-
-    let llself = Load(bcx, expr::get_dataptr(bcx, llval));
-    let llvtable = Load(bcx, expr::get_meta(bcx, llval));
-    trans_trait_callee_from_llval(bcx, opaque_fn_ty, vtable_index, llself, llvtable)
-}
-
-/// Same as `trans_trait_callee()` above, except that it is given a by-ref pointer to the object
-/// pair.
-fn trans_trait_callee_from_llval<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                             opaque_fn_ty: Ty<'tcx>,
-                                             vtable_index: usize,
-                                             llself: ValueRef,
-                                             llvtable: ValueRef)
-                                             -> Callee<'blk, 'tcx> {
-    let _icx = push_ctxt("meth::trans_trait_callee");
-    let ccx = bcx.ccx();
-
-    // Load the data pointer from the object.
-    debug!("trans_trait_callee_from_llval(callee_ty={}, vtable_index={}, llself={}, llvtable={})",
-           opaque_fn_ty,
-           vtable_index,
-           bcx.val_to_string(llself),
-           bcx.val_to_string(llvtable));
-
-    // Replace the self type (&Self or Box<Self>) with an opaque pointer.
-    let mptr = Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET]));
-    let llcallee_ty = type_of_fn_from_ty(ccx, opaque_fn_ty);
-
-    Callee {
-        bcx: bcx,
-        data: TraitItem(MethodData {
-            llfn: PointerCast(bcx, mptr, llcallee_ty.ptr_to()),
-            llself: PointerCast(bcx, llself, Type::i8p(ccx)),
-        }),
-        ty: opaque_fn_ty
-    }
-}
-
-/// Generate a shim function that allows an object type like `SomeTrait` to
-/// implement the type `SomeTrait`. Imagine a trait definition:
-///
-///    trait SomeTrait { fn get(&self) -> i32; ... }
-///
-/// And a generic bit of code:
-///
-///    fn foo<T:SomeTrait>(t: &T) {
-///        let x = SomeTrait::get;
-///        x(t)
-///    }
-///
-/// What is the value of `x` when `foo` is invoked with `T=SomeTrait`?
-/// The answer is that it is a shim function generated by this routine:
-///
-///    fn shim(t: &SomeTrait) -> i32 {
-///        // ... call t.get() virtually ...
-///    }
-///
-/// In fact, all virtual calls can be thought of as normal trait calls
-/// that go through this shim function.
-pub fn trans_object_shim<'a, 'tcx>(
-    ccx: &'a CrateContext<'a, 'tcx>,
-    upcast_trait_ref: ty::PolyTraitRef<'tcx>,
-    method_id: DefId,
-    vtable_index: usize)
-    -> Datum<'tcx, Rvalue>
-{
-    let _icx = push_ctxt("trans_object_shim");
-    let tcx = ccx.tcx();
-
-    debug!("trans_object_shim(upcast_trait_ref={:?}, method_id={:?})",
-           upcast_trait_ref,
-           method_id);
-
-    // Upcast to the trait in question and extract out the substitutions.
-    let upcast_trait_ref = tcx.erase_late_bound_regions(&upcast_trait_ref);
-    let object_substs = upcast_trait_ref.substs.clone().erase_regions();
-    debug!("trans_object_shim: object_substs={:?}", object_substs);
-
-    // Lookup the type of this method as declared in the trait and apply substitutions.
-    let method_ty = match tcx.impl_or_trait_item(method_id) {
-        ty::MethodTraitItem(method) => method,
-        _ => {
-            tcx.sess.bug("can't create a method shim for a non-method item")
-        }
-    };
-    let fty = monomorphize::apply_param_substs(tcx, &object_substs, &method_ty.fty);
-    let fty = tcx.mk_bare_fn(fty);
-    let method_ty = opaque_method_ty(tcx, fty);
-    debug!("trans_object_shim: fty={:?} method_ty={:?}", fty, method_ty);
-
-    //
-    let shim_fn_ty = tcx.mk_fn(None, fty);
-    let method_bare_fn_ty = tcx.mk_fn(None, method_ty);
-    let function_name = link::mangle_internal_name_by_type_and_seq(ccx, shim_fn_ty, "object_shim");
-    let llfn = declare::define_internal_rust_fn(ccx, &function_name, shim_fn_ty);
-
-    let sig = ccx.tcx().erase_late_bound_regions(&fty.sig);
-    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
-
-    let empty_substs = tcx.mk_substs(Substs::trans_empty());
-    let (block_arena, fcx): (TypedArena<_>, FunctionContext);
-    block_arena = TypedArena::new();
-    fcx = new_fn_ctxt(ccx,
-                      llfn,
-                      ast::DUMMY_NODE_ID,
-                      false,
-                      sig.output,
-                      empty_substs,
-                      None,
-                      &block_arena);
-    let mut bcx = init_function(&fcx, false, sig.output);
-
-    let llargs = get_params(fcx.llfn);
-
-    let self_idx = fcx.arg_offset();
-    let llself = llargs[self_idx];
-    let llvtable = llargs[self_idx + 1];
-
-    debug!("trans_object_shim: llself={}, llvtable={}",
-           bcx.val_to_string(llself), bcx.val_to_string(llvtable));
-
-    assert!(!fcx.needs_ret_allocas);
-
-    let dest =
-        fcx.llretslotptr.get().map(
-            |_| expr::SaveIn(fcx.get_ret_slot(bcx, sig.output, "ret_slot")));
-
-    debug!("trans_object_shim: method_offset_in_vtable={}",
-           vtable_index);
-
-    bcx = trans_call_inner(bcx,
-                           DebugLoc::None,
-                           |bcx, _| trans_trait_callee_from_llval(bcx,
-                                                                  method_bare_fn_ty,
-                                                                  vtable_index,
-                                                                  llself, llvtable),
-                           ArgVals(&llargs[(self_idx + 2)..]),
-                           dest).bcx;
-
-    finish_fn(&fcx, bcx, sig.output, DebugLoc::None);
-
-    immediate_rvalue(llfn, shim_fn_ty)
-}
-
-/// Creates and returns a dynamic vtable for the given type and vtable origin.
-/// This is used only for objects.
-///
-/// The `trait_ref` encodes the erased self type. Hence if we are
-/// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
-/// `trait_ref` would map `T:Trait`.
-pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                            trait_ref: ty::PolyTraitRef<'tcx>,
-                            param_substs: &'tcx subst::Substs<'tcx>)
-                            -> ValueRef
-{
-    let tcx = ccx.tcx();
-    let _icx = push_ctxt("meth::get_vtable");
-
-    debug!("get_vtable(trait_ref={:?})", trait_ref);
-
-    // Check the cache.
-    match ccx.vtables().borrow().get(&trait_ref) {
-        Some(&val) => { return val }
-        None => { }
-    }
-
-    // Not in the cache. Build it.
-    let methods = traits::supertraits(tcx, trait_ref.clone()).flat_map(|trait_ref| {
-        let vtable = fulfill_obligation(ccx, DUMMY_SP, trait_ref.clone());
-        match vtable {
-            // Should default trait error here?
-            traits::VtableDefaultImpl(_) |
-            traits::VtableBuiltin(_) => {
-                Vec::new().into_iter()
-            }
-            traits::VtableImpl(
-                traits::VtableImplData {
-                    impl_def_id: id,
-                    substs,
-                    nested: _ }) => {
-                let nullptr = C_null(Type::nil(ccx).ptr_to());
-                get_vtable_methods(ccx, id, substs)
-                    .into_iter()
-                    .map(|opt_mth| {
-                        match opt_mth {
-                            Some(mth) => {
-                                trans_fn_ref_with_substs(ccx,
-                                                         mth.method.def_id,
-                                                         ExprId(0),
-                                                         param_substs,
-                                                         mth.substs).val
-                            }
-                            None => nullptr
-                        }
-                    })
-                    .collect::<Vec<_>>()
-                    .into_iter()
-            }
-            traits::VtableClosure(
-                traits::VtableClosureData {
-                    closure_def_id,
-                    substs,
-                    nested: _ }) => {
-                let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_ref.def_id()).unwrap();
-                let llfn = closure::trans_closure_method(ccx,
-                                                         closure_def_id,
-                                                         substs,
-                                                         trait_closure_kind);
-                vec![llfn].into_iter()
-            }
-            traits::VtableFnPointer(bare_fn_ty) => {
-                let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_ref.def_id()).unwrap();
-                vec![trans_fn_pointer_shim(ccx, trait_closure_kind, bare_fn_ty)].into_iter()
-            }
-            traits::VtableObject(ref data) => {
-                // this would imply that the Self type being erased is
-                // an object type; this cannot happen because we
-                // cannot cast an unsized type into a trait object
-                tcx.sess.bug(
-                    &format!("cannot get vtable for an object type: {:?}",
-                            data));
-            }
-            traits::VtableParam(..) => {
-                tcx.sess.bug(
-                    &format!("resolved vtable for {:?} to bad vtable {:?} in trans",
-                            trait_ref,
-                            vtable));
-            }
-        }
-    });
-
-    let size_ty = sizing_type_of(ccx, trait_ref.self_ty());
-    let size = machine::llsize_of_alloc(ccx, size_ty);
-    let align = align_of(ccx, trait_ref.self_ty());
-
-    let components: Vec<_> = vec![
-        // Generate a destructor for the vtable.
-        glue::get_drop_glue(ccx, trait_ref.self_ty()),
-        C_uint(ccx, size),
-        C_uint(ccx, align)
-    ].into_iter().chain(methods).collect();
-
-    let vtable_const = C_struct(ccx, &components, false);
-    let align = machine::llalign_of_pref(ccx, val_ty(vtable_const));
-    let vtable = consts::addr_of(ccx, vtable_const, align, "vtable");
-
-    ccx.vtables().borrow_mut().insert(trait_ref, vtable);
-    vtable
-}
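
The vtable assembled in `components` above therefore has the following layout,
which is what `VTABLE_OFFSET = 3` near the top of this file accounts for (a
sketch of the layout, not code from this diff):

    slot 0:  drop glue for the erased Self type
    slot 1:  size of Self
    slot 2:  alignment of Self
    slot 3+: one function pointer per vtable-safe method (null for methods that
             can never be called on the object)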
-
-pub fn get_vtable_methods<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                    impl_id: DefId,
-                                    substs: subst::Substs<'tcx>)
-                                    -> Vec<Option<ty::util::ImplMethod<'tcx>>>
-{
-    let tcx = ccx.tcx();
-
-    debug!("get_vtable_methods(impl_id={:?}, substs={:?}", impl_id, substs);
-
-    let trt_id = match tcx.impl_trait_ref(impl_id) {
-        Some(t_id) => t_id.def_id,
-        None       => ccx.sess().bug("make_impl_vtable: don't know how to \
-                                      make a vtable for a type impl!")
-    };
-
-    tcx.populate_implementations_for_trait_if_necessary(trt_id);
-
-    let trait_item_def_ids = tcx.trait_item_def_ids(trt_id);
-    trait_item_def_ids
-        .iter()
-
-        // Filter out non-method items.
-        .filter_map(|item_def_id| {
-            match *item_def_id {
-                ty::MethodTraitItemId(def_id) => Some(def_id),
-                _ => None,
-            }
-        })
-
-        // Now produce pointers for each remaining method. If the
-        // method could never be called from this object, just supply
-        // null.
-        .map(|trait_method_def_id| {
-            debug!("get_vtable_methods: trait_method_def_id={:?}",
-                   trait_method_def_id);
-
-            let trait_method_type = match tcx.impl_or_trait_item(trait_method_def_id) {
-                ty::MethodTraitItem(m) => m,
-                _ => ccx.sess().bug("should be a method, not other assoc item"),
-            };
-            let name = trait_method_type.name;
-
-            // Some methods cannot be called on an object; skip those.
-            if !traits::is_vtable_safe_method(tcx, trt_id, &trait_method_type) {
-                debug!("get_vtable_methods: not vtable safe");
-                return None;
-            }
-
-            debug!("get_vtable_methods: trait_method_type={:?}",
-                   trait_method_type);
-
-            // The substitutions we have are on the impl, so we grab
-            // the method type from the impl to substitute into.
-            let mth = tcx.get_impl_method(impl_id, substs.clone(), name);
-
-            debug!("get_vtable_methods: mth={:?}", mth);
-
-            // If this is a default method, it's possible that it
-            // relies on where clauses that do not hold for this
-            // particular set of type parameters. Note that this
-            // method could then never be called, so we do not want to
-            // try to trans it in that case. Issue #23435.
-            if mth.is_provided {
-                let predicates = mth.method.predicates.predicates.subst(tcx, &mth.substs);
-                if !normalize_and_test_predicates(ccx, predicates.into_vec()) {
-                    debug!("get_vtable_methods: predicates do not hold");
-                    return None;
-                }
-            }
-
-            Some(mth)
-        })
-        .collect()
-}
-
-/// Replace the self type (&Self or Box<Self>) with an opaque pointer.
-fn opaque_method_ty<'tcx>(tcx: &ty::ctxt<'tcx>, method_ty: &ty::BareFnTy<'tcx>)
-                          -> &'tcx ty::BareFnTy<'tcx> {
-    let mut inputs = method_ty.sig.0.inputs.clone();
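-    // Input 0 is the receiver (&Self or Box<Self>); rewrite it to *mut i8 to erase the concrete type.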
-    inputs[0] = tcx.mk_mut_ptr(tcx.mk_mach_int(ast::IntTy::I8));
-
-    tcx.mk_bare_fn(ty::BareFnTy {
-        unsafety: method_ty.unsafety,
-        abi: method_ty.abi,
-        sig: ty::Binder(ty::FnSig {
-            inputs: inputs,
-            output: method_ty.sig.0.output,
-            variadic: method_ty.sig.0.variadic,
-        }),
-    })
-}
diff --git a/src/librustc_trans/trans/mir/analyze.rs b/src/librustc_trans/trans/mir/analyze.rs
deleted file mode 100644 (file)
index 23cca55..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! An analysis to determine which temporaries require allocas and
-//! which do not.
-
-use rustc_data_structures::bitvec::BitVector;
-use rustc::mir::repr as mir;
-use rustc::mir::visit::{Visitor, LvalueContext};
-use trans::common::{self, Block};
-use super::rvalue;
-
-pub fn lvalue_temps<'bcx,'tcx>(bcx: Block<'bcx,'tcx>,
-                               mir: &mir::Mir<'tcx>)
-                               -> BitVector {
-    let mut analyzer = TempAnalyzer::new(mir.temp_decls.len());
-
-    analyzer.visit_mir(mir);
-
-    for (index, temp_decl) in mir.temp_decls.iter().enumerate() {
-        let ty = bcx.monomorphize(&temp_decl.ty);
-        debug!("temp {:?} has type {:?}", index, ty);
-        if ty.is_scalar() ||
-            ty.is_unique() ||
-            ty.is_region_ptr() ||
-            ty.is_simd()
-        {
-            // These sorts of types are immediates that we can store
-            // in a ValueRef without an alloca.
-            assert!(common::type_is_immediate(bcx.ccx(), ty) ||
-                    common::type_is_fat_ptr(bcx.tcx(), ty));
-        } else {
-            // These sorts of types require an alloca. Note that
-            // type_is_immediate() may *still* be true, particularly
-            // for newtypes, but we currently force some types
-            // (e.g. structs) into an alloca unconditionally, just so
-            // that we don't have to deal with having two pathways
-            // (gep vs extractvalue etc).
-            analyzer.mark_as_lvalue(index);
-        }
-    }
-
-    analyzer.lvalue_temps
-}
-
-struct TempAnalyzer {
-    lvalue_temps: BitVector,
-    seen_assigned: BitVector
-}
-
-impl TempAnalyzer {
-    fn new(temp_count: usize) -> TempAnalyzer {
-        TempAnalyzer {
-            lvalue_temps: BitVector::new(temp_count),
-            seen_assigned: BitVector::new(temp_count)
-        }
-    }
-
-    fn mark_as_lvalue(&mut self, temp: usize) {
-        debug!("marking temp {} as lvalue", temp);
-        self.lvalue_temps.insert(temp);
-    }
-
-    fn mark_assigned(&mut self, temp: usize) {
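-        // `insert` reports whether the bit changed; a `false` result means this temp
-        // was already assigned once before, so a second assignment forces it into an alloca.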
-        if !self.seen_assigned.insert(temp) {
-            self.mark_as_lvalue(temp);
-        }
-    }
-}
-
-impl<'tcx> Visitor<'tcx> for TempAnalyzer {
-    fn visit_assign(&mut self,
-                    block: mir::BasicBlock,
-                    lvalue: &mir::Lvalue<'tcx>,
-                    rvalue: &mir::Rvalue<'tcx>) {
-        debug!("visit_assign(block={:?}, lvalue={:?}, rvalue={:?})", block, lvalue, rvalue);
-
-        match *lvalue {
-            mir::Lvalue::Temp(index) => {
-                self.mark_assigned(index as usize);
-                if !rvalue::rvalue_creates_operand(rvalue) {
-                    self.mark_as_lvalue(index as usize);
-                }
-            }
-            _ => {
-                self.visit_lvalue(lvalue, LvalueContext::Store);
-            }
-        }
-
-        self.visit_rvalue(rvalue);
-    }
-
-    fn visit_lvalue(&mut self,
-                    lvalue: &mir::Lvalue<'tcx>,
-                    context: LvalueContext) {
-        debug!("visit_lvalue(lvalue={:?}, context={:?})", lvalue, context);
-
-        match *lvalue {
-            mir::Lvalue::Temp(index) => {
-                match context {
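-                    // A plain consume can stay in operand form; any other use of the
-                    // temp (store, drop, inspect, borrow, slice, projection) needs an alloca.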
-                    LvalueContext::Consume => {
-                    }
-                    LvalueContext::Store |
-                    LvalueContext::Drop |
-                    LvalueContext::Inspect |
-                    LvalueContext::Borrow { .. } |
-                    LvalueContext::Slice { .. } |
-                    LvalueContext::Projection => {
-                        self.mark_as_lvalue(index as usize);
-                    }
-                }
-            }
-            _ => {
-            }
-        }
-
-        self.super_lvalue(lvalue, context);
-    }
-}
diff --git a/src/librustc_trans/trans/mir/block.rs b/src/librustc_trans/trans/mir/block.rs
deleted file mode 100644 (file)
index 609f1de..0000000
+++ /dev/null
@@ -1,422 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use llvm::{BasicBlockRef, ValueRef, OperandBundleDef};
-use rustc::middle::ty::{self, Ty};
-use rustc::mir::repr as mir;
-use syntax::abi::Abi;
-use trans::adt;
-use trans::attributes;
-use trans::base;
-use trans::build;
-use trans::common::{self, Block, BlockAndBuilder};
-use trans::debuginfo::DebugLoc;
-use trans::Disr;
-use trans::foreign;
-use trans::glue;
-use trans::type_of;
-use trans::type_::Type;
-
-use super::MirContext;
-use super::operand::OperandValue::{FatPtr, Immediate, Ref};
-use super::operand::OperandRef;
-
-#[derive(PartialEq, Eq)]
-enum AbiStyle {
-    Foreign,
-    RustCall,
-    Rust
-}
-
-impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
-    fn abi_style(&self, fn_ty: Ty<'tcx>) -> AbiStyle {
-        if let ty::TyBareFn(_, ref f) = fn_ty.sty {
-            // We do not translate intrinsics here (they shouldn’t be functions)
-            assert!(f.abi != Abi::RustIntrinsic && f.abi != Abi::PlatformIntrinsic);
-
-            match f.abi {
-                Abi::Rust => AbiStyle::Rust,
-                Abi::RustCall => AbiStyle::RustCall,
-                _ => AbiStyle::Foreign
-            }
-        } else {
-            unreachable!()
-        }
-    }
-
-    fn arg_operands(&mut self,
-                    bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                    abi_style: AbiStyle,
-                    args: &[mir::Operand<'tcx>])
-                    -> Vec<OperandRef<'tcx>>
-    {
-        match abi_style {
-            AbiStyle::Foreign | AbiStyle::Rust => {
-                args.iter().map(|arg| self.trans_operand(bcx, arg)).collect()
-            }
-            AbiStyle::RustCall => match args.split_last() {
-                None => vec![],
-                Some((tup, self_ty)) => {
-                    // we can reorder safely because of MIR
-                    let untupled_args = self.trans_operand_untupled(bcx, tup);
-                    self_ty
-                        .iter().map(|arg| self.trans_operand(bcx, arg))
-                        .chain(untupled_args.into_iter())
-                        .collect()
-                }
-            }
-        }
-    }
-
-    pub fn trans_block(&mut self, bb: mir::BasicBlock) {
-        debug!("trans_block({:?})", bb);
-
-        let mut bcx = self.bcx(bb);
-        let data = self.mir.basic_block_data(bb);
-
-        // MSVC SEH bits
-        let (cleanup_pad, cleanup_bundle) = if let Some((cp, cb)) = self.make_cleanup_pad(bb) {
-            (Some(cp), Some(cb))
-        } else {
-            (None, None)
-        };
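-        // Within an MSVC funclet, blocks must be left via `cleanupret`; otherwise a
-        // plain branch suffices.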
-        let funclet_br = |bcx: BlockAndBuilder, llbb: BasicBlockRef| if let Some(cp) = cleanup_pad {
-            bcx.cleanup_ret(cp, Some(llbb));
-        } else {
-            bcx.br(llbb);
-        };
-
-        for statement in &data.statements {
-            bcx = self.trans_statement(bcx, statement);
-        }
-
-        debug!("trans_block: terminator: {:?}", data.terminator());
-
-        match *data.terminator() {
-            mir::Terminator::Resume => {
-                if let Some(cleanup_pad) = cleanup_pad {
-                    bcx.cleanup_ret(cleanup_pad, None);
-                } else {
-                    let ps = self.get_personality_slot(&bcx);
-                    let lp = bcx.load(ps);
-                    bcx.with_block(|bcx| {
-                        base::call_lifetime_end(bcx, ps);
-                        base::trans_unwind_resume(bcx, lp);
-                    });
-                }
-            }
-
-            mir::Terminator::Goto { target } => {
-                funclet_br(bcx, self.llblock(target));
-            }
-
-            mir::Terminator::If { ref cond, targets: (true_bb, false_bb) } => {
-                let cond = self.trans_operand(&bcx, cond);
-                let lltrue = self.llblock(true_bb);
-                let llfalse = self.llblock(false_bb);
-                bcx.cond_br(cond.immediate(), lltrue, llfalse);
-            }
-
-            mir::Terminator::Switch { ref discr, ref adt_def, ref targets } => {
-                let discr_lvalue = self.trans_lvalue(&bcx, discr);
-                let ty = discr_lvalue.ty.to_ty(bcx.tcx());
-                let repr = adt::represent_type(bcx.ccx(), ty);
-                let discr = bcx.with_block(|bcx|
-                    adt::trans_get_discr(bcx, &repr, discr_lvalue.llval, None, true)
-                );
-
-                // The else branch of the Switch can't be hit, so branch to an unreachable
-                // instruction so LLVM knows that
-                let unreachable_blk = self.unreachable_block();
-                let switch = bcx.switch(discr, unreachable_blk.llbb, targets.len());
-                assert_eq!(adt_def.variants.len(), targets.len());
-                for (adt_variant, target) in adt_def.variants.iter().zip(targets) {
-                    let llval = bcx.with_block(|bcx|
-                        adt::trans_case(bcx, &repr, Disr::from(adt_variant.disr_val))
-                    );
-                    let llbb = self.llblock(*target);
-                    build::AddCase(switch, llval, llbb)
-                }
-            }
-
-            mir::Terminator::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
-                let (otherwise, targets) = targets.split_last().unwrap();
-                let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
-                let switch = bcx.switch(discr, self.llblock(*otherwise), values.len());
-                for (value, target) in values.iter().zip(targets) {
-                    let llval = self.trans_constval(&bcx, value, switch_ty).immediate();
-                    let llbb = self.llblock(*target);
-                    build::AddCase(switch, llval, llbb)
-                }
-            }
-
-            mir::Terminator::Return => {
-                let return_ty = bcx.monomorphize(&self.mir.return_ty);
-                bcx.with_block(|bcx| {
-                    base::build_return_block(self.fcx, bcx, return_ty, DebugLoc::None);
-                })
-            }
-
-            mir::Terminator::Drop { ref value, target, unwind } => {
-                let lvalue = self.trans_lvalue(&bcx, value);
-                let ty = lvalue.ty.to_ty(bcx.tcx());
-                // Double-check that this type actually needs to be dropped
-                if !glue::type_needs_drop(bcx.tcx(), ty) {
-                    funclet_br(bcx, self.llblock(target));
-                    return;
-                }
-                let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
-                let drop_ty = glue::get_drop_glue_type(bcx.ccx(), ty);
-                let llvalue = if drop_ty != ty {
-                    bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to())
-                } else {
-                    lvalue.llval
-                };
-                if let Some(unwind) = unwind {
-                    let uwbcx = self.bcx(unwind);
-                    let unwind = self.make_landing_pad(uwbcx);
-                    bcx.invoke(drop_fn,
-                               &[llvalue],
-                               self.llblock(target),
-                               unwind.llbb(),
-                               cleanup_bundle.as_ref(),
-                               None);
-                } else {
-                    bcx.call(drop_fn, &[llvalue], cleanup_bundle.as_ref(), None);
-                    funclet_br(bcx, self.llblock(target));
-                }
-            }
-
-            mir::Terminator::Call { ref func, ref args, ref destination, ref cleanup } => {
-                // Create the callee. This will always be a fn ptr and hence a kind of scalar.
-                let callee = self.trans_operand(&bcx, func);
-                let attrs = attributes::from_fn_type(bcx.ccx(), callee.ty);
-                let debugloc = DebugLoc::None;
-                // The arguments we'll be passing. Plus one to account for outptr, if used.
-                let mut llargs = Vec::with_capacity(args.len() + 1);
-                // Types of the arguments. We do not preallocate, because this vector is only
-                // filled when `is_foreign` is `true`, and foreign calls are a minority of cases.
-                let mut arg_tys = Vec::new();
-
-                // Foreign-ABI functions are translated differently
-                let abi_style = self.abi_style(callee.ty);
-                let is_foreign = abi_style == AbiStyle::Foreign;
-
-                // Prepare the return value destination
-                let (ret_dest_ty, must_copy_dest) = if let Some((ref d, _)) = *destination {
-                    let dest = self.trans_lvalue(&bcx, d);
-                    let ret_ty = dest.ty.to_ty(bcx.tcx());
-                    if !is_foreign && type_of::return_uses_outptr(bcx.ccx(), ret_ty) {
-                        llargs.push(dest.llval);
-                        (Some((dest, ret_ty)), false)
-                    } else {
-                        (Some((dest, ret_ty)), !common::type_is_zero_size(bcx.ccx(), ret_ty))
-                    }
-                } else {
-                    (None, false)
-                };
-
-                // Process the rest of the args.
-                for operand in self.arg_operands(&bcx, abi_style, args) {
-                    match operand.val {
-                        Ref(llval) | Immediate(llval) => llargs.push(llval),
-                        FatPtr(b, e) => {
-                            llargs.push(b);
-                            llargs.push(e);
-                        }
-                    }
-                    if is_foreign {
-                        arg_tys.push(operand.ty);
-                    }
-                }
-
-                // The many different ways of calling a function are all handled here.
-                match (is_foreign, cleanup, destination) {
-                    // The two cases below are the only ones to use LLVM’s `invoke`.
-                    (false, &Some(cleanup), &None) => {
-                        let cleanup = self.bcx(cleanup);
-                        let landingpad = self.make_landing_pad(cleanup);
-                        let unreachable_blk = self.unreachable_block();
-                        bcx.invoke(callee.immediate(),
-                                   &llargs[..],
-                                   unreachable_blk.llbb,
-                                   landingpad.llbb(),
-                                   cleanup_bundle.as_ref(),
-                                   Some(attrs));
-                    },
-                    (false, &Some(cleanup), &Some((_, success))) => {
-                        let cleanup = self.bcx(cleanup);
-                        let landingpad = self.make_landing_pad(cleanup);
-                        let (target, postinvoke) = if must_copy_dest {
-                            (self.fcx.new_block("", None).build(), Some(self.bcx(success)))
-                        } else {
-                            (self.bcx(success), None)
-                        };
-                        let invokeret = bcx.invoke(callee.immediate(),
-                                                   &llargs[..],
-                                                   target.llbb(),
-                                                   landingpad.llbb(),
-                                                   cleanup_bundle.as_ref(),
-                                                   Some(attrs));
-                        if let Some(postinvoketarget) = postinvoke {
-                            // We translate the copy into a temporary block. The temporary block is
-                            // necessary because the current block has already been terminated (by
-                            // `invoke`) and we cannot really translate into the target block
-                            // because:
-                            //  * The target block may have more than a single predecessor;
-                            //  * Some LLVM instructions cannot be preceded by a store (phi,
-                            //    cleanuppad), and adding/prepending the store now may render
-                            //    those other instructions invalid.
-                            //
-                            // NB: This approach may still break some LLVM code. For example, if the
-                            // target block starts with a `phi` (which may only match on immediate
-                            // predecessors), it cannot know about this temporary block, thus
-                            // resulting in invalid code:
-                            //
-                            // this:
-                            //     …
-                            //     %0 = …
-                            //     %1 = invoke to label %temp …
-                            // temp:
-                            //     store ty %1, ty* %dest
-                            //     br label %actualtargetblock
-                            // actualtargetblock:            ; preds: %temp, …
-                            //     phi … [%this, …], [%0, …] ; ERROR: phi may only match on
-                            //                               ; immediate predecessors
-                            let (ret_dest, ret_ty) = ret_dest_ty
-                                .expect("return destination and type not set");
-                            target.with_block(|target| {
-                                base::store_ty(target, invokeret, ret_dest.llval, ret_ty);
-                            });
-                            target.br(postinvoketarget.llbb());
-                        }
-                    },
-                    (false, _, &None) => {
-                        bcx.call(callee.immediate(),
-                                 &llargs[..],
-                                 cleanup_bundle.as_ref(),
-                                 Some(attrs));
-                        bcx.unreachable();
-                    }
-                    (false, _, &Some((_, target))) => {
-                        let llret = bcx.call(callee.immediate(),
-                                             &llargs[..],
-                                             cleanup_bundle.as_ref(),
-                                             Some(attrs));
-                        if must_copy_dest {
-                            let (ret_dest, ret_ty) = ret_dest_ty
-                                .expect("return destination and type not set");
-                            bcx.with_block(|bcx| {
-                                base::store_ty(bcx, llret, ret_dest.llval, ret_ty);
-                            });
-                        }
-                        funclet_br(bcx, self.llblock(target));
-                    }
-                    // Foreign functions
-                    (true, _, destination) => {
-                        let (dest, _) = ret_dest_ty
-                            .expect("return destination is not set");
-                        bcx = bcx.map_block(|bcx| {
-                            foreign::trans_native_call(bcx,
-                                                       callee.ty,
-                                                       callee.immediate(),
-                                                       dest.llval,
-                                                       &llargs[..],
-                                                       arg_tys,
-                                                       debugloc)
-                        });
-                        if let Some((_, target)) = *destination {
-                            funclet_br(bcx, self.llblock(target));
-                        }
-                    },
-                }
-            }
-        }
-    }
-
-    fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRef {
-        let ccx = bcx.ccx();
-        if let Some(slot) = self.llpersonalityslot {
-            slot
-        } else {
-            let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
-            bcx.with_block(|bcx| {
-                let slot = base::alloca(bcx, llretty, "personalityslot");
-                self.llpersonalityslot = Some(slot);
-                base::call_lifetime_start(bcx, slot);
-                slot
-            })
-        }
-    }
-
-    /// Create a landingpad wrapper around the given Block.
-    ///
-    /// No-op in MSVC SEH scheme.
-    fn make_landing_pad(&mut self,
-                        cleanup: BlockAndBuilder<'bcx, 'tcx>)
-                        -> BlockAndBuilder<'bcx, 'tcx>
-    {
-        if base::wants_msvc_seh(cleanup.sess()) {
-            return cleanup;
-        }
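-        // GNU-style unwinding: build a landing pad that catches the {i8*, i32}
-        // exception value, stores it in the personality slot, and branches on to the
-        // real cleanup block.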
-        let bcx = self.fcx.new_block("cleanup", None).build();
-        let ccx = bcx.ccx();
-        let llpersonality = self.fcx.eh_personality();
-        let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
-        let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn);
-        bcx.set_cleanup(llretval);
-        let slot = self.get_personality_slot(&bcx);
-        bcx.store(llretval, slot);
-        bcx.br(cleanup.llbb());
-        bcx
-    }
-
-    /// Create prologue cleanuppad instruction under MSVC SEH handling scheme.
-    ///
-    /// Also handles setting some state for the original trans and creating an operand bundle for
-    /// function calls.
-    fn make_cleanup_pad(&mut self, bb: mir::BasicBlock) -> Option<(ValueRef, OperandBundleDef)> {
-        let bcx = self.bcx(bb);
-        let data = self.mir.basic_block_data(bb);
-        let use_funclets = base::wants_msvc_seh(bcx.sess()) && data.is_cleanup;
-        let cleanup_pad = if use_funclets {
-            bcx.set_personality_fn(self.fcx.eh_personality());
-            Some(bcx.cleanup_pad(None, &[]))
-        } else {
-            None
-        };
-        // Set the landingpad global-state for old translator, so it knows about the SEH used.
-        bcx.set_lpad(if let Some(cleanup_pad) = cleanup_pad {
-            Some(common::LandingPad::msvc(cleanup_pad))
-        } else if data.is_cleanup {
-            Some(common::LandingPad::gnu())
-        } else {
-            None
-        });
-        cleanup_pad.map(|f| (f, OperandBundleDef::new("funclet", &[f])))
-    }
-
-    fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> {
-        self.unreachable_block.unwrap_or_else(|| {
-            let bl = self.fcx.new_block("unreachable", None);
-            bl.build().unreachable();
-            self.unreachable_block = Some(bl);
-            bl
-        })
-    }
-
-    fn bcx(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'bcx, 'tcx> {
-        self.blocks[bb.index()].build()
-    }
-
-    fn llblock(&self, bb: mir::BasicBlock) -> BasicBlockRef {
-        self.blocks[bb.index()].llbb
-    }
-}
diff --git a/src/librustc_trans/trans/mir/constant.rs b/src/librustc_trans/trans/mir/constant.rs
deleted file mode 100644 (file)
index 7f03069..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use back::abi;
-use llvm::ValueRef;
-use middle::subst::Substs;
-use middle::ty::{Ty, TypeFoldable};
-use rustc::middle::const_eval::ConstVal;
-use rustc::mir::repr as mir;
-use trans::common::{self, BlockAndBuilder, C_bool, C_bytes, C_floating_f64, C_integral,
-                    C_str_slice};
-use trans::consts;
-use trans::expr;
-use trans::type_of;
-
-use super::operand::{OperandRef, OperandValue};
-use super::MirContext;
-
-
-impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
-    pub fn trans_constval(&mut self,
-                          bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                          cv: &ConstVal,
-                          ty: Ty<'tcx>)
-                          -> OperandRef<'tcx>
-    {
-        let ccx = bcx.ccx();
-        let val = self.trans_constval_inner(bcx, cv, ty, bcx.fcx().param_substs);
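-        // Classify the constant the same way operands are classified: immediates stay
-        // as a single value, fat pointers are split into their data/extra words, and
-        // everything else is handled by reference.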
-        let val = if common::type_is_immediate(ccx, ty) {
-            OperandValue::Immediate(val)
-        } else if common::type_is_fat_ptr(bcx.tcx(), ty) {
-            let data = common::const_get_elt(ccx, val, &[abi::FAT_PTR_ADDR as u32]);
-            let extra = common::const_get_elt(ccx, val, &[abi::FAT_PTR_EXTRA as u32]);
-            OperandValue::FatPtr(data, extra)
-        } else {
-            OperandValue::Ref(val)
-        };
-
-        assert!(!ty.has_erasable_regions());
-
-        OperandRef {
-            ty: ty,
-            val: val
-        }
-    }
-
-    /// Translate ConstVal into a bare LLVM ValueRef.
-    fn trans_constval_inner(&mut self,
-                            bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                            cv: &ConstVal,
-                            ty: Ty<'tcx>,
-                            param_substs: &'tcx Substs<'tcx>)
-                            -> ValueRef
-    {
-        let ccx = bcx.ccx();
-        let llty = type_of::type_of(ccx, ty);
-        match *cv {
-            ConstVal::Float(v) => C_floating_f64(v, llty),
-            ConstVal::Bool(v) => C_bool(ccx, v),
-            ConstVal::Int(v) => C_integral(llty, v as u64, true),
-            ConstVal::Uint(v) => C_integral(llty, v, false),
-            ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()),
-            ConstVal::ByteStr(ref v) => consts::addr_of(ccx, C_bytes(ccx, v), 1, "byte_str"),
-            ConstVal::Struct(id) | ConstVal::Tuple(id) |
-            ConstVal::Array(id, _) | ConstVal::Repeat(id, _) => {
-                let expr = bcx.tcx().map.expect_expr(id);
-                bcx.with_block(|bcx| {
-                    expr::trans(bcx, expr).datum.val
-                })
-            },
-            ConstVal::Function(did) =>
-                self.trans_fn_ref(bcx, ty, param_substs, did).immediate()
-        }
-    }
-
-    pub fn trans_constant(&mut self,
-                          bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                          constant: &mir::Constant<'tcx>)
-                          -> OperandRef<'tcx>
-    {
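-        // An `Item` literal refers to another def (a function, method, or constant) and
-        // goes through the item-reference path; a `Value` literal is translated directly.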
-        match constant.literal {
-            mir::Literal::Item { def_id, kind, substs } => {
-                let substs = bcx.tcx().mk_substs(bcx.monomorphize(&substs));
-                self.trans_item_ref(bcx, constant.ty, kind, substs, def_id)
-            }
-            mir::Literal::Value { ref value } => {
-                let ty = bcx.monomorphize(&constant.ty);
-                self.trans_constval(bcx, value, ty)
-            }
-        }
-    }
-}
diff --git a/src/librustc_trans/trans/mir/did.rs b/src/librustc_trans/trans/mir/did.rs
deleted file mode 100644 (file)
index 36bbbce..0000000
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Code for translating references to other items (DefIds).
-
-use syntax::codemap::DUMMY_SP;
-use rustc::front::map;
-use rustc::middle::ty::{self, Ty, TypeFoldable};
-use rustc::middle::subst::Substs;
-use rustc::middle::const_eval;
-use rustc::middle::def_id::DefId;
-use rustc::middle::traits;
-use rustc::mir::repr::ItemKind;
-use trans::common::{BlockAndBuilder, fulfill_obligation};
-use trans::base;
-use trans::closure;
-use trans::expr;
-use trans::monomorphize;
-use trans::meth;
-use trans::inline;
-
-use super::MirContext;
-use super::operand::{OperandRef, OperandValue};
-
-impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
-    /// Translate reference to item.
-    pub fn trans_item_ref(&mut self,
-                          bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                          ty: Ty<'tcx>,
-                          kind: ItemKind,
-                          substs: &'tcx Substs<'tcx>,
-                          did: DefId)
-                          -> OperandRef<'tcx> {
-        debug!("trans_item_ref(ty={:?}, kind={:?}, substs={:?}, did={})",
-            ty, kind, substs, bcx.tcx().item_path_str(did));
-
-        match kind {
-            ItemKind::Function => self.trans_fn_ref(bcx, ty, substs, did),
-            ItemKind::Method => match bcx.tcx().impl_or_trait_item(did).container() {
-                ty::ImplContainer(_) => self.trans_fn_ref(bcx, ty, substs, did),
-                ty::TraitContainer(tdid) => self.trans_trait_method(bcx, ty, did, tdid, substs)
-            },
-            ItemKind::Constant => {
-                let did = inline::maybe_instantiate_inline(bcx.ccx(), did);
-                let expr = const_eval::lookup_const_by_id(bcx.tcx(), did, None, Some(substs))
-                            .expect("def was const, but lookup_const_by_id failed");
-                // FIXME: this is falling back to translating from HIR. This is not easy to fix,
-                // because we would have to somehow adapt const_eval to work on MIR rather than HIR.
-                let d = bcx.with_block(|bcx| {
-                    expr::trans(bcx, expr)
-                });
-                OperandRef::from_rvalue_datum(d.datum.to_rvalue_datum(d.bcx, "").datum)
-            }
-        }
-    }
-
-    /// Translates references to function-like items.
-    ///
-    /// That includes regular functions, non-static methods, struct and enum variant constructors,
-    /// closures and possibly more.
-    ///
-    /// This is an adaptation of callee::trans_fn_ref_with_substs.
-    pub fn trans_fn_ref(&mut self,
-                        bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                        ty: Ty<'tcx>,
-                        substs: &'tcx Substs<'tcx>,
-                        did: DefId)
-                        -> OperandRef<'tcx> {
-        debug!("trans_fn_ref(ty={:?}, substs={:?}, did={})",
-            ty, substs, bcx.tcx().item_path_str(did));
-
-        let did = inline::maybe_instantiate_inline(bcx.ccx(), did);
-
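-        // Generic functions (and tuple-struct or tuple-variant constructors) must be
-        // monomorphized; other items can be referenced directly, either as a local
-        // item or through an external path.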
-        if !substs.types.is_empty() || is_named_tuple_constructor(bcx.tcx(), did) {
-            let (val, fn_ty, _) = monomorphize::monomorphic_fn(bcx.ccx(), did, substs, None);
-            // FIXME: cast fnptr to proper type if necessary
-            OperandRef {
-                ty: fn_ty,
-                val: OperandValue::Immediate(val)
-            }
-        } else {
-            let val = if let Some(node_id) = bcx.tcx().map.as_local_node_id(did) {
-                base::get_item_val(bcx.ccx(), node_id)
-            } else {
-                base::trans_external_path(bcx.ccx(), did, ty)
-            };
-            // FIXME: cast fnptr to proper type if necessary
-            OperandRef {
-                ty: ty,
-                val: OperandValue::Immediate(val)
-            }
-        }
-    }
-
-    /// Translates references to trait methods.
-    ///
-    /// This is an adaptation of meth::trans_static_method_callee
-    pub fn trans_trait_method(&mut self,
-                              bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                              ty: Ty<'tcx>,
-                              method_id: DefId,
-                              trait_id: DefId,
-                              substs: &'tcx Substs<'tcx>)
-                              -> OperandRef<'tcx> {
-        debug!("trans_static_method(ty={:?}, method={}, trait={}, substs={:?})",
-                ty,
-                bcx.tcx().item_path_str(method_id),
-                bcx.tcx().item_path_str(trait_id),
-                substs);
-
-        let ccx = bcx.ccx();
-        let tcx = bcx.tcx();
-        let trait_ref = ty::Binder(substs.to_trait_ref(tcx, trait_id));
-        let vtbl = fulfill_obligation(ccx, DUMMY_SP, trait_ref);
-        match vtbl {
-            traits::VtableImpl(traits::VtableImplData {
-                impl_def_id, substs: impl_substs, ..
-            }) => {
-                assert!(!impl_substs.types.needs_infer());
-
-                let mname = tcx.item_name(method_id);
-
-                let callee_substs = impl_substs.with_method_from(substs);
-                let mth = tcx.get_impl_method(impl_def_id, callee_substs, mname);
-                let mth_substs = tcx.mk_substs(mth.substs);
-                self.trans_fn_ref(bcx, ty, mth_substs, mth.method.def_id)
-            },
-            traits::VtableClosure(data) => {
-                let trait_closure_kind = bcx.tcx().lang_items.fn_trait_kind(trait_id).unwrap();
-                let llfn = closure::trans_closure_method(bcx.ccx(),
-                                                         data.closure_def_id,
-                                                         data.substs,
-                                                         trait_closure_kind);
-                OperandRef {
-                    ty: ty,
-                    val: OperandValue::Immediate(llfn)
-                }
-            },
-            traits::VtableObject(ref data) => {
-                let idx = traits::get_vtable_index_of_object_method(tcx, data, method_id);
-                OperandRef::from_rvalue_datum(
-                    meth::trans_object_shim(ccx, data.upcast_trait_ref.clone(), method_id, idx)
-                )
-            }
-            _ => {
-                tcx.sess.bug(&format!("static call to invalid vtable: {:?}", vtbl));
-            }
-        }
-    }
-}
-
-fn is_named_tuple_constructor(tcx: &ty::ctxt, def_id: DefId) -> bool {
-    let node_id = match tcx.map.as_local_node_id(def_id) {
-        Some(n) => n,
-        None => { return false; }
-    };
-    match tcx.map.find(node_id).expect("local item should be in ast map") {
-        map::NodeVariant(v) => {
-            v.node.data.is_tuple()
-        }
-        map::NodeStructCtor(_) => true,
-        _ => false
-    }
-}
diff --git a/src/librustc_trans/trans/mir/lvalue.rs b/src/librustc_trans/trans/mir/lvalue.rs
deleted file mode 100644 (file)
index 826fb02..0000000
+++ /dev/null
@@ -1,213 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use llvm::ValueRef;
-use rustc::middle::ty::{self, Ty, TypeFoldable};
-use rustc::mir::repr as mir;
-use rustc::mir::tcx::LvalueTy;
-use trans::adt;
-use trans::base;
-use trans::common::{self, BlockAndBuilder};
-use trans::machine;
-use trans::type_of;
-use llvm;
-use trans::Disr;
-
-use std::ptr;
-
-use super::{MirContext, TempRef};
-
-#[derive(Copy, Clone)]
-pub struct LvalueRef<'tcx> {
-    /// Pointer to the contents of the lvalue
-    pub llval: ValueRef,
-
-    /// This lvalue's extra data if it is unsized, or null
-    pub llextra: ValueRef,
-
-    /// Monomorphized type of this lvalue, including variant information
-    pub ty: LvalueTy<'tcx>,
-}
-
-impl<'tcx> LvalueRef<'tcx> {
-    pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> {
-        LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
-    }
-
-    pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                        ty: Ty<'tcx>,
-                        name: &str)
-                        -> LvalueRef<'tcx>
-    {
-        assert!(!ty.has_erasable_regions());
-        let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name));
-        LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))
-    }
-}
-
-impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
-    pub fn lvalue_len(&mut self,
-                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                      lvalue: LvalueRef<'tcx>)
-                      -> ValueRef {
-        match lvalue.ty.to_ty(bcx.tcx()).sty {
-            ty::TyArray(_, n) => common::C_uint(bcx.ccx(), n),
-            ty::TySlice(_) | ty::TyStr => {
-                assert!(lvalue.llextra != ptr::null_mut());
-                lvalue.llextra
-            }
-            _ => bcx.sess().bug("unexpected type in lvalue_len"),
-        }
-    }
-
-    pub fn trans_lvalue(&mut self,
-                        bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                        lvalue: &mir::Lvalue<'tcx>)
-                        -> LvalueRef<'tcx> {
-        debug!("trans_lvalue(lvalue={:?})", lvalue);
-
-        let fcx = bcx.fcx();
-        let ccx = bcx.ccx();
-        let tcx = bcx.tcx();
-        match *lvalue {
-            mir::Lvalue::Var(index) => self.vars[index as usize],
-            mir::Lvalue::Temp(index) => match self.temps[index as usize] {
-                TempRef::Lvalue(lvalue) =>
-                    lvalue,
-                TempRef::Operand(..) =>
-                    tcx.sess.bug(&format!("using operand temp {:?} as lvalue", lvalue)),
-            },
-            mir::Lvalue::Arg(index) => self.args[index as usize],
-            mir::Lvalue::Static(def_id) => {
-                let const_ty = self.mir.lvalue_ty(tcx, lvalue);
-                LvalueRef::new_sized(
-                    common::get_static_val(ccx, def_id, const_ty.to_ty(tcx)),
-                    const_ty)
-            },
-            mir::Lvalue::ReturnPointer => {
-                let fn_return_ty = bcx.monomorphize(&self.mir.return_ty);
-                let return_ty = fn_return_ty.unwrap();
-                let llval = if !common::return_type_is_void(bcx.ccx(), return_ty) {
-                    bcx.with_block(|bcx| {
-                        fcx.get_ret_slot(bcx, fn_return_ty, "")
-                    })
-                } else {
-                    // This is a void return; that is, there’s no place to store the value and
-                    // there cannot really be one (or storing into it doesn’t make sense, anyway).
-                    // Ergo, we return an undef ValueRef, so we do not have to special-case every
-                    // place using lvalues, and can use it the same way you use a regular
-                    // ReturnPointer lvalue (i.e. store into it, load from it, etc.).
-                    let llty = type_of::type_of(bcx.ccx(), return_ty).ptr_to();
-                    unsafe {
-                        llvm::LLVMGetUndef(llty.to_ref())
-                    }
-                };
-                LvalueRef::new_sized(llval, LvalueTy::from_ty(return_ty))
-            },
-            mir::Lvalue::Projection(ref projection) => {
-                let tr_base = self.trans_lvalue(bcx, &projection.base);
-                let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
-                let (llprojected, llextra) = match projection.elem {
-                    mir::ProjectionElem::Deref => {
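-                        // Dereferencing loads the pointer itself: a thin pointer when
-                        // the target is sized, or both words of a fat pointer otherwise.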
-                        let base_ty = tr_base.ty.to_ty(tcx);
-                        bcx.with_block(|bcx| {
-                            if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) {
-                                (base::load_ty(bcx, tr_base.llval, base_ty),
-                                 ptr::null_mut())
-                            } else {
-                                base::load_fat_ptr(bcx, tr_base.llval, base_ty)
-                            }
-                        })
-                    }
-                    mir::ProjectionElem::Field(ref field, _) => {
-                        let base_ty = tr_base.ty.to_ty(tcx);
-                        let base_repr = adt::represent_type(ccx, base_ty);
-                        let discr = match tr_base.ty {
-                            LvalueTy::Ty { .. } => 0,
-                            LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v,
-                        };
-                        let discr = discr as u64;
-                        let is_sized = common::type_is_sized(tcx, projected_ty.to_ty(tcx));
-                        let base = if is_sized {
-                            adt::MaybeSizedValue::sized(tr_base.llval)
-                        } else {
-                            adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra)
-                        };
-                        let llprojected = bcx.with_block(|bcx| {
-                            adt::trans_field_ptr(bcx, &base_repr, base, Disr(discr), field.index())
-                        });
-                        let llextra = if is_sized {
-                            ptr::null_mut()
-                        } else {
-                            tr_base.llextra
-                        };
-                        (llprojected, llextra)
-                    }
-                    mir::ProjectionElem::Index(ref index) => {
-                        let index = self.trans_operand(bcx, index);
-                        let llindex = self.prepare_index(bcx, index.immediate());
-                        let zero = common::C_uint(bcx.ccx(), 0u64);
-                        (bcx.inbounds_gep(tr_base.llval, &[zero, llindex]),
-                         ptr::null_mut())
-                    }
-                    mir::ProjectionElem::ConstantIndex { offset,
-                                                         from_end: false,
-                                                         min_length: _ } => {
-                        let lloffset = common::C_u32(bcx.ccx(), offset);
-                        let llindex = self.prepare_index(bcx, lloffset);
-                        let zero = common::C_uint(bcx.ccx(), 0u64);
-                        (bcx.inbounds_gep(tr_base.llval, &[zero, llindex]),
-                         ptr::null_mut())
-                    }
-                    mir::ProjectionElem::ConstantIndex { offset,
-                                                         from_end: true,
-                                                         min_length: _ } => {
-                        let lloffset = common::C_u32(bcx.ccx(), offset);
-                        let lllen = self.lvalue_len(bcx, tr_base);
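-                        // `from_end` counts from the back, so the effective index is len - offset.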
-                        let llindex = bcx.sub(lllen, lloffset);
-                        let llindex = self.prepare_index(bcx, llindex);
-                        let zero = common::C_uint(bcx.ccx(), 0u64);
-                        (bcx.inbounds_gep(tr_base.llval, &[zero, llindex]),
-                         ptr::null_mut())
-                    }
-                    mir::ProjectionElem::Downcast(..) => {
-                        (tr_base.llval, tr_base.llextra)
-                    }
-                };
-                LvalueRef {
-                    llval: llprojected,
-                    llextra: llextra,
-                    ty: projected_ty,
-                }
-            }
-        }
-    }
-
-    /// Adjust the bitwidth of an index since LLVM is less forgiving
-    /// than we are.
-    ///
-    /// nmatsakis: is this still necessary? Not sure.
-    fn prepare_index(&mut self,
-                     bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                     llindex: ValueRef)
-                     -> ValueRef
-    {
-        let ccx = bcx.ccx();
-        let index_size = machine::llbitsize_of_real(bcx.ccx(), common::val_ty(llindex));
-        let int_size = machine::llbitsize_of_real(bcx.ccx(), ccx.int_type());
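-        // Normalize the index to the target's int width: widen with zext or narrow
-        // with trunc before feeding it to the GEPs above.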
-        if index_size < int_size {
-            bcx.zext(llindex, ccx.int_type())
-        } else if index_size > int_size {
-            bcx.trunc(llindex, ccx.int_type())
-        } else {
-            llindex
-        }
-    }
-}
diff --git a/src/librustc_trans/trans/mir/mod.rs b/src/librustc_trans/trans/mir/mod.rs
deleted file mode 100644 (file)
index 972340e..0000000
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use libc::c_uint;
-use llvm::{self, ValueRef};
-use rustc::mir::repr as mir;
-use rustc::mir::tcx::LvalueTy;
-use trans::base;
-use trans::common::{self, Block, BlockAndBuilder};
-use trans::expr;
-use trans::type_of;
-
-use self::lvalue::LvalueRef;
-use self::operand::OperandRef;
-
-// FIXME DebugLoc is always None right now
-
-/// Master context for translating MIR.
-pub struct MirContext<'bcx, 'tcx:'bcx> {
-    mir: &'bcx mir::Mir<'tcx>,
-
-    /// Function context
-    fcx: &'bcx common::FunctionContext<'bcx, 'tcx>,
-
-    /// When unwinding is initiated, we have to store this personality
-    /// value somewhere so that we can load it and re-use it in the
-    /// resume instruction. The personality is (afaik) some kind of
-    /// value used for C++ unwinding, which must filter by type: we
-    /// don't really care about it very much. Anyway, this value
-    /// contains an alloca into which the personality is stored and
-    /// then later loaded when generating the DIVERGE_BLOCK.
-    llpersonalityslot: Option<ValueRef>,
-
-    /// A `Block` for each MIR `BasicBlock`
-    blocks: Vec<Block<'bcx, 'tcx>>,
-
-    /// Cached unreachable block
-    unreachable_block: Option<Block<'bcx, 'tcx>>,
-
-    /// An LLVM alloca for each MIR `VarDecl`
-    vars: Vec<LvalueRef<'tcx>>,
-
-    /// The location where each MIR `TempDecl` is stored. This is
-    /// usually an `LvalueRef` representing an alloca, but not always:
-    /// sometimes we can skip the alloca and just store the value
-    /// directly using an `OperandRef`, which makes for tighter LLVM
-    /// IR. The conditions for using an `OperandRef` are as follows:
-    ///
-    /// - the type of the temporary must be judged "immediate" by `type_is_immediate`
-    /// - the operand must never be referenced indirectly
-    ///     - we should not take its address using the `&` operator
-    ///     - nor should it appear in an lvalue path like `tmp.a`
-    /// - the operand must be defined by an rvalue that can generate immediate
-    ///   values
-    ///
-    /// Avoiding allocs can also be important for certain intrinsics,
-    /// notably `expect`.
-    temps: Vec<TempRef<'tcx>>,
-
-    /// The arguments to the function; as args are lvalues, these are
-    /// always indirect, though we try to avoid creating an alloca
-    /// when we can (and just reuse the pointer the caller provided).
-    args: Vec<LvalueRef<'tcx>>,
-}
-
-enum TempRef<'tcx> {
-    Lvalue(LvalueRef<'tcx>),
-    Operand(Option<OperandRef<'tcx>>),
-}
-
-///////////////////////////////////////////////////////////////////////////
-
-pub fn trans_mir<'bcx, 'tcx>(bcx: BlockAndBuilder<'bcx, 'tcx>) {
-    let fcx = bcx.fcx();
-    let mir = bcx.mir();
-
-    let mir_blocks = bcx.mir().all_basic_blocks();
-
-    // Analyze the temps to determine which must be lvalues
-    // FIXME
-    let lvalue_temps = bcx.with_block(|bcx| {
-      analyze::lvalue_temps(bcx, mir)
-    });
-
-    // Allocate variable and temp allocas
-    let vars = mir.var_decls.iter()
-                            .map(|decl| (bcx.monomorphize(&decl.ty), decl.name))
-                            .map(|(mty, name)| LvalueRef::alloca(&bcx, mty, &name.as_str()))
-                            .collect();
-    let temps = mir.temp_decls.iter()
-                              .map(|decl| bcx.monomorphize(&decl.ty))
-                              .enumerate()
-                              .map(|(i, mty)| if lvalue_temps.contains(i) {
-                                  TempRef::Lvalue(LvalueRef::alloca(&bcx,
-                                                                    mty,
-                                                                    &format!("temp{:?}", i)))
-                              } else {
-                                  // If this is an immediate temp, we do not create an
-                                  // alloca in advance. Instead we wait until we see the
-                                  // definition and update the operand there.
-                                  TempRef::Operand(None)
-                              })
-                              .collect();
-    let args = arg_value_refs(&bcx, mir);
-
-    // Allocate a `Block` for every basic block
-    let block_bcxs: Vec<Block<'bcx,'tcx>> =
-        mir_blocks.iter()
-                  .map(|&bb|{
-                      // FIXME(#30941) this doesn't handle msvc-style exceptions
-                      fcx.new_block(&format!("{:?}", bb), None)
-                  })
-                  .collect();
-
-    // Branch to the START block
-    let start_bcx = block_bcxs[mir::START_BLOCK.index()];
-    bcx.br(start_bcx.llbb);
-
-    let mut mircx = MirContext {
-        mir: mir,
-        fcx: fcx,
-        llpersonalityslot: None,
-        blocks: block_bcxs,
-        unreachable_block: None,
-        vars: vars,
-        temps: temps,
-        args: args,
-    };
-
-    // Translate the body of each block
-    for &bb in &mir_blocks {
-        mircx.trans_block(bb);
-    }
-}
-
-/// Produce, for each argument, a `ValueRef` pointing at the
-/// argument's value. As arguments are lvalues, these are always
-/// indirect.
-fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                              mir: &mir::Mir<'tcx>)
-                              -> Vec<LvalueRef<'tcx>> {
-    // FIXME tupled_args? I think I'd rather that mapping is done in MIR land though
-    let fcx = bcx.fcx();
-    let tcx = bcx.tcx();
-    let mut idx = fcx.arg_offset() as c_uint;
-    mir.arg_decls
-       .iter()
-       .enumerate()
-       .map(|(arg_index, arg_decl)| {
-           let arg_ty = bcx.monomorphize(&arg_decl.ty);
-           let llval = if type_of::arg_is_indirect(bcx.ccx(), arg_ty) {
-               // Don't copy an indirect argument to an alloca; the caller
-               // already put it in a temporary alloca and gave it up, unless
-               // we emit extra-debug-info, which requires local allocas :(.
-               // FIXME: lifetimes, debug info
-               let llarg = llvm::get_param(fcx.llfn, idx);
-               idx += 1;
-               llarg
-           } else if common::type_is_fat_ptr(tcx, arg_ty) {
-               // we pass fat pointers as two words, but we want to
-               // represent them internally as a pointer to two words,
-               // so make an alloca to store them in.
-               let lldata = llvm::get_param(fcx.llfn, idx);
-               let llextra = llvm::get_param(fcx.llfn, idx + 1);
-               idx += 2;
-               let (lltemp, dataptr, meta) = bcx.with_block(|bcx| {
-                   let lltemp = base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index));
-                   (lltemp, expr::get_dataptr(bcx, lltemp), expr::get_meta(bcx, lltemp))
-               });
-               bcx.store(lldata, dataptr);
-               bcx.store(llextra, meta);
-               lltemp
-           } else {
-               // otherwise, arg is passed by value, so make a
-               // temporary and store it there
-               let llarg = llvm::get_param(fcx.llfn, idx);
-               idx += 1;
-               bcx.with_block(|bcx| {
-                   let lltemp = base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index));
-                   base::store_ty(bcx, llarg, lltemp, arg_ty);
-                   lltemp
-               })
-           };
-           LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty))
-       })
-       .collect()
-}
-
-mod analyze;
-mod block;
-mod constant;
-mod did;
-mod lvalue;
-mod operand;
-mod rvalue;
-mod statement;
diff --git a/src/librustc_trans/trans/mir/operand.rs b/src/librustc_trans/trans/mir/operand.rs
deleted file mode 100644 (file)
index 2183348..0000000
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use llvm::ValueRef;
-use rustc::middle::ty::{self, Ty};
-use rustc::mir::repr as mir;
-use trans::adt;
-use trans::base;
-use trans::common::{self, Block, BlockAndBuilder};
-use trans::datum;
-use trans::Disr;
-
-use super::{MirContext, TempRef};
-use super::lvalue::LvalueRef;
-
-/// The representation of a Rust value. The enum variant is in fact
-/// uniquely determined by the value's type, but is kept as a
-/// safety check.
-#[derive(Copy, Clone)]
-pub enum OperandValue {
-    /// A reference to the actual operand. The data is guaranteed
-    /// to be valid for the operand's lifetime.
-    Ref(ValueRef),
-    /// A single LLVM value.
-    Immediate(ValueRef),
-    /// A fat pointer. The first ValueRef is the data and the second
-    /// is the extra.
-    FatPtr(ValueRef, ValueRef)
-}
-
-/// An `OperandRef` is an "SSA" reference to a Rust value, along with
-/// its type.
-///
-/// NOTE: unless you know a value's type exactly, you should not
-/// generate LLVM opcodes acting on it and instead act via methods,
-/// to avoid nasty edge cases. In particular, using `Builder.store`
-/// directly is sure to cause problems -- use `MirContext.store_operand`
-/// instead.
-#[derive(Copy, Clone)]
-pub struct OperandRef<'tcx> {
-    // The value.
-    pub val: OperandValue,
-
-    // The type of value being returned.
-    pub ty: Ty<'tcx>
-}
-
-impl<'tcx> OperandRef<'tcx> {
-    /// Asserts that this operand refers to a scalar and returns
-    /// a reference to its value.
-    pub fn immediate(self) -> ValueRef {
-        match self.val {
-            OperandValue::Immediate(s) => s,
-            _ => unreachable!()
-        }
-    }
-
-    pub fn repr<'bcx>(self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> String {
-        match self.val {
-            OperandValue::Ref(r) => {
-                format!("OperandRef(Ref({}) @ {:?})",
-                        bcx.val_to_string(r), self.ty)
-            }
-            OperandValue::Immediate(i) => {
-                format!("OperandRef(Immediate({}) @ {:?})",
-                        bcx.val_to_string(i), self.ty)
-            }
-            OperandValue::FatPtr(a, d) => {
-                format!("OperandRef(FatPtr({}, {}) @ {:?})",
-                        bcx.val_to_string(a),
-                        bcx.val_to_string(d),
-                        self.ty)
-            }
-        }
-    }
-
-    pub fn from_rvalue_datum(datum: datum::Datum<'tcx, datum::Rvalue>) -> OperandRef {
-        OperandRef {
-            ty: datum.ty,
-            val: match datum.kind.mode {
-                datum::RvalueMode::ByRef => OperandValue::Ref(datum.val),
-                datum::RvalueMode::ByValue => OperandValue::Immediate(datum.val),
-            }
-        }
-    }
-}
-
-impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
-    pub fn trans_load(&mut self,
-                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                      llval: ValueRef,
-                      ty: Ty<'tcx>)
-                      -> OperandRef<'tcx>
-    {
-        debug!("trans_load: {} @ {:?}", bcx.val_to_string(llval), ty);
-
-        let val = match datum::appropriate_rvalue_mode(bcx.ccx(), ty) {
-            datum::ByValue => {
-                bcx.with_block(|bcx| {
-                    OperandValue::Immediate(base::load_ty(bcx, llval, ty))
-                })
-            }
-            datum::ByRef if common::type_is_fat_ptr(bcx.tcx(), ty) => {
-                let (lldata, llextra) = bcx.with_block(|bcx| {
-                    base::load_fat_ptr(bcx, llval, ty)
-                });
-                OperandValue::FatPtr(lldata, llextra)
-            }
-            datum::ByRef => OperandValue::Ref(llval)
-        };
-
-        OperandRef { val: val, ty: ty }
-    }
-
-    pub fn trans_operand(&mut self,
-                         bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                         operand: &mir::Operand<'tcx>)
-                         -> OperandRef<'tcx>
-    {
-        debug!("trans_operand(operand={:?})", operand);
-
-        match *operand {
-            mir::Operand::Consume(ref lvalue) => {
-                // watch out for temporaries that do not have an
-                // alloca; they are handled somewhat differently
-                if let &mir::Lvalue::Temp(index) = lvalue {
-                    match self.temps[index as usize] {
-                        TempRef::Operand(Some(o)) => {
-                            return o;
-                        }
-                        TempRef::Operand(None) => {
-                            bcx.tcx().sess.bug(
-                                &format!("use of {:?} before def", lvalue));
-                        }
-                        TempRef::Lvalue(..) => {
-                            // use path below
-                        }
-                    }
-                }
-
-                // for most lvalues, to consume them we just load them
-                // out from their home
-                let tr_lvalue = self.trans_lvalue(bcx, lvalue);
-                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
-                self.trans_load(bcx, tr_lvalue.llval, ty)
-            }
-
-            mir::Operand::Constant(ref constant) => {
-                self.trans_constant(bcx, constant)
-            }
-        }
-    }
-
-    pub fn trans_operand_into(&mut self,
-                              bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                              lldest: ValueRef,
-                              operand: &mir::Operand<'tcx>)
-    {
-        debug!("trans_operand_into(lldest={}, operand={:?})",
-               bcx.val_to_string(lldest),
-               operand);
-
-        // FIXME: consider not copying constants through the
-        // stack.
-
-        let o = self.trans_operand(bcx, operand);
-        self.store_operand(bcx, lldest, o);
-    }
-
-    pub fn store_operand(&mut self,
-                         bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                         lldest: ValueRef,
-                         operand: OperandRef<'tcx>)
-    {
-        debug!("store_operand: operand={}", operand.repr(bcx));
-        bcx.with_block(|bcx| {
-            self.store_operand_direct(bcx, lldest, operand)
-        })
-    }
-
-    pub fn store_operand_direct(&mut self,
-                                bcx: Block<'bcx, 'tcx>,
-                                lldest: ValueRef,
-                                operand: OperandRef<'tcx>)
-    {
-        // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
-        // value is through `undef`, and the store itself is useless.
-        if common::type_is_zero_size(bcx.ccx(), operand.ty) {
-            return;
-        }
-        match operand.val {
-            OperandValue::Ref(r) => base::memcpy_ty(bcx, lldest, r, operand.ty),
-            OperandValue::Immediate(s) => base::store_ty(bcx, s, lldest, operand.ty),
-            OperandValue::FatPtr(data, extra) => {
-                base::store_fat_ptr(bcx, data, extra, lldest, operand.ty);
-            }
-        }
-    }
-
-    pub fn trans_operand_untupled(&mut self,
-                                  bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                                  operand: &mir::Operand<'tcx>)
-                                  -> Vec<OperandRef<'tcx>>
-    {
-        // FIXME: consider having some optimization to avoid tupling/untupling
-        // (and storing/loading in the case of immediates)
-
-        // avoid trans_operand for pointless copying
-        let lv = match *operand {
-            mir::Operand::Consume(ref lvalue) => self.trans_lvalue(bcx, lvalue),
-            mir::Operand::Constant(ref constant) => {
-                // FIXME: consider being less pessimized
-                if constant.ty.is_nil() {
-                    return vec![];
-                }
-
-                let ty = bcx.monomorphize(&constant.ty);
-                let lv = LvalueRef::alloca(bcx, ty, "__untuple_alloca");
-                let constant = self.trans_constant(bcx, constant);
-                self.store_operand(bcx, lv.llval, constant);
-                lv
-            }
-        };
-
-        let lv_ty = lv.ty.to_ty(bcx.tcx());
-        let result_types = match lv_ty.sty {
-            ty::TyTuple(ref tys) => tys,
-            _ => bcx.tcx().sess.span_bug(
-                self.mir.span,
-                &format!("bad final argument to \"rust-call\" fn {:?}", lv_ty))
-        };
-
-        let base_repr = adt::represent_type(bcx.ccx(), lv_ty);
-        let base = adt::MaybeSizedValue::sized(lv.llval);
-        result_types.iter().enumerate().map(|(n, &ty)| {
-            self.trans_load(bcx, bcx.with_block(|bcx| {
-                adt::trans_field_ptr(bcx, &base_repr, base, Disr(0), n)
-            }), ty)
-        }).collect()
-    }
-}
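
The deleted operand.rs above keeps every translated MIR value in one of three shapes (by-ref, immediate, or fat pointer) and dispatches stores on that shape. Below is a minimal standalone sketch of the same dispatch idea using made-up types; the real code stores through LLVM ValueRefs via memcpy_ty/store_ty/store_fat_ptr, not a plain array of slots.

// Hypothetical stand-ins for LLVM values: "memory" is a flat array of u64 slots.
#[derive(Copy, Clone, Debug)]
enum Operand {
    Ref(usize),       // index of a slot that already holds the value (by-ref)
    Immediate(u64),   // a single scalar value
    FatPtr(u64, u64), // data pointer plus extra word (length or vtable)
}

fn store(memory: &mut [u64], dest: usize, op: Operand) {
    match op {
        Operand::Ref(src) => memory[dest] = memory[src], // a memcpy in the real code
        Operand::Immediate(v) => memory[dest] = v,
        Operand::FatPtr(data, extra) => {
            // a fat pointer occupies two adjacent slots: data, then extra
            memory[dest] = data;
            memory[dest + 1] = extra;
        }
    }
}

fn main() {
    let mut memory = [0u64; 4];
    store(&mut memory, 0, Operand::Immediate(7));
    store(&mut memory, 1, Operand::FatPtr(0xbeef, 3));
    store(&mut memory, 3, Operand::Ref(0));
    println!("{:?}", memory);
}
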
diff --git a/src/librustc_trans/trans/mir/rvalue.rs b/src/librustc_trans/trans/mir/rvalue.rs
deleted file mode 100644 (file)
index 2468601..0000000
+++ /dev/null
@@ -1,562 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use llvm::ValueRef;
-use rustc::middle::ty::{self, Ty};
-use middle::ty::cast::{CastTy, IntTy};
-use rustc::mir::repr as mir;
-
-use trans::asm;
-use trans::base;
-use trans::common::{self, BlockAndBuilder, Result};
-use trans::debuginfo::DebugLoc;
-use trans::declare;
-use trans::expr;
-use trans::adt;
-use trans::machine;
-use trans::type_::Type;
-use trans::type_of;
-use trans::tvec;
-use trans::Disr;
-
-use super::MirContext;
-use super::operand::{OperandRef, OperandValue};
-use super::lvalue::LvalueRef;
-
-impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
-    pub fn trans_rvalue(&mut self,
-                        bcx: BlockAndBuilder<'bcx, 'tcx>,
-                        dest: LvalueRef<'tcx>,
-                        rvalue: &mir::Rvalue<'tcx>)
-                        -> BlockAndBuilder<'bcx, 'tcx>
-    {
-        debug!("trans_rvalue(dest.llval={}, rvalue={:?})",
-               bcx.val_to_string(dest.llval),
-               rvalue);
-
-        match *rvalue {
-            mir::Rvalue::Use(ref operand) => {
-                self.trans_operand_into(&bcx, dest.llval, operand);
-                bcx
-            }
-
-            mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, cast_ty) => {
-                if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
-                    // into-coerce of a thin pointer to a fat pointer - just
-                    // use the operand path.
-                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
-                    self.store_operand(&bcx, dest.llval, temp);
-                    return bcx;
-                }
-
-                // Unsize of a nontrivial struct. I would prefer for
-                // this to be eliminated by MIR translation, but
-                // `CoerceUnsized` can be passed by a where-clause,
-                // so the (generic) MIR may not be able to expand it.
-                let operand = self.trans_operand(&bcx, operand);
-                bcx.with_block(|bcx| {
-                    match operand.val {
-                        OperandValue::FatPtr(..) => unreachable!(),
-                        OperandValue::Immediate(llval) => {
-                            // unsize from an immediate structure. We don't
-                            // really need a temporary alloca here, but
-                            // avoiding it would require us to have
-                            // `coerce_unsized_into` use extractvalue to
-                            // index into the struct, and this case isn't
-                            // important enough for it.
-                            debug!("trans_rvalue: creating ugly alloca");
-                            let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
-                            base::store_ty(bcx, llval, lltemp, operand.ty);
-                            base::coerce_unsized_into(bcx,
-                                                      lltemp, operand.ty,
-                                                      dest.llval, cast_ty);
-                        }
-                        OperandValue::Ref(llref) => {
-                            base::coerce_unsized_into(bcx,
-                                                      llref, operand.ty,
-                                                      dest.llval, cast_ty);
-                        }
-                    }
-                });
-                bcx
-            }
-
-            mir::Rvalue::Repeat(ref elem, ref count) => {
-                let elem = self.trans_operand(&bcx, elem);
-                let size = self.trans_constval(&bcx, &count.value, count.ty).immediate();
-                bcx.map_block(|block| {
-                    let base = expr::get_dataptr(block, dest.llval);
-                    tvec::iter_vec_raw(block, base, elem.ty, size, |block, llslot, _| {
-                        self.store_operand_direct(block, llslot, elem);
-                        block
-                    })
-                })
-            }
-
-            mir::Rvalue::Aggregate(ref kind, ref operands) => {
-                match *kind {
-                    mir::AggregateKind::Adt(adt_def, index, _) => {
-                        let repr = adt::represent_type(bcx.ccx(), dest.ty.to_ty(bcx.tcx()));
-                        let disr = Disr::from(adt_def.variants[index].disr_val);
-                        bcx.with_block(|bcx| {
-                            adt::trans_set_discr(bcx, &repr, dest.llval, Disr::from(disr));
-                        });
-                        for (i, operand) in operands.iter().enumerate() {
-                            let op = self.trans_operand(&bcx, operand);
-                            // Do not generate stores and GEPis for zero-sized fields.
-                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
-                                let val = adt::MaybeSizedValue::sized(dest.llval);
-                                let lldest_i = bcx.with_block(|bcx| {
-                                    adt::trans_field_ptr(bcx, &repr, val, disr, i)
-                                });
-                                self.store_operand(&bcx, lldest_i, op);
-                            }
-                        }
-                    },
-                    _ => {
-                        for (i, operand) in operands.iter().enumerate() {
-                            let op = self.trans_operand(&bcx, operand);
-                            // Do not generate stores and GEPis for zero-sized fields.
-                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
-                                // Note: perhaps this should be StructGep, but
-                                // note that in some cases the values here will
-                                // not be structs but arrays.
-                                let dest = bcx.gepi(dest.llval, &[0, i]);
-                                self.store_operand(&bcx, dest, op);
-                            }
-                        }
-                    }
-                }
-                bcx
-            }
-
-            mir::Rvalue::Slice { ref input, from_start, from_end } => {
-                let ccx = bcx.ccx();
-                let input = self.trans_lvalue(&bcx, input);
-                let (llbase, lllen) = bcx.with_block(|bcx| {
-                    tvec::get_base_and_len(bcx,
-                                           input.llval,
-                                           input.ty.to_ty(bcx.tcx()))
-                });
-                let llbase1 = bcx.gepi(llbase, &[from_start]);
-                let adj = common::C_uint(ccx, from_start + from_end);
-                let lllen1 = bcx.sub(lllen, adj);
-                let (lladdrdest, llmetadest) = bcx.with_block(|bcx| {
-                    (expr::get_dataptr(bcx, dest.llval), expr::get_meta(bcx, dest.llval))
-                });
-                bcx.store(llbase1, lladdrdest);
-                bcx.store(lllen1, llmetadest);
-                bcx
-            }
-
-            mir::Rvalue::InlineAsm(ref inline_asm) => {
-                bcx.map_block(|bcx| {
-                    asm::trans_inline_asm(bcx, inline_asm)
-                })
-            }
-
-            _ => {
-                assert!(rvalue_creates_operand(rvalue));
-                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
-                self.store_operand(&bcx, dest.llval, temp);
-                bcx
-            }
-        }
-    }
-
-    pub fn trans_rvalue_operand(&mut self,
-                                bcx: BlockAndBuilder<'bcx, 'tcx>,
-                                rvalue: &mir::Rvalue<'tcx>)
-                                -> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
-    {
-        assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
-
-        match *rvalue {
-            mir::Rvalue::Use(ref operand) => {
-                let operand = self.trans_operand(&bcx, operand);
-                (bcx, operand)
-            }
-
-            mir::Rvalue::Cast(ref kind, ref operand, cast_ty) => {
-                let operand = self.trans_operand(&bcx, operand);
-                debug!("cast operand is {}", operand.repr(&bcx));
-                let cast_ty = bcx.monomorphize(&cast_ty);
-
-                let val = match *kind {
-                    mir::CastKind::ReifyFnPointer |
-                    mir::CastKind::UnsafeFnPointer => {
-                        // these are no-ops at the LLVM level
-                        operand.val
-                    }
-                    mir::CastKind::Unsize => {
-                        // unsize targets other than to a fat pointer currently
-                        // can't be operands.
-                        assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));
-
-                        match operand.val {
-                            OperandValue::FatPtr(..) => {
-                                // unsize from a fat pointer - this is a
-                                // "trait-object-to-supertrait" coercion, for
-                                // example,
-                                //   &'a fmt::Debug+Send => &'a fmt::Debug,
-                                // and is a no-op at the LLVM level
-                                operand.val
-                            }
-                            OperandValue::Immediate(lldata) => {
-                                // "standard" unsize
-                                let (lldata, llextra) = bcx.with_block(|bcx| {
-                                    base::unsize_thin_ptr(bcx, lldata,
-                                                          operand.ty, cast_ty)
-                                });
-                                OperandValue::FatPtr(lldata, llextra)
-                            }
-                            OperandValue::Ref(_) => {
-                                bcx.sess().bug(
-                                    &format!("by-ref operand {} in trans_rvalue_operand",
-                                             operand.repr(&bcx)));
-                            }
-                        }
-                    }
-                    mir::CastKind::Misc if common::type_is_immediate(bcx.ccx(), operand.ty) => {
-                        debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
-                        let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
-                        let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
-                        let ll_t_in = type_of::arg_type_of(bcx.ccx(), operand.ty);
-                        let ll_t_out = type_of::arg_type_of(bcx.ccx(), cast_ty);
-                        let (llval, ll_t_in, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in {
-                            let repr = adt::represent_type(bcx.ccx(), operand.ty);
-                            let llval = operand.immediate();
-                            let discr = bcx.with_block(|bcx| {
-                                adt::trans_get_discr(bcx, &repr, llval, None, true)
-                            });
-                            (discr, common::val_ty(discr), adt::is_discr_signed(&repr))
-                        } else {
-                            (operand.immediate(), ll_t_in, operand.ty.is_signed())
-                        };
-
-                        let newval = match (r_t_in, r_t_out) {
-                            (CastTy::Int(_), CastTy::Int(_)) => {
-                                let srcsz = ll_t_in.int_width();
-                                let dstsz = ll_t_out.int_width();
-                                if srcsz == dstsz {
-                                    bcx.bitcast(llval, ll_t_out)
-                                } else if srcsz > dstsz {
-                                    bcx.trunc(llval, ll_t_out)
-                                } else if signed {
-                                    bcx.sext(llval, ll_t_out)
-                                } else {
-                                    bcx.zext(llval, ll_t_out)
-                                }
-                            }
-                            (CastTy::Float, CastTy::Float) => {
-                                let srcsz = ll_t_in.float_width();
-                                let dstsz = ll_t_out.float_width();
-                                if dstsz > srcsz {
-                                    bcx.fpext(llval, ll_t_out)
-                                } else if srcsz > dstsz {
-                                    bcx.fptrunc(llval, ll_t_out)
-                                } else {
-                                    llval
-                                }
-                            }
-                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
-                            (CastTy::FnPtr, CastTy::Ptr(_)) |
-                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
-                                bcx.pointercast(llval, ll_t_out),
-                            (CastTy::Ptr(_), CastTy::Int(_)) |
-                            (CastTy::FnPtr, CastTy::Int(_)) =>
-                                bcx.ptrtoint(llval, ll_t_out),
-                            (CastTy::Int(_), CastTy::Ptr(_)) =>
-                                bcx.inttoptr(llval, ll_t_out),
-                            (CastTy::Int(_), CastTy::Float) if signed =>
-                                bcx.sitofp(llval, ll_t_out),
-                            (CastTy::Int(_), CastTy::Float) =>
-                                bcx.uitofp(llval, ll_t_out),
-                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
-                                bcx.fptosi(llval, ll_t_out),
-                            (CastTy::Float, CastTy::Int(_)) =>
-                                bcx.fptoui(llval, ll_t_out),
-                            _ => bcx.ccx().sess().bug(
-                                &format!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
-                            )
-                        };
-                        OperandValue::Immediate(newval)
-                    }
-                    mir::CastKind::Misc => { // Casts from a fat-ptr.
-                        let ll_cast_ty = type_of::arg_type_of(bcx.ccx(), cast_ty);
-                        let ll_from_ty = type_of::arg_type_of(bcx.ccx(), operand.ty);
-                        if let OperandValue::FatPtr(data_ptr, meta_ptr) = operand.val {
-                            if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
-                                let ll_cft = ll_cast_ty.field_types();
-                                let ll_fft = ll_from_ty.field_types();
-                                let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
-                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
-                                OperandValue::FatPtr(data_cast, meta_ptr)
-                            } else { // cast to thin-ptr
-                                // Casting a fat pointer to a thin pointer extracts the data
-                                // pointer and pointer-casts it to the desired pointer type.
-                                let llval = bcx.pointercast(data_ptr, ll_cast_ty);
-                                OperandValue::Immediate(llval)
-                            }
-                        } else {
-                            panic!("Unexpected non-FatPtr operand")
-                        }
-                    }
-                };
-                let operand = OperandRef {
-                    val: val,
-                    ty: cast_ty
-                };
-                (bcx, operand)
-            }
-
-            mir::Rvalue::Ref(_, bk, ref lvalue) => {
-                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
-
-                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
-                let ref_ty = bcx.tcx().mk_ref(
-                    bcx.tcx().mk_region(ty::ReStatic),
-                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
-                );
-
-                // Note: lvalues are indirect, so storing the `llval` into the
-                // destination effectively creates a reference.
-                let operand = if common::type_is_sized(bcx.tcx(), ty) {
-                    OperandRef {
-                        val: OperandValue::Immediate(tr_lvalue.llval),
-                        ty: ref_ty,
-                    }
-                } else {
-                    OperandRef {
-                        val: OperandValue::FatPtr(tr_lvalue.llval,
-                                                  tr_lvalue.llextra),
-                        ty: ref_ty,
-                    }
-                };
-                (bcx, operand)
-            }
-
-            mir::Rvalue::Len(ref lvalue) => {
-                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
-                let operand = OperandRef {
-                    val: OperandValue::Immediate(self.lvalue_len(&bcx, tr_lvalue)),
-                    ty: bcx.tcx().types.usize,
-                };
-                (bcx, operand)
-            }
-
-            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
-                let lhs = self.trans_operand(&bcx, lhs);
-                let rhs = self.trans_operand(&bcx, rhs);
-                let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
-                    match (lhs.val, rhs.val) {
-                        (OperandValue::FatPtr(lhs_addr, lhs_extra),
-                         OperandValue::FatPtr(rhs_addr, rhs_extra)) => {
-                            bcx.with_block(|bcx| {
-                                base::compare_fat_ptrs(bcx,
-                                                       lhs_addr, lhs_extra,
-                                                       rhs_addr, rhs_extra,
-                                                       lhs.ty, op.to_hir_binop(),
-                                                       DebugLoc::None)
-                            })
-                        }
-                        _ => unreachable!()
-                    }
-
-                } else {
-                    self.trans_scalar_binop(&bcx, op,
-                                            lhs.immediate(), rhs.immediate(),
-                                            lhs.ty)
-                };
-                let operand = OperandRef {
-                    val: OperandValue::Immediate(llresult),
-                    ty: self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty),
-                };
-                (bcx, operand)
-            }
-
-            mir::Rvalue::UnaryOp(op, ref operand) => {
-                let operand = self.trans_operand(&bcx, operand);
-                let lloperand = operand.immediate();
-                let is_float = operand.ty.is_fp();
-                let llval = match op {
-                    mir::UnOp::Not => bcx.not(lloperand),
-                    mir::UnOp::Neg => if is_float {
-                        bcx.fneg(lloperand)
-                    } else {
-                        bcx.neg(lloperand)
-                    }
-                };
-                (bcx, OperandRef {
-                    val: OperandValue::Immediate(llval),
-                    ty: operand.ty,
-                })
-            }
-
-            mir::Rvalue::Box(content_ty) => {
-                let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
-                let llty = type_of::type_of(bcx.ccx(), content_ty);
-                let llsize = machine::llsize_of(bcx.ccx(), llty);
-                let align = type_of::align_of(bcx.ccx(), content_ty);
-                let llalign = common::C_uint(bcx.ccx(), align);
-                let llty_ptr = llty.ptr_to();
-                let box_ty = bcx.tcx().mk_box(content_ty);
-                let mut llval = None;
-                let bcx = bcx.map_block(|bcx| {
-                    let Result { bcx, val } = base::malloc_raw_dyn(bcx,
-                                                                   llty_ptr,
-                                                                   box_ty,
-                                                                   llsize,
-                                                                   llalign,
-                                                                   DebugLoc::None);
-                    llval = Some(val);
-                    bcx
-                });
-                let operand = OperandRef {
-                    val: OperandValue::Immediate(llval.unwrap()),
-                    ty: box_ty,
-                };
-                (bcx, operand)
-            }
-
-            mir::Rvalue::Repeat(..) |
-            mir::Rvalue::Aggregate(..) |
-            mir::Rvalue::Slice { .. } |
-            mir::Rvalue::InlineAsm(..) => {
-                bcx.tcx().sess.bug(&format!("cannot generate operand from rvalue {:?}", rvalue));
-            }
-        }
-    }
-
-    pub fn trans_scalar_binop(&mut self,
-                              bcx: &BlockAndBuilder<'bcx, 'tcx>,
-                              op: mir::BinOp,
-                              lhs: ValueRef,
-                              rhs: ValueRef,
-                              input_ty: Ty<'tcx>) -> ValueRef {
-        let is_float = input_ty.is_fp();
-        let is_signed = input_ty.is_signed();
-        match op {
-            mir::BinOp::Add => if is_float {
-                bcx.fadd(lhs, rhs)
-            } else {
-                bcx.add(lhs, rhs)
-            },
-            mir::BinOp::Sub => if is_float {
-                bcx.fsub(lhs, rhs)
-            } else {
-                bcx.sub(lhs, rhs)
-            },
-            mir::BinOp::Mul => if is_float {
-                bcx.fmul(lhs, rhs)
-            } else {
-                bcx.mul(lhs, rhs)
-            },
-            mir::BinOp::Div => if is_float {
-                bcx.fdiv(lhs, rhs)
-            } else if is_signed {
-                bcx.sdiv(lhs, rhs)
-            } else {
-                bcx.udiv(lhs, rhs)
-            },
-            mir::BinOp::Rem => if is_float {
-                // LLVM currently always lowers `frem` instructions to the appropriate
-                // library calls typically found in libm. Notably, f64 gets wired up
-                // to `fmod` and f32 gets wired up to `fmodf`. Inconveniently for
-                // us, 32-bit MSVC does not actually have an `fmodf` symbol; it is
-                // instead just an inline function in a header that widens to an
-                // f64, uses `fmod`, and then comes back down to an f32.
-                //
-                // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
-                // still unconditionally lower frem instructions over 32-bit floats
-                // to a call to `fmodf`. To work around this we special case MSVC
-                // 32-bit float rem instructions and instead do the call out to
-                // `fmod` ourselves.
-                //
-                // Note that this is currently duplicated with src/libcore/ops.rs
-                // which does the same thing, and it would be nice to perhaps unify
-                // these two implementations one day! Also note that we call `fmod`
-                // for both 32 and 64-bit floats because if we emit any FRem
-                // instruction at all then LLVM is capable of optimizing it into a
-                // 32-bit FRem (which we're trying to avoid).
-                let tcx = bcx.tcx();
-                let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
-                    tcx.sess.target.target.arch == "x86";
-                if use_fmod {
-                    let f64t = Type::f64(bcx.ccx());
-                    let fty = Type::func(&[f64t, f64t], &f64t);
-                    let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty,
-                                                    tcx.types.f64);
-                    if input_ty == tcx.types.f32 {
-                        let lllhs = bcx.fpext(lhs, f64t);
-                        let llrhs = bcx.fpext(rhs, f64t);
-                        let llres = bcx.call(llfn, &[lllhs, llrhs], None, None);
-                        bcx.fptrunc(llres, Type::f32(bcx.ccx()))
-                    } else {
-                        bcx.call(llfn, &[lhs, rhs], None, None)
-                    }
-                } else {
-                    bcx.frem(lhs, rhs)
-                }
-            } else if is_signed {
-                bcx.srem(lhs, rhs)
-            } else {
-                bcx.urem(lhs, rhs)
-            },
-            mir::BinOp::BitOr => bcx.or(lhs, rhs),
-            mir::BinOp::BitAnd => bcx.and(lhs, rhs),
-            mir::BinOp::BitXor => bcx.xor(lhs, rhs),
-            mir::BinOp::Shl => {
-                bcx.with_block(|bcx| {
-                    common::build_unchecked_lshift(bcx,
-                                                   lhs,
-                                                   rhs,
-                                                   DebugLoc::None)
-                })
-            }
-            mir::BinOp::Shr => {
-                bcx.with_block(|bcx| {
-                    common::build_unchecked_rshift(bcx,
-                                                   input_ty,
-                                                   lhs,
-                                                   rhs,
-                                                   DebugLoc::None)
-                })
-            }
-            mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt |
-            mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => {
-                bcx.with_block(|bcx| {
-                    base::compare_scalar_types(bcx, lhs, rhs, input_ty,
-                                               op.to_hir_binop(), DebugLoc::None)
-                })
-            }
-        }
-    }
-}
-
-pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool {
-    match *rvalue {
-        mir::Rvalue::Use(..) | // (*)
-        mir::Rvalue::Ref(..) |
-        mir::Rvalue::Len(..) |
-        mir::Rvalue::Cast(..) | // (*)
-        mir::Rvalue::BinaryOp(..) |
-        mir::Rvalue::UnaryOp(..) |
-        mir::Rvalue::Box(..) =>
-            true,
-        mir::Rvalue::Repeat(..) |
-        mir::Rvalue::Aggregate(..) |
-        mir::Rvalue::Slice { .. } |
-        mir::Rvalue::InlineAsm(..) =>
-            false,
-    }
-
-    // (*) this is only true if the type is suitable
-}
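
The Rem arm of trans_scalar_binop above explains the 32-bit MSVC workaround: widen f32 operands to f64, call fmod, and truncate the result back. A rough standalone sketch of that round trip, with Rust's own f64 % operator standing in for the emitted call to the C fmod symbol:

// Sketch of the fpext -> fmod -> fptrunc sequence the comment describes.
// `%` on f64 has the same semantics as libm's fmod for finite inputs.
fn rem_f32_via_f64(lhs: f32, rhs: f32) -> f32 {
    let l = lhs as f64; // fpext
    let r = rhs as f64; // fpext
    (l % r) as f32      // fmod on f64, then fptrunc back to f32
}

fn main() {
    println!("7.5 % 2.0 = {}", rem_f32_via_f64(7.5, 2.0)); // prints 1.5
}
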
diff --git a/src/librustc_trans/trans/mir/statement.rs b/src/librustc_trans/trans/mir/statement.rs
deleted file mode 100644 (file)
index 0307fd6..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use rustc::mir::repr as mir;
-use trans::common::BlockAndBuilder;
-
-use super::MirContext;
-use super::TempRef;
-
-impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
-    pub fn trans_statement(&mut self,
-                           bcx: BlockAndBuilder<'bcx, 'tcx>,
-                           statement: &mir::Statement<'tcx>)
-                           -> BlockAndBuilder<'bcx, 'tcx> {
-        debug!("trans_statement(statement={:?})", statement);
-
-        match statement.kind {
-            mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
-                match *lvalue {
-                    mir::Lvalue::Temp(index) => {
-                        let index = index as usize;
-                        match self.temps[index] {
-                            TempRef::Lvalue(tr_dest) => {
-                                self.trans_rvalue(bcx, tr_dest, rvalue)
-                            }
-                            TempRef::Operand(None) => {
-                                let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue);
-                                self.temps[index] = TempRef::Operand(Some(operand));
-                                bcx
-                            }
-                            TempRef::Operand(Some(_)) => {
-                                bcx.tcx().sess.span_bug(
-                                    statement.span,
-                                    &format!("operand {:?} already assigned", rvalue));
-                            }
-                        }
-                    }
-                    _ => {
-                        let tr_dest = self.trans_lvalue(&bcx, lvalue);
-                        self.trans_rvalue(bcx, tr_dest, rvalue)
-                    }
-                }
-            }
-        }
-    }
-}
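
trans_statement above routes each assignment either through a temp's alloca or, for operand temps, into a write-once slot, and treats a second assignment to an operand temp as a compiler bug. A stripped-down sketch of that bookkeeping with toy types (the real code uses TempRef, LvalueRef and OperandRef):

// Toy model: a temp is either backed by a memory slot or by a write-once operand.
enum Temp {
    Lvalue(usize),        // index of the backing memory slot (the "alloca")
    Operand(Option<i64>), // SSA-like temp, assigned exactly once
}

fn assign(temps: &mut [Temp], memory: &mut [i64], index: usize, value: i64) {
    match &mut temps[index] {
        Temp::Lvalue(addr) => memory[*addr] = value, // store through the alloca
        Temp::Operand(slot) => {
            assert!(slot.is_none(), "operand temp {} already assigned", index);
            *slot = Some(value); // first (and only) definition
        }
    }
}

fn main() {
    let mut memory = [0i64; 1];
    let mut temps = [Temp::Lvalue(0), Temp::Operand(None)];
    assign(&mut temps, &mut memory, 0, 7);
    assign(&mut temps, &mut memory, 1, 9);
    println!("memory = {:?}", memory);
}
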
diff --git a/src/librustc_trans/trans/mod.rs b/src/librustc_trans/trans/mod.rs
deleted file mode 100644 (file)
index 1b8bab7..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use llvm::{ContextRef, ModuleRef};
-use middle::cstore::LinkMeta;
-
-pub use self::base::trans_crate;
-pub use self::context::CrateContext;
-pub use self::common::gensym_name;
-pub use self::disr::Disr;
-
-#[macro_use]
-mod macros;
-
-mod adt;
-mod asm;
-mod assert_dep_graph;
-mod attributes;
-mod base;
-mod basic_block;
-mod build;
-mod builder;
-mod cabi;
-mod cabi_aarch64;
-mod cabi_arm;
-mod cabi_asmjs;
-mod cabi_mips;
-mod cabi_powerpc;
-mod cabi_powerpc64;
-mod cabi_x86;
-mod cabi_x86_64;
-mod cabi_x86_win64;
-mod callee;
-mod cleanup;
-mod closure;
-mod common;
-mod consts;
-mod context;
-mod controlflow;
-mod datum;
-mod debuginfo;
-mod declare;
-mod disr;
-mod expr;
-mod foreign;
-mod glue;
-mod inline;
-mod intrinsic;
-mod llrepr;
-mod machine;
-mod _match;
-mod meth;
-mod mir;
-mod monomorphize;
-mod collector;
-mod tvec;
-mod type_;
-mod type_of;
-mod value;
-
-#[derive(Copy, Clone)]
-pub struct ModuleTranslation {
-    pub llcx: ContextRef,
-    pub llmod: ModuleRef,
-}
-
-unsafe impl Send for ModuleTranslation { }
-unsafe impl Sync for ModuleTranslation { }
-
-pub struct CrateTranslation {
-    pub modules: Vec<ModuleTranslation>,
-    pub metadata_module: ModuleTranslation,
-    pub link: LinkMeta,
-    pub metadata: Vec<u8>,
-    pub reachable: Vec<String>,
-    pub no_builtins: bool,
-}
diff --git a/src/librustc_trans/trans/monomorphize.rs b/src/librustc_trans/trans/monomorphize.rs
deleted file mode 100644 (file)
index 867ac9b..0000000
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use back::link::exported_name;
-use llvm::ValueRef;
-use llvm;
-use middle::def_id::DefId;
-use middle::infer::normalize_associated_type;
-use middle::subst;
-use middle::subst::{Subst, Substs};
-use middle::ty::fold::{TypeFolder, TypeFoldable};
-use trans::attributes;
-use trans::base::{trans_enum_variant, push_ctxt, get_item_val};
-use trans::base::trans_fn;
-use trans::base;
-use trans::common::*;
-use trans::declare;
-use trans::foreign;
-use middle::ty::{self, Ty};
-use trans::Disr;
-use rustc::front::map as hir_map;
-
-use rustc_front::hir;
-
-use syntax::abi::Abi;
-use syntax::ast;
-use syntax::attr;
-use syntax::errors;
-use std::hash::{Hasher, Hash, SipHasher};
-
-pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                fn_id: DefId,
-                                psubsts: &'tcx subst::Substs<'tcx>,
-                                ref_id: Option<ast::NodeId>)
-                                -> (ValueRef, Ty<'tcx>, bool) {
-    debug!("monomorphic_fn(\
-            fn_id={:?}, \
-            real_substs={:?}, \
-            ref_id={:?})",
-           fn_id,
-           psubsts,
-           ref_id);
-
-    assert!(!psubsts.types.needs_infer() && !psubsts.types.has_param_types());
-
-    // we can only monomorphize things in this crate (or inlined into it)
-    let fn_node_id = ccx.tcx().map.as_local_node_id(fn_id).unwrap();
-
-    let _icx = push_ctxt("monomorphic_fn");
-
-    let hash_id = MonoId {
-        def: fn_id,
-        params: &psubsts.types
-    };
-
-    let item_ty = ccx.tcx().lookup_item_type(fn_id).ty;
-
-    debug!("monomorphic_fn about to subst into {:?}", item_ty);
-    let mono_ty = apply_param_substs(ccx.tcx(), psubsts, &item_ty);
-    debug!("mono_ty = {:?} (post-substitution)", mono_ty);
-
-    match ccx.monomorphized().borrow().get(&hash_id) {
-        Some(&val) => {
-            debug!("leaving monomorphic fn {}",
-            ccx.tcx().item_path_str(fn_id));
-            return (val, mono_ty, false);
-        }
-        None => ()
-    }
-
-    debug!("monomorphic_fn(\
-            fn_id={:?}, \
-            psubsts={:?}, \
-            hash_id={:?})",
-           fn_id,
-           psubsts,
-           hash_id);
-
-
-    let map_node = errors::expect(
-        ccx.sess().diagnostic(),
-        ccx.tcx().map.find(fn_node_id),
-        || {
-            format!("while monomorphizing {:?}, couldn't find it in \
-                     the item map (may have attempted to monomorphize \
-                     an item defined in a different crate?)",
-                    fn_id)
-        });
-
-    if let hir_map::NodeForeignItem(_) = map_node {
-        let abi = ccx.tcx().map.get_foreign_abi(fn_node_id);
-        if abi != Abi::RustIntrinsic && abi != Abi::PlatformIntrinsic {
-            // Foreign externs don't have to be monomorphized.
-            return (get_item_val(ccx, fn_node_id), mono_ty, true);
-        }
-    }
-
-    ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1);
-
-    let depth;
-    {
-        let mut monomorphizing = ccx.monomorphizing().borrow_mut();
-        depth = match monomorphizing.get(&fn_id) {
-            Some(&d) => d, None => 0
-        };
-
-        debug!("monomorphic_fn: depth for fn_id={:?} is {:?}", fn_id, depth+1);
-
-        // Random cut-off -- code that needs to instantiate the same function
-        // recursively more than thirty times can probably safely be assumed
-        // to be causing an infinite expansion.
-        if depth > ccx.sess().recursion_limit.get() {
-            ccx.sess().span_fatal(ccx.tcx().map.span(fn_node_id),
-                "reached the recursion limit during monomorphization");
-        }
-
-        monomorphizing.insert(fn_id, depth + 1);
-    }
-
-    let hash;
-    let s = {
-        let mut state = SipHasher::new();
-        hash_id.hash(&mut state);
-        mono_ty.hash(&mut state);
-
-        hash = format!("h{}", state.finish());
-        let path = ccx.tcx().map.def_path_from_id(fn_node_id);
-        exported_name(path, &hash[..])
-    };
-
-    debug!("monomorphize_fn mangled to {}", s);
-
-    // This shouldn't need the Option dance.
-    let mut hash_id = Some(hash_id);
-    let mut mk_lldecl = |abi: Abi| {
-        let lldecl = if abi != Abi::Rust {
-            foreign::decl_rust_fn_with_foreign_abi(ccx, mono_ty, &s)
-        } else {
-            // FIXME(nagisa): perhaps needs a more fine grained selection? See
-            // setup_lldecl below.
-            declare::define_internal_rust_fn(ccx, &s, mono_ty)
-        };
-
-        ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl);
-        lldecl
-    };
-    let setup_lldecl = |lldecl, attrs: &[ast::Attribute]| {
-        base::update_linkage(ccx, lldecl, None, base::OriginalTranslation);
-        attributes::from_fn_attrs(ccx, attrs, lldecl);
-
-        let is_first = !ccx.available_monomorphizations().borrow().contains(&s);
-        if is_first {
-            ccx.available_monomorphizations().borrow_mut().insert(s.clone());
-        }
-
-        let trans_everywhere = attr::requests_inline(attrs);
-        if trans_everywhere && !is_first {
-            llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage);
-        }
-
-        // If `true`, then `lldecl` should be given a function body.
-        // Otherwise, it should be left as a declaration of an external
-        // function, with no definition in the current compilation unit.
-        trans_everywhere || is_first
-    };
-
-    let lldecl = match map_node {
-        hir_map::NodeItem(i) => {
-            match *i {
-              hir::Item {
-                  node: hir::ItemFn(ref decl, _, _, abi, _, ref body),
-                  ..
-              } => {
-                  let d = mk_lldecl(abi);
-                  let needs_body = setup_lldecl(d, &i.attrs);
-                  if needs_body {
-                      if abi != Abi::Rust {
-                          foreign::trans_rust_fn_with_foreign_abi(
-                              ccx, &decl, &body, &[], d, psubsts, fn_node_id,
-                              Some(&hash[..]));
-                      } else {
-                          trans_fn(ccx,
-                                   &decl,
-                                   &body,
-                                   d,
-                                   psubsts,
-                                   fn_node_id,
-                                   &i.attrs);
-                      }
-                  }
-
-                  d
-              }
-              _ => {
-                ccx.sess().bug("Can't monomorphize this kind of item")
-              }
-            }
-        }
-        hir_map::NodeVariant(v) => {
-            let variant = inlined_variant_def(ccx, fn_node_id);
-            assert_eq!(v.node.name, variant.name);
-            let d = mk_lldecl(Abi::Rust);
-            attributes::inline(d, attributes::InlineAttr::Hint);
-            trans_enum_variant(ccx, fn_node_id, Disr::from(variant.disr_val), psubsts, d);
-            d
-        }
-        hir_map::NodeImplItem(impl_item) => {
-            match impl_item.node {
-                hir::ImplItemKind::Method(ref sig, ref body) => {
-                    let d = mk_lldecl(Abi::Rust);
-                    let needs_body = setup_lldecl(d, &impl_item.attrs);
-                    if needs_body {
-                        trans_fn(ccx,
-                                 &sig.decl,
-                                 body,
-                                 d,
-                                 psubsts,
-                                 impl_item.id,
-                                 &impl_item.attrs);
-                    }
-                    d
-                }
-                _ => {
-                    ccx.sess().bug(&format!("can't monomorphize a {:?}",
-                                           map_node))
-                }
-            }
-        }
-        hir_map::NodeTraitItem(trait_item) => {
-            match trait_item.node {
-                hir::MethodTraitItem(ref sig, Some(ref body)) => {
-                    let d = mk_lldecl(Abi::Rust);
-                    let needs_body = setup_lldecl(d, &trait_item.attrs);
-                    if needs_body {
-                        trans_fn(ccx,
-                                 &sig.decl,
-                                 body,
-                                 d,
-                                 psubsts,
-                                 trait_item.id,
-                                 &trait_item.attrs);
-                    }
-                    d
-                }
-                _ => {
-                    ccx.sess().bug(&format!("can't monomorphize a {:?}",
-                                           map_node))
-                }
-            }
-        }
-        hir_map::NodeStructCtor(struct_def) => {
-            let d = mk_lldecl(Abi::Rust);
-            attributes::inline(d, attributes::InlineAttr::Hint);
-            if struct_def.is_struct() {
-                panic!("ast-mapped struct didn't have a ctor id")
-            }
-            base::trans_tuple_struct(ccx,
-                                     struct_def.id(),
-                                     psubsts,
-                                     d);
-            d
-        }
-
-        // Ugh -- but this ensures any new variants won't be forgotten
-        hir_map::NodeForeignItem(..) |
-        hir_map::NodeLifetime(..) |
-        hir_map::NodeTyParam(..) |
-        hir_map::NodeExpr(..) |
-        hir_map::NodeStmt(..) |
-        hir_map::NodeBlock(..) |
-        hir_map::NodePat(..) |
-        hir_map::NodeLocal(..) => {
-            ccx.sess().bug(&format!("can't monomorphize a {:?}",
-                                   map_node))
-        }
-    };
-
-    ccx.monomorphizing().borrow_mut().insert(fn_id, depth);
-
-    debug!("leaving monomorphic fn {}", ccx.tcx().item_path_str(fn_id));
-    (lldecl, mono_ty, true)
-}
-
-#[derive(PartialEq, Eq, Hash, Debug)]
-pub struct MonoId<'tcx> {
-    pub def: DefId,
-    pub params: &'tcx subst::VecPerParamSpace<Ty<'tcx>>
-}
-
-/// Monomorphizes a type from the AST by first applying the in-scope
-/// substitutions and then normalizing any associated types.
-pub fn apply_param_substs<'tcx,T>(tcx: &ty::ctxt<'tcx>,
-                                  param_substs: &Substs<'tcx>,
-                                  value: &T)
-                                  -> T
-    where T : TypeFoldable<'tcx>
-{
-    let substituted = value.subst(tcx, param_substs);
-    normalize_associated_type(tcx, &substituted)
-}
-
-
-/// Returns the normalized type of a struct field
-pub fn field_ty<'tcx>(tcx: &ty::ctxt<'tcx>,
-                      param_substs: &Substs<'tcx>,
-                      f: ty::FieldDef<'tcx>)
-                      -> Ty<'tcx>
-{
-    normalize_associated_type(tcx, &f.ty(tcx, param_substs))
-}
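
monomorphic_fn above memoizes each instantiation under a (definition, substitutions) key, derives the mangled symbol from a hash of that key, and aborts once the per-definition depth passes the recursion limit. The following is a toy sketch of that bookkeeping; plain strings stand in for DefId and Substs, and DefaultHasher stands in for the SipHasher-based symbol hash used by the real code.

use std::collections::HashMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

struct Monomorphizer {
    // memoized symbol per (definition, substitutions) instantiation
    instances: HashMap<(String, String), String>,
    // current instantiation depth per definition
    depth: HashMap<String, usize>,
    recursion_limit: usize,
}

impl Monomorphizer {
    fn instantiate(&mut self, def: &str, substs: &str) -> Result<String, String> {
        let key = (def.to_string(), substs.to_string());
        if let Some(sym) = self.instances.get(&key) {
            return Ok(sym.clone()); // already monomorphized: reuse the symbol
        }
        let d = self.depth.entry(def.to_string()).or_insert(0);
        if *d > self.recursion_limit {
            return Err(format!("reached the recursion limit while instantiating {}", def));
        }
        *d += 1;
        // hash the key into the exported symbol name, as the real code does
        let mut state = DefaultHasher::new();
        key.hash(&mut state);
        let sym = format!("{}::h{:016x}", def, state.finish());
        self.instances.insert(key, sym.clone());
        // translation of the function body would happen here
        *self.depth.get_mut(def).unwrap() -= 1;
        Ok(sym)
    }
}

fn main() {
    let mut m = Monomorphizer {
        instances: HashMap::new(),
        depth: HashMap::new(),
        recursion_limit: 30,
    };
    println!("{:?}", m.instantiate("foo", "<u32>"));
    println!("{:?}", m.instantiate("foo", "<u32>")); // second call hits the cache
}
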
diff --git a/src/librustc_trans/trans/tvec.rs b/src/librustc_trans/trans/tvec.rs
deleted file mode 100644 (file)
index d6573e9..0000000
+++ /dev/null
@@ -1,415 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(non_camel_case_types)]
-
-use llvm;
-use llvm::ValueRef;
-use trans::base::*;
-use trans::base;
-use trans::build::*;
-use trans::cleanup;
-use trans::cleanup::CleanupMethods;
-use trans::common::*;
-use trans::consts;
-use trans::datum::*;
-use trans::debuginfo::DebugLoc;
-use trans::expr::{Dest, Ignore, SaveIn};
-use trans::expr;
-use trans::machine::llsize_of_alloc;
-use trans::type_::Type;
-use trans::type_of;
-use middle::ty::{self, Ty};
-
-use rustc_front::hir;
-
-use syntax::ast;
-use syntax::parse::token::InternedString;
-
-#[derive(Copy, Clone)]
-struct VecTypes<'tcx> {
-    unit_ty: Ty<'tcx>,
-    llunit_ty: Type
-}
-
-impl<'tcx> VecTypes<'tcx> {
-    pub fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String {
-        format!("VecTypes {{unit_ty={}, llunit_ty={}}}",
-                self.unit_ty,
-                ccx.tn().type_to_string(self.llunit_ty))
-    }
-}
-
-pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                      expr: &hir::Expr,
-                                      dest: expr::Dest)
-                                      -> Block<'blk, 'tcx> {
-    //!
-    //
-    // [...] allocates a fixed-size array and moves it around "by value".
-    // In this case, it means that the caller has already given us a location
-    // to store the array of the suitable size, so all we have to do is
-    // generate the content.
-
-    debug!("trans_fixed_vstore(expr={:?}, dest={})",
-           expr, dest.to_string(bcx.ccx()));
-
-    let vt = vec_types_from_expr(bcx, expr);
-
-    return match dest {
-        Ignore => write_content(bcx, &vt, expr, expr, dest),
-        SaveIn(lldest) => {
-            // lldest will have type *[T x N], but we want the type *T,
-            // so use GEP to convert:
-            let lldest = StructGEP(bcx, lldest, 0);
-            write_content(bcx, &vt, expr, expr, SaveIn(lldest))
-        }
-    };
-}
-
-/// &[...] allocates memory on the stack and writes the values into it, returning the vector (the
-/// caller must make the reference).  "..." is similar except that the memory can be statically
-/// allocated and we return a reference (strings are always by-ref).
-pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                   slice_expr: &hir::Expr,
-                                   content_expr: &hir::Expr)
-                                   -> DatumBlock<'blk, 'tcx, Expr> {
-    let fcx = bcx.fcx;
-    let ccx = fcx.ccx;
-    let mut bcx = bcx;
-
-    debug!("trans_slice_vec(slice_expr={:?})",
-           slice_expr);
-
-    let vec_ty = node_id_type(bcx, slice_expr.id);
-
-    // Handle the "..." case (returns a slice since strings are always unsized):
-    if let hir::ExprLit(ref lit) = content_expr.node {
-        if let ast::LitKind::Str(ref s, _) = lit.node {
-            let scratch = rvalue_scratch_datum(bcx, vec_ty, "");
-            bcx = trans_lit_str(bcx,
-                                content_expr,
-                                s.clone(),
-                                SaveIn(scratch.val));
-            return DatumBlock::new(bcx, scratch.to_expr_datum());
-        }
-    }
-
-    // Handle the &[...] case:
-    let vt = vec_types_from_expr(bcx, content_expr);
-    let count = elements_required(bcx, content_expr);
-    debug!("    vt={}, count={}", vt.to_string(ccx), count);
-
-    let fixed_ty = bcx.tcx().mk_array(vt.unit_ty, count);
-
-    // Always create an alloca even if zero-sized, to preserve
-    // the non-null invariant of the inner slice ptr
-    let llfixed;
-    // Issue 30018: ensure state is initialized as dropped if necessary.
-    if fcx.type_needs_drop(vt.unit_ty) {
-        llfixed = base::alloc_ty_init(bcx, fixed_ty, InitAlloca::Dropped, "");
-    } else {
-        let uninit = InitAlloca::Uninit("fcx says vt.unit_ty is non-drop");
-        llfixed = base::alloc_ty_init(bcx, fixed_ty, uninit, "");
-        call_lifetime_start(bcx, llfixed);
-    };
-
-    if count > 0 {
-        // Arrange for the backing array to be cleaned up.
-        let cleanup_scope = cleanup::temporary_scope(bcx.tcx(), content_expr.id);
-        fcx.schedule_lifetime_end(cleanup_scope, llfixed);
-        fcx.schedule_drop_mem(cleanup_scope, llfixed, fixed_ty, None);
-
-        // Generate the content into the backing array.
-        // llfixed has type *[T x N], but we want the type *T,
-        // so use GEP to convert
-        bcx = write_content(bcx, &vt, slice_expr, content_expr,
-                            SaveIn(StructGEP(bcx, llfixed, 0)));
-    };
-
-    immediate_rvalue_bcx(bcx, llfixed, vec_ty).to_expr_datumblock()
-}
-
-/// Literal strings translate to slices into static memory.  This is different from
-/// trans_slice_vec() above because it doesn't need to copy the content anywhere.
-pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                 lit_expr: &hir::Expr,
-                                 str_lit: InternedString,
-                                 dest: Dest)
-                                 -> Block<'blk, 'tcx> {
-    debug!("trans_lit_str(lit_expr={:?}, dest={})",
-           lit_expr,
-           dest.to_string(bcx.ccx()));
-
-    match dest {
-        Ignore => bcx,
-        SaveIn(lldest) => {
-            let bytes = str_lit.len();
-            let llbytes = C_uint(bcx.ccx(), bytes);
-            let llcstr = C_cstr(bcx.ccx(), str_lit, false);
-            let llcstr = consts::ptrcast(llcstr, Type::i8p(bcx.ccx()));
-            Store(bcx, llcstr, expr::get_dataptr(bcx, lldest));
-            Store(bcx, llbytes, expr::get_meta(bcx, lldest));
-            bcx
-        }
-    }
-}
-
-fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                             vt: &VecTypes<'tcx>,
-                             vstore_expr: &hir::Expr,
-                             content_expr: &hir::Expr,
-                             dest: Dest)
-                             -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("tvec::write_content");
-    let fcx = bcx.fcx;
-    let mut bcx = bcx;
-
-    debug!("write_content(vt={}, dest={}, vstore_expr={:?})",
-           vt.to_string(bcx.ccx()),
-           dest.to_string(bcx.ccx()),
-           vstore_expr);
-
-    match content_expr.node {
-        hir::ExprLit(ref lit) => {
-            match lit.node {
-                ast::LitKind::Str(ref s, _) => {
-                    match dest {
-                        Ignore => return bcx,
-                        SaveIn(lldest) => {
-                            let bytes = s.len();
-                            let llbytes = C_uint(bcx.ccx(), bytes);
-                            let llcstr = C_cstr(bcx.ccx(), (*s).clone(), false);
-                            base::call_memcpy(bcx,
-                                              lldest,
-                                              llcstr,
-                                              llbytes,
-                                              1);
-                            return bcx;
-                        }
-                    }
-                }
-                _ => {
-                    bcx.tcx().sess.span_bug(content_expr.span,
-                                            "unexpected evec content");
-                }
-            }
-        }
-        hir::ExprVec(ref elements) => {
-            match dest {
-                Ignore => {
-                    for element in elements {
-                        bcx = expr::trans_into(bcx, &element, Ignore);
-                    }
-                }
-
-                SaveIn(lldest) => {
-                    let temp_scope = fcx.push_custom_cleanup_scope();
-                    for (i, element) in elements.iter().enumerate() {
-                        let lleltptr = GEPi(bcx, lldest, &[i]);
-                        debug!("writing index {} with lleltptr={}",
-                               i, bcx.val_to_string(lleltptr));
-                        bcx = expr::trans_into(bcx, &element,
-                                               SaveIn(lleltptr));
-                        let scope = cleanup::CustomScope(temp_scope);
-                        // Issue #30822: mark memory as dropped after running destructor
-                        fcx.schedule_drop_and_fill_mem(scope, lleltptr, vt.unit_ty, None);
-                    }
-                    fcx.pop_custom_cleanup_scope(temp_scope);
-                }
-            }
-            return bcx;
-        }
-        hir::ExprRepeat(ref element, ref count_expr) => {
-            match dest {
-                Ignore => {
-                    return expr::trans_into(bcx, &element, Ignore);
-                }
-                SaveIn(lldest) => {
-                    match bcx.tcx().eval_repeat_count(&count_expr) {
-                        0 => expr::trans_into(bcx, &element, Ignore),
-                        1 => expr::trans_into(bcx, &element, SaveIn(lldest)),
-                        count => {
-                            let elem = unpack_datum!(bcx, expr::trans(bcx, &element));
-                            let bcx = iter_vec_loop(bcx, lldest, vt,
-                                                    C_uint(bcx.ccx(), count),
-                                                    |set_bcx, lleltptr, _| {
-                                                        elem.shallow_copy(set_bcx, lleltptr)
-                                                    });
-                            bcx
-                        }
-                    }
-                }
-            }
-        }
-        _ => {
-            bcx.tcx().sess.span_bug(content_expr.span,
-                                    "unexpected vec content");
-        }
-    }
-}
-
-fn vec_types_from_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, vec_expr: &hir::Expr)
-                                   -> VecTypes<'tcx> {
-    let vec_ty = node_id_type(bcx, vec_expr.id);
-    vec_types(bcx, vec_ty.sequence_element_type(bcx.tcx()))
-}
-
-fn vec_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, unit_ty: Ty<'tcx>)
-                         -> VecTypes<'tcx> {
-    VecTypes {
-        unit_ty: unit_ty,
-        llunit_ty: type_of::type_of(bcx.ccx(), unit_ty)
-    }
-}
-
-fn elements_required(bcx: Block, content_expr: &hir::Expr) -> usize {
-    //! Figure out the number of elements we need to store this content
-
-    match content_expr.node {
-        hir::ExprLit(ref lit) => {
-            match lit.node {
-                ast::LitKind::Str(ref s, _) => s.len(),
-                _ => {
-                    bcx.tcx().sess.span_bug(content_expr.span,
-                                            "unexpected evec content")
-                }
-            }
-        },
-        hir::ExprVec(ref es) => es.len(),
-        hir::ExprRepeat(_, ref count_expr) => {
-            bcx.tcx().eval_repeat_count(&count_expr)
-        }
-        _ => bcx.tcx().sess.span_bug(content_expr.span,
-                                     "unexpected vec content")
-    }
-}
-
-/// Converts a fixed-length vector into the slice pair. The vector should be stored in `llval`
-/// which should be by ref.
-pub fn get_fixed_base_and_len(bcx: Block,
-                              llval: ValueRef,
-                              vec_length: usize)
-                              -> (ValueRef, ValueRef) {
-    let ccx = bcx.ccx();
-
-    let base = expr::get_dataptr(bcx, llval);
-    let len = C_uint(ccx, vec_length);
-    (base, len)
-}
-
-/// Converts a vector into the slice pair.  The vector should be stored in `llval` which should be
-/// by-reference.  If you have a datum, you would probably prefer to call
-/// `Datum::get_base_and_len()` which will handle any conversions for you.
-pub fn get_base_and_len<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                    llval: ValueRef,
-                                    vec_ty: Ty<'tcx>)
-                                    -> (ValueRef, ValueRef) {
-    let ccx = bcx.ccx();
-
-    match vec_ty.sty {
-        ty::TyArray(_, n) => get_fixed_base_and_len(bcx, llval, n),
-        ty::TySlice(_) | ty::TyStr => {
-            let base = Load(bcx, expr::get_dataptr(bcx, llval));
-            let len = Load(bcx, expr::get_meta(bcx, llval));
-            (base, len)
-        }
-
-        // Only used for pattern matching.
-        ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => {
-            let inner = if type_is_sized(bcx.tcx(), ty) {
-                Load(bcx, llval)
-            } else {
-                llval
-            };
-            get_base_and_len(bcx, inner, ty)
-        },
-        _ => ccx.sess().bug("unexpected type in get_base_and_len"),
-    }
-}
-
-fn iter_vec_loop<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
-                                data_ptr: ValueRef,
-                                vt: &VecTypes<'tcx>,
-                                count: ValueRef,
-                                f: F)
-                                -> Block<'blk, 'tcx> where
-    F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
-{
-    let _icx = push_ctxt("tvec::iter_vec_loop");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let fcx = bcx.fcx;
-    let loop_bcx = fcx.new_temp_block("expr_repeat");
-    let next_bcx = fcx.new_temp_block("expr_repeat: next");
-
-    Br(bcx, loop_bcx.llbb, DebugLoc::None);
-
-    let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(),
-                           &[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]);
-
-    let bcx = loop_bcx;
-
-    let lleltptr = if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
-        data_ptr
-    } else {
-        InBoundsGEP(bcx, data_ptr, &[loop_counter])
-    };
-    let bcx = f(bcx, lleltptr, vt.unit_ty);
-    let plusone = Add(bcx, loop_counter, C_uint(bcx.ccx(), 1usize), DebugLoc::None);
-    AddIncomingToPhi(loop_counter, plusone, bcx.llbb);
-
-    let cond_val = ICmp(bcx, llvm::IntULT, plusone, count, DebugLoc::None);
-    CondBr(bcx, cond_val, loop_bcx.llbb, next_bcx.llbb, DebugLoc::None);
-
-    next_bcx
-}
-
-pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
-                                   data_ptr: ValueRef,
-                                   unit_ty: Ty<'tcx>,
-                                   len: ValueRef,
-                                   f: F)
-                                   -> Block<'blk, 'tcx> where
-    F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
-{
-    let _icx = push_ctxt("tvec::iter_vec_raw");
-    let fcx = bcx.fcx;
-
-    let vt = vec_types(bcx, unit_ty);
-
-    if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
-        // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
-        iter_vec_loop(bcx, data_ptr, &vt, len, f)
-    } else {
-        // Calculate the last pointer address we want to handle.
-        let data_end_ptr = InBoundsGEP(bcx, data_ptr, &[len]);
-
-        // Now perform the iteration.
-        let header_bcx = fcx.new_temp_block("iter_vec_loop_header");
-        Br(bcx, header_bcx.llbb, DebugLoc::None);
-        let data_ptr =
-            Phi(header_bcx, val_ty(data_ptr), &[data_ptr], &[bcx.llbb]);
-        let not_yet_at_end =
-            ICmp(header_bcx, llvm::IntULT, data_ptr, data_end_ptr, DebugLoc::None);
-        let body_bcx = fcx.new_temp_block("iter_vec_loop_body");
-        let next_bcx = fcx.new_temp_block("iter_vec_next");
-        CondBr(header_bcx, not_yet_at_end, body_bcx.llbb, next_bcx.llbb, DebugLoc::None);
-        let body_bcx = f(body_bcx, data_ptr, unit_ty);
-        AddIncomingToPhi(data_ptr, InBoundsGEP(body_bcx, data_ptr,
-                                               &[C_int(bcx.ccx(), 1)]),
-                         body_bcx.llbb);
-        Br(body_bcx, header_bcx.llbb, DebugLoc::None);
-        next_bcx
-    }
-}
diff --git a/src/librustc_trans/trans/type_.rs b/src/librustc_trans/trans/type_.rs
deleted file mode 100644 (file)
index 17300f3..0000000
+++ /dev/null
@@ -1,341 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(non_upper_case_globals)]
-
-use llvm;
-use llvm::{TypeRef, Bool, False, True, TypeKind, ValueRef};
-use llvm::{Float, Double, X86_FP80, PPC_FP128, FP128};
-
-use trans::context::CrateContext;
-use util::nodemap::FnvHashMap;
-
-use syntax::ast;
-
-use std::ffi::CString;
-use std::mem;
-use std::ptr;
-use std::cell::RefCell;
-
-use libc::c_uint;
-
-#[derive(Clone, Copy, PartialEq, Debug)]
-#[repr(C)]
-pub struct Type {
-    rf: TypeRef
-}
-
-macro_rules! ty {
-    ($e:expr) => ( Type::from_ref(unsafe { $e }))
-}
-
-/// Wrapper for LLVM TypeRef
-impl Type {
-    #[inline(always)]
-    pub fn from_ref(r: TypeRef) -> Type {
-        Type {
-            rf: r
-        }
-    }
-
-    #[inline(always)] // So it doesn't kill --opt-level=0 builds of the compiler
-    pub fn to_ref(&self) -> TypeRef {
-        self.rf
-    }
-
-    pub fn to_string(self: Type) -> String {
-        llvm::build_string(|s| unsafe {
-            llvm::LLVMWriteTypeToString(self.to_ref(), s);
-        }).expect("non-UTF8 type description from LLVM")
-    }
-
-    pub fn to_ref_slice(slice: &[Type]) -> &[TypeRef] {
-        unsafe { mem::transmute(slice) }
-    }
-
-    pub fn void(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMVoidTypeInContext(ccx.llcx()))
-    }
-
-    pub fn nil(ccx: &CrateContext) -> Type {
-        Type::empty_struct(ccx)
-    }
-
-    pub fn metadata(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMMetadataTypeInContext(ccx.llcx()))
-    }
-
-    pub fn i1(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMInt1TypeInContext(ccx.llcx()))
-    }
-
-    pub fn i8(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMInt8TypeInContext(ccx.llcx()))
-    }
-
-    pub fn i16(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMInt16TypeInContext(ccx.llcx()))
-    }
-
-    pub fn i32(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMInt32TypeInContext(ccx.llcx()))
-    }
-
-    pub fn i64(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMInt64TypeInContext(ccx.llcx()))
-    }
-
-    // Creates an integer type with the given number of bits, e.g. i24
-    pub fn ix(ccx: &CrateContext, num_bits: u64) -> Type {
-        ty!(llvm::LLVMIntTypeInContext(ccx.llcx(), num_bits as c_uint))
-    }
-
-    pub fn f32(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMFloatTypeInContext(ccx.llcx()))
-    }
-
-    pub fn f64(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMDoubleTypeInContext(ccx.llcx()))
-    }
-
-    pub fn bool(ccx: &CrateContext) -> Type {
-        Type::i8(ccx)
-    }
-
-    pub fn char(ccx: &CrateContext) -> Type {
-        Type::i32(ccx)
-    }
-
-    pub fn i8p(ccx: &CrateContext) -> Type {
-        Type::i8(ccx).ptr_to()
-    }
-
-    pub fn int(ccx: &CrateContext) -> Type {
-        match &ccx.tcx().sess.target.target.target_pointer_width[..] {
-            "32" => Type::i32(ccx),
-            "64" => Type::i64(ccx),
-            tws => panic!("Unsupported target word size for int: {}", tws),
-        }
-    }
-
-    pub fn int_from_ty(ccx: &CrateContext, t: ast::IntTy) -> Type {
-        match t {
-            ast::IntTy::Is => ccx.int_type(),
-            ast::IntTy::I8 => Type::i8(ccx),
-            ast::IntTy::I16 => Type::i16(ccx),
-            ast::IntTy::I32 => Type::i32(ccx),
-            ast::IntTy::I64 => Type::i64(ccx)
-        }
-    }
-
-    pub fn uint_from_ty(ccx: &CrateContext, t: ast::UintTy) -> Type {
-        match t {
-            ast::UintTy::Us => ccx.int_type(),
-            ast::UintTy::U8 => Type::i8(ccx),
-            ast::UintTy::U16 => Type::i16(ccx),
-            ast::UintTy::U32 => Type::i32(ccx),
-            ast::UintTy::U64 => Type::i64(ccx)
-        }
-    }
-
-    pub fn float_from_ty(ccx: &CrateContext, t: ast::FloatTy) -> Type {
-        match t {
-            ast::FloatTy::F32 => Type::f32(ccx),
-            ast::FloatTy::F64 => Type::f64(ccx),
-        }
-    }
-
-    pub fn func(args: &[Type], ret: &Type) -> Type {
-        let slice: &[TypeRef] = Type::to_ref_slice(args);
-        ty!(llvm::LLVMFunctionType(ret.to_ref(), slice.as_ptr(),
-                                   args.len() as c_uint, False))
-    }
-
-    pub fn variadic_func(args: &[Type], ret: &Type) -> Type {
-        let slice: &[TypeRef] = Type::to_ref_slice(args);
-        ty!(llvm::LLVMFunctionType(ret.to_ref(), slice.as_ptr(),
-                                   args.len() as c_uint, True))
-    }
-
-    pub fn struct_(ccx: &CrateContext, els: &[Type], packed: bool) -> Type {
-        let els: &[TypeRef] = Type::to_ref_slice(els);
-        ty!(llvm::LLVMStructTypeInContext(ccx.llcx(), els.as_ptr(),
-                                          els.len() as c_uint,
-                                          packed as Bool))
-    }
-
-    pub fn named_struct(ccx: &CrateContext, name: &str) -> Type {
-        let name = CString::new(name).unwrap();
-        ty!(llvm::LLVMStructCreateNamed(ccx.llcx(), name.as_ptr()))
-    }
-
-    pub fn empty_struct(ccx: &CrateContext) -> Type {
-        Type::struct_(ccx, &[], false)
-    }
-
-    pub fn glue_fn(ccx: &CrateContext, t: Type) -> Type {
-        Type::func(&[t], &Type::void(ccx))
-    }
-
-    pub fn array(ty: &Type, len: u64) -> Type {
-        ty!(llvm::LLVMRustArrayType(ty.to_ref(), len))
-    }
-
-    pub fn vector(ty: &Type, len: u64) -> Type {
-        ty!(llvm::LLVMVectorType(ty.to_ref(), len as c_uint))
-    }
-
-    pub fn vec(ccx: &CrateContext, ty: &Type) -> Type {
-        Type::struct_(ccx,
-            &[Type::array(ty, 0), Type::int(ccx)],
-        false)
-    }
-
-    pub fn opaque_vec(ccx: &CrateContext) -> Type {
-        Type::vec(ccx, &Type::i8(ccx))
-    }
-
-    pub fn vtable_ptr(ccx: &CrateContext) -> Type {
-        Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to().ptr_to()
-    }
-
-    pub fn kind(&self) -> TypeKind {
-        unsafe {
-            llvm::LLVMGetTypeKind(self.to_ref())
-        }
-    }
-
-    pub fn set_struct_body(&mut self, els: &[Type], packed: bool) {
-        let slice: &[TypeRef] = Type::to_ref_slice(els);
-        unsafe {
-            llvm::LLVMStructSetBody(self.to_ref(), slice.as_ptr(),
-                                    els.len() as c_uint, packed as Bool)
-        }
-    }
-
-    pub fn ptr_to(&self) -> Type {
-        ty!(llvm::LLVMPointerType(self.to_ref(), 0))
-    }
-
-    pub fn is_aggregate(&self) -> bool {
-        match self.kind() {
-            TypeKind::Struct | TypeKind::Array => true,
-            _ =>  false
-        }
-    }
-
-    pub fn is_packed(&self) -> bool {
-        unsafe {
-            llvm::LLVMIsPackedStruct(self.to_ref()) == True
-        }
-    }
-
-    pub fn element_type(&self) -> Type {
-        unsafe {
-            Type::from_ref(llvm::LLVMGetElementType(self.to_ref()))
-        }
-    }
-
-    /// Return the number of elements in `self` if it is a LLVM vector type.
-    pub fn vector_length(&self) -> usize {
-        unsafe {
-            llvm::LLVMGetVectorSize(self.to_ref()) as usize
-        }
-    }
-
-    pub fn array_length(&self) -> usize {
-        unsafe {
-            llvm::LLVMGetArrayLength(self.to_ref()) as usize
-        }
-    }
-
-    pub fn field_types(&self) -> Vec<Type> {
-        unsafe {
-            let n_elts = llvm::LLVMCountStructElementTypes(self.to_ref()) as usize;
-            if n_elts == 0 {
-                return Vec::new();
-            }
-            let mut elts = vec![Type { rf: ptr::null_mut() }; n_elts];
-            llvm::LLVMGetStructElementTypes(self.to_ref(),
-                                            elts.as_mut_ptr() as *mut TypeRef);
-            elts
-        }
-    }
-
-    pub fn return_type(&self) -> Type {
-        ty!(llvm::LLVMGetReturnType(self.to_ref()))
-    }
-
-    pub fn func_params(&self) -> Vec<Type> {
-        unsafe {
-            let n_args = llvm::LLVMCountParamTypes(self.to_ref()) as usize;
-            let mut args = vec![Type { rf: ptr::null_mut() }; n_args];
-            llvm::LLVMGetParamTypes(self.to_ref(),
-                                    args.as_mut_ptr() as *mut TypeRef);
-            args
-        }
-    }
-
-    pub fn float_width(&self) -> usize {
-        match self.kind() {
-            Float => 32,
-            Double => 64,
-            X86_FP80 => 80,
-            FP128 | PPC_FP128 => 128,
-            _ => panic!("llvm_float_width called on a non-float type")
-        }
-    }
-
-    /// Retrieve the bit width of the integer type `self`.
-    pub fn int_width(&self) -> u64 {
-        unsafe {
-            llvm::LLVMGetIntTypeWidth(self.to_ref()) as u64
-        }
-    }
-}
-
-
-/* Memory-managed object interface to type handles. */
-
-pub struct TypeNames {
-    named_types: RefCell<FnvHashMap<String, TypeRef>>,
-}
-
-impl TypeNames {
-    pub fn new() -> TypeNames {
-        TypeNames {
-            named_types: RefCell::new(FnvHashMap())
-        }
-    }
-
-    pub fn associate_type(&self, s: &str, t: &Type) {
-        assert!(self.named_types.borrow_mut().insert(s.to_string(),
-                                                     t.to_ref()).is_none());
-    }
-
-    pub fn find_type(&self, s: &str) -> Option<Type> {
-        self.named_types.borrow().get(s).map(|x| Type::from_ref(*x))
-    }
-
-    pub fn type_to_string(&self, ty: Type) -> String {
-        ty.to_string()
-    }
-
-    pub fn types_to_str(&self, tys: &[Type]) -> String {
-        let strs: Vec<String> = tys.iter().map(|t| self.type_to_string(*t)).collect();
-        format!("[{}]", strs.join(","))
-    }
-
-    pub fn val_to_string(&self, val: ValueRef) -> String {
-        llvm::build_string(|s| unsafe {
-                llvm::LLVMWriteValueToString(val, s);
-            }).expect("nun-UTF8 value description from LLVM")
-    }
-}
diff --git a/src/librustc_trans/trans/type_of.rs b/src/librustc_trans/trans/type_of.rs
deleted file mode 100644 (file)
index 24a7fd3..0000000
+++ /dev/null
@@ -1,497 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(non_camel_case_types)]
-
-use middle::def_id::DefId;
-use middle::infer;
-use middle::subst;
-use trans::adt;
-use trans::common::*;
-use trans::foreign;
-use trans::machine;
-use middle::ty::{self, Ty, TypeFoldable};
-
-use trans::type_::Type;
-
-use syntax::abi::Abi;
-use syntax::ast;
-
-// LLVM doesn't like objects that are too big. Issue #17913
-fn ensure_array_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                                llet: Type,
-                                                size: machine::llsize,
-                                                scapegoat: Ty<'tcx>) {
-    let esz = machine::llsize_of_alloc(ccx, llet);
-    match esz.checked_mul(size) {
-        Some(n) if n < ccx.obj_size_bound() => {}
-        _ => { ccx.report_overbig_object(scapegoat) }
-    }
-}
-
-pub fn arg_is_indirect<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                 arg_ty: Ty<'tcx>) -> bool {
-    !type_is_immediate(ccx, arg_ty) && !type_is_fat_ptr(ccx.tcx(), arg_ty)
-}
-
-pub fn return_uses_outptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                    ty: Ty<'tcx>) -> bool {
-    arg_is_indirect(ccx, ty)
-}
-
-pub fn type_of_explicit_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                      arg_ty: Ty<'tcx>) -> Type {
-    let llty = arg_type_of(ccx, arg_ty);
-    if arg_is_indirect(ccx, arg_ty) {
-        llty.ptr_to()
-    } else {
-        llty
-    }
-}
-
-/// Yields the types of the "real" arguments for a function using the `RustCall`
-/// ABI by untupling the arguments of the function.
-pub fn untuple_arguments<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                   inputs: &[Ty<'tcx>])
-                                   -> Vec<Ty<'tcx>> {
-    if inputs.is_empty() {
-        return Vec::new()
-    }
-
-    let mut result = Vec::new();
-    for (i, &arg_prior_to_tuple) in inputs.iter().enumerate() {
-        if i < inputs.len() - 1 {
-            result.push(arg_prior_to_tuple);
-        }
-    }
-
-    match inputs[inputs.len() - 1].sty {
-        ty::TyTuple(ref tupled_arguments) => {
-            debug!("untuple_arguments(): untupling arguments");
-            for &tupled_argument in tupled_arguments {
-                result.push(tupled_argument);
-            }
-        }
-        _ => {
-            ccx.tcx().sess.bug("argument to function with \"rust-call\" ABI \
-                                is neither a tuple nor unit")
-        }
-    }
-
-    result
-}
-
-pub fn type_of_rust_fn<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                 llenvironment_type: Option<Type>,
-                                 sig: &ty::FnSig<'tcx>,
-                                 abi: Abi)
-                                 -> Type
-{
-    debug!("type_of_rust_fn(sig={:?},abi={:?})",
-           sig,
-           abi);
-
-    assert!(!sig.variadic); // rust fns are never variadic
-
-    let mut atys: Vec<Type> = Vec::new();
-
-    // First, munge the inputs, if this has the `rust-call` ABI.
-    let inputs_temp;
-    let inputs = if abi == Abi::RustCall {
-        inputs_temp = untuple_arguments(cx, &sig.inputs);
-        &inputs_temp
-    } else {
-        &sig.inputs
-    };
-
-    // Arg 0: Output pointer.
-    // (if the output type is non-immediate)
-    let lloutputtype = match sig.output {
-        ty::FnConverging(output) => {
-            let use_out_pointer = return_uses_outptr(cx, output);
-            let lloutputtype = arg_type_of(cx, output);
-            // Use the output as the actual return value if it's immediate.
-            if use_out_pointer {
-                atys.push(lloutputtype.ptr_to());
-                Type::void(cx)
-            } else if return_type_is_void(cx, output) {
-                Type::void(cx)
-            } else {
-                lloutputtype
-            }
-        }
-        ty::FnDiverging => Type::void(cx)
-    };
-
-    // Arg 1: Environment
-    match llenvironment_type {
-        None => {}
-        Some(llenvironment_type) => atys.push(llenvironment_type),
-    }
-
-    // ... then explicit args.
-    for input in inputs {
-        let arg_ty = type_of_explicit_arg(cx, input);
-
-        if type_is_fat_ptr(cx.tcx(), input) {
-            atys.extend(arg_ty.field_types());
-        } else {
-            atys.push(arg_ty);
-        }
-    }
-
-    Type::func(&atys[..], &lloutputtype)
-}
-
-// Given a function type and a count of ty params, construct an llvm type
-pub fn type_of_fn_from_ty<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fty: Ty<'tcx>) -> Type {
-    match fty.sty {
-        ty::TyBareFn(_, ref f) => {
-            // FIXME(#19925) once fn item types are
-            // zero-sized, we'll need to do something here
-            if f.abi == Abi::Rust || f.abi == Abi::RustCall {
-                let sig = cx.tcx().erase_late_bound_regions(&f.sig);
-                let sig = infer::normalize_associated_type(cx.tcx(), &sig);
-                type_of_rust_fn(cx, None, &sig, f.abi)
-            } else {
-                foreign::lltype_for_foreign_fn(cx, fty)
-            }
-        }
-        _ => {
-            cx.sess().bug("type_of_fn_from_ty given non-closure, non-bare-fn")
-        }
-    }
-}
-
-// A "sizing type" is an LLVM type, the size and alignment of which are
-// guaranteed to be equivalent to what you would get out of `type_of()`. It's
-// useful because:
-//
-// (1) It may be cheaper to compute the sizing type than the full type if all
-//     you're interested in is the size and/or alignment;
-//
-// (2) It won't make any recursive calls to determine the structure of the
-//     type behind pointers. This can help prevent infinite loops for
-//     recursive types. For example, enum types rely on this behavior.
-
-pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
-    if let Some(t) = cx.llsizingtypes().borrow().get(&t).cloned() {
-        return t;
-    }
-
-    debug!("sizing_type_of {:?}", t);
-    let _recursion_lock = cx.enter_type_of(t);
-
-    let llsizingty = match t.sty {
-        _ if !type_is_sized(cx.tcx(), t) => {
-            Type::struct_(cx, &[Type::i8p(cx), Type::i8p(cx)], false)
-        }
-
-        ty::TyBool => Type::bool(cx),
-        ty::TyChar => Type::char(cx),
-        ty::TyInt(t) => Type::int_from_ty(cx, t),
-        ty::TyUint(t) => Type::uint_from_ty(cx, t),
-        ty::TyFloat(t) => Type::float_from_ty(cx, t),
-
-        ty::TyBox(ty) |
-        ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
-        ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
-            if type_is_sized(cx.tcx(), ty) {
-                Type::i8p(cx)
-            } else {
-                Type::struct_(cx, &[Type::i8p(cx), Type::i8p(cx)], false)
-            }
-        }
-
-        ty::TyBareFn(..) => Type::i8p(cx),
-
-        ty::TyArray(ty, size) => {
-            let llty = sizing_type_of(cx, ty);
-            let size = size as u64;
-            ensure_array_fits_in_address_space(cx, llty, size, t);
-            Type::array(&llty, size)
-        }
-
-        ty::TyTuple(ref tys) if tys.is_empty() => {
-            Type::nil(cx)
-        }
-
-        ty::TyTuple(..) | ty::TyEnum(..) | ty::TyClosure(..) => {
-            let repr = adt::represent_type(cx, t);
-            adt::sizing_type_of(cx, &repr, false)
-        }
-
-        ty::TyStruct(..) => {
-            if t.is_simd() {
-                let e = t.simd_type(cx.tcx());
-                if !e.is_machine() {
-                    cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \
-                                              a non-machine element type `{}`",
-                                             t, e))
-                }
-                let llet = type_of(cx, e);
-                let n = t.simd_size(cx.tcx()) as u64;
-                ensure_array_fits_in_address_space(cx, llet, n, t);
-                Type::vector(&llet, n)
-            } else {
-                let repr = adt::represent_type(cx, t);
-                adt::sizing_type_of(cx, &repr, false)
-            }
-        }
-
-        ty::TyProjection(..) | ty::TyInfer(..) | ty::TyParam(..) | ty::TyError => {
-            cx.sess().bug(&format!("fictitious type {:?} in sizing_type_of()",
-                                   t))
-        }
-        ty::TySlice(_) | ty::TyTrait(..) | ty::TyStr => unreachable!()
-    };
-
-    debug!("--> mapped t={:?} to llsizingty={}",
-            t,
-            cx.tn().type_to_string(llsizingty));
-
-    cx.llsizingtypes().borrow_mut().insert(t, llsizingty);
-    llsizingty
-}
-
-pub fn foreign_arg_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
-    if t.is_bool() {
-        Type::i1(cx)
-    } else {
-        type_of(cx, t)
-    }
-}
-
-pub fn arg_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
-    if t.is_bool() {
-        Type::i1(cx)
-    } else if type_is_immediate(cx, t) && type_of(cx, t).is_aggregate() {
-        // We want to pass small aggregates as immediate values, but using an aggregate LLVM type
-        // for this leads to bad optimizations, so its arg type is an appropriately sized integer
-        match machine::llsize_of_alloc(cx, sizing_type_of(cx, t)) {
-            0 => type_of(cx, t),
-            n => Type::ix(cx, n * 8),
-        }
-    } else {
-        type_of(cx, t)
-    }
-}
-
-/// Get the LLVM type corresponding to a Rust type, i.e. `middle::ty::Ty`.
-/// This is the right LLVM type for an alloca containing a value of that type,
-/// and the pointee of an Lvalue Datum (which is always a LLVM pointer).
-/// For unsized types, the returned type is a fat pointer, thus the resulting
-/// LLVM type for a `Trait` Lvalue is `{ i8*, void(i8*)** }*`, which is a double
-/// indirection to the actual data, unlike a `i8` Lvalue, which is just `i8*`.
-/// This is needed due to the treatment of immediate values, as a fat pointer
-/// is too large for it to be placed in an SSA value (by our rules).
-/// For the raw type without fat pointer indirection, see `in_memory_type_of`.
-pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
-    let ty = if !type_is_sized(cx.tcx(), ty) {
-        cx.tcx().mk_imm_ptr(ty)
-    } else {
-        ty
-    };
-    in_memory_type_of(cx, ty)
-}
-
-/// Get the LLVM type corresponding to a Rust type, i.e. `middle::ty::Ty`.
-/// This is the right LLVM type for a field/array element of that type,
-/// and is the same as `type_of` for all Sized types.
-/// Unsized types, however, are represented by a "minimal unit", e.g.
-/// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
-/// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
-/// If the type is an unsized struct, the regular layout is generated,
-/// with the inner-most trailing unsized field using the "minimal unit"
-/// of that field's type - this is useful for taking the address of
-/// that field and ensuring the struct has the right alignment.
-/// For the LLVM type of a value as a whole, see `type_of`.
-/// NB: If you update this, be sure to update `sizing_type_of()` as well.
-pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
-    // Check the cache.
-    if let Some(&llty) = cx.lltypes().borrow().get(&t) {
-        return llty;
-    }
-
-    debug!("type_of {:?}", t);
-
-    assert!(!t.has_escaping_regions());
-
-    // Replace any typedef'd types with their equivalent non-typedef
-    // type. This ensures that all LLVM nominal types that contain
-    // Rust types are defined as the same LLVM types.  If we don't do
-    // this then, e.g. `Option<{myfield: bool}>` would be a different
-    // type than `Option<myrec>`.
-    let t_norm = cx.tcx().erase_regions(&t);
-
-    if t != t_norm {
-        let llty = in_memory_type_of(cx, t_norm);
-        debug!("--> normalized {:?} {:?} to {:?} {:?} llty={}",
-                t,
-                t,
-                t_norm,
-                t_norm,
-                cx.tn().type_to_string(llty));
-        cx.lltypes().borrow_mut().insert(t, llty);
-        return llty;
-    }
-
-    let mut llty = match t.sty {
-      ty::TyBool => Type::bool(cx),
-      ty::TyChar => Type::char(cx),
-      ty::TyInt(t) => Type::int_from_ty(cx, t),
-      ty::TyUint(t) => Type::uint_from_ty(cx, t),
-      ty::TyFloat(t) => Type::float_from_ty(cx, t),
-      ty::TyEnum(def, ref substs) => {
-          // Only create the named struct, but don't fill it in. We
-          // fill it in *after* placing it into the type cache. This
-          // avoids creating more than one copy of the enum when one
-          // of the enum's variants refers to the enum itself.
-          let repr = adt::represent_type(cx, t);
-          let tps = substs.types.get_slice(subst::TypeSpace);
-          let name = llvm_type_name(cx, def.did, tps);
-          adt::incomplete_type_of(cx, &repr, &name[..])
-      }
-      ty::TyClosure(..) => {
-          // Only create the named struct, but don't fill it in. We
-          // fill it in *after* placing it into the type cache.
-          let repr = adt::represent_type(cx, t);
-          // Unboxed closures can have substitutions in all spaces
-          // inherited from their environment, so we use entire
-          // contents of the VecPerParamSpace to construct the llvm
-          // name
-          adt::incomplete_type_of(cx, &repr, "closure")
-      }
-
-      ty::TyBox(ty) |
-      ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
-      ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
-          if !type_is_sized(cx.tcx(), ty) {
-              if let ty::TyStr = ty.sty {
-                  // This means we get a nicer name in the output (str is always
-                  // unsized).
-                  cx.tn().find_type("str_slice").unwrap()
-              } else {
-                  let ptr_ty = in_memory_type_of(cx, ty).ptr_to();
-                  let unsized_part = cx.tcx().struct_tail(ty);
-                  let info_ty = match unsized_part.sty {
-                      ty::TyStr | ty::TyArray(..) | ty::TySlice(_) => {
-                          Type::uint_from_ty(cx, ast::UintTy::Us)
-                      }
-                      ty::TyTrait(_) => Type::vtable_ptr(cx),
-                      _ => panic!("Unexpected type returned from \
-                                   struct_tail: {:?} for ty={:?}",
-                                  unsized_part, ty)
-                  };
-                  Type::struct_(cx, &[ptr_ty, info_ty], false)
-              }
-          } else {
-              in_memory_type_of(cx, ty).ptr_to()
-          }
-      }
-
-      ty::TyArray(ty, size) => {
-          let size = size as u64;
-          // we must use `sizing_type_of` here as the type may
-          // not be fully initialized.
-          let szty = sizing_type_of(cx, ty);
-          ensure_array_fits_in_address_space(cx, szty, size, t);
-
-          let llty = in_memory_type_of(cx, ty);
-          Type::array(&llty, size)
-      }
-
-      // Unsized slice types (and str) have the type of their element, and
-      // traits have the type of u8. This is so that the data pointer inside
-      // fat pointers is of the right type (e.g. for array accesses), even
-      // when taking the address of an unsized field in a struct.
-      ty::TySlice(ty) => in_memory_type_of(cx, ty),
-      ty::TyStr | ty::TyTrait(..) => Type::i8(cx),
-
-      ty::TyBareFn(..) => {
-          type_of_fn_from_ty(cx, t).ptr_to()
-      }
-      ty::TyTuple(ref tys) if tys.is_empty() => Type::nil(cx),
-      ty::TyTuple(..) => {
-          let repr = adt::represent_type(cx, t);
-          adt::type_of(cx, &repr)
-      }
-      ty::TyStruct(def, ref substs) => {
-          if t.is_simd() {
-              let e = t.simd_type(cx.tcx());
-              if !e.is_machine() {
-                  cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \
-                                            a non-machine element type `{}`",
-                                           t, e))
-              }
-              let llet = in_memory_type_of(cx, e);
-              let n = t.simd_size(cx.tcx()) as u64;
-              ensure_array_fits_in_address_space(cx, llet, n, t);
-              Type::vector(&llet, n)
-          } else {
-              // Only create the named struct, but don't fill it in. We fill it
-              // in *after* placing it into the type cache. This prevents
-              // infinite recursion with recursive struct types.
-              let repr = adt::represent_type(cx, t);
-              let tps = substs.types.get_slice(subst::TypeSpace);
-              let name = llvm_type_name(cx, def.did, tps);
-              adt::incomplete_type_of(cx, &repr, &name[..])
-          }
-      }
-
-      ty::TyInfer(..) => cx.sess().bug("type_of with TyInfer"),
-      ty::TyProjection(..) => cx.sess().bug("type_of with TyProjection"),
-      ty::TyParam(..) => cx.sess().bug("type_of with ty_param"),
-      ty::TyError => cx.sess().bug("type_of with TyError"),
-    };
-
-    debug!("--> mapped t={:?} to llty={}",
-            t,
-            cx.tn().type_to_string(llty));
-
-    cx.lltypes().borrow_mut().insert(t, llty);
-
-    // If this was an enum or struct, fill in the type now.
-    match t.sty {
-        ty::TyEnum(..) | ty::TyStruct(..) | ty::TyClosure(..)
-                if !t.is_simd() => {
-            let repr = adt::represent_type(cx, t);
-            adt::finish_type_of(cx, &repr, &mut llty);
-        }
-        _ => ()
-    }
-
-    llty
-}
-
-pub fn align_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>)
-                          -> machine::llalign {
-    let llty = sizing_type_of(cx, t);
-    machine::llalign_of_min(cx, llty)
-}
-
-fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                            did: DefId,
-                            tps: &[Ty<'tcx>])
-                            -> String {
-    let base = cx.tcx().item_path_str(did);
-    let strings: Vec<String> = tps.iter().map(|t| t.to_string()).collect();
-    let tstr = if strings.is_empty() {
-        base
-    } else {
-        format!("{}<{}>", base, strings.join(", "))
-    };
-
-    if did.krate == 0 {
-        tstr
-    } else {
-        format!("{}.{}", did.krate, tstr)
-    }
-}
diff --git a/src/librustc_trans/trans/value.rs b/src/librustc_trans/trans/value.rs
deleted file mode 100644 (file)
index bc71278..0000000
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use llvm;
-use llvm::{UseRef, ValueRef};
-use trans::basic_block::BasicBlock;
-use trans::common::Block;
-use libc::c_uint;
-
-#[derive(Copy, Clone)]
-pub struct Value(pub ValueRef);
-
-macro_rules! opt_val { ($e:expr) => (
-    unsafe {
-        match $e {
-            p if !p.is_null() => Some(Value(p)),
-            _ => None
-        }
-    }
-) }
-
-/// Wrapper for LLVM ValueRef
-impl Value {
-    /// Returns the native ValueRef
-    pub fn get(&self) -> ValueRef {
-        let Value(v) = *self; v
-    }
-
-    /// Returns the BasicBlock that contains this value
-    pub fn get_parent(self) -> Option<BasicBlock> {
-        unsafe {
-            match llvm::LLVMGetInstructionParent(self.get()) {
-                p if !p.is_null() => Some(BasicBlock(p)),
-                _ => None
-            }
-        }
-    }
-
-    /// Removes this value from its containing BasicBlock
-    pub fn erase_from_parent(self) {
-        unsafe {
-            llvm::LLVMInstructionEraseFromParent(self.get());
-        }
-    }
-
-    /// Returns the single dominating store to this value, if any
-    /// This only performs a search for a trivially dominating store. The store
-    /// must be the only user of this value, and there must not be any conditional
-    /// branches between the store and the given block.
-    pub fn get_dominating_store(self, bcx: Block) -> Option<Value> {
-        match self.get_single_user().and_then(|user| user.as_store_inst()) {
-            Some(store) => {
-                store.get_parent().and_then(|store_bb| {
-                    let mut bb = BasicBlock(bcx.llbb);
-                    let mut ret = Some(store);
-                    while bb.get() != store_bb.get() {
-                        match bb.get_single_predecessor() {
-                            Some(pred) => bb = pred,
-                            None => { ret = None; break }
-                        }
-                    }
-                    ret
-                })
-            }
-            _ => None
-        }
-    }
-
-    /// Returns the first use of this value, if any
-    pub fn get_first_use(self) -> Option<Use> {
-        unsafe {
-            match llvm::LLVMGetFirstUse(self.get()) {
-                u if !u.is_null() => Some(Use(u)),
-                _ => None
-            }
-        }
-    }
-
-    /// Tests if there are no uses of this value
-    pub fn has_no_uses(self) -> bool {
-        self.get_first_use().is_none()
-    }
-
-    /// Returns the single user of this value
-    /// If there are no users or multiple users, this returns None
-    pub fn get_single_user(self) -> Option<Value> {
-        let mut iter = self.user_iter();
-        match (iter.next(), iter.next()) {
-            (Some(first), None) => Some(first),
-            _ => None
-        }
-    }
-
-    /// Returns an iterator for the users of this value
-    pub fn user_iter(self) -> Users {
-        Users {
-            next: self.get_first_use()
-        }
-    }
-
-    /// Returns the requested operand of this instruction
-    /// Returns None, if there's no operand at the given index
-    pub fn get_operand(self, i: usize) -> Option<Value> {
-        opt_val!(llvm::LLVMGetOperand(self.get(), i as c_uint))
-    }
-
-    /// Returns the Store represented by this value, if any
-    pub fn as_store_inst(self) -> Option<Value> {
-        opt_val!(llvm::LLVMIsAStoreInst(self.get()))
-    }
-
-    /// Tests if this value is a terminator instruction
-    pub fn is_a_terminator_inst(self) -> bool {
-        unsafe {
-            !llvm::LLVMIsATerminatorInst(self.get()).is_null()
-        }
-    }
-}
-
-/// Wrapper for LLVM UseRef
-#[derive(Copy, Clone)]
-pub struct Use(UseRef);
-
-impl Use {
-    pub fn get(&self) -> UseRef {
-        let Use(v) = *self; v
-    }
-
-    pub fn get_user(self) -> Value {
-        unsafe {
-            Value(llvm::LLVMGetUser(self.get()))
-        }
-    }
-
-    pub fn get_next_use(self) -> Option<Use> {
-        unsafe {
-            match llvm::LLVMGetNextUse(self.get()) {
-                u if !u.is_null() => Some(Use(u)),
-                _ => None
-            }
-        }
-    }
-}
-
-/// Iterator for the users of a value
-pub struct Users {
-    next: Option<Use>
-}
-
-impl Iterator for Users {
-    type Item = Value;
-
-    fn next(&mut self) -> Option<Value> {
-        let current = self.next;
-
-        self.next = current.and_then(|u| u.get_next_use());
-
-        current.map(|u| u.get_user())
-    }
-}
diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs
new file mode 100644 (file)
index 0000000..f5b9bef
--- /dev/null
@@ -0,0 +1,395 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(non_camel_case_types)]
+
+use llvm;
+use llvm::ValueRef;
+use base::*;
+use base;
+use build::*;
+use cleanup;
+use cleanup::CleanupMethods;
+use common::*;
+use consts;
+use datum::*;
+use debuginfo::DebugLoc;
+use expr::{Dest, Ignore, SaveIn};
+use expr;
+use machine::llsize_of_alloc;
+use type_::Type;
+use type_of;
+use value::Value;
+use rustc::ty::{self, Ty};
+
+use rustc::hir;
+use rustc_const_eval::eval_repeat_count;
+
+use syntax::ast;
+use syntax::parse::token::InternedString;
+
+#[derive(Copy, Clone, Debug)]
+struct VecTypes<'tcx> {
+    unit_ty: Ty<'tcx>,
+    llunit_ty: Type
+}
+
+pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                      expr: &hir::Expr,
+                                      dest: expr::Dest)
+                                      -> Block<'blk, 'tcx> {
+    //!
+    //
+    // [...] allocates a fixed-size array and moves it around "by value".
+    // In this case, it means that the caller has already given us a location
+    // to store the array of the suitable size, so all we have to do is
+    // generate the content.
+
+    debug!("trans_fixed_vstore(expr={:?}, dest={:?})", expr, dest);
+
+    let vt = vec_types_from_expr(bcx, expr);
+
+    return match dest {
+        Ignore => write_content(bcx, &vt, expr, expr, dest),
+        SaveIn(lldest) => {
+            // lldest will have type *[T x N], but we want the type *T,
+            // so use GEP to convert:
+            let lldest = StructGEP(bcx, lldest, 0);
+            write_content(bcx, &vt, expr, expr, SaveIn(lldest))
+        }
+    };
+}
+
+/// &[...] allocates memory on the stack and writes the values into it, returning the vector (the
+/// caller must make the reference).  "..." is similar except that the memory can be statically
+/// allocated and we return a reference (strings are always by-ref).
+pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                   slice_expr: &hir::Expr,
+                                   content_expr: &hir::Expr)
+                                   -> DatumBlock<'blk, 'tcx, Expr> {
+    let fcx = bcx.fcx;
+    let mut bcx = bcx;
+
+    debug!("trans_slice_vec(slice_expr={:?})",
+           slice_expr);
+
+    let vec_ty = node_id_type(bcx, slice_expr.id);
+
+    // Handle the "..." case (returns a slice since strings are always unsized):
+    if let hir::ExprLit(ref lit) = content_expr.node {
+        if let ast::LitKind::Str(ref s, _) = lit.node {
+            let scratch = rvalue_scratch_datum(bcx, vec_ty, "");
+            bcx = trans_lit_str(bcx,
+                                content_expr,
+                                s.clone(),
+                                SaveIn(scratch.val));
+            return DatumBlock::new(bcx, scratch.to_expr_datum());
+        }
+    }
+
+    // Handle the &[...] case:
+    let vt = vec_types_from_expr(bcx, content_expr);
+    let count = elements_required(bcx, content_expr);
+    debug!("    vt={:?}, count={}", vt, count);
+
+    let fixed_ty = bcx.tcx().mk_array(vt.unit_ty, count);
+
+    // Always create an alloca even if zero-sized, to preserve
+    // the non-null invariant of the inner slice ptr
+    let llfixed;
+    // Issue 30018: ensure state is initialized as dropped if necessary.
+    if fcx.type_needs_drop(vt.unit_ty) {
+        llfixed = base::alloc_ty_init(bcx, fixed_ty, InitAlloca::Dropped, "");
+    } else {
+        let uninit = InitAlloca::Uninit("fcx says vt.unit_ty is non-drop");
+        llfixed = base::alloc_ty_init(bcx, fixed_ty, uninit, "");
+        call_lifetime_start(bcx, llfixed);
+    };
+
+    if count > 0 {
+        // Arrange for the backing array to be cleaned up.
+        let cleanup_scope = cleanup::temporary_scope(bcx.tcx(), content_expr.id);
+        fcx.schedule_lifetime_end(cleanup_scope, llfixed);
+        fcx.schedule_drop_mem(cleanup_scope, llfixed, fixed_ty, None);
+
+        // Generate the content into the backing array.
+        // llfixed has type *[T x N], but we want the type *T,
+        // so use GEP to convert
+        bcx = write_content(bcx, &vt, slice_expr, content_expr,
+                            SaveIn(StructGEP(bcx, llfixed, 0)));
+    };
+
+    immediate_rvalue_bcx(bcx, llfixed, vec_ty).to_expr_datumblock()
+}
+
+/// Literal strings translate to slices into static memory.  This is different from
+/// trans_slice_vstore() above because it doesn't need to copy the content anywhere.
+pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                 lit_expr: &hir::Expr,
+                                 str_lit: InternedString,
+                                 dest: Dest)
+                                 -> Block<'blk, 'tcx> {
+    debug!("trans_lit_str(lit_expr={:?}, dest={:?})", lit_expr, dest);
+
+    match dest {
+        Ignore => bcx,
+        SaveIn(lldest) => {
+            let bytes = str_lit.len();
+            let llbytes = C_uint(bcx.ccx(), bytes);
+            let llcstr = C_cstr(bcx.ccx(), str_lit, false);
+            let llcstr = consts::ptrcast(llcstr, Type::i8p(bcx.ccx()));
+            Store(bcx, llcstr, expr::get_dataptr(bcx, lldest));
+            Store(bcx, llbytes, expr::get_meta(bcx, lldest));
+            bcx
+        }
+    }
+}
+
+fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                             vt: &VecTypes<'tcx>,
+                             vstore_expr: &hir::Expr,
+                             content_expr: &hir::Expr,
+                             dest: Dest)
+                             -> Block<'blk, 'tcx> {
+    let _icx = push_ctxt("tvec::write_content");
+    let fcx = bcx.fcx;
+    let mut bcx = bcx;
+
+    debug!("write_content(vt={:?}, dest={:?}, vstore_expr={:?})",
+           vt, dest, vstore_expr);
+
+    match content_expr.node {
+        hir::ExprLit(ref lit) => {
+            match lit.node {
+                ast::LitKind::Str(ref s, _) => {
+                    match dest {
+                        Ignore => return bcx,
+                        SaveIn(lldest) => {
+                            let bytes = s.len();
+                            let llbytes = C_uint(bcx.ccx(), bytes);
+                            let llcstr = C_cstr(bcx.ccx(), (*s).clone(), false);
+                            if !bcx.unreachable.get() {
+                                base::call_memcpy(&B(bcx), lldest, llcstr, llbytes, 1);
+                            }
+                            return bcx;
+                        }
+                    }
+                }
+                _ => {
+                    span_bug!(content_expr.span, "unexpected evec content");
+                }
+            }
+        }
+        hir::ExprVec(ref elements) => {
+            match dest {
+                Ignore => {
+                    for element in elements {
+                        bcx = expr::trans_into(bcx, &element, Ignore);
+                    }
+                }
+
+                SaveIn(lldest) => {
+                    let temp_scope = fcx.push_custom_cleanup_scope();
+                    for (i, element) in elements.iter().enumerate() {
+                        let lleltptr = GEPi(bcx, lldest, &[i]);
+                        debug!("writing index {} with lleltptr={:?}",
+                               i, Value(lleltptr));
+                        bcx = expr::trans_into(bcx, &element,
+                                               SaveIn(lleltptr));
+                        let scope = cleanup::CustomScope(temp_scope);
+                        // Issue #30822: mark memory as dropped after running destructor
+                        fcx.schedule_drop_and_fill_mem(scope, lleltptr, vt.unit_ty, None);
+                    }
+                    fcx.pop_custom_cleanup_scope(temp_scope);
+                }
+            }
+            return bcx;
+        }
+        hir::ExprRepeat(ref element, ref count_expr) => {
+            match dest {
+                Ignore => {
+                    return expr::trans_into(bcx, &element, Ignore);
+                }
+                SaveIn(lldest) => {
+                    match eval_repeat_count(bcx.tcx(), &count_expr) {
+                        0 => expr::trans_into(bcx, &element, Ignore),
+                        1 => expr::trans_into(bcx, &element, SaveIn(lldest)),
+                        count => {
+                            let elem = unpack_datum!(bcx, expr::trans(bcx, &element));
+                            let bcx = iter_vec_loop(bcx, lldest, vt,
+                                                    C_uint(bcx.ccx(), count),
+                                                    |set_bcx, lleltptr, _| {
+                                                        elem.shallow_copy(set_bcx, lleltptr)
+                                                    });
+                            bcx
+                        }
+                    }
+                }
+            }
+        }
+        _ => {
+            span_bug!(content_expr.span, "unexpected vec content");
+        }
+    }
+}
+
+fn vec_types_from_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, vec_expr: &hir::Expr)
+                                   -> VecTypes<'tcx> {
+    let vec_ty = node_id_type(bcx, vec_expr.id);
+    vec_types(bcx, vec_ty.sequence_element_type(bcx.tcx()))
+}
+
+fn vec_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, unit_ty: Ty<'tcx>)
+                         -> VecTypes<'tcx> {
+    VecTypes {
+        unit_ty: unit_ty,
+        llunit_ty: type_of::type_of(bcx.ccx(), unit_ty)
+    }
+}
+
+fn elements_required(bcx: Block, content_expr: &hir::Expr) -> usize {
+    //! Figure out the number of elements we need to store this content
+
+    match content_expr.node {
+        hir::ExprLit(ref lit) => {
+            match lit.node {
+                ast::LitKind::Str(ref s, _) => s.len(),
+                _ => {
+                    span_bug!(content_expr.span, "unexpected evec content")
+                }
+            }
+        },
+        hir::ExprVec(ref es) => es.len(),
+        hir::ExprRepeat(_, ref count_expr) => {
+            eval_repeat_count(bcx.tcx(), &count_expr)
+        }
+        _ => span_bug!(content_expr.span, "unexpected vec content")
+    }
+}
+
+/// Converts a fixed-length vector into the slice pair. The vector should be stored in `llval`
+/// which should be by ref.
+pub fn get_fixed_base_and_len(bcx: Block,
+                              llval: ValueRef,
+                              vec_length: usize)
+                              -> (ValueRef, ValueRef) {
+    let ccx = bcx.ccx();
+
+    let base = expr::get_dataptr(bcx, llval);
+    let len = C_uint(ccx, vec_length);
+    (base, len)
+}
+
+/// Converts a vector into the slice pair.  The vector should be stored in `llval` which should be
+/// by-reference.  If you have a datum, you would probably prefer to call
+/// `Datum::get_base_and_len()` which will handle any conversions for you.
+pub fn get_base_and_len<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                    llval: ValueRef,
+                                    vec_ty: Ty<'tcx>)
+                                    -> (ValueRef, ValueRef) {
+    match vec_ty.sty {
+        ty::TyArray(_, n) => get_fixed_base_and_len(bcx, llval, n),
+        ty::TySlice(_) | ty::TyStr => {
+            let base = Load(bcx, expr::get_dataptr(bcx, llval));
+            let len = Load(bcx, expr::get_meta(bcx, llval));
+            (base, len)
+        }
+
+        // Only used for pattern matching.
+        ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => {
+            let inner = if type_is_sized(bcx.tcx(), ty) {
+                Load(bcx, llval)
+            } else {
+                llval
+            };
+            get_base_and_len(bcx, inner, ty)
+        },
+        _ => bug!("unexpected type in get_base_and_len"),
+    }
+}
+
+fn iter_vec_loop<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
+                                data_ptr: ValueRef,
+                                vt: &VecTypes<'tcx>,
+                                count: ValueRef,
+                                f: F)
+                                -> Block<'blk, 'tcx> where
+    F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
+{
+    let _icx = push_ctxt("tvec::iter_vec_loop");
+
+    if bcx.unreachable.get() {
+        return bcx;
+    }
+
+    let fcx = bcx.fcx;
+    let loop_bcx = fcx.new_temp_block("expr_repeat");
+    let next_bcx = fcx.new_temp_block("expr_repeat: next");
+
+    Br(bcx, loop_bcx.llbb, DebugLoc::None);
+
+    let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(),
+                           &[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]);
+
+    let bcx = loop_bcx;
+
+    let lleltptr = if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
+        data_ptr
+    } else {
+        InBoundsGEP(bcx, data_ptr, &[loop_counter])
+    };
+    let bcx = f(bcx, lleltptr, vt.unit_ty);
+    let plusone = Add(bcx, loop_counter, C_uint(bcx.ccx(), 1usize), DebugLoc::None);
+    AddIncomingToPhi(loop_counter, plusone, bcx.llbb);
+
+    let cond_val = ICmp(bcx, llvm::IntULT, plusone, count, DebugLoc::None);
+    CondBr(bcx, cond_val, loop_bcx.llbb, next_bcx.llbb, DebugLoc::None);
+
+    next_bcx
+}
+
+pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
+                                   data_ptr: ValueRef,
+                                   unit_ty: Ty<'tcx>,
+                                   len: ValueRef,
+                                   f: F)
+                                   -> Block<'blk, 'tcx> where
+    F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
+{
+    let _icx = push_ctxt("tvec::iter_vec_raw");
+    let fcx = bcx.fcx;
+
+    let vt = vec_types(bcx, unit_ty);
+
+    if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
+        // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
+        iter_vec_loop(bcx, data_ptr, &vt, len, f)
+    } else {
+        // Calculate the last pointer address we want to handle.
+        let data_end_ptr = InBoundsGEP(bcx, data_ptr, &[len]);
+
+        // Now perform the iteration.
+        let header_bcx = fcx.new_temp_block("iter_vec_loop_header");
+        Br(bcx, header_bcx.llbb, DebugLoc::None);
+        let data_ptr =
+            Phi(header_bcx, val_ty(data_ptr), &[data_ptr], &[bcx.llbb]);
+        let not_yet_at_end =
+            ICmp(header_bcx, llvm::IntULT, data_ptr, data_end_ptr, DebugLoc::None);
+        let body_bcx = fcx.new_temp_block("iter_vec_loop_body");
+        let next_bcx = fcx.new_temp_block("iter_vec_next");
+        CondBr(header_bcx, not_yet_at_end, body_bcx.llbb, next_bcx.llbb, DebugLoc::None);
+        let body_bcx = f(body_bcx, data_ptr, unit_ty);
+        AddIncomingToPhi(data_ptr, InBoundsGEP(body_bcx, data_ptr,
+                                               &[C_int(bcx.ccx(), 1)]),
+                         body_bcx.llbb);
+        Br(body_bcx, header_bcx.llbb, DebugLoc::None);
+        next_bcx
+    }
+}
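Editor's note: the zero-size special case in `iter_vec_raw` above exists because pointer arithmetic does not advance for zero-sized elements. A minimal standalone sketch of that fact in ordinary Rust (illustration only, not part of this patch):

fn main() {
    // With a zero-sized element type, offsetting the data pointer is a no-op:
    // the computed end pointer equals the start pointer, so the usual
    // `ptr < end` loop would run zero times instead of `len` times. That is
    // why iter_vec_raw falls back to the counter-based iter_vec_loop (#9890).
    let v = [(); 4];
    let base = v.as_ptr();
    let advanced = unsafe { base.add(3) };
    assert_eq!(base, advanced); // every "element" shares one address
    println!("stride of a zero-sized element: {} bytes", std::mem::size_of::<()>());
}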
diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs
new file mode 100644 (file)
index 0000000..35a60cd
--- /dev/null
@@ -0,0 +1,324 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(non_upper_case_globals)]
+
+use llvm;
+use llvm::{TypeRef, Bool, False, True, TypeKind};
+use llvm::{Float, Double, X86_FP80, PPC_FP128, FP128};
+
+use context::CrateContext;
+use util::nodemap::FnvHashMap;
+
+use syntax::ast;
+
+use std::ffi::CString;
+use std::fmt;
+use std::mem;
+use std::ptr;
+use std::cell::RefCell;
+
+use libc::c_uint;
+
+#[derive(Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct Type {
+    rf: TypeRef
+}
+
+impl fmt::Debug for Type {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.write_str(&llvm::build_string(|s| unsafe {
+            llvm::LLVMWriteTypeToString(self.to_ref(), s);
+        }).expect("non-UTF8 type description from LLVM"))
+    }
+}
+
+macro_rules! ty {
+    ($e:expr) => ( Type::from_ref(unsafe { $e }))
+}
+
+/// Wrapper for LLVM TypeRef
+impl Type {
+    #[inline(always)]
+    pub fn from_ref(r: TypeRef) -> Type {
+        Type {
+            rf: r
+        }
+    }
+
+    #[inline(always)] // So it doesn't kill --opt-level=0 builds of the compiler
+    pub fn to_ref(&self) -> TypeRef {
+        self.rf
+    }
+
+    pub fn to_ref_slice(slice: &[Type]) -> &[TypeRef] {
+        unsafe { mem::transmute(slice) }
+    }
+
+    pub fn void(ccx: &CrateContext) -> Type {
+        ty!(llvm::LLVMVoidTypeInContext(ccx.llcx()))
+    }
+
+    pub fn nil(ccx: &CrateContext) -> Type {
+        Type::empty_struct(ccx)
+    }
+
+    pub fn metadata(ccx: &CrateContext) -> Type {
+        ty!(llvm::LLVMMetadataTypeInContext(ccx.llcx()))
+    }
+
+    pub fn i1(ccx: &CrateContext) -> Type {
+        ty!(llvm::LLVMInt1TypeInContext(ccx.llcx()))
+    }
+
+    pub fn i8(ccx: &CrateContext) -> Type {
+        ty!(llvm::LLVMInt8TypeInContext(ccx.llcx()))
+    }
+
+    pub fn i16(ccx: &CrateContext) -> Type {
+        ty!(llvm::LLVMInt16TypeInContext(ccx.llcx()))
+    }
+
+    pub fn i32(ccx: &CrateContext) -> Type {
+        ty!(llvm::LLVMInt32TypeInContext(ccx.llcx()))
+    }
+
+    pub fn i64(ccx: &CrateContext) -> Type {
+        ty!(llvm::LLVMInt64TypeInContext(ccx.llcx()))
+    }
+
+    // Creates an integer type with the given number of bits, e.g. i24
+    pub fn ix(ccx: &CrateContext, num_bits: u64) -> Type {
+        ty!(llvm::LLVMIntTypeInContext(ccx.llcx(), num_bits as c_uint))
+    }
+
+    pub fn f32(ccx: &CrateContext) -> Type {
+        ty!(llvm::LLVMFloatTypeInContext(ccx.llcx()))
+    }
+
+    pub fn f64(ccx: &CrateContext) -> Type {
+        ty!(llvm::LLVMDoubleTypeInContext(ccx.llcx()))
+    }
+
+    pub fn bool(ccx: &CrateContext) -> Type {
+        Type::i8(ccx)
+    }
+
+    pub fn char(ccx: &CrateContext) -> Type {
+        Type::i32(ccx)
+    }
+
+    pub fn i8p(ccx: &CrateContext) -> Type {
+        Type::i8(ccx).ptr_to()
+    }
+
+    pub fn int(ccx: &CrateContext) -> Type {
+        match &ccx.tcx().sess.target.target.target_pointer_width[..] {
+            "32" => Type::i32(ccx),
+            "64" => Type::i64(ccx),
+            tws => bug!("Unsupported target word size for int: {}", tws),
+        }
+    }
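Editor's note: the width choices encoded by `Type::bool`, `Type::char` and `Type::int` above can be cross-checked from plain Rust; the sketch below is illustrative and not compiler code:

fn main() {
    // These observable sizes are what the constructors above encode on the
    // LLVM side: usize follows the target pointer width, bool is stored as
    // an i8, and char as an i32.
    println!("usize: {} bits", std::mem::size_of::<usize>() * 8);
    println!("bool:  {} byte(s)", std::mem::size_of::<bool>());
    println!("char:  {} byte(s)", std::mem::size_of::<char>());
}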
+
+    pub fn int_from_ty(ccx: &CrateContext, t: ast::IntTy) -> Type {
+        match t {
+            ast::IntTy::Is => ccx.int_type(),
+            ast::IntTy::I8 => Type::i8(ccx),
+            ast::IntTy::I16 => Type::i16(ccx),
+            ast::IntTy::I32 => Type::i32(ccx),
+            ast::IntTy::I64 => Type::i64(ccx)
+        }
+    }
+
+    pub fn uint_from_ty(ccx: &CrateContext, t: ast::UintTy) -> Type {
+        match t {
+            ast::UintTy::Us => ccx.int_type(),
+            ast::UintTy::U8 => Type::i8(ccx),
+            ast::UintTy::U16 => Type::i16(ccx),
+            ast::UintTy::U32 => Type::i32(ccx),
+            ast::UintTy::U64 => Type::i64(ccx)
+        }
+    }
+
+    pub fn float_from_ty(ccx: &CrateContext, t: ast::FloatTy) -> Type {
+        match t {
+            ast::FloatTy::F32 => Type::f32(ccx),
+            ast::FloatTy::F64 => Type::f64(ccx),
+        }
+    }
+
+    pub fn func(args: &[Type], ret: &Type) -> Type {
+        let slice: &[TypeRef] = Type::to_ref_slice(args);
+        ty!(llvm::LLVMFunctionType(ret.to_ref(), slice.as_ptr(),
+                                   args.len() as c_uint, False))
+    }
+
+    pub fn variadic_func(args: &[Type], ret: &Type) -> Type {
+        let slice: &[TypeRef] = Type::to_ref_slice(args);
+        ty!(llvm::LLVMFunctionType(ret.to_ref(), slice.as_ptr(),
+                                   args.len() as c_uint, True))
+    }
+
+    pub fn struct_(ccx: &CrateContext, els: &[Type], packed: bool) -> Type {
+        let els: &[TypeRef] = Type::to_ref_slice(els);
+        ty!(llvm::LLVMStructTypeInContext(ccx.llcx(), els.as_ptr(),
+                                          els.len() as c_uint,
+                                          packed as Bool))
+    }
+
+    pub fn named_struct(ccx: &CrateContext, name: &str) -> Type {
+        let name = CString::new(name).unwrap();
+        ty!(llvm::LLVMStructCreateNamed(ccx.llcx(), name.as_ptr()))
+    }
+
+    pub fn empty_struct(ccx: &CrateContext) -> Type {
+        Type::struct_(ccx, &[], false)
+    }
+
+    pub fn array(ty: &Type, len: u64) -> Type {
+        ty!(llvm::LLVMRustArrayType(ty.to_ref(), len))
+    }
+
+    pub fn vector(ty: &Type, len: u64) -> Type {
+        ty!(llvm::LLVMVectorType(ty.to_ref(), len as c_uint))
+    }
+
+    pub fn vec(ccx: &CrateContext, ty: &Type) -> Type {
+        Type::struct_(ccx,
+            &[Type::array(ty, 0), Type::int(ccx)],
+        false)
+    }
+
+    pub fn opaque_vec(ccx: &CrateContext) -> Type {
+        Type::vec(ccx, &Type::i8(ccx))
+    }
+
+    pub fn vtable_ptr(ccx: &CrateContext) -> Type {
+        Type::func(&[Type::i8p(ccx)], &Type::void(ccx)).ptr_to().ptr_to()
+    }
+
+    pub fn kind(&self) -> TypeKind {
+        unsafe {
+            llvm::LLVMGetTypeKind(self.to_ref())
+        }
+    }
+
+    pub fn set_struct_body(&mut self, els: &[Type], packed: bool) {
+        let slice: &[TypeRef] = Type::to_ref_slice(els);
+        unsafe {
+            llvm::LLVMStructSetBody(self.to_ref(), slice.as_ptr(),
+                                    els.len() as c_uint, packed as Bool)
+        }
+    }
+
+    pub fn ptr_to(&self) -> Type {
+        ty!(llvm::LLVMPointerType(self.to_ref(), 0))
+    }
+
+    pub fn is_aggregate(&self) -> bool {
+        match self.kind() {
+            TypeKind::Struct | TypeKind::Array => true,
+            _ =>  false
+        }
+    }
+
+    pub fn is_packed(&self) -> bool {
+        unsafe {
+            llvm::LLVMIsPackedStruct(self.to_ref()) == True
+        }
+    }
+
+    pub fn element_type(&self) -> Type {
+        unsafe {
+            Type::from_ref(llvm::LLVMGetElementType(self.to_ref()))
+        }
+    }
+
+    /// Returns the number of elements in `self` if it is an LLVM vector type.
+    pub fn vector_length(&self) -> usize {
+        unsafe {
+            llvm::LLVMGetVectorSize(self.to_ref()) as usize
+        }
+    }
+
+    pub fn array_length(&self) -> usize {
+        unsafe {
+            llvm::LLVMGetArrayLength(self.to_ref()) as usize
+        }
+    }
+
+    pub fn field_types(&self) -> Vec<Type> {
+        unsafe {
+            let n_elts = llvm::LLVMCountStructElementTypes(self.to_ref()) as usize;
+            if n_elts == 0 {
+                return Vec::new();
+            }
+            let mut elts = vec![Type { rf: ptr::null_mut() }; n_elts];
+            llvm::LLVMGetStructElementTypes(self.to_ref(),
+                                            elts.as_mut_ptr() as *mut TypeRef);
+            elts
+        }
+    }
+
+    pub fn return_type(&self) -> Type {
+        ty!(llvm::LLVMGetReturnType(self.to_ref()))
+    }
+
+    pub fn func_params(&self) -> Vec<Type> {
+        unsafe {
+            let n_args = llvm::LLVMCountParamTypes(self.to_ref()) as usize;
+            let mut args = vec![Type { rf: ptr::null_mut() }; n_args];
+            llvm::LLVMGetParamTypes(self.to_ref(),
+                                    args.as_mut_ptr() as *mut TypeRef);
+            args
+        }
+    }
+
+    pub fn float_width(&self) -> usize {
+        match self.kind() {
+            Float => 32,
+            Double => 64,
+            X86_FP80 => 80,
+            FP128 | PPC_FP128 => 128,
+            _ => bug!("llvm_float_width called on a non-float type")
+        }
+    }
+
+    /// Retrieve the bit width of the integer type `self`.
+    pub fn int_width(&self) -> u64 {
+        unsafe {
+            llvm::LLVMGetIntTypeWidth(self.to_ref()) as u64
+        }
+    }
+}
+
+/* Memory-managed object interface to type handles. */
+
+pub struct TypeNames {
+    named_types: RefCell<FnvHashMap<String, TypeRef>>,
+}
+
+impl TypeNames {
+    pub fn new() -> TypeNames {
+        TypeNames {
+            named_types: RefCell::new(FnvHashMap())
+        }
+    }
+
+    pub fn associate_type(&self, s: &str, t: &Type) {
+        assert!(self.named_types.borrow_mut().insert(s.to_string(),
+                                                     t.to_ref()).is_none());
+    }
+
+    pub fn find_type(&self, s: &str) -> Option<Type> {
+        self.named_types.borrow().get(s).map(|x| Type::from_ref(*x))
+    }
+}
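Editor's note: `Type::to_ref_slice` above relies on `Type` being a `#[repr(C)]` newtype over a single `TypeRef`, so a slice of wrappers and a slice of raw handles share one layout. A minimal standalone sketch of that pattern, using a hypothetical `Handle` type rather than the compiler's:

use std::mem;

// Hypothetical stand-in for the Type/TypeRef pair: a #[repr(C)] newtype over
// one pointer-sized field, so &[Handle] and &[usize] have identical layout.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq)]
struct Handle(usize);

fn to_raw_slice(handles: &[Handle]) -> &[usize] {
    unsafe { mem::transmute(handles) }
}

fn main() {
    let hs = [Handle(1), Handle(2), Handle(3)];
    assert_eq!(to_raw_slice(&hs), &[1, 2, 3]);
    println!("{:?}", to_raw_slice(&hs));
}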
diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs
new file mode 100644 (file)
index 0000000..863ae3f
--- /dev/null
@@ -0,0 +1,376 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(non_camel_case_types)]
+
+use rustc::hir::def_id::DefId;
+use rustc::infer;
+use rustc::ty::subst;
+use abi::FnType;
+use adt;
+use common::*;
+use machine;
+use rustc::traits::ProjectionMode;
+use rustc::ty::{self, Ty, TypeFoldable};
+
+use type_::Type;
+
+use syntax::ast;
+
+// LLVM doesn't like objects that are too big. Issue #17913
+fn ensure_array_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                                llet: Type,
+                                                size: machine::llsize,
+                                                scapegoat: Ty<'tcx>) {
+    let esz = machine::llsize_of_alloc(ccx, llet);
+    match esz.checked_mul(size) {
+        Some(n) if n < ccx.obj_size_bound() => {}
+        _ => { ccx.report_overbig_object(scapegoat) }
+    }
+}
+
+// A "sizing type" is an LLVM type, the size and alignment of which are
+// guaranteed to be equivalent to what you would get out of `type_of()`. It's
+// useful because:
+//
+// (1) It may be cheaper to compute the sizing type than the full type if all
+//     you're interested in is the size and/or alignment;
+//
+// (2) It won't make any recursive calls to determine the structure of the
+//     type behind pointers. This can help prevent infinite loops for
+//     recursive types. For example, enum types rely on this behavior.
+
+pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
+    if let Some(t) = cx.llsizingtypes().borrow().get(&t).cloned() {
+        return t;
+    }
+
+    debug!("sizing_type_of {:?}", t);
+    let _recursion_lock = cx.enter_type_of(t);
+
+    let llsizingty = match t.sty {
+        _ if !type_is_sized(cx.tcx(), t) => {
+            Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, t)], false)
+        }
+
+        ty::TyBool => Type::bool(cx),
+        ty::TyChar => Type::char(cx),
+        ty::TyInt(t) => Type::int_from_ty(cx, t),
+        ty::TyUint(t) => Type::uint_from_ty(cx, t),
+        ty::TyFloat(t) => Type::float_from_ty(cx, t),
+
+        ty::TyBox(ty) |
+        ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
+        ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
+            if type_is_sized(cx.tcx(), ty) {
+                Type::i8p(cx)
+            } else {
+                Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, ty)], false)
+            }
+        }
+
+        ty::TyFnDef(..) => Type::nil(cx),
+        ty::TyFnPtr(_) => Type::i8p(cx),
+
+        ty::TyArray(ty, size) => {
+            let llty = sizing_type_of(cx, ty);
+            let size = size as u64;
+            ensure_array_fits_in_address_space(cx, llty, size, t);
+            Type::array(&llty, size)
+        }
+
+        ty::TyTuple(ref tys) if tys.is_empty() => {
+            Type::nil(cx)
+        }
+
+        ty::TyTuple(..) | ty::TyEnum(..) | ty::TyClosure(..) => {
+            let repr = adt::represent_type(cx, t);
+            adt::sizing_type_of(cx, &repr, false)
+        }
+
+        ty::TyStruct(..) => {
+            if t.is_simd() {
+                let e = t.simd_type(cx.tcx());
+                if !e.is_machine() {
+                    cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \
+                                              a non-machine element type `{}`",
+                                             t, e))
+                }
+                let llet = type_of(cx, e);
+                let n = t.simd_size(cx.tcx()) as u64;
+                ensure_array_fits_in_address_space(cx, llet, n, t);
+                Type::vector(&llet, n)
+            } else {
+                let repr = adt::represent_type(cx, t);
+                adt::sizing_type_of(cx, &repr, false)
+            }
+        }
+
+        ty::TyProjection(..) | ty::TyInfer(..) | ty::TyParam(..) | ty::TyError => {
+            bug!("fictitious type {:?} in sizing_type_of()", t)
+        }
+        ty::TySlice(_) | ty::TyTrait(..) | ty::TyStr => bug!()
+    };
+
+    debug!("--> mapped t={:?} to llsizingty={:?}", t, llsizingty);
+
+    cx.llsizingtypes().borrow_mut().insert(t, llsizingty);
+
+    // FIXME(eddyb) Temporary sanity check for ty::layout.
+    let infcx = infer::normalizing_infer_ctxt(cx.tcx(), &cx.tcx().tables, ProjectionMode::Any);
+    match t.layout(&infcx) {
+        Ok(layout) => {
+            if !type_is_sized(cx.tcx(), t) {
+                if !layout.is_unsized() {
+                    bug!("layout should be unsized for type `{}` / {:#?}",
+                         t, layout);
+                }
+
+                // Unsized types get turned into a fat pointer for LLVM.
+                return llsizingty;
+            }
+            let r = layout.size(&cx.tcx().data_layout).bytes();
+            let l = machine::llsize_of_alloc(cx, llsizingty);
+            if r != l {
+                bug!("size differs (rustc: {}, llvm: {}) for type `{}` / {:#?}",
+                     r, l, t, layout);
+            }
+            let r = layout.align(&cx.tcx().data_layout).abi();
+            let l = machine::llalign_of_min(cx, llsizingty) as u64;
+            if r != l {
+                bug!("align differs (rustc: {}, llvm: {}) for type `{}` / {:#?}",
+                     r, l, t, layout);
+            }
+        }
+        Err(e) => {
+            bug!("failed to get layout for `{}`: {}", t, e);
+        }
+    }
+    llsizingty
+}
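Editor's note: the "sizing type" comment above hinges on the fact that a pointer's size never depends on its pointee, which is also why recursive Rust types are only representable behind a pointer. A small standalone illustration in ordinary Rust, not compiler internals:

use std::mem;

// The recursive payload sits behind a Box, so computing the enum's size never
// needs the pointee's full layout; this is the same reason sizing_type_of can
// stop at pointers instead of recursing through them.
#[allow(dead_code)]
enum List {
    Cons(i32, Box<List>),
    Nil,
}

fn main() {
    println!("size_of::<List>() = {} bytes", mem::size_of::<List>());
    println!("size_of::<Box<List>>() = {} bytes", mem::size_of::<Box<List>>());
}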
+
+fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
+    let unsized_part = ccx.tcx().struct_tail(ty);
+    match unsized_part.sty {
+        ty::TyStr | ty::TyArray(..) | ty::TySlice(_) => {
+            Type::uint_from_ty(ccx, ast::UintTy::Us)
+        }
+        ty::TyTrait(_) => Type::vtable_ptr(ccx),
+        _ => bug!("Unexpected tail in unsized_info_ty: {:?} for ty={:?}",
+                          unsized_part, ty)
+    }
+}
+
+pub fn immediate_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
+    if t.is_bool() {
+        Type::i1(cx)
+    } else {
+        type_of(cx, t)
+    }
+}
+
+/// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`.
+/// This is the right LLVM type for an alloca containing a value of that type,
+/// and the pointee of an Lvalue Datum (which is always an LLVM pointer).
+/// For unsized types, the returned type is a fat pointer, so the resulting
+/// LLVM type for a `Trait` Lvalue is `{ i8*, void(i8*)** }*`, which is a double
+/// indirection to the actual data, unlike an `i8` Lvalue, which is just `i8*`.
+/// This is needed due to the treatment of immediate values, as a fat pointer
+/// is too large to be placed in an SSA value (by our rules).
+/// For the raw type without fat pointer indirection, see `in_memory_type_of`.
+pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
+    let ty = if !type_is_sized(cx.tcx(), ty) {
+        cx.tcx().mk_imm_ptr(ty)
+    } else {
+        ty
+    };
+    in_memory_type_of(cx, ty)
+}
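Editor's note: the fat-pointer behaviour described in the doc comment above is observable from ordinary Rust, since a reference to an unsized type carries extra metadata (a length or a vtable pointer) alongside the data pointer. A short illustrative sketch, not part of the patch:

use std::mem::size_of;

fn main() {
    // Thin pointer: just the data address.
    println!("&u8:       {} bytes", size_of::<&u8>());
    // Fat pointers: data address plus length or vtable, matching the
    // { data*, info } structs built above for unsized pointees.
    println!("&str:      {} bytes", size_of::<&str>());
    println!("&[u8]:     {} bytes", size_of::<&[u8]>());
    println!("&dyn Fn(): {} bytes", size_of::<&dyn Fn()>());
}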
+
+/// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`.
+/// This is the right LLVM type for a field/array element of that type,
+/// and is the same as `type_of` for all Sized types.
+/// Unsized types, however, are represented by a "minimal unit", e.g.
+/// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
+/// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
+/// If the type is an unsized struct, the regular layout is generated,
+/// with the inner-most trailing unsized field using the "minimal unit"
+/// of that field's type - this is useful for taking the address of
+/// that field and ensuring the struct has the right alignment.
+/// For the LLVM type of a value as a whole, see `type_of`.
+/// NB: If you update this, be sure to update `sizing_type_of()` as well.
+pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
+    // Check the cache.
+    if let Some(&llty) = cx.lltypes().borrow().get(&t) {
+        return llty;
+    }
+
+    debug!("type_of {:?}", t);
+
+    assert!(!t.has_escaping_regions(), "{:?} has escaping regions", t);
+
+    // Replace any typedef'd types with their equivalent non-typedef
+    // type. This ensures that all LLVM nominal types that contain
+    // Rust types are defined as the same LLVM types. If we don't do
+    // this, then e.g. `Option<{myfield: bool}>` would be a different
+    // type than `Option<myrec>`.
+    let t_norm = cx.tcx().erase_regions(&t);
+
+    if t != t_norm {
+        let llty = in_memory_type_of(cx, t_norm);
+        debug!("--> normalized {:?} to {:?} llty={:?}", t, t_norm, llty);
+        cx.lltypes().borrow_mut().insert(t, llty);
+        return llty;
+    }
+
+    let mut llty = match t.sty {
+      ty::TyBool => Type::bool(cx),
+      ty::TyChar => Type::char(cx),
+      ty::TyInt(t) => Type::int_from_ty(cx, t),
+      ty::TyUint(t) => Type::uint_from_ty(cx, t),
+      ty::TyFloat(t) => Type::float_from_ty(cx, t),
+      ty::TyEnum(def, ref substs) => {
+          // Only create the named struct, but don't fill it in. We
+          // fill it in *after* placing it into the type cache. This
+          // avoids creating more than one copy of the enum when one
+          // of the enum's variants refers to the enum itself.
+          let repr = adt::represent_type(cx, t);
+          let tps = substs.types.get_slice(subst::TypeSpace);
+          let name = llvm_type_name(cx, def.did, tps);
+          adt::incomplete_type_of(cx, &repr, &name[..])
+      }
+      ty::TyClosure(..) => {
+          // Only create the named struct, but don't fill it in. We
+          // fill it in *after* placing it into the type cache.
+          let repr = adt::represent_type(cx, t);
+          // Unboxed closures can have substitutions in all spaces
+          // inherited from their environment, so we use the entire
+          // contents of the VecPerParamSpace to construct the LLVM
+          // name.
+          adt::incomplete_type_of(cx, &repr, "closure")
+      }
+
+      ty::TyBox(ty) |
+      ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
+      ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
+          if !type_is_sized(cx.tcx(), ty) {
+              if let ty::TyStr = ty.sty {
+                  // This means we get a nicer name in the output (str is always
+                  // unsized).
+                  cx.tn().find_type("str_slice").unwrap()
+              } else {
+                  let ptr_ty = in_memory_type_of(cx, ty).ptr_to();
+                  let info_ty = unsized_info_ty(cx, ty);
+                  Type::struct_(cx, &[ptr_ty, info_ty], false)
+              }
+          } else {
+              in_memory_type_of(cx, ty).ptr_to()
+          }
+      }
+
+      ty::TyArray(ty, size) => {
+          let size = size as u64;
+          // we must use `sizing_type_of` here as the type may
+          // not be fully initialized.
+          let szty = sizing_type_of(cx, ty);
+          ensure_array_fits_in_address_space(cx, szty, size, t);
+
+          let llty = in_memory_type_of(cx, ty);
+          Type::array(&llty, size)
+      }
+
+      // Unsized slice types (and str) have the type of their element, and
+      // traits have the type of u8. This is so that the data pointer inside
+      // fat pointers is of the right type (e.g. for array accesses), even
+      // when taking the address of an unsized field in a struct.
+      ty::TySlice(ty) => in_memory_type_of(cx, ty),
+      ty::TyStr | ty::TyTrait(..) => Type::i8(cx),
+
+      ty::TyFnDef(..) => Type::nil(cx),
+      ty::TyFnPtr(f) => {
+        let sig = cx.tcx().erase_late_bound_regions(&f.sig);
+        let sig = infer::normalize_associated_type(cx.tcx(), &sig);
+        FnType::new(cx, f.abi, &sig, &[]).llvm_type(cx).ptr_to()
+      }
+      ty::TyTuple(ref tys) if tys.is_empty() => Type::nil(cx),
+      ty::TyTuple(..) => {
+          let repr = adt::represent_type(cx, t);
+          adt::type_of(cx, &repr)
+      }
+      ty::TyStruct(def, ref substs) => {
+          if t.is_simd() {
+              let e = t.simd_type(cx.tcx());
+              if !e.is_machine() {
+                  cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \
+                                            a non-machine element type `{}`",
+                                           t, e))
+              }
+              let llet = in_memory_type_of(cx, e);
+              let n = t.simd_size(cx.tcx()) as u64;
+              ensure_array_fits_in_address_space(cx, llet, n, t);
+              Type::vector(&llet, n)
+          } else {
+              // Only create the named struct, but don't fill it in. We fill it
+              // in *after* placing it into the type cache. This prevents
+              // infinite recursion with recursive struct types.
+              let repr = adt::represent_type(cx, t);
+              let tps = substs.types.get_slice(subst::TypeSpace);
+              let name = llvm_type_name(cx, def.did, tps);
+              adt::incomplete_type_of(cx, &repr, &name[..])
+          }
+      }
+
+      ty::TyInfer(..) => bug!("type_of with TyInfer"),
+      ty::TyProjection(..) => bug!("type_of with TyProjection"),
+      ty::TyParam(..) => bug!("type_of with ty_param"),
+      ty::TyError => bug!("type_of with TyError"),
+    };
+
+    debug!("--> mapped t={:?} to llty={:?}", t, llty);
+
+    cx.lltypes().borrow_mut().insert(t, llty);
+
+    // If this was an enum or struct, fill in the type now.
+    match t.sty {
+        ty::TyEnum(..) | ty::TyStruct(..) | ty::TyClosure(..)
+                if !t.is_simd() => {
+            let repr = adt::represent_type(cx, t);
+            adt::finish_type_of(cx, &repr, &mut llty);
+        }
+        _ => ()
+    }
+
+    llty
+}
+
+pub fn align_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>)
+                          -> machine::llalign {
+    let llty = sizing_type_of(cx, t);
+    machine::llalign_of_min(cx, llty)
+}
+
+fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                            did: DefId,
+                            tps: &[Ty<'tcx>])
+                            -> String {
+    let base = cx.tcx().item_path_str(did);
+    let strings: Vec<String> = tps.iter().map(|t| t.to_string()).collect();
+    let tstr = if strings.is_empty() {
+        base
+    } else {
+        format!("{}<{}>", base, strings.join(", "))
+    };
+
+    if did.krate == 0 {
+        tstr
+    } else {
+        format!("{}.{}", did.krate, tstr)
+    }
+}
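Editor's note: a small standalone sketch of the naming scheme `llvm_type_name` above produces: the item path with its type parameters in angle brackets, prefixed with the crate number when the definition is not local. The helper below is hypothetical and only mirrors the format strings in the function:

// Hypothetical mirror of llvm_type_name's formatting, for illustration only.
fn llvm_style_name(base: &str, params: &[&str], krate: u32) -> String {
    let tstr = if params.is_empty() {
        base.to_string()
    } else {
        format!("{}<{}>", base, params.join(", "))
    };
    if krate == 0 { tstr } else { format!("{}.{}", krate, tstr) }
}

fn main() {
    assert_eq!(llvm_style_name("Point", &[], 0), "Point");
    assert_eq!(llvm_style_name("Option", &["i32"], 2), "2.Option<i32>");
    println!("{}", llvm_style_name("HashMap", &["String", "u32"], 3));
}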
diff --git a/src/librustc_trans/value.rs b/src/librustc_trans/value.rs
new file mode 100644 (file)
index 0000000..00b316c
--- /dev/null
@@ -0,0 +1,178 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm;
+use llvm::{UseRef, ValueRef};
+use basic_block::BasicBlock;
+use common::Block;
+
+use std::fmt;
+
+use libc::c_uint;
+
+#[derive(Copy, Clone, PartialEq)]
+pub struct Value(pub ValueRef);
+
+impl fmt::Debug for Value {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.write_str(&llvm::build_string(|s| unsafe {
+            llvm::LLVMWriteValueToString(self.0, s);
+        }).expect("non-UTF8 value description from LLVM"))
+    }
+}
+
+macro_rules! opt_val { ($e:expr) => (
+    unsafe {
+        match $e {
+            p if !p.is_null() => Some(Value(p)),
+            _ => None
+        }
+    }
+) }
+
+/// Wrapper for LLVM ValueRef
+impl Value {
+    /// Returns the native ValueRef
+    pub fn get(&self) -> ValueRef {
+        let Value(v) = *self; v
+    }
+
+    /// Returns the BasicBlock that contains this value
+    pub fn get_parent(self) -> Option<BasicBlock> {
+        unsafe {
+            match llvm::LLVMGetInstructionParent(self.get()) {
+                p if !p.is_null() => Some(BasicBlock(p)),
+                _ => None
+            }
+        }
+    }
+
+    /// Removes this value from its containing BasicBlock
+    pub fn erase_from_parent(self) {
+        unsafe {
+            llvm::LLVMInstructionEraseFromParent(self.get());
+        }
+    }
+
+    /// Returns the single dominating store to this value, if any
+    /// This only performs a search for a trivially dominating store. The store
+    /// must be the only user of this value, and there must not be any conditional
+    /// branches between the store and the given block.
+    pub fn get_dominating_store(self, bcx: Block) -> Option<Value> {
+        match self.get_single_user().and_then(|user| user.as_store_inst()) {
+            Some(store) => {
+                store.get_parent().and_then(|store_bb| {
+                    let mut bb = BasicBlock(bcx.llbb);
+                    let mut ret = Some(store);
+                    while bb.get() != store_bb.get() {
+                        match bb.get_single_predecessor() {
+                            Some(pred) => bb = pred,
+                            None => { ret = None; break }
+                        }
+                    }
+                    ret
+                })
+            }
+            _ => None
+        }
+    }
+
+    /// Returns the first use of this value, if any
+    pub fn get_first_use(self) -> Option<Use> {
+        unsafe {
+            match llvm::LLVMGetFirstUse(self.get()) {
+                u if !u.is_null() => Some(Use(u)),
+                _ => None
+            }
+        }
+    }
+
+    /// Tests if there are no uses of this value
+    pub fn has_no_uses(self) -> bool {
+        self.get_first_use().is_none()
+    }
+
+    /// Returns the single user of this value
+    /// If there are no users or multiple users, this returns None
+    pub fn get_single_user(self) -> Option<Value> {
+        let mut iter = self.user_iter();
+        match (iter.next(), iter.next()) {
+            (Some(first), None) => Some(first),
+            _ => None
+        }
+    }
+
+    /// Returns an iterator for the users of this value
+    pub fn user_iter(self) -> Users {
+        Users {
+            next: self.get_first_use()
+        }
+    }
+
+    /// Returns the requested operand of this instruction
+    /// Returns None if there's no operand at the given index
+    pub fn get_operand(self, i: usize) -> Option<Value> {
+        opt_val!(llvm::LLVMGetOperand(self.get(), i as c_uint))
+    }
+
+    /// Returns the Store instruction represented by this value, if any
+    pub fn as_store_inst(self) -> Option<Value> {
+        opt_val!(llvm::LLVMIsAStoreInst(self.get()))
+    }
+
+    /// Tests if this value is a terminator instruction
+    pub fn is_a_terminator_inst(self) -> bool {
+        unsafe {
+            !llvm::LLVMIsATerminatorInst(self.get()).is_null()
+        }
+    }
+}
+
+/// Wrapper for LLVM UseRef
+#[derive(Copy, Clone)]
+pub struct Use(UseRef);
+
+impl Use {
+    pub fn get(&self) -> UseRef {
+        let Use(v) = *self; v
+    }
+
+    pub fn get_user(self) -> Value {
+        unsafe {
+            Value(llvm::LLVMGetUser(self.get()))
+        }
+    }
+
+    pub fn get_next_use(self) -> Option<Use> {
+        unsafe {
+            match llvm::LLVMGetNextUse(self.get()) {
+                u if !u.is_null() => Some(Use(u)),
+                _ => None
+            }
+        }
+    }
+}
+
+/// Iterator for the users of a value
+pub struct Users {
+    next: Option<Use>
+}
+
+impl Iterator for Users {
+    type Item = Value;
+
+    fn next(&mut self) -> Option<Value> {
+        let current = self.next;
+
+        self.next = current.and_then(|u| u.get_next_use());
+
+        current.map(|u| u.get_user())
+    }
+}
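Editor's note: the `Users` iterator above walks LLVM's intrusive use list by repeatedly following the "next use" handle. A minimal standalone sketch of the same pattern over a plain index-based chain; all names below are hypothetical:

// next_of[i] is the use that follows use i, if any; this stands in for
// LLVMGetNextUse in the compiler code above.
struct Chain {
    next_of: Vec<Option<usize>>,
}

struct Uses<'a> {
    chain: &'a Chain,
    cur: Option<usize>,
}

impl<'a> Iterator for Uses<'a> {
    type Item = usize;
    fn next(&mut self) -> Option<usize> {
        let cur = self.cur;
        // Advance to the next link before yielding the current one,
        // just as Users::next does with Use::get_next_use.
        self.cur = cur.and_then(|i| self.chain.next_of[i]);
        cur
    }
}

fn main() {
    let chain = Chain { next_of: vec![Some(1), Some(2), None] };
    let visited: Vec<usize> = Uses { chain: &chain, cur: Some(0) }.collect();
    assert_eq!(visited, vec![0, 1, 2]);
    println!("{:?}", visited);
}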
index fd33e9da1f93786acdce840e06917d915448dec4..e9dabf16eaece7455128e1efbe6d60627ab38ac5 100644 (file)
@@ -15,5 +15,6 @@ arena = { path = "../libarena" }
 fmt_macros = { path = "../libfmt_macros" }
 rustc = { path = "../librustc" }
 rustc_back = { path = "../librustc_back" }
-rustc_front = { path = "../librustc_front" }
+rustc_const_eval = { path = "../librustc_const_eval" }
+rustc_const_math = { path = "../librustc_const_math" }
 rustc_platform_intrinsics = { path = "../librustc_platform_intrinsics" }
index 4061d3a2028c77989700dd6f987ed9134304aaee..21122e7095dd1cb5280d9cf04828db61f908ca3a 100644 (file)
 //! an rptr (`&r.T`) use the region `r` that appears in the rptr.
 
 use middle::astconv_util::{prim_ty_to_ty, prohibit_type_params, prohibit_projection};
-use middle::const_eval::{self, ConstVal};
-use middle::const_eval::EvalHint::UncheckedExprHint;
-use middle::def::{self, Def};
-use middle::def_id::DefId;
+use middle::const_val::ConstVal;
+use rustc_const_eval::eval_const_expr_partial;
+use rustc_const_eval::EvalHint::UncheckedExprHint;
+use hir::def::{self, Def};
+use hir::def_id::DefId;
 use middle::resolve_lifetime as rl;
-use middle::privacy::{AllPublic, LastMod};
-use middle::subst::{FnSpace, TypeSpace, SelfSpace, Subst, Substs, ParamSpace};
-use middle::traits;
-use middle::ty::{self, Ty, ToPredicate, TypeFoldable};
-use middle::ty::wf::object_region_bounds;
+use rustc::ty::subst::{FnSpace, TypeSpace, SelfSpace, Subst, Substs, ParamSpace};
+use rustc::traits;
+use rustc::ty::{self, Ty, TyCtxt, ToPredicate, TypeFoldable};
+use rustc::ty::wf::object_region_bounds;
 use require_c_abi_if_variadic;
 use rscope::{self, UnelidableRscope, RegionScope, ElidableRscope,
              ObjectLifetimeDefaultRscope, ShiftedRscope, BindingRscope,
@@ -66,18 +66,20 @@ use rscope::{self, UnelidableRscope, RegionScope, ElidableRscope,
 use util::common::{ErrorReported, FN_OUTPUT_NAME};
 use util::nodemap::FnvHashSet;
 
+use rustc_const_math::ConstInt;
+
 use syntax::{abi, ast};
 use syntax::codemap::{Span, Pos};
 use syntax::errors::DiagnosticBuilder;
 use syntax::feature_gate::{GateIssue, emit_feature_err};
 use syntax::parse::token;
 
-use rustc_front::print::pprust;
-use rustc_front::hir;
+use rustc::hir::print as pprust;
+use rustc::hir;
 use rustc_back::slice;
 
 pub trait AstConv<'tcx> {
-    fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>;
+    fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx>;
 
     /// Identify the type scheme for an item with a type, like a type
     /// alias, fn, or struct. This allows you to figure out the set of
@@ -154,12 +156,12 @@ pub trait AstConv<'tcx> {
                     -> Ty<'tcx>;
 }
 
-pub fn ast_region_to_region(tcx: &ty::ctxt, lifetime: &hir::Lifetime)
+pub fn ast_region_to_region(tcx: &TyCtxt, lifetime: &hir::Lifetime)
                             -> ty::Region {
     let r = match tcx.named_region_map.get(&lifetime.id) {
         None => {
             // should have been recorded by the `resolve_lifetime` pass
-            tcx.sess.span_bug(lifetime.span, "unresolved lifetime");
+            span_bug!(lifetime.span, "unresolved lifetime");
         }
 
         Some(&rl::DefStaticRegion) => {
@@ -410,7 +412,7 @@ fn create_substs_for_ast_path<'tcx>(
            decl_generics, self_ty, types_provided,
            region_substs);
 
-    assert_eq!(region_substs.regions().len(TypeSpace), decl_generics.regions.len(TypeSpace));
+    assert_eq!(region_substs.regions.len(TypeSpace), decl_generics.regions.len(TypeSpace));
     assert!(region_substs.types.is_empty());
 
     // Convert the type parameters supplied by the user.
@@ -483,7 +485,7 @@ fn create_substs_for_ast_path<'tcx>(
                 substs.types.push(TypeSpace, default);
             }
         } else {
-            tcx.sess.span_bug(span, "extra parameter without default");
+            span_bug!(span, "extra parameter without default");
         }
     }
 
@@ -572,7 +574,7 @@ fn convert_angle_bracketed_parameters<'tcx>(this: &AstConv<'tcx>,
 /// Returns the appropriate lifetime to use for any output lifetimes
 /// (if one exists) and a vector of the (pattern, number of lifetimes)
 /// corresponding to each input type/pattern.
-fn find_implied_output_region<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn find_implied_output_region<'tcx>(tcx: &TyCtxt<'tcx>,
                                     input_tys: &[Ty<'tcx>],
                                     input_pats: Vec<String>) -> ElidedLifetime
 {
@@ -837,7 +839,7 @@ fn create_substs_for_ast_trait_ref<'a,'tcx>(this: &AstConv<'tcx>,
         Err(ErrorReported) => {
             // No convenient way to recover from a cycle here. Just bail. Sorry!
             this.tcx().sess.abort_if_errors();
-            this.tcx().sess.bug("ErrorReported returned, but no errors reports?")
+            bug!("ErrorReported returned, but no errors were reported?")
         }
     };
 
@@ -933,7 +935,7 @@ fn ast_type_binding_to_poly_projection_predicate<'tcx>(
                                                  tcx.mk_substs(dummy_substs)));
     }
 
-    try!(this.ensure_super_predicates(binding.span, trait_ref.def_id()));
+    this.ensure_super_predicates(binding.span, trait_ref.def_id())?;
 
     let mut candidates: Vec<ty::PolyTraitRef> =
         traits::supertraits(tcx, trait_ref.clone())
@@ -952,11 +954,11 @@ fn ast_type_binding_to_poly_projection_predicate<'tcx>(
         }
     }
 
-    let candidate = try!(one_bound_for_assoc_type(tcx,
-                                                  candidates,
-                                                  &trait_ref.to_string(),
-                                                  &binding.item_name.as_str(),
-                                                  binding.span));
+    let candidate = one_bound_for_assoc_type(tcx,
+                                             candidates,
+                                             &trait_ref.to_string(),
+                                             &binding.item_name.as_str(),
+                                             binding.span)?;
 
     Ok(ty::Binder(ty::ProjectionPredicate {             // <-------------------------+
         projection_ty: ty::ProjectionTy {               //                           |
@@ -1167,7 +1169,7 @@ fn make_object_type<'tcx>(this: &AstConv<'tcx>,
     tcx.mk_trait(object.principal, object.bounds)
 }
 
-fn report_ambiguous_associated_type(tcx: &ty::ctxt,
+fn report_ambiguous_associated_type(tcx: &TyCtxt,
                                     span: Span,
                                     type_str: &str,
                                     trait_str: &str,
@@ -1221,7 +1223,7 @@ fn find_bound_for_assoc_item<'tcx>(this: &AstConv<'tcx>,
 
 // Checks that bounds contains exactly one element and reports appropriate
 // errors otherwise.
-fn one_bound_for_assoc_type<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn one_bound_for_assoc_type<'tcx>(tcx: &TyCtxt<'tcx>,
                                   bounds: Vec<ty::PolyTraitRef<'tcx>>,
                                   ty_param_name: &str,
                                   assoc_name: &str,
@@ -1351,7 +1353,7 @@ fn associated_path_def_to_ty<'tcx>(this: &AstConv<'tcx>,
                                       .expect("missing associated type");
                 tcx.map.local_def_id(item.id)
             }
-            _ => unreachable!()
+            _ => bug!()
         }
     } else {
         let trait_items = tcx.trait_items(trait_did);
@@ -1494,7 +1496,7 @@ fn base_def_to_ty<'tcx>(this: &AstConv<'tcx>,
                     ty
                 }
             } else {
-                tcx.sess.span_bug(span, "self type has not been fully resolved")
+                span_bug!(span, "self type has not been fully resolved")
             }
         }
         Def::SelfTy(Some(_), None) => {
@@ -1637,8 +1639,7 @@ pub fn ast_ty_to_ty<'tcx>(this: &AstConv<'tcx>,
         }
         hir::TyBareFn(ref bf) => {
             require_c_abi_if_variadic(tcx, &bf.decl, bf.abi, ast_ty.span);
-            let bare_fn = ty_of_bare_fn(this, bf.unsafety, bf.abi, &bf.decl);
-            tcx.mk_fn(None, tcx.mk_bare_fn(bare_fn))
+            tcx.mk_fn_ptr(ty_of_bare_fn(this, bf.unsafety, bf.abi, &bf.decl))
         }
         hir::TyPolyTraitRef(ref bounds) => {
             conv_ty_poly_trait_ref(this, rscope, ast_ty.span, bounds)
@@ -1650,11 +1651,10 @@ pub fn ast_ty_to_ty<'tcx>(this: &AstConv<'tcx>,
                 // Create some fake resolution that can't possibly be a type.
                 def::PathResolution {
                     base_def: Def::Mod(tcx.map.local_def_id(ast::CRATE_NODE_ID)),
-                    last_private: LastMod(AllPublic),
                     depth: path.segments.len()
                 }
             } else {
-                tcx.sess.span_bug(ast_ty.span, &format!("unbound path {:?}", ast_ty))
+                span_bug!(ast_ty.span, "unbound path {:?}", ast_ty)
             };
             let def = path_res.base_def;
             let base_ty_end = path.segments.len() - path_res.depth;
@@ -1674,7 +1674,6 @@ pub fn ast_ty_to_ty<'tcx>(this: &AstConv<'tcx>,
                 // Write back the new resolution.
                 tcx.def_map.borrow_mut().insert(ast_ty.id, def::PathResolution {
                     base_def: def,
-                    last_private: path_res.last_private,
                     depth: 0
                 });
             }
@@ -1683,23 +1682,17 @@ pub fn ast_ty_to_ty<'tcx>(this: &AstConv<'tcx>,
         }
         hir::TyFixedLengthVec(ref ty, ref e) => {
             let hint = UncheckedExprHint(tcx.types.usize);
-            match const_eval::eval_const_expr_partial(tcx, &e, hint, None) {
-                Ok(r) => {
-                    match r {
-                        ConstVal::Int(i) =>
-                            tcx.mk_array(ast_ty_to_ty(this, rscope, &ty),
-                                         i as usize),
-                        ConstVal::Uint(i) =>
-                            tcx.mk_array(ast_ty_to_ty(this, rscope, &ty),
-                                         i as usize),
-                        _ => {
-                            span_err!(tcx.sess, ast_ty.span, E0249,
-                                      "expected constant integer expression \
-                                       for array length");
-                            this.tcx().types.err
-                        }
-                    }
-                }
+            match eval_const_expr_partial(tcx, &e, hint, None) {
+                Ok(ConstVal::Integral(ConstInt::Usize(i))) => {
+                    let i = i.as_u64(tcx.sess.target.uint_type);
+                    assert_eq!(i as usize as u64, i);
+                    tcx.mk_array(ast_ty_to_ty(this, rscope, &ty), i as usize)
+                },
+                Ok(val) => {
+                    span_err!(tcx.sess, ast_ty.span, E0249,
+                              "expected usize value for array length, got {}", val.description());
+                    this.tcx().types.err
+                },
                 Err(ref r) => {
                     let mut err = struct_span_err!(tcx.sess, r.span, E0250,
                                                    "array length constant evaluation error: {}",
@@ -1968,7 +1961,7 @@ pub fn ty_of_closure<'tcx>(
             ty::FnConverging(this.ty_infer(None, None, None, decl.output.span())),
         hir::Return(ref output) =>
             ty::FnConverging(ast_ty_to_ty(this, &rb, &output)),
-        hir::DefaultReturn(..) => unreachable!(),
+        hir::DefaultReturn(..) => bug!(),
         hir::NoReturn(..) => ty::FnDiverging
     };
 
@@ -2156,7 +2149,7 @@ pub struct PartitionedBounds<'a> {
 
 /// Divides a list of bounds from the AST into three groups: builtin bounds (Copy, Sized etc),
 /// general trait bounds, and region bounds.
-pub fn partition_bounds<'a>(tcx: &ty::ctxt,
+pub fn partition_bounds<'a>(tcx: &TyCtxt,
                             _span: Span,
                             ast_bounds: &'a [hir::TyParamBound])
                             -> PartitionedBounds<'a>
@@ -2205,7 +2198,7 @@ pub fn partition_bounds<'a>(tcx: &ty::ctxt,
     }
 }
 
-fn prohibit_projections<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn prohibit_projections<'tcx>(tcx: &TyCtxt<'tcx>,
                               bindings: &[ConvertedBinding<'tcx>])
 {
     for binding in bindings.iter().take(1) {
@@ -2213,7 +2206,7 @@ fn prohibit_projections<'tcx>(tcx: &ty::ctxt<'tcx>,
     }
 }
 
-fn check_type_argument_count(tcx: &ty::ctxt, span: Span, supplied: usize,
+fn check_type_argument_count(tcx: &TyCtxt, span: Span, supplied: usize,
                              required: usize, accepted: usize) {
     if supplied < required {
         let expected = if required < accepted {
@@ -2238,7 +2231,7 @@ fn check_type_argument_count(tcx: &ty::ctxt, span: Span, supplied: usize,
     }
 }
 
-fn report_lifetime_number_error(tcx: &ty::ctxt, span: Span, number: usize, expected: usize) {
+fn report_lifetime_number_error(tcx: &TyCtxt, span: Span, number: usize, expected: usize) {
     span_err!(tcx.sess, span, E0107,
               "wrong number of lifetime parameters: expected {}, found {}",
               expected, number);
@@ -2256,7 +2249,7 @@ pub struct Bounds<'tcx> {
 
 impl<'tcx> Bounds<'tcx> {
     pub fn predicates(&self,
-        tcx: &ty::ctxt<'tcx>,
+        tcx: &TyCtxt<'tcx>,
         param_ty: Ty<'tcx>)
         -> Vec<ty::Predicate<'tcx>>
     {
index 93d983fae12e7129693237f8fd73cab66c5906c1..8dbd6496b6fb06bd3ec953b4e2cea520755d9e84 100644 (file)
@@ -8,17 +8,17 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use middle::def::{self, Def};
-use middle::infer::{self, TypeOrigin};
-use middle::pat_util::{PatIdMap, pat_id_map, pat_is_binding};
-use middle::pat_util::pat_is_resolved_const;
-use middle::privacy::{AllPublic, LastMod};
-use middle::subst::Substs;
-use middle::ty::{self, Ty, TypeFoldable, LvaluePreference};
+use hir::def::{self, Def};
+use rustc::infer::{self, InferOk, TypeOrigin};
+use hir::pat_util::{PatIdMap, pat_id_map, pat_is_binding};
+use hir::pat_util::pat_is_resolved_const;
+use rustc::ty::subst::Substs;
+use rustc::ty::{self, Ty, TypeFoldable, LvaluePreference};
 use check::{check_expr, check_expr_has_type, check_expr_with_expectation};
-use check::{check_expr_coercable_to_type, demand, FnCtxt, Expectation};
+use check::{demand, FnCtxt, Expectation};
 use check::{check_expr_with_lvalue_pref};
 use check::{instantiate_path, resolve_ty_and_def_ufcs, structurally_resolved_type};
+use check::coercion;
 use lint;
 use require_same_types;
 use util::nodemap::FnvHashMap;
@@ -30,9 +30,8 @@ use syntax::ast;
 use syntax::codemap::{Span, Spanned};
 use syntax::ptr::P;
 
-use rustc_front::hir::{self, PatKind};
-use rustc_front::print::pprust;
-use rustc_front::util as hir_util;
+use rustc::hir::{self, PatKind};
+use rustc::hir::print as pprust;
 
 pub fn check_pat<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>,
                            pat: &'tcx hir::Pat,
@@ -197,7 +196,7 @@ pub fn check_pat<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>,
             }
         }
         PatKind::Ident(_, ref path, _) => {
-            let path = hir_util::ident_to_path(path.span, path.node);
+            let path = hir::Path::from_ident(path.span, path.node);
             check_pat_enum(pcx, pat, &path, Some(&[]), expected, false);
         }
         PatKind::TupleStruct(ref path, ref subpats) => {
@@ -219,7 +218,6 @@ pub fn check_pat<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>,
                 let sentinel = fcx.tcx().map.local_def_id(ast::CRATE_NODE_ID);
                 def::PathResolution {
                     base_def: Def::Mod(sentinel),
-                    last_private: LastMod(AllPublic),
                     depth: path.segments.len()
                 }
             } else {
@@ -414,8 +412,7 @@ fn check_assoc_item_is_const(pcx: &pat_ctxt, def: Def, span: Span) -> bool {
             false
         }
         _ => {
-            pcx.fcx.ccx.tcx.sess.span_bug(span, "non-associated item in
-                                                 check_assoc_item_is_const");
+            span_bug!(span, "non-associated item in check_assoc_item_is_const");
         }
     }
 }
@@ -494,54 +491,72 @@ pub fn check_match<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
     // of execution reach it, we will panic, so bottom is an appropriate
     // type in that case)
     let expected = expected.adjust_for_branches(fcx);
-    let result_ty = arms.iter().fold(fcx.infcx().next_diverging_ty_var(), |result_ty, arm| {
-        let bty = match expected {
-            // We don't coerce to `()` so that if the match expression is a
-            // statement it's branches can have any consistent type. That allows
-            // us to give better error messages (pointing to a usually better
-            // arm for inconsistent arms or to the whole match when a `()` type
-            // is required).
-            Expectation::ExpectHasType(ety) if ety != fcx.tcx().mk_nil() => {
-                check_expr_coercable_to_type(fcx, &arm.body, ety);
-                ety
-            }
-            _ => {
-                check_expr_with_expectation(fcx, &arm.body, expected);
-                fcx.node_ty(arm.body.id)
+    let mut result_ty = fcx.infcx().next_diverging_ty_var();
+    let coerce_first = match expected {
+        // We don't coerce to `()` so that if the match expression is a
+        // statement it's branches can have any consistent type. That allows
+        // us to give better error messages (pointing to a usually better
+        // arm for inconsistent arms or to the whole match when a `()` type
+        // is required).
+        Expectation::ExpectHasType(ety) if ety != fcx.tcx().mk_nil() => {
+            ety
+        }
+        _ => result_ty
+    };
+    for (i, arm) in arms.iter().enumerate() {
+        if let Some(ref e) = arm.guard {
+            check_expr_has_type(fcx, e, tcx.types.bool);
+        }
+        check_expr_with_expectation(fcx, &arm.body, expected);
+        let arm_ty = fcx.expr_ty(&arm.body);
+
+        if result_ty.references_error() || arm_ty.references_error() {
+            result_ty = tcx.types.err;
+            continue;
+        }
+
+        // Handle the fallback arm of a desugared if-let like a missing else.
+        let is_if_let_fallback = match match_src {
+            hir::MatchSource::IfLetDesugar { contains_else_clause: false } => {
+                i == arms.len() - 1 && arm_ty.is_nil()
             }
+            _ => false
         };
 
-        if let Some(ref e) = arm.guard {
-            check_expr_has_type(fcx, &e, tcx.types.bool);
-        }
+        let origin = if is_if_let_fallback {
+            TypeOrigin::IfExpressionWithNoElse(expr.span)
+        } else {
+            TypeOrigin::MatchExpressionArm(expr.span, arm.body.span, match_src)
+        };
 
-        if result_ty.references_error() || bty.references_error() {
-            tcx.types.err
+        let result = if is_if_let_fallback {
+            fcx.infcx().eq_types(true, origin, arm_ty, result_ty)
+                .map(|InferOk { obligations, .. }| {
+                    // FIXME(#32730) propagate obligations
+                    assert!(obligations.is_empty());
+                    arm_ty
+                })
+        } else if i == 0 {
+            // Special-case the first arm, as it has no "previous expressions".
+            coercion::try(fcx, &arm.body, coerce_first)
         } else {
-            let (origin, expected, found) = match match_src {
-                /* if-let construct without an else block */
-                hir::MatchSource::IfLetDesugar { contains_else_clause }
-                if !contains_else_clause => (
-                    TypeOrigin::IfExpressionWithNoElse(expr.span),
-                    bty,
-                    result_ty,
-                ),
-                _ => (
-                    TypeOrigin::MatchExpressionArm(expr.span, arm.body.span, match_src),
-                    result_ty,
-                    bty,
-                ),
-            };
+            let prev_arms = || arms[..i].iter().map(|arm| &*arm.body);
+            coercion::try_find_lub(fcx, origin, prev_arms, result_ty, &arm.body)
+        };
 
-            infer::common_supertype(
-                fcx.infcx(),
-                origin,
-                true,
-                expected,
-                found,
-            )
-        }
-    });
+        result_ty = match result {
+            Ok(ty) => ty,
+            Err(e) => {
+                let (expected, found) = if is_if_let_fallback {
+                    (arm_ty, result_ty)
+                } else {
+                    (result_ty, arm_ty)
+                };
+                fcx.infcx().report_mismatched_types(origin, expected, found, e);
+                fcx.tcx().types.err
+            }
+        };
+    }
 
     fcx.write_ty(expr.id, result_ty);
 }
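Editor's note: the rewritten `check_match` above coerces each arm against the result of the previous arms (or against the expected type, when one is known) instead of computing a common supertype up front. A small surface-level illustration of the effect in ordinary Rust; here the annotation supplies the expected type that every arm is coerced to:

fn main() {
    let n = 1;
    // The arms have different most-specific types (&[u8; 2] vs &[u8; 3]);
    // per-arm coercion lets both meet the expected slice type rather than
    // requiring the arm types to be identical.
    let s: &[u8] = match n {
        0 => &[1, 2],
        _ => &[1, 2, 3],
    };
    println!("selected arm has {} elements", s.len());
}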
@@ -576,7 +591,7 @@ pub fn check_pat_struct<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>, pat: &'tcx hir::Pat,
     let pat_ty = pcx.fcx.instantiate_type(def.def_id(), path);
     let item_substs = match pat_ty.sty {
         ty::TyStruct(_, substs) | ty::TyEnum(_, substs) => substs,
-        _ => tcx.sess.span_bug(pat.span, "struct variant is not an ADT")
+        _ => span_bug!(pat.span, "struct variant is not an ADT")
     };
     demand::eqtype(fcx, pat.span, expected, pat_ty);
     check_struct_pat_fields(pcx, pat.span, fields, variant, &item_substs, etc);
index 91916efa882778226c992ec971cc40d6f8c50eaa..f7726bc9cf49291cfa18de84bc1802c8bc125c73 100644 (file)
@@ -8,10 +8,10 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use middle::infer::InferCtxt;
-use middle::traits::{self, FulfillmentContext, Normalized, MiscObligation,
+use rustc::infer::InferCtxt;
+use rustc::traits::{self, FulfillmentContext, Normalized, MiscObligation,
                      SelectionContext, ObligationCause};
-use middle::ty::fold::TypeFoldable;
+use rustc::ty::fold::TypeFoldable;
 use syntax::ast;
 use syntax::codemap::Span;
 
index 42ea3cc2aaa7ba1d72537b25badfe7d7f90d4819..21800d91d9458b3810450293c7375eac278b39d7 100644 (file)
@@ -26,15 +26,15 @@ use super::write_call;
 
 use CrateCtxt;
 use middle::cstore::LOCAL_CRATE;
-use middle::def::Def;
-use middle::def_id::DefId;
-use middle::infer;
-use middle::ty::{self, LvaluePreference, Ty};
+use hir::def::Def;
+use hir::def_id::DefId;
+use rustc::infer;
+use rustc::ty::{self, LvaluePreference, Ty};
 use syntax::codemap::Span;
 use syntax::parse::token;
 use syntax::ptr::P;
 
-use rustc_front::hir;
+use rustc::hir;
 
 /// Check that it is legal to call methods of the trait corresponding
 /// to `trait_id` (this only cares about the trait, not the specific
@@ -82,7 +82,7 @@ pub fn check_call<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
         autoderef(fcx,
                   callee_expr.span,
                   original_callee_ty,
-                  Some(callee_expr),
+                  || Some(callee_expr),
                   UnresolvedTypeAction::Error,
                   LvaluePreference::NoPreference,
                   |adj_ty, idx| {
@@ -130,7 +130,7 @@ fn try_overloaded_call_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
 
     // If the callee is a bare function or a closure, then we're all set.
     match structurally_resolved_type(fcx, callee_expr.span, adjusted_ty).sty {
-        ty::TyBareFn(..) => {
+        ty::TyFnDef(..) | ty::TyFnPtr(_) => {
             fcx.write_autoderef_adjustment(callee_expr.id, autoderefs);
             return Some(CallStep::Builtin);
         }
@@ -225,7 +225,8 @@ fn confirm_builtin_call<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>,
     let error_fn_sig;
 
     let fn_sig = match callee_ty.sty {
-        ty::TyBareFn(_, &ty::BareFnTy {ref sig, ..}) => {
+        ty::TyFnDef(_, _, &ty::BareFnTy {ref sig, ..}) |
+        ty::TyFnPtr(&ty::BareFnTy {ref sig, ..}) => {
             sig
         }
         _ => {
@@ -396,7 +397,7 @@ impl<'tcx> DeferredCallResolution<'tcx> for CallResolution<'tcx> {
                 write_overloaded_call_method_map(fcx, self.call_expr, method_callee);
             }
             None => {
-                fcx.tcx().sess.span_bug(
+                span_bug!(
                     self.call_expr.span,
                     "failed to find an overloaded call trait for closure call");
             }
index 2ea0df280db216acd409130656059a103fd99502..91cdb8d966d419be942e604bc82970fd279a175f 100644 (file)
@@ -44,18 +44,18 @@ use super::FnCtxt;
 use super::structurally_resolved_type;
 
 use lint;
-use middle::def_id::DefId;
-use middle::ty::{self, Ty, TypeFoldable};
-use middle::ty::cast::{CastKind, CastTy};
+use hir::def_id::DefId;
+use rustc::ty::{self, Ty, TypeFoldable};
+use rustc::ty::cast::{CastKind, CastTy};
 use syntax::codemap::Span;
-use rustc_front::hir;
+use rustc::hir;
 use syntax::ast;
 
 
 /// Reifies a cast check to be checked once we have full type information for
 /// a function context.
 pub struct CastCheck<'tcx> {
-    expr: hir::Expr,
+    expr: &'tcx hir::Expr,
     expr_ty: Ty<'tcx>,
     cast_ty: Ty<'tcx>,
     span: Span,
@@ -100,6 +100,8 @@ enum CastError {
     CastToBool,
     CastToChar,
     DifferingKinds,
+    /// Cast of thin to fat raw ptr (e.g. `*const () as *const [u8]`)
+    SizedUnsizedCast,
     IllegalCast,
     NeedViaPtr,
     NeedViaThinPtr,
@@ -109,7 +111,7 @@ enum CastError {
 }
 
 impl<'tcx> CastCheck<'tcx> {
-    pub fn new(expr: hir::Expr, expr_ty: Ty<'tcx>, cast_ty: Ty<'tcx>, span: Span)
+    pub fn new(expr: &'tcx hir::Expr, expr_ty: Ty<'tcx>, cast_ty: Ty<'tcx>, span: Span)
                -> CastCheck<'tcx> {
         CastCheck {
             expr: expr,
@@ -137,7 +139,7 @@ impl<'tcx> CastCheck<'tcx> {
                             CastError::NeedViaThinPtr => "a thin pointer",
                             CastError::NeedViaInt => "an integer",
                             CastError::NeedViaUsize => "a usize",
-                            _ => unreachable!()
+                            _ => bug!()
                         }))
                     .emit();
             }
@@ -165,6 +167,13 @@ impl<'tcx> CastCheck<'tcx> {
                             fcx.infcx().ty_to_string(self.cast_ty))
                 }, self.expr_ty, None);
             }
+            CastError::SizedUnsizedCast => {
+                fcx.type_error_message(self.span, |actual| {
+                    format!("cannot cast thin pointer `{}` to fat pointer `{}`",
+                            actual,
+                            fcx.infcx().ty_to_string(self.cast_ty))
+                }, self.expr_ty, None)
+            }
             CastError::DifferingKinds => {
                 fcx.type_error_struct(self.span, |actual| {
                     format!("casting `{}` as `{}` is invalid",
@@ -229,12 +238,26 @@ impl<'tcx> CastCheck<'tcx> {
     /// can return Ok and create type errors in the fcx rather than returning
     /// directly. coercion-cast is handled in check instead of here.
     fn do_check<'a>(&self, fcx: &FnCtxt<'a, 'tcx>) -> Result<CastKind, CastError> {
-        use middle::ty::cast::IntTy::*;
-        use middle::ty::cast::CastTy::*;
+        use rustc::ty::cast::IntTy::*;
+        use rustc::ty::cast::CastTy::*;
 
         let (t_from, t_cast) = match (CastTy::from_ty(self.expr_ty),
                                       CastTy::from_ty(self.cast_ty)) {
             (Some(t_from), Some(t_cast)) => (t_from, t_cast),
+            // Function item types may need to be reified before casts.
+            (None, Some(t_cast)) => {
+                if let ty::TyFnDef(_, _, f) = self.expr_ty.sty {
+                    // Attempt a coercion to a fn pointer type.
+                    let res = coercion::try(fcx, self.expr,
+                                            fcx.tcx().mk_ty(ty::TyFnPtr(f)));
+                    if !res.is_ok() {
+                        return Err(CastError::NonScalar);
+                    }
+                    (FnPtr, t_cast)
+                } else {
+                    return Err(CastError::NonScalar);
+                }
+            }
             _ => {
                 return Err(CastError::NonScalar)
             }
@@ -298,7 +321,7 @@ impl<'tcx> CastCheck<'tcx> {
 
         // sized -> unsized? report invalid cast (don't complain about vtable kinds)
         if fcx.type_is_known_to_be_sized(m_expr.ty, self.span) {
-            return Err(CastError::IllegalCast);
+            return Err(CastError::SizedUnsizedCast);
         }
 
         // vtable kinds must match
@@ -376,14 +399,7 @@ impl<'tcx> CastCheck<'tcx> {
     }
 
     fn try_coercion_cast<'a>(&self, fcx: &FnCtxt<'a, 'tcx>) -> bool {
-        if let Ok(()) = coercion::mk_assignty(fcx,
-                                              &self.expr,
-                                              self.expr_ty,
-                                              self.cast_ty) {
-            true
-        } else {
-            false
-        }
+        coercion::try(fcx, self.expr, self.cast_ty).is_ok()
     }
 
 }
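// A standalone sketch (not part of the patch) of the behaviour reported by the
// new `SizedUnsizedCast` variant: a thin raw pointer cannot be cast directly to
// a fat one, because the length metadata is missing; it has to come from a
// value that already carries it, such as a slice.
fn main() {
    let unit = ();
    let thin = &unit as *const ();

    // Rejected with: cannot cast thin pointer `*const ()` to fat pointer `*const [u8]`
    // let fat = thin as *const [u8];

    let bytes = [1u8, 2, 3];
    let fat: *const [u8] = &bytes[..];
    println!("{:?} {:?}", thin, fat);
}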
index a4fb2e381459e7ced59e5708f377ae534bf91c17..7bca570411cebeb2d7c631bf5cb92948fe1e7f68 100644 (file)
 use super::{check_fn, Expectation, FnCtxt};
 
 use astconv;
-use middle::subst;
-use middle::ty::{self, ToPolyTraitRef, Ty};
+use rustc::ty::subst;
+use rustc::ty::{self, ToPolyTraitRef, Ty};
 use std::cmp;
 use syntax::abi::Abi;
-use rustc_front::hir;
+use rustc::hir;
 
 pub fn check_expr_closure<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>,
                                    expr: &hir::Expr,
index f07464592faacbd887fc3785d97f635bfd07d9b6..f1c6868efd2101f19c469e01092079a2383bb3b7 100644 (file)
 
 use check::{autoderef, FnCtxt, UnresolvedTypeAction};
 
-use middle::infer::{self, Coercion, TypeOrigin};
-use middle::traits::{self, ObligationCause};
-use middle::traits::{predicate_for_trait_def, report_selection_error};
-use middle::ty::adjustment::{AutoAdjustment, AutoDerefRef, AdjustDerefRef};
-use middle::ty::adjustment::{AutoPtr, AutoUnsafe, AdjustReifyFnPointer};
-use middle::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
-use middle::ty::{self, LvaluePreference, TypeAndMut, Ty};
-use middle::ty::fold::TypeFoldable;
-use middle::ty::error::TypeError;
-use middle::ty::relate::RelateResult;
+use rustc::infer::{Coercion, InferOk, TypeOrigin, TypeTrace};
+use rustc::traits::{self, ObligationCause};
+use rustc::traits::{predicate_for_trait_def, report_selection_error};
+use rustc::ty::adjustment::{AutoAdjustment, AutoDerefRef, AdjustDerefRef};
+use rustc::ty::adjustment::{AutoPtr, AutoUnsafe, AdjustReifyFnPointer};
+use rustc::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
+use rustc::ty::{self, LvaluePreference, TypeAndMut, Ty, TyCtxt};
+use rustc::ty::fold::TypeFoldable;
+use rustc::ty::error::TypeError;
+use rustc::ty::relate::{RelateResult, TypeRelation};
 use util::common::indent;
 
 use std::cell::RefCell;
 use std::collections::VecDeque;
-use rustc_front::hir;
+use rustc::hir;
 
 struct Coerce<'a, 'tcx: 'a> {
     fcx: &'a FnCtxt<'a, 'tcx>,
-    origin: infer::TypeOrigin,
+    origin: TypeOrigin,
+    use_lub: bool,
     unsizing_obligations: RefCell<Vec<traits::PredicateObligation<'tcx>>>,
 }
 
-type CoerceResult<'tcx> = RelateResult<'tcx, Option<AutoAdjustment<'tcx>>>;
+type CoerceResult<'tcx> = RelateResult<'tcx, (Ty<'tcx>, AutoAdjustment<'tcx>)>;
+
+fn coerce_mutbls<'tcx>(from_mutbl: hir::Mutability,
+                       to_mutbl: hir::Mutability)
+                       -> RelateResult<'tcx, ()> {
+    match (from_mutbl, to_mutbl) {
+        (hir::MutMutable, hir::MutMutable) |
+        (hir::MutImmutable, hir::MutImmutable) |
+        (hir::MutMutable, hir::MutImmutable) => Ok(()),
+        (hir::MutImmutable, hir::MutMutable) => Err(TypeError::Mutability)
+    }
+}
 
 impl<'f, 'tcx> Coerce<'f, 'tcx> {
-    fn tcx(&self) -> &ty::ctxt<'tcx> {
+    fn new(fcx: &'f FnCtxt<'f, 'tcx>, origin: TypeOrigin) -> Self {
+        Coerce {
+            fcx: fcx,
+            origin: origin,
+            use_lub: false,
+            unsizing_obligations: RefCell::new(vec![])
+        }
+    }
+
+    fn tcx(&self) -> &TyCtxt<'tcx> {
         self.fcx.tcx()
     }
 
-    fn subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
-        try!(self.fcx.infcx().sub_types(false, self.origin.clone(), a, b));
-        Ok(None) // No coercion required.
+    fn unify(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+        let infcx = self.fcx.infcx();
+        infcx.commit_if_ok(|_| {
+            let trace = TypeTrace::types(self.origin, false, a, b);
+            if self.use_lub {
+                infcx.lub(false, trace, &a, &b)
+                    .map(|InferOk { value, obligations }| {
+                        // FIXME(#32730) propagate obligations
+                        assert!(obligations.is_empty());
+                        value
+                    })
+            } else {
+                infcx.sub(false, trace, &a, &b)
+                    .map(|InferOk { value, obligations }| {
+                        // FIXME(#32730) propagate obligations
+                        assert!(obligations.is_empty());
+                        value
+                    })
+            }
+        })
     }
 
-    fn unpack_actual_value<T, F>(&self, a: Ty<'tcx>, f: F) -> T where
-        F: FnOnce(Ty<'tcx>) -> T,
-    {
-        f(self.fcx.infcx().shallow_resolve(a))
+    /// Unify two types (using sub or lub) and produce a noop coercion.
+    fn unify_and_identity(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
+        self.unify(&a, &b).and_then(|ty| self.identity(ty))
     }
 
-    fn coerce(&self,
-              expr_a: &hir::Expr,
-              a: Ty<'tcx>,
-              b: Ty<'tcx>)
-              -> CoerceResult<'tcx> {
-        debug!("Coerce.tys({:?} => {:?})",
-               a,
-               b);
+    /// Synthesize an identity adjustment.
+    fn identity(&self, ty: Ty<'tcx>) -> CoerceResult<'tcx> {
+        Ok((ty, AdjustDerefRef(AutoDerefRef {
+            autoderefs: 0,
+            autoref: None,
+            unsize: None
+        })))
+    }
+
+    fn coerce<'a, E, I>(&self,
+                        exprs: &E,
+                        a: Ty<'tcx>,
+                        b: Ty<'tcx>)
+                        -> CoerceResult<'tcx>
+        // FIXME(eddyb) use copyable iterators when that becomes ergonomic.
+        where E: Fn() -> I,
+              I: IntoIterator<Item=&'a hir::Expr> {
 
         let a = self.fcx.infcx().shallow_resolve(a);
+        debug!("Coerce.tys({:?} => {:?})", a, b);
 
         // Just ignore error types.
         if a.references_error() || b.references_error() {
-            return Ok(None);
+            return self.identity(b);
         }
 
         // Consider coercing the subtype to a DST
@@ -133,28 +180,28 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
                 return self.coerce_unsafe_ptr(a, b, mt_b.mutbl);
             }
 
-            ty::TyRef(_, mt_b) => {
-                return self.coerce_borrowed_pointer(expr_a, a, b, mt_b.mutbl);
+            ty::TyRef(r_b, mt_b) => {
+                return self.coerce_borrowed_pointer(exprs, a, b, r_b, mt_b);
             }
 
             _ => {}
         }
 
         match a.sty {
-            ty::TyBareFn(Some(_), a_f) => {
+            ty::TyFnDef(_, _, a_f) => {
                 // Function items are coercible to any closure
                 // type; function pointers are not (that would
                 // require double indirection).
                 self.coerce_from_fn_item(a, a_f, b)
             }
-            ty::TyBareFn(None, a_f) => {
+            ty::TyFnPtr(a_f) => {
                 // We permit coercion of fn pointers to drop the
                 // unsafe qualifier.
                 self.coerce_from_fn_pointer(a, a_f, b)
             }
             _ => {
-                // Otherwise, just use subtyping rules.
-                self.subtype(a, b)
+                // Otherwise, just use unification rules.
+                self.unify_and_identity(a, b)
             }
         }
     }
@@ -162,15 +209,18 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
     /// Reborrows `&mut A` to `&mut B` and `&(mut) A` to `&B`.
     /// To match `A` with `B`, autoderef will be performed,
     /// calling `deref`/`deref_mut` where necessary.
-    fn coerce_borrowed_pointer(&self,
-                               expr_a: &hir::Expr,
-                               a: Ty<'tcx>,
-                               b: Ty<'tcx>,
-                               mutbl_b: hir::Mutability)
-                               -> CoerceResult<'tcx> {
-        debug!("coerce_borrowed_pointer(a={:?}, b={:?})",
-               a,
-               b);
+    fn coerce_borrowed_pointer<'a, E, I>(&self,
+                                         exprs: &E,
+                                         a: Ty<'tcx>,
+                                         b: Ty<'tcx>,
+                                         r_b: &'tcx ty::Region,
+                                         mt_b: TypeAndMut<'tcx>)
+                                         -> CoerceResult<'tcx>
+        // FIXME(eddyb) use copyable iterators when that becomes ergonomic.
+        where E: Fn() -> I,
+              I: IntoIterator<Item=&'a hir::Expr> {
+
+        debug!("coerce_borrowed_pointer(a={:?}, b={:?})", a, b);
 
         // If we have a parameter of type `&M T_a` and the value
         // provided is `expr`, we will be adding an implicit borrow,
@@ -178,58 +228,170 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
         // to type check, we will construct the type that `&M*expr` would
         // yield.
 
-        match a.sty {
-            ty::TyRef(_, mt_a) => {
-                try!(coerce_mutbls(mt_a.mutbl, mutbl_b));
+        let (r_a, mt_a) = match a.sty {
+            ty::TyRef(r_a, mt_a) => {
+                coerce_mutbls(mt_a.mutbl, mt_b.mutbl)?;
+                (r_a, mt_a)
             }
-            _ => return self.subtype(a, b)
-        }
+            _ => return self.unify_and_identity(a, b)
+        };
 
-        let coercion = Coercion(self.origin.span());
-        let r_borrow = self.fcx.infcx().next_region_var(coercion);
-        let r_borrow = self.tcx().mk_region(r_borrow);
-        let autoref = Some(AutoPtr(r_borrow, mutbl_b));
+        let span = self.origin.span();
 
-        let lvalue_pref = LvaluePreference::from_mutbl(mutbl_b);
+        let lvalue_pref = LvaluePreference::from_mutbl(mt_b.mutbl);
         let mut first_error = None;
-        let (_, autoderefs, success) = autoderef(self.fcx,
-                                                 expr_a.span,
-                                                 a,
-                                                 Some(expr_a),
+        let mut r_borrow_var = None;
+        let (_, autoderefs, success) = autoderef(self.fcx, span, a, exprs,
                                                  UnresolvedTypeAction::Ignore,
                                                  lvalue_pref,
-                                                 |inner_ty, autoderef| {
+                                                 |referent_ty, autoderef|
+        {
             if autoderef == 0 {
                 // Don't let this pass, otherwise it would cause
                 // &T to autoref to &&T.
                 return None;
             }
-            let ty = self.tcx().mk_ref(r_borrow,
-                                        TypeAndMut {ty: inner_ty, mutbl: mutbl_b});
-            if let Err(err) = self.subtype(ty, b) {
-                if first_error.is_none() {
-                    first_error = Some(err);
-                }
-                None
+
+            // At this point, we have deref'd `a` to `referent_ty`.  So
+            // imagine we are coercing from `&'a mut Vec<T>` to `&'b mut [T]`.
+            // In the autoderef loop for `&'a mut Vec<T>`, we would get
+            // three callbacks:
+            //
+            // - `&'a mut Vec<T>` -- 0 derefs, just ignore it
+            // - `Vec<T>` -- 1 deref
+            // - `[T]` -- 2 deref
+            //
+            // At each point after the first callback, we want to
+            // check to see whether this would match our target type
+            // (`&'b mut [T]`) if we autoref'd it. We can't just
+            // compare the referent types, though, because we still
+            // have to consider the mutability. E.g., in the case
+            // we've been considering, we have an `&mut` reference, so
+            // the `T` in `[T]` needs to be unified with equality.
+            //
+            // Therefore, we construct reference types reflecting what
+            // the types will be after we do the final auto-ref and
+            // compare those. Note that this means we use the target
+            // mutability [1], since it may be that we are coercing
+            // from `&mut T` to `&U`.
+            //
+            // One fine point concerns the region that we use. We
+            // choose the region such that the region of the final
+            // type that results from `unify` will be the region we
+            // want for the autoref:
+            //
+            // - if in sub mode, that means we want to use `'b` (the
+            //   region from the target reference) for both
+            //   pointers [2]. This is because sub mode (somewhat
+            //   arbitrarily) returns the subtype region.  In the case
+            //   where we are coercing to a target type, we know we
+            //   want to use that target type region (`'b`) because --
+            //   for the program to type-check -- it must be the
+            //   smaller of the two.
+            //   - One fine point. It may be surprising that we can
+            //     use `'b` without relating `'a` and `'b`. The reason
+            //     that this is ok is that what we produce is
+            //     effectively a `&'b *x` expression (if you could
+            //     annotate the region of a borrow), and regionck has
+            //     code that adds edges from the region of a borrow
+            //     (`'b`, here) into the regions in the borrowed
+            //     expression (`*x`, here).  (Search for "link".)
+            // - if in lub mode, things can get fairly complicated. The
+            //   easiest thing is just to make a fresh
+            //   region variable [4], which effectively means we defer
+            //   the decision to region inference (and regionck, which will add
+            //   some more edges to this variable). However, this can wind up
+            //   creating a crippling number of variables in some cases --
+            //   e.g. #32278 -- so we optimize one particular case [3].
+            //   Let me try to explain with some examples:
+            //   - The "running example" above represents the simple case,
+            //     where we have one `&` reference at the outer level and
+            //     ownership all the rest of the way down. In this case,
+            //     we want `LUB('a, 'b)` as the resulting region.
+            //   - However, if there are nested borrows, that region is
+            //     too strong. Consider a coercion from `&'a &'x Rc<T>` to
+            //     `&'b T`. In this case, `'a` is actually irrelevant.
+            //     The pointer we want is `LUB('x, 'b)`. If we choose `LUB('a, 'b)`
+            //     we get spurious errors (`run-pass/regions-lub-ref-ref-rc.rs`).
+            //     (The errors actually show up in borrowck, typically, because
+            //     this extra edge causes the region `'a` to be inferred to something
+            //     too big, which then results in borrowck errors.)
+            //   - We could track the innermost shared reference, but there is already
+            //     code in regionck that has the job of creating links between
+            //     the region of a borrow and the regions in the thing being
+            //     borrowed (here, `'a` and `'x`), and it knows how to handle
+            //     all the various cases. So instead we just make a region variable
+            //     and let regionck figure it out.
+            let r = if !self.use_lub {
+                r_b // [2] above
+            } else if autoderef == 1 {
+                r_a // [3] above
             } else {
-                Some(())
+                if r_borrow_var.is_none() { // create var lazily, at most once
+                    let coercion = Coercion(span);
+                    let r = self.fcx.infcx().next_region_var(coercion);
+                    r_borrow_var = Some(self.tcx().mk_region(r)); // [4] above
+                }
+                r_borrow_var.unwrap()
+            };
+            let derefd_ty_a = self.tcx().mk_ref(r, TypeAndMut {
+                ty: referent_ty,
+                mutbl: mt_b.mutbl // [1] above
+            });
+            match self.unify(derefd_ty_a, b) {
+                Ok(ty) => Some(ty),
+                Err(err) => {
+                    if first_error.is_none() {
+                        first_error = Some(err);
+                    }
+                    None
+                }
             }
         });
 
-        match success {
-            Some(_) => {
-                Ok(Some(AdjustDerefRef(AutoDerefRef {
-                    autoderefs: autoderefs,
-                    autoref: autoref,
-                    unsize: None
-                })))
-            }
+        // Extract type or return an error. We return the first error
+        // we got, which should be from relating the "base" type
+        // (e.g., in example above, the failure from relating `Vec<T>`
+        // to the target type), since that should be the least
+        // confusing.
+        let ty = match success {
+            Some(ty) => ty,
             None => {
-                // Return original error as if overloaded deref was never
-                // attempted, to avoid irrelevant/confusing error messages.
-                Err(first_error.expect("coerce_borrowed_pointer failed with no error?"))
+                let err = first_error.expect("coerce_borrowed_pointer had no error");
+                debug!("coerce_borrowed_pointer: failed with err = {:?}", err);
+                return Err(err);
             }
+        };
+
+        // Now apply the autoref. We have to extract the region out of
+        // the final ref type we got.
+        if ty == a && mt_a.mutbl == hir::MutImmutable && autoderefs == 1 {
+            // As a special case, if we would produce `&'a *x`, that's
+            // a total no-op. We end up with the type `&'a T` just as
+            // we started with.  In that case, just skip it
+            // altogether. This is just an optimization.
+            //
+            // Note that for `&mut`, we DO want to reborrow --
+            // otherwise, this would be a move, which might be an
+            // error. For example `foo(self.x)` where `self` and
+            // `self.x` both have `&mut` type would be a move of
+            // `self.x`, but we auto-coerce it to `foo(&mut *self.x)`,
+            // which is a borrow.
+            assert_eq!(mt_b.mutbl, hir::MutImmutable); // can only coerce &T -> &U
+            return self.identity(ty);
         }
+        let r_borrow = match ty.sty {
+            ty::TyRef(r_borrow, _) => r_borrow,
+            _ => span_bug!(span, "expected a ref type, got {:?}", ty)
+        };
+        let autoref = Some(AutoPtr(r_borrow, mt_b.mutbl));
+        debug!("coerce_borrowed_pointer: succeeded ty={:?} autoderefs={:?} autoref={:?}",
+               ty, autoderefs, autoref);
+        Ok((ty, AdjustDerefRef(AutoDerefRef {
+            autoderefs: autoderefs,
+            autoref: autoref,
+            unsize: None
+        })))
     }
 
 
@@ -261,7 +423,7 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
         // Handle reborrows before selecting `Source: CoerceUnsized<Target>`.
         let (source, reborrow) = match (&source.sty, &target.sty) {
             (&ty::TyRef(_, mt_a), &ty::TyRef(_, mt_b)) => {
-                try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl));
+                coerce_mutbls(mt_a.mutbl, mt_b.mutbl)?;
 
                 let coercion = Coercion(self.origin.span());
                 let r_borrow = self.fcx.infcx().next_region_var(coercion);
@@ -269,7 +431,7 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
                 (mt_a.ty, Some(AutoPtr(region, mt_b.mutbl)))
             }
             (&ty::TyRef(_, mt_a), &ty::TyRawPtr(mt_b)) => {
-                try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl));
+                coerce_mutbls(mt_a.mutbl, mt_b.mutbl)?;
                 (mt_a.ty, Some(AutoUnsafe(mt_b.mutbl)))
             }
             _ => (source, None)
@@ -329,9 +491,7 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
             }
         }
 
-        let mut obligations = self.unsizing_obligations.borrow_mut();
-        assert!(obligations.is_empty());
-        *obligations = leftover_predicates;
+        *self.unsizing_obligations.borrow_mut() = leftover_predicates;
 
         let adjustment = AutoDerefRef {
             autoderefs: if reborrow.is_some() { 1 } else { 0 },
@@ -339,7 +499,7 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
             unsize: Some(target)
         };
         debug!("Success, coerced with {:?}", adjustment);
-        Ok(Some(AdjustDerefRef(adjustment)))
+        Ok((target, AdjustDerefRef(adjustment)))
     }
 
     fn coerce_from_fn_pointer(&self,
@@ -353,22 +513,21 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
          * into a closure or a `proc`.
          */
 
-        self.unpack_actual_value(b, |b| {
-            debug!("coerce_from_fn_pointer(a={:?}, b={:?})",
-                   a, b);
+        let b = self.fcx.infcx().shallow_resolve(b);
+        debug!("coerce_from_fn_pointer(a={:?}, b={:?})", a, b);
 
-            if let ty::TyBareFn(None, fn_ty_b) = b.sty {
-                match (fn_ty_a.unsafety, fn_ty_b.unsafety) {
-                    (hir::Unsafety::Normal, hir::Unsafety::Unsafe) => {
-                        let unsafe_a = self.tcx().safe_to_unsafe_fn_ty(fn_ty_a);
-                        try!(self.subtype(unsafe_a, b));
-                        return Ok(Some(AdjustUnsafeFnPointer));
-                    }
-                    _ => {}
+        if let ty::TyFnPtr(fn_ty_b) = b.sty {
+            match (fn_ty_a.unsafety, fn_ty_b.unsafety) {
+                (hir::Unsafety::Normal, hir::Unsafety::Unsafe) => {
+                    let unsafe_a = self.tcx().safe_to_unsafe_fn_ty(fn_ty_a);
+                    return self.unify_and_identity(unsafe_a, b).map(|(ty, _)| {
+                        (ty, AdjustUnsafeFnPointer)
+                    });
                 }
+                _ => {}
             }
-            self.subtype(a, b)
-        })
+        }
+        self.unify_and_identity(a, b)
     }
 
     fn coerce_from_fn_item(&self,
@@ -381,19 +540,18 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
          * into a closure or a `proc`.
          */
 
-        self.unpack_actual_value(b, |b| {
-            debug!("coerce_from_fn_item(a={:?}, b={:?})",
-                   a, b);
+        let b = self.fcx.infcx().shallow_resolve(b);
+        debug!("coerce_from_fn_item(a={:?}, b={:?})", a, b);
 
-            match b.sty {
-                ty::TyBareFn(None, _) => {
-                    let a_fn_pointer = self.tcx().mk_fn(None, fn_ty_a);
-                    try!(self.subtype(a_fn_pointer, b));
-                    Ok(Some(AdjustReifyFnPointer))
-                }
-                _ => self.subtype(a, b)
+        match b.sty {
+            ty::TyFnPtr(_) => {
+                let a_fn_pointer = self.tcx().mk_ty(ty::TyFnPtr(fn_ty_a));
+                self.unify_and_identity(a_fn_pointer, b).map(|(ty, _)| {
+                    (ty, AdjustReifyFnPointer)
+                })
             }
-        })
+            _ => self.unify_and_identity(a, b)
+        }
     }
 
     fn coerce_unsafe_ptr(&self,
@@ -409,74 +567,215 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
             ty::TyRef(_, mt) => (true, mt),
             ty::TyRawPtr(mt) => (false, mt),
             _ => {
-                return self.subtype(a, b);
+                return self.unify_and_identity(a, b);
             }
         };
 
         // Check that the types which they point at are compatible.
         let a_unsafe = self.tcx().mk_ptr(ty::TypeAndMut{ mutbl: mutbl_b, ty: mt_a.ty });
-        try!(self.subtype(a_unsafe, b));
-        try!(coerce_mutbls(mt_a.mutbl, mutbl_b));
+        let (ty, noop) = self.unify_and_identity(a_unsafe, b)?;
+        coerce_mutbls(mt_a.mutbl, mutbl_b)?;
 
         // Although references and unsafe ptrs have the same
         // representation, we still register an AutoDerefRef so that
         // regionck knows that the region for `a` must be valid here.
-        if is_ref {
-            Ok(Some(AdjustDerefRef(AutoDerefRef {
+        Ok((ty, if is_ref {
+            AdjustDerefRef(AutoDerefRef {
                 autoderefs: 1,
                 autoref: Some(AutoUnsafe(mutbl_b)),
                 unsize: None
-            })))
+            })
         } else if mt_a.mutbl != mutbl_b {
-            Ok(Some(AdjustMutToConstPointer))
+            AdjustMutToConstPointer
         } else {
-            Ok(None)
-        }
+            noop
+        }))
     }
 }
 
-pub fn mk_assignty<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
-                             expr: &hir::Expr,
+fn apply<'a, 'b, 'tcx, E, I>(coerce: &mut Coerce<'a, 'tcx>,
+                             exprs: &E,
                              a: Ty<'tcx>,
                              b: Ty<'tcx>)
-                             -> RelateResult<'tcx, ()> {
-    debug!("mk_assignty({:?} -> {:?})", a, b);
-    let mut unsizing_obligations = vec![];
-    let adjustment = try!(indent(|| {
-        fcx.infcx().commit_if_ok(|_| {
-            let coerce = Coerce {
-                fcx: fcx,
-                origin: TypeOrigin::ExprAssignable(expr.span),
-                unsizing_obligations: RefCell::new(vec![])
-            };
-            let adjustment = try!(coerce.coerce(expr, a, b));
-            unsizing_obligations = coerce.unsizing_obligations.into_inner();
-            Ok(adjustment)
-        })
-    }));
+                             -> CoerceResult<'tcx>
+    where E: Fn() -> I,
+          I: IntoIterator<Item=&'b hir::Expr> {
 
-    if let Some(AdjustDerefRef(auto)) = adjustment {
+    let (ty, adjustment) = indent(|| coerce.coerce(exprs, a, b))?;
+
+    let fcx = coerce.fcx;
+    if let AdjustDerefRef(auto) = adjustment {
         if auto.unsize.is_some() {
-            for obligation in unsizing_obligations {
+            let mut obligations = coerce.unsizing_obligations.borrow_mut();
+            for obligation in obligations.drain(..) {
                 fcx.register_predicate(obligation);
             }
         }
     }
 
-    if let Some(adjustment) = adjustment {
-        debug!("Success, coerced with {:?}", adjustment);
-        fcx.write_adjustment(expr.id, adjustment);
-    }
-    Ok(())
+    Ok((ty, adjustment))
 }
 
-fn coerce_mutbls<'tcx>(from_mutbl: hir::Mutability,
-                       to_mutbl: hir::Mutability)
-                       -> CoerceResult<'tcx> {
-    match (from_mutbl, to_mutbl) {
-        (hir::MutMutable, hir::MutMutable) |
-        (hir::MutImmutable, hir::MutImmutable) |
-        (hir::MutMutable, hir::MutImmutable) => Ok(None),
-        (hir::MutImmutable, hir::MutMutable) => Err(TypeError::Mutability)
+/// Attempt to coerce an expression to a type, and return the
+/// adjusted type of the expression, if successful.
+/// Adjustments are only recorded if the coercion succeeded.
+/// The expressions *must not* have any pre-existing adjustments.
+pub fn try<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
+                     expr: &hir::Expr,
+                     target: Ty<'tcx>)
+                     -> RelateResult<'tcx, Ty<'tcx>> {
+    let source = fcx.resolve_type_vars_if_possible(fcx.expr_ty(expr));
+    debug!("coercion::try({:?}: {:?} -> {:?})", expr, source, target);
+
+    let mut coerce = Coerce::new(fcx, TypeOrigin::ExprAssignable(expr.span));
+    fcx.infcx().commit_if_ok(|_| {
+        let (ty, adjustment) =
+            apply(&mut coerce, &|| Some(expr), source, target)?;
+        if !adjustment.is_identity() {
+            debug!("Success, coerced with {:?}", adjustment);
+            assert!(!fcx.inh.tables.borrow().adjustments.contains_key(&expr.id));
+            fcx.write_adjustment(expr.id, adjustment);
+        }
+        Ok(ty)
+    })
+}
+
+/// Given some expressions, their known unified type and another expression,
+/// tries to unify the types, potentially inserting coercions on any of the
+/// provided expressions and returns their LUB (aka "common supertype").
+pub fn try_find_lub<'a, 'b, 'tcx, E, I>(fcx: &FnCtxt<'a, 'tcx>,
+                                        origin: TypeOrigin,
+                                        exprs: E,
+                                        prev_ty: Ty<'tcx>,
+                                        new: &'b hir::Expr)
+                                        -> RelateResult<'tcx, Ty<'tcx>>
+    // FIXME(eddyb) use copyable iterators when that becomes ergonomic.
+    where E: Fn() -> I,
+          I: IntoIterator<Item=&'b hir::Expr> {
+
+    let prev_ty = fcx.resolve_type_vars_if_possible(prev_ty);
+    let new_ty = fcx.resolve_type_vars_if_possible(fcx.expr_ty(new));
+    debug!("coercion::try_find_lub({:?}, {:?})", prev_ty, new_ty);
+
+    let trace = TypeTrace::types(origin, true, prev_ty, new_ty);
+
+    // Special-case that coercion alone cannot handle:
+    // Two function item types of differing IDs or Substs.
+    match (&prev_ty.sty, &new_ty.sty) {
+        (&ty::TyFnDef(a_def_id, a_substs, a_fty),
+         &ty::TyFnDef(b_def_id, b_substs, b_fty)) => {
+            // The signature must always match.
+            let fty = fcx.infcx().lub(true, trace.clone(), a_fty, b_fty)
+                .map(|InferOk { value, obligations }| {
+                    // FIXME(#32730) propagate obligations
+                    assert!(obligations.is_empty());
+                    value
+                })?;
+
+            if a_def_id == b_def_id {
+                // Same function, maybe the parameters match.
+                let substs = fcx.infcx().commit_if_ok(|_| {
+                    fcx.infcx().lub(true, trace.clone(), a_substs, b_substs)
+                        .map(|InferOk { value, obligations }| {
+                            // FIXME(#32730) propagate obligations
+                            assert!(obligations.is_empty());
+                            value
+                        })
+                }).map(|s| fcx.tcx().mk_substs(s));
+
+                if let Ok(substs) = substs {
+                    // We have a LUB of prev_ty and new_ty, just return it.
+                    return Ok(fcx.tcx().mk_fn_def(a_def_id, substs, fty));
+                }
+            }
+
+            // Reify both sides and return the reified fn pointer type.
+            for expr in exprs().into_iter().chain(Some(new)) {
+                // No adjustments can produce a fn item, so this should never trip.
+                assert!(!fcx.inh.tables.borrow().adjustments.contains_key(&expr.id));
+                fcx.write_adjustment(expr.id, AdjustReifyFnPointer);
+            }
+            return Ok(fcx.tcx().mk_fn_ptr(fty));
+        }
+        _ => {}
+    }
+
+    let mut coerce = Coerce::new(fcx, origin);
+    coerce.use_lub = true;
+
+    // First try to coerce the new expression to the type of the previous ones,
+    // but only if the new expression has no coercion already applied to it.
+    let mut first_error = None;
+    if !fcx.inh.tables.borrow().adjustments.contains_key(&new.id) {
+        let result = fcx.infcx().commit_if_ok(|_| {
+            apply(&mut coerce, &|| Some(new), new_ty, prev_ty)
+        });
+        match result {
+            Ok((ty, adjustment)) => {
+                if !adjustment.is_identity() {
+                    fcx.write_adjustment(new.id, adjustment);
+                }
+                return Ok(ty);
+            }
+            Err(e) => first_error = Some(e)
+        }
+    }
+
+    // Then try to coerce the previous expressions to the type of the new one.
+    // This requires ensuring there are no coercions applied to *any* of the
+    // previous expressions, other than noop reborrows (ignoring lifetimes).
+    for expr in exprs() {
+        let noop = match fcx.inh.tables.borrow().adjustments.get(&expr.id) {
+            Some(&AdjustDerefRef(AutoDerefRef {
+                autoderefs: 1,
+                autoref: Some(AutoPtr(_, mutbl_adj)),
+                unsize: None
+            })) => match fcx.expr_ty(expr).sty {
+                ty::TyRef(_, mt_orig) => {
+                    // Reborrow that we can safely ignore.
+                    mutbl_adj == mt_orig.mutbl
+                }
+                _ => false
+            },
+            Some(_) => false,
+            None => true
+        };
+
+        if !noop {
+            return fcx.infcx().commit_if_ok(|_| {
+                fcx.infcx().lub(true, trace.clone(), &prev_ty, &new_ty)
+                    .map(|InferOk { value, obligations }| {
+                        // FIXME(#32730) propagate obligations
+                        assert!(obligations.is_empty());
+                        value
+                    })
+            });
+        }
+    }
+
+    match fcx.infcx().commit_if_ok(|_| apply(&mut coerce, &exprs, prev_ty, new_ty)) {
+        Err(_) => {
+            // Avoid giving strange errors on failed attempts.
+            if let Some(e) = first_error {
+                Err(e)
+            } else {
+                fcx.infcx().commit_if_ok(|_| {
+                    fcx.infcx().lub(true, trace, &prev_ty, &new_ty)
+                        .map(|InferOk { value, obligations }| {
+                            // FIXME(#32730) propagate obligations
+                            assert!(obligations.is_empty());
+                            value
+                        })
+                })
+            }
+        }
+        Ok((ty, adjustment)) => {
+            if !adjustment.is_identity() {
+                for expr in exprs() {
+                    fcx.write_adjustment(expr.id, adjustment);
+                }
+            }
+            Ok(ty)
+        }
     }
 }
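// A standalone sketch (not part of the patch): the fn-item special case in
// `try_find_lub` above is what lets two distinct function item types meet at a
// common reified fn pointer type.
fn one() -> i32 { 1 }
fn two() -> i32 { 2 }

fn main() {
    let first = false;
    // `one` and `two` have distinct zero-sized fn item types; their LUB is the
    // fn pointer type `fn() -> i32`, obtained by reifying both sides.
    let pick = if first { one } else { two };
    assert_eq!(pick(), 2);
}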
index d674fa145dc9ace491fcbff26b438947e6736f27..3c12ab8d59840ea61ade8994d21005ed7ca109f8 100644 (file)
@@ -9,10 +9,10 @@
 // except according to those terms.
 
 use middle::free_region::FreeRegionMap;
-use middle::infer::{self, TypeOrigin};
-use middle::traits;
-use middle::ty::{self};
-use middle::subst::{self, Subst, Substs, VecPerParamSpace};
+use rustc::infer::{self, InferOk, TypeOrigin};
+use rustc::ty::{self, TyCtxt};
+use rustc::traits::{self, ProjectionMode};
+use rustc::ty::subst::{self, Subst, Substs, VecPerParamSpace};
 
 use syntax::ast;
 use syntax::codemap::Span;
@@ -30,7 +30,7 @@ use super::assoc;
 /// - trait_m: the method in the trait
 /// - impl_trait_ref: the TraitRef corresponding to the trait implementation
 
-pub fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
+pub fn compare_impl_method<'tcx>(tcx: &TyCtxt<'tcx>,
                                  impl_m: &ty::Method<'tcx>,
                                  impl_m_span: Span,
                                  impl_m_body_id: ast::NodeId,
@@ -42,7 +42,7 @@ pub fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
     debug!("compare_impl_method: impl_trait_ref (liberated) = {:?}",
            impl_trait_ref);
 
-    let mut infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None);
+    let mut infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, ProjectionMode::AnyFinal);
     let mut fulfillment_cx = traits::FulfillmentContext::new();
 
     let trait_to_impl_substs = &impl_trait_ref.substs;
@@ -180,7 +180,7 @@ pub fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
         trait_to_impl_substs
         .subst(tcx, impl_to_skol_substs)
         .with_method(impl_to_skol_substs.types.get_slice(subst::FnSpace).to_vec(),
-                     impl_to_skol_substs.regions().get_slice(subst::FnSpace).to_vec());
+                     impl_to_skol_substs.regions.get_slice(subst::FnSpace).to_vec());
     debug!("compare_impl_method: trait_to_skol_substs={:?}",
            trait_to_skol_substs);
 
@@ -276,9 +276,9 @@ pub fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
     // type.
 
     // Compute skolemized form of impl and trait method tys.
-    let impl_fty = tcx.mk_fn(None, tcx.mk_bare_fn(impl_m.fty.clone()));
+    let impl_fty = tcx.mk_fn_ptr(impl_m.fty.clone());
     let impl_fty = impl_fty.subst(tcx, impl_to_skol_substs);
-    let trait_fty = tcx.mk_fn(None, tcx.mk_bare_fn(trait_m.fty.clone()));
+    let trait_fty = tcx.mk_fn_ptr(trait_m.fty.clone());
     let trait_fty = trait_fty.subst(tcx, &trait_to_skol_substs);
 
     let err = infcx.commit_if_ok(|snapshot| {
@@ -296,11 +296,11 @@ pub fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
                                                  impl_m_span,
                                                  impl_m_body_id,
                                                  &impl_sig);
-        let impl_fty = tcx.mk_fn(None, tcx.mk_bare_fn(ty::BareFnTy {
+        let impl_fty = tcx.mk_fn_ptr(ty::BareFnTy {
             unsafety: impl_m.fty.unsafety,
             abi: impl_m.fty.abi,
             sig: ty::Binder(impl_sig)
-        }));
+        });
         debug!("compare_impl_method: impl_fty={:?}",
                impl_fty);
 
@@ -314,16 +314,16 @@ pub fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
                                                  impl_m_span,
                                                  impl_m_body_id,
                                                  &trait_sig);
-        let trait_fty = tcx.mk_fn(None, tcx.mk_bare_fn(ty::BareFnTy {
+        let trait_fty = tcx.mk_fn_ptr(ty::BareFnTy {
             unsafety: trait_m.fty.unsafety,
             abi: trait_m.fty.abi,
             sig: ty::Binder(trait_sig)
-        }));
+        });
 
         debug!("compare_impl_method: trait_fty={:?}",
                trait_fty);
 
-        try!(infer::mk_subty(&infcx, false, origin, impl_fty, trait_fty));
+        infer::mk_subty(&infcx, false, origin, impl_fty, trait_fty)?;
 
         infcx.leak_check(&skol_map, snapshot)
     });
@@ -364,7 +364,7 @@ pub fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
 
     infcx.resolve_regions_and_report_errors(&free_regions, impl_m_body_id);
 
-    fn check_region_bounds_on_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
+    fn check_region_bounds_on_impl_method<'tcx>(tcx: &TyCtxt<'tcx>,
                                                 span: Span,
                                                 impl_m: &ty::Method<'tcx>,
                                                 trait_generics: &ty::Generics<'tcx>,
@@ -408,7 +408,7 @@ pub fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
     }
 }
 
-pub fn compare_const_impl<'tcx>(tcx: &ty::ctxt<'tcx>,
+pub fn compare_const_impl<'tcx>(tcx: &TyCtxt<'tcx>,
                                 impl_c: &ty::AssociatedConst<'tcx>,
                                 impl_c_span: Span,
                                 trait_c: &ty::AssociatedConst<'tcx>,
@@ -416,7 +416,7 @@ pub fn compare_const_impl<'tcx>(tcx: &ty::ctxt<'tcx>,
     debug!("compare_const_impl(impl_trait_ref={:?})",
            impl_trait_ref);
 
-    let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None);
+    let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, ProjectionMode::AnyFinal);
     let mut fulfillment_cx = traits::FulfillmentContext::new();
 
     // The below is for the most part highly similar to the procedure
@@ -439,7 +439,7 @@ pub fn compare_const_impl<'tcx>(tcx: &ty::ctxt<'tcx>,
         trait_to_impl_substs
         .subst(tcx, impl_to_skol_substs)
         .with_method(impl_to_skol_substs.types.get_slice(subst::FnSpace).to_vec(),
-                     impl_to_skol_substs.regions().get_slice(subst::FnSpace).to_vec());
+                     impl_to_skol_substs.regions.get_slice(subst::FnSpace).to_vec());
     debug!("compare_const_impl: trait_to_skol_substs={:?}",
            trait_to_skol_substs);
 
@@ -475,7 +475,10 @@ pub fn compare_const_impl<'tcx>(tcx: &ty::ctxt<'tcx>,
     });
 
     match err {
-        Ok(()) => { }
+        Ok(InferOk { obligations, .. }) => {
+            // FIXME(#32730) propagate obligations
+            assert!(obligations.is_empty())
+        }
         Err(terr) => {
             debug!("checking associated const for compatibility: impl ty {:?}, trait ty {:?}",
                    impl_ty,
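// A standalone sketch (not part of the patch): `compare_impl_method` above is
// the check that requires an impl's method signature to be compatible with the
// trait's declared signature. The trait and impl below are hypothetical names
// used only for illustration.
trait Greeter {
    fn greet(&self, name: &str) -> String;
}

struct Plain;

impl Greeter for Plain {
    // Changing `name: &str` to, say, `name: String` here would be rejected by
    // that check with a signature mismatch error.
    fn greet(&self, name: &str) -> String {
        format!("hello, {}", name)
    }
}

fn main() {
    assert_eq!(Plain.greet("world"), "hello, world");
}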
index 63dac49b384a7aa23d865bf4494ed2114363cd76..5b163eb7de452ebc05ad1577cd371e73df0dae74 100644 (file)
 
 
 use check::{coercion, FnCtxt};
-use middle::ty::{self, Ty};
-use middle::infer::{self, TypeOrigin};
+use rustc::ty::Ty;
+use rustc::infer::{InferOk, TypeOrigin};
 
-use std::result::Result::{Err, Ok};
 use syntax::codemap::Span;
-use rustc_front::hir;
+use rustc::hir;
 
 // Requires that the two types unify, and prints an error message if
 // they don't.
 pub fn suptype<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, sp: Span,
-                         ty_expected: Ty<'tcx>, ty_actual: Ty<'tcx>) {
-    suptype_with_fn(fcx, sp, false, ty_expected, ty_actual,
-        |sp, e, a, s| { fcx.report_mismatched_types(sp, e, a, s) })
-}
-
-/// As `suptype`, but call `handle_err` if unification for subtyping fails.
-pub fn suptype_with_fn<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
-                                    sp: Span,
-                                    b_is_expected: bool,
-                                    ty_a: Ty<'tcx>,
-                                    ty_b: Ty<'tcx>,
-                                    handle_err: F) where
-    F: FnOnce(Span, Ty<'tcx>, Ty<'tcx>, &ty::error::TypeError<'tcx>),
-{
-    // n.b.: order of actual, expected is reversed
-    match infer::mk_subty(fcx.infcx(), b_is_expected, TypeOrigin::Misc(sp),
-                          ty_b, ty_a) {
-      Ok(()) => { /* ok */ }
-      Err(ref err) => {
-          handle_err(sp, ty_a, ty_b, err);
-      }
+                         expected: Ty<'tcx>, actual: Ty<'tcx>) {
+    let origin = TypeOrigin::Misc(sp);
+    match fcx.infcx().sub_types(false, origin, actual, expected) {
+        Ok(InferOk { obligations, .. }) => {
+            // FIXME(#32730) propagate obligations
+            assert!(obligations.is_empty());
+        },
+        Err(e) => {
+            fcx.infcx().report_mismatched_types(origin, expected, actual, e);
+        }
     }
 }
 
 pub fn eqtype<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, sp: Span,
                         expected: Ty<'tcx>, actual: Ty<'tcx>) {
-    match infer::mk_eqty(fcx.infcx(), false, TypeOrigin::Misc(sp), actual, expected) {
-        Ok(()) => { /* ok */ }
-        Err(ref err) => { fcx.report_mismatched_types(sp, expected, actual, err); }
+    let origin = TypeOrigin::Misc(sp);
+    match fcx.infcx().eq_types(false, origin, actual, expected) {
+        Ok(InferOk { obligations, .. }) => {
+            // FIXME(#32730) propagate obligations
+            assert!(obligations.is_empty());
+        },
+        Err(e) => {
+            fcx.infcx().report_mismatched_types(origin, expected, actual, e);
+        }
     }
 }
 
@@ -57,16 +51,10 @@ pub fn coerce<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                         sp: Span,
                         expected: Ty<'tcx>,
                         expr: &hir::Expr) {
-    let expr_ty = fcx.expr_ty(expr);
-    debug!("demand::coerce(expected = {:?}, expr_ty = {:?})",
-           expected,
-           expr_ty);
-    let expr_ty = fcx.resolve_type_vars_if_possible(expr_ty);
     let expected = fcx.resolve_type_vars_if_possible(expected);
-    match coercion::mk_assignty(fcx, expr, expr_ty, expected) {
-      Ok(()) => { /* ok */ }
-      Err(ref err) => {
-        fcx.report_mismatched_types(sp, expected, expr_ty, err);
-      }
+    if let Err(e) = coercion::try(fcx, expr, expected) {
+        let origin = TypeOrigin::Misc(sp);
+        let expr_ty = fcx.resolve_type_vars_if_possible(fcx.expr_ty(expr));
+        fcx.infcx().report_mismatched_types(origin, expected, expr_ty, e);
     }
 }
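// A standalone sketch (not part of the patch): `demand::coerce` now defers to
// `coercion::try`, which at the surface is the coercion applied when an
// expression meets a known expected type, e.g. an annotated `let` or a call
// argument. The helper below is a hypothetical name used only for illustration.
fn count(bytes: &[u8]) -> usize { bytes.len() }

fn main() {
    let data = vec![1u8, 2, 3];
    // `&Vec<u8>` coerces to the expected parameter type `&[u8]`.
    assert_eq!(count(&data), 3);
    // The same coercion at an annotated binding.
    let view: &[u8] = &data;
    assert_eq!(view.len(), 3);
}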
index 9d39653375739531f56660c839397f1319d83fa6..368b826b1bb39f24c6c8f9a15962a089f096c2bf 100644 (file)
 
 use check::regionck::{self, Rcx};
 
-use middle::def_id::DefId;
+use hir::def_id::DefId;
 use middle::free_region::FreeRegionMap;
-use middle::infer;
+use rustc::infer;
 use middle::region;
-use middle::subst::{self, Subst};
-use middle::traits;
-use middle::ty::{self, Ty};
+use rustc::ty::subst::{self, Subst};
+use rustc::ty::{self, Ty, TyCtxt};
+use rustc::traits::{self, ProjectionMode};
 use util::nodemap::FnvHashSet;
 
 use syntax::ast;
 use syntax::codemap::{self, Span};
-use syntax::parse::token::special_idents;
 
 /// check_drop_impl confirms that the Drop implementation identified by
 /// `drop_impl_did` is not any more specialized than the type it is
@@ -40,18 +39,18 @@ use syntax::parse::token::special_idents;
 ///    struct/enum definition for the nominal type itself (i.e.
 ///    cannot do `struct S<T>; impl<T:Clone> Drop for S<T> { ... }`).
 ///
-pub fn check_drop_impl(tcx: &ty::ctxt, drop_impl_did: DefId) -> Result<(), ()> {
+pub fn check_drop_impl(tcx: &TyCtxt, drop_impl_did: DefId) -> Result<(), ()> {
     let ty::TypeScheme { generics: ref dtor_generics,
                          ty: dtor_self_type } = tcx.lookup_item_type(drop_impl_did);
     let dtor_predicates = tcx.lookup_predicates(drop_impl_did);
     match dtor_self_type.sty {
         ty::TyEnum(adt_def, self_to_impl_substs) |
         ty::TyStruct(adt_def, self_to_impl_substs) => {
-            try!(ensure_drop_params_and_item_params_correspond(tcx,
-                                                               drop_impl_did,
-                                                               dtor_generics,
-                                                               &dtor_self_type,
-                                                               adt_def.did));
+            ensure_drop_params_and_item_params_correspond(tcx,
+                                                          drop_impl_did,
+                                                          dtor_generics,
+                                                          &dtor_self_type,
+                                                          adt_def.did)?;
 
             ensure_drop_predicates_are_implied_by_item_defn(tcx,
                                                             drop_impl_did,
@@ -63,15 +62,15 @@ pub fn check_drop_impl(tcx: &ty::ctxt, drop_impl_did: DefId) -> Result<(), ()> {
             // Destructors only work on nominal types.  This was
             // already checked by coherence, so we can panic here.
             let span = tcx.map.def_id_span(drop_impl_did, codemap::DUMMY_SP);
-            tcx.sess.span_bug(
-                span, &format!("should have been rejected by coherence check: {}",
-                               dtor_self_type));
+            span_bug!(span,
+                      "should have been rejected by coherence check: {}",
+                      dtor_self_type);
         }
     }
 }
 
 fn ensure_drop_params_and_item_params_correspond<'tcx>(
-    tcx: &ty::ctxt<'tcx>,
+    tcx: &TyCtxt<'tcx>,
     drop_impl_did: DefId,
     drop_impl_generics: &ty::Generics<'tcx>,
     drop_impl_ty: &ty::Ty<'tcx>,
@@ -83,7 +82,10 @@ fn ensure_drop_params_and_item_params_correspond<'tcx>(
     // check that the impl type can be made to match the trait type.
 
     let impl_param_env = ty::ParameterEnvironment::for_item(tcx, self_type_node_id);
-    let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(impl_param_env));
+    let infcx = infer::new_infer_ctxt(tcx,
+                                      &tcx.tables,
+                                      Some(impl_param_env),
+                                      ProjectionMode::AnyFinal);
     let mut fulfillment_cx = traits::FulfillmentContext::new();
 
     let named_type = tcx.lookup_item_type(self_type_did).ty;
@@ -120,7 +122,7 @@ fn ensure_drop_params_and_item_params_correspond<'tcx>(
 /// Confirms that every predicate imposed by dtor_predicates is
 /// implied by assuming the predicates attached to self_type_did.
 fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>(
-    tcx: &ty::ctxt<'tcx>,
+    tcx: &TyCtxt<'tcx>,
     drop_impl_did: DefId,
     dtor_predicates: &ty::GenericPredicates<'tcx>,
     self_type_did: DefId,
@@ -274,8 +276,7 @@ pub fn check_safety_of_destructor_if_necessary<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>
            typ, scope);
 
     let parent_scope = rcx.tcx().region_maps.opt_encl_scope(scope).unwrap_or_else(|| {
-        rcx.tcx().sess.span_bug(
-            span, &format!("no enclosing scope found for scope: {:?}", scope))
+        span_bug!(span, "no enclosing scope found for scope: {:?}", scope)
     });
 
     let result = iterate_over_potentially_unsafe_regions_in_type(
@@ -299,7 +300,7 @@ pub fn check_safety_of_destructor_if_necessary<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>
                     // no need for an additional note if the overflow
                     // was somehow on the root.
                 }
-                TypeContext::ADT { def_id, variant, field, field_index } => {
+                TypeContext::ADT { def_id, variant, field } => {
                     let adt = tcx.lookup_adt_def(def_id);
                     let variant_name = match adt.adt_kind() {
                         ty::AdtKind::Enum => format!("enum {} variant {}",
@@ -308,17 +309,12 @@ pub fn check_safety_of_destructor_if_necessary<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>
                         ty::AdtKind::Struct => format!("struct {}",
                                                        tcx.item_path_str(def_id))
                     };
-                    let field_name = if field == special_idents::unnamed_field.name {
-                        format!("#{}", field_index)
-                    } else {
-                        format!("`{}`", field)
-                    };
                     span_note!(
                         &mut err,
                         span,
                         "overflowed on {} field {} type: {}",
                         variant_name,
-                        field_name,
+                        field,
                         detected_on_typ);
                 }
             }
@@ -338,7 +334,6 @@ enum TypeContext {
         def_id: DefId,
         variant: ast::Name,
         field: ast::Name,
-        field_index: usize
     }
 }
 
@@ -452,20 +447,19 @@ fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'tcx>(
         ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
             let did = def.did;
             for variant in &def.variants {
-                for (i, field) in variant.fields.iter().enumerate() {
+                for field in variant.fields.iter() {
                     let fty = field.ty(tcx, substs);
                     let fty = cx.rcx.fcx.resolve_type_vars_if_possible(
                         cx.rcx.fcx.normalize_associated_types_in(cx.span, &fty));
-                    try!(iterate_over_potentially_unsafe_regions_in_type(
+                    iterate_over_potentially_unsafe_regions_in_type(
                         cx,
                         TypeContext::ADT {
                             def_id: did,
                             field: field.name,
                             variant: variant.name,
-                            field_index: i
                         },
                         fty,
-                        depth+1))
+                        depth+1)?
                 }
             }
             Ok(())
@@ -474,8 +468,7 @@ fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'tcx>(
         ty::TyTuple(ref tys) |
         ty::TyClosure(_, box ty::ClosureSubsts { upvar_tys: ref tys, .. }) => {
             for ty in tys {
-                try!(iterate_over_potentially_unsafe_regions_in_type(
-                    cx, context, ty, depth+1))
+                iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)?
             }
             Ok(())
         }
@@ -487,7 +480,7 @@ fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'tcx>(
             Ok(())
         }
 
-        ty::TyBareFn(..) => {
+        ty::TyFnDef(..) | ty::TyFnPtr(_) => {
             // FIXME(#26656): this type is always destruction-safe, but
             // it implicitly witnesses Self: Fn, which can be false.
             Ok(())
@@ -499,11 +492,11 @@ fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'tcx>(
         }
 
         // these are always dtorck
-        ty::TyTrait(..) | ty::TyProjection(_) => unreachable!(),
+        ty::TyTrait(..) | ty::TyProjection(_) => bug!(),
     }
 }
 
-fn has_dtor_of_interest<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn has_dtor_of_interest<'tcx>(tcx: &TyCtxt<'tcx>,
                               ty: ty::Ty<'tcx>) -> bool {
     match ty.sty {
         ty::TyEnum(def, _) | ty::TyStruct(def, _) => {
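
Note: the dropck hunks above replace `try!(...)` with the `?` operator; both forms propagate the `Err` value to the caller. A minimal standalone sketch of the pattern (ordinary library code, not the rustc internals):

    use std::num::ParseIntError;

    // `expr?` behaves like the older `try!(expr)`: on Err it returns early
    // from the enclosing function with that error value.
    fn double(input: &str) -> Result<i32, ParseIntError> {
        let n = input.trim().parse::<i32>()?; // previously written as try!(...)
        Ok(n * 2)
    }

    fn main() {
        assert_eq!(double(" 21 "), Ok(42));
        assert!(double("not a number").is_err());
    }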
index 5e1dc35870bb2798dbd73afe5899943e06f26dda..eae0cfb0f22672293a48558b8c6a73effdb260e9 100644 (file)
 
 use astconv::AstConv;
 use intrinsics;
-use middle::subst;
-use middle::ty::FnSig;
-use middle::ty::{self, Ty};
-use middle::ty::fold::TypeFolder;
+use rustc::ty::subst::{self, Substs};
+use rustc::ty::FnSig;
+use rustc::ty::{self, Ty, TyCtxt};
+use rustc::ty::fold::TypeFolder;
 use {CrateCtxt, require_same_types};
 
 use std::collections::{HashMap};
@@ -26,14 +26,20 @@ use syntax::attr::AttrMetaMethods;
 use syntax::codemap::Span;
 use syntax::parse::token;
 
-use rustc_front::hir;
+use rustc::hir;
 
-fn equate_intrinsic_type<'a, 'tcx>(tcx: &ty::ctxt<'tcx>, it: &hir::ForeignItem,
+fn equate_intrinsic_type<'a, 'tcx>(tcx: &TyCtxt<'tcx>, it: &hir::ForeignItem,
                                    n_tps: usize,
                                    abi: Abi,
                                    inputs: Vec<ty::Ty<'tcx>>,
                                    output: ty::FnOutput<'tcx>) {
-    let fty = tcx.mk_fn(None, tcx.mk_bare_fn(ty::BareFnTy {
+    let def_id = tcx.map.local_def_id(it.id);
+    let i_ty = tcx.lookup_item_type(def_id);
+
+    let mut substs = Substs::empty();
+    substs.types = i_ty.generics.types.map(|def| tcx.mk_param_from_def(def));
+
+    let fty = tcx.mk_fn_def(def_id, tcx.mk_substs(substs), ty::BareFnTy {
         unsafety: hir::Unsafety::Unsafe,
         abi: abi,
         sig: ty::Binder(FnSig {
@@ -41,8 +47,7 @@ fn equate_intrinsic_type<'a, 'tcx>(tcx: &ty::ctxt<'tcx>, it: &hir::ForeignItem,
             output: output,
             variadic: false,
         }),
-    }));
-    let i_ty = tcx.lookup_item_type(tcx.map.local_def_id(it.id));
+    });
     let i_n_tps = i_ty.generics.types.len(subst::FnSpace);
     if i_n_tps != n_tps {
         span_err!(tcx.sess, it.span, E0094,
@@ -79,14 +84,10 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) {
 
         //We only care about the operation here
         let (n_tps, inputs, output) = match split[1] {
-            "cxchg" => (1, vec!(tcx.mk_mut_ptr(param(ccx, 0)),
-                                param(ccx, 0),
-                                param(ccx, 0)),
-                        param(ccx, 0)),
-            "cxchgweak" => (1, vec!(tcx.mk_mut_ptr(param(ccx, 0)),
-                                param(ccx, 0),
-                                param(ccx, 0)),
-                            tcx.mk_tup(vec!(param(ccx, 0), tcx.types.bool))),
+            "cxchg" | "cxchgweak" => (1, vec!(tcx.mk_mut_ptr(param(ccx, 0)),
+                                              param(ccx, 0),
+                                              param(ccx, 0)),
+                                      tcx.mk_tup(vec!(param(ccx, 0), tcx.types.bool))),
             "load" => (1, vec!(tcx.mk_imm_ptr(param(ccx, 0))),
                        param(ccx, 0)),
             "store" => (1, vec!(tcx.mk_mut_ptr(param(ccx, 0)), param(ccx, 0)),
@@ -275,6 +276,8 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) {
 
             "overflowing_add" | "overflowing_sub" | "overflowing_mul" =>
                 (1, vec![param(ccx, 0), param(ccx, 0)], param(ccx, 0)),
+            "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" =>
+                (1, vec![param(ccx, 0), param(ccx, 0)], param(ccx, 0)),
 
             "return_address" => (0, vec![], tcx.mk_imm_ptr(tcx.types.u8)),
 
@@ -296,8 +299,7 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) {
                         variadic: false,
                     }),
                 };
-                let fn_ty = tcx.mk_bare_fn(fn_ty);
-                (0, vec![tcx.mk_fn(None, fn_ty), mut_u8, mut_u8], tcx.types.i32)
+                (0, vec![tcx.mk_fn_ptr(fn_ty), mut_u8, mut_u8], tcx.types.i32)
             }
 
             ref other => {
@@ -358,7 +360,7 @@ pub fn check_platform_intrinsic_type(ccx: &CrateCtxt,
             }
         }
         _ => {
-            match intrinsics::Intrinsic::find(tcx, &name) {
+            match intrinsics::Intrinsic::find(&name) {
                 Some(intr) => {
                     // this function is a platform specific intrinsic
                     if i_n_tps != 0 {
@@ -412,7 +414,7 @@ pub fn check_platform_intrinsic_type(ccx: &CrateCtxt,
 // the same, in a kinda-structural way, i.e. `Vector`s have to be simd structs with
 // exactly the right element type
 fn match_intrinsic_type_to_type<'tcx, 'a>(
-        tcx: &ty::ctxt<'tcx>,
+        tcx: &TyCtxt<'tcx>,
         position: &str,
         span: Span,
         structural_to_nominal: &mut HashMap<&'a intrinsics::Type, ty::Ty<'tcx>>,
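
Note: the intrinsic-checking hunk above gives `cxchg` and `cxchgweak` the same signature: pointer, expected value and new value in, and a `(T, bool)` pair (previous value plus success flag) out. A rough stand-in for that contract, built on the stable atomics API purely for illustration (it is not the intrinsic itself):

    use std::sync::atomic::{AtomicUsize, Ordering};

    // Mimics the (previous_value, success) shape the checker now expects
    // from both compare-exchange intrinsics.
    fn cxchg_like(cell: &AtomicUsize, expected: usize, new: usize) -> (usize, bool) {
        match cell.compare_exchange(expected, new, Ordering::SeqCst, Ordering::SeqCst) {
            Ok(old) => (old, true),   // swap happened; `old` was the expected value
            Err(old) => (old, false), // swap refused; `old` is the current value
        }
    }

    fn main() {
        let cell = AtomicUsize::new(1);
        assert_eq!(cxchg_like(&cell, 1, 2), (1, true));
        assert_eq!(cxchg_like(&cell, 1, 3), (2, false));
    }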
index bf08989bc0a1275654c00309f058212285a38d9b..b98f1884f01cee471ebb5540a6d32c496c8d8e3b 100644 (file)
@@ -12,16 +12,16 @@ use super::probe;
 
 use check::{self, FnCtxt, callee, demand};
 use check::UnresolvedTypeAction;
-use middle::def_id::DefId;
-use middle::subst::{self};
-use middle::traits;
-use middle::ty::{self, NoPreference, PreferMutLvalue, Ty};
-use middle::ty::adjustment::{AdjustDerefRef, AutoDerefRef, AutoPtr};
-use middle::ty::fold::TypeFoldable;
-use middle::infer;
-use middle::infer::{InferCtxt, TypeOrigin};
+use hir::def_id::DefId;
+use rustc::ty::subst::{self};
+use rustc::traits;
+use rustc::ty::{self, NoPreference, PreferMutLvalue, Ty, TyCtxt};
+use rustc::ty::adjustment::{AdjustDerefRef, AutoDerefRef, AutoPtr};
+use rustc::ty::fold::TypeFoldable;
+use rustc::infer;
+use rustc::infer::{InferCtxt, TypeOrigin};
 use syntax::codemap::Span;
-use rustc_front::hir;
+use rustc::hir;
 
 struct ConfirmContext<'a, 'tcx:'a> {
     fcx: &'a FnCtxt<'a, 'tcx>,
@@ -98,27 +98,29 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> {
         let InstantiatedMethodSig {
             method_sig, all_substs, method_predicates
         } = self.instantiate_method_sig(&pick, all_substs);
+        let all_substs = self.tcx().mk_substs(all_substs);
         let method_self_ty = method_sig.inputs[0];
 
         // Unify the (adjusted) self type with what the method expects.
         self.unify_receivers(self_ty, method_self_ty);
 
         // Create the method type
+        let def_id = pick.item.def_id();
         let method_ty = pick.item.as_opt_method().unwrap();
-        let fty = self.tcx().mk_fn(None, self.tcx().mk_bare_fn(ty::BareFnTy {
+        let fty = self.tcx().mk_fn_def(def_id, all_substs, ty::BareFnTy {
             sig: ty::Binder(method_sig),
             unsafety: method_ty.fty.unsafety,
             abi: method_ty.fty.abi.clone(),
-        }));
+        });
 
         // Add any trait/regions obligations specified on the method's type parameters.
-        self.add_obligations(fty, &all_substs, &method_predicates);
+        self.add_obligations(fty, all_substs, &method_predicates);
 
         // Create the final `MethodCallee`.
         let callee = ty::MethodCallee {
-            def_id: pick.item.def_id(),
+            def_id: def_id,
             ty: fty,
-            substs: self.tcx().mk_substs(all_substs)
+            substs: all_substs
         };
         // If this is an `&mut self` method, bias the receiver
         // expression towards mutability (this will switch
@@ -156,7 +158,7 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> {
         let (autoderefd_ty, n, result) = check::autoderef(self.fcx,
                                                           self.span,
                                                           unadjusted_self_ty,
-                                                          Some(self.self_expr),
+                                                          || Some(self.self_expr),
                                                           UnresolvedTypeAction::Error,
                                                           NoPreference,
                                                           |_, n| {
@@ -285,7 +287,7 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> {
         let (_, _, result) = check::autoderef(self.fcx,
                                               self.span,
                                               self_ty,
-                                              None,
+                                              || None,
                                               UnresolvedTypeAction::Error,
                                               NoPreference,
                                               |ty, _| {
@@ -298,10 +300,10 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> {
         match result {
             Some(r) => r,
             None => {
-                self.tcx().sess.span_bug(
+                span_bug!(
                     self.span,
-                    &format!("self-type `{}` for ObjectPick never dereferenced to an object",
-                            self_ty))
+                    "self-type `{}` for ObjectPick never dereferenced to an object",
+                    self_ty)
             }
         }
     }
@@ -332,7 +334,7 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> {
                     .generics.regions.get_slice(subst::FnSpace));
 
         let subst::Substs { types, regions } = substs;
-        let regions = regions.map(|r| r.with_slice(subst::FnSpace, &method_regions));
+        let regions = regions.with_slice(subst::FnSpace, &method_regions);
         let mut final_substs = subst::Substs { types: types, regions: regions };
 
         if num_supplied_types == 0 {
@@ -370,10 +372,10 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> {
         match self.fcx.mk_subty(false, TypeOrigin::Misc(self.span), self_ty, method_self_ty) {
             Ok(_) => {}
             Err(_) => {
-                self.tcx().sess.span_bug(
+                span_bug!(
                     self.span,
-                    &format!("{} was a subtype of {} but now is not?",
-                             self_ty, method_self_ty));
+                    "{} was a subtype of {} but now is not?",
+                    self_ty, method_self_ty);
             }
         }
     }
@@ -457,7 +459,7 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> {
     fn fixup_derefs_on_method_receiver_if_necessary(&self,
                                                     method_callee: &ty::MethodCallee) {
         let sig = match method_callee.ty.sty {
-            ty::TyBareFn(_, ref f) => f.sig.clone(),
+            ty::TyFnDef(_, _, ref f) => f.sig.clone(),
             _ => return,
         };
 
@@ -507,7 +509,7 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> {
                 check::autoderef(self.fcx,
                                  expr.span,
                                  self.fcx.expr_ty(expr),
-                                 Some(expr),
+                                 || Some(expr),
                                  UnresolvedTypeAction::Error,
                                  PreferMutLvalue,
                                  |_, autoderefs| {
@@ -520,92 +522,94 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> {
             }
 
             // Don't retry the first one or we might infinite loop!
-            if i != 0 {
-                match expr.node {
-                    hir::ExprIndex(ref base_expr, ref index_expr) => {
-                        // If this is an overloaded index, the
-                        // adjustment will include an extra layer of
-                        // autoref because the method is an &self/&mut
-                        // self method. We have to peel it off to get
-                        // the raw adjustment that `try_index_step`
-                        // expects. This is annoying and horrible. We
-                        // ought to recode this routine so it doesn't
-                        // (ab)use the normal type checking paths.
-                        let adj = self.fcx.inh.tables.borrow().adjustments.get(&base_expr.id)
-                                                                          .cloned();
-                        let (autoderefs, unsize) = match adj {
-                            Some(AdjustDerefRef(adr)) => match adr.autoref {
-                                None => {
-                                    assert!(adr.unsize.is_none());
-                                    (adr.autoderefs, None)
-                                }
-                                Some(AutoPtr(_, _)) => {
-                                    (adr.autoderefs, adr.unsize.map(|target| {
-                                        target.builtin_deref(false, NoPreference)
-                                              .expect("fixup: AutoPtr is not &T").ty
-                                    }))
-                                }
-                                Some(_) => {
-                                    self.tcx().sess.span_bug(
-                                        base_expr.span,
-                                        &format!("unexpected adjustment autoref {:?}",
-                                                adr));
-                                }
-                            },
-                            None => (0, None),
+            if i == 0 {
+                continue;
+            }
+            match expr.node {
+                hir::ExprIndex(ref base_expr, ref index_expr) => {
+                    // If this is an overloaded index, the
+                    // adjustment will include an extra layer of
+                    // autoref because the method is an &self/&mut
+                    // self method. We have to peel it off to get
+                    // the raw adjustment that `try_index_step`
+                    // expects. This is annoying and horrible. We
+                    // ought to recode this routine so it doesn't
+                    // (ab)use the normal type checking paths.
+                    let adj = self.fcx.inh.tables.borrow().adjustments.get(&base_expr.id)
+                                                                        .cloned();
+                    let (autoderefs, unsize) = match adj {
+                        Some(AdjustDerefRef(adr)) => match adr.autoref {
+                            None => {
+                                assert!(adr.unsize.is_none());
+                                (adr.autoderefs, None)
+                            }
+                            Some(AutoPtr(_, _)) => {
+                                (adr.autoderefs, adr.unsize.map(|target| {
+                                    target.builtin_deref(false, NoPreference)
+                                            .expect("fixup: AutoPtr is not &T").ty
+                                }))
+                            }
                             Some(_) => {
-                                self.tcx().sess.span_bug(
+                                span_bug!(
                                     base_expr.span,
-                                    "unexpected adjustment type");
+                                    "unexpected adjustment autoref {:?}",
+                                    adr);
                             }
-                        };
-
-                        let (adjusted_base_ty, unsize) = if let Some(target) = unsize {
-                            (target, true)
-                        } else {
-                            (self.fcx.adjust_expr_ty(base_expr,
-                                Some(&AdjustDerefRef(AutoDerefRef {
-                                    autoderefs: autoderefs,
-                                    autoref: None,
-                                    unsize: None
-                                }))), false)
-                        };
-                        let index_expr_ty = self.fcx.expr_ty(&index_expr);
-
-                        let result = check::try_index_step(
-                            self.fcx,
-                            ty::MethodCall::expr(expr.id),
-                            expr,
-                            &base_expr,
-                            adjusted_base_ty,
-                            autoderefs,
-                            unsize,
-                            PreferMutLvalue,
-                            index_expr_ty);
-
-                        if let Some((input_ty, return_ty)) = result {
-                            demand::suptype(self.fcx, index_expr.span, input_ty, index_expr_ty);
-
-                            let expr_ty = self.fcx.expr_ty(&expr);
-                            demand::suptype(self.fcx, expr.span, expr_ty, return_ty);
+                        },
+                        None => (0, None),
+                        Some(_) => {
+                            span_bug!(
+                                base_expr.span,
+                                "unexpected adjustment type");
                         }
+                    };
+
+                    let (adjusted_base_ty, unsize) = if let Some(target) = unsize {
+                        (target, true)
+                    } else {
+                        (self.fcx.adjust_expr_ty(base_expr,
+                            Some(&AdjustDerefRef(AutoDerefRef {
+                                autoderefs: autoderefs,
+                                autoref: None,
+                                unsize: None
+                            }))), false)
+                    };
+                    let index_expr_ty = self.fcx.expr_ty(&index_expr);
+
+                    let result = check::try_index_step(
+                        self.fcx,
+                        ty::MethodCall::expr(expr.id),
+                        expr,
+                        &base_expr,
+                        adjusted_base_ty,
+                        autoderefs,
+                        unsize,
+                        PreferMutLvalue,
+                        index_expr_ty);
+
+                    if let Some((input_ty, return_ty)) = result {
+                        demand::suptype(self.fcx, index_expr.span, input_ty, index_expr_ty);
+
+                        let expr_ty = self.fcx.expr_ty(&expr);
+                        demand::suptype(self.fcx, expr.span, expr_ty, return_ty);
                     }
-                    hir::ExprUnary(hir::UnDeref, ref base_expr) => {
-                        // if this is an overloaded deref, then re-evaluate with
-                        // a preference for mut
-                        let method_call = ty::MethodCall::expr(expr.id);
-                        if self.fcx.inh.tables.borrow().method_map.contains_key(&method_call) {
-                            check::try_overloaded_deref(
-                                self.fcx,
-                                expr.span,
-                                Some(method_call),
-                                Some(&base_expr),
-                                self.fcx.expr_ty(&base_expr),
-                                PreferMutLvalue);
-                        }
+                }
+                hir::ExprUnary(hir::UnDeref, ref base_expr) => {
+                    // if this is an overloaded deref, then re-evaluate with
+                    // a preference for mut
+                    let method_call = ty::MethodCall::expr(expr.id);
+                    if self.fcx.inh.tables.borrow().method_map.contains_key(&method_call) {
+                        let method = check::try_overloaded_deref(
+                            self.fcx,
+                            expr.span,
+                            Some(&base_expr),
+                            self.fcx.expr_ty(&base_expr),
+                            PreferMutLvalue);
+                        let method = method.expect("re-trying deref failed");
+                        self.fcx.inh.tables.borrow_mut().method_map.insert(method_call, method);
                     }
-                    _ => {}
                 }
+                _ => {}
             }
         }
     }
@@ -613,7 +617,7 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> {
     ///////////////////////////////////////////////////////////////////////////
     // MISCELLANY
 
-    fn tcx(&self) -> &'a ty::ctxt<'tcx> {
+    fn tcx(&self) -> &'a TyCtxt<'tcx> {
         self.fcx.tcx()
     }
 
@@ -642,12 +646,12 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> {
 
         // must be exactly one trait ref or we'd get an ambig error etc
         if upcast_trait_refs.len() != 1 {
-            self.tcx().sess.span_bug(
+            span_bug!(
                 self.span,
-                &format!("cannot uniquely upcast `{:?}` to `{:?}`: `{:?}`",
-                         source_trait_ref,
-                         target_trait_def_id,
-                         upcast_trait_refs));
+                "cannot uniquely upcast `{:?}` to `{:?}`: `{:?}`",
+                source_trait_ref,
+                target_trait_def_id,
+                upcast_trait_refs);
         }
 
         upcast_trait_refs.into_iter().next().unwrap()
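
Note: the confirm.rs hunks above swap `self.tcx().sess.span_bug(span, &format!(...))` for `span_bug!(span, "...", args)`, so call sites pass format-style arguments directly instead of building the message themselves. A sketch of that call shape with stand-in types (the `Span` struct and macro body below are placeholders, not the rustc-internal definitions):

    #[derive(Debug, Clone, Copy)]
    struct Span;

    macro_rules! span_bug {
        ($span:expr, $($msg:tt)*) => {
            panic!("{:?}: internal compiler error: {}", $span, format_args!($($msg)*))
        };
    }

    fn assert_still_subtype(span: Span, self_ty: &str, method_self_ty: &str) {
        if self_ty != method_self_ty {
            span_bug!(span, "{} was a subtype of {} but now is not?",
                      self_ty, method_self_ty);
        }
    }

    fn main() {
        assert_still_subtype(Span, "&Foo", "&Foo"); // types agree, nothing fires
    }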
index fc2dd4475e3ffabf135a56b5fb95ac42b8427a70..31d95af4fbb9c4b416b25d031f4b9cf58fcc35cf 100644 (file)
 
 use astconv::AstConv;
 use check::FnCtxt;
-use middle::def::Def;
-use middle::def_id::DefId;
-use middle::privacy::{AllPublic, DependsOn, LastPrivate, LastMod};
-use middle::subst;
-use middle::traits;
-use middle::ty::{self, ToPredicate, ToPolyTraitRef, TraitRef, TypeFoldable};
-use middle::ty::adjustment::{AdjustDerefRef, AutoDerefRef, AutoPtr};
-use middle::infer;
+use hir::def::Def;
+use hir::def_id::DefId;
+use rustc::ty::subst;
+use rustc::traits;
+use rustc::ty::{self, TyCtxt, ToPredicate, ToPolyTraitRef, TraitRef, TypeFoldable};
+use rustc::ty::adjustment::{AdjustDerefRef, AutoDerefRef, AutoPtr};
+use rustc::infer;
 
 use syntax::ast;
 use syntax::codemap::Span;
 
-use rustc_front::hir;
+use rustc::hir;
 
 pub use self::MethodError::*;
 pub use self::CandidateSource::*;
@@ -44,6 +43,9 @@ pub enum MethodError<'tcx> {
 
     // Using a `Fn`/`FnMut`/etc method on a raw closure type before we have inferred its kind.
     ClosureAmbiguity(/* DefId of fn trait */ DefId),
+
+    // Found an applicable method, but it is not visible.
+    PrivateMatch(Def),
 }
 
 // Contains a list of static methods that may apply, a list of unsatisfied trait predicates which
@@ -91,6 +93,7 @@ pub fn exists<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
         Err(NoMatch(..)) => false,
         Err(Ambiguity(..)) => true,
         Err(ClosureAmbiguity(..)) => true,
+        Err(PrivateMatch(..)) => true,
     }
 }
 
@@ -125,7 +128,7 @@ pub fn lookup<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
 
     let mode = probe::Mode::MethodCall;
     let self_ty = fcx.infcx().resolve_type_vars_if_possible(&self_ty);
-    let pick = try!(probe::probe(fcx, span, mode, method_name, self_ty, call_expr.id));
+    let pick = probe::probe(fcx, span, mode, method_name, self_ty, call_expr.id)?;
     Ok(confirm::confirm(fcx, span, self_expr, call_expr, self_ty, pick, supplied_method_types))
 }
 
@@ -231,11 +234,12 @@ pub fn lookup_in_trait_adjusted<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                                                        &method_ty.fty.sig).0;
     let fn_sig = fcx.instantiate_type_scheme(span, trait_ref.substs, &fn_sig);
     let transformed_self_ty = fn_sig.inputs[0];
-    let fty = tcx.mk_fn(None, tcx.mk_bare_fn(ty::BareFnTy {
+    let def_id = method_item.def_id();
+    let fty = tcx.mk_fn_def(def_id, trait_ref.substs, ty::BareFnTy {
         sig: ty::Binder(fn_sig),
         unsafety: method_ty.fty.unsafety,
         abi: method_ty.fty.abi.clone(),
-    }));
+    });
 
     debug!("lookup_in_trait_adjusted: matched method fty={:?} obligation={:?}",
            fty,
@@ -298,28 +302,26 @@ pub fn lookup_in_trait_adjusted<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                         }
 
                         _ => {
-                            fcx.tcx().sess.span_bug(
+                            span_bug!(
                                 span,
-                                &format!(
-                                    "trait method is &self but first arg is: {}",
-                                    transformed_self_ty));
+                                "trait method is &self but first arg is: {}",
+                                transformed_self_ty);
                         }
                     }
                 }
 
                 _ => {
-                    fcx.tcx().sess.span_bug(
+                    span_bug!(
                         span,
-                        &format!(
-                            "unexpected explicit self type in operator method: {:?}",
-                            method_ty.explicit_self));
+                        "unexpected explicit self type in operator method: {:?}",
+                        method_ty.explicit_self);
                 }
             }
         }
     }
 
     let callee = ty::MethodCallee {
-        def_id: method_item.def_id(),
+        def_id: def_id,
         ty: fty,
         substs: trait_ref.substs
     };
@@ -334,31 +336,24 @@ pub fn resolve_ufcs<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                               method_name: ast::Name,
                               self_ty: ty::Ty<'tcx>,
                               expr_id: ast::NodeId)
-                              -> Result<(Def, LastPrivate), MethodError<'tcx>>
+                              -> Result<Def, MethodError<'tcx>>
 {
     let mode = probe::Mode::Path;
-    let pick = try!(probe::probe(fcx, span, mode, method_name, self_ty, expr_id));
-    let def_id = pick.item.def_id();
-    let mut lp = LastMod(AllPublic);
+    let pick = probe::probe(fcx, span, mode, method_name, self_ty, expr_id)?;
+    let def = pick.item.def();
+
     if let probe::InherentImplPick = pick.kind {
-        if pick.item.vis() != hir::Public {
-            lp = LastMod(DependsOn(def_id));
+        if !pick.item.vis().is_accessible_from(fcx.body_id, &fcx.tcx().map) {
+            let msg = format!("{} `{}` is private", def.kind_name(), &method_name.as_str());
+            fcx.tcx().sess.span_err(span, &msg);
         }
     }
-    let def_result = match pick.item {
-        ty::ImplOrTraitItem::MethodTraitItem(..) => Def::Method(def_id),
-        ty::ImplOrTraitItem::ConstTraitItem(..) => Def::AssociatedConst(def_id),
-        ty::ImplOrTraitItem::TypeTraitItem(..) => {
-            fcx.tcx().sess.span_bug(span, "resolve_ufcs: probe picked associated type");
-        }
-    };
-    Ok((def_result, lp))
+    Ok(def)
 }
 
-
 /// Find item with name `item_name` defined in `trait_def_id`
 /// and return it, or `None`, if no such item.
-fn trait_item<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn trait_item<'tcx>(tcx: &TyCtxt<'tcx>,
                     trait_def_id: DefId,
                     item_name: ast::Name)
                     -> Option<ty::ImplOrTraitItem<'tcx>>
@@ -369,7 +364,7 @@ fn trait_item<'tcx>(tcx: &ty::ctxt<'tcx>,
                .cloned()
 }
 
-fn impl_item<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn impl_item<'tcx>(tcx: &TyCtxt<'tcx>,
                    impl_def_id: DefId,
                    item_name: ast::Name)
                    -> Option<ty::ImplOrTraitItem<'tcx>>
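
Note: the new `PrivateMatch(Def)` variant above lets method lookup remember an applicable but inaccessible candidate, so the error becomes "method `...` is private" rather than a generic "no method found". A user-level illustration (the offending call is left commented out so the snippet still compiles):

    mod widget {
        pub struct Widget;

        impl Widget {
            fn secret(&self) -> u32 { 42 }                 // private inherent method
            pub fn public(&self) -> u32 { self.secret() }  // visible wrapper
        }
    }

    fn main() {
        let w = widget::Widget;
        println!("{}", w.public());
        // println!("{}", w.secret()); // error: method `secret` is private
    }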
index 3cf182a0d8ff6ba941c45e0acc440ba804c07b23..0ffbbfea84ef0c5c946e18ce335bf3b26212926e 100644 (file)
@@ -15,16 +15,16 @@ use super::suggest;
 
 use check;
 use check::{FnCtxt, UnresolvedTypeAction};
-use middle::def_id::DefId;
-use middle::subst;
-use middle::subst::Subst;
-use middle::traits;
-use middle::ty::{self, NoPreference, Ty, ToPolyTraitRef, TraitRef, TypeFoldable};
-use middle::infer;
-use middle::infer::{InferCtxt, TypeOrigin};
+use hir::def_id::DefId;
+use hir::def::Def;
+use rustc::ty::subst;
+use rustc::ty::subst::Subst;
+use rustc::traits;
+use rustc::ty::{self, NoPreference, Ty, TyCtxt, ToPolyTraitRef, TraitRef, TypeFoldable};
+use rustc::infer::{self, InferCtxt, InferOk, TypeOrigin};
 use syntax::ast;
 use syntax::codemap::{Span, DUMMY_SP};
-use rustc_front::hir;
+use rustc::hir;
 use std::collections::HashSet;
 use std::mem;
 use std::rc::Rc;
@@ -47,6 +47,9 @@ struct ProbeContext<'a, 'tcx:'a> {
     /// used for error reporting
     static_candidates: Vec<CandidateSource>,
 
+    /// Some(candidate) if there is a private candidate
+    private_candidate: Option<Def>,
+
     /// Collects near misses when trait bounds for type parameters are unsatisfied and is only used
     /// for error reporting
     unsatisfied_predicates: Vec<TraitRef<'tcx>>
@@ -186,7 +189,7 @@ pub fn probe<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                              steps,
                                              opt_simplified_steps);
         probe_cx.assemble_inherent_candidates();
-        try!(probe_cx.assemble_extension_candidates_for_traits_in_scope(scope_expr_id));
+        probe_cx.assemble_extension_candidates_for_traits_in_scope(scope_expr_id)?;
         probe_cx.pick()
     })
 }
@@ -200,7 +203,7 @@ fn create_steps<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
     let (final_ty, dereferences, _) = check::autoderef(fcx,
                                                        span,
                                                        self_ty,
-                                                       None,
+                                                       || None,
                                                        UnresolvedTypeAction::Error,
                                                        NoPreference,
                                                        |t, d| {
@@ -247,6 +250,7 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
             steps: Rc::new(steps),
             opt_simplified_steps: opt_simplified_steps,
             static_candidates: Vec::new(),
+            private_candidate: None,
             unsatisfied_predicates: Vec::new(),
         }
     }
@@ -256,9 +260,10 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
         self.extension_candidates.clear();
         self.impl_dups.clear();
         self.static_candidates.clear();
+        self.private_candidate = None;
     }
 
-    fn tcx(&self) -> &'a ty::ctxt<'tcx> {
+    fn tcx(&self) -> &'a TyCtxt<'tcx> {
         self.fcx.tcx()
     }
 
@@ -407,6 +412,11 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
             return self.record_static_candidate(ImplSource(impl_def_id));
         }
 
+        if !item.vis().is_accessible_from(self.fcx.body_id, &self.tcx().map) {
+            self.private_candidate = Some(item.def());
+            return
+        }
+
         let (impl_ty, impl_substs) = self.impl_ty_and_substs(impl_def_id);
         let impl_ty = impl_ty.subst(self.tcx(), &impl_substs);
 
@@ -505,11 +515,11 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
                 assert_eq!(m.generics.types.get_slice(subst::TypeSpace).len(),
                            trait_ref.substs.types.get_slice(subst::TypeSpace).len());
                 assert_eq!(m.generics.regions.get_slice(subst::TypeSpace).len(),
-                           trait_ref.substs.regions().get_slice(subst::TypeSpace).len());
+                           trait_ref.substs.regions.get_slice(subst::TypeSpace).len());
                 assert_eq!(m.generics.types.get_slice(subst::SelfSpace).len(),
                            trait_ref.substs.types.get_slice(subst::SelfSpace).len());
                 assert_eq!(m.generics.regions.get_slice(subst::SelfSpace).len(),
-                           trait_ref.substs.regions().get_slice(subst::SelfSpace).len());
+                           trait_ref.substs.regions.get_slice(subst::SelfSpace).len());
             }
 
             // Because this trait derives from a where-clause, it
@@ -568,7 +578,7 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
         if let Some(applicable_traits) = opt_applicable_traits {
             for &trait_did in applicable_traits {
                 if duplicates.insert(trait_did) {
-                    try!(self.assemble_extension_candidates_for_trait(trait_did));
+                    self.assemble_extension_candidates_for_trait(trait_did)?;
                 }
             }
         }
@@ -579,7 +589,7 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
         let mut duplicates = HashSet::new();
         for trait_info in suggest::all_traits(self.fcx.ccx) {
             if duplicates.insert(trait_info.def_id) {
-                try!(self.assemble_extension_candidates_for_trait(trait_info.def_id));
+                self.assemble_extension_candidates_for_trait(trait_info.def_id)?;
             }
         }
         Ok(())
@@ -612,7 +622,7 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
 
         self.assemble_extension_candidates_for_trait_impls(trait_def_id, item.clone());
 
-        try!(self.assemble_closure_candidates(trait_def_id, item.clone()));
+        self.assemble_closure_candidates(trait_def_id, item.clone())?;
 
         self.assemble_projection_candidates(trait_def_id, item.clone());
 
@@ -697,11 +707,11 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
         // Check if this is one of the Fn,FnMut,FnOnce traits.
         let tcx = self.tcx();
         let kind = if Some(trait_def_id) == tcx.lang_items.fn_trait() {
-            ty::FnClosureKind
+            ty::ClosureKind::Fn
         } else if Some(trait_def_id) == tcx.lang_items.fn_mut_trait() {
-            ty::FnMutClosureKind
+            ty::ClosureKind::FnMut
         } else if Some(trait_def_id) == tcx.lang_items.fn_once_trait() {
-            ty::FnOnceClosureKind
+            ty::ClosureKind::FnOnce
         } else {
             return Ok(());
         };
@@ -846,6 +856,7 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
         }
 
         let static_candidates = mem::replace(&mut self.static_candidates, vec![]);
+        let private_candidate = mem::replace(&mut self.private_candidate, None);
         let unsatisfied_predicates = mem::replace(&mut self.unsatisfied_predicates, vec![]);
 
         // things failed, so lets look at all traits, for diagnostic purposes now:
@@ -854,7 +865,7 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
         let span = self.span;
         let tcx = self.tcx();
 
-        try!(self.assemble_extension_candidates_for_all_traits());
+        self.assemble_extension_candidates_for_all_traits()?;
 
         let out_of_scope_traits = match self.pick_core() {
             Some(Ok(p)) => vec![p.item.container().id()],
@@ -865,8 +876,8 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
                         match tcx.trait_id_of_impl(impl_id) {
                             Some(id) => id,
                             None =>
-                                tcx.sess.span_bug(span,
-                                                  "found inherent method when looking at traits")
+                                span_bug!(span,
+                                          "found inherent method when looking at traits")
                         }
                     }
                 }
@@ -877,11 +888,15 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
             }
             Some(Err(MethodError::ClosureAmbiguity(..))) => {
                 // this error only occurs when assembling candidates
-                tcx.sess.span_bug(span, "encountered ClosureAmbiguity from pick_core");
+                span_bug!(span, "encountered ClosureAmbiguity from pick_core");
             }
-            None => vec![],
+            _ => vec![],
         };
 
+        if let Some(def) = private_candidate {
+            return Err(MethodError::PrivateMatch(def));
+        }
+
         Err(MethodError::NoMatch(NoMatchData::new(static_candidates, unsatisfied_predicates,
                                                   out_of_scope_traits, self.mode)))
     }
@@ -1135,6 +1150,8 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
 
     fn make_sub_ty(&self, sub: Ty<'tcx>, sup: Ty<'tcx>) -> infer::UnitResult<'tcx> {
         self.infcx().sub_types(false, TypeOrigin::Misc(DUMMY_SP), sub, sup)
+            // FIXME(#32730) propagate obligations
+            .map(|InferOk { obligations, .. }| assert!(obligations.is_empty()))
     }
 
     fn has_applicable_self(&self, item: &ty::ImplOrTraitItem) -> bool {
@@ -1194,7 +1211,7 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
         // method yet. So create fresh variables here for those too,
         // if there are any.
         assert_eq!(substs.types.len(subst::FnSpace), 0);
-        assert_eq!(substs.regions().len(subst::FnSpace), 0);
+        assert_eq!(substs.regions.len(subst::FnSpace), 0);
 
         if self.mode == Mode::Path {
             return impl_ty;
@@ -1278,7 +1295,7 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
     }
 }
 
-fn impl_item<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn impl_item<'tcx>(tcx: &TyCtxt<'tcx>,
                    impl_def_id: DefId,
                    item_name: ast::Name)
                    -> Option<ty::ImplOrTraitItem<'tcx>>
@@ -1293,7 +1310,7 @@ fn impl_item<'tcx>(tcx: &ty::ctxt<'tcx>,
 
 /// Find item with name `item_name` defined in `trait_def_id`
 /// and return it, or `None`, if no such item.
-fn trait_item<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn trait_item<'tcx>(tcx: &TyCtxt<'tcx>,
                     trait_def_id: DefId,
                     item_name: ast::Name)
                     -> Option<ty::ImplOrTraitItem<'tcx>>
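
Note: the probe hunk above maps the trait being searched onto the now-namespaced `ty::ClosureKind::{Fn, FnMut, FnOnce}` variants by comparing it against the three fn-trait lang items. A standalone sketch of that classification, with `DefId` and the lang-item table as stand-ins for the rustc types:

    #[derive(Debug, PartialEq, Eq, Clone, Copy)]
    struct DefId(u32);

    #[derive(Debug, PartialEq, Eq)]
    enum ClosureKind { Fn, FnMut, FnOnce }

    struct LangItems {
        fn_trait: Option<DefId>,
        fn_mut_trait: Option<DefId>,
        fn_once_trait: Option<DefId>,
    }

    fn closure_kind_for(trait_def_id: DefId, lang_items: &LangItems) -> Option<ClosureKind> {
        if Some(trait_def_id) == lang_items.fn_trait {
            Some(ClosureKind::Fn)
        } else if Some(trait_def_id) == lang_items.fn_mut_trait {
            Some(ClosureKind::FnMut)
        } else if Some(trait_def_id) == lang_items.fn_once_trait {
            Some(ClosureKind::FnOnce)
        } else {
            None // not one of the fn traits: no closure candidate to assemble
        }
    }

    fn main() {
        let lang_items = LangItems {
            fn_trait: Some(DefId(1)),
            fn_mut_trait: Some(DefId(2)),
            fn_once_trait: Some(DefId(3)),
        };
        assert_eq!(closure_kind_for(DefId(2), &lang_items), Some(ClosureKind::FnMut));
        assert_eq!(closure_kind_for(DefId(9), &lang_items), None);
    }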
index e7d84efdaa2f7a50f1051ff08aaf1dd286699051..c5195cf8787dab57a333b62e5a6ace6c49c24820 100644 (file)
 use CrateCtxt;
 
 use astconv::AstConv;
-use check::{self, FnCtxt};
-use front::map as hir_map;
-use middle::ty::{self, Ty, ToPolyTraitRef, ToPredicate, TypeFoldable};
+use check::{self, FnCtxt, UnresolvedTypeAction, autoderef};
+use rustc::hir::map as hir_map;
+use rustc::ty::{self, Ty, ToPolyTraitRef, ToPredicate, TypeFoldable};
 use middle::cstore::{self, CrateStore};
-use middle::def::Def;
-use middle::def_id::DefId;
+use hir::def::Def;
+use hir::def_id::DefId;
 use middle::lang_items::FnOnceTraitLangItem;
-use middle::subst::Substs;
-use middle::traits::{Obligation, SelectionContext};
+use rustc::ty::subst::Substs;
+use rustc::ty::LvaluePreference;
+use rustc::traits::{Obligation, SelectionContext};
 use util::nodemap::{FnvHashSet};
 
+
 use syntax::ast;
 use syntax::codemap::Span;
 use syntax::errors::DiagnosticBuilder;
-use rustc_front::print::pprust;
-use rustc_front::hir;
+use rustc::hir::print as pprust;
+use rustc::hir;
+use rustc::hir::Expr_;
 
 use std::cell;
 use std::cmp::Ordering;
@@ -37,6 +40,55 @@ use std::cmp::Ordering;
 use super::{MethodError, NoMatchData, CandidateSource, impl_item, trait_item};
 use super::probe::Mode;
 
+fn is_fn_ty<'a, 'tcx>(ty: &Ty<'tcx>, fcx: &FnCtxt<'a, 'tcx>, span: Span) -> bool {
+    let cx = fcx.tcx();
+    match ty.sty {
+        // Not all of these (e.g. unsafe fns) implement FnOnce
+        // so we look for these beforehand
+        ty::TyClosure(..) | ty::TyFnDef(..) | ty::TyFnPtr(_) => true,
+        // If it's not a simple function, look for things which implement FnOnce
+        _ => {
+            if let Ok(fn_once_trait_did) =
+                    cx.lang_items.require(FnOnceTraitLangItem) {
+                let infcx = fcx.infcx();
+                let (_, _, opt_is_fn) = autoderef(fcx,
+                                                  span,
+                                                  ty,
+                                                  || None,
+                                                  UnresolvedTypeAction::Ignore,
+                                                  LvaluePreference::NoPreference,
+                                                  |ty, _| {
+                    infcx.probe(|_| {
+                        let fn_once_substs =
+                            Substs::new_trait(vec![infcx.next_ty_var()],
+                                              Vec::new(),
+                                              ty);
+                        let trait_ref =
+                          ty::TraitRef::new(fn_once_trait_did,
+                                            cx.mk_substs(fn_once_substs));
+                        let poly_trait_ref = trait_ref.to_poly_trait_ref();
+                        let obligation = Obligation::misc(span,
+                                                          fcx.body_id,
+                                                          poly_trait_ref
+                                                             .to_predicate());
+                        let mut selcx = SelectionContext::new(infcx);
+
+                        if selcx.evaluate_obligation(&obligation) {
+                            Some(())
+                        } else {
+                            None
+                        }
+                    })
+                });
+
+                opt_is_fn.is_some()
+            } else {
+                false
+            }
+        }
+    }
+}
+
 pub fn report_error<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                               span: Span,
                               rcvr_ty: Ty<'tcx>,
@@ -53,7 +105,7 @@ pub fn report_error<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
         MethodError::NoMatch(NoMatchData { static_candidates: static_sources,
                                            unsatisfied_predicates,
                                            out_of_scope_traits,
-                                           mode }) => {
+                                           mode, .. }) => {
             let cx = fcx.tcx();
 
             let mut err = fcx.type_error_struct(
@@ -79,60 +131,41 @@ pub fn report_error<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                         // snippet
                     };
 
-                    macro_rules! span_stored_function {
-                        () => {
-                            err.span_note(span,
-                                          &format!("use `({0}.{1})(...)` if you meant to call \
-                                                    the function stored in the `{1}` field",
-                                                   expr_string, item_name));
-                        }
-                    }
+                    let field_ty = field.ty(cx, substs);
 
-                    macro_rules! span_did_you_mean {
-                        () => {
-                            err.span_note(span, &format!("did you mean to write `{0}.{1}`?",
-                                                         expr_string, item_name));
-                        }
+                    if is_fn_ty(&field_ty, &fcx, span) {
+                        err.span_note(span,
+                                      &format!("use `({0}.{1})(...)` if you meant to call \
+                                               the function stored in the `{1}` field",
+                                               expr_string, item_name));
+                    } else {
+                        err.span_note(span, &format!("did you mean to write `{0}.{1}`?",
+                                                     expr_string, item_name));
                     }
+                }
+            }
 
-                    // Determine if the field can be used as a function in some way
-                    let field_ty = field.ty(cx, substs);
+            if is_fn_ty(&rcvr_ty, &fcx, span) {
+                macro_rules! report_function {
+                    ($span:expr, $name:expr) => {
+                        err.fileline_note(
+                            $span,
+                            &format!("{} is a function, perhaps you wish to call it",
+                                     $name));
+                    }
+                }
 
-                    match field_ty.sty {
-                        // Not all of these (e.g. unsafe fns) implement FnOnce
-                        // so we look for these beforehand
-                        ty::TyClosure(..) | ty::TyBareFn(..) => {
-                            span_stored_function!();
-                        }
-                        // If it's not a simple function, look for things which implement FnOnce
-                        _ => {
-                            if let Ok(fn_once_trait_did) =
-                                    cx.lang_items.require(FnOnceTraitLangItem) {
-                                let infcx = fcx.infcx();
-                                infcx.probe(|_| {
-                                    let fn_once_substs =
-                                        Substs::new_trait(vec![infcx.next_ty_var()],
-                                                          Vec::new(),
-                                                          field_ty);
-                                    let trait_ref =
-                                      ty::TraitRef::new(fn_once_trait_did,
-                                                        cx.mk_substs(fn_once_substs));
-                                    let poly_trait_ref = trait_ref.to_poly_trait_ref();
-                                    let obligation = Obligation::misc(span,
-                                                                      fcx.body_id,
-                                                                      poly_trait_ref
-                                                                         .to_predicate());
-                                    let mut selcx = SelectionContext::new(infcx);
-
-                                    if selcx.evaluate_obligation(&obligation) {
-                                        span_stored_function!();
-                                    } else {
-                                        span_did_you_mean!();
-                                    }
-                                });
-                            } else {
-                                span_did_you_mean!();
-                            }
+                if let Some(expr) = rcvr_expr {
+                    if let Ok (expr_string) = cx.sess.codemap().span_to_snippet(expr.span) {
+                        report_function!(expr.span, expr_string);
+                        err.span_suggestion(expr.span,
+                                            "try calling the base function:",
+                                            format!("{}()",
+                                                    expr_string));
+                    }
+                    else if let Expr_::ExprPath(_, path) = expr.node.clone() {
+                        if let Some(segment) = path.segments.last() {
+                            report_function!(expr.span, segment.identifier.name);
                         }
                     }
                 }
@@ -141,7 +174,8 @@ pub fn report_error<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
             if !static_sources.is_empty() {
                 err.fileline_note(
                     span,
-                    "found defined static methods, maybe a `self` is missing?");
+                    "found the following associated functions; to be used as \
+                     methods, functions must have a `self` parameter");
 
                 report_candidates(fcx, &mut err, span, item_name, static_sources);
             }
@@ -188,6 +222,11 @@ pub fn report_error<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
             };
             fcx.sess().span_err(span, &msg);
         }
+
+        MethodError::PrivateMatch(def) => {
+            let msg = format!("{} `{}` is private", def.kind_name(), item_name);
+            fcx.tcx().sess.span_err(span, &msg);
+        }
     }
 
     fn report_candidates(fcx: &FnCtxt,
@@ -351,7 +390,7 @@ fn type_derefs_to_local<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
         return is_local(fcx.resolve_type_vars_if_possible(rcvr_ty));
     }
 
-    check::autoderef(fcx, span, rcvr_ty, None,
+    check::autoderef(fcx, span, rcvr_ty, || None,
                      check::UnresolvedTypeAction::Ignore, ty::NoPreference,
                      |ty, _| {
         if is_local(ty) {
@@ -397,7 +436,7 @@ impl Ord for TraitInfo {
 /// Retrieve all traits in this crate and any dependent crates.
 pub fn all_traits<'a>(ccx: &'a CrateCtxt) -> AllTraits<'a> {
     if ccx.all_traits.borrow().is_none() {
-        use rustc_front::intravisit;
+        use rustc::hir::intravisit;
 
         let mut traits = vec![];
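
Note: the suggest.rs hunks above factor the callability test into `is_fn_ty` and use it to pick between the two notes: `(receiver.field)(...)` when the field's type is callable, and "did you mean to write `receiver.field`?" otherwise. A user-level illustration of the callable-field case (the wrong spelling stays in a comment so the snippet compiles; the live line shows the suggested form):

    struct Button {
        on_click: fn() -> u32,
    }

    fn seven() -> u32 { 7 }

    fn main() {
        let b = Button { on_click: seven };
        // let n = b.on_click();  // error: no method named `on_click` found;
        //                        // note: use `(b.on_click)(...)` to call the field
        let n = (b.on_click)();
        println!("{}", n);
    }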
 
index 7b2e678499bb88547aa26c97c5720fdb57faf0bd..67b91f7838c6644616ab0de8f66c96f3b9bd8a5f 100644 (file)
@@ -86,23 +86,22 @@ use dep_graph::DepNode;
 use fmt_macros::{Parser, Piece, Position};
 use middle::astconv_util::prohibit_type_params;
 use middle::cstore::LOCAL_CRATE;
-use middle::def::{self, Def};
-use middle::def_id::DefId;
-use middle::infer;
-use middle::infer::{TypeOrigin, type_variable};
-use middle::pat_util::{self, pat_id_map};
-use middle::privacy::{AllPublic, LastMod};
-use middle::subst::{self, Subst, Substs, VecPerParamSpace, ParamSpace};
-use middle::traits::{self, report_fulfillment_errors};
-use middle::ty::{GenericPredicates, TypeScheme};
-use middle::ty::{Disr, ParamTy, ParameterEnvironment};
-use middle::ty::{LvaluePreference, NoPreference, PreferMutLvalue};
-use middle::ty::{self, ToPolyTraitRef, Ty};
-use middle::ty::{MethodCall, MethodCallee};
-use middle::ty::adjustment;
-use middle::ty::error::TypeError;
-use middle::ty::fold::{TypeFolder, TypeFoldable};
-use middle::ty::util::Representability;
+use hir::def::{self, Def};
+use hir::def_id::DefId;
+use rustc::infer::{self, InferOk, TypeOrigin, TypeTrace, type_variable};
+use hir::pat_util::{self, pat_id_map};
+use rustc::ty::subst::{self, Subst, Substs, VecPerParamSpace, ParamSpace};
+use rustc::traits::{self, report_fulfillment_errors, ProjectionMode};
+use rustc::ty::{GenericPredicates, TypeScheme};
+use rustc::ty::{ParamTy, ParameterEnvironment};
+use rustc::ty::{LvaluePreference, NoPreference, PreferMutLvalue};
+use rustc::ty::{self, ToPolyTraitRef, Ty, TyCtxt, Visibility};
+use rustc::ty::{MethodCall, MethodCallee};
+use rustc::ty::adjustment;
+use rustc::ty::error::TypeError;
+use rustc::ty::fold::{TypeFolder, TypeFoldable};
+use rustc::ty::relate::TypeRelation;
+use rustc::ty::util::{Representability, IntTypeExt};
 use require_c_abi_if_variadic;
 use rscope::{ElisionFailureInfo, RegionScope};
 use session::{Session, CompileResult};
@@ -125,11 +124,11 @@ use syntax::parse::token::{self, InternedString, special_idents};
 use syntax::ptr::P;
 use syntax::util::lev_distance::find_best_match_for_name;
 
-use rustc_front::intravisit::{self, Visitor};
-use rustc_front::hir;
-use rustc_front::hir::{Visibility, PatKind};
-use rustc_front::print::pprust;
+use rustc::hir::intravisit::{self, Visitor};
+use rustc::hir::{self, PatKind};
+use rustc::hir::print as pprust;
 use rustc_back::slice;
+use rustc_const_eval::eval_repeat_count;
 
 mod assoc;
 pub mod dropck;
@@ -301,13 +300,13 @@ pub struct FnCtxt<'a, 'tcx: 'a> {
 }
 
 impl<'a, 'tcx> Inherited<'a, 'tcx> {
-    fn new(tcx: &'a ty::ctxt<'tcx>,
+    fn new(tcx: &'a TyCtxt<'tcx>,
            tables: &'a RefCell<ty::Tables<'tcx>>,
            param_env: ty::ParameterEnvironment<'a, 'tcx>)
            -> Inherited<'a, 'tcx> {
 
         Inherited {
-            infcx: infer::new_infer_ctxt(tcx, tables, Some(param_env)),
+            infcx: infer::new_infer_ctxt(tcx, tables, Some(param_env), ProjectionMode::AnyFinal),
             fulfillment_cx: RefCell::new(traits::FulfillmentContext::new()),
             locals: RefCell::new(NodeMap()),
             tables: tables,
@@ -435,7 +434,7 @@ fn check_bare_fn<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
                            param_env: ty::ParameterEnvironment<'a, 'tcx>)
 {
     match raw_fty.sty {
-        ty::TyBareFn(_, ref fn_ty) => {
+        ty::TyFnDef(_, _, ref fn_ty) => {
             let tables = RefCell::new(ty::Tables::empty());
             let inh = Inherited::new(ccx.tcx, &tables, param_env);
 
@@ -462,8 +461,7 @@ fn check_bare_fn<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
             regionck::regionck_fn(&fcx, fn_id, fn_span, decl, body);
             writeback::resolve_type_vars_in_fn(&fcx, decl, body);
         }
-        _ => ccx.tcx.sess.impossible_case(body.span,
-                                 "check_bare_fn: function type expected")
+        _ => span_bug!(body.span, "check_bare_fn: function type expected")
     }
 }
 
@@ -672,10 +670,12 @@ pub fn check_item_type<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, it: &'tcx hir::Item) {
       hir::ItemFn(..) => {} // entirely within check_item_body
       hir::ItemImpl(_, _, _, _, _, ref impl_items) => {
           debug!("ItemImpl {} with id {}", it.name, it.id);
-          match ccx.tcx.impl_trait_ref(ccx.tcx.map.local_def_id(it.id)) {
+          let impl_def_id = ccx.tcx.map.local_def_id(it.id);
+          match ccx.tcx.impl_trait_ref(impl_def_id) {
               Some(impl_trait_ref) => {
                 check_impl_items_against_trait(ccx,
                                                it.span,
+                                               impl_def_id,
                                                &impl_trait_ref,
                                                impl_items);
               }
@@ -862,12 +862,71 @@ fn check_method_body<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
     check_bare_fn(ccx, &sig.decl, body, id, span, fty, param_env);
 }
 
+fn report_forbidden_specialization(tcx: &TyCtxt,
+                                   impl_item: &hir::ImplItem,
+                                   parent_impl: DefId)
+{
+    let mut err = struct_span_err!(
+        tcx.sess, impl_item.span, E0520,
+        "item `{}` is provided by an `impl` that specializes \
+         another, but the item in the parent `impl` is not \
+         marked `default` and so it cannot be specialized.",
+        impl_item.name);
+
+    match tcx.span_of_impl(parent_impl) {
+        Ok(span) => {
+            err.span_note(span, "parent implementation is here:");
+        }
+        Err(cname) => {
+            err.note(&format!("parent implementation is in crate `{}`", cname));
+        }
+    }
+
+    err.emit();
+}
+
+fn check_specialization_validity<'tcx>(tcx: &TyCtxt<'tcx>, trait_def: &ty::TraitDef<'tcx>,
+                                       impl_id: DefId, impl_item: &hir::ImplItem)
+{
+    let ancestors = trait_def.ancestors(impl_id);
+
+    let parent = match impl_item.node {
+        hir::ImplItemKind::Const(..) => {
+            ancestors.const_defs(tcx, impl_item.name).skip(1).next()
+                .map(|node_item| node_item.map(|parent| parent.defaultness))
+        }
+        hir::ImplItemKind::Method(..) => {
+            ancestors.fn_defs(tcx, impl_item.name).skip(1).next()
+                .map(|node_item| node_item.map(|parent| parent.defaultness))
+
+        }
+        hir::ImplItemKind::Type(_) => {
+            ancestors.type_defs(tcx, impl_item.name).skip(1).next()
+                .map(|node_item| node_item.map(|parent| parent.defaultness))
+        }
+    };
+
+    if let Some(parent) = parent {
+        if parent.item.is_final() {
+            report_forbidden_specialization(tcx, impl_item, parent.node.def_id());
+        }
+    }
+
+}
+
 fn check_impl_items_against_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
                                             impl_span: Span,
+                                            impl_id: DefId,
                                             impl_trait_ref: &ty::TraitRef<'tcx>,
                                             impl_items: &[hir::ImplItem]) {
-    // Locate trait methods
+    // If the trait reference itself is erroneous (so the compilation is going
+    // to fail), skip checking the items here -- the `impl_item` table in `tcx`
+    // isn't populated for such impls.
+    if impl_trait_ref.references_error() { return; }
+
+    // Locate trait definition and items
     let tcx = ccx.tcx;
+    let trait_def = tcx.lookup_trait_def(impl_trait_ref.def_id);
     let trait_items = tcx.trait_items(impl_trait_ref.def_id);
     let mut overridden_associated_type = None;
 
@@ -878,12 +937,13 @@ fn check_impl_items_against_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
         let ty_trait_item = trait_items.iter()
             .find(|ac| ac.name() == ty_impl_item.name());
 
+        // Check that impl definition matches trait definition
         if let Some(ty_trait_item) = ty_trait_item {
             match impl_item.node {
                 hir::ImplItemKind::Const(..) => {
                     let impl_const = match ty_impl_item {
                         ty::ConstTraitItem(ref cti) => cti,
-                        _ => tcx.sess.span_bug(impl_item.span, "non-const impl-item for const")
+                        _ => span_bug!(impl_item.span, "non-const impl-item for const")
                     };
 
                     // Find associated const definition.
@@ -906,7 +966,7 @@ fn check_impl_items_against_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
 
                     let impl_method = match ty_impl_item {
                         ty::MethodTraitItem(ref mti) => mti,
-                        _ => tcx.sess.span_bug(impl_item.span, "non-method impl-item for method")
+                        _ => span_bug!(impl_item.span, "non-method impl-item for method")
                     };
 
                     if let &ty::MethodTraitItem(ref trait_method) = ty_trait_item {
@@ -927,7 +987,7 @@ fn check_impl_items_against_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
                 hir::ImplItemKind::Type(_) => {
                     let impl_type = match ty_impl_item {
                         ty::TypeTraitItem(ref tti) => tti,
-                        _ => tcx.sess.span_bug(impl_item.span, "non-type impl-item for type")
+                        _ => span_bug!(impl_item.span, "non-type impl-item for type")
                     };
 
                     if let &ty::TypeTraitItem(ref at) = ty_trait_item {
@@ -944,6 +1004,8 @@ fn check_impl_items_against_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
                 }
             }
         }
+
+        check_specialization_validity(tcx, trait_def, impl_id, impl_item);
     }
 
     // Check for missing items from trait
@@ -952,9 +1014,13 @@ fn check_impl_items_against_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
     let mut invalidated_items = Vec::new();
     let associated_type_overridden = overridden_associated_type.is_some();
     for trait_item in trait_items.iter() {
+        let is_implemented;
+        let is_provided;
+
         match *trait_item {
             ty::ConstTraitItem(ref associated_const) => {
-                let is_implemented = impl_items.iter().any(|ii| {
+                is_provided = associated_const.has_value;
+                is_implemented = impl_items.iter().any(|ii| {
                     match ii.node {
                         hir::ImplItemKind::Const(..) => {
                             ii.name == associated_const.name
@@ -962,53 +1028,30 @@ fn check_impl_items_against_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
                         _ => false,
                     }
                 });
-                let is_provided = associated_const.has_value;
-
-                if !is_implemented {
-                    if !is_provided {
-                        missing_items.push(associated_const.name);
-                    } else if associated_type_overridden {
-                        invalidated_items.push(associated_const.name);
-                    }
-                }
             }
             ty::MethodTraitItem(ref trait_method) => {
-                let is_implemented =
-                    impl_items.iter().any(|ii| {
-                        match ii.node {
-                            hir::ImplItemKind::Method(..) => {
-                                ii.name == trait_method.name
-                            }
-                            _ => false,
-                        }
-                    });
-                let is_provided =
-                    provided_methods.iter().any(|m| m.name == trait_method.name);
-                if !is_implemented {
-                    if !is_provided {
-                        missing_items.push(trait_method.name);
-                    } else if associated_type_overridden {
-                        invalidated_items.push(trait_method.name);
-                    }
-                }
+                is_provided = provided_methods.iter().any(|m| m.name == trait_method.name);
+                is_implemented = trait_def.ancestors(impl_id)
+                    .fn_defs(tcx, trait_method.name)
+                    .next()
+                    .map(|node_item| !node_item.node.is_from_trait())
+                    .unwrap_or(false);
             }
-            ty::TypeTraitItem(ref associated_type) => {
-                let is_implemented = impl_items.iter().any(|ii| {
-                    match ii.node {
-                        hir::ImplItemKind::Type(_) => {
-                            ii.name == associated_type.name
-                        }
-                        _ => false,
-                    }
-                });
-                let is_provided = associated_type.ty.is_some();
-                if !is_implemented {
-                    if !is_provided {
-                        missing_items.push(associated_type.name);
-                    } else if associated_type_overridden {
-                        invalidated_items.push(associated_type.name);
-                    }
-                }
+            ty::TypeTraitItem(ref trait_assoc_ty) => {
+                is_provided = trait_assoc_ty.ty.is_some();
+                is_implemented = trait_def.ancestors(impl_id)
+                    .type_defs(tcx, trait_assoc_ty.name)
+                    .next()
+                    .map(|node_item| !node_item.node.is_from_trait())
+                    .unwrap_or(false);
+            }
+        }
+
+        if !is_implemented {
+            if !is_provided {
+                missing_items.push(trait_item.name());
+            } else if associated_type_overridden {
+                invalidated_items.push(trait_item.name());
             }
         }
     }
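The `is_provided` / `is_implemented` split above mirrors the usual default-method rules; a small illustrative sketch (hypothetical trait, not from the patch):

trait Animal {
    // Required item: no default body, so it is not "provided".
    fn name(&self) -> String;

    // Provided item: has a default body, so an impl may omit it.
    fn greeting(&self) -> String {
        format!("{} says hi", self.name())
    }
}

struct Dog;

impl Animal for Dog {
    fn name(&self) -> String { "Rex".to_string() }
    // `greeting` is provided and left unimplemented here, which is fine;
    // omitting `name` instead would trigger the missing-items error below.
}

fn main() {
    println!("{}", Dog.greeting());
}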
@@ -1092,7 +1135,7 @@ fn report_cast_to_unsized_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
 
 
 impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
-    fn tcx(&self) -> &ty::ctxt<'tcx> { self.ccx.tcx }
+    fn tcx(&self) -> &TyCtxt<'tcx> { self.ccx.tcx }
 
     fn get_item_type_scheme(&self, _: Span, id: DefId)
                             -> Result<ty::TypeScheme<'tcx>, ErrorReported>
@@ -1200,7 +1243,7 @@ impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
 }
 
 impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
-    fn tcx(&self) -> &ty::ctxt<'tcx> { self.ccx.tcx }
+    fn tcx(&self) -> &TyCtxt<'tcx> { self.ccx.tcx }
 
     pub fn infcx(&self) -> &infer::InferCtxt<'a,'tcx> {
         &self.inh.infcx
@@ -1521,8 +1564,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         match self.inh.tables.borrow().node_types.get(&ex.id) {
             Some(&t) => t,
             None => {
-                self.tcx().sess.bug(&format!("no type for expr in fcx {}",
-                                            self.tag()));
+                bug!("no type for expr in fcx {}", self.tag());
             }
         }
     }
@@ -1547,10 +1589,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             Some(&t) => t,
             None if self.err_count_since_creation() != 0 => self.tcx().types.err,
             None => {
-                self.tcx().sess.bug(
-                    &format!("no type for node {}: {} in fcx {}",
-                            id, self.tcx().map.node_to_string(id),
-                            self.tag()));
+                bug!("no type for node {}: {} in fcx {}",
+                     id, self.tcx().map.node_to_string(id),
+                     self.tag());
             }
         }
     }
@@ -1584,6 +1625,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                     sup: Ty<'tcx>)
                     -> Result<(), TypeError<'tcx>> {
         infer::mk_subty(self.infcx(), a_is_expected, origin, sub, sup)
+            // FIXME(#32730) propagate obligations
+            .map(|InferOk { obligations, .. }| assert!(obligations.is_empty()))
     }
 
     pub fn mk_eqty(&self,
@@ -1593,6 +1636,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                    sup: Ty<'tcx>)
                    -> Result<(), TypeError<'tcx>> {
         infer::mk_eqty(self.infcx(), a_is_expected, origin, sub, sup)
+            // FIXME(#32730) propagate obligations
+            .map(|InferOk { obligations, .. }| assert!(obligations.is_empty()))
     }
 
     pub fn mk_subr(&self,
@@ -1623,14 +1668,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         self.infcx().type_error_struct(sp, mk_msg, actual_ty, err)
     }
 
-    pub fn report_mismatched_types(&self,
-                                   sp: Span,
-                                   e: Ty<'tcx>,
-                                   a: Ty<'tcx>,
-                                   err: &TypeError<'tcx>) {
-        self.infcx().report_mismatched_types(sp, e, a, err)
-    }
-
     /// Registers an obligation for checking later, during regionck, that the type `ty` must
     /// outlive the region `r`.
     pub fn register_region_obligation(&self,
@@ -1710,6 +1747,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     }
 
     // FIXME(arielb1): use this instead of field.ty everywhere
+    // Only for fields! Returns <none> for methods.
+    // Indifferent to privacy flags
     pub fn field_ty(&self,
                     span: Span,
                     field: ty::FieldDef<'tcx>,
@@ -1720,8 +1759,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                                            &field.ty(self.tcx(), substs))
     }
 
-    // Only for fields! Returns <none> for methods>
-    // Indifferent to privacy flags
     fn check_casts(&self) {
         let mut deferred_cast_checks = self.inh.deferred_cast_checks.borrow_mut();
         for cast in deferred_cast_checks.drain(..) {
@@ -1732,8 +1769,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     /// Apply "fallbacks" to some types
     /// ! gets replaced with (), unconstrained ints with i32, and unconstrained floats with f64.
     fn default_type_parameters(&self) {
-        use middle::ty::error::UnconstrainedNumeric::Neither;
-        use middle::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
+        use rustc::ty::error::UnconstrainedNumeric::Neither;
+        use rustc::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
         for ty in &self.infcx().unsolved_variables() {
             let resolved = self.infcx().resolve_type_vars_if_possible(ty);
             if self.infcx().type_var_diverges(resolved) {
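As a reminder, the user-visible fallback behavior described in the comment above (illustrative only):

fn main() {
    let i = 1;    // unconstrained integer literal falls back to i32
    let f = 2.5;  // unconstrained float literal falls back to f64
    println!("{} {}", std::mem::size_of_val(&i), std::mem::size_of_val(&f)); // prints "4 8"
}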
@@ -1768,8 +1805,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     }
 
     fn new_select_all_obligations_and_apply_defaults(&self) {
-        use middle::ty::error::UnconstrainedNumeric::Neither;
-        use middle::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
+        use rustc::ty::error::UnconstrainedNumeric::Neither;
+        use rustc::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
 
         // For the time being this errs on the side of being memory wasteful but provides better
         // error reporting.
@@ -1879,7 +1916,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                                     match infer::mk_eqty(self.infcx(), false,
                                                          TypeOrigin::Misc(default.origin_span),
                                                          ty, default.ty) {
-                                        Ok(()) => {}
+                                        Ok(InferOk { obligations, .. }) => {
+                                            // FIXME(#32730) propagate obligations
+                                            assert!(obligations.is_empty())
+                                        },
                                         Err(_) => {
                                             conflicts.push((*ty, default));
                                         }
@@ -1941,8 +1981,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                                 default_map: &FnvHashMap<&Ty<'tcx>, type_variable::Default<'tcx>>,
                                 conflict: Ty<'tcx>)
                                 -> Option<type_variable::Default<'tcx>> {
-        use middle::ty::error::UnconstrainedNumeric::Neither;
-        use middle::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
+        use rustc::ty::error::UnconstrainedNumeric::Neither;
+        use rustc::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
 
         // Ensure that we apply the conflicting default first
         let mut unbound_tyvars = Vec::with_capacity(unbound_vars.len() + 1);
@@ -1972,7 +2012,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                             match infer::mk_eqty(self.infcx(), false,
                                                  TypeOrigin::Misc(default.origin_span),
                                                  ty, default.ty) {
-                                Ok(()) => {}
+                                // FIXME(#32730) propagate obligations
+                                Ok(InferOk { obligations, .. }) => assert!(obligations.is_empty()),
                                 Err(_) => {
                                     result = Some(default);
                                 }
@@ -2055,20 +2096,21 @@ pub enum UnresolvedTypeAction {
 ///
 /// Note: this method does not modify the adjustments table. The caller is responsible for
 /// inserting an AutoAdjustment record into the `fcx` using one of the suitable methods.
-pub fn autoderef<'a, 'tcx, T, F>(fcx: &FnCtxt<'a, 'tcx>,
-                                 sp: Span,
-                                 base_ty: Ty<'tcx>,
-                                 opt_expr: Option<&hir::Expr>,
-                                 unresolved_type_action: UnresolvedTypeAction,
-                                 mut lvalue_pref: LvaluePreference,
-                                 mut should_stop: F)
-                                 -> (Ty<'tcx>, usize, Option<T>)
-    where F: FnMut(Ty<'tcx>, usize) -> Option<T>,
+pub fn autoderef<'a, 'b, 'tcx, E, I, T, F>(fcx: &FnCtxt<'a, 'tcx>,
+                                           sp: Span,
+                                           base_ty: Ty<'tcx>,
+                                           maybe_exprs: E,
+                                           unresolved_type_action: UnresolvedTypeAction,
+                                           mut lvalue_pref: LvaluePreference,
+                                           mut should_stop: F)
+                                           -> (Ty<'tcx>, usize, Option<T>)
+    // FIXME(eddyb) use copyable iterators when that becomes ergonomic.
+    where E: Fn() -> I,
+          I: IntoIterator<Item=&'b hir::Expr>,
+          F: FnMut(Ty<'tcx>, usize) -> Option<T>,
 {
-    debug!("autoderef(base_ty={:?}, opt_expr={:?}, lvalue_pref={:?})",
-           base_ty,
-           opt_expr,
-           lvalue_pref);
+    debug!("autoderef(base_ty={:?}, lvalue_pref={:?})",
+           base_ty, lvalue_pref);
 
     let mut t = base_ty;
     for autoderefs in 0..fcx.tcx().sess.recursion_limit.get() {
@@ -2081,7 +2123,7 @@ pub fn autoderef<'a, 'tcx, T, F>(fcx: &FnCtxt<'a, 'tcx>,
                 // (i.e. it is an inference variable) because `Ty::builtin_deref`
                 // and `try_overloaded_deref` both simply return `None`
                 // in such a case without producing spurious errors.
-                fcx.resolve_type_vars_if_possible(t)
+                fcx.infcx().resolve_type_vars_if_possible(&t)
             }
         };
         if resolved_t.references_error() {
@@ -2094,34 +2136,34 @@ pub fn autoderef<'a, 'tcx, T, F>(fcx: &FnCtxt<'a, 'tcx>,
         }
 
         // Otherwise, deref if type is derefable:
-        let mt = match resolved_t.builtin_deref(false, lvalue_pref) {
-            Some(mt) => Some(mt),
-            None => {
-                let method_call =
-                    opt_expr.map(|expr| MethodCall::autoderef(expr.id, autoderefs as u32));
-
-                // Super subtle: it might seem as though we should
-                // pass `opt_expr` to `try_overloaded_deref`, so that
-                // the (implicit) autoref of using an overloaded deref
-                // would get added to the adjustment table. However we
-                // do not do that, because it's kind of a
-                // "meta-adjustment" -- instead, we just leave it
-                // unrecorded and know that there "will be" an
-                // autoref. regionck and other bits of the code base,
-                // when they encounter an overloaded autoderef, have
-                // to do some reconstructive surgery. This is a pretty
-                // complex mess that is begging for a proper MIR.
-                try_overloaded_deref(fcx, sp, method_call, None, resolved_t, lvalue_pref)
+
+        // Super subtle: it might seem as though we should
+        // pass `opt_expr` to `try_overloaded_deref`, so that
+        // the (implicit) autoref of using an overloaded deref
+        // would get added to the adjustment table. However we
+        // do not do that, because it's kind of a
+        // "meta-adjustment" -- instead, we just leave it
+        // unrecorded and know that there "will be" an
+        // autoref. regionck and other bits of the code base,
+        // when they encounter an overloaded autoderef, have
+        // to do some reconstructive surgery. This is a pretty
+        // complex mess that is begging for a proper MIR.
+        let mt = if let Some(mt) = resolved_t.builtin_deref(false, lvalue_pref) {
+            mt
+        } else if let Some(method) = try_overloaded_deref(fcx, sp, None,
+                                                          resolved_t, lvalue_pref) {
+            for expr in maybe_exprs() {
+                let method_call = MethodCall::autoderef(expr.id, autoderefs as u32);
+                fcx.inh.tables.borrow_mut().method_map.insert(method_call, method);
             }
+            make_overloaded_lvalue_return_type(fcx.tcx(), method)
+        } else {
+            return (resolved_t, autoderefs, None);
         };
-        match mt {
-            Some(mt) => {
-                t = mt.ty;
-                if mt.mutbl == hir::MutImmutable {
-                    lvalue_pref = NoPreference;
-                }
-            }
-            None => return (resolved_t, autoderefs, None)
+
+        t = mt.ty;
+        if mt.mutbl == hir::MutImmutable {
+            lvalue_pref = NoPreference;
         }
     }
 
@@ -2134,11 +2176,10 @@ pub fn autoderef<'a, 'tcx, T, F>(fcx: &FnCtxt<'a, 'tcx>,
 
 fn try_overloaded_deref<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                   span: Span,
-                                  method_call: Option<MethodCall>,
                                   base_expr: Option<&hir::Expr>,
                                   base_ty: Ty<'tcx>,
                                   lvalue_pref: LvaluePreference)
-                                  -> Option<ty::TypeAndMut<'tcx>>
+                                  -> Option<MethodCallee<'tcx>>
 {
     // Try DerefMut first, if preferred.
     let method = match (lvalue_pref, fcx.tcx().lang_items.deref_mut_trait()) {
@@ -2160,33 +2201,23 @@ fn try_overloaded_deref<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
         (method, _) => method
     };
 
-    make_overloaded_lvalue_return_type(fcx, method_call, method)
+    method
 }
 
 /// For the overloaded lvalue expressions (`*x`, `x[3]`), the trait returns a type of `&T`, but the
 /// actual type we assign to the *expression* is `T`. So this function just peels off the return
-/// type by one layer to yield `T`. It also inserts the `method-callee` into the method map.
-fn make_overloaded_lvalue_return_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
-                                                method_call: Option<MethodCall>,
-                                                method: Option<MethodCallee<'tcx>>)
-                                                -> Option<ty::TypeAndMut<'tcx>>
+/// type by one layer to yield `T`.
+fn make_overloaded_lvalue_return_type<'tcx>(tcx: &TyCtxt<'tcx>,
+                                            method: MethodCallee<'tcx>)
+                                            -> ty::TypeAndMut<'tcx>
 {
-    match method {
-        Some(method) => {
-            // extract method return type, which will be &T;
-            // all LB regions should have been instantiated during method lookup
-            let ret_ty = method.ty.fn_ret();
-            let ret_ty = fcx.tcx().no_late_bound_regions(&ret_ty).unwrap().unwrap();
-
-            if let Some(method_call) = method_call {
-                fcx.inh.tables.borrow_mut().method_map.insert(method_call, method);
-            }
+    // extract method return type, which will be &T;
+    // all LB regions should have been instantiated during method lookup
+    let ret_ty = method.ty.fn_ret();
+    let ret_ty = tcx.no_late_bound_regions(&ret_ty).unwrap().unwrap();
 
-            // method returns &T, but the type as visible to user is T, so deref
-            ret_ty.builtin_deref(true, NoPreference)
-        }
-        None => None,
-    }
+    // method returns &T, but the type as visible to user is T, so deref
+    ret_ty.builtin_deref(true, NoPreference).unwrap()
 }
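make_overloaded_lvalue_return_type and the autoderef loop above implement the user-visible `Deref` behavior; a short sketch of that behavior (hypothetical `Wrapper` type, not part of the patch):

use std::ops::Deref;

struct Wrapper(String);

impl Deref for Wrapper {
    type Target = String;
    // The trait method returns `&String`; the type the user sees for `*w`
    // is `String`, i.e. the return type peeled by one reference layer.
    fn deref(&self) -> &String { &self.0 }
}

fn main() {
    let w = Wrapper("autoderef".to_string());
    // Method lookup autoderefs `&Wrapper -> &String -> &str` to find `len`.
    println!("{} has length {}", *w, w.len());
}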
 
 fn lookup_indexing<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
@@ -2204,7 +2235,7 @@ fn lookup_indexing<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
     let (ty, autoderefs, final_mt) = autoderef(fcx,
                                                base_expr.span,
                                                base_ty,
-                                               Some(base_expr),
+                                               || Some(base_expr),
                                                UnresolvedTypeAction::Error,
                                                lvalue_pref,
                                                |adj_ty, idx| {
@@ -2301,10 +2332,10 @@ fn try_index_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
     // If some lookup succeeds, write callee into table and extract index/element
     // type from the method signature.
     // If some lookup succeeded, install method in table
-    method.and_then(|method| {
+    method.map(|method| {
         debug!("try_index_step: success, using overloaded indexing");
-        make_overloaded_lvalue_return_type(fcx, Some(method_call), Some(method)).
-            map(|ret| (input_ty, ret.ty))
+        fcx.inh.tables.borrow_mut().method_map.insert(method_call, method);
+        (input_ty, make_overloaded_lvalue_return_type(fcx.tcx(), method).ty)
     })
 }
 
@@ -2334,7 +2365,7 @@ fn check_method_argument_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
         ty::FnConverging(fcx.tcx().types.err)
     } else {
         match method_fn_ty.sty {
-            ty::TyBareFn(_, ref fty) => {
+            ty::TyFnDef(_, _, ref fty) => {
                 // HACK(eddyb) ignore self in the definition (see above).
                 let expected_arg_tys = expected_types_for_fn_args(fcx,
                                                                   sp,
@@ -2351,8 +2382,7 @@ fn check_method_argument_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                 fty.sig.0.output
             }
             _ => {
-                fcx.tcx().sess.span_bug(callee_expr.span,
-                                        "method without bare fn type");
+                span_bug!(callee_expr.span, "method without bare fn type");
             }
         }
     }
@@ -2503,20 +2533,17 @@ fn check_argument_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                     Expectation::rvalue_hint(fcx.tcx(), ty)
                 });
 
-                check_expr_with_unifier(fcx,
-                                        &arg,
-                                        expected.unwrap_or(ExpectHasType(formal_ty)),
-                                        NoPreference, || {
-                    // 2. Coerce to the most detailed type that could be coerced
-                    //    to, which is `expected_ty` if `rvalue_hint` returns an
-                    //    `ExprHasType(expected_ty)`, or the `formal_ty` otherwise.
-                    let coerce_ty = expected.and_then(|e| e.only_has_type(fcx));
-                    demand::coerce(fcx, arg.span, coerce_ty.unwrap_or(formal_ty), &arg);
-
-                    // 3. Relate the expected type and the formal one,
-                    //    if the expected type was used for the coercion.
-                    coerce_ty.map(|ty| demand::suptype(fcx, arg.span, formal_ty, ty));
-                });
+                check_expr_with_expectation(fcx, &arg,
+                    expected.unwrap_or(ExpectHasType(formal_ty)));
+                // 2. Coerce to the most detailed type that could be coerced
+                //    to, which is `expected_ty` if `rvalue_hint` returns an
+                //    `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise.
+                let coerce_ty = expected.and_then(|e| e.only_has_type(fcx));
+                demand::coerce(fcx, arg.span, coerce_ty.unwrap_or(formal_ty), &arg);
+
+                // 3. Relate the expected type and the formal one,
+                //    if the expected type was used for the coercion.
+                coerce_ty.map(|ty| demand::suptype(fcx, arg.span, formal_ty, ty));
             }
 
             if let Some(&arg_ty) = fcx.inh.tables.borrow().node_types.get(&arg.id) {
@@ -2551,24 +2578,33 @@ fn check_argument_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                 ty::TyFloat(ast::FloatTy::F32) => {
                     fcx.type_error_message(arg.span,
                                            |t| {
-                        format!("can't pass an {} to variadic \
-                                 function, cast to c_double", t)
+                        format!("can't pass an `{}` to variadic \
+                                 function, cast to `c_double`", t)
                     }, arg_ty, None);
                 }
                 ty::TyInt(ast::IntTy::I8) | ty::TyInt(ast::IntTy::I16) | ty::TyBool => {
                     fcx.type_error_message(arg.span, |t| {
-                        format!("can't pass {} to variadic \
-                                 function, cast to c_int",
+                        format!("can't pass `{}` to variadic \
+                                 function, cast to `c_int`",
                                        t)
                     }, arg_ty, None);
                 }
                 ty::TyUint(ast::UintTy::U8) | ty::TyUint(ast::UintTy::U16) => {
                     fcx.type_error_message(arg.span, |t| {
-                        format!("can't pass {} to variadic \
-                                 function, cast to c_uint",
+                        format!("can't pass `{}` to variadic \
+                                 function, cast to `c_uint`",
                                        t)
                     }, arg_ty, None);
                 }
+                ty::TyFnDef(_, _, f) => {
+                    let ptr_ty = fcx.tcx().mk_ty(ty::TyFnPtr(f));
+                    let ptr_ty = fcx.infcx().resolve_type_vars_if_possible(&ptr_ty);
+                    fcx.type_error_message(arg.span,
+                                           |t| {
+                        format!("can't pass `{}` to variadic \
+                                 function, cast to `{}`", t, ptr_ty)
+                    }, arg_ty, None);
+                }
                 _ => {}
             }
         }
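The new `TyFnDef` arm joins the existing variadic-argument diagnostics; for reference, the casts those messages ask for look roughly like this in user code (illustrative, using the C `printf` from libc):

use std::os::raw::{c_char, c_double, c_int};

extern "C" {
    fn printf(fmt: *const c_char, ...) -> c_int;
}

fn main() {
    let x: f32 = 1.5;
    let n: i16 = 7;
    unsafe {
        // C default argument promotion applies to varargs, so f32 and small
        // integers must be cast to c_double / c_int before being passed.
        printf(b"%f %d\n\0".as_ptr() as *const c_char, x as c_double, n as c_int);
    }
}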
@@ -2576,7 +2612,7 @@ fn check_argument_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
 }
 
 // FIXME(#17596) Ty<'tcx> is incorrectly invariant w.r.t 'tcx.
-fn err_args<'tcx>(tcx: &ty::ctxt<'tcx>, len: usize) -> Vec<Ty<'tcx>> {
+fn err_args<'tcx>(tcx: &TyCtxt<'tcx>, len: usize) -> Vec<Ty<'tcx>> {
     (0..len).map(|_| tcx.types.err).collect()
 }
 
@@ -2613,7 +2649,7 @@ fn check_lit<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                     ty::TyInt(_) | ty::TyUint(_) => Some(ty),
                     ty::TyChar => Some(tcx.types.u8),
                     ty::TyRawPtr(..) => Some(tcx.types.usize),
-                    ty::TyBareFn(..) => Some(tcx.types.usize),
+                    ty::TyFnDef(..) | ty::TyFnPtr(_) => Some(tcx.types.usize),
                     _ => None
                 }
             });
@@ -2638,57 +2674,42 @@ fn check_lit<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
 fn check_expr_eq_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                 expr: &'tcx hir::Expr,
                                 expected: Ty<'tcx>) {
-    check_expr_with_unifier(
-        fcx, expr, ExpectHasType(expected), NoPreference,
-        || demand::eqtype(fcx, expr.span, expected, fcx.expr_ty(expr)));
+    check_expr_with_hint(fcx, expr, expected);
+    demand::eqtype(fcx, expr.span, expected, fcx.expr_ty(expr));
 }
 
 pub fn check_expr_has_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                      expr: &'tcx hir::Expr,
                                      expected: Ty<'tcx>) {
-    check_expr_with_unifier(
-        fcx, expr, ExpectHasType(expected), NoPreference,
-        || demand::suptype(fcx, expr.span, expected, fcx.expr_ty(expr)));
+    check_expr_with_hint(fcx, expr, expected);
+    demand::suptype(fcx, expr.span, expected, fcx.expr_ty(expr));
 }
 
 fn check_expr_coercable_to_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                           expr: &'tcx hir::Expr,
                                           expected: Ty<'tcx>) {
-    check_expr_with_unifier(
-        fcx, expr, ExpectHasType(expected), NoPreference,
-        || demand::coerce(fcx, expr.span, expected, expr));
+    check_expr_with_hint(fcx, expr, expected);
+    demand::coerce(fcx, expr.span, expected, expr);
 }
 
 fn check_expr_with_hint<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, expr: &'tcx hir::Expr,
                                   expected: Ty<'tcx>) {
-    check_expr_with_unifier(
-        fcx, expr, ExpectHasType(expected), NoPreference,
-        || ())
+    check_expr_with_expectation(fcx, expr, ExpectHasType(expected))
 }
 
 fn check_expr_with_expectation<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                          expr: &'tcx hir::Expr,
                                          expected: Expectation<'tcx>) {
-    check_expr_with_unifier(
-        fcx, expr, expected, NoPreference,
-        || ())
-}
-
-fn check_expr_with_expectation_and_lvalue_pref<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
-                                                         expr: &'tcx hir::Expr,
-                                                         expected: Expectation<'tcx>,
-                                                         lvalue_pref: LvaluePreference)
-{
-    check_expr_with_unifier(fcx, expr, expected, lvalue_pref, || ())
+    check_expr_with_expectation_and_lvalue_pref(fcx, expr, expected, NoPreference)
 }
 
 fn check_expr<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, expr: &'tcx hir::Expr)  {
-    check_expr_with_unifier(fcx, expr, NoExpectation, NoPreference, || ())
+    check_expr_with_expectation(fcx, expr, NoExpectation)
 }
 
 fn check_expr_with_lvalue_pref<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, expr: &'tcx hir::Expr,
                                         lvalue_pref: LvaluePreference)  {
-    check_expr_with_unifier(fcx, expr, NoExpectation, lvalue_pref, || ())
+    check_expr_with_expectation_and_lvalue_pref(fcx, expr, NoExpectation, lvalue_pref)
 }
 
 // determine the `self` type, using fresh variables for all variables
@@ -2760,8 +2781,10 @@ fn expected_types_for_fn_args<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                 let ures = fcx.infcx().sub_types(false, origin, formal_ret_ty, ret_ty);
                 // FIXME(#15760) can't use try! here, FromError doesn't default
                 // to identity so the resulting type is not constrained.
-                if let Err(e) = ures {
-                    return Err(e);
+                match ures {
+                    // FIXME(#32730) propagate obligations
+                    Ok(InferOk { obligations, .. }) => assert!(obligations.is_empty()),
+                    Err(e) => return Err(e),
                 }
 
                 // Record all the argument types, with the substitutions
@@ -2790,13 +2813,10 @@ fn expected_types_for_fn_args<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
 /// Note that inspecting a type's structure *directly* may expose the fact
 /// that there are actually multiple representations for `TyError`, so avoid
 /// that when err needs to be handled differently.
-fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
-                                        expr: &'tcx hir::Expr,
-                                        expected: Expectation<'tcx>,
-                                        lvalue_pref: LvaluePreference,
-                                        unifier: F) where
-    F: FnOnce(),
-{
+fn check_expr_with_expectation_and_lvalue_pref<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
+                                                         expr: &'tcx hir::Expr,
+                                                         expected: Expectation<'tcx>,
+                                                         lvalue_pref: LvaluePreference) {
     debug!(">> typechecking: expr={:?} expected={:?}",
            expr, expected);
 
@@ -2867,30 +2887,62 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
         check_block_with_expected(fcx, then_blk, expected);
         let then_ty = fcx.node_ty(then_blk.id);
 
-        let branches_ty = match opt_else_expr {
-            Some(ref else_expr) => {
-                check_expr_with_expectation(fcx, &else_expr, expected);
-                let else_ty = fcx.expr_ty(&else_expr);
-                infer::common_supertype(fcx.infcx(),
-                                        TypeOrigin::IfExpression(sp),
-                                        true,
-                                        then_ty,
-                                        else_ty)
-            }
-            None => {
-                infer::common_supertype(fcx.infcx(),
-                                        TypeOrigin::IfExpressionWithNoElse(sp),
-                                        false,
-                                        then_ty,
-                                        fcx.tcx().mk_nil())
-            }
-        };
+        let unit = fcx.tcx().mk_nil();
+        let (origin, expected, found, result) =
+        if let Some(else_expr) = opt_else_expr {
+            check_expr_with_expectation(fcx, else_expr, expected);
+            let else_ty = fcx.expr_ty(else_expr);
+            let origin = TypeOrigin::IfExpression(sp);
+
+            // Only try to coerce-unify if we have a then expression
+            // to assign coercions to, otherwise it's () or diverging.
+            let result = if let Some(ref then) = then_blk.expr {
+                let res = coercion::try_find_lub(fcx, origin, || Some(&**then),
+                                                 then_ty, else_expr);
+
+                // In case we did perform an adjustment, we have to update
+                // the type of the block, because old trans still uses it.
+                let adj = fcx.inh.tables.borrow().adjustments.get(&then.id).cloned();
+                if res.is_ok() && adj.is_some() {
+                    fcx.write_ty(then_blk.id, fcx.adjust_expr_ty(then, adj.as_ref()));
+                }
 
-        let cond_ty = fcx.expr_ty(cond_expr);
-        let if_ty = if cond_ty.references_error() {
-            fcx.tcx().types.err
+                res
+            } else {
+                fcx.infcx().commit_if_ok(|_| {
+                    let trace = TypeTrace::types(origin, true, then_ty, else_ty);
+                    fcx.infcx().lub(true, trace, &then_ty, &else_ty)
+                        .map(|InferOk { value, obligations }| {
+                            // FIXME(#32730) propagate obligations
+                            assert!(obligations.is_empty());
+                            value
+                        })
+                })
+            };
+            (origin, then_ty, else_ty, result)
         } else {
-            branches_ty
+            let origin = TypeOrigin::IfExpressionWithNoElse(sp);
+            (origin, unit, then_ty,
+             fcx.infcx().eq_types(true, origin, unit, then_ty)
+                 .map(|InferOk { obligations, .. }| {
+                     // FIXME(#32730) propagate obligations
+                     assert!(obligations.is_empty());
+                     unit
+                 }))
+        };
+
+        let if_ty = match result {
+            Ok(ty) => {
+                if fcx.expr_ty(cond_expr).references_error() {
+                    fcx.tcx().types.err
+                } else {
+                    ty
+                }
+            }
+            Err(e) => {
+                fcx.infcx().report_mismatched_types(origin, expected, found, e);
+                fcx.tcx().types.err
+            }
         };
 
         fcx.write_ty(id, if_ty);
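With the rewrite above, the two branches of an `if` only need to agree up to coercion (via coercion::try_find_lub) rather than having a common supertype computed eagerly; for example (illustrative):

fn main() {
    let long = [1u8, 2, 3];
    let short = [9u8];
    // The arms have types `&[u8; 3]` and `&[u8; 1]`; both coerce (unsize)
    // to the common type `&[u8]`, which is what the whole `if` gets.
    let chosen: &[u8] = if long.len() > short.len() { &long } else { &short };
    println!("{:?}", chosen);
}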
@@ -2903,25 +2955,26 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
                             base: &'tcx hir::Expr,
                             field: &Spanned<ast::Name>) {
         check_expr_with_lvalue_pref(fcx, base, lvalue_pref);
-        let expr_t = structurally_resolved_type(fcx, expr.span,
-                                                fcx.expr_ty(base));
-        // FIXME(eddyb) #12808 Integrate privacy into this auto-deref loop.
+        let expr_t = structurally_resolved_type(fcx, expr.span, fcx.expr_ty(base));
+        let mut private_candidate = None;
         let (_, autoderefs, field_ty) = autoderef(fcx,
                                                   expr.span,
                                                   expr_t,
-                                                  Some(base),
+                                                  || Some(base),
                                                   UnresolvedTypeAction::Error,
                                                   lvalue_pref,
                                                   |base_t, _| {
-                match base_t.sty {
-                    ty::TyStruct(base_def, substs) => {
-                        debug!("struct named {:?}",  base_t);
-                        base_def.struct_variant()
-                                .find_field_named(field.node)
-                                .map(|f| fcx.field_ty(expr.span, f, substs))
+                if let ty::TyStruct(base_def, substs) = base_t.sty {
+                    debug!("struct named {:?}",  base_t);
+                    if let Some(field) = base_def.struct_variant().find_field_named(field.node) {
+                        let field_ty = fcx.field_ty(expr.span, field, substs);
+                        if field.vis.is_accessible_from(fcx.body_id, &fcx.tcx().map) {
+                            return Some(field_ty);
+                        }
+                        private_candidate = Some((base_def.did, field_ty));
                     }
-                    _ => None
                 }
+                None
             });
         match field_ty {
             Some(field_ty) => {
@@ -2932,12 +2985,14 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
             None => {}
         }
 
-        if field.node == special_idents::invalid.name {
+        if let Some((did, field_ty)) = private_candidate {
+            let struct_path = fcx.tcx().item_path_str(did);
+            let msg = format!("field `{}` of struct `{}` is private", field.node, struct_path);
+            fcx.tcx().sess.span_err(expr.span, &msg);
+            fcx.write_ty(expr.id, field_ty);
+        } else if field.node == special_idents::invalid.name {
             fcx.write_error(expr.id);
-            return;
-        }
-
-        if method::exists(fcx, field.span, field.node, expr_t, expr.id) {
+        } else if method::exists(fcx, field.span, field.node, expr_t, expr.id) {
             fcx.type_error_struct(field.span,
                                   |actual| {
                                        format!("attempted to take value of method `{}` on type \
@@ -2948,6 +3003,7 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
                                "maybe a `()` to call it is missing? \
                                If not, try an anonymous function")
                 .emit();
+            fcx.write_error(expr.id);
         } else {
             let mut err = fcx.type_error_struct(
                 expr.span,
@@ -2963,9 +3019,8 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
                 suggest_field_names(&mut err, def.struct_variant(), field, vec![]);
             }
             err.emit();
+            fcx.write_error(expr.id);
         }
-
-        fcx.write_error(expr.id);
     }
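The private_candidate path added above reports an inaccessible field as a dedicated privacy error (while still recording its type) instead of falling through to the generic no-such-field message; roughly this situation (hypothetical names):

mod geometry {
    pub struct Point {
        pub x: i32,
        y: i32, // private outside `geometry`
    }

    pub fn origin() -> Point {
        Point { x: 0, y: 0 }
    }
}

fn main() {
    let p = geometry::origin();
    println!("{}", p.x);
    // println!("{}", p.y); // error: field `y` of struct `geometry::Point` is private
}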
 
     // displays hints about the closest matches in field names
@@ -3000,36 +3055,37 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
                                 base: &'tcx hir::Expr,
                                 idx: codemap::Spanned<usize>) {
         check_expr_with_lvalue_pref(fcx, base, lvalue_pref);
-        let expr_t = structurally_resolved_type(fcx, expr.span,
-                                                fcx.expr_ty(base));
+        let expr_t = structurally_resolved_type(fcx, expr.span, fcx.expr_ty(base));
+        let mut private_candidate = None;
         let mut tuple_like = false;
-        // FIXME(eddyb) #12808 Integrate privacy into this auto-deref loop.
         let (_, autoderefs, field_ty) = autoderef(fcx,
                                                   expr.span,
                                                   expr_t,
-                                                  Some(base),
+                                                  || Some(base),
                                                   UnresolvedTypeAction::Error,
                                                   lvalue_pref,
                                                   |base_t, _| {
-                match base_t.sty {
-                    ty::TyStruct(base_def, substs) => {
-                        tuple_like = base_def.struct_variant().is_tuple_struct();
-                        if tuple_like {
-                            debug!("tuple struct named {:?}",  base_t);
-                            base_def.struct_variant()
-                                    .fields
-                                    .get(idx.node)
-                                    .map(|f| fcx.field_ty(expr.span, f, substs))
-                        } else {
-                            None
-                        }
-                    }
+                let (base_def, substs) = match base_t.sty {
+                    ty::TyStruct(base_def, substs) => (base_def, substs),
                     ty::TyTuple(ref v) => {
                         tuple_like = true;
-                        if idx.node < v.len() { Some(v[idx.node]) } else { None }
+                        return if idx.node < v.len() { Some(v[idx.node]) } else { None }
                     }
-                    _ => None
+                    _ => return None,
+                };
+
+                tuple_like = base_def.struct_variant().is_tuple_struct();
+                if !tuple_like { return None }
+
+                debug!("tuple struct named {:?}",  base_t);
+                if let Some(field) = base_def.struct_variant().fields.get(idx.node) {
+                    let field_ty = fcx.field_ty(expr.span, field, substs);
+                    if field.vis.is_accessible_from(fcx.body_id, &fcx.tcx().map) {
+                        return Some(field_ty);
+                    }
+                    private_candidate = Some((base_def.did, field_ty));
                 }
+                None
             });
         match field_ty {
             Some(field_ty) => {
@@ -3039,6 +3095,15 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
             }
             None => {}
         }
+
+        if let Some((did, field_ty)) = private_candidate {
+            let struct_path = fcx.tcx().item_path_str(did);
+            let msg = format!("field `{}` of struct `{}` is private", idx.node, struct_path);
+            fcx.tcx().sess.span_err(expr.span, &msg);
+            fcx.write_ty(expr.id, field_ty);
+            return;
+        }
+
         fcx.type_error_message(
             expr.span,
             |actual| {
@@ -3090,7 +3155,7 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
         let tcx = fcx.ccx.tcx;
         let substs = match adt_ty.sty {
             ty::TyStruct(_, substs) | ty::TyEnum(_, substs) => substs,
-            _ => tcx.sess.span_bug(span, "non-ADT passed to check_expr_struct_fields")
+            _ => span_bug!(span, "non-ADT passed to check_expr_struct_fields")
         };
 
         let mut remaining_fields = FnvHashMap();
@@ -3255,21 +3320,21 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
             match unop {
                 hir::UnDeref => {
                     oprnd_t = structurally_resolved_type(fcx, expr.span, oprnd_t);
-                    oprnd_t = match oprnd_t.builtin_deref(true, NoPreference) {
-                        Some(mt) => mt.ty,
-                        None => match try_overloaded_deref(fcx, expr.span,
-                                                           Some(MethodCall::expr(expr.id)),
-                                                           Some(&oprnd), oprnd_t, lvalue_pref) {
-                            Some(mt) => mt.ty,
-                            None => {
-                                fcx.type_error_message(expr.span, |actual| {
-                                    format!("type `{}` cannot be \
-                                            dereferenced", actual)
-                                }, oprnd_t, None);
-                                tcx.types.err
-                            }
-                        }
-                    };
+
+                    if let Some(mt) = oprnd_t.builtin_deref(true, NoPreference) {
+                        oprnd_t = mt.ty;
+                    } else if let Some(method) = try_overloaded_deref(
+                            fcx, expr.span, Some(&oprnd), oprnd_t, lvalue_pref) {
+                        oprnd_t = make_overloaded_lvalue_return_type(tcx, method).ty;
+                        fcx.inh.tables.borrow_mut().method_map.insert(MethodCall::expr(expr.id),
+                                                                      method);
+                    } else {
+                        fcx.type_error_message(expr.span, |actual| {
+                            format!("type `{}` cannot be \
+                                    dereferenced", actual)
+                        }, oprnd_t, None);
+                        oprnd_t = tcx.types.err;
+                    }
                 }
                 hir::UnNot => {
                     oprnd_t = structurally_resolved_type(fcx, oprnd.span,
@@ -3348,12 +3413,10 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
                 // Create some fake resolution that can't possibly be a type.
                 def::PathResolution {
                     base_def: Def::Mod(tcx.map.local_def_id(ast::CRATE_NODE_ID)),
-                    last_private: LastMod(AllPublic),
                     depth: path.segments.len()
                 }
             } else {
-              tcx.sess.span_bug(expr.span,
-                                &format!("unbound path {:?}", expr))
+              span_bug!(expr.span, "unbound path {:?}", expr)
           };
 
           if let Some((opt_ty, segments, def)) =
@@ -3382,12 +3445,12 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
               fcx.add_wf_bounds(&item_substs.substs, expr);
           });
       }
-      hir::ExprInlineAsm(ref ia) => {
-          for &(_, ref input) in &ia.inputs {
-              check_expr(fcx, &input);
+      hir::ExprInlineAsm(_, ref outputs, ref inputs) => {
+          for output in outputs {
+              check_expr(fcx, output);
           }
-          for out in &ia.outputs {
-              check_expr(fcx, &out.expr);
+          for input in inputs {
+              check_expr(fcx, input);
           }
           fcx.write_nil(id);
       }
@@ -3514,7 +3577,7 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
 
             // Defer other checks until we're done type checking.
             let mut deferred_cast_checks = fcx.inh.deferred_cast_checks.borrow_mut();
-            let cast_check = cast::CastCheck::new((**e).clone(), t_expr, t_cast, expr.span);
+            let cast_check = cast::CastCheck::new(e, t_expr, t_cast, expr.span);
             deferred_cast_checks.push(cast_check);
         }
       }
@@ -3531,27 +3594,34 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
             }
         });
 
-        let typ = match uty {
-            Some(uty) => {
-                for e in args {
-                    check_expr_coercable_to_type(fcx, &e, uty);
-                }
-                uty
-            }
-            None => {
-                let t: Ty = fcx.infcx().next_ty_var();
-                for e in args {
-                    check_expr_has_type(fcx, &e, t);
+        let mut unified = fcx.infcx().next_ty_var();
+        let coerce_to = uty.unwrap_or(unified);
+
+        for (i, e) in args.iter().enumerate() {
+            check_expr_with_hint(fcx, e, coerce_to);
+            let e_ty = fcx.expr_ty(e);
+            let origin = TypeOrigin::Misc(e.span);
+
+            // Special-case the first element, as it has no "previous expressions".
+            let result = if i == 0 {
+                coercion::try(fcx, e, coerce_to)
+            } else {
+                let prev_elems = || args[..i].iter().map(|e| &**e);
+                coercion::try_find_lub(fcx, origin, prev_elems, unified, e)
+            };
+
+            match result {
+                Ok(ty) => unified = ty,
+                Err(e) => {
+                    fcx.infcx().report_mismatched_types(origin, unified, e_ty, e);
                 }
-                t
             }
-        };
-        let typ = tcx.mk_array(typ, args.len());
-        fcx.write_ty(id, typ);
+        }
+        fcx.write_ty(id, tcx.mk_array(unified, args.len()));
       }
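Array literal elements are now unified pairwise, with later elements LUB-coerced against the earlier ones instead of all being equated to a single inference variable; the user-visible effect (illustrative):

fn main() {
    let a = [1u8, 2, 3];
    let b = [4u8];
    // The elements have types `&[u8; 3]` and `&[u8; 1]`; they agree only up
    // to an unsizing coercion, so the element type becomes `&[u8]`.
    let xs: [&[u8]; 2] = [&a, &b];
    println!("{:?}", xs);
}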
       hir::ExprRepeat(ref element, ref count_expr) => {
         check_expr_has_type(fcx, &count_expr, tcx.types.usize);
-        let count = fcx.tcx().eval_repeat_count(&count_expr);
+        let count = eval_repeat_count(fcx.tcx(), &count_expr);
 
         let uty = match expected {
             ExpectHasType(uty) => {
@@ -3668,87 +3738,6 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
               }
           }
        }
-       hir::ExprRange(ref start, ref end) => {
-          let t_start = start.as_ref().map(|e| {
-            check_expr(fcx, &e);
-            fcx.expr_ty(&e)
-          });
-          let t_end = end.as_ref().map(|e| {
-            check_expr(fcx, &e);
-            fcx.expr_ty(&e)
-          });
-
-          let idx_type = match (t_start, t_end) {
-              (Some(ty), None) | (None, Some(ty)) => {
-                  Some(ty)
-              }
-              (Some(t_start), Some(t_end)) if (t_start.references_error() ||
-                                               t_end.references_error()) => {
-                  Some(fcx.tcx().types.err)
-              }
-              (Some(t_start), Some(t_end)) => {
-                  Some(infer::common_supertype(fcx.infcx(),
-                                               TypeOrigin::RangeExpression(expr.span),
-                                               true,
-                                               t_start,
-                                               t_end))
-              }
-              _ => None
-          };
-
-          // Note that we don't check the type of start/end satisfy any
-          // bounds because right now the range structs do not have any. If we add
-          // some bounds, then we'll need to check `t_start` against them here.
-
-          let range_type = match idx_type {
-            Some(idx_type) if idx_type.references_error() => {
-                fcx.tcx().types.err
-            }
-            Some(idx_type) => {
-                // Find the did from the appropriate lang item.
-                let did = match (start, end) {
-                    (&Some(_), &Some(_)) => tcx.lang_items.range_struct(),
-                    (&Some(_), &None) => tcx.lang_items.range_from_struct(),
-                    (&None, &Some(_)) => tcx.lang_items.range_to_struct(),
-                    (&None, &None) => {
-                        tcx.sess.span_bug(expr.span, "full range should be dealt with above")
-                    }
-                };
-
-                if let Some(did) = did {
-                    let def = tcx.lookup_adt_def(did);
-                    let predicates = tcx.lookup_predicates(did);
-                    let substs = Substs::new_type(vec![idx_type], vec![]);
-                    let bounds = fcx.instantiate_bounds(expr.span, &substs, &predicates);
-                    fcx.add_obligations_for_parameters(
-                        traits::ObligationCause::new(expr.span,
-                                                     fcx.body_id,
-                                                     traits::ItemObligation(did)),
-                        &bounds);
-
-                    tcx.mk_struct(def, tcx.mk_substs(substs))
-                } else {
-                    span_err!(tcx.sess, expr.span, E0236, "no lang item for range syntax");
-                    fcx.tcx().types.err
-                }
-            }
-            None => {
-                // Neither start nor end => RangeFull
-                if let Some(did) = tcx.lang_items.range_full_struct() {
-                    tcx.mk_struct(
-                        tcx.lookup_adt_def(did),
-                        tcx.mk_substs(Substs::empty())
-                    )
-                } else {
-                    span_err!(tcx.sess, expr.span, E0237, "no lang item for range syntax");
-                    fcx.tcx().types.err
-                }
-            }
-          };
-
-          fcx.write_ty(id, range_type);
-       }
-
     }
 
     debug!("type of expr({}) {} is...", expr.id,
@@ -3756,8 +3745,6 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
     debug!("... {:?}, expected is {:?}",
            fcx.expr_ty(expr),
            expected);
-
-    unifier();
 }
 
 pub fn resolve_ty_and_def_ufcs<'a, 'b, 'tcx>(fcx: &FnCtxt<'b, 'tcx>,
@@ -3786,24 +3773,30 @@ pub fn resolve_ty_and_def_ufcs<'a, 'b, 'tcx>(fcx: &FnCtxt<'b, 'tcx>,
                                                      &ty_segments[base_ty_end..]);
         let item_segment = path.segments.last().unwrap();
         let item_name = item_segment.identifier.name;
-        match method::resolve_ufcs(fcx, span, item_name, ty, node_id) {
-            Ok((def, lp)) => {
-                // Write back the new resolution.
-                fcx.ccx.tcx.def_map.borrow_mut()
-                       .insert(node_id, def::PathResolution {
-                   base_def: def,
-                   last_private: path_res.last_private.or(lp),
-                   depth: 0
-                });
-                Some((Some(ty), slice::ref_slice(item_segment), def))
-            }
+        let def = match method::resolve_ufcs(fcx, span, item_name, ty, node_id) {
+            Ok(def) => Some(def),
             Err(error) => {
+                let def = match error {
+                    method::MethodError::PrivateMatch(def) => Some(def),
+                    _ => None,
+                };
                 if item_name != special_idents::invalid.name {
                     method::report_error(fcx, span, ty, item_name, None, error);
                 }
-                fcx.write_error(node_id);
-                None
+                def
             }
+        };
+
+        if let Some(def) = def {
+            // Write back the new resolution.
+            fcx.ccx.tcx.def_map.borrow_mut().insert(node_id, def::PathResolution {
+                base_def: def,
+                depth: 0,
+            });
+            Some((Some(ty), slice::ref_slice(item_segment), def))
+        } else {
+            fcx.write_error(node_id);
+            None
         }
     }
 }
@@ -3828,7 +3821,7 @@ impl<'tcx> Expectation<'tcx> {
     /// which still is useful, because it informs integer literals and the like.
     /// See the test case `test/run-pass/coerce-expect-unsized.rs` and #20169
     /// for examples of where this comes up.
-    fn rvalue_hint(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> Expectation<'tcx> {
+    fn rvalue_hint(tcx: &TyCtxt<'tcx>, ty: Ty<'tcx>) -> Expectation<'tcx> {
         match tcx.struct_tail(ty).sty {
             ty::TySlice(_) | ty::TyStr | ty::TyTrait(..) => {
                 ExpectRvalueLikeUnsized(ty)
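
The `ExpectRvalueLikeUnsized` hint above is easiest to see from user code: when the expected type of an rvalue is unsized (a slice, `str`, or trait object), the checker keeps the hint so an unsizing coercion can be inserted. A rough, self-contained sketch of the kind of coercion the referenced `coerce-expect-unsized.rs` test presumably exercises (plain `std` only):

    fn main() {
        // The array literal has type Box<[i32; 3]>, but the annotation asks
        // for Box<[i32]>; the unsized expectation lets the coercion happen.
        let nums: Box<[i32]> = Box::new([1, 2, 3]);
        assert_eq!(nums.len(), 3);
    }
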
@@ -3995,7 +3988,7 @@ fn check_block_with_expected<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
     let mut any_err = false;
     for s in &blk.stmts {
         check_stmt(fcx, s);
-        let s_id = ::rustc_front::util::stmt_id(s);
+        let s_id = s.node.id();
         let s_ty = fcx.node_ty(s_id);
         if any_diverges && !warned && match s.node {
             hir::StmtDecl(ref decl, _) => {
@@ -4110,7 +4103,7 @@ fn check_const_with_ty<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
 /// Checks whether a type can be represented in memory. In particular, it
 /// identifies types that contain themselves without indirection through a
 /// pointer, which would mean their size is unbounded.
-pub fn check_representable(tcx: &ty::ctxt,
+pub fn check_representable(tcx: &TyCtxt,
                            sp: Span,
                            item_id: ast::NodeId,
                            _designation: &str) -> bool {
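
The representability check described in the doc comment above corresponds to the familiar "recursive type has infinite size" error (E0072) on the user side. A small illustrative sketch, not compiler internals:

    // Rejected: the type contains itself with no indirection, so its size
    // would be unbounded.
    // struct List { value: i32, next: List }

    // Accepted: indirection through Box gives the type a finite size.
    struct List {
        value: i32,
        next: Option<Box<List>>,
    }

    fn main() {
        let list = List { value: 1, next: Some(Box::new(List { value: 2, next: None })) };
        assert_eq!(list.next.unwrap().value, 2);
    }
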
@@ -4132,7 +4125,7 @@ pub fn check_representable(tcx: &ty::ctxt,
     return true
 }
 
-pub fn check_simd(tcx: &ty::ctxt, sp: Span, id: ast::NodeId) {
+pub fn check_simd(tcx: &TyCtxt, sp: Span, id: ast::NodeId) {
     let t = tcx.node_id_to_type(id);
     match t.sty {
         ty::TyStruct(def, substs) => {
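
`check_simd` enforces the shape constraints on `#[repr(simd)]` structs (a non-empty struct whose fields all share one machine type). A hedged, nightly-only sketch, since the attribute is feature-gated; the type name is illustrative:

    #![feature(repr_simd)]

    #[repr(simd)]
    #[derive(Copy, Clone)]
    struct F32x4(f32, f32, f32, f32);

    fn main() {
        let v = F32x4(1.0, 2.0, 3.0, 4.0);
        let _copy = v;
    }
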
@@ -4164,34 +4157,6 @@ pub fn check_enum_variants<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
                                     sp: Span,
                                     vs: &'tcx [hir::Variant],
                                     id: ast::NodeId) {
-    // disr_in_range should be removed once we have forced type hints for consts
-    fn disr_in_range(ccx: &CrateCtxt,
-                     ty: attr::IntType,
-                     disr: ty::Disr) -> bool {
-        fn uint_in_range(ccx: &CrateCtxt, ty: ast::UintTy, disr: ty::Disr) -> bool {
-            match ty {
-                ast::UintTy::U8 => disr as u8 as Disr == disr,
-                ast::UintTy::U16 => disr as u16 as Disr == disr,
-                ast::UintTy::U32 => disr as u32 as Disr == disr,
-                ast::UintTy::U64 => disr as u64 as Disr == disr,
-                ast::UintTy::Us => uint_in_range(ccx, ccx.tcx.sess.target.uint_type, disr)
-            }
-        }
-        fn int_in_range(ccx: &CrateCtxt, ty: ast::IntTy, disr: ty::Disr) -> bool {
-            match ty {
-                ast::IntTy::I8 => disr as i8 as Disr == disr,
-                ast::IntTy::I16 => disr as i16 as Disr == disr,
-                ast::IntTy::I32 => disr as i32 as Disr == disr,
-                ast::IntTy::I64 => disr as i64 as Disr == disr,
-                ast::IntTy::Is => int_in_range(ccx, ccx.tcx.sess.target.int_type, disr)
-            }
-        }
-        match ty {
-            attr::UnsignedInt(ty) => uint_in_range(ccx, ty, disr),
-            attr::SignedInt(ty) => int_in_range(ccx, ty, disr)
-        }
-    }
-
     fn do_check<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
                           vs: &'tcx [hir::Variant],
                           id: ast::NodeId,
@@ -4205,7 +4170,7 @@ pub fn check_enum_variants<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
         let inh = static_inherited_fields(ccx, &tables);
         let fcx = blank_fn_ctxt(ccx, &inh, ty::FnConverging(rty), id);
 
-        let (_, repr_type_ty) = ccx.tcx.enum_repr_type(Some(&hint));
+        let repr_type_ty = ccx.tcx.enum_repr_type(Some(&hint)).to_ty(&ccx.tcx);
         for v in vs {
             if let Some(ref e) = v.node.disr_expr {
                 check_const_with_ty(&fcx, e.span, e, repr_type_ty);
@@ -4230,23 +4195,7 @@ pub fn check_enum_variants<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
                 }
                 None => {}
             }
-            // Check for unrepresentable discriminant values
-            match hint {
-                attr::ReprAny | attr::ReprExtern => {
-                    disr_vals.push(current_disr_val);
-                }
-                attr::ReprInt(sp, ity) => {
-                    if !disr_in_range(ccx, ity, current_disr_val) {
-                        let mut err = struct_span_err!(ccx.tcx.sess, v.span, E0082,
-                            "discriminant value outside specified type");
-                        span_note!(&mut err, sp,
-                            "discriminant type specified here");
-                        err.emit();
-                    }
-                }
-                // Error reported elsewhere.
-                attr::ReprSimd | attr::ReprPacked => {}
-            }
+            disr_vals.push(current_disr_val);
         }
     }
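
Both the deleted `disr_in_range` helper and the removed per-variant range check above target the same user-visible constraint: an explicit discriminant has to fit the type named in `#[repr(..)]`. With this change the constraint is enforced by type-checking each discriminant expression against `repr_type_ty` instead. A hedged sketch of the code shape involved (the enum is illustrative; the commented-out variant is the error case):

    // With #[repr(u8)], every explicit discriminant must fit in a u8.
    #[repr(u8)]
    enum Small {
        Max = 255,
        // TooBig = 256,   // rejected: discriminant does not fit the repr type
    }

    fn main() {
        assert_eq!(Small::Max as u8, 255);
    }
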
 
@@ -4290,7 +4239,7 @@ fn type_scheme_and_predicates_for_def<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
         Def::Label(..) |
         Def::SelfTy(..) |
         Def::Err => {
-            fcx.ccx.tcx.sess.span_bug(sp, &format!("expected value, found {:?}", defn));
+            span_bug!(sp, "expected value, found {:?}", defn);
         }
     }
 }
@@ -4518,7 +4467,7 @@ pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
         assert_eq!(substs.types.len(space), type_defs.len(space));
 
         adjust_region_parameters(fcx, span, space, region_defs, &mut substs);
-        assert_eq!(substs.regions().len(space), region_defs.len(space));
+        assert_eq!(substs.regions.len(space), region_defs.len(space));
     }
 
     // The things we are substituting into the type should not contain
@@ -4546,16 +4495,15 @@ pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
         let impl_scheme = fcx.tcx().lookup_item_type(impl_def_id);
         assert_eq!(substs.types.len(subst::TypeSpace),
                    impl_scheme.generics.types.len(subst::TypeSpace));
-        assert_eq!(substs.regions().len(subst::TypeSpace),
+        assert_eq!(substs.regions.len(subst::TypeSpace),
                    impl_scheme.generics.regions.len(subst::TypeSpace));
 
         let impl_ty = fcx.instantiate_type_scheme(span, &substs, &impl_scheme.ty);
         if fcx.mk_subty(false, TypeOrigin::Misc(span), self_ty, impl_ty).is_err() {
-            fcx.tcx().sess.span_bug(span,
-            &format!(
+            span_bug!(span,
                 "instantiate_path: (UFCS) {:?} was a subtype of {:?} but now is not?",
                 self_ty,
-                impl_ty));
+                impl_ty);
         }
     }
 
@@ -4637,11 +4585,11 @@ pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
 
         {
             let region_count = region_defs.len(space);
-            assert_eq!(substs.regions().len(space), 0);
+            assert_eq!(substs.regions.len(space), 0);
             for (i, lifetime) in data.lifetimes.iter().enumerate() {
                 let r = ast_region_to_region(fcx.tcx(), lifetime);
                 if i < region_count {
-                    substs.mut_regions().push(space, r);
+                    substs.regions.push(space, r);
                 } else if i == region_count {
                     span_err!(fcx.tcx().sess, lifetime.span, E0088,
                         "too many lifetime parameters provided: \
@@ -4650,7 +4598,7 @@ pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                         if region_count == 1 {""} else {"s"},
                         data.lifetimes.len(),
                         if data.lifetimes.len() == 1 {""} else {"s"});
-                    substs.mut_regions().truncate(space, 0);
+                    substs.regions.truncate(space, 0);
                     break;
                 }
             }
@@ -4773,7 +4721,7 @@ pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
         defs: &VecPerParamSpace<ty::RegionParameterDef>,
         substs: &mut Substs)
     {
-        let provided_len = substs.mut_regions().len(space);
+        let provided_len = substs.regions.len(space);
         let desired = defs.get_slice(space);
 
         // Enforced by `push_explicit_parameters_from_segment_to_substs()`.
@@ -4781,7 +4729,7 @@ pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
 
         // If nothing was provided, just use inference variables.
         if provided_len == 0 {
-            substs.mut_regions().replace(
+            substs.regions.replace(
                 space,
                 fcx.infcx().region_vars_for_defs(span, desired));
             return;
@@ -4802,7 +4750,7 @@ pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
             provided_len,
             if provided_len == 1 {""} else {"s"});
 
-        substs.mut_regions().replace(
+        substs.regions.replace(
             space,
             fcx.infcx().region_vars_for_defs(span, desired));
     }
@@ -4848,7 +4796,7 @@ pub fn structurally_resolved_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
 }
 
 // Returns true if b contains a break that can exit from b
-pub fn may_break(cx: &ty::ctxt, id: ast::NodeId, b: &hir::Block) -> bool {
+pub fn may_break(cx: &TyCtxt, id: ast::NodeId, b: &hir::Block) -> bool {
     // First: is there an unlabeled break immediately
     // inside the loop?
     (loop_query(&b, |e| {
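
`may_break` answers whether a `loop` body contains a `break` that can actually leave it, which is what decides whether code after the loop is reachable. The user-visible effect, as a small sketch:

    // Never breaks, so the function body diverges and no trailing value is needed.
    fn never_returns() -> i32 {
        loop {}
    }

    // Can break, so control falls through and the trailing expression is required.
    fn returns_zero() -> i32 {
        loop { break; }
        0
    }

    fn main() {
        assert_eq!(returns_zero(), 0);
        let _ = never_returns as fn() -> i32;
    }
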
index f4841b75d13d55e8da0b3beb3efa8d158e3d60cd..debf925875723409c393a4f6a7d05d2517f322e0 100644 (file)
@@ -18,12 +18,11 @@ use super::{
     method,
     FnCtxt,
 };
-use middle::def_id::DefId;
-use middle::ty::{Ty, TypeFoldable, PreferMutLvalue};
+use hir::def_id::DefId;
+use rustc::ty::{Ty, TypeFoldable, PreferMutLvalue};
 use syntax::ast;
 use syntax::parse::token;
-use rustc_front::hir;
-use rustc_front::util as hir_util;
+use rustc::hir;
 
 /// Check a `a <op>= b`
 pub fn check_binop_assign<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>,
@@ -184,12 +183,12 @@ fn check_overloaded_binop<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                 if let IsAssign::Yes = is_assign {
                     span_err!(fcx.tcx().sess, lhs_expr.span, E0368,
                               "binary assignment operation `{}=` cannot be applied to type `{}`",
-                              hir_util::binop_to_string(op.node),
+                              op.node.as_str(),
                               lhs_ty);
                 } else {
                     let mut err = struct_span_err!(fcx.tcx().sess, lhs_expr.span, E0369,
                         "binary operation `{}` cannot be applied to type `{}`",
-                        hir_util::binop_to_string(op.node),
+                        op.node.as_str(),
                         lhs_ty);
                     let missing_trait = match op.node {
                         hir::BiAdd    => Some("std::ops::Add"),
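
The `missing_trait` table that starts above maps a failing operator to the `std::ops` trait whose implementation would make it apply. A minimal sketch of acting on that hint for `+` (the `Meters` type is illustrative; only `std::ops::Add` is assumed):

    use std::ops::Add;

    #[derive(Copy, Clone, Debug, PartialEq)]
    struct Meters(f64);

    // Implementing Add is what lets `a + b` type-check for Meters; without
    // it, the E0369 "cannot be applied" path above is taken.
    impl Add for Meters {
        type Output = Meters;
        fn add(self, rhs: Meters) -> Meters {
            Meters(self.0 + rhs.0)
        }
    }

    fn main() {
        assert_eq!(Meters(1.5) + Meters(2.5), Meters(4.0));
    }
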
@@ -235,7 +234,7 @@ pub fn check_user_unop<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                  op: hir::UnOp)
                                  -> Ty<'tcx>
 {
-    assert!(hir_util::is_by_value_unop(op));
+    assert!(op.is_by_value());
     match lookup_op_method(fcx, ex, operand_ty, vec![],
                            token::intern(mname), trait_did,
                            operand_expr) {
@@ -270,8 +269,9 @@ fn name_and_trait_def_id(fcx: &FnCtxt,
             hir::BiShr => ("shr_assign", lang.shr_assign_trait()),
             hir::BiLt | hir::BiLe | hir::BiGe | hir::BiGt | hir::BiEq | hir::BiNe | hir::BiAnd |
             hir::BiOr => {
-                fcx.tcx().sess.span_bug(op.span, &format!("impossible assignment operation: {}=",
-                                        hir_util::binop_to_string(op.node)))
+                span_bug!(op.span,
+                          "impossible assignment operation: {}=",
+                          op.node.as_str())
             }
         }
     } else {
@@ -293,7 +293,7 @@ fn name_and_trait_def_id(fcx: &FnCtxt,
             hir::BiEq => ("eq", lang.eq_trait()),
             hir::BiNe => ("ne", lang.eq_trait()),
             hir::BiAnd | hir::BiOr => {
-                fcx.tcx().sess.span_bug(op.span, "&& and || are not overloadable")
+                span_bug!(op.span, "&& and || are not overloadable")
             }
         }
     }
@@ -335,7 +335,7 @@ fn lookup_op_method<'a, 'tcx>(fcx: &'a FnCtxt<'a, 'tcx>,
             let method_ty = method.ty;
 
             // HACK(eddyb) Fully qualified path to work around a resolve bug.
-            let method_call = ::middle::ty::MethodCall::expr(expr.id);
+            let method_call = ::rustc::ty::MethodCall::expr(expr.id);
             fcx.inh.tables.borrow_mut().method_map.insert(method_call, method);
 
             // extract return type for method; all late bound regions
index e399818779ecf60dff659d59e07a2177aa752e4f..2a4de6e091b3d37f9c3fe4236e4b8d8e34a41e30 100644 (file)
@@ -89,20 +89,19 @@ use middle::free_region::FreeRegionMap;
 use middle::mem_categorization as mc;
 use middle::mem_categorization::Categorization;
 use middle::region::{self, CodeExtent};
-use middle::subst::Substs;
-use middle::traits;
-use middle::ty::{self, Ty, MethodCall, TypeFoldable};
-use middle::infer::{self, GenericKind, InferCtxt, SubregionOrigin, TypeOrigin, VerifyBound};
-use middle::pat_util;
-use middle::ty::adjustment;
-use middle::ty::wf::ImpliedBound;
+use rustc::ty::subst::Substs;
+use rustc::traits;
+use rustc::ty::{self, Ty, TyCtxt, MethodCall, TypeFoldable};
+use rustc::infer::{self, GenericKind, InferCtxt, InferOk, SubregionOrigin, TypeOrigin, VerifyBound};
+use hir::pat_util;
+use rustc::ty::adjustment;
+use rustc::ty::wf::ImpliedBound;
 
 use std::mem;
 use syntax::ast;
 use syntax::codemap::Span;
-use rustc_front::intravisit::{self, Visitor};
-use rustc_front::hir::{self, PatKind};
-use rustc_front::util as hir_util;
+use rustc::hir::intravisit::{self, Visitor};
+use rustc::hir::{self, PatKind};
 
 use self::SubjectNode::Subject;
 
@@ -208,7 +207,7 @@ impl<'a, 'tcx> Rcx<'a, 'tcx> {
         }
     }
 
-    pub fn tcx(&self) -> &'a ty::ctxt<'tcx> {
+    pub fn tcx(&self) -> &'a TyCtxt<'tcx> {
         self.fcx.ccx.tcx
     }
 
@@ -298,8 +297,7 @@ impl<'a, 'tcx> Rcx<'a, 'tcx> {
             match fn_sig_map.get(&id) {
                 Some(f) => f.clone(),
                 None => {
-                    self.tcx().sess.bug(
-                        &format!("No fn-sig entry for id={}", id));
+                    bug!("No fn-sig entry for id={}", id);
                 }
             }
         };
@@ -446,8 +444,8 @@ impl<'a, 'tcx> Rcx<'a, 'tcx> {
         let subject_node_id = match self.subject {
             Subject(s) => s,
             SubjectNode::None => {
-                self.tcx().sess.bug("cannot resolve_regions_and_report_errors \
-                                     without subject node");
+                bug!("cannot resolve_regions_and_report_errors \
+                      without subject node");
             }
         };
 
@@ -690,7 +688,7 @@ fn visit_expr(rcx: &mut Rcx, expr: &hir::Expr) {
         },
 
         hir::ExprBinary(op, ref lhs, ref rhs) if has_method_map => {
-            let implicitly_ref_args = !hir_util::is_by_value_binop(op.node);
+            let implicitly_ref_args = !op.node.is_by_value();
 
             // As `expr_method_call`, but the call is via an
             // overloaded op.  Note that we (sadly) currently use an
@@ -717,7 +715,7 @@ fn visit_expr(rcx: &mut Rcx, expr: &hir::Expr) {
         }
 
         hir::ExprUnary(op, ref lhs) if has_method_map => {
-            let implicitly_ref_args = !hir_util::is_by_value_unop(op);
+            let implicitly_ref_args = !op.is_by_value();
 
             // As above.
             constrain_call(rcx, expr, Some(&lhs),
@@ -881,14 +879,15 @@ fn constrain_callee(rcx: &mut Rcx,
                     _callee_expr: &hir::Expr) {
     let callee_ty = rcx.resolve_node_type(callee_id);
     match callee_ty.sty {
-        ty::TyBareFn(..) => { }
+        ty::TyFnDef(..) | ty::TyFnPtr(_) => { }
         _ => {
             // this should not happen, but it does if the program is
             // erroneous
             //
-            // tcx.sess.span_bug(
+            // bug!(
             //     callee_expr.span,
-            //     format!("Calling non-function: {}", callee_ty));
+            //     "Calling non-function: {}",
+            //     callee_ty);
         }
     }
 }
@@ -986,10 +985,10 @@ fn constrain_autoderefs<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
                 let (m, r) = match self_ty.sty {
                     ty::TyRef(r, ref m) => (m.mutbl, r),
                     _ => {
-                        rcx.tcx().sess.span_bug(
+                        span_bug!(
                             deref_expr.span,
-                            &format!("bad overloaded deref type {:?}",
-                                     method.ty))
+                            "bad overloaded deref type {:?}",
+                            method.ty)
                     }
                 };
 
@@ -1014,7 +1013,7 @@ fn constrain_autoderefs<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
                                           return_type, r_deref_expr);
                         return_type
                     }
-                    ty::FnDiverging => unreachable!()
+                    ty::FnDiverging => bug!()
                 }
             }
             None => derefd_ty
@@ -1057,12 +1056,10 @@ fn check_safety_of_rvalue_destructor_if_necessary<'a, 'tcx>(rcx: &mut Rcx<'a, 't
                 }
                 ty::ReStatic => {}
                 region => {
-                    rcx.tcx()
-                       .sess
-                       .span_bug(span,
-                                 &format!("unexpected rvalue region in rvalue \
-                                           destructor safety checking: `{:?}`",
-                                          region));
+                    span_bug!(span,
+                              "unexpected rvalue region in rvalue \
+                               destructor safety checking: `{:?}`",
+                              region);
                 }
             }
         }
@@ -1394,10 +1391,7 @@ fn link_reborrowed_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
                     infer::ReborrowUpvar(span, *upvar_id)
                 }
                 _ => {
-                    rcx.tcx().sess.span_bug(
-                        span,
-                        &format!("Illegal upvar id: {:?}",
-                                upvar_id));
+                    span_bug!(span, "Illegal upvar id: {:?}", upvar_id);
                 }
             }
         }
@@ -1496,7 +1490,7 @@ pub fn substs_wf_in_scope<'a,'tcx>(rcx: &mut Rcx<'a,'tcx>,
 
     let origin = infer::ParameterInScope(origin, expr_span);
 
-    for &region in substs.regions() {
+    for &region in &substs.regions {
         rcx.fcx.mk_subr(origin.clone(), expr_region, region);
     }
 
@@ -1624,7 +1618,7 @@ fn projection_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
     // edges, which winds up enforcing the same condition.
     let needs_infer = {
         projection_ty.trait_ref.substs.types.iter().any(|t| t.needs_infer()) ||
-            projection_ty.trait_ref.substs.regions().iter().any(|r| r.needs_infer())
+            projection_ty.trait_ref.substs.regions.iter().any(|r| r.needs_infer())
     };
     if env_bounds.is_empty() && needs_infer {
         debug!("projection_must_outlive: no declared bounds");
@@ -1633,7 +1627,7 @@ fn projection_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
             type_must_outlive(rcx, origin.clone(), component_ty, region);
         }
 
-        for &r in projection_ty.trait_ref.substs.regions() {
+        for &r in &projection_ty.trait_ref.substs.regions {
             rcx.fcx.mk_subr(origin.clone(), region, r);
         }
 
@@ -1650,7 +1644,7 @@ fn projection_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
     if !env_bounds.is_empty() && env_bounds[1..].iter().all(|b| *b == env_bounds[0]) {
         let unique_bound = env_bounds[0];
         debug!("projection_must_outlive: unique declared bound = {:?}", unique_bound);
-        if projection_ty.trait_ref.substs.regions()
+        if projection_ty.trait_ref.substs.regions
                                          .iter()
                                          .any(|r| env_bounds.contains(r))
         {
@@ -1846,7 +1840,11 @@ fn declared_projection_bounds_from_trait<'a,'tcx>(rcx: &Rcx<'a, 'tcx>,
 
                 // check whether this predicate applies to our current projection
                 match infer::mk_eqty(infcx, false, TypeOrigin::Misc(span), ty, outlives.0) {
-                    Ok(()) => { Ok(outlives.1) }
+                    Ok(InferOk { obligations, .. }) => {
+                        // FIXME(#32730) propagate obligations
+                        assert!(obligations.is_empty());
+                        Ok(outlives.1)
+                    }
                     Err(_) => { Err(()) }
                 }
             });
index bc4ec3adbc1853b820fccdc429856bb8ba432c80..40481cda76290b4d999d9bc9e24a7d98071f2865 100644 (file)
@@ -46,13 +46,13 @@ use check::demand;
 use middle::expr_use_visitor as euv;
 use middle::mem_categorization as mc;
 use middle::mem_categorization::Categorization;
-use middle::ty::{self, Ty};
-use middle::infer::{InferCtxt, UpvarRegion};
+use rustc::ty::{self, Ty, TyCtxt};
+use rustc::infer::{InferCtxt, UpvarRegion};
 use std::collections::HashSet;
 use syntax::ast;
 use syntax::codemap::Span;
-use rustc_front::hir;
-use rustc_front::intravisit::{self, Visitor};
+use rustc::hir;
+use rustc::hir::intravisit::{self, Visitor};
 
 ///////////////////////////////////////////////////////////////////////////
 // PUBLIC ENTRY POINTS
@@ -114,7 +114,7 @@ impl<'a,'tcx> SeedBorrowKind<'a,'tcx> {
         SeedBorrowKind { fcx: fcx, closures_with_inferred_kinds: HashSet::new() }
     }
 
-    fn tcx(&self) -> &'a ty::ctxt<'tcx> {
+    fn tcx(&self) -> &'a TyCtxt<'tcx> {
         self.fcx.tcx()
     }
 
@@ -131,7 +131,7 @@ impl<'a,'tcx> SeedBorrowKind<'a,'tcx> {
         if !self.fcx.inh.tables.borrow().closure_kinds.contains_key(&closure_def_id) {
             self.closures_with_inferred_kinds.insert(expr.id);
             self.fcx.inh.tables.borrow_mut().closure_kinds
-                                            .insert(closure_def_id, ty::FnClosureKind);
+                                            .insert(closure_def_id, ty::ClosureKind::Fn);
             debug!("check_closure: adding closure_id={:?} to closures_with_inferred_kinds",
                    closure_def_id);
         }
@@ -209,10 +209,10 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> {
         let closure_substs = match self.fcx.node_ty(id).sty {
             ty::TyClosure(_, ref substs) => substs,
             ref t => {
-                self.fcx.tcx().sess.span_bug(
+                span_bug!(
                     span,
-                    &format!("type of closure expr {:?} is not a closure {:?}",
-                             id, t));
+                    "type of closure expr {:?} is not a closure {:?}",
+                    id, t);
             }
         };
 
@@ -301,7 +301,7 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> {
                                upvar_id);
 
                         // to move out of an upvar, this must be a FnOnce closure
-                        self.adjust_closure_kind(upvar_id.closure_expr_id, ty::FnOnceClosureKind);
+                        self.adjust_closure_kind(upvar_id.closure_expr_id, ty::ClosureKind::FnOnce);
 
                         let upvar_capture_map =
                             &mut self.fcx.inh.tables.borrow_mut().upvar_capture_map;
@@ -314,7 +314,7 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> {
                         // must still adjust the kind of the closure
                         // to be a FnOnce closure to permit moves out
                         // of the environment.
-                        self.adjust_closure_kind(upvar_id.closure_expr_id, ty::FnOnceClosureKind);
+                        self.adjust_closure_kind(upvar_id.closure_expr_id, ty::ClosureKind::FnOnce);
                     }
                     mc::NoteNone => {
                     }
@@ -418,7 +418,7 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> {
                 }
 
                 // also need to be in an FnMut closure since this is not an ImmBorrow
-                self.adjust_closure_kind(upvar_id.closure_expr_id, ty::FnMutClosureKind);
+                self.adjust_closure_kind(upvar_id.closure_expr_id, ty::ClosureKind::FnMut);
 
                 true
             }
@@ -426,7 +426,7 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> {
                 // this kind of deref occurs in a `move` closure, or
                 // for a by-value upvar; in either case, to mutate an
                 // upvar, we need to be an FnMut closure
-                self.adjust_closure_kind(upvar_id.closure_expr_id, ty::FnMutClosureKind);
+                self.adjust_closure_kind(upvar_id.closure_expr_id, ty::ClosureKind::FnMut);
 
                 true
             }
@@ -488,16 +488,16 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> {
                closure_id, existing_kind, new_kind);
 
         match (existing_kind, new_kind) {
-            (ty::FnClosureKind, ty::FnClosureKind) |
-            (ty::FnMutClosureKind, ty::FnClosureKind) |
-            (ty::FnMutClosureKind, ty::FnMutClosureKind) |
-            (ty::FnOnceClosureKind, _) => {
+            (ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
+            (ty::ClosureKind::FnMut, ty::ClosureKind::Fn) |
+            (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
+            (ty::ClosureKind::FnOnce, _) => {
                 // no change needed
             }
 
-            (ty::FnClosureKind, ty::FnMutClosureKind) |
-            (ty::FnClosureKind, ty::FnOnceClosureKind) |
-            (ty::FnMutClosureKind, ty::FnOnceClosureKind) => {
+            (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) |
+            (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
+            (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
                 // new kind is stronger than the old kind
                 closure_kinds.insert(closure_def_id, new_kind);
             }
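
The kind-adjustment lattice above only ever strengthens a closure's kind (Fn, then FnMut, then FnOnce), driven by how the closure uses its captures. A rough user-level illustration of the three kinds being inferred:

    fn main() {
        let x = 10;
        let read = || x + 1;                // only reads a capture: Fn
        assert_eq!(read(), 11);

        let mut count = 0;
        {
            let mut bump = || count += 1;   // mutates a capture: at least FnMut
            bump();
            bump();
        }
        assert_eq!(count, 2);

        let s = String::from("owned");
        let consume = move || s;            // moves its capture out: FnOnce only
        let _back: String = consume();
    }
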
index 4f3f716c20d61c76a67572db3972e51d6889d372..1b21e6ce9ebe3d1c548e2bc9df4d777395a7467a 100644 (file)
@@ -12,12 +12,12 @@ use astconv::AstConv;
 use check::{FnCtxt, Inherited, blank_fn_ctxt, regionck};
 use constrained_type_params::{identify_constrained_type_params, Parameter};
 use CrateCtxt;
-use middle::def_id::DefId;
+use hir::def_id::DefId;
 use middle::region::{CodeExtent};
-use middle::subst::{self, TypeSpace, FnSpace, ParamSpace, SelfSpace};
-use middle::traits;
-use middle::ty::{self, Ty};
-use middle::ty::fold::{TypeFolder};
+use rustc::ty::subst::{self, TypeSpace, FnSpace, ParamSpace, SelfSpace};
+use rustc::traits;
+use rustc::ty::{self, Ty, TyCtxt};
+use rustc::ty::fold::{TypeFolder};
 
 use std::cell::RefCell;
 use std::collections::HashSet;
@@ -25,8 +25,8 @@ use syntax::ast;
 use syntax::codemap::{Span};
 use syntax::errors::DiagnosticBuilder;
 use syntax::parse::token::{special_idents};
-use rustc_front::intravisit::{self, Visitor};
-use rustc_front::hir;
+use rustc::hir::intravisit::{self, Visitor};
+use rustc::hir;
 
 pub struct CheckTypeWellFormedVisitor<'ccx, 'tcx:'ccx> {
     ccx: &'ccx CrateCtxt<'ccx, 'tcx>,
@@ -42,7 +42,7 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
         }
     }
 
-    fn tcx(&self) -> &ty::ctxt<'tcx> {
+    fn tcx(&self) -> &TyCtxt<'tcx> {
         self.ccx.tcx
     }
 
@@ -255,9 +255,9 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
             let type_scheme = fcx.tcx().lookup_item_type(fcx.tcx().map.local_def_id(item.id));
             let item_ty = fcx.instantiate_type_scheme(item.span, free_substs, &type_scheme.ty);
             let bare_fn_ty = match item_ty.sty {
-                ty::TyBareFn(_, ref bare_fn_ty) => bare_fn_ty,
+                ty::TyFnDef(_, _, ref bare_fn_ty) => bare_fn_ty,
                 _ => {
-                    this.tcx().sess.span_bug(item.span, "Fn item without bare fn type");
+                    span_bug!(item.span, "Fn item without fn type");
                 }
             };
 
@@ -473,7 +473,7 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
         let name = match space {
             TypeSpace => ast_generics.ty_params[index].name,
             SelfSpace => special_idents::type_self.name,
-            FnSpace => self.tcx().sess.bug("Fn space occupied?"),
+            FnSpace => bug!("Fn space occupied?"),
         };
 
         ty::ParamTy { space: space, idx: index as u32, name: name }
@@ -489,7 +489,7 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
         match space {
             TypeSpace => ast_generics.ty_params[index].span,
             SelfSpace => item.span,
-            FnSpace => self.tcx().sess.span_bug(item.span, "Fn space occupied?"),
+            FnSpace => span_bug!(item.span, "Fn space occupied?"),
         }
     }
 
@@ -516,7 +516,7 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
     }
 }
 
-fn reject_shadowing_type_parameters<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn reject_shadowing_type_parameters<'tcx>(tcx: &TyCtxt<'tcx>,
                                           span: Span,
                                           generics: &ty::Generics<'tcx>) {
     let impl_params = generics.types.get_slice(subst::TypeSpace).iter()
@@ -567,7 +567,7 @@ fn struct_variant<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
     let fields =
         struct_def.fields().iter()
         .map(|field| {
-            let field_ty = fcx.tcx().node_id_to_type(field.node.id);
+            let field_ty = fcx.tcx().node_id_to_type(field.id);
             let field_ty = fcx.instantiate_type_scheme(field.span,
                                                        &fcx.inh
                                                            .infcx
@@ -623,13 +623,13 @@ pub fn error_380<'ccx,'tcx>(ccx: &'ccx CrateCtxt<'ccx, 'tcx>, span: Span) {
                Trait for ..`) must have no methods or associated items")
 }
 
-pub fn error_392<'tcx>(tcx: &ty::ctxt<'tcx>, span: Span, param_name: ast::Name)
+pub fn error_392<'tcx>(tcx: &TyCtxt<'tcx>, span: Span, param_name: ast::Name)
                        -> DiagnosticBuilder<'tcx> {
     struct_span_err!(tcx.sess, span, E0392,
                      "parameter `{}` is never used", param_name)
 }
 
-pub fn error_194<'tcx>(tcx: &ty::ctxt<'tcx>, span: Span, name: ast::Name) {
+pub fn error_194<'tcx>(tcx: &TyCtxt<'tcx>, span: Span, name: ast::Name) {
     span_err!(tcx.sess, span, E0194,
               "type parameter `{}` shadows another type parameter of the same name",
               name);
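
`error_392` above is the shared constructor for E0392 ("parameter is never used"); the usual user-side fix is a `PhantomData` marker. A small sketch with illustrative names:

    use std::marker::PhantomData;

    // Rejected with E0392: `T` is declared but never used in the body.
    // struct Handle<T> { raw: u32 }

    // Accepted: PhantomData<T> counts as a use and records the intent.
    struct Handle<T> {
        raw: u32,
        _marker: PhantomData<T>,
    }

    fn main() {
        let h: Handle<String> = Handle { raw: 7, _marker: PhantomData };
        assert_eq!(h.raw, 7);
    }
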
index a28944995c48cd2e344b28fa3fc0c258fb272725..68327ccd39ab69f9f05b94d8f1578a22aabd6f5e 100644 (file)
@@ -15,12 +15,12 @@ use self::ResolveReason::*;
 
 use astconv::AstConv;
 use check::FnCtxt;
-use middle::def_id::DefId;
-use middle::pat_util;
-use middle::ty::{self, Ty, MethodCall, MethodCallee};
-use middle::ty::adjustment;
-use middle::ty::fold::{TypeFolder,TypeFoldable};
-use middle::infer;
+use hir::def_id::DefId;
+use hir::pat_util;
+use rustc::ty::{self, Ty, TyCtxt, MethodCall, MethodCallee};
+use rustc::ty::adjustment;
+use rustc::ty::fold::{TypeFolder,TypeFoldable};
+use rustc::infer;
 use write_substs_to_tcx;
 use write_ty_to_tcx;
 
@@ -28,10 +28,9 @@ use std::cell::Cell;
 
 use syntax::ast;
 use syntax::codemap::{DUMMY_SP, Span};
-use rustc_front::print::pprust::pat_to_string;
-use rustc_front::intravisit::{self, Visitor};
-use rustc_front::util as hir_util;
-use rustc_front::hir;
+use rustc::hir::print::pat_to_string;
+use rustc::hir::intravisit::{self, Visitor};
+use rustc::hir;
 
 ///////////////////////////////////////////////////////////////////////////
 // Entry point functions
@@ -85,7 +84,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
         WritebackCx { fcx: fcx }
     }
 
-    fn tcx(&self) -> &'cx ty::ctxt<'tcx> {
+    fn tcx(&self) -> &'cx TyCtxt<'tcx> {
         self.fcx.tcx()
     }
 
@@ -112,7 +111,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
                     // system.
                     match e.node {
                         hir::ExprBinary(..) => {
-                            if !hir_util::is_by_value_binop(op.node) {
+                            if !op.node.is_by_value() {
                                 self.fcx.inh.tables.borrow_mut().adjustments.remove(&lhs.id);
                             }
                         },
@@ -142,7 +141,7 @@ impl<'cx, 'tcx, 'v> Visitor<'v> for WritebackCx<'cx, 'tcx> {
             return;
         }
 
-        self.visit_node_id(ResolvingExpr(s.span), hir_util::stmt_id(s));
+        self.visit_node_id(ResolvingExpr(s.span), s.node.id());
         intravisit::walk_stmt(self, s);
     }
 
@@ -381,7 +380,7 @@ enum ResolveReason {
 }
 
 impl ResolveReason {
-    fn span(&self, tcx: &ty::ctxt) -> Span {
+    fn span(&self, tcx: &TyCtxt) -> Span {
         match *self {
             ResolvingExpr(s) => s,
             ResolvingLocal(s) => s,
@@ -411,7 +410,7 @@ impl ResolveReason {
 // unresolved types and so forth.
 
 struct Resolver<'cx, 'tcx: 'cx> {
-    tcx: &'cx ty::ctxt<'tcx>,
+    tcx: &'cx TyCtxt<'tcx>,
     infcx: &'cx infer::InferCtxt<'cx, 'tcx>,
     writeback_errors: &'cx Cell<bool>,
     reason: ResolveReason,
@@ -487,7 +486,7 @@ impl<'cx, 'tcx> Resolver<'cx, 'tcx> {
 }
 
 impl<'cx, 'tcx> TypeFolder<'tcx> for Resolver<'cx, 'tcx> {
-    fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
+    fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx> {
         self.tcx
     }
 
index 2dfbaab2844754f9a81841835ee4448d10f766a9..9c8c9ba50561296655e09d959c2b3aee9cfc40ca 100644 (file)
 // done by the orphan and overlap modules. Then we build up various
 // mappings. That mapping code resides here.
 
-
-use middle::def_id::DefId;
+use hir::def_id::DefId;
 use middle::lang_items::UnsizeTraitLangItem;
-use middle::subst::{self, Subst};
-use middle::traits;
-use middle::ty::{self, TypeFoldable};
-use middle::ty::{ImplOrTraitItemId, ConstTraitItemId};
-use middle::ty::{MethodTraitItemId, TypeTraitItemId, ParameterEnvironment};
-use middle::ty::{Ty, TyBool, TyChar, TyEnum, TyError};
-use middle::ty::{TyParam, TyRawPtr};
-use middle::ty::{TyRef, TyStruct, TyTrait, TyTuple};
-use middle::ty::{TyStr, TyArray, TySlice, TyFloat, TyInfer, TyInt};
-use middle::ty::{TyUint, TyClosure, TyBox, TyBareFn};
-use middle::ty::TyProjection;
-use middle::ty::util::CopyImplementationError;
+use rustc::ty::subst::{self, Subst};
+use rustc::ty::{self, TyCtxt, TypeFoldable};
+use rustc::traits::{self, ProjectionMode};
+use rustc::ty::{ImplOrTraitItemId, ConstTraitItemId};
+use rustc::ty::{MethodTraitItemId, TypeTraitItemId, ParameterEnvironment};
+use rustc::ty::{Ty, TyBool, TyChar, TyEnum, TyError};
+use rustc::ty::{TyParam, TyRawPtr};
+use rustc::ty::{TyRef, TyStruct, TyTrait, TyTuple};
+use rustc::ty::{TyStr, TyArray, TySlice, TyFloat, TyInfer, TyInt};
+use rustc::ty::{TyUint, TyClosure, TyBox, TyFnDef, TyFnPtr};
+use rustc::ty::TyProjection;
+use rustc::ty::util::CopyImplementationError;
 use middle::free_region::FreeRegionMap;
 use CrateCtxt;
-use middle::infer::{self, InferCtxt, TypeOrigin, new_infer_ctxt};
+use rustc::infer::{self, InferCtxt, TypeOrigin, new_infer_ctxt};
 use std::cell::RefCell;
 use std::rc::Rc;
+use syntax::ast;
 use syntax::codemap::Span;
-use syntax::parse::token;
+use syntax::errors::DiagnosticBuilder;
 use util::nodemap::{DefIdMap, FnvHashMap};
 use rustc::dep_graph::DepNode;
-use rustc::front::map as hir_map;
-use rustc_front::intravisit;
-use rustc_front::hir::{Item, ItemImpl};
-use rustc_front::hir;
+use rustc::hir::map as hir_map;
+use rustc::hir::intravisit;
+use rustc::hir::{Item, ItemImpl};
+use rustc::hir;
 
 mod orphan;
 mod overlap;
@@ -68,8 +68,8 @@ fn get_base_type_def_id<'a, 'tcx>(inference_context: &InferCtxt<'a, 'tcx>,
         }
 
         TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
-        TyStr | TyArray(..) | TySlice(..) | TyBareFn(..) | TyTuple(..) |
-        TyParam(..) | TyError |
+        TyStr | TyArray(..) | TySlice(..) | TyFnDef(..) | TyFnPtr(_) |
+        TyTuple(..) | TyParam(..) | TyError |
         TyRawPtr(_) | TyRef(_, _) | TyProjection(..) => {
             None
         }
@@ -77,10 +77,10 @@ fn get_base_type_def_id<'a, 'tcx>(inference_context: &InferCtxt<'a, 'tcx>,
         TyInfer(..) | TyClosure(..) => {
             // `ty` comes from a user declaration so we should only expect types
             // that the user can type
-            inference_context.tcx.sess.span_bug(
+            span_bug!(
                 span,
-                &format!("coherence encountered unexpected type searching for base type: {}",
-                         ty));
+                "coherence encountered unexpected type searching for base type: {}",
+                ty);
         }
     }
 }
@@ -196,7 +196,7 @@ impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
         debug!("add_trait_impl: impl_trait_ref={:?} impl_def_id={:?}",
                impl_trait_ref, impl_def_id);
         let trait_def = self.crate_context.tcx.lookup_trait_def(impl_trait_ref.def_id);
-        trait_def.record_impl(self.crate_context.tcx, impl_def_id, impl_trait_ref);
+        trait_def.record_local_impl(self.crate_context.tcx, impl_def_id, impl_trait_ref);
     }
 
     // Converts an implementation in the AST to a vector of items.
@@ -219,9 +219,7 @@ impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
                 }).collect()
             }
             _ => {
-                self.crate_context.tcx.sess.span_bug(item.span,
-                                                     "can't convert a non-impl \
-                                                      to an impl");
+                span_bug!(item.span, "can't convert a non-impl to an impl");
             }
         }
     }
@@ -263,13 +261,12 @@ impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
                                           "the Drop trait may only be implemented on structures");
                             }
                             _ => {
-                                tcx.sess.bug("didn't find impl in ast \
-                                              map");
+                                bug!("didn't find impl in ast map");
                             }
                         }
                     } else {
-                        tcx.sess.bug("found external impl of Drop trait on \
-                                      something other than a struct");
+                        bug!("found external impl of Drop trait on \
+                              something other than a struct");
                     }
                 }
             }
@@ -384,13 +381,14 @@ impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
             debug!("check_implementations_of_coerce_unsized: {:?} -> {:?} (free)",
                    source, target);
 
-            let infcx = new_infer_ctxt(tcx, &tcx.tables, Some(param_env));
+            let infcx = new_infer_ctxt(tcx, &tcx.tables, Some(param_env), ProjectionMode::Topmost);
 
+            let origin = TypeOrigin::Misc(span);
             let check_mutbl = |mt_a: ty::TypeAndMut<'tcx>, mt_b: ty::TypeAndMut<'tcx>,
                                mk_ptr: &Fn(Ty<'tcx>) -> Ty<'tcx>| {
                 if (mt_a.mutbl, mt_b.mutbl) == (hir::MutImmutable, hir::MutMutable) {
-                    infcx.report_mismatched_types(span, mk_ptr(mt_b.ty),
-                                                  target, &ty::error::TypeError::Mutability);
+                    infcx.report_mismatched_types(origin, mk_ptr(mt_b.ty),
+                                                  target, ty::error::TypeError::Mutability);
                 }
                 (mt_a.ty, mt_b.ty, unsize_trait, None)
             };
@@ -419,7 +417,6 @@ impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
                         return;
                     }
 
-                    let origin = TypeOrigin::Misc(span);
                     let fields = &def_a.struct_variant().fields;
                     let diff_fields = fields.iter().enumerate().filter_map(|(i, f)| {
                         let (a, b) = (f.ty(tcx, substs_a), f.ty(tcx, substs_b));
@@ -449,13 +446,7 @@ impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
                                    for a coercion between structures with one field \
                                    being coerced, but {} fields need coercions: {}",
                                    diff_fields.len(), diff_fields.iter().map(|&(i, a, b)| {
-                                        let name = fields[i].name;
-                                        format!("{} ({} to {})",
-                                                if name == token::special_names::unnamed_field {
-                                                    i.to_string()
-                                                } else {
-                                                    name.to_string()
-                                                }, a, b)
+                                        format!("{} ({} to {})", fields[i].name, a, b)
                                    }).collect::<Vec<_>>().join(", "));
                         return;
                     }
@@ -499,7 +490,7 @@ impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
     }
 }
 
-fn enforce_trait_manually_implementable(tcx: &ty::ctxt, sp: Span, trait_def_id: DefId) {
+fn enforce_trait_manually_implementable(tcx: &TyCtxt, sp: Span, trait_def_id: DefId) {
     if tcx.sess.features.borrow().unboxed_closures {
         // the feature gate allows all of them
         return
@@ -526,9 +517,19 @@ fn enforce_trait_manually_implementable(tcx: &ty::ctxt, sp: Span, trait_def_id:
     err.emit();
 }
 
+// Factored out into helper because the error cannot be defined in multiple locations.
+pub fn report_duplicate_item<'tcx>(tcx: &TyCtxt<'tcx>, sp: Span, name: ast::Name)
+                                   -> DiagnosticBuilder<'tcx>
+{
+    struct_span_err!(tcx.sess, sp, E0201, "duplicate definitions with name `{}`:", name)
+}
+
 pub fn check_coherence(crate_context: &CrateCtxt) {
     let _task = crate_context.tcx.dep_graph.in_task(DepNode::Coherence);
-    let infcx = new_infer_ctxt(crate_context.tcx, &crate_context.tcx.tables, None);
+    let infcx = new_infer_ctxt(crate_context.tcx,
+                               &crate_context.tcx.tables,
+                               None,
+                               ProjectionMode::Topmost);
     CoherenceChecker {
         crate_context: crate_context,
         inference_context: infcx,
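
The new `report_duplicate_item` helper issues E0201 ("duplicate definitions with name"), and the reworked overlap checker in the next file reports the same mistake for inherent impls through a lint. A sketch of the offending code shape (the type and method names are illustrative; the commented-out method is the duplicate):

    struct Counter {
        n: u32,
    }

    impl Counter {
        fn get(&self) -> u32 { self.n }
    }

    impl Counter {
        // A second inherent `get` in another impl block of the same type is
        // reported as a duplicate definition with name `get`.
        // fn get(&self) -> u32 { self.n + 1 }
        fn reset(&mut self) { self.n = 0; }
    }

    fn main() {
        let mut c = Counter { n: 3 };
        assert_eq!(c.get(), 3);
        c.reset();
        assert_eq!(c.get(), 0);
    }
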
index 9e966c283a0a5efddbd967849d290219a8f6043e..f3d63957018d672547d26005d390c01117111ca8 100644 (file)
 //! crate or pertains to a type defined in this crate.
 
 use middle::cstore::LOCAL_CRATE;
-use middle::def_id::DefId;
-use middle::traits;
-use middle::ty;
+use hir::def_id::DefId;
+use rustc::traits;
+use rustc::ty::{self, TyCtxt};
 use syntax::ast;
 use syntax::codemap::Span;
 use rustc::dep_graph::DepNode;
-use rustc_front::intravisit;
-use rustc_front::hir;
+use rustc::hir::intravisit;
+use rustc::hir;
 
-pub fn check(tcx: &ty::ctxt) {
+pub fn check(tcx: &TyCtxt) {
     let mut orphan = OrphanChecker { tcx: tcx };
     tcx.visit_all_items_in_krate(DepNode::CoherenceOrphanCheck, &mut orphan);
 }
 
 struct OrphanChecker<'cx, 'tcx:'cx> {
-    tcx: &'cx ty::ctxt<'tcx>
+    tcx: &'cx TyCtxt<'tcx>
 }
 
 impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> {
index 470e954781f8bbb6377fe9c84e63cb23af1ebe0e..a05167dbe433333c819515b62aaf9dbd5fb852bb 100644 (file)
@@ -9,23 +9,23 @@
 // except according to those terms.
 
 //! Overlap: No two impls for the same trait are implemented for the
-//! same type.
-
-use middle::cstore::{CrateStore, LOCAL_CRATE};
-use middle::def_id::DefId;
-use middle::traits;
-use middle::ty;
-use middle::infer;
+//! same type. Likewise, no two inherent impls for a given type
+//! constructor provide a method with the same name.
+
+use middle::cstore::CrateStore;
+use hir::def_id::DefId;
+use rustc::traits::{self, ProjectionMode};
+use rustc::infer;
+use rustc::ty::{self, TyCtxt};
 use syntax::ast;
-use syntax::codemap::Span;
 use rustc::dep_graph::DepNode;
-use rustc_front::hir;
-use rustc_front::intravisit;
-use util::nodemap::{DefIdMap, DefIdSet};
+use rustc::hir;
+use rustc::hir::intravisit;
+use util::nodemap::DefIdMap;
+use lint;
 
-pub fn check(tcx: &ty::ctxt) {
+pub fn check(tcx: &TyCtxt) {
     let mut overlap = OverlapChecker { tcx: tcx,
-                                       traits_checked: DefIdSet(),
                                        default_impls: DefIdMap() };
 
     // this secondary walk specifically checks for some other cases,
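
For the trait half of the module doc comment above, "overlap" means two impls could both apply to the same concrete type, which is what E0119 reports further down. A minimal sketch (the trait and impls are illustrative; the commented-out impl is the one that would conflict with the blanket impl absent specialization):

    trait Describe {
        fn describe(&self) -> &'static str;
    }

    // Blanket impl: applies to every sized T.
    impl<T> Describe for T {
        fn describe(&self) -> &'static str { "something" }
    }

    // Would overlap with the blanket impl for T = u32, so it is rejected
    // as a conflicting implementation.
    // impl Describe for u32 {
    //     fn describe(&self) -> &'static str { "a u32" }
    // }

    fn main() {
        assert_eq!(5u32.describe(), "something");
    }
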
@@ -34,150 +34,75 @@ pub fn check(tcx: &ty::ctxt) {
 }
 
 struct OverlapChecker<'cx, 'tcx:'cx> {
-    tcx: &'cx ty::ctxt<'tcx>,
-
-    // The set of traits where we have checked for overlap.  This is
-    // used to avoid checking the same trait twice.
-    //
-    // NB. It's ok to skip tracking this set because we fully
-    // encapsulate it, and we always create a task
-    // (`CoherenceOverlapCheck`) corresponding to each entry.
-    traits_checked: DefIdSet,
+    tcx: &'cx TyCtxt<'tcx>,
 
     // maps from a trait def-id to an impl id
     default_impls: DefIdMap<ast::NodeId>,
 }
 
 impl<'cx, 'tcx> OverlapChecker<'cx, 'tcx> {
-    fn check_for_overlapping_impls_of_trait(&mut self, trait_def_id: DefId) {
-        debug!("check_for_overlapping_impls_of_trait(trait_def_id={:?})",
-               trait_def_id);
+    fn check_for_common_items_in_impls(&self, impl1: DefId, impl2: DefId) {
+        #[derive(Copy, Clone, PartialEq)]
+        enum Namespace { Type, Value }
 
-        let _task = self.tcx.dep_graph.in_task(DepNode::CoherenceOverlapCheck(trait_def_id));
-        if !self.traits_checked.insert(trait_def_id) {
-            return;
+        fn name_and_namespace(tcx: &TyCtxt, item: &ty::ImplOrTraitItemId)
+                              -> (ast::Name, Namespace)
+        {
+            let name = tcx.impl_or_trait_item(item.def_id()).name();
+            (name, match *item {
+                ty::TypeTraitItemId(..) => Namespace::Type,
+                ty::ConstTraitItemId(..) => Namespace::Value,
+                ty::MethodTraitItemId(..) => Namespace::Value,
+            })
         }
 
-        let trait_def = self.tcx.lookup_trait_def(trait_def_id);
-        self.tcx.populate_implementations_for_trait_if_necessary(
-            trait_def.trait_ref.def_id);
-
-        // We should already know all impls of this trait, so these
-        // borrows are safe.
-        let (blanket_impls, nonblanket_impls) = trait_def.borrow_impl_lists(self.tcx);
-
-        // Conflicts can only occur between a blanket impl and another impl,
-        // or between 2 non-blanket impls of the same kind.
+        let impl_items = self.tcx.impl_items.borrow();
 
-        for (i, &impl1_def_id) in blanket_impls.iter().enumerate() {
-            for &impl2_def_id in &blanket_impls[(i+1)..] {
-                self.check_if_impls_overlap(impl1_def_id,
-                                            impl2_def_id);
-            }
-
-            for v in nonblanket_impls.values() {
-                for &impl2_def_id in v {
-                    self.check_if_impls_overlap(impl1_def_id,
-                                                impl2_def_id);
-                }
-            }
-        }
+        for item1 in &impl_items[&impl1] {
+            let (name, namespace) = name_and_namespace(&self.tcx, item1);
 
-        for impl_group in nonblanket_impls.values() {
-            for (i, &impl1_def_id) in impl_group.iter().enumerate() {
-                for &impl2_def_id in &impl_group[(i+1)..] {
-                    self.check_if_impls_overlap(impl1_def_id,
-                                                impl2_def_id);
+            for item2 in &impl_items[&impl2] {
+                if (name, namespace) == name_and_namespace(&self.tcx, item2) {
+                    let msg = format!("duplicate definitions with name `{}`", name);
+                    let node_id = self.tcx.map.as_local_node_id(item1.def_id()).unwrap();
+                    self.tcx.sess.add_lint(lint::builtin::OVERLAPPING_INHERENT_IMPLS,
+                                           node_id,
+                                           self.tcx.span_of_impl(item1.def_id()).unwrap(),
+                                           msg);
                 }
             }
         }
     }
 
-    // We need to coherently pick which impl will be displayed
-    // as causing the error message, and it must be the in the current
-    // crate. Just pick the smaller impl in the file.
-    fn order_impls(&self, impl1_def_id: DefId, impl2_def_id: DefId)
-            -> Option<(DefId, DefId)> {
-        if impl1_def_id.krate != LOCAL_CRATE {
-            if impl2_def_id.krate != LOCAL_CRATE {
-                // we don't need to check impls if both are external;
-                // that's the other crate's job.
-                None
-            } else {
-                Some((impl2_def_id, impl1_def_id))
-            }
-        } else if impl2_def_id.krate != LOCAL_CRATE {
-            Some((impl1_def_id, impl2_def_id))
-        } else if impl1_def_id < impl2_def_id {
-            Some((impl1_def_id, impl2_def_id))
-        } else {
-            Some((impl2_def_id, impl1_def_id))
-        }
-    }
-
-
-    fn check_if_impls_overlap(&self,
-                              impl1_def_id: DefId,
-                              impl2_def_id: DefId)
-    {
-        if let Some((impl1_def_id, impl2_def_id)) = self.order_impls(
-            impl1_def_id, impl2_def_id)
-        {
-            debug!("check_if_impls_overlap({:?}, {:?})",
-                   impl1_def_id,
-                   impl2_def_id);
-
-            let infcx = infer::new_infer_ctxt(self.tcx, &self.tcx.tables, None);
-            if let Some(trait_ref) = traits::overlapping_impls(&infcx, impl1_def_id, impl2_def_id) {
-                self.report_overlap_error(impl1_def_id, impl2_def_id, trait_ref);
-            }
-        }
-    }
+    fn check_for_overlapping_inherent_impls(&self, ty_def_id: DefId) {
+        let _task = self.tcx.dep_graph.in_task(DepNode::CoherenceOverlapInherentCheck(ty_def_id));
 
-    fn report_overlap_error(&self,
-                            impl1: DefId,
-                            impl2: DefId,
-                            trait_ref: ty::TraitRef)
-    {
-        // only print the Self type if it's concrete; otherwise, it's not adding much information.
-        let self_type = {
-            trait_ref.substs.self_ty().and_then(|ty| {
-                if let ty::TyInfer(_) = ty.sty {
-                    None
-                } else {
-                    Some(format!(" for type `{}`", ty))
-                }
-            }).unwrap_or(String::new())
+        let inherent_impls = self.tcx.inherent_impls.borrow();
+        let impls = match inherent_impls.get(&ty_def_id) {
+            Some(impls) => impls,
+            None => return
         };
 
-        let mut err = struct_span_err!(self.tcx.sess, self.span_of_impl(impl1), E0119,
-                                       "conflicting implementations of trait `{}`{}:",
-                                       trait_ref,
-                                       self_type);
-
-        if impl2.is_local() {
-            span_note!(&mut err, self.span_of_impl(impl2),
-                       "conflicting implementation is here:");
-        } else {
-            let cname = self.tcx.sess.cstore.crate_name(impl2.krate);
-            err.note(&format!("conflicting implementation in crate `{}`", cname));
+        for (i, &impl1_def_id) in impls.iter().enumerate() {
+            for &impl2_def_id in &impls[(i+1)..] {
+                let infcx = infer::new_infer_ctxt(self.tcx,
+                                                  &self.tcx.tables,
+                                                  None,
+                                                  ProjectionMode::Topmost);
+                if traits::overlapping_impls(&infcx, impl1_def_id, impl2_def_id).is_some() {
+                    self.check_for_common_items_in_impls(impl1_def_id, impl2_def_id)
+                }
+            }
         }
-        err.emit();
-    }
-
-    fn span_of_impl(&self, impl_did: DefId) -> Span {
-        let node_id = self.tcx.map.as_local_node_id(impl_did).unwrap();
-        self.tcx.map.span(node_id)
     }
 }
 
-
 impl<'cx, 'tcx,'v> intravisit::Visitor<'v> for OverlapChecker<'cx, 'tcx> {
     fn visit_item(&mut self, item: &'v hir::Item) {
         match item.node {
-            hir::ItemTrait(..) => {
-                let trait_def_id = self.tcx.map.local_def_id(item.id);
-                self.check_for_overlapping_impls_of_trait(trait_def_id);
+            hir::ItemEnum(..) | hir::ItemStruct(..) => {
+                let type_def_id = self.tcx.map.local_def_id(item.id);
+                self.check_for_overlapping_inherent_impls(type_def_id);
             }
 
             hir::ItemDefaultImpl(..) => {
@@ -187,50 +112,90 @@ impl<'cx, 'tcx,'v> intravisit::Visitor<'v> for OverlapChecker<'cx, 'tcx> {
                 let impl_def_id = self.tcx.map.local_def_id(item.id);
                 let trait_ref = self.tcx.impl_trait_ref(impl_def_id).unwrap();
 
-                self.check_for_overlapping_impls_of_trait(trait_ref.def_id);
-
                 let prev_default_impl = self.default_impls.insert(trait_ref.def_id, item.id);
-                match prev_default_impl {
-                    Some(prev_id) => {
-                        self.report_overlap_error(impl_def_id,
-                                                  self.tcx.map.local_def_id(prev_id),
-                                                  trait_ref);
-                    }
-                    None => { }
+                if let Some(prev_id) = prev_default_impl {
+                    let mut err = struct_span_err!(
+                        self.tcx.sess,
+                        self.tcx.span_of_impl(impl_def_id).unwrap(), E0521,
+                        "redundant default implementations of trait `{}`:",
+                        trait_ref);
+                    err.span_note(self.tcx.span_of_impl(self.tcx.map.local_def_id(prev_id))
+                                      .unwrap(),
+                                  "redundant implementation is here:");
+                    err.emit();
                 }
             }
             hir::ItemImpl(_, _, _, Some(_), _, _) => {
                 let impl_def_id = self.tcx.map.local_def_id(item.id);
                 let trait_ref = self.tcx.impl_trait_ref(impl_def_id).unwrap();
                 let trait_def_id = trait_ref.def_id;
-                self.check_for_overlapping_impls_of_trait(trait_def_id);
-                match trait_ref.self_ty().sty {
-                    ty::TyTrait(ref data) => {
-                        // This is something like impl Trait1 for Trait2. Illegal
-                        // if Trait1 is a supertrait of Trait2 or Trait2 is not object safe.
-
-                        if !traits::is_object_safe(self.tcx, data.principal_def_id()) {
-                            // This is an error, but it will be
-                            // reported by wfcheck.  Ignore it
-                            // here. This is tested by
-                            // `coherence-impl-trait-for-trait-object-safe.rs`.
-                        } else {
-                            let mut supertrait_def_ids =
-                                traits::supertrait_def_ids(self.tcx, data.principal_def_id());
-                            if supertrait_def_ids.any(|d| d == trait_def_id) {
-                                span_err!(self.tcx.sess, item.span, E0371,
-                                          "the object type `{}` automatically \
-                                           implements the trait `{}`",
-                                          trait_ref.self_ty(),
-                                          self.tcx.item_path_str(trait_def_id));
+
+                let _task = self.tcx.dep_graph.in_task(
+                    DepNode::CoherenceOverlapCheck(trait_def_id));
+
+                let def = self.tcx.lookup_trait_def(trait_def_id);
+
+                // attempt to insert into the specialization graph
+                let insert_result = def.add_impl_for_specialization(self.tcx, impl_def_id);
+
+                // insertion failed due to overlap
+                if let Err(overlap) = insert_result {
+                    // only print the Self type if it has at least some outer
+                    // concrete shell; otherwise, it's not adding much
+                    // information.
+                    let self_type = {
+                        overlap.on_trait_ref.substs.self_ty().and_then(|ty| {
+                            if ty.has_concrete_skeleton() {
+                                Some(format!(" for type `{}`", ty))
+                            } else {
+                                None
                             }
+                        }).unwrap_or(String::new())
+                    };
+
+                    let mut err = struct_span_err!(
+                        self.tcx.sess, self.tcx.span_of_impl(impl_def_id).unwrap(), E0119,
+                        "conflicting implementations of trait `{}`{}:",
+                        overlap.on_trait_ref,
+                        self_type);
+
+                    match self.tcx.span_of_impl(overlap.with_impl) {
+                        Ok(span) => {
+                            err.span_note(span, "conflicting implementation is here:");
+                        }
+                        Err(cname) => {
+                            err.note(&format!("conflicting implementation in crate `{}`",
+                                              cname));
+                        }
+                    }
+
+                    err.emit();
+                }
+
+                // check for overlap with the automatic `impl Trait for Trait`
+                if let ty::TyTrait(ref data) = trait_ref.self_ty().sty {
+                    // This is something like impl Trait1 for Trait2. Illegal
+                    // if Trait1 is a supertrait of Trait2 or Trait2 is not object safe.
+
+                    if !traits::is_object_safe(self.tcx, data.principal_def_id()) {
+                        // This is an error, but it will be
+                        // reported by wfcheck.  Ignore it
+                        // here. This is tested by
+                        // `coherence-impl-trait-for-trait-object-safe.rs`.
+                    } else {
+                        let mut supertrait_def_ids =
+                            traits::supertrait_def_ids(self.tcx, data.principal_def_id());
+                        if supertrait_def_ids.any(|d| d == trait_def_id) {
+                            span_err!(self.tcx.sess, item.span, E0371,
+                                      "the object type `{}` automatically \
+                                       implements the trait `{}`",
+                                      trait_ref.self_ty(),
+                                      self.tcx.item_path_str(trait_def_id));
                         }
                     }
-                    _ => { }
                 }
             }
-            _ => {
-            }
+            _ => {}
         }
     }
 }
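An aside on the two diagnostics wired up above (illustration only, not part of this commit): the overlap branch reports E0119, and the duplicate-default-impl branch earlier in this file reports the new E0521. A minimal sketch of user code that would hit each path, assuming a compiler of this era and, for the second case, the unstable `optin_builtin_traits` feature:

```rust
// E0119: the blanket impl and the concrete impl both apply to `u8`,
// and neither opts into specialization.
trait Greet {
    fn greet(&self);
}

impl<T> Greet for T {
    fn greet(&self) { println!("hello, anything"); }
}

impl Greet for u8 {
    fn greet(&self) { println!("hello, u8"); } // error[E0119]
}
```

```rust
#![feature(optin_builtin_traits)]

// E0521: the same trait is given a default impl twice.
trait Marker {}

impl Marker for .. {}
impl Marker for .. {} // error[E0521]: redundant default implementations
```

As the `span_of_impl` handling above shows, the E0119 note points at the other impl when it is local and falls back to naming the crate when it is foreign.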
index 936d26f9208502195de37c2de0cf30d908605bb4..b042e23e0ac0494104958c0b376b6c327a604845 100644 (file)
 //! Unsafety checker: every impl either implements a trait defined in this
 //! crate or pertains to a type defined in this crate.
 
-use middle::ty;
-use rustc_front::intravisit;
-use rustc_front::hir;
+use rustc::ty::TyCtxt;
+use rustc::hir::intravisit;
+use rustc::hir;
 
-pub fn check(tcx: &ty::ctxt) {
+pub fn check(tcx: &TyCtxt) {
     let mut orphan = UnsafetyChecker { tcx: tcx };
     tcx.map.krate().visit_all_items(&mut orphan);
 }
 
 struct UnsafetyChecker<'cx, 'tcx:'cx> {
-    tcx: &'cx ty::ctxt<'tcx>
+    tcx: &'cx TyCtxt<'tcx>
 }
 
 impl<'cx, 'tcx, 'v> UnsafetyChecker<'cx, 'tcx> {
index 026881927cb21e5678f230aa4d283b9d771a7fdd..2e1a684684375714582094cfcf31023371eb8e6b 100644 (file)
@@ -60,26 +60,30 @@ There are some shortcomings in this design:
 
 use astconv::{self, AstConv, ty_of_arg, ast_ty_to_ty, ast_region_to_region};
 use lint;
-use middle::def::Def;
-use middle::def_id::DefId;
+use hir::def::Def;
+use hir::def_id::DefId;
 use constrained_type_params as ctp;
+use coherence;
 use middle::lang_items::SizedTraitLangItem;
 use middle::resolve_lifetime;
-use middle::const_eval::{self, ConstVal};
-use middle::const_eval::EvalHint::UncheckedExprHint;
-use middle::subst::{Substs, FnSpace, ParamSpace, SelfSpace, TypeSpace, VecPerParamSpace};
-use middle::ty::{ToPredicate, ImplContainer, ImplOrTraitItemContainer, TraitContainer};
-use middle::ty::{self, ToPolyTraitRef, Ty, TypeScheme};
-use middle::ty::{VariantKind};
-use middle::ty::fold::{TypeFolder};
-use middle::ty::util::IntTypeExt;
+use middle::const_val::ConstVal;
+use rustc_const_eval::EvalHint::UncheckedExprHint;
+use rustc_const_eval::eval_const_expr_partial;
+use rustc::ty::subst::{Substs, FnSpace, ParamSpace, SelfSpace, TypeSpace, VecPerParamSpace};
+use rustc::ty::{ToPredicate, ImplContainer, ImplOrTraitItemContainer, TraitContainer};
+use rustc::ty::{self, ToPolyTraitRef, Ty, TyCtxt, TypeScheme};
+use rustc::ty::{VariantKind};
+use rustc::ty::fold::{TypeFolder};
+use rustc::ty::util::IntTypeExt;
 use rscope::*;
 use rustc::dep_graph::DepNode;
-use rustc::front::map as hir_map;
+use rustc::hir::map as hir_map;
 use util::common::{ErrorReported, MemoizationMap};
 use util::nodemap::{FnvHashMap, FnvHashSet};
 use write_ty_to_tcx;
 
+use rustc_const_math::ConstInt;
+
 use std::cell::RefCell;
 use std::collections::HashSet;
 use std::rc::Rc;
@@ -90,14 +94,14 @@ use syntax::attr;
 use syntax::codemap::Span;
 use syntax::parse::token::special_idents;
 use syntax::ptr::P;
-use rustc_front::hir::{self, PatKind};
-use rustc_front::intravisit;
-use rustc_front::print::pprust;
+use rustc::hir::{self, PatKind};
+use rustc::hir::intravisit;
+use rustc::hir::print as pprust;
 
 ///////////////////////////////////////////////////////////////////////////
 // Main entry point
 
-pub fn collect_item_types(tcx: &ty::ctxt) {
+pub fn collect_item_types(tcx: &TyCtxt) {
     let ccx = &CrateCtxt { tcx: tcx, stack: RefCell::new(Vec::new()) };
     let mut visitor = CollectItemTypesVisitor{ ccx: ccx };
     ccx.tcx.visit_all_items_in_krate(DepNode::CollectItem, &mut visitor);
@@ -106,7 +110,7 @@ pub fn collect_item_types(tcx: &ty::ctxt) {
 ///////////////////////////////////////////////////////////////////////////
 
 struct CrateCtxt<'a,'tcx:'a> {
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
 
     // This stack is used to identify cycles in the user's source.
     // Note that these cycles can cross multiple items.
@@ -270,7 +274,7 @@ impl<'a,'tcx> CrateCtxt<'a,'tcx> {
         if let Some(trait_id) = tcx.map.as_local_node_id(trait_id) {
             let item = match tcx.map.get(trait_id) {
                 hir_map::NodeItem(item) => item,
-                _ => tcx.sess.bug(&format!("get_trait_def({:?}): not an item", trait_id))
+                _ => bug!("get_trait_def({:?}): not an item", trait_id)
             };
 
             trait_def_of_item(self, &item)
@@ -289,7 +293,7 @@ impl<'a,'tcx> CrateCtxt<'a,'tcx> {
             let def_ids = ensure_super_predicates_step(self, trait_def_id);
 
             for def_id in def_ids {
-                try!(self.ensure_super_predicates(span, def_id));
+                self.ensure_super_predicates(span, def_id)?;
             }
 
             Ok(())
@@ -304,7 +308,7 @@ impl<'a,'tcx> ItemCtxt<'a,'tcx> {
 }
 
 impl<'a, 'tcx> AstConv<'tcx> for ItemCtxt<'a, 'tcx> {
-    fn tcx(&self) -> &ty::ctxt<'tcx> { self.ccx.tcx }
+    fn tcx(&self) -> &TyCtxt<'tcx> { self.ccx.tcx }
 
     fn get_item_type_scheme(&self, span: Span, id: DefId)
                             -> Result<ty::TypeScheme<'tcx>, ErrorReported>
@@ -500,7 +504,7 @@ impl<'tcx> GetTypeParameterBounds<'tcx> for hir::Generics {
 /// parameter with id `param_id`. We use this so as to avoid running
 /// `ast_ty_to_ty`, because we want to avoid triggering an all-out
 /// conversion of the type to avoid inducing unnecessary cycles.
-fn is_param<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn is_param<'tcx>(tcx: &TyCtxt<'tcx>,
                   ast_ty: &hir::Ty,
                   param_id: ast::NodeId)
                   -> bool
@@ -528,8 +532,9 @@ fn convert_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
                             container: ImplOrTraitItemContainer,
                             name: ast::Name,
                             id: ast::NodeId,
-                            vis: hir::Visibility,
+                            vis: &hir::Visibility,
                             sig: &hir::MethodSig,
+                            defaultness: hir::Defaultness,
                             untransformed_rcvr_ty: Ty<'tcx>,
                             rcvr_ty_generics: &ty::Generics<'tcx>,
                             rcvr_ty_predicates: &ty::GenericPredicates<'tcx>) {
@@ -543,17 +548,19 @@ fn convert_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
                               sig, untransformed_rcvr_ty);
 
     let def_id = ccx.tcx.map.local_def_id(id);
+    let substs = ccx.tcx.mk_substs(mk_item_substs(ccx, &ty_generics));
+
     let ty_method = ty::Method::new(name,
                                     ty_generics,
                                     ty_generic_predicates,
                                     fty,
                                     explicit_self_category,
-                                    vis,
+                                    ty::Visibility::from_hir(vis, id, ccx.tcx),
+                                    defaultness,
                                     def_id,
                                     container);
 
-    let fty = ccx.tcx.mk_fn(Some(def_id),
-                            ccx.tcx.mk_bare_fn(ty_method.fty.clone()));
+    let fty = ccx.tcx.mk_fn_def(def_id, substs, ty_method.fty.clone());
     debug!("method {} (id {}) has type {:?}",
             name, id, fty);
     ccx.tcx.register_item_type(def_id, TypeScheme {
@@ -574,20 +581,20 @@ fn convert_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
 fn convert_field<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
                            struct_generics: &ty::Generics<'tcx>,
                            struct_predicates: &ty::GenericPredicates<'tcx>,
-                           v: &hir::StructField,
+                           field: &hir::StructField,
                            ty_f: ty::FieldDefMaster<'tcx>)
 {
-    let tt = ccx.icx(struct_predicates).to_ty(&ExplicitRscope, &v.node.ty);
+    let tt = ccx.icx(struct_predicates).to_ty(&ExplicitRscope, &field.ty);
     ty_f.fulfill_ty(tt);
-    write_ty_to_tcx(ccx.tcx, v.node.id, tt);
+    write_ty_to_tcx(ccx.tcx, field.id, tt);
 
     /* add the field to the tcache */
-    ccx.tcx.register_item_type(ccx.tcx.map.local_def_id(v.node.id),
+    ccx.tcx.register_item_type(ccx.tcx.map.local_def_id(field.id),
                                ty::TypeScheme {
                                    generics: struct_generics.clone(),
                                    ty: tt
                                });
-    ccx.tcx.predicates.borrow_mut().insert(ccx.tcx.map.local_def_id(v.node.id),
+    ccx.tcx.predicates.borrow_mut().insert(ccx.tcx.map.local_def_id(field.id),
                                            struct_predicates.clone());
 }
 
@@ -595,7 +602,8 @@ fn convert_associated_const<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
                                       container: ImplOrTraitItemContainer,
                                       name: ast::Name,
                                       id: ast::NodeId,
-                                      vis: hir::Visibility,
+                                      vis: &hir::Visibility,
+                                      defaultness: hir::Defaultness,
                                       ty: ty::Ty<'tcx>,
                                       has_value: bool)
 {
@@ -606,7 +614,8 @@ fn convert_associated_const<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
 
     let associated_const = Rc::new(ty::AssociatedConst {
         name: name,
-        vis: vis,
+        vis: ty::Visibility::from_hir(vis, id, ccx.tcx),
+        defaultness: defaultness,
         def_id: ccx.tcx.map.local_def_id(id),
         container: container,
         ty: ty,
@@ -620,12 +629,14 @@ fn convert_associated_type<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
                                      container: ImplOrTraitItemContainer,
                                      name: ast::Name,
                                      id: ast::NodeId,
-                                     vis: hir::Visibility,
+                                     vis: &hir::Visibility,
+                                     defaultness: hir::Defaultness,
                                      ty: Option<Ty<'tcx>>)
 {
     let associated_type = Rc::new(ty::AssociatedType {
         name: name,
-        vis: vis,
+        vis: ty::Visibility::from_hir(vis, id, ccx.tcx),
+        defaultness: defaultness,
         ty: ty,
         def_id: ccx.tcx.map.local_def_id(id),
         container: container
@@ -715,33 +726,18 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) {
             tcx.register_item_type(def_id,
                                    TypeScheme { generics: ty_generics.clone(),
                                                 ty: selfty });
-            if let &Some(ref ast_trait_ref) = opt_trait_ref {
-                tcx.impl_trait_refs.borrow_mut().insert(
-                    def_id,
-                    Some(astconv::instantiate_mono_trait_ref(&ccx.icx(&ty_predicates),
-                                                             &ExplicitRscope,
-                                                             ast_trait_ref,
-                                                             Some(selfty)))
-                        );
-            } else {
-                tcx.impl_trait_refs.borrow_mut().insert(def_id, None);
-            }
+            let trait_ref = opt_trait_ref.as_ref().map(|ast_trait_ref| {
+                astconv::instantiate_mono_trait_ref(&ccx.icx(&ty_predicates),
+                                                    &ExplicitRscope,
+                                                    ast_trait_ref,
+                                                    Some(selfty))
+            });
+            tcx.impl_trait_refs.borrow_mut().insert(def_id, trait_ref);
 
             enforce_impl_params_are_constrained(tcx, generics, &mut ty_predicates, def_id);
             tcx.predicates.borrow_mut().insert(def_id, ty_predicates.clone());
 
 
-            // If there is a trait reference, treat the methods as always public.
-            // This is to work around some incorrect behavior in privacy checking:
-            // when the method belongs to a trait, it should acquire the privacy
-            // from the trait, not the impl. Forcing the visibility to be public
-            // makes things sorta work.
-            let parent_visibility = if opt_trait_ref.is_some() {
-                hir::Public
-            } else {
-                it.vis
-            };
-
             // Convert all the associated consts.
             // Also, check if there are any duplicate associated items
             let mut seen_type_items = FnvHashSet();
@@ -753,17 +749,7 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) {
                     _                    => &mut seen_value_items,
                 };
                 if !seen_items.insert(impl_item.name) {
-                    let desc = match impl_item.node {
-                        hir::ImplItemKind::Const(_, _) => "associated constant",
-                        hir::ImplItemKind::Type(_) => "associated type",
-                        hir::ImplItemKind::Method(ref sig, _) =>
-                            match sig.explicit_self.node {
-                                hir::SelfStatic => "associated function",
-                                _ => "method",
-                            },
-                    };
-
-                    span_err!(tcx.sess, impl_item.span, E0201, "duplicate {}", desc);
+                    coherence::report_duplicate_item(tcx, impl_item.span, impl_item.name).emit();
                 }
 
                 if let hir::ImplItemKind::Const(ref ty, _) = impl_item.node {
@@ -774,9 +760,13 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) {
                                                generics: ty_generics.clone(),
                                                ty: ty,
                                            });
+                    // Trait-associated constants are always public.
+                    let public = &hir::Public;
+                    let visibility = if opt_trait_ref.is_some() { public } else { &impl_item.vis };
                     convert_associated_const(ccx, ImplContainer(def_id),
                                              impl_item.name, impl_item.id,
-                                             impl_item.vis.inherit_from(parent_visibility),
+                                             visibility,
+                                             impl_item.defaultness,
                                              ty, true /* has_value */);
                 }
             }
@@ -792,22 +782,21 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) {
                     let typ = ccx.icx(&ty_predicates).to_ty(&ExplicitRscope, ty);
 
                     convert_associated_type(ccx, ImplContainer(def_id),
-                                            impl_item.name, impl_item.id, impl_item.vis,
-                                            Some(typ));
+                                            impl_item.name, impl_item.id, &impl_item.vis,
+                                            impl_item.defaultness, Some(typ));
                 }
             }
 
             for impl_item in impl_items {
                 if let hir::ImplItemKind::Method(ref sig, _) = impl_item.node {
-                    // if the method specifies a visibility, use that, otherwise
-                    // inherit the visibility from the impl (so `foo` in `pub impl
-                    // { fn foo(); }` is public, but private in `impl { fn
-                    // foo(); }`).
-                    let method_vis = impl_item.vis.inherit_from(parent_visibility);
+                    // Trait methods are always public.
+                    let public = &hir::Public;
+                    let method_vis = if opt_trait_ref.is_some() { public } else { &impl_item.vis };
 
                     convert_method(ccx, ImplContainer(def_id),
                                    impl_item.name, impl_item.id, method_vis,
-                                   sig, selfty, &ty_generics, &ty_predicates);
+                                   sig, impl_item.defaultness, selfty, &ty_generics,
+                                   &ty_predicates);
                 }
             }
 
@@ -840,7 +829,8 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) {
                                              container,
                                              trait_item.name,
                                              trait_item.id,
-                                             hir::Public,
+                                             &hir::Public,
+                                             hir::Defaultness::Default,
                                              ty,
                                              default.is_some())
                 }
@@ -857,7 +847,8 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) {
                                             container,
                                             trait_item.name,
                                             trait_item.id,
-                                            hir::Public,
+                                            &hir::Public,
+                                            hir::Defaultness::Default,
                                             typ);
                 }
             }
@@ -869,8 +860,9 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) {
                                    container,
                                    trait_item.name,
                                    trait_item.id,
-                                   hir::Inherited,
+                                   &hir::Inherited,
                                    sig,
+                                   hir::Defaultness::Default,
                                    tcx.mk_self_type(),
                                    &trait_def.generics,
                                    &trait_predicates);
@@ -902,7 +894,7 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) {
             }
 
             if !struct_def.is_struct() {
-                convert_variant_ctor(tcx, struct_def.id(), variant, scheme, predicates);
+                convert_variant_ctor(ccx, struct_def.id(), variant, scheme, predicates);
             }
         },
         hir::ItemTy(_, ref generics) => {
@@ -920,11 +912,12 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) {
     }
 }
 
-fn convert_variant_ctor<'a, 'tcx>(tcx: &ty::ctxt<'tcx>,
+fn convert_variant_ctor<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
                                   ctor_id: ast::NodeId,
                                   variant: ty::VariantDef<'tcx>,
                                   scheme: ty::TypeScheme<'tcx>,
                                   predicates: ty::GenericPredicates<'tcx>) {
+    let tcx = ccx.tcx;
     let ctor_ty = match variant.kind() {
         VariantKind::Unit | VariantKind::Struct => scheme.ty,
         VariantKind::Tuple => {
@@ -933,9 +926,17 @@ fn convert_variant_ctor<'a, 'tcx>(tcx: &ty::ctxt<'tcx>,
                 .iter()
                 .map(|field| field.unsubst_ty())
                 .collect();
-            tcx.mk_ctor_fn(tcx.map.local_def_id(ctor_id),
-                           &inputs[..],
-                           scheme.ty)
+            let def_id = tcx.map.local_def_id(ctor_id);
+            let substs = tcx.mk_substs(mk_item_substs(ccx, &scheme.generics));
+            tcx.mk_fn_def(def_id, substs, ty::BareFnTy {
+                unsafety: hir::Unsafety::Normal,
+                abi: abi::Abi::Rust,
+                sig: ty::Binder(ty::FnSig {
+                    inputs: inputs,
+                    output: ty::FnConverging(scheme.ty),
+                    variadic: false
+                })
+            })
         }
     };
     write_ty_to_tcx(tcx, ctor_id, ctor_ty);
@@ -961,7 +962,7 @@ fn convert_enum_variant_types<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
         // Convert the ctor, if any. This also registers the variant as
         // an item.
         convert_variant_ctor(
-            ccx.tcx,
+            ccx,
             variant.node.data.id(),
             ty_variant,
             scheme.clone(),
@@ -970,33 +971,27 @@ fn convert_enum_variant_types<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
     }
 }
 
-fn convert_struct_variant<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn convert_struct_variant<'tcx>(tcx: &TyCtxt<'tcx>,
                                 did: DefId,
                                 name: ast::Name,
                                 disr_val: ty::Disr,
                                 def: &hir::VariantData) -> ty::VariantDefData<'tcx, 'tcx> {
     let mut seen_fields: FnvHashMap<ast::Name, Span> = FnvHashMap();
+    let node_id = tcx.map.as_local_node_id(did).unwrap();
     let fields = def.fields().iter().map(|f| {
-        let fid = tcx.map.local_def_id(f.node.id);
-        match f.node.kind {
-            hir::NamedField(name, vis) => {
-                let dup_span = seen_fields.get(&name).cloned();
-                if let Some(prev_span) = dup_span {
-                    let mut err = struct_span_err!(tcx.sess, f.span, E0124,
-                                                   "field `{}` is already declared",
-                                                   name);
-                    span_note!(&mut err, prev_span, "previously declared here");
-                    err.emit();
-                } else {
-                    seen_fields.insert(name, f.span);
-                }
-
-                ty::FieldDefData::new(fid, name, vis)
-            },
-            hir::UnnamedField(vis) => {
-                ty::FieldDefData::new(fid, special_idents::unnamed_field.name, vis)
-            }
+        let fid = tcx.map.local_def_id(f.id);
+        let dup_span = seen_fields.get(&f.name).cloned();
+        if let Some(prev_span) = dup_span {
+            let mut err = struct_span_err!(tcx.sess, f.span, E0124,
+                                           "field `{}` is already declared",
+                                           f.name);
+            span_note!(&mut err, prev_span, "previously declared here");
+            err.emit();
+        } else {
+            seen_fields.insert(f.name, f.span);
         }
+
+        ty::FieldDefData::new(fid, f.name, ty::Visibility::from_hir(&f.vis, node_id, tcx))
     }).collect();
     ty::VariantDefData {
         did: did,
@@ -1007,7 +1002,7 @@ fn convert_struct_variant<'tcx>(tcx: &ty::ctxt<'tcx>,
     }
 }
 
-fn convert_struct_def<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn convert_struct_def<'tcx>(tcx: &TyCtxt<'tcx>,
                             it: &hir::Item,
                             def: &hir::VariantData)
                             -> ty::AdtDefMaster<'tcx>
@@ -1022,33 +1017,48 @@ fn convert_struct_def<'tcx>(tcx: &ty::ctxt<'tcx>,
     tcx.intern_adt_def(
         did,
         ty::AdtKind::Struct,
-        vec![convert_struct_variant(tcx, ctor_id, it.name, 0, def)]
+        vec![convert_struct_variant(tcx, ctor_id, it.name, ConstInt::Infer(0), def)]
     )
 }
 
-fn convert_enum_def<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn convert_enum_def<'tcx>(tcx: &TyCtxt<'tcx>,
                           it: &hir::Item,
                           def: &hir::EnumDef)
                           -> ty::AdtDefMaster<'tcx>
 {
-    fn evaluate_disr_expr<'tcx>(tcx: &ty::ctxt<'tcx>,
-                                repr_ty: Ty<'tcx>,
+    fn print_err(tcx: &TyCtxt, span: Span, ty: ty::Ty, cv: ConstVal) {
+        span_err!(tcx.sess, span, E0079, "mismatched types: expected `{}` got `{}`",
+                  ty, cv.description());
+    }
+    fn evaluate_disr_expr<'tcx>(tcx: &TyCtxt<'tcx>,
+                                repr_ty: attr::IntType,
                                 e: &hir::Expr) -> Option<ty::Disr> {
         debug!("disr expr, checking {}", pprust::expr_to_string(e));
 
-        let hint = UncheckedExprHint(repr_ty);
-        match const_eval::eval_const_expr_partial(tcx, e, hint, None) {
-            Ok(ConstVal::Int(val)) => Some(val as ty::Disr),
-            Ok(ConstVal::Uint(val)) => Some(val as ty::Disr),
-            Ok(_) => {
-                let sign_desc = if repr_ty.is_signed() {
-                    "signed"
-                } else {
-                    "unsigned"
-                };
-                span_err!(tcx.sess, e.span, E0079,
-                          "expected {} integer constant",
-                          sign_desc);
+        let ty_hint = repr_ty.to_ty(tcx);
+        let hint = UncheckedExprHint(ty_hint);
+        match eval_const_expr_partial(tcx, e, hint, None) {
+            Ok(ConstVal::Integral(i)) => {
+                // FIXME: eval_const_expr_partial should return an error if the hint is wrong
+                match (repr_ty, i) {
+                    (attr::SignedInt(ast::IntTy::I8), ConstInt::I8(_)) => Some(i),
+                    (attr::SignedInt(ast::IntTy::I16), ConstInt::I16(_)) => Some(i),
+                    (attr::SignedInt(ast::IntTy::I32), ConstInt::I32(_)) => Some(i),
+                    (attr::SignedInt(ast::IntTy::I64), ConstInt::I64(_)) => Some(i),
+                    (attr::SignedInt(ast::IntTy::Is), ConstInt::Isize(_)) => Some(i),
+                    (attr::UnsignedInt(ast::UintTy::U8), ConstInt::U8(_)) => Some(i),
+                    (attr::UnsignedInt(ast::UintTy::U16), ConstInt::U16(_)) => Some(i),
+                    (attr::UnsignedInt(ast::UintTy::U32), ConstInt::U32(_)) => Some(i),
+                    (attr::UnsignedInt(ast::UintTy::U64), ConstInt::U64(_)) => Some(i),
+                    (attr::UnsignedInt(ast::UintTy::Us), ConstInt::Usize(_)) => Some(i),
+                    (_, i) => {
+                        print_err(tcx, e.span, ty_hint, ConstVal::Integral(i));
+                        None
+                    },
+                }
+            },
+            Ok(cv) => {
+                print_err(tcx, e.span, ty_hint, cv);
                 None
             },
             Err(err) => {
@@ -1064,37 +1074,31 @@ fn convert_enum_def<'tcx>(tcx: &ty::ctxt<'tcx>,
         }
     }
 
-    fn report_discrim_overflow(tcx: &ty::ctxt,
+    fn report_discrim_overflow(tcx: &TyCtxt,
                                variant_span: Span,
                                variant_name: &str,
-                               repr_type: attr::IntType,
                                prev_val: ty::Disr) {
-        let computed_value = repr_type.disr_wrap_incr(Some(prev_val));
-        let computed_value = repr_type.disr_string(computed_value);
-        let prev_val = repr_type.disr_string(prev_val);
-        let repr_type = repr_type.to_ty(tcx);
         span_err!(tcx.sess, variant_span, E0370,
-                  "enum discriminant overflowed on value after {}: {}; \
+                  "enum discriminant overflowed on value after {}; \
                    set explicitly via {} = {} if that is desired outcome",
-                  prev_val, repr_type, variant_name, computed_value);
+                  prev_val, variant_name, prev_val.wrap_incr());
     }
 
-    fn next_disr(tcx: &ty::ctxt,
+    fn next_disr(tcx: &TyCtxt,
                  v: &hir::Variant,
                  repr_type: attr::IntType,
                  prev_disr_val: Option<ty::Disr>) -> Option<ty::Disr> {
         if let Some(prev_disr_val) = prev_disr_val {
             let result = repr_type.disr_incr(prev_disr_val);
             if let None = result {
-                report_discrim_overflow(tcx, v.span, &v.node.name.as_str(),
-                                             repr_type, prev_disr_val);
+                report_discrim_overflow(tcx, v.span, &v.node.name.as_str(), prev_disr_val);
             }
             result
         } else {
-            Some(ty::INITIAL_DISCRIMINANT_VALUE)
+            Some(repr_type.initial_discriminant(tcx))
         }
     }
-    fn convert_enum_variant<'tcx>(tcx: &ty::ctxt<'tcx>,
+    fn convert_enum_variant<'tcx>(tcx: &TyCtxt<'tcx>,
                                   v: &hir::Variant,
                                   disr: ty::Disr)
                                   -> ty::VariantDefData<'tcx, 'tcx>
@@ -1105,17 +1109,19 @@ fn convert_enum_def<'tcx>(tcx: &ty::ctxt<'tcx>,
     }
     let did = tcx.map.local_def_id(it.id);
     let repr_hints = tcx.lookup_repr_hints(did);
-    let (repr_type, repr_type_ty) = tcx.enum_repr_type(repr_hints.get(0));
+    let repr_type = tcx.enum_repr_type(repr_hints.get(0));
     let mut prev_disr = None;
     let variants = def.variants.iter().map(|v| {
         let disr = match v.node.disr_expr {
-            Some(ref e) => evaluate_disr_expr(tcx, repr_type_ty, e),
+            Some(ref e) => evaluate_disr_expr(tcx, repr_type, e),
             None => next_disr(tcx, v, repr_type, prev_disr)
-        }.unwrap_or(repr_type.disr_wrap_incr(prev_disr));
+        }.unwrap_or_else(|| {
+            prev_disr.map(ty::Disr::wrap_incr)
+                     .unwrap_or(repr_type.initial_discriminant(tcx))
+        });
 
-        let v = convert_enum_variant(tcx, v, disr);
         prev_disr = Some(disr);
-        v
+        convert_enum_variant(tcx, v, disr)
     }).collect();
     tcx.intern_adt_def(tcx.map.local_def_id(it.id), ty::AdtKind::Enum, variants)
 }
@@ -1150,13 +1156,13 @@ fn ensure_super_predicates_step(ccx: &CrateCtxt,
     let superpredicates = superpredicates.unwrap_or_else(|| {
         let item = match ccx.tcx.map.get(trait_node_id) {
             hir_map::NodeItem(item) => item,
-            _ => ccx.tcx.sess.bug(&format!("trait_node_id {} is not an item", trait_node_id))
+            _ => bug!("trait_node_id {} is not an item", trait_node_id)
         };
 
         let (generics, bounds) = match item.node {
             hir::ItemTrait(_, ref generics, ref supertraits, _) => (generics, supertraits),
-            _ => tcx.sess.span_bug(item.span,
-                                   "ensure_super_predicates_step invoked on non-trait"),
+            _ => span_bug!(item.span,
+                           "ensure_super_predicates_step invoked on non-trait"),
         };
 
         // In-scope when converting the superbounds for `Trait` are
@@ -1222,7 +1228,7 @@ fn trait_def_of_item<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
 
     let (unsafety, generics, items) = match it.node {
         hir::ItemTrait(unsafety, ref generics, _, ref items) => (unsafety, generics, items),
-        _ => tcx.sess.span_bug(it.span, "trait_def_of_item invoked on non-trait"),
+        _ => span_bug!(it.span, "trait_def_of_item invoked on non-trait"),
     };
 
     let paren_sugar = tcx.has_attr(def_id, "rustc_paren_sugar");
@@ -1302,12 +1308,12 @@ fn trait_defines_associated_type_named(ccx: &CrateCtxt,
 {
     let item = match ccx.tcx.map.get(trait_node_id) {
         hir_map::NodeItem(item) => item,
-        _ => ccx.tcx.sess.bug(&format!("trait_node_id {} is not an item", trait_node_id))
+        _ => bug!("trait_node_id {} is not an item", trait_node_id)
     };
 
     let trait_items = match item.node {
         hir::ItemTrait(_, _, _, ref trait_items) => trait_items,
-        _ => ccx.tcx.sess.bug(&format!("trait_node_id {} is not a trait", trait_node_id))
+        _ => bug!("trait_node_id {} is not a trait", trait_node_id)
     };
 
     trait_items.iter().any(|trait_item| {
@@ -1327,9 +1333,10 @@ fn convert_trait_predicates<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, it: &hir::Item)
     let (generics, items) = match it.node {
         hir::ItemTrait(_, ref generics, _, ref items) => (generics, items),
         ref s => {
-            tcx.sess.span_bug(
+            span_bug!(
                 it.span,
-                &format!("trait_def_of_item invoked on {:?}", s));
+                "trait_def_of_item invoked on {:?}",
+                s);
         }
     };
 
@@ -1406,9 +1413,8 @@ fn type_scheme_of_def_id<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
                 type_scheme_of_foreign_item(ccx, &foreign_item, abi)
             }
             x => {
-                ccx.tcx.sess.bug(&format!("unexpected sort of node \
-                                           in get_item_type_scheme(): {:?}",
-                                          x));
+                bug!("unexpected sort of node in get_item_type_scheme(): {:?}",
+                     x);
             }
         }
     } else {
@@ -1443,7 +1449,9 @@ fn compute_type_scheme_of_item<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
         hir::ItemFn(ref decl, unsafety, _, abi, ref generics, _) => {
             let ty_generics = ty_generics_for_fn(ccx, generics, &ty::Generics::empty());
             let tofd = astconv::ty_of_bare_fn(&ccx.icx(generics), unsafety, abi, &decl);
-            let ty = tcx.mk_fn(Some(ccx.tcx.map.local_def_id(it.id)), tcx.mk_bare_fn(tofd));
+            let def_id = ccx.tcx.map.local_def_id(it.id);
+            let substs = tcx.mk_substs(mk_item_substs(ccx, &ty_generics));
+            let ty = tcx.mk_fn_def(def_id, substs, tofd);
             ty::TypeScheme { ty: ty, generics: ty_generics }
         }
         hir::ItemTy(ref t, ref generics) => {
@@ -1472,10 +1480,10 @@ fn compute_type_scheme_of_item<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
         hir::ItemForeignMod(..) |
         hir::ItemExternCrate(..) |
         hir::ItemUse(..) => {
-            tcx.sess.span_bug(
+            span_bug!(
                 it.span,
-                &format!("compute_type_scheme_of_item: unexpected item type: {:?}",
-                         it.node));
+                "compute_type_scheme_of_item: unexpected item type: {:?}",
+                it.node);
         }
     }
 }
@@ -1511,10 +1519,10 @@ fn convert_typed_item<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
         hir::ItemImpl(..) |
         hir::ItemMod(..) |
         hir::ItemForeignMod(..) => {
-            tcx.sess.span_bug(
+            span_bug!(
                 it.span,
-                &format!("compute_type_scheme_of_item: unexpected item type: {:?}",
-                         it.node));
+                "compute_type_scheme_of_item: unexpected item type: {:?}",
+                it.node);
         }
     };
 
@@ -1563,7 +1571,9 @@ fn compute_type_scheme_of_foreign_item<'a, 'tcx>(
 {
     match it.node {
         hir::ForeignItemFn(ref fn_decl, ref generics) => {
-            compute_type_scheme_of_foreign_fn_decl(ccx, fn_decl, generics, abi)
+            compute_type_scheme_of_foreign_fn_decl(
+                ccx, ccx.tcx.map.local_def_id(it.id),
+                fn_decl, generics, abi)
         }
         hir::ForeignItemStatic(ref t, _) => {
             ty::TypeScheme {
@@ -1817,9 +1827,9 @@ fn ty_generic_predicates<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
 
             &hir::WherePredicate::EqPredicate(ref eq_pred) => {
                 // FIXME(#20041)
-                tcx.sess.span_bug(eq_pred.span,
-                                    "Equality constraints are not yet \
-                                        implemented (#20041)")
+                span_bug!(eq_pred.span,
+                         "Equality constraints are not yet \
+                          implemented (#20041)")
             }
         }
     }
@@ -2114,6 +2124,7 @@ fn conv_param_bounds<'a,'tcx>(astconv: &AstConv<'tcx>,
 
 fn compute_type_scheme_of_foreign_fn_decl<'a, 'tcx>(
     ccx: &CrateCtxt<'a, 'tcx>,
+    id: DefId,
     decl: &hir::FnDecl,
     ast_generics: &hir::Generics,
     abi: abi::Abi)
@@ -2136,7 +2147,7 @@ fn compute_type_scheme_of_foreign_fn_decl<'a, 'tcx>(
     let input_tys = decl.inputs
                         .iter()
                         .map(|a| ty_of_arg(&ccx.icx(ast_generics), &rb, a, None))
-                        .collect();
+                        .collect::<Vec<_>>();
 
     let output = match decl.output {
         hir::Return(ref ty) =>
@@ -2147,14 +2158,37 @@ fn compute_type_scheme_of_foreign_fn_decl<'a, 'tcx>(
             ty::FnDiverging
     };
 
-    let t_fn = ccx.tcx.mk_fn(None,
-        ccx.tcx.mk_bare_fn(ty::BareFnTy {
-            abi: abi,
-            unsafety: hir::Unsafety::Unsafe,
-            sig: ty::Binder(ty::FnSig {inputs: input_tys,
-                                       output: output,
-                                       variadic: decl.variadic}),
-        }));
+    // feature gate SIMD types in FFI, since I (huonw) am not sure the
+    // ABIs are handled at all correctly.
+    if abi != abi::Abi::RustIntrinsic && abi != abi::Abi::PlatformIntrinsic
+            && !ccx.tcx.sess.features.borrow().simd_ffi {
+        let check = |ast_ty: &hir::Ty, ty: ty::Ty| {
+            if ty.is_simd() {
+                ccx.tcx.sess.struct_span_err(ast_ty.span,
+                              &format!("use of SIMD type `{}` in FFI is highly experimental and \
+                                        may result in invalid code",
+                                       pprust::ty_to_string(ast_ty)))
+                    .fileline_help(ast_ty.span,
+                                   "add #![feature(simd_ffi)] to the crate attributes to enable")
+                    .emit();
+            }
+        };
+        for (input, ty) in decl.inputs.iter().zip(&input_tys) {
+            check(&input.ty, ty)
+        }
+        if let hir::Return(ref ty) = decl.output {
+            check(&ty, output.unwrap())
+        }
+    }
+
+    let substs = ccx.tcx.mk_substs(mk_item_substs(ccx, &ty_generics));
+    let t_fn = ccx.tcx.mk_fn_def(id, substs, ty::BareFnTy {
+        abi: abi,
+        unsafety: hir::Unsafety::Unsafe,
+        sig: ty::Binder(ty::FnSig {inputs: input_tys,
+                                    output: output,
+                                    variadic: decl.variadic}),
+    });
 
     ty::TypeScheme {
         generics: ty_generics,
@@ -2178,7 +2212,7 @@ fn mk_item_substs<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
 }
 
 /// Checks that all the type parameters on an impl
-fn enforce_impl_params_are_constrained<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn enforce_impl_params_are_constrained<'tcx>(tcx: &TyCtxt<'tcx>,
                                              ast_generics: &hir::Generics,
                                              impl_predicates: &mut ty::GenericPredicates<'tcx>,
                                              impl_def_id: DefId)
@@ -2213,7 +2247,7 @@ fn enforce_impl_params_are_constrained<'tcx>(tcx: &ty::ctxt<'tcx>,
     }
 }
 
-fn enforce_impl_lifetimes_are_constrained<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn enforce_impl_lifetimes_are_constrained<'tcx>(tcx: &TyCtxt<'tcx>,
                                                 ast_generics: &hir::Generics,
                                                 impl_def_id: DefId,
                                                 impl_items: &[hir::ImplItem])
@@ -2278,7 +2312,7 @@ fn enforce_impl_lifetimes_are_constrained<'tcx>(tcx: &ty::ctxt<'tcx>,
     // used elsewhere are not projected back out.
 }
 
-fn report_unused_parameter(tcx: &ty::ctxt,
+fn report_unused_parameter(tcx: &TyCtxt,
                            span: Span,
                            kind: &str,
                            name: &str)
index 9abe101e2d9d657dafb9ebdf3c65b5a733005dfe..7e8b08c58539b81425a40f7fb71f974822bffb5f 100644 (file)
@@ -8,8 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use middle::subst;
-use middle::ty::{self, Ty};
+use rustc::ty::subst;
+use rustc::ty::{self, Ty, TyCtxt};
 
 use std::collections::HashSet;
 
@@ -72,7 +72,8 @@ fn parameters_for_type_shallow<'tcx>(ty: Ty<'tcx>) -> Vec<Parameter> {
             parameters_for_regions_in_substs(&pi.trait_ref.substs),
         ty::TyBool | ty::TyChar | ty::TyInt(..) | ty::TyUint(..) |
         ty::TyFloat(..) | ty::TyBox(..) | ty::TyStr |
-        ty::TyArray(..) | ty::TySlice(..) | ty::TyBareFn(..) |
+        ty::TyArray(..) | ty::TySlice(..) |
+        ty::TyFnDef(..) | ty::TyFnPtr(_) |
         ty::TyTuple(..) | ty::TyRawPtr(..) |
         ty::TyInfer(..) | ty::TyClosure(..) | ty::TyError =>
             vec![]
@@ -80,7 +81,7 @@ fn parameters_for_type_shallow<'tcx>(ty: Ty<'tcx>) -> Vec<Parameter> {
 }
 
 fn parameters_for_regions_in_substs(substs: &subst::Substs) -> Vec<Parameter> {
-    substs.regions()
+    substs.regions
           .iter()
           .filter_map(|r| parameters_for_region(r))
           .collect()
@@ -93,7 +94,7 @@ fn parameters_for_region(region: &ty::Region) -> Option<Parameter> {
     }
 }
 
-pub fn identify_constrained_type_params<'tcx>(_tcx: &ty::ctxt<'tcx>,
+pub fn identify_constrained_type_params<'tcx>(_tcx: &TyCtxt<'tcx>,
                                               predicates: &[ty::Predicate<'tcx>],
                                               impl_trait_ref: Option<ty::TraitRef<'tcx>>,
                                               input_parameters: &mut HashSet<Parameter>)
@@ -143,7 +144,7 @@ pub fn identify_constrained_type_params<'tcx>(_tcx: &ty::ctxt<'tcx>,
 /// which is determined by 1, which requires `U`, that is determined
 /// by 0. I should probably pick a less tangled example, but I can't
 /// think of any.
-pub fn setup_constraining_predicates<'tcx>(_tcx: &ty::ctxt<'tcx>,
+pub fn setup_constraining_predicates<'tcx>(_tcx: &TyCtxt<'tcx>,
                                            predicates: &mut [ty::Predicate<'tcx>],
                                            impl_trait_ref: Option<ty::TraitRef<'tcx>>,
                                            input_parameters: &mut HashSet<Parameter>)
index 0ff6dcccde698c1a21b4a46b7e7759d90fb13ff8..8f76bf92ef4f52665a2b2a84b1fcc61cc8352305 100644 (file)
@@ -327,6 +327,30 @@ fn main() {
     <Test as Trait1>::foo()
 }
 ```
+
+One last example:
+
+```
+trait F {
+    fn m(&self);
+}
+
+trait G {
+    fn m(&self);
+}
+
+struct X;
+
+impl F for X { fn m(&self) { println!("I am F"); } }
+impl G for X { fn m(&self) { println!("I am G"); } }
+
+fn main() {
+    let f = X;
+
+    F::m(&f); // it displays "I am F"
+    G::m(&f); // it displays "I am G"
+}
+```
 "##,
 
 E0035: r##"
@@ -379,7 +403,7 @@ impl Test {
 
 fn main() {
     let x = Test;
-    let v = &[0i32];
+    let v = &[0];
 
     x.method::<i32, i32>(v); // error: only one type parameter is expected!
 }
@@ -398,7 +422,7 @@ impl Test {
 
 fn main() {
     let x = Test;
-    let v = &[0i32];
+    let v = &[0];
 
     x.method::<i32>(v); // OK, we're good!
 }
@@ -901,7 +925,7 @@ Example of erroneous code:
 ```compile_fail
 enum Foo { FirstValue(i32) };
 
-let u = Foo::FirstValue { value: 0i32 }; // error: Foo::FirstValue
+let u = Foo::FirstValue { value: 0 }; // error: Foo::FirstValue
                                          // isn't a structure!
 // or even simpler, if the name doesn't refer to a structure at all.
 let t = u32 { value: 4 }; // error: `u32` does not name a structure.
@@ -1133,15 +1157,16 @@ enum Bad {
 }
 ```
 
-Here `X` will have already been assigned the discriminant 0 by the time `Y` is
+Here `X` will have already been given the discriminant 0 by the time `Y` is
 encountered, so a conflict occurs.
 "##,
 
 E0082: r##"
-The default type for enum discriminants is `isize`, but it can be adjusted by
-adding the `repr` attribute to the enum declaration. This error indicates that
-an integer literal given as a discriminant is not a member of the discriminant
-type. For example:
+When you specify enum discriminants with `=`, the compiler expects `isize`
+values by default. Alternatively, you can add the `repr` attribute to the enum
+declaration for an explicit choice of the discriminant type. In either case,
+the discriminant values must fall within a valid range for the expected type;
+otherwise this error is raised. For example:
 
 ```compile_fail
 #[repr(u8)]
@@ -1152,11 +1177,19 @@ enum Thing {
 ```
 
 Here, 1024 lies outside the valid range for `u8`, so the discriminant for `A` is
-invalid. You may want to change representation types to fix this, or else change
-invalid discriminant values so that they fit within the existing type.
+invalid. Here is another, more subtle example which depends on target word size:
+
+```compile_fail
+enum DependsOnPointerSize {
+    A = 1 << 32
+}
+```
+
+Here, `1 << 32` is interpreted as an `isize` value. So it is invalid for 32 bit
+target (`target_pointer_width = "32"`) but valid for 64 bit target.
 
-Note also that without a representation manually defined, the compiler will
-optimize by using the smallest integer type possible.
+You may want to change representation types to fix this, or else change invalid
+discriminant values so that they fit within the existing type.
 "##,
 
 E0084: r##"
@@ -2276,6 +2309,21 @@ impl Baz for Foo {
     type Quux = u32;
 }
 ```
+
+Note, however, that items with the same name are allowed for inherent `impl`
+blocks that don't overlap:
+
+```
+struct Foo<T>(T);
+
+impl Foo<u8> {
+    fn bar(&self) -> bool { self.0 > 5 }
+}
+
+impl Foo<bool> {
+    fn bar(&self) -> bool { self.0 }
+}
+```
 "##,
 
 E0202: r##"
@@ -3143,8 +3191,8 @@ x <<= 2;
 To fix this error, please check that this type implements this binary
 operation. Example:
 
-```compile_fail
-let x = 12u32; // the `u32` type does implement the `ShlAssign` trait
+```
+let mut x = 12u32; // the `u32` type does implement the `ShlAssign` trait
 
 x <<= 2; // ok!
 ```
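For a user-defined type, the operator is opted into by implementing the corresponding trait; a minimal sketch (editorial addition, the `Bits` wrapper is hypothetical):

```rust
use std::ops::ShlAssign;

struct Bits(u32);

impl ShlAssign<u32> for Bits {
    fn shl_assign(&mut self, rhs: u32) {
        self.0 <<= rhs;
    }
}

fn main() {
    let mut x = Bits(12);
    x <<= 2; // ok: `Bits` implements `ShlAssign<u32>`
    println!("{}", x.0); // prints 48
}
```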
@@ -3596,6 +3644,68 @@ fn main() {
 ```
 "##,
 
+E0520: r##"
+A non-default implementation was already made on this type so it cannot be
+specialized further. Erroneous code example:
+
+```compile_fail
+#![feature(specialization)]
+
+trait SpaceLlama {
+    fn fly(&self);
+}
+
+// applies to all T
+impl<T> SpaceLlama for T {
+    default fn fly(&self) {}
+}
+
+// non-default impl
+// applies to all `Clone` T and overrides the previous impl
+impl<T: Clone> SpaceLlama for T {
+    fn fly(&self) {}
+}
+
+// since `i32` implements `Clone`, this conflicts with the previous impl
+impl SpaceLlama for i32 {
+    default fn fly(&self) {}
+    // error: item `fly` is provided by an `impl` that specializes
+    //        another, but the item in the parent `impl` is not marked
+    //        `default` and so it cannot be specialized.
+}
+```
+
+Specialization only allows you to override `default` functions in
+implementations.
+
+To fix this error, you need to mark all the parent implementations as default.
+Example:
+
+```
+#![feature(specialization)]
+
+trait SpaceLlama {
+    fn fly(&self);
+}
+
+// applies to all T
+impl<T> SpaceLlama for T {
+    default fn fly(&self) {} // This is a parent implementation.
+}
+
+// applies to all `Clone` T; overrides the previous impl
+impl<T: Clone> SpaceLlama for T {
+    default fn fly(&self) {} // This is a parent implementation but was
+                             // previously not a default one, causing the error
+}
+
+// applies to i32, overrides the previous two impls
+impl SpaceLlama for i32 {
+    fn fly(&self) {} // And now that's ok!
+}
+```
+"##,
+
 }
 
 register_diagnostics! {
@@ -3643,8 +3753,8 @@ register_diagnostics! {
 //  E0233,
 //  E0234,
 //  E0235, // structure constructor specifies a structure of type but
-    E0236, // no lang item for range syntax
-    E0237, // no lang item for range syntax
+//  E0236, // no lang item for range syntax
+//  E0237, // no lang item for range syntax
     E0238, // parenthesized parameters may only be used with a trait
 //  E0239, // `next` method of `Iterator` trait has unexpected type
 //  E0240,
@@ -3671,5 +3781,6 @@ register_diagnostics! {
     E0399, // trait items need to be implemented because the associated
            // type `{}` was overridden
     E0436, // functional record update requires a struct
-    E0513  // no type for local variable ..
+    E0513, // no type for local variable ..
+    E0521  // redundant default implementations of trait
 }
index 0835762c4e5c579d4f90f20c06acd23ac4d5b8c6..7f27d10ce1ec3c097f3ee25fd25a7ee8798e49b0 100644 (file)
@@ -81,33 +81,35 @@ This API is completely unstable and subject to change.
 #![feature(rustc_diagnostic_macros)]
 #![feature(rustc_private)]
 #![feature(staged_api)]
+#![feature(question_mark)]
 
 #[macro_use] extern crate log;
 #[macro_use] extern crate syntax;
 
 extern crate arena;
 extern crate fmt_macros;
-extern crate rustc;
+#[macro_use] extern crate rustc;
 extern crate rustc_platform_intrinsics as intrinsics;
-extern crate rustc_front;
 extern crate rustc_back;
+extern crate rustc_const_math;
+extern crate rustc_const_eval;
 
 pub use rustc::dep_graph;
-pub use rustc::front;
+pub use rustc::hir;
 pub use rustc::lint;
 pub use rustc::middle;
 pub use rustc::session;
 pub use rustc::util;
 
 use dep_graph::DepNode;
-use front::map as hir_map;
-use middle::def::Def;
-use middle::infer::{self, TypeOrigin};
-use middle::subst;
-use middle::ty::{self, Ty, TypeFoldable};
+use hir::map as hir_map;
+use hir::def::Def;
+use rustc::infer::{self, TypeOrigin};
+use rustc::ty::subst::Substs;
+use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc::traits::ProjectionMode;
 use session::{config, CompileResult};
 use util::common::time;
-use rustc_front::hir;
 
 use syntax::codemap::Span;
 use syntax::ast;
@@ -128,29 +130,29 @@ pub mod coherence;
 pub mod variance;
 
 pub struct TypeAndSubsts<'tcx> {
-    pub substs: subst::Substs<'tcx>,
+    pub substs: Substs<'tcx>,
     pub ty: Ty<'tcx>,
 }
 
 pub struct CrateCtxt<'a, 'tcx: 'a> {
     // A mapping from method call sites to traits that have that method.
-    pub trait_map: ty::TraitMap,
+    pub trait_map: hir::TraitMap,
     /// A vector of every trait accessible in the whole crate
     /// (i.e. including those from subcrates). This is used only for
     /// error reporting, and so is lazily initialised and generally
     /// shouldn't taint the common path (hence the RefCell).
     pub all_traits: RefCell<Option<check::method::AllTraitsVec>>,
-    pub tcx: &'a ty::ctxt<'tcx>,
+    pub tcx: &'a TyCtxt<'tcx>,
 }
 
 // Functions that write types into the node type table
-fn write_ty_to_tcx<'tcx>(tcx: &ty::ctxt<'tcx>, node_id: ast::NodeId, ty: Ty<'tcx>) {
+fn write_ty_to_tcx<'tcx>(tcx: &TyCtxt<'tcx>, node_id: ast::NodeId, ty: Ty<'tcx>) {
     debug!("write_ty_to_tcx({}, {:?})", node_id,  ty);
     assert!(!ty.needs_infer());
     tcx.node_type_insert(node_id, ty);
 }
 
-fn write_substs_to_tcx<'tcx>(tcx: &ty::ctxt<'tcx>,
+fn write_substs_to_tcx<'tcx>(tcx: &TyCtxt<'tcx>,
                                  node_id: ast::NodeId,
                                  item_substs: ty::ItemSubsts<'tcx>) {
     if !item_substs.is_noop() {
@@ -164,7 +166,7 @@ fn write_substs_to_tcx<'tcx>(tcx: &ty::ctxt<'tcx>,
     }
 }
 
-fn lookup_full_def(tcx: &ty::ctxt, sp: Span, id: ast::NodeId) -> Def {
+fn lookup_full_def(tcx: &TyCtxt, sp: Span, id: ast::NodeId) -> Def {
     match tcx.def_map.borrow().get(&id) {
         Some(x) => x.full_def(),
         None => {
@@ -173,7 +175,7 @@ fn lookup_full_def(tcx: &ty::ctxt, sp: Span, id: ast::NodeId) -> Def {
     }
 }
 
-fn require_c_abi_if_variadic(tcx: &ty::ctxt,
+fn require_c_abi_if_variadic(tcx: &TyCtxt,
                              decl: &hir::FnDecl,
                              abi: Abi,
                              span: Span) {
@@ -183,7 +185,7 @@ fn require_c_abi_if_variadic(tcx: &ty::ctxt,
     }
 }
 
-fn require_same_types<'a, 'tcx, M>(tcx: &ty::ctxt<'tcx>,
+fn require_same_types<'a, 'tcx, M>(tcx: &TyCtxt<'tcx>,
                                    maybe_infcx: Option<&infer::InferCtxt<'a, 'tcx>>,
                                    t1_is_expected: bool,
                                    span: Span,
@@ -195,7 +197,7 @@ fn require_same_types<'a, 'tcx, M>(tcx: &ty::ctxt<'tcx>,
 {
     let result = match maybe_infcx {
         None => {
-            let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None);
+            let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, ProjectionMode::AnyFinal);
             infer::mk_eqty(&infcx, t1_is_expected, TypeOrigin::Misc(span), t1, t2)
         }
         Some(infcx) => {
@@ -220,7 +222,7 @@ fn check_main_fn_ty(ccx: &CrateCtxt,
     let tcx = ccx.tcx;
     let main_t = tcx.node_id_to_type(main_id);
     match main_t.sty {
-        ty::TyBareFn(..) => {
+        ty::TyFnDef(..) => {
             match tcx.map.find(main_id) {
                 Some(hir_map::NodeItem(it)) => {
                     match it.node {
@@ -236,7 +238,8 @@ fn check_main_fn_ty(ccx: &CrateCtxt,
                 _ => ()
             }
             let main_def_id = tcx.map.local_def_id(main_id);
-            let se_ty = tcx.mk_fn(Some(main_def_id), tcx.mk_bare_fn(ty::BareFnTy {
+            let substs = tcx.mk_substs(Substs::empty());
+            let se_ty = tcx.mk_fn_def(main_def_id, substs, ty::BareFnTy {
                 unsafety: hir::Unsafety::Normal,
                 abi: Abi::Rust,
                 sig: ty::Binder(ty::FnSig {
@@ -244,7 +247,7 @@ fn check_main_fn_ty(ccx: &CrateCtxt,
                     output: ty::FnConverging(tcx.mk_nil()),
                     variadic: false
                 })
-            }));
+            });
 
             require_same_types(tcx, None, false, main_span, main_t, se_ty,
                 || {
@@ -253,9 +256,9 @@ fn check_main_fn_ty(ccx: &CrateCtxt,
                 });
         }
         _ => {
-            tcx.sess.span_bug(main_span,
-                              &format!("main has a non-function type: found `{}`",
-                                       main_t));
+            span_bug!(main_span,
+                      "main has a non-function type: found `{}`",
+                      main_t);
         }
     }
 }
@@ -266,7 +269,7 @@ fn check_start_fn_ty(ccx: &CrateCtxt,
     let tcx = ccx.tcx;
     let start_t = tcx.node_id_to_type(start_id);
     match start_t.sty {
-        ty::TyBareFn(..) => {
+        ty::TyFnDef(..) => {
             match tcx.map.find(start_id) {
                 Some(hir_map::NodeItem(it)) => {
                     match it.node {
@@ -282,8 +285,9 @@ fn check_start_fn_ty(ccx: &CrateCtxt,
                 _ => ()
             }
 
-            let se_ty = tcx.mk_fn(Some(ccx.tcx.map.local_def_id(start_id)),
-                                  tcx.mk_bare_fn(ty::BareFnTy {
+            let start_def_id = ccx.tcx.map.local_def_id(start_id);
+            let substs = tcx.mk_substs(Substs::empty());
+            let se_ty = tcx.mk_fn_def(start_def_id, substs, ty::BareFnTy {
                 unsafety: hir::Unsafety::Normal,
                 abi: Abi::Rust,
                 sig: ty::Binder(ty::FnSig {
@@ -294,7 +298,7 @@ fn check_start_fn_ty(ccx: &CrateCtxt,
                     output: ty::FnConverging(tcx.types.isize),
                     variadic: false,
                 }),
-            }));
+            });
 
             require_same_types(tcx, None, false, start_span, start_t, se_ty,
                 || {
@@ -304,9 +308,9 @@ fn check_start_fn_ty(ccx: &CrateCtxt,
 
         }
         _ => {
-            tcx.sess.span_bug(start_span,
-                              &format!("start has a non-function type: found `{}`",
-                                       start_t));
+            span_bug!(start_span,
+                      "start has a non-function type: found `{}`",
+                      start_t);
         }
     }
 }
@@ -319,13 +323,13 @@ fn check_for_entry_fn(ccx: &CrateCtxt) {
             Some(config::EntryMain) => check_main_fn_ty(ccx, id, sp),
             Some(config::EntryStart) => check_start_fn_ty(ccx, id, sp),
             Some(config::EntryNone) => {}
-            None => tcx.sess.bug("entry function without a type")
+            None => bug!("entry function without a type")
         },
         None => {}
     }
 }
 
-pub fn check_crate(tcx: &ty::ctxt, trait_map: ty::TraitMap) -> CompileResult {
+pub fn check_crate(tcx: &TyCtxt, trait_map: hir::TraitMap) -> CompileResult {
     let time_passes = tcx.sess.time_passes();
     let ccx = CrateCtxt {
         trait_map: trait_map,
@@ -335,31 +339,27 @@ pub fn check_crate(tcx: &ty::ctxt, trait_map: ty::TraitMap) -> CompileResult {
 
     // this ensures that later parts of type checking can assume that items
     // have valid types and not error
-    try!(tcx.sess.track_errors(|| {
+    tcx.sess.track_errors(|| {
         time(time_passes, "type collecting", ||
              collect::collect_item_types(tcx));
 
-    }));
+    })?;
 
     time(time_passes, "variance inference", ||
          variance::infer_variance(tcx));
 
-    try!(tcx.sess.track_errors(|| {
+    tcx.sess.track_errors(|| {
       time(time_passes, "coherence checking", ||
           coherence::check_coherence(&ccx));
-    }));
+    })?;
 
-    try!(time(time_passes, "wf checking", ||
-        check::check_wf_new(&ccx)));
+    time(time_passes, "wf checking", || check::check_wf_new(&ccx))?;
 
-    try!(time(time_passes, "item-types checking", ||
-        check::check_item_types(&ccx)));
+    time(time_passes, "item-types checking", || check::check_item_types(&ccx))?;
 
-    try!(time(time_passes, "item-bodies checking", ||
-        check::check_item_bodies(&ccx)));
+    time(time_passes, "item-bodies checking", || check::check_item_bodies(&ccx))?;
 
-    try!(time(time_passes, "drop-impl checking", ||
-        check::check_drop_impls(&ccx)));
+    time(time_passes, "drop-impl checking", || check::check_drop_impls(&ccx))?;
 
     check_for_entry_fn(&ccx);
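Note: the hunk above replaces `try!(...)` with the `?` operator, which the compiler crates can use ahead of stabilization through their in-tree feature gates. A minimal stand-alone sketch of the equivalence follows; the function name is illustrative and not part of the patch:

```rust
use std::num::ParseIntError;

// Pre-patch form:  let x = try!(a.parse::<i32>());
// Post-patch form: let x = a.parse::<i32>()?;
// Both return early from the enclosing function with the error on failure.
fn parse_sum(a: &str, b: &str) -> Result<i32, ParseIntError> {
    let x = a.parse::<i32>()?;
    let y = b.parse::<i32>()?;
    Ok(x + y)
}

fn main() {
    assert_eq!(parse_sum("1", "2"), Ok(3));
    assert!(parse_sum("1", "x").is_err());
}
```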
 
index 1b02c736dce3b6e3a0e932afd947bee65784bb41..793dba1e3f5f8bac6693f5eac7f4af2eb9f7ba1d 100644 (file)
@@ -9,7 +9,7 @@
 // except according to those terms.
 
 
-use middle::ty;
+use rustc::ty;
 
 use std::cell::Cell;
 use syntax::codemap::Span;
index 2f243d0fd0f80cb7c2ffe7acfd3c7c87237461e7..3b03a713a5b93f8acf8d79f159e9dfe1221e7ae1 100644 (file)
 //! We walk the set of items and, for each member, generate new constraints.
 
 use dep_graph::DepTrackingMapConfig;
-use middle::def_id::DefId;
+use hir::def_id::DefId;
 use middle::resolve_lifetime as rl;
-use middle::subst;
-use middle::subst::ParamSpace;
-use middle::ty::{self, Ty};
-use middle::ty::maps::ItemVariances;
-use rustc::front::map as hir_map;
+use rustc::ty::subst;
+use rustc::ty::subst::ParamSpace;
+use rustc::ty::{self, Ty, TyCtxt};
+use rustc::ty::maps::ItemVariances;
+use rustc::hir::map as hir_map;
 use syntax::ast;
-use rustc_front::hir;
-use rustc_front::intravisit::Visitor;
+use rustc::hir;
+use rustc::hir::intravisit::Visitor;
 
 use super::terms::*;
 use super::terms::VarianceTerm::*;
@@ -127,7 +127,7 @@ fn is_lifetime(map: &hir_map::Map, param_id: ast::NodeId) -> bool {
 }
 
 impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
-    fn tcx(&self) -> &'a ty::ctxt<'tcx> {
+    fn tcx(&self) -> &'a TyCtxt<'tcx> {
         self.terms_cx.tcx
     }
 
@@ -135,9 +135,8 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
         match self.terms_cx.inferred_map.get(&param_id) {
             Some(&index) => index,
             None => {
-                self.tcx().sess.bug(&format!(
-                        "no inferred index entry for {}",
-                        self.tcx().map.node_to_string(param_id)));
+                bug!("no inferred index entry for {}",
+                     self.tcx().map.node_to_string(param_id));
             }
         }
     }
@@ -148,7 +147,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
         match tcx.named_region_map.get(&param_id) {
             Some(&rl::DefEarlyBoundRegion(_, _, lifetime_decl_id))
                 => lifetime_decl_id,
-            Some(_) => panic!("should not encounter non early-bound cases"),
+            Some(_) => bug!("should not encounter non early-bound cases"),
 
             // The lookup should only fail when `param_id` is
             // itself a lifetime binding: use it as the decl_id.
@@ -173,13 +172,13 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
             assert!(is_lifetime(&tcx.map, param_id));
             let parent_id = tcx.map.get_parent(decl_id);
             let parent = tcx.map.find(parent_id).unwrap_or_else(
-                || panic!("tcx.map missing entry for id: {}", parent_id));
+                || bug!("tcx.map missing entry for id: {}", parent_id));
 
             let is_inferred;
             macro_rules! cannot_happen { () => { {
-                panic!("invalid parent: {} for {}",
-                      tcx.map.node_to_string(parent_id),
-                      tcx.map.node_to_string(param_id));
+                bug!("invalid parent: {} for {}",
+                     tcx.map.node_to_string(parent_id),
+                     tcx.map.node_to_string(param_id));
             } } }
 
             match parent {
@@ -328,7 +327,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
             }
 
             ty::TyClosure(..) => {
-                self.tcx().sess.bug("Unexpected closure type in variance computation");
+                bug!("Unexpected closure type in variance computation");
             }
 
             ty::TyRef(region, ref mt) => {
@@ -429,7 +428,8 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
                 }
             }
 
-            ty::TyBareFn(_, &ty::BareFnTy { ref sig, .. }) => {
+            ty::TyFnDef(_, _, &ty::BareFnTy { ref sig, .. }) |
+            ty::TyFnPtr(&ty::BareFnTy { ref sig, .. }) => {
                 self.add_constraints_from_sig(generics, sig, variance);
             }
 
@@ -439,9 +439,8 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
             }
 
             ty::TyInfer(..) => {
-                self.tcx().sess.bug(
-                    &format!("unexpected type encountered in \
-                              variance inference: {}", ty));
+                bug!("unexpected type encountered in \
+                      variance inference: {}", ty);
             }
         }
     }
@@ -476,7 +475,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
                 self.declared_variance(p.def_id, def_id,
                                        RegionParam, p.space, p.index as usize);
             let variance_i = self.xform(variance, variance_decl);
-            let substs_r = *substs.regions().get(p.space, p.index as usize);
+            let substs_r = *substs.regions.get(p.space, p.index as usize);
             self.add_constraints_from_region(generics, substs_r, variance_i);
         }
     }
@@ -524,11 +523,9 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
             ty::ReSkolemized(..) | ty::ReEmpty => {
                 // We don't expect to see anything but 'static or bound
                 // regions when visiting member types or method types.
-                self.tcx()
-                    .sess
-                    .bug(&format!("unexpected region encountered in variance \
-                                  inference: {:?}",
-                                 region));
+                bug!("unexpected region encountered in variance \
+                      inference: {:?}",
+                     region);
             }
         }
     }
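Note: throughout these hunks, `tcx.sess.bug(&format!(...))`, `tcx.sess.span_bug(...)`, and ad-hoc `panic!(...)` call sites collapse into the `bug!` and `span_bug!` macros. A hedged sketch of the general shape of such a formatting macro; the real definitions live in librustc and report an internal compiler error through the session rather than panicking directly:

```rust
// Illustrative stand-in for rustc's `bug!`: forward the format arguments to a
// single reporting path instead of building the string at every call site.
macro_rules! bug {
    ($($arg:tt)*) => {
        panic!("internal compiler error: {}", format!($($arg)*))
    };
}

fn main() {
    let param_id = 42;
    if param_id != 42 {
        // Mirrors a call site from the patch.
        bug!("no inferred index entry for {}", param_id);
    }
    println!("ok");
}
```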
index 3ce3a868f047723c8c16c20e39a399b8829e225e..ee9f317f20fd08e37d7bf1370500f2b219d90b21 100644 (file)
@@ -12,7 +12,7 @@
 //! parameters. See README.md for details.
 
 use arena;
-use middle::ty;
+use rustc::ty::TyCtxt;
 
 /// Defines the `TermsContext`, which basically houses an arena where we can
 /// allocate terms.
@@ -27,7 +27,7 @@ mod solve;
 /// Code for transforming variances.
 mod xform;
 
-pub fn infer_variance(tcx: &ty::ctxt) {
+pub fn infer_variance(tcx: &TyCtxt) {
     let mut arena = arena::TypedArena::new();
     let terms_cx = terms::determine_parameters_to_be_inferred(tcx, &mut arena);
     let constraints_cx = constraints::add_constraints_from_crate(terms_cx);
index fd442a4547cf92a360e5d5fce0622c30a7591bd2..0763cfd7e2e6064d5c1a31f918c603301b5a3798 100644 (file)
@@ -15,8 +15,8 @@
 //! optimal solution to the constraints. The final variance for each
 //! inferred is then written into the `variance_map` in the tcx.
 
-use middle::subst::VecPerParamSpace;
-use middle::ty;
+use rustc::ty::subst::VecPerParamSpace;
+use rustc::ty;
 use std::rc::Rc;
 
 use super::constraints::*;
index aa1e93c3d6b7d678c230478d858638fe0475a6f5..413dc83e638f2c583d84878fcd9e408617bd8585 100644 (file)
 
 use arena::TypedArena;
 use dep_graph::DepTrackingMapConfig;
-use middle::subst::{ParamSpace, FnSpace, TypeSpace, SelfSpace, VecPerParamSpace};
-use middle::ty;
-use middle::ty::maps::ItemVariances;
+use rustc::ty::subst::{ParamSpace, FnSpace, TypeSpace, SelfSpace, VecPerParamSpace};
+use rustc::ty::{self, TyCtxt};
+use rustc::ty::maps::ItemVariances;
 use std::fmt;
 use std::rc::Rc;
 use syntax::ast;
-use rustc_front::hir;
-use rustc_front::intravisit::Visitor;
+use rustc::hir;
+use rustc::hir::intravisit::Visitor;
 use util::nodemap::NodeMap;
 
 use self::VarianceTerm::*;
@@ -59,7 +59,7 @@ impl<'a> fmt::Debug for VarianceTerm<'a> {
 // The first pass over the crate simply builds up the set of inferreds.
 
 pub struct TermsContext<'a, 'tcx: 'a> {
-    pub tcx: &'a ty::ctxt<'tcx>,
+    pub tcx: &'a TyCtxt<'tcx>,
     pub arena: &'a TypedArena<VarianceTerm<'a>>,
 
     pub empty_variances: Rc<ty::ItemVariances>,
@@ -98,7 +98,7 @@ pub struct InferredInfo<'a> {
 }
 
 pub fn determine_parameters_to_be_inferred<'a, 'tcx>(
-    tcx: &'a ty::ctxt<'tcx>,
+    tcx: &'a TyCtxt<'tcx>,
     arena: &'a mut TypedArena<VarianceTerm<'a>>)
     -> TermsContext<'a, 'tcx>
 {
@@ -125,7 +125,7 @@ pub fn determine_parameters_to_be_inferred<'a, 'tcx>(
     terms_cx
 }
 
-fn lang_items(tcx: &ty::ctxt) -> Vec<(ast::NodeId,Vec<ty::Variance>)> {
+fn lang_items(tcx: &TyCtxt) -> Vec<(ast::NodeId,Vec<ty::Variance>)> {
     let all = vec![
         (tcx.lang_items.phantom_data(), vec![ty::Covariant]),
         (tcx.lang_items.unsafe_cell_type(), vec![ty::Invariant]),
index 2749dce5c73f44ab9f373c2c90953061106dc218..02a2ceb360d7f9fb3d9f819060dea059262fd863 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use middle::ty;
+use rustc::ty;
 
 pub trait Xform {
     fn xform(self, v: Self) -> Self;
index 115002867555d7e40540b767e0e544f7ca7dd0b4..863cada5b88090f09a9eb434c37b93add48ca19d 100644 (file)
@@ -19,7 +19,7 @@
 //! [Unicode code point]: http://www.unicode.org/glossary/#code_point
 //!
 //! This module exists for technical reasons; the primary documentation for
-//! `char` is directly on [the `char` primitive type](../primitive.char.html)
+//! `char` is directly on [the `char` primitive type](../../std/primitive.char.html)
 //! itself.
 //!
 //! This module is the home of the iterator implementations for the iterators
 #![stable(feature = "rust1", since = "1.0.0")]
 
 use core::char::CharExt as C;
-use core::option::Option::{self, Some, None};
-use core::iter::Iterator;
+use core::fmt;
 use tables::{derived_property, property, general_category, conversions};
 
 // stable reexports
 #[stable(feature = "rust1", since = "1.0.0")]
-pub use core::char::{MAX, from_u32, from_u32_unchecked, from_digit, EscapeUnicode, EscapeDefault};
+pub use core::char::{MAX, from_u32, from_u32_unchecked, from_digit};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::char::{EscapeUnicode, EscapeDefault, EncodeUtf8, EncodeUtf16};
 
 // unstable reexports
 #[unstable(feature = "unicode", issue = "27783")]
@@ -46,8 +47,8 @@ pub use tables::UNICODE_VERSION;
 /// This `struct` is created by the [`to_lowercase()`] method on [`char`]. See
 /// its documentation for more.
 ///
-/// [`to_lowercase()`]: ../primitive.char.html#method.to_lowercase
-/// [`char`]: ../primitive.char.html
+/// [`to_lowercase()`]: ../../std/primitive.char.html#method.to_lowercase
+/// [`char`]: ../../std/primitive.char.html
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct ToLowercase(CaseMappingIter);
 
@@ -64,8 +65,8 @@ impl Iterator for ToLowercase {
 /// This `struct` is created by the [`to_uppercase()`] method on [`char`]. See
 /// its documentation for more.
 ///
-/// [`to_uppercase()`]: ../primitive.char.html#method.to_uppercase
-/// [`char`]: ../primitive.char.html
+/// [`to_uppercase()`]: ../../std/primitive.char.html#method.to_uppercase
+/// [`char`]: ../../std/primitive.char.html
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct ToUppercase(CaseMappingIter);
 
@@ -151,14 +152,9 @@ impl char {
     /// Basic usage:
     ///
     /// ```
-    /// let d = '1';
-    ///
-    /// assert!(d.is_digit(10));
-    ///
-    /// let d = 'f';
-    ///
-    /// assert!(d.is_digit(16));
-    /// assert!(!d.is_digit(10));
+    /// assert!('1'.is_digit(10));
+    /// assert!('f'.is_digit(16));
+    /// assert!(!'f'.is_digit(10));
     /// ```
     ///
     /// Passing a large radix, causing a panic:
@@ -167,10 +163,8 @@ impl char {
     /// use std::thread;
     ///
     /// let result = thread::spawn(|| {
-    ///     let d = '1';
-    ///
     ///     // this panics
-    ///     d.is_digit(37);
+    ///     '1'.is_digit(37);
     /// }).join();
     ///
     /// assert!(result.is_err());
@@ -207,25 +201,15 @@ impl char {
     /// Basic usage:
     ///
     /// ```
-    /// let d = '1';
-    ///
-    /// assert_eq!(d.to_digit(10), Some(1));
-    ///
-    /// let d = 'f';
-    ///
-    /// assert_eq!(d.to_digit(16), Some(15));
+    /// assert_eq!('1'.to_digit(10), Some(1));
+    /// assert_eq!('f'.to_digit(16), Some(15));
     /// ```
     ///
     /// Passing a non-digit results in failure:
     ///
     /// ```
-    /// let d = 'f';
-    ///
-    /// assert_eq!(d.to_digit(10), None);
-    ///
-    /// let d = 'z';
-    ///
-    /// assert_eq!(d.to_digit(16), None);
+    /// assert_eq!('f'.to_digit(10), None);
+    /// assert_eq!('z'.to_digit(16), None);
     /// ```
     ///
     /// Passing a large radix, causing a panic:
@@ -234,9 +218,7 @@ impl char {
     /// use std::thread;
     ///
     /// let result = thread::spawn(|| {
-    ///   let d = '1';
-    ///
-    ///   d.to_digit(37);
+    ///     '1'.to_digit(37);
     /// }).join();
     ///
     /// assert!(result.is_err());
@@ -408,84 +390,50 @@ impl char {
         C::len_utf16(self)
     }
 
-    /// Encodes this character as UTF-8 into the provided byte buffer, and then
-    /// returns the number of bytes written.
+    /// Returns an iterator over the bytes of this character as UTF-8.
     ///
-    /// If the buffer is not large enough, nothing will be written into it and a
-    /// `None` will be returned. A buffer of length four is large enough to
-    /// encode any `char`.
+    /// The returned iterator also has an `as_slice()` method to view the
+    /// encoded bytes as a byte slice.
     ///
     /// # Examples
     ///
-    /// In both of these examples, 'ß' takes two bytes to encode.
-    ///
-    /// ```
-    /// #![feature(unicode)]
-    ///
-    /// let mut b = [0; 2];
-    ///
-    /// let result = 'ß'.encode_utf8(&mut b);
-    ///
-    /// assert_eq!(result, Some(2));
-    /// ```
-    ///
-    /// A buffer that's too small:
-    ///
     /// ```
     /// #![feature(unicode)]
     ///
-    /// let mut b = [0; 1];
-    ///
-    /// let result = 'ß'.encode_utf8(&mut b);
+    /// let iterator = 'ß'.encode_utf8();
+    /// assert_eq!(iterator.as_slice(), [0xc3, 0x9f]);
     ///
-    /// assert_eq!(result, None);
+    /// for (i, byte) in iterator.enumerate() {
+    ///     println!("byte {}: {:x}", i, byte);
+    /// }
     /// ```
-    #[unstable(feature = "unicode",
-               reason = "pending decision about Iterator/Writer/Reader",
-               issue = "27784")]
+    #[unstable(feature = "unicode", issue = "27784")]
     #[inline]
-    pub fn encode_utf8(self, dst: &mut [u8]) -> Option<usize> {
-        C::encode_utf8(self, dst)
+    pub fn encode_utf8(self) -> EncodeUtf8 {
+        C::encode_utf8(self)
     }
 
-    /// Encodes this character as UTF-16 into the provided `u16` buffer, and
-    /// then returns the number of `u16`s written.
+    /// Returns an iterator over the `u16` entries of this character as UTF-16.
     ///
-    /// If the buffer is not large enough, nothing will be written into it and a
-    /// `None` will be returned. A buffer of length 2 is large enough to encode
-    /// any `char`.
+    /// The returned iterator also has an `as_slice()` method to view the
+    /// encoded form as a slice.
     ///
     /// # Examples
     ///
-    /// In both of these examples, '𝕊' takes two `u16`s to encode.
-    ///
     /// ```
     /// #![feature(unicode)]
     ///
-    /// let mut b = [0; 2];
-    ///
-    /// let result = '𝕊'.encode_utf16(&mut b);
-    ///
-    /// assert_eq!(result, Some(2));
-    /// ```
-    ///
-    /// A buffer that's too small:
+    /// let iterator = '𝕊'.encode_utf16();
+    /// assert_eq!(iterator.as_slice(), [0xd835, 0xdd4a]);
     ///
+    /// for (i, val) in iterator.enumerate() {
+    ///     println!("entry {}: {:x}", i, val);
+    /// }
     /// ```
-    /// #![feature(unicode)]
-    ///
-    /// let mut b = [0; 1];
-    ///
-    /// let result = '𝕊'.encode_utf16(&mut b);
-    ///
-    /// assert_eq!(result, None);
-    /// ```
-    #[unstable(feature = "unicode",
-               reason = "pending decision about Iterator/Writer/Reader",
-               issue = "27784")]
+    #[unstable(feature = "unicode", issue = "27784")]
     #[inline]
-    pub fn encode_utf16(self, dst: &mut [u16]) -> Option<usize> {
-        C::encode_utf16(self, dst)
+    pub fn encode_utf16(self) -> EncodeUtf16 {
+        C::encode_utf16(self)
     }
 
     /// Returns true if this `char` is an alphabetic code point, and false if not.
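Note: the two hunks above turn `char::encode_utf8` and `char::encode_utf16` from buffer-filling methods into methods that return iterators with an `as_slice()` view. A minimal sketch of the new calls, assuming a 1.9-era nightly with `#![feature(unicode)]`; the assertions mirror the doc examples in the patch:

```rust
#![feature(unicode)]

fn main() {
    // 'ß' encodes to two UTF-8 bytes.
    let utf8 = 'ß'.encode_utf8();
    assert_eq!(utf8.as_slice(), [0xc3, 0x9f]);

    // '𝕊' encodes to a UTF-16 surrogate pair.
    let utf16 = '𝕊'.encode_utf16();
    assert_eq!(utf16.as_slice(), [0xd835, 0xdd4a]);

    // Both returned values are also iterators.
    for (i, byte) in utf8.enumerate() {
        println!("byte {}: {:x}", i, byte);
    }
}
```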
@@ -495,12 +443,8 @@ impl char {
     /// Basic usage:
     ///
     /// ```
-    /// let c = 'a';
-    ///
-    /// assert!(c.is_alphabetic());
-    ///
-    /// let c = '京';
-    /// assert!(c.is_alphabetic());
+    /// assert!('a'.is_alphabetic());
+    /// assert!('京'.is_alphabetic());
     ///
     /// let c = '💝';
     /// // love is many things, but it is not alphabetic
@@ -554,21 +498,13 @@ impl char {
     /// Basic usage:
     ///
     /// ```
-    /// let c = 'a';
-    /// assert!(c.is_lowercase());
-    ///
-    /// let c = 'δ';
-    /// assert!(c.is_lowercase());
-    ///
-    /// let c = 'A';
-    /// assert!(!c.is_lowercase());
-    ///
-    /// let c = 'Δ';
-    /// assert!(!c.is_lowercase());
+    /// assert!('a'.is_lowercase());
+    /// assert!('δ'.is_lowercase());
+    /// assert!(!'A'.is_lowercase());
+    /// assert!(!'Δ'.is_lowercase());
     ///
     /// // The various Chinese scripts do not have case, and so:
-    /// let c = '中';
-    /// assert!(!c.is_lowercase());
+    /// assert!(!'中'.is_lowercase());
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
@@ -590,21 +526,13 @@ impl char {
     /// Basic usage:
     ///
     /// ```
-    /// let c = 'a';
-    /// assert!(!c.is_uppercase());
-    ///
-    /// let c = 'δ';
-    /// assert!(!c.is_uppercase());
-    ///
-    /// let c = 'A';
-    /// assert!(c.is_uppercase());
-    ///
-    /// let c = 'Δ';
-    /// assert!(c.is_uppercase());
+    /// assert!(!'a'.is_uppercase());
+    /// assert!(!'δ'.is_uppercase());
+    /// assert!('A'.is_uppercase());
+    /// assert!('Δ'.is_uppercase());
     ///
     /// // The various Chinese scripts do not have case, and so:
-    /// let c = '中';
-    /// assert!(!c.is_uppercase());
+    /// assert!(!'中'.is_uppercase());
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
@@ -626,15 +554,12 @@ impl char {
     /// Basic usage:
     ///
     /// ```
-    /// let c = ' ';
-    /// assert!(c.is_whitespace());
+    /// assert!(' '.is_whitespace());
     ///
     /// // a non-breaking space
-    /// let c = '\u{A0}';
-    /// assert!(c.is_whitespace());
+    /// assert!('\u{A0}'.is_whitespace());
     ///
-    /// let c = '越';
-    /// assert!(!c.is_whitespace());
+    /// assert!(!'越'.is_whitespace());
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
@@ -656,29 +581,14 @@ impl char {
     /// Basic usage:
     ///
     /// ```
-    /// let c = '٣';
-    /// assert!(c.is_alphanumeric());
-    ///
-    /// let c = '7';
-    /// assert!(c.is_alphanumeric());
-    ///
-    /// let c = '৬';
-    /// assert!(c.is_alphanumeric());
-    ///
-    /// let c = 'K';
-    /// assert!(c.is_alphanumeric());
-    ///
-    /// let c = 'و';
-    /// assert!(c.is_alphanumeric());
-    ///
-    /// let c = '藏';
-    /// assert!(c.is_alphanumeric());
-    ///
-    /// let c = '¾';
-    /// assert!(!c.is_alphanumeric());
-    ///
-    /// let c = '①';
-    /// assert!(!c.is_alphanumeric());
+    /// assert!('٣'.is_alphanumeric());
+    /// assert!('7'.is_alphanumeric());
+    /// assert!('৬'.is_alphanumeric());
+    /// assert!('K'.is_alphanumeric());
+    /// assert!('و'.is_alphanumeric());
+    /// assert!('藏'.is_alphanumeric());
+    /// assert!(!'¾'.is_alphanumeric());
+    /// assert!(!'①'.is_alphanumeric());
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
@@ -697,11 +607,8 @@ impl char {
     ///
     /// ```
     /// // U+009C, STRING TERMINATOR
-    /// let c = '\u{9c}';
-    /// assert!(c.is_control());
-    ///
-    /// let c = 'q';
-    /// assert!(!c.is_control());
+    /// assert!('\u{9c}'.is_control());
+    /// assert!(!'q'.is_control());
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
@@ -719,29 +626,14 @@ impl char {
     /// Basic usage:
     ///
     /// ```
-    /// let c = '٣';
-    /// assert!(c.is_numeric());
-    ///
-    /// let c = '7';
-    /// assert!(c.is_numeric());
-    ///
-    /// let c = '৬';
-    /// assert!(c.is_numeric());
-    ///
-    /// let c = 'K';
-    /// assert!(!c.is_numeric());
-    ///
-    /// let c = 'و';
-    /// assert!(!c.is_numeric());
-    ///
-    /// let c = '藏';
-    /// assert!(!c.is_numeric());
-    ///
-    /// let c = '¾';
-    /// assert!(!c.is_numeric());
-    ///
-    /// let c = '①';
-    /// assert!(!c.is_numeric());
+    /// assert!('٣'.is_numeric());
+    /// assert!('7'.is_numeric());
+    /// assert!('৬'.is_numeric());
+    /// assert!(!'K'.is_numeric());
+    /// assert!(!'و'.is_numeric());
+    /// assert!(!'藏'.is_numeric());
+    /// assert!(!'¾'.is_numeric());
+    /// assert!(!'①'.is_numeric());
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
@@ -776,13 +668,10 @@ impl char {
     /// Basic usage:
     ///
     /// ```
-    /// let c = 'C';
-    ///
-    /// assert_eq!(c.to_lowercase().next(), Some('c'));
+    /// assert_eq!('C'.to_lowercase().next(), Some('c'));
     ///
     /// // Japanese scripts do not have case, and so:
-    /// let c = '山';
-    /// assert_eq!(c.to_lowercase().next(), Some('山'));
+    /// assert_eq!('山'.to_lowercase().next(), Some('山'));
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
@@ -813,12 +702,10 @@ impl char {
     /// Basic usage:
     ///
     /// ```
-    /// let c = 'c';
-    /// assert_eq!(c.to_uppercase().next(), Some('C'));
+    /// assert_eq!('c'.to_uppercase().next(), Some('C'));
     ///
     /// // Japanese does not have case, and so:
-    /// let c = '山';
-    /// assert_eq!(c.to_uppercase().next(), Some('山'));
+    /// assert_eq!('山'.to_uppercase().next(), Some('山'));
     /// ```
     ///
     /// In Turkish, the equivalent of 'i' in Latin has five forms instead of two:
@@ -829,9 +716,7 @@ impl char {
     /// Note that the lowercase dotted 'i' is the same as the Latin. Therefore:
     ///
     /// ```
-    /// let i = 'i';
-    ///
-    /// let upper_i = i.to_uppercase().next();
+    /// let upper_i = 'i'.to_uppercase().next();
     /// ```
     ///
     /// The value of `upper_i` here relies on the language of the text: if we're
@@ -839,9 +724,7 @@ impl char {
     /// be `Some('İ')`. `to_uppercase()` does not take this into account, and so:
     ///
     /// ```
-    /// let i = 'i';
-    ///
-    /// let upper_i = i.to_uppercase().next();
+    /// let upper_i = 'i'.to_uppercase().next();
     ///
     /// assert_eq!(Some('I'), upper_i);
     /// ```
@@ -855,7 +738,7 @@ impl char {
 }
 
 /// An iterator that decodes UTF-16 encoded code points from an iterator of `u16`s.
-#[unstable(feature = "decode_utf16", reason = "recently exposed", issue = "27830")]
+#[stable(feature = "decode_utf16", since = "1.9.0")]
 #[derive(Clone)]
 pub struct DecodeUtf16<I>
     where I: Iterator<Item = u16>
@@ -864,7 +747,14 @@ pub struct DecodeUtf16<I>
     buf: Option<u16>,
 }
 
-/// Create an iterator over the UTF-16 encoded code points in `iterable`,
+/// An error that can be returned when decoding UTF-16 code points.
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct DecodeUtf16Error {
+    code: u16,
+}
+
+/// Create an iterator over the UTF-16 encoded code points in `iter`,
 /// returning unpaired surrogates as `Err`s.
 ///
 /// # Examples
@@ -872,8 +762,6 @@ pub struct DecodeUtf16<I>
 /// Basic usage:
 ///
 /// ```
-/// #![feature(decode_utf16)]
-///
 /// use std::char::decode_utf16;
 ///
 /// fn main() {
@@ -882,7 +770,9 @@ pub struct DecodeUtf16<I>
 ///              0x0073, 0xDD1E, 0x0069, 0x0063,
 ///              0xD834];
 ///
-///     assert_eq!(decode_utf16(v.iter().cloned()).collect::<Vec<_>>(),
+///     assert_eq!(decode_utf16(v.iter().cloned())
+///                            .map(|r| r.map_err(|e| e.unpaired_surrogate()))
+///                            .collect::<Vec<_>>(),
 ///                vec![Ok('𝄞'),
 ///                     Ok('m'), Ok('u'), Ok('s'),
 ///                     Err(0xDD1E),
@@ -894,8 +784,6 @@ pub struct DecodeUtf16<I>
 /// A lossy decoder can be obtained by replacing `Err` results with the replacement character:
 ///
 /// ```
-/// #![feature(decode_utf16)]
-///
 /// use std::char::{decode_utf16, REPLACEMENT_CHARACTER};
 ///
 /// fn main() {
@@ -910,20 +798,20 @@ pub struct DecodeUtf16<I>
 ///                "𝄞mus�ic�");
 /// }
 /// ```
-#[unstable(feature = "decode_utf16", reason = "recently exposed", issue = "27830")]
+#[stable(feature = "decode_utf16", since = "1.9.0")]
 #[inline]
-pub fn decode_utf16<I: IntoIterator<Item = u16>>(iterable: I) -> DecodeUtf16<I::IntoIter> {
+pub fn decode_utf16<I: IntoIterator<Item = u16>>(iter: I) -> DecodeUtf16<I::IntoIter> {
     DecodeUtf16 {
-        iter: iterable.into_iter(),
+        iter: iter.into_iter(),
         buf: None,
     }
 }
 
-#[unstable(feature = "decode_utf16", reason = "recently exposed", issue = "27830")]
+#[stable(feature = "decode_utf16", since = "1.9.0")]
 impl<I: Iterator<Item=u16>> Iterator for DecodeUtf16<I> {
-    type Item = Result<char, u16>;
+    type Item = Result<char, DecodeUtf16Error>;
 
-    fn next(&mut self) -> Option<Result<char, u16>> {
+    fn next(&mut self) -> Option<Result<char, DecodeUtf16Error>> {
         let u = match self.buf.take() {
             Some(buf) => buf,
             None => match self.iter.next() {
@@ -937,18 +825,18 @@ impl<I: Iterator<Item=u16>> Iterator for DecodeUtf16<I> {
             Some(Ok(unsafe { from_u32_unchecked(u as u32) }))
         } else if u >= 0xDC00 {
             // a trailing surrogate
-            Some(Err(u))
+            Some(Err(DecodeUtf16Error { code: u }))
         } else {
             let u2 = match self.iter.next() {
                 Some(u2) => u2,
                 // eof
-                None => return Some(Err(u)),
+                None => return Some(Err(DecodeUtf16Error { code: u })),
             };
             if u2 < 0xDC00 || u2 > 0xDFFF {
                 // not a trailing surrogate so we're not a valid
                 // surrogate pair, so rewind to redecode u2 next time.
                 self.buf = Some(u2);
-                return Some(Err(u));
+                return Some(Err(DecodeUtf16Error { code: u }));
             }
 
             // all ok, so let's decode it.
@@ -966,8 +854,25 @@ impl<I: Iterator<Item=u16>> Iterator for DecodeUtf16<I> {
     }
 }
 
-/// `U+FFFD REPLACEMENT CHARACTER` (�) is used in Unicode to represent a decoding error.
+impl DecodeUtf16Error {
+    /// Returns the unpaired surrogate which caused this error.
+    #[stable(feature = "decode_utf16", since = "1.9.0")]
+    pub fn unpaired_surrogate(&self) -> u16 {
+        self.code
+    }
+}
+
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+impl fmt::Display for DecodeUtf16Error {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "unpaired surrogate found: {:x}", self.code)
+    }
+}
+
+/// `U+FFFD REPLACEMENT CHARACTER` (�) is used in Unicode to represent a
+/// decoding error.
+///
 /// It can occur, for example, when giving ill-formed UTF-8 bytes to
-/// [`String::from_utf8_lossy`](../string/struct.String.html#method.from_utf8_lossy).
-#[unstable(feature = "decode_utf16", reason = "recently added", issue = "27830")]
+/// [`String::from_utf8_lossy`](../../std/string/struct.String.html#method.from_utf8_lossy).
+#[stable(feature = "decode_utf16", since = "1.9.0")]
 pub const REPLACEMENT_CHARACTER: char = '\u{FFFD}';
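Note: with the stabilization above, `decode_utf16` now yields `Result<char, DecodeUtf16Error>` rather than `Result<char, u16>`, and the offending code unit is retrieved through `unpaired_surrogate()`. A small stand-alone program combining the patch's two doc examples; it compiles on Rust 1.9 and later:

```rust
use std::char::{decode_utf16, REPLACEMENT_CHARACTER};

fn main() {
    // 𝄞mus..ic.. — 0xDD1E after "mus" is a lone trailing surrogate and the
    // final 0xD834 is a lone leading surrogate.
    let v = [0xD834, 0xDD1E, 0x006d, 0x0075,
             0x0073, 0xDD1E, 0x0069, 0x0063,
             0xD834];

    // Inspect the offending code unit on error...
    for r in decode_utf16(v.iter().cloned()) {
        match r {
            Ok(c) => print!("{}", c),
            Err(e) => print!("<U+{:04X}>", e.unpaired_surrogate()),
        }
    }
    println!();

    // ...or decode lossily by substituting U+FFFD.
    let lossy: String = decode_utf16(v.iter().cloned())
        .map(|r| r.unwrap_or(REPLACEMENT_CHARACTER))
        .collect();
    assert_eq!(lossy, "𝄞mus\u{FFFD}ic\u{FFFD}");
}
```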
index 4cbcfd5a0b2a5a1457416ee1c85e24638392a965..2f7f724e6af8fcfe606630f7359829d384c00da1 100644 (file)
@@ -35,6 +35,7 @@
 #![feature(core_char_ext)]
 #![feature(lang_items)]
 #![feature(staged_api)]
+#![feature(unicode)]
 
 mod tables;
 mod u_str;
@@ -51,3 +52,8 @@ pub mod str {
 pub mod derived_property {
     pub use tables::derived_property::{Cased, Case_Ignorable};
 }
+
+// For use in libsyntax
+pub mod property {
+    pub use tables::property::Pattern_White_Space;
+}
index a147bea791c47f4ff1f95f3399cca7046f2af2d4..ad17016eae8cbb8b4b6ce47c2b19e129b3cd1c7d 100644 (file)
@@ -1180,6 +1180,15 @@ pub mod derived_property {
 }
 
 pub mod property {
+    pub const Pattern_White_Space_table: &'static [(char, char)] = &[
+        ('\u{9}', '\u{d}'), ('\u{20}', '\u{20}'), ('\u{85}', '\u{85}'), ('\u{200e}', '\u{200f}'),
+        ('\u{2028}', '\u{2029}')
+    ];
+
+    pub fn Pattern_White_Space(c: char) -> bool {
+        super::bsearch_range_table(c, Pattern_White_Space_table)
+    }
+
     pub const White_Space_table: &'static [(char, char)] = &[
         ('\u{9}', '\u{d}'), ('\u{20}', '\u{20}'), ('\u{85}', '\u{85}'), ('\u{a0}', '\u{a0}'),
         ('\u{1680}', '\u{1680}'), ('\u{2000}', '\u{200a}'), ('\u{2028}', '\u{2029}'), ('\u{202f}',
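Note: `Pattern_White_Space` is added here as an internal property table for use by libsyntax; it is not exported from the standard library. A hedged stand-alone sketch of the same range-table lookup follows; the function and constant names are illustrative, and inside librustc_unicode the search goes through `bsearch_range_table` instead:

```rust
use std::cmp::Ordering;

// Same ranges as the Pattern_White_Space_table added above.
const PATTERN_WHITE_SPACE: &'static [(char, char)] = &[
    ('\u{9}', '\u{d}'), ('\u{20}', '\u{20}'), ('\u{85}', '\u{85}'),
    ('\u{200e}', '\u{200f}'), ('\u{2028}', '\u{2029}'),
];

// Binary search over inclusive (lo, hi) ranges sorted by code point.
fn is_pattern_white_space(c: char) -> bool {
    PATTERN_WHITE_SPACE
        .binary_search_by(|&(lo, hi)| {
            if hi < c {
                Ordering::Less
            } else if lo > c {
                Ordering::Greater
            } else {
                Ordering::Equal
            }
        })
        .is_ok()
}

fn main() {
    assert!(is_pattern_white_space('\n'));
    assert!(is_pattern_white_space('\u{200e}')); // LEFT-TO-RIGHT MARK
    assert!(!is_pattern_white_space('\u{a0}'));  // NBSP: White_Space, not Pattern_White_Space
}
```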
index 9a6700ad47ce0d9df3ad250bdaec55c596f1c21b..18734a66871f6fd4541cb35706057adc8d18710f 100644 (file)
@@ -155,13 +155,13 @@ impl<I> Iterator for Utf16Encoder<I> where I: Iterator<Item=char> {
             return Some(tmp);
         }
 
-        let mut buf = [0; 2];
         self.chars.next().map(|ch| {
-            let n = CharExt::encode_utf16(ch, &mut buf).unwrap_or(0);
-            if n == 2 {
-                self.extra = buf[1];
+            let n = CharExt::encode_utf16(ch);
+            let n = n.as_slice();
+            if n.len() == 2 {
+                self.extra = n[1];
             }
-            buf[0]
+            n[0]
         })
     }
 
index 6b0ad30f4509c98f3e35aa9c4d5c3d45a003e832..9d76dd81e5f76041ff3404539ad98f8aad599a27 100644 (file)
@@ -11,18 +11,16 @@ crate-type = ["dylib"]
 
 [dependencies]
 arena = { path = "../libarena" }
-getopts = { path = "../libgetopts" }
 rustc = { path = "../librustc" }
 rustc_back = { path = "../librustc_back" }
+rustc_const_eval = { path = "../librustc_const_eval" }
 rustc_driver = { path = "../librustc_driver" }
-rustc_front = { path = "../librustc_front" }
 rustc_lint = { path = "../librustc_lint" }
 rustc_metadata = { path = "../librustc_metadata" }
 rustc_resolve = { path = "../librustc_resolve" }
 rustc_trans = { path = "../librustc_trans" }
 serialize = { path = "../libserialize" }
 syntax = { path = "../libsyntax" }
-test = { path = "../libtest" }
 log = { path = "../liblog" }
 
 [build-dependencies]
index 8f878264769796cb14444dbb963e4416baa79dae..850975498269bc4a31382f06455a15ed547f138c 100644 (file)
 //! Support for inlining external documentation into the current AST.
 
 use std::collections::HashSet;
+use std::iter::once;
 
 use syntax::ast;
 use syntax::attr::AttrMetaMethods;
-use rustc_front::hir;
+use rustc::hir;
 
 use rustc::middle::cstore::{self, CrateStore};
-use rustc::middle::def::Def;
-use rustc::middle::def_id::DefId;
-use rustc::middle::ty;
-use rustc::middle::subst;
+use rustc::hir::def::Def;
+use rustc::hir::def_id::DefId;
+use rustc::ty::{self, TyCtxt};
+use rustc::ty::subst;
 use rustc::middle::stability;
-use rustc::middle::const_eval;
+
+use rustc_const_eval::lookup_const_by_id;
 
 use core::DocContext;
 use doctree;
-use clean;
+use clean::{self, Attributes, GetDefId};
 
 use super::{Clean, ToSource};
 
@@ -67,7 +69,7 @@ pub fn try_inline(cx: &DocContext, id: ast::NodeId, into: Option<ast::Name>)
     })
 }
 
-fn try_inline_def(cx: &DocContext, tcx: &ty::ctxt,
+fn try_inline_def(cx: &DocContext, tcx: &TyCtxt,
                   def: Def) -> Option<Vec<clean::Item>> {
     let mut ret = Vec::new();
     let did = def.def_id();
@@ -128,7 +130,7 @@ fn try_inline_def(cx: &DocContext, tcx: &ty::ctxt,
     Some(ret)
 }
 
-pub fn load_attrs(cx: &DocContext, tcx: &ty::ctxt,
+pub fn load_attrs(cx: &DocContext, tcx: &TyCtxt,
                   did: DefId) -> Vec<clean::Attribute> {
     tcx.get_attrs(did).iter().map(|a| a.clean(cx)).collect()
 }
@@ -138,17 +140,17 @@ pub fn load_attrs(cx: &DocContext, tcx: &ty::ctxt,
 /// These names are used later on by HTML rendering to generate things like
 /// source links back to the original item.
 pub fn record_extern_fqn(cx: &DocContext, did: DefId, kind: clean::TypeKind) {
-    match cx.tcx_opt() {
-        Some(tcx) => {
-            let fqn = tcx.sess.cstore.extern_item_path(did);
-            let fqn = fqn.into_iter().map(|i| i.to_string()).collect();
-            cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
-        }
-        None => {}
+    if let Some(tcx) = cx.tcx_opt() {
+        let crate_name = tcx.sess.cstore.crate_name(did.krate).to_string();
+        let relative = tcx.def_path(did).data.into_iter().map(|elem| {
+            elem.data.to_string()
+        });
+        let fqn = once(crate_name).chain(relative).collect();
+        cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
     }
 }
 
-pub fn build_external_trait(cx: &DocContext, tcx: &ty::ctxt,
+pub fn build_external_trait(cx: &DocContext, tcx: &TyCtxt,
                             did: DefId) -> clean::Trait {
     let def = tcx.lookup_trait_def(did);
     let trait_items = tcx.trait_items(did).clean(cx);
@@ -164,10 +166,10 @@ pub fn build_external_trait(cx: &DocContext, tcx: &ty::ctxt,
     }
 }
 
-fn build_external_function(cx: &DocContext, tcx: &ty::ctxt, did: DefId) -> clean::Function {
+fn build_external_function(cx: &DocContext, tcx: &TyCtxt, did: DefId) -> clean::Function {
     let t = tcx.lookup_item_type(did);
     let (decl, style, abi) = match t.ty.sty {
-        ty::TyBareFn(_, ref f) => ((did, &f.sig).clean(cx), f.unsafety, f.abi),
+        ty::TyFnDef(_, _, ref f) => ((did, &f.sig).clean(cx), f.unsafety, f.abi),
         _ => panic!("bad function"),
     };
 
@@ -187,9 +189,7 @@ fn build_external_function(cx: &DocContext, tcx: &ty::ctxt, did: DefId) -> clean
     }
 }
 
-fn build_struct(cx: &DocContext, tcx: &ty::ctxt, did: DefId) -> clean::Struct {
-    use syntax::parse::token::special_idents::unnamed_field;
-
+fn build_struct(cx: &DocContext, tcx: &TyCtxt, did: DefId) -> clean::Struct {
     let t = tcx.lookup_item_type(did);
     let predicates = tcx.lookup_predicates(did);
     let variant = tcx.lookup_adt_def(did).struct_variant();
@@ -197,8 +197,8 @@ fn build_struct(cx: &DocContext, tcx: &ty::ctxt, did: DefId) -> clean::Struct {
     clean::Struct {
         struct_type: match &*variant.fields {
             [] => doctree::Unit,
-            [ref f] if f.name == unnamed_field.name => doctree::Newtype,
-            [ref f, ..] if f.name == unnamed_field.name => doctree::Tuple,
+            [_] if variant.kind == ty::VariantKind::Tuple => doctree::Newtype,
+            [..] if variant.kind == ty::VariantKind::Tuple => doctree::Tuple,
             _ => doctree::Plain,
         },
         generics: (&t.generics, &predicates, subst::TypeSpace).clean(cx),
@@ -207,7 +207,7 @@ fn build_struct(cx: &DocContext, tcx: &ty::ctxt, did: DefId) -> clean::Struct {
     }
 }
 
-fn build_type(cx: &DocContext, tcx: &ty::ctxt, did: DefId) -> clean::ItemEnum {
+fn build_type(cx: &DocContext, tcx: &TyCtxt, did: DefId) -> clean::ItemEnum {
     let t = tcx.lookup_item_type(did);
     let predicates = tcx.lookup_predicates(did);
     match t.ty.sty {
@@ -227,17 +227,15 @@ fn build_type(cx: &DocContext, tcx: &ty::ctxt, did: DefId) -> clean::ItemEnum {
     }, false)
 }
 
-pub fn build_impls(cx: &DocContext, tcx: &ty::ctxt,
+pub fn build_impls(cx: &DocContext,
+                   tcx: &TyCtxt,
                    did: DefId) -> Vec<clean::Item> {
     tcx.populate_inherent_implementations_for_type_if_necessary(did);
     let mut impls = Vec::new();
 
-    match tcx.inherent_impls.borrow().get(&did) {
-        None => {}
-        Some(i) => {
-            for &did in i.iter() {
-                build_impl(cx, tcx, did, &mut impls);
-            }
+    if let Some(i) = tcx.inherent_impls.borrow().get(&did) {
+        for &did in i.iter() {
+            build_impl(cx, tcx, did, &mut impls);
         }
     }
 
@@ -249,19 +247,21 @@ pub fn build_impls(cx: &DocContext, tcx: &ty::ctxt,
     // Primarily, the impls will be used to populate the documentation for this
     // type being inlined, but impls can also be used when generating
     // documentation for primitives (no way to find those specifically).
-    if cx.populated_crate_impls.borrow_mut().insert(did.krate) {
+    if !cx.all_crate_impls.borrow_mut().contains_key(&did.krate) {
+        let mut impls = Vec::new();
         for item in tcx.sess.cstore.crate_top_level_items(did.krate) {
             populate_impls(cx, tcx, item.def, &mut impls);
         }
+        cx.all_crate_impls.borrow_mut().insert(did.krate, impls);
 
-        fn populate_impls(cx: &DocContext, tcx: &ty::ctxt,
+        fn populate_impls(cx: &DocContext, tcx: &TyCtxt,
                           def: cstore::DefLike,
                           impls: &mut Vec<clean::Item>) {
             match def {
                 cstore::DlImpl(did) => build_impl(cx, tcx, did, impls),
                 cstore::DlDef(Def::Mod(did)) => {
                     // Don't recurse if this is a #[doc(hidden)] module
-                    if load_attrs(cx, tcx, did).iter().any(|a| is_doc_hidden(a)) {
+                    if load_attrs(cx, tcx, did).list("doc").has_word("hidden") {
                         return;
                     }
 
@@ -274,11 +274,25 @@ pub fn build_impls(cx: &DocContext, tcx: &ty::ctxt,
         }
     }
 
+    let mut candidates = cx.all_crate_impls.borrow_mut();
+    let candidates = candidates.get_mut(&did.krate).unwrap();
+    for i in (0..candidates.len()).rev() {
+        let remove = match candidates[i].inner {
+            clean::ImplItem(ref i) => {
+                i.for_.def_id() == Some(did) || i.for_.primitive_type().is_some()
+            }
+            _ => continue,
+        };
+        if remove {
+            impls.push(candidates.swap_remove(i));
+        }
+    }
+
     return impls;
 }
 
 pub fn build_impl(cx: &DocContext,
-                  tcx: &ty::ctxt,
+                  tcx: &TyCtxt,
                   did: DefId,
                   ret: &mut Vec<clean::Item>) {
     if !cx.inlined.borrow_mut().as_mut().unwrap().insert(did) {
@@ -290,7 +304,7 @@ pub fn build_impl(cx: &DocContext,
     if let Some(ref t) = associated_trait {
         // If this is an impl for a #[doc(hidden)] trait, be sure to not inline
         let trait_attrs = load_attrs(cx, tcx, t.def_id);
-        if trait_attrs.iter().any(|a| is_doc_hidden(a)) {
+        if trait_attrs.list("doc").has_word("hidden") {
             return
         }
     }
@@ -327,8 +341,8 @@ pub fn build_impl(cx: &DocContext,
                 let did = assoc_const.def_id;
                 let type_scheme = tcx.lookup_item_type(did);
                 let default = if assoc_const.has_value {
-                    Some(const_eval::lookup_const_by_id(tcx, did, None, None)
-                         .unwrap().span.to_src(cx))
+                    Some(lookup_const_by_id(tcx, did, None)
+                         .unwrap().0.span.to_src(cx))
                 } else {
                     None
                 };
@@ -347,7 +361,7 @@ pub fn build_impl(cx: &DocContext,
                 })
             }
             ty::MethodTraitItem(method) => {
-                if method.vis != hir::Public && associated_trait.is_none() {
+                if method.vis != ty::Visibility::Public && associated_trait.is_none() {
                     return None
                 }
                 let mut item = method.clean(cx);
@@ -405,15 +419,22 @@ pub fn build_impl(cx: &DocContext,
             clean::RegionBound(..) => unreachable!(),
         }
     });
-    if let Some(clean::ResolvedPath { did, .. }) = trait_ {
-        if Some(did) == cx.deref_trait_did.get() {
-            super::build_deref_target_impls(cx, &trait_items, ret);
-        }
+    if trait_.def_id() == cx.deref_trait_did.get() {
+        super::build_deref_target_impls(cx, &trait_items, ret);
     }
+
+    let provided = trait_.def_id().map(|did| {
+        cx.tcx().provided_trait_methods(did)
+                .into_iter()
+                .map(|meth| meth.name.to_string())
+                .collect()
+    }).unwrap_or(HashSet::new());
+
     ret.push(clean::Item {
         inner: clean::ImplItem(clean::Impl {
             unsafety: hir::Unsafety::Normal, // FIXME: this should be decoded
             derived: clean::detect_derived(&attrs),
+            provided_trait_methods: provided,
             trait_: trait_,
             for_: ty.ty.clean(cx),
             generics: (&ty.generics, &predicates, subst::TypeSpace).clean(cx),
@@ -430,21 +451,7 @@ pub fn build_impl(cx: &DocContext,
     });
 }
 
-fn is_doc_hidden(a: &clean::Attribute) -> bool {
-    match *a {
-        clean::List(ref name, ref inner) if *name == "doc" => {
-            inner.iter().any(|a| {
-                match *a {
-                    clean::Word(ref s) => *s == "hidden",
-                    _ => false,
-                }
-            })
-        }
-        _ => false
-    }
-}
-
-fn build_module(cx: &DocContext, tcx: &ty::ctxt,
+fn build_module(cx: &DocContext, tcx: &TyCtxt,
                 did: DefId) -> clean::Module {
     let mut items = Vec::new();
     fill_in(cx, tcx, did, &mut items);
@@ -453,7 +460,7 @@ fn build_module(cx: &DocContext, tcx: &ty::ctxt,
         is_crate: false,
     };
 
-    fn fill_in(cx: &DocContext, tcx: &ty::ctxt, did: DefId,
+    fn fill_in(cx: &DocContext, tcx: &TyCtxt, did: DefId,
                items: &mut Vec<clean::Item>) {
         // If we're reexporting a reexport it may actually reexport something in
         // two namespaces, so the target may be listed twice. Make sure we only
@@ -464,11 +471,10 @@ fn build_module(cx: &DocContext, tcx: &ty::ctxt,
                 cstore::DlDef(Def::ForeignMod(did)) => {
                     fill_in(cx, tcx, did, items);
                 }
-                cstore::DlDef(def) if item.vis == hir::Public => {
+                cstore::DlDef(def) if item.vis == ty::Visibility::Public => {
                     if !visited.insert(def) { continue }
-                    match try_inline_def(cx, tcx, def) {
-                        Some(i) => items.extend(i),
-                        None => {}
+                    if let Some(i) = try_inline_def(cx, tcx, def) {
+                        items.extend(i)
                     }
                 }
                 cstore::DlDef(..) => {}
@@ -480,12 +486,11 @@ fn build_module(cx: &DocContext, tcx: &ty::ctxt,
     }
 }
 
-fn build_const(cx: &DocContext, tcx: &ty::ctxt,
+fn build_const(cx: &DocContext, tcx: &TyCtxt,
                did: DefId) -> clean::Constant {
-    use rustc::middle::const_eval;
-    use rustc_front::print::pprust;
+    use rustc::hir::print as pprust;
 
-    let expr = const_eval::lookup_const_by_id(tcx, did, None, None).unwrap_or_else(|| {
+    let (expr, ty) = lookup_const_by_id(tcx, did, None).unwrap_or_else(|| {
         panic!("expected lookup_const_by_id to succeed for {:?}", did);
     });
     debug!("converting constant expr {:?} to snippet", expr);
@@ -493,12 +498,12 @@ fn build_const(cx: &DocContext, tcx: &ty::ctxt,
     debug!("got snippet {}", sn);
 
     clean::Constant {
-        type_: tcx.lookup_item_type(did).ty.clean(cx),
+        type_: ty.map(|t| t.clean(cx)).unwrap_or_else(|| tcx.lookup_item_type(did).ty.clean(cx)),
         expr: sn
     }
 }
 
-fn build_static(cx: &DocContext, tcx: &ty::ctxt,
+fn build_static(cx: &DocContext, tcx: &TyCtxt,
                 did: DefId,
                 mutable: bool) -> clean::Static {
     clean::Static {
index 1ff88f1d12758ae8c76a32cf7f2e0091e53365bc..d8d1472560d825211b71bbe2d5fcd9983651f552 100644 (file)
@@ -14,7 +14,6 @@
 pub use self::Type::*;
 pub use self::PrimitiveType::*;
 pub use self::TypeKind::*;
-pub use self::StructField::*;
 pub use self::VariantKind::*;
 pub use self::Mutability::*;
 pub use self::Import::*;
@@ -36,15 +35,15 @@ use syntax::ptr::P;
 
 use rustc_trans::back::link;
 use rustc::middle::cstore::{self, CrateStore};
-use rustc::middle::def::Def;
-use rustc::middle::def_id::{DefId, DefIndex};
-use rustc::middle::subst::{self, ParamSpace, VecPerParamSpace};
-use rustc::middle::ty;
+use rustc::hir::def::Def;
+use rustc::hir::def_id::{DefId, DefIndex};
+use rustc::ty::subst::{self, ParamSpace, VecPerParamSpace};
+use rustc::ty;
 use rustc::middle::stability;
 
-use rustc_front::hir;
+use rustc::hir;
 
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 use std::path::PathBuf;
 use std::rc::Rc;
 use std::u32;
@@ -53,10 +52,7 @@ use std::env::current_dir;
 use core::DocContext;
 use doctree;
 use visit_ast;
-
-/// A stable identifier to the particular version of JSON output.
-/// Increment this when the `Crate` and related structures change.
-pub const SCHEMA_VERSION: &'static str = "0.8.3";
+use html::item_type::ItemType;
 
 mod inline;
 mod simplify;
@@ -100,10 +96,7 @@ impl<T: Clean<U>, U> Clean<U> for Rc<T> {
 
 impl<T: Clean<U>, U> Clean<Option<U>> for Option<T> {
     fn clean(&self, cx: &DocContext) -> Option<U> {
-        match self {
-            &None => None,
-            &Some(ref v) => Some(v.clean(cx))
-        }
+        self.as_ref().map(|v| v.clean(cx))
     }
 }
 
@@ -178,9 +171,8 @@ impl<'a, 'tcx> Clean<Crate> for visit_ast::RustdocVisitor<'a, 'tcx> {
             };
             let mut tmp = Vec::new();
             for child in &mut m.items {
-                match child.inner {
-                    ModuleItem(..) => {}
-                    _ => continue,
+                if !child.is_mod() {
+                    continue;
                 }
                 let prim = match PrimitiveType::find(&child.attrs) {
                     Some(prim) => prim,
@@ -209,7 +201,7 @@ impl<'a, 'tcx> Clean<Crate> for visit_ast::RustdocVisitor<'a, 'tcx> {
                     current_dir().unwrap().join(path)
                 }
             },
-            Input::Str(_) => PathBuf::new() // FIXME: this is wrong
+            Input::Str { ref name, .. } => PathBuf::from(name.clone()),
         };
 
         Crate {
@@ -245,7 +237,7 @@ impl Clean<ExternalCrate> for CrateNum {
             }
         });
         ExternalCrate {
-            name: cx.sess().cstore.crate_name(self.0),
+            name: (&cx.sess().cstore.crate_name(self.0)[..]).to_owned(),
             attrs: cx.sess().cstore.crate_attrs(self.0).clean(cx),
             primitives: primitives,
         }
@@ -261,7 +253,7 @@ pub struct Item {
     pub source: Span,
     /// Not everything has a name. E.g., impls
     pub name: Option<String>,
-    pub attrs: Vec<Attribute> ,
+    pub attrs: Vec<Attribute>,
     pub inner: ItemEnum,
     pub visibility: Option<Visibility>,
     pub def_id: DefId,
@@ -270,89 +262,73 @@ pub struct Item {
 }
 
 impl Item {
-    /// Finds the `doc` attribute as a List and returns the list of attributes
-    /// nested inside.
-    pub fn doc_list<'a>(&'a self) -> Option<&'a [Attribute]> {
-        for attr in &self.attrs {
-            match *attr {
-                List(ref x, ref list) if "doc" == *x => {
-                    return Some(list);
-                }
-                _ => {}
-            }
-        }
-        return None;
-    }
-
     /// Finds the `doc` attribute as a NameValue and returns the corresponding
     /// value found.
     pub fn doc_value<'a>(&'a self) -> Option<&'a str> {
-        for attr in &self.attrs {
-            match *attr {
-                NameValue(ref x, ref v) if "doc" == *x => {
-                    return Some(v);
-                }
-                _ => {}
-            }
-        }
-        return None;
+        self.attrs.value("doc")
     }
-
-    pub fn is_hidden_from_doc(&self) -> bool {
-        match self.doc_list() {
-            Some(l) => {
-                for innerattr in l {
-                    match *innerattr {
-                        Word(ref s) if "hidden" == *s => {
-                            return true
-                        }
-                        _ => (),
-                    }
-                }
-            },
-            None => ()
+    pub fn is_crate(&self) -> bool {
+        match self.inner {
+            StrippedItem(box ModuleItem(Module { is_crate: true, ..})) |
+            ModuleItem(Module { is_crate: true, ..}) => true,
+            _ => false,
         }
-        return false;
     }
-
     pub fn is_mod(&self) -> bool {
-        match self.inner { ModuleItem(..) => true, _ => false }
+        ItemType::from_item(self) == ItemType::Module
     }
     pub fn is_trait(&self) -> bool {
-        match self.inner { TraitItem(..) => true, _ => false }
+        ItemType::from_item(self) == ItemType::Trait
     }
     pub fn is_struct(&self) -> bool {
-        match self.inner { StructItem(..) => true, _ => false }
+        ItemType::from_item(self) == ItemType::Struct
     }
     pub fn is_enum(&self) -> bool {
-        match self.inner { EnumItem(..) => true, _ => false }
+        ItemType::from_item(self) == ItemType::Enum
     }
     pub fn is_fn(&self) -> bool {
-        match self.inner { FunctionItem(..) => true, _ => false }
+        ItemType::from_item(self) == ItemType::Function
+    }
+    pub fn is_associated_type(&self) -> bool {
+        ItemType::from_item(self) == ItemType::AssociatedType
+    }
+    pub fn is_associated_const(&self) -> bool {
+        ItemType::from_item(self) == ItemType::AssociatedConst
+    }
+    pub fn is_method(&self) -> bool {
+        ItemType::from_item(self) == ItemType::Method
+    }
+    pub fn is_ty_method(&self) -> bool {
+        ItemType::from_item(self) == ItemType::TyMethod
+    }
+    pub fn is_stripped(&self) -> bool {
+        match self.inner { StrippedItem(..) => true, _ => false }
+    }
+    pub fn has_stripped_fields(&self) -> Option<bool> {
+        match self.inner {
+            StructItem(ref _struct) => Some(_struct.fields_stripped),
+            VariantItem(Variant { kind: StructVariant(ref vstruct)} ) => {
+                Some(vstruct.fields_stripped)
+            },
+            _ => None,
+        }
     }
 
     pub fn stability_class(&self) -> String {
-        match self.stability {
-            Some(ref s) => {
-                let mut base = match s.level {
-                    stability::Unstable => "unstable".to_string(),
-                    stability::Stable => String::new(),
-                };
-                if !s.deprecated_since.is_empty() {
-                    base.push_str(" deprecated");
-                }
-                base
+        self.stability.as_ref().map(|ref s| {
+            let mut base = match s.level {
+                stability::Unstable => "unstable".to_string(),
+                stability::Stable => String::new(),
+            };
+            if !s.deprecated_since.is_empty() {
+                base.push_str(" deprecated");
             }
-            _ => String::new(),
-        }
+            base
+        }).unwrap_or(String::new())
     }
 
     pub fn stable_since(&self) -> Option<&str> {
-        if let Some(ref s) = self.stability {
-            return Some(&s.since[..]);
-        }
-
-        None
+        self.stability.as_ref().map(|s| &s.since[..])
     }
 }
 
@@ -374,7 +350,7 @@ pub enum ItemEnum {
     TyMethodItem(TyMethod),
     /// A method with a body.
     MethodItem(Method),
-    StructFieldItem(StructField),
+    StructFieldItem(Type),
     VariantItem(Variant),
     /// `fn`s from an extern block
     ForeignFunctionItem(Function),
@@ -385,6 +361,8 @@ pub enum ItemEnum {
     AssociatedConstItem(Type, Option<String>),
     AssociatedTypeItem(Vec<TyParamBound>, Option<Type>),
     DefaultImplItem(DefaultImpl),
+    /// An item that has been stripped by a rustdoc pass
+    StrippedItem(Box<ItemEnum>),
 }
 
 #[derive(Clone, RustcEncodable, RustcDecodable, Debug)]
@@ -448,10 +426,54 @@ impl Clean<Item> for doctree::Module {
     }
 }
 
+pub trait Attributes {
+    fn has_word(&self, &str) -> bool;
+    fn value<'a>(&'a self, &str) -> Option<&'a str>;
+    fn list<'a>(&'a self, &str) -> &'a [Attribute];
+}
+
+impl Attributes for [Attribute] {
+    /// Returns whether the attribute list contains a specific `Word`
+    fn has_word(&self, word: &str) -> bool {
+        for attr in self {
+            if let Word(ref w) = *attr {
+                if word == *w {
+                    return true;
+                }
+            }
+        }
+        false
+    }
+
+    /// Finds an attribute as NameValue and returns the corresponding value found.
+    fn value<'a>(&'a self, name: &str) -> Option<&'a str> {
+        for attr in self {
+            if let NameValue(ref x, ref v) = *attr {
+                if name == *x {
+                    return Some(v);
+                }
+            }
+        }
+        None
+    }
+
+    /// Finds an attribute as List and returns the list of attributes nested inside.
+    fn list<'a>(&'a self, name: &str) -> &'a [Attribute] {
+        for attr in self {
+            if let List(ref x, ref list) = *attr {
+                if name == *x {
+                    return &list[..];
+                }
+            }
+        }
+        &[]
+    }
+}
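Note: the new `Attributes` helpers replace the open-coded attribute matching removed elsewhere in this commit (`Item::doc_list`, `Item::doc_value`, rustdoc's local `is_doc_hidden`). A stand-alone model of the same interface over a simplified `Attribute` enum, written mostly with iterator combinators rather than the explicit loops used in-tree; everything apart from the trait's method names is illustrative:

```rust
#[derive(Clone, Debug, PartialEq)]
enum Attribute {
    Word(String),
    List(String, Vec<Attribute>),
    NameValue(String, String),
}

trait Attributes {
    fn has_word(&self, word: &str) -> bool;
    fn value(&self, name: &str) -> Option<&str>;
    fn list(&self, name: &str) -> &[Attribute];
}

impl Attributes for [Attribute] {
    // Does the list contain a bare `Word` equal to `word`?
    fn has_word(&self, word: &str) -> bool {
        self.iter().any(|a| matches!(a, Attribute::Word(w) if w == word))
    }
    // First `NameValue` entry named `name`.
    fn value(&self, name: &str) -> Option<&str> {
        self.iter().find_map(|a| match a {
            Attribute::NameValue(x, v) if x == name => Some(v.as_str()),
            _ => None,
        })
    }
    // Nested attributes of the first `List` entry named `name`, or an empty slice.
    fn list(&self, name: &str) -> &[Attribute] {
        for a in self {
            if let Attribute::List(x, list) = a {
                if x == name {
                    return list;
                }
            }
        }
        &[]
    }
}

fn main() {
    // Roughly `#[doc(hidden)]` plus `#[doc = "docs"]` in this model.
    let attrs = vec![
        Attribute::List("doc".to_string(), vec![Attribute::Word("hidden".to_string())]),
        Attribute::NameValue("doc".to_string(), "docs".to_string()),
    ];
    // The pattern the patch uses to skip hidden items:
    assert!(attrs.list("doc").has_word("hidden"));
    assert_eq!(attrs.value("doc"), Some("docs"));
}
```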
+
 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)]
 pub enum Attribute {
     Word(String),
-    List(String, Vec<Attribute> ),
+    List(String, Vec<Attribute>),
     NameValue(String, String)
 }
 
@@ -496,12 +518,6 @@ impl attr::AttrMetaMethods for Attribute {
     fn meta_item_list<'a>(&'a self) -> Option<&'a [P<ast::MetaItem>]> { None }
     fn span(&self) -> codemap::Span { unimplemented!() }
 }
-impl<'a> attr::AttrMetaMethods for &'a Attribute {
-    fn name(&self) -> InternedString { (**self).name() }
-    fn value_str(&self) -> Option<InternedString> { (**self).value_str() }
-    fn meta_item_list(&self) -> Option<&[P<ast::MetaItem>]> { None }
-    fn span(&self) -> codemap::Span { unimplemented!() }
-}
 
 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)]
 pub struct TyParam {
@@ -543,7 +559,7 @@ pub enum TyParamBound {
 
 impl TyParamBound {
     fn maybe_sized(cx: &DocContext) -> TyParamBound {
-        use rustc_front::hir::TraitBoundModifier as TBM;
+        use rustc::hir::TraitBoundModifier as TBM;
         let mut sized_bound = ty::BoundSized.clean(cx);
         if let TyParamBound::TraitBound(_, ref mut tbm) = sized_bound {
             *tbm = TBM::Maybe
@@ -552,17 +568,11 @@ impl TyParamBound {
     }
 
     fn is_sized_bound(&self, cx: &DocContext) -> bool {
-        use rustc_front::hir::TraitBoundModifier as TBM;
+        use rustc::hir::TraitBoundModifier as TBM;
         if let Some(tcx) = cx.tcx_opt() {
-            let sized_did = match tcx.lang_items.sized_trait() {
-                Some(did) => did,
-                None => return false
-            };
-            if let TyParamBound::TraitBound(PolyTrait {
-                trait_: Type::ResolvedPath { did, .. }, ..
-            }, TBM::None) = *self {
-                if did == sized_did {
-                    return true
+            if let TyParamBound::TraitBound(PolyTrait { ref trait_, .. }, TBM::None) = *self {
+                if trait_.def_id() == tcx.lang_items.sized_trait() {
+                    return true;
                 }
             }
         }
@@ -601,7 +611,7 @@ impl<'tcx> Clean<(Vec<TyParamBound>, Vec<TypeBinding>)> for ty::ExistentialBound
 
 fn external_path_params(cx: &DocContext, trait_did: Option<DefId>,
                         bindings: Vec<TypeBinding>, substs: &subst::Substs) -> PathParameters {
-    let lifetimes = substs.regions().get_slice(subst::TypeSpace)
+    let lifetimes = substs.regions.get_slice(subst::TypeSpace)
                     .iter()
                     .filter_map(|v| v.clean(cx))
                     .collect();
@@ -711,7 +721,7 @@ impl<'tcx> Clean<TyParamBound> for ty::TraitRef<'tcx> {
                         if let &ty::Region::ReLateBound(_, _) = *reg {
                             debug!("  hit an ReLateBound {:?}", reg);
                             if let Some(lt) = reg.clean(cx) {
-                                late_bounds.push(lt)
+                                late_bounds.push(lt);
                             }
                         }
                     }
@@ -719,22 +729,25 @@ impl<'tcx> Clean<TyParamBound> for ty::TraitRef<'tcx> {
             }
         }
 
-        TraitBound(PolyTrait {
-            trait_: ResolvedPath {
-                path: path,
-                typarams: None,
-                did: self.def_id,
-                is_generic: false,
+        TraitBound(
+            PolyTrait {
+                trait_: ResolvedPath {
+                    path: path,
+                    typarams: None,
+                    did: self.def_id,
+                    is_generic: false,
+                },
+                lifetimes: late_bounds,
             },
-            lifetimes: late_bounds
-        }, hir::TraitBoundModifier::None)
+            hir::TraitBoundModifier::None
+        )
     }
 }
 
 impl<'tcx> Clean<Option<Vec<TyParamBound>>> for subst::Substs<'tcx> {
     fn clean(&self, cx: &DocContext) -> Option<Vec<TyParamBound>> {
         let mut v = Vec::new();
-        v.extend(self.regions().iter().filter_map(|r| r.clean(cx)).map(RegionBound));
+        v.extend(self.regions.iter().filter_map(|r| r.clean(cx)).map(RegionBound));
         v.extend(self.types.iter().map(|t| TraitBound(PolyTrait {
             trait_: t.clean(cx),
             lifetimes: vec![]
@@ -780,8 +793,7 @@ impl Clean<Option<Lifetime>> for ty::Region {
     fn clean(&self, cx: &DocContext) -> Option<Lifetime> {
         match *self {
             ty::ReStatic => Some(Lifetime::statik()),
-            ty::ReLateBound(_, ty::BrNamed(_, name)) =>
-                Some(Lifetime(name.to_string())),
+            ty::ReLateBound(_, ty::BrNamed(_, name)) => Some(Lifetime(name.to_string())),
             ty::ReEarlyBound(ref data) => Some(Lifetime(data.name.clean(cx))),
 
             ty::ReLateBound(..) |
@@ -827,7 +839,7 @@ impl Clean<WherePredicate> for hir::WherePredicate {
 
 impl<'a> Clean<WherePredicate> for ty::Predicate<'a> {
     fn clean(&self, cx: &DocContext) -> WherePredicate {
-        use rustc::middle::ty::Predicate;
+        use rustc::ty::Predicate;
 
         match *self {
             Predicate::Trait(ref pred) => pred.clean(cx),
@@ -928,7 +940,6 @@ impl<'a, 'tcx> Clean<Generics> for (&'a ty::Generics<'tcx>,
                                     &'a ty::GenericPredicates<'tcx>,
                                     subst::ParamSpace) {
     fn clean(&self, cx: &DocContext) -> Generics {
-        use std::collections::HashSet;
         use self::WherePredicate as WP;
 
         let (gens, preds, space) = *self;
@@ -1151,12 +1162,12 @@ impl<'tcx> Clean<Type> for ty::FnOutput<'tcx> {
 impl<'a, 'tcx> Clean<FnDecl> for (DefId, &'a ty::PolyFnSig<'tcx>) {
     fn clean(&self, cx: &DocContext) -> FnDecl {
         let (did, sig) = *self;
-        let mut names = if let Some(_) = cx.map.as_local_node_id(did) {
+        let mut names = if cx.map.as_local_node_id(did).is_some() {
             vec![].into_iter()
         } else {
             cx.tcx().sess.cstore.method_arg_names(did).into_iter()
         }.peekable();
-        if names.peek().map(|s| &**s) == Some("self") {
+        if let Some("self") = names.peek().map(|s| &s[..]) {
             let _ = names.next();
         }
         FnDecl {
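
The argument-name handling above now skips a leading `self` with an `if let` on the peeked value instead of comparing against `Some("self")`. A tiny standalone version of that peek-and-skip step:

    fn main() {
        let args = vec!["self".to_string(), "key".to_string(), "value".to_string()];
        let mut names = args.into_iter().peekable();

        // Drop a leading `self`, if present, before using the remaining names.
        if let Some("self") = names.peek().map(|s| &s[..]) {
            let _ = names.next();
        }

        let rest: Vec<String> = names.collect();
        assert_eq!(rest, ["key", "value"]);
    }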
@@ -1408,7 +1419,7 @@ pub struct PolyTrait {
 }
 
 /// A representation of a Type suitable for hyperlinking purposes. Ideally one can get the original
-/// type out of the AST/ty::ctxt given one of these, if more information is needed. Most importantly
+/// type out of the AST/TyCtxt given one of these, if more information is needed. Most importantly
 /// it does not preserve mutability or boxes.
 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)]
 pub enum Type {
@@ -1482,6 +1493,16 @@ pub enum TypeKind {
     TypeTypedef,
 }
 
+pub trait GetDefId {
+    fn def_id(&self) -> Option<DefId>;
+}
+
+impl<T: GetDefId> GetDefId for Option<T> {
+    fn def_id(&self) -> Option<DefId> {
+        self.as_ref().and_then(|d| d.def_id())
+    }
+}
+
 impl Type {
     pub fn primitive_type(&self) -> Option<PrimitiveType> {
         match *self {
@@ -1497,6 +1518,15 @@ impl Type {
     }
 }
 
+impl GetDefId for Type {
+    fn def_id(&self) -> Option<DefId> {
+        match *self {
+            ResolvedPath { did, .. } => Some(did),
+            _ => None,
+        }
+    }
+}
+
 impl PrimitiveType {
     fn from_str(s: &str) -> Option<PrimitiveType> {
         match s {
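
The `GetDefId` trait above, together with its blanket impl for `Option<T>`, is what lets later hunks ask `trait_.def_id()` directly on an optional type and compare the result. A toy version of the same pattern; the `Id` and `Node` names are invented for illustration:

    type Id = u32;

    trait GetId {
        fn id(&self) -> Option<Id>;
    }

    struct Node {
        id: Id,
    }

    impl GetId for Node {
        fn id(&self) -> Option<Id> {
            Some(self.id)
        }
    }

    // Blanket impl: an Option<Node> can be asked for its id without matching first.
    impl<T: GetId> GetId for Option<T> {
        fn id(&self) -> Option<Id> {
            self.as_ref().and_then(|n| n.id())
        }
    }

    fn main() {
        let some: Option<Node> = Some(Node { id: 7 });
        let none: Option<Node> = None;
        assert_eq!(some.id(), Some(7));
        assert_eq!(none.id(), None);
    }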
@@ -1524,24 +1554,16 @@ impl PrimitiveType {
     }
 
     fn find(attrs: &[Attribute]) -> Option<PrimitiveType> {
-        for attr in attrs {
-            let list = match *attr {
-                List(ref k, ref l) if *k == "doc" => l,
-                _ => continue,
-            };
-            for sub_attr in list {
-                let value = match *sub_attr {
-                    NameValue(ref k, ref v)
-                        if *k == "primitive" => v,
-                    _ => continue,
-                };
-                match PrimitiveType::from_str(value) {
-                    Some(p) => return Some(p),
-                    None => {}
+        for attr in attrs.list("doc") {
+            if let NameValue(ref k, ref v) = *attr {
+                if "primitive" == *k {
+                    if let ret@Some(..) = PrimitiveType::from_str(v) {
+                        return ret;
+                    }
                 }
             }
         }
-        return None
+        None
     }
 
     pub fn to_string(&self) -> &'static str {
@@ -1583,7 +1605,7 @@ impl PrimitiveType {
 
 impl Clean<Type> for hir::Ty {
     fn clean(&self, cx: &DocContext) -> Type {
-        use rustc_front::hir::*;
+        use rustc::hir::*;
         match self.node {
             TyPtr(ref m) => RawPointer(m.mutbl.clean(cx), box m.ty.clean(cx)),
             TyRptr(ref l, ref m) =>
@@ -1627,15 +1649,9 @@ impl Clean<Type> for hir::Ty {
                 }
             }
             TyBareFn(ref barefn) => BareFunction(box barefn.clean(cx)),
-            TyPolyTraitRef(ref bounds) => {
-                PolyTraitRef(bounds.clean(cx))
-            },
-            TyInfer => {
-                Infer
-            },
-            TyTypeof(..) => {
-                panic!("Unimplemented type {:?}", self.node)
-            },
+            TyPolyTraitRef(ref bounds) => PolyTraitRef(bounds.clean(cx)),
+            TyInfer => Infer,
+            TyTypeof(..) => panic!("Unimplemented type {:?}", self.node),
         }
     }
 }
@@ -1673,7 +1689,8 @@ impl<'tcx> Clean<Type> for ty::Ty<'tcx> {
                 mutability: mt.mutbl.clean(cx),
                 type_: box mt.ty.clean(cx),
             },
-            ty::TyBareFn(_, ref fty) => BareFunction(box BareFunctionDecl {
+            ty::TyFnDef(_, _, ref fty) |
+            ty::TyFnPtr(ref fty) => BareFunction(box BareFunctionDecl {
                 unsafety: fty.unsafety,
                 generics: Generics {
                     lifetimes: Vec::new(),
@@ -1727,53 +1744,34 @@ impl<'tcx> Clean<Type> for ty::Ty<'tcx> {
     }
 }
 
-#[derive(Clone, RustcEncodable, RustcDecodable, Debug)]
-pub enum StructField {
-    HiddenStructField, // inserted later by strip passes
-    TypedStructField(Type),
-}
-
 impl Clean<Item> for hir::StructField {
     fn clean(&self, cx: &DocContext) -> Item {
-        let (name, vis) = match self.node.kind {
-            hir::NamedField(id, vis) => (Some(id), vis),
-            hir::UnnamedField(vis) => (None, vis)
-        };
         Item {
-            name: name.clean(cx),
-            attrs: self.node.attrs.clean(cx),
+            name: Some(self.name).clean(cx),
+            attrs: self.attrs.clean(cx),
             source: self.span.clean(cx),
-            visibility: Some(vis),
-            stability: get_stability(cx, cx.map.local_def_id(self.node.id)),
-            deprecation: get_deprecation(cx, cx.map.local_def_id(self.node.id)),
-            def_id: cx.map.local_def_id(self.node.id),
-            inner: StructFieldItem(TypedStructField(self.node.ty.clean(cx))),
+            visibility: self.vis.clean(cx),
+            stability: get_stability(cx, cx.map.local_def_id(self.id)),
+            deprecation: get_deprecation(cx, cx.map.local_def_id(self.id)),
+            def_id: cx.map.local_def_id(self.id),
+            inner: StructFieldItem(self.ty.clean(cx)),
         }
     }
 }
 
 impl<'tcx> Clean<Item> for ty::FieldDefData<'tcx, 'static> {
     fn clean(&self, cx: &DocContext) -> Item {
-        use syntax::parse::token::special_idents::unnamed_field;
         // FIXME: possible O(n^2)-ness! Not my fault.
-        let attr_map =
-            cx.tcx().sess.cstore.crate_struct_field_attrs(self.did.krate);
-
-        let (name, attrs) = if self.name == unnamed_field.name {
-            (None, None)
-        } else {
-            (Some(self.name), Some(attr_map.get(&self.did).unwrap()))
-        };
-
+        let attr_map = cx.tcx().sess.cstore.crate_struct_field_attrs(self.did.krate);
         Item {
-            name: name.clean(cx),
-            attrs: attrs.unwrap_or(&Vec::new()).clean(cx),
+            name: Some(self.name).clean(cx),
+            attrs: attr_map.get(&self.did).unwrap_or(&Vec::new()).clean(cx),
             source: Span::empty(),
-            visibility: Some(self.vis),
+            visibility: self.vis.clean(cx),
             stability: get_stability(cx, self.did),
             deprecation: get_deprecation(cx, self.did),
             def_id: self.did,
-            inner: StructFieldItem(TypedStructField(self.unsubst_ty().clean(cx))),
+            inner: StructFieldItem(self.unsubst_ty().clean(cx)),
         }
     }
 }
@@ -1782,7 +1780,13 @@ pub type Visibility = hir::Visibility;
 
 impl Clean<Option<Visibility>> for hir::Visibility {
     fn clean(&self, _: &DocContext) -> Option<Visibility> {
-        Some(*self)
+        Some(self.clone())
+    }
+}
+
+impl Clean<Option<Visibility>> for ty::Visibility {
+    fn clean(&self, _: &DocContext) -> Option<Visibility> {
+        Some(if *self == ty::Visibility::Public { hir::Public } else { hir::Inherited })
     }
 }
 
@@ -1824,7 +1828,7 @@ pub struct VariantStruct {
     pub fields_stripped: bool,
 }
 
-impl Clean<VariantStruct> for ::rustc_front::hir::VariantData {
+impl Clean<VariantStruct> for ::rustc::hir::VariantData {
     fn clean(&self, cx: &DocContext) -> VariantStruct {
         VariantStruct {
             struct_type: doctree::struct_type_from_def(self),
@@ -1884,7 +1888,6 @@ impl Clean<Item> for doctree::Variant {
 
 impl<'tcx> Clean<Item> for ty::VariantDefData<'tcx, 'static> {
     fn clean(&self, cx: &DocContext) -> Item {
-        // use syntax::parse::token::special_idents::unnamed_field;
         let kind = match self.kind() {
             ty::VariantKind::Unit => CLikeVariant,
             ty::VariantKind::Tuple => {
@@ -1900,21 +1903,12 @@ impl<'tcx> Clean<Item> for ty::VariantDefData<'tcx, 'static> {
                         Item {
                             source: Span::empty(),
                             name: Some(field.name.clean(cx)),
-                            attrs: Vec::new(),
-                            visibility: Some(hir::Public),
-                            // FIXME: this is not accurate, we need an id for
-                            //        the specific field but we're using the id
-                            //        for the whole variant. Thus we read the
-                            //        stability from the whole variant as well.
-                            //        Struct variants are experimental and need
-                            //        more infrastructure work before we can get
-                            //        at the needed information here.
-                            def_id: self.did,
-                            stability: get_stability(cx, self.did),
-                            deprecation: get_deprecation(cx, self.did),
-                            inner: StructFieldItem(
-                                TypedStructField(field.unsubst_ty().clean(cx))
-                            )
+                            attrs: cx.tcx().get_attrs(field.did).clean(cx),
+                            visibility: field.vis.clean(cx),
+                            def_id: field.did,
+                            stability: get_stability(cx, field.did),
+                            deprecation: get_deprecation(cx, field.did),
+                            inner: StructFieldItem(field.unsubst_ty().clean(cx))
                         }
                     }).collect()
                 })
@@ -1924,7 +1918,7 @@ impl<'tcx> Clean<Item> for ty::VariantDefData<'tcx, 'static> {
             name: Some(self.name.clean(cx)),
             attrs: inline::load_attrs(cx, cx.tcx(), self.did),
             source: Span::empty(),
-            visibility: Some(hir::Public),
+            visibility: Some(hir::Inherited),
             def_id: self.did,
             inner: VariantItem(Variant { kind: kind }),
             stability: get_stability(cx, self.did),
@@ -1946,7 +1940,7 @@ fn struct_def_to_variant_kind(struct_def: &hir::VariantData, cx: &DocContext) ->
     } else if struct_def.is_unit() {
         CLikeVariant
     } else {
-        TupleVariant(struct_def.fields().iter().map(|x| x.node.ty.clean(cx)).collect())
+        TupleVariant(struct_def.fields().iter().map(|x| x.ty.clean(cx)).collect())
     }
 }
 
@@ -2224,6 +2218,7 @@ impl Clean<ImplPolarity> for hir::ImplPolarity {
 pub struct Impl {
     pub unsafety: hir::Unsafety,
     pub generics: Generics,
+    pub provided_trait_methods: HashSet<String>,
     pub trait_: Option<Type>,
     pub for_: Type,
     pub items: Vec<Item>,
@@ -2243,12 +2238,19 @@ impl Clean<Vec<Item>> for doctree::Impl {
 
         // If this impl block is an implementation of the Deref trait, then we
         // need to try inlining the target's inherent impl blocks as well.
-        if let Some(ResolvedPath { did, .. }) = trait_ {
-            if Some(did) == cx.deref_trait_did.get() {
-                build_deref_target_impls(cx, &items, &mut ret);
-            }
+        if trait_.def_id() == cx.deref_trait_did.get() {
+            build_deref_target_impls(cx, &items, &mut ret);
         }
 
+        let provided = trait_.def_id().and_then(|did| {
+            cx.tcx_opt().map(|tcx| {
+                tcx.provided_trait_methods(did)
+                   .into_iter()
+                   .map(|meth| meth.name.to_string())
+                   .collect()
+            })
+        }).unwrap_or(HashSet::new());
+
         ret.push(Item {
             name: None,
             attrs: self.attrs.clean(cx),
@@ -2260,6 +2262,7 @@ impl Clean<Vec<Item>> for doctree::Impl {
             inner: ImplItem(Impl {
                 unsafety: self.unsafety,
                 generics: self.generics.clean(cx),
+                provided_trait_methods: provided,
                 trait_: trait_,
                 for_: self.for_.clean(cx),
                 items: items,
@@ -2267,7 +2270,7 @@ impl Clean<Vec<Item>> for doctree::Impl {
                 polarity: Some(self.polarity.clean(cx)),
             }),
         });
-        return ret;
+        ret
     }
 }
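
The `provided` computation above chains `Option` combinators so the impl ends up with a `HashSet<String>` even when there is no trait or no type context. A loose sketch of that shape; the lookup function and ids below are invented for the example, not rustc APIs:

    use std::collections::HashSet;

    // Stand-in for `tcx.provided_trait_methods(did)`.
    fn provided_methods_for(trait_id: u32) -> Vec<&'static str> {
        if trait_id == 1 { vec!["clone_from"] } else { vec![] }
    }

    fn provided(trait_id: Option<u32>, have_tcx: bool) -> HashSet<String> {
        trait_id.and_then(|did| {
            if have_tcx {
                Some(provided_methods_for(did)
                         .into_iter()
                         .map(|m| m.to_string())
                         .collect())
            } else {
                None
            }
        }).unwrap_or(HashSet::new())
    }

    fn main() {
        assert!(provided(Some(1), true).contains("clone_from"));
        assert!(provided(Some(1), false).is_empty());
        assert!(provided(None, true).is_empty());
    }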
 
@@ -2407,9 +2410,8 @@ impl Clean<Vec<Item>> for doctree::Import {
             }
             hir::ViewPathSimple(name, ref p) => {
                 if !denied {
-                    match inline::try_inline(cx, self.id, Some(name)) {
-                        Some(items) => return items,
-                        None => {}
+                    if let Some(items) = inline::try_inline(cx, self.id, Some(name)) {
+                        return items;
                     }
                 }
                 (vec![], SimpleImport(name.clean(cx),
@@ -2474,9 +2476,8 @@ impl Clean<Vec<Item>> for hir::ForeignMod {
     fn clean(&self, cx: &DocContext) -> Vec<Item> {
         let mut items = self.items.clean(cx);
         for item in &mut items {
-            match item.inner {
-                ForeignFunctionItem(ref mut f) => f.abi = self.abi,
-                _ => {}
+            if let ForeignFunctionItem(ref mut f) = item.inner {
+                f.abi = self.abi;
             }
         }
         items
@@ -2555,7 +2556,7 @@ fn lit_to_string(lit: &ast::Lit) -> String {
 }
 
 fn name_from_pat(p: &hir::Pat) -> String {
-    use rustc_front::hir::*;
+    use rustc::hir::*;
     debug!("Trying to get a name from pattern: {:?}", p);
 
     match p.node {
@@ -2612,11 +2613,7 @@ fn resolve_type(cx: &DocContext,
             };
         }
     };
-    let def = match tcx.def_map.borrow().get(&id) {
-        Some(k) => k.full_def(),
-        None => panic!("unresolved id not in defmap")
-    };
-
+    let def = tcx.def_map.borrow().get(&id).expect("unresolved id not in defmap").full_def();
     debug!("resolve_type: def={:?}", def);
 
     let is_generic = match def {
@@ -2673,7 +2670,7 @@ fn register_def(cx: &DocContext, def: Def) -> DefId {
         let t = inline::build_external_trait(cx, tcx, did);
         cx.external_traits.borrow_mut().as_mut().unwrap().insert(did, t);
     }
-    return did;
+    did
 }
 
 fn resolve_use_source(cx: &DocContext, path: Path, id: ast::NodeId) -> ImportSource {
@@ -2746,12 +2743,10 @@ impl Clean<Stability> for attr::Stability {
                 _=> "".to_string(),
             },
             reason: {
-                if let Some(ref depr) = self.rustc_depr {
-                    depr.reason.to_string()
-                } else if let attr::Unstable {reason: Some(ref reason), ..} = self.level {
-                    reason.to_string()
-                } else {
-                    "".to_string()
+                match (&self.rustc_depr, &self.level) {
+                    (&Some(ref depr), _) => depr.reason.to_string(),
+                    (&None, &attr::Unstable {reason: Some(ref reason), ..}) => reason.to_string(),
+                    _ => "".to_string(),
                 }
             },
             issue: match self.level {
index 716b88d5534ae865795f7f502ce61103ac9295db..4ba412cdc851bb4a843710b94bae8f518ad3b36e 100644 (file)
@@ -11,7 +11,7 @@
 //! Simplification of where clauses and parameter bounds into a prettier and
 //! more canonical form.
 //!
-//! Currently all cross-crate-inlined function use `middle::ty` to reconstruct
+//! Currently all cross-crate-inlined functions use `rustc::ty` to reconstruct
 //! the AST (e.g. see all of `clean::inline`), but this is not always a
 //! non-lossy transformation. The current format of storage for where clauses
 //! for functions and such is simply a list of predicates. One example of this
@@ -29,8 +29,8 @@
 use std::mem;
 use std::collections::HashMap;
 
-use rustc::middle::def_id::DefId;
-use rustc::middle::subst;
+use rustc::hir::def_id::DefId;
+use rustc::ty::subst;
 
 use clean::PathParameters as PP;
 use clean::WherePredicate as WP;
index 6b8f34ac73f6413802c697ccc4ae2e145d40ac49..6b7aa103e1d0afb7c71ae2f62d48349bf695d0a1 100644 (file)
@@ -13,14 +13,14 @@ use rustc_lint;
 use rustc_driver::{driver, target_features, abort_on_err};
 use rustc::dep_graph::DepGraph;
 use rustc::session::{self, config};
-use rustc::middle::def_id::DefId;
+use rustc::hir::def_id::DefId;
 use rustc::middle::privacy::AccessLevels;
-use rustc::middle::ty;
-use rustc::front::map as hir_map;
+use rustc::ty::{self, TyCtxt};
+use rustc::hir::map as hir_map;
 use rustc::lint;
 use rustc_trans::back::link;
 use rustc_resolve as resolve;
-use rustc_front::lowering::{lower_crate, LoweringContext};
+use rustc::hir::lowering::{lower_crate, LoweringContext};
 use rustc_metadata::cstore::CStore;
 
 use syntax::{ast, codemap, errors};
@@ -41,7 +41,7 @@ pub use rustc::session::search_paths::SearchPaths;
 
 /// Are we generating documentation (`Typed`) or tests (`NotTyped`)?
 pub enum MaybeTyped<'a, 'tcx: 'a> {
-    Typed(&'a ty::ctxt<'tcx>),
+    Typed(&'a TyCtxt<'tcx>),
     NotTyped(&'a session::Session)
 }
 
@@ -56,7 +56,7 @@ pub struct DocContext<'a, 'tcx: 'a> {
     pub external_traits: RefCell<Option<HashMap<DefId, clean::Trait>>>,
     pub external_typarams: RefCell<Option<HashMap<DefId, String>>>,
     pub inlined: RefCell<Option<HashSet<DefId>>>,
-    pub populated_crate_impls: RefCell<HashSet<ast::CrateNum>>,
+    pub all_crate_impls: RefCell<HashMap<ast::CrateNum, Vec<clean::Item>>>,
     pub deref_trait_did: Cell<Option<DefId>>,
 }
 
@@ -68,14 +68,14 @@ impl<'b, 'tcx> DocContext<'b, 'tcx> {
         }
     }
 
-    pub fn tcx_opt<'a>(&'a self) -> Option<&'a ty::ctxt<'tcx>> {
+    pub fn tcx_opt<'a>(&'a self) -> Option<&'a TyCtxt<'tcx>> {
         match self.maybe_typed {
             Typed(tcx) => Some(tcx),
             NotTyped(_) => None
         }
     }
 
-    pub fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
+    pub fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx> {
         let tcx_opt = self.tcx_opt();
         tcx_opt.expect("tcx not present")
     }
@@ -133,7 +133,7 @@ pub fn run_core(search_paths: SearchPaths, cfgs: Vec<String>, externs: Externs,
     let mut cfg = config::build_configuration(&sess);
     target_features::add_configuration(&mut cfg, &sess);
 
-    let krate = driver::phase_1_parse_input(&sess, cfg, &input);
+    let krate = panictry!(driver::phase_1_parse_input(&sess, cfg, &input));
 
     let name = link::find_crate_name(Some(&sess), &krate.attrs,
                                      &input);
@@ -179,7 +179,7 @@ pub fn run_core(search_paths: SearchPaths, cfgs: Vec<String>, externs: Externs,
             external_typarams: RefCell::new(Some(HashMap::new())),
             external_paths: RefCell::new(Some(HashMap::new())),
             inlined: RefCell::new(Some(HashSet::new())),
-            populated_crate_impls: RefCell::new(HashSet::new()),
+            all_crate_impls: RefCell::new(HashMap::new()),
             deref_trait_did: Cell::new(None),
         };
         debug!("crate: {:?}", ctxt.map.krate());
index fc0422b3a3f037ad6be59491121c57deb068532f..2db4b779eed9844d5d5e0a092b4bfd7cc6aff082 100644 (file)
@@ -20,7 +20,7 @@ use syntax::ast;
 use syntax::ast::{Name, NodeId};
 use syntax::attr;
 use syntax::ptr::P;
-use rustc_front::hir;
+use rustc::hir;
 
 pub struct Module {
     pub name: Option<Name>,
index 57cb87e1b2d011fb7a4c08147bfc1a491e687f8c..2ecb071fcc2a4228694c8eeb47c693b19b246fd9 100644 (file)
@@ -38,9 +38,9 @@ impl ExternalHtml {
 }
 
 pub fn load_string(input: &Path) -> io::Result<Option<String>> {
-    let mut f = try!(File::open(input));
+    let mut f = File::open(input)?;
     let mut d = Vec::new();
-    try!(f.read_to_end(&mut d));
+    f.read_to_end(&mut d)?;
     Ok(str::from_utf8(&d).map(|s| s.to_string()).ok())
 }
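
This hunk, and several below in the formatting and highlighting code, replace `try!(expr)` with the `?` operator; both early-return an `Err` from a function whose return type is `Result`. A minimal sketch of the new form (the path passed in `main` is just a placeholder):

    use std::fs::File;
    use std::io::{self, Read};

    fn load(path: &str) -> io::Result<String> {
        let mut f = File::open(path)?;       // was: let mut f = try!(File::open(path));
        let mut s = String::new();
        f.read_to_string(&mut s)?;           // was: try!(f.read_to_string(&mut s));
        Ok(s)
    }

    fn main() {
        println!("loaded: {}", load("external.md").is_ok());
    }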
 
index 5a4f95d1a1a5a7b24f4ce8050e7faeb105b83b09..5595c749256dfc7971a46edbd2b42c3b36806b7d 100644 (file)
@@ -9,8 +9,26 @@
 // except according to those terms.
 
 use clean::*;
-use std::collections::HashMap;
-use std::mem::{replace, swap};
+
+pub enum FoldItem {
+    Retain(Item),
+    Strip(Item),
+    Erase,
+}
+
+impl FoldItem {
+    pub fn fold(self) -> Option<Item> {
+        match self {
+            FoldItem::Erase => None,
+            FoldItem::Retain(i) => Some(i),
+            FoldItem::Strip(item@ Item { inner: StrippedItem(..), .. } ) => Some(item),
+            FoldItem::Strip(mut i) => {
+                i.inner = StrippedItem(box i.inner);
+                Some(i)
+            }
+        }
+    }
+}
 
 pub trait DocFolder : Sized {
     fn fold_item(&mut self, item: Item) -> Option<Item> {
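
The `FoldItem` enum above encodes the three outcomes of a strip pass (keep, keep-but-mark-stripped, erase) and collapses them into an `Option<Item>`. A small self-contained model of that flow, with toy types standing in for rustdoc's `Item`/`ItemEnum`:

    #[derive(Debug)]
    enum Doc {
        Plain(String),
        Stripped(Box<Doc>),
    }

    enum FoldDoc {
        Retain(Doc),
        Strip(Doc),
        Erase,
    }

    impl FoldDoc {
        fn fold(self) -> Option<Doc> {
            match self {
                FoldDoc::Erase => None,
                FoldDoc::Retain(d) => Some(d),
                // Already-stripped docs pass through without double wrapping.
                FoldDoc::Strip(d @ Doc::Stripped(..)) => Some(d),
                FoldDoc::Strip(d) => Some(Doc::Stripped(Box::new(d))),
            }
        }
    }

    fn main() {
        assert!(FoldDoc::Erase.fold().is_none());
        let stripped = FoldDoc::Strip(Doc::Plain("private".to_string())).fold();
        println!("{:?}", stripped); // Some(Stripped(Plain("private")))
    }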
@@ -18,51 +36,58 @@ pub trait DocFolder : Sized {
     }
 
     /// don't override!
-    fn fold_item_recur(&mut self, item: Item) -> Option<Item> {
-        let Item { attrs, name, source, visibility, def_id, inner, stability, deprecation } = item;
-        let inner = inner;
-        let inner = match inner {
-            StructItem(mut i) => {
-                let mut foo = Vec::new(); swap(&mut foo, &mut i.fields);
-                let num_fields = foo.len();
-                i.fields.extend(foo.into_iter().filter_map(|x| self.fold_item(x)));
-                i.fields_stripped |= num_fields != i.fields.len();
-                StructItem(i)
-            },
+    fn fold_inner_recur(&mut self, inner: ItemEnum) -> ItemEnum {
+        match inner {
+            StrippedItem(..) => unreachable!(),
             ModuleItem(i) => {
                 ModuleItem(self.fold_mod(i))
             },
+            StructItem(mut i) => {
+                let num_fields = i.fields.len();
+                i.fields = i.fields.into_iter().filter_map(|x| self.fold_item(x)).collect();
+                i.fields_stripped |= num_fields != i.fields.len() ||
+                                     i.fields.iter().any(|f| f.is_stripped());
+                StructItem(i)
+            },
             EnumItem(mut i) => {
-                let mut foo = Vec::new(); swap(&mut foo, &mut i.variants);
-                let num_variants = foo.len();
-                i.variants.extend(foo.into_iter().filter_map(|x| self.fold_item(x)));
-                i.variants_stripped |= num_variants != i.variants.len();
+                let num_variants = i.variants.len();
+                i.variants = i.variants.into_iter().filter_map(|x| self.fold_item(x)).collect();
+                i.variants_stripped |= num_variants != i.variants.len() ||
+                                       i.variants.iter().any(|f| f.is_stripped());
                 EnumItem(i)
             },
             TraitItem(mut i) => {
-                let mut foo = Vec::new(); swap(&mut foo, &mut i.items);
-                i.items.extend(foo.into_iter().filter_map(|x| self.fold_item(x)));
+                i.items = i.items.into_iter().filter_map(|x| self.fold_item(x)).collect();
                 TraitItem(i)
             },
             ImplItem(mut i) => {
-                let mut foo = Vec::new(); swap(&mut foo, &mut i.items);
-                i.items.extend(foo.into_iter().filter_map(|x| self.fold_item(x)));
+                i.items = i.items.into_iter().filter_map(|x| self.fold_item(x)).collect();
                 ImplItem(i)
             },
             VariantItem(i) => {
                 let i2 = i.clone(); // this clone is small
                 match i.kind {
                     StructVariant(mut j) => {
-                        let mut foo = Vec::new(); swap(&mut foo, &mut j.fields);
-                        let num_fields = foo.len();
-                        j.fields.extend(foo.into_iter().filter_map(|x| self.fold_item(x)));
-                        j.fields_stripped |= num_fields != j.fields.len();
+                        let num_fields = j.fields.len();
+                        j.fields = j.fields.into_iter().filter_map(|x| self.fold_item(x)).collect();
+                        j.fields_stripped |= num_fields != j.fields.len() ||
+                                             j.fields.iter().any(|f| f.is_stripped());
                         VariantItem(Variant {kind: StructVariant(j), ..i2})
                     },
                     _ => VariantItem(i2)
                 }
             },
             x => x
+        }
+    }
+
+    /// don't override!
+    fn fold_item_recur(&mut self, item: Item) -> Option<Item> {
+        let Item { attrs, name, source, visibility, def_id, inner, stability, deprecation } = item;
+
+        let inner = match inner {
+            StrippedItem(box i) => StrippedItem(box self.fold_inner_recur(i)),
+            _ => self.fold_inner_recur(inner),
         };
 
         Some(Item { attrs: attrs, name: name, source: source, inner: inner,
@@ -78,16 +103,12 @@ pub trait DocFolder : Sized {
     }
 
     fn fold_crate(&mut self, mut c: Crate) -> Crate {
-        c.module = match replace(&mut c.module, None) {
-            Some(module) => self.fold_item(module), None => None
-        };
-        let external_traits = replace(&mut c.external_traits, HashMap::new());
-        c.external_traits = external_traits.into_iter().map(|(k, mut v)| {
-            let items = replace(&mut v.items, Vec::new());
-            v.items = items.into_iter().filter_map(|i| self.fold_item(i))
-                           .collect();
+        c.module = c.module.and_then(|module| self.fold_item(module));
+
+        c.external_traits = c.external_traits.into_iter().map(|(k, mut v)| {
+            v.items = v.items.into_iter().filter_map(|i| self.fold_item(i)).collect();
             (k, v)
         }).collect();
-        return c;
+        c
     }
 }
index f04e1cc75202f348ceb13318ad89ec88dd13ff75..1173e6447f50cacc0b0cf1bfe3df33f83fefc424 100644 (file)
@@ -29,7 +29,7 @@ impl<'a> fmt::Display for Escape<'a> {
         for (i, ch) in s.bytes().enumerate() {
             match ch as char {
                 '<' | '>' | '&' | '\'' | '"' => {
-                    try!(fmt.write_str(&pile_o_bits[last.. i]));
+                    fmt.write_str(&pile_o_bits[last.. i])?;
                     let s = match ch as char {
                         '>' => "&gt;",
                         '<' => "&lt;",
@@ -38,7 +38,7 @@ impl<'a> fmt::Display for Escape<'a> {
                         '"' => "&quot;",
                         _ => unreachable!()
                     };
-                    try!(fmt.write_str(s));
+                    fmt.write_str(s)?;
                     last = i + 1;
                 }
                 _ => {}
@@ -46,7 +46,7 @@ impl<'a> fmt::Display for Escape<'a> {
         }
 
         if last < s.len() {
-            try!(fmt.write_str(&pile_o_bits[last..]));
+            fmt.write_str(&pile_o_bits[last..])?;
         }
         Ok(())
     }
index 9d5189cfd0b12f992f0766fdc0978ef883ba20f4..ce20ad05acb0e05cc730d1b4d644227bd1f31f0c 100644 (file)
@@ -19,9 +19,9 @@ use std::fmt;
 use std::iter::repeat;
 
 use rustc::middle::cstore::LOCAL_CRATE;
-use rustc::middle::def_id::{CRATE_DEF_INDEX, DefId};
+use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId};
 use syntax::abi::Abi;
-use rustc_front::hir;
+use rustc::hir;
 
 use clean;
 use html::item_type::ItemType;
@@ -31,7 +31,7 @@ use html::render::{cache, CURRENT_LOCATION_KEY};
 /// Helper to render an optional visibility with a space after it (if the
 /// visibility is present)
 #[derive(Copy, Clone)]
-pub struct VisSpace(pub Option<hir::Visibility>);
+pub struct VisSpace<'a>(pub &'a Option<hir::Visibility>);
 /// Similarly to VisSpace, this structure is used to render a function style with a
 /// space after it.
 #[derive(Copy, Clone)]
@@ -56,9 +56,9 @@ pub struct TyParamBounds<'a>(pub &'a [clean::TyParamBound]);
 pub struct CommaSep<'a, T: 'a>(pub &'a [T]);
 pub struct AbiSpace(pub Abi);
 
-impl VisSpace {
-    pub fn get(&self) -> Option<hir::Visibility> {
-        let VisSpace(v) = *self; v
+impl<'a> VisSpace<'a> {
+    pub fn get(self) -> &'a Option<hir::Visibility> {
+        let VisSpace(v) = self; v
     }
 }
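
The `VisSpace` changes above (and in its `Display` impl further down) turn an owning, `Copy` newtype into one that borrows `&'a Option<hir::Visibility>`, presumably because `Visibility` is no longer `Copy` (the `clean` impl earlier now calls `self.clone()`). A toy version of that borrowing-newtype pattern with an invented `Vis` enum:

    use std::fmt;

    enum Vis {
        Public,
        Inherited,
    }

    // Borrowing newtype: cheap to construct and still Copy, since it only
    // holds a shared reference.
    #[derive(Copy, Clone)]
    struct VisSpace<'a>(&'a Option<Vis>);

    impl<'a> fmt::Display for VisSpace<'a> {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            match *self.0 {
                Some(Vis::Public) => f.write_str("pub "),
                Some(Vis::Inherited) | None => Ok(()),
            }
        }
    }

    fn main() {
        let vis = Some(Vis::Public);
        println!("{}fn example()", VisSpace(&vis));
    }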
 
@@ -77,8 +77,8 @@ impl ConstnessSpace {
 impl<'a, T: fmt::Display> fmt::Display for CommaSep<'a, T> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         for (i, item) in self.0.iter().enumerate() {
-            if i != 0 { try!(write!(f, ", ")); }
-            try!(write!(f, "{}", item));
+            if i != 0 { write!(f, ", ")?; }
+            write!(f, "{}", item)?;
         }
         Ok(())
     }
@@ -89,9 +89,9 @@ impl<'a> fmt::Display for TyParamBounds<'a> {
         let &TyParamBounds(bounds) = self;
         for (i, bound) in bounds.iter().enumerate() {
             if i > 0 {
-                try!(f.write_str(" + "));
+                f.write_str(" + ")?;
             }
-            try!(write!(f, "{}", *bound));
+            write!(f, "{}", *bound)?;
         }
         Ok(())
     }
@@ -100,36 +100,36 @@ impl<'a> fmt::Display for TyParamBounds<'a> {
 impl fmt::Display for clean::Generics {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         if self.lifetimes.is_empty() && self.type_params.is_empty() { return Ok(()) }
-        try!(f.write_str("&lt;"));
+        f.write_str("&lt;")?;
 
         for (i, life) in self.lifetimes.iter().enumerate() {
             if i > 0 {
-                try!(f.write_str(", "));
+                f.write_str(", ")?;
             }
-            try!(write!(f, "{}", *life));
+            write!(f, "{}", *life)?;
         }
 
         if !self.type_params.is_empty() {
             if !self.lifetimes.is_empty() {
-                try!(f.write_str(", "));
+                f.write_str(", ")?;
             }
             for (i, tp) in self.type_params.iter().enumerate() {
                 if i > 0 {
-                    try!(f.write_str(", "))
+                    f.write_str(", ")?
                 }
-                try!(f.write_str(&tp.name));
+                f.write_str(&tp.name)?;
 
                 if !tp.bounds.is_empty() {
-                    try!(write!(f, ": {}", TyParamBounds(&tp.bounds)));
+                    write!(f, ": {}", TyParamBounds(&tp.bounds))?;
                 }
 
                 match tp.default {
-                    Some(ref ty) => { try!(write!(f, " = {}", ty)); },
+                    Some(ref ty) => { write!(f, " = {}", ty)?; },
                     None => {}
                 };
             }
         }
-        try!(f.write_str("&gt;"));
+        f.write_str("&gt;")?;
         Ok(())
     }
 }
@@ -140,40 +140,40 @@ impl<'a> fmt::Display for WhereClause<'a> {
         if gens.where_predicates.is_empty() {
             return Ok(());
         }
-        try!(f.write_str(" <span class='where'>where "));
+        f.write_str(" <span class='where'>where ")?;
         for (i, pred) in gens.where_predicates.iter().enumerate() {
             if i > 0 {
-                try!(f.write_str(", "));
+                f.write_str(", ")?;
             }
             match pred {
                 &clean::WherePredicate::BoundPredicate { ref ty, ref bounds } => {
                     let bounds = bounds;
-                    try!(write!(f, "{}: {}", ty, TyParamBounds(bounds)));
+                    write!(f, "{}: {}", ty, TyParamBounds(bounds))?;
                 }
                 &clean::WherePredicate::RegionPredicate { ref lifetime,
                                                           ref bounds } => {
-                    try!(write!(f, "{}: ", lifetime));
+                    write!(f, "{}: ", lifetime)?;
                     for (i, lifetime) in bounds.iter().enumerate() {
                         if i > 0 {
-                            try!(f.write_str(" + "));
+                            f.write_str(" + ")?;
                         }
 
-                        try!(write!(f, "{}", lifetime));
+                        write!(f, "{}", lifetime)?;
                     }
                 }
                 &clean::WherePredicate::EqPredicate { ref lhs, ref rhs } => {
-                    try!(write!(f, "{} == {}", lhs, rhs));
+                    write!(f, "{} == {}", lhs, rhs)?;
                 }
             }
         }
-        try!(f.write_str("</span>"));
+        f.write_str("</span>")?;
         Ok(())
     }
 }
 
 impl fmt::Display for clean::Lifetime {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        try!(f.write_str(self.get_ref()));
+        f.write_str(self.get_ref())?;
         Ok(())
     }
 }
@@ -181,14 +181,14 @@ impl fmt::Display for clean::Lifetime {
 impl fmt::Display for clean::PolyTrait {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         if !self.lifetimes.is_empty() {
-            try!(f.write_str("for&lt;"));
+            f.write_str("for&lt;")?;
             for (i, lt) in self.lifetimes.iter().enumerate() {
                 if i > 0 {
-                    try!(f.write_str(", "));
+                    f.write_str(", ")?;
                 }
-                try!(write!(f, "{}", lt));
+                write!(f, "{}", lt)?;
             }
-            try!(f.write_str("&gt; "));
+            f.write_str("&gt; ")?;
         }
         write!(f, "{}", self.trait_)
     }
@@ -218,46 +218,46 @@ impl fmt::Display for clean::PathParameters {
                 ref lifetimes, ref types, ref bindings
             } => {
                 if !lifetimes.is_empty() || !types.is_empty() || !bindings.is_empty() {
-                    try!(f.write_str("&lt;"));
+                    f.write_str("&lt;")?;
                     let mut comma = false;
                     for lifetime in lifetimes {
                         if comma {
-                            try!(f.write_str(", "));
+                            f.write_str(", ")?;
                         }
                         comma = true;
-                        try!(write!(f, "{}", *lifetime));
+                        write!(f, "{}", *lifetime)?;
                     }
                     for ty in types {
                         if comma {
-                            try!(f.write_str(", "));
+                            f.write_str(", ")?;
                         }
                         comma = true;
-                        try!(write!(f, "{}", *ty));
+                        write!(f, "{}", *ty)?;
                     }
                     for binding in bindings {
                         if comma {
-                            try!(f.write_str(", "));
+                            f.write_str(", ")?;
                         }
                         comma = true;
-                        try!(write!(f, "{}", *binding));
+                        write!(f, "{}", *binding)?;
                     }
-                    try!(f.write_str("&gt;"));
+                    f.write_str("&gt;")?;
                 }
             }
             clean::PathParameters::Parenthesized { ref inputs, ref output } => {
-                try!(f.write_str("("));
+                f.write_str("(")?;
                 let mut comma = false;
                 for ty in inputs {
                     if comma {
-                        try!(f.write_str(", "));
+                        f.write_str(", ")?;
                     }
                     comma = true;
-                    try!(write!(f, "{}", *ty));
+                    write!(f, "{}", *ty)?;
                 }
-                try!(f.write_str(")"));
+                f.write_str(")")?;
                 if let Some(ref ty) = *output {
-                    try!(f.write_str(" -&gt; "));
-                    try!(write!(f, "{}", ty));
+                    f.write_str(" -&gt; ")?;
+                    write!(f, "{}", ty)?;
                 }
             }
         }
@@ -267,7 +267,7 @@ impl fmt::Display for clean::PathParameters {
 
 impl fmt::Display for clean::PathSegment {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        try!(f.write_str(&self.name));
+        f.write_str(&self.name)?;
         write!(f, "{}", self.params)
     }
 }
@@ -275,14 +275,14 @@ impl fmt::Display for clean::PathSegment {
 impl fmt::Display for clean::Path {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         if self.global {
-            try!(f.write_str("::"))
+            f.write_str("::")?
         }
 
         for (i, seg) in self.segments.iter().enumerate() {
             if i > 0 {
-                try!(f.write_str("::"))
+                f.write_str("::")?
             }
-            try!(write!(f, "{}", seg));
+            write!(f, "{}", seg)?;
         }
         Ok(())
     }
@@ -339,20 +339,20 @@ fn resolved_path(w: &mut fmt::Formatter, did: DefId, path: &clean::Path,
             Some(mut root) => {
                 for seg in &path.segments[..amt] {
                     if "super" == seg.name || "self" == seg.name {
-                        try!(write!(w, "{}::", seg.name));
+                        write!(w, "{}::", seg.name)?;
                     } else {
                         root.push_str(&seg.name);
                         root.push_str("/");
-                        try!(write!(w, "<a class='mod'
-                                            href='{}index.html'>{}</a>::",
-                                      root,
-                                      seg.name));
+                        write!(w, "<a class='mod'
+                                       href='{}index.html'>{}</a>::",
+                                 root,
+                                 seg.name)?;
                     }
                 }
             }
             None => {
                 for seg in &path.segments[..amt] {
-                    try!(write!(w, "{}::", seg.name));
+                    write!(w, "{}::", seg.name)?;
                 }
             }
         }
@@ -360,12 +360,12 @@ fn resolved_path(w: &mut fmt::Formatter, did: DefId, path: &clean::Path,
 
     match href(did) {
         Some((url, shortty, fqp)) => {
-            try!(write!(w, "<a class='{}' href='{}' title='{}'>{}</a>",
-                          shortty, url, fqp.join("::"), last.name));
+            write!(w, "<a class='{}' href='{}' title='{}'>{}</a>",
+                   shortty, url, fqp.join("::"), last.name)?;
         }
-        _ => try!(write!(w, "{}", last.name)),
+        _ => write!(w, "{}", last.name)?,
     }
-    try!(write!(w, "{}", last.params));
+    write!(w, "{}", last.params)?;
     Ok(())
 }
 
@@ -378,9 +378,9 @@ fn primitive_link(f: &mut fmt::Formatter,
         Some(&LOCAL_CRATE) => {
             let len = CURRENT_LOCATION_KEY.with(|s| s.borrow().len());
             let len = if len == 0 {0} else {len - 1};
-            try!(write!(f, "<a class='primitive' href='{}primitive.{}.html'>",
-                        repeat("../").take(len).collect::<String>(),
-                        prim.to_url_str()));
+            write!(f, "<a class='primitive' href='{}primitive.{}.html'>",
+                   repeat("../").take(len).collect::<String>(),
+                   prim.to_url_str())?;
             needs_termination = true;
         }
         Some(&cnum) => {
@@ -398,10 +398,10 @@ fn primitive_link(f: &mut fmt::Formatter,
             };
             match loc {
                 Some(root) => {
-                    try!(write!(f, "<a class='primitive' href='{}{}/primitive.{}.html'>",
-                                root,
-                                path.0.first().unwrap(),
-                                prim.to_url_str()));
+                    write!(f, "<a class='primitive' href='{}{}/primitive.{}.html'>",
+                           root,
+                           path.0.first().unwrap(),
+                           prim.to_url_str())?;
                     needs_termination = true;
                 }
                 None => {}
@@ -409,9 +409,9 @@ fn primitive_link(f: &mut fmt::Formatter,
         }
         None => {}
     }
-    try!(write!(f, "{}", name));
+    write!(f, "{}", name)?;
     if needs_termination {
-        try!(write!(f, "</a>"));
+        write!(f, "</a>")?;
     }
     Ok(())
 }
@@ -422,8 +422,8 @@ fn tybounds(w: &mut fmt::Formatter,
     match *typarams {
         Some(ref params) => {
             for param in params {
-                try!(write!(w, " + "));
-                try!(write!(w, "{}", *param));
+                write!(w, " + ")?;
+                write!(w, "{}", *param)?;
             }
             Ok(())
         }
@@ -439,7 +439,7 @@ impl fmt::Display for clean::Type {
             }
             clean::ResolvedPath{ did, ref typarams, ref path, is_generic } => {
                 // Paths like T::Output and Self::Output should be rendered with all segments
-                try!(resolved_path(f, did, path, is_generic));
+                resolved_path(f, did, path, is_generic)?;
                 tybounds(f, typarams)
             }
             clean::Infer => write!(f, "_"),
@@ -459,25 +459,25 @@ impl fmt::Display for clean::Type {
                 match &**typs {
                     [] => primitive_link(f, clean::PrimitiveTuple, "()"),
                     [ref one] => {
-                        try!(primitive_link(f, clean::PrimitiveTuple, "("));
-                        try!(write!(f, "{},", one));
+                        primitive_link(f, clean::PrimitiveTuple, "(")?;
+                        write!(f, "{},", one)?;
                         primitive_link(f, clean::PrimitiveTuple, ")")
                     }
                     many => {
-                        try!(primitive_link(f, clean::PrimitiveTuple, "("));
-                        try!(write!(f, "{}", CommaSep(&many)));
+                        primitive_link(f, clean::PrimitiveTuple, "(")?;
+                        write!(f, "{}", CommaSep(&many))?;
                         primitive_link(f, clean::PrimitiveTuple, ")")
                     }
                 }
             }
             clean::Vector(ref t) => {
-                try!(primitive_link(f, clean::Slice, &format!("[")));
-                try!(write!(f, "{}", t));
+                primitive_link(f, clean::Slice, &format!("["))?;
+                write!(f, "{}", t)?;
                 primitive_link(f, clean::Slice, &format!("]"))
             }
             clean::FixedVector(ref t, ref s) => {
-                try!(primitive_link(f, clean::PrimitiveType::Array, "["));
-                try!(write!(f, "{}", t));
+                primitive_link(f, clean::PrimitiveType::Array, "[")?;
+                write!(f, "{}", t)?;
                 primitive_link(f, clean::PrimitiveType::Array,
                                &format!("; {}]", *s))
             }
@@ -489,8 +489,8 @@ impl fmt::Display for clean::Type {
                                        &format!("*{}{}", RawMutableSpace(m), t))
                     }
                     _ => {
-                        try!(primitive_link(f, clean::PrimitiveType::PrimitiveRawPointer,
-                                            &format!("*{}", RawMutableSpace(m))));
+                        primitive_link(f, clean::PrimitiveType::PrimitiveRawPointer,
+                                       &format!("*{}", RawMutableSpace(m)))?;
                         write!(f, "{}", t)
                     }
                 }
@@ -508,9 +508,8 @@ impl fmt::Display for clean::Type {
                                 primitive_link(f, clean::Slice,
                                     &format!("&amp;{}{}[{}]", lt, m, **bt)),
                             _ => {
-                                try!(primitive_link(f, clean::Slice,
-                                    &format!("&amp;{}{}[", lt, m)));
-                                try!(write!(f, "{}", **bt));
+                                primitive_link(f, clean::Slice, &format!("&amp;{}{}[", lt, m))?;
+                                write!(f, "{}", **bt)?;
                                 primitive_link(f, clean::Slice, "]")
                             }
                         }
@@ -523,9 +522,9 @@ impl fmt::Display for clean::Type {
             clean::PolyTraitRef(ref bounds) => {
                 for (i, bound) in bounds.iter().enumerate() {
                     if i != 0 {
-                        try!(write!(f, " + "));
+                        write!(f, " + ")?;
                     }
-                    try!(write!(f, "{}", *bound));
+                    write!(f, "{}", *bound)?;
                 }
                 Ok(())
             }
@@ -544,9 +543,9 @@ impl fmt::Display for clean::Type {
                 ref self_type,
                 trait_: box clean::ResolvedPath { did, ref typarams, .. },
             } => {
-                try!(write!(f, "{}::", self_type));
+                write!(f, "{}::", self_type)?;
                 let path = clean::Path::singleton(name.clone());
-                try!(resolved_path(f, did, &path, false));
+                resolved_path(f, did, &path, false)?;
 
                 // FIXME: `typarams` are not rendered, and this seems bad?
                 drop(typarams);
@@ -564,13 +563,13 @@ impl fmt::Display for clean::Type {
 
 impl fmt::Display for clean::Impl {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        try!(write!(f, "impl{} ", self.generics));
+        write!(f, "impl{} ", self.generics)?;
         if let Some(ref ty) = self.trait_ {
-            try!(write!(f, "{}{} for ",
-                        if self.polarity == Some(clean::ImplPolarity::Negative) { "!" } else { "" },
-                        *ty));
+            write!(f, "{}{} for ",
+                   if self.polarity == Some(clean::ImplPolarity::Negative) { "!" } else { "" },
+                   *ty)?;
         }
-        try!(write!(f, "{}{}", self.for_, WhereClause(&self.generics)));
+        write!(f, "{}{}", self.for_, WhereClause(&self.generics))?;
         Ok(())
     }
 }
@@ -578,11 +577,11 @@ impl fmt::Display for clean::Impl {
 impl fmt::Display for clean::Arguments {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         for (i, input) in self.values.iter().enumerate() {
-            if i > 0 { try!(write!(f, ", ")); }
+            if i > 0 { write!(f, ", ")?; }
             if !input.name.is_empty() {
-                try!(write!(f, "{}: ", input.name));
+                write!(f, "{}: ", input.name)?;
             }
-            try!(write!(f, "{}", input.type_));
+            write!(f, "{}", input.type_)?;
         }
         Ok(())
     }
@@ -637,9 +636,9 @@ impl<'a> fmt::Display for Method<'a> {
     }
 }
 
-impl fmt::Display for VisSpace {
+impl<'a> fmt::Display for VisSpace<'a> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match self.get() {
+        match *self.get() {
             Some(hir::Public) => write!(f, "pub "),
             Some(hir::Inherited) | None => Ok(())
         }
@@ -678,12 +677,12 @@ impl fmt::Display for clean::Import {
                 write!(f, "use {}::*;", *src)
             }
             clean::ImportList(ref src, ref names) => {
-                try!(write!(f, "use {}::{{", *src));
+                write!(f, "use {}::{{", *src)?;
                 for (i, n) in names.iter().enumerate() {
                     if i > 0 {
-                        try!(write!(f, ", "));
+                        write!(f, ", ")?;
                     }
-                    try!(write!(f, "{}", *n));
+                    write!(f, "{}", *n)?;
                 }
                 write!(f, "}};")
             }
@@ -698,9 +697,9 @@ impl fmt::Display for clean::ImportSource {
             _ => {
                 for (i, seg) in self.path.segments.iter().enumerate() {
                     if i > 0 {
-                        try!(write!(f, "::"))
+                        write!(f, "::")?
                     }
-                    try!(write!(f, "{}", seg.name));
+                    write!(f, "{}", seg.name)?;
                 }
                 Ok(())
             }
@@ -713,13 +712,13 @@ impl fmt::Display for clean::ViewListIdent {
         match self.source {
             Some(did) => {
                 let path = clean::Path::singleton(self.name.clone());
-                try!(resolved_path(f, did, &path, false));
+                resolved_path(f, did, &path, false)?;
             }
-            _ => try!(write!(f, "{}", self.name)),
+            _ => write!(f, "{}", self.name)?,
         }
 
         if let Some(ref name) = self.rename {
-            try!(write!(f, " as {}", name));
+            write!(f, " as {}", name)?;
         }
         Ok(())
     }
index cca365d16c85c09a4f6acbe74f88e6f83fa1167a..7ca4703a2e1859e298a9ba486156f196758526c6 100644 (file)
@@ -17,22 +17,36 @@ use html::escape::Escape;
 
 use std::io;
 use std::io::prelude::*;
-use syntax::parse::lexer;
+use syntax::parse::lexer::{self, Reader};
 use syntax::parse::token;
 use syntax::parse;
 
-/// Highlights some source code, returning the HTML output.
-pub fn highlight(src: &str, class: Option<&str>, id: Option<&str>) -> String {
+/// Highlights `src`, returning the HTML output.
+pub fn render_with_highlighting(src: &str, class: Option<&str>, id: Option<&str>) -> String {
     debug!("highlighting: ================\n{}\n==============", src);
     let sess = parse::ParseSess::new();
     let fm = sess.codemap().new_filemap("<stdin>".to_string(), src.to_string());
 
     let mut out = Vec::new();
-    doit(&sess,
-         lexer::StringReader::new(&sess.span_diagnostic, fm),
-         class,
-         id,
-         &mut out).unwrap();
+    write_header(class, id, &mut out).unwrap();
+    write_source(&sess,
+                 lexer::StringReader::new(&sess.span_diagnostic, fm),
+                 &mut out).unwrap();
+    write_footer(&mut out).unwrap();
+    String::from_utf8_lossy(&out[..]).into_owned()
+}
+
+/// Highlights `src`, returning the HTML output. Returns only the inner HTML to
+/// be inserted into an element; cf. `render_with_highlighting`, which includes
+/// an enclosing `<pre>` block.
+pub fn render_inner_with_highlighting(src: &str) -> String {
+    let sess = parse::ParseSess::new();
+    let fm = sess.codemap().new_filemap("<stdin>".to_string(), src.to_string());
+
+    let mut out = Vec::new();
+    write_source(&sess,
+                 lexer::StringReader::new(&sess.span_diagnostic, fm),
+                 &mut out).unwrap();
     String::from_utf8_lossy(&out[..]).into_owned()
 }
 
@@ -43,17 +57,10 @@ pub fn highlight(src: &str, class: Option<&str>, id: Option<&str>) -> String {
 /// it's used. All source code emission is done as slices from the source map,
 /// not from the tokens themselves, in order to stay true to the original
 /// source.
-fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
-        class: Option<&str>, id: Option<&str>,
-        out: &mut Write) -> io::Result<()> {
-    use syntax::parse::lexer::Reader;
-
-    try!(write!(out, "<pre "));
-    match id {
-        Some(id) => try!(write!(out, "id='{}' ", id)),
-        None => {}
-    }
-    try!(write!(out, "class='rust {}'>\n", class.unwrap_or("")));
+fn write_source(sess: &parse::ParseSess,
+                mut lexer: lexer::StringReader,
+                out: &mut Write)
+                -> io::Result<()> {
     let mut is_attribute = false;
     let mut is_macro = false;
     let mut is_macro_nonterminal = false;
@@ -66,16 +73,16 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
 
         let klass = match next.tok {
             token::Whitespace => {
-                try!(write!(out, "{}", Escape(&snip(next.sp))));
+                write!(out, "{}", Escape(&snip(next.sp)))?;
                 continue
             },
             token::Comment => {
-                try!(write!(out, "<span class='comment'>{}</span>",
-                            Escape(&snip(next.sp))));
+                write!(out, "<span class='comment'>{}</span>",
+                       Escape(&snip(next.sp)))?;
                 continue
             },
             token::Shebang(s) => {
-                try!(write!(out, "{}", Escape(&s.as_str())));
+                write!(out, "{}", Escape(&s.as_str()))?;
                 continue
             },
             // If this '&' token is directly adjacent to another token, assume
@@ -114,13 +121,13 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
             // span when we see the ']'.
             token::Pound => {
                 is_attribute = true;
-                try!(write!(out, r"<span class='attribute'>#"));
+                write!(out, r"<span class='attribute'>#")?;
                 continue
             }
             token::CloseDelim(token::Bracket) => {
                 if is_attribute {
                     is_attribute = false;
-                    try!(write!(out, "]</span>"));
+                    write!(out, "]</span>")?;
                     continue
                 } else {
                     ""
@@ -178,12 +185,27 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
         // stringifying this token
         let snip = sess.codemap().span_to_snippet(next.sp).unwrap();
         if klass == "" {
-            try!(write!(out, "{}", Escape(&snip)));
+            write!(out, "{}", Escape(&snip))?;
         } else {
-            try!(write!(out, "<span class='{}'>{}</span>", klass,
-                          Escape(&snip)));
+            write!(out, "<span class='{}'>{}</span>", klass, Escape(&snip))?;
         }
     }
 
+    Ok(())
+}
+
+fn write_header(class: Option<&str>,
+                id: Option<&str>,
+                out: &mut Write)
+                -> io::Result<()> {
+    write!(out, "<pre ")?;
+    match id {
+        Some(id) => write!(out, "id='{}' ", id)?,
+        None => {}
+    }
+    write!(out, "class='rust {}'>\n", class.unwrap_or(""))
+}
+
+fn write_footer(out: &mut Write) -> io::Result<()> {
     write!(out, "</pre>\n")
 }
index afc93f41172e84498729ceece2adebcc6845022d..74f7b099044f16b3700faf88061e6567ae82cb87 100644 (file)
@@ -44,7 +44,12 @@ pub enum ItemType {
 
 impl ItemType {
     pub fn from_item(item: &clean::Item) -> ItemType {
-        match item.inner {
+        let inner = match item.inner {
+            clean::StrippedItem(box ref item) => item,
+            ref inner@_ => inner,
+        };
+
+        match *inner {
             clean::ModuleItem(..)          => ItemType::Module,
             clean::ExternCrateItem(..)     => ItemType::ExternCrate,
             clean::ImportItem(..)          => ItemType::Import,
@@ -67,6 +72,7 @@ impl ItemType {
             clean::AssociatedConstItem(..) => ItemType::AssociatedConst,
             clean::AssociatedTypeItem(..)  => ItemType::AssociatedType,
             clean::DefaultImplItem(..)     => ItemType::Impl,
+            clean::StrippedItem(..)        => unreachable!(),
         }
     }
 
index 975b4d3636f2d10b983fb0a79c61b6d92d550c01..8b2e84974985c96579e268d8a00d9adc5261ccfc 100644 (file)
@@ -28,11 +28,12 @@ pub struct Page<'a> {
     pub ty: &'a str,
     pub root_path: &'a str,
     pub description: &'a str,
-    pub keywords: &'a str
+    pub keywords: &'a str,
 }
 
 pub fn render<T: fmt::Display, S: fmt::Display>(
-    dst: &mut io::Write, layout: &Layout, page: &Page, sidebar: &S, t: &T)
+    dst: &mut io::Write, layout: &Layout, page: &Page, sidebar: &S, t: &T,
+    css_file_extension: bool)
     -> io::Result<()>
 {
     write!(dst,
@@ -49,6 +50,7 @@ r##"<!DOCTYPE html>
 
     <link rel="stylesheet" type="text/css" href="{root_path}rustdoc.css">
     <link rel="stylesheet" type="text/css" href="{root_path}main.css">
+    {css_extension}
 
     {favicon}
     {in_header}
@@ -141,6 +143,12 @@ r##"<!DOCTYPE html>
     <script defer src="{root_path}search-index.js"></script>
 </body>
 </html>"##,
+    css_extension = if css_file_extension {
+        format!("<link rel=\"stylesheet\" type=\"text/css\" href=\"{root_path}theme.css\">",
+                root_path = page.root_path)
+    } else {
+        "".to_owned()
+    },
     content   = *t,
     root_path = page.root_path,
     ty        = page.ty,
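
The new boolean parameter threaded through `layout::render` only controls whether an extra stylesheet link pointing at `theme.css` is emitted into the page head. A hedged sketch of that link construction, with an illustrative helper name not present in the source:

    fn css_extension_tag(css_file_extension: bool, root_path: &str) -> String {
        if css_file_extension {
            // Same shape as the `css_extension` format string above.
            format!("<link rel=\"stylesheet\" type=\"text/css\" href=\"{}theme.css\">",
                    root_path)
        } else {
            String::new()
        }
    }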
index 9fd476bfbab552b2f006b5e887c9e046aff3c6cb..3baf22b38ef6874b2fca3e4199c3c11c96fead9e 100644 (file)
@@ -262,9 +262,9 @@ pub fn render(w: &mut fmt::Formatter, s: &str, print_toc: bool) -> fmt::Result {
                                               &Default::default());
                     s.push_str(&format!("<span class='rusttest'>{}</span>", Escape(&test)));
                 });
-                s.push_str(&highlight::highlight(&text,
-                                                 Some("rust-example-rendered"),
-                                                 None));
+                s.push_str(&highlight::render_with_highlighting(&text,
+                                                                Some("rust-example-rendered"),
+                                                                None));
                 let output = CString::new(s).unwrap();
                 hoedown_buffer_puts(ob, output.as_ptr());
             })
@@ -607,7 +607,7 @@ mod tests {
     fn issue_17736() {
         let markdown = "# title";
         format!("{}", Markdown(markdown));
-        reset_ids();
+        reset_ids(true);
     }
 
     #[test]
@@ -615,7 +615,7 @@ mod tests {
         fn t(input: &str, expect: &str) {
             let output = format!("{}", Markdown(input));
             assert_eq!(output, expect);
-            reset_ids();
+            reset_ids(true);
         }
 
         t("# Foo bar", "\n<h1 id='foo-bar' class='section-header'>\
@@ -654,7 +654,7 @@ mod tests {
               <a href='#panics-1'>Panics</a></h1>");
         };
         test();
-        reset_ids();
+        reset_ids(true);
         test();
     }
 
index af6dec8dc868895a972afbcb1b3283d9304ae426..c5850089578cd059a051adde3a952496837b84d7 100644 (file)
@@ -56,13 +56,13 @@ use serialize::json::{ToJson, Json, as_json};
 use syntax::{abi, ast};
 use syntax::feature_gate::UnstableFeatures;
 use rustc::middle::cstore::LOCAL_CRATE;
-use rustc::middle::def_id::{CRATE_DEF_INDEX, DefId};
+use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId};
 use rustc::middle::privacy::AccessLevels;
 use rustc::middle::stability;
 use rustc::session::config::get_unstable_features_setting;
-use rustc_front::hir;
+use rustc::hir;
 
-use clean::{self, SelfTy};
+use clean::{self, SelfTy, Attributes, GetDefId};
 use doctree;
 use fold::DocFolder;
 use html::escape::Escape;
@@ -91,12 +91,20 @@ pub struct Context {
     /// String representation of how to get back to the root path of the 'doc/'
     /// folder in terms of a relative URL.
     pub root_path: String,
-    /// The path to the crate root source minus the file name.
-    /// Used for simplifying paths to the highlighted source code files.
-    pub src_root: PathBuf,
     /// The current destination folder of where HTML artifacts should be placed.
     /// This changes as the context descends into the module hierarchy.
     pub dst: PathBuf,
+    /// A flag which, when `true`, renders pages that redirect to the real
+    /// location of an item. This is used to allow external links to publicly
+    /// reused items to redirect to the right location.
+    pub render_redirect_pages: bool,
+    pub shared: Arc<SharedContext>,
+}
+
+pub struct SharedContext {
+    /// The path to the crate root source minus the file name.
+    /// Used for simplifying paths to the highlighted source code files.
+    pub src_root: PathBuf,
     /// This describes the layout of each page, and is not modified after
     /// creation of the context (contains info like the favicon and added html).
     pub layout: layout::Layout,
@@ -104,15 +112,16 @@ pub struct Context {
     /// the source files are present in the html rendering, then this will be
     /// `true`.
     pub include_sources: bool,
-    /// A flag, which when turned off, will render pages which redirect to the
-    /// real location of an item. This is used to allow external links to
-    /// publicly reused items to redirect to the right location.
-    pub render_redirect_pages: bool,
+    /// The local file sources we've emitted and their respective url-paths.
+    pub local_sources: HashMap<PathBuf, String>,
     /// All the passes that were run on this crate.
     pub passes: HashSet<String>,
     /// The base-URL of the issue tracker for when an item has been tagged with
     /// an issue number.
     pub issue_tracker_base_url: Option<String>,
+    /// The given user CSS file, which allows customizing the generated
+    /// documentation theme.
+    pub css_file_extension: Option<PathBuf>,
 }
 
 /// Indicates where an external crate can be found.
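
The split introduced here separates per-traversal state, which stays in `Context` and is cloned as rendering descends into modules, from configuration that never changes after setup, which moves behind an `Arc<SharedContext>` so those clones stay cheap. A rough sketch of the resulting shape, with field lists abbreviated:

    use std::path::PathBuf;
    use std::sync::Arc;

    struct SharedContext {
        src_root: PathBuf,
        include_sources: bool,
        // ... layout, passes, local_sources, css_file_extension, etc.
    }

    #[derive(Clone)]
    struct Context {
        current: Vec<String>,
        root_path: String,
        dst: PathBuf,
        render_redirect_pages: bool,
        shared: Arc<SharedContext>,
    }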
@@ -142,9 +151,7 @@ pub struct Impl {
 
 impl Impl {
     fn trait_did(&self) -> Option<DefId> {
-        self.impl_.trait_.as_ref().and_then(|tr| {
-            if let clean::ResolvedPath { did, .. } = *tr {Some(did)} else {None}
-        })
+        self.impl_.trait_.def_id()
     }
 }
 
@@ -245,8 +252,7 @@ pub struct Cache {
     parent_stack: Vec<DefId>,
     parent_is_trait_impl: bool,
     search_index: Vec<IndexItem>,
-    privmod: bool,
-    remove_priv: bool,
+    stripped_mod: bool,
     access_levels: AccessLevels<DefId>,
     deref_trait_did: Option<DefId>,
 
@@ -260,10 +266,8 @@ pub struct Cache {
 
 /// Helper struct to render all source code to HTML pages
 struct SourceCollector<'a> {
-    cx: &'a mut Context,
+    scx: &'a mut SharedContext,
 
-    /// Processed source-file paths
-    seen: HashSet<String>,
     /// Root destination to place all HTML output into
     dst: PathBuf,
 }
@@ -374,14 +378,20 @@ fn init_ids() -> HashMap<String, usize> {
      "deref-methods",
      "implementations",
      "derived_implementations"
-     ].into_iter().map(|id| (String::from(*id), 1)).collect::<HashMap<_, _>>()
+     ].into_iter().map(|id| (String::from(*id), 1)).collect()
 }
 
 /// This method resets the local table of used ID attributes. This is typically
 /// used at the beginning of rendering an entire HTML page to reset from the
 /// previous state (if any).
-pub fn reset_ids() {
-    USED_ID_MAP.with(|s| *s.borrow_mut() = init_ids());
+pub fn reset_ids(embedded: bool) {
+    USED_ID_MAP.with(|s| {
+        *s.borrow_mut() = if embedded {
+            init_ids()
+        } else {
+            HashMap::new()
+        };
+    });
 }
 
 pub fn derive_id(candidate: String) -> String {
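
`reset_ids` now takes a flag: embedded rendering (full rustdoc pages) reseeds the table with the standard section IDs from `init_ids`, while standalone markdown starts from an empty table. A minimal sketch of the deduplication that `derive_id` performs against that thread-local table, simplified rather than copied from rustdoc:

    use std::cell::RefCell;
    use std::collections::HashMap;

    thread_local!(static USED_ID_MAP: RefCell<HashMap<String, usize>> =
                  RefCell::new(HashMap::new()));

    // First use of a candidate returns it unchanged; later uses get a
    // numeric suffix: "foo", "foo-1", "foo-2", ...
    fn derive_id(candidate: String) -> String {
        USED_ID_MAP.with(|map| {
            let mut map = map.borrow_mut();
            let count = map.get(&candidate).cloned();
            let id = match count {
                None => candidate.clone(),
                Some(n) => format!("{}-{}", candidate, n),
            };
            *map.entry(candidate).or_insert(0) += 1;
            id
        })
    }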
@@ -404,17 +414,18 @@ pub fn derive_id(candidate: String) -> String {
 pub fn run(mut krate: clean::Crate,
            external_html: &ExternalHtml,
            dst: PathBuf,
-           passes: HashSet<String>) -> Result<(), Error> {
+           passes: HashSet<String>,
+           css_file_extension: Option<PathBuf>) -> Result<(), Error> {
     let src_root = match krate.src.parent() {
         Some(p) => p.to_path_buf(),
         None => PathBuf::new(),
     };
-    let mut cx = Context {
-        dst: dst,
+    let mut scx = SharedContext {
         src_root: src_root,
         passes: passes,
-        current: Vec::new(),
-        root_path: String::new(),
+        include_sources: true,
+        local_sources: HashMap::new(),
+        issue_tracker_base_url: None,
         layout: layout::Layout {
             logo: "".to_string(),
             favicon: "".to_string(),
@@ -422,52 +433,53 @@ pub fn run(mut krate: clean::Crate,
             krate: krate.name.clone(),
             playground_url: "".to_string(),
         },
-        include_sources: true,
-        render_redirect_pages: false,
-        issue_tracker_base_url: None,
+        css_file_extension: css_file_extension.clone(),
     };
 
-    try_err!(mkdir(&cx.dst), &cx.dst);
-
     // Crawl the crate attributes looking for attributes which control how we're
     // going to emit HTML
-    let default: &[_] = &[];
-    match krate.module.as_ref().map(|m| m.doc_list().unwrap_or(default)) {
-        Some(attrs) => {
-            for attr in attrs {
-                match *attr {
-                    clean::NameValue(ref x, ref s)
-                            if "html_favicon_url" == *x => {
-                        cx.layout.favicon = s.to_string();
-                    }
-                    clean::NameValue(ref x, ref s)
-                            if "html_logo_url" == *x => {
-                        cx.layout.logo = s.to_string();
-                    }
-                    clean::NameValue(ref x, ref s)
-                            if "html_playground_url" == *x => {
-                        cx.layout.playground_url = s.to_string();
-                        markdown::PLAYGROUND_KRATE.with(|slot| {
-                            if slot.borrow().is_none() {
-                                let name = krate.name.clone();
-                                *slot.borrow_mut() = Some(Some(name));
-                            }
-                        });
-                    }
-                    clean::NameValue(ref x, ref s)
-                            if "issue_tracker_base_url" == *x => {
-                        cx.issue_tracker_base_url = Some(s.to_string());
-                    }
-                    clean::Word(ref x)
-                            if "html_no_source" == *x => {
-                        cx.include_sources = false;
-                    }
-                    _ => {}
+    if let Some(attrs) = krate.module.as_ref().map(|m| m.attrs.list("doc")) {
+        for attr in attrs {
+            match *attr {
+                clean::NameValue(ref x, ref s)
+                        if "html_favicon_url" == *x => {
+                    scx.layout.favicon = s.to_string();
+                }
+                clean::NameValue(ref x, ref s)
+                        if "html_logo_url" == *x => {
+                    scx.layout.logo = s.to_string();
+                }
+                clean::NameValue(ref x, ref s)
+                        if "html_playground_url" == *x => {
+                    scx.layout.playground_url = s.to_string();
+                    markdown::PLAYGROUND_KRATE.with(|slot| {
+                        if slot.borrow().is_none() {
+                            let name = krate.name.clone();
+                            *slot.borrow_mut() = Some(Some(name));
+                        }
+                    });
                 }
+                clean::NameValue(ref x, ref s)
+                        if "issue_tracker_base_url" == *x => {
+                    scx.issue_tracker_base_url = Some(s.to_string());
+                }
+                clean::Word(ref x)
+                        if "html_no_source" == *x => {
+                    scx.include_sources = false;
+                }
+                _ => {}
             }
         }
-        None => {}
     }
+    try_err!(mkdir(&dst), &dst);
+    krate = render_sources(&dst, &mut scx, krate)?;
+    let cx = Context {
+        current: Vec::new(),
+        root_path: String::new(),
+        dst: dst,
+        render_redirect_pages: false,
+        shared: Arc::new(scx),
+    };
 
     // Crawl the crate to build various caches used for the output
     let analysis = ::ANALYSISKEY.with(|a| a.clone());
@@ -491,8 +503,7 @@ pub fn run(mut krate: clean::Crate,
         parent_is_trait_impl: false,
         extern_locations: HashMap::new(),
         primitive_locations: HashMap::new(),
-        remove_priv: cx.passes.contains("strip-private"),
-        privmod: false,
+        stripped_mod: false,
         access_levels: access_levels,
         orphan_methods: Vec::new(),
         traits: mem::replace(&mut krate.external_traits, HashMap::new()),
@@ -538,8 +549,7 @@ pub fn run(mut krate: clean::Crate,
     CACHE_KEY.with(|v| *v.borrow_mut() = cache.clone());
     CURRENT_LOCATION_KEY.with(|s| s.borrow_mut().clear());
 
-    try!(write_shared(&cx, &krate, &*cache, index));
-    let krate = try!(render_sources(&mut cx, krate));
+    write_shared(&cx, &krate, &*cache, index)?;
 
     // And finally render the whole crate's documentation
     cx.krate(krate)
@@ -632,53 +642,65 @@ fn write_shared(cx: &Context,
 
     // Add all the static files. These may already exist, but we just
     // overwrite them anyway to make sure that they're fresh and up-to-date.
-    try!(write(cx.dst.join("jquery.js"),
-               include_bytes!("static/jquery-2.1.4.min.js")));
-    try!(write(cx.dst.join("main.js"),
-               include_bytes!("static/main.js")));
-    try!(write(cx.dst.join("playpen.js"),
-               include_bytes!("static/playpen.js")));
-    try!(write(cx.dst.join("rustdoc.css"),
-               include_bytes!("static/rustdoc.css")));
-    try!(write(cx.dst.join("main.css"),
-               include_bytes!("static/styles/main.css")));
-    try!(write(cx.dst.join("normalize.css"),
-               include_bytes!("static/normalize.css")));
-    try!(write(cx.dst.join("FiraSans-Regular.woff"),
-               include_bytes!("static/FiraSans-Regular.woff")));
-    try!(write(cx.dst.join("FiraSans-Medium.woff"),
-               include_bytes!("static/FiraSans-Medium.woff")));
-    try!(write(cx.dst.join("FiraSans-LICENSE.txt"),
-               include_bytes!("static/FiraSans-LICENSE.txt")));
-    try!(write(cx.dst.join("Heuristica-Italic.woff"),
-               include_bytes!("static/Heuristica-Italic.woff")));
-    try!(write(cx.dst.join("Heuristica-LICENSE.txt"),
-               include_bytes!("static/Heuristica-LICENSE.txt")));
-    try!(write(cx.dst.join("SourceSerifPro-Regular.woff"),
-               include_bytes!("static/SourceSerifPro-Regular.woff")));
-    try!(write(cx.dst.join("SourceSerifPro-Bold.woff"),
-               include_bytes!("static/SourceSerifPro-Bold.woff")));
-    try!(write(cx.dst.join("SourceSerifPro-LICENSE.txt"),
-               include_bytes!("static/SourceSerifPro-LICENSE.txt")));
-    try!(write(cx.dst.join("SourceCodePro-Regular.woff"),
-               include_bytes!("static/SourceCodePro-Regular.woff")));
-    try!(write(cx.dst.join("SourceCodePro-Semibold.woff"),
-               include_bytes!("static/SourceCodePro-Semibold.woff")));
-    try!(write(cx.dst.join("SourceCodePro-LICENSE.txt"),
-               include_bytes!("static/SourceCodePro-LICENSE.txt")));
-    try!(write(cx.dst.join("LICENSE-MIT.txt"),
-               include_bytes!("static/LICENSE-MIT.txt")));
-    try!(write(cx.dst.join("LICENSE-APACHE.txt"),
-               include_bytes!("static/LICENSE-APACHE.txt")));
-    try!(write(cx.dst.join("COPYRIGHT.txt"),
-               include_bytes!("static/COPYRIGHT.txt")));
+
+    write(cx.dst.join("jquery.js"),
+          include_bytes!("static/jquery-2.1.4.min.js"))?;
+    write(cx.dst.join("main.js"),
+          include_bytes!("static/main.js"))?;
+    write(cx.dst.join("playpen.js"),
+          include_bytes!("static/playpen.js"))?;
+    write(cx.dst.join("rustdoc.css"),
+          include_bytes!("static/rustdoc.css"))?;
+    write(cx.dst.join("main.css"),
+          include_bytes!("static/styles/main.css"))?;
+    if let Some(ref css) = cx.shared.css_file_extension {
+        let mut content = String::new();
+        let css = css.as_path();
+        let mut f = try_err!(File::open(css), css);
+
+        try_err!(f.read_to_string(&mut content), css);
+        let css = cx.dst.join("theme.css");
+        let css = css.as_path();
+        let mut f = try_err!(File::create(css), css);
+        try_err!(write!(f, "{}", &content), css);
+    }
+    write(cx.dst.join("normalize.css"),
+          include_bytes!("static/normalize.css"))?;
+    write(cx.dst.join("FiraSans-Regular.woff"),
+          include_bytes!("static/FiraSans-Regular.woff"))?;
+    write(cx.dst.join("FiraSans-Medium.woff"),
+          include_bytes!("static/FiraSans-Medium.woff"))?;
+    write(cx.dst.join("FiraSans-LICENSE.txt"),
+          include_bytes!("static/FiraSans-LICENSE.txt"))?;
+    write(cx.dst.join("Heuristica-Italic.woff"),
+          include_bytes!("static/Heuristica-Italic.woff"))?;
+    write(cx.dst.join("Heuristica-LICENSE.txt"),
+          include_bytes!("static/Heuristica-LICENSE.txt"))?;
+    write(cx.dst.join("SourceSerifPro-Regular.woff"),
+          include_bytes!("static/SourceSerifPro-Regular.woff"))?;
+    write(cx.dst.join("SourceSerifPro-Bold.woff"),
+          include_bytes!("static/SourceSerifPro-Bold.woff"))?;
+    write(cx.dst.join("SourceSerifPro-LICENSE.txt"),
+          include_bytes!("static/SourceSerifPro-LICENSE.txt"))?;
+    write(cx.dst.join("SourceCodePro-Regular.woff"),
+          include_bytes!("static/SourceCodePro-Regular.woff"))?;
+    write(cx.dst.join("SourceCodePro-Semibold.woff"),
+          include_bytes!("static/SourceCodePro-Semibold.woff"))?;
+    write(cx.dst.join("SourceCodePro-LICENSE.txt"),
+          include_bytes!("static/SourceCodePro-LICENSE.txt"))?;
+    write(cx.dst.join("LICENSE-MIT.txt"),
+          include_bytes!("static/LICENSE-MIT.txt"))?;
+    write(cx.dst.join("LICENSE-APACHE.txt"),
+          include_bytes!("static/LICENSE-APACHE.txt"))?;
+    write(cx.dst.join("COPYRIGHT.txt"),
+          include_bytes!("static/COPYRIGHT.txt"))?;
 
     fn collect(path: &Path, krate: &str,
                key: &str) -> io::Result<Vec<String>> {
         let mut ret = Vec::new();
         if path.exists() {
-            for line in BufReader::new(try!(File::open(path))).lines() {
-                let line = try!(line);
+            for line in BufReader::new(File::open(path)?).lines() {
+                let line = line?;
                 if !line.starts_with(key) {
                     continue
                 }
@@ -761,20 +783,17 @@ fn write_shared(cx: &Context,
     Ok(())
 }
 
-fn render_sources(cx: &mut Context,
+fn render_sources(dst: &Path, scx: &mut SharedContext,
                   krate: clean::Crate) -> Result<clean::Crate, Error> {
     info!("emitting source files");
-    let dst = cx.dst.join("src");
+    let dst = dst.join("src");
     try_err!(mkdir(&dst), &dst);
     let dst = dst.join(&krate.name);
     try_err!(mkdir(&dst), &dst);
     let mut folder = SourceCollector {
         dst: dst,
-        seen: HashSet::new(),
-        cx: cx,
+        scx: scx,
     };
-    // skip all invalid spans
-    folder.seen.insert("".to_string());
     Ok(folder.fold_crate(krate))
 }
 
@@ -838,42 +857,33 @@ fn extern_location(e: &clean::ExternalCrate, dst: &Path) -> ExternalLocation {
 
     // Failing that, see if there's an attribute specifying where to find this
     // external crate
-    for attr in &e.attrs {
-        match *attr {
-            clean::List(ref x, ref list) if "doc" == *x => {
-                for attr in list {
-                    match *attr {
-                        clean::NameValue(ref x, ref s)
-                                if "html_root_url" == *x => {
-                            if s.ends_with("/") {
-                                return Remote(s.to_string());
-                            }
-                            return Remote(format!("{}/", s));
-                        }
-                        _ => {}
-                    }
-                }
-            }
-            _ => {}
+    e.attrs.list("doc").value("html_root_url").map(|url| {
+        let mut url = url.to_owned();
+        if !url.ends_with("/") {
+            url.push('/')
         }
-    }
-
-    // Well, at least we tried.
-    return Unknown;
+        Remote(url)
+    }).unwrap_or(Unknown) // Well, at least we tried.
 }
 
 impl<'a> DocFolder for SourceCollector<'a> {
     fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> {
         // If we're including source files, and we haven't seen this file yet,
         // then we need to render it out to the filesystem
-        if self.cx.include_sources && !self.seen.contains(&item.source.filename) {
+        if self.scx.include_sources
+            // skip all invalid spans
+            && item.source.filename != ""
+            // macros from other libraries get special filenames which we can
+            // safely ignore
+            && !(item.source.filename.starts_with("<")
+                && item.source.filename.ends_with("macros>")) {
 
             // If it turns out that we couldn't read this file, then we probably
             // can't read any of the files (generating html output from json or
             // something like that), so just don't include sources for the
             // entire crate. The other option is maintaining this mapping on a
             // per-file basis, but that's probably not worth it...
-            self.cx
+            self.scx
                 .include_sources = match self.emit_source(&item.source.filename) {
                 Ok(()) => true,
                 Err(e) => {
@@ -884,9 +894,7 @@ impl<'a> DocFolder for SourceCollector<'a> {
                     false
                 }
             };
-            self.seen.insert(item.source.filename.clone());
         }
-
         self.fold_item_recur(item)
     }
 }
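
With the `seen` set gone, the collector filters directly on the filename: invalid spans carry an empty filename, and macros imported from other crates carry synthetic names such as `<core macros>`, so neither gets a source page. The predicate as a small hedged sketch (hypothetical helper, not in the source):

    fn should_emit_source(include_sources: bool, filename: &str) -> bool {
        include_sources
            && !filename.is_empty()
            && !(filename.starts_with("<") && filename.ends_with("macros>"))
    }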
@@ -895,19 +903,14 @@ impl<'a> SourceCollector<'a> {
     /// Renders the given filename into its corresponding HTML source file.
     fn emit_source(&mut self, filename: &str) -> io::Result<()> {
         let p = PathBuf::from(filename);
+        if self.scx.local_sources.contains_key(&p) {
+            // We've already emitted this source
+            return Ok(());
+        }
 
-        // If we couldn't open this file, then just returns because it
-        // probably means that it's some standard library macro thing and we
-        // can't have the source to it anyway.
         let mut contents = Vec::new();
-        match File::open(&p).and_then(|mut f| f.read_to_end(&mut contents)) {
-            Ok(r) => r,
-            // macros from other libraries get special filenames which we can
-            // safely ignore
-            Err(..) if filename.starts_with("<") &&
-                       filename.ends_with("macros>") => return Ok(()),
-            Err(e) => return Err(e)
-        };
+        File::open(&p).and_then(|mut f| f.read_to_end(&mut contents))?;
+
         let contents = str::from_utf8(&contents).unwrap();
 
         // Remove the utf-8 BOM if any
@@ -920,17 +923,21 @@ impl<'a> SourceCollector<'a> {
         // Create the intermediate directories
         let mut cur = self.dst.clone();
         let mut root_path = String::from("../../");
-        clean_srcpath(&self.cx.src_root, &p, false, |component| {
+        let mut href = String::new();
+        clean_srcpath(&self.scx.src_root, &p, false, |component| {
             cur.push(component);
             mkdir(&cur).unwrap();
             root_path.push_str("../");
+            href.push_str(component);
+            href.push('/');
         });
-
         let mut fname = p.file_name().expect("source has no filename")
                          .to_os_string();
         fname.push(".html");
         cur.push(&fname[..]);
-        let mut w = BufWriter::new(try!(File::create(&cur)));
+        href.push_str(&fname.to_string_lossy());
+
+        let mut w = BufWriter::new(File::create(&cur)?);
         let title = format!("{} -- source", cur.file_name().unwrap()
                                                .to_string_lossy());
         let desc = format!("Source to the Rust file `{}`.", filename);
@@ -939,25 +946,28 @@ impl<'a> SourceCollector<'a> {
             ty: "source",
             root_path: &root_path,
             description: &desc,
-            keywords: get_basic_keywords(),
+            keywords: BASIC_KEYWORDS,
         };
-        try!(layout::render(&mut w, &self.cx.layout,
-                            &page, &(""), &Source(contents)));
-        try!(w.flush());
-        return Ok(());
+        layout::render(&mut w, &self.scx.layout,
+                       &page, &(""), &Source(contents),
+                       self.scx.css_file_extension.is_some())?;
+        w.flush()?;
+        self.scx.local_sources.insert(p, href);
+        Ok(())
     }
 }
 
 impl DocFolder for Cache {
     fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> {
-        // If this is a private module, we don't want it in the search index.
-        let orig_privmod = match item.inner {
-            clean::ModuleItem(..) => {
-                let prev = self.privmod;
-                self.privmod = prev || (self.remove_priv && item.visibility != Some(hir::Public));
+        // If this is a stripped module,
+        // we don't want it or its children in the search index.
+        let orig_stripped_mod = match item.inner {
+            clean::StrippedItem(box clean::ModuleItem(..)) => {
+                let prev = self.stripped_mod;
+                self.stripped_mod = true;
                 prev
             }
-            _ => self.privmod,
+            _ => self.stripped_mod,
         };
 
         // Register any generics to their corresponding string. This is used
@@ -983,21 +993,19 @@ impl DocFolder for Cache {
 
         // Collect all the implementors of traits.
         if let clean::ImplItem(ref i) = item.inner {
-            match i.trait_ {
-                Some(clean::ResolvedPath{ did, .. }) => {
-                    self.implementors.entry(did).or_insert(vec![]).push(Implementor {
-                        def_id: item.def_id,
-                        stability: item.stability.clone(),
-                        impl_: i.clone(),
-                    });
-                }
-                Some(..) | None => {}
+            if let Some(did) = i.trait_.def_id() {
+                self.implementors.entry(did).or_insert(vec![]).push(Implementor {
+                    def_id: item.def_id,
+                    stability: item.stability.clone(),
+                    impl_: i.clone(),
+                });
             }
         }
 
         // Index this method for searching later on
         if let Some(ref s) = item.name {
             let (parent, is_method) = match item.inner {
+                clean::StrippedItem(..) => ((None, None), false),
                 clean::AssociatedConstItem(..) |
                 clean::TypedefItem(_, true) if self.parent_is_trait_impl => {
                     // skip associated items in trait impls
@@ -1036,13 +1044,9 @@ impl DocFolder for Cache {
                 }
                 _ => ((None, Some(&*self.stack)), false)
             };
-            let hidden_field = match item.inner {
-                clean::StructFieldItem(clean::HiddenStructField) => true,
-                _ => false
-            };
 
             match parent {
-                (parent, Some(path)) if is_method || (!self.privmod && !hidden_field) => {
+                (parent, Some(path)) if is_method || (!self.stripped_mod) => {
                     // Needed to determine `self` type.
                     let parent_basename = self.parent_stack.first().and_then(|parent| {
                         match self.paths.get(parent) {
@@ -1050,7 +1054,11 @@ impl DocFolder for Cache {
                             _ => None
                         }
                     });
+                    debug_assert!(!item.is_stripped());
 
+                    // A crate has a module at its root containing all items;
+                    // that root module should not be indexed. The crate item
+                    // itself is inserted later, when the search index is serialized.
                     if item.def_id.index != CRATE_DEF_INDEX {
                         self.search_index.push(IndexItem {
                             ty: shortty(&item),
@@ -1063,7 +1071,7 @@ impl DocFolder for Cache {
                         });
                     }
                 }
-                (Some(parent), None) if is_method || (!self.privmod && !hidden_field)=> {
+                (Some(parent), None) if is_method || (!self.stripped_mod)=> {
                     if parent.is_local() {
                         // We have a parent, but we don't know where they're
                         // defined yet. Wait for later to index this item.
@@ -1075,18 +1083,19 @@ impl DocFolder for Cache {
         }
 
         // Keep track of the fully qualified path for this item.
-        let pushed = if item.name.is_some() {
-            let n = item.name.as_ref().unwrap();
-            if !n.is_empty() {
+        let pushed = match item.name {
+            Some(ref n) if !n.is_empty() => {
                 self.stack.push(n.to_string());
                 true
-            } else { false }
-        } else { false };
+            }
+            _ => false,
+        };
+
         match item.inner {
             clean::StructItem(..) | clean::EnumItem(..) |
             clean::TypedefItem(..) | clean::TraitItem(..) |
             clean::FunctionItem(..) | clean::ModuleItem(..) |
-            clean::ForeignFunctionItem(..) if !self.privmod => {
+            clean::ForeignFunctionItem(..) if !self.stripped_mod => {
                 // Reexported items mean that the same id can show up twice
                 // in the rustdoc ast that we're looking at. We know,
                 // however, that a reexported item doesn't show up in the
@@ -1104,7 +1113,7 @@ impl DocFolder for Cache {
             }
             // link variants to their parent enum because pages aren't emitted
             // for each variant
-            clean::VariantItem(..) if !self.privmod => {
+            clean::VariantItem(..) if !self.stripped_mod => {
                 let mut stack = self.stack.clone();
                 stack.pop();
                 self.paths.insert(item.def_id, (stack, ItemType::Enum));
@@ -1150,64 +1159,44 @@ impl DocFolder for Cache {
 
         // Once we've recursively found all the generics, then hoard off all the
         // implementations elsewhere
-        let ret = match self.fold_item_recur(item) {
-            Some(item) => {
-                match item {
-                    clean::Item{ attrs, inner: clean::ImplItem(i), .. } => {
-                        // extract relevant documentation for this impl
-                        let dox = match attrs.into_iter().find(|a| {
-                            match *a {
-                                clean::NameValue(ref x, _)
-                                        if "doc" == *x => {
-                                    true
-                                }
-                                _ => false
-                            }
-                        }) {
-                            Some(clean::NameValue(_, dox)) => Some(dox),
-                            Some(..) | None => None,
-                        };
-
-                        // Figure out the id of this impl. This may map to a
-                        // primitive rather than always to a struct/enum.
-                        let did = match i.for_ {
-                            clean::ResolvedPath { did, .. } |
-                            clean::BorrowedRef {
-                                type_: box clean::ResolvedPath { did, .. }, ..
-                            } => {
-                                Some(did)
-                            }
-
-                            ref t => {
-                                t.primitive_type().and_then(|t| {
-                                    self.primitive_locations.get(&t).map(|n| {
-                                        let id = t.to_def_index();
-                                        DefId { krate: *n, index: id }
-                                    })
-                                })
-                            }
-                        };
-
-                        if let Some(did) = did {
-                            self.impls.entry(did).or_insert(vec![]).push(Impl {
-                                impl_: i,
-                                dox: dox,
-                                stability: item.stability.clone(),
-                            });
-                        }
+        let ret = self.fold_item_recur(item).and_then(|item| {
+            if let clean::Item { attrs, inner: clean::ImplItem(i), .. } = item {
+                // Figure out the id of this impl. This may map to a
+                // primitive rather than always to a struct/enum.
+                let did = match i.for_ {
+                    clean::ResolvedPath { did, .. } |
+                    clean::BorrowedRef {
+                        type_: box clean::ResolvedPath { did, .. }, ..
+                    } => {
+                        Some(did)
+                    }
 
-                        None
+                    ref t => {
+                        t.primitive_type().and_then(|t| {
+                            self.primitive_locations.get(&t).map(|n| {
+                                let id = t.to_def_index();
+                                DefId { krate: *n, index: id }
+                            })
+                        })
                     }
+                };
 
-                    i => Some(i),
+                if let Some(did) = did {
+                    self.impls.entry(did).or_insert(vec![]).push(Impl {
+                        impl_: i,
+                        dox: attrs.value("doc").map(|s|s.to_owned()),
+                        stability: item.stability.clone(),
+                    });
                 }
+                None
+            } else {
+                Some(item)
             }
-            i => i,
-        };
+        });
 
         if pushed { self.stack.pop().unwrap(); }
         if parent_pushed { self.parent_stack.pop().unwrap(); }
-        self.privmod = orig_privmod;
+        self.stripped_mod = orig_stripped_mod;
         self.parent_is_trait_impl = orig_parent_is_trait_impl;
         return ret;
     }
@@ -1264,15 +1253,12 @@ impl Context {
 
         // render the crate documentation
         let mut work = vec!((self, item));
-        loop {
-            match work.pop() {
-                Some((mut cx, item)) => try!(cx.item(item, |cx, item| {
-                    work.push((cx.clone(), item));
-                })),
-                None => break,
-            }
-        }
 
+        while let Some((mut cx, item)) = work.pop() {
+            cx.item(item, |cx, item| {
+                work.push((cx.clone(), item))
+            })?
+        }
         Ok(())
     }
 
@@ -1301,16 +1287,12 @@ impl Context {
             }
             title.push_str(" - Rust");
             let tyname = shortty(it).to_static_str();
-            let is_crate = match it.inner {
-                clean::ModuleItem(clean::Module { items: _, is_crate: true }) => true,
-                _ => false
-            };
-            let desc = if is_crate {
+            let desc = if it.is_crate() {
                 format!("API documentation for the Rust `{}` crate.",
-                        cx.layout.krate)
+                        cx.shared.layout.krate)
             } else {
                 format!("API documentation for the Rust `{}` {} in crate `{}`.",
-                        it.name.as_ref().unwrap(), tyname, cx.layout.krate)
+                        it.name.as_ref().unwrap(), tyname, cx.shared.layout.krate)
             };
             let keywords = make_item_keywords(it);
             let page = layout::Page {
@@ -1321,89 +1303,82 @@ impl Context {
                 keywords: &keywords,
             };
 
-            reset_ids();
+            reset_ids(true);
 
             // We have a huge number of calls to write, so try to alleviate some
             // of the pain by using a buffered writer instead of invoking the
             // write syscall all the time.
             let mut writer = BufWriter::new(w);
             if !cx.render_redirect_pages {
-                try!(layout::render(&mut writer, &cx.layout, &page,
-                                    &Sidebar{ cx: cx, item: it },
-                                    &Item{ cx: cx, item: it }));
+                layout::render(&mut writer, &cx.shared.layout, &page,
+                               &Sidebar{ cx: cx, item: it },
+                               &Item{ cx: cx, item: it },
+                               cx.shared.css_file_extension.is_some())?;
             } else {
                 let mut url = repeat("../").take(cx.current.len())
                                            .collect::<String>();
-                match cache().paths.get(&it.def_id) {
-                    Some(&(ref names, _)) => {
-                        for name in &names[..names.len() - 1] {
-                            url.push_str(name);
-                            url.push_str("/");
-                        }
-                        url.push_str(&item_path(it));
-                        try!(layout::redirect(&mut writer, &url));
+                if let Some(&(ref names, _)) = cache().paths.get(&it.def_id) {
+                    for name in &names[..names.len() - 1] {
+                        url.push_str(name);
+                        url.push_str("/");
                     }
-                    None => {}
+                    url.push_str(&item_path(it));
+                    layout::redirect(&mut writer, &url)?;
                 }
             }
             writer.flush()
         }
 
-        // Private modules may survive the strip-private pass if they
-        // contain impls for public types. These modules can also
+        // Stripped modules survive the rustdoc passes (e.g. `strip-private`)
+        // if they contain impls for public types. These modules can also
         // contain items such as publicly reexported structures.
         //
         // External crates will provide links to these structures, so
-        // these modules are recursed into, but not rendered normally (a
-        // flag on the context).
+        // these modules are recursed into, but not rendered normally
+        // (a flag on the context).
         if !self.render_redirect_pages {
-            self.render_redirect_pages = self.ignore_private_item(&item);
+            self.render_redirect_pages = self.maybe_ignore_item(&item);
         }
 
-        match item.inner {
+        if item.is_mod() {
             // modules are special because they add a namespace. We also need to
             // recurse into the items of the module as well.
-            clean::ModuleItem(..) => {
-                let name = item.name.as_ref().unwrap().to_string();
-                let mut item = Some(item);
-                self.recurse(name, |this| {
-                    let item = item.take().unwrap();
-                    let joint_dst = this.dst.join("index.html");
-                    let dst = try_err!(File::create(&joint_dst), &joint_dst);
-                    try_err!(render(dst, this, &item, false), &joint_dst);
-
-                    let m = match item.inner {
-                        clean::ModuleItem(m) => m,
-                        _ => unreachable!()
-                    };
-
-                    // render sidebar-items.js used throughout this module
-                    {
-                        let items = this.build_sidebar_items(&m);
-                        let js_dst = this.dst.join("sidebar-items.js");
-                        let mut js_out = BufWriter::new(try_err!(File::create(&js_dst), &js_dst));
-                        try_err!(write!(&mut js_out, "initSidebarItems({});",
-                                    as_json(&items)), &js_dst);
-                    }
+            let name = item.name.as_ref().unwrap().to_string();
+            let mut item = Some(item);
+            self.recurse(name, |this| {
+                let item = item.take().unwrap();
+                let joint_dst = this.dst.join("index.html");
+                let dst = try_err!(File::create(&joint_dst), &joint_dst);
+                try_err!(render(dst, this, &item, false), &joint_dst);
 
-                    for item in m.items {
-                        f(this,item);
-                    }
-                    Ok(())
-                })
-            }
+                let m = match item.inner {
+                    clean::StrippedItem(box clean::ModuleItem(m)) |
+                    clean::ModuleItem(m) => m,
+                    _ => unreachable!()
+                };
 
-            // Things which don't have names (like impls) don't get special
-            // pages dedicated to them.
-            _ if item.name.is_some() => {
-                let joint_dst = self.dst.join(&item_path(&item));
+                // render sidebar-items.js used throughout this module
+                {
+                    let items = this.build_sidebar_items(&m);
+                    let js_dst = this.dst.join("sidebar-items.js");
+                    let mut js_out = BufWriter::new(try_err!(File::create(&js_dst), &js_dst));
+                    try_err!(write!(&mut js_out, "initSidebarItems({});",
+                                    as_json(&items)), &js_dst);
+                }
 
-                let dst = try_err!(File::create(&joint_dst), &joint_dst);
-                try_err!(render(dst, self, &item, true), &joint_dst);
+                for item in m.items {
+                    f(this,item);
+                }
                 Ok(())
-            }
+            })
+        } else if item.name.is_some() {
+            let joint_dst = self.dst.join(&item_path(&item));
 
-            _ => Ok(())
+            let dst = try_err!(File::create(&joint_dst), &joint_dst);
+            try_err!(render(dst, self, &item, true), &joint_dst);
+            Ok(())
+        } else {
+            Ok(())
         }
     }
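
When `render_redirect_pages` is set, the branch above rebuilds a URL by climbing back to the documentation root (`../` once per level of `cx.current`) and then descending along the module path recorded in the cache. A hedged sketch of that construction, as a hypothetical helper that assumes a non-empty path, as the cache guarantees:

    use std::iter::repeat;

    fn redirect_url(depth: usize, names: &[String], item_file: &str) -> String {
        let mut url = repeat("../").take(depth).collect::<String>();
        // All path components except the last are module directories.
        for name in &names[..names.len() - 1] {
            url.push_str(name);
            url.push_str("/");
        }
        url.push_str(item_file);
        url
    }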
 
@@ -1411,7 +1386,7 @@ impl Context {
         // BTreeMap instead of HashMap to get a sorted output
         let mut map = BTreeMap::new();
         for item in &m.items {
-            if self.ignore_private_item(item) { continue }
+            if self.maybe_ignore_item(item) { continue }
 
             let short = shortty(item).to_static_str();
             let myname = match item.name {
@@ -1429,27 +1404,18 @@ impl Context {
         return map;
     }
 
-    fn ignore_private_item(&self, it: &clean::Item) -> bool {
+    fn maybe_ignore_item(&self, it: &clean::Item) -> bool {
         match it.inner {
+            clean::StrippedItem(..) => true,
             clean::ModuleItem(ref m) => {
-                (m.items.is_empty() &&
-                 it.doc_value().is_none() &&
-                 it.visibility != Some(hir::Public)) ||
-                (self.passes.contains("strip-private") && it.visibility != Some(hir::Public))
-            }
-            clean::PrimitiveItem(..) => it.visibility != Some(hir::Public),
+                it.doc_value().is_none() && m.items.is_empty() && it.visibility != Some(hir::Public)
+            },
             _ => false,
         }
     }
 }
 
 impl<'a> Item<'a> {
-    fn ismodule(&self) -> bool {
-        match self.item.inner {
-            clean::ModuleItem(..) => true, _ => false
-        }
-    }
-
     /// Generate a url appropriate for an `href` attribute back to the source of
     /// this item.
     ///
@@ -1459,7 +1425,7 @@ impl<'a> Item<'a> {
     /// If `None` is returned, then a source link couldn't be generated. This
     /// may happen, for example, with externally inlined items where the source
     /// of their crate documentation isn't known.
-    fn href(&self, cx: &Context) -> Option<String> {
+    fn href(&self) -> Option<String> {
         let href = if self.item.source.loline == self.item.source.hiline {
             format!("{}", self.item.source.loline)
         } else {
@@ -1492,25 +1458,14 @@ impl<'a> Item<'a> {
         // know the span, so we plow forward and generate a proper url. The url
         // has anchors for the line numbers that we're linking to.
         } else if self.item.def_id.is_local() {
-            let mut path = Vec::new();
-            clean_srcpath(&cx.src_root, Path::new(&self.item.source.filename),
-                          true, |component| {
-                path.push(component.to_string());
-            });
-
-            // If the span points into an external macro the
-            // source-file will be bogus, i.e `<foo macros>`
-            let filename = &self.item.source.filename;
-            if !(filename.starts_with("<") && filename.ends_with("macros>")) {
-                Some(format!("{root}src/{krate}/{path}.html#{href}",
-                             root = self.cx.root_path,
-                             krate = self.cx.layout.krate,
-                             path = path.join("/"),
-                             href = href))
-            } else {
-                None
-            }
-
+            let path = PathBuf::from(&self.item.source.filename);
+            self.cx.shared.local_sources.get(&path).map(|path| {
+                format!("{root}src/{krate}/{path}#{href}",
+                        root = self.cx.root_path,
+                        krate = self.cx.shared.layout.krate,
+                        path = path,
+                        href = href)
+            })
         // If this item is not part of the local crate, then things get a little
         // trickier. We don't actually know the span of the external item, but
         // we know that the documentation on the other end knows the span!
@@ -1542,19 +1497,20 @@ impl<'a> Item<'a> {
 
 impl<'a> fmt::Display for Item<'a> {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        debug_assert!(!self.item.is_stripped());
         // Write the breadcrumb trail header for the top
-        try!(write!(fmt, "\n<h1 class='fqn'><span class='in-band'>"));
+        write!(fmt, "\n<h1 class='fqn'><span class='in-band'>")?;
         match self.item.inner {
             clean::ModuleItem(ref m) => if m.is_crate {
-                    try!(write!(fmt, "Crate "));
+                    write!(fmt, "Crate ")?;
                 } else {
-                    try!(write!(fmt, "Module "));
+                    write!(fmt, "Module ")?;
                 },
-            clean::FunctionItem(..) => try!(write!(fmt, "Function ")),
-            clean::TraitItem(..) => try!(write!(fmt, "Trait ")),
-            clean::StructItem(..) => try!(write!(fmt, "Struct ")),
-            clean::EnumItem(..) => try!(write!(fmt, "Enum ")),
-            clean::PrimitiveItem(..) => try!(write!(fmt, "Primitive Type ")),
+            clean::FunctionItem(..) => write!(fmt, "Function ")?,
+            clean::TraitItem(..) => write!(fmt, "Trait ")?,
+            clean::StructItem(..) => write!(fmt, "Struct ")?,
+            clean::EnumItem(..) => write!(fmt, "Enum ")?,
+            clean::PrimitiveItem(..) => write!(fmt, "Primitive Type ")?,
             _ => {}
         }
         let is_primitive = match self.item.inner {
@@ -1563,25 +1519,25 @@ impl<'a> fmt::Display for Item<'a> {
         };
         if !is_primitive {
             let cur = &self.cx.current;
-            let amt = if self.ismodule() { cur.len() - 1 } else { cur.len() };
+            let amt = if self.item.is_mod() { cur.len() - 1 } else { cur.len() };
             for (i, component) in cur.iter().enumerate().take(amt) {
-                try!(write!(fmt, "<a href='{}index.html'>{}</a>::<wbr>",
-                            repeat("../").take(cur.len() - i - 1)
-                                         .collect::<String>(),
-                            component));
+                write!(fmt, "<a href='{}index.html'>{}</a>::<wbr>",
+                       repeat("../").take(cur.len() - i - 1)
+                                    .collect::<String>(),
+                       component)?;
             }
         }
-        try!(write!(fmt, "<a class='{}' href=''>{}</a>",
-                    shortty(self.item), self.item.name.as_ref().unwrap()));
+        write!(fmt, "<a class='{}' href=''>{}</a>",
+               shortty(self.item), self.item.name.as_ref().unwrap())?;
 
-        try!(write!(fmt, "</span>")); // in-band
-        try!(write!(fmt, "<span class='out-of-band'>"));
-        try!(write!(fmt,
-        r##"<span id='render-detail'>
-            <a id="toggle-all-docs" href="javascript:void(0)" title="collapse all docs">
-                [<span class='inner'>&#x2212;</span>]
-            </a>
-        </span>"##));
+        write!(fmt, "</span>")?; // in-band
+        write!(fmt, "<span class='out-of-band'>")?;
+        write!(fmt,
+               r##"<span id='render-detail'>
+                   <a id="toggle-all-docs" href="javascript:void(0)" title="collapse all docs">
+                       [<span class='inner'>&#x2212;</span>]
+                   </a>
+               </span>"##)?;
 
         // Write `src` tag
         //
@@ -1589,20 +1545,17 @@ impl<'a> fmt::Display for Item<'a> {
         // [src] link in the downstream documentation will actually come back to
         // this page, and this link will be auto-clicked. The `id` attribute is
         // used to find the link to auto-click.
-        if self.cx.include_sources && !is_primitive {
-            match self.href(self.cx) {
-                Some(l) => {
-                    try!(write!(fmt, "<a id='src-{}' class='srclink' \
-                                       href='{}' title='{}'>[src]</a>",
-                                self.item.def_id.index.as_usize(), l, "goto source code"));
-                }
-                None => {}
+        if self.cx.shared.include_sources && !is_primitive {
+            if let Some(l) = self.href() {
+                write!(fmt, "<a id='src-{}' class='srclink' \
+                              href='{}' title='{}'>[src]</a>",
+                       self.item.def_id.index.as_usize(), l, "goto source code")?;
             }
         }
 
-        try!(write!(fmt, "</span>")); // out-of-band
+        write!(fmt, "</span>")?; // out-of-band
 
-        try!(write!(fmt, "</h1>\n"));
+        write!(fmt, "</h1>\n")?;
 
         match self.item.inner {
             clean::ModuleItem(ref m) => {
@@ -1625,15 +1578,12 @@ impl<'a> fmt::Display for Item<'a> {
 }
 
 fn item_path(item: &clean::Item) -> String {
-    match item.inner {
-        clean::ModuleItem(..) => {
-            format!("{}/index.html", item.name.as_ref().unwrap())
-        }
-        _ => {
-            format!("{}.{}.html",
-                    shortty(item).to_static_str(),
-                    *item.name.as_ref().unwrap())
-        }
+    if item.is_mod() {
+        format!("{}/index.html", item.name.as_ref().unwrap())
+    } else {
+        format!("{}.{}.html",
+                shortty(item).to_static_str(),
+                *item.name.as_ref().unwrap())
     }
 }
 
@@ -1663,20 +1613,20 @@ fn plain_summary_line(s: Option<&str>) -> String {
 
 fn document(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result {
     if let Some(s) = short_stability(item, cx, true) {
-        try!(write!(w, "<div class='stability'>{}</div>", s));
+        write!(w, "<div class='stability'>{}</div>", s)?;
     }
     if let Some(s) = item.doc_value() {
-        try!(write!(w, "<div class='docblock'>{}</div>", Markdown(s)));
+        write!(w, "<div class='docblock'>{}</div>", Markdown(s))?;
     }
     Ok(())
 }
 
 fn item_module(w: &mut fmt::Formatter, cx: &Context,
                item: &clean::Item, items: &[clean::Item]) -> fmt::Result {
-    try!(document(w, cx, item));
+    document(w, cx, item)?;
 
     let mut indices = (0..items.len()).filter(|i| {
-        !cx.ignore_private_item(&items[*i])
+        !cx.maybe_ignore_item(&items[*i])
     }).collect::<Vec<usize>>();
 
     // the order of item types in the listing
@@ -1720,6 +1670,9 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context,
     let mut curty = None;
     for &idx in &indices {
         let myitem = &items[idx];
+        if myitem.is_stripped() {
+            continue;
+        }
 
         let myty = Some(shortty(myitem));
         if curty == Some(ItemType::ExternCrate) && myty == Some(ItemType::Import) {
@@ -1727,7 +1680,7 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context,
             curty = myty;
         } else if myty != curty {
             if curty.is_some() {
-                try!(write!(w, "</table>"));
+                write!(w, "</table>")?;
             }
             curty = myty;
             let (short, name) = match myty.unwrap() {
@@ -1751,31 +1704,31 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context,
                 ItemType::AssociatedType  => ("associated-types", "Associated Types"),
                 ItemType::AssociatedConst => ("associated-consts", "Associated Constants"),
             };
-            try!(write!(w, "<h2 id='{id}' class='section-header'>\
-                           <a href=\"#{id}\">{name}</a></h2>\n<table>",
-                           id = derive_id(short.to_owned()), name = name));
+            write!(w, "<h2 id='{id}' class='section-header'>\
+                       <a href=\"#{id}\">{name}</a></h2>\n<table>",
+                   id = derive_id(short.to_owned()), name = name)?;
         }
 
         match myitem.inner {
             clean::ExternCrateItem(ref name, ref src) => {
                 match *src {
                     Some(ref src) => {
-                        try!(write!(w, "<tr><td><code>{}extern crate {} as {};",
-                                    VisSpace(myitem.visibility),
-                                    src,
-                                    name))
+                        write!(w, "<tr><td><code>{}extern crate {} as {};",
+                               VisSpace(&myitem.visibility),
+                               src,
+                               name)?
                     }
                     None => {
-                        try!(write!(w, "<tr><td><code>{}extern crate {};",
-                                    VisSpace(myitem.visibility), name))
+                        write!(w, "<tr><td><code>{}extern crate {};",
+                               VisSpace(&myitem.visibility), name)?
                     }
                 }
-                try!(write!(w, "</code></td></tr>"));
+                write!(w, "</code></td></tr>")?;
             }
 
             clean::ImportItem(ref import) => {
-                try!(write!(w, "<tr><td><code>{}{}</code></td></tr>",
-                            VisSpace(myitem.visibility), *import));
+                write!(w, "<tr><td><code>{}{}</code></td></tr>",
+                       VisSpace(&myitem.visibility), *import)?;
             }
 
             _ => {
@@ -1786,22 +1739,21 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context,
                     String::new()
                 };
                 let doc_value = myitem.doc_value().unwrap_or("");
-                try!(write!(w, "
-                    <tr class='{stab} module-item'>
-                        <td><a class='{class}' href='{href}'
-                               title='{title}'>{name}</a></td>
-                        <td class='docblock short'>
-                            {stab_docs} {docs}
-                        </td>
-                    </tr>
-                ",
-                name = *myitem.name.as_ref().unwrap(),
-                stab_docs = stab_docs,
-                docs = shorter(Some(&Markdown(doc_value).to_string())),
-                class = shortty(myitem),
-                stab = myitem.stability_class(),
-                href = item_path(myitem),
-                title = full_path(cx, myitem)));
+                write!(w, "
+                       <tr class='{stab} module-item'>
+                           <td><a class='{class}' href='{href}'
+                                  title='{title}'>{name}</a></td>
+                           <td class='docblock short'>
+                               {stab_docs} {docs}
+                           </td>
+                       </tr>",
+                       name = *myitem.name.as_ref().unwrap(),
+                       stab_docs = stab_docs,
+                       docs = shorter(Some(&Markdown(doc_value).to_string())),
+                       class = shortty(myitem),
+                       stab = myitem.stability_class(),
+                       href = item_path(myitem),
+                       title = full_path(cx, myitem))?;
             }
         }
     }
@@ -1810,7 +1762,7 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context,
 }
 
 fn short_stability(item: &clean::Item, cx: &Context, show_reason: bool) -> Option<String> {
-    let mut result = item.stability.as_ref().and_then(|stab| {
+    item.stability.as_ref().and_then(|stab| {
         let reason = if show_reason && !stab.reason.is_empty() {
             format!(": {}", stab.reason)
         } else {
@@ -1825,7 +1777,7 @@ fn short_stability(item: &clean::Item, cx: &Context, show_reason: bool) -> Optio
             format!("Deprecated{}{}", since, Markdown(&reason))
         } else if stab.level == stability::Unstable {
             let unstable_extra = if show_reason {
-                match (!stab.feature.is_empty(), &cx.issue_tracker_base_url, stab.issue) {
+                match (!stab.feature.is_empty(), &cx.shared.issue_tracker_base_url, stab.issue) {
                     (true, &Some(ref tracker_url), Some(issue_no)) if issue_no > 0 =>
                         format!(" (<code>{}</code> <a href=\"{}{}\">#{}</a>)",
                                 Escape(&stab.feature), tracker_url, issue_no, issue_no),
@@ -1845,10 +1797,8 @@ fn short_stability(item: &clean::Item, cx: &Context, show_reason: bool) -> Optio
         };
         Some(format!("<em class='stab {}'>{}</em>",
                      item.stability_class(), text))
-    });
-
-    if result.is_none() {
-        result = item.deprecation.as_ref().and_then(|depr| {
+    }).or_else(|| {
+        item.deprecation.as_ref().and_then(|depr| {
             let note = if show_reason && !depr.note.is_empty() {
                 format!(": {}", depr.note)
             } else {
@@ -1862,10 +1812,8 @@ fn short_stability(item: &clean::Item, cx: &Context, show_reason: bool) -> Optio
 
             let text = format!("Deprecated{}{}", since, Markdown(&note));
             Some(format!("<em class='stab deprecated'>{}</em>", text))
-        });
-    }
-
-    result
+        })
+    })
 }
 
 struct Initializer<'a>(&'a str);
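The short_stability hunk above collapses a mutable "compute, then fall back if None" Option into and_then(..).or_else(..) chaining. A minimal sketch of that combinator pattern, with invented names (badge, primary, fallback) rather than the real stability/deprecation fields:

fn primary(label: &str) -> Option<String> {
    if label.starts_with("stable") { Some(format!("<em>{}</em>", label)) } else { None }
}

fn fallback(label: &str) -> Option<String> {
    if label.is_empty() { None } else { Some(format!("<em class='deprecated'>{}</em>", label)) }
}

fn badge(label: &str) -> Option<String> {
    // Equivalent to: let mut r = primary(label); if r.is_none() { r = fallback(label); } r
    primary(label).or_else(|| fallback(label))
}

fn main() {
    assert_eq!(badge("stable 1.9"), Some("<em>stable 1.9</em>".to_string()));
    assert_eq!(badge("old api"), Some("<em class='deprecated'>old api</em>".to_string()));
    assert_eq!(badge(""), None);
}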
@@ -1874,51 +1822,52 @@ impl<'a> fmt::Display for Initializer<'a> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         let Initializer(s) = *self;
         if s.is_empty() { return Ok(()); }
-        try!(write!(f, "<code> = </code>"));
+        write!(f, "<code> = </code>")?;
         write!(f, "<code>{}</code>", s)
     }
 }
 
 fn item_constant(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
                  c: &clean::Constant) -> fmt::Result {
-    try!(write!(w, "<pre class='rust const'>{vis}const \
-                    {name}: {typ}{init}</pre>",
-           vis = VisSpace(it.visibility),
+    write!(w, "<pre class='rust const'>{vis}const \
+               {name}: {typ}{init}</pre>",
+           vis = VisSpace(&it.visibility),
            name = it.name.as_ref().unwrap(),
            typ = c.type_,
-           init = Initializer(&c.expr)));
+           init = Initializer(&c.expr))?;
     document(w, cx, it)
 }
 
 fn item_static(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
                s: &clean::Static) -> fmt::Result {
-    try!(write!(w, "<pre class='rust static'>{vis}static {mutability}\
-                    {name}: {typ}{init}</pre>",
-           vis = VisSpace(it.visibility),
+    write!(w, "<pre class='rust static'>{vis}static {mutability}\
+               {name}: {typ}{init}</pre>",
+           vis = VisSpace(&it.visibility),
            mutability = MutableSpace(s.mutability),
            name = it.name.as_ref().unwrap(),
            typ = s.type_,
-           init = Initializer(&s.expr)));
+           init = Initializer(&s.expr))?;
     document(w, cx, it)
 }
 
 fn item_function(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
                  f: &clean::Function) -> fmt::Result {
+    // FIXME(#24111): remove when `const_fn` is stabilized
     let vis_constness = match get_unstable_features_setting() {
         UnstableFeatures::Allow => f.constness,
         _ => hir::Constness::NotConst
     };
-    try!(write!(w, "<pre class='rust fn'>{vis}{constness}{unsafety}{abi}fn \
-                    {name}{generics}{decl}{where_clause}</pre>",
-           vis = VisSpace(it.visibility),
+    write!(w, "<pre class='rust fn'>{vis}{constness}{unsafety}{abi}fn \
+               {name}{generics}{decl}{where_clause}</pre>",
+           vis = VisSpace(&it.visibility),
            constness = ConstnessSpace(vis_constness),
            unsafety = UnsafetySpace(f.unsafety),
            abi = AbiSpace(f.abi),
            name = it.name.as_ref().unwrap(),
            generics = f.generics,
            where_clause = WhereClause(&f.generics),
-           decl = f.decl));
-    try!(render_stability_since_raw(w, it.stable_since(), None));
+           decl = f.decl)?;
+    render_stability_since_raw(w, it.stable_since(), None)?;
     document(w, cx, it)
 }
 
@@ -1937,179 +1886,196 @@ fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
     }
 
     // Output the trait definition
-    try!(write!(w, "<pre class='rust trait'>{}{}trait {}{}{}{} ",
-                  VisSpace(it.visibility),
-                  UnsafetySpace(t.unsafety),
-                  it.name.as_ref().unwrap(),
-                  t.generics,
-                  bounds,
-                  WhereClause(&t.generics)));
-
-    let types = t.items.iter().filter(|m| {
-        match m.inner { clean::AssociatedTypeItem(..) => true, _ => false }
-    }).collect::<Vec<_>>();
-    let consts = t.items.iter().filter(|m| {
-        match m.inner { clean::AssociatedConstItem(..) => true, _ => false }
-    }).collect::<Vec<_>>();
-    let required = t.items.iter().filter(|m| {
-        match m.inner { clean::TyMethodItem(_) => true, _ => false }
-    }).collect::<Vec<_>>();
-    let provided = t.items.iter().filter(|m| {
-        match m.inner { clean::MethodItem(_) => true, _ => false }
-    }).collect::<Vec<_>>();
+    write!(w, "<pre class='rust trait'>{}{}trait {}{}{}{} ",
+           VisSpace(&it.visibility),
+           UnsafetySpace(t.unsafety),
+           it.name.as_ref().unwrap(),
+           t.generics,
+           bounds,
+           WhereClause(&t.generics))?;
+
+    let types = t.items.iter().filter(|m| m.is_associated_type()).collect::<Vec<_>>();
+    let consts = t.items.iter().filter(|m| m.is_associated_const()).collect::<Vec<_>>();
+    let required = t.items.iter().filter(|m| m.is_ty_method()).collect::<Vec<_>>();
+    let provided = t.items.iter().filter(|m| m.is_method()).collect::<Vec<_>>();
 
     if t.items.is_empty() {
-        try!(write!(w, "{{ }}"));
+        write!(w, "{{ }}")?;
     } else {
-        try!(write!(w, "{{\n"));
+        write!(w, "{{\n")?;
         for t in &types {
-            try!(write!(w, "    "));
-            try!(render_assoc_item(w, t, AssocItemLink::Anchor));
-            try!(write!(w, ";\n"));
+            write!(w, "    ")?;
+            render_assoc_item(w, t, AssocItemLink::Anchor)?;
+            write!(w, ";\n")?;
         }
         if !types.is_empty() && !consts.is_empty() {
-            try!(w.write_str("\n"));
+            w.write_str("\n")?;
         }
         for t in &consts {
-            try!(write!(w, "    "));
-            try!(render_assoc_item(w, t, AssocItemLink::Anchor));
-            try!(write!(w, ";\n"));
+            write!(w, "    ")?;
+            render_assoc_item(w, t, AssocItemLink::Anchor)?;
+            write!(w, ";\n")?;
         }
         if !consts.is_empty() && !required.is_empty() {
-            try!(w.write_str("\n"));
+            w.write_str("\n")?;
         }
         for m in &required {
-            try!(write!(w, "    "));
-            try!(render_assoc_item(w, m, AssocItemLink::Anchor));
-            try!(write!(w, ";\n"));
+            write!(w, "    ")?;
+            render_assoc_item(w, m, AssocItemLink::Anchor)?;
+            write!(w, ";\n")?;
         }
         if !required.is_empty() && !provided.is_empty() {
-            try!(w.write_str("\n"));
+            w.write_str("\n")?;
         }
         for m in &provided {
-            try!(write!(w, "    "));
-            try!(render_assoc_item(w, m, AssocItemLink::Anchor));
-            try!(write!(w, " {{ ... }}\n"));
+            write!(w, "    ")?;
+            render_assoc_item(w, m, AssocItemLink::Anchor)?;
+            write!(w, " {{ ... }}\n")?;
         }
-        try!(write!(w, "}}"));
+        write!(w, "}}")?;
     }
-    try!(write!(w, "</pre>"));
+    write!(w, "</pre>")?;
 
     // Trait documentation
-    try!(document(w, cx, it));
+    document(w, cx, it)?;
 
     fn trait_item(w: &mut fmt::Formatter, cx: &Context, m: &clean::Item, t: &clean::Item)
                   -> fmt::Result {
         let name = m.name.as_ref().unwrap();
         let id = derive_id(format!("{}.{}", shortty(m), name));
-        try!(write!(w, "<h3 id='{id}' class='method stab {stab}'><code>",
-                       id = id,
-                       stab = m.stability_class()));
-        try!(render_assoc_item(w, m, AssocItemLink::Anchor));
-        try!(write!(w, "</code>"));
-        try!(render_stability_since(w, m, t));
-        try!(write!(w, "</h3>"));
-        try!(document(w, cx, m));
+        write!(w, "<h3 id='{id}' class='method stab {stab}'><code>",
+               id = id,
+               stab = m.stability_class())?;
+        render_assoc_item(w, m, AssocItemLink::Anchor)?;
+        write!(w, "</code>")?;
+        render_stability_since(w, m, t)?;
+        write!(w, "</h3>")?;
+        document(w, cx, m)?;
         Ok(())
     }
 
     if !types.is_empty() {
-        try!(write!(w, "
+        write!(w, "
             <h2 id='associated-types'>Associated Types</h2>
             <div class='methods'>
-        "));
+        ")?;
         for t in &types {
-            try!(trait_item(w, cx, *t, it));
+            trait_item(w, cx, *t, it)?;
         }
-        try!(write!(w, "</div>"));
+        write!(w, "</div>")?;
     }
 
     if !consts.is_empty() {
-        try!(write!(w, "
+        write!(w, "
             <h2 id='associated-const'>Associated Constants</h2>
             <div class='methods'>
-        "));
+        ")?;
         for t in &consts {
-            try!(trait_item(w, cx, *t, it));
+            trait_item(w, cx, *t, it)?;
         }
-        try!(write!(w, "</div>"));
+        write!(w, "</div>")?;
     }
 
     // Output the documentation for each function individually
     if !required.is_empty() {
-        try!(write!(w, "
+        write!(w, "
             <h2 id='required-methods'>Required Methods</h2>
             <div class='methods'>
-        "));
+        ")?;
         for m in &required {
-            try!(trait_item(w, cx, *m, it));
+            trait_item(w, cx, *m, it)?;
         }
-        try!(write!(w, "</div>"));
+        write!(w, "</div>")?;
     }
     if !provided.is_empty() {
-        try!(write!(w, "
+        write!(w, "
             <h2 id='provided-methods'>Provided Methods</h2>
             <div class='methods'>
-        "));
+        ")?;
         for m in &provided {
-            try!(trait_item(w, cx, *m, it));
+            trait_item(w, cx, *m, it)?;
         }
-        try!(write!(w, "</div>"));
+        write!(w, "</div>")?;
     }
 
     // If there are methods directly on this trait object, render them here.
-    try!(render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All));
+    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)?;
 
     let cache = cache();
-    try!(write!(w, "
+    write!(w, "
         <h2 id='implementors'>Implementors</h2>
         <ul class='item-list' id='implementors-list'>
-    "));
+    ")?;
     match cache.implementors.get(&it.def_id) {
         Some(implementors) => {
             for i in implementors {
-                try!(writeln!(w, "<li><code>{}</code></li>", i.impl_));
+                writeln!(w, "<li><code>{}</code></li>", i.impl_)?;
             }
         }
         None => {}
     }
-    try!(write!(w, "</ul>"));
-    try!(write!(w, r#"<script type="text/javascript" async
-                              src="{root_path}/implementors/{path}/{ty}.{name}.js">
-                      </script>"#,
-                root_path = vec![".."; cx.current.len()].join("/"),
-                path = if it.def_id.is_local() {
-                    cx.current.join("/")
-                } else {
-                    let path = &cache.external_paths[&it.def_id];
-                    path[..path.len() - 1].join("/")
-                },
-                ty = shortty(it).to_static_str(),
-                name = *it.name.as_ref().unwrap()));
+    write!(w, "</ul>")?;
+    write!(w, r#"<script type="text/javascript" async
+                         src="{root_path}/implementors/{path}/{ty}.{name}.js">
+                 </script>"#,
+           root_path = vec![".."; cx.current.len()].join("/"),
+           path = if it.def_id.is_local() {
+               cx.current.join("/")
+           } else {
+               let path = &cache.external_paths[&it.def_id];
+               path[..path.len() - 1].join("/")
+           },
+           ty = shortty(it).to_static_str(),
+           name = *it.name.as_ref().unwrap())?;
     Ok(())
 }
 
-fn assoc_const(w: &mut fmt::Formatter, it: &clean::Item,
-               ty: &clean::Type, default: Option<&String>)
-               -> fmt::Result {
-    try!(write!(w, "const {}", it.name.as_ref().unwrap()));
-    try!(write!(w, ": {}", ty));
+fn naive_assoc_href(it: &clean::Item, link: AssocItemLink) -> String {
+    use html::item_type::ItemType::*;
+
+    let name = it.name.as_ref().unwrap();
+    let ty = match shortty(it) {
+        Typedef | AssociatedType => AssociatedType,
+        s@_ => s,
+    };
+
+    let anchor = format!("#{}.{}", ty, name);
+    match link {
+        AssocItemLink::Anchor => anchor,
+        AssocItemLink::GotoSource(did, _) => {
+            href(did).map(|p| format!("{}{}", p.0, anchor)).unwrap_or(anchor)
+        }
+    }
+}
+
+fn assoc_const(w: &mut fmt::Formatter,
+               it: &clean::Item,
+               ty: &clean::Type,
+               default: Option<&String>,
+               link: AssocItemLink) -> fmt::Result {
+    write!(w, "const <a href='{}' class='constant'>{}</a>",
+           naive_assoc_href(it, link),
+           it.name.as_ref().unwrap())?;
+
+    write!(w, ": {}", ty)?;
     if let Some(default) = default {
-        try!(write!(w, " = {}", default));
+        write!(w, " = {}", default)?;
     }
     Ok(())
 }
 
 fn assoc_type(w: &mut fmt::Formatter, it: &clean::Item,
               bounds: &Vec<clean::TyParamBound>,
-              default: &Option<clean::Type>)
-              -> fmt::Result {
-    try!(write!(w, "type {}", it.name.as_ref().unwrap()));
+              default: Option<&clean::Type>,
+              link: AssocItemLink) -> fmt::Result {
+    write!(w, "type <a href='{}' class='type'>{}</a>",
+           naive_assoc_href(it, link),
+           it.name.as_ref().unwrap())?;
     if !bounds.is_empty() {
-        try!(write!(w, ": {}", TyParamBounds(bounds)))
+        write!(w, ": {}", TyParamBounds(bounds))?
     }
-    if let Some(ref default) = *default {
-        try!(write!(w, " = {}", default));
+    if let Some(default) = default {
+        write!(w, " = {}", default)?;
     }
     Ok(())
 }
@@ -2117,17 +2083,12 @@ fn assoc_type(w: &mut fmt::Formatter, it: &clean::Item,
 fn render_stability_since_raw<'a>(w: &mut fmt::Formatter,
                                   ver: Option<&'a str>,
                                   containing_ver: Option<&'a str>) -> fmt::Result {
-    if containing_ver != ver {
-        match ver {
-            Some(v) =>
-                if v.len() > 0 {
-                        try!(write!(w, "<span class=\"since\">{}</span>",
-                                    v))
-                },
-            None => {}
+    if let Some(v) = ver {
+        if containing_ver != ver && v.len() > 0 {
+            write!(w, "<span class=\"since\">{}</span>",
+                   v)?
         }
     }
-
     Ok(())
 }
 
@@ -2137,10 +2098,11 @@ fn render_stability_since(w: &mut fmt::Formatter,
     render_stability_since_raw(w, item.stable_since(), containing_item.stable_since())
 }
 
-fn render_assoc_item(w: &mut fmt::Formatter, meth: &clean::Item,
+fn render_assoc_item(w: &mut fmt::Formatter,
+                     item: &clean::Item,
                      link: AssocItemLink) -> fmt::Result {
     fn method(w: &mut fmt::Formatter,
-              it: &clean::Item,
+              meth: &clean::Item,
               unsafety: hir::Unsafety,
               constness: hir::Constness,
               abi: abi::Abi,
@@ -2151,14 +2113,23 @@ fn render_assoc_item(w: &mut fmt::Formatter, meth: &clean::Item,
               -> fmt::Result {
         use syntax::abi::Abi;
 
-        let name = it.name.as_ref().unwrap();
-        let anchor = format!("#{}.{}", shortty(it), name);
+        let name = meth.name.as_ref().unwrap();
+        let anchor = format!("#{}.{}", shortty(meth), name);
         let href = match link {
             AssocItemLink::Anchor => anchor,
-            AssocItemLink::GotoSource(did) => {
-                href(did).map(|p| format!("{}{}", p.0, anchor)).unwrap_or(anchor)
+            AssocItemLink::GotoSource(did, provided_methods) => {
+                // We're creating a link from an impl-item to the corresponding
+                // trait-item and need to map the anchored type accordingly.
+                let ty = if provided_methods.contains(name) {
+                    ItemType::Method
+                } else {
+                    ItemType::TyMethod
+                };
+
+                href(did).map(|p| format!("{}#{}.{}", p.0, ty, name)).unwrap_or(anchor)
             }
         };
+        // FIXME(#24111): remove when `const_fn` is stabilized
         let vis_constness = match get_unstable_features_setting() {
             UnstableFeatures::Allow => constness,
             _ => hir::Constness::NotConst
@@ -2177,21 +2148,22 @@ fn render_assoc_item(w: &mut fmt::Formatter, meth: &clean::Item,
                decl = Method(selfty, d),
                where_clause = WhereClause(g))
     }
-    match meth.inner {
+    match item.inner {
+        clean::StrippedItem(..) => Ok(()),
         clean::TyMethodItem(ref m) => {
-            method(w, meth, m.unsafety, hir::Constness::NotConst,
+            method(w, item, m.unsafety, hir::Constness::NotConst,
                    m.abi, &m.generics, &m.self_, &m.decl, link)
         }
         clean::MethodItem(ref m) => {
-            method(w, meth, m.unsafety, m.constness,
+            method(w, item, m.unsafety, m.constness,
                    m.abi, &m.generics, &m.self_, &m.decl,
                    link)
         }
         clean::AssociatedConstItem(ref ty, ref default) => {
-            assoc_const(w, meth, ty, default.as_ref())
+            assoc_const(w, item, ty, default.as_ref(), link)
         }
         clean::AssociatedTypeItem(ref bounds, ref default) => {
-            assoc_type(w, meth, bounds, default)
+            assoc_type(w, item, bounds, default.as_ref(), link)
         }
         _ => panic!("render_assoc_item called on non-associated-item")
     }
@@ -2199,39 +2171,39 @@ fn render_assoc_item(w: &mut fmt::Formatter, meth: &clean::Item,
 
 fn item_struct(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
                s: &clean::Struct) -> fmt::Result {
-    try!(write!(w, "<pre class='rust struct'>"));
-    try!(render_attributes(w, it));
-    try!(render_struct(w,
-                       it,
-                       Some(&s.generics),
-                       s.struct_type,
-                       &s.fields,
-                       "",
-                       true));
-    try!(write!(w, "</pre>"));
-    try!(render_stability_since_raw(w, it.stable_since(), None));
-
-    try!(document(w, cx, it));
+    write!(w, "<pre class='rust struct'>")?;
+    render_attributes(w, it)?;
+    render_struct(w,
+                  it,
+                  Some(&s.generics),
+                  s.struct_type,
+                  &s.fields,
+                  "",
+                  true)?;
+    write!(w, "</pre>")?;
+    render_stability_since_raw(w, it.stable_since(), None)?;
+
+    document(w, cx, it)?;
     let mut fields = s.fields.iter().filter(|f| {
         match f.inner {
-            clean::StructFieldItem(clean::HiddenStructField) => false,
-            clean::StructFieldItem(clean::TypedStructField(..)) => true,
+            clean::StructFieldItem(..) => true,
             _ => false,
         }
     }).peekable();
     if let doctree::Plain = s.struct_type {
         if fields.peek().is_some() {
-            try!(write!(w, "<h2 class='fields'>Fields</h2>\n<table>"));
+            write!(w, "<h2 class='fields'>Fields</h2>\n<table>")?;
             for field in fields {
-                try!(write!(w, "<tr class='stab {stab}'>
-                                  <td id='structfield.{name}'>\
-                                    <code>{name}</code></td><td>",
-                            stab = field.stability_class(),
-                            name = field.name.as_ref().unwrap()));
-                try!(document(w, cx, field));
-                try!(write!(w, "</td></tr>"));
+                write!(w, "<tr class='stab {stab}'>
+                             <td id='{shortty}.{name}'>\
+                               <code>{name}</code></td><td>",
+                       shortty = ItemType::StructField,
+                       stab = field.stability_class(),
+                       name = field.name.as_ref().unwrap())?;
+                document(w, cx, field)?;
+                write!(w, "</td></tr>")?;
             }
-            try!(write!(w, "</table>"));
+            write!(w, "</table>")?;
         }
     }
     render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)
@@ -2239,104 +2211,96 @@ fn item_struct(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
 
 fn item_enum(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
              e: &clean::Enum) -> fmt::Result {
-    try!(write!(w, "<pre class='rust enum'>"));
-    try!(render_attributes(w, it));
-    try!(write!(w, "{}enum {}{}{}",
-                  VisSpace(it.visibility),
-                  it.name.as_ref().unwrap(),
-                  e.generics,
-                  WhereClause(&e.generics)));
+    write!(w, "<pre class='rust enum'>")?;
+    render_attributes(w, it)?;
+    write!(w, "{}enum {}{}{}",
+           VisSpace(&it.visibility),
+           it.name.as_ref().unwrap(),
+           e.generics,
+           WhereClause(&e.generics))?;
     if e.variants.is_empty() && !e.variants_stripped {
-        try!(write!(w, " {{}}"));
+        write!(w, " {{}}")?;
     } else {
-        try!(write!(w, " {{\n"));
+        write!(w, " {{\n")?;
         for v in &e.variants {
-            try!(write!(w, "    "));
+            write!(w, "    ")?;
             let name = v.name.as_ref().unwrap();
             match v.inner {
                 clean::VariantItem(ref var) => {
                     match var.kind {
-                        clean::CLikeVariant => try!(write!(w, "{}", name)),
+                        clean::CLikeVariant => write!(w, "{}", name)?,
                         clean::TupleVariant(ref tys) => {
-                            try!(write!(w, "{}(", name));
+                            write!(w, "{}(", name)?;
                             for (i, ty) in tys.iter().enumerate() {
                                 if i > 0 {
-                                    try!(write!(w, ", "))
+                                    write!(w, ", ")?
                                 }
-                                try!(write!(w, "{}", *ty));
+                                write!(w, "{}", *ty)?;
                             }
-                            try!(write!(w, ")"));
+                            write!(w, ")")?;
                         }
                         clean::StructVariant(ref s) => {
-                            try!(render_struct(w,
-                                               v,
-                                               None,
-                                               s.struct_type,
-                                               &s.fields,
-                                               "    ",
-                                               false));
+                            render_struct(w,
+                                          v,
+                                          None,
+                                          s.struct_type,
+                                          &s.fields,
+                                          "    ",
+                                          false)?;
                         }
                     }
                 }
                 _ => unreachable!()
             }
-            try!(write!(w, ",\n"));
+            write!(w, ",\n")?;
         }
 
         if e.variants_stripped {
-            try!(write!(w, "    // some variants omitted\n"));
+            write!(w, "    // some variants omitted\n")?;
         }
-        try!(write!(w, "}}"));
+        write!(w, "}}")?;
     }
-    try!(write!(w, "</pre>"));
-    try!(render_stability_since_raw(w, it.stable_since(), None));
+    write!(w, "</pre>")?;
+    render_stability_since_raw(w, it.stable_since(), None)?;
 
-    try!(document(w, cx, it));
+    document(w, cx, it)?;
     if !e.variants.is_empty() {
-        try!(write!(w, "<h2 class='variants'>Variants</h2>\n<table class='variants_table'>"));
+        write!(w, "<h2 class='variants'>Variants</h2>\n<table class='variants_table'>")?;
         for variant in &e.variants {
-            try!(write!(w, "<tr><td id='variant.{name}'><code>{name}</code></td><td>",
-                          name = variant.name.as_ref().unwrap()));
-            try!(document(w, cx, variant));
-            match variant.inner {
-                clean::VariantItem(ref var) => {
-                    match var.kind {
-                        clean::StructVariant(ref s) => {
-                            let fields = s.fields.iter().filter(|f| {
-                                match f.inner {
-                                    clean::StructFieldItem(ref t) => match *t {
-                                        clean::HiddenStructField => false,
-                                        clean::TypedStructField(..) => true,
-                                    },
-                                    _ => false,
-                                }
-                            });
-                            try!(write!(w, "<h3 class='fields'>Fields</h3>\n
-                                              <table>"));
-                            for field in fields {
-                                try!(write!(w, "<tr><td \
-                                                  id='variant.{v}.field.{f}'>\
-                                                  <code>{f}</code></td><td>",
-                                              v = variant.name.as_ref().unwrap(),
-                                              f = field.name.as_ref().unwrap()));
-                                try!(document(w, cx, field));
-                                try!(write!(w, "</td></tr>"));
-                            }
-                            try!(write!(w, "</table>"));
-                        }
-                        _ => ()
+            write!(w, "<tr><td id='{shortty}.{name}'><code>{name}</code></td><td>",
+                   shortty = ItemType::Variant,
+                   name = variant.name.as_ref().unwrap())?;
+            document(w, cx, variant)?;
+
+            use clean::{Variant, StructVariant};
+            if let clean::VariantItem( Variant { kind: StructVariant(ref s) } ) = variant.inner {
+                let fields = s.fields.iter().filter(|f| {
+                    match f.inner {
+                        clean::StructFieldItem(..) => true,
+                        _ => false,
                     }
+                });
+                write!(w, "<h3 class='fields'>Fields</h3>\n
+                           <table>")?;
+                for field in fields {
+                    write!(w, "<tr><td \
+                               id='{shortty}.{v}.field.{f}'>\
+                               <code>{f}</code></td><td>",
+                           shortty = ItemType::Variant,
+                           v = variant.name.as_ref().unwrap(),
+                           f = field.name.as_ref().unwrap())?;
+                    document(w, cx, field)?;
+                    write!(w, "</td></tr>")?;
                 }
-                _ => ()
+                write!(w, "</table>")?;
             }
-            try!(write!(w, "</td><td>"));
-            try!(render_stability_since(w, variant, it));
-            try!(write!(w, "</td></tr>"));
+            write!(w, "</td><td>")?;
+            render_stability_since(w, variant, it)?;
+            write!(w, "</td></tr>")?;
         }
-        try!(write!(w, "</table>"));
-
+        write!(w, "</table>")?;
     }
-    try!(render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All));
+    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)?;
     Ok(())
 }
 
@@ -2344,10 +2308,10 @@ fn render_attributes(w: &mut fmt::Formatter, it: &clean::Item) -> fmt::Result {
     for attr in &it.attrs {
         match *attr {
             clean::Word(ref s) if *s == "must_use" => {
-                try!(write!(w, "#[{}]\n", s));
+                write!(w, "#[{}]\n", s)?;
             }
             clean::NameValue(ref k, ref v) if *k == "must_use" => {
-                try!(write!(w, "#[{} = \"{}\"]\n", k, v));
+                write!(w, "#[{} = \"{}\"]\n", k, v)?;
             }
             _ => ()
         }
@@ -2361,68 +2325,60 @@ fn render_struct(w: &mut fmt::Formatter, it: &clean::Item,
                  fields: &[clean::Item],
                  tab: &str,
                  structhead: bool) -> fmt::Result {
-    try!(write!(w, "{}{}{}",
-                  VisSpace(it.visibility),
-                  if structhead {"struct "} else {""},
-                  it.name.as_ref().unwrap()));
-    match g {
-        Some(g) => try!(write!(w, "{}{}", *g, WhereClause(g))),
-        None => {}
+    write!(w, "{}{}{}",
+           VisSpace(&it.visibility),
+           if structhead {"struct "} else {""},
+           it.name.as_ref().unwrap())?;
+    if let Some(g) = g {
+        write!(w, "{}{}", *g, WhereClause(g))?
     }
     match ty {
         doctree::Plain => {
-            try!(write!(w, " {{\n{}", tab));
-            let mut fields_stripped = false;
+            write!(w, " {{\n{}", tab)?;
             for field in fields {
-                match field.inner {
-                    clean::StructFieldItem(clean::HiddenStructField) => {
-                        fields_stripped = true;
-                    }
-                    clean::StructFieldItem(clean::TypedStructField(ref ty)) => {
-                        try!(write!(w, "    {}{}: {},\n{}",
-                                      VisSpace(field.visibility),
-                                      field.name.as_ref().unwrap(),
-                                      *ty,
-                                      tab));
-                    }
-                    _ => unreachable!(),
-                };
+                if let clean::StructFieldItem(ref ty) = field.inner {
+                    write!(w, "    {}{}: {},\n{}",
+                           VisSpace(&field.visibility),
+                           field.name.as_ref().unwrap(),
+                           *ty,
+                           tab)?;
+                }
             }
 
-            if fields_stripped {
-                try!(write!(w, "    // some fields omitted\n{}", tab));
+            if it.has_stripped_fields().unwrap() {
+                write!(w, "    // some fields omitted\n{}", tab)?;
             }
-            try!(write!(w, "}}"));
+            write!(w, "}}")?;
         }
         doctree::Tuple | doctree::Newtype => {
-            try!(write!(w, "("));
+            write!(w, "(")?;
             for (i, field) in fields.iter().enumerate() {
                 if i > 0 {
-                    try!(write!(w, ", "));
+                    write!(w, ", ")?;
                 }
                 match field.inner {
-                    clean::StructFieldItem(clean::HiddenStructField) => {
-                        try!(write!(w, "_"))
+                    clean::StrippedItem(box clean::StructFieldItem(..)) => {
+                        write!(w, "_")?
                     }
-                    clean::StructFieldItem(clean::TypedStructField(ref ty)) => {
-                        try!(write!(w, "{}{}", VisSpace(field.visibility), *ty))
+                    clean::StructFieldItem(ref ty) => {
+                        write!(w, "{}{}", VisSpace(&field.visibility), *ty)?
                     }
                     _ => unreachable!()
                 }
             }
-            try!(write!(w, ");"));
+            write!(w, ");")?;
         }
         doctree::Unit => {
-            try!(write!(w, ";"));
+            write!(w, ";")?;
         }
     }
     Ok(())
 }
 
 #[derive(Copy, Clone)]
-enum AssocItemLink {
+enum AssocItemLink<'a> {
     Anchor,
-    GotoSource(DefId),
+    GotoSource(DefId, &'a HashSet<String>),
 }
 
 enum AssocItemRender<'a> {
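AssocItemLink now carries a borrowed set of provided-method names alongside the DefId, so a link from an impl item can decide whether the trait page anchor is a provided method or a required one. A self-contained sketch of that shape, with Link, anchor_type and a u32 stand-in for DefId as purely illustrative names:

use std::collections::HashSet;

#[derive(Copy, Clone)]
enum Link<'a> {
    Anchor,
    GotoSource(u32, &'a HashSet<String>),
}

fn anchor_type(link: Link, name: &str) -> &'static str {
    match link {
        Link::Anchor => "method",
        Link::GotoSource(_, provided) => {
            // provided methods keep the `method` anchor; required ones use `tymethod`
            if provided.contains(name) { "method" } else { "tymethod" }
        }
    }
}

fn main() {
    let provided: HashSet<String> = ["len".to_string()].into_iter().collect();
    assert_eq!(anchor_type(Link::Anchor, "len"), "method");
    assert_eq!(anchor_type(Link::GotoSource(0, &provided), "len"), "method");
    assert_eq!(anchor_type(Link::GotoSource(0, &provided), "next"), "tymethod");
}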
@@ -2446,53 +2402,48 @@ fn render_assoc_items(w: &mut fmt::Formatter,
     if !non_trait.is_empty() {
         let render_header = match what {
             AssocItemRender::All => {
-                try!(write!(w, "<h2 id='methods'>Methods</h2>"));
+                write!(w, "<h2 id='methods'>Methods</h2>")?;
                 true
             }
             AssocItemRender::DerefFor { trait_, type_ } => {
-                try!(write!(w, "<h2 id='deref-methods'>Methods from \
-                                    {}&lt;Target={}&gt;</h2>", trait_, type_));
+                write!(w, "<h2 id='deref-methods'>Methods from \
+                               {}&lt;Target={}&gt;</h2>", trait_, type_)?;
                 false
             }
         };
         for i in &non_trait {
-            try!(render_impl(w, cx, i, AssocItemLink::Anchor, render_header,
-                             containing_item.stable_since()));
+            render_impl(w, cx, i, AssocItemLink::Anchor, render_header,
+                        containing_item.stable_since())?;
         }
     }
     if let AssocItemRender::DerefFor { .. } = what {
-        return Ok(())
+        return Ok(());
     }
     if !traits.is_empty() {
         let deref_impl = traits.iter().find(|t| {
-            match *t.impl_.trait_.as_ref().unwrap() {
-                clean::ResolvedPath { did, .. } => {
-                    Some(did) == c.deref_trait_did
-                }
-                _ => false
-            }
+            t.impl_.trait_.def_id() == c.deref_trait_did
         });
         if let Some(impl_) = deref_impl {
-            try!(render_deref_methods(w, cx, impl_, containing_item));
+            render_deref_methods(w, cx, impl_, containing_item)?;
         }
-        try!(write!(w, "<h2 id='implementations'>Trait \
-                          Implementations</h2>"));
+        write!(w, "<h2 id='implementations'>Trait \
+                   Implementations</h2>")?;
         let (derived, manual): (Vec<_>, Vec<&Impl>) = traits.iter().partition(|i| {
             i.impl_.derived
         });
         for i in &manual {
             let did = i.trait_did().unwrap();
-            try!(render_impl(w, cx, i, AssocItemLink::GotoSource(did), true,
-                             containing_item.stable_since()));
+            let assoc_link = AssocItemLink::GotoSource(did, &i.impl_.provided_trait_methods);
+            render_impl(w, cx, i, assoc_link, true, containing_item.stable_since())?;
         }
         if !derived.is_empty() {
-            try!(write!(w, "<h3 id='derived_implementations'>\
-                Derived Implementations \
-            </h3>"));
+            write!(w, "<h3 id='derived_implementations'>\
+                           Derived Implementations \
+                       </h3>")?;
             for i in &derived {
                 let did = i.trait_did().unwrap();
-                try!(render_impl(w, cx, i, AssocItemLink::GotoSource(did), true,
-                                 containing_item.stable_since()));
+                let assoc_link = AssocItemLink::GotoSource(did, &i.impl_.provided_trait_methods);
+                render_impl(w, cx, i, assoc_link, true, containing_item.stable_since())?;
             }
         }
     }
@@ -2509,17 +2460,16 @@ fn render_deref_methods(w: &mut fmt::Formatter, cx: &Context, impl_: &Impl,
         }
     }).next().expect("Expected associated type binding");
     let what = AssocItemRender::DerefFor { trait_: deref_type, type_: target };
-    match *target {
-        clean::ResolvedPath { did, .. } => render_assoc_items(w, cx, container_item, did, what),
-        _ => {
-            if let Some(prim) = target.primitive_type() {
-                if let Some(c) = cache().primitive_locations.get(&prim) {
-                    let did = DefId { krate: *c, index: prim.to_def_index() };
-                    try!(render_assoc_items(w, cx, container_item, did, what));
-                }
+    if let Some(did) = target.def_id() {
+        render_assoc_items(w, cx, container_item, did, what)
+    } else {
+        if let Some(prim) = target.primitive_type() {
+            if let Some(c) = cache().primitive_locations.get(&prim) {
+                let did = DefId { krate: *c, index: prim.to_def_index() };
+                render_assoc_items(w, cx, container_item, did, what)?;
             }
-            Ok(())
         }
+        Ok(())
     }
 }
 
@@ -2529,123 +2479,118 @@ fn render_deref_methods(w: &mut fmt::Formatter, cx: &Context, impl_: &Impl,
 fn render_impl(w: &mut fmt::Formatter, cx: &Context, i: &Impl, link: AssocItemLink,
                render_header: bool, outer_version: Option<&str>) -> fmt::Result {
     if render_header {
-        try!(write!(w, "<h3 class='impl'><code>{}</code>", i.impl_));
+        write!(w, "<h3 class='impl'><code>{}</code>", i.impl_)?;
         let since = i.stability.as_ref().map(|s| &s.since[..]);
-        try!(render_stability_since_raw(w, since, outer_version));
-        try!(write!(w, "</h3>"));
+        render_stability_since_raw(w, since, outer_version)?;
+        write!(w, "</h3>")?;
         if let Some(ref dox) = i.dox {
-            try!(write!(w, "<div class='docblock'>{}</div>", Markdown(dox)));
+            write!(w, "<div class='docblock'>{}</div>", Markdown(dox))?;
         }
     }
 
     fn doctraititem(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item,
                     link: AssocItemLink, render_static: bool,
                     outer_version: Option<&str>) -> fmt::Result {
+        let shortty = shortty(item);
         let name = item.name.as_ref().unwrap();
+
+        let is_static = match item.inner {
+            clean::MethodItem(ref method) => method.self_ == SelfTy::SelfStatic,
+            clean::TyMethodItem(ref method) => method.self_ == SelfTy::SelfStatic,
+            _ => false
+        };
+
         match item.inner {
             clean::MethodItem(..) | clean::TyMethodItem(..) => {
                 // Only render when the method is not static or we allow static methods
-                if !is_static_method(item) || render_static {
-                    let id = derive_id(format!("method.{}", name));
-                    try!(write!(w, "<h4 id='{}' class='{}'>", id, shortty(item)));
-                    try!(render_stability_since_raw(w, item.stable_since(), outer_version));
-                    try!(write!(w, "<code>"));
-                    try!(render_assoc_item(w, item, link));
-                    try!(write!(w, "</code></h4>\n"));
+                if !is_static || render_static {
+                    let id = derive_id(format!("{}.{}", shortty, name));
+                    write!(w, "<h4 id='{}' class='{}'>", id, shortty)?;
+                    render_stability_since_raw(w, item.stable_since(), outer_version)?;
+                    write!(w, "<code>")?;
+                    render_assoc_item(w, item, link)?;
+                    write!(w, "</code></h4>\n")?;
                 }
             }
             clean::TypedefItem(ref tydef, _) => {
-                let id = derive_id(format!("associatedtype.{}", name));
-                try!(write!(w, "<h4 id='{}' class='{}'><code>", id, shortty(item)));
-                try!(write!(w, "type {} = {}", name, tydef.type_));
-                try!(write!(w, "</code></h4>\n"));
+                let id = derive_id(format!("{}.{}", ItemType::AssociatedType, name));
+                write!(w, "<h4 id='{}' class='{}'><code>", id, shortty)?;
+                assoc_type(w, item, &Vec::new(), Some(&tydef.type_), link)?;
+                write!(w, "</code></h4>\n")?;
             }
             clean::AssociatedConstItem(ref ty, ref default) => {
-                let id = derive_id(format!("associatedconstant.{}", name));
-                try!(write!(w, "<h4 id='{}' class='{}'><code>", id, shortty(item)));
-                try!(assoc_const(w, item, ty, default.as_ref()));
-                try!(write!(w, "</code></h4>\n"));
+                let id = derive_id(format!("{}.{}", shortty, name));
+                write!(w, "<h4 id='{}' class='{}'><code>", id, shortty)?;
+                assoc_const(w, item, ty, default.as_ref(), link)?;
+                write!(w, "</code></h4>\n")?;
             }
             clean::ConstantItem(ref c) => {
-                let id = derive_id(format!("associatedconstant.{}", name));
-                try!(write!(w, "<h4 id='{}' class='{}'><code>", id, shortty(item)));
-                try!(assoc_const(w, item, &c.type_, Some(&c.expr)));
-                try!(write!(w, "</code></h4>\n"));
+                let id = derive_id(format!("{}.{}", shortty, name));
+                write!(w, "<h4 id='{}' class='{}'><code>", id, shortty)?;
+                assoc_const(w, item, &c.type_, Some(&c.expr), link)?;
+                write!(w, "</code></h4>\n")?;
             }
             clean::AssociatedTypeItem(ref bounds, ref default) => {
-                let id = derive_id(format!("associatedtype.{}", name));
-                try!(write!(w, "<h4 id='{}' class='{}'><code>", id, shortty(item)));
-                try!(assoc_type(w, item, bounds, default));
-                try!(write!(w, "</code></h4>\n"));
+                let id = derive_id(format!("{}.{}", shortty, name));
+                write!(w, "<h4 id='{}' class='{}'><code>", id, shortty)?;
+                assoc_type(w, item, bounds, default.as_ref(), link)?;
+                write!(w, "</code></h4>\n")?;
             }
+            clean::StrippedItem(..) => return Ok(()),
             _ => panic!("can't make docs for trait item with name {:?}", item.name)
         }
 
-        return if let AssocItemLink::Anchor = link {
-            if is_static_method(item) && !render_static {
-                Ok(())
-            } else {
+        match link {
+            AssocItemLink::Anchor if !is_static || render_static => {
                 document(w, cx, item)
-            }
-        } else {
-            Ok(())
-        };
-
-        fn is_static_method(item: &clean::Item) -> bool {
-            match item.inner {
-                clean::MethodItem(ref method) => method.self_ == SelfTy::SelfStatic,
-                clean::TyMethodItem(ref method) => method.self_ == SelfTy::SelfStatic,
-                _ => false
-            }
+            },
+            _ => Ok(()),
         }
     }
 
-    try!(write!(w, "<div class='impl-items'>"));
+    write!(w, "<div class='impl-items'>")?;
     for trait_item in &i.impl_.items {
-        try!(doctraititem(w, cx, trait_item, link, render_header, outer_version));
+        doctraititem(w, cx, trait_item, link, render_header, outer_version)?;
     }
 
     fn render_default_items(w: &mut fmt::Formatter,
                             cx: &Context,
-                            did: DefId,
                             t: &clean::Trait,
-                              i: &clean::Impl,
-                              render_static: bool,
-                              outer_version: Option<&str>) -> fmt::Result {
+                            i: &clean::Impl,
+                            render_static: bool,
+                            outer_version: Option<&str>) -> fmt::Result {
         for trait_item in &t.items {
             let n = trait_item.name.clone();
-            match i.items.iter().find(|m| { m.name == n }) {
-                Some(..) => continue,
-                None => {}
+            if i.items.iter().find(|m| m.name == n).is_some() {
+                continue;
             }
+            let did = i.trait_.as_ref().unwrap().def_id().unwrap();
+            let assoc_link = AssocItemLink::GotoSource(did, &i.provided_trait_methods);
 
-            try!(doctraititem(w, cx, trait_item, AssocItemLink::GotoSource(did), render_static,
-                              outer_version));
+            doctraititem(w, cx, trait_item, assoc_link, render_static,
+                         outer_version)?;
         }
         Ok(())
     }
 
     // If we've implemented a trait, then also emit documentation for all
-    // default methods which weren't overridden in the implementation block.
-    // FIXME: this also needs to be done for associated types, whenever defaults
-    // for them work.
-    if let Some(clean::ResolvedPath { did, .. }) = i.impl_.trait_ {
+    // default items which weren't overridden in the implementation block.
+    if let Some(did) = i.trait_did() {
         if let Some(t) = cache().traits.get(&did) {
-            try!(render_default_items(w, cx, did, t, &i.impl_, render_header, outer_version));
-
+            render_default_items(w, cx, t, &i.impl_, render_header, outer_version)?;
         }
     }
-    try!(write!(w, "</div>"));
+    write!(w, "</div>")?;
     Ok(())
 }
 
 fn item_typedef(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
                 t: &clean::Typedef) -> fmt::Result {
-    try!(write!(w, "<pre class='rust typedef'>type {}{}{where_clause} = {type_};</pre>",
-                  it.name.as_ref().unwrap(),
-                  t.generics,
-                  where_clause = WhereClause(&t.generics),
-                  type_ = t.type_));
+    write!(w, "<pre class='rust typedef'>type {}{}{where_clause} = {type_};</pre>",
+           it.name.as_ref().unwrap(),
+           t.generics,
+           where_clause = WhereClause(&t.generics),
+           type_ = t.type_)?;
 
     document(w, cx, it)
 }
@@ -2664,34 +2609,34 @@ impl<'a> fmt::Display for Sidebar<'a> {
         // as much HTML as possible in order to allow non-JS-enabled browsers
         // to navigate the documentation (though slightly inefficiently).
 
-        try!(write!(fmt, "<p class='location'>"));
+        write!(fmt, "<p class='location'>")?;
         for (i, name) in cx.current.iter().take(parentlen).enumerate() {
             if i > 0 {
-                try!(write!(fmt, "::<wbr>"));
+                write!(fmt, "::<wbr>")?;
             }
-            try!(write!(fmt, "<a href='{}index.html'>{}</a>",
-                          &cx.root_path[..(cx.current.len() - i - 1) * 3],
-                          *name));
+            write!(fmt, "<a href='{}index.html'>{}</a>",
+                   &cx.root_path[..(cx.current.len() - i - 1) * 3],
+                   *name)?;
         }
-        try!(write!(fmt, "</p>"));
+        write!(fmt, "</p>")?;
 
         // sidebar refers to the enclosing module, not this module
-        let relpath = if shortty(it) == ItemType::Module { "../" } else { "" };
-        try!(write!(fmt,
-                    "<script>window.sidebarCurrent = {{\
-                        name: '{name}', \
-                        ty: '{ty}', \
-                        relpath: '{path}'\
-                     }};</script>",
-                    name = it.name.as_ref().map(|x| &x[..]).unwrap_or(""),
-                    ty = shortty(it).to_static_str(),
-                    path = relpath));
+        let relpath = if it.is_mod() { "../" } else { "" };
+        write!(fmt,
+               "<script>window.sidebarCurrent = {{\
+                   name: '{name}', \
+                   ty: '{ty}', \
+                   relpath: '{path}'\
+                }};</script>",
+               name = it.name.as_ref().map(|x| &x[..]).unwrap_or(""),
+               ty = shortty(it).to_static_str(),
+               path = relpath)?;
         if parentlen == 0 {
             // there is no sidebar-items.js beyond the crate root path
             // FIXME maybe dynamic crate loading can be merged here
         } else {
-            try!(write!(fmt, "<script defer src=\"{path}sidebar-items.js\"></script>",
-                        path = relpath));
+            write!(fmt, "<script defer src=\"{path}sidebar-items.js\"></script>",
+                   path = relpath)?;
         }
 
         Ok(())
@@ -2708,38 +2653,36 @@ impl<'a> fmt::Display for Source<'a> {
             cols += 1;
             tmp /= 10;
         }
-        try!(write!(fmt, "<pre class=\"line-numbers\">"));
+        write!(fmt, "<pre class=\"line-numbers\">")?;
         for i in 1..lines + 1 {
-            try!(write!(fmt, "<span id=\"{0}\">{0:1$}</span>\n", i, cols));
+            write!(fmt, "<span id=\"{0}\">{0:1$}</span>\n", i, cols)?;
         }
-        try!(write!(fmt, "</pre>"));
-        try!(write!(fmt, "{}", highlight::highlight(s, None, None)));
+        write!(fmt, "</pre>")?;
+        write!(fmt, "{}", highlight::render_with_highlighting(s, None, None))?;
         Ok(())
     }
 }
 
 fn item_macro(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
               t: &clean::Macro) -> fmt::Result {
-    try!(w.write_str(&highlight::highlight(&t.source,
-                                          Some("macro"),
-                                          None)));
-    try!(render_stability_since_raw(w, it.stable_since(), None));
+    w.write_str(&highlight::render_with_highlighting(&t.source,
+                                                     Some("macro"),
+                                                     None))?;
+    render_stability_since_raw(w, it.stable_since(), None)?;
     document(w, cx, it)
 }
 
 fn item_primitive(w: &mut fmt::Formatter, cx: &Context,
                   it: &clean::Item,
                   _p: &clean::PrimitiveType) -> fmt::Result {
-    try!(document(w, cx, it));
+    document(w, cx, it)?;
     render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)
 }
 
-fn get_basic_keywords() -> &'static str {
-    "rust, rustlang, rust-lang"
-}
+const BASIC_KEYWORDS: &'static str = "rust, rustlang, rust-lang";
 
 fn make_item_keywords(it: &clean::Item) -> String {
-    format!("{}, {}", get_basic_keywords(), it.name.as_ref().unwrap())
+    format!("{}, {}", BASIC_KEYWORDS, it.name.as_ref().unwrap())
 }
 
 fn get_index_search_type(item: &clean::Item,
@@ -2808,6 +2751,6 @@ fn test_unique_id() {
         assert_eq!(&actual[..], expected);
     };
     test();
-    reset_ids();
+    reset_ids(true);
     test();
 }
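The bulk of the hunks above mechanically replace try!(expr) with the ? operator; at this point in the tree that still sat behind #![feature(question_mark)], which a later hunk enables for the crate. A standalone sketch of the same pattern in a Display impl, using a made-up Tag type rather than any rustdoc code:

use std::fmt;

struct Tag<'a> { name: &'a str, body: &'a str }

impl<'a> fmt::Display for Tag<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // each ? propagates fmt::Error early, exactly like the old try!(write!(..))
        write!(f, "<{}>", self.name)?;
        write!(f, "{}", self.body)?;
        write!(f, "</{}>", self.name)
    }
}

fn main() {
    println!("{}", Tag { name: "code", body: "x + 1" });
}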
index f3efbcb1db3080f107e3fe7ca3250a810bf71b14..1d1e78926f1208c7eec107e24e651c7da5fb12a2 100644 (file)
             $(".search-input").on("keyup input",function() {
                 clearTimeout(searchTimeout);
                 if ($(this).val().length === 0) {
-                    window.history.replaceState("", "std - Rust", "?search=");
+                    if (browserSupportsHistoryApi()) {
+                        history.replaceState("", "std - Rust", "?search=");
+                    }
                     $('#main.content').removeClass('hidden');
                     $('#search.content').addClass('hidden');
                 } else {
         var prev_id = 0;
 
         function set_fragment(name) {
-            if (history.replaceState) {
+            if (browserSupportsHistoryApi()) {
                 history.replaceState(null, null, '#' + name);
                 $(window).trigger('hashchange');
             } else {
index 1cadc4e476ac4512d18004cdb42a6caaa569cb82..c07871a4029cae1840eb3e8bf2133b15e6d4b66d 100644 (file)
@@ -616,15 +616,19 @@ a.test-arrow {
     }
 
     .sidebar .location {
-        float: left;
+        float: right;
         margin: 0px;
-        padding: 5px;
-        width: 60%;
+        padding: 3px 10px 1px 10px;
+        min-height: 39px;
         background: inherit;
         text-align: left;
         font-size: 24px;
     }
 
+    .sidebar .location:empty {
+        padding: 0;
+    }
+
     .sidebar img {
         width: 35px;
         margin-top: 5px;
index 53be8b5bc06c8e2cd440ce0c90ea397c41f127d8..305e6258baa41c31ee7f7910682c04569b782900 100644 (file)
@@ -183,15 +183,15 @@ impl fmt::Debug for Toc {
 
 impl fmt::Display for Toc {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        try!(write!(fmt, "<ul>"));
+        write!(fmt, "<ul>")?;
         for entry in &self.entries {
             // recursively format this table of contents (the
             // `{children}` is the key).
-            try!(write!(fmt,
-                        "\n<li><a href=\"#{id}\">{num} {name}</a>{children}</li>",
-                        id = entry.id,
-                        num = entry.sec_number, name = entry.name,
-                        children = entry.children))
+            write!(fmt,
+                   "\n<li><a href=\"#{id}\">{num} {name}</a>{children}</li>",
+                   id = entry.id,
+                   num = entry.sec_number, name = entry.name,
+                   children = entry.children)?
         }
         write!(fmt, "</ul>")
     }
index ffb15d157b0666e10bb3948f41344c789af8b95d..bc7c7c5e0caf06d80ce8e6d6bd8e175b5cb6bdb0 100644 (file)
 
 #![feature(box_patterns)]
 #![feature(box_syntax)]
-#![feature(dynamic_lib)]
 #![feature(libc)]
-#![feature(recover)]
 #![feature(rustc_private)]
 #![feature(set_stdio)]
 #![feature(slice_patterns)]
 #![feature(staged_api)]
-#![feature(std_panic)]
 #![feature(test)]
 #![feature(unicode)]
+#![feature(question_mark)]
 
 extern crate arena;
 extern crate getopts;
 extern crate libc;
 extern crate rustc;
+extern crate rustc_const_eval;
 extern crate rustc_trans;
 extern crate rustc_driver;
 extern crate rustc_resolve;
 extern crate rustc_lint;
 extern crate rustc_back;
-extern crate rustc_front;
 extern crate rustc_metadata;
 extern crate serialize;
-extern crate syntax;
+#[macro_use] extern crate syntax;
 extern crate test as testing;
 extern crate rustc_unicode;
 #[macro_use] extern crate log;
@@ -54,21 +52,15 @@ use std::cell::RefCell;
 use std::collections::HashMap;
 use std::default::Default;
 use std::env;
-use std::fs::File;
-use std::io::{self, Read, Write};
+use std::io::Read;
 use std::path::PathBuf;
 use std::process;
 use std::rc::Rc;
 use std::sync::mpsc::channel;
 
 use externalfiles::ExternalHtml;
-use serialize::Decodable;
-use serialize::json::{self, Json};
 use rustc::session::search_paths::SearchPaths;
-use rustc::session::config::ErrorOutputType;
-
-// reexported from `clean` so it can be easily updated with the mod itself
-pub use clean::SCHEMA_VERSION;
+use rustc::session::config::{ErrorOutputType, RustcOptGroup, nightly_options};
 
 #[macro_use]
 pub mod externalfiles;
@@ -94,6 +86,8 @@ pub mod visit_ast;
 pub mod test;
 mod flock;
 
+use clean::Attributes;
+
 type Pass = (&'static str,                                      // name
              fn(clean::Crate) -> plugins::PluginResult,         // fn
              &'static str);                                     // description
@@ -106,7 +100,10 @@ const PASSES: &'static [Pass] = &[
     ("collapse-docs", passes::collapse_docs,
      "concatenates all document attributes into one document attribute"),
     ("strip-private", passes::strip_private,
-     "strips all private items from a crate which cannot be seen externally"),
+     "strips all private items from a crate which cannot be seen externally, \
+      implies strip-priv-imports"),
+    ("strip-priv-imports", passes::strip_priv_imports,
+     "strips all private import statements (`use`, `extern crate`) from a crate"),
 ];
 
 const DEFAULT_PASSES: &'static [&'static str] = &[
@@ -122,7 +119,6 @@ thread_local!(pub static ANALYSISKEY: Rc<RefCell<Option<core::CrateAnalysis>>> =
 
 struct Output {
     krate: clean::Crate,
-    json_plugins: Vec<plugins::PluginJson>,
     passes: Vec<String>,
 }
 
@@ -135,68 +131,87 @@ pub fn main() {
     process::exit(res as i32);
 }
 
-pub fn opts() -> Vec<getopts::OptGroup> {
+fn stable(g: getopts::OptGroup) -> RustcOptGroup { RustcOptGroup::stable(g) }
+fn unstable(g: getopts::OptGroup) -> RustcOptGroup { RustcOptGroup::unstable(g) }
+
+pub fn opts() -> Vec<RustcOptGroup> {
     use getopts::*;
     vec!(
-        optflag("h", "help", "show this help message"),
-        optflag("V", "version", "print rustdoc's version"),
-        optflag("v", "verbose", "use verbose output"),
-        optopt("r", "input-format", "the input type of the specified file",
-               "[rust|json]"),
-        optopt("w", "output-format", "the output type to write",
-               "[html|json]"),
-        optopt("o", "output", "where to place the output", "PATH"),
-        optopt("", "crate-name", "specify the name of this crate", "NAME"),
-        optmulti("L", "library-path", "directory to add to crate search path",
-                 "DIR"),
-        optmulti("", "cfg", "pass a --cfg to rustc", ""),
-        optmulti("", "extern", "pass an --extern to rustc", "NAME=PATH"),
-        optmulti("", "plugin-path", "directory to load plugins from", "DIR"),
-        optmulti("", "passes", "list of passes to also run, you might want \
-                                to pass it multiple times; a value of `list` \
-                                will print available passes",
-                 "PASSES"),
-        optmulti("", "plugins", "space separated list of plugins to also load",
-                 "PLUGINS"),
-        optflag("", "no-defaults", "don't run the default passes"),
-        optflag("", "test", "run code examples as tests"),
-        optmulti("", "test-args", "arguments to pass to the test runner",
-                 "ARGS"),
-        optopt("", "target", "target triple to document", "TRIPLE"),
-        optmulti("", "markdown-css", "CSS files to include via <link> in a rendered Markdown file",
-                 "FILES"),
-        optmulti("", "html-in-header",
-                 "files to include inline in the <head> section of a rendered Markdown file \
-                 or generated documentation",
-                 "FILES"),
-        optmulti("", "html-before-content",
-                 "files to include inline between <body> and the content of a rendered \
-                 Markdown file or generated documentation",
-                 "FILES"),
-        optmulti("", "html-after-content",
-                 "files to include inline between the content and </body> of a rendered \
-                 Markdown file or generated documentation",
-                 "FILES"),
-        optopt("", "markdown-playground-url",
-               "URL to send code snippets to", "URL"),
-        optflag("", "markdown-no-toc", "don't include table of contents")
+        stable(optflag("h", "help", "show this help message")),
+        stable(optflag("V", "version", "print rustdoc's version")),
+        stable(optflag("v", "verbose", "use verbose output")),
+        stable(optopt("r", "input-format", "the input type of the specified file",
+                      "[rust]")),
+        stable(optopt("w", "output-format", "the output type to write",
+                      "[html]")),
+        stable(optopt("o", "output", "where to place the output", "PATH")),
+        stable(optopt("", "crate-name", "specify the name of this crate", "NAME")),
+        stable(optmulti("L", "library-path", "directory to add to crate search path",
+                        "DIR")),
+        stable(optmulti("", "cfg", "pass a --cfg to rustc", "")),
+        stable(optmulti("", "extern", "pass an --extern to rustc", "NAME=PATH")),
+        stable(optmulti("", "plugin-path", "directory to load plugins from", "DIR")),
+        stable(optmulti("", "passes",
+                        "list of passes to also run, you might want \
+                         to pass it multiple times; a value of `list` \
+                         will print available passes",
+                        "PASSES")),
+        stable(optmulti("", "plugins", "space separated list of plugins to also load",
+                        "PLUGINS")),
+        stable(optflag("", "no-defaults", "don't run the default passes")),
+        stable(optflag("", "test", "run code examples as tests")),
+        stable(optmulti("", "test-args", "arguments to pass to the test runner",
+                        "ARGS")),
+        stable(optopt("", "target", "target triple to document", "TRIPLE")),
+        stable(optmulti("", "markdown-css",
+                        "CSS files to include via <link> in a rendered Markdown file",
+                        "FILES")),
+        stable(optmulti("", "html-in-header",
+                        "files to include inline in the <head> section of a rendered Markdown file \
+                         or generated documentation",
+                        "FILES")),
+        stable(optmulti("", "html-before-content",
+                        "files to include inline between <body> and the content of a rendered \
+                         Markdown file or generated documentation",
+                        "FILES")),
+        stable(optmulti("", "html-after-content",
+                        "files to include inline between the content and </body> of a rendered \
+                         Markdown file or generated documentation",
+                        "FILES")),
+        stable(optopt("", "markdown-playground-url",
+                      "URL to send code snippets to", "URL")),
+        stable(optflag("", "markdown-no-toc", "don't include table of contents")),
+        unstable(optopt("e", "extend-css",
+                        "redefine some CSS rules with a given file to generate documentation \
+                         with your own theme", "PATH")),
+        unstable(optmulti("Z", "",
+                          "internal and debugging options (only on nightly build)", "FLAG")),
     )
 }
 
 pub fn usage(argv0: &str) {
     println!("{}",
              getopts::usage(&format!("{} [options] <input>", argv0),
-                            &opts()));
+                            &opts().into_iter()
+                                   .map(|x| x.opt_group)
+                                   .collect::<Vec<getopts::OptGroup>>()));
 }
 
 pub fn main_args(args: &[String]) -> isize {
-    let matches = match getopts::getopts(&args[1..], &opts()) {
+    let all_groups: Vec<getopts::OptGroup> = opts()
+                                             .into_iter()
+                                             .map(|x| x.opt_group)
+                                             .collect();
+    let matches = match getopts::getopts(&args[1..], &all_groups) {
         Ok(m) => m,
         Err(err) => {
             println!("{}", err);
             return 1;
         }
     };
+    // Check for unstable options.
+    nightly_options::check_nightly_options(&matches, &opts());
+
     if matches.opt_present("h") || matches.opt_present("help") {
         usage(&args[0]);
         return 0;
@@ -248,8 +263,16 @@ pub fn main_args(args: &[String]) -> isize {
     let markdown_input = input.ends_with(".md") || input.ends_with(".markdown");
 
     let output = matches.opt_str("o").map(|s| PathBuf::from(&s));
+    let css_file_extension = matches.opt_str("e").map(|s| PathBuf::from(&s));
     let cfgs = matches.opt_strs("cfg");
 
+    if let Some(ref p) = css_file_extension {
+        if !p.is_file() {
+            println!("--extend-css option must take a css file as input");
+            return 1;
+        }
+    }
+
     let external_html = match ExternalHtml::load(
             &matches.opt_strs("html-in-header"),
             &matches.opt_strs("html-before-content"),
@@ -279,23 +302,15 @@ pub fn main_args(args: &[String]) -> isize {
             return 1;
         }
     };
-    let Output { krate, json_plugins, passes, } = out;
+    let Output { krate, passes, } = out;
     info!("going to format");
     match matches.opt_str("w").as_ref().map(|s| &**s) {
         Some("html") | None => {
-            match html::render::run(krate, &external_html,
-                                    output.unwrap_or(PathBuf::from("doc")),
-                                    passes.into_iter().collect()) {
-                Ok(()) => {}
-                Err(e) => panic!("failed to generate documentation: {}", e),
-            }
-        }
-        Some("json") => {
-            match json_output(krate, json_plugins,
-                              output.unwrap_or(PathBuf::from("doc.json"))) {
-                Ok(()) => {}
-                Err(e) => panic!("failed to write json: {}", e),
-            }
+            html::render::run(krate, &external_html,
+                              output.unwrap_or(PathBuf::from("doc")),
+                              passes.into_iter().collect(),
+                              css_file_extension)
+                .expect("failed to generate documentation")
         }
         Some(s) => {
             println!("unknown output format: {}", s);
@@ -313,14 +328,9 @@ fn acquire_input(input: &str,
                  matches: &getopts::Matches) -> Result<Output, String> {
     match matches.opt_str("r").as_ref().map(|s| &**s) {
         Some("rust") => Ok(rust_input(input, externs, matches)),
-        Some("json") => json_input(input),
         Some(s) => Err(format!("unknown input format: {}", s)),
         None => {
-            if input.ends_with(".json") {
-                json_input(input)
-            } else {
-                Ok(rust_input(input, externs, matches))
-            }
+            Ok(rust_input(input, externs, matches))
         }
     }
 }
@@ -332,18 +342,10 @@ fn parse_externs(matches: &getopts::Matches) -> Result<core::Externs, String> {
     let mut externs = HashMap::new();
     for arg in &matches.opt_strs("extern") {
         let mut parts = arg.splitn(2, '=');
-        let name = match parts.next() {
-            Some(s) => s,
-            None => {
-                return Err("--extern value must not be empty".to_string());
-            }
-        };
-        let location = match parts.next() {
-            Some(s) => s,
-            None => {
-                return Err("--extern value must be of the format `foo=bar`".to_string());
-            }
-        };
+        let name = parts.next().ok_or("--extern value must not be empty".to_string())?;
+        let location = parts.next()
+                                 .ok_or("--extern value must be of the format `foo=bar`"
+                                    .to_string())?;
         let name = name.to_string();
         externs.entry(name).or_insert(vec![]).push(location.to_string());
     }
@@ -391,32 +393,25 @@ fn rust_input(cratefile: &str, externs: core::Externs, matches: &getopts::Matche
 
     // Process all of the crate attributes, extracting plugin metadata along
     // with the passes which we are supposed to run.
-    match krate.module.as_ref().unwrap().doc_list() {
-        Some(nested) => {
-            for inner in nested {
-                match *inner {
-                    clean::Word(ref x)
-                            if "no_default_passes" == *x => {
-                        default_passes = false;
-                    }
-                    clean::NameValue(ref x, ref value)
-                            if "passes" == *x => {
-                        for pass in value.split_whitespace() {
-                            passes.push(pass.to_string());
-                        }
-                    }
-                    clean::NameValue(ref x, ref value)
-                            if "plugins" == *x => {
-                        for p in value.split_whitespace() {
-                            plugins.push(p.to_string());
-                        }
-                    }
-                    _ => {}
+    for attr in krate.module.as_ref().unwrap().attrs.list("doc") {
+        match *attr {
+            clean::Word(ref w) if "no_default_passes" == *w => {
+                default_passes = false;
+            },
+            clean::NameValue(ref name, ref value) => {
+                let sink = match &name[..] {
+                    "passes" => &mut passes,
+                    "plugins" => &mut plugins,
+                    _ => continue,
+                };
+                for p in value.split_whitespace() {
+                    sink.push(p.to_string());
                 }
             }
+            _ => (),
         }
-        None => {}
     }
+
     if default_passes {
         for name in DEFAULT_PASSES.iter().rev() {
             passes.insert(0, name.to_string());
@@ -447,85 +442,6 @@ fn rust_input(cratefile: &str, externs: core::Externs, matches: &getopts::Matche
 
     // Run everything!
     info!("Executing passes/plugins");
-    let (krate, json) = pm.run_plugins(krate);
-    return Output { krate: krate, json_plugins: json, passes: passes, };
-}
-
-/// This input format purely deserializes the json output file. No passes are
-/// run over the deserialized output.
-fn json_input(input: &str) -> Result<Output, String> {
-    let mut bytes = Vec::new();
-    match File::open(input).and_then(|mut f| f.read_to_end(&mut bytes)) {
-        Ok(_) => {}
-        Err(e) => return Err(format!("couldn't open {}: {}", input, e)),
-    };
-    match json::from_reader(&mut &bytes[..]) {
-        Err(s) => Err(format!("{:?}", s)),
-        Ok(Json::Object(obj)) => {
-            let mut obj = obj;
-            // Make sure the schema is what we expect
-            match obj.remove(&"schema".to_string()) {
-                Some(Json::String(version)) => {
-                    if version != SCHEMA_VERSION {
-                        return Err(format!(
-                                "sorry, but I only understand version {}",
-                                SCHEMA_VERSION))
-                    }
-                }
-                Some(..) => return Err("malformed json".to_string()),
-                None => return Err("expected a schema version".to_string()),
-            }
-            let krate = match obj.remove(&"crate".to_string()) {
-                Some(json) => {
-                    let mut d = json::Decoder::new(json);
-                    Decodable::decode(&mut d).unwrap()
-                }
-                None => return Err("malformed json".to_string()),
-            };
-            // FIXME: this should read from the "plugins" field, but currently
-            //      Json doesn't implement decodable...
-            let plugin_output = Vec::new();
-            Ok(Output { krate: krate, json_plugins: plugin_output, passes: Vec::new(), })
-        }
-        Ok(..) => {
-            Err("malformed json input: expected an object at the \
-                 top".to_string())
-        }
-    }
-}
-
-/// Outputs the crate/plugin json as a giant json blob at the specified
-/// destination.
-fn json_output(krate: clean::Crate, res: Vec<plugins::PluginJson> ,
-               dst: PathBuf) -> io::Result<()> {
-    // {
-    //   "schema": version,
-    //   "crate": { parsed crate ... },
-    //   "plugins": { output of plugins ... }
-    // }
-    let mut json = std::collections::BTreeMap::new();
-    json.insert("schema".to_string(), Json::String(SCHEMA_VERSION.to_string()));
-    let plugins_json = res.into_iter()
-                          .filter_map(|opt| {
-                              match opt {
-                                  None => None,
-                                  Some((string, json)) => {
-                                      Some((string.to_string(), json))
-                                  }
-                              }
-                          }).collect();
-
-    // FIXME #8335: yuck, Rust -> str -> JSON round trip! No way to .encode
-    // straight to the Rust JSON representation.
-    let crate_json_str = format!("{}", json::as_json(&krate));
-    let crate_json = match json::from_str(&crate_json_str) {
-        Ok(j) => j,
-        Err(e) => panic!("Rust generated JSON is invalid: {:?}", e)
-    };
-
-    json.insert("crate".to_string(), crate_json);
-    json.insert("plugins".to_string(), Json::Object(plugins_json));
-
-    let mut file = try!(File::create(&dst));
-    write!(&mut file, "{}", Json::Object(json))
+    let krate = pm.run_plugins(krate);
+    Output { krate: krate, passes: passes }
 }
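
The `parse_externs` hunk above collapses two explicit `match`es over `Option` into `Option::ok_or(..)?`, turning a missing value into an early-returned error in one step. A stand-alone sketch of the same `--extern NAME=PATH` parsing pattern (the function name and error strings below only mirror the diff; this is not the actual rustdoc function):

    // Splits "NAME=PATH" and reports a readable error if either half is missing.
    fn parse_extern(arg: &str) -> Result<(String, String), String> {
        let mut parts = arg.splitn(2, '=');
        let name = parts.next().ok_or("--extern value must not be empty".to_string())?;
        let location = parts.next()
            .ok_or("--extern value must be of the format `foo=bar`".to_string())?;
        Ok((name.to_string(), location.to_string()))
    }

    fn main() {
        assert_eq!(parse_extern("foo=bar").unwrap(),
                   ("foo".to_string(), "bar".to_string()));
        assert!(parse_extern("foo").is_err());
    }
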
index 03d2c1a1b4d0d3ea2189e37ec9858faf7b68bf7b..d21726dd40f08195ec366b989025a38c5b47b331 100644 (file)
@@ -83,7 +83,7 @@ pub fn render(input: &str, mut output: PathBuf, matches: &getopts::Matches,
     }
     let title = metadata[0];
 
-    reset_ids();
+    reset_ids(false);
 
     let rendered = if include_toc {
         format!("{}", MarkdownWithToc(text))
index 957957eaec6e5a1fda9a764555ac984c0479adf4..adc39b69986d62fe230e4332b17ee31fed62cf3f 100644 (file)
@@ -8,19 +8,20 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use rustc::middle::def_id::DefId;
+use rustc::hir::def_id::DefId;
 use rustc::middle::privacy::AccessLevels;
 use rustc::util::nodemap::DefIdSet;
 use std::cmp;
 use std::string::String;
 use std::usize;
-use rustc_front::hir;
+use rustc::hir;
 
-use clean;
+use clean::{self, Attributes, GetDefId};
 use clean::Item;
 use plugins;
 use fold;
 use fold::DocFolder;
+use fold::FoldItem::Strip;
 
 /// Strip items marked `#[doc(hidden)]`
 pub fn strip_hidden(krate: clean::Crate) -> plugins::PluginResult {
@@ -33,24 +34,18 @@ pub fn strip_hidden(krate: clean::Crate) -> plugins::PluginResult {
         }
         impl<'a> fold::DocFolder for Stripper<'a> {
             fn fold_item(&mut self, i: Item) -> Option<Item> {
-                if i.is_hidden_from_doc() {
+                if i.attrs.list("doc").has_word("hidden") {
                     debug!("found one in strip_hidden; removing");
                     self.stripped.insert(i.def_id);
 
                     // use a dedicated hidden item for given item type if any
                     match i.inner {
-                        clean::StructFieldItem(..) => {
-                            return Some(clean::Item {
-                                inner: clean::StructFieldItem(clean::HiddenStructField),
-                                ..i
-                            });
-                        }
-                        _ => {
-                            return None;
+                        clean::StructFieldItem(..) | clean::ModuleItem(..) => {
+                            return Strip(i).fold()
                         }
+                        _ => return None,
                     }
                 }
-
                 self.fold_item_recur(i)
             }
         }
@@ -59,7 +54,7 @@ pub fn strip_hidden(krate: clean::Crate) -> plugins::PluginResult {
     };
 
     // strip any traits implemented on stripped items
-    let krate = {
+    {
         struct ImplStripper<'a> {
             stripped: &'a mut DefIdSet
         }
@@ -74,7 +69,7 @@ pub fn strip_hidden(krate: clean::Crate) -> plugins::PluginResult {
                         return None;
                     }
                     // Impls of stripped traits also don't need to exist
-                    if let Some(clean::ResolvedPath { did, .. }) = *trait_ {
+                    if let Some(did) = trait_.def_id() {
                         if self.stripped.contains(&did) {
                             return None;
                         }
@@ -85,9 +80,7 @@ pub fn strip_hidden(krate: clean::Crate) -> plugins::PluginResult {
         }
         let mut stripper = ImplStripper{ stripped: &mut stripped };
         stripper.fold_crate(krate)
-    };
-
-    (krate, None)
+    }
 }
 
 /// Strip private items from the point of view of a crate or externally from a
@@ -106,15 +99,14 @@ pub fn strip_private(mut krate: clean::Crate) -> plugins::PluginResult {
             retained: &mut retained,
             access_levels: &access_levels,
         };
-        krate = stripper.fold_crate(krate);
+        krate = ImportStripper.fold_crate(stripper.fold_crate(krate));
     }
 
     // strip all private implementations of traits
     {
         let mut stripper = ImplStripper(&retained);
-        krate = stripper.fold_crate(krate);
+        stripper.fold_crate(krate)
     }
-    (krate, None)
 }
 
 struct Stripper<'a> {
@@ -125,12 +117,14 @@ struct Stripper<'a> {
 impl<'a> fold::DocFolder for Stripper<'a> {
     fn fold_item(&mut self, i: Item) -> Option<Item> {
         match i.inner {
+            clean::StrippedItem(..) => return Some(i),
             // These items can all get re-exported
             clean::TypedefItem(..) | clean::StaticItem(..) |
             clean::StructItem(..) | clean::EnumItem(..) |
             clean::TraitItem(..) | clean::FunctionItem(..) |
             clean::VariantItem(..) | clean::MethodItem(..) |
-            clean::ForeignFunctionItem(..) | clean::ForeignStaticItem(..) => {
+            clean::ForeignFunctionItem(..) | clean::ForeignStaticItem(..) |
+            clean::ConstantItem(..) => {
                 if i.def_id.is_local() {
                     if !self.access_levels.is_exported(i.def_id) {
                         return None;
@@ -138,30 +132,18 @@ impl<'a> fold::DocFolder for Stripper<'a> {
                 }
             }
 
-            clean::ConstantItem(..) => {
-                if i.def_id.is_local() && !self.access_levels.is_exported(i.def_id) {
-                    return None;
-                }
-            }
-
-            clean::ExternCrateItem(..) | clean::ImportItem(_) => {
+            clean::StructFieldItem(..) => {
                 if i.visibility != Some(hir::Public) {
-                    return None
+                    return Strip(i).fold();
                 }
             }
 
-            clean::StructFieldItem(..) => {
-                if i.visibility != Some(hir::Public) {
-                    return Some(clean::Item {
-                        inner: clean::StructFieldItem(clean::HiddenStructField),
-                        ..i
-                    })
+            clean::ModuleItem(..) => {
+                if i.def_id.is_local() && i.visibility != Some(hir::Public) {
+                    return Strip(self.fold_item_recur(i).unwrap()).fold()
                 }
             }
 
-            // handled below
-            clean::ModuleItem(..) => {}
-
             // trait impls for private items should be stripped
             clean::ImplItem(clean::Impl{
                 for_: clean::ResolvedPath{ did, .. }, ..
@@ -170,6 +152,9 @@ impl<'a> fold::DocFolder for Stripper<'a> {
                     return None;
                 }
             }
+            // handled in the `strip-priv-imports` pass
+            clean::ExternCrateItem(..) | clean::ImportItem(..) => {}
+
             clean::DefaultImplItem(..) | clean::ImplItem(..) => {}
 
             // tymethods/macros have no control over privacy
@@ -190,7 +175,6 @@ impl<'a> fold::DocFolder for Stripper<'a> {
 
             // implementations of traits are always public.
             clean::ImplItem(ref imp) if imp.trait_.is_some() => true,
-
             // Struct variant fields have inherited visibility
             clean::VariantItem(clean::Variant {
                 kind: clean::StructVariant(..)
@@ -205,22 +189,19 @@ impl<'a> fold::DocFolder for Stripper<'a> {
             self.fold_item_recur(i)
         };
 
-        match i {
-            Some(i) => {
-                match i.inner {
-                    // emptied modules/impls have no need to exist
-                    clean::ModuleItem(ref m)
-                        if m.items.is_empty() &&
-                           i.doc_value().is_none() => None,
-                    clean::ImplItem(ref i) if i.items.is_empty() => None,
-                    _ => {
-                        self.retained.insert(i.def_id);
-                        Some(i)
-                    }
+        i.and_then(|i| {
+            match i.inner {
+                // emptied modules/impls have no need to exist
+                clean::ModuleItem(ref m)
+                    if m.items.is_empty() &&
+                       i.doc_value().is_none() => None,
+                clean::ImplItem(ref i) if i.items.is_empty() => None,
+                _ => {
+                    self.retained.insert(i.def_id);
+                    Some(i)
                 }
             }
-            None => None,
-        }
+        })
     }
 }
 
@@ -229,25 +210,36 @@ struct ImplStripper<'a>(&'a DefIdSet);
 impl<'a> fold::DocFolder for ImplStripper<'a> {
     fn fold_item(&mut self, i: Item) -> Option<Item> {
         if let clean::ImplItem(ref imp) = i.inner {
-            match imp.trait_ {
-                Some(clean::ResolvedPath{ did, .. }) => {
-                    if did.is_local() && !self.0.contains(&did) {
-                        return None;
-                    }
+            if let Some(did) = imp.trait_.def_id() {
+                if did.is_local() && !self.0.contains(&did) {
+                    return None;
                 }
-                Some(..) | None => {}
             }
         }
         self.fold_item_recur(i)
     }
 }
 
+// This stripper discards all private import statements (`use`, `extern crate`)
+struct ImportStripper;
+impl fold::DocFolder for ImportStripper {
+    fn fold_item(&mut self, i: Item) -> Option<Item> {
+        match i.inner {
+            clean::ExternCrateItem(..) |
+            clean::ImportItem(..) if i.visibility != Some(hir::Public) => None,
+            _ => self.fold_item_recur(i)
+        }
+    }
+}
+
+pub fn strip_priv_imports(krate: clean::Crate)  -> plugins::PluginResult {
+    ImportStripper.fold_crate(krate)
+}
 
 pub fn unindent_comments(krate: clean::Crate) -> plugins::PluginResult {
     struct CommentCleaner;
     impl fold::DocFolder for CommentCleaner {
-        fn fold_item(&mut self, i: Item) -> Option<Item> {
-            let mut i = i;
+        fn fold_item(&mut self, mut i: Item) -> Option<Item> {
             let mut avec: Vec<clean::Attribute> = Vec::new();
             for attr in &i.attrs {
                 match attr {
@@ -265,23 +257,20 @@ pub fn unindent_comments(krate: clean::Crate) -> plugins::PluginResult {
     }
     let mut cleaner = CommentCleaner;
     let krate = cleaner.fold_crate(krate);
-    (krate, None)
+    krate
 }
 
 pub fn collapse_docs(krate: clean::Crate) -> plugins::PluginResult {
     struct Collapser;
     impl fold::DocFolder for Collapser {
-        fn fold_item(&mut self, i: Item) -> Option<Item> {
+        fn fold_item(&mut self, mut i: Item) -> Option<Item> {
             let mut docstr = String::new();
-            let mut i = i;
             for attr in &i.attrs {
-                match *attr {
-                    clean::NameValue(ref x, ref s)
-                            if "doc" == *x => {
+                if let clean::NameValue(ref x, ref s) = *attr {
+                    if "doc" == *x {
                         docstr.push_str(s);
                         docstr.push('\n');
-                    },
-                    _ => ()
+                    }
                 }
             }
             let mut a: Vec<clean::Attribute> = i.attrs.iter().filter(|&a| match a {
@@ -297,7 +286,7 @@ pub fn collapse_docs(krate: clean::Crate) -> plugins::PluginResult {
     }
     let mut collapser = Collapser;
     let krate = collapser.fold_crate(krate);
-    (krate, None)
+    krate
 }
 
 pub fn unindent(s: &str) -> String {
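
The new `strip-priv-imports` pass above is just another `DocFolder`: returning `None` from `fold_item` drops an item, anything else is folded recursively. A self-contained sketch of that folder pattern, with stand-in `Item` and `Folder` types rather than rustdoc's `clean::Item` and `fold::DocFolder`:

    struct Item {
        name: String,
        public: bool,
        is_import: bool,
        children: Vec<Item>,
    }

    trait Folder {
        fn fold_item(&mut self, item: Item) -> Option<Item>;

        // Recurse into children, keeping only the items the folder retains.
        fn fold_children(&mut self, item: Item) -> Item {
            let Item { name, public, is_import, children } = item;
            let children = children.into_iter()
                                   .filter_map(|c| self.fold_item(c))
                                   .collect();
            Item { name: name, public: public, is_import: is_import, children: children }
        }
    }

    struct ImportStripper;

    impl Folder for ImportStripper {
        fn fold_item(&mut self, item: Item) -> Option<Item> {
            if item.is_import && !item.public {
                None                                // drop private imports entirely
            } else {
                Some(self.fold_children(item))
            }
        }
    }

    fn main() {
        let krate = Item {
            name: "root".to_string(), public: true, is_import: false,
            children: vec![
                Item { name: "use foo".to_string(), public: false, is_import: true, children: vec![] },
                Item { name: "Bar".to_string(), public: true, is_import: false, children: vec![] },
            ],
        };
        let stripped = ImportStripper.fold_item(krate).unwrap();
        assert_eq!(stripped.children.len(), 1);
        assert_eq!(stripped.children[0].name, "Bar");
    }
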
index 7292ea5377e25d8bb0a293030c76c448c1484dd0..b8be84825c9ccf88e29c43a2edfbee73deb650f4 100644 (file)
 
 use clean;
 
-use std::dynamic_lib as dl;
-use serialize::json;
 use std::mem;
 use std::string::String;
 use std::path::PathBuf;
 
-pub type PluginJson = Option<(String, json::Json)>;
-pub type PluginResult = (clean::Crate, PluginJson);
+use rustc_back::dynamic_lib as dl;
+
+pub type PluginResult = clean::Crate;
 pub type PluginCallback = fn (clean::Crate) -> PluginResult;
 
 /// Manages loading and running of plugins
@@ -64,15 +63,11 @@ impl PluginManager {
         self.callbacks.push(plugin);
     }
     /// Run all the loaded plugins over the crate, returning their results
-    pub fn run_plugins(&self, krate: clean::Crate) -> (clean::Crate, Vec<PluginJson> ) {
-        let mut out_json = Vec::new();
-        let mut krate = krate;
+    pub fn run_plugins(&self, mut krate: clean::Crate) -> clean::Crate {
         for &callback in &self.callbacks {
-            let (c, res) = callback(krate);
-            krate = c;
-            out_json.push(res);
+            krate = callback(krate);
         }
-        (krate, out_json)
+        krate
     }
 }
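
With the plugin JSON output gone, `run_plugins` above simply threads the crate through each callback in turn. A minimal sketch of that pipeline shape, with a stand-in `Crate` type (not rustdoc's `clean::Crate`):

    struct Crate {
        items: Vec<String>,
    }

    type Pass = fn(Crate) -> Crate;

    fn strip_empty(mut krate: Crate) -> Crate {
        krate.items.retain(|s| !s.is_empty());
        krate
    }

    fn uppercase(mut krate: Crate) -> Crate {
        krate.items = krate.items.iter().map(|s| s.to_uppercase()).collect();
        krate
    }

    // Same shape as PluginManager::run_plugins after this commit.
    fn run_plugins(passes: &[Pass], mut krate: Crate) -> Crate {
        for &pass in passes {
            krate = pass(krate);
        }
        krate
    }

    fn main() {
        let passes: &[Pass] = &[strip_empty, uppercase];
        let krate = Crate { items: vec![String::new(), "foo".to_string()] };
        let krate = run_plugins(passes, krate);
        assert_eq!(krate.items, vec!["FOO".to_string()]);
    }
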
 
index 39550488a9e875c419f5fb8e93918c5589bf3588..982f477fc4ae1c0c5429551048c989711e9da4ec 100644 (file)
@@ -8,17 +8,14 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![allow(deprecated)]
-
 use std::cell::{RefCell, Cell};
-use std::collections::{HashSet, HashMap};
-use std::dynamic_lib::DynamicLibrary;
+use std::collections::HashMap;
 use std::env;
 use std::ffi::OsString;
 use std::io::prelude::*;
 use std::io;
 use std::path::PathBuf;
-use std::panic::{self, AssertRecoverSafe};
+use std::panic::{self, AssertUnwindSafe};
 use std::process::Command;
 use std::rc::Rc;
 use std::str;
@@ -27,11 +24,12 @@ use std::sync::{Arc, Mutex};
 use testing;
 use rustc_lint;
 use rustc::dep_graph::DepGraph;
-use rustc::front::map as hir_map;
+use rustc::hir::map as hir_map;
 use rustc::session::{self, config};
 use rustc::session::config::{get_unstable_features_setting, OutputType};
 use rustc::session::search_paths::{SearchPaths, PathKind};
-use rustc_front::lowering::{lower_crate, LoweringContext};
+use rustc::hir::lowering::{lower_crate, LoweringContext};
+use rustc_back::dynamic_lib::DynamicLibrary;
 use rustc_back::tempdir::TempDir;
 use rustc_driver::{driver, Compilation};
 use rustc_metadata::cstore::CStore;
@@ -91,7 +89,7 @@ pub fn run(input: &str,
 
     let mut cfg = config::build_configuration(&sess);
     cfg.extend(config::parse_cfgspecs(cfgs.clone()));
-    let krate = driver::phase_1_parse_input(&sess, cfg, &input);
+    let krate = panictry!(driver::phase_1_parse_input(&sess, cfg, &input));
     let krate = driver::phase_2_configure_and_expand(&sess, &cstore, krate,
                                                      "rustdoc-test", None)
         .expect("phase_2_configure_and_expand aborted in rustdoc!");
@@ -114,19 +112,18 @@ pub fn run(input: &str,
         external_traits: RefCell::new(None),
         external_typarams: RefCell::new(None),
         inlined: RefCell::new(None),
-        populated_crate_impls: RefCell::new(HashSet::new()),
+        all_crate_impls: RefCell::new(HashMap::new()),
         deref_trait_did: Cell::new(None),
     };
 
     let mut v = RustdocVisitor::new(&ctx, None);
     v.visit(ctx.map.krate());
     let mut krate = v.clean(&ctx);
-    match crate_name {
-        Some(name) => krate.name = name,
-        None => {}
+    if let Some(name) = crate_name {
+        krate.name = name;
     }
-    let (krate, _) = passes::collapse_docs(krate);
-    let (krate, _) = passes::unindent_comments(krate);
+    let krate = passes::collapse_docs(krate);
+    let krate = passes::unindent_comments(krate);
 
     let mut collector = Collector::new(krate.name.to_string(),
                                        cfgs,
@@ -144,7 +141,7 @@ pub fn run(input: &str,
 }
 
 // Look for #![doc(test(no_crate_inject))], used by crates in the std facade
-fn scrape_test_config(krate: &::rustc_front::hir::Crate) -> TestOptions {
+fn scrape_test_config(krate: &::rustc::hir::Crate) -> TestOptions {
     use syntax::attr::AttrMetaMethods;
     use syntax::print::pprust;
 
@@ -183,7 +180,10 @@ fn runtest(test: &str, cratename: &str, cfgs: Vec<String>, libs: SearchPaths,
     // the test harness wants its own `main` & top level functions, so
     // never wrap the test in `fn main() { ... }`
     let test = maketest(test, Some(cratename), as_test_harness, opts);
-    let input = config::Input::Str(test.to_string());
+    let input = config::Input::Str {
+        name: driver::anon_src(),
+        input: test.to_owned(),
+    };
     let mut outputs = HashMap::new();
     outputs.insert(OutputType::Exe, None);
 
@@ -256,18 +256,13 @@ fn runtest(test: &str, cratename: &str, cfgs: Vec<String>, libs: SearchPaths,
         control.after_analysis.stop = Compilation::Stop;
     }
 
-    match {
-        let b_sess = AssertRecoverSafe::new(&sess);
-        let b_cstore = AssertRecoverSafe::new(&cstore);
-        let b_cfg = AssertRecoverSafe::new(cfg.clone());
-        let b_control = AssertRecoverSafe::new(&control);
-
-        panic::recover(|| {
-            driver::compile_input(&b_sess, &b_cstore, (*b_cfg).clone(),
-                                  &input, &out,
-                                  &None, None, &b_control)
-        })
-    } {
+    let res = panic::catch_unwind(AssertUnwindSafe(|| {
+        driver::compile_input(&sess, &cstore, cfg.clone(),
+                              &input, &out,
+                              &None, None, &control)
+    }));
+
+    match res {
         Ok(r) => {
             match r {
                 Err(count) if count > 0 && compile_fail == false => {
@@ -334,13 +329,10 @@ pub fn maketest(s: &str, cratename: Option<&str>, dont_insert_main: bool,
     // Don't inject `extern crate std` because it's already injected by the
     // compiler.
     if !s.contains("extern crate") && !opts.no_crate_inject && cratename != Some("std") {
-        match cratename {
-            Some(cratename) => {
-                if s.contains(cratename) {
-                    prog.push_str(&format!("extern crate {};\n", cratename));
-                }
+        if let Some(cratename) = cratename {
+            if s.contains(cratename) {
+                prog.push_str(&format!("extern crate {};\n", cratename));
             }
-            None => {}
         }
     }
     if dont_insert_main || s.contains("fn main") {
@@ -434,7 +426,7 @@ impl Collector {
                 // compiler failures are test failures
                 should_panic: testing::ShouldPanic::No,
             },
-            testfn: testing::DynTestFn(Box::new(move|| {
+            testfn: testing::DynTestFn(box move|| {
                 runtest(&test,
                         &cratename,
                         cfgs,
@@ -445,7 +437,7 @@ impl Collector {
                         as_test_harness,
                         compile_fail,
                         &opts);
-            }))
+            })
         });
     }
 
@@ -476,12 +468,7 @@ impl DocFolder for Collector {
             _ => typename_if_impl(&item)
         };
 
-        let pushed = if let Some(name) = current_name {
-            self.names.push(name);
-            true
-        } else {
-            false
-        };
+        let pushed = current_name.map(|name| self.names.push(name)).is_some();
 
         if let Some(doc) = item.doc_value() {
             self.cnt = 0;
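
The doctest runner above drops the deprecated `std::panic::recover`/`AssertRecoverSafe` pair in favour of `catch_unwind`/`AssertUnwindSafe`, so a panic inside the compiler invocation is caught and reported instead of aborting the whole test run. A minimal sketch of the pattern; `compile` here is a made-up stand-in for `driver::compile_input`:

    use std::panic::{self, AssertUnwindSafe};

    // Hypothetical stand-in: "fails" (panics) unless the source has a main function.
    fn compile(source: &str) -> usize {
        if source.contains("fn main") { 0 } else { panic!("no main function") }
    }

    fn main() {
        let mut error_count = 0;
        // `&mut error_count` is not unwind-safe, so the closure must be wrapped
        // in AssertUnwindSafe before it can cross the catch_unwind boundary.
        let result = panic::catch_unwind(AssertUnwindSafe(|| {
            error_count += compile("struct Foo;");
        }));
        assert!(result.is_err());   // the panic was caught, not propagated
        assert_eq!(error_count, 0); // the borrowed counter is usable again afterwards
    }
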
index ba389bc42b78cd30df66543a03e688ef923a9827..5c36c38abc5872f626cd7b04b0f9664055c9ce25 100644 (file)
@@ -20,12 +20,13 @@ use syntax::attr;
 use syntax::attr::AttrMetaMethods;
 use syntax::codemap::Span;
 
-use rustc::front::map as hir_map;
+use rustc::hir::map as hir_map;
 use rustc::middle::stability;
 
-use rustc_front::hir;
+use rustc::hir;
 
 use core;
+use clean::{Clean, Attributes};
 use doctree::*;
 
 // looks to me like the first two of these are actually
@@ -100,7 +101,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
             id: item.id,
             struct_type: struct_type,
             name: name,
-            vis: item.vis,
+            vis: item.vis.clone(),
             stab: self.stability(item.id),
             depr: self.deprecation(item.id),
             attrs: item.attrs.clone(),
@@ -124,7 +125,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
                 def: v.node.data.clone(),
                 whence: v.span,
             }).collect(),
-            vis: it.vis,
+            vis: it.vis.clone(),
             stab: self.stability(it.id),
             depr: self.deprecation(it.id),
             generics: params.clone(),
@@ -143,7 +144,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
         debug!("Visiting fn");
         Function {
             id: item.id,
-            vis: item.vis,
+            vis: item.vis.clone(),
             stab: self.stability(item.id),
             depr: self.deprecation(item.id),
             attrs: item.attrs.clone(),
@@ -165,7 +166,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
         om.where_outer = span;
         om.where_inner = m.inner;
         om.attrs = attrs;
-        om.vis = vis;
+        om.vis = vis.clone();
         om.stab = self.stability(id);
         om.depr = self.deprecation(id);
         om.id = id;
@@ -182,7 +183,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
                        please_inline: bool) -> Option<hir::ViewPath_> {
         match path {
             hir::ViewPathSimple(dst, base) => {
-                if self.resolve_id(id, Some(dst), false, om, please_inline) {
+                if self.maybe_inline_local(id, Some(dst), false, om, please_inline) {
                     None
                 } else {
                     Some(hir::ViewPathSimple(dst, base))
@@ -190,7 +191,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
             }
             hir::ViewPathList(p, paths) => {
                 let mine = paths.into_iter().filter(|path| {
-                    !self.resolve_id(path.node.id(), None, false, om,
+                    !self.maybe_inline_local(path.node.id(), None, false, om,
                                      please_inline)
                 }).collect::<hir::HirVec<hir::PathListItem>>();
 
@@ -201,9 +202,8 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
                 }
             }
 
-            // these are feature gated anyway
             hir::ViewPathGlob(base) => {
-                if self.resolve_id(id, None, true, om, please_inline) {
+                if self.maybe_inline_local(id, None, true, om, please_inline) {
                     None
                 } else {
                     Some(hir::ViewPathGlob(base))
@@ -213,8 +213,32 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
 
     }
 
-    fn resolve_id(&mut self, id: ast::NodeId, renamed: Option<ast::Name>,
+    /// Tries to resolve the target of a `pub use` statement and inlines the
+    /// target if it is defined locally and would not be documented otherwise,
+    /// or when it is specifically requested with `please_inline`.
+    /// (the latter is the case when the import is marked `doc(inline)`)
+    ///
+    /// Cross-crate inlining occurs later on during crate cleaning
+    /// and follows different rules.
+    ///
+    /// Returns true if the target has been inlined.
+    fn maybe_inline_local(&mut self, id: ast::NodeId, renamed: Option<ast::Name>,
                   glob: bool, om: &mut Module, please_inline: bool) -> bool {
+
+        fn inherits_doc_hidden(cx: &core::DocContext, mut node: ast::NodeId) -> bool {
+            while let Some(id) = cx.map.get_enclosing_scope(node) {
+                node = id;
+                let attrs = cx.map.attrs(node).clean(cx);
+                if attrs.list("doc").has_word("hidden") {
+                    return true;
+                }
+                if node == ast::CRATE_NODE_ID {
+                    break;
+                }
+            }
+            false
+        }
+
         let tcx = match self.cx.tcx_opt() {
             Some(tcx) => tcx,
             None => return false
@@ -226,9 +250,18 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
         let analysis = match self.analysis {
             Some(analysis) => analysis, None => return false
         };
-        if !please_inline && analysis.access_levels.is_public(def) {
+
+        let use_attrs = tcx.map.attrs(id).clean(self.cx);
+
+        let is_private = !analysis.access_levels.is_public(def);
+        let is_hidden = inherits_doc_hidden(self.cx, def_node_id);
+        let is_no_inline = use_attrs.list("doc").has_word("no_inline");
+
+        // Only inline if requested or if the item would otherwise be stripped
+        if (!please_inline && !is_private && !is_hidden) || is_no_inline {
             return false
         }
+
         if !self.view_item_stack.insert(def_node_id) { return false }
 
         let ret = match tcx.map.get(def_node_id) {
@@ -263,14 +296,10 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
         let name = renamed.unwrap_or(item.name);
         match item.node {
             hir::ItemExternCrate(ref p) => {
-                let path = match *p {
-                    None => None,
-                    Some(x) => Some(x.to_string()),
-                };
                 om.extern_crates.push(ExternCrate {
                     name: name,
-                    path: path,
-                    vis: item.vis,
+                    path: p.map(|x|x.to_string()),
+                    vis: item.vis.clone(),
                     attrs: item.attrs.clone(),
                     whence: item.span,
                 })
@@ -280,10 +309,10 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
                 let node = if item.vis == hir::Public {
                     let please_inline = item.attrs.iter().any(|item| {
                         match item.meta_item_list() {
-                            Some(list) => {
+                            Some(list) if &item.name()[..] == "doc" => {
                                 list.iter().any(|i| &i.name()[..] == "inline")
                             }
-                            None => false,
+                            _ => false,
                         }
                     });
                     match self.visit_view_path(node, om, item.id, please_inline) {
@@ -295,7 +324,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
                 };
                 om.imports.push(Import {
                     id: item.id,
-                    vis: item.vis,
+                    vis: item.vis.clone(),
                     attrs: item.attrs.clone(),
                     node: node,
                     whence: item.span,
@@ -304,7 +333,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
             hir::ItemMod(ref m) => {
                 om.mods.push(self.visit_mod_contents(item.span,
                                                      item.attrs.clone(),
-                                                     item.vis,
+                                                     item.vis.clone(),
                                                      item.id,
                                                      m,
                                                      Some(name)));
@@ -324,7 +353,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
                     id: item.id,
                     attrs: item.attrs.clone(),
                     whence: item.span,
-                    vis: item.vis,
+                    vis: item.vis.clone(),
                     stab: self.stability(item.id),
                     depr: self.deprecation(item.id),
                 };
@@ -339,7 +368,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
                     name: name,
                     attrs: item.attrs.clone(),
                     whence: item.span,
-                    vis: item.vis,
+                    vis: item.vis.clone(),
                     stab: self.stability(item.id),
                     depr: self.deprecation(item.id),
                 };
@@ -353,7 +382,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
                     name: name,
                     attrs: item.attrs.clone(),
                     whence: item.span,
-                    vis: item.vis,
+                    vis: item.vis.clone(),
                     stab: self.stability(item.id),
                     depr: self.deprecation(item.id),
                 };
@@ -369,7 +398,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
                     id: item.id,
                     attrs: item.attrs.clone(),
                     whence: item.span,
-                    vis: item.vis,
+                    vis: item.vis.clone(),
                     stab: self.stability(item.id),
                     depr: self.deprecation(item.id),
                 };
@@ -386,7 +415,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
                     attrs: item.attrs.clone(),
                     id: item.id,
                     whence: item.span,
-                    vis: item.vis,
+                    vis: item.vis.clone(),
                     stab: self.stability(item.id),
                     depr: self.deprecation(item.id),
                 };
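
The new doc comment on `maybe_inline_local` above spells out the inlining rule: inline the target of a `pub use` only when `doc(inline)` asks for it or when the item would otherwise be stripped (private or `#[doc(hidden)]`), and never when `doc(no_inline)` is present. The decision itself is a small boolean check; the helper below is an illustrative stand-in, not rustdoc code:

    // Mirrors the early-return condition added to `maybe_inline_local`.
    fn should_inline(please_inline: bool,
                     is_private: bool,
                     is_hidden: bool,
                     is_no_inline: bool) -> bool {
        !((!please_inline && !is_private && !is_hidden) || is_no_inline)
    }

    fn main() {
        assert!(should_inline(true, false, false, false));   // doc(inline) requested
        assert!(should_inline(false, true, false, false));   // private target would be stripped
        assert!(!should_inline(false, false, false, false)); // public target: link, don't inline
        assert!(!should_inline(true, true, true, true));     // doc(no_inline) always wins
    }
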
index 804e1af19aba2816f8b42baf9f13a1822484f861..5d652ba2f55bbc72775d16613171bf40c3ffbaa1 100644 (file)
@@ -23,7 +23,7 @@ impl<
     fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
         s.emit_seq(self.len(), |s| {
             for (i, e) in self.iter().enumerate() {
-                try!(s.emit_seq_elt(i, |s| e.encode(s)));
+                s.emit_seq_elt(i, |s| e.encode(s))?;
             }
             Ok(())
         })
@@ -35,7 +35,7 @@ impl<T:Decodable> Decodable for LinkedList<T> {
         d.read_seq(|d, len| {
             let mut list = LinkedList::new();
             for i in 0..len {
-                list.push_back(try!(d.read_seq_elt(i, |d| Decodable::decode(d))));
+                list.push_back(d.read_seq_elt(i, |d| Decodable::decode(d))?);
             }
             Ok(list)
         })
@@ -46,7 +46,7 @@ impl<T: Encodable> Encodable for VecDeque<T> {
     fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
         s.emit_seq(self.len(), |s| {
             for (i, e) in self.iter().enumerate() {
-                try!(s.emit_seq_elt(i, |s| e.encode(s)));
+                s.emit_seq_elt(i, |s| e.encode(s))?;
             }
             Ok(())
         })
@@ -58,7 +58,7 @@ impl<T:Decodable> Decodable for VecDeque<T> {
         d.read_seq(|d, len| {
             let mut deque: VecDeque<T> = VecDeque::new();
             for i in 0..len {
-                deque.push_back(try!(d.read_seq_elt(i, |d| Decodable::decode(d))));
+                deque.push_back(d.read_seq_elt(i, |d| Decodable::decode(d))?);
             }
             Ok(deque)
         })
@@ -73,8 +73,8 @@ impl<
         e.emit_map(self.len(), |e| {
             let mut i = 0;
             for (key, val) in self {
-                try!(e.emit_map_elt_key(i, |e| key.encode(e)));
-                try!(e.emit_map_elt_val(i, |e| val.encode(e)));
+                e.emit_map_elt_key(i, |e| key.encode(e))?;
+                e.emit_map_elt_val(i, |e| val.encode(e))?;
                 i += 1;
             }
             Ok(())
@@ -90,8 +90,8 @@ impl<
         d.read_map(|d, len| {
             let mut map = BTreeMap::new();
             for i in 0..len {
-                let key = try!(d.read_map_elt_key(i, |d| Decodable::decode(d)));
-                let val = try!(d.read_map_elt_val(i, |d| Decodable::decode(d)));
+                let key = d.read_map_elt_key(i, |d| Decodable::decode(d))?;
+                let val = d.read_map_elt_val(i, |d| Decodable::decode(d))?;
                 map.insert(key, val);
             }
             Ok(map)
@@ -106,7 +106,7 @@ impl<
         s.emit_seq(self.len(), |s| {
             let mut i = 0;
             for e in self {
-                try!(s.emit_seq_elt(i, |s| e.encode(s)));
+                s.emit_seq_elt(i, |s| e.encode(s))?;
                 i += 1;
             }
             Ok(())
@@ -121,7 +121,7 @@ impl<
         d.read_seq(|d, len| {
             let mut set = BTreeSet::new();
             for i in 0..len {
-                set.insert(try!(d.read_seq_elt(i, |d| Decodable::decode(d))));
+                set.insert(d.read_seq_elt(i, |d| Decodable::decode(d))?);
             }
             Ok(set)
         })
@@ -144,7 +144,7 @@ impl<
     T: Decodable + CLike
 > Decodable for EnumSet<T> {
     fn decode<D: Decoder>(d: &mut D) -> Result<EnumSet<T>, D::Error> {
-        let bits = try!(d.read_uint());
+        let bits = d.read_uint()?;
         let mut set = EnumSet::new();
         for bit in 0..(mem::size_of::<usize>()*8) {
             if bits & (1 << bit) != 0 {
@@ -164,8 +164,8 @@ impl<K, V, S> Encodable for HashMap<K, V, S>
         e.emit_map(self.len(), |e| {
             let mut i = 0;
             for (key, val) in self {
-                try!(e.emit_map_elt_key(i, |e| key.encode(e)));
-                try!(e.emit_map_elt_val(i, |e| val.encode(e)));
+                e.emit_map_elt_key(i, |e| key.encode(e))?;
+                e.emit_map_elt_val(i, |e| val.encode(e))?;
                 i += 1;
             }
             Ok(())
@@ -183,8 +183,8 @@ impl<K, V, S> Decodable for HashMap<K, V, S>
             let state = Default::default();
             let mut map = HashMap::with_capacity_and_hasher(len, state);
             for i in 0..len {
-                let key = try!(d.read_map_elt_key(i, |d| Decodable::decode(d)));
-                let val = try!(d.read_map_elt_val(i, |d| Decodable::decode(d)));
+                let key = d.read_map_elt_key(i, |d| Decodable::decode(d))?;
+                let val = d.read_map_elt_val(i, |d| Decodable::decode(d))?;
                 map.insert(key, val);
             }
             Ok(map)
@@ -200,7 +200,7 @@ impl<T, S> Encodable for HashSet<T, S>
         s.emit_seq(self.len(), |s| {
             let mut i = 0;
             for e in self {
-                try!(s.emit_seq_elt(i, |s| e.encode(s)));
+                s.emit_seq_elt(i, |s| e.encode(s))?;
                 i += 1;
             }
             Ok(())
@@ -217,7 +217,7 @@ impl<T, S> Decodable for HashSet<T, S>
             let state = Default::default();
             let mut set = HashSet::with_capacity_and_hasher(len, state);
             for i in 0..len {
-                set.insert(try!(d.read_seq_elt(i, |d| Decodable::decode(d))));
+                set.insert(d.read_seq_elt(i, |d| Decodable::decode(d))?);
             }
             Ok(set)
         })
index 609ebe8546164d1872790a48c32a6bb0a8e0107c..31b71dbc80b62c2bd3ce75f5e78778d6b0567590 100644 (file)
@@ -132,7 +132,10 @@ impl FromHex for str {
                     buf >>= 4;
                     continue
                 }
-                _ => return Err(InvalidHexCharacter(self.char_at(idx), idx)),
+                _ => {
+                    let ch = self[idx..].chars().next().unwrap();
+                    return Err(InvalidHexCharacter(ch, idx))
+                }
             }
 
             modulus += 1;
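
The hex decoder above replaces the unstable `str::char_at` with slicing from the byte offset and taking the first `char` of the remainder. A tiny sketch of that idiom; the free function `char_at` is named for illustration only, and like the original indexing it panics when `idx` is not on a character boundary:

    fn char_at(s: &str, idx: usize) -> char {
        // Slice from the byte offset, then read the first character of the rest.
        s[idx..].chars().next().unwrap()
    }

    fn main() {
        let s = "0a£z";
        assert_eq!(char_at(s, 1), 'a');
        assert_eq!(char_at(s, 2), '£'); // '£' occupies two bytes in UTF-8
        assert_eq!(char_at(s, 4), 'z');
    }
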
index b95eddbc661dddfd3e453da8e8288e4e48896195..a7d723515664da6242ebc9a7c3c55c8b77cda021 100644 (file)
@@ -319,7 +319,7 @@ pub fn encode<T: ::Encodable>(object: &T) -> Result<string::String, EncoderError
     let mut s = String::new();
     {
         let mut encoder = Encoder::new(&mut s);
-        try!(object.encode(&mut encoder));
+        object.encode(&mut encoder)?;
     }
     Ok(s)
 }
@@ -371,7 +371,7 @@ pub type EncodeResult = Result<(), EncoderError>;
 pub type DecodeResult<T> = Result<T, DecoderError>;
 
 fn escape_str(wr: &mut fmt::Write, v: &str) -> EncodeResult {
-    try!(wr.write_str("\""));
+    wr.write_str("\"")?;
 
     let mut start = 0;
 
@@ -416,39 +416,38 @@ fn escape_str(wr: &mut fmt::Write, v: &str) -> EncodeResult {
         };
 
         if start < i {
-            try!(wr.write_str(&v[start..i]));
+            wr.write_str(&v[start..i])?;
         }
 
-        try!(wr.write_str(escaped));
+        wr.write_str(escaped)?;
 
         start = i + 1;
     }
 
     if start != v.len() {
-        try!(wr.write_str(&v[start..]));
+        wr.write_str(&v[start..])?;
     }
 
-    try!(wr.write_str("\""));
+    wr.write_str("\"")?;
     Ok(())
 }
 
 fn escape_char(writer: &mut fmt::Write, v: char) -> EncodeResult {
-    let mut buf = [0; 4];
-    let n = v.encode_utf8(&mut buf).unwrap();
-    let buf = unsafe { str::from_utf8_unchecked(&buf[..n]) };
-    escape_str(writer, buf)
+    escape_str(writer, unsafe {
+        str::from_utf8_unchecked(v.encode_utf8().as_slice())
+    })
 }
 
 fn spaces(wr: &mut fmt::Write, mut n: usize) -> EncodeResult {
     const BUF: &'static str = "                ";
 
     while n >= BUF.len() {
-        try!(wr.write_str(BUF));
+        wr.write_str(BUF)?;
         n -= BUF.len();
     }
 
     if n > 0 {
-        try!(wr.write_str(&BUF[..n]));
+        wr.write_str(&BUF[..n])?;
     }
     Ok(())
 }
@@ -492,7 +491,7 @@ impl<'a> ::Encoder for Encoder<'a> {
 
     fn emit_nil(&mut self) -> EncodeResult {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
-        try!(write!(self.writer, "null"));
+        write!(self.writer, "null")?;
         Ok(())
     }
 
@@ -511,9 +510,9 @@ impl<'a> ::Encoder for Encoder<'a> {
     fn emit_bool(&mut self, v: bool) -> EncodeResult {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
         if v {
-            try!(write!(self.writer, "true"));
+            write!(self.writer, "true")?;
         } else {
-            try!(write!(self.writer, "false"));
+            write!(self.writer, "false")?;
         }
         Ok(())
     }
@@ -552,11 +551,11 @@ impl<'a> ::Encoder for Encoder<'a> {
             escape_str(self.writer, name)
         } else {
             if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
-            try!(write!(self.writer, "{{\"variant\":"));
-            try!(escape_str(self.writer, name));
-            try!(write!(self.writer, ",\"fields\":["));
-            try!(f(self));
-            try!(write!(self.writer, "]}}"));
+            write!(self.writer, "{{\"variant\":")?;
+            escape_str(self.writer, name)?;
+            write!(self.writer, ",\"fields\":[")?;
+            f(self)?;
+            write!(self.writer, "]}}")?;
             Ok(())
         }
     }
@@ -566,7 +565,7 @@ impl<'a> ::Encoder for Encoder<'a> {
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
         if idx != 0 {
-            try!(write!(self.writer, ","));
+            write!(self.writer, ",")?;
         }
         f(self)
     }
@@ -596,9 +595,9 @@ impl<'a> ::Encoder for Encoder<'a> {
         F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
-        try!(write!(self.writer, "{{"));
-        try!(f(self));
-        try!(write!(self.writer, "}}"));
+        write!(self.writer, "{{")?;
+        f(self)?;
+        write!(self.writer, "}}")?;
         Ok(())
     }
 
@@ -606,9 +605,9 @@ impl<'a> ::Encoder for Encoder<'a> {
         F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
-        if idx != 0 { try!(write!(self.writer, ",")); }
-        try!(escape_str(self.writer, name));
-        try!(write!(self.writer, ":"));
+        if idx != 0 { write!(self.writer, ",")?; }
+        escape_str(self.writer, name)?;
+        write!(self.writer, ":")?;
         f(self)
     }
 
@@ -659,9 +658,9 @@ impl<'a> ::Encoder for Encoder<'a> {
         F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
-        try!(write!(self.writer, "["));
-        try!(f(self));
-        try!(write!(self.writer, "]"));
+        write!(self.writer, "[")?;
+        f(self)?;
+        write!(self.writer, "]")?;
         Ok(())
     }
 
@@ -670,7 +669,7 @@ impl<'a> ::Encoder for Encoder<'a> {
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
         if idx != 0 {
-            try!(write!(self.writer, ","));
+            write!(self.writer, ",")?;
         }
         f(self)
     }
@@ -679,9 +678,9 @@ impl<'a> ::Encoder for Encoder<'a> {
         F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
-        try!(write!(self.writer, "{{"));
-        try!(f(self));
-        try!(write!(self.writer, "}}"));
+        write!(self.writer, "{{")?;
+        f(self)?;
+        write!(self.writer, "}}")?;
         Ok(())
     }
 
@@ -689,9 +688,9 @@ impl<'a> ::Encoder for Encoder<'a> {
         F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
-        if idx != 0 { try!(write!(self.writer, ",")) }
+        if idx != 0 { write!(self.writer, ",")? }
         self.is_emitting_map_key = true;
-        try!(f(self));
+        f(self)?;
         self.is_emitting_map_key = false;
         Ok(())
     }
@@ -700,7 +699,7 @@ impl<'a> ::Encoder for Encoder<'a> {
         F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
-        try!(write!(self.writer, ":"));
+        write!(self.writer, ":")?;
         f(self)
     }
 }
@@ -740,7 +739,7 @@ impl<'a> ::Encoder for PrettyEncoder<'a> {
 
     fn emit_nil(&mut self) -> EncodeResult {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
-        try!(write!(self.writer, "null"));
+        write!(self.writer, "null")?;
         Ok(())
     }
 
@@ -759,9 +758,9 @@ impl<'a> ::Encoder for PrettyEncoder<'a> {
     fn emit_bool(&mut self, v: bool) -> EncodeResult {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
         if v {
-            try!(write!(self.writer, "true"));
+            write!(self.writer, "true")?;
         } else {
-            try!(write!(self.writer, "false"));
+            write!(self.writer, "false")?;
         }
         Ok(())
     }
@@ -798,23 +797,23 @@ impl<'a> ::Encoder for PrettyEncoder<'a> {
             escape_str(self.writer, name)
         } else {
             if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
-            try!(write!(self.writer, "{{\n"));
+            write!(self.writer, "{{\n")?;
             self.curr_indent += self.indent;
-            try!(spaces(self.writer, self.curr_indent));
-            try!(write!(self.writer, "\"variant\": "));
-            try!(escape_str(self.writer, name));
-            try!(write!(self.writer, ",\n"));
-            try!(spaces(self.writer, self.curr_indent));
-            try!(write!(self.writer, "\"fields\": [\n"));
+            spaces(self.writer, self.curr_indent)?;
+            write!(self.writer, "\"variant\": ")?;
+            escape_str(self.writer, name)?;
+            write!(self.writer, ",\n")?;
+            spaces(self.writer, self.curr_indent)?;
+            write!(self.writer, "\"fields\": [\n")?;
             self.curr_indent += self.indent;
-            try!(f(self));
+            f(self)?;
             self.curr_indent -= self.indent;
-            try!(write!(self.writer, "\n"));
-            try!(spaces(self.writer, self.curr_indent));
+            write!(self.writer, "\n")?;
+            spaces(self.writer, self.curr_indent)?;
             self.curr_indent -= self.indent;
-            try!(write!(self.writer, "]\n"));
-            try!(spaces(self.writer, self.curr_indent));
-            try!(write!(self.writer, "}}"));
+            write!(self.writer, "]\n")?;
+            spaces(self.writer, self.curr_indent)?;
+            write!(self.writer, "}}")?;
             Ok(())
         }
     }
@@ -824,9 +823,9 @@ impl<'a> ::Encoder for PrettyEncoder<'a> {
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
         if idx != 0 {
-            try!(write!(self.writer, ",\n"));
+            write!(self.writer, ",\n")?;
         }
-        try!(spaces(self.writer, self.curr_indent));
+        spaces(self.writer, self.curr_indent)?;
         f(self)
     }
 
@@ -857,15 +856,15 @@ impl<'a> ::Encoder for PrettyEncoder<'a> {
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
         if len == 0 {
-            try!(write!(self.writer, "{{}}"));
+            write!(self.writer, "{{}}")?;
         } else {
-            try!(write!(self.writer, "{{"));
+            write!(self.writer, "{{")?;
             self.curr_indent += self.indent;
-            try!(f(self));
+            f(self)?;
             self.curr_indent -= self.indent;
-            try!(write!(self.writer, "\n"));
-            try!(spaces(self.writer, self.curr_indent));
-            try!(write!(self.writer, "}}"));
+            write!(self.writer, "\n")?;
+            spaces(self.writer, self.curr_indent)?;
+            write!(self.writer, "}}")?;
         }
         Ok(())
     }
@@ -875,13 +874,13 @@ impl<'a> ::Encoder for PrettyEncoder<'a> {
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
         if idx == 0 {
-            try!(write!(self.writer, "\n"));
+            write!(self.writer, "\n")?;
         } else {
-            try!(write!(self.writer, ",\n"));
+            write!(self.writer, ",\n")?;
         }
-        try!(spaces(self.writer, self.curr_indent));
-        try!(escape_str(self.writer, name));
-        try!(write!(self.writer, ": "));
+        spaces(self.writer, self.curr_indent)?;
+        escape_str(self.writer, name)?;
+        write!(self.writer, ": ")?;
         f(self)
     }
 
@@ -933,15 +932,15 @@ impl<'a> ::Encoder for PrettyEncoder<'a> {
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
         if len == 0 {
-            try!(write!(self.writer, "[]"));
+            write!(self.writer, "[]")?;
         } else {
-            try!(write!(self.writer, "["));
+            write!(self.writer, "[")?;
             self.curr_indent += self.indent;
-            try!(f(self));
+            f(self)?;
             self.curr_indent -= self.indent;
-            try!(write!(self.writer, "\n"));
-            try!(spaces(self.writer, self.curr_indent));
-            try!(write!(self.writer, "]"));
+            write!(self.writer, "\n")?;
+            spaces(self.writer, self.curr_indent)?;
+            write!(self.writer, "]")?;
         }
         Ok(())
     }
@@ -951,11 +950,11 @@ impl<'a> ::Encoder for PrettyEncoder<'a> {
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
         if idx == 0 {
-            try!(write!(self.writer, "\n"));
+            write!(self.writer, "\n")?;
         } else {
-            try!(write!(self.writer, ",\n"));
+            write!(self.writer, ",\n")?;
         }
-        try!(spaces(self.writer, self.curr_indent));
+        spaces(self.writer, self.curr_indent)?;
         f(self)
     }
 
@@ -964,15 +963,15 @@ impl<'a> ::Encoder for PrettyEncoder<'a> {
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
         if len == 0 {
-            try!(write!(self.writer, "{{}}"));
+            write!(self.writer, "{{}}")?;
         } else {
-            try!(write!(self.writer, "{{"));
+            write!(self.writer, "{{")?;
             self.curr_indent += self.indent;
-            try!(f(self));
+            f(self)?;
             self.curr_indent -= self.indent;
-            try!(write!(self.writer, "\n"));
-            try!(spaces(self.writer, self.curr_indent));
-            try!(write!(self.writer, "}}"));
+            write!(self.writer, "\n")?;
+            spaces(self.writer, self.curr_indent)?;
+            write!(self.writer, "}}")?;
         }
         Ok(())
     }
@@ -982,13 +981,13 @@ impl<'a> ::Encoder for PrettyEncoder<'a> {
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
         if idx == 0 {
-            try!(write!(self.writer, "\n"));
+            write!(self.writer, "\n")?;
         } else {
-            try!(write!(self.writer, ",\n"));
+            write!(self.writer, ",\n")?;
         }
-        try!(spaces(self.writer, self.curr_indent));
+        spaces(self.writer, self.curr_indent)?;
         self.is_emitting_map_key = true;
-        try!(f(self));
+        f(self)?;
         self.is_emitting_map_key = false;
         Ok(())
     }
@@ -997,7 +996,7 @@ impl<'a> ::Encoder for PrettyEncoder<'a> {
         F: FnOnce(&mut PrettyEncoder<'a>) -> EncodeResult,
     {
         if self.is_emitting_map_key { return Err(EncoderError::BadHashmapKey); }
-        try!(write!(self.writer, ": "));
+        write!(self.writer, ": ")?;
         f(self)
     }
 }
@@ -1696,7 +1695,7 @@ impl<T: Iterator<Item=char>> Parser<T> {
                     'n' => res.push('\n'),
                     'r' => res.push('\r'),
                     't' => res.push('\t'),
-                    'u' => match try!(self.decode_hex_escape()) {
+                    'u' => match self.decode_hex_escape()? {
                         0xDC00 ... 0xDFFF => {
                             return self.error(LoneLeadingSurrogateInHexEscape)
                         }
@@ -1709,7 +1708,7 @@ impl<T: Iterator<Item=char>> Parser<T> {
                                 _ => return self.error(UnexpectedEndOfHexEscape),
                             }
 
-                            let n2 = try!(self.decode_hex_escape());
+                            let n2 = self.decode_hex_escape()?;
                             if n2 < 0xDC00 || n2 > 0xDFFF {
                                 return self.error(LoneLeadingSurrogateInHexEscape)
                             }
@@ -2175,7 +2174,7 @@ impl ::Decoder for Decoder {
     }
 
     fn read_char(&mut self) -> DecodeResult<char> {
-        let s = try!(self.read_str());
+        let s = self.read_str()?;
         {
             let mut it = s.chars();
             match (it.next(), it.next()) {
@@ -2265,7 +2264,7 @@ impl ::Decoder for Decoder {
     fn read_struct<T, F>(&mut self, _name: &str, _len: usize, f: F) -> DecodeResult<T> where
         F: FnOnce(&mut Decoder) -> DecodeResult<T>,
     {
-        let value = try!(f(self));
+        let value = f(self)?;
         self.pop();
         Ok(value)
     }
@@ -2277,7 +2276,7 @@ impl ::Decoder for Decoder {
                                -> DecodeResult<T> where
         F: FnOnce(&mut Decoder) -> DecodeResult<T>,
     {
-        let mut obj = try!(expect!(self.pop(), Object));
+        let mut obj = expect!(self.pop(), Object)?;
 
         let value = match obj.remove(&name.to_string()) {
             None => {
@@ -2291,7 +2290,7 @@ impl ::Decoder for Decoder {
             },
             Some(json) => {
                 self.stack.push(json);
-                try!(f(self))
+                f(self)?
             }
         };
         self.stack.push(Json::Object(obj));
@@ -2347,7 +2346,7 @@ impl ::Decoder for Decoder {
     fn read_seq<T, F>(&mut self, f: F) -> DecodeResult<T> where
         F: FnOnce(&mut Decoder, usize) -> DecodeResult<T>,
     {
-        let array = try!(expect!(self.pop(), Array));
+        let array = expect!(self.pop(), Array)?;
         let len = array.len();
         for v in array.into_iter().rev() {
             self.stack.push(v);
@@ -2364,7 +2363,7 @@ impl ::Decoder for Decoder {
     fn read_map<T, F>(&mut self, f: F) -> DecodeResult<T> where
         F: FnOnce(&mut Decoder, usize) -> DecodeResult<T>,
     {
-        let obj = try!(expect!(self.pop(), Object));
+        let obj = expect!(self.pop(), Object)?;
         let len = obj.len();
         for (key, value) in obj {
             self.stack.push(value);
index d683769af7b0af0f1ebb715e4e4dcb7d2e1ea770..80cd47c85ccdfd64e5b5764a12b9d4aaa11f3e04 100644 (file)
@@ -32,8 +32,8 @@ Core encoding and decoding interfaces.
 #![feature(enumset)]
 #![feature(rustc_private)]
 #![feature(staged_api)]
-#![feature(str_char)]
 #![feature(unicode)]
+#![feature(question_mark)]
 #![cfg_attr(test, feature(test))]
 
 // test harness access
index 23888bed259736cc2f9c1854417cbdfb593369a8..0fcab1347d160f1cfc69bab158cbfc9672081872 100644 (file)
@@ -410,13 +410,13 @@ impl<T: ?Sized + Encodable> Encodable for Box<T> {
 
 impl< T: Decodable> Decodable for Box<T> {
     fn decode<D: Decoder>(d: &mut D) -> Result<Box<T>, D::Error> {
-        Ok(box try!(Decodable::decode(d)))
+        Ok(box Decodable::decode(d)?)
     }
 }
 
 impl< T: Decodable> Decodable for Box<[T]> {
     fn decode<D: Decoder>(d: &mut D) -> Result<Box<[T]>, D::Error> {
-        let v: Vec<T> = try!(Decodable::decode(d));
+        let v: Vec<T> = Decodable::decode(d)?;
         Ok(v.into_boxed_slice())
     }
 }
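
The Decodable impls in this file all follow the same shape: decode the inner value, propagating errors with ?, then wrap it. A hedged sketch of the pattern for a hypothetical newtype (not part of the patch):

    struct Meters(f64);

    impl Decodable for Meters {
        fn decode<D: Decoder>(d: &mut D) -> Result<Meters, D::Error> {
            // decode the wrapped f64, bubbling any decoder error up with `?`
            Ok(Meters(Decodable::decode(d)?))
        }
    }
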
@@ -431,7 +431,7 @@ impl<T:Encodable> Encodable for Rc<T> {
 impl<T:Decodable> Decodable for Rc<T> {
     #[inline]
     fn decode<D: Decoder>(d: &mut D) -> Result<Rc<T>, D::Error> {
-        Ok(Rc::new(try!(Decodable::decode(d))))
+        Ok(Rc::new(Decodable::decode(d)?))
     }
 }
 
@@ -439,7 +439,7 @@ impl<T:Encodable> Encodable for [T] {
     fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
         s.emit_seq(self.len(), |s| {
             for (i, e) in self.iter().enumerate() {
-                try!(s.emit_seq_elt(i, |s| e.encode(s)))
+                s.emit_seq_elt(i, |s| e.encode(s))?
             }
             Ok(())
         })
@@ -450,7 +450,7 @@ impl<T:Encodable> Encodable for Vec<T> {
     fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
         s.emit_seq(self.len(), |s| {
             for (i, e) in self.iter().enumerate() {
-                try!(s.emit_seq_elt(i, |s| e.encode(s)))
+                s.emit_seq_elt(i, |s| e.encode(s))?
             }
             Ok(())
         })
@@ -462,7 +462,7 @@ impl<T:Decodable> Decodable for Vec<T> {
         d.read_seq(|d, len| {
             let mut v = Vec::with_capacity(len);
             for i in 0..len {
-                v.push(try!(d.read_seq_elt(i, |d| Decodable::decode(d))));
+                v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
             }
             Ok(v)
         })
@@ -484,7 +484,7 @@ impl<T:Decodable> Decodable for Option<T> {
     fn decode<D: Decoder>(d: &mut D) -> Result<Option<T>, D::Error> {
         d.read_option(|d, b| {
             if b {
-                Ok(Some(try!(Decodable::decode(d))))
+                Ok(Some(Decodable::decode(d)?))
             } else {
                 Ok(None)
             }
@@ -546,7 +546,7 @@ impl Encodable for path::PathBuf {
 
 impl Decodable for path::PathBuf {
     fn decode<D: Decoder>(d: &mut D) -> Result<path::PathBuf, D::Error> {
-        let bytes: String = try!(Decodable::decode(d));
+        let bytes: String = Decodable::decode(d)?;
         Ok(path::PathBuf::from(bytes))
     }
 }
@@ -559,7 +559,7 @@ impl<T: Encodable + Copy> Encodable for Cell<T> {
 
 impl<T: Decodable + Copy> Decodable for Cell<T> {
     fn decode<D: Decoder>(d: &mut D) -> Result<Cell<T>, D::Error> {
-        Ok(Cell::new(try!(Decodable::decode(d))))
+        Ok(Cell::new(Decodable::decode(d)?))
     }
 }
 
@@ -576,7 +576,7 @@ impl<T: Encodable> Encodable for RefCell<T> {
 
 impl<T: Decodable> Decodable for RefCell<T> {
     fn decode<D: Decoder>(d: &mut D) -> Result<RefCell<T>, D::Error> {
-        Ok(RefCell::new(try!(Decodable::decode(d))))
+        Ok(RefCell::new(Decodable::decode(d)?))
     }
 }
 
@@ -588,7 +588,7 @@ impl<T:Encodable> Encodable for Arc<T> {
 
 impl<T:Decodable+Send+Sync> Decodable for Arc<T> {
     fn decode<D: Decoder>(d: &mut D) -> Result<Arc<T>, D::Error> {
-        Ok(Arc::new(try!(Decodable::decode(d))))
+        Ok(Arc::new(Decodable::decode(d)?))
     }
 }
 
@@ -607,9 +607,9 @@ impl<S:Encoder> EncoderHelpers for S {
     {
         self.emit_seq(v.len(), |this| {
             for (i, e) in v.iter().enumerate() {
-                try!(this.emit_seq_elt(i, |this| {
+                this.emit_seq_elt(i, |this| {
                     f(this, e)
-                }));
+                })?;
             }
             Ok(())
         })
@@ -629,7 +629,7 @@ impl<D: Decoder> DecoderHelpers for D {
         self.read_seq(|this, len| {
             let mut v = Vec::with_capacity(len);
             for i in 0..len {
-                v.push(try!(this.read_seq_elt(i, |this| f(this))));
+                v.push(this.read_seq_elt(i, |this| f(this))?);
             }
             Ok(v)
         })
index 8cabdc41a0583324bb700ff8f79a88d78a78c5d9..0db91034eb5ac8eed231b5d28b9c3f4f9bc06347 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! Operations on ASCII strings and characters
+//! Operations on ASCII strings and characters.
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
@@ -128,8 +128,6 @@ pub trait AsciiExt {
     /// # Examples
     ///
     /// ```
-    /// #![feature(ascii)]
-    ///
     /// use std::ascii::AsciiExt;
     ///
     /// let mut ascii = 'a';
@@ -138,7 +136,7 @@ pub trait AsciiExt {
     ///
     /// assert_eq!('A', ascii);
     /// ```
-    #[unstable(feature = "ascii", issue = "27809")]
+    #[stable(feature = "ascii", since = "1.9.0")]
     fn make_ascii_uppercase(&mut self);
 
     /// Converts this type to its ASCII lower case equivalent in-place.
@@ -148,8 +146,6 @@ pub trait AsciiExt {
     /// # Examples
     ///
     /// ```
-    /// #![feature(ascii)]
-    ///
     /// use std::ascii::AsciiExt;
     ///
     /// let mut ascii = 'A';
@@ -158,7 +154,7 @@ pub trait AsciiExt {
     ///
     /// assert_eq!('a', ascii);
     /// ```
-    #[unstable(feature = "ascii", issue = "27809")]
+    #[stable(feature = "ascii", since = "1.9.0")]
     fn make_ascii_lowercase(&mut self);
 }
 
@@ -567,4 +563,10 @@ mod tests {
                     &from_u32(lower).unwrap().to_string()));
         }
     }
+
+    #[test]
+    fn inference_works() {
+        let x = "a".to_string();
+        x.eq_ignore_ascii_case("A");
+    }
 }
index 8fb49a1be4ea6add15f85bcf006551066b403938..c32bca82bd5a0da00020956eb8f9478ddc3e0cc8 100644 (file)
@@ -23,23 +23,23 @@ fn main() {
 
     let target = env::var("TARGET").unwrap();
     let host = env::var("HOST").unwrap();
-    if !target.contains("apple") && !target.contains("msvc") {
+    if !target.contains("apple") && !target.contains("msvc") && !target.contains("emscripten"){
         build_libbacktrace(&host, &target);
     }
 
-    if target.contains("unknown-linux") {
-        if target.contains("musl") {
+    if target.contains("linux") {
+        if target.contains("musl") && (target.contains("x86_64") || target.contains("i686")) {
             println!("cargo:rustc-link-lib=static=unwind");
+        } else if target.contains("android") {
+            println!("cargo:rustc-link-lib=dl");
+            println!("cargo:rustc-link-lib=log");
+            println!("cargo:rustc-link-lib=gcc");
         } else {
             println!("cargo:rustc-link-lib=dl");
             println!("cargo:rustc-link-lib=rt");
             println!("cargo:rustc-link-lib=pthread");
             println!("cargo:rustc-link-lib=gcc_s");
         }
-    } else if target.contains("android") {
-        println!("cargo:rustc-link-lib=dl");
-        println!("cargo:rustc-link-lib=log");
-        println!("cargo:rustc-link-lib=gcc");
     } else if target.contains("freebsd") {
         println!("cargo:rustc-link-lib=execinfo");
         println!("cargo:rustc-link-lib=pthread");
@@ -50,7 +50,9 @@ fn main() {
 
         if target.contains("rumprun") {
             println!("cargo:rustc-link-lib=unwind");
-        } else if target.contains("netbsd") || target.contains("openbsd") {
+        } else if target.contains("netbsd") {
+            println!("cargo:rustc-link-lib=gcc_s");
+        } else if target.contains("openbsd") {
             println!("cargo:rustc-link-lib=gcc");
         } else if target.contains("bitrig") {
             println!("cargo:rustc-link-lib=c++abi");
index 051829fbafb01933104037a36f8851a8cff3e2d0..c20270e830665d4ee0eb36d3bef5b9352f62166d 100644 (file)
@@ -9,14 +9,13 @@
 // except according to those terms.
 
 use self::Entry::*;
-use self::SearchResult::*;
 use self::VacantEntryState::*;
 
 use borrow::Borrow;
 use cmp::max;
 use fmt::{self, Debug};
 use hash::{Hash, SipHasher, BuildHasher};
-use iter::{self, Map, FromIterator};
+use iter::FromIterator;
 use mem::{self, replace};
 use ops::{Deref, Index};
 use rand::{self, Rng};
@@ -26,7 +25,6 @@ use super::table::{
     Bucket,
     EmptyBucket,
     FullBucket,
-    FullBucketImm,
     FullBucketMut,
     RawTable,
     SafeHash
@@ -342,10 +340,11 @@ pub struct HashMap<K, V, S = RandomState> {
 }
 
 /// Search for a pre-hashed key.
+#[inline]
 fn search_hashed<K, V, M, F>(table: M,
                              hash: SafeHash,
                              mut is_match: F)
-                             -> SearchResult<K, V, M> where
+                             -> InternalEntry<K, V, M> where
     M: Deref<Target=RawTable<K, V>>,
     F: FnMut(&K) -> bool,
 {
@@ -353,37 +352,50 @@ fn search_hashed<K, V, M, F>(table: M,
     // undefined behavior when Bucket::new gets the raw bucket in this
     // case, immediately return the appropriate search result.
     if table.capacity() == 0 {
-        return TableRef(table);
+        return InternalEntry::TableIsEmpty;
     }
 
-    let size = table.size();
+    let size = table.size() as isize;
     let mut probe = Bucket::new(table, hash);
-    let ib = probe.index();
+    let ib = probe.index() as isize;
 
-    while probe.index() != ib + size {
+    loop {
         let full = match probe.peek() {
-            Empty(b) => return TableRef(b.into_table()), // hit an empty bucket
-            Full(b) => b
+            Empty(bucket) => {
+                // Found a hole!
+                return InternalEntry::Vacant {
+                    hash: hash,
+                    elem: NoElem(bucket),
+                };
+            }
+            Full(bucket) => bucket
         };
 
-        if full.distance() + ib < full.index() {
+        let robin_ib = full.index() as isize - full.displacement() as isize;
+
+        if ib < robin_ib {
+            // Found a luckier bucket than me.
             // We can finish the search early if we hit any bucket
             // with a lower distance to initial bucket than we've probed.
-            return TableRef(full.into_table());
+            return InternalEntry::Vacant {
+                hash: hash,
+                elem: NeqElem(full, robin_ib as usize),
+            };
         }
 
         // If the hash doesn't match, it can't be this one..
         if hash == full.hash() {
             // If the key doesn't match, it can't be this one..
             if is_match(full.read().0) {
-                return FoundExisting(full);
+                return InternalEntry::Occupied {
+                    elem: full
+                };
             }
         }
 
         probe = full.next();
+        debug_assert!(probe.index() as isize != ib + size + 1);
     }
-
-    TableRef(probe.into_table())
 }
 
 fn pop_internal<K, V>(starting_bucket: FullBucketMut<K, V>) -> (K, V) {
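
The rewritten `search_hashed` above keeps the robin-hood invariant: probing stops as soon as it reaches a resident entry that sits closer to its ideal bucket than the probed key would, because at insert time the key being searched for could never have been placed past that point. A hedged sketch of just that decision, with plain integers standing in for the real Bucket types:

    /// probe_index: slot being inspected; ideal_index: where the searched key hashes to;
    /// resident_displacement: how far the entry already in the slot is from its own ideal slot.
    fn keep_probing(probe_index: usize, ideal_index: usize, resident_displacement: usize) -> bool {
        let my_displacement = probe_index - ideal_index;
        // once we are displaced further than the resident entry, the slot would have been
        // "robin-hooded" at insert time, so the key cannot be further along the probe chain
        my_displacement <= resident_displacement
    }
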
@@ -393,7 +405,7 @@ fn pop_internal<K, V>(starting_bucket: FullBucketMut<K, V>) -> (K, V) {
         None => return (retkey, retval)
     };
 
-    while gap.full().distance() != 0 {
+    while gap.full().displacement() != 0 {
         gap = match gap.shift() {
             Some(b) => b,
             None => break
@@ -409,78 +421,60 @@ fn pop_internal<K, V>(starting_bucket: FullBucketMut<K, V>) -> (K, V) {
 /// to recalculate it.
 ///
 /// `hash`, `k`, and `v` are the elements to "robin hood" into the hashtable.
-fn robin_hood<'a, K: 'a, V: 'a>(mut bucket: FullBucketMut<'a, K, V>,
+fn robin_hood<'a, K: 'a, V: 'a>(bucket: FullBucketMut<'a, K, V>,
                         mut ib: usize,
                         mut hash: SafeHash,
-                        mut k: K,
-                        mut v: V)
+                        mut key: K,
+                        mut val: V)
                         -> &'a mut V {
     let starting_index = bucket.index();
-    let size = {
-        let table = bucket.table(); // FIXME "lifetime too short".
-        table.size()
-    };
+    let size = bucket.table().size();
+    // Save the *starting point*.
+    let mut bucket = bucket.stash();
     // There can be at most `size - dib` buckets to displace, because
     // in the worst case, there are `size` elements and we already are
-    // `distance` buckets away from the initial one.
-    let idx_end = starting_index + size - bucket.distance();
+    // `displacement` buckets away from the initial one.
+    let idx_end = starting_index + size - bucket.displacement();
 
     loop {
-        let (old_hash, old_key, old_val) = bucket.replace(hash, k, v);
+        let (old_hash, old_key, old_val) = bucket.replace(hash, key, val);
+        hash = old_hash;
+        key = old_key;
+        val = old_val;
+
         loop {
             let probe = bucket.next();
-            assert!(probe.index() != idx_end);
+            debug_assert!(probe.index() != idx_end);
 
             let full_bucket = match probe.peek() {
                 Empty(bucket) => {
                     // Found a hole!
-                    let b = bucket.put(old_hash, old_key, old_val);
+                    let bucket = bucket.put(hash, key, val);
                     // Now that it's stolen, just read the value's pointer
-                    // right out of the table!
-                    return Bucket::at_index(b.into_table(), starting_index)
-                               .peek()
-                               .expect_full()
-                               .into_mut_refs()
-                               .1;
+                    // right out of the table! Go back to the *starting point*.
+                    //
+                    // This use of `into_table` is misleading. It turns the
+                    // bucket, which is a FullBucket on top of a
+                    // FullBucketMut, into just one FullBucketMut. The "table"
+                    // refers to the inner FullBucketMut in this context.
+                    return bucket.into_table().into_mut_refs().1;
                 },
                 Full(bucket) => bucket
             };
 
-            let probe_ib = full_bucket.index() - full_bucket.distance();
+            let probe_ib = full_bucket.index() - full_bucket.displacement();
 
             bucket = full_bucket;
 
             // Robin hood! Steal the spot.
             if ib < probe_ib {
                 ib = probe_ib;
-                hash = old_hash;
-                k = old_key;
-                v = old_val;
                 break;
             }
         }
     }
 }
 
-/// A result that works like Option<FullBucket<..>> but preserves
-/// the reference that grants us access to the table in any case.
-enum SearchResult<K, V, M> {
-    // This is an entry that holds the given key:
-    FoundExisting(FullBucket<K, V, M>),
-
-    // There was no such entry. The reference is given back:
-    TableRef(M)
-}
-
-impl<K, V, M> SearchResult<K, V, M> {
-    fn into_option(self) -> Option<FullBucket<K, V, M>> {
-        match self {
-            FoundExisting(bucket) => Some(bucket),
-            TableRef(_) => None
-        }
-    }
-}
-
 impl<K, V, S> HashMap<K, V, S>
     where K: Eq + Hash, S: BuildHasher
 {
@@ -491,20 +485,20 @@ impl<K, V, S> HashMap<K, V, S>
     /// Search for a key, yielding the index if it's found in the hashtable.
     /// If you already have the hash for the key lying around, use
     /// search_hashed.
-    fn search<'a, Q: ?Sized>(&'a self, q: &Q) -> Option<FullBucketImm<'a, K, V>>
+    #[inline]
+    fn search<'a, Q: ?Sized>(&'a self, q: &Q) -> InternalEntry<K, V, &'a RawTable<K, V>>
         where K: Borrow<Q>, Q: Eq + Hash
     {
         let hash = self.make_hash(q);
         search_hashed(&self.table, hash, |k| q.eq(k.borrow()))
-            .into_option()
     }
 
-    fn search_mut<'a, Q: ?Sized>(&'a mut self, q: &Q) -> Option<FullBucketMut<'a, K, V>>
+    #[inline]
+    fn search_mut<'a, Q: ?Sized>(&'a mut self, q: &Q) -> InternalEntry<K, V, &'a mut RawTable<K, V>>
         where K: Borrow<Q>, Q: Eq + Hash
     {
         let hash = self.make_hash(q);
         search_hashed(&mut self.table, hash, |k| q.eq(k.borrow()))
-            .into_option()
     }
 
     // The caller should ensure that invariants by Robin Hood Hashing hold.
@@ -592,15 +586,6 @@ impl<K, V, S> HashMap<K, V, S>
         }
     }
 
-    /// Deprecated, renamed to `with_hasher`
-    #[inline]
-    #[unstable(feature = "hashmap_hasher", reason = "hasher stuff is unclear",
-               issue = "27713")]
-    #[rustc_deprecated(since = "1.7.0", reason = "renamed to with_hasher")]
-    pub fn with_hash_state(hash_state: S) -> HashMap<K, V, S> {
-        HashMap::with_hasher(hash_state)
-    }
-
     /// Creates an empty HashMap with space for at least `capacity`
     /// elements, using `hasher` to hash the keys.
     ///
@@ -634,20 +619,8 @@ impl<K, V, S> HashMap<K, V, S>
         }
     }
 
-    /// Deprecated, renamed to `with_capacity_and_hasher`
-    #[inline]
-    #[unstable(feature = "hashmap_hasher", reason = "hasher stuff is unclear",
-               issue = "27713")]
-    #[rustc_deprecated(since = "1.7.0",
-                       reason = "renamed to with_capacity_and_hasher")]
-    pub fn with_capacity_and_hash_state(capacity: usize, hash_state: S)
-                                        -> HashMap<K, V, S> {
-        HashMap::with_capacity_and_hasher(capacity, hash_state)
-    }
-
     /// Returns a reference to the map's hasher.
-    #[unstable(feature = "hashmap_public_hasher", reason = "don't want to make insta-stable",
-               issue = "31262")]
+    #[stable(feature = "hashmap_public_hasher", since = "1.9.0")]
     pub fn hasher(&self) -> &S {
         &self.hash_builder
     }
@@ -731,7 +704,7 @@ impl<K, V, S> HashMap<K, V, S>
         loop {
             bucket = match bucket.peek() {
                 Full(full) => {
-                    if full.distance() == 0 {
+                    if full.displacement() == 0 {
                         // This bucket occupies its ideal spot.
                         // It indicates the start of another "cluster".
                         bucket = full.into_bucket();
@@ -767,10 +740,9 @@ impl<K, V, S> HashMap<K, V, S>
                     let h = bucket.hash();
                     let (b, k, v) = bucket.take();
                     self.insert_hashed_ordered(h, k, v);
-                    {
-                        let t = b.table(); // FIXME "lifetime too short".
-                        if t.size() == 0 { break }
-                    };
+                    if b.table().size() == 0 {
+                        break;
+                    }
                     b.into_bucket()
                 }
                 Empty(b) => b.into_bucket()
@@ -824,53 +796,19 @@ impl<K, V, S> HashMap<K, V, S>
     ///
     /// If the key already exists, the hashtable will be returned untouched
     /// and a reference to the existing element will be returned.
-    fn insert_hashed_nocheck(&mut self, hash: SafeHash, k: K, v: V) -> &mut V {
-        self.insert_or_replace_with(hash, k, v, |_, _, _, _| ())
-    }
-
-    fn insert_or_replace_with<'a, F>(&'a mut self,
-                                     hash: SafeHash,
-                                     k: K,
-                                     v: V,
-                                     mut found_existing: F)
-                                     -> &'a mut V where
-        F: FnMut(&mut K, &mut V, K, V),
-    {
-        // Worst case, we'll find one empty bucket among `size + 1` buckets.
-        let size = self.table.size();
-        let mut probe = Bucket::new(&mut self.table, hash);
-        let ib = probe.index();
-
-        loop {
-            let mut bucket = match probe.peek() {
-                Empty(bucket) => {
-                    // Found a hole!
-                    return bucket.put(hash, k, v).into_mut_refs().1;
-                }
-                Full(bucket) => bucket
-            };
-
-            // hash matches?
-            if bucket.hash() == hash {
-                // key matches?
-                if k == *bucket.read_mut().0 {
-                    let (bucket_k, bucket_v) = bucket.into_mut_refs();
-                    debug_assert!(k == *bucket_k);
-                    // Key already exists. Get its reference.
-                    found_existing(bucket_k, bucket_v, k, v);
-                    return bucket_v;
-                }
+    fn insert_hashed_nocheck(&mut self, hash: SafeHash, k: K, v: V) -> Option<V> {
+        let entry = search_hashed(&mut self.table, hash, |key| *key == k).into_entry(k);
+        match entry {
+            Some(Occupied(mut elem)) => {
+                Some(elem.insert(v))
             }
-
-            let robin_ib = bucket.index() as isize - bucket.distance() as isize;
-
-            if (ib as isize) < robin_ib {
-                // Found a luckier bucket than me. Better steal his spot.
-                return robin_hood(bucket, robin_ib as usize, hash, k, v);
+            Some(Vacant(elem)) => {
+                elem.insert(v);
+                None
+            }
+            None => {
+                unreachable!()
             }
-
-            probe = bucket.next();
-            assert!(probe.index() != ib + size + 1);
         }
     }
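
With `insert_hashed_nocheck` rebuilt on top of `search_hashed` and `into_entry`, the replaced-value plumbing that `insert_or_replace_with` used to do via callback now falls out of the Entry API: an occupied entry hands back the previous value, a vacant one returns None. The observable behaviour at the public API, as a small sketch on stable Rust:

    use std::collections::HashMap;

    fn main() {
        let mut m = HashMap::new();
        assert_eq!(m.insert("k", 1), None);     // vacant entry: nothing to hand back
        assert_eq!(m.insert("k", 2), Some(1));  // occupied entry: the displaced value is returned
        assert_eq!(m["k"], 2);
    }
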
 
@@ -893,10 +831,7 @@ impl<K, V, S> HashMap<K, V, S>
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn keys<'a>(&'a self) -> Keys<'a, K, V> {
-        fn first<A, B>((a, _): (A, B)) -> A { a }
-        let first: fn((&'a K,&'a V)) -> &'a K = first; // coerce to fn ptr
-
-        Keys { inner: self.iter().map(first) }
+        Keys { inner: self.iter() }
     }
 
     /// An iterator visiting all values in arbitrary order.
@@ -918,10 +853,35 @@ impl<K, V, S> HashMap<K, V, S>
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn values<'a>(&'a self) -> Values<'a, K, V> {
-        fn second<A, B>((_, b): (A, B)) -> B { b }
-        let second: fn((&'a K,&'a V)) -> &'a V = second; // coerce to fn ptr
+        Values { inner: self.iter() }
+    }
 
-        Values { inner: self.iter().map(second) }
+    /// An iterator visiting all values mutably in arbitrary order.
+    /// Iterator element type is `&'a mut V`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #![feature(map_values_mut)]
+    /// use std::collections::HashMap;
+    ///
+    /// let mut map = HashMap::new();
+    ///
+    /// map.insert("a", 1);
+    /// map.insert("b", 2);
+    /// map.insert("c", 3);
+    ///
+    /// for val in map.values_mut() {
+    ///     *val = *val + 10;
+    /// }
+    ///
+    /// for val in map.values() {
+    ///     print!("{}", val);
+    /// }
+    /// ```
+    #[unstable(feature = "map_values_mut", reason = "recently added", issue = "32551")]
+    pub fn values_mut<'a>(&'a mut self) -> ValuesMut<'a, K, V> {
+        ValuesMut { inner: self.iter_mut() }
     }
 
     /// An iterator visiting all key-value pairs in arbitrary order.
@@ -997,9 +957,7 @@ impl<K, V, S> HashMap<K, V, S>
     pub fn entry(&mut self, key: K) -> Entry<K, V> {
         // Gotta resize now.
         self.reserve(1);
-
-        let hash = self.make_hash(&key);
-        search_entry_hashed(&mut self.table, hash, key)
+        self.search_mut(&key).into_entry(key).expect("unreachable")
     }
 
     /// Returns the number of elements in the map.
@@ -1055,11 +1013,8 @@ impl<K, V, S> HashMap<K, V, S>
     #[inline]
     #[stable(feature = "drain", since = "1.6.0")]
     pub fn drain(&mut self) -> Drain<K, V> {
-        fn last_two<A, B, C>((_, b, c): (A, B, C)) -> (B, C) { (b, c) }
-        let last_two: fn((SafeHash, K, V)) -> (K, V) = last_two; // coerce to fn pointer
-
         Drain {
-            inner: self.table.drain().map(last_two),
+            inner: self.table.drain(),
         }
     }
 
@@ -1102,7 +1057,7 @@ impl<K, V, S> HashMap<K, V, S>
     pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V>
         where K: Borrow<Q>, Q: Hash + Eq
     {
-        self.search(k).map(|bucket| bucket.into_refs().1)
+        self.search(k).into_occupied_bucket().map(|bucket| bucket.into_refs().1)
     }
 
     /// Returns true if the map contains a value for the specified key.
@@ -1125,7 +1080,7 @@ impl<K, V, S> HashMap<K, V, S>
     pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
         where K: Borrow<Q>, Q: Hash + Eq
     {
-        self.search(k).is_some()
+        self.search(k).into_occupied_bucket().is_some()
     }
 
     /// Returns a mutable reference to the value corresponding to the key.
@@ -1150,7 +1105,7 @@ impl<K, V, S> HashMap<K, V, S>
     pub fn get_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
         where K: Borrow<Q>, Q: Hash + Eq
     {
-        self.search_mut(k).map(|bucket| bucket.into_mut_refs().1)
+        self.search_mut(k).into_occupied_bucket().map(|bucket| bucket.into_mut_refs().1)
     }
 
     /// Inserts a key-value pair into the map.
@@ -1181,12 +1136,7 @@ impl<K, V, S> HashMap<K, V, S>
     pub fn insert(&mut self, k: K, v: V) -> Option<V> {
         let hash = self.make_hash(&k);
         self.reserve(1);
-
-        let mut retval = None;
-        self.insert_or_replace_with(hash, k, v, |_, val_ref, _, val| {
-            retval = Some(replace(val_ref, val));
-        });
-        retval
+        self.insert_hashed_nocheck(hash, k, v)
     }
 
     /// Removes a key from the map, returning the value at the key if the key
@@ -1214,54 +1164,7 @@ impl<K, V, S> HashMap<K, V, S>
             return None
         }
 
-        self.search_mut(k).map(|bucket| pop_internal(bucket).1)
-    }
-}
-
-fn search_entry_hashed<'a, K: Eq, V>(table: &'a mut RawTable<K,V>, hash: SafeHash, k: K)
-        -> Entry<'a, K, V>
-{
-    // Worst case, we'll find one empty bucket among `size + 1` buckets.
-    let size = table.size();
-    let mut probe = Bucket::new(table, hash);
-    let ib = probe.index();
-
-    loop {
-        let bucket = match probe.peek() {
-            Empty(bucket) => {
-                // Found a hole!
-                return Vacant(VacantEntry {
-                    hash: hash,
-                    key: k,
-                    elem: NoElem(bucket),
-                });
-            },
-            Full(bucket) => bucket
-        };
-
-        // hash matches?
-        if bucket.hash() == hash {
-            // key matches?
-            if k == *bucket.read().0 {
-                return Occupied(OccupiedEntry{
-                    elem: bucket,
-                });
-            }
-        }
-
-        let robin_ib = bucket.index() as isize - bucket.distance() as isize;
-
-        if (ib as isize) < robin_ib {
-            // Found a luckier bucket than me. Better steal his spot.
-            return Vacant(VacantEntry {
-                hash: hash,
-                key: k,
-                elem: NeqElem(bucket, robin_ib as usize),
-            });
-        }
-
-        probe = bucket.next();
-        assert!(probe.index() != ib + size + 1);
+        self.search_mut(k).into_occupied_bucket().map(|bucket| pop_internal(bucket).1)
     }
 }
 
@@ -1341,13 +1244,13 @@ pub struct IterMut<'a, K: 'a, V: 'a> {
 /// HashMap move iterator.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct IntoIter<K, V> {
-    inner: iter::Map<table::IntoIter<K, V>, fn((SafeHash, K, V)) -> (K, V)>
+    inner: table::IntoIter<K, V>
 }
 
 /// HashMap keys iterator.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Keys<'a, K: 'a, V: 'a> {
-    inner: Map<Iter<'a, K, V>, fn((&'a K, &'a V)) -> &'a K>
+    inner: Iter<'a, K, V>
 }
 
 // FIXME(#19839) Remove in favor of `#[derive(Clone)]`
@@ -1363,7 +1266,7 @@ impl<'a, K, V> Clone for Keys<'a, K, V> {
 /// HashMap values iterator.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Values<'a, K: 'a, V: 'a> {
-    inner: Map<Iter<'a, K, V>, fn((&'a K, &'a V)) -> &'a V>
+    inner: Iter<'a, K, V>
 }
 
 // FIXME(#19839) Remove in favor of `#[derive(Clone)]`
@@ -1379,21 +1282,56 @@ impl<'a, K, V> Clone for Values<'a, K, V> {
 /// HashMap drain iterator.
 #[stable(feature = "drain", since = "1.6.0")]
 pub struct Drain<'a, K: 'a, V: 'a> {
-    inner: iter::Map<table::Drain<'a, K, V>, fn((SafeHash, K, V)) -> (K, V)>
+    inner: table::Drain<'a, K, V>
 }
 
-/// A view into a single occupied location in a HashMap.
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct OccupiedEntry<'a, K: 'a, V: 'a> {
-    elem: FullBucket<K, V, &'a mut RawTable<K, V>>,
+/// Mutable HashMap values iterator.
+#[unstable(feature = "map_values_mut", reason = "recently added", issue = "32551")]
+pub struct ValuesMut<'a, K: 'a, V: 'a> {
+    inner: IterMut<'a, K, V>
 }
 
-/// A view into a single empty location in a HashMap.
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct VacantEntry<'a, K: 'a, V: 'a> {
-    hash: SafeHash,
-    key: K,
-    elem: VacantEntryState<K, V, &'a mut RawTable<K, V>>,
+enum InternalEntry<K, V, M> {
+    Occupied {
+        elem: FullBucket<K, V, M>,
+    },
+    Vacant {
+        hash: SafeHash,
+        elem: VacantEntryState<K, V, M>,
+    },
+    TableIsEmpty,
+}
+
+impl<K, V, M> InternalEntry<K, V, M> {
+    #[inline]
+    fn into_occupied_bucket(self) -> Option<FullBucket<K, V, M>> {
+        match self {
+            InternalEntry::Occupied { elem } => Some(elem),
+            _ => None,
+        }
+    }
+}
+
+impl<'a, K, V> InternalEntry<K, V, &'a mut RawTable<K, V>> {
+    #[inline]
+    fn into_entry(self, key: K) -> Option<Entry<'a, K, V>> {
+        match self {
+            InternalEntry::Occupied { elem } => {
+                Some(Occupied(OccupiedEntry {
+                    key: Some(key),
+                    elem: elem
+                }))
+            }
+            InternalEntry::Vacant { hash, elem } => {
+                Some(Vacant(VacantEntry {
+                    hash: hash,
+                    key: key,
+                    elem: elem,
+                }))
+            }
+            InternalEntry::TableIsEmpty => None
+        }
+    }
 }
 
 /// A view into a single location in a map, which may be vacant or occupied.
@@ -1412,6 +1350,21 @@ pub enum Entry<'a, K: 'a, V: 'a> {
     ),
 }
 
+/// A view into a single occupied location in a HashMap.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct OccupiedEntry<'a, K: 'a, V: 'a> {
+    key: Option<K>,
+    elem: FullBucket<K, V, &'a mut RawTable<K, V>>,
+}
+
+/// A view into a single empty location in a HashMap.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct VacantEntry<'a, K: 'a, V: 'a> {
+    hash: SafeHash,
+    key: K,
+    elem: VacantEntryState<K, V, &'a mut RawTable<K, V>>,
+}
+
 /// Possible states of a VacantEntry.
 enum VacantEntryState<K, V, M> {
     /// The index is occupied, but the key to insert has precedence,
@@ -1470,11 +1423,8 @@ impl<K, V, S> IntoIterator for HashMap<K, V, S>
     /// let vec: Vec<(&str, isize)> = map.into_iter().collect();
     /// ```
     fn into_iter(self) -> IntoIter<K, V> {
-        fn last_two<A, B, C>((_, b, c): (A, B, C)) -> (B, C) { (b, c) }
-        let last_two: fn((SafeHash, K, V)) -> (K, V) = last_two;
-
         IntoIter {
-            inner: self.table.into_iter().map(last_two)
+            inner: self.table.into_iter()
         }
     }
 }
@@ -1507,7 +1457,7 @@ impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {
 impl<K, V> Iterator for IntoIter<K, V> {
     type Item = (K, V);
 
-    #[inline] fn next(&mut self) -> Option<(K, V)> { self.inner.next() }
+    #[inline] fn next(&mut self) -> Option<(K, V)> { self.inner.next().map(|(_, k, v)| (k, v)) }
     #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
 }
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1519,7 +1469,7 @@ impl<K, V> ExactSizeIterator for IntoIter<K, V> {
 impl<'a, K, V> Iterator for Keys<'a, K, V> {
     type Item = &'a K;
 
-    #[inline] fn next(&mut self) -> Option<(&'a K)> { self.inner.next() }
+    #[inline] fn next(&mut self) -> Option<(&'a K)> { self.inner.next().map(|(k, _)| k) }
     #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
 }
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1531,7 +1481,7 @@ impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> {
 impl<'a, K, V> Iterator for Values<'a, K, V> {
     type Item = &'a V;
 
-    #[inline] fn next(&mut self) -> Option<(&'a V)> { self.inner.next() }
+    #[inline] fn next(&mut self) -> Option<(&'a V)> { self.inner.next().map(|(_, v)| v) }
     #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
 }
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1539,11 +1489,23 @@ impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> {
     #[inline] fn len(&self) -> usize { self.inner.len() }
 }
 
+#[unstable(feature = "map_values_mut", reason = "recently added", issue = "32551")]
+impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
+    type Item = &'a mut V;
+
+    #[inline] fn next(&mut self) -> Option<(&'a mut V)> { self.inner.next().map(|(_, v)| v) }
+    #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
+}
+#[unstable(feature = "map_values_mut", reason = "recently added", issue = "32551")]
+impl<'a, K, V> ExactSizeIterator for ValuesMut<'a, K, V> {
+    #[inline] fn len(&self) -> usize { self.inner.len() }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<'a, K, V> Iterator for Drain<'a, K, V> {
     type Item = (K, V);
 
-    #[inline] fn next(&mut self) -> Option<(K, V)> { self.inner.next() }
+    #[inline] fn next(&mut self) -> Option<(K, V)> { self.inner.next().map(|(_, k, v)| (k, v)) }
     #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
 }
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1574,6 +1536,12 @@ impl<'a, K, V> Entry<'a, K, V> {
 }
 
 impl<'a, K, V> OccupiedEntry<'a, K, V> {
+    /// Gets a reference to the key in the entry.
+    #[unstable(feature = "map_entry_keys", issue = "32281")]
+    pub fn key(&self) -> &K {
+        self.elem.read().0
+    }
+
     /// Gets a reference to the value in the entry.
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn get(&self) -> &V {
@@ -1606,9 +1574,22 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> {
     pub fn remove(self) -> V {
         pop_internal(self.elem).1
     }
+    /// Returns the key that was used for the search.
+    ///
+    /// The key is retained for further use.
+    fn take_key(&mut self) -> Option<K> {
+        self.key.take()
+    }
 }
 
 impl<'a, K: 'a, V: 'a> VacantEntry<'a, K, V> {
+    /// Gets a reference to the key that would be used when inserting a value
+    /// through the VacantEntry.
+    #[unstable(feature = "map_entry_keys", issue = "32281")]
+    pub fn key(&self) -> &K {
+        &self.key
+    }
+
     /// Sets the value of the entry with the VacantEntry's key,
     /// and returns a mutable reference to it
     #[stable(feature = "rust1", since = "1.0.0")]
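
The new `key()` accessors added above (behind the `map_entry_keys` feature in this snapshot) let callers inspect the key on either Entry arm without re-hashing. A hedged usage sketch, assuming a nightly with that feature enabled:

    // #![feature(map_entry_keys)]   // required on this nightly snapshot
    use std::collections::HashMap;
    use std::collections::hash_map::Entry;

    fn main() {
        let mut map: HashMap<&str, i32> = HashMap::new();
        match map.entry("answer") {
            Entry::Occupied(e) => println!("{} is already {}", e.key(), e.get()),
            Entry::Vacant(e) => {
                println!("no value for {} yet", e.key());
                e.insert(42);
            }
        }
    }
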
@@ -1628,11 +1609,11 @@ impl<'a, K: 'a, V: 'a> VacantEntry<'a, K, V> {
 impl<K, V, S> FromIterator<(K, V)> for HashMap<K, V, S>
     where K: Eq + Hash, S: BuildHasher + Default
 {
-    fn from_iter<T: IntoIterator<Item=(K, V)>>(iterable: T) -> HashMap<K, V, S> {
-        let iter = iterable.into_iter();
-        let lower = iter.size_hint().0;
+    fn from_iter<T: IntoIterator<Item=(K, V)>>(iter: T) -> HashMap<K, V, S> {
+        let iterator = iter.into_iter();
+        let lower = iterator.size_hint().0;
         let mut map = HashMap::with_capacity_and_hasher(lower, Default::default());
-        map.extend(iter);
+        map.extend(iterator);
         map
     }
 }
@@ -1703,7 +1684,7 @@ impl<K, S, Q: ?Sized> super::Recover<Q> for HashMap<K, (), S>
     type Key = K;
 
     fn get(&self, key: &Q) -> Option<&K> {
-        self.search(key).map(|bucket| bucket.into_refs().0)
+        self.search(key).into_occupied_bucket().map(|bucket| bucket.into_refs().0)
     }
 
     fn take(&mut self, key: &Q) -> Option<K> {
@@ -1711,21 +1692,39 @@ impl<K, S, Q: ?Sized> super::Recover<Q> for HashMap<K, (), S>
             return None
         }
 
-        self.search_mut(key).map(|bucket| pop_internal(bucket).0)
+        self.search_mut(key).into_occupied_bucket().map(|bucket| pop_internal(bucket).0)
     }
 
     fn replace(&mut self, key: K) -> Option<K> {
-        let hash = self.make_hash(&key);
         self.reserve(1);
 
-        let mut retkey = None;
-        self.insert_or_replace_with(hash, key, (), |key_ref, _, key, _| {
-            retkey = Some(replace(key_ref, key));
-        });
-        retkey
+        match self.entry(key) {
+            Occupied(mut occupied) => {
+                let key = occupied.take_key().unwrap();
+                Some(mem::replace(occupied.elem.read_mut().0, key))
+            }
+            Vacant(vacant) => {
+                vacant.insert(());
+                None
+            }
+        }
     }
 }
 
+#[allow(dead_code)]
+fn assert_covariance() {
+    fn map_key<'new>(v: HashMap<&'static str, u8>) -> HashMap<&'new str, u8> { v }
+    fn map_val<'new>(v: HashMap<u8, &'static str>) -> HashMap<u8, &'new str> { v }
+    fn iter_key<'a, 'new>(v: Iter<'a, &'static str, u8>) -> Iter<'a, &'new str, u8> { v }
+    fn iter_val<'a, 'new>(v: Iter<'a, u8, &'static str>) -> Iter<'a, u8, &'new str> { v }
+    fn into_iter_key<'new>(v: IntoIter<&'static str, u8>) -> IntoIter<&'new str, u8> { v }
+    fn into_iter_val<'new>(v: IntoIter<u8, &'static str>) -> IntoIter<u8, &'new str> { v }
+    fn keys_key<'a, 'new>(v: Keys<'a, &'static str, u8>) -> Keys<'a, &'new str, u8> { v }
+    fn keys_val<'a, 'new>(v: Keys<'a, u8, &'static str>) -> Keys<'a, u8, &'new str> { v }
+    fn values_key<'a, 'new>(v: Values<'a, &'static str, u8>) -> Values<'a, &'new str, u8> { v }
+    fn values_val<'a, 'new>(v: Values<'a, u8, &'static str>) -> Values<'a, u8, &'new str> { v }
+}
+
 #[cfg(test)]
 mod test_map {
     use prelude::v1::*;
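
The `assert_covariance` helper added above is a compile-time-only check: each function coerces a container over &'static str into the same container over a shorter lifetime, which only type-checks if the type is covariant in that parameter. The same trick works for any wrapper type; a minimal sketch with an illustrative name:

    #[allow(dead_code)]
    fn wrapper_is_covariant<'new>(v: Vec<&'static str>) -> Vec<&'new str> {
        // compiles only because Vec<T> is covariant in T,
        // so Vec<&'static str> coerces to Vec<&'new str>
        v
    }
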
@@ -1757,6 +1756,20 @@ mod test_map {
         assert_eq!(*m.get(&2).unwrap(), 4);
     }
 
+    #[test]
+    fn test_clone() {
+        let mut m = HashMap::new();
+        assert_eq!(m.len(), 0);
+        assert!(m.insert(1, 2).is_none());
+        assert_eq!(m.len(), 1);
+        assert!(m.insert(2, 4).is_none());
+        assert_eq!(m.len(), 2);
+        let m2 = m.clone();
+        assert_eq!(*m2.get(&1).unwrap(), 2);
+        assert_eq!(*m2.get(&2).unwrap(), 4);
+        assert_eq!(m2.len(), 2);
+    }
+
     thread_local! { static DROP_VECTOR: RefCell<Vec<isize>> = RefCell::new(Vec::new()) }
 
     #[derive(Hash, PartialEq, Eq)]
@@ -1848,7 +1861,7 @@ mod test_map {
     }
 
     #[test]
-    fn test_move_iter_drops() {
+    fn test_into_iter_drops() {
         DROP_VECTOR.with(|v| {
             *v.borrow_mut() = vec![0; 200];
         });
@@ -1913,11 +1926,36 @@ mod test_map {
     }
 
     #[test]
-    fn test_empty_pop() {
+    fn test_empty_remove() {
         let mut m: HashMap<isize, bool> = HashMap::new();
         assert_eq!(m.remove(&0), None);
     }
 
+    #[test]
+    fn test_empty_entry() {
+        let mut m: HashMap<isize, bool> = HashMap::new();
+        match m.entry(0) {
+            Occupied(_) => panic!(),
+            Vacant(_) => {}
+        }
+        assert!(*m.entry(0).or_insert(true));
+        assert_eq!(m.len(), 1);
+    }
+
+    #[test]
+    fn test_empty_iter() {
+        let mut m: HashMap<isize, bool> = HashMap::new();
+        assert_eq!(m.drain().next(), None);
+        assert_eq!(m.keys().next(), None);
+        assert_eq!(m.values().next(), None);
+        assert_eq!(m.values_mut().next(), None);
+        assert_eq!(m.iter().next(), None);
+        assert_eq!(m.iter_mut().next(), None);
+        assert_eq!(m.len(), 0);
+        assert!(m.is_empty());
+        assert_eq!(m.into_iter().next(), None);
+    }
+
     #[test]
     fn test_lots_of_insertions() {
         let mut m = HashMap::new();
@@ -2087,6 +2125,20 @@ mod test_map {
         assert!(values.contains(&'c'));
     }
 
+    #[test]
+    fn test_values_mut() {
+        let vec = vec![(1, 1), (2, 2), (3, 3)];
+        let mut map: HashMap<_, _> = vec.into_iter().collect();
+        for value in map.values_mut() {
+            *value = (*value) * 2
+        }
+        let values: Vec<_> = map.values().cloned().collect();
+        assert_eq!(values.len(), 3);
+        assert!(values.contains(&2));
+        assert!(values.contains(&4));
+        assert!(values.contains(&6));
+    }
+
     #[test]
     fn test_find() {
         let mut m = HashMap::new();
@@ -2454,4 +2506,40 @@ mod test_map {
         a.insert(item, 0);
         assert!(a.capacity() > a.len());
     }
+
+    #[test]
+    fn test_occupied_entry_key() {
+        let mut a = HashMap::new();
+        let key = "hello there";
+        let value = "value goes here";
+        assert!(a.is_empty());
+        a.insert(key.clone(), value.clone());
+        assert_eq!(a.len(), 1);
+        assert_eq!(a[key], value);
+
+        match a.entry(key.clone()) {
+            Vacant(_) => panic!(),
+            Occupied(e) => assert_eq!(key, *e.key()),
+        }
+        assert_eq!(a.len(), 1);
+        assert_eq!(a[key], value);
+    }
+
+    #[test]
+    fn test_vacant_entry_key() {
+        let mut a = HashMap::new();
+        let key = "hello there";
+        let value = "value goes here";
+
+        assert!(a.is_empty());
+        match a.entry(key.clone()) {
+            Occupied(_) => panic!(),
+            Vacant(e) => {
+                assert_eq!(key, *e.key());
+                e.insert(value.clone());
+            },
+        }
+        assert_eq!(a.len(), 1);
+        assert_eq!(a[key], value);
+    }
 }
index 4a6fcf44926a334e4e3b033d7152dc6a3d8c7806..7a22bec5a3f89a3c7f3593b284ca33effc5d37ff 100644 (file)
@@ -14,7 +14,6 @@ mod bench;
 mod table;
 pub mod map;
 pub mod set;
-pub mod state;
 
 trait Recover<Q: ?Sized> {
     type Key;
index 5c58cd9dfbf7ce8eb8e7094625575b5ba03ceaea..b353a4c1ba120ca6521e0bf8cf6133336c911af1 100644 (file)
@@ -11,7 +11,7 @@
 use borrow::Borrow;
 use fmt;
 use hash::{Hash, BuildHasher};
-use iter::{Map, Chain, FromIterator};
+use iter::{Chain, FromIterator};
 use ops::{BitOr, BitAnd, BitXor, Sub};
 
 use super::Recover;
@@ -194,32 +194,11 @@ impl<T, S> HashSet<T, S>
     }
 
     /// Returns a reference to the set's hasher.
-    #[unstable(feature = "hashmap_public_hasher", reason = "don't want to make insta-stable",
-               issue = "31262")]
+    #[stable(feature = "hashmap_public_hasher", since = "1.9.0")]
     pub fn hasher(&self) -> &S {
         self.map.hasher()
     }
 
-    /// Deprecated, renamed to `with_hasher`
-    #[inline]
-    #[unstable(feature = "hashmap_hasher", reason = "hasher stuff is unclear",
-               issue = "27713")]
-    #[rustc_deprecated(since = "1.7.0", reason = "renamed to with_hasher")]
-    pub fn with_hash_state(hash_state: S) -> HashSet<T, S> {
-        HashSet::with_hasher(hash_state)
-    }
-
-    /// Deprecated, renamed to `with_capacity_and_hasher`
-    #[inline]
-    #[unstable(feature = "hashmap_hasher", reason = "hasher stuff is unclear",
-               issue = "27713")]
-    #[rustc_deprecated(since = "1.7.0",
-                       reason = "renamed to with_capacity_and_hasher")]
-    pub fn with_capacity_and_hash_state(capacity: usize, hash_state: S)
-                                        -> HashSet<T, S> {
-        HashSet::with_capacity_and_hasher(capacity, hash_state)
-    }
-
     /// Returns the number of elements the set can hold without reallocating.
     ///
     /// # Examples
@@ -434,10 +413,7 @@ impl<T, S> HashSet<T, S>
     #[inline]
     #[stable(feature = "drain", since = "1.6.0")]
     pub fn drain(&mut self) -> Drain<T> {
-        fn first<A, B>((a, _): (A, B)) -> A { a }
-        let first: fn((T, ())) -> T = first; // coerce to fn pointer
-
-        Drain { iter: self.map.drain().map(first) }
+        Drain { iter: self.map.drain() }
     }
 
     /// Clears the set, removing all values.
@@ -482,7 +458,7 @@ impl<T, S> HashSet<T, S>
     /// The value may be any borrowed form of the set's value type, but
     /// `Hash` and `Eq` on the borrowed form *must* match those for
     /// the value type.
-    #[unstable(feature = "set_recovery", issue = "28050")]
+    #[stable(feature = "set_recovery", since = "1.9.0")]
     pub fn get<Q: ?Sized>(&self, value: &Q) -> Option<&T>
         where T: Borrow<Q>, Q: Hash + Eq
     {
@@ -579,7 +555,7 @@ impl<T, S> HashSet<T, S>
 
     /// Adds a value to the set, replacing the existing value, if any, that is equal to the given
     /// one. Returns the replaced value.
-    #[unstable(feature = "set_recovery", issue = "28050")]
+    #[stable(feature = "set_recovery", since = "1.9.0")]
     pub fn replace(&mut self, value: T) -> Option<T> {
         Recover::replace(&mut self.map, value)
     }
@@ -614,7 +590,7 @@ impl<T, S> HashSet<T, S>
     /// The value may be any borrowed form of the set's value type, but
     /// `Hash` and `Eq` on the borrowed form *must* match those for
     /// the value type.
-    #[unstable(feature = "set_recovery", issue = "28050")]
+    #[stable(feature = "set_recovery", since = "1.9.0")]
     pub fn take<Q: ?Sized>(&mut self, value: &Q) -> Option<T>
         where T: Borrow<Q>, Q: Hash + Eq
     {
@@ -653,11 +629,11 @@ impl<T, S> FromIterator<T> for HashSet<T, S>
     where T: Eq + Hash,
           S: BuildHasher + Default,
 {
-    fn from_iter<I: IntoIterator<Item=T>>(iterable: I) -> HashSet<T, S> {
-        let iter = iterable.into_iter();
-        let lower = iter.size_hint().0;
+    fn from_iter<I: IntoIterator<Item=T>>(iter: I) -> HashSet<T, S> {
+        let iterator = iter.into_iter();
+        let lower = iterator.size_hint().0;
         let mut set = HashSet::with_capacity_and_hasher(lower, Default::default());
-        set.extend(iter);
+        set.extend(iterator);
         set
     }
 }
@@ -831,13 +807,13 @@ pub struct Iter<'a, K: 'a> {
 /// HashSet move iterator
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct IntoIter<K> {
-    iter: Map<map::IntoIter<K, ()>, fn((K, ())) -> K>
+    iter: map::IntoIter<K, ()>
 }
 
 /// HashSet drain iterator
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Drain<'a, K: 'a> {
-    iter: Map<map::Drain<'a, K, ()>, fn((K, ())) -> K>,
+    iter: map::Drain<'a, K, ()>,
 }
 
 /// Intersection iterator
@@ -911,10 +887,7 @@ impl<T, S> IntoIterator for HashSet<T, S>
     /// }
     /// ```
     fn into_iter(self) -> IntoIter<T> {
-        fn first<A, B>((a, _): (A, B)) -> A { a }
-        let first: fn((T, ())) -> T = first;
-
-        IntoIter { iter: self.map.into_iter().map(first) }
+        IntoIter { iter: self.map.into_iter() }
     }
 }
 
@@ -938,7 +911,7 @@ impl<'a, K> ExactSizeIterator for Iter<'a, K> {
 impl<K> Iterator for IntoIter<K> {
     type Item = K;
 
-    fn next(&mut self) -> Option<K> { self.iter.next() }
+    fn next(&mut self) -> Option<K> { self.iter.next().map(|(k, _)| k) }
     fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
 }
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -950,7 +923,7 @@ impl<K> ExactSizeIterator for IntoIter<K> {
 impl<'a, K> Iterator for Drain<'a, K> {
     type Item = K;
 
-    fn next(&mut self) -> Option<K> { self.iter.next() }
+    fn next(&mut self) -> Option<K> { self.iter.next().map(|(k, _)| k) }
     fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
 }
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1050,6 +1023,21 @@ impl<'a, T, S> Iterator for Union<'a, T, S>
     fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
 }
 
+#[allow(dead_code)]
+fn assert_covariance() {
+    fn set<'new>(v: HashSet<&'static str>) -> HashSet<&'new str> { v }
+    fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> { v }
+    fn into_iter<'new>(v: IntoIter<&'static str>) -> IntoIter<&'new str> { v }
+    fn difference<'a, 'new>(v: Difference<'a, &'static str, RandomState>)
+        -> Difference<'a, &'new str, RandomState> { v }
+    fn symmetric_difference<'a, 'new>(v: SymmetricDifference<'a, &'static str, RandomState>)
+        -> SymmetricDifference<'a, &'new str, RandomState> { v }
+    fn intersection<'a, 'new>(v: Intersection<'a, &'static str, RandomState>)
+        -> Intersection<'a, &'new str, RandomState> { v }
+    fn union<'a, 'new>(v: Union<'a, &'static str, RandomState>)
+        -> Union<'a, &'new str, RandomState> { v }
+}
+
 #[cfg(test)]
 mod test_set {
     use prelude::v1::*;
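
Besides dropping the `Map` adaptor from the set iterators, this file stabilizes the `set_recovery` methods (`get`, `replace`, `take`), which hand back the stored value itself rather than a plain membership answer. A minimal sketch (values illustrative):

```rust
use std::collections::HashSet;

fn main() {
    let mut names: HashSet<String> = HashSet::new();
    names.insert("alice".to_string());

    // get returns a reference to the stored value matching the lookup key.
    assert_eq!(names.get("alice").map(|s| s.len()), Some(5));

    // replace swaps in an equal value and hands back the old one.
    let old = names.replace("alice".to_string());
    assert_eq!(old.as_ref().map(|s| s.as_str()), Some("alice"));

    // take removes the stored value and returns it.
    let taken = names.take("alice");
    assert!(taken.is_some());
    assert!(names.is_empty());
}
```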
diff --git a/src/libstd/collections/hash/state.rs b/src/libstd/collections/hash/state.rs
deleted file mode 100644 (file)
index 167aca0..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![unstable(feature = "hashmap_hasher", reason = "hasher stuff is unclear",
-            issue = "27713")]
-#![rustc_deprecated(since = "1.7.0", reason = "support moved to std::hash")]
-#![allow(deprecated)]
-
-use clone::Clone;
-use default::Default;
-use hash;
-use marker;
-
-pub use hash::HashState;
-
-/// A structure which is a factory for instances of `Hasher` which implement the
-/// default trait.
-///
-/// This struct is 0-sized and does not need construction.
-pub struct DefaultState<H>(marker::PhantomData<H>);
-
-impl<H: Default + hash::Hasher> HashState for DefaultState<H> {
-    type Hasher = H;
-    fn hasher(&self) -> H { Default::default() }
-}
-
-impl<H> Clone for DefaultState<H> {
-    fn clone(&self) -> DefaultState<H> { DefaultState(marker::PhantomData) }
-}
-
-impl<H> Default for DefaultState<H> {
-    fn default() -> DefaultState<H> { DefaultState(marker::PhantomData) }
-}
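
The deleted `DefaultState<H>` had already been superseded by `std::hash::BuildHasherDefault<H>`, which plays the same zero-sized hasher-factory role. A hedged sketch of that replacement; `DefaultHasher` is used here purely as one convenient `Hasher + Default` implementation:

```rust
use std::collections::HashMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::BuildHasherDefault;

fn main() {
    // BuildHasherDefault<H> is zero-sized and builds hashers via H::default(),
    // the job the removed DefaultState<H> used to do.
    let mut map: HashMap<&str, u32, BuildHasherDefault<DefaultHasher>> =
        HashMap::default();
    map.insert("answer", 42);
    assert_eq!(map.get("answer"), Some(&42));
}
```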
index 97cab94b67bd132fa5c8973e023d38de4549b55a..cf64e5d333639c5f11d88b854bcfa418cc81029b 100644 (file)
@@ -75,8 +75,10 @@ unsafe impl<K: Sync, V: Sync> Sync for RawTable<K, V> {}
 
 struct RawBucket<K, V> {
     hash: *mut u64,
-    key:  *mut K,
-    val:  *mut V,
+
+    // We use *const to ensure covariance with respect to K and V
+    key:  *const K,
+    val:  *const V,
     _marker: marker::PhantomData<(K,V)>,
 }
 
@@ -201,23 +203,47 @@ impl<K, V, M> EmptyBucket<K, V, M> {
     pub fn table(&self) -> &M {
         &self.table
     }
-    /// Move out the reference to the table.
-    pub fn into_table(self) -> M {
-        self.table
-    }
 }
 
 impl<K, V, M> Bucket<K, V, M> {
-    /// Move out the reference to the table.
-    pub fn into_table(self) -> M {
-        self.table
-    }
     /// Get the raw index.
     pub fn index(&self) -> usize {
         self.idx
     }
 }
 
+impl<K, V, M> Deref for FullBucket<K, V, M> where M: Deref<Target=RawTable<K, V>> {
+    type Target = RawTable<K, V>;
+    fn deref(&self) -> &RawTable<K, V> {
+        &self.table
+    }
+}
+
+/// `Put` is implemented for types which provide access to a table and cannot be invalidated
+///  by filling a bucket. A similar implementation for `Take` is possible.
+pub trait Put<K, V> {
+    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V>;
+}
+
+
+impl<'t, K, V> Put<K, V> for &'t mut RawTable<K, V> {
+    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V> {
+        *self
+    }
+}
+
+impl<K, V, M> Put<K, V> for Bucket<K, V, M> where M: Put<K, V> {
+    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V> {
+        self.table.borrow_table_mut()
+    }
+}
+
+impl<K, V, M> Put<K, V> for FullBucket<K, V, M> where M: Put<K, V> {
+    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V> {
+        self.table.borrow_table_mut()
+    }
+}
+
 impl<K, V, M: Deref<Target=RawTable<K, V>>> Bucket<K, V, M> {
     pub fn new(table: M, hash: SafeHash) -> Bucket<K, V, M> {
         Bucket::at_index(table, hash.inspect() as usize)
@@ -268,22 +294,14 @@ impl<K, V, M: Deref<Target=RawTable<K, V>>> Bucket<K, V, M> {
 
     /// Modifies the bucket pointer in place to make it point to the next slot.
     pub fn next(&mut self) {
-        // Branchless bucket iteration step.
-        // As we reach the end of the table...
-        // We take the current idx:          0111111b
-        // Xor it by its increment:        ^ 1000000b
-        //                               ------------
-        //                                   1111111b
-        // Then AND with the capacity:     & 1000000b
-        //                               ------------
-        // to get the backwards offset:      1000000b
-        // ... and it's zero at all other times.
-        let maybe_wraparound_dist = (self.idx ^ (self.idx + 1)) & self.table.capacity();
-        // Finally, we obtain the offset 1 or the offset -cap + 1.
-        let dist = 1 - (maybe_wraparound_dist as isize);
-
         self.idx += 1;
-
+        let range = self.table.capacity();
+        // This code is branchless thanks to a conditional move.
+        let dist = if self.idx & (range - 1) == 0 {
+            1 - range as isize
+        } else {
+            1
+        };
         unsafe {
             self.raw = self.raw.offset(dist);
         }
@@ -326,7 +344,7 @@ impl<K, V, M: Deref<Target=RawTable<K, V>>> EmptyBucket<K, V, M> {
     }
 }
 
-impl<K, V, M: Deref<Target=RawTable<K, V>> + DerefMut> EmptyBucket<K, V, M> {
+impl<K, V, M> EmptyBucket<K, V, M> where M: Put<K, V> {
     /// Puts given key and value pair, along with the key's hash,
     /// into this bucket in the hashtable. Note how `self` is 'moved' into
     /// this function, because this slot will no longer be empty when
@@ -338,11 +356,11 @@ impl<K, V, M: Deref<Target=RawTable<K, V>> + DerefMut> EmptyBucket<K, V, M> {
                -> FullBucket<K, V, M> {
         unsafe {
             *self.raw.hash = hash.inspect();
-            ptr::write(self.raw.key, key);
-            ptr::write(self.raw.val, value);
-        }
+            ptr::write(self.raw.key as *mut K, key);
+            ptr::write(self.raw.val as *mut V, value);
 
-        self.table.size += 1;
+            self.table.borrow_table_mut().size += 1;
+        }
 
         FullBucket { raw: self.raw, idx: self.idx, table: self.table }
     }
@@ -365,12 +383,22 @@ impl<K, V, M: Deref<Target=RawTable<K, V>>> FullBucket<K, V, M> {
         }
     }
 
+    /// Duplicates the current position. This can be useful for operations
+    /// on two or more buckets.
+    pub fn stash(self) -> FullBucket<K, V, Self> {
+        FullBucket {
+            raw: self.raw,
+            idx: self.idx,
+            table: self,
+        }
+    }
+
     /// Get the distance between this bucket and the 'ideal' location
     /// as determined by the key's hash stored in it.
     ///
     /// In the cited blog posts above, this is called the "distance to
     /// initial bucket", or DIB. Also known as "probe count".
-    pub fn distance(&self) -> usize {
+    pub fn displacement(&self) -> usize {
         // Calculates the distance one has to travel when going from
         // `hash mod capacity` onwards to `idx mod capacity`, wrapping around
         // if the destination is not reached before the end of the table.
@@ -395,12 +423,15 @@ impl<K, V, M: Deref<Target=RawTable<K, V>>> FullBucket<K, V, M> {
     }
 }
 
-impl<K, V, M: Deref<Target=RawTable<K, V>> + DerefMut> FullBucket<K, V, M> {
+// We take a mutable reference to the table instead of accepting anything that
+// implements `DerefMut` to prevent fn `take` from being called on `stash`ed
+// buckets.
+impl<'t, K, V> FullBucket<K, V, &'t mut RawTable<K, V>> {
     /// Removes this bucket's key and value from the hashtable.
     ///
     /// This works similarly to `put`, building an `EmptyBucket` out of the
     /// taken bucket.
-    pub fn take(mut self) -> (EmptyBucket<K, V, M>, K, V) {
+    pub fn take(mut self) -> (EmptyBucket<K, V, &'t mut RawTable<K, V>>, K, V) {
         self.table.size -= 1;
 
         unsafe {
@@ -416,27 +447,33 @@ impl<K, V, M: Deref<Target=RawTable<K, V>> + DerefMut> FullBucket<K, V, M> {
             )
         }
     }
+}
 
+// This use of `Put` is misleading and restrictive, but safe and sufficient for our use cases
+// where `M` is a full bucket or table reference type with mutable access to the table.
+impl<K, V, M> FullBucket<K, V, M> where M: Put<K, V> {
     pub fn replace(&mut self, h: SafeHash, k: K, v: V) -> (SafeHash, K, V) {
         unsafe {
             let old_hash = ptr::replace(self.raw.hash as *mut SafeHash, h);
-            let old_key  = ptr::replace(self.raw.key,  k);
-            let old_val  = ptr::replace(self.raw.val,  v);
+            let old_key  = ptr::replace(self.raw.key as *mut K,  k);
+            let old_val  = ptr::replace(self.raw.val as *mut V,  v);
 
             (old_hash, old_key, old_val)
         }
     }
+}
 
+impl<K, V, M> FullBucket<K, V, M> where M: Deref<Target=RawTable<K, V>> + DerefMut {
     /// Gets mutable references to the key and value at a given index.
     pub fn read_mut(&mut self) -> (&mut K, &mut V) {
         unsafe {
-            (&mut *self.raw.key,
-             &mut *self.raw.val)
+            (&mut *(self.raw.key as *mut K),
+             &mut *(self.raw.val as *mut V))
         }
     }
 }
 
-impl<'t, K, V, M: Deref<Target=RawTable<K, V>> + 't> FullBucket<K, V, M> {
+impl<'t, K, V, M> FullBucket<K, V, M> where M: Deref<Target=RawTable<K, V>> + 't {
     /// Exchange a bucket state for immutable references into the table.
     /// Because the underlying reference to the table is also consumed,
     /// no further changes to the structure of the table are possible;
@@ -450,28 +487,18 @@ impl<'t, K, V, M: Deref<Target=RawTable<K, V>> + 't> FullBucket<K, V, M> {
     }
 }
 
-impl<'t, K, V, M: Deref<Target=RawTable<K, V>> + DerefMut + 't> FullBucket<K, V, M> {
+impl<'t, K, V, M> FullBucket<K, V, M> where M: Deref<Target=RawTable<K, V>> + DerefMut + 't {
     /// This works similarly to `into_refs`, exchanging a bucket state
     /// for mutable references into the table.
     pub fn into_mut_refs(self) -> (&'t mut K, &'t mut V) {
         unsafe {
-            (&mut *self.raw.key,
-             &mut *self.raw.val)
+            (&mut *(self.raw.key as *mut K),
+             &mut *(self.raw.val as *mut V))
         }
     }
 }
 
-impl<K, V, M> BucketState<K, V, M> {
-    // For convenience.
-    pub fn expect_full(self) -> FullBucket<K, V, M> {
-        match self {
-            Full(full) => full,
-            Empty(..) => panic!("Expected full bucket")
-        }
-    }
-}
-
-impl<K, V, M: Deref<Target=RawTable<K, V>>> GapThenFull<K, V, M> {
+impl<K, V, M> GapThenFull<K, V, M> where M: Deref<Target=RawTable<K, V>> {
     #[inline]
     pub fn full(&self) -> &FullBucket<K, V, M> {
         &self.full
@@ -480,8 +507,8 @@ impl<K, V, M: Deref<Target=RawTable<K, V>>> GapThenFull<K, V, M> {
     pub fn shift(mut self) -> Option<GapThenFull<K, V, M>> {
         unsafe {
             *self.gap.raw.hash = mem::replace(&mut *self.full.raw.hash, EMPTY_BUCKET);
-            ptr::copy_nonoverlapping(self.full.raw.key, self.gap.raw.key, 1);
-            ptr::copy_nonoverlapping(self.full.raw.val, self.gap.raw.val, 1);
+            ptr::copy_nonoverlapping(self.full.raw.key, self.gap.raw.key as *mut K, 1);
+            ptr::copy_nonoverlapping(self.full.raw.val, self.gap.raw.val as *mut V, 1);
         }
 
         let FullBucket { raw: prev_raw, idx: prev_idx, .. } = self.full;
@@ -624,7 +651,7 @@ impl<K, V> RawTable<K, V> {
         let hashes_size = self.capacity * size_of::<u64>();
         let keys_size = self.capacity * size_of::<K>();
 
-        let buffer = *self.hashes as *mut u8;
+        let buffer = *self.hashes as *const u8;
         let (keys_offset, vals_offset, oflo) =
             calculate_offsets(hashes_size,
                               keys_size, align_of::<K>(),
@@ -633,8 +660,8 @@ impl<K, V> RawTable<K, V> {
         unsafe {
             RawBucket {
                 hash: *self.hashes,
-                key:  buffer.offset(keys_offset as isize) as *mut K,
-                val:  buffer.offset(vals_offset as isize) as *mut V,
+                key:  buffer.offset(keys_offset as isize) as *const K,
+                val:  buffer.offset(vals_offset as isize) as *const V,
                 _marker: marker::PhantomData,
             }
         }
@@ -682,6 +709,7 @@ impl<K, V> RawTable<K, V> {
         IterMut {
             iter: self.raw_buckets(),
             elems_left: self.size(),
+            _marker: marker::PhantomData,
         }
     }
 
@@ -833,6 +861,8 @@ impl<'a, K, V> Clone for Iter<'a, K, V> {
 pub struct IterMut<'a, K: 'a, V: 'a> {
     iter: RawBuckets<'a, K, V>,
     elems_left: usize,
+    // To ensure invariance with respect to V
+    _marker: marker::PhantomData<&'a mut V>,
 }
 
 unsafe impl<'a, K: Sync, V: Sync> Sync for IterMut<'a, K, V> {}
@@ -887,7 +917,7 @@ impl<'a, K, V> Iterator for IterMut<'a, K, V> {
             self.elems_left -= 1;
             unsafe {
                 (&*bucket.key,
-                 &mut *bucket.val)
+                 &mut *(bucket.val as *mut V))
             }
         })
     }
@@ -978,8 +1008,8 @@ impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
                                 (full.hash(), k.clone(), v.clone())
                             };
                             *new_buckets.raw.hash = h.inspect();
-                            ptr::write(new_buckets.raw.key, k);
-                            ptr::write(new_buckets.raw.val, v);
+                            ptr::write(new_buckets.raw.key as *mut K, k);
+                            ptr::write(new_buckets.raw.val as *mut V, v);
                         }
                         Empty(..) => {
                             *new_buckets.raw.hash = EMPTY_BUCKET;
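
The rewritten `next` step above relies on the table capacity being a power of two: `idx & (capacity - 1)` is zero exactly when the incremented index has wrapped past the end, so the raw-pointer offset is either `1` or `1 - capacity`. A standalone sketch of that wraparound arithmetic with plain integers (variable names are illustrative):

```rust
fn main() {
    let capacity: usize = 8; // RawTable capacities are powers of two
    let mut idx = 0usize;    // logical index, grows without bound
    let mut pos = 0isize;    // simulated position inside the allocation

    for _ in 0..20 {
        idx += 1;
        // Zero exactly when idx is a multiple of capacity, i.e. we just wrapped.
        let dist = if idx & (capacity - 1) == 0 {
            1 - capacity as isize
        } else {
            1
        };
        pos += dist;
        assert_eq!(pos as usize, idx % capacity);
    }
}
```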
index 06c14157606b2a40b7f2b9a96f6487cea445bd93..4de442fd3a19955a1b23894ca6ab43b39035e4e7 100644 (file)
@@ -442,13 +442,3 @@ pub mod hash_set {
     #[stable(feature = "rust1", since = "1.0.0")]
     pub use super::hash::set::*;
 }
-
-/// Experimental support for providing custom hash algorithms to a HashMap and
-/// HashSet.
-#[unstable(feature = "hashmap_hasher", reason = "module was recently added",
-           issue = "27713")]
-#[rustc_deprecated(since = "1.7.0", reason = "support moved to std::hash")]
-#[allow(deprecated)]
-pub mod hash_state {
-    pub use super::hash::state::*;
-}
diff --git a/src/libstd/dynamic_lib.rs b/src/libstd/dynamic_lib.rs
deleted file mode 100644 (file)
index 08e33fa..0000000
+++ /dev/null
@@ -1,398 +0,0 @@
-// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Dynamic library facilities.
-//!
-//! A simple wrapper over the platform's dynamic library facilities
-
-#![unstable(feature = "dynamic_lib",
-            reason = "API has not been scrutinized and is highly likely to \
-                      either disappear or change",
-            issue = "27810")]
-#![allow(missing_docs)]
-#![allow(deprecated)]
-
-use prelude::v1::*;
-
-use env;
-use ffi::{CString, OsString};
-use path::{Path, PathBuf};
-
-#[unstable(feature = "dynamic_lib",
-           reason = "API has not been scrutinized and is highly likely to \
-                     either disappear or change",
-           issue = "27810")]
-#[rustc_deprecated(since = "1.5.0", reason = "replaced with 'dylib' on crates.io")]
-pub struct DynamicLibrary {
-    handle: *mut u8
-}
-
-impl Drop for DynamicLibrary {
-    fn drop(&mut self) {
-        match dl::check_for_errors_in(|| {
-            unsafe {
-                dl::close(self.handle)
-            }
-        }) {
-            Ok(()) => {},
-            Err(str) => panic!("{}", str)
-        }
-    }
-}
-
-#[unstable(feature = "dynamic_lib",
-           reason = "API has not been scrutinized and is highly likely to \
-                     either disappear or change",
-           issue = "27810")]
-#[rustc_deprecated(since = "1.5.0", reason = "replaced with 'dylib' on crates.io")]
-impl DynamicLibrary {
-    /// Lazily open a dynamic library. When passed None it gives a
-    /// handle to the calling process
-    pub fn open(filename: Option<&Path>) -> Result<DynamicLibrary, String> {
-        let maybe_library = dl::open(filename.map(|path| path.as_os_str()));
-
-        // The dynamic library must not be constructed if there is
-        // an error opening the library so the destructor does not
-        // run.
-        match maybe_library {
-            Err(err) => Err(err),
-            Ok(handle) => Ok(DynamicLibrary { handle: handle })
-        }
-    }
-
-    /// Prepends a path to this process's search path for dynamic libraries
-    pub fn prepend_search_path(path: &Path) {
-        let mut search_path = DynamicLibrary::search_path();
-        search_path.insert(0, path.to_path_buf());
-        env::set_var(DynamicLibrary::envvar(), &DynamicLibrary::create_path(&search_path));
-    }
-
-    /// From a slice of paths, create a new vector which is suitable to be an
-    /// environment variable for this platforms dylib search path.
-    pub fn create_path(path: &[PathBuf]) -> OsString {
-        let mut newvar = OsString::new();
-        for (i, path) in path.iter().enumerate() {
-            if i > 0 { newvar.push(DynamicLibrary::separator()); }
-            newvar.push(path);
-        }
-        return newvar;
-    }
-
-    /// Returns the environment variable for this process's dynamic library
-    /// search path
-    pub fn envvar() -> &'static str {
-        if cfg!(windows) {
-            "PATH"
-        } else if cfg!(target_os = "macos") {
-            "DYLD_LIBRARY_PATH"
-        } else {
-            "LD_LIBRARY_PATH"
-        }
-    }
-
-    fn separator() -> &'static str {
-        if cfg!(windows) { ";" } else { ":" }
-    }
-
-    /// Returns the current search path for dynamic libraries being used by this
-    /// process
-    pub fn search_path() -> Vec<PathBuf> {
-        match env::var_os(DynamicLibrary::envvar()) {
-            Some(var) => env::split_paths(&var).collect(),
-            None => Vec::new(),
-        }
-    }
-
-    /// Accesses the value at the symbol of the dynamic library.
-    pub unsafe fn symbol<T>(&self, symbol: &str) -> Result<*mut T, String> {
-        // This function should have a lifetime constraint of 'a on
-        // T but that feature is still unimplemented
-
-        let raw_string = CString::new(symbol).unwrap();
-        let maybe_symbol_value = dl::check_for_errors_in(|| {
-            dl::symbol(self.handle, raw_string.as_ptr())
-        });
-
-        // The value must not be constructed if there is an error so
-        // the destructor does not run.
-        match maybe_symbol_value {
-            Err(err) => Err(err),
-            Ok(symbol_value) => Ok(symbol_value as *mut T)
-        }
-    }
-}
-
-#[cfg(all(test, not(target_os = "ios"), not(target_os = "nacl")))]
-mod tests {
-    use super::*;
-    use prelude::v1::*;
-    use libc;
-    use mem;
-
-    #[test]
-    #[cfg_attr(any(windows,
-                   target_os = "android",  // FIXME #10379
-                   target_env = "musl"), ignore)]
-    #[allow(deprecated)]
-    fn test_loading_cosine() {
-        // The math library does not need to be loaded since it is already
-        // statically linked in
-        let libm = match DynamicLibrary::open(None) {
-            Err(error) => panic!("Could not load self as module: {}", error),
-            Ok(libm) => libm
-        };
-
-        let cosine: extern fn(libc::c_double) -> libc::c_double = unsafe {
-            match libm.symbol("cos") {
-                Err(error) => panic!("Could not load function cos: {}", error),
-                Ok(cosine) => mem::transmute::<*mut u8, _>(cosine)
-            }
-        };
-
-        let argument = 0.0;
-        let expected_result = 1.0;
-        let result = cosine(argument);
-        if result != expected_result {
-            panic!("cos({}) != {} but equaled {} instead", argument,
-                   expected_result, result)
-        }
-    }
-
-    #[test]
-    #[cfg(any(target_os = "linux",
-              target_os = "macos",
-              target_os = "freebsd",
-              target_os = "dragonfly",
-              target_os = "bitrig",
-              target_os = "netbsd",
-              target_os = "openbsd",
-              target_os = "solaris"))]
-    #[allow(deprecated)]
-    fn test_errors_do_not_crash() {
-        use path::Path;
-
-        // Open /dev/null as a library to get an error, and make sure
-        // that only causes an error, and not a crash.
-        let path = Path::new("/dev/null");
-        match DynamicLibrary::open(Some(&path)) {
-            Err(_) => {}
-            Ok(_) => panic!("Successfully opened the empty library.")
-        }
-    }
-}
-
-#[cfg(any(target_os = "linux",
-          target_os = "android",
-          target_os = "macos",
-          target_os = "ios",
-          target_os = "freebsd",
-          target_os = "dragonfly",
-          target_os = "bitrig",
-          target_os = "netbsd",
-          target_os = "openbsd",
-          target_os = "solaris",
-          target_os = "emscripten"))]
-mod dl {
-    use prelude::v1::*;
-
-    use ffi::{CStr, OsStr};
-    use str;
-    use libc;
-    use ptr;
-
-    pub fn open(filename: Option<&OsStr>) -> Result<*mut u8, String> {
-        check_for_errors_in(|| {
-            unsafe {
-                match filename {
-                    Some(filename) => open_external(filename),
-                    None => open_internal(),
-                }
-            }
-        })
-    }
-
-    const LAZY: libc::c_int = 1;
-
-    unsafe fn open_external(filename: &OsStr) -> *mut u8 {
-        let s = filename.to_cstring().unwrap();
-        libc::dlopen(s.as_ptr(), LAZY) as *mut u8
-    }
-
-    unsafe fn open_internal() -> *mut u8 {
-        libc::dlopen(ptr::null(), LAZY) as *mut u8
-    }
-
-    pub fn check_for_errors_in<T, F>(f: F) -> Result<T, String> where
-        F: FnOnce() -> T,
-    {
-        use sync::StaticMutex;
-        static LOCK: StaticMutex = StaticMutex::new();
-        unsafe {
-            // dlerror isn't thread safe, so we need to lock around this entire
-            // sequence
-            let _guard = LOCK.lock();
-            let _old_error = libc::dlerror();
-
-            let result = f();
-
-            let last_error = libc::dlerror() as *const _;
-            let ret = if ptr::null() == last_error {
-                Ok(result)
-            } else {
-                let s = CStr::from_ptr(last_error).to_bytes();
-                Err(str::from_utf8(s).unwrap().to_owned())
-            };
-
-            ret
-        }
-    }
-
-    pub unsafe fn symbol(handle: *mut u8,
-                         symbol: *const libc::c_char) -> *mut u8 {
-        libc::dlsym(handle as *mut libc::c_void, symbol) as *mut u8
-    }
-    pub unsafe fn close(handle: *mut u8) {
-        libc::dlclose(handle as *mut libc::c_void); ()
-    }
-}
-
-#[cfg(target_os = "windows")]
-mod dl {
-    use prelude::v1::*;
-
-    use ffi::OsStr;
-    use libc;
-    use os::windows::prelude::*;
-    use ptr;
-    use sys::c;
-    use sys::os;
-
-    pub fn open(filename: Option<&OsStr>) -> Result<*mut u8, String> {
-        // disable "dll load failed" error dialog.
-        let mut use_thread_mode = true;
-        let prev_error_mode = unsafe {
-            // SEM_FAILCRITICALERRORS 0x01
-            let new_error_mode = 1;
-            let mut prev_error_mode = 0;
-            // Windows >= 7 supports thread error mode.
-            let result = c::SetThreadErrorMode(new_error_mode,
-                                               &mut prev_error_mode);
-            if result == 0 {
-                let err = os::errno();
-                if err == c::ERROR_CALL_NOT_IMPLEMENTED as i32 {
-                    use_thread_mode = false;
-                    // SetThreadErrorMode not found. use fallback solution:
-                    // SetErrorMode() Note that SetErrorMode is process-wide so
-                    // this can cause race condition!  However, since even
-                    // Windows APIs do not care of such problem (#20650), we
-                    // just assume SetErrorMode race is not a great deal.
-                    prev_error_mode = c::SetErrorMode(new_error_mode);
-                }
-            }
-            prev_error_mode
-        };
-
-        unsafe {
-            c::SetLastError(0);
-        }
-
-        let result = match filename {
-            Some(filename) => {
-                let filename_str: Vec<_> =
-                    filename.encode_wide().chain(Some(0)).collect();
-                let result = unsafe {
-                    c::LoadLibraryW(filename_str.as_ptr())
-                };
-                // beware: Vec/String may change errno during drop!
-                // so we get error here.
-                if result == ptr::null_mut() {
-                    let errno = os::errno();
-                    Err(os::error_string(errno))
-                } else {
-                    Ok(result as *mut u8)
-                }
-            }
-            None => {
-                let mut handle = ptr::null_mut();
-                let succeeded = unsafe {
-                    c::GetModuleHandleExW(0 as c::DWORD, ptr::null(),
-                                          &mut handle)
-                };
-                if succeeded == c::FALSE {
-                    let errno = os::errno();
-                    Err(os::error_string(errno))
-                } else {
-                    Ok(handle as *mut u8)
-                }
-            }
-        };
-
-        unsafe {
-            if use_thread_mode {
-                c::SetThreadErrorMode(prev_error_mode, ptr::null_mut());
-            } else {
-                c::SetErrorMode(prev_error_mode);
-            }
-        }
-
-        result
-    }
-
-    pub fn check_for_errors_in<T, F>(f: F) -> Result<T, String> where
-        F: FnOnce() -> T,
-    {
-        unsafe {
-            c::SetLastError(0);
-
-            let result = f();
-
-            let error = os::errno();
-            if 0 == error {
-                Ok(result)
-            } else {
-                Err(format!("Error code {}", error))
-            }
-        }
-    }
-
-    pub unsafe fn symbol(handle: *mut u8, symbol: *const libc::c_char) -> *mut u8 {
-        c::GetProcAddress(handle as c::HMODULE, symbol) as *mut u8
-    }
-    pub unsafe fn close(handle: *mut u8) {
-        c::FreeLibrary(handle as c::HMODULE);
-    }
-}
-
-#[cfg(target_os = "nacl")]
-pub mod dl {
-    use ffi::OsStr;
-    use ptr;
-    use result::Result;
-    use result::Result::Err;
-    use libc;
-    use string::String;
-    use ops::FnOnce;
-    use option::Option;
-
-    pub fn open(_filename: Option<&OsStr>) -> Result<*mut u8, String> {
-        Err(format!("NaCl + Newlib doesn't impl loading shared objects"))
-    }
-
-    pub fn check_for_errors_in<T, F>(_f: F) -> Result<T, String>
-        where F: FnOnce() -> T,
-    {
-        Err(format!("NaCl doesn't support shared objects"))
-    }
-
-    pub unsafe fn symbol(_handle: *mut u8, _symbol: *const libc::c_char) -> *mut u8 {
-        ptr::null_mut()
-    }
-    pub unsafe fn close(_handle: *mut u8) { }
-}
index 749e58c11962b238d035c511b0732db069a269d0..9dc6a26cdeed30869599b0a2c1af1769cdbbbb18 100644 (file)
@@ -181,7 +181,7 @@ fn _var(key: &OsStr) -> Result<String, VarError> {
 }
 
 /// Fetches the environment variable `key` from the current process, returning
-/// None if the variable isn't set.
+/// `None` if the variable isn't set.
 ///
 /// # Examples
 ///
@@ -416,7 +416,7 @@ impl Error for JoinPathsError {
     fn description(&self) -> &str { self.inner.description() }
 }
 
-/// Returns the path to the current user's home directory if known.
+/// Returns the path of the current user's home directory if known.
 ///
 /// # Unix
 ///
@@ -450,7 +450,7 @@ pub fn home_dir() -> Option<PathBuf> {
     os_imp::home_dir()
 }
 
-/// Returns the path to a temporary directory.
+/// Returns the path of a temporary directory.
 ///
 /// On Unix, returns the value of the 'TMPDIR' environment variable if it is
 /// set, otherwise for non-Android it returns '/tmp'. If Android, since there
@@ -459,7 +459,7 @@ pub fn home_dir() -> Option<PathBuf> {
 ///
 /// On Windows, returns the value of, in order, the 'TMP', 'TEMP',
 /// 'USERPROFILE' environment variables if any are set and not the empty
-/// string. Otherwise, tmpdir returns the path to the Windows directory. This
+/// string. Otherwise, tmpdir returns the path of the Windows directory. This
 /// behavior is identical to that of [GetTempPath][msdn], which this function
 /// uses internally.
 ///
@@ -482,14 +482,14 @@ pub fn temp_dir() -> PathBuf {
     os_imp::temp_dir()
 }
 
-/// Returns the full filesystem path to the current running executable.
+/// Returns the full filesystem path of the current running executable.
 ///
-/// The path returned is not necessarily a "real path" to the executable as
+/// The path returned is not necessarily a "real path" of the executable as
 /// there may be intermediate symlinks.
 ///
 /// # Errors
 ///
-/// Acquiring the path to the current executable is a platform-specific operation
+/// Acquiring the path of the current executable is a platform-specific operation
 /// that can fail for a good number of reasons. Some errors can include, but not
 /// be limited to, filesystem operations failing or general syscall failures.
 ///
@@ -526,7 +526,7 @@ pub struct ArgsOs { inner: os_imp::Args }
 /// Returns the arguments which this program was started with (normally passed
 /// via the command line).
 ///
-/// The first element is traditionally the path to the executable, but it can be
+/// The first element is traditionally the path of the executable, but it can be
 /// set to arbitrary text, and may not even exist. This means this property should
 /// not be relied upon for security purposes.
 ///
@@ -554,7 +554,7 @@ pub fn args() -> Args {
 /// Returns the arguments which this program was started with (normally passed
 /// via the command line).
 ///
-/// The first element is traditionally the path to the executable, but it can be
+/// The first element is traditionally the path of the executable, but it can be
 /// set to arbitrary text, and it may not even exist, so this property should
 /// not be relied upon for security purposes.
 ///
@@ -617,7 +617,7 @@ pub mod consts {
     #[stable(feature = "env", since = "1.0.0")]
     pub const ARCH: &'static str = super::arch::ARCH;
 
-    /// The family of the operating system. In this case, `unix`.
+    /// The family of the operating system. Example value is `unix`.
     ///
     /// Some possible values:
     ///
@@ -626,8 +626,8 @@ pub mod consts {
     #[stable(feature = "env", since = "1.0.0")]
     pub const FAMILY: &'static str = super::os::FAMILY;
 
-    /// A string describing the specific operating system in use: in this
-    /// case, `linux`.
+    /// A string describing the specific operating system in use.
+    /// Example value is `linux`.
     ///
     /// Some possible values:
     ///
@@ -646,7 +646,7 @@ pub mod consts {
     pub const OS: &'static str = super::os::OS;
 
     /// Specifies the filename prefix used for shared libraries on this
-    /// platform: in this case, `lib`.
+    /// platform. Example value is `lib`.
     ///
     /// Some possible values:
     ///
@@ -656,7 +656,7 @@ pub mod consts {
     pub const DLL_PREFIX: &'static str = super::os::DLL_PREFIX;
 
     /// Specifies the filename suffix used for shared libraries on this
-    /// platform: in this case, `.so`.
+    /// platform. Example value is `.so`.
     ///
     /// Some possible values:
     ///
@@ -667,7 +667,7 @@ pub mod consts {
     pub const DLL_SUFFIX: &'static str = super::os::DLL_SUFFIX;
 
     /// Specifies the file extension used for shared libraries on this
-    /// platform that goes after the dot: in this case, `so`.
+    /// platform that goes after the dot. Example value is `so`.
     ///
     /// Some possible values:
     ///
@@ -678,7 +678,7 @@ pub mod consts {
     pub const DLL_EXTENSION: &'static str = super::os::DLL_EXTENSION;
 
     /// Specifies the filename suffix used for executable binaries on this
-    /// platform: in this case, the empty string.
+    /// platform. Example value is `.exe`.
     ///
     /// Some possible values:
     ///
@@ -690,7 +690,7 @@ pub mod consts {
     pub const EXE_SUFFIX: &'static str = super::os::EXE_SUFFIX;
 
     /// Specifies the file extension, if any, used for executable binaries
-    /// on this platform: in this case, the empty string.
+    /// on this platform. Example value is `exe`.
     ///
     /// Some possible values:
     ///
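
The constants in `env::consts` are now documented with representative example values instead of whichever host happened to build the docs. A small sketch of how they are typically combined to build platform-native file names (the crate name `foo` is illustrative):

```rust
use std::env::consts;

fn main() {
    // e.g. "libfoo.so" on Linux, "foo.dll" on Windows, "libfoo.dylib" on macOS.
    let dylib_name = format!("{}foo{}", consts::DLL_PREFIX, consts::DLL_SUFFIX);
    // e.g. "foo" on Unix, "foo.exe" on Windows.
    let exe_name = format!("foo{}", consts::EXE_SUFFIX);
    println!("{} {} ({}/{})", dylib_name, exe_name, consts::OS, consts::ARCH);
}
```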
index 660948b0accac4c38808906cacf071d4ca6c1b7d..35cd4a5ec5292076fef8fc29d4d8dd2f5883199f 100644 (file)
 
 use any::TypeId;
 use boxed::Box;
-use convert::From;
+use char;
 use fmt::{self, Debug, Display};
 use marker::{Send, Sync, Reflect};
 use mem::transmute;
 use num;
-use option::Option::{self, Some, None};
-use result::Result::{self, Ok, Err};
 use raw::TraitObject;
 use str;
 use string::{self, String};
@@ -189,6 +187,13 @@ impl Error for string::ParseError {
     }
 }
 
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+impl Error for char::DecodeUtf16Error {
+    fn description(&self) -> &str {
+        "unpaired surrogate found"
+    }
+}
+
 #[stable(feature = "box_error", since = "1.7.0")]
 impl<T: Error> Error for Box<T> {
     fn description(&self) -> &str {
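
The new `Error` impl covers `char::DecodeUtf16Error`, the error type produced when `char::decode_utf16` meets an unpaired surrogate. A brief sketch (the input units are illustrative):

```rust
use std::char;

fn main() {
    // 0xD800 is a lone surrogate, so decoding that unit fails with a
    // DecodeUtf16Error, which (as of this change) also implements std::error::Error.
    let units = [0x0048u16, 0xD800];
    for r in char::decode_utf16(units.iter().cloned()) {
        match r {
            Ok(c) => println!("decoded {:?}", c),
            Err(e) => println!("error: {}", e), // Display mentions the unpaired surrogate
        }
    }
}
```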
index 1db45764552392a494217ee3b9d010527b834e58..52d7bb128d5a21423af9fe26f080328521d67eb1 100644 (file)
@@ -318,9 +318,9 @@ impl From<CString> for Vec<u8> {
 #[stable(feature = "cstr_debug", since = "1.3.0")]
 impl fmt::Debug for CStr {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        try!(write!(f, "\""));
+        write!(f, "\"")?;
         for byte in self.to_bytes().iter().flat_map(|&b| ascii::escape_default(b)) {
-            try!(f.write_char(byte as char));
+            f.write_char(byte as char)?;
         }
         write!(f, "\"")
     }
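
The `try!` calls above become the `?` operator stabilized in this release; in a function returning `Result` the two expand to the same early return. A minimal sketch mirroring the escaping loop, but over an arbitrary byte slice (the `Escaped` wrapper type is illustrative, not part of std):

```rust
use std::ascii;
use std::fmt::{self, Write};

struct Escaped<'a>(&'a [u8]);

impl<'a> fmt::Debug for Escaped<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "\"")?;
        for byte in self.0.iter().flat_map(|&b| ascii::escape_default(b)) {
            f.write_char(byte as char)?; // `?` returns early on any fmt::Error
        }
        write!(f, "\"")
    }
}

fn main() {
    println!("{:?}", Escaped(b"ab\n\0"));
}
```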
index cf4f4bdf291bca273af64c878588d0c87e0ec973..0d29e62485abb8035fa3c7a404174790cc6d61c9 100644 (file)
@@ -9,7 +9,6 @@
 // except according to those terms.
 
 use borrow::{Borrow, Cow, ToOwned};
-use ffi::CString;
 use fmt::{self, Debug};
 use mem;
 use string::String;
@@ -22,7 +21,7 @@ use sys::os_str::{Buf, Slice};
 use sys_common::{AsInner, IntoInner, FromInner};
 
 /// A type that can represent owned, mutable platform-native strings, but is
-/// cheaply interconvertable with Rust strings.
+/// cheaply inter-convertible with Rust strings.
 ///
 /// The need for this type arises from the fact that:
 ///
@@ -56,22 +55,6 @@ impl OsString {
         OsString { inner: Buf::from_string(String::new()) }
     }
 
-    /// Constructs an `OsString` from a byte sequence.
-    ///
-    /// # Platform behavior
-    ///
-    /// On Unix systems, any byte sequence can be successfully
-    /// converted into an `OsString`.
-    ///
-    /// On Windows system, only UTF-8 byte sequences will successfully
-    /// convert; non UTF-8 data will produce `None`.
-    #[unstable(feature = "convert", reason = "recently added", issue = "27704")]
-    #[rustc_deprecated(reason = "RFC was closed, hides subtle Windows semantics",
-                       since = "1.6.0")]
-    pub fn from_bytes<B>(bytes: B) -> Option<OsString> where B: Into<Vec<u8>> {
-        Self::_from_bytes(bytes.into())
-    }
-
     #[cfg(unix)]
     fn _from_bytes(vec: Vec<u8>) -> Option<OsString> {
         use os::unix::ffi::OsStringExt;
@@ -103,13 +86,14 @@ impl OsString {
         self.inner.push_slice(&s.as_ref().inner)
     }
 
-    /// Creates a new `OsString` with the given capacity. The string will be
-    /// able to hold exactly `capacity` bytes without reallocating. If
-    /// `capacity` is 0, the string will not allocate.
+    /// Creates a new `OsString` with the given capacity.
+    ///
+    /// The string will be able to hold exactly `capacity` length units of other
+    /// OS strings without reallocating. If `capacity` is 0, the string will not
+    /// allocate.
     ///
     /// See the main `OsString` documentation for information about encoding.
-    #[unstable(feature = "osstring_simple_functions",
-               reason = "recently added", issue = "29453")]
+    #[stable(feature = "osstring_simple_functions", since = "1.9.0")]
     pub fn with_capacity(capacity: usize) -> OsString {
         OsString {
             inner: Buf::with_capacity(capacity)
@@ -117,40 +101,36 @@ impl OsString {
     }
 
     /// Truncates the `OsString` to zero length.
-    #[unstable(feature = "osstring_simple_functions",
-               reason = "recently added", issue = "29453")]
+    #[stable(feature = "osstring_simple_functions", since = "1.9.0")]
     pub fn clear(&mut self) {
         self.inner.clear()
     }
 
-    /// Returns the number of bytes this `OsString` can hold without
-    /// reallocating.
+    /// Returns the capacity this `OsString` can hold without reallocating.
     ///
     /// See `OsString` introduction for information about encoding.
-    #[unstable(feature = "osstring_simple_functions",
-               reason = "recently added", issue = "29453")]
+    #[stable(feature = "osstring_simple_functions", since = "1.9.0")]
     pub fn capacity(&self) -> usize {
         self.inner.capacity()
     }
 
-    /// Reserves capacity for at least `additional` more bytes to be inserted
-    /// in the given `OsString`. The collection may reserve more space to avoid
-    /// frequent reallocations.
-    #[unstable(feature = "osstring_simple_functions",
-               reason = "recently added", issue = "29453")]
+    /// Reserves capacity for at least `additional` more length units to be
+    /// inserted in the given `OsString`.
+    ///
+    /// The collection may reserve more space to avoid frequent reallocations.
+    #[stable(feature = "osstring_simple_functions", since = "1.9.0")]
     pub fn reserve(&mut self, additional: usize) {
         self.inner.reserve(additional)
     }
 
-    /// Reserves the minimum capacity for exactly `additional` more bytes to be
-    /// inserted in the given `OsString`. Does nothing if the capacity is
+    /// Reserves the minimum capacity for exactly `additional` more length units
+    /// to be inserted in the given `OsString`. Does nothing if the capacity is
     /// already sufficient.
     ///
     /// Note that the allocator may give the collection more space than it
     /// requests. Therefore capacity can not be relied upon to be precisely
     /// minimal. Prefer reserve if future insertions are expected.
-    #[unstable(feature = "osstring_simple_functions",
-               reason = "recently added", issue = "29453")]
+    #[stable(feature = "osstring_simple_functions", since = "1.9.0")]
     pub fn reserve_exact(&mut self, additional: usize) {
         self.inner.reserve_exact(additional)
     }
@@ -190,6 +170,14 @@ impl ops::Deref for OsString {
     }
 }
 
+#[stable(feature = "osstring_default", since = "1.9.0")]
+impl Default for OsString {
+    #[inline]
+    fn default() -> OsString {
+        OsString::new()
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl Debug for OsString {
     fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
@@ -272,7 +260,7 @@ impl OsStr {
         unsafe { mem::transmute(inner) }
     }
 
-    /// Yields a `&str` slice if the `OsStr` is valid unicode.
+    /// Yields a `&str` slice if the `OsStr` is valid Unicode.
     ///
     /// This conversion may entail doing a check for UTF-8 validity.
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -294,53 +282,21 @@ impl OsStr {
         OsString { inner: self.inner.to_owned() }
     }
 
-    /// Yields this `OsStr` as a byte slice.
-    ///
-    /// # Platform behavior
-    ///
-    /// On Unix systems, this is a no-op.
-    ///
-    /// On Windows systems, this returns `None` unless the `OsStr` is
-    /// valid unicode, in which case it produces UTF-8-encoded
-    /// data. This may entail checking validity.
-    #[unstable(feature = "convert", reason = "recently added", issue = "27704")]
-    #[rustc_deprecated(reason = "RFC was closed, hides subtle Windows semantics",
-                       since = "1.6.0")]
-    pub fn to_bytes(&self) -> Option<&[u8]> {
-        if cfg!(windows) {
-            self.to_str().map(|s| s.as_bytes())
-        } else {
-            Some(self.bytes())
-        }
-    }
-
-    /// Creates a `CString` containing this `OsStr` data.
-    ///
-    /// Fails if the `OsStr` contains interior nulls.
-    ///
-    /// This is a convenience for creating a `CString` from
-    /// `self.to_bytes()`, and inherits the platform behavior of the
-    /// `to_bytes` method.
-    #[unstable(feature = "convert", reason = "recently added", issue = "27704")]
-    #[rustc_deprecated(reason = "RFC was closed, hides subtle Windows semantics",
-                       since = "1.6.0")]
-    #[allow(deprecated)]
-    pub fn to_cstring(&self) -> Option<CString> {
-        self.to_bytes().and_then(|b| CString::new(b).ok())
-    }
-
     /// Checks whether the `OsStr` is empty.
-    #[unstable(feature = "osstring_simple_functions",
-               reason = "recently added", issue = "29453")]
+    #[stable(feature = "osstring_simple_functions", since = "1.9.0")]
     pub fn is_empty(&self) -> bool {
         self.inner.inner.is_empty()
     }
 
-    /// Returns the number of bytes in this `OsStr`.
+    /// Returns the length of this `OsStr`.
+    ///
+    /// Note that this does **not** return the number of bytes in this string
+    /// as, for example, OS strings on Windows are encoded as a list of `u16`
+    /// rather than a list of bytes. This number is simply useful for passing to
+    /// other methods like `OsString::with_capacity` to avoid reallocations.
     ///
-    /// See `OsStr` introduction for information about encoding.
-    #[unstable(feature = "osstring_simple_functions",
-               reason = "recently added", issue = "29453")]
+    /// See `OsStr` introduction for more information about encoding.
+    #[stable(feature = "osstring_simple_functions", since = "1.9.0")]
     pub fn len(&self) -> usize {
         self.inner.inner.len()
     }
@@ -354,6 +310,14 @@ impl OsStr {
     }
 }
 
+#[stable(feature = "osstring_default", since = "1.9.0")]
+impl<'a> Default for &'a OsStr {
+    #[inline]
+    fn default() -> &'a OsStr {
+        OsStr::new("")
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl PartialEq for OsStr {
     fn eq(&self, other: &OsStr) -> bool {
@@ -606,6 +570,12 @@ mod tests {
         assert!(os_string.capacity() >= 33)
     }
 
+    #[test]
+    fn test_os_string_default() {
+        let os_string: OsString = Default::default();
+        assert_eq!("", &os_string);
+    }
+
     #[test]
     fn test_os_str_is_empty() {
         let mut os_string = OsString::new();
@@ -629,4 +599,10 @@ mod tests {
         os_string.clear();
         assert_eq!(0, os_string.len());
     }
+
+    #[test]
+    fn test_os_str_default() {
+        let os_str: &OsStr = Default::default();
+        assert_eq!("", os_str);
+    }
 }
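
The `osstring_simple_functions` group stabilized here mirrors the familiar `String` surface, and the two new `Default` impls give empty values. A short sketch (contents illustrative); note that `capacity` and `len` count platform length units rather than bytes:

```rust
use std::ffi::{OsStr, OsString};

fn main() {
    let mut s = OsString::with_capacity(10);
    assert!(s.capacity() >= 10);

    s.push("hello");
    assert_eq!(s.len(), 5);

    s.reserve(32);
    assert!(s.capacity() >= 5 + 32);

    s.clear();
    assert!(s.is_empty());

    // The new Default impls produce an empty OsString and an empty &OsStr.
    let empty_owned: OsString = Default::default();
    let empty_borrowed: &OsStr = Default::default();
    assert_eq!(&*empty_owned, empty_borrowed);
}
```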
index badbba21d55cc9ddfbf7c55f266a7be4760550ea..f5a51e974272754541ac5813ed6c0ecec3cad521 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! Filesystem manipulation operations
+//! Filesystem manipulation operations.
 //!
 //! This module contains basic methods to manipulate the contents of the local
 //! filesystem. All methods in this module represent cross-platform filesystem
@@ -22,7 +22,6 @@ use ffi::OsString;
 use io::{self, SeekFrom, Seek, Read, Write};
 use path::{Path, PathBuf};
 use sys::fs as fs_imp;
-use sys_common::io::read_to_end_uninitialized;
 use sys_common::{AsInnerMut, FromInner, AsInner, IntoInner};
 use vec::Vec;
 use time::SystemTime;
@@ -86,19 +85,6 @@ pub struct ReadDir(fs_imp::ReadDir);
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct DirEntry(fs_imp::DirEntry);
 
-/// An iterator that recursively walks over the contents of a directory.
-#[unstable(feature = "fs_walk",
-           reason = "the precise semantics and defaults for a recursive walk \
-                     may change and this may end up accounting for files such \
-                     as symlinks differently",
-           issue = "27707")]
-#[rustc_deprecated(reason = "superceded by the walkdir crate",
-                   since = "1.6.0")]
-pub struct WalkDir {
-    cur: Option<ReadDir>,
-    stack: Vec<io::Result<ReadDir>>,
-}
-
 /// Options and flags which can be used to configure how a file is opened.
 ///
 /// This builder exposes the ability to configure how a `File` is opened and
@@ -316,10 +302,10 @@ impl File {
     /// The returned `File` is a reference to the same state that this object
     /// references. Both handles will read and write with the same cursor
     /// position.
-    #[unstable(feature = "file_try_clone", reason = "newly added", issue = "31405")]
+    #[stable(feature = "file_try_clone", since = "1.9.0")]
     pub fn try_clone(&self) -> io::Result<File> {
         Ok(File {
-            inner: try!(self.inner.duplicate())
+            inner: self.inner.duplicate()?
         })
     }
 }
@@ -351,7 +337,7 @@ impl Read for File {
         self.inner.read(buf)
     }
     fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
-        unsafe { read_to_end_uninitialized(self, buf) }
+        self.inner.read_to_end(buf)
     }
 }
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -372,6 +358,9 @@ impl<'a> Read for &'a File {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
         self.inner.read(buf)
     }
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        self.inner.read_to_end(buf)
+    }
 }
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<'a> Write for &'a File {
@@ -534,16 +523,13 @@ impl OpenOptions {
     /// # Examples
     ///
     /// ```no_run
-    /// #![feature(expand_open_options)]
     /// use std::fs::OpenOptions;
     ///
     /// let file = OpenOptions::new().write(true)
     ///                              .create_new(true)
     ///                              .open("foo.txt");
     /// ```
-    #[unstable(feature = "expand_open_options",
-               reason = "recently added",
-               issue = "30014")]
+    #[stable(feature = "expand_open_options2", since = "1.9.0")]
     pub fn create_new(&mut self, create_new: bool) -> &mut OpenOptions {
         self.0.create_new(create_new); self
     }
@@ -576,7 +562,7 @@ impl OpenOptions {
     }
 
     fn _open(&self, path: &Path) -> io::Result<File> {
-        let inner = try!(fs_imp::File::open(path, &self.0));
+        let inner = fs_imp::File::open(path, &self.0)?;
         Ok(File { inner: inner })
     }
 }
@@ -1343,7 +1329,7 @@ pub fn remove_dir_all<P: AsRef<Path>>(path: P) -> io::Result<()> {
 /// use std::fs::{self, DirEntry};
 /// use std::path::Path;
 ///
-/// // one possible implementation of fs::walk_dir only visiting files
+/// // one possible implementation of walking a directory only visiting files
 /// fn visit_dirs(dir: &Path, cb: &Fn(&DirEntry)) -> io::Result<()> {
 ///     if try!(fs::metadata(dir)).is_dir() {
 ///         for entry in try!(fs::read_dir(dir)) {
@@ -1363,64 +1349,6 @@ pub fn read_dir<P: AsRef<Path>>(path: P) -> io::Result<ReadDir> {
     fs_imp::readdir(path.as_ref()).map(ReadDir)
 }
 
-/// Returns an iterator that will recursively walk the directory structure
-/// rooted at `path`.
-///
-/// The path given will not be iterated over, and this will perform iteration in
-/// some top-down order.  The contents of unreadable subdirectories are ignored.
-///
-/// The iterator will yield instances of `io::Result<DirEntry>`. New errors may
-/// be encountered after an iterator is initially constructed.
-#[unstable(feature = "fs_walk",
-           reason = "the precise semantics and defaults for a recursive walk \
-                     may change and this may end up accounting for files such \
-                     as symlinks differently",
-           issue = "27707")]
-#[rustc_deprecated(reason = "superceded by the walkdir crate",
-                   since = "1.6.0")]
-#[allow(deprecated)]
-pub fn walk_dir<P: AsRef<Path>>(path: P) -> io::Result<WalkDir> {
-    _walk_dir(path.as_ref())
-}
-
-#[allow(deprecated)]
-fn _walk_dir(path: &Path) -> io::Result<WalkDir> {
-    let start = try!(read_dir(path));
-    Ok(WalkDir { cur: Some(start), stack: Vec::new() })
-}
-
-#[unstable(feature = "fs_walk", issue = "27707")]
-#[rustc_deprecated(reason = "superceded by the walkdir crate",
-                   since = "1.6.0")]
-#[allow(deprecated)]
-impl Iterator for WalkDir {
-    type Item = io::Result<DirEntry>;
-
-    fn next(&mut self) -> Option<io::Result<DirEntry>> {
-        loop {
-            if let Some(ref mut cur) = self.cur {
-                match cur.next() {
-                    Some(Err(e)) => return Some(Err(e)),
-                    Some(Ok(next)) => {
-                        let path = next.path();
-                        if path.is_dir() {
-                            self.stack.push(read_dir(&*path));
-                        }
-                        return Some(Ok(next))
-                    }
-                    None => {}
-                }
-            }
-            self.cur = None;
-            match self.stack.pop() {
-                Some(Err(e)) => return Some(Err(e)),
-                Some(Ok(next)) => self.cur = Some(next),
-                None => return None,
-            }
-        }
-    }
-}
-
 /// Changes the permissions found on a file or a directory.
 ///
 /// # Platform-specific behavior
@@ -1509,7 +1437,7 @@ impl DirBuilder {
     fn create_dir_all(&self, path: &Path) -> io::Result<()> {
         if path == Path::new("") || path.is_dir() { return Ok(()) }
         if let Some(p) = path.parent() {
-            try!(self.create_dir_all(p))
+            self.create_dir_all(p)?
         }
         self.inner.mkdir(path)
     }
@@ -1526,12 +1454,12 @@ mod tests {
     use prelude::v1::*;
     use io::prelude::*;
 
-    use env;
     use fs::{self, File, OpenOptions};
     use io::{ErrorKind, SeekFrom};
-    use path::{Path, PathBuf};
-    use rand::{self, StdRng, Rng};
+    use path::Path;
+    use rand::{StdRng, Rng};
     use str;
+    use sys_common::io::test::{TempDir, tmpdir};
 
     #[cfg(windows)]
     use os::windows::fs::{symlink_dir, symlink_file};
@@ -1559,37 +1487,6 @@ mod tests {
         }
     ) }
 
-    pub struct TempDir(PathBuf);
-
-    impl TempDir {
-        fn join(&self, path: &str) -> PathBuf {
-            let TempDir(ref p) = *self;
-            p.join(path)
-        }
-
-        fn path<'a>(&'a self) -> &'a Path {
-            let TempDir(ref p) = *self;
-            p
-        }
-    }
-
-    impl Drop for TempDir {
-        fn drop(&mut self) {
-            // Gee, seeing how we're testing the fs module I sure hope that we
-            // at least implement this correctly!
-            let TempDir(ref p) = *self;
-            check!(fs::remove_dir_all(p));
-        }
-    }
-
-    pub fn tmpdir() -> TempDir {
-        let p = env::temp_dir();
-        let mut r = rand::thread_rng();
-        let ret = p.join(&format!("rust-{}", r.next_u32()));
-        check!(fs::create_dir(&ret));
-        TempDir(ret)
-    }
-
     // Several tests fail on Windows if the user does not have permission to
     // create symlinks (the `SeCreateSymbolicLinkPrivilege`). Instead of
     // disabling these tests on Windows, use this function to test whether we
@@ -1808,7 +1705,7 @@ mod tests {
         let tmpdir = tmpdir();
         let dir = &tmpdir.join("fileinfo_false_on_dir");
         check!(fs::create_dir(dir));
-        assert!(dir.is_file() == false);
+        assert!(!dir.is_file());
         check!(fs::remove_dir(dir));
     }
 
@@ -1863,35 +1760,6 @@ mod tests {
         check!(fs::remove_dir(dir));
     }
 
-    #[test]
-    #[allow(deprecated)]
-    fn file_test_walk_dir() {
-        let tmpdir = tmpdir();
-        let dir = &tmpdir.join("walk_dir");
-        check!(fs::create_dir(dir));
-
-        let dir1 = &dir.join("01/02/03");
-        check!(fs::create_dir_all(dir1));
-        check!(File::create(&dir1.join("04")));
-
-        let dir2 = &dir.join("11/12/13");
-        check!(fs::create_dir_all(dir2));
-        check!(File::create(&dir2.join("14")));
-
-        let files = check!(fs::walk_dir(dir));
-        let mut cur = [0; 2];
-        for f in files {
-            let f = f.unwrap().path();
-            let stem = f.file_stem().unwrap().to_str().unwrap();
-            let root = stem.as_bytes()[0] - b'0';
-            let name = stem.as_bytes()[1] - b'0';
-            assert!(cur[root as usize] < name);
-            cur[root as usize] = name;
-        }
-
-        check!(fs::remove_dir_all(dir));
-    }
-
     #[test]
     fn mkdir_path_already_exists_error() {
         let tmpdir = tmpdir();
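
The fs::walk_dir iterator removed above was superseded by the external walkdir crate; within std, a recursive walk can still be put together from fs::read_dir and the `?` operator this patch migrates to. A minimal sketch, not part of the patch, assuming a plain top-down visit is enough:

use std::fs;
use std::io;
use std::path::{Path, PathBuf};

// Collect every path under `root` in a top-down order, roughly what the
// removed fs::walk_dir yielded, but propagating errors from unreadable
// directories via `?` instead of deferring them.
fn collect_paths(root: &Path, out: &mut Vec<PathBuf>) -> io::Result<()> {
    for entry in fs::read_dir(root)? {
        let path = entry?.path();
        out.push(path.clone());
        if path.is_dir() {
            collect_paths(&path, out)?;
        }
    }
    Ok(())
}

fn main() {
    let mut paths = Vec::new();
    collect_paths(Path::new("."), &mut paths).unwrap();
    for p in &paths {
        println!("{}", p.display());
    }
}
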
index ccebf3682c217c2683dc2a6450566d59915cc531..632ef3db80451b5ad65e570cfb3e42583dd3204e 100644 (file)
@@ -172,8 +172,8 @@ impl<R: Read> Read for BufReader<R> {
             return self.inner.read(buf);
         }
         let nread = {
-            let mut rem = try!(self.fill_buf());
-            try!(rem.read(buf))
+            let mut rem = self.fill_buf()?;
+            rem.read(buf)?
         };
         self.consume(nread);
         Ok(nread)
@@ -186,7 +186,7 @@ impl<R: Read> BufRead for BufReader<R> {
         // If we've reached the end of our internal buffer then we need to fetch
         // some more data from the underlying reader.
         if self.pos == self.cap {
-            self.cap = try!(self.inner.read(&mut self.buf));
+            self.cap = self.inner.read(&mut self.buf)?;
             self.pos = 0;
         }
         Ok(&self.buf[self.pos..self.cap])
@@ -237,16 +237,16 @@ impl<R: Seek> Seek for BufReader<R> {
             // support seeking by i64::min_value() so we need to handle underflow when subtracting
             // remainder.
             if let Some(offset) = n.checked_sub(remainder) {
-                result = try!(self.inner.seek(SeekFrom::Current(offset)));
+                result = self.inner.seek(SeekFrom::Current(offset))?;
             } else {
                 // seek backwards by our remainder, and then by the offset
-                try!(self.inner.seek(SeekFrom::Current(-remainder)));
+                self.inner.seek(SeekFrom::Current(-remainder))?;
                 self.pos = self.cap; // empty the buffer
-                result = try!(self.inner.seek(SeekFrom::Current(n)));
+                result = self.inner.seek(SeekFrom::Current(n))?;
             }
         } else {
             // Seeking with Start/End doesn't care about our buffer length.
-            result = try!(self.inner.seek(pos));
+            result = self.inner.seek(pos)?;
         }
         self.pos = self.cap; // empty the buffer
         Ok(result)
@@ -461,7 +461,7 @@ impl<W: Write> BufWriter<W> {
 impl<W: Write> Write for BufWriter<W> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
         if self.buf.len() + buf.len() > self.buf.capacity() {
-            try!(self.flush_buf());
+            self.flush_buf()?;
         }
         if buf.len() >= self.buf.capacity() {
             self.panicked = true;
@@ -761,9 +761,11 @@ impl<W: Write> Write for LineWriter<W> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
         match memchr::memrchr(b'\n', buf) {
             Some(i) => {
-                let n = try!(self.inner.write(&buf[..i + 1]));
-                if n != i + 1 { return Ok(n) }
-                try!(self.inner.flush());
+                let n = self.inner.write(&buf[..i + 1])?;
+                if n != i + 1 || self.inner.flush().is_err() {
+                    // Do not return errors on partial writes.
+                    return Ok(n);
+                }
                 self.inner.write(&buf[i + 1..]).map(|i| n + i)
             }
             None => self.inner.write(buf),
@@ -982,6 +984,34 @@ mod tests {
         assert_eq!(v, []);
     }
 
+    #[test]
+    fn test_line_buffer_fail_flush() {
+        // Issue #32085
+        struct FailFlushWriter<'a>(&'a mut Vec<u8>);
+
+        impl<'a> Write for FailFlushWriter<'a> {
+            fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+                self.0.extend_from_slice(buf);
+                Ok(buf.len())
+            }
+            fn flush(&mut self) -> io::Result<()> {
+                Err(io::Error::new(io::ErrorKind::Other, "flush failed"))
+            }
+        }
+
+        let mut buf = Vec::new();
+        {
+            let mut writer = LineWriter::new(FailFlushWriter(&mut buf));
+            let to_write = b"abc\ndef";
+            if let Ok(written) = writer.write(to_write) {
+                assert!(written < to_write.len(), "didn't flush on new line");
+                // PASS
+                return;
+            }
+        }
+        assert!(buf.is_empty(), "write returned an error but wrote data");
+    }
+
     #[test]
     fn test_line_buffer() {
         let mut writer = LineWriter::new(Vec::new());
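
The LineWriter change above makes `write` report a short count instead of an error when the flush that follows a newline fails (the new test_line_buffer_fail_flush test pins this down). A minimal sketch of the happy-path contract, not part of the patch:

use std::io::{LineWriter, Write};

fn main() {
    let mut writer = LineWriter::new(Vec::new());
    // Everything up to and including the last b'\n' is forwarded (and flushed)
    // immediately; the trailing "def" stays in LineWriter's buffer.
    writer.write_all(b"abc\ndef").unwrap();
    assert_eq!(&writer.get_ref()[..], &b"abc\n"[..]);

    // An explicit flush (or dropping the writer) pushes the remainder through.
    writer.flush().unwrap();
    assert_eq!(&writer.get_ref()[..], &b"abc\ndef"[..]);
}
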
index 420fede34d2f835cd4dbca5daeffa42e93c48ebe..a1002fdb645fcb6119b9ff0d9b845a2ff3ab52fd 100644 (file)
@@ -213,7 +213,7 @@ impl<T> io::Seek for Cursor<T> where T: AsRef<[u8]> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T> Read for Cursor<T> where T: AsRef<[u8]> {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        let n = try!(Read::read(&mut try!(self.fill_buf()), buf));
+        let n = Read::read(&mut self.fill_buf()?, buf)?;
         self.pos += n as u64;
         Ok(n)
     }
@@ -232,7 +232,7 @@ impl<T> BufRead for Cursor<T> where T: AsRef<[u8]> {
 impl<'a> Write for Cursor<&'a mut [u8]> {
     fn write(&mut self, data: &[u8]) -> io::Result<usize> {
         let pos = cmp::min(self.pos, self.inner.len() as u64);
-        let amt = try!((&mut self.inner[(pos as usize)..]).write(data));
+        let amt = (&mut self.inner[(pos as usize)..]).write(data)?;
         self.pos += amt as u64;
         Ok(amt)
     }
@@ -271,7 +271,7 @@ impl Write for Cursor<Vec<u8>> {
 impl Write for Cursor<Box<[u8]>> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
         let pos = cmp::min(self.pos, self.inner.len() as u64);
-        let amt = try!((&mut self.inner[(pos as usize)..]).write(buf));
+        let amt = (&mut self.inner[(pos as usize)..]).write(buf)?;
         self.pos += amt as u64;
         Ok(amt)
     }
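
The Cursor hunks above only swap try! for the `?` operator; behaviour is unchanged. For orientation, a minimal sketch (not part of the patch) of the in-memory read/write/seek adaptors involved:

use std::io::{Cursor, Read, Seek, SeekFrom, Write};

fn main() {
    // Writing through a Cursor<Vec<u8>> appends at the current position...
    let mut cur = Cursor::new(Vec::new());
    cur.write_all(b"hello world").unwrap();

    // ...and reading picks up wherever the cursor points.
    cur.seek(SeekFrom::Start(6)).unwrap();
    let mut tail = String::new();
    cur.read_to_string(&mut tail).unwrap();
    assert_eq!(tail, "world");
}
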
index e3f17c839f135be9c4ef4c26848f9bde3d89a071..9a605fc7bbff48c1080c50948a588c883b09451e 100644 (file)
@@ -150,12 +150,6 @@ pub enum ErrorKind {
     #[stable(feature = "rust1", since = "1.0.0")]
     Other,
 
-    #[allow(missing_docs)]
-    #[unstable(feature = "read_exact_old", reason = "recently added",
-               issue = "0")]
-    #[rustc_deprecated(since = "1.6.0", reason = "renamed to UnexpectedEof")]
-    UnexpectedEOF,
-
     /// An error returned when an operation could not be completed because an
     /// "end of file" was reached prematurely.
     ///
@@ -311,7 +305,6 @@ impl fmt::Display for Error {
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl error::Error for Error {
-    #[allow(deprecated)] // remove with UnexpectedEOF
     fn description(&self) -> &str {
         match self.repr {
             Repr::Os(..) => match self.kind() {
@@ -332,7 +325,6 @@ impl error::Error for Error {
                 ErrorKind::WriteZero => "write zero",
                 ErrorKind::Interrupted => "operation interrupted",
                 ErrorKind::Other => "other os error",
-                ErrorKind::UnexpectedEOF => "unexpected end of file",
                 ErrorKind::UnexpectedEof => "unexpected end of file",
                 ErrorKind::__Nonexhaustive => unreachable!()
             },
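
With the deprecated UnexpectedEOF variant removed, UnexpectedEof is the only remaining spelling. A small sketch, not part of the patch, of where callers typically meet it (read_exact reports short input this way):

use std::io::{ErrorKind, Read};

fn main() {
    // read_exact on a 3-byte source cannot fill an 8-byte buffer, so it
    // reports ErrorKind::UnexpectedEof.
    let mut buf = [0u8; 8];
    let err = (&b"abc"[..]).read_exact(&mut buf).unwrap_err();
    assert_eq!(err.kind(), ErrorKind::UnexpectedEof);
}
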
index ec63f14d4533217a21a1bd6ca28af70c8a278bf4..317993815630b93fa698649c422d7ffa4eed0093 100644 (file)
@@ -196,7 +196,7 @@ impl<'a> Write for &'a mut [u8] {
 
     #[inline]
     fn write_all(&mut self, data: &[u8]) -> io::Result<()> {
-        if try!(self.write(data)) == data.len() {
+        if self.write(data)? == data.len() {
             Ok(())
         } else {
             Err(Error::new(ErrorKind::WriteZero, "failed to write whole buffer"))
index 61334f309249acc4a902d7de4092e67672448a87..6dd7273c17fe7b614cebcc67cc4cac7c68e0468f 100644 (file)
@@ -292,7 +292,7 @@ mod lazy;
 mod util;
 mod stdio;
 
-const DEFAULT_BUF_SIZE: usize = 64 * 1024;
+const DEFAULT_BUF_SIZE: usize = 8 * 1024;
 
 // A few methods below (read_to_string, read_line) will append data into a
 // `String` buffer, but we need to be pretty careful when doing this. The
@@ -811,49 +811,6 @@ pub trait Read {
     fn take(self, limit: u64) -> Take<Self> where Self: Sized {
         Take { inner: self, limit: limit }
     }
-
-    /// Creates a reader adaptor which will write all read data into the given
-    /// output stream.
-    ///
-    /// Whenever the returned `Read` instance is read it will write the read
-    /// data to `out`. The current semantics of this implementation imply that
-    /// a `write` error will not report how much data was initially read.
-    ///
-    /// # Examples
-    ///
-    /// [`File`][file]s implement `Read`:
-    ///
-    /// [file]: ../fs/struct.File.html
-    ///
-    /// ```
-    /// #![feature(io)]
-    /// use std::io;
-    /// use std::io::prelude::*;
-    /// use std::fs::File;
-    ///
-    /// # fn foo() -> io::Result<()> {
-    /// let mut f = try!(File::open("foo.txt"));
-    /// let mut buffer1 = Vec::with_capacity(10);
-    /// let mut buffer2 = Vec::with_capacity(10);
-    ///
-    /// // write the output to buffer1 as we read
-    /// let mut handle = f.tee(&mut buffer1);
-    ///
-    /// try!(handle.read(&mut buffer2));
-    /// # Ok(())
-    /// # }
-    /// ```
-    #[unstable(feature = "io", reason = "the semantics of a partial read/write \
-                                         of where errors happen is currently \
-                                         unclear and may change",
-               issue = "27802")]
-    #[rustc_deprecated(reason = "error handling semantics unclear and \
-                                 don't seem to have an ergonomic resolution",
-                       since = "1.6.0")]
-    #[allow(deprecated)]
-    fn tee<W: Write>(self, out: W) -> Tee<Self, W> where Self: Sized {
-        Tee { reader: self, writer: out }
-    }
 }
 
 /// A trait for objects which are byte-oriented sinks.
@@ -1089,47 +1046,6 @@ pub trait Write {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     fn by_ref(&mut self) -> &mut Self where Self: Sized { self }
-
-    /// Creates a new writer which will write all data to both this writer and
-    /// another writer.
-    ///
-    /// All data written to the returned writer will both be written to `self`
-    /// as well as `other`. Note that the error semantics of the current
-    /// implementation do not precisely track where errors happen. For example
-    /// an error on the second call to `write` will not report that the first
-    /// call to `write` succeeded.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// #![feature(io)]
-    /// use std::io::prelude::*;
-    /// use std::fs::File;
-    ///
-    /// # fn foo() -> std::io::Result<()> {
-    /// let mut buffer1 = try!(File::create("foo.txt"));
-    /// let mut buffer2 = Vec::new();
-    ///
-    /// // write the output to buffer1 as we read
-    /// let mut handle = buffer1.broadcast(&mut buffer2);
-    ///
-    /// try!(handle.write(b"some bytes"));
-    /// # Ok(())
-    /// # }
-    /// ```
-    #[unstable(feature = "io", reason = "the semantics of a partial read/write \
-                                         of where errors happen is currently \
-                                         unclear and may change",
-               issue = "27802")]
-    #[rustc_deprecated(reason = "error handling semantics unclear and \
-                                 don't seem to have an ergonomic resolution",
-                       since = "1.6.0")]
-    #[allow(deprecated)]
-    fn broadcast<W: Write>(self, other: W) -> Broadcast<Self, W>
-        where Self: Sized
-    {
-        Broadcast { first: self, second: other }
-    }
 }
 
 /// The `Seek` trait provides a cursor which can be moved within a stream of
@@ -1338,7 +1254,7 @@ pub trait BufRead: Read {
     /// longer be returned. As such, this function may do odd things if
     /// `fill_buf` isn't called before calling it.
     ///
-    /// [fillbuf]: #tymethod.fill_buff
+    /// [fillbuf]: #tymethod.fill_buf
     ///
     /// The `amt` must be `<=` the number of bytes in the buffer returned by
     /// `fill_buf`.
@@ -1500,41 +1416,6 @@ pub trait BufRead: Read {
     }
 }
 
-/// A `Write` adaptor which will write data to multiple locations.
-///
-/// This struct is generally created by calling [`broadcast()`][broadcast] on a
-/// writer. Please see the documentation of `broadcast()` for more details.
-///
-/// [broadcast]: trait.Write.html#method.broadcast
-#[unstable(feature = "io", reason = "awaiting stability of Write::broadcast",
-           issue = "27802")]
-#[rustc_deprecated(reason = "error handling semantics unclear and \
-                             don't seem to have an ergonomic resolution",
-                   since = "1.6.0")]
-pub struct Broadcast<T, U> {
-    first: T,
-    second: U,
-}
-
-#[unstable(feature = "io", reason = "awaiting stability of Write::broadcast",
-           issue = "27802")]
-#[rustc_deprecated(reason = "error handling semantics unclear and \
-                             don't seem to have an ergonomic resolution",
-                   since = "1.6.0")]
-#[allow(deprecated)]
-impl<T: Write, U: Write> Write for Broadcast<T, U> {
-    fn write(&mut self, data: &[u8]) -> Result<usize> {
-        let n = try!(self.first.write(data));
-        // FIXME: what if the write fails? (we wrote something)
-        try!(self.second.write_all(&data[..n]));
-        Ok(n)
-    }
-
-    fn flush(&mut self) -> Result<()> {
-        self.first.flush().and(self.second.flush())
-    }
-}
-
 /// Adaptor to chain together two readers.
 ///
 /// This struct is generally created by calling [`chain()`][chain] on a reader.
@@ -1552,7 +1433,7 @@ pub struct Chain<T, U> {
 impl<T: Read, U: Read> Read for Chain<T, U> {
     fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
         if !self.done_first {
-            match try!(self.first.read(buf)) {
+            match self.first.read(buf)? {
                 0 => { self.done_first = true; }
                 n => return Ok(n),
             }
@@ -1561,6 +1442,27 @@ impl<T: Read, U: Read> Read for Chain<T, U> {
     }
 }
 
+#[stable(feature = "chain_bufread", since = "1.9.0")]
+impl<T: BufRead, U: BufRead> BufRead for Chain<T, U> {
+    fn fill_buf(&mut self) -> Result<&[u8]> {
+        if !self.done_first {
+            match self.first.fill_buf()? {
+                buf if buf.len() == 0 => { self.done_first = true; }
+                buf => return Ok(buf),
+            }
+        }
+        self.second.fill_buf()
+    }
+
+    fn consume(&mut self, amt: usize) {
+        if !self.done_first {
+            self.first.consume(amt)
+        } else {
+            self.second.consume(amt)
+        }
+    }
+}
+
 /// Reader adaptor which limits the bytes read from an underlying reader.
 ///
 /// This struct is generally created by calling [`take()`][take] on a reader.
@@ -1594,7 +1496,7 @@ impl<T: Read> Read for Take<T> {
         }
 
         let max = cmp::min(buf.len() as u64, self.limit) as usize;
-        let n = try!(self.inner.read(&mut buf[..max]));
+        let n = self.inner.read(&mut buf[..max])?;
         self.limit -= n as u64;
         Ok(n)
     }
@@ -1603,7 +1505,7 @@ impl<T: Read> Read for Take<T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: BufRead> BufRead for Take<T> {
     fn fill_buf(&mut self) -> Result<&[u8]> {
-        let buf = try!(self.inner.fill_buf());
+        let buf = self.inner.fill_buf()?;
         let cap = cmp::min(buf.len() as u64, self.limit) as usize;
         Ok(&buf[..cap])
     }
@@ -1616,37 +1518,6 @@ impl<T: BufRead> BufRead for Take<T> {
     }
 }
 
-/// An adaptor which will emit all read data to a specified writer as well.
-///
-/// This struct is generally created by calling [`tee()`][tee] on a reader.
-/// Please see the documentation of `tee()` for more details.
-///
-/// [tee]: trait.Read.html#method.tee
-#[unstable(feature = "io", reason = "awaiting stability of Read::tee",
-           issue = "27802")]
-#[rustc_deprecated(reason = "error handling semantics unclear and \
-                             don't seem to have an ergonomic resolution",
-                   since = "1.6.0")]
-pub struct Tee<R, W> {
-    reader: R,
-    writer: W,
-}
-
-#[unstable(feature = "io", reason = "awaiting stability of Read::tee",
-           issue = "27802")]
-#[rustc_deprecated(reason = "error handling semantics unclear and \
-                             don't seem to have an ergonomic resolution",
-                   since = "1.6.0")]
-#[allow(deprecated)]
-impl<R: Read, W: Write> Read for Tee<R, W> {
-    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
-        let n = try!(self.reader.read(buf));
-        // FIXME: what if the write fails? (we read something)
-        try!(self.writer.write_all(&buf[..n]));
-        Ok(n)
-    }
-}
-
 /// An iterator over `u8` values of a reader.
 ///
 /// This struct is generally created by calling [`bytes()`][bytes] on a reader.
@@ -1725,7 +1596,7 @@ impl<R: Read> Iterator for Chars<R> {
             }
         }
         Some(match str::from_utf8(&buf[..width]).ok() {
-            Some(s) => Ok(s.char_at(0)),
+            Some(s) => Ok(s.chars().next().unwrap()),
             None => Err(CharsError::NotUtf8),
         })
     }
@@ -1994,6 +1865,39 @@ mod tests {
         assert_eq!(0, R.take(0).read(&mut buf).unwrap());
     }
 
+    fn cmp_bufread<Br1: BufRead, Br2: BufRead>(mut br1: Br1, mut br2: Br2, exp: &[u8]) {
+        let mut cat = Vec::new();
+        loop {
+            let consume = {
+                let buf1 = br1.fill_buf().unwrap();
+                let buf2 = br2.fill_buf().unwrap();
+                let minlen = if buf1.len() < buf2.len() { buf1.len() } else { buf2.len() };
+                assert_eq!(buf1[..minlen], buf2[..minlen]);
+                cat.extend_from_slice(&buf1[..minlen]);
+                minlen
+            };
+            if consume == 0 {
+                break;
+            }
+            br1.consume(consume);
+            br2.consume(consume);
+        }
+        assert_eq!(br1.fill_buf().unwrap().len(), 0);
+        assert_eq!(br2.fill_buf().unwrap().len(), 0);
+        assert_eq!(&cat[..], &exp[..])
+    }
+
+    #[test]
+    fn chain_bufread() {
+        let testdata = b"ABCDEFGHIJKL";
+        let chain1 = (&testdata[..3]).chain(&testdata[3..6])
+                                     .chain(&testdata[6..9])
+                                     .chain(&testdata[9..]);
+        let chain2 = (&testdata[..4]).chain(&testdata[4..8])
+                                     .chain(&testdata[8..]);
+        cmp_bufread(chain1, chain2, &testdata[..]);
+    }
+
     #[bench]
     fn bench_read_to_end(b: &mut test::Bencher) {
         b.iter(|| {
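
The newly stable `impl BufRead for Chain` shown above means a chain of buffered sources gets the BufRead conveniences (fill_buf, read_until, lines) for free. A minimal sketch, not part of the patch, of roughly what the chain_bufread test exercises:

use std::io::{BufRead, Read};

fn main() {
    // Both halves are &[u8], which already implements BufRead, so the chained
    // reader can be consumed line by line without an intermediate copy.
    let first = &b"alpha\nbe"[..];
    let second = &b"ta\ngamma\n"[..];
    let chained = first.chain(second);

    let lines: Vec<String> = chained.lines().map(|l| l.unwrap()).collect();
    assert_eq!(lines, ["alpha", "beta", "gamma"]);
}
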
index cd2d5e52462bb91b8e63d4bd02e74411ad0ec874..c4b573db5f2dd41b44f4179cba30e5680ed88cfe 100644 (file)
@@ -12,13 +12,11 @@ use prelude::v1::*;
 use io::prelude::*;
 
 use cell::{RefCell, BorrowState};
-use cmp;
 use fmt;
 use io::lazy::Lazy;
 use io::{self, BufReader, LineWriter};
 use sync::{Arc, Mutex, MutexGuard};
 use sys::stdio;
-use sys_common::io::{read_to_end_uninitialized};
 use sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
 use thread::LocalKeyState;
 
@@ -78,6 +76,9 @@ fn stderr_raw() -> io::Result<StderrRaw> { stdio::Stderr::new().map(StderrRaw) }
 
 impl Read for StdinRaw {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.0.read(buf) }
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        self.0.read_to_end(buf)
+    }
 }
 impl Write for StdoutRaw {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.0.write(buf) }
@@ -116,6 +117,12 @@ impl<R: io::Read> io::Read for Maybe<R> {
             Maybe::Fake => Ok(0)
         }
     }
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        match *self {
+            Maybe::Real(ref mut r) => handle_ebadf(r.read_to_end(buf), 0),
+            Maybe::Fake => Ok(0)
+        }
+    }
 }
 
 fn handle_ebadf<T>(r: io::Result<T>, default: T) -> io::Result<T> {
@@ -134,8 +141,8 @@ fn handle_ebadf<T>(r: io::Result<T>, default: T) -> io::Result<T> {
 ///
 /// Each handle is a shared reference to a global buffer of input data to this
 /// process. A handle can be `lock`'d to gain full access to [`BufRead`] methods
-/// (e.g. `.lines()`). Writes to this handle are otherwise locked with respect
-/// to other writes.
+/// (e.g. `.lines()`). Reads from this handle are otherwise locked with respect
+/// to other reads.
 ///
 /// This handle implements the `Read` trait, but beware that concurrent reads
 /// of `Stdin` must be executed with care.
@@ -294,7 +301,7 @@ impl<'a> Read for StdinLock<'a> {
         self.inner.read(buf)
     }
     fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
-        unsafe { read_to_end_uninitialized(self, buf) }
+        self.inner.read_to_end(buf)
     }
 }
 
@@ -304,22 +311,6 @@ impl<'a> BufRead for StdinLock<'a> {
     fn consume(&mut self, n: usize) { self.inner.consume(n) }
 }
 
-// As with stdin on windows, stdout often can't handle writes of large
-// sizes. For an example, see #14940. For this reason, don't try to
-// write the entire output buffer on windows. On unix we can just
-// write the whole buffer all at once.
-//
-// For some other references, it appears that this problem has been
-// encountered by others [1] [2]. We choose the number 8KB just because
-// libuv does the same.
-//
-// [1]: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232
-// [2]: http://www.mail-archive.com/log4net-dev@logging.apache.org/msg00661.html
-#[cfg(windows)]
-const OUT_MAX: usize = 8192;
-#[cfg(unix)]
-const OUT_MAX: usize = ::usize::MAX;
-
 /// A handle to the global standard output stream of the current process.
 ///
 /// Each handle shares a global buffer of data to be written to the standard
@@ -432,7 +423,7 @@ impl Write for Stdout {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<'a> Write for StdoutLock<'a> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
-        self.inner.borrow_mut().write(&buf[..cmp::min(buf.len(), OUT_MAX)])
+        self.inner.borrow_mut().write(buf)
     }
     fn flush(&mut self) -> io::Result<()> {
         self.inner.borrow_mut().flush()
@@ -538,7 +529,7 @@ impl Write for Stderr {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<'a> Write for StderrLock<'a> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
-        self.inner.borrow_mut().write(&buf[..cmp::min(buf.len(), OUT_MAX)])
+        self.inner.borrow_mut().write(buf)
     }
     fn flush(&mut self) -> io::Result<()> {
         self.inner.borrow_mut().flush()
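
Two effects of the stdio changes above: read_to_end on a locked stdin now delegates to the underlying reader rather than read_to_end_uninitialized, and stdout/stderr writes are no longer chopped into 8 KB pieces on Windows (OUT_MAX is gone). A minimal cat-style sketch, not part of the patch:

use std::io::{self, Read, Write};

fn main() {
    let stdin = io::stdin();
    let stdout = io::stdout();

    // The locked handle reads everything available on stdin in one call.
    let mut input = Vec::new();
    stdin.lock().read_to_end(&mut input).unwrap();

    // The locked handle writes the whole buffer in one shot, regardless of size.
    let mut out = stdout.lock();
    out.write_all(&input).unwrap();
    out.flush().unwrap();
}
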
index e05a0d577ff895b6c366c5c7f8d577c7e56d8e8b..2815c0163d68a86e448dc50f459b0c00663c4e21 100644 (file)
@@ -55,7 +55,7 @@ pub fn copy<R: ?Sized, W: ?Sized>(reader: &mut R, writer: &mut W) -> io::Result<
             Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
             Err(e) => return Err(e),
         };
-        try!(writer.write_all(&buf[..len]));
+        writer.write_all(&buf[..len])?;
         written += len as u64;
     }
 }
@@ -197,31 +197,4 @@ mod tests {
         assert_eq!(repeat(4).take(100).bytes().next().unwrap().unwrap(), 4);
         assert_eq!(repeat(1).take(10).chain(repeat(2).take(10)).bytes().count(), 20);
     }
-
-    #[test]
-    #[allow(deprecated)]
-    fn tee() {
-        let mut buf = [0; 10];
-        {
-            let mut ptr: &mut [u8] = &mut buf;
-            assert_eq!(repeat(4).tee(&mut ptr).take(5).read(&mut [0; 10]).unwrap(), 5);
-        }
-        assert_eq!(buf, [4, 4, 4, 4, 4, 0, 0, 0, 0, 0]);
-    }
-
-    #[test]
-    #[allow(deprecated)]
-    fn broadcast() {
-        let mut buf1 = [0; 10];
-        let mut buf2 = [0; 10];
-        {
-            let mut ptr1: &mut [u8] = &mut buf1;
-            let mut ptr2: &mut [u8] = &mut buf2;
-
-            assert_eq!((&mut ptr1).broadcast(&mut ptr2)
-                                  .write(&[1, 2, 3]).unwrap(), 3);
-        }
-        assert_eq!(buf1, buf2);
-        assert_eq!(buf1, [1, 2, 3, 0, 0, 0, 0, 0, 0, 0]);
-    }
 }
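
With Read::tee and Write::broadcast removed (along with their tests above), io::copy — converted to `?` in this hunk — remains the simple way to shuttle bytes from a reader to a writer. A minimal sketch, not part of the patch:

use std::io;

fn main() {
    let mut src: &[u8] = b"some bytes to move";
    let mut dst: Vec<u8> = Vec::new();

    // copy loops read/write_all until the reader is exhausted, retrying reads
    // that fail with ErrorKind::Interrupted.
    let copied = io::copy(&mut src, &mut dst).unwrap();
    assert_eq!(copied, dst.len() as u64);
    assert_eq!(&dst[..], &b"some bytes to move"[..]);
}
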
index e062f9040afc440b3ff4871e1364e5d19af39f71..e14a31453d381601f4ccf4e860f8fc4c95bcadef 100644 (file)
 //! [`thread`]: thread/index.html
 //! [`use std::env`]: env/index.html
 //! [`use`]: ../book/crates-and-modules.html#importing-modules-with-use
-//! [crate root]: ../book/crates-and-modules.html#basic-terminology:-crates-and-modules
+//! [crate root]: ../book/crates-and-modules.html#basic-terminology-crates-and-modules
 //! [crates.io]: https://crates.io
 //! [deref coercions]: ../book/deref-coercions.html
 //! [files]: fs/struct.File.html
 #![feature(collections)]
 #![feature(collections_bound)]
 #![feature(const_fn)]
-#![feature(copy_from_slice)]
 #![feature(core_float)]
 #![feature(core_intrinsics)]
-#![feature(decode_utf16)]
 #![feature(dropck_parametricity)]
 #![feature(float_extras)]
 #![feature(float_from_str_radix)]
 #![feature(fnbox)]
+#![feature(fn_traits)]
 #![feature(heap_api)]
 #![feature(hashmap_hasher)]
+#![feature(inclusive_range)]
 #![feature(int_error_internals)]
 #![feature(into_cow)]
 #![feature(lang_items)]
 #![feature(link_args)]
 #![feature(linkage)]
 #![feature(macro_reexport)]
+#![cfg_attr(test, feature(map_values_mut))]
 #![feature(num_bits_bytes)]
 #![feature(old_wrapping)]
 #![feature(on_unimplemented)]
 #![feature(optin_builtin_traits)]
 #![feature(placement_in_syntax)]
 #![feature(rand)]
-#![feature(range_inclusive)]
 #![feature(raw)]
 #![feature(repr_simd)]
 #![feature(reflect_marker)]
+#![feature(rustc_attrs)]
 #![feature(shared)]
 #![feature(slice_bytes)]
 #![feature(slice_concat_ext)]
 #![feature(unwind_attributes)]
 #![feature(vec_push_all)]
 #![feature(zero_one)]
+#![feature(question_mark)]
 
 // Issue# 30592: Systematically use alloc_system during stage0 since jemalloc
 // might be unavailable or disabled
 #![cfg_attr(not(stage0), deny(warnings))]
 
 #[cfg(test)] extern crate test;
-#[cfg(test)] #[macro_use] extern crate log;
 
 // We want to reexport a few macros from core but libcore has already been
 // imported by the compiler (via our #[no_std] attribute) In this case we just
@@ -418,7 +419,6 @@ pub mod num;
 pub mod thread;
 
 pub mod collections;
-pub mod dynamic_lib;
 pub mod env;
 pub mod ffi;
 pub mod fs;
index d241cd032ed4c07872f56a632077a59fabd93d4a..39adda1066a1194a0695bfefeba931cf0813ad09 100644 (file)
@@ -173,18 +173,6 @@ macro_rules! select {
     })
 }
 
-// When testing the standard library, we link to the liblog crate to get the
-// logging macros. In doing so, the liblog crate was linked against the real
-// version of libstd, and uses a different std::fmt module than the test crate
-// uses. To get around this difference, we redefine the log!() macro here to be
-// just a dumb version of what it should be.
-#[cfg(test)]
-macro_rules! log {
-    ($lvl:expr, $($args:tt)*) => (
-        if log_enabled!($lvl) { println!($($args)*) }
-    )
-}
-
 #[cfg(test)]
 macro_rules! assert_approx_eq {
     ($a:expr, $b:expr) => ({
index 78da9412212a9caf3cfc72909268004464178241..d510339f1c5b492d29941a9cbfe10bc7e4e926d5 100644 (file)
@@ -68,7 +68,7 @@ impl SocketAddr {
     }
 
     /// Change the IP address associated with this socket address.
-    #[unstable(feature = "sockaddr_setters", reason = "recent addition", issue = "31572")]
+    #[stable(feature = "sockaddr_setters", since = "1.9.0")]
     pub fn set_ip(&mut self, new_ip: IpAddr) {
         // `match (*self, new_ip)` would have us mutate a copy of self only to throw it away.
         match (self, new_ip) {
@@ -88,7 +88,7 @@ impl SocketAddr {
     }
 
     /// Change the port number associated with this socket address.
-    #[unstable(feature = "sockaddr_setters", reason = "recent addition", issue = "31572")]
+    #[stable(feature = "sockaddr_setters", since = "1.9.0")]
     pub fn set_port(&mut self, new_port: u16) {
         match *self {
             SocketAddr::V4(ref mut a) => a.set_port(new_port),
@@ -120,16 +120,22 @@ impl SocketAddrV4 {
     }
 
     /// Change the IP address associated with this socket address.
-    #[unstable(feature = "sockaddr_setters", reason = "recent addition", issue = "31572")]
-    pub fn set_ip(&mut self, new_ip: Ipv4Addr) { self.inner.sin_addr = *new_ip.as_inner() }
+    #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+    pub fn set_ip(&mut self, new_ip: Ipv4Addr) {
+        self.inner.sin_addr = *new_ip.as_inner()
+    }
 
     /// Returns the port number associated with this socket address.
     #[stable(feature = "rust1", since = "1.0.0")]
-    pub fn port(&self) -> u16 { ntoh(self.inner.sin_port) }
+    pub fn port(&self) -> u16 {
+        ntoh(self.inner.sin_port)
+    }
 
     /// Change the port number associated with this socket address.
-    #[unstable(feature = "sockaddr_setters", reason = "recent addition", issue = "31572")]
-    pub fn set_port(&mut self, new_port: u16) { self.inner.sin_port = hton(new_port) }
+    #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+    pub fn set_port(&mut self, new_port: u16) {
+        self.inner.sin_port = hton(new_port);
+    }
 }
 
 impl SocketAddrV6 {
@@ -143,8 +149,8 @@ impl SocketAddrV6 {
                 sin6_family: c::AF_INET6 as c::sa_family_t,
                 sin6_port: hton(port),
                 sin6_addr: *ip.as_inner(),
-                sin6_flowinfo: hton(flowinfo),
-                sin6_scope_id: hton(scope_id),
+                sin6_flowinfo: flowinfo,
+                sin6_scope_id: scope_id,
                 .. unsafe { mem::zeroed() }
             },
         }
@@ -159,37 +165,47 @@ impl SocketAddrV6 {
     }
 
     /// Change the IP address associated with this socket address.
-    #[unstable(feature = "sockaddr_setters", reason = "recent addition", issue = "31572")]
-    pub fn set_ip(&mut self, new_ip: Ipv6Addr) { self.inner.sin6_addr = *new_ip.as_inner() }
+    #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+    pub fn set_ip(&mut self, new_ip: Ipv6Addr) {
+        self.inner.sin6_addr = *new_ip.as_inner()
+    }
 
     /// Returns the port number associated with this socket address.
     #[stable(feature = "rust1", since = "1.0.0")]
-    pub fn port(&self) -> u16 { ntoh(self.inner.sin6_port) }
+    pub fn port(&self) -> u16 {
+        ntoh(self.inner.sin6_port)
+    }
 
     /// Change the port number associated with this socket address.
-    #[unstable(feature = "sockaddr_setters", reason = "recent addition", issue = "31572")]
-    pub fn set_port(&mut self, new_port: u16) { self.inner.sin6_port = hton(new_port) }
+    #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+    pub fn set_port(&mut self, new_port: u16) {
+        self.inner.sin6_port = hton(new_port);
+    }
 
     /// Returns the flow information associated with this address,
     /// corresponding to the `sin6_flowinfo` field in C.
     #[stable(feature = "rust1", since = "1.0.0")]
-    pub fn flowinfo(&self) -> u32 { ntoh(self.inner.sin6_flowinfo) }
+    pub fn flowinfo(&self) -> u32 {
+        self.inner.sin6_flowinfo
+    }
 
     /// Change the flow information associated with this socket address.
-    #[unstable(feature = "sockaddr_setters", reason = "recent addition", issue = "31572")]
+    #[stable(feature = "sockaddr_setters", since = "1.9.0")]
     pub fn set_flowinfo(&mut self, new_flowinfo: u32) {
-        self.inner.sin6_flowinfo = hton(new_flowinfo)
+        self.inner.sin6_flowinfo = new_flowinfo;
     }
 
     /// Returns the scope ID associated with this address,
     /// corresponding to the `sin6_scope_id` field in C.
     #[stable(feature = "rust1", since = "1.0.0")]
-    pub fn scope_id(&self) -> u32 { ntoh(self.inner.sin6_scope_id) }
+    pub fn scope_id(&self) -> u32 {
+        self.inner.sin6_scope_id
+    }
 
     /// Change the scope ID associated with this socket address.
-    #[unstable(feature = "sockaddr_setters", reason = "recent addition", issue = "31572")]
+    #[stable(feature = "sockaddr_setters", since = "1.9.0")]
     pub fn set_scope_id(&mut self, new_scope_id: u32) {
-        self.inner.sin6_scope_id = hton(new_scope_id)
+        self.inner.sin6_scope_id = new_scope_id;
     }
 }
 
@@ -431,13 +447,13 @@ impl ToSocketAddrs for (Ipv6Addr, u16) {
 }
 
 fn resolve_socket_addr(s: &str, p: u16) -> io::Result<vec::IntoIter<SocketAddr>> {
-    let ips = try!(lookup_host(s));
-    let v: Vec<_> = try!(ips.map(|a| {
+    let ips = lookup_host(s)?;
+    let v: Vec<_> = ips.map(|a| {
         a.map(|mut a| {
             a.set_port(p);
             a
         })
-    }).collect());
+    }).collect()?;
     Ok(v.into_iter())
 }
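
The sockaddr_setters methods stabilised above (set_ip, set_port, set_flowinfo, set_scope_id) let a socket address be adjusted in place instead of being rebuilt from scratch. A minimal sketch, not part of the patch:

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

fn main() {
    let mut addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);

    // Mutate the port and the IP of an existing address in place.
    addr.set_port(9090);
    addr.set_ip(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)));
    assert_eq!(addr.to_string(), "10.0.0.1:9090");
}
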
 
index 68075af61cf5e8e0e570b61bd3bf07484997883d..adceee6d73ec5cf3a112dd5dd049191dec46b26e 100644 (file)
@@ -121,6 +121,8 @@ impl Ipv4Addr {
     }
 
     /// Returns true if the address appears to be globally routable.
+    /// See [iana-ipv4-special-registry][ipv4-sr].
+    /// [ipv4-sr]: http://goo.gl/RaZ7lg
     ///
     /// The following return false:
     ///
@@ -129,9 +131,10 @@ impl Ipv4Addr {
     /// - the link-local address (169.254.0.0/16)
     /// - the broadcast address (255.255.255.255/32)
     /// - test addresses used for documentation (192.0.2.0/24, 198.51.100.0/24 and 203.0.113.0/24)
+    /// - the unspecified address (0.0.0.0)
     pub fn is_global(&self) -> bool {
         !self.is_private() && !self.is_loopback() && !self.is_link_local() &&
-        !self.is_broadcast() && !self.is_documentation()
+        !self.is_broadcast() && !self.is_documentation() && !self.is_unspecified()
     }
 
     /// Returns true if this is a multicast address.
@@ -276,6 +279,13 @@ impl From<u32> for Ipv4Addr {
     }
 }
 
+#[stable(feature = "from_slice_v4", since = "1.9.0")]
+impl From<[u8; 4]> for Ipv4Addr {
+    fn from(octets: [u8; 4]) -> Ipv4Addr {
+        Ipv4Addr::new(octets[0], octets[1], octets[2], octets[3])
+    }
+}
+
 impl Ipv6Addr {
     /// Creates a new IPv6 address from eight 16-bit segments.
     ///
@@ -360,6 +370,12 @@ impl Ipv6Addr {
         (self.segments()[0] & 0xffc0) == 0xfec0
     }
 
+    /// Returns true if this is an address reserved for documentation
+    /// This is defined to be 2001:db8::/32 in RFC RFC 3849
+    pub fn is_documentation(&self) -> bool {
+        (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8)
+    }
+
     /// Returns true if the address is a globally routable unicast address.
     ///
     /// The following return false:
@@ -368,10 +384,13 @@ impl Ipv6Addr {
     /// - the link-local addresses
     /// - the (deprecated) site-local addresses
     /// - unique local addresses
+    /// - the unspecified address
+    /// - the address range reserved for documentation
     pub fn is_unicast_global(&self) -> bool {
         !self.is_multicast()
             && !self.is_loopback() && !self.is_unicast_link_local()
             && !self.is_unicast_site_local() && !self.is_unique_local()
+            && !self.is_unspecified() && !self.is_documentation()
     }
 
     /// Returns the address's multicast scope if the address is multicast.
@@ -415,6 +434,13 @@ impl Ipv6Addr {
             _ => None
         }
     }
+
+    /// Returns the sixteen eight-bit integers the IPv6 address consists of.
+    #[unstable(feature = "ipv6_to_octets", reason = "needs some testing",
+               issue = "32313")]
+    pub fn octets(&self) -> [u8; 16] {
+        self.inner.s6_addr
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -468,16 +494,16 @@ impl fmt::Display for Ipv6Addr {
                 if zeros_len > 1 {
                     fn fmt_subslice(segments: &[u16], fmt: &mut fmt::Formatter) -> fmt::Result {
                         if !segments.is_empty() {
-                            try!(write!(fmt, "{:x}", segments[0]));
+                            write!(fmt, "{:x}", segments[0])?;
                             for &seg in &segments[1..] {
-                                try!(write!(fmt, ":{:x}", seg));
+                                write!(fmt, ":{:x}", seg)?;
                             }
                         }
                         Ok(())
                     }
 
-                    try!(fmt_subslice(&self.segments()[..zeros_at], fmt));
-                    try!(fmt.write_str("::"));
+                    fmt_subslice(&self.segments()[..zeros_at], fmt)?;
+                    fmt.write_str("::")?;
                     fmt_subslice(&self.segments()[zeros_at + zeros_len..], fmt)
                 } else {
                     let &[a, b, c, d, e, f, g, h] = &self.segments();
@@ -541,6 +567,15 @@ impl FromInner<c::in6_addr> for Ipv6Addr {
     }
 }
 
+#[stable(feature = "ipv6_from_octets", since = "1.9.0")]
+impl From<[u8; 16]> for Ipv6Addr {
+    fn from(octets: [u8; 16]) -> Ipv6Addr {
+        let mut inner: c::in6_addr = unsafe { mem::zeroed() };
+        inner.s6_addr = octets;
+        Ipv6Addr::from_inner(inner)
+    }
+}
+
 // Tests for this module
 #[cfg(test)]
 mod tests {
@@ -725,32 +760,34 @@ mod tests {
         }
 
         //    address                unspec loopbk privt  linloc global multicast brdcast doc
-        check(&[0, 0, 0, 0],         true,  false, false, false, true,  false,    false,  false);
-        check(&[0, 0, 0, 1],         false, false, false, false, true,  false,    false,  false);
-        check(&[1, 0, 0, 0],         false, false, false, false, true,  false,    false,  false);
-        check(&[10, 9, 8, 7],        false, false, true,  false, false, false,    false,  false);
-        check(&[127, 1, 2, 3],       false, true,  false, false, false, false,    false,  false);
-        check(&[172, 31, 254, 253],  false, false, true,  false, false, false,    false,  false);
-        check(&[169, 254, 253, 242], false, false, false, true,  false, false,    false,  false);
-        check(&[192, 0, 2, 183],     false, false, false, false, false, false,    false,  true);
-        check(&[192, 1, 2, 183],     false, false, false, false, true,  false,    false,  false);
-        check(&[192, 168, 254, 253], false, false, true,  false, false, false,    false,  false);
-        check(&[198, 51, 100, 0],    false, false, false, false, false, false,    false,  true);
-        check(&[203, 0, 113, 0],     false, false, false, false, false, false,    false,  true);
-        check(&[203, 2, 113, 0],     false, false, false, false, true,  false,    false,  false);
-        check(&[224, 0, 0, 0],       false, false, false, false, true,  true,     false,  false);
-        check(&[239, 255, 255, 255], false, false, false, false, true,  true,     false,  false);
-        check(&[255, 255, 255, 255], false, false, false, false, false, false,    true,   false);
+        check(&[0, 0, 0, 0],         true,  false, false, false, false,  false,    false,  false);
+        check(&[0, 0, 0, 1],         false, false, false, false, true,   false,    false,  false);
+        check(&[0, 1, 0, 0],         false, false, false, false, true,   false,    false,  false);
+        check(&[10, 9, 8, 7],        false, false, true,  false, false,  false,    false,  false);
+        check(&[127, 1, 2, 3],       false, true,  false, false, false,  false,    false,  false);
+        check(&[172, 31, 254, 253],  false, false, true,  false, false,  false,    false,  false);
+        check(&[169, 254, 253, 242], false, false, false, true,  false,  false,    false,  false);
+        check(&[192, 0, 2, 183],     false, false, false, false, false,  false,    false,  true);
+        check(&[192, 1, 2, 183],     false, false, false, false, true,   false,    false,  false);
+        check(&[192, 168, 254, 253], false, false, true,  false, false,  false,    false,  false);
+        check(&[198, 51, 100, 0],    false, false, false, false, false,  false,    false,  true);
+        check(&[203, 0, 113, 0],     false, false, false, false, false,  false,    false,  true);
+        check(&[203, 2, 113, 0],     false, false, false, false, true,   false,    false,  false);
+        check(&[224, 0, 0, 0],       false, false, false, false, true,   true,     false,  false);
+        check(&[239, 255, 255, 255], false, false, false, false, true,   true,     false,  false);
+        check(&[255, 255, 255, 255], false, false, false, false, false,  false,    true,   false);
     }
 
     #[test]
     fn ipv6_properties() {
-        fn check(str_addr: &str, unspec: bool, loopback: bool,
+        fn check(str_addr: &str, octets: &[u8; 16], unspec: bool, loopback: bool,
                  unique_local: bool, global: bool,
-                 u_link_local: bool, u_site_local: bool, u_global: bool,
+                 u_link_local: bool, u_site_local: bool, u_global: bool, u_doc: bool,
                  m_scope: Option<Ipv6MulticastScope>) {
             let ip: Ipv6Addr = str_addr.parse().unwrap();
             assert_eq!(str_addr, ip.to_string());
+            assert_eq!(&ip.octets(), octets);
+            assert_eq!(Ipv6Addr::from(*octets), ip);
 
             assert_eq!(ip.is_unspecified(), unspec);
             assert_eq!(ip.is_loopback(), loopback);
@@ -759,43 +796,50 @@ mod tests {
             assert_eq!(ip.is_unicast_link_local(), u_link_local);
             assert_eq!(ip.is_unicast_site_local(), u_site_local);
             assert_eq!(ip.is_unicast_global(), u_global);
+            assert_eq!(ip.is_documentation(), u_doc);
             assert_eq!(ip.multicast_scope(), m_scope);
             assert_eq!(ip.is_multicast(), m_scope.is_some());
         }
 
-        //    unspec loopbk uniqlo global unill  unisl  uniglo mscope
-        check("::",
-              true,  false, false, true,  false, false, true,  None);
-        check("::1",
-              false, true,  false, false, false, false, false, None);
-        check("::0.0.0.2",
-              false, false, false, true,  false, false, true,  None);
-        check("1::",
-              false, false, false, true,  false, false, true,  None);
-        check("fc00::",
-              false, false, true,  false, false, false, false, None);
-        check("fdff:ffff::",
-              false, false, true,  false, false, false, false, None);
-        check("fe80:ffff::",
-              false, false, false, false, true,  false, false, None);
-        check("febf:ffff::",
-              false, false, false, false, true,  false, false, None);
-        check("fec0::",
-              false, false, false, false, false, true,  false, None);
-        check("ff01::",
-              false, false, false, false, false, false, false, Some(InterfaceLocal));
-        check("ff02::",
-              false, false, false, false, false, false, false, Some(LinkLocal));
-        check("ff03::",
-              false, false, false, false, false, false, false, Some(RealmLocal));
-        check("ff04::",
-              false, false, false, false, false, false, false, Some(AdminLocal));
-        check("ff05::",
-              false, false, false, false, false, false, false, Some(SiteLocal));
-        check("ff08::",
-              false, false, false, false, false, false, false, Some(OrganizationLocal));
-        check("ff0e::",
-              false, false, false, true,  false, false, false, Some(Global));
+        //    unspec loopbk uniqlo global unill  unisl  uniglo doc    mscope
+        check("::", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+              true,  false, false, false, false, false, false, false, None);
+        check("::1", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
+              false, true,  false, false, false, false, false, false, None);
+        check("::0.0.0.2", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2],
+              false, false, false, true,  false, false, true,  false, None);
+        check("1::", &[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+              false, false, false, true,  false, false, true,  false, None);
+        check("fc00::", &[0xfc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+              false, false, true,  false, false, false, false, false, None);
+        check("fdff:ffff::", &[0xfd, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+              false, false, true,  false, false, false, false, false, None);
+        check("fe80:ffff::", &[0xfe, 0x80, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+              false, false, false, false, true,  false, false, false, None);
+        check("febf:ffff::", &[0xfe, 0xbf, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+              false, false, false, false, true,  false, false, false, None);
+        check("fec0::", &[0xfe, 0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+              false, false, false, false, false, true,  false, false, None);
+        check("ff01::", &[0xff, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+              false, false, false, false, false, false, false, false, Some(InterfaceLocal));
+        check("ff02::", &[0xff, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+              false, false, false, false, false, false, false, false, Some(LinkLocal));
+        check("ff03::", &[0xff, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+              false, false, false, false, false, false, false, false, Some(RealmLocal));
+        check("ff04::", &[0xff, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+              false, false, false, false, false, false, false, false, Some(AdminLocal));
+        check("ff05::", &[0xff, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+              false, false, false, false, false, false, false, false, Some(SiteLocal));
+        check("ff08::", &[0xff, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+              false, false, false, false, false, false, false, false, Some(OrganizationLocal));
+        check("ff0e::", &[0xff, 0xe, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+              false, false, false, true,  false, false, false, false, Some(Global));
+        check("2001:db8:85a3::8a2e:370:7334",
+              &[0x20, 1, 0xd, 0xb8, 0x85, 0xa3, 0, 0, 0, 0, 0x8a, 0x2e, 3, 0x70, 0x73, 0x34],
+              false, false, false, false, false, false, false, true, None);
+        check("102:304:506:708:90a:b0c:d0e:f10",
+              &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+              false, false, false, true,  false, false, true,  false, None);
     }
 
     #[test]
@@ -816,6 +860,11 @@ mod tests {
         assert_eq!(Ipv4Addr::from(2130706433), a);
     }
 
+    #[test]
+    fn ipv4_from_u32_slice() {
+        assert_eq!(Ipv4Addr::from([127, 0, 0, 1]), Ipv4Addr::new(127, 0, 0, 1))
+    }
+
     #[test]
     fn ord() {
         assert!(Ipv4Addr::new(100, 64, 3, 3) < Ipv4Addr::new(192, 0, 2, 2));
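
The new From conversions above build addresses directly from octet arrays ([u8; 4] for Ipv4Addr, [u8; 16] for Ipv6Addr); the companion Ipv6Addr::octets accessor is still unstable at this point (issue 32313). A minimal sketch, not part of the patch:

use std::net::{Ipv4Addr, Ipv6Addr};

fn main() {
    // Four octets make an IPv4 address directly.
    let v4 = Ipv4Addr::from([192, 0, 2, 1]);
    assert_eq!(v4, Ipv4Addr::new(192, 0, 2, 1));

    // Sixteen octets make an IPv6 address; this one is ::1, the loopback.
    let mut sixteen = [0u8; 16];
    sixteen[15] = 1;
    let v6 = Ipv6Addr::from(sixteen);
    assert_eq!(v6, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
    assert!(v6.is_loopback());
}
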
index d263bf38495c14dfa161720142f95fad3e7c6e1a..45070460282710f84e0de8fcd37e179c573f8421 100644 (file)
@@ -74,7 +74,7 @@ fn each_addr<A: ToSocketAddrs, F, T>(addr: A, mut f: F) -> io::Result<T>
     where F: FnMut(&SocketAddr) -> io::Result<T>
 {
     let mut last_err = None;
-    for addr in try!(addr.to_socket_addrs()) {
+    for addr in addr.to_socket_addrs()? {
         match f(&addr) {
             Ok(l) => return Ok(l),
             Err(e) => last_err = Some(e),
@@ -128,33 +128,3 @@ impl Iterator for LookupHost {
 pub fn lookup_host(host: &str) -> io::Result<LookupHost> {
     net_imp::lookup_host(host).map(LookupHost)
 }
-
-/// Resolve the given address to a hostname.
-///
-/// This function may perform a DNS query to resolve `addr` and may also inspect
-/// system configuration to resolve the specified address. If the address
-/// cannot be resolved, it is returned in string format.
-///
-/// # Examples
-///
-/// ```no_run
-/// #![feature(lookup_addr)]
-/// #![feature(ip_addr)]
-///
-/// use std::net::{self, Ipv4Addr, IpAddr};
-///
-/// let ip_addr = "8.8.8.8";
-/// let addr: Ipv4Addr = ip_addr.parse().unwrap();
-/// let hostname = net::lookup_addr(&IpAddr::V4(addr)).unwrap();
-///
-/// println!("{} --> {}", ip_addr, hostname);
-/// // Output: 8.8.8.8 --> google-public-dns-a.google.com
-/// ```
-#[unstable(feature = "lookup_addr", reason = "recent addition",
-           issue = "27705")]
-#[rustc_deprecated(reason = "ipaddr type is being deprecated",
-                   since = "1.6.0")]
-#[allow(deprecated)]
-pub fn lookup_addr(addr: &IpAddr) -> io::Result<String> {
-    net_imp::lookup_addr(addr)
-}
index f9c38c38458475661a87e298266849114991d7e7..a7738e3170049019e12fc12c7c960bb77a134984 100644 (file)
@@ -14,7 +14,6 @@ use io::prelude::*;
 use fmt;
 use io;
 use net::{ToSocketAddrs, SocketAddr, Shutdown};
-use sys_common::io::read_to_end_uninitialized;
 use sys_common::net as net_imp;
 use sys_common::{AsInner, FromInner, IntoInner};
 use time::Duration;
@@ -180,13 +179,73 @@ impl TcpStream {
     pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
         self.0.write_timeout()
     }
+
+    /// Sets the value of the `TCP_NODELAY` option on this socket.
+    ///
+    /// If set, this option disables the Nagle algorithm. This means that
+    /// segments are always sent as soon as possible, even if there is only a
+    /// small amount of data. When not set, data is buffered until there is a
+    /// sufficient amount to send out, thereby avoiding the frequent sending of
+    /// small packets.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+        self.0.set_nodelay(nodelay)
+    }
+
+    /// Gets the value of the `TCP_NODELAY` option on this socket.
+    ///
+    /// For more information about this option, see [`set_nodelay`][link].
+    ///
+    /// [link]: #method.set_nodelay
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn nodelay(&self) -> io::Result<bool> {
+        self.0.nodelay()
+    }
+
+    /// Sets the value for the `IP_TTL` option on this socket.
+    ///
+    /// This value sets the time-to-live field that is used in every packet sent
+    /// from this socket.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+        self.0.set_ttl(ttl)
+    }
+
+    /// Gets the value of the `IP_TTL` option for this socket.
+    ///
+    /// For more information about this option, see [`set_ttl`][link].
+    ///
+    /// [link]: #method.set_ttl
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn ttl(&self) -> io::Result<u32> {
+        self.0.ttl()
+    }
+
+    /// Gets the value of the `SO_ERROR` option on this socket.
+    ///
+    /// This will retrieve the stored error in the underlying socket, clearing
+    /// the field in the process. This can be useful for checking errors between
+    /// calls.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        self.0.take_error()
+    }
+
+    /// Moves this TCP stream into or out of nonblocking mode.
+    ///
+    /// On Unix this corresponds to calling fcntl, and on Windows this
+    /// corresponds to calling ioctlsocket.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+        self.0.set_nonblocking(nonblocking)
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl Read for TcpStream {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.0.read(buf) }
     fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
-        unsafe { read_to_end_uninitialized(self, buf) }
+        self.0.read_to_end(buf)
     }
 }
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -198,7 +257,7 @@ impl Write for TcpStream {
 impl<'a> Read for &'a TcpStream {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.0.read(buf) }
     fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
-        unsafe { read_to_end_uninitialized(self, buf) }
+        self.0.read_to_end(buf)
     }
 }
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -278,6 +337,67 @@ impl TcpListener {
     pub fn incoming(&self) -> Incoming {
         Incoming { listener: self }
     }
+
+    /// Sets the value for the `IP_TTL` option on this socket.
+    ///
+    /// This value sets the time-to-live field that is used in every packet sent
+    /// from this socket.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+        self.0.set_ttl(ttl)
+    }
+
+    /// Gets the value of the `IP_TTL` option for this socket.
+    ///
+    /// For more information about this option, see [`set_ttl`][link].
+    ///
+    /// [link]: #method.set_ttl
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn ttl(&self) -> io::Result<u32> {
+        self.0.ttl()
+    }
+
+    /// Sets the value for the `IPV6_V6ONLY` option on this socket.
+    ///
+    /// If this is set to `true` then the socket is restricted to sending and
+    /// receiving IPv6 packets only. In this case an IPv4 and an IPv6 application
+    /// can each bind the same port at the same time.
+    ///
+    /// If this is set to `false` then the socket can be used to send and
+    /// receive packets from an IPv4-mapped IPv6 address.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+        self.0.set_only_v6(only_v6)
+    }
+
+    /// Gets the value of the `IPV6_V6ONLY` option for this socket.
+    ///
+    /// For more information about this option, see [`set_only_v6`][link].
+    ///
+    /// [link]: #method.set_only_v6
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn only_v6(&self) -> io::Result<bool> {
+        self.0.only_v6()
+    }
+
+    /// Gets the value of the `SO_ERROR` option on this socket.
+    ///
+    /// This will retrieve the stored error in the underlying socket, clearing
+    /// the field in the process. This can be useful for checking errors between
+    /// calls.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        self.0.take_error()
+    }
+
+    /// Moves this TCP listener into or out of nonblocking mode.
+    ///
+    /// On Unix this corresponds to calling fcntl, and on Windows this
+    /// corresponds to calling ioctlsocket.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+        self.0.set_nonblocking(nonblocking)
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -969,4 +1089,55 @@ mod tests {
         assert!(start.elapsed() > Duration::from_millis(400));
         drop(listener);
     }
+
+    #[test]
+    fn nodelay() {
+        let addr = next_test_ip4();
+        let _listener = t!(TcpListener::bind(&addr));
+
+        let stream = t!(TcpStream::connect(&("localhost", addr.port())));
+
+        assert_eq!(false, t!(stream.nodelay()));
+        t!(stream.set_nodelay(true));
+        assert_eq!(true, t!(stream.nodelay()));
+        t!(stream.set_nodelay(false));
+        assert_eq!(false, t!(stream.nodelay()));
+    }
+
+    #[test]
+    fn ttl() {
+        let ttl = 100;
+
+        let addr = next_test_ip4();
+        let listener = t!(TcpListener::bind(&addr));
+
+        t!(listener.set_ttl(ttl));
+        assert_eq!(ttl, t!(listener.ttl()));
+
+        let stream = t!(TcpStream::connect(&("localhost", addr.port())));
+
+        t!(stream.set_ttl(ttl));
+        assert_eq!(ttl, t!(stream.ttl()));
+    }
+
+    #[test]
+    fn set_nonblocking() {
+        let addr = next_test_ip4();
+        let listener = t!(TcpListener::bind(&addr));
+
+        t!(listener.set_nonblocking(true));
+        t!(listener.set_nonblocking(false));
+
+        let mut stream = t!(TcpStream::connect(&("localhost", addr.port())));
+
+        t!(stream.set_nonblocking(false));
+        t!(stream.set_nonblocking(true));
+
+        let mut buf = [0];
+        match stream.read(&mut buf) {
+            Ok(_) => panic!("expected error"),
+            Err(ref e) if e.kind() == ErrorKind::WouldBlock => {}
+            Err(e) => panic!("unexpected error {}", e),
+        }
+    }
 }
index 29ac991a547dc9b4925b854c0b1ea1a0a316c96f..4683c7061c3c83aee1a308f8903b5c846676bcf2 100644 (file)
@@ -10,7 +10,7 @@
 
 use fmt;
 use io::{self, Error, ErrorKind};
-use net::{ToSocketAddrs, SocketAddr};
+use net::{ToSocketAddrs, SocketAddr, Ipv4Addr, Ipv6Addr};
 use sys_common::net as net_imp;
 use sys_common::{AsInner, FromInner, IntoInner};
 use time::Duration;
@@ -70,7 +70,7 @@ impl UdpSocket {
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn send_to<A: ToSocketAddrs>(&self, buf: &[u8], addr: A)
                                      -> io::Result<usize> {
-        match try!(addr.to_socket_addrs()).next() {
+        match addr.to_socket_addrs()?.next() {
             Some(addr) => self.0.send_to(buf, &addr),
             None => Err(Error::new(ErrorKind::InvalidInput,
                                    "no addresses to send data to")),
@@ -140,6 +140,198 @@ impl UdpSocket {
     pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
         self.0.write_timeout()
     }
+
+    /// Sets the value of the `SO_BROADCAST` option for this socket.
+    ///
+    /// When enabled, this socket is allowed to send packets to a broadcast
+    /// address.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn set_broadcast(&self, broadcast: bool) -> io::Result<()> {
+        self.0.set_broadcast(broadcast)
+    }
+
+    /// Gets the value of the `SO_BROADCAST` option for this socket.
+    ///
+    /// For more information about this option, see
+    /// [`set_broadcast`][link].
+    ///
+    /// [link]: #method.set_broadcast
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn broadcast(&self) -> io::Result<bool> {
+        self.0.broadcast()
+    }
+
+    /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket.
+    ///
+    /// If enabled, multicast packets will be looped back to the local socket.
+    /// Note that this may not have any effect on IPv6 sockets.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn set_multicast_loop_v4(&self, multicast_loop_v4: bool) -> io::Result<()> {
+        self.0.set_multicast_loop_v4(multicast_loop_v4)
+    }
+
+    /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket.
+    ///
+    /// For more information about this option, see
+    /// [`set_multicast_loop_v4`][link].
+    ///
+    /// [link]: #method.set_multicast_loop_v4
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+        self.0.multicast_loop_v4()
+    }
+
+    /// Sets the value of the `IP_MULTICAST_TTL` option for this socket.
+    ///
+    /// Indicates the time-to-live value of outgoing multicast packets for
+    /// this socket. The default value is 1, which means that multicast packets
+    /// don't leave the local network unless explicitly requested.
+    ///
+    /// Note that this may not have any effect on IPv6 sockets.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn set_multicast_ttl_v4(&self, multicast_ttl_v4: u32) -> io::Result<()> {
+        self.0.set_multicast_ttl_v4(multicast_ttl_v4)
+    }
+
+    /// Gets the value of the `IP_MULTICAST_TTL` option for this socket.
+    ///
+    /// For more information about this option, see
+    /// [`set_multicast_ttl_v4`][link].
+    ///
+    /// [link]: #method.set_multicast_ttl_v4
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+        self.0.multicast_ttl_v4()
+    }
+
+    /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+    ///
+    /// Controls whether this socket sees the multicast packets it sends itself.
+    /// Note that this may not have any effect on IPv4 sockets.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn set_multicast_loop_v6(&self, multicast_loop_v6: bool) -> io::Result<()> {
+        self.0.set_multicast_loop_v6(multicast_loop_v6)
+    }
+
+    /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+    ///
+    /// For more information about this option, see
+    /// [`set_multicast_loop_v6`][link].
+    ///
+    /// [link]: #method.set_multicast_loop_v6
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+        self.0.multicast_loop_v6()
+    }
+
+    /// Sets the value for the `IP_TTL` option on this socket.
+    ///
+    /// This value sets the time-to-live field that is used in every packet sent
+    /// from this socket.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+        self.0.set_ttl(ttl)
+    }
+
+    /// Gets the value of the `IP_TTL` option for this socket.
+    ///
+    /// For more information about this option, see [`set_ttl`][link].
+    ///
+    /// [link]: #method.set_ttl
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn ttl(&self) -> io::Result<u32> {
+        self.0.ttl()
+    }
+
+    /// Executes an operation of the `IP_ADD_MEMBERSHIP` type.
+    ///
+    /// This function specifies a new multicast group for this socket to join.
+    /// The address must be a valid multicast address, and `interface` is the
+    /// address of the local interface with which the system should join the
+    /// multicast group. If it's equal to `INADDR_ANY` then an appropriate
+    /// interface is chosen by the system.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
+        self.0.join_multicast_v4(multiaddr, interface)
+    }
+
+    /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type.
+    ///
+    /// This function specifies a new multicast group for this socket to join.
+    /// The address must be a valid multicast address, and `interface` is the
+    /// index of the interface to join (or 0 to indicate any interface).
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
+        self.0.join_multicast_v6(multiaddr, interface)
+    }
+
+    /// Executes an operation of the `IP_DROP_MEMBERSHIP` type.
+    ///
+    /// For more information about this option, see
+    /// [`join_multicast_v4`][link].
+    ///
+    /// [link]: #method.join_multicast_v4
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
+        self.0.leave_multicast_v4(multiaddr, interface)
+    }
+
+    /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type.
+    ///
+    /// For more information about this option, see
+    /// [`join_multicast_v6`][link].
+    ///
+    /// [link]: #method.join_multicast_v6
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
+        self.0.leave_multicast_v6(multiaddr, interface)
+    }
+
+    /// Gets the value of the `SO_ERROR` option on this socket.
+    ///
+    /// This will retrieve the stored error in the underlying socket, clearing
+    /// the field in the process. This can be useful for checking errors between
+    /// calls.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        self.0.take_error()
+    }
+
+    /// Connects this UDP socket to a remote address, allowing the `send` and
+    /// `recv` calls to be used to send and receive data, and also applying a
+    /// filter so that only data from the specified address is received.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn connect<A: ToSocketAddrs>(&self, addr: A) -> io::Result<()> {
+        super::each_addr(addr, |addr| self.0.connect(addr))
+    }
+
+    /// Sends data on the socket to the remote address to which it is connected.
+    ///
+    /// The `connect` method will connect this socket to a remote address. This
+    /// method will fail if the socket is not connected.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+        self.0.send(buf)
+    }
+
+    /// Receives data on the socket from the remote address to which it is
+    /// connected.
+    ///
+    /// The `connect` method will connect this socket to a remote address. This
+    /// method will fail if the socket is not connected.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+        self.0.recv(buf)
+    }
+
+    /// Moves this UDP socket into or out of nonblocking mode.
+    ///
+    /// On Unix this corresponds to calling `fcntl`, and on Windows this
+    /// corresponds to calling `ioctlsocket`.
+    #[stable(feature = "net2_mutators", since = "1.9.0")]
+    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+        self.0.set_nonblocking(nonblocking)
+    }
 }
 
 impl AsInner<net_imp::UdpSocket> for UdpSocket {
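
For orientation (editor's sketch, not part of the patch): the new `UdpSocket` methods mirror the TCP ones, with `connect`/`send`/`recv` layered on top of the usual `send_to`/`recv_from`. Multicast joins are left out here because whether they succeed depends on the host's network configuration.

    use std::net::UdpSocket;

    fn main() {
        let socket = UdpSocket::bind("127.0.0.1:0").unwrap();
        let addr = socket.local_addr().unwrap();

        // After connect(), send()/recv() talk to `addr` only, and datagrams
        // from other peers are filtered out.
        socket.connect(addr).unwrap();
        socket.send(b"ping").unwrap();
        let mut buf = [0u8; 4];
        socket.recv(&mut buf).unwrap();
        assert_eq!(&buf, b"ping");

        // The option accessors are plain setsockopt/getsockopt wrappers.
        socket.set_broadcast(true).unwrap();
        assert!(socket.broadcast().unwrap());
        socket.set_multicast_ttl_v4(1).unwrap();
        assert_eq!(socket.multicast_ttl_v4().unwrap(), 1);
    }
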
@@ -400,4 +592,40 @@ mod tests {
         assert!(kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut);
         assert!(start.elapsed() > Duration::from_millis(400));
     }
+
+    #[test]
+    fn connect_send_recv() {
+        let addr = next_test_ip4();
+
+        let socket = t!(UdpSocket::bind(&addr));
+        t!(socket.connect(addr));
+
+        t!(socket.send(b"hello world"));
+
+        let mut buf = [0; 11];
+        t!(socket.recv(&mut buf));
+        assert_eq!(b"hello world", &buf[..]);
+    }
+
+    #[test]
+    fn ttl() {
+        let ttl = 100;
+
+        let addr = next_test_ip4();
+
+        let stream = t!(UdpSocket::bind(&addr));
+
+        t!(stream.set_ttl(ttl));
+        assert_eq!(ttl, t!(stream.ttl()));
+    }
+
+    #[test]
+    fn set_nonblocking() {
+        let addr = next_test_ip4();
+
+        let stream = t!(UdpSocket::bind(&addr));
+
+        t!(stream.set_nonblocking(true));
+        t!(stream.set_nonblocking(false));
+    }
 }
index 7f57d6dc650bab145670458d28143328787cf85e..6fc26bb7eed71fd48940bce6fee5e0858c01446c 100644 (file)
@@ -262,7 +262,7 @@ impl f32 {
     ///
     /// assert!(abs_difference <= f32::EPSILON);
     /// ```
-    /// [floating-point]: ../../../../../reference.html#machine-types
+    /// [floating-point]: ../reference.html#machine-types
     #[unstable(feature = "float_extras", reason = "signature is undecided",
                issue = "27752")]
     #[inline]
@@ -1152,9 +1152,10 @@ impl f32 {
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
     pub fn asinh(self) -> f32 {
-        match self {
-            NEG_INFINITY => NEG_INFINITY,
-            x => (x + ((x * x) + 1.0).sqrt()).ln(),
+        if self == NEG_INFINITY {
+            NEG_INFINITY
+        } else {
+            (self + ((self * self) + 1.0).sqrt()).ln()
         }
     }
 
@@ -1371,6 +1372,7 @@ mod tests {
     }
 
     #[test]
+    #[rustc_no_mir] // FIXME #27840 MIR NAN ends up negative.
     fn test_integer_decode() {
         assert_eq!(3.14159265359f32.integer_decode(), (13176795, -22, 1));
         assert_eq!((-8573.5918555f32).integer_decode(), (8779358, -10, -1));
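
A note on the `asinh` changes in this hunk and the matching `f64` hunk below (editor's addition): the log-based identity used in the general branch, asinh(x) = ln(x + sqrt(x^2 + 1)), degenerates at negative infinity, where `self + (self * self + 1.0).sqrt()` evaluates to `-inf + inf`, i.e. NaN, so the special case is needed in both the old and the new form; the rewrite only swaps the `match` on a floating-point constant for an explicit comparison. A quick check of both branches, as a standalone sketch:

    use std::f32;

    fn main() {
        // General branch: the identity holds for finite inputs.
        let x = 2.0f32;
        let via_identity = (x + (x * x + 1.0).sqrt()).ln();
        assert!((x.asinh() - via_identity).abs() < 1e-6);

        // Special case: -inf maps to -inf rather than NaN.
        assert_eq!(f32::NEG_INFINITY.asinh(), f32::NEG_INFINITY);
    }
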
index a39311f7d108d578a44273a968fb5f475930f6b9..93e5969a275c3f9855fdfd7f1a102c41c6d5d0de 100644 (file)
@@ -206,7 +206,7 @@ impl f64 {
     ///
     /// assert!(abs_difference < 1e-10);
     /// ```
-    /// [floating-point]: ../../../../../reference.html#machine-types
+    /// [floating-point]: ../reference.html#machine-types
     #[unstable(feature = "float_extras", reason = "signature is undecided",
                issue = "27752")]
     #[inline]
@@ -1023,9 +1023,10 @@ impl f64 {
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
     pub fn asinh(self) -> f64 {
-        match self {
-            NEG_INFINITY => NEG_INFINITY,
-            x => (x + ((x * x) + 1.0).sqrt()).ln(),
+        if self == NEG_INFINITY {
+            NEG_INFINITY
+        } else {
+            (self + ((self * self) + 1.0).sqrt()).ln()
         }
     }
 
@@ -1264,6 +1265,7 @@ mod tests {
     }
 
     #[test]
+    #[rustc_no_mir] // FIXME #27840 MIR NAN ends up negative.
     fn test_integer_decode() {
         assert_eq!(3.14159265359f64.integer_decode(), (7074237752028906, -51, 1));
         assert_eq!((-8573.5918555f64).integer_decode(), (4713381968463931, -39, -1));
index dd0d874ee4b7cbdd942e317180ea114c56849ee0..1886b4fdf59c6160a660cbd2f76492b1d8e77d25 100644 (file)
@@ -21,7 +21,7 @@ pub use core::num::{Zero, One};
 #[stable(feature = "rust1", since = "1.0.0")]
 pub use core::num::{FpCategory, ParseIntError, ParseFloatError};
 #[stable(feature = "rust1", since = "1.0.0")]
-pub use core::num::{wrapping, Wrapping};
+pub use core::num::Wrapping;
 
 #[cfg(test)] use cmp::PartialEq;
 #[cfg(test)] use fmt;
index eb09800a18cb04a4a79872a19952f8e9a5149f09..db672e5643531181fb59b0759274cc81d74c7e50 100644 (file)
@@ -63,10 +63,6 @@ pub trait MetadataExt {
     #[stable(feature = "metadata_ext2", since = "1.8.0")]
     fn st_ctime_nsec(&self) -> i64;
     #[stable(feature = "metadata_ext2", since = "1.8.0")]
-    fn st_birthtime(&self) -> i64;
-    #[stable(feature = "metadata_ext2", since = "1.8.0")]
-    fn st_birthtime_nsec(&self) -> i64;
-    #[stable(feature = "metadata_ext2", since = "1.8.0")]
     fn st_blksize(&self) -> u64;
     #[stable(feature = "metadata_ext2", since = "1.8.0")]
     fn st_blocks(&self) -> u64;
@@ -129,12 +125,6 @@ impl MetadataExt for Metadata {
     fn st_ctime_nsec(&self) -> i64 {
         self.as_inner().as_inner().st_ctime_nsec as i64
     }
-    fn st_birthtime(&self) -> i64 {
-        self.as_inner().as_inner().st_birthtime as i64
-    }
-    fn st_birthtime_nsec(&self) -> i64 {
-        self.as_inner().as_inner().st_birthtime_nsec as i64
-    }
     fn st_blksize(&self) -> u64 {
         self.as_inner().as_inner().st_blksize as u64
     }
diff --git a/src/libstd/os/emscripten/fs.rs b/src/libstd/os/emscripten/fs.rs
new file mode 100644 (file)
index 0000000..8056ce4
--- /dev/null
@@ -0,0 +1,128 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use libc;
+
+use fs::Metadata;
+use sys_common::AsInner;
+
+#[allow(deprecated)]
+use os::emscripten::raw;
+
+/// OS-specific extension methods for `fs::Metadata`
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+    /// Gain a reference to the underlying `stat` structure which contains
+    /// the raw information returned by the OS.
+    ///
+    /// The contents of the returned `stat` are **not** consistent across
+    /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+    /// cross-Unix abstractions contained within the raw stat.
+    #[stable(feature = "metadata_ext", since = "1.1.0")]
+    #[rustc_deprecated(since = "1.8.0",
+                       reason = "deprecated in favor of the accessor \
+                                 methods of this trait")]
+    #[allow(deprecated)]
+    fn as_raw_stat(&self) -> &raw::stat;
+
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_dev(&self) -> u64;
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_ino(&self) -> u64;
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_mode(&self) -> u32;
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_nlink(&self) -> u64;
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_uid(&self) -> u32;
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_gid(&self) -> u32;
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_rdev(&self) -> u64;
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_size(&self) -> u64;
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_atime(&self) -> i64;
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_atime_nsec(&self) -> i64;
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_mtime(&self) -> i64;
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_mtime_nsec(&self) -> i64;
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_ctime(&self) -> i64;
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_ctime_nsec(&self) -> i64;
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_blksize(&self) -> u64;
+    #[stable(feature = "metadata_ext2", since = "1.8.0")]
+    fn st_blocks(&self) -> u64;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+    #[allow(deprecated)]
+    fn as_raw_stat(&self) -> &raw::stat {
+        unsafe {
+            &*(self.as_inner().as_inner() as *const libc::stat64
+                                          as *const raw::stat)
+        }
+    }
+    fn st_dev(&self) -> u64 {
+        self.as_inner().as_inner().st_dev as u64
+    }
+    fn st_ino(&self) -> u64 {
+        self.as_inner().as_inner().st_ino as u64
+    }
+    fn st_mode(&self) -> u32 {
+        self.as_inner().as_inner().st_mode as u32
+    }
+    fn st_nlink(&self) -> u64 {
+        self.as_inner().as_inner().st_nlink as u64
+    }
+    fn st_uid(&self) -> u32 {
+        self.as_inner().as_inner().st_uid as u32
+    }
+    fn st_gid(&self) -> u32 {
+        self.as_inner().as_inner().st_gid as u32
+    }
+    fn st_rdev(&self) -> u64 {
+        self.as_inner().as_inner().st_rdev as u64
+    }
+    fn st_size(&self) -> u64 {
+        self.as_inner().as_inner().st_size as u64
+    }
+    fn st_atime(&self) -> i64 {
+        self.as_inner().as_inner().st_atime as i64
+    }
+    fn st_atime_nsec(&self) -> i64 {
+        self.as_inner().as_inner().st_atime_nsec as i64
+    }
+    fn st_mtime(&self) -> i64 {
+        self.as_inner().as_inner().st_mtime as i64
+    }
+    fn st_mtime_nsec(&self) -> i64 {
+        self.as_inner().as_inner().st_mtime_nsec as i64
+    }
+    fn st_ctime(&self) -> i64 {
+        self.as_inner().as_inner().st_ctime as i64
+    }
+    fn st_ctime_nsec(&self) -> i64 {
+        self.as_inner().as_inner().st_ctime_nsec as i64
+    }
+    fn st_blksize(&self) -> u64 {
+        self.as_inner().as_inner().st_blksize as u64
+    }
+    fn st_blocks(&self) -> u64 {
+        self.as_inner().as_inner().st_blocks as u64
+    }
+}
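
For illustration (editor's sketch, not part of the patch): the emscripten trait above only compiles when targeting emscripten, but its accessors mirror the cross-Unix `std::os::unix::fs::MetadataExt` mentioned in the doc comment, which can be exercised like this on any Unix host:

    use std::fs;
    use std::os::unix::fs::MetadataExt;

    fn main() {
        // The accessor methods expose raw stat fields without the deprecated
        // as_raw_stat() escape hatch.
        let meta = fs::metadata(".").expect("stat failed");
        println!("ino={} mode={:o} size={}", meta.ino(), meta.mode(), meta.size());
    }
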
diff --git a/src/libstd/os/emscripten/mod.rs b/src/libstd/os/emscripten/mod.rs
new file mode 100644 (file)
index 0000000..8ec44b9
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Emscripten-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod raw;
+pub mod fs;
diff --git a/src/libstd/os/emscripten/raw.rs b/src/libstd/os/emscripten/raw.rs
new file mode 100644 (file)
index 0000000..9da400a
--- /dev/null
@@ -0,0 +1,80 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Emscripten-specific raw type definitions.
+//! These are essentially the same as the Linux definitions, except that
+//! they use the musl-specific `stat64` structure from liblibc.
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![rustc_deprecated(since = "1.8.0",
+                    reason = "these type aliases are no longer supported by \
+                              the standard library, the `libc` crate on \
+                              crates.io should be used instead for the correct \
+                              definitions")]
+#![allow(deprecated)]
+
+use os::raw::{c_long, c_short, c_uint, c_ulong};
+
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type mode_t = u32;
+
+#[unstable(feature = "pthread_t", issue = "29791")] pub type pthread_t = c_ulong;
+
+#[doc(inline)]
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type blkcnt_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type blksize_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type ino_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type nlink_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type off_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type time_t = c_long;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_dev: u64,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub __pad1: c_short,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub __st_ino: u32,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_mode: u32,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_nlink: u32,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_uid: u32,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_gid: u32,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_rdev: u64,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub __pad2: c_uint,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_size: i64,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_blksize: i32,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_blocks: i64,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_atime: time_t,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_atime_nsec: c_long,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_mtime: time_t,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_mtime_nsec: c_long,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_ctime: time_t,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_ctime_nsec: c_long,
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub st_ino: u64,
+}
index e15c8d67a8a6c7638fed251a420f649b3d3a5023..a91d251fc120aaa504823dbfcd68afa4d27bfb57 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! OS-specific functionality
+//! OS-specific functionality.
 
 #![stable(feature = "os", since = "1.0.0")]
 #![allow(missing_docs, bad_style)]
@@ -31,10 +31,6 @@ pub use sys::ext as windows;
 #[cfg(target_os = "netbsd")]   pub mod netbsd;
 #[cfg(target_os = "openbsd")]   pub mod openbsd;
 #[cfg(target_os = "solaris")]   pub mod solaris;
-
-// Emscripten is just like linux
-#[cfg(target_os = "emscripten")]
-#[path = "linux/mod.rs"]
-pub mod emscripten;
+#[cfg(target_os = "emscripten")] pub mod emscripten;
 
 pub mod raw;
index 69a1b57a0c538237274fde28ae07ef372536f51f..16401c4527f160b15dab692c8872aeff10018ccf 100644 (file)
@@ -10,8 +10,7 @@
 
 //! Panic support in the standard library
 
-#![unstable(feature = "std_panic", reason = "awaiting feedback",
-            issue = "27719")]
+#![stable(feature = "std_panic", since = "1.9.0")]
 
 use any::Any;
 use boxed::Box;
@@ -23,7 +22,22 @@ use sync::{Arc, Mutex, RwLock};
 use sys_common::unwind;
 use thread::Result;
 
-pub use panicking::{take_handler, set_handler, PanicInfo, Location};
+#[unstable(feature = "panic_handler", issue = "30449")]
+pub use panicking::{take_hook, set_hook, PanicInfo, Location};
+
+/// Deprecated, renamed to `set_hook`
+#[rustc_deprecated(since = "1.9.0", reason = "renamed to set_hook")]
+#[unstable(feature = "panic_handler", reason = "awaiting feedback", issue = "30449")]
+pub fn set_handler<F>(handler: F) where F: Fn(&PanicInfo) + 'static + Sync + Send {
+    set_hook(Box::new(handler))
+}
+
+/// Deprecated, renamed to `take_hook`
+#[rustc_deprecated(since = "1.9.0", reason = "renamed to take_hook")]
+#[unstable(feature = "panic_handler", reason = "awaiting feedback", issue = "30449")]
+pub fn take_handler() -> Box<Fn(&PanicInfo) + 'static + Sync + Send> {
+    take_hook()
+}
 
 /// A marker trait which represents "panic safe" types in Rust.
 ///
@@ -78,7 +92,7 @@ pub use panicking::{take_handler, set_handler, PanicInfo, Location};
 /// "speed bump" to alert users of `recover` that broken invariants may be
 /// witnessed and may need to be accounted for.
 ///
-/// ## Who implements `RecoverSafe`?
+/// ## Who implements `UnwindSafe`?
 ///
 /// Types such as `&mut T` and `&RefCell<T>` are examples which are **not**
 /// recover safe. The general idea is that any mutable state which can be shared
@@ -90,7 +104,7 @@ pub use panicking::{take_handler, set_handler, PanicInfo, Location};
 /// poisoning by default. They still allow witnessing a broken invariant, but
 /// they already provide their own "speed bumps" to do so.
 ///
-/// ## When should `RecoverSafe` be used?
+/// ## When should `UnwindSafe` be used?
 ///
 /// It is not intended that most types or functions need to worry about this trait.
 /// It is only used as a bound on the `recover` function and as mentioned above,
@@ -98,10 +112,18 @@ pub use panicking::{take_handler, set_handler, PanicInfo, Location};
 /// wrapper struct in this module can be used to force this trait to be
 /// implemented for any closed over variables passed to the `recover` function
 /// (more on this below).
-#[unstable(feature = "recover", reason = "awaiting feedback", issue = "27719")]
+#[stable(feature = "catch_unwind", since = "1.9.0")]
 #[rustc_on_unimplemented = "the type {Self} may not be safely transferred \
                             across a recover boundary"]
+pub trait UnwindSafe {}
+
+/// Deprecated, renamed to `UnwindSafe`
+#[unstable(feature = "recover", reason = "awaiting feedback", issue = "27719")]
+#[rustc_deprecated(reason = "renamed to `UnwindSafe`", since = "1.9.0")]
 pub trait RecoverSafe {}
+#[unstable(feature = "recover", reason = "awaiting feedback", issue = "27719")]
+#[allow(deprecated)]
+impl<T: UnwindSafe> RecoverSafe for T {}
 
 /// A marker trait representing types where a shared reference is considered
 /// recover safe.
@@ -110,12 +132,12 @@ pub trait RecoverSafe {}
 /// interior mutability.
 ///
 /// This is a "helper marker trait" used to provide impl blocks for the
-/// `RecoverSafe` trait, for more information see that documentation.
-#[unstable(feature = "recover", reason = "awaiting feedback", issue = "27719")]
+/// `UnwindSafe` trait, for more information see that documentation.
+#[stable(feature = "catch_unwind", since = "1.9.0")]
 #[rustc_on_unimplemented = "the type {Self} contains interior mutability \
                             and a reference may not be safely transferrable \
                             across a recover boundary"]
-pub trait RefRecoverSafe {}
+pub trait RefUnwindSafe {}
 
 /// A simple wrapper around a type to assert that it is panic safe.
 ///
@@ -129,78 +151,161 @@ pub trait RefRecoverSafe {}
 ///
 /// # Examples
 ///
-/// ```
-/// #![feature(recover, std_panic)]
+/// One way to use `AssertUnwindSafe` is to assert that the entire closure
+/// itself is recover safe, bypassing all checks for all variables:
 ///
-/// use std::panic::{self, AssertRecoverSafe};
+/// ```
+/// use std::panic::{self, AssertUnwindSafe};
 ///
 /// let mut variable = 4;
 ///
 /// // This code will not compile because the closure captures `&mut variable`
 /// // which is not considered panic safe by default.
 ///
-/// // panic::recover(|| {
+/// // panic::catch_unwind(|| {
 /// //     variable += 3;
 /// // });
 ///
-/// // This, however, will compile due to the `AssertRecoverSafe` wrapper
+/// // This, however, will compile due to the `AssertUnwindSafe` wrapper
+/// let result = panic::catch_unwind(AssertUnwindSafe(|| {
+///     variable += 3;
+/// }));
+/// // ...
+/// ```
+///
+/// Wrapping the entire closure amounts to a blanket assertion that all captured
+/// variables are unwind safe. This has the downside that if new captures are
+/// added in the future, they will also be considered unwind safe. Therefore,
+/// you may prefer to just wrap individual captures, as shown below. This is
+/// more annotation, but it ensures that if a new capture is added which is not
+/// unwind safe, you will get a compilation error at that time, which will
+/// allow you to consider whether that new capture in fact represents a bug or
+/// not.
+///
+/// ```
+/// use std::panic::{self, AssertUnwindSafe};
+///
+/// let mut variable = 4;
+/// let other_capture = 3;
+///
 /// let result = {
-///     let mut wrapper = AssertRecoverSafe::new(&mut variable);
-///     panic::recover(move || {
-///         **wrapper += 3;
+///     let mut wrapper = AssertUnwindSafe(&mut variable);
+///     panic::catch_unwind(move || {
+///         **wrapper += other_capture;
 ///     })
 /// };
 /// // ...
 /// ```
-#[unstable(feature = "recover", reason = "awaiting feedback", issue = "27719")]
-pub struct AssertRecoverSafe<T>(T);
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+pub struct AssertUnwindSafe<T>(
+    #[stable(feature = "catch_unwind", since = "1.9.0")]
+    pub T
+);
 
-// Implementations of the `RecoverSafe` trait:
+/// Deprecated, renamed to `AssertUnwindSafe`
+#[unstable(feature = "recover", issue = "27719")]
+#[rustc_deprecated(reason = "renamed to `AssertUnwindSafe`", since = "1.9.0")]
+pub struct AssertRecoverSafe<T>(pub T);
+
+// Implementations of the `UnwindSafe` trait:
 //
-// * By default everything is recover safe
-// * pointers T contains mutability of some form are not recover safe
+// * By default everything is unwind safe
+// * pointers to T that contain mutability of some form are not unwind safe
 // * Unique, an owning pointer, lifts an implementation
-// * Types like Mutex/RwLock which are explicilty poisoned are recover safe
-// * Our custom AssertRecoverSafe wrapper is indeed recover safe
-impl RecoverSafe for .. {}
-impl<'a, T: ?Sized> !RecoverSafe for &'a mut T {}
-impl<'a, T: RefRecoverSafe + ?Sized> RecoverSafe for &'a T {}
-impl<T: RefRecoverSafe + ?Sized> RecoverSafe for *const T {}
-impl<T: RefRecoverSafe + ?Sized> RecoverSafe for *mut T {}
-impl<T: RecoverSafe> RecoverSafe for Unique<T> {}
-impl<T: RefRecoverSafe + ?Sized> RecoverSafe for Shared<T> {}
-impl<T: ?Sized> RecoverSafe for Mutex<T> {}
-impl<T: ?Sized> RecoverSafe for RwLock<T> {}
-impl<T> RecoverSafe for AssertRecoverSafe<T> {}
+// * Types like Mutex/RwLock which are explicitly poisoned are unwind safe
+// * Our custom AssertUnwindSafe wrapper is indeed unwind safe
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl UnwindSafe for .. {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<'a, T: ?Sized> !UnwindSafe for &'a mut T {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<'a, T: RefUnwindSafe + ?Sized> UnwindSafe for &'a T {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: RefUnwindSafe + ?Sized> UnwindSafe for *const T {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: RefUnwindSafe + ?Sized> UnwindSafe for *mut T {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: UnwindSafe> UnwindSafe for Unique<T> {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Shared<T> {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: ?Sized> UnwindSafe for Mutex<T> {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: ?Sized> UnwindSafe for RwLock<T> {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T> UnwindSafe for AssertUnwindSafe<T> {}
+#[unstable(feature = "recover", issue = "27719")]
+#[allow(deprecated)]
+impl<T> UnwindSafe for AssertRecoverSafe<T> {}
 
 // not covered via the Shared impl above b/c the inner contents use
 // Cell/AtomicUsize, but the usage here is recover safe so we can lift the
 // impl up one level to Arc/Rc itself
-impl<T: RefRecoverSafe + ?Sized> RecoverSafe for Rc<T> {}
-impl<T: RefRecoverSafe + ?Sized> RecoverSafe for Arc<T> {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Rc<T> {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Arc<T> {}
 
 // Pretty simple implementations for the `RefRecoverSafe` marker trait,
 // basically just saying that this is a marker trait and `UnsafeCell` is the
 // only thing which doesn't implement it (which then transitively applies to
 // everything else).
-impl RefRecoverSafe for .. {}
-impl<T: ?Sized> !RefRecoverSafe for UnsafeCell<T> {}
-impl<T> RefRecoverSafe for AssertRecoverSafe<T> {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl RefUnwindSafe for .. {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: ?Sized> !RefUnwindSafe for UnsafeCell<T> {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T> RefUnwindSafe for AssertUnwindSafe<T> {}
+#[unstable(feature = "recover", issue = "27719")]
+#[allow(deprecated)]
+impl<T> RefUnwindSafe for AssertRecoverSafe<T> {}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T> Deref for AssertUnwindSafe<T> {
+    type Target = T;
 
+    fn deref(&self) -> &T {
+        &self.0
+    }
+}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T> DerefMut for AssertUnwindSafe<T> {
+    fn deref_mut(&mut self) -> &mut T {
+        &mut self.0
+    }
+}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<R, F: FnOnce() -> R> FnOnce<()> for AssertUnwindSafe<F> {
+    type Output = R;
+
+    extern "rust-call" fn call_once(self, _args: ()) -> R {
+        (self.0)()
+    }
+}
+
+#[allow(deprecated)]
 impl<T> AssertRecoverSafe<T> {
     /// Creates a new `AssertRecoverSafe` wrapper around the provided type.
     #[unstable(feature = "recover", reason = "awaiting feedback", issue = "27719")]
+    #[rustc_deprecated(reason = "the type's field is now public, construct it directly",
+                       since = "1.9.0")]
     pub fn new(t: T) -> AssertRecoverSafe<T> {
         AssertRecoverSafe(t)
     }
 
     /// Consumes the `AssertRecoverSafe`, returning the wrapped value.
     #[unstable(feature = "recover", reason = "awaiting feedback", issue = "27719")]
+    #[rustc_deprecated(reason = "the type's field is now public, access it directly",
+                       since = "1.9.0")]
     pub fn into_inner(self) -> T {
         self.0
     }
 }
 
+#[unstable(feature = "recover", issue = "27719")]
+#[allow(deprecated)]
 impl<T> Deref for AssertRecoverSafe<T> {
     type Target = T;
 
@@ -209,13 +314,25 @@ impl<T> Deref for AssertRecoverSafe<T> {
     }
 }
 
+#[unstable(feature = "recover", issue = "27719")]
+#[allow(deprecated)]
 impl<T> DerefMut for AssertRecoverSafe<T> {
     fn deref_mut(&mut self) -> &mut T {
         &mut self.0
     }
 }
 
-/// Invokes a closure, capturing the cause of panic if one occurs.
+#[unstable(feature = "recover", issue = "27719")]
+#[allow(deprecated)]
+impl<R, F: FnOnce() -> R> FnOnce<()> for AssertRecoverSafe<F> {
+    type Output = R;
+
+    extern "rust-call" fn call_once(self, _args: ()) -> R {
+        (self.0)()
+    }
+}
+
+/// Invokes a closure, capturing the cause of an unwinding panic if one occurs.
 ///
 /// This function will return `Ok` with the closure's result if the closure
 /// does not panic, and will return `Err(cause)` if the closure panics. The
@@ -228,67 +345,92 @@ impl<T> DerefMut for AssertRecoverSafe<T> {
 ///
 /// It is **not** recommended to use this function for a general try/catch
 /// mechanism. The `Result` type is more appropriate to use for functions that
-/// can fail on a regular basis.
-///
-/// The closure provided is required to adhere to the `RecoverSafe` to ensure
-/// that all captured variables are safe to cross this recover boundary. The
-/// purpose of this bound is to encode the concept of [exception safety][rfc] in
-/// the type system. Most usage of this function should not need to worry about
-/// this bound as programs are naturally panic safe without `unsafe` code. If it
-/// becomes a problem the associated `AssertRecoverSafe` wrapper type in this
+/// can fail on a regular basis. Additionally, this function is not guaranteed
+/// to catch all panics; see the "Notes" section below.
+///
+/// The closure provided is required to adhere to the `UnwindSafe` bound to ensure
+/// that all captured variables are safe to cross this boundary. The purpose of
+/// this bound is to encode the concept of [exception safety][rfc] in the type
+/// system. Most usage of this function should not need to worry about this
+/// bound as programs are naturally panic safe without `unsafe` code. If it
+/// becomes a problem the associated `AssertUnwindSafe` wrapper type in this
 /// module can be used to quickly assert that the usage here is indeed exception
 /// safe.
 ///
 /// [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/1236-stabilize-catch-panic.md
 ///
+/// # Notes
+///
+/// Note that this function **may not catch all panics** in Rust. A panic in
+/// Rust is not always implemented via unwinding, but can be implemented by
+/// aborting the process as well. This function *only* catches unwinding panics,
+/// not those that abort the process.
+///
 /// # Examples
 ///
 /// ```
-/// #![feature(recover, std_panic)]
-///
 /// use std::panic;
 ///
-/// let result = panic::recover(|| {
+/// let result = panic::catch_unwind(|| {
 ///     println!("hello!");
 /// });
 /// assert!(result.is_ok());
 ///
-/// let result = panic::recover(|| {
+/// let result = panic::catch_unwind(|| {
 ///     panic!("oh no!");
 /// });
 /// assert!(result.is_err());
 /// ```
-#[unstable(feature = "recover", reason = "awaiting feedback", issue = "27719")]
-pub fn recover<F: FnOnce() -> R + RecoverSafe, R>(f: F) -> Result<R> {
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+pub fn catch_unwind<F: FnOnce() -> R + UnwindSafe, R>(f: F) -> Result<R> {
     let mut result = None;
     unsafe {
         let result = &mut result;
-        try!(unwind::try(move || *result = Some(f())))
+        unwind::try(move || *result = Some(f()))?
     }
     Ok(result.unwrap())
 }
 
+/// Deprecated, renamed to `catch_unwind`
+#[unstable(feature = "recover", reason = "awaiting feedback", issue = "27719")]
+#[rustc_deprecated(reason = "renamed to `catch_unwind`", since = "1.9.0")]
+pub fn recover<F: FnOnce() -> R + UnwindSafe, R>(f: F) -> Result<R> {
+    catch_unwind(f)
+}
+
 /// Triggers a panic without invoking the panic handler.
 ///
-/// This is designed to be used in conjunction with `recover` to, for example,
-/// carry a panic across a layer of C code.
+/// This is designed to be used in conjunction with `catch_unwind` to, for
+/// example, carry a panic across a layer of C code.
+///
+/// # Notes
+///
+/// Note that panics in Rust are not always implemented via unwinding, but they
+/// may be implemented by aborting the process. If this function is called when
+/// panics are implemented this way then this function will abort the process,
+/// not trigger an unwind.
 ///
 /// # Examples
 ///
 /// ```should_panic
-/// #![feature(std_panic, recover, panic_propagate)]
-///
 /// use std::panic;
 ///
-/// let result = panic::recover(|| {
+/// let result = panic::catch_unwind(|| {
 ///     panic!("oh no!");
 /// });
 ///
 /// if let Err(err) = result {
-///     panic::propagate(err);
+///     panic::resume_unwind(err);
 /// }
 /// ```
+#[stable(feature = "resume_unwind", since = "1.9.0")]
+pub fn resume_unwind(payload: Box<Any + Send>) -> ! {
+    unwind::rust_panic(payload)
+}
+
+/// Deprecated, renamed to `resume_unwind`
 #[unstable(feature = "panic_propagate", reason = "awaiting feedback", issue = "30752")]
+#[rustc_deprecated(reason = "renamed to `resume_unwind`", since = "1.9.0")]
 pub fn propagate(payload: Box<Any + Send>) -> ! {
-    unwind::rust_panic(payload)
+    resume_unwind(payload)
 }
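
Putting the renamed pieces together (editor's sketch, not part of the patch): `catch_unwind` captures the payload of an unwinding panic, and `resume_unwind` re-raises it later, for example after the panic has been carried across a layer of C code. `AssertUnwindSafe` is needed in the second call only because the captured `Box<Any + Send>` payload is not automatically unwind safe.

    use std::panic::{self, AssertUnwindSafe};

    fn main() {
        // Capture the panic instead of unwinding past this point.
        let result = panic::catch_unwind(|| {
            panic!("original panic");
        });

        // Later (e.g. on the far side of an FFI boundary), re-raise it.
        if let Err(payload) = result {
            let rethrown = panic::catch_unwind(AssertUnwindSafe(|| {
                panic::resume_unwind(payload);
            }));
            assert!(rethrown.is_err());
        }
    }
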
index 490c5f4b3522e41479b155a944c5b20de4b6df80..fd6a15b0f69a3f8bebf751eae3cfeeb643e7fbed 100644 (file)
@@ -32,73 +32,71 @@ thread_local! {
 }
 
 #[derive(Copy, Clone)]
-enum Handler {
+enum Hook {
     Default,
     Custom(*mut (Fn(&PanicInfo) + 'static + Sync + Send)),
 }
 
-static HANDLER_LOCK: StaticRwLock = StaticRwLock::new();
-static mut HANDLER: Handler = Handler::Default;
+static HOOK_LOCK: StaticRwLock = StaticRwLock::new();
+static mut HOOK: Hook = Hook::Default;
 static FIRST_PANIC: AtomicBool = AtomicBool::new(true);
 
-/// Registers a custom panic handler, replacing any that was previously
-/// registered.
+/// Registers a custom panic hook, replacing any that was previously registered.
 ///
-/// The panic handler is invoked when a thread panics, but before it begins
-/// unwinding the stack. The default handler prints a message to standard error
+/// The panic hook is invoked when a thread panics, but before it begins
+/// unwinding the stack. The default hook prints a message to standard error
 /// and generates a backtrace if requested, but this behavior can be customized
-/// with the `set_handler` and `take_handler` functions.
+/// with the `set_hook` and `take_hook` functions.
 ///
-/// The handler is provided with a `PanicInfo` struct which contains information
+/// The hook is provided with a `PanicInfo` struct which contains information
 /// about the origin of the panic, including the payload passed to `panic!` and
 /// the source code location from which the panic originated.
 ///
-/// The panic handler is a global resource.
+/// The panic hook is a global resource.
 ///
 /// # Panics
 ///
 /// Panics if called from a panicking thread.
 #[unstable(feature = "panic_handler", reason = "awaiting feedback", issue = "30449")]
-pub fn set_handler<F>(handler: F) where F: Fn(&PanicInfo) + 'static + Sync + Send {
+pub fn set_hook(hook: Box<Fn(&PanicInfo) + 'static + Sync + Send>) {
     if thread::panicking() {
-        panic!("cannot modify the panic handler from a panicking thread");
+        panic!("cannot modify the panic hook from a panicking thread");
     }
 
-    let handler = Box::new(handler);
     unsafe {
-        let lock = HANDLER_LOCK.write();
-        let old_handler = HANDLER;
-        HANDLER = Handler::Custom(Box::into_raw(handler));
+        let lock = HOOK_LOCK.write();
+        let old_hook = HOOK;
+        HOOK = Hook::Custom(Box::into_raw(hook));
         drop(lock);
 
-        if let Handler::Custom(ptr) = old_handler {
+        if let Hook::Custom(ptr) = old_hook {
             Box::from_raw(ptr);
         }
     }
 }
 
-/// Unregisters the current panic handler, returning it.
+/// Unregisters the current panic hook, returning it.
 ///
-/// If no custom handler is registered, the default handler will be returned.
+/// If no custom hook is registered, the default hook will be returned.
 ///
 /// # Panics
 ///
 /// Panics if called from a panicking thread.
 #[unstable(feature = "panic_handler", reason = "awaiting feedback", issue = "30449")]
-pub fn take_handler() -> Box<Fn(&PanicInfo) + 'static + Sync + Send> {
+pub fn take_hook() -> Box<Fn(&PanicInfo) + 'static + Sync + Send> {
     if thread::panicking() {
-        panic!("cannot modify the panic handler from a panicking thread");
+        panic!("cannot modify the panic hook from a panicking thread");
     }
 
     unsafe {
-        let lock = HANDLER_LOCK.write();
-        let handler = HANDLER;
-        HANDLER = Handler::Default;
+        let lock = HOOK_LOCK.write();
+        let hook = HOOK;
+        HOOK = Hook::Default;
         drop(lock);
 
-        match handler {
-            Handler::Default => Box::new(default_handler),
-            Handler::Custom(ptr) => {Box::from_raw(ptr)} // FIXME #30530
+        match hook {
+            Hook::Default => Box::new(default_hook),
+            Hook::Custom(ptr) => {Box::from_raw(ptr)} // FIXME #30530
         }
     }
 }
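
A minimal sketch of the renamed hook API (editor's addition, not part of the patch): `set_hook` installs a global callback that runs before unwinding starts, and `take_hook` removes it, returning the previous hook and restoring the default one.

    use std::panic;

    fn main() {
        // Runs for every panic until the hook is replaced or taken.
        panic::set_hook(Box::new(|_info| {
            println!("custom panic hook invoked");
        }));

        let _ = panic::catch_unwind(|| {
            panic!("caught, but the hook still ran");
        });

        // Remove the custom hook; the default printer is back in place.
        let _previous = panic::take_hook();
        let _ = panic::catch_unwind(|| {
            panic!("printed by the default hook");
        });
    }
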
@@ -151,7 +149,7 @@ impl<'a> Location<'a> {
     }
 }
 
-fn default_handler(info: &PanicInfo) {
+fn default_hook(info: &PanicInfo) {
     let panics = PANIC_COUNT.with(|s| s.get());
 
     // If this is a double panic, make sure that we print a backtrace
@@ -224,10 +222,10 @@ pub fn on_panic(obj: &(Any+Send), file: &'static str, line: u32) {
     };
 
     unsafe {
-        let _lock = HANDLER_LOCK.read();
-        match HANDLER {
-            Handler::Default => default_handler(&info),
-            Handler::Custom(ptr) => (*ptr)(&info),
+        let _lock = HOOK_LOCK.read();
+        match HOOK {
+            Hook::Default => default_hook(&info),
+            Hook::Custom(ptr) => (*ptr)(&info),
         }
     }
 
index 94967bfb96ad315c9a860ea3f10efd34ce470c4d..5309cc3c858f6536b9d15dd1378c46d2537c1fbe 100644 (file)
 #![stable(feature = "rust1", since = "1.0.0")]
 
 use ascii::*;
-#[allow(deprecated)]
-use borrow::{Borrow, IntoCow, ToOwned, Cow};
+use borrow::{Borrow, ToOwned, Cow};
 use cmp;
 use error::Error;
 use fmt;
@@ -781,14 +780,6 @@ impl<'a> Components<'a> {
             }
         }
     }
-
-    /// Examine the next component without consuming it.
-    #[unstable(feature = "path_components_peek", issue = "27727")]
-    #[rustc_deprecated(reason = "use peekable() instead",
-                       since = "1.6.0")]
-    pub fn peek(&self) -> Option<Component<'a>> {
-        self.clone().next()
-    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -1218,22 +1209,6 @@ impl Borrow<Path> for PathBuf {
     }
 }
 
-#[stable(feature = "rust1", since = "1.0.0")]
-#[allow(deprecated)]
-impl IntoCow<'static, Path> for PathBuf {
-    fn into_cow(self) -> Cow<'static, Path> {
-        Cow::Owned(self)
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[allow(deprecated)]
-impl<'a> IntoCow<'a, Path> for &'a Path {
-    fn into_cow(self) -> Cow<'a, Path> {
-        Cow::Borrowed(self)
-    }
-}
-
 #[stable(feature = "cow_from_path", since = "1.6.0")]
 impl<'a> From<&'a Path> for Cow<'a, Path> {
     #[inline]
@@ -1474,17 +1449,7 @@ impl Path {
         !self.is_absolute()
     }
 
-    /// Returns the *prefix* of a path, if any.
-    ///
-    /// Prefixes are relevant only for Windows paths, and consist of volumes
-    /// like `C:`, UNC prefixes like `\\server`, and others described in more
-    /// detail in `std::os::windows::PathExt`.
-    #[unstable(feature = "path_prefix",
-               reason = "uncertain whether to expose this convenience",
-               issue = "27722")]
-    #[rustc_deprecated(since = "1.7.0",
-                       reason = "inspect components().next() instead")]
-    pub fn prefix(&self) -> Option<Prefix> {
+    fn prefix(&self) -> Option<Prefix> {
         self.components().prefix
     }
 
@@ -1566,17 +1531,6 @@ impl Path {
         })
     }
 
-    /// Returns a path that, when joined onto `base`, yields `self`.
-    ///
-    /// If `base` is not a prefix of `self` (i.e. `starts_with`
-    /// returns false), then `relative_from` returns `None`.
-    #[unstable(feature = "path_relative_from", reason = "see #23284",
-               issue = "23284")]
-    #[rustc_deprecated(since = "1.7.0", reason = "renamed to strip_prefix")]
-    pub fn relative_from<'a, P: ?Sized + AsRef<Path>>(&'a self, base: &'a P) -> Option<&Path> {
-        self._strip_prefix(base.as_ref()).ok()
-    }
-
     /// Returns a path that, when joined onto `base`, yields `self`.
     ///
     /// # Errors
@@ -2236,27 +2190,6 @@ mod tests {
         );
     );
 
-    #[test]
-    #[allow(deprecated)]
-    fn into_cow() {
-        use borrow::{Cow, IntoCow};
-
-        let static_path = Path::new("/home/foo");
-        let static_cow_path: Cow<'static, Path> = static_path.into_cow();
-        let pathbuf = PathBuf::from("/home/foo");
-
-        {
-            let path: &Path = &pathbuf;
-            let borrowed_cow_path: Cow<Path> = path.into_cow();
-
-            assert_eq!(static_cow_path, borrowed_cow_path);
-        }
-
-        let owned_cow_path: Cow<'static, Path> = pathbuf.into_cow();
-
-        assert_eq!(static_cow_path, owned_cow_path);
-    }
-
     #[test]
     fn into() {
         use borrow::Cow;
index ebd299efa78db9ca89206ec41af9885f921b861a..f4cd319f06454a128b5ec701f3de83f56f3d1a36 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! The Rust Prelude
+//! The Rust Prelude.
 //!
 //! Rust comes with a variety of things in its standard library. However, if
 //! you had to manually import every single thing that you used, it would be
index 839287d13217bed2e7a98da483dc56ceba551d59..e083605a2acd5bf1ed38d6b1d7b29023b38cd296 100644 (file)
 /// assert!(!bool_val);
 /// ```
 ///
-/// [`assert!`]: std/macro.assert!.html
-/// [`if` conditionals]: ../../book/if.html
-/// [`BitAnd`]: ../ops/trait.BitAnd.html
-/// [`BitOr`]: ../ops/trait.BitOr.html
-/// [`Not`]: ../ops/trait.Not.html
+/// [`assert!`]: macro.assert!.html
+/// [`if` conditionals]: ../book/if.html
+/// [`BitAnd`]: ops/trait.BitAnd.html
+/// [`BitOr`]: ops/trait.BitOr.html
+/// [`Not`]: ops/trait.Not.html
 ///
 /// # Examples
 ///
@@ -54,7 +54,7 @@
 /// }
 /// ```
 ///
-/// Also, since `bool` implements the [`Copy`](../marker/trait.Copy.html) trait, we don't
+/// Also, since `bool` implements the [`Copy`](marker/trait.Copy.html) trait, we don't
 /// have to worry about the move semantics (just like the integer and float primitives).
 mod prim_bool { }
 
@@ -77,7 +77,7 @@ mod prim_bool { }
 /// # Representation
 ///
 /// `char` is always four bytes in size. This is a different representation than
-/// a given character would have as part of a [`String`], for example:
+/// a given character would have as part of a [`String`]. For example:
 ///
 /// ```
 /// let v = vec!['h', 'e', 'l', 'l', 'o'];
@@ -116,8 +116,8 @@ mod prim_bool { }
 ///             ^~
 /// ```
 ///
-/// Another implication of the 4-byte fixed size of a `char`, is that
-/// per-`char`acter processing can end up using a lot more memory:
+/// Another implication of the 4-byte fixed size of a `char` is that
+/// per-`char` processing can end up using a lot more memory:
 ///
 /// ```
 /// let s = String::from("love: ❤️");
@@ -370,7 +370,7 @@ mod prim_slice { }
 /// // story has nineteen bytes
 /// assert_eq!(19, len);
 ///
-/// // We can re-build a str out of ptr and len. This is all unsafe becuase
+/// // We can re-build a str out of ptr and len. This is all unsafe because
 /// // we are responsible for making sure the two components are valid:
 /// let s = unsafe {
 ///     // First, we build a &[u8]...
@@ -421,14 +421,15 @@ mod prim_str { }
 /// assert_eq!(tuple.2, 'c');
 /// ```
 ///
-/// For more about tuples, see [the book](../../book/primitive-types.html#tuples).
+/// For more about tuples, see [the book](../book/primitive-types.html#tuples).
 ///
 /// # Trait implementations
 ///
-/// If every type inside a tuple implements one of the following
-/// traits, then a tuple itself also implements it.
+/// If every type inside a tuple implements one of the following traits, then a
+/// tuple itself also implements it.
 ///
 /// * [`Clone`]
+/// * [`Copy`]
 /// * [`PartialEq`]
 /// * [`Eq`]
 /// * [`PartialOrd`]
@@ -437,14 +438,15 @@ mod prim_str { }
 /// * [`Default`]
 /// * [`Hash`]
 ///
-/// [`Clone`]: ../clone/trait.Clone.html
-/// [`PartialEq`]: ../cmp/trait.PartialEq.html
-/// [`Eq`]: ../cmp/trait.Eq.html
-/// [`PartialOrd`]: ../cmp/trait.PartialOrd.html
-/// [`Ord`]: ../cmp/trait.Ord.html
-/// [`Debug`]: ../fmt/trait.Debug.html
-/// [`Default`]: ../default/trait.Default.html
-/// [`Hash`]: ../hash/trait.Hash.html
+/// [`Clone`]: clone/trait.Clone.html
+/// [`Copy`]: marker/trait.Copy.html
+/// [`PartialEq`]: cmp/trait.PartialEq.html
+/// [`Eq`]: cmp/trait.Eq.html
+/// [`PartialOrd`]: cmp/trait.PartialOrd.html
+/// [`Ord`]: cmp/trait.Ord.html
+/// [`Debug`]: fmt/trait.Debug.html
+/// [`Default`]: default/trait.Default.html
+/// [`Hash`]: hash/trait.Hash.html
 ///
 /// Due to a temporary restriction in Rust's type system, these traits are only
 /// implemented on tuples of arity 32 or less. In the future, this may change.
index 8db8ad324bea984c9fd2b8d64c2e7dd2cbd84ed2..c1ef60852a7c856e314dea260471ad0c8dc731c6 100644 (file)
@@ -20,10 +20,9 @@ use fmt;
 use io;
 use path::Path;
 use str;
-use sys::pipe::AnonPipe;
+use sys::pipe::{read2, AnonPipe};
 use sys::process as imp;
 use sys_common::{AsInner, AsInnerMut, FromInner, IntoInner};
-use thread::{self, JoinHandle};
 
 /// Representation of a running or exited child process.
 ///
@@ -134,6 +133,9 @@ impl Read for ChildStdout {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
         self.inner.read(buf)
     }
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        self.inner.read_to_end(buf)
+    }
 }
 
 impl AsInner<AnonPipe> for ChildStdout {
@@ -161,6 +163,9 @@ impl Read for ChildStderr {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
         self.inner.read(buf)
     }
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        self.inner.read_to_end(buf)
+    }
 }
 
 impl AsInner<AnonPipe> for ChildStderr {
@@ -289,7 +294,7 @@ impl Command {
     /// By default, stdin, stdout and stderr are inherited from the parent.
     #[stable(feature = "process", since = "1.0.0")]
     pub fn spawn(&mut self) -> io::Result<Child> {
-        self.inner.spawn(imp::Stdio::Inherit).map(Child::from_inner)
+        self.inner.spawn(imp::Stdio::Inherit, true).map(Child::from_inner)
     }
 
     /// Executes the command as a child process, waiting for it to finish and
@@ -312,7 +317,7 @@ impl Command {
     /// ```
     #[stable(feature = "process", since = "1.0.0")]
     pub fn output(&mut self) -> io::Result<Output> {
-        self.inner.spawn(imp::Stdio::MakePipe).map(Child::from_inner)
+        self.inner.spawn(imp::Stdio::MakePipe, false).map(Child::from_inner)
             .and_then(|p| p.wait_with_output())
     }
 
@@ -334,7 +339,8 @@ impl Command {
     /// ```
     #[stable(feature = "process", since = "1.0.0")]
     pub fn status(&mut self) -> io::Result<ExitStatus> {
-        self.spawn().and_then(|mut p| p.wait())
+        self.inner.spawn(imp::Stdio::Inherit, true).map(Child::from_inner)
+                  .and_then(|mut p| p.wait())
     }
 }
 
@@ -496,24 +502,29 @@ impl Child {
     #[stable(feature = "process", since = "1.0.0")]
     pub fn wait_with_output(mut self) -> io::Result<Output> {
         drop(self.stdin.take());
-        fn read<R>(mut input: R) -> JoinHandle<io::Result<Vec<u8>>>
-            where R: Read + Send + 'static
-        {
-            thread::spawn(move || {
-                let mut ret = Vec::new();
-                input.read_to_end(&mut ret).map(|_| ret)
-            })
+
+        let (mut stdout, mut stderr) = (Vec::new(), Vec::new());
+        match (self.stdout.take(), self.stderr.take()) {
+            (None, None) => {}
+            (Some(mut out), None) => {
+                let res = out.read_to_end(&mut stdout);
+                res.unwrap();
+            }
+            (None, Some(mut err)) => {
+                let res = err.read_to_end(&mut stderr);
+                res.unwrap();
+            }
+            (Some(out), Some(err)) => {
+                let res = read2(out.inner, &mut stdout, err.inner, &mut stderr);
+                res.unwrap();
+            }
         }
-        let stdout = self.stdout.take().map(read);
-        let stderr = self.stderr.take().map(read);
-        let status = try!(self.wait());
-        let stdout = stdout.and_then(|t| t.join().unwrap().ok());
-        let stderr = stderr.and_then(|t| t.join().unwrap().ok());
 
+        let status = self.wait()?;
         Ok(Output {
             status: status,
-            stdout: stdout.unwrap_or(Vec::new()),
-            stderr: stderr.unwrap_or(Vec::new()),
+            stdout: stdout,
+            stderr: stderr,
         })
     }
 }
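
A brief usage sketch of the path rewritten above (not part of the diff): `output()` now drains stdout and stderr together on the calling thread via `read2` instead of spawning a helper thread per pipe; from the caller's side the behavior is unchanged. The `sh` invocation below is only an assumed example and presumes a Unix-like shell.

    use std::process::Command;

    fn main() {
        // Spawn a child that writes to both pipes; `output()` collects both
        // streams plus the exit status in a single call.
        let out = Command::new("sh")
            .arg("-c")
            .arg("echo to-stdout; echo to-stderr 1>&2")
            .output()
            .expect("failed to run child process");

        assert!(out.status.success());
        println!("stdout: {}", String::from_utf8_lossy(&out.stdout));
        println!("stderr: {}", String::from_utf8_lossy(&out.stderr));
    }
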
index fcd827e2a8b722e9b4e18d17299d90cf12192ee0..83091c72c0d6bea2a2c4a0a990679effac0fd93e 100644 (file)
@@ -60,7 +60,7 @@ fn lang_start(main: *const u8, argc: isize, argv: *const *const u8) -> isize {
         sys_common::args::init(argc, argv);
 
         // Let's run some code!
-        let res = panic::recover(mem::transmute::<_, fn()>(main));
+        let res = panic::catch_unwind(mem::transmute::<_, fn()>(main));
         sys_common::cleanup();
         res.is_err()
     };
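
The hunk above tracks the rename of `panic::recover` to `panic::catch_unwind`. A minimal sketch of the renamed API from user code, assuming a toolchain where it is available:

    use std::panic;

    fn main() {
        // A panic inside the closure is converted into an Err value instead
        // of unwinding past this point.
        let result = panic::catch_unwind(|| {
            panic!("boom");
        });
        assert!(result.is_err());
        println!("panic was caught and reported as Err");
    }
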
index 4df6ca5f0b88e0aa2ca49c5485f78d460cc6fb40..b543240c15afb898ff98b5027ef0cbe703711792 100644 (file)
@@ -17,16 +17,21 @@ use sync::{Mutex, Condvar};
 /// use std::sync::{Arc, Barrier};
 /// use std::thread;
 ///
+/// let mut handles = Vec::with_capacity(10);
 /// let barrier = Arc::new(Barrier::new(10));
 /// for _ in 0..10 {
 ///     let c = barrier.clone();
 ///     // The same messages will be printed together.
 ///     // You will NOT see any interleaving.
-///     thread::spawn(move|| {
+///     handles.push(thread::spawn(move|| {
 ///         println!("before wait");
 ///         c.wait();
 ///         println!("after wait");
-///     });
+///     }));
+/// }
+/// // Wait for other threads to finish.
+/// for handle in handles {
+///     handle.join().unwrap();
 /// }
 /// ```
 #[stable(feature = "rust1", since = "1.0.0")]
index 0ff3a6907026ed34cb5a699dbc986ccc46187f9f..64468be396f344f2f875fc079aacac2dfbbe5de5 100644 (file)
@@ -167,13 +167,12 @@ impl Condvar {
     /// returns, regardless of whether the timeout elapsed or not.
     #[stable(feature = "rust1", since = "1.0.0")]
     #[rustc_deprecated(since = "1.6.0", reason = "replaced by `std::sync::Condvar::wait_timeout`")]
-    #[allow(deprecated)]
     pub fn wait_timeout_ms<'a, T>(&self, guard: MutexGuard<'a, T>, ms: u32)
                                   -> LockResult<(MutexGuard<'a, T>, bool)> {
-        unsafe {
-            let me: &'static Condvar = &*(self as *const _);
-            me.inner.wait_timeout_ms(guard, ms)
-        }
+        let res = self.wait_timeout(guard, Duration::from_millis(ms as u64));
+        poison::map_result(res, |(a, b)| {
+            (a, !b.timed_out())
+        })
     }
 
     /// Waits on this condition variable for a notification, timing out after a
@@ -200,30 +199,6 @@ impl Condvar {
         }
     }
 
-    /// Waits on this condition variable for a notification, timing out after a
-    /// specified duration.
-    ///
-    /// The semantics of this function are equivalent to `wait_timeout` except
-    /// that the implementation will repeatedly wait while the duration has not
-    /// passed and the provided function returns `false`.
-    #[unstable(feature = "wait_timeout_with",
-               reason = "unsure if this API is broadly needed or what form it should take",
-               issue = "27748")]
-    #[rustc_deprecated(since = "1.8.0",
-                       reason = "wonky signature and questionable \
-                                 implementation didn't justify existence")]
-    pub fn wait_timeout_with<'a, T, F>(&self,
-                                       guard: MutexGuard<'a, T>,
-                                       dur: Duration,
-                                       f: F)
-                                       -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)>
-            where F: FnMut(LockResult<&mut T>) -> bool {
-        unsafe {
-            let me: &'static Condvar = &*(self as *const _);
-            me.inner.wait_timeout_with(guard, dur, f)
-        }
-    }
-
     /// Wakes up one blocked thread on this condvar.
     ///
     /// If there is a blocked thread on this condition variable, then it will
@@ -286,26 +261,6 @@ impl StaticCondvar {
         }
     }
 
-    /// Waits on this condition variable for a notification, timing out after a
-    /// specified duration.
-    ///
-    /// See `Condvar::wait_timeout`.
-    #[unstable(feature = "static_condvar",
-               reason = "may be merged with Condvar in the future",
-               issue = "27717")]
-    #[rustc_deprecated(since = "1.6.0",
-                       reason = "replaced by `std::sync::StaticCondvar::wait_timeout`")]
-    pub fn wait_timeout_ms<'a, T>(&'static self, guard: MutexGuard<'a, T>, ms: u32)
-                                  -> LockResult<(MutexGuard<'a, T>, bool)> {
-        match self.wait_timeout(guard, Duration::from_millis(ms as u64)) {
-            Ok((guard, timed_out)) => Ok((guard, !timed_out.timed_out())),
-            Err(poison) => {
-                let (guard, timed_out) = poison.into_inner();
-                Err(PoisonError::new((guard, !timed_out.timed_out())))
-            }
-        }
-    }
-
     /// Waits on this condition variable for a notification, timing out after a
     /// specified duration.
     ///
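
The deprecated `wait_timeout_ms` above becomes a thin shim over `wait_timeout`, with the boolean inverted because `WaitTimeoutResult::timed_out()` reports "timed out" where the old flag reported "not timed out". A small usage sketch of the replacement API (illustrative only, not taken from the diff):

    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;
    use std::time::Duration;

    fn main() {
        let pair = Arc::new((Mutex::new(false), Condvar::new()));
        let pair2 = pair.clone();

        thread::spawn(move || {
            let &(ref lock, ref cvar) = &*pair2;
            *lock.lock().unwrap() = true;
            cvar.notify_one();
        });

        let &(ref lock, ref cvar) = &*pair;
        let mut ready = lock.lock().unwrap();
        while !*ready {
            // `wait_timeout` takes a Duration and reports a timeout directly.
            let (guard, result) =
                cvar.wait_timeout(ready, Duration::from_millis(100)).unwrap();
            ready = guard;
            if result.timed_out() {
                println!("timed out, checking the flag again");
            }
        }
        println!("flag observed");
    }
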
index 9c9aa20eff5c0e292b95a6a368e8c63f9bceaaf1..c20b422d40cf5b907e9f8e3dff3825ca6e3f8d29 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! Useful synchronization primitives
+//! Useful synchronization primitives.
 //!
 //! This module contains useful safe and unsafe synchronization primitives.
 //! Most of the primitives in this module do not provide any sort of locking
@@ -38,9 +38,6 @@ pub use sys_common::poison::{PoisonError, TryLockError, TryLockResult, LockResul
 pub use self::rwlock::{RwLockReadGuard, RwLockWriteGuard};
 #[stable(feature = "rust1", since = "1.0.0")]
 pub use self::rwlock::{RwLock, StaticRwLock, RW_LOCK_INIT};
-#[stable(feature = "rust1", since = "1.0.0")]
-#[allow(deprecated)]
-pub use self::semaphore::{Semaphore, SemaphoreGuard};
 
 pub mod mpsc;
 
@@ -49,4 +46,3 @@ mod condvar;
 mod mutex;
 mod once;
 mod rwlock;
-mod semaphore;
index fadca390986ceb3f7f5a87751a627cb64f9ac2f7..dbcc2bc95bc2147511f201a82960373ad591ef25 100644 (file)
@@ -299,6 +299,9 @@ pub struct Receiver<T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 unsafe impl<T: Send> Send for Receiver<T> { }
 
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> !Sync for Receiver<T> { }
+
 /// An iterator over messages on a receiver, this iterator will block
 /// whenever `next` is called, waiting for a new message, and `None` will be
 /// returned when the corresponding channel has hung up.
@@ -327,6 +330,9 @@ pub struct Sender<T> {
 #[stable(feature = "rust1", since = "1.0.0")]
 unsafe impl<T: Send> Send for Sender<T> { }
 
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> !Sync for Sender<T> { }
+
 /// The sending-half of Rust's synchronous channel type. This half can only be
 /// owned by one thread, but it can be cloned to send to other threads.
 #[stable(feature = "rust1", since = "1.0.0")]
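
The `!Sync` impls added above make explicit that a `Sender` or `Receiver` is never shared between threads directly; as the surrounding docs say, a `Sender` is cloned and each clone is moved into its own thread. A minimal sketch of that pattern (illustrative, not from the diff):

    use std::sync::mpsc;
    use std::thread;

    fn main() {
        let (tx, rx) = mpsc::channel();
        let mut handles = Vec::new();
        for id in 0..4 {
            let tx = tx.clone();            // one Sender clone per thread
            handles.push(thread::spawn(move || {
                tx.send(id).unwrap();
            }));
        }
        drop(tx);                           // drop our copy so the iterator ends
        for handle in handles {
            handle.join().unwrap();
        }
        let received: Vec<i32> = rx.iter().collect();
        assert_eq!(received.len(), 4);
        println!("got {:?}", received);
    }
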
index 34c9c7cf1e9e44c734e148ba7b47dc65e2ba61c5..e0946a5c12a77d25ccd077757a988e870192473e 100644 (file)
@@ -234,7 +234,7 @@ impl<T: ?Sized> Mutex<T> {
     pub fn try_lock(&self) -> TryLockResult<MutexGuard<T>> {
         unsafe {
             if self.inner.lock.try_lock() {
-                Ok(try!(MutexGuard::new(&*self.inner, &self.data)))
+                Ok(MutexGuard::new(&*self.inner, &self.data)?)
             } else {
                 Err(TryLockError::WouldBlock)
             }
@@ -353,7 +353,7 @@ impl StaticMutex {
     pub fn try_lock(&'static self) -> TryLockResult<MutexGuard<()>> {
         unsafe {
             if self.lock.try_lock() {
-                Ok(try!(MutexGuard::new(self, &DUMMY.0)))
+                Ok(MutexGuard::new(self, &DUMMY.0)?)
             } else {
                 Err(TryLockError::WouldBlock)
             }
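
The `try!` to `?` conversions in this file recur throughout the rest of the commit; for `Result` the two forms behave identically, returning `Err(From::from(e))` early on error. A small illustrative sketch (not from the diff):

    use std::fs::File;
    use std::io::{self, Read};

    // `File::open(path)?` evaluates to the Ok value, or returns
    // Err(From::from(e)) early, exactly as try!(File::open(path)) did.
    fn read_config(path: &str) -> io::Result<String> {
        let mut file = File::open(path)?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;
        Ok(contents)
    }

    fn main() {
        // A missing file surfaces as an Err from the function, not a panic.
        assert!(read_config("/no/such/file").is_err());
        println!("error propagated with `?` just as with `try!`");
    }
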
index a91e1321149a3fa8eb210f6e120f2385ea68edda..e228d236a3ca716317dd709ecd79471c508dc971 100644 (file)
 //! This primitive is meant to be used to run one-time initialization. An
 //! example use case would be for initializing an FFI library.
 
-use isize;
-use sync::atomic::{AtomicIsize, Ordering};
-use sync::StaticMutex;
+// A "once" is a relatively simple primitive, and it's also typically provided
+// by the OS as well (see `pthread_once` or `InitOnceExecuteOnce`). The OS
+// primitives, however, tend to have surprising restrictions, such as the Unix
+// one doesn't allow an argument to be passed to the function.
+//
+// As a result, we end up implementing it ourselves in the standard library.
+// This also gives us the opportunity to optimize the implementation a bit which
+// should help the fast path on call sites. Consequently, let's explain how this
+// primitive works now!
+//
+// So to recap, the guarantees of a Once are that it will call the
+// initialization closure at most once, and it will never return until the one
+// that's running has finished running. This means that we need some form of
+// blocking here while the custom callback is running at the very least.
+// Additionally, we add on the restriction of **poisoning**. Whenever an
+// initialization closure panics, the Once enters a "poisoned" state which means
+// that all future calls will immediately panic as well.
+//
+// So to implement this, one might first reach for a `StaticMutex`, but those
+// unfortunately need to be deallocated (e.g. call `destroy()`) to free memory
+// on all OSes (some of the BSDs allocate memory for mutexes). It also gets a
+// lot harder with poisoning to figure out when the mutex needs to be
+// deallocated because it's not after the closure finishes, but after the first
+// successful closure finishes.
+//
+// All in all, this is instead implemented with atomics and lock-free
+// operations! Whee! Each `Once` has one word of atomic state, and this state is
+// CAS'd on to determine what to do. There are four possible states of a `Once`:
+//
+// * Incomplete - no initialization has run yet, and no thread is currently
+//                using the Once.
+// * Poisoned - some thread has previously attempted to initialize the Once, but
+//              it panicked, so the Once is now poisoned. There are no other
+//              threads currently accessing this Once.
+// * Running - some thread is currently attempting to run initialization. It may
+//             succeed, so all future threads need to wait for it to finish.
+//             Note that this state is accompanied with a payload, described
+//             below.
+// * Complete - initialization has completed and all future calls should finish
+//              immediately.
+//
+// With 4 states we need 2 bits to encode this, and we use the remaining bits
+// in the word we have allocated as a queue of threads waiting for the thread
+// responsible for entering the RUNNING state. This queue is just a linked list
+// of Waiter nodes which is monotonically increasing in size. Each node is
+// allocated on the stack, and whenever the running closure finishes it will
+// consume the entire queue and notify all waiters they should try again.
+//
+// You'll find a few more details in the implementation, but that's the gist of
+// it!
+
+use marker;
+use sync::atomic::{AtomicUsize, AtomicBool, Ordering};
+use thread::{self, Thread};
 
 /// A synchronization primitive which can be used to run a one-time global
 /// initialization. Useful for one-time initialization for FFI or related
@@ -35,23 +86,62 @@ use sync::StaticMutex;
 /// ```
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Once {
-    mutex: StaticMutex,
-    cnt: AtomicIsize,
-    lock_cnt: AtomicIsize,
+    // This `state` word is actually an encoded version of just a pointer to a
+    // `Waiter`, so we add the `PhantomData` appropriately.
+    state: AtomicUsize,
+    _marker: marker::PhantomData<*mut Waiter>,
+}
+
+// The `PhantomData` of a raw pointer removes these two auto traits, but we
+// enforce both below in the implementation so this should be safe to add.
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl Sync for Once {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl Send for Once {}
+
+/// State yielded to the `call_once_force` method which can be used to query
+/// whether the `Once` was previously poisoned or not.
+#[unstable(feature = "once_poison", issue = "31688")]
+pub struct OnceState {
+    poisoned: bool,
 }
 
 /// Initialization value for static `Once` values.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub const ONCE_INIT: Once = Once::new();
 
+// Four states that a Once can be in, encoded into the lower bits of `state` in
+// the Once structure.
+const INCOMPLETE: usize = 0x0;
+const POISONED: usize = 0x1;
+const RUNNING: usize = 0x2;
+const COMPLETE: usize = 0x3;
+
+// Mask to learn about the state. All other bits are the queue of waiters if
+// this is in the RUNNING state.
+const STATE_MASK: usize = 0x3;
+
+// Representation of a node in the linked list of waiters in the RUNNING state.
+struct Waiter {
+    thread: Option<Thread>,
+    signaled: AtomicBool,
+    next: *mut Waiter,
+}
+
+// Helper struct used to clean up after a closure call with a `Drop`
+// implementation to also run on panic.
+struct Finish {
+    panicked: bool,
+    me: &'static Once,
+}
+
 impl Once {
     /// Creates a new `Once` value.
     #[stable(feature = "once_new", since = "1.2.0")]
     pub const fn new() -> Once {
         Once {
-            mutex: StaticMutex::new(),
-            cnt: AtomicIsize::new(0),
-            lock_cnt: AtomicIsize::new(0),
+            state: AtomicUsize::new(INCOMPLETE),
+            _marker: marker::PhantomData,
         }
     }
 
@@ -68,73 +158,223 @@ impl Once {
     /// be reliably observed by other threads at this point (there is a
     /// happens-before relation between the closure and code executing after the
     /// return).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::{Once, ONCE_INIT};
+    ///
+    /// static mut VAL: usize = 0;
+    /// static INIT: Once = ONCE_INIT;
+    ///
+    /// // Accessing a `static mut` is unsafe much of the time, but if we do so
+    /// // in a synchronized fashion (e.g. write once or read all) then we're
+    /// // good to go!
+    /// //
+    /// // This function will only call `expensive_computation` once, and will
+    /// // otherwise always return the value returned from the first invocation.
+    /// fn get_cached_val() -> usize {
+    ///     unsafe {
+    ///         INIT.call_once(|| {
+    ///             VAL = expensive_computation();
+    ///         });
+    ///         VAL
+    ///     }
+    /// }
+    ///
+    /// fn expensive_computation() -> usize {
+    ///     // ...
+    /// # 2
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// The closure `f` will only be executed once if this is called
+    /// concurrently amongst many threads. If that closure panics, however, then
+    /// it will *poison* this `Once` instance, causing all future invocations of
+    /// `call_once` to also panic.
+    ///
+    /// This is similar to [poisoning with mutexes][poison].
+    ///
+    /// [poison]: struct.Mutex.html#poisoning
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn call_once<F>(&'static self, f: F) where F: FnOnce() {
-        // Optimize common path: load is much cheaper than fetch_add.
-        if self.cnt.load(Ordering::SeqCst) < 0 {
+        // Fast path, just see if we've completed initialization.
+        if self.state.load(Ordering::SeqCst) == COMPLETE {
             return
         }
 
-        // Implementation-wise, this would seem like a fairly trivial primitive.
-        // The stickler part is where our mutexes currently require an
-        // allocation, and usage of a `Once` shouldn't leak this allocation.
-        //
-        // This means that there must be a deterministic destroyer of the mutex
-        // contained within (because it's not needed after the initialization
-        // has run).
-        //
-        // The general scheme here is to gate all future threads once
-        // initialization has completed with a "very negative" count, and to
-        // allow through threads to lock the mutex if they see a non negative
-        // count. For all threads grabbing the mutex, exactly one of them should
-        // be responsible for unlocking the mutex, and this should only be done
-        // once everyone else is done with the mutex.
-        //
-        // This atomicity is achieved by swapping a very negative value into the
-        // shared count when the initialization routine has completed. This will
-        // read the number of threads which will at some point attempt to
-        // acquire the mutex. This count is then squirreled away in a separate
-        // variable, and the last person on the way out of the mutex is then
-        // responsible for destroying the mutex.
-        //
-        // It is crucial that the negative value is swapped in *after* the
-        // initialization routine has completed because otherwise new threads
-        // calling `call_once` will return immediately before the initialization
-        // has completed.
-
-        let prev = self.cnt.fetch_add(1, Ordering::SeqCst);
-        if prev < 0 {
-            // Make sure we never overflow, we'll never have isize::MIN
-            // simultaneous calls to `call_once` to make this value go back to 0
-            self.cnt.store(isize::MIN, Ordering::SeqCst);
+        let mut f = Some(f);
+        self.call_inner(false, &mut |_| f.take().unwrap()());
+    }
+
+    /// Performs the same function as `call_once` except ignores poisoning.
+    ///
+    /// If this `Once` has been poisoned (some initialization panicked) then
+    /// this function will continue to attempt to call initialization functions
+    /// until one of them doesn't panic.
+    ///
+    /// The closure `f` is yielded a structure which can be used to query the
+    /// state of this `Once` (whether initialization has previously panicked or
+    /// not).
+    #[unstable(feature = "once_poison", issue = "31688")]
+    pub fn call_once_force<F>(&'static self, f: F) where F: FnOnce(&OnceState) {
+        // same as above, just with a different parameter to `call_inner`.
+        if self.state.load(Ordering::SeqCst) == COMPLETE {
             return
         }
 
-        // If the count is negative, then someone else finished the job,
-        // otherwise we run the job and record how many people will try to grab
-        // this lock
-        let guard = self.mutex.lock();
-        if self.cnt.load(Ordering::SeqCst) > 0 {
-            f();
-            let prev = self.cnt.swap(isize::MIN, Ordering::SeqCst);
-            self.lock_cnt.store(prev, Ordering::SeqCst);
+        let mut f = Some(f);
+        self.call_inner(true, &mut |p| {
+            f.take().unwrap()(&OnceState { poisoned: p })
+        });
+    }
+
+    // This is a non-generic function to reduce the monomorphization cost of
+    // using `call_once` (this isn't exactly a trivial or small implementation).
+    //
+    // Additionally, this is tagged with `#[cold]` as it should indeed be cold,
+    // and it lets LLVM know that calls to this function should be off the
+    // fast path. Essentially, this should help generate more straight-line
+    // code in LLVM.
+    //
+    // Finally, this takes an `FnMut` instead of a `FnOnce` because there's
+    // currently no way to take an `FnOnce` and call it via virtual dispatch
+    // without some allocation overhead.
+    #[cold]
+    fn call_inner(&'static self,
+                  ignore_poisoning: bool,
+                  mut init: &mut FnMut(bool)) {
+        let mut state = self.state.load(Ordering::SeqCst);
+
+        'outer: loop {
+            match state {
+                // If we're complete, then there's nothing to do, we just
+                // jettison out as we shouldn't run the closure.
+                COMPLETE => return,
+
+                // If we're poisoned and we're not in a mode to ignore
+                // poisoning, then we panic here to propagate the poison.
+                POISONED if !ignore_poisoning => {
+                    panic!("Once instance has previously been poisoned");
+                }
+
+                // Otherwise if we see a poisoned or otherwise incomplete state
+                // we will attempt to move ourselves into the RUNNING state. If
+                // we succeed, then the queue of waiters starts at null (all 0
+                // bits).
+                POISONED |
+                INCOMPLETE => {
+                    let old = self.state.compare_and_swap(state, RUNNING,
+                                                          Ordering::SeqCst);
+                    if old != state {
+                        state = old;
+                        continue
+                    }
+
+                    // Run the initialization routine, letting it know if we're
+                    // poisoned or not. The `Finish` struct is then dropped, and
+                    // the `Drop` implementation here is responsible for waking
+                    // up other waiters both in the normal return and panicking
+                    // case.
+                    let mut complete = Finish {
+                        panicked: true,
+                        me: self,
+                    };
+                    init(state == POISONED);
+                    complete.panicked = false;
+                    return
+                }
+
+                // All other values we find should correspond to the RUNNING
+                // state with an encoded waiter list in the more significant
+                // bits. We attempt to enqueue ourselves by moving us to the
+                // head of the list and bail out if we ever see a state that's
+                // not RUNNING.
+                _ => {
+                    assert!(state & STATE_MASK == RUNNING);
+                    let mut node = Waiter {
+                        thread: Some(thread::current()),
+                        signaled: AtomicBool::new(false),
+                        next: 0 as *mut Waiter,
+                    };
+                    let me = &mut node as *mut Waiter as usize;
+                    assert!(me & STATE_MASK == 0);
+
+                    while state & STATE_MASK == RUNNING {
+                        node.next = (state & !STATE_MASK) as *mut Waiter;
+                        let old = self.state.compare_and_swap(state,
+                                                              me | RUNNING,
+                                                              Ordering::SeqCst);
+                        if old != state {
+                            state = old;
+                            continue
+                        }
+
+                        // Once we've enqueued ourselves, wait in a loop.
+                        // Afterwards, reload the state and continue with what we
+                        // were doing from before.
+                        while !node.signaled.load(Ordering::SeqCst) {
+                            thread::park();
+                        }
+                        state = self.state.load(Ordering::SeqCst);
+                        continue 'outer
+                    }
+                }
+            }
         }
-        drop(guard);
+    }
+}
 
-        // Last one out cleans up after everyone else, no leaks!
-        if self.lock_cnt.fetch_add(-1, Ordering::SeqCst) == 1 {
-            unsafe { self.mutex.destroy() }
+impl Drop for Finish {
+    fn drop(&mut self) {
+        // Swap out our state with however we finished. We should only ever see
+        // an old state which was RUNNING.
+        let queue = if self.panicked {
+            self.me.state.swap(POISONED, Ordering::SeqCst)
+        } else {
+            self.me.state.swap(COMPLETE, Ordering::SeqCst)
+        };
+        assert_eq!(queue & STATE_MASK, RUNNING);
+
+        // Decode the RUNNING to a list of waiters, then walk that entire list
+        // and wake them up. Note that it is crucial that after we store `true`
+        // in the node it can be freed! As a result we load the `thread` to
+        // signal ahead of time and then unpark it after the store.
+        unsafe {
+            let mut queue = (queue & !STATE_MASK) as *mut Waiter;
+            while !queue.is_null() {
+                let next = (*queue).next;
+                let thread = (*queue).thread.take().unwrap();
+                (*queue).signaled.store(true, Ordering::SeqCst);
+                thread.unpark();
+                queue = next;
+            }
         }
     }
 }
 
+impl OnceState {
+    /// Returns whether the associated `Once` has been poisoned.
+    ///
+    /// Once an initialization routine for a `Once` has panicked, it will forever
+    /// indicate to future forced initialization routines that it is poisoned.
+    #[unstable(feature = "once_poison", issue = "31688")]
+    pub fn poisoned(&self) -> bool {
+        self.poisoned
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use prelude::v1::*;
 
+    use panic;
+    use sync::mpsc::channel;
     use thread;
     use super::Once;
-    use sync::mpsc::channel;
 
     #[test]
     fn smoke_once() {
@@ -179,4 +419,71 @@ mod tests {
             rx.recv().unwrap();
         }
     }
+
+    #[test]
+    fn poison_bad() {
+        static O: Once = Once::new();
+
+        // poison the once
+        let t = panic::catch_unwind(|| {
+            O.call_once(|| panic!());
+        });
+        assert!(t.is_err());
+
+        // poisoning propagates
+        let t = panic::catch_unwind(|| {
+            O.call_once(|| {});
+        });
+        assert!(t.is_err());
+
+        // we can subvert poisoning, however
+        let mut called = false;
+        O.call_once_force(|p| {
+            called = true;
+            assert!(p.poisoned())
+        });
+        assert!(called);
+
+        // once any success happens, we stop propagating the poison
+        O.call_once(|| {});
+    }
+
+    #[test]
+    fn wait_for_force_to_finish() {
+        static O: Once = Once::new();
+
+        // poison the once
+        let t = panic::catch_unwind(|| {
+            O.call_once(|| panic!());
+        });
+        assert!(t.is_err());
+
+        // make sure someone's waiting inside the once via a force
+        let (tx1, rx1) = channel();
+        let (tx2, rx2) = channel();
+        let t1 = thread::spawn(move || {
+            O.call_once_force(|p| {
+                assert!(p.poisoned());
+                tx1.send(()).unwrap();
+                rx2.recv().unwrap();
+            });
+        });
+
+        rx1.recv().unwrap();
+
+        // put another waiter on the once
+        let t2 = thread::spawn(|| {
+            let mut called = false;
+            O.call_once(|| {
+                called = true;
+            });
+            assert!(!called);
+        });
+
+        tx2.send(()).unwrap();
+
+        assert!(t1.join().is_ok());
+        assert!(t2.join().is_ok());
+
+    }
 }
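
The long comment near the top of the new once.rs explains the core trick: the entire `Once` is one `usize` whose low two bits carry INCOMPLETE/POISONED/RUNNING/COMPLETE, and whose remaining bits, while RUNNING, point at the head of a stack-allocated list of waiting threads. A stand-alone sketch of just that encoding, with the atomics and thread parking elided (illustrative only, not the std implementation):

    const STATE_MASK: usize = 0b11;
    const RUNNING: usize = 0x2;

    // Stand-in for the stack-allocated waiter node; any type aligned to at
    // least 4 bytes leaves the low two bits of its address free for the state.
    struct Waiter {
        next: *mut Waiter,
    }

    fn pack(head: *mut Waiter, state: usize) -> usize {
        let addr = head as usize;
        assert!(addr & STATE_MASK == 0, "waiter must be aligned past the state bits");
        addr | state
    }

    fn unpack(word: usize) -> (*mut Waiter, usize) {
        ((word & !STATE_MASK) as *mut Waiter, word & STATE_MASK)
    }

    fn main() {
        let mut node = Waiter { next: std::ptr::null_mut() };
        let word = pack(&mut node as *mut Waiter, RUNNING);
        let (head, state) = unpack(word);
        assert_eq!(state, RUNNING);
        assert_eq!(head, &mut node as *mut Waiter);
        // The decoded pointer gives back access to the node itself.
        assert!(unsafe { (*head).next.is_null() });
        println!("state and waiter pointer round-trip through one word");
    }
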
index 0603dad4528048d8bf3864046fc14f57d2266184..a37c1c16a45e98b58d2622ca55b80101d2dcdd01 100644 (file)
@@ -205,7 +205,7 @@ impl<T: ?Sized> RwLock<T> {
     pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<T>> {
         unsafe {
             if self.inner.lock.try_read() {
-                Ok(try!(RwLockReadGuard::new(&*self.inner, &self.data)))
+                Ok(RwLockReadGuard::new(&*self.inner, &self.data)?)
             } else {
                 Err(TryLockError::WouldBlock)
             }
@@ -257,7 +257,7 @@ impl<T: ?Sized> RwLock<T> {
     pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<T>> {
         unsafe {
             if self.inner.lock.try_write() {
-                Ok(try!(RwLockWriteGuard::new(&*self.inner, &self.data)))
+                Ok(RwLockWriteGuard::new(&*self.inner, &self.data)?)
             } else {
                 Err(TryLockError::WouldBlock)
             }
@@ -382,7 +382,7 @@ impl StaticRwLock {
                     -> TryLockResult<RwLockReadGuard<'static, ()>> {
         unsafe {
             if self.lock.try_read(){
-                Ok(try!(RwLockReadGuard::new(self, &DUMMY.0)))
+                Ok(RwLockReadGuard::new(self, &DUMMY.0)?)
             } else {
                 Err(TryLockError::WouldBlock)
             }
@@ -409,7 +409,7 @@ impl StaticRwLock {
                      -> TryLockResult<RwLockWriteGuard<'static, ()>> {
         unsafe {
             if self.lock.try_write() {
-                Ok(try!(RwLockWriteGuard::new(self, &DUMMY.0)))
+                Ok(RwLockWriteGuard::new(self, &DUMMY.0)?)
             } else {
                 Err(TryLockError::WouldBlock)
             }
diff --git a/src/libstd/sync/semaphore.rs b/src/libstd/sync/semaphore.rs
deleted file mode 100644 (file)
index dd76444..0000000
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![unstable(feature = "semaphore",
-            reason = "the interaction between semaphores and the acquisition/release \
-                      of resources is currently unclear",
-            issue = "27798")]
-#![allow(deprecated)]
-
-use ops::Drop;
-use sync::{Mutex, Condvar};
-
-/// A counting, blocking, semaphore.
-///
-/// Semaphores are a form of atomic counter where access is only granted if the
-/// counter is a positive value. Each acquisition will block the calling thread
-/// until the counter is positive, and each release will increment the counter
-/// and unblock any threads if necessary.
-///
-/// # Examples
-///
-/// ```
-/// #![feature(semaphore)]
-///
-/// use std::sync::Semaphore;
-///
-/// // Create a semaphore that represents 5 resources
-/// let sem = Semaphore::new(5);
-///
-/// // Acquire one of the resources
-/// sem.acquire();
-///
-/// // Acquire one of the resources for a limited period of time
-/// {
-///     let _guard = sem.access();
-///     // ...
-/// } // resources is released here
-///
-/// // Release our initially acquired resource
-/// sem.release();
-/// ```
-#[rustc_deprecated(since = "1.7.0",
-                   reason = "easily confused with system semaphores and not \
-                             used enough to pull its weight")]
-#[unstable(feature = "semaphore",
-           reason = "the interaction between semaphores and the acquisition/release \
-                     of resources is currently unclear",
-           issue = "27798")]
-pub struct Semaphore {
-    lock: Mutex<isize>,
-    cvar: Condvar,
-}
-
-/// An RAII guard which will release a resource acquired from a semaphore when
-/// dropped.
-#[rustc_deprecated(since = "1.7.0",
-                   reason = "easily confused with system semaphores and not \
-                             used enough to pull its weight")]
-#[unstable(feature = "semaphore",
-           reason = "the interaction between semaphores and the acquisition/release \
-                     of resources is currently unclear",
-           issue = "27798")]
-pub struct SemaphoreGuard<'a> {
-    sem: &'a Semaphore,
-}
-
-#[rustc_deprecated(since = "1.7.0",
-                   reason = "easily confused with system semaphores and not \
-                             used enough to pull its weight")]
-#[unstable(feature = "semaphore",
-           reason = "the interaction between semaphores and the acquisition/release \
-                     of resources is currently unclear",
-           issue = "27798")]
-impl Semaphore {
-    /// Creates a new semaphore with the initial count specified.
-    ///
-    /// The count specified can be thought of as a number of resources, and a
-    /// call to `acquire` or `access` will block until at least one resource is
-    /// available. It is valid to initialize a semaphore with a negative count.
-    pub fn new(count: isize) -> Semaphore {
-        Semaphore {
-            lock: Mutex::new(count),
-            cvar: Condvar::new(),
-        }
-    }
-
-    /// Acquires a resource of this semaphore, blocking the current thread until
-    /// it can do so.
-    ///
-    /// This method will block until the internal count of the semaphore is at
-    /// least 1.
-    pub fn acquire(&self) {
-        let mut count = self.lock.lock().unwrap();
-        while *count <= 0 {
-            count = self.cvar.wait(count).unwrap();
-        }
-        *count -= 1;
-    }
-
-    /// Release a resource from this semaphore.
-    ///
-    /// This will increment the number of resources in this semaphore by 1 and
-    /// will notify any pending waiters in `acquire` or `access` if necessary.
-    pub fn release(&self) {
-        *self.lock.lock().unwrap() += 1;
-        self.cvar.notify_one();
-    }
-
-    /// Acquires a resource of this semaphore, returning an RAII guard to
-    /// release the semaphore when dropped.
-    ///
-    /// This function is semantically equivalent to an `acquire` followed by a
-    /// `release` when the guard returned is dropped.
-    pub fn access(&self) -> SemaphoreGuard {
-        self.acquire();
-        SemaphoreGuard { sem: self }
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a> Drop for SemaphoreGuard<'a> {
-    fn drop(&mut self) {
-        self.sem.release();
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use prelude::v1::*;
-
-    use sync::Arc;
-    use super::Semaphore;
-    use sync::mpsc::channel;
-    use thread;
-
-    #[test]
-    fn test_sem_acquire_release() {
-        let s = Semaphore::new(1);
-        s.acquire();
-        s.release();
-        s.acquire();
-    }
-
-    #[test]
-    fn test_sem_basic() {
-        let s = Semaphore::new(1);
-        let _g = s.access();
-    }
-
-    #[test]
-    fn test_sem_as_mutex() {
-        let s = Arc::new(Semaphore::new(1));
-        let s2 = s.clone();
-        let _t = thread::spawn(move|| {
-            let _g = s2.access();
-        });
-        let _g = s.access();
-    }
-
-    #[test]
-    fn test_sem_as_cvar() {
-        /* Child waits and parent signals */
-        let (tx, rx) = channel();
-        let s = Arc::new(Semaphore::new(0));
-        let s2 = s.clone();
-        let _t = thread::spawn(move|| {
-            s2.acquire();
-            tx.send(()).unwrap();
-        });
-        s.release();
-        let _ = rx.recv();
-
-        /* Parent waits and child signals */
-        let (tx, rx) = channel();
-        let s = Arc::new(Semaphore::new(0));
-        let s2 = s.clone();
-        let _t = thread::spawn(move|| {
-            s2.release();
-            let _ = rx.recv();
-        });
-        s.acquire();
-        tx.send(()).unwrap();
-    }
-
-    #[test]
-    fn test_sem_multi_resource() {
-        // Parent and child both get in the critical section at the same
-        // time, and shake hands.
-        let s = Arc::new(Semaphore::new(2));
-        let s2 = s.clone();
-        let (tx1, rx1) = channel();
-        let (tx2, rx2) = channel();
-        let _t = thread::spawn(move|| {
-            let _g = s2.access();
-            let _ = rx2.recv();
-            tx1.send(()).unwrap();
-        });
-        let _g = s.access();
-        tx2.send(()).unwrap();
-        rx1.recv().unwrap();
-    }
-
-    #[test]
-    fn test_sem_runtime_friendly_blocking() {
-        let s = Arc::new(Semaphore::new(1));
-        let s2 = s.clone();
-        let (tx, rx) = channel();
-        {
-            let _g = s.access();
-            thread::spawn(move|| {
-                tx.send(()).unwrap();
-                drop(s2.access());
-                tx.send(()).unwrap();
-            });
-            rx.recv().unwrap(); // wait for child to come alive
-        }
-        rx.recv().unwrap(); // wait for child to be done
-    }
-}
index 3c84783d215eca8b696a178024945766bf09a29e..6f185437e50af47b3d4b7966210fbaa636284e78 100644 (file)
@@ -36,7 +36,7 @@ pub fn log_enabled() -> bool {
     }
 
     let val = match env::var_os("RUST_BACKTRACE") {
-        Some(..) => 2,
+        Some(x) => if &x == "0" { 1 } else { 2 },
         None => 1,
     };
     ENABLED.store(val, Ordering::SeqCst);
@@ -46,10 +46,10 @@ pub fn log_enabled() -> bool {
 // These output functions should now be used everywhere to ensure consistency.
 pub fn output(w: &mut Write, idx: isize, addr: *mut libc::c_void,
               s: Option<&[u8]>) -> io::Result<()> {
-    try!(write!(w, "  {:2}: {:2$?} - ", idx, addr, HEX_WIDTH));
+    write!(w, "  {:2}: {:2$?} - ", idx, addr, HEX_WIDTH)?;
     match s.and_then(|s| str::from_utf8(s).ok()) {
-        Some(string) => try!(demangle(w, string)),
-        None => try!(write!(w, "<unknown>")),
+        Some(string) => demangle(w, string)?,
+        None => write!(w, "<unknown>")?,
     }
     w.write_all(&['\n' as u8])
 }
@@ -59,9 +59,9 @@ pub fn output_fileline(w: &mut Write, file: &[u8], line: libc::c_int,
                        more: bool) -> io::Result<()> {
     let file = str::from_utf8(file).unwrap_or("<unknown>");
     // prior line: "  ##: {:2$} - func"
-    try!(write!(w, "      {:3$}at {}:{}", "", file, line, HEX_WIDTH));
+    write!(w, "      {:3$}at {}:{}", "", file, line, HEX_WIDTH)?;
     if more {
-        try!(write!(w, " <... and possibly more>"));
+        write!(w, " <... and possibly more>")?;
     }
     w.write_all(&['\n' as u8])
 }
@@ -121,17 +121,17 @@ pub fn demangle(writer: &mut Write, s: &str) -> io::Result<()> {
 
     // Alright, let's do this.
     if !valid {
-        try!(writer.write_all(s.as_bytes()));
+        writer.write_all(s.as_bytes())?;
     } else {
         let mut first = true;
         while !inner.is_empty() {
             if !first {
-                try!(writer.write_all(b"::"));
+                writer.write_all(b"::")?;
             } else {
                 first = false;
             }
             let mut rest = inner;
-            while rest.char_at(0).is_numeric() {
+            while rest.chars().next().unwrap().is_numeric() {
                 rest = &rest[1..];
             }
             let i: usize = inner[.. (inner.len() - rest.len())].parse().unwrap();
@@ -177,7 +177,7 @@ pub fn demangle(writer: &mut Write, s: &str) -> io::Result<()> {
                         None => rest.len(),
                         Some(i) => i,
                     };
-                    try!(writer.write_all(rest[..idx].as_bytes()));
+                    writer.write_all(rest[..idx].as_bytes())?;
                     rest = &rest[idx..];
                 }
             }
index 8b3cb04030c8272753e8e62a790551f29c40e2ac..b5802afc109435ec1e5e2cf8dd1b71951780c592 100644 (file)
@@ -15,7 +15,6 @@ use sys_common::backtrace::{output, output_fileline};
 
 pub fn print(w: &mut Write, idx: isize, addr: *mut libc::c_void,
              symaddr: *mut libc::c_void) -> io::Result<()> {
-    use env;
     use ffi::CStr;
     use ptr;
 
@@ -110,46 +109,22 @@ pub fn print(w: &mut Write, idx: isize, addr: *mut libc::c_void,
     // that is calculated the first time this is requested. Remember that
     // backtracing all happens serially (one global lock).
     //
-    // An additionally oddity in this function is that we initialize the
-    // filename via self_exe_name() to pass to libbacktrace. It turns out
-    // that on Linux libbacktrace seamlessly gets the filename of the
-    // current executable, but this fails on freebsd. by always providing
-    // it, we make sure that libbacktrace never has a reason to not look up
-    // the symbols. The libbacktrace API also states that the filename must
-    // be in "permanent memory", so we copy it to a static and then use the
-    // static as the pointer.
+    // Things don't work so well on non-Linux platforms since libbacktrace
+    // can't track down what executable this is. We at one point used
+    // env::current_exe, but it turns out that there are some serious security
+    // issues with that approach.
     //
-    // FIXME: We also call self_exe_name() on DragonFly BSD. I haven't
-    //        tested if this is required or not.
+    // Specifically, on certain platforms like BSDs, a malicious actor can cause
+    // an arbitrary file to be placed at the path returned by current_exe.
+    // libbacktrace does not behave defensively in the presence of ill-formed
+    // DWARF information, and has been demonstrated to segfault in at least one
+    // case. There is no evidence at the moment to suggest that a more carefully
+    // constructed file can't cause arbitrary code execution. As a result of all
+    // of this, we don't hint libbacktrace with the path to the current process.
     unsafe fn init_state() -> *mut backtrace_state {
         static mut STATE: *mut backtrace_state = ptr::null_mut();
-        static mut LAST_FILENAME: [libc::c_char; 256] = [0; 256];
         if !STATE.is_null() { return STATE }
-        let selfname = if cfg!(target_os = "freebsd") ||
-            cfg!(target_os = "dragonfly") ||
-            cfg!(target_os = "bitrig") ||
-            cfg!(target_os = "openbsd") ||
-            cfg!(target_os = "windows") {
-                env::current_exe().ok()
-            } else {
-                None
-            };
-        let filename = match selfname.as_ref().and_then(|s| s.to_str()) {
-            Some(path) => {
-                let bytes = path.as_bytes();
-                if bytes.len() < LAST_FILENAME.len() {
-                    let i = bytes.iter();
-                    for (slot, val) in LAST_FILENAME.iter_mut().zip(i) {
-                        *slot = *val as libc::c_char;
-                    }
-                    LAST_FILENAME.as_ptr()
-                } else {
-                    ptr::null()
-                }
-            }
-            None => ptr::null(),
-        };
-        STATE = backtrace_create_state(filename, 0, error_cb,
+        STATE = backtrace_create_state(ptr::null(), 0, error_cb,
                                        ptr::null_mut());
         STATE
     }
@@ -172,9 +147,9 @@ pub fn print(w: &mut Write, idx: isize, addr: *mut libc::c_void,
                           data_addr as *mut libc::c_void)
     };
     if ret == 0 || data.is_null() {
-        try!(output(w, idx, addr, None));
+        output(w, idx, addr, None)?;
     } else {
-        try!(output(w, idx, addr, Some(unsafe { CStr::from_ptr(data).to_bytes() })));
+        output(w, idx, addr, Some(unsafe { CStr::from_ptr(data).to_bytes() }))?;
     }
 
     // pcinfo may return an arbitrary number of file:line pairs,
@@ -198,7 +173,7 @@ pub fn print(w: &mut Write, idx: isize, addr: *mut libc::c_void,
         for (i, &(file, line)) in fileline_buf[..fileline_count].iter().enumerate() {
             if file.is_null() { continue; } // just to be sure
             let file = unsafe { CStr::from_ptr(file).to_bytes() };
-            try!(output_fileline(w, file, line, i == FILELINE_SIZE - 1));
+            output_fileline(w, file, line, i == FILELINE_SIZE - 1)?;
         }
     }
 
index 9f2f0df3a64705577537e4eaf90e7249924baa13..7b08852ba51d1fbfa8fa5d451bffcd6f851204df 100644 (file)
@@ -51,6 +51,46 @@ pub unsafe fn read_to_end_uninitialized(r: &mut Read, buf: &mut Vec<u8>) -> io::
     }
 }
 
+#[cfg(test)]
+pub mod test {
+    use prelude::v1::*;
+    use path::{Path, PathBuf};
+    use env;
+    use rand::{self, Rng};
+    use fs;
+
+    pub struct TempDir(PathBuf);
+
+    impl TempDir {
+        pub fn join(&self, path: &str) -> PathBuf {
+            let TempDir(ref p) = *self;
+            p.join(path)
+        }
+
+        pub fn path<'a>(&'a self) -> &'a Path {
+            let TempDir(ref p) = *self;
+            p
+        }
+    }
+
+    impl Drop for TempDir {
+        fn drop(&mut self) {
+            // Gee, seeing how we're testing the fs module I sure hope that we
+            // at least implement this correctly!
+            let TempDir(ref p) = *self;
+            fs::remove_dir_all(p).unwrap();
+        }
+    }
+
+    pub fn tmpdir() -> TempDir {
+        let p = env::temp_dir();
+        let mut r = rand::thread_rng();
+        let ret = p.join(&format!("rust-{}", r.next_u32()));
+        fs::create_dir(&ret).unwrap();
+        TempDir(ret)
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use prelude::v1::*;
@@ -58,7 +98,6 @@ mod tests {
     use super::*;
     use io;
     use io::{ErrorKind, Take, Repeat, repeat};
-    use test;
     use slice::from_raw_parts;
 
     struct ErrorRepeat {
@@ -129,7 +168,7 @@ mod tests {
     }
 
     #[bench]
-    fn bench_uninitialized(b: &mut test::Bencher) {
+    fn bench_uninitialized(b: &mut ::test::Bencher) {
         b.iter(|| {
             let mut lr = repeat(1).take(10000000);
             let mut vec = Vec::with_capacity(1024);
index 3f70afe6ad76e9f7164a9ee8443de01b503519af..c1e9782852a7971ae4c714acf5b7ef52c78e25bf 100644 (file)
@@ -106,9 +106,15 @@ pub type _Unwind_Exception_Cleanup_Fn =
 #[cfg_attr(any(all(target_os = "linux", not(target_env = "musl")),
                target_os = "freebsd",
                target_os = "solaris",
-               all(target_os = "linux", target_env = "musl", not(target_arch = "x86_64"))),
+               all(target_os = "linux",
+                   target_env = "musl",
+                   not(target_arch = "x86"),
+                   not(target_arch = "x86_64"))),
            link(name = "gcc_s"))]
-#[cfg_attr(all(target_os = "linux", target_env = "musl", target_arch = "x86_64", not(test)),
+#[cfg_attr(all(target_os = "linux",
+               target_env = "musl",
+               any(target_arch = "x86", target_arch = "x86_64"),
+               not(test)),
            link(name = "unwind", kind = "static"))]
 #[cfg_attr(any(target_os = "android", target_os = "openbsd"),
            link(name = "gcc"))]
index 1cb9303a9fc4b5a984a1795942c4fff6d32ac63f..54b9b466c42240b7f8dab99bb962af50f4e1e553 100644 (file)
 use prelude::v1::*;
 
 use cmp;
-use ffi::{CStr, CString};
+use ffi::CString;
 use fmt;
 use io::{self, Error, ErrorKind};
-use libc::{c_int, c_char, c_void};
+use libc::{c_int, c_void};
 use mem;
-#[allow(deprecated)]
-use net::{SocketAddr, Shutdown, IpAddr};
+use net::{SocketAddr, Shutdown, Ipv4Addr, Ipv6Addr};
 use ptr;
-use str::from_utf8;
 use sys::net::{cvt, cvt_r, cvt_gai, Socket, init, wrlen_t};
 use sys::net::netc as c;
 use sys_common::{AsInner, FromInner, IntoInner};
 use time::Duration;
 
+#[cfg(any(target_os = "dragonfly", target_os = "freebsd",
+          target_os = "ios", target_os = "macos",
+          target_os = "openbsd", target_os = "netbsd"))]
+use sys::net::netc::IPV6_JOIN_GROUP as IPV6_ADD_MEMBERSHIP;
+#[cfg(not(any(target_os = "dragonfly", target_os = "freebsd",
+              target_os = "ios", target_os = "macos",
+          target_os = "openbsd", target_os = "netbsd")))]
+use sys::net::netc::IPV6_ADD_MEMBERSHIP;
+#[cfg(any(target_os = "dragonfly", target_os = "freebsd",
+          target_os = "ios", target_os = "macos",
+          target_os = "openbsd", target_os = "netbsd"))]
+use sys::net::netc::IPV6_LEAVE_GROUP as IPV6_DROP_MEMBERSHIP;
+#[cfg(not(any(target_os = "dragonfly", target_os = "freebsd",
+              target_os = "ios", target_os = "macos",
+          target_os = "openbsd", target_os = "netbsd")))]
+use sys::net::netc::IPV6_DROP_MEMBERSHIP;
+
 ////////////////////////////////////////////////////////////////////////////////
 // sockaddr and misc bindings
 ////////////////////////////////////////////////////////////////////////////////
@@ -33,8 +48,8 @@ pub fn setsockopt<T>(sock: &Socket, opt: c_int, val: c_int,
                      payload: T) -> io::Result<()> {
     unsafe {
         let payload = &payload as *const T as *const c_void;
-        try!(cvt(c::setsockopt(*sock.as_inner(), opt, val, payload,
-                               mem::size_of::<T>() as c::socklen_t)));
+        cvt(c::setsockopt(*sock.as_inner(), opt, val, payload,
+                          mem::size_of::<T>() as c::socklen_t))?;
         Ok(())
     }
 }
@@ -44,9 +59,9 @@ pub fn getsockopt<T: Copy>(sock: &Socket, opt: c_int,
     unsafe {
         let mut slot: T = mem::zeroed();
         let mut len = mem::size_of::<T>() as c::socklen_t;
-        try!(cvt(c::getsockopt(*sock.as_inner(), opt, val,
-                               &mut slot as *mut _ as *mut _,
-                               &mut len)));
+        cvt(c::getsockopt(*sock.as_inner(), opt, val,
+                          &mut slot as *mut _ as *mut _,
+                          &mut len))?;
         assert_eq!(len as usize, mem::size_of::<T>());
         Ok(slot)
     }
@@ -58,7 +73,7 @@ fn sockname<F>(f: F) -> io::Result<SocketAddr>
     unsafe {
         let mut storage: c::sockaddr_storage = mem::zeroed();
         let mut len = mem::size_of_val(&storage) as c::socklen_t;
-        try!(cvt(f(&mut storage as *mut _ as *mut _, &mut len)));
+        cvt(f(&mut storage as *mut _ as *mut _, &mut len))?;
         sockaddr_to_addr(&storage, len as usize)
     }
 }
@@ -84,6 +99,16 @@ fn sockaddr_to_addr(storage: &c::sockaddr_storage,
     }
 }
 
+#[cfg(target_os = "android")]
+fn to_ipv6mr_interface(value: u32) -> c_int {
+    value as c_int
+}
+
+#[cfg(not(target_os = "android"))]
+fn to_ipv6mr_interface(value: u32) -> ::libc::c_uint {
+    value as ::libc::c_uint
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 // get_host_addresses
 ////////////////////////////////////////////////////////////////////////////////
@@ -118,43 +143,15 @@ impl Drop for LookupHost {
 pub fn lookup_host(host: &str) -> io::Result<LookupHost> {
     init();
 
-    let c_host = try!(CString::new(host));
+    let c_host = CString::new(host)?;
     let mut res = ptr::null_mut();
     unsafe {
-        try!(cvt_gai(c::getaddrinfo(c_host.as_ptr(), ptr::null(), ptr::null(),
-                                   &mut res)));
+        cvt_gai(c::getaddrinfo(c_host.as_ptr(), ptr::null(), ptr::null(),
+                               &mut res))?;
         Ok(LookupHost { original: res, cur: res })
     }
 }
 
-////////////////////////////////////////////////////////////////////////////////
-// lookup_addr
-////////////////////////////////////////////////////////////////////////////////
-
-#[allow(deprecated)]
-pub fn lookup_addr(addr: &IpAddr) -> io::Result<String> {
-    init();
-
-    let saddr = SocketAddr::new(*addr, 0);
-    let (inner, len) = saddr.into_inner();
-    let mut hostbuf = [0 as c_char; c::NI_MAXHOST as usize];
-
-    let data = unsafe {
-        try!(cvt_gai(c::getnameinfo(inner, len,
-                                    hostbuf.as_mut_ptr(),
-                                    c::NI_MAXHOST,
-                                    ptr::null_mut(), 0, 0)));
-
-        CStr::from_ptr(hostbuf.as_ptr())
-    };
-
-    match from_utf8(data.to_bytes()) {
-        Ok(name) => Ok(name.to_owned()),
-        Err(_) => Err(io::Error::new(io::ErrorKind::Other,
-                                     "failed to lookup address information"))
-    }
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // TCP streams
 ////////////////////////////////////////////////////////////////////////////////
@@ -167,10 +164,10 @@ impl TcpStream {
     pub fn connect(addr: &SocketAddr) -> io::Result<TcpStream> {
         init();
 
-        let sock = try!(Socket::new(addr, c::SOCK_STREAM));
+        let sock = Socket::new(addr, c::SOCK_STREAM)?;
 
         let (addrp, len) = addr.into_inner();
-        try!(cvt_r(|| unsafe { c::connect(*sock.as_inner(), addrp, len) }));
+        cvt_r(|| unsafe { c::connect(*sock.as_inner(), addrp, len) })?;
         Ok(TcpStream { inner: sock })
     }
 
@@ -198,14 +195,18 @@ impl TcpStream {
         self.inner.read(buf)
     }
 
+    pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        self.inner.read_to_end(buf)
+    }
+
     pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
         let len = cmp::min(buf.len(), <wrlen_t>::max_value() as usize) as wrlen_t;
-        let ret = try!(cvt(unsafe {
+        let ret = cvt(unsafe {
             c::send(*self.inner.as_inner(),
                     buf.as_ptr() as *const c_void,
                     len,
                     0)
-        }));
+        })?;
         Ok(ret as usize)
     }
 
@@ -228,6 +229,31 @@ impl TcpStream {
     pub fn duplicate(&self) -> io::Result<TcpStream> {
         self.inner.duplicate().map(|s| TcpStream { inner: s })
     }
+
+    pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+        self.inner.set_nodelay(nodelay)
+    }
+
+    pub fn nodelay(&self) -> io::Result<bool> {
+        self.inner.nodelay()
+    }
+
+    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+        setsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL, ttl as c_int)
+    }
+
+    pub fn ttl(&self) -> io::Result<u32> {
+        let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL)?;
+        Ok(raw as u32)
+    }
+
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        self.inner.take_error()
+    }
+
+    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+        self.inner.set_nonblocking(nonblocking)
+    }
 }
 
 impl FromInner<Socket> for TcpStream {
@@ -266,22 +292,22 @@ impl TcpListener {
     pub fn bind(addr: &SocketAddr) -> io::Result<TcpListener> {
         init();
 
-        let sock = try!(Socket::new(addr, c::SOCK_STREAM));
+        let sock = Socket::new(addr, c::SOCK_STREAM)?;
 
         // On platforms with Berkeley-derived sockets, this allows
         // to quickly rebind a socket, without needing to wait for
         // the OS to clean up the previous one.
         if !cfg!(windows) {
-            try!(setsockopt(&sock, c::SOL_SOCKET, c::SO_REUSEADDR,
-                            1 as c_int));
+            setsockopt(&sock, c::SOL_SOCKET, c::SO_REUSEADDR,
+                       1 as c_int)?;
         }
 
         // Bind our new socket
         let (addrp, len) = addr.into_inner();
-        try!(cvt(unsafe { c::bind(*sock.as_inner(), addrp, len) }));
+        cvt(unsafe { c::bind(*sock.as_inner(), addrp, len) })?;
 
         // Start listening
-        try!(cvt(unsafe { c::listen(*sock.as_inner(), 128) }));
+        cvt(unsafe { c::listen(*sock.as_inner(), 128) })?;
         Ok(TcpListener { inner: sock })
     }
 
@@ -298,15 +324,41 @@ impl TcpListener {
     pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
         let mut storage: c::sockaddr_storage = unsafe { mem::zeroed() };
         let mut len = mem::size_of_val(&storage) as c::socklen_t;
-        let sock = try!(self.inner.accept(&mut storage as *mut _ as *mut _,
-                                          &mut len));
-        let addr = try!(sockaddr_to_addr(&storage, len as usize));
+        let sock = self.inner.accept(&mut storage as *mut _ as *mut _,
+                                     &mut len)?;
+        let addr = sockaddr_to_addr(&storage, len as usize)?;
         Ok((TcpStream { inner: sock, }, addr))
     }
 
     pub fn duplicate(&self) -> io::Result<TcpListener> {
         self.inner.duplicate().map(|s| TcpListener { inner: s })
     }
+
+    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+        setsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL, ttl as c_int)
+    }
+
+    pub fn ttl(&self) -> io::Result<u32> {
+        let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL)?;
+        Ok(raw as u32)
+    }
+
+    pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+        setsockopt(&self.inner, c::IPPROTO_IPV6, c::IPV6_V6ONLY, only_v6 as c_int)
+    }
+
+    pub fn only_v6(&self) -> io::Result<bool> {
+        let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IPV6, c::IPV6_V6ONLY)?;
+        Ok(raw != 0)
+    }
+
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        self.inner.take_error()
+    }
+
+    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+        self.inner.set_nonblocking(nonblocking)
+    }
 }
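
The listener gains the same option surface, plus `set_only_v6`, which toggles `IPV6_V6ONLY` so an IPv6 listener can choose whether to accept IPv4-mapped connections. A hedged sketch of the corresponding `std::net::TcpListener` calls, assuming they forward to the methods above:

```rust
use std::io;
use std::net::TcpListener;

fn inspect(listener: &TcpListener) -> io::Result<()> {
    // The TTL configured here also applies to connections accepted later.
    listener.set_ttl(64)?;
    // Report whether the socket is restricted to IPv6 traffic (IPV6_V6ONLY).
    println!("only_v6: {}", listener.only_v6()?);
    // Drain any pending error recorded on the socket (SO_ERROR).
    if let Some(err) = listener.take_error()? {
        return Err(err);
    }
    Ok(())
}
```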
 
 impl FromInner<Socket> for TcpListener {
@@ -341,9 +393,9 @@ impl UdpSocket {
     pub fn bind(addr: &SocketAddr) -> io::Result<UdpSocket> {
         init();
 
-        let sock = try!(Socket::new(addr, c::SOCK_DGRAM));
+        let sock = Socket::new(addr, c::SOCK_DGRAM)?;
         let (addrp, len) = addr.into_inner();
-        try!(cvt(unsafe { c::bind(*sock.as_inner(), addrp, len) }));
+        cvt(unsafe { c::bind(*sock.as_inner(), addrp, len) })?;
         Ok(UdpSocket { inner: sock })
     }
 
@@ -362,23 +414,23 @@ impl UdpSocket {
         let mut addrlen = mem::size_of_val(&storage) as c::socklen_t;
         let len = cmp::min(buf.len(), <wrlen_t>::max_value() as usize) as wrlen_t;
 
-        let n = try!(cvt(unsafe {
+        let n = cvt(unsafe {
             c::recvfrom(*self.inner.as_inner(),
                         buf.as_mut_ptr() as *mut c_void,
                         len, 0,
                         &mut storage as *mut _ as *mut _, &mut addrlen)
-        }));
-        Ok((n as usize, try!(sockaddr_to_addr(&storage, addrlen as usize))))
+        })?;
+        Ok((n as usize, sockaddr_to_addr(&storage, addrlen as usize)?))
     }
 
     pub fn send_to(&self, buf: &[u8], dst: &SocketAddr) -> io::Result<usize> {
         let len = cmp::min(buf.len(), <wrlen_t>::max_value() as usize) as wrlen_t;
         let (dstp, dstlen) = dst.into_inner();
-        let ret = try!(cvt(unsafe {
+        let ret = cvt(unsafe {
             c::sendto(*self.inner.as_inner(),
                       buf.as_ptr() as *const c_void, len,
                       0, dstp, dstlen)
-        }));
+        })?;
         Ok(ret as usize)
     }
 
@@ -401,6 +453,115 @@ impl UdpSocket {
     pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
         self.inner.timeout(c::SO_SNDTIMEO)
     }
+
+    pub fn set_broadcast(&self, broadcast: bool) -> io::Result<()> {
+        setsockopt(&self.inner, c::SOL_SOCKET, c::SO_BROADCAST, broadcast as c_int)
+    }
+
+    pub fn broadcast(&self) -> io::Result<bool> {
+        let raw: c_int = getsockopt(&self.inner, c::SOL_SOCKET, c::SO_BROADCAST)?;
+        Ok(raw != 0)
+    }
+
+    pub fn set_multicast_loop_v4(&self, multicast_loop_v4: bool) -> io::Result<()> {
+        setsockopt(&self.inner, c::IPPROTO_IP, c::IP_MULTICAST_LOOP, multicast_loop_v4 as c_int)
+    }
+
+    pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+        let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IP, c::IP_MULTICAST_LOOP)?;
+        Ok(raw != 0)
+    }
+
+    pub fn set_multicast_ttl_v4(&self, multicast_ttl_v4: u32) -> io::Result<()> {
+        setsockopt(&self.inner, c::IPPROTO_IP, c::IP_MULTICAST_TTL, multicast_ttl_v4 as c_int)
+    }
+
+    pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+        let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IP, c::IP_MULTICAST_TTL)?;
+        Ok(raw as u32)
+    }
+
+    pub fn set_multicast_loop_v6(&self, multicast_loop_v6: bool) -> io::Result<()> {
+        setsockopt(&self.inner, c::IPPROTO_IPV6, c::IPV6_MULTICAST_LOOP, multicast_loop_v6 as c_int)
+    }
+
+    pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+        let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IPV6, c::IPV6_MULTICAST_LOOP)?;
+        Ok(raw != 0)
+    }
+
+    pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr)
+                         -> io::Result<()> {
+        let mreq = c::ip_mreq {
+            imr_multiaddr: *multiaddr.as_inner(),
+            imr_interface: *interface.as_inner(),
+        };
+        setsockopt(&self.inner, c::IPPROTO_IP, c::IP_ADD_MEMBERSHIP, mreq)
+    }
+
+    pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32)
+                         -> io::Result<()> {
+        let mreq = c::ipv6_mreq {
+            ipv6mr_multiaddr: *multiaddr.as_inner(),
+            ipv6mr_interface: to_ipv6mr_interface(interface),
+        };
+        setsockopt(&self.inner, c::IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP, mreq)
+    }
+
+    pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr)
+                          -> io::Result<()> {
+        let mreq = c::ip_mreq {
+            imr_multiaddr: *multiaddr.as_inner(),
+            imr_interface: *interface.as_inner(),
+        };
+        setsockopt(&self.inner, c::IPPROTO_IP, c::IP_DROP_MEMBERSHIP, mreq)
+    }
+
+    pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32)
+                          -> io::Result<()> {
+        let mreq = c::ipv6_mreq {
+            ipv6mr_multiaddr: *multiaddr.as_inner(),
+            ipv6mr_interface: to_ipv6mr_interface(interface),
+        };
+        setsockopt(&self.inner, c::IPPROTO_IPV6, IPV6_DROP_MEMBERSHIP, mreq)
+    }
+
+    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+        setsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL, ttl as c_int)
+    }
+
+    pub fn ttl(&self) -> io::Result<u32> {
+        let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL)?;
+        Ok(raw as u32)
+    }
+
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        self.inner.take_error()
+    }
+
+    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+        self.inner.set_nonblocking(nonblocking)
+    }
+
+    pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+        self.inner.read(buf)
+    }
+
+    pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+        let len = cmp::min(buf.len(), <wrlen_t>::max_value() as usize) as wrlen_t;
+        let ret = cvt(unsafe {
+            c::send(*self.inner.as_inner(),
+                    buf.as_ptr() as *const c_void,
+                    len,
+                    0)
+        })?;
+        Ok(ret as usize)
+    }
+
+    pub fn connect(&self, addr: &SocketAddr) -> io::Result<()> {
+        let (addrp, len) = addr.into_inner();
+        cvt_r(|| unsafe { c::connect(*self.inner.as_inner(), addrp, len) }).map(|_| ())
+    }
 }
 
 impl FromInner<Socket> for UdpSocket {
index 2e2be63c3cb5b300ea63c813cc8951e74acc53a9..39d4104246732f56d0333aa58aa8ddada075284d 100644 (file)
@@ -8,9 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![unstable(feature = "reentrant_mutex", reason = "new API",
-            issue = "27738")]
-
 use prelude::v1::*;
 
 use fmt;
@@ -102,7 +99,7 @@ impl<T> ReentrantMutex<T> {
     /// acquired.
     pub fn try_lock(&self) -> TryLockResult<ReentrantMutexGuard<T>> {
         if unsafe { self.inner.try_lock() } {
-            Ok(try!(ReentrantMutexGuard::new(&self)))
+            Ok(ReentrantMutexGuard::new(&self)?)
         } else {
             Err(TryLockError::WouldBlock)
         }
index ff6a11951dc5e57f59615394758c2cdedd8fe5bc..da7a340af351525ac6a766722b9d921b7295d66d 100644 (file)
@@ -224,8 +224,13 @@ pub mod eabi {
         context: *mut uw::_Unwind_Context
     ) -> uw::_Unwind_Reason_Code
     {
+        // Backtraces on ARM will call the personality routine with
+        // state == _US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND. In those cases
+        // we want to continue unwinding the stack, otherwise all our backtraces
+        // would end at __rust_try.
         if (state as c_int & uw::_US_ACTION_MASK as c_int)
-                           == uw::_US_VIRTUAL_UNWIND_FRAME as c_int { // search phase
+                           == uw::_US_VIRTUAL_UNWIND_FRAME as c_int
+               && (state as c_int & uw::_US_FORCE_UNWIND as c_int) == 0 { // search phase
             uw::_URC_HANDLER_FOUND // catch!
         }
         else { // cleanup phase
index 48e9adb9296bc47b7888fa981193b126c0947f0e..55e485e5811acc687dce80d57e2f43571489f3d3 100644 (file)
@@ -25,7 +25,6 @@
 // unix (it's mostly used on windows), so don't worry about dead code here.
 #![allow(dead_code)]
 
-use core::char::{encode_utf8_raw, encode_utf16_raw};
 use core::str::next_code_point;
 
 use ascii::*;
@@ -192,8 +191,11 @@ impl Wtf8Buf {
             match item {
                 Ok(ch) => string.push_char(ch),
                 Err(surrogate) => {
+                    let surrogate = surrogate.unpaired_surrogate();
                     // Surrogates are known to be in the code point range.
-                    let code_point = unsafe { CodePoint::from_u32_unchecked(surrogate as u32) };
+                    let code_point = unsafe {
+                        CodePoint::from_u32_unchecked(surrogate as u32)
+                    };
                     // Skip the WTF-8 concatenation check,
                     // surrogate pairs are already decoded by decode_utf16
                     string.push_code_point_unchecked(code_point)
@@ -206,19 +208,10 @@ impl Wtf8Buf {
     /// Copied from String::push
     /// This does **not** include the WTF-8 concatenation check.
     fn push_code_point_unchecked(&mut self, code_point: CodePoint) {
-        let cur_len = self.len();
-        // This may use up to 4 bytes.
-        self.reserve(4);
-
-        unsafe {
-            // Attempt to not use an intermediate buffer by just pushing bytes
-            // directly onto this string.
-            let slice = slice::from_raw_parts_mut(
-                self.bytes.as_mut_ptr().offset(cur_len as isize), 4
-            );
-            let used = encode_utf8_raw(code_point.value, slice).unwrap();
-            self.bytes.set_len(cur_len + used);
-        }
+        let bytes = unsafe {
+            char::from_u32_unchecked(code_point.value).encode_utf8()
+        };
+        self.bytes.extend_from_slice(bytes.as_slice());
     }
 
     #[inline]
@@ -366,8 +359,8 @@ impl FromIterator<CodePoint> for Wtf8Buf {
 /// This replaces surrogate code point pairs with supplementary code points,
 /// like concatenating ill-formed UTF-16 strings effectively would.
 impl Extend<CodePoint> for Wtf8Buf {
-    fn extend<T: IntoIterator<Item=CodePoint>>(&mut self, iterable: T) {
-        let iterator = iterable.into_iter();
+    fn extend<T: IntoIterator<Item=CodePoint>>(&mut self, iter: T) {
+        let iterator = iter.into_iter();
         let (low, _high) = iterator.size_hint();
         // Lower bound of one byte per code point (ASCII only)
         self.bytes.reserve(low);
@@ -398,32 +391,32 @@ impl fmt::Debug for Wtf8 {
         fn write_str_escaped(f: &mut fmt::Formatter, s: &str) -> fmt::Result {
             use fmt::Write;
             for c in s.chars().flat_map(|c| c.escape_default()) {
-                try!(f.write_char(c))
+                f.write_char(c)?
             }
             Ok(())
         }
 
-        try!(formatter.write_str("\""));
+        formatter.write_str("\"")?;
         let mut pos = 0;
         loop {
             match self.next_surrogate(pos) {
                 None => break,
                 Some((surrogate_pos, surrogate)) => {
-                    try!(write_str_escaped(
+                    write_str_escaped(
                         formatter,
                         unsafe { str::from_utf8_unchecked(
                             &self.bytes[pos .. surrogate_pos]
                         )},
-                    ));
-                    try!(write!(formatter, "\\u{{{:X}}}", surrogate));
+                    )?;
+                    write!(formatter, "\\u{{{:X}}}", surrogate)?;
                     pos = surrogate_pos + 3;
                 }
             }
         }
-        try!(write_str_escaped(
+        write_str_escaped(
             formatter,
             unsafe { str::from_utf8_unchecked(&self.bytes[pos..]) },
-        ));
+        )?;
         formatter.write_str("\"")
     }
 }
@@ -747,12 +740,15 @@ impl<'a> Iterator for EncodeWide<'a> {
             return Some(tmp);
         }
 
-        let mut buf = [0; 2];
         self.code_points.next().map(|code_point| {
-            let n = encode_utf16_raw(code_point.value, &mut buf)
-                .unwrap_or(0);
-            if n == 2 { self.extra = buf[1]; }
-            buf[0]
+            let n = unsafe {
+                char::from_u32_unchecked(code_point.value).encode_utf16()
+            };
+            let n = n.as_slice();
+            if n.len() == 2 {
+                self.extra = n[1];
+            }
+            n[0]
         })
     }
 
index 46f276aecf4f7596f1ed5c8286a440c80b70c493..de93d3d4e509f18c5b06a4969a56cf1a1fe4c1ac 100644 (file)
@@ -40,7 +40,7 @@ pub fn write(w: &mut Write) -> io::Result<()> {
     static LOCK: StaticMutex = StaticMutex::new();
     let _g = LOCK.lock();
 
-    try!(writeln!(w, "stack backtrace:"));
+    writeln!(w, "stack backtrace:")?;
     // 100 lines should be enough
     const SIZE: usize = 100;
     let mut buf: [*mut libc::c_void; SIZE] = unsafe { mem::zeroed() };
@@ -48,7 +48,7 @@ pub fn write(w: &mut Write) -> io::Result<()> {
 
     // skipping the first one as it is write itself
     for i in 1..cnt {
-        try!(print(w, i as isize, buf[i], buf[i]))
+        print(w, i as isize, buf[i], buf[i])?
     }
     Ok(())
 }
index 8b32b5ec040024ad15b3c1b203d6d2a2a8722810..8d88091716676ee49c538d4a0a1f336df94ec8a2 100644 (file)
@@ -33,7 +33,7 @@ pub fn write(w: &mut Write) -> io::Result<()> {
     static LOCK: StaticMutex = StaticMutex::new();
     let _g = LOCK.lock();
 
-    try!(writeln!(w, "stack backtrace:"));
+    writeln!(w, "stack backtrace:")?;
 
     let mut cx = Context { writer: w, last_error: None, idx: 0 };
     return match unsafe {
index bdde25648ed8707e56f009d02fa52f1986ea59fb..a1528458860f6754fcb3568ce33b096e6ca1df90 100644 (file)
 use fs::{self, Permissions, OpenOptions};
 use io;
 use libc;
-#[allow(deprecated)]
-use os::unix::raw;
 use path::Path;
 use sys;
 use sys_common::{FromInner, AsInner, AsInnerMut};
 use sys::platform::fs::MetadataExt as UnixMetadataExt;
 
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const USER_READ: raw::mode_t = 0o400;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const USER_WRITE: raw::mode_t = 0o200;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const USER_EXECUTE: raw::mode_t = 0o100;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const USER_RWX: raw::mode_t = 0o700;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const GROUP_READ: raw::mode_t = 0o040;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const GROUP_WRITE: raw::mode_t = 0o020;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const GROUP_EXECUTE: raw::mode_t = 0o010;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const GROUP_RWX: raw::mode_t = 0o070;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const OTHER_READ: raw::mode_t = 0o004;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const OTHER_WRITE: raw::mode_t = 0o002;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const OTHER_EXECUTE: raw::mode_t = 0o001;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const OTHER_RWX: raw::mode_t = 0o007;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const ALL_READ: raw::mode_t = 0o444;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const ALL_WRITE: raw::mode_t = 0o222;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const ALL_EXECUTE: raw::mode_t = 0o111;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const ALL_RWX: raw::mode_t = 0o777;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const SETUID: raw::mode_t = 0o4000;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const SETGID: raw::mode_t = 0o2000;
-#[unstable(feature = "fs_mode", reason = "recently added API", issue = "27712")]
-#[rustc_deprecated(since = "1.7.0", reason = "moved to the libc crate instead")]
-#[allow(deprecated)]
-pub const STICKY_BIT: raw::mode_t = 0o1000;
-
 /// Unix-specific extensions to `Permissions`
 #[stable(feature = "fs_ext", since = "1.1.0")]
 pub trait PermissionsExt {
index 276ef25a03a448bb7c38d0e05bbcc72160cb459d..1be3d75d866dd7123c663604a142fb9f8ffcfeed 100644 (file)
@@ -35,6 +35,7 @@ pub mod fs;
 pub mod process;
 pub mod raw;
 pub mod thread;
+pub mod net;
 
 /// A prelude for conveniently writing platform-specific code.
 ///
@@ -48,7 +49,9 @@ pub mod prelude {
     #[doc(no_inline)] #[stable(feature = "rust1", since = "1.0.0")]
     pub use super::fs::{PermissionsExt, OpenOptionsExt, MetadataExt, FileTypeExt};
     #[doc(no_inline)] #[stable(feature = "rust1", since = "1.0.0")]
-    pub use super::fs::{DirEntryExt};
+    pub use super::fs::DirEntryExt;
+    #[doc(no_inline)] #[stable(feature = "rust1", since = "1.0.0")]
+    pub use super::thread::JoinHandleExt;
     #[doc(no_inline)] #[stable(feature = "rust1", since = "1.0.0")]
     pub use super::process::{CommandExt, ExitStatusExt};
 }
diff --git a/src/libstd/sys/unix/ext/net.rs b/src/libstd/sys/unix/ext/net.rs
new file mode 100644 (file)
index 0000000..a74f7ea
--- /dev/null
@@ -0,0 +1,1042 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![unstable(feature = "unix_socket", reason = "newly added", issue = "32312")]
+
+//! Unix-specific networking functionality
+
+use libc;
+
+use prelude::v1::*;
+use ascii;
+use ffi::OsStr;
+use fmt;
+use io;
+use mem;
+use net::Shutdown;
+use os::unix::ffi::OsStrExt;
+use os::unix::io::{RawFd, AsRawFd, FromRawFd, IntoRawFd};
+use path::Path;
+use time::Duration;
+use sys::cvt;
+use sys::net::Socket;
+use sys_common::{AsInner, FromInner, IntoInner};
+
+fn sun_path_offset() -> usize {
+    unsafe {
+        // Work with an actual instance of the type since using a null pointer is UB
+        let addr: libc::sockaddr_un = mem::uninitialized();
+        let base = &addr as *const _ as usize;
+        let path = &addr.sun_path as *const _ as usize;
+        path - base
+    }
+}
+
+unsafe fn sockaddr_un(path: &Path) -> io::Result<(libc::sockaddr_un, libc::socklen_t)> {
+    let mut addr: libc::sockaddr_un = mem::zeroed();
+    addr.sun_family = libc::AF_UNIX as libc::sa_family_t;
+
+    let bytes = path.as_os_str().as_bytes();
+
+    if bytes.contains(&0) {
+        return Err(io::Error::new(io::ErrorKind::InvalidInput,
+                                  "paths may not contain interior null bytes"));
+    }
+
+    if bytes.len() >= addr.sun_path.len() {
+        return Err(io::Error::new(io::ErrorKind::InvalidInput,
+                                  "path must be shorter than SUN_LEN"));
+    }
+    for (dst, src) in addr.sun_path.iter_mut().zip(bytes.iter()) {
+        *dst = *src as libc::c_char;
+    }
+    // null byte for pathname addresses is already there because we zeroed the
+    // struct
+
+    let mut len = sun_path_offset() + bytes.len();
+    match bytes.get(0) {
+        Some(&0) | None => {}
+        Some(_) => len += 1,
+    }
+    Ok((addr, len as libc::socklen_t))
+}
+
+enum AddressKind<'a> {
+    Unnamed,
+    Pathname(&'a Path),
+    Abstract(&'a [u8]),
+}
+
+/// An address associated with a Unix socket.
+#[derive(Clone)]
+pub struct SocketAddr {
+    addr: libc::sockaddr_un,
+    len: libc::socklen_t,
+}
+
+impl SocketAddr {
+    fn new<F>(f: F) -> io::Result<SocketAddr>
+        where F: FnOnce(*mut libc::sockaddr, *mut libc::socklen_t) -> libc::c_int
+    {
+        unsafe {
+            let mut addr: libc::sockaddr_un = mem::zeroed();
+            let mut len = mem::size_of::<libc::sockaddr_un>() as libc::socklen_t;
+            cvt(f(&mut addr as *mut _ as *mut _, &mut len))?;
+            SocketAddr::from_parts(addr, len)
+        }
+    }
+
+    fn from_parts(addr: libc::sockaddr_un, mut len: libc::socklen_t) -> io::Result<SocketAddr> {
+        if len == 0 {
+            // When a datagram arrives from an unnamed Unix socket,
+            // Linux returns zero bytes of address.
+            len = sun_path_offset() as libc::socklen_t;  // i.e. zero-length address
+        } else if addr.sun_family != libc::AF_UNIX as libc::sa_family_t {
+            return Err(io::Error::new(io::ErrorKind::InvalidInput,
+                                      "file descriptor did not correspond to a Unix socket"));
+        }
+
+        Ok(SocketAddr {
+            addr: addr,
+            len: len,
+        })
+    }
+
+    /// Returns true iff the address is unnamed.
+    pub fn is_unnamed(&self) -> bool {
+        if let AddressKind::Unnamed = self.address() {
+            true
+        } else {
+            false
+        }
+    }
+
+    /// Returns the contents of this address if it is a `pathname` address.
+    pub fn as_pathname(&self) -> Option<&Path> {
+        if let AddressKind::Pathname(path) = self.address() {
+            Some(path)
+        } else {
+            None
+        }
+    }
+
+    fn address<'a>(&'a self) -> AddressKind<'a> {
+        let len = self.len as usize - sun_path_offset();
+        let path = unsafe { mem::transmute::<&[libc::c_char], &[u8]>(&self.addr.sun_path) };
+
+        // OSX seems to return a len of 16 and a zeroed sun_path for unnamed addresses
+        if len == 0 || (cfg!(not(target_os = "linux")) && self.addr.sun_path[0] == 0) {
+            AddressKind::Unnamed
+        } else if self.addr.sun_path[0] == 0 {
+            AddressKind::Abstract(&path[1..len])
+        } else {
+            AddressKind::Pathname(OsStr::from_bytes(&path[..len - 1]).as_ref())
+        }
+    }
+}
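
A Unix-socket `SocketAddr` can be unnamed, a filesystem pathname, or (on Linux) an abstract-namespace name; only the first two are exposed through the public accessors. A hedged sketch of inspecting the local address of a bound listener, assuming a nightly toolchain with the unstable `unix_socket` feature this module requires:

```rust
#![feature(unix_socket)]

use std::os::unix::net::UnixListener;

fn describe(listener: &UnixListener) {
    match listener.local_addr() {
        Ok(addr) if addr.is_unnamed() => println!("bound to an unnamed address"),
        Ok(addr) => match addr.as_pathname() {
            Some(path) => println!("bound to {:?}", path),
            // Abstract-namespace addresses report neither unnamed nor a pathname.
            None => println!("bound to an abstract address"),
        },
        Err(e) => println!("could not read local address: {}", e),
    }
}
```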
+
+impl fmt::Debug for SocketAddr {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        match self.address() {
+            AddressKind::Unnamed => write!(fmt, "(unnamed)"),
+            AddressKind::Abstract(name) => write!(fmt, "{} (abstract)", AsciiEscaped(name)),
+            AddressKind::Pathname(path) => write!(fmt, "{:?} (pathname)", path),
+        }
+    }
+}
+
+struct AsciiEscaped<'a>(&'a [u8]);
+
+impl<'a> fmt::Display for AsciiEscaped<'a> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "\"")?;
+        for byte in self.0.iter().cloned().flat_map(ascii::escape_default) {
+            write!(fmt, "{}", byte as char)?;
+        }
+        write!(fmt, "\"")
+    }
+}
+
+/// A Unix stream socket.
+///
+/// # Examples
+///
+/// ```rust,no_run
+/// #![feature(unix_socket)]
+///
+/// use std::os::unix::net::UnixStream;
+/// use std::io::prelude::*;
+///
+/// let mut stream = UnixStream::connect("/path/to/my/socket").unwrap();
+/// stream.write_all(b"hello world").unwrap();
+/// let mut response = String::new();
+/// stream.read_to_string(&mut response).unwrap();
+/// println!("{}", response);
+/// ```
+pub struct UnixStream(Socket);
+
+impl fmt::Debug for UnixStream {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        let mut builder = fmt.debug_struct("UnixStream");
+        builder.field("fd", self.0.as_inner());
+        if let Ok(addr) = self.local_addr() {
+            builder.field("local", &addr);
+        }
+        if let Ok(addr) = self.peer_addr() {
+            builder.field("peer", &addr);
+        }
+        builder.finish()
+    }
+}
+
+impl UnixStream {
+    /// Connects to the socket named by `path`.
+    pub fn connect<P: AsRef<Path>>(path: P) -> io::Result<UnixStream> {
+        fn inner(path: &Path) -> io::Result<UnixStream> {
+            unsafe {
+                let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?;
+                let (addr, len) = sockaddr_un(path)?;
+
+                cvt(libc::connect(*inner.as_inner(), &addr as *const _ as *const _, len))?;
+                Ok(UnixStream(inner))
+            }
+        }
+        inner(path.as_ref())
+    }
+
+    /// Creates an unnamed pair of connected sockets.
+    ///
+    /// Returns two `UnixStream`s which are connected to each other.
+    pub fn pair() -> io::Result<(UnixStream, UnixStream)> {
+        let (i1, i2) = Socket::new_pair(libc::AF_UNIX, libc::SOCK_STREAM)?;
+        Ok((UnixStream(i1), UnixStream(i2)))
+    }
+
+    /// Creates a new independently owned handle to the underlying socket.
+    ///
+    /// The returned `UnixStream` is a reference to the same stream that this
+    /// object references. Both handles will read and write the same stream of
+    /// data, and options set on one stream will be propagated to the other
+    /// stream.
+    pub fn try_clone(&self) -> io::Result<UnixStream> {
+        self.0.duplicate().map(UnixStream)
+    }
+
+    /// Returns the socket address of the local half of this connection.
+    pub fn local_addr(&self) -> io::Result<SocketAddr> {
+        SocketAddr::new(|addr, len| unsafe { libc::getsockname(*self.0.as_inner(), addr, len) })
+    }
+
+    /// Returns the socket address of the remote half of this connection.
+    pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+        SocketAddr::new(|addr, len| unsafe { libc::getpeername(*self.0.as_inner(), addr, len) })
+    }
+
+    /// Sets the read timeout for the socket.
+    ///
+    /// If the provided value is `None`, then `read` calls will block
+    /// indefinitely. It is an error to pass the zero `Duration` to this
+    /// method.
+    pub fn set_read_timeout(&self, timeout: Option<Duration>) -> io::Result<()> {
+        self.0.set_timeout(timeout, libc::SO_RCVTIMEO)
+    }
+
+    /// Sets the write timeout for the socket.
+    ///
+    /// If the provided value is `None`, then `write` calls will block
+    /// indefinitely. It is an error to pass the zero `Duration` to this
+    /// method.
+    pub fn set_write_timeout(&self, timeout: Option<Duration>) -> io::Result<()> {
+        self.0.set_timeout(timeout, libc::SO_SNDTIMEO)
+    }
+
+    /// Returns the read timeout of this socket.
+    pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+        self.0.timeout(libc::SO_RCVTIMEO)
+    }
+
+    /// Returns the write timeout of this socket.
+    pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+        self.0.timeout(libc::SO_SNDTIMEO)
+    }
+
+    /// Moves the socket into or out of nonblocking mode.
+    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+        self.0.set_nonblocking(nonblocking)
+    }
+
+    /// Returns the value of the `SO_ERROR` option.
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        self.0.take_error()
+    }
+
+    /// Shuts down the read, write, or both halves of this connection.
+    ///
+    /// This function will cause all pending and future I/O calls on the
+    /// specified portions to immediately return with an appropriate value
+    /// (see the documentation of `Shutdown`).
+    pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+        self.0.shutdown(how)
+    }
+}
+
+impl io::Read for UnixStream {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        io::Read::read(&mut &*self, buf)
+    }
+
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        io::Read::read_to_end(&mut &*self, buf)
+    }
+}
+
+impl<'a> io::Read for &'a UnixStream {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        self.0.read(buf)
+    }
+
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        self.0.read_to_end(buf)
+    }
+}
+
+impl io::Write for UnixStream {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        io::Write::write(&mut &*self, buf)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        io::Write::flush(&mut &*self)
+    }
+}
+
+impl<'a> io::Write for &'a UnixStream {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.0.write(buf)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        Ok(())
+    }
+}
+
+impl AsRawFd for UnixStream {
+    fn as_raw_fd(&self) -> RawFd {
+        *self.0.as_inner()
+    }
+}
+
+impl FromRawFd for UnixStream {
+    unsafe fn from_raw_fd(fd: RawFd) -> UnixStream {
+        UnixStream(Socket::from_inner(fd))
+    }
+}
+
+impl IntoRawFd for UnixStream {
+    fn into_raw_fd(self) -> RawFd {
+        self.0.into_inner()
+    }
+}
+
+/// A structure representing a Unix domain socket server.
+///
+/// # Examples
+///
+/// ```rust,no_run
+/// #![feature(unix_socket)]
+///
+/// use std::thread;
+/// use std::os::unix::net::{UnixStream, UnixListener};
+///
+/// fn handle_client(stream: UnixStream) {
+///     // ...
+/// }
+///
+/// let listener = UnixListener::bind("/path/to/the/socket").unwrap();
+///
+/// // accept connections and process them, spawning a new thread for each one
+/// for stream in listener.incoming() {
+///     match stream {
+///         Ok(stream) => {
+///             /* connection succeeded */
+///             thread::spawn(|| handle_client(stream));
+///         }
+///         Err(err) => {
+///             /* connection failed */
+///             break;
+///         }
+///     }
+/// }
+///
+/// // close the listener socket
+/// drop(listener);
+/// ```
+pub struct UnixListener(Socket);
+
+impl fmt::Debug for UnixListener {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        let mut builder = fmt.debug_struct("UnixListener");
+        builder.field("fd", self.0.as_inner());
+        if let Ok(addr) = self.local_addr() {
+            builder.field("local", &addr);
+        }
+        builder.finish()
+    }
+}
+
+impl UnixListener {
+    /// Creates a new `UnixListener` bound to the specified socket.
+    pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixListener> {
+        fn inner(path: &Path) -> io::Result<UnixListener> {
+            unsafe {
+                let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?;
+                let (addr, len) = sockaddr_un(path)?;
+
+                cvt(libc::bind(*inner.as_inner(), &addr as *const _ as *const _, len))?;
+                cvt(libc::listen(*inner.as_inner(), 128))?;
+
+                Ok(UnixListener(inner))
+            }
+        }
+        inner(path.as_ref())
+    }
+
+    /// Accepts a new incoming connection to this listener.
+    ///
+    /// This function will block the calling thread until a new Unix connection
+    /// is established. When established, the corresponding `UnixStream` and
+    /// the remote peer's address will be returned.
+    pub fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> {
+        let mut storage: libc::sockaddr_un = unsafe { mem::zeroed() };
+        let mut len = mem::size_of_val(&storage) as libc::socklen_t;
+        let sock = self.0.accept(&mut storage as *mut _ as *mut _, &mut len)?;
+        let addr = SocketAddr::from_parts(storage, len)?;
+        Ok((UnixStream(sock), addr))
+    }
+
+    /// Creates a new independently owned handle to the underlying socket.
+    ///
+    /// The returned `UnixListener` is a reference to the same socket that this
+    /// object references. Both handles can be used to accept incoming
+    /// connections and options set on one listener will affect the other.
+    pub fn try_clone(&self) -> io::Result<UnixListener> {
+        self.0.duplicate().map(UnixListener)
+    }
+
+    /// Returns the local socket address of this listener.
+    pub fn local_addr(&self) -> io::Result<SocketAddr> {
+        SocketAddr::new(|addr, len| unsafe { libc::getsockname(*self.0.as_inner(), addr, len) })
+    }
+
+    /// Moves the socket into or out of nonblocking mode.
+    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+        self.0.set_nonblocking(nonblocking)
+    }
+
+    /// Returns the value of the `SO_ERROR` option.
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        self.0.take_error()
+    }
+
+    /// Returns an iterator over incoming connections.
+    ///
+    /// The iterator will never return `None` and will also not yield the
+    /// peer's `SocketAddr` structure.
+    pub fn incoming<'a>(&'a self) -> Incoming<'a> {
+        Incoming { listener: self }
+    }
+}
+
+impl AsRawFd for UnixListener {
+    fn as_raw_fd(&self) -> RawFd {
+        *self.0.as_inner()
+    }
+}
+
+impl FromRawFd for UnixListener {
+    unsafe fn from_raw_fd(fd: RawFd) -> UnixListener {
+        UnixListener(Socket::from_inner(fd))
+    }
+}
+
+impl IntoRawFd for UnixListener {
+    fn into_raw_fd(self) -> RawFd {
+        self.0.into_inner()
+    }
+}
+
+impl<'a> IntoIterator for &'a UnixListener {
+    type Item = io::Result<UnixStream>;
+    type IntoIter = Incoming<'a>;
+
+    fn into_iter(self) -> Incoming<'a> {
+        self.incoming()
+    }
+}
+
+/// An iterator over incoming connections to a `UnixListener`.
+///
+/// It will never return `None`.
+#[derive(Debug)]
+pub struct Incoming<'a> {
+    listener: &'a UnixListener,
+}
+
+impl<'a> Iterator for Incoming<'a> {
+    type Item = io::Result<UnixStream>;
+
+    fn next(&mut self) -> Option<io::Result<UnixStream>> {
+        Some(self.listener.accept().map(|s| s.0))
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (usize::max_value(), None)
+    }
+}
+
+/// A Unix datagram socket.
+///
+/// # Examples
+///
+/// ```rust,no_run
+/// #![feature(unix_socket)]
+///
+/// use std::os::unix::net::UnixDatagram;
+///
+/// let socket = UnixDatagram::bind("/path/to/my/socket").unwrap();
+/// socket.send_to(b"hello world", "/path/to/other/socket").unwrap();
+/// let mut buf = [0; 100];
+/// let (count, address) = socket.recv_from(&mut buf).unwrap();
+/// println!("socket {:?} sent {:?}", address, &buf[..count]);
+/// ```
+pub struct UnixDatagram(Socket);
+
+impl fmt::Debug for UnixDatagram {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        let mut builder = fmt.debug_struct("UnixDatagram");
+        builder.field("fd", self.0.as_inner());
+        if let Ok(addr) = self.local_addr() {
+            builder.field("local", &addr);
+        }
+        if let Ok(addr) = self.peer_addr() {
+            builder.field("peer", &addr);
+        }
+        builder.finish()
+    }
+}
+
+impl UnixDatagram {
+    /// Creates a Unix datagram socket bound to the given path.
+    pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixDatagram> {
+        fn inner(path: &Path) -> io::Result<UnixDatagram> {
+            unsafe {
+                let socket = UnixDatagram::unbound()?;
+                let (addr, len) = sockaddr_un(path)?;
+
+                cvt(libc::bind(*socket.0.as_inner(), &addr as *const _ as *const _, len))?;
+
+                Ok(socket)
+            }
+        }
+        inner(path.as_ref())
+    }
+
+    /// Creates a Unix datagram socket which is not bound to any address.
+    pub fn unbound() -> io::Result<UnixDatagram> {
+        let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_DGRAM)?;
+        Ok(UnixDatagram(inner))
+    }
+
+    /// Create an unnamed pair of connected sockets.
+    ///
+    /// Returns two `UnixDatagram`s which are connected to each other.
+    pub fn pair() -> io::Result<(UnixDatagram, UnixDatagram)> {
+        let (i1, i2) = Socket::new_pair(libc::AF_UNIX, libc::SOCK_DGRAM)?;
+        Ok((UnixDatagram(i1), UnixDatagram(i2)))
+    }
+
+    /// Connects the socket to the specified address.
+    ///
+    /// The `send` method may be used to send data to the specified address.
+    /// `recv` and `recv_from` will only receive data from that address.
+    pub fn connect<P: AsRef<Path>>(&self, path: P) -> io::Result<()> {
+        fn inner(d: &UnixDatagram, path: &Path) -> io::Result<()> {
+            unsafe {
+                let (addr, len) = sockaddr_un(path)?;
+
+                cvt(libc::connect(*d.0.as_inner(), &addr as *const _ as *const _, len))?;
+
+                Ok(())
+            }
+        }
+        inner(self, path.as_ref())
+    }
+
+    /// Creates a new independently owned handle to the underlying socket.
+    ///
+    /// The returned `UnixDatagram` is a reference to the same socket that this
+    /// object references. Both handles can be used to send and receive
+    /// datagrams, and options set on one socket will affect the other.
+    pub fn try_clone(&self) -> io::Result<UnixDatagram> {
+        self.0.duplicate().map(UnixDatagram)
+    }
+
+    /// Returns the address of this socket.
+    pub fn local_addr(&self) -> io::Result<SocketAddr> {
+        SocketAddr::new(|addr, len| unsafe { libc::getsockname(*self.0.as_inner(), addr, len) })
+    }
+
+    /// Returns the address of this socket's peer.
+    ///
+    /// The `connect` method will connect the socket to a peer.
+    pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+        SocketAddr::new(|addr, len| unsafe { libc::getpeername(*self.0.as_inner(), addr, len) })
+    }
+
+    /// Receives data from the socket.
+    ///
+    /// On success, returns the number of bytes read and the address from
+    /// whence the data came.
+    pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+        let mut count = 0;
+        let addr = SocketAddr::new(|addr, len| {
+            unsafe {
+                count = libc::recvfrom(*self.0.as_inner(),
+                                       buf.as_mut_ptr() as *mut _,
+                                       buf.len(),
+                                       0,
+                                       addr,
+                                       len);
+                if count > 0 {
+                    1
+                } else if count == 0 {
+                    0
+                } else {
+                    -1
+                }
+            }
+        })?;
+
+        Ok((count as usize, addr))
+    }
+
+    /// Receives data from the socket.
+    ///
+    /// On success, returns the number of bytes read.
+    pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+        self.0.read(buf)
+    }
+
+    /// Sends data on the socket to the specified address.
+    ///
+    /// On success, returns the number of bytes written.
+    pub fn send_to<P: AsRef<Path>>(&self, buf: &[u8], path: P) -> io::Result<usize> {
+        fn inner(d: &UnixDatagram, buf: &[u8], path: &Path) -> io::Result<usize> {
+            unsafe {
+                let (addr, len) = sockaddr_un(path)?;
+
+                let count = cvt(libc::sendto(*d.0.as_inner(),
+                                             buf.as_ptr() as *const _,
+                                             buf.len(),
+                                             0,
+                                             &addr as *const _ as *const _,
+                                             len))?;
+                Ok(count as usize)
+            }
+        }
+        inner(self, buf, path.as_ref())
+    }
+
+    /// Sends data on the socket to the socket's peer.
+    ///
+    /// The peer address may be set by the `connect` method, and this method
+    /// will return an error if the socket has not already been connected.
+    ///
+    /// On success, returns the number of bytes written.
+    pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+        self.0.write(buf)
+    }
+
+    /// Sets the read timeout for the socket.
+    ///
+    /// If the provided value is `None`, then `recv` and `recv_from` calls will
+    /// block indefinitely. It is an error to pass the zero `Duration` to this
+    /// method.
+    pub fn set_read_timeout(&self, timeout: Option<Duration>) -> io::Result<()> {
+        self.0.set_timeout(timeout, libc::SO_RCVTIMEO)
+    }
+
+    /// Sets the write timeout for the socket.
+    ///
+    /// If the provided value is `None`, then `send` and `send_to` calls will
+    /// block indefinitely. It is an error to pass the zero `Duration` to this
+    /// method.
+    pub fn set_write_timeout(&self, timeout: Option<Duration>) -> io::Result<()> {
+        self.0.set_timeout(timeout, libc::SO_SNDTIMEO)
+    }
+
+    /// Returns the read timeout of this socket.
+    pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+        self.0.timeout(libc::SO_RCVTIMEO)
+    }
+
+    /// Returns the write timeout of this socket.
+    pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+        self.0.timeout(libc::SO_SNDTIMEO)
+    }
+
+    /// Moves the socket into or out of nonblocking mode.
+    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+        self.0.set_nonblocking(nonblocking)
+    }
+
+    /// Returns the value of the `SO_ERROR` option.
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        self.0.take_error()
+    }
+
+    /// Shuts down the read, write, or both halves of this connection.
+    ///
+    /// This function will cause all pending and future I/O calls on the
+    /// specified portions to immediately return with an appropriate value
+    /// (see the documentation of `Shutdown`).
+    pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+        self.0.shutdown(how)
+    }
+}
+
+impl AsRawFd for UnixDatagram {
+    fn as_raw_fd(&self) -> RawFd {
+        *self.0.as_inner()
+    }
+}
+
+impl FromRawFd for UnixDatagram {
+    unsafe fn from_raw_fd(fd: RawFd) -> UnixDatagram {
+        UnixDatagram(Socket::from_inner(fd))
+    }
+}
+
+impl IntoRawFd for UnixDatagram {
+    fn into_raw_fd(self) -> RawFd {
+        self.0.into_inner()
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use prelude::v1::*;
+    use thread;
+    use io;
+    use io::prelude::*;
+    use time::Duration;
+    use sys_common::io::test::tmpdir;
+
+    use super::*;
+
+    macro_rules! or_panic {
+        ($e:expr) => {
+            match $e {
+                Ok(e) => e,
+                Err(e) => panic!("{}", e),
+            }
+        }
+    }
+
+    #[test]
+    fn basic() {
+        let dir = tmpdir();
+        let socket_path = dir.path().join("sock");
+        let msg1 = b"hello";
+        let msg2 = b"world!";
+
+        let listener = or_panic!(UnixListener::bind(&socket_path));
+        let thread = thread::spawn(move || {
+            let mut stream = or_panic!(listener.accept()).0;
+            let mut buf = [0; 5];
+            or_panic!(stream.read(&mut buf));
+            assert_eq!(&msg1[..], &buf[..]);
+            or_panic!(stream.write_all(msg2));
+        });
+
+        let mut stream = or_panic!(UnixStream::connect(&socket_path));
+        assert_eq!(Some(&*socket_path),
+                   stream.peer_addr().unwrap().as_pathname());
+        or_panic!(stream.write_all(msg1));
+        let mut buf = vec![];
+        or_panic!(stream.read_to_end(&mut buf));
+        assert_eq!(&msg2[..], &buf[..]);
+        drop(stream);
+
+        thread.join().unwrap();
+    }
+
+    #[test]
+    fn pair() {
+        let msg1 = b"hello";
+        let msg2 = b"world!";
+
+        let (mut s1, mut s2) = or_panic!(UnixStream::pair());
+        let thread = thread::spawn(move || {
+            // s1 must be moved in or the test will hang!
+            let mut buf = [0; 5];
+            or_panic!(s1.read(&mut buf));
+            assert_eq!(&msg1[..], &buf[..]);
+            or_panic!(s1.write_all(msg2));
+        });
+
+        or_panic!(s2.write_all(msg1));
+        let mut buf = vec![];
+        or_panic!(s2.read_to_end(&mut buf));
+        assert_eq!(&msg2[..], &buf[..]);
+        drop(s2);
+
+        thread.join().unwrap();
+    }
+
+    #[test]
+    fn try_clone() {
+        let dir = tmpdir();
+        let socket_path = dir.path().join("sock");
+        let msg1 = b"hello";
+        let msg2 = b"world";
+
+        let listener = or_panic!(UnixListener::bind(&socket_path));
+        let thread = thread::spawn(move || {
+            let mut stream = or_panic!(listener.accept()).0;
+            or_panic!(stream.write_all(msg1));
+            or_panic!(stream.write_all(msg2));
+        });
+
+        let mut stream = or_panic!(UnixStream::connect(&socket_path));
+        let mut stream2 = or_panic!(stream.try_clone());
+
+        let mut buf = [0; 5];
+        or_panic!(stream.read(&mut buf));
+        assert_eq!(&msg1[..], &buf[..]);
+        or_panic!(stream2.read(&mut buf));
+        assert_eq!(&msg2[..], &buf[..]);
+
+        thread.join().unwrap();
+    }
+
+    #[test]
+    fn iter() {
+        let dir = tmpdir();
+        let socket_path = dir.path().join("sock");
+
+        let listener = or_panic!(UnixListener::bind(&socket_path));
+        let thread = thread::spawn(move || {
+            for stream in listener.incoming().take(2) {
+                let mut stream = or_panic!(stream);
+                let mut buf = [0];
+                or_panic!(stream.read(&mut buf));
+            }
+        });
+
+        for _ in 0..2 {
+            let mut stream = or_panic!(UnixStream::connect(&socket_path));
+            or_panic!(stream.write_all(&[0]));
+        }
+
+        thread.join().unwrap();
+    }
+
+    #[test]
+    fn long_path() {
+        let dir = tmpdir();
+        let socket_path = dir.path()
+                             .join("asdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfa\
+                                    sasdfasdfasdasdfasdfasdfadfasdfasdfasdfasdfasdf");
+        match UnixStream::connect(&socket_path) {
+            Err(ref e) if e.kind() == io::ErrorKind::InvalidInput => {}
+            Err(e) => panic!("unexpected error {}", e),
+            Ok(_) => panic!("unexpected success"),
+        }
+
+        match UnixListener::bind(&socket_path) {
+            Err(ref e) if e.kind() == io::ErrorKind::InvalidInput => {}
+            Err(e) => panic!("unexpected error {}", e),
+            Ok(_) => panic!("unexpected success"),
+        }
+
+        match UnixDatagram::bind(&socket_path) {
+            Err(ref e) if e.kind() == io::ErrorKind::InvalidInput => {}
+            Err(e) => panic!("unexpected error {}", e),
+            Ok(_) => panic!("unexpected success"),
+        }
+    }
+
+    #[test]
+    fn timeouts() {
+        let dir = tmpdir();
+        let socket_path = dir.path().join("sock");
+
+        let _listener = or_panic!(UnixListener::bind(&socket_path));
+
+        let stream = or_panic!(UnixStream::connect(&socket_path));
+        let dur = Duration::new(15410, 0);
+
+        assert_eq!(None, or_panic!(stream.read_timeout()));
+
+        or_panic!(stream.set_read_timeout(Some(dur)));
+        assert_eq!(Some(dur), or_panic!(stream.read_timeout()));
+
+        assert_eq!(None, or_panic!(stream.write_timeout()));
+
+        or_panic!(stream.set_write_timeout(Some(dur)));
+        assert_eq!(Some(dur), or_panic!(stream.write_timeout()));
+
+        or_panic!(stream.set_read_timeout(None));
+        assert_eq!(None, or_panic!(stream.read_timeout()));
+
+        or_panic!(stream.set_write_timeout(None));
+        assert_eq!(None, or_panic!(stream.write_timeout()));
+    }
+
+    #[test]
+    fn test_read_timeout() {
+        let dir = tmpdir();
+        let socket_path = dir.path().join("sock");
+
+        let _listener = or_panic!(UnixListener::bind(&socket_path));
+
+        let mut stream = or_panic!(UnixStream::connect(&socket_path));
+        or_panic!(stream.set_read_timeout(Some(Duration::from_millis(1000))));
+
+        let mut buf = [0; 10];
+        let kind = stream.read(&mut buf).err().expect("expected error").kind();
+        assert!(kind == io::ErrorKind::WouldBlock || kind == io::ErrorKind::TimedOut);
+    }
+
+    #[test]
+    fn test_read_with_timeout() {
+        let dir = tmpdir();
+        let socket_path = dir.path().join("sock");
+
+        let listener = or_panic!(UnixListener::bind(&socket_path));
+
+        let mut stream = or_panic!(UnixStream::connect(&socket_path));
+        or_panic!(stream.set_read_timeout(Some(Duration::from_millis(1000))));
+
+        let mut other_end = or_panic!(listener.accept()).0;
+        or_panic!(other_end.write_all(b"hello world"));
+
+        let mut buf = [0; 11];
+        or_panic!(stream.read(&mut buf));
+        assert_eq!(b"hello world", &buf[..]);
+
+        let kind = stream.read(&mut buf).err().expect("expected error").kind();
+        assert!(kind == io::ErrorKind::WouldBlock || kind == io::ErrorKind::TimedOut);
+    }
+
+    #[test]
+    fn test_unix_datagram() {
+        let dir = tmpdir();
+        let path1 = dir.path().join("sock1");
+        let path2 = dir.path().join("sock2");
+
+        let sock1 = or_panic!(UnixDatagram::bind(&path1));
+        let sock2 = or_panic!(UnixDatagram::bind(&path2));
+
+        let msg = b"hello world";
+        or_panic!(sock1.send_to(msg, &path2));
+        let mut buf = [0; 11];
+        or_panic!(sock2.recv_from(&mut buf));
+        assert_eq!(msg, &buf[..]);
+    }
+
+    #[test]
+    fn test_unnamed_unix_datagram() {
+        let dir = tmpdir();
+        let path1 = dir.path().join("sock1");
+
+        let sock1 = or_panic!(UnixDatagram::bind(&path1));
+        let sock2 = or_panic!(UnixDatagram::unbound());
+
+        let msg = b"hello world";
+        or_panic!(sock2.send_to(msg, &path1));
+        let mut buf = [0; 11];
+        let (size, addr) = or_panic!(sock1.recv_from(&mut buf));
+        assert_eq!(size, 11);
+        assert!(addr.is_unnamed());
+        assert_eq!(msg, &buf[..]);
+    }
+
+    #[test]
+    fn test_connect_unix_datagram() {
+        let dir = tmpdir();
+        let path1 = dir.path().join("sock1");
+        let path2 = dir.path().join("sock2");
+
+        let bsock1 = or_panic!(UnixDatagram::bind(&path1));
+        let bsock2 = or_panic!(UnixDatagram::bind(&path2));
+        let sock = or_panic!(UnixDatagram::unbound());
+        or_panic!(sock.connect(&path1));
+
+        // Check send()
+        let msg = b"hello there";
+        or_panic!(sock.send(msg));
+        let mut buf = [0; 11];
+        let (size, addr) = or_panic!(bsock1.recv_from(&mut buf));
+        assert_eq!(size, 11);
+        assert!(addr.is_unnamed());
+        assert_eq!(msg, &buf[..]);
+
+        // Changing default socket works too
+        or_panic!(sock.connect(&path2));
+        or_panic!(sock.send(msg));
+        or_panic!(bsock2.recv_from(&mut buf));
+    }
+
+    #[test]
+    fn test_unix_datagram_recv() {
+        let dir = tmpdir();
+        let path1 = dir.path().join("sock1");
+
+        let sock1 = or_panic!(UnixDatagram::bind(&path1));
+        let sock2 = or_panic!(UnixDatagram::unbound());
+        or_panic!(sock2.connect(&path1));
+
+        let msg = b"hello world";
+        or_panic!(sock2.send(msg));
+        let mut buf = [0; 11];
+        let size = or_panic!(sock1.recv(&mut buf));
+        assert_eq!(size, 11);
+        assert_eq!(msg, &buf[..]);
+    }
+
+    #[test]
+    fn datagram_pair() {
+        let msg1 = b"hello";
+        let msg2 = b"world!";
+
+        let (s1, s2) = or_panic!(UnixDatagram::pair());
+        let thread = thread::spawn(move || {
+            // s1 must be moved in or the test will hang!
+            let mut buf = [0; 5];
+            or_panic!(s1.recv(&mut buf));
+            assert_eq!(&msg1[..], &buf[..]);
+            or_panic!(s1.send(msg2));
+        });
+
+        or_panic!(s2.send(msg1));
+        let mut buf = [0; 6];
+        or_panic!(s2.recv(&mut buf));
+        assert_eq!(&msg2[..], &buf[..]);
+        drop(s2);
+
+        thread.join().unwrap();
+    }
+
+    #[test]
+    fn abstract_namespace_not_allowed() {
+        assert!(UnixStream::connect("\0asdf").is_err());
+    }
+}
index 8cc291d00ee027cf4cca947a1db1b0a6829ed556..7f31cf9f3bf80690b7f01506e9bdf317b3b07b78 100644 (file)
@@ -45,6 +45,8 @@ pub trait CommandExt {
     /// (the daemon) in the same session.
     #[unstable(feature = "process_session_leader", reason = "recently added",
                issue = "27811")]
+    #[rustc_deprecated(reason = "use `before_exec` instead",
+                       since = "1.9.0")]
     fn session_leader(&mut self, on: bool) -> &mut process::Command;
 
     /// Schedules a closure to be run just before the `exec` function is
@@ -94,7 +96,7 @@ pub trait CommandExt {
     /// file descriptors may have changed. If a "transactional spawn" is
     /// required to gracefully handle errors it is recommended to use the
     /// cross-platform `spawn` instead.
-    #[unstable(feature = "process_exec", issue = "31398")]
+    #[stable(feature = "process_exec2", since = "1.9.0")]
     fn exec(&mut self) -> io::Error;
 }
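
With `exec` stabilized here (and `session_leader` deprecated in favor of `before_exec`), a Unix-only program can replace its own process image instead of spawning a child; the call only ever returns on failure. A minimal sketch, assuming `std::os::unix::process::CommandExt` as stabilized in this release:

```rust
use std::os::unix::process::CommandExt;
use std::process::Command;

fn replace_with_ls() -> std::io::Error {
    // On success this never returns: the current process image is replaced.
    // The returned value is therefore always the error that prevented exec.
    Command::new("ls").arg("-l").exec()
}
```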
 
index bb8200ff8597a9bf6ad4639c155158217d9dc611..fe2a48764dc3a99799373cf3f0878a8b61d6dca7 100644 (file)
@@ -8,37 +8,41 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! Unix-specific extensions to primitives in the `std::process` module.
+//! Unix-specific extensions to primitives in the `std::thread` module.
 
-#![unstable(feature = "thread_extensions", issue = "29791")]
+#![stable(feature = "thread_extensions", since = "1.9.0")]
 
 #[allow(deprecated)]
 use os::unix::raw::pthread_t;
 use sys_common::{AsInner, IntoInner};
 use thread::JoinHandle;
 
-#[unstable(feature = "thread_extensions", issue = "29791")]
+#[stable(feature = "thread_extensions", since = "1.9.0")]
 #[allow(deprecated)]
 pub type RawPthread = pthread_t;
 
 /// Unix-specific extensions to `std::thread::JoinHandle`
-#[unstable(feature = "thread_extensions", issue = "29791")]
+#[stable(feature = "thread_extensions", since = "1.9.0")]
 pub trait JoinHandleExt {
     /// Extracts the raw pthread_t without taking ownership
+    #[stable(feature = "thread_extensions", since = "1.9.0")]
     fn as_pthread_t(&self) -> RawPthread;
+
     /// Consumes the thread, returning the raw pthread_t
     ///
     /// This function **transfers ownership** of the underlying pthread_t to
     /// the caller. Callers are then the unique owners of the pthread_t and
-    /// must either detech or join the pthread_t once it's no longer needed.
+    /// must either detach or join the pthread_t once it's no longer needed.
+    #[stable(feature = "thread_extensions", since = "1.9.0")]
     fn into_pthread_t(self) -> RawPthread;
 }
 
-#[unstable(feature = "thread_extensions", issue = "29791")]
+#[stable(feature = "thread_extensions", since = "1.9.0")]
 impl<T> JoinHandleExt for JoinHandle<T> {
     fn as_pthread_t(&self) -> RawPthread {
         self.as_inner().id() as RawPthread
     }
+
     fn into_pthread_t(self) -> RawPthread {
         self.into_inner().into_id() as RawPthread
     }
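
The newly stabilized `JoinHandleExt` exposes the underlying `pthread_t` of a spawned thread, e.g. to hand it to a C API that adjusts scheduling or affinity. A hedged sketch, assuming the trait is in scope from `std::os::unix::thread` as stabilized here:

```rust
use std::os::unix::thread::{JoinHandleExt, RawPthread};
use std::thread;

fn raw_handle_demo() {
    let handle = thread::spawn(|| println!("hello from the worker"));
    // Borrow the raw pthread_t without giving up ownership of the JoinHandle.
    let raw: RawPthread = handle.as_pthread_t();
    println!("worker pthread_t: {:?}", raw);
    // Joining still works because as_pthread_t does not consume the handle.
    handle.join().unwrap();
}
```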
index 299c6ec2731d7f02462de3632ae8e6c391906131..94c48be02ffc4b3f3093763b8e8b557100bf6499 100644 (file)
@@ -8,12 +8,17 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use io;
+#![unstable(reason = "not public", issue = "0", feature = "fd")]
+
+use prelude::v1::*;
+
+use io::{self, Read};
 use libc::{self, c_int, size_t, c_void};
 use mem;
+use sync::atomic::{AtomicBool, Ordering};
 use sys::cvt;
 use sys_common::AsInner;
-use sync::atomic::{AtomicBool, Ordering};
+use sys_common::io::read_to_end_uninitialized;
 
 pub struct FileDesc {
     fd: c_int,
@@ -34,20 +39,25 @@ impl FileDesc {
     }
 
     pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
-        let ret = try!(cvt(unsafe {
+        let ret = cvt(unsafe {
             libc::read(self.fd,
                        buf.as_mut_ptr() as *mut c_void,
                        buf.len() as size_t)
-        }));
+        })?;
         Ok(ret as usize)
     }
 
+    pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        let mut me = self;
+        (&mut me).read_to_end(buf)
+    }
+
     pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
-        let ret = try!(cvt(unsafe {
+        let ret = cvt(unsafe {
             libc::write(self.fd,
                         buf.as_ptr() as *const c_void,
                         buf.len() as size_t)
-        }));
+        })?;
         Ok(ret as usize)
     }
 
@@ -67,6 +77,20 @@ impl FileDesc {
         }
     }
 
+    pub fn set_nonblocking(&self, nonblocking: bool) {
+        unsafe {
+            let previous = libc::fcntl(self.fd, libc::F_GETFL);
+            debug_assert!(previous != -1);
+            let new = if nonblocking {
+                previous | libc::O_NONBLOCK
+            } else {
+                previous & !libc::O_NONBLOCK
+            };
+            let ret = libc::fcntl(self.fd, libc::F_SETFL, new);
+            debug_assert!(ret != -1);
+        }
+    }
+
     pub fn duplicate(&self) -> io::Result<FileDesc> {
         // We want to atomically duplicate this file descriptor and set the
         // CLOEXEC flag, and currently that's done via F_DUPFD_CLOEXEC. This
@@ -118,6 +142,16 @@ impl FileDesc {
     }
 }
 
+impl<'a> Read for &'a FileDesc {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        (**self).read(buf)
+    }
+
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        unsafe { read_to_end_uninitialized(self, buf) }
+    }
+}
+
 impl AsInner<c_int> for FileDesc {
     fn as_inner(&self) -> &c_int { &self.fd }
 }
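The `read_to_end` forwarding above leans on the `impl<'a> Read for &'a FileDesc` added in the same hunk: a `&self` method can borrow itself and reuse the standard `Read` machinery (here routed through `read_to_end_uninitialized`). A self-contained sketch of that pattern with a toy type, where a `Cell` stands in for the kernel-side file offset (an illustration, not libstd code):

    use std::cell::Cell;
    use std::cmp;
    use std::io::{self, Read};

    // Stand-in for a file descriptor; Cell lets read() advance through &self.
    struct Fd { data: Vec<u8>, pos: Cell<usize> }

    impl Fd {
        // Same shape as FileDesc::read_to_end above: a &self method that
        // reuses the Read machinery via `impl Read for &Fd`.
        fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
            let mut me = self;
            (&mut me).read_to_end(buf)
        }
    }

    impl<'a> Read for &'a Fd {
        fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
            let pos = self.pos.get();
            let n = cmp::min(buf.len(), self.data.len() - pos);
            buf[..n].copy_from_slice(&self.data[pos..pos + n]);
            self.pos.set(pos + n);
            Ok(n)
        }
    }

    fn main() {
        let fd = Fd { data: b"hello".to_vec(), pos: Cell::new(0) };
        let mut out = Vec::new();
        fd.read_to_end(&mut out).unwrap();
        assert_eq!(out, b"hello");
    }
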
index 250b1b015a069e380c0270903686dc69c46d0636..810a34478c5f90fc7ba11d0582a1dd38c7cc9501 100644 (file)
@@ -25,15 +25,19 @@ use sys::time::SystemTime;
 use sys::{cvt, cvt_r};
 use sys_common::{AsInner, FromInner};
 
-#[cfg(target_os = "linux")]
+#[cfg(any(target_os = "linux", target_os = "emscripten"))]
 use libc::{stat64, fstat64, lstat64, off64_t, ftruncate64, lseek64, dirent64, readdir64_r, open64};
 #[cfg(target_os = "android")]
 use libc::{stat as stat64, fstat as fstat64, lstat as lstat64, off64_t, ftruncate64, lseek64,
            dirent as dirent64, open as open64};
-#[cfg(not(any(target_os = "linux", target_os = "android")))]
+#[cfg(not(any(target_os = "linux",
+              target_os = "emscripten",
+              target_os = "android")))]
 use libc::{stat as stat64, fstat as fstat64, lstat as lstat64, off_t as off64_t,
            ftruncate as ftruncate64, lseek as lseek64, dirent as dirent64, open as open64};
-#[cfg(not(any(target_os = "linux", target_os = "solaris")))]
+#[cfg(not(any(target_os = "linux",
+              target_os = "emscripten",
+              target_os = "solaris")))]
 use libc::{readdir_r as readdir64_r};
 
 pub struct File(FileDesc);
@@ -414,18 +418,18 @@ impl OpenOptions {
 
 impl File {
     pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
-        let path = try!(cstr(path));
+        let path = cstr(path)?;
         File::open_c(&path, opts)
     }
 
     pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> {
         let flags = libc::O_CLOEXEC |
-                    try!(opts.get_access_mode()) |
-                    try!(opts.get_creation_mode()) |
+                    opts.get_access_mode()? |
+                    opts.get_creation_mode()? |
                     (opts.custom_flags as c_int & !libc::O_ACCMODE);
-        let fd = try!(cvt_r(|| unsafe {
+        let fd = cvt_r(|| unsafe {
             open64(path.as_ptr(), flags, opts.mode as c_int)
-        }));
+        })?;
         let fd = FileDesc::new(fd);
 
         // Currently the standard library supports Linux 2.6.18 which did not
@@ -444,19 +448,19 @@ impl File {
 
     pub fn file_attr(&self) -> io::Result<FileAttr> {
         let mut stat: stat64 = unsafe { mem::zeroed() };
-        try!(cvt(unsafe {
+        cvt(unsafe {
             fstat64(self.0.raw(), &mut stat)
-        }));
+        })?;
         Ok(FileAttr { stat: stat })
     }
 
     pub fn fsync(&self) -> io::Result<()> {
-        try!(cvt_r(|| unsafe { libc::fsync(self.0.raw()) }));
+        cvt_r(|| unsafe { libc::fsync(self.0.raw()) })?;
         Ok(())
     }
 
     pub fn datasync(&self) -> io::Result<()> {
-        try!(cvt_r(|| unsafe { os_datasync(self.0.raw()) }));
+        cvt_r(|| unsafe { os_datasync(self.0.raw()) })?;
         return Ok(());
 
         #[cfg(any(target_os = "macos", target_os = "ios"))]
@@ -472,9 +476,9 @@ impl File {
     }
 
     pub fn truncate(&self, size: u64) -> io::Result<()> {
-        try!(cvt_r(|| unsafe {
+        cvt_r(|| unsafe {
             ftruncate64(self.0.raw(), size as off64_t)
-        }));
+        })?;
         Ok(())
     }
 
@@ -482,6 +486,10 @@ impl File {
         self.0.read(buf)
     }
 
+    pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        self.0.read_to_end(buf)
+    }
+
     pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
         self.0.write(buf)
     }
@@ -494,7 +502,7 @@ impl File {
             SeekFrom::End(off) => (libc::SEEK_END, off as off64_t),
             SeekFrom::Current(off) => (libc::SEEK_CUR, off as off64_t),
         };
-        let n = try!(cvt(unsafe { lseek64(self.0.raw(), pos, whence) }));
+        let n = cvt(unsafe { lseek64(self.0.raw(), pos, whence) })?;
         Ok(n as u64)
     }
 
@@ -513,8 +521,8 @@ impl DirBuilder {
     }
 
     pub fn mkdir(&self, p: &Path) -> io::Result<()> {
-        let p = try!(cstr(p));
-        try!(cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) }));
+        let p = cstr(p)?;
+        cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) })?;
         Ok(())
     }
 
@@ -524,7 +532,7 @@ impl DirBuilder {
 }
 
 fn cstr(path: &Path) -> io::Result<CString> {
-    Ok(try!(CString::new(path.as_os_str().as_bytes())))
+    Ok(CString::new(path.as_os_str().as_bytes())?)
 }
 
 impl FromInner<c_int> for File {
@@ -602,7 +610,7 @@ impl fmt::Debug for File {
 
 pub fn readdir(p: &Path) -> io::Result<ReadDir> {
     let root = Arc::new(p.to_path_buf());
-    let p = try!(cstr(p));
+    let p = cstr(p)?;
     unsafe {
         let ptr = libc::opendir(p.as_ptr());
         if ptr.is_null() {
@@ -614,32 +622,32 @@ pub fn readdir(p: &Path) -> io::Result<ReadDir> {
 }
 
 pub fn unlink(p: &Path) -> io::Result<()> {
-    let p = try!(cstr(p));
-    try!(cvt(unsafe { libc::unlink(p.as_ptr()) }));
+    let p = cstr(p)?;
+    cvt(unsafe { libc::unlink(p.as_ptr()) })?;
     Ok(())
 }
 
 pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
-    let old = try!(cstr(old));
-    let new = try!(cstr(new));
-    try!(cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) }));
+    let old = cstr(old)?;
+    let new = cstr(new)?;
+    cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) })?;
     Ok(())
 }
 
 pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
-    let p = try!(cstr(p));
-    try!(cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) }));
+    let p = cstr(p)?;
+    cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) })?;
     Ok(())
 }
 
 pub fn rmdir(p: &Path) -> io::Result<()> {
-    let p = try!(cstr(p));
-    try!(cvt(unsafe { libc::rmdir(p.as_ptr()) }));
+    let p = cstr(p)?;
+    cvt(unsafe { libc::rmdir(p.as_ptr()) })?;
     Ok(())
 }
 
 pub fn remove_dir_all(path: &Path) -> io::Result<()> {
-    let filetype = try!(lstat(path)).file_type();
+    let filetype = lstat(path)?.file_type();
     if filetype.is_symlink() {
         unlink(path)
     } else {
@@ -648,27 +656,27 @@ pub fn remove_dir_all(path: &Path) -> io::Result<()> {
 }
 
 fn remove_dir_all_recursive(path: &Path) -> io::Result<()> {
-    for child in try!(readdir(path)) {
-        let child = try!(child);
-        if try!(child.file_type()).is_dir() {
-            try!(remove_dir_all_recursive(&child.path()));
+    for child in readdir(path)? {
+        let child = child?;
+        if child.file_type()?.is_dir() {
+            remove_dir_all_recursive(&child.path())?;
         } else {
-            try!(unlink(&child.path()));
+            unlink(&child.path())?;
         }
     }
     rmdir(path)
 }
 
 pub fn readlink(p: &Path) -> io::Result<PathBuf> {
-    let c_path = try!(cstr(p));
+    let c_path = cstr(p)?;
     let p = c_path.as_ptr();
 
     let mut buf = Vec::with_capacity(256);
 
     loop {
-        let buf_read = try!(cvt(unsafe {
+        let buf_read = cvt(unsafe {
             libc::readlink(p, buf.as_mut_ptr() as *mut _, buf.capacity() as libc::size_t)
-        })) as usize;
+        })? as usize;
 
         unsafe { buf.set_len(buf_read); }
 
@@ -686,39 +694,39 @@ pub fn readlink(p: &Path) -> io::Result<PathBuf> {
 }
 
 pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> {
-    let src = try!(cstr(src));
-    let dst = try!(cstr(dst));
-    try!(cvt(unsafe { libc::symlink(src.as_ptr(), dst.as_ptr()) }));
+    let src = cstr(src)?;
+    let dst = cstr(dst)?;
+    cvt(unsafe { libc::symlink(src.as_ptr(), dst.as_ptr()) })?;
     Ok(())
 }
 
 pub fn link(src: &Path, dst: &Path) -> io::Result<()> {
-    let src = try!(cstr(src));
-    let dst = try!(cstr(dst));
-    try!(cvt(unsafe { libc::link(src.as_ptr(), dst.as_ptr()) }));
+    let src = cstr(src)?;
+    let dst = cstr(dst)?;
+    cvt(unsafe { libc::link(src.as_ptr(), dst.as_ptr()) })?;
     Ok(())
 }
 
 pub fn stat(p: &Path) -> io::Result<FileAttr> {
-    let p = try!(cstr(p));
+    let p = cstr(p)?;
     let mut stat: stat64 = unsafe { mem::zeroed() };
-    try!(cvt(unsafe {
+    cvt(unsafe {
         stat64(p.as_ptr(), &mut stat as *mut _ as *mut _)
-    }));
+    })?;
     Ok(FileAttr { stat: stat })
 }
 
 pub fn lstat(p: &Path) -> io::Result<FileAttr> {
-    let p = try!(cstr(p));
+    let p = cstr(p)?;
     let mut stat: stat64 = unsafe { mem::zeroed() };
-    try!(cvt(unsafe {
+    cvt(unsafe {
         lstat64(p.as_ptr(), &mut stat as *mut _ as *mut _)
-    }));
+    })?;
     Ok(FileAttr { stat: stat })
 }
 
 pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
-    let path = try!(CString::new(p.as_os_str().as_bytes()));
+    let path = CString::new(p.as_os_str().as_bytes())?;
     let buf;
     unsafe {
         let r = libc::realpath(path.as_ptr(), ptr::null_mut());
@@ -738,11 +746,11 @@ pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
                               "the source path is not an existing regular file"))
     }
 
-    let mut reader = try!(File::open(from));
-    let mut writer = try!(File::create(to));
-    let perm = try!(reader.metadata()).permissions();
+    let mut reader = File::open(from)?;
+    let mut writer = File::create(to)?;
+    let perm = reader.metadata()?.permissions();
 
-    let ret = try!(io::copy(&mut reader, &mut writer));
-    try!(set_permissions(to, perm));
+    let ret = io::copy(&mut reader, &mut writer)?;
+    set_permissions(to, perm)?;
     Ok(ret)
 }
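Most of the churn in this file (and in the files that follow) is the mechanical `try!(expr)` → `expr?` rewrite. The two forms are equivalent early returns of the error; a standalone sketch of the desugaring (the path is purely illustrative):

    use std::fs::File;
    use std::io;

    // Roughly what try!(File::open(path)) expanded to before this change:
    fn open_expanded(path: &str) -> io::Result<File> {
        let f = match File::open(path) {
            Ok(f) => f,
            Err(e) => return Err(From::from(e)),
        };
        Ok(f)
    }

    // The equivalent form these hunks rewrite it to:
    fn open_question(path: &str) -> io::Result<File> {
        let f = File::open(path)?;
        Ok(f)
    }

    fn main() {
        assert!(open_expanded("/no/such/file").is_err());
        assert!(open_question("/no/such/file").is_err());
    }
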
index c332d6035ee8d8d789ccf681bbae8c754cdd55a8..f8b2d4dd23240bdcaf5f32fcbffe81357c46758d 100644 (file)
@@ -85,12 +85,46 @@ pub fn init() {
 
     #[cfg(not(target_os = "nacl"))]
     unsafe fn reset_sigpipe() {
-        assert!(libc::signal(libc::SIGPIPE, libc::SIG_IGN) != !0);
+        assert!(signal(libc::SIGPIPE, libc::SIG_IGN) != !0);
     }
     #[cfg(target_os = "nacl")]
     unsafe fn reset_sigpipe() {}
 }
 
+// Currently the minimum supported Android version of the standard library is
+// API level 18 (android-18). Back in those days [1] the `signal` function was
+// just an inline wrapper around `bsd_signal`, but starting in API level
+// android-20 the `signal` symbol was introduced [2]. Finally, in android-21
+// the API `bsd_signal` was removed [3].
+//
+// Basically this means that if we want to be binary compatible with multiple
+// Android releases (oldest being 18 and newest being 21) then we need to check
+// for both symbols and not actually link against either.
+//
+// Note that if we're not on android we just link against the `signal` symbol
+// itself.
+//
+// [1]: https://chromium.googlesource.com/android_tools/+/20ee6d20/ndk/platforms
+//                                       /android-18/arch-arm/usr/include/signal.h
+// [2]: https://chromium.googlesource.com/android_tools/+/fbd420/ndk_experimental
+//                                       /platforms/android-20/arch-arm
+//                                       /usr/include/signal.h
+// [3]: https://chromium.googlesource.com/android_tools/+/20ee6d/ndk/platforms
+//                                       /android-21/arch-arm/usr/include/signal.h
+#[cfg(target_os = "android")]
+unsafe fn signal(signum: libc::c_int,
+                 handler: libc::sighandler_t) -> libc::sighandler_t {
+    weak!(fn signal(libc::c_int, libc::sighandler_t) -> libc::sighandler_t);
+    weak!(fn bsd_signal(libc::c_int, libc::sighandler_t) -> libc::sighandler_t);
+
+    let f = signal.get().or_else(|| bsd_signal.get());
+    let f = f.expect("neither `signal` nor `bsd_signal` symbols found");
+    f(signum, handler)
+}
+
+#[cfg(not(target_os = "android"))]
+pub use libc::signal;
+
 pub fn decode_error_kind(errno: i32) -> ErrorKind {
     match errno as libc::c_int {
         libc::ECONNREFUSED => ErrorKind::ConnectionRefused,
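The `weak!`-based lookup above amounts to resolving the symbols at run time instead of link time, much like the `fetch` helper changed later in this commit to call `dlsym(RTLD_DEFAULT, ..)`. A rough standalone sketch of that fallback strategy, assuming the `libc` crate is available as a dependency (this is an illustration, not the private `weak!` macro itself):

    extern crate libc;

    use std::ffi::CString;

    unsafe fn lookup(name: &str) -> Option<usize> {
        let name = CString::new(name).unwrap();
        let sym = libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr());
        if sym.is_null() { None } else { Some(sym as usize) }
    }

    fn main() {
        // Prefer `signal`, fall back to `bsd_signal`, without a link-time
        // dependency on either symbol.
        let addr = unsafe { lookup("signal").or_else(|| lookup("bsd_signal")) };
        println!("signal-installing function found at: {:?}", addr);
    }
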
index 16c369674f0a2e2bf90f8f80952135480fff1328..830957a7e59c76943a6f080b28e98473e008b856 100644 (file)
@@ -57,13 +57,17 @@ impl Socket {
             SocketAddr::V4(..) => libc::AF_INET,
             SocketAddr::V6(..) => libc::AF_INET6,
         };
+        Socket::new_raw(fam, ty)
+    }
+
+    pub fn new_raw(fam: c_int, ty: c_int) -> io::Result<Socket> {
         unsafe {
             // On linux we first attempt to pass the SOCK_CLOEXEC flag to
             // atomically create the socket and set it as CLOEXEC. Support for
             // this option, however, was added in 2.6.27, and we still support
             // 2.6.18 as a kernel, so if the returned error is EINVAL we
             // fallthrough to the fallback.
-            if cfg!(target_os = "linux") {
+            if cfg!(linux) {
                 match cvt(libc::socket(fam, ty | SOCK_CLOEXEC, 0)) {
                     Ok(fd) => return Ok(Socket(FileDesc::new(fd))),
                     Err(ref e) if e.raw_os_error() == Some(libc::EINVAL) => {}
@@ -71,13 +75,37 @@ impl Socket {
                 }
             }
 
-            let fd = try!(cvt(libc::socket(fam, ty, 0)));
+            let fd = cvt(libc::socket(fam, ty, 0))?;
             let fd = FileDesc::new(fd);
             fd.set_cloexec();
             Ok(Socket(fd))
         }
     }
 
+    pub fn new_pair(fam: c_int, ty: c_int) -> io::Result<(Socket, Socket)> {
+        unsafe {
+            let mut fds = [0, 0];
+
+            // Like above, see if we can set cloexec atomically
+            if cfg!(linux) {
+                match cvt(libc::socketpair(fam, ty | SOCK_CLOEXEC, 0, fds.as_mut_ptr())) {
+                    Ok(_) => {
+                        return Ok((Socket(FileDesc::new(fds[0])), Socket(FileDesc::new(fds[1]))));
+                    }
+                    Err(ref e) if e.raw_os_error() == Some(libc::EINVAL) => {},
+                    Err(e) => return Err(e),
+                }
+            }
+
+            cvt(libc::socketpair(fam, ty, 0, fds.as_mut_ptr()))?;
+            let a = FileDesc::new(fds[0]);
+            a.set_cloexec();
+            let b = FileDesc::new(fds[1]);
+            b.set_cloexec();
+            Ok((Socket(a), Socket(b)))
+        }
+    }
+
     pub fn accept(&self, storage: *mut sockaddr, len: *mut socklen_t)
                   -> io::Result<Socket> {
         // Unfortunately the only known way right now to accept a socket and
@@ -100,9 +128,9 @@ impl Socket {
             }
         }
 
-        let fd = try!(cvt_r(|| unsafe {
+        let fd = cvt_r(|| unsafe {
             libc::accept(self.0.raw(), storage, len)
-        }));
+        })?;
         let fd = FileDesc::new(fd);
         fd.set_cloexec();
         Ok(Socket(fd))
@@ -116,6 +144,14 @@ impl Socket {
         self.0.read(buf)
     }
 
+    pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        self.0.read_to_end(buf)
+    }
+
+    pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+        self.0.write(buf)
+    }
+
     pub fn set_timeout(&self, dur: Option<Duration>, kind: libc::c_int) -> io::Result<()> {
         let timeout = match dur {
             Some(dur) => {
@@ -149,7 +185,7 @@ impl Socket {
     }
 
     pub fn timeout(&self, kind: libc::c_int) -> io::Result<Option<Duration>> {
-        let raw: libc::timeval = try!(getsockopt(self, libc::SOL_SOCKET, kind));
+        let raw: libc::timeval = getsockopt(self, libc::SOL_SOCKET, kind)?;
         if raw.tv_sec == 0 && raw.tv_usec == 0 {
             Ok(None)
         } else {
@@ -165,9 +201,32 @@ impl Socket {
             Shutdown::Read => libc::SHUT_RD,
             Shutdown::Both => libc::SHUT_RDWR,
         };
-        try!(cvt(unsafe { libc::shutdown(self.0.raw(), how) }));
+        cvt(unsafe { libc::shutdown(self.0.raw(), how) })?;
         Ok(())
     }
+
+    pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+        setsockopt(self, libc::IPPROTO_TCP, libc::TCP_NODELAY, nodelay as c_int)
+    }
+
+    pub fn nodelay(&self) -> io::Result<bool> {
+        let raw: c_int = getsockopt(self, libc::IPPROTO_TCP, libc::TCP_NODELAY)?;
+        Ok(raw != 0)
+    }
+
+    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+        let mut nonblocking = nonblocking as libc::c_ulong;
+        cvt(unsafe { libc::ioctl(*self.as_inner(), libc::FIONBIO, &mut nonblocking) }).map(|_| ())
+    }
+
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        let raw: c_int = getsockopt(self, libc::SOL_SOCKET, libc::SO_ERROR)?;
+        if raw == 0 {
+            Ok(None)
+        } else {
+            Ok(Some(io::Error::from_raw_os_error(raw as i32)))
+        }
+    }
 }
 
 impl AsInner<c_int> for Socket {
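The new `set_nodelay`/`set_nonblocking`/`take_error` plumbing above appears to back the 1.9-era public socket option setters; a hedged, std-level round trip for orientation (loopback addresses and option values are just for illustration):

    use std::net::{TcpListener, TcpStream};

    fn main() {
        let listener = TcpListener::bind("127.0.0.1:0").unwrap();
        let stream = TcpStream::connect(listener.local_addr().unwrap()).unwrap();

        stream.set_nodelay(true).unwrap();               // TCP_NODELAY via setsockopt
        stream.set_nonblocking(true).unwrap();           // FIONBIO ioctl / O_NONBLOCK
        assert!(stream.take_error().unwrap().is_none()); // SO_ERROR via getsockopt
    }
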
index b6a0bd844094b1ddeeb9999aa4ae8c6bff4169db..94ebbd70ae83deb5c6194460c96aa613ee6997a2 100644 (file)
@@ -36,6 +36,7 @@ const TMPBUF_SZ: usize = 128;
 static ENV_LOCK: StaticMutex = StaticMutex::new();
 
 /// Returns the platform-specific value of errno
+#[cfg(not(target_os = "dragonfly"))]
 pub fn errno() -> i32 {
     extern {
         #[cfg_attr(any(target_os = "linux", target_os = "emscripten"),
@@ -47,7 +48,6 @@ pub fn errno() -> i32 {
                        target_env = "newlib"),
                    link_name = "__errno")]
         #[cfg_attr(target_os = "solaris", link_name = "___errno")]
-        #[cfg_attr(target_os = "dragonfly", link_name = "__dfly_error")]
         #[cfg_attr(any(target_os = "macos",
                        target_os = "ios",
                        target_os = "freebsd"),
@@ -60,6 +60,16 @@ pub fn errno() -> i32 {
     }
 }
 
+#[cfg(target_os = "dragonfly")]
+pub fn errno() -> i32 {
+    extern {
+        #[thread_local]
+        static errno: c_int;
+    }
+
+    errno as i32
+}
+
 /// Gets a detailed string description for the given error number.
 pub fn error_string(errno: i32) -> String {
     extern {
@@ -110,7 +120,7 @@ pub fn getcwd() -> io::Result<PathBuf> {
 
 pub fn chdir(p: &path::Path) -> io::Result<()> {
     let p: &OsStr = p.as_ref();
-    let p = try!(CString::new(p.as_bytes()));
+    let p = CString::new(p.as_bytes())?;
     unsafe {
         match libc::chdir(p.as_ptr()) == (0 as c_int) {
             true => Ok(()),
@@ -180,16 +190,16 @@ pub fn current_exe() -> io::Result<PathBuf> {
                        libc::KERN_PROC_PATHNAME as c_int,
                        -1 as c_int];
         let mut sz: libc::size_t = 0;
-        try!(cvt(libc::sysctl(mib.as_mut_ptr(), mib.len() as ::libc::c_uint,
-                              ptr::null_mut(), &mut sz, ptr::null_mut(),
-                              0 as libc::size_t)));
+        cvt(libc::sysctl(mib.as_mut_ptr(), mib.len() as ::libc::c_uint,
+                         ptr::null_mut(), &mut sz, ptr::null_mut(),
+                         0 as libc::size_t))?;
         if sz == 0 {
             return Err(io::Error::last_os_error())
         }
         let mut v: Vec<u8> = Vec::with_capacity(sz as usize);
-        try!(cvt(libc::sysctl(mib.as_mut_ptr(), mib.len() as ::libc::c_uint,
-                              v.as_mut_ptr() as *mut libc::c_void, &mut sz,
-                              ptr::null_mut(), 0 as libc::size_t)));
+        cvt(libc::sysctl(mib.as_mut_ptr(), mib.len() as ::libc::c_uint,
+                         v.as_mut_ptr() as *mut libc::c_void, &mut sz,
+                         ptr::null_mut(), 0 as libc::size_t))?;
         if sz == 0 {
             return Err(io::Error::last_os_error());
         }
@@ -217,11 +227,11 @@ pub fn current_exe() -> io::Result<PathBuf> {
                        libc::KERN_PROC_ARGV];
         let mib = mib.as_mut_ptr();
         let mut argv_len = 0;
-        try!(cvt(libc::sysctl(mib, 4, 0 as *mut _, &mut argv_len,
-                              0 as *mut _, 0)));
+        cvt(libc::sysctl(mib, 4, 0 as *mut _, &mut argv_len,
+                         0 as *mut _, 0))?;
         let mut argv = Vec::<*const libc::c_char>::with_capacity(argv_len as usize);
-        try!(cvt(libc::sysctl(mib, 4, argv.as_mut_ptr() as *mut _,
-                              &mut argv_len, 0 as *mut _, 0)));
+        cvt(libc::sysctl(mib, 4, argv.as_mut_ptr() as *mut _,
+                         &mut argv_len, 0 as *mut _, 0))?;
         argv.set_len(argv_len as usize);
         if argv[0].is_null() {
             return Err(io::Error::new(io::ErrorKind::Other,
@@ -460,7 +470,7 @@ pub fn env() -> Env {
 pub fn getenv(k: &OsStr) -> io::Result<Option<OsString>> {
     // environment variables with a nul byte can't be set, so their value is
     // always None as well
-    let k = try!(CString::new(k.as_bytes()));
+    let k = CString::new(k.as_bytes())?;
     let _g = ENV_LOCK.lock();
     Ok(unsafe {
         let s = libc::getenv(k.as_ptr()) as *const _;
@@ -473,8 +483,8 @@ pub fn getenv(k: &OsStr) -> io::Result<Option<OsString>> {
 }
 
 pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
-    let k = try!(CString::new(k.as_bytes()));
-    let v = try!(CString::new(v.as_bytes()));
+    let k = CString::new(k.as_bytes())?;
+    let v = CString::new(v.as_bytes())?;
     let _g = ENV_LOCK.lock();
     cvt(unsafe {
         libc::setenv(k.as_ptr(), v.as_ptr(), 1)
@@ -482,7 +492,7 @@ pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
 }
 
 pub fn unsetenv(n: &OsStr) -> io::Result<()> {
-    let nbuf = try!(CString::new(n.as_bytes()));
+    let nbuf = CString::new(n.as_bytes())?;
     let _g = ENV_LOCK.lock();
     cvt(unsafe {
         libc::unsetenv(nbuf.as_ptr())
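These `getenv`/`setenv`/`unsetenv` wrappers (serialized by `ENV_LOCK`) sit underneath `std::env`; a quick round trip through the public API for orientation (the variable name is arbitrary):

    use std::env;

    fn main() {
        env::set_var("DEMO_KEY", "demo-value");                  // -> setenv
        assert_eq!(env::var("DEMO_KEY").unwrap(), "demo-value"); // -> getenv
        env::remove_var("DEMO_KEY");                             // -> unsetenv
        assert!(env::var("DEMO_KEY").is_err());
    }
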
index 667f0f9e6bf62044775dbad7850cfcc0b2685db8..beca2d467536d82a4ea9acf2956b9df7055b3737 100644 (file)
@@ -8,8 +8,12 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use prelude::v1::*;
+
+use cmp;
 use io;
 use libc::{self, c_int};
+use mem;
 use sys::cvt_r;
 use sys::fd::FileDesc;
 
@@ -57,6 +61,10 @@ impl AnonPipe {
         self.0.read(buf)
     }
 
+    pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        self.0.read_to_end(buf)
+    }
+
     pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
         self.0.write(buf)
     }
@@ -64,3 +72,54 @@ impl AnonPipe {
     pub fn fd(&self) -> &FileDesc { &self.0 }
     pub fn into_fd(self) -> FileDesc { self.0 }
 }
+
+pub fn read2(p1: AnonPipe,
+             v1: &mut Vec<u8>,
+             p2: AnonPipe,
+             v2: &mut Vec<u8>) -> io::Result<()> {
+    // Set both pipes into nonblocking mode as we're gonna be reading from both
+    // in the `select` loop below, and we wouldn't want one to block the other!
+    let p1 = p1.into_fd();
+    let p2 = p2.into_fd();
+    p1.set_nonblocking(true);
+    p2.set_nonblocking(true);
+
+    let max = cmp::max(p1.raw(), p2.raw());
+    loop {
+        // wait for either pipe to become readable using `select`
+        cvt_r(|| unsafe {
+            let mut read: libc::fd_set = mem::zeroed();
+            libc::FD_SET(p1.raw(), &mut read);
+            libc::FD_SET(p2.raw(), &mut read);
+            libc::select(max + 1, &mut read, 0 as *mut _, 0 as *mut _,
+                         0 as *mut _)
+        })?;
+
+        // Read as much as we can from each pipe, ignoring EWOULDBLOCK or
+        // EAGAIN. If we hit EOF, then this will happen because the underlying
+        // reader will return Ok(0), in which case we'll see `Ok` ourselves. In
+        // this case we flip the other fd back into blocking mode and read
+        // whatever's leftover on that file descriptor.
+        let read = |fd: &FileDesc, dst: &mut Vec<u8>| {
+            match fd.read_to_end(dst) {
+                Ok(_) => Ok(true),
+                Err(e) => {
+                    if e.raw_os_error() == Some(libc::EWOULDBLOCK) ||
+                       e.raw_os_error() == Some(libc::EAGAIN) {
+                        Ok(false)
+                    } else {
+                        Err(e)
+                    }
+                }
+            }
+        };
+        if read(&p1, v1)? {
+            p2.set_nonblocking(false);
+            return p2.read_to_end(v2).map(|_| ());
+        }
+        if read(&p2, v2)? {
+            p1.set_nonblocking(false);
+            return p1.read_to_end(v1).map(|_| ());
+        }
+    }
+}
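The `read2` select loop above exists so both ends of a child's output can be drained at once; a child that fills one pipe while the parent blocks on the other would otherwise deadlock. A portable illustration through the public API that ultimately relies on this path (`sh` is an assumption about the host, not part of the commit):

    use std::process::Command;

    fn main() {
        // Any child that writes to both streams demonstrates the point.
        let out = Command::new("sh")
            .arg("-c")
            .arg("echo to-stdout; echo to-stderr 1>&2")
            .output()                 // drains stdout and stderr together
            .unwrap();

        assert_eq!(String::from_utf8_lossy(&out.stdout).trim(), "to-stdout");
        assert_eq!(String::from_utf8_lossy(&out.stderr).trim(), "to-stderr");
    }
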
index 28475f50ce63e49267b9f7e63314a7793e33fb6f..270c2096b2c3b33aa34f3fca35e8fbd2fa600ec6 100644 (file)
@@ -216,7 +216,7 @@ impl Command {
         self.stderr = Some(stderr);
     }
 
-    pub fn spawn(&mut self, default: Stdio)
+    pub fn spawn(&mut self, default: Stdio, needs_stdin: bool)
                  -> io::Result<(Process, StdioPipes)> {
         const CLOEXEC_MSG_FOOTER: &'static [u8] = b"NOEX";
 
@@ -225,11 +225,11 @@ impl Command {
                                       "nul byte found in provided data"));
         }
 
-        let (ours, theirs) = try!(self.setup_io(default));
-        let (input, output) = try!(sys::pipe::anon_pipe());
+        let (ours, theirs) = self.setup_io(default, needs_stdin)?;
+        let (input, output) = sys::pipe::anon_pipe()?;
 
         let pid = unsafe {
-            match try!(cvt(libc::fork())) {
+            match cvt(libc::fork())? {
                 0 => {
                     drop(input);
                     let err = self.do_exec(theirs);
@@ -298,7 +298,7 @@ impl Command {
                                   "nul byte found in provided data")
         }
 
-        match self.setup_io(default) {
+        match self.setup_io(default, true) {
             Ok((_, theirs)) => unsafe { self.do_exec(theirs) },
             Err(e) => e,
         }
@@ -335,7 +335,7 @@ impl Command {
     // have the drop glue anyway because this code never returns (the
     // child will either exec() or invoke libc::exit)
     unsafe fn do_exec(&mut self, stdio: ChildPipes) -> io::Error {
-        macro_rules! try {
+        macro_rules! t {
             ($e:expr) => (match $e {
                 Ok(e) => e,
                 Err(e) => return e,
@@ -343,17 +343,17 @@ impl Command {
         }
 
         if let Some(fd) = stdio.stdin.fd() {
-            try!(cvt_r(|| libc::dup2(fd, libc::STDIN_FILENO)));
+            t!(cvt_r(|| libc::dup2(fd, libc::STDIN_FILENO)));
         }
         if let Some(fd) = stdio.stdout.fd() {
-            try!(cvt_r(|| libc::dup2(fd, libc::STDOUT_FILENO)));
+            t!(cvt_r(|| libc::dup2(fd, libc::STDOUT_FILENO)));
         }
         if let Some(fd) = stdio.stderr.fd() {
-            try!(cvt_r(|| libc::dup2(fd, libc::STDERR_FILENO)));
+            t!(cvt_r(|| libc::dup2(fd, libc::STDERR_FILENO)));
         }
 
         if let Some(u) = self.gid {
-            try!(cvt(libc::setgid(u as gid_t)));
+            t!(cvt(libc::setgid(u as gid_t)));
         }
         if let Some(u) = self.uid {
             // When dropping privileges from root, the `setgroups` call
@@ -365,7 +365,7 @@ impl Command {
             // privilege dropping function.
             let _ = libc::setgroups(0, ptr::null());
 
-            try!(cvt(libc::setuid(u as uid_t)));
+            t!(cvt(libc::setuid(u as uid_t)));
         }
         if self.session_leader {
             // Don't check the error of setsid because it fails if we're the
@@ -374,7 +374,7 @@ impl Command {
             let _ = libc::setsid();
         }
         if let Some(ref cwd) = self.cwd {
-            try!(cvt(libc::chdir(cwd.as_ptr())));
+            t!(cvt(libc::chdir(cwd.as_ptr())));
         }
         if let Some(ref envp) = self.envp {
             *sys::os::environ() = envp.as_ptr();
@@ -390,17 +390,17 @@ impl Command {
             // need to clean things up now to avoid confusing the program
             // we're about to run.
             let mut set: libc::sigset_t = mem::uninitialized();
-            try!(cvt(libc::sigemptyset(&mut set)));
-            try!(cvt(libc::pthread_sigmask(libc::SIG_SETMASK, &set,
-                                           ptr::null_mut())));
-            let ret = libc::signal(libc::SIGPIPE, libc::SIG_DFL);
+            t!(cvt(libc::sigemptyset(&mut set)));
+            t!(cvt(libc::pthread_sigmask(libc::SIG_SETMASK, &set,
+                                         ptr::null_mut())));
+            let ret = super::signal(libc::SIGPIPE, libc::SIG_DFL);
             if ret == libc::SIG_ERR {
                 return io::Error::last_os_error()
             }
         }
 
         for callback in self.closures.iter_mut() {
-            try!(callback());
+            t!(callback());
         }
 
         libc::execvp(self.argv[0], self.argv.as_ptr());
@@ -408,13 +408,16 @@ impl Command {
     }
 
 
-    fn setup_io(&self, default: Stdio) -> io::Result<(StdioPipes, ChildPipes)> {
-        let stdin = self.stdin.as_ref().unwrap_or(&default);
+    fn setup_io(&self, default: Stdio, needs_stdin: bool)
+                -> io::Result<(StdioPipes, ChildPipes)> {
+        let null = Stdio::Null;
+        let default_stdin = if needs_stdin {&default} else {&null};
+        let stdin = self.stdin.as_ref().unwrap_or(default_stdin);
         let stdout = self.stdout.as_ref().unwrap_or(&default);
         let stderr = self.stderr.as_ref().unwrap_or(&default);
-        let (their_stdin, our_stdin) = try!(stdin.to_child_stdio(true));
-        let (their_stdout, our_stdout) = try!(stdout.to_child_stdio(false));
-        let (their_stderr, our_stderr) = try!(stderr.to_child_stdio(false));
+        let (their_stdin, our_stdin) = stdin.to_child_stdio(true)?;
+        let (their_stdout, our_stdout) = stdout.to_child_stdio(false)?;
+        let (their_stderr, our_stderr) = stderr.to_child_stdio(false)?;
         let ours = StdioPipes {
             stdin: our_stdin,
             stdout: our_stdout,
@@ -451,14 +454,14 @@ impl Stdio {
             // overwritten prematurely.
             Stdio::Fd(ref fd) => {
                 if fd.raw() >= 0 && fd.raw() <= libc::STDERR_FILENO {
-                    Ok((ChildStdio::Owned(try!(fd.duplicate())), None))
+                    Ok((ChildStdio::Owned(fd.duplicate()?), None))
                 } else {
                     Ok((ChildStdio::Explicit(fd.raw()), None))
                 }
             }
 
             Stdio::MakePipe => {
-                let (reader, writer) = try!(pipe::anon_pipe());
+                let (reader, writer) = pipe::anon_pipe()?;
                 let (ours, theirs) = if readable {
                     (writer, reader)
                 } else {
@@ -474,7 +477,7 @@ impl Stdio {
                 let path = unsafe {
                     CStr::from_ptr("/dev/null\0".as_ptr() as *const _)
                 };
-                let fd = try!(File::open_c(&path, &opts));
+                let fd = File::open_c(&path, &opts)?;
                 Ok((ChildStdio::Owned(fd.into_fd()), None))
             }
         }
@@ -505,9 +508,9 @@ fn pair_to_key(key: &OsStr, value: &OsStr, saw_nul: &mut bool) -> CString {
 
 impl fmt::Debug for Command {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        try!(write!(f, "{:?}", self.program));
+        write!(f, "{:?}", self.program)?;
         for arg in &self.args {
-            try!(write!(f, " {:?}", arg));
+            write!(f, " {:?}", arg)?;
         }
         Ok(())
     }
@@ -586,7 +589,7 @@ impl Process {
             return Ok(status)
         }
         let mut status = 0 as c_int;
-        try!(cvt_r(|| unsafe { libc::waitpid(self.pid, &mut status, 0) }));
+        cvt_r(|| unsafe { libc::waitpid(self.pid, &mut status, 0) })?;
         self.status = Some(ExitStatus(status));
         Ok(ExitStatus(status))
     }
@@ -648,7 +651,7 @@ mod tests {
             cmd.stdin(Stdio::MakePipe);
             cmd.stdout(Stdio::MakePipe);
 
-            let (mut cat, mut pipes) = t!(cmd.spawn(Stdio::Null));
+            let (mut cat, mut pipes) = t!(cmd.spawn(Stdio::Null, true));
             let stdin_write = pipes.stdin.take().unwrap();
             let stdout_read = pipes.stdout.take().unwrap();
 
index fa504ade084fc3f3b4d043f318d09df9342625e6..92c3bf8829adf272367066f1148a8afde4bd291a 100644 (file)
@@ -138,7 +138,7 @@ mod imp {
                 return Ok(OsRng { inner: OsGetrandomRng });
             }
 
-            let reader = try!(File::open("/dev/urandom"));
+            let reader = File::open("/dev/urandom")?;
             let reader_rng = ReaderRng::new(reader);
 
             Ok(OsRng { inner: OsReaderRng(reader_rng) })
index ccbb14677c7e404143cf7bc31d6f6870133ec9cc..37d1d9a969ed8d7a435a0fdf4b9818938348a1cf 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use prelude::v1::*;
+
 use io;
 use libc;
 use sys::fd::FileDesc;
@@ -25,6 +27,13 @@ impl Stdin {
         fd.into_raw();
         ret
     }
+
+    pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        let fd = FileDesc::new(libc::STDIN_FILENO);
+        let ret = fd.read_to_end(buf);
+        fd.into_raw();
+        ret
+    }
 }
 
 impl Stdout {
index 793a2ecae89f17729733926468709fc799619ffb..6d966a0f6944c681cbd387ab2a346a5754ef8ad8 100644 (file)
@@ -13,7 +13,7 @@ use prelude::v1::*;
 use alloc::boxed::FnBox;
 use cmp;
 #[cfg(not(any(target_env = "newlib", target_os = "solaris")))]
-use ffi::CString;
+use ffi::CStr;
 use io;
 use libc;
 use mem;
@@ -84,15 +84,12 @@ impl Thread {
     #[cfg(any(target_os = "linux",
               target_os = "android",
               target_os = "emscripten"))]
-    pub fn set_name(name: &str) {
+    pub fn set_name(name: &CStr) {
         const PR_SET_NAME: libc::c_int = 15;
-        let cname = CString::new(name).unwrap_or_else(|_| {
-            panic!("thread name may not contain interior null bytes")
-        });
         // pthread wrapper only appeared in glibc 2.12, so we use syscall
         // directly.
         unsafe {
-            libc::prctl(PR_SET_NAME, cname.as_ptr() as libc::c_ulong, 0, 0, 0);
+            libc::prctl(PR_SET_NAME, name.as_ptr() as libc::c_ulong, 0, 0, 0);
         }
     }
 
@@ -100,32 +97,30 @@ impl Thread {
               target_os = "dragonfly",
               target_os = "bitrig",
               target_os = "openbsd"))]
-    pub fn set_name(name: &str) {
-        let cname = CString::new(name).unwrap();
+    pub fn set_name(name: &CStr) {
         unsafe {
-            libc::pthread_set_name_np(libc::pthread_self(), cname.as_ptr());
+            libc::pthread_set_name_np(libc::pthread_self(), name.as_ptr());
         }
     }
 
     #[cfg(any(target_os = "macos", target_os = "ios"))]
-    pub fn set_name(name: &str) {
-        let cname = CString::new(name).unwrap();
+    pub fn set_name(name: &CStr) {
         unsafe {
-            libc::pthread_setname_np(cname.as_ptr());
+            libc::pthread_setname_np(name.as_ptr());
         }
     }
 
     #[cfg(target_os = "netbsd")]
-    pub fn set_name(name: &str) {
+    pub fn set_name(name: &CStr) {
+        use ffi::CString;
         let cname = CString::new(&b"%s"[..]).unwrap();
-        let carg = CString::new(name).unwrap();
         unsafe {
             libc::pthread_setname_np(libc::pthread_self(), cname.as_ptr(),
-                                     carg.as_ptr() as *mut libc::c_void);
+                                     name.as_ptr() as *mut libc::c_void);
         }
     }
     #[cfg(any(target_env = "newlib", target_os = "solaris"))]
-    pub fn set_name(_name: &str) {
+    pub fn set_name(_name: &CStr) {
         // Newlib and Illumos has no way to set a thread name.
     }
 
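My reading of the `&str` → `&CStr` change above: the `CString` conversion (and its panic on interior NULs) now happens once in the platform-neutral caller rather than in each platform's `set_name`. The user-visible behavior is unchanged; a small sketch of it through the public API:

    use std::thread;

    fn main() {
        let t = thread::Builder::new()
            .name("worker".to_string())   // validated/converted once up front
            .spawn(|| {
                assert_eq!(thread::current().name(), Some("worker"));
            })
            .unwrap();
        t.join().unwrap();
    }
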
index dd248416f84cfb98db616aec277918aa897a37ae..cc7abe25e35e5a1edc902e6608996a8da7344172 100644 (file)
@@ -88,11 +88,11 @@ mod inner {
                         -> Result<Duration, Duration> {
             if self >= other {
                 Ok(if self.t.tv_usec >= other.t.tv_usec {
-                    Duration::new(self.t.tv_sec as u64 - other.t.tv_sec as u64,
-                                  (self.t.tv_usec as u32 -
-                                   other.t.tv_usec as u32) * 1000)
+                    Duration::new((self.t.tv_sec - other.t.tv_sec) as u64,
+                                  ((self.t.tv_usec -
+                                    other.t.tv_usec) as u32) * 1000)
                 } else {
-                    Duration::new(self.t.tv_sec as u64 - 1 - other.t.tv_sec as u64,
+                    Duration::new((self.t.tv_sec - 1 - other.t.tv_sec) as u64,
                                   (self.t.tv_usec as u32 + (USEC_PER_SEC as u32) -
                                    other.t.tv_usec as u32) * 1000)
                 })
@@ -303,8 +303,13 @@ mod inner {
         }
     }
 
+    #[cfg(not(target_os = "dragonfly"))]
+    pub type clock_t = libc::c_int;
+    #[cfg(target_os = "dragonfly")]
+    pub type clock_t = libc::c_ulong;
+
     impl Timespec {
-        pub fn now(clock: libc::c_int) -> Timespec {
+        pub fn now(clock: clock_t) -> Timespec {
             let mut t = Timespec {
                 t: libc::timespec {
                     tv_sec: 0,
index e6f85c08d1246cf06ee0f88ad9935b49cbf83262..99ab8741159e39075825b1a4e01fab4f2dea9cf1 100644 (file)
@@ -75,11 +75,5 @@ unsafe fn fetch(name: &str) -> usize {
         Ok(cstr) => cstr,
         Err(..) => return 0,
     };
-    let lib = libc::dlopen(0 as *const _, libc::RTLD_LAZY);
-    if lib.is_null() {
-        return 0
-    }
-    let ret = libc::dlsym(lib, name.as_ptr()) as usize;
-    libc::dlclose(lib);
-    return ret
+    libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr()) as usize
 }
index 80ff0cb0453ad1570c6cc14f275daa540f57edb4..0e10a8d8e8dd2619b6da4010a9a5816e722b0d14 100644 (file)
 
 use io::prelude::*;
 
-use dynamic_lib::DynamicLibrary;
 use io;
 use libc::c_void;
 use mem;
-use path::Path;
 use ptr;
 use sync::StaticMutex;
 use sys::c;
+use sys::dynamic_lib::DynamicLibrary;
 
-macro_rules! sym{ ($lib:expr, $e:expr, $t:ident) => (unsafe {
-    let lib = $lib;
-    match lib.symbol($e) {
-        Ok(f) => $crate::mem::transmute::<*mut u8, $t>(f),
-        Err(..) => return Ok(())
-    }
-}) }
+macro_rules! sym {
+    ($lib:expr, $e:expr, $t:ident) => (
+        match $lib.symbol($e) {
+            Ok(f) => $crate::mem::transmute::<usize, $t>(f),
+            Err(..) => return Ok(())
+        }
+    )
+}
 
 #[cfg(target_env = "msvc")]
 #[path = "printing/msvc.rs"]
@@ -52,16 +52,16 @@ mod printing;
 mod printing;
 
 type SymInitializeFn =
-    extern "system" fn(c::HANDLE, *mut c_void,
-                       c::BOOL) -> c::BOOL;
+    unsafe extern "system" fn(c::HANDLE, *mut c_void,
+                              c::BOOL) -> c::BOOL;
 type SymCleanupFn =
-    extern "system" fn(c::HANDLE) -> c::BOOL;
+    unsafe extern "system" fn(c::HANDLE) -> c::BOOL;
 
 type StackWalk64Fn =
-    extern "system" fn(c::DWORD, c::HANDLE, c::HANDLE,
-                       *mut c::STACKFRAME64, *mut c::CONTEXT,
-                       *mut c_void, *mut c_void,
-                       *mut c_void, *mut c_void) -> c::BOOL;
+    unsafe extern "system" fn(c::DWORD, c::HANDLE, c::HANDLE,
+                              *mut c::STACKFRAME64, *mut c::CONTEXT,
+                              *mut c_void, *mut c_void,
+                              *mut c_void, *mut c_void) -> c::BOOL;
 
 #[cfg(target_arch = "x86")]
 pub fn init_frame(frame: &mut c::STACKFRAME64,
@@ -93,7 +93,9 @@ struct Cleanup {
 }
 
 impl Drop for Cleanup {
-    fn drop(&mut self) { (self.SymCleanup)(self.handle); }
+    fn drop(&mut self) {
+        unsafe { (self.SymCleanup)(self.handle); }
+    }
 }
 
 pub fn write(w: &mut Write) -> io::Result<()> {
@@ -102,52 +104,50 @@ pub fn write(w: &mut Write) -> io::Result<()> {
     static LOCK: StaticMutex = StaticMutex::new();
     let _g = LOCK.lock();
 
-    // Open up dbghelp.dll, we don't link to it explicitly because it can't
-    // always be found. Additionally, it's nice having fewer dependencies.
-    let path = Path::new("dbghelp.dll");
-    let dbghelp = match DynamicLibrary::open(Some(&path)) {
+    let dbghelp = match DynamicLibrary::open("dbghelp.dll") {
         Ok(lib) => lib,
         Err(..) => return Ok(()),
     };
-
-    // Fetch the symbols necessary from dbghelp.dll
-    let SymInitialize = sym!(&dbghelp, "SymInitialize", SymInitializeFn);
-    let SymCleanup = sym!(&dbghelp, "SymCleanup", SymCleanupFn);
-    let StackWalk64 = sym!(&dbghelp, "StackWalk64", StackWalk64Fn);
-
-    // Allocate necessary structures for doing the stack walk
-    let process = unsafe { c::GetCurrentProcess() };
-    let thread = unsafe { c::GetCurrentThread() };
-    let mut context: c::CONTEXT = unsafe { mem::zeroed() };
-    unsafe { c::RtlCaptureContext(&mut context); }
-    let mut frame: c::STACKFRAME64 = unsafe { mem::zeroed() };
-    let image = init_frame(&mut frame, &context);
-
-    // Initialize this process's symbols
-    let ret = SymInitialize(process, ptr::null_mut(), c::TRUE);
-    if ret != c::TRUE { return Ok(()) }
-    let _c = Cleanup { handle: process, SymCleanup: SymCleanup };
-
-    // And now that we're done with all the setup, do the stack walking!
-    // Start from -1 to avoid printing this stack frame, which will
-    // always be exactly the same.
-    let mut i = -1;
-    try!(write!(w, "stack backtrace:\n"));
-    while StackWalk64(image, process, thread, &mut frame, &mut context,
-                      ptr::null_mut(),
-                      ptr::null_mut(),
-                      ptr::null_mut(),
-                      ptr::null_mut()) == c::TRUE {
-        let addr = frame.AddrPC.Offset;
-        if addr == frame.AddrReturn.Offset || addr == 0 ||
-           frame.AddrReturn.Offset == 0 { break }
-
-        i += 1;
-
-        if i >= 0 {
-            try!(printing::print(w, i, addr-1, &dbghelp, process));
+    unsafe {
+        // Fetch the symbols necessary from dbghelp.dll
+        let SymInitialize = sym!(dbghelp, "SymInitialize", SymInitializeFn);
+        let SymCleanup = sym!(dbghelp, "SymCleanup", SymCleanupFn);
+        let StackWalk64 = sym!(dbghelp, "StackWalk64", StackWalk64Fn);
+
+        // Allocate necessary structures for doing the stack walk
+        let process = c::GetCurrentProcess();
+        let thread = c::GetCurrentThread();
+        let mut context: c::CONTEXT = mem::zeroed();
+        c::RtlCaptureContext(&mut context);
+        let mut frame: c::STACKFRAME64 = mem::zeroed();
+        let image = init_frame(&mut frame, &context);
+
+        // Initialize this process's symbols
+        let ret = SymInitialize(process, ptr::null_mut(), c::TRUE);
+        if ret != c::TRUE { return Ok(()) }
+        let _c = Cleanup { handle: process, SymCleanup: SymCleanup };
+
+        // And now that we're done with all the setup, do the stack walking!
+        // Start from -1 to avoid printing this stack frame, which will
+        // always be exactly the same.
+        let mut i = -1;
+        write!(w, "stack backtrace:\n")?;
+        while StackWalk64(image, process, thread, &mut frame, &mut context,
+                          ptr::null_mut(),
+                          ptr::null_mut(),
+                          ptr::null_mut(),
+                          ptr::null_mut()) == c::TRUE {
+            let addr = frame.AddrPC.Offset;
+            if addr == frame.AddrReturn.Offset || addr == 0 ||
+               frame.AddrReturn.Offset == 0 { break }
+
+            i += 1;
+
+            if i >= 0 {
+                printing::print(w, i, addr - 1, process, &dbghelp)?;
+            }
         }
-    }
 
-    Ok(())
+        Ok(())
+    }
 }
index 5cbfec01bedaa897f1885bf7b596bda55b663329..ab24b9e6fd601560e37f5f78e96523ed87442f89 100644 (file)
@@ -12,8 +12,9 @@
 
 #![allow(bad_style)]
 #![cfg_attr(test, allow(dead_code))]
+#![unstable(issue = "0", feature = "windows_c")]
 
-use os::raw::{c_int, c_uint, c_ulong, c_long, c_longlong, c_ushort};
+use os::raw::{c_int, c_uint, c_ulong, c_long, c_longlong, c_ushort,};
 use os::raw::{c_char, c_ulonglong};
 use libc::{wchar_t, size_t, c_void};
 use ptr;
@@ -114,6 +115,8 @@ pub const FILE_FLAG_OPEN_REPARSE_POINT: DWORD = 0x00200000;
 pub const FILE_FLAG_BACKUP_SEMANTICS: DWORD = 0x02000000;
 pub const SECURITY_SQOS_PRESENT: DWORD = 0x00100000;
 
+pub const FIONBIO: c_ulong = 0x8004667e;
+
 #[repr(C)]
 #[derive(Copy)]
 pub struct WIN32_FIND_DATAW {
@@ -151,8 +154,6 @@ pub const WSAESHUTDOWN: c_int = 10058;
 pub const WSAETIMEDOUT: c_int = 10060;
 pub const WSAECONNREFUSED: c_int = 10061;
 
-pub const NI_MAXHOST: DWORD = 1025;
-
 pub const MAX_PROTOCOL_CHAIN: DWORD = 7;
 
 pub const TOKEN_READ: DWORD = 0x20008;
@@ -179,6 +180,7 @@ pub const ERROR_PATH_NOT_FOUND: DWORD = 3;
 pub const ERROR_ACCESS_DENIED: DWORD = 5;
 pub const ERROR_INVALID_HANDLE: DWORD = 6;
 pub const ERROR_NO_MORE_FILES: DWORD = 18;
+pub const ERROR_HANDLE_EOF: DWORD = 38;
 pub const ERROR_BROKEN_PIPE: DWORD = 109;
 pub const ERROR_CALL_NOT_IMPLEMENTED: DWORD = 120;
 pub const ERROR_INSUFFICIENT_BUFFER: DWORD = 122;
@@ -186,6 +188,7 @@ pub const ERROR_ALREADY_EXISTS: DWORD = 183;
 pub const ERROR_NO_DATA: DWORD = 232;
 pub const ERROR_ENVVAR_NOT_FOUND: DWORD = 203;
 pub const ERROR_OPERATION_ABORTED: DWORD = 995;
+pub const ERROR_IO_PENDING: DWORD = 997;
 pub const ERROR_TIMEOUT: DWORD = 0x5B4;
 
 pub const INVALID_HANDLE_VALUE: HANDLE = !0 as HANDLE;
@@ -223,6 +226,33 @@ pub const SOL_SOCKET: c_int = 0xffff;
 pub const SO_RCVTIMEO: c_int = 0x1006;
 pub const SO_SNDTIMEO: c_int = 0x1005;
 pub const SO_REUSEADDR: c_int = 0x0004;
+pub const IPPROTO_IP: c_int = 0;
+pub const IPPROTO_TCP: c_int = 6;
+pub const IPPROTO_IPV6: c_int = 41;
+pub const TCP_NODELAY: c_int = 0x0001;
+pub const IP_TTL: c_int = 4;
+pub const IPV6_V6ONLY: c_int = 27;
+pub const SO_ERROR: c_int = 0x1007;
+pub const SO_BROADCAST: c_int = 0x0020;
+pub const IP_MULTICAST_LOOP: c_int = 11;
+pub const IPV6_MULTICAST_LOOP: c_int = 11;
+pub const IP_MULTICAST_TTL: c_int = 10;
+pub const IP_ADD_MEMBERSHIP: c_int = 12;
+pub const IP_DROP_MEMBERSHIP: c_int = 13;
+pub const IPV6_ADD_MEMBERSHIP: c_int = 12;
+pub const IPV6_DROP_MEMBERSHIP: c_int = 13;
+
+#[repr(C)]
+pub struct ip_mreq {
+    pub imr_multiaddr: in_addr,
+    pub imr_interface: in_addr,
+}
+
+#[repr(C)]
+pub struct ipv6_mreq {
+    pub ipv6mr_multiaddr: in6_addr,
+    pub ipv6mr_interface: c_uint,
+}
 
 pub const VOLUME_NAME_DOS: DWORD = 0x0;
 pub const MOVEFILE_REPLACE_EXISTING: DWORD = 1;
@@ -263,6 +293,14 @@ pub const EXCEPTION_UNWIND: DWORD = EXCEPTION_UNWINDING |
                                     EXCEPTION_TARGET_UNWIND |
                                     EXCEPTION_COLLIDED_UNWIND;
 
+pub const PIPE_ACCESS_INBOUND: DWORD = 0x00000001;
+pub const FILE_FLAG_FIRST_PIPE_INSTANCE: DWORD = 0x00080000;
+pub const FILE_FLAG_OVERLAPPED: DWORD = 0x40000000;
+pub const PIPE_WAIT: DWORD = 0x00000000;
+pub const PIPE_TYPE_BYTE: DWORD = 0x00000000;
+pub const PIPE_REJECT_REMOTE_CLIENTS: DWORD = 0x00000008;
+pub const PIPE_READMODE_BYTE: DWORD = 0x00000000;
+
 #[repr(C)]
 #[cfg(target_arch = "x86")]
 pub struct WSADATA {
@@ -833,6 +871,7 @@ extern "system" {
                       lpProtocolInfo: LPWSAPROTOCOL_INFO,
                       g: GROUP,
                       dwFlags: DWORD) -> SOCKET;
+    pub fn ioctlsocket(s: SOCKET, cmd: c_long, argp: *mut c_ulong) -> c_int;
     pub fn InitializeCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
     pub fn EnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
     pub fn TryEnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION) -> BOOLEAN;
@@ -883,10 +922,6 @@ extern "system" {
                            nOutBufferSize: DWORD,
                            lpBytesReturned: LPDWORD,
                            lpOverlapped: LPOVERLAPPED) -> BOOL;
-    pub fn CreatePipe(hReadPipe: LPHANDLE,
-                      hWritePipe: LPHANDLE,
-                      lpPipeAttributes: LPSECURITY_ATTRIBUTES,
-                      nSize: DWORD) -> BOOL;
     pub fn CreateThread(lpThreadAttributes: LPSECURITY_ATTRIBUTES,
                         dwStackSize: SIZE_T,
                         lpStartAddress: extern "system" fn(*mut c_void)
@@ -1062,18 +1097,11 @@ extern "system" {
                        hints: *const ADDRINFOA,
                        res: *mut *mut ADDRINFOA) -> c_int;
     pub fn freeaddrinfo(res: *mut ADDRINFOA);
-    pub fn getnameinfo(sa: *const SOCKADDR, salen: c_int,
-                       host: *mut c_char, hostlen: DWORD,
-                       serv: *mut c_char, servlen: DWORD,
-                       flags: c_int) -> c_int;
 
     pub fn LoadLibraryW(name: LPCWSTR) -> HMODULE;
-    pub fn GetModuleHandleExW(dwFlags: DWORD, name: LPCWSTR,
-                              handle: *mut HMODULE) -> BOOL;
+    pub fn FreeLibrary(handle: HMODULE) -> BOOL;
     pub fn GetProcAddress(handle: HMODULE,
                           name: LPCSTR) -> *mut c_void;
-    pub fn FreeLibrary(handle: HMODULE) -> BOOL;
-    pub fn SetErrorMode(uMode: c_uint) -> c_uint;
     pub fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE;
     pub fn CryptAcquireContextA(phProv: *mut HCRYPTPROV,
                                 pszContainer: LPCSTR,
@@ -1099,6 +1127,29 @@ extern "system" {
                        OriginalContext: *const CONTEXT,
                        HistoryTable: *const UNWIND_HISTORY_TABLE);
     pub fn GetSystemTimeAsFileTime(lpSystemTimeAsFileTime: LPFILETIME);
+
+    pub fn CreateEventW(lpEventAttributes: LPSECURITY_ATTRIBUTES,
+                        bManualReset: BOOL,
+                        bInitialState: BOOL,
+                        lpName: LPCWSTR) -> HANDLE;
+    pub fn WaitForMultipleObjects(nCount: DWORD,
+                                  lpHandles: *const HANDLE,
+                                  bWaitAll: BOOL,
+                                  dwMilliseconds: DWORD) -> DWORD;
+    pub fn CreateNamedPipeW(lpName: LPCWSTR,
+                            dwOpenMode: DWORD,
+                            dwPipeMode: DWORD,
+                            nMaxInstances: DWORD,
+                            nOutBufferSize: DWORD,
+                            nInBufferSize: DWORD,
+                            nDefaultTimeOut: DWORD,
+                            lpSecurityAttributes: LPSECURITY_ATTRIBUTES)
+                            -> HANDLE;
+    pub fn CancelIo(handle: HANDLE) -> BOOL;
+    pub fn GetOverlappedResult(hFile: HANDLE,
+                               lpOverlapped: LPOVERLAPPED,
+                               lpNumberOfBytesTransferred: LPDWORD,
+                               bWait: BOOL) -> BOOL;
 }
 
 // Functions that aren't available on Windows XP, but we still use them and just
@@ -1117,10 +1168,6 @@ compat_fn! {
                                      _dwFlags: DWORD) -> DWORD {
         SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0
     }
-    pub fn SetThreadErrorMode(_dwNewMode: DWORD,
-                              _lpOldMode: *mut DWORD) -> c_uint {
-        SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0
-    }
     pub fn SetThreadStackGuarantee(_size: *mut c_ulong) -> BOOL {
         SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0
     }
diff --git a/src/libstd/sys/windows/dynamic_lib.rs b/src/libstd/sys/windows/dynamic_lib.rs
new file mode 100644 (file)
index 0000000..dde13ec
--- /dev/null
@@ -0,0 +1,55 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use prelude::v1::*;
+use os::windows::prelude::*;
+
+use ffi::{CString, OsStr};
+use io;
+use sys::c;
+
+pub struct DynamicLibrary {
+    handle: c::HMODULE,
+}
+
+impl DynamicLibrary {
+    pub fn open(filename: &str) -> io::Result<DynamicLibrary> {
+        let filename = OsStr::new(filename)
+                             .encode_wide()
+                             .chain(Some(0))
+                             .collect::<Vec<_>>();
+        let result = unsafe {
+            c::LoadLibraryW(filename.as_ptr())
+        };
+        if result.is_null() {
+            Err(io::Error::last_os_error())
+        } else {
+            Ok(DynamicLibrary { handle: result })
+        }
+    }
+
+    pub fn symbol(&self, symbol: &str) -> io::Result<usize> {
+        let symbol = CString::new(symbol)?;
+        unsafe {
+            match c::GetProcAddress(self.handle, symbol.as_ptr()) as usize {
+                0 => Err(io::Error::last_os_error()),
+                n => Ok(n),
+            }
+        }
+    }
+}
+
+impl Drop for DynamicLibrary {
+    fn drop(&mut self) {
+        unsafe {
+            c::FreeLibrary(self.handle);
+        }
+    }
+}
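The `OsStr` → NUL-terminated UTF-16 conversion in `open` above is the usual pattern for handing strings to wide Win32 APIs. A standalone sketch of just that conversion (Windows-only imports, guarded so it still builds elsewhere):

    #[cfg(windows)]
    fn to_wide(s: &str) -> Vec<u16> {
        use std::ffi::OsStr;
        use std::os::windows::ffi::OsStrExt;
        OsStr::new(s).encode_wide().chain(Some(0)).collect()
    }

    #[cfg(windows)]
    fn main() {
        let name = to_wide("dbghelp.dll");
        assert_eq!(*name.last().unwrap(), 0); // trailing NUL for the Win32 call
    }

    #[cfg(not(windows))]
    fn main() {}
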
index 3a07204b2bcf80f43cd514ec6b35f3b50ffabe11..36b3a3d4bdec8570f839bf1a580276f1c16973b7 100644 (file)
 
 //! Extensions to `std::thread` for Windows.
 
-#![unstable(feature = "thread_extensions", issue = "29791")]
+#![stable(feature = "thread_extensions", since = "1.9.0")]
 
 use os::windows::io::{RawHandle, AsRawHandle, IntoRawHandle};
 use thread;
 use sys_common::{AsInner, IntoInner};
 
+#[stable(feature = "thread_extensions", since = "1.9.0")]
 impl<T> AsRawHandle for thread::JoinHandle<T> {
     fn as_raw_handle(&self) -> RawHandle {
         self.as_inner().handle().raw() as *mut _
     }
 }
 
+#[stable(feature = "thread_extensions", since = "1.9.0")]
 impl<T> IntoRawHandle for thread::JoinHandle<T>  {
     fn into_raw_handle(self) -> RawHandle {
         self.into_inner().into_handle().into_raw() as *mut _
index 95fb1e7c60052c27d2355153a9178a11a6d40cd7..529e42248f6a2c47b7fb8f8c1f608b746373785d 100644 (file)
@@ -8,6 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use prelude::v1::*;
 use io::prelude::*;
 use os::windows::prelude::*;
 
@@ -247,13 +248,13 @@ impl OpenOptions {
 
 impl File {
     pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
-        let path = try!(to_u16s(path));
+        let path = to_u16s(path)?;
         let handle = unsafe {
             c::CreateFileW(path.as_ptr(),
-                           try!(opts.get_access_mode()),
+                           opts.get_access_mode()?,
                            opts.share_mode,
                            opts.security_attributes as *mut _,
-                           try!(opts.get_creation_mode()),
+                           opts.get_creation_mode()?,
                            opts.get_flags_and_attributes(),
                            ptr::null_mut())
         };
@@ -265,7 +266,7 @@ impl File {
     }
 
     pub fn fsync(&self) -> io::Result<()> {
-        try!(cvt(unsafe { c::FlushFileBuffers(self.handle.raw()) }));
+        cvt(unsafe { c::FlushFileBuffers(self.handle.raw()) })?;
         Ok(())
     }
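
Much of the mechanical churn in this file (and throughout this diff) is the replacement of the `try!` macro with the `?` operator, stabilized in Rust 1.9.0. Both desugar to the same early return on `Err`; a small standalone sketch, with an illustrative `parse_plus_one` function that is not part of this file:

    use std::num::ParseIntError;

    // Before: let n = try!(s.parse::<i32>());
    // After:  let n = s.parse::<i32>()?;
    // Both mean: on Ok(v) keep v, on Err(e) return Err(e.into()) from the function.
    fn parse_plus_one(s: &str) -> Result<i32, ParseIntError> {
        let n = s.parse::<i32>()?;
        Ok(n + 1)
    }

    fn main() {
        assert_eq!(parse_plus_one("41"), Ok(42));
        assert!(parse_plus_one("nope").is_err());
    }
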
 
@@ -276,20 +277,20 @@ impl File {
             EndOfFile: size as c::LARGE_INTEGER,
         };
         let size = mem::size_of_val(&info);
-        try!(cvt(unsafe {
+        cvt(unsafe {
             c::SetFileInformationByHandle(self.handle.raw(),
                                           c::FileEndOfFileInfo,
                                           &mut info as *mut _ as *mut _,
                                           size as c::DWORD)
-        }));
+        })?;
         Ok(())
     }
 
     pub fn file_attr(&self) -> io::Result<FileAttr> {
         unsafe {
             let mut info: c::BY_HANDLE_FILE_INFORMATION = mem::zeroed();
-            try!(cvt(c::GetFileInformationByHandle(self.handle.raw(),
-                                                   &mut info)));
+            cvt(c::GetFileInformationByHandle(self.handle.raw(),
+                                              &mut info))?;
             let mut attr = FileAttr {
                 attributes: info.dwFileAttributes,
                 creation_time: info.ftCreationTime,
@@ -312,6 +313,10 @@ impl File {
         self.handle.read(buf)
     }
 
+    pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        self.handle.read_to_end(buf)
+    }
+
     pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
         self.handle.write(buf)
     }
@@ -326,16 +331,16 @@ impl File {
         };
         let pos = pos as c::LARGE_INTEGER;
         let mut newpos = 0;
-        try!(cvt(unsafe {
+        cvt(unsafe {
             c::SetFilePointerEx(self.handle.raw(), pos,
                                 &mut newpos, whence)
-        }));
+        })?;
         Ok(newpos as u64)
     }
 
     pub fn duplicate(&self) -> io::Result<File> {
         Ok(File {
-            handle: try!(self.handle.duplicate(0, true, c::DUPLICATE_SAME_ACCESS)),
+            handle: self.handle.duplicate(0, true, c::DUPLICATE_SAME_ACCESS)?,
         })
     }
 
@@ -348,7 +353,7 @@ impl File {
                          -> io::Result<(c::DWORD, &'a c::REPARSE_DATA_BUFFER)> {
         unsafe {
             let mut bytes = 0;
-            try!(cvt({
+            cvt({
                 c::DeviceIoControl(self.handle.raw(),
                                    c::FSCTL_GET_REPARSE_POINT,
                                    ptr::null_mut(),
@@ -357,14 +362,14 @@ impl File {
                                    space.len() as c::DWORD,
                                    &mut bytes,
                                    ptr::null_mut())
-            }));
+            })?;
             Ok((bytes, &*(space.as_ptr() as *const c::REPARSE_DATA_BUFFER)))
         }
     }
 
     fn readlink(&self) -> io::Result<PathBuf> {
         let mut space = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
-        let (_bytes, buf) = try!(self.reparse_point(&mut space));
+        let (_bytes, buf) = self.reparse_point(&mut space)?;
         unsafe {
             let (path_buffer, subst_off, subst_len, relative) = match buf.ReparseTag {
                 c::IO_REPARSE_TAG_SYMLINK => {
@@ -511,10 +516,10 @@ impl DirBuilder {
     pub fn new() -> DirBuilder { DirBuilder }
 
     pub fn mkdir(&self, p: &Path) -> io::Result<()> {
-        let p = try!(to_u16s(p));
-        try!(cvt(unsafe {
+        let p = to_u16s(p)?;
+        cvt(unsafe {
             c::CreateDirectoryW(p.as_ptr(), ptr::null_mut())
-        }));
+        })?;
         Ok(())
     }
 }
@@ -522,7 +527,7 @@ impl DirBuilder {
 pub fn readdir(p: &Path) -> io::Result<ReadDir> {
     let root = p.to_path_buf();
     let star = p.join("*");
-    let path = try!(to_u16s(&star));
+    let path = to_u16s(&star)?;
 
     unsafe {
         let mut wfd = mem::zeroed();
@@ -540,28 +545,28 @@ pub fn readdir(p: &Path) -> io::Result<ReadDir> {
 }
 
 pub fn unlink(p: &Path) -> io::Result<()> {
-    let p_u16s = try!(to_u16s(p));
-    try!(cvt(unsafe { c::DeleteFileW(p_u16s.as_ptr()) }));
+    let p_u16s = to_u16s(p)?;
+    cvt(unsafe { c::DeleteFileW(p_u16s.as_ptr()) })?;
     Ok(())
 }
 
 pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
-    let old = try!(to_u16s(old));
-    let new = try!(to_u16s(new));
-    try!(cvt(unsafe {
+    let old = to_u16s(old)?;
+    let new = to_u16s(new)?;
+    cvt(unsafe {
         c::MoveFileExW(old.as_ptr(), new.as_ptr(), c::MOVEFILE_REPLACE_EXISTING)
-    }));
+    })?;
     Ok(())
 }
 
 pub fn rmdir(p: &Path) -> io::Result<()> {
-    let p = try!(to_u16s(p));
-    try!(cvt(unsafe { c::RemoveDirectoryW(p.as_ptr()) }));
+    let p = to_u16s(p)?;
+    cvt(unsafe { c::RemoveDirectoryW(p.as_ptr()) })?;
     Ok(())
 }
 
 pub fn remove_dir_all(path: &Path) -> io::Result<()> {
-    let filetype = try!(lstat(path)).file_type();
+    let filetype = lstat(path)?.file_type();
     if filetype.is_symlink() {
         // On Windows symlinks to files and directories are removed differently.
         // rmdir only deletes dir symlinks and junctions, not file symlinks.
@@ -572,15 +577,15 @@ pub fn remove_dir_all(path: &Path) -> io::Result<()> {
 }
 
 fn remove_dir_all_recursive(path: &Path) -> io::Result<()> {
-    for child in try!(readdir(path)) {
-        let child = try!(child);
-        let child_type = try!(child.file_type());
+    for child in readdir(path)? {
+        let child = child?;
+        let child_type = child.file_type()?;
         if child_type.is_dir() {
-            try!(remove_dir_all_recursive(&child.path()));
+            remove_dir_all_recursive(&child.path())?;
         } else if child_type.is_symlink_dir() {
-            try!(rmdir(&child.path()));
+            rmdir(&child.path())?;
         } else {
-            try!(unlink(&child.path()));
+            unlink(&child.path())?;
         }
     }
     rmdir(path)
@@ -594,7 +599,7 @@ pub fn readlink(path: &Path) -> io::Result<PathBuf> {
     opts.access_mode(0);
     opts.custom_flags(c::FILE_FLAG_OPEN_REPARSE_POINT |
                       c::FILE_FLAG_BACKUP_SEMANTICS);
-    let file = try!(File::open(&path, &opts));
+    let file = File::open(&path, &opts)?;
     file.readlink()
 }
 
@@ -603,21 +608,21 @@ pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> {
 }
 
 pub fn symlink_inner(src: &Path, dst: &Path, dir: bool) -> io::Result<()> {
-    let src = try!(to_u16s(src));
-    let dst = try!(to_u16s(dst));
+    let src = to_u16s(src)?;
+    let dst = to_u16s(dst)?;
     let flags = if dir { c::SYMBOLIC_LINK_FLAG_DIRECTORY } else { 0 };
-    try!(cvt(unsafe {
+    cvt(unsafe {
         c::CreateSymbolicLinkW(dst.as_ptr(), src.as_ptr(), flags) as c::BOOL
-    }));
+    })?;
     Ok(())
 }
 
 pub fn link(src: &Path, dst: &Path) -> io::Result<()> {
-    let src = try!(to_u16s(src));
-    let dst = try!(to_u16s(dst));
-    try!(cvt(unsafe {
+    let src = to_u16s(src)?;
+    let dst = to_u16s(dst)?;
+    cvt(unsafe {
         c::CreateHardLinkW(dst.as_ptr(), src.as_ptr(), ptr::null_mut())
-    }));
+    })?;
     Ok(())
 }
 
@@ -627,7 +632,7 @@ pub fn stat(path: &Path) -> io::Result<FileAttr> {
     opts.access_mode(0);
     // This flag is so we can open directories too
     opts.custom_flags(c::FILE_FLAG_BACKUP_SEMANTICS);
-    let file = try!(File::open(path, &opts));
+    let file = File::open(path, &opts)?;
     file.file_attr()
 }
 
@@ -636,14 +641,14 @@ pub fn lstat(path: &Path) -> io::Result<FileAttr> {
     // No read or write permissions are necessary
     opts.access_mode(0);
     opts.custom_flags(c::FILE_FLAG_BACKUP_SEMANTICS | c::FILE_FLAG_OPEN_REPARSE_POINT);
-    let file = try!(File::open(path, &opts));
+    let file = File::open(path, &opts)?;
     file.file_attr()
 }
 
 pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
-    let p = try!(to_u16s(p));
+    let p = to_u16s(p)?;
     unsafe {
-        try!(cvt(c::SetFileAttributesW(p.as_ptr(), perm.attrs)));
+        cvt(c::SetFileAttributesW(p.as_ptr(), perm.attrs))?;
         Ok(())
     }
 }
@@ -663,7 +668,7 @@ pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
     opts.access_mode(0);
     // This flag is so we can open directories too
     opts.custom_flags(c::FILE_FLAG_BACKUP_SEMANTICS);
-    let f = try!(File::open(p, &opts));
+    let f = File::open(p, &opts)?;
     get_path(&f)
 }
 
@@ -682,13 +687,13 @@ pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
         *(lpData as *mut i64) = TotalBytesTransferred;
         c::PROGRESS_CONTINUE
     }
-    let pfrom = try!(to_u16s(from));
-    let pto = try!(to_u16s(to));
+    let pfrom = to_u16s(from)?;
+    let pto = to_u16s(to)?;
     let mut size = 0i64;
-    try!(cvt(unsafe {
+    cvt(unsafe {
         c::CopyFileExW(pfrom.as_ptr(), pto.as_ptr(), Some(callback),
                        &mut size as *mut _ as *mut _, ptr::null_mut(), 0)
-    }));
+    })?;
     Ok(size as u64)
 }
 
@@ -705,13 +710,13 @@ pub fn symlink_junction<P: AsRef<Path>, Q: AsRef<Path>>(src: P, dst: Q) -> io::R
 #[allow(dead_code)]
 fn symlink_junction_inner(target: &Path, junction: &Path) -> io::Result<()> {
     let d = DirBuilder::new();
-    try!(d.mkdir(&junction));
+    d.mkdir(&junction)?;
 
     let mut opts = OpenOptions::new();
     opts.write(true);
     opts.custom_flags(c::FILE_FLAG_OPEN_REPARSE_POINT |
                       c::FILE_FLAG_BACKUP_SEMANTICS);
-    let f = try!(File::open(junction, &opts));
+    let f = File::open(junction, &opts)?;
     let h = f.handle().raw();
 
     unsafe {
index 47676a927f65877ab02e386ace0fc98e974b46ea..74546bb893bea8bbde6cc36fc802b9cbb7d71cfb 100644 (file)
@@ -8,14 +8,19 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+#![unstable(issue = "0", feature = "windows_handle")]
+
+use prelude::v1::*;
+
 use cmp;
-use io::ErrorKind;
+use io::{ErrorKind, Read};
 use io;
 use mem;
 use ops::Deref;
 use ptr;
 use sys::c;
 use sys::cvt;
+use sys_common::io::read_to_end_uninitialized;
 use u32;
 
 /// An owned container for a `HANDLE` object, closing it on Drop.
@@ -39,6 +44,20 @@ impl Handle {
         Handle(RawHandle::new(handle))
     }
 
+    pub fn new_event(manual: bool, init: bool) -> io::Result<Handle> {
+        unsafe {
+            let event = c::CreateEventW(0 as *mut _,
+                                        manual as c::BOOL,
+                                        init as c::BOOL,
+                                        0 as *const _);
+            if event.is_null() {
+                Err(io::Error::last_os_error())
+            } else {
+                Ok(Handle::new(event))
+            }
+        }
+    }
+
     pub fn into_raw(self) -> c::HANDLE {
         let ret = self.raw();
         mem::forget(self);
@@ -87,27 +106,95 @@ impl RawHandle {
         }
     }
 
+    pub unsafe fn read_overlapped(&self,
+                                  buf: &mut [u8],
+                                  overlapped: *mut c::OVERLAPPED)
+                                  -> io::Result<Option<usize>> {
+        let len = cmp::min(buf.len(), <c::DWORD>::max_value() as usize) as c::DWORD;
+        let mut amt = 0;
+        let res = cvt({
+            c::ReadFile(self.0, buf.as_ptr() as c::LPVOID,
+                        len, &mut amt, overlapped)
+        });
+        match res {
+            Ok(_) => Ok(Some(amt as usize)),
+            Err(e) => {
+                if e.raw_os_error() == Some(c::ERROR_IO_PENDING as i32) {
+                    Ok(None)
+                } else if e.raw_os_error() == Some(c::ERROR_BROKEN_PIPE as i32) {
+                    Ok(Some(0))
+                } else {
+                    Err(e)
+                }
+            }
+        }
+    }
+
+    pub fn overlapped_result(&self,
+                             overlapped: *mut c::OVERLAPPED,
+                             wait: bool) -> io::Result<usize> {
+        unsafe {
+            let mut bytes = 0;
+            let wait = if wait {c::TRUE} else {c::FALSE};
+            let res = cvt({
+                c::GetOverlappedResult(self.raw(), overlapped, &mut bytes, wait)
+            });
+            match res {
+                Ok(_) => Ok(bytes as usize),
+                Err(e) => {
+                    if e.raw_os_error() == Some(c::ERROR_HANDLE_EOF as i32) ||
+                       e.raw_os_error() == Some(c::ERROR_BROKEN_PIPE as i32) {
+                        Ok(0)
+                    } else {
+                        Err(e)
+                    }
+                }
+            }
+        }
+    }
+
+    pub fn cancel_io(&self) -> io::Result<()> {
+        unsafe {
+            cvt(c::CancelIo(self.raw())).map(|_| ())
+        }
+    }
+
+    pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        let mut me = self;
+        (&mut me).read_to_end(buf)
+    }
+
     pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
         let mut amt = 0;
         // WriteFile takes a DWORD (u32) for the length so it only supports
         // writing u32::MAX bytes at a time.
         let len = cmp::min(buf.len(), u32::MAX as usize) as c::DWORD;
-        try!(cvt(unsafe {
+        cvt(unsafe {
             c::WriteFile(self.0, buf.as_ptr() as c::LPVOID,
                          len, &mut amt, ptr::null_mut())
-        }));
+        })?;
         Ok(amt as usize)
     }
 
     pub fn duplicate(&self, access: c::DWORD, inherit: bool,
                      options: c::DWORD) -> io::Result<Handle> {
         let mut ret = 0 as c::HANDLE;
-        try!(cvt(unsafe {
+        cvt(unsafe {
             let cur_proc = c::GetCurrentProcess();
             c::DuplicateHandle(cur_proc, self.0, cur_proc, &mut ret,
                             access, inherit as c::BOOL,
                             options)
-        }));
+        })?;
         Ok(Handle::new(ret))
     }
 }
+
+impl<'a> Read for &'a RawHandle {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        (**self).read(buf)
+    }
+
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        unsafe { read_to_end_uninitialized(self, buf) }
+    }
+}
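
The `read_to_end(&self, ...)` methods above lean on a common trick: implement `Read` for a shared reference (`&RawHandle`) and then reborrow it mutably with `(&mut me).read_to_end(buf)`. A portable sketch of the same shape using a toy reader instead of a Windows handle (all names here are illustrative; the real implementation also swaps in `read_to_end_uninitialized` to avoid zeroing the buffer):

    use std::cell::Cell;
    use std::cmp;
    use std::io::{self, Read};

    // A toy source that yields a fixed number of zero bytes. The remaining
    // count lives in a Cell so reads can happen through a shared reference,
    // mirroring how RawHandle exposes read_to_end(&self, ...).
    struct Zeros {
        remaining: Cell<usize>,
    }

    impl Zeros {
        fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
            let mut me = self;
            (&mut me).read_to_end(buf)
        }
    }

    impl<'a> Read for &'a Zeros {
        fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
            let n = cmp::min(buf.len(), self.remaining.get());
            for b in &mut buf[..n] {
                *b = 0;
            }
            self.remaining.set(self.remaining.get() - n);
            Ok(n)
        }
    }

    fn main() {
        let r = Zeros { remaining: Cell::new(10) };
        let mut out = Vec::new();
        assert_eq!(r.read_to_end(&mut out).unwrap(), 10);
        assert_eq!(out.len(), 10);
    }
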
index 765e6e09427641cc127e38fc30a02409be033bed..384940e4dc446edbe0f0101378000b60a431727b 100644 (file)
@@ -24,6 +24,7 @@ use time::Duration;
 pub mod backtrace;
 pub mod c;
 pub mod condvar;
+pub mod dynamic_lib;
 pub mod ext;
 pub mod fs;
 pub mod handle;
index 49ba8e9c65990570c4192be868b7500cb8257b3c..b05dcf42a3324636d3a5a8ae1acb3a9a6e5c8789 100644 (file)
@@ -8,9 +8,13 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+#![unstable(issue = "0", feature = "windows_net")]
+
+use prelude::v1::*;
+
 use cmp;
-use io;
-use libc::{c_int, c_void};
+use io::{self, Read};
+use libc::{c_int, c_void, c_ulong};
 use mem;
 use net::{SocketAddr, Shutdown};
 use num::One;
@@ -20,6 +24,7 @@ use sync::Once;
 use sys::c;
 use sys;
 use sys_common::{self, AsInner, FromInner, IntoInner};
+use sys_common::io::read_to_end_uninitialized;
 use sys_common::net;
 use time::Duration;
 
@@ -56,27 +61,28 @@ fn last_error() -> io::Error {
 }
 
 /// Checks if the signed integer is the Windows constant `SOCKET_ERROR` (-1)
-/// and if so, returns the last error from the Windows socket interface. This
+/// and if so, returns the last error from the Windows socket interface. This
 /// function must be called before another call to the socket API is made.
-pub fn cvt<T: One + Neg<Output=T> + PartialEq>(t: T) -> io::Result<T> {
-    let one: T = T::one();
-    if t == -one {
+pub fn cvt<T: One + PartialEq + Neg<Output=T>>(t: T) -> io::Result<T> {
+    if t == -T::one() {
         Err(last_error())
     } else {
         Ok(t)
     }
 }
 
-/// Provides the functionality of `cvt` for the return values of `getaddrinfo`
-/// and similar, meaning that they return an error if the return value is 0.
+/// A variant of `cvt` for `getaddrinfo`, which returns 0 on success.
 pub fn cvt_gai(err: c_int) -> io::Result<()> {
-    if err == 0 { return Ok(()) }
-    cvt(err).map(|_| ())
+    if err == 0 {
+        Ok(())
+    } else {
+        Err(last_error())
+    }
 }
 
-/// Provides the functionality of `cvt` for a closure.
+/// Provided just to mirror the interface of sys/unix/net.rs.
 pub fn cvt_r<T, F>(mut f: F) -> io::Result<T>
-    where F: FnMut() -> T, T: One + Neg<Output=T> + PartialEq
+    where T: One + PartialEq + Neg<Output=T>, F: FnMut() -> T
 {
     cvt(f())
 }
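
`cvt` above just folds the C convention "negative one means failure" into an `io::Result`. A self-contained sketch of the same shape; `last_error` here is a stand-in for the real helper (which calls into the Windows socket error APIs), and the trait bounds differ from the internal `One`-based signature above:

    use std::io;

    // Stand-in for the Windows-specific last_error() in this module.
    fn last_error() -> io::Error {
        io::Error::new(io::ErrorKind::Other, "socket error")
    }

    // Map the SOCKET_ERROR (-1) convention onto io::Result.
    fn cvt<T: PartialEq + From<i8>>(t: T) -> io::Result<T> {
        if t == T::from(-1i8) {
            Err(last_error())
        } else {
            Ok(t)
        }
    }

    fn main() {
        assert_eq!(cvt(5i32).unwrap(), 5);
        assert!(cvt(-1i64).is_err());
    }
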
@@ -87,35 +93,35 @@ impl Socket {
             SocketAddr::V4(..) => c::AF_INET,
             SocketAddr::V6(..) => c::AF_INET6,
         };
-        let socket = try!(unsafe {
+        let socket = unsafe {
             match c::WSASocketW(fam, ty, 0, ptr::null_mut(), 0,
                                 c::WSA_FLAG_OVERLAPPED) {
                 c::INVALID_SOCKET => Err(last_error()),
                 n => Ok(Socket(n)),
             }
-        });
-        try!(socket.set_no_inherit());
+        }?;
+        socket.set_no_inherit()?;
         Ok(socket)
     }
 
     pub fn accept(&self, storage: *mut c::SOCKADDR,
                   len: *mut c_int) -> io::Result<Socket> {
-        let socket = try!(unsafe {
+        let socket = unsafe {
             match c::accept(self.0, storage, len) {
                 c::INVALID_SOCKET => Err(last_error()),
                 n => Ok(Socket(n)),
             }
-        });
-        try!(socket.set_no_inherit());
+        }?;
+        socket.set_no_inherit()?;
         Ok(socket)
     }
 
     pub fn duplicate(&self) -> io::Result<Socket> {
-        let socket = try!(unsafe {
+        let socket = unsafe {
             let mut info: c::WSAPROTOCOL_INFO = mem::zeroed();
-            try!(cvt(c::WSADuplicateSocketW(self.0,
+            cvt(c::WSADuplicateSocketW(self.0,
                                             c::GetCurrentProcessId(),
-                                            &mut info)));
+                                            &mut info))?;
             match c::WSASocketW(info.iAddressFamily,
                                 info.iSocketType,
                                 info.iProtocol,
@@ -124,8 +130,8 @@ impl Socket {
                 c::INVALID_SOCKET => Err(last_error()),
                 n => Ok(Socket(n)),
             }
-        });
-        try!(socket.set_no_inherit());
+        }?;
+        socket.set_no_inherit()?;
         Ok(socket)
     }
 
@@ -142,6 +148,11 @@ impl Socket {
         }
     }
 
+    pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        let mut me = self;
+        (&mut me).read_to_end(buf)
+    }
+
     pub fn set_timeout(&self, dur: Option<Duration>,
                        kind: c_int) -> io::Result<()> {
         let timeout = match dur {
@@ -159,7 +170,7 @@ impl Socket {
     }
 
     pub fn timeout(&self, kind: c_int) -> io::Result<Option<Duration>> {
-        let raw: c::DWORD = try!(net::getsockopt(self, c::SOL_SOCKET, kind));
+        let raw: c::DWORD = net::getsockopt(self, c::SOL_SOCKET, kind)?;
         if raw == 0 {
             Ok(None)
         } else {
@@ -182,9 +193,48 @@ impl Socket {
             Shutdown::Read => c::SD_RECEIVE,
             Shutdown::Both => c::SD_BOTH,
         };
-        try!(cvt(unsafe { c::shutdown(self.0, how) }));
+        cvt(unsafe { c::shutdown(self.0, how) })?;
         Ok(())
     }
+
+    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+        let mut nonblocking = nonblocking as c_ulong;
+        let r = unsafe { c::ioctlsocket(self.0, c::FIONBIO as c_int, &mut nonblocking) };
+        if r == 0 {
+            Ok(())
+        } else {
+            Err(io::Error::last_os_error())
+        }
+    }
+
+    pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+        net::setsockopt(self, c::IPPROTO_TCP, c::TCP_NODELAY, nodelay as c::BYTE)
+    }
+
+    pub fn nodelay(&self) -> io::Result<bool> {
+        let raw: c::BYTE = net::getsockopt(self, c::IPPROTO_TCP, c::TCP_NODELAY)?;
+        Ok(raw != 0)
+    }
+
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        let raw: c_int = net::getsockopt(self, c::SOL_SOCKET, c::SO_ERROR)?;
+        if raw == 0 {
+            Ok(None)
+        } else {
+            Ok(Some(io::Error::from_raw_os_error(raw as i32)))
+        }
+    }
+}
+
+#[unstable(reason = "not public", issue = "0", feature = "fd_read")]
+impl<'a> Read for &'a Socket {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        (**self).read(buf)
+    }
+
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        unsafe { read_to_end_uninitialized(self, buf) }
+    }
 }
 
 impl Drop for Socket {
index 688475a756574919f390a321032746ec4dbd47b1..32ca32e76cb626aee457870c00020279bf5d5127 100644 (file)
@@ -239,7 +239,7 @@ pub fn chdir(p: &path::Path) -> io::Result<()> {
 }
 
 pub fn getenv(k: &OsStr) -> io::Result<Option<OsString>> {
-    let k = try!(to_u16s(k));
+    let k = to_u16s(k)?;
     let res = super::fill_utf16_buf(|buf, sz| unsafe {
         c::GetEnvironmentVariableW(k.as_ptr(), buf, sz)
     }, |buf| {
@@ -258,8 +258,8 @@ pub fn getenv(k: &OsStr) -> io::Result<Option<OsString>> {
 }
 
 pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
-    let k = try!(to_u16s(k));
-    let v = try!(to_u16s(v));
+    let k = to_u16s(k)?;
+    let v = to_u16s(v)?;
 
     cvt(unsafe {
         c::SetEnvironmentVariableW(k.as_ptr(), v.as_ptr())
@@ -267,7 +267,7 @@ pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
 }
 
 pub fn unsetenv(n: &OsStr) -> io::Result<()> {
-    let v = try!(to_u16s(n));
+    let v = to_u16s(n)?;
     cvt(unsafe {
         c::SetEnvironmentVariableW(v.as_ptr(), ptr::null())
     }).map(|_| ())
index aec41885f3b879123c5f624bade4e09f80ce7b03..8631a63d653a3af38843f51e1f3b6e317b176891 100644 (file)
@@ -8,10 +8,17 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use prelude::v1::*;
+use os::windows::prelude::*;
+
+use ffi::OsStr;
+use path::Path;
 use io;
-use ptr;
-use sys::cvt;
+use mem;
+use rand::{self, Rng};
+use slice;
 use sys::c;
+use sys::fs::{File, OpenOptions};
 use sys::handle::Handle;
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -23,14 +30,76 @@ pub struct AnonPipe {
 }
 
 pub fn anon_pipe() -> io::Result<(AnonPipe, AnonPipe)> {
-    let mut reader = c::INVALID_HANDLE_VALUE;
-    let mut writer = c::INVALID_HANDLE_VALUE;
-    try!(cvt(unsafe {
-        c::CreatePipe(&mut reader, &mut writer, ptr::null_mut(), 0)
-    }));
-    let reader = Handle::new(reader);
-    let writer = Handle::new(writer);
-    Ok((AnonPipe { inner: reader }, AnonPipe { inner: writer }))
+    // Note that we specifically do *not* use `CreatePipe` here because
+    // unfortunately the anonymous pipes returned do not support overlapped
+    // operations.
+    //
+    // Instead, we create a "hopefully unique" name and create a named pipe
+    // which has overlapped operations enabled.
+    //
+    // Once we do this, we connect to it as usual via `CreateFileW`, and then we
+    // return those reader/writer halves.
+    unsafe {
+        let reader;
+        let mut name;
+        let mut tries = 0;
+        loop {
+            tries += 1;
+            let key: u64 = rand::thread_rng().gen();
+            name = format!(r"\\.\pipe\__rust_anonymous_pipe1__.{}.{}",
+                           c::GetCurrentProcessId(),
+                           key);
+            let wide_name = OsStr::new(&name)
+                                  .encode_wide()
+                                  .chain(Some(0))
+                                  .collect::<Vec<_>>();
+
+            let handle = c::CreateNamedPipeW(wide_name.as_ptr(),
+                                             c::PIPE_ACCESS_INBOUND |
+                                              c::FILE_FLAG_FIRST_PIPE_INSTANCE |
+                                              c::FILE_FLAG_OVERLAPPED,
+                                             c::PIPE_TYPE_BYTE |
+                                              c::PIPE_READMODE_BYTE |
+                                              c::PIPE_WAIT |
+                                              c::PIPE_REJECT_REMOTE_CLIENTS,
+                                             1,
+                                             4096,
+                                             4096,
+                                             0,
+                                             0 as *mut _);
+
+            // We pass the FILE_FLAG_FIRST_PIPE_INSTANCE flag above, and we're
+            // also just doing a best effort at selecting a unique name. If
+            // ERROR_ACCESS_DENIED is returned then it could mean that we
+            // accidentally conflicted with an already existing pipe, so we try
+            // again.
+            //
+            // Don't retry indefinitely, though, since the failure could also be a
+            // legitimate error.
+            if handle == c::INVALID_HANDLE_VALUE {
+                let err = io::Error::last_os_error();
+                if tries < 10 &&
+                   err.raw_os_error() == Some(c::ERROR_ACCESS_DENIED as i32) {
+                    continue
+                }
+                return Err(err)
+            }
+            reader = Handle::new(handle);
+            break
+        }
+
+        // Connect to the named pipe we just created in write-only mode (also
+        // overlapped for async I/O below).
+        let mut opts = OpenOptions::new();
+        opts.write(true);
+        opts.read(false);
+        opts.share_mode(0);
+        opts.attributes(c::FILE_FLAG_OVERLAPPED);
+        let writer = File::open(Path::new(&name), &opts)?;
+        let writer = AnonPipe { inner: writer.into_handle() };
+
+        Ok((AnonPipe { inner: reader }, AnonPipe { inner: writer.into_handle() }))
+    }
 }
 
 impl AnonPipe {
@@ -41,7 +110,193 @@ impl AnonPipe {
         self.inner.read(buf)
     }
 
+    pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        self.inner.read_to_end(buf)
+    }
+
     pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
         self.inner.write(buf)
     }
 }
+
+pub fn read2(p1: AnonPipe,
+             v1: &mut Vec<u8>,
+             p2: AnonPipe,
+             v2: &mut Vec<u8>) -> io::Result<()> {
+    let p1 = p1.into_handle();
+    let p2 = p2.into_handle();
+
+    let mut p1 = AsyncPipe::new(p1, v1)?;
+    let mut p2 = AsyncPipe::new(p2, v2)?;
+    let objs = [p1.event.raw(), p2.event.raw()];
+
+    // In a loop we wait for either pipe's scheduled read operation to complete.
+    // If the operation completes with 0 bytes, that means EOF was reached, in
+    // which case we just finish out the other pipe entirely.
+    //
+    // Note that overlapped I/O is in general super unsafe because we have to
+    // be careful to ensure that all pointers in play are valid for the entire
+    // duration of the I/O operation (where tons of operations can also fail).
+    // The destructor for `AsyncPipe` ends up taking care of most of this.
+    loop {
+        let res = unsafe {
+            c::WaitForMultipleObjects(2, objs.as_ptr(), c::FALSE, c::INFINITE)
+        };
+        if res == c::WAIT_OBJECT_0 {
+            if !p1.result()? || !p1.schedule_read()? {
+                return p2.finish()
+            }
+        } else if res == c::WAIT_OBJECT_0 + 1 {
+            if !p2.result()? || !p2.schedule_read()? {
+                return p1.finish()
+            }
+        } else {
+            return Err(io::Error::last_os_error())
+        }
+    }
+}
+
+struct AsyncPipe<'a> {
+    pipe: Handle,
+    event: Handle,
+    overlapped: Box<c::OVERLAPPED>, // needs a stable address
+    dst: &'a mut Vec<u8>,
+    state: State,
+}
+
+#[derive(PartialEq, Debug)]
+enum State {
+    NotReading,
+    Reading,
+    Read(usize),
+}
+
+impl<'a> AsyncPipe<'a> {
+    fn new(pipe: Handle, dst: &'a mut Vec<u8>) -> io::Result<AsyncPipe<'a>> {
+        // Create an event which we'll use to coordinate our overlapped
+        // operations; this event will be used in WaitForMultipleObjects
+        // and passed as part of the OVERLAPPED handle.
+        //
+        // Note that we do a somewhat clever thing here by flagging the
+        // event as being manually reset and setting it initially to the
+        // signaled state. This means that we'll naturally fall through the
+        // WaitForMultipleObjects call above for pipes created initially,
+        // and the only time the event will go back to "unset" will be once an
+        // I/O operation is successfully scheduled (what we want).
+        let event = Handle::new_event(true, true)?;
+        let mut overlapped: Box<c::OVERLAPPED> = unsafe {
+            Box::new(mem::zeroed())
+        };
+        overlapped.hEvent = event.raw();
+        Ok(AsyncPipe {
+            pipe: pipe,
+            overlapped: overlapped,
+            event: event,
+            dst: dst,
+            state: State::NotReading,
+        })
+    }
+
+    /// Executes an overlapped read operation.
+    ///
+    /// Must not currently be reading, and returns whether the pipe is currently
+    /// at EOF or not. If the pipe is not at EOF then `result()` must be called
+    /// to complete the read later on (may block), but if the pipe is at EOF
+    /// then `result()` should not be called as it will just block forever.
+    fn schedule_read(&mut self) -> io::Result<bool> {
+        assert_eq!(self.state, State::NotReading);
+        let amt = unsafe {
+            let slice = slice_to_end(self.dst);
+            self.pipe.read_overlapped(slice, &mut *self.overlapped)?
+        };
+
+        // If this read finished immediately then our overlapped event will
+        // remain signaled (it was signaled coming in here) and we'll progress
+        // down to the method below.
+        //
+        // Otherwise the I/O operation is scheduled and the system set our event
+        // to not signaled, so we flag ourselves into the reading state and move
+        // on.
+        self.state = match amt {
+            Some(0) => return Ok(false),
+            Some(amt) => State::Read(amt),
+            None => State::Reading,
+        };
+        Ok(true)
+    }
+
+    /// Wait for the result of the overlapped operation previously executed.
+    ///
+    /// If a read is currently scheduled on this pipe, this blocks until that
+    /// read completes before reporting its result.
+    ///
+    /// Return values:
+    ///
+    /// * `true` - finished any pending read and the pipe is not at EOF (keep
+    ///            going)
+    /// * `false` - finished any pending read and pipe is at EOF (stop issuing
+    ///             reads)
+    fn result(&mut self) -> io::Result<bool> {
+        let amt = match self.state {
+            State::NotReading => return Ok(true),
+            State::Reading => {
+                self.pipe.overlapped_result(&mut *self.overlapped, true)?
+            }
+            State::Read(amt) => amt,
+        };
+        self.state = State::NotReading;
+        unsafe {
+            let len = self.dst.len();
+            self.dst.set_len(len + amt);
+        }
+        Ok(amt != 0)
+    }
+
+    /// Finishes out reading this pipe entirely.
+    ///
+    /// Repeatedly waits for any pending read and schedules new reads until
+    /// the pipe reaches EOF, draining all remaining data into `dst`.
+    fn finish(&mut self) -> io::Result<()> {
+        while self.result()? && self.schedule_read()? {
+            // ...
+        }
+        Ok(())
+    }
+}
+
+impl<'a> Drop for AsyncPipe<'a> {
+    fn drop(&mut self) {
+        match self.state {
+            State::Reading => {}
+            _ => return,
+        }
+
+        // If we have a pending read operation, then we have to make sure that
+        // it's *done* before we actually drop this type. The kernel requires
+        // that the `OVERLAPPED` and buffer pointers are valid for the entire
+        // I/O operation.
+        //
+        // To do that, we call `CancelIo` to cancel any pending operation, and
+        // if that succeeds we wait for the overlapped result.
+        //
+        // If anything here fails, there's not really much we can do, so we leak
+        // the buffer/OVERLAPPED pointers to ensure we're at least memory safe.
+        if self.pipe.cancel_io().is_err() || self.result().is_err() {
+            let buf = mem::replace(self.dst, Vec::new());
+            let overlapped = Box::new(unsafe { mem::zeroed() });
+            let overlapped = mem::replace(&mut self.overlapped, overlapped);
+            mem::forget((buf, overlapped));
+        }
+    }
+}
+
+unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] {
+    if v.capacity() == 0 {
+        v.reserve(16);
+    }
+    if v.capacity() == v.len() {
+        v.reserve(1);
+    }
+    slice::from_raw_parts_mut(v.as_mut_ptr().offset(v.len() as isize),
+                              v.capacity() - v.len())
+}
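
`slice_to_end` hands `ReadFile` the uninitialized spare capacity of the destination vector, and `result()` later bumps the length by however many bytes were actually written. A rough, self-contained sketch of that reserve/fill/`set_len` pattern, with an in-memory closure standing in for the overlapped read (the closure must only write to the slice, never read from it, since the memory starts uninitialized):

    // Reserve room past len(), let a producer fill it, then expose exactly
    // the bytes that were written by bumping the length.
    fn append_with<F>(v: &mut Vec<u8>, fill: F) -> usize
        where F: FnOnce(&mut [u8]) -> usize
    {
        if v.capacity() == v.len() {
            v.reserve(16);
        }
        let len = v.len();
        let cap = v.capacity();
        unsafe {
            let spare = std::slice::from_raw_parts_mut(v.as_mut_ptr().offset(len as isize),
                                                       cap - len);
            let n = fill(spare);
            assert!(n <= spare.len());
            v.set_len(len + n);
            n
        }
    }

    fn main() {
        let mut v = b"abc".to_vec();
        let written = append_with(&mut v, |spare| {
            let n = std::cmp::min(spare.len(), 3);
            for b in &mut spare[..n] {
                *b = b'!';
            }
            n
        });
        assert_eq!(written, 3);
        assert_eq!(v, b"abc!!!".to_vec());
    }
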
index c1367d5381da12c01e13526419a44e57dd50fd87..be2d5273c07283832abfbee5424f09482210cd70 100644 (file)
@@ -8,18 +8,19 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![allow(deprecated)]
-
-use dynamic_lib::DynamicLibrary;
 use io::prelude::*;
 use io;
-use sys::c;
 use libc::c_void;
-
+use sys::c;
+use sys::dynamic_lib::DynamicLibrary;
 use sys_common::gnu::libbacktrace;
 
-pub fn print(w: &mut Write, i: isize, addr: u64, _: &DynamicLibrary, _: c::HANDLE)
-        -> io::Result<()> {
+pub fn print(w: &mut Write,
+             i: isize,
+             addr: u64,
+             _process: c::HANDLE,
+             _dbghelp: &DynamicLibrary)
+              -> io::Result<()> {
     let addr = addr as usize as *mut c_void;
     libbacktrace::print(w, i, addr, addr)
 }
index 255c645c3fbf19d1a0d454724e861d04cee15f2c..9c29ac4082a6b2e3f0b40fdb3fcfcfcb8dffec9e 100644 (file)
@@ -8,60 +8,66 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![allow(deprecated)]
-
-use dynamic_lib::DynamicLibrary;
 use ffi::CStr;
 use io::prelude::*;
 use io;
 use libc::{c_ulong, c_int, c_char, c_void};
 use mem;
 use sys::c;
+use sys::dynamic_lib::DynamicLibrary;
 use sys_common::backtrace::{output, output_fileline};
 
 type SymFromAddrFn =
-    extern "system" fn(c::HANDLE, u64, *mut u64,
-                       *mut c::SYMBOL_INFO) -> c::BOOL;
+    unsafe extern "system" fn(c::HANDLE, u64, *mut u64,
+                              *mut c::SYMBOL_INFO) -> c::BOOL;
 type SymGetLineFromAddr64Fn =
-    extern "system" fn(c::HANDLE, u64, *mut u32,
-                       *mut c::IMAGEHLP_LINE64) -> c::BOOL;
+    unsafe extern "system" fn(c::HANDLE, u64, *mut u32,
+                              *mut c::IMAGEHLP_LINE64) -> c::BOOL;
 
-pub fn print(w: &mut Write, i: isize, addr: u64, dbghelp: &DynamicLibrary,
-             process: c::HANDLE) -> io::Result<()> {
-    let SymFromAddr = sym!(dbghelp, "SymFromAddr", SymFromAddrFn);
-    let SymGetLineFromAddr64 = sym!(dbghelp, "SymGetLineFromAddr64", SymGetLineFromAddr64Fn);
+pub fn print(w: &mut Write,
+             i: isize,
+             addr: u64,
+             process: c::HANDLE,
+             dbghelp: &DynamicLibrary)
+              -> io::Result<()> {
+    unsafe {
+        let SymFromAddr = sym!(dbghelp, "SymFromAddr", SymFromAddrFn);
+        let SymGetLineFromAddr64 = sym!(dbghelp,
+                                        "SymGetLineFromAddr64",
+                                        SymGetLineFromAddr64Fn);
 
-    let mut info: c::SYMBOL_INFO = unsafe { mem::zeroed() };
-    info.MaxNameLen = c::MAX_SYM_NAME as c_ulong;
-    // the struct size in C.  the value is different to
-    // `size_of::<SYMBOL_INFO>() - MAX_SYM_NAME + 1` (== 81)
-    // due to struct alignment.
-    info.SizeOfStruct = 88;
+        let mut info: c::SYMBOL_INFO = mem::zeroed();
+        info.MaxNameLen = c::MAX_SYM_NAME as c_ulong;
+        // the struct size in C.  the value is different to
+        // `size_of::<SYMBOL_INFO>() - MAX_SYM_NAME + 1` (== 81)
+        // due to struct alignment.
+        info.SizeOfStruct = 88;
 
-    let mut displacement = 0u64;
-    let ret = SymFromAddr(process, addr, &mut displacement, &mut info);
+        let mut displacement = 0u64;
+        let ret = SymFromAddr(process, addr, &mut displacement, &mut info);
 
-    let name = if ret == c::TRUE {
-        let ptr = info.Name.as_ptr() as *const c_char;
-        Some(unsafe { CStr::from_ptr(ptr).to_bytes() })
-    } else {
-        None
-    };
+        let name = if ret == c::TRUE {
+            let ptr = info.Name.as_ptr() as *const c_char;
+            Some(CStr::from_ptr(ptr).to_bytes())
+        } else {
+            None
+        };
 
-    try!(output(w, i, addr as usize as *mut c_void, name));
+        output(w, i, addr as usize as *mut c_void, name)?;
 
-    // Now find out the filename and line number
-    let mut line: c::IMAGEHLP_LINE64 = unsafe { mem::zeroed() };
-    line.SizeOfStruct = ::mem::size_of::<c::IMAGEHLP_LINE64>() as u32;
+        // Now find out the filename and line number
+        let mut line: c::IMAGEHLP_LINE64 = mem::zeroed();
+        line.SizeOfStruct = ::mem::size_of::<c::IMAGEHLP_LINE64>() as u32;
 
-    let mut displacement = 0u32;
-    let ret = SymGetLineFromAddr64(process, addr, &mut displacement, &mut line);
-    if ret == c::TRUE {
-        output_fileline(w,
-                        unsafe { CStr::from_ptr(line.Filename).to_bytes() },
-                        line.LineNumber as c_int,
-                        false)
-    } else {
-        Ok(())
+        let mut displacement = 0u32;
+        let ret = SymGetLineFromAddr64(process, addr, &mut displacement, &mut line);
+        if ret == c::TRUE {
+            output_fileline(w,
+                            CStr::from_ptr(line.Filename).to_bytes(),
+                            line.LineNumber as c_int,
+                            false)
+        } else {
+            Ok(())
+        }
     }
 }
index fa118be6fe6b19c8908fc61e7ffc6bd4032e19cb..f4957297581bb93f3ebbf0cd6747f3ac2ab09afa 100644 (file)
@@ -123,7 +123,7 @@ impl Command {
         self.stderr = Some(stderr);
     }
 
-    pub fn spawn(&mut self, default: Stdio)
+    pub fn spawn(&mut self, default: Stdio, needs_stdin: bool)
                  -> io::Result<(Process, StdioPipes)> {
         // To have the spawning semantics of unix/windows stay the same, we need
         // to read the *child's* PATH if one is provided. See #15149 for more
@@ -151,7 +151,7 @@ impl Command {
         si.dwFlags = c::STARTF_USESTDHANDLES;
 
         let program = program.as_ref().unwrap_or(&self.program);
-        let mut cmd_str = try!(make_command_line(program, &self.args));
+        let mut cmd_str = make_command_line(program, &self.args)?;
         cmd_str.push(0); // add null terminator
 
         // stolen from the libuv code.
@@ -160,8 +160,8 @@ impl Command {
             flags |= c::DETACHED_PROCESS | c::CREATE_NEW_PROCESS_GROUP;
         }
 
-        let (envp, _data) = try!(make_envp(self.env.as_ref()));
-        let (dirp, _data) = try!(make_dirp(self.cwd.as_ref()));
+        let (envp, _data) = make_envp(self.env.as_ref())?;
+        let (dirp, _data) = make_dirp(self.cwd.as_ref())?;
         let mut pi = zeroed_process_information();
 
         // Prepare all stdio handles to be inherited by the child. This
@@ -181,26 +181,28 @@ impl Command {
             stdout: None,
             stderr: None,
         };
-        let stdin = self.stdin.as_ref().unwrap_or(&default);
+        let null = Stdio::Null;
+        let default_stdin = if needs_stdin {&default} else {&null};
+        let stdin = self.stdin.as_ref().unwrap_or(default_stdin);
         let stdout = self.stdout.as_ref().unwrap_or(&default);
         let stderr = self.stderr.as_ref().unwrap_or(&default);
-        let stdin = try!(stdin.to_handle(c::STD_INPUT_HANDLE, &mut pipes.stdin));
-        let stdout = try!(stdout.to_handle(c::STD_OUTPUT_HANDLE,
-                                           &mut pipes.stdout));
-        let stderr = try!(stderr.to_handle(c::STD_ERROR_HANDLE,
-                                           &mut pipes.stderr));
+        let stdin = stdin.to_handle(c::STD_INPUT_HANDLE, &mut pipes.stdin)?;
+        let stdout = stdout.to_handle(c::STD_OUTPUT_HANDLE,
+                                      &mut pipes.stdout)?;
+        let stderr = stderr.to_handle(c::STD_ERROR_HANDLE,
+                                      &mut pipes.stderr)?;
         si.hStdInput = stdin.raw();
         si.hStdOutput = stdout.raw();
         si.hStdError = stderr.raw();
 
-        try!(unsafe {
+        unsafe {
             cvt(c::CreateProcessW(ptr::null(),
                                   cmd_str.as_mut_ptr(),
                                   ptr::null_mut(),
                                   ptr::null_mut(),
                                   c::TRUE, flags, envp, dirp,
                                   &mut si, &mut pi))
-        });
+        }?;
 
         // We close the thread handle because we don't care about keeping
         // the thread id valid, and we aren't keeping the thread handle
@@ -214,9 +216,9 @@ impl Command {
 
 impl fmt::Debug for Command {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        try!(write!(f, "{:?}", self.program));
+        write!(f, "{:?}", self.program)?;
         for arg in &self.args {
-            try!(write!(f, " {:?}", arg));
+            write!(f, " {:?}", arg)?;
         }
         Ok(())
     }
@@ -238,18 +240,18 @@ impl Stdio {
             }
 
             Stdio::MakePipe => {
-                let (reader, writer) = try!(pipe::anon_pipe());
+                let (reader, writer) = pipe::anon_pipe()?;
                 let (ours, theirs) = if stdio_id == c::STD_INPUT_HANDLE {
                     (writer, reader)
                 } else {
                     (reader, writer)
                 };
                 *pipe = Some(ours);
-                try!(cvt(unsafe {
+                cvt(unsafe {
                     c::SetHandleInformation(theirs.handle().raw(),
                                             c::HANDLE_FLAG_INHERIT,
                                             c::HANDLE_FLAG_INHERIT)
-                }));
+                })?;
                 Ok(theirs.into_handle())
             }
 
@@ -294,9 +296,9 @@ pub struct Process {
 
 impl Process {
     pub fn kill(&mut self) -> io::Result<()> {
-        try!(cvt(unsafe {
+        cvt(unsafe {
             c::TerminateProcess(self.handle.raw(), 1)
-        }));
+        })?;
         Ok(())
     }
 
@@ -313,7 +315,7 @@ impl Process {
                 return Err(Error::last_os_error())
             }
             let mut status = 0;
-            try!(cvt(c::GetExitCodeProcess(self.handle.raw(), &mut status)));
+            cvt(c::GetExitCodeProcess(self.handle.raw(), &mut status))?;
             Ok(ExitStatus(status))
         }
     }
@@ -379,10 +381,10 @@ fn make_command_line(prog: &OsStr, args: &[OsString]) -> io::Result<Vec<u16>> {
     // Encode the command and arguments in a command line string such
     // that the spawned process may recover them using CommandLineToArgvW.
     let mut cmd: Vec<u16> = Vec::new();
-    try!(append_arg(&mut cmd, prog));
+    append_arg(&mut cmd, prog)?;
     for arg in args {
         cmd.push(' ' as u16);
-        try!(append_arg(&mut cmd, arg));
+        append_arg(&mut cmd, arg)?;
     }
     return Ok(cmd);
 
@@ -390,7 +392,7 @@ fn make_command_line(prog: &OsStr, args: &[OsString]) -> io::Result<Vec<u16>> {
         // If an argument has 0 characters then we need to quote it to ensure
         // that it actually gets passed through on the command line or otherwise
         // it will be dropped entirely when parsed on the other end.
-        try!(ensure_no_nuls(arg));
+        ensure_no_nuls(arg)?;
         let arg_bytes = &arg.as_inner().inner.as_inner();
         let quote = arg_bytes.iter().any(|c| *c == b' ' || *c == b'\t')
             || arg_bytes.is_empty();
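
The rule visible above is that an argument gets wrapped in double quotes when it is empty or contains a space or tab, so that `CommandLineToArgvW` on the receiving side reconstructs the original argv. A simplified, self-contained sketch of just that predicate (the real code additionally escapes embedded quotes and backslashes, which this sketch ignores):

    // Quote only when needed: empty arguments or arguments containing spaces
    // or tabs would otherwise be split up (or dropped) when reparsed.
    fn needs_quotes(arg: &str) -> bool {
        arg.is_empty() || arg.bytes().any(|b| b == b' ' || b == b'\t')
    }

    fn main() {
        assert!(needs_quotes(""));
        assert!(needs_quotes("C:\\Program Files\\app.exe"));
        assert!(!needs_quotes("--verbose"));
    }
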
@@ -436,9 +438,9 @@ fn make_envp(env: Option<&collections::HashMap<OsString, OsString>>)
             let mut blk = Vec::new();
 
             for pair in env {
-                blk.extend(try!(ensure_no_nuls(pair.0)).encode_wide());
+                blk.extend(ensure_no_nuls(pair.0)?.encode_wide());
                 blk.push('=' as u16);
-                blk.extend(try!(ensure_no_nuls(pair.1)).encode_wide());
+                blk.extend(ensure_no_nuls(pair.1)?.encode_wide());
                 blk.push(0);
             }
             blk.push(0);
@@ -452,7 +454,7 @@ fn make_dirp(d: Option<&OsString>) -> io::Result<(*const u16, Vec<u16>)> {
 
     match d {
         Some(dir) => {
-            let mut dir_str: Vec<u16> = try!(ensure_no_nuls(dir)).encode_wide().collect();
+            let mut dir_str: Vec<u16> = ensure_no_nuls(dir)?.encode_wide().collect();
             dir_str.push(0);
             Ok((dir_str.as_ptr(), dir_str))
         },
index 1cd05b61d25b0f3b49d16fe65679a05b2ea069c4..fa3cab2191edde5ba25924ef0a838e235b6aae75 100644 (file)
@@ -8,9 +8,12 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+#![unstable(issue = "0", feature = "windows_stdio")]
+
 use prelude::v1::*;
 use io::prelude::*;
 
+use cmp;
 use io::{self, Cursor};
 use ptr;
 use str;
@@ -18,6 +21,7 @@ use sync::Mutex;
 use sys::c;
 use sys::cvt;
 use sys::handle::Handle;
+use sys_common::io::read_to_end_uninitialized;
 
 pub struct NoClose(Option<Handle>);
 
@@ -55,23 +59,37 @@ fn write(out: &Output, data: &[u8]) -> io::Result<usize> {
         Output::Console(ref c) => c.get().raw(),
         Output::Pipe(ref p) => return p.get().write(data),
     };
-    let utf16 = match str::from_utf8(data).ok() {
-        Some(utf8) => utf8.encode_utf16().collect::<Vec<u16>>(),
-        None => return Err(invalid_encoding()),
+    // As with stdin on windows, stdout often can't handle writes of large
+    // sizes. For an example, see #14940. For this reason, don't try to
+    // write the entire output buffer on windows.
+    //
+    // For some other references, it appears that this problem has been
+    // encountered by others [1] [2]. We choose the number 8K just because
+    // libuv does the same.
+    //
+    // [1]: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232
+    // [2]: http://www.mail-archive.com/log4net-dev@logging.apache.org/msg00661.html
+    const OUT_MAX: usize = 8192;
+    let len = cmp::min(data.len(), OUT_MAX);
+    let utf8 = match str::from_utf8(&data[..len]) {
+        Ok(s) => s,
+        Err(ref e) if e.valid_up_to() == 0 => return Err(invalid_encoding()),
+        Err(e) => str::from_utf8(&data[..e.valid_up_to()]).unwrap(),
     };
+    let utf16 = utf8.encode_utf16().collect::<Vec<u16>>();
     let mut written = 0;
-    try!(cvt(unsafe {
+    cvt(unsafe {
         c::WriteConsoleW(handle,
                          utf16.as_ptr() as c::LPCVOID,
                          utf16.len() as u32,
                          &mut written,
                          ptr::null_mut())
-    }));
+    })?;
 
     // FIXME if this only partially writes the utf16 buffer then we need to
     //       figure out how many bytes of `data` were actually written
     assert_eq!(written as usize, utf16.len());
-    Ok(data.len())
+    Ok(utf8.len())
 }
 
 impl Stdin {
@@ -94,13 +112,13 @@ impl Stdin {
         if utf8.position() as usize == utf8.get_ref().len() {
             let mut utf16 = vec![0u16; 0x1000];
             let mut num = 0;
-            try!(cvt(unsafe {
+            cvt(unsafe {
                 c::ReadConsoleW(handle,
                                 utf16.as_mut_ptr() as c::LPVOID,
                                 utf16.len() as u32,
                                 &mut num,
                                 ptr::null_mut())
-            }));
+            })?;
             utf16.truncate(num as usize);
             // FIXME: what to do about this data that has already been read?
             let data = match String::from_utf16(&utf16) {
@@ -113,6 +131,22 @@ impl Stdin {
         // MemReader shouldn't error here since we just filled it
         utf8.read(buf)
     }
+
+    pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        let mut me = self;
+        (&mut me).read_to_end(buf)
+    }
+}
+
+#[unstable(reason = "not public", issue = "0", feature = "fd_read")]
+impl<'a> Read for &'a Stdin {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        (**self).read(buf)
+    }
+
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        unsafe { read_to_end_uninitialized(self, buf) }
+    }
 }
 
 impl Stdout {
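
The console write path above caps each write at 8K and converts only the longest valid UTF-8 prefix of the buffer to UTF-16, then reports how many input bytes were consumed so the caller can retry with the remainder. A portable sketch of that prefix selection (the `utf8_prefix` name is ours):

    use std::cmp;
    use std::str;

    // Return the longest valid UTF-8 prefix of at most 8K bytes, or None if
    // not even the first byte is valid (matching the invalid_encoding() case).
    fn utf8_prefix(data: &[u8]) -> Option<&str> {
        const OUT_MAX: usize = 8192;
        let len = cmp::min(data.len(), OUT_MAX);
        match str::from_utf8(&data[..len]) {
            Ok(s) => Some(s),
            Err(ref e) if e.valid_up_to() == 0 => None,
            Err(e) => Some(str::from_utf8(&data[..e.valid_up_to()]).unwrap()),
        }
    }

    fn main() {
        assert_eq!(utf8_prefix("héllo".as_bytes()), Some("héllo"));
        // A lone continuation byte at the start has no valid prefix.
        assert_eq!(utf8_prefix(&[0x80, b'a']), None);
    }
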
index b18772c0c24381c75589fcd140786f87d96067d4..0383e92c79ec7b24a2c1d0c8ff4575e404c4ac1a 100644 (file)
@@ -12,6 +12,7 @@ use prelude::v1::*;
 
 use alloc::boxed::FnBox;
 use io;
+use ffi::CStr;
 use mem;
 use libc::c_void;
 use ptr;
@@ -54,7 +55,7 @@ impl Thread {
         }
     }
 
-    pub fn set_name(_name: &str) {
+    pub fn set_name(_name: &CStr) {
         // Windows threads are nameless
         // The names in MSVC debugger are obtained using a "magic" exception,
         // which requires the use of MS C++ extensions.
index 981ba1e36e9d5c531b7811365b6039443b0fb921..b3549dc12645aec854e0c0ec3d4d1b90ede910e9 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! Native threads
+//! Native threads.
 //!
 //! ## The threading model
 //!
@@ -166,6 +166,8 @@ use any::Any;
 use cell::UnsafeCell;
 use fmt;
 use io;
+use str;
+use ffi::{CStr, CString};
 use sync::{Mutex, Condvar, Arc};
 use sys::thread as imp;
 use sys_common::thread_info;
@@ -267,7 +269,7 @@ impl Builder {
         let their_packet = my_packet.clone();
 
         let main = move || {
-            if let Some(name) = their_thread.name() {
+            if let Some(name) = their_thread.cname() {
                 imp::Thread::set_name(name);
             }
             unsafe {
@@ -285,7 +287,7 @@ impl Builder {
 
         Ok(JoinHandle(JoinInner {
             native: unsafe {
-                Some(try!(imp::Thread::new(stack_size, Box::new(main))))
+                Some(imp::Thread::new(stack_size, Box::new(main))?)
             },
             thread: my_thread,
             packet: Packet(my_packet),
@@ -339,40 +341,6 @@ pub fn panicking() -> bool {
     unwind::panicking()
 }
 
-/// Invokes a closure, capturing the cause of panic if one occurs.
-///
-/// This function will return `Ok` with the closure's result if the closure
-/// does not panic, and will return `Err(cause)` if the closure panics. The
-/// `cause` returned is the object with which panic was originally invoked.
-///
-/// It is currently undefined behavior to unwind from Rust code into foreign
-/// code, so this function is particularly useful when Rust is called from
-/// another language (normally C). This can run arbitrary Rust code, capturing a
-/// panic and allowing a graceful handling of the error.
-///
-/// It is **not** recommended to use this function for a general try/catch
-/// mechanism. The `Result` type is more appropriate to use for functions that
-/// can fail on a regular basis.
-///
-/// The closure provided is required to adhere to the `'static` bound to ensure
-/// that it cannot reference data in the parent stack frame, mitigating problems
-/// with exception safety. Furthermore, a `Send` bound is also required,
-/// providing the same safety guarantees as `thread::spawn` (ensuring the
-/// closure is properly isolated from the parent).
-#[unstable(feature = "catch_panic", reason = "recent API addition",
-           issue = "27719")]
-#[rustc_deprecated(since = "1.6.0", reason = "renamed to std::panic::recover")]
-pub fn catch_panic<F, R>(f: F) -> Result<R>
-    where F: FnOnce() -> R + Send + 'static
-{
-    let mut result = None;
-    unsafe {
-        let result = &mut result;
-        try!(unwind::try(move || *result = Some(f())))
-    }
-    Ok(result.unwrap())
-}
-
 /// Puts the current thread to sleep for the specified amount of time.
 ///
 /// The thread may sleep longer than the duration specified due to scheduling
@@ -484,7 +452,7 @@ pub fn park_timeout(dur: Duration) {
 
 /// The internal representation of a `Thread` handle
 struct Inner {
-    name: Option<String>,
+    name: Option<CString>,      // Guaranteed to be UTF-8
     lock: Mutex<bool>,          // true when there is a buffered unpark
     cvar: Condvar,
 }
@@ -499,9 +467,12 @@ pub struct Thread {
 impl Thread {
     // Used only internally to construct a thread object without spawning
     fn new(name: Option<String>) -> Thread {
+        let cname = name.map(|n| CString::new(n).unwrap_or_else(|_| {
+            panic!("thread name may not contain interior null bytes")
+        }));
         Thread {
             inner: Arc::new(Inner {
-                name: name,
+                name: cname,
                 lock: Mutex::new(false),
                 cvar: Condvar::new(),
             })
@@ -523,6 +494,10 @@ impl Thread {
     /// Gets the thread's name.
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn name(&self) -> Option<&str> {
+        self.cname().map(|s| unsafe { str::from_utf8_unchecked(s.to_bytes()) } )
+    }
+
+    fn cname(&self) -> Option<&CStr> {
         self.inner.name.as_ref().map(|s| &**s)
     }
 }
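
Thread names are now stored as a `CString` that is also guaranteed UTF-8: construction rejects interior NULs (hence the new `should_panic` test below), and `name()` can recover a `&str` without copying. A small sketch of the two properties this relies on:

    use std::ffi::CString;
    use std::str;

    fn main() {
        // Valid UTF-8 with no interior NUL round-trips through CString.
        let c = CString::new("worker-1").unwrap();
        let s = unsafe { str::from_utf8_unchecked(c.to_bytes()) };
        assert_eq!(s, "worker-1");

        // An interior NUL is rejected, which is what Thread::new panics on.
        assert!(CString::new("ada l\0velace").is_err());
    }
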
@@ -656,6 +631,12 @@ mod tests {
         }).unwrap().join().unwrap();
     }
 
+    #[test]
+    #[should_panic]
+    fn test_invalid_named_thread() {
+        let _ = Builder::new().name("ada l\0velace".to_string()).spawn(|| {});
+    }
+
     #[test]
     fn test_run_basic() {
         let (tx, rx) = channel();
index 7ecb3920cc86c9ede483e4a79cf264de587ec15a..8a50f07e6d85419d0253edbb9365519db5e9259a 100644 (file)
@@ -8,8 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use ops::{Add, Sub, Mul, Div};
-use time::Instant;
+use ops::{Add, Sub, Mul, Div, AddAssign, SubAssign, MulAssign, DivAssign};
 
 const NANOS_PER_SEC: u32 = 1_000_000_000;
 const NANOS_PER_MILLI: u32 = 1_000_000;
@@ -53,35 +52,23 @@ impl Duration {
     /// If the number of nanoseconds is greater than or equal to 1 billion (the number
     /// of nanoseconds in a second), then the excess carries over into the seconds provided.
     #[stable(feature = "duration", since = "1.3.0")]
+    #[inline]
     pub fn new(secs: u64, nanos: u32) -> Duration {
         let secs = secs + (nanos / NANOS_PER_SEC) as u64;
         let nanos = nanos % NANOS_PER_SEC;
         Duration { secs: secs, nanos: nanos }
     }
 
-    /// Runs a closure, returning the duration of time it took to run the
-    /// closure.
-    #[unstable(feature = "duration_span",
-               reason = "unsure if this is the right API or whether it should \
-                         wait for a more general \"moment in time\" \
-                         abstraction",
-               issue = "27799")]
-    #[rustc_deprecated(reason = "use std::time::Instant instead",
-                       since = "1.6.0")]
-    pub fn span<F>(f: F) -> Duration where F: FnOnce() {
-        let start = Instant::now();
-        f();
-        start.elapsed()
-    }
-
     /// Creates a new `Duration` from the specified number of seconds.
     #[stable(feature = "duration", since = "1.3.0")]
+    #[inline]
     pub fn from_secs(secs: u64) -> Duration {
         Duration { secs: secs, nanos: 0 }
     }
 
     /// Creates a new `Duration` from the specified number of milliseconds.
     #[stable(feature = "duration", since = "1.3.0")]
+    #[inline]
     pub fn from_millis(millis: u64) -> Duration {
         let secs = millis / MILLIS_PER_SEC;
         let nanos = ((millis % MILLIS_PER_SEC) as u32) * NANOS_PER_MILLI;
@@ -93,6 +80,7 @@ impl Duration {
     /// The extra precision represented by this duration is ignored (e.g. extra
     /// nanoseconds are not represented in the returned value).
     #[stable(feature = "duration", since = "1.3.0")]
+    #[inline]
     pub fn as_secs(&self) -> u64 { self.secs }
 
     /// Returns the nanosecond precision represented by this duration.
@@ -101,6 +89,7 @@ impl Duration {
     /// represented by nanoseconds. The returned number always represents a
     /// fractional portion of a second (e.g. it is less than one billion).
     #[stable(feature = "duration", since = "1.3.0")]
+    #[inline]
     pub fn subsec_nanos(&self) -> u32 { self.nanos }
 }
 
@@ -121,6 +110,13 @@ impl Add for Duration {
     }
 }
 
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl AddAssign for Duration {
+    fn add_assign(&mut self, rhs: Duration) {
+        *self = *self + rhs;
+    }
+}
+
 #[stable(feature = "duration", since = "1.3.0")]
 impl Sub for Duration {
     type Output = Duration;
@@ -140,6 +136,13 @@ impl Sub for Duration {
     }
 }
 
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl SubAssign for Duration {
+    fn sub_assign(&mut self, rhs: Duration) {
+        *self = *self - rhs;
+    }
+}
+
 #[stable(feature = "duration", since = "1.3.0")]
 impl Mul<u32> for Duration {
     type Output = Duration;
@@ -157,6 +160,13 @@ impl Mul<u32> for Duration {
     }
 }
 
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl MulAssign<u32> for Duration {
+    fn mul_assign(&mut self, rhs: u32) {
+        *self = *self * rhs;
+    }
+}
+
 #[stable(feature = "duration", since = "1.3.0")]
 impl Div<u32> for Duration {
     type Output = Duration;
@@ -171,6 +181,13 @@ impl Div<u32> for Duration {
     }
 }
 
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl DivAssign<u32> for Duration {
+    fn div_assign(&mut self, rhs: u32) {
+        *self = *self / rhs;
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::Duration;
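
With these impls, `Duration` now works with the compound assignment operators directly. A quick sketch of what they enable (requires a toolchain that ships these impls, i.e. 1.9.0 or later):

```rust
use std::time::Duration;

fn main() {
    let mut d = Duration::new(1, 0);
    d += Duration::from_millis(500); // AddAssign
    d -= Duration::from_millis(200); // SubAssign
    d *= 2;                          // MulAssign<u32>
    d /= 4;                          // DivAssign<u32>
    assert_eq!(d, Duration::from_millis(650));
}
```

Each impl simply delegates to the existing binary operator, so overflow behaviour is unchanged.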
index aa0a843dc9a548763d6a713538e39fbe7659f9d3..bc50b0d3a7066c8e44e313de078bc15fab1769cc 100644 (file)
@@ -9,12 +9,22 @@
 // except according to those terms.
 
 //! Temporal quantification.
+//!
+//! Example:
+//!
+//! ```
+//! use std::time::Duration;
+//!
+//! let five_seconds = Duration::new(5, 0);
+//! // both declarations are equivalent
+//! assert_eq!(Duration::new(5, 0), Duration::from_secs(5));
+//! ```
 
 #![stable(feature = "time", since = "1.3.0")]
 
 use error::Error;
 use fmt;
-use ops::{Add, Sub};
+use ops::{Add, Sub, AddAssign, SubAssign};
 use sys::time;
 use sys_common::FromInner;
 
@@ -40,6 +50,22 @@ mod duration;
 /// no method to get "the number of seconds" from an instant. Instead, it only
 /// allows measuring the duration between two instants (or comparing two
 /// instants).
+///
+/// Example:
+///
+/// ```no_run
+/// use std::time::{Duration, Instant};
+/// use std::thread::sleep;
+///
+/// fn main() {
+///    let now = Instant::now();
+///
+///    // we sleep for 2 seconds
+///    sleep(Duration::new(2, 0));
+///    // it prints '2'
+///    println!("{}", now.elapsed().as_secs());
+/// }
+/// ```
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
 #[stable(feature = "time2", since = "1.8.0")]
 pub struct Instant(time::Instant);
@@ -63,6 +89,30 @@ pub struct Instant(time::Instant);
 /// information about a `SystemTime`. By calculating the duration from this
 /// fixed point in time, a `SystemTime` can be converted to a human-readable time,
 /// or perhaps some other string representation.
+///
+/// Example:
+///
+/// ```no_run
+/// use std::time::{Duration, SystemTime};
+/// use std::thread::sleep;
+///
+/// fn main() {
+///    let now = SystemTime::now();
+///
+///    // we sleep for 2 seconds
+///    sleep(Duration::new(2, 0));
+///    match now.elapsed() {
+///        Ok(elapsed) => {
+///            // it prints '2'
+///            println!("{}", elapsed.as_secs());
+///        }
+///        Err(e) => {
+///            // an error occurred!
+///            println!("Error: {:?}", e);
+///        }
+///    }
+/// }
+/// ```
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
 #[stable(feature = "time2", since = "1.8.0")]
 pub struct SystemTime(time::SystemTime);
@@ -122,6 +172,13 @@ impl Add<Duration> for Instant {
     }
 }
 
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl AddAssign<Duration> for Instant {
+    fn add_assign(&mut self, other: Duration) {
+        *self = *self + other;
+    }
+}
+
 #[stable(feature = "time2", since = "1.8.0")]
 impl Sub<Duration> for Instant {
     type Output = Instant;
@@ -131,6 +188,13 @@ impl Sub<Duration> for Instant {
     }
 }
 
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl SubAssign<Duration> for Instant {
+    fn sub_assign(&mut self, other: Duration) {
+        *self = *self - other;
+    }
+}
+
 #[stable(feature = "time2", since = "1.8.0")]
 impl Sub<Instant> for Instant {
     type Output = Duration;
@@ -204,6 +268,13 @@ impl Add<Duration> for SystemTime {
     }
 }
 
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl AddAssign<Duration> for SystemTime {
+    fn add_assign(&mut self, other: Duration) {
+        *self = *self + other;
+    }
+}
+
 #[stable(feature = "time2", since = "1.8.0")]
 impl Sub<Duration> for SystemTime {
     type Output = SystemTime;
@@ -213,6 +284,13 @@ impl Sub<Duration> for SystemTime {
     }
 }
 
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl SubAssign<Duration> for SystemTime {
+    fn sub_assign(&mut self, other: Duration) {
+        *self = *self - other;
+    }
+}
+
 #[stable(feature = "time2", since = "1.8.0")]
 impl fmt::Debug for SystemTime {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
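
The same pattern is applied to `Instant` and `SystemTime` with a `Duration` on the right-hand side, again delegating to the existing `Add`/`Sub` impls. A minimal sketch (1.9.0 or later):

```rust
use std::time::{Duration, Instant, SystemTime};

fn main() {
    let mut deadline = Instant::now();
    deadline += Duration::from_secs(30);   // AddAssign<Duration> for Instant
    assert!(deadline > Instant::now());

    let mut stamp = SystemTime::now();
    stamp -= Duration::new(3600, 0);       // SubAssign<Duration> for SystemTime
    println!("one hour ago: {:?}", stamp);
}
```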
index f5ff7fbf61042baa051d16eb957741f97eddd055..964f2dcb6b6b79119615c42b14b42e9c2e16603e 100644 (file)
@@ -10,6 +10,5 @@ crate-type = ["dylib"]
 
 [dependencies]
 serialize = { path = "../libserialize" }
-term = { path = "../libterm" }
 log = { path = "../liblog" }
 rustc_bitflags = { path = "../librustc_bitflags" }
index 23bb6fd141a4ed22112dafd8655e705cc50ded19..16d4ed53b5b1905b3c5c41fad149bd4527771f00 100644 (file)
@@ -10,7 +10,6 @@
 
 // The Rust abstract syntax tree.
 
-pub use self::StructFieldKind::*;
 pub use self::TyParamBound::*;
 pub use self::UnsafeSource::*;
 pub use self::ViewPath_::*;
@@ -19,6 +18,7 @@ pub use self::PathParameters::*;
 use attr::ThinAttributes;
 use codemap::{Span, Spanned, DUMMY_SP, ExpnId};
 use abi::Abi;
+use errors;
 use ext::base;
 use ext::tt::macro_parser;
 use parse::token::InternedString;
@@ -83,7 +83,7 @@ impl Encodable for Name {
 
 impl Decodable for Name {
     fn decode<D: Decoder>(d: &mut D) -> Result<Name, D::Error> {
-        Ok(token::intern(&try!(d.read_str())[..]))
+        Ok(token::intern(&d.read_str()?[..]))
     }
 }
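
The `try!(...)` to `?` conversions that recur throughout this patch are mechanical: both forms yield the `Ok` value and return early from the enclosing function on `Err`. A rough sketch of the equivalence; `try_sketch!` is a simplified stand-in written for illustration, not the real `try!` expansion in libcore:

```rust
// Simplified model of what `try!` expands to; `?` does the same thing as an operator.
macro_rules! try_sketch {
    ($e:expr) => {
        match $e {
            Ok(v) => v,
            Err(e) => return Err(::std::convert::From::from(e)),
        }
    };
}

fn twice_old(s: &str) -> Result<i32, std::num::ParseIntError> {
    let n: i32 = try_sketch!(s.parse()); // old style: try!(s.parse())
    Ok(n * 2)
}

fn twice_new(s: &str) -> Result<i32, std::num::ParseIntError> {
    let n: i32 = s.parse()?;             // new style
    Ok(n * 2)
}

fn main() {
    assert_eq!(twice_old("21"), twice_new("21"));
    assert!(twice_new("not a number").is_err());
}
```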
 
@@ -152,7 +152,7 @@ impl Encodable for Ident {
 
 impl Decodable for Ident {
     fn decode<D: Decoder>(d: &mut D) -> Result<Ident, D::Error> {
-        Ok(Ident::with_empty_ctxt(try!(Name::decode(d))))
+        Ok(Ident::with_empty_ctxt(Name::decode(d)?))
     }
 }
 
@@ -204,6 +204,23 @@ impl fmt::Display for Path {
     }
 }
 
+impl Path {
+    // convert a span and an identifier to the corresponding
+    // 1-segment path
+    pub fn from_ident(s: Span, identifier: Ident) -> Path {
+        Path {
+            span: s,
+            global: false,
+            segments: vec!(
+                PathSegment {
+                    identifier: identifier,
+                    parameters: PathParameters::none()
+                }
+            ),
+        }
+    }
+}
+
 /// A segment of a path: an identifier, an optional lifetime, and a set of
 /// types.
 #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
@@ -344,6 +361,10 @@ pub const DUMMY_NODE_ID: NodeId = !0;
 pub trait NodeIdAssigner {
     fn next_node_id(&self) -> NodeId;
     fn peek_node_id(&self) -> NodeId;
+
+    fn diagnostic(&self) -> &errors::Handler {
+        panic!("this ID assigner cannot emit diagnostics")
+    }
 }
 
 /// The AST represents all type param bounds as types.
@@ -458,7 +479,7 @@ pub struct WhereEqPredicate {
 
 /// The set of MetaItems that define the compilation environment of the crate,
 /// used to drive conditional compilation
-pub type CrateConfig = Vec<P<MetaItem>> ;
+pub type CrateConfig = Vec<P<MetaItem>>;
 
 #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
 pub struct Crate {
@@ -886,6 +907,15 @@ impl fmt::Debug for Expr {
     }
 }
 
+/// Limit types of a range (inclusive or exclusive)
+#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum RangeLimits {
+    /// Inclusive at the beginning, exclusive at the end
+    HalfOpen,
+    /// Inclusive at the beginning and end
+    Closed,
+}
+
 #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
 pub enum ExprKind {
     /// A `box x` expression.
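
`RangeLimits` records whether a range expression includes its upper bound. A hedged sketch of the two surface forms; the closed form was written `1...5` when this change landed, and current Rust spells it `1..=5`:

```rust
fn main() {
    let half_open: Vec<u32> = (1..5).collect();  // RangeLimits::HalfOpen
    assert_eq!(half_open, [1, 2, 3, 4]);

    let closed: Vec<u32> = (1..=5).collect();    // RangeLimits::Closed
    assert_eq!(closed, [1, 2, 3, 4, 5]);
}
```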
@@ -918,7 +948,7 @@ pub enum ExprKind {
     Binary(BinOp, P<Expr>, P<Expr>),
     /// A unary operation (For example: `!x`, `*x`)
     Unary(UnOp, P<Expr>),
-    /// A literal (For example: `1u8`, `"foo"`)
+    /// A literal (For example: `1`, `"foo"`)
     Lit(P<Lit>),
     /// A cast (`foo as f64`)
     Cast(P<Expr>, P<Ty>),
@@ -974,8 +1004,8 @@ pub enum ExprKind {
     TupField(P<Expr>, Spanned<usize>),
     /// An indexing operation (`foo[2]`)
     Index(P<Expr>, P<Expr>),
-    /// A range (`1..2`, `1..`, or `..2`)
-    Range(Option<P<Expr>>, Option<P<Expr>>),
+    /// A range (`1..2`, `1..`, `..2`, `1...2`, `1...`, `...2`)
+    Range(Option<P<Expr>>, Option<P<Expr>>, RangeLimits),
 
     /// Variable reference, possibly containing `::` and/or type
     /// parameters, e.g. foo::bar::<baz>.
@@ -1007,12 +1037,15 @@ pub enum ExprKind {
 
     /// An array literal constructed from one repeated element.
     ///
-    /// For example, `[1u8; 5]`. The first expression is the element
+    /// For example, `[1; 5]`. The first expression is the element
     /// to be repeated; the second is the number of times to repeat it.
     Repeat(P<Expr>, P<Expr>),
 
     /// No-op: used solely so we can pretty-print faithfully
     Paren(P<Expr>),
+
+    /// `expr?`
+    Try(P<Expr>),
 }
 
 /// The explicit Self type in a "qualified path". The actual
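
`ExprKind::Try` is the new AST node for `expr?`, which at the time of this release was still behind the unstable `question_mark` feature gate. A hedged usage sketch on current Rust (the file path is arbitrary): each `?` propagates the error to the caller instead of panicking.

```rust
use std::fs::File;
use std::io::{self, Read};

fn read_config(path: &str) -> io::Result<String> {
    let mut contents = String::new();
    // Both `?`s parse into `ExprKind::Try`: return early on Err, continue on Ok.
    File::open(path)?.read_to_string(&mut contents)?;
    Ok(contents)
}

fn main() {
    match read_config("example-config.toml") {
        Ok(text) => println!("read {} bytes", text.len()),
        Err(err) => println!("could not read config: {}", err),
    }
}
```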
@@ -1276,7 +1309,7 @@ pub enum LitKind {
     Byte(u8),
     /// A character literal (`'a'`)
     Char(char),
-    /// An integer literal (`1u8`)
+    /// An integer literal (`1`)
     Int(u64, LitIntType),
     /// A float literal (`1f64` or `1E10f64`)
     Float(InternedString, FloatTy),
@@ -1316,10 +1349,10 @@ pub struct MethodSig {
     pub explicit_self: ExplicitSelf,
 }
 
-/// Represents a method declaration in a trait declaration, possibly including
-/// a default implementation. A trait method is either required (meaning it
-/// doesn't have an implementation, just a signature) or provided (meaning it
-/// has a default implementation).
+/// Represents an item declaration within a trait declaration,
+/// possibly including a default implementation. A trait item is
+/// either required (meaning it doesn't have an implementation, just a
+/// signature) or provided (meaning it has a default implementation).
 #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
 pub struct TraitItem {
     pub id: NodeId,
@@ -1341,6 +1374,7 @@ pub struct ImplItem {
     pub id: NodeId,
     pub ident: Ident,
     pub vis: Visibility,
+    pub defaultness: Defaultness,
     pub attrs: Vec<Attribute>,
     pub node: ImplItemKind,
     pub span: Span,
@@ -1642,6 +1676,12 @@ pub enum Constness {
     NotConst,
 }
 
+#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum Defaultness {
+    Default,
+    Final,
+}
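
The `defaultness` field and the `Defaultness` enum are groundwork for impl specialization. A nightly-only sketch of the surface syntax they track, assuming the unstable `specialization` feature gate; the `Greet` trait is made up for illustration:

```rust
#![feature(specialization)]

trait Greet {
    fn greet(&self) -> String;
}

// The blanket impl marks its method `default` (Defaultness::Default)...
impl<T> Greet for T {
    default fn greet(&self) -> String {
        "hello".to_string()
    }
}

// ...so a more specific impl may override it (Defaultness::Final).
impl Greet for u32 {
    fn greet(&self) -> String {
        "hello, u32".to_string()
    }
}

fn main() {
    assert_eq!("hello", ().greet());
    assert_eq!("hello, u32", 7u32.greet());
}
```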
+
 impl fmt::Display for Unsafety {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         fmt::Display::fmt(match *self {
@@ -1844,62 +1884,24 @@ pub struct PolyTraitRef {
     pub span: Span,
 }
 
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
 pub enum Visibility {
     Public,
+    Crate,
+    Restricted { path: P<Path>, id: NodeId },
     Inherited,
 }
 
-impl Visibility {
-    pub fn inherit_from(&self, parent_visibility: Visibility) -> Visibility {
-        match *self {
-            Visibility::Inherited => parent_visibility,
-            Visibility::Public => *self
-        }
-    }
-}
-
 #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub struct StructField_ {
-    pub kind: StructFieldKind,
+pub struct StructField {
+    pub span: Span,
+    pub ident: Option<Ident>,
+    pub vis: Visibility,
     pub id: NodeId,
     pub ty: P<Ty>,
     pub attrs: Vec<Attribute>,
 }
 
-impl StructField_ {
-    pub fn ident(&self) -> Option<Ident> {
-        match self.kind {
-            NamedField(ref ident, _) => Some(ident.clone()),
-            UnnamedField(_) => None
-        }
-    }
-}
-
-pub type StructField = Spanned<StructField_>;
-
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
-pub enum StructFieldKind {
-    NamedField(Ident, Visibility),
-    /// Element of a tuple-like struct
-    UnnamedField(Visibility),
-}
-
-impl StructFieldKind {
-    pub fn is_unnamed(&self) -> bool {
-        match *self {
-            UnnamedField(..) => true,
-            NamedField(..) => false,
-        }
-    }
-
-    pub fn visibility(&self) -> Visibility {
-        match *self {
-            NamedField(_, vis) | UnnamedField(vis) => vis
-        }
-    }
-}
-
 /// Fields and Ids of enum variants and structs
 ///
 /// For enum variants: `NodeId` represents both an Id of the variant itself (relevant for all
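
Named and tuple-like fields now share the single `StructField` struct (with `ident` being `Some(..)` or `None`), and `Visibility` grows `Crate` and `Restricted` variants. A hedged sketch of the surface forms these describe; the `pub(crate)` syntax shown only gained stable surface support in later releases:

```rust
mod outer {
    pub struct Named {
        pub a: u32,        // StructField { ident: Some(a), vis: Public, .. }
        pub(crate) b: u32, // vis: Crate
    }

    pub struct Tuple(pub u32, pub u32); // tuple-like fields have ident: None

    pub fn build() -> (Named, Tuple) {
        (Named { a: 1, b: 2 }, Tuple(3, 4))
    }
}

fn main() {
    let (named, tuple) = outer::build();
    println!("{} {} {} {}", named.a, named.b, tuple.0, tuple.1);
}
```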
diff --git a/src/libsyntax/ast_util.rs b/src/libsyntax/ast_util.rs
deleted file mode 100644 (file)
index 5d55b6f..0000000
+++ /dev/null
@@ -1,398 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use ast::*;
-use ast;
-use codemap;
-use codemap::Span;
-use parse::token;
-use print::pprust;
-use ptr::P;
-use visit::{FnKind, Visitor};
-use visit;
-
-use std::cmp;
-use std::u32;
-
-pub fn path_name_i(idents: &[Ident]) -> String {
-    // FIXME: Bad copies (#2543 -- same for everything else that says "bad")
-    idents.iter().map(|i| i.to_string()).collect::<Vec<String>>().join("::")
-}
-
-pub fn is_path(e: P<Expr>) -> bool {
-    match e.node { ExprKind::Path(..) => true, _ => false }
-}
-
-
-// convert a span and an identifier to the corresponding
-// 1-segment path
-pub fn ident_to_path(s: Span, identifier: Ident) -> Path {
-    ast::Path {
-        span: s,
-        global: false,
-        segments: vec!(
-            ast::PathSegment {
-                identifier: identifier,
-                parameters: ast::PathParameters::AngleBracketed(ast::AngleBracketedParameterData {
-                    lifetimes: Vec::new(),
-                    types: P::empty(),
-                    bindings: P::empty(),
-                })
-            }
-        ),
-    }
-}
-
-// If path is a single segment ident path, return that ident. Otherwise, return
-// None.
-pub fn path_to_ident(path: &Path) -> Option<Ident> {
-    if path.segments.len() != 1 {
-        return None;
-    }
-
-    let segment = &path.segments[0];
-    if !segment.parameters.is_empty() {
-        return None;
-    }
-
-    Some(segment.identifier)
-}
-
-pub fn ident_to_pat(id: NodeId, s: Span, i: Ident) -> P<Pat> {
-    let spanned = codemap::Spanned{ span: s, node: i };
-    P(Pat {
-        id: id,
-        node: PatKind::Ident(BindingMode::ByValue(Mutability::Immutable), spanned, None),
-        span: s
-    })
-}
-
-/// Generate a "pretty" name for an `impl` from its type and trait.
-/// This is designed so that symbols of `impl`'d methods give some
-/// hint of where they came from, (previously they would all just be
-/// listed as `__extensions__::method_name::hash`, with no indication
-/// of the type).
-pub fn impl_pretty_name(trait_ref: &Option<TraitRef>, ty: Option<&Ty>) -> Ident {
-    let mut pretty = match ty {
-        Some(t) => pprust::ty_to_string(t),
-        None => String::from("..")
-    };
-
-    match *trait_ref {
-        Some(ref trait_ref) => {
-            pretty.push('.');
-            pretty.push_str(&pprust::path_to_string(&trait_ref.path));
-        }
-        None => {}
-    }
-    token::gensym_ident(&pretty[..])
-}
-
-pub fn struct_field_visibility(field: ast::StructField) -> Visibility {
-    match field.node.kind {
-        ast::NamedField(_, v) | ast::UnnamedField(v) => v
-    }
-}
-
-// ______________________________________________________________________
-// Enumerating the IDs which appear in an AST
-
-#[derive(Copy, Clone, RustcEncodable, RustcDecodable, Debug)]
-pub struct IdRange {
-    pub min: NodeId,
-    pub max: NodeId,
-}
-
-impl IdRange {
-    pub fn max() -> IdRange {
-        IdRange {
-            min: u32::MAX,
-            max: u32::MIN,
-        }
-    }
-
-    pub fn empty(&self) -> bool {
-        self.min >= self.max
-    }
-
-    pub fn add(&mut self, id: NodeId) {
-        self.min = cmp::min(self.min, id);
-        self.max = cmp::max(self.max, id + 1);
-    }
-}
-
-pub trait IdVisitingOperation {
-    fn visit_id(&mut self, node_id: NodeId);
-}
-
-/// A visitor that applies its operation to all of the node IDs
-/// in a visitable thing.
-
-pub struct IdVisitor<'a, O:'a> {
-    pub operation: &'a mut O,
-    pub visited_outermost: bool,
-}
-
-impl<'a, O: IdVisitingOperation> IdVisitor<'a, O> {
-    fn visit_generics_helper(&mut self, generics: &Generics) {
-        for type_parameter in generics.ty_params.iter() {
-            self.operation.visit_id(type_parameter.id)
-        }
-        for lifetime in &generics.lifetimes {
-            self.operation.visit_id(lifetime.lifetime.id)
-        }
-    }
-}
-
-impl<'a, 'v, O: IdVisitingOperation> Visitor<'v> for IdVisitor<'a, O> {
-    fn visit_mod(&mut self,
-                 module: &Mod,
-                 _: Span,
-                 node_id: NodeId) {
-        self.operation.visit_id(node_id);
-        visit::walk_mod(self, module)
-    }
-
-    fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) {
-        self.operation.visit_id(foreign_item.id);
-        visit::walk_foreign_item(self, foreign_item)
-    }
-
-    fn visit_item(&mut self, item: &Item) {
-        if self.visited_outermost {
-            return
-        } else {
-            self.visited_outermost = true
-        }
-
-        self.operation.visit_id(item.id);
-        match item.node {
-            ItemKind::Use(ref view_path) => {
-                match view_path.node {
-                    ViewPathSimple(_, _) |
-                    ViewPathGlob(_) => {}
-                    ViewPathList(_, ref paths) => {
-                        for path in paths {
-                            self.operation.visit_id(path.node.id())
-                        }
-                    }
-                }
-            }
-            _ => {}
-        }
-
-        visit::walk_item(self, item);
-
-        self.visited_outermost = false
-    }
-
-    fn visit_local(&mut self, local: &Local) {
-        self.operation.visit_id(local.id);
-        visit::walk_local(self, local)
-    }
-
-    fn visit_block(&mut self, block: &Block) {
-        self.operation.visit_id(block.id);
-        visit::walk_block(self, block)
-    }
-
-    fn visit_stmt(&mut self, statement: &Stmt) {
-        self.operation
-            .visit_id(statement.node.id().expect("attempted to visit unexpanded stmt"));
-        visit::walk_stmt(self, statement)
-    }
-
-    fn visit_pat(&mut self, pattern: &Pat) {
-        self.operation.visit_id(pattern.id);
-        visit::walk_pat(self, pattern)
-    }
-
-    fn visit_expr(&mut self, expression: &Expr) {
-        self.operation.visit_id(expression.id);
-        visit::walk_expr(self, expression)
-    }
-
-    fn visit_ty(&mut self, typ: &Ty) {
-        self.operation.visit_id(typ.id);
-        visit::walk_ty(self, typ)
-    }
-
-    fn visit_generics(&mut self, generics: &Generics) {
-        self.visit_generics_helper(generics);
-        visit::walk_generics(self, generics)
-    }
-
-    fn visit_fn(&mut self,
-                function_kind: visit::FnKind<'v>,
-                function_declaration: &'v FnDecl,
-                block: &'v Block,
-                span: Span,
-                node_id: NodeId) {
-        match function_kind {
-            FnKind::Method(..) if self.visited_outermost => return,
-            FnKind::Method(..) => self.visited_outermost = true,
-            _ => {}
-        }
-
-        self.operation.visit_id(node_id);
-
-        match function_kind {
-            FnKind::ItemFn(_, generics, _, _, _, _) => {
-                self.visit_generics_helper(generics)
-            }
-            FnKind::Method(_, sig, _) => {
-                self.visit_generics_helper(&sig.generics)
-            }
-            FnKind::Closure => {}
-        }
-
-        for argument in &function_declaration.inputs {
-            self.operation.visit_id(argument.id)
-        }
-
-        visit::walk_fn(self,
-                       function_kind,
-                       function_declaration,
-                       block,
-                       span);
-
-        if let FnKind::Method(..) = function_kind {
-            self.visited_outermost = false;
-        }
-    }
-
-    fn visit_struct_field(&mut self, struct_field: &StructField) {
-        self.operation.visit_id(struct_field.node.id);
-        visit::walk_struct_field(self, struct_field)
-    }
-
-    fn visit_variant_data(&mut self,
-                        struct_def: &VariantData,
-                        _: ast::Ident,
-                        _: &ast::Generics,
-                        _: NodeId,
-                        _: Span) {
-        self.operation.visit_id(struct_def.id());
-        visit::walk_struct_def(self, struct_def);
-    }
-
-    fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
-        self.operation.visit_id(ti.id);
-        visit::walk_trait_item(self, ti);
-    }
-
-    fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
-        self.operation.visit_id(ii.id);
-        visit::walk_impl_item(self, ii);
-    }
-
-    fn visit_lifetime(&mut self, lifetime: &Lifetime) {
-        self.operation.visit_id(lifetime.id);
-    }
-
-    fn visit_lifetime_def(&mut self, def: &LifetimeDef) {
-        self.visit_lifetime(&def.lifetime);
-    }
-
-    fn visit_trait_ref(&mut self, trait_ref: &TraitRef) {
-        self.operation.visit_id(trait_ref.ref_id);
-        visit::walk_trait_ref(self, trait_ref);
-    }
-}
-
-pub struct IdRangeComputingVisitor {
-    pub result: IdRange,
-}
-
-impl IdRangeComputingVisitor {
-    pub fn new() -> IdRangeComputingVisitor {
-        IdRangeComputingVisitor { result: IdRange::max() }
-    }
-
-    pub fn result(&self) -> IdRange {
-        self.result
-    }
-}
-
-impl IdVisitingOperation for IdRangeComputingVisitor {
-    fn visit_id(&mut self, id: NodeId) {
-        self.result.add(id);
-    }
-}
-
-/// Computes the id range for a single fn body, ignoring nested items.
-pub fn compute_id_range_for_fn_body(fk: FnKind,
-                                    decl: &FnDecl,
-                                    body: &Block,
-                                    sp: Span,
-                                    id: NodeId)
-                                    -> IdRange
-{
-    let mut visitor = IdRangeComputingVisitor::new();
-    let mut id_visitor = IdVisitor {
-        operation: &mut visitor,
-        visited_outermost: false,
-    };
-    id_visitor.visit_fn(fk, decl, body, sp, id);
-    id_visitor.operation.result
-}
-
-/// Returns true if the given pattern consists solely of an identifier
-/// and false otherwise.
-pub fn pat_is_ident(pat: P<ast::Pat>) -> bool {
-    match pat.node {
-        PatKind::Ident(..) => true,
-        _ => false,
-    }
-}
-
-// are two paths equal when compared unhygienically?
-// since I'm using this to replace ==, it seems appropriate
-// to compare the span, global, etc. fields as well.
-pub fn path_name_eq(a : &ast::Path, b : &ast::Path) -> bool {
-    (a.span.source_equal(&b.span))
-    && (a.global == b.global)
-    && (segments_name_eq(&a.segments[..], &b.segments[..]))
-}
-
-// are two arrays of segments equal when compared unhygienically?
-pub fn segments_name_eq(a : &[ast::PathSegment], b : &[ast::PathSegment]) -> bool {
-    a.len() == b.len() &&
-    a.iter().zip(b).all(|(s, t)| {
-        s.identifier.name == t.identifier.name &&
-        // FIXME #7743: ident -> name problems in lifetime comparison?
-        // can types contain idents?
-        s.parameters == t.parameters
-    })
-}
-
-#[cfg(test)]
-mod tests {
-    use ast::*;
-    use super::*;
-
-    fn ident_to_segment(id: Ident) -> PathSegment {
-        PathSegment {identifier: id,
-                     parameters: PathParameters::none()}
-    }
-
-    #[test] fn idents_name_eq_test() {
-        assert!(segments_name_eq(
-            &[Ident::new(Name(3),SyntaxContext(4)), Ident::new(Name(78),SyntaxContext(82))]
-                .iter().cloned().map(ident_to_segment).collect::<Vec<PathSegment>>(),
-            &[Ident::new(Name(3),SyntaxContext(104)), Ident::new(Name(78),SyntaxContext(182))]
-                .iter().cloned().map(ident_to_segment).collect::<Vec<PathSegment>>()));
-        assert!(!segments_name_eq(
-            &[Ident::new(Name(3),SyntaxContext(4)), Ident::new(Name(78),SyntaxContext(82))]
-                .iter().cloned().map(ident_to_segment).collect::<Vec<PathSegment>>(),
-            &[Ident::new(Name(3),SyntaxContext(104)), Ident::new(Name(77),SyntaxContext(182))]
-                .iter().cloned().map(ident_to_segment).collect::<Vec<PathSegment>>()));
-    }
-}
index c7f8a56135d298c8a1372c6054cd10661f9d1618..f771ee95bd1215696ca6557771290ca1a372755d 100644 (file)
@@ -86,7 +86,7 @@ impl Encodable for BytePos {
 
 impl Decodable for BytePos {
     fn decode<D: Decoder>(d: &mut D) -> Result<BytePos, D::Error> {
-        Ok(BytePos(try!{ d.read_u32() }))
+        Ok(BytePos(d.read_u32()?))
     }
 }
 
@@ -203,9 +203,9 @@ pub struct Spanned<T> {
 impl Encodable for Span {
     fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
         s.emit_struct("Span", 2, |s| {
-            try!(s.emit_struct_field("lo", 0, |s| {
+            s.emit_struct_field("lo", 0, |s| {
                 self.lo.encode(s)
-            }));
+            })?;
 
             s.emit_struct_field("hi", 1, |s| {
                 self.hi.encode(s)
@@ -217,13 +217,13 @@ impl Encodable for Span {
 impl Decodable for Span {
     fn decode<D: Decoder>(d: &mut D) -> Result<Span, D::Error> {
         d.read_struct("Span", 2, |d| {
-            let lo = try!(d.read_struct_field("lo", 0, |d| {
+            let lo = d.read_struct_field("lo", 0, |d| {
                 BytePos::decode(d)
-            }));
+            })?;
 
-            let hi = try!(d.read_struct_field("hi", 1, |d| {
+            let hi = d.read_struct_field("hi", 1, |d| {
                 BytePos::decode(d)
-            }));
+            })?;
 
             Ok(mk_sp(lo, hi))
         })
@@ -526,56 +526,55 @@ pub struct FileMap {
 impl Encodable for FileMap {
     fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
         s.emit_struct("FileMap", 5, |s| {
-            try! { s.emit_struct_field("name", 0, |s| self.name.encode(s)) };
-            try! { s.emit_struct_field("start_pos", 1, |s| self.start_pos.encode(s)) };
-            try! { s.emit_struct_field("end_pos", 2, |s| self.end_pos.encode(s)) };
-            try! { s.emit_struct_field("lines", 3, |s| {
-                    let lines = self.lines.borrow();
-                    // store the length
-                    try! { s.emit_u32(lines.len() as u32) };
-
-                    if !lines.is_empty() {
-                        // In order to preserve some space, we exploit the fact that
-                        // the lines list is sorted and individual lines are
-                        // probably not that long. Because of that we can store lines
-                        // as a difference list, using as little space as possible
-                        // for the differences.
-                        let max_line_length = if lines.len() == 1 {
-                            0
-                        } else {
-                            lines.windows(2)
-                                 .map(|w| w[1] - w[0])
-                                 .map(|bp| bp.to_usize())
-                                 .max()
-                                 .unwrap()
-                        };
-
-                        let bytes_per_diff: u8 = match max_line_length {
-                            0 ... 0xFF => 1,
-                            0x100 ... 0xFFFF => 2,
-                            _ => 4
-                        };
-
-                        // Encode the number of bytes used per diff.
-                        try! { bytes_per_diff.encode(s) };
-
-                        // Encode the first element.
-                        try! { lines[0].encode(s) };
-
-                        let diff_iter = (&lines[..]).windows(2)
-                                                    .map(|w| (w[1] - w[0]));
-
-                        match bytes_per_diff {
-                            1 => for diff in diff_iter { try! { (diff.0 as u8).encode(s) } },
-                            2 => for diff in diff_iter { try! { (diff.0 as u16).encode(s) } },
-                            4 => for diff in diff_iter { try! { diff.0.encode(s) } },
-                            _ => unreachable!()
-                        }
+            s.emit_struct_field("name", 0, |s| self.name.encode(s))?;
+            s.emit_struct_field("start_pos", 1, |s| self.start_pos.encode(s))?;
+            s.emit_struct_field("end_pos", 2, |s| self.end_pos.encode(s))?;
+            s.emit_struct_field("lines", 3, |s| {
+                let lines = self.lines.borrow();
+                // store the length
+                s.emit_u32(lines.len() as u32)?;
+
+                if !lines.is_empty() {
+                    // In order to preserve some space, we exploit the fact that
+                    // the lines list is sorted and individual lines are
+                    // probably not that long. Because of that we can store lines
+                    // as a difference list, using as little space as possible
+                    // for the differences.
+                    let max_line_length = if lines.len() == 1 {
+                        0
+                    } else {
+                        lines.windows(2)
+                             .map(|w| w[1] - w[0])
+                             .map(|bp| bp.to_usize())
+                             .max()
+                             .unwrap()
+                    };
+
+                    let bytes_per_diff: u8 = match max_line_length {
+                        0 ... 0xFF => 1,
+                        0x100 ... 0xFFFF => 2,
+                        _ => 4
+                    };
+
+                    // Encode the number of bytes used per diff.
+                    bytes_per_diff.encode(s)?;
+
+                    // Encode the first element.
+                    lines[0].encode(s)?;
+
+                    let diff_iter = (&lines[..]).windows(2)
+                                                .map(|w| (w[1] - w[0]));
+
+                    match bytes_per_diff {
+                        1 => for diff in diff_iter { (diff.0 as u8).encode(s)? },
+                        2 => for diff in diff_iter { (diff.0 as u16).encode(s)? },
+                        4 => for diff in diff_iter { diff.0.encode(s)? },
+                        _ => unreachable!()
                     }
+                }
 
-                    Ok(())
-                })
-            };
+                Ok(())
+            })?;
             s.emit_struct_field("multibyte_chars", 4, |s| {
                 (*self.multibyte_chars.borrow()).encode(s)
             })
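
The rewritten block above keeps the existing strategy: line-start positions are serialized as a difference list, i.e. the first absolute offset plus per-line deltas, using the narrowest integer width that fits the largest delta. A standalone sketch of that idea with hypothetical helpers, separate from the `FileMap` code itself:

```rust
// Encode sorted line-start offsets as (bytes per delta, first offset, deltas).
fn encode(lines: &[u32]) -> (u8, u32, Vec<u32>) {
    let deltas: Vec<u32> = lines.windows(2).map(|w| w[1] - w[0]).collect();
    let max = deltas.iter().cloned().max().unwrap_or(0);
    let bytes_per_diff = if max <= 0xFF { 1 } else if max <= 0xFFFF { 2 } else { 4 };
    (bytes_per_diff, lines[0], deltas)
}

// Decoding replays the deltas from the first offset.
fn decode(first: u32, deltas: &[u32]) -> Vec<u32> {
    let mut lines = vec![first];
    for &d in deltas {
        let next = *lines.last().unwrap() + d;
        lines.push(next);
    }
    lines
}

fn main() {
    let lines = vec![0, 12, 40, 41, 300];
    let (width, first, deltas) = encode(&lines);
    assert_eq!(width, 2); // the largest delta (259) needs two bytes
    assert_eq!(decode(first, &deltas), lines);
}
```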
@@ -587,48 +586,39 @@ impl Decodable for FileMap {
     fn decode<D: Decoder>(d: &mut D) -> Result<FileMap, D::Error> {
 
         d.read_struct("FileMap", 5, |d| {
-            let name: String = try! {
-                d.read_struct_field("name", 0, |d| Decodable::decode(d))
-            };
-            let start_pos: BytePos = try! {
-                d.read_struct_field("start_pos", 1, |d| Decodable::decode(d))
-            };
-            let end_pos: BytePos = try! {
-                d.read_struct_field("end_pos", 2, |d| Decodable::decode(d))
-            };
-            let lines: Vec<BytePos> = try! {
-                d.read_struct_field("lines", 3, |d| {
-                    let num_lines: u32 = try! { Decodable::decode(d) };
-                    let mut lines = Vec::with_capacity(num_lines as usize);
+            let name: String = d.read_struct_field("name", 0, |d| Decodable::decode(d))?;
+            let start_pos: BytePos = d.read_struct_field("start_pos", 1, |d| Decodable::decode(d))?;
+            let end_pos: BytePos = d.read_struct_field("end_pos", 2, |d| Decodable::decode(d))?;
+            let lines: Vec<BytePos> = d.read_struct_field("lines", 3, |d| {
+                let num_lines: u32 = Decodable::decode(d)?;
+                let mut lines = Vec::with_capacity(num_lines as usize);
+
+                if num_lines > 0 {
+                    // Read the number of bytes used per diff.
+                    let bytes_per_diff: u8 = Decodable::decode(d)?;
+
+                    // Read the first element.
+                    let mut line_start: BytePos = Decodable::decode(d)?;
+                    lines.push(line_start);
+
+                    for _ in 1..num_lines {
+                        let diff = match bytes_per_diff {
+                            1 => d.read_u8()? as u32,
+                            2 => d.read_u16()? as u32,
+                            4 => d.read_u32()?,
+                            _ => unreachable!()
+                        };
 
-                    if num_lines > 0 {
-                        // Read the number of bytes used per diff.
-                        let bytes_per_diff: u8 = try! { Decodable::decode(d) };
+                        line_start = line_start + BytePos(diff);
 
-                        // Read the first element.
-                        let mut line_start: BytePos = try! { Decodable::decode(d) };
                         lines.push(line_start);
-
-                        for _ in 1..num_lines {
-                            let diff = match bytes_per_diff {
-                                1 => try! { d.read_u8() } as u32,
-                                2 => try! { d.read_u16() } as u32,
-                                4 => try! { d.read_u32() },
-                                _ => unreachable!()
-                            };
-
-                            line_start = line_start + BytePos(diff);
-
-                            lines.push(line_start);
-                        }
                     }
+                }
 
-                    Ok(lines)
-                })
-            };
-            let multibyte_chars: Vec<MultiByteChar> = try! {
-                d.read_struct_field("multibyte_chars", 4, |d| Decodable::decode(d))
-            };
+                Ok(lines)
+            })?;
+            let multibyte_chars: Vec<MultiByteChar> =
+                d.read_struct_field("multibyte_chars", 4, |d| Decodable::decode(d))?;
             Ok(FileMap {
                 name: name,
                 start_pos: start_pos,
@@ -730,7 +720,7 @@ impl FileLoader for RealFileLoader {
 
     fn read_file(&self, path: &Path) -> io::Result<String> {
         let mut src = String::new();
-        try!(try!(fs::File::open(path)).read_to_string(&mut src));
+        fs::File::open(path)?.read_to_string(&mut src)?;
         Ok(src)
     }
 }
@@ -767,7 +757,7 @@ impl CodeMap {
     }
 
     pub fn load_file(&self, path: &Path) -> io::Result<Rc<FileMap>> {
-        let src = try!(self.file_loader.read_file(path));
+        let src = self.file_loader.read_file(path)?;
         Ok(self.new_filemap(path.to_str().unwrap().to_string(), src))
     }
 
@@ -1314,6 +1304,31 @@ impl CodeMap {
         return a;
     }
 
+    /// Check if the backtrace `subtrace` contains `suptrace` as a prefix.
+    pub fn more_specific_trace(&self,
+                              mut subtrace: ExpnId,
+                              suptrace: ExpnId)
+                              -> bool {
+        loop {
+            if subtrace == suptrace {
+                return true;
+            }
+
+            let stop = self.with_expn_info(subtrace, |opt_expn_info| {
+                if let Some(expn_info) = opt_expn_info {
+                    subtrace = expn_info.call_site.expn_id;
+                    false
+                } else {
+                    true
+                }
+            });
+
+            if stop {
+                return false;
+            }
+        }
+    }
+
     pub fn record_expansion(&self, expn_info: ExpnInfo) -> ExpnId {
         let mut expansions = self.expansions.borrow_mut();
         expansions.push(expn_info);
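
`more_specific_trace` follows the expansion backtrace of `subtrace` through each call site until it either reaches `suptrace` or runs out of expansion info. A standalone sketch of the same walk over a plain parent table; the indices stand in for `ExpnId`s and are purely illustrative:

```rust
// parents[i] is the expansion that produced i, or None for top-level code.
fn is_more_specific(parents: &[Option<usize>], mut sub: usize, sup: usize) -> bool {
    loop {
        if sub == sup {
            return true; // `sup` is a prefix of `sub`'s backtrace
        }
        match parents[sub] {
            Some(call_site) => sub = call_site, // keep walking outwards
            None => return false,               // no more expansion info
        }
    }
}

fn main() {
    // 0 is top-level; 1 was expanded from a call site in 0; 2 from one in 1.
    let parents = [None, Some(0), Some(1)];
    assert!(is_more_specific(&parents, 2, 0));
    assert!(!is_more_specific(&parents, 0, 2));
}
```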
index 9acb1805cdde879e302092efe5ac4989ff45fb2e..4554a280e5f198c1ddf11fc0e7dbf7750e40603e 100644 (file)
@@ -180,12 +180,12 @@ fn fold_struct<F>(cx: &mut Context<F>, vdata: ast::VariantData) -> ast::VariantD
     match vdata {
         ast::VariantData::Struct(fields, id) => {
             ast::VariantData::Struct(fields.into_iter().filter(|m| {
-                (cx.in_cfg)(&m.node.attrs)
+                (cx.in_cfg)(&m.attrs)
             }).collect(), id)
         }
         ast::VariantData::Tuple(fields, id) => {
             ast::VariantData::Tuple(fields.into_iter().filter(|m| {
-                (cx.in_cfg)(&m.node.attrs)
+                (cx.in_cfg)(&m.attrs)
             }).collect(), id)
         }
         ast::VariantData::Unit(id) => ast::VariantData::Unit(id)
@@ -434,7 +434,7 @@ impl<'v, 'a, 'b> visit::Visitor<'v> for StmtExprAttrFeatureVisitor<'a, 'b> {
     }
 
     fn visit_struct_field(&mut self, s: &'v ast::StructField) {
-        if node_survives_cfg(&s.node.attrs, self.config) {
+        if node_survives_cfg(&s.attrs, self.config) {
             visit::walk_struct_field(self, s);
         }
     }
index e988b74cb3d1d3ceedbefc4f179d87718a564069..181b32594f1278d6a24ce365a0243ae277937b51 100644 (file)
@@ -76,11 +76,11 @@ pub fn output_metadata(ecx: &ExtCtxt, prefix: &str, name: &str, err_map: &ErrorM
 {
     // Create the directory to place the file in.
     let metadata_dir = get_metadata_dir(prefix);
-    try!(create_dir_all(&metadata_dir));
+    create_dir_all(&metadata_dir)?;
 
     // Open the metadata file.
     let metadata_path = get_metadata_path(metadata_dir, name);
-    let mut metadata_file = try!(File::create(&metadata_path));
+    let mut metadata_file = File::create(&metadata_path)?;
 
     // Construct a serializable map.
     let json_map = err_map.iter().map(|(k, &ErrorInfo { description, use_site })| {
@@ -95,7 +95,7 @@ pub fn output_metadata(ecx: &ExtCtxt, prefix: &str, name: &str, err_map: &ErrorM
     // Write the data to the file, deleting it if the write fails.
     let result = write!(&mut metadata_file, "{}", as_json(&json_map));
     if result.is_err() {
-        try!(remove_file(&metadata_path));
+        remove_file(&metadata_path)?;
     }
-    Ok(try!(result))
+    Ok(result?)
 }
index 4272f281edb4405514d33535051f7fac3d6a929b..61fdc8453d8fdf63853c7cce40855ceb077ba1cc 100644 (file)
@@ -184,20 +184,20 @@ impl EmitterWriter {
             self.cm.span_to_string(bounds)
         };
 
-        try!(print_diagnostic(&mut self.dst, &ss[..], lvl, msg, code));
+        print_diagnostic(&mut self.dst, &ss[..], lvl, msg, code)?;
 
         match *rsp {
             FullSpan(_) => {
-                try!(self.highlight_lines(msp, lvl));
-                try!(self.print_macro_backtrace(bounds));
+                self.highlight_lines(msp, lvl)?;
+                self.print_macro_backtrace(bounds)?;
             }
             EndSpan(_) => {
-                try!(self.end_highlight_lines(msp, lvl));
-                try!(self.print_macro_backtrace(bounds));
+                self.end_highlight_lines(msp, lvl)?;
+                self.print_macro_backtrace(bounds)?;
             }
             Suggestion(ref suggestion) => {
-                try!(self.highlight_suggestion(suggestion));
-                try!(self.print_macro_backtrace(bounds));
+                self.highlight_suggestion(suggestion)?;
+                self.print_macro_backtrace(bounds)?;
             }
             FileLine(..) => {
                 // no source text in this case!
@@ -207,9 +207,9 @@ impl EmitterWriter {
         if let Some(code) = code {
             if let Some(_) = self.registry.as_ref()
                                           .and_then(|registry| registry.find_description(code)) {
-                try!(print_diagnostic(&mut self.dst, &ss[..], Help,
-                                      &format!("run `rustc --explain {}` to see a \
-                                               detailed explanation", code), None));
+                print_diagnostic(&mut self.dst, &ss[..], Help,
+                                 &format!("run `rustc --explain {}` to see a \
+                                           detailed explanation", code), None)?;
             }
         }
         Ok(())
@@ -233,14 +233,14 @@ impl EmitterWriter {
         // snippets from the actual error being reported.
         let mut lines = complete.lines();
         for line in lines.by_ref().take(MAX_HIGHLIGHT_LINES) {
-            try!(write!(&mut self.dst, "{0}:{1:2$} {3}\n",
-                        fm.name, "", max_digits, line));
+            write!(&mut self.dst, "{0}:{1:2$} {3}\n",
+                   fm.name, "", max_digits, line)?;
         }
 
         // if we elided some lines, add an ellipsis
         if let Some(_) = lines.next() {
-            try!(write!(&mut self.dst, "{0:1$} {0:2$} ...\n",
-                        "", fm.name.len(), max_digits));
+            write!(&mut self.dst, "{0:1$} {0:2$} ...\n",
+                   "", fm.name.len(), max_digits)?;
         }
 
         Ok(())
@@ -254,7 +254,7 @@ impl EmitterWriter {
         let lines = match self.cm.span_to_lines(msp.to_span_bounds()) {
             Ok(lines) => lines,
             Err(_) => {
-                try!(write!(&mut self.dst, "(internal compiler error: unprintable span)\n"));
+                write!(&mut self.dst, "(internal compiler error: unprintable span)\n")?;
                 return Ok(());
             }
         };
@@ -418,26 +418,26 @@ impl EmitterWriter {
 
             // If we elided something put an ellipsis.
             if prev_line_index != line.line_index.wrapping_sub(1) && !overflowed {
-                try!(write!(&mut self.dst, "{0:1$}...\n", "", skip));
+                write!(&mut self.dst, "{0:1$}...\n", "", skip)?;
             }
 
             // Print offending code-line
             remaining_err_lines -= 1;
-            try!(write!(&mut self.dst, "{}:{:>width$} {}\n",
-                        fm.name,
-                        line.line_index + 1,
-                        cur_line_str,
-                        width=digits));
+            write!(&mut self.dst, "{}:{:>width$} {}\n",
+                   fm.name,
+                   line.line_index + 1,
+                   cur_line_str,
+                   width=digits)?;
 
             if s.len() > skip {
                 // Render the spans we assembled previously (if any).
-                try!(println_maybe_styled!(&mut self.dst, term::Attr::ForegroundColor(lvl.color()),
-                                           "{}", s));
+                println_maybe_styled!(&mut self.dst, term::Attr::ForegroundColor(lvl.color()),
+                                      "{}", s)?;
             }
 
             if !overflowed_buf.is_empty() {
                 // Print code-lines trailing the rendered spans (when a span overflows)
-                try!(write!(&mut self.dst, "{}", &overflowed_buf));
+                write!(&mut self.dst, "{}", &overflowed_buf)?;
                 overflowed_buf.clear();
             } else {
                 prev_line_index = line.line_index;
@@ -446,7 +446,7 @@ impl EmitterWriter {
 
         // If we elided something, put an ellipsis.
         if lines.next().is_some() {
-            try!(write!(&mut self.dst, "{0:1$}...\n", "", skip));
+            write!(&mut self.dst, "{0:1$}...\n", "", skip)?;
         }
         Ok(())
     }
@@ -465,7 +465,7 @@ impl EmitterWriter {
         let lines = match self.cm.span_to_lines(msp.to_span_bounds()) {
             Ok(lines) => lines,
             Err(_) => {
-                try!(write!(&mut self.dst, "(internal compiler error: unprintable span)\n"));
+                write!(&mut self.dst, "(internal compiler error: unprintable span)\n")?;
                 return Ok(());
             }
         };
@@ -556,18 +556,18 @@ impl EmitterWriter {
 
             if prev_line_index != line.line_index.wrapping_sub(1) {
                 // If we elided something, put an ellipsis.
-                try!(write!(&mut self.dst, "{0:1$}...\n", "", skip));
+                write!(&mut self.dst, "{0:1$}...\n", "", skip)?;
             }
 
             // Print offending code-lines
-            try!(write!(&mut self.dst, "{}:{:>width$} {}\n", fm.name,
-                        line.line_index + 1, line_str, width=digits));
+            write!(&mut self.dst, "{}:{:>width$} {}\n", fm.name,
+                   line.line_index + 1, line_str, width=digits)?;
             remaining_err_lines -= 1;
 
             if s.len() > skip {
                 // Render the spans we assembled previously (if any)
-                try!(println_maybe_styled!(&mut self.dst, term::Attr::ForegroundColor(lvl.color()),
-                                           "{}", s));
+                println_maybe_styled!(&mut self.dst, term::Attr::ForegroundColor(lvl.color()),
+                                      "{}", s)?;
             }
             prev_line_index = line.line_index;
         }
@@ -612,7 +612,7 @@ impl EmitterWriter {
                 }
 
                 let snippet = self.cm.span_to_string(span);
-                try!(print_diagnostic(&mut self.dst, &snippet, Note, &diag_string, None));
+                print_diagnostic(&mut self.dst, &snippet, Note, &diag_string, None)?;
             }
             last_span = span;
         }
@@ -638,18 +638,18 @@ fn print_diagnostic(dst: &mut Destination,
                     code: Option<&str>)
                     -> io::Result<()> {
     if !topic.is_empty() {
-        try!(write!(dst, "{} ", topic));
+        write!(dst, "{} ", topic)?;
     }
 
-    try!(print_maybe_styled!(dst, term::Attr::ForegroundColor(lvl.color()),
-                             "{}: ", lvl.to_string()));
-    try!(print_maybe_styled!(dst, term::Attr::Bold, "{}", msg));
+    print_maybe_styled!(dst, term::Attr::ForegroundColor(lvl.color()),
+                        "{}: ", lvl.to_string())?;
+    print_maybe_styled!(dst, term::Attr::Bold, "{}", msg)?;
 
     if let Some(code) = code {
         let style = term::Attr::ForegroundColor(term::color::BRIGHT_MAGENTA);
-        try!(print_maybe_styled!(dst, style, " [{}]", code.clone()));
+        print_maybe_styled!(dst, style, " [{}]", code.clone())?;
     }
-    try!(write!(dst, "\n"));
+    write!(dst, "\n")?;
     Ok(())
 }
 
@@ -696,7 +696,7 @@ impl Destination {
                           -> io::Result<()> {
         match *self {
             Terminal(ref mut t) => {
-                try!(t.attr(color));
+                t.attr(color)?;
                 // If `msg` ends in a newline, we need to reset the color before
                 // the newline. We're making the assumption that we end up writing
                 // to a `LineBufferedWriter`, which means that emitting the reset
@@ -710,8 +710,8 @@ impl Destination {
                 // once, which still leaves the opportunity for interleaved output
                 // to be miscolored. We assume this is rare enough that we don't
                 // have to worry about it.
-                try!(t.write_fmt(args));
-                try!(t.reset());
+                t.write_fmt(args)?;
+                t.reset()?;
                 if print_newline_at_end {
                     t.write_all(b"\n")
                 } else {
@@ -719,7 +719,7 @@ impl Destination {
                 }
             }
             Raw(ref mut w) => {
-                try!(w.write_fmt(args));
+                w.write_fmt(args)?;
                 if print_newline_at_end {
                     w.write_all(b"\n")
                 } else {
index 5bb5f4757e013ad729d8cd389687e560254784d1..f369582bc5c30f674c5a8fe78462ab9345a520d0 100644 (file)
@@ -20,7 +20,7 @@
 // FIXME spec the JSON output properly.
 
 
-use codemap::{MultiSpan, CodeMap};
+use codemap::{self, Span, MultiSpan, CodeMap};
 use diagnostics::registry::Registry;
 use errors::{Level, DiagnosticBuilder, SubDiagnostic, RenderSpan, CodeSuggestion};
 use errors::emitter::Emitter;
@@ -99,6 +99,16 @@ struct DiagnosticSpan {
     /// 1-based, character offset.
     column_start: usize,
     column_end: usize,
+    /// Source text from the start of line_start to the end of line_end.
+    text: Vec<DiagnosticSpanLine>,
+}
+
+#[derive(RustcEncodable)]
+struct DiagnosticSpanLine {
+    text: String,
+    /// 1-based, character offset in self.text.
+    highlight_start: usize,
+    highlight_end: usize,
 }
 
 #[derive(RustcEncodable)]
@@ -180,14 +190,15 @@ impl DiagnosticSpan {
                 line_end: end.line,
                 column_start: start.col.0 + 1,
                 column_end: end.col.0 + 1,
+                text: DiagnosticSpanLine::from_span(span, je),
             }
         }).collect()
     }
 
     fn from_render_span(rsp: &RenderSpan, je: &JsonEmitter) -> Vec<DiagnosticSpan> {
         match *rsp {
-            // FIXME(#30701) handle Suggestion properly
             RenderSpan::FullSpan(ref msp) |
+            // FIXME(#30701) handle Suggestion properly
             RenderSpan::Suggestion(CodeSuggestion { ref msp, .. }) => {
                 DiagnosticSpan::from_multispan(msp, je)
             }
@@ -196,12 +207,13 @@ impl DiagnosticSpan {
                     let end = je.cm.lookup_char_pos(span.hi);
                     DiagnosticSpan {
                         file_name: end.file.name.clone(),
-                        byte_start: span.lo.0,
+                        byte_start: span.hi.0,
                         byte_end: span.hi.0,
-                        line_start: 0,
+                        line_start: end.line,
                         line_end: end.line,
-                        column_start: 0,
+                        column_start: end.col.0 + 1,
                         column_end: end.col.0 + 1,
+                        text: DiagnosticSpanLine::from_span_end(span, je),
                     }
                 }).collect()
             }
@@ -217,6 +229,7 @@ impl DiagnosticSpan {
                         line_end: end.line,
                         column_start: 0,
                         column_end: 0,
+                        text: DiagnosticSpanLine::from_span(span, je),
                     }
                 }).collect()
             }
@@ -224,6 +237,76 @@ impl DiagnosticSpan {
     }
 }
 
+macro_rules! get_lines_for_span {
+    ($span: ident, $je: ident) => {
+        match $je.cm.span_to_lines(*$span) {
+            Ok(lines) => lines,
+            Err(_) => {
+                debug!("unprintable span");
+                return Vec::new();
+            }
+        }
+    }
+}
+
+impl DiagnosticSpanLine {
+    fn line_from_filemap(fm: &codemap::FileMap,
+                         index: usize,
+                         h_start: usize,
+                         h_end: usize)
+                         -> DiagnosticSpanLine {
+        DiagnosticSpanLine {
+            text: fm.get_line(index).unwrap().to_owned(),
+            highlight_start: h_start,
+            highlight_end: h_end,
+        }
+    }
+
+    /// Create a list of DiagnosticSpanLines from span - each line with any part
+    /// of `span` gets a DiagnosticSpanLine, with the highlight indicating the
+    /// `span` within the line.
+    fn from_span(span: &Span, je: &JsonEmitter) -> Vec<DiagnosticSpanLine> {
+        let lines = get_lines_for_span!(span, je);
+
+        let mut result = Vec::new();
+        let fm = &*lines.file;
+
+        for line in &lines.lines {
+            result.push(DiagnosticSpanLine::line_from_filemap(fm,
+                                                              line.line_index,
+                                                              line.start_col.0 + 1,
+                                                              line.end_col.0 + 1));
+        }
+
+        result
+    }
+
+    /// Create a list of DiagnosticSpanLines from span - the result covers all
+    /// of `span`, but the highlight is zero-length and at the end of `span`.
+    fn from_span_end(span: &Span, je: &JsonEmitter) -> Vec<DiagnosticSpanLine> {
+        let lines = get_lines_for_span!(span, je);
+
+        let mut result = Vec::new();
+        let fm = &*lines.file;
+
+        for (i, line) in lines.lines.iter().enumerate() {
+            // Invariant - CodeMap::span_to_lines will not return extra context
+            // lines - the last line returned is the last line of `span`.
+            let highlight = if i == lines.lines.len() - 1 {
+                (line.end_col.0 + 1, line.end_col.0 + 1)
+            } else {
+                (0, 0)
+            };
+            result.push(DiagnosticSpanLine::line_from_filemap(fm,
+                                                              line.line_index,
+                                                              highlight.0,
+                                                              highlight.1));
+        }
+
+        result
+    }
+}
+
 impl DiagnosticCode {
     fn map_opt_string(s: Option<String>, je: &JsonEmitter) -> Option<DiagnosticCode> {
         s.map(|s| {
index 2a9d96fa861b1f27a25498326c3338e41d3ccbcc..c0306b8494be222b174e760f1638946fdb124dea 100644 (file)
@@ -552,6 +552,10 @@ pub struct ExtCtxt<'a> {
 
     pub syntax_env: SyntaxEnv,
     pub recursion_count: usize,
+
+    pub filename: Option<String>,
+    pub mod_path_stack: Vec<InternedString>,
+    pub in_block: bool,
 }
 
 impl<'a> ExtCtxt<'a> {
@@ -570,6 +574,10 @@ impl<'a> ExtCtxt<'a> {
             exported_macros: Vec::new(),
             syntax_env: env,
             recursion_count: 0,
+
+            filename: None,
+            mod_path_stack: Vec::new(),
+            in_block: false,
         }
     }
 
index 0eb42f17f68b41f59e054c1282308ef476a79eb4..a4e5b68277d698799d8be41122f0ac335947854a 100644 (file)
@@ -1007,12 +1007,14 @@ impl<'a> AstBuilder for ExtCtxt<'a> {
 
     fn variant(&self, span: Span, name: Ident, tys: Vec<P<ast::Ty>> ) -> ast::Variant {
         let fields: Vec<_> = tys.into_iter().map(|ty| {
-            Spanned { span: ty.span, node: ast::StructField_ {
+            ast::StructField {
+                span: ty.span,
                 ty: ty,
-                kind: ast::UnnamedField(ast::Visibility::Inherited),
+                ident: None,
+                vis: ast::Visibility::Inherited,
                 attrs: Vec::new(),
                 id: ast::DUMMY_NODE_ID,
-            }}
+            }
         }).collect();
 
         let vdata = if fields.is_empty() {
index e8e042c13217ab3a1c58b9ab2c2acc683ae06007..8550617560df3ed7840259c812b60b6a3c2c9649 100644 (file)
@@ -33,7 +33,7 @@ use visit::Visitor;
 use std_inject;
 
 use std::collections::HashSet;
-
+use std::env;
 
 pub fn expand_expr(e: P<ast::Expr>, fld: &mut MacroExpander) -> P<ast::Expr> {
     let expr_span = e.span;
@@ -1061,6 +1061,7 @@ fn expand_impl_item(ii: ast::ImplItem, fld: &mut MacroExpander)
             ident: ii.ident,
             attrs: ii.attrs,
             vis: ii.vis,
+            defaultness: ii.defaultness,
             node: match ii.node  {
                 ast::ImplItemKind::Method(sig, body) => {
                     let (sig, body) = expand_and_rename_method(sig, body, fld);
@@ -1183,6 +1184,11 @@ impl<'a, 'b> MacroExpander<'a, 'b> {
 }
 
 impl<'a, 'b> Folder for MacroExpander<'a, 'b> {
+    fn fold_crate(&mut self, c: Crate) -> Crate {
+        self.cx.filename = Some(self.cx.parse_sess.codemap().span_to_filename(c.span));
+        noop_fold_crate(c, self)
+    }
+
     fn fold_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
         expand_expr(expr, self)
     }
@@ -1192,7 +1198,27 @@ impl<'a, 'b> Folder for MacroExpander<'a, 'b> {
     }
 
     fn fold_item(&mut self, item: P<ast::Item>) -> SmallVector<P<ast::Item>> {
-        expand_item(item, self)
+        use std::mem::replace;
+        let result;
+        if let ast::ItemKind::Mod(ast::Mod { inner, .. }) = item.node {
+            if item.span.contains(inner) {
+                self.push_mod_path(item.ident, &item.attrs);
+                result = expand_item(item, self);
+                self.pop_mod_path();
+            } else {
+                let filename = if inner != codemap::DUMMY_SP {
+                    Some(self.cx.parse_sess.codemap().span_to_filename(inner))
+                } else { None };
+                let orig_filename = replace(&mut self.cx.filename, filename);
+                let orig_mod_path_stack = replace(&mut self.cx.mod_path_stack, Vec::new());
+                result = expand_item(item, self);
+                self.cx.filename = orig_filename;
+                self.cx.mod_path_stack = orig_mod_path_stack;
+            }
+        } else {
+            result = expand_item(item, self);
+        }
+        result
     }
 
     fn fold_item_kind(&mut self, item: ast::ItemKind) -> ast::ItemKind {
@@ -1204,7 +1230,10 @@ impl<'a, 'b> Folder for MacroExpander<'a, 'b> {
     }
 
     fn fold_block(&mut self, block: P<Block>) -> P<Block> {
-        expand_block(block, self)
+        let was_in_block = ::std::mem::replace(&mut self.cx.in_block, true);
+        let result = expand_block(block, self);
+        self.cx.in_block = was_in_block;
+        result
     }
 
     fn fold_arm(&mut self, arm: ast::Arm) -> ast::Arm {
@@ -1230,12 +1259,57 @@ impl<'a, 'b> Folder for MacroExpander<'a, 'b> {
     }
 }
 
+impl<'a, 'b> MacroExpander<'a, 'b> {
+    fn push_mod_path(&mut self, id: Ident, attrs: &[ast::Attribute]) {
+        let default_path = id.name.as_str();
+        let file_path = match ::attr::first_attr_value_str_by_name(attrs, "path") {
+            Some(d) => d,
+            None => default_path,
+        };
+        self.cx.mod_path_stack.push(file_path)
+    }
+
+    fn pop_mod_path(&mut self) {
+        self.cx.mod_path_stack.pop().unwrap();
+    }
+}
+
 fn new_span(cx: &ExtCtxt, sp: Span) -> Span {
-    /* this discards information in the case of macro-defining macros */
-    Span {
-        lo: sp.lo,
-        hi: sp.hi,
-        expn_id: cx.backtrace(),
+    debug!("new_span(sp={:?})", sp);
+
+    if cx.codemap().more_specific_trace(sp.expn_id, cx.backtrace()) {
+        // If the span we are looking at has a backtrace that has more
+        // detail than our current backtrace, then we keep that
+        // backtrace.  Honestly, I have no idea if this makes sense,
+        // because I have no idea why we are stripping the backtrace
+        // below. But the reason I made this change is because, in
+        // deriving, we were generating attributes with a specific
+        // backtrace, which was essential for `#[structural_match]` to
+        // be properly supported, but these backtraces were being
+        // stripped and replaced with a null backtrace. Sort of
+        // unclear why this is the case. --nmatsakis
+        debug!("new_span: keeping trace from {:?} because it is more specific",
+               sp.expn_id);
+        sp
+    } else {
+        // This discards information in the case of macro-defining macros.
+        //
+        // The comment above was originally added in
+        // b7ec2488ff2f29681fe28691d20fd2c260a9e454 in Feb 2012. I
+        // *THINK* the reason we are doing this is because we want to
+        // replace the backtrace of the macro contents with the
+        // backtrace that contains the macro use. But it's pretty
+        // unclear to me. --nmatsakis
+        let sp1 = Span {
+            lo: sp.lo,
+            hi: sp.hi,
+            expn_id: cx.backtrace(),
+        };
+        debug!("new_span({:?}) = {:?}", sp, sp1);
+        if sp.expn_id.into_u32() == 0 && env::var_os("NDM").is_some() {
+            panic!("NDM");
+        }
+        sp1
     }
 }
 
@@ -1476,7 +1550,7 @@ mod tests {
         let crate_ast = parse::parse_crate_from_source_str(
             "<test>".to_string(),
             src,
-            Vec::new(), &sess);
+            Vec::new(), &sess).unwrap();
         // should fail:
         let mut gated_cfgs = vec![];
         let ecx = ExtCtxt::new(&sess, vec![], test_ecfg(), &mut gated_cfgs);
@@ -1492,7 +1566,7 @@ mod tests {
         let crate_ast = parse::parse_crate_from_source_str(
             "<test>".to_string(),
             src,
-            Vec::new(), &sess);
+            Vec::new(), &sess).unwrap();
         let mut gated_cfgs = vec![];
         let ecx = ExtCtxt::new(&sess, vec![], test_ecfg(), &mut gated_cfgs);
         expand_crate(ecx, vec![], vec![], crate_ast);
@@ -1506,7 +1580,7 @@ mod tests {
         let crate_ast = parse::parse_crate_from_source_str(
             "<test>".to_string(),
             src,
-            Vec::new(), &sess);
+            Vec::new(), &sess).unwrap();
         let mut gated_cfgs = vec![];
         let ecx = ExtCtxt::new(&sess, vec![], test_ecfg(), &mut gated_cfgs);
         expand_crate(ecx, vec![], vec![], crate_ast);
index d0eaa89e4ae5dc4f143c54e8d89406f9b12cf38e..77aeaf8459aec8b046d2fa95176aa098faa9d27b 100644 (file)
@@ -18,12 +18,12 @@ use parse::token::*;
 use parse::token;
 use ptr::P;
 
-///  Quasiquoting works via token trees.
+/// Quasiquoting works via token trees.
 ///
-///  This is registered as a set of expression syntax extension called quote!
-///  that lifts its argument token-tree to an AST representing the
-///  construction of the same token tree, with token::SubstNt interpreted
-///  as antiquotes (splices).
+/// This is registered as a set of expression syntax extension called quote!
+/// that lifts its argument token-tree to an AST representing the
+/// construction of the same token tree, with token::SubstNt interpreted
+/// as antiquotes (splices).
 
 pub mod rt {
     use ast;
@@ -121,6 +121,12 @@ pub mod rt {
         }
     }
 
+    impl ToTokens for P<ast::ImplItem> {
+        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
+            vec![TokenTree::Token(self.span, token::Interpolated(token::NtImplItem(self.clone())))]
+        }
+    }
+
     impl ToTokens for ast::TraitItem {
         fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
             vec![TokenTree::Token(self.span,
@@ -319,34 +325,36 @@ pub mod rt {
     }
 
     impl<'a> ExtParseUtils for ExtCtxt<'a> {
-
         fn parse_item(&self, s: String) -> P<ast::Item> {
-            parse::parse_item_from_source_str(
+            panictry!(parse::parse_item_from_source_str(
                 "<quote expansion>".to_string(),
                 s,
                 self.cfg(),
-                self.parse_sess()).expect("parse error")
+                self.parse_sess())).expect("parse error")
         }
 
         fn parse_stmt(&self, s: String) -> ast::Stmt {
-            parse::parse_stmt_from_source_str("<quote expansion>".to_string(),
-                                              s,
-                                              self.cfg(),
-                                              self.parse_sess()).expect("parse error")
+            panictry!(parse::parse_stmt_from_source_str(
+                "<quote expansion>".to_string(),
+                s,
+                self.cfg(),
+                self.parse_sess())).expect("parse error")
         }
 
         fn parse_expr(&self, s: String) -> P<ast::Expr> {
-            parse::parse_expr_from_source_str("<quote expansion>".to_string(),
-                                              s,
-                                              self.cfg(),
-                                              self.parse_sess())
+            panictry!(parse::parse_expr_from_source_str(
+                "<quote expansion>".to_string(),
+                s,
+                self.cfg(),
+                self.parse_sess()))
         }
 
         fn parse_tts(&self, s: String) -> Vec<TokenTree> {
-            parse::parse_tts_from_source_str("<quote expansion>".to_string(),
-                                             s,
-                                             self.cfg(),
-                                             self.parse_sess())
+            panictry!(parse::parse_tts_from_source_str(
+                "<quote expansion>".to_string(),
+                s,
+                self.cfg(),
+                self.parse_sess()))
         }
     }
 }
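
The doc comment above describes quasiquoting in terms of token trees and SubstNt antiquotes; a minimal sketch may make that concrete. It assumes a syntax-extension crate built with `#![feature(quote, rustc_private)]` and an `ExtCtxt` value named `cx` already in scope, and is illustrative only, not part of this change:

    // `quote_expr!` lifts its argument token tree into an AST expression;
    // `$x` is a SubstNt antiquote, so the tokens produced for `x` are spliced in.
    let x = quote_expr!(cx, 21);
    let doubled = quote_expr!(cx, $x * 2);

    // The `ToTokens` impl for `P<ast::ImplItem>` added above exists so that an
    // impl item can likewise be turned into tokens and spliced into a quote.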
index c4e1f32a52c235bb43bbb0953ddc6d495a812e1b..4e4c644776a5163200273ee26a93415a77384d72 100644 (file)
@@ -208,12 +208,12 @@ pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>])
         match *m {
             TokenTree::Sequence(_, ref seq) => {
                 for next_m in &seq.tts {
-                    try!(n_rec(p_s, next_m, res, ret_val, idx))
+                    n_rec(p_s, next_m, res, ret_val, idx)?
                 }
             }
             TokenTree::Delimited(_, ref delim) => {
                 for next_m in &delim.tts {
-                    try!(n_rec(p_s, next_m, res, ret_val, idx));
+                    n_rec(p_s, next_m, res, ret_val, idx)?;
                 }
             }
             TokenTree::Token(sp, MatchNt(bind_name, _, _, _)) => {
index c641c478a6bba6482398a3656346ae5b2f463aa0..77bae4cb3f6c45b99789e9452b139043f051b11f 100644 (file)
@@ -16,7 +16,7 @@ use ext::tt::macro_parser::{Success, Error, Failure};
 use ext::tt::macro_parser::{MatchedSeq, MatchedNonterminal};
 use ext::tt::macro_parser::parse;
 use parse::lexer::new_tt_reader;
-use parse::parser::Parser;
+use parse::parser::{Parser, Restrictions};
 use parse::token::{self, special_idents, gensym_ident, NtTT, Token};
 use parse::token::Token::*;
 use print;
@@ -195,6 +195,12 @@ fn generic_extension<'cx>(cx: &'cx ExtCtxt,
                                            imported_from,
                                            rhs);
                 let mut p = Parser::new(cx.parse_sess(), cx.cfg(), Box::new(trncbr));
+                p.filename = cx.filename.clone();
+                p.mod_path_stack = cx.mod_path_stack.clone();
+                p.restrictions = match cx.in_block {
+                    true => Restrictions::NO_NONINLINE_MOD,
+                    false => Restrictions::empty(),
+                };
                 p.check_unknown_macro_variable();
                 // Let the context choose how to interpret the result.
                 // Weird, but useful for X-macros.
index 3f2fb2d3d1704296911adb7fc4928c0c3b5cc75f..fc18ef407ab2f9e138f5bb435a793959bf959c47 100644 (file)
@@ -109,6 +109,8 @@ const KNOWN_FEATURES: &'static [(&'static str, &'static str, Option<u32>, Status
     // to bootstrap fix for #5723.
     ("issue_5723_bootstrap", "1.0.0", None, Accepted),
 
+    ("structural_match", "1.8.0", Some(31434), Active),
+
     // A way to temporarily opt out of opt in copy. This will *never* be accepted.
     ("opt_out_copy", "1.0.0", None, Removed),
 
@@ -197,7 +199,7 @@ const KNOWN_FEATURES: &'static [(&'static str, &'static str, Option<u32>, Status
     ("associated_type_defaults", "1.2.0", Some(29661), Active),
 
     // Allows macros to appear in the type position.
-    ("type_macros", "1.3.0", Some(27336), Active),
+    ("type_macros", "1.3.0", Some(27245), Active),
 
     // allow `repr(simd)`, and importing the various simd intrinsics
     ("repr_simd", "1.4.0", Some(27731), Active),
@@ -212,11 +214,14 @@ const KNOWN_FEATURES: &'static [(&'static str, &'static str, Option<u32>, Status
     // rust runtime internal
     ("unwind_attributes", "1.4.0", None, Active),
 
+    // allow the use of `#[naked]` on functions.
+    ("naked_functions", "1.9.0", Some(32408), Active),
+
     // allow empty structs and enum variants with braces
-    ("braced_empty_structs", "1.5.0", Some(29720), Accepted),
+    ("braced_empty_structs", "1.8.0", Some(29720), Accepted),
 
     // allow overloading augmented assignment operations like `a += b`
-    ("augmented_assignments", "1.5.0", Some(28235), Accepted),
+    ("augmented_assignments", "1.8.0", Some(28235), Accepted),
 
     // allow `#[no_debug]`
     ("no_debug", "1.5.0", Some(29721), Active),
@@ -232,7 +237,7 @@ const KNOWN_FEATURES: &'static [(&'static str, &'static str, Option<u32>, Status
     ("stmt_expr_attributes", "1.6.0", Some(15701), Active),
 
     // Allows `#[deprecated]` attribute
-    ("deprecated", "1.6.0", Some(29935), Active),
+    ("deprecated", "1.9.0", Some(29935), Accepted),
 
     // allow using type ascription in expressions
     ("type_ascription", "1.6.0", Some(23416), Active),
@@ -241,7 +246,16 @@ const KNOWN_FEATURES: &'static [(&'static str, &'static str, Option<u32>, Status
     ("cfg_target_thread_local", "1.7.0", Some(29594), Active),
 
     // rustc internal
-    ("abi_vectorcall", "1.7.0", None, Active)
+    ("abi_vectorcall", "1.7.0", None, Active),
+
+    // a...b and ...b
+    ("inclusive_range_syntax", "1.7.0", Some(28237), Active),
+
+    // `expr?`
+    ("question_mark", "1.9.0", Some(31436), Active),
+
+    // impl specialization (RFC 1210)
+    ("specialization", "1.7.0", Some(31844), Active),
 ];
 // (changing above list without updating src/doc/reference.md makes @cmr sad)
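
A minimal sketch of the surface syntax gated by the new `inclusive_range_syntax` and `question_mark` entries above (nightly-only at this point; on some nightlies, iterating an inclusive range may additionally need the library-side `inclusive_range` feature):

    #![feature(inclusive_range_syntax, question_mark)]

    use std::num::ParseIntError;

    fn sum_one_to_five() -> u32 {
        let mut total = 0;
        // `1...5` is the gated inclusive range: 5 itself is included.
        for i in 1...5 {
            total += i;
        }
        total
    }

    fn parse_doubled(s: &str) -> Result<u32, ParseIntError> {
        // `expr?` propagates the error to the caller, gated by `question_mark`.
        let n: u32 = s.parse()?;
        Ok(n * 2)
    }

    fn main() {
        assert_eq!(sum_one_to_five(), 15);
        assert_eq!(parse_doubled("21"), Ok(42));
    }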
 
@@ -292,6 +306,11 @@ pub const KNOWN_ATTRIBUTES: &'static [(&'static str, AttributeType, AttributeGat
     ("link_args", Normal, Ungated),
     ("macro_escape", Normal, Ungated),
 
+    // RFC #1445.
+    ("structural_match", Whitelisted, Gated("structural_match",
+                                            "the semantics of constant patterns is \
+                                             not yet settled")),
+
     // Not used any more, but we can't feature gate it
     ("no_stack_check", Normal, Ungated),
 
@@ -337,14 +356,30 @@ pub const KNOWN_ATTRIBUTES: &'static [(&'static str, AttributeType, AttributeGat
                                        "the `#[rustc_if_this_changed]` attribute \
                                         is just used for rustc unit tests \
                                         and will never be stable")),
+    ("rustc_dirty", Whitelisted, Gated("rustc_attrs",
+                                       "the `#[rustc_dirty]` attribute \
+                                        is just used for rustc unit tests \
+                                        and will never be stable")),
+    ("rustc_clean", Whitelisted, Gated("rustc_attrs",
+                                       "the `#[rustc_clean]` attribute \
+                                        is just used for rustc unit tests \
+                                        and will never be stable")),
+    ("rustc_symbol_name", Whitelisted, Gated("rustc_attrs",
+                                       "internal rustc attributes will never be stable")),
+    ("rustc_item_path", Whitelisted, Gated("rustc_attrs",
+                                       "internal rustc attributes will never be stable")),
     ("rustc_move_fragments", Normal, Gated("rustc_attrs",
                                            "the `#[rustc_move_fragments]` attribute \
                                             is just used for rustc unit tests \
                                             and will never be stable")),
-    ("rustc_mir", Normal, Gated("rustc_attrs",
-                                "the `#[rustc_mir]` attribute \
-                                 is just used for rustc unit tests \
-                                 and will never be stable")),
+    ("rustc_mir", Whitelisted, Gated("rustc_attrs",
+                                     "the `#[rustc_mir]` attribute \
+                                      is just used for rustc unit tests \
+                                      and will never be stable")),
+    ("rustc_no_mir", Whitelisted, Gated("rustc_attrs",
+                                        "the `#[rustc_no_mir]` attribute \
+                                         is just used to make tests pass \
+                                         and will never be stable")),
 
     ("allow_internal_unstable", Normal, Gated("allow_internal_unstable",
                                               EXPLAIN_ALLOW_INTERNAL_UNSTABLE)),
@@ -363,6 +398,9 @@ pub const KNOWN_ATTRIBUTES: &'static [(&'static str, AttributeType, AttributeGat
     // FIXME: #14406 these are processed in trans, which happens after the
     // lint pass
     ("cold", Whitelisted, Ungated),
+    ("naked", Whitelisted, Gated("naked_functions",
+                                 "the `#[naked]` attribute \
+                                  is an experimental feature")),
     ("export_name", Whitelisted, Ungated),
     ("inline", Whitelisted, Ungated),
     ("link", Whitelisted, Ungated),
@@ -397,7 +435,7 @@ pub const KNOWN_ATTRIBUTES: &'static [(&'static str, AttributeType, AttributeGat
     ("must_use", Whitelisted, Ungated),
     ("stable", Whitelisted, Ungated),
     ("unstable", Whitelisted, Ungated),
-    ("deprecated", Normal, Gated("deprecated", "`#[deprecated]` attribute is unstable")),
+    ("deprecated", Normal, Ungated),
 
     ("rustc_paren_sugar", Normal, Gated("unboxed_closures",
                                         "unboxed_closures are still evolving")),
@@ -549,6 +587,7 @@ pub struct Features {
     pub allow_placement_in: bool,
     pub allow_box: bool,
     pub allow_pushpop_unsafe: bool,
+    pub allow_inclusive_range: bool,
     pub simd_ffi: bool,
     pub unmarked_api: bool,
     /// spans of #![feature] attrs for stable language features. for error reporting
@@ -559,6 +598,7 @@ pub struct Features {
     pub const_indexing: bool,
     pub static_recursion: bool,
     pub default_type_parameter_fallback: bool,
+    pub rustc_attrs: bool,
     pub type_macros: bool,
     pub cfg_target_feature: bool,
     pub cfg_target_vendor: bool,
@@ -566,6 +606,8 @@ pub struct Features {
     pub staged_api: bool,
     pub stmt_expr_attributes: bool,
     pub deprecated: bool,
+    pub question_mark: bool,
+    pub specialization: bool,
 }
 
 impl Features {
@@ -583,6 +625,7 @@ impl Features {
             allow_placement_in: false,
             allow_box: false,
             allow_pushpop_unsafe: false,
+            allow_inclusive_range: false,
             simd_ffi: false,
             unmarked_api: false,
             declared_stable_lang_features: Vec::new(),
@@ -591,6 +634,7 @@ impl Features {
             const_indexing: false,
             static_recursion: false,
             default_type_parameter_fallback: false,
+            rustc_attrs: false,
             type_macros: false,
             cfg_target_feature: false,
             cfg_target_vendor: false,
@@ -598,6 +642,8 @@ impl Features {
             staged_api: false,
             stmt_expr_attributes: false,
             deprecated: false,
+            question_mark: false,
+            specialization: false,
         }
     }
 }
@@ -651,7 +697,7 @@ impl<'a> Context<'a> {
     fn gate_feature(&self, feature: &str, span: Span, explain: &str) {
         let has_feature = self.has_feature(feature);
         debug!("gate_feature(feature = {:?}, span = {:?}); has? {}", feature, span, has_feature);
-        if !has_feature {
+        if !has_feature && !self.cm.span_allows_unstable(span) {
             emit_feature_err(self.span_handler, feature, span, GateIssue::Language, explain);
         }
     }
@@ -991,6 +1037,14 @@ impl<'a, 'v> Visitor<'v> for PostExpansionVisitor<'a> {
                 self.gate_feature("type_ascription", e.span,
                                   "type ascription is experimental");
             }
+            ast::ExprKind::Range(_, _, ast::RangeLimits::Closed) => {
+                self.gate_feature("inclusive_range_syntax",
+                                  e.span,
+                                  "inclusive range syntax is experimental");
+            }
+            ast::ExprKind::Try(..) => {
+                self.gate_feature("question_mark", e.span, "the `?` operator is not stable");
+            }
             _ => {}
         }
         visit::walk_expr(self, e);
@@ -1084,6 +1138,12 @@ impl<'a, 'v> Visitor<'v> for PostExpansionVisitor<'a> {
     }
 
     fn visit_impl_item(&mut self, ii: &'v ast::ImplItem) {
+        if ii.defaultness == ast::Defaultness::Default {
+            self.gate_feature("specialization",
+                              ii.span,
+                              "specialization is unstable");
+        }
+
         match ii.node {
             ast::ImplItemKind::Const(..) => {
                 self.gate_feature("associated_consts",
@@ -1177,6 +1237,7 @@ fn check_crate_inner<F>(cm: &CodeMap, span_handler: &Handler,
         allow_placement_in: cx.has_feature("placement_in_syntax"),
         allow_box: cx.has_feature("box_syntax"),
         allow_pushpop_unsafe: cx.has_feature("pushpop_unsafe"),
+        allow_inclusive_range: cx.has_feature("inclusive_range_syntax"),
         simd_ffi: cx.has_feature("simd_ffi"),
         unmarked_api: cx.has_feature("unmarked_api"),
         declared_stable_lang_features: accepted_features,
@@ -1185,6 +1246,7 @@ fn check_crate_inner<F>(cm: &CodeMap, span_handler: &Handler,
         const_indexing: cx.has_feature("const_indexing"),
         static_recursion: cx.has_feature("static_recursion"),
         default_type_parameter_fallback: cx.has_feature("default_type_parameter_fallback"),
+        rustc_attrs: cx.has_feature("rustc_attrs"),
         type_macros: cx.has_feature("type_macros"),
         cfg_target_feature: cx.has_feature("cfg_target_feature"),
         cfg_target_vendor: cx.has_feature("cfg_target_vendor"),
@@ -1192,6 +1254,8 @@ fn check_crate_inner<F>(cm: &CodeMap, span_handler: &Handler,
         staged_api: cx.has_feature("staged_api"),
         stmt_expr_attributes: cx.has_feature("stmt_expr_attributes"),
         deprecated: cx.has_feature("deprecated"),
+        question_mark: cx.has_feature("question_mark"),
+        specialization: cx.has_feature("specialization"),
     }
 }
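
The `defaultness` check above gates `default` impl items behind the new `specialization` feature (RFC 1210). A hedged sketch of the gated form, with illustrative names:

    #![feature(specialization)]

    trait Describe {
        fn describe(&self) -> String;
    }

    impl<T> Describe for T {
        // `default` marks the item as overridable by a more specific impl,
        // which is exactly what the `defaultness == Default` check gates.
        default fn describe(&self) -> String {
            "something".to_string()
        }
    }

    impl Describe for u32 {
        fn describe(&self) -> String {
            format!("the number {}", self)
        }
    }

    fn main() {
        assert_eq!(0.5f64.describe(), "something");
        assert_eq!(7u32.describe(), "the number 7");
    }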
 
index d75e8f796ae891143d18c56ccfe6c4eb5cf0d996..5d378763bef632a8fa656eb0899db5f2efc4ea86 100644 (file)
@@ -21,7 +21,6 @@
 use ast::*;
 use ast;
 use attr::{ThinAttributes, ThinAttributesExt};
-use ast_util;
 use codemap::{respan, Span, Spanned};
 use parse::token;
 use ptr::P;
@@ -288,6 +287,10 @@ pub trait Folder : Sized {
         noop_fold_where_predicate(where_predicate, self)
     }
 
+    fn fold_vis(&mut self, vis: Visibility) -> Visibility {
+        noop_fold_vis(vis, self)
+    }
+
     fn new_id(&mut self, i: NodeId) -> NodeId {
         i
     }
@@ -843,15 +846,13 @@ pub fn noop_fold_poly_trait_ref<T: Folder>(p: PolyTraitRef, fld: &mut T) -> Poly
 }
 
 pub fn noop_fold_struct_field<T: Folder>(f: StructField, fld: &mut T) -> StructField {
-    let StructField {node: StructField_ {id, kind, ty, attrs}, span} = f;
-    Spanned {
-        node: StructField_ {
-            id: fld.new_id(id),
-            kind: kind,
-            ty: fld.fold_ty(ty),
-            attrs: fold_attrs(attrs, fld),
-        },
-        span: fld.new_span(span)
+    StructField {
+        span: fld.new_span(f.span),
+        id: fld.new_id(f.id),
+        ident: f.ident.map(|ident| fld.fold_ident(ident)),
+        vis: f.vis,
+        ty: fld.fold_ty(f.ty),
+        attrs: fold_attrs(f.attrs, fld),
     }
 }
 
@@ -992,7 +993,8 @@ pub fn noop_fold_impl_item<T: Folder>(i: ImplItem, folder: &mut T)
         id: folder.new_id(i.id),
         ident: folder.fold_ident(i.ident),
         attrs: fold_attrs(i.attrs, folder),
-        vis: i.vis,
+        vis: folder.fold_vis(i.vis),
+        defaultness: i.defaultness,
         node: match i.node  {
             ast::ImplItemKind::Const(ty, expr) => {
                 ast::ImplItemKind::Const(folder.fold_ty(ty), folder.fold_expr(expr))
@@ -1068,20 +1070,13 @@ pub fn noop_fold_item_simple<T: Folder>(Item {id, ident, attrs, node, vis, span}
                                         folder: &mut T) -> Item {
     let id = folder.new_id(id);
     let node = folder.fold_item_kind(node);
-    let ident = match node {
-        // The node may have changed, recompute the "pretty" impl name.
-        ItemKind::Impl(_, _, _, ref maybe_trait, ref ty, _) => {
-            ast_util::impl_pretty_name(maybe_trait, Some(&ty))
-        }
-        _ => ident
-    };
 
     Item {
         id: id,
         ident: folder.fold_ident(ident),
         attrs: fold_attrs(attrs, folder),
         node: node,
-        vis: vis,
+        vis: folder.fold_vis(vis),
         span: folder.new_span(span)
     }
 }
@@ -1099,7 +1094,7 @@ pub fn noop_fold_foreign_item<T: Folder>(ni: ForeignItem, folder: &mut T) -> For
                 ForeignItemKind::Static(folder.fold_ty(t), m)
             }
         },
-        vis: ni.vis,
+        vis: folder.fold_vis(ni.vis),
         span: folder.new_span(ni.span)
     }
 }
@@ -1273,9 +1268,10 @@ pub fn noop_fold_expr<T: Folder>(Expr {id, node, span, attrs}: Expr, folder: &mu
             ExprKind::Index(el, er) => {
                 ExprKind::Index(folder.fold_expr(el), folder.fold_expr(er))
             }
-            ExprKind::Range(e1, e2) => {
+            ExprKind::Range(e1, e2, lim) => {
                 ExprKind::Range(e1.map(|x| folder.fold_expr(x)),
-                          e2.map(|x| folder.fold_expr(x)))
+                                e2.map(|x| folder.fold_expr(x)),
+                                lim)
             }
             ExprKind::Path(qself, path) => {
                 let qself = qself.map(|QSelf { ty, position }| {
@@ -1331,7 +1327,8 @@ pub fn noop_fold_expr<T: Folder>(Expr {id, node, span, attrs}: Expr, folder: &mu
                         fields.move_map(|x| folder.fold_field(x)),
                         maybe_expr.map(|x| folder.fold_expr(x)))
             },
-            ExprKind::Paren(ex) => ExprKind::Paren(folder.fold_expr(ex))
+            ExprKind::Paren(ex) => ExprKind::Paren(folder.fold_expr(ex)),
+            ExprKind::Try(ex) => ExprKind::Try(folder.fold_expr(ex)),
         },
         span: folder.new_span(span),
         attrs: attrs.map_thin_attrs(|v| fold_attrs(v, folder)),
@@ -1388,6 +1385,16 @@ pub fn noop_fold_stmt<T: Folder>(Spanned {node, span}: Stmt, folder: &mut T)
     }
 }
 
+pub fn noop_fold_vis<T: Folder>(vis: Visibility, folder: &mut T) -> Visibility {
+    match vis {
+        Visibility::Restricted { path, id } => Visibility::Restricted {
+            path: path.map(|path| folder.fold_path(path)),
+            id: folder.new_id(id)
+        },
+        _ => vis,
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use std::io;
index d7d3e576a613d5fe9de131cf36624cc4b597b8dd..ca7e5729c0b7a176dd83ac415e85e2acf08dd07a 100644 (file)
 #![feature(libc)]
 #![feature(rustc_private)]
 #![feature(staged_api)]
-#![feature(str_char)]
 #![feature(str_escape)]
 #![feature(unicode)]
+#![feature(question_mark)]
 
 extern crate serialize;
 extern crate term;
 extern crate libc;
 #[macro_use] extern crate log;
 #[macro_use] #[no_link] extern crate rustc_bitflags;
+extern crate rustc_unicode;
 
 extern crate serialize as rustc_serialize; // used by deriving
 
@@ -89,7 +90,6 @@ pub mod syntax {
 
 pub mod abi;
 pub mod ast;
-pub mod ast_util;
 pub mod attr;
 pub mod codemap;
 pub mod config;
index 0950d6082e7e2bb5d8cbdb6a61f617b345e28215..b8e320e36e9b4883f9a68a530d36ba83189e3a7d 100644 (file)
@@ -25,7 +25,7 @@ impl<'a> Parser<'a> {
             debug!("parse_outer_attributes: self.token={:?}", self.token);
             match self.token {
                 token::Pound => {
-                    attrs.push(try!(self.parse_attribute(false)));
+                    attrs.push(self.parse_attribute(false)?);
                 }
                 token::DocComment(s) => {
                     let attr = ::attr::mk_sugared_doc_attr(
@@ -79,10 +79,10 @@ impl<'a> Parser<'a> {
                     ast::AttrStyle::Outer
                 };
 
-                try!(self.expect(&token::OpenDelim(token::Bracket)));
-                let meta_item = try!(self.parse_meta_item());
+                self.expect(&token::OpenDelim(token::Bracket))?;
+                let meta_item = self.parse_meta_item()?;
                 let hi = self.span.hi;
-                try!(self.expect(&token::CloseDelim(token::Bracket)));
+                self.expect(&token::CloseDelim(token::Bracket))?;
 
                 (mk_sp(lo, hi), meta_item, style)
             }
@@ -126,7 +126,7 @@ impl<'a> Parser<'a> {
                         break;
                     }
 
-                    let attr = try!(self.parse_attribute(true));
+                    let attr = self.parse_attribute(true)?;
                     assert!(attr.node.style == ast::AttrStyle::Inner);
                     attrs.push(attr);
                 }
@@ -166,12 +166,12 @@ impl<'a> Parser<'a> {
         }
 
         let lo = self.span.lo;
-        let ident = try!(self.parse_ident());
+        let ident = self.parse_ident()?;
         let name = self.id_to_interned_str(ident);
         match self.token {
             token::Eq => {
                 self.bump();
-                let lit = try!(self.parse_lit());
+                let lit = self.parse_lit()?;
                 // FIXME #623 Non-string meta items are not serialized correctly;
                 // just forbid them for now
                 match lit.node {
@@ -185,7 +185,7 @@ impl<'a> Parser<'a> {
                 Ok(P(spanned(lo, hi, ast::MetaItemKind::NameValue(name, lit))))
             }
             token::OpenDelim(token::Paren) => {
-                let inner_items = try!(self.parse_meta_seq());
+                let inner_items = self.parse_meta_seq()?;
                 let hi = self.span.hi;
                 Ok(P(spanned(lo, hi, ast::MetaItemKind::List(name, inner_items))))
             }
index e336c98f03ca0c7d2795f2407776c12cca94c8b1..629edced804f51f015f88e89989caa5fc294a485 100644 (file)
@@ -15,7 +15,7 @@ use codemap::{BytePos, CharPos, CodeMap, Pos};
 use errors;
 use parse::lexer::is_block_doc_comment;
 use parse::lexer::{StringReader, TokenAndSpan};
-use parse::lexer::{is_whitespace, Reader};
+use parse::lexer::{is_pattern_whitespace, Reader};
 use parse::lexer;
 use print::pprust;
 use str::char_at;
@@ -153,7 +153,7 @@ fn push_blank_line_comment(rdr: &StringReader, comments: &mut Vec<Comment>) {
 }
 
 fn consume_whitespace_counting_blank_lines(rdr: &mut StringReader, comments: &mut Vec<Comment>) {
-    while is_whitespace(rdr.curr) && !rdr.is_eof() {
+    while is_pattern_whitespace(rdr.curr) && !rdr.is_eof() {
         if rdr.col == CharPos(0) && rdr.curr_is('\n') {
             push_blank_line_comment(rdr, &mut *comments);
         }
index 3183dfbd954f57dff4642fe8dceffb231e7c39f9..a5cb5c7117e212d64bd82e32fc9a2d426161de74 100644 (file)
@@ -16,6 +16,7 @@ use ext::tt::transcribe::tt_next_token;
 use parse::token::str_to_ident;
 use parse::token;
 use str::char_at;
+use rustc_unicode::property::Pattern_White_Space;
 
 use std::borrow::Cow;
 use std::char;
@@ -546,10 +547,10 @@ impl<'a> StringReader<'a> {
                 let c = self.scan_comment();
                 debug!("scanning a comment {:?}", c);
                 c
-            }
-            c if is_whitespace(Some(c)) => {
+            },
+            c if is_pattern_whitespace(Some(c)) => {
                 let start_bpos = self.last_pos;
-                while is_whitespace(self.curr) {
+                while is_pattern_whitespace(self.curr) {
                     self.bump();
                 }
                 let c = Some(TokenAndSpan {
@@ -1440,7 +1441,7 @@ impl<'a> StringReader<'a> {
     }
 
     fn consume_whitespace(&mut self) {
-        while is_whitespace(self.curr) && !self.is_eof() {
+        while is_pattern_whitespace(self.curr) && !self.is_eof() {
             self.bump();
         }
     }
@@ -1465,7 +1466,7 @@ impl<'a> StringReader<'a> {
     }
 
     fn consume_non_eol_whitespace(&mut self) {
-        while is_whitespace(self.curr) && !self.curr_is('\n') && !self.is_eof() {
+        while is_pattern_whitespace(self.curr) && !self.curr_is('\n') && !self.is_eof() {
             self.bump();
         }
     }
@@ -1596,11 +1597,10 @@ impl<'a> StringReader<'a> {
     }
 }
 
-pub fn is_whitespace(c: Option<char>) -> bool {
-    match c.unwrap_or('\x00') { // None can be null for now... it's not whitespace
-        ' ' | '\n' | '\t' | '\r' => true,
-        _ => false,
-    }
+// This tests the character for the unicode property 'PATTERN_WHITE_SPACE' which
+// is guaranteed to be forward compatible. http://unicode.org/reports/tr31/#R3
+pub fn is_pattern_whitespace(c: Option<char>) -> bool {
+    c.map_or(false, Pattern_White_Space)
 }
 
 fn in_range(c: Option<char>, lo: char, hi: char) -> bool {
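
The comment above on `is_pattern_whitespace` is easier to see against the removed ASCII-only helper; the following comparison is illustrative only and not part of this change:

    // The old helper accepted only these four ASCII characters, while
    // Pattern_White_Space (UAX #31) also includes e.g. U+0085 (NEL) and
    // U+2028 (LINE SEPARATOR), and is guaranteed not to change over time.
    fn old_is_whitespace(c: Option<char>) -> bool {
        match c.unwrap_or('\x00') {
            ' ' | '\n' | '\t' | '\r' => true,
            _ => false,
        }
    }

    fn main() {
        assert!(old_is_whitespace(Some(' ')));
        // NEL counts as pattern whitespace, but the old check rejects it.
        assert!(!old_is_whitespace(Some('\u{0085}')));
    }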
index 1ec2479058cc8d22d4deb5105b2b7a6d6840a2f7..ea5d6739e6dd3db8561699ebbc13d908dadb098e 100644 (file)
@@ -71,95 +71,97 @@ impl ParseSess {
 // uses a HOF to parse anything, and <source> includes file and
 // source_str.
 
-pub fn parse_crate_from_file(
-    input: &Path,
-    cfg: ast::CrateConfig,
-    sess: &ParseSess
-) -> ast::Crate {
+pub fn parse_crate_from_file<'a>(input: &Path,
+                                 cfg: ast::CrateConfig,
+                                 sess: &'a ParseSess)
+                                 -> PResult<'a, ast::Crate> {
     let mut parser = new_parser_from_file(sess, cfg, input);
-    abort_if_errors(parser.parse_crate_mod(), &parser)
+    parser.parse_crate_mod()
 }
 
-pub fn parse_crate_attrs_from_file(
-    input: &Path,
-    cfg: ast::CrateConfig,
-    sess: &ParseSess
-) -> Vec<ast::Attribute> {
+pub fn parse_crate_attrs_from_file<'a>(input: &Path,
+                                       cfg: ast::CrateConfig,
+                                       sess: &'a ParseSess)
+                                       -> PResult<'a, Vec<ast::Attribute>> {
     let mut parser = new_parser_from_file(sess, cfg, input);
-    abort_if_errors(parser.parse_inner_attributes(), &parser)
+    parser.parse_inner_attributes()
 }
 
-pub fn parse_crate_from_source_str(name: String,
-                                   source: String,
-                                   cfg: ast::CrateConfig,
-                                   sess: &ParseSess)
-                                   -> ast::Crate {
+pub fn parse_crate_from_source_str<'a>(name: String,
+                                       source: String,
+                                       cfg: ast::CrateConfig,
+                                       sess: &'a ParseSess)
+                                       -> PResult<'a, ast::Crate> {
     let mut p = new_parser_from_source_str(sess,
                                            cfg,
                                            name,
                                            source);
-    panictry!(p.parse_crate_mod())
+    p.parse_crate_mod()
 }
 
-pub fn parse_crate_attrs_from_source_str(name: String,
-                                         source: String,
-                                         cfg: ast::CrateConfig,
-                                         sess: &ParseSess)
-                                         -> Vec<ast::Attribute> {
+pub fn parse_crate_attrs_from_source_str<'a>(name: String,
+                                             source: String,
+                                             cfg: ast::CrateConfig,
+                                             sess: &'a ParseSess)
+                                             -> PResult<'a, Vec<ast::Attribute>> {
     let mut p = new_parser_from_source_str(sess,
                                            cfg,
                                            name,
                                            source);
-    panictry!(p.parse_inner_attributes())
+    p.parse_inner_attributes()
 }
 
-pub fn parse_expr_from_source_str(name: String,
-                                  source: String,
-                                  cfg: ast::CrateConfig,
-                                  sess: &ParseSess)
-                                  -> P<ast::Expr> {
+pub fn parse_expr_from_source_str<'a>(name: String,
+                                      source: String,
+                                      cfg: ast::CrateConfig,
+                                      sess: &'a ParseSess)
+                                      -> PResult<'a, P<ast::Expr>> {
     let mut p = new_parser_from_source_str(sess, cfg, name, source);
-    panictry!(p.parse_expr())
+    p.parse_expr()
 }
 
-pub fn parse_item_from_source_str(name: String,
-                                  source: String,
-                                  cfg: ast::CrateConfig,
-                                  sess: &ParseSess)
-                                  -> Option<P<ast::Item>> {
+/// Parses an item.
+///
+/// Returns `Ok(Some(item))` when successful, `Ok(None)` when no item was found, and `Err`
+/// when a syntax error occurred.
+pub fn parse_item_from_source_str<'a>(name: String,
+                                      source: String,
+                                      cfg: ast::CrateConfig,
+                                      sess: &'a ParseSess)
+                                      -> PResult<'a, Option<P<ast::Item>>> {
     let mut p = new_parser_from_source_str(sess, cfg, name, source);
-    panictry!(p.parse_item())
+    p.parse_item()
 }
 
-pub fn parse_meta_from_source_str(name: String,
-                                  source: String,
-                                  cfg: ast::CrateConfig,
-                                  sess: &ParseSess)
-                                  -> P<ast::MetaItem> {
+pub fn parse_meta_from_source_str<'a>(name: String,
+                                      source: String,
+                                      cfg: ast::CrateConfig,
+                                      sess: &'a ParseSess)
+                                      -> PResult<'a, P<ast::MetaItem>> {
     let mut p = new_parser_from_source_str(sess, cfg, name, source);
-    panictry!(p.parse_meta_item())
+    p.parse_meta_item()
 }
 
-pub fn parse_stmt_from_source_str(name: String,
-                                  source: String,
-                                  cfg: ast::CrateConfig,
-                                  sess: &ParseSess)
-                                  -> Option<ast::Stmt> {
+pub fn parse_stmt_from_source_str<'a>(name: String,
+                                      source: String,
+                                      cfg: ast::CrateConfig,
+                                      sess: &'a ParseSess)
+                                      -> PResult<'a, Option<ast::Stmt>> {
     let mut p = new_parser_from_source_str(
         sess,
         cfg,
         name,
         source
     );
-    panictry!(p.parse_stmt())
+    p.parse_stmt()
 }
 
 // Warning: This parses with quote_depth > 0, which is not the default.
-pub fn parse_tts_from_source_str(name: String,
-                                 source: String,
-                                 cfg: ast::CrateConfig,
-                                 sess: &ParseSess)
-                                 -> Vec<ast::TokenTree> {
+pub fn parse_tts_from_source_str<'a>(name: String,
+                                     source: String,
+                                     cfg: ast::CrateConfig,
+                                     sess: &'a ParseSess)
+                                     -> PResult<'a, Vec<ast::TokenTree>> {
     let mut p = new_parser_from_source_str(
         sess,
         cfg,
@@ -168,7 +170,7 @@ pub fn parse_tts_from_source_str(name: String,
     );
     p.quote_depth += 1;
     // right now this is re-creating the token trees from ... token trees.
-    panictry!(p.parse_all_token_trees())
+    p.parse_all_token_trees()
 }
 
 // Create a new parser from a source string
@@ -265,20 +267,6 @@ pub fn tts_to_parser<'a>(sess: &'a ParseSess,
     p
 }
 
-
-fn abort_if_errors<'a, T>(result: PResult<'a, T>, p: &Parser) -> T {
-    match result {
-        Ok(c) => {
-            c
-        }
-        Err(mut e) => {
-            e.emit();
-            p.abort_if_errors();
-            unreachable!();
-        }
-    }
-}
-
 /// Parse a string representing a character literal into its final form.
 /// Rather than just accepting/rejecting a given literal, unescapes it as
 /// well. Can take any slice prefixed by a character escape. Returns the
@@ -1078,19 +1066,21 @@ mod tests {
 
         let name = "<source>".to_string();
         let source = "/// doc comment\r\nfn foo() {}".to_string();
-        let item = parse_item_from_source_str(name.clone(), source, Vec::new(), &sess).unwrap();
+        let item = parse_item_from_source_str(name.clone(), source, Vec::new(), &sess)
+            .unwrap().unwrap();
         let doc = first_attr_value_str_by_name(&item.attrs, "doc").unwrap();
         assert_eq!(&doc[..], "/// doc comment");
 
         let source = "/// doc comment\r\n/// line 2\r\nfn foo() {}".to_string();
-        let item = parse_item_from_source_str(name.clone(), source, Vec::new(), &sess).unwrap();
+        let item = parse_item_from_source_str(name.clone(), source, Vec::new(), &sess)
+            .unwrap().unwrap();
         let docs = item.attrs.iter().filter(|a| &*a.name() == "doc")
                     .map(|a| a.value_str().unwrap().to_string()).collect::<Vec<_>>();
         let b: &[_] = &["/// doc comment".to_string(), "/// line 2".to_string()];
         assert_eq!(&docs[..], b);
 
         let source = "/** doc comment\r\n *  with CRLF */\r\nfn foo() {}".to_string();
-        let item = parse_item_from_source_str(name, source, Vec::new(), &sess).unwrap();
+        let item = parse_item_from_source_str(name, source, Vec::new(), &sess).unwrap().unwrap();
         let doc = first_attr_value_str_by_name(&item.attrs, "doc").unwrap();
         assert_eq!(&doc[..], "/** doc comment\n *  with CRLF */");
     }
@@ -1099,7 +1089,7 @@ mod tests {
     fn ttdelim_span() {
         let sess = ParseSess::new();
         let expr = parse::parse_expr_from_source_str("foo".to_string(),
-            "foo!( fn main() { body } )".to_string(), vec![], &sess);
+            "foo!( fn main() { body } )".to_string(), vec![], &sess).unwrap();
 
         let tts = match expr.node {
             ast::ExprKind::Mac(ref mac) => mac.node.tts.clone(),
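
Since the `parse_*_from_source_str` entry points above now return `PResult` instead of aborting internally, callers match on the result. A hedged sketch (setup assumed; names taken from this file):

    let sess = ParseSess::new();
    let src = "fn answer() -> u32 { 42 }".to_string();
    match parse_item_from_source_str("<example>".to_string(), src, Vec::new(), &sess) {
        Ok(Some(item)) => println!("parsed item `{}`", item.ident),
        Ok(None) => println!("source contained no item"),
        Err(mut err) => err.emit(), // a DiagnosticBuilder; emit rather than panic
    }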
index f11c12a059a6a03b34ca73db6473e2cb8107e4a4..b8c926f8de9cb7b693025153067bc6d8a94ffc49 100644 (file)
@@ -18,9 +18,9 @@ use ast::{Mod, Arg, Arm, Attribute, BindingMode, TraitItemKind};
 use ast::Block;
 use ast::{BlockCheckMode, CaptureBy};
 use ast::{Constness, Crate, CrateConfig};
-use ast::{Decl, DeclKind};
+use ast::{Decl, DeclKind, Defaultness};
 use ast::{EMPTY_CTXT, EnumDef, ExplicitSelf};
-use ast::{Expr, ExprKind};
+use ast::{Expr, ExprKind, RangeLimits};
 use ast::{Field, FnDecl};
 use ast::{ForeignItem, ForeignItemKind, FunctionRetTy};
 use ast::{Ident, ImplItem, Item, ItemKind};
@@ -29,7 +29,6 @@ use ast::Local;
 use ast::MacStmtStyle;
 use ast::Mac_;
 use ast::{MutTy, Mutability};
-use ast::NamedField;
 use ast::{Pat, PatKind};
 use ast::{PolyTraitRef, QSelf};
 use ast::{Stmt, StmtKind};
@@ -38,13 +37,11 @@ use ast::StrStyle;
 use ast::SelfKind;
 use ast::{Delimited, SequenceRepetition, TokenTree, TraitItem, TraitRef};
 use ast::{Ty, TyKind, TypeBinding, TyParam, TyParamBounds};
-use ast::UnnamedField;
 use ast::{ViewPath, ViewPathGlob, ViewPathList, ViewPathSimple};
 use ast::{Visibility, WhereClause};
 use attr::{ThinAttributes, ThinAttributesExt, AttributesExt};
 use ast::{BinOpKind, UnOp};
 use ast;
-use ast_util::{self, ident_to_path};
 use codemap::{self, Span, BytePos, Spanned, spanned, mk_sp, CodeMap};
 use errors::{self, DiagnosticBuilder};
 use ext::tt::macro_parser;
@@ -99,13 +96,6 @@ pub enum BoundParsingMode {
     Modified,
 }
 
-/// `pub` should be parsed in struct fields and not parsed in variant fields
-#[derive(Clone, Copy, PartialEq)]
-pub enum ParsePub {
-    Yes,
-    No,
-}
-
 #[derive(Clone, Copy, PartialEq)]
 pub enum SemiColonMode {
     Break,
@@ -274,9 +264,10 @@ pub struct Parser<'a> {
     /// extra detail when the same error is seen twice
     pub obsolete_set: HashSet<ObsoleteSyntax>,
     /// Used to determine the path to externally loaded source files
+    pub filename: Option<String>,
     pub mod_path_stack: Vec<InternedString>,
-    /// Stack of spans of open delimiters. Used for error message.
-    pub open_braces: Vec<Span>,
+    /// Stack of open delimiters and their spans. Used for error message.
+    pub open_braces: Vec<(token::DelimToken, Span)>,
     /// Flag if this parser "owns" the directory that it is currently parsing
     /// in. This will affect how nested files are looked up.
     pub owns_directory: bool,
@@ -355,6 +346,9 @@ impl<'a> Parser<'a> {
     {
         let tok0 = rdr.real_token();
         let span = tok0.sp;
+        let filename = if span != codemap::DUMMY_SP {
+            Some(sess.codemap().span_to_filename(span))
+        } else { None };
         let placeholder = TokenAndSpan {
             tok: token::Underscore,
             sp: span,
@@ -384,6 +378,7 @@ impl<'a> Parser<'a> {
             quote_depth: 0,
             obsolete_set: HashSet::new(),
             mod_path_stack: Vec::new(),
+            filename: filename,
             open_braces: Vec::new(),
             owns_directory: true,
             root_module_name: None,
@@ -576,9 +571,12 @@ impl<'a> Parser<'a> {
                 self.bug("ident interpolation not converted to real token");
             }
             _ => {
-                let token_str = self.this_token_to_string();
-                Err(self.fatal(&format!("expected ident, found `{}`",
-                                    token_str)))
+                let mut err = self.fatal(&format!("expected identifier, found `{}`",
+                                                  self.this_token_to_string()));
+                if self.token == token::Underscore {
+                    err.fileline_note(self.span, "`_` is a wildcard pattern, not an identifier");
+                }
+                Err(err)
             }
         }
     }
@@ -594,11 +592,11 @@ impl<'a> Parser<'a> {
     pub fn parse_path_list_item(&mut self) -> PResult<'a, ast::PathListItem> {
         let lo = self.span.lo;
         let node = if self.eat_keyword(keywords::SelfValue) {
-            let rename = try!(self.parse_rename());
+            let rename = self.parse_rename()?;
             ast::PathListItemKind::Mod { id: ast::DUMMY_NODE_ID, rename: rename }
         } else {
-            let ident = try!(self.parse_ident());
-            let rename = try!(self.parse_rename());
+            let ident = self.parse_ident()?;
+            let rename = self.parse_rename()?;
             ast::PathListItemKind::Ident { name: ident, rename: rename, id: ast::DUMMY_NODE_ID }
         };
         let hi = self.last_span.hi;
@@ -648,6 +646,25 @@ impl<'a> Parser<'a> {
         }
     }
 
+    pub fn check_contextual_keyword(&mut self, ident: Ident) -> bool {
+        let tok = token::Ident(ident, token::Plain);
+        self.expected_tokens.push(TokenType::Token(tok));
+        if let token::Ident(ref cur_ident, _) = self.token {
+            cur_ident.name == ident.name
+        } else {
+            false
+        }
+    }
+
+    pub fn eat_contextual_keyword(&mut self, ident: Ident) -> bool {
+        if self.check_contextual_keyword(ident) {
+            self.bump();
+            true
+        } else {
+            false
+        }
+    }
+
     /// If the given word is not a keyword, signal an error.
     /// If the next token is not the given word, signal an error.
     /// Otherwise, eat it.
@@ -709,7 +726,6 @@ impl<'a> Parser<'a> {
         }
     }
 
-
     /// Attempt to consume a `<`. If `<<` is seen, replace it with a single
     /// `<` and continue. If a `<` is not seen, return false.
     ///
@@ -797,13 +813,13 @@ impl<'a> Parser<'a> {
             }
 
             if i % 2 == 0 {
-                match try!(f(self)) {
+                match f(self)? {
                     Some(result) => v.push(result),
                     None => return Ok((P::from_vec(v), true))
                 }
             } else {
                 if let Some(t) = sep.as_ref() {
-                    try!(self.expect(t));
+                    self.expect(t)?;
                 }
 
             }
@@ -819,8 +835,8 @@ impl<'a> Parser<'a> {
                                         -> PResult<'a, P<[T]>> where
         F: FnMut(&mut Parser<'a>) -> PResult<'a, T>,
     {
-        let (result, returned) = try!(self.parse_seq_to_before_gt_or_return(sep,
-                                                    |p| Ok(Some(try!(f(p))))));
+        let (result, returned) = self.parse_seq_to_before_gt_or_return(sep,
+                                                                       |p| Ok(Some(f(p)?)))?;
         assert!(!returned);
         return Ok(result);
     }
@@ -831,8 +847,8 @@ impl<'a> Parser<'a> {
                                  -> PResult<'a, P<[T]>> where
         F: FnMut(&mut Parser<'a>) -> PResult<'a, T>,
     {
-        let v = try!(self.parse_seq_to_before_gt(sep, f));
-        try!(self.expect_gt());
+        let v = self.parse_seq_to_before_gt(sep, f)?;
+        self.expect_gt()?;
         return Ok(v);
     }
 
@@ -842,9 +858,9 @@ impl<'a> Parser<'a> {
                                            -> PResult<'a, (P<[T]>, bool)> where
         F: FnMut(&mut Parser<'a>) -> PResult<'a, Option<T>>,
     {
-        let (v, returned) = try!(self.parse_seq_to_before_gt_or_return(sep, f));
+        let (v, returned) = self.parse_seq_to_before_gt_or_return(sep, f)?;
         if !returned {
-            try!(self.expect_gt());
+            self.expect_gt()?;
         }
         return Ok((v, returned));
     }
@@ -881,7 +897,7 @@ impl<'a> Parser<'a> {
                                          sep: SeqSep,
                                          f: F)
                                          -> Vec<T>
-        where F: FnMut(&mut Parser<'a>) -> PResult<'a,  T>,
+        where F: FnMut(&mut Parser<'a>) -> PResult<'a,  T>
     {
         self.parse_seq_to_before_tokens(&[ket], sep, f, |mut e| e.emit())
     }
@@ -939,7 +955,7 @@ impl<'a> Parser<'a> {
                                      -> PResult<'a, Vec<T>> where
         F: FnMut(&mut Parser<'a>) -> PResult<'a,  T>,
     {
-        try!(self.expect(bra));
+        self.expect(bra)?;
         let result = self.parse_seq_to_before_end(ket, sep, f);
         if self.token == *ket {
             self.bump();
@@ -957,7 +973,7 @@ impl<'a> Parser<'a> {
                                         -> PResult<'a, Vec<T>> where
         F: FnMut(&mut Parser<'a>) -> PResult<'a,  T>,
     {
-        let result = try!(self.parse_unspanned_seq(bra, ket, sep, f));
+        let result = self.parse_unspanned_seq(bra, ket, sep, f)?;
         if result.is_empty() {
             let last_span = self.last_span;
             self.span_err(last_span,
@@ -977,7 +993,7 @@ impl<'a> Parser<'a> {
         F: FnMut(&mut Parser<'a>) -> PResult<'a,  T>,
     {
         let lo = self.span.lo;
-        try!(self.expect(bra));
+        self.expect(bra)?;
         let result = self.parse_seq_to_before_end(ket, sep, f);
         let hi = self.span.hi;
         self.bump();
@@ -1139,19 +1155,19 @@ impl<'a> Parser<'a> {
         // parse <'lt>
         let lo = self.span.lo;
 
-        let lifetime_defs = try!(self.parse_late_bound_lifetime_defs());
+        let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
 
         // examine next token to decide to do
         if self.token_is_bare_fn_keyword() {
             self.parse_ty_bare_fn(lifetime_defs)
         } else {
             let hi = self.span.hi;
-            let trait_ref = try!(self.parse_trait_ref());
+            let trait_ref = self.parse_trait_ref()?;
             let poly_trait_ref = ast::PolyTraitRef { bound_lifetimes: lifetime_defs,
                                                      trait_ref: trait_ref,
                                                      span: mk_sp(lo, hi)};
             let other_bounds = if self.eat(&token::BinOp(token::Plus)) {
-                try!(self.parse_ty_param_bounds(BoundParsingMode::Bare))
+                self.parse_ty_param_bounds(BoundParsingMode::Bare)?
             } else {
                 P::empty()
             };
@@ -1164,7 +1180,7 @@ impl<'a> Parser<'a> {
     }
 
     pub fn parse_ty_path(&mut self) -> PResult<'a, TyKind> {
-        Ok(TyKind::Path(None, try!(self.parse_path(LifetimeAndTypesWithoutColons))))
+        Ok(TyKind::Path(None, self.parse_path(LifetimeAndTypesWithoutColons)?))
     }
 
     /// parse a TyKind::BareFn type:
@@ -1172,26 +1188,26 @@ impl<'a> Parser<'a> {
                             -> PResult<'a, TyKind> {
         /*
 
-        [unsafe] [extern "ABI"] fn <'lt> (S) -> T
-         ^~~~^           ^~~~^     ^~~~^ ^~^    ^
-           |               |         |    |     |
-           |               |         |    |   Return type
-           |               |         |  Argument types
-           |               |     Lifetimes
+        [unsafe] [extern "ABI"] fn (S) -> T
+         ^~~~^           ^~~~^     ^~^    ^
+           |               |        |     |
+           |               |        |   Return type
+           |               |      Argument types
+           |               |
            |              ABI
         Function Style
         */
 
-        let unsafety = try!(self.parse_unsafety());
+        let unsafety = self.parse_unsafety()?;
         let abi = if self.eat_keyword(keywords::Extern) {
-            try!(self.parse_opt_abi()).unwrap_or(Abi::C)
+            self.parse_opt_abi()?.unwrap_or(Abi::C)
         } else {
             Abi::Rust
         };
 
-        try!(self.expect_keyword(keywords::Fn));
-        let (inputs, variadic) = try!(self.parse_fn_args(false, true));
-        let ret_ty = try!(self.parse_ret_ty());
+        self.expect_keyword(keywords::Fn)?;
+        let (inputs, variadic) = self.parse_fn_args(false, true)?;
+        let ret_ty = self.parse_ret_ty()?;
         let decl = P(FnDecl {
             inputs: inputs,
             output: ret_ty,
@@ -1251,25 +1267,25 @@ impl<'a> Parser<'a> {
             SeqSep::none(),
             |p| -> PResult<'a, TraitItem> {
             maybe_whole!(no_clone_from_p p, NtTraitItem);
-            let mut attrs = try!(p.parse_outer_attributes());
+            let mut attrs = p.parse_outer_attributes()?;
             let lo = p.span.lo;
 
             let (name, node) = if p.eat_keyword(keywords::Type) {
-                let TyParam {ident, bounds, default, ..} = try!(p.parse_ty_param());
-                try!(p.expect(&token::Semi));
+                let TyParam {ident, bounds, default, ..} = p.parse_ty_param()?;
+                p.expect(&token::Semi)?;
                 (ident, TraitItemKind::Type(bounds, default))
             } else if p.is_const_item() {
-                try!(p.expect_keyword(keywords::Const));
-                let ident = try!(p.parse_ident());
-                try!(p.expect(&token::Colon));
-                let ty = try!(p.parse_ty_sum());
+                p.expect_keyword(keywords::Const)?;
+                let ident = p.parse_ident()?;
+                p.expect(&token::Colon)?;
+                let ty = p.parse_ty_sum()?;
                 let default = if p.check(&token::Eq) {
                     p.bump();
-                    let expr = try!(p.parse_expr());
-                    try!(p.commit_expr_expecting(&expr, token::Semi));
+                    let expr = p.parse_expr()?;
+                    p.commit_expr_expecting(&expr, token::Semi)?;
                     Some(expr)
                 } else {
-                    try!(p.expect(&token::Semi));
+                    p.expect(&token::Semi)?;
                     None
                 };
                 (ident, TraitItemKind::Const(ty, default))
@@ -1288,7 +1304,7 @@ impl<'a> Parser<'a> {
                                 }
 
                                 token::OpenDelim(token::Brace) => {
-                                    try!(p.parse_token_tree());
+                                    p.parse_token_tree()?;
                                     break;
                                 }
 
@@ -1300,17 +1316,17 @@ impl<'a> Parser<'a> {
                     }
                 };
 
-                let ident = try!(p.parse_ident());
-                let mut generics = try!(p.parse_generics());
+                let ident = p.parse_ident()?;
+                let mut generics = p.parse_generics()?;
 
-                let (explicit_self, d) = try!(p.parse_fn_decl_with_self(|p: &mut Parser<'a>|{
+                let (explicit_self, d) = p.parse_fn_decl_with_self(|p: &mut Parser<'a>|{
                     // This is somewhat dubious; We don't want to allow
                     // argument names to be left off if there is a
                     // definition...
                     p.parse_arg_general(false)
-                }));
+                })?;
 
-                generics.where_clause = try!(p.parse_where_clause());
+                generics.where_clause = p.parse_where_clause()?;
                 let sig = ast::MethodSig {
                     unsafety: unsafety,
                     constness: constness,
@@ -1329,7 +1345,7 @@ impl<'a> Parser<'a> {
                   token::OpenDelim(token::Brace) => {
                     debug!("parse_trait_methods(): parsing provided method");
                     let (inner_attrs, body) =
-                        try!(p.parse_inner_attrs_and_block());
+                        p.parse_inner_attrs_and_block()?;
                     attrs.extend(inner_attrs.iter().cloned());
                     Some(body)
                   }
@@ -1355,8 +1371,8 @@ impl<'a> Parser<'a> {
 
     /// Parse a possibly mutable type
     pub fn parse_mt(&mut self) -> PResult<'a, MutTy> {
-        let mutbl = try!(self.parse_mutability());
-        let t = try!(self.parse_ty());
+        let mutbl = self.parse_mutability()?;
+        let t = self.parse_ty()?;
         Ok(MutTy { ty: t, mutbl: mutbl })
     }
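
Most of this file's changes are the mechanical `try!(expr)` -> `expr?` rewrite seen in parse_mt above. A minimal standalone sketch of the equivalence (eliding the `From::from` error conversion the real macro also performs):

use std::num::ParseIntError;

// Roughly what `try!` expanded to: unwrap on Ok, return early on Err.
fn double_old(s: &str) -> Result<i32, ParseIntError> {
    let n = match s.parse::<i32>() {
        Ok(v) => v,
        Err(e) => return Err(e),
    };
    Ok(n * 2)
}

// The `?` form the diff converts to: same early-return control flow.
fn double_new(s: &str) -> Result<i32, ParseIntError> {
    let n = s.parse::<i32>()?;
    Ok(n * 2)
}

fn main() {
    assert_eq!(double_old("21"), double_new("21"));
    assert!(double_old("x").is_err() && double_new("x").is_err());
}
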
 
@@ -1366,7 +1382,7 @@ impl<'a> Parser<'a> {
             if self.eat(&token::Not) {
                 Ok(FunctionRetTy::None(self.last_span))
             } else {
-                Ok(FunctionRetTy::Ty(try!(self.parse_ty())))
+                Ok(FunctionRetTy::Ty(self.parse_ty()?))
             }
         } else {
             let pos = self.span.lo;
@@ -1377,13 +1393,13 @@ impl<'a> Parser<'a> {
     /// Parse a type in a context where `T1+T2` is allowed.
     pub fn parse_ty_sum(&mut self) -> PResult<'a, P<Ty>> {
         let lo = self.span.lo;
-        let lhs = try!(self.parse_ty());
+        let lhs = self.parse_ty()?;
 
         if !self.eat(&token::BinOp(token::Plus)) {
             return Ok(lhs);
         }
 
-        let bounds = try!(self.parse_ty_param_bounds(BoundParsingMode::Bare));
+        let bounds = self.parse_ty_param_bounds(BoundParsingMode::Bare)?;
 
         // In type grammar, `+` is treated like a binary operator,
         // and hence both L and R side are required.
@@ -1414,7 +1430,7 @@ impl<'a> Parser<'a> {
             let mut ts = vec![];
             let mut last_comma = false;
             while self.token != token::CloseDelim(token::Paren) {
-                ts.push(try!(self.parse_ty_sum()));
+                ts.push(self.parse_ty_sum()?);
                 if self.check(&token::Comma) {
                     last_comma = true;
                     self.bump();
@@ -1424,7 +1440,7 @@ impl<'a> Parser<'a> {
                 }
             }
 
-            try!(self.expect(&token::CloseDelim(token::Paren)));
+            self.expect(&token::CloseDelim(token::Paren))?;
             if ts.len() == 1 && !last_comma {
                 TyKind::Paren(ts.into_iter().nth(0).unwrap())
             } else {
@@ -1433,54 +1449,54 @@ impl<'a> Parser<'a> {
         } else if self.check(&token::BinOp(token::Star)) {
             // STAR POINTER (bare pointer?)
             self.bump();
-            TyKind::Ptr(try!(self.parse_ptr()))
+            TyKind::Ptr(self.parse_ptr()?)
         } else if self.check(&token::OpenDelim(token::Bracket)) {
             // VECTOR
-            try!(self.expect(&token::OpenDelim(token::Bracket)));
-            let t = try!(self.parse_ty_sum());
+            self.expect(&token::OpenDelim(token::Bracket))?;
+            let t = self.parse_ty_sum()?;
 
             // Parse the `; e` in `[ i32; e ]`
             // where `e` is a const expression
-            let t = match try!(self.maybe_parse_fixed_length_of_vec()) {
+            let t = match self.maybe_parse_fixed_length_of_vec()? {
                 None => TyKind::Vec(t),
                 Some(suffix) => TyKind::FixedLengthVec(t, suffix)
             };
-            try!(self.expect(&token::CloseDelim(token::Bracket)));
+            self.expect(&token::CloseDelim(token::Bracket))?;
             t
         } else if self.check(&token::BinOp(token::And)) ||
                   self.token == token::AndAnd {
             // BORROWED POINTER
-            try!(self.expect_and());
-            try!(self.parse_borrowed_pointee())
+            self.expect_and()?;
+            self.parse_borrowed_pointee()?
         } else if self.check_keyword(keywords::For) {
-            try!(self.parse_for_in_type())
+            self.parse_for_in_type()?
         } else if self.token_is_bare_fn_keyword() {
             // BARE FUNCTION
-            try!(self.parse_ty_bare_fn(Vec::new()))
+            self.parse_ty_bare_fn(Vec::new())?
         } else if self.eat_keyword_noexpect(keywords::Typeof) {
             // TYPEOF
             // In order to not be ambiguous, the type must be surrounded by parens.
-            try!(self.expect(&token::OpenDelim(token::Paren)));
-            let e = try!(self.parse_expr());
-            try!(self.expect(&token::CloseDelim(token::Paren)));
+            self.expect(&token::OpenDelim(token::Paren))?;
+            let e = self.parse_expr()?;
+            self.expect(&token::CloseDelim(token::Paren))?;
             TyKind::Typeof(e)
         } else if self.eat_lt() {
 
             let (qself, path) =
-                 try!(self.parse_qualified_path(NoTypesAllowed));
+                 self.parse_qualified_path(NoTypesAllowed)?;
 
             TyKind::Path(Some(qself), path)
         } else if self.check(&token::ModSep) ||
                   self.token.is_ident() ||
                   self.token.is_path() {
-            let path = try!(self.parse_path(LifetimeAndTypesWithoutColons));
+            let path = self.parse_path(LifetimeAndTypesWithoutColons)?;
             if self.check(&token::Not) {
                 // MACRO INVOCATION
                 self.bump();
-                let delim = try!(self.expect_open_delim());
-                let tts = try!(self.parse_seq_to_end(&token::CloseDelim(delim),
-                                                     SeqSep::none(),
-                                                     |p| p.parse_token_tree()));
+                let delim = self.expect_open_delim()?;
+                let tts = self.parse_seq_to_end(&token::CloseDelim(delim),
+                                                SeqSep::none(),
+                                                |p| p.parse_token_tree())?;
                 let hi = self.span.hi;
                 TyKind::Mac(spanned(lo, hi, Mac_ { path: path, tts: tts, ctxt: EMPTY_CTXT }))
             } else {
@@ -1502,9 +1518,9 @@ impl<'a> Parser<'a> {
 
     pub fn parse_borrowed_pointee(&mut self) -> PResult<'a, TyKind> {
         // look for `&'lt` or `&'foo ` and interpret `foo` as the region name:
-        let opt_lifetime = try!(self.parse_opt_lifetime());
+        let opt_lifetime = self.parse_opt_lifetime()?;
 
-        let mt = try!(self.parse_mt());
+        let mt = self.parse_mt()?;
         return Ok(TyKind::Rptr(opt_lifetime, mt));
     }
 
@@ -1521,7 +1537,7 @@ impl<'a> Parser<'a> {
                            known as `*const T`");
             Mutability::Immutable
         };
-        let t = try!(self.parse_ty());
+        let t = self.parse_ty()?;
         Ok(MutTy { ty: t, mutbl: mutbl })
     }
 
@@ -1552,18 +1568,23 @@ impl<'a> Parser<'a> {
         let pat = if require_name || self.is_named_argument() {
             debug!("parse_arg_general parse_pat (require_name:{})",
                    require_name);
-            let pat = try!(self.parse_pat());
+            let pat = self.parse_pat()?;
 
-            try!(self.expect(&token::Colon));
+            self.expect(&token::Colon)?;
             pat
         } else {
             debug!("parse_arg_general ident_to_pat");
-            ast_util::ident_to_pat(ast::DUMMY_NODE_ID,
-                                   self.last_span,
-                                   special_idents::invalid)
+            let sp = self.last_span;
+            let spanned = Spanned { span: sp, node: special_idents::invalid };
+            P(Pat {
+                id: ast::DUMMY_NODE_ID,
+                node: PatKind::Ident(BindingMode::ByValue(Mutability::Immutable),
+                                     spanned, None),
+                span: sp
+            })
         };
 
-        let t = try!(self.parse_ty_sum());
+        let t = self.parse_ty_sum()?;
 
         Ok(Arg {
             ty: t,
@@ -1579,9 +1600,9 @@ impl<'a> Parser<'a> {
 
     /// Parse an argument in a lambda header e.g. |arg, arg|
     pub fn parse_fn_block_arg(&mut self) -> PResult<'a, Arg> {
-        let pat = try!(self.parse_pat());
+        let pat = self.parse_pat()?;
         let t = if self.eat(&token::Colon) {
-            try!(self.parse_ty_sum())
+            self.parse_ty_sum()?
         } else {
             P(Ty {
                 id: ast::DUMMY_NODE_ID,
@@ -1599,7 +1620,7 @@ impl<'a> Parser<'a> {
     pub fn maybe_parse_fixed_length_of_vec(&mut self) -> PResult<'a, Option<P<ast::Expr>>> {
         if self.check(&token::Semi) {
             self.bump();
-            Ok(Some(try!(self.parse_expr())))
+            Ok(Some(self.parse_expr()?))
         } else {
             Ok(None)
         }
@@ -1673,7 +1694,7 @@ impl<'a> Parser<'a> {
             LitKind::Bool(false)
         } else {
             let token = self.bump_and_get();
-            let lit = try!(self.lit_from_token(&token));
+            let lit = self.lit_from_token(&token)?;
             lit
         };
         Ok(codemap::Spanned { node: lit, span: mk_sp(lo, self.last_span.hi) })
@@ -1684,7 +1705,7 @@ impl<'a> Parser<'a> {
         let minus_lo = self.span.lo;
         let minus_present = self.eat(&token::BinOp(token::Minus));
         let lo = self.span.lo;
-        let literal = P(try!(self.parse_lit()));
+        let literal = P(self.parse_lit()?);
         let hi = self.last_span.hi;
         let expr = self.mk_expr(lo, hi, ExprKind::Lit(literal), None);
 
@@ -1715,9 +1736,9 @@ impl<'a> Parser<'a> {
     pub fn parse_qualified_path(&mut self, mode: PathParsingMode)
                                 -> PResult<'a, (QSelf, ast::Path)> {
         let span = self.last_span;
-        let self_type = try!(self.parse_ty_sum());
+        let self_type = self.parse_ty_sum()?;
         let mut path = if self.eat_keyword(keywords::As) {
-            try!(self.parse_path(LifetimeAndTypesWithoutColons))
+            self.parse_path(LifetimeAndTypesWithoutColons)?
         } else {
             ast::Path {
                 span: span,
@@ -1731,18 +1752,18 @@ impl<'a> Parser<'a> {
             position: path.segments.len()
         };
 
-        try!(self.expect(&token::Gt));
-        try!(self.expect(&token::ModSep));
+        self.expect(&token::Gt)?;
+        self.expect(&token::ModSep)?;
 
         let segments = match mode {
             LifetimeAndTypesWithoutColons => {
-                try!(self.parse_path_segments_without_colons())
+                self.parse_path_segments_without_colons()?
             }
             LifetimeAndTypesWithColons => {
-                try!(self.parse_path_segments_with_colons())
+                self.parse_path_segments_with_colons()?
             }
             NoTypesAllowed => {
-                try!(self.parse_path_segments_without_types())
+                self.parse_path_segments_without_types()?
             }
         };
         path.segments.extend(segments);
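
For illustration, the qualified-path (`QSelf`) syntax that parse_qualified_path handles: a self type before `as Trait>::segment`. Both calls below name the same item (sketch in modern stable Rust, not taken from this diff):

fn main() {
    let v = vec![1, 2, 3];
    // Plain trait-method path vs. fully qualified (QSelf) path.
    let a: Vec<i32> = IntoIterator::into_iter(v.clone()).collect();
    let b: Vec<i32> = <Vec<i32> as IntoIterator>::into_iter(v).collect();
    assert_eq!(a, b);
}
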
@@ -1774,13 +1795,13 @@ impl<'a> Parser<'a> {
         // A bound set is a set of type parameter bounds.
         let segments = match mode {
             LifetimeAndTypesWithoutColons => {
-                try!(self.parse_path_segments_without_colons())
+                self.parse_path_segments_without_colons()?
             }
             LifetimeAndTypesWithColons => {
-                try!(self.parse_path_segments_with_colons())
+                self.parse_path_segments_with_colons()?
             }
             NoTypesAllowed => {
-                try!(self.parse_path_segments_without_types())
+                self.parse_path_segments_without_types()?
             }
         };
 
@@ -1803,11 +1824,11 @@ impl<'a> Parser<'a> {
         let mut segments = Vec::new();
         loop {
             // First, parse an identifier.
-            let identifier = try!(self.parse_ident_or_self_type());
+            let identifier = self.parse_ident_or_self_type()?;
 
             // Parse types, optionally.
             let parameters = if self.eat_lt() {
-                let (lifetimes, types, bindings) = try!(self.parse_generic_values_after_lt());
+                let (lifetimes, types, bindings) = self.parse_generic_values_after_lt()?;
 
                 ast::PathParameters::AngleBracketed(ast::AngleBracketedParameterData {
                     lifetimes: lifetimes,
@@ -1817,13 +1838,13 @@ impl<'a> Parser<'a> {
             } else if self.eat(&token::OpenDelim(token::Paren)) {
                 let lo = self.last_span.lo;
 
-                let inputs = try!(self.parse_seq_to_end(
+                let inputs = self.parse_seq_to_end(
                     &token::CloseDelim(token::Paren),
                     SeqSep::trailing_allowed(token::Comma),
-                    |p| p.parse_ty_sum()));
+                    |p| p.parse_ty_sum())?;
 
                 let output_ty = if self.eat(&token::RArrow) {
-                    Some(try!(self.parse_ty()))
+                    Some(self.parse_ty()?)
                 } else {
                     None
                 };
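
The parenthesized path parameters parsed above are the `Fn(Args) -> Ret` sugar used in bounds; a minimal example:

// Illustration: `Fn(i32) -> i32` is a path with parenthesized parameters
// plus an optional `-> Ret` output type, exactly the shape parsed above.
fn apply<F: Fn(i32) -> i32>(f: F, x: i32) -> i32 {
    f(x)
}

fn main() {
    assert_eq!(apply(|n| n + 1, 41), 42);
}
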
@@ -1856,7 +1877,7 @@ impl<'a> Parser<'a> {
         let mut segments = Vec::new();
         loop {
             // First, parse an identifier.
-            let identifier = try!(self.parse_ident_or_self_type());
+            let identifier = self.parse_ident_or_self_type()?;
 
             // If we do not see a `::`, stop.
             if !self.eat(&token::ModSep) {
@@ -1870,7 +1891,7 @@ impl<'a> Parser<'a> {
             // Check for a type segment.
             if self.eat_lt() {
                 // Consumed `a::b::<`, go look for types
-                let (lifetimes, types, bindings) = try!(self.parse_generic_values_after_lt());
+                let (lifetimes, types, bindings) = self.parse_generic_values_after_lt()?;
                 let parameters = ast::AngleBracketedParameterData {
                     lifetimes: lifetimes,
                     types: P::from_vec(types),
@@ -1902,7 +1923,7 @@ impl<'a> Parser<'a> {
         let mut segments = Vec::new();
         loop {
             // First, parse an identifier.
-            let identifier = try!(self.parse_ident_or_self_type());
+            let identifier = self.parse_ident_or_self_type()?;
 
             // Assemble and push the result.
             segments.push(ast::PathSegment {
@@ -1921,7 +1942,7 @@ impl<'a> Parser<'a> {
     pub fn parse_opt_lifetime(&mut self) -> PResult<'a, Option<ast::Lifetime>> {
         match self.token {
             token::Lifetime(..) => {
-                Ok(Some(try!(self.parse_lifetime())))
+                Ok(Some(self.parse_lifetime()?))
             }
             _ => {
                 Ok(None)
@@ -1956,10 +1977,10 @@ impl<'a> Parser<'a> {
         loop {
             match self.token {
                 token::Lifetime(_) => {
-                    let lifetime = try!(self.parse_lifetime());
+                    let lifetime = self.parse_lifetime()?;
                     let bounds =
                         if self.eat(&token::Colon) {
-                            try!(self.parse_lifetimes(token::BinOp(token::Plus)))
+                            self.parse_lifetimes(token::BinOp(token::Plus))?
                         } else {
                             Vec::new()
                         };
@@ -1999,7 +2020,7 @@ impl<'a> Parser<'a> {
         loop {
             match self.token {
                 token::Lifetime(_) => {
-                    res.push(try!(self.parse_lifetime()));
+                    res.push(self.parse_lifetime()?);
                 }
                 _ => {
                     return Ok(res);
@@ -2026,10 +2047,10 @@ impl<'a> Parser<'a> {
     /// Parse ident COLON expr
     pub fn parse_field(&mut self) -> PResult<'a, Field> {
         let lo = self.span.lo;
-        let i = try!(self.parse_ident());
+        let i = self.parse_ident()?;
         let hi = self.last_span.hi;
-        try!(self.expect(&token::Colon));
-        let e = try!(self.parse_expr());
+        self.expect(&token::Colon)?;
+        let e = self.parse_expr()?;
         Ok(ast::Field {
             ident: spanned(lo, hi, i),
             span: mk_sp(lo, e.span.hi),
@@ -2073,9 +2094,17 @@ impl<'a> Parser<'a> {
 
     pub fn mk_range(&mut self,
                     start: Option<P<Expr>>,
-                    end: Option<P<Expr>>)
-                    -> ast::ExprKind {
-        ExprKind::Range(start, end)
+                    end: Option<P<Expr>>,
+                    limits: RangeLimits)
+                    -> PResult<'a, ast::ExprKind> {
+        if end.is_none() && limits == RangeLimits::Closed {
+            Err(self.span_fatal_help(self.span,
+                                     "inclusive range with no end",
+                                     "inclusive ranges must be bounded at the end \
+                                      (`...b` or `a...b`)"))
+        } else {
+            Ok(ExprKind::Range(start, end, limits))
+        }
     }
 
     pub fn mk_field(&mut self, expr: P<Expr>, ident: ast::SpannedIdent) -> ast::ExprKind {
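
A standalone sketch of the new mk_range check above (not the compiler's actual types): a closed (inclusive) range must have an end expression, so `a...` and bare `...` are rejected.

#[derive(PartialEq)]
enum RangeLimits { HalfOpen, Closed }

fn check_range(end: Option<&str>, limits: RangeLimits) -> Result<(), &'static str> {
    if end.is_none() && limits == RangeLimits::Closed {
        Err("inclusive ranges must be bounded at the end (`...b` or `a...b`)")
    } else {
        Ok(())
    }
}

fn main() {
    assert!(check_range(Some("b"), RangeLimits::Closed).is_ok()); // a...b
    assert!(check_range(None, RangeLimits::HalfOpen).is_ok());    // a..
    assert!(check_range(None, RangeLimits::Closed).is_err());     // a... (error)
}
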
@@ -2154,7 +2183,7 @@ impl<'a> Parser<'a> {
             token::OpenDelim(token::Paren) => {
                 self.bump();
 
-                let attrs = try!(self.parse_inner_attributes())
+                let attrs = self.parse_inner_attributes()?
                     .into_thin_attrs()
                     .prepend(attrs);
 
@@ -2163,9 +2192,9 @@ impl<'a> Parser<'a> {
                 let mut es = vec![];
                 let mut trailing_comma = false;
                 while self.token != token::CloseDelim(token::Paren) {
-                    es.push(try!(self.parse_expr()));
-                    try!(self.commit_expr(&es.last().unwrap(), &[],
-                                     &[token::Comma, token::CloseDelim(token::Paren)]));
+                    es.push(self.parse_expr()?);
+                    self.commit_expr(&es.last().unwrap(), &[],
+                                     &[token::Comma, token::CloseDelim(token::Paren)])?;
                     if self.check(&token::Comma) {
                         trailing_comma = true;
 
@@ -2196,14 +2225,14 @@ impl<'a> Parser<'a> {
                             ctxt: _
                          }, token::Plain) => {
                 self.bump();
-                let path = ast_util::ident_to_path(mk_sp(lo, hi), id);
+                let path = ast::Path::from_ident(mk_sp(lo, hi), id);
                 ex = ExprKind::Path(None, path);
                 hi = self.last_span.hi;
             }
             token::OpenDelim(token::Bracket) => {
                 self.bump();
 
-                let inner_attrs = try!(self.parse_inner_attributes())
+                let inner_attrs = self.parse_inner_attributes()?
                     .into_thin_attrs();
                 attrs.update(|attrs| attrs.append(inner_attrs));
 
@@ -2213,27 +2242,27 @@ impl<'a> Parser<'a> {
                     ex = ExprKind::Vec(Vec::new());
                 } else {
                     // Nonempty vector.
-                    let first_expr = try!(self.parse_expr());
+                    let first_expr = self.parse_expr()?;
                     if self.check(&token::Semi) {
                         // Repeating array syntax: [ 0; 512 ]
                         self.bump();
-                        let count = try!(self.parse_expr());
-                        try!(self.expect(&token::CloseDelim(token::Bracket)));
+                        let count = self.parse_expr()?;
+                        self.expect(&token::CloseDelim(token::Bracket))?;
                         ex = ExprKind::Repeat(first_expr, count);
                     } else if self.check(&token::Comma) {
                         // Vector with two or more elements.
                         self.bump();
-                        let remaining_exprs = try!(self.parse_seq_to_end(
+                        let remaining_exprs = self.parse_seq_to_end(
                             &token::CloseDelim(token::Bracket),
                             SeqSep::trailing_allowed(token::Comma),
-                            |p| Ok(try!(p.parse_expr()))
-                                ));
+                            |p| Ok(p.parse_expr()?)
+                        )?;
                         let mut exprs = vec!(first_expr);
                         exprs.extend(remaining_exprs);
                         ex = ExprKind::Vec(exprs);
                     } else {
                         // Vector with one element.
-                        try!(self.expect(&token::CloseDelim(token::Bracket)));
+                        self.expect(&token::CloseDelim(token::Bracket))?;
                         ex = ExprKind::Vec(vec!(first_expr));
                     }
                 }
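
For illustration, the three bracket-expression forms distinguished above:

fn main() {
    let repeat = [0u8; 4];   // `[expr; count]`        -> ExprKind::Repeat
    let many   = [1, 2, 3];  // two or more elements   -> ExprKind::Vec
    let one    = [42];       // single element, no `;` -> ExprKind::Vec
    assert_eq!(repeat.len() + many.len() + one.len(), 8);
}
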
@@ -2242,7 +2271,7 @@ impl<'a> Parser<'a> {
             _ => {
                 if self.eat_lt() {
                     let (qself, path) =
-                        try!(self.parse_qualified_path(LifetimeAndTypesWithColons));
+                        self.parse_qualified_path(LifetimeAndTypesWithColons)?;
                     hi = path.span.hi;
                     return Ok(self.mk_expr(lo, hi, ExprKind::Path(Some(qself), path), attrs));
                 }
@@ -2265,7 +2294,7 @@ impl<'a> Parser<'a> {
                     let lifetime = self.get_lifetime();
                     let lo = self.span.lo;
                     self.bump();
-                    try!(self.expect(&token::Colon));
+                    self.expect(&token::Colon)?;
                     if self.eat_keyword(keywords::While) {
                         return self.parse_while_expr(Some(lifetime), lo, attrs)
                     }
@@ -2306,7 +2335,7 @@ impl<'a> Parser<'a> {
                 }
                 if self.eat_keyword(keywords::Return) {
                     if self.token.can_begin_expr() {
-                        let e = try!(self.parse_expr());
+                        let e = self.parse_expr()?;
                         hi = e.span.hi;
                         ex = ExprKind::Ret(Some(e));
                     } else {
@@ -2334,18 +2363,18 @@ impl<'a> Parser<'a> {
                         !self.check_keyword(keywords::True) &&
                         !self.check_keyword(keywords::False) {
                     let pth =
-                        try!(self.parse_path(LifetimeAndTypesWithColons));
+                        self.parse_path(LifetimeAndTypesWithColons)?;
 
                     // `!`, as an operator, is prefix, so we know this isn't that
                     if self.check(&token::Not) {
                         // MACRO INVOCATION expression
                         self.bump();
 
-                        let delim = try!(self.expect_open_delim());
-                        let tts = try!(self.parse_seq_to_end(
+                        let delim = self.expect_open_delim()?;
+                        let tts = self.parse_seq_to_end(
                             &token::CloseDelim(delim),
                             SeqSep::none(),
-                            |p| p.parse_token_tree()));
+                            |p| p.parse_token_tree())?;
                         let hi = self.last_span.hi;
 
                         return Ok(self.mk_mac_expr(lo,
@@ -2366,7 +2395,7 @@ impl<'a> Parser<'a> {
                             let mut base = None;
 
                             let attrs = attrs.append(
-                                try!(self.parse_inner_attributes())
+                                self.parse_inner_attributes()?
                                     .into_thin_attrs());
 
                             while self.token != token::CloseDelim(token::Brace) {
@@ -2405,7 +2434,7 @@ impl<'a> Parser<'a> {
                             }
 
                             hi = self.span.hi;
-                            try!(self.expect(&token::CloseDelim(token::Brace)));
+                            self.expect(&token::CloseDelim(token::Brace))?;
                             ex = ExprKind::Struct(pth, fields, base);
                             return Ok(self.mk_expr(lo, hi, ex, attrs));
                         }
@@ -2415,7 +2444,7 @@ impl<'a> Parser<'a> {
                     ex = ExprKind::Path(None, pth);
                 } else {
                     // other literal expression
-                    let lit = try!(self.parse_lit());
+                    let lit = self.parse_lit()?;
                     hi = lit.span.hi;
                     ex = ExprKind::Lit(P(lit));
                 }
@@ -2441,12 +2470,12 @@ impl<'a> Parser<'a> {
                             -> PResult<'a, P<Expr>> {
 
         let outer_attrs = attrs;
-        try!(self.expect(&token::OpenDelim(token::Brace)));
+        self.expect(&token::OpenDelim(token::Brace))?;
 
-        let inner_attrs = try!(self.parse_inner_attributes()).into_thin_attrs();
+        let inner_attrs = self.parse_inner_attributes()?.into_thin_attrs();
         let attrs = outer_attrs.append(inner_attrs);
 
-        let blk = try!(self.parse_block_tail(lo, blk_mode));
+        let blk = self.parse_block_tail(lo, blk_mode)?;
         return Ok(self.mk_expr(blk.span.lo, blk.span.hi, ExprKind::Block(blk), attrs));
     }
 
@@ -2454,10 +2483,10 @@ impl<'a> Parser<'a> {
     pub fn parse_dot_or_call_expr(&mut self,
                                   already_parsed_attrs: Option<ThinAttributes>)
                                   -> PResult<'a, P<Expr>> {
-        let attrs = try!(self.parse_or_use_outer_attributes(already_parsed_attrs));
+        let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?;
 
         let b = self.parse_bottom_expr();
-        let (span, b) = try!(self.interpolated_or_expr_span(b));
+        let (span, b) = self.interpolated_or_expr_span(b)?;
         self.parse_dot_or_call_expr_with(b, span.lo, attrs)
     }
 
@@ -2500,8 +2529,8 @@ impl<'a> Parser<'a> {
                         lo: BytePos)
                         -> PResult<'a, P<Expr>> {
         let (_, tys, bindings) = if self.eat(&token::ModSep) {
-            try!(self.expect_lt());
-            try!(self.parse_generic_values_after_lt())
+            self.expect_lt()?;
+            self.parse_generic_values_after_lt()?
         } else {
             (Vec::new(), Vec::new(), Vec::new())
         };
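
For illustration, the `::<...>` ("turbofish") form this code expects after a `.`-segment in expression position:

fn main() {
    let x = "3.25".parse::<f64>().unwrap(); // expr.method::<T>()
    let v = Vec::<u8>::with_capacity(16);   // path::<T> form elsewhere
    assert_eq!(x, 3.25);
    assert!(v.is_empty());
}
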
@@ -2514,12 +2543,12 @@ impl<'a> Parser<'a> {
         Ok(match self.token {
             // expr.f() method call.
             token::OpenDelim(token::Paren) => {
-                let mut es = try!(self.parse_unspanned_seq(
+                let mut es = self.parse_unspanned_seq(
                     &token::OpenDelim(token::Paren),
                     &token::CloseDelim(token::Paren),
                     SeqSep::trailing_allowed(token::Comma),
-                    |p| Ok(try!(p.parse_expr()))
-                ));
+                    |p| Ok(p.parse_expr()?)
+                )?;
                 let hi = self.last_span.hi;
 
                 es.insert(0, self_value);
@@ -2547,6 +2576,12 @@ impl<'a> Parser<'a> {
         let mut e = e0;
         let mut hi;
         loop {
+            // expr?
+            while self.eat(&token::Question) {
+                let hi = self.span.hi;
+                e = self.mk_expr(lo, hi, ExprKind::Try(e), None);
+            }
+
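
The new loop above makes `?` a postfix operator producing ExprKind::Try, so it chains with other postfix forms. A minimal example of the surface syntax (still gated behind `#![feature(question_mark)]` when this diff landed):

use std::num::ParseIntError;

// Each `?` wraps the expression to its left, matching the
// `while self.eat(&token::Question)` loop above.
fn add(a: &str, b: &str) -> Result<i32, ParseIntError> {
    Ok(a.parse::<i32>()? + b.parse::<i32>()?)
}

fn main() {
    assert_eq!(add("20", "22"), Ok(42));
}
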
             // expr.f
             if self.eat(&token::Dot) {
                 match self.token {
@@ -2555,7 +2590,7 @@ impl<'a> Parser<'a> {
                     hi = self.span.hi;
                     self.bump();
 
-                    e = try!(self.parse_dot_suffix(i, mk_sp(dot_pos, hi), e, lo));
+                    e = self.parse_dot_suffix(i, mk_sp(dot_pos, hi), e, lo)?;
                   }
                   token::Literal(token::Integer(n), suf) => {
                     let sp = self.span;
@@ -2605,9 +2640,9 @@ impl<'a> Parser<'a> {
                     self.span_err(self.span, &format!("unexpected token: `{}`", actual));
 
                     let dot_pos = self.last_span.hi;
-                    e = try!(self.parse_dot_suffix(special_idents::invalid,
-                                                   mk_sp(dot_pos, dot_pos),
-                                                   e, lo));
+                    e = self.parse_dot_suffix(special_idents::invalid,
+                                              mk_sp(dot_pos, dot_pos),
+                                              e, lo)?;
                   }
                 }
                 continue;
@@ -2616,12 +2651,12 @@ impl<'a> Parser<'a> {
             match self.token {
               // expr(...)
               token::OpenDelim(token::Paren) => {
-                let es = try!(self.parse_unspanned_seq(
+                let es = self.parse_unspanned_seq(
                     &token::OpenDelim(token::Paren),
                     &token::CloseDelim(token::Paren),
                     SeqSep::trailing_allowed(token::Comma),
-                    |p| Ok(try!(p.parse_expr()))
-                ));
+                    |p| Ok(p.parse_expr()?)
+                )?;
                 hi = self.last_span.hi;
 
                 let nd = self.mk_call(e, es);
@@ -2632,9 +2667,9 @@ impl<'a> Parser<'a> {
               // Could be either an index expression or a slicing expression.
               token::OpenDelim(token::Bracket) => {
                 self.bump();
-                let ix = try!(self.parse_expr());
+                let ix = self.parse_expr()?;
                 hi = self.span.hi;
-                try!(self.commit_expr_expecting(&ix, token::CloseDelim(token::Bracket)));
+                self.commit_expr_expecting(&ix, token::CloseDelim(token::Bracket))?;
                 let index = self.mk_index(e, ix);
                 e = self.mk_expr(lo, hi, index, None)
               }
@@ -2652,13 +2687,13 @@ impl<'a> Parser<'a> {
                 self.bump();
 
                 if self.token == token::OpenDelim(token::Paren) {
-                    let Spanned { node: seq, span: seq_span } = try!(self.parse_seq(
+                    let Spanned { node: seq, span: seq_span } = self.parse_seq(
                         &token::OpenDelim(token::Paren),
                         &token::CloseDelim(token::Paren),
                         SeqSep::none(),
                         |p| p.parse_token_tree()
-                    ));
-                    let (sep, repeat) = try!(self.parse_sep_and_kleene_op());
+                    )?;
+                    let (sep, repeat) = self.parse_sep_and_kleene_op()?;
                     let name_num = macro_parser::count_names(&seq);
                     return Ok(TokenTree::Sequence(mk_sp(sp.lo, seq_span.hi),
                                       Rc::new(SequenceRepetition {
@@ -2673,7 +2708,7 @@ impl<'a> Parser<'a> {
                 } else {
                     sp = mk_sp(sp.lo, self.span.hi);
                     let namep = match self.token { token::Ident(_, p) => p, _ => token::Plain };
-                    let name = try!(self.parse_ident());
+                    let name = self.parse_ident()?;
                     (name, namep)
                 }
             }
@@ -2690,7 +2725,7 @@ impl<'a> Parser<'a> {
             self.bump();
             sp = mk_sp(sp.lo, self.span.hi);
             let kindp = match self.token { token::Ident(_, p) => p, _ => token::Plain };
-            let nt_kind = try!(self.parse_ident());
+            let nt_kind = self.parse_ident()?;
             Ok(TokenTree::Token(sp, MatchNt(name, nt_kind, namep, kindp)))
         } else {
             Ok(TokenTree::Token(sp, SubstNt(name, namep)))
@@ -2725,13 +2760,13 @@ impl<'a> Parser<'a> {
             }
         };
 
-        match try!(parse_kleene_op(self)) {
+        match parse_kleene_op(self)? {
             Some(kleene_op) => return Ok((None, kleene_op)),
             None => {}
         }
 
         let separator = self.bump_and_get();
-        match try!(parse_kleene_op(self)) {
+        match parse_kleene_op(self)? {
             Some(zerok) => Ok((Some(separator), zerok)),
             None => return Err(self.fatal("expected `*` or `+`"))
         }
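
For illustration, the separator and Kleene operator this function recognizes in macro_rules! repetitions, e.g. `),*` below (separator `,`, operator `*`):

macro_rules! sum {
    ($($x:expr),*) => { 0 $(+ $x)* };
}

fn main() {
    assert_eq!(sum!(1, 2, 3), 6);
}
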
@@ -2751,8 +2786,8 @@ impl<'a> Parser<'a> {
                 let mut err: DiagnosticBuilder<'a> =
                     self.diagnostic().struct_span_err(self.span,
                                                       "this file contains an un-closed delimiter");
-                for sp in &self.open_braces {
-                    err.span_help(*sp, "did you mean to close this delimiter?");
+                for &(_, sp) in &self.open_braces {
+                    err.span_help(sp, "did you mean to close this delimiter?");
                 }
 
                 Err(err)
@@ -2762,23 +2797,66 @@ impl<'a> Parser<'a> {
                 let pre_span = self.span;
 
                 // Parse the open delimiter.
-                self.open_braces.push(self.span);
+                self.open_braces.push((delim, self.span));
                 let open_span = self.span;
                 self.bump();
 
-                // Parse the token trees within the delimiters
-                let tts = self.parse_seq_to_before_end(&token::CloseDelim(delim),
-                                                       SeqSep::none(),
-                                                       |p| p.parse_token_tree());
+                // Parse the token trees within the delimiters.
+                // We stop at any delimiter so we can try to recover if the user
+                // uses an incorrect delimiter.
+                let tts = self.parse_seq_to_before_tokens(&[&token::CloseDelim(token::Brace),
+                                                            &token::CloseDelim(token::Paren),
+                                                            &token::CloseDelim(token::Bracket)],
+                                                          SeqSep::none(),
+                                                          |p| p.parse_token_tree(),
+                                                          |mut e| e.emit());
 
-                // Parse the close delimiter.
                 let close_span = self.span;
-                self.bump();
-                self.open_braces.pop().unwrap();
-
                 // Expand to cover the entire delimited token tree
                 let span = Span { hi: close_span.hi, ..pre_span };
 
+                match self.token {
+                    // Correct delimiter.
+                    token::CloseDelim(d) if d == delim => {
+                        self.open_braces.pop().unwrap();
+
+                        // Parse the close delimiter.
+                        self.bump();
+                    }
+                    // Incorrect delimiter.
+                    token::CloseDelim(other) => {
+                        let token_str = self.this_token_to_string();
+                        let mut err = self.diagnostic().struct_span_err(self.span,
+                            &format!("incorrect close delimiter: `{}`", token_str));
+                        // This is a conservative error: only report the last unclosed delimiter.
+                        // The previous unclosed delimiters could actually be closed! The parser
+                        // just hasn't gotten to them yet.
+                        if let Some(&(_, sp)) = self.open_braces.last() {
+                            err.span_note(sp, "unclosed delimiter");
+                        };
+                        err.emit();
+
+                        self.open_braces.pop().unwrap();
+
+                        // If the incorrect delimiter matches an earlier opening
+                        // delimiter, then don't consume it (it can be used to
+                        // close the earlier one). Otherwise, consume it.
+                        // E.g., we try to recover from:
+                        // fn foo() {
+                        //     bar(baz(
+                        // }  // Incorrect delimiter but matches the earlier `{`
+                        if !self.open_braces.iter().any(|&(b, _)| b == other) {
+                            self.bump();
+                        }
+                    }
+                    token::Eof => {
+                        // Silently recover; the EOF token will be seen again
+                        // and an error emitted then. Thus we don't pop from
+                        // self.open_braces here.
+                    },
+                    _ => unreachable!(),
+                }
+
                 Ok(TokenTree::Delimited(span, Rc::new(Delimited {
                     delim: delim,
                     open_span: open_span,
@@ -2794,16 +2872,11 @@ impl<'a> Parser<'a> {
                 maybe_whole!(deref self, NtTT);
                 match self.token {
                     token::CloseDelim(_) => {
+                        // An unexpected closing delimiter (i.e., there is no
+                        // matching opening delimiter).
                         let token_str = self.this_token_to_string();
-                        let mut err = self.diagnostic().struct_span_err(self.span,
-                            &format!("incorrect close delimiter: `{}`", token_str));
-                        // This is a conservative error: only report the last unclosed delimiter.
-                        // The previous unclosed delimiters could actually be closed! The parser
-                        // just hasn't gotten to them yet.
-                        if let Some(&sp) = self.open_braces.last() {
-                            err.span_note(sp, "unclosed delimiter");
-                        };
-
+                        let err = self.diagnostic().struct_span_err(self.span,
+                            &format!("unexpected close delimiter: `{}`", token_str));
                         Err(err)
                     },
                     /* we ought to allow different depths of unquotation */
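
A standalone sketch of the recovery rule described in the two hunks above (an abstraction, not the parser's actual code): a mismatched closing delimiter is consumed only if it cannot close an earlier, still-open delimiter.

#[derive(Clone, Copy, PartialEq)]
enum Delim { Paren, Brace, Bracket }

// `open_stack` holds the delimiters still open outside the current one.
fn should_consume(open_stack: &[Delim], incorrect_close: Delim) -> bool {
    !open_stack.iter().any(|&d| d == incorrect_close)
}

fn main() {
    // Inside `{ (` we hit `}`: keep it, it can close the outer `{`.
    assert!(!should_consume(&[Delim::Brace, Delim::Paren], Delim::Brace));
    // Inside `( (` we hit `]`: nothing it could close, so swallow it.
    assert!(should_consume(&[Delim::Paren, Delim::Paren], Delim::Bracket));
}
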
@@ -2823,7 +2896,7 @@ impl<'a> Parser<'a> {
     pub fn parse_all_token_trees(&mut self) -> PResult<'a, Vec<TokenTree>> {
         let mut tts = Vec::new();
         while self.token != token::Eof {
-            tts.push(try!(self.parse_token_tree()));
+            tts.push(self.parse_token_tree()?);
         }
         Ok(tts)
     }
@@ -2832,7 +2905,7 @@ impl<'a> Parser<'a> {
     pub fn parse_prefix_expr(&mut self,
                              already_parsed_attrs: Option<ThinAttributes>)
                              -> PResult<'a, P<Expr>> {
-        let attrs = try!(self.parse_or_use_outer_attributes(already_parsed_attrs));
+        let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?;
         let lo = self.span.lo;
         let hi;
         // Note: when adding new unary operators, don't forget to adjust Token::can_begin_expr()
@@ -2840,39 +2913,39 @@ impl<'a> Parser<'a> {
             token::Not => {
                 self.bump();
                 let e = self.parse_prefix_expr(None);
-                let (span, e) = try!(self.interpolated_or_expr_span(e));
+                let (span, e) = self.interpolated_or_expr_span(e)?;
                 hi = span.hi;
                 self.mk_unary(UnOp::Not, e)
             }
             token::BinOp(token::Minus) => {
                 self.bump();
                 let e = self.parse_prefix_expr(None);
-                let (span, e) = try!(self.interpolated_or_expr_span(e));
+                let (span, e) = self.interpolated_or_expr_span(e)?;
                 hi = span.hi;
                 self.mk_unary(UnOp::Neg, e)
             }
             token::BinOp(token::Star) => {
                 self.bump();
                 let e = self.parse_prefix_expr(None);
-                let (span, e) = try!(self.interpolated_or_expr_span(e));
+                let (span, e) = self.interpolated_or_expr_span(e)?;
                 hi = span.hi;
                 self.mk_unary(UnOp::Deref, e)
             }
             token::BinOp(token::And) | token::AndAnd => {
-                try!(self.expect_and());
-                let m = try!(self.parse_mutability());
+                self.expect_and()?;
+                let m = self.parse_mutability()?;
                 let e = self.parse_prefix_expr(None);
-                let (span, e) = try!(self.interpolated_or_expr_span(e));
+                let (span, e) = self.interpolated_or_expr_span(e)?;
                 hi = span.hi;
                 ExprKind::AddrOf(m, e)
             }
             token::Ident(..) if self.token.is_keyword(keywords::In) => {
                 self.bump();
-                let place = try!(self.parse_expr_res(
+                let place = self.parse_expr_res(
                     Restrictions::RESTRICTION_NO_STRUCT_LITERAL,
                     None,
-                ));
-                let blk = try!(self.parse_block());
+                )?;
+                let blk = self.parse_block()?;
                 let span = blk.span;
                 hi = span.hi;
                 let blk_expr = self.mk_expr(span.lo, span.hi, ExprKind::Block(blk),
@@ -2882,7 +2955,7 @@ impl<'a> Parser<'a> {
             token::Ident(..) if self.token.is_keyword(keywords::Box) => {
                 self.bump();
                 let e = self.parse_prefix_expr(None);
-                let (span, e) = try!(self.interpolated_or_expr_span(e));
+                let (span, e) = self.interpolated_or_expr_span(e)?;
                 hi = span.hi;
                 ExprKind::Box(e)
             }
@@ -2913,14 +2986,13 @@ impl<'a> Parser<'a> {
                 LhsExpr::AttributesParsed(attrs) => Some(attrs),
                 _ => None,
             };
-            if self.token == token::DotDot {
+            if self.token == token::DotDot || self.token == token::DotDotDot {
                 return self.parse_prefix_range_expr(attrs);
             } else {
-                try!(self.parse_prefix_expr(attrs))
+                self.parse_prefix_expr(attrs)?
             }
         };
 
-
         if self.expr_is_complete(&lhs) {
             // Semi-statement forms are odd. See https://github.com/rust-lang/rust/issues/29071
             return Ok(lhs);
@@ -2949,45 +3021,51 @@ impl<'a> Parser<'a> {
             }
             // Special cases:
             if op == AssocOp::As {
-                let rhs = try!(self.parse_ty());
+                let rhs = self.parse_ty()?;
                 lhs = self.mk_expr(lhs_span.lo, rhs.span.hi,
                                    ExprKind::Cast(lhs, rhs), None);
                 continue
             } else if op == AssocOp::Colon {
-                let rhs = try!(self.parse_ty());
+                let rhs = self.parse_ty()?;
                 lhs = self.mk_expr(lhs_span.lo, rhs.span.hi,
                                    ExprKind::Type(lhs, rhs), None);
                 continue
-            } else if op == AssocOp::DotDot {
-                    // If we didn’t have to handle `x..`, it would be pretty easy to generalise
-                    // it to the Fixity::None code.
-                    //
-                    // We have 2 alternatives here: `x..y` and `x..` The other two variants are
-                    // handled with `parse_prefix_range_expr` call above.
-                    let rhs = if self.is_at_start_of_range_notation_rhs() {
-                        let rhs = self.parse_assoc_expr_with(op.precedence() + 1,
-                                                             LhsExpr::NotYetParsed);
-                        match rhs {
-                            Ok(e) => Some(e),
-                            Err(mut e) => {
-                                e.cancel();
-                                None
-                            }
+            } else if op == AssocOp::DotDot || op == AssocOp::DotDotDot {
+                // If we didn’t have to handle `x..`/`x...`, it would be pretty easy to
+                // generalise it to the Fixity::None code.
+                //
+                // We have 2 alternatives here: `x..y`/`x...y` and `x..`/`x...`. The other
+                // two variants are handled with `parse_prefix_range_expr` call above.
+                let rhs = if self.is_at_start_of_range_notation_rhs() {
+                    let rhs = self.parse_assoc_expr_with(op.precedence() + 1,
+                                                         LhsExpr::NotYetParsed);
+                    match rhs {
+                        Ok(e) => Some(e),
+                        Err(mut e) => {
+                            e.cancel();
+                            None
                         }
-                    } else {
-                        None
-                    };
-                    let (lhs_span, rhs_span) = (lhs_span, if let Some(ref x) = rhs {
-                        x.span
-                    } else {
-                        cur_op_span
-                    });
-                    let r = self.mk_range(Some(lhs), rhs);
-                    lhs = self.mk_expr(lhs_span.lo, rhs_span.hi, r, None);
-                    break
+                    }
+                } else {
+                    None
+                };
+                let (lhs_span, rhs_span) = (lhs.span, if let Some(ref x) = rhs {
+                    x.span
+                } else {
+                    cur_op_span
+                });
+                let limits = if op == AssocOp::DotDot {
+                    RangeLimits::HalfOpen
+                } else {
+                    RangeLimits::Closed
+                };
+
+                let r = try!(self.mk_range(Some(lhs), rhs, limits));
+                lhs = self.mk_expr(lhs_span.lo, rhs_span.hi, r, None);
+                break
             }
 
-            let rhs = try!(match op.fixity() {
+            let rhs = match op.fixity() {
                 Fixity::Right => self.with_res(
                     restrictions - Restrictions::RESTRICTION_STMT_EXPR,
                     |this| {
@@ -3008,7 +3086,7 @@ impl<'a> Parser<'a> {
                         this.parse_assoc_expr_with(op.precedence() + 1,
                             LhsExpr::NotYetParsed)
                 }),
-            });
+            }?;
 
             lhs = match op {
                 AssocOp::Add | AssocOp::Subtract | AssocOp::Multiply | AssocOp::Divide |
@@ -3042,8 +3120,8 @@ impl<'a> Parser<'a> {
                     let aopexpr = self.mk_assign_op(codemap::respan(cur_op_span, aop), lhs, rhs);
                     self.mk_expr(lhs_span.lo, rhs_span.hi, aopexpr, None)
                 }
-                AssocOp::As | AssocOp::Colon | AssocOp::DotDot => {
-                    self.bug("As, Colon or DotDot branch reached")
+                AssocOp::As | AssocOp::Colon | AssocOp::DotDot | AssocOp::DotDotDot => {
+                    self.bug("As, Colon, DotDot or DotDotDot branch reached")
                 }
             };
 
@@ -3073,28 +3151,37 @@ impl<'a> Parser<'a> {
         }
     }
 
-    /// Parse prefix-forms of range notation: `..expr` and `..`
+    /// Parse prefix-forms of range notation: `..expr`, `..`, `...expr`
     fn parse_prefix_range_expr(&mut self,
                                already_parsed_attrs: Option<ThinAttributes>)
                                -> PResult<'a, P<Expr>> {
-        debug_assert!(self.token == token::DotDot);
-        let attrs = try!(self.parse_or_use_outer_attributes(already_parsed_attrs));
+        debug_assert!(self.token == token::DotDot || self.token == token::DotDotDot);
+        let tok = self.token.clone();
+        let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?;
         let lo = self.span.lo;
         let mut hi = self.span.hi;
         self.bump();
         let opt_end = if self.is_at_start_of_range_notation_rhs() {
-            // RHS must be parsed with more associativity than DotDot.
-            let next_prec = AssocOp::from_token(&token::DotDot).unwrap().precedence() + 1;
-            Some(try!(self.parse_assoc_expr_with(next_prec,
-                                                 LhsExpr::NotYetParsed)
-            .map(|x|{
-                hi = x.span.hi;
-                x
-            })))
+            // RHS must be parsed with more associativity than the dots.
+            let next_prec = AssocOp::from_token(&tok).unwrap().precedence() + 1;
+            Some(self.parse_assoc_expr_with(next_prec,
+                                            LhsExpr::NotYetParsed)
+                .map(|x|{
+                    hi = x.span.hi;
+                    x
+                })?)
          } else {
             None
         };
-        let r = self.mk_range(None, opt_end);
+        let limits = if tok == token::DotDot {
+            RangeLimits::HalfOpen
+        } else {
+            RangeLimits::Closed
+        };
+
+        let r = try!(self.mk_range(None,
+                                   opt_end,
+                                   limits));
         Ok(self.mk_expr(lo, hi, r, attrs))
     }
 
@@ -3116,12 +3203,12 @@ impl<'a> Parser<'a> {
             return self.parse_if_let_expr(attrs);
         }
         let lo = self.last_span.lo;
-        let cond = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL, None));
-        let thn = try!(self.parse_block());
+        let cond = self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL, None)?;
+        let thn = self.parse_block()?;
         let mut els: Option<P<Expr>> = None;
         let mut hi = thn.span.hi;
         if self.eat_keyword(keywords::Else) {
-            let elexpr = try!(self.parse_else_expr());
+            let elexpr = self.parse_else_expr()?;
             hi = elexpr.span.hi;
             els = Some(elexpr);
         }
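
For illustration, why the condition above is parsed with RESTRICTION_NO_STRUCT_LITERAL: a struct literal right after `if` would be ambiguous with the block that follows, so it has to be parenthesized to opt back in.

#[derive(PartialEq)]
struct Point { x: i32, y: i32 }

fn main() {
    let p = Point { x: 1, y: 2 };
    // `if p == Point { x: 1, y: 2 } { ... }` would not parse: the `{` after
    // `Point` would be taken as the start of the `if` block.
    if p == (Point { x: 1, y: 2 }) {
        println!("p = ({}, {})", p.x, p.y);
    }
}
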
@@ -3132,13 +3219,13 @@ impl<'a> Parser<'a> {
     pub fn parse_if_let_expr(&mut self, attrs: ThinAttributes)
                              -> PResult<'a, P<Expr>> {
         let lo = self.last_span.lo;
-        try!(self.expect_keyword(keywords::Let));
-        let pat = try!(self.parse_pat());
-        try!(self.expect(&token::Eq));
-        let expr = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL, None));
-        let thn = try!(self.parse_block());
+        self.expect_keyword(keywords::Let)?;
+        let pat = self.parse_pat()?;
+        self.expect(&token::Eq)?;
+        let expr = self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL, None)?;
+        let thn = self.parse_block()?;
         let (hi, els) = if self.eat_keyword(keywords::Else) {
-            let expr = try!(self.parse_else_expr());
+            let expr = self.parse_else_expr()?;
             (expr.span.hi, Some(expr))
         } else {
             (thn.span.hi, None)
@@ -3152,12 +3239,12 @@ impl<'a> Parser<'a> {
                              attrs: ThinAttributes)
                              -> PResult<'a, P<Expr>>
     {
-        let decl = try!(self.parse_fn_block_decl());
+        let decl = self.parse_fn_block_decl()?;
         let body = match decl.output {
             FunctionRetTy::Default(_) => {
                 // If no explicit return type is given, parse any
                 // expr and wrap it up in a dummy block:
-                let body_expr = try!(self.parse_expr());
+                let body_expr = self.parse_expr()?;
                 P(ast::Block {
                     id: ast::DUMMY_NODE_ID,
                     stmts: vec![],
@@ -3169,7 +3256,7 @@ impl<'a> Parser<'a> {
             _ => {
                 // If an explicit return type is given, require a
                 // block to appear (RFC 968).
-                try!(self.parse_block())
+                self.parse_block()?
             }
         };
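
A minimal example of the rule enforced above (RFC 968): a closure with an explicit return type must use a block body.

fn main() {
    let implicit = |x: i32| x + 1;            // bare expression body is fine
    let explicit = |x: i32| -> i32 { x + 1 }; // `-> i32` requires `{ ... }`
    assert_eq!(implicit(1), explicit(1));
}
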
 
@@ -3184,7 +3271,7 @@ impl<'a> Parser<'a> {
         if self.eat_keyword(keywords::If) {
             return self.parse_if_expr(None);
         } else {
-            let blk = try!(self.parse_block());
+            let blk = self.parse_block()?;
             return Ok(self.mk_expr(blk.span.lo, blk.span.hi, ExprKind::Block(blk), None));
         }
     }
@@ -3195,10 +3282,10 @@ impl<'a> Parser<'a> {
                           attrs: ThinAttributes) -> PResult<'a, P<Expr>> {
         // Parse: `for <src_pat> in <src_expr> <src_loop_block>`
 
-        let pat = try!(self.parse_pat());
-        try!(self.expect_keyword(keywords::In));
-        let expr = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL, None));
-        let (iattrs, loop_block) = try!(self.parse_inner_attrs_and_block());
+        let pat = self.parse_pat()?;
+        self.expect_keyword(keywords::In)?;
+        let expr = self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL, None)?;
+        let (iattrs, loop_block) = self.parse_inner_attrs_and_block()?;
         let attrs = attrs.append(iattrs.into_thin_attrs());
 
         let hi = self.last_span.hi;
@@ -3215,8 +3302,8 @@ impl<'a> Parser<'a> {
         if self.token.is_keyword(keywords::Let) {
             return self.parse_while_let_expr(opt_ident, span_lo, attrs);
         }
-        let cond = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL, None));
-        let (iattrs, body) = try!(self.parse_inner_attrs_and_block());
+        let cond = self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL, None)?;
+        let (iattrs, body) = self.parse_inner_attrs_and_block()?;
         let attrs = attrs.append(iattrs.into_thin_attrs());
         let hi = body.span.hi;
         return Ok(self.mk_expr(span_lo, hi, ExprKind::While(cond, body, opt_ident),
@@ -3227,11 +3314,11 @@ impl<'a> Parser<'a> {
     pub fn parse_while_let_expr(&mut self, opt_ident: Option<ast::Ident>,
                                 span_lo: BytePos,
                                 attrs: ThinAttributes) -> PResult<'a, P<Expr>> {
-        try!(self.expect_keyword(keywords::Let));
-        let pat = try!(self.parse_pat());
-        try!(self.expect(&token::Eq));
-        let expr = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL, None));
-        let (iattrs, body) = try!(self.parse_inner_attrs_and_block());
+        self.expect_keyword(keywords::Let)?;
+        let pat = self.parse_pat()?;
+        self.expect(&token::Eq)?;
+        let expr = self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL, None)?;
+        let (iattrs, body) = self.parse_inner_attrs_and_block()?;
         let attrs = attrs.append(iattrs.into_thin_attrs());
         let hi = body.span.hi;
         return Ok(self.mk_expr(span_lo, hi, ExprKind::WhileLet(pat, expr, body, opt_ident), attrs));
@@ -3241,7 +3328,7 @@ impl<'a> Parser<'a> {
     pub fn parse_loop_expr(&mut self, opt_ident: Option<ast::Ident>,
                            span_lo: BytePos,
                            attrs: ThinAttributes) -> PResult<'a, P<Expr>> {
-        let (iattrs, body) = try!(self.parse_inner_attrs_and_block());
+        let (iattrs, body) = self.parse_inner_attrs_and_block()?;
         let attrs = attrs.append(iattrs.into_thin_attrs());
         let hi = body.span.hi;
         Ok(self.mk_expr(span_lo, hi, ExprKind::Loop(body, opt_ident), attrs))
@@ -3251,8 +3338,8 @@ impl<'a> Parser<'a> {
     fn parse_match_expr(&mut self, attrs: ThinAttributes) -> PResult<'a, P<Expr>> {
         let match_span = self.last_span;
         let lo = self.last_span.lo;
-        let discriminant = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL,
-                                                    None));
+        let discriminant = self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL,
+                                               None)?;
         if let Err(mut e) = self.commit_expr_expecting(&discriminant,
                                                        token::OpenDelim(token::Brace)) {
             if self.token == token::Token::Semi {
@@ -3261,7 +3348,7 @@ impl<'a> Parser<'a> {
             return Err(e)
         }
         let attrs = attrs.append(
-            try!(self.parse_inner_attributes()).into_thin_attrs());
+            self.parse_inner_attributes()?.into_thin_attrs());
         let mut arms: Vec<Arm> = Vec::new();
         while self.token != token::CloseDelim(token::Brace) {
             match self.parse_arm() {
@@ -3286,21 +3373,21 @@ impl<'a> Parser<'a> {
     pub fn parse_arm(&mut self) -> PResult<'a, Arm> {
         maybe_whole!(no_clone self, NtArm);
 
-        let attrs = try!(self.parse_outer_attributes());
-        let pats = try!(self.parse_pats());
+        let attrs = self.parse_outer_attributes()?;
+        let pats = self.parse_pats()?;
         let mut guard = None;
         if self.eat_keyword(keywords::If) {
-            guard = Some(try!(self.parse_expr()));
+            guard = Some(self.parse_expr()?);
         }
-        try!(self.expect(&token::FatArrow));
-        let expr = try!(self.parse_expr_res(Restrictions::RESTRICTION_STMT_EXPR, None));
+        self.expect(&token::FatArrow)?;
+        let expr = self.parse_expr_res(Restrictions::RESTRICTION_STMT_EXPR, None)?;
 
         let require_comma =
             !classify::expr_is_simple_block(&expr)
             && self.token != token::CloseDelim(token::Brace);
 
         if require_comma {
-            try!(self.commit_expr(&expr, &[token::Comma], &[token::CloseDelim(token::Brace)]));
+            self.commit_expr(&expr, &[token::Comma], &[token::CloseDelim(token::Brace)])?;
         } else {
             self.eat(&token::Comma);
         }
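parse_arm demands the separating comma only when the arm body is not a simple block expression and more arms follow; otherwise a comma is optional and merely eaten if present. In surface syntax:

fn describe(n: i32) -> &'static str {
    match n {
        0 => "zero",              // expression body: comma required before the next arm
        x if x < 0 => {
            "negative"            // block body: no comma needed here
        }
        _ => "positive",          // comma on the final arm is optional
    }
}

fn main() {
    assert_eq!(describe(-3), "negative");
    assert_eq!(describe(0), "zero");
    assert_eq!(describe(7), "positive");
}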
@@ -3343,7 +3430,7 @@ impl<'a> Parser<'a> {
     fn parse_initializer(&mut self) -> PResult<'a, Option<P<Expr>>> {
         if self.check(&token::Eq) {
             self.bump();
-            Ok(Some(try!(self.parse_expr())))
+            Ok(Some(self.parse_expr()?))
         } else {
             Ok(None)
         }
@@ -3353,7 +3440,7 @@ impl<'a> Parser<'a> {
     fn parse_pats(&mut self) -> PResult<'a, Vec<P<Pat>>> {
         let mut pats = Vec::new();
         loop {
-            pats.push(try!(self.parse_pat()));
+            pats.push(self.parse_pat()?);
             if self.check(&token::BinOp(token::Or)) { self.bump();}
             else { return Ok(pats); }
         };
@@ -3362,15 +3449,15 @@ impl<'a> Parser<'a> {
     fn parse_pat_tuple_elements(&mut self) -> PResult<'a, Vec<P<Pat>>> {
         let mut fields = vec![];
         if !self.check(&token::CloseDelim(token::Paren)) {
-            fields.push(try!(self.parse_pat()));
+            fields.push(self.parse_pat()?);
             if self.look_ahead(1, |t| *t != token::CloseDelim(token::Paren)) {
                 while self.eat(&token::Comma) &&
                       !self.check(&token::CloseDelim(token::Paren)) {
-                    fields.push(try!(self.parse_pat()));
+                    fields.push(self.parse_pat()?);
                 }
             }
             if fields.len() == 1 {
-                try!(self.expect(&token::Comma));
+                self.expect(&token::Comma)?;
             }
         }
         Ok(fields)
@@ -3389,7 +3476,7 @@ impl<'a> Parser<'a> {
             if first {
                 first = false;
             } else {
-                try!(self.expect(&token::Comma));
+                self.expect(&token::Comma)?;
 
                 if self.token == token::CloseDelim(token::Bracket)
                         && (before_slice || !after.is_empty()) {
@@ -3414,7 +3501,7 @@ impl<'a> Parser<'a> {
                 }
             }
 
-            let subpat = try!(self.parse_pat());
+            let subpat = self.parse_pat()?;
             if before_slice && self.check(&token::DotDot) {
                 self.bump();
                 slice = Some(subpat);
@@ -3438,7 +3525,7 @@ impl<'a> Parser<'a> {
             if first {
                 first = false;
             } else {
-                try!(self.expect(&token::Comma));
+                self.expect(&token::Comma)?;
                 // accept trailing commas
                 if self.check(&token::CloseDelim(token::Brace)) { break }
             }
@@ -3460,9 +3547,9 @@ impl<'a> Parser<'a> {
             // Check if a colon exists one ahead. This means we're parsing a fieldname.
             let (subpat, fieldname, is_shorthand) = if self.look_ahead(1, |t| t == &token::Colon) {
                 // Parsing a pattern of the form "fieldname: pat"
-                let fieldname = try!(self.parse_ident());
+                let fieldname = self.parse_ident()?;
                 self.bump();
-                let pat = try!(self.parse_pat());
+                let pat = self.parse_pat()?;
                 hi = pat.span.hi;
                 (pat, fieldname, false)
             } else {
@@ -3471,7 +3558,7 @@ impl<'a> Parser<'a> {
                 let boxed_span_lo = self.span.lo;
                 let is_ref = self.eat_keyword(keywords::Ref);
                 let is_mut = self.eat_keyword(keywords::Mut);
-                let fieldname = try!(self.parse_ident());
+                let fieldname = self.parse_ident()?;
                 hi = self.last_span.hi;
 
                 let bind_type = match (is_ref, is_mut) {
@@ -3513,11 +3600,11 @@ impl<'a> Parser<'a> {
             let (qself, path) = if self.eat_lt() {
                 // Parse a qualified path
                 let (qself, path) =
-                    try!(self.parse_qualified_path(NoTypesAllowed));
+                    self.parse_qualified_path(NoTypesAllowed)?;
                 (Some(qself), path)
             } else {
                 // Parse an unqualified path
-                (None, try!(self.parse_path(LifetimeAndTypesWithColons)))
+                (None, self.parse_path(LifetimeAndTypesWithColons)?)
             };
             let hi = self.last_span.hi;
             Ok(self.mk_expr(lo, hi, ExprKind::Path(qself, path), None))
@@ -3546,41 +3633,41 @@ impl<'a> Parser<'a> {
           }
           token::BinOp(token::And) | token::AndAnd => {
             // Parse &pat / &mut pat
-            try!(self.expect_and());
-            let mutbl = try!(self.parse_mutability());
+            self.expect_and()?;
+            let mutbl = self.parse_mutability()?;
             if let token::Lifetime(ident) = self.token {
                 return Err(self.fatal(&format!("unexpected lifetime `{}` in pattern", ident)));
             }
 
-            let subpat = try!(self.parse_pat());
+            let subpat = self.parse_pat()?;
             pat = PatKind::Ref(subpat, mutbl);
           }
           token::OpenDelim(token::Paren) => {
             // Parse (pat,pat,pat,...) as tuple pattern
             self.bump();
-            let fields = try!(self.parse_pat_tuple_elements());
-            try!(self.expect(&token::CloseDelim(token::Paren)));
+            let fields = self.parse_pat_tuple_elements()?;
+            self.expect(&token::CloseDelim(token::Paren))?;
             pat = PatKind::Tup(fields);
           }
           token::OpenDelim(token::Bracket) => {
             // Parse [pat,pat,...] as slice pattern
             self.bump();
-            let (before, slice, after) = try!(self.parse_pat_vec_elements());
-            try!(self.expect(&token::CloseDelim(token::Bracket)));
+            let (before, slice, after) = self.parse_pat_vec_elements()?;
+            self.expect(&token::CloseDelim(token::Bracket))?;
             pat = PatKind::Vec(before, slice, after);
           }
           _ => {
             // At this point, token != _, &, &&, (, [
             if self.eat_keyword(keywords::Mut) {
                 // Parse mut ident @ pat
-                pat = try!(self.parse_pat_ident(BindingMode::ByValue(Mutability::Mutable)));
+                pat = self.parse_pat_ident(BindingMode::ByValue(Mutability::Mutable))?;
             } else if self.eat_keyword(keywords::Ref) {
                 // Parse ref ident @ pat / ref mut ident @ pat
-                let mutbl = try!(self.parse_mutability());
-                pat = try!(self.parse_pat_ident(BindingMode::ByRef(mutbl)));
+                let mutbl = self.parse_mutability()?;
+                pat = self.parse_pat_ident(BindingMode::ByRef(mutbl))?;
             } else if self.eat_keyword(keywords::Box) {
                 // Parse box pat
-                let subpat = try!(self.parse_pat());
+                let subpat = self.parse_pat()?;
                 pat = PatKind::Box(subpat);
             } else if self.is_path_start() {
                 // Parse pattern starting with a path
@@ -3592,13 +3679,14 @@ impl<'a> Parser<'a> {
                     // Plain idents have some extra abilities here compared to general paths
                     if self.look_ahead(1, |t| *t == token::Not) {
                         // Parse macro invocation
-                        let ident = try!(self.parse_ident());
+                        let ident = self.parse_ident()?;
                         let ident_span = self.last_span;
-                        let path = ident_to_path(ident_span, ident);
+                        let path = ast::Path::from_ident(ident_span, ident);
                         self.bump();
-                        let delim = try!(self.expect_open_delim());
-                        let tts = try!(self.parse_seq_to_end(&token::CloseDelim(delim),
-                                SeqSep::none(), |p| p.parse_token_tree()));
+                        let delim = self.expect_open_delim()?;
+                        let tts = self.parse_seq_to_end(
+                            &token::CloseDelim(delim),
+                            SeqSep::none(), |p| p.parse_token_tree())?;
                         let mac = Mac_ { path: path, tts: tts, ctxt: EMPTY_CTXT };
                         pat = PatKind::Mac(codemap::Spanned {node: mac,
                                                        span: mk_sp(lo, self.last_span.hi)});
@@ -3607,17 +3695,17 @@ impl<'a> Parser<'a> {
                         // This can give false positives and parse nullary enums,
                         // they are dealt with later in resolve
                         let binding_mode = BindingMode::ByValue(Mutability::Immutable);
-                        pat = try!(self.parse_pat_ident(binding_mode));
+                        pat = self.parse_pat_ident(binding_mode)?;
                     }
                 } else {
                     let (qself, path) = if self.eat_lt() {
                         // Parse a qualified path
                         let (qself, path) =
-                            try!(self.parse_qualified_path(NoTypesAllowed));
+                            self.parse_qualified_path(NoTypesAllowed)?;
                         (Some(qself), path)
                     } else {
                         // Parse an unqualified path
-                        (None, try!(self.parse_path(LifetimeAndTypesWithColons)))
+                        (None, self.parse_path(LifetimeAndTypesWithColons)?)
                     };
                     match self.token {
                       token::DotDotDot => {
@@ -3625,7 +3713,7 @@ impl<'a> Parser<'a> {
                         let hi = self.last_span.hi;
                         let begin = self.mk_expr(lo, hi, ExprKind::Path(qself, path), None);
                         self.bump();
-                        let end = try!(self.parse_pat_range_end());
+                        let end = self.parse_pat_range_end()?;
                         pat = PatKind::Range(begin, end);
                       }
                       token::OpenDelim(token::Brace) => {
@@ -3651,14 +3739,14 @@ impl<'a> Parser<'a> {
                             // This is a "top constructor only" pat
                             self.bump();
                             self.bump();
-                            try!(self.expect(&token::CloseDelim(token::Paren)));
+                            self.expect(&token::CloseDelim(token::Paren))?;
                             pat = PatKind::TupleStruct(path, None);
                         } else {
-                            let args = try!(self.parse_enum_variant_seq(
-                                    &token::OpenDelim(token::Paren),
-                                    &token::CloseDelim(token::Paren),
-                                    SeqSep::trailing_allowed(token::Comma),
-                                    |p| p.parse_pat()));
+                            let args = self.parse_enum_variant_seq(
+                                &token::OpenDelim(token::Paren),
+                                &token::CloseDelim(token::Paren),
+                                SeqSep::trailing_allowed(token::Comma),
+                                |p| p.parse_pat())?;
                             pat = PatKind::TupleStruct(path, Some(args));
                         }
                       }
@@ -3674,9 +3762,9 @@ impl<'a> Parser<'a> {
                 }
             } else {
                 // Try to parse everything else as literal with optional minus
-                let begin = try!(self.parse_pat_literal_maybe_minus());
+                let begin = self.parse_pat_literal_maybe_minus()?;
                 if self.eat(&token::DotDotDot) {
-                    let end = try!(self.parse_pat_range_end());
+                    let end = self.parse_pat_range_end()?;
                     pat = PatKind::Range(begin, end);
                 } else {
                     pat = PatKind::Lit(begin);
@@ -3699,17 +3787,11 @@ impl<'a> Parser<'a> {
     fn parse_pat_ident(&mut self,
                        binding_mode: ast::BindingMode)
                        -> PResult<'a, PatKind> {
-        if !self.token.is_plain_ident() {
-            let span = self.span;
-            let tok_str = self.this_token_to_string();
-            return Err(self.span_fatal(span,
-                            &format!("expected identifier, found `{}`", tok_str)))
-        }
-        let ident = try!(self.parse_ident());
+        let ident = self.parse_ident()?;
         let last_span = self.last_span;
         let name = codemap::Spanned{span: last_span, node: ident};
         let sub = if self.eat(&token::At) {
-            Some(try!(self.parse_pat()))
+            Some(self.parse_pat()?)
         } else {
             None
         };
@@ -3733,13 +3815,13 @@ impl<'a> Parser<'a> {
     /// Parse a local variable declaration
     fn parse_local(&mut self, attrs: ThinAttributes) -> PResult<'a, P<Local>> {
         let lo = self.span.lo;
-        let pat = try!(self.parse_pat());
+        let pat = self.parse_pat()?;
 
         let mut ty = None;
         if self.eat(&token::Colon) {
-            ty = Some(try!(self.parse_ty_sum()));
+            ty = Some(self.parse_ty_sum()?);
         }
-        let init = try!(self.parse_initializer());
+        let init = self.parse_initializer()?;
         Ok(P(ast::Local {
             ty: ty,
             pat: pat,
@@ -3753,7 +3835,7 @@ impl<'a> Parser<'a> {
     /// Parse a "let" stmt
     fn parse_let(&mut self, attrs: ThinAttributes) -> PResult<'a, P<Decl>> {
         let lo = self.span.lo;
-        let local = try!(self.parse_local(attrs));
+        let local = self.parse_local(attrs)?;
         Ok(P(spanned(lo, self.last_span.hi, DeclKind::Local(local))))
     }
 
@@ -3762,20 +3844,19 @@ impl<'a> Parser<'a> {
                          attrs: Vec<Attribute> ) -> PResult<'a, StructField> {
         let lo = match pr {
             Visibility::Inherited => self.span.lo,
-            Visibility::Public => self.last_span.lo,
+            _ => self.last_span.lo,
         };
-        if !self.token.is_plain_ident() {
-            return Err(self.fatal("expected ident"));
-        }
-        let name = try!(self.parse_ident());
-        try!(self.expect(&token::Colon));
-        let ty = try!(self.parse_ty_sum());
-        Ok(spanned(lo, self.last_span.hi, ast::StructField_ {
-            kind: NamedField(name, pr),
+        let name = self.parse_ident()?;
+        self.expect(&token::Colon)?;
+        let ty = self.parse_ty_sum()?;
+        Ok(StructField {
+            span: mk_sp(lo, self.last_span.hi),
+            ident: Some(name),
+            vis: pr,
             id: ast::DUMMY_NODE_ID,
             ty: ty,
             attrs: attrs,
-        }))
+        })
     }
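Two AST changes are visible in the hunk above: ast::StructField is now an ordinary struct carrying its own span, visibility, and an optional ident (None for tuple-struct fields), and the match on the field's visibility needs a catch-all arm because Visibility evidently has more variants than Public and Inherited (presumably the restricted pub forms; that extra variant is an assumption here, not something this diff states). A rough stand-alone sketch of the shape being built, using stand-in types rather than the real libsyntax definitions:

// Simplified stand-ins for the libsyntax types used above.
#[allow(dead_code)]
#[derive(Debug)]
enum Visibility {
    Public,
    Restricted, // assumed extra variant; the diff only implies "more than Public/Inherited"
    Inherited,
}

#[derive(Debug)]
struct StructField {
    span: (u32, u32),      // stand-in for codemap::Span
    ident: Option<String>, // Some(..) for named fields, None for tuple-struct fields
    vis: Visibility,
    id: u32,               // stand-in for ast::NodeId
    ty: String,            // stand-in for P<Ty>
    attrs: Vec<String>,
}

fn main() {
    // Roughly what parse_name_and_ty now produces for `pub foo: u32`:
    let named = StructField {
        span: (0, 12),
        ident: Some("foo".to_string()),
        vis: Visibility::Public,
        id: 0,
        ty: "u32".to_string(),
        attrs: Vec::new(),
    };
    // Roughly what parse_tuple_struct_body produces for an unnamed `u32` field:
    let unnamed = StructField {
        span: (0, 3),
        ident: None,
        vis: Visibility::Inherited,
        id: 1,
        ty: "u32".to_string(),
        attrs: Vec::new(),
    };
    println!("{:?}\n{:?}", named, unnamed);
}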
 
     /// Emit an expected item after attributes error.
@@ -3867,12 +3948,12 @@ impl<'a> Parser<'a> {
     fn parse_stmt_without_recovery(&mut self) -> PResult<'a, Option<Stmt>> {
         maybe_whole!(Some deref self, NtStmt);
 
-        let attrs = try!(self.parse_outer_attributes());
+        let attrs = self.parse_outer_attributes()?;
         let lo = self.span.lo;
 
         Ok(Some(if self.check_keyword(keywords::Let) {
-            try!(self.expect_keyword(keywords::Let));
-            let decl = try!(self.parse_let(attrs.into_thin_attrs()));
+            self.expect_keyword(keywords::Let)?;
+            let decl = self.parse_let(attrs.into_thin_attrs())?;
             let hi = decl.span.hi;
             let stmt = StmtKind::Decl(decl, ast::DUMMY_NODE_ID);
             spanned(lo, hi, stmt)
@@ -3883,12 +3964,12 @@ impl<'a> Parser<'a> {
 
             // Potential trouble: if we allow macros with paths instead of
             // idents, we'd need to look ahead past the whole path here...
-            let pth = try!(self.parse_path(NoTypesAllowed));
+            let pth = self.parse_path(NoTypesAllowed)?;
             self.bump();
 
             let id = match self.token {
                 token::OpenDelim(_) => token::special_idents::invalid, // no special identifier
-                _ => try!(self.parse_ident()),
+                _ => self.parse_ident()?,
             };
 
             // check that we're pointing at delimiters (need to check
@@ -3911,12 +3992,12 @@ impl<'a> Parser<'a> {
                 },
             };
 
-            let tts = try!(self.parse_unspanned_seq(
+            let tts = self.parse_unspanned_seq(
                 &token::OpenDelim(delim),
                 &token::CloseDelim(delim),
                 SeqSep::none(),
                 |p| p.parse_token_tree()
-            ));
+            )?;
             let hi = self.last_span.hi;
 
             let style = if delim == token::Brace {
@@ -3954,8 +4035,8 @@ impl<'a> Parser<'a> {
         } else {
             // FIXME: Bad copy of attrs
             let restrictions = self.restrictions | Restrictions::NO_NONINLINE_MOD;
-            match try!(self.with_res(restrictions,
-                                     |this| this.parse_item_(attrs.clone(), false, true))) {
+            match self.with_res(restrictions,
+                                |this| this.parse_item_(attrs.clone(), false, true))? {
                 Some(i) => {
                     let hi = i.span.hi;
                     let decl = P(spanned(lo, hi, DeclKind::Item(i)));
@@ -3982,8 +4063,8 @@ impl<'a> Parser<'a> {
                     }
 
                     // Remainder are line-expr stmts.
-                    let e = try!(self.parse_expr_res(
-                        Restrictions::RESTRICTION_STMT_EXPR, Some(attrs.into_thin_attrs())));
+                    let e = self.parse_expr_res(
+                        Restrictions::RESTRICTION_STMT_EXPR, Some(attrs.into_thin_attrs()))?;
                     let hi = e.span.hi;
                     let stmt = StmtKind::Expr(e, ast::DUMMY_NODE_ID);
                     spanned(lo, hi, stmt)
@@ -4020,9 +4101,9 @@ impl<'a> Parser<'a> {
         maybe_whole!(pair_empty self, NtBlock);
 
         let lo = self.span.lo;
-        try!(self.expect(&token::OpenDelim(token::Brace)));
-        Ok((try!(self.parse_inner_attributes()),
-            try!(self.parse_block_tail(lo, BlockCheckMode::Default))))
+        self.expect(&token::OpenDelim(token::Brace))?;
+        Ok((self.parse_inner_attributes()?,
+            self.parse_block_tail(lo, BlockCheckMode::Default)?))
     }
 
     /// Parse the rest of a block expression or function body
@@ -4042,7 +4123,7 @@ impl<'a> Parser<'a> {
             };
             match node {
                 StmtKind::Expr(e, _) => {
-                    try!(self.handle_expression_like_statement(e, span, &mut stmts, &mut expr));
+                    self.handle_expression_like_statement(e, span, &mut stmts, &mut expr)?;
                 }
                 StmtKind::Mac(mac, MacStmtStyle::NoBraces, attrs) => {
                     // statement macro without braces; might be an
@@ -4060,13 +4141,13 @@ impl<'a> Parser<'a> {
                                                      mac.and_then(|m| m.node),
                                                      None);
                             let lo = e.span.lo;
-                            let e = try!(self.parse_dot_or_call_expr_with(e, lo, attrs));
-                            let e = try!(self.parse_assoc_expr_with(0, LhsExpr::AlreadyParsed(e)));
-                            try!(self.handle_expression_like_statement(
+                            let e = self.parse_dot_or_call_expr_with(e, lo, attrs)?;
+                            let e = self.parse_assoc_expr_with(0, LhsExpr::AlreadyParsed(e))?;
+                            self.handle_expression_like_statement(
                                 e,
                                 span,
                                 &mut stmts,
-                                &mut expr));
+                                &mut expr)?;
                         }
                     }
                 }
@@ -4098,7 +4179,7 @@ impl<'a> Parser<'a> {
                 _ => { // all other kinds of statements:
                     let mut hi = span.hi;
                     if classify::stmt_ends_with_semi(&node) {
-                        try!(self.commit_stmt_expecting(token::Semi));
+                        self.commit_stmt_expecting(token::Semi)?;
                         hi = self.last_span.hi;
                     }
 
@@ -4199,7 +4280,7 @@ impl<'a> Parser<'a> {
                     self.bump();
                 }
                 token::ModSep | token::Ident(..) => {
-                    let poly_trait_ref = try!(self.parse_poly_trait_ref());
+                    let poly_trait_ref = self.parse_poly_trait_ref()?;
                     let modifier = if ate_question {
                         if mode == BoundParsingMode::Modified {
                             TraitBoundModifier::Maybe
@@ -4227,13 +4308,13 @@ impl<'a> Parser<'a> {
     /// Matches typaram = IDENT (`?` unbound)? optbounds ( EQ ty )?
     fn parse_ty_param(&mut self) -> PResult<'a, TyParam> {
         let span = self.span;
-        let ident = try!(self.parse_ident());
+        let ident = self.parse_ident()?;
 
-        let bounds = try!(self.parse_colon_then_ty_param_bounds(BoundParsingMode::Modified));
+        let bounds = self.parse_colon_then_ty_param_bounds(BoundParsingMode::Modified)?;
 
         let default = if self.check(&token::Eq) {
             self.bump();
-            Some(try!(self.parse_ty_sum()))
+            Some(self.parse_ty_sum()?)
         } else {
             None
         };
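parse_ty_param reads an identifier, optional (possibly ?-modified) bounds, and an optional = default; the parse_generics loop just below then rejects a defaulted parameter that is followed by one without a default. As a small example of the surface forms that grammar covers:

use std::fmt::Debug;

// IDENT, optional bounds (including the `?Sized` relaxation), optional `= default`;
// the defaulted parameter must come last, which is what parse_generics enforces.
struct Tagged<T: ?Sized, Tag: Debug + Default = u32> {
    tag: Tag,
    value: Box<T>,
}

fn main() {
    let t: Tagged<str> = Tagged { tag: 7, value: Box::from("hello") };
    println!("{:?}: {}", t.tag, t.value);
}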
@@ -4258,11 +4339,11 @@ impl<'a> Parser<'a> {
         maybe_whole!(self, NtGenerics);
 
         if self.eat(&token::Lt) {
-            let lifetime_defs = try!(self.parse_lifetime_defs());
+            let lifetime_defs = self.parse_lifetime_defs()?;
             let mut seen_default = false;
-            let ty_params = try!(self.parse_seq_to_gt(Some(token::Comma), |p| {
-                try!(p.forbid_lifetime());
-                let ty_param = try!(p.parse_ty_param());
+            let ty_params = self.parse_seq_to_gt(Some(token::Comma), |p| {
+                p.forbid_lifetime()?;
+                let ty_param = p.parse_ty_param()?;
                 if ty_param.default.is_some() {
                     seen_default = true;
                 } else if seen_default {
@@ -4271,7 +4352,7 @@ impl<'a> Parser<'a> {
                                "type parameters with a default must be trailing");
                 }
                 Ok(ty_param)
-            }));
+            })?;
             Ok(ast::Generics {
                 lifetimes: lifetime_defs,
                 ty_params: ty_params,
@@ -4289,7 +4370,7 @@ impl<'a> Parser<'a> {
                                                             Vec<P<Ty>>,
                                                             Vec<TypeBinding>)> {
         let span_lo = self.span.lo;
-        let lifetimes = try!(self.parse_lifetimes(token::Comma));
+        let lifetimes = self.parse_lifetimes(token::Comma)?;
 
         let missing_comma = !lifetimes.is_empty() &&
                             !self.token.is_like_gt() &&
@@ -4321,17 +4402,17 @@ impl<'a> Parser<'a> {
         }
 
         // First parse types.
-        let (types, returned) = try!(self.parse_seq_to_gt_or_return(
+        let (types, returned) = self.parse_seq_to_gt_or_return(
             Some(token::Comma),
             |p| {
-                try!(p.forbid_lifetime());
+                p.forbid_lifetime()?;
                 if p.look_ahead(1, |t| t == &token::Eq) {
                     Ok(None)
                 } else {
-                    Ok(Some(try!(p.parse_ty_sum())))
+                    Ok(Some(p.parse_ty_sum()?))
                 }
             }
-        ));
+        )?;
 
         // If we found the `>`, don't continue.
         if !returned {
@@ -4339,18 +4420,18 @@ impl<'a> Parser<'a> {
         }
 
         // Then parse type bindings.
-        let bindings = try!(self.parse_seq_to_gt(
+        let bindings = self.parse_seq_to_gt(
             Some(token::Comma),
             |p| {
-                try!(p.forbid_lifetime());
+                p.forbid_lifetime()?;
                 let lo = p.span.lo;
-                let ident = try!(p.parse_ident());
+                let ident = p.parse_ident()?;
                 let found_eq = p.eat(&token::Eq);
                 if !found_eq {
                     let span = p.span;
                     p.span_warn(span, "whoops, no =?");
                 }
-                let ty = try!(p.parse_ty());
+                let ty = p.parse_ty()?;
                 let hi = ty.span.hi;
                 let span = mk_sp(lo, hi);
                 return Ok(TypeBinding{id: ast::DUMMY_NODE_ID,
@@ -4359,7 +4440,7 @@ impl<'a> Parser<'a> {
                     span: span,
                 });
             }
-        ));
+        )?;
         Ok((lifetimes, types.into_vec(), bindings.into_vec()))
     }
 
@@ -4399,12 +4480,12 @@ impl<'a> Parser<'a> {
 
                 token::Lifetime(..) => {
                     let bounded_lifetime =
-                        try!(self.parse_lifetime());
+                        self.parse_lifetime()?;
 
                     self.eat(&token::Colon);
 
                     let bounds =
-                        try!(self.parse_lifetimes(token::BinOp(token::Plus)));
+                        self.parse_lifetimes(token::BinOp(token::Plus))?;
 
                     let hi = self.last_span.hi;
                     let span = mk_sp(lo, hi);
@@ -4423,18 +4504,18 @@ impl<'a> Parser<'a> {
                 _ => {
                     let bound_lifetimes = if self.eat_keyword(keywords::For) {
                         // Higher ranked constraint.
-                        try!(self.expect(&token::Lt));
-                        let lifetime_defs = try!(self.parse_lifetime_defs());
-                        try!(self.expect_gt());
+                        self.expect(&token::Lt)?;
+                        let lifetime_defs = self.parse_lifetime_defs()?;
+                        self.expect_gt()?;
                         lifetime_defs
                     } else {
                         vec![]
                     };
 
-                    let bounded_ty = try!(self.parse_ty());
+                    let bounded_ty = self.parse_ty()?;
 
                     if self.eat(&token::Colon) {
-                        let bounds = try!(self.parse_ty_param_bounds(BoundParsingMode::Bare));
+                        let bounds = self.parse_ty_param_bounds(BoundParsingMode::Bare)?;
                         let hi = self.last_span.hi;
                         let span = mk_sp(lo, hi);
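This arm of the where-clause parser handles an optional higher-ranked for<...> binder followed by a bounded type. In surface syntax that is the for<'a> form of a where predicate, for example:

// A where predicate with the higher-ranked binder this branch parses.
fn lengths<F>(items: &[String], f: F) -> Vec<usize>
where
    F: for<'a> Fn(&'a str) -> usize,
{
    items.iter().map(|s| f(s.as_str())).collect()
}

fn main() {
    let words = vec!["alpha".to_string(), "be".to_string()];
    assert_eq!(lengths(&words, |s: &str| s.len()), vec![5, 2]);
}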
 
@@ -4497,7 +4578,7 @@ impl<'a> Parser<'a> {
         let sp = self.span;
         let mut variadic = false;
         let args: Vec<Option<Arg>> =
-            try!(self.parse_unspanned_seq(
+            self.parse_unspanned_seq(
                 &token::OpenDelim(token::Paren),
                 &token::CloseDelim(token::Paren),
                 SeqSep::trailing_allowed(token::Comma),
@@ -4528,7 +4609,7 @@ impl<'a> Parser<'a> {
                         }
                     }
                 }
-            ));
+            )?;
 
         let args: Vec<_> = args.into_iter().filter_map(|x| x).collect();
 
@@ -4543,8 +4624,8 @@ impl<'a> Parser<'a> {
     /// Parse the argument list and result type of a function declaration
     pub fn parse_fn_decl(&mut self, allow_variadic: bool) -> PResult<'a, P<FnDecl>> {
 
-        let (args, variadic) = try!(self.parse_fn_args(true, allow_variadic));
-        let ret_ty = try!(self.parse_ret_ty());
+        let (args, variadic) = self.parse_fn_args(true, allow_variadic)?;
+        let ret_ty = self.parse_ret_ty()?;
 
         Ok(P(FnDecl {
             inputs: args,
@@ -4614,31 +4695,31 @@ impl<'a> Parser<'a> {
 
             if this.look_ahead(1, |t| t.is_keyword(keywords::SelfValue)) {
                 this.bump();
-                Ok(SelfKind::Region(None, Mutability::Immutable, try!(this.expect_self_ident())))
+                Ok(SelfKind::Region(None, Mutability::Immutable, this.expect_self_ident()?))
             } else if this.look_ahead(1, |t| t.is_mutability()) &&
                       this.look_ahead(2, |t| t.is_keyword(keywords::SelfValue)) {
                 this.bump();
-                let mutability = try!(this.parse_mutability());
-                Ok(SelfKind::Region(None, mutability, try!(this.expect_self_ident())))
+                let mutability = this.parse_mutability()?;
+                Ok(SelfKind::Region(None, mutability, this.expect_self_ident()?))
             } else if this.look_ahead(1, |t| t.is_lifetime()) &&
                       this.look_ahead(2, |t| t.is_keyword(keywords::SelfValue)) {
                 this.bump();
-                let lifetime = try!(this.parse_lifetime());
-                let ident = try!(this.expect_self_ident());
+                let lifetime = this.parse_lifetime()?;
+                let ident = this.expect_self_ident()?;
                 Ok(SelfKind::Region(Some(lifetime), Mutability::Immutable, ident))
             } else if this.look_ahead(1, |t| t.is_lifetime()) &&
                       this.look_ahead(2, |t| t.is_mutability()) &&
                       this.look_ahead(3, |t| t.is_keyword(keywords::SelfValue)) {
                 this.bump();
-                let lifetime = try!(this.parse_lifetime());
-                let mutability = try!(this.parse_mutability());
-                Ok(SelfKind::Region(Some(lifetime), mutability, try!(this.expect_self_ident())))
+                let lifetime = this.parse_lifetime()?;
+                let mutability = this.parse_mutability()?;
+                Ok(SelfKind::Region(Some(lifetime), mutability, this.expect_self_ident()?))
             } else {
                 Ok(SelfKind::Static)
             }
         }
 
-        try!(self.expect(&token::OpenDelim(token::Paren)));
+        self.expect(&token::OpenDelim(token::Paren))?;
 
         // A bit of complexity and lookahead is needed here in order to be
         // backwards compatible.
@@ -4649,7 +4730,7 @@ impl<'a> Parser<'a> {
         let mut mutbl_self = Mutability::Immutable;
         let explicit_self = match self.token {
             token::BinOp(token::And) => {
-                let eself = try!(maybe_parse_borrowed_explicit_self(self));
+                let eself = maybe_parse_borrowed_explicit_self(self)?;
                 self_ident_lo = self.last_span.lo;
                 self_ident_hi = self.last_span.hi;
                 eself
@@ -4659,7 +4740,7 @@ impl<'a> Parser<'a> {
                 // emitting cryptic "unexpected token" errors.
                 self.bump();
                 let _mutability = if self.token.is_mutability() {
-                    try!(self.parse_mutability())
+                    self.parse_mutability()?
                 } else {
                     Mutability::Immutable
                 };
@@ -4673,24 +4754,24 @@ impl<'a> Parser<'a> {
             }
             token::Ident(..) => {
                 if self.is_self_ident() {
-                    let self_ident = try!(self.expect_self_ident());
+                    let self_ident = self.expect_self_ident()?;
 
                     // Determine whether this is the fully explicit form, `self:
                     // TYPE`.
                     if self.eat(&token::Colon) {
-                        SelfKind::Explicit(try!(self.parse_ty_sum()), self_ident)
+                        SelfKind::Explicit(self.parse_ty_sum()?, self_ident)
                     } else {
                         SelfKind::Value(self_ident)
                     }
                 } else if self.token.is_mutability() &&
                         self.look_ahead(1, |t| t.is_keyword(keywords::SelfValue)) {
-                    mutbl_self = try!(self.parse_mutability());
-                    let self_ident = try!(self.expect_self_ident());
+                    mutbl_self = self.parse_mutability()?;
+                    let self_ident = self.expect_self_ident()?;
 
                     // Determine whether this is the fully explicit form,
                     // `self: TYPE`.
                     if self.eat(&token::Colon) {
-                        SelfKind::Explicit(try!(self.parse_ty_sum()), self_ident)
+                        SelfKind::Explicit(self.parse_ty_sum()?, self_ident)
                     } else {
                         SelfKind::Value(self_ident)
                     }
@@ -4744,11 +4825,11 @@ impl<'a> Parser<'a> {
         };
 
 
-        try!(self.expect(&token::CloseDelim(token::Paren)));
+        self.expect(&token::CloseDelim(token::Paren))?;
 
         let hi = self.span.hi;
 
-        let ret_ty = try!(self.parse_ret_ty());
+        let ret_ty = self.parse_ret_ty()?;
 
         let fn_decl = P(FnDecl {
             inputs: fn_inputs,
@@ -4765,8 +4846,8 @@ impl<'a> Parser<'a> {
             if self.eat(&token::OrOr) {
                 Vec::new()
             } else {
-                try!(self.expect(&token::BinOp(token::Or)));
-                try!(self.parse_obsolete_closure_kind());
+                self.expect(&token::BinOp(token::Or))?;
+                self.parse_obsolete_closure_kind()?;
                 let args = self.parse_seq_to_before_end(
                     &token::BinOp(token::Or),
                     SeqSep::trailing_allowed(token::Comma),
@@ -4776,7 +4857,7 @@ impl<'a> Parser<'a> {
                 args
             }
         };
-        let output = try!(self.parse_ret_ty());
+        let output = self.parse_ret_ty()?;
 
         Ok(P(FnDecl {
             inputs: inputs_captures,
@@ -4787,8 +4868,8 @@ impl<'a> Parser<'a> {
 
     /// Parse the name and optional generic types of a function header.
     fn parse_fn_header(&mut self) -> PResult<'a, (Ident, ast::Generics)> {
-        let id = try!(self.parse_ident());
-        let generics = try!(self.parse_generics());
+        let id = self.parse_ident()?;
+        let generics = self.parse_generics()?;
         Ok((id, generics))
     }
 
@@ -4811,10 +4892,10 @@ impl<'a> Parser<'a> {
                      constness: Constness,
                      abi: abi::Abi)
                      -> PResult<'a, ItemInfo> {
-        let (ident, mut generics) = try!(self.parse_fn_header());
-        let decl = try!(self.parse_fn_decl(false));
-        generics.where_clause = try!(self.parse_where_clause());
-        let (inner_attrs, body) = try!(self.parse_inner_attrs_and_block());
+        let (ident, mut generics) = self.parse_fn_header()?;
+        let decl = self.parse_fn_decl(false)?;
+        generics.where_clause = self.parse_where_clause()?;
+        let (inner_attrs, body) = self.parse_inner_attrs_and_block()?;
         Ok((ident, ItemKind::Fn(decl, unsafety, constness, abi, generics, body), Some(inner_attrs)))
     }
 
@@ -4836,18 +4917,18 @@ impl<'a> Parser<'a> {
     pub fn parse_fn_front_matter(&mut self)
                                  -> PResult<'a, (ast::Constness, ast::Unsafety, abi::Abi)> {
         let is_const_fn = self.eat_keyword(keywords::Const);
-        let unsafety = try!(self.parse_unsafety());
+        let unsafety = self.parse_unsafety()?;
         let (constness, unsafety, abi) = if is_const_fn {
             (Constness::Const, unsafety, Abi::Rust)
         } else {
             let abi = if self.eat_keyword(keywords::Extern) {
-                try!(self.parse_opt_abi()).unwrap_or(Abi::C)
+                self.parse_opt_abi()?.unwrap_or(Abi::C)
             } else {
                 Abi::Rust
             };
             (Constness::NotConst, unsafety, abi)
         };
-        try!(self.expect_keyword(keywords::Fn));
+        self.expect_keyword(keywords::Fn)?;
         Ok((constness, unsafety, abi))
     }
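parse_fn_front_matter accepts an optional const, then the unsafety qualifier, then an optional extern ABI, and finally requires fn, with const functions pinned to the Rust ABI. For illustration, headers in the order that grammar admits (const fn was still feature-gated in this era; the sketch uses present-day stable syntax):

// [const] [unsafe] [extern "ABI"] fn, the order parse_fn_front_matter accepts.
const fn answer() -> u32 {
    42
}

unsafe fn first_unchecked(xs: &[u32]) -> u32 {
    *xs.get_unchecked(0)
}

extern "C" fn bump(x: i32) -> i32 {
    x + 1
}

fn main() {
    const A: u32 = answer();
    let first = unsafe { first_unchecked(&[7, 8]) };
    assert_eq!((A, first, bump(1)), (42, 7, 2));
}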
 
@@ -4855,26 +4936,27 @@ impl<'a> Parser<'a> {
     pub fn parse_impl_item(&mut self) -> PResult<'a, ImplItem> {
         maybe_whole!(no_clone_from_p self, NtImplItem);
 
-        let mut attrs = try!(self.parse_outer_attributes());
+        let mut attrs = self.parse_outer_attributes()?;
         let lo = self.span.lo;
-        let vis = try!(self.parse_visibility());
+        let vis = self.parse_visibility()?;
+        let defaultness = self.parse_defaultness()?;
         let (name, node) = if self.eat_keyword(keywords::Type) {
-            let name = try!(self.parse_ident());
-            try!(self.expect(&token::Eq));
-            let typ = try!(self.parse_ty_sum());
-            try!(self.expect(&token::Semi));
+            let name = self.parse_ident()?;
+            self.expect(&token::Eq)?;
+            let typ = self.parse_ty_sum()?;
+            self.expect(&token::Semi)?;
             (name, ast::ImplItemKind::Type(typ))
         } else if self.is_const_item() {
-            try!(self.expect_keyword(keywords::Const));
-            let name = try!(self.parse_ident());
-            try!(self.expect(&token::Colon));
-            let typ = try!(self.parse_ty_sum());
-            try!(self.expect(&token::Eq));
-            let expr = try!(self.parse_expr());
-            try!(self.commit_expr_expecting(&expr, token::Semi));
+            self.expect_keyword(keywords::Const)?;
+            let name = self.parse_ident()?;
+            self.expect(&token::Colon)?;
+            let typ = self.parse_ty_sum()?;
+            self.expect(&token::Eq)?;
+            let expr = self.parse_expr()?;
+            self.commit_expr_expecting(&expr, token::Semi)?;
             (name, ast::ImplItemKind::Const(typ, expr))
         } else {
-            let (name, inner_attrs, node) = try!(self.parse_impl_method(vis));
+            let (name, inner_attrs, node) = self.parse_impl_method(&vis)?;
             attrs.extend(inner_attrs);
             (name, node)
         };
@@ -4884,14 +4966,16 @@ impl<'a> Parser<'a> {
             span: mk_sp(lo, self.last_span.hi),
             ident: name,
             vis: vis,
+            defaultness: defaultness,
             attrs: attrs,
             node: node
         })
     }
 
-    fn complain_if_pub_macro(&mut self, visa: Visibility, span: Span) {
-        match visa {
-            Visibility::Public => {
+    fn complain_if_pub_macro(&mut self, visa: &Visibility, span: Span) {
+        match *visa {
+            Visibility::Inherited => (),
+            _ => {
                 let is_macro_rules: bool = match self.token {
                     token::Ident(sid, _) => sid.name == intern("macro_rules"),
                     _ => false,
@@ -4909,12 +4993,11 @@ impl<'a> Parser<'a> {
                                      .emit();
                 }
             }
-            Visibility::Inherited => (),
         }
     }
 
     /// Parse a method or a macro invocation in a trait impl.
-    fn parse_impl_method(&mut self, vis: Visibility)
+    fn parse_impl_method(&mut self, vis: &Visibility)
                          -> PResult<'a, (Ident, Vec<ast::Attribute>, ast::ImplItemKind)> {
         // code copied from parse_macro_use_or_failure... abstraction!
         if !self.token.is_any_keyword()
@@ -4924,34 +5007,34 @@ impl<'a> Parser<'a> {
             // method macro.
 
             let last_span = self.last_span;
-            self.complain_if_pub_macro(vis, last_span);
+            self.complain_if_pub_macro(&vis, last_span);
 
             let lo = self.span.lo;
-            let pth = try!(self.parse_path(NoTypesAllowed));
-            try!(self.expect(&token::Not));
+            let pth = self.parse_path(NoTypesAllowed)?;
+            self.expect(&token::Not)?;
 
             // eat a matched-delimiter token tree:
-            let delim = try!(self.expect_open_delim());
-            let tts = try!(self.parse_seq_to_end(&token::CloseDelim(delim),
-                                                 SeqSep::none(),
-                                                 |p| p.parse_token_tree()));
+            let delim = self.expect_open_delim()?;
+            let tts = self.parse_seq_to_end(&token::CloseDelim(delim),
+                                            SeqSep::none(),
+                                            |p| p.parse_token_tree())?;
             let m_ = Mac_ { path: pth, tts: tts, ctxt: EMPTY_CTXT };
             let m: ast::Mac = codemap::Spanned { node: m_,
                                                 span: mk_sp(lo,
                                                             self.last_span.hi) };
             if delim != token::Brace {
-                try!(self.expect(&token::Semi))
+                self.expect(&token::Semi)?
             }
             Ok((token::special_idents::invalid, vec![], ast::ImplItemKind::Macro(m)))
         } else {
-            let (constness, unsafety, abi) = try!(self.parse_fn_front_matter());
-            let ident = try!(self.parse_ident());
-            let mut generics = try!(self.parse_generics());
-            let (explicit_self, decl) = try!(self.parse_fn_decl_with_self(|p| {
+            let (constness, unsafety, abi) = self.parse_fn_front_matter()?;
+            let ident = self.parse_ident()?;
+            let mut generics = self.parse_generics()?;
+            let (explicit_self, decl) = self.parse_fn_decl_with_self(|p| {
                     p.parse_arg()
-                }));
-            generics.where_clause = try!(self.parse_where_clause());
-            let (inner_attrs, body) = try!(self.parse_inner_attrs_and_block());
+                })?;
+            generics.where_clause = self.parse_where_clause()?;
+            let (inner_attrs, body) = self.parse_inner_attrs_and_block()?;
             Ok((ident, inner_attrs, ast::ImplItemKind::Method(ast::MethodSig {
                 generics: generics,
                 abi: abi,
@@ -4966,15 +5049,15 @@ impl<'a> Parser<'a> {
     /// Parse trait Foo { ... }
     fn parse_item_trait(&mut self, unsafety: Unsafety) -> PResult<'a, ItemInfo> {
 
-        let ident = try!(self.parse_ident());
-        let mut tps = try!(self.parse_generics());
+        let ident = self.parse_ident()?;
+        let mut tps = self.parse_generics()?;
 
         // Parse supertrait bounds.
-        let bounds = try!(self.parse_colon_then_ty_param_bounds(BoundParsingMode::Bare));
+        let bounds = self.parse_colon_then_ty_param_bounds(BoundParsingMode::Bare)?;
 
-        tps.where_clause = try!(self.parse_where_clause());
+        tps.where_clause = self.parse_where_clause()?;
 
-        let meths = try!(self.parse_trait_items());
+        let meths = self.parse_trait_items()?;
         Ok((ident, ItemKind::Trait(unsafety, tps, bounds, meths), None))
     }
 
@@ -4986,7 +5069,7 @@ impl<'a> Parser<'a> {
         let impl_span = self.span;
 
         // First, parse type parameters if necessary.
-        let mut generics = try!(self.parse_generics());
+        let mut generics = self.parse_generics()?;
 
         // Special case: if the next identifier that follows is '(', don't
         // allow this to be parsed as a trait.
@@ -5000,7 +5083,7 @@ impl<'a> Parser<'a> {
         };
 
         // Parse the trait.
-        let mut ty = try!(self.parse_ty_sum());
+        let mut ty = self.parse_ty_sum()?;
 
         // Parse traits, if necessary.
         let opt_trait = if could_be_trait && self.eat_keyword(keywords::For) {
@@ -5035,25 +5118,25 @@ impl<'a> Parser<'a> {
                                           allowed to have generics");
             }
 
-            try!(self.expect(&token::OpenDelim(token::Brace)));
-            try!(self.expect(&token::CloseDelim(token::Brace)));
-            Ok((ast_util::impl_pretty_name(&opt_trait, None),
+            self.expect(&token::OpenDelim(token::Brace))?;
+            self.expect(&token::CloseDelim(token::Brace))?;
+            Ok((special_idents::invalid,
              ItemKind::DefaultImpl(unsafety, opt_trait.unwrap()), None))
         } else {
             if opt_trait.is_some() {
-                ty = try!(self.parse_ty_sum());
+                ty = self.parse_ty_sum()?;
             }
-            generics.where_clause = try!(self.parse_where_clause());
+            generics.where_clause = self.parse_where_clause()?;
 
-            try!(self.expect(&token::OpenDelim(token::Brace)));
-            let attrs = try!(self.parse_inner_attributes());
+            self.expect(&token::OpenDelim(token::Brace))?;
+            let attrs = self.parse_inner_attributes()?;
 
             let mut impl_items = vec![];
             while !self.eat(&token::CloseDelim(token::Brace)) {
-                impl_items.push(try!(self.parse_impl_item()));
+                impl_items.push(self.parse_impl_item()?);
             }
 
-            Ok((ast_util::impl_pretty_name(&opt_trait, Some(&ty)),
+            Ok((special_idents::invalid,
              ItemKind::Impl(unsafety, polarity, generics, opt_trait, ty, impl_items),
              Some(attrs)))
         }
@@ -5062,16 +5145,16 @@ impl<'a> Parser<'a> {
     /// Parse a::B<String,i32>
     fn parse_trait_ref(&mut self) -> PResult<'a, TraitRef> {
         Ok(ast::TraitRef {
-            path: try!(self.parse_path(LifetimeAndTypesWithoutColons)),
+            path: self.parse_path(LifetimeAndTypesWithoutColons)?,
             ref_id: ast::DUMMY_NODE_ID,
         })
     }
 
     fn parse_late_bound_lifetime_defs(&mut self) -> PResult<'a, Vec<ast::LifetimeDef>> {
         if self.eat_keyword(keywords::For) {
-            try!(self.expect(&token::Lt));
-            let lifetime_defs = try!(self.parse_lifetime_defs());
-            try!(self.expect_gt());
+            self.expect(&token::Lt)?;
+            let lifetime_defs = self.parse_lifetime_defs()?;
+            self.expect_gt()?;
             Ok(lifetime_defs)
         } else {
             Ok(Vec::new())
@@ -5081,19 +5164,19 @@ impl<'a> Parser<'a> {
     /// Parse for<'l> a::B<String,i32>
     fn parse_poly_trait_ref(&mut self) -> PResult<'a, PolyTraitRef> {
         let lo = self.span.lo;
-        let lifetime_defs = try!(self.parse_late_bound_lifetime_defs());
+        let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
 
         Ok(ast::PolyTraitRef {
             bound_lifetimes: lifetime_defs,
-            trait_ref: try!(self.parse_trait_ref()),
+            trait_ref: self.parse_trait_ref()?,
             span: mk_sp(lo, self.last_span.hi),
         })
     }
 
     /// Parse struct Foo { ... }
     fn parse_item_struct(&mut self) -> PResult<'a, ItemInfo> {
-        let class_name = try!(self.parse_ident());
-        let mut generics = try!(self.parse_generics());
+        let class_name = self.parse_ident()?;
+        let mut generics = self.parse_generics()?;
 
         // There is a special case worth noting here, as reported in issue #17904.
         // If we are parsing a tuple struct it is the case that the where clause
@@ -5110,28 +5193,25 @@ impl<'a> Parser<'a> {
         // struct.
 
         let vdata = if self.token.is_keyword(keywords::Where) {
-            generics.where_clause = try!(self.parse_where_clause());
+            generics.where_clause = self.parse_where_clause()?;
             if self.eat(&token::Semi) {
                 // If we see a: `struct Foo<T> where T: Copy;` style decl.
                 VariantData::Unit(ast::DUMMY_NODE_ID)
             } else {
                 // If we see: `struct Foo<T> where T: Copy { ... }`
-                VariantData::Struct(try!(self.parse_record_struct_body(ParsePub::Yes)),
-                                    ast::DUMMY_NODE_ID)
+                VariantData::Struct(self.parse_record_struct_body()?, ast::DUMMY_NODE_ID)
             }
         // No `where` so: `struct Foo<T>;`
         } else if self.eat(&token::Semi) {
             VariantData::Unit(ast::DUMMY_NODE_ID)
         // Record-style struct definition
         } else if self.token == token::OpenDelim(token::Brace) {
-            VariantData::Struct(try!(self.parse_record_struct_body(ParsePub::Yes)),
-                                ast::DUMMY_NODE_ID)
+            VariantData::Struct(self.parse_record_struct_body()?, ast::DUMMY_NODE_ID)
         // Tuple-style struct definition with optional where-clause.
         } else if self.token == token::OpenDelim(token::Paren) {
-            let body = VariantData::Tuple(try!(self.parse_tuple_struct_body(ParsePub::Yes)),
-                                          ast::DUMMY_NODE_ID);
-            generics.where_clause = try!(self.parse_where_clause());
-            try!(self.expect(&token::Semi));
+            let body = VariantData::Tuple(self.parse_tuple_struct_body()?, ast::DUMMY_NODE_ID);
+            generics.where_clause = self.parse_where_clause()?;
+            self.expect(&token::Semi)?;
             body
         } else {
             let token_str = self.this_token_to_string();
@@ -5142,13 +5222,11 @@ impl<'a> Parser<'a> {
         Ok((class_name, ItemKind::Struct(vdata, generics), None))
     }
 
-    pub fn parse_record_struct_body(&mut self,
-                                    parse_pub: ParsePub)
-                                    -> PResult<'a, Vec<StructField>> {
+    pub fn parse_record_struct_body(&mut self) -> PResult<'a, Vec<StructField>> {
         let mut fields = Vec::new();
         if self.eat(&token::OpenDelim(token::Brace)) {
             while self.token != token::CloseDelim(token::Brace) {
-                fields.push(try!(self.parse_struct_decl_field(parse_pub)));
+                fields.push(self.parse_struct_decl_field()?);
             }
 
             self.bump();
@@ -5162,32 +5240,27 @@ impl<'a> Parser<'a> {
         Ok(fields)
     }
 
-    pub fn parse_tuple_struct_body(&mut self,
-                                   parse_pub: ParsePub)
-                                   -> PResult<'a, Vec<StructField>> {
+    pub fn parse_tuple_struct_body(&mut self) -> PResult<'a, Vec<StructField>> {
         // This is the case where we find `struct Foo<T>(T) where T: Copy;`
         // Unit like structs are handled in parse_item_struct function
-        let fields = try!(self.parse_unspanned_seq(
+        let fields = self.parse_unspanned_seq(
             &token::OpenDelim(token::Paren),
             &token::CloseDelim(token::Paren),
             SeqSep::trailing_allowed(token::Comma),
             |p| {
-                let attrs = try!(p.parse_outer_attributes());
+                let attrs = p.parse_outer_attributes()?;
                 let lo = p.span.lo;
-                let struct_field_ = ast::StructField_ {
-                    kind: UnnamedField (
-                        if parse_pub == ParsePub::Yes {
-                            try!(p.parse_visibility())
-                        } else {
-                            Visibility::Inherited
-                        }
-                    ),
+                let vis = p.parse_visibility()?;
+                let ty = p.parse_ty_sum()?;
+                Ok(StructField {
+                    span: mk_sp(lo, p.span.hi),
+                    vis: vis,
+                    ident: None,
                     id: ast::DUMMY_NODE_ID,
-                    ty: try!(p.parse_ty_sum()),
+                    ty: ty,
                     attrs: attrs,
-                };
-                Ok(spanned(lo, p.span.hi, struct_field_))
-            }));
+                })
+            })?;
 
         Ok(fields)
     }
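With the ParsePub flag gone, parse_tuple_struct_body now runs every unnamed field through parse_visibility, so tuple-struct fields get the same per-field pub handling as named fields. For illustration, the kind of declaration that path accepts:

// Per-field visibility on a tuple struct.
mod geometry {
    pub struct Point(pub f64, pub f64, f64); // the last field stays private

    impl Point {
        pub fn new(x: f64, y: f64) -> Point {
            Point(x, y, (x * x + y * y).sqrt())
        }
        pub fn norm(&self) -> f64 {
            self.2
        }
    }
}

fn main() {
    let p = geometry::Point::new(3.0, 4.0);
    assert_eq!((p.0, p.1, p.norm()), (3.0, 4.0, 5.0));
}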
@@ -5197,7 +5270,7 @@ impl<'a> Parser<'a> {
                                      vis: Visibility,
                                      attrs: Vec<Attribute> )
                                      -> PResult<'a, StructField> {
-        let a_var = try!(self.parse_name_and_ty(vis, attrs));
+        let a_var = self.parse_name_and_ty(vis, attrs)?;
         match self.token {
             token::Comma => {
                 self.bump();
@@ -5216,15 +5289,11 @@ impl<'a> Parser<'a> {
     }
 
     /// Parse an element of a struct definition
-    fn parse_struct_decl_field(&mut self, parse_pub: ParsePub) -> PResult<'a, StructField> {
+    fn parse_struct_decl_field(&mut self) -> PResult<'a, StructField> {
 
-        let attrs = try!(self.parse_outer_attributes());
+        let attrs = self.parse_outer_attributes()?;
 
         if self.eat_keyword(keywords::Pub) {
-            if parse_pub == ParsePub::No {
-                let span = self.last_span;
-                self.span_err(span, "`pub` is not allowed here");
-            }
             return self.parse_single_struct_field(Visibility::Public, attrs);
         }
 
@@ -5237,10 +5306,19 @@ impl<'a> Parser<'a> {
         else { Ok(Visibility::Inherited) }
     }
 
+    /// Parse defaultness: DEFAULT or nothing
+    fn parse_defaultness(&mut self) -> PResult<'a, Defaultness> {
+        if self.eat_contextual_keyword(special_idents::DEFAULT) {
+            Ok(Defaultness::Default)
+        } else {
+            Ok(Defaultness::Final)
+        }
+    }
+
     /// Given a termination token, parse all of the items in a module
     fn parse_mod_items(&mut self, term: &token::Token, inner_lo: BytePos) -> PResult<'a, Mod> {
         let mut items = vec![];
-        while let Some(item) = try!(self.parse_item()) {
+        while let Some(item) = self.parse_item()? {
             items.push(item);
         }
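The new parse_defaultness only looks for the contextual keyword default: if that identifier is present it is consumed and the item is Defaultness::Default, otherwise it is Defaultness::Final; in an impl this would be the default fn form used by specialization (an inference from the names, not something the diff spells out). A minimal sketch of the same eat-a-contextual-keyword pattern over a plain token list (the names are illustrative, not the libsyntax API):

#[derive(Debug, PartialEq)]
enum Defaultness {
    Default,
    Final,
}

// Minimal stand-in for a parser that can consume a contextual keyword.
struct Toks<'a> {
    toks: &'a [&'a str],
    pos: usize,
}

impl<'a> Toks<'a> {
    fn eat_contextual_keyword(&mut self, kw: &str) -> bool {
        if self.toks.get(self.pos) == Some(&kw) {
            self.pos += 1; // consume it only when it matches
            true
        } else {
            false
        }
    }

    fn parse_defaultness(&mut self) -> Defaultness {
        // `default` is not a reserved word; it only matters in this position.
        if self.eat_contextual_keyword("default") {
            Defaultness::Default
        } else {
            Defaultness::Final
        }
    }
}

fn main() {
    let mut specialized = Toks { toks: &["default", "fn", "len"], pos: 0 };
    let mut plain = Toks { toks: &["fn", "len"], pos: 0 };
    assert_eq!(specialized.parse_defaultness(), Defaultness::Default);
    assert_eq!(plain.parse_defaultness(), Defaultness::Final);
}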
 
@@ -5262,12 +5340,12 @@ impl<'a> Parser<'a> {
     }
 
     fn parse_item_const(&mut self, m: Option<Mutability>) -> PResult<'a, ItemInfo> {
-        let id = try!(self.parse_ident());
-        try!(self.expect(&token::Colon));
-        let ty = try!(self.parse_ty_sum());
-        try!(self.expect(&token::Eq));
-        let e = try!(self.parse_expr());
-        try!(self.commit_expr_expecting(&e, token::Semi));
+        let id = self.parse_ident()?;
+        self.expect(&token::Colon)?;
+        let ty = self.parse_ty_sum()?;
+        self.expect(&token::Eq)?;
+        let e = self.parse_expr()?;
+        self.commit_expr_expecting(&e, token::Semi)?;
         let item = match m {
             Some(m) => ItemKind::Static(ty, m, e),
             None => ItemKind::Const(ty, e),
@@ -5278,18 +5356,18 @@ impl<'a> Parser<'a> {
     /// Parse a `mod <foo> { ... }` or `mod <foo>;` item
     fn parse_item_mod(&mut self, outer_attrs: &[Attribute]) -> PResult<'a, ItemInfo> {
         let id_span = self.span;
-        let id = try!(self.parse_ident());
+        let id = self.parse_ident()?;
         if self.check(&token::Semi) {
             self.bump();
             // This mod is in an external file. Let's go get it!
-            let (m, attrs) = try!(self.eval_src_mod(id, outer_attrs, id_span));
+            let (m, attrs) = self.eval_src_mod(id, outer_attrs, id_span)?;
             Ok((id, m, Some(attrs)))
         } else {
             self.push_mod_path(id, outer_attrs);
-            try!(self.expect(&token::OpenDelim(token::Brace)));
+            self.expect(&token::OpenDelim(token::Brace))?;
             let mod_inner_lo = self.span.lo;
-            let attrs = try!(self.parse_inner_attributes());
-            let m = try!(self.parse_mod_items(&token::CloseDelim(token::Brace), mod_inner_lo));
+            let attrs = self.parse_inner_attributes()?;
+            let m = self.parse_mod_items(&token::CloseDelim(token::Brace), mod_inner_lo)?;
             self.pop_mod_path();
             Ok((id, ItemKind::Mod(m), Some(attrs)))
         }
@@ -5353,7 +5431,7 @@ impl<'a> Parser<'a> {
                    id: ast::Ident,
                    outer_attrs: &[ast::Attribute],
                    id_sp: Span) -> PResult<'a, ModulePathSuccess> {
-        let mut prefix = PathBuf::from(&self.sess.codemap().span_to_filename(self.span));
+        let mut prefix = PathBuf::from(self.filename.as_ref().unwrap());
         prefix.pop();
         let mut dir_path = prefix;
         for part in &self.mod_path_stack {
@@ -5408,9 +5486,9 @@ impl<'a> Parser<'a> {
                     outer_attrs: &[ast::Attribute],
                     id_sp: Span)
                     -> PResult<'a, (ast::ItemKind, Vec<ast::Attribute> )> {
-        let ModulePathSuccess { path, owns_directory } = try!(self.submod_path(id,
-                                                                               outer_attrs,
-                                                                               id_sp));
+        let ModulePathSuccess { path, owns_directory } = self.submod_path(id,
+                                                                          outer_attrs,
+                                                                          id_sp)?;
 
         self.eval_src_mod_from_path(path,
                                     owns_directory,
@@ -5447,8 +5525,8 @@ impl<'a> Parser<'a> {
                                               Some(name),
                                               id_sp);
         let mod_inner_lo = p0.span.lo;
-        let mod_attrs = try!(p0.parse_inner_attributes());
-        let m0 = try!(p0.parse_mod_items(&token::Eof, mod_inner_lo));
+        let mod_attrs = p0.parse_inner_attributes()?;
+        let m0 = p0.parse_mod_items(&token::Eof, mod_inner_lo)?;
         self.sess.included_mod_stack.borrow_mut().pop();
         Ok((ast::ItemKind::Mod(m0), mod_attrs))
     }
@@ -5456,13 +5534,13 @@ impl<'a> Parser<'a> {
     /// Parse a function declaration from a foreign module
     fn parse_item_foreign_fn(&mut self, vis: ast::Visibility, lo: BytePos,
                              attrs: Vec<Attribute>) -> PResult<'a, ForeignItem> {
-        try!(self.expect_keyword(keywords::Fn));
+        self.expect_keyword(keywords::Fn)?;
 
-        let (ident, mut generics) = try!(self.parse_fn_header());
-        let decl = try!(self.parse_fn_decl(true));
-        generics.where_clause = try!(self.parse_where_clause());
+        let (ident, mut generics) = self.parse_fn_header()?;
+        let decl = self.parse_fn_decl(true)?;
+        generics.where_clause = self.parse_where_clause()?;
         let hi = self.span.hi;
-        try!(self.expect(&token::Semi));
+        self.expect(&token::Semi)?;
         Ok(ast::ForeignItem {
             ident: ident,
             attrs: attrs,
@@ -5476,14 +5554,14 @@ impl<'a> Parser<'a> {
     /// Parse a static item from a foreign module
     fn parse_item_foreign_static(&mut self, vis: ast::Visibility, lo: BytePos,
                                  attrs: Vec<Attribute>) -> PResult<'a, ForeignItem> {
-        try!(self.expect_keyword(keywords::Static));
+        self.expect_keyword(keywords::Static)?;
         let mutbl = self.eat_keyword(keywords::Mut);
 
-        let ident = try!(self.parse_ident());
-        try!(self.expect(&token::Colon));
-        let ty = try!(self.parse_ty_sum());
+        let ident = self.parse_ident()?;
+        self.expect(&token::Colon)?;
+        let ty = self.parse_ty_sum()?;
         let hi = self.span.hi;
-        try!(self.expect(&token::Semi));
+        self.expect(&token::Semi)?;
         Ok(ForeignItem {
             ident: ident,
             attrs: attrs,
@@ -5506,13 +5584,13 @@ impl<'a> Parser<'a> {
                                attrs: Vec<Attribute>)
                                 -> PResult<'a, P<Item>> {
 
-        let crate_name = try!(self.parse_ident());
-        let (maybe_path, ident) = if let Some(ident) = try!(self.parse_rename()) {
+        let crate_name = self.parse_ident()?;
+        let (maybe_path, ident) = if let Some(ident) = self.parse_rename()? {
             (Some(crate_name.name), ident)
         } else {
             (None, crate_name)
         };
-        try!(self.expect(&token::Semi));
+        self.expect(&token::Semi)?;
 
         let last_span = self.last_span;
         Ok(self.mk_item(lo,
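parse_item_extern_crate reads the crate name and then an optional rename via parse_rename, so maybe_path carries the original name only when an `as` clause is present. Both spellings it accepts, using libcore only because it is always available:

    extern crate core;                  // no rename: maybe_path is None
    extern crate core as kernel_core;   // renamed:   maybe_path is Some("core")

    fn main() {
        // After the rename, both names refer to the same crate.
        assert_eq!(core::mem::size_of::<u32>(),
                   kernel_core::mem::size_of::<u32>());
    }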
@@ -5539,17 +5617,17 @@ impl<'a> Parser<'a> {
                               visibility: Visibility,
                               mut attrs: Vec<Attribute>)
                               -> PResult<'a, P<Item>> {
-        try!(self.expect(&token::OpenDelim(token::Brace)));
+        self.expect(&token::OpenDelim(token::Brace))?;
 
         let abi = opt_abi.unwrap_or(Abi::C);
 
-        attrs.extend(try!(self.parse_inner_attributes()));
+        attrs.extend(self.parse_inner_attributes()?);
 
         let mut foreign_items = vec![];
-        while let Some(item) = try!(self.parse_foreign_item()) {
+        while let Some(item) = self.parse_foreign_item()? {
             foreign_items.push(item);
         }
-        try!(self.expect(&token::CloseDelim(token::Brace)));
+        self.expect(&token::CloseDelim(token::Brace))?;
 
         let last_span = self.last_span;
         let m = ast::ForeignMod {
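parse_item_foreign_mod parses the braces and inner attributes of an `extern` block and loops over parse_foreign_item, which dispatches to the foreign fn and foreign static parsers above. The corresponding surface syntax, using C's abs purely as a well-known symbol:

    extern "C" {
        fn abs(input: i32) -> i32;   // handled by parse_item_foreign_fn
        // `static` declarations are also legal here and would go through
        // parse_item_foreign_static.
    }

    fn main() {
        // Calling into a foreign function is unsafe.
        let three = unsafe { abs(-3) };
        assert_eq!(three, 3);
    }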
@@ -5566,12 +5644,12 @@ impl<'a> Parser<'a> {
 
     /// Parse type Foo = Bar;
     fn parse_item_type(&mut self) -> PResult<'a, ItemInfo> {
-        let ident = try!(self.parse_ident());
-        let mut tps = try!(self.parse_generics());
-        tps.where_clause = try!(self.parse_where_clause());
-        try!(self.expect(&token::Eq));
-        let ty = try!(self.parse_ty_sum());
-        try!(self.expect(&token::Semi));
+        let ident = self.parse_ident()?;
+        let mut tps = self.parse_generics()?;
+        tps.where_clause = self.parse_where_clause()?;
+        self.expect(&token::Eq)?;
+        let ty = self.parse_ty_sum()?;
+        self.expect(&token::Semi)?;
         Ok((ident, ItemKind::Ty(ty, tps), None))
     }
 
@@ -5581,23 +5659,23 @@ impl<'a> Parser<'a> {
         let mut all_nullary = true;
         let mut any_disr = None;
         while self.token != token::CloseDelim(token::Brace) {
-            let variant_attrs = try!(self.parse_outer_attributes());
+            let variant_attrs = self.parse_outer_attributes()?;
             let vlo = self.span.lo;
 
             let struct_def;
             let mut disr_expr = None;
-            let ident = try!(self.parse_ident());
+            let ident = self.parse_ident()?;
             if self.check(&token::OpenDelim(token::Brace)) {
                 // Parse a struct variant.
                 all_nullary = false;
-                struct_def = VariantData::Struct(try!(self.parse_record_struct_body(ParsePub::No)),
+                struct_def = VariantData::Struct(self.parse_record_struct_body()?,
                                                  ast::DUMMY_NODE_ID);
             } else if self.check(&token::OpenDelim(token::Paren)) {
                 all_nullary = false;
-                struct_def = VariantData::Tuple(try!(self.parse_tuple_struct_body(ParsePub::No)),
+                struct_def = VariantData::Tuple(self.parse_tuple_struct_body()?,
                                                 ast::DUMMY_NODE_ID);
             } else if self.eat(&token::Eq) {
-                disr_expr = Some(try!(self.parse_expr()));
+                disr_expr = Some(self.parse_expr()?);
                 any_disr = disr_expr.as_ref().map(|expr| expr.span);
                 struct_def = VariantData::Unit(ast::DUMMY_NODE_ID);
             } else {
@@ -5614,7 +5692,7 @@ impl<'a> Parser<'a> {
 
             if !self.eat(&token::Comma) { break; }
         }
-        try!(self.expect(&token::CloseDelim(token::Brace)));
+        self.expect(&token::CloseDelim(token::Brace))?;
         match any_disr {
             Some(disr_span) if !all_nullary =>
                 self.span_err(disr_span,
@@ -5627,12 +5705,12 @@ impl<'a> Parser<'a> {
 
     /// Parse an "enum" declaration
     fn parse_item_enum(&mut self) -> PResult<'a, ItemInfo> {
-        let id = try!(self.parse_ident());
-        let mut generics = try!(self.parse_generics());
-        generics.where_clause = try!(self.parse_where_clause());
-        try!(self.expect(&token::OpenDelim(token::Brace)));
+        let id = self.parse_ident()?;
+        let mut generics = self.parse_generics()?;
+        generics.where_clause = self.parse_where_clause()?;
+        self.expect(&token::OpenDelim(token::Brace))?;
 
-        let enum_definition = try!(self.parse_enum_def(&generics));
+        let enum_definition = self.parse_enum_def(&generics)?;
         Ok((id, ItemKind::Enum(enum_definition, generics), None))
     }
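parse_enum_def accepts struct, tuple, and unit variants, and the any_disr/all_nullary bookkeeping above enforces that explicit discriminants only appear on enums whose variants are all unit ("c-like"). In ordinary Rust:

    enum Shape {                     // mixes the three variant forms:
        Circle { radius: f64 },      //   VariantData::Struct
        Segment(i32, i32),           //   VariantData::Tuple
        Empty,                       //   VariantData::Unit
    }

    enum Status {                    // all-unit, so discriminants are allowed
        Ok = 0,
        NotFound = 404,
    }

    fn main() {
        if let Shape::Segment(x, y) = Shape::Segment(1, 2) {
            assert_eq!(x + y, 3);
        }
        assert_eq!(Status::NotFound as u32, 404);
        let _ = (Shape::Circle { radius: 1.0 }, Shape::Empty, Status::Ok);
    }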
 
@@ -5687,12 +5765,12 @@ impl<'a> Parser<'a> {
 
         let lo = self.span.lo;
 
-        let visibility = try!(self.parse_visibility());
+        let visibility = self.parse_visibility()?;
 
         if self.eat_keyword(keywords::Use) {
             // USE ITEM
-            let item_ = ItemKind::Use(try!(self.parse_view_path()));
-            try!(self.expect(&token::Semi));
+            let item_ = ItemKind::Use(self.parse_view_path()?);
+            self.expect(&token::Semi)?;
 
             let last_span = self.last_span;
             let item = self.mk_item(lo,
@@ -5706,16 +5784,16 @@ impl<'a> Parser<'a> {
 
         if self.eat_keyword(keywords::Extern) {
             if self.eat_keyword(keywords::Crate) {
-                return Ok(Some(try!(self.parse_item_extern_crate(lo, visibility, attrs))));
+                return Ok(Some(self.parse_item_extern_crate(lo, visibility, attrs)?));
             }
 
-            let opt_abi = try!(self.parse_opt_abi());
+            let opt_abi = self.parse_opt_abi()?;
 
             if self.eat_keyword(keywords::Fn) {
                 // EXTERN FUNCTION ITEM
                 let abi = opt_abi.unwrap_or(Abi::C);
                 let (ident, item_, extra_attrs) =
-                    try!(self.parse_item_fn(Unsafety::Normal, Constness::NotConst, abi));
+                    self.parse_item_fn(Unsafety::Normal, Constness::NotConst, abi)?;
                 let last_span = self.last_span;
                 let item = self.mk_item(lo,
                                         last_span.hi,
@@ -5725,10 +5803,10 @@ impl<'a> Parser<'a> {
                                         maybe_append(attrs, extra_attrs));
                 return Ok(Some(item));
             } else if self.check(&token::OpenDelim(token::Brace)) {
-                return Ok(Some(try!(self.parse_item_foreign_mod(lo, opt_abi, visibility, attrs))));
+                return Ok(Some(self.parse_item_foreign_mod(lo, opt_abi, visibility, attrs)?));
             }
 
-            try!(self.unexpected());
+            self.unexpected()?;
         }
 
         if self.eat_keyword(keywords::Static) {
@@ -5738,7 +5816,7 @@ impl<'a> Parser<'a> {
             } else {
                 Mutability::Immutable
             };
-            let (ident, item_, extra_attrs) = try!(self.parse_item_const(Some(m)));
+            let (ident, item_, extra_attrs) = self.parse_item_const(Some(m))?;
             let last_span = self.last_span;
             let item = self.mk_item(lo,
                                     last_span.hi,
@@ -5760,7 +5838,7 @@ impl<'a> Parser<'a> {
                 };
                 self.bump();
                 let (ident, item_, extra_attrs) =
-                    try!(self.parse_item_fn(unsafety, Constness::Const, Abi::Rust));
+                    self.parse_item_fn(unsafety, Constness::Const, Abi::Rust)?;
                 let last_span = self.last_span;
                 let item = self.mk_item(lo,
                                         last_span.hi,
@@ -5778,7 +5856,7 @@ impl<'a> Parser<'a> {
                                  .fileline_help(last_span, "did you mean to declare a static?")
                                  .emit();
             }
-            let (ident, item_, extra_attrs) = try!(self.parse_item_const(None));
+            let (ident, item_, extra_attrs) = self.parse_item_const(None)?;
             let last_span = self.last_span;
             let item = self.mk_item(lo,
                                     last_span.hi,
@@ -5792,10 +5870,10 @@ impl<'a> Parser<'a> {
             self.look_ahead(1, |t| t.is_keyword(keywords::Trait))
         {
             // UNSAFE TRAIT ITEM
-            try!(self.expect_keyword(keywords::Unsafe));
-            try!(self.expect_keyword(keywords::Trait));
+            self.expect_keyword(keywords::Unsafe)?;
+            self.expect_keyword(keywords::Trait)?;
             let (ident, item_, extra_attrs) =
-                try!(self.parse_item_trait(ast::Unsafety::Unsafe));
+                self.parse_item_trait(ast::Unsafety::Unsafe)?;
             let last_span = self.last_span;
             let item = self.mk_item(lo,
                                     last_span.hi,
@@ -5809,9 +5887,9 @@ impl<'a> Parser<'a> {
             self.look_ahead(1, |t| t.is_keyword(keywords::Impl))
         {
             // IMPL ITEM
-            try!(self.expect_keyword(keywords::Unsafe));
-            try!(self.expect_keyword(keywords::Impl));
-            let (ident, item_, extra_attrs) = try!(self.parse_item_impl(ast::Unsafety::Unsafe));
+            self.expect_keyword(keywords::Unsafe)?;
+            self.expect_keyword(keywords::Impl)?;
+            let (ident, item_, extra_attrs) = self.parse_item_impl(ast::Unsafety::Unsafe)?;
             let last_span = self.last_span;
             let item = self.mk_item(lo,
                                     last_span.hi,
@@ -5825,7 +5903,7 @@ impl<'a> Parser<'a> {
             // FUNCTION ITEM
             self.bump();
             let (ident, item_, extra_attrs) =
-                try!(self.parse_item_fn(Unsafety::Normal, Constness::NotConst, Abi::Rust));
+                self.parse_item_fn(Unsafety::Normal, Constness::NotConst, Abi::Rust)?;
             let last_span = self.last_span;
             let item = self.mk_item(lo,
                                     last_span.hi,
@@ -5840,13 +5918,13 @@ impl<'a> Parser<'a> {
             // UNSAFE FUNCTION ITEM
             self.bump();
             let abi = if self.eat_keyword(keywords::Extern) {
-                try!(self.parse_opt_abi()).unwrap_or(Abi::C)
+                self.parse_opt_abi()?.unwrap_or(Abi::C)
             } else {
                 Abi::Rust
             };
-            try!(self.expect_keyword(keywords::Fn));
+            self.expect_keyword(keywords::Fn)?;
             let (ident, item_, extra_attrs) =
-                try!(self.parse_item_fn(Unsafety::Unsafe, Constness::NotConst, abi));
+                self.parse_item_fn(Unsafety::Unsafe, Constness::NotConst, abi)?;
             let last_span = self.last_span;
             let item = self.mk_item(lo,
                                     last_span.hi,
@@ -5859,7 +5937,7 @@ impl<'a> Parser<'a> {
         if self.eat_keyword(keywords::Mod) {
             // MODULE ITEM
             let (ident, item_, extra_attrs) =
-                try!(self.parse_item_mod(&attrs[..]));
+                self.parse_item_mod(&attrs[..])?;
             let last_span = self.last_span;
             let item = self.mk_item(lo,
                                     last_span.hi,
@@ -5871,7 +5949,7 @@ impl<'a> Parser<'a> {
         }
         if self.eat_keyword(keywords::Type) {
             // TYPE ITEM
-            let (ident, item_, extra_attrs) = try!(self.parse_item_type());
+            let (ident, item_, extra_attrs) = self.parse_item_type()?;
             let last_span = self.last_span;
             let item = self.mk_item(lo,
                                     last_span.hi,
@@ -5883,7 +5961,7 @@ impl<'a> Parser<'a> {
         }
         if self.eat_keyword(keywords::Enum) {
             // ENUM ITEM
-            let (ident, item_, extra_attrs) = try!(self.parse_item_enum());
+            let (ident, item_, extra_attrs) = self.parse_item_enum()?;
             let last_span = self.last_span;
             let item = self.mk_item(lo,
                                     last_span.hi,
@@ -5896,7 +5974,7 @@ impl<'a> Parser<'a> {
         if self.eat_keyword(keywords::Trait) {
             // TRAIT ITEM
             let (ident, item_, extra_attrs) =
-                try!(self.parse_item_trait(ast::Unsafety::Normal));
+                self.parse_item_trait(ast::Unsafety::Normal)?;
             let last_span = self.last_span;
             let item = self.mk_item(lo,
                                     last_span.hi,
@@ -5908,7 +5986,7 @@ impl<'a> Parser<'a> {
         }
         if self.eat_keyword(keywords::Impl) {
             // IMPL ITEM
-            let (ident, item_, extra_attrs) = try!(self.parse_item_impl(ast::Unsafety::Normal));
+            let (ident, item_, extra_attrs) = self.parse_item_impl(ast::Unsafety::Normal)?;
             let last_span = self.last_span;
             let item = self.mk_item(lo,
                                     last_span.hi,
@@ -5920,7 +5998,7 @@ impl<'a> Parser<'a> {
         }
         if self.eat_keyword(keywords::Struct) {
             // STRUCT ITEM
-            let (ident, item_, extra_attrs) = try!(self.parse_item_struct());
+            let (ident, item_, extra_attrs) = self.parse_item_struct()?;
             let last_span = self.last_span;
             let item = self.mk_item(lo,
                                     last_span.hi,
@@ -5935,21 +6013,21 @@ impl<'a> Parser<'a> {
 
     /// Parse a foreign item.
     fn parse_foreign_item(&mut self) -> PResult<'a, Option<ForeignItem>> {
-        let attrs = try!(self.parse_outer_attributes());
+        let attrs = self.parse_outer_attributes()?;
         let lo = self.span.lo;
-        let visibility = try!(self.parse_visibility());
+        let visibility = self.parse_visibility()?;
 
         if self.check_keyword(keywords::Static) {
             // FOREIGN STATIC ITEM
-            return Ok(Some(try!(self.parse_item_foreign_static(visibility, lo, attrs))));
+            return Ok(Some(self.parse_item_foreign_static(visibility, lo, attrs)?));
         }
         if self.check_keyword(keywords::Fn) || self.check_keyword(keywords::Unsafe) {
             // FOREIGN FUNCTION ITEM
-            return Ok(Some(try!(self.parse_item_foreign_fn(visibility, lo, attrs))));
+            return Ok(Some(self.parse_item_foreign_fn(visibility, lo, attrs)?));
         }
 
         // FIXME #5668: this will occur for a macro invocation:
-        match try!(self.parse_macro_use_or_failure(attrs, true, false, lo, visibility)) {
+        match self.parse_macro_use_or_failure(attrs, true, false, lo, visibility)? {
             Some(item) => {
                 return Err(self.span_fatal(item.span, "macros cannot expand to foreign items"));
             }
@@ -5974,27 +6052,27 @@ impl<'a> Parser<'a> {
             // MACRO INVOCATION ITEM
 
             let last_span = self.last_span;
-            self.complain_if_pub_macro(visibility, last_span);
+            self.complain_if_pub_macro(&visibility, last_span);
 
             let mac_lo = self.span.lo;
 
             // item macro.
-            let pth = try!(self.parse_path(NoTypesAllowed));
-            try!(self.expect(&token::Not));
+            let pth = self.parse_path(NoTypesAllowed)?;
+            self.expect(&token::Not)?;
 
             // a 'special' identifier (like what `macro_rules!` uses)
             // is optional. We should eventually unify invoc syntax
             // and remove this.
             let id = if self.token.is_plain_ident() {
-                try!(self.parse_ident())
+                self.parse_ident()?
             } else {
                 token::special_idents::invalid // no special identifier
             };
             // eat a matched-delimiter token tree:
-            let delim = try!(self.expect_open_delim());
-            let tts = try!(self.parse_seq_to_end(&token::CloseDelim(delim),
-                                                 SeqSep::none(),
-                                                 |p| p.parse_token_tree()));
+            let delim = self.expect_open_delim()?;
+            let tts = self.parse_seq_to_end(&token::CloseDelim(delim),
+                                            SeqSep::none(),
+                                            |p| p.parse_token_tree())?;
             // single-variant-enum... :
             let m = Mac_ { path: pth, tts: tts, ctxt: EMPTY_CTXT };
             let m: ast::Mac = codemap::Spanned { node: m,
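This branch handles macro invocations in item position: a path, `!`, an optional identifier (the `macro_rules!`-style name mentioned in the comment), and a delimited token tree. Both shapes in a small example:

    // Path `macro_rules`, optional identifier `make_fn`, braced token tree.
    macro_rules! make_fn {
        ($name:ident, $val:expr) => {
            fn $name() -> u32 { $val }
        };
    }

    // An item-position invocation with no extra identifier.
    make_fn!(forty_two, 42);

    fn main() {
        assert_eq!(forty_two(), 42);
    }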
@@ -6025,7 +6103,7 @@ impl<'a> Parser<'a> {
         // FAILURE TO PARSE ITEM
         match visibility {
             Visibility::Inherited => {}
-            Visibility::Public => {
+            _ => {
                 let last_span = self.last_span;
                 return Err(self.span_fatal(last_span, "unmatched visibility `pub`"));
             }
@@ -6038,7 +6116,7 @@ impl<'a> Parser<'a> {
     }
 
     pub fn parse_item(&mut self) -> PResult<'a, Option<P<Item>>> {
-        let attrs = try!(self.parse_outer_attributes());
+        let attrs = self.parse_outer_attributes()?;
         self.parse_item_(attrs, true, false)
     }
 
@@ -6053,24 +6131,24 @@ impl<'a> Parser<'a> {
 
         // Allow a leading :: because the paths are absolute either way.
         // This occurs with "use $crate::..." in macros.
-        self.eat(&token::ModSep);
+        let is_global = self.eat(&token::ModSep);
 
         if self.check(&token::OpenDelim(token::Brace)) {
             // use {foo,bar}
-            let idents = try!(self.parse_unspanned_seq(
+            let idents = self.parse_unspanned_seq(
                 &token::OpenDelim(token::Brace),
                 &token::CloseDelim(token::Brace),
                 SeqSep::trailing_allowed(token::Comma),
-                |p| p.parse_path_list_item()));
+                |p| p.parse_path_list_item())?;
             let path = ast::Path {
                 span: mk_sp(lo, self.span.hi),
-                global: false,
+                global: is_global,
                 segments: Vec::new()
             };
             return Ok(P(spanned(lo, self.span.hi, ViewPathList(path, idents))));
         }
 
-        let first_ident = try!(self.parse_ident());
+        let first_ident = self.parse_ident()?;
         let mut path = vec!(first_ident);
         if let token::ModSep = self.token {
             // foo::bar or foo::{a,b,c} or foo::*
@@ -6079,21 +6157,21 @@ impl<'a> Parser<'a> {
 
                 match self.token {
                   token::Ident(..) => {
-                    let ident = try!(self.parse_ident());
+                    let ident = self.parse_ident()?;
                     path.push(ident);
                   }
 
                   // foo::bar::{a,b,c}
                   token::OpenDelim(token::Brace) => {
-                    let idents = try!(self.parse_unspanned_seq(
+                    let idents = self.parse_unspanned_seq(
                         &token::OpenDelim(token::Brace),
                         &token::CloseDelim(token::Brace),
                         SeqSep::trailing_allowed(token::Comma),
                         |p| p.parse_path_list_item()
-                    ));
+                    )?;
                     let path = ast::Path {
                         span: mk_sp(lo, self.span.hi),
-                        global: false,
+                        global: is_global,
                         segments: path.into_iter().map(|identifier| {
                             ast::PathSegment {
                                 identifier: identifier,
@@ -6109,7 +6187,7 @@ impl<'a> Parser<'a> {
                     self.bump();
                     let path = ast::Path {
                         span: mk_sp(lo, self.span.hi),
-                        global: false,
+                        global: is_global,
                         segments: path.into_iter().map(|identifier| {
                             ast::PathSegment {
                                 identifier: identifier,
@@ -6132,7 +6210,7 @@ impl<'a> Parser<'a> {
         let mut rename_to = path[path.len() - 1];
         let path = ast::Path {
             span: mk_sp(lo, self.last_span.hi),
-            global: false,
+            global: is_global,
             segments: path.into_iter().map(|identifier| {
                 ast::PathSegment {
                     identifier: identifier,
@@ -6140,7 +6218,7 @@ impl<'a> Parser<'a> {
                 }
             }).collect()
         };
-        rename_to = try!(self.parse_rename()).unwrap_or(rename_to);
+        rename_to = self.parse_rename()?.unwrap_or(rename_to);
         Ok(P(spanned(lo, self.last_span.hi, ViewPathSimple(rename_to, path))))
     }
 
@@ -6157,8 +6235,8 @@ impl<'a> Parser<'a> {
     pub fn parse_crate_mod(&mut self) -> PResult<'a, Crate> {
         let lo = self.span.lo;
         Ok(ast::Crate {
-            attrs: try!(self.parse_inner_attributes()),
-            module: try!(self.parse_mod_items(&token::Eof, lo)),
+            attrs: self.parse_inner_attributes()?,
+            module: self.parse_mod_items(&token::Eof, lo)?,
             config: self.cfg.clone(),
             span: mk_sp(lo, self.span.lo),
             exported_macros: Vec::new(),
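The is_global change threads the presence of a leading `::` through to the generated ast::Path instead of hard-coding `global: false`, so absolute use paths (including the `use $crate::...` forms mentioned in the comment) keep that information. The kinds of declaration affected, as a small stand-alone file:

    #![allow(unused_imports)]

    // A leading `::` means "resolve from the crate root"; the parser now
    // records it as `global: true` on the resulting view path.
    use ::std::collections::HashMap;      // ViewPathSimple
    use ::std::io::{Read, Write};         // ViewPathList

    fn main() {
        let map: HashMap<&str, u32> = HashMap::new();
        assert!(map.is_empty());
    }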
index accbb54c629b243220c4bdb33caa5f39349af5b4..16417ac004461f685f0d5a04756565f73aba8e9f 100644 (file)
@@ -196,7 +196,7 @@ impl Token {
             BinOp(Or)                   => true, // in lambda syntax
             OrOr                        => true, // in lambda syntax
             AndAnd                      => true, // double borrow
-            DotDot                      => true, // range notation
+            DotDot | DotDotDot          => true, // range notation
             ModSep                      => true,
             Interpolated(NtExpr(..))    => true,
             Interpolated(NtIdent(..))   => true,
@@ -514,7 +514,7 @@ macro_rules! declare_special_idents_and_keywords {(
 // If the special idents get renumbered, remember to modify these two as appropriate
 pub const SELF_KEYWORD_NAME: ast::Name = ast::Name(SELF_KEYWORD_NAME_NUM);
 const STATIC_KEYWORD_NAME: ast::Name = ast::Name(STATIC_KEYWORD_NAME_NUM);
-const SUPER_KEYWORD_NAME: ast::Name = ast::Name(SUPER_KEYWORD_NAME_NUM);
+pub const SUPER_KEYWORD_NAME: ast::Name = ast::Name(SUPER_KEYWORD_NAME_NUM);
 const SELF_TYPE_KEYWORD_NAME: ast::Name = ast::Name(SELF_TYPE_KEYWORD_NAME_NUM);
 
 pub const SELF_KEYWORD_NAME_NUM: u32 = 1;
@@ -542,69 +542,70 @@ declare_special_idents_and_keywords! {
         // outside of libsyntax
         (7,                          clownshoe_abi,          "__rust_abi");
         (8,                          opaque,                 "<opaque>");
-        (9,                          unnamed_field,          "<unnamed_field>");
+        (9,                          __unused1,              "<__unused1>");
         (super::SELF_TYPE_KEYWORD_NAME_NUM, type_self,       "Self");
         (11,                         prelude_import,         "prelude_import");
+        (12,                         DEFAULT,                "default");
     }
 
     pub mod keywords {
         // These ones are variants of the Keyword enum
 
         'strict:
-        (12,                         As,         "as");
-        (13,                         Break,      "break");
-        (14,                         Crate,      "crate");
-        (15,                         Else,       "else");
-        (16,                         Enum,       "enum");
-        (17,                         Extern,     "extern");
-        (18,                         False,      "false");
-        (19,                         Fn,         "fn");
-        (20,                         For,        "for");
-        (21,                         If,         "if");
-        (22,                         Impl,       "impl");
-        (23,                         In,         "in");
-        (24,                         Let,        "let");
-        (25,                         Loop,       "loop");
-        (26,                         Match,      "match");
-        (27,                         Mod,        "mod");
-        (28,                         Move,       "move");
-        (29,                         Mut,        "mut");
-        (30,                         Pub,        "pub");
-        (31,                         Ref,        "ref");
-        (32,                         Return,     "return");
+        (13,                         As,         "as");
+        (14,                         Break,      "break");
+        (15,                         Crate,      "crate");
+        (16,                         Else,       "else");
+        (17,                         Enum,       "enum");
+        (18,                         Extern,     "extern");
+        (19,                         False,      "false");
+        (20,                         Fn,         "fn");
+        (21,                         For,        "for");
+        (22,                         If,         "if");
+        (23,                         Impl,       "impl");
+        (24,                         In,         "in");
+        (25,                         Let,        "let");
+        (26,                         Loop,       "loop");
+        (27,                         Match,      "match");
+        (28,                         Mod,        "mod");
+        (29,                         Move,       "move");
+        (30,                         Mut,        "mut");
+        (31,                         Pub,        "pub");
+        (32,                         Ref,        "ref");
+        (33,                         Return,     "return");
         // Static and Self are also special idents (prefill de-dupes)
         (super::STATIC_KEYWORD_NAME_NUM, Static, "static");
         (super::SELF_KEYWORD_NAME_NUM, SelfValue, "self");
         (super::SELF_TYPE_KEYWORD_NAME_NUM, SelfType, "Self");
-        (33,                         Struct,     "struct");
+        (34,                         Struct,     "struct");
         (super::SUPER_KEYWORD_NAME_NUM, Super,   "super");
-        (34,                         True,       "true");
-        (35,                         Trait,      "trait");
-        (36,                         Type,       "type");
-        (37,                         Unsafe,     "unsafe");
-        (38,                         Use,        "use");
-        (39,                         While,      "while");
-        (40,                         Continue,   "continue");
-        (41,                         Box,        "box");
-        (42,                         Const,      "const");
-        (43,                         Where,      "where");
+        (35,                         True,       "true");
+        (36,                         Trait,      "trait");
+        (37,                         Type,       "type");
+        (38,                         Unsafe,     "unsafe");
+        (39,                         Use,        "use");
+        (40,                         While,      "while");
+        (41,                         Continue,   "continue");
+        (42,                         Box,        "box");
+        (43,                         Const,      "const");
+        (44,                         Where,      "where");
         'reserved:
-        (44,                         Virtual,    "virtual");
-        (45,                         Proc,       "proc");
-        (46,                         Alignof,    "alignof");
-        (47,                         Become,     "become");
-        (48,                         Offsetof,   "offsetof");
-        (49,                         Priv,       "priv");
-        (50,                         Pure,       "pure");
-        (51,                         Sizeof,     "sizeof");
-        (52,                         Typeof,     "typeof");
-        (53,                         Unsized,    "unsized");
-        (54,                         Yield,      "yield");
-        (55,                         Do,         "do");
-        (56,                         Abstract,   "abstract");
-        (57,                         Final,      "final");
-        (58,                         Override,   "override");
-        (59,                         Macro,      "macro");
+        (45,                         Virtual,    "virtual");
+        (46,                         Proc,       "proc");
+        (47,                         Alignof,    "alignof");
+        (48,                         Become,     "become");
+        (49,                         Offsetof,   "offsetof");
+        (50,                         Priv,       "priv");
+        (51,                         Pure,       "pure");
+        (52,                         Sizeof,     "sizeof");
+        (53,                         Typeof,     "typeof");
+        (54,                         Unsized,    "unsized");
+        (55,                         Yield,      "yield");
+        (56,                         Do,         "do");
+        (57,                         Abstract,   "abstract");
+        (58,                         Final,      "final");
+        (59,                         Override,   "override");
+        (60,                         Macro,      "macro");
     }
 }
 
@@ -705,7 +706,7 @@ impl<'a> PartialEq<InternedString> for &'a str {
 
 impl Decodable for InternedString {
     fn decode<D: Decoder>(d: &mut D) -> Result<InternedString, D::Error> {
-        Ok(intern(try!(d.read_str()).as_ref()).as_str())
+        Ok(intern(d.read_str()?.as_ref()).as_str())
     }
 }
 
index cbbd5289a5a2d5415a846ebfa050725f96aeac85..c381a3a8437de3965f8cb18a265700a0cdd1f705 100644 (file)
@@ -168,8 +168,8 @@ pub fn mk_printer<'a>(out: Box<io::Write+'a>, linewidth: usize) -> Printer<'a> {
     let n: usize = 3 * linewidth;
     debug!("mk_printer {}", linewidth);
     let token = vec![Token::Eof; n];
-    let size = vec![0_isize; n];
-    let scan_stack = vec![0_usize; n];
+    let size = vec![0; n];
+    let scan_stack = vec![0; n];
     Printer {
         out: out,
         buf_len: n,
@@ -318,7 +318,7 @@ impl<'a> Printer<'a> {
           Token::Eof => {
             if !self.scan_stack_empty {
                 self.check_stack(0);
-                try!(self.advance_left());
+                self.advance_left()?;
             }
             self.indent(0);
             Ok(())
@@ -399,9 +399,9 @@ impl<'a> Printer<'a> {
                     self.size[scanned] = SIZE_INFINITY;
                 }
             }
-            try!(self.advance_left());
+            self.advance_left()?;
             if self.left != self.right {
-                try!(self.check_stream());
+                self.check_stream()?;
             }
         }
         Ok(())
@@ -464,7 +464,7 @@ impl<'a> Printer<'a> {
                 _ => 0
             };
 
-            try!(self.print(left, left_size));
+            self.print(left, left_size)?;
 
             self.left_total += len;
 
@@ -532,7 +532,7 @@ impl<'a> Printer<'a> {
     }
     pub fn print_str(&mut self, s: &str) -> io::Result<()> {
         while self.pending_indentation > 0 {
-            try!(write!(self.out, " "));
+            write!(self.out, " ")?;
             self.pending_indentation -= 1;
         }
         write!(self.out, "{}", s)
index b4e08d65a0a8d7a228aeb0b39cedb1399325ed53..fcd83b4104130294b1f0a471e487287572031e25 100644 (file)
@@ -123,16 +123,16 @@ pub fn print_crate<'a>(cm: &'a CodeMap,
         let list = attr::mk_list_item(InternedString::new("feature"),
                                       vec![prelude_import_meta]);
         let fake_attr = attr::mk_attr_inner(attr::mk_attr_id(), list);
-        try!(s.print_attribute(&fake_attr));
+        s.print_attribute(&fake_attr)?;
 
         // #![no_std]
         let no_std_meta = attr::mk_word_item(InternedString::new("no_std"));
         let fake_attr = attr::mk_attr_inner(attr::mk_attr_id(), no_std_meta);
-        try!(s.print_attribute(&fake_attr));
+        s.print_attribute(&fake_attr)?;
     }
 
-    try!(s.print_mod(&krate.module, &krate.attrs));
-    try!(s.print_remaining_comments());
+    s.print_mod(&krate.module, &krate.attrs)?;
+    s.print_remaining_comments()?;
     eof(&mut s.s)
 }
 
@@ -386,10 +386,10 @@ pub fn fun_to_string(decl: &ast::FnDecl,
                      generics: &ast::Generics)
                      -> String {
     to_string(|s| {
-        try!(s.head(""));
-        try!(s.print_fn(decl, unsafety, constness, Abi::Rust, Some(name),
-                        generics, opt_explicit_self, ast::Visibility::Inherited));
-        try!(s.end()); // Close the head box
+        s.head("")?;
+        s.print_fn(decl, unsafety, constness, Abi::Rust, Some(name),
+                   generics, opt_explicit_self, &ast::Visibility::Inherited)?;
+        s.end()?; // Close the head box
         s.end() // Close the outer box
     })
 }
@@ -397,9 +397,9 @@ pub fn fun_to_string(decl: &ast::FnDecl,
 pub fn block_to_string(blk: &ast::Block) -> String {
     to_string(|s| {
         // containing cbox, will be closed by print-block at }
-        try!(s.cbox(INDENT_UNIT));
+        s.cbox(INDENT_UNIT)?;
         // head-ibox, will be closed by print-block after {
-        try!(s.ibox(0));
+        s.ibox(0)?;
         s.print_block(blk)
     })
 }
@@ -432,9 +432,11 @@ pub fn mac_to_string(arg: &ast::Mac) -> String {
     to_string(|s| s.print_mac(arg, ::parse::token::Paren))
 }
 
-pub fn visibility_qualified(vis: ast::Visibility, s: &str) -> String {
-    match vis {
+pub fn visibility_qualified(vis: &ast::Visibility, s: &str) -> String {
+    match *vis {
         ast::Visibility::Public => format!("pub {}", s),
+        ast::Visibility::Crate => format!("pub(crate) {}", s),
+        ast::Visibility::Restricted { ref path, .. } => format!("pub({}) {}", path, s),
         ast::Visibility::Inherited => s.to_string()
     }
 }
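visibility_qualified now takes the visibility by reference and covers the new pub(crate) and pub(restricted) variants when prefixing an item string. A stand-alone sketch of the same mapping, with a local Vis enum standing in for ast::Visibility (the Restricted arm keeps only the path text):

    enum Vis {
        Public,
        Crate,
        Restricted(String),
        Inherited,
    }

    fn visibility_qualified(vis: &Vis, s: &str) -> String {
        match *vis {
            Vis::Public => format!("pub {}", s),
            Vis::Crate => format!("pub(crate) {}", s),
            Vis::Restricted(ref path) => format!("pub({}) {}", path, s),
            Vis::Inherited => s.to_string(),
        }
    }

    fn main() {
        assert_eq!(visibility_qualified(&Vis::Public, "struct Foo"), "pub struct Foo");
        assert_eq!(visibility_qualified(&Vis::Crate, "fn g()"), "pub(crate) fn g()");
        assert_eq!(visibility_qualified(&Vis::Restricted("super".into()), "fn f()"),
                   "pub(super) fn f()");
        assert_eq!(visibility_qualified(&Vis::Inherited, "mod m"), "mod m");
    }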
@@ -457,7 +459,7 @@ pub trait PrintState<'a> {
     fn literals(&self) -> &Option<Vec<comments::Literal>>;
 
     fn word_space(&mut self, w: &str) -> io::Result<()> {
-        try!(word(self.writer(), w));
+        word(self.writer(), w)?;
         space(self.writer())
     }
 
@@ -486,7 +488,7 @@ pub trait PrintState<'a> {
 
     fn hardbreak_if_not_bol(&mut self) -> io::Result<()> {
         if !self.is_bol() {
-            try!(hardbreak(self.writer()))
+            hardbreak(self.writer())?
         }
         Ok(())
     }
@@ -510,11 +512,11 @@ pub trait PrintState<'a> {
     fn commasep<T, F>(&mut self, b: Breaks, elts: &[T], mut op: F) -> io::Result<()>
         where F: FnMut(&mut Self, &T) -> io::Result<()>,
     {
-        try!(self.rbox(0, b));
+        self.rbox(0, b)?;
         let mut first = true;
         for elt in elts {
-            if first { first = false; } else { try!(self.word_space(",")); }
-            try!(op(self, elt));
+            if first { first = false; } else { self.word_space(",")?; }
+            op(self, elt)?;
         }
         self.end()
     }
@@ -546,7 +548,7 @@ pub trait PrintState<'a> {
             match self.next_comment() {
                 Some(ref cmnt) => {
                     if (*cmnt).pos < pos {
-                        try!(self.print_comment(cmnt));
+                        self.print_comment(cmnt)?;
                         self.cur_cmnt_and_lit().cur_cmnt += 1;
                     } else { break; }
                 }
@@ -561,34 +563,34 @@ pub trait PrintState<'a> {
         match cmnt.style {
             comments::Mixed => {
                 assert_eq!(cmnt.lines.len(), 1);
-                try!(zerobreak(self.writer()));
-                try!(word(self.writer(), &cmnt.lines[0]));
+                zerobreak(self.writer())?;
+                word(self.writer(), &cmnt.lines[0])?;
                 zerobreak(self.writer())
             }
             comments::Isolated => {
-                try!(self.hardbreak_if_not_bol());
+                self.hardbreak_if_not_bol()?;
                 for line in &cmnt.lines {
                     // Don't print empty lines because they will end up as trailing
                     // whitespace
                     if !line.is_empty() {
-                        try!(word(self.writer(), &line[..]));
+                        word(self.writer(), &line[..])?;
                     }
-                    try!(hardbreak(self.writer()));
+                    hardbreak(self.writer())?;
                 }
                 Ok(())
             }
             comments::Trailing => {
-                try!(word(self.writer(), " "));
+                word(self.writer(), " ")?;
                 if cmnt.lines.len() == 1 {
-                    try!(word(self.writer(), &cmnt.lines[0]));
+                    word(self.writer(), &cmnt.lines[0])?;
                     hardbreak(self.writer())
                 } else {
-                    try!(self.ibox(0));
+                    self.ibox(0)?;
                     for line in &cmnt.lines {
                         if !line.is_empty() {
-                            try!(word(self.writer(), &line[..]));
+                            word(self.writer(), &line[..])?;
                         }
-                        try!(hardbreak(self.writer()));
+                        hardbreak(self.writer())?;
                     }
                     self.end()
                 }
@@ -600,7 +602,7 @@ pub trait PrintState<'a> {
                     _ => false
                 };
                 if is_semi || self.is_begin() || self.is_end() {
-                    try!(hardbreak(self.writer()));
+                    hardbreak(self.writer())?;
                 }
                 hardbreak(self.writer())
             }
@@ -622,7 +624,7 @@ pub trait PrintState<'a> {
     }
 
     fn print_literal(&mut self, lit: &ast::Lit) -> io::Result<()> {
-        try!(self.maybe_print_comment(lit.span.lo));
+        self.maybe_print_comment(lit.span.lo)?;
         match self.next_lit(lit.span.lo) {
             Some(ref ltrl) => {
                 return word(self.writer(), &(*ltrl).lit);
@@ -728,15 +730,15 @@ pub trait PrintState<'a> {
         let mut count = 0;
         for attr in attrs {
             if attr.node.style == kind {
-                    try!(self.print_attribute_inline(attr, is_inline));
+                    self.print_attribute_inline(attr, is_inline)?;
                     if is_inline {
-                        try!(self.nbsp());
+                        self.nbsp()?;
                     }
                     count += 1;
             }
         }
         if count > 0 && trailing_hardbreak && !is_inline {
-            try!(self.hardbreak_if_not_bol());
+            self.hardbreak_if_not_bol()?;
         }
         Ok(())
     }
@@ -748,46 +750,47 @@ pub trait PrintState<'a> {
     fn print_attribute_inline(&mut self, attr: &ast::Attribute,
                               is_inline: bool) -> io::Result<()> {
         if !is_inline {
-            try!(self.hardbreak_if_not_bol());
+            self.hardbreak_if_not_bol()?;
         }
-        try!(self.maybe_print_comment(attr.span.lo));
+        self.maybe_print_comment(attr.span.lo)?;
         if attr.node.is_sugared_doc {
-            word(self.writer(), &attr.value_str().unwrap())
+            word(self.writer(), &attr.value_str().unwrap())?;
+            hardbreak(self.writer())
         } else {
             match attr.node.style {
-                ast::AttrStyle::Inner => try!(word(self.writer(), "#![")),
-                ast::AttrStyle::Outer => try!(word(self.writer(), "#[")),
+                ast::AttrStyle::Inner => word(self.writer(), "#![")?,
+                ast::AttrStyle::Outer => word(self.writer(), "#[")?,
             }
-            try!(self.print_meta_item(&attr.meta()));
+            self.print_meta_item(&attr.meta())?;
             word(self.writer(), "]")
         }
     }
 
     fn print_meta_item(&mut self, item: &ast::MetaItem) -> io::Result<()> {
-        try!(self.ibox(INDENT_UNIT));
+        self.ibox(INDENT_UNIT)?;
         match item.node {
             ast::MetaItemKind::Word(ref name) => {
-                try!(word(self.writer(), &name));
+                word(self.writer(), &name)?;
             }
             ast::MetaItemKind::NameValue(ref name, ref value) => {
-                try!(self.word_space(&name[..]));
-                try!(self.word_space("="));
-                try!(self.print_literal(value));
+                self.word_space(&name[..])?;
+                self.word_space("=")?;
+                self.print_literal(value)?;
             }
             ast::MetaItemKind::List(ref name, ref items) => {
-                try!(word(self.writer(), &name));
-                try!(self.popen());
-                try!(self.commasep(Consistent,
-                                   &items[..],
-                                   |s, i| s.print_meta_item(&i)));
-                try!(self.pclose());
+                word(self.writer(), &name)?;
+                self.popen()?;
+                self.commasep(Consistent,
+                              &items[..],
+                              |s, i| s.print_meta_item(&i))?;
+                self.pclose()?;
             }
         }
         self.end()
     }
 
     fn space_if_not_bol(&mut self) -> io::Result<()> {
-        if !self.is_bol() { try!(space(self.writer())); }
+        if !self.is_bol() { space(self.writer())?; }
         Ok(())
     }
 
@@ -823,24 +826,24 @@ impl<'a> State<'a> {
     }
 
     pub fn word_nbsp(&mut self, w: &str) -> io::Result<()> {
-        try!(word(&mut self.s, w));
+        word(&mut self.s, w)?;
         self.nbsp()
     }
 
     pub fn head(&mut self, w: &str) -> io::Result<()> {
         // outer-box is consistent
-        try!(self.cbox(INDENT_UNIT));
+        self.cbox(INDENT_UNIT)?;
         // head-box is inconsistent
-        try!(self.ibox(w.len() + 1));
+        self.ibox(w.len() + 1)?;
         // keyword that starts the head
         if !w.is_empty() {
-            try!(self.word_nbsp(w));
+            self.word_nbsp(w)?;
         }
         Ok(())
     }
 
     pub fn bopen(&mut self) -> io::Result<()> {
-        try!(word(&mut self.s, "{"));
+        word(&mut self.s, "{")?;
         self.end() // close the head-box
     }
 
@@ -850,11 +853,11 @@ impl<'a> State<'a> {
     }
     pub fn bclose_maybe_open(&mut self, span: codemap::Span,
                              indented: usize, close_box: bool) -> io::Result<()> {
-        try!(self.maybe_print_comment(span.hi));
-        try!(self.break_offset_if_not_bol(1, -(indented as isize)));
-        try!(word(&mut self.s, "}"));
+        self.maybe_print_comment(span.hi)?;
+        self.break_offset_if_not_bol(1, -(indented as isize))?;
+        word(&mut self.s, "}")?;
         if close_box {
-            try!(self.end()); // close the outer-box
+            self.end()?; // close the outer-box
         }
         Ok(())
     }
@@ -887,10 +890,10 @@ impl<'a> State<'a> {
     // Synthesizes a comment that was not textually present in the original source
     // file.
     pub fn synth_comment(&mut self, text: String) -> io::Result<()> {
-        try!(word(&mut self.s, "/*"));
-        try!(space(&mut self.s));
-        try!(word(&mut self.s, &text[..]));
-        try!(space(&mut self.s));
+        word(&mut self.s, "/*")?;
+        space(&mut self.s)?;
+        word(&mut self.s, &text[..])?;
+        space(&mut self.s)?;
         word(&mut self.s, "*/")
     }
 
@@ -904,18 +907,18 @@ impl<'a> State<'a> {
         F: FnMut(&mut State, &T) -> io::Result<()>,
         G: FnMut(&T) -> codemap::Span,
     {
-        try!(self.rbox(0, b));
+        self.rbox(0, b)?;
         let len = elts.len();
         let mut i = 0;
         for elt in elts {
-            try!(self.maybe_print_comment(get_span(elt).hi));
-            try!(op(self, elt));
+            self.maybe_print_comment(get_span(elt).hi)?;
+            op(self, elt)?;
             i += 1;
             if i < len {
-                try!(word(&mut self.s, ","));
-                try!(self.maybe_print_trailing_comment(get_span(elt),
-                                                    Some(get_span(&elts[i]).hi)));
-                try!(self.space_if_not_bol());
+                word(&mut self.s, ",")?;
+                self.maybe_print_trailing_comment(get_span(elt),
+                                                  Some(get_span(&elts[i]).hi))?;
+                self.space_if_not_bol()?;
             }
         }
         self.end()
@@ -928,18 +931,18 @@ impl<'a> State<'a> {
 
     pub fn print_mod(&mut self, _mod: &ast::Mod,
                      attrs: &[ast::Attribute]) -> io::Result<()> {
-        try!(self.print_inner_attributes(attrs));
+        self.print_inner_attributes(attrs)?;
         for item in &_mod.items {
-            try!(self.print_item(&item));
+            self.print_item(&item)?;
         }
         Ok(())
     }
 
     pub fn print_foreign_mod(&mut self, nmod: &ast::ForeignMod,
                              attrs: &[ast::Attribute]) -> io::Result<()> {
-        try!(self.print_inner_attributes(attrs));
+        self.print_inner_attributes(attrs)?;
         for item in &nmod.items {
-            try!(self.print_foreign_item(item));
+            self.print_foreign_item(item)?;
         }
         Ok(())
     }
@@ -947,47 +950,47 @@ impl<'a> State<'a> {
     pub fn print_opt_lifetime(&mut self,
                               lifetime: &Option<ast::Lifetime>) -> io::Result<()> {
         if let Some(l) = *lifetime {
-            try!(self.print_lifetime(&l));
-            try!(self.nbsp());
+            self.print_lifetime(&l)?;
+            self.nbsp()?;
         }
         Ok(())
     }
 
     pub fn print_type(&mut self, ty: &ast::Ty) -> io::Result<()> {
-        try!(self.maybe_print_comment(ty.span.lo));
-        try!(self.ibox(0));
+        self.maybe_print_comment(ty.span.lo)?;
+        self.ibox(0)?;
         match ty.node {
             ast::TyKind::Vec(ref ty) => {
-                try!(word(&mut self.s, "["));
-                try!(self.print_type(&ty));
-                try!(word(&mut self.s, "]"));
+                word(&mut self.s, "[")?;
+                self.print_type(&ty)?;
+                word(&mut self.s, "]")?;
             }
             ast::TyKind::Ptr(ref mt) => {
-                try!(word(&mut self.s, "*"));
+                word(&mut self.s, "*")?;
                 match mt.mutbl {
-                    ast::Mutability::Mutable => try!(self.word_nbsp("mut")),
-                    ast::Mutability::Immutable => try!(self.word_nbsp("const")),
+                    ast::Mutability::Mutable => self.word_nbsp("mut")?,
+                    ast::Mutability::Immutable => self.word_nbsp("const")?,
                 }
-                try!(self.print_type(&mt.ty));
+                self.print_type(&mt.ty)?;
             }
             ast::TyKind::Rptr(ref lifetime, ref mt) => {
-                try!(word(&mut self.s, "&"));
-                try!(self.print_opt_lifetime(lifetime));
-                try!(self.print_mt(mt));
+                word(&mut self.s, "&")?;
+                self.print_opt_lifetime(lifetime)?;
+                self.print_mt(mt)?;
             }
             ast::TyKind::Tup(ref elts) => {
-                try!(self.popen());
-                try!(self.commasep(Inconsistent, &elts[..],
-                                   |s, ty| s.print_type(&ty)));
+                self.popen()?;
+                self.commasep(Inconsistent, &elts[..],
+                              |s, ty| s.print_type(&ty))?;
                 if elts.len() == 1 {
-                    try!(word(&mut self.s, ","));
+                    word(&mut self.s, ",")?;
                 }
-                try!(self.pclose());
+                self.pclose()?;
             }
             ast::TyKind::Paren(ref typ) => {
-                try!(self.popen());
-                try!(self.print_type(&typ));
-                try!(self.pclose());
+                self.popen()?;
+                self.print_type(&typ)?;
+                self.pclose()?;
             }
             ast::TyKind::BareFn(ref f) => {
                 let generics = ast::Generics {
@@ -998,43 +1001,43 @@ impl<'a> State<'a> {
                         predicates: Vec::new(),
                     },
                 };
-                try!(self.print_ty_fn(f.abi,
-                                      f.unsafety,
-                                      &f.decl,
-                                      None,
-                                      &generics,
-                                      None));
+                self.print_ty_fn(f.abi,
+                                 f.unsafety,
+                                 &f.decl,
+                                 None,
+                                 &generics,
+                                 None)?;
             }
             ast::TyKind::Path(None, ref path) => {
-                try!(self.print_path(path, false, 0));
+                self.print_path(path, false, 0)?;
             }
             ast::TyKind::Path(Some(ref qself), ref path) => {
-                try!(self.print_qpath(path, qself, false))
+                self.print_qpath(path, qself, false)?
             }
             ast::TyKind::ObjectSum(ref ty, ref bounds) => {
-                try!(self.print_type(&ty));
-                try!(self.print_bounds("+", &bounds[..]));
+                self.print_type(&ty)?;
+                self.print_bounds("+", &bounds[..])?;
             }
             ast::TyKind::PolyTraitRef(ref bounds) => {
-                try!(self.print_bounds("", &bounds[..]));
+                self.print_bounds("", &bounds[..])?;
             }
             ast::TyKind::FixedLengthVec(ref ty, ref v) => {
-                try!(word(&mut self.s, "["));
-                try!(self.print_type(&ty));
-                try!(word(&mut self.s, "; "));
-                try!(self.print_expr(&v));
-                try!(word(&mut self.s, "]"));
+                word(&mut self.s, "[")?;
+                self.print_type(&ty)?;
+                word(&mut self.s, "; ")?;
+                self.print_expr(&v)?;
+                word(&mut self.s, "]")?;
             }
             ast::TyKind::Typeof(ref e) => {
-                try!(word(&mut self.s, "typeof("));
-                try!(self.print_expr(&e));
-                try!(word(&mut self.s, ")"));
+                word(&mut self.s, "typeof(")?;
+                self.print_expr(&e)?;
+                word(&mut self.s, ")")?;
             }
             ast::TyKind::Infer => {
-                try!(word(&mut self.s, "_"));
+                word(&mut self.s, "_")?;
             }
             ast::TyKind::Mac(ref m) => {
-                try!(self.print_mac(m, token::Paren));
+                self.print_mac(m, token::Paren)?;
             }
         }
         self.end()
@@ -1042,31 +1045,30 @@ impl<'a> State<'a> {
 
     pub fn print_foreign_item(&mut self,
                               item: &ast::ForeignItem) -> io::Result<()> {
-        try!(self.hardbreak_if_not_bol());
-        try!(self.maybe_print_comment(item.span.lo));
-        try!(self.print_outer_attributes(&item.attrs));
+        self.hardbreak_if_not_bol()?;
+        self.maybe_print_comment(item.span.lo)?;
+        self.print_outer_attributes(&item.attrs)?;
         match item.node {
             ast::ForeignItemKind::Fn(ref decl, ref generics) => {
-                try!(self.head(""));
-                try!(self.print_fn(decl, ast::Unsafety::Normal,
-                                   ast::Constness::NotConst,
-                                   Abi::Rust, Some(item.ident),
-                                   generics, None, item.vis));
-                try!(self.end()); // end head-ibox
-                try!(word(&mut self.s, ";"));
+                self.head("")?;
+                self.print_fn(decl, ast::Unsafety::Normal,
+                              ast::Constness::NotConst,
+                              Abi::Rust, Some(item.ident),
+                              generics, None, &item.vis)?;
+                self.end()?; // end head-ibox
+                word(&mut self.s, ";")?;
                 self.end() // end the outer fn box
             }
             ast::ForeignItemKind::Static(ref t, m) => {
-                try!(self.head(&visibility_qualified(item.vis,
-                                                    "static")));
+                self.head(&visibility_qualified(&item.vis, "static"))?;
                 if m {
-                    try!(self.word_space("mut"));
+                    self.word_space("mut")?;
                 }
-                try!(self.print_ident(item.ident));
-                try!(self.word_space(":"));
-                try!(self.print_type(&t));
-                try!(word(&mut self.s, ";"));
-                try!(self.end()); // end the head-ibox
+                self.print_ident(item.ident)?;
+                self.word_space(":")?;
+                self.print_type(&t)?;
+                word(&mut self.s, ";")?;
+                self.end()?; // end the head-ibox
                 self.end() // end the outer cbox
             }
         }
@@ -1076,18 +1078,18 @@ impl<'a> State<'a> {
                               ident: ast::Ident,
                               ty: &ast::Ty,
                               default: Option<&ast::Expr>,
-                              vis: ast::Visibility)
+                              vis: &ast::Visibility)
                               -> io::Result<()>
     {
-        try!(word(&mut self.s, &visibility_qualified(vis, "")));
-        try!(self.word_space("const"));
-        try!(self.print_ident(ident));
-        try!(self.word_space(":"));
-        try!(self.print_type(ty));
+        word(&mut self.s, &visibility_qualified(vis, ""))?;
+        self.word_space("const")?;
+        self.print_ident(ident)?;
+        self.word_space(":")?;
+        self.print_type(ty)?;
         if let Some(expr) = default {
-            try!(space(&mut self.s));
-            try!(self.word_space("="));
-            try!(self.print_expr(expr));
+            space(&mut self.s)?;
+            self.word_space("=")?;
+            self.print_expr(expr)?;
         }
         word(&mut self.s, ";")
     }
@@ -1097,87 +1099,83 @@ impl<'a> State<'a> {
                              bounds: Option<&ast::TyParamBounds>,
                              ty: Option<&ast::Ty>)
                              -> io::Result<()> {
-        try!(self.word_space("type"));
-        try!(self.print_ident(ident));
+        self.word_space("type")?;
+        self.print_ident(ident)?;
         if let Some(bounds) = bounds {
-            try!(self.print_bounds(":", bounds));
+            self.print_bounds(":", bounds)?;
         }
         if let Some(ty) = ty {
-            try!(space(&mut self.s));
-            try!(self.word_space("="));
-            try!(self.print_type(ty));
+            space(&mut self.s)?;
+            self.word_space("=")?;
+            self.print_type(ty)?;
         }
         word(&mut self.s, ";")
     }
 
     /// Pretty-print an item
     pub fn print_item(&mut self, item: &ast::Item) -> io::Result<()> {
-        try!(self.hardbreak_if_not_bol());
-        try!(self.maybe_print_comment(item.span.lo));
-        try!(self.print_outer_attributes(&item.attrs));
-        try!(self.ann.pre(self, NodeItem(item)));
+        self.hardbreak_if_not_bol()?;
+        self.maybe_print_comment(item.span.lo)?;
+        self.print_outer_attributes(&item.attrs)?;
+        self.ann.pre(self, NodeItem(item))?;
         match item.node {
             ast::ItemKind::ExternCrate(ref optional_path) => {
-                try!(self.head(&visibility_qualified(item.vis,
-                                                     "extern crate")));
+                self.head(&visibility_qualified(&item.vis, "extern crate"))?;
                 if let Some(p) = *optional_path {
                     let val = p.as_str();
                     if val.contains("-") {
-                        try!(self.print_string(&val, ast::StrStyle::Cooked));
+                        self.print_string(&val, ast::StrStyle::Cooked)?;
                     } else {
-                        try!(self.print_name(p));
+                        self.print_name(p)?;
                     }
-                    try!(space(&mut self.s));
-                    try!(word(&mut self.s, "as"));
-                    try!(space(&mut self.s));
+                    space(&mut self.s)?;
+                    word(&mut self.s, "as")?;
+                    space(&mut self.s)?;
                 }
-                try!(self.print_ident(item.ident));
-                try!(word(&mut self.s, ";"));
-                try!(self.end()); // end inner head-block
-                try!(self.end()); // end outer head-block
+                self.print_ident(item.ident)?;
+                word(&mut self.s, ";")?;
+                self.end()?; // end inner head-block
+                self.end()?; // end outer head-block
             }
             ast::ItemKind::Use(ref vp) => {
-                try!(self.head(&visibility_qualified(item.vis,
-                                                     "use")));
-                try!(self.print_view_path(&vp));
-                try!(word(&mut self.s, ";"));
-                try!(self.end()); // end inner head-block
-                try!(self.end()); // end outer head-block
+                self.head(&visibility_qualified(&item.vis, "use"))?;
+                self.print_view_path(&vp)?;
+                word(&mut self.s, ";")?;
+                self.end()?; // end inner head-block
+                self.end()?; // end outer head-block
             }
             ast::ItemKind::Static(ref ty, m, ref expr) => {
-                try!(self.head(&visibility_qualified(item.vis,
-                                                    "static")));
+                self.head(&visibility_qualified(&item.vis, "static"))?;
                 if m == ast::Mutability::Mutable {
-                    try!(self.word_space("mut"));
+                    self.word_space("mut")?;
                 }
-                try!(self.print_ident(item.ident));
-                try!(self.word_space(":"));
-                try!(self.print_type(&ty));
-                try!(space(&mut self.s));
-                try!(self.end()); // end the head-ibox
+                self.print_ident(item.ident)?;
+                self.word_space(":")?;
+                self.print_type(&ty)?;
+                space(&mut self.s)?;
+                self.end()?; // end the head-ibox
 
-                try!(self.word_space("="));
-                try!(self.print_expr(&expr));
-                try!(word(&mut self.s, ";"));
-                try!(self.end()); // end the outer cbox
+                self.word_space("=")?;
+                self.print_expr(&expr)?;
+                word(&mut self.s, ";")?;
+                self.end()?; // end the outer cbox
             }
             ast::ItemKind::Const(ref ty, ref expr) => {
-                try!(self.head(&visibility_qualified(item.vis,
-                                                    "const")));
-                try!(self.print_ident(item.ident));
-                try!(self.word_space(":"));
-                try!(self.print_type(&ty));
-                try!(space(&mut self.s));
-                try!(self.end()); // end the head-ibox
-
-                try!(self.word_space("="));
-                try!(self.print_expr(&expr));
-                try!(word(&mut self.s, ";"));
-                try!(self.end()); // end the outer cbox
+                self.head(&visibility_qualified(&item.vis, "const"))?;
+                self.print_ident(item.ident)?;
+                self.word_space(":")?;
+                self.print_type(&ty)?;
+                space(&mut self.s)?;
+                self.end()?; // end the head-ibox
+
+                self.word_space("=")?;
+                self.print_expr(&expr)?;
+                word(&mut self.s, ";")?;
+                self.end()?; // end the outer cbox
             }
             ast::ItemKind::Fn(ref decl, unsafety, constness, abi, ref typarams, ref body) => {
-                try!(self.head(""));
-                try!(self.print_fn(
+                self.head("")?;
+                self.print_fn(
                     decl,
                     unsafety,
                     constness,
@@ -1185,67 +1183,66 @@ impl<'a> State<'a> {
                     Some(item.ident),
                     typarams,
                     None,
-                    item.vis
-                ));
-                try!(word(&mut self.s, " "));
-                try!(self.print_block_with_attrs(&body, &item.attrs));
+                    &item.vis
+                )?;
+                word(&mut self.s, " ")?;
+                self.print_block_with_attrs(&body, &item.attrs)?;
             }
             ast::ItemKind::Mod(ref _mod) => {
-                try!(self.head(&visibility_qualified(item.vis,
-                                                    "mod")));
-                try!(self.print_ident(item.ident));
-                try!(self.nbsp());
-                try!(self.bopen());
-                try!(self.print_mod(_mod, &item.attrs));
-                try!(self.bclose(item.span));
+                self.head(&visibility_qualified(&item.vis, "mod"))?;
+                self.print_ident(item.ident)?;
+                self.nbsp()?;
+                self.bopen()?;
+                self.print_mod(_mod, &item.attrs)?;
+                self.bclose(item.span)?;
             }
             ast::ItemKind::ForeignMod(ref nmod) => {
-                try!(self.head("extern"));
-                try!(self.word_nbsp(&nmod.abi.to_string()));
-                try!(self.bopen());
-                try!(self.print_foreign_mod(nmod, &item.attrs));
-                try!(self.bclose(item.span));
+                self.head("extern")?;
+                self.word_nbsp(&nmod.abi.to_string())?;
+                self.bopen()?;
+                self.print_foreign_mod(nmod, &item.attrs)?;
+                self.bclose(item.span)?;
             }
             ast::ItemKind::Ty(ref ty, ref params) => {
-                try!(self.ibox(INDENT_UNIT));
-                try!(self.ibox(0));
-                try!(self.word_nbsp(&visibility_qualified(item.vis, "type")));
-                try!(self.print_ident(item.ident));
-                try!(self.print_generics(params));
-                try!(self.end()); // end the inner ibox
-
-                try!(self.print_where_clause(&params.where_clause));
-                try!(space(&mut self.s));
-                try!(self.word_space("="));
-                try!(self.print_type(&ty));
-                try!(word(&mut self.s, ";"));
-                try!(self.end()); // end the outer ibox
+                self.ibox(INDENT_UNIT)?;
+                self.ibox(0)?;
+                self.word_nbsp(&visibility_qualified(&item.vis, "type"))?;
+                self.print_ident(item.ident)?;
+                self.print_generics(params)?;
+                self.end()?; // end the inner ibox
+
+                self.print_where_clause(&params.where_clause)?;
+                space(&mut self.s)?;
+                self.word_space("=")?;
+                self.print_type(&ty)?;
+                word(&mut self.s, ";")?;
+                self.end()?; // end the outer ibox
             }
             ast::ItemKind::Enum(ref enum_definition, ref params) => {
-                try!(self.print_enum_def(
+                self.print_enum_def(
                     enum_definition,
                     params,
                     item.ident,
                     item.span,
-                    item.vis
-                ));
+                    &item.vis
+                )?;
             }
             ast::ItemKind::Struct(ref struct_def, ref generics) => {
-                try!(self.head(&visibility_qualified(item.vis,"struct")));
-                try!(self.print_struct(&struct_def, generics, item.ident, item.span, true));
+                self.head(&visibility_qualified(&item.vis, "struct"))?;
+                self.print_struct(&struct_def, generics, item.ident, item.span, true)?;
             }
 
             ast::ItemKind::DefaultImpl(unsafety, ref trait_ref) => {
-                try!(self.head(""));
-                try!(self.print_visibility(item.vis));
-                try!(self.print_unsafety(unsafety));
-                try!(self.word_nbsp("impl"));
-                try!(self.print_trait_ref(trait_ref));
-                try!(space(&mut self.s));
-                try!(self.word_space("for"));
-                try!(self.word_space(".."));
-                try!(self.bopen());
-                try!(self.bclose(item.span));
+                self.head("")?;
+                self.print_visibility(&item.vis)?;
+                self.print_unsafety(unsafety)?;
+                self.word_nbsp("impl")?;
+                self.print_trait_ref(trait_ref)?;
+                space(&mut self.s)?;
+                self.word_space("for")?;
+                self.word_space("..")?;
+                self.bopen()?;
+                self.bclose(item.span)?;
             }
             ast::ItemKind::Impl(unsafety,
                           polarity,
@@ -1253,80 +1250,80 @@ impl<'a> State<'a> {
                           ref opt_trait,
                           ref ty,
                           ref impl_items) => {
-                try!(self.head(""));
-                try!(self.print_visibility(item.vis));
-                try!(self.print_unsafety(unsafety));
-                try!(self.word_nbsp("impl"));
+                self.head("")?;
+                self.print_visibility(&item.vis)?;
+                self.print_unsafety(unsafety)?;
+                self.word_nbsp("impl")?;
 
                 if generics.is_parameterized() {
-                    try!(self.print_generics(generics));
-                    try!(space(&mut self.s));
+                    self.print_generics(generics)?;
+                    space(&mut self.s)?;
                 }
 
                 match polarity {
                     ast::ImplPolarity::Negative => {
-                        try!(word(&mut self.s, "!"));
+                        word(&mut self.s, "!")?;
                     },
                     _ => {}
                 }
 
                 match *opt_trait {
                     Some(ref t) => {
-                        try!(self.print_trait_ref(t));
-                        try!(space(&mut self.s));
-                        try!(self.word_space("for"));
+                        self.print_trait_ref(t)?;
+                        space(&mut self.s)?;
+                        self.word_space("for")?;
                     }
                     None => {}
                 }
 
-                try!(self.print_type(&ty));
-                try!(self.print_where_clause(&generics.where_clause));
+                self.print_type(&ty)?;
+                self.print_where_clause(&generics.where_clause)?;
 
-                try!(space(&mut self.s));
-                try!(self.bopen());
-                try!(self.print_inner_attributes(&item.attrs));
+                space(&mut self.s)?;
+                self.bopen()?;
+                self.print_inner_attributes(&item.attrs)?;
                 for impl_item in impl_items {
-                    try!(self.print_impl_item(impl_item));
+                    self.print_impl_item(impl_item)?;
                 }
-                try!(self.bclose(item.span));
+                self.bclose(item.span)?;
             }
             ast::ItemKind::Trait(unsafety, ref generics, ref bounds, ref trait_items) => {
-                try!(self.head(""));
-                try!(self.print_visibility(item.vis));
-                try!(self.print_unsafety(unsafety));
-                try!(self.word_nbsp("trait"));
-                try!(self.print_ident(item.ident));
-                try!(self.print_generics(generics));
+                self.head("")?;
+                self.print_visibility(&item.vis)?;
+                self.print_unsafety(unsafety)?;
+                self.word_nbsp("trait")?;
+                self.print_ident(item.ident)?;
+                self.print_generics(generics)?;
                 let mut real_bounds = Vec::with_capacity(bounds.len());
                 for b in bounds.iter() {
                     if let TraitTyParamBound(ref ptr, ast::TraitBoundModifier::Maybe) = *b {
-                        try!(space(&mut self.s));
-                        try!(self.word_space("for ?"));
-                        try!(self.print_trait_ref(&ptr.trait_ref));
+                        space(&mut self.s)?;
+                        self.word_space("for ?")?;
+                        self.print_trait_ref(&ptr.trait_ref)?;
                     } else {
                         real_bounds.push(b.clone());
                     }
                 }
-                try!(self.print_bounds(":", &real_bounds[..]));
-                try!(self.print_where_clause(&generics.where_clause));
-                try!(word(&mut self.s, " "));
-                try!(self.bopen());
+                self.print_bounds(":", &real_bounds[..])?;
+                self.print_where_clause(&generics.where_clause)?;
+                word(&mut self.s, " ")?;
+                self.bopen()?;
                 for trait_item in trait_items {
-                    try!(self.print_trait_item(trait_item));
+                    self.print_trait_item(trait_item)?;
                 }
-                try!(self.bclose(item.span));
+                self.bclose(item.span)?;
             }
             ast::ItemKind::Mac(codemap::Spanned { ref node, .. }) => {
-                try!(self.print_visibility(item.vis));
-                try!(self.print_path(&node.path, false, 0));
-                try!(word(&mut self.s, "! "));
-                try!(self.print_ident(item.ident));
-                try!(self.cbox(INDENT_UNIT));
-                try!(self.popen());
-                try!(self.print_tts(&node.tts[..]));
-                try!(self.pclose());
-                try!(word(&mut self.s, ";"));
-                try!(self.end());
+                self.print_visibility(&item.vis)?;
+                self.print_path(&node.path, false, 0)?;
+                word(&mut self.s, "! ")?;
+                self.print_ident(item.ident)?;
+                self.cbox(INDENT_UNIT)?;
+                self.popen()?;
+                self.print_tts(&node.tts[..])?;
+                self.pclose()?;
+                word(&mut self.s, ";")?;
+                self.end()?;
             }
         }
         self.ann.post(self, NodeItem(item))
@@ -1338,57 +1335,60 @@ impl<'a> State<'a> {
 
     fn print_formal_lifetime_list(&mut self, lifetimes: &[ast::LifetimeDef]) -> io::Result<()> {
         if !lifetimes.is_empty() {
-            try!(word(&mut self.s, "for<"));
+            word(&mut self.s, "for<")?;
             let mut comma = false;
             for lifetime_def in lifetimes {
                 if comma {
-                    try!(self.word_space(","))
+                    self.word_space(",")?
                 }
-                try!(self.print_lifetime_def(lifetime_def));
+                self.print_lifetime_def(lifetime_def)?;
                 comma = true;
             }
-            try!(word(&mut self.s, ">"));
+            word(&mut self.s, ">")?;
         }
         Ok(())
     }
 
     fn print_poly_trait_ref(&mut self, t: &ast::PolyTraitRef) -> io::Result<()> {
-        try!(self.print_formal_lifetime_list(&t.bound_lifetimes));
+        self.print_formal_lifetime_list(&t.bound_lifetimes)?;
         self.print_trait_ref(&t.trait_ref)
     }
 
     pub fn print_enum_def(&mut self, enum_definition: &ast::EnumDef,
                           generics: &ast::Generics, ident: ast::Ident,
                           span: codemap::Span,
-                          visibility: ast::Visibility) -> io::Result<()> {
-        try!(self.head(&visibility_qualified(visibility, "enum")));
-        try!(self.print_ident(ident));
-        try!(self.print_generics(generics));
-        try!(self.print_where_clause(&generics.where_clause));
-        try!(space(&mut self.s));
+                          visibility: &ast::Visibility) -> io::Result<()> {
+        self.head(&visibility_qualified(visibility, "enum"))?;
+        self.print_ident(ident)?;
+        self.print_generics(generics)?;
+        self.print_where_clause(&generics.where_clause)?;
+        space(&mut self.s)?;
         self.print_variants(&enum_definition.variants, span)
     }
 
     pub fn print_variants(&mut self,
                           variants: &[ast::Variant],
                           span: codemap::Span) -> io::Result<()> {
-        try!(self.bopen());
+        self.bopen()?;
         for v in variants {
-            try!(self.space_if_not_bol());
-            try!(self.maybe_print_comment(v.span.lo));
-            try!(self.print_outer_attributes(&v.node.attrs));
-            try!(self.ibox(INDENT_UNIT));
-            try!(self.print_variant(v));
-            try!(word(&mut self.s, ","));
-            try!(self.end());
-            try!(self.maybe_print_trailing_comment(v.span, None));
+            self.space_if_not_bol()?;
+            self.maybe_print_comment(v.span.lo)?;
+            self.print_outer_attributes(&v.node.attrs)?;
+            self.ibox(INDENT_UNIT)?;
+            self.print_variant(v)?;
+            word(&mut self.s, ",")?;
+            self.end()?;
+            self.maybe_print_trailing_comment(v.span, None)?;
         }
         self.bclose(span)
     }
 
-    pub fn print_visibility(&mut self, vis: ast::Visibility) -> io::Result<()> {
-        match vis {
+    pub fn print_visibility(&mut self, vis: &ast::Visibility) -> io::Result<()> {
+        match *vis {
             ast::Visibility::Public => self.word_nbsp("pub"),
+            ast::Visibility::Crate => self.word_nbsp("pub(crate)"),
+            ast::Visibility::Restricted { ref path, .. } =>
+                self.word_nbsp(&format!("pub({})", path)),
             ast::Visibility::Inherited => Ok(())
         }
     }
@@ -1399,52 +1399,42 @@ impl<'a> State<'a> {
                         ident: ast::Ident,
                         span: codemap::Span,
                         print_finalizer: bool) -> io::Result<()> {
-        try!(self.print_ident(ident));
-        try!(self.print_generics(generics));
+        self.print_ident(ident)?;
+        self.print_generics(generics)?;
         if !struct_def.is_struct() {
             if struct_def.is_tuple() {
-                try!(self.popen());
-                try!(self.commasep(
+                self.popen()?;
+                self.commasep(
                     Inconsistent, struct_def.fields(),
                     |s, field| {
-                        match field.node.kind {
-                            ast::NamedField(..) => panic!("unexpected named field"),
-                            ast::UnnamedField(vis) => {
-                                try!(s.print_visibility(vis));
-                                try!(s.maybe_print_comment(field.span.lo));
-                                s.print_type(&field.node.ty)
-                            }
-                        }
+                        s.print_visibility(&field.vis)?;
+                        s.maybe_print_comment(field.span.lo)?;
+                        s.print_type(&field.ty)
                     }
-                ));
-                try!(self.pclose());
+                )?;
+                self.pclose()?;
             }
-            try!(self.print_where_clause(&generics.where_clause));
+            self.print_where_clause(&generics.where_clause)?;
             if print_finalizer {
-                try!(word(&mut self.s, ";"));
+                word(&mut self.s, ";")?;
             }
-            try!(self.end());
+            self.end()?;
             self.end() // close the outer-box
         } else {
-            try!(self.print_where_clause(&generics.where_clause));
-            try!(self.nbsp());
-            try!(self.bopen());
-            try!(self.hardbreak_if_not_bol());
+            self.print_where_clause(&generics.where_clause)?;
+            self.nbsp()?;
+            self.bopen()?;
+            self.hardbreak_if_not_bol()?;
 
             for field in struct_def.fields() {
-                match field.node.kind {
-                    ast::UnnamedField(..) => panic!("unexpected unnamed field"),
-                    ast::NamedField(ident, visibility) => {
-                        try!(self.hardbreak_if_not_bol());
-                        try!(self.maybe_print_comment(field.span.lo));
-                        try!(self.print_outer_attributes(&field.node.attrs));
-                        try!(self.print_visibility(visibility));
-                        try!(self.print_ident(ident));
-                        try!(self.word_nbsp(":"));
-                        try!(self.print_type(&field.node.ty));
-                        try!(word(&mut self.s, ","));
-                    }
-                }
+                self.hardbreak_if_not_bol()?;
+                self.maybe_print_comment(field.span.lo)?;
+                self.print_outer_attributes(&field.attrs)?;
+                self.print_visibility(&field.vis)?;
+                self.print_ident(field.ident.unwrap())?;
+                self.word_nbsp(":")?;
+                self.print_type(&field.ty)?;
+                word(&mut self.s, ",")?;
             }
 
             self.bclose(span)
@@ -1461,7 +1451,7 @@ impl<'a> State<'a> {
     pub fn print_tt(&mut self, tt: &ast::TokenTree) -> io::Result<()> {
         match *tt {
             TokenTree::Token(_, ref tk) => {
-                try!(word(&mut self.s, &token_to_string(tk)));
+                word(&mut self.s, &token_to_string(tk))?;
                 match *tk {
                     parse::token::DocComment(..) => {
                         hardbreak(&mut self.s)
@@ -1470,21 +1460,21 @@ impl<'a> State<'a> {
                 }
             }
             TokenTree::Delimited(_, ref delimed) => {
-                try!(word(&mut self.s, &token_to_string(&delimed.open_token())));
-                try!(space(&mut self.s));
-                try!(self.print_tts(&delimed.tts));
-                try!(space(&mut self.s));
+                word(&mut self.s, &token_to_string(&delimed.open_token()))?;
+                space(&mut self.s)?;
+                self.print_tts(&delimed.tts)?;
+                space(&mut self.s)?;
                 word(&mut self.s, &token_to_string(&delimed.close_token()))
             },
             TokenTree::Sequence(_, ref seq) => {
-                try!(word(&mut self.s, "$("));
+                word(&mut self.s, "$(")?;
                 for tt_elt in &seq.tts {
-                    try!(self.print_tt(tt_elt));
+                    self.print_tt(tt_elt)?;
                 }
-                try!(word(&mut self.s, ")"));
+                word(&mut self.s, ")")?;
                 match seq.separator {
                     Some(ref tk) => {
-                        try!(word(&mut self.s, &token_to_string(tk)));
+                        word(&mut self.s, &token_to_string(tk))?;
                     }
                     None => {},
                 }
@@ -1497,13 +1487,13 @@ impl<'a> State<'a> {
     }
 
     pub fn print_tts(&mut self, tts: &[ast::TokenTree]) -> io::Result<()> {
-        try!(self.ibox(0));
+        self.ibox(0)?;
         let mut suppress_space = false;
         for (i, tt) in tts.iter().enumerate() {
             if i != 0 && !suppress_space {
-                try!(space(&mut self.s));
+                space(&mut self.s)?;
             }
-            try!(self.print_tt(tt));
+            self.print_tt(tt)?;
             // There should be no space between the module name and the following `::` in paths,
             // otherwise imported macros get re-parsed from crate metadata incorrectly (#20701)
             suppress_space = match *tt {
@@ -1517,13 +1507,13 @@ impl<'a> State<'a> {
     }
 
     pub fn print_variant(&mut self, v: &ast::Variant) -> io::Result<()> {
-        try!(self.head(""));
+        self.head("")?;
         let generics = ast::Generics::default();
-        try!(self.print_struct(&v.node.data, &generics, v.node.name, v.span, false));
+        self.print_struct(&v.node.data, &generics, v.node.name, v.span, false)?;
         match v.node.disr_expr {
             Some(ref d) => {
-                try!(space(&mut self.s));
-                try!(self.word_space("="));
+                space(&mut self.s)?;
+                self.word_space("=")?;
                 self.print_expr(&d)
             }
             _ => Ok(())
@@ -1533,7 +1523,7 @@ impl<'a> State<'a> {
     pub fn print_method_sig(&mut self,
                             ident: ast::Ident,
                             m: &ast::MethodSig,
-                            vis: ast::Visibility)
+                            vis: &ast::Visibility)
                             -> io::Result<()> {
         self.print_fn(&m.decl,
                       m.unsafety,
@@ -1547,100 +1537,103 @@ impl<'a> State<'a> {
 
     pub fn print_trait_item(&mut self, ti: &ast::TraitItem)
                             -> io::Result<()> {
-        try!(self.ann.pre(self, NodeSubItem(ti.id)));
-        try!(self.hardbreak_if_not_bol());
-        try!(self.maybe_print_comment(ti.span.lo));
-        try!(self.print_outer_attributes(&ti.attrs));
+        self.ann.pre(self, NodeSubItem(ti.id))?;
+        self.hardbreak_if_not_bol()?;
+        self.maybe_print_comment(ti.span.lo)?;
+        self.print_outer_attributes(&ti.attrs)?;
         match ti.node {
             ast::TraitItemKind::Const(ref ty, ref default) => {
-                try!(self.print_associated_const(ti.ident, &ty,
-                                                 default.as_ref().map(|expr| &**expr),
-                                                 ast::Visibility::Inherited));
+                self.print_associated_const(ti.ident, &ty,
+                                            default.as_ref().map(|expr| &**expr),
+                                            &ast::Visibility::Inherited)?;
             }
             ast::TraitItemKind::Method(ref sig, ref body) => {
                 if body.is_some() {
-                    try!(self.head(""));
+                    self.head("")?;
                 }
-                try!(self.print_method_sig(ti.ident, sig, ast::Visibility::Inherited));
+                self.print_method_sig(ti.ident, sig, &ast::Visibility::Inherited)?;
                 if let Some(ref body) = *body {
-                    try!(self.nbsp());
-                    try!(self.print_block_with_attrs(body, &ti.attrs));
+                    self.nbsp()?;
+                    self.print_block_with_attrs(body, &ti.attrs)?;
                 } else {
-                    try!(word(&mut self.s, ";"));
+                    word(&mut self.s, ";")?;
                 }
             }
             ast::TraitItemKind::Type(ref bounds, ref default) => {
-                try!(self.print_associated_type(ti.ident, Some(bounds),
-                                                default.as_ref().map(|ty| &**ty)));
+                self.print_associated_type(ti.ident, Some(bounds),
+                                           default.as_ref().map(|ty| &**ty))?;
             }
         }
         self.ann.post(self, NodeSubItem(ti.id))
     }
 
     pub fn print_impl_item(&mut self, ii: &ast::ImplItem) -> io::Result<()> {
-        try!(self.ann.pre(self, NodeSubItem(ii.id)));
-        try!(self.hardbreak_if_not_bol());
-        try!(self.maybe_print_comment(ii.span.lo));
-        try!(self.print_outer_attributes(&ii.attrs));
+        self.ann.pre(self, NodeSubItem(ii.id))?;
+        self.hardbreak_if_not_bol()?;
+        self.maybe_print_comment(ii.span.lo)?;
+        self.print_outer_attributes(&ii.attrs)?;
+        if let ast::Defaultness::Default = ii.defaultness {
+            self.word_nbsp("default")?;
+        }
         match ii.node {
             ast::ImplItemKind::Const(ref ty, ref expr) => {
-                try!(self.print_associated_const(ii.ident, &ty, Some(&expr), ii.vis));
+                self.print_associated_const(ii.ident, &ty, Some(&expr), &ii.vis)?;
             }
             ast::ImplItemKind::Method(ref sig, ref body) => {
-                try!(self.head(""));
-                try!(self.print_method_sig(ii.ident, sig, ii.vis));
-                try!(self.nbsp());
-                try!(self.print_block_with_attrs(body, &ii.attrs));
+                self.head("")?;
+                self.print_method_sig(ii.ident, sig, &ii.vis)?;
+                self.nbsp()?;
+                self.print_block_with_attrs(body, &ii.attrs)?;
             }
             ast::ImplItemKind::Type(ref ty) => {
-                try!(self.print_associated_type(ii.ident, None, Some(ty)));
+                self.print_associated_type(ii.ident, None, Some(ty))?;
             }
             ast::ImplItemKind::Macro(codemap::Spanned { ref node, .. }) => {
                 // code copied from ItemKind::Mac:
-                try!(self.print_path(&node.path, false, 0));
-                try!(word(&mut self.s, "! "));
-                try!(self.cbox(INDENT_UNIT));
-                try!(self.popen());
-                try!(self.print_tts(&node.tts[..]));
-                try!(self.pclose());
-                try!(word(&mut self.s, ";"));
-                try!(self.end())
+                self.print_path(&node.path, false, 0)?;
+                word(&mut self.s, "! ")?;
+                self.cbox(INDENT_UNIT)?;
+                self.popen()?;
+                self.print_tts(&node.tts[..])?;
+                self.pclose()?;
+                word(&mut self.s, ";")?;
+                self.end()?
             }
         }
         self.ann.post(self, NodeSubItem(ii.id))
     }
 
     pub fn print_stmt(&mut self, st: &ast::Stmt) -> io::Result<()> {
-        try!(self.maybe_print_comment(st.span.lo));
+        self.maybe_print_comment(st.span.lo)?;
         match st.node {
             ast::StmtKind::Decl(ref decl, _) => {
-                try!(self.print_decl(&decl));
+                self.print_decl(&decl)?;
             }
             ast::StmtKind::Expr(ref expr, _) => {
-                try!(self.space_if_not_bol());
-                try!(self.print_expr_outer_attr_style(&expr, false));
+                self.space_if_not_bol()?;
+                self.print_expr_outer_attr_style(&expr, false)?;
             }
             ast::StmtKind::Semi(ref expr, _) => {
-                try!(self.space_if_not_bol());
-                try!(self.print_expr_outer_attr_style(&expr, false));
-                try!(word(&mut self.s, ";"));
+                self.space_if_not_bol()?;
+                self.print_expr_outer_attr_style(&expr, false)?;
+                word(&mut self.s, ";")?;
             }
             ast::StmtKind::Mac(ref mac, style, ref attrs) => {
-                try!(self.space_if_not_bol());
-                try!(self.print_outer_attributes(attrs.as_attr_slice()));
+                self.space_if_not_bol()?;
+                self.print_outer_attributes(attrs.as_attr_slice())?;
                 let delim = match style {
                     ast::MacStmtStyle::Braces => token::Brace,
                     _ => token::Paren
                 };
-                try!(self.print_mac(&mac, delim));
+                self.print_mac(&mac, delim)?;
                 match style {
                     ast::MacStmtStyle::Braces => {}
-                    _ => try!(word(&mut self.s, ";")),
+                    _ => word(&mut self.s, ";")?,
                 }
             }
         }
         if parse::classify::stmt_ends_with_semi(&st.node) {
-            try!(word(&mut self.s, ";"));
+            word(&mut self.s, ";")?;
         }
         self.maybe_print_trailing_comment(st.span, None)
     }
@@ -1676,27 +1669,27 @@ impl<'a> State<'a> {
                                       attrs: &[ast::Attribute],
                                       close_box: bool) -> io::Result<()> {
         match blk.rules {
-            BlockCheckMode::Unsafe(..) => try!(self.word_space("unsafe")),
+            BlockCheckMode::Unsafe(..) => self.word_space("unsafe")?,
             BlockCheckMode::Default => ()
         }
-        try!(self.maybe_print_comment(blk.span.lo));
-        try!(self.ann.pre(self, NodeBlock(blk)));
-        try!(self.bopen());
+        self.maybe_print_comment(blk.span.lo)?;
+        self.ann.pre(self, NodeBlock(blk))?;
+        self.bopen()?;
 
-        try!(self.print_inner_attributes(attrs));
+        self.print_inner_attributes(attrs)?;
 
         for st in &blk.stmts {
-            try!(self.print_stmt(st));
+            self.print_stmt(st)?;
         }
         match blk.expr {
             Some(ref expr) => {
-                try!(self.space_if_not_bol());
-                try!(self.print_expr_outer_attr_style(&expr, false));
-                try!(self.maybe_print_trailing_comment(expr.span, Some(blk.span.hi)));
+                self.space_if_not_bol()?;
+                self.print_expr_outer_attr_style(&expr, false)?;
+                self.maybe_print_trailing_comment(expr.span, Some(blk.span.hi))?;
             }
             _ => ()
         }
-        try!(self.bclose_maybe_open(blk.span, indented, close_box));
+        self.bclose_maybe_open(blk.span, indented, close_box)?;
         self.ann.post(self, NodeBlock(blk))
     }
 
@@ -1706,32 +1699,32 @@ impl<'a> State<'a> {
                 match _else.node {
                     // "another else-if"
                     ast::ExprKind::If(ref i, ref then, ref e) => {
-                        try!(self.cbox(INDENT_UNIT - 1));
-                        try!(self.ibox(0));
-                        try!(word(&mut self.s, " else if "));
-                        try!(self.print_expr(&i));
-                        try!(space(&mut self.s));
-                        try!(self.print_block(&then));
+                        self.cbox(INDENT_UNIT - 1)?;
+                        self.ibox(0)?;
+                        word(&mut self.s, " else if ")?;
+                        self.print_expr(&i)?;
+                        space(&mut self.s)?;
+                        self.print_block(&then)?;
                         self.print_else(e.as_ref().map(|e| &**e))
                     }
                     // "another else-if-let"
                     ast::ExprKind::IfLet(ref pat, ref expr, ref then, ref e) => {
-                        try!(self.cbox(INDENT_UNIT - 1));
-                        try!(self.ibox(0));
-                        try!(word(&mut self.s, " else if let "));
-                        try!(self.print_pat(&pat));
-                        try!(space(&mut self.s));
-                        try!(self.word_space("="));
-                        try!(self.print_expr(&expr));
-                        try!(space(&mut self.s));
-                        try!(self.print_block(&then));
+                        self.cbox(INDENT_UNIT - 1)?;
+                        self.ibox(0)?;
+                        word(&mut self.s, " else if let ")?;
+                        self.print_pat(&pat)?;
+                        space(&mut self.s)?;
+                        self.word_space("=")?;
+                        self.print_expr(&expr)?;
+                        space(&mut self.s)?;
+                        self.print_block(&then)?;
                         self.print_else(e.as_ref().map(|e| &**e))
                     }
                     // "final else"
                     ast::ExprKind::Block(ref b) => {
-                        try!(self.cbox(INDENT_UNIT - 1));
-                        try!(self.ibox(0));
-                        try!(word(&mut self.s, " else "));
+                        self.cbox(INDENT_UNIT - 1)?;
+                        self.ibox(0)?;
+                        word(&mut self.s, " else ")?;
                         self.print_block(&b)
                     }
                     // BLEAH, constraints would be great here
@@ -1746,38 +1739,38 @@ impl<'a> State<'a> {
 
     pub fn print_if(&mut self, test: &ast::Expr, blk: &ast::Block,
                     elseopt: Option<&ast::Expr>) -> io::Result<()> {
-        try!(self.head("if"));
-        try!(self.print_expr(test));
-        try!(space(&mut self.s));
-        try!(self.print_block(blk));
+        self.head("if")?;
+        self.print_expr(test)?;
+        space(&mut self.s)?;
+        self.print_block(blk)?;
         self.print_else(elseopt)
     }
 
     pub fn print_if_let(&mut self, pat: &ast::Pat, expr: &ast::Expr, blk: &ast::Block,
                         elseopt: Option<&ast::Expr>) -> io::Result<()> {
-        try!(self.head("if let"));
-        try!(self.print_pat(pat));
-        try!(space(&mut self.s));
-        try!(self.word_space("="));
-        try!(self.print_expr(expr));
-        try!(space(&mut self.s));
-        try!(self.print_block(blk));
+        self.head("if let")?;
+        self.print_pat(pat)?;
+        space(&mut self.s)?;
+        self.word_space("=")?;
+        self.print_expr(expr)?;
+        space(&mut self.s)?;
+        self.print_block(blk)?;
         self.print_else(elseopt)
     }
 
     pub fn print_mac(&mut self, m: &ast::Mac, delim: token::DelimToken)
                      -> io::Result<()> {
-        try!(self.print_path(&m.node.path, false, 0));
-        try!(word(&mut self.s, "!"));
+        self.print_path(&m.node.path, false, 0)?;
+        word(&mut self.s, "!")?;
         match delim {
-            token::Paren => try!(self.popen()),
-            token::Bracket => try!(word(&mut self.s, "[")),
+            token::Paren => self.popen()?,
+            token::Bracket => word(&mut self.s, "[")?,
             token::Brace => {
-                try!(self.head(""));
-                try!(self.bopen());
+                self.head("")?;
+                self.bopen()?;
             }
         }
-        try!(self.print_tts(&m.node.tts));
+        self.print_tts(&m.node.tts)?;
         match delim {
             token::Paren => self.pclose(),
             token::Bracket => word(&mut self.s, "]"),
@@ -1787,8 +1780,8 @@ impl<'a> State<'a> {
 
 
     fn print_call_post(&mut self, args: &[P<ast::Expr>]) -> io::Result<()> {
-        try!(self.popen());
-        try!(self.commasep_exprs(Inconsistent, args));
+        self.popen()?;
+        self.commasep_exprs(Inconsistent, args)?;
         self.pclose()
     }
 
@@ -1810,11 +1803,11 @@ impl<'a> State<'a> {
     pub fn print_expr_maybe_paren(&mut self, expr: &ast::Expr) -> io::Result<()> {
         let needs_par = needs_parentheses(expr);
         if needs_par {
-            try!(self.popen());
+            self.popen()?;
         }
-        try!(self.print_expr(expr));
+        self.print_expr(expr)?;
         if needs_par {
-            try!(self.pclose());
+            self.pclose()?;
         }
         Ok(())
     }
@@ -1822,19 +1815,19 @@ impl<'a> State<'a> {
     fn print_expr_in_place(&mut self,
                            place: &ast::Expr,
                            expr: &ast::Expr) -> io::Result<()> {
-        try!(self.print_expr_maybe_paren(place));
-        try!(space(&mut self.s));
-        try!(self.word_space("<-"));
+        self.print_expr_maybe_paren(place)?;
+        space(&mut self.s)?;
+        self.word_space("<-")?;
         self.print_expr_maybe_paren(expr)
     }
 
     fn print_expr_vec(&mut self, exprs: &[P<ast::Expr>],
                       attrs: &[Attribute]) -> io::Result<()> {
-        try!(self.ibox(INDENT_UNIT));
-        try!(word(&mut self.s, "["));
-        try!(self.print_inner_attributes_inline(attrs));
-        try!(self.commasep_exprs(Inconsistent, &exprs[..]));
-        try!(word(&mut self.s, "]"));
+        self.ibox(INDENT_UNIT)?;
+        word(&mut self.s, "[")?;
+        self.print_inner_attributes_inline(attrs)?;
+        self.commasep_exprs(Inconsistent, &exprs[..])?;
+        word(&mut self.s, "]")?;
         self.end()
     }
 
@@ -1842,13 +1835,13 @@ impl<'a> State<'a> {
                          element: &ast::Expr,
                          count: &ast::Expr,
                          attrs: &[Attribute]) -> io::Result<()> {
-        try!(self.ibox(INDENT_UNIT));
-        try!(word(&mut self.s, "["));
-        try!(self.print_inner_attributes_inline(attrs));
-        try!(self.print_expr(element));
-        try!(self.word_space(";"));
-        try!(self.print_expr(count));
-        try!(word(&mut self.s, "]"));
+        self.ibox(INDENT_UNIT)?;
+        word(&mut self.s, "[")?;
+        self.print_inner_attributes_inline(attrs)?;
+        self.print_expr(element)?;
+        self.word_space(";")?;
+        self.print_expr(count)?;
+        word(&mut self.s, "]")?;
         self.end()
     }
 
@@ -1857,46 +1850,46 @@ impl<'a> State<'a> {
                          fields: &[ast::Field],
                          wth: &Option<P<ast::Expr>>,
                          attrs: &[Attribute]) -> io::Result<()> {
-        try!(self.print_path(path, true, 0));
-        try!(word(&mut self.s, "{"));
-        try!(self.print_inner_attributes_inline(attrs));
-        try!(self.commasep_cmnt(
+        self.print_path(path, true, 0)?;
+        word(&mut self.s, "{")?;
+        self.print_inner_attributes_inline(attrs)?;
+        self.commasep_cmnt(
             Consistent,
             &fields[..],
             |s, field| {
-                try!(s.ibox(INDENT_UNIT));
-                try!(s.print_ident(field.ident.node));
-                try!(s.word_space(":"));
-                try!(s.print_expr(&field.expr));
+                s.ibox(INDENT_UNIT)?;
+                s.print_ident(field.ident.node)?;
+                s.word_space(":")?;
+                s.print_expr(&field.expr)?;
                 s.end()
             },
-            |f| f.span));
+            |f| f.span)?;
         match *wth {
             Some(ref expr) => {
-                try!(self.ibox(INDENT_UNIT));
+                self.ibox(INDENT_UNIT)?;
                 if !fields.is_empty() {
-                    try!(word(&mut self.s, ","));
-                    try!(space(&mut self.s));
+                    word(&mut self.s, ",")?;
+                    space(&mut self.s)?;
                 }
-                try!(word(&mut self.s, ".."));
-                try!(self.print_expr(&expr));
-                try!(self.end());
+                word(&mut self.s, "..")?;
+                self.print_expr(&expr)?;
+                self.end()?;
             }
             _ => if !fields.is_empty() {
-                try!(word(&mut self.s, ","))
+                word(&mut self.s, ",")?
             }
         }
-        try!(word(&mut self.s, "}"));
+        word(&mut self.s, "}")?;
         Ok(())
     }
 
     fn print_expr_tup(&mut self, exprs: &[P<ast::Expr>],
                       attrs: &[Attribute]) -> io::Result<()> {
-        try!(self.popen());
-        try!(self.print_inner_attributes_inline(attrs));
-        try!(self.commasep_exprs(Inconsistent, &exprs[..]));
+        self.popen()?;
+        self.print_inner_attributes_inline(attrs)?;
+        self.commasep_exprs(Inconsistent, &exprs[..])?;
         if exprs.len() == 1 {
-            try!(word(&mut self.s, ","));
+            word(&mut self.s, ",")?;
         }
         self.pclose()
     }
@@ -1904,7 +1897,7 @@ impl<'a> State<'a> {
     fn print_expr_call(&mut self,
                        func: &ast::Expr,
                        args: &[P<ast::Expr>]) -> io::Result<()> {
-        try!(self.print_expr_maybe_paren(func));
+        self.print_expr_maybe_paren(func)?;
         self.print_call_post(args)
     }
 
@@ -1913,14 +1906,14 @@ impl<'a> State<'a> {
                               tys: &[P<ast::Ty>],
                               args: &[P<ast::Expr>]) -> io::Result<()> {
         let base_args = &args[1..];
-        try!(self.print_expr(&args[0]));
-        try!(word(&mut self.s, "."));
-        try!(self.print_ident(ident.node));
+        self.print_expr(&args[0])?;
+        word(&mut self.s, ".")?;
+        self.print_ident(ident.node)?;
         if !tys.is_empty() {
-            try!(word(&mut self.s, "::<"));
-            try!(self.commasep(Inconsistent, tys,
-                               |s, ty| s.print_type(&ty)));
-            try!(word(&mut self.s, ">"));
+            word(&mut self.s, "::<")?;
+            self.commasep(Inconsistent, tys,
+                          |s, ty| s.print_type(&ty))?;
+            word(&mut self.s, ">")?;
         }
         self.print_call_post(base_args)
     }
@@ -1930,12 +1923,12 @@ impl<'a> State<'a> {
                          lhs: &ast::Expr,
                          rhs: &ast::Expr) -> io::Result<()> {
         if self.check_expr_bin_needs_paren(lhs, op) {
-            try!(self.print_expr_maybe_paren(lhs));
+            self.print_expr_maybe_paren(lhs)?;
         } else {
-            try!(self.print_expr(lhs));
+            self.print_expr(lhs)?;
         }
-        try!(space(&mut self.s));
-        try!(self.word_space(op.node.to_string()));
+        space(&mut self.s)?;
+        self.word_space(op.node.to_string())?;
         if self.check_expr_bin_needs_paren(rhs, op) {
             self.print_expr_maybe_paren(rhs)
         } else {
@@ -1946,15 +1939,15 @@ impl<'a> State<'a> {
     fn print_expr_unary(&mut self,
                         op: ast::UnOp,
                         expr: &ast::Expr) -> io::Result<()> {
-        try!(word(&mut self.s, ast::UnOp::to_string(op)));
+        word(&mut self.s, ast::UnOp::to_string(op))?;
         self.print_expr_maybe_paren(expr)
     }
 
     fn print_expr_addr_of(&mut self,
                           mutability: ast::Mutability,
                           expr: &ast::Expr) -> io::Result<()> {
-        try!(word(&mut self.s, "&"));
-        try!(self.print_mutability(mutability));
+        word(&mut self.s, "&")?;
+        self.print_mutability(mutability)?;
         self.print_expr_maybe_paren(expr)
     }
 
@@ -1965,139 +1958,139 @@ impl<'a> State<'a> {
     fn print_expr_outer_attr_style(&mut self,
                                   expr: &ast::Expr,
                                   is_inline: bool) -> io::Result<()> {
-        try!(self.maybe_print_comment(expr.span.lo));
+        self.maybe_print_comment(expr.span.lo)?;
 
         let attrs = expr.attrs.as_attr_slice();
         if is_inline {
-            try!(self.print_outer_attributes_inline(attrs));
+            self.print_outer_attributes_inline(attrs)?;
         } else {
-            try!(self.print_outer_attributes(attrs));
+            self.print_outer_attributes(attrs)?;
         }
 
-        try!(self.ibox(INDENT_UNIT));
-        try!(self.ann.pre(self, NodeExpr(expr)));
+        self.ibox(INDENT_UNIT)?;
+        self.ann.pre(self, NodeExpr(expr))?;
         match expr.node {
             ast::ExprKind::Box(ref expr) => {
-                try!(self.word_space("box"));
-                try!(self.print_expr(expr));
+                self.word_space("box")?;
+                self.print_expr(expr)?;
             }
             ast::ExprKind::InPlace(ref place, ref expr) => {
-                try!(self.print_expr_in_place(place, expr));
+                self.print_expr_in_place(place, expr)?;
             }
             ast::ExprKind::Vec(ref exprs) => {
-                try!(self.print_expr_vec(&exprs[..], attrs));
+                self.print_expr_vec(&exprs[..], attrs)?;
             }
             ast::ExprKind::Repeat(ref element, ref count) => {
-                try!(self.print_expr_repeat(&element, &count, attrs));
+                self.print_expr_repeat(&element, &count, attrs)?;
             }
             ast::ExprKind::Struct(ref path, ref fields, ref wth) => {
-                try!(self.print_expr_struct(path, &fields[..], wth, attrs));
+                self.print_expr_struct(path, &fields[..], wth, attrs)?;
             }
             ast::ExprKind::Tup(ref exprs) => {
-                try!(self.print_expr_tup(&exprs[..], attrs));
+                self.print_expr_tup(&exprs[..], attrs)?;
             }
             ast::ExprKind::Call(ref func, ref args) => {
-                try!(self.print_expr_call(&func, &args[..]));
+                self.print_expr_call(&func, &args[..])?;
             }
             ast::ExprKind::MethodCall(ident, ref tys, ref args) => {
-                try!(self.print_expr_method_call(ident, &tys[..], &args[..]));
+                self.print_expr_method_call(ident, &tys[..], &args[..])?;
             }
             ast::ExprKind::Binary(op, ref lhs, ref rhs) => {
-                try!(self.print_expr_binary(op, &lhs, &rhs));
+                self.print_expr_binary(op, &lhs, &rhs)?;
             }
             ast::ExprKind::Unary(op, ref expr) => {
-                try!(self.print_expr_unary(op, &expr));
+                self.print_expr_unary(op, &expr)?;
             }
             ast::ExprKind::AddrOf(m, ref expr) => {
-                try!(self.print_expr_addr_of(m, &expr));
+                self.print_expr_addr_of(m, &expr)?;
             }
             ast::ExprKind::Lit(ref lit) => {
-                try!(self.print_literal(&lit));
+                self.print_literal(&lit)?;
             }
             ast::ExprKind::Cast(ref expr, ref ty) => {
                 if let ast::ExprKind::Cast(..) = expr.node {
-                    try!(self.print_expr(&expr));
+                    self.print_expr(&expr)?;
                 } else {
-                    try!(self.print_expr_maybe_paren(&expr));
+                    self.print_expr_maybe_paren(&expr)?;
                 }
-                try!(space(&mut self.s));
-                try!(self.word_space("as"));
-                try!(self.print_type(&ty));
+                space(&mut self.s)?;
+                self.word_space("as")?;
+                self.print_type(&ty)?;
             }
             ast::ExprKind::Type(ref expr, ref ty) => {
-                try!(self.print_expr(&expr));
-                try!(self.word_space(":"));
-                try!(self.print_type(&ty));
+                self.print_expr(&expr)?;
+                self.word_space(":")?;
+                self.print_type(&ty)?;
             }
             ast::ExprKind::If(ref test, ref blk, ref elseopt) => {
-                try!(self.print_if(&test, &blk, elseopt.as_ref().map(|e| &**e)));
+                self.print_if(&test, &blk, elseopt.as_ref().map(|e| &**e))?;
             }
             ast::ExprKind::IfLet(ref pat, ref expr, ref blk, ref elseopt) => {
-                try!(self.print_if_let(&pat, &expr, &blk, elseopt.as_ref().map(|e| &**e)));
+                self.print_if_let(&pat, &expr, &blk, elseopt.as_ref().map(|e| &**e))?;
             }
             ast::ExprKind::While(ref test, ref blk, opt_ident) => {
                 if let Some(ident) = opt_ident {
-                    try!(self.print_ident(ident));
-                    try!(self.word_space(":"));
+                    self.print_ident(ident)?;
+                    self.word_space(":")?;
                 }
-                try!(self.head("while"));
-                try!(self.print_expr(&test));
-                try!(space(&mut self.s));
-                try!(self.print_block_with_attrs(&blk, attrs));
+                self.head("while")?;
+                self.print_expr(&test)?;
+                space(&mut self.s)?;
+                self.print_block_with_attrs(&blk, attrs)?;
             }
             ast::ExprKind::WhileLet(ref pat, ref expr, ref blk, opt_ident) => {
                 if let Some(ident) = opt_ident {
-                    try!(self.print_ident(ident));
-                    try!(self.word_space(":"));
+                    self.print_ident(ident)?;
+                    self.word_space(":")?;
                 }
-                try!(self.head("while let"));
-                try!(self.print_pat(&pat));
-                try!(space(&mut self.s));
-                try!(self.word_space("="));
-                try!(self.print_expr(&expr));
-                try!(space(&mut self.s));
-                try!(self.print_block_with_attrs(&blk, attrs));
+                self.head("while let")?;
+                self.print_pat(&pat)?;
+                space(&mut self.s)?;
+                self.word_space("=")?;
+                self.print_expr(&expr)?;
+                space(&mut self.s)?;
+                self.print_block_with_attrs(&blk, attrs)?;
             }
             ast::ExprKind::ForLoop(ref pat, ref iter, ref blk, opt_ident) => {
                 if let Some(ident) = opt_ident {
-                    try!(self.print_ident(ident));
-                    try!(self.word_space(":"));
+                    self.print_ident(ident)?;
+                    self.word_space(":")?;
                 }
-                try!(self.head("for"));
-                try!(self.print_pat(&pat));
-                try!(space(&mut self.s));
-                try!(self.word_space("in"));
-                try!(self.print_expr(&iter));
-                try!(space(&mut self.s));
-                try!(self.print_block_with_attrs(&blk, attrs));
+                self.head("for")?;
+                self.print_pat(&pat)?;
+                space(&mut self.s)?;
+                self.word_space("in")?;
+                self.print_expr(&iter)?;
+                space(&mut self.s)?;
+                self.print_block_with_attrs(&blk, attrs)?;
             }
             ast::ExprKind::Loop(ref blk, opt_ident) => {
                 if let Some(ident) = opt_ident {
-                    try!(self.print_ident(ident));
-                    try!(self.word_space(":"));
+                    self.print_ident(ident)?;
+                    self.word_space(":")?;
                 }
-                try!(self.head("loop"));
-                try!(space(&mut self.s));
-                try!(self.print_block_with_attrs(&blk, attrs));
+                self.head("loop")?;
+                space(&mut self.s)?;
+                self.print_block_with_attrs(&blk, attrs)?;
             }
             ast::ExprKind::Match(ref expr, ref arms) => {
-                try!(self.cbox(INDENT_UNIT));
-                try!(self.ibox(4));
-                try!(self.word_nbsp("match"));
-                try!(self.print_expr(&expr));
-                try!(space(&mut self.s));
-                try!(self.bopen());
-                try!(self.print_inner_attributes_no_trailing_hardbreak(attrs));
+                self.cbox(INDENT_UNIT)?;
+                self.ibox(4)?;
+                self.word_nbsp("match")?;
+                self.print_expr(&expr)?;
+                space(&mut self.s)?;
+                self.bopen()?;
+                self.print_inner_attributes_no_trailing_hardbreak(attrs)?;
                 for arm in arms {
-                    try!(self.print_arm(arm));
+                    self.print_arm(arm)?;
                 }
-                try!(self.bclose_(expr.span, INDENT_UNIT));
+                self.bclose_(expr.span, INDENT_UNIT)?;
             }
             ast::ExprKind::Closure(capture_clause, ref decl, ref body) => {
-                try!(self.print_capture_clause(capture_clause));
+                self.print_capture_clause(capture_clause)?;
 
-                try!(self.print_fn_block_args(&decl));
-                try!(space(&mut self.s));
+                self.print_fn_block_args(&decl)?;
+                space(&mut self.s)?;
 
                 let default_return = match decl.output {
                     ast::FunctionRetTy::Default(..) => true,
@@ -2105,144 +2098,150 @@ impl<'a> State<'a> {
                 };
 
                 if !default_return || !body.stmts.is_empty() || body.expr.is_none() {
-                    try!(self.print_block_unclosed(&body));
+                    self.print_block_unclosed(&body)?;
                 } else {
                     // we extract the block, so as not to create another set of boxes
                     let i_expr = body.expr.as_ref().unwrap();
                     match i_expr.node {
                         ast::ExprKind::Block(ref blk) => {
-                            try!(self.print_block_unclosed_with_attrs(
+                            self.print_block_unclosed_with_attrs(
                                 &blk,
-                                i_expr.attrs.as_attr_slice()));
+                                i_expr.attrs.as_attr_slice())?;
                         }
                         _ => {
                             // this is a bare expression
-                            try!(self.print_expr(&i_expr));
-                            try!(self.end()); // need to close a box
+                            self.print_expr(&i_expr)?;
+                            self.end()?; // need to close a box
                         }
                     }
                 }
                 // a box will be closed by print_expr, but we didn't want an overall
                 // wrapper so we closed the corresponding opening. so create an
                 // empty box to satisfy the close.
-                try!(self.ibox(0));
+                self.ibox(0)?;
             }
             ast::ExprKind::Block(ref blk) => {
                 // containing cbox, will be closed by print-block at }
-                try!(self.cbox(INDENT_UNIT));
+                self.cbox(INDENT_UNIT)?;
                 // head-box, will be closed by print-block after {
-                try!(self.ibox(0));
-                try!(self.print_block_with_attrs(&blk, attrs));
+                self.ibox(0)?;
+                self.print_block_with_attrs(&blk, attrs)?;
             }
             ast::ExprKind::Assign(ref lhs, ref rhs) => {
-                try!(self.print_expr(&lhs));
-                try!(space(&mut self.s));
-                try!(self.word_space("="));
-                try!(self.print_expr(&rhs));
+                self.print_expr(&lhs)?;
+                space(&mut self.s)?;
+                self.word_space("=")?;
+                self.print_expr(&rhs)?;
             }
             ast::ExprKind::AssignOp(op, ref lhs, ref rhs) => {
-                try!(self.print_expr(&lhs));
-                try!(space(&mut self.s));
-                try!(word(&mut self.s, op.node.to_string()));
-                try!(self.word_space("="));
-                try!(self.print_expr(&rhs));
+                self.print_expr(&lhs)?;
+                space(&mut self.s)?;
+                word(&mut self.s, op.node.to_string())?;
+                self.word_space("=")?;
+                self.print_expr(&rhs)?;
             }
             ast::ExprKind::Field(ref expr, id) => {
-                try!(self.print_expr(&expr));
-                try!(word(&mut self.s, "."));
-                try!(self.print_ident(id.node));
+                self.print_expr(&expr)?;
+                word(&mut self.s, ".")?;
+                self.print_ident(id.node)?;
             }
             ast::ExprKind::TupField(ref expr, id) => {
-                try!(self.print_expr(&expr));
-                try!(word(&mut self.s, "."));
-                try!(self.print_usize(id.node));
+                self.print_expr(&expr)?;
+                word(&mut self.s, ".")?;
+                self.print_usize(id.node)?;
             }
             ast::ExprKind::Index(ref expr, ref index) => {
-                try!(self.print_expr(&expr));
-                try!(word(&mut self.s, "["));
-                try!(self.print_expr(&index));
-                try!(word(&mut self.s, "]"));
+                self.print_expr(&expr)?;
+                word(&mut self.s, "[")?;
+                self.print_expr(&index)?;
+                word(&mut self.s, "]")?;
             }
-            ast::ExprKind::Range(ref start, ref end) => {
+            ast::ExprKind::Range(ref start, ref end, limits) => {
                 if let &Some(ref e) = start {
-                    try!(self.print_expr(&e));
+                    self.print_expr(&e)?;
+                }
+                if limits == ast::RangeLimits::HalfOpen {
+                    word(&mut self.s, "..")?;
+                } else {
+                    word(&mut self.s, "...")?;
                 }
-                try!(word(&mut self.s, ".."));
                 if let &Some(ref e) = end {
-                    try!(self.print_expr(&e));
+                    self.print_expr(&e)?;
                 }
             }
             ast::ExprKind::Path(None, ref path) => {
-                try!(self.print_path(path, true, 0))
+                self.print_path(path, true, 0)?
             }
             ast::ExprKind::Path(Some(ref qself), ref path) => {
-                try!(self.print_qpath(path, qself, true))
+                self.print_qpath(path, qself, true)?
             }
             ast::ExprKind::Break(opt_ident) => {
-                try!(word(&mut self.s, "break"));
-                try!(space(&mut self.s));
+                word(&mut self.s, "break")?;
+                space(&mut self.s)?;
                 if let Some(ident) = opt_ident {
-                    try!(self.print_ident(ident.node));
-                    try!(space(&mut self.s));
+                    self.print_ident(ident.node)?;
+                    space(&mut self.s)?;
                 }
             }
             ast::ExprKind::Again(opt_ident) => {
-                try!(word(&mut self.s, "continue"));
-                try!(space(&mut self.s));
+                word(&mut self.s, "continue")?;
+                space(&mut self.s)?;
                 if let Some(ident) = opt_ident {
-                    try!(self.print_ident(ident.node));
-                    try!(space(&mut self.s))
+                    self.print_ident(ident.node)?;
+                    space(&mut self.s)?
                 }
             }
             ast::ExprKind::Ret(ref result) => {
-                try!(word(&mut self.s, "return"));
+                word(&mut self.s, "return")?;
                 match *result {
                     Some(ref expr) => {
-                        try!(word(&mut self.s, " "));
-                        try!(self.print_expr(&expr));
+                        word(&mut self.s, " ")?;
+                        self.print_expr(&expr)?;
                     }
                     _ => ()
                 }
             }
             ast::ExprKind::InlineAsm(ref a) => {
-                try!(word(&mut self.s, "asm!"));
-                try!(self.popen());
-                try!(self.print_string(&a.asm, a.asm_str_style));
-                try!(self.word_space(":"));
+                word(&mut self.s, "asm!")?;
+                self.popen()?;
+                self.print_string(&a.asm, a.asm_str_style)?;
+                self.word_space(":")?;
 
-                try!(self.commasep(Inconsistent, &a.outputs,
+                self.commasep(Inconsistent, &a.outputs,
                                    |s, out| {
-                    match out.constraint.slice_shift_char() {
-                        Some(('=', operand)) if out.is_rw => {
-                            try!(s.print_string(&format!("+{}", operand),
-                                                ast::StrStyle::Cooked))
+                    let mut ch = out.constraint.chars();
+                    match ch.next() {
+                        Some('=') if out.is_rw => {
+                            s.print_string(&format!("+{}", ch.as_str()),
+                                           ast::StrStyle::Cooked)?
                         }
-                        _ => try!(s.print_string(&out.constraint, ast::StrStyle::Cooked))
+                        _ => s.print_string(&out.constraint,
+                                            ast::StrStyle::Cooked)?
                     }
-                    try!(s.popen());
-                    try!(s.print_expr(&out.expr));
-                    try!(s.pclose());
+                    s.popen()?;
+                    s.print_expr(&out.expr)?;
+                    s.pclose()?;
                     Ok(())
-                }));
-                try!(space(&mut self.s));
-                try!(self.word_space(":"));
+                })?;
+                space(&mut self.s)?;
+                self.word_space(":")?;
 
-                try!(self.commasep(Inconsistent, &a.inputs,
+                self.commasep(Inconsistent, &a.inputs,
                                    |s, &(ref co, ref o)| {
-                    try!(s.print_string(&co, ast::StrStyle::Cooked));
-                    try!(s.popen());
-                    try!(s.print_expr(&o));
-                    try!(s.pclose());
+                    s.print_string(&co, ast::StrStyle::Cooked)?;
+                    s.popen()?;
+                    s.print_expr(&o)?;
+                    s.pclose()?;
                     Ok(())
-                }));
-                try!(space(&mut self.s));
-                try!(self.word_space(":"));
+                })?;
+                space(&mut self.s)?;
+                self.word_space(":")?;
 
-                try!(self.commasep(Inconsistent, &a.clobbers,
+                self.commasep(Inconsistent, &a.clobbers,
                                    |s, co| {
-                    try!(s.print_string(&co, ast::StrStyle::Cooked));
+                    s.print_string(&co, ast::StrStyle::Cooked)?;
                     Ok(())
-                }));
+                })?;
 
                 let mut options = vec!();
                 if a.volatile {
@@ -2256,54 +2255,58 @@ impl<'a> State<'a> {
                 }
 
                 if !options.is_empty() {
-                    try!(space(&mut self.s));
-                    try!(self.word_space(":"));
-                    try!(self.commasep(Inconsistent, &options,
-                                       |s, &co| {
-                        try!(s.print_string(co, ast::StrStyle::Cooked));
-                        Ok(())
-                    }));
+                    space(&mut self.s)?;
+                    self.word_space(":")?;
+                    self.commasep(Inconsistent, &options,
+                                  |s, &co| {
+                                      s.print_string(co, ast::StrStyle::Cooked)?;
+                                      Ok(())
+                                  })?;
                 }
 
-                try!(self.pclose());
+                self.pclose()?;
             }
-            ast::ExprKind::Mac(ref m) => try!(self.print_mac(m, token::Paren)),
+            ast::ExprKind::Mac(ref m) => self.print_mac(m, token::Paren)?,
             ast::ExprKind::Paren(ref e) => {
-                try!(self.popen());
-                try!(self.print_inner_attributes_inline(attrs));
-                try!(self.print_expr(&e));
-                try!(self.pclose());
+                self.popen()?;
+                self.print_inner_attributes_inline(attrs)?;
+                self.print_expr(&e)?;
+                self.pclose()?;
+            },
+            ast::ExprKind::Try(ref e) => {
+                self.print_expr(e)?;
+                word(&mut self.s, "?")?
             }
         }
-        try!(self.ann.post(self, NodeExpr(expr)));
+        self.ann.post(self, NodeExpr(expr))?;
         self.end()
     }
 
     pub fn print_local_decl(&mut self, loc: &ast::Local) -> io::Result<()> {
-        try!(self.print_pat(&loc.pat));
+        self.print_pat(&loc.pat)?;
         if let Some(ref ty) = loc.ty {
-            try!(self.word_space(":"));
-            try!(self.print_type(&ty));
+            self.word_space(":")?;
+            self.print_type(&ty)?;
         }
         Ok(())
     }
 
     pub fn print_decl(&mut self, decl: &ast::Decl) -> io::Result<()> {
-        try!(self.maybe_print_comment(decl.span.lo));
+        self.maybe_print_comment(decl.span.lo)?;
         match decl.node {
             ast::DeclKind::Local(ref loc) => {
-                try!(self.print_outer_attributes(loc.attrs.as_attr_slice()));
-                try!(self.space_if_not_bol());
-                try!(self.ibox(INDENT_UNIT));
-                try!(self.word_nbsp("let"));
-
-                try!(self.ibox(INDENT_UNIT));
-                try!(self.print_local_decl(&loc));
-                try!(self.end());
+                self.print_outer_attributes(loc.attrs.as_attr_slice())?;
+                self.space_if_not_bol()?;
+                self.ibox(INDENT_UNIT)?;
+                self.word_nbsp("let")?;
+
+                self.ibox(INDENT_UNIT)?;
+                self.print_local_decl(&loc)?;
+                self.end()?;
                 if let Some(ref init) = loc.init {
-                    try!(self.nbsp());
-                    try!(self.word_space("="));
-                    try!(self.print_expr(&init));
+                    self.nbsp()?;
+                    self.word_space("=")?;
+                    self.print_expr(&init)?;
                 }
                 self.end()
             }
@@ -2312,7 +2315,7 @@ impl<'a> State<'a> {
     }
 
     pub fn print_ident(&mut self, ident: ast::Ident) -> io::Result<()> {
-        try!(word(&mut self.s, &ident.name.as_str()));
+        word(&mut self.s, &ident.name.as_str())?;
         self.ann.post(self, NodeIdent(&ident))
     }
 
@@ -2321,15 +2324,15 @@ impl<'a> State<'a> {
     }
 
     pub fn print_name(&mut self, name: ast::Name) -> io::Result<()> {
-        try!(word(&mut self.s, &name.as_str()));
+        word(&mut self.s, &name.as_str())?;
         self.ann.post(self, NodeName(&name))
     }
 
     pub fn print_for_decl(&mut self, loc: &ast::Local,
                           coll: &ast::Expr) -> io::Result<()> {
-        try!(self.print_local_decl(loc));
-        try!(space(&mut self.s));
-        try!(self.word_space("in"));
+        self.print_local_decl(loc)?;
+        space(&mut self.s)?;
+        self.word_space("in")?;
         self.print_expr(coll)
     }
 
@@ -2339,19 +2342,19 @@ impl<'a> State<'a> {
                   depth: usize)
                   -> io::Result<()>
     {
-        try!(self.maybe_print_comment(path.span.lo));
+        self.maybe_print_comment(path.span.lo)?;
 
         let mut first = !path.global;
         for segment in &path.segments[..path.segments.len()-depth] {
             if first {
                 first = false
             } else {
-                try!(word(&mut self.s, "::"))
+                word(&mut self.s, "::")?
             }
 
-            try!(self.print_ident(segment.identifier));
+            self.print_ident(segment.identifier)?;
 
-            try!(self.print_path_parameters(&segment.parameters, colons_before_params));
+            self.print_path_parameters(&segment.parameters, colons_before_params)?;
         }
 
         Ok(())
@@ -2363,18 +2366,18 @@ impl<'a> State<'a> {
                    colons_before_params: bool)
                    -> io::Result<()>
     {
-        try!(word(&mut self.s, "<"));
-        try!(self.print_type(&qself.ty));
+        word(&mut self.s, "<")?;
+        self.print_type(&qself.ty)?;
         if qself.position > 0 {
-            try!(space(&mut self.s));
-            try!(self.word_space("as"));
+            space(&mut self.s)?;
+            self.word_space("as")?;
             let depth = path.segments.len() - qself.position;
-            try!(self.print_path(&path, false, depth));
+            self.print_path(&path, false, depth)?;
         }
-        try!(word(&mut self.s, ">"));
-        try!(word(&mut self.s, "::"));
+        word(&mut self.s, ">")?;
+        word(&mut self.s, "::")?;
         let item_segment = path.segments.last().unwrap();
-        try!(self.print_ident(item_segment.identifier));
+        self.print_ident(item_segment.identifier)?;
         self.print_path_parameters(&item_segment.parameters, colons_before_params)
     }
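For orientation, print_qpath above emits the `<type as trait>::item` shape. A minimal standalone example of a qualified path it would render (not taken from this patch):

    let mut it = <Vec<u8> as IntoIterator>::into_iter(vec![1, 2, 3]);
    assert_eq!(it.next(), Some(1));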
 
@@ -2388,61 +2391,61 @@ impl<'a> State<'a> {
         }
 
         if colons_before_params {
-            try!(word(&mut self.s, "::"))
+            word(&mut self.s, "::")?
         }
 
         match *parameters {
             ast::PathParameters::AngleBracketed(ref data) => {
-                try!(word(&mut self.s, "<"));
+                word(&mut self.s, "<")?;
 
                 let mut comma = false;
                 for lifetime in &data.lifetimes {
                     if comma {
-                        try!(self.word_space(","))
+                        self.word_space(",")?
                     }
-                    try!(self.print_lifetime(lifetime));
+                    self.print_lifetime(lifetime)?;
                     comma = true;
                 }
 
                 if !data.types.is_empty() {
                     if comma {
-                        try!(self.word_space(","))
+                        self.word_space(",")?
                     }
-                    try!(self.commasep(
+                    self.commasep(
                         Inconsistent,
                         &data.types,
-                        |s, ty| s.print_type(&ty)));
+                        |s, ty| s.print_type(&ty))?;
                         comma = true;
                 }
 
                 for binding in data.bindings.iter() {
                     if comma {
-                        try!(self.word_space(","))
+                        self.word_space(",")?
                     }
-                    try!(self.print_ident(binding.ident));
-                    try!(space(&mut self.s));
-                    try!(self.word_space("="));
-                    try!(self.print_type(&binding.ty));
+                    self.print_ident(binding.ident)?;
+                    space(&mut self.s)?;
+                    self.word_space("=")?;
+                    self.print_type(&binding.ty)?;
                     comma = true;
                 }
 
-                try!(word(&mut self.s, ">"))
+                word(&mut self.s, ">")?
             }
 
             ast::PathParameters::Parenthesized(ref data) => {
-                try!(word(&mut self.s, "("));
-                try!(self.commasep(
+                word(&mut self.s, "(")?;
+                self.commasep(
                     Inconsistent,
                     &data.inputs,
-                    |s, ty| s.print_type(&ty)));
-                try!(word(&mut self.s, ")"));
+                    |s, ty| s.print_type(&ty))?;
+                word(&mut self.s, ")")?;
 
                 match data.output {
                     None => { }
                     Some(ref ty) => {
-                        try!(self.space_if_not_bol());
-                        try!(self.word_space("->"));
-                        try!(self.print_type(&ty));
+                        self.space_if_not_bol()?;
+                        self.word_space("->")?;
+                        self.print_type(&ty)?;
                     }
                 }
             }
@@ -2452,120 +2455,120 @@ impl<'a> State<'a> {
     }
 
     pub fn print_pat(&mut self, pat: &ast::Pat) -> io::Result<()> {
-        try!(self.maybe_print_comment(pat.span.lo));
-        try!(self.ann.pre(self, NodePat(pat)));
+        self.maybe_print_comment(pat.span.lo)?;
+        self.ann.pre(self, NodePat(pat))?;
         /* Pat isn't normalized, but the beauty of it
          is that it doesn't matter */
         match pat.node {
-            PatKind::Wild => try!(word(&mut self.s, "_")),
+            PatKind::Wild => word(&mut self.s, "_")?,
             PatKind::Ident(binding_mode, ref path1, ref sub) => {
                 match binding_mode {
                     ast::BindingMode::ByRef(mutbl) => {
-                        try!(self.word_nbsp("ref"));
-                        try!(self.print_mutability(mutbl));
+                        self.word_nbsp("ref")?;
+                        self.print_mutability(mutbl)?;
                     }
                     ast::BindingMode::ByValue(ast::Mutability::Immutable) => {}
                     ast::BindingMode::ByValue(ast::Mutability::Mutable) => {
-                        try!(self.word_nbsp("mut"));
+                        self.word_nbsp("mut")?;
                     }
                 }
-                try!(self.print_ident(path1.node));
+                self.print_ident(path1.node)?;
                 match *sub {
                     Some(ref p) => {
-                        try!(word(&mut self.s, "@"));
-                        try!(self.print_pat(&p));
+                        word(&mut self.s, "@")?;
+                        self.print_pat(&p)?;
                     }
                     None => ()
                 }
             }
             PatKind::TupleStruct(ref path, ref args_) => {
-                try!(self.print_path(path, true, 0));
+                self.print_path(path, true, 0)?;
                 match *args_ {
-                    None => try!(word(&mut self.s, "(..)")),
+                    None => word(&mut self.s, "(..)")?,
                     Some(ref args) => {
-                        try!(self.popen());
-                        try!(self.commasep(Inconsistent, &args[..],
-                                          |s, p| s.print_pat(&p)));
-                        try!(self.pclose());
+                        self.popen()?;
+                        self.commasep(Inconsistent, &args[..],
+                                          |s, p| s.print_pat(&p))?;
+                        self.pclose()?;
                     }
                 }
             }
             PatKind::Path(ref path) => {
-                try!(self.print_path(path, true, 0));
+                self.print_path(path, true, 0)?;
             }
             PatKind::QPath(ref qself, ref path) => {
-                try!(self.print_qpath(path, qself, false));
+                self.print_qpath(path, qself, false)?;
             }
             PatKind::Struct(ref path, ref fields, etc) => {
-                try!(self.print_path(path, true, 0));
-                try!(self.nbsp());
-                try!(self.word_space("{"));
-                try!(self.commasep_cmnt(
+                self.print_path(path, true, 0)?;
+                self.nbsp()?;
+                self.word_space("{")?;
+                self.commasep_cmnt(
                     Consistent, &fields[..],
                     |s, f| {
-                        try!(s.cbox(INDENT_UNIT));
+                        s.cbox(INDENT_UNIT)?;
                         if !f.node.is_shorthand {
-                            try!(s.print_ident(f.node.ident));
-                            try!(s.word_nbsp(":"));
+                            s.print_ident(f.node.ident)?;
+                            s.word_nbsp(":")?;
                         }
-                        try!(s.print_pat(&f.node.pat));
+                        s.print_pat(&f.node.pat)?;
                         s.end()
                     },
-                    |f| f.node.pat.span));
+                    |f| f.node.pat.span)?;
                 if etc {
-                    if !fields.is_empty() { try!(self.word_space(",")); }
-                    try!(word(&mut self.s, ".."));
+                    if !fields.is_empty() { self.word_space(",")?; }
+                    word(&mut self.s, "..")?;
                 }
-                try!(space(&mut self.s));
-                try!(word(&mut self.s, "}"));
+                space(&mut self.s)?;
+                word(&mut self.s, "}")?;
             }
             PatKind::Tup(ref elts) => {
-                try!(self.popen());
-                try!(self.commasep(Inconsistent,
+                self.popen()?;
+                self.commasep(Inconsistent,
                                    &elts[..],
-                                   |s, p| s.print_pat(&p)));
+                                   |s, p| s.print_pat(&p))?;
                 if elts.len() == 1 {
-                    try!(word(&mut self.s, ","));
+                    word(&mut self.s, ",")?;
                 }
-                try!(self.pclose());
+                self.pclose()?;
             }
             PatKind::Box(ref inner) => {
-                try!(word(&mut self.s, "box "));
-                try!(self.print_pat(&inner));
+                word(&mut self.s, "box ")?;
+                self.print_pat(&inner)?;
             }
             PatKind::Ref(ref inner, mutbl) => {
-                try!(word(&mut self.s, "&"));
+                word(&mut self.s, "&")?;
                 if mutbl == ast::Mutability::Mutable {
-                    try!(word(&mut self.s, "mut "));
+                    word(&mut self.s, "mut ")?;
                 }
-                try!(self.print_pat(&inner));
+                self.print_pat(&inner)?;
             }
-            PatKind::Lit(ref e) => try!(self.print_expr(&**e)),
+            PatKind::Lit(ref e) => self.print_expr(&**e)?,
             PatKind::Range(ref begin, ref end) => {
-                try!(self.print_expr(&begin));
-                try!(space(&mut self.s));
-                try!(word(&mut self.s, "..."));
-                try!(self.print_expr(&end));
+                self.print_expr(&begin)?;
+                space(&mut self.s)?;
+                word(&mut self.s, "...")?;
+                self.print_expr(&end)?;
             }
             PatKind::Vec(ref before, ref slice, ref after) => {
-                try!(word(&mut self.s, "["));
-                try!(self.commasep(Inconsistent,
+                word(&mut self.s, "[")?;
+                self.commasep(Inconsistent,
                                    &before[..],
-                                   |s, p| s.print_pat(&p)));
+                                   |s, p| s.print_pat(&p))?;
                 if let Some(ref p) = *slice {
-                    if !before.is_empty() { try!(self.word_space(",")); }
+                    if !before.is_empty() { self.word_space(",")?; }
                     if p.node != PatKind::Wild {
-                        try!(self.print_pat(&p));
+                        self.print_pat(&p)?;
                     }
-                    try!(word(&mut self.s, ".."));
-                    if !after.is_empty() { try!(self.word_space(",")); }
+                    word(&mut self.s, "..")?;
+                    if !after.is_empty() { self.word_space(",")?; }
                 }
-                try!(self.commasep(Inconsistent,
+                self.commasep(Inconsistent,
                                    &after[..],
-                                   |s, p| s.print_pat(&p)));
-                try!(word(&mut self.s, "]"));
+                                   |s, p| s.print_pat(&p))?;
+                word(&mut self.s, "]")?;
             }
-            PatKind::Mac(ref m) => try!(self.print_mac(m, token::Paren)),
+            PatKind::Mac(ref m) => self.print_mac(m, token::Paren)?,
         }
         self.ann.post(self, NodePat(pat))
     }
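print_pat above handles each PatKind in turn; for orientation, a couple of the surface forms it emits, as a standalone illustration (not from this patch):

    let value: Option<i32> = Some(3);
    match value {
        Some(ref x) => println!("{}", x), // tuple-struct pattern containing a `ref` binding
        None => {}                        // plain path pattern
    }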
@@ -2574,43 +2577,43 @@ impl<'a> State<'a> {
         // I have no idea why this check is necessary, but here it
         // is :(
         if arm.attrs.is_empty() {
-            try!(space(&mut self.s));
+            space(&mut self.s)?;
         }
-        try!(self.cbox(INDENT_UNIT));
-        try!(self.ibox(0));
-        try!(self.print_outer_attributes(&arm.attrs));
+        self.cbox(INDENT_UNIT)?;
+        self.ibox(0)?;
+        self.print_outer_attributes(&arm.attrs)?;
         let mut first = true;
         for p in &arm.pats {
             if first {
                 first = false;
             } else {
-                try!(space(&mut self.s));
-                try!(self.word_space("|"));
+                space(&mut self.s)?;
+                self.word_space("|")?;
             }
-            try!(self.print_pat(&p));
+            self.print_pat(&p)?;
         }
-        try!(space(&mut self.s));
+        space(&mut self.s)?;
         if let Some(ref e) = arm.guard {
-            try!(self.word_space("if"));
-            try!(self.print_expr(&e));
-            try!(space(&mut self.s));
+            self.word_space("if")?;
+            self.print_expr(&e)?;
+            space(&mut self.s)?;
         }
-        try!(self.word_space("=>"));
+        self.word_space("=>")?;
 
         match arm.body.node {
             ast::ExprKind::Block(ref blk) => {
                 // the block will close the pattern's ibox
-                try!(self.print_block_unclosed_indent(&blk, INDENT_UNIT));
+                self.print_block_unclosed_indent(&blk, INDENT_UNIT)?;
 
                 // If it is a user-provided unsafe block, print a comma after it
                 if let BlockCheckMode::Unsafe(ast::UserProvided) = blk.rules {
-                    try!(word(&mut self.s, ","));
+                    word(&mut self.s, ",")?;
                 }
             }
             _ => {
-                try!(self.end()); // close the ibox for the pattern
-                try!(self.print_expr(&arm.body));
-                try!(word(&mut self.s, ","));
+                self.end()?; // close the ibox for the pattern
+                self.print_expr(&arm.body)?;
+                word(&mut self.s, ",")?;
             }
         }
         self.end() // close enclosing cbox
@@ -2620,22 +2623,22 @@ impl<'a> State<'a> {
     fn print_explicit_self(&mut self,
                            explicit_self: &ast::SelfKind,
                            mutbl: ast::Mutability) -> io::Result<bool> {
-        try!(self.print_mutability(mutbl));
+        self.print_mutability(mutbl)?;
         match *explicit_self {
             ast::SelfKind::Static => { return Ok(false); }
             ast::SelfKind::Value(_) => {
-                try!(word(&mut self.s, "self"));
+                word(&mut self.s, "self")?;
             }
             ast::SelfKind::Region(ref lt, m, _) => {
-                try!(word(&mut self.s, "&"));
-                try!(self.print_opt_lifetime(lt));
-                try!(self.print_mutability(m));
-                try!(word(&mut self.s, "self"));
+                word(&mut self.s, "&")?;
+                self.print_opt_lifetime(lt)?;
+                self.print_mutability(m)?;
+                word(&mut self.s, "self")?;
             }
             ast::SelfKind::Explicit(ref typ, _) => {
-                try!(word(&mut self.s, "self"));
-                try!(self.word_space(":"));
-                try!(self.print_type(&typ));
+                word(&mut self.s, "self")?;
+                self.word_space(":")?;
+                self.print_type(&typ)?;
             }
         }
         return Ok(true);
@@ -2649,15 +2652,15 @@ impl<'a> State<'a> {
                     name: Option<ast::Ident>,
                     generics: &ast::Generics,
                     opt_explicit_self: Option<&ast::SelfKind>,
-                    vis: ast::Visibility) -> io::Result<()> {
-        try!(self.print_fn_header_info(unsafety, constness, abi, vis));
+                    vis: &ast::Visibility) -> io::Result<()> {
+        self.print_fn_header_info(unsafety, constness, abi, vis)?;
 
         if let Some(name) = name {
-            try!(self.nbsp());
-            try!(self.print_ident(name));
+            self.nbsp()?;
+            self.print_ident(name)?;
         }
-        try!(self.print_generics(generics));
-        try!(self.print_fn_args_and_ret(decl, opt_explicit_self));
+        self.print_generics(generics)?;
+        self.print_fn_args_and_ret(decl, opt_explicit_self)?;
         self.print_where_clause(&generics.where_clause)
     }
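The signature change above (taking `vis: &ast::Visibility` instead of by value) is mirrored at the call sites later in this diff; abridged call shape:

    // before: self.print_fn(decl, ..., opt_explicit_self, ast::Visibility::Inherited)
    // after:  self.print_fn(decl, ..., opt_explicit_self, &ast::Visibility::Inherited)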
 
@@ -2666,7 +2669,7 @@ impl<'a> State<'a> {
                          is_closure: bool) -> io::Result<()> {
         // It is unfortunate to duplicate the commasep logic, but we want the
         // self type and the args all in the same box.
-        try!(self.rbox(0, Inconsistent));
+        self.rbox(0, Inconsistent)?;
         let mut first = true;
         if let Some(explicit_self) = opt_explicit_self {
             let m = match *explicit_self {
@@ -2676,7 +2679,7 @@ impl<'a> State<'a> {
                     _ => ast::Mutability::Immutable
                 }
             };
-            first = !try!(self.print_explicit_self(explicit_self, m));
+            first = !self.print_explicit_self(explicit_self, m)?;
         }
 
         // HACK(eddyb) ignore the separately printed self argument.
@@ -2687,8 +2690,8 @@ impl<'a> State<'a> {
         };
 
         for arg in args {
-            if first { first = false; } else { try!(self.word_space(",")); }
-            try!(self.print_arg(arg, is_closure));
+            if first { first = false; } else { self.word_space(",")?; }
+            self.print_arg(arg, is_closure)?;
         }
 
         self.end()
@@ -2697,12 +2700,12 @@ impl<'a> State<'a> {
     pub fn print_fn_args_and_ret(&mut self, decl: &ast::FnDecl,
                                  opt_explicit_self: Option<&ast::SelfKind>)
         -> io::Result<()> {
-        try!(self.popen());
-        try!(self.print_fn_args(decl, opt_explicit_self, false));
+        self.popen()?;
+        self.print_fn_args(decl, opt_explicit_self, false)?;
         if decl.variadic {
-            try!(word(&mut self.s, ", ..."));
+            word(&mut self.s, ", ...")?;
         }
-        try!(self.pclose());
+        self.pclose()?;
 
         self.print_fn_output(decl)
     }
@@ -2711,24 +2714,24 @@ impl<'a> State<'a> {
             &mut self,
             decl: &ast::FnDecl)
             -> io::Result<()> {
-        try!(word(&mut self.s, "|"));
-        try!(self.print_fn_args(decl, None, true));
-        try!(word(&mut self.s, "|"));
+        word(&mut self.s, "|")?;
+        self.print_fn_args(decl, None, true)?;
+        word(&mut self.s, "|")?;
 
         if let ast::FunctionRetTy::Default(..) = decl.output {
             return Ok(());
         }
 
-        try!(self.space_if_not_bol());
-        try!(self.word_space("->"));
+        self.space_if_not_bol()?;
+        self.word_space("->")?;
         match decl.output {
             ast::FunctionRetTy::Ty(ref ty) => {
-                try!(self.print_type(&ty));
+                self.print_type(&ty)?;
                 self.maybe_print_comment(ty.span.lo)
             }
             ast::FunctionRetTy::Default(..) => unreachable!(),
             ast::FunctionRetTy::None(span) => {
-                try!(self.word_nbsp("!"));
+                self.word_nbsp("!")?;
                 self.maybe_print_comment(span.lo)
             }
         }
@@ -2747,28 +2750,28 @@ impl<'a> State<'a> {
                         bounds: &[ast::TyParamBound])
                         -> io::Result<()> {
         if !bounds.is_empty() {
-            try!(word(&mut self.s, prefix));
+            word(&mut self.s, prefix)?;
             let mut first = true;
             for bound in bounds {
-                try!(self.nbsp());
+                self.nbsp()?;
                 if first {
                     first = false;
                 } else {
-                    try!(self.word_space("+"));
+                    self.word_space("+")?;
                 }
 
-                try!(match *bound {
+                match *bound {
                     TraitTyParamBound(ref tref, TraitBoundModifier::None) => {
                         self.print_poly_trait_ref(tref)
                     }
                     TraitTyParamBound(ref tref, TraitBoundModifier::Maybe) => {
-                        try!(word(&mut self.s, "?"));
+                        word(&mut self.s, "?")?;
                         self.print_poly_trait_ref(tref)
                     }
                     RegionTyParamBound(ref lt) => {
                         self.print_lifetime(lt)
                     }
-                })
+                }?
             }
             Ok(())
         } else {
@@ -2787,11 +2790,11 @@ impl<'a> State<'a> {
                               lifetime: &ast::LifetimeDef)
                               -> io::Result<()>
     {
-        try!(self.print_lifetime(&lifetime.lifetime));
+        self.print_lifetime(&lifetime.lifetime)?;
         let mut sep = ":";
         for v in &lifetime.bounds {
-            try!(word(&mut self.s, sep));
-            try!(self.print_lifetime(v));
+            word(&mut self.s, sep)?;
+            self.print_lifetime(v)?;
             sep = "+";
         }
         Ok(())
@@ -2806,14 +2809,14 @@ impl<'a> State<'a> {
             return Ok(());
         }
 
-        try!(word(&mut self.s, "<"));
+        word(&mut self.s, "<")?;
 
         let mut ints = Vec::new();
         for i in 0..total {
             ints.push(i);
         }
 
-        try!(self.commasep(Inconsistent, &ints[..], |s, &idx| {
+        self.commasep(Inconsistent, &ints[..], |s, &idx| {
             if idx < generics.lifetimes.len() {
                 let lifetime = &generics.lifetimes[idx];
                 s.print_lifetime_def(lifetime)
@@ -2822,19 +2825,19 @@ impl<'a> State<'a> {
                 let param = &generics.ty_params[idx];
                 s.print_ty_param(param)
             }
-        }));
+        })?;
 
-        try!(word(&mut self.s, ">"));
+        word(&mut self.s, ">")?;
         Ok(())
     }
 
     pub fn print_ty_param(&mut self, param: &ast::TyParam) -> io::Result<()> {
-        try!(self.print_ident(param.ident));
-        try!(self.print_bounds(":", &param.bounds));
+        self.print_ident(param.ident)?;
+        self.print_bounds(":", &param.bounds)?;
         match param.default {
             Some(ref default) => {
-                try!(space(&mut self.s));
-                try!(self.word_space("="));
+                space(&mut self.s)?;
+                self.word_space("=")?;
                 self.print_type(&default)
             }
             _ => Ok(())
@@ -2847,12 +2850,12 @@ impl<'a> State<'a> {
             return Ok(())
         }
 
-        try!(space(&mut self.s));
-        try!(self.word_space("where"));
+        space(&mut self.s)?;
+        self.word_space("where")?;
 
         for (i, predicate) in where_clause.predicates.iter().enumerate() {
             if i != 0 {
-                try!(self.word_space(","));
+                self.word_space(",")?;
             }
 
             match *predicate {
@@ -2860,29 +2863,29 @@ impl<'a> State<'a> {
                                                                              ref bounded_ty,
                                                                              ref bounds,
                                                                              ..}) => {
-                    try!(self.print_formal_lifetime_list(bound_lifetimes));
-                    try!(self.print_type(&bounded_ty));
-                    try!(self.print_bounds(":", bounds));
+                    self.print_formal_lifetime_list(bound_lifetimes)?;
+                    self.print_type(&bounded_ty)?;
+                    self.print_bounds(":", bounds)?;
                 }
                 ast::WherePredicate::RegionPredicate(ast::WhereRegionPredicate{ref lifetime,
                                                                                ref bounds,
                                                                                ..}) => {
-                    try!(self.print_lifetime(lifetime));
-                    try!(word(&mut self.s, ":"));
+                    self.print_lifetime(lifetime)?;
+                    word(&mut self.s, ":")?;
 
                     for (i, bound) in bounds.iter().enumerate() {
-                        try!(self.print_lifetime(bound));
+                        self.print_lifetime(bound)?;
 
                         if i != 0 {
-                            try!(word(&mut self.s, ":"));
+                            word(&mut self.s, ":")?;
                         }
                     }
                 }
                 ast::WherePredicate::EqPredicate(ast::WhereEqPredicate{ref path, ref ty, ..}) => {
-                    try!(self.print_path(path, false, 0));
-                    try!(space(&mut self.s));
-                    try!(self.word_space("="));
-                    try!(self.print_type(&ty));
+                    self.print_path(path, false, 0)?;
+                    space(&mut self.s)?;
+                    self.word_space("=")?;
+                    self.print_type(&ty)?;
                 }
             }
         }
@@ -2893,52 +2896,52 @@ impl<'a> State<'a> {
     pub fn print_view_path(&mut self, vp: &ast::ViewPath) -> io::Result<()> {
         match vp.node {
             ast::ViewPathSimple(ident, ref path) => {
-                try!(self.print_path(path, false, 0));
+                self.print_path(path, false, 0)?;
 
                 if path.segments.last().unwrap().identifier.name !=
                         ident.name {
-                    try!(space(&mut self.s));
-                    try!(self.word_space("as"));
-                    try!(self.print_ident(ident));
+                    space(&mut self.s)?;
+                    self.word_space("as")?;
+                    self.print_ident(ident)?;
                 }
 
                 Ok(())
             }
 
             ast::ViewPathGlob(ref path) => {
-                try!(self.print_path(path, false, 0));
+                self.print_path(path, false, 0)?;
                 word(&mut self.s, "::*")
             }
 
             ast::ViewPathList(ref path, ref idents) => {
                 if path.segments.is_empty() {
-                    try!(word(&mut self.s, "{"));
+                    word(&mut self.s, "{")?;
                 } else {
-                    try!(self.print_path(path, false, 0));
-                    try!(word(&mut self.s, "::{"));
+                    self.print_path(path, false, 0)?;
+                    word(&mut self.s, "::{")?;
                 }
-                try!(self.commasep(Inconsistent, &idents[..], |s, w| {
+                self.commasep(Inconsistent, &idents[..], |s, w| {
                     match w.node {
                         ast::PathListItemKind::Ident { name, rename, .. } => {
-                            try!(s.print_ident(name));
+                            s.print_ident(name)?;
                             if let Some(ident) = rename {
-                                try!(space(&mut s.s));
-                                try!(s.word_space("as"));
-                                try!(s.print_ident(ident));
+                                space(&mut s.s)?;
+                                s.word_space("as")?;
+                                s.print_ident(ident)?;
                             }
                             Ok(())
                         },
                         ast::PathListItemKind::Mod { rename, .. } => {
-                            try!(word(&mut s.s, "self"));
+                            word(&mut s.s, "self")?;
                             if let Some(ident) = rename {
-                                try!(space(&mut s.s));
-                                try!(s.word_space("as"));
-                                try!(s.print_ident(ident));
+                                space(&mut s.s)?;
+                                s.word_space("as")?;
+                                s.print_ident(ident)?;
                             }
                             Ok(())
                         }
                     }
-                }));
+                })?;
                 word(&mut self.s, "}")
             }
         }
@@ -2953,14 +2956,14 @@ impl<'a> State<'a> {
     }
 
     pub fn print_mt(&mut self, mt: &ast::MutTy) -> io::Result<()> {
-        try!(self.print_mutability(mt.mutbl));
+        self.print_mutability(mt.mutbl)?;
         self.print_type(&mt.ty)
     }
 
     pub fn print_arg(&mut self, input: &ast::Arg, is_closure: bool) -> io::Result<()> {
-        try!(self.ibox(INDENT_UNIT));
+        self.ibox(INDENT_UNIT)?;
         match input.ty.node {
-            ast::TyKind::Infer if is_closure => try!(self.print_pat(&input.pat)),
+            ast::TyKind::Infer if is_closure => self.print_pat(&input.pat)?,
             _ => {
                 match input.pat.node {
                     PatKind::Ident(_, ref path1, _) if
@@ -2969,12 +2972,12 @@ impl<'a> State<'a> {
                         // Do nothing.
                     }
                     _ => {
-                        try!(self.print_pat(&input.pat));
-                        try!(word(&mut self.s, ":"));
-                        try!(space(&mut self.s));
+                        self.print_pat(&input.pat)?;
+                        word(&mut self.s, ":")?;
+                        space(&mut self.s)?;
                     }
                 }
-                try!(self.print_type(&input.ty));
+                self.print_type(&input.ty)?;
             }
         }
         self.end()
@@ -2985,17 +2988,17 @@ impl<'a> State<'a> {
             return Ok(());
         }
 
-        try!(self.space_if_not_bol());
-        try!(self.ibox(INDENT_UNIT));
-        try!(self.word_space("->"));
+        self.space_if_not_bol()?;
+        self.ibox(INDENT_UNIT)?;
+        self.word_space("->")?;
         match decl.output {
             ast::FunctionRetTy::None(_) =>
-                try!(self.word_nbsp("!")),
+                self.word_nbsp("!")?,
             ast::FunctionRetTy::Default(..) => unreachable!(),
             ast::FunctionRetTy::Ty(ref ty) =>
-                try!(self.print_type(&ty))
+                self.print_type(&ty)?
         }
-        try!(self.end());
+        self.end()?;
 
         match decl.output {
             ast::FunctionRetTy::Ty(ref output) => self.maybe_print_comment(output.span.lo),
@@ -3011,10 +3014,10 @@ impl<'a> State<'a> {
                        generics: &ast::Generics,
                        opt_explicit_self: Option<&ast::SelfKind>)
                        -> io::Result<()> {
-        try!(self.ibox(INDENT_UNIT));
+        self.ibox(INDENT_UNIT)?;
         if !generics.lifetimes.is_empty() || !generics.ty_params.is_empty() {
-            try!(word(&mut self.s, "for"));
-            try!(self.print_generics(generics));
+            word(&mut self.s, "for")?;
+            self.print_generics(generics)?;
         }
         let generics = ast::Generics {
             lifetimes: Vec::new(),
@@ -3024,14 +3027,14 @@ impl<'a> State<'a> {
                 predicates: Vec::new(),
             },
         };
-        try!(self.print_fn(decl,
-                           unsafety,
-                           ast::Constness::NotConst,
-                           abi,
-                           name,
-                           &generics,
-                           opt_explicit_self,
-                           ast::Visibility::Inherited));
+        self.print_fn(decl,
+                      unsafety,
+                      ast::Constness::NotConst,
+                      abi,
+                      name,
+                      &generics,
+                      opt_explicit_self,
+                      &ast::Visibility::Inherited)?;
         self.end()
     }
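print_ty_fn above renders bare function types, prefixing a `for<...>` binder when the type carries bound lifetimes; an example of a type with that shape (illustration only):

    type Lookup = for<'a> fn(&'a str) -> Option<&'a str>;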
 
@@ -3051,7 +3054,7 @@ impl<'a> State<'a> {
                 match next_pos { None => (), Some(p) => next = p }
                 if span.hi < (*cmnt).pos && (*cmnt).pos < next &&
                     span_line.line == comment_line.line {
-                        try!(self.print_comment(cmnt));
+                        self.print_comment(cmnt)?;
                         self.cur_cmnt_and_lit.cur_cmnt += 1;
                     }
             }
@@ -3064,12 +3067,12 @@ impl<'a> State<'a> {
         // If there aren't any remaining comments, then we need to manually
         // make sure there is a line break at the end.
         if self.next_comment().is_none() {
-            try!(hardbreak(&mut self.s));
+            hardbreak(&mut self.s)?;
         }
         loop {
             match self.next_comment() {
                 Some(ref cmnt) => {
-                    try!(self.print_comment(cmnt));
+                    self.print_comment(cmnt)?;
                     self.cur_cmnt_and_lit.cur_cmnt += 1;
                 }
                 _ => break
@@ -3084,7 +3087,7 @@ impl<'a> State<'a> {
         match opt_abi {
             Some(Abi::Rust) => Ok(()),
             Some(abi) => {
-                try!(self.word_nbsp("extern"));
+                self.word_nbsp("extern")?;
                 self.word_nbsp(&abi.to_string())
             }
             None => Ok(())
@@ -3095,7 +3098,7 @@ impl<'a> State<'a> {
                                 opt_abi: Option<Abi>) -> io::Result<()> {
         match opt_abi {
             Some(abi) => {
-                try!(self.word_nbsp("extern"));
+                self.word_nbsp("extern")?;
                 self.word_nbsp(&abi.to_string())
             }
             None => Ok(())
@@ -3106,19 +3109,19 @@ impl<'a> State<'a> {
                                 unsafety: ast::Unsafety,
                                 constness: ast::Constness,
                                 abi: Abi,
-                                vis: ast::Visibility) -> io::Result<()> {
-        try!(word(&mut self.s, &visibility_qualified(vis, "")));
+                                vis: &ast::Visibility) -> io::Result<()> {
+        word(&mut self.s, &visibility_qualified(vis, ""))?;
 
         match constness {
             ast::Constness::NotConst => {}
-            ast::Constness::Const => try!(self.word_nbsp("const"))
+            ast::Constness::Const => self.word_nbsp("const")?
         }
 
-        try!(self.print_unsafety(unsafety));
+        self.print_unsafety(unsafety)?;
 
         if abi != Abi::Rust {
-            try!(self.word_nbsp("extern"));
-            try!(self.word_nbsp(&abi.to_string()));
+            self.word_nbsp("extern")?;
+            self.word_nbsp(&abi.to_string())?;
         }
 
         word(&mut self.s, "fn")
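Almost all of the pprust.rs changes above are mechanical: each `try!(expr)` becomes `expr?`, which propagates the `io::Result` error in the same way. A minimal standalone sketch of the equivalence (not part of this patch):

    use std::io::{self, Write};

    fn emit<W: Write>(out: &mut W) -> io::Result<()> {
        // previously: try!(out.write_all(b"fn"));
        out.write_all(b"fn")?; // `?` returns early with the Err, otherwise yields the Ok value
        Ok(())
    }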
index 81b702e794d7737ef08f3652c231ba6f174e577c..703b1611540c89272bc9c8b82d4aee63f1ef69f3 100644 (file)
@@ -18,7 +18,6 @@ use std::iter;
 use std::slice;
 use std::mem;
 use std::vec;
-use ast_util::*;
 use attr::AttrMetaMethods;
 use attr;
 use codemap::{DUMMY_SP, Span, ExpnInfo, NameAndSpan, MacroAttribute};
@@ -35,7 +34,7 @@ use fold;
 use parse::token::{intern, InternedString};
 use parse::{token, ParseSess};
 use print::pprust;
-use {ast, ast_util};
+use ast;
 use ptr::P;
 use util::small_vector::SmallVector;
 
@@ -120,8 +119,7 @@ impl<'a> fold::Folder for TestHarnessGenerator<'a> {
         if ident.name != token::special_idents::invalid.name {
             self.cx.path.push(ident);
         }
-        debug!("current path: {}",
-               ast_util::path_name_i(&self.cx.path));
+        debug!("current path: {}", path_name_i(&self.cx.path));
 
         let i = if is_test_fn(&self.cx, &i) || is_bench_fn(&self.cx, &i) {
             match i.node {
@@ -349,7 +347,6 @@ enum HasTestSignature {
     NotEvenAFunction,
 }
 
-
 fn is_test_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
     let has_test_attr = attr::contains_name(&i.attrs, "test");
 
@@ -576,6 +573,11 @@ fn path_node(ids: Vec<ast::Ident> ) -> ast::Path {
     }
 }
 
+fn path_name_i(idents: &[ast::Ident]) -> String {
+    // FIXME: Bad copies (#2543 -- same for everything else that says "bad")
+    idents.iter().map(|i| i.to_string()).collect::<Vec<String>>().join("::")
+}
+
 fn mk_tests(cx: &TestCtxt) -> P<ast::Item> {
     // The vector of test_descs for this crate
     let test_descs = mk_test_descs(cx);
@@ -645,10 +647,10 @@ fn mk_test_desc_and_fn_rec(cx: &TestCtxt, test: &Test) -> P<ast::Expr> {
     // creates $name: $expr
     let field = |name, expr| ecx.field_imm(span, ecx.ident_of(name), expr);
 
-    debug!("encoding {}", ast_util::path_name_i(&path[..]));
+    debug!("encoding {}", path_name_i(&path[..]));
 
     // path to the #[test] function: "foo::bar::baz"
-    let path_string = ast_util::path_name_i(&path[..]);
+    let path_string = path_name_i(&path[..]);
     let name_expr = ecx.expr_str(span, token::intern_and_get_ident(&path_string[..]));
 
     // self::test::StaticTestName($name_expr)
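The new local `path_name_i` helper above joins ident segments with `::`. A simplified sketch of the same joining logic, using plain strings in place of `ast::Ident` (hypothetical inputs):

    fn join_path(segments: &[&str]) -> String {
        segments.join("::")
    }

    assert_eq!(join_path(&["foo", "bar", "baz"]), "foo::bar::baz");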
index 6fb81bb6a768421d89397ac8d652125619f5ba27..df4eb1c9ed7d8053ac5382f817c502e42d4d608c 100644 (file)
@@ -61,6 +61,8 @@ pub enum AssocOp {
     As,
     /// `..` range
     DotDot,
+    /// `...` range
+    DotDotDot,
     /// `:`
     Colon,
 }
@@ -102,6 +104,7 @@ impl AssocOp {
             Token::AndAnd => Some(LAnd),
             Token::OrOr => Some(LOr),
             Token::DotDot => Some(DotDot),
+            Token::DotDotDot => Some(DotDotDot),
             Token::Colon => Some(Colon),
             _ if t.is_keyword(keywords::As) => Some(As),
             _ => None
@@ -147,7 +150,7 @@ impl AssocOp {
             Less | Greater | LessEqual | GreaterEqual | Equal | NotEqual => 7,
             LAnd => 6,
             LOr => 5,
-            DotDot => 4,
+            DotDot | DotDotDot => 4,
             Inplace => 3,
             Assign | AssignOp(_) => 2,
         }
@@ -162,7 +165,7 @@ impl AssocOp {
             As | Multiply | Divide | Modulus | Add | Subtract | ShiftLeft | ShiftRight | BitAnd |
             BitXor | BitOr | Less | Greater | LessEqual | GreaterEqual | Equal | NotEqual |
             LAnd | LOr | Colon => Fixity::Left,
-            DotDot => Fixity::None
+            DotDot | DotDotDot => Fixity::None
         }
     }
 
@@ -171,7 +174,8 @@ impl AssocOp {
         match *self {
             Less | Greater | LessEqual | GreaterEqual | Equal | NotEqual => true,
             Inplace | Assign | AssignOp(_) | As | Multiply | Divide | Modulus | Add | Subtract |
-            ShiftLeft | ShiftRight | BitAnd | BitXor | BitOr | LAnd | LOr | DotDot | Colon => false
+            ShiftLeft | ShiftRight | BitAnd | BitXor | BitOr | LAnd | LOr |
+            DotDot | DotDotDot | Colon => false
         }
     }
 
@@ -181,7 +185,7 @@ impl AssocOp {
             Assign | AssignOp(_) | Inplace => true,
             Less | Greater | LessEqual | GreaterEqual | Equal | NotEqual | As | Multiply | Divide |
             Modulus | Add | Subtract | ShiftLeft | ShiftRight | BitAnd | BitXor | BitOr | LAnd |
-            LOr | DotDot | Colon => false
+            LOr | DotDot | DotDotDot | Colon => false
         }
     }
 
@@ -206,7 +210,7 @@ impl AssocOp {
             BitOr => Some(BinOpKind::BitOr),
             LAnd => Some(BinOpKind::And),
             LOr => Some(BinOpKind::Or),
-            Inplace | Assign | AssignOp(_) | As | DotDot | Colon => None
+            Inplace | Assign | AssignOp(_) | As | DotDot | DotDotDot | Colon => None
         }
     }
 }
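The `DotDotDot` variant added above gives `...` the same precedence (4) and non-associative fixity as `..`, so inclusive ranges parse like half-open ones. For orientation (the expression form of `...` was still feature-gated at this point; the pattern form shown here was stable):

    let half_open: Vec<u32> = (1..4).collect(); // yields [1, 2, 3]
    let in_range = match 3u32 {
        1...4 => true, // inclusive range pattern: matches 1, 2, 3, 4
        _ => false,
    };
    assert!(in_range);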
index b0f4c2dcba508bdaded40d320aeb835a9338531e..8358af69b6666dc0a5b7f2c85ba1863c12b6113a 100644 (file)
 
 use ast;
 use parse::{ParseSess,PResult,filemap_to_tts};
-use parse::new_parser_from_source_str;
+use parse::{lexer, new_parser_from_source_str};
 use parse::parser::Parser;
 use parse::token;
 use ptr::P;
-use str::char_at;
+use std::iter::Peekable;
 
 /// Map a string to tts, using a made-up filename:
 pub fn string_to_tts(source_str: String) -> Vec<ast::TokenTree> {
@@ -87,69 +87,62 @@ pub fn strs_to_idents(ids: Vec<&str> ) -> Vec<ast::Ident> {
 
 /// Does the given string match the pattern? whitespace in the first string
 /// may be deleted or replaced with other whitespace to match the pattern.
-/// this function is Unicode-ignorant; fortunately, the careful design of
-/// UTF-8 mitigates this ignorance.  In particular, this function only collapses
-/// sequences of \n, \r, ' ', and \t, but it should otherwise tolerate Unicode
-/// chars. Unsurprisingly, it doesn't do NKF-normalization(?).
+/// This function is relatively Unicode-ignorant; fortunately, the careful design
+/// of UTF-8 mitigates this ignorance. It doesn't do NFKC normalization(?).
 pub fn matches_codepattern(a : &str, b : &str) -> bool {
-    let mut idx_a = 0;
-    let mut idx_b = 0;
+    let mut a_iter = a.chars().peekable();
+    let mut b_iter = b.chars().peekable();
+
     loop {
-        if idx_a == a.len() && idx_b == b.len() {
-            return true;
-        }
-        else if idx_a == a.len() {return false;}
-        else if idx_b == b.len() {
-            // maybe the stuff left in a is all ws?
-            if is_whitespace(char_at(a, idx_a)) {
-                return scan_for_non_ws_or_end(a,idx_a) == a.len();
-            } else {
-                return false;
+        let (a, b) = match (a_iter.peek(), b_iter.peek()) {
+            (None, None) => return true,
+            (None, _) => return false,
+            (Some(&a), None) => {
+                if is_pattern_whitespace(a) {
+                    break // trailing whitespace check is out of loop for borrowck
+                } else {
+                    return false
+                }
             }
-        }
-        // ws in both given and pattern:
-        else if is_whitespace(char_at(a, idx_a))
-           && is_whitespace(char_at(b, idx_b)) {
-            idx_a = scan_for_non_ws_or_end(a,idx_a);
-            idx_b = scan_for_non_ws_or_end(b,idx_b);
-        }
-        // ws in given only:
-        else if is_whitespace(char_at(a, idx_a)) {
-            idx_a = scan_for_non_ws_or_end(a,idx_a);
-        }
-        // *don't* silently eat ws in expected only.
-        else if char_at(a, idx_a) == char_at(b, idx_b) {
-            idx_a += 1;
-            idx_b += 1;
-        }
-        else {
-            return false;
+            (Some(&a), Some(&b)) => (a, b)
+        };
+
+        if is_pattern_whitespace(a) && is_pattern_whitespace(b) {
+            // skip whitespace for a and b
+            scan_for_non_ws_or_end(&mut a_iter);
+            scan_for_non_ws_or_end(&mut b_iter);
+        } else if is_pattern_whitespace(a) {
+            // skip whitespace for a
+            scan_for_non_ws_or_end(&mut a_iter);
+        } else if a == b {
+            a_iter.next();
+            b_iter.next();
+        } else {
+            return false
         }
     }
+
+    // check if a has *only* trailing whitespace
+    a_iter.all(is_pattern_whitespace)
 }
 
-/// Given a string and an index, return the first usize >= idx
-/// that is a non-ws-char or is outside of the legal range of
-/// the string.
-fn scan_for_non_ws_or_end(a : &str, idx: usize) -> usize {
-    let mut i = idx;
-    let len = a.len();
-    while (i < len) && (is_whitespace(char_at(a, i))) {
-        i += 1;
+/// Advances the given peekable `Iterator` until it reaches a non-whitespace character
+fn scan_for_non_ws_or_end<I: Iterator<Item= char>>(iter: &mut Peekable<I>) {
+    while lexer::is_pattern_whitespace(iter.peek().cloned()) {
+        iter.next();
     }
-    i
 }
 
-/// Copied from lexer.
-pub fn is_whitespace(c: char) -> bool {
-    return c == ' ' || c == '\t' || c == '\r' || c == '\n';
+pub fn is_pattern_whitespace(c: char) -> bool {
+    lexer::is_pattern_whitespace(Some(c))
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
 
-    #[test] fn eqmodws() {
+    #[test]
+    fn eqmodws() {
         assert_eq!(matches_codepattern("",""),true);
         assert_eq!(matches_codepattern("","a"),false);
         assert_eq!(matches_codepattern("a",""),false);
@@ -160,5 +153,22 @@ mod tests {
         assert_eq!(matches_codepattern("a   b","a b"),true);
         assert_eq!(matches_codepattern("ab","a b"),false);
         assert_eq!(matches_codepattern("a   b","ab"),true);
+        assert_eq!(matches_codepattern(" a   b","ab"),true);
+    }
+
+    #[test]
+    fn pattern_whitespace() {
+        assert_eq!(matches_codepattern("","\x0C"), false);
+        assert_eq!(matches_codepattern("a b ","a   \u{0085}\n\t\r  b"),true);
+        assert_eq!(matches_codepattern("a b","a   \u{0085}\n\t\r  b "),false);
+    }
+
+    #[test]
+    fn non_pattern_whitespace() {
+        // These have the property 'White_Space' but not 'Pattern_White_Space'
+        assert_eq!(matches_codepattern("a b","a\u{2002}b"), false);
+        assert_eq!(matches_codepattern("a   b","a\u{2002}b"), false);
+        assert_eq!(matches_codepattern("\u{205F}a   b","ab"), false);
+        assert_eq!(matches_codepattern("a  \u{3000}b","ab"), false);
     }
 }
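
A self-contained sketch of the iterator-based matching strategy adopted above, simplified to use `char::is_whitespace` rather than the lexer's Pattern_White_Space predicate (so it is an approximation of the real function, not a drop-in replacement):

use std::iter::Peekable;

fn skip_ws<I: Iterator<Item = char>>(it: &mut Peekable<I>) {
    // advance past any run of whitespace
    while it.peek().map_or(false, |c| c.is_whitespace()) {
        it.next();
    }
}

fn matches_codepattern(a: &str, b: &str) -> bool {
    let mut a = a.chars().peekable();
    let mut b = b.chars().peekable();
    loop {
        match (a.peek().cloned(), b.peek().cloned()) {
            (None, None) => return true,
            (None, Some(_)) => return false,
            (Some(ca), None) => {
                // only trailing whitespace in `a` may remain
                if !ca.is_whitespace() {
                    return false;
                }
                skip_ws(&mut a);
                return a.peek().is_none();
            }
            (Some(ca), Some(cb)) => {
                if ca.is_whitespace() && cb.is_whitespace() {
                    skip_ws(&mut a);
                    skip_ws(&mut b);
                } else if ca.is_whitespace() {
                    skip_ws(&mut a); // extra whitespace in the input is tolerated
                } else if ca == cb {
                    a.next();
                    b.next();
                } else {
                    return false;
                }
            }
        }
    }
}

fn main() {
    assert!(matches_codepattern("a   b", "a b"));
    assert!(matches_codepattern("a   b", "ab"));
    assert!(!matches_codepattern("ab", "a b"));
}
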
index f26610b8b8d14613ec1f237e168719f1840832e0..839bbf4805df20c27cacbebbcc209375d70e0e8f 100644 (file)
@@ -31,10 +31,10 @@ use codemap::Span;
 #[derive(Copy, Clone, PartialEq, Eq)]
 pub enum FnKind<'a> {
     /// fn foo() or extern "Abi" fn foo()
-    ItemFn(Ident, &'a Generics, Unsafety, Constness, Abi, Visibility),
+    ItemFn(Ident, &'a Generics, Unsafety, Constness, Abi, &'a Visibility),
 
     /// fn foo(&self)
-    Method(Ident, &'a MethodSig, Option<Visibility>),
+    Method(Ident, &'a MethodSig, Option<&'a Visibility>),
 
     /// |x, y| {}
     Closure,
@@ -129,6 +129,9 @@ pub trait Visitor<'v> : Sized {
     fn visit_macro_def(&mut self, macro_def: &'v MacroDef) {
         walk_macro_def(self, macro_def)
     }
+    fn visit_vis(&mut self, vis: &'v Visibility) {
+        walk_vis(self, vis)
+    }
 }
 
 #[macro_export]
@@ -260,7 +263,7 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item) {
         }
         ItemKind::Fn(ref declaration, unsafety, constness, abi, ref generics, ref body) => {
             visitor.visit_fn(FnKind::ItemFn(item.ident, generics, unsafety,
-                                            constness, abi, item.vis),
+                                            constness, abi, &item.vis),
                              declaration,
                              body,
                              item.span,
@@ -546,7 +549,7 @@ pub fn walk_fn_kind<'v, V: Visitor<'v>>(visitor: &mut V,
         FnKind::ItemFn(_, generics, _, _, _, _) => {
             visitor.visit_generics(generics);
         }
-        FnKind::Method(_, sig, _) => {
+        FnKind::Method(_, ref sig, _) => {
             visitor.visit_generics(&sig.generics);
             visitor.visit_explicit_self(&sig.explicit_self);
         }
@@ -597,7 +600,7 @@ pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplIt
             visitor.visit_expr(expr);
         }
         ImplItemKind::Method(ref sig, ref body) => {
-            visitor.visit_fn(FnKind::Method(impl_item.ident, sig, Some(impl_item.vis)), &sig.decl,
+            visitor.visit_fn(FnKind::Method(impl_item.ident, sig, Some(&impl_item.vis)), &sig.decl,
                              body, impl_item.span, impl_item.id);
         }
         ImplItemKind::Type(ref ty) => {
@@ -616,9 +619,9 @@ pub fn walk_struct_def<'v, V: Visitor<'v>>(visitor: &mut V,
 
 pub fn walk_struct_field<'v, V: Visitor<'v>>(visitor: &mut V,
                                              struct_field: &'v StructField) {
-    walk_opt_ident(visitor, struct_field.span, struct_field.node.ident());
-    visitor.visit_ty(&struct_field.node.ty);
-    walk_list!(visitor, visit_attribute, &struct_field.node.attrs);
+    walk_opt_ident(visitor, struct_field.span, struct_field.ident);
+    visitor.visit_ty(&struct_field.ty);
+    walk_list!(visitor, visit_attribute, &struct_field.attrs);
 }
 
 pub fn walk_block<'v, V: Visitor<'v>>(visitor: &mut V, block: &'v Block) {
@@ -763,7 +766,7 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) {
             visitor.visit_expr(main_expression);
             visitor.visit_expr(index_expression)
         }
-        ExprKind::Range(ref start, ref end) => {
+        ExprKind::Range(ref start, ref end, _) => {
             walk_list!(visitor, visit_expr, start);
             walk_list!(visitor, visit_expr, end);
         }
@@ -793,6 +796,9 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) {
                 visitor.visit_expr(&output.expr)
             }
         }
+        ExprKind::Try(ref subexpression) => {
+            visitor.visit_expr(subexpression)
+        }
     }
 
     visitor.visit_expr_post(expression)
@@ -804,3 +810,10 @@ pub fn walk_arm<'v, V: Visitor<'v>>(visitor: &mut V, arm: &'v Arm) {
     visitor.visit_expr(&arm.body);
     walk_list!(visitor, visit_attribute, &arm.attrs);
 }
+
+pub fn walk_vis<'v, V: Visitor<'v>>(visitor: &mut V, vis: &'v Visibility) {
+    match *vis {
+        Visibility::Restricted { ref path, id } => visitor.visit_path(path, id),
+        _ => {}
+    }
+}
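
The new `visit_vis`/`walk_vis` pair follows libsyntax's usual visitor convention: each `visit_*` hook defaults to the matching `walk_*` free function, so an override can do its own work and still recurse. A toy illustration of that convention with hypothetical types (not rustc's AST):

struct Item {
    name: String,
    children: Vec<Item>,
}

trait Visitor: Sized {
    fn visit_item(&mut self, item: &Item) {
        walk_item(self, item) // default behaviour: just recurse
    }
}

fn walk_item<V: Visitor>(v: &mut V, item: &Item) {
    for child in &item.children {
        v.visit_item(child);
    }
}

struct NameCollector(Vec<String>);

impl Visitor for NameCollector {
    fn visit_item(&mut self, item: &Item) {
        self.0.push(item.name.clone()); // this node's work...
        walk_item(self, item);          // ...then keep walking via walk_*
    }
}

fn main() {
    let tree = Item {
        name: "root".to_string(),
        children: vec![Item { name: "leaf".to_string(), children: Vec::new() }],
    };
    let mut v = NameCollector(Vec::new());
    v.visit_item(&tree);
    assert_eq!(v.0, ["root", "leaf"]);
}
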
index e137815cd32fd919e380403fe43624e49f303560..671f3e4a7e3302cf265464adb617a4096c77e840 100644 (file)
@@ -10,4 +10,5 @@ crate-type = ["dylib"]
 
 [dependencies]
 fmt_macros = { path = "../libfmt_macros" }
+log = { path = "../liblog" }
 syntax = { path = "../libsyntax" }
index b9ba1f107ad7aefa8867eb59e13d0d8a1111831f..50d2b9d31fe010b396ad1dff085d8307387bf455 100644 (file)
@@ -131,11 +131,12 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
                     // It's the opposite of '=&' which means that the memory
                     // cannot be shared with any other operand (usually when
                     // a register is clobbered early.)
-                    let output = match constraint.slice_shift_char() {
-                        Some(('=', _)) => None,
-                        Some(('+', operand)) => {
+                    let mut ch = constraint.chars();
+                    let output = match ch.next() {
+                        Some('=') => None,
+                        Some('+') => {
                             Some(token::intern_and_get_ident(&format!(
-                                        "={}", operand)))
+                                        "={}", ch.as_str())))
                         }
                         _ => {
                             cx.span_err(span, "output operand constraint lacks '=' or '+'");
@@ -146,7 +147,7 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
                     let is_rw = output.is_some();
                     let is_indirect = constraint.contains("*");
                     outputs.push(ast::InlineAsmOutput {
-                        constraint: output.unwrap_or(constraint),
+                        constraint: output.unwrap_or(constraint.clone()),
                         expr: out,
                         is_rw: is_rw,
                         is_indirect: is_indirect,
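
The constraint parsing above drops the unstable `slice_shift_char` in favour of `Chars::next` plus `Chars::as_str`. The same "first char, rest of string" pattern in isolation (hypothetical helper name):

fn split_first_char(s: &str) -> Option<(char, &str)> {
    let mut ch = s.chars();
    ch.next().map(|first| (first, ch.as_str()))
}

fn main() {
    assert_eq!(split_first_char("+r"), Some(('+', "r")));
    assert_eq!(split_first_char("=r"), Some(('=', "r")));
    assert_eq!(split_first_char(""), None);
}
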
index 2fa847ee430d8746408e7e64ca3bb4ae39fdca68..74706c470872a0dbe767b34e79643fc90fdb67da 100644 (file)
@@ -11,7 +11,7 @@
 use deriving::generic::*;
 use deriving::generic::ty::*;
 
-use syntax::ast::{MetaItem, Expr, BinOpKind, self};
+use syntax::ast::{MetaItem, Expr, self};
 use syntax::codemap::Span;
 use syntax::ext::base::{ExtCtxt, Annotatable};
 use syntax::ext::build::AstBuilder;
@@ -64,7 +64,7 @@ pub fn ordering_collapsed(cx: &mut ExtCtxt,
 
 pub fn cs_cmp(cx: &mut ExtCtxt, span: Span,
               substr: &Substructure) -> P<Expr> {
-    let test_id = cx.ident_of("__test");
+    let test_id = cx.ident_of("__cmp");
     let equals_path = cx.path_global(span,
                                      cx.std_path(&["cmp", "Ordering", "Equal"]));
 
@@ -73,36 +73,31 @@ pub fn cs_cmp(cx: &mut ExtCtxt, span: Span,
     /*
     Builds:
 
-    let __test = ::std::cmp::Ord::cmp(&self_field1, &other_field1);
-    if other == ::std::cmp::Ordering::Equal {
-        let __test = ::std::cmp::Ord::cmp(&self_field2, &other_field2);
-        if __test == ::std::cmp::Ordering::Equal {
-            ...
-        } else {
-            __test
-        }
-    } else {
-        __test
+    match ::std::cmp::Ord::cmp(&self_field1, &other_field1) {
+        ::std::cmp::Ordering::Equal =>
+            match ::std::cmp::Ord::cmp(&self_field2, &other_field2) {
+                ::std::cmp::Ordering::Equal => {
+                    ...
+                }
+                __cmp => __cmp
+            },
+        __cmp => __cmp
     }
-
-    FIXME #6449: These `if`s could/should be `match`es.
     */
     cs_fold(
         // foldr nests the if-elses correctly, leaving the first field
         // as the outermost one, and the last as the innermost.
         false,
         |cx, span, old, self_f, other_fs| {
-            // let __test = new;
-            // if __test == ::std::cmp::Ordering::Equal {
-            //    old
-            // } else {
-            //    __test
+            // match new {
+            //     ::std::cmp::Ordering::Equal => old,
+            //     __cmp => __cmp
             // }
 
             let new = {
                 let other_f = match (other_fs.len(), other_fs.get(0)) {
                     (1, Some(o_f)) => o_f,
-                    _ => cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`"),
+                    _ => cx.span_bug(span, "not exactly 2 arguments in `derive(Ord)`"),
                 };
 
                 let args = vec![
@@ -113,20 +108,21 @@ pub fn cs_cmp(cx: &mut ExtCtxt, span: Span,
                 cx.expr_call_global(span, cmp_path.clone(), args)
             };
 
-            let assign = cx.stmt_let(span, false, test_id, new);
+            let eq_arm = cx.arm(span,
+                                vec![cx.pat_enum(span,
+                                                 equals_path.clone(),
+                                                 vec![])],
+                                old);
+            let neq_arm = cx.arm(span,
+                                 vec![cx.pat_ident(span, test_id)],
+                                 cx.expr_ident(span, test_id));
 
-            let cond = cx.expr_binary(span, BinOpKind::Eq,
-                                      cx.expr_ident(span, test_id),
-                                      cx.expr_path(equals_path.clone()));
-            let if_ = cx.expr_if(span,
-                                 cond,
-                                 old, Some(cx.expr_ident(span, test_id)));
-            cx.expr_block(cx.block(span, vec!(assign), Some(if_)))
+            cx.expr_match(span, new, vec![eq_arm, neq_arm])
         },
         cx.expr_path(equals_path.clone()),
         Box::new(|cx, span, (self_args, tag_tuple), _non_self_args| {
             if self_args.len() != 2 {
-                cx.span_bug(span, "not exactly 2 arguments in `derives(Ord)`")
+                cx.span_bug(span, "not exactly 2 arguments in `derive(Ord)`")
             } else {
                 ordering_collapsed(cx, span, tag_tuple)
             }
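
Following the comment in the hunk, the folded result for a two-field struct now has the nested-match shape below. This is a hand-written analogue under the assumption of a plain struct with two `u32` fields, not the literal expansion:

use std::cmp::Ordering;

struct Pair {
    a: u32,
    b: u32,
}

fn cmp_pair(lhs: &Pair, rhs: &Pair) -> Ordering {
    match Ord::cmp(&lhs.a, &rhs.a) {
        Ordering::Equal => match Ord::cmp(&lhs.b, &rhs.b) {
            Ordering::Equal => Ordering::Equal,
            __cmp => __cmp, // first non-Equal comparison wins
        },
        __cmp => __cmp,
    }
}

fn main() {
    assert_eq!(cmp_pair(&Pair { a: 1, b: 2 }, &Pair { a: 1, b: 3 }), Ordering::Less);
    assert_eq!(cmp_pair(&Pair { a: 2, b: 0 }, &Pair { a: 1, b: 9 }), Ordering::Greater);
}
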
index 0150a073b07d0bdf816f90ce0be0770a9d41b0b5..6406ee59a5eb555a2e28fc7fabfa3f4ebede0ab3 100644 (file)
@@ -80,6 +80,14 @@ pub fn expand_deriving_partial_eq(cx: &mut ExtCtxt,
         } }
     }
 
+    // avoid defining `ne` if we can
+    // c-like enums, enums without any fields and structs without fields
+    // can safely define only `eq`.
+    let mut methods = vec![md!("eq", cs_eq)];
+    if !is_type_without_fields(item) {
+        methods.push(md!("ne", cs_ne));
+    }
+
     let trait_def = TraitDef {
         span: span,
         attributes: Vec::new(),
@@ -87,10 +95,7 @@ pub fn expand_deriving_partial_eq(cx: &mut ExtCtxt,
         additional_bounds: Vec::new(),
         generics: LifetimeBounds::empty(),
         is_unsafe: false,
-        methods: vec!(
-            md!("eq", cs_eq),
-            md!("ne", cs_ne)
-        ),
+        methods: methods,
         associated_types: Vec::new(),
     };
     trait_def.expand(cx, mitem, item, push)
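
Skipping `ne` is safe because `PartialEq` already supplies a default `ne` in terms of `eq`; a field-less type only needs `eq`, as in this hand-written equivalent (illustrative, not the derive's actual output):

enum Signal {
    Red,
    Green,
}

impl PartialEq for Signal {
    fn eq(&self, other: &Signal) -> bool {
        match (self, other) {
            (&Signal::Red, &Signal::Red) | (&Signal::Green, &Signal::Green) => true,
            _ => false,
        }
    }
}

fn main() {
    assert!(Signal::Red != Signal::Green); // `!=` goes through the default `ne`
    assert!(Signal::Red == Signal::Red);
}
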
index e857f7d52f912af544ad254f2ca207b512c55ab5..e49c77285ab026b5d7ec1fcce286ed3b4be70d2b 100644 (file)
@@ -67,6 +67,21 @@ pub fn expand_deriving_partial_ord(cx: &mut ExtCtxt,
         }))
     };
 
+    // avoid defining extra methods if we can
+    // c-like enums, enums without any fields and structs without fields
+    // can safely define only `partial_cmp`.
+    let methods = if is_type_without_fields(item) {
+        vec![partial_cmp_def]
+    } else {
+        vec![
+            partial_cmp_def,
+            md!("lt", true, false),
+            md!("le", true, true),
+            md!("gt", false, false),
+            md!("ge", false, true)
+        ]
+    };
+
     let trait_def = TraitDef {
         span: span,
         attributes: vec![],
@@ -74,13 +89,7 @@ pub fn expand_deriving_partial_ord(cx: &mut ExtCtxt,
         additional_bounds: vec![],
         generics: LifetimeBounds::empty(),
         is_unsafe: false,
-        methods: vec![
-            partial_cmp_def,
-            md!("lt", true, false),
-            md!("le", true, true),
-            md!("gt", false, false),
-            md!("ge", false, true)
-        ],
+        methods: methods,
         associated_types: Vec::new(),
     };
     trait_def.expand(cx, mitem, item, push)
@@ -107,41 +116,36 @@ pub fn some_ordering_collapsed(cx: &mut ExtCtxt,
 
 pub fn cs_partial_cmp(cx: &mut ExtCtxt, span: Span,
               substr: &Substructure) -> P<Expr> {
-    let test_id = cx.ident_of("__test");
+    let test_id = cx.ident_of("__cmp");
     let ordering = cx.path_global(span,
                                   cx.std_path(&["cmp", "Ordering", "Equal"]));
-    let ordering = cx.expr_path(ordering);
-    let equals_expr = cx.expr_some(span, ordering);
+    let ordering_expr = cx.expr_path(ordering.clone());
+    let equals_expr = cx.expr_some(span, ordering_expr);
 
     let partial_cmp_path = cx.std_path(&["cmp", "PartialOrd", "partial_cmp"]);
 
     /*
     Builds:
 
-    let __test = ::std::cmp::PartialOrd::partial_cmp(&self_field1, &other_field1);
-    if __test == ::std::option::Option::Some(::std::cmp::Ordering::Equal) {
-        let __test = ::std::cmp::PartialOrd::partial_cmp(&self_field2, &other_field2);
-        if __test == ::std::option::Option::Some(::std::cmp::Ordering::Equal) {
-            ...
-        } else {
-            __test
-        }
-    } else {
-        __test
+    match ::std::cmp::PartialOrd::partial_cmp(&self_field1, &other_field1) {
+        ::std::option::Option::Some(::std::cmp::Ordering::Equal) =>
+            match ::std::cmp::PartialOrd::partial_cmp(&self_field2, &other_field2) {
+                ::std::option::Option::Some(::std::cmp::Ordering::Equal) => {
+                    ...
+                }
+                __cmp => __cmp
+            },
+        __cmp => __cmp
     }
-
-    FIXME #6449: These `if`s could/should be `match`es.
     */
     cs_fold(
         // foldr nests the if-elses correctly, leaving the first field
         // as the outermost one, and the last as the innermost.
         false,
         |cx, span, old, self_f, other_fs| {
-            // let __test = new;
-            // if __test == Some(::std::cmp::Ordering::Equal) {
-            //    old
-            // } else {
-            //    __test
+            // match new {
+            //     Some(::std::cmp::Ordering::Equal) => old,
+            //     __cmp => __cmp
             // }
 
             let new = {
@@ -158,15 +162,17 @@ pub fn cs_partial_cmp(cx: &mut ExtCtxt, span: Span,
                 cx.expr_call_global(span, partial_cmp_path.clone(), args)
             };
 
-            let assign = cx.stmt_let(span, false, test_id, new);
-
-            let cond = cx.expr_binary(span, BinOpKind::Eq,
-                                      cx.expr_ident(span, test_id),
-                                      equals_expr.clone());
-            let if_ = cx.expr_if(span,
-                                 cond,
-                                 old, Some(cx.expr_ident(span, test_id)));
-            cx.expr_block(cx.block(span, vec!(assign), Some(if_)))
+            let eq_arm = cx.arm(span,
+                                vec![cx.pat_some(span,
+                                                 cx.pat_enum(span,
+                                                             ordering.clone(),
+                                                             vec![]))],
+                                old);
+            let neq_arm = cx.arm(span,
+                                 vec![cx.pat_ident(span, test_id)],
+                                 cx.expr_ident(span, test_id));
+
+            cx.expr_match(span, new, vec![eq_arm, neq_arm])
         },
         equals_expr.clone(),
         Box::new(|cx, span, (self_args, tag_tuple), _non_self_args| {
index 092f8548966dabcbf7201f42380f84581a60f757..49f14c937e953771e442fd3f315733dfcadee607 100644 (file)
@@ -10,6 +10,7 @@
 
 //! The compiler code necessary for `#[derive(Decodable)]`. See encodable.rs for more.
 
+use deriving;
 use deriving::generic::*;
 use deriving::generic::ty::*;
 
@@ -54,6 +55,8 @@ fn expand_deriving_decodable_imp(cx: &mut ExtCtxt,
         return
     }
 
+    let typaram = &*deriving::hygienic_type_parameter(item, "__D");
+
     let trait_def = TraitDef {
         span: span,
         attributes: Vec::new(),
@@ -66,18 +69,17 @@ fn expand_deriving_decodable_imp(cx: &mut ExtCtxt,
                 name: "decode",
                 generics: LifetimeBounds {
                     lifetimes: Vec::new(),
-                    bounds: vec!(("__D", vec!(Path::new_(
-                                    vec!(krate, "Decoder"), None,
-                                    vec!(), true))))
+                    bounds: vec![(typaram,
+                                  vec![Path::new_(vec!(krate, "Decoder"), None, vec!(), true)])]
                 },
                 explicit_self: None,
-                args: vec!(Ptr(Box::new(Literal(Path::new_local("__D"))),
+                args: vec!(Ptr(Box::new(Literal(Path::new_local(typaram))),
                             Borrowed(None, Mutability::Mutable))),
                 ret_ty: Literal(Path::new_(
                     pathvec_std!(cx, core::result::Result),
                     None,
                     vec!(Box::new(Self_), Box::new(Literal(Path::new_(
-                        vec!["__D", "Error"], None, vec![], false
+                        vec![typaram, "Error"], None, vec![], false
                     )))),
                     true
                 )),
index 8262a04e9ce179350db8cd8437d201383382d2ac..a05bd7869b2a9ccd1065c375c6fb4f28af1b748c 100644 (file)
@@ -88,6 +88,7 @@
 //! }
 //! ```
 
+use deriving;
 use deriving::generic::*;
 use deriving::generic::ty::*;
 
@@ -130,6 +131,8 @@ fn expand_deriving_encodable_imp(cx: &mut ExtCtxt,
         return;
     }
 
+    let typaram = &*deriving::hygienic_type_parameter(item, "__S");
+
     let trait_def = TraitDef {
         span: span,
         attributes: Vec::new(),
@@ -142,18 +145,17 @@ fn expand_deriving_encodable_imp(cx: &mut ExtCtxt,
                 name: "encode",
                 generics: LifetimeBounds {
                     lifetimes: Vec::new(),
-                    bounds: vec!(("__S", vec!(Path::new_(
-                                    vec!(krate, "Encoder"), None,
-                                    vec!(), true))))
+                    bounds: vec![(typaram,
+                                  vec![Path::new_(vec![krate, "Encoder"], None, vec!(), true)])]
                 },
                 explicit_self: borrowed_explicit_self(),
-                args: vec!(Ptr(Box::new(Literal(Path::new_local("__S"))),
+                args: vec!(Ptr(Box::new(Literal(Path::new_local(typaram))),
                             Borrowed(None, Mutability::Mutable))),
                 ret_ty: Literal(Path::new_(
                     pathvec_std!(cx, core::result::Result),
                     None,
                     vec!(Box::new(Tuple(Vec::new())), Box::new(Literal(Path::new_(
-                        vec!["__S", "Error"], None, vec![], false
+                        vec![typaram, "Error"], None, vec![], false
                     )))),
                     true
                 )),
index c0237a5d29a4103dbf8e535ba9800cc474929fca..b8ba1a58f2163924fc95b5265abde5bb18f71a4f 100644 (file)
 
 pub use self::StaticFields::*;
 pub use self::SubstructureFields::*;
-use self::StructType::*;
 
 use std::cell::RefCell;
 use std::collections::HashSet;
@@ -194,7 +193,6 @@ use std::vec;
 
 use syntax::abi::Abi;
 use syntax::ast::{self, EnumDef, Expr, Ident, Generics, VariantData, BinOpKind, PatKind};
-use syntax::ast_util;
 use syntax::attr;
 use syntax::attr::AttrMetaMethods;
 use syntax::ext::base::{ExtCtxt, Annotatable};
@@ -209,6 +207,8 @@ use syntax::ptr::P;
 
 use self::ty::{LifetimeBounds, Path, Ptr, PtrTy, Self_, Ty};
 
+use deriving;
+
 pub mod ty;
 
 pub struct TraitDef<'a> {
@@ -476,6 +476,7 @@ impl<'a> TraitDef<'a> {
                 span: self.span,
                 ident: ident,
                 vis: ast::Visibility::Inherited,
+                defaultness: ast::Defaultness::Final,
                 attrs: Vec::new(),
                 node: ast::ImplItemKind::Type(type_def.to_ty(cx,
                     self.span,
@@ -617,7 +618,6 @@ impl<'a> TraitDef<'a> {
         // Just mark it now since we know that it'll end up used downstream
         attr::mark_used(&attr);
         let opt_trait_ref = Some(trait_ref);
-        let ident = ast_util::impl_pretty_name(&opt_trait_ref, Some(&self_type));
         let unused_qual = cx.attribute(
             self.span,
             cx.meta_list(self.span,
@@ -635,7 +635,7 @@ impl<'a> TraitDef<'a> {
 
         cx.item(
             self.span,
-            ident,
+            special_idents::invalid,
             a,
             ast::ItemKind::Impl(unsafety,
                                 ast::ImplPolarity::Positive,
@@ -651,7 +651,7 @@ impl<'a> TraitDef<'a> {
                          type_ident: Ident,
                          generics: &Generics) -> P<ast::Item> {
         let field_tys: Vec<P<ast::Ty>> = struct_def.fields().iter()
-            .map(|field| field.node.ty.clone())
+            .map(|field| field.ty.clone())
             .collect();
 
         let methods = self.methods.iter().map(|method_def| {
@@ -699,7 +699,7 @@ impl<'a> TraitDef<'a> {
 
         for variant in &enum_def.variants {
             field_tys.extend(variant.node.data.fields().iter()
-                .map(|field| field.node.ty.clone()));
+                .map(|field| field.ty.clone()));
         }
 
         let methods = self.methods.iter().map(|method_def| {
@@ -741,7 +741,7 @@ impl<'a> TraitDef<'a> {
 
 fn find_repr_type_name(diagnostic: &Handler,
                        type_attrs: &[ast::Attribute]) -> &'static str {
-    let mut repr_type_name = "i32";
+    let mut repr_type_name = "isize";
     for a in type_attrs {
         for r in &attr::find_repr_attrs(diagnostic, a) {
             repr_type_name = match *r {
@@ -893,6 +893,7 @@ impl<'a> MethodDef<'a> {
             attrs: self.attributes.clone(),
             span: trait_.span,
             vis: ast::Visibility::Inherited,
+            defaultness: ast::Defaultness::Final,
             ident: method_ident,
             node: ast::ImplItemKind::Method(ast::MethodSig {
                 generics: fn_generics,
@@ -1023,8 +1024,8 @@ impl<'a> MethodDef<'a> {
     ///     fn eq(&self, __arg_1: &A) -> ::bool {
     ///         match (&*self, &*__arg_1) {
     ///             (&A1, &A1) => true,
-    ///             (&A2(ref __self_0),
-    ///              &A2(ref __arg_1_0)) => (*__self_0).eq(&(*__arg_1_0)),
+    ///             (&A2(ref self_0),
+    ///              &A2(ref __arg_1_0)) => (*self_0).eq(&(*__arg_1_0)),
     ///             _ => {
     ///                 let __self_vi = match *self { A1(..) => 0, A2(..) => 1 };
     ///                 let __arg_1_vi = match *__arg_1 { A1(..) => 0, A2(..) => 1 };
@@ -1072,9 +1073,9 @@ impl<'a> MethodDef<'a> {
     /// let __self0_vi = unsafe {
     ///     std::intrinsics::discriminant_value(&self) } as i32;
     /// let __self1_vi = unsafe {
-    ///     std::intrinsics::discriminant_value(&__arg1) } as i32;
+    ///     std::intrinsics::discriminant_value(&arg1) } as i32;
     /// let __self2_vi = unsafe {
-    ///     std::intrinsics::discriminant_value(&__arg2) } as i32;
+    ///     std::intrinsics::discriminant_value(&arg2) } as i32;
     ///
     /// if __self0_vi == __self1_vi && __self0_vi == __self2_vi && ... {
     ///     match (...) {
@@ -1246,9 +1247,9 @@ impl<'a> MethodDef<'a> {
             // let __self0_vi = unsafe {
             //     std::intrinsics::discriminant_value(&self) } as i32;
             // let __self1_vi = unsafe {
-            //     std::intrinsics::discriminant_value(&__arg1) } as i32;
+            //     std::intrinsics::discriminant_value(&arg1) } as i32;
             // let __self2_vi = unsafe {
-            //     std::intrinsics::discriminant_value(&__arg2) } as i32;
+            //     std::intrinsics::discriminant_value(&arg2) } as i32;
             // ```
             let mut index_let_stmts: Vec<ast::Stmt> = Vec::new();
 
@@ -1261,15 +1262,11 @@ impl<'a> MethodDef<'a> {
 
             let mut first_ident = None;
             for (&ident, self_arg) in vi_idents.iter().zip(&self_args) {
-                let path = cx.std_path(&["intrinsics", "discriminant_value"]);
-                let call = cx.expr_call_global(
-                    sp, path, vec![cx.expr_addr_of(sp, self_arg.clone())]);
-                let variant_value = cx.expr_block(P(ast::Block {
-                    stmts: vec![],
-                    expr: Some(call),
-                    id: ast::DUMMY_NODE_ID,
-                    rules: ast::BlockCheckMode::Unsafe(ast::CompilerGenerated),
-                    span: sp }));
+                let self_addr = cx.expr_addr_of(sp, self_arg.clone());
+                let variant_value = deriving::call_intrinsic(cx,
+                                                             sp,
+                                                             "discriminant_value",
+                                                             vec![self_addr]);
 
                 let target_ty = cx.ty_ident(sp, cx.ident_of(target_type_name));
                 let variant_disr = cx.expr_cast(sp, variant_value, target_ty);
@@ -1297,16 +1294,9 @@ impl<'a> MethodDef<'a> {
             //Since we know that all the arguments will match if we reach the match expression we
             //add the unreachable intrinsics as the result of the catch all which should help llvm
             //in optimizing it
-            let path = cx.std_path(&["intrinsics", "unreachable"]);
-            let call = cx.expr_call_global(
-                sp, path, vec![]);
-            let unreachable = cx.expr_block(P(ast::Block {
-                stmts: vec![],
-                expr: Some(call),
-                id: ast::DUMMY_NODE_ID,
-                rules: ast::BlockCheckMode::Unsafe(ast::CompilerGenerated),
-                span: sp }));
-            match_arms.push(cx.arm(sp, vec![cx.pat_wild(sp)], unreachable));
+            match_arms.push(cx.arm(sp,
+                                   vec![cx.pat_wild(sp)],
+                                   deriving::call_intrinsic(cx, sp, "unreachable", vec![])));
 
             // Final wrinkle: the self_args are expressions that deref
             // down to desired l-values, but we cannot actually deref
@@ -1382,7 +1372,7 @@ impl<'a> MethodDef<'a> {
             // derive Debug on such a type could here generate code
             // that needs the feature gate enabled.)
 
-            cx.expr_unreachable(sp)
+            deriving::call_intrinsic(cx, sp, "unreachable", vec![])
         }
         else {
 
@@ -1416,11 +1406,6 @@ impl<'a> MethodDef<'a> {
     }
 }
 
-#[derive(PartialEq)] // dogfooding!
-enum StructType {
-    Unknown, Record, Tuple
-}
-
 // general helper methods.
 impl<'a> TraitDef<'a> {
     fn set_expn_info(&self,
@@ -1448,9 +1433,9 @@ impl<'a> TraitDef<'a> {
         let mut just_spans = Vec::new();
         for field in struct_def.fields(){
             let sp = self.set_expn_info(cx, field.span);
-            match field.node.kind {
-                ast::NamedField(ident, _) => named_idents.push((ident, sp)),
-                ast::UnnamedField(..) => just_spans.push(sp),
+            match field.ident {
+                Some(ident) => named_idents.push((ident, sp)),
+                _ => just_spans.push(sp),
             }
         }
 
@@ -1486,53 +1471,26 @@ impl<'a> TraitDef<'a> {
                              -> (P<ast::Pat>, Vec<(Span, Option<Ident>,
                                                    P<Expr>,
                                                    &'a [ast::Attribute])>) {
-        if struct_def.fields().is_empty() {
-            if struct_def.is_struct() {
-                return (cx.pat_struct(self.span, struct_path, vec![]), vec![]);
-            } else {
-                return (cx.pat_enum(self.span, struct_path, vec![]), vec![]);
-            }
-        }
-
         let mut paths = Vec::new();
-        let mut ident_expr = Vec::new();
-        let mut struct_type = Unknown;
-
+        let mut ident_exprs = Vec::new();
         for (i, struct_field) in struct_def.fields().iter().enumerate() {
             let sp = self.set_expn_info(cx, struct_field.span);
-            let opt_id = match struct_field.node.kind {
-                ast::NamedField(ident, _) if (struct_type == Unknown ||
-                                              struct_type == Record) => {
-                    struct_type = Record;
-                    Some(ident)
-                }
-                ast::UnnamedField(..) if (struct_type == Unknown ||
-                                          struct_type == Tuple) => {
-                    struct_type = Tuple;
-                    None
-                }
-                _ => {
-                    cx.span_bug(sp, "a struct with named and unnamed fields in `derive`");
-                }
-            };
             let ident = cx.ident_of(&format!("{}_{}", prefix, i));
             paths.push(codemap::Spanned{span: sp, node: ident});
             let val = cx.expr_deref(sp, cx.expr_path(cx.path_ident(sp,ident)));
             let val = cx.expr(sp, ast::ExprKind::Paren(val));
-            ident_expr.push((sp, opt_id, val, &struct_field.node.attrs[..]));
+            ident_exprs.push((sp, struct_field.ident, val, &struct_field.attrs[..]));
         }
 
         let subpats = self.create_subpatterns(cx, paths, mutbl);
-
-        // struct_type is definitely not Unknown, since struct_def.fields
-        // must be nonempty to reach here
         let pattern = if struct_def.is_struct() {
-            let field_pats = subpats.into_iter().zip(&ident_expr)
-                                    .map(|(pat, &(_, id, _, _))| {
-                // id is guaranteed to be Some
+            let field_pats = subpats.into_iter().zip(&ident_exprs).map(|(pat, &(sp, ident, _, _))| {
+                if ident.is_none() {
+                    cx.span_bug(sp, "a braced struct with unnamed fields in `derive`");
+                }
                 codemap::Spanned {
                     span: pat.span,
-                    node: ast::FieldPat { ident: id.unwrap(), pat: pat, is_shorthand: false },
+                    node: ast::FieldPat { ident: ident.unwrap(), pat: pat, is_shorthand: false },
                 }
             }).collect();
             cx.pat_struct(self.span, struct_path, field_pats)
@@ -1540,7 +1498,7 @@ impl<'a> TraitDef<'a> {
             cx.pat_enum(self.span, struct_path, subpats)
         };
 
-        (pattern, ident_expr)
+        (pattern, ident_exprs)
     }
 
     fn create_enum_variant_pattern(&self,
@@ -1638,3 +1596,21 @@ pub fn cs_same_method<F>(f: F,
         }
     }
 }
+
+/// Return true if the type has no value fields
+/// (for an enum, no variant has any fields)
+pub fn is_type_without_fields(item: &Annotatable) -> bool {
+    if let Annotatable::Item(ref item) = *item {
+        match item.node {
+            ast::ItemKind::Enum(ref enum_def, _) => {
+                enum_def.variants.iter().all(|v| v.node.data.fields().is_empty())
+            }
+            ast::ItemKind::Struct(ref variant_data, _) => {
+                variant_data.fields().is_empty()
+            }
+            _ => false
+        }
+    } else {
+        false
+    }
+}
index bf8aa8fb23debd071fceabb07f5a016e98771797..c37ae116d379be84c496ad42e604f8cba24cb28c 100644 (file)
@@ -8,6 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use deriving;
 use deriving::generic::*;
 use deriving::generic::ty::*;
 
@@ -26,7 +27,10 @@ pub fn expand_deriving_hash(cx: &mut ExtCtxt,
 
     let path = Path::new_(pathvec_std!(cx, core::hash::Hash), None,
                           vec!(), true);
-    let arg = Path::new_local("__H");
+
+    let typaram = &*deriving::hygienic_type_parameter(item, "__H");
+
+    let arg = Path::new_local(typaram);
     let hash_trait_def = TraitDef {
         span: span,
         attributes: Vec::new(),
@@ -39,7 +43,7 @@ pub fn expand_deriving_hash(cx: &mut ExtCtxt,
                 name: "hash",
                 generics: LifetimeBounds {
                     lifetimes: Vec::new(),
-                    bounds: vec![("__H",
+                    bounds: vec![(typaram,
                                   vec![path_std!(cx, core::hash::Hasher)])],
                 },
                 explicit_self: borrowed_explicit_self(),
@@ -77,15 +81,13 @@ fn hash_substructure(cx: &mut ExtCtxt, trait_span: Span, substr: &Substructure)
 
     let fields = match *substr.fields {
         Struct(_, ref fs) => fs,
-        EnumMatching(index, variant, ref fs) => {
-            // Determine the discriminant. We will feed this value to the byte
-            // iteration function.
-            let discriminant = match variant.node.disr_expr {
-                Some(ref d) => d.clone(),
-                None => cx.expr_usize(trait_span, index)
-            };
+        EnumMatching(_, _, ref fs) => {
+            let variant_value = deriving::call_intrinsic(cx,
+                                                         trait_span,
+                                                         "discriminant_value",
+                                                         vec![cx.expr_self(trait_span)]);
 
-            stmts.push(call_hash(trait_span, discriminant));
+            stmts.push(call_hash(trait_span, variant_value));
 
             fs
         }
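
The derived `hash` now feeds the value produced by the `discriminant_value` intrinsic into the hasher instead of the positional variant index, so explicit `enum` discriminants are what get hashed. A stable-Rust analogue of that idea, using `std::mem::discriminant` rather than the compiler intrinsic (an approximation, not the generated code):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::mem;

enum Op {
    Add = 10,
    Sub = 20,
}

fn hash_op(op: &Op) -> u64 {
    let mut h = DefaultHasher::new();
    mem::discriminant(op).hash(&mut h); // hash the discriminant, not a positional index
    h.finish()
}

fn main() {
    println!("Add hashes to {:#x}", hash_op(&Op::Add));
    println!("Sub hashes to {:#x}", hash_op(&Op::Sub));
}
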
index 4e2142f1fb482a963da3426f8432c031ea07214a..92a141fb4ec863f43db5666b4c251fa030f29b94 100644 (file)
@@ -9,11 +9,8 @@
 // except according to those terms.
 
 //! The compiler code necessary to implement the `#[derive]` extensions.
-//!
-//! FIXME (#2810): hygiene. Search for "__" strings (in other files too). We also assume "extra" is
-//! the standard library, and "std" is the core library.
 
-use syntax::ast::{MetaItem, MetaItemKind};
+use syntax::ast::{MetaItem, MetaItemKind, self};
 use syntax::attr::AttrMetaMethods;
 use syntax::ext::base::{ExtCtxt, SyntaxEnv, Annotatable};
 use syntax::ext::base::{MultiDecorator, MultiItemDecorator, MultiModifier};
@@ -21,6 +18,7 @@ use syntax::ext::build::AstBuilder;
 use syntax::feature_gate;
 use syntax::codemap::Span;
 use syntax::parse::token::{intern, intern_and_get_ident};
+use syntax::ptr::P;
 
 macro_rules! pathvec {
     ($($x:ident)::+) => (
@@ -81,7 +79,10 @@ fn expand_derive(cx: &mut ExtCtxt,
                  mitem: &MetaItem,
                  annotatable: Annotatable)
                  -> Annotatable {
-    annotatable.map_item_or(|item| {
+    debug!("expand_derive: span = {:?}", span);
+    debug!("expand_derive: mitem = {:?}", mitem);
+    debug!("expand_derive: annotatable input  = {:?}", annotatable);
+    let annot = annotatable.map_item_or(|item| {
         item.map(|mut item| {
             if mitem.value_str().is_some() {
                 cx.span_err(mitem.span, "unexpected value in `derive`");
@@ -92,6 +93,9 @@ fn expand_derive(cx: &mut ExtCtxt,
                 cx.span_warn(mitem.span, "empty trait list in `derive`");
             }
 
+            let mut found_partial_eq = false;
+            let mut found_eq = false;
+
             for titem in traits.iter().rev() {
                 let tname = match titem.node {
                     MetaItemKind::Word(ref tname) => tname,
@@ -110,17 +114,64 @@ fn expand_derive(cx: &mut ExtCtxt,
                     continue;
                 }
 
+                if &tname[..] == "Eq" {
+                    found_eq = true;
+                } else if &tname[..] == "PartialEq" {
+                    found_partial_eq = true;
+                }
+
                 // #[derive(Foo, Bar)] expands to #[derive_Foo] #[derive_Bar]
                 item.attrs.push(cx.attribute(titem.span, cx.meta_word(titem.span,
                     intern_and_get_ident(&format!("derive_{}", tname)))));
             }
 
+            // RFC #1445. `#[derive(PartialEq, Eq)]` adds a (trusted)
+            // `#[structural_match]` attribute.
+            if found_partial_eq && found_eq {
+                // This span is **very** sensitive and crucial to
+                // getting the stability behavior we want. What we are
+                // doing is marking `#[structural_match]` with the
+                // span of the `#[deriving(...)]` attribute (the
+                // entire attribute, not just the `PartialEq` or `Eq`
+                // part), but with the current backtrace. The current
+                // backtrace will contain a topmost entry that IS this
+                // `#[deriving(...)]` attribute and with the
+                // "allow-unstable" flag set to true.
+                //
+                // Note that we do NOT use the span of the `Eq`
+                // text itself. You might think this is
+                // equivalent, because the `Eq` appears within the
+                // `#[deriving(Eq)]` attribute, and hence we would
+                // inherit the "allows unstable" from the
+                // backtrace.  But in fact this is not always the
+                // case. The actual source text that led to
+                // deriving can be `#[$attr]`, for example, where
+                // `$attr == deriving(Eq)`. In that case, the
+                // "#[structural_match]" would be considered to
+                // originate not from the deriving call but from
+                // text outside the deriving call, and hence would
+                // be forbidden from using unstable
+                // content.
+                //
+                // See tests src/run-pass/rfc1445 for
+                // examples. --nmatsakis
+                let span = Span { expn_id: cx.backtrace(), .. span };
+                assert!(cx.parse_sess.codemap().span_allows_unstable(span));
+                debug!("inserting structural_match with span {:?}", span);
+                let structural_match = intern_and_get_ident("structural_match");
+                item.attrs.push(cx.attribute(span,
+                                             cx.meta_word(span,
+                                                          structural_match)));
+            }
+
             item
         })
     }, |a| {
         cx.span_err(span, "`derive` can only be applied to items");
         a
-    })
+    });
+    debug!("expand_derive: annotatable output = {:?}", annot);
+    annot
 }
 
 macro_rules! derive_traits {
@@ -197,3 +248,43 @@ fn warn_if_deprecated(ecx: &mut ExtCtxt, sp: Span, name: &str) {
                                    name, replacement));
     }
 }
+
+/// Construct a name for the inner type parameter that can't collide with any type parameters of
+/// the item. This is achieved by starting with a base and then concatenating the names of all
+/// other type parameters.
+// FIXME(aburka): use real hygiene when that becomes possible
+fn hygienic_type_parameter(item: &Annotatable, base: &str) -> String {
+    let mut typaram = String::from(base);
+    if let Annotatable::Item(ref item) = *item {
+        match item.node {
+            ast::ItemKind::Struct(_, ast::Generics { ref ty_params, .. }) |
+                ast::ItemKind::Enum(_, ast::Generics { ref ty_params, .. }) => {
+
+                for ty in ty_params.iter() {
+                    typaram.push_str(&ty.ident.name.as_str());
+                }
+            }
+
+            _ => {}
+        }
+    }
+
+    typaram
+}
+
+/// Constructs an expression that calls an intrinsic
+fn call_intrinsic(cx: &ExtCtxt,
+                  span: Span,
+                  intrinsic: &str,
+                  args: Vec<P<ast::Expr>>) -> P<ast::Expr> {
+    let path = cx.std_path(&["intrinsics", intrinsic]);
+    let call = cx.expr_call_global(span, path, args);
+
+    cx.expr_block(P(ast::Block {
+        stmts: vec![],
+        expr: Some(call),
+        id: ast::DUMMY_NODE_ID,
+        rules: ast::BlockCheckMode::Unsafe(ast::CompilerGenerated),
+        span: span }))
+}
+
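
The collision-avoidance rule in `hygienic_type_parameter` above is plain string concatenation: start from a base such as `__D` and append every type-parameter name of the item, so a hypothetical `struct Foo<T, U>` would get `__DTU`. A standalone sketch of that rule:

fn hygienic_type_parameter(base: &str, item_ty_params: &[&str]) -> String {
    let mut name = String::from(base);
    for p in item_ty_params {
        name.push_str(p); // append each existing parameter name
    }
    name
}

fn main() {
    assert_eq!(hygienic_type_parameter("__D", &["T", "U"]), "__DTU");
    assert_eq!(hygienic_type_parameter("__H", &[]), "__H");
}
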
index 97531d4279d4b47c73b763785baf46b5d124e3f7..8f5362b4d2895c270ca10440b0459d76b9793089 100644 (file)
@@ -21,9 +21,9 @@
 
 #![feature(rustc_private)]
 #![feature(staged_api)]
-#![feature(str_char)]
 
 extern crate fmt_macros;
+#[macro_use] extern crate log;
 #[macro_use]
 extern crate syntax;
 
index 01daa938142b149ba1ff0b9abc455b709281c1ee..0244e2657969337bd2e47a4a49e55b11fca67e68 100644 (file)
@@ -59,6 +59,7 @@
 #![cfg_attr(windows, feature(libc))]
 // Handle rustfmt skips
 #![feature(custom_attribute)]
+#![feature(question_mark)]
 #![allow(unused_attributes)]
 
 use std::io::prelude::*;
index e54f763fd0db795cd701365c9984212da16ffe27..395d966b9f25c74b4066edf257aff62587bef446 100644 (file)
@@ -109,7 +109,7 @@ impl TermInfo {
     }
     // Keep the metadata small
     fn _from_path(path: &Path) -> Result<TermInfo, Error> {
-        let file = try!(File::open(path).map_err(|e| Error::IoError(e)));
+        let file = File::open(path).map_err(|e| Error::IoError(e))?;
         let mut reader = BufReader::new(file);
         parse(&mut reader, false).map_err(|e| Error::MalformedTerminfo(e))
     }
index aceaa0c10bcce847e22fbbd7a25224561c6a0098..60b5dffac59ad20f1fc63064f6a818849843475a 100644 (file)
@@ -209,7 +209,7 @@ pub fn expand(cap: &[u8], params: &[Param], vars: &mut Variables) -> Result<Vec<
                     'd' | 'o' | 'x' | 'X' | 's' => {
                         if let Some(arg) = stack.pop() {
                             let flags = Flags::new();
-                            let res = try!(format(arg, FormatOp::from_char(cur), flags));
+                            let res = format(arg, FormatOp::from_char(cur), flags)?;
                             output.extend(res.iter().map(|x| *x));
                         } else {
                             return Err("stack is empty".to_string());
@@ -317,7 +317,7 @@ pub fn expand(cap: &[u8], params: &[Param], vars: &mut Variables) -> Result<Vec<
                 match (*fstate, cur) {
                     (_, 'd') | (_, 'o') | (_, 'x') | (_, 'X') | (_, 's') => {
                         if let Some(arg) = stack.pop() {
-                            let res = try!(format(arg, FormatOp::from_char(cur), *flags));
+                            let res = format(arg, FormatOp::from_char(cur), *flags)?;
                             output.extend(res.iter().map(|x| *x));
                             // will cause state to go to Nothing
                             old_state = FormatPattern(*flags, *fstate);
index 558d35c2ae27b9844e9b2105eb4cdcc6cb6d9802..8d1ba378521529ac87043a85dc698975ec14bcb1 100644 (file)
@@ -168,7 +168,7 @@ fn read_le_u16(r: &mut io::Read) -> io::Result<u16> {
     let mut b = [0; 2];
     let mut amt = 0;
     while amt < b.len() {
-        match try!(r.read(&mut b[amt..])) {
+        match r.read(&mut b[amt..])? {
             0 => return Err(io::Error::new(io::ErrorKind::Other, "end of file")),
             n => amt += n,
         }
@@ -186,7 +186,7 @@ fn read_byte(r: &mut io::Read) -> io::Result<u8> {
 /// Parse a compiled terminfo entry, using long capability names if `longnames`
 /// is true
 pub fn parse(file: &mut io::Read, longnames: bool) -> Result<TermInfo, String> {
-    macro_rules! try( ($e:expr) => (
+    macro_rules! t( ($e:expr) => (
         match $e {
             Ok(e) => e,
             Err(e) => return Err(format!("{}", e))
@@ -200,7 +200,7 @@ pub fn parse(file: &mut io::Read, longnames: bool) -> Result<TermInfo, String> {
     };
 
     // Check magic number
-    let magic = try!(read_le_u16(file));
+    let magic = t!(read_le_u16(file));
     if magic != 0x011A {
         return Err(format!("invalid magic number: expected {:x}, found {:x}",
                            0x011A,
@@ -211,7 +211,7 @@ pub fn parse(file: &mut io::Read, longnames: bool) -> Result<TermInfo, String> {
     // supported. Using 0 instead of -1 works because we skip sections with length 0.
     macro_rules! read_nonneg {
         () => {{
-            match try!(read_le_u16(file)) as i16 {
+            match t!(read_le_u16(file)) as i16 {
                 n if n >= 0 => n as usize,
                 -1 => 0,
                 _ => return Err("incompatible file: length fields must be  >= -1".to_string()),
@@ -243,7 +243,7 @@ pub fn parse(file: &mut io::Read, longnames: bool) -> Result<TermInfo, String> {
 
     // don't read NUL
     let mut bytes = Vec::new();
-    try!(file.take((names_bytes - 1) as u64).read_to_end(&mut bytes));
+    t!(file.take((names_bytes - 1) as u64).read_to_end(&mut bytes));
     let names_str = match String::from_utf8(bytes) {
         Ok(s) => s,
         Err(_) => return Err("input not utf-8".to_string()),
@@ -253,11 +253,11 @@ pub fn parse(file: &mut io::Read, longnames: bool) -> Result<TermInfo, String> {
                                            .map(|s| s.to_string())
                                            .collect();
     // consume NUL
-    if try!(read_byte(file)) != b'\0' {
+    if t!(read_byte(file)) != b'\0' {
         return Err("incompatible file: missing null terminator for names section".to_string());
     }
 
-    let bools_map: HashMap<String, bool> = try! {
+    let bools_map: HashMap<String, bool> = t! {
         (0..bools_bytes).filter_map(|i| match read_byte(file) {
             Err(e) => Some(Err(e)),
             Ok(1) => Some(Ok((bnames[i].to_string(), true))),
@@ -266,10 +266,10 @@ pub fn parse(file: &mut io::Read, longnames: bool) -> Result<TermInfo, String> {
     };
 
     if (bools_bytes + names_bytes) % 2 == 1 {
-        try!(read_byte(file)); // compensate for padding
+        t!(read_byte(file)); // compensate for padding
     }
 
-    let numbers_map: HashMap<String, u16> = try! {
+    let numbers_map: HashMap<String, u16> = t! {
         (0..numbers_count).filter_map(|i| match read_le_u16(file) {
             Ok(0xFFFF) => None,
             Ok(n) => Some(Ok((nnames[i].to_string(), n))),
@@ -278,14 +278,14 @@ pub fn parse(file: &mut io::Read, longnames: bool) -> Result<TermInfo, String> {
     };
 
     let string_map: HashMap<String, Vec<u8>> = if string_offsets_count > 0 {
-        let string_offsets: Vec<u16> = try!((0..string_offsets_count)
+        let string_offsets: Vec<u16> = t!((0..string_offsets_count)
                                                 .map(|_| read_le_u16(file))
                                                 .collect());
 
         let mut string_table = Vec::new();
-        try!(file.take(string_table_bytes as u64).read_to_end(&mut string_table));
+        t!(file.take(string_table_bytes as u64).read_to_end(&mut string_table));
 
-        try!(string_offsets.into_iter().enumerate().filter(|&(_, offset)| {
+        t!(string_offsets.into_iter().enumerate().filter(|&(_, offset)| {
             // non-entry
             offset != 0xFFFF
         }).map(|(i, offset)| {
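
The terminfo and libtest hunks in this commit replace `try!(expr)` with the `?` operator (behind `#![feature(question_mark)]` at the time; stable since Rust 1.13). Both spellings desugar to the same early-return-on-`Err` shape, as in this small illustrative example:

use std::fs::File;
use std::io::{self, Read};

fn read_to_string(path: &str) -> io::Result<String> {
    let mut f = File::open(path)?;  // was: let mut f = try!(File::open(path));
    let mut s = String::new();
    f.read_to_string(&mut s)?;      // was: try!(f.read_to_string(&mut s));
    Ok(s)
}

fn main() {
    match read_to_string("/nonexistent/path") {
        Ok(text) => println!("{}", text),
        Err(e) => println!("error: {}", e),
    }
}
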
index 96a84496b9c9c23245790d2eef7f67e58aa4a4ea..ecbd5a9c0f509a09192043c5e4cab40d88dc303b 100644 (file)
@@ -11,4 +11,3 @@ crate-type = ["dylib", "rlib"]
 [dependencies]
 getopts = { path = "../libgetopts" }
 term = { path = "../libterm" }
-serialize = { path = "../libserialize" }
index 7536ab9c5afc677e05fda8e83587241f889a6215..e78fd0dea292ad4f478218d91a75cca8ea730d63 100644 (file)
 #![feature(rustc_private)]
 #![feature(set_stdio)]
 #![feature(staged_api)]
+#![feature(question_mark)]
 
 extern crate getopts;
-extern crate serialize;
-extern crate serialize as rustc_serialize;
 extern crate term;
 extern crate libc;
 
@@ -56,7 +55,6 @@ use self::NamePadding::*;
 use self::OutputLocation::*;
 
 use stats::Stats;
-use serialize::Encodable;
 use std::boxed::FnBox;
 use term::Terminal;
 
@@ -109,7 +107,7 @@ impl fmt::Display for TestName {
     }
 }
 
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, PartialEq, Eq)]
 enum NamePadding {
     PadNone,
     PadOnRight,
@@ -215,7 +213,7 @@ pub struct TestDescAndFn {
     pub testfn: TestFn,
 }
 
-#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
+#[derive(Clone, PartialEq, Debug, Copy)]
 pub struct Metric {
     value: f64,
     noise: f64,
@@ -301,6 +299,7 @@ pub struct TestOpts {
     pub logfile: Option<PathBuf>,
     pub nocapture: bool,
     pub color: ColorConfig,
+    pub quiet: bool,
 }
 
 impl TestOpts {
@@ -314,6 +313,7 @@ impl TestOpts {
             logfile: None,
             nocapture: false,
             color: AutoColor,
+            quiet: false,
         }
     }
 }
@@ -331,6 +331,7 @@ fn optgroups() -> Vec<getopts::OptGroup> {
                           of stdout", "PATH"),
       getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
                                          task, allow printing directly"),
+      getopts::optflag("q", "quiet", "Display one character per test instead of one line"),
       getopts::optopt("", "color", "Configure coloring of output:
             auto   = colorize if stdout is a tty and tests are run on serially (default);
             always = always colorize output;
@@ -348,8 +349,8 @@ By default, all tests are run in parallel. This can be altered with the
 RUST_TEST_THREADS environment variable when running tests (set it to 1).
 
 All tests have their standard output and standard error captured by default.
-This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
-environment variable. Logging is not captured by default.
+This can be overridden with the --nocapture flag or by setting the RUST_TEST_NOCAPTURE
+environment variable to a value other than "0". Logging is not captured by default.
 
 Test Attributes:
 
@@ -388,6 +389,7 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
     };
 
     let run_ignored = matches.opt_present("ignored");
+    let quiet = matches.opt_present("quiet");
 
     let logfile = matches.opt_str("logfile");
     let logfile = logfile.map(|s| PathBuf::from(&s));
@@ -397,7 +399,10 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
 
     let mut nocapture = matches.opt_present("nocapture");
     if !nocapture {
-        nocapture = env::var("RUST_TEST_NOCAPTURE").is_ok();
+        nocapture = match env::var("RUST_TEST_NOCAPTURE") {
+            Ok(val) => &val != "0",
+            Err(_) => false
+        };
     }
 
     let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
@@ -420,6 +425,7 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
         logfile: logfile,
         nocapture: nocapture,
         color: color,
+        quiet: quiet,
     };
 
     Some(Ok(test_opts))
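
The new rule for `RUST_TEST_NOCAPTURE` in the hunk above: any value other than "0" turns output capturing off, while an unset variable leaves capturing on. The same check in isolation (illustrative sketch):

use std::env;

fn nocapture_from_env() -> bool {
    match env::var("RUST_TEST_NOCAPTURE") {
        Ok(val) => val != "0", // any value other than "0" disables capturing
        Err(_) => false,       // unset: keep capturing test output
    }
}

fn main() {
    println!("nocapture = {}", nocapture_from_env());
}
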
@@ -451,6 +457,7 @@ struct ConsoleTestState<T> {
     log_out: Option<File>,
     out: OutputLocation<T>,
     use_color: bool,
+    quiet: bool,
     total: usize,
     passed: usize,
     failed: usize,
@@ -464,7 +471,7 @@ struct ConsoleTestState<T> {
 impl<T: Write> ConsoleTestState<T> {
     pub fn new(opts: &TestOpts, _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
         let log_out = match opts.logfile {
-            Some(ref path) => Some(try!(File::create(path))),
+            Some(ref path) => Some(File::create(path)?),
             None => None,
         };
         let out = match term::stdout() {
@@ -476,6 +483,7 @@ impl<T: Write> ConsoleTestState<T> {
             out: out,
             log_out: log_out,
             use_color: use_color(opts),
+            quiet: opts.quiet,
             total: 0,
             passed: 0,
             failed: 0,
@@ -488,15 +496,15 @@ impl<T: Write> ConsoleTestState<T> {
     }
 
     pub fn write_ok(&mut self) -> io::Result<()> {
-        self.write_pretty("ok", term::color::GREEN)
+        self.write_short_result("ok", ".", term::color::GREEN)
     }
 
     pub fn write_failed(&mut self) -> io::Result<()> {
-        self.write_pretty("FAILED", term::color::RED)
+        self.write_short_result("FAILED", "F", term::color::RED)
     }
 
     pub fn write_ignored(&mut self) -> io::Result<()> {
-        self.write_pretty("ignored", term::color::YELLOW)
+        self.write_short_result("ignored", "i", term::color::YELLOW)
     }
 
     pub fn write_metric(&mut self) -> io::Result<()> {
@@ -507,20 +515,30 @@ impl<T: Write> ConsoleTestState<T> {
         self.write_pretty("bench", term::color::CYAN)
     }
 
+    pub fn write_short_result(&mut self, verbose: &str, quiet: &str, color: term::color::Color)
+                              -> io::Result<()> {
+        if self.quiet {
+            self.write_pretty(quiet, color)
+        } else {
+            self.write_pretty(verbose, color)?;
+            self.write_plain("\n")
+        }
+    }
+
     pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
         match self.out {
             Pretty(ref mut term) => {
                 if self.use_color {
-                    try!(term.fg(color));
+                    term.fg(color)?;
                 }
-                try!(term.write_all(word.as_bytes()));
+                term.write_all(word.as_bytes())?;
                 if self.use_color {
-                    try!(term.reset());
+                    term.reset()?;
                 }
                 term.flush()
             }
             Raw(ref mut stdout) => {
-                try!(stdout.write_all(word.as_bytes()));
+                stdout.write_all(word.as_bytes())?;
                 stdout.flush()
             }
         }
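The write_short_result helper introduced above is the core of the new --quiet mode: it emits a single character per test (".", "F", "i") instead of a full "ok"/"FAILED"/"ignored" word plus newline. A reduced, self-contained sketch of that dispatch (names simplified; not the actual libtest code, which writes through the terminal handle):

```rust
// Quiet mode: one character, no newline. Verbose mode: full word plus newline.
fn short_result(quiet: bool, verbose: &str, short: &str) -> String {
    if quiet {
        short.to_string()
    } else {
        format!("{}\n", verbose)
    }
}

fn main() {
    assert_eq!(short_result(true, "ok", "."), ".");
    assert_eq!(short_result(false, "ok", "."), "ok\n");
}
```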
@@ -529,11 +547,11 @@ impl<T: Write> ConsoleTestState<T> {
     pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
         match self.out {
             Pretty(ref mut term) => {
-                try!(term.write_all(s.as_bytes()));
+                term.write_all(s.as_bytes())?;
                 term.flush()
             }
             Raw(ref mut stdout) => {
-                try!(stdout.write_all(s.as_bytes()));
+                stdout.write_all(s.as_bytes())?;
                 stdout.flush()
             }
         }
@@ -550,28 +568,28 @@ impl<T: Write> ConsoleTestState<T> {
     }
 
     pub fn write_test_start(&mut self, test: &TestDesc, align: NamePadding) -> io::Result<()> {
-        let name = test.padded_name(self.max_name_len, align);
-        self.write_plain(&format!("test {} ... ", name))
+        if self.quiet && align != PadOnRight {
+            Ok(())
+        } else {
+            let name = test.padded_name(self.max_name_len, align);
+            self.write_plain(&format!("test {} ... ", name))
+        }
     }
 
     pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
-        try!(match *result {
+        match *result {
             TrOk => self.write_ok(),
             TrFailed => self.write_failed(),
             TrIgnored => self.write_ignored(),
             TrMetrics(ref mm) => {
-                try!(self.write_metric());
-                self.write_plain(&format!(": {}", mm.fmt_metrics()))
+                self.write_metric()?;
+                self.write_plain(&format!(": {}\n", mm.fmt_metrics()))
             }
             TrBench(ref bs) => {
-                try!(self.write_bench());
-
-                try!(self.write_plain(&format!(": {}", fmt_bench_samples(bs))));
-
-                Ok(())
+                self.write_bench()?;
+                self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
             }
-        });
-        self.write_plain("\n")
+        }
     }
 
     pub fn write_log(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
@@ -593,7 +611,7 @@ impl<T: Write> ConsoleTestState<T> {
     }
 
     pub fn write_failures(&mut self) -> io::Result<()> {
-        try!(self.write_plain("\nfailures:\n"));
+        self.write_plain("\nfailures:\n")?;
         let mut failures = Vec::new();
         let mut fail_out = String::new();
         for &(ref f, ref stdout) in &self.failures {
@@ -606,14 +624,14 @@ impl<T: Write> ConsoleTestState<T> {
             }
         }
         if !fail_out.is_empty() {
-            try!(self.write_plain("\n"));
-            try!(self.write_plain(&fail_out));
+            self.write_plain("\n")?;
+            self.write_plain(&fail_out)?;
         }
 
-        try!(self.write_plain("\nfailures:\n"));
+        self.write_plain("\nfailures:\n")?;
         failures.sort();
         for name in &failures {
-            try!(self.write_plain(&format!("    {}\n", name)));
+            self.write_plain(&format!("    {}\n", name))?;
         }
         Ok(())
     }
@@ -623,22 +641,22 @@ impl<T: Write> ConsoleTestState<T> {
 
         let success = self.failed == 0;
         if !success {
-            try!(self.write_failures());
+            self.write_failures()?;
         }
 
-        try!(self.write_plain("\ntest result: "));
+        self.write_plain("\ntest result: ")?;
         if success {
             // There's no parallelism at this point so it's safe to use color
-            try!(self.write_ok());
+            self.write_pretty("ok", term::color::GREEN)?;
         } else {
-            try!(self.write_failed());
+            self.write_pretty("FAILED", term::color::RED)?;
         }
         let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
                         self.passed,
                         self.failed,
                         self.ignored,
                         self.measured);
-        try!(self.write_plain(&s));
+        self.write_plain(&s)?;
         return Ok(success);
     }
 }
@@ -692,8 +710,8 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Resu
             TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
             TeWait(ref test, padding) => st.write_test_start(test, padding),
             TeResult(test, result, stdout) => {
-                try!(st.write_log(&test, &result));
-                try!(st.write_result(&result));
+                st.write_log(&test, &result)?;
+                st.write_result(&result)?;
                 match result {
                     TrOk => st.passed += 1,
                     TrIgnored => st.ignored += 1,
@@ -722,7 +740,7 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Resu
         }
     }
 
-    let mut st = try!(ConsoleTestState::new(opts, None::<io::Stdout>));
+    let mut st = ConsoleTestState::new(opts, None::<io::Stdout>)?;
     fn len_if_padded(t: &TestDescAndFn) -> usize {
         match t.testfn.padding() {
             PadNone => 0,
@@ -736,7 +754,7 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Resu
         }
         None => {}
     }
-    try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
+    run_tests(opts, tests, |x| callback(&x, &mut st))?;
     return st.write_run_finish();
 }
 
@@ -758,6 +776,7 @@ fn should_sort_failures_before_printing_them() {
         log_out: None,
         out: Raw(Vec::new()),
         use_color: false,
+        quiet: false,
         total: 0,
         passed: 0,
         failed: 0,
@@ -831,7 +850,7 @@ fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) ->
                                        .map(|t| t.desc.clone())
                                        .collect();
 
-    try!(callback(TeFiltered(filtered_descs)));
+    callback(TeFiltered(filtered_descs))?;
 
     let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
         filtered_tests.into_iter().partition(|e| {
@@ -858,7 +877,7 @@ fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) ->
                 // We are doing one test at a time so we can print the name
                 // of the test before we run it. Useful for debugging tests
                 // that hang forever.
-                try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
+                callback(TeWait(test.desc.clone(), test.testfn.padding()))?;
             }
             run_test(opts, !opts.run_tests, test, tx.clone());
             pending += 1;
@@ -866,9 +885,9 @@ fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) ->
 
         let (desc, result, stdout) = rx.recv().unwrap();
         if concurrency != 1 {
-            try!(callback(TeWait(desc.clone(), PadNone)));
+            callback(TeWait(desc.clone(), PadNone))?;
         }
-        try!(callback(TeResult(desc, result, stdout)));
+        callback(TeResult(desc, result, stdout))?;
         pending -= 1;
     }
 
@@ -876,10 +895,10 @@ fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) ->
         // All benchmarks run at the end, in serial.
         // (this includes metric fns)
         for b in filtered_benchs_and_metrics {
-            try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
+            callback(TeWait(b.desc.clone(), b.testfn.padding()))?;
             run_test(opts, false, b, tx.clone());
             let (test, result, stdout) = rx.recv().unwrap();
-            try!(callback(TeResult(test, result, stdout)));
+            callback(TeResult(test, result, stdout))?;
         }
     }
     Ok(())
@@ -1490,7 +1509,7 @@ mod tests {
 
         assert_eq!(filtered.len(), 1);
         assert_eq!(filtered[0].desc.name.to_string(), "1");
-        assert!(filtered[0].desc.ignore == false);
+        assert!(!filtered[0].desc.ignore);
     }
 
     #[test]
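Beyond the --quiet feature (reachable as, e.g., `./test-binary -q` or `cargo test -- -q`), the libtest changes above mechanically replace the try! macro with the ? operator. The two forms are equivalent ways of propagating an error to the caller; a minimal standalone sketch contrasting them (not part of the patch):

```rust
use std::fs::File;
use std::io;

// Old style: try! expands to a match that early-returns the error.
fn open_old(path: &str) -> io::Result<File> {
    let f = try!(File::open(path));
    Ok(f)
}

// New style: the ? operator does the same early return.
fn open_new(path: &str) -> io::Result<File> {
    let f = File::open(path)?;
    Ok(f)
}

fn main() {
    let _ = (open_old("README.md"), open_new("README.md"));
}
```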
index 00bfa63e6fea8204714719ea3b318a580416eeab..d2ebdcca80cf0cb6b44a037b1b3c08fa7e1921b7 100644 (file)
@@ -243,3 +243,7 @@ double rust_interesting_average(uint64_t n, ...) {
     va_end(pairs);
     return sum / n;
 }
+
+int32_t rust_int8_to_int32(int8_t x) {
+    return (int32_t)x;
+}
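The small C helper added above presumably backs an FFI run-pass test for sign-extending an i8 across the C ABI; the Rust side of that test is not in this excerpt. A hedged sketch of what the binding could look like (declaration site and link setup are assumptions):

```rust
// Hypothetical Rust-side binding for the C helper added above; linking
// against the C test-helper library is assumed to be arranged by the harness.
extern "C" {
    fn rust_int8_to_int32(x: i8) -> i32;
}

fn main() {
    let r = unsafe { rust_int8_to_int32(-1i8) };
    assert_eq!(r, -1i32); // sign-extended across the C ABI, not zero-extended
}
```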
diff --git a/src/rustbook/Cargo.toml b/src/rustbook/Cargo.toml
deleted file mode 100644 (file)
index c684c47..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-[package]
-authors = ["The Rust Project Developers"]
-name = "rustbook"
-version = "0.0.0"
-
-[lib]
-name = "rustbook"
-path = "main.rs"
-crate-type = ["dylib"]
-
-[dependencies]
-rustc_back = { path = "../librustc_back" }
-rustdoc = { path = "../librustdoc" }
diff --git a/src/rustbook/book.rs b/src/rustbook/book.rs
deleted file mode 100644 (file)
index 36a37db..0000000
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Basic data structures for representing a book.
-
-use std::io::prelude::*;
-use std::io::BufReader;
-use std::iter;
-use std::path::{Path, PathBuf};
-
-pub struct BookItem {
-    pub title: String,
-    pub path: PathBuf,
-    pub path_to_root: PathBuf,
-    pub children: Vec<BookItem>,
-}
-
-pub struct Book {
-    pub chapters: Vec<BookItem>,
-}
-
-/// A depth-first iterator over a book.
-pub struct BookItems<'a> {
-    cur_items: &'a [BookItem],
-    cur_idx: usize,
-    stack: Vec<(&'a [BookItem], usize)>,
-}
-
-impl<'a> Iterator for BookItems<'a> {
-    type Item = (String, &'a BookItem);
-
-    fn next(&mut self) -> Option<(String, &'a BookItem)> {
-        loop {
-            if self.cur_idx >= self.cur_items.len() {
-                match self.stack.pop() {
-                    None => return None,
-                    Some((parent_items, parent_idx)) => {
-                        self.cur_items = parent_items;
-                        self.cur_idx = parent_idx + 1;
-                    }
-                }
-            } else {
-                let cur = self.cur_items.get(self.cur_idx).unwrap();
-
-                let mut section = "".to_string();
-                for &(_, idx) in &self.stack {
-                    section.push_str(&(idx + 1).to_string()[..]);
-                    section.push('.');
-                }
-                section.push_str(&(self.cur_idx + 1).to_string()[..]);
-                section.push('.');
-
-                self.stack.push((self.cur_items, self.cur_idx));
-                self.cur_items = &cur.children[..];
-                self.cur_idx = 0;
-                return Some((section, cur))
-            }
-        }
-    }
-}
-
-impl Book {
-    pub fn iter(&self) -> BookItems {
-        BookItems {
-            cur_items: &self.chapters[..],
-            cur_idx: 0,
-            stack: Vec::new(),
-        }
-    }
-}
-
-/// Construct a book by parsing a summary (markdown table of contents).
-pub fn parse_summary(input: &mut Read, src: &Path) -> Result<Book, Vec<String>> {
-    fn collapse(stack: &mut Vec<BookItem>,
-                top_items: &mut Vec<BookItem>,
-                to_level: usize) {
-        loop {
-            if stack.len() < to_level { return }
-            if stack.len() == 1 {
-                top_items.push(stack.pop().unwrap());
-                return;
-            }
-
-            let tip = stack.pop().unwrap();
-            let last = stack.len() - 1;
-            stack[last].children.push(tip);
-        }
-    }
-
-    let mut top_items = vec!();
-    let mut stack = vec!();
-    let mut errors = vec!();
-
-    // always include the introduction
-    top_items.push(BookItem {
-        title: "Introduction".to_string(),
-        path: PathBuf::from("README.md"),
-        path_to_root: PathBuf::from(""),
-        children: vec!(),
-    });
-
-    for line_result in BufReader::new(input).lines() {
-        let line = match line_result {
-            Ok(line) => line,
-            Err(err) => {
-                errors.push(err.to_string());
-                return Err(errors);
-            }
-        };
-
-        let star_idx = match line.find("*") { Some(i) => i, None => continue };
-
-        let start_bracket = star_idx + line[star_idx..].find("[").unwrap();
-        let end_bracket = start_bracket + line[start_bracket..].find("](").unwrap();
-        let start_paren = end_bracket + 1;
-        let end_paren = start_paren + line[start_paren..].find(")").unwrap();
-
-        let given_path = &line[start_paren + 1 .. end_paren];
-        let title = line[start_bracket + 1..end_bracket].to_string();
-        let indent = &line[..star_idx];
-
-        let path_from_root = match src.join(given_path).strip_prefix(src) {
-            Ok(p) => p.to_path_buf(),
-            Err(..) => {
-                errors.push(format!("paths in SUMMARY.md must be relative, \
-                                     but path '{}' for section '{}' is not.",
-                                     given_path, title));
-                PathBuf::new()
-            }
-        };
-        let path_to_root = PathBuf::from(&iter::repeat("../")
-                                         .take(path_from_root.components().count() - 1)
-                                         .collect::<String>());
-        let item = BookItem {
-            title: title,
-            path: path_from_root,
-            path_to_root: path_to_root,
-            children: vec!(),
-        };
-        let level = indent.chars().map(|c| -> usize {
-            match c {
-                ' ' => 1,
-                '\t' => 4,
-                _ => unreachable!()
-            }
-        }).sum::<usize>() / 4 + 1;
-
-        if level > stack.len() + 1 {
-            errors.push(format!("section '{}' is indented too deeply; \
-                                 found {}, expected {} or less",
-                                item.title, level, stack.len() + 1));
-        } else if level <= stack.len() {
-            collapse(&mut stack, &mut top_items, level);
-        }
-        stack.push(item)
-    }
-
-    if errors.is_empty() {
-        collapse(&mut stack, &mut top_items, 1);
-        Ok(Book { chapters: top_items })
-    } else {
-        Err(errors)
-    }
-}
diff --git a/src/rustbook/build.rs b/src/rustbook/build.rs
deleted file mode 100644 (file)
index 4b6d67d..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Implementation of the `build` subcommand, used to compile a book.
-
-use std::env;
-use std::fs::{self, File};
-use std::io::prelude::*;
-use std::io::{self, BufWriter};
-use std::path::{Path, PathBuf};
-use rustc_back::tempdir::TempDir;
-
-use subcommand::Subcommand;
-use term::Term;
-use error::{err, CliResult, CommandResult};
-use book;
-use book::{Book, BookItem};
-
-use rustdoc;
-
-struct Build;
-
-pub fn parse_cmd(name: &str) -> Option<Box<Subcommand>> {
-    if name == "build" {
-        Some(Box::new(Build))
-    } else {
-        None
-    }
-}
-
-fn write_toc(book: &Book, current_page: &BookItem, out: &mut Write) -> io::Result<()> {
-    fn walk_items(items: &[BookItem],
-                  section: &str,
-                  current_page: &BookItem,
-                  out: &mut Write) -> io::Result<()> {
-        for (i, item) in items.iter().enumerate() {
-            try!(walk_item(item, &format!("{}{}.", section, i + 1)[..], current_page, out));
-        }
-        Ok(())
-    }
-    fn walk_item(item: &BookItem,
-                 section: &str,
-                 current_page: &BookItem,
-                 out: &mut Write) -> io::Result<()> {
-        let class_string = if item.path == current_page.path {
-            "class='active'"
-        } else {
-            ""
-        };
-
-        try!(writeln!(out, "<li><a {} href='{}'><b>{}</b> {}</a>",
-                      class_string,
-                      current_page.path_to_root.join(&item.path).with_extension("html").display(),
-                      section,
-                      item.title));
-        if !item.children.is_empty() {
-            try!(writeln!(out, "<ul class='section'>"));
-            let _ = walk_items(&item.children[..], section, current_page, out);
-            try!(writeln!(out, "</ul>"));
-        }
-        try!(writeln!(out, "</li>"));
-
-        Ok(())
-    }
-
-    try!(writeln!(out, "<div id='toc' class='mobile-hidden'>"));
-    try!(writeln!(out, "<ul class='chapter'>"));
-    try!(walk_items(&book.chapters[..], "", &current_page, out));
-    try!(writeln!(out, "</ul>"));
-    try!(writeln!(out, "</div>"));
-
-    Ok(())
-}
-
-fn render(book: &Book, tgt: &Path) -> CliResult<()> {
-    let tmp = try!(TempDir::new("rustbook"));
-
-    for (_section, item) in book.iter() {
-        let out_path = match item.path.parent() {
-            Some(p) => tgt.join(p),
-            None => tgt.to_path_buf(),
-        };
-
-        let src;
-        if env::args().len() < 3 {
-            src = env::current_dir().unwrap().clone();
-        } else {
-            src = PathBuf::from(&env::args().nth(2).unwrap());
-        }
-        // preprocess the markdown, rerouting markdown references to html
-        // references
-        let mut markdown_data = String::new();
-        try!(File::open(&src.join(&item.path)).and_then(|mut f| {
-            f.read_to_string(&mut markdown_data)
-        }));
-        let preprocessed_path = tmp.path().join(item.path.file_name().unwrap());
-        {
-            let urls = markdown_data.replace(".md)", ".html)");
-            try!(File::create(&preprocessed_path).and_then(|mut f| {
-                f.write_all(urls.as_bytes())
-            }));
-        }
-
-        // write the prelude to a temporary HTML file for rustdoc inclusion
-        let prelude = tmp.path().join("prelude.html");
-        {
-            let mut buffer = BufWriter::new(try!(File::create(&prelude)));
-            try!(writeln!(&mut buffer, r#"
-                <div id="nav">
-                    <button id="toggle-nav">
-                        <span class="sr-only">Toggle navigation</span>
-                        <span class="bar"></span>
-                        <span class="bar"></span>
-                        <span class="bar"></span>
-                    </button>
-                </div>"#));
-            let _ = write_toc(book, &item, &mut buffer);
-            try!(writeln!(&mut buffer, "<div id='page-wrapper'>"));
-            try!(writeln!(&mut buffer, "<div id='page'>"));
-        }
-
-        // write the postlude to a temporary HTML file for rustdoc inclusion
-        let postlude = tmp.path().join("postlude.html");
-        {
-            let mut buffer = BufWriter::new(try!(File::create(&postlude)));
-            try!(writeln!(&mut buffer, "<script src='rustbook.js'></script>"));
-            try!(writeln!(&mut buffer, "<script src='playpen.js'></script>"));
-            try!(writeln!(&mut buffer, "</div></div>"));
-        }
-
-        try!(fs::create_dir_all(&out_path));
-
-        let rustdoc_args: &[String] = &[
-            "".to_string(),
-            preprocessed_path.display().to_string(),
-            format!("-o{}", out_path.display()),
-            format!("--html-before-content={}", prelude.display()),
-            format!("--html-after-content={}", postlude.display()),
-            format!("--markdown-playground-url=https://play.rust-lang.org"),
-            format!("--markdown-css={}", item.path_to_root.join("rustbook.css").display()),
-            "--markdown-no-toc".to_string(),
-        ];
-        let output_result = rustdoc::main_args(rustdoc_args);
-        if output_result != 0 {
-            let message = format!("Could not execute `rustdoc` with {:?}: {}",
-                                  rustdoc_args, output_result);
-            return Err(err(&message));
-        }
-    }
-
-    // create index.html from the root README
-    try!(fs::copy(&tgt.join("README.html"), &tgt.join("index.html")));
-
-    // Copy js for playpen
-    let mut playpen = try!(File::create(tgt.join("playpen.js")));
-    let js = include_bytes!("../librustdoc/html/static/playpen.js");
-    try!(playpen.write_all(js));
-    Ok(())
-}
-
-impl Subcommand for Build {
-    fn parse_args(&mut self, _: &[String]) -> CliResult<()> {
-        Ok(())
-    }
-    fn usage(&self) {}
-    fn execute(&mut self, term: &mut Term) -> CommandResult<()> {
-        let cwd = env::current_dir().unwrap();
-        let src;
-        let tgt;
-
-        if env::args().len() < 3 {
-            src = cwd.clone();
-        } else {
-            src = PathBuf::from(&env::args().nth(2).unwrap());
-        }
-
-        if env::args().len() < 4 {
-            tgt = cwd.join("_book");
-        } else {
-            tgt = PathBuf::from(&env::args().nth(3).unwrap());
-        }
-
-        // `_book` directory may already exist from previous runs. Check and
-        // delete it if it exists.
-        for entry in try!(fs::read_dir(&cwd)) {
-            let path = try!(entry).path();
-            if path == tgt { try!(fs::remove_dir_all(&tgt)) }
-        }
-        try!(fs::create_dir(&tgt));
-
-        // Copy static files
-        let css = include_bytes!("static/rustbook.css");
-        let js = include_bytes!("static/rustbook.js");
-
-        let mut css_file = try!(File::create(tgt.join("rustbook.css")));
-        try!(css_file.write_all(css));
-
-        let mut js_file = try!(File::create(tgt.join("rustbook.js")));
-        try!(js_file.write_all(js));
-
-
-        let mut summary = try!(File::open(&src.join("SUMMARY.md")));
-        match book::parse_summary(&mut summary, &src) {
-            Ok(book) => {
-                // execute rustdoc on the whole book
-                render(&book, &tgt)
-            }
-            Err(errors) => {
-                let n = errors.len();
-                for err in errors {
-                    term.err(&format!("error: {}", err)[..]);
-                }
-
-                Err(err(&format!("{} errors occurred", n)))
-            }
-        }
-    }
-}
diff --git a/src/rustbook/error.rs b/src/rustbook/error.rs
deleted file mode 100644 (file)
index e896dee..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Error handling utilities. WIP.
-
-use std::error::Error;
-use std::fmt;
-
-pub type CliError = Box<Error + 'static>;
-pub type CliResult<T> = Result<T, CliError>;
-
-pub type CommandError = Box<Error + 'static>;
-pub type CommandResult<T> = Result<T, CommandError>;
-
-pub fn err(s: &str) -> CliError {
-    #[derive(Debug)]
-    struct E(String);
-
-    impl Error for E {
-        fn description(&self) -> &str { &self.0 }
-    }
-    impl fmt::Display for E {
-        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-            self.0.fmt(f)
-        }
-    }
-
-    Box::new(E(s.to_string()))
-}
diff --git a/src/rustbook/help.rs b/src/rustbook/help.rs
deleted file mode 100644 (file)
index c90c2b9..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Implementation of the `help` subcommand. Currently just prints basic usage info.
-
-use subcommand::Subcommand;
-use error::CliResult;
-use error::CommandResult;
-use term::Term;
-
-struct Help;
-
-pub fn parse_cmd(name: &str) -> Option<Box<Subcommand>> {
-    match name {
-        "help" | "--help" | "-h" | "-?" => Some(Box::new(Help)),
-        _ => None
-    }
-}
-
-impl Subcommand for Help {
-    fn parse_args(&mut self, _: &[String]) -> CliResult<()> {
-        Ok(())
-    }
-    fn usage(&self) {}
-    fn execute(&mut self, _: &mut Term) -> CommandResult<()> {
-        usage();
-        Ok(())
-    }
-}
-
-pub fn usage() {
-    println!("Usage: rustbook <command> [<args>]");
-    println!("");
-    println!("The <command> must be one of:");
-    println!("  help    Print this message.");
-    println!("  build   Build the book in subdirectory _book");
-    println!("  serve   --NOT YET IMPLEMENTED--");
-    println!("  test    --NOT YET IMPLEMENTED--");
-}
diff --git a/src/rustbook/main.rs b/src/rustbook/main.rs
deleted file mode 100644 (file)
index bd4fc89..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![deny(warnings)]
-
-#![feature(iter_arith)]
-#![feature(rustc_private)]
-#![feature(rustdoc)]
-
-extern crate rustdoc;
-extern crate rustc_back;
-
-use std::env;
-use std::error::Error;
-use std::process;
-use std::sync::atomic::{AtomicIsize, ATOMIC_ISIZE_INIT, Ordering};
-use subcommand::Subcommand;
-use term::Term;
-
-mod term;
-mod error;
-mod book;
-
-mod subcommand;
-mod help;
-mod build;
-mod serve;
-mod test;
-
-static EXIT_STATUS: AtomicIsize = ATOMIC_ISIZE_INIT;
-
-pub fn main() {
-    let mut term = Term::new();
-    let cmd: Vec<_> = env::args().collect();
-
-    if cmd.len() <= 1 {
-        help::usage()
-    } else {
-        match subcommand::parse_name(&cmd[1][..]) {
-            Some(mut subcmd) => {
-                match subcmd.parse_args(&cmd[..cmd.len()-1]) {
-                    Ok(_) => {
-                        match subcmd.execute(&mut term) {
-                            Ok(_) => (),
-                            Err(err) => {
-                                term.err(&format!("error: {}", err));
-                            }
-                        }
-                    }
-                    Err(err) => {
-                        println!("{}", err.description());
-                        println!("");
-                        subcmd.usage();
-                    }
-                }
-            }
-            None => {
-                println!("Unrecognized command '{}'.", cmd[1]);
-                println!("");
-                help::usage();
-            }
-        }
-    }
-    process::exit(EXIT_STATUS.load(Ordering::SeqCst) as i32);
-}
diff --git a/src/rustbook/serve.rs b/src/rustbook/serve.rs
deleted file mode 100644 (file)
index 2fa7b7e..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Implementation of the `serve` subcommand. Just a stub for now.
-
-use subcommand::Subcommand;
-use error::CliResult;
-use error::CommandResult;
-use term::Term;
-
-struct Serve;
-
-pub fn parse_cmd(name: &str) -> Option<Box<Subcommand>> {
-    if name == "serve" {
-        Some(Box::new(Serve))
-    } else {
-        None
-    }
-}
-
-impl Subcommand for Serve {
-    fn parse_args(&mut self, _: &[String]) -> CliResult<()> {
-        Ok(())
-    }
-    fn usage(&self) {}
-    fn execute(&mut self, _: &mut Term) -> CommandResult<()> {
-        Ok(())
-    }
-}
diff --git a/src/rustbook/static/rustbook.css b/src/rustbook/static/rustbook.css
deleted file mode 100644 (file)
index ba0151f..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-/**
- * Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
- * file at the top-level directory of this distribution and at
- * http://rust-lang.org/COPYRIGHT.
- *
- * Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
- * http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
- * <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
- * option. This file may not be copied, modified, or distributed
- * except according to those terms.
- */
-
-@import url('../rust.css');
-
-body {
-    max-width: none;
-    font: 16px/1.6 'Source Serif Pro', Georgia, Times, 'Times New Roman', serif;
-    color: #333;
-}
-
-h1, h2, h3, h4, h5, h6 {
-    font-family: 'Open Sans', 'Fira Sans', 'Helvetica Neue', Helvetica, Arial, sans-serif;
-    font-weight: bold;
-    color: #333;
-}
-
-@media only screen {
-    #toc {
-        position: fixed;
-        top: 0;
-        left: 0;
-        bottom: 0;
-        width: 300px;
-        overflow-y: auto;
-        border-right: 1px solid #e8e8e8;
-        padding: 0 15px;
-        font-size: 14px;
-        background-color: #fafafa;
-        -webkit-overflow-scrolling: touch;
-    }
-
-    #page-wrapper {
-        position: absolute;
-        top: 0;
-        left: 300px;
-        right: 0;
-        padding: 0 15px;
-        -webkit-overflow-scrolling: touch;
-    }
-}
-
-@media only print {
-    #toc, #nav {
-        display: none;
-    }
-}
-
-@media only screen and (max-width: 1023px) {
-    #toc {
-        width: 100%;
-        top: 40px;
-    }
-
-    #page-wrapper {
-        top: 40px;
-        left: 0;
-    }
-
-    .mobile-hidden {
-        display: none;
-    }
-}
-
-#page {
-    margin: 0 auto;
-    max-width: 750px;
-    padding-bottom: 50px;
-}
-
-.chapter {
-    list-style: none;
-    padding-left: 0;
-    line-height: 30px;
-}
-
-.section {
-    list-style: none;
-    padding-left: 20px;
-    line-height: 40px;
-}
-
-.section li {
-    text-overflow: ellipsis;
-    overflow: hidden;
-    white-space: nowrap;
-}
-
-.chapter li a {
-    color: #333;
-    padding: 5px 0;
-}
-
-.chapter li a.active,
-.chapter li a:hover {
-    color: #008cff;
-    text-decoration: none;
-}
-
-#toggle-nav {
-    cursor: pointer;
-    margin-top: 5px;
-    width: 30px;
-    height: 30px;
-    background-color: #fff;
-    border: 1px solid #666;
-    border-radius: 3px;
-    padding: 3px 3px 0 3px;
-}
-
-.sr-only {
-    position: absolute;
-    width: 1px;
-    height: 1px;
-    margin: -1px;
-    padding: 0;
-    overflow: hidden;
-    clip: rect(0, 0, 0, 0);
-    border: 0;
-}
-
-.bar {
-    display: block;
-    background-color: #000;
-    border-radius: 2px;
-    width: 100%;
-    height: 2px;
-    margin: 2px 0 3px;
-    padding: 0;
-}
-
-pre {
-    padding: 11px;
-    overflow: auto;
-    font-size: 85%;
-    line-height: 1.45;
-    background-color: #f7f7f7;
-    border: 0;
-    border-radius: 3px;
-}
-
-.left {
-    float: left;
-}
-
-.right {
-    float: right;
-}
diff --git a/src/rustbook/static/rustbook.js b/src/rustbook/static/rustbook.js
deleted file mode 100644 (file)
index d8ab152..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/*jslint browser: true, es5: true */
-/*globals $: true, rootPath: true */
-
-document.addEventListener('DOMContentLoaded', function() {
-    'use strict';
-
-    document.getElementById('toggle-nav').onclick = function(e) {
-        var toc = document.getElementById('toc');
-        var pagewrapper = document.getElementById('page-wrapper');
-        toggleClass(toc, 'mobile-hidden');
-        toggleClass(pagewrapper, 'mobile-hidden');
-    };
-
-    function toggleClass(el, className) {
-        // from http://youmightnotneedjquery.com/
-        if (el.classList) {
-            el.classList.toggle(className);
-        } else {
-            var classes = el.className.split(' ');
-            var existingIndex = classes.indexOf(className);
-
-            if (existingIndex >= 0) {
-                classes.splice(existingIndex, 1);
-            } else {
-                classes.push(className);
-            }
-
-            el.className = classes.join(' ');
-        }
-    }
-
-    // The below code is used to add prev and next navigation links to the
-    // bottom of each of the sections.
-    // It works by extracting the current page based on the url and iterates
-    // over the menu links until it finds the menu item for the current page. We
-    // then create a copy of the preceding and following menu links and add the
-    // correct css class and insert them into the bottom of the page.
-    var toc = document.getElementById('toc').getElementsByTagName('a');
-    var href = document.location.pathname.split('/').pop();
-
-    if (href === 'index.html' || href === '') {
-        href = 'README.html';
-    }
-
-    for (var i = 0; i < toc.length; i++) {
-        if (toc[i].attributes.href.value.split('/').pop() === href) {
-            var nav = document.createElement('p');
-
-            if (i > 0) {
-                var prevNode = toc[i-1].cloneNode(true);
-                prevNode.className = 'left';
-                prevNode.setAttribute('rel', 'prev');
-                nav.appendChild(prevNode);
-            }
-
-            if (i < toc.length - 1) {
-                var nextNode = toc[i+1].cloneNode(true);
-                nextNode.className = 'right';
-                nextNode.setAttribute('rel', 'next');
-                nav.appendChild(nextNode);
-            }
-
-            document.getElementById('page').appendChild(nav);
-
-            break;
-        }
-    }
-});
diff --git a/src/rustbook/subcommand.rs b/src/rustbook/subcommand.rs
deleted file mode 100644 (file)
index a66c2b4..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Common API for all rustbook subcommands.
-
-use error::CliResult;
-use error::CommandResult;
-use term::Term;
-
-use help;
-use build;
-use serve;
-use test;
-
-pub trait Subcommand {
-    /// Mutate the subcommand by parsing its arguments.
-    ///
-    /// Returns `Err` on a parsing error.
-    fn parse_args(&mut self, args: &[String]) -> CliResult<()>;
-    /// Print the CLI usage information.
-    fn usage(&self);
-    /// Actually execute the subcommand.
-    fn execute(&mut self, term: &mut Term) -> CommandResult<()>;
-}
-
-/// Create a Subcommand object based on its name.
-pub fn parse_name(name: &str) -> Option<Box<Subcommand>> {
-    let cmds: [fn(&str) -> Option<Box<Subcommand>>; 4] = [help::parse_cmd,
-                                                          build::parse_cmd,
-                                                          serve::parse_cmd,
-                                                          test::parse_cmd];
-    for parser in &cmds {
-        let parsed = (*parser)(name);
-        if parsed.is_some() { return parsed }
-    }
-    None
-}
diff --git a/src/rustbook/term.rs b/src/rustbook/term.rs
deleted file mode 100644 (file)
index cdd25e6..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! An abstraction of the terminal. Eventually, provide color and
-//! verbosity support. For now, just a wrapper around stdout/stderr.
-
-use std::io;
-use std::io::prelude::*;
-use std::sync::atomic::Ordering;
-
-pub struct Term {
-    err: Box<Write + 'static>
-}
-
-impl Term {
-    pub fn new() -> Term {
-        Term {
-            err: Box::new(io::stderr())
-        }
-    }
-
-    pub fn err(&mut self, msg: &str) {
-        // swallow any errors
-        let _ = writeln!(&mut self.err, "{}", msg);
-        ::EXIT_STATUS.store(101, Ordering::SeqCst);
-    }
-}
diff --git a/src/rustbook/test.rs b/src/rustbook/test.rs
deleted file mode 100644 (file)
index 72df076..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Implementation of the `test` subcommand. Just a stub for now.
-
-use subcommand::Subcommand;
-use error::{err, CliResult, CommandResult};
-use term::Term;
-use book;
-
-use std::fs::File;
-use std::env;
-use std::process::Command;
-
-struct Test;
-
-pub fn parse_cmd(name: &str) -> Option<Box<Subcommand>> {
-    if name == "test" {
-        Some(Box::new(Test))
-    } else {
-        None
-    }
-}
-
-impl Subcommand for Test {
-    fn parse_args(&mut self, _: &[String]) -> CliResult<()> {
-        Ok(())
-    }
-    fn usage(&self) {}
-    fn execute(&mut self, term: &mut Term) -> CommandResult<()> {
-        let cwd = env::current_dir().unwrap();
-        let src = cwd.clone();
-
-        let mut summary = try!(File::open(&src.join("SUMMARY.md")));
-        match book::parse_summary(&mut summary, &src) {
-            Ok(book) => {
-                for (_, item) in book.iter() {
-                    let output_result = Command::new("rustdoc")
-                        .arg(&item.path)
-                        .arg("--test")
-                        .output();
-                    match output_result {
-                        Ok(output) => {
-                            if !output.status.success() {
-                                term.err(&format!("{}\n{}",
-                                         String::from_utf8_lossy(&output.stdout),
-                                         String::from_utf8_lossy(&output.stderr)));
-                                return Err(err("some tests failed"));
-                            }
-
-                        }
-                        Err(e) => {
-                            let message = format!("could not execute `rustdoc`: {}", e);
-                            return Err(err(&message))
-                        }
-                    }
-                }
-            }
-            Err(errors) => {
-                for err in errors {
-                    term.err(&err[..]);
-                }
-                return Err(err("there was an error"))
-            }
-        }
-        Ok(()) // lol
-    }
-}
index e4432720ab4f460889499e35e80fbe9ad0a21eef..a3420d75218fb1c9fd5d1277431881f65c742790 100644 (file)
@@ -2,7 +2,6 @@
 name = "rustc-main"
 version = "0.0.0"
 dependencies = [
- "rustbook 0.0.0",
  "rustc_back 0.0.0",
  "rustc_driver 0.0.0",
  "rustdoc 0.0.0",
@@ -46,10 +45,6 @@ dependencies = [
  "winapi 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "getopts"
-version = "0.0.0"
-
 [[package]]
 name = "graphviz"
 version = "0.0.0"
@@ -66,14 +61,6 @@ dependencies = [
  "serialize 0.0.0",
 ]
 
-[[package]]
-name = "rustbook"
-version = "0.0.0"
-dependencies = [
- "rustc_back 0.0.0",
- "rustdoc 0.0.0",
-]
-
 [[package]]
 name = "rustc"
 version = "0.0.0"
@@ -81,15 +68,13 @@ dependencies = [
  "arena 0.0.0",
  "flate 0.0.0",
  "fmt_macros 0.0.0",
- "getopts 0.0.0",
  "graphviz 0.0.0",
  "log 0.0.0",
  "rbml 0.0.0",
  "rustc_back 0.0.0",
  "rustc_bitflags 0.0.0",
+ "rustc_const_math 0.0.0",
  "rustc_data_structures 0.0.0",
- "rustc_front 0.0.0",
- "rustc_llvm 0.0.0",
  "serialize 0.0.0",
  "syntax 0.0.0",
 ]
@@ -99,8 +84,6 @@ name = "rustc_back"
 version = "0.0.0"
 dependencies = [
  "log 0.0.0",
- "rustc_front 0.0.0",
- "rustc_llvm 0.0.0",
  "serialize 0.0.0",
  "syntax 0.0.0",
 ]
@@ -116,7 +99,29 @@ dependencies = [
  "graphviz 0.0.0",
  "log 0.0.0",
  "rustc 0.0.0",
- "rustc_front 0.0.0",
+ "rustc_mir 0.0.0",
+ "syntax 0.0.0",
+]
+
+[[package]]
+name = "rustc_const_eval"
+version = "0.0.0"
+dependencies = [
+ "graphviz 0.0.0",
+ "log 0.0.0",
+ "rustc 0.0.0",
+ "rustc_back 0.0.0",
+ "rustc_const_math 0.0.0",
+ "serialize 0.0.0",
+ "syntax 0.0.0",
+]
+
+[[package]]
+name = "rustc_const_math"
+version = "0.0.0"
+dependencies = [
+ "log 0.0.0",
+ "serialize 0.0.0",
  "syntax 0.0.0",
 ]
 
@@ -134,13 +139,13 @@ version = "0.0.0"
 dependencies = [
  "arena 0.0.0",
  "flate 0.0.0",
- "getopts 0.0.0",
  "graphviz 0.0.0",
  "log 0.0.0",
  "rustc 0.0.0",
  "rustc_back 0.0.0",
  "rustc_borrowck 0.0.0",
- "rustc_front 0.0.0",
+ "rustc_const_eval 0.0.0",
+ "rustc_incremental 0.0.0",
  "rustc_lint 0.0.0",
  "rustc_llvm 0.0.0",
  "rustc_metadata 0.0.0",
@@ -149,6 +154,7 @@ dependencies = [
  "rustc_plugin 0.0.0",
  "rustc_privacy 0.0.0",
  "rustc_resolve 0.0.0",
+ "rustc_save_analysis 0.0.0",
  "rustc_trans 0.0.0",
  "rustc_typeck 0.0.0",
  "serialize 0.0.0",
@@ -157,11 +163,14 @@ dependencies = [
 ]
 
 [[package]]
-name = "rustc_front"
+name = "rustc_incremental"
 version = "0.0.0"
 dependencies = [
+ "graphviz 0.0.0",
  "log 0.0.0",
- "rustc_bitflags 0.0.0",
+ "rbml 0.0.0",
+ "rustc 0.0.0",
+ "rustc_data_structures 0.0.0",
  "serialize 0.0.0",
  "syntax 0.0.0",
 ]
@@ -173,7 +182,7 @@ dependencies = [
  "log 0.0.0",
  "rustc 0.0.0",
  "rustc_back 0.0.0",
- "rustc_front 0.0.0",
+ "rustc_const_eval 0.0.0",
  "syntax 0.0.0",
 ]
 
@@ -196,7 +205,7 @@ dependencies = [
  "rustc 0.0.0",
  "rustc_back 0.0.0",
  "rustc_bitflags 0.0.0",
- "rustc_front 0.0.0",
+ "rustc_const_math 0.0.0",
  "rustc_llvm 0.0.0",
  "serialize 0.0.0",
  "syntax 0.0.0",
@@ -210,8 +219,9 @@ dependencies = [
  "log 0.0.0",
  "rustc 0.0.0",
  "rustc_back 0.0.0",
+ "rustc_const_eval 0.0.0",
+ "rustc_const_math 0.0.0",
  "rustc_data_structures 0.0.0",
- "rustc_front 0.0.0",
  "syntax 0.0.0",
 ]
 
@@ -221,17 +231,13 @@ version = "0.0.0"
 dependencies = [
  "log 0.0.0",
  "rustc 0.0.0",
- "rustc_front 0.0.0",
+ "rustc_const_eval 0.0.0",
  "syntax 0.0.0",
 ]
 
 [[package]]
 name = "rustc_platform_intrinsics"
 version = "0.0.0"
-dependencies = [
- "rustc 0.0.0",
- "rustc_llvm 0.0.0",
-]
 
 [[package]]
 name = "rustc_plugin"
@@ -239,8 +245,8 @@ version = "0.0.0"
 dependencies = [
  "log 0.0.0",
  "rustc 0.0.0",
+ "rustc_back 0.0.0",
  "rustc_bitflags 0.0.0",
- "rustc_front 0.0.0",
  "rustc_metadata 0.0.0",
  "rustc_mir 0.0.0",
  "syntax 0.0.0",
@@ -252,7 +258,6 @@ version = "0.0.0"
 dependencies = [
  "log 0.0.0",
  "rustc 0.0.0",
- "rustc_front 0.0.0",
  "syntax 0.0.0",
 ]
 
@@ -264,7 +269,15 @@ dependencies = [
  "log 0.0.0",
  "rustc 0.0.0",
  "rustc_bitflags 0.0.0",
- "rustc_front 0.0.0",
+ "syntax 0.0.0",
+]
+
+[[package]]
+name = "rustc_save_analysis"
+version = "0.0.0"
+dependencies = [
+ "log 0.0.0",
+ "rustc 0.0.0",
  "syntax 0.0.0",
 ]
 
@@ -274,13 +287,14 @@ version = "0.0.0"
 dependencies = [
  "arena 0.0.0",
  "flate 0.0.0",
- "getopts 0.0.0",
  "graphviz 0.0.0",
  "log 0.0.0",
  "rustc 0.0.0",
  "rustc_back 0.0.0",
+ "rustc_const_eval 0.0.0",
+ "rustc_const_math 0.0.0",
  "rustc_data_structures 0.0.0",
- "rustc_front 0.0.0",
+ "rustc_incremental 0.0.0",
  "rustc_llvm 0.0.0",
  "rustc_mir 0.0.0",
  "rustc_platform_intrinsics 0.0.0",
@@ -297,7 +311,8 @@ dependencies = [
  "log 0.0.0",
  "rustc 0.0.0",
  "rustc_back 0.0.0",
- "rustc_front 0.0.0",
+ "rustc_const_eval 0.0.0",
+ "rustc_const_math 0.0.0",
  "rustc_platform_intrinsics 0.0.0",
  "syntax 0.0.0",
 ]
@@ -309,19 +324,17 @@ dependencies = [
  "arena 0.0.0",
  "build_helper 0.1.0",
  "gcc 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)",
- "getopts 0.0.0",
  "log 0.0.0",
  "rustc 0.0.0",
  "rustc_back 0.0.0",
+ "rustc_const_eval 0.0.0",
  "rustc_driver 0.0.0",
- "rustc_front 0.0.0",
  "rustc_lint 0.0.0",
  "rustc_metadata 0.0.0",
  "rustc_resolve 0.0.0",
  "rustc_trans 0.0.0",
  "serialize 0.0.0",
  "syntax 0.0.0",
- "test 0.0.0",
 ]
 
 [[package]]
@@ -338,7 +351,6 @@ dependencies = [
  "log 0.0.0",
  "rustc_bitflags 0.0.0",
  "serialize 0.0.0",
- "term 0.0.0",
 ]
 
 [[package]]
@@ -346,22 +358,10 @@ name = "syntax_ext"
 version = "0.0.0"
 dependencies = [
  "fmt_macros 0.0.0",
+ "log 0.0.0",
  "syntax 0.0.0",
 ]
 
-[[package]]
-name = "term"
-version = "0.0.0"
-
-[[package]]
-name = "test"
-version = "0.0.0"
-dependencies = [
- "getopts 0.0.0",
- "serialize 0.0.0",
- "term 0.0.0",
-]
-
 [[package]]
 name = "winapi"
 version = "0.2.2"
index 9fcefd9d3a4dd5e399304489270f3865c25908b9..7431c35efba01752e01dc51b9e9f6d22f9caf62c 100644 (file)
@@ -11,10 +11,6 @@ path = "rustc.rs"
 name = "rustdoc"
 path = "rustdoc.rs"
 
-[[bin]]
-name = "rustbook"
-path = "rustbook.rs"
-
 [profile.release]
 opt-level = 2
 
@@ -27,10 +23,9 @@ debug-assertions = false
 # All optional dependencies so the features passed to this Cargo.toml select
 # what should actually be built.
 [dependencies]
-rustbook = { path = "../rustbook", optional = true }
 rustc_back = { path = "../librustc_back" }
 rustc_driver = { path = "../librustc_driver" }
-rustdoc = { path = "../librustdoc", optional = true }
+rustdoc = { path = "../librustdoc" }
 
 [features]
 jemalloc = ["rustc_back/jemalloc"]
diff --git a/src/rustc/rustbook.rs b/src/rustc/rustbook.rs
deleted file mode 100644 (file)
index 6f78f78..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-extern crate rustbook;
-
-fn main() { rustbook::main() }
-
diff --git a/src/rustc/test_shim/Cargo.lock b/src/rustc/test_shim/Cargo.lock
new file mode 100644 (file)
index 0000000..73df56d
--- /dev/null
@@ -0,0 +1,23 @@
+[root]
+name = "test_shim"
+version = "0.1.0"
+dependencies = [
+ "test 0.0.0",
+]
+
+[[package]]
+name = "getopts"
+version = "0.0.0"
+
+[[package]]
+name = "term"
+version = "0.0.0"
+
+[[package]]
+name = "test"
+version = "0.0.0"
+dependencies = [
+ "getopts 0.0.0",
+ "term 0.0.0",
+]
+
diff --git a/src/rustc/test_shim/Cargo.toml b/src/rustc/test_shim/Cargo.toml
new file mode 100644 (file)
index 0000000..bf57665
--- /dev/null
@@ -0,0 +1,25 @@
+# This is a shim Cargo.toml which serves as a proxy for building libtest.
+#
+# The reason this shim exists is basically the same reason that `std_shim`
+# exists, and more documentation can be found in that `Cargo.toml` as to why.
+
+[package]
+name = "test_shim"
+version = "0.1.0"
+authors = ["The Rust Project Developers"]
+
+[lib]
+name = "test_shim"
+path = "lib.rs"
+
+[profile.release]
+opt-level = 2
+
+# These options are controlled from our rustc wrapper script, so turn them off
+# here and have them controlled elsewhere.
+[profile.dev]
+debug = false
+debug-assertions = false
+
+[dependencies]
+test = { path = "../../libtest" }
diff --git a/src/rustc/test_shim/lib.rs b/src/rustc/test_shim/lib.rs
new file mode 100644 (file)
index 0000000..a626c94
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// See comments in Cargo.toml for why this exists
index 4ebe49512d74b007f8c06fb7dd16672fe3f09a56..697b2d3f539651533a613685e4f0b321a28d3d60 100644 (file)
@@ -151,6 +151,16 @@ extern "C" void LLVMAddFunctionAttrStringValue(LLVMValueRef Fn, unsigned index,
   F->addAttributes(index, AttributeSet::get(F->getContext(), index, B));
 }
 
+extern "C" void LLVMRemoveFunctionAttributes(LLVMValueRef Fn, unsigned index, uint64_t Val) {
+  Function *A = unwrap<Function>(Fn);
+  const AttributeSet PAL = A->getAttributes();
+  AttrBuilder B(Val);
+  const AttributeSet PALnew =
+    PAL.removeAttributes(A->getContext(), index,
+                         AttributeSet::get(A->getContext(), index, B));
+  A->setAttributes(PALnew);
+}
+
 extern "C" void LLVMRemoveFunctionAttrString(LLVMValueRef fn, unsigned index, const char *Name) {
   Function *f = unwrap<Function>(fn);
   LLVMContext &C = f->getContext();
@@ -164,6 +174,13 @@ extern "C" void LLVMRemoveFunctionAttrString(LLVMValueRef fn, unsigned index, co
                                           to_remove));
 }
 
+// enable fpmath flag UnsafeAlgebra
+extern "C" void LLVMRustSetHasUnsafeAlgebra(LLVMValueRef V) {
+    if (auto I = dyn_cast<Instruction>(unwrap<Value>(V))) {
+        I->setHasUnsafeAlgebra(true);
+    }
+}
+
 extern "C" LLVMValueRef LLVMBuildAtomicLoad(LLVMBuilderRef B,
                                             LLVMValueRef source,
                                             const char* Name,
@@ -1167,3 +1184,21 @@ LLVMRustBuildInvoke(LLVMBuilderRef B,
     return LLVMBuildInvoke(B, Fn, Args, NumArgs, Then, Catch, Name);
 }
 #endif
+
+extern "C" void LLVMRustPositionBuilderAtStart(LLVMBuilderRef B, LLVMBasicBlockRef BB) {
+    auto point = unwrap(BB)->getFirstInsertionPt();
+    unwrap(B)->SetInsertPoint(unwrap(BB), point);
+}
+
+extern "C" void LLVMRustSetComdat(LLVMModuleRef M, LLVMValueRef V, const char *Name) {
+    Triple TargetTriple(unwrap(M)->getTargetTriple());
+    GlobalObject *GV = unwrap<GlobalObject>(V);
+    if (!TargetTriple.isOSBinFormatMachO()) {
+        GV->setComdat(unwrap(M)->getOrInsertComdat(Name));
+    }
+}
+
+extern "C" void LLVMRustUnsetComdat(LLVMValueRef V) {
+    GlobalObject *GV = unwrap<GlobalObject>(V);
+    GV->setComdat(nullptr);
+}
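The new extern "C" wrappers above are meant to be reached from rustc's LLVM bindings. The matching Rust declarations are not part of this excerpt; a hedged sketch of what they might look like (opaque types and exact signatures are assumptions, the real bindings live in librustc_llvm and may differ):

```rust
use std::os::raw::c_char;

// Placeholder opaque types standing in for the LLVM C-API handles.
pub enum ValueOpaque {}
pub enum ModuleOpaque {}
pub enum BuilderOpaque {}
pub enum BasicBlockOpaque {}
pub type ValueRef = *mut ValueOpaque;
pub type ModuleRef = *mut ModuleOpaque;
pub type BuilderRef = *mut BuilderOpaque;
pub type BasicBlockRef = *mut BasicBlockOpaque;

extern "C" {
    pub fn LLVMRemoveFunctionAttributes(f: ValueRef, index: u32, val: u64);
    pub fn LLVMRustSetHasUnsafeAlgebra(v: ValueRef);
    pub fn LLVMRustPositionBuilderAtStart(b: BuilderRef, bb: BasicBlockRef);
    pub fn LLVMRustSetComdat(m: ModuleRef, v: ValueRef, name: *const c_char);
    pub fn LLVMRustUnsetComdat(v: ValueRef);
}
```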
index bb34b6f38bfd7d07d9b470d83216f6d39e009880..d6e8852cfecfe7384489a919df0638e7b7b69e12 100644 (file)
@@ -1,4 +1,4 @@
 # If this file is modified, then llvm will be forcibly cleaned and then rebuilt.
 # The actual contents of this file do not matter, but to trigger a change on the
 # build bots then the contents should be changed so git updates the mtime.
-2016-02-20
+2016-03-18
index 9b3390d7747285eec0784bf04860f863efb4bce3..61dfd4f8f867d42280e1b53380cc47dc4beff782 100644 (file)
@@ -1,4 +1,15 @@
+S 2016-03-18 235d774
+  linux-i386 0e0e4448b80d0a12b75485795244bb3857a0a7ef
+  linux-x86_64 1273b6b6aed421c9e40c59f366d0df6092ec0397
+  macos-i386 9f9c0b4a2db09acbce54b792fb8839a735585565
+  macos-x86_64 52570f6fd915b0210a9be98cfc933148e16a75f8
+  winnt-i386 7703869608cc4192b8f1943e51b19ba1a03c0110
+  winnt-x86_64 8512b5ecc0c53a2cd3552e4f5688577de95cd978
+  openbsd-x86_64 c5b6feda38138a12cd5c05574b585dadebbb5e87
+  freebsd-x86_64 390b9a9f60f3d0d6a52c04d939a0355f572d03b3
+
 S 2016-02-17 4d3eebf
+  dragonfly-x86_64 765bb5820ad406e966ec0ac51c8070b656459b02
   linux-i386 5f194aa7628c0703f0fd48adc4ec7f3cc64b98c7
   linux-x86_64 d29b7607d13d64078b6324aec82926fb493f59ba
   macos-i386 4c8e42dd649e247f3576bf9dfa273327b4907f9c
@@ -6,6 +17,8 @@ S 2016-02-17 4d3eebf
   winnt-i386 0c336d794a65f8e285c121866c7d59aa2dd0d1e1
   winnt-x86_64 27e75b1bf99770b3564bcebd7f3230be01135a92
   openbsd-x86_64 ac957c6b84de2bd67f01df085d9ea515f96e22f3
+  freebsd-i386 4e2af0b34eb335e173aebff543be693724a956c2
+  freebsd-x86_64 f38991fbb81c1cd8d0bbda396f98f13a55b42804
 
 S 2015-12-18 3391630
   bitrig-x86_64 6476e1562df02389b55553b4c88b1f4fd121cd40
index bd23fb88217080d03513fa505f448a2d7148e402..4ba0ccdba9bf744ceb064b22c2240bdabf7efeac 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-trait me {
+pub trait me {
     fn me(&self) -> usize;
 }
 impl me for usize { fn me(&self) -> usize { *self } }
diff --git a/src/test/auxiliary/cgu_test.rs b/src/test/auxiliary/cgu_test.rs
new file mode 100644 (file)
index 0000000..7c88d3d
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+// compile-flags: --crate-type=lib
+
+pub fn id<T>(t: T) -> T {
+  t
+}
diff --git a/src/test/auxiliary/cgu_test_a.rs b/src/test/auxiliary/cgu_test_a.rs
new file mode 100644 (file)
index 0000000..0f0d1cd
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+// compile-flags: -Ccodegen-units=2 --crate-type=lib
+
+extern crate cgu_test;
+
+pub mod a {
+    pub fn a() {
+        ::cgu_test::id(0);
+    }
+}
+pub mod b {
+    pub fn a() {
+        ::cgu_test::id(0);
+    }
+}
diff --git a/src/test/auxiliary/cgu_test_b.rs b/src/test/auxiliary/cgu_test_b.rs
new file mode 100644 (file)
index 0000000..0f0d1cd
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+// compile-flags: -Ccodegen-units=2 --crate-type=lib
+
+extern crate cgu_test;
+
+pub mod a {
+    pub fn a() {
+        ::cgu_test::id(0);
+    }
+}
+pub mod b {
+    pub fn a() {
+        ::cgu_test::id(0);
+    }
+}
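
The two auxiliary crates above are intentionally identical: each is compiled with two codegen units and instantiates the generic id function from cgu_test. The run-pass test that links them is not part of this hunk; a hypothetical consumer would look roughly as follows (only the aux file names are taken from the patch).

    // Hypothetical consumer sketch, not a file from this patch.
    // aux-build:cgu_test.rs
    // aux-build:cgu_test_a.rs
    // aux-build:cgu_test_b.rs

    extern crate cgu_test_a;
    extern crate cgu_test_b;

    fn main() {
        // Both crates instantiate cgu_test::id::<i32>; the interesting part is that
        // the duplicated instantiations link together without symbol conflicts.
        cgu_test_a::a::a();
        cgu_test_b::b::a();
    }
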
diff --git a/src/test/auxiliary/derive-no-std.rs b/src/test/auxiliary/derive-no-std.rs
new file mode 100644 (file)
index 0000000..f083e10
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+
+#![crate_type = "rlib"]
+#![no_std]
+
+// Issue #16803
+
+#[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord,
+         Debug, Default, Copy)]
+pub struct Foo {
+    pub x: u32,
+}
+
+#[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord,
+         Debug, Copy)]
+pub enum Bar {
+    Qux,
+    Quux(u32),
+}
+
+#[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord,
+         Debug, Copy)]
+pub enum Void {}
+#[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord,
+         Debug, Copy)]
+pub struct Empty;
+#[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord,
+         Debug, Copy)]
+pub struct AlsoEmpty {}
+
index 16ef965e0dbd5421262e3ff8d7802349c6d3227a..b5234af937b0c08b65385d94606ab902503a9b50 100644 (file)
 #![feature(box_syntax)]
 
 #[macro_use] extern crate rustc;
-extern crate rustc_front;
 extern crate rustc_plugin;
+extern crate rustc_const_math;
 extern crate syntax;
 
-use rustc::mir::transform::MirPass;
+use rustc::mir::transform::{self, MirPass};
 use rustc::mir::repr::{Mir, Literal};
 use rustc::mir::visit::MutVisitor;
-use rustc::middle::infer::InferCtxt;
-use rustc::middle::const_eval::ConstVal;
+use rustc::ty;
+use rustc::middle::const_val::ConstVal;
+use rustc_const_math::ConstInt;
 use rustc_plugin::Registry;
 
+use syntax::ast::NodeId;
+
 struct Pass;
 
-impl MirPass for Pass {
-    fn run_on_mir<'a, 'tcx>(&mut self, mir: &mut Mir<'tcx>, _: &InferCtxt<'a, 'tcx>) {
+impl transform::Pass for Pass {}
+impl<'tcx> MirPass<'tcx> for Pass {
+    fn run_pass(&mut self, _: &ty::TyCtxt<'tcx>, _: NodeId, mir: &mut Mir<'tcx>) {
         Visitor.visit_mir(mir)
     }
 }
@@ -37,8 +41,10 @@ struct Visitor;
 
 impl<'tcx> MutVisitor<'tcx> for Visitor {
     fn visit_literal(&mut self, literal: &mut Literal<'tcx>) {
-        if let Literal::Value { value: ConstVal::Int(ref mut i @ 11) } = *literal {
-            *i = 42;
+        if let Literal::Value { ref mut value } = *literal {
+            if let ConstVal::Integral(ConstInt::I32(ref mut i @ 11)) = *value {
+                *i = 42;
+            }
         }
     }
 }
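
The plugin above only defines the MIR pass; it still has to be registered through the Registry it imports. The registrar function is outside this hunk, so the shape below is an assumption about the plugin API of this era (in particular, register_mir_pass is not confirmed by the diff).

    // Assumed registration sketch; Pass is the type defined in the hunk above.
    #[plugin_registrar]
    pub fn plugin_registrar(reg: &mut Registry) {
        reg.register_mir_pass(Box::new(Pass));
    }
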
diff --git a/src/test/auxiliary/empty.rs b/src/test/auxiliary/empty.rs
new file mode 100644 (file)
index 0000000..3066947
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
index 92239ce55981c808dfd0991c4a7eaa2906377223..460d0a0088ce2918813c5374d8558d63a99409fd 100644 (file)
@@ -9,6 +9,7 @@
 // except according to those terms.
 
 #![crate_name="foreign_lib"]
+
 #![feature(libc)]
 
 pub mod rustrt {
@@ -19,3 +20,29 @@ pub mod rustrt {
         pub fn rust_get_test_int() -> libc::intptr_t;
     }
 }
+
+pub mod rustrt2 {
+    extern crate libc;
+
+    extern {
+        pub fn rust_get_test_int() -> libc::intptr_t;
+    }
+}
+
+pub mod rustrt3 {
+    // Different type, but same ABI (on all supported platforms).
+    // Ensures that we don't ICE or trigger LLVM asserts when
+    // importing the same symbol under different types.
+    // See https://github.com/rust-lang/rust/issues/32740.
+    extern {
+        pub fn rust_get_test_int() -> *const u8;
+    }
+}
+
+pub fn local_uses() {
+    unsafe {
+        let x = rustrt::rust_get_test_int();
+        assert_eq!(x, rustrt2::rust_get_test_int());
+        assert_eq!(x as *const _, rustrt3::rust_get_test_int());
+    }
+}
index 0a921c8f5b3a0b1eb8fe6110786cf19a49977ba5..044bb606b40e224b15a0dd6766a72f4e1fec9155 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+#![feature(specialization)]
+
 // Common code used for tests that model the Fn/FnMut/FnOnce hierarchy.
 
 pub trait Go {
@@ -37,7 +39,7 @@ pub fn go_once<G:GoOnce>(this: G, arg: isize) {
 impl<G> GoMut for G
     where G : Go
 {
-    fn go_mut(&mut self, arg: isize) {
+    default fn go_mut(&mut self, arg: isize) {
         go(&*self, arg)
     }
 }
@@ -45,7 +47,7 @@ impl<G> GoMut for G
 impl<G> GoOnce for G
     where G : GoMut
 {
-    fn go_once(mut self, arg: isize) {
+    default fn go_once(mut self, arg: isize) {
         go_mut(&mut self, arg)
     }
 }
index 5f1bd7ab52235bc3bc9c2110c12d5a6afffd2a7d..e21e6ad204384978da2a27dab911a7af65d58309 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// compile-flags: -Cmetadata=aux
+
 pub trait Foo {
     fn bar(&self);
     fn foo(&mut self) {}
index 0bb2133c833c70aa78e7d7119ff7b0c43f75b2ec..ecddfe99b3be7cbe04eb2d097f7e66f7fd59be9d 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// compile-flags: -Cmetadata=aux
+
 pub trait Foo {
     #[doc(hidden)]
     fn foo(&self) {}
index 9e42dbfbc6be41f5d5b50035a792ea249197a9b7..145b4df6299956fcc453e159df0cad4b086436e7 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// compile-flags: -Cmetadata=aux
+
 #![doc(html_root_url = "http://example.com/")]
 
 /// dox
index d3a860357422c2ad3046342b2b374461ab0d2e03..644d1634e9d9c40c8cf7a1087c89d8221d3c468c 100644 (file)
@@ -8,6 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// compile-flags: -Cmetadata=aux
 
 #![doc(html_root_url = "http://example.com")]
 
diff --git a/src/test/auxiliary/issue-17718-aux.rs b/src/test/auxiliary/issue-17718-aux.rs
new file mode 100644 (file)
index 0000000..373fc04
--- /dev/null
@@ -0,0 +1,24 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(const_fn)]
+
+use std::sync::atomic;
+
+pub const C1: usize = 1;
+pub const C2: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
+pub const C3: fn() = foo;
+pub const C4: usize = C1 * C1 + C1 / C1;
+pub const C5: &'static usize = &C4;
+
+pub static S1: usize = 3;
+pub static S2: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
+
+fn foo() {}
diff --git a/src/test/auxiliary/issue-17718.rs b/src/test/auxiliary/issue-17718.rs
deleted file mode 100644 (file)
index 373fc04..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(const_fn)]
-
-use std::sync::atomic;
-
-pub const C1: usize = 1;
-pub const C2: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
-pub const C3: fn() = foo;
-pub const C4: usize = C1 * C1 + C1 / C1;
-pub const C5: &'static usize = &C4;
-
-pub static S1: usize = 3;
-pub static S2: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
-
-fn foo() {}
index 7403bcf4afb31694fdffcbb238ca9236217c6a7b..2c9271202a650bd20edf2571afa33a7cb49247b8 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// compile-flags: -Cmetadata=aux
+
 use std::ops::Deref;
 
 pub struct Foo;
index 150d8018f088831ef4d4a24c0862983b50e3ec32..815b78a91d9af7ef7e10582d5d5a602097c3c06f 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// compile-flags: -Cmetadata=aux
+
 pub trait Trait {
     type Output;
 }
index aea8b429d9f753b936bc263f71d0a391cb3346bd..2ec761fad96b5cb8df95e579e7bc272f18c748aa 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// compile-flags: -Cmetadata=aux
+
 pub trait Deref {
     type Target: ?Sized;
 
index 6d6046cc7bfc2ca1f4028a0f11cb3c7673e3c842..e906311e3aeb42a75d5bd515435651ee80197ab3 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// compile-flags: -Cmetadata=aux
+
 pub trait Foo {
     type Bar;
     fn foo(&self) {}
index ada6c6925025de6c81e1f403ea1a28bab0b577c4..f618edec5985e594d31ea342a3063d91a4c23129 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// compile-flags: -Cmetadata=aux
+
 pub struct Foo;
 
 impl Foo {
index 554b580ae2b1fc71f477eaddc0b4b8864e717f4c..35a37e27d912fb17022777e94966c74899f9872c 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// compile-flags: -Cmetadata=aux
+
 pub mod foo {
 
     pub trait Foo {}
index e551d623ae7dba1530584b4b9f4e87004a71d434..25de698cad10e2308f0b1a5c9f2678d48b35159f 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// compile-flags: -Cmetadata=aux
+
 #![feature(const_fn)]
 
 pub const fn foo() {}
index 4a9e6126fc6023324cd1137dd86c6c72d3c4c1fe..63c79f875efb2c73fb462e18e5f63b0d1f1d03cb 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// compile-flags: -Cmetadata=aux
+
 pub struct Foo;
 
 #[doc(hidden)]
index 78d0394a6e5ad31dbff6090aaab5409e67b2d0bd..4cb846b51868fc9d62ded630170e5a3e62d817af 100644 (file)
@@ -14,26 +14,26 @@ pub mod testtypes {
     use std::any::TypeId;
 
     pub fn type_ids() -> Vec<TypeId> {
-        let mut ids = vec!();
-        ids.push(TypeId::of::<FooNil>());
-        ids.push(TypeId::of::<FooBool>());
-        ids.push(TypeId::of::<FooInt>());
-        ids.push(TypeId::of::<FooUint>());
-        ids.push(TypeId::of::<FooFloat>());
-        ids.push(TypeId::of::<FooEnum>());
-        ids.push(TypeId::of::<FooUniq>());
-        ids.push(TypeId::of::<FooPtr>());
-        ids.push(TypeId::of::<&'static FooTrait>());
-        ids.push(TypeId::of::<FooStruct>());
-        ids.push(TypeId::of::<FooTuple>());
-        ids
+        vec![
+            TypeId::of::<FooBool>(),
+            TypeId::of::<FooInt>(),
+            TypeId::of::<FooUint>(),
+            TypeId::of::<FooFloat>(),
+            TypeId::of::<FooStr>(),
+            TypeId::of::<FooArray>(),
+            TypeId::of::<FooSlice>(),
+            TypeId::of::<FooBox>(),
+            TypeId::of::<FooPtr>(),
+            TypeId::of::<FooRef>(),
+            TypeId::of::<FooFnPtr>(),
+            TypeId::of::<FooNil>(),
+            TypeId::of::<FooTuple>(),
+            TypeId::of::<FooTrait>(),
+            TypeId::of::<FooStruct>(),
+            TypeId::of::<FooEnum>()
+        ]
     }
 
-    // Tests ty_nil
-    pub type FooNil = ();
-
-    // Skipping ty_bot
-
     // Tests TyBool
     pub type FooBool = bool;
 
@@ -49,25 +49,26 @@ pub mod testtypes {
     // Tests TyFloat (does not test all variants of FloatTy)
     pub type FooFloat = f64;
 
-    // For TyStr, what kind of string should I use? &'static str? String? Raw str?
+    // Tests TyStr
+    pub type FooStr = str;
 
-    // Tests TyEnum
-    pub enum FooEnum {
-        VarA(usize),
-        VarB(usize, usize)
-    }
+    // Tests TyArray
+    pub type FooArray = [u8; 1];
 
-    // Tests TyBox (of u8)
-    pub type FooUniq = Box<u8>;
+    // Tests TySlice
+    pub type FooSlice = [u8];
 
-    // As with TyStr, what type should be used for TyArray?
+    // Tests TyBox (of u8)
+    pub type FooBox = Box<u8>;
 
     // Tests TyRawPtr
     pub type FooPtr = *const u8;
 
-    // Skipping TyRef
+    // Tests TyRef
+    pub type FooRef = &'static u8;
 
-    // Skipping TyBareFn (how do you get a bare function type, rather than proc or closure?)
+    // Tests TyFnPtr
+    pub type FooFnPtr = fn(u8) -> bool;
 
     // Tests TyTrait
     pub trait FooTrait {
@@ -80,14 +81,17 @@ pub mod testtypes {
         foo_field: usize
     }
 
+    // Tests TyEnum
+    pub enum FooEnum {
+        VarA(usize),
+        VarB(usize, usize)
+    }
+
     // Tests TyTuple
+    pub type FooNil = ();
     pub type FooTuple = (u8, i8, bool);
 
-    // Skipping ty_param
-
-    // Skipping ty_self
-
-    // Skipping ty_self
+    // Skipping TyParam
 
     // Skipping TyInfer
 
index ce3360787136a099ed5a38adcb0544323bf8ffc8..09a2e8ecd876d4c5d074873068896c944a57f75a 100644 (file)
@@ -8,13 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(dynamic_lib)]
+#![feature(rustc_private)]
 
 // We're testing linkage visibility; the compiler warns us, but we want to
 // do the runtime check that these functions aren't exported.
 #![allow(private_no_mangle_fns)]
 
-use std::dynamic_lib::DynamicLibrary;
+extern crate rustc_back;
+
+use rustc_back::dynamic_lib::DynamicLibrary;
 
 #[no_mangle]
 pub fn foo() { bar(); }
index 1933188ccbf0287de2f5ee7cbc8f215603b84f57..a424517da12f0c2dbda6a5b4e45eb48400faf65f 100644 (file)
 #![feature(box_syntax)]
 
 #[macro_use] extern crate rustc;
-extern crate rustc_front;
 extern crate rustc_plugin;
 extern crate syntax;
 
 use rustc::lint::{LateContext, LintContext, LintPass, LateLintPass, LateLintPassObject, LintArray};
 use rustc_plugin::Registry;
-use rustc_front::hir;
+use rustc::hir;
 use syntax::attr;
 
 declare_lint!(CRATE_NOT_OKAY, Warn, "crate not marked with #![crate_okay]");
index 98ed86bb390f3e397773e9e103a73d8b019e1ff8..1e9a77724a8ebdfd46a5eabcc407b98f34ee46ac 100644 (file)
 #![feature(plugin_registrar)]
 #![feature(box_syntax, rustc_private)]
 
-extern crate rustc_front;
-
 // Load rustc as a plugin to get macros
 #[macro_use]
 extern crate rustc;
 extern crate rustc_plugin;
 
-use rustc_front::hir;
+use rustc::hir;
 use rustc::lint::{LateContext, LintContext, LintPass, LateLintPass, LateLintPassObject, LintArray};
 use rustc_plugin::Registry;
 
diff --git a/src/test/auxiliary/reexp_stripped.rs b/src/test/auxiliary/reexp_stripped.rs
new file mode 100644 (file)
index 0000000..2b061e3
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub use private::Quz;
+pub use hidden::Bar;
+
+mod private {
+    pub struct Quz;
+}
+
+#[doc(hidden)]
+pub mod hidden {
+    pub struct Bar;
+}
diff --git a/src/test/auxiliary/specialization_cross_crate.rs b/src/test/auxiliary/specialization_cross_crate.rs
new file mode 100644 (file)
index 0000000..1d23533
--- /dev/null
@@ -0,0 +1,82 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(specialization)]
+
+pub trait Foo {
+    fn foo(&self) -> &'static str;
+}
+
+impl<T> Foo for T {
+    default fn foo(&self) -> &'static str {
+        "generic"
+    }
+}
+
+impl<T: Clone> Foo for T {
+    default fn foo(&self) -> &'static str {
+        "generic Clone"
+    }
+}
+
+impl<T, U> Foo for (T, U) where T: Clone, U: Clone {
+    default fn foo(&self) -> &'static str {
+        "generic pair"
+    }
+}
+
+impl<T: Clone> Foo for (T, T) {
+    default fn foo(&self) -> &'static str {
+        "generic uniform pair"
+    }
+}
+
+impl Foo for (u8, u32) {
+    default fn foo(&self) -> &'static str {
+        "(u8, u32)"
+    }
+}
+
+impl Foo for (u8, u8) {
+    default fn foo(&self) -> &'static str {
+        "(u8, u8)"
+    }
+}
+
+impl<T: Clone> Foo for Vec<T> {
+    default fn foo(&self) -> &'static str {
+        "generic Vec"
+    }
+}
+
+impl Foo for Vec<i32> {
+    fn foo(&self) -> &'static str {
+        "Vec<i32>"
+    }
+}
+
+impl Foo for String {
+    fn foo(&self) -> &'static str {
+        "String"
+    }
+}
+
+impl Foo for i32 {
+    fn foo(&self) -> &'static str {
+        "i32"
+    }
+}
+
+pub trait MyMarker {}
+impl<T: Clone + MyMarker> Foo for T {
+    default fn foo(&self) -> &'static str {
+        "generic Clone + MyMarker"
+    }
+}
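
A downstream crate exercising the impls above gets the most specific applicable implementation for each receiver type. The sketch below is a hypothetical consumer, not part of the patch, built with the same nightly toolchain; only the trait needs to be in scope.

    // Hypothetical consumer of the auxiliary crate above.
    extern crate specialization_cross_crate;
    use specialization_cross_crate::Foo;

    fn main() {
        assert_eq!(0u8.foo(), "generic Clone");    // u8 is Clone; no tighter impl applies
        assert_eq!(vec![1i32].foo(), "Vec<i32>");  // the fully concrete impl wins
        assert_eq!("hi".to_string().foo(), "String");
    }
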
diff --git a/src/test/auxiliary/specialization_cross_crate_defaults.rs b/src/test/auxiliary/specialization_cross_crate_defaults.rs
new file mode 100755 (executable)
index 0000000..b62d80b
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+#![feature(specialization)]
+
+// First, test only use of explicit `default` items:
+
+pub trait Foo {
+    fn foo(&self) -> bool;
+}
+
+impl<T> Foo for T {
+    default fn foo(&self) -> bool { false }
+}
+
+impl Foo for i32 {}
+
+impl Foo for i64 {
+    fn foo(&self) -> bool { true }
+}
+
+// Next, test mixture of explicit `default` and provided methods:
+
+pub trait Bar {
+    fn bar(&self) -> i32 { 0 }
+}
+
+impl<T> Bar for T {} // use the provided method
+
+impl Bar for i32 {
+    fn bar(&self) -> i32 { 1 }
+}
+impl<'a> Bar for &'a str {}
+
+impl<T> Bar for Vec<T> {
+    default fn bar(&self) -> i32 { 2 }
+}
+impl Bar for Vec<i32> {}
+impl Bar for Vec<i64> {
+    fn bar(&self) -> i32 { 3 }
+}
index fe1dc9d1c8cae3e35ac31b9ed07eba91a78f3dc9..5fea97da03ee38f5cdf2107610c6d979d8f31338 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-struct A {
+pub struct A {
     a: isize,
     pub b: isize,
 }
diff --git a/src/test/auxiliary/typeid-intrinsic-aux1.rs b/src/test/auxiliary/typeid-intrinsic-aux1.rs
new file mode 100644 (file)
index 0000000..388d323
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(core)]
+
+use std::any::{Any, TypeId};
+
+pub struct A;
+pub struct B(Option<A>);
+pub struct C(Option<isize>);
+pub struct D(Option<&'static str>);
+pub struct E(Result<&'static str, isize>);
+
+pub type F = Option<isize>;
+pub type G = usize;
+pub type H = &'static str;
+
+pub unsafe fn id_A() -> TypeId { TypeId::of::<A>() }
+pub unsafe fn id_B() -> TypeId { TypeId::of::<B>() }
+pub unsafe fn id_C() -> TypeId { TypeId::of::<C>() }
+pub unsafe fn id_D() -> TypeId { TypeId::of::<D>() }
+pub unsafe fn id_E() -> TypeId { TypeId::of::<E>() }
+pub unsafe fn id_F() -> TypeId { TypeId::of::<F>() }
+pub unsafe fn id_G() -> TypeId { TypeId::of::<G>() }
+pub unsafe fn id_H() -> TypeId { TypeId::of::<H>() }
+
+pub unsafe fn foo<T: Any>() -> TypeId { TypeId::of::<T>() }
diff --git a/src/test/auxiliary/typeid-intrinsic-aux2.rs b/src/test/auxiliary/typeid-intrinsic-aux2.rs
new file mode 100644 (file)
index 0000000..3ad307f
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(core)]
+
+use std::any::{Any, TypeId};
+
+pub struct A;
+pub struct B(Option<A>);
+pub struct C(Option<isize>);
+pub struct D(Option<&'static str>);
+pub struct E(Result<&'static str, isize>);
+
+pub type F = Option<isize>;
+pub type G = usize;
+pub type H = &'static str;
+
+pub unsafe fn id_A() -> TypeId { TypeId::of::<A>() }
+pub unsafe fn id_B() -> TypeId { TypeId::of::<B>() }
+pub unsafe fn id_C() -> TypeId { TypeId::of::<C>() }
+pub unsafe fn id_D() -> TypeId { TypeId::of::<D>() }
+pub unsafe fn id_E() -> TypeId { TypeId::of::<E>() }
+pub unsafe fn id_F() -> TypeId { TypeId::of::<F>() }
+pub unsafe fn id_G() -> TypeId { TypeId::of::<G>() }
+pub unsafe fn id_H() -> TypeId { TypeId::of::<H>() }
+
+pub unsafe fn foo<T:Any>() -> TypeId { TypeId::of::<T>() }
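
The two auxiliary crates deliberately define structurally identical items; the run-pass test that compares their TypeIds lives elsewhere in the tree. As a hypothetical sketch of what such a comparison checks (the extern crate link names are assumptions):

    // Hypothetical consumer sketch.
    extern crate typeid_intrinsic_aux1 as a1;
    extern crate typeid_intrinsic_aux2 as a2;

    fn main() {
        unsafe {
            // Nominal types defined separately in each crate get distinct TypeIds...
            assert!(a1::id_A() != a2::id_A());
            // ...while aliases of the same std type agree across crates.
            assert_eq!(a1::id_F(), a2::id_F());
        }
    }
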
diff --git a/src/test/auxiliary/typeid-intrinsic.rs b/src/test/auxiliary/typeid-intrinsic.rs
deleted file mode 100644 (file)
index 388d323..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(core)]
-
-use std::any::{Any, TypeId};
-
-pub struct A;
-pub struct B(Option<A>);
-pub struct C(Option<isize>);
-pub struct D(Option<&'static str>);
-pub struct E(Result<&'static str, isize>);
-
-pub type F = Option<isize>;
-pub type G = usize;
-pub type H = &'static str;
-
-pub unsafe fn id_A() -> TypeId { TypeId::of::<A>() }
-pub unsafe fn id_B() -> TypeId { TypeId::of::<B>() }
-pub unsafe fn id_C() -> TypeId { TypeId::of::<C>() }
-pub unsafe fn id_D() -> TypeId { TypeId::of::<D>() }
-pub unsafe fn id_E() -> TypeId { TypeId::of::<E>() }
-pub unsafe fn id_F() -> TypeId { TypeId::of::<F>() }
-pub unsafe fn id_G() -> TypeId { TypeId::of::<G>() }
-pub unsafe fn id_H() -> TypeId { TypeId::of::<H>() }
-
-pub unsafe fn foo<T: Any>() -> TypeId { TypeId::of::<T>() }
diff --git a/src/test/auxiliary/typeid-intrinsic2.rs b/src/test/auxiliary/typeid-intrinsic2.rs
deleted file mode 100644 (file)
index 3ad307f..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(core)]
-
-use std::any::{Any, TypeId};
-
-pub struct A;
-pub struct B(Option<A>);
-pub struct C(Option<isize>);
-pub struct D(Option<&'static str>);
-pub struct E(Result<&'static str, isize>);
-
-pub type F = Option<isize>;
-pub type G = usize;
-pub type H = &'static str;
-
-pub unsafe fn id_A() -> TypeId { TypeId::of::<A>() }
-pub unsafe fn id_B() -> TypeId { TypeId::of::<B>() }
-pub unsafe fn id_C() -> TypeId { TypeId::of::<C>() }
-pub unsafe fn id_D() -> TypeId { TypeId::of::<D>() }
-pub unsafe fn id_E() -> TypeId { TypeId::of::<E>() }
-pub unsafe fn id_F() -> TypeId { TypeId::of::<F>() }
-pub unsafe fn id_G() -> TypeId { TypeId::of::<G>() }
-pub unsafe fn id_H() -> TypeId { TypeId::of::<H>() }
-
-pub unsafe fn foo<T:Any>() -> TypeId { TypeId::of::<T>() }
diff --git a/src/test/auxiliary/variant-struct.rs b/src/test/auxiliary/variant-struct.rs
new file mode 100644 (file)
index 0000000..d846c0a
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub enum Foo {
+    Bar {
+        qux: (),
+    }
+}
index 43852a4e793f363a6477cb8dc7df1caf906c2a65..6779438c67226783e1d63bf6095cb790be532578 100644 (file)
 
 #![feature(associated_type_defaults)]
 
-pub trait Foo {
-    type Input = usize;
-    fn bar(&self, _: Self::Input) {}
+pub trait Foo<T: Default + ToString> {
+    type Out: Default + ToString = T;
 }
 
-impl Foo for () {}
+impl Foo<u32> for () {
+}
+
+impl Foo<u64> for () {
+    type Out = bool;
+}
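
With this change, Out falls back to the trait's type parameter unless an impl overrides it, so the projection already differs between the two impls for (). A rough usage sketch, assuming the trait above is in scope and that the defaulted projection is observable from client code:

    // Rough sketch of observing the default versus the override.
    fn main() {
        let a: <() as Foo<u32>>::Out = 0u32;   // default kept: Out = u32
        let b: <() as Foo<u64>>::Out = false;  // overridden:   Out = bool
        println!("{} {}", a.to_string(), b.to_string());
    }
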
index 32b07d42fec44313da37dc77faa3c6e09782340b..30f3ef12d07435cb059d8b14947705906bccabb5 100644 (file)
@@ -19,12 +19,12 @@ extern crate cgu_extern_closures;
 //~ TRANS_ITEM fn cross_crate_closures::main[0]
 fn main() {
 
-    //~ TRANS_ITEM fn cgu_extern_closures[0]::inlined_fn[0]
-    //~ TRANS_ITEM fn cgu_extern_closures[0]::inlined_fn[0]::{{closure}}[0]
+    //~ TRANS_ITEM fn cgu_extern_closures::inlined_fn[0]
+    //~ TRANS_ITEM fn cgu_extern_closures::inlined_fn[0]::{{closure}}[0]
     let _ = cgu_extern_closures::inlined_fn(1, 2);
 
-    //~ TRANS_ITEM fn cgu_extern_closures[0]::inlined_fn_generic[0]<i32>
-    //~ TRANS_ITEM fn cgu_extern_closures[0]::inlined_fn_generic[0]::{{closure}}[0]<i32>
+    //~ TRANS_ITEM fn cgu_extern_closures::inlined_fn_generic[0]<i32>
+    //~ TRANS_ITEM fn cgu_extern_closures::inlined_fn_generic[0]::{{closure}}[0]<i32>
     let _ = cgu_extern_closures::inlined_fn_generic(3, 4, 5i32);
 
     // Nothing should be generated for this call, we just link to the instance
index 82d940a15485251871bec35627077f4be9c115cc..ada1234b852a1b1c943c1b9bd92de1182fcbacbc 100644 (file)
@@ -19,12 +19,12 @@ extern crate cgu_generic_function;
 //~ TRANS_ITEM fn cross_crate_generic_functions::main[0]
 fn main()
 {
-    //~ TRANS_ITEM fn cgu_generic_function[0]::bar[0]<u32>
-    //~ TRANS_ITEM fn cgu_generic_function[0]::foo[0]<u32>
+    //~ TRANS_ITEM fn cgu_generic_function::bar[0]<u32>
+    //~ TRANS_ITEM fn cgu_generic_function::foo[0]<u32>
     let _ = cgu_generic_function::foo(1u32);
 
-    //~ TRANS_ITEM fn cgu_generic_function[0]::bar[0]<u64>
-    //~ TRANS_ITEM fn cgu_generic_function[0]::foo[0]<u64>
+    //~ TRANS_ITEM fn cgu_generic_function::bar[0]<u64>
+    //~ TRANS_ITEM fn cgu_generic_function::foo[0]<u64>
     let _ = cgu_generic_function::foo(2u64);
 
     // This should not introduce a codegen item
index aa1f6b06c813594f9bb6beeec02a68673e38a32f..9f29a90bffbf645932bfa08edf883b2b1f135a27 100644 (file)
@@ -29,31 +29,31 @@ fn main()
     // Currently, no object code is generated for trait methods with default
     // implementations, unless they are actually called from somewhere. Therefore
     // we cannot import the implementations and have to create our own inline.
-    //~ TRANS_ITEM fn cgu_export_trait_method[0]::Trait[0]::with_default_impl[0]<u32>
+    //~ TRANS_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl[0]<u32>
     let _ = Trait::with_default_impl(0u32);
-    //~ TRANS_ITEM fn cgu_export_trait_method[0]::Trait[0]::with_default_impl[0]<char>
+    //~ TRANS_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl[0]<char>
     let _ = Trait::with_default_impl('c');
 
 
 
-    //~ TRANS_ITEM fn cgu_export_trait_method[0]::Trait[0]::with_default_impl_generic[0]<u32, &str>
+    //~ TRANS_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<u32, &str>
     let _ = Trait::with_default_impl_generic(0u32, "abc");
-    //~ TRANS_ITEM fn cgu_export_trait_method[0]::Trait[0]::with_default_impl_generic[0]<u32, bool>
+    //~ TRANS_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<u32, bool>
     let _ = Trait::with_default_impl_generic(0u32, false);
 
-    //~ TRANS_ITEM fn cgu_export_trait_method[0]::Trait[0]::with_default_impl_generic[0]<char, i16>
+    //~ TRANS_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<char, i16>
     let _ = Trait::with_default_impl_generic('x', 1i16);
-    //~ TRANS_ITEM fn cgu_export_trait_method[0]::Trait[0]::with_default_impl_generic[0]<char, i32>
+    //~ TRANS_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<char, i32>
     let _ = Trait::with_default_impl_generic('y', 0i32);
 
-    //~ TRANS_ITEM fn cgu_export_trait_method[0]::u32.Trait[0]::without_default_impl_generic[0]<char>
+    //~ TRANS_ITEM fn cgu_export_trait_method::{{impl}}[1]::without_default_impl_generic[0]<char>
     let _: (u32, char) = Trait::without_default_impl_generic('c');
-    //~ TRANS_ITEM fn cgu_export_trait_method[0]::u32.Trait[0]::without_default_impl_generic[0]<bool>
+    //~ TRANS_ITEM fn cgu_export_trait_method::{{impl}}[1]::without_default_impl_generic[0]<bool>
     let _: (u32, bool) = Trait::without_default_impl_generic(false);
 
-    //~ TRANS_ITEM fn cgu_export_trait_method[0]::char.Trait[0]::without_default_impl_generic[0]<char>
+    //~ TRANS_ITEM fn cgu_export_trait_method::{{impl}}[0]::without_default_impl_generic[0]<char>
     let _: (char, char) = Trait::without_default_impl_generic('c');
-    //~ TRANS_ITEM fn cgu_export_trait_method[0]::char.Trait[0]::without_default_impl_generic[0]<bool>
+    //~ TRANS_ITEM fn cgu_export_trait_method::{{impl}}[0]::without_default_impl_generic[0]<bool>
     let _: (char, bool) = Trait::without_default_impl_generic(false);
 }
 
index f89d6e61bc5527e9f34420b100513df0c5c914c2..476c84044e68656390521693be220ecbab4c461e 100644 (file)
@@ -49,17 +49,17 @@ struct NonGenericWithDrop(i32);
 
 impl Drop for NonGenericWithDrop {
     fn drop(&mut self) {}
-//~ TRANS_ITEM fn generic_drop_glue::NonGenericWithDrop.Drop[0]::drop[0]
+//~ TRANS_ITEM fn generic_drop_glue::{{impl}}[2]::drop[0]
 }
 
 //~ TRANS_ITEM fn generic_drop_glue::main[0]
 fn main() {
     //~ TRANS_ITEM drop-glue generic_drop_glue::StructWithDrop[0]<i8, char>
-    //~ TRANS_ITEM fn generic_drop_glue::StructWithDrop<T1, T2>.Drop[0]::drop[0]<i8, char>
+    //~ TRANS_ITEM fn generic_drop_glue::{{impl}}[0]::drop[0]<i8, char>
     let _ = StructWithDrop { x: 0i8, y: 'a' }.x;
 
     //~ TRANS_ITEM drop-glue generic_drop_glue::StructWithDrop[0]<&str, generic_drop_glue::NonGenericNoDrop[0]>
-    //~ TRANS_ITEM fn generic_drop_glue::StructWithDrop<T1, T2>.Drop[0]::drop[0]<&str, generic_drop_glue::NonGenericNoDrop[0]>
+    //~ TRANS_ITEM fn generic_drop_glue::{{impl}}[0]::drop[0]<&str, generic_drop_glue::NonGenericNoDrop[0]>
     let _ = StructWithDrop { x: "&str", y: NonGenericNoDrop(0) }.y;
 
     // Should produce no drop glue
@@ -71,14 +71,14 @@ fn main() {
     let _ = StructNoDrop { x: NonGenericWithDrop(0), y: 0f64 }.y;
 
     //~ TRANS_ITEM drop-glue generic_drop_glue::EnumWithDrop[0]<i32, i64>
-    //~ TRANS_ITEM fn generic_drop_glue::EnumWithDrop<T1, T2>.Drop[0]::drop[0]<i32, i64>
+    //~ TRANS_ITEM fn generic_drop_glue::{{impl}}[1]::drop[0]<i32, i64>
     let _ = match EnumWithDrop::A::<i32, i64>(0) {
         EnumWithDrop::A(x) => x,
         EnumWithDrop::B(x) => x as i32
     };
 
     //~ TRANS_ITEM drop-glue generic_drop_glue::EnumWithDrop[0]<f64, f32>
-    //~ TRANS_ITEM fn generic_drop_glue::EnumWithDrop<T1, T2>.Drop[0]::drop[0]<f64, f32>
+    //~ TRANS_ITEM fn generic_drop_glue::{{impl}}[1]::drop[0]<f64, f32>
     let _ = match EnumWithDrop::B::<f64, f32>(1.0) {
         EnumWithDrop::A(x) => x,
         EnumWithDrop::B(x) => x as f64
index 6e6bb5cbf53c867271c0b8e412dca1e676d497e4..a27515fd39b701dd99b8328f320c0a858bfdf26c 100644 (file)
@@ -40,11 +40,11 @@ pub struct LifeTimeOnly<'a> {
 
 impl<'a> LifeTimeOnly<'a> {
 
-    //~ TRANS_ITEM fn generic_impl::LifeTimeOnly<'a>[0]::foo[0]
+    //~ TRANS_ITEM fn generic_impl::{{impl}}[1]::foo[0]
     pub fn foo(&self) {}
-    //~ TRANS_ITEM fn generic_impl::LifeTimeOnly<'a>[0]::bar[0]
+    //~ TRANS_ITEM fn generic_impl::{{impl}}[1]::bar[0]
     pub fn bar(&'a self) {}
-    //~ TRANS_ITEM fn generic_impl::LifeTimeOnly<'a>[0]::baz[0]
+    //~ TRANS_ITEM fn generic_impl::{{impl}}[1]::baz[0]
     pub fn baz<'b>(&'b self) {}
 
     pub fn non_instantiated<T>(&self) {}
@@ -53,27 +53,27 @@ impl<'a> LifeTimeOnly<'a> {
 
 //~ TRANS_ITEM fn generic_impl::main[0]
 fn main() {
-    //~ TRANS_ITEM fn generic_impl::Struct<T>[0]::new[0]<i32>
+    //~ TRANS_ITEM fn generic_impl::{{impl}}[0]::new[0]<i32>
     //~ TRANS_ITEM fn generic_impl::id[0]<i32>
-    //~ TRANS_ITEM fn generic_impl::Struct<T>[0]::get[0]<i32, i16>
+    //~ TRANS_ITEM fn generic_impl::{{impl}}[0]::get[0]<i32, i16>
     let _ = Struct::new(0i32).get(0i16);
 
-    //~ TRANS_ITEM fn generic_impl::Struct<T>[0]::new[0]<i64>
+    //~ TRANS_ITEM fn generic_impl::{{impl}}[0]::new[0]<i64>
     //~ TRANS_ITEM fn generic_impl::id[0]<i64>
-    //~ TRANS_ITEM fn generic_impl::Struct<T>[0]::get[0]<i64, i16>
+    //~ TRANS_ITEM fn generic_impl::{{impl}}[0]::get[0]<i64, i16>
     let _ = Struct::new(0i64).get(0i16);
 
-    //~ TRANS_ITEM fn generic_impl::Struct<T>[0]::new[0]<char>
+    //~ TRANS_ITEM fn generic_impl::{{impl}}[0]::new[0]<char>
     //~ TRANS_ITEM fn generic_impl::id[0]<char>
-    //~ TRANS_ITEM fn generic_impl::Struct<T>[0]::get[0]<char, i16>
+    //~ TRANS_ITEM fn generic_impl::{{impl}}[0]::get[0]<char, i16>
     let _ = Struct::new('c').get(0i16);
 
-    //~ TRANS_ITEM fn generic_impl::Struct<T>[0]::new[0]<&str>
+    //~ TRANS_ITEM fn generic_impl::{{impl}}[0]::new[0]<&str>
     //~ TRANS_ITEM fn generic_impl::id[0]<&str>
-    //~ TRANS_ITEM fn generic_impl::Struct<T>[0]::get[0]<generic_impl::Struct[0]<&str>, i16>
+    //~ TRANS_ITEM fn generic_impl::{{impl}}[0]::get[0]<generic_impl::Struct[0]<&str>, i16>
     let _ = Struct::new(Struct::new("str")).get(0i16);
 
-    //~ TRANS_ITEM fn generic_impl::Struct<T>[0]::new[0]<generic_impl::Struct[0]<&str>>
+    //~ TRANS_ITEM fn generic_impl::{{impl}}[0]::new[0]<generic_impl::Struct[0]<&str>>
     //~ TRANS_ITEM fn generic_impl::id[0]<generic_impl::Struct[0]<&str>>
     let _ = (Struct::new(Struct::new("str")).f)(Struct::new("str"));
 }
index e17a1a7094f2fa0e881db4917c3b0e4758eb2751..a3bfa67e1ae44eb2c05b7d23dba8547e1b0ad890 100644 (file)
@@ -21,7 +21,7 @@ trait SomeTrait {
 // discovered.
 pub fn generic_function<T>(x: T) -> (T, i32) {
     impl SomeTrait for i64 {
-        //~ TRANS_ITEM fn impl_in_non_instantiated_generic::generic_function[0]::i64.SomeTrait[0]::foo[0]
+        //~ TRANS_ITEM fn impl_in_non_instantiated_generic::generic_function[0]::{{impl}}[0]::foo[0]
         fn foo(&self) {}
     }
 
index 46587b2b0a1b2b9c355558cb89809387945fa01d..b77252512200103aad028b238058dfe6c4934eec 100644 (file)
@@ -31,12 +31,12 @@ impl<T> Trait for Struct<T> {
 fn main() {
     let s1 = Struct { _a: 0u32 };
 
-    //~ TRANS_ITEM fn instantiation_through_vtable::Struct<T>.Trait[0]::foo[0]<u32>
-    //~ TRANS_ITEM fn instantiation_through_vtable::Struct<T>.Trait[0]::bar[0]<u32>
+    //~ TRANS_ITEM fn instantiation_through_vtable::{{impl}}[0]::foo[0]<u32>
+    //~ TRANS_ITEM fn instantiation_through_vtable::{{impl}}[0]::bar[0]<u32>
     let _ = &s1 as &Trait;
 
     let s1 = Struct { _a: 0u64 };
-    //~ TRANS_ITEM fn instantiation_through_vtable::Struct<T>.Trait[0]::foo[0]<u64>
-    //~ TRANS_ITEM fn instantiation_through_vtable::Struct<T>.Trait[0]::bar[0]<u64>
+    //~ TRANS_ITEM fn instantiation_through_vtable::{{impl}}[0]::foo[0]<u64>
+    //~ TRANS_ITEM fn instantiation_through_vtable::{{impl}}[0]::bar[0]<u64>
     let _ = &s1 as &Trait;
 }
index a82e85b7a5315c3fb4d8b34d3d13f281ef295557..bd8b0c605aecf8b96a48a468c35acb04a57de1ff 100644 (file)
@@ -19,7 +19,7 @@ struct StructWithDrop {
 }
 
 impl Drop for StructWithDrop {
-    //~ TRANS_ITEM fn non_generic_drop_glue::StructWithDrop.Drop[0]::drop[0]
+    //~ TRANS_ITEM fn non_generic_drop_glue::{{impl}}[0]::drop[0]
     fn drop(&mut self) {}
 }
 
@@ -33,7 +33,7 @@ enum EnumWithDrop {
 }
 
 impl Drop for EnumWithDrop {
-    //~ TRANS_ITEM fn non_generic_drop_glue::EnumWithDrop.Drop[0]::drop[0]
+    //~ TRANS_ITEM fn non_generic_drop_glue::{{impl}}[1]::drop[0]
     fn drop(&mut self) {}
 }
 
index 687ce7fa05cb45fd7d3891401a6d3e1ce0f4d132..4e2a7c8508468f1ac82c1d8e58e722c8b53d2f50 100644 (file)
@@ -38,31 +38,31 @@ fn bar() {
 struct Struct { _x: i32 }
 
 impl Struct {
-    //~ TRANS_ITEM fn non_generic_functions::Struct[0]::foo[0]
+    //~ TRANS_ITEM fn non_generic_functions::{{impl}}[0]::foo[0]
     fn foo() {
         {
-            //~ TRANS_ITEM fn non_generic_functions::Struct[0]::foo[0]::foo[0]
+            //~ TRANS_ITEM fn non_generic_functions::{{impl}}[0]::foo[0]::foo[0]
             fn foo() {}
             foo();
         }
 
         {
-            //~ TRANS_ITEM fn non_generic_functions::Struct[0]::foo[0]::foo[1]
+            //~ TRANS_ITEM fn non_generic_functions::{{impl}}[0]::foo[0]::foo[1]
             fn foo() {}
             foo();
         }
     }
 
-    //~ TRANS_ITEM fn non_generic_functions::Struct[0]::bar[0]
+    //~ TRANS_ITEM fn non_generic_functions::{{impl}}[0]::bar[0]
     fn bar(&self) {
         {
-            //~ TRANS_ITEM fn non_generic_functions::Struct[0]::bar[0]::foo[0]
+            //~ TRANS_ITEM fn non_generic_functions::{{impl}}[0]::bar[0]::foo[0]
             fn foo() {}
             foo();
         }
 
         {
-            //~ TRANS_ITEM fn non_generic_functions::Struct[0]::bar[0]::foo[1]
+            //~ TRANS_ITEM fn non_generic_functions::{{impl}}[0]::bar[0]::foo[1]
             fn foo() {}
             foo();
         }
index 134110222f392433a8db5aac9e25181da6f3cb99..c275eb954b094296d69496812ca8002d94600354 100644 (file)
@@ -23,7 +23,7 @@ pub struct Indexable {
 impl Index<usize> for Indexable {
     type Output = u8;
 
-    //~ TRANS_ITEM fn overloaded_operators::Indexable.Index<usize>[0]::index[0]
+    //~ TRANS_ITEM fn overloaded_operators::{{impl}}[0]::index[0]
     fn index(&self, index: usize) -> &Self::Output {
         if index >= 3 {
             &self.data[0]
@@ -34,7 +34,7 @@ impl Index<usize> for Indexable {
 }
 
 impl IndexMut<usize> for Indexable {
-    //~ TRANS_ITEM fn overloaded_operators::Indexable.IndexMut<usize>[0]::index_mut[0]
+    //~ TRANS_ITEM fn overloaded_operators::{{impl}}[1]::index_mut[0]
     fn index_mut(&mut self, index: usize) -> &mut Self::Output {
         if index >= 3 {
             &mut self.data[0]
@@ -45,8 +45,8 @@ impl IndexMut<usize> for Indexable {
 }
 
 
-//~ TRANS_ITEM fn overloaded_operators::Equatable.::std::cmp::PartialEq[0]::eq[0]
-//~ TRANS_ITEM fn overloaded_operators::Equatable.::std::cmp::PartialEq[0]::ne[0]
+//~ TRANS_ITEM fn overloaded_operators::{{impl}}[2]::eq[0]
+//~ TRANS_ITEM fn overloaded_operators::{{impl}}[2]::ne[0]
 #[derive(PartialEq)]
 pub struct Equatable(u32);
 
@@ -54,7 +54,7 @@ pub struct Equatable(u32);
 impl Add<u32> for Equatable {
     type Output = u32;
 
-    //~ TRANS_ITEM fn overloaded_operators::Equatable.Add<u32>[0]::add[0]
+    //~ TRANS_ITEM fn overloaded_operators::{{impl}}[3]::add[0]
     fn add(self, rhs: u32) -> u32 {
         self.0 + rhs
     }
@@ -63,7 +63,7 @@ impl Add<u32> for Equatable {
 impl Deref for Equatable {
     type Target = u32;
 
-    //~ TRANS_ITEM fn overloaded_operators::Equatable.Deref[0]::deref[0]
+    //~ TRANS_ITEM fn overloaded_operators::{{impl}}[4]::deref[0]
     fn deref(&self) -> &Self::Target {
         &self.0
     }
index 590859f15a3e1ce3a267e42f3f288101a075b2e1..2eb2212f0cacd0f6f166d22032a2ac105149b6ff 100644 (file)
@@ -20,7 +20,7 @@ pub trait SomeTrait {
 
 impl SomeTrait for i64 {
 
-    //~ TRANS_ITEM fn trait_implementations::i64.SomeTrait[0]::foo[0]
+    //~ TRANS_ITEM fn trait_implementations::{{impl}}[0]::foo[0]
     fn foo(&self) {}
 
     fn bar<T>(&self, _: T) {}
@@ -28,7 +28,7 @@ impl SomeTrait for i64 {
 
 impl SomeTrait for i32 {
 
-    //~ TRANS_ITEM fn trait_implementations::i32.SomeTrait[0]::foo[0]
+    //~ TRANS_ITEM fn trait_implementations::{{impl}}[1]::foo[0]
     fn foo(&self) {}
 
     fn bar<T>(&self, _: T) {}
@@ -42,7 +42,7 @@ pub trait SomeGenericTrait<T> {
 // Concrete impl of generic trait
 impl SomeGenericTrait<u32> for f64 {
 
-    //~ TRANS_ITEM fn trait_implementations::f64.SomeGenericTrait<u32>[0]::foo[0]
+    //~ TRANS_ITEM fn trait_implementations::{{impl}}[2]::foo[0]
     fn foo(&self, _: u32) {}
 
     fn bar<T2>(&self, _: u32, _: T2) {}
@@ -57,25 +57,25 @@ impl<T> SomeGenericTrait<T> for f32 {
 
 //~ TRANS_ITEM fn trait_implementations::main[0]
 fn main() {
-   //~ TRANS_ITEM fn trait_implementations::i32.SomeTrait[0]::bar[0]<char>
+   //~ TRANS_ITEM fn trait_implementations::{{impl}}[1]::bar[0]<char>
    0i32.bar('x');
 
-   //~ TRANS_ITEM fn trait_implementations::f64.SomeGenericTrait<u32>[0]::bar[0]<&str>
+   //~ TRANS_ITEM fn trait_implementations::{{impl}}[2]::bar[0]<&str>
    0f64.bar(0u32, "&str");
 
-   //~ TRANS_ITEM fn trait_implementations::f64.SomeGenericTrait<u32>[0]::bar[0]<()>
+   //~ TRANS_ITEM fn trait_implementations::{{impl}}[2]::bar[0]<()>
    0f64.bar(0u32, ());
 
-   //~ TRANS_ITEM fn trait_implementations::f32.SomeGenericTrait<T>[0]::foo[0]<char>
+   //~ TRANS_ITEM fn trait_implementations::{{impl}}[3]::foo[0]<char>
    0f32.foo('x');
 
-   //~ TRANS_ITEM fn trait_implementations::f32.SomeGenericTrait<T>[0]::foo[0]<i64>
+   //~ TRANS_ITEM fn trait_implementations::{{impl}}[3]::foo[0]<i64>
    0f32.foo(-1i64);
 
-   //~ TRANS_ITEM fn trait_implementations::f32.SomeGenericTrait<T>[0]::bar[0]<u32, ()>
+   //~ TRANS_ITEM fn trait_implementations::{{impl}}[3]::bar[0]<u32, ()>
    0f32.bar(0u32, ());
 
-   //~ TRANS_ITEM fn trait_implementations::f32.SomeGenericTrait<T>[0]::bar[0]<&str, &str>
+   //~ TRANS_ITEM fn trait_implementations::{{impl}}[3]::bar[0]<&str, &str>
    0f32.bar("&str", "&str");
 }
 
index fdf63df5471110b90197e4c8b05db63111ed14f4..e7006d73ef166c83c26c2d9ea5eebb1504712b7b 100644 (file)
@@ -39,7 +39,7 @@ fn take_foo_mut<T, F: FnMut(T) -> T>(mut f: F, arg: T) -> T {
 //~ TRANS_ITEM fn trait_method_as_argument::main[0]
 fn main() {
     //~ TRANS_ITEM fn trait_method_as_argument::take_foo_once[0]<u32, fn(u32) -> u32>
-    //~ TRANS_ITEM fn trait_method_as_argument::u32.Trait[0]::foo[0]
+    //~ TRANS_ITEM fn trait_method_as_argument::{{impl}}[0]::foo[0]
     take_foo_once(Trait::foo, 0u32);
 
     //~ TRANS_ITEM fn trait_method_as_argument::take_foo_once[0]<char, fn(char) -> char>
index 6982cb9299a550997b5cacf07f630597e5735c3e..21bb29199a685715ba4832ed0b35e071c6610c78 100644 (file)
@@ -21,7 +21,7 @@ struct Intermediate(Leaf);
 struct Leaf;
 
 impl Drop for Leaf {
-    //~ TRANS_ITEM fn transitive_drop_glue::Leaf.Drop[0]::drop[0]
+    //~ TRANS_ITEM fn transitive_drop_glue::{{impl}}[0]::drop[0]
     fn drop(&mut self) {}
 }
 
@@ -44,12 +44,12 @@ fn main() {
     //~ TRANS_ITEM drop-glue transitive_drop_glue::RootGen[0]<u32>
     //~ TRANS_ITEM drop-glue transitive_drop_glue::IntermediateGen[0]<u32>
     //~ TRANS_ITEM drop-glue transitive_drop_glue::LeafGen[0]<u32>
-    //~ TRANS_ITEM fn transitive_drop_glue::LeafGen<T>.Drop[0]::drop[0]<u32>
+    //~ TRANS_ITEM fn transitive_drop_glue::{{impl}}[1]::drop[0]<u32>
     let _ = RootGen(IntermediateGen(LeafGen(0u32)));
 
     //~ TRANS_ITEM drop-glue transitive_drop_glue::RootGen[0]<i16>
     //~ TRANS_ITEM drop-glue transitive_drop_glue::IntermediateGen[0]<i16>
     //~ TRANS_ITEM drop-glue transitive_drop_glue::LeafGen[0]<i16>
-    //~ TRANS_ITEM fn transitive_drop_glue::LeafGen<T>.Drop[0]::drop[0]<i16>
+    //~ TRANS_ITEM fn transitive_drop_glue::{{impl}}[1]::drop[0]<i16>
     let _ = RootGen(IntermediateGen(LeafGen(0i16)));
 }
index 87fcb00eab8c2424dacdc168ef023763783acf4a..1bc235de88e1f603c33b3222ddb6b496b0d97081 100644 (file)
@@ -17,7 +17,7 @@
 struct Dropped;
 
 impl Drop for Dropped {
-    //~ TRANS_ITEM fn tuple_drop_glue::Dropped.Drop[0]::drop[0]
+    //~ TRANS_ITEM fn tuple_drop_glue::{{impl}}[0]::drop[0]
     fn drop(&mut self) {}
 }
 
index dd90d32858f110568a261774f12c050629cfe2cd..45ba441bc8ba6066bef2f85c6eed8fbd37de471c 100644 (file)
@@ -57,11 +57,11 @@ fn main()
 {
     // simple case
     let bool_sized = &true;
-    //~ TRANS_ITEM fn unsizing::bool.Trait[0]::foo[0]
+    //~ TRANS_ITEM fn unsizing::{{impl}}[0]::foo[0]
     let _bool_unsized = bool_sized as &Trait;
 
     let char_sized = &true;
-    //~ TRANS_ITEM fn unsizing::char.Trait[0]::foo[0]
+    //~ TRANS_ITEM fn unsizing::{{impl}}[1]::foo[0]
     let _char_unsized = char_sized as &Trait;
 
     // struct field
@@ -70,11 +70,11 @@ fn main()
         _b: 2,
         _c: 3.0f64
     };
-    //~ TRANS_ITEM fn unsizing::f64.Trait[0]::foo[0]
+    //~ TRANS_ITEM fn unsizing::{{impl}}[2]::foo[0]
     let _struct_unsized = struct_sized as &Struct<Trait>;
 
     // custom coercion
     let wrapper_sized = Wrapper(&0u32);
-    //~ TRANS_ITEM fn unsizing::u32.Trait[0]::foo[0]
+    //~ TRANS_ITEM fn unsizing::{{impl}}[3]::foo[0]
     let _wrapper_sized = wrapper_sized as Wrapper<Trait>;
 }
index a4c5099ab9751e4d223a5ec0c1c4a6766371a470..8689beb3fb77e98e6df3cc05214d348615f01134 100644 (file)
@@ -85,5 +85,5 @@ impl NonGeneric {
 }
 
 // Only the non-generic methods should be instantiated:
-//~ TRANS_ITEM fn unused_traits_and_generics::NonGeneric[0]::foo[0]
+//~ TRANS_ITEM fn unused_traits_and_generics::{{impl}}[3]::foo[0]
 //~ TRANS_ITEM drop-glue i8
index a61fa84398e9aacbd9b8f1958ec2dc71875368c7..20d049394345256dad72cc965d5d1c670b1f0f12 100644 (file)
@@ -11,6 +11,7 @@
 // compile-flags: -C no-prepopulate-passes
 
 #![crate_type = "lib"]
+#![feature(rustc_attrs)]
 
 // Hack to get the correct size for the length part in slices
 // CHECK: @helper([[USIZE:i[0-9]+]])
@@ -20,6 +21,7 @@ fn helper(_: usize) {
 
 // CHECK-LABEL: @no_op_slice_adjustment
 #[no_mangle]
+#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
 pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] {
     // We used to generate an extra alloca and memcpy for the block's trailing expression value, so
     // check that we copy directly to the return value slot
index c8c9f5b407c421b11458d6b6c514f54ac64afbaf..74c7192259ac4044108f287b39fb09114773b5eb 100644 (file)
 // compile-flags: -C no-prepopulate-passes
 
 #![crate_type = "lib"]
+#![feature(rustc_attrs)]
 
 static X: i32 = 5;
 
 // CHECK-LABEL: @raw_ptr_to_raw_ptr_noop
 // CHECK-NOT: alloca
 #[no_mangle]
+#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
 pub fn raw_ptr_to_raw_ptr_noop() -> *const i32{
     &X as *const i32
 }
@@ -24,6 +26,7 @@ pub fn raw_ptr_to_raw_ptr_noop() -> *const i32{
 // CHECK-LABEL: @reference_to_raw_ptr_noop
 // CHECK-NOT: alloca
 #[no_mangle]
+#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
 pub fn reference_to_raw_ptr_noop() -> *const i32 {
     &X
 }
index 6b4e626df924bf020be2e9d5d6e8ebb201bfc765..ea4c932d43549ce74efb7c6204950922d473e01a 100644 (file)
@@ -11,6 +11,7 @@
 // compile-flags: -C no-prepopulate-passes
 
 #![crate_type = "lib"]
+#![feature(rustc_attrs)]
 
 // Below, these constants are defined as enum variants that by themselves would
 // have a lower alignment than the enum type. Ensure that we mark them
@@ -39,18 +40,21 @@ pub static STATIC: E<i16, i32> = E::A(0);
 
 // CHECK-LABEL: @static_enum_const
 #[no_mangle]
+#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
 pub fn static_enum_const() -> E<i16, i32> {
    STATIC
 }
 
 // CHECK-LABEL: @inline_enum_const
 #[no_mangle]
+#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
 pub fn inline_enum_const() -> E<i8, i16> {
     E::A(0)
 }
 
 // CHECK-LABEL: @low_align_const
 #[no_mangle]
+#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
 pub fn low_align_const() -> E<i16, [i16; 3]> {
 // Check that low_align_const and high_align_const use the same constant
 // CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{[0-9]+}}, i8* {{.*}} [[LOW_HIGH:@const[0-9]+]]
@@ -59,6 +63,7 @@ pub fn low_align_const() -> E<i16, [i16; 3]> {
 
 // CHECK-LABEL: @high_align_const
 #[no_mangle]
+#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
 pub fn high_align_const() -> E<i16, i32> {
 // Check that low_align_const and high_align_const use the same constant
 // CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{[0-9]}}, i8* {{.*}} [[LOW_HIGH]]
index 2ac8de6d802947e3fc43c899b0c6513699c45702..83dd6a3b00258ec5ee4af95d2ff11877cf98cc73 100644 (file)
@@ -11,6 +11,7 @@
 // compile-flags: -C no-prepopulate-passes
 
 #![crate_type = "lib"]
+#![feature(rustc_attrs)]
 
 struct SomeUniqueName;
 
@@ -24,6 +25,7 @@ pub fn possibly_unwinding() {
 
 // CHECK-LABEL: @droppy
 #[no_mangle]
+#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
 pub fn droppy() {
 // Check that there are exactly 6 drop calls. The cleanups for the unwinding should be reused, so
 // that's one new drop call per call to possibly_unwinding(), and finally 3 drop calls for the
index ff9d54e67e4782439005eb7d2dcc048b658209b7..7ee31070b2635ea3895e5a5966c87b79a7345b8d 100644 (file)
@@ -22,3 +22,8 @@ extern {
     #[unwind]
     fn unwinding_extern_fn();
 }
+
+pub unsafe fn force_declare() {
+    extern_fn();
+    unwinding_extern_fn();
+}
diff --git a/src/test/codegen/float_math.rs b/src/test/codegen/float_math.rs
new file mode 100644 (file)
index 0000000..bc458d4
--- /dev/null
@@ -0,0 +1,60 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(core_intrinsics)]
+
+use std::intrinsics::{fadd_fast, fsub_fast, fmul_fast, fdiv_fast, frem_fast};
+
+// CHECK-LABEL: @add
+#[no_mangle]
+pub fn add(x: f32, y: f32) -> f32 {
+// CHECK: fadd float
+// CHECK-NOT fast
+    x + y
+}
+
+// CHECK-LABEL: @addition
+#[no_mangle]
+pub fn addition(x: f32, y: f32) -> f32 {
+// CHECK: fadd fast float
+    unsafe {
+        fadd_fast(x, y)
+    }
+}
+
+// CHECK-LABEL: @subtraction
+#[no_mangle]
+pub fn subtraction(x: f32, y: f32) -> f32 {
+// CHECK: fsub fast float
+    unsafe {
+        fsub_fast(x, y)
+    }
+}
+
+// CHECK-LABEL: @multiplication
+#[no_mangle]
+pub fn multiplication(x: f32, y: f32) -> f32 {
+// CHECK: fmul fast float
+    unsafe {
+        fmul_fast(x, y)
+    }
+}
+
+// CHECK-LABEL: @division
+#[no_mangle]
+pub fn division(x: f32, y: f32) -> f32 {
+// CHECK: fdiv fast float
+    unsafe {
+        fdiv_fast(x, y)
+    }
+}
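
The file also imports frem_fast, which follows the same pattern as the operations above. These intrinsics permit LLVM's fast-math assumptions, so callers are expected to keep inputs finite; a minimal standalone sketch (not part of the test file) looks like this:

    #![feature(core_intrinsics)]
    use std::intrinsics::frem_fast;

    // Would lower to: frem fast float
    pub fn remainder(x: f32, y: f32) -> f32 {
        // Inputs are assumed finite, per the fast-math contract.
        unsafe { frem_fast(x, y) }
    }

    fn main() {
        println!("{}", remainder(7.5, 2.0)); // prints 1.5
    }
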
diff --git a/src/test/codegen/naked-functions.rs b/src/test/codegen/naked-functions.rs
new file mode 100644 (file)
index 0000000..0a600f4
--- /dev/null
@@ -0,0 +1,69 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-tidy-linelength
+
+// compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(naked_functions)]
+
+// CHECK: Function Attrs: naked uwtable
+// CHECK-NEXT: define internal void @naked_empty()
+#[no_mangle]
+#[naked]
+fn naked_empty() {
+    // CHECK: ret void
+}
+
+// CHECK: Function Attrs: naked uwtable
+#[no_mangle]
+#[naked]
+// CHECK-NEXT: define internal void @naked_with_args(i{{[0-9]+}})
+fn naked_with_args(a: isize) {
+    // CHECK: %a = alloca i{{[0-9]+}}
+    // CHECK: ret void
+}
+
+// CHECK: Function Attrs: naked uwtable
+// CHECK-NEXT: define internal i{{[0-9]+}} @naked_with_return()
+#[no_mangle]
+#[naked]
+fn naked_with_return() -> isize {
+    // CHECK: ret i{{[0-9]+}} 0
+    0
+}
+
+// CHECK: Function Attrs: naked uwtable
+// CHECK-NEXT: define internal i{{[0-9]+}} @naked_with_args_and_return(i{{[0-9]+}})
+#[no_mangle]
+#[naked]
+fn naked_with_args_and_return(a: isize) -> isize {
+    // CHECK: %a = alloca i{{[0-9]+}}
+    // CHECK: ret i{{[0-9]+}} %{{[0-9]+}}
+    a
+}
+
+// CHECK: Function Attrs: naked uwtable
+// CHECK-NEXT: define internal void @naked_recursive()
+#[no_mangle]
+#[naked]
+fn naked_recursive() {
+    // CHECK: call void @naked_empty()
+    naked_empty();
+    // CHECK: %{{[0-9]+}} = call i{{[0-9]+}} @naked_with_return()
+    naked_with_args(
+        // CHECK: %{{[0-9]+}} = call i{{[0-9]+}} @naked_with_args_and_return(i{{[0-9]+}} %{{[0-9]+}})
+        naked_with_args_and_return(
+            // CHECK: call void @naked_with_args(i{{[0-9]+}} %{{[0-9]+}})
+            naked_with_return()
+        )
+    );
+}
index 08eec0045f78a07631b52149568e07078ffd3449..36c83412e4f0f4775d73e65356841c27b6f86aa4 100644 (file)
@@ -11,6 +11,7 @@
 // compile-flags: -C no-prepopulate-passes
 
 #![crate_type = "lib"]
+#![feature(rustc_attrs)]
 
 // Hack to get the correct size for the length part in slices
 // CHECK: @helper([[USIZE:i[0-9]+]])
@@ -20,6 +21,7 @@ fn helper(_: usize) {
 
 // CHECK-LABEL: @ref_dst
 #[no_mangle]
+#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
 pub fn ref_dst(s: &[u8]) {
     // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy
     // directly to the alloca for "x"
index 5d2d47e1bf3f6f3ed53b375448667fa41d49449b..f849a6c9b18b847b0ec491c51331484e8feccb5c 100644 (file)
@@ -11,6 +11,7 @@
 // compile-flags: -C no-prepopulate-passes
 
 #![crate_type = "lib"]
+#![feature(rustc_attrs)]
 
 pub struct Bytes {
   a: u8,
@@ -23,25 +24,20 @@ pub struct Bytes {
 // The array is stored as i32, but its alignment is lower, go with 1 byte to avoid target
 // dependent alignment
 #[no_mangle]
-pub fn small_array_alignment(x: &mut [i8; 4]) {
-// CHECK: [[VAR:%[0-9]+]] = load {{(\[4 x i8\]\*, )?}}[4 x i8]** %x
-// CHECK: [[VAR2:%[0-9]+]] = bitcast [4 x i8]* [[VAR]] to i32*
-// CHECK: store i32 %{{.*}}, i32* [[VAR2]], align 1
-    *x = [0; 4];
+#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
+pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) {
+// CHECK: [[VAR:%[0-9]+]] = bitcast [4 x i8]* %y to i32*
+// CHECK: store i32 %{{.*}}, i32* [[VAR]], align 1
+    *x = y;
 }
 
 // CHECK-LABEL: small_struct_alignment
 // The struct is stored as i32, but its alignment is lower, go with 1 byte to avoid target
 // dependent alignment
 #[no_mangle]
-pub fn small_struct_alignment(x: &mut Bytes) {
-// CHECK: [[VAR:%[0-9]+]] = load {{(%Bytes\*, )?}}%Bytes** %x
-// CHECK: [[VAR2:%[0-9]+]] = bitcast %Bytes* [[VAR]] to i32*
-// CHECK: store i32 %{{.*}}, i32* [[VAR2]], align 1
-    *x = Bytes {
-        a: 0,
-        b: 0,
-        c: 0,
-        d: 0,
-    };
+#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
+pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) {
+// CHECK: [[VAR:%[0-9]+]] = bitcast %Bytes* %y to i32*
+// CHECK: store i32 %{{.*}}, i32* [[VAR]], align 1
+    *x = y;
 }
diff --git a/src/test/compile-fail-fulldeps/derive-no-std-not-supported.rs b/src/test/compile-fail-fulldeps/derive-no-std-not-supported.rs
new file mode 100644 (file)
index 0000000..01c81a8
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![no_std]
+
+extern crate core;
+extern crate rand;
+extern crate serialize as rustc_serialize;
+
+#[derive(RustcEncodable)]  //~ ERROR this trait cannot be derived
+struct Bar {
+    x: u32,
+}
+
+#[derive(RustcDecodable)]  //~ ERROR this trait cannot be derived
+struct Baz {
+    x: u32,
+}
+
+fn main() {
+    Foo { x: 0 };
+    Bar { x: 0 };
+    Baz { x: 0 };
+}
diff --git a/src/test/compile-fail-fulldeps/dropck_tarena_cycle_checked.rs b/src/test/compile-fail-fulldeps/dropck_tarena_cycle_checked.rs
new file mode 100644 (file)
index 0000000..d36293a
--- /dev/null
@@ -0,0 +1,127 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Reject mixing cyclic structure and Drop when using TypedArena.
+//
+// (Compare against compile-fail/dropck_vec_cycle_checked.rs)
+//
+// (Also compare against compile-fail/dropck_tarena_unsound_drop.rs,
+//  which is a reduction of this code to more directly show the reason
+//  for the error message we see here.)
+
+#![feature(const_fn)]
+
+extern crate arena;
+
+use arena::TypedArena;
+use std::cell::Cell;
+use id::Id;
+
+mod s {
+    use std::sync::atomic::{AtomicUsize, Ordering};
+
+    static S_COUNT: AtomicUsize = AtomicUsize::new(0);
+
+    pub fn next_count() -> usize {
+        S_COUNT.fetch_add(1, Ordering::SeqCst) + 1
+    }
+}
+
+mod id {
+    use s;
+    #[derive(Debug)]
+    pub struct Id {
+        orig_count: usize,
+        count: usize,
+    }
+
+    impl Id {
+        pub fn new() -> Id {
+            let c = s::next_count();
+            println!("building Id {}", c);
+            Id { orig_count: c, count: c }
+        }
+        pub fn count(&self) -> usize {
+            println!("Id::count on {} returns {}", self.orig_count, self.count);
+            self.count
+        }
+    }
+
+    impl Drop for Id {
+        fn drop(&mut self) {
+            println!("dropping Id {}", self.count);
+            self.count = 0;
+        }
+    }
+}
+
+trait HasId {
+    fn count(&self) -> usize;
+}
+
+#[derive(Debug)]
+struct CheckId<T:HasId> {
+    v: T
+}
+
+#[allow(non_snake_case)]
+fn CheckId<T:HasId>(t: T) -> CheckId<T> { CheckId{ v: t } }
+
+impl<T:HasId> Drop for CheckId<T> {
+    fn drop(&mut self) {
+        assert!(self.v.count() > 0);
+    }
+}
+
+#[derive(Debug)]
+struct C<'a> {
+    id: Id,
+    v: Vec<CheckId<Cell<Option<&'a C<'a>>>>>,
+}
+
+impl<'a> HasId for Cell<Option<&'a C<'a>>> {
+    fn count(&self) -> usize {
+        match self.get() {
+            None => 1,
+            Some(c) => c.id.count(),
+        }
+    }
+}
+
+impl<'a> C<'a> {
+    fn new() -> C<'a> {
+        C { id: Id::new(), v: Vec::new() }
+    }
+}
+
+fn f<'a>(arena: &'a TypedArena<C<'a>>) {
+    let c1 = arena.alloc(C::new());
+    let c2 = arena.alloc(C::new());
+    let c3 = arena.alloc(C::new());
+
+    c1.v.push(CheckId(Cell::new(None)));
+    c1.v.push(CheckId(Cell::new(None)));
+    c2.v.push(CheckId(Cell::new(None)));
+    c2.v.push(CheckId(Cell::new(None)));
+    c3.v.push(CheckId(Cell::new(None)));
+    c3.v.push(CheckId(Cell::new(None)));
+
+    c1.v[0].v.set(Some(c2));
+    c1.v[1].v.set(Some(c3));
+    c2.v[0].v.set(Some(c2));
+    c2.v[1].v.set(Some(c3));
+    c3.v[0].v.set(Some(c1));
+    c3.v[1].v.set(Some(c2));
+}
+
+fn main() {
+    let arena = TypedArena::new();
+    f(&arena); //~ ERROR `arena` does not live long enough
+}
diff --git a/src/test/compile-fail-fulldeps/dropck_tarena_unsound_drop.rs b/src/test/compile-fail-fulldeps/dropck_tarena_unsound_drop.rs
new file mode 100644 (file)
index 0000000..6cbed34
--- /dev/null
@@ -0,0 +1,50 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check that an arena (TypedArena) cannot carry elements whose drop
+// methods might access borrowed data of lifetime that does not
+// strictly outlive the arena itself.
+//
+// Compare against run-pass/dropck_tarena_sound_drop.rs, which shows a
+// similar setup, but loosens `f` so that the struct `C<'a>` can be
+// fed a lifetime longer than that of the arena.
+//
+// (Also compare against dropck_tarena_cycle_checked.rs, from which
+// this was reduced to better understand its error message.)
+
+extern crate arena;
+
+use arena::TypedArena;
+
+trait HasId { fn count(&self) -> usize; }
+
+struct CheckId<T:HasId> { v: T }
+
+// In the code below, the impl of HasId for `&'a usize` does not
+// actually access the borrowed data, but the point is that the
+// interface to CheckId does not (and cannot) know that, and therefore
+// when encountering a value V of type CheckId<S>, we must
+// conservatively force the type S to strictly outlive V.
+impl<T:HasId> Drop for CheckId<T> {
+    fn drop(&mut self) {
+        assert!(self.v.count() > 0);
+    }
+}
+
+struct C<'a> { v: CheckId<&'a usize>, }
+
+impl<'a> HasId for &'a usize { fn count(&self) -> usize { 1 } }
+
+fn f<'a>(_arena: &'a TypedArena<C<'a>>) {}
+
+fn main() {
+    let arena: TypedArena<C> = TypedArena::new();
+    f(&arena); //~ ERROR `arena` does not live long enough
+}
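
Both arena tests hinge on the dropck rule spelled out in their comments: if a value's destructor may read borrowed data, that data must strictly outlive the value. A tiny hedged illustration of the same rule without arenas (`Watcher` is a made-up type used only for this sketch):

    struct Watcher<'a>(&'a String);

    impl<'a> Drop for Watcher<'a> {
        fn drop(&mut self) {
            // The destructor reads the borrowed data, so dropck insists that
            // the String strictly outlives the Watcher.
            println!("still alive: {}", self.0);
        }
    }

    fn main() {
        let s = String::from("hi");
        let _w = Watcher(&s); // ok: `s` is declared first, so it drops after `_w`
        // Declaring `_w` before `s` (e.g. `let _w; let s = ...; _w = Watcher(&s);`)
        // is rejected with "`s` does not live long enough", just like `arena`
        // in the tests above.
    }
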
index 8ac03606720e4fbf8e6ec860aa40bfbb6a26767a..1fbde00a3dfdecf2b7377ad415cbd78d29a64bf0 100644 (file)
@@ -16,5 +16,4 @@ extern crate macro_crate_test;
 fn main() {
     macro_crate_test::foo();
     //~^ ERROR failed to resolve. Use of undeclared type or module `macro_crate_test`
-    //~^^ ERROR unresolved name `macro_crate_test::foo`
 }
index 5f8948ac5f1e9e41efa0e2eb9fb2c9bc940d699f..db7f304227f01acc9df0d1df50744ac82e3e08c4 100644 (file)
@@ -8,11 +8,9 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// ignore-windows everything is the system allocator on windows
-// ignore-musl no dylibs on musl yet
-// ignore-bitrig no jemalloc on bitrig
-// ignore-openbsd no jemalloc on openbsd
+// ignore-musl no dylibs
 // aux-build:allocator-dylib.rs
+// aux-build:allocator1.rs
 // no-prefer-dynamic
 // error-pattern: cannot link together two allocators
 
 #![feature(alloc_jemalloc)]
 
 extern crate allocator_dylib;
+
+// The main purpose of this test is to ensure that `alloc_jemalloc` **fails**
+// here (specifically the jemalloc allocator), but currently jemalloc is
+// disabled on quite a few platforms (bsds, emscripten, msvc, etc). So that
+// this test still passes on those platforms, we link in some other allocator
+// and check that it produces the same error.
+//
+// So long as we run CI on linux/OSX we should be good.
+#[cfg(any(target_os = "linux", target_os = "macos"))]
 extern crate alloc_jemalloc;
+#[cfg(not(any(target_os = "linux", target_os = "macos")))]
+extern crate allocator1;
 
 fn main() {
     allocator_dylib::foo();
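
The two allocator tests rely on the rule that exactly one allocator crate can be linked into a program. A hedged sketch of the opt-in side of that rule on a nightly of this era (the crate body here is only illustrative):

    // Opt this binary into the system allocator instead of the default one
    // (jemalloc on the platforms the tests care about). Linking a second
    // allocator crate on top of this, as the tests do through their dylib
    // dependency, is what produces "cannot link together two allocators".
    #![feature(alloc_system)]
    extern crate alloc_system;

    fn main() {
        println!("using the system allocator");
    }
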
index b29797f3295262b8daa1716169f730b5968913d9..46ad226d255645a0df21d8ac19f5179aa6905fe0 100644 (file)
@@ -8,11 +8,9 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// ignore-windows everything is the system allocator on windows
-// ignore-musl no dylibs on musl right now
-// ignore-bitrig no jemalloc on bitrig
-// ignore-openbsd no jemalloc on openbsd
+// ignore-musl no dylibs
 // aux-build:allocator-dylib2.rs
+// aux-build:allocator1.rs
 // error-pattern: cannot link together two allocators
 
 // Ensure that rust dynamic libraries use jemalloc as their allocator, verifying
 #![feature(alloc_system)]
 
 extern crate allocator_dylib2;
+
+// The main purpose of this test is to ensure that `alloc_system` **fails**
+// here (specifically the system allocator), but currently jemalloc is
+// disabled on quite a few platforms (bsds, emscripten, msvc, etc). So that
+// this test still passes on those platforms, we link in some other allocator
+// and check that it produces the same error.
+//
+// So long as we run CI on linux/OSX we should be good.
+#[cfg(any(target_os = "linux", target_os = "macos"))]
 extern crate alloc_system;
+#[cfg(not(any(target_os = "linux", target_os = "macos")))]
+extern crate allocator1;
 
 fn main() {
     allocator_dylib2::foo();
 }
-
index 8c8451623d511ee2384a54982d873c064893ae7f..c1c72a5519bf10b5d81c389fad5481c8ee432db7 100644 (file)
@@ -23,6 +23,7 @@ pub fn main() {
     unsafe {
         asm!("mov $1, $0" : "=r"(x) : "r"(5));
         //~^ ERROR re-assignment of immutable variable `x`
+        //~| NOTE in this expansion of asm!
     }
     foo(x);
 }
index 6a7eaecae7f42ae6de68acd8dbd73ef1417ff0ca..ab903591fbb069b3f15a68b30cb19812965b8f20 100644 (file)
@@ -14,7 +14,7 @@ struct Foo;
 
 impl Foo {
     const bar: bool = true;
-    fn bar() {} //~ ERROR duplicate associated function
+    fn bar() {} //~ ERROR duplicate definitions
 }
 
 fn main() {}
index 7c4c5ca6b4eab9448ee66b5497f6c6fcd7d5d31f..12cab13d0b9b33bee7402b73e4f4d515b9bb2f6d 100644 (file)
@@ -20,7 +20,7 @@ struct Baz;
 
 impl Foo for Baz {
     type Bar = i16;
-    type Bar = u16; //~ ERROR duplicate associated type
+    type Bar = u16; //~ ERROR duplicate definitions
 }
 
 fn main() {
index 4c484b49024efc44a2b9445962a6a620ab70adf2..85868f5c02085df966afe230c66a480cde32f3cd 100644 (file)
@@ -19,9 +19,9 @@ trait Foo {
 
 impl Foo for () {
     type Ty = ();
-    type Ty = usize; //~ ERROR duplicate associated type
+    type Ty = usize; //~ ERROR duplicate definitions
     const BAR: u32 = 7;
-    const BAR: u32 = 8; //~ ERROR duplicate associated constant
+    const BAR: u32 = 8; //~ ERROR duplicate definitions
 }
 
 fn main() {
index c5a47f3e5358f1440330edbfc7427c32d4bf7f10..48bfa84fa8666cb6a5e29db6c3c5d2d18b1554b0 100644 (file)
@@ -31,5 +31,5 @@ trait Add<RHS=Self> {
 fn ice<A>(a: A) {
     let r = loop {};
     r = r + a;
-    //~^ ERROR not implemented
+    //~^ ERROR E0277
 }
index adccd73beae2dc97651f38cdc1f844e6611cd74e..cd21fb949cb84190aa147147adff53868501b9c0 100644 (file)
@@ -24,7 +24,7 @@ pub trait GetToInt
 fn foo<G>(g: G) -> isize
     where G : GetToInt
 {
-    ToInt::to_int(&g.get()) //~ ERROR not implemented
+    ToInt::to_int(&g.get()) //~ ERROR E0277
 }
 
 fn bar<G : GetToInt>(g: G) -> isize
index 6d68da54112f24da473b778e0780319813873246..786a25500a886284c4b1bcd9fa4a9ca7f9fcb361 100644 (file)
@@ -22,21 +22,21 @@ pub trait IntoCow<'a, B: ?Sized> {
     fn into_cow(self) -> Cow<'a, B>;
 }
 
-impl<'a, B: ?Sized> IntoCow<'a, B> for Cow<'a, B> where B: ToOwned {
-//~^ ERROR E0119
+impl<'a, B: ?Sized> IntoCow<'a, B> for <B as ToOwned>::Owned where B: ToOwned {
     fn into_cow(self) -> Cow<'a, B> {
-        self
+        Cow(PhantomData)
     }
 }
 
-impl<'a, B: ?Sized> IntoCow<'a, B> for <B as ToOwned>::Owned where B: ToOwned {
+impl<'a, B: ?Sized> IntoCow<'a, B> for Cow<'a, B> where B: ToOwned {
 //~^ ERROR E0119
     fn into_cow(self) -> Cow<'a, B> {
-        Cow(PhantomData)
+        self
     }
 }
 
 impl<'a, B: ?Sized> IntoCow<'a, B> for &'a B where B: ToOwned {
+//~^ ERROR E0119
     fn into_cow(self) -> Cow<'a, B> {
         Cow(PhantomData)
     }
index 9c173515793f44f84c47038fb8a51f0de08e8aa6..a6fcb9cff13ea58ed40f8ed2b575d952cd3d7730 100644 (file)
@@ -15,7 +15,7 @@ trait Get {
 
 trait Other {
     fn uhoh<U:Get>(&self, foo: U, bar: <Self as Get>::Value) {}
-    //~^ ERROR the trait `Get` is not implemented for the type `Self`
+    //~^ ERROR the trait bound `Self: Get` is not satisfied
 }
 
 fn main() {
index d48cff405a6371d473dfafb195d95016c9f43528..83726a1676d261911166fe0b541e9a515cdf4e54 100644 (file)
@@ -18,7 +18,7 @@ trait Foo<T> {
 
 fn f<T:Foo<isize>>(t: &T) {
     let u: <T as Foo<usize>>::Bar = t.get_bar();
-    //~^ ERROR the trait `Foo<usize>` is not implemented for the type `T`
+    //~^ ERROR the trait bound `T: Foo<usize>` is not satisfied
 }
 
 fn main() { }
index fd60896c29885b262a657fec9efa2de3a2716162..baf56ffec86921cb7a78fb6f7407cae57b3c5bdc 100644 (file)
@@ -19,7 +19,7 @@ struct Struct {
 
 impl Struct {
     fn uhoh<T>(foo: <T as Get>::Value) {}
-    //~^ ERROR the trait `Get` is not implemented for the type `T`
+    //~^ ERROR the trait bound `T: Get` is not satisfied
 }
 
 fn main() {
index bda16c8a85de15d03c00c2470688bb3cb34b158a..e0f0f3c47ae5ec312ecb2799e65716ca88623b9a 100644 (file)
@@ -25,7 +25,7 @@ trait Get {
 
 trait Other {
     fn uhoh<U:Get>(&self, foo: U, bar: <Self as Get>::Value) {}
-    //~^ ERROR the trait `Get` is not implemented for the type `Self`
+    //~^ ERROR the trait bound `Self: Get` is not satisfied
 }
 
 fn main() { }
index 0b1d6a5b71ad20e0abb8a6bc1809ab559747fcb0..ec38595e8fe06fa6e15fe6229270e8bc45b204a5 100644 (file)
@@ -25,12 +25,12 @@ trait Get {
 
 trait Other {
     fn uhoh<U:Get>(&self, foo: U, bar: <Self as Get>::Value) {}
-    //~^ ERROR the trait `Get` is not implemented for the type `Self`
+    //~^ ERROR the trait bound `Self: Get` is not satisfied
 }
 
 impl<T:Get> Other for T {
     fn uhoh<U:Get>(&self, foo: U, bar: <(T, U) as Get>::Value) {}
-    //~^ ERROR the trait `Get` is not implemented for the type `(T, U)`
+    //~^ ERROR the trait bound `(T, U): Get` is not satisfied
 }
 
 fn main() { }
index c9374d42938002979a871480b1e1b94987ea567b..0c077e37e43bebe23675326ecb13a34160579db6 100644 (file)
@@ -38,12 +38,12 @@ pub fn f1_int_uint() {
 
 pub fn f1_uint_uint() {
     f1(2u32, 4u32);
-    //~^ ERROR the trait `Foo` is not implemented
+    //~^ ERROR `u32: Foo` is not satisfied
 }
 
 pub fn f1_uint_int() {
     f1(2u32, 4i32);
-    //~^ ERROR the trait `Foo` is not implemented
+    //~^ ERROR `u32: Foo` is not satisfied
 }
 
 pub fn f2_int() {
index e4b97ab5933cebe0bb2451a7f4fe0c6b9ffa49d3..f1827022964169293e55e73260846c06158fca5e 100644 (file)
@@ -14,7 +14,7 @@ trait Get {
 }
 
 fn foo<T:Get>(t: T) {
-    let x = t.get(); //~ ERROR the trait `core::marker::Sized` is not implemented
+    let x = t.get(); //~ ERROR `<T as Get>::Value: std::marker::Sized` is not
 }
 
 fn main() {
index a9e5c957b89d4735827153a24af639f83f7c058f..f05c8c31f10245edf5f8bf68d59e14fa8e72ce3b 100644 (file)
@@ -10,8 +10,8 @@
 
 static i: String = 10;
 //~^ ERROR mismatched types
-//~| expected `collections::string::String`
+//~| expected `std::string::String`
 //~| found `_`
-//~| expected struct `collections::string::String`
+//~| expected struct `std::string::String`
 //~| found integral variable
 fn main() { println!("{}", i); }
diff --git a/src/test/compile-fail/bad-intrinsic-monomorphization.rs b/src/test/compile-fail/bad-intrinsic-monomorphization.rs
new file mode 100644 (file)
index 0000000..049552a
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(repr_simd, platform_intrinsics, rustc_attrs, core_intrinsics)]
+#![allow(warnings)]
+
+// Bad monomorphizations could previously cause LLVM asserts even though the
+// error was caught in the compiler.
+
+extern "platform-intrinsic" {
+    fn simd_add<T>(x: T, y: T) -> T;
+}
+
+use std::intrinsics;
+
+#[derive(Copy, Clone)]
+struct Foo(i64);
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_cttz(v: Foo) -> Foo {
+    intrinsics::cttz(v)
+    //~^ ERROR `cttz` intrinsic: expected basic integer type, found `Foo`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_fadd_fast(a: Foo, b: Foo) -> Foo {
+    intrinsics::fadd_fast(a, b)
+    //~^ ERROR `fadd_fast` intrinsic: expected basic float type, found `Foo`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_simd_add(a: Foo, b: Foo) -> Foo {
+    simd_add(a, b)
+    //~^ ERROR `simd_add` intrinsic: expected SIMD input type, found non-SIMD `Foo`
+}
+
+fn main() {}
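
For contrast with the rejected monomorphizations above, a hedged sketch of the accepted counterpart: the same kind of intrinsic instantiated at a basic integer type rather than the `Foo` newtype (values and names are illustrative only):

    #![feature(core_intrinsics)]
    use std::intrinsics;

    fn main() {
        // `cttz` on a plain integer type is accepted; wrapping the value in a
        // newtype like `Foo` above is what triggers the monomorphization error.
        let trailing_zeros = unsafe { intrinsics::cttz(8u32) };
        assert_eq!(trailing_zeros as u32, 2);
    }
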
index 2129d4fbd505578ff2242fc7685c04eaa7e0704c..5be90f05018331389eddcbee6a33e2a9dbfa9061 100644 (file)
@@ -9,7 +9,7 @@
 // except according to those terms.
 
 fn foo<T:'static>() {
-    1.bar::<T>(); //~ ERROR `core::marker::Send` is not implemented
+    1.bar::<T>(); //~ ERROR `T: std::marker::Send` is not satisfied
 }
 
 trait bar {
index edc118cb0399b1f51851b60af703579e41998c00..0cd3a8853185fade135c0916d475447724a754d7 100644 (file)
@@ -8,6 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// error-pattern: unresolved name
+// error-pattern: failed to resolve. Use of undeclared type or module `thing`
 
 fn main() { let foo = thing::len(Vec::new()); }
index bfe9d7403399d10cf1dbc705c3de3c5d8ceba898..f62404e60e69e55d7dad756cabd9d2e620e34999 100644 (file)
@@ -12,7 +12,7 @@ trait Trait {}
 
 pub fn main() {
     let x: Vec<Trait + Sized> = Vec::new();
-    //~^ ERROR the trait `core::marker::Sized` is not implemented
-    //~| ERROR the trait `core::marker::Sized` is not implemented
-    //~| ERROR the trait `core::marker::Sized` is not implemented
+    //~^ ERROR `Trait + Sized: std::marker::Sized` is not satisfied
+    //~| ERROR `Trait + Sized: std::marker::Sized` is not satisfied
+    //~| ERROR `Trait + Sized: std::marker::Sized` is not satisfied
 }
index a3139905244c6aa4ec9a0216a31164bacaf1bd17..c1547dd82b3b2e5a83c6904a2d68a2a836b33abb 100644 (file)
@@ -10,6 +10,5 @@
 
 fn foo<T>() {
     fn bar(b: T) { } //~ ERROR can't use type parameters from outer
-    //~^ ERROR type name `T` is undefined or not in scope
 }
 fn main() { }
index 58cacc0b9f3338fe88b8b825dd99a14ef19739c0..3f26e3ce904cf68b37944d2075ba42acde5a0b02 100644 (file)
@@ -8,6 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// error-pattern:`^` cannot be applied to type `collections::string::String`
+// error-pattern:`^` cannot be applied to type `std::string::String`
 
 fn main() { let x = "a".to_string() ^ "b".to_string(); }
index d4adaa042b2b55212ef369161d326a308e97c5f8..03af0d51ec296350e73ba9aa9cc07a09aa734da9 100644 (file)
@@ -14,7 +14,7 @@ fn main() {
     {
         struct Bar;
         use foo::Bar;
-        //~^ ERROR import `Bar` conflicts with type in this module
-        //~^^ ERROR import `Bar` conflicts with value in this module
+        //~^ ERROR a type named `Bar` has already been defined in this block
+        //~^^ ERROR a value named `Bar` has already been defined in this block
     }
 }
index 24a1e4e24d81a959f3b29345ad3bf597b454e144..930f769771d58275ad0be5a47fd3ad3d5ebb1fe5 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-mod foo { struct bar; }
+mod foo { pub struct bar; }
 
 fn main() {
     let bar = 5;
index 9f21d6a9234089a049ae13e8f4a343e54b80821a..b08c78e9060dec4e2518424a178bb2858d829350 100644 (file)
@@ -8,8 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-mod foo { pub mod foo {  } }
+mod foo { pub mod foo {  } } //~ NOTE previous definition of `foo` here
 
-use foo::foo; //~ ERROR import `foo` conflicts with existing submodule
+use foo::foo; //~ ERROR a module named `foo` has already been defined in this module
 
 fn main() {}
index c980e77df6f967c38a7f858e586bab4c857a44db..7c3d632078fe24f23b39aa4979519e4877b2fda4 100644 (file)
@@ -54,80 +54,97 @@ fn borrow_after_move() {
 fn move_after_borrow() {
     let a: Box<_> = box B { x: box 0, y: box 1 };
     let _x = &a.x;
+    //~^ NOTE borrow of `a.x` occurs here
     let _y = a.y; //~ ERROR cannot move
 }
 
 fn copy_after_mut_borrow() {
     let mut a: Box<_> = box A { x: box 0, y: 1 };
     let _x = &mut a.x;
+    //~^ NOTE borrow of `a.x` occurs here
     let _y = a.y; //~ ERROR cannot use
 }
 
 fn move_after_mut_borrow() {
     let mut a: Box<_> = box B { x: box 0, y: box 1 };
     let _x = &mut a.x;
+    //~^ NOTE borrow of `a.x` occurs here
     let _y = a.y; //~ ERROR cannot move
 }
 
 fn borrow_after_mut_borrow() {
     let mut a: Box<_> = box A { x: box 0, y: 1 };
     let _x = &mut a.x;
+    //~^ NOTE previous borrow of `a` occurs here (through borrowing `a.x`);
     let _y = &a.y; //~ ERROR cannot borrow
 }
+//~^ NOTE previous borrow ends here
 
 fn mut_borrow_after_borrow() {
     let mut a: Box<_> = box A { x: box 0, y: 1 };
     let _x = &a.x;
+    //~^ NOTE previous borrow of `a` occurs here (through borrowing `a.x`)
     let _y = &mut a.y; //~ ERROR cannot borrow
 }
+//~^ NOTE previous borrow ends here
 
 fn copy_after_move_nested() {
     let a: Box<_> = box C { x: box A { x: box 0, y: 1 }, y: 2 };
     let _x = a.x.x;
+    //~^ NOTE `a.x.x` moved here because it has type `Box<isize>`, which is moved by default
     let _y = a.y; //~ ERROR use of collaterally moved
 }
 
 fn move_after_move_nested() {
     let a: Box<_> = box D { x: box A { x: box 0, y: 1 }, y: box 2 };
     let _x = a.x.x;
+    //~^ NOTE `a.x.x` moved here because it has type `Box<isize>`, which is moved by default
     let _y = a.y; //~ ERROR use of collaterally moved
 }
 
 fn borrow_after_move_nested() {
     let a: Box<_> = box C { x: box A { x: box 0, y: 1 }, y: 2 };
     let _x = a.x.x;
+    //~^ NOTE `a.x.x` moved here because it has type `Box<isize>`, which is moved by default
     let _y = &a.y; //~ ERROR use of collaterally moved
 }
 
 fn move_after_borrow_nested() {
     let a: Box<_> = box D { x: box A { x: box 0, y: 1 }, y: box 2 };
     let _x = &a.x.x;
+    //~^ NOTE borrow of `a.x.x` occurs here
     let _y = a.y; //~ ERROR cannot move
 }
 
 fn copy_after_mut_borrow_nested() {
     let mut a: Box<_> = box C { x: box A { x: box 0, y: 1 }, y: 2 };
     let _x = &mut a.x.x;
+    //~^ NOTE borrow of `a.x.x` occurs here
     let _y = a.y; //~ ERROR cannot use
 }
 
 fn move_after_mut_borrow_nested() {
     let mut a: Box<_> = box D { x: box A { x: box 0, y: 1 }, y: box 2 };
     let _x = &mut a.x.x;
+    //~^ NOTE borrow of `a.x.x` occurs here
     let _y = a.y; //~ ERROR cannot move
 }
 
 fn borrow_after_mut_borrow_nested() {
     let mut a: Box<_> = box C { x: box A { x: box 0, y: 1 }, y: 2 };
     let _x = &mut a.x.x;
+    //~^ NOTE previous borrow of `a.x.x` occurs here; the mutable borrow prevents
     let _y = &a.y; //~ ERROR cannot borrow
 }
+//~^ NOTE previous borrow ends here
 
 fn mut_borrow_after_borrow_nested() {
     let mut a: Box<_> = box C { x: box A { x: box 0, y: 1 }, y: 2 };
     let _x = &a.x.x;
+    //~^ NOTE previous borrow of `a.x.x` occurs here; the immutable borrow prevents
     let _y = &mut a.y; //~ ERROR cannot borrow
 }
+//~^ NOTE previous borrow ends here
 
 fn main() {
     copy_after_move();
index d760f3db0c2cc9b85370bc03f11365d3d78c778b..7e9d448275dc7697d792abce8cba920220676ce8 100644 (file)
@@ -12,6 +12,7 @@ fn f() {
     let x = [1].iter(); //~ ERROR borrowed value does not live long enough
                          //~^ NOTE reference must be valid for the block suffix following statement
                          //~^^ HELP consider using a `let` binding to increase its lifetime
+                         //~^^^ NOTE ...but borrowed value is only valid for the statement at 12:4
 }
 
 fn main() {
index 61bf2c11a1f72d1b96d371f762d352e61947ae1d..2b1ff47ee3d94c3e8312fc2f141d0f846a69595f 100644 (file)
@@ -13,6 +13,7 @@ fn main() {
     // Original borrow ends at end of function
     let mut x = 1;
     let y = &mut x;
+    //~^ previous borrow of `x` occurs here; the mutable borrow prevents
     let z = &x; //~ ERROR cannot borrow
 }
 //~^ NOTE previous borrow ends here
@@ -23,6 +24,7 @@ fn foo() {
             // Original borrow ends at end of match arm
             let mut x = 1;
             let y = &x;
+            //~^ previous borrow of `x` occurs here; the immutable borrow prevents
             let z = &mut x; //~ ERROR cannot borrow
         }
      //~^ NOTE previous borrow ends here
@@ -35,6 +37,7 @@ fn bar() {
     || {
         let mut x = 1;
         let y = &mut x;
+        //~^ previous borrow of `x` occurs here; the mutable borrow prevents
         let z = &mut x; //~ ERROR cannot borrow
     };
  //~^ NOTE previous borrow ends here
index a69ce0cb365c765a07cf5d842edb9b5e4fda2455..1a21b03a457a8325a95376e2bf642c8ae9e436f8 100644 (file)
@@ -17,6 +17,7 @@ fn a() {
     let mut vec = [box 1, box 2, box 3];
     match vec {
         [box ref _a, _, _] => {
+        //~^ borrow of `vec[..]` occurs here
             vec[0] = box 4; //~ ERROR cannot assign
         }
     }
@@ -27,6 +28,7 @@ fn b() {
     let vec: &mut [Box<isize>] = &mut vec;
     match vec {
         [_b..] => {
+        //~^ borrow of `vec[..]` occurs here
             vec[0] = box 4; //~ ERROR cannot assign
         }
     }
@@ -48,6 +50,7 @@ fn c() {
         _ => {}
     }
     let a = vec[0]; //~ ERROR cannot move out
+    //~^ NOTE attempting to move value to here
 }
 
 fn d() {
@@ -59,6 +62,7 @@ fn d() {
         _ => {}
     }
     let a = vec[0]; //~ ERROR cannot move out
+    //~^ NOTE attempting to move value to here
 }
 
 fn e() {
index cd215a7373780170e02eed6cb3e3aaf30ac787b6..8d5d8e8dc9b7dbec63d832211807dd4480f821bb 100644 (file)
@@ -13,9 +13,9 @@
 
 trait Foo : Send+Sync { }
 
-impl <T: Sync+'static> Foo for (T,) { } //~ ERROR the trait `core::marker::Send` is not implemented
+impl <T: Sync+'static> Foo for (T,) { } //~ ERROR `T: std::marker::Send` is not satisfied
 
-impl <T: Send> Foo for (T,T) { } //~ ERROR the trait `core::marker::Sync` is not implemented
+impl <T: Send> Foo for (T,T) { } //~ ERROR `T: std::marker::Sync` is not satisfied
 
 impl <T: Send+Sync> Foo for (T,T,T) { } // (ok)
 
index 138fcfa8b64fe06c1ce668c766e77c89b24acf0c..de2084c4e8187baa3d52ae9dc0323d8c128ba662 100644 (file)
@@ -22,6 +22,6 @@ struct X<T>(T);
 impl <T:Sync> RequiresShare for X<T> { }
 
 impl <T:Sync+'static> RequiresRequiresShareAndSend for X<T> { }
-//~^ ERROR the trait `core::marker::Send` is not implemented
+//~^ ERROR `T: std::marker::Send` is not satisfied
 
 fn main() { }
index c3fb6a1be8797a811819022975446ca9fabf5711..6dc5f39cb30dfa05a8cc741781deed0fd0bffefe 100644 (file)
@@ -14,6 +14,6 @@
 trait Foo : Send { }
 
 impl Foo for std::rc::Rc<i8> { }
-//~^ ERROR the trait `core::marker::Send` is not implemented
+//~^ ERROR `std::rc::Rc<i8>: std::marker::Send` is not satisfied
 
 fn main() { }
index 534bb6d37ec39104bdb35d53ee08069c01c74e0c..d4bb8de13d056fc84ff13e5eb52853f66520524b 100644 (file)
@@ -12,6 +12,6 @@
 
 trait Foo : Send { }
 
-impl <T: Sync+'static> Foo for T { } //~ ERROR the trait `core::marker::Send` is not implemented
+impl <T: Sync+'static> Foo for T { } //~ ERROR `T: std::marker::Send` is not satisfied
 
 fn main() { }
index 52a4950022d1e13364ae8a26ac5edac2b50e5e8f..4764ae380ff44ce9bcd7950df8d5ea7459a05228 100644 (file)
@@ -12,4 +12,5 @@ fn main() {
     let u = 5 as bool;
     //~^ ERROR cannot cast as `bool`
     //~^^ HELP compare with zero instead
+    //~^^^ HELP run `rustc --explain E0054` to see a detailed explanation
 }
index d14b0fa9e6602a397e3a715961b44370909aa0df..dcd49e34bb26c9f0a7678be2871f27a502cba55f 100644 (file)
@@ -61,9 +61,11 @@ fn main()
     let _ = 3 as bool;
     //~^ ERROR cannot cast as `bool`
     //~^^ HELP compare with zero
+    //~^^^ HELP run `rustc --explain E0054` to see a detailed explanation
     let _ = E::A as bool;
     //~^ ERROR cannot cast as `bool`
     //~^^ HELP compare with zero
+    //~^^^ HELP run `rustc --explain E0054` to see a detailed explanation
     let _ = 0x61u32 as char; //~ ERROR only `u8` can be cast
 
     let _ = false as f32;
@@ -87,9 +89,12 @@ fn main()
     //~^^ HELP through a usize first
 
     let _ = 42usize as *const [u8]; //~ ERROR casting
-    let _ = v as *const [u8]; //~ ERROR casting
+    let _ = v as *const [u8]; //~ ERROR cannot cast
     let _ = fat_v as *const Foo;
-    //~^ ERROR `core::marker::Sized` is not implemented for the type `[u8]`
+    //~^ ERROR the trait bound `[u8]: std::marker::Sized` is not satisfied
+    //~^^ HELP run `rustc --explain E0277` to see a detailed explanation
+    //~^^^ NOTE `[u8]` does not have a constant size known at compile-time
+    //~^^^^ NOTE required for the cast to the object type `Foo`
     let _ = foo as *const str; //~ ERROR casting
     let _ = foo as *mut str; //~ ERROR casting
     let _ = main as *mut str; //~ ERROR casting
@@ -101,7 +106,10 @@ fn main()
 
     let a : *const str = "hello";
     let _ = a as *const Foo;
-    //~^ ERROR `core::marker::Sized` is not implemented for the type `str`
+    //~^ ERROR the trait bound `str: std::marker::Sized` is not satisfied
+    //~^^ HELP run `rustc --explain E0277` to see a detailed explanation
+    //~^^^ NOTE `str` does not have a constant size known at compile-time
+    //~^^^^ NOTE required for the cast to the object type `Foo`
 
     // check no error cascade
     let _ = main.f as *const u32; //~ ERROR attempted access of field
index 4c27d57008d040c68584c1a82c93b01608094fd8..8868a5a95498e72f4fca515ff3e491e8f8e1c754 100644 (file)
@@ -12,7 +12,7 @@
 
 // compile-flags: --cfg ""
 
-// error-pattern: expected ident, found
+// error-pattern: expected identifier, found
 
 pub fn main() {
 }
index b88040cbd54a622ec02a824f06b775e286119b8c..b9224e7be7f12eddf404f034462e1ccabf2f7305 100644 (file)
@@ -13,7 +13,7 @@ struct X<F> where F: FnOnce() + 'static + Send {
 }
 
 fn foo<F>(blk: F) -> X<F> where F: FnOnce() + 'static {
-    //~^ ERROR the trait `core::marker::Send` is not implemented for the type
+    //~^ ERROR `F: std::marker::Send` is not satisfied
     return X { field: blk };
 }
 
index fadaa84f19db535b6ef5096b9bc0d6dbc23f1aea..d3339c4845ab276b28b9794861486f2ca7233f56 100644 (file)
@@ -21,7 +21,7 @@ fn give_any<F>(f: F) where F: FnOnce() {
 
 fn give_owned<F>(f: F) where F: FnOnce() + Send {
     take_any(f);
-    take_const_owned(f); //~ ERROR the trait `core::marker::Sync` is not implemented for the type
+    take_const_owned(f); //~ ERROR `F: std::marker::Sync` is not satisfied
 }
 
 fn main() {}
index b771b959d3e507ef1cf5001777109b9f1cef7e94..434d77828b44ae203c3632f44b7a384c8d22b9a1 100644 (file)
@@ -27,11 +27,11 @@ impl Even for isize { }
 
 impl Odd for usize { }
 
-impl<T:Even> MyTrait for T { //~ ERROR E0119
+impl<T:Even> MyTrait for T {
     fn get(&self) -> usize { 0 }
 }
 
-impl<T:Odd> MyTrait for T {
+impl<T:Odd> MyTrait for T { //~ ERROR E0119
     fn get(&self) -> usize { 0 }
 }
 
index d3b0e7f10b91bc815a32a1f21384bc2bfd9fd5e9..7ad5cd71ca8c2a0e343db9a8bed5998ee7ba60d1 100644 (file)
@@ -23,11 +23,11 @@ trait Even {}
 
 trait Odd {}
 
-impl<T:Even> MyTrait for T { //~ ERROR E0119
+impl<T:Even> MyTrait for T {
     fn get(&self) -> usize { 0 }
 }
 
-impl<T:Odd> MyTrait for T {
+impl<T:Odd> MyTrait for T { //~ ERROR E0119
     fn get(&self) -> usize { 0 }
 }
 
index 7b60a5ecbd71f4ac8533599cfb20cb73c4b42a71..1defe6c8b20e3e16a53ca178db9e42e59a39729c 100644 (file)
@@ -18,7 +18,7 @@ trait MyTrait<T> {
     fn get(&self) -> T;
 }
 
-impl<T> MyTrait<T> for T { //~ ERROR E0119
+impl<T> MyTrait<T> for T {
     fn get(&self) -> T {
         panic!()
     }
@@ -29,7 +29,7 @@ struct MyType {
     dummy: usize
 }
 
-impl MyTrait<MyType> for MyType {
+impl MyTrait<MyType> for MyType { //~ ERROR E0119
     fn get(&self) -> usize { (*self).clone() }
 }
 
index eeaa68677eb670c37c44e7852401966608bfcbb3..5c5c4d32d675cd7ac1a4c37a180198ffa0771676 100644 (file)
@@ -19,7 +19,7 @@ trait MyTrait {
     fn get(&self) -> usize;
 }
 
-impl<T:OtherTrait> MyTrait for T { //~ ERROR E0119
+impl<T:OtherTrait> MyTrait for T {
     fn get(&self) -> usize { 0 }
 }
 
@@ -27,7 +27,7 @@ struct MyType {
     dummy: usize
 }
 
-impl MyTrait for MyType {
+impl MyTrait for MyType { //~ ERROR E0119
     fn get(&self) -> usize { self.dummy }
 }
 
index d218b64af05270f0dec430b4b1001d410edfd0e9..57d71b44b0f6b29aa5452a08183ed5bddecad80d 100644 (file)
@@ -18,7 +18,7 @@ trait MyTrait {
     fn get(&self) -> usize;
 }
 
-impl<T> MyTrait for T { //~ ERROR E0119
+impl<T> MyTrait for T {
     fn get(&self) -> usize { 0 }
 }
 
@@ -26,7 +26,7 @@ struct MyType {
     dummy: usize
 }
 
-impl MyTrait for MyType {
+impl MyTrait for MyType { //~ ERROR E0119
     fn get(&self) -> usize { self.dummy }
 }
 
index 344ec89d25de915e58de8021481d9ce2a632df69..afc3b8d4ccddcbc7ed5a3395f7b4d9caf7caa02d 100644 (file)
@@ -15,14 +15,12 @@ trait MyTrait {}
 struct TestType<T>(::std::marker::PhantomData<T>);
 
 unsafe impl<T: MyTrait+'static> Send for TestType<T> {}
-//~^ ERROR conflicting implementations of trait `core::marker::Send`
-//~^^ ERROR conflicting implementations of trait `core::marker::Send`
 
 impl<T: MyTrait> !Send for TestType<T> {}
-//~^ ERROR conflicting implementations of trait `core::marker::Send`
+//~^ ERROR conflicting implementations of trait `std::marker::Send`
 
 unsafe impl<T:'static> Send for TestType<T> {}
-//~^ ERROR error: conflicting implementations of trait `core::marker::Send`
+//~^ ERROR error: conflicting implementations of trait `std::marker::Send`
 
 impl !Send for TestType<i32> {}
 
diff --git a/src/test/compile-fail/coherence-cow-1.rs b/src/test/compile-fail/coherence-cow-1.rs
deleted file mode 100644 (file)
index 530bbf5..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// aux-build:coherence_lib.rs
-
-// pretty-expanded FIXME #23616
-
-// Test that the `Pair` type reports an error if it contains type
-// parameters, even when they are covered by local types. This test
-// was originally intended to test the opposite, but the rules changed
-// with RFC 1023 and this became illegal.
-
-extern crate coherence_lib as lib;
-use lib::{Remote,Pair};
-
-pub struct Cover<T>(T);
-
-impl<T> Remote for Pair<T,Cover<T>> { }
-//~^ ERROR E0210
-
-fn main() { }
diff --git a/src/test/compile-fail/coherence-cow-2.rs b/src/test/compile-fail/coherence-cow-2.rs
deleted file mode 100644 (file)
index 52abcea..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// aux-build:coherence_lib.rs
-
-// Test that the `Pair` type reports an error if it contains type
-// parameters, even when they are covered by local types. This test
-// was originally intended to test the opposite, but the rules changed
-// with RFC 1023 and this became illegal.
-
-// pretty-expanded FIXME #23616
-
-extern crate coherence_lib as lib;
-use lib::{Remote,Pair};
-
-pub struct Cover<T>(T);
-
-impl<T> Remote for Pair<Cover<T>,T> { } //~ ERROR E0210
-
-fn main() { }
diff --git a/src/test/compile-fail/coherence-cow-no-cover.rs b/src/test/compile-fail/coherence-cow-no-cover.rs
deleted file mode 100644 (file)
index cd32e79..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// aux-build:coherence_lib.rs
-
-// Test that it's not ok for T to appear uncovered
-
-extern crate coherence_lib as lib;
-use lib::{Remote,Pair};
-
-pub struct Cover<T>(T);
-
-impl<T,U> Remote for Pair<Cover<T>,U> { }
-//~^ ERROR type parameter `T` must be used as the type parameter for some local type
-
-fn main() { }
diff --git a/src/test/compile-fail/coherence-cow.rs b/src/test/compile-fail/coherence-cow.rs
new file mode 100644 (file)
index 0000000..6a2d1ba
--- /dev/null
@@ -0,0 +1,37 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// revisions: a b c
+
+// aux-build:coherence_lib.rs
+
+// pretty-expanded FIXME #23616
+
+// Test that the `Pair` type reports an error if it contains type
+// parameters, even when they are covered by local types. This test
+// was originally intended to test the opposite, but the rules changed
+// with RFC 1023 and this became illegal.
+
+extern crate coherence_lib as lib;
+use lib::{Remote,Pair};
+
+pub struct Cover<T>(T);
+
+#[cfg(a)]
+impl<T> Remote for Pair<T,Cover<T>> { } //[a]~ ERROR E0210
+
+#[cfg(b)]
+impl<T> Remote for Pair<Cover<T>,T> { } //[b]~ ERROR E0210
+
+#[cfg(c)]
+impl<T,U> Remote for Pair<Cover<T>,U> { }
+//[c]~^ ERROR type parameter `T` must be used as the type parameter for some local type
+
+fn main() { }
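
The new coherence-cow.rs folds the three deleted single-case files into one by using compiletest's `revisions` header: the test is compiled once per listed revision with the matching `--cfg`, and bracketed annotations such as `//[a]~ ERROR` only apply in that revision. A minimal hedged sketch of the mechanism with made-up revision names:

    // revisions: one two
    #[cfg(one)]
    fn f() -> bool { 1 }    //[one]~ ERROR mismatched types
    #[cfg(two)]
    fn f() -> bool { "x" }  //[two]~ ERROR mismatched types

    fn main() { f(); }
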
index a020b518d8273a9ad0e1d6b1812f07cd7c9900ce..9f74afbb2b3b5493ba7b30472b627a51e7685f18 100644 (file)
@@ -8,8 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// Regression test for #3512 - conflicting trait impls in different crates should give a
-// 'conflicting implementations' error message.
+// The error here is strictly due to orphan rules; the impl here
+// generalizes the one upstream
 
 // aux-build:trait_impl_conflict.rs
 extern crate trait_impl_conflict;
@@ -17,7 +17,6 @@ use trait_impl_conflict::Foo;
 
 impl<A> Foo for A {
     //~^ ERROR type parameter `A` must be used as the type parameter for some local type
-    //~^^ ERROR E0119
 }
 
 fn main() {
index 0705702b031ee007b2731320f50b4d4885908fbe..3d109de76ccd1c21908c0ff5a46dbef64fd0a072 100644 (file)
@@ -15,7 +15,7 @@ trait MyTrait {}
 impl MyTrait for .. {}
 
 impl MyTrait for .. {}
-//~^ ERROR conflicting implementations of trait `MyTrait`
+//~^ ERROR redundant default implementations of trait `MyTrait`
 
 trait MySafeTrait {}
 
diff --git a/src/test/compile-fail/coherence-no-direct-lifetime-dispatch.rs b/src/test/compile-fail/coherence-no-direct-lifetime-dispatch.rs
new file mode 100644 (file)
index 0000000..6de338f
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that you cannot *directly* dispatch on lifetime requirements
+
+trait MyTrait {}
+
+impl<T> MyTrait for T {}
+impl<T: 'static> MyTrait for T {} //~ ERROR E0119
+
+fn main() {}
index 3fd635b3d616f33840d8571ef21d620c7f22ebc4..928ba7a36db266ddb3534eba302fbff8125d4013 100644 (file)
 trait From<U> {
 }
 
-impl <T> From<T> for T { //~ ERROR E0119
+impl <T> From<T> for T {
 }
 
-impl <T11, U11> From<(U11,)> for (T11,) {
+impl <T11, U11> From<(U11,)> for (T11,) { //~ ERROR E0119
 }
 
 fn main() { }
index d7f060a3bfe73dfb87be02c0858c64ef255a8e51..51d7c3e8b4cb17962f0809276d5abf49b84a85af 100644 (file)
@@ -14,6 +14,6 @@
 
 pub trait Sugar { fn dummy(&self) { } }
 pub trait Sweet { fn dummy(&self) { } }
-impl<T:Sugar> Sweet for T { } //~ ERROR E0119
-impl<U:Sugar> Sweet for Box<U> { }
+impl<T:Sugar> Sweet for T { }
+impl<U:Sugar> Sweet for Box<U> { } //~ ERROR E0119
 fn main() { }
index 4f1092f960e0d2d41ea5e4f55297992a5a9e57e8..0ae8135221c21fc9a54f3094a711234c17ce9bff 100644 (file)
 
 trait Foo {}
 
-impl<T> Foo for T {} //~ ERROR conflicting implementations of trait `Foo`:
-impl<U> Foo for U {}
+impl<T> Foo for T {}
+impl<U> Foo for U {} //~ ERROR conflicting implementations of trait `Foo`:
 
 trait Bar {}
 
-impl<T> Bar for T {} //~ ERROR conflicting implementations of trait `Bar` for type `u8`:
-impl Bar for u8 {}
+impl<T> Bar for (T, u8) {}
+impl<T> Bar for (u8, T) {} //~ ERROR conflicting implementations of trait `Bar` for type `(u8, u8)`:
 
 trait Baz<T> {}
 
-impl<T, U> Baz<U> for T {} //~ ERROR conflicting implementations of trait `Baz<_>` for type `u8`:
-impl<T> Baz<T> for u8 {}
+impl<T> Baz<u8> for T {}
+impl<T> Baz<T> for u8 {} //~ ERROR conflicting implementations of trait `Baz<u8>` for type `u8`:
 
-trait Quux<T> {}
+trait Quux<U, V> {}
 
-impl<T, U> Quux<U> for T {} //~ ERROR conflicting implementations of trait `Quux<_>`:
-impl<T> Quux<T> for T {}
-
-trait Qaar<T> {}
-
-impl<T, U> Qaar<U> for T {} //~ ERROR conflicting implementations of trait `Qaar<u8>`:
-impl<T> Qaar<u8> for T {}
-
-trait Qaax<T> {}
-
-impl<T, U> Qaax<U> for T {}
-//~^ ERROR conflicting implementations of trait `Qaax<u8>` for type `u32`:
-impl Qaax<u8> for u32 {}
+impl<T, U, V> Quux<U, V> for T {}
+impl<T, U> Quux<U, U> for T {} //~ ERROR conflicting implementations of trait `Quux<_, _>`:
+impl<T, V> Quux<T, V> for T {} //~ ERROR conflicting implementations of trait `Quux<_, _>`:
 
 fn main() {}
index 3de79454398387338cdf8498c8a94d8383692f7b..3ed3549de89aaa214b9191d281a47429c741af58 100644 (file)
@@ -21,8 +21,8 @@ pub trait Bar {
     type Output: 'static;
 }
 
-impl Foo<i32> for i32 { } //~ ERROR E0119
+impl Foo<i32> for i32 { }
 
-impl<A:Iterator> Foo<A::Item> for A { }
+impl<A:Iterator> Foo<A::Item> for A { }  //~ ERROR E0119
 
 fn main() {}
index 6880f3e9a3cc9aeecd0ad58182c3b493a03202c8..f04902a70f68c20e490b1719efaefe4b58d38096 100644 (file)
@@ -15,8 +15,8 @@ use std::marker::PhantomData;
 
 pub trait Foo<P> {}
 
-impl <P, T: Foo<P>> Foo<P> for Option<T> {} //~ ERROR E0119
+impl <P, T: Foo<P>> Foo<P> for Option<T> {}
 
-impl<T, U> Foo<T> for Option<U> { }
+impl<T, U> Foo<T> for Option<U> { } //~ ERROR E0119
 
 fn main() {}
index 2236e71b53fff1cd7aaa516e972bbc97a88ab3a2..6d3ab32f06f43d08327af11f3d2088ae8c643935 100644 (file)
@@ -16,9 +16,9 @@ pub trait Bar {
     type Output: 'static;
 }
 
-impl Foo<i32> for i32 { } //~ ERROR E0119
+impl Foo<i32> for i32 { }
 
-impl<A:Bar> Foo<A::Output> for A { }
+impl<A:Bar> Foo<A::Output> for A { } //~ ERROR E0119
 
 impl Bar for i32 {
     type Output = i32;
index 87b007fdd698239d3af23c2558975c29ad994eb2..7807f93df1a673d8810010f82c027e0d014dd49a 100644 (file)
@@ -18,11 +18,11 @@ trait MyTrait {
     fn get(&self) -> usize;
 }
 
-impl<T> MyTrait for (T,T) { //~ ERROR E0119
+impl<T> MyTrait for (T,T) {
     fn get(&self) -> usize { 0 }
 }
 
-impl<A,B> MyTrait for (A,B) {
+impl<A,B> MyTrait for (A,B) { //~ ERROR E0119
     fn get(&self) -> usize { self.dummy }
 }
 
index a6b62d17bc4e6aa9ac34540d80911fa8b0865294..8e3e3f31cb5f1434356b5b9aa5bfd199a475cf8f 100644 (file)
@@ -21,10 +21,10 @@ struct MyType { x: i32 }
 
 trait MyTrait { }
 
-impl<T: lib::MyCopy> MyTrait for T { } //~ ERROR E0119
+impl<T: lib::MyCopy> MyTrait for T { }
 
 // Tuples are not fundamental.
-impl MyTrait for lib::MyFundamentalStruct<(MyType,)> { }
+impl MyTrait for lib::MyFundamentalStruct<(MyType,)> { } //~ ERROR E0119
 
 #[rustc_error]
 fn main() { }
index 5a9f440f8bb6a98900bb406b1976d6a056232c1b..35bc17b8e887036a93bb4e1675aca4786795e1d8 100644 (file)
@@ -18,7 +18,7 @@ extern crate coherence_copy_like_lib as lib;
 struct MyType { x: i32 }
 
 trait MyTrait { }
-impl<T: lib::MyCopy> MyTrait for T { } //~ ERROR E0119
+impl<T: lib::MyCopy> MyTrait for T { }
 
 // `MyStruct` is not declared fundamental, therefore this would
 // require that
@@ -26,6 +26,6 @@ impl<T: lib::MyCopy> MyTrait for T { } //~ ERROR E0119
 //     MyStruct<MyType>: !MyTrait
 //
 // which we cannot approve.
-impl MyTrait for lib::MyStruct<MyType> { }
+impl MyTrait for lib::MyStruct<MyType> { } //~ ERROR E0119
 
 fn main() { }
index ee0d5550fd61f0ea8e514bec49a723935cf31ac6..a70cc92955fb0d6a76e0a5672e0d4764a976d470 100644 (file)
@@ -18,13 +18,13 @@ extern crate coherence_copy_like_lib as lib;
 struct MyType { x: i32 }
 
 trait MyTrait { }
-impl<T: lib::MyCopy> MyTrait for T { } //~ ERROR E0119
+impl<T: lib::MyCopy> MyTrait for T { }
 
 // Tuples are not fundamental, therefore this would require that
 //
 //     (MyType,): !MyTrait
 //
 // which we cannot approve.
-impl MyTrait for (MyType,) { }
+impl MyTrait for (MyType,) { } //~ ERROR E0119
 
 fn main() { }
diff --git a/src/test/compile-fail/comm-not-freeze-receiver.rs b/src/test/compile-fail/comm-not-freeze-receiver.rs
deleted file mode 100644 (file)
index 305acfe..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use std::sync::mpsc::Receiver;
-
-fn test<T: Sync>() {}
-
-fn main() {
-    test::<Receiver<isize>>();   //~ ERROR: `core::marker::Sync` is not implemented
-}
diff --git a/src/test/compile-fail/comm-not-freeze.rs b/src/test/compile-fail/comm-not-freeze.rs
deleted file mode 100644 (file)
index de2c969..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use std::sync::mpsc::Sender;
-
-fn test<T: Sync>() {}
-
-fn main() {
-    test::<Sender<isize>>();     //~ ERROR: `core::marker::Sync` is not implemented
-}
diff --git a/src/test/compile-fail/consider-removing-last-semi.rs b/src/test/compile-fail/consider-removing-last-semi.rs
new file mode 100644 (file)
index 0000000..02148a1
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn f() -> String {  //~ ERROR E0269
+                    //~^ HELP detailed explanation
+    0u8;
+    "bla".to_string();  //~ HELP consider removing this semicolon
+}
+
+fn g() -> String {  //~ ERROR E0269
+                    //~^ HELP detailed explanation
+    "this won't work".to_string();
+    "removeme".to_string(); //~ HELP consider removing this semicolon
+}
+
+fn main() {}
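
The test above pins the E0269 diagnostic (function body does not return a value) together with its "consider removing this semicolon" help. For reference, a hedged sketch of what the suggested fix looks like: dropping the trailing semicolon turns the last statement back into the block's tail expression.

    fn g() -> String {
        // No semicolon: this call is now the value of the block, so the
        // function returns it and the E0269 error goes away.
        "this works".to_string()
    }

    fn main() {
        assert_eq!(g(), "this works");
    }
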
diff --git a/src/test/compile-fail/const-err-early.rs b/src/test/compile-fail/const-err-early.rs
new file mode 100644 (file)
index 0000000..cdcdb91
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(const_indexing)]
+#![deny(const_err)]
+
+pub const A: i8 = -std::i8::MIN; //~ ERROR attempted to negate with overflow
+pub const B: u8 = 200u8 + 200u8; //~ ERROR attempted to add with overflow
+pub const C: u8 = 200u8 * 4; //~ ERROR attempted to multiply with overflow
+pub const D: u8 = 42u8 - (42u8 + 1); //~ ERROR attempted to subtract with overflow
+pub const E: u8 = [5u8][1]; //~ ERROR index out of bounds
+
+fn main() {
+    let _e = [6u8][1];
+}
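
const-err-early.rs pins down the compile-time overflow and out-of-bounds diagnostics under `#![deny(const_err)]`. As a point of comparison, a hedged sketch of the runtime-facing API for the same situations: the `checked_*` methods and slice `get` report the failure as `None` instead of aborting compilation (plain arithmetic on these values would also panic at runtime in debug builds):

    fn main() {
        // The same expressions the constants above reject, evaluated at
        // runtime through the checked API.
        assert_eq!(200u8.checked_add(200), None);
        assert_eq!(200u8.checked_mul(4), None);
        assert_eq!(42u8.checked_sub(43), None);
        assert_eq!([5u8].get(1), None);
    }
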
index be67e06d99fee0f0cc8fc3a22b02857cf4f23b14..45e8fc37d878b60dddd0be3089b057ea680a2522 100644 (file)
@@ -8,16 +8,14 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#[allow(exceeding_bitshifts)]
-#[deny(const_err)]
+#![feature(rustc_attrs)]
+#![allow(exceeding_bitshifts)]
 
 fn black_box<T>(_: T) {
     unimplemented!()
 }
 
-const BLA: u8 = 200u8 + 200u8;
-//~^ ERROR attempted to add with overflow
-
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     let a = -std::i8::MIN;
     //~^ WARN attempted to negate with overflow
@@ -25,10 +23,11 @@ fn main() {
     //~^ WARN attempted to add with overflow
     //~^^ WARN attempted to add with overflow
     let c = 200u8 * 4;
-    //~^ WARN attempted to mul with overflow
+    //~^ WARN attempted to multiply with overflow
     let d = 42u8 - (42u8 + 1);
-    //~^ WARN attempted to sub with overflow
-    let _e = BLA;
+    //~^ WARN attempted to subtract with overflow
+    let _e = [5u8][1];
+    //~^ ERROR const index-expr is out of bounds
     black_box(a);
     black_box(b);
     black_box(c);
index be04bc9bd3b732effbb96c0d55258242f7ed6c8d..07e27a7dc9a9a412b2eb14155044584b7980c325 100644 (file)
@@ -21,10 +21,11 @@ const NEG_128: i8 = -128;
 const NEG_NEG_128: i8 = -NEG_128;
 //~^ ERROR constant evaluation error: attempted to negate with overflow
 //~| ERROR attempted to negate with overflow
+//~| ERROR attempted to negate with overflow
 
 fn main() {
     match -128i8 {
-        NEG_NEG_128 => println!("A"),
+        NEG_NEG_128 => println!("A"), //~ NOTE in pattern here
         _ => println!("B"),
     }
 }
index c2bc5b2648af3c4d8393998c707801ab514fda41..c90ae045f96b4aa0430e2b3c020f47d33caeba1a 100644 (file)
@@ -36,4 +36,3 @@ fn main() {
 fn foo<T:fmt::Debug>(x: T) {
     println!("{:?}", x);
 }
-
index 253285d3919c21937c4c5ebb4ee90fdfc7eb6a44..5aa93cf6383fe4c37e2209f3c33cb350e1ed2681 100644 (file)
@@ -21,8 +21,9 @@ use std::{u8, u16, u32, u64, usize};
 
 const A_I8_T
     : [u32; (i8::MAX as i8 + 1u8) as usize]
-    //~^ ERROR mismatched types
-    //~| ERROR the trait `core::ops::Add<u8>` is not implemented for the type `i8`
+    //~^ ERROR mismatched types:
+    //~| expected `i8`,
+    //~| found `u8` [E0250]
     = [0; (i8::MAX as usize) + 1];
 
 fn main() {
index 2a2fc2ef080dbf30cacd440ff6d482e0c7eda65d..96013551ef4927ea17028eaa21e2ae96b31dafde 100644 (file)
@@ -8,6 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+#![feature(rustc_attrs)]
 #![allow(unused_imports)]
 
 // Note: the relevant lint pass here runs before some of the constant
@@ -23,86 +24,87 @@ const VALS_I8: (i8, i8, i8, i8) =
     (-i8::MIN,
      //~^ ERROR attempted to negate with overflow
      i8::MIN - 1,
-     //~^ ERROR attempted to sub with overflow
+     //~^ ERROR attempted to subtract with overflow
      i8::MAX + 1,
      //~^ ERROR attempted to add with overflow
      i8::MIN * 2,
-     //~^ ERROR attempted to mul with overflow
+     //~^ ERROR attempted to multiply with overflow
      );
 
 const VALS_I16: (i16, i16, i16, i16) =
     (-i16::MIN,
      //~^ ERROR attempted to negate with overflow
      i16::MIN - 1,
-     //~^ ERROR attempted to sub with overflow
+     //~^ ERROR attempted to subtract with overflow
      i16::MAX + 1,
      //~^ ERROR attempted to add with overflow
      i16::MIN * 2,
-     //~^ ERROR attempted to mul with overflow
+     //~^ ERROR attempted to multiply with overflow
      );
 
 const VALS_I32: (i32, i32, i32, i32) =
     (-i32::MIN,
      //~^ ERROR attempted to negate with overflow
      i32::MIN - 1,
-     //~^ ERROR attempted to sub with overflow
+     //~^ ERROR attempted to subtract with overflow
      i32::MAX + 1,
      //~^ ERROR attempted to add with overflow
      i32::MIN * 2,
-     //~^ ERROR attempted to mul with overflow
+     //~^ ERROR attempted to multiply with overflow
      );
 
 const VALS_I64: (i64, i64, i64, i64) =
     (-i64::MIN,
      //~^ ERROR attempted to negate with overflow
      i64::MIN - 1,
-     //~^ ERROR attempted to sub with overflow
+     //~^ ERROR attempted to subtract with overflow
      i64::MAX + 1,
      //~^ ERROR attempted to add with overflow
      i64::MAX * 2,
-     //~^ ERROR attempted to mul with overflow
+     //~^ ERROR attempted to multiply with overflow
      );
 
 const VALS_U8: (u8, u8, u8, u8) =
     (-(u8::MIN as i8) as u8,
      u8::MIN - 1,
-     //~^ ERROR attempted to sub with overflow
+     //~^ ERROR attempted to subtract with overflow
      u8::MAX + 1,
      //~^ ERROR attempted to add with overflow
      u8::MAX * 2,
-     //~^ ERROR attempted to mul with overflow
+     //~^ ERROR attempted to multiply with overflow
      );
 
 const VALS_U16: (u16, u16, u16, u16) =
     (-(u16::MIN as i16) as u16,
      u16::MIN - 1,
-     //~^ ERROR attempted to sub with overflow
+     //~^ ERROR attempted to subtract with overflow
      u16::MAX + 1,
      //~^ ERROR attempted to add with overflow
      u16::MAX * 2,
-     //~^ ERROR attempted to mul with overflow
+     //~^ ERROR attempted to multiply with overflow
      );
 
 const VALS_U32: (u32, u32, u32, u32) =
     (-(u32::MIN as i32) as u32,
      u32::MIN - 1,
-     //~^ ERROR attempted to sub with overflow
+     //~^ ERROR attempted to subtract with overflow
      u32::MAX + 1,
      //~^ ERROR attempted to add with overflow
      u32::MAX * 2,
-     //~^ ERROR attempted to mul with overflow
+     //~^ ERROR attempted to multiply with overflow
      );
 
 const VALS_U64: (u64, u64, u64, u64) =
     (-(u64::MIN as i64) as u64,
      u64::MIN - 1,
-     //~^ ERROR attempted to sub with overflow
+     //~^ ERROR attempted to subtract with overflow
      u64::MAX + 1,
      //~^ ERROR attempted to add with overflow
      u64::MAX * 2,
-     //~^ ERROR attempted to mul with overflow
+     //~^ ERROR attempted to multiply with overflow
      );
 
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     foo(VALS_I8);
     foo(VALS_I16);
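For orientation, the reworded messages above ("attempted to subtract with overflow", "attempted to multiply with overflow", and so on) are emitted while rustc evaluates these constants at compile time. A minimal sketch of one such rejection, assuming a 1.9-era compiler and following the wording shown in the annotations (illustrative only, and intentionally fails to compile):

    use std::u8;

    // Compile-time evaluation of this constant underflows, so rustc reports
    // "attempted to subtract with overflow" on this line.
    const UNDERFLOW: u8 = u8::MIN - 1;

    fn main() {
        println!("{}", UNDERFLOW);
    }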
index 44ab798f4911b4a17c9cef11952aa577f6f27ced..9fdd24c42fdbd48447d526393e541ac19312c354 100644 (file)
@@ -14,7 +14,7 @@
 struct S(i32);
 
 const CONSTANT: S = S(0);
-//~^ ERROR: constant evaluation error: call on struct [E0080]
+//~^ ERROR: unimplemented constant expression: tuple struct constructors [E0080]
 
 enum E {
     V = CONSTANT,
index 37a93ec954d8dbd2152760eeee431fdbf776b114..0d6cf3bab453fac0892aca6ff79efa9986f75507 100644 (file)
@@ -8,32 +8,30 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-const X: usize = 42 && 39; //~ ERROR: can't do this op on unsigned integrals
+const X: usize = 42 && 39; //~ ERROR: can't do this op on integrals
 const ARR: [i32; X] = [99; 34]; //~ NOTE: for array length here
 
-const X1: usize = 42 || 39; //~ ERROR: can't do this op on unsigned integrals
+const X1: usize = 42 || 39; //~ ERROR: can't do this op on integrals
 const ARR1: [i32; X1] = [99; 47]; //~ NOTE: for array length here
 
-// FIXME: the error should be `on signed integrals`
-const X2: usize = -42 || -39; //~ ERROR: can't do this op on unsigned integrals
+const X2: usize = -42 || -39; //~ ERROR: unary negation of unsigned integer
 const ARR2: [i32; X2] = [99; 18446744073709551607]; //~ NOTE: for array length here
 
-// FIXME: the error should be `on signed integrals`
-const X3: usize = -42 && -39; //~ ERROR: can't do this op on unsigned integrals
+const X3: usize = -42 && -39; //~ ERROR: unary negation of unsigned integer
 const ARR3: [i32; X3] = [99; 6]; //~ NOTE: for array length here
 
 const Y: usize = 42.0 == 42.0;
-const ARRR: [i32; Y] = [99; 1]; //~ ERROR: expected constant integer expression for array length
+const ARRR: [i32; Y] = [99; 1]; //~ ERROR: expected usize value for array length
 const Y1: usize = 42.0 >= 42.0;
-const ARRR1: [i32; Y] = [99; 1]; //~ ERROR: expected constant integer expression for array length
+const ARRR1: [i32; Y] = [99; 1]; //~ ERROR: expected usize value for array length
 const Y2: usize = 42.0 <= 42.0;
-const ARRR2: [i32; Y] = [99; 1]; //~ ERROR: expected constant integer expression for array length
+const ARRR2: [i32; Y] = [99; 1]; //~ ERROR: expected usize value for array length
 const Y3: usize = 42.0 > 42.0;
-const ARRR3: [i32; Y] = [99; 0]; //~ ERROR: expected constant integer expression for array length
+const ARRR3: [i32; Y] = [99; 0]; //~ ERROR: expected usize value for array length
 const Y4: usize = 42.0 < 42.0;
-const ARRR4: [i32; Y] = [99; 0]; //~ ERROR: expected constant integer expression for array length
+const ARRR4: [i32; Y] = [99; 0]; //~ ERROR: expected usize value for array length
 const Y5: usize = 42.0 != 42.0;
-const ARRR5: [i32; Y] = [99; 0]; //~ ERROR: expected constant integer expression for array length
+const ARRR5: [i32; Y] = [99; 0]; //~ ERROR: expected usize value for array length
 
 fn main() {
     let _ = ARR;
index 786c72b66f36fe812677a781aacf61f9f7a3d85d..9c6b774b99039811fb6482e80d2e98c649f5f6aa 100644 (file)
@@ -15,7 +15,7 @@
 const ONE: usize = 1;
 const TWO: usize = 2;
 const LEN: usize = ONE - TWO;
-//~^ ERROR array length constant evaluation error: attempted to sub with overflow [E0250]
+//~^ ERROR array length constant evaluation error: attempted to subtract with overflow [E0250]
 
 fn main() {
     let a: [i8; LEN] = unimplemented!();
index 020717dc1e18f8fbfccefd584da4e2c296e99e6c..d51f31087d0df1494c5cce1005433e6839109a92 100644 (file)
@@ -16,5 +16,5 @@ const TWO: usize = 2;
 
 fn main() {
     let a: [i8; ONE - TWO] = unimplemented!();
-    //~^ ERROR array length constant evaluation error: attempted to sub with overflow [E0250]
+    //~^ ERROR array length constant evaluation error: attempted to subtract with overflow [E0250]
 }
index 2d345d9142b0bd70f025254d7c77ff53d15e3b44..bc395af9622c5e6bc74e79896f00310fddcb8c5b 100644 (file)
@@ -9,8 +9,8 @@
 // except according to those terms.
 
 mod foo {
-    const b: u8 = 2; //~ NOTE constant defined here
-    const d: u8 = 2; //~ NOTE constant defined here
+    pub const b: u8 = 2; //~ NOTE constant defined here
+    pub const d: u8 = 2; //~ NOTE constant defined here
 }
 
 use foo::b as c; //~ NOTE constant imported here
index ecc43d21a4612cedcc6e6f16f31ca323c3f6ecf7..4567cd4a74bb2a3ae343d9aea7679b1c8324e0fa 100644 (file)
@@ -17,12 +17,12 @@ enum Cake {
 use Cake::*;
 
 const BOO: (Cake, Cake) = (Marmor, BlackForest);
-//~^ ERROR: constant evaluation error: non-constant path in constant expression [E0471]
+//~^ ERROR: constant evaluation error: unimplemented constant expression: enum variants [E0471]
 const FOO: Cake = BOO.1;
 
 const fn foo() -> Cake {
-    Marmor //~ ERROR: constant evaluation error: non-constant path in constant expression [E0471]
-    //~^ ERROR: non-constant path in constant expression
+    Marmor //~ ERROR: constant evaluation error: unimplemented constant expression: enum variants
+    //~^ ERROR: unimplemented constant expression: enum variants
 }
 
 const WORKS: Cake = Marmor;
index 8c607fc7e32b2ecbfa091fff24cdcb5c781d1888..9d3c432d14878de269cf86ba9616dc7cc7c9d7af 100644 (file)
@@ -11,7 +11,7 @@
 // Test spans of errors
 
 const TUP: (usize,) = 5 << 64;
-//~^ ERROR: attempted left shift with overflow [E0250]
+//~^ ERROR: attempted to shift left with overflow [E0250]
 const ARR: [i32; TUP.0] = [];
 
 fn main() {
index 7d4c618de665c622712d252d60417fade9974ef5..b034fedb805e343b85ae4024275185aa8f033a70 100644 (file)
@@ -23,7 +23,7 @@ trait Bar<X> { }
 
 // We don't always check where clauses for sanity, but in this case
 // wfcheck does report an error here:
-fn vacuous<A>() //~ ERROR the trait `Bar<u32>` is not implemented for the type `i32`
+fn vacuous<A>() //~ ERROR the trait bound `i32: Bar<u32>` is not satisfied
     where i32: Foo<u32, A>
 {
     // ... the original intention was to check that we don't use that
index d1ecff5984ac5d76f83f8fb01aabe8edff31fd17..b56be9e5683a851a5576eb93152faf1eba7e0e91 100644 (file)
@@ -11,7 +11,7 @@
 // Test that when a trait impl changes, fns whose body uses that trait
 // must also be recompiled.
 
-// compile-flags: -Z incr-comp
+// compile-flags: -Z query-dep-graph
 
 #![feature(rustc_attrs)]
 #![allow(warnings)]
index acd6091cbdd482c93319d7474ecaa14473dcfac5..0d6954ab9df6229e5390d647b1c0aabf62fc39c1 100644 (file)
@@ -11,7 +11,7 @@
 // Test that immediate callers have to change when callee changes, but
 // not callers' callers.
 
-// compile-flags: -Z incr-comp
+// compile-flags: -Z query-dep-graph
 
 #![feature(rustc_attrs)]
 #![allow(dead_code)]
index c16998cd33c644cfdc15d4eaef027359eb5ef2d7..235fc5051d780f78a50b91a8c06bfd8c063acbb8 100644 (file)
@@ -11,7 +11,7 @@
 // Test cases where a changing struct appears in the signature of fns
 // and methods.
 
-// compile-flags: -Z incr-comp
+// compile-flags: -Z query-dep-graph
 
 #![feature(rustc_attrs)]
 #![allow(dead_code)]
index 57e83586d8d37b79c5b32c87fb0c5081b21e8127..1afecd80ff5ad05309bc0b9c88b04cc43aff5c23 100644 (file)
@@ -11,7 +11,7 @@
 // Test that adding an impl to a trait `Foo` DOES affect functions
 // that only use `Bar` if they have methods in common.
 
-// compile-flags: -Z incr-comp
+// compile-flags: -Z query-dep-graph
 
 #![feature(rustc_attrs)]
 #![allow(dead_code)]
index ba54a056209454720f1012c6233b723140fb7ed7..2ec7573cb81dc133e32acfcbd7281fe53247d394 100644 (file)
@@ -11,7 +11,7 @@
 // Test that adding an impl to a trait `Foo` does not affect functions
 // that only use `Bar`, so long as they do not have methods in common.
 
-// compile-flags: -Z incr-comp
+// compile-flags: -Z query-dep-graph
 
 #![feature(rustc_attrs)]
 #![allow(warnings)]
index b38fdad9809ed1cb4b57f80a9d6d88e8d163216b..d87d7a6be1c280f65018f3bbbecec38514bd6a8e 100644 (file)
@@ -11,7 +11,7 @@
 // Test that when a trait impl changes, fns whose body uses that trait
 // must also be recompiled.
 
-// compile-flags: -Z incr-comp
+// compile-flags: -Z query-dep-graph
 
 #![feature(rustc_attrs)]
 #![allow(warnings)]
index 8feec12a2f76c975d9cb284db890445c65771870..079f2b52fd462a21c1107363fdca4636b34d6736 100644 (file)
@@ -10,7 +10,7 @@
 
 // Test that two unrelated functions have no trans dependency.
 
-// compile-flags: -Z incr-comp
+// compile-flags: -Z query-dep-graph
 
 #![feature(rustc_attrs)]
 #![allow(dead_code)]
index 4f4aed21f994d8383c109e69ebd305862528457f..8170c9fd8f161db0ebf4bceafd079d94576c804c 100644 (file)
@@ -10,7 +10,7 @@
 
 // #[deprecated] can't be used in staged api
 
-#![feature(deprecated, staged_api)]
+#![feature(staged_api)]
 
 #![stable(feature = "test_feature", since = "1.0.0")]
 
index 58fa00fb41086d2f09a621bbfb42047dcac29491..5fc8f684a66fe01f678fdc4cb3bde67a7f15a6f3 100644 (file)
@@ -10,8 +10,6 @@
 
 // aux-build:deprecation-lint.rs
 
-#![feature(deprecated)]
-
 #![deny(deprecated)]
 #![allow(warnings)]
 
index 6ee5cd2c7e3cf171887d12ce69db6bbc325c4266..af2ac79ea80721808bcc49e694f9ba8e6c9f52a0 100644 (file)
@@ -10,8 +10,6 @@
 
 // Various checks that deprecation attributes are used correctly
 
-#![feature(deprecated)]
-
 mod bogus_attribute_types_1 {
     #[deprecated(since = "a", note = "a", reason)] //~ ERROR unknown meta item 'reason'
     fn f1() { }
diff --git a/src/test/compile-fail/derive-no-std-not-supported.rs b/src/test/compile-fail/derive-no-std-not-supported.rs
deleted file mode 100644 (file)
index 01c81a8..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![no_std]
-
-extern crate core;
-extern crate rand;
-extern crate serialize as rustc_serialize;
-
-#[derive(RustcEncodable)]  //~ ERROR this trait cannot be derived
-struct Bar {
-    x: u32,
-}
-
-#[derive(RustcDecodable)]  //~ ERROR this trait cannot be derived
-struct Baz {
-    x: u32,
-}
-
-fn main() {
-    Foo { x: 0 };
-    Bar { x: 0 };
-    Baz { x: 0 };
-}
index ac63cc27da1eeba52ecc9b151ba25ead78aa9515..129c859b91954af02c9a70040f12f51d6df9ddd1 100644 (file)
@@ -18,7 +18,7 @@ struct E {
 #[derive(Clone)]
 struct C {
     x: NoCloneOrEq
-    //~^ ERROR the trait `core::clone::Clone` is not implemented for the type `NoCloneOrEq`
+    //~^ ERROR `NoCloneOrEq: std::clone::Clone` is not satisfied
 }
 
 
index ac718519fe672a8c7d3bfac33a81a4ea7b32e73f..56fb38611735d1aab226000c83f070ce82a941bd 100644 (file)
@@ -17,7 +17,7 @@ struct Error;
 
 #[derive(Default)]
 struct Struct {
-    x: Error //~ ERROR `core::default::Default` is not implemented
+    x: Error //~ ERROR `Error: std::default::Default` is not satisfied
 }
 
 fn main() {}
index 08db643df00e4736003e083e012f3e9898fbb587..68d9795710245f7aa66b43e96298f35d6bbfa786 100644 (file)
@@ -35,7 +35,7 @@ fn main() {
     // n == m
     let &x = &1isize as &T;      //~ ERROR type `&T` cannot be dereferenced
     let &&x = &(&1isize as &T);  //~ ERROR type `&T` cannot be dereferenced
-    let box x = box 1isize as Box<T>; //~ ERROR the trait `core::marker::Sized` is not implemented
+    let box x = box 1isize as Box<T>; //~ ERROR `T: std::marker::Sized` is not satisfied
 
     // n > m
     let &&x = &1isize as &T;
index 76378d5c8021bd03eac59f856ba9ad41e5c766aa..0ff740212e8fa46cec39e66a28c8b00de66ce70b 100644 (file)
@@ -24,7 +24,7 @@ fn f_i8() {
     enum A {
         Ok = i8::MAX - 1,
         Ok2,
-        OhNo, //~ ERROR enum discriminant overflowed on value after 127: i8; set explicitly via OhNo = -128 if that is desired outcome
+        OhNo, //~ ERROR enum discriminant overflowed on value after 127i8; set explicitly via OhNo = -128i8 if that is desired outcome
     }
 }
 
@@ -33,7 +33,7 @@ fn f_u8() {
     enum A {
         Ok = u8::MAX - 1,
         Ok2,
-        OhNo, //~ ERROR enum discriminant overflowed on value after 255: u8; set explicitly via OhNo = 0 if that is desired outcome
+        OhNo, //~ ERROR enum discriminant overflowed on value after 255u8; set explicitly via OhNo = 0u8 if that is desired outcome
     }
 }
 
index 5d7e61e9d1eec67fcb340de8ca036661ab9236e9..7316e737b6da8680379b2c617430e54b2703b4ad 100644 (file)
@@ -22,7 +22,7 @@ fn f_i8() {
     enum A {
         Ok = i8::MAX - 1,
         Ok2,
-        OhNo, //~ ERROR enum discriminant overflowed on value after 127: i8; set explicitly via OhNo = -128 if that is desired outcome
+        OhNo, //~ ERROR enum discriminant overflowed on value after 127i8; set explicitly via OhNo = -128i8 if that is desired outcome
     }
 
     let x = A::Ok;
@@ -33,7 +33,7 @@ fn f_u8() {
     enum A {
         Ok = u8::MAX - 1,
         Ok2,
-        OhNo, //~ ERROR enum discriminant overflowed on value after 255: u8; set explicitly via OhNo = 0 if that is desired outcome
+        OhNo, //~ ERROR enum discriminant overflowed on value after 255u8; set explicitly via OhNo = 0u8 if that is desired outcome
     }
 
     let x = A::Ok;
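The reworded note above now spells out the suffixed literal for both the overflowing value (127i8) and the suggested assignment (-128i8). A small sketch of the repaired enum, following that suggestion (illustrative; assumes a 1.9-era compiler):

    use std::i8;

    #[repr(i8)]
    enum A {
        Ok = i8::MAX - 1, // 126
        Ok2,              // 127
        OhNo = -128,      // set explicitly, as the note suggests, instead of overflowing
    }

    fn main() {
        let _ = A::OhNo;
    }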
index bf4dc89415423c4da60866c7eacdc7ed2a4b1aca..7b915647884f2ac169a6e45f43b1b1f0083c8c2d 100644 (file)
 // when reporting the error.
 
 mod sub1 {
-    fn foo() {} // implementation 1
+    pub fn foo() {} // implementation 1
 }
 
 mod sub2 {
-    fn foo() {} // implementation 2
+    pub fn foo() {} // implementation 2
 }
 
 use sub1::foo; //~ NOTE previous import of `foo` here
index f7eb6e10ca78827cc9f7a09ed263ebab99e56cdc..45531666ecf5d72c57e87a1c171e2b456020f35b 100644 (file)
@@ -39,8 +39,8 @@ enum Wrapper<T:'static> {
 }
 
 fn main() {
-    let w = //~ ERROR overflow while adding drop-check rules for core::option
+    let w = //~ ERROR overflow while adding drop-check rules for std::option
         Some(Wrapper::Simple::<u32>);
-    //~^ ERROR overflow while adding drop-check rules for core::option::Option
+    //~^ ERROR overflow while adding drop-check rules for std::option::Option
     //~| ERROR overflow while adding drop-check rules for Wrapper
 }
diff --git a/src/test/compile-fail/dropck_tarena_cycle_checked.rs b/src/test/compile-fail/dropck_tarena_cycle_checked.rs
deleted file mode 100644 (file)
index d36293a..0000000
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Reject mixing cyclic structure and Drop when using TypedArena.
-//
-// (Compare against compile-fail/dropck_vec_cycle_checked.rs)
-//
-// (Also compare against compile-fail/dropck_tarena_unsound_drop.rs,
-//  which is a reduction of this code to more directly show the reason
-//  for the error message we see here.)
-
-#![feature(const_fn)]
-
-extern crate arena;
-
-use arena::TypedArena;
-use std::cell::Cell;
-use id::Id;
-
-mod s {
-    use std::sync::atomic::{AtomicUsize, Ordering};
-
-    static S_COUNT: AtomicUsize = AtomicUsize::new(0);
-
-    pub fn next_count() -> usize {
-        S_COUNT.fetch_add(1, Ordering::SeqCst) + 1
-    }
-}
-
-mod id {
-    use s;
-    #[derive(Debug)]
-    pub struct Id {
-        orig_count: usize,
-        count: usize,
-    }
-
-    impl Id {
-        pub fn new() -> Id {
-            let c = s::next_count();
-            println!("building Id {}", c);
-            Id { orig_count: c, count: c }
-        }
-        pub fn count(&self) -> usize {
-            println!("Id::count on {} returns {}", self.orig_count, self.count);
-            self.count
-        }
-    }
-
-    impl Drop for Id {
-        fn drop(&mut self) {
-            println!("dropping Id {}", self.count);
-            self.count = 0;
-        }
-    }
-}
-
-trait HasId {
-    fn count(&self) -> usize;
-}
-
-#[derive(Debug)]
-struct CheckId<T:HasId> {
-    v: T
-}
-
-#[allow(non_snake_case)]
-fn CheckId<T:HasId>(t: T) -> CheckId<T> { CheckId{ v: t } }
-
-impl<T:HasId> Drop for CheckId<T> {
-    fn drop(&mut self) {
-        assert!(self.v.count() > 0);
-    }
-}
-
-#[derive(Debug)]
-struct C<'a> {
-    id: Id,
-    v: Vec<CheckId<Cell<Option<&'a C<'a>>>>>,
-}
-
-impl<'a> HasId for Cell<Option<&'a C<'a>>> {
-    fn count(&self) -> usize {
-        match self.get() {
-            None => 1,
-            Some(c) => c.id.count(),
-        }
-    }
-}
-
-impl<'a> C<'a> {
-    fn new() -> C<'a> {
-        C { id: Id::new(), v: Vec::new() }
-    }
-}
-
-fn f<'a>(arena: &'a TypedArena<C<'a>>) {
-    let c1 = arena.alloc(C::new());
-    let c2 = arena.alloc(C::new());
-    let c3 = arena.alloc(C::new());
-
-    c1.v.push(CheckId(Cell::new(None)));
-    c1.v.push(CheckId(Cell::new(None)));
-    c2.v.push(CheckId(Cell::new(None)));
-    c2.v.push(CheckId(Cell::new(None)));
-    c3.v.push(CheckId(Cell::new(None)));
-    c3.v.push(CheckId(Cell::new(None)));
-
-    c1.v[0].v.set(Some(c2));
-    c1.v[1].v.set(Some(c3));
-    c2.v[0].v.set(Some(c2));
-    c2.v[1].v.set(Some(c3));
-    c3.v[0].v.set(Some(c1));
-    c3.v[1].v.set(Some(c2));
-}
-
-fn main() {
-    let arena = TypedArena::new();
-    f(&arena); //~ ERROR `arena` does not live long enough
-}
diff --git a/src/test/compile-fail/dropck_tarena_unsound_drop.rs b/src/test/compile-fail/dropck_tarena_unsound_drop.rs
deleted file mode 100644 (file)
index 6cbed34..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Check that an arena (TypedArena) cannot carry elements whose drop
-// methods might access borrowed data of lifetime that does not
-// strictly outlive the arena itself.
-//
-// Compare against run-pass/dropck_tarena_sound_drop.rs, which shows a
-// similar setup, but loosens `f` so that the struct `C<'a>` can be
-// fed a lifetime longer than that of the arena.
-//
-// (Also compare against dropck_tarena_cycle_checked.rs, from which
-// this was reduced to better understand its error message.)
-
-extern crate arena;
-
-use arena::TypedArena;
-
-trait HasId { fn count(&self) -> usize; }
-
-struct CheckId<T:HasId> { v: T }
-
-// In the code below, the impl of HasId for `&'a usize` does not
-// actually access the borrowed data, but the point is that the
-// interface to CheckId does not (and cannot) know that, and therefore
-// when encountering a value V of type CheckId<S>, we must
-// conservatively force the type S to strictly outlive V.
-impl<T:HasId> Drop for CheckId<T> {
-    fn drop(&mut self) {
-        assert!(self.v.count() > 0);
-    }
-}
-
-struct C<'a> { v: CheckId<&'a usize>, }
-
-impl<'a> HasId for &'a usize { fn count(&self) -> usize { 1 } }
-
-fn f<'a>(_arena: &'a TypedArena<C<'a>>) {}
-
-fn main() {
-    let arena: TypedArena<C> = TypedArena::new();
-    f(&arena); //~ ERROR `arena` does not live long enough
-}
index 8441f3a99e1aa6b3430191b4672c1303de85d3c4..241fabf053c0bdc0cd26736027fe887e64abc88e 100644 (file)
@@ -44,5 +44,5 @@ pub fn main() {
     // FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
     let z: Box<ToBar> = Box::new(Bar1 {f: 36});
     f5.ptr = *z;
-    //~^ ERROR the trait `core::marker::Sized` is not implemented
+    //~^ ERROR `ToBar: std::marker::Sized` is not satisfied
 }
index d3029bc6a99801a785754a1a9b8ac6bf92e05ec0..2d21d0ebc760b5579e6c47aa501b2f2b6257d01f 100644 (file)
@@ -49,5 +49,5 @@ pub fn main() {
     //~| found `Bar1`
     //~| expected trait ToBar
     //~| found struct `Bar1`
-    //~| ERROR the trait `core::marker::Sized` is not implemented for the type `ToBar`
+    //~| ERROR `ToBar: std::marker::Sized` is not satisfied
 }
index 2d87345db2245f5c71701480ad892e3cceda4f61..9a3ea54a3a4559c54cc6de17e1682b396afacb45 100644 (file)
@@ -28,5 +28,5 @@ pub fn main() {
     let f1 = Fat { ptr: Foo };
     let f2: &Fat<Foo> = &f1;
     let f3: &Fat<Bar> = f2;
-    //~^ ERROR the trait `Bar` is not implemented for the type `Foo`
+    //~^ ERROR `Foo: Bar` is not satisfied
 }
index f90baafef3ffad3d7f6c8abbccf1b43964b04a43..f508364d75115fa74f7a10baab0c7f6f6a658a55 100644 (file)
@@ -21,5 +21,5 @@ pub fn main() {
     let f: Fat<[isize; 3]> = Fat { ptr: [5, 6, 7] };
     let g: &Fat<[isize]> = &f;
     let h: &Fat<Fat<[isize]>> = &Fat { ptr: *g };
-    //~^ ERROR the trait `core::marker::Sized` is not implemented
+    //~^ ERROR `[isize]: std::marker::Sized` is not satisfied
 }
index da667dcb1e88253feba1aa1682adfbd2f201ef6b..8fafd78d407969ec26ee9b2cdf998a2554789a3f 100644 (file)
@@ -16,22 +16,22 @@ impl Foo for [u8] {}
 
 fn test1<T: ?Sized + Foo>(t: &T) {
     let u: &Foo = t;
-    //~^ ERROR `core::marker::Sized` is not implemented for the type `T`
+    //~^ ERROR `T: std::marker::Sized` is not satisfied
 }
 
 fn test2<T: ?Sized + Foo>(t: &T) {
     let v: &Foo = t as &Foo;
-    //~^ ERROR `core::marker::Sized` is not implemented for the type `T`
+    //~^ ERROR `T: std::marker::Sized` is not satisfied
 }
 
 fn test3() {
     let _: &[&Foo] = &["hi"];
-    //~^ ERROR `core::marker::Sized` is not implemented for the type `str`
+    //~^ ERROR `str: std::marker::Sized` is not satisfied
 }
 
 fn test4(x: &[u8]) {
     let _: &Foo = x as &Foo;
-    //~^ ERROR `core::marker::Sized` is not implemented for the type `[u8]`
+    //~^ ERROR `[u8]: std::marker::Sized` is not satisfied
 }
 
 fn main() { }
index 3436c8d488c70710743c00bedae0a006a548f3df..bd5fd3ee3b71eff738117678b3e34f4cf2da068c 100644 (file)
@@ -15,9 +15,9 @@
 trait Foo<T> : Sized { fn take(self, x: &T) { } } // Note: T is sized
 
 impl Foo<[isize]> for usize { }
-//~^ ERROR the trait `core::marker::Sized` is not implemented for the type `[isize]`
+//~^ ERROR `[isize]: std::marker::Sized` is not satisfied
 
 impl Foo<isize> for [usize] { }
-//~^ ERROR the trait `core::marker::Sized` is not implemented for the type `[usize]`
+//~^ ERROR `[usize]: std::marker::Sized` is not satisfied
 
 pub fn main() { }
diff --git a/src/test/compile-fail/dupe-symbols-8.rs b/src/test/compile-fail/dupe-symbols-8.rs
deleted file mode 100644 (file)
index 3c0e545..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-//
-// error-pattern:already defined
-
-
-#![allow(warnings)]
-
-fn main() {
-    {
-        extern fn fail() {}
-    }
-    {
-        extern fn fail() {}
-    }
-}
index d39553a7267e0fc16e1af759adeb26500d7de496..ad5ea29159968fc44d7035eea88ecdd073d42b56 100644 (file)
@@ -8,14 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// Test for issue #31788
+// note-pattern: first defined in crate `std`.
 
-// error-pattern: duplicate entry for `panic_fmt`, first definition found in `std`
+// Test for issue #31788 and E0152
 
 #![feature(lang_items)]
 
 #[lang = "panic_fmt"]
 fn panic_fmt() -> ! {
+//~^ ERROR: duplicate lang item found: `panic_fmt`.
     loop {}
 }
 
index 28e969b21498a6cce2fbaa9b4b6e19cb174de748..a6793ee8b9fbda001b64993f792666074de93db0 100644 (file)
@@ -8,11 +8,11 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-enum Foo {
+enum Foo { //~ NOTE previous definition
     X
 }
 
-mod Foo { //~ ERROR duplicate definition of type or module `Foo`
+mod Foo { //~ ERROR a type named `Foo` has already been defined
     pub static X: isize = 42;
     fn f() { f() } // Check that this does not result in a resolution error
 }
diff --git a/src/test/compile-fail/enum-discrim-autosizing.rs b/src/test/compile-fail/enum-discrim-autosizing.rs
new file mode 100644 (file)
index 0000000..3a24333
--- /dev/null
@@ -0,0 +1,20 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// With no repr attribute the discriminant will default to isize.
+// On 32-bit architectures this is equivalent to i32 so the variants
+// collide. On other architectures we need compilation to fail anyway,
+// so force the repr.
+#[cfg_attr(not(target_pointer_width = "32"), repr(i32))]
+enum Eu64 {
+    Au64 = 0,
+    Bu64 = 0x8000_0000_0000_0000 //~ERROR already exists
+}
+
index 84a27a382006988820be6dac882733a5b7b3c9f0..d6ba09bb4c5bf072692b9677f3f4883dc99e4951 100644 (file)
@@ -9,46 +9,32 @@
 // except according to those terms.
 
 
-#[repr(u8)] //~ NOTE discriminant type specified here
+#[repr(u8)]
 enum Eu8 {
     Au8 = 23,
     Bu8 = 223,
-    Cu8 = -23, //~ ERROR discriminant value outside specified type
+    Cu8 = -23, //~ ERROR unary negation of unsigned integer
 }
 
-#[repr(i8)] //~ NOTE discriminant type specified here
-enum Ei8 {
-    Ai8 = 23,
-    Bi8 = -23,
-    Ci8 = 223, //~ ERROR discriminant value outside specified type
-}
-
-#[repr(u16)] //~ NOTE discriminant type specified here
+#[repr(u16)]
 enum Eu16 {
     Au16 = 23,
     Bu16 = 55555,
-    Cu16 = -22333, //~ ERROR discriminant value outside specified type
-}
-
-#[repr(i16)] //~ NOTE discriminant type specified here
-enum Ei16 {
-    Ai16 = 23,
-    Bi16 = -22333,
-    Ci16 = 55555, //~ ERROR discriminant value outside specified type
+    Cu16 = -22333, //~ ERROR unary negation of unsigned integer
 }
 
-#[repr(u32)] //~ NOTE discriminant type specified here
+#[repr(u32)]
 enum Eu32 {
     Au32 = 23,
     Bu32 = 3_000_000_000,
-    Cu32 = -2_000_000_000, //~ ERROR discriminant value outside specified type
+    Cu32 = -2_000_000_000, //~ ERROR unary negation of unsigned integer
 }
 
-#[repr(i32)] //~ NOTE discriminant type specified here
-enum Ei32 {
-    Ai32 = 23,
-    Bi32 = -2_000_000_000,
-    Ci32 = 3_000_000_000, //~ ERROR discriminant value outside specified type
+#[repr(u64)]
+enum Eu64 {
+    Au32 = 23,
+    Bu32 = 3_000_000_000,
+    Cu32 = -2_000_000_000, //~ ERROR unary negation of unsigned integer
 }
 
 // u64 currently allows negative numbers, and i64 allows numbers greater than `1<<63`.  This is a
diff --git a/src/test/compile-fail/enum-discrim-too-small2.rs b/src/test/compile-fail/enum-discrim-too-small2.rs
new file mode 100644 (file)
index 0000000..d66716c
--- /dev/null
@@ -0,0 +1,47 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(overflowing_literals)]
+#![allow(dead_code)]
+
+#[repr(i8)]
+enum Ei8 {
+    Ai8 = 23,
+    Bi8 = -23,
+    Ci8 = 223, //~ ERROR literal out of range for i8
+}
+
+#[repr(i16)]
+enum Ei16 {
+    Ai16 = 23,
+    Bi16 = -22333,
+    Ci16 = 55555, //~ ERROR literal out of range for i16
+}
+
+#[repr(i32)]
+enum Ei32 {
+    Ai32 = 23,
+    Bi32 = -2_000_000_000,
+    Ci32 = 3_000_000_000, //~ ERROR literal out of range for i32
+}
+
+#[repr(i64)]
+enum Ei64 {
+    Ai64 = 23,
+    Bi64 = -9223372036854775808,
+    Ci64 = 9223372036854775809, //~ ERROR literal out of range for i64
+}
+
+// u64 currently allows negative numbers, and i64 allows numbers greater than `1<<63`.  This is a
+// little counterintuitive, but since the discriminant can store all the bits, and extracting it
+// with a cast requires specifying the signedness, there is no loss of information in those cases.
+// This also applies to isize and usize on 64-bit targets.
+
+pub fn main() { }
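The closing comment above notes that the discriminant can hold all the bits and that extracting it with a cast forces an explicit choice of signedness. A tiny sketch of that round trip (illustrative; assumes the default isize-sized discriminant and a 64-bit target):

    #![allow(dead_code)]

    enum E {
        A = -1, // stored as the isize bit pattern of -1
    }

    fn main() {
        // Same bits, read back with an explicitly chosen signedness:
        assert_eq!(E::A as i64, -1);
        assert_eq!(E::A as u64, !0u64); // all ones
    }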
index d79b3f472d06b70bd53436e6990058290ae86071..8b1e2fc19663dc448d87583e81eff3d2fd3b8919 100644 (file)
@@ -13,5 +13,5 @@
 fn check_bound<T:Copy>(_: T) {}
 
 fn main() {
-    check_bound("nocopy".to_string()); //~ ERROR the trait `core::marker::Copy` is not implemented
+    check_bound("nocopy".to_string()); //~ ERROR : std::marker::Copy` is not satisfied
 }
index ed1327f31185e8d565dd95307800193381e584a5..7ca274b81e574a5c6cf6962b926ddfb3ceff5b05 100644 (file)
@@ -10,7 +10,8 @@
 
 enum test {
     div_zero = 1/0, //~ERROR constant evaluation error: attempted to divide by zero
-    rem_zero = 1%0  //~ERROR constant evaluation error: attempted remainder with a divisor of zero
+    rem_zero = 1%0,
+//~^ ERROR constant evaluation error: attempted to calculate the remainder with a divisor of zero
 }
 
 fn main() {}
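The split annotation above separates division by zero from the more verbosely worded remainder case. For comparison, the runtime analogue of both checks using the standard checked operations (illustrative):

    fn main() {
        let d = 0i32;
        assert_eq!(1i32.checked_div(d), None); // would attempt to divide by zero
        assert_eq!(1i32.checked_rem(d), None); // would take a remainder with a divisor of zero
    }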
index 2ba2ef1c05a044b36c9cb55d4830fab90ea8efc8..166ef7ab87fcd1d2297dc0eda5e21f4c16424c48 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// error-pattern: unresolved name
+// error-pattern: failed to resolve. Use of undeclared type or module `foo`
 
 // In this test baz isn't resolved when called as foo.baz even though
 // it's called from inside foo. This is somewhat surprising and may
index 46d872495a6d6307b37b0f8cb7aae8b6465ac270..b6e8cf71ddd6c0550849e04ed982fb711102f06d 100644 (file)
@@ -14,4 +14,4 @@ mod foo {
     enum y { y1, }
 }
 
-fn main() { let z = foo::y::y1; } //~ ERROR: is inaccessible
+fn main() { let z = foo::y::y1; } //~ ERROR: enum `y` is private
index 6104c02c90a03332145a30a132a4822675ee5e8a..f7b1400aa45acf87e7c8a60bea566b0d75527013 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// error-pattern: unresolved name
+// error-pattern: failed to resolve. Use of undeclared type or module `bar`
 
 mod foo {
     pub fn x() { bar::x(); }
index 56a41a15ab3c0b63317895c62c3295fd3c884779..86aae47214804b28888f106310b8e8099354dd29 100644 (file)
@@ -30,5 +30,14 @@ fn f() {
     mod core {} // Check that private crates are not glob imported
 }
 
+mod bar {
+    pub extern crate core;
+}
+
+mod baz {
+    pub use bar::*;
+    use self::core::cell; // Check that public extern crates are glob imported
+}
+
 #[rustc_error]
 fn main() {} //~ ERROR compilation successful
index d1abed9b2627c0a636bd9f1eb96ceb6ce7c3f008..576368aef312f08de60da9e319558cc6f58feffa 100644 (file)
@@ -17,6 +17,6 @@ fn main() {
     // extern functions are extern "C" fn
     let _x: extern "C" fn() = f; // OK
     is_fn(f);
-    //~^ ERROR the trait `core::ops::Fn<()>` is not implemented for the type `extern "C" fn()
-    //~| ERROR the trait `core::ops::FnOnce<()>` is not implemented for the type `extern "C" fn()
+    //~^ ERROR `extern "C" fn() {f}: std::ops::Fn<()>` is not satisfied
+    //~| ERROR `extern "C" fn() {f}: std::ops::FnOnce<()>` is not satisfied
 }
index 1c462779b433c707541e1bd19d0fafef17fb3f99..b2fd11d4b39eab139352bed8fb9c335cbae7bb81 100644 (file)
@@ -18,13 +18,14 @@ fn main() {
     let q = a.as_ptr();
 
     a as usize; //~ ERROR casting
+    //~^ HELP cast through a raw pointer first
     b as usize; //~ ERROR non-scalar cast
     p as usize;
     //~^ ERROR casting
     //~^^ HELP cast through a thin pointer
 
     // #22955
-    q as *const [i32]; //~ ERROR casting
+    q as *const [i32]; //~ ERROR cannot cast
 
     // #21397
     let t: *mut (Trait + 'static) = 0 as *mut _; //~ ERROR casting
index 15cc17b19db33bfc9bb1b2695d95fa6bdfb3df20..93e09c6d8d2d25161dc4b1ec1b5f624fb662ea4b 100644 (file)
@@ -17,26 +17,13 @@ impl std::ops::Neg for S {
 }
 
 const _MAX: usize = -1;
-//~^ ERROR unary negation of unsigned integer
+//~^ WARN unary negation of unsigned integer
+//~| ERROR unary negation of unsigned integer
 //~| HELP use a cast or the `!` operator
 
 fn main() {
-    let a = -1;
-    //~^ ERROR unary negation of unsigned integer
-    //~| HELP use a cast or the `!` operator
-    let _b : u8 = a; // for infering variable a to u8.
-
-    -a;
-    //~^ ERROR unary negation of unsigned integer
-    //~| HELP use a cast or the `!` operator
-
-    let _d = -1u8;
-    //~^ ERROR unary negation of unsigned integer
-    //~| HELP use a cast or the `!` operator
-
-    for _ in -10..10u8 {}
-    //~^ ERROR unary negation of unsigned integer
-    //~| HELP use a cast or the `!` operator
-
+    let x = 5u8;
+    let _y = -x; //~ ERROR unary negation of unsigned integer
+    //~^ HELP use a cast or the `!` operator
     -S; // should not trigger the gate; issue 26840
 }
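The HELP note retained above ("use a cast or the `!` operator") points at the two accepted spellings when a negation-like operation is wanted on an unsigned value. A short sketch of both (illustrative):

    fn main() {
        let x = 5u8;

        // `-x` is rejected: unary negation of unsigned integer.
        let as_signed = -(x as i8); // cast to a signed type first, then negate
        let bitwise = !x;           // or use the `!` operator where a complement is meant

        println!("{} {}", as_signed, bitwise);
    }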
diff --git a/src/test/compile-fail/feature-gate-negate-unsigned0.rs b/src/test/compile-fail/feature-gate-negate-unsigned0.rs
new file mode 100644 (file)
index 0000000..05b1943
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that negating unsigned integers doesn't compile
+
+struct S;
+impl std::ops::Neg for S {
+    type Output = u32;
+    fn neg(self) -> u32 { 0 }
+}
+
+fn main() {
+    let a = -1;
+    //~^ ERROR unary negation of unsigned integer
+    let _b : u8 = a; // for infering variable a to u8.
+
+    let _d = -1u8;
+    //~^ ERROR unary negation of unsigned integer
+
+    for _ in -10..10u8 {}
+    //~^ ERROR unary negation of unsigned integer
+
+    -S; // should not trigger the gate; issue 26840
+}
diff --git a/src/test/compile-fail/feature-gate-try-operator.rs b/src/test/compile-fail/feature-gate-try-operator.rs
new file mode 100644 (file)
index 0000000..b05c732
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+macro_rules! id {
+    ($e:expr) => { $e }
+}
+
+fn main() {
+    id!(x?);  //~ error: the `?` operator is not stable (see issue #31436)
+    y?;  //~ error: the `?` operator is not stable (see issue #31436)
+}
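The new test above gates the `?` operator behind issue #31436; on a stable compiler of this era the equivalent early return is spelled with the `try!` macro. A minimal sketch (illustrative; the function and names are made up for the example):

    use std::num::ParseIntError;

    fn double(s: &str) -> Result<i32, ParseIntError> {
        // Stable spelling of `let n = s.parse::<i32>()?;`
        let n = try!(s.parse::<i32>());
        Ok(n * 2)
    }

    fn main() {
        println!("{:?}", double("21")); // Ok(42)
    }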
index 5015810ff47742cb86dc0e54d54f9ec0e3bf22e2..c90a7113f1b4106d3d7d210195f6867e6cd560d9 100644 (file)
 // Test that the types of distinct fn items are not compatible by
 // default. See also `run-pass/fn-item-type-*.rs`.
 
-fn foo(x: isize) -> isize { x * 2 }
-fn bar(x: isize) -> isize { x * 4 }
+fn foo<T>(x: isize) -> isize { x * 2 }
+fn bar<T>(x: isize) -> isize { x * 4 }
 
 fn eq<T>(x: T, y: T) { }
 
+trait Foo { fn foo() { /* this is a default fn */ } }
+impl<T> Foo for T { /* `foo` is still default here */ }
+
 fn main() {
-    let f = if true { foo } else { bar };
-    //~^ ERROR if and else have incompatible types
-    //~| expected `fn(isize) -> isize {foo}`
-    //~| found `fn(isize) -> isize {bar}`
-    //~| expected fn item,
-    //~| found a different fn item
-
-    eq(foo, bar);
+    eq(foo::<u8>, bar::<u8>);
     //~^ ERROR mismatched types
-    //~|  expected `fn(isize) -> isize {foo}`
-    //~|  found `fn(isize) -> isize {bar}`
+    //~|  expected `fn(isize) -> isize {foo::<u8>}`
+    //~|  found `fn(isize) -> isize {bar::<u8>}`
     //~|  expected fn item
     //~|  found a different fn item
+
+    eq(foo::<u8>, foo::<i8>);
+    //~^ ERROR mismatched types
+    //~|  expected `fn(isize) -> isize {foo::<u8>}`
+    //~|  found `fn(isize) -> isize {foo::<i8>}`
+
+    eq(bar::<String>, bar::<Vec<u8>>);
+    //~^ ERROR mismatched types
+    //~|  expected `fn(isize) -> isize {bar::<std::string::String>}`
+    //~|  found `fn(isize) -> isize {bar::<std::vec::Vec<u8>>}`
+    //~|  expected struct `std::string::String`
+    //~|  found struct `std::vec::Vec`
+
+    // Make sure we distinguish between trait methods correctly.
+    eq(<u8 as Foo>::foo, <u16 as Foo>::foo);
+    //~^ ERROR mismatched types
+    //~|  expected `fn() {<u8 as Foo>::foo}`
+    //~|  found `fn() {<u16 as Foo>::foo}`
 }
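The expanded test above relies on each fn item (and each instantiation such as `foo::<u8>`) having its own zero-sized type, which is why `eq` rejects mixed arguments. When two different items do need a common type, the usual fix is an explicit fn-pointer coercion; a small sketch (illustrative):

    fn foo(x: isize) -> isize { x * 2 }
    fn bar(x: isize) -> isize { x * 4 }

    fn main() {
        // Coerce each fn item to the common pointer type `fn(isize) -> isize`
        // up front; after that the two values have identical types.
        let f: fn(isize) -> isize = foo;
        let g: fn(isize) -> isize = bar;
        let h = if true { f } else { g };
        println!("{}", h(3)); // 6
    }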
index 6433255bd4d2fcb7e3dd05894e669f8a38b0b945..8cbfc520ff449cc32cdf3933389d7719613d7554 100644 (file)
@@ -17,23 +17,23 @@ fn main() {
     let _: () = (box |_: isize| {}) as Box<FnOnce(isize)>;
     //~^ ERROR mismatched types
     //~| expected `()`
-    //~| found `Box<core::ops::FnOnce(isize)>`
+    //~| found `Box<std::ops::FnOnce(isize)>`
     //~| expected ()
     //~| found box
     let _: () = (box |_: isize, isize| {}) as Box<Fn(isize, isize)>;
     //~^ ERROR mismatched types
     //~| expected `()`
-    //~| found `Box<core::ops::Fn(isize, isize)>`
+    //~| found `Box<std::ops::Fn(isize, isize)>`
     //~| expected ()
     //~| found box
     let _: () = (box || -> isize { unimplemented!() }) as Box<FnMut() -> isize>;
     //~^ ERROR mismatched types
     //~| expected `()`
-    //~| found `Box<core::ops::FnMut() -> isize>`
+    //~| found `Box<std::ops::FnMut() -> isize>`
     //~| expected ()
     //~| found box
 
     needs_fn(1);
-    //~^ ERROR `core::ops::Fn<(isize,)>`
-    //~| ERROR `core::ops::FnOnce<(isize,)>`
+    //~^ ERROR : std::ops::Fn<(isize,)>`
+    //~| ERROR : std::ops::FnOnce<(isize,)>`
 }
index c77683045170e11675cf029c64b20f169c16f346..96ad184fd355840c602ab11ce998a3ca7cca3a43 100644 (file)
@@ -24,7 +24,7 @@ pub fn main() {
         x: 1,
         y: 2,
     };
-    for x in bogus { //~ ERROR `core::iter::Iterator` is not implemented for the type `MyStruct`
+    for x in bogus { //~ ERROR `MyStruct: std::iter::Iterator` is not satisfied
         drop(x);
     }
 }
index 029acbff9c33327217b8307a05deb6290b0245fb..fb787e8572c200018f97d5f36c2d9bcefc96dfb9 100644 (file)
@@ -14,8 +14,8 @@ fn main() {
     let x: Option<usize>;
     x = 5;
     //~^ ERROR mismatched types
-    //~| expected `core::option::Option<usize>`
+    //~| expected `std::option::Option<usize>`
     //~| found `_`
-    //~| expected enum `core::option::Option`
+    //~| expected enum `std::option::Option`
     //~| found integral variable
 }
index d6d668b366aa35006332a51de4b9a6a0746e5e26..9242849efc746b00c1846660c5bca825dc7a189e 100644 (file)
@@ -15,9 +15,9 @@ use std::option::Option;
 fn bar(x: usize) -> Option<usize> {
     return x;
     //~^ ERROR mismatched types
-    //~| expected `core::option::Option<usize>`
+    //~| expected `std::option::Option<usize>`
     //~| found `usize`
-    //~| expected enum `core::option::Option`
+    //~| expected enum `std::option::Option`
     //~| found usize
 }
 
diff --git a/src/test/compile-fail/gated-naked_functions.rs b/src/test/compile-fail/gated-naked_functions.rs
new file mode 100644 (file)
index 0000000..ceb475e
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[naked]
+//~^ the `#[naked]` attribute is an experimental feature
+fn naked() {}
+
+#[naked]
+//~^ the `#[naked]` attribute is an experimental feature
+fn naked_2() -> isize {
+    0
+}
index a452cd35f943fc4484b588d121e2ca9cb5538f29..adf9a98a05c2d5dc32f8e71d303dac9b403cc47e 100644 (file)
@@ -38,13 +38,13 @@ fn main() {
     // Including cases where the default is using previous type params.
     let _: HashMap<String, isize> = ();
     //~^ ERROR mismatched types
-    //~| expected `HashMap<collections::string::String, isize>`
+    //~| expected `HashMap<std::string::String, isize>`
     //~| found `()`
     //~| expected struct `HashMap`
     //~| found ()
     let _: HashMap<String, isize, Hash<String>> = ();
     //~^ ERROR mismatched types
-    //~| expected `HashMap<collections::string::String, isize>`
+    //~| expected `HashMap<std::string::String, isize>`
     //~| found `()`
     //~| expected struct `HashMap`
     //~| found ()
diff --git a/src/test/compile-fail/glob-cycles.rs b/src/test/compile-fail/glob-cycles.rs
new file mode 100644 (file)
index 0000000..077ae19
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+mod foo {
+    pub use bar::*;
+    pub use main as f; //~ ERROR has already been imported
+}
+
+mod bar {
+    pub use foo::*;
+}
+
+pub use foo::*;
+pub use baz::*; //~ ERROR has already been imported
+mod baz {
+    pub use super::*;
+}
+
+pub fn main() {}
index 3efe0501267e9b1ac6c5f031ac022586e3562548..845429d4b0c0b37a07c89fd0500d0389fe888ed0 100644 (file)
@@ -35,6 +35,6 @@ impl<'a> Foo<(&'a isize, &'a isize)> for SomeStruct
 }
 
 fn a() { want_foo1::<SomeStruct>(); } // OK -- foo wants just one region
-fn b() { want_foo2::<SomeStruct>(); } //~ ERROR not implemented
+fn b() { want_foo2::<SomeStruct>(); } //~ ERROR E0277
 
 fn main() { }
index 249256f8e01a6a0e6f2da50b6f279640caa72245..b55dccec2d56f95adec0155cfaae1dd679d5ee1d 100644 (file)
@@ -54,7 +54,7 @@ fn want_qux<B>(b: &B)
     where B : Qux
 {
     want_foo_for_any_tcx(b);
-    want_bar_for_any_ccx(b); //~ ERROR not implemented
+    want_bar_for_any_ccx(b); //~ ERROR E0277
 }
 
 fn main() {}
index 441ad76b6023ca980430def642c4c8e7ef77372a..4c5add4aceaaf15273f468de48cc5e2731bce19a 100644 (file)
@@ -25,7 +25,7 @@ fn want_foo_for_some_tcx<'x,F>(f: &'x F)
     where F : Foo<'x>
 {
     want_foo_for_some_tcx(f);
-    want_foo_for_any_tcx(f); //~ ERROR not implemented
+    want_foo_for_any_tcx(f); //~ ERROR E0277
 }
 
 fn want_foo_for_any_tcx<F>(f: &F)
@@ -42,7 +42,7 @@ fn want_bar_for_some_ccx<'x,B>(b: &B)
     want_foo_for_any_tcx(b);
 
     want_bar_for_some_ccx(b);
-    want_bar_for_any_ccx(b); //~ ERROR not implemented
+    want_bar_for_any_ccx(b); //~ ERROR E0277
 }
 
 fn want_bar_for_any_ccx<B>(b: &B)
index a1ec4a739e8cea03dbd55880b2eaf33d46eef86e..aec950f992cf4c160dc98536c2706f3320d4180e 100644 (file)
@@ -31,7 +31,7 @@ fn give_any() {
 struct StaticInt;
 impl Foo<&'static isize> for StaticInt { }
 fn give_static() {
-    want_hrtb::<StaticInt>() //~ ERROR `for<'a> Foo<&'a isize>` is not implemented
+    want_hrtb::<StaticInt>() //~ ERROR `for<'a> StaticInt: Foo<&'a isize>` is not satisfied
 }
 
 fn main() { }
index e8ecc0608fc4aeb1081bf617912250490c9123eb..fcfbeefced06bd184bc9787ff7b17bd4162075c0 100644 (file)
@@ -53,7 +53,7 @@ fn foo_hrtb_bar_not<'b,T>(mut t: T)
     // be implemented. Thus to satisfy `&mut T : for<'a> Foo<&'a
     // isize>`, we require `T : for<'a> Bar<&'a isize>`, but the where
     // clause only specifies `T : Bar<&'b isize>`.
-    foo_hrtb_bar_not(&mut t); //~ ERROR `for<'a> Bar<&'a isize>` is not implemented for the type `T`
+    foo_hrtb_bar_not(&mut t); //~ ERROR `for<'a> T: Bar<&'a isize>` is not satisfied
 }
 
 fn foo_hrtb_bar_hrtb<T>(mut t: T)
index 948040496bd392466bf689e266d3b7cce1688d1a..9b9bae92c33c1b43e62ca20781292f9b1e06f170 100644 (file)
@@ -10,5 +10,5 @@
 
 fn main() {
     format!("{:X}", "3");
-    //~^ ERROR: the trait `core::fmt::UpperHex` is not implemented
+    //~^ ERROR: `str: std::fmt::UpperHex` is not satisfied
 }
index 8c8f67e40abef59af082a594b670b2e9a9122eb6..f90365b71ae55fa617102acea28ae95a6e07691c 100644 (file)
@@ -17,7 +17,7 @@ trait Getter<T: Clone2> {
     fn get(&self) -> T;
 }
 
-impl Getter<isize> for isize { //~ ERROR the trait `Clone2` is not implemented
+impl Getter<isize> for isize { //~ ERROR `isize: Clone2` is not satisfied
     fn get(&self) -> isize { *self }
 }
 
index 6201d9862bb6974da1296c603c010f5a8ee64276..148958ae128970624c30498fc6de5b8c2f618a8f 100644 (file)
@@ -11,7 +11,7 @@
 struct Foo;
 impl Foo {
     fn orange(&self){}
-    fn orange(&self){}   //~ ERROR duplicate method
+    fn orange(&self){}   //~ ERROR duplicate definitions
 }
 
 fn main() {}
diff --git a/src/test/compile-fail/impossible_range.rs b/src/test/compile-fail/impossible_range.rs
new file mode 100644 (file)
index 0000000..94e048f
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Make sure that invalid ranges generate an error during HIR lowering, not an ICE
+
+#![feature(inclusive_range_syntax)]
+
+pub fn main() {
+    ..;
+    0..;
+    ..1;
+    0..1;
+
+    ...; //~ERROR inclusive range with no end
+         //~^HELP bounded at the end
+    0...; //~ERROR inclusive range with no end
+          //~^HELP bounded at the end
+    ...1;
+    0...1;
+}
+
+
index 3d3b7bc1bcb430b30416e4f77ac0a2c656a220c8..354d7b936485bd0505c02eb71758860d5400d5a4 100644 (file)
@@ -13,7 +13,7 @@
 
 fn main() {
     fn bar<T>(_: T) {}
-    [0][0u8]; //~ ERROR: the trait `core::ops::Index<u8>` is not implemented
+    [0][0u8]; //~ ERROR: `[_]: std::ops::Index<u8>` is not satisfied
 
     [0][0]; // should infer to be a usize
 
index 28806b6e2ab8cbd39eceb141c533e71e0abd2b61..08e6bf2469117179b8584954ab81a57943edb670 100644 (file)
@@ -31,7 +31,7 @@ impl<T:Clone> ToOpt for Option<T> {
 }
 
 fn function<T:ToOpt + Clone>(counter: usize, t: T) {
-//~^ ERROR reached the recursion limit during monomorphization
+//~^ ERROR reached the recursion limit while instantiating `function::<std::option::Option<
     if counter > 0 {
         function(counter - 1, t.to_option());
         // FIXME(#4287) Error message should be here. It should be
diff --git a/src/test/compile-fail/inherent-overlap.rs b/src/test/compile-fail/inherent-overlap.rs
new file mode 100644 (file)
index 0000000..333a4ee
--- /dev/null
@@ -0,0 +1,51 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that you cannot define items with the same name in overlapping inherent
+// impl blocks.
+
+#![feature(rustc_attrs)]
+#![allow(dead_code)]
+
+struct Foo;
+
+impl Foo {
+    fn id() {} //~ WARN duplicate definitions
+    //~^ WARN previously accepted
+}
+
+impl Foo {
+    fn id() {}
+}
+
+struct Bar<T>(T);
+
+impl<T> Bar<T> {
+    fn bar(&self) {} //~ WARN duplicate definitions
+    //~^ WARN previously accepted
+}
+
+impl Bar<u32> {
+    fn bar(&self) {}
+}
+
+struct Baz<T>(T);
+
+impl<T: Copy> Baz<T> {
+    fn baz(&self) {} //~ WARN duplicate definitions
+    //~^ WARN previously accepted
+}
+
+impl<T> Baz<Vec<T>> {
+    fn baz(&self) {}
+}
+
+#[rustc_error]
+fn main() {} //~ ERROR compilation successful
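The new test above warns because, for example, `Bar<u32>` is covered by the blanket `impl<T> Bar<T>`, so both blocks would offer a `bar` method for the same type. A sketch of the non-conflicting shape, using distinct method names (illustrative):

    struct Bar<T>(T);

    impl<T> Bar<T> {
        fn bar_any(&self) {}
    }

    impl Bar<u32> {
        fn bar_u32(&self) {}
    }

    fn main() {
        let b = Bar(1u32);
        b.bar_any(); // from the blanket impl
        b.bar_u32(); // from the u32-specific impl
    }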
index 6fcda66486b78018ef3fe61b08c2d0270ec6e89b..a6a331984583675c99a942ea23f96347bc95f091 100644 (file)
@@ -15,7 +15,6 @@ enum Bar<T> { What } //~ ERROR parameter `T` is never used
 fn foo<T>() {
     static a: Bar<T> = Bar::What;
     //~^ ERROR cannot use an outer type parameter in this context
-    //~| ERROR type name `T` is undefined or not in scope
 }
 
 fn main() {
index f78d677679bc88ed9d4cd75b31b91901c704c98d..897aca66cbfd466705cc089e5a8b42e50d0e68f8 100644 (file)
@@ -13,14 +13,14 @@ pub fn main() {
     let s: String = "abcdef".to_string();
     v[3_usize];
     v[3];
-    v[3u8];  //~ERROR the trait `core::ops::Index<u8>` is not implemented
-    v[3i8];  //~ERROR the trait `core::ops::Index<i8>` is not implemented
-    v[3u32]; //~ERROR the trait `core::ops::Index<u32>` is not implemented
-    v[3i32]; //~ERROR the trait `core::ops::Index<i32>` is not implemented
+    v[3u8];  //~ERROR : std::ops::Index<u8>` is not satisfied
+    v[3i8];  //~ERROR : std::ops::Index<i8>` is not satisfied
+    v[3u32]; //~ERROR : std::ops::Index<u32>` is not satisfied
+    v[3i32]; //~ERROR : std::ops::Index<i32>` is not satisfied
     s.as_bytes()[3_usize];
     s.as_bytes()[3];
-    s.as_bytes()[3u8];  //~ERROR the trait `core::ops::Index<u8>` is not implemented
-    s.as_bytes()[3i8];  //~ERROR the trait `core::ops::Index<i8>` is not implemented
-    s.as_bytes()[3u32]; //~ERROR the trait `core::ops::Index<u32>` is not implemented
-    s.as_bytes()[3i32]; //~ERROR the trait `core::ops::Index<i32>` is not implemented
+    s.as_bytes()[3u8];  //~ERROR : std::ops::Index<u8>` is not satisfied
+    s.as_bytes()[3i8];  //~ERROR : std::ops::Index<i8>` is not satisfied
+    s.as_bytes()[3u32]; //~ERROR : std::ops::Index<u32>` is not satisfied
+    s.as_bytes()[3i32]; //~ERROR : std::ops::Index<i32>` is not satisfied
 }
index b83f0f73436cffe488e9f358ed532eec95f091e0..906056896be1e6eb3810283737655a019d6e4ee9 100644 (file)
@@ -15,15 +15,10 @@ extern "rust-intrinsic" {
     fn return_address() -> *const u8;
 }
 
-unsafe fn f() {
-    let _ = return_address();
-    //~^ ERROR invalid use of `return_address` intrinsic: function does not use out pointer
-}
+unsafe fn f() { let _ = return_address(); }
+//~^ ERROR invalid use of `return_address` intrinsic: function does not use out pointer
 
-unsafe fn g() -> isize {
-    let _ = return_address();
-    //~^ ERROR invalid use of `return_address` intrinsic: function does not use out pointer
-    0
-}
+unsafe fn g() -> isize { let _ = return_address(); 0 }
+//~^ ERROR invalid use of `return_address` intrinsic: function does not use out pointer
 
 fn main() {}
diff --git a/src/test/compile-fail/invalid-intrinsic.rs b/src/test/compile-fail/invalid-intrinsic.rs
new file mode 100644 (file)
index 0000000..2aa2546
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(intrinsics)]
+extern "rust-intrinsic" {
+    pub static breakpoint : unsafe extern "rust-intrinsic" fn();
+    //~^ ERROR intrinsic has wrong type
+}
+fn main() { unsafe { breakpoint(); } }
\ No newline at end of file
index 9711d760ae666ccb54409aa5fdb6dd90f43f3478..43255db2ff3ab96ea443117ed3e70fcbebb6cf97 100644 (file)
@@ -11,8 +11,7 @@
 fn test<'x>(x: &'x isize) {
     // FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
     drop::<Box<for<'z> FnMut(&'z isize) -> &'z isize>>(Box::new(|z| {
-        x
-        //~^ ERROR cannot infer an appropriate lifetime
+        x //~ ERROR E0312
     }));
 }
 
diff --git a/src/test/compile-fail/issue-10636-2.rs b/src/test/compile-fail/issue-10636-2.rs
new file mode 100644 (file)
index 0000000..747252d
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// FIXME(31528) we emit a bunch of silly errors here due to continuing past the
+// first one. This would be easy-ish to address by better recovery in tokenisation.
+
+pub fn trace_option(option: Option<isize>) {
+    option.map(|some| 42; //~ NOTE: unclosed delimiter
+                          //~^ ERROR: expected one of
+} //~ ERROR: incorrect close delimiter
+//~^ ERROR: unexpected token
index 1bd7b0aa1c27913f4b6537354d0022d9736fb83e..7dccd7811066e3843393d6354f16dc8adc5620f0 100644 (file)
@@ -14,8 +14,8 @@ extern crate issue_11680 as other;
 
 fn main() {
     let _b = other::Foo::Bar(1);
-    //~^ ERROR: variant `Bar` is private
+    //~^ ERROR: enum `Foo` is private
 
     let _b = other::test::Foo::Bar(1);
-    //~^ ERROR: variant `Bar` is private
+    //~^ ERROR: enum `Foo` is private
 }
index 998576097a0a06887275de88b1d82149f2fcae6e..6dde59d4a2e6098353e0374eab134060ee773c37 100644 (file)
@@ -9,6 +9,7 @@
 // except according to those terms.
 
 fn blah() -> i32 { //~ ERROR not all control paths return a value
+    //~^ HELP run `rustc --explain E0269` to see a detailed explanation
     1
 
     ; //~ HELP consider removing this semicolon:
index 33fbdce4ee25a2b50fd3a4474bc31c840ccd7879..0c3c82a99f28f4f38a6ba45caea71226fadad3de 100644 (file)
@@ -12,7 +12,6 @@ trait Trait {
     fn outer(&self) {
         fn inner(_: &Self) {
             //~^ ERROR can't use type parameters from outer function
-            //~^^ ERROR use of `Self` outside of an impl or trait
         }
     }
 }
index 8886dd80be5039bed56eb68650bcf347efd7d6b9..b552d7678d563701f3282fa6ef290365ecc67fc4 100644 (file)
@@ -36,8 +36,5 @@ fn check<'r, I: Iterator<Item=usize>, T: Itble<'r, usize, I>>(cont: &T) -> bool
 fn main() {
     check((3, 5));
 //~^ ERROR mismatched types
-//~| expected `&_`
-//~| found `(_, _)`
-//~| expected &-ptr
-//~| found tuple
+//~| HELP run `rustc --explain E0308` to see a detailed explanation
 }
index 311280bd497605f429b179754bc9b60586e7338d..afb2e867f45c6057e60336534eefb316e4148cc7 100644 (file)
@@ -16,4 +16,5 @@ fn main() {
     A::C = 1;
     //~^ ERROR: invalid left-hand side expression
     //~| ERROR: mismatched types
+    //~| ERROR: struct `C` is private
 }
index c771970650d31e2bee64641e787dabc1c188d04f..5b8ab08aefca11a4bed1b68f6dc66f3dd0350ab3 100644 (file)
@@ -11,6 +11,7 @@
 // Regression test for #13428
 
 fn foo() -> String {  //~ ERROR not all control paths return a value
+    //~^ HELP run `rustc --explain E0269` to see a detailed explanation
     format!("Hello {}",
             "world")
     // Put the trailing semicolon on its own line to test that the
@@ -19,6 +20,7 @@ fn foo() -> String {  //~ ERROR not all control paths return a value
 }
 
 fn bar() -> String {  //~ ERROR not all control paths return a value
+    //~^ HELP run `rustc --explain E0269` to see a detailed explanation
     "foobar".to_string()
     ;   //~ HELP consider removing this semicolon
 }
index a29a83c43063214f6bb0c2ebae731c2b7327f575..d2c8b679ff69fff7a5818046e2cb0cd47d05fc90 100644 (file)
@@ -17,16 +17,16 @@ pub fn main() {
     let _x: usize = match Some(1) {
         Ok(u) => u,
         //~^ ERROR mismatched types
-        //~| expected `core::option::Option<_>`
-        //~| found `core::result::Result<_, _>`
-        //~| expected enum `core::option::Option`
-        //~| found enum `core::result::Result`
+        //~| expected `std::option::Option<_>`
+        //~| found `std::result::Result<_, _>`
+        //~| expected enum `std::option::Option`
+        //~| found enum `std::result::Result`
 
         Err(e) => panic!(e)
         //~^ ERROR mismatched types
-        //~| expected `core::option::Option<_>`
-        //~| found `core::result::Result<_, _>`
-        //~| expected enum `core::option::Option`
-        //~| found enum `core::result::Result`
+        //~| expected `std::option::Option<_>`
+        //~| found `std::result::Result<_, _>`
+        //~| expected enum `std::option::Option`
+        //~| found enum `std::result::Result`
     };
 }
index f907be161fa0815a4fa5af331cf97386d1d1f347..e1fe2d06993d5ba4a21f4cddd18a3bf9713bccfa 100644 (file)
@@ -17,7 +17,7 @@ fn main() {
     let y = match x {
         [] => None,
 //~^ ERROR mismatched types
-//~| expected `[_#0i; 2]`
+//~| expected `[_#1i; 2]`
 //~| found `[_#7t; 0]`
 //~| expected an array with a fixed size of 2 elements
 //~| found one with 0 elements
index 51b6dc0d07865eda130cbc0e8633f90907501550..3b690e08f6143042e6cc05cfeca424a036f4330c 100644 (file)
@@ -17,9 +17,7 @@ mod a {
 
 fn main() {
     a::Foo::new();
-    //~^ ERROR: method `new` is inaccessible
-    //~^^ NOTE: struct `Foo` is private
+    //~^ ERROR: struct `Foo` is private
     a::Bar::new();
-    //~^ ERROR: method `new` is inaccessible
-    //~^^ NOTE: enum `Bar` is private
+    //~^ ERROR: enum `Bar` is private
 }
index 8cbec549dda96fc9384a49966235fdf972760f51..446514c8dd45fd186322919d4b4bb5b72d8658d2 100644 (file)
@@ -13,5 +13,5 @@
 
 fn main() {
     () <- 0;
-    //~^ ERROR: the trait `core::ops::Placer<_>` is not implemented
+    //~^ ERROR: `(): std::ops::Placer<_>` is not satisfied
 }
index c7cac71af036125ded2fdda44ffebcb7497a4610..84452accc9a4ac4abec7e0134f6a81c628cbaf24 100644 (file)
@@ -10,5 +10,5 @@
 
 fn main() {
     let _x = "test" as &::std::any::Any;
-//~^ ERROR the trait `core::marker::Sized` is not implemented for the type `str`
+//~^ ERROR `str: std::marker::Sized` is not satisfied
 }
index c6c1a0fd17781ab1209e1a4923073f294e043ba9..c4d88267032920c59648ad43977aaa394d225e9f 100644 (file)
@@ -20,7 +20,7 @@ struct X { data: u32 }
 
 impl Something for X {
     fn yay<T: Str>(_:Option<X>, thing: &[T]) {
-    //~^ ERROR the requirement `T : Str` appears on the impl method
+    //~^ ERROR the requirement `T: Str` appears on the impl method
     }
 }
 
index 2228b6d37799de44b226fbea051e69fc969b7aa1..5ec82326d6c1a10c4695f78751529afcbb7db8bf 100644 (file)
@@ -25,6 +25,7 @@ fn main() {
 
     let Foo {
         a, //~ NOTE field `a` previously bound here
+        //~^ NOTE field `a` previously bound here
         a: _, //~ ERROR field `a` bound multiple times in the pattern
         a: x //~ ERROR field `a` bound multiple times in the pattern
     } = Foo { a: 29 };
index b378d2f885e833a754a7e4ed8fb92f75fe8c4943..bdf344dcdfe8da8377ba866e99756a1fb1ad3b39 100644 (file)
@@ -12,12 +12,12 @@ const N: isize = 1;
 
 enum Foo {
     A = 1,
-    B = 1, //~ ERROR discriminant value `1` already exists
+    B = 1, //~ ERROR discriminant value `1isize` already exists
     //~^^ NOTE conflicting
     C = 0,
-    D, //~ ERROR discriminant value `1` already exists
+    D, //~ ERROR discriminant value `1isize` already exists
     //~^^^^^ NOTE conflicting
-    E = N, //~ ERROR discriminant value `1` already exists
+    E = N, //~ ERROR discriminant value `1isize` already exists
     //~^^^^^^^ NOTE conflicting
 }
 
index 02ccf9c0e08e7af6fd4daa7a6b6b49a4d61933bc..41349d7d7443b056bcaeb16498090e66b22ca71e 100644 (file)
@@ -15,7 +15,7 @@ fn dft_iter<'a, T>(arg1: Chunks<'a,T>, arg2: ChunksMut<'a,T>)
 {
     for
     &mut something
-//~^ ERROR the trait `core::marker::Sized` is not implemented for the type `[T]`
+//~^ ERROR `[T]: std::marker::Sized` is not satisfied
     in arg2
     {
     }
index 13a5fa4b8af5af16ad60efb1adc04de9e764e358..9a139021e4c6d67dc8e2913ea304b8fd2e3a9798 100644 (file)
@@ -17,8 +17,8 @@ fn main() {
     let x = Some(&[name]);
     let msg = foo(x);
 //~^ ERROR mismatched types
-//~| expected `core::option::Option<&[&str]>`
-//~| found `core::option::Option<&[&str; 1]>`
+//~| expected `std::option::Option<&[&str]>`
+//~| found `std::option::Option<&[&str; 1]>`
 //~| expected slice
 //~| found array of 1 elements
     assert_eq!(msg, 3);
index ba369734daa47abd55b3b4444353e9eaa793dc96..30775a958b57d40616cd8ce43f12b020badb7dbc 100644 (file)
@@ -14,7 +14,7 @@ fn main() {
     let Slice { data: data, len: len } = "foo";
     //~^ ERROR mismatched types
     //~| expected `&str`
-    //~| found `core::raw::Slice<_>`
+    //~| found `std::raw::Slice<_>`
     //~| expected &-ptr
-    //~| found struct `core::raw::Slice`
+    //~| found struct `std::raw::Slice`
 }
index b943ef510da28fbf7a3978bf01f8dcf65bbf1604..a90f9fe26e48c644f6655fa0acd7f9b17ee6b2c1 100644 (file)
@@ -15,9 +15,9 @@ fn main() {
         Slice { data: data, len: len } => (),
         //~^ ERROR mismatched types
         //~| expected `()`
-        //~| found `core::raw::Slice<_>`
+        //~| found `std::raw::Slice<_>`
         //~| expected ()
-        //~| found struct `core::raw::Slice`
+        //~| found struct `std::raw::Slice`
         _ => unreachable!()
     }
 }
index 6f627bfe704a85d50329de907925fa8930ba68e1..6c41450796c749edad9e4c630bef2fa2e6526d96 100644 (file)
@@ -9,17 +9,17 @@
 // except according to those terms.
 
 mod Y {
-    type X = usize;
+    pub type X = usize;
     extern {
-        static x: *const usize;
+        pub static x: *const usize;
     }
-    fn foo(value: *const X) -> *const X {
+    pub fn foo(value: *const X) -> *const X {
         value
     }
 }
 
 static foo: *const Y::X = Y::foo(Y::x as *const Y::X);
-//~^ ERROR the trait `core::marker::Sync` is not implemented for the type
+//~^ ERROR `*const usize: std::marker::Sync` is not satisfied
 //~| ERROR cannot refer to other statics by value, use the address-of operator or a constant instead
 //~| ERROR E0015
 
index dd7e8a869eca9c3c9d0119f70a03cf281aed76e4..0fdb5f74e829961a575921f634449c087af01b44 100644 (file)
@@ -19,6 +19,7 @@ struct List<'a, T: ListItem<'a>> {
 //~^ ERROR the parameter type `T` may not live long enough
 //~| HELP consider adding an explicit lifetime bound
 //~| NOTE ...so that the reference type `&'a [T]` does not outlive the data it points at
+//~| HELP run `rustc --explain E0309` to see a detailed explanation
 }
 impl<'a, T: ListItem<'a>> Collection for List<'a, T> {
     fn len(&self) -> usize {
index f40d51f1d2fd3c59c3aef358a2ca1de3b5aef372..2320bc02baf5edc478996370b1cef53008b9d1f0 100644 (file)
@@ -23,3 +23,5 @@ fn main() {
     //~^ ERROR cannot borrow `foo` (here through borrowing `foo.b`) as immutable
     //~^^ NOTE previous borrow of `foo` occurs here (through borrowing `foo.a`)
 }
+//~^ NOTE previous borrow ends here
+//~^^ NOTE previous borrow ends here
index 46a64f99354c61c98ef48a0ed056f1a9bc29369c..7d300bfb1483161148dec1fb44e79362929da0a2 100644 (file)
@@ -15,11 +15,11 @@ fn main() {
 
     // FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
     let _bar = Box::new(1_usize) as std::fmt::Debug;
-    //~^ ERROR cast to unsized type: `Box<usize>` as `core::fmt::Debug`
+    //~^ ERROR cast to unsized type: `Box<usize>` as `std::fmt::Debug`
     //~^^ HELP try casting to a `Box` instead
 
     let _baz = 1_usize as std::fmt::Debug;
-    //~^ ERROR cast to unsized type: `usize` as `core::fmt::Debug`
+    //~^ ERROR cast to unsized type: `usize` as `std::fmt::Debug`
     //~^^ HELP consider using a box or reference as appropriate
 
     let _quux = [1_usize, 2] as [usize];
index 9e49abc463f15491906c9a402f844437ebdb980f..0fe01ece558ee0d9a4b8f3fbdcc431252dc932d2 100644 (file)
@@ -14,5 +14,5 @@
 fn main() {
     // FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
     (|| Box::new(*(&[0][..])))();
-    //~^ ERROR the trait `core::marker::Sized` is not implemented for the type `[_]`
+    //~^ ERROR `[_]: std::marker::Sized` is not satisfied
 }
index fa8035a79652ea27a1c7f8e67aad9d47562b6bad..790329cd2e429f13897329353c166236791268d1 100644 (file)
@@ -17,6 +17,6 @@ impl !Sync for Foo {}
 
 static FOO: usize = 3;
 static BAR: Foo = Foo;
-//~^ ERROR: the trait `core::marker::Sync` is not implemented
+//~^ ERROR: `Foo: std::marker::Sync` is not satisfied
 
 fn main() {}
index 83e52216be29baa371506ac14f26f106294db121..787eb7a3b8878e362e49b12123d590a8fc03f608 100644 (file)
@@ -107,7 +107,7 @@ impl Debug for Player {
 }
 
 fn str_to_direction(to_parse: &str) -> RoomDirection {
-    match to_parse {
+    match to_parse { //~ ERROR match arms have incompatible types
         "w" | "west" => RoomDirection::West,
         "e" | "east" => RoomDirection::East,
         "n" | "north" => RoomDirection::North,
@@ -116,7 +116,7 @@ fn str_to_direction(to_parse: &str) -> RoomDirection {
         "out" => RoomDirection::Out,
         "up" => RoomDirection::Up,
         "down" => RoomDirection::Down,
-        _ => None //~ ERROR mismatched types
+        _ => None //~ NOTE match arm with an incompatible type
     }
 }
 
index 56a66ecc8aa456898fe8080bd999e14f32990120..23be4d3536117fe50a594e3607af41dcb7780b83 100644 (file)
@@ -19,7 +19,7 @@ struct G<T: ?Sized> {
 }
 
 impl<T> Drop for G<T> {
-//~^ ERROR: The requirement `T : core::marker::Sized` is added only by the Drop impl. [E0367]
+//~^ ERROR: The requirement `T: core::marker::Sized` is added only by the Drop impl. [E0367]
     fn drop(&mut self) {
         if !self._ptr.is_null() {
         }
index 6300a1dc15d6015cec6aea8b0c34743a529f13f4..33d68c121bf2674a9d11e854690129b1b5a284fe 100644 (file)
@@ -12,7 +12,7 @@ pub trait AbstractRenderer {}
 
 fn _create_render(_: &()) ->
     AbstractRenderer
-//~^ ERROR: the trait `core::marker::Sized` is not implemented
+//~^ ERROR: `AbstractRenderer + 'static: std::marker::Sized` is not satisfied
 {
     match 0 {
         _ => unimplemented!()
index a662e9ca98ee812086e338e874234fd0a00a1f91..a3ad76e1be06bb28bdc54983103620cde25e0be5 100644 (file)
@@ -9,7 +9,7 @@
 // except according to those terms.
 
 fn add_state(op: <isize as HasState>::State) {
-//~^ ERROR the trait `HasState` is not implemented for the type `isize`
+//~^ ERROR `isize: HasState` is not satisfied
 }
 
 trait HasState {
index 8c2c52e6fad41d701f98eff466cebee1d5b708f0..3e21360721b6d87a9076a72e7efdea81fe9d0405 100644 (file)
@@ -11,7 +11,7 @@
 type FuncType<'f> = Fn(&isize) -> isize + 'f;
 
 fn ho_func(f: Option<FuncType>) {
-    //~^ ERROR: the trait `core::marker::Sized` is not implemented for the type
+    //~^ ERROR: `for<'r> std::ops::Fn(&'r isize) -> isize: std::marker::Sized` is not satisfied
 }
 
 fn main() {}
index 8c75d4680faebd7fbeef1258fd5a38442053b2e9..8fbe4432204a49c0810cea899c13cbe0614aafae 100644 (file)
@@ -18,5 +18,5 @@ fn assert_clone<T>() where T : Clone { }
 
 fn main() {
     assert_clone::<foo::core::sync::atomic::AtomicBool>();
-    //~^ ERROR the trait `foo::core::clone::Clone` is not implemented for the type `foo::core::
+    //~^ ERROR `foo::core::sync::atomic::AtomicBool: foo::core::clone::Clone` is not satisfied
 }
index c73a17350648bfba823679cd75eb677bb969c4b1..02c925f336eae8fa20cde1a07638797a6691d096 100644 (file)
@@ -16,5 +16,5 @@ fn assert_clone<T>() where T : Clone { }
 
 fn main() {
     assert_clone::<bar::sync::atomic::AtomicBool>();
-    //~^ ERROR the trait `bar::clone::Clone` is not implemented for the type `bar::sync::atomic::
+    //~^ ERROR `bar::sync::atomic::AtomicBool: bar::clone::Clone` is not satisfied
 }
index 0ef7747c8a84f124c4310cd9b4fe825c29053a0d..dfec48e0a83c1359411bf1e9bfa549be7fab0469 100644 (file)
@@ -20,5 +20,5 @@ fn assert_clone<T>() where T : Clone { }
 
 fn main() {
     assert_clone::<foo::core::sync::atomic::AtomicBool>();
-    //~^ ERROR the trait `core::clone::Clone` is not implemented for the type `core::sync::atomic::
+    //~^ ERROR `core::sync::atomic::AtomicBool: core::clone::Clone` is not satisfied
 }
index 02b9c42b65b9a9e948f997abe2d390b7ce9f0905..87b79b5cd67ebf97106acd297efbcf28ad1431bc 100644 (file)
@@ -8,14 +8,14 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use self::A; //~ ERROR import `A` conflicts with existing submodule
-use self::B; //~ ERROR import `B` conflicts with existing submodule
-mod A {}
-pub mod B {}
+use self::A; //~ NOTE previous import of `A` here
+use self::B; //~ NOTE previous import of `B` here
+mod A {} //~ ERROR a module named `A` has already been imported in this module
+pub mod B {} //~ ERROR a module named `B` has already been imported in this module
 
 mod C {
-    use C::D; //~ ERROR import `D` conflicts with existing submodule
-    mod D {}
+    use C::D; //~ NOTE previous import of `D` here
+    mod D {} //~ ERROR a module named `D` has already been imported in this module
 }
 
 fn main() {}
index 9affb44b7445c7d07736851368efa81e3e7ddc9d..814c1a4131d381faf81a0e469932e5f61f4be662 100644 (file)
 
 type foo = fn(&u8, &u8) -> &u8; //~ ERROR missing lifetime specifier
 //~^ HELP the signature does not say whether it is borrowed from argument 1 or argument 2
+//~^^ HELP run `rustc --explain E0106` to see a detailed explanation
 
 fn bar<F: Fn(&u8, &u8) -> &u8>(f: &F) {} //~ ERROR missing lifetime specifier
 //~^ HELP the signature does not say whether it is borrowed from argument 1 or argument 2
+//~^^ HELP run `rustc --explain E0106` to see a detailed explanation
 
 fn main() {}
index aaf27ba527b18fb936fa49db9a246dd4bc4f380e..b02757fb5a3133acdf5f81f1fdda9a98f644d0bd 100644 (file)
@@ -15,7 +15,7 @@ trait From<Src> {
 }
 
 trait To {
-    fn to<Dst>(  //~ ERROR the trait `core::marker::Sized` is not implemented
+    fn to<Dst>(  //~ ERROR `Self: std::marker::Sized` is not satisfied
         self
     ) -> <Dst as From<Self>>::Result where Dst: From<Self> {
         From::from(self)
index d3a87689ac5e11793d29eef8798d18bda0ca8e48..b2f3a2da516194a1762fe2948529cf1518592363 100644 (file)
@@ -13,5 +13,5 @@ struct X { x: i32 }
 fn main() {
     let mut b: Vec<X> = vec![];
     b.sort();
-    //~^ ERROR the trait `core::cmp::Ord` is not implemented for the type `X`
+    //~^ ERROR `X: std::cmp::Ord` is not satisfied
 }
diff --git a/src/test/compile-fail/issue-20427.rs b/src/test/compile-fail/issue-20427.rs
deleted file mode 100644 (file)
index 99dd22a..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// aux-build:i8.rs
-extern crate i8;
-use std::string as i16;
-static i32: i32 = 0;
-const i64: i64 = 0;
-fn u8(f32: f32) {}
-fn f<f64>(f64: f64) {}
-//~^ ERROR user-defined types or type parameters cannot shadow the primitive types
-type u16 = u16; //~ ERROR user-defined types or type parameters cannot shadow the primitive types
-enum u32 {} //~ ERROR user-defined types or type parameters cannot shadow the primitive types
-struct u64; //~ ERROR user-defined types or type parameters cannot shadow the primitive types
-trait bool {} //~ ERROR user-defined types or type parameters cannot shadow the primitive types
-
-mod char {
-    extern crate i8;
-    static i32_: i32 = 0;
-    const i64_: i64 = 0;
-    fn u8_(f32: f32) {}
-    fn f_<f64_>(f64: f64_) {}
-    type u16_ = u16;
-    enum u32_ {}
-    struct u64_;
-    trait bool_ {}
-    mod char_ {}
-
-    mod str {
-        use super::i8 as i8;
-        use super::i32_ as i32;
-        use super::i64_ as i64;
-        use super::u8_ as u8;
-        use super::f_ as f64;
-        use super::u16_ as u16;
-        //~^ ERROR user-defined types or type parameters cannot shadow the primitive types
-        use super::u32_ as u32;
-        //~^ ERROR user-defined types or type parameters cannot shadow the primitive types
-        use super::u64_ as u64;
-        //~^ ERROR user-defined types or type parameters cannot shadow the primitive types
-        use super::bool_ as bool;
-        //~^ ERROR user-defined types or type parameters cannot shadow the primitive types
-        use super::{bool_ as str};
-        //~^ ERROR user-defined types or type parameters cannot shadow the primitive types
-        use super::char_ as char;
-    }
-}
-
-trait isize_ {
-    type isize; //~ ERROR user-defined types or type parameters cannot shadow the primitive types
-}
-
-fn usize<'usize>(usize: &'usize usize) -> &'usize usize { usize }
-
-fn main() {
-    let bool = true;
-    match bool {
-        str @ true => if str { i32 as i64 } else { i64 },
-        false => i64,
-    };
-}
index f2d65af9cdfc80276be064f05092f903f8be64d3..b7c544c78483aa7b6cd245d1251a7f63a948c9d6 100644 (file)
@@ -10,7 +10,7 @@
 
 fn changer<'a>(mut things: Box<Iterator<Item=&'a mut u8>>) {
     for item in *things { *item = 0 }
-//~^ ERROR the trait `core::marker::Sized` is not implemented for the type `core::iter::Iterator
+//~^ ERROR `std::iter::Iterator<Item=&mut u8>: std::marker::Sized` is not satisfied
 }
 
 fn main() {}
index 07ced7a97ba96db862c30a9bb4ace18a00191fc3..d6cf9acae9b0b9351329685bb01f793697caa6c2 100644 (file)
@@ -40,7 +40,7 @@ type Type_5_<'a> = Type_1_<'a, ()>;
 //type Type_7 = Box<(),,>; // error: expected type, found `,`
 
 
-type Type_8<'a,,> = &'a (); //~ error: expected ident, found `,`
+type Type_8<'a,,> = &'a (); //~ error: expected identifier, found `,`
 
 
-//type Type_9<T,,> = Box<T>; // error: expected ident, found `,`
+//type Type_9<T,,> = Box<T>; // error: expected identifier, found `,`
index 7847dea69ef59486e411755f10769af5abdb1708..d64cec446ef3dafce7ca825b3425a20c547b2954 100644 (file)
@@ -40,7 +40,7 @@ type Type_5_<'a> = Type_1_<'a, ()>;
 //type Type_7 = Box<(),,>; // error: expected type, found `,`
 
 
-//type Type_8<'a,,> = &'a (); // error: expected ident, found `,`
+//type Type_8<'a,,> = &'a (); // error: expected identifier, found `,`
 
 
-type Type_9<T,,> = Box<T>; //~ error: expected ident, found `,`
+type Type_9<T,,> = Box<T>; //~ error: expected identifier, found `,`
index 557bf518a3cfbee46c02c4d65970a6c1df316a2f..0de0ab2269bff833471de2cd6daa791d4ca6a2ab 100644 (file)
@@ -16,6 +16,6 @@ impl Bar {
 
 #[derive(Hash)]
 struct Foo(Bar);
-//~^ error: the trait `core::hash::Hash` is not implemented for the type `Bar`
+//~^ error: `Bar: std::hash::Hash` is not satisfied
 
 fn main() {}
index 30fd2eb4d2f9aeb6cfa527f04d3e5c2150b41adc..c92a404b71a6fbf38ad9afc5af0a6402985d585b 100644 (file)
@@ -15,7 +15,7 @@ trait Trait<'a> {
 
 fn foo<'a, T: Trait<'a>>(value: T::A) {
     let new: T::B = unsafe { std::mem::transmute(value) };
-//~^ ERROR: cannot transmute to or from a type that contains unsubstituted type parameters [E0139]
+//~^ ERROR: transmute called with differently sized types
 }
 
 fn main() { }
index 03dd7b6754ad3ce651f200ea2516cee556505ced..c53d5a0922e647737fa8f21a61623a300e810321 100644 (file)
@@ -55,6 +55,8 @@ impl Mul for Foo {
 //~| HELP `mul1::Mul`
 //~| HELP `mul2::Mul`
 //~| HELP `std::ops::Mul`
+//~| HELP run `rustc --explain E0405` to see a detailed explanation
+//~| HELP you can import several candidates into scope (`use ...;`):
 }
 
 // BEFORE, we got:
@@ -75,17 +77,22 @@ fn getMul() -> Mul {
 //~| HELP `mul3::Mul`
 //~| HELP `mul4::Mul`
 //~| HELP and 2 other candidates
+//~| HELP run `rustc --explain E0412` to see a detailed explanation
+//~| HELP you can import several candidates into scope (`use ...;`):
 }
 
 // Let's also test what happens if the trait doesn't exist:
 impl ThisTraitReallyDoesntExistInAnyModuleReally for Foo {
 //~^ ERROR trait `ThisTraitReallyDoesntExistInAnyModuleReally` is not in scope
+//~^^ HELP run `rustc --explain E0405` to see a detailed explanation
+//~^^^ HELP no candidates by the name of `ThisTraitReallyDoesntExistInAnyModuleReally` found
 }
 
 // Let's also test what happens if there's just one alternative:
 impl Div for Foo {
 //~^ ERROR trait `Div` is not in scope
 //~| HELP `use std::ops::Div;`
+//~| HELP run `rustc --explain E0405` to see a detailed explanation
 }
 
 fn main() {
index 4145d20dea5557189c21b02dbed7be370a10881d..cf5c6e8a3b4f09c0dca79d50436dbec4cd1e14ec 100644 (file)
@@ -13,7 +13,7 @@ pub mod foo {
         // note: trait T is not public, but being in the current
         // crate, it's fine to show it, since the programmer can
         // decide to make it public based on the suggestion ...
-        trait T {}
+        pub trait T {}
     }
     // imports should be ignored:
     use self::bar::T;
@@ -27,4 +27,5 @@ pub mod baz {
 struct Foo;
 impl T for Foo { }
 //~^ ERROR trait `T` is not in scope
-//~| HELP you can to import it into scope: `use foo::bar::T;`.
+//~| HELP you can import it into scope: `use foo::bar::T;`.
+//~| HELP run `rustc --explain E0405` to see a detailed explanation
index ba66496b93039a7a39016611fe3012aaf97a43d5..a1a712d142197ddc485312fb68102fabbe71243a 100644 (file)
@@ -24,7 +24,8 @@ struct Foo;
 // are hidden from the view.
 impl OuterTrait for Foo {}
 //~^ ERROR trait `OuterTrait` is not in scope
-//~| HELP you can to import it into scope: `use issue_21221_3::outer::OuterTrait;`.
+//~| HELP you can import it into scope: `use issue_21221_3::outer::OuterTrait;`.
+//~| HELP run `rustc --explain E0405` to see a detailed explanation
 fn main() {
     println!("Hello, world!");
 }
index 8d09510ae091adc93b5b9181cb040515acf3be26..1ef205bd8be8560a565fe03f4d2bd5a56fef7f7d 100644 (file)
@@ -19,7 +19,8 @@ struct Foo;
 
 impl T for Foo {}
 //~^ ERROR trait `T` is not in scope
-//~| HELP you can to import it into scope: `use issue_21221_4::T;`.
+//~| HELP you can import it into scope: `use issue_21221_4::T;`.
+//~| HELP run `rustc --explain E0405` to see a detailed explanation
 
 fn main() {
     println!("Hello, world!");
index f25a9fd2762522277f45188e1c04c420b4249223..b36918149fa99008b7fd5d7560a63ad82081b759 100644 (file)
@@ -14,8 +14,8 @@ impl Iterator for S {
     type Item = i32;
     fn next(&mut self) -> Result<i32, i32> { Ok(7) }
     //~^ ERROR method `next` has an incompatible type for trait
-    //~| expected enum `core::option::Option`
-    //~|    found enum `core::result::Result` [E0053]
+    //~| expected enum `std::option::Option`
+    //~|    found enum `std::result::Result` [E0053]
 }
 
 fn main() {}
index 256c5d8e6f72cc08d375e1e45caba593528c0e77..9143a226a248355a8a932e859dea6b5e8809717b 100644 (file)
@@ -21,5 +21,5 @@ impl<A> vec_monad<A> for Vec<A> {
 }
 fn main() {
     ["hi"].bind(|x| [x] );
-    //~^ ERROR no method named `bind` found for type `[&str; 1]` in the current scope
+    //~^ ERROR no method named `bind` found for type `[&'static str; 1]` in the current scope
 }
index 535630e0824ca571725c9c766d59e4983a817b7e..11d05ceb9a019d220e4fab6f23fffe374e779945 100644 (file)
 
 #[allow(non_snake_case)]
 mod Foo { }
-//~^ NOTE first definition of type or module `Foo`
+//~^ NOTE previous definition of `Foo` here
 
 #[allow(dead_code)]
 struct Foo;
-//~^ ERROR duplicate definition of type or module `Foo`
-
+//~^ ERROR a module named `Foo` has already been defined in this module
 
 #[allow(non_snake_case)]
 mod Bar { }
-//~^ NOTE first definition of type or module `Bar`
+//~^ NOTE previous definition of `Bar` here
 
 #[allow(dead_code)]
 struct Bar(i32);
-//~^ ERROR duplicate definition of type or module `Bar`
+//~^ ERROR a module named `Bar` has already been defined
 
 
 #[allow(dead_code)]
 struct Baz(i32);
-//~^ NOTE first definition of type or module
+//~^ NOTE previous definition
 
 #[allow(non_snake_case)]
 mod Baz { }
-//~^ ERROR duplicate definition of type or module `Baz`
+//~^ ERROR a type named `Baz` has already been defined
 
 
 #[allow(dead_code)]
 struct Qux { x: bool }
-//~^ NOTE first definition of type or module
+//~^ NOTE previous definition
 
 #[allow(non_snake_case)]
 mod Qux { }
-//~^ ERROR duplicate definition of type or module `Qux`
+//~^ ERROR a type named `Qux` has already been defined
 
 
 #[allow(dead_code)]
 struct Quux;
-//~^ NOTE first definition of type or module
+//~^ NOTE previous definition
 
 #[allow(non_snake_case)]
 mod Quux { }
-//~^ ERROR duplicate definition of type or module `Quux`
+//~^ ERROR a type named `Quux` has already been defined
 
 
 #[allow(dead_code)]
 enum Corge { A, B }
+//~^ NOTE previous definition
 
 #[allow(non_snake_case)]
 mod Corge { }
-//~^ ERROR duplicate definition of type or module `Corge`
+//~^ ERROR a type named `Corge` has already been defined
 
 fn main() { }
index f9a79dbb9c32a2f91c0e6e8926095f452cb2f889..d9dcebfda6a1dbafb93d20323b04219a7c6ab947 100644 (file)
@@ -23,5 +23,8 @@ fn main() {
         call_it(|| x.gen());
         call_it(|| x.gen_mut()); //~ ERROR cannot borrow data mutably in a captured outer
         //~^ ERROR cannot borrow data mutably in a captured outer
+        //~^^ HELP run `rustc --explain E0387` to see a detailed explanation
+        //~^^^ HELP run `rustc --explain E0387` to see a detailed explanation
+        //~^^^^ HELP consider changing this closure to take self by mutable reference
     });
 }
index 8ea63fdf1762d5b3e891cefbb6e58d6cb4cd2a9c..e880a8b212bbc101255d60147d62756d9ff85320 100644 (file)
@@ -32,7 +32,7 @@ fn main() {
     let f1 = Bar;
 
     f1.foo(1usize);
-    //~^ error: the trait `Foo<usize>` is not implemented for the type `Bar`
+    //~^ error: the trait bound `Bar: Foo<usize>` is not satisfied
     //~| help: the following implementations were found:
     //~| help:   <Bar as Foo<i32>>
     //~| help:   <Bar as Foo<u8>>
index 9460ac19596e1cae8f0977360b9a7e5258e62f63..2c5b18a8113f7ac7d95ccb18648da5facb75165f 100644 (file)
@@ -36,7 +36,7 @@ fn main() {
     let f1 = Bar;
 
     f1.foo(1usize);
-    //~^ error: the trait `Foo<usize>` is not implemented for the type `Bar`
+    //~^ error: the trait bound `Bar: Foo<usize>` is not satisfied
     //~| help: the following implementations were found:
     //~| help:   <Bar as Foo<i8>>
     //~| help:   <Bar as Foo<i16>>
index bcf3a0a9ac62b08bc0d70ebad4ed34216dce5215..cb0baee0a8787d538d5f484dc4bd15f34014cf55 100644 (file)
@@ -17,5 +17,5 @@ fn foo<T: Send>() {}
 
 fn main() {
     foo::<HashMap<Rc<()>, Rc<()>>>();
-    //~^ ERROR: the trait `core::marker::Send` is not implemented for the type `alloc::rc::Rc<()>`
+    //~^ ERROR: `std::rc::Rc<()>: std::marker::Send` is not satisfied
 }
index ef6ce5c995bf3f91803e2c7362498f00138d9750..1028923ec82fd6464ae20c12711b55d012b66703 100644 (file)
@@ -16,5 +16,5 @@ fn main() {
     let x = &10 as
             &Add;
             //~^ ERROR the type parameter `RHS` must be explicitly specified in an object type because its default value `Self` references the type `Self`
-            //~| ERROR the value of the associated type `Output` (from the trait `core::ops::Add`) must be specified
+            //~| ERROR the value of the associated type `Output` (from the trait `std::ops::Add`) must be specified
 }
index 8b258180e830fb25822bf79b6ea70c5c6cda922f..3e0ab6d89212a54f328d5915ac49eff0dc8cf5c9 100644 (file)
@@ -14,7 +14,7 @@ fn main() {
     let ptr: *mut () = 0 as *mut _;
     let _: &mut Fn() = unsafe {
         &mut *(ptr as *mut Fn())
-        //~^ ERROR the trait `core::ops::Fn<()>` is not implemented
-        //~| ERROR the trait `core::ops::FnOnce<()>` is not implemented
+        //~^ ERROR `(): std::ops::Fn<()>` is not satisfied
+        //~| ERROR `(): std::ops::FnOnce<()>` is not satisfied
     };
 }
index a05bbe4960e7c3fc691e7ff975f07fad121b8ad5..20ec2d64ae6a8ee0b78fa6813bfae74a131eac74 100644 (file)
@@ -14,7 +14,7 @@ use std::ops::{Add, Sub};
 
 type Test = Add +
             //~^ ERROR the type parameter `RHS` must be explicitly specified in an object type because its default value `Self` references the type `Self`
-            //~^^ ERROR the value of the associated type `Output` (from the trait `core::ops::Add`) must be specified [E0191]
+            //~^^ ERROR the value of the associated type `Output` (from the trait `std::ops::Add`) must be specified [E0191]
             Sub;
             //~^ ERROR only the builtin traits can be used as closure or object bounds
 
index e8c17ca0b362b8b584cc3b6072df4e430b82f89e..0c8c2311dcaa583eb662b00dda4ce4bdadea8f8a 100644 (file)
@@ -17,7 +17,7 @@ struct A (B);
 
 impl A {
     pub fn matches<F: Fn()>(&self, f: &F) {
-        //~^ ERROR reached the recursion limit during monomorphization
+        //~^ ERROR reached the recursion limit while instantiating `A::matches::<[closure
         let &A(ref term) = self;
         term.matches(f);
     }
index aa7fa82fa29ba843654586c2f5ad14681770ddfd..402b9a04496e9a4aeaa73652ca9bfd1cbfb905f3 100644 (file)
@@ -22,6 +22,6 @@ impl<RHS: Scalar> Add <RHS> for Bob {
 
 fn main() {
   let b = Bob + 3.5;
-  b + 3 //~ ERROR: is not implemented
+  b + 3 //~ ERROR E0277
   //~^ ERROR: mismatched types
 }
diff --git a/src/test/compile-fail/issue-22684.rs b/src/test/compile-fail/issue-22684.rs
new file mode 100644 (file)
index 0000000..b7ffbef
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+mod foo {
+    pub struct Foo;
+    impl Foo {
+        fn bar(&self) {}
+    }
+
+    pub trait Baz {
+        fn bar(&self) -> bool {}
+    }
+    impl Baz for Foo {}
+}
+
+fn main() {
+    use foo::Baz;
+
+    // Check that `bar` resolves to the trait method, not the inherent impl method.
+    let _: () = foo::Foo.bar(); //~ ERROR mismatched types
+}
index 92610c1c6d33cb5e254956578f31e9180db96fcd..df2a70160f866e58bafe4a9c4b06518e7493f747 100644 (file)
@@ -19,5 +19,5 @@ fn main()
     println!("{:?}",(vfnfer[0] as Fn)(3));
     //~^ ERROR the precise format of `Fn`-family traits'
     //~| ERROR wrong number of type arguments: expected 1, found 0
-    //~| ERROR the value of the associated type `Output` (from the trait `core::ops::FnOnce`)
+    //~| ERROR the value of the associated type `Output` (from the trait `std::ops::FnOnce`)
 }
index 3c8a64eaf7de4411255e54a3859bb7bdb2368a39..28eebea749cce5a14ad206b62c025089025edb62 100644 (file)
@@ -14,17 +14,20 @@ fn closure_to_loc() {
     //~^ ERROR mismatched types
     //~| NOTE no two closures, even if identical, have the same type
     //~| HELP consider boxing your closure and/or using it as a trait object
+    //~| HELP run `rustc --explain E0308` to see a detailed explanation
 }
 
 fn closure_from_match() {
     let x = match 1usize {
         1 => |c| c + 1,
         2 => |c| c - 1,
+        //~^ NOTE match arm with an incompatible type
         _ => |c| c - 1
     };
-    //~^^^^^ ERROR match arms have incompatible types
+    //~^^^^^^ ERROR match arms have incompatible types
     //~| NOTE no two closures, even if identical, have the same type
     //~| HELP consider boxing your closure and/or using it as a trait object
+    //~| HELP run `rustc --explain E0308` to see a detailed explanation
 }
 
 fn main() { }
index 11376cec14ee32d2442dcd952399d0d141cf0d13..94fb30082892e2f9642b2e436ca84a523f4a1c1f 100644 (file)
@@ -8,16 +8,16 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use std::ops::Add; //~ ERROR import `Add` conflicts with type in this module
-use std::ops::Sub; //~ ERROR import `Sub` conflicts with type in this module
-use std::ops::Mul; //~ ERROR import `Mul` conflicts with type in this module
-use std::ops::Div; //~ ERROR import `Div` conflicts with existing submodule
-use std::ops::Rem; //~ ERROR import `Rem` conflicts with trait in this module
+use std::ops::Add; //~ NOTE previous import
+use std::ops::Sub; //~ NOTE previous import
+use std::ops::Mul; //~ NOTE previous import
+use std::ops::Div; //~ NOTE previous import
+use std::ops::Rem; //~ NOTE previous import
 
-type Add = bool;
-struct Sub { x: f32 }
-enum Mul { A, B }
-mod Div { }
-trait Rem { }
+type Add = bool; //~ ERROR a trait named `Add` has already been imported in this module
+struct Sub { x: f32 } //~ ERROR a trait named `Sub` has already been imported in this module
+enum Mul { A, B } //~ ERROR a trait named `Mul` has already been imported in this module
+mod Div { } //~ ERROR a trait named `Div` has already been imported in this module
+trait Rem {  } //~ ERROR a trait named `Rem` has already been imported in this module
 
 fn main() {}
index 1fca8eb6ac4f3272f49c4c7ef3ffa4c0cc69a206..cafe6d1bb58298b2ac49257a7d3b88137badc0dc 100644 (file)
@@ -11,9 +11,9 @@
 fn main() {
     static foo: Fn() -> u32 = || -> u32 {
         //~^ ERROR: mismatched types:
-        //~| expected `core::ops::Fn() -> u32 + 'static`,
+        //~| expected `std::ops::Fn() -> u32 + 'static`,
         //~| found closure
-        //~| (expected trait core::ops::Fn,
+        //~| (expected trait std::ops::Fn,
         //~| found closure)
         0
     };
diff --git a/src/test/compile-fail/issue-24883.rs b/src/test/compile-fail/issue-24883.rs
new file mode 100644 (file)
index 0000000..097f2a5
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_attrs)]
+
+mod a {
+    pub mod b { pub struct Foo; }
+
+    pub mod c {
+        use super::b;
+        pub struct Bar(pub b::Foo);
+    }
+
+    pub use self::c::*;
+}
+
+#[rustc_error]
+fn main() {  //~ ERROR compilation successful
+    let _ = a::c::Bar(a::b::Foo);
+    let _ = a::Bar(a::b::Foo);
+}
index 40f3b72849612daef711869ec38ed0661fcb07ba..1c255b4e6314a0b51d9ed5d5cbb0584eb544a8f0 100644 (file)
@@ -17,5 +17,5 @@ fn do_fold<B, F: InOut<B, Out=B>>(init: B, f: F) {}
 fn bot<T>() -> T { loop {} }
 
 fn main() {
-    do_fold(bot(), ()); //~ ERROR is not implemented for the type `()`
+    do_fold(bot(), ()); //~ ERROR `(): InOut<_>` is not satisfied
 }
index 4aacb6840e9d51f22a578ba7c8cc950c08aed1e5..51d7baaf3e915ad7d25b0f4f23c223701c9d1ecd 100644 (file)
@@ -21,4 +21,5 @@ fn main() {
 
     foo!(1i32.foo());
     //~^ ERROR no method named `foo` found for type `i32` in the current scope
+    //~^^ NOTE in this expansion of foo!
 }
index 297d3aacfd51e987710ee7a08e14a1d234452b6b..b2775db5e75a7cbfc947f263d48714e9719cf679 100644 (file)
@@ -35,6 +35,4 @@ macro_rules! check_ptr_exist {
 fn main() {
     let item = stuff::Item::new();
     println!("{}", check_ptr_exist!(item, name));
-    //~^ NOTE in this expansion of check_ptr_exist!
-    //~^^ NOTE in this expansion of check_ptr_exist!
 }
index 3ada57c9993054286725fc8475e254330bd064d8..ec77e6ebd7cf38af1b9861b2ca53cc074671d6c9 100644 (file)
@@ -32,6 +32,6 @@ mod foo {
 mod bar {
     pub mod baz {}
     pub type Quux = i32;
-    struct blah { x: i8 }
+    pub struct blah { x: i8 }
     pub const WOMP: i8 = -5;
 }
index fd3e3186bc5c9b020c0b2b07df3745f6efb6bb42..44b3ada97fea8c0de34029616bcd4a7101bfa23e 100644 (file)
@@ -27,6 +27,7 @@ impl HasInfo {
     fn get_other(&mut self) -> usize {
         self.get_size(width!(self))
         //~^ NOTE in this expansion of width!
+        //~| NOTE borrow of `*self` occurs here
     }
 }
 
index 24cc0099b89a522a1b7ae2441dabc6663ba3ce1d..16d7ea468466d703e07e9fcafae4627fe5d91a70 100644 (file)
@@ -21,7 +21,7 @@ struct E {
 
 impl A for E {
     fn b<F: Sync, G>(&self, _x: F) -> F { panic!() }
-    //~^ ERROR `F : core::marker::Sync` appears on the impl method
+    //~^ ERROR `F: std::marker::Sync` appears on the impl method
 }
 
 fn main() {}
index 010803bf25b8a4efd352c6513e9a8d363783b652..9cbb64c2311bc50d94dd8fb2615a70cc6083bf26 100644 (file)
 fn parse_type(iter: Box<Iterator<Item=&str>+'static>) -> &str { iter.next() }
 //~^ ERROR missing lifetime specifier [E0106]
 //~^^ HELP 2 elided lifetimes
+//~^^^ HELP run `rustc --explain E0106` to see a detailed explanation
 
 fn parse_type_2(iter: fn(&u8)->&u8) -> &str { iter() }
 //~^ ERROR missing lifetime specifier [E0106]
 //~^^ HELP lifetime cannot be derived
+//~^^^ HELP run `rustc --explain E0106` to see a detailed explanation
+//~^^^^ HELP consider giving it an explicit bounded or 'static lifetime
 
 fn parse_type_3() -> &str { unimplemented!() }
 //~^ ERROR missing lifetime specifier [E0106]
 //~^^ HELP no value for it to be borrowed from
+//~^^^ HELP run `rustc --explain E0106` to see a detailed explanation
+//~^^^^ HELP consider giving it a 'static lifetime
 
 fn main() {}
diff --git a/src/test/compile-fail/issue-26930.rs b/src/test/compile-fail/issue-26930.rs
new file mode 100644 (file)
index 0000000..6c98f3e
--- /dev/null
@@ -0,0 +1,20 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_attrs)]
+#![allow(unused)]
+
+extern crate core;
+use core as core_export;
+use self::x::*;
+mod x {}
+
+#[rustc_error]
+fn main() {} //~ ERROR compilation successful
diff --git a/src/test/compile-fail/issue-27340.rs b/src/test/compile-fail/issue-27340.rs
new file mode 100644 (file)
index 0000000..6a97ae8
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Foo;
+#[derive(Copy, Clone)]
+//~^ ERROR the trait `Copy` may not be implemented for this type; field `0` does not implement
+struct Bar(Foo);
+
+fn main() {}
index f565d24e1fd082809170801f889a335d0cee5a45..5dded2b1e16973e9cf259805273a349894313de7 100644 (file)
 
 fn main() {
     let _ = Iterator::next(&mut ());
-    //~^ ERROR the trait `core::iter::Iterator` is not implemented
+    //~^ ERROR `(): std::iter::Iterator` is not satisfied
 
     for _ in false {}
-    //~^ ERROR the trait `core::iter::Iterator` is not implemented
+    //~^ ERROR `bool: std::iter::Iterator` is not satisfied
 
     let _ = Iterator::next(&mut ());
-    //~^ ERROR the trait `core::iter::Iterator` is not implemented
+    //~^ ERROR `(): std::iter::Iterator` is not satisfied
 
     other()
 }
@@ -25,11 +25,11 @@ pub fn other() {
     // check errors are still reported globally
 
     let _ = Iterator::next(&mut ());
-    //~^ ERROR the trait `core::iter::Iterator` is not implemented
+    //~^ ERROR `(): std::iter::Iterator` is not satisfied
 
     let _ = Iterator::next(&mut ());
-    //~^ ERROR the trait `core::iter::Iterator` is not implemented
+    //~^ ERROR `(): std::iter::Iterator` is not satisfied
 
     for _ in false {}
-    //~^ ERROR the trait `core::iter::Iterator` is not implemented
+    //~^ ERROR `bool: std::iter::Iterator` is not satisfied
 }
index 23827c55a10443c90553ce567c646ffb92266142..ca5bd9c6717cf55f89f3834de6988fbb53a286f1 100644 (file)
 extern {
   fn foo();
 
-  pub //~ ERROR duplicate definition
+  pub //~ ERROR a value named `foo` has already been defined
   fn foo();
 
-  pub //~ ERROR duplicate definition
+  pub //~ ERROR a value named `foo` has already been defined
   static mut foo: u32;
 }
 
index 1dfff144cef98770b8661d782e4c2b231f8213c0..7c051784f61a7dbbf253550c40cb03dffad0dcee 100644 (file)
 struct MyStruct;
 
 impl Drop for MyStruct {
-//~^ ERROR conflicting implementations of trait
+//~^ NOTE conflicting implementation is here
     fn drop(&mut self) { }
 }
 
 impl Drop for MyStruct {
-//~^ NOTE conflicting implementation is here
+//~^ ERROR conflicting implementations of trait
     fn drop(&mut self) { }
 }
 
diff --git a/src/test/compile-fail/issue-29124.rs b/src/test/compile-fail/issue-29124.rs
new file mode 100644 (file)
index 0000000..b3dc043
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct ret;
+struct obj;
+
+impl obj {
+    fn func() -> ret {
+        ret
+    }
+}
+
+fn func() -> ret {
+    ret
+}
+
+fn main() {
+    obj::func.x();
+    //~^ ERROR no method named `x` found for type `fn() -> ret {obj::func}` in the current scope
+    //~^^ NOTE obj::func is a function, perhaps you wish to call it
+    //~^^^ HELP try calling the base function:
+    //~| SUGGESTION obj::func().x();
+    func.x();
+    //~^ ERROR no method named `x` found for type `fn() -> ret {func}` in the current scope
+    //~^^ NOTE func is a function, perhaps you wish to call it
+    //~^^^ HELP try calling the base function:
+    //~| SUGGESTION func().x();
+}
index 64bfa232f3ffd0220c86a45f717919005be18ef6..0ecaa409412a62a92fc6cfb863134b864926648d 100644 (file)
@@ -28,5 +28,5 @@ impl Foo for S5<u32> { fn xxx(&self) {} }
 impl Foo for S5<u64> { fn xxx(&self) {} }
 
 fn main() {
-    let _ = <S5<_>>::xxx; //~ ERROR cannot resolve `S5<_> : Foo`
+    let _ = <S5<_>>::xxx; //~ ERROR cannot resolve `S5<_>: Foo`
 }
index 1821f5717cf7916ad33d7529afd607172a8de514..f7453c45be645d7bd0d3bcbb97ab1eba65f35cef 100644 (file)
@@ -13,7 +13,6 @@ mod a {
 
     impl Default for A {
         pub fn default() -> A {
-            //~^ ERROR E0449
             A;
         }
     }
@@ -22,5 +21,5 @@ mod a {
 
 fn main() {
     a::A::default();
-    //~^ ERROR method `default` is inaccessible
+    //~^ ERROR struct `A` is private
  }
diff --git a/src/test/compile-fail/issue-30079.rs b/src/test/compile-fail/issue-30079.rs
new file mode 100644 (file)
index 0000000..a8db01b
--- /dev/null
@@ -0,0 +1,55 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_attrs)]
+#![allow(unused)]
+
+struct SemiPriv;
+
+mod m1 {
+    struct Priv;
+    impl ::SemiPriv {
+        pub fn f(_: Priv) {} //~ WARN private type in public interface
+        //~^ WARNING hard error
+    }
+
+    impl Priv {
+        pub fn f(_: Priv) {} // ok
+    }
+}
+
+mod m2 {
+    struct Priv;
+    impl ::std::ops::Deref for ::SemiPriv {
+        type Target = Priv; //~ WARN private type in public interface
+        //~^ WARNING hard error
+        fn deref(&self) -> &Self::Target { unimplemented!() }
+    }
+
+    impl ::std::ops::Deref for Priv {
+        type Target = Priv; // ok
+        fn deref(&self) -> &Self::Target { unimplemented!() }
+    }
+}
+
+trait SemiPrivTrait {
+    type Assoc;
+}
+
+mod m3 {
+    struct Priv;
+    impl ::SemiPrivTrait for () {
+        type Assoc = Priv; //~ WARN private type in public interface
+        //~^ WARNING hard error
+    }
+}
+
+#[rustc_error]
+fn main() {} //~ ERROR compilation successful
index 5c539cd739cefe51df9d933a85ef28098dcc15e6..2b0a24cfdb35f9e3bbabf3baaa3a5f7a0904f21a 100644 (file)
@@ -17,7 +17,6 @@ fn siphash(k0 : u64) {
     impl siphash {
         pub fn reset(&mut self) {
            self.v0 = k0 ^ 0x736f6d6570736575; //~ ERROR can't capture dynamic environment
-           //~^ ERROR unresolved name `k0`
         }
     }
 }
index 03473bd44cc1bcce136af6bdf35c25260068f068..635006a3b4dad62af9afe2611bd533bf5578580c 100644 (file)
@@ -13,8 +13,6 @@ fn siphash<T>() {
     trait t {
         fn g(&self, x: T) -> T;  //~ ERROR can't use type parameters from outer function; try using
         //~^ ERROR can't use type parameters from outer function; try using
-        //~^^ ERROR type name `T` is undefined or not in scope
-        //~^^^ ERROR type name `T` is undefined or not in scope
     }
 }
 
index ecc8ac34ecf2ba5f45cc5cf2c47805a385eed093..c23e12e713aad797157fa2fc7e897aeef6997ff4 100644 (file)
@@ -29,9 +29,7 @@ fn siphash(k0 : u64, k1 : u64) {
    impl siphash for SipState {
         fn reset(&self) {
             self.v0 = k0 ^ 0x736f6d6570736575; //~ ERROR can't capture dynamic environment
-            //~^ ERROR unresolved name `k0`
             self.v1 = k1 ^ 0x646f72616e646f6d; //~ ERROR can't capture dynamic environment
-            //~^ ERROR unresolved name `k1`
         }
         fn result(&self) -> u64 { return mk_result(self); }
     }
index 7cf772b0728795334680a6b3ff01c68e27447623..f93a333d2aea7b8f37ed1b5f37915bbb641f6d89 100644 (file)
@@ -20,7 +20,6 @@ fn siphash(k0 : u64) {
     impl SipHash for SipState {
         fn reset(&self) {
            self.v0 = k0 ^ 0x736f6d6570736575; //~ ERROR can't capture dynamic environment
-           //~^ ERROR unresolved name `k0`
         }
     }
     panic!();
index 26508a47224253462be8ff723601771a5af4383f..56f0b31da0d80ba3997ba08503a6e767980a1f15 100644 (file)
@@ -18,8 +18,10 @@ fn is_empty<T>(s: Stack<T>) -> bool {
         Nil => true,
 //~^ WARN pattern binding `Nil` is named the same as one of the variants of the type `Stack`
 //~| HELP consider making the path in the pattern qualified: `Stack::Nil`
+//~| HELP run `rustc --explain E0170` to see a detailed explanation
         _ => false
 //~^ ERROR unreachable pattern
+//~| HELP run `rustc --explain E0001` to see a detailed explanation
     }
 }
 
diff --git a/src/test/compile-fail/issue-30560.rs b/src/test/compile-fail/issue-30560.rs
new file mode 100644 (file)
index 0000000..71c7e60
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+type Alias = ();
+use Alias::*; //~ ERROR Not a module
+use std::io::Result::*; //~ ERROR Not a module
+
+trait T {}
+use T::*; //~ ERROR items in traits are not importable
+
+fn main() {}
diff --git a/src/test/compile-fail/issue-30730.rs b/src/test/compile-fail/issue-30730.rs
new file mode 100644 (file)
index 0000000..82804bb
--- /dev/null
@@ -0,0 +1,13 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(warnings)] //~ NOTE: lint level defined here
+use std::thread; //~ ERROR: unused import
+fn main() {}
index 316199b6730bb2684f1d389345cd081778fe4ea6..cc7de01b0631ba1ed1b61db79735c4730ea63a83 100644 (file)
@@ -10,6 +10,6 @@
 
 enum a { b, c }
 
-enum a { d, e } //~ ERROR duplicate definition of type or module `a`
+enum a { d, e } //~ ERROR a type named `a` has already been defined in this module
 
 fn main() {}
index b3f1b2a32eae3434ccc46c862bdad5e0802f83b1..ae667341022f61659eb8c2325054372c08fc94cb 100644 (file)
@@ -10,6 +10,6 @@
 
 pub mod a {}
 
-pub mod a {} //~ ERROR duplicate definition of type or module `a`
+pub mod a {} //~ ERROR a module named `a` has already been defined in this module
 
 fn main() {}
index cdc377a09996f58f9558bbbe9411e959b26ea5fb..34bc21833e59f5a3b434cc3c476c5e60ef7c15b1 100644 (file)
@@ -12,7 +12,7 @@ fn a(x: String) -> String {
     format!("First function with {}", x)
 }
 
-fn a(x: String, y: String) -> String { //~ ERROR duplicate definition of value `a`
+fn a(x: String, y: String) -> String { //~ ERROR a value named `a` has already been defined
     format!("Second function with {} and {}", x, y)
 }
 
diff --git a/src/test/compile-fail/issue-31511.rs b/src/test/compile-fail/issue-31511.rs
new file mode 100644 (file)
index 0000000..dd1af2f
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn cast_thin_to_fat(x: *const ()) {
+    x as *const [u8];
+    //~^ ERROR: cannot cast thin pointer `*const ()` to fat pointer `*const [u8]`
+}
+
+fn main() {}
diff --git a/src/test/compile-fail/issue-31804.rs b/src/test/compile-fail/issue-31804.rs
new file mode 100644 (file)
index 0000000..b6a04be
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that error recovery in the parser to an EOF does not give an infinite
+// spew of errors.
+
+fn main() {
+    let
+} //~ ERROR unexpected token: `}`
diff --git a/src/test/compile-fail/issue-32119.rs b/src/test/compile-fail/issue-32119.rs
new file mode 100644 (file)
index 0000000..4743b77
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_attrs)]
+
+pub type T = ();
+mod foo { pub use super::T; }
+mod bar { pub use super::T; }
+
+pub use foo::*;
+pub use bar::*;
+
+mod baz {
+    pub type T = ();
+    mod foo { pub use super::T as S; }
+    mod bar { pub use super::foo::S as T; }
+    pub use self::bar::*;
+}
+
+#[rustc_error]
+fn main() {} //~ ERROR compilation successful
diff --git a/src/test/compile-fail/issue-32128.rs b/src/test/compile-fail/issue-32128.rs
new file mode 100644 (file)
index 0000000..fe7e66a
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Example {
+    example: Box<Fn(i32) -> i32>
+}
+
+fn main() {
+    let demo = Example {
+        example: Box::new(|x| {
+            x + 1
+        })
+    };
+
+    demo.example(1);    //~ ERROR no method named `example`
+                        //~^ NOTE use `(demo.example)(...)`
+    // (demo.example)(1);
+}
index 4f955df82051f535df746e4147542fdf0c3cbecb..d3b932fbc53efa5710b0a0843e25809f91148f4a 100644 (file)
@@ -11,7 +11,6 @@
 fn foo<T>() {
     struct foo {
         x: T, //~ ERROR can't use type parameters from outer function;
-        //~^ ERROR type name `T` is undefined or not in scope
     }
 
     impl<T> Drop for foo<T> {
diff --git a/src/test/compile-fail/issue-32201.rs b/src/test/compile-fail/issue-32201.rs
new file mode 100644 (file)
index 0000000..bcc53df
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern {
+    fn foo(a: i32, ...);
+}
+
+fn bar(_: *const u8) {}
+
+fn main() {
+    unsafe {
+        foo(0, bar);
+        //~^ ERROR can't pass `fn(*const u8) {bar}` to variadic function, cast to `fn(*const u8)`
+    }
+}
diff --git a/src/test/compile-fail/issue-32222.rs b/src/test/compile-fail/issue-32222.rs
new file mode 100644 (file)
index 0000000..b3b34f4
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_attrs)]
+#![allow(warnings)]
+
+mod foo {
+    pub fn bar() {}
+}
+
+pub use foo::*;
+use b::bar;
+
+mod foobar {
+    use super::*;
+}
+
+mod a {
+    pub mod bar {}
+}
+
+mod b {
+    pub use a::bar;
+}
+
+#[rustc_error]
+fn main() {} //~ ERROR compilation successful
diff --git a/src/test/compile-fail/issue-32323.rs b/src/test/compile-fail/issue-32323.rs
new file mode 100644 (file)
index 0000000..e3461e5
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub trait Tr<'a> {
+    type Out;
+}
+
+pub fn f<'a, T: Tr<'a>>() -> <T as Tr<'a>>::Out {}
+//~^ ERROR not all control paths return a value
+
+pub fn main() {}
diff --git a/src/test/compile-fail/issue-32326.rs b/src/test/compile-fail/issue-32326.rs
new file mode 100644 (file)
index 0000000..8af243a
--- /dev/null
@@ -0,0 +1,20 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Regression test for #32326. We ran out of memory because we
+// attempted to expand this case up to the recursion limit, and 2^N is
+// too big.
+
+enum Expr { //~ ERROR E0072
+    Plus(Expr, Expr),
+    Literal(i64),
+}
+
+fn main() { }
diff --git a/src/test/compile-fail/issue-32377.rs b/src/test/compile-fail/issue-32377.rs
new file mode 100644 (file)
index 0000000..6e81263
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::mem;
+use std::marker::PhantomData;
+
+trait Foo {
+    type Error;
+}
+
+struct Bar<U: Foo> {
+    stream: PhantomData<U::Error>,
+}
+
+fn foo<U: Foo>(x: [usize; 2]) -> Bar<U> {
+    unsafe { mem::transmute(x) }
+    //~^ ERROR transmute called with differently sized types
+}
+
+fn main() {}
diff --git a/src/test/compile-fail/issue-32797.rs b/src/test/compile-fail/issue-32797.rs
new file mode 100644 (file)
index 0000000..af75783
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub use bar::*;
+mod bar {
+    pub use super::*;
+}
+
+pub use baz::*; //~ ERROR already been imported
+mod baz {
+    pub use main as f;
+}
+
+pub fn main() {}
index ad5bc4e445c9839b7ebc41064ef684f460087340..6cd2c02c417ea8ed5f8aacfd19367259ed7b72b5 100644 (file)
@@ -13,7 +13,6 @@ fn main() {
 
     static y: isize = foo + 1;
     //~^ ERROR attempt to use a non-constant value in a constant
-    //~| ERROR unresolved name `foo`
 
     println!("{}", y);
 }
index 34cd8cae2de32a64a3255c45b05354428b7c5910..52375ef281ace24513c5f70d46257acd5512d401 100644 (file)
@@ -15,8 +15,7 @@ fn main() {
     enum Stuff {
         Bar = foo
         //~^ ERROR attempt to use a non-constant value in a constant
-        //~| ERROR unresolved name `foo`
-        //~^^^ ERROR constant evaluation error: non-constant path in constant expression
+        //~^^ ERROR constant evaluation error: non-constant path in constant expression
     }
 
     println!("{}", Stuff::Bar);
index a09c8090de06de5716e718d3d33e6291f7edfaf9..16fb2f68133f291f9519dcb591330338e28684d8 100644 (file)
@@ -11,7 +11,6 @@
 fn f(x:isize) {
     static child: isize = x + 1;
     //~^ ERROR attempt to use a non-constant value in a constant
-    //~| ERROR unresolved name `x`
 }
 
 fn main() {}
index 9b7476244f0d26a19e0d91639fea3c306c20406f..9c31dc1e38ef8ff312193b105a73e9346e9d0153 100644 (file)
@@ -17,7 +17,6 @@ impl PTrait for P {
    fn getChildOption(&self) -> Option<Box<P>> {
        static childVal: Box<P> = self.child.get();
        //~^ ERROR attempt to use a non-constant value in a constant
-       //~| ERROR unresolved name `self`
        panic!();
    }
 }
index 4aff95b57decbb0c7115bf6bd8d1b046b35407a6..fc918c278ef5d5d990d603cca93db5cee61672ed 100644 (file)
@@ -12,9 +12,9 @@ fn main() {
     match None {
         Err(_) => ()
         //~^ ERROR mismatched types
-        //~| expected `core::option::Option<_>`
-        //~| found `core::result::Result<_, _>`
-        //~| expected enum `core::option::Option`
-        //~| found enum `core::result::Result`
+        //~| expected `std::option::Option<_>`
+        //~| found `std::result::Result<_, _>`
+        //~| expected enum `std::option::Option`
+        //~| found enum `std::result::Result`
     }
 }
index a3d90a00d038ec14a4d2e218e20b167ddc8b88df..1dbf211b269414ea8c8531c5b8b91787150862cf 100644 (file)
@@ -11,7 +11,7 @@
 // aux-build:issue_3907.rs
 extern crate issue_3907;
 
-type Foo = issue_3907::Foo;
+type Foo = issue_3907::Foo; //~ NOTE: type defined here
 
 struct S {
     name: isize
index 328de9f8187e56d2f1b38658fca5db8fd3d806e6..62db68dcbb2ee63f7ceb8eaf446c9a2a0acce957 100644 (file)
@@ -17,7 +17,7 @@ impl Foo {
         Foo { baz: 0 }.bar();
     }
 
-    fn bar() { //~ ERROR duplicate associated function
+    fn bar() { //~ ERROR duplicate definitions
     }
 }
 
index 9e324cdd61eb0e82f40308131b51c72d1d1ce360..83ff95cc2ea48f59c0086e69e970eb4a3aa1752f 100644 (file)
@@ -11,6 +11,6 @@
 trait I {}
 type K = I+'static;
 
-fn foo(_x: K) {} //~ ERROR: the trait `core::marker::Sized` is not implemented
+fn foo(_x: K) {} //~ ERROR: `I + 'static: std::marker::Sized` is not satisfied
 
 fn main() {}
index cdf9d3bd36ecefcf1f32de2f0a317ecc7cdf2d45..dabeb503841c548208b3b96260a778d6c21267ea 100644 (file)
@@ -9,7 +9,7 @@
 // except according to those terms.
 
 trait I {}
-type K = I;
+type K = I; //~ NOTE: type defined here
 impl K for isize {} //~ ERROR: `K` is not a trait
 //~^ NOTE: `type` aliases cannot be used for traits
 fn main() {}
index 9ff957b6e6deac4904d0d5066390bd4dd53e2803..019a7bdc734d4be95e48aac78050987589613d81 100644 (file)
@@ -15,8 +15,8 @@ struct Struct {
 }
 
 fn new_struct(r: A+'static)
-    -> Struct { //~^  ERROR the trait `core::marker::Sized` is not implemented
-    //~^ ERROR the trait `core::marker::Sized` is not implemented
+    -> Struct { //~^  ERROR `A + 'static: std::marker::Sized` is not satisfied
+    //~^ ERROR `A + 'static: std::marker::Sized` is not satisfied
     Struct { r: r }
 }
 
index 20d239c6ae046540009c5215f44c7d415be7c04a..463fdaa106926a898579d7335aba455180227b17 100644 (file)
@@ -11,7 +11,6 @@
 fn f<Z>() -> bool {
     enum E { V(Z) }
     //~^ ERROR can't use type parameters from outer function
-    //~^^ ERROR type name `Z` is undefined or not in scope
     true
 }
 
index 40be2f04cb43ca5daace5380ff219a76ee9936b2..e9cfafc98df0ef47fffdfe2768cd6353e7071555 100644 (file)
@@ -9,8 +9,7 @@
 // except according to those terms.
 
 fn f<T>() -> bool {
-    struct S(T); //~ ERROR type name `T` is undefined or not in scope
-    //~^ ERROR can't use type parameters from outer function; try using
+    struct S(T); //~ ERROR can't use type parameters from outer function; try using
 
     true
 }
index 2c1809d4d4886f45c25a7f5ff87a679d10eeaa17..1fe10ba7a27bba10bc7d762c6671b85a4452cb03 100644 (file)
@@ -13,7 +13,6 @@ impl A {
     fn m(&self) {
         fn x() {
             self.m() //~ ERROR can't capture dynamic environment in a fn item
-            //~^ ERROR unresolved name `self`
         }
     }
 }
index 66ed817ffa82637ac093bf2c6a01ff67402c62db..6cb825a9be736f7f9c3dbd049193e6f4c92bcaa1 100644 (file)
@@ -16,4 +16,5 @@ struct Monster {
 fn main() {
     let _m = Monster(); //~ ERROR `Monster` is the name of a struct or
     //~^ HELP did you mean to write: `Monster { /* fields */ }`?
+    //~| HELP run `rustc --explain E0423` to see a detailed explanation
 }
index ffab194149e12c79ba1525339844aaf40e2739b4..f6b7e13c4f5e9328593e8bab6c99e8b233d8ffac 100644 (file)
@@ -24,9 +24,13 @@ fn main() { //~ ERROR compilation successful
         _ => {},
     };
     //~^^^ WARNING unmatchable NaN in pattern, use the is_nan method in a guard instead
+    //~| WARNING floating point constants cannot be used
+    //~| WARNING this was previously accepted
     match [x, 1.0] {
         [NAN, _] => {},
         _ => {},
     };
     //~^^^ WARNING unmatchable NaN in pattern, use the is_nan method in a guard instead
+    //~| WARNING floating point constants cannot be used
+    //~| WARNING this was previously accepted
 }
index f5c879a07ee56b602d8648c7fd5974849bed3b27..c8021a229957f77227a9e95ed2146469d204d97e 100644 (file)
@@ -12,17 +12,17 @@ struct T;
 
 mod t1 {
     type Foo = ::T;
-    mod Foo {} //~ ERROR: duplicate definition of type or module `Foo`
+    mod Foo {} //~ ERROR: `Foo` has already been defined
 }
 
 mod t2 {
     type Foo = ::T;
-    struct Foo; //~ ERROR: duplicate definition of type or module `Foo`
+    struct Foo; //~ ERROR: `Foo` has already been defined
 }
 
 mod t3 {
     type Foo = ::T;
-    enum Foo {} //~ ERROR: duplicate definition of type or module `Foo`
+    enum Foo {} //~ ERROR: `Foo` has already been defined
 }
 
 mod t4 {
@@ -32,7 +32,7 @@ mod t4 {
 
 mod t5 {
     type Bar<T> = T;
-    mod Bar {} //~ ERROR: duplicate definition of type or module `Bar`
+    mod Bar {} //~ ERROR: `Bar` has already been defined
 }
 
 mod t6 {
index 90ecfb6015dcd4a3a672caee67d558c80fabed83..95bbd4eccf4ff08cebdb83bcad077d0847a4ebbf 100644 (file)
@@ -34,5 +34,5 @@ struct A {
 
 fn main() {
     let a = A {v: box B{v: None} as Box<Foo+Send>};
-    //~^ ERROR the trait `core::marker::Send` is not implemented
+    //~^ ERROR `std::rc::Rc<std::cell::RefCell<A>>: std::marker::Send` is not satisfied
 }
index 6f9fb2e61f2a81f255e456731c3da3b898a952b1..06573bea13c1474abad123865dc43a75f3194f03 100644 (file)
@@ -9,6 +9,6 @@
 // except according to those terms.
 
 static X: isize = 0;
-struct X; //~ ERROR error: duplicate definition of value `X`
+struct X; //~ ERROR `X` has already been defined
 
 fn main() {}
index eefb34fbe4c91121419867aa10086f8defdc7083..4a278bbdeb04a4fcc5320ce6adab4ef9e8117684 100644 (file)
@@ -16,9 +16,9 @@ fn foo(x: Whatever) {
         Some(field) =>
 //~^ ERROR mismatched types
 //~| expected `Whatever`
-//~| found `core::option::Option<_>`
+//~| found `std::option::Option<_>`
 //~| expected enum `Whatever`
-//~| found enum `core::option::Option`
+//~| found enum `std::option::Option`
             field.access(), //~ ERROR the type of this value must be known in this context
     }
 }
index 87b7b73d57dbd41b18b560199588bb21b0339b64..16b407baad178eece4faea4a35123d70b206a406 100644 (file)
@@ -16,6 +16,6 @@ use std::cell::RefCell;
 // Regression test for issue 7364
 static boxed: Box<RefCell<isize>> = box RefCell::new(0);
 //~^ ERROR allocations are not allowed in statics
-//~| ERROR the trait `core::marker::Sync` is not implemented for the type
+//~| ERROR `std::cell::RefCell<isize>: std::marker::Sync` is not satisfied
 
 fn main() { }
index 2d1cea1d44b746698ff438f6db9d2edc3d69161d..d13da1d9fd94817727296b389166f40fe7ceb97f 100644 (file)
@@ -24,11 +24,9 @@ impl CrateId {
 }
 
 pub fn remove_package_from_database() {
-    let mut lines_to_use: Vec<&CrateId> = Vec::new();
+    let mut lines_to_use: Vec<&CrateId> = Vec::new(); //~ ERROR E0495
     let push_id = |installed_id: &CrateId| {
         lines_to_use.push(installed_id);
-        //~^ ERROR cannot infer an appropriate lifetime for automatic coercion due to
-        // conflicting requirements
     };
     list_database(push_id);
 
index 6c7196527efdbd73db01cf703c57bc6d9a41282d..3cb30981b673c0ff0aa8278f4b5915d722dd743b 100644 (file)
@@ -9,6 +9,7 @@
 // except according to those terms.
 
 // Test the mechanism for warning about possible missing `self` declarations.
+// ignore-tidy-linelength
 
 trait CtxtFn {
     fn f8(self, usize) -> usize;
@@ -72,15 +73,15 @@ impl ManyImplTrait for Myisize {}
 fn no_param_bound(u: usize, m: Myisize) -> usize {
     u.f8(42) + u.f9(342) + m.fff(42)
             //~^ ERROR no method named `f9` found for type `usize` in the current scope
-            //~^^ NOTE found defined static methods, maybe a `self` is missing?
+            //~^^ NOTE found the following associated functions; to be used as methods, functions must have a `self` parameter
             //~^^^ ERROR no method named `fff` found for type `Myisize` in the current scope
-            //~^^^^ NOTE found defined static methods, maybe a `self` is missing?
+            //~^^^^ NOTE found the following associated functions; to be used as methods, functions must have a `self` parameter
 }
 
 fn param_bound<T: ManyImplTrait>(t: T) -> bool {
     t.is_str()
     //~^ ERROR no method named `is_str` found for type `T` in the current scope
-    //~^^ NOTE found defined static methods, maybe a `self` is missing?
+    //~^^ NOTE found the following associated functions; to be used as methods, functions must have a `self` parameter
 }
 
 fn main() {
index 400806c3a5fa6cbfe7110e1b887212ac5be62181..95513860b084f340a4e9e019718ddb2b30491fd1 100644 (file)
@@ -26,15 +26,15 @@ fn main() {
     match &Some(42) {
         Some(x) => (),
         //~^ ERROR mismatched types
-        //~| expected `&core::option::Option<_>`
-        //~| found `core::option::Option<_>`
+        //~| expected `&std::option::Option<_>`
+        //~| found `std::option::Option<_>`
         //~| expected &-ptr
-        //~| found enum `core::option::Option`
+        //~| found enum `std::option::Option`
         None => ()
         //~^ ERROR mismatched types
-        //~| expected `&core::option::Option<_>`
-        //~| found `core::option::Option<_>`
+        //~| expected `&std::option::Option<_>`
+        //~| found `std::option::Option<_>`
         //~| expected &-ptr
-        //~| found enum `core::option::Option`
+        //~| found enum `std::option::Option`
     }
 }
index ea7224939ce56364f20a2020ed987ef1a99b6056..457918b54d4aed619e5ef128085a0ccb03fa1ad1 100644 (file)
@@ -18,7 +18,7 @@ trait Bar {
 
 impl Bar for Foo {
     fn bar(&self) -> isize {1}
-    fn bar(&self) -> isize {2} //~ ERROR duplicate method
+    fn bar(&self) -> isize {2} //~ ERROR duplicate definitions
 }
 
 fn main() {
index 95921556c7db1de120f85c73887fc83e8fe23c87..fe51d0b69987a5142775be84bf7f3b54364bcac4 100644 (file)
@@ -35,23 +35,23 @@ fn main() {
     assert!(thread::spawn(move|| { 1i64 / 0; }).join().is_err());
     //~^ ERROR attempted to divide by zero
     assert!(thread::spawn(move|| { isize::MIN % -1; }).join().is_err());
-    //~^ ERROR attempted remainder with overflow
+    //~^ ERROR attempted to calculate the remainder with overflow
     assert!(thread::spawn(move|| { i8::MIN % -1; }).join().is_err());
-    //~^ ERROR attempted remainder with overflow
+    //~^ ERROR attempted to calculate the remainder with overflow
     assert!(thread::spawn(move|| { i16::MIN % -1; }).join().is_err());
-    //~^ ERROR attempted remainder with overflow
+    //~^ ERROR attempted to calculate the remainder with overflow
     assert!(thread::spawn(move|| { i32::MIN % -1; }).join().is_err());
-    //~^ ERROR attempted remainder with overflow
+    //~^ ERROR attempted to calculate the remainder with overflow
     assert!(thread::spawn(move|| { i64::MIN % -1; }).join().is_err());
-    //~^ ERROR attempted remainder with overflow
+    //~^ ERROR attempted to calculate the remainder with overflow
     assert!(thread::spawn(move|| { 1isize % 0; }).join().is_err());
-    //~^ ERROR attempted remainder with a divisor of zero
+    //~^ ERROR attempted to calculate the remainder with a divisor of zero
     assert!(thread::spawn(move|| { 1i8 % 0; }).join().is_err());
-    //~^ ERROR attempted remainder with a divisor of zero
+    //~^ ERROR attempted to calculate the remainder with a divisor of zero
     assert!(thread::spawn(move|| { 1i16 % 0; }).join().is_err());
-    //~^ ERROR attempted remainder with a divisor of zero
+    //~^ ERROR attempted to calculate the remainder with a divisor of zero
     assert!(thread::spawn(move|| { 1i32 % 0; }).join().is_err());
-    //~^ ERROR attempted remainder with a divisor of zero
+    //~^ ERROR attempted to calculate the remainder with a divisor of zero
     assert!(thread::spawn(move|| { 1i64 % 0; }).join().is_err());
-    //~^ ERROR attempted remainder with a divisor of zero
+    //~^ ERROR attempted to calculate the remainder with a divisor of zero
 }
index 5c1592e65473caa6fad5b4e5d5d6494c138b63ee..e469e05a2444a798c85c561a3dd86936a61843f8 100644 (file)
@@ -12,8 +12,8 @@
 
 mod foo {
     use baz::bar;
-    //~^ ERROR import `bar` conflicts with existing submodule
     mod bar {}
+    //~^ ERROR a module named `bar` has already been imported
 }
 mod baz { pub mod bar {} }
 
index 72da6dcaa6c45a22e76a62a97282c958d6273838..18b7948c46c7cace2458bb84302f9be4608fa969 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// error-pattern:reached the recursion limit during monomorphization
+// error-pattern:reached the recursion limit while instantiating `generic::<std::option::Option<
 
 // Verify the compiler fails with an error on infinite function
 // recursions.
index 35be01970cb56d4acfef455f7ff060e05c899026..1c98abce0304e149f092ea7e0a4137c0cac303d5 100644 (file)
 
 enum Foo {
     A = 1i64,
-    //~^ ERROR mismatched types
-    //~| expected `isize`
-    //~| found `i64`
+    //~^ ERROR mismatched types:
+    //~| expected `isize`,
+    //~| found `i64` [E0080]
     B = 2u8
-    //~^ ERROR mismatched types
-    //~| expected `isize`
-    //~| found `u8`
+    //~^ ERROR mismatched types:
+    //~| expected `isize`,
+    //~| found `u8` [E0080]
 }
 
 fn main() {}
index 997c940c9547e9ffd24877c98b8cb35c25099342..08b4e1a45f3368669be85be0838ca9763c63ca8d 100644 (file)
@@ -34,14 +34,14 @@ fn test<'a,T,U:Copy>(_: &'a isize) {
     assert_copy::<&'a [isize]>();
 
     // ...unless they are mutable
-    assert_copy::<&'static mut isize>(); //~ ERROR `core::marker::Copy` is not implemented
-    assert_copy::<&'a mut isize>();  //~ ERROR `core::marker::Copy` is not implemented
+    assert_copy::<&'static mut isize>(); //~ ERROR : std::marker::Copy` is not satisfied
+    assert_copy::<&'a mut isize>();  //~ ERROR : std::marker::Copy` is not satisfied
 
     // boxes are not ok
-    assert_copy::<Box<isize>>();   //~ ERROR `core::marker::Copy` is not implemented
-    assert_copy::<String>();   //~ ERROR `core::marker::Copy` is not implemented
-    assert_copy::<Vec<isize> >(); //~ ERROR `core::marker::Copy` is not implemented
-    assert_copy::<Box<&'a mut isize>>(); //~ ERROR `core::marker::Copy` is not implemented
+    assert_copy::<Box<isize>>();   //~ ERROR : std::marker::Copy` is not satisfied
+    assert_copy::<String>();   //~ ERROR : std::marker::Copy` is not satisfied
+    assert_copy::<Vec<isize> >(); //~ ERROR : std::marker::Copy` is not satisfied
+    assert_copy::<Box<&'a mut isize>>(); //~ ERROR : std::marker::Copy` is not satisfied
 
     // borrowed object types are generally ok
     assert_copy::<&'a Dummy>();
@@ -49,11 +49,11 @@ fn test<'a,T,U:Copy>(_: &'a isize) {
     assert_copy::<&'static (Dummy+Copy)>();
 
     // owned object types are not ok
-    assert_copy::<Box<Dummy>>(); //~ ERROR `core::marker::Copy` is not implemented
-    assert_copy::<Box<Dummy+Copy>>(); //~ ERROR `core::marker::Copy` is not implemented
+    assert_copy::<Box<Dummy>>(); //~ ERROR : std::marker::Copy` is not satisfied
+    assert_copy::<Box<Dummy+Copy>>(); //~ ERROR : std::marker::Copy` is not satisfied
 
     // mutable object types are not ok
-    assert_copy::<&'a mut (Dummy+Copy)>();  //~ ERROR `core::marker::Copy` is not implemented
+    assert_copy::<&'a mut (Dummy+Copy)>();  //~ ERROR : std::marker::Copy` is not satisfied
 
     // unsafe ptrs are ok
     assert_copy::<*const isize>();
@@ -71,10 +71,10 @@ fn test<'a,T,U:Copy>(_: &'a isize) {
     assert_copy::<MyStruct>();
 
     // structs containing non-POD are not ok
-    assert_copy::<MyNoncopyStruct>(); //~ ERROR `core::marker::Copy` is not implemented
+    assert_copy::<MyNoncopyStruct>(); //~ ERROR : std::marker::Copy` is not satisfied
 
     // ref counted types are not ok
-    assert_copy::<Rc<isize>>();   //~ ERROR `core::marker::Copy` is not implemented
+    assert_copy::<Rc<isize>>();   //~ ERROR : std::marker::Copy` is not satisfied
 }
 
 pub fn main() {
index 66297d70ef505cccb7e3f7f9f7c0a1dfa22bad8a..1cf970e150d7033da338ec5b79e80ec5a6056c8a 100644 (file)
@@ -21,5 +21,5 @@ fn take_param<T:Foo>(foo: &T) { }
 fn main() {
     let x: Box<_> = box 3;
     take_param(&x);
-    //~^ ERROR the trait `core::marker::Copy` is not implemented
+    //~^ ERROR `Box<_>: std::marker::Copy` is not satisfied
 }
index aec40c1a73a806f488acac21c3077c4da8d7c803..53ad4d1163bfad59b0da9ec0c786c252617f6407 100644 (file)
@@ -26,13 +26,13 @@ impl<T: Send + Copy + 'static> Gettable<T> for S<T> {}
 fn f<T>(val: T) {
     let t: S<T> = S(marker::PhantomData);
     let a = &t as &Gettable<T>;
-    //~^ ERROR the trait `core::marker::Send` is not implemented
+    //~^ ERROR : std::marker::Send` is not satisfied
 }
 
 fn g<T>(val: T) {
     let t: S<T> = S(marker::PhantomData);
     let a: &Gettable<T> = &t;
-    //~^ ERROR the trait `core::marker::Send` is not implemented
+    //~^ ERROR : std::marker::Send` is not satisfied
 }
 
 fn foo<'a>() {
@@ -44,7 +44,7 @@ fn foo<'a>() {
 fn foo2<'a>() {
     let t: Box<S<String>> = box S(marker::PhantomData);
     let a = t as Box<Gettable<String>>;
-    //~^ ERROR the trait `core::marker::Copy` is not implemented
+    //~^ ERROR : std::marker::Copy` is not satisfied
 }
 
 fn foo3<'a>() {
@@ -52,7 +52,7 @@ fn foo3<'a>() {
 
     let t: Box<S<Foo>> = box S(marker::PhantomData);
     let a: Box<Gettable<Foo>> = t;
-    //~^ ERROR the trait `core::marker::Copy` is not implemented
+    //~^ ERROR : std::marker::Copy` is not satisfied
 }
 
 fn main() { }
index bf2a209c4c48ed01a16223d5f0eaa474fef7a4eb..dd77c2c138f4622df9b7a129959b05fb95e37d46 100644 (file)
@@ -18,5 +18,5 @@ fn bar<F:FnOnce() + Send>(_: F) { }
 fn main() {
     let x = Rc::new(3);
     bar(move|| foo(x));
-    //~^ ERROR `core::marker::Send` is not implemented
+    //~^ ERROR : std::marker::Send` is not satisfied
 }
index 570f7ad7fe3bf214adae00a7ac6644053305dd6c..bd0e5642b9ccddd5813b91f7607260240daefae2 100644 (file)
@@ -20,11 +20,11 @@ trait Message : Send { }
 
 fn object_ref_with_static_bound_not_ok() {
     assert_send::<&'static (Dummy+'static)>();
-    //~^ ERROR the trait `core::marker::Sync` is not implemented
+    //~^ ERROR : std::marker::Sync` is not satisfied
 }
 
 fn box_object_with_no_bound_not_ok<'a>() {
-    assert_send::<Box<Dummy>>(); //~ ERROR the trait `core::marker::Send` is not implemented
+    assert_send::<Box<Dummy>>(); //~ ERROR : std::marker::Send` is not satisfied
 }
 
 fn object_with_send_bound_ok() {
index 48d5215b7085be78f389d72df0139f2efa25130e..da56fccde2d4ab93f252a185beb66a4253e6b45e 100644 (file)
@@ -18,7 +18,7 @@ trait Dummy { }
 // careful with object types, who knows what they close over...
 fn test51<'a>() {
     assert_send::<&'a Dummy>();
-    //~^ ERROR the trait `core::marker::Sync` is not implemented
+    //~^ ERROR : std::marker::Sync` is not satisfied
 }
 fn test52<'a>() {
     assert_send::<&'a (Dummy+Sync)>();
@@ -37,7 +37,7 @@ fn test61() {
 // them not ok
 fn test_71<'a>() {
     assert_send::<Box<Dummy+'a>>();
-    //~^ ERROR the trait `core::marker::Send` is not implemented
+    //~^ ERROR : std::marker::Send` is not satisfied
 }
 
 fn main() { }
index d3d166e2a6916181d24c32984ed212bca92a07e6..e52a6e12efc96bf66b80e956705d3dc5275821d0 100644 (file)
@@ -14,11 +14,11 @@ fn assert_send<T:Send>() { }
 trait Dummy { }
 
 fn test50() {
-    assert_send::<&'static Dummy>(); //~ ERROR the trait `core::marker::Sync` is not implemented
+    assert_send::<&'static Dummy>(); //~ ERROR : std::marker::Sync` is not satisfied
 }
 
 fn test53() {
-    assert_send::<Box<Dummy>>(); //~ ERROR the trait `core::marker::Send` is not implemented
+    assert_send::<Box<Dummy>>(); //~ ERROR : std::marker::Send` is not satisfied
 }
 
 // ...unless they are properly bounded
index 406711902a543555547453d1900b5469d74d2ea9..583381a1c28f5b23fc9753f8e2ef25f9b1dde529 100644 (file)
@@ -19,7 +19,7 @@ fn test32() { assert_send::<Vec<isize> >(); }
 
 // but not if they own a bad thing
 fn test40() {
-    assert_send::<Box<*mut u8>>(); //~ ERROR `core::marker::Send` is not implemented
+    assert_send::<Box<*mut u8>>(); //~ ERROR : std::marker::Send` is not satisfied
 }
 
 fn main() { }
index bce765a986a22fccc6fc206abe1c1e0e04b6c292..ecee2e0a4c63ae4c6ad976f475cb351549e89338 100644 (file)
@@ -14,7 +14,7 @@ fn assert_send<T:Send>() { }
 
 fn test71<'a>() {
     assert_send::<*mut &'a isize>();
-    //~^ ERROR the trait `core::marker::Send` is not implemented for the type
+    //~^ ERROR `*mut &'a isize: core::marker::Send` is not satisfied
 }
 
 fn main() {
diff --git a/src/test/compile-fail/lexical-scopes.rs b/src/test/compile-fail/lexical-scopes.rs
new file mode 100644 (file)
index 0000000..dbcd3f3
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct T { i: i32 }
+fn f<T>() {
+    let t = T { i: 0 }; //~ ERROR `T` does not name a structure
+}
+
+mod Foo {
+    pub fn f() {}
+}
+fn g<Foo>() {
+    Foo::f(); //~ ERROR no associated item named `f`
+}
+
+fn main() {}
index 1fa7284f6b5dcab6e7a2726173b2c2ed7f34c427..be4166e43b504af4b790327367e33cbc053ff949 100644 (file)
 // Lifetime annotation needed because we have no arguments.
 fn f() -> &isize {    //~ ERROR missing lifetime specifier
 //~^ HELP there is no value for it to be borrowed from
+//~| HELP run `rustc --explain E0106` to see a detailed explanation
+//~| HELP consider giving it a 'static lifetime
     panic!()
 }
 
 // Lifetime annotation needed because we have two by-reference parameters.
 fn g(_x: &isize, _y: &isize) -> &isize {    //~ ERROR missing lifetime specifier
 //~^ HELP the signature does not say whether it is borrowed from `_x` or `_y`
+//~| HELP run `rustc --explain E0106` to see a detailed explanation
     panic!()
 }
 
@@ -28,11 +31,14 @@ struct Foo<'a> {
 // and one on the reference.
 fn h(_x: &Foo) -> &isize { //~ ERROR missing lifetime specifier
 //~^ HELP the signature does not say which one of `_x`'s 2 elided lifetimes it is borrowed from
+//~| HELP run `rustc --explain E0106` to see a detailed explanation
     panic!()
 }
 
 fn i(_x: isize) -> &isize { //~ ERROR missing lifetime specifier
 //~^ HELP this function's return type contains a borrowed value
+//~| HELP run `rustc --explain E0106` to see a detailed explanation
+//~| HELP consider giving it an explicit bounded or 'static lifetime
     panic!()
 }
 
index 66d8927ee51b7096b6cfa3826d1b6202dfe6b723..e0ea1ed74340783001fd75971980b68550f2e7bd 100644 (file)
@@ -14,19 +14,19 @@ use std::marker::PhantomData;
 
 struct Bar<'x, 'y, 'z> { bar: &'y i32, baz: i32, marker: PhantomData<(&'x(),&'y(),&'z())> }
 fn bar1<'a>(x: &Bar) -> (&'a i32, &'a i32, &'a i32) {
-//~^ HELP: consider using an explicit lifetime parameter as shown: fn bar1<'b, 'c, 'a>(x: &'a Bar<'b, 'a, 'c>) -> (&'a i32, &'a i32, &'a i32)
+    //~^ HELP: consider using an explicit lifetime parameter as shown: fn bar1<'a>(x: &'a Bar) -> (&'a i32, &'a i32, &'a i32)
     (x.bar, &x.baz, &x.baz)
-    //~^ ERROR: cannot infer
-    //~^^ ERROR: cannot infer
-    //~^^^ ERROR: cannot infer
+    //~^ ERROR E0312
+    //~| ERROR cannot infer
+    //~| ERROR cannot infer
 }
 
 fn bar2<'a, 'b, 'c>(x: &Bar<'a, 'b, 'c>) -> (&'a i32, &'a i32, &'a i32) {
-//~^ HELP: consider using an explicit lifetime parameter as shown: fn bar2<'a, 'c>(x: &'a Bar<'a, 'a, 'c>) -> (&'a i32, &'a i32, &'a i32)
+    //~^ HELP: consider using an explicit lifetime parameter as shown: fn bar2<'a, 'c>(x: &'a Bar<'a, 'a, 'c>) -> (&'a i32, &'a i32, &'a i32)
     (x.bar, &x.baz, &x.baz)
-    //~^ ERROR: cannot infer
-    //~^^ ERROR: cannot infer
-    //~^^^ ERROR: cannot infer
+    //~^ ERROR E0312
+    //~| ERROR cannot infer
+    //~| ERROR cannot infer
 }
 
 fn main() { }
index e32ed1c42a09088e421b6c12ea59feec6a27ad8a..73d89beb2202f84369e048588a346945b6cd2179 100644 (file)
@@ -39,8 +39,8 @@ struct Cat<'x, T> { cat: &'x isize, t: T }
 struct Dog<'y> { dog: &'y isize }
 
 fn cat2<'x, 'y>(x: Cat<'x, Dog<'y>>) -> &'x isize {
-//~^ HELP: consider using an explicit lifetime parameter as shown: fn cat2<'x>(x: Cat<'x, Dog<'x>>) -> &'x isize
-    x.t.dog //~ ERROR: cannot infer
+    //~^ HELP consider using an explicit lifetime parameter as shown: fn cat2<'x>(x: Cat<'x, Dog<'x>>) -> &'x isize
+    x.t.dog //~ ERROR E0312
 }
 
 struct Baz<'x> {
@@ -49,11 +49,9 @@ struct Baz<'x> {
 
 impl<'a> Baz<'a> {
     fn baz2<'b>(&self, x: &isize) -> (&'b isize, &'b isize) {
-         //~^ HELP: parameter as shown: fn baz2<'b>(&self, x: &'b isize) -> (&'a isize, &'a isize)
-        // The lifetime that gets assigned to `x` seems somewhat random.
-        // I have disabled this test for the time being. --pcwalton
-        (self.bar, x) //~ ERROR: cannot infer
-        //~^ ERROR: cannot infer
+        //~^ HELP consider using an explicit lifetime parameter as shown: fn baz2<'b>(&self, x: &'b isize) -> (&'a isize, &'a isize)
+        (self.bar, x) //~ ERROR E0312
+        //~^ ERROR E0312
     }
 }
 
index edbeebe882e51930b1d61808bbeb9a92896cf399..2a127d937eaaafb39d7e2d4fdf7eca760bf36bb1 100644 (file)
@@ -12,7 +12,7 @@
 
 extern {
     #[linkage = "extern_weak"] static foo: i32;
-    //~^ ERROR: must have type `*T`
+    //~^ ERROR: must have type `*const T` or `*mut T`
 }
 
 fn main() {
index 59ab5be1572a3d43d7bbe74e1aeb3cd367f430f8..393e46ab5394ca7d1a61bd239bd112722407b2a5 100644 (file)
@@ -30,6 +30,7 @@ mod test {
     mod warn {
         #![warn(bad_style)]
         //~^ NOTE lint level defined here
+        //~| NOTE lint level defined here
 
         fn CamelCase() {} //~ WARN function `CamelCase` should have a snake case name
 
index 2df57b08f283c3f3dd9350ba7b8cacb7682f1fd0..91e5065517dcc9c4acb776d019a12da69ba20768 100644 (file)
@@ -15,6 +15,8 @@
 
 #![feature(unsafe_no_drop_flag)]
 #![deny(drop_with_repr_extern)]
+//~^ NOTE lint level defined here
+//~| NOTE lint level defined here
 
 #[repr(C)] struct As { x: Box<i8> }
 #[repr(C)] enum Ae { Ae(Box<i8>), _None }
diff --git a/src/test/compile-fail/lint-removed-allow.rs b/src/test/compile-fail/lint-removed-allow.rs
new file mode 100644 (file)
index 0000000..159a3d7
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// No warnings about removed lint when
+// allow(renamed_and_removed_lints)
+
+#[deny(raw_pointer_derive)]
+#[allow(renamed_and_removed_lints)]
+#[deny(unused_variables)]
+fn main() { let unused = (); } //~ ERR unused
index e196e128b17128607d6ab5e3c2fed9b8bbd940ac..9069356604131a789305a1a10202d9ce5afdfc24 100644 (file)
@@ -8,9 +8,11 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// The raw_pointer_derived lint only warns about its own removal
+// The raw_pointer_derived lint was removed, but is now reported by
+// the renamed_and_removed_lints lint, which means it's a warning by
+// default, and allowed in cargo dependency builds.
 // cc #30346
 
 #[deny(raw_pointer_derive)] //~ WARN raw_pointer_derive has been removed
-#[deny(warnings)]
+#[deny(unused_variables)]
 fn main() { let unused = (); } //~ ERR unused
diff --git a/src/test/compile-fail/lint-renamed-allow.rs b/src/test/compile-fail/lint-renamed-allow.rs
new file mode 100644 (file)
index 0000000..a2426d8
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// No warnings about renamed lint when
+// allow(renamed_and_removed_lints)
+
+#[deny(unknown_features)]
+#[allow(renamed_and_removed_lints)]
+#[deny(unused)]
+fn main() { let unused = (); } //~ ERR unused
index 0b414ad73db6f9109bd33fc5106102bae088e7e5..29929c120c30f75784f2e4bd67adce7ad37be3e1 100644 (file)
@@ -24,11 +24,6 @@ fn bar() -> i8 {
     return 123;
 }
 
-fn baz() -> bool {
-    128 > bar() //~ ERROR comparison is useless due to type limits
-                //~^ WARNING literal out of range for i8
-}
-
 fn bleh() {
     let u = 42u8;
     let _ = u > 255; //~ ERROR comparison is useless due to type limits
@@ -40,11 +35,3 @@ fn bleh() {
     let _ = u >= 0; //~ ERROR comparison is useless due to type limits
     let _ = 0 <= u; //~ ERROR comparison is useless due to type limits
 }
-
-fn qux() {
-    let mut i = 1i8;
-    while 200 != i { //~ ERROR comparison is useless due to type limits
-                     //~^ WARNING literal out of range for i8
-        i += 1;
-    }
-}
diff --git a/src/test/compile-fail/lint-type-limits2.rs b/src/test/compile-fail/lint-type-limits2.rs
new file mode 100644 (file)
index 0000000..8fc18d1
--- /dev/null
@@ -0,0 +1,24 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+
+// compile-flags: -D unused-comparisons
+fn main() { }
+
+
+fn bar() -> i8 {
+    return 123;
+}
+
+fn baz() -> bool {
+    128 > bar() //~ ERROR comparison is useless due to type limits
+                //~| WARN literal out of range for i8
+}
diff --git a/src/test/compile-fail/lint-type-limits3.rs b/src/test/compile-fail/lint-type-limits3.rs
new file mode 100644 (file)
index 0000000..b09dc0a
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+
+// compile-flags: -D unused-comparisons
+fn main() { }
+
+fn qux() {
+    let mut i = 1i8;
+    while 200 != i { //~ ERROR comparison is useless due to type limits
+                     //~| WARN literal out of range for i8
+        i += 1;
+    }
+}
index eb5b77f7a45a352a3d7fa6a0287e26e7ff188cf0..ce336905c0139e77ea5b6fcd9f47b86e09c7188a 100644 (file)
@@ -25,7 +25,6 @@ fn main() {
 
     let x2: i8 = -128; // should be OK
     let x1: i8 = 128; //~ error: literal out of range for i8
-    let x2: i8 = --128; //~ error: literal out of range for i8
 
     let x3: i8 = -129; //~ error: literal out of range for i8
     let x3: i8 = -(129); //~ error: literal out of range for i8
@@ -54,9 +53,4 @@ fn main() {
     let x = 18446744073709551615_i64; //~ error: literal out of range for i64
     let x: i64 = -9223372036854775809; //~ error: literal out of range for i64
     let x = -9223372036854775809_i64; //~ error: literal out of range for i64
-
-    let x = -3.40282348e+38_f32; //~ error: literal out of range for f32
-    let x =  3.40282348e+38_f32; //~ error: literal out of range for f32
-    let x = -1.7976931348623159e+308_f64; //~ error: literal out of range for f64
-    let x =  1.7976931348623159e+308_f64; //~ error: literal out of range for f64
 }
diff --git a/src/test/compile-fail/lint-type-overflow2.rs b/src/test/compile-fail/lint-type-overflow2.rs
new file mode 100644 (file)
index 0000000..83300f1
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+
+#![deny(overflowing_literals)]
+
+#[allow(unused_variables)]
+fn main() {
+    let x2: i8 = --128; //~ error: literal out of range for i8
+
+    let x = -3.40282348e+38_f32; //~ error: literal out of range for f32
+    let x =  3.40282348e+38_f32; //~ error: literal out of range for f32
+    let x = -1.7976931348623159e+308_f64; //~ error: literal out of range for f64
+    let x =  1.7976931348623159e+308_f64; //~ error: literal out of range for f64
+}
index 6e3a00746f3e6107bcd3654e5f41a57fdc57bef4..94e189aa47f6fba9e95293080efb906a7474ae32 100644 (file)
@@ -9,6 +9,20 @@
 // except according to those terms.
 
 #![deny(unconditional_recursion)]
+//~^ NOTE lint level defined here
+//~| NOTE lint level defined here
+//~| NOTE lint level defined here
+//~| NOTE lint level defined here
+//~| NOTE lint level defined here
+//~| NOTE lint level defined here
+//~| NOTE lint level defined here
+//~| NOTE lint level defined here
+//~| NOTE lint level defined here
+//~| NOTE lint level defined here
+//~| NOTE lint level defined here
+//~| NOTE lint level defined here
+//~| NOTE lint level defined here
+//~| NOTE lint level defined here
 #![allow(dead_code)]
 fn foo() { //~ ERROR function cannot return without recurring
     foo(); //~ NOTE recursive call site
index a4eb1630afe19c00f993df2f6aa67a1ea1270e85..343622c5c1b048a714dfcaf5475606377c224cc1 100644 (file)
 macro_rules! test { () => { fn foo() -> i32 { 1; } } }
                                            //~^ ERROR not all control paths return a value
                                            //~^^ HELP consider removing this semicolon
+                                           //~^^^ HELP run `rustc --explain E0269` to see a
 
 fn no_return() -> i32 {} //~ ERROR  not all control paths return a value
+                         //~^ HELP run `rustc --explain E0269` to see a detailed explanation
 
 fn bar(x: u32) -> u32 { //~ ERROR  not all control paths return a value
+                        //~^ HELP run `rustc --explain E0269` to see a detailed explanation
     x * 2; //~ HELP consider removing this semicolon
 }
 
 fn baz(x: u64) -> u32 { //~ ERROR  not all control paths return a value
+                        //~^ HELP run `rustc --explain E0269` to see a detailed explanation
     x * 2;
 }
 
index 0fee48a8c6c662ddeda75093ea52daf124096aae..3aab953eb7940bc6d2a33c3e73ce5b78d30d53aa 100644 (file)
@@ -12,6 +12,8 @@
 #![deny(unused_assignments)]
 #![allow(dead_code, non_camel_case_types, trivial_numeric_casts)]
 
+use std::ops::AddAssign;
+
 fn f1(x: isize) {
     //~^ ERROR unused variable: `x`
 }
@@ -100,5 +102,49 @@ fn f5c() {
     }
 }
 
+struct View<'a>(&'a mut [i32]);
+
+impl<'a> AddAssign<i32> for View<'a> {
+    fn add_assign(&mut self, rhs: i32) {
+        for lhs in self.0.iter_mut() {
+            *lhs += rhs;
+        }
+    }
+}
+
+fn f6() {
+    let mut array = [1, 2, 3];
+    let mut v = View(&mut array);
+
+    // ensure an error shows up for x even if lhs of an overloaded add assign
+
+    let x;
+    //~^ ERROR variable `x` is assigned to, but never used
+
+    *({
+        x = 0;  //~ ERROR value assigned to `x` is never read
+        &mut v
+    }) += 1;
+}
+
+
+struct MutRef<'a>(&'a mut i32);
+
+impl<'a> AddAssign<i32> for MutRef<'a> {
+    fn add_assign(&mut self, rhs: i32) {
+        *self.0 += rhs;
+    }
+}
+
+fn f7() {
+    let mut a = 1;
+    {
+        // `b` does not trigger unused_variables
+        let mut b = MutRef(&mut a);
+        b += 1;
+    }
+    drop(a);
+}
+
 fn main() {
 }
index 06af8ac8719e57b539496912b1a825860f4947f6..8d2a0fd07e80a3e4436cb2ff221109491e2be3e3 100644 (file)
@@ -35,14 +35,14 @@ pub fn opt_str2<'a>(maybestr: &'a Option<String>) -> &'static str {
         "(none)"
     } else {
         let s: &'a str = maybestr.as_ref().unwrap();
-        s  //~ ERROR cannot infer an appropriate lifetime for automatic coercion due to conflicting
+        s  //~ ERROR E0312
     }
 }
 
 pub fn opt_str3<'a>(maybestr: &'a Option<String>) -> &'static str {
     if maybestr.is_some() {
         let s: &'a str = maybestr.as_ref().unwrap();
-        s  //~ ERROR cannot infer an appropriate lifetime for automatic coercion due to conflicting
+        s  //~ ERROR E0312
     } else {
         "(none)"
     }
index 1b5824964a8eb53b44a45bd5a82fb43af5e0bdc4..b9423feb5c1cde561d8f586ad7a132cf4073c953 100644 (file)
@@ -37,8 +37,7 @@ pub fn opt_str2<'a>(maybestr: &'a Option<String>) -> &'static str {
         None => "(none)",
         Some(ref s) => {
             let s: &'a str = s;
-            s
-            //~^ ERROR cannot infer an appropriate lifetime
+            s //~ ERROR E0312
         }
     }
 }
@@ -47,8 +46,7 @@ pub fn opt_str3<'a>(maybestr: &'a Option<String>) -> &'static str {
     match *maybestr {
         Some(ref s) => {
             let s: &'a str = s;
-            s
-            //~^ ERROR cannot infer an appropriate lifetime
+            s //~ ERROR E0312
         }
         None => "(none)",
     }
index a429681bb21815e2e6431b2fa52ac6f0dcef58ef..c935ccef055aeb0ae96f6591a0b95e674b50f5af 100644 (file)
@@ -21,9 +21,11 @@ macro_rules! call_nested_expr {
 
 macro_rules! call_nested_expr_sum {
     () => { 1 + nested_expr!(); } //~ ERROR unresolved name
+                                  //~^ NOTE in this expansion of nested_expr!
 }
 
 fn main() {
     1 + call_nested_expr!(); //~ ERROR unresolved name
+                             //~^ NOTE in this expansion of call_nested_expr!
     call_nested_expr_sum!(); //~ NOTE in this expansion of
 }
index 294892662d46480d24d383190fc3920214469243..a485b9056de26980d10ffd2ca0193caf2305a5fc 100644 (file)
@@ -22,7 +22,8 @@ macro_rules! myprint {
 
 macro_rules! myprintln {
     ($fmt:expr) => (myprint!(concat!($fmt, "\n"))); //~ ERROR invalid reference to argument `0`
-                                                    //~^ NOTE in this expansion of
+                                                    //~^ NOTE in this expansion of myprint!
+                                                    //~^^ NOTE in this expansion of concat!
 }
 
 fn main() {
diff --git a/src/test/compile-fail/macro-expanded-mod.rs b/src/test/compile-fail/macro-expanded-mod.rs
new file mode 100644 (file)
index 0000000..8e631a6
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that macro-expanded non-inline modules behave correctly
+
+macro_rules! mod_decl {
+    ($i:ident) => { mod $i; }
+}
+
+mod macro_expanded_mod_helper {
+    mod_decl!(foo); // This should search in the folder `macro_expanded_mod_helper`
+}
+
+fn main() {
+    mod_decl!(foo); //~ ERROR Cannot declare a non-inline module inside a block
+}
index abf0ed420e7a26c59cf03009f84cdcaae0e8364d..1111b21d4550c14c3ef2fbae02ae702a562dd9ed 100644 (file)
@@ -26,6 +26,5 @@ test!(b,
 fn main() {
     a::bar();
     //~^ ERROR failed to resolve. Use of undeclared type or module `a`
-    //~^^ ERROR unresolved name `a::bar`
     b::bar();
 }
diff --git a/src/test/compile-fail/macro_expanded_mod_helper/foo/bar.rs b/src/test/compile-fail/macro_expanded_mod_helper/foo/bar.rs
new file mode 100644 (file)
index 0000000..3ec3436
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-test
diff --git a/src/test/compile-fail/macro_expanded_mod_helper/foo/mod.rs b/src/test/compile-fail/macro_expanded_mod_helper/foo/mod.rs
new file mode 100644 (file)
index 0000000..25fcf11
--- /dev/null
@@ -0,0 +1,13 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-test
+
+mod_decl!(bar);
index e298a0f62cd81882ab9e47920f4ac74347b58e82..a419c6480e6a7e1460ad7e1c6a5eeee29fbd14a5 100644 (file)
@@ -28,5 +28,5 @@ fn main() {
     let x: Box<Map<isize, isize>> = x;
     // FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
     let y: Box<Map<usize, isize>> = Box::new(x);
-    //~^ ERROR the trait `Map<usize, isize>` is not implemented
+    //~^ ERROR `Box<Map<isize, isize>>: Map<usize, isize>` is not satisfied
 }
diff --git a/src/test/compile-fail/meta-expected-error-correct-rev.rs b/src/test/compile-fail/meta-expected-error-correct-rev.rs
new file mode 100644 (file)
index 0000000..95b4e1a
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// revisions: a
+// pretty-expanded FIXME #23616
+
+// Counterpart to `meta-expected-error-wrong-rev.rs`
+
+#[cfg(a)]
+fn foo() {
+    let x: u32 = 22_usize; //[a]~ ERROR mismatched types
+}
+
+fn main() { }
diff --git a/src/test/compile-fail/meta-expected-error-wrong-rev.rs b/src/test/compile-fail/meta-expected-error-wrong-rev.rs
new file mode 100644 (file)
index 0000000..084c6ed
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// revisions: a
+// should-fail
+// pretty-expanded FIXME #23616
+
+// This is a "meta-test" of the compilertest framework itself.  In
+// particular, it includes the right error message, but the message
+// targets the wrong revision, so we expect the execution to fail.
+// See also `meta-expected-error-correct-rev.rs`.
+
+#[cfg(a)]
+fn foo() {
+    let x: u32 = 22_usize; //[b]~ ERROR mismatched types
+}
+
+fn main() { }
index 375d5076ea2f8e9c6bc432f629a26a5ae87133ff..6416d5467c8903ad3e8a1ec9901b30c6143b6ff9 100644 (file)
@@ -13,6 +13,6 @@ struct Foo;
 fn main() {
     let a: Result<(), Foo> = Ok(());
     a.unwrap();
-    //~^ ERROR no method named `unwrap` found for type `core::result::Result<(), Foo>`
-    //~| NOTE the following trait bounds were not satisfied: `Foo : core::fmt::Debug`
+    //~^ ERROR no method named `unwrap` found for type `std::result::Result<(), Foo>`
+    //~| NOTE the following trait bounds were not satisfied: `Foo : std::fmt::Debug`
 }
index 967a8531b2c0afd0d4e3eebfd343a8fddad341a7..f3c227849dcbeea03b0ec7f80d728c1647339751 100644 (file)
@@ -29,7 +29,7 @@ impl S {
 
     // Cause an error. It shouldn't have any macro backtrace frames.
     fn bar(&self) { }
-    fn bar(&self) { } //~ ERROR duplicate method
+    fn bar(&self) { } //~ ERROR duplicate definitions
 }
 
 fn main() { }
index e6f3c8ab3170e6176a247d195f96892feff2e4a2..c8c1447fea3869b4ab9be12306411add555a8cc2 100644 (file)
@@ -9,6 +9,7 @@
 // except according to those terms.
 
 // issue #21405
+// ignore-tidy-linelength
 
 struct Foo;
 
@@ -19,4 +20,5 @@ fn main() {
     //~^ ERROR no method named `is_empty` found
     //~^^ HELP #1: `core::slice::SliceExt`
     //~^^^ HELP #2: `core::str::StrExt`
+    //~^^^^ HELP items from traits can only be used if the trait is implemented and in scope; the following traits define an item `is_empty`, perhaps you need to implement one of them:
 }
index 9a89424c61f343e3f72a28ffa66af73ad2e3347d..1ba30c67a03cfac40b205ee027c61b09426a4bae 100644 (file)
@@ -8,6 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// error-pattern:cannot apply unary operator `-` to type `collections::string::String`
+// error-pattern:cannot apply unary operator `-` to type `std::string::String`
 
 fn main() { -"foo".to_string(); }
index 865784c2f1eede50822737829f56a5a2abdd23d9..0c4aae8cfad3651f37f37a8437408362a1ee02c6 100644 (file)
@@ -10,7 +10,7 @@
 
 // Test for a subtle failure computing kinds of cyclic types, in which
 // temporary kinds wound up being stored in a cache and used later.
-// See middle::ty::type_contents() for more information.
+// See rustc::ty::type_contents() for more information.
 
 
 struct List { key: isize, next: Option<Box<List>> }
diff --git a/src/test/compile-fail/mut-not-freeze.rs b/src/test/compile-fail/mut-not-freeze.rs
deleted file mode 100644 (file)
index db19132..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use std::cell::RefCell;
-
-fn f<T: Sync>(_: T) {}
-
-fn main() {
-    let x = RefCell::new(0);
-    f(x);
-    //~^ ERROR `core::marker::Sync` is not implemented
-}
index 1657d602e24c485de42a3591d5006b92899e49fc..cafcabe6279b0b6f0ca675c437302bd1883bb616 100644 (file)
@@ -24,5 +24,5 @@ fn bar<T: Sync>(_: T) {}
 
 fn main() {
     let x = Foo::A(NoSync);
-    bar(&x); //~ ERROR the trait `core::marker::Sync` is not implemented
+    bar(&x); //~ ERROR `NoSync: std::marker::Sync` is not satisfied
 }
index a9c2b6a942c658dab4e09a84e3752eba44cf51db..957b6cda55311b79c927ee3ec5de589e5085cc92 100644 (file)
@@ -14,6 +14,5 @@ extern crate libc;
 fn main() {
     unsafe {
         libc::abs(0);  //~ ERROR Use of undeclared type or module `libc`
-                      //~^ ERROR unresolved name `libc::abs`
     }
 }
index 1683db811cf040ec6ab18aa9c5b5d79c1e00872b..9ccc7cc75ad5a41609051d4a84dea2b2778aafc7 100644 (file)
@@ -65,7 +65,7 @@ fn main() {
     //~^^ HELP following traits define an item `method`, perhaps you need to implement one of them
     //~^^^ HELP `foo::Bar`
     //~^^^^ HELP `no_method_suggested_traits::foo::PubPub`
-    //~^^^^^ HELP `no_method_suggested_traits::reexport::Reexported`
+    //~^^^^^ HELP `no_method_suggested_traits::Reexported`
     //~^^^^^^ HELP `no_method_suggested_traits::bar::PubPriv`
     //~^^^^^^^ HELP `no_method_suggested_traits::qux::PrivPub`
     //~^^^^^^^^ HELP `no_method_suggested_traits::quz::PrivPriv`
@@ -74,7 +74,7 @@ fn main() {
     //~^^ HELP following traits define an item `method`, perhaps you need to implement one of them
     //~^^^ HELP `foo::Bar`
     //~^^^^ HELP `no_method_suggested_traits::foo::PubPub`
-    //~^^^^^ HELP `no_method_suggested_traits::reexport::Reexported`
+    //~^^^^^ HELP `no_method_suggested_traits::Reexported`
     //~^^^^^^ HELP `no_method_suggested_traits::bar::PubPriv`
     //~^^^^^^^ HELP `no_method_suggested_traits::qux::PrivPub`
     //~^^^^^^^^ HELP `no_method_suggested_traits::quz::PrivPriv`
index 60b2d22079e2c418ed803387045527eed5f809b4..334952cefa6e02f9612f3e761db590f966a07159 100644 (file)
@@ -33,7 +33,7 @@ fn main() {
     let x = foo(Port(Rc::new(())));
 
     thread::spawn(move|| {
-        //~^ ERROR `core::marker::Send` is not implemented
+        //~^ ERROR `std::rc::Rc<()>: std::marker::Send` is not satisfied
         let y = x;
         println!("{:?}", y);
     });
index 21675434e2433f3c56a0b685d23de6201ac5c27b..f2a27dc528e6cf82da0b2498d0f657ecedfd2357 100644 (file)
@@ -10,7 +10,7 @@
 
 #![no_std]
 
-extern crate core; //~ ERROR: an external crate named `core` has already
+extern crate core; //~ ERROR: an extern crate named `core` has already
 extern crate std;
 
 fn main() {}
index 625d51260c4dcc4375d6704da1a11ce35adacc6b..902710e96e274fbc0ffc8a83d61e739741ff1ded 100644 (file)
@@ -24,5 +24,5 @@ fn bar<T: Send>(_: T) {}
 fn main() {
     let x = Foo::A(NoSend);
     bar(x);
-    //~^ ERROR `core::marker::Send` is not implemented
+    //~^ ERROR `NoSend: std::marker::Send` is not satisfied
 }
index b5c572b17dd9a1bac7e465b36b92ef8472ced80a..69f6fcdc4afa6577b8a0cbeb232f7ea4b758ec7a 100644 (file)
@@ -15,5 +15,5 @@ fn bar<T: Send>(_: T) {}
 fn main() {
     let x = Rc::new(5);
     bar(x);
-    //~^ ERROR `core::marker::Send` is not implemented
+    //~^ ERROR `std::rc::Rc<_>: std::marker::Send` is not satisfied
 }
index 7f16db0ba947be19ea11269408e9f2a68458763b..b2ca4f9f5db16bb76a749cf8478733ded2a68b5e 100644 (file)
@@ -23,5 +23,5 @@ fn bar<T: Send>(_: T) {}
 fn main() {
     let x = Foo { a: 5 };
     bar(x);
-    //~^ ERROR the trait `core::marker::Send` is not implemented
+    //~^ ERROR `Foo: std::marker::Send` is not satisfied
 }
index 9331afdbbb5d7d792a150e4cf128a9c635f182f5..ae9a25a95b4ea99283139f6e0c73e4e96b423962 100644 (file)
@@ -22,5 +22,5 @@ fn bar<T: Sync>(_: T) {}
 fn main() {
     let x = Foo::A(NoSync);
     bar(x);
-    //~^ ERROR the trait `core::marker::Sync` is not implemented
+    //~^ ERROR `NoSync: std::marker::Sync` is not satisfied
 }
diff --git a/src/test/compile-fail/no_share-rc.rs b/src/test/compile-fail/no_share-rc.rs
deleted file mode 100644 (file)
index 4bc3442..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use std::rc::Rc;
-use std::cell::RefCell;
-
-fn bar<T: Sync>(_: T) {}
-
-fn main() {
-    let x = Rc::new(RefCell::new(5));
-    bar(x);
-    //~^ ERROR the trait `core::marker::Sync` is not implemented
-}
index b5ccceb3b2a7d9ec72bc6fc286af790ff6e9cd8a..d64d37a2f6c33785e120c424f50aa99129622186 100644 (file)
@@ -20,5 +20,5 @@ fn bar<T: Sync>(_: T) {}
 fn main() {
     let x = Foo { a: 5 };
     bar(x);
-    //~^ ERROR the trait `core::marker::Sync` is not implemented
+    //~^ ERROR `Foo: std::marker::Sync` is not satisfied
 }
index 85a47492306aff40e3a725833613e9e7e62c60cc..687e1e49ee844bb6390a7965c8076a2f521e1b4a 100644 (file)
@@ -20,7 +20,7 @@ fn main() {
   let x: isize = noexporttypelib::foo();
     //~^ ERROR mismatched types
     //~| expected `isize`
-    //~| found `core::option::Option<isize>`
+    //~| found `std::option::Option<isize>`
     //~| expected isize
-    //~| found enum `core::option::Option`
+    //~| found enum `std::option::Option`
 }
index a7cabae16be1b8f39be0f84ae8f66a090bbfbdad..9564a080b8ee7d6ca7f8c8dbb04d125921408ba9 100644 (file)
@@ -15,5 +15,5 @@ enum State { ST_NULL, ST_WHITESPACE }
 
 fn main() {
     [State::ST_NULL; (State::ST_WHITESPACE as usize)];
-    //~^ ERROR expected constant integer for repeat count, but non-constant path
+    //~^ ERROR expected constant integer for repeat count, but unimplemented constant expression
 }
diff --git a/src/test/compile-fail/non-interger-atomic.rs b/src/test/compile-fail/non-interger-atomic.rs
new file mode 100644 (file)
index 0000000..d2376ee
--- /dev/null
@@ -0,0 +1,117 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(core_intrinsics, rustc_attrs)]
+#![allow(warnings)]
+
+use std::intrinsics;
+
+#[derive(Copy, Clone)]
+struct Foo(i64);
+type Bar = &'static Fn();
+type Quux = [u8; 100];
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_bool_load(p: &mut bool, v: bool) {
+    intrinsics::atomic_load(p);
+    //~^ ERROR `atomic_load` intrinsic: expected basic integer type, found `bool`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_bool_store(p: &mut bool, v: bool) {
+    intrinsics::atomic_store(p, v);
+    //~^ ERROR `atomic_store` intrinsic: expected basic integer type, found `bool`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_bool_xchg(p: &mut bool, v: bool) {
+    intrinsics::atomic_xchg(p, v);
+    //~^ ERROR `atomic_xchg` intrinsic: expected basic integer type, found `bool`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_bool_cxchg(p: &mut bool, v: bool) {
+    intrinsics::atomic_cxchg(p, v, v);
+    //~^ ERROR `atomic_cxchg` intrinsic: expected basic integer type, found `bool`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_Foo_load(p: &mut Foo, v: Foo) {
+    intrinsics::atomic_load(p);
+    //~^ ERROR `atomic_load` intrinsic: expected basic integer type, found `Foo`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_Foo_store(p: &mut Foo, v: Foo) {
+    intrinsics::atomic_store(p, v);
+    //~^ ERROR `atomic_store` intrinsic: expected basic integer type, found `Foo`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_Foo_xchg(p: &mut Foo, v: Foo) {
+    intrinsics::atomic_xchg(p, v);
+    //~^ ERROR `atomic_xchg` intrinsic: expected basic integer type, found `Foo`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_Foo_cxchg(p: &mut Foo, v: Foo) {
+    intrinsics::atomic_cxchg(p, v, v);
+    //~^ ERROR `atomic_cxchg` intrinsic: expected basic integer type, found `Foo`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_Bar_load(p: &mut Bar, v: Bar) {
+    intrinsics::atomic_load(p);
+    //~^ ERROR expected basic integer type, found `&'static std::ops::Fn() + 'static`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_Bar_store(p: &mut Bar, v: Bar) {
+    intrinsics::atomic_store(p, v);
+    //~^ ERROR expected basic integer type, found `&'static std::ops::Fn() + 'static`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_Bar_xchg(p: &mut Bar, v: Bar) {
+    intrinsics::atomic_xchg(p, v);
+    //~^ ERROR expected basic integer type, found `&'static std::ops::Fn() + 'static`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_Bar_cxchg(p: &mut Bar, v: Bar) {
+    intrinsics::atomic_cxchg(p, v, v);
+    //~^ ERROR expected basic integer type, found `&'static std::ops::Fn() + 'static`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_Quux_load(p: &mut Quux, v: Quux) {
+    intrinsics::atomic_load(p);
+    //~^ ERROR `atomic_load` intrinsic: expected basic integer type, found `[u8; 100]`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_Quux_store(p: &mut Quux, v: Quux) {
+    intrinsics::atomic_store(p, v);
+    //~^ ERROR `atomic_store` intrinsic: expected basic integer type, found `[u8; 100]`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_Quux_xchg(p: &mut Quux, v: Quux) {
+    intrinsics::atomic_xchg(p, v);
+    //~^ ERROR `atomic_xchg` intrinsic: expected basic integer type, found `[u8; 100]`
+}
+
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
+unsafe fn test_Quux_cxchg(p: &mut Quux, v: Quux) {
+    intrinsics::atomic_cxchg(p, v, v);
+    //~^ ERROR `atomic_cxchg` intrinsic: expected basic integer type, found `[u8; 100]`
+}
+
+fn main() {}
index 50a69543f7d04fbfaa77a0d4cea3ef2d46f6457e..e5de03a08486c5854685bcb95449c1db11e7071e 100644 (file)
@@ -18,5 +18,5 @@ use std::cell::RefCell;
 fn assert<T: RecoverSafe + ?Sized>() {}
 
 fn main() {
-    assert::<Arc<RefCell<i32>>>(); //~ ERROR: is not implemented
+    assert::<Arc<RefCell<i32>>>(); //~ ERROR E0277
 }
index 1fa76c21f853d1923b7fad1067cfe1a0d6c0748b..0301c8dd935c7fe71876b226c9d18041028c5959 100644 (file)
@@ -17,5 +17,5 @@ use std::cell::UnsafeCell;
 fn assert<T: RecoverSafe + ?Sized>() {}
 
 fn main() {
-    assert::<*const UnsafeCell<i32>>(); //~ ERROR: is not implemented
+    assert::<*const UnsafeCell<i32>>(); //~ ERROR E0277
 }
index f06464c5b1ab8067ea5664d7553d661a9302a5fa..ece8fa7dc47bb12da8a4ffdbdad81c48a9a513e7 100644 (file)
 #![allow(dead_code)]
 #![feature(recover)]
 
-use std::panic::RecoverSafe;
+use std::panic::UnwindSafe;
 
-fn assert<T: RecoverSafe + ?Sized>() {}
+fn assert<T: UnwindSafe + ?Sized>() {}
 
 fn main() {
-    assert::<&mut i32>(); //~ ERROR: RecoverSafe` is not implemented
+    assert::<&mut i32>(); //~ ERROR: UnwindSafe` is not satisfied
 }
diff --git a/src/test/compile-fail/not-sync.rs b/src/test/compile-fail/not-sync.rs
new file mode 100644 (file)
index 0000000..aa7a83a
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::cell::{Cell, RefCell};
+use std::rc::{Rc, Weak};
+use std::sync::mpsc::{Receiver, Sender, SyncSender};
+
+fn test<T: Sync>() {}
+
+fn main() {
+    test::<Cell<i32>>();
+    //~^ ERROR `std::cell::Cell<i32>: std::marker::Sync` is not satisfied
+    test::<RefCell<i32>>();
+    //~^ ERROR `std::cell::RefCell<i32>: std::marker::Sync` is not satisfied
+
+    test::<Rc<i32>>();
+    //~^ ERROR `std::rc::Rc<i32>: std::marker::Sync` is not satisfied
+    test::<Weak<i32>>();
+    //~^ ERROR `std::rc::Weak<i32>: std::marker::Sync` is not satisfied
+
+    test::<Receiver<i32>>();
+    //~^ ERROR `std::sync::mpsc::Receiver<i32>: std::marker::Sync` is not satisfied
+    test::<Sender<i32>>();
+    //~^ ERROR `std::sync::mpsc::Sender<i32>: std::marker::Sync` is not satisfied
+    test::<SyncSender<i32>>();
+    //~^ ERROR `std::sync::mpsc::SyncSender<i32>: std::marker::Sync` is not satisfied
+}
index efbf3782f9796a4b34ed979e293f60837032e67e..6fa261dea71cb83cbb39b4f3e6ca544d516e2716 100644 (file)
@@ -14,5 +14,5 @@
 trait Foo {}
 fn take_foo<F:Foo>(f: F) {}
 fn take_object(f: Box<Foo>) { take_foo(f); }
-//~^ ERROR the trait `Foo` is not implemented
+//~^ ERROR `Box<Foo>: Foo` is not satisfied
 fn main() {}
index 80dbee3c481e674b71c9143b978d409a91327d38..014b0c1e80e71467da8875a63f55fac28f1bd8dd 100644 (file)
@@ -34,8 +34,7 @@ fn load1<'a,'b>(a: &'a MyBox<SomeTrait>,
                 b: &'b MyBox<SomeTrait>)
                 -> &'b MyBox<SomeTrait>
 {
-    a
-      //~^ ERROR cannot infer
+    a //~ ERROR E0312
 }
 
 fn load2<'a>(ss: &MyBox<SomeTrait+'a>) -> MyBox<SomeTrait+'a> {
index 8e3161ef884be5b83410291364cc2678c9b8ddf0..5097e3d7b10d4735cdc961e4b86d418b7a34fba0 100644 (file)
@@ -29,6 +29,7 @@ fn make_bar<T:Bar>(t: &T) -> &Bar {
 
 fn make_bar_explicit<T:Bar>(t: &T) -> &Bar {
     //~^ ERROR E0038
+    //~^^ NOTE method `bar` has generic type parameters
     t as &Bar
 }
 
index c4eb467c4f96143da28576de025979f19b89d101..39ce1b33ca13122b275642f24761987f645aab0f 100644 (file)
@@ -33,7 +33,9 @@ pub fn main() {
     let x = vec!(1u8, 2, 3, 4);
     let y: Option<Vec<u8>> = collect(x.iter()); // this should give approximately the same error for x.iter().collect()
     //~^ ERROR
-    //~^^ NOTE a collection of type `core::option::Option<collections::vec::Vec<u8>>` cannot be built from an iterator over elements of type `&u8`
+    //~^^ NOTE a collection of type `std::option::Option<std::vec::Vec<u8>>` cannot be built from an iterator over elements of type `&u8`
+    //~^^^ NOTE required by `collect`
     let x: String = foobar(); //~ ERROR
-    //~^ NOTE test error `collections::string::String` with `u8` `_` `u32`
+    //~^ NOTE test error `std::string::String` with `u8` `_` `u32`
+    //~^^ NOTE required by `foobar`
 }
index 071b4db40f86f5c7a60a4869093715f993a6d90f..c84927ea266394e41e25e4a14415857a10dff276 100644 (file)
@@ -31,11 +31,11 @@ struct Nested<T>(T);
 fn is_zen<T: Zen>(_: T) {}
 
 fn not_sync<T>(x: Guard<T>) {
-    is_zen(x)  //~ error: the trait `core::marker::Sync` is not implemented for the type `T`
+    is_zen(x)  //~ error: `T: std::marker::Sync` is not satisfied
 }
 
 fn nested_not_sync<T>(x: Nested<Guard<T>>) {
-    is_zen(x)  //~ error: the trait `core::marker::Sync` is not implemented for the type `T`
+    is_zen(x)  //~ error: `T: std::marker::Sync` is not satisfied
 }
 
 fn main() {}
index b3aa4e9187d34dc7fdb4cdb8ec18fac2c8514e7c..cc73cbe15fe5f9f16dfe8aa62b98fc895ac3b2e8 100644 (file)
@@ -18,9 +18,6 @@ fn main() {
     use std::boxed::HEAP; //~ ERROR use of unstable library feature
 
     let _ = HEAP <- { //~ ERROR use of unstable library feature
-        ::core::raw::Slice { //~ ERROR use of unstable library feature
-            data: &42, //~ ERROR use of unstable library feature
-            len: 1 //~ ERROR use of unstable library feature
-        }
+        HEAP //~ ERROR use of unstable library feature
     };
 }
diff --git a/src/test/compile-fail/privacy-in-paths.rs b/src/test/compile-fail/privacy-in-paths.rs
new file mode 100644 (file)
index 0000000..0a8689e
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+mod foo {
+    pub use self::bar::S;
+    mod bar {
+        pub struct S;
+        pub use baz;
+    }
+
+    trait T {
+        type Assoc;
+    }
+    impl T for () {
+        type Assoc = S;
+    }
+}
+
+impl foo::S {
+    fn f() {}
+}
+
+pub mod baz {
+    fn f() {}
+
+    fn g() {
+        ::foo::bar::baz::f(); //~ERROR module `bar` is private
+        ::foo::bar::S::f(); //~ERROR module `bar` is private
+        <() as ::foo::T>::Assoc::f(); //~ERROR trait `T` is private
+    }
+}
+
+fn main() {}
index 7fe0574ab7d9a663902dd9433fcc6a01145f3b4c..bf296220d2a2b601d2cb38bce3f6e5dba8621526 100644 (file)
@@ -25,14 +25,13 @@ pub mod foo1 {
 }
 
 fn test_single1() {
-    // In an ideal world, these would be private instead of inaccessible.
-    use foo1::Bar;  //~ ERROR `Bar` is inaccessible
+    use foo1::Bar;  //~ ERROR function `Bar` is private
 
     Bar();
 }
 
 fn test_list1() {
-    use foo1::{Bar,Baz};  //~ ERROR `Bar` is inaccessible
+    use foo1::{Bar,Baz};  //~ ERROR `Bar` is private
 
     Bar();
 }
@@ -47,7 +46,7 @@ pub mod foo2 {
 }
 
 fn test_single2() {
-    use foo2::Bar;  //~ ERROR `Bar` is private
+    use foo2::Bar;  //~ ERROR trait `Bar` is private
 
     let _x : Box<Bar>;
 }
index 336913b8772719d41631c22fd034bc3c9a0b3b5f..063848f62aa9ee9f23c5cd100fd0ce657586cd36 100644 (file)
@@ -40,37 +40,30 @@ pub extern "C" { //~ ERROR unnecessary visibility qualifier
 
 const MAIN: u8 = {
     trait MarkerTr {}
-    pub trait Tr { //~ ERROR visibility has no effect inside functions or block
+    pub trait Tr {
         fn f();
         const C: u8;
         type T;
     }
-    pub struct S { //~ ERROR visibility has no effect inside functions or block
-        pub a: u8 //~ ERROR visibility has no effect inside functions or block
+    pub struct S {
+        pub a: u8
     }
-    struct Ts(pub u8); //~ ERROR visibility has no effect inside functions or block
+    struct Ts(pub u8);
 
     pub impl MarkerTr for .. {} //~ ERROR unnecessary visibility qualifier
-    //~^ ERROR visibility has no effect inside functions or block
     pub impl Tr for S {  //~ ERROR unnecessary visibility qualifier
-    //~^ ERROR visibility has no effect inside functions or block
         pub fn f() {} //~ ERROR unnecessary visibility qualifier
-        //~^ ERROR visibility has no effect inside functions or block
         pub const C: u8 = 0; //~ ERROR unnecessary visibility qualifier
-        //~^ ERROR visibility has no effect inside functions or block
         pub type T = u8; //~ ERROR unnecessary visibility qualifier
-        //~^ ERROR visibility has no effect inside functions or block
     }
     pub impl S { //~ ERROR unnecessary visibility qualifier
-    //~^ ERROR visibility has no effect inside functions or block
-        pub fn f() {} //~ ERROR visibility has no effect inside functions or block
-        pub const C: u8 = 0; //~ ERROR visibility has no effect inside functions or block
-        // pub type T = u8; // ERROR visibility has no effect inside functions or block
+        pub fn f() {}
+        pub const C: u8 = 0;
+        // pub type T = u8;
     }
     pub extern "C" { //~ ERROR unnecessary visibility qualifier
-    //~^ ERROR visibility has no effect inside functions or block
-        pub fn f(); //~ ERROR visibility has no effect inside functions or block
-        pub static St: u8; //~ ERROR visibility has no effect inside functions or block
+        pub fn f();
+        pub static St: u8;
     }
 
     0
@@ -78,36 +71,29 @@ const MAIN: u8 = {
 
 fn main() {
     trait MarkerTr {}
-    pub trait Tr { //~ ERROR visibility has no effect inside functions or block
+    pub trait Tr {
         fn f();
         const C: u8;
         type T;
     }
-    pub struct S { //~ ERROR visibility has no effect inside functions or block
-        pub a: u8 //~ ERROR visibility has no effect inside functions or block
+    pub struct S {
+        pub a: u8
     }
-    struct Ts(pub u8); //~ ERROR visibility has no effect inside functions or block
+    struct Ts(pub u8);
 
     pub impl MarkerTr for .. {} //~ ERROR unnecessary visibility qualifier
-    //~^ ERROR visibility has no effect inside functions or block
     pub impl Tr for S {  //~ ERROR unnecessary visibility qualifier
-    //~^ ERROR visibility has no effect inside functions or block
         pub fn f() {} //~ ERROR unnecessary visibility qualifier
-        //~^ ERROR visibility has no effect inside functions or block
         pub const C: u8 = 0; //~ ERROR unnecessary visibility qualifier
-        //~^ ERROR visibility has no effect inside functions or block
         pub type T = u8; //~ ERROR unnecessary visibility qualifier
-        //~^ ERROR visibility has no effect inside functions or block
     }
     pub impl S { //~ ERROR unnecessary visibility qualifier
-    //~^ ERROR visibility has no effect inside functions or block
-        pub fn f() {} //~ ERROR visibility has no effect inside functions or block
-        pub const C: u8 = 0; //~ ERROR visibility has no effect inside functions or block
-        // pub type T = u8; // ERROR visibility has no effect inside functions or block
+        pub fn f() {}
+        pub const C: u8 = 0;
+        // pub type T = u8;
     }
     pub extern "C" { //~ ERROR unnecessary visibility qualifier
-    //~^ ERROR visibility has no effect inside functions or block
-        pub fn f(); //~ ERROR visibility has no effect inside functions or block
-        pub static St: u8; //~ ERROR visibility has no effect inside functions or block
+        pub fn f();
+        pub static St: u8;
     }
 }
index ccb379c717928d5da0daf112f526d910d8a98215..28c1a003e39f0b731f43a1e97fae218c51cbac46 100644 (file)
@@ -19,6 +19,5 @@ mod foo {
 }
 
 fn main() {
-    <i32 as ::foo::Bar>::baz(); //~ERROR method `baz` is inaccessible
-                                //~^NOTE: trait `Bar` is private
+    <i32 as ::foo::Bar>::baz(); //~ERROR trait `Bar` is private
 }
index 495cdc3fe62e287062ee83fb588bc3e16439fc08..afe8c2fda3d4c75bfe26f1c91425f4d9b49b9851 100644 (file)
@@ -72,7 +72,6 @@ mod bar {
         self::baz::A::foo();
         self::baz::A::bar(); //~ ERROR: method `bar` is private
         self::baz::A.foo2();
-        self::baz::A.bar2(); //~ ERROR: method `bar2` is private
 
         // this used to cause an ICE in privacy traversal.
         super::gpub();
@@ -91,7 +90,6 @@ fn lol() {
     bar::A::foo();
     bar::A::bar(); //~ ERROR: method `bar` is private
     bar::A.foo2();
-    bar::A.bar2(); //~ ERROR: method `bar2` is private
 }
 
 mod foo {
@@ -99,19 +97,15 @@ mod foo {
         ::bar::A::foo();
         ::bar::A::bar();        //~ ERROR: method `bar` is private
         ::bar::A.foo2();
-        ::bar::A.bar2();        //~ ERROR: method `bar2` is private
-        ::bar::baz::A::foo();   //~ ERROR: method `foo` is inaccessible
-                                //~^ NOTE: module `baz` is private
-        ::bar::baz::A::bar();   //~ ERROR: method `bar` is private
-        ::bar::baz::A.foo2();   //~ ERROR: struct `A` is inaccessible
-                                //~^ NOTE: module `baz` is private
-        ::bar::baz::A.bar2();   //~ ERROR: struct `A` is inaccessible
+        ::bar::baz::A::foo();   //~ ERROR: module `baz` is private
+        ::bar::baz::A::bar();   //~ ERROR: module `baz` is private
+                                //~^ ERROR: method `bar` is private
+        ::bar::baz::A.foo2();   //~ ERROR: module `baz` is private
+        ::bar::baz::A.bar2();   //~ ERROR: module `baz` is private
                                 //~^ ERROR: method `bar2` is private
-                                //~^^ NOTE: module `baz` is private
 
         let _: isize =
-        ::bar::B::foo();        //~ ERROR: method `foo` is inaccessible
-                                //~^ NOTE: trait `B` is private
+        ::bar::B::foo();        //~ ERROR: trait `B` is private
         ::lol();
 
         ::bar::Enum::Pub;
@@ -126,19 +120,14 @@ mod foo {
 
         ::bar::gpub();
 
-        ::bar::baz::foo(); //~ ERROR: function `foo` is inaccessible
-                           //~^ NOTE: module `baz` is private
-        ::bar::baz::bar(); //~ ERROR: function `bar` is inaccessible
-                           //~^ NOTE: module `baz` is private
+        ::bar::baz::foo(); //~ ERROR: module `baz` is private
+        ::bar::baz::bar(); //~ ERROR: module `baz` is private
     }
 
     fn test2() {
         use bar::baz::{foo, bar};
-        //~^ ERROR: function `foo` is inaccessible
-        //~| NOTE: module `baz` is private
-        //~| ERROR: function `bar` is inaccessible
-        //~| NOTE: module `baz` is private
-
+        //~^ ERROR: module `baz` is private
+        //~| ERROR: module `baz` is private
 
         foo();
         bar();
@@ -169,8 +158,7 @@ pub mod mytest {
     // Even though the inner `A` struct is a publicly exported item (usable from
     // external crates through `foo::foo`, it should not be accessible through
     // its definition path (which has the private `i` module).
-    use self::foo::i::A; //~ ERROR: struct `A` is inaccessible
-                         //~^ NOTE: module `i` is private
+    use self::foo::i::A; //~ ERROR: module `i` is private
 
     pub mod foo {
         pub use self::i::A as foo;
index fd8f8d20b7babbecb248e7419bf7349bb66004e8..abf702204d16baef221f412b22dd0e3ecc0b8004 100644 (file)
@@ -16,7 +16,7 @@
 mod bar {
     pub use self::glob::*;
 
-    mod glob {
+    pub mod glob {
         use foo;
     }
 }
index 8e9998dd5977faf75510e67b3d2ee2fb429700ee..d9f767442845cd7fc95ee8c2d65531d27b008e8a 100644 (file)
@@ -28,7 +28,7 @@ mod bar {
 pub fn foo() {}
 
 fn test2() {
-    use bar::glob::gpriv; //~ ERROR: function `gpriv` is private
+    use bar::glob::gpriv; //~ ERROR: module `glob` is private
     gpriv();
 }
 
index 588c9be3065f82c32890e1ab1951d99474d71dad..9d6ae187cd381f0aa2f9d9c5257d6b0d93772475 100644 (file)
@@ -63,25 +63,25 @@ fn this_crate() {
     let c = a::C(2, 3); //~ ERROR: cannot invoke tuple struct constructor
     let d = a::D(4);
 
-    let a::A(()) = a; //~ ERROR: field #0 of struct `a::A` is private
+    let a::A(()) = a; //~ ERROR: field `0` of struct `a::A` is private
     let a::A(_) = a;
-    match a { a::A(()) => {} } //~ ERROR: field #0 of struct `a::A` is private
+    match a { a::A(()) => {} } //~ ERROR: field `0` of struct `a::A` is private
     match a { a::A(_) => {} }
 
     let a::B(_) = b;
-    let a::B(_b) = b; //~ ERROR: field #0 of struct `a::B` is private
+    let a::B(_b) = b; //~ ERROR: field `0` of struct `a::B` is private
     match b { a::B(_) => {} }
-    match b { a::B(_b) => {} } //~ ERROR: field #0 of struct `a::B` is private
-    match b { a::B(1) => {} a::B(_) => {} } //~ ERROR: field #0 of struct `a::B` is private
+    match b { a::B(_b) => {} } //~ ERROR: field `0` of struct `a::B` is private
+    match b { a::B(1) => {} a::B(_) => {} } //~ ERROR: field `0` of struct `a::B` is private
 
     let a::C(_, _) = c;
     let a::C(_a, _) = c;
-    let a::C(_, _b) = c; //~ ERROR: field #1 of struct `a::C` is private
-    let a::C(_a, _b) = c; //~ ERROR: field #1 of struct `a::C` is private
+    let a::C(_, _b) = c; //~ ERROR: field `1` of struct `a::C` is private
+    let a::C(_a, _b) = c; //~ ERROR: field `1` of struct `a::C` is private
     match c { a::C(_, _) => {} }
     match c { a::C(_a, _) => {} }
-    match c { a::C(_, _b) => {} } //~ ERROR: field #1 of struct `a::C` is private
-    match c { a::C(_a, _b) => {} } //~ ERROR: field #1 of struct `a::C` is private
+    match c { a::C(_, _b) => {} } //~ ERROR: field `1` of struct `a::C` is private
+    match c { a::C(_a, _b) => {} } //~ ERROR: field `1` of struct `a::C` is private
 
     let a::D(_) = d;
     let a::D(_d) = d;
@@ -101,30 +101,30 @@ fn xcrate() {
     let c = other::C(2, 3); //~ ERROR: cannot invoke tuple struct constructor
     let d = other::D(4);
 
-    let other::A(()) = a; //~ ERROR: field #0 of struct `other::A` is private
+    let other::A(()) = a; //~ ERROR: field `0` of struct `other::A` is private
     let other::A(_) = a;
     match a { other::A(()) => {} }
-    //~^ ERROR: field #0 of struct `other::A` is private
+    //~^ ERROR: field `0` of struct `other::A` is private
     match a { other::A(_) => {} }
 
     let other::B(_) = b;
-    let other::B(_b) = b; //~ ERROR: field #0 of struct `other::B` is private
+    let other::B(_b) = b; //~ ERROR: field `0` of struct `other::B` is private
     match b { other::B(_) => {} }
     match b { other::B(_b) => {} }
-    //~^ ERROR: field #0 of struct `other::B` is private
+    //~^ ERROR: field `0` of struct `other::B` is private
     match b { other::B(1) => {} other::B(_) => {} }
-    //~^ ERROR: field #0 of struct `other::B` is private
+    //~^ ERROR: field `0` of struct `other::B` is private
 
     let other::C(_, _) = c;
     let other::C(_a, _) = c;
-    let other::C(_, _b) = c; //~ ERROR: field #1 of struct `other::C` is private
-    let other::C(_a, _b) = c; //~ ERROR: field #1 of struct `other::C` is private
+    let other::C(_, _b) = c; //~ ERROR: field `1` of struct `other::C` is private
+    let other::C(_a, _b) = c; //~ ERROR: field `1` of struct `other::C` is private
     match c { other::C(_, _) => {} }
     match c { other::C(_a, _) => {} }
     match c { other::C(_, _b) => {} }
-    //~^ ERROR: field #1 of struct `other::C` is private
+    //~^ ERROR: field `1` of struct `other::C` is private
     match c { other::C(_a, _b) => {} }
-    //~^ ERROR: field #1 of struct `other::C` is private
+    //~^ ERROR: field `1` of struct `other::C` is private
 
     let other::D(_) = d;
     let other::D(_d) = d;
index c6e329aab041a3b2ad0506e23278bbf9b766cf70..e04380f12acafd7b0f6d577b8ad868db8c2a55a7 100644 (file)
@@ -18,7 +18,14 @@ mod a {
     }
 }
 
+fn f() {
+    impl a::Foo {
+        fn bar(&self) {} // This should be visible outside `f`
+    }
+}
+
 fn main() {
     let s = a::Foo { x: 1 };
+    s.bar();
     s.foo();    //~ ERROR method `foo` is private
 }
index 9aab06ce14ee16b324075fcaed44fbedc7c63766..b9d632a8cf07e6ce33dcbcd4271e05758769b5d0 100644 (file)
@@ -198,9 +198,11 @@ mod aliases_pub {
     use self::m::PubTr as PrivUseAliasTr;
     type PrivAlias = m::Pub2;
     trait PrivTr {
+        type AssocAlias;
+    }
+    impl PrivTr for Priv {
         type AssocAlias = m::Pub3;
     }
-    impl PrivTr for Priv {}
 
     pub fn f1(arg: PrivUseAlias) {} // OK
 
@@ -245,9 +247,11 @@ mod aliases_priv {
     use self::PrivTr1 as PrivUseAliasTr;
     type PrivAlias = Priv2;
     trait PrivTr {
+        type AssocAlias;
+    }
+    impl PrivTr for Priv {
         type AssocAlias = Priv3;
     }
-    impl PrivTr for Priv {}
 
     pub trait Tr1: PrivUseAliasTr {} //~ WARN private trait in public interface
         //~^ WARNING hard error
index b839902c6832ab16eeeaa9c77ef57513348d8966..895d2450cfed6c570c5ca2f72ee7594fe4caa64b 100644 (file)
@@ -13,7 +13,7 @@
 pub fn main() {
     // Mixed types.
     let _ = 0u32..10i32;
-    //~^ ERROR start and end of range have incompatible types
+    //~^ ERROR mismatched types
 
     // Bool => does not implement iterator.
     for i in false..true {}
@@ -22,6 +22,6 @@ pub fn main() {
     // Unsized type.
     let arr: &[_] = &[1, 2, 3];
     let range = *arr..;
-    //~^ ERROR the trait `core::marker::Sized` is not implemented
-    //~| ERROR the trait `core::marker::Sized` is not implemented
+    //~^ ERROR `[_]: std::marker::Sized` is not satisfied
+    //~| ERROR `[_]: std::marker::Sized` is not satisfied
 }
index c9053328572bcc3b07800255081d6f94ceabcb73..94967693ecf912d65d9c9db1b00b83bf83202d4c 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
 // file at the top-level directory of this distribution and at
 // http://rust-lang.org/COPYRIGHT.
 //
 
 pub fn main() {
     let r = {
-        &42..&42
-        //~^ ERROR borrowed value does not live long enough
-        //~^^ ERROR borrowed value does not live long enough
+        let a = 42;
+        let b = 42;
+        &a..&b
+        //~^ ERROR `a` does not live long enough
+        //~^^ ERROR `b` does not live long enough
     };
 }
diff --git a/src/test/compile-fail/range_inclusive_gate.rs b/src/test/compile-fail/range_inclusive_gate.rs
new file mode 100644 (file)
index 0000000..deac152
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Make sure that #![feature(inclusive_range)] is required.
+
+#![feature(inclusive_range_syntax)]
+// #![feature(inclusive_range)]
+
+pub fn main() {
+    let _: std::ops::RangeInclusive<_> = { use std::intrinsics; 1 } ... { use std::intrinsics; 2 };
+    //~^ ERROR use of unstable library feature 'inclusive_range'
+    //~^^ ERROR core_intrinsics
+    //~^^^ ERROR core_intrinsics
+    //~^^^^ WARN unused_imports
+    //~^^^^^ WARN unused_imports
+}
+
+
index 368269999a296c0008f457a5f7c37f12b63d9e63..226a6d57ddbf039f7cfd982719a8b1d11fcb228a 100644 (file)
@@ -43,4 +43,15 @@ fn main() {
     is_send::<A>();
     //~^ ERROR overflow evaluating
     //~| NOTE consider adding a `#![recursion_limit="20"]` attribute to your crate
+    //~| NOTE required because it appears within the type `A`
+    //~| NOTE required because it appears within the type `B`
+    //~| NOTE required because it appears within the type `C`
+    //~| NOTE required because it appears within the type `D`
+    //~| NOTE required because it appears within the type `E`
+    //~| NOTE required because it appears within the type `F`
+    //~| NOTE required because it appears within the type `G`
+    //~| NOTE required because it appears within the type `H`
+    //~| NOTE required because it appears within the type `I`
+    //~| NOTE required because it appears within the type `J`
+    //~| NOTE required by `is_send`
 }
index 815f75266322354be33e7e7d0742778020d17d98..4625669d5ecfe6b15b7a5ebe37c3288d20bdd4e3 100644 (file)
@@ -14,12 +14,14 @@ fn main() {
     //~^ HELP use a `ref` binding as shown
     //~| SUGGESTION let ref y = x;
     x; //~ ERROR use of moved value
+    //~^ HELP run `rustc --explain E0382` to see a detailed explanation
 
     let x = vec![1];
     let mut y = x;
     //~^ HELP use a `ref` binding as shown
     //~| SUGGESTION let ref mut y = x;
     x; //~ ERROR use of moved value
+    //~^ HELP run `rustc --explain E0382` to see a detailed explanation
 
     let x = (Some(vec![1]), ());
 
@@ -30,4 +32,5 @@ fn main() {
         _ => {},
     }
     x; //~ ERROR use of partially moved value
+    //~^ HELP run `rustc --explain E0382` to see a detailed explanation
 }
index 9cf0d252c2d55ce2d7af9471ef6d0b3f1c09b8e2..7cac3f41d546d712b490f3e090e41dd3adef2134 100644 (file)
@@ -24,7 +24,7 @@ struct Struct<T>(T);
 fn is_reflect<T:Reflect>() { }
 
 fn a<T>() {
-    is_reflect::<Box<Get<Output=T>>>(); //~ ERROR not implemented
+    is_reflect::<Box<Get<Output=T>>>(); //~ ERROR E0277
 }
 
 fn ok_a<T: Reflect>() {
index 9f074667feb3dc67a80d60340c6556728fb06ec0..476b498ae6492157b8b270ffe9158613e9317413 100644 (file)
@@ -23,7 +23,7 @@ struct Struct<T>(T);
 fn is_reflect<T:Reflect>() { }
 
 fn a<T>() {
-    is_reflect::<T>(); //~ ERROR not implemented
+    is_reflect::<T>(); //~ ERROR E0277
 }
 
 fn ok_a<T: Reflect>() {
@@ -31,7 +31,7 @@ fn ok_a<T: Reflect>() {
 }
 
 fn b<T>() {
-    is_reflect::<Box<Get<T>>>(); //~ ERROR not implemented
+    is_reflect::<Box<Get<T>>>(); //~ ERROR E0277
 }
 
 fn ok_b<T: Reflect>() {
@@ -39,7 +39,7 @@ fn ok_b<T: Reflect>() {
 }
 
 fn c<T>() {
-    is_reflect::<Box<Get<Struct<T>>>>(); //~ ERROR not implemented
+    is_reflect::<Box<Get<Struct<T>>>>(); //~ ERROR E0277
 }
 
 fn main() {
index 701aa5b40bc0a6314b50e04b28a6975c9a95280a..fdd569e2c1b3fe9bac4540d766b8e7f196b0c554 100644 (file)
@@ -22,7 +22,7 @@ struct Struct<T>(T);
 fn is_reflect<T:Reflect>() { }
 
 fn c<T>() {
-    is_reflect::<Struct<T>>(); //~ ERROR not implemented
+    is_reflect::<Struct<T>>(); //~ ERROR E0277
 }
 
 fn ok_c<T: Reflect>() {
@@ -30,7 +30,7 @@ fn ok_c<T: Reflect>() {
 }
 
 fn d<T>() {
-    is_reflect::<(i32, T)>(); //~ ERROR not implemented
+    is_reflect::<(i32, T)>(); //~ ERROR E0277
 }
 
 fn main() {
index ee05ba676ac7048d76425175a99a16688f82e094..6364db1f4b49c2e33bf095020ab237b3257144c9 100644 (file)
@@ -15,7 +15,7 @@ fn a<'a, 'b>(x: &mut &'a isize, y: &mut &'b isize) where 'b: 'a {
 
 fn b<'a, 'b>(x: &mut &'a isize, y: &mut &'b isize) {
     // Illegal now because there is no `'b:'a` declaration.
-    *x = *y; //~ ERROR cannot infer
+    *x = *y; //~ ERROR E0312
 }
 
 fn c<'a,'b>(x: &mut &'a isize, y: &mut &'b isize) {
index 30e6a4e12773f598db7db6beffaade00a676bca5..154135eba38baf7bd4cbf6b175429a7824c66c9d 100644 (file)
@@ -16,8 +16,8 @@ fn a<'a, 'b, 'c>(x: &mut &'a isize, y: &mut &'b isize, z: &mut &'c isize) where
 
 fn b<'a, 'b, 'c>(x: &mut &'a isize, y: &mut &'b isize, z: &mut &'c isize) {
     // Illegal now because there is no `'b:'a` declaration.
-    *x = *y; //~ ERROR cannot infer
-    *z = *y; //~ ERROR cannot infer
+    *x = *y; //~ ERROR E0312
+    *z = *y; //~ ERROR E0312
 }
 
 fn c<'a,'b, 'c>(x: &mut &'a isize, y: &mut &'b isize, z: &mut &'c isize) {
index 4a3ca01c8496dce20b17961504d0bc90b251f767..8cc35272282c41dbf4b5744e6d7604d59b6c91f9 100644 (file)
@@ -28,8 +28,7 @@ impl<'a> GetRef<'a> for Box<'a> {
 impl<'a> Box<'a> {
     fn or<'b,G:GetRef<'b>>(&self, g2: G) -> &'a isize {
         g2.get()
-        //~^ ERROR cannot infer an appropriate lifetime for automatic coercion due to
-        //~| ERROR mismatched types
+        //~^ ERROR mismatched types
         //~| expected `&'a isize`
         //~| found `&'b isize`
         //~| lifetime mismatch
index 57c8e3f1170aa74d5268761fdeb600db0764501e..1fc3b4b3c6a620279653f39d68b0143be1961817 100644 (file)
@@ -27,11 +27,7 @@ impl<'a,T:Clone> GetRef<'a,T> for Box<'a,T> {
 
 fn get<'a,'b,G:GetRef<'a, isize>>(g1: G, b: &'b isize) -> &'b isize {
     g1.get()
-    //~^ ERROR cannot infer an appropriate lifetime for automatic coercion due to
-    //~| ERROR mismatched types
-    //~| expected `&'b isize`
-    //~| found `&'a isize`
-    //~| lifetime mismatch
+    //~^ ERROR mismatched types
 }
 
 fn main() {
index 323d5360029c581b44ddb3734bb98f2ce022df82..586a8a183a4e6317bb34d1bde8c4baff8c91f464 100644 (file)
@@ -11,7 +11,7 @@
 mod argparse {
     pub struct Flag<'a> {
         name: &'a str,
-        desc: &'a str,
+        pub desc: &'a str,
         max_count: usize,
         value: usize
     }
@@ -22,9 +22,9 @@ mod argparse {
 
     impl<'a> Flag<'a> {
         pub fn set_desc(self, s: &str) -> Flag<'a> {
-            Flag {
+            Flag { //~ ERROR cannot infer
                 name: self.name,
-                desc: s, //~ ERROR cannot infer an appropriate lifetime for automatic coercion due t
+                desc: s,
                 max_count: self.max_count,
                 value: self.value
             }
index 43940d499d21003603b03b0ba36f62740e43837a..89254516ac600554386c34b54394a76c3e80cbdf 100644 (file)
@@ -15,13 +15,13 @@ fn a<'a, 'b:'a>(x: &mut &'a isize, y: &mut &'b isize) {
 
 fn b<'a, 'b>(x: &mut &'a isize, y: &mut &'b isize) {
     // Illegal now because there is no `'b:'a` declaration.
-    *x = *y; //~ ERROR cannot infer
+    *x = *y; //~ ERROR E0312
 }
 
 fn c<'a,'b>(x: &mut &'a isize, y: &mut &'b isize) {
     // Here we try to call `foo` but do not know that `'a` and `'b` are
     // related as required.
-    a(x, y); //~ ERROR cannot infer
+    a(x, y); //~ ERROR E0495
 }
 
 fn d() {
index f114a8bc7ce668bfe6a35ac4e3141cf41709bed1..5ef2a701a6005f37c2c54cfc208ae856bef4fcec 100644 (file)
@@ -14,16 +14,16 @@ fn ignore<T>(t: T) {}
 
 fn nested<'x>(x: &'x isize) {
     let y = 3;
-    let mut ay = &y;
+    let mut ay = &y; //~ ERROR E0495
 
     ignore::<Box<for<'z> FnMut(&'z isize)>>(Box::new(|z| {
-        ay = x; //~ ERROR cannot infer
+        ay = x;
         ay = &y;
         ay = z;
     }));
 
     ignore::< Box<for<'z> FnMut(&'z isize) -> &'z isize>>(Box::new(|z| {
-        if false { return x; }  //~ ERROR cannot infer an appropriate lifetime for automatic
+        if false { return x; } //~ ERROR E0312
         if false { return ay; }
         return z;
     }));
index 297b6a866da3a0b30b41ab5de2534fd6d96d5d9a..de695e72d07e4bab6bb979e71d515313f4764bc8 100644 (file)
@@ -13,7 +13,7 @@ fn static_id<'a,'b>(t: &'a ()) -> &'static ()
 fn static_id_indirect<'a,'b>(t: &'a ()) -> &'static ()
     where 'a: 'b, 'b: 'static { t }
 fn static_id_wrong_way<'a>(t: &'a ()) -> &'static () where 'static: 'a {
-    t //~ ERROR cannot infer an appropriate lifetime
+    t //~ ERROR E0312
 }
 
 fn error(u: &(), v: &()) {
index b12e26fddf6d20a86175441a6aaab8bb8428040f..adc870224037802c1e7c10c1eeb93bc79f64dc42 100644 (file)
@@ -47,7 +47,7 @@ impl              Drop for P<i8>          { fn drop(&mut self) { } } // REJECT
 //~^ ERROR Implementations of Drop cannot be specialized
 
 impl<Adds_bnd:Bound> Drop for Q<Adds_bnd> { fn drop(&mut self) { } } // REJECT
-//~^ ERROR The requirement `Adds_bnd : Bound` is added only by the Drop impl.
+//~^ ERROR The requirement `Adds_bnd: Bound` is added only by the Drop impl.
 
 impl<'rbnd,Adds_rbnd:'rbnd> Drop for R<Adds_rbnd> { fn drop(&mut self) { } } // REJECT
 //~^ ERROR The requirement `Adds_rbnd : 'rbnd` is added only by the Drop impl.
index c8457adb8d6e6e4c4fd1b7489f99214714beca98..88441594a7e9345f0f460fb994d35f5538205f0f 100644 (file)
@@ -25,5 +25,5 @@ impl Drop for Foo {
 fn main() {
     let a = Foo { x: 3 };
     let _ = [ a; 5 ];
-    //~^ ERROR the trait `core::marker::Copy` is not implemented for the type `Foo`
+    //~^ ERROR `Foo: std::marker::Copy` is not satisfied
 }
index 9aa61418d6d047d60d2270c5d09de01525a8ab78..10b722946a8a89f847c5af45081ee29e40f9470c 100644 (file)
@@ -43,13 +43,17 @@ fn main() {
     let f = [0; -4_isize];
     //~^ ERROR mismatched types
     //~| expected `usize`
-    //~| found `isize`
-    //~| ERROR expected positive integer for repeat count, found negative integer [E0306]
+    //~| found `isize` [E0308]
+    //~| ERROR mismatched types:
+    //~| expected `usize`,
+    //~| found `isize` [E0307]
     let f = [0_usize; -1_isize];
     //~^ ERROR mismatched types
     //~| expected `usize`
-    //~| found `isize`
-    //~| ERROR expected positive integer for repeat count, found negative integer [E0306]
+    //~| found `isize` [E0308]
+    //~| ERROR mismatched types
+    //~| expected `usize`
+    //~| found `isize` [E0307]
     struct G {
         g: (),
     }
index af3ee7f353731c13141ce7b07fdcad5cf5d69d84..c05d0cc1b0e51ca7055d416ed18f60403077fad2 100644 (file)
@@ -9,6 +9,6 @@
 // except according to those terms.
 
 extern crate std;
-//~^ ERROR an external crate named `std` has already been imported
+//~^ ERROR an extern crate named `std` has already been imported
 
 fn main(){}
index 0c601a81178b3ad28fcc71a301491f016ead71b6..6cbc728c03edcad6edbc800d26a91881eba3eff5 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use std::slice as std; //~ ERROR import `std` conflicts with imported crate
+use std::slice as std; //~ ERROR an extern crate named `std` has already been imported
 
 fn main() {
 }
index 07f80cf03d1dadf086ce798de5ba756d336bd5bf..b0954ee1571eee7d55f5514b7797867667241eb8 100644 (file)
@@ -9,7 +9,7 @@
 // except according to those terms.
 
 fn std() {}
-mod std {}    //~ ERROR the name `std` conflicts with an external crate
+mod std {}    //~ ERROR an extern crate named `std` has already been imported
 
 fn main() {
 }
index 1edf815ecaeca402f5b6be30fef47213f07d8dec..dbd1ecf44fd7312d7a776e2a0ba39e1cd5513bd5 100644 (file)
@@ -9,9 +9,10 @@
 // except according to those terms.
 
 use std::mem::transmute;
-//~^ ERROR import `transmute` conflicts with value in this module
+//~^ NOTE previous import of `transmute` here
 
 fn transmute() {}
+//~^ ERROR a value named `transmute` has already been imported in this module
 
 fn main() {
 }
index 45b0314d2c01da88190a844571b87fecb337bb44..aa7e47e223ff5a9ec04b268114a36f74bfcdf7af 100644 (file)
@@ -9,9 +9,9 @@
 // except according to those terms.
 
 use std::slice::Iter;
-//~^ ERROR import `Iter` conflicts with type in this module
 
 struct Iter;
+//~^ ERROR a type named `Iter` has already been imported in this module
 
 fn main() {
 }
index 92134ecde91aa56c9ba3bccb0281128e5a98f6f6..30ff1ed0e26f7a2a637a253e7512c7ac73a62048 100644 (file)
@@ -18,7 +18,6 @@ trait TraitA<A> {
             //~^ ERROR parameter `B` is never used
             Variance(A)
                 //~^ ERROR can't use type parameters from outer function
-                //~^^ ERROR type name `A` is undefined or not in scope
         }
     }
 }
@@ -27,8 +26,7 @@ trait TraitB<A> {
     fn outer(self) {
         struct Foo<B>(A);
                 //~^ ERROR can't use type parameters from outer function
-                //~^^ ERROR type name `A` is undefined or not in scope
-                //~^^^ ERROR parameter `B` is never used
+                //~^^ ERROR parameter `B` is never used
     }
 }
 
@@ -36,8 +34,7 @@ trait TraitC<A> {
     fn outer(self) {
         struct Foo<B> { a: A }
                 //~^ ERROR can't use type parameters from outer function
-                //~^^ ERROR type name `A` is undefined or not in scope
-                //~^^^ ERROR parameter `B` is never used
+                //~^^ ERROR parameter `B` is never used
     }
 }
 
@@ -45,7 +42,6 @@ trait TraitD<A> {
     fn outer(self) {
         fn foo<B>(a: A) { }
                 //~^ ERROR can't use type parameters from outer function
-                //~^^ ERROR type name `A` is undefined or not in scope
     }
 }
 
diff --git a/src/test/compile-fail/rfc1445/feature-gate.rs b/src/test/compile-fail/rfc1445/feature-gate.rs
new file mode 100644 (file)
index 0000000..1f2d781
--- /dev/null
@@ -0,0 +1,36 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that structural match is only permitted with a feature gate,
+// and that if a feature gate is supplied, it permits the type to be
+// used in a match.
+
+// revisions: with_gate no_gate
+
+#![allow(dead_code)]
+#![deny(future_incompatible)]
+#![feature(rustc_attrs)]
+#![cfg_attr(with_gate, feature(structural_match))]
+
+#[structural_match] //[no_gate]~ ERROR semantics of constant patterns is not yet settled
+struct Foo {
+    x: u32
+}
+
+const FOO: Foo = Foo { x: 0 };
+
+#[rustc_error]
+fn main() { //[with_gate]~ ERROR compilation successful
+    let y = Foo { x: 1 };
+    match y {
+        FOO => { }
+        _ => { }
+    }
+}
diff --git a/src/test/compile-fail/rfc1445/match-forbidden-without-eq.rs b/src/test/compile-fail/rfc1445/match-forbidden-without-eq.rs
new file mode 100644 (file)
index 0000000..c573e3e
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+#![deny(future_incompatible)]
+
+use std::f32;
+
+#[derive(PartialEq)]
+struct Foo {
+    x: u32
+}
+
+const FOO: Foo = Foo { x: 0 };
+
+fn main() {
+    let y = Foo { x: 1 };
+    match y {
+        FOO => { }
+        //~^ ERROR must be annotated with `#[derive(PartialEq, Eq)]`
+        //~| WARNING will become a hard error
+        _ => { }
+    }
+
+    let x = 0.0;
+    match x {
+        f32::INFINITY => { }
+        //~^ ERROR floating point constants cannot be used in patterns
+        //~| WARNING will become a hard error
+        _ => { }
+    }
+}
diff --git a/src/test/compile-fail/rfc1445/match-requires-both-partialeq-and-eq.rs b/src/test/compile-fail/rfc1445/match-requires-both-partialeq-and-eq.rs
new file mode 100644 (file)
index 0000000..029df08
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+#![deny(future_incompatible)]
+
+#[derive(Eq)]
+struct Foo {
+    x: u32
+}
+
+impl PartialEq for Foo {
+    fn eq(&self, _: &Foo) -> bool {
+        false // ha ha sucker!
+    }
+}
+
+const FOO: Foo = Foo { x: 0 };
+
+fn main() {
+    let y = Foo { x: 1 };
+    match y {
+        FOO => { }
+        //~^ ERROR must be annotated with `#[derive(PartialEq, Eq)]`
+        //~| WARNING will become a hard error
+        _ => { }
+    }
+}
diff --git a/src/test/compile-fail/shadowed-trait-methods.rs b/src/test/compile-fail/shadowed-trait-methods.rs
new file mode 100644 (file)
index 0000000..3146017
--- /dev/null
@@ -0,0 +1,24 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that methods from shadowed traits cannot be used
+
+mod foo {
+    pub trait T { fn f(&self) {} }
+    impl T for () {}
+}
+
+mod bar { pub use foo::T; }
+
+fn main() {
+    pub use bar::*;
+    struct T;
+    ().f() //~ ERROR no method
+}
index 35c368f4cbedb5bb399156b590bbc7ad1f43dfdd..33954d23b19cbb0bf59bfa200be86d019a4a13b2 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(repr_simd, platform_intrinsics)]
+#![feature(repr_simd, platform_intrinsics, rustc_attrs)]
 #![allow(non_camel_case_types)]
 #[repr(simd)]
 #[derive(Copy, Clone)]
@@ -34,6 +34,7 @@ extern "platform-intrinsic" {
     fn simd_xor<T>(x: T, y: T) -> T;
 }
 
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
 fn main() {
     let x = i32x4(0, 0, 0, 0);
     let y = u32x4(0, 0, 0, 0);
index 4999b790b130a4e79cb9b9e4ea63b001f0ef93d8..cb3bed7209e2d6596eb219221254d46c695e88fb 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(repr_simd, platform_intrinsics)]
+#![feature(repr_simd, platform_intrinsics, rustc_attrs)]
 
 #[repr(simd)]
 #[derive(Copy, Clone)]
@@ -35,6 +35,7 @@ extern "platform-intrinsic" {
     fn simd_cast<T, U>(x: T) -> U;
 }
 
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
 fn main() {
     let x = i32x4(0, 0, 0, 0);
 
index 617b03a87117b75b3ff97d26c88c1f1b9b6ca2e0..0e7b2bd490470832fe7e61a6a786d7cf172ed20a 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(repr_simd, platform_intrinsics)]
+#![feature(repr_simd, platform_intrinsics, rustc_attrs)]
 
 #[repr(simd)]
 #[derive(Copy, Clone)]
@@ -29,6 +29,7 @@ extern "platform-intrinsic" {
     fn simd_ge<T, U>(x: T, y: T) -> U;
 }
 
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
 fn main() {
     let x = i32x4(0, 0, 0, 0);
 
index b0198c411d5679ec1e06727969e619c9fe0fad78..1f4cc72ffe717c2031de22da23096302aec09426 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(repr_simd, platform_intrinsics)]
+#![feature(repr_simd, platform_intrinsics, rustc_attrs)]
 
 #[repr(simd)]
 #[derive(Copy, Clone)]
@@ -56,6 +56,7 @@ extern "platform-intrinsic" {
     fn simd_shuffle8<T, U>(x: T, y: T, idx: [u32; 8]) -> U;
 }
 
+#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
 fn main() {
     let x = i32x4(0, 0, 0, 0);
 
index bb1ab2eafb355efc4d0871172a0e544b41a471ae..3d7c4868e96fe3bff3e62da72e0752ecc45c519a 100644 (file)
@@ -20,11 +20,11 @@ struct Baz { q: Option<Foo> }
 
 struct Foo { q: Option<Baz> }
 //~^ ERROR recursive type `Foo` has infinite size
-//~| type `Foo` is embedded within `core::option::Option<Foo>`...
-//~| ...which in turn is embedded within `core::option::Option<Foo>`...
-//~| ...which in turn is embedded within `Baz`...
-//~| ...which in turn is embedded within `core::option::Option<Baz>`...
-//~| ...which in turn is embedded within `Foo`, completing the cycle.
+//~| NOTE type `Foo` is embedded within `std::option::Option<Foo>`...
+//~| NOTE ...which in turn is embedded within `std::option::Option<Foo>`...
+//~| NOTE ...which in turn is embedded within `Baz`...
+//~| NOTE ...which in turn is embedded within `std::option::Option<Baz>`...
+//~| NOTE ...which in turn is embedded within `Foo`, completing the cycle.
 
 impl Foo { fn bar(&self) {} }
 
diff --git a/src/test/compile-fail/specialization/README.md b/src/test/compile-fail/specialization/README.md
new file mode 100644 (file)
index 0000000..f2b4bf9
--- /dev/null
@@ -0,0 +1,21 @@
+This directory contains the tests for incorrect usage of specialization that
+should lead to compile failure. Those tests break down into a few categories:
+
+- Feature gating
+  - [On use of the `default` keyword](specialization-feature-gate-default.rs)
+  - [On overlapping impls](specialization-feature-gate-overlap.rs)
+
+- Overlap checking with specialization enabled
+  - [Basic overlap scenarios](specialization-overlap.rs)
+    - Includes purely structural overlap
+    - Includes purely trait-based overlap
+    - Includes mix
+  - [Overlap with differing polarity](specialization-overlap-negative.rs)
+
+- [Attempt to specialize without using `default`](specialization-no-default.rs)
+
+- [Attempt to change impl polarity in a specialization](specialization-polarity.rs)
+
+- Attempt to rely on projection of a `default` type
+  - [Rely on it externally in both generic and monomorphic contexts](specialization-default-projection.rs)
+  - [Rely on it both within an impl and outside it](specialization-default-types.rs)
diff --git a/src/test/compile-fail/specialization/specialization-default-projection.rs b/src/test/compile-fail/specialization/specialization-default-projection.rs
new file mode 100644 (file)
index 0000000..96cbd7a
--- /dev/null
@@ -0,0 +1,46 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(specialization)]
+
+// Make sure we can't project defaulted associated types
+
+trait Foo {
+    type Assoc;
+}
+
+impl<T> Foo for T {
+    default type Assoc = ();
+}
+
+impl Foo for u8 {
+    type Assoc = String;
+}
+
+fn generic<T>() -> <T as Foo>::Assoc {
+    // `T` could be some downstream crate type that specializes (or,
+    // for that matter, `u8`).
+
+    () //~ ERROR mismatched types
+}
+
+fn monomorphic() -> () {
+    // Even though we know that `()` is not specialized in a
+    // downstream crate, typeck refuses to project here.
+
+    generic::<()>() //~ ERROR mismatched types
+}
+
+fn main() {
+    // No error here, we CAN project from `u8`, as there is no `default`
+    // in that impl.
+    let s: String = generic::<u8>();
+    println!("{}", s); // bad news if this all compiles
+}
diff --git a/src/test/compile-fail/specialization/specialization-default-types.rs b/src/test/compile-fail/specialization/specialization-default-types.rs
new file mode 100644 (file)
index 0000000..18acecb
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// It should not be possible to use the concrete value of a defaulted
+// associated type in the impl defining it -- otherwise, what happens
+// if it's overridden?
+
+#![feature(specialization)]
+
+trait Example {
+    type Output;
+    fn generate(self) -> Self::Output;
+}
+
+impl<T> Example for T {
+    default type Output = Box<T>;
+    default fn generate(self) -> Self::Output {
+        Box::new(self) //~ ERROR mismatched types
+    }
+}
+
+impl Example for bool {
+    type Output = bool;
+    fn generate(self) -> bool { self }
+}
+
+fn trouble<T>(t: T) -> Box<T> {
+    Example::generate(t) //~ ERROR mismatched types
+}
+
+fn weaponize() -> bool {
+    let b: Box<bool> = trouble(true);
+    *b
+}
+
+fn main() {
+    weaponize();
+}
diff --git a/src/test/compile-fail/specialization/specialization-feature-gate-default.rs b/src/test/compile-fail/specialization/specialization-feature-gate-default.rs
new file mode 100644 (file)
index 0000000..e7c194c
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check that specialization must be ungated to use the `default` keyword
+
+trait Foo {
+    fn foo(&self);
+}
+
+impl<T> Foo for T {
+    default fn foo(&self) {} //~ ERROR specialization is unstable
+}
+
+fn main() {}
diff --git a/src/test/compile-fail/specialization/specialization-feature-gate-overlap.rs b/src/test/compile-fail/specialization/specialization-feature-gate-overlap.rs
new file mode 100644 (file)
index 0000000..d11ab56
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check that writing an overlapping impl is not allowed unless specialization is ungated.
+
+trait Foo {
+    fn foo(&self);
+}
+
+impl<T> Foo for T {
+    fn foo(&self) {}
+}
+
+impl Foo for u8 { //~ ERROR E0119
+    fn foo(&self) {}
+}
+
+fn main() {}
diff --git a/src/test/compile-fail/specialization/specialization-no-default.rs b/src/test/compile-fail/specialization/specialization-no-default.rs
new file mode 100644 (file)
index 0000000..9615616
--- /dev/null
@@ -0,0 +1,95 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(specialization)]
+
+// Check a number of scenarios in which one impl tries to override another,
+// without correctly using `default`.
+
+////////////////////////////////////////////////////////////////////////////////
+// Test 1: one layer of specialization, multiple methods, missing `default`
+////////////////////////////////////////////////////////////////////////////////
+
+trait Foo {
+    fn foo(&self);
+    fn bar(&self);
+}
+
+impl<T> Foo for T {
+    fn foo(&self) {}
+    fn bar(&self) {}
+}
+
+impl Foo for u8 {}
+impl Foo for u16 {
+    fn foo(&self) {} //~ ERROR E0520
+}
+impl Foo for u32 {
+    fn bar(&self) {} //~ ERROR E0520
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Test 2: one layer of specialization, missing `default` on associated type
+////////////////////////////////////////////////////////////////////////////////
+
+trait Bar {
+    type T;
+}
+
+impl<T> Bar for T {
+    type T = u8;
+}
+
+impl Bar for u8 {
+    type T = (); //~ ERROR E0520
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Test 3a: multiple layers of specialization, missing interior `default`
+////////////////////////////////////////////////////////////////////////////////
+
+trait Baz {
+    fn baz(&self);
+}
+
+impl<T> Baz for T {
+    default fn baz(&self) {}
+}
+
+impl<T: Clone> Baz for T {
+    fn baz(&self) {}
+}
+
+impl Baz for i32 {
+    fn baz(&self) {} //~ ERROR E0520
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Test 3b: multiple layers of specialization, missing interior `default`,
+// redundant `default` in bottom layer.
+////////////////////////////////////////////////////////////////////////////////
+
+trait Redundant {
+    fn redundant(&self);
+}
+
+impl<T> Redundant for T {
+    default fn redundant(&self) {}
+}
+
+impl<T: Clone> Redundant for T {
+    fn redundant(&self) {}
+}
+
+impl Redundant for i32 {
+    default fn redundant(&self) {} //~ ERROR E0520
+}
+
+fn main() {}
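
Tests 3a and 3b above fail only because an intermediate impl omits `default`; once every layer that may still be overridden carries the keyword, the same shape should be accepted. A hedged sketch of the corrected three-layer chain follows (nightly only, `specialization` gate; the trait name and return strings are illustrative).

#![feature(specialization)]

trait Level {
    fn level(&self) -> &'static str;
}

impl<T> Level for T {
    default fn level(&self) -> &'static str { "any type" }
}

// Marked `default` so the `i32` impl below may specialize it further.
impl<T: Clone> Level for T {
    default fn level(&self) -> &'static str { "clone types" }
}

impl Level for i32 {
    fn level(&self) -> &'static str { "i32" }
}

struct NotClone;

fn main() {
    assert_eq!(1i32.level(), "i32");
    assert_eq!("s".to_string().level(), "clone types");
    assert_eq!(NotClone.level(), "any type");
}
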
diff --git a/src/test/compile-fail/specialization/specialization-overlap-negative.rs b/src/test/compile-fail/specialization/specialization-overlap-negative.rs
new file mode 100644 (file)
index 0000000..62a6d8d
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(optin_builtin_traits)]
+#![feature(specialization)]
+
+trait MyTrait {}
+
+struct TestType<T>(::std::marker::PhantomData<T>);
+
+unsafe impl<T: Clone> Send for TestType<T> {}
+impl<T: MyTrait> !Send for TestType<T> {} //~ ERROR E0119
+
+fn main() {}
diff --git a/src/test/compile-fail/specialization/specialization-overlap.rs b/src/test/compile-fail/specialization/specialization-overlap.rs
new file mode 100644 (file)
index 0000000..f579817
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(specialization)]
+
+trait Foo {}
+impl<T: Clone> Foo for T {}
+impl<T> Foo for Vec<T> {} //~ ERROR E0119
+
+trait Bar {}
+impl<T> Bar for (T, u8) {}
+impl<T> Bar for (u8, T) {} //~ ERROR E0119
+
+trait Baz<U> {}
+impl<T> Baz<T> for u8 {}
+impl<T> Baz<u8> for T {} //~ ERROR E0119
+
+trait Qux {}
+impl<T: Clone> Qux for T {}
+impl<T: Eq> Qux for T {} //~ ERROR E0119
+
+fn main() {}
diff --git a/src/test/compile-fail/specialization/specialization-polarity.rs b/src/test/compile-fail/specialization/specialization-polarity.rs
new file mode 100755 (executable)
index 0000000..27a3e31
--- /dev/null
@@ -0,0 +1,30 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Make sure specialization cannot change impl polarity
+
+#![feature(optin_builtin_traits)]
+#![feature(specialization)]
+
+trait Foo {}
+
+impl Foo for .. {}
+
+impl<T> Foo for T {}
+impl !Foo for u8 {} //~ ERROR E0119
+
+trait Bar {}
+
+impl Bar for .. {}
+
+impl<T> !Bar for T {}
+impl Bar for u8 {} //~ ERROR E0119
+
+fn main() {}
index 6e48ae20d09f3ab1ebe47f1f91fa81b89bd1db5b..b972a09b5c4908fa9e200d0888fbbb1f64fe33b3 100644 (file)
@@ -10,5 +10,5 @@
 
 pub fn main() {
     let s: &str = "hello";
-    let c: u8 = s[4]; //~ ERROR the trait `core::ops::Index<_>` is not implemented
+    let c: u8 = s[4]; //~ ERROR `str: std::ops::Index<_>` is not satisfied
 }
index ec6a14778a463f22f266b9f6aa66654b52935d5f..8851e5e07973cd597d9cfcd3a390976c58ae9651 100644 (file)
@@ -12,11 +12,11 @@ fn bot<T>() -> T { loop {} }
 
 fn mutate(s: &mut str) {
     s[1..2] = bot();
-    //~^ ERROR `core::marker::Sized` is not implemented for the type `str`
-    //~| ERROR `core::marker::Sized` is not implemented for the type `str`
+    //~^ ERROR `str: std::marker::Sized` is not satisfied
+    //~| ERROR `str: std::marker::Sized` is not satisfied
     s[1usize] = bot();
-    //~^ ERROR `core::ops::Index<usize>` is not implemented for the type `str`
-    //~| ERROR `core::ops::IndexMut<usize>` is not implemented for the type `str`
+    //~^ ERROR `str: std::ops::Index<usize>` is not satisfied
+    //~| ERROR `str: std::ops::IndexMut<usize>` is not satisfied
 }
 
 pub fn main() {}
index 2ff48b73e294c415bcb02f51f05046d8dbb02f95..f487ef62aa435d22887b120fc595ceca5fb80436 100644 (file)
@@ -17,7 +17,7 @@ struct A {
 }
 
 mod inner {
-    struct A {
+    pub struct A {
         a: isize,
         pub b: isize,
     }
@@ -25,12 +25,10 @@ mod inner {
         pub a: isize,
         b: isize,
     }
+    pub struct Z(pub isize, isize);
 }
 
-fn test(a: A, b: inner::A, c: inner::B, d: xc::A, e: xc::B) {
-    //~^ ERROR: struct `A` is private
-    //~^^ ERROR: struct `A` is private
-
+fn test(a: A, b: inner::A, c: inner::B, d: xc::A, e: xc::B, z: inner::Z) {
     a.a;
     b.a; //~ ERROR: field `a` of struct `inner::A` is private
     b.b;
@@ -42,6 +40,9 @@ fn test(a: A, b: inner::A, c: inner::B, d: xc::A, e: xc::B) {
 
     e.a;
     e.b; //~ ERROR: field `b` of struct `xc::B` is private
+
+    z.0;
+    z.1; //~ ERROR: field `1` of struct `inner::Z` is private
 }
 
 fn main() {}
index b8be7d0cdc20a59aa6f03d8e2d2beaba82c4a41f..8507acd26cebec14456e6ff1ca1b81e4a115e961 100644 (file)
@@ -13,7 +13,7 @@ extern crate struct_variant_privacy;
 
 fn f(b: struct_variant_privacy::Bar) { //~ ERROR enum `Bar` is private
     match b {
-        struct_variant_privacy::Bar::Baz { a: _a } => {} //~ ERROR variant `Baz` is private
+        struct_variant_privacy::Bar::Baz { a: _a } => {} //~ ERROR enum `Bar` is private
     }
 }
 
index f36862364e734c8e72aedbd9674d0ec705573746..7de4ca62555a24ad6e66b702ac27789fc193ebef 100644 (file)
@@ -15,8 +15,7 @@ mod foo {
 
 fn f(b: foo::Bar) { //~ ERROR enum `Bar` is private
     match b {
-        foo::Bar::Baz { a: _a } => {} //~ ERROR variant `Baz` is inaccessible
-        // ^~ ERROR enum `Bar` is private
+        foo::Bar::Baz { a: _a } => {} //~ ERROR enum `Bar` is private
     }
 }
 
diff --git a/src/test/compile-fail/substs-ppaux.rs b/src/test/compile-fail/substs-ppaux.rs
new file mode 100644 (file)
index 0000000..851e31b
--- /dev/null
@@ -0,0 +1,61 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// revisions: verbose normal
+//
+//[verbose] compile-flags: -Z verbose
+
+trait Foo<'b, 'c, S=u32> {
+    fn bar<'a, T>() where T: 'a {}
+    fn baz() {}
+}
+
+impl<'a,'b,T,S> Foo<'a, 'b, S> for T {}
+
+fn main() {}
+
+fn foo<'z>() where &'z (): Sized {
+    let x: () = <i8 as Foo<'static, 'static,  u8>>::bar::<'static, char>;
+    //[verbose]~^ ERROR mismatched types
+    //[verbose]~| expected `()`
+    //[verbose]~| found `fn() {<i8 as Foo<ReStatic, ReStatic, u8>>::bar::<ReStatic, char>}`
+    //[normal]~^^^^ ERROR mismatched types
+    //[normal]~| expected `()`
+    //[normal]~| found  `fn() {<i8 as Foo<'static, 'static, u8>>::bar::<'static, char>}`
+
+
+    let x: () = <i8 as Foo<'static, 'static,  u32>>::bar::<'static, char>;
+    //[verbose]~^ ERROR mismatched types
+    //[verbose]~| expected `()`
+    //[verbose]~| found `fn() {<i8 as Foo<ReStatic, ReStatic, u32>>::bar::<ReStatic, char>}`
+    //[normal]~^^^^ ERROR mismatched types
+    //[normal]~| expected `()`
+    //[normal]~| found `fn() {<i8 as Foo<'static, 'static>>::bar::<'static, char>}`
+
+    let x: () = <i8 as Foo<'static, 'static,  u8>>::baz;
+    //[verbose]~^ ERROR mismatched types
+    //[verbose]~| expected `()`
+    //[verbose]~| found `fn() {<i8 as Foo<ReStatic, ReStatic, u8>>::baz}`
+    //[normal]~^^^^ ERROR mismatched types
+    //[normal]~| expected `()`
+    //[normal]~| found `fn() {<i8 as Foo<'static, 'static, u8>>::baz}`
+
+    let x: () = foo::<'static>;
+    //[verbose]~^ ERROR mismatched types
+    //[verbose]~| expected `()`
+    //[verbose]~| found `fn() {foo::<ReStatic>}`
+    //[normal]~^^^^ ERROR mismatched types
+    //[normal]~| expected `()`
+    //[normal]~| found `fn() {foo::<'static>}`
+
+    <str as Foo<u8>>::bar;
+    //[verbose]~^ ERROR `str: std::marker::Sized` is not satisfied
+    //[normal]~^^ ERROR `str: std::marker::Sized` is not satisfied
+}
index 1d04679fd11e799dd9ebb620163e378d939b02e6..8877377a6ec9676355a20ce148cf0f067f2bc9b5 100644 (file)
@@ -27,46 +27,54 @@ fn h1() -> i32 {
     a.I
         //~^ ERROR E0425
         //~| HELP To reference an item from the `a` module, use `a::I`
+        //~| HELP run `rustc --explain E0425` to see a detailed explanation
 }
 
 fn h2() -> i32 {
     a.g()
         //~^ ERROR E0425
         //~| HELP To call a function from the `a` module, use `a::g(..)`
+        //~| HELP run `rustc --explain E0425` to see a detailed explanation
 }
 
 fn h3() -> i32 {
     a.b.J
         //~^ ERROR E0425
         //~| HELP To reference an item from the `a` module, use `a::b`
+        //~| HELP run `rustc --explain E0425` to see a detailed explanation
 }
 
 fn h4() -> i32 {
     a::b.J
         //~^ ERROR E0425
         //~| HELP To reference an item from the `a::b` module, use `a::b::J`
+        //~| HELP run `rustc --explain E0425` to see a detailed explanation
 }
 
 fn h5() -> i32 {
     a.b.f()
         //~^ ERROR E0425
         //~| HELP To reference an item from the `a` module, use `a::b`
+        //~| HELP run `rustc --explain E0425` to see a detailed explanation
 }
 
 fn h6() -> i32 {
     a::b.f()
         //~^ ERROR E0425
         //~| HELP To call a function from the `a::b` module, use `a::b::f(..)`
+        //~| HELP run `rustc --explain E0425` to see a detailed explanation
 }
 
 fn h7() {
     a::b
         //~^ ERROR E0425
         //~| HELP Module `a::b` cannot be the value of an expression
+        //~| HELP run `rustc --explain E0425` to see a detailed explanation
 }
 
 fn h8() -> i32 {
     a::b()
         //~^ ERROR E0425
         //~| HELP No function corresponds to `a::b(..)`
+        //~| HELP run `rustc --explain E0425` to see a detailed explanation
 }
index 8bc8a7a60bdd369695685297fef357360ea48866..9c61f618e690f0fce57f9f812e7f22b3c5e42d3a 100644 (file)
@@ -25,6 +25,7 @@ fn main () {
         aa: 20, //~ ERROR structure `xc::B` has no field named `aa`
         //~^ HELP did you mean `a`?
         bb: 20, //~ ERROR structure `xc::B` has no field named `bb`
+        //~^ HELP did you mean `a`?
     };
     // local crate struct
     let l = A {
index 309b6773f604ef5a3a223b4a060d3f2b5b96cd7f..f59caef4631361f546e077efd37784b1362605d9 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use super::f; //~ ERROR unresolved import `super::f`
+use super::f; //~ ERROR unresolved import `super::f`. There are too many initial `super`s.
 
 fn main() {
 }
diff --git a/src/test/compile-fail/symbol-names/basic.rs b/src/test/compile-fail/symbol-names/basic.rs
new file mode 100644 (file)
index 0000000..0095774
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_attrs)]
+
+#[rustc_symbol_name] //~ ERROR _ZN5basic4main
+#[rustc_item_path] //~ ERROR item-path(main)
+fn main() {
+}
diff --git a/src/test/compile-fail/symbol-names/impl1.rs b/src/test/compile-fail/symbol-names/impl1.rs
new file mode 100644 (file)
index 0000000..39bee26
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_attrs)]
+#![allow(dead_code)]
+
+mod foo {
+    pub struct Foo { x: u32 }
+
+    impl Foo {
+        #[rustc_symbol_name] //~ ERROR _ZN5impl13foo3Foo3bar
+        #[rustc_item_path] //~ ERROR item-path(foo::Foo::bar)
+        fn bar() { }
+    }
+}
+
+mod bar {
+    use foo::Foo;
+
+    impl Foo {
+        #[rustc_symbol_name] //~ ERROR _ZN5impl13bar26_$LT$impl$u20$foo..Foo$GT$3baz
+        #[rustc_item_path] //~ ERROR item-path(bar::<impl foo::Foo>::baz)
+        fn baz() { }
+    }
+}
+
+fn main() {
+}
index 660c1fa9a88d89c00ab98b4326991e164cce4b7a..725234dfeab5600c3d8ba230866a9a73ececcc51 100644 (file)
@@ -22,7 +22,7 @@ fn main() {
     let x : char = last(y);
     //~^ ERROR mismatched types
     //~| expected `char`
-    //~| found `core::option::Option<_>`
+    //~| found `std::option::Option<_>`
     //~| expected char
-    //~| found enum `core::option::Option`
+    //~| found enum `std::option::Option`
 }
index dc3385f4bb92f38e7366800fa15f31dc6f1291ca..a11df776e06d3ba1e2dc6f0add3390d80a92da6b 100644 (file)
 
 // ensure that the ThreadRng isn't/doesn't become accidentally sendable.
 
-use std::rand;
+use std::rand; //~ ERROR: module `rand` is private
 
 fn test_send<S: Send>() {}
 
 pub fn main() {
     test_send::<rand::ThreadRng>();
-    //~^ ERROR `core::marker::Send` is not implemented
+    //~^ ERROR : std::marker::Send` is not satisfied
 }
diff --git a/src/test/compile-fail/token-error-correct-2.rs b/src/test/compile-fail/token-error-correct-2.rs
new file mode 100644 (file)
index 0000000..ab429ab
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that we do some basic error correction in the tokeniser (and don't ICE).
+
+fn main() {
+    if foo { //~ NOTE: unclosed delimiter
+    //~^ ERROR: unresolved name `foo`
+    ) //~ ERROR: incorrect close delimiter: `)`
+}
diff --git a/src/test/compile-fail/token-error-correct-3.rs b/src/test/compile-fail/token-error-correct-3.rs
new file mode 100644 (file)
index 0000000..f42c8d0
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that we do some basic error correction in the tokeniser (and don't spew
+// too many bogus errors).
+
+pub mod raw {
+    use std::{io, fs};
+    use std::path::Path;
+
+    pub fn ensure_dir_exists<P: AsRef<Path>, F: FnOnce(&Path)>(path: P,
+                                                               callback: F)
+                                                               -> io::Result<bool> {
+        if !is_directory(path.as_ref()) { //~ ERROR: unresolved name `is_directory`
+            callback(path.as_ref();  //~ NOTE: unclosed delimiter
+                     //~^ ERROR: expected one of
+            fs::create_dir_all(path.as_ref()).map(|()| true) //~ ERROR: mismatched types
+        } else { //~ ERROR: incorrect close delimiter: `}`
+            //~^ ERROR: expected one of
+            Ok(false);
+        }
+
+        panic!();
+    }
+}
+
+fn main() {}
diff --git a/src/test/compile-fail/token-error-correct.rs b/src/test/compile-fail/token-error-correct.rs
new file mode 100644 (file)
index 0000000..6c54acd
--- /dev/null
@@ -0,0 +1,20 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that we do some basic error correction in the tokeniser.
+
+fn main() {
+    foo(bar(; //~ NOTE: unclosed delimiter
+    //~^ NOTE: unclosed delimiter
+    //~^^ ERROR: unexpected token: `;`
+    //~^^^ ERROR: unresolved name `bar`
+    //~^^^^ ERROR: unresolved name `foo`
+} //~ ERROR: incorrect close delimiter: `}`
+//~^ ERROR: incorrect close delimiter: `}`
index a2369f8ffb4f2ca4b3cdf4a9bd8e98b7128ded12..3fffb2e19f289c5c96c390a30b7788d62b6f3cdf 100644 (file)
@@ -34,15 +34,15 @@ trait Foo {
 impl Foo for isize {
     // invalid bound for T, was defined as Eq in trait
     fn test_error1_fn<T: Ord>(&self) {}
-    //~^ ERROR the requirement `T : core::cmp::Ord` appears on the impl
+    //~^ ERROR the requirement `T: std::cmp::Ord` appears on the impl
 
     // invalid bound for T, was defined as Eq + Ord in trait
     fn test_error2_fn<T: Eq + B>(&self) {}
-    //~^ ERROR the requirement `T : B` appears on the impl
+    //~^ ERROR the requirement `T: B` appears on the impl
 
     // invalid bound for T, was defined as Eq + Ord in trait
     fn test_error3_fn<T: B + Eq>(&self) {}
-    //~^ ERROR the requirement `T : B` appears on the impl
+    //~^ ERROR the requirement `T: B` appears on the impl
 
     // multiple bounds, same order as in trait
     fn test3_fn<T: Ord + Eq>(&self) {}
@@ -52,16 +52,16 @@ impl Foo for isize {
 
     // parameters in impls must be equal or more general than in the defining trait
     fn test_error5_fn<T: B>(&self) {}
-    //~^ ERROR the requirement `T : B` appears on the impl
+    //~^ ERROR the requirement `T: B` appears on the impl
 
     // bound `std::cmp::Eq` not enforced by this implementation, but this is OK
     fn test6_fn<T: A>(&self) {}
 
     fn test_error7_fn<T: A + Eq>(&self) {}
-    //~^ ERROR the requirement `T : core::cmp::Eq` appears on the impl
+    //~^ ERROR the requirement `T: std::cmp::Eq` appears on the impl
 
     fn test_error8_fn<T: C>(&self) {}
-    //~^ ERROR the requirement `T : C` appears on the impl
+    //~^ ERROR the requirement `T: C` appears on the impl
 }
 
 trait Getter<T> {
index 01910939a80ebd6af13a30eb67a4e1a4646e0438..8d587b29ba989331d57800761c613be2056ab41d 100644 (file)
@@ -21,7 +21,7 @@ trait IteratorUtil<A>: Sized
 
 impl<A, T: Iterator<A>> IteratorUtil<A> for T {
     fn zip<B, U: Iterator<B>>(self, other: U) -> ZipIterator<T, U> {
-    //~^ ERROR the requirement `U : Iterator<B>` appears on the impl method
+    //~^ ERROR the requirement `U: Iterator<B>` appears on the impl method
         ZipIterator{a: self, b: other}
     }
 }
index e126a3040e99277741f5910c278787d49216d29b..fd46d1a62962c5c5518f6b916dfb7c2dfe8eb8e2 100644 (file)
@@ -15,7 +15,7 @@ trait Foo {
 // This should emit the less confusing error, not the more confusing one.
 
 fn foo(_x: Foo + Send) {
-    //~^ ERROR the trait `core::marker::Sized` is not implemented
+    //~^ ERROR `Foo + Send + 'static: std::marker::Sized` is not satisfied
 }
 
 fn main() { }
index dbfda61f5525a41a46283f78709a80006f54bac4..6a271a7b7497ffd84493874bf966c2c6b19b4c05 100644 (file)
@@ -21,10 +21,10 @@ enum Bar<T:Trait> {
 }
 
 fn explode(x: Foo<u32>) {}
-//~^ ERROR not implemented
+//~^ ERROR E0277
 
 fn kaboom(y: Bar<f32>) {}
-//~^ ERROR not implemented
+//~^ ERROR E0277
 
 fn main() {
 }
index c647dd38ee38a5aba56257c845e68aa53045cd26..77abe6f7f7473c084a0886d80ac66d9df2af839c 100644 (file)
@@ -28,7 +28,7 @@ trait PolyTrait<T>
 struct Struct;
 
 impl PolyTrait<Foo<u16>> for Struct {
-//~^ ERROR not implemented
+//~^ ERROR E0277
 }
 
 fn main() {
index 520691fbecc481b34985c4bb65c40cd6074609a1..9e680d17fb9ef5a8c865b8e1834d655f3f712109 100644 (file)
@@ -18,10 +18,10 @@ struct Foo<T:Trait> {
 
 fn main() {
     let foo = Foo {
-    //~^ ERROR not implemented
+    //~^ ERROR E0277
         x: 3
     };
 
     let baz: Foo<usize> = loop { };
-    //~^ ERROR not implemented
+    //~^ ERROR E0277
 }
index d93c9bafaef27c36c205a5e933f106810aac50f1..2b59fdcae3534a8658466f457bcca0cb277d1a79 100644 (file)
@@ -17,7 +17,7 @@ struct Foo<T:Trait> {
 }
 
 static X: Foo<usize> = Foo {
-//~^ ERROR not implemented
+//~^ ERROR E0277
     x: 1,
 };
 
index 5f95a7ca6e20455ba2959975c8228334fbee5ebd..975de00d02a33aa729057533a21374b0aaf72ef0 100644 (file)
@@ -15,10 +15,10 @@ extern crate trait_bounds_on_structs_and_enums_xc;
 use trait_bounds_on_structs_and_enums_xc::{Bar, Foo, Trait};
 
 fn explode(x: Foo<usize>) {}
-//~^ ERROR not implemented
+//~^ ERROR E0277
 
 fn kaboom(y: Bar<f32>) {}
-//~^ ERROR not implemented
+//~^ ERROR E0277
 
 fn main() {
 }
index 840787022e65cc736d40a70d35087032bdc81e7e..515684bcf42d275e585186be160a0edd4ea2805f 100644 (file)
@@ -16,10 +16,10 @@ use trait_bounds_on_structs_and_enums_xc::{Bar, Foo, Trait};
 
 fn main() {
     let foo = Foo {
-    //~^ ERROR not implemented
+    //~^ ERROR E0277
         x: 3
     };
     let bar: Bar<f64> = return;
-    //~^ ERROR not implemented
+    //~^ ERROR E0277
     let _ = bar;
 }
index e1b005b0c8533b3baf78494245c7744769ebea46..24e2418e8d45e97ce88c5148f13340d965043ce0 100644 (file)
@@ -21,32 +21,32 @@ enum Bar<T:Trait> {
 }
 
 impl<T> Foo<T> {
-//~^ ERROR the trait `Trait` is not implemented
+//~^ ERROR `T: Trait` is not satisfied
     fn uhoh() {}
 }
 
 struct Baz {
-    a: Foo<isize>, //~ ERROR not implemented
+    a: Foo<isize>, //~ ERROR E0277
 }
 
 enum Boo {
-    Quux(Bar<usize>), //~ ERROR not implemented
+    Quux(Bar<usize>), //~ ERROR E0277
 }
 
 struct Badness<U> {
-    b: Foo<U>, //~ ERROR not implemented
+    b: Foo<U>, //~ ERROR E0277
 }
 
 enum MoreBadness<V> {
-    EvenMoreBadness(Bar<V>), //~ ERROR not implemented
+    EvenMoreBadness(Bar<V>), //~ ERROR E0277
 }
 
 struct TupleLike(
-    Foo<i32>, //~ ERROR not implemented
+    Foo<i32>, //~ ERROR E0277
 );
 
 enum Enum {
-    DictionaryLike { field: Bar<u8> }, //~ ERROR not implemented
+    DictionaryLike { field: Bar<u8> }, //~ ERROR E0277
 }
 
 fn main() {
index b25af522b2476e35759af8a255ade6225020a8c0..dd64085f6f6668a17d8ab42553a0a61b0ab2659f 100644 (file)
@@ -25,6 +25,6 @@ impl Trait<&'static str> for Struct {
 fn main() {
     // FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
     let s: Box<Trait<isize>> = Box::new(Struct { person: "Fred" });
-    //~^ ERROR the trait `Trait<isize>` is not implemented for the type `Struct`
+    //~^ ERROR `Struct: Trait<isize>` is not satisfied
     s.f(1);
 }
index ba8101d16ab0f170239a811f14c718abcb0ec5fe..41700b25bbb726798de48eca9d0544c393220b56 100644 (file)
@@ -9,8 +9,8 @@
 // except according to those terms.
 
 trait Foo {
-    fn orange(&self);
-    fn orange(&self);   //~ ERROR error: duplicate definition of value `orange`
+    fn orange(&self); //~ NOTE previous definition of `orange` here
+    fn orange(&self); //~ ERROR a value named `orange` has already been defined in this trait
 }
 
 fn main() {}
index 21668fcfeae7139b4be7281dfa6a5cb90885f5be..5feef0a24eb0e93a717f61d52b5608410a97ef03 100644 (file)
@@ -20,7 +20,7 @@ struct S;
 impl m::Pub for S {}
 
 fn g<T: m::Pub>(arg: T) {
-    arg.f(); //~ ERROR: source trait is private
+    arg.f(); //~ ERROR: source trait `m::Priv` is private
 }
 
 fn main() {
index fc2ed83b2724d0a2d0be5e9b2b04ab1ca592fb28..29360e58b5bd3471424cdcb5d5e25c497ac31172 100644 (file)
@@ -13,8 +13,10 @@ fn main() {
     //~^ ERROR expected a path
     //~| HELP try adding parentheses
     //~| SUGGESTION let _: &(Copy + 'static);
+    //~| HELP run `rustc --explain E0178` to see a detailed explanation
     let _: &'static Copy + 'static;
     //~^ ERROR expected a path
     //~| HELP try adding parentheses
     //~| SUGGESTION let _: &'static (Copy + 'static);
+    //~| HELP run `rustc --explain E0178` to see a detailed explanation
 }
diff --git a/src/test/compile-fail/trait-privacy.rs b/src/test/compile-fail/trait-privacy.rs
new file mode 100644 (file)
index 0000000..5f9e8ba
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_attrs, get_type_id)]
+#![allow(dead_code)]
+
+mod foo {
+    pub use self::bar::T;
+    mod bar {
+        pub trait T {
+            fn f(&self) {}
+        }
+        impl T for () {}
+    }
+}
+
+fn g() {
+    use foo::T;
+    ().f(); // Check that this does not trigger a privacy error
+}
+
+fn f() {
+    let error = ::std::thread::spawn(|| {}).join().unwrap_err();
+    error.get_type_id(); // Regression test for #21670
+}
+
+#[rustc_error]
+fn main() {} //~ ERROR compilation successful
diff --git a/src/test/compile-fail/trait-suggest-where-clause.rs b/src/test/compile-fail/trait-suggest-where-clause.rs
new file mode 100644 (file)
index 0000000..6950bce
--- /dev/null
@@ -0,0 +1,67 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::mem;
+
+struct Misc<T:?Sized>(T);
+
+fn check<T: Iterator, U: ?Sized>() {
+    // suggest a where-clause, if needed
+    mem::size_of::<U>();
+    //~^ ERROR `U: std::marker::Sized` is not satisfied
+    //~| HELP E0277
+    //~| HELP consider adding a `where U: std::marker::Sized` bound
+    //~| NOTE required by `std::mem::size_of`
+
+    mem::size_of::<Misc<U>>();
+    //~^ ERROR `U: std::marker::Sized` is not satisfied
+    //~| HELP E0277
+    //~| HELP consider adding a `where U: std::marker::Sized` bound
+    //~| NOTE required because it appears within the type `Misc<U>`
+    //~| NOTE required by `std::mem::size_of`
+
+    // ... even if T occurs as a type parameter
+
+    <u64 as From<T>>::from;
+    //~^ ERROR `u64: std::convert::From<T>` is not satisfied
+    //~| HELP E0277
+    //~| HELP consider adding a `where u64: std::convert::From<T>` bound
+    //~| NOTE required by `std::convert::From::from`
+
+    <u64 as From<<T as Iterator>::Item>>::from;
+    //~^ ERROR `u64: std::convert::From<<T as std::iter::Iterator>::Item>` is not satisfied
+    //~| HELP E0277
+    //~| HELP consider adding a `where u64:
+    //~| NOTE required by `std::convert::From::from`
+
+    // ... but not if there are inference variables
+
+    <Misc<_> as From<T>>::from;
+    //~^ ERROR `Misc<_>: std::convert::From<T>` is not satisfied
+    //~| HELP E0277
+    //~| NOTE required by `std::convert::From::from`
+
+    // ... and also not if the error is not related to the type
+
+    mem::size_of::<[T]>();
+    //~^ ERROR `[T]: std::marker::Sized` is not satisfied
+    //~| HELP E0277
+    //~| NOTE `[T]` does not have a constant size
+    //~| NOTE required by `std::mem::size_of`
+
+    mem::size_of::<[&U]>();
+    //~^ ERROR `[&U]: std::marker::Sized` is not satisfied
+    //~| HELP E0277
+    //~| NOTE `[&U]` does not have a constant size
+    //~| NOTE required by `std::mem::size_of`
+}
+
+fn main() {
+}
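
The suggestions exercised above point in two directions: add the `Sized` bound the compiler proposes, or, when the type really is unsized, switch to the value-based query. A small sketch of both, using only stable `std::mem` calls:

use std::mem;

// With the suggested bound in place (`Sized` is implicit for type
// parameters; it is spelled out here for emphasis) the call compiles.
fn fixed_size<U: Sized>() -> usize {
    mem::size_of::<U>()
}

fn main() {
    assert_eq!(fixed_size::<u32>(), 4);

    // For genuinely unsized data, ask the value instead of the type;
    // the length travels with the fat reference.
    let bytes: &[u8] = &[1, 2, 3];
    assert_eq!(mem::size_of_val(bytes), 3);
}
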
diff --git a/src/test/compile-fail/traits-inductive-overflow-auto-normal-auto.rs b/src/test/compile-fail/traits-inductive-overflow-auto-normal-auto.rs
new file mode 100644 (file)
index 0000000..cdf4b40
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test for a potential corner case in current impl where you have an
+// auto trait (Magic1) that depends on a normal trait (Magic2) which
+// in turn depends on the auto trait (Magic1). This was incorrectly
+// being considered coinductive, but because of the normal trait
+// interfering, it should not be.
+
+#![feature(optin_builtin_traits)]
+
+trait Magic1: Magic2 { }
+impl Magic1 for .. {}
+
+trait Magic2 { }
+impl<T: Magic1> Magic2 for T { }
+
+fn is_magic1<T: Magic1>() { }
+
+#[derive(Debug)]
+struct NoClone;
+
+fn main() {
+    is_magic1::<NoClone>(); //~ ERROR E0275
+}
index c37f45dcf50bb8911f9c3f6a140162942ebe9c15..8014f92e1734438b5765e21caaaeb77f01395927 100644 (file)
@@ -31,8 +31,8 @@ fn dummy() {
     impl !Send for TestType {}
 
     Outer(TestType);
-    //~^ ERROR the trait `core::marker::Send` is not implemented for the type `dummy::TestType`
-    //~| ERROR the trait `core::marker::Send` is not implemented for the type `dummy::TestType`
+    //~^ ERROR `dummy::TestType: std::marker::Send` is not satisfied
+    //~| ERROR `dummy::TestType: std::marker::Send` is not satisfied
 }
 
 fn dummy1b() {
@@ -40,7 +40,7 @@ fn dummy1b() {
     impl !Send for TestType {}
 
     is_send(TestType);
-    //~^ ERROR the trait `core::marker::Send` is not implemented for the type `dummy1b::TestType`
+    //~^ ERROR `dummy1b::TestType: std::marker::Send` is not satisfied
 }
 
 fn dummy1c() {
@@ -48,7 +48,7 @@ fn dummy1c() {
     impl !Send for TestType {}
 
     is_send((8, TestType));
-    //~^ ERROR the trait `core::marker::Send` is not implemented for the type `dummy1c::TestType`
+    //~^ ERROR `dummy1c::TestType: std::marker::Send` is not satisfied
 }
 
 fn dummy2() {
@@ -56,7 +56,7 @@ fn dummy2() {
     impl !Send for TestType {}
 
     is_send(Box::new(TestType));
-    //~^ ERROR the trait `core::marker::Send` is not implemented for the type `dummy2::TestType`
+    //~^ ERROR `dummy2::TestType: std::marker::Send` is not satisfied
 }
 
 fn dummy3() {
@@ -64,7 +64,7 @@ fn dummy3() {
     impl !Send for TestType {}
 
     is_send(Box::new(Outer2(TestType)));
-    //~^ ERROR the trait `core::marker::Send` is not implemented for the type `dummy3::TestType`
+    //~^ ERROR `dummy3::TestType: std::marker::Send` is not satisfied
 }
 
 fn main() {
@@ -74,5 +74,5 @@ fn main() {
     // This will complain about a missing Send impl because `Sync` is implemented *just*
     // for T that are `Send`. Look at #20366 and #19950
     is_sync(Outer2(TestType));
-    //~^ ERROR the trait `core::marker::Send` is not implemented for the type `main::TestType`
+    //~^ ERROR `main::TestType: std::marker::Send` is not satisfied
 }
index d61ac6f08d99d603a0136e10e5a0c545dee0e75b..3fc0d638dd6f1880be39caa81b4c71bdb4e4e35c 100644 (file)
@@ -33,21 +33,21 @@ impl CompareTo<u64> for i64 {
 impl CompareToInts for i64 { }
 
 fn with_obj(c: &CompareToInts) -> bool {
-    c.same_as(22) //~ ERROR `CompareTo<i32>` is not implemented
+    c.same_as(22) //~ ERROR `CompareToInts: CompareTo<i32>` is not satisfied
 }
 
 fn with_trait<C:CompareToInts>(c: &C) -> bool {
-    c.same_as(22) //~ ERROR `CompareTo<i32>` is not implemented
+    c.same_as(22) //~ ERROR `C: CompareTo<i32>` is not satisfied
 }
 
 fn with_ufcs1<C:CompareToInts>(c: &C) -> bool {
-    CompareToInts::same_as(c, 22) //~ ERROR `CompareTo<i32>` is not implemented
+    CompareToInts::same_as(c, 22) //~ ERROR `CompareToInts: CompareTo<i32>` is not satisfied
 }
 
 fn with_ufcs2<C:CompareToInts>(c: &C) -> bool {
-    CompareTo::same_as(c, 22) //~ ERROR `CompareTo<i32>` is not implemented
+    CompareTo::same_as(c, 22) //~ ERROR `C: CompareTo<i32>` is not satisfied
 }
 
 fn main() {
-    assert_eq!(22_i64.same_as(22), true); //~ ERROR `CompareTo<i32>` is not implemented
+    assert_eq!(22_i64.same_as(22), true); //~ ERROR `i64: CompareTo<i32>` is not satisfied
 }
diff --git a/src/test/compile-fail/transmute-from-fn-item-types-error.rs b/src/test/compile-fail/transmute-from-fn-item-types-error.rs
new file mode 100644 (file)
index 0000000..50bcd53
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::mem;
+
+unsafe fn bar() {
+    // Error, still, if the resulting type is not pointer-sized.
+    mem::transmute::<_, u8>(main);
+    //~^ ERROR transmute called with differently sized types
+}
+
+fn main() {
+    unsafe {
+        bar();
+    }
+}
diff --git a/src/test/compile-fail/transmute-from-fn-item-types-lint.rs b/src/test/compile-fail/transmute-from-fn-item-types-lint.rs
new file mode 100644 (file)
index 0000000..42c3cb7
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(transmute_from_fn_item_types)]
+
+use std::mem;
+
+unsafe fn foo() -> (isize, *const (), Option<fn()>) {
+    let i = mem::transmute(bar);
+    //~^ ERROR is now zero-sized and has to be cast to a pointer before transmuting
+    //~^^ ERROR was previously accepted
+
+    let p = mem::transmute(foo);
+    //~^ ERROR is now zero-sized and has to be cast to a pointer before transmuting
+    //~^^ ERROR was previously accepted
+
+    let of = mem::transmute(main);
+    //~^ ERROR is now zero-sized and has to be cast to a pointer before transmuting
+    //~^^ ERROR was previously accepted
+
+    (i, p, of)
+}
+
+unsafe fn bar() {
+    mem::transmute::<_, *mut ()>(foo);
+    //~^ ERROR is now zero-sized and has to be cast to a pointer before transmuting
+    //~^^ ERROR was previously accepted
+
+    mem::transmute::<_, fn()>(bar);
+    //~^ ERROR is now zero-sized and has to be cast to a pointer before transmuting
+    //~^^ ERROR was previously accepted
+
+    // No error if a coercion would otherwise occur.
+    mem::transmute::<fn(), usize>(main);
+}
+
+fn main() {
+    unsafe {
+        foo();
+        bar();
+    }
+}
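
The migration this lint asks for is mechanical: cast the zero-sized fn item to a function pointer, which is pointer-sized, before transmuting. A minimal sketch of the accepted form; `callback` is only a placeholder function.

use std::mem;

fn callback() {}

fn main() {
    // `callback as fn()` is a real pointer, so the sizes match and the
    // transmute to `usize` compiles without the lint firing.
    let addr: usize = unsafe { mem::transmute(callback as fn()) };
    assert!(addr != 0);
}
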
index b06966bd867ca86e2969b005263881bf4dfd382e..b6e7e32663ec122669b3bd2e50cf257f9903afe7 100644 (file)
 use std::mem::transmute;
 
 unsafe fn f<T>(x: T) {
-    let _: isize = transmute(x);  //~ ERROR cannot transmute
+    let _: isize = transmute(x);
+//~^ ERROR differently sized types: T (size can vary) to isize
 }
 
 unsafe fn g<T>(x: (T, isize)) {
-    let _: isize = transmute(x);  //~ ERROR cannot transmute
+    let _: isize = transmute(x);
+//~^ ERROR differently sized types: (T, isize) (size can vary because of T) to isize
 }
 
 unsafe fn h<T>(x: [T; 10]) {
-    let _: isize = transmute(x);  //~ ERROR cannot transmute
+    let _: isize = transmute(x);
+//~^ ERROR differently sized types: [T; 10] (size can vary because of T) to isize
 }
 
 struct Bad<T> {
@@ -29,7 +32,8 @@ struct Bad<T> {
 }
 
 unsafe fn i<T>(x: Bad<T>) {
-    let _: isize = transmute(x);  //~ ERROR cannot transmute
+    let _: isize = transmute(x);
+//~^ ERROR differently sized types: Bad<T> (size can vary because of T) to isize
 }
 
 enum Worse<T> {
@@ -38,11 +42,13 @@ enum Worse<T> {
 }
 
 unsafe fn j<T>(x: Worse<T>) {
-    let _: isize = transmute(x);  //~ ERROR cannot transmute
+    let _: isize = transmute(x);
+//~^ ERROR differently sized types: Worse<T> (size can vary because of T) to isize
 }
 
 unsafe fn k<T>(x: Option<T>) {
-    let _: isize = transmute(x);  //~ ERROR cannot transmute
+    let _: isize = transmute(x);
+//~^ ERROR differently sized types: std::option::Option<T> (size can vary because of T) to isize
 }
 
 fn main() {}
index 3119b865488e896b1a5cba0c195c2532778d72fa..7693d98a2f7b0c8f3927d531bcc7a5e8a438efd3 100644 (file)
@@ -76,7 +76,7 @@ pub fn main() {
 
     // functions
     fn baz(_x: i32) {}
-    let _ = &baz as &Fn(i32); //~ERROR trivial cast: `&fn(i32) {main::baz}` as `&core::ops::Fn(i32)`
+    let _ = &baz as &Fn(i32); //~ERROR trivial cast: `&fn(i32) {main::baz}` as `&std::ops::Fn(i32)`
     let _: &Fn(i32) = &baz;
     let x = |_x: i32| {};
     let _ = &x as &Fn(i32); //~ERROR trivial cast
index 756f5d4547a16b19ee25f193e5a2337f6f78dc2f..4712e2b65e16abeb4084729435e402e3c2733df3 100644 (file)
@@ -14,7 +14,7 @@ macro_rules! Id {
 
 struct Foo<T> {
     x: Id!(T)
-    //~^ ERROR: type macros are experimental (see issue #27336)
+    //~^ ERROR: type macros are experimental (see issue #27245)
 }
 
 fn main() {
index 71e9113603a6061a50e20fa57dd62ae06cc7022f..d07282763d85b398f44893fed97f11e6cc11640f 100644 (file)
@@ -17,13 +17,13 @@ trait Tr<T> : Sized {
 
 trait A:    Tr<Self> {
     fn test<U>(u: U) -> Self {
-        Tr::op(u)   //~ ERROR not implemented
+        Tr::op(u)   //~ ERROR E0277
     }
 }
 
 trait B<T>: Tr<T> {
     fn test<U>(u: U) -> Self {
-        Tr::op(u)   //~ ERROR not implemented
+        Tr::op(u)   //~ ERROR E0277
     }
 }
 
index 8a9d53731c545ca3788012d12d5cdc0d3a56f59a..f8342c333a3638cc7bc42e9ecb3f5d1db3e84b49 100644 (file)
@@ -16,7 +16,7 @@ trait Trait {
     fn dummy(&self) { }
 }
 fn bar<T:Trait+Send>() {
-    is_send::<T::AssocType>(); //~ ERROR not implemented
+    is_send::<T::AssocType>(); //~ ERROR E0277
 }
 
 fn is_send<T:Send>() {
index a27f7f7ebbe0f4b405d5d701bbb8309bd9de464f..8a46d6c76c30fad5dd6a9a20c8c87c4b893ff544 100644 (file)
@@ -26,5 +26,5 @@ fn main() {
     is_mytrait::<MyS>();
 
     is_mytrait::<(MyS2, MyS)>();
-    //~^ ERROR the trait `MyTrait` is not implemented for the type `MyS2`
+    //~^ ERROR `MyS2: MyTrait` is not satisfied
 }
index 24819bb4f08d63a51f7db7752df8bfbdf14766f3..3d7746b369cc0a65315df93151bf3a63da5f2956 100644 (file)
@@ -29,5 +29,5 @@ fn main() {
     is_mytrait::<MyS>();
 
     is_mytrait::<MyS2>();
-    //~^ ERROR the trait `MyTrait` is not implemented for the type `MyS2`
+    //~^ ERROR `MyS2: MyTrait` is not satisfied
 }
index db4d1fe485b32d33e13d15601d016f78930c01d4..853718f1e77d08ddcc68ca765eaa4173550cc76e 100644 (file)
@@ -27,5 +27,5 @@ fn is_send<T: Send>() {}
 fn main() {
     is_send::<MySendable>();
     is_send::<MyNotSendable>();
-    //~^ ERROR the trait `core::marker::Send` is not implemented for the type `MyNotSendable`
+    //~^ ERROR `MyNotSendable: std::marker::Send` is not satisfied
 }
index 7c5d1d93b1a7f673be6d9d9b8a64a4f99d1950b7..cdf787a60ad43a9f7a58c716e1a400838f713c5e 100644 (file)
@@ -43,11 +43,11 @@ fn is_sync<T: Sync>() {}
 fn main() {
     is_sync::<MySync>();
     is_sync::<MyNotSync>();
-    //~^ ERROR the trait `core::marker::Sync` is not implemented for the type `MyNotSync`
+    //~^ ERROR `MyNotSync: std::marker::Sync` is not satisfied
 
     is_sync::<MyTypeWUnsafe>();
-    //~^ ERROR the trait `core::marker::Sync` is not implemented for the type `core::cell::UnsafeCell<u8>`
+    //~^ ERROR `std::cell::UnsafeCell<u8>: std::marker::Sync` is not satisfied
 
     is_sync::<MyTypeManaged>();
-    //~^ ERROR the trait `core::marker::Sync` is not implemented for the type `Managed`
+    //~^ ERROR `Managed: std::marker::Sync` is not satisfied
 }
index 4b91d0b7a736c6f1c1f5d4b692c23fa854333894..8c2658b89a506f0cb3f49269544ff27c067c1349 100644 (file)
@@ -33,10 +33,10 @@ fn is_my_unsafe_trait<T: MyUnsafeTrait>() {}
 fn main() {
     is_my_trait::<ThisImplsTrait>();
     is_my_trait::<ThisImplsUnsafeTrait>();
-    //~^ ERROR the trait `MyTrait` is not implemented for the type `ThisImplsUnsafeTrait`
+    //~^ ERROR `ThisImplsUnsafeTrait: MyTrait` is not satisfied
 
     is_my_unsafe_trait::<ThisImplsTrait>();
-    //~^ ERROR the trait `MyUnsafeTrait` is not implemented for the type `ThisImplsTrait`
+    //~^ ERROR `ThisImplsTrait: MyUnsafeTrait` is not satisfied
 
     is_my_unsafe_trait::<ThisImplsUnsafeTrait>();
 }
index c67fc92313c3a58d29e714db80e0b8003cb55419..66c7a1c75ffe47a71ea93ddbd6b75247f46d941e 100644 (file)
@@ -27,5 +27,5 @@ impl Signed for i32 { }
 fn main() {
     is_defaulted::<&'static i32>();
     is_defaulted::<&'static u32>();
-    //~^ ERROR the trait `Signed` is not implemented for the type `u32`
+    //~^ ERROR `u32: Signed` is not satisfied
 }
index 185e9dcb3bd91d32217f85f18da308216471694c..0c548b3bd990961465eab24ea223550b31f5b943 100644 (file)
@@ -12,7 +12,7 @@
 // an explicit trait bound.
 
 fn foo<T>() {
-    is_send::<T>() //~ ERROR not implemented
+    is_send::<T>() //~ ERROR E0277
 }
 
 fn is_send<T:Send>() {
index c9bfdff6c0e495cd78a089e0d4b2d96fcc2fe421..0b071a9acd0921cedcbde0626bd524e65b6c435f 100644 (file)
@@ -24,6 +24,6 @@ fn foo<T:MyTrait>() { bar::<T>() }
 fn bar<T:NotImplemented>() { }
 
 fn main() {
-    foo::<i32>(); //~ ERROR the trait `NotImplemented` is not implemented for the type `i32`
-    bar::<i64>(); //~ ERROR the trait `NotImplemented` is not implemented for the type `i64`
+    foo::<i32>(); //~ ERROR `i32: NotImplemented` is not satisfied
+    bar::<i64>(); //~ ERROR `i64: NotImplemented` is not satisfied
 }
index ad58ae92b46e2139eadfde8c3881550d16f61ff3..3085f45a83dd1f3d393376232ec540dbb445ea72 100644 (file)
@@ -29,7 +29,7 @@ fn bar<T:NotImplemented>() { }
 
 fn test() {
     bar::<Option<i32>>();
-    //~^ ERROR the trait `NotImplemented` is not implemented for the type `core::option::Option<i32>`
+    //~^ ERROR `std::option::Option<i32>: NotImplemented` is not satisfied
 }
 
 fn main() {
index ff8fbd49574917f78aac78c181fc5032a2aea6e0..47e87c09d12b1c7e1f908ca155a5016c195da1f0 100644 (file)
@@ -26,7 +26,7 @@ impl NotImplemented for i32 {}
 impl MyTrait for .. {}
 
 fn foo<T:MyTrait>() {
-    //~^ ERROR the trait `NotImplemented` is not implemented for the type `core::option::Option<T>`
+    //~^ ERROR `std::option::Option<T>: NotImplemented` is not satisfied
     // This should probably typecheck. This is #20671.
 }
 
index 1cb6bd048ccc362acbe7901daa9ac76089835211..6047f6770a7bdc17422c88bdb54f1e182f4ecc96 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// Verify that UnsafeCell is *always* sync regardless if `T` is sync.
+// Verify that UnsafeCell is *always* !Sync regardless of whether `T` is Sync.
 
 #![feature(optin_builtin_traits)]
 
@@ -27,16 +27,16 @@ fn test<T: Sync>(s: T) {}
 fn main() {
     let us = UnsafeCell::new(MySync{u: UnsafeCell::new(0)});
     test(us);
-    //~^ ERROR `core::marker::Sync` is not implemented
+    //~^ ERROR `std::cell::UnsafeCell<MySync<_>>: std::marker::Sync` is not satisfied
 
     let uns = UnsafeCell::new(NoSync);
     test(uns);
-    //~^ ERROR `core::marker::Sync` is not implemented
+    //~^ ERROR `std::cell::UnsafeCell<NoSync>: std::marker::Sync` is not satisfied
 
     let ms = MySync{u: uns};
     test(ms);
-    //~^ ERROR `core::marker::Sync` is not implemented
+    //~^ ERROR `std::cell::UnsafeCell<NoSync>: std::marker::Sync` is not satisfied
 
     test(NoSync);
-    //~^ ERROR `core::marker::Sync` is not implemented
+    //~^ ERROR `NoSync: std::marker::Sync` is not satisfied
 }
index 2e8f9d42e789547cba427470d8a9fcf6bfd79140..a24515c5160987dcbc16e5cebc698e09326404b4 100644 (file)
@@ -8,7 +8,17 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use std::borrow::IntoCow;
+use std::borrow::Cow;
+
+pub trait IntoCow<'a, B: ?Sized> where B: ToOwned {
+    fn into_cow(self) -> Cow<'a, B>;
+}
+
+impl<'a> IntoCow<'a, str> for String {
+    fn into_cow(self) -> Cow<'a, str> {
+        Cow::Owned(self)
+    }
+}
 
 fn main() {
     <String as IntoCow>::into_cow("foo".to_string());
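
The hunk above inlines a local `IntoCow` because the unstable `std::borrow::IntoCow` it used to import is no longer available. Outside this test, the surviving way to build a `Cow` is through the plain `From` conversions on `Cow` itself; a short sketch with stable APIs:

use std::borrow::Cow;

fn main() {
    // Owned and borrowed constructions through `From`, replacing the
    // old `into_cow` calls.
    let owned: Cow<str> = Cow::from("foo".to_string());
    let borrowed: Cow<str> = Cow::from("foo");
    assert_eq!(owned, borrowed);
}
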
index 8e60064beca170487a7b0834d63e90d86f62d4e6..94a98b1582af102c9851de6b226a739bf6db8890 100644 (file)
@@ -12,7 +12,7 @@ use std::ops::Add;
 
 fn main() {
     <i32 as Add<u32>>::add(1, 2);
-    //~^ ERROR the trait `core::ops::Add<u32>` is not implemented for the type `i32`
+    //~^ ERROR `i32: std::ops::Add<u32>` is not satisfied
     <i32 as Add<i32>>::add(1u32, 2);
     //~^ ERROR mismatched types
     <i32 as Add<i32>>::add(1, 2u32);
index 831db98941c6dc2ac9165eab6e987b3168dfdcd1..849f7e0573cffdca02ebc120a4db5969a7c6c9bd 100644 (file)
@@ -29,7 +29,7 @@ fn test<'a,'b>() {
 
     // In angle version, we supply something other than the default
     eq::< Foo<(isize,),isize,Output=()>,      Foo(isize)                      >();
-    //~^ ERROR not implemented
+    //~^ ERROR E0277
 
     // Supply default explicitly.
     eq::< Foo<(isize,),(isize,),Output=()>,   Foo(isize)                      >();
index dc5576aee650a56ed814b9bbcd9abc09d5f24723..0cf44a2ca61c27cf94ae31bf0e00fe371db2823a 100644 (file)
@@ -52,7 +52,7 @@ fn test<'a,'b>() {
     // Errors expected:
     eq::< Foo<(),Output=()>,
           Foo(char)                                               >();
-    //~^^ ERROR not implemented
+    //~^^ ERROR E0277
 }
 
 fn main() { }
index 93498ac7f83514c1768d1babc95dcd640c3435e2..b25b331880679fc2a638859dadca0f8275b5b392 100644 (file)
@@ -36,5 +36,5 @@ fn call_it<F:Fn(isize)->isize>(f: &F, x: isize) -> isize {
 
 fn main() {
     let x = call_it(&S, 22);
-    //~^ ERROR not implemented
+    //~^ ERROR E0277
 }
index 3dd5779914d06b63d26432eb66a69ab045c2ebd5..1e2b01856e71bb923d34fb6557684479e1943042 100644 (file)
@@ -24,6 +24,6 @@ fn doit<T,F>(val: T, f: &F)
 
 pub fn main() {
     doit(0, &|x, y| {
-        x.set(y); //~ ERROR cannot infer
+        x.set(y); //~ ERROR E0312
     });
 }
index 361df93a71669ed3ccdb7a22e27415efb587a9dd..cba7ad82ee16343ee3a5b8fa97d6a75ceee734e0 100644 (file)
@@ -22,19 +22,19 @@ fn call_it_once<F:FnOnce(&isize)->isize>(_: F, _: isize) -> isize { 0 }
 
 fn a() {
     let x = call_it(&square, 22);
-    //~^ ERROR not implemented
-    //~| ERROR not implemented
+    //~^ ERROR E0277
+    //~| ERROR E0277
 }
 
 fn b() {
     let y = call_it_mut(&mut square, 22);
-    //~^ ERROR not implemented
-    //~| ERROR not implemented
+    //~^ ERROR E0277
+    //~| ERROR E0277
 }
 
 fn c() {
     let z = call_it_once(square, 22);
-    //~^ ERROR not implemented
+    //~^ ERROR E0277
 }
 
 fn main() { }
index ca15d1bb5eefc83f11278776d3d9ba9bbab47784..dd891bc473cef97a9216b307694fe45a8cd12f75 100644 (file)
@@ -22,19 +22,19 @@ fn call_it_once<F:FnOnce(&isize)->isize>(_: F, _: isize) -> isize { 0 }
 
 fn a() {
     let x = call_it(&square, 22);
-    //~^ ERROR not implemented
-    //~| ERROR not implemented
+    //~^ ERROR E0277
+    //~| ERROR E0277
 }
 
 fn b() {
     let y = call_it_mut(&mut square, 22);
-    //~^ ERROR not implemented
-    //~| ERROR not implemented
+    //~^ ERROR E0277
+    //~| ERROR E0277
 }
 
 fn c() {
     let z = call_it_once(square, 22);
-    //~^ ERROR not implemented
+    //~^ ERROR E0277
 }
 
 fn main() { }
index b960362aad7cd36c3f0d97bafa4507b9087ad2cd..f9edd5df6739f22c88cae7b45fd719bd23038c35 100644 (file)
@@ -23,19 +23,19 @@ fn call_it_once<F:FnOnce(&isize)->isize>(_: F, _: isize) -> isize { 0 }
 
 fn a() {
     let x = call_it(&square, 22);
-    //~^ ERROR not implemented
-    //~| ERROR not implemented
+    //~^ ERROR E0277
+    //~| ERROR E0277
 }
 
 fn b() {
     let y = call_it_mut(&mut square, 22);
-    //~^ ERROR not implemented
-    //~| ERROR not implemented
+    //~^ ERROR E0277
+    //~| ERROR E0277
 }
 
 fn c() {
     let z = call_it_once(square, 22);
-    //~^ ERROR not implemented
+    //~^ ERROR E0277
 }
 
 fn main() { }
diff --git a/src/test/compile-fail/unique-unique-kind.rs b/src/test/compile-fail/unique-unique-kind.rs
deleted file mode 100644 (file)
index 86fe4da..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use std::rc::Rc;
-
-fn f<T:Send>(__isize: T) {
-}
-
-fn main() {
-    // FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
-    let i = Box::new(Rc::new(100));
-    f(i);
-    //~^ ERROR `core::marker::Send` is not implemented
-}
diff --git a/src/test/compile-fail/unique-vec-res.rs b/src/test/compile-fail/unique-vec-res.rs
deleted file mode 100644 (file)
index 6aa0834..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use std::cell::Cell;
-
-#[derive(Debug)]
-struct r<'a> {
-  i: &'a Cell<isize>,
-}
-
-impl<'a> Drop for r<'a> {
-    fn drop(&mut self) {
-        unsafe {
-            self.i.set(self.i.get() + 1);
-        }
-    }
-}
-
-fn f<T>(__isize: Vec<T> , _j: Vec<T> ) {
-}
-
-fn clone<T: Clone>(t: &T) -> T { t.clone() }
-
-fn main() {
-    let i1 = &Cell::new(0);
-    let i2 = &Cell::new(1);
-    // FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
-    let r1 = vec!(Box::new(r { i: i1 }));
-    let r2 = vec!(Box::new(r { i: i2 }));
-    f(clone(&r1), clone(&r2));
-    //~^ ERROR the trait `core::clone::Clone` is not implemented for the type
-    //~^^ ERROR the trait `core::clone::Clone` is not implemented for the type
-    println!("{:?}", (r2, i1.get()));
-    println!("{:?}", (r1, i2.get()));
-}
diff --git a/src/test/compile-fail/unnecessary-private.rs b/src/test/compile-fail/unnecessary-private.rs
deleted file mode 100644 (file)
index 1133934..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-fn main() {
-    pub use std::usize; //~ ERROR: visibility has no effect
-    pub struct A; //~ ERROR: visibility has no effect
-    pub enum B {} //~ ERROR: visibility has no effect
-    pub trait C { //~ ERROR: visibility has no effect
-        fn foo(&self) {}
-    }
-    impl A {
-        pub fn foo(&self) {} //~ ERROR: visibility has no effect
-    }
-
-    struct D {
-        pub foo: isize, //~ ERROR: visibility has no effect
-    }
-    pub fn foo() {} //~ ERROR: visibility has no effect
-    pub mod bar {} //~ ERROR: visibility has no effect
-}
index c2ee62c195cb0155478a3322c7a4762678185c9e..cc328d8c9e919bcd38dd9947e311208889bfc82b 100644 (file)
@@ -10,6 +10,6 @@
 
 extern crate core;
 use core;
-//~^ ERROR import `core` conflicts with imported crate in this module
+//~^ ERROR an extern crate named `core` has already been imported in this module
 
 fn main() {}
index e28c082c80c4aebf54f5401cdc2163091fe71486..3dcc7d248d72db316eb7848174d9f19ae48c38de 100644 (file)
@@ -9,5 +9,5 @@
 // except according to those terms.
 
 fn bar<T: Sized>() { }
-fn foo<T: ?Sized>() { bar::<T>() } //~ ERROR the trait `core::marker::Sized` is not implemented
+fn foo<T: ?Sized>() { bar::<T>() } //~ ERROR `T: std::marker::Sized` is not satisfied
 fn main() { }
index df961978086e7aa6958f01371247025757499b23..61b2b01b355841d5b998bbd74d1e882a23f983fc 100644 (file)
@@ -15,14 +15,14 @@ fn not_sized<T: ?Sized>() { }
 enum Foo<U> { FooSome(U), FooNone }
 fn foo1<T>() { not_sized::<Foo<T>>() } // Hunky dory.
 fn foo2<T: ?Sized>() { not_sized::<Foo<T>>() }
-//~^ ERROR the trait `core::marker::Sized` is not implemented
+//~^ ERROR `T: std::marker::Sized` is not satisfied
 //
 // Not OK: `T` is not sized.
 
 enum Bar<U: ?Sized> { BarSome(U), BarNone }
 fn bar1<T: ?Sized>() { not_sized::<Bar<T>>() }
 fn bar2<T: ?Sized>() { is_sized::<Bar<T>>() }
-//~^ ERROR the trait `core::marker::Sized` is not implemented
+//~^ ERROR `T: std::marker::Sized` is not satisfied
 //
 // Not OK: `Bar<T>` is not sized, but it should be.
 
index a03c76b12dd8ea141dce5ddd1171b815590c3157..4d0774f2ce44157743fbdc6135a37874b7808eb7 100644 (file)
@@ -14,7 +14,7 @@
 
 struct S5<Y>(Y);
 
-impl<X: ?Sized> S5<X> { //~ ERROR not implemented
+impl<X: ?Sized> S5<X> { //~ ERROR E0277
 }
 
 fn main() { }
index 7b9fc2b468b78fbf34cb751441dba2bea0ef7626..bbefb2fcecd80f7ba78fcd03abedf8d7fa702e7e 100644 (file)
@@ -15,14 +15,14 @@ fn not_sized<T: ?Sized>() { }
 struct Foo<T> { data: T }
 fn foo1<T>() { not_sized::<Foo<T>>() } // Hunky dory.
 fn foo2<T: ?Sized>() { not_sized::<Foo<T>>() }
-//~^ ERROR the trait `core::marker::Sized` is not implemented
+//~^ ERROR `T: std::marker::Sized` is not satisfied
 //
 // Not OK: `T` is not sized.
 
 struct Bar<T: ?Sized> { data: T }
 fn bar1<T: ?Sized>() { not_sized::<Bar<T>>() }
 fn bar2<T: ?Sized>() { is_sized::<Bar<T>>() }
-//~^ ERROR the trait `core::marker::Sized` is not implemented
+//~^ ERROR `T: std::marker::Sized` is not satisfied
 //
 // Not OK: `Bar<T>` is not sized, but it should be.
 
index 08df1d9b7b8fb8a384912f0258e7bcbdfadb2b53..c919bdf924f6566bb8b707451d7b860e3e397e31 100644 (file)
@@ -17,7 +17,7 @@ trait T3<Z: ?Sized> {
 
 struct S5<Y>(Y);
 
-impl<X: ?Sized> T3<X> for S5<X> { //~ ERROR not implemented
+impl<X: ?Sized> T3<X> for S5<X> { //~ ERROR E0277
 }
 
 fn main() { }
index 4723dfeaeb988393150e30af3cc515eef3806778..ad5e4c2daef9ea010cc8eb8f086f7a2721195e53 100644 (file)
@@ -16,7 +16,7 @@ trait T2<Z> {
 }
 struct S4<Y: ?Sized>(Box<Y>);
 impl<X: ?Sized> T2<X> for S4<X> {
-    //~^ ERROR `core::marker::Sized` is not implemented for the type `X`
+    //~^ ERROR `X: std::marker::Sized` is not satisfied
 }
 
 fn main() { }
index de1cbab82b281e373002a1b2d4177a1ab1cd6791..f88165c02e988cd5e34bc3af44292b6cdb0d2a9c 100644 (file)
@@ -15,7 +15,7 @@ use std::marker;
 // Unbounded.
 fn f1<X: ?Sized>(x: &X) {
     f2::<X>(x);
-    //~^ ERROR the trait `core::marker::Sized` is not implemented
+    //~^ ERROR `X: std::marker::Sized` is not satisfied
 }
 fn f2<X>(x: &X) {
 }
@@ -26,7 +26,7 @@ trait T {
 }
 fn f3<X: ?Sized + T>(x: &X) {
     f4::<X>(x);
-    //~^ ERROR the trait `core::marker::Sized` is not implemented
+    //~^ ERROR `X: std::marker::Sized` is not satisfied
 }
 fn f4<X: T>(x: &X) {
 }
@@ -40,7 +40,7 @@ fn f5<Y>(x: &Y) {}
 fn f6<X: ?Sized>(x: &X) {}
 fn f7<X: ?Sized>(x1: &E<X>, x2: &E<X>) {
     f5(x1);
-    //~^ ERROR the trait `core::marker::Sized` is not implemented
+    //~^ ERROR `X: std::marker::Sized` is not satisfied
     f6(x2); // ok
 }
 
@@ -52,19 +52,19 @@ struct S<X: ?Sized> {
 
 fn f8<X: ?Sized>(x1: &S<X>, x2: &S<X>) {
     f5(x1);
-    //~^ ERROR the trait `core::marker::Sized` is not implemented
+    //~^ ERROR `X: std::marker::Sized` is not satisfied
     f6(x2); // ok
 }
 
 // Test some tuples.
 fn f9<X: ?Sized>(x1: Box<S<X>>, x2: Box<E<X>>) {
     f5(&(*x1, 34));
-    //~^ ERROR the trait `core::marker::Sized` is not implemented
+    //~^ ERROR `X: std::marker::Sized` is not satisfied
 }
 
 fn f10<X: ?Sized>(x1: Box<S<X>>, x2: Box<E<X>>) {
     f5(&(32, *x2));
-    //~^ ERROR the trait `core::marker::Sized` is not implemented
+    //~^ ERROR `X: std::marker::Sized` is not satisfied
 }
 
 pub fn main() {
index afa574bf85a8d950f5c2d77f61b336c270e304a7..3e6c9cc4061e1cb8ff7a02bf94e5048404f4d410 100644 (file)
 // Test `?Sized` types not allowed in fields (except the last one).
 
 struct S1<X: ?Sized> {
-    f1: X, //~ ERROR `core::marker::Sized` is not implemented
+    f1: X, //~ ERROR `X: std::marker::Sized` is not satisfied
     f2: isize,
 }
 struct S2<X: ?Sized> {
     f: isize,
-    g: X, //~ ERROR `core::marker::Sized` is not implemented
+    g: X, //~ ERROR `X: std::marker::Sized` is not satisfied
     h: isize,
 }
 struct S3 {
-    f: str, //~ ERROR `core::marker::Sized` is not implemented
+    f: str, //~ ERROR `str: std::marker::Sized` is not satisfied
     g: [usize]
 }
 struct S4 {
-    f: [u8], //~ ERROR `core::marker::Sized` is not implemented
+    f: [u8], //~ ERROR `[u8]: std::marker::Sized` is not satisfied
     g: usize
 }
 enum E<X: ?Sized> {
-    V1(X, isize), //~ERROR `core::marker::Sized` is not implemented
+    V1(X, isize), //~ERROR `X: std::marker::Sized` is not satisfied
 }
 enum F<X: ?Sized> {
-    V2{f1: X, f: isize}, //~ERROR `core::marker::Sized` is not implemented
+    V2{f1: X, f: isize}, //~ERROR `X: std::marker::Sized` is not satisfied
 }
 
 pub fn main() {
index 3f18f359d306e711ee4be2c654e0382d38a8d8c7..663cb0a17161a6b26263bc3f6b7a8f090c77bcce 100644 (file)
@@ -15,27 +15,27 @@ trait T {}
 fn f1<X: ?Sized>(x: &X) {
     let _: X; // <-- this is OK, no bindings created, no initializer.
     let _: (isize, (X, isize)); // same
-    let y: X; //~ERROR the trait `core::marker::Sized` is not implemented
-    let y: (isize, (X, isize)); //~ERROR the trait `core::marker::Sized` is not implemented
+    let y: X; //~ERROR `X: std::marker::Sized` is not satisfied
+    let y: (isize, (X, isize)); //~ERROR `X: std::marker::Sized` is not satisfied
 }
 fn f2<X: ?Sized + T>(x: &X) {
-    let y: X; //~ERROR the trait `core::marker::Sized` is not implemented
-    let y: (isize, (X, isize)); //~ERROR the trait `core::marker::Sized` is not implemented
+    let y: X; //~ERROR `X: std::marker::Sized` is not satisfied
+    let y: (isize, (X, isize)); //~ERROR `X: std::marker::Sized` is not satisfied
 }
 
 fn f3<X: ?Sized>(x1: Box<X>, x2: Box<X>, x3: Box<X>) {
-    let y: X = *x1; //~ERROR the trait `core::marker::Sized` is not implemented
-    let y = *x2; //~ERROR the trait `core::marker::Sized` is not implemented
-    let (y, z) = (*x3, 4); //~ERROR the trait `core::marker::Sized` is not implemented
+    let y: X = *x1; //~ERROR `X: std::marker::Sized` is not satisfied
+    let y = *x2; //~ERROR `X: std::marker::Sized` is not satisfied
+    let (y, z) = (*x3, 4); //~ERROR `X: std::marker::Sized` is not satisfied
 }
 fn f4<X: ?Sized + T>(x1: Box<X>, x2: Box<X>, x3: Box<X>) {
-    let y: X = *x1;         //~ERROR the trait `core::marker::Sized` is not implemented
-    let y = *x2;            //~ERROR the trait `core::marker::Sized` is not implemented
-    let (y, z) = (*x3, 4); //~ERROR the trait `core::marker::Sized` is not implemented
+    let y: X = *x1;         //~ERROR `X: std::marker::Sized` is not satisfied
+    let y = *x2;            //~ERROR `X: std::marker::Sized` is not satisfied
+    let (y, z) = (*x3, 4); //~ERROR `X: std::marker::Sized` is not satisfied
 }
 
-fn g1<X: ?Sized>(x: X) {} //~ERROR the trait `core::marker::Sized` is not implemented
-fn g2<X: ?Sized + T>(x: X) {} //~ERROR the trait `core::marker::Sized` is not implemented
+fn g1<X: ?Sized>(x: X) {} //~ERROR `X: std::marker::Sized` is not satisfied
+fn g2<X: ?Sized + T>(x: X) {} //~ERROR `X: std::marker::Sized` is not satisfied
 
 pub fn main() {
 }
index 0245a1b5cf2c8628e39abbf80b87c47d5c868b88..25868c594feb675326960008eff1724b43b84763 100644 (file)
@@ -20,7 +20,7 @@ trait T1<Z: T> {
 
 struct S3<Y: ?Sized>(Box<Y>);
 impl<X: ?Sized + T> T1<X> for S3<X> {
-    //~^ ERROR `core::marker::Sized` is not implemented for the type `X`
+    //~^ ERROR `X: std::marker::Sized` is not satisfied
 }
 
 fn main() { }
index bd954272fcca24533680bd025a0b984e8f28e553..cce500800caca0f842c8354f923482fa9a87d076 100644 (file)
@@ -12,8 +12,7 @@ use foo::bar::{
     self //~ ERROR module `bar` is private
 };
 use foo::bar::{
-    Bar //~ ERROR type `Bar` is inaccessible
-    //~^ NOTE module `bar` is private
+    Bar //~ ERROR module `bar` is private
 };
 
 mod foo {
index 9cc3c92e2e376b22cd2b2e269202d788d664a16f..bbb063770c148274fd1ee99df28074b7f544c446 100644 (file)
@@ -11,6 +11,7 @@
 use foo::bar::{
     self,
 //~^ ERROR `self` import can only appear once in the list
+//~^^ NOTE previous import of `bar` here
     Bar,
     self
 //~^ NOTE another `self` import appears here
diff --git a/src/test/compile-fail/use-super-global-path.rs b/src/test/compile-fail/use-super-global-path.rs
new file mode 100644 (file)
index 0000000..d721d42
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_attrs)]
+
+mod foo {
+    pub fn g() {
+        use ::super::main; //~ WARN expected identifier, found keyword `super`
+        //~^ WARN this was previously accepted by the compiler but is being phased out
+        main();
+    }
+}
+
+#[rustc_error]
+fn main() { foo::g(); } //~ ERROR compilation successful
index fb6cdf7fa592442b53113eb782444b053ed7885b..268b937c2916581f1631074ef823f0df4d675e6e 100644 (file)
@@ -18,4 +18,11 @@ impl E for A {
     pub fn foo(&self) {}             //~ ERROR: unnecessary visibility
 }
 
+enum Foo {
+    V1 { pub f: i32 }, //~ ERROR unnecessary visibility qualifier
+                       //| NOTE visibility qualifiers have no effect on variant fields
+    V2(pub i32), //~ ERROR unnecessary visibility qualifier
+                 //| NOTE visibility qualifiers have no effect on variant fields
+}
+
 fn main() {}
index 94055450bc6bb7854f743eb77e0387a49cb1b136..6e60562da67495513b829dec5462f0474d23f4d0 100644 (file)
@@ -22,7 +22,7 @@ fn main() {
         let x: unsafe extern "C" fn(f: isize, x: u8) = foo;
         //~^ ERROR: mismatched types
         //~| expected `unsafe extern "C" fn(isize, u8)`
-        //~| found `unsafe extern "C" fn(isize, u8, ...)`
+        //~| found `unsafe extern "C" fn(isize, u8, ...) {foo}`
         //~| expected non-variadic fn
         //~| found variadic function
 
@@ -33,11 +33,11 @@ fn main() {
         //~| expected variadic fn
         //~| found non-variadic function
 
-        foo(1, 2, 3f32); //~ ERROR: can't pass an f32 to variadic function, cast to c_double
-        foo(1, 2, true); //~ ERROR: can't pass bool to variadic function, cast to c_int
-        foo(1, 2, 1i8); //~ ERROR: can't pass i8 to variadic function, cast to c_int
-        foo(1, 2, 1u8); //~ ERROR: can't pass u8 to variadic function, cast to c_uint
-        foo(1, 2, 1i16); //~ ERROR: can't pass i16 to variadic function, cast to c_int
-        foo(1, 2, 1u16); //~ ERROR: can't pass u16 to variadic function, cast to c_uint
+        foo(1, 2, 3f32); //~ ERROR: can't pass an `f32` to variadic function, cast to `c_double`
+        foo(1, 2, true); //~ ERROR: can't pass `bool` to variadic function, cast to `c_int`
+        foo(1, 2, 1i8); //~ ERROR: can't pass `i8` to variadic function, cast to `c_int`
+        foo(1, 2, 1u8); //~ ERROR: can't pass `u8` to variadic function, cast to `c_uint`
+        foo(1, 2, 1i16); //~ ERROR: can't pass `i16` to variadic function, cast to `c_int`
+        foo(1, 2, 1u16); //~ ERROR: can't pass `u16` to variadic function, cast to `c_uint`
     }
 }
index 862d842d62c2316abdd7a1584fe5744a09182920..f7fed32cb5af2ec5f1b9547f3505b33b15acf8e2 100644 (file)
 struct SomeStruct<A> { x: u32 }
 //~^ ERROR parameter `A` is never used
 //~| HELP PhantomData
+//~| HELP run `rustc --explain E0392` to see a detailed explanation
 
 enum SomeEnum<A> { Nothing }
 //~^ ERROR parameter `A` is never used
 //~| HELP PhantomData
+//~| HELP run `rustc --explain E0392` to see a detailed explanation
 
 // Here T might *appear* used, but in fact it isn't.
 enum ListCell<T> {
 //~^ ERROR parameter `T` is never used
 //~| HELP PhantomData
+//~| HELP run `rustc --explain E0392` to see a detailed explanation
     Cons(Box<ListCell<T>>),
     Nil
 }
index 75869d700d35d6c886b2bff53512a5d3aba13a60..a8bb94b78fcc0a9940573dd903d3e394e67aa11d 100644 (file)
 
 // aux-build:variant-namespacing.rs
 
-extern crate variant_namespacing;
-pub use variant_namespacing::XE::*;
-//~^ ERROR import `XStruct` conflicts with type in this module
-//~| ERROR import `XStruct` conflicts with value in this module
-//~| ERROR import `XTuple` conflicts with type in this module
-//~| ERROR import `XTuple` conflicts with value in this module
-//~| ERROR import `XUnit` conflicts with type in this module
-//~| ERROR import `XUnit` conflicts with value in this module
-pub use E::*;
-//~^ ERROR import `Struct` conflicts with type in this module
-//~| ERROR import `Struct` conflicts with value in this module
-//~| ERROR import `Tuple` conflicts with type in this module
-//~| ERROR import `Tuple` conflicts with value in this module
-//~| ERROR import `Unit` conflicts with type in this module
-//~| ERROR import `Unit` conflicts with value in this module
-
 enum E {
     Struct { a: u8 },
     Tuple(u8),
@@ -46,4 +30,20 @@ const XStruct: u8 = 0;
 const XTuple: u8 = 0;
 const XUnit: u8 = 0;
 
+extern crate variant_namespacing;
+pub use variant_namespacing::XE::*;
+//~^ ERROR `XStruct` has already been defined
+//~| ERROR `XStruct` has already been defined
+//~| ERROR `XTuple` has already been defined
+//~| ERROR `XTuple` has already been defined
+//~| ERROR `XUnit` has already been defined
+//~| ERROR `XUnit` has already been defined
+pub use E::*;
+//~^ ERROR `Struct` has already been defined
+//~| ERROR `Struct` has already been defined
+//~| ERROR `Tuple` has already been defined
+//~| ERROR `Tuple` has already been defined
+//~| ERROR `Unit` has already been defined
+//~| ERROR `Unit` has already been defined
+
 fn main() {}
index 654272f5bc6eba34b09220e9eff6adbd080ebcad..eb0baff0005ddd53ce54853b1f31223495b7d2b2 100644 (file)
@@ -24,7 +24,7 @@ impl TraitB for isize {
 
 fn call_it<B:TraitB>(b: B)  -> isize {
     let y = 4;
-    b.gimme_an_a(y) //~ ERROR the trait `TraitA` is not implemented
+    b.gimme_an_a(y) //~ ERROR `_: TraitA` is not satisfied
 }
 
 fn main() {
index ba31de98e7f95186f7bb54d11fa222ce70f1d945..1e82f609d2a7f7e91a0cf4a329137f449c120ef6 100644 (file)
@@ -25,9 +25,8 @@ pub trait Foo {
 
 impl<T> Foo for T {
     type Bar = MySet<T>;
-    //~^ ERROR the trait `MyHash` is not implemented for the type `T`
+    //~^ ERROR the trait bound `T: MyHash` is not satisfied
 }
 
 #[rustc_error]
 fn main() { }
-
index 6c6522fe658d23ec0053b575e32db7f33026a993..e99957c79142738ac8ab7df1990ac943ec80dba9 100644 (file)
@@ -24,7 +24,7 @@ struct Evil<'a, 'b: 'a>(Option<&'a &'b ()>);
 impl<'a, 'b> Foo<'a, 'b, Evil<'a, 'b>> for () {
     fn make_me() -> Self { }
     fn static_evil(u: &'b u32) -> &'a u32 {
-        u //~ ERROR cannot infer an appropriate lifetime
+        u //~ ERROR E0312
     }
 }
 
@@ -40,7 +40,7 @@ impl<'a, 'b> Foo<'a, 'b, ()> for IndirectEvil<'a, 'b> {
 
 impl<'a, 'b> Evil<'a, 'b> {
     fn inherent_evil(u: &'b u32) -> &'a u32 {
-        u //~ ERROR cannot infer an appropriate lifetime
+        u //~ ERROR E0312
     }
 }
 
index 74342cfedd3298261807b6cc1fe6c08b883fc026..458ee6694247e29baf6c6010a11a8973cadffb7e 100644 (file)
@@ -21,7 +21,7 @@ impl<T> Foo<T> {
 
     fn fails_copy(self) {
         require_copy(self.x);
-        //~^ ERROR the trait `core::marker::Copy` is not implemented for the type `T`
+        //~^ ERROR the trait bound `T: std::marker::Copy` is not satisfied
     }
 }
 
index 8e01e89b6870782085d0aa7f86e91c20d4c039b1..b3f99f2ae253d9f026b779179f5afd40f918bca5 100644 (file)
@@ -26,7 +26,7 @@ impl<T> Foo<T> for Bar<T> {
 
     fn fails_copy(self) {
         require_copy(self.x);
-        //~^ ERROR the trait `core::marker::Copy` is not implemented for the type `T`
+        //~^ ERROR the trait bound `T: std::marker::Copy` is not satisfied
     }
 }
 
index bf614e6eb512bab429c2c5530e833fdc1f464c35..05a58daf906881b9fbe42e50d40c5c0a3e5baa07 100644 (file)
@@ -28,5 +28,5 @@ impl Bar<X> for isize {
 
 fn main() {
     1.method::<X>();
-    //~^ ERROR the trait `Foo<X>` is not implemented for the type `X`
+    //~^ ERROR the trait bound `X: Foo<X>` is not satisfied
 }
index e5b54582e4e3ffca0ccc224b7bf53062054a0fb7..1ac03330afd195e06fd25392afffee75567818b3 100644 (file)
@@ -26,5 +26,5 @@ impl<T> Foo<T> {
 fn main() {
     let x = Foo { value: Bar };
     x.equals(&x);
-    //~^ ERROR the trait `core::cmp::Eq` is not implemented for the type `Bar`
+    //~^ ERROR `Bar: std::cmp::Eq` is not satisfied
 }
index 269df212489cb5f8f0abf3864249e05ed7b399a8..278a8db4e1ad481ddb518c4b01fc46926315a145 100644 (file)
@@ -15,5 +15,5 @@ struct Struct;
 
 fn main() {
     drop(equal(&Struct, &Struct))
-    //~^ ERROR the trait `core::cmp::Eq` is not implemented
+    //~^ ERROR the trait bound `Struct: std::cmp::Eq` is not satisfied
 }
index cd5240198b385b6ec7af3e9fb98c0c3be5b6b801..bf8fc29217338bf2e6e953ccac2ec4eb608ea196 100644 (file)
@@ -29,5 +29,5 @@ fn foo<T>(x: &T)
 
 fn main() {
     foo(&X);
-    //~^ error: `for<'a> Bar` is not implemented
+    //~^ error: `for<'a> &'a _: Bar` is not satisfied
 }
index 43be96965d01f05bfd0736e5313da04e45311fc7..3bd4c780625a42db1cb9096385b5d36b4cf04121 100644 (file)
@@ -43,13 +43,13 @@ fn main() {
 
     // public items in a private mod should be inaccessible
     static_priv_by_default::foo::a;
-    //~^ ERROR: static `a` is private
+    //~^ ERROR: module `foo` is private
     static_priv_by_default::foo::b;
-    //~^ ERROR: function `b` is private
+    //~^ ERROR: module `foo` is private
     static_priv_by_default::foo::c;
-    //~^ ERROR: struct `c` is private
+    //~^ ERROR: module `foo` is private
     foo::<static_priv_by_default::foo::d>();
-    //~^ ERROR: enum `d` is private
+    //~^ ERROR: module `foo` is private
     foo::<static_priv_by_default::foo::e>();
-    //~^ ERROR: type `e` is private
+    //~^ ERROR: module `foo` is private
 }
index fb40400a4429cfe3a1ca496b5d16c8d26da08745..6974238ac72f2cd4b6dd3d883cf27c60e8681e42 100644 (file)
 
 const CONSTANT: u64 = 3;
 
+#[derive(PartialEq, Eq)]
 struct Struct {
     a: isize,
     b: usize,
 }
 const STRUCT: Struct = Struct { a: 1, b: 2 };
 
+#[derive(PartialEq, Eq)]
 struct TupleStruct(u32);
 const TUPLE_STRUCT: TupleStruct = TupleStruct(4);
 
+#[derive(PartialEq, Eq)]
 enum Enum {
     Variant1(char),
     Variant2 { a: u8 },
diff --git a/src/test/incremental/dirty_clean.rs b/src/test/incremental/dirty_clean.rs
new file mode 100644 (file)
index 0000000..9a30978
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// revisions: rpass1 cfail2
+
+#![allow(warnings)]
+#![feature(rustc_attrs)]
+
+// Sanity check for the dirty-clean system. Give the opposite
+// annotations that we expect to see, so that we check that errors are
+// reported.
+
+fn main() { }
+
+mod x {
+    #[cfg(rpass1)]
+    pub fn x() -> usize {
+        22
+    }
+
+    #[cfg(cfail2)]
+    pub fn x() -> u32 {
+        22
+    }
+}
+
+mod y {
+    use x;
+
+    #[rustc_clean(label="TypeckItemBody", cfg="cfail2")]
+    #[rustc_clean(label="TransCrateItem", cfg="cfail2")]
+    pub fn y() {
+        //[cfail2]~^ ERROR `TypeckItemBody("y::y")` not found in dep graph, but should be clean
+        //[cfail2]~| ERROR `TransCrateItem("y::y")` not found in dep graph, but should be clean
+        x::x();
+    }
+}
+
+mod z {
+    #[rustc_dirty(label="TypeckItemBody", cfg="cfail2")]
+    #[rustc_dirty(label="TransCrateItem", cfg="cfail2")]
+    pub fn z() {
+        //[cfail2]~^ ERROR `TypeckItemBody("z::z")` found in dep graph, but should be dirty
+        //[cfail2]~| ERROR `TransCrateItem("z::z")` found in dep graph, but should be dirty
+    }
+}
diff --git a/src/test/incremental/hello_world.rs b/src/test/incremental/hello_world.rs
new file mode 100644 (file)
index 0000000..f98ae18
--- /dev/null
@@ -0,0 +1,46 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// revisions: rpass1 rpass2
+
+#![allow(warnings)]
+#![feature(rustc_attrs)]
+
+fn main() { }
+
+mod x {
+    #[cfg(rpass1)]
+    pub fn x() -> i32 {
+        1
+    }
+
+    #[cfg(rpass2)]
+    pub fn x() -> i32 {
+        2
+    }
+}
+
+mod y {
+    use x;
+
+    #[rustc_dirty(label="TypeckItemBody", cfg="rpass2")]
+    pub fn y() {
+        x::x();
+    }
+}
+
+mod z {
+    use y;
+
+    #[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+    pub fn z() {
+        y::y();
+    }
+}
diff --git a/src/test/incremental/string_constant.rs b/src/test/incremental/string_constant.rs
new file mode 100644 (file)
index 0000000..0aa728b
--- /dev/null
@@ -0,0 +1,58 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// revisions: rpass1 rpass2
+
+#![allow(warnings)]
+#![feature(rustc_attrs)]
+
+// Here the only thing which changes is the string constant in `x`.
+// Therefore, the compiler deduces (correctly) that typeck is not
+// needed even for callers of `x`.
+//
+// It is not entirely clear why `TransCrateItem` invalidates `y` and
+// `z`; I think it's because of the structure of
+// trans. -nmatsakis
+
+fn main() { }
+
+mod x {
+    #[cfg(rpass1)]
+    pub fn x() {
+        println!("1");
+    }
+
+    #[cfg(rpass2)]
+    #[rustc_dirty(label="TypeckItemBody", cfg="rpass2")]
+    #[rustc_dirty(label="TransCrateItem", cfg="rpass2")]
+    pub fn x() {
+        println!("2");
+    }
+}
+
+mod y {
+    use x;
+
+    #[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+    #[rustc_clean(label="TransCrateItem", cfg="rpass2")]
+    pub fn y() {
+        x::x();
+    }
+}
+
+mod z {
+    use y;
+
+    #[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+    #[rustc_clean(label="TransCrateItem", cfg="rpass2")]
+    pub fn z() {
+        y::y();
+    }
+}
diff --git a/src/test/parse-fail/default.rs b/src/test/parse-fail/default.rs
new file mode 100644 (file)
index 0000000..d18401e
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only
+
+// Test successful and unsuccessful parsing of the `default` contextual keyword
+
+trait Foo {
+    fn foo<T: Default>() -> T;
+}
+
+impl Foo for u8 {
+    default fn foo<T: Default>() -> T {
+        T::default()
+    }
+}
+
+impl Foo for u16 {
+    pub default fn foo<T: Default>() -> T {
+        T::default()
+    }
+}
+
+impl Foo for u32 {
+    default pub fn foo<T: Default>() -> T { T::default() } //~ ERROR expected one of
+}
+
+fn main() {}
diff --git a/src/test/parse-fail/issue-10636-2.rs b/src/test/parse-fail/issue-10636-2.rs
deleted file mode 100644 (file)
index 9da5453..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// FIXME(31528) we emit a bunch of silly errors here due to continuing past the
-// first one. This would be easy-ish to address by better recovery in tokenisation.
-
-// compile-flags: -Z parse-only
-
-pub fn trace_option(option: Option<isize>) { //~ HELP did you mean to close this delimiter?
-    option.map(|some| 42; //~ NOTE: unclosed delimiter
-                          //~^ ERROR: expected one of
-} //~ ERROR: incorrect close delimiter
-//~^ ERROR: unexpected token
-//~ ERROR: un-closed delimiter
index 0d9a111045a9940ab8ae4dfa8e1d980eb83a7072..6ba0fb5f15bb139c99c6acd7d00ebfdb9c49e412 100644 (file)
@@ -12,5 +12,5 @@
 
 fn main() {
     let t = (42, 42);
-    t.0::<isize>; //~ ERROR expected one of `.`, `;`, `}`, or an operator, found `::`
+    t.0::<isize>; //~ ERROR expected one of `.`, `;`, `?`, `}`, or an operator, found `::`
 }
index be6bd516d6fe439388160260d61fc3c4f824d302..a489864e3f7370cb8ae0b6b8321106f2a2c2176b 100644 (file)
@@ -16,6 +16,6 @@ impl Foo {
     fn foo() {}
 
     #[stable(feature = "rust1", since = "1.0.0")]
-} //~ ERROR expected one of `const`, `extern`, `fn`, `pub`, `type`, or `unsafe`, found `}`
+} //~ ERROR expected one of `const`, `default`, `extern`, `fn`, `pub`, `type`, or `unsafe`
 
 fn main() {}
index d1d8d3acf91871bc581d3799380a47aca6d1126f..d9789d55a6faf51d160a585d0645ca7c2ce7dfa3 100644 (file)
@@ -14,6 +14,6 @@ struct Foo;
 
 impl Foo {
     #[stable(feature = "rust1", since = "1.0.0")]
-} //~ ERROR expected one of `const`, `extern`, `fn`, `pub`, `type`, or `unsafe`, found `}`
+} //~ ERROR expected one of `const`, `default`, `extern`, `fn`, `pub`, `type`, or `unsafe`
 
 fn main() {}
index e65f95780bb96d1ccff488beea27bcce20f1e743..f24c544073578ff07def143413cec0e14cfaf49b 100644 (file)
@@ -10,4 +10,4 @@
 
 // compile-flags: -Z parse-only
 
-static foo: isize = 2; } //~ ERROR incorrect close delimiter:
+static foo: isize = 2; } //~ ERROR unexpected close delimiter:
index 1946e984e5dc92d285230e0821119e085761827f..229b12136fc6c2d52126e55543656e622ce03c8a 100644 (file)
@@ -15,4 +15,4 @@
 fn main()
 {
     let x = 3
-} //~ ERROR: expected one of `.`, `;`, or an operator, found `}`
+} //~ ERROR: expected one of `.`, `;`, `?`, or an operator, found `}`
diff --git a/src/test/parse-fail/issue-32501.rs b/src/test/parse-fail/issue-32501.rs
new file mode 100644 (file)
index 0000000..10df093
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only
+
+fn main() {
+    let a = 0;
+    let _b = 0;
+    let _ = 0;
+    let mut b = 0;
+    let mut _b = 0;
+    let mut _ = 0; //~ ERROR expected identifier, found `_`
+    //~^ NOTE `_` is a wildcard pattern, not an identifier
+}
index 84094ab6ca89cbc16a37e60eb3bd1f4fc4e1245b..cbc0ed0ccdb84760bb98e6f69f68d5e15c574806 100644 (file)
@@ -14,4 +14,4 @@ fn main() {
     foo! (
         bar, "baz", 1, 2.0
     } //~ ERROR incorrect close delimiter
-}
+} //~ ERROR unexpected close delimiter: `}`
index 5931631cceefcb7431bf0699544c4d54c7229f56..1c55d70f607379d294c88f6dd0a3e938fdb0fe6b 100644 (file)
@@ -12,6 +12,6 @@
 
 fn main() {
     assert_eq!(1, 2)
-    assert_eq!(3, 4) //~ ERROR expected one of `.`, `;`, `}`, or an operator, found `assert_eq`
+    assert_eq!(3, 4) //~ ERROR expected one of `.`, `;`, `?`, `}`, or an operator, found `assert_eq`
     println!("hello");
 }
index b99d0493ff7333b2f402829193a766a3ecc1f382..37b66601e7092d2f5519c59cad4c4247e720ac7c 100644 (file)
@@ -14,7 +14,7 @@ fn main() {
     let foo =
         match //~ NOTE did you mean to remove this `match` keyword?
         Some(4).unwrap_or_else(5)
-        ; //~ ERROR expected one of `.`, `{`, or an operator, found `;`
+        ; //~ ERROR expected one of `.`, `?`, `{`, or an operator, found `;`
 
     println!("{}", foo)
 }
index 63d9e94146993636681b802db585f1930b4bbc62..3466fc6e71ac0cf716ab83f514ae3df73a47a777 100644 (file)
@@ -13,5 +13,5 @@
 // http://phpsadness.com/sad/1
 
 fn main() {
-    ::; //~ ERROR expected ident, found `;`
+    ::; //~ ERROR expected identifier, found `;`
 }
index 198fa5b9aca0bf4009c238cc2ed2651278b86a15..83db24b8c01ef2258bac77f91c18e7e2e46ec3b6 100644 (file)
@@ -29,6 +29,4 @@ mod bleh {
     }
 }
 
-fn main() {
-    bleh::S.f();
-}
+fn main() {}
index 284cdb8c6531a8e855c4799a723e6e004a8ce5f2..95aa71b0cdfdbcd41c6bfc7225c2de917e77c233 100644 (file)
@@ -14,5 +14,5 @@
 
 pub fn main() {
     let r = 1..2..3;
-    //~^ ERROR expected one of `.`, `;`, or an operator, found `..`
+    //~^ ERROR expected one of `.`, `;`, `?`, or an operator, found `..`
 }
index 69898612771e71b4db5f92abafff96c9ae99118d..4500df116a2de5cb25c192707992481c523091b8 100644 (file)
@@ -14,5 +14,5 @@
 
 pub fn main() {
     let r = ..1..2;
-    //~^ ERROR expected one of `.`, `;`, or an operator, found `..`
+    //~^ ERROR expected one of `.`, `;`, `?`, or an operator, found `..`
 }
diff --git a/src/test/parse-fail/range_inclusive.rs b/src/test/parse-fail/range_inclusive.rs
new file mode 100644 (file)
index 0000000..ce97372
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Make sure that inclusive ranges with no end point don't parse.
+
+#![feature(inclusive_range_syntax, inclusive_range)]
+
+pub fn main() {
+    for _ in 1... {} //~ERROR inclusive range with no end
+                     //~^HELP bounded at the end
+}
+
diff --git a/src/test/parse-fail/range_inclusive_gate.rs b/src/test/parse-fail/range_inclusive_gate.rs
new file mode 100644 (file)
index 0000000..021b6dd
--- /dev/null
@@ -0,0 +1,74 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Make sure that #![feature(inclusive_range_syntax)] is required.
+
+// #![feature(inclusive_range_syntax, inclusive_range)]
+
+macro_rules! m {
+    () => { for _ in 1...10 {} } //~ ERROR inclusive range syntax is experimental
+}
+
+#[cfg(nope)]
+fn f() {}
+#[cfg(not(nope))]
+fn f() {
+    for _ in 1...10 {} //~ ERROR inclusive range syntax is experimental
+}
+
+#[cfg(nope)]
+macro_rules! n { () => {} }
+#[cfg(not(nope))]
+macro_rules! n {
+    () => { for _ in 1...10 {} } //~ ERROR inclusive range syntax is experimental
+}
+
+macro_rules! o {
+    () => {{
+        #[cfg(nope)]
+        fn g() {}
+        #[cfg(not(nope))]
+        fn g() {
+            for _ in 1...10 {} //~ ERROR inclusive range syntax is experimental
+        }
+
+        g();
+    }}
+}
+
+#[cfg(nope)]
+macro_rules! p { () => {} }
+#[cfg(not(nope))]
+macro_rules! p {
+    () => {{
+        #[cfg(nope)]
+        fn h() {}
+        #[cfg(not(nope))]
+        fn h() {
+            for _ in 1...10 {} //~ ERROR inclusive range syntax is experimental
+        }
+
+        h();
+    }}
+}
+
+pub fn main() {
+    for _ in 1...10 {} //~ ERROR inclusive range syntax is experimental
+    for _ in ...10 {} //~ ERROR inclusive range syntax is experimental
+
+    f(); // not allowed in cfg'ed functions
+
+    m!(); // not allowed in macros
+    n!(); // not allowed in cfg'ed macros
+    o!(); // not allowed in macros that output cfgs
+    p!(); // not allowed in cfg'ed macros that output cfgs
+}
+
+
index ce8960edde12f713cc4a4974d0923177da08f7f4..5c09f68970bc4b462801ae5103ea761da4f64b73 100644 (file)
@@ -12,5 +12,5 @@
 
 static s: &'static str =
     r#"
-      "## //~ ERROR expected one of `.`, `;`, or an operator, found `#`
+      "## //~ ERROR expected one of `.`, `;`, `?`, or an operator, found `#`
 ;
index ab9ff7ac19e5e42fa9d9ca72ecef2ff07a1b9071..301bd0e8b1c9c08e94179a05761d0e1e8d6ccf0e 100644 (file)
@@ -13,5 +13,5 @@
 fn f() {
     let v = [mut 1, 2, 3, 4];
     //~^  ERROR expected identifier, found keyword `mut`
-    //~^^ ERROR expected one of `!`, `,`, `.`, `::`, `;`, `]`, `{`, or an operator, found `1`
+    //~^^ ERROR expected one of `!`, `,`, `.`, `::`, `;`, `?`, `]`, `{`, or an operator, found `1`
 }
index 7b6caad86b6ccc0b6005dc41c869648a065650ec..b4c25a75c9086b4f2ef118c8038097fb7fe7a609 100644 (file)
@@ -15,4 +15,4 @@ struct S;
 impl S {
     static fn f() {}
 }
-//~^^ ERROR expected one of `const`, `extern`, `fn`, `pub`, `type`, `unsafe`, or `}`, found `static`
+//~^^ ERROR expected one of `const`, `default`, `extern`, `fn`, `pub`, `type`, `unsafe`, or `}`
index ea686aeb6e05aafa1c13973651f3ad689d58a565..2f637cf0b4e7633d63a1eaf8dd5546a3f6ce576d 100644 (file)
@@ -13,5 +13,5 @@
 fn f() {
     let a_box = box mut 42;
     //~^  ERROR expected identifier, found keyword `mut`
-    //~^^ ERROR expected one of `!`, `.`, `::`, `;`, `{`, or an operator, found `42`
+    //~^^ ERROR expected one of `!`, `.`, `::`, `;`, `?`, `{`, or an operator, found `42`
 }
index e9de52c013b3a8ba04912347d5ae7cbc011bd459..156b172a94402c695f07333943459416757e5cda 100644 (file)
@@ -18,5 +18,5 @@ fn removed_with() {
 
     let a = S { foo: (), bar: () };
     let b = S { foo: () with a };
-    //~^ ERROR expected one of `,`, `.`, `}`, or an operator, found `with`
+    //~^ ERROR expected one of `,`, `.`, `?`, `}`, or an operator, found `with`
 }
index e57298f72804af423ba7a55a3c6e5e9d91529934..930984555600be3e094cf17c15d25ce95e6bc81c 100644 (file)
@@ -23,7 +23,7 @@ impl Foo {
 fn main() {
     for x in Foo {
         x: 3    //~ ERROR expected type, found `3`
-    }.hi() { //~ ERROR expected one of `.`, `;`, `}`, or an operator, found `{`
+    }.hi() { //~ ERROR expected one of `.`, `;`, `?`, `}`, or an operator, found `{`
         println!("yo");
     }
 }
index 6bf41b7a450aff70a1c6492c4ce331b7c82e3b5d..db6a360a5675a46334df05afb095e4367da1df73 100644 (file)
@@ -23,7 +23,7 @@ impl Foo {
 fn main() {
     if Foo {
         x: 3    //~ ERROR expected type, found `3`
-    }.hi() { //~ ERROR expected one of `.`, `;`, `}`, or an operator, found `{`
+    }.hi() { //~ ERROR expected one of `.`, `;`, `?`, `}`, or an operator, found `{`
         println!("yo");
     }
 }
index 679f4542824fbd0ff4155e7429add1bd28daac80..7038cc798c41542580e4599dc03246ce521e8f6e 100644 (file)
@@ -20,6 +20,6 @@ fn main() {
     } {
         Foo {
             x: x
-        } => {} //~ ERROR expected one of `.`, `;`, `}`, or an operator, found `=>`
+        } => {} //~ ERROR expected one of `.`, `;`, `?`, `}`, or an operator, found `=>`
     }
 }
index b388aac2c54875400a3ea4b0e35c770ec43c2fd5..75e4eb3de070bc3315912a28c2ee6486b08c8cc8 100644 (file)
@@ -23,7 +23,7 @@ impl Foo {
 fn main() {
     while Foo {
         x: 3    //~ ERROR expected type, found `3`
-    }.hi() { //~ ERROR expected one of `.`, `;`, `}`, or an operator, found `{`
+    }.hi() { //~ ERROR expected one of `.`, `;`, `?`, `}`, or an operator, found `{`
         println!("yo");
     }
 }
diff --git a/src/test/parse-fail/struct-variant-no-pub.rs b/src/test/parse-fail/struct-variant-no-pub.rs
deleted file mode 100644 (file)
index 1824e32..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// compile-flags: -Z parse-only
-
-enum Foo {
-    Bar {
-        pub a: isize //~ ERROR: `pub` is not allowed here
-    }
-}
-
-fn main() {}
index ad2710e91e2678c2b001542de0cef75667705e10..58564dc8621dd4d967a4e5b8c1f068d9e134e9b5 100644 (file)
@@ -24,7 +24,7 @@ fn bar() {
     let b = Box::Bar::<isize,usize>::new(); // OK
 
     let b = Box::Bar::()::new();
-    //~^ ERROR expected ident, found `(`
+    //~^ ERROR expected identifier, found `(`
 }
 
 fn main() { }
index 835f7fc96c63fb5e9c6ef6976b32a63daab8a945..6ce534d52b88356d119869bf537b41bc7b266410 100644 (file)
@@ -39,42 +39,42 @@ pub fn bar() {
 
 
     ((::std::fmt::format as
-         fn(core::fmt::Arguments<'_>) -> collections::string::String {collections::fmt::format})(((::std::fmt::Arguments::new_v1
-                                                                                                      as
-                                                                                                      fn(&[&str], &[core::fmt::ArgumentV1<'_>]) -> core::fmt::Arguments<'_> {core::fmt::Arguments<'a>::new_v1})(({
-                                                                                                                                                                                                                     static __STATIC_FMTSTR:
-                                                                                                                                                                                                                            &'static [&'static str]
-                                                                                                                                                                                                                            =
-                                                                                                                                                                                                                         (&([("test"
-                                                                                                                                                                                                                                 as
-                                                                                                                                                                                                                                 &'static str)]
-                                                                                                                                                                                                                               as
-                                                                                                                                                                                                                               [&'static str; 1])
-                                                                                                                                                                                                                             as
-                                                                                                                                                                                                                             &'static [&'static str; 1]);
-                                                                                                                                                                                                                     (__STATIC_FMTSTR
-                                                                                                                                                                                                                         as
-                                                                                                                                                                                                                         &'static [&'static str])
-                                                                                                                                                                                                                 }
-                                                                                                                                                                                                                    as
-                                                                                                                                                                                                                    &[&str]),
-                                                                                                                                                                                                                (&(match (()
-                                                                                                                                                                                                                             as
-                                                                                                                                                                                                                             ())
-                                                                                                                                                                                                                       {
-                                                                                                                                                                                                                       ()
-                                                                                                                                                                                                                       =>
-                                                                                                                                                                                                                       ([]
-                                                                                                                                                                                                                           as
-                                                                                                                                                                                                                           [core::fmt::ArgumentV1<'_>; 0]),
-                                                                                                                                                                                                                   }
-                                                                                                                                                                                                                      as
-                                                                                                                                                                                                                      [core::fmt::ArgumentV1<'_>; 0])
-                                                                                                                                                                                                                    as
-                                                                                                                                                                                                                    &[core::fmt::ArgumentV1<'_>; 0]))
-                                                                                                     as
-                                                                                                     core::fmt::Arguments<'_>))
-        as collections::string::String);
+         fn(std::fmt::Arguments<'_>) -> std::string::String {std::fmt::format})(((::std::fmt::Arguments::new_v1
+                                                                                     as
+                                                                                     fn(&[&str], &[std::fmt::ArgumentV1<'_>]) -> std::fmt::Arguments<'_> {std::fmt::Arguments<'_>::new_v1})(({
+                                                                                                                                                                                                 static __STATIC_FMTSTR:
+                                                                                                                                                                                                        &'static [&'static str]
+                                                                                                                                                                                                        =
+                                                                                                                                                                                                     (&([("test"
+                                                                                                                                                                                                             as
+                                                                                                                                                                                                             &'static str)]
+                                                                                                                                                                                                           as
+                                                                                                                                                                                                           [&'static str; 1])
+                                                                                                                                                                                                         as
+                                                                                                                                                                                                         &'static [&'static str; 1]);
+                                                                                                                                                                                                 (__STATIC_FMTSTR
+                                                                                                                                                                                                     as
+                                                                                                                                                                                                     &'static [&'static str])
+                                                                                                                                                                                             }
+                                                                                                                                                                                                as
+                                                                                                                                                                                                &[&str]),
+                                                                                                                                                                                            (&(match (()
+                                                                                                                                                                                                         as
+                                                                                                                                                                                                         ())
+                                                                                                                                                                                                   {
+                                                                                                                                                                                                   ()
+                                                                                                                                                                                                   =>
+                                                                                                                                                                                                   ([]
+                                                                                                                                                                                                       as
+                                                                                                                                                                                                       [std::fmt::ArgumentV1<'_>; 0]),
+                                                                                                                                                                                               }
+                                                                                                                                                                                                  as
+                                                                                                                                                                                                  [std::fmt::ArgumentV1<'_>; 0])
+                                                                                                                                                                                                as
+                                                                                                                                                                                                &[std::fmt::ArgumentV1<'_>; 0]))
+                                                                                    as
+                                                                                    std::fmt::Arguments<'_>))
+        as std::string::String);
 }
 pub type Foo = [i32; (3 as usize)];
 pub struct Bar {
@@ -86,8 +86,10 @@ pub fn id<T>(x: T) -> T { (x as T) }
 pub fn use_id() {
     let _ =
         ((id::<[i32; (3 as usize)]> as
-             fn([i32; 3]) -> [i32; 3] {id})(([(1 as i32), (2 as i32),
-                                              (3 as i32)] as [i32; 3])) as
+             fn([i32; 3]) -> [i32; 3] {id::<[i32; 3]>})(([(1 as i32),
+                                                          (2 as i32),
+                                                          (3 as i32)] as
+                                                            [i32; 3])) as
             [i32; 3]);
 }
 fn main() { }
diff --git a/src/test/pretty/top-level-doc-comments.rs b/src/test/pretty/top-level-doc-comments.rs
new file mode 100644 (file)
index 0000000..59c7f0f
--- /dev/null
@@ -0,0 +1,20 @@
+/// Some doc comment.
+struct X;
+
+// ignore-license
+
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// pp-exact
+
+// Test that rust can properly pretty print a doc comment if it's the first line in a file.  some
+
+fn main() { let x = X; }
index de69b7b9fa6705ed03afd7c07abd96ef49415334..d3817b25d6100f6d364ec3515c20102ad0e7b665 100644 (file)
@@ -8,7 +8,12 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when  ending with // comments
+
 // error-pattern:attempted to divide by zero
+
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     let y = 0;
     let _z = 1 / y;
diff --git a/src/test/run-fail/meta-revision-bad.rs b/src/test/run-fail/meta-revision-bad.rs
new file mode 100644 (file)
index 0000000..bf521d4
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Meta test for compiletest: check that when we give the wrong error
+// patterns, the test fails.
+
+// revisions: foo bar
+// should-fail
+//[foo] error-pattern:bar
+//[bar] error-pattern:foo
+
+#[cfg(foo)] fn die() {panic!("foo");}
+#[cfg(bar)] fn die() {panic!("bar");}
+
+fn main() { die(); }
diff --git a/src/test/run-fail/meta-revision-ok.rs b/src/test/run-fail/meta-revision-ok.rs
new file mode 100644 (file)
index 0000000..f74ec39
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Meta test for compiletest: check that when we give the right error
+// patterns, the test passes. See also `meta-revision-bad.rs`.
+
+// revisions: foo bar
+//[foo] error-pattern:foo
+//[bar] error-pattern:bar
+
+#[cfg(foo)] fn die() {panic!("foo");}
+#[cfg(bar)] fn die() {panic!("bar");}
+
+fn main() { die(); }
diff --git a/src/test/run-fail/mir_dynamic_drops_1.rs b/src/test/run-fail/mir_dynamic_drops_1.rs
new file mode 100644 (file)
index 0000000..590b9fb
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![feature(rustc_attrs)]
+// error-pattern:drop 1
+// error-pattern:drop 2
+use std::io::{self, Write};
+
+
+/// Structure which does not allow itself to be dropped twice.
+struct Droppable<'a>(&'a mut bool, u32);
+impl<'a> Drop for Droppable<'a> {
+    fn drop(&mut self) {
+        if *self.0 {
+            writeln!(io::stderr(), "{} dropped twice", self.1);
+            ::std::process::exit(1);
+        }
+        writeln!(io::stderr(), "drop {}", self.1);
+        *self.0 = true;
+    }
+}
+
+#[rustc_mir]
+fn mir() {
+    let (mut xv, mut yv) = (false, false);
+    let x = Droppable(&mut xv, 1);
+    let y = Droppable(&mut yv, 2);
+    let mut z = x;
+    let k = y;
+    z = k;
+}
+
+fn main() {
+    mir();
+    panic!();
+}
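The error patterns above (`drop 1` and `drop 2`) follow from ordinary Rust drop semantics combined with MIR's dynamic drop flags: assigning over `z` drops the value it previously held, and values that were moved out of must not be dropped again at end of scope. A minimal standalone sketch of that behaviour (not part of the test suite; names chosen for illustration):

    struct Noisy(u32);
    impl Drop for Noisy {
        fn drop(&mut self) { println!("drop {}", self.0); }
    }

    fn main() {
        let x = Noisy(1);
        let y = Noisy(2);
        let mut z = x;  // x moved into z
        let k = y;      // y moved into k
        z = k;          // old contents of z (1) dropped here; k moved into z
    }                   // z (2) dropped at end of scope; x, y, k were moved out, so nothing else is dropped

This prints "drop 1" followed by "drop 2", matching the patterns the test checks for.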
diff --git a/src/test/run-fail/mir_dynamic_drops_2.rs b/src/test/run-fail/mir_dynamic_drops_2.rs
new file mode 100644 (file)
index 0000000..eafd3d3
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![feature(rustc_attrs)]
+// error-pattern:drop 1
+use std::io::{self, Write};
+
+
+/// Structure which will not allow itself to be dropped twice.
+struct Droppable<'a>(&'a mut bool, u32);
+impl<'a> Drop for Droppable<'a> {
+    fn drop(&mut self) {
+        if *self.0 {
+            writeln!(io::stderr(), "{} dropped twice", self.1);
+            ::std::process::exit(1);
+        }
+        writeln!(io::stderr(), "drop {}", self.1);
+        *self.0 = true;
+    }
+}
+
+#[rustc_mir]
+fn mir<'a>(d: Droppable<'a>) {
+    loop {
+        let x = d;
+        break;
+    }
+}
+
+fn main() {
+    let mut xv = false;
+    mir(Droppable(&mut xv, 1));
+    panic!();
+}
diff --git a/src/test/run-fail/mir_dynamic_drops_3.rs b/src/test/run-fail/mir_dynamic_drops_3.rs
new file mode 100644 (file)
index 0000000..730d9c8
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![feature(rustc_attrs)]
+// error-pattern:unwind happens
+// error-pattern:drop 3
+// error-pattern:drop 2
+// error-pattern:drop 1
+use std::io::{self, Write};
+
+
+/// Structure which will not allow itself to be dropped twice.
+struct Droppable<'a>(&'a mut bool, u32);
+impl<'a> Drop for Droppable<'a> {
+    fn drop(&mut self) {
+        if *self.0 {
+            writeln!(io::stderr(), "{} dropped twice", self.1);
+            ::std::process::exit(1);
+        }
+        writeln!(io::stderr(), "drop {}", self.1);
+        *self.0 = true;
+    }
+}
+
+fn may_panic<'a>() -> Droppable<'a> {
+    panic!("unwind happens");
+}
+
+#[rustc_mir]
+fn mir<'a>(d: Droppable<'a>) {
+    let (mut a, mut b) = (false, false);
+    let y = Droppable(&mut a, 2);
+    let x = [Droppable(&mut b, 1), y, d, may_panic()];
+}
+
+fn main() {
+    let mut c = false;
+    mir(Droppable(&mut c, 3));
+}
index 76d4de7ecb03ce907b987c4e16e705a4dc36373e..7a151c8c572f6dfe7db13bf5576f34e1c9a1acb5 100644 (file)
@@ -8,7 +8,12 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // error-pattern:attempted remainder with a divisor of zero
+
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     let y = 0;
     let _z = 1 % y;
index 6c6a41fa6f2e3e6deaa21b8e62bcb2faa94b8959..c989cc594536ba11c02a1a65a218eef1dc01929c 100644 (file)
@@ -8,10 +8,14 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // error-pattern:thread '<main>' panicked at 'arithmetic operation overflowed'
 // compile-flags: -C debug-assertions
 
 
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     let _x = 200u8 + 200u8 + 200u8;
 }
index 62935bacce8711f90b198d14fc5b7b387f1f53bc..a27210112982a6a76f932f21264f2d757ce8efe8 100644 (file)
@@ -8,11 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // error-pattern:thread '<main>' panicked at 'shift operation overflowed'
 // compile-flags: -C debug-assertions
 
 #![warn(exceeding_bitshifts)]
 
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     let _x = 1_i32 << 32;
 }
index f6e6cb105c51bd797fa12c0ae5cef393e8fe726d..fe0bcc5b98545f62ec02fae195cd229d648a4437 100644 (file)
@@ -8,11 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // error-pattern:thread '<main>' panicked at 'shift operation overflowed'
 // compile-flags: -C debug-assertions
 
 #![warn(exceeding_bitshifts)]
 
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     let _x = 1 << -1;
 }
index a70f31954c6edaf65d7331f6eff2199679f1d4d5..aac220d32d9ce4dfa51e12670fb866914d31d5b7 100644 (file)
@@ -8,11 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // error-pattern:thread '<main>' panicked at 'shift operation overflowed'
 // compile-flags: -C debug-assertions
 
 #![warn(exceeding_bitshifts)]
 
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     let _x = 1_u64 << 64;
 }
index 571feaeb94345f70b2de1103fa3db812f922a65b..7e8b266da49bee83dcbd81e26ba980107861a028 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // error-pattern:thread '<main>' panicked at 'shift operation overflowed'
 // compile-flags: -C debug-assertions
 
@@ -16,6 +18,8 @@
 
 #![warn(exceeding_bitshifts)]
 
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     // this signals overflow when checking is on
     let x = 1_i8 << 17;
index a413a6f0abfa29d489fdac600e5f05a6cace1c2d..8cba700bbf9a3e0b59faa56765dadaf25407a4b4 100644 (file)
@@ -8,9 +8,13 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // error-pattern:thread '<main>' panicked at 'arithmetic operation overflowed'
 // compile-flags: -C debug-assertions
 
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     let x = 200u8 * 4;
 }
index 7891d1ce9bed4931d2bd8fb7cd6e2256b5a61115..2d9d746bef324517277816ca0a88064601a8ea48 100644 (file)
@@ -8,9 +8,13 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // error-pattern:thread '<main>' panicked at 'attempted to negate with overflow'
 // compile-flags: -C debug-assertions
 
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     let _x = -std::i8::MIN;
 }
index b58eaf7f836c2c2e1d38ad48d16159ef9c7f8059..63c808dc80a4ecebe1b0caa986d2f519d70d0b57 100644 (file)
@@ -8,11 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // error-pattern:thread '<main>' panicked at 'shift operation overflowed'
 // compile-flags: -C debug-assertions
 
 #![warn(exceeding_bitshifts)]
 
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     let _x = -1_i32 >> 32;
 }
index 40b468a6ad41974d4b50b5c943b48e7d21eeefa2..8b89e57c85bb5e33e254d83c4ae244e271dc4347 100644 (file)
@@ -8,11 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // error-pattern:thread '<main>' panicked at 'shift operation overflowed'
 // compile-flags: -C debug-assertions
 
 #![warn(exceeding_bitshifts)]
 
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     let _x = -1_i32 >> -1;
 }
index afe6a908cb5f13587dee7d6d6dd564e1081009fd..8874587064c35ec26f45bf7cc1e4ebdc09f565ba 100644 (file)
@@ -8,11 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // error-pattern:thread '<main>' panicked at 'shift operation overflowed'
 // compile-flags: -C debug-assertions
 
 #![warn(exceeding_bitshifts)]
 
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     let _x = -1_i64 >> 64;
 }
index 585186575f6eda614bd4631861624bd1205e5387..d74fd8a6b8e417abcd3d87c568668898dbea34c0 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // error-pattern:thread '<main>' panicked at 'shift operation overflowed'
 // compile-flags: -C debug-assertions
 
@@ -16,6 +18,8 @@
 
 #![warn(exceeding_bitshifts)]
 
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     // this signals overflow when checking is on
     let x = 2_i8 >> 17;
index 34a7ff833bbbdebb25f7e87d212d3c595da79858..249b952a5dca2643fec1d751066561c8327a3e64 100644 (file)
@@ -8,11 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // error-pattern:thread '<main>' panicked at 'shift operation overflowed'
 // compile-flags: -C debug-assertions
 
 #![warn(exceeding_bitshifts)]
 
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     let _n = 1i64 >> [64][0];
 }
index b6f4348b184de6829fa470a71fcb64ea7b63118f..1227f35444a60399a1f60125f405c1e3079fbf6c 100644 (file)
@@ -8,12 +8,16 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // error-pattern:thread '<main>' panicked at 'shift operation overflowed'
 // compile-flags: -C debug-assertions
 
 #![warn(exceeding_bitshifts)]
 #![feature(const_indexing)]
 
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     let _n = 1i64 >> [64][0];
 }
index ece4d37c36eb31171ca1fa40009cb51f6893e0e5..ce243a50e0b66f2065b84c038af2279041d09404 100644 (file)
@@ -8,9 +8,13 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // error-pattern:thread '<main>' panicked at 'arithmetic operation overflowed'
 // compile-flags: -C debug-assertions
 
+#![feature(rustc_attrs)]
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
     let _x = 42u8 - (42u8 + 1);
 }
diff --git a/src/test/run-make/a-b-a-linker-guard/Makefile b/src/test/run-make/a-b-a-linker-guard/Makefile
new file mode 100644 (file)
index 0000000..0962ebf
--- /dev/null
@@ -0,0 +1,12 @@
+-include ../tools.mk
+
+# Test that if we build `b` against a version of `a` that has one set
+# of types, it will not run with a dylib that has a different set of
+# types.
+
+all:
+       $(RUSTC) a.rs --cfg x -C prefer-dynamic
+       $(RUSTC) b.rs -C prefer-dynamic
+       $(call RUN,b)
+       $(RUSTC) a.rs --cfg y -C prefer-dynamic
+       $(call FAIL,b)
diff --git a/src/test/run-make/a-b-a-linker-guard/a.rs b/src/test/run-make/a-b-a-linker-guard/a.rs
new file mode 100644 (file)
index 0000000..e6cbe2e
--- /dev/null
@@ -0,0 +1,20 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_name = "a"]
+#![crate_type = "dylib"]
+
+#[cfg(x)]
+pub fn foo(x: u32) { }
+
+#[cfg(y)]
+pub fn foo(x: i32) { }
+
+
diff --git a/src/test/run-make/a-b-a-linker-guard/b.rs b/src/test/run-make/a-b-a-linker-guard/b.rs
new file mode 100644 (file)
index 0000000..89fd48d
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_name = "b"]
+
+extern crate a;
+
+fn main() {
+    a::foo(22_u32);
+}
index a0ccc75afd05b5c6ea9f0029dc69049abd7d444c..fb54161c2c127db8dafd85cd069a9905bc173a89 100644 (file)
@@ -8,6 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+#![feature(rustc_attrs)]
 #![deny(warnings)]
 
 use std::env;
@@ -36,6 +37,7 @@ fn debug_assert() {
 }
 
 fn overflow() {
+    #[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
     fn add(a: u8, b: u8) -> u8 { a + b }
 
     add(200u8, 200u8);
index 13cbdfe24d6f33f3a04145b80e885f10f1580ab3..12cc475f121f6fbe0608ee73c8e8c9e0fae53733 100644 (file)
 extern crate libc;
 extern crate rustc;
 extern crate rustc_driver;
-extern crate rustc_front;
 extern crate rustc_lint;
+extern crate rustc_llvm as llvm;
 extern crate rustc_metadata;
 extern crate rustc_resolve;
-extern crate syntax;
+#[macro_use] extern crate syntax;
 
 use std::ffi::{CStr, CString};
 use std::mem::transmute;
@@ -27,14 +27,13 @@ use std::rc::Rc;
 use std::thread::Builder;
 
 use rustc::dep_graph::DepGraph;
-use rustc::front::map as ast_map;
-use rustc::llvm;
+use rustc::hir::map as ast_map;
 use rustc::middle::cstore::{CrateStore, LinkagePreference};
-use rustc::middle::ty;
+use rustc::ty;
 use rustc::session::config::{self, basic_options, build_configuration, Input, Options};
 use rustc::session::build_session;
 use rustc_driver::{driver, abort_on_err};
-use rustc_front::lowering::{lower_crate, LoweringContext};
+use rustc::hir::lowering::{lower_crate, LoweringContext};
 use rustc_resolve::MakeGlobMap;
 use rustc_metadata::cstore::CStore;
 use libc::c_void;
@@ -216,7 +215,10 @@ fn build_exec_options(sysroot: PathBuf) -> Options {
 /// for crates used in the given input.
 fn compile_program(input: &str, sysroot: PathBuf)
                    -> Option<(llvm::ModuleRef, Vec<PathBuf>)> {
-    let input = Input::Str(input.to_string());
+    let input = Input::Str {
+        name: driver::anon_src(),
+        input: input.to_string(),
+    };
     let thread = Builder::new().name("compile_program".to_string());
 
     let handle = thread.spawn(move || {
@@ -230,14 +232,14 @@ fn compile_program(input: &str, sysroot: PathBuf)
 
         let id = "input".to_string();
 
-        let krate = driver::phase_1_parse_input(&sess, cfg, &input);
+        let krate = panictry!(driver::phase_1_parse_input(&sess, cfg, &input));
 
         let krate = driver::phase_2_configure_and_expand(&sess, &cstore, krate, &id, None)
             .expect("phase_2 returned `None`");
 
         let krate = driver::assign_node_ids(&sess, krate);
         let lcx = LoweringContext::new(&sess, Some(&krate));
-        let dep_graph = DepGraph::new(sess.opts.build_dep_graph);
+        let dep_graph = DepGraph::new(sess.opts.build_dep_graph());
         let mut hir_forest = ast_map::Forest::new(lower_crate(&lcx, &krate), dep_graph);
         let arenas = ty::CtxtArenas::new();
         let ast_map = driver::make_map(&sess, &mut hir_forest);
index b53a870e2001cb0129151fa59b4c6a07209fd608..a1bd1041d145eaa61e3090e4f79979770d0d5df5 100644 (file)
@@ -8,9 +8,11 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(dynamic_lib)]
+#![feature(rustc_private)]
 
-use std::dynamic_lib::DynamicLibrary;
+extern crate rustc_back;
+
+use rustc_back::dynamic_lib::DynamicLibrary;
 use std::path::Path;
 
 pub fn main() {
index 110db9f068dae5d04a1faf8500c8ddd30dbd5b97..7d063a4c83cba1cd929bfa4906570bd1f5f5cac9 100644 (file)
@@ -1,5 +1,5 @@
 -include ../tools.mk
 
 all:
-       $(RUSTC) libc.rs
+       $(RUSTC) libc.rs -Cmetadata=foo
        $(RUSTC) main.rs --extern libc=$(TMPDIR)/liblibc.rlib
index 43ae356feede66dbac5a6c19d6bcf29b51f26000..80c06ca3274f245af1bd718ae3ae7edfc31d0782 100644 (file)
@@ -18,7 +18,7 @@ extern crate syntax;
 
 use rustc::session::{build_session, Session};
 use rustc::session::config::{basic_options, build_configuration, Input, OutputType};
-use rustc_driver::driver::{compile_input, CompileController};
+use rustc_driver::driver::{compile_input, CompileController, anon_src};
 use rustc_metadata::cstore::CStore;
 use syntax::diagnostics::registry::Registry;
 use syntax::parse::token;
@@ -67,7 +67,7 @@ fn compile(code: String, output: PathBuf, sysroot: PathBuf) {
 
     compile_input(&sess, &cstore,
             cfg,
-            &Input::Str(code),
+            &Input::Str { name: anon_src(), input: code },
             &None,
             &Some(output),
             None,
index 5736b1730216d4bd87775c16036a7b687cc35076..ab85d2bb13fb16b9519475a1a8aadbf86f5161b0 100644 (file)
@@ -2,15 +2,10 @@
 #include <stddef.h>
 #include <stdint.h>
 
-struct ByteSlice {
-        uint8_t *data;
-        size_t len;
-};
-
-size_t slice_len(struct ByteSlice bs) {
-        return bs.len;
+size_t slice_len(uint8_t *data, size_t len) {
+    return len;
 }
 
-uint8_t slice_elem(struct ByteSlice bs, size_t idx) {
-        return bs.data[idx];
+uint8_t slice_elem(uint8_t *data, size_t len, size_t idx) {
+    return data[idx];
 }
index 10c789d20c0bae4a7ed932e67be3ae7b519c922c..66aa78d538637d0d9e8476b47557f2cf5fffa8af 100644 (file)
@@ -1,16 +1,18 @@
 -include ../tools.mk
 
+OUT := $(TMPDIR)/out
+
 ifndef IS_WINDOWS
 all: time
 
 time: libc
-       mkdir -p out/time out/time/deps
-       ln -sf out/libc/liblibc.rlib out/time/deps/
-       $(RUSTC) in/time/lib.rs -Ldependency=out/time/deps/
+       mkdir -p $(OUT)/time $(OUT)/time/deps
+       ln -sf $(OUT)/libc/liblibc.rlib $(OUT)/time/deps/
+       $(RUSTC) in/time/lib.rs -Ldependency=$(OUT)/time/deps/
 
 libc:
-       mkdir -p out/libc
-       $(RUSTC) in/libc/lib.rs --crate-name=libc -o out/libc/liblibc.rlib
+       mkdir -p $(OUT)/libc
+       $(RUSTC) in/libc/lib.rs --crate-name=libc -Cmetadata=foo -o $(OUT)/libc/liblibc.rlib
 else
 all:
 endif
index e6701224ccb6bb331fbed5bcdf0c8df1477cef8a..bb73fda67bddb962e79361d3be5ed95030779a20 100644 (file)
@@ -1,8 +1,10 @@
 -include ../tools.mk
 
+LOG := $(TMPDIR)/foo.log
+
 all:
        cp foo.rs $(TMPDIR)
        cd $(TMPDIR)
-       -$(RUSTC) -Z unstable-options --error-format=json foo.rs 2>foo.log
-       grep -q '{"message":"unresolved name `y`","code":{"code":"E0425","explanation":"\\nAn unresolved name was used. Example of erroneous codes.*"},"level":"error","spans":\[{"file_name":"foo.rs","byte_start":496,"byte_end":497,"line_start":12,"line_end":12,"column_start":18,"column_end":19}\],"children":\[\]}' foo.log
-       grep -q '{"message":".*","code":{"code":"E0277","explanation":"\\nYou tried.*"},"level":"error","spans":\[{.*}\],"children":\[{"message":"the .*","code":null,"level":"help","spans":\[{"file_name":"foo.rs","byte_start":504,"byte_end":516,"line_start":14,"line_end":14,"column_start":0,"column_end":0}\],"children":\[\]},{"message":"  <u8 as core::ops::Add>","code":null,"level":"help",' foo.log
+       -$(RUSTC) -Z unstable-options --error-format=json foo.rs 2>$(LOG)
+       grep -q '{"message":"unresolved name `y`","code":{"code":"E0425","explanation":"\\nAn unresolved name was used. Example of erroneous codes.*"},"level":"error","spans":\[{"file_name":"foo.rs","byte_start":496,"byte_end":497,"line_start":12,"line_end":12,"column_start":18,"column_end":19,"text":\[{"text":"    let x = 42 + y;","highlight_start":18,"highlight_end":19}\]}\],"children":\[\]}' $(LOG)
+       grep -q '{"message":".*","code":{"code":"E0277","explanation":"\\nYou tried.*"},"level":"error","spans":\[{.*}\],"children":\[{"message":"the .*","code":null,"level":"help","spans":\[{"file_name":"foo.rs","byte_start":504,"byte_end":516,"line_start":14,"line_end":14,"column_start":0,"column_end":0,"text":\[{.*}\]}\],"children":\[\]},{"message":"  <u8 as std::ops::Add>","code":null,"level":"help",' $(LOG)
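For readability, the span object required by the first grep above (including the `text` field that these patterns now expect in the JSON error format) looks roughly like this with its whitespace restored; all values are taken directly from the pattern itself:

    {"file_name": "foo.rs", "byte_start": 496, "byte_end": 497,
     "line_start": 12, "line_end": 12, "column_start": 18, "column_end": 19,
     "text": [{"text": "    let x = 42 + y;", "highlight_start": 18, "highlight_end": 19}]}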
index 4879fbfe6d34346172d9c0d75a1111829a9cfeaf..696e0544fdc5793003c11493f059864c632f9040 100644 (file)
@@ -13,4 +13,4 @@
 
 
 fn foo_method(&self) -> &'static str { return "i am very similar to foo."; }
-/* nest::S::foo_method */
+/* nest::{{impl}}::foo_method */
index b22f34fa35b54af8b262159d26d461e6dc301db6..485ecbb4b5a59a60b0cad78692d977374face91d 100644 (file)
@@ -7,8 +7,7 @@ all: others
        $(RUSTC) -C relocation-model=default foo.rs
        $(call RUN,foo)
 
-       $(RUSTC) -C relocation-model=default --crate-type=dylib foo.rs
-       $(RUSTC) -C relocation-model=dynamic-no-pic --crate-type=dylib foo.rs
+       $(RUSTC) -C relocation-model=dynamic-no-pic --crate-type=dylib foo.rs --emit=link,obj
 
 ifdef IS_MSVC
 # FIXME(#28026)
@@ -17,5 +16,4 @@ else
 others:
        $(RUSTC) -C relocation-model=static foo.rs
        $(call RUN,foo)
-       $(RUSTC) -C relocation-model=static --crate-type=dylib foo.rs
 endif
diff --git a/src/test/run-make/reproducible-build/Makefile b/src/test/run-make/reproducible-build/Makefile
new file mode 100644 (file)
index 0000000..8e799ca
--- /dev/null
@@ -0,0 +1,20 @@
+-include ../tools.mk
+all:
+       $(RUSTC) reproducible-build-aux.rs
+       $(RUSTC) reproducible-build.rs -o"$(TMPDIR)/reproducible-build1"
+       $(RUSTC) reproducible-build.rs -o"$(TMPDIR)/reproducible-build2"
+       nm "$(TMPDIR)/reproducible-build1" | sort > "$(TMPDIR)/reproducible-build1.nm"
+       nm "$(TMPDIR)/reproducible-build2" | sort > "$(TMPDIR)/reproducible-build2.nm"
+       cmp "$(TMPDIR)/reproducible-build1.nm" "$(TMPDIR)/reproducible-build2.nm" || exit 1
+       $(RUSTC) reproducible-build-aux.rs -g
+       $(RUSTC) reproducible-build.rs -g -o"$(TMPDIR)/reproducible-build1-debug"
+       $(RUSTC) reproducible-build.rs -g -o"$(TMPDIR)/reproducible-build2-debug"
+       nm "$(TMPDIR)/reproducible-build1-debug" | sort > "$(TMPDIR)/reproducible-build1-debug.nm"
+       nm "$(TMPDIR)/reproducible-build2-debug" | sort > "$(TMPDIR)/reproducible-build2-debug.nm"
+       cmp "$(TMPDIR)/reproducible-build1-debug.nm" "$(TMPDIR)/reproducible-build2-debug.nm" || exit 1
+       $(RUSTC) reproducible-build-aux.rs -O
+       $(RUSTC) reproducible-build.rs -O -o"$(TMPDIR)/reproducible-build1-opt"
+       $(RUSTC) reproducible-build.rs -O -o"$(TMPDIR)/reproducible-build2-opt"
+       nm "$(TMPDIR)/reproducible-build1-opt" | sort > "$(TMPDIR)/reproducible-build1-opt.nm"
+       nm "$(TMPDIR)/reproducible-build2-opt" | sort > "$(TMPDIR)/reproducible-build2-opt.nm"
+       cmp "$(TMPDIR)/reproducible-build1-opt.nm" "$(TMPDIR)/reproducible-build2-opt.nm" || exit 1
diff --git a/src/test/run-make/reproducible-build/reproducible-build-aux.rs b/src/test/run-make/reproducible-build/reproducible-build-aux.rs
new file mode 100644 (file)
index 0000000..9ef853e
--- /dev/null
@@ -0,0 +1,38 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type="lib"]
+
+pub static STATIC: i32 = 1234;
+
+pub struct Struct<T1, T2> {
+    _t1: std::marker::PhantomData<T1>,
+    _t2: std::marker::PhantomData<T2>,
+}
+
+pub fn regular_fn(_: i32) {}
+
+pub fn generic_fn<T1, T2>() {}
+
+impl<T1, T2> Drop for Struct<T1, T2> {
+    fn drop(&mut self) {}
+}
+
+pub enum Enum {
+    Variant1,
+    Variant2(u32),
+    Variant3 { x: u32 }
+}
+
+pub struct TupleStruct(pub i8, pub i16, pub i32, pub i64);
+
+pub trait Trait<T1, T2> {
+    fn foo(&self);
+}
diff --git a/src/test/run-make/reproducible-build/reproducible-build.rs b/src/test/run-make/reproducible-build/reproducible-build.rs
new file mode 100644 (file)
index 0000000..dc7c702
--- /dev/null
@@ -0,0 +1,128 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test case makes sure that two identical invocations of the compiler
+// (i.e. same code base, same compile-flags, same compiler-versions, etc.)
+// produce the same output. In the past, symbol names of monomorphized functions
+// were not deterministic (which we want to avoid).
+//
+// The test tries to exercise as many different paths into symbol name
+// generation as possible:
+//
+// - regular functions
+// - generic functions
+// - methods
+// - statics
+// - closures
+// - enum variant constructors
+// - tuple struct constructors
+// - drop glue
+// - FnOnce adapters
+// - Trait object shims
+// - Fn Pointer shims
+
+#![allow(dead_code)]
+
+extern crate reproducible_build_aux;
+
+static STATIC: i32 = 1234;
+
+pub struct Struct<T1, T2> {
+    x: T1,
+    y: T2,
+}
+
+fn regular_fn(_: i32) {}
+
+fn generic_fn<T1, T2>() {}
+
+impl<T1, T2> Drop for Struct<T1, T2> {
+    fn drop(&mut self) {}
+}
+
+pub enum Enum {
+    Variant1,
+    Variant2(u32),
+    Variant3 { x: u32 }
+}
+
+struct TupleStruct(i8, i16, i32, i64);
+
+impl TupleStruct {
+    pub fn bar(&self) {}
+}
+
+trait Trait<T1, T2> {
+    fn foo(&self);
+}
+
+impl Trait<i32, u64> for u64 {
+    fn foo(&self) {}
+}
+
+impl reproducible_build_aux::Trait<char, String> for TupleStruct {
+    fn foo(&self) {}
+}
+
+fn main() {
+    regular_fn(STATIC);
+    generic_fn::<u32, char>();
+    generic_fn::<char, Struct<u32, u64>>();
+    generic_fn::<Struct<u64, u32>, reproducible_build_aux::Struct<u32, u64>>();
+
+    let dropped = Struct {
+        x: "",
+        y: 'a',
+    };
+
+    let _ = Enum::Variant1;
+    let _ = Enum::Variant2(0);
+    let _ = Enum::Variant3 { x: 0 };
+    let _ = TupleStruct(1, 2, 3, 4);
+
+    let closure = |x| {
+        x + 1i32
+    };
+
+    fn inner<F: Fn(i32) -> i32>(f: F) -> i32 {
+        f(STATIC)
+    }
+
+    println!("{}", inner(closure));
+
+    let object_shim: &Trait<i32, u64> = &0u64;
+    object_shim.foo();
+
+    fn with_fn_once_adapter<F: FnOnce(i32)>(f: F) {
+        f(0);
+    }
+
+    with_fn_once_adapter(|_:i32| { });
+
+    reproducible_build_aux::regular_fn(STATIC);
+    reproducible_build_aux::generic_fn::<u32, char>();
+    reproducible_build_aux::generic_fn::<char, Struct<u32, u64>>();
+    reproducible_build_aux::generic_fn::<Struct<u64, u32>,
+                                         reproducible_build_aux::Struct<u32, u64>>();
+
+    let _ = reproducible_build_aux::Enum::Variant1;
+    let _ = reproducible_build_aux::Enum::Variant2(0);
+    let _ = reproducible_build_aux::Enum::Variant3 { x: 0 };
+    let _ = reproducible_build_aux::TupleStruct(1, 2, 3, 4);
+
+    let object_shim: &reproducible_build_aux::Trait<char, String> = &TupleStruct(0, 1, 2, 3);
+    object_shim.foo();
+
+    let pointer_shim: &Fn(i32) = &regular_fn;
+
+    TupleStruct(1, 2, 3, 4).bar();
+}
+
+
diff --git a/src/test/run-make/rustdoc-json/Makefile b/src/test/run-make/rustdoc-json/Makefile
deleted file mode 100644 (file)
index e49ab64..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
--include ../tools.mk
-all:
-       $(HOST_RPATH_ENV) $(RUSTDOC) -w json -o $(TMPDIR)/doc.json foo.rs
-       $(HOST_RPATH_ENV) $(RUSTDOC) -o $(TMPDIR)/doc $(TMPDIR)/doc.json
diff --git a/src/test/run-make/rustdoc-json/foo.rs b/src/test/run-make/rustdoc-json/foo.rs
deleted file mode 100644 (file)
index 3bd56c1..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![crate_name = "foo"]
-
-//! Very docs
-
-pub mod bar {
-
-    /// So correct
-    pub mod baz {
-        /// Much detail
-        pub fn baz() { }
-    }
-
-    /// *wow*
-    pub trait Doge { fn dummy(&self) { } }
-}
diff --git a/src/test/run-make/stable-symbol-names/Makefile b/src/test/run-make/stable-symbol-names/Makefile
new file mode 100644 (file)
index 0000000..da96d1b
--- /dev/null
@@ -0,0 +1,18 @@
+-include ../tools.mk
+
+# This test case makes sure that monomorphizations of the same function with the
+# same set of generic arguments will have the same symbol names when
+# instantiated in different crates.
+
+dump-symbols = nm "$(TMPDIR)/lib$(1).rlib" \
+             |  grep "some_test_function" \
+             | sed "s/^[0-9a-f]\{8,16\}/00000000/" \
+             | sort \
+             > "$(TMPDIR)/$(1).nm"
+
+all:
+       $(RUSTC) stable-symbol-names1.rs
+       $(RUSTC) stable-symbol-names2.rs
+       $(call dump-symbols,stable_symbol_names1)
+       $(call dump-symbols,stable_symbol_names2)
+       cmp "$(TMPDIR)/stable_symbol_names1.nm" "$(TMPDIR)/stable_symbol_names2.nm"
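The `dump-symbols` helper keeps only the mangled names: `grep` selects the `some_test_function` instantiations, and the `sed` expression replaces the leading 8-16 hex digits of each `nm` line with zeros so that differing offsets cannot cause a spurious mismatch. As a rough illustration (the mangled name below is hypothetical), a line such as

    0000000000000f10 T _ZN20stable_symbol_names118some_test_function17h0123456789abcdefE

becomes

    00000000 T _ZN20stable_symbol_names118some_test_function17h0123456789abcdefE

and the two sorted symbol lists are then compared byte-for-byte with `cmp`.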
diff --git a/src/test/run-make/stable-symbol-names/stable-symbol-names1.rs b/src/test/run-make/stable-symbol-names/stable-symbol-names1.rs
new file mode 100644 (file)
index 0000000..7b2cd85
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type="rlib"]
+
+pub fn some_test_function<T>(t: T) -> T {
+  t
+}
+
+pub fn user() {
+  some_test_function(0u32);
+  some_test_function("abc");
+  let x = 2u64;
+  some_test_function(&x);
+}
diff --git a/src/test/run-make/stable-symbol-names/stable-symbol-names2.rs b/src/test/run-make/stable-symbol-names/stable-symbol-names2.rs
new file mode 100644 (file)
index 0000000..ff027d6
--- /dev/null
@@ -0,0 +1,20 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type="rlib"]
+
+extern crate stable_symbol_names1;
+
+pub fn user() {
+  stable_symbol_names1::some_test_function(1u32);
+  stable_symbol_names1::some_test_function("def");
+  let x = 2u64;
+  stable_symbol_names1::some_test_function(&x);
+}
index e3080d29ec33077ce320a4b0a8f2758d74ff7483..b7083c2776aeccd3f5575be4e55c25dc7222d0f9 100644 (file)
@@ -1,4 +1,5 @@
 {
+    "data-layout": "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128",
     "llvm-target": "i686-unknown-linux-gnu",
     "target-endian": "little",
     "target-pointer-width": "32",
index 4517af8e24be595c38c3a735077297fd51572864..fc9b65c47a9fe56e6fab734bad209228136d5a19 100644 (file)
@@ -5,3 +5,5 @@ all:
        $(RUSTC) --test test-ignore-cfg.rs --cfg ignorecfg
        $(call RUN,test-ignore-cfg) | grep 'shouldnotignore ... ok'
        $(call RUN,test-ignore-cfg) | grep 'shouldignore ... ignored'
+       $(call RUN,test-ignore-cfg --quiet) | grep "^i\.$$"
+       $(call RUN,test-ignore-cfg --quiet) | grep -v 'should'
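The two added lines exercise libtest's `--quiet` mode, which prints a single character per test rather than one line per test. Assuming the usual encoding (`.` for a passing test, `i` for an ignored one), this file's two tests should produce exactly

    i.

which is what the `^i\.$` pattern matches; the second quiet-mode invocation additionally greps for output lines that do not mention the test names.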
diff --git a/src/test/run-pass-fulldeps/conditional-debug-macro-off.rs b/src/test/run-pass-fulldeps/conditional-debug-macro-off.rs
new file mode 100644 (file)
index 0000000..c6beb5b
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C debug-assertions=no
+// exec-env:RUST_LOG=conditional-debug-macro-off=4
+
+
+#![feature(rustc_private)]
+
+#[macro_use]
+extern crate log;
+
+pub fn main() {
+    // only panics if debug! evaluates its argument.
+    debug!("{:?}", { if true { panic!() } });
+}
diff --git a/src/test/run-pass-fulldeps/deprecated-derive.rs b/src/test/run-pass-fulldeps/deprecated-derive.rs
new file mode 100644 (file)
index 0000000..69a7f88
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_private)]
+
+extern crate serialize;
+
+#[derive(Encodable)]
+//~^ WARNING derive(Encodable) is deprecated in favor of derive(RustcEncodable)
+struct Test1;
+
+fn main() { }
diff --git a/src/test/run-pass-fulldeps/deriving-encodable-decodable-box.rs b/src/test/run-pass-fulldeps/deriving-encodable-decodable-box.rs
new file mode 100644 (file)
index 0000000..328cc13
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+#![allow(unknown_features)]
+#![feature(box_syntax)]
+#![feature(rustc_private)]
+
+extern crate serialize;
+
+use serialize::{Encodable, Decodable};
+use serialize::json;
+
+#[derive(Encodable, Decodable)]
+struct A {
+    foo: Box<[bool]>,
+}
+
+fn main() {
+    // FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
+    let obj = A { foo: Box::new([true, false]) };
+    let s = json::encode(&obj).unwrap();
+    let obj2: A = json::decode(&s).unwrap();
+    assert_eq!(obj.foo, obj2.foo);
+}
diff --git a/src/test/run-pass-fulldeps/deriving-encodable-decodable-cell-refcell.rs b/src/test/run-pass-fulldeps/deriving-encodable-decodable-cell-refcell.rs
new file mode 100644 (file)
index 0000000..6e5eb86
--- /dev/null
@@ -0,0 +1,43 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This briefly tests the capability of `Cell` and `RefCell` to implement the
+// `Encodable` and `Decodable` traits via `#[derive(Encodable, Decodable)]`
+
+
+#![feature(rustc_private)]
+
+extern crate serialize;
+
+use std::cell::{Cell, RefCell};
+use serialize::{Encodable, Decodable};
+use serialize::json;
+
+#[derive(Encodable, Decodable)]
+struct A {
+    baz: isize
+}
+
+#[derive(Encodable, Decodable)]
+struct B {
+    foo: Cell<bool>,
+    bar: RefCell<A>,
+}
+
+fn main() {
+    let obj = B {
+        foo: Cell::new(true),
+        bar: RefCell::new( A { baz: 2 } )
+    };
+    let s = json::encode(&obj).unwrap();
+    let obj2: B = json::decode(&s).unwrap();
+    assert_eq!(obj.foo.get(), obj2.foo.get());
+    assert_eq!(obj.bar.borrow().baz, obj2.bar.borrow().baz);
+}
diff --git a/src/test/run-pass-fulldeps/deriving-global.rs b/src/test/run-pass-fulldeps/deriving-global.rs
new file mode 100644 (file)
index 0000000..10e8ddc
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rand, rustc_private)]
+
+extern crate serialize;
+
+mod submod {
+    // if any of these derives are implemented without globally qualified
+    // paths for their function calls, then being in a submodule will
+    // (correctly) cause errors about an unrecognised module `std` (or `extra`)
+    #[derive(PartialEq, PartialOrd, Eq, Ord,
+               Hash,
+               Clone,
+               Debug,
+               Encodable, Decodable)]
+    enum A { A1(usize), A2(isize) }
+
+    #[derive(PartialEq, PartialOrd, Eq, Ord,
+               Hash,
+               Clone,
+               Debug,
+               Encodable, Decodable)]
+    struct B { x: usize, y: isize }
+
+    #[derive(PartialEq, PartialOrd, Eq, Ord,
+               Hash,
+               Clone,
+               Debug,
+               Encodable, Decodable)]
+    struct C(usize, isize);
+
+}
+
+pub fn main() {}
diff --git a/src/test/run-pass-fulldeps/dropck_tarena_sound_drop.rs b/src/test/run-pass-fulldeps/dropck_tarena_sound_drop.rs
new file mode 100644 (file)
index 0000000..db30bfb
--- /dev/null
@@ -0,0 +1,50 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check that an arena (TypedArena) can carry elements whose drop
+// methods might access borrowed data, as long as the borrowed data
+// has a lifetime that strictly outlives the arena itself.
+//
+// Compare against compile-fail/dropck_tarena_unsound_drop.rs, which
+// shows a similar setup, but restricts `f` so that the struct `C<'a>`
+// is force-fed a lifetime equal to that of the borrowed arena.
+
+#![allow(unstable)]
+#![feature(rustc_private)]
+
+extern crate arena;
+
+use arena::TypedArena;
+
+trait HasId { fn count(&self) -> usize; }
+
+struct CheckId<T:HasId> { v: T }
+
+// In the code below, the impl of HasId for `&'a usize` does not
+// actually access the borrowed data, but the point is that the
+// interface to CheckId does not (and cannot) know that, and therefore
+// when encountering a value V of type CheckId<S>, we must
+// conservatively force the type S to strictly outlive V.
+impl<T:HasId> Drop for CheckId<T> {
+    fn drop(&mut self) {
+        assert!(self.v.count() > 0);
+    }
+}
+
+struct C<'a> { _v: CheckId<&'a usize>, }
+
+impl<'a> HasId for &'a usize { fn count(&self) -> usize { 1 } }
+
+fn f<'a, 'b>(_arena: &'a TypedArena<C<'b>>) {}
+
+fn main() {
+    let arena: TypedArena<C> = TypedArena::new();
+    f(&arena);
+}
diff --git a/src/test/run-pass-fulldeps/empty-struct-braces-derive.rs b/src/test/run-pass-fulldeps/empty-struct-braces-derive.rs
new file mode 100644 (file)
index 0000000..8d19209
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// `#[derive(Trait)]` works for empty structs/variants with braces
+
+#![feature(rustc_private)]
+
+extern crate serialize as rustc_serialize;
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash,
+         Default, Debug, RustcEncodable, RustcDecodable)]
+struct S {}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash,
+         Debug, RustcEncodable, RustcDecodable)]
+enum E {
+    V {},
+    U,
+}
+
+fn main() {
+    let s = S {};
+    let s1 = s;
+    let s2 = s.clone();
+    assert_eq!(s, s1);
+    assert_eq!(s, s2);
+    assert!(!(s < s1));
+    assert_eq!(format!("{:?}", s), "S");
+
+    let e = E::V {};
+    let e1 = e;
+    let e2 = e.clone();
+    assert_eq!(e, e1);
+    assert_eq!(e, e2);
+    assert!(!(e < e1));
+    assert_eq!(format!("{:?}", e), "V");
+}
diff --git a/src/test/run-pass-fulldeps/extern-mod-syntax.rs b/src/test/run-pass-fulldeps/extern-mod-syntax.rs
new file mode 100644 (file)
index 0000000..37404ee
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(unused_imports)]
+#![feature(rustc_private)]
+
+extern crate serialize;
+use serialize::json::Object;
+
+pub fn main() {
+    println!("Hello world!");
+}
diff --git a/src/test/run-pass-fulldeps/issue-11881.rs b/src/test/run-pass-fulldeps/issue-11881.rs
new file mode 100644 (file)
index 0000000..9da04f7
--- /dev/null
@@ -0,0 +1,59 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+#![feature(rustc_private)]
+
+extern crate rbml;
+extern crate serialize;
+
+use std::io::Cursor;
+use std::io::prelude::*;
+use std::fmt;
+use std::slice;
+
+use serialize::{Encodable, Encoder};
+use serialize::json;
+
+use rbml::writer;
+
+#[derive(Encodable)]
+struct Foo {
+    baz: bool,
+}
+
+#[derive(Encodable)]
+struct Bar {
+    froboz: usize,
+}
+
+enum WireProtocol {
+    JSON,
+    RBML,
+    // ...
+}
+
+fn encode_json<T: Encodable>(val: &T, wr: &mut Cursor<Vec<u8>>) {
+    write!(wr, "{}", json::as_json(val));
+}
+fn encode_rbml<T: Encodable>(val: &T, wr: &mut Cursor<Vec<u8>>) {
+    let mut encoder = writer::Encoder::new(wr);
+    val.encode(&mut encoder);
+}
+
+pub fn main() {
+    let target = Foo{baz: false,};
+    let mut wr = Cursor::new(Vec::new());
+    let proto = WireProtocol::JSON;
+    match proto {
+        WireProtocol::JSON => encode_json(&target, &mut wr),
+        WireProtocol::RBML => encode_rbml(&target, &mut wr)
+    }
+}
diff --git a/src/test/run-pass-fulldeps/issue-14021.rs b/src/test/run-pass-fulldeps/issue-14021.rs
new file mode 100644 (file)
index 0000000..907967d
--- /dev/null
@@ -0,0 +1,30 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_private)]
+
+extern crate serialize;
+
+use serialize::{Encodable, Decodable};
+use serialize::json;
+
+#[derive(Encodable, Decodable, PartialEq, Debug)]
+struct UnitLikeStruct;
+
+pub fn main() {
+    let obj = UnitLikeStruct;
+    let json_str: String = json::encode(&obj).unwrap();
+
+    let json_object = json::from_str(&json_str);
+    let mut decoder = json::Decoder::new(json_object.unwrap());
+    let mut decoded_obj: UnitLikeStruct = Decodable::decode(&mut decoder).unwrap();
+
+    assert_eq!(obj, decoded_obj);
+}
diff --git a/src/test/run-pass-fulldeps/issue-15924.rs b/src/test/run-pass-fulldeps/issue-15924.rs
new file mode 100644 (file)
index 0000000..0c20877
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// pretty-expanded FIXME #23616
+
+#![feature(rustc_private)]
+
+extern crate serialize;
+
+use std::fmt;
+use serialize::{Encoder, Encodable};
+use serialize::json;
+
+struct Foo<T: Encodable> {
+    v: T,
+}
+
+impl<T: Encodable> Drop for Foo<T> {
+    fn drop(&mut self) {
+        json::encode(&self.v);
+    }
+}
+
+fn main() {
+    let _ = Foo { v: 10 };
+}
diff --git a/src/test/run-pass-fulldeps/issue-24972.rs b/src/test/run-pass-fulldeps/issue-24972.rs
new file mode 100644 (file)
index 0000000..ae7eb84
--- /dev/null
@@ -0,0 +1,36 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_private)]
+
+extern crate serialize;
+
+use serialize::{Encodable, Decodable};
+use std::fmt::Display;
+
+pub trait Entity : Decodable + Encodable + Sized {
+    type Key: Clone + Decodable + Encodable + ToString + Display + Eq + Ord + Sized;
+
+    fn id(&self) -> Self::Key;
+
+    fn find_by_id(id: Self::Key) -> Option<Self>;
+}
+
+pub struct DbRef<E: Entity> {
+    pub id: E::Key,
+}
+
+impl<E> DbRef<E> where E: Entity {
+    fn get(self) -> Option<E> {
+        E::find_by_id(self.id)
+    }
+}
+
+fn main() {}
diff --git a/src/test/run-pass-fulldeps/issue-2804.rs b/src/test/run-pass-fulldeps/issue-2804.rs
new file mode 100644 (file)
index 0000000..a2b4e21
--- /dev/null
@@ -0,0 +1,78 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(collections)]
+#![feature(rustc_private)]
+
+extern crate collections;
+extern crate serialize;
+
+use std::collections::HashMap;
+use serialize::json::{self, Json};
+use std::option;
+
+enum object {
+    bool_value(bool),
+    int_value(i64),
+}
+
+fn lookup(table: json::Object, key: String, default: String) -> String
+{
+    match table.get(&key) {
+        option::Option::Some(&Json::String(ref s)) => {
+            s.to_string()
+        }
+        option::Option::Some(value) => {
+            println!("{} was expected to be a string but is a {}", key, value);
+            default
+        }
+        option::Option::None => {
+            default
+        }
+    }
+}
+
+fn add_interface(_store: isize, managed_ip: String, data: json::Json) -> (String, object)
+{
+    match &data {
+        &Json::Object(ref interface) => {
+            let name = lookup(interface.clone(),
+                              "ifDescr".to_string(),
+                              "".to_string());
+            let label = format!("{}-{}", managed_ip, name);
+
+            (label, object::bool_value(false))
+        }
+        _ => {
+            println!("Expected dict for {} interfaces, found {}", managed_ip, data);
+            ("gnos:missing-interface".to_string(), object::bool_value(true))
+        }
+    }
+}
+
+fn add_interfaces(store: isize, managed_ip: String, device: HashMap<String, json::Json>)
+-> Vec<(String, object)> {
+    match device["interfaces"] {
+        Json::Array(ref interfaces) =>
+        {
+          interfaces.iter().map(|interface| {
+                add_interface(store, managed_ip.clone(), (*interface).clone())
+          }).collect()
+        }
+        _ =>
+        {
+            println!("Expected list for {} interfaces, found {}", managed_ip,
+                     device["interfaces"]);
+            Vec::new()
+        }
+    }
+}
+
+pub fn main() {}
diff --git a/src/test/run-pass-fulldeps/issue-4016.rs b/src/test/run-pass-fulldeps/issue-4016.rs
new file mode 100644 (file)
index 0000000..bc3fa16
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+#![feature(rustc_private)]
+
+extern crate serialize;
+
+use serialize::{json, Decodable};
+
+trait JD : Decodable {}
+
+fn exec<T: JD>() {
+    let doc = json::from_str("").unwrap();
+    let mut decoder = json::Decoder::new(doc);
+    let _v: T = Decodable::decode(&mut decoder).unwrap();
+    panic!()
+}
+
+pub fn main() {}
diff --git a/src/test/run-pass-fulldeps/issue-4036.rs b/src/test/run-pass-fulldeps/issue-4036.rs
new file mode 100644 (file)
index 0000000..ae7bb8a
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+// Issue #4036: Test for an issue that arose around fixing up type inference
+// byproducts in vtable records.
+
+// pretty-expanded FIXME #23616
+
+#![feature(rustc_private)]
+
+extern crate serialize;
+
+use serialize::{json, Decodable};
+
+pub fn main() {
+    let json = json::from_str("[1]").unwrap();
+    let mut decoder = json::Decoder::new(json);
+    let _x: Vec<isize> = Decodable::decode(&mut decoder).unwrap();
+}
diff --git a/src/test/run-pass-fulldeps/linkage-visibility.rs b/src/test/run-pass-fulldeps/linkage-visibility.rs
new file mode 100644 (file)
index 0000000..e6eaefb
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:linkage-visibility.rs
+// ignore-android: FIXME(#10356)
+// ignore-windows: std::dynamic_lib does not work well on Windows
+// ignore-musl
+
+extern crate linkage_visibility as foo;
+
+pub fn main() {
+    foo::test();
+    foo::foo2::<isize>();
+    foo::foo();
+}
diff --git a/src/test/run-pass-fulldeps/logging-enabled-debug.rs b/src/test/run-pass-fulldeps/logging-enabled-debug.rs
new file mode 100644 (file)
index 0000000..3ae4884
--- /dev/null
@@ -0,0 +1,24 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:-C debug-assertions=no
+// exec-env:RUST_LOG=logging-enabled-debug=debug
+
+
+#![feature(rustc_private)]
+
+#[macro_use]
+extern crate log;
+
+pub fn main() {
+    if log_enabled!(log::DEBUG) {
+        panic!("what?! debugging?");
+    }
+}
diff --git a/src/test/run-pass-fulldeps/logging-enabled.rs b/src/test/run-pass-fulldeps/logging-enabled.rs
new file mode 100644 (file)
index 0000000..2975835
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// exec-env:RUST_LOG=logging_enabled=info
+
+
+#![feature(rustc_private)]
+
+#[macro_use]
+extern crate log;
+
+pub fn main() {
+    if log_enabled!(log::DEBUG) {
+        panic!("what?! debugging?");
+    }
+    if !log_enabled!(log::INFO) {
+        panic!("what?! no info?");
+    }
+}
diff --git a/src/test/run-pass-fulldeps/logging-right-crate.rs b/src/test/run-pass-fulldeps/logging-right-crate.rs
new file mode 100644 (file)
index 0000000..7caeeb4
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:logging_right_crate.rs
+// exec-env:RUST_LOG=logging-right-crate=debug
+
+// This is a test for issue #3046 to make sure that when we monomorphize a
+// function from one crate to another the right top-level logging name is
+// preserved.
+//
+// It used to be the case that if logging were turned on for this crate, all
+// monomorphized functions from other crates had logging turned on (their
+// logging module names were all incorrect). This test ensures that this no
+// longer happens by enabling logging for *this* crate and then invoking a
+// function in an external crate which will panic when logging is enabled.
+
+// pretty-expanded FIXME #23616
+
+extern crate logging_right_crate;
+
+pub fn main() {
+    // this function panics if logging is turned on
+    logging_right_crate::foo::<isize>();
+}
diff --git a/src/test/run-pass-fulldeps/logging-separate-lines.rs b/src/test/run-pass-fulldeps/logging-separate-lines.rs
new file mode 100644 (file)
index 0000000..0975932
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-windows
+// exec-env:RUST_LOG=debug
+// compile-flags:-C debug-assertions=y
+
+#![feature(rustc_private)]
+
+#[macro_use]
+extern crate log;
+
+use std::process::Command;
+use std::env;
+use std::str;
+
+fn main() {
+    let args: Vec<String> = env::args().collect();
+    if args.len() > 1 && args[1] == "child" {
+        debug!("foo");
+        debug!("bar");
+        return
+    }
+
+    let p = Command::new(&args[0])
+                    .arg("child")
+                    .output().unwrap();
+    assert!(p.status.success());
+    let mut lines = str::from_utf8(&p.stderr).unwrap().lines();
+    assert!(lines.next().unwrap().contains("foo"));
+    assert!(lines.next().unwrap().contains("bar"));
+}
diff --git a/src/test/run-pass-fulldeps/regions-mock-tcx.rs b/src/test/run-pass-fulldeps/regions-mock-tcx.rs
new file mode 100644 (file)
index 0000000..ed3cec4
--- /dev/null
@@ -0,0 +1,141 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+// Test a sample usage pattern for regions. Makes use of the
+// following features:
+//
+// - Multiple lifetime parameters
+// - Arenas
+
+#![feature(rustc_private, libc, collections)]
+
+extern crate arena;
+extern crate collections;
+extern crate libc;
+
+use TypeStructure::{TypeInt, TypeFunction};
+use AstKind::{ExprInt, ExprVar, ExprLambda};
+use arena::TypedArena;
+use std::collections::HashMap;
+use std::mem;
+
+type Type<'tcx> = &'tcx TypeStructure<'tcx>;
+
+#[derive(Copy, Clone, Debug)]
+enum TypeStructure<'tcx> {
+    TypeInt,
+    TypeFunction(Type<'tcx>, Type<'tcx>),
+}
+
+impl<'tcx> PartialEq for TypeStructure<'tcx> {
+    fn eq(&self, other: &TypeStructure<'tcx>) -> bool {
+        match (*self, *other) {
+            (TypeInt, TypeInt) => true,
+            (TypeFunction(s_a, s_b), TypeFunction(o_a, o_b)) => *s_a == *o_a && *s_b == *o_b,
+            _ => false
+        }
+    }
+}
+
+impl<'tcx> Eq for TypeStructure<'tcx> {}
+
+type TyArena<'tcx> = TypedArena<TypeStructure<'tcx>>;
+type AstArena<'ast> = TypedArena<AstStructure<'ast>>;
+
+struct TypeContext<'tcx, 'ast> {
+    ty_arena: &'tcx TyArena<'tcx>,
+    types: Vec<Type<'tcx>> ,
+    type_table: HashMap<NodeId, Type<'tcx>>,
+
+    ast_arena: &'ast AstArena<'ast>,
+    ast_counter: usize,
+}
+
+impl<'tcx,'ast> TypeContext<'tcx, 'ast> {
+    fn new(ty_arena: &'tcx TyArena<'tcx>, ast_arena: &'ast AstArena<'ast>)
+           -> TypeContext<'tcx, 'ast> {
+        TypeContext { ty_arena: ty_arena,
+                      types: Vec::new(),
+                      type_table: HashMap::new(),
+
+                      ast_arena: ast_arena,
+                      ast_counter: 0 }
+    }
+
+    fn add_type(&mut self, s: TypeStructure<'tcx>) -> Type<'tcx> {
+        for &ty in &self.types {
+            if *ty == s {
+                return ty;
+            }
+        }
+
+        let ty = self.ty_arena.alloc(s);
+        self.types.push(ty);
+        ty
+    }
+
+    fn set_type(&mut self, id: NodeId, ty: Type<'tcx>) -> Type<'tcx> {
+        self.type_table.insert(id, ty);
+        ty
+    }
+
+    fn ast(&mut self, a: AstKind<'ast>) -> Ast<'ast> {
+        let id = self.ast_counter;
+        self.ast_counter += 1;
+        self.ast_arena.alloc(AstStructure { id: NodeId {id:id}, kind: a })
+    }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+struct NodeId {
+    id: usize
+}
+
+type Ast<'ast> = &'ast AstStructure<'ast>;
+
+#[derive(Copy, Clone)]
+struct AstStructure<'ast> {
+    id: NodeId,
+    kind: AstKind<'ast>
+}
+
+#[derive(Copy, Clone)]
+enum AstKind<'ast> {
+    ExprInt,
+    ExprVar(usize),
+    ExprLambda(Ast<'ast>),
+}
+
+fn compute_types<'tcx,'ast>(tcx: &mut TypeContext<'tcx,'ast>,
+                            ast: Ast<'ast>) -> Type<'tcx>
+{
+    match ast.kind {
+        ExprInt | ExprVar(_) => {
+            let ty = tcx.add_type(TypeInt);
+            tcx.set_type(ast.id, ty)
+        }
+        ExprLambda(ast) => {
+            let arg_ty = tcx.add_type(TypeInt);
+            let body_ty = compute_types(tcx, ast);
+            let lambda_ty = tcx.add_type(TypeFunction(arg_ty, body_ty));
+            tcx.set_type(ast.id, lambda_ty)
+        }
+    }
+}
+
+pub fn main() {
+    let ty_arena = TypedArena::new();
+    let ast_arena = TypedArena::new();
+    let mut tcx = TypeContext::new(&ty_arena, &ast_arena);
+    let ast = tcx.ast(ExprInt);
+    let ty = compute_types(&mut tcx, ast);
+    assert_eq!(*ty, TypeInt);
+}
diff --git a/src/test/run-pass-fulldeps/rust-log-filter.rs b/src/test/run-pass-fulldeps/rust-log-filter.rs
new file mode 100644 (file)
index 0000000..306d24e
--- /dev/null
@@ -0,0 +1,58 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// exec-env:RUST_LOG=rust_log_filter/foo
+// ignore-emscripten no threads support
+
+#![allow(unknown_features)]
+#![feature(box_syntax, std_misc, rustc_private)]
+
+#[macro_use]
+extern crate log;
+
+use std::sync::mpsc::{channel, Sender, Receiver};
+use std::thread;
+
+pub struct ChannelLogger {
+    tx: Sender<String>
+}
+
+impl ChannelLogger {
+    pub fn new() -> (Box<ChannelLogger>, Receiver<String>) {
+        let (tx, rx) = channel();
+        (box ChannelLogger { tx: tx }, rx)
+    }
+}
+
+impl log::Logger for ChannelLogger {
+    fn log(&mut self, record: &log::LogRecord) {
+        self.tx.send(format!("{}", record.args)).unwrap();
+    }
+}
+
+pub fn main() {
+    let (logger, rx) = ChannelLogger::new();
+
+    let t = thread::spawn(move|| {
+        log::set_logger(logger);
+
+        info!("foo");
+        info!("bar");
+        info!("foo bar");
+        info!("bar foo");
+    });
+
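+    // RUST_LOG=rust_log_filter/foo: the `/foo` part is a message filter, so
+    // only records whose text contains "foo" are delivered; the lone "bar"
+    // logged above never reaches the receiver.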
+    assert_eq!(rx.recv().unwrap(), "foo");
+    assert_eq!(rx.recv().unwrap(), "foo bar");
+    assert_eq!(rx.recv().unwrap(), "bar foo");
+    assert!(rx.recv().is_err());
+
+    t.join();
+}
index 247e82c2f09245ec06c8d6639b4a2889a505d94c..0de949471c68407cf66129e7aaa16d494d473d6d 100644 (file)
@@ -8,10 +8,12 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // no-prefer-dynamic
 
 #![allow(dead_code)]
-#![feature(const_fn)]
+#![feature(const_fn, rustc_attrs)]
 
 // check dtor calling order when casting enums.
 
@@ -36,6 +38,7 @@ impl Drop for E {
     }
 }
 
+#[rustc_no_mir] // FIXME #27840 MIR miscompiles this.
 fn main() {
     assert_eq!(FLAG.load(Ordering::SeqCst), 0);
     {
index be69a4eaddfa1eda51b5831ff3fa87b5216c388f..32ed2b330118d5cd85badd30eb5f4df1192f4da7 100644 (file)
@@ -8,12 +8,10 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(alloc_jemalloc, alloc_system)]
+#![feature(alloc_jemalloc)]
 
-#[cfg(not(any(windows, target_os = "bitrig", target_os = "openbsd", target_os="emscripten")))]
+#[cfg(any(target_os = "linux", target_os = "macos"))]
 extern crate alloc_jemalloc;
-#[cfg(any(windows, target_os = "bitrig", target_os = "openbsd", target_os="emscripten"))]
-extern crate alloc_system;
 
 fn main() {
     println!("{:?}", Box::new(3));
diff --git a/src/test/run-pass/allocator-jemalloc.rs b/src/test/run-pass/allocator-jemalloc.rs
deleted file mode 100644 (file)
index ad1438f..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// no-prefer-dynamic
-// ignore-windows no jemalloc on windows
-// ignore-bitrig no jemalloc on bitrig
-// ignore-openbsd no jemalloc on openbsd
-// ignore-emscripten no jemalloc on emscripten
-
-#![feature(alloc_jemalloc)]
-
-extern crate alloc_jemalloc;
-
-fn main() {
-    println!("{:?}", Box::new(3));
-}
index 605ca6b65e2cff94f7077cd8ef9e3609d77ecea2..01d1b27bfc99f803a42643588485cae13edbb471 100644 (file)
@@ -17,6 +17,7 @@ use empty_struct::XEmpty2 as XFoo;
 
 struct Foo;
 
+#[derive(PartialEq, Eq)]
 enum Bar {
     Var1,
     Var2,
index 7946704709ce4124704c51a300299d0ad8e23de6..1d9fa248e3d93e97d97aab24b68d2de49fa31f8f 100644 (file)
@@ -16,22 +16,22 @@ static ATOMIC: AtomicIsize = ATOMIC_ISIZE_INIT;
 
 fn main() {
     // Make sure trans can emit all the intrinsics correctly
-    ATOMIC.compare_exchange(0, 1, Relaxed, Relaxed);
-    ATOMIC.compare_exchange(0, 1, Acquire, Relaxed);
-    ATOMIC.compare_exchange(0, 1, Release, Relaxed);
-    ATOMIC.compare_exchange(0, 1, AcqRel, Relaxed);
-    ATOMIC.compare_exchange(0, 1, SeqCst, Relaxed);
-    ATOMIC.compare_exchange(0, 1, Acquire, Acquire);
-    ATOMIC.compare_exchange(0, 1, AcqRel, Acquire);
-    ATOMIC.compare_exchange(0, 1, SeqCst, Acquire);
-    ATOMIC.compare_exchange(0, 1, SeqCst, SeqCst);
-    ATOMIC.compare_exchange_weak(0, 1, Relaxed, Relaxed);
-    ATOMIC.compare_exchange_weak(0, 1, Acquire, Relaxed);
-    ATOMIC.compare_exchange_weak(0, 1, Release, Relaxed);
-    ATOMIC.compare_exchange_weak(0, 1, AcqRel, Relaxed);
-    ATOMIC.compare_exchange_weak(0, 1, SeqCst, Relaxed);
-    ATOMIC.compare_exchange_weak(0, 1, Acquire, Acquire);
-    ATOMIC.compare_exchange_weak(0, 1, AcqRel, Acquire);
-    ATOMIC.compare_exchange_weak(0, 1, SeqCst, Acquire);
-    ATOMIC.compare_exchange_weak(0, 1, SeqCst, SeqCst);
+    ATOMIC.compare_exchange(0, 1, Relaxed, Relaxed).ok();
+    ATOMIC.compare_exchange(0, 1, Acquire, Relaxed).ok();
+    ATOMIC.compare_exchange(0, 1, Release, Relaxed).ok();
+    ATOMIC.compare_exchange(0, 1, AcqRel, Relaxed).ok();
+    ATOMIC.compare_exchange(0, 1, SeqCst, Relaxed).ok();
+    ATOMIC.compare_exchange(0, 1, Acquire, Acquire).ok();
+    ATOMIC.compare_exchange(0, 1, AcqRel, Acquire).ok();
+    ATOMIC.compare_exchange(0, 1, SeqCst, Acquire).ok();
+    ATOMIC.compare_exchange(0, 1, SeqCst, SeqCst).ok();
+    ATOMIC.compare_exchange_weak(0, 1, Relaxed, Relaxed).ok();
+    ATOMIC.compare_exchange_weak(0, 1, Acquire, Relaxed).ok();
+    ATOMIC.compare_exchange_weak(0, 1, Release, Relaxed).ok();
+    ATOMIC.compare_exchange_weak(0, 1, AcqRel, Relaxed).ok();
+    ATOMIC.compare_exchange_weak(0, 1, SeqCst, Relaxed).ok();
+    ATOMIC.compare_exchange_weak(0, 1, Acquire, Acquire).ok();
+    ATOMIC.compare_exchange_weak(0, 1, AcqRel, Acquire).ok();
+    ATOMIC.compare_exchange_weak(0, 1, SeqCst, Acquire).ok();
+    ATOMIC.compare_exchange_weak(0, 1, SeqCst, SeqCst).ok();
 }
index 8c9ebcd274a4bf1c4fd8c39e164e2e9c60d58b4c..3ed9e8548dcbc05d693d888f49e4d651998c3963 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+#![deny(unused_assignments)]
+
 use std::mem;
 use std::ops::{
     AddAssign, BitAndAssign, BitOrAssign, BitXorAssign, DivAssign, Index, MulAssign, RemAssign,
@@ -27,6 +29,8 @@ impl Slice {
     }
 }
 
+struct View<'a>(&'a mut [i32]);
+
 fn main() {
     let mut x = Int(1);
 
@@ -78,6 +82,12 @@ fn main() {
     assert_eq!(array[0], 1);
     assert_eq!(array[1], 2);
     assert_eq!(array[2], 3);
+
+    // sized indirection
+    // check that this does *not* trigger the unused_assignments lint
+    let mut array = [0, 1, 2];
+    let mut view = View(&mut array);
+    view += 1;
 }
 
 impl AddAssign for Int {
@@ -159,3 +169,11 @@ impl AddAssign<i32> for Slice {
         }
     }
 }
+
+impl<'a> AddAssign<i32> for View<'a> {
+    fn add_assign(&mut self, rhs: i32) {
+        for lhs in self.0.iter_mut() {
+            *lhs += rhs;
+        }
+    }
+}
diff --git a/src/test/run-pass/autoderef-privacy.rs b/src/test/run-pass/autoderef-privacy.rs
new file mode 100644 (file)
index 0000000..e50f1be
--- /dev/null
@@ -0,0 +1,60 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check we do not select a private method or field when computing autoderefs
+
+#![allow(unused)]
+
+#[derive(Default)]
+pub struct Bar2 { i: i32 }
+#[derive(Default)]
+pub struct Baz2(i32);
+
+impl Bar2 {
+    fn f(&self) -> bool { true }
+}
+
+mod foo {
+    #[derive(Default)]
+    pub struct Bar { i: ::Bar2 }
+    #[derive(Default)]
+    pub struct Baz(::Baz2);
+
+    impl Bar {
+        fn f(&self) -> bool { false }
+    }
+
+    impl ::std::ops::Deref for Bar {
+        type Target = ::Bar2;
+        fn deref(&self) -> &::Bar2 { &self.i }
+    }
+
+    impl ::std::ops::Deref for Baz {
+        type Target = ::Baz2;
+        fn deref(&self) -> &::Baz2 { &self.0 }
+    }
+
+    pub fn f(bar: &Bar, baz: &Baz) {
+        // Since the private fields and methods are visible here, there should be no autoderefs.
+        let _: &::Bar2 = &bar.i;
+        let _: &::Baz2 = &baz.0;
+        assert!(!bar.f());
+    }
+}
+
+fn main() {
+    let bar = foo::Bar::default();
+    let baz = foo::Baz::default();
+    foo::f(&bar, &baz);
+
+    let _: i32 = bar.i;
+    let _: i32 = baz.0;
+    assert!(bar.f());
+}
index 48df600214ad0813863e490db07492724ab79867..b80c938fed5d42bb0c6fe3651c9195dee985f4e4 100644 (file)
@@ -11,6 +11,7 @@
 // ignore-test: not a test, used by backtrace-debuginfo.rs to test file!()
 
 #[inline(never)]
+#[rustc_no_mir] // FIXME #31005 MIR missing debuginfo currently.
 pub fn callback<F>(f: F) where F: FnOnce((&'static str, u32)) {
     f((file!(), line!()))
 }
@@ -20,6 +21,7 @@ pub fn callback<F>(f: F) where F: FnOnce((&'static str, u32)) {
 // this case.
 #[cfg_attr(not(target_env = "msvc"), inline(always))]
 #[cfg_attr(target_env = "msvc", inline(never))]
+#[rustc_no_mir] // FIXME #31005 MIR missing debuginfo currently.
 pub fn callback_inlined<F>(f: F) where F: FnOnce((&'static str, u32)) {
     f((file!(), line!()))
 }
index 8b2b26948824f08d89b9252cc024a2714d5e773b..7867c94ce4b11ff9f9ac3272f8fd3848a279c0a7 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+#![feature(rustc_attrs)]
+
 // We disable tail merging here because it can't preserve debuginfo and thus
 // potentially breaks the backtraces. Also, subtle changes can decide whether
 // tail merging succeeds, so the test might work today but fail tomorrow due to a
@@ -32,11 +34,15 @@ macro_rules! dump_and_die {
     ($($pos:expr),*) => ({
         // FIXME(#18285): we cannot include the current position because
         // the macro span takes over the last frame's file/line.
-        if cfg!(target_os = "macos") ||
-           cfg!(target_os = "ios") ||
-           cfg!(target_os = "android") ||
-           cfg!(all(target_os = "linux", target_arch = "arm")) ||
-           cfg!(all(windows, target_env = "gnu")) {
+        if cfg!(any(target_os = "macos",
+                    target_os = "ios",
+                    target_os = "android",
+                    all(target_os = "linux", target_arch = "arm"),
+                    target_os = "windows",
+                    target_os = "freebsd",
+                    target_os = "dragonfly",
+                    target_os = "bitrig",
+                    target_os = "openbsd")) {
             // skip these platforms as this support isn't implemented yet.
         } else {
             dump_filelines(&[$($pos),*]);
@@ -72,6 +78,7 @@ fn dump_filelines(filelines: &[Pos]) {
 }
 
 #[inline(never)]
+#[rustc_no_mir] // FIXME #31005 MIR missing debuginfo currently.
 fn inner(counter: &mut i32, main_pos: Pos, outer_pos: Pos) {
     check!(counter; main_pos, outer_pos);
     check!(counter; main_pos, outer_pos);
@@ -88,6 +95,7 @@ fn inner(counter: &mut i32, main_pos: Pos, outer_pos: Pos) {
 // this case.
 #[cfg_attr(not(target_env = "msvc"), inline(always))]
 #[cfg_attr(target_env = "msvc", inline(never))]
+#[rustc_no_mir] // FIXME #31005 MIR missing debuginfo currently.
 fn inner_inlined(counter: &mut i32, main_pos: Pos, outer_pos: Pos) {
     check!(counter; main_pos, outer_pos);
     check!(counter; main_pos, outer_pos);
@@ -113,6 +121,7 @@ fn inner_inlined(counter: &mut i32, main_pos: Pos, outer_pos: Pos) {
 }
 
 #[inline(never)]
+#[rustc_no_mir] // FIXME #31005 MIR missing debuginfo currently.
 fn outer(mut counter: i32, main_pos: Pos) {
     inner(&mut counter, main_pos, pos!());
     inner_inlined(&mut counter, main_pos, pos!());
@@ -157,6 +166,7 @@ fn run_test(me: &str) {
 }
 
 #[inline(never)]
+#[rustc_no_mir] // FIXME #31005 MIR missing debuginfo currently.
 fn main() {
     let args: Vec<String> = env::args().collect();
     if args.len() >= 2 {
index 3fb52f8c8b4dc321c8efee9e89b15c05755401de..d38f10dd338967050ec5a132be7433df98cbc3d9 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+#![feature(rustc_attrs)]
+
 // no-pretty-expanded FIXME #15189
 // ignore-android FIXME #17520
 // compile-flags:-g
@@ -16,6 +18,8 @@ use std::env;
 use std::process::{Command, Stdio};
 use std::str;
 
+// FIXME #31005 MIR missing debuginfo currently.
+#[cfg_attr(target_env = "msvc", rustc_no_mir)]
 #[inline(never)]
 fn foo() {
     let _v = vec![1, 2, 3];
@@ -24,6 +28,8 @@ fn foo() {
     }
 }
 
+// FIXME #31005 MIR missing debuginfo currently.
+#[cfg_attr(target_env = "msvc", rustc_no_mir)]
 #[inline(never)]
 fn double() {
     struct Double;
@@ -45,13 +51,29 @@ fn template(me: &str) -> Command {
     return m;
 }
 
+fn expected(fn_name: &str) -> String {
+    // FIXME(#32481)
+    //
+    // On windows, we read the function name from debuginfo using some
+    // system APIs. For whatever reason, these APIs seem to use the
+    // "name" field, which is only the "relative" name, not the full
+    // name with namespace info, so we just see `foo` and not
+    // `backtrace::foo` as we see on linux (which uses the linkage
+    // name).
+    if cfg!(windows) && cfg!(target_env = "msvc") {
+        format!(" - {}", fn_name)
+    } else {
+        format!(" - backtrace::{}", fn_name)
+    }
+}
+
 fn runtest(me: &str) {
     // Make sure that the stack trace is printed
     let p = template(me).arg("fail").env("RUST_BACKTRACE", "1").spawn().unwrap();
     let out = p.wait_with_output().unwrap();
     assert!(!out.status.success());
     let s = str::from_utf8(&out.stderr).unwrap();
-    assert!(s.contains("stack backtrace") && s.contains(" - foo"),
+    assert!(s.contains("stack backtrace") && s.contains(&expected("foo")),
             "bad output: {}", s);
 
     // Make sure the stack trace is *not* printed
@@ -61,9 +83,19 @@ fn runtest(me: &str) {
     let out = p.wait_with_output().unwrap();
     assert!(!out.status.success());
     let s = str::from_utf8(&out.stderr).unwrap();
-    assert!(!s.contains("stack backtrace") && !s.contains(" - foo"),
+    assert!(!s.contains("stack backtrace") && !s.contains(&expected("foo")),
             "bad output2: {}", s);
 
+    // Make sure the stack trace is *not* printed
+    // (RUST_BACKTRACE=0 acts as if it were unset from our own environment,
+    // in case the developer is running `make check` with it set.)
+    let p = template(me).arg("fail").env("RUST_BACKTRACE","0").spawn().unwrap();
+    let out = p.wait_with_output().unwrap();
+    assert!(!out.status.success());
+    let s = str::from_utf8(&out.stderr).unwrap();
+    assert!(!s.contains("stack backtrace") && !s.contains(" - foo"),
+            "bad output3: {}", s);
+
     // Make sure a stack trace is printed
     let p = template(me).arg("double-fail").spawn().unwrap();
     let out = p.wait_with_output().unwrap();
@@ -71,7 +103,7 @@ fn runtest(me: &str) {
     let s = str::from_utf8(&out.stderr).unwrap();
     // loosened the following from double::h to double:: due to
     // spurious failures on mac, 32bit, optimized
-    assert!(s.contains("stack backtrace") && s.contains(" - double"),
+    assert!(s.contains("stack backtrace") && s.contains(&expected("double")),
             "bad output3: {}", s);
 
     // Make sure a stack trace isn't printed too many times
@@ -89,7 +121,7 @@ fn runtest(me: &str) {
 }
 
 fn main() {
-    if cfg!(windows) && cfg!(target_arch = "x86") && cfg!(target_env = "gnu") {
+    if cfg!(windows) && cfg!(target_env = "gnu") {
         return
     }
 
diff --git a/src/test/run-pass/bench/issue-32062.rs b/src/test/run-pass/bench/issue-32062.rs
new file mode 100644 (file)
index 0000000..8f6457d
--- /dev/null
@@ -0,0 +1,58 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// pretty-expanded FIXME #23616
+
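+// Builds a deeply nested `Chain` parser type; the point is mostly that this
+// type-checks and compiles in reasonable time (hence its home under bench/).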
+fn main() {
+    let _ = test(Some(0).into_iter());
+}
+
+trait Parser {
+    type Input: Iterator;
+    type Output;
+    fn parse(self, input: Self::Input) -> Result<(Self::Output, Self::Input), ()>;
+    fn chain<P>(self, p: P) -> Chain<Self, P> where Self: Sized {
+        Chain(self, p)
+    }
+}
+
+struct Token<T>(T::Item) where T: Iterator;
+
+impl<T> Parser for Token<T> where T: Iterator {
+    type Input = T;
+    type Output = T::Item;
+    fn parse(self, _input: Self::Input) -> Result<(Self::Output, Self::Input), ()> {
+        Err(())
+    }
+}
+
+struct Chain<L, R>(L, R);
+
+impl<L, R> Parser for Chain<L, R> where L: Parser, R: Parser<Input = L::Input> {
+    type Input = L::Input;
+    type Output = (L::Output, R::Output);
+    fn parse(self, _input: Self::Input) -> Result<(Self::Output, Self::Input), ()> {
+        Err(())
+    }
+}
+
+fn test<I>(i: I) -> Result<((), I), ()> where I: Iterator<Item = i32> {
+    Chain(Token(0), Token(1))
+        .chain(Chain(Token(0), Token(1)))
+        .chain(Chain(Token(0), Token(1)))
+        .chain(Chain(Token(0), Token(1)))
+        .chain(Chain(Token(0), Token(1)))
+        .chain(Chain(Token(0), Token(1)))
+        .chain(Chain(Token(0), Token(1)))
+        .chain(Chain(Token(0), Token(1)))
+        .chain(Chain(Token(0), Token(1)))
+        .parse(i)
+        .map(|(_, i)| ((), i))
+}
index d85fd3a2b6b1c1bf3f87e4b4ab376e80d8eb9c31..7fbd8dc4786baf1de47c2d7735e6700c8fe11b18 100644 (file)
@@ -70,7 +70,7 @@ fn test_integrity() {
             {
                 // push the panicking item to the heap and catch the panic
                 let thread_result = {
-                    let mut heap_ref = AssertRecoverSafe::new(&mut heap);
+                    let mut heap_ref = AssertRecoverSafe(&mut heap);
                     panic::recover(move || {
                         heap_ref.push(panic_item);
                     })
index 7efda12192a78e6a5f6415de5e347cdf3fb1b9f2..052b60539314549ae126fa47964dcc179d222150 100644 (file)
@@ -8,9 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-
-// pretty-expanded FIXME #23616
-
 trait Foo {
     fn foo(self);
 }
index 10e9a1b51e2d1f8a6a79144de873483305cfca8a..187063968f7f994e33432661423cc522d69819e7 100644 (file)
@@ -8,9 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-
-// pretty-expanded FIXME #23616
-
 struct F { f: Vec<isize> }
 
 fn impure(_v: &[isize]) {
diff --git a/src/test/run-pass/cabi-int-widening.rs b/src/test/run-pass/cabi-int-widening.rs
new file mode 100644 (file)
index 0000000..c7a2275
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[link(name = "rust_test_helpers")]
+extern {
+    fn rust_int8_to_int32(_: i8) -> i32;
+}
+
+fn main() {
+    let x = unsafe {
+        rust_int8_to_int32(-1)
+    };
+
+    assert!(x == -1);
+}
index e4f4a4f3a576cde0ce8b7e632e48c1254046af66..b40c29dd715292df2460e4561105efafbadd69fe 100644 (file)
@@ -11,8 +11,6 @@
 // aux-build:cci_nested_lib.rs
 
 
-#![feature(globs)]
-
 extern crate cci_nested_lib;
 use cci_nested_lib::*;
 
diff --git a/src/test/run-pass/coerce-unify.rs b/src/test/run-pass/coerce-unify.rs
new file mode 100644 (file)
index 0000000..3d69014
--- /dev/null
@@ -0,0 +1,77 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check that coercions can unify if-else, match arms and array elements.
+
+// Try to construct if-else chains, matches and arrays out of given expressions.
+macro_rules! check {
+    ($last:expr $(, $rest:expr)+) => {
+        // Last expression comes first because of whacky ifs and matches.
+        let _ = $(if false { $rest })else+ else { $last };
+
+        let _ = match 0 { $(_ if false => $rest,)+ _ => $last };
+
+        let _ = [$($rest,)+ $last];
+    }
+}
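+// For example, `check!(a, b)` expands (roughly) to:
+//     let _ = if false { b } else { a };
+//     let _ = match 0 { _ if false => b, _ => a };
+//     let _ = [b, a];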
+
+// Check all non-uniform cases of 2 and 3 expressions of 2 types.
+macro_rules! check2 {
+    ($a:expr, $b:expr) => {
+        check!($a, $b);
+        check!($b, $a);
+
+        check!($a, $a, $b);
+        check!($a, $b, $a);
+        check!($a, $b, $b);
+
+        check!($b, $a, $a);
+        check!($b, $a, $b);
+        check!($b, $b, $a);
+    }
+}
+
+// Check all non-uniform cases of 2 and 3 expressions of 3 types.
+macro_rules! check3 {
+    ($a:expr, $b:expr, $c:expr) => {
+        // Delegate to check2 for cases where a type repeats.
+        check2!($a, $b);
+        check2!($b, $c);
+        check2!($a, $c);
+
+        // Check the remaining cases, i.e. permutations of ($a, $b, $c).
+        check!($a, $b, $c);
+        check!($a, $c, $b);
+        check!($b, $a, $c);
+        check!($b, $c, $a);
+        check!($c, $a, $b);
+        check!($c, $b, $a);
+    }
+}
+
+use std::mem::size_of;
+
+fn foo() {}
+fn bar() {}
+
+pub fn main() {
+    check3!(foo, bar, foo as fn());
+    check3!(size_of::<u8>, size_of::<u16>, size_of::<usize> as fn() -> usize);
+
+    let s = String::from("bar");
+    check2!("foo", &s);
+
+    let a = [1, 2, 3];
+    let v = vec![1, 2, 3];
+    check2!(&a[..], &v);
+
+    // Make sure in-array coercion still works.
+    let _ = [("a", Default::default()), (Default::default(), "b"), (&s, &s)];
+}
diff --git a/src/test/run-pass/conditional-debug-macro-off.rs b/src/test/run-pass/conditional-debug-macro-off.rs
deleted file mode 100644 (file)
index c6beb5b..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// compile-flags: -C debug-assertions=no
-// exec-env:RUST_LOG=conditional-debug-macro-off=4
-
-
-#![feature(rustc_private)]
-
-#[macro_use]
-extern crate log;
-
-pub fn main() {
-    // only panics if println! evaluates its argument.
-    debug!("{:?}", { if true { panic!() } });
-}
diff --git a/src/test/run-pass/const-bitshift-rhs-inference.rs b/src/test/run-pass/const-bitshift-rhs-inference.rs
new file mode 100644 (file)
index 0000000..b377fd2
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
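+// The shift amount below has a different integer type in each case; constant
+// evaluation has to accept every one of them, and `1 << 8 == 256` matches the
+// array lengths used on the right-hand side.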
+const RHS: u8 = 8;
+const IRHS: i8 = 8;
+const RHS16: u16 = 8;
+const IRHS16: i16 = 8;
+const RHS32: u32 = 8;
+const IRHS32: i32 = 8;
+const RHS64: u64 = 8;
+const IRHS64: i64 = 8;
+const RHSUS: usize = 8;
+const IRHSIS: isize = 8;
+
+fn main() {
+    let _: [&'static str; 1 << RHS] = [""; 256];
+    let _: [&'static str; 1 << IRHS] = [""; 256];
+    let _: [&'static str; 1 << RHS16] = [""; 256];
+    let _: [&'static str; 1 << IRHS16] = [""; 256];
+    let _: [&'static str; 1 << RHS32] = [""; 256];
+    let _: [&'static str; 1 << IRHS32] = [""; 256];
+    let _: [&'static str; 1 << RHS64] = [""; 256];
+    let _: [&'static str; 1 << IRHS64] = [""; 256];
+    let _: [&'static str; 1 << RHSUS] = [""; 256];
+    let _: [&'static str; 1 << IRHSIS] = [""; 256];
+}
index 5961ed8d3390d9ac4a1266e77e9c24622f7270c0..562040dc5620ba2fc54dea65eb878d5bc6bebcd5 100644 (file)
@@ -10,7 +10,7 @@
 
 // A very basic test of const fn functionality.
 
-#![feature(const_fn)]
+#![feature(const_fn, const_indexing)]
 
 const fn add(x: u32, y: u32) -> u32 {
     x + y
@@ -24,6 +24,14 @@ const unsafe fn div(x: u32, y: u32) -> u32 {
     x / y
 }
 
+const fn generic<T>(t: T) -> T {
+    t
+}
+
+const fn generic_arr<T: Copy>(t: [T; 1]) -> T {
+    t[0]
+}
+
 const SUM: u32 = add(44, 22);
 const DIFF: u32 = sub(44, 22);
 const DIV: u32 = unsafe{div(44, 22)};
@@ -36,4 +44,6 @@ fn main() {
     assert_eq!(DIV, 2);
 
     let _: [&'static str; sub(100, 99) as usize] = ["hi"];
+    let _: [&'static str; generic(1)] = ["hi"];
+    let _: [&'static str; generic_arr([1])] = ["hi"];
 }
diff --git a/src/test/run-pass/const-negation.rs b/src/test/run-pass/const-negation.rs
new file mode 100644 (file)
index 0000000..96f4217
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(stmt_expr_attributes)]
+
+#[deny(const_err)]
+
+fn main() {
+    #[cfg(target_pointer_width = "32")]
+    const I: isize = -2147483648isize;
+    #[cfg(target_pointer_width = "64")]
+    const I: isize = -9223372036854775808isize;
+    assert_eq!(::std::i32::MIN as u64, 0xffffffff80000000);
+    assert_eq!(-2147483648isize as u64, 0xffffffff80000000);
+    assert_eq!(::std::i64::MIN as u64, 0x8000000000000000);
+    #[cfg(target_pointer_width = "64")]
+    assert_eq!(-9223372036854775808isize as u64, 0x8000000000000000);
+    #[cfg(target_pointer_width = "32")]
+    assert_eq!(-9223372036854775808isize as u64, 0);
+    const J: usize = ::std::i32::MAX as usize;
+    const K: usize = -1i32 as u32 as usize;
+    const L: usize = ::std::i32::MIN as usize;
+    const M: usize = ::std::i64::MIN as usize;
+    match 5 {
+        J => {},
+        K => {},
+        L => {},
+        M => {},
+        _ => {}
+    }
+    match 5 {
+        I => {},
+        _ => {}
+    }
+}
index 4c5152ff90ffdfccfd131fce2832045db974741e..1736ab5bb82c81510e98cc4d80e3564a3e5639b8 100644 (file)
@@ -8,6 +8,9 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+#![feature(rustc_attrs)]
+
+// ignore-pretty : (#23623) problems when ending with // comments
 
 use std::{str, string};
 
@@ -15,6 +18,7 @@ const A: [u8; 2] = ['h' as u8, 'i' as u8];
 const B: &'static [u8; 2] = &A;
 const C: *const u8 = B as *const u8;
 
+#[rustc_no_mir] // FIXME #27840 MIR can't do rvalue promotion yet.
 pub fn main() {
     unsafe {
         let foo = &A as *const u8;
index 3e6c72c993a0aa5e480919674aff6b4b0cdc62db..ed55d5c8b171e6180b0296c4201e2b80796fbb0b 100644 (file)
 
 #![feature(associated_type_defaults)]
 
-trait Foo<T> {
-    type Out = T;
-    fn foo(&self) -> Self::Out;
+trait Foo<T: Default + ToString> {
+    type Out: Default + ToString = T;
 }
 
 impl Foo<u32> for () {
-    fn foo(&self) -> u32 {
-        4u32
-    }
 }
 
-impl Foo<u64> for bool {
-    type Out = ();
-    fn foo(&self) {}
+impl Foo<u64> for () {
+    type Out = bool;
 }
 
 fn main() {
-    assert_eq!(<() as Foo<u32>>::foo(&()), 4u32);
-    assert_eq!(<bool as Foo<u64>>::foo(&true), ());
+    assert_eq!(
+        <() as Foo<u32>>::Out::default().to_string(),
+        "0");
+    assert_eq!(
+        <() as Foo<u64>>::Out::default().to_string(),
+        "false");
 }
diff --git a/src/test/run-pass/deprecated-derive.rs b/src/test/run-pass/deprecated-derive.rs
deleted file mode 100644 (file)
index 69a7f88..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(rustc_private)]
-
-extern crate serialize;
-
-#[derive(Encodable)]
-//~^ WARNING derive(Encodable) is deprecated in favor of derive(RustcEncodable)
-struct Test1;
-
-fn main() { }
index 78e9da001f799a5ac354e796af327e7600073498..0cbe4f4ebd0edc42bc59d8fb9bbb53b71e1b02ae 100644 (file)
@@ -8,32 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(rand, collections, rustc_private)]
-#![no_std]
+// aux-build:derive-no-std.rs
 
-extern crate rand;
-extern crate serialize as rustc_serialize;
-extern crate collections;
+extern crate derive_no_std;
+use derive_no_std::*;
 
-// Issue #16803
-
-#[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord,
-         Debug, Default, Copy)]
-struct Foo {
-    x: u32,
-}
+fn main() {
+    let f = Foo { x: 0 };
+    assert_eq!(f.clone(), Foo::default());
 
-#[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord,
-         Debug, Copy)]
-enum Bar {
-    Qux,
-    Quux(u32),
+    assert!(Bar::Qux < Bar::Quux(42));
 }
 
-enum Baz { A=0, B=5, }
-
-fn main() {
-    Foo { x: 0 };
-    Bar::Quux(3);
-    Baz::A;
-}
diff --git a/src/test/run-pass/deriving-encodable-decodable-box.rs b/src/test/run-pass/deriving-encodable-decodable-box.rs
deleted file mode 100644 (file)
index 328cc13..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-
-#![allow(unknown_features)]
-#![feature(box_syntax)]
-#![feature(rustc_private)]
-
-extern crate serialize;
-
-use serialize::{Encodable, Decodable};
-use serialize::json;
-
-#[derive(Encodable, Decodable)]
-struct A {
-    foo: Box<[bool]>,
-}
-
-fn main() {
-    // FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
-    let obj = A { foo: Box::new([true, false]) };
-    let s = json::encode(&obj).unwrap();
-    let obj2: A = json::decode(&s).unwrap();
-    assert_eq!(obj.foo, obj2.foo);
-}
diff --git a/src/test/run-pass/deriving-encodable-decodable-cell-refcell.rs b/src/test/run-pass/deriving-encodable-decodable-cell-refcell.rs
deleted file mode 100644 (file)
index 6e5eb86..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// This briefly tests the capability of `Cell` and `RefCell` to implement the
-// `Encodable` and `Decodable` traits via `#[derive(Encodable, Decodable)]`
-
-
-#![feature(rustc_private)]
-
-extern crate serialize;
-
-use std::cell::{Cell, RefCell};
-use serialize::{Encodable, Decodable};
-use serialize::json;
-
-#[derive(Encodable, Decodable)]
-struct A {
-    baz: isize
-}
-
-#[derive(Encodable, Decodable)]
-struct B {
-    foo: Cell<bool>,
-    bar: RefCell<A>,
-}
-
-fn main() {
-    let obj = B {
-        foo: Cell::new(true),
-        bar: RefCell::new( A { baz: 2 } )
-    };
-    let s = json::encode(&obj).unwrap();
-    let obj2: B = json::decode(&s).unwrap();
-    assert_eq!(obj.foo.get(), obj2.foo.get());
-    assert_eq!(obj.bar.borrow().baz, obj2.bar.borrow().baz);
-}
diff --git a/src/test/run-pass/deriving-global.rs b/src/test/run-pass/deriving-global.rs
deleted file mode 100644 (file)
index 10e8ddc..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(rand, rustc_private)]
-
-extern crate serialize;
-
-mod submod {
-    // if any of these are implemented without global calls for any
-    // function calls, then being in a submodule will (correctly)
-    // cause errors about unrecognised module `std` (or `extra`)
-    #[derive(PartialEq, PartialOrd, Eq, Ord,
-               Hash,
-               Clone,
-               Debug,
-               Encodable, Decodable)]
-    enum A { A1(usize), A2(isize) }
-
-    #[derive(PartialEq, PartialOrd, Eq, Ord,
-               Hash,
-               Clone,
-               Debug,
-               Encodable, Decodable)]
-    struct B { x: usize, y: isize }
-
-    #[derive(PartialEq, PartialOrd, Eq, Ord,
-               Hash,
-               Clone,
-               Debug,
-               Encodable, Decodable)]
-    struct C(usize, isize);
-
-}
-
-pub fn main() {}
index 69e9816ab948616a28b9afec7a8107722231a33b..91bfc2f9201b7796cbf713117c3e6618b295ffb3 100644 (file)
@@ -12,6 +12,7 @@
 #![feature(hash_default)]
 
 use std::hash::{Hash, SipHasher, Hasher};
+use std::mem::size_of;
 
 #[derive(Hash)]
 struct Person {
@@ -20,12 +21,34 @@ struct Person {
     phone: usize,
 }
 
+// test for hygiene name collisions
+#[derive(Hash)] struct __H__H;
+#[derive(Hash)] enum Collision<__H> { __H { __H__H: __H } }
+
+#[derive(Hash)]
+enum E { A=1, B }
+
 fn hash<T: Hash>(t: &T) -> u64 {
     let mut s = SipHasher::new_with_keys(0, 0);
     t.hash(&mut s);
     s.finish()
 }
 
+struct FakeHasher<'a>(&'a mut Vec<u8>);
+impl<'a> Hasher for FakeHasher<'a> {
+    fn finish(&self) -> u64 {
+        unimplemented!()
+    }
+
+    fn write(&mut self, bytes: &[u8]) {
+        self.0.extend(bytes);
+    }
+}
+
+fn fake_hash(v: &mut Vec<u8>, e: E) {
+    e.hash(&mut FakeHasher(v));
+}
+
 fn main() {
     let person1 = Person {
         id: 5,
@@ -39,4 +62,11 @@ fn main() {
     };
     assert_eq!(hash(&person1), hash(&person1));
     assert!(hash(&person1) != hash(&person2));
+
+    // test #21714
+    let mut va = vec![];
+    let mut vb = vec![];
+    fake_hash(&mut va, E::A);
+    fake_hash(&mut vb, E::B);
+    assert!(va != vb);
 }
diff --git a/src/test/run-pass/dropck_tarena_sound_drop.rs b/src/test/run-pass/dropck_tarena_sound_drop.rs
deleted file mode 100644 (file)
index db30bfb..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Check that an arena (TypedArena) can carry elements whose drop
-// methods might access borrowed data, as long as the borrowed data
-// has lifetime that strictly outlives the arena itself.
-//
-// Compare against compile-fail/dropck_tarena_unsound_drop.rs, which
-// shows a similar setup, but restricts `f` so that the struct `C<'a>`
-// is force-fed a lifetime equal to that of the borrowed arena.
-
-#![allow(unstable)]
-#![feature(rustc_private)]
-
-extern crate arena;
-
-use arena::TypedArena;
-
-trait HasId { fn count(&self) -> usize; }
-
-struct CheckId<T:HasId> { v: T }
-
-// In the code below, the impl of HasId for `&'a usize` does not
-// actually access the borrowed data, but the point is that the
-// interface to CheckId does not (and cannot) know that, and therefore
-// when encountering the a value V of type CheckId<S>, we must
-// conservatively force the type S to strictly outlive V.
-impl<T:HasId> Drop for CheckId<T> {
-    fn drop(&mut self) {
-        assert!(self.v.count() > 0);
-    }
-}
-
-struct C<'a> { _v: CheckId<&'a usize>, }
-
-impl<'a> HasId for &'a usize { fn count(&self) -> usize { 1 } }
-
-fn f<'a, 'b>(_arena: &'a TypedArena<C<'b>>) {}
-
-fn main() {
-    let arena: TypedArena<C> = TypedArena::new();
-    f(&arena);
-}
diff --git a/src/test/run-pass/empty-struct-braces-derive.rs b/src/test/run-pass/empty-struct-braces-derive.rs
deleted file mode 100644 (file)
index 8d19209..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// `#[derive(Trait)]` works for empty structs/variants with braces
-
-#![feature(rustc_private)]
-
-extern crate serialize as rustc_serialize;
-
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash,
-         Default, Debug, RustcEncodable, RustcDecodable)]
-struct S {}
-
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash,
-         Debug, RustcEncodable, RustcDecodable)]
-enum E {
-    V {},
-    U,
-}
-
-fn main() {
-    let s = S {};
-    let s1 = s;
-    let s2 = s.clone();
-    assert_eq!(s, s1);
-    assert_eq!(s, s2);
-    assert!(!(s < s1));
-    assert_eq!(format!("{:?}", s), "S");
-
-    let e = E::V {};
-    let e1 = e;
-    let e2 = e.clone();
-    assert_eq!(e, e1);
-    assert_eq!(e, e2);
-    assert!(!(e < e1));
-    assert_eq!(format!("{:?}", e), "V");
-}
index 85ae77f20f155e3648bf90d80e539a25d78ee15d..0060150fbece01b90c07e411bfb31b4db4ae8a58 100644 (file)
@@ -18,7 +18,10 @@ use empty_struct::*;
 
 struct Empty1 {}
 struct Empty2;
+
+#[derive(PartialEq, Eq)]
 struct Empty3 {}
+
 const Empty3: Empty3 = Empty3 {};
 
 enum E {
diff --git a/src/test/run-pass/empty-type-parameter-list.rs b/src/test/run-pass/empty-type-parameter-list.rs
new file mode 100644 (file)
index 0000000..7af2844
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that an empty type parameter list (<>) is synonymous with
+// no type parameters at all
+
+struct S<>;
+trait T<> {}
+enum E<> { V }
+impl<> T<> for S<> {}
+impl T for E {}
+fn foo<>() {}
+fn bar() {}
+
+fn main() {
+    let _ = S;
+    let _ = S::<>;
+    let _ = E::V;
+    let _ = E::<>::V;
+    foo();
+    foo::<>();
+
+    // Test that we can supply <> to non-generic things
+    bar::<>();
+    let _: i32<>;
+}
index 8be3634c88addb6dc6a3e5027e9903171a65b927..fdaad9e1fabcea336bfe6290b9ca79a54c0c9376 100644 (file)
 
 #[repr(u32)]
 enum Foo {
-  A = 0,
-  B = 23
+    A = 0,
+    B = 23
 }
 
 #[inline(never)]
 extern "C" fn foo(_x: usize) -> Foo { Foo::B }
 
 pub fn main() {
-  unsafe {
-    let f: extern "C" fn(usize) -> u32 = ::std::mem::transmute(foo);
-    assert_eq!(f(0xDEADBEEF), Foo::B as u32);
-  }
+    unsafe {
+        let f: extern "C" fn(usize) -> u32 =
+            ::std::mem::transmute(foo as extern "C" fn(usize) -> Foo);
+        assert_eq!(f(0xDEADBEEF), Foo::B as u32);
+    }
 }
index 99e44735d0f03a11a25a9517d08c8480bdb089b6..53c44f2bb24b5f16ce3eae5d008182fa2ebaba30 100644 (file)
@@ -8,6 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+#![feature(stmt_expr_attributes)]
 
 use std::mem::size_of;
 
@@ -46,11 +47,6 @@ enum Ei64 {
     Bi64 = 0x8000_0000
 }
 
-enum Eu64 {
-    Au64 = 0,
-    Bu64 = 0x8000_0000_0000_0000
-}
-
 pub fn main() {
     assert_eq!(size_of::<Ei8>(), 1);
     assert_eq!(size_of::<Eu8>(), 1);
@@ -58,6 +54,8 @@ pub fn main() {
     assert_eq!(size_of::<Eu16>(), 2);
     assert_eq!(size_of::<Ei32>(), 4);
     assert_eq!(size_of::<Eu32>(), 4);
+    #[cfg(target_pointer_width = "64")]
     assert_eq!(size_of::<Ei64>(), 8);
-    assert_eq!(size_of::<Eu64>(), 8);
+    #[cfg(target_pointer_width = "32")]
+    assert_eq!(size_of::<Ei64>(), 4);
 }
index edad5cc1652da9253695ce3aad854faff0db28be..3bbc107e0b99e20bdbc227f0bbdad29aaecd4d95 100644 (file)
@@ -9,7 +9,7 @@
 // except according to those terms.
 
 
-use std::mem::size_of;
+use std::mem::{size_of, align_of};
 
 #[repr(i8)]
 enum Ei8 {
@@ -71,6 +71,24 @@ enum Euint {
     Buint = 1
 }
 
+#[repr(u8)]
+enum Eu8NonCLike<T> {
+    _None,
+    _Some(T),
+}
+
+#[repr(i64)]
+enum Ei64NonCLike<T> {
+    _None,
+    _Some(T),
+}
+
+#[repr(u64)]
+enum Eu64NonCLike<T> {
+    _None,
+    _Some(T),
+}
+
 pub fn main() {
     assert_eq!(size_of::<Ei8>(), 1);
     assert_eq!(size_of::<Eu8>(), 1);
@@ -82,4 +100,17 @@ pub fn main() {
     assert_eq!(size_of::<Eu64>(), 8);
     assert_eq!(size_of::<Eint>(), size_of::<isize>());
     assert_eq!(size_of::<Euint>(), size_of::<usize>());
+    assert_eq!(size_of::<Eu8NonCLike<()>>(), 1);
+    assert_eq!(size_of::<Ei64NonCLike<()>>(), 8);
+    assert_eq!(size_of::<Eu64NonCLike<()>>(), 8);
+    let u8_expected_size = round_up(9, align_of::<Eu64NonCLike<u8>>());
+    assert_eq!(size_of::<Eu64NonCLike<u8>>(), u8_expected_size);
+    let array_expected_size = round_up(28, align_of::<Eu64NonCLike<[u32; 5]>>());
+    assert_eq!(size_of::<Eu64NonCLike<[u32; 5]>>(), array_expected_size);
+    assert_eq!(size_of::<Eu64NonCLike<[u32; 6]>>(), 32);
+}
+
+// Rounds x up to the next multiple of a
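+// (e.g. round_up(9, 8) == 16); used above to allow for trailing padding
+// after the payload, whatever the enum's alignment turns out to be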
+fn round_up(x: usize, a: usize) -> usize {
+    ((x + (a - 1)) / a) * a
 }
index 2b0a01496710590b24d761789a77c5c1ec2030c4..8b4a633d61358278058ce578dd7b765b179670ce 100644 (file)
@@ -15,7 +15,6 @@
 // ignore-emscripten
 // no-prefer-dynamic
 
-#![feature(convert)]
 #![feature(libc)]
 
 extern crate libc;
@@ -23,7 +22,8 @@ extern crate libc;
 use libc::c_char;
 use libc::execve;
 use std::env;
-use std::ffi::OsStr;
+use std::ffi::CString;
+use std::os::unix::prelude::*;
 use std::ptr;
 
 fn main() {
@@ -34,8 +34,11 @@ fn main() {
         return;
     }
 
-    let current_exe = env::current_exe().unwrap().into_os_string().to_cstring().unwrap();
-    let new_env_var = OsStr::new("FOOBAR").to_cstring().unwrap();
+    let current_exe = CString::new(env::current_exe()
+                                       .unwrap()
+                                       .as_os_str()
+                                       .as_bytes()).unwrap();
+    let new_env_var = CString::new("FOOBAR").unwrap();
     let filename: *const c_char = current_exe.as_ptr();
     let argv: &[*const c_char] = &[filename, filename, ptr::null()];
     let envp: &[*const c_char] = &[new_env_var.as_ptr(), ptr::null()];
diff --git a/src/test/run-pass/extern-mod-syntax.rs b/src/test/run-pass/extern-mod-syntax.rs
deleted file mode 100644 (file)
index 37404ee..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(unused_imports)]
-#![feature(rustc_private)]
-
-extern crate serialize;
-use serialize::json::Object;
-
-pub fn main() {
-    println!("Hello world!");
-}
diff --git a/src/test/run-pass/float_math.rs b/src/test/run-pass/float_math.rs
new file mode 100644 (file)
index 0000000..c1b4b55
--- /dev/null
@@ -0,0 +1,30 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(core_intrinsics)]
+
+use std::intrinsics::{fadd_fast, fsub_fast, fmul_fast, fdiv_fast, frem_fast};
+
+#[inline(never)]
+pub fn test_operations(a: f64, b: f64) {
+    // make sure they all map to the correct operation
+    unsafe {
+        assert_eq!(fadd_fast(a, b), a + b);
+        assert_eq!(fsub_fast(a, b), a - b);
+        assert_eq!(fmul_fast(a, b), a * b);
+        assert_eq!(fdiv_fast(a, b), a / b);
+        assert_eq!(frem_fast(a, b), a % b);
+    }
+}
+
+fn main() {
+    test_operations(1., 2.);
+    test_operations(10., 5.);
+}
diff --git a/src/test/run-pass/fn-item-type-zero-sized.rs b/src/test/run-pass/fn-item-type-zero-sized.rs
new file mode 100644 (file)
index 0000000..5fdaf08
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that fn item types are zero-sized.
+
+use std::mem::{size_of, size_of_val};
+
+fn main() {
+    assert_eq!(size_of_val(&main), 0);
+
+    let (a, b) = (size_of::<u8>, size_of::<u16>);
+    assert_eq!(size_of_val(&a), 0);
+    assert_eq!(size_of_val(&b), 0);
+    assert_eq!((a(), b()), (1, 2));
+}
index 11de5ac70f4feecccd15c82a1ceee8528443f330..163ee617d6f6bf84d0840b46323a330b5c04a32f 100644 (file)
@@ -8,32 +8,17 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// calling pin_thread and that's having weird side-effects.
+// aux-build:foreign_lib.rs
 
-// pretty-expanded FIXME #23616
+// Check that we can still call duplicated extern (imported) functions
+// which were declared in another crate. See issues #32740 and #32783.
 
-#![feature(libc)]
-
-mod rustrt1 {
-    extern crate libc;
-
-    #[link(name = "rust_test_helpers")]
-    extern {
-        pub fn rust_get_test_int() -> libc::intptr_t;
-    }
-}
-
-mod rustrt2 {
-    extern crate libc;
-
-    extern {
-        pub fn rust_get_test_int() -> libc::intptr_t;
-    }
-}
+extern crate foreign_lib;
 
 pub fn main() {
     unsafe {
-        rustrt1::rust_get_test_int();
-        rustrt2::rust_get_test_int();
+        let x = foreign_lib::rustrt::rust_get_test_int();
+        assert_eq!(x, foreign_lib::rustrt2::rust_get_test_int());
+        assert_eq!(x as *const _, foreign_lib::rustrt3::rust_get_test_int());
     }
 }
index b14a6101225e799a8ebd5a61f969db5131cb26f6..eb0546063f7154396245fd1a5735c19732f1e3d2 100644 (file)
@@ -8,8 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// pretty-expanded FIXME #23616
-
 enum wrapper<T> { wrapped(T), }
 
 pub fn main() { let _w = wrapper::wrapped(vec!(1, 2, 3, 4, 5)); }
index 2cc033b8a46e136cbe6f00130ab6f15f6bc359dd..27cafeacc203d73f5911dd7e88c59cc05270e9a0 100644 (file)
@@ -12,8 +12,9 @@
 
 #![deny(warnings)]
 #![allow(unused_must_use)]
-#![allow(unknown_features)]
+#![allow(unused_features)]
 #![feature(box_syntax)]
+#![feature(question_mark)]
 
 use std::fmt::{self, Write};
 use std::usize;
@@ -40,8 +41,8 @@ impl fmt::Display for C {
 }
 impl fmt::Binary for D {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        try!(f.write_str("aa"));
-        try!(f.write_char('☃'));
+        f.write_str("aa")?;
+        f.write_char('☃')?;
         f.write_str("bb")
     }
 }
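As an aside on the `try!` → `?` rewrite in the hunk above: on this 1.9-era nightly the operator still sits behind `#![feature(question_mark)]` (as the added feature attribute shows), and it expands to essentially the same early-return that `try!` did. A minimal sketch of the behaviour, with illustrative names that are not part of the test suite:

    use std::fmt::Write;

    fn render(out: &mut String) -> std::fmt::Result {
        // `expr?` returns the Err value early, just like the old `try!(expr)`.
        out.write_str("aa")?;
        out.write_char('x')?;
        out.write_str("bb")
    }

    fn main() {
        let mut s = String::new();
        render(&mut s).unwrap();
        assert_eq!(s, "aaxbb");
    }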
index 3cc125e95133470e4049e98dd31a0dbd2e14e5da..a675528424ed23f810a5a1d56324604c42e6d80c 100644 (file)
@@ -15,9 +15,9 @@
 
 mod rusti {
     extern "rust-intrinsic" {
-        pub fn atomic_cxchg<T>(dst: *mut T, old: T, src: T) -> T;
-        pub fn atomic_cxchg_acq<T>(dst: *mut T, old: T, src: T) -> T;
-        pub fn atomic_cxchg_rel<T>(dst: *mut T, old: T, src: T) -> T;
+        pub fn atomic_cxchg<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+        pub fn atomic_cxchg_acq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+        pub fn atomic_cxchg_rel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
 
         pub fn atomic_cxchgweak<T>(dst: *mut T, old: T, src: T) -> (T, bool);
         pub fn atomic_cxchgweak_acq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
@@ -56,13 +56,13 @@ pub fn main() {
         rusti::atomic_store_rel(&mut *x,1);
         assert_eq!(*x, 1);
 
-        assert_eq!(rusti::atomic_cxchg(&mut *x, 1, 2), 1);
+        assert_eq!(rusti::atomic_cxchg(&mut *x, 1, 2), (1, true));
         assert_eq!(*x, 2);
 
-        assert_eq!(rusti::atomic_cxchg_acq(&mut *x, 1, 3), 2);
+        assert_eq!(rusti::atomic_cxchg_acq(&mut *x, 1, 3), (2, false));
         assert_eq!(*x, 2);
 
-        assert_eq!(rusti::atomic_cxchg_rel(&mut *x, 2, 1), 2);
+        assert_eq!(rusti::atomic_cxchg_rel(&mut *x, 2, 1), (2, true));
         assert_eq!(*x, 1);
 
         assert_eq!(rusti::atomic_xchg(&mut *x, 0), 1);
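The hunk above follows the change of the strong `atomic_cxchg*` intrinsics to return an `(old value, success)` pair, matching the weak variants. Outside of intrinsics, the equivalent behaviour is `compare_exchange` on the atomic types (stabilized slightly after this snapshot); a rough sketch assuming current std:

    use std::sync::atomic::{AtomicUsize, Ordering};

    fn main() {
        let x = AtomicUsize::new(1);
        // Expected value matches: the swap happens and Ok(previous value) is returned.
        assert_eq!(x.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst), Ok(1));
        // Expected value no longer matches: no swap, Err(actual value) is returned.
        assert_eq!(x.compare_exchange(1, 3, Ordering::SeqCst, Ordering::SeqCst), Err(2));
        assert_eq!(x.load(Ordering::SeqCst), 2);
    }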
diff --git a/src/test/run-pass/issue-11881.rs b/src/test/run-pass/issue-11881.rs
deleted file mode 100644 (file)
index 9da04f7..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-
-#![feature(rustc_private)]
-
-extern crate rbml;
-extern crate serialize;
-
-use std::io::Cursor;
-use std::io::prelude::*;
-use std::fmt;
-use std::slice;
-
-use serialize::{Encodable, Encoder};
-use serialize::json;
-
-use rbml::writer;
-
-#[derive(Encodable)]
-struct Foo {
-    baz: bool,
-}
-
-#[derive(Encodable)]
-struct Bar {
-    froboz: usize,
-}
-
-enum WireProtocol {
-    JSON,
-    RBML,
-    // ...
-}
-
-fn encode_json<T: Encodable>(val: &T, wr: &mut Cursor<Vec<u8>>) {
-    write!(wr, "{}", json::as_json(val));
-}
-fn encode_rbml<T: Encodable>(val: &T, wr: &mut Cursor<Vec<u8>>) {
-    let mut encoder = writer::Encoder::new(wr);
-    val.encode(&mut encoder);
-}
-
-pub fn main() {
-    let target = Foo{baz: false,};
-    let mut wr = Cursor::new(Vec::new());
-    let proto = WireProtocol::JSON;
-    match proto {
-        WireProtocol::JSON => encode_json(&target, &mut wr),
-        WireProtocol::RBML => encode_rbml(&target, &mut wr)
-    }
-}
index c854747bcf7bd2343a79762ab8ea3c8874662747..5c9ee74472b12c5047286d8fccc8e015cce43f73 100644 (file)
@@ -8,8 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// pretty-expanded FIXME #23616
-
 #![feature(collections)]
 
 extern crate collections;
index 91ec3e854044268d5ee22c65e6cb6bbc78424d78..084b7a166cdd36c4f9a24d18ec61996c5051216e 100644 (file)
@@ -19,23 +19,29 @@ use issue13507::testtypes;
 use std::any::TypeId;
 
 pub fn type_ids() -> Vec<TypeId> {
-    let mut ids = vec!();
-    ids.push(TypeId::of::<testtypes::FooNil>());
-    ids.push(TypeId::of::<testtypes::FooBool>());
-    ids.push(TypeId::of::<testtypes::FooInt>());
-    ids.push(TypeId::of::<testtypes::FooUint>());
-    ids.push(TypeId::of::<testtypes::FooFloat>());
-    ids.push(TypeId::of::<testtypes::FooEnum>());
-    ids.push(TypeId::of::<testtypes::FooUniq>());
-    ids.push(TypeId::of::<testtypes::FooPtr>());
-    ids.push(TypeId::of::<&'static testtypes::FooTrait>());
-    ids.push(TypeId::of::<testtypes::FooStruct>());
-    ids.push(TypeId::of::<testtypes::FooTuple>());
-    ids
+    use issue13507::testtypes::*;
+    vec![
+        TypeId::of::<FooBool>(),
+        TypeId::of::<FooInt>(),
+        TypeId::of::<FooUint>(),
+        TypeId::of::<FooFloat>(),
+        TypeId::of::<FooStr>(),
+        TypeId::of::<FooArray>(),
+        TypeId::of::<FooSlice>(),
+        TypeId::of::<FooBox>(),
+        TypeId::of::<FooPtr>(),
+        TypeId::of::<FooRef>(),
+        TypeId::of::<FooFnPtr>(),
+        TypeId::of::<FooNil>(),
+        TypeId::of::<FooTuple>(),
+        TypeId::of::<FooTrait>(),
+        TypeId::of::<FooStruct>(),
+        TypeId::of::<FooEnum>()
+    ]
 }
 
 pub fn main() {
-    let othercrate = testtypes::type_ids();
+    let othercrate = issue13507::testtypes::type_ids();
     let thiscrate = type_ids();
     assert_eq!(thiscrate, othercrate);
 }
index 0ea38596335d8c8f0deb9e85011a23ea4a1b2d52..1367ab2ed85bdccb61b71779afb5c0aa2f3ad7fa 100644 (file)
 
 // pretty-expanded FIXME #23616
 
-#![feature(num_bits_bytes)]
-
-use std::u8;
+mod u8 {
+    pub const BITS: usize = 8;
+}
 
 const NUM: usize = u8::BITS;
 
 struct MyStruct { nums: [usize; 8] }
 
-
 fn main() {
     let _s = MyStruct { nums: [0; NUM] };
 }
diff --git a/src/test/run-pass/issue-14021.rs b/src/test/run-pass/issue-14021.rs
deleted file mode 100644 (file)
index 907967d..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(rustc_private)]
-
-extern crate serialize;
-
-use serialize::{Encodable, Decodable};
-use serialize::json;
-
-#[derive(Encodable, Decodable, PartialEq, Debug)]
-struct UnitLikeStruct;
-
-pub fn main() {
-    let obj = UnitLikeStruct;
-    let json_str: String = json::encode(&obj).unwrap();
-
-    let json_object = json::from_str(&json_str);
-    let mut decoder = json::Decoder::new(json_object.unwrap());
-    let mut decoded_obj: UnitLikeStruct = Decodable::decode(&mut decoder).unwrap();
-
-    assert_eq!(obj, decoded_obj);
-}
diff --git a/src/test/run-pass/issue-15924.rs b/src/test/run-pass/issue-15924.rs
deleted file mode 100644 (file)
index 0c20877..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// pretty-expanded FIXME #23616
-
-#![feature(rustc_private)]
-
-extern crate serialize;
-
-use std::fmt;
-use serialize::{Encoder, Encodable};
-use serialize::json;
-
-struct Foo<T: Encodable> {
-    v: T,
-}
-
-impl<T: Encodable> Drop for Foo<T> {
-    fn drop(&mut self) {
-        json::encode(&self.v);
-    }
-}
-
-fn main() {
-    let _ = Foo { v: 10 };
-}
index 366ef7543fdbb9cc1a0c73d76da0cd5769213fcc..b3c80041ef88b7f1e0f0d6139bde4d0208406b50 100644 (file)
@@ -10,6 +10,8 @@
 
 // pretty-expanded FIXME #23616
 
+#![feature(question_mark)]
+
 use std::fs::File;
 use std::io::{self, BufReader, Read};
 
@@ -27,7 +29,7 @@ impl<R: Read> Lexer<R>
 
     pub fn new_from_file(p: &str) -> io::Result<Lexer<File>>
     {
-        Ok(Lexer::new_from_reader(try!(File::open(p))))
+        Ok(Lexer::new_from_reader(File::open(p)?))
     }
 
     pub fn new_from_str<'a>(s: &'a str) -> Lexer<&'a [u8]>
index 2bb69d105ff5d8c50b05836f29e9e330f3a26e90..744e63f159b6566dc17ceb4187399bf043e2f495 100644 (file)
@@ -8,13 +8,13 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// aux-build:issue-17718.rs
+// aux-build:issue-17718-aux.rs
 
 
 #![feature(core)]
 #![feature(const_fn)]
 
-extern crate issue_17718 as other;
+extern crate issue_17718_aux as other;
 
 use std::sync::atomic::{AtomicUsize, Ordering};
 
index 65a0b51095c80cbd450c5bd0f465aa3306c998c0..8e3cb414566c7577279db2ba4f0fc60d45c7af7b 100644 (file)
@@ -8,8 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// pretty-expanded FIXME #23616
-
 #![feature(unboxed_closures)]
 
 use std::marker::PhantomData;
index b0b2f068bb74bb1be4f67cd62deee78da9bb132d..88bf95f036b86275729e3d4d246792d7b72d3851 100644 (file)
@@ -15,7 +15,6 @@
 // impl.
 
 // aux-build:issue-18514.rs
-// pretty-expanded FIXME #23616
 
 extern crate issue_18514 as ice;
 use ice::{Tr, St};
diff --git a/src/test/run-pass/issue-20427.rs b/src/test/run-pass/issue-20427.rs
new file mode 100644 (file)
index 0000000..dd3d952
--- /dev/null
@@ -0,0 +1,91 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:i8.rs
+// ignore-pretty (#23623)
+
+extern crate i8;
+use std::string as i16;
+static i32: i32 = 0;
+const i64: i64 = 0;
+fn u8(f32: f32) {}
+fn f<f64>(f64: f64) {}
+enum u32 {}
+struct u64;
+trait bool {}
+
+mod char {
+    extern crate i8;
+    static i32_: i32 = 0;
+    const i64_: i64 = 0;
+    fn u8_(f32: f32) {}
+    fn f_<f64_>(f64: f64_) {}
+    type u16_ = u16;
+    enum u32_ {}
+    struct u64_;
+    trait bool_ {}
+    mod char_ {}
+
+    mod str {
+        use super::i8 as i8;
+        use super::i32_ as i32;
+        use super::i64_ as i64;
+        use super::u8_ as u8;
+        use super::f_ as f64;
+        use super::u16_ as u16;
+        use super::u32_ as u32;
+        use super::u64_ as u64;
+        use super::bool_ as bool;
+        use super::{bool_ as str};
+        use super::char_ as char;
+    }
+}
+
+trait isize_ {
+    type isize;
+}
+
+fn usize<'usize>(usize: &'usize usize) -> &'usize usize { usize }
+
+mod reuse {
+    use std::mem::size_of;
+
+    type u8 = u64;
+    use std::string::String as i16;
+
+    pub fn check<u16>() {
+        assert_eq!(size_of::<u8>(), 8);
+        assert_eq!(size_of::<::u64>(), 0);
+        assert_eq!(size_of::<i16>(), 3 * size_of::<*const ()>());
+        assert_eq!(size_of::<u16>(), 0);
+    }
+}
+
+mod guard {
+    pub fn check() {
+        use std::u8; // bring module u8 in scope
+        fn f() -> u8 { // OK, resolves to primitive u8, not to std::u8
+            u8::max_value() // OK, resolves to associated function <u8>::max_value,
+                            // not to non-existent std::u8::max_value
+        }
+        assert_eq!(f(), u8::MAX); // OK, resolves to std::u8::MAX
+    }
+}
+
+fn main() {
+    let bool = true;
+    let _ = match bool {
+        str @ true => if str { i32 as i64 } else { i64 },
+        false => i64,
+    };
+
+    reuse::check::<u64>();
+    guard::check();
+}
index 2772fc8787528b20f20a040860706e8ea28ec177..321ed1a3bb283fdcdef5545fc46485d7a3c5de2f 100644 (file)
@@ -12,6 +12,8 @@
 
 // pretty-expanded FIXME #23616
 
+#![feature(question_mark)]
+
 use std::default::Default;
 use std::io;
 use std::fs;
@@ -55,7 +57,7 @@ pub struct Subpaths<S: Strategy> {
 impl<S: Strategy> Subpaths<S> {
     /// Create a directory walker with a root path and strategy.
     pub fn new(p: &S::P, strategy: S) -> io::Result<Subpaths<S>> {
-        let stack = try!(strategy.get_more(p));
+        let stack = strategy.get_more(p)?;
         Ok(Subpaths { stack: stack, strategy: strategy })
     }
 }
index 0ca936878148b6c54f2d477f9d43128beaf838b9..af767464db35170ca31443c7eafdde94a778a08f 100644 (file)
@@ -26,5 +26,5 @@ fn main() {
         std::intrinsics::type_name::<NT>(),
         // DST
         std::intrinsics::type_name::<DST>()
-    )}, ("[u8]", "str", "core::marker::Send + 'static", "NT", "DST"));
+    )}, ("[u8]", "str", "std::marker::Send + 'static", "NT", "DST"));
 }
index cd55b9fbaa345e83ca6cc0324cf3713741b694e2..0d1be964748bb9b263c191d9f85e4d98d622ceb5 100644 (file)
@@ -11,6 +11,8 @@
 // Regression test for #21400 which itself was extracted from
 // stackoverflow.com/questions/28031155/is-my-borrow-checker-drunk/28031580
 
+#![feature(question_mark)]
+
 fn main() {
     let mut t = Test;
     assert_eq!(t.method1("one"), Ok(1));
@@ -54,7 +56,7 @@ impl GitConnect {
         let c = format!("git-upload-pack");
 
         let mut out = String::new();
-        let data = try!(self.command(&c));
+        let data = self.command(&c)?;
 
         for line in data.iter() {
             out.push_str(&format!("{:?}", line));
index 2c4141b0bd37939a88ef3c827b9271532792d262..96e77c05ca2bd434c26e503efa3e888b7f9f98a7 100644 (file)
@@ -10,7 +10,7 @@
 
 // pretty-expanded FIXME #23616
 
-#![feature(fs, net, fs_walk)]
+#![feature(fs, net)]
 
 use std::{fs, net};
 
@@ -22,7 +22,6 @@ fn main() {
     assert_both::<fs::Metadata>();
     assert_both::<fs::ReadDir>();
     assert_both::<fs::DirEntry>();
-    assert_send::<fs::WalkDir>();
     assert_both::<fs::OpenOptions>();
     assert_both::<fs::Permissions>();
 
index 507d482febfd90b089154059cbce53a8848c461b..73c52a0843cfbd870c383fa30123b0343d1c239a 100644 (file)
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+#![feature(rustc_attrs)]
+
 // ignore-pretty : (#23623) problems when ending with // comments
 
 // This test is ensuring that parameters are indeed dropped after
@@ -64,6 +66,7 @@ fn test<'a>(log: d::Log<'a>) {
     d::println(&format!("result {}", result));
 }
 
+#[rustc_no_mir] // FIXME #29855 MIR doesn't handle all drops correctly.
 fn foo<'a>(da0: D<'a>, de1: D<'a>) -> D<'a> {
     d::println("entered foo");
     let de2 = de1.incr();      // creates D(de_2, 2)
index 4b6d593f592d970b812bc0ca043e17ebd50849f4..6e20662b702a94c254547bf231018e1938bae0ea 100644 (file)
@@ -16,24 +16,16 @@ use std::intrinsics;
 #[derive(Copy, Clone)]
 struct Wrap(i64);
 
-// These volatile and atomic intrinsics used to cause an ICE
+// These volatile intrinsics used to cause an ICE
 
 unsafe fn test_bool(p: &mut bool, v: bool) {
     intrinsics::volatile_load(p);
     intrinsics::volatile_store(p, v);
-    intrinsics::atomic_load(p);
-    intrinsics::atomic_cxchg(p, v, v);
-    intrinsics::atomic_store(p, v);
-    intrinsics::atomic_xchg(p, v);
 }
 
 unsafe fn test_immediate_fca(p: &mut Wrap, v: Wrap) {
     intrinsics::volatile_load(p);
     intrinsics::volatile_store(p, v);
-    intrinsics::atomic_load(p);
-    intrinsics::atomic_cxchg(p, v, v);
-    intrinsics::atomic_store(p, v);
-    intrinsics::atomic_xchg(p, v);
 }
 
 fn main() {}
diff --git a/src/test/run-pass/issue-23833.rs b/src/test/run-pass/issue-23833.rs
new file mode 100644 (file)
index 0000000..7d63c41
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::fmt;
+use std::{i8, i16, i32, i64, isize};
+use std::{u8, u16, u32, u64, usize};
+
+const A_I8_T
+    : [u32; (i8::MAX as i8 - 1i8) as usize]
+    = [0; (i8::MAX as usize) - 1];
+
+fn main() {
+    foo(&A_I8_T[..]);
+}
+
+fn foo<T:fmt::Debug>(x: T) {
+    println!("{:?}", x);
+}
diff --git a/src/test/run-pass/issue-24972.rs b/src/test/run-pass/issue-24972.rs
deleted file mode 100644 (file)
index ae7eb84..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(rustc_private)]
-
-extern crate serialize;
-
-use serialize::{Encodable, Decodable};
-use std::fmt::Display;
-
-pub trait Entity : Decodable + Encodable + Sized {
-    type Key: Clone + Decodable + Encodable + ToString + Display + Eq + Ord + Sized;
-
-    fn id(&self) -> Self::Key;
-
-    fn find_by_id(id: Self::Key) -> Option<Self>;
-}
-
-pub struct DbRef<E: Entity> {
-    pub id: E::Key,
-}
-
-impl<E> DbRef<E> where E: Entity {
-    fn get(self) -> Option<E> {
-        E::find_by_id(self.id)
-    }
-}
-
-fn main() {}
index 7413ebd35046059abfa82a2b0199b272daa1dbef..365b594c99e36fb13f101bd266d2ddab68b8d110 100644 (file)
@@ -11,8 +11,6 @@
 
 // aux-build:issue-2631-a.rs
 
-// pretty-expanded FIXME #23616
-
 extern crate req;
 
 use req::request;
diff --git a/src/test/run-pass/issue-26997.rs b/src/test/run-pass/issue-26997.rs
new file mode 100644 (file)
index 0000000..e7ecf68
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub struct Foo {
+    x: isize,
+    y: isize
+}
+
+impl Foo {
+    pub extern fn foo_new() -> Foo {
+        Foo { x: 21, y: 33 }
+    }
+}
+
+fn main() {}
index 7fee2fc16cbed96edbce3668dfccc6459231eec1..bab7b0d24db70f2af04493989d13c80425d29f81 100644 (file)
@@ -10,8 +10,6 @@
 
 // aux-build:issue_2723_a.rs
 
-// pretty-expanded FIXME #23616
-
 extern crate issue_2723_a;
 use issue_2723_a::f;
 
diff --git a/src/test/run-pass/issue-2804.rs b/src/test/run-pass/issue-2804.rs
deleted file mode 100644 (file)
index a2b4e21..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(collections)]
-#![feature(rustc_private)]
-
-extern crate collections;
-extern crate serialize;
-
-use std::collections::HashMap;
-use serialize::json::{self, Json};
-use std::option;
-
-enum object {
-    bool_value(bool),
-    int_value(i64),
-}
-
-fn lookup(table: json::Object, key: String, default: String) -> String
-{
-    match table.get(&key) {
-        option::Option::Some(&Json::String(ref s)) => {
-            s.to_string()
-        }
-        option::Option::Some(value) => {
-            println!("{} was expected to be a string but is a {}", key, value);
-            default
-        }
-        option::Option::None => {
-            default
-        }
-    }
-}
-
-fn add_interface(_store: isize, managed_ip: String, data: json::Json) -> (String, object)
-{
-    match &data {
-        &Json::Object(ref interface) => {
-            let name = lookup(interface.clone(),
-                              "ifDescr".to_string(),
-                              "".to_string());
-            let label = format!("{}-{}", managed_ip, name);
-
-            (label, object::bool_value(false))
-        }
-        _ => {
-            println!("Expected dict for {} interfaces, found {}", managed_ip, data);
-            ("gnos:missing-interface".to_string(), object::bool_value(true))
-        }
-    }
-}
-
-fn add_interfaces(store: isize, managed_ip: String, device: HashMap<String, json::Json>)
--> Vec<(String, object)> {
-    match device["interfaces"] {
-        Json::Array(ref interfaces) =>
-        {
-          interfaces.iter().map(|interface| {
-                add_interface(store, managed_ip.clone(), (*interface).clone())
-          }).collect()
-        }
-        _ =>
-        {
-            println!("Expected list for {} interfaces, found {}", managed_ip,
-                     device["interfaces"]);
-            Vec::new()
-        }
-    }
-}
-
-pub fn main() {}
diff --git a/src/test/run-pass/issue-28950.rs b/src/test/run-pass/issue-28950.rs
new file mode 100644 (file)
index 0000000..f01ce46
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Tests that the `vec!` macro does not overflow the stack when it is
+// given data larger than the stack.
+
+const LEN: usize = 1 << 15;
+
+use std::thread::Builder;
+
+fn main() {
+    assert!(Builder::new().stack_size(LEN / 2).spawn(|| {
+        let vec = vec![[0; LEN]];
+        assert_eq!(vec.len(), 1);
+    }).unwrap().join().is_ok());
+}
diff --git a/src/test/run-pass/issue-29663.rs b/src/test/run-pass/issue-29663.rs
new file mode 100644 (file)
index 0000000..9a77be0
--- /dev/null
@@ -0,0 +1,64 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// write_volatile causes an LLVM assert with composite types
+
+#![feature(volatile)]
+use std::ptr::{read_volatile, write_volatile};
+
+#[derive(Debug, Eq, PartialEq)]
+struct A(u32);
+#[derive(Debug, Eq, PartialEq)]
+struct B(u64);
+#[derive(Debug, Eq, PartialEq)]
+struct C(u32, u32);
+#[derive(Debug, Eq, PartialEq)]
+struct D(u64, u64);
+#[derive(Debug, Eq, PartialEq)]
+struct E([u64; 32]);
+
+fn main() {
+    unsafe {
+        let mut x: u32 = 0;
+        write_volatile(&mut x, 1);
+        assert_eq!(read_volatile(&x), 1);
+        assert_eq!(x, 1);
+
+        let mut x: u64 = 0;
+        write_volatile(&mut x, 1);
+        assert_eq!(read_volatile(&x), 1);
+        assert_eq!(x, 1);
+
+        let mut x = A(0);
+        write_volatile(&mut x, A(1));
+        assert_eq!(read_volatile(&x), A(1));
+        assert_eq!(x, A(1));
+
+        let mut x = B(0);
+        write_volatile(&mut x, B(1));
+        assert_eq!(read_volatile(&x), B(1));
+        assert_eq!(x, B(1));
+
+        let mut x = C(0, 0);
+        write_volatile(&mut x, C(1, 1));
+        assert_eq!(read_volatile(&x), C(1, 1));
+        assert_eq!(x, C(1, 1));
+
+        let mut x = D(0, 0);
+        write_volatile(&mut x, D(1, 1));
+        assert_eq!(read_volatile(&x), D(1, 1));
+        assert_eq!(x, D(1, 1));
+
+        let mut x = E([0; 32]);
+        write_volatile(&mut x, E([1; 32]));
+        assert_eq!(read_volatile(&x), E([1; 32]));
+        assert_eq!(x, E([1; 32]));
+    }
+}
diff --git a/src/test/run-pass/issue-30615.rs b/src/test/run-pass/issue-30615.rs
new file mode 100644 (file)
index 0000000..a26509d
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+    &0u8 as *const u8 as *const PartialEq<u8>;
+    &[0u8] as *const [u8; 1] as *const [u8];
+}
diff --git a/src/test/run-pass/issue-31597.rs b/src/test/run-pass/issue-31597.rs
new file mode 100644 (file)
index 0000000..9acd638
--- /dev/null
@@ -0,0 +1,37 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Make {
+    type Out;
+
+    fn make() -> Self::Out;
+}
+
+impl Make for () {
+    type Out = ();
+
+    fn make() -> Self::Out {}
+}
+
+// Also make sure we don't hit an ICE when the projection can't be known
+fn f<T: Make>() -> <T as Make>::Out { loop {} }
+
+// ...and that it works with a blanket impl
+trait Tr {
+    type Assoc;
+}
+
+impl<T: Make> Tr for T {
+    type Assoc = ();
+}
+
+fn g<T: Make>() -> <T as Tr>::Assoc { }
+
+fn main() {}
diff --git a/src/test/run-pass/issue-31776.rs b/src/test/run-pass/issue-31776.rs
new file mode 100644 (file)
index 0000000..a12e569
--- /dev/null
@@ -0,0 +1,64 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Various scenarios in which `pub` is required in blocks
+
+struct S;
+
+mod m {
+    fn f() {
+        impl ::S {
+            pub fn s(&self) {}
+        }
+    }
+}
+
+// ------------------------------------------------------
+
+pub trait Tr {
+    type A;
+}
+pub struct S1;
+
+fn f() {
+    pub struct Z;
+
+    impl ::Tr for ::S1 {
+        type A = Z; // Private-in-public error unless `struct Z` is pub
+    }
+}
+
+// ------------------------------------------------------
+
+trait Tr1 {
+    type A;
+    fn pull(&self) -> Self::A;
+}
+struct S2;
+
+mod m1 {
+    fn f() {
+        struct Z {
+            pub field: u8
+        }
+
+        impl ::Tr1 for ::S2 {
+            type A = Z;
+            fn pull(&self) -> Self::A { Z{field: 10} }
+        }
+    }
+}
+
+// ------------------------------------------------------
+
+fn main() {
+    S.s(); // Privacy error, unless `fn s` is pub
+    let a = S2.pull().field; // Privacy error unless `field: u8` is pub
+}
diff --git a/src/test/run-pass/issue-32292.rs b/src/test/run-pass/issue-32292.rs
new file mode 100644 (file)
index 0000000..3995caa
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(warnings)]
+
+#[derive(Hash, Ord, PartialOrd, Eq, PartialEq, Debug, Clone, Copy)]
+struct Foo;
+
+fn main() {
+    let _ = Foo;
+}
\ No newline at end of file
diff --git a/src/test/run-pass/issue-32324.rs b/src/test/run-pass/issue-32324.rs
new file mode 100644 (file)
index 0000000..ca9dd6a
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+
+trait Resources {
+    type Buffer: Copy;
+}
+
+#[derive(Copy, Clone)]
+struct ConstantBufferSet<R: Resources>(
+    pub R::Buffer
+);
+
+#[derive(Copy, Clone)]
+enum It {}
+impl Resources for It {
+    type Buffer = u8;
+}
+
+#[derive(Copy, Clone)]
+enum Command {
+    BindConstantBuffers(ConstantBufferSet<It>)
+}
+
+fn main() {}
diff --git a/src/test/run-pass/issue-32389.rs b/src/test/run-pass/issue-32389.rs
new file mode 100644 (file)
index 0000000..2f6cfb6
--- /dev/null
@@ -0,0 +1,20 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn foo<T>() -> T { loop {} }
+
+fn test() {
+    let ref mut a: &mut FnMut((i8,), i16) = foo();
+    a((0,), 0);
+}
+
+fn main() {
+    let _ = test;
+}
diff --git a/src/test/run-pass/issue-32518.rs b/src/test/run-pass/issue-32518.rs
new file mode 100644 (file)
index 0000000..386d3e6
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+// aux-build:cgu_test.rs
+// aux-build:cgu_test_a.rs
+// aux-build:cgu_test_b.rs
+
+extern crate cgu_test_a;
+extern crate cgu_test_b;
+
+fn main() {
+    cgu_test_a::a::a();
+    cgu_test_b::a::a();
+}
diff --git a/src/test/run-pass/issue-33461.rs b/src/test/run-pass/issue-33461.rs
new file mode 100644 (file)
index 0000000..ac7f7ff
--- /dev/null
@@ -0,0 +1,36 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::marker::PhantomData;
+
+struct TheType<T> {
+    t: PhantomData<T>
+}
+
+pub trait TheTrait {
+    type TheAssociatedType;
+}
+
+impl TheTrait for () {
+    type TheAssociatedType = ();
+}
+
+pub trait Shape<P: TheTrait> {
+    fn doit(&self) {
+    }
+}
+
+impl<P: TheTrait> Shape<P> for TheType<P::TheAssociatedType> {
+}
+
+fn main() {
+    let ball = TheType { t: PhantomData };
+    let handle: &Shape<()> = &ball;
+}
diff --git a/src/test/run-pass/issue-4016.rs b/src/test/run-pass/issue-4016.rs
deleted file mode 100644 (file)
index bc3fa16..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-
-#![feature(rustc_private)]
-
-extern crate serialize;
-
-use serialize::{json, Decodable};
-
-trait JD : Decodable {}
-
-fn exec<T: JD>() {
-    let doc = json::from_str("").unwrap();
-    let mut decoder = json::Decoder::new(doc);
-    let _v: T = Decodable::decode(&mut decoder).unwrap();
-    panic!()
-}
-
-pub fn main() {}
diff --git a/src/test/run-pass/issue-4036.rs b/src/test/run-pass/issue-4036.rs
deleted file mode 100644 (file)
index ae7bb8a..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-
-// Issue #4036: Test for an issue that arose around fixing up type inference
-// byproducts in vtable records.
-
-// pretty-expanded FIXME #23616
-
-#![feature(rustc_private)]
-
-extern crate serialize;
-
-use serialize::{json, Decodable};
-
-pub fn main() {
-    let json = json::from_str("[1]").unwrap();
-    let mut decoder = json::Decoder::new(json);
-    let _x: Vec<isize> = Decodable::decode(&mut decoder).unwrap();
-}
index 8ec9f8aff8ec72da5143d30a96941768db3ed82c..7589bce31f480ca7faeb98051748ee4878a82777 100644 (file)
@@ -9,31 +9,44 @@
 // except according to those terms.
 
 // ignore-emscripten no threads support
+// ignore-pretty : (#23623) problems when ending with // comments
 
-#![feature(zero_one)]
+#![feature(rustc_attrs, stmt_expr_attributes, zero_one)]
 
 use std::num::Zero;
 use std::thread;
 
+macro_rules! check {
+    ($($e:expr),*) => {
+        $(assert!(thread::spawn({
+            #[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
+            move|| { $e; }
+        }).join().is_err());)*
+    }
+}
+
+#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
 fn main() {
-    assert!(thread::spawn(move|| { isize::min_value() / -1; }).join().is_err());
-    assert!(thread::spawn(move|| { i8::min_value() / -1; }).join().is_err());
-    assert!(thread::spawn(move|| { i16::min_value() / -1; }).join().is_err());
-    assert!(thread::spawn(move|| { i32::min_value() / -1; }).join().is_err());
-    assert!(thread::spawn(move|| { i64::min_value() / -1; }).join().is_err());
-    assert!(thread::spawn(move|| { 1isize / isize::zero(); }).join().is_err());
-    assert!(thread::spawn(move|| { 1i8 / i8::zero(); }).join().is_err());
-    assert!(thread::spawn(move|| { 1i16 / i16::zero(); }).join().is_err());
-    assert!(thread::spawn(move|| { 1i32 / i32::zero(); }).join().is_err());
-    assert!(thread::spawn(move|| { 1i64 / i64::zero(); }).join().is_err());
-    assert!(thread::spawn(move|| { isize::min_value() % -1; }).join().is_err());
-    assert!(thread::spawn(move|| { i8::min_value() % -1; }).join().is_err());
-    assert!(thread::spawn(move|| { i16::min_value() % -1; }).join().is_err());
-    assert!(thread::spawn(move|| { i32::min_value() % -1; }).join().is_err());
-    assert!(thread::spawn(move|| { i64::min_value() % -1; }).join().is_err());
-    assert!(thread::spawn(move|| { 1isize % isize::zero(); }).join().is_err());
-    assert!(thread::spawn(move|| { 1i8 % i8::zero(); }).join().is_err());
-    assert!(thread::spawn(move|| { 1i16 % i16::zero(); }).join().is_err());
-    assert!(thread::spawn(move|| { 1i32 % i32::zero(); }).join().is_err());
-    assert!(thread::spawn(move|| { 1i64 % i64::zero(); }).join().is_err());
+    check![
+        isize::min_value() / -1,
+        i8::min_value() / -1,
+        i16::min_value() / -1,
+        i32::min_value() / -1,
+        i64::min_value() / -1,
+        1isize / isize::zero(),
+        1i8 / i8::zero(),
+        1i16 / i16::zero(),
+        1i32 / i32::zero(),
+        1i64 / i64::zero(),
+        isize::min_value() % -1,
+        i8::min_value() % -1,
+        i16::min_value() % -1,
+        i32::min_value() % -1,
+        i64::min_value() % -1,
+        1isize % isize::zero(),
+        1i8 % i8::zero(),
+        1i16 % i16::zero(),
+        1i32 % i32::zero(),
+        1i64 % i64::zero()
+    ];
 }
index 62aa300578355027ff8fbf315d301a20bb560d96..5b40105a9791637620dcdc954af8ac8fd832d042 100644 (file)
@@ -8,8 +8,5 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-
-// pretty-expanded FIXME #23616
-
 fn f(_a: Vec<isize> ) { }
 pub fn main() { f(vec!(1, 2, 3, 4, 5)); }
index e7498f7c1741dbff45d0720d948e3351151edb30..b8238774bc1e00771b3e632ac0456cb49f6840f6 100644 (file)
@@ -8,7 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// pretty-expanded FIXME #23616
 // ignore-emscripten no threads support
 
 #![feature(std_misc)]
diff --git a/src/test/run-pass/linkage-visibility.rs b/src/test/run-pass/linkage-visibility.rs
deleted file mode 100644 (file)
index e6eaefb..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// aux-build:linkage-visibility.rs
-// ignore-android: FIXME(#10356)
-// ignore-windows: std::dynamic_lib does not work on Windows well
-// ignore-musl
-
-extern crate linkage_visibility as foo;
-
-pub fn main() {
-    foo::test();
-    foo::foo2::<isize>();
-    foo::foo();
-}
diff --git a/src/test/run-pass/lint-dead-code-associated-type.rs b/src/test/run-pass/lint-dead-code-associated-type.rs
new file mode 100644 (file)
index 0000000..1ae078b
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(dead_code)]
+
+trait Foo {
+    type Bar;
+}
+
+struct Used;
+
+struct Ex;
+
+impl Foo for Ex {
+    type Bar = Used;
+}
+
+pub fn main() {
+    let _x = Ex;
+}
diff --git a/src/test/run-pass/logging-enabled-debug.rs b/src/test/run-pass/logging-enabled-debug.rs
deleted file mode 100644 (file)
index 3ae4884..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// compile-flags:-C debug-assertions=no
-// exec-env:RUST_LOG=logging-enabled-debug=debug
-
-
-#![feature(rustc_private)]
-
-#[macro_use]
-extern crate log;
-
-pub fn main() {
-    if log_enabled!(log::DEBUG) {
-        panic!("what?! debugging?");
-    }
-}
diff --git a/src/test/run-pass/logging-enabled.rs b/src/test/run-pass/logging-enabled.rs
deleted file mode 100644 (file)
index 2975835..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// exec-env:RUST_LOG=logging_enabled=info
-
-
-#![feature(rustc_private)]
-
-#[macro_use]
-extern crate log;
-
-pub fn main() {
-    if log_enabled!(log::DEBUG) {
-        panic!("what?! debugging?");
-    }
-    if !log_enabled!(log::INFO) {
-        panic!("what?! no info?");
-    }
-}
diff --git a/src/test/run-pass/logging-right-crate.rs b/src/test/run-pass/logging-right-crate.rs
deleted file mode 100644 (file)
index 7caeeb4..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// aux-build:logging_right_crate.rs
-// exec-env:RUST_LOG=logging-right-crate=debug
-
-// This is a test for issue #3046 to make sure that when we monomorphize a
-// function from one crate to another the right top-level logging name is
-// preserved.
-//
-// It used to be the case that if logging were turned on for this crate, all
-// monomorphized functions from other crates had logging turned on (their
-// logging module names were all incorrect). This test ensures that this no
-// longer happens by enabling logging for *this* crate and then invoking a
-// function in an external crate which will panic when logging is enabled.
-
-// pretty-expanded FIXME #23616
-
-extern crate logging_right_crate;
-
-pub fn main() {
-    // this function panicks if logging is turned on
-    logging_right_crate::foo::<isize>();
-}
diff --git a/src/test/run-pass/logging-separate-lines.rs b/src/test/run-pass/logging-separate-lines.rs
deleted file mode 100644 (file)
index 0975932..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// ignore-windows
-// exec-env:RUST_LOG=debug
-// compile-flags:-C debug-assertions=y
-
-#![feature(rustc_private)]
-
-#[macro_use]
-extern crate log;
-
-use std::process::Command;
-use std::env;
-use std::str;
-
-fn main() {
-    let args: Vec<String> = env::args().collect();
-    if args.len() > 1 && args[1] == "child" {
-        debug!("foo");
-        debug!("bar");
-        return
-    }
-
-    let p = Command::new(&args[0])
-                    .arg("child")
-                    .output().unwrap();
-    assert!(p.status.success());
-    let mut lines = str::from_utf8(&p.stderr).unwrap().lines();
-    assert!(lines.next().unwrap().contains("foo"));
-    assert!(lines.next().unwrap().contains("bar"));
-}
index 6a3a495f2f1998f92755c080881775f4db6371c1..a2ae3fbf83b0f6e82e0fd582f073f087cc1c38f3 100644 (file)
@@ -8,8 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// pretty-expanded FIXME #23616
-
 fn main() {
     vec![1_usize, 2, 3].len();
 }
index 43ff69fe75e6c6164c29badab6d37c21741b27ff..9700ed247959b43039d5bb8afebdf5a81be1b2e4 100644 (file)
@@ -9,18 +9,24 @@
 // except according to those terms.
 
 
+#[derive(PartialEq, Eq)]
 struct NewBool(bool);
 
+#[derive(PartialEq, Eq)]
 enum Direction {
     North,
     East,
     South,
     West
 }
+
+#[derive(PartialEq, Eq)]
 struct Foo {
     bar: Option<Direction>,
     baz: NewBool
 }
+
+#[derive(PartialEq, Eq)]
 enum EnumWithStructVariants {
     Variant1(bool),
     Variant2 {
@@ -37,7 +43,7 @@ const VARIANT2_NORTH: EnumWithStructVariants = EnumWithStructVariants::Variant2
     dir: Direction::North };
 
 pub mod glfw {
-    #[derive(Copy, Clone)]
+    #[derive(Copy, Clone, PartialEq, Eq)]
     pub struct InputState(usize);
 
     pub const RELEASE  : InputState = InputState(0);
@@ -82,6 +88,7 @@ fn issue_14576() {
         _ => unreachable!()
     }
 
+    #[derive(PartialEq, Eq)]
     enum C { D = 3, E = 4 }
     const F : C = C::D;
 
@@ -89,6 +96,7 @@ fn issue_14576() {
 }
 
 fn issue_13731() {
+    #[derive(PartialEq, Eq)]
     enum A { AA(()) }
     const B: A = A::AA(());
 
@@ -99,6 +107,7 @@ fn issue_13731() {
 
 fn issue_15393() {
     #![allow(dead_code)]
+    #[derive(PartialEq, Eq)]
     struct Flags {
         bits: usize
     }
index 37decb9e7482b0defb7c1f63396f58d0dbacb720..ba48648df32fb04255bb77d04f663484c6f534e1 100644 (file)
@@ -8,8 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(iter_min_max, cmp_partial, iter_cmp)]
-
 use std::fmt::Debug;
 use std::cmp::{self, PartialOrd, Ordering};
 
@@ -43,13 +41,13 @@ fn main() {
 
     // `min` should return the left when the values are equal
     assert_eq!(data.iter().min(), Some(&a));
-    assert_eq!(data.iter().min_by(|a| a.n), Some(&a));
+    assert_eq!(data.iter().min_by_key(|a| a.n), Some(&a));
     assert_eq!(cmp::min(a, b), a);
     assert_eq!(cmp::min(b, a), b);
 
     // `max` should return the right when the values are equal
     assert_eq!(data.iter().max(), Some(&f));
-    assert_eq!(data.iter().max_by(|a| a.n), Some(&f));
+    assert_eq!(data.iter().max_by_key(|a| a.n), Some(&f));
     assert_eq!(cmp::max(e, f), f);
     assert_eq!(cmp::max(f, e), e);
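For context on the `min_by`/`max_by` → `min_by_key`/`max_by_key` rename applied above: the `_by_key` adapters order elements by the key the closure returns rather than by a caller-supplied comparison. A small illustrative sketch, separate from the test:

    fn main() {
        let data = [3i32, -7, 2, -1];
        // Order by absolute value; on ties, min keeps the first element and max the last.
        assert_eq!(data.iter().min_by_key(|x| x.abs()), Some(&-1));
        assert_eq!(data.iter().max_by_key(|x| x.abs()), Some(&-7));
    }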
 
diff --git a/src/test/run-pass/mir_fat_ptr_drop.rs b/src/test/run-pass/mir_fat_ptr_drop.rs
new file mode 100644 (file)
index 0000000..3f79be0
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// test that ordinary fat pointer operations work.
+
+#![feature(braced_empty_structs)]
+#![feature(rustc_attrs)]
+
+use std::sync::atomic;
+use std::sync::atomic::Ordering::SeqCst;
+
+static COUNTER: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
+
+struct DropMe {
+}
+
+impl Drop for DropMe {
+    fn drop(&mut self) {
+        COUNTER.fetch_add(1, SeqCst);
+    }
+}
+
+#[rustc_mir]
+fn fat_ptr_move_then_drop(a: Box<[DropMe]>) {
+    let b = a;
+}
+
+fn main() {
+    let a: Box<[DropMe]> = Box::new([DropMe { }]);
+    fat_ptr_move_then_drop(a);
+    assert_eq!(COUNTER.load(SeqCst), 1);
+}
index 9bbfbb6822463083cad6d1252dbd58c06cbb3775..c0ba7a76dba480515e42dc683236966c922bc06b 100644 (file)
@@ -10,6 +10,8 @@
 
 #![feature(rustc_attrs)]
 
+// ignore-pretty : (#23623) problems when ending with // comments
+
 // check raw fat pointer ops in mir
 // FIXME: please improve this when we get monomorphization support
 
@@ -119,6 +121,7 @@ impl<T> Foo for T {
 
 struct S<T:?Sized>(u32, T);
 
+#[rustc_no_mir] // FIXME #27840 MIR can't do rvalue promotion yet.
 fn main() {
     let array = [0,1,2,3,4];
     let array2 = [5,6,7,8,9];
index 93953e3f58ae0f2f4d477f4948bf64d5f931afd2..67baf2f9c49c190df6e4c089e6828bf4ba64a161 100644 (file)
@@ -204,48 +204,41 @@ fn t24() -> fn(u8) -> S {
     C4
 }
 
-fn main(){
-    unsafe {
-        assert_eq!(t1()(), regular());
-
-        assert!(::std::mem::transmute::<_, *mut ()>(t2()) ==
-                ::std::mem::transmute::<_, *mut ()>(E::U));
-        assert!(::std::mem::transmute::<_, *mut ()>(t3()) ==
-                ::std::mem::transmute::<_, *mut ()>(S));
-
-        assert_eq!(t4()(), S::hey());
-        let s = S(42);
-        assert_eq!(t5()(&s), <S as X>::hoy(&s));
-
-
-        assert_eq!(t6()(), ext::regular_fn());
-        assert!(::std::mem::transmute::<_, *mut ()>(t7()) ==
-                ::std::mem::transmute::<_, *mut ()>(ext::E::U));
-        assert!(::std::mem::transmute::<_, *mut ()>(t8()) ==
-                ::std::mem::transmute::<_, *mut ()>(ext::S));
-
-        assert_eq!(t9()(), ext::S::hey());
-        let sext = ext::S(6);
-        assert_eq!(t10()(&sext), <ext::S as ext::X>::hoy(&sext));
-
-        let p = parametric::<u8>;
-        assert!(::std::mem::transmute::<_, *mut ()>(t11()) ==
-                ::std::mem::transmute::<_, *mut ()>(p));
-
-        assert_eq!(t12(), C);
-        assert_eq!(t13(), C2);
-        assert_eq!(t13_2(), C3);
-
-        assert_eq!(t14()(), <S as X>::hoy2());
-        assert_eq!(t15()(&s), S::hey2(&s));
-        assert_eq!(t16()(10u32, 20u32), F::f(10u32, 20u32));
-        assert_eq!(t17()(30u32, 10u64), F::f(30u32, 10u64));
-        assert_eq!(t18()(50u64, 5u64), F::f(50u64, 5u64));
-        assert_eq!(t19()(322u64, 2u32), F::f(322u64, 2u32));
-        assert_eq!(t20()(123u64, 38u32), <u32 as T<_, _>>::staticmeth(123, 38));
-        assert_eq!(t21(), Unit);
-        assert_eq!(t22(), None);
-        assert_eq!(t23(), (CEnum::A, CEnum::B));
-        assert_eq!(t24(), C4);
-    }
+fn main() {
+    assert_eq!(t1()(), regular());
+
+    assert_eq!(t2() as *mut (), E::U as *mut ());
+    assert_eq!(t3() as *mut (), S as *mut ());
+
+    assert_eq!(t4()(), S::hey());
+    let s = S(42);
+    assert_eq!(t5()(&s), <S as X>::hoy(&s));
+
+
+    assert_eq!(t6()(), ext::regular_fn());
+    assert_eq!(t7() as *mut (), ext::E::U as *mut ());
+    assert_eq!(t8() as *mut (), ext::S as *mut ());
+
+    assert_eq!(t9()(), ext::S::hey());
+    let sext = ext::S(6);
+    assert_eq!(t10()(&sext), <ext::S as ext::X>::hoy(&sext));
+
+    let p = parametric::<u8>;
+    assert_eq!(t11() as *mut (), p as *mut ());
+
+    assert_eq!(t12(), C);
+    assert_eq!(t13(), C2);
+    assert_eq!(t13_2(), C3);
+
+    assert_eq!(t14()(), <S as X>::hoy2());
+    assert_eq!(t15()(&s), S::hey2(&s));
+    assert_eq!(t16()(10u32, 20u32), F::f(10u32, 20u32));
+    assert_eq!(t17()(30u32, 10u64), F::f(30u32, 10u64));
+    assert_eq!(t18()(50u64, 5u64), F::f(50u64, 5u64));
+    assert_eq!(t19()(322u64, 2u32), F::f(322u64, 2u32));
+    assert_eq!(t20()(123u64, 38u32), <u32 as T<_, _>>::staticmeth(123, 38));
+    assert_eq!(t21(), Unit);
+    assert_eq!(t22(), None);
+    assert_eq!(t23(), (CEnum::A, CEnum::B));
+    assert_eq!(t24(), C4);
 }
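The rewrite above drops `mem::transmute` in favour of plain `as` casts for the pointer-identity checks: a function item or fn pointer can be cast directly to a raw pointer, so no unsafe block is needed. A minimal sketch under that assumption:

    fn answer() -> u8 { 42 }

    fn main() {
        let f: fn() -> u8 = answer;
        // Casting to a raw pointer yields the code address, which can be compared safely.
        assert_eq!(f as *mut (), answer as *mut ());
    }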
diff --git a/src/test/run-pass/mir_trans_critical_edge.rs b/src/test/run-pass/mir_trans_critical_edge.rs
new file mode 100644 (file)
index 0000000..320f401
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This code produces a CFG with critical edges that, if we don't
+// handle properly, will cause invalid codegen.
+
+#![feature(rustc_attrs)]
+
+enum State {
+    Both,
+    Front,
+    Back
+}
+
+pub struct Foo<A: Iterator, B: Iterator> {
+    state: State,
+    a: A,
+    b: B
+}
+
+impl<A, B> Foo<A, B>
+where A: Iterator, B: Iterator<Item=A::Item>
+{
+    // This is the function we care about
+    #[rustc_mir]
+    fn next(&mut self) -> Option<A::Item> {
+        match self.state {
+            State::Both => match self.a.next() {
+                elt @ Some(..) => elt,
+                None => {
+                    self.state = State::Back;
+                    self.b.next()
+                }
+            },
+            State::Front => self.a.next(),
+            State::Back => self.b.next(),
+        }
+    }
+}
+
+// Make sure we actually translate a version of the function
+pub fn do_stuff(mut f: Foo<Box<Iterator<Item=u32>>, Box<Iterator<Item=u32>>>) {
+    let _x = f.next();
+}
+
+fn main() {}
index 6a0d7278b5e16481fd5c9c486730ee2cb846aabc..8e0b14128c83b0896cb93b218ad8c5b61829c460 100644 (file)
@@ -8,6 +8,17 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+fn check_for_no_backtrace(test: std::process::Output) {
+    assert!(!test.status.success());
+    let err = String::from_utf8_lossy(&test.stderr);
+    let mut it = err.lines();
+
+    assert_eq!(it.next().map(|l| l.starts_with("thread '<unnamed>' panicked at")), Some(true));
+    assert_eq!(it.next(), Some("note: Run with `RUST_BACKTRACE=1` for a backtrace."));
+    assert_eq!(it.next().map(|l| l.starts_with("thread '<main>' panicked at")), Some(true));
+    assert_eq!(it.next(), None);
+}
+
 fn main() {
     let args: Vec<String> = std::env::args().collect();
     if args.len() > 1 && args[1] == "run_test" {
@@ -21,13 +32,11 @@ fn main() {
                                                        .env_remove("RUST_BACKTRACE")
                                                        .output()
                                                        .unwrap();
-        assert!(!test.status.success());
-        let err = String::from_utf8_lossy(&test.stderr);
-        let mut it = err.lines();
-
-        assert_eq!(it.next().map(|l| l.starts_with("thread '<unnamed>' panicked at")), Some(true));
-        assert_eq!(it.next(), Some("note: Run with `RUST_BACKTRACE=1` for a backtrace."));
-        assert_eq!(it.next().map(|l| l.starts_with("thread '<main>' panicked at")), Some(true));
-        assert_eq!(it.next(), None);
+        check_for_no_backtrace(test);
+        let test = std::process::Command::new(&args[0]).arg("run_test")
+                                                       .env("RUST_BACKTRACE","0")
+                                                       .output()
+                                                       .unwrap();
+        check_for_no_backtrace(test);
     }
 }
index 224111900d60cdcf645ea9ee2193371092cc0d47..2b7cf6c6682477bf890547c1bb854b9e96a8e425 100644 (file)
 use std::mem;
 
 #[inline(never)]
-extern "C" fn foo<'a>(x: &'a isize) -> Option<&'a isize> { Some(x) }
+extern "C" fn foo(x: &isize) -> Option<&isize> { Some(x) }
 
 static FOO: isize = 0xDEADBEE;
 
 pub fn main() {
     unsafe {
-        let f: for<'a> extern "C" fn(&'a isize) -> &'a isize = mem::transmute(foo);
+        let f: extern "C" fn(&isize) -> &isize =
+            mem::transmute(foo as extern "C" fn(&isize) -> Option<&isize>);
         assert_eq!(*f(&FOO), FOO);
     }
 }
index cfd3bb49f34d45f9750e5888e704fedc8e619569..dffdcfe0af562d5d4a59f4eba925f6e0f34e9ef2 100644 (file)
@@ -12,8 +12,6 @@
 #![allow(unknown_features)]
 #![feature(box_syntax)]
 
-use std::{option, mem};
-
 // Iota-reduction is a rule in the Calculus of (Co-)Inductive Constructions,
 // which "says that a destructor applied to an object built from a constructor
 // behaves as expected".  -- http://coq.inria.fr/doc/Reference-Manual006.html
@@ -43,9 +41,9 @@ macro_rules! check_option {
         check_option!($e, $T, |ptr| assert_eq!(*ptr, $e));
     }};
     ($e:expr, $T:ty, |$v:ident| $chk:expr) => {{
-        assert!(option::Option::None::<$T>.is_none());
+        assert!(None::<$T>.is_none());
         let e = $e;
-        let s_ = option::Option::Some::<$T>(e);
+        let s_ = Some::<$T>(e);
         let $v = s_.as_ref().unwrap();
         $chk
     }}
@@ -78,9 +76,8 @@ pub fn main() {
     check_type!(&17, &isize);
     check_type!(box 18, Box<isize>);
     check_type!("foo".to_string(), String);
-    check_type!(vec!(20, 22), Vec<isize> );
-    let mint: usize = unsafe { mem::transmute(main) };
+    check_type!(vec!(20, 22), Vec<isize>);
     check_type!(main, fn(), |pthing| {
-        assert_eq!(mint, unsafe { mem::transmute(*pthing) })
+        assert_eq!(main as fn(), *pthing as fn())
     });
 }
index faa02c6698edd535025b9020111730e18f03a852..143759e271561c81d75c540740d5909d3bd8371d 100644 (file)
@@ -12,7 +12,7 @@
 //
 // Test std::num::Wrapping<T> for {uN, iN, usize, isize}
 
-#![feature(num_bits_bytes, test)]
+#![feature(test)]
 
 extern crate test;
 
@@ -22,9 +22,40 @@ use std::ops::{
     AddAssign, SubAssign, MulAssign, DivAssign, RemAssign, BitXorAssign, BitOrAssign, BitAndAssign,
     Shl, Shr, ShlAssign, ShrAssign
 };
-use std::{i8, i16, i32, i64, isize, u8, u16, u32, u64, usize};
 use test::black_box;
 
+macro_rules! int_modules {
+    ($(($name:ident, $size:expr),)*) => ($(
+        mod $name {
+            pub const BITS: usize = $size;
+            pub use std::$name::*;
+        }
+    )*)
+}
+
+int_modules! {
+    (i8, 8),
+    (i16, 16),
+    (i32, 32),
+    (i64, 64),
+    (u8, 8),
+    (u16, 16),
+    (u32, 32),
+    (u64, 64),
+}
+
+#[cfg(target_pointer_width = "32")]
+int_modules! {
+    (isize, 32),
+    (usize, 32),
+}
+
+#[cfg(target_pointer_width = "64")]
+int_modules! {
+    (isize, 64),
+    (usize, 64),
+}
+
 fn main() {
     test_ops();
     test_op_assigns();
index 1ed592d3d6b92622a05ab013ace8c24c476a29bb..7c2e3f0c91baff5774974c863736bb22470a1015 100644 (file)
@@ -17,12 +17,12 @@ static A: AtomicUsize = AtomicUsize::new(0);
 static B: AtomicUsize = AtomicUsize::new(0);
 
 fn main() {
-    panic::set_handler(|_| { A.fetch_add(1, Ordering::SeqCst); });
-    let handler = panic::take_handler();
-    panic::set_handler(move |info| {
+    panic::set_hook(Box::new(|_| { A.fetch_add(1, Ordering::SeqCst); }));
+    let hook = panic::take_hook();
+    panic::set_hook(Box::new(move |info| {
         B.fetch_add(1, Ordering::SeqCst);
-        handler(info);
-    });
+        hook(info);
+    }));
 
     let _ = thread::spawn(|| {
         panic!();
index 39ea987f71ba4b0f93914b94ca60036c50f4277e..311310712df8360096fd7cac349c3dd55cd9d699 100644 (file)
@@ -15,28 +15,28 @@ use std::panic;
 use std::thread;
 
 fn a() {
-    panic::set_handler(|_| println!("hello yes this is a"));
-    panic::take_handler();
-    panic::set_handler(|_| println!("hello yes this is a part 2"));
-    panic::take_handler();
+    panic::set_hook(Box::new(|_| println!("hello yes this is a")));
+    panic::take_hook();
+    panic::set_hook(Box::new(|_| println!("hello yes this is a part 2")));
+    panic::take_hook();
 }
 
 fn b() {
-    panic::take_handler();
-    panic::take_handler();
-    panic::take_handler();
-    panic::take_handler();
-    panic::take_handler();
+    panic::take_hook();
+    panic::take_hook();
+    panic::take_hook();
+    panic::take_hook();
+    panic::take_hook();
     panic!();
 }
 
 fn c() {
-    panic::set_handler(|_| ());
-    panic::set_handler(|_| ());
-    panic::set_handler(|_| ());
-    panic::set_handler(|_| ());
-    panic::set_handler(|_| ());
-    panic::set_handler(|_| ());
+    panic::set_hook(Box::new(|_| ()));
+    panic::set_hook(Box::new(|_| ()));
+    panic::set_hook(Box::new(|_| ()));
+    panic::set_hook(Box::new(|_| ()));
+    panic::set_hook(Box::new(|_| ()));
+    panic::set_hook(Box::new(|_| ()));
     panic!();
 }
 
index ed9a02c1d3eccdde8c9a0b64333fa0c32ea15aeb..196e08a63a7f0fafecb1ac889681a7398e5264a7 100644 (file)
@@ -18,8 +18,8 @@ use std::thread;
 static A: AtomicUsize = AtomicUsize::new(0);
 
 fn main() {
-    panic::set_handler(|_| ());
-    panic::set_handler(|info| { A.fetch_add(1, Ordering::SeqCst); });
+    panic::set_hook(Box::new(|_| ()));
+    panic::set_hook(Box::new(|info| { A.fetch_add(1, Ordering::SeqCst); }));
 
     let _ = thread::spawn(|| {
         panic!();
index 9949b79278c11a2ecb871557a17ce481302d57d1..b918f79f2d5d6fc71f147d68a8459dc14a393f1b 100644 (file)
 #![allow(dead_code)]
 #![feature(recover)]
 
-use std::panic::{RecoverSafe, AssertRecoverSafe};
+use std::panic::{UnwindSafe, AssertUnwindSafe};
 use std::cell::RefCell;
 use std::sync::{Mutex, RwLock, Arc};
 use std::rc::Rc;
 
 struct Foo { a: i32 }
 
-fn assert<T: RecoverSafe + ?Sized>() {}
+fn assert<T: UnwindSafe + ?Sized>() {}
 
 fn main() {
     assert::<i32>();
@@ -43,13 +43,13 @@ fn main() {
         assert::<Mutex<T>>();
         assert::<RwLock<T>>();
     }
-    fn baz<T: RecoverSafe>() {
+    fn baz<T: UnwindSafe>() {
         assert::<Box<T>>();
         assert::<Vec<T>>();
         assert::<RefCell<T>>();
-        assert::<AssertRecoverSafe<T>>();
-        assert::<&AssertRecoverSafe<T>>();
-        assert::<Rc<AssertRecoverSafe<T>>>();
-        assert::<Arc<AssertRecoverSafe<T>>>();
+        assert::<AssertUnwindSafe<T>>();
+        assert::<&AssertUnwindSafe<T>>();
+        assert::<Rc<AssertUnwindSafe<T>>>();
+        assert::<Arc<AssertUnwindSafe<T>>>();
     }
 }
diff --git a/src/test/run-pass/parser-unicode-whitespace.rs b/src/test/run-pass/parser-unicode-whitespace.rs
new file mode 100644 (file)
index 0000000..837bb83
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+// Beware editing: it has numerous whitespace characters which are important.
+// It contains characters from the 'PATTERN_WHITE_SPACE' property outlined in
+// http://unicode.org/Public/UNIDATA/PropList.txt
+//
+// The characters in the first expression of the assertion can be generated
+// from: "4\u{0C}+\n\t\r7\t*\u{20}2\u{85}/\u{200E}3\u{200F}*\u{2028}2\u{2029}"
+pub fn main() {
+assert_eq!(4\f+
+
+7   * 2\85/‎3‏*
2
, 4 + 7 * 2 / 3 * 2);
+}
diff --git a/src/test/run-pass/placement-new-arena.rs b/src/test/run-pass/placement-new-arena.rs
deleted file mode 100644 (file)
index 7ac624e..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(rustc_private)]
-
-extern crate arena;
-use arena::Arena;
-
-pub fn main() {
-    let mut arena = Arena::new();
-    let p = &mut arena;
-    let x = p.alloc(|| 4_usize);
-    println!("{}", *x);
-    assert_eq!(*x, 4_usize);
-}
diff --git a/src/test/run-pass/process-status-inherits-stdin.rs b/src/test/run-pass/process-status-inherits-stdin.rs
new file mode 100644 (file)
index 0000000..2ad47c4
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::env;
+use std::io;
+use std::io::Write;
+use std::process::{Command, Stdio};
+
+fn main() {
+    let mut args = env::args();
+    let me = args.next().unwrap();
+    let arg = args.next();
+    match arg.as_ref().map(|s| &s[..]) {
+        None => {
+            let mut s = Command::new(&me)
+                                .arg("a1")
+                                .stdin(Stdio::piped())
+                                .spawn()
+                                .unwrap();
+            s.stdin.take().unwrap().write_all(b"foo\n").unwrap();
+            let s = s.wait().unwrap();
+            assert!(s.success());
+        }
+        Some("a1") => {
+            let s = Command::new(&me).arg("a2").status().unwrap();
+            assert!(s.success());
+        }
+        Some(..) => {
+            let mut s = String::new();
+            io::stdin().read_line(&mut s).unwrap();
+            assert_eq!(s, "foo\n");
+        }
+    }
+}
index 24261772add0353a3ecb25055132e5abe03b9142..4c249bbe1f73e338b90d88a775e7b9227f205a3d 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
 // file at the top-level directory of this distribution and at
 // http://rust-lang.org/COPYRIGHT.
 //
@@ -44,6 +44,7 @@ pub fn main() {
     let _ = 0_usize..4+4-3;
     let _ = 0..foo();
 
+    let _ = { &42..&100 }; // references to literals are OK
     let _ = ..42_usize;
 
     // Test we can use two different types with a common supertype.
diff --git a/src/test/run-pass/range_inclusive.rs b/src/test/run-pass/range_inclusive.rs
new file mode 100644 (file)
index 0000000..07233a4
--- /dev/null
@@ -0,0 +1,129 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test inclusive range syntax.
+
+#![feature(inclusive_range_syntax, inclusive_range, step_by)]
+
+use std::ops::{RangeInclusive, RangeToInclusive};
+
+fn foo() -> isize { 42 }
+
+// Test that range syntax works in return statements
+fn return_range_to() -> RangeToInclusive<i32> { return ...1; }
+
+pub fn main() {
+    let mut count = 0;
+    for i in 0_usize...10 {
+        assert!(i >= 0 && i <= 10);
+        count += i;
+    }
+    assert_eq!(count, 55);
+
+    let mut count = 0;
+    let mut range = 0_usize...10;
+    for i in range {
+        assert!(i >= 0 && i <= 10);
+        count += i;
+    }
+    assert_eq!(count, 55);
+
+    let mut count = 0;
+    for i in (0_usize...10).step_by(2) {
+        assert!(i >= 0 && i <= 10 && i % 2 == 0);
+        count += i;
+    }
+    assert_eq!(count, 30);
+
+    let _ = 0_usize...4+4-3;
+    let _ = 0...foo();
+
+    let _ = { &42...&100 }; // references to literals are OK
+    let _ = ...42_usize;
+
+    // Test we can use two different types with a common supertype.
+    let x = &42;
+    {
+        let y = 42;
+        let _ = x...&y;
+    }
+
+    // test collection indexing
+    let vec = (0...10).collect::<Vec<_>>();
+    let slice: &[_] = &*vec;
+    let string = String::from("hello world");
+    let stir = "hello world";
+
+    assert_eq!(&vec[3...6], &[3, 4, 5, 6]);
+    assert_eq!(&vec[ ...6], &[0, 1, 2, 3, 4, 5, 6]);
+
+    assert_eq!(&slice[3...6], &[3, 4, 5, 6]);
+    assert_eq!(&slice[ ...6], &[0, 1, 2, 3, 4, 5, 6]);
+
+    assert_eq!(&string[3...6], "lo w");
+    assert_eq!(&string[ ...6], "hello w");
+
+    assert_eq!(&stir[3...6], "lo w");
+    assert_eq!(&stir[ ...6], "hello w");
+
+    // test the size hints and emptying
+    let mut long = 0...255u8;
+    let mut short = 42...42;
+    assert_eq!(long.size_hint(), (256, Some(256)));
+    assert_eq!(short.size_hint(), (1, Some(1)));
+    long.next();
+    short.next();
+    assert_eq!(long.size_hint(), (255, Some(255)));
+    assert_eq!(short.size_hint(), (0, Some(0)));
+    assert_eq!(short, RangeInclusive::Empty { at: 42 });
+
+    assert_eq!(long.len(), 255);
+    assert_eq!(short.len(), 0);
+
+    // test iterating backwards
+    assert_eq!(long.next_back(), Some(255));
+    assert_eq!(long.next_back(), Some(254));
+    assert_eq!(long.next_back(), Some(253));
+    assert_eq!(long.next(), Some(1));
+    assert_eq!(long.next(), Some(2));
+    assert_eq!(long.next_back(), Some(252));
+    for i in 3...251 {
+        assert_eq!(long.next(), Some(i));
+    }
+    assert_eq!(long, RangeInclusive::Empty { at: 251 });
+
+    // check underflow
+    let mut narrow = 1...0;
+    assert_eq!(narrow.next_back(), None);
+    assert_eq!(narrow, RangeInclusive::Empty { at: 0 });
+    let mut zero = 0u8...0;
+    assert_eq!(zero.next_back(), Some(0));
+    assert_eq!(zero.next_back(), None);
+    assert_eq!(zero, RangeInclusive::Empty { at: 0 });
+    let mut high = 255u8...255;
+    assert_eq!(high.next_back(), Some(255));
+    assert_eq!(high.next_back(), None);
+    assert_eq!(high, RangeInclusive::Empty { at: 255 });
+
+    // what happens if you have a nonsense range?
+    let mut nonsense = 10...5;
+    assert_eq!(nonsense.next(), None);
+    assert_eq!(nonsense, RangeInclusive::Empty { at: 10 });
+
+    // conversion
+    assert_eq!(0...9, (0..10).into());
+    assert_eq!(0...0, (0..1).into());
+    assert_eq!(RangeInclusive::Empty { at: 1 }, (1..0).into());
+
+    // output
+    assert_eq!(format!("{:?}", 0...10), "0...10");
+    assert_eq!(format!("{:?}", ...10), "...10");
+    assert_eq!(format!("{:?}", long), "[empty range @ 251]");
+}
diff --git a/src/test/run-pass/range_inclusive_gate.rs b/src/test/run-pass/range_inclusive_gate.rs
new file mode 100644 (file)
index 0000000..5e0ec19
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that you only need the syntax gate if you don't mention the structs.
+
+#![feature(inclusive_range_syntax)]
+
+fn main() {
+    let mut count = 0;
+    for i in 0_usize...10 {
+        assert!(i >= 0 && i <= 10);
+        count += i;
+    }
+    assert_eq!(count, 55);
+}
+
index fd0d8121f5fc6df925ea96889bbc23f1c66fa639..7183937fe8035f7ae6ec1106a068f6944bb0c4fa 100644 (file)
@@ -11,9 +11,6 @@
 // Test lifetimes are linked properly when we autoslice a vector.
 // Issue #3148.
 
-
-// pretty-expanded FIXME #23616
-
 fn subslice1<'r>(v: &'r [usize]) -> &'r [usize] { v }
 
 fn both<'r>(v: &'r [usize]) -> &'r [usize] {
diff --git a/src/test/run-pass/regions-lub-ref-ref-rc.rs b/src/test/run-pass/regions-lub-ref-ref-rc.rs
new file mode 100644 (file)
index 0000000..41c6419
--- /dev/null
@@ -0,0 +1,36 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test a corner case of LUB coercion. In this case, one arm of the
+// match requires a deref coercion and the other doesn't, and there
+// is an extra `&` on the `rc`. We want to be sure that the lifetime
+// assigned to this `&rc` value is not `'a` but something smaller.  In
+// other words, the type from `rc` is `&'a Rc<String>` and the type
+// from `&rc` should be `&'x &'a Rc<String>`, where `'x` is something
+// small.
+
+use std::rc::Rc;
+
+#[derive(Clone)]
+enum CachedMir<'mir> {
+    Ref(&'mir String),
+    Owned(Rc<String>),
+}
+
+impl<'mir> CachedMir<'mir> {
+    fn get_ref<'a>(&'a self) -> &'a String {
+        match *self {
+            CachedMir::Ref(r) => r,
+            CachedMir::Owned(ref rc) => &rc,
+        }
+    }
+}
+
+fn main() { }
diff --git a/src/test/run-pass/regions-mock-tcx.rs b/src/test/run-pass/regions-mock-tcx.rs
deleted file mode 100644 (file)
index ed3cec4..0000000
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-
-// Test a sample usage pattern for regions. Makes use of the
-// following features:
-//
-// - Multiple lifetime parameters
-// - Arenas
-
-#![feature(rustc_private, libc, collections)]
-
-extern crate arena;
-extern crate collections;
-extern crate libc;
-
-use TypeStructure::{TypeInt, TypeFunction};
-use AstKind::{ExprInt, ExprVar, ExprLambda};
-use arena::TypedArena;
-use std::collections::HashMap;
-use std::mem;
-
-type Type<'tcx> = &'tcx TypeStructure<'tcx>;
-
-#[derive(Copy, Clone, Debug)]
-enum TypeStructure<'tcx> {
-    TypeInt,
-    TypeFunction(Type<'tcx>, Type<'tcx>),
-}
-
-impl<'tcx> PartialEq for TypeStructure<'tcx> {
-    fn eq(&self, other: &TypeStructure<'tcx>) -> bool {
-        match (*self, *other) {
-            (TypeInt, TypeInt) => true,
-            (TypeFunction(s_a, s_b), TypeFunction(o_a, o_b)) => *s_a == *o_a && *s_b == *o_b,
-            _ => false
-        }
-    }
-}
-
-impl<'tcx> Eq for TypeStructure<'tcx> {}
-
-type TyArena<'tcx> = TypedArena<TypeStructure<'tcx>>;
-type AstArena<'ast> = TypedArena<AstStructure<'ast>>;
-
-struct TypeContext<'tcx, 'ast> {
-    ty_arena: &'tcx TyArena<'tcx>,
-    types: Vec<Type<'tcx>> ,
-    type_table: HashMap<NodeId, Type<'tcx>>,
-
-    ast_arena: &'ast AstArena<'ast>,
-    ast_counter: usize,
-}
-
-impl<'tcx,'ast> TypeContext<'tcx, 'ast> {
-    fn new(ty_arena: &'tcx TyArena<'tcx>, ast_arena: &'ast AstArena<'ast>)
-           -> TypeContext<'tcx, 'ast> {
-        TypeContext { ty_arena: ty_arena,
-                      types: Vec::new(),
-                      type_table: HashMap::new(),
-
-                      ast_arena: ast_arena,
-                      ast_counter: 0 }
-    }
-
-    fn add_type(&mut self, s: TypeStructure<'tcx>) -> Type<'tcx> {
-        for &ty in &self.types {
-            if *ty == s {
-                return ty;
-            }
-        }
-
-        let ty = self.ty_arena.alloc(s);
-        self.types.push(ty);
-        ty
-    }
-
-    fn set_type(&mut self, id: NodeId, ty: Type<'tcx>) -> Type<'tcx> {
-        self.type_table.insert(id, ty);
-        ty
-    }
-
-    fn ast(&mut self, a: AstKind<'ast>) -> Ast<'ast> {
-        let id = self.ast_counter;
-        self.ast_counter += 1;
-        self.ast_arena.alloc(AstStructure { id: NodeId {id:id}, kind: a })
-    }
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-struct NodeId {
-    id: usize
-}
-
-type Ast<'ast> = &'ast AstStructure<'ast>;
-
-#[derive(Copy, Clone)]
-struct AstStructure<'ast> {
-    id: NodeId,
-    kind: AstKind<'ast>
-}
-
-#[derive(Copy, Clone)]
-enum AstKind<'ast> {
-    ExprInt,
-    ExprVar(usize),
-    ExprLambda(Ast<'ast>),
-}
-
-fn compute_types<'tcx,'ast>(tcx: &mut TypeContext<'tcx,'ast>,
-                            ast: Ast<'ast>) -> Type<'tcx>
-{
-    match ast.kind {
-        ExprInt | ExprVar(_) => {
-            let ty = tcx.add_type(TypeInt);
-            tcx.set_type(ast.id, ty)
-        }
-        ExprLambda(ast) => {
-            let arg_ty = tcx.add_type(TypeInt);
-            let body_ty = compute_types(tcx, ast);
-            let lambda_ty = tcx.add_type(TypeFunction(arg_ty, body_ty));
-            tcx.set_type(ast.id, lambda_ty)
-        }
-    }
-}
-
-pub fn main() {
-    let ty_arena = TypedArena::new();
-    let ast_arena = TypedArena::new();
-    let mut tcx = TypeContext::new(&ty_arena, &ast_arena);
-    let ast = tcx.ast(ExprInt);
-    let ty = compute_types(&mut tcx, ast);
-    assert_eq!(*ty, TypeInt);
-}
diff --git a/src/test/run-pass/rfc1445/eq-allows-match-on-ty-in-macro.rs b/src/test/run-pass/rfc1445/eq-allows-match-on-ty-in-macro.rs
new file mode 100644 (file)
index 0000000..241fe6c
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+
+macro_rules! foo {
+    (#[$attr:meta] $x:ident) => {
+        #[$attr]
+        struct $x {
+            x: u32
+        }
+    }
+}
+
+foo! { #[derive(PartialEq, Eq)] Foo }
+
+const FOO: Foo = Foo { x: 0 };
+
+fn main() {
+    let y = Foo { x: 1 };
+    match y {
+        FOO => { }
+        _ => { }
+    }
+}
diff --git a/src/test/run-pass/rfc1445/eq-allows-match.rs b/src/test/run-pass/rfc1445/eq-allows-match.rs
new file mode 100644 (file)
index 0000000..f02a456
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+
+#[derive(PartialEq, Eq)]
+struct Foo {
+    x: u32
+}
+
+const FOO: Foo = Foo { x: 0 };
+
+fn main() {
+    let y = Foo { x: 1 };
+    match y {
+        FOO => { }
+        _ => { }
+    }
+}
diff --git a/src/test/run-pass/rust-log-filter.rs b/src/test/run-pass/rust-log-filter.rs
deleted file mode 100644 (file)
index 306d24e..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// exec-env:RUST_LOG=rust_log_filter/foo
-// ignore-emscripten no threads support
-
-#![allow(unknown_features)]
-#![feature(box_syntax, std_misc, rustc_private)]
-
-#[macro_use]
-extern crate log;
-
-use std::sync::mpsc::{channel, Sender, Receiver};
-use std::thread;
-
-pub struct ChannelLogger {
-    tx: Sender<String>
-}
-
-impl ChannelLogger {
-    pub fn new() -> (Box<ChannelLogger>, Receiver<String>) {
-        let (tx, rx) = channel();
-        (box ChannelLogger { tx: tx }, rx)
-    }
-}
-
-impl log::Logger for ChannelLogger {
-    fn log(&mut self, record: &log::LogRecord) {
-        self.tx.send(format!("{}", record.args)).unwrap();
-    }
-}
-
-pub fn main() {
-    let (logger, rx) = ChannelLogger::new();
-
-    let t = thread::spawn(move|| {
-        log::set_logger(logger);
-
-        info!("foo");
-        info!("bar");
-        info!("foo bar");
-        info!("bar foo");
-    });
-
-    assert_eq!(rx.recv().unwrap(), "foo");
-    assert_eq!(rx.recv().unwrap(), "foo bar");
-    assert_eq!(rx.recv().unwrap(), "bar foo");
-    assert!(rx.recv().is_err());
-
-    t.join();
-}
index 2501ef94bea8211983c0edcee05e9b66fcc2ab4a..2ff2d5a8aa52bd2e968f6a7e45ef071971fc1f78 100644 (file)
@@ -8,57 +8,55 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-
-#![feature(collections, into_cow)]
-
-extern crate collections;
-
 use std::collections::HashMap;
-use std::borrow::{Cow, IntoCow};
+use std::borrow::Cow;
+
+use std::borrow::Cow::Borrowed as B;
+use std::borrow::Cow::Owned as O;
 
 type SendStr = Cow<'static, str>;
 
-pub fn main() {
+fn main() {
     let mut map: HashMap<SendStr, usize> = HashMap::new();
-    assert!(map.insert("foo".into_cow(), 42).is_none());
-    assert!(map.insert("foo".to_string().into_cow(), 42).is_some());
-    assert!(map.insert("foo".into_cow(), 42).is_some());
-    assert!(map.insert("foo".to_string().into_cow(), 42).is_some());
+    assert!(map.insert(B("foo"), 42).is_none());
+    assert!(map.insert(O("foo".to_string()), 42).is_some());
+    assert!(map.insert(B("foo"), 42).is_some());
+    assert!(map.insert(O("foo".to_string()), 42).is_some());
 
-    assert!(map.insert("foo".into_cow(), 43).is_some());
-    assert!(map.insert("foo".to_string().into_cow(), 44).is_some());
-    assert!(map.insert("foo".into_cow(), 45).is_some());
-    assert!(map.insert("foo".to_string().into_cow(), 46).is_some());
+    assert!(map.insert(B("foo"), 43).is_some());
+    assert!(map.insert(O("foo".to_string()), 44).is_some());
+    assert!(map.insert(B("foo"), 45).is_some());
+    assert!(map.insert(O("foo".to_string()), 46).is_some());
 
     let v = 46;
 
-    assert_eq!(map.get(&"foo".to_string().into_cow()), Some(&v));
-    assert_eq!(map.get(&"foo".into_cow()), Some(&v));
+    assert_eq!(map.get(&O("foo".to_string())), Some(&v));
+    assert_eq!(map.get(&B("foo")), Some(&v));
 
     let (a, b, c, d) = (50, 51, 52, 53);
 
-    assert!(map.insert("abc".into_cow(), a).is_none());
-    assert!(map.insert("bcd".to_string().into_cow(), b).is_none());
-    assert!(map.insert("cde".into_cow(), c).is_none());
-    assert!(map.insert("def".to_string().into_cow(), d).is_none());
+    assert!(map.insert(B("abc"), a).is_none());
+    assert!(map.insert(O("bcd".to_string()), b).is_none());
+    assert!(map.insert(B("cde"), c).is_none());
+    assert!(map.insert(O("def".to_string()), d).is_none());
 
-    assert!(map.insert("abc".into_cow(), a).is_some());
-    assert!(map.insert("bcd".to_string().into_cow(), b).is_some());
-    assert!(map.insert("cde".into_cow(), c).is_some());
-    assert!(map.insert("def".to_string().into_cow(), d).is_some());
+    assert!(map.insert(B("abc"), a).is_some());
+    assert!(map.insert(O("bcd".to_string()), b).is_some());
+    assert!(map.insert(B("cde"), c).is_some());
+    assert!(map.insert(O("def".to_string()), d).is_some());
 
-    assert!(map.insert("abc".to_string().into_cow(), a).is_some());
-    assert!(map.insert("bcd".into_cow(), b).is_some());
-    assert!(map.insert("cde".to_string().into_cow(), c).is_some());
-    assert!(map.insert("def".into_cow(), d).is_some());
+    assert!(map.insert(O("abc".to_string()), a).is_some());
+    assert!(map.insert(B("bcd"), b).is_some());
+    assert!(map.insert(O("cde".to_string()), c).is_some());
+    assert!(map.insert(B("def"), d).is_some());
 
     assert_eq!(map.get("abc"), Some(&a));
     assert_eq!(map.get("bcd"), Some(&b));
     assert_eq!(map.get("cde"), Some(&c));
     assert_eq!(map.get("def"), Some(&d));
 
-    assert_eq!(map.get(&"abc".into_cow()), Some(&a));
-    assert_eq!(map.get(&"bcd".into_cow()), Some(&b));
-    assert_eq!(map.get(&"cde".into_cow()), Some(&c));
-    assert_eq!(map.get(&"def".into_cow()), Some(&d));
+    assert_eq!(map.get(&B("abc")), Some(&a));
+    assert_eq!(map.get(&B("bcd")), Some(&b));
+    assert_eq!(map.get(&B("cde")), Some(&c));
+    assert_eq!(map.get(&B("def")), Some(&d));
 }
index 146f5253508c6e92cc39b0fd5cae707354becff4..3178dc94598812eaaa0c70399a8d46ed65647acd 100644 (file)
@@ -8,61 +8,58 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use std::collections::BTreeMap;
+use std::borrow::Cow;
 
-#![feature(collections, into_cow)]
-
-extern crate collections;
-
-use self::collections::BTreeMap;
-use std::borrow::{Cow, IntoCow};
+use std::borrow::Cow::{Owned as O, Borrowed as B};
 
 type SendStr = Cow<'static, str>;
 
-pub fn main() {
+fn main() {
     let mut map: BTreeMap<SendStr, usize> = BTreeMap::new();
-    assert!(map.insert("foo".into_cow(), 42).is_none());
-    assert!(map.insert("foo".to_string().into_cow(), 42).is_some());
-    assert!(map.insert("foo".into_cow(), 42).is_some());
-    assert!(map.insert("foo".to_string().into_cow(), 42).is_some());
+    assert!(map.insert(B("foo"), 42).is_none());
+    assert!(map.insert(O("foo".to_string()), 42).is_some());
+    assert!(map.insert(B("foo"), 42).is_some());
+    assert!(map.insert(O("foo".to_string()), 42).is_some());
 
-    assert!(map.insert("foo".into_cow(), 43).is_some());
-    assert!(map.insert("foo".to_string().into_cow(), 44).is_some());
-    assert!(map.insert("foo".into_cow(), 45).is_some());
-    assert!(map.insert("foo".to_string().into_cow(), 46).is_some());
+    assert!(map.insert(B("foo"), 43).is_some());
+    assert!(map.insert(O("foo".to_string()), 44).is_some());
+    assert!(map.insert(B("foo"), 45).is_some());
+    assert!(map.insert(O("foo".to_string()), 46).is_some());
 
     let v = 46;
 
-    assert_eq!(map.get(&"foo".to_string().into_cow()), Some(&v));
-    assert_eq!(map.get(&"foo".into_cow()), Some(&v));
+    assert_eq!(map.get(&O("foo".to_string())), Some(&v));
+    assert_eq!(map.get(&B("foo")), Some(&v));
 
     let (a, b, c, d) = (50, 51, 52, 53);
 
-    assert!(map.insert("abc".into_cow(), a).is_none());
-    assert!(map.insert("bcd".to_string().into_cow(), b).is_none());
-    assert!(map.insert("cde".into_cow(), c).is_none());
-    assert!(map.insert("def".to_string().into_cow(), d).is_none());
+    assert!(map.insert(B("abc"), a).is_none());
+    assert!(map.insert(O("bcd".to_string()), b).is_none());
+    assert!(map.insert(B("cde"), c).is_none());
+    assert!(map.insert(O("def".to_string()), d).is_none());
 
-    assert!(map.insert("abc".into_cow(), a).is_some());
-    assert!(map.insert("bcd".to_string().into_cow(), b).is_some());
-    assert!(map.insert("cde".into_cow(), c).is_some());
-    assert!(map.insert("def".to_string().into_cow(), d).is_some());
+    assert!(map.insert(B("abc"), a).is_some());
+    assert!(map.insert(O("bcd".to_string()), b).is_some());
+    assert!(map.insert(B("cde"), c).is_some());
+    assert!(map.insert(O("def".to_string()), d).is_some());
 
-    assert!(map.insert("abc".to_string().into_cow(), a).is_some());
-    assert!(map.insert("bcd".into_cow(), b).is_some());
-    assert!(map.insert("cde".to_string().into_cow(), c).is_some());
-    assert!(map.insert("def".into_cow(), d).is_some());
+    assert!(map.insert(O("abc".to_string()), a).is_some());
+    assert!(map.insert(B("bcd"), b).is_some());
+    assert!(map.insert(O("cde".to_string()), c).is_some());
+    assert!(map.insert(B("def"), d).is_some());
 
-    assert_eq!(map.get(&"abc".into_cow()), Some(&a));
-    assert_eq!(map.get(&"bcd".into_cow()), Some(&b));
-    assert_eq!(map.get(&"cde".into_cow()), Some(&c));
-    assert_eq!(map.get(&"def".into_cow()), Some(&d));
+    assert_eq!(map.get(&B("abc")), Some(&a));
+    assert_eq!(map.get(&B("bcd")), Some(&b));
+    assert_eq!(map.get(&B("cde")), Some(&c));
+    assert_eq!(map.get(&B("def")), Some(&d));
 
-    assert_eq!(map.get(&"abc".to_string().into_cow()), Some(&a));
-    assert_eq!(map.get(&"bcd".to_string().into_cow()), Some(&b));
-    assert_eq!(map.get(&"cde".to_string().into_cow()), Some(&c));
-    assert_eq!(map.get(&"def".to_string().into_cow()), Some(&d));
+    assert_eq!(map.get(&O("abc".to_string())), Some(&a));
+    assert_eq!(map.get(&O("bcd".to_string())), Some(&b));
+    assert_eq!(map.get(&O("cde".to_string())), Some(&c));
+    assert_eq!(map.get(&O("def".to_string())), Some(&d));
 
-    assert!(map.remove(&"foo".into_cow()).is_some());
+    assert!(map.remove(&B("foo")).is_some());
     assert_eq!(map.into_iter().map(|(k, v)| format!("{}{}", k, v))
                               .collect::<Vec<String>>()
                               .concat(),
index f0444c27170561550a80c85b48e8e4f3f17dd2c1..ffb9e6072dfe3cd488a5dd85e557c2448c00aa89 100644 (file)
@@ -8,7 +8,9 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(repr_simd, platform_intrinsics)]
+#![feature(repr_simd, rustc_attrs, platform_intrinsics)]
+
+// ignore-pretty : (#23623) problems when ending with // comments
 
 #[repr(simd)]
 #[derive(Copy, Clone, Debug, PartialEq)]
@@ -50,6 +52,7 @@ macro_rules! all_eq {
     }}
 }
 
+#[rustc_no_mir] // FIXME #27840 MIR doesn't handle shuffle constants.
 fn main() {
     let x2 = i32x2(20, 21);
     let x3 = i32x3(30, 31, 32);
diff --git a/src/test/run-pass/specialization/README.md b/src/test/run-pass/specialization/README.md
new file mode 100644 (file)
index 0000000..1373a2c
--- /dev/null
@@ -0,0 +1,37 @@
+Tests that specialization is working correctly:
+
+- Dispatch
+  - [On methods](specialization-basics.rs), includes:
+    - Specialization via adding a trait bound
+      - Including both remote and local traits
+    - Specialization via pure structure (e.g. `(T, U)` vs `(T, T)`)
+    - Specialization via concrete types vs unknown types
+      - In top level of the trait reference
+      - Embedded within another type (`Vec<T>` vs `Vec<i32>`)
+  - [Specialization based on super trait relationships](specialization-super-traits.rs)
+  - [On assoc fns](specialization-assoc-fns.rs)
+  - [Ensure that impl order doesn't matter](specialization-out-of-order.rs)
+
+- Item inheritance
+  - [Correct default cascading for methods](specialization-default-methods.rs)
+  - Inheritance works across impls with varying generics
+    - [With projections](specialization-translate-projections.rs)
+    - [With projections that involve input types](specialization-translate-projections-with-params.rs)
+
+- Normalization issues
+  - [Non-default assoc types can be projected](specialization-projection.rs)
+    - Including non-specialized cases
+    - Including specialized cases
+  - [Specialized Impls can happen on projections](specialization-on-projection.rs)
+  - [Projections and aliases play well together](specialization-projection-alias.rs)
+  - [Projections involving specialization allowed in the trait ref for impls, and overlap can still be determined](specialization-overlap-projection.rs)
+    - Only works for the simple case where the most specialized impl directly
+      provides a non-`default` associated type
+
+- Across crates
+  - [For traits defined in upstream crate](specialization-allowed-cross-crate.rs)
+  - [Full method dispatch tests, drawing from upstream crate](specialization-cross-crate.rs)
+    - Including *additional* local specializations
+  - [Full method dispatch tests, *without* turning on specialization in local crate](specialization-cross-crate-no-gate.rs)
+  - [Test that defaults cascade correctly from upstream crates](specialization-cross-crate-defaults.rs)
+    - Including *additional* local use of defaults
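+
+For orientation, the following is a minimal, hypothetical sketch of the
+dispatch behaviour these tests exercise. It is not one of the tests listed
+above, and it assumes the unstable `specialization` feature gate:
+
+```rust
+#![feature(specialization)]
+
+trait Greet {
+    fn greet(&self) -> &'static str;
+}
+
+// Blanket impl; `default` marks the method as overridable by more
+// specific impls.
+impl<T> Greet for T {
+    default fn greet(&self) -> &'static str { "generic" }
+}
+
+// More specific impl: selected whenever the receiver is a `String`.
+impl Greet for String {
+    fn greet(&self) -> &'static str { "String" }
+}
+
+fn main() {
+    assert_eq!(0u8.greet(), "generic");
+    assert_eq!(String::new().greet(), "String");
+}
+```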
diff --git a/src/test/run-pass/specialization/specialization-allowed-cross-crate.rs b/src/test/run-pass/specialization/specialization-allowed-cross-crate.rs
new file mode 100644 (file)
index 0000000..6b999f3
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:go_trait.rs
+
+#![feature(specialization)]
+
+extern crate go_trait;
+
+use go_trait::{Go,GoMut};
+use std::fmt::Debug;
+use std::default::Default;
+
+struct MyThingy;
+
+impl Go for MyThingy {
+    fn go(&self, arg: isize) { }
+}
+
+impl GoMut for MyThingy {
+    fn go_mut(&mut self, arg: isize) { }
+}
+
+fn main() { }
diff --git a/src/test/run-pass/specialization/specialization-assoc-fns.rs b/src/test/run-pass/specialization/specialization-assoc-fns.rs
new file mode 100644 (file)
index 0000000..577f217
--- /dev/null
@@ -0,0 +1,37 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that non-method associated functions can be specialized
+
+#![feature(specialization)]
+
+trait Foo {
+    fn mk() -> Self;
+}
+
+impl<T: Default> Foo for T {
+    default fn mk() -> T {
+        T::default()
+    }
+}
+
+impl Foo for Vec<u8> {
+    fn mk() -> Vec<u8> {
+        vec![0]
+    }
+}
+
+fn main() {
+    let v1: Vec<i32> = Foo::mk();
+    let v2: Vec<u8> = Foo::mk();
+
+    assert!(v1.len() == 0);
+    assert!(v2.len() == 1);
+}
diff --git a/src/test/run-pass/specialization/specialization-basics.rs b/src/test/run-pass/specialization/specialization-basics.rs
new file mode 100644 (file)
index 0000000..b11495e
--- /dev/null
@@ -0,0 +1,106 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(specialization)]
+
+// Tests a variety of basic specialization scenarios and method
+// dispatch for them.
+
+trait Foo {
+    fn foo(&self) -> &'static str;
+}
+
+impl<T> Foo for T {
+    default fn foo(&self) -> &'static str {
+        "generic"
+    }
+}
+
+impl<T: Clone> Foo for T {
+    default fn foo(&self) -> &'static str {
+        "generic Clone"
+    }
+}
+
+impl<T, U> Foo for (T, U) where T: Clone, U: Clone {
+    default fn foo(&self) -> &'static str {
+        "generic pair"
+    }
+}
+
+impl<T: Clone> Foo for (T, T) {
+    default fn foo(&self) -> &'static str {
+        "generic uniform pair"
+    }
+}
+
+impl Foo for (u8, u32) {
+    default fn foo(&self) -> &'static str {
+        "(u8, u32)"
+    }
+}
+
+impl Foo for (u8, u8) {
+    default fn foo(&self) -> &'static str {
+        "(u8, u8)"
+    }
+}
+
+impl<T: Clone> Foo for Vec<T> {
+    default fn foo(&self) -> &'static str {
+        "generic Vec"
+    }
+}
+
+impl Foo for Vec<i32> {
+    fn foo(&self) -> &'static str {
+        "Vec<i32>"
+    }
+}
+
+impl Foo for String {
+    fn foo(&self) -> &'static str {
+        "String"
+    }
+}
+
+impl Foo for i32 {
+    fn foo(&self) -> &'static str {
+        "i32"
+    }
+}
+
+struct NotClone;
+
+trait MyMarker {}
+impl<T: Clone + MyMarker> Foo for T {
+    default fn foo(&self) -> &'static str {
+        "generic Clone + MyMarker"
+    }
+}
+
+#[derive(Clone)]
+struct MarkedAndClone;
+impl MyMarker for MarkedAndClone {}
+
+fn main() {
+    assert!(NotClone.foo() == "generic");
+    assert!(0u8.foo() == "generic Clone");
+    assert!(vec![NotClone].foo() == "generic");
+    assert!(vec![0u8].foo() == "generic Vec");
+    assert!(vec![0i32].foo() == "Vec<i32>");
+    assert!(0i32.foo() == "i32");
+    assert!(String::new().foo() == "String");
+    assert!(((), 0).foo() == "generic pair");
+    assert!(((), ()).foo() == "generic uniform pair");
+    assert!((0u8, 0u32).foo() == "(u8, u32)");
+    assert!((0u8, 0u8).foo() == "(u8, u8)");
+    assert!(MarkedAndClone.foo() == "generic Clone + MyMarker");
+}
diff --git a/src/test/run-pass/specialization/specialization-cross-crate-defaults.rs b/src/test/run-pass/specialization/specialization-cross-crate-defaults.rs
new file mode 100644 (file)
index 0000000..62c7e3e
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:specialization_cross_crate_defaults.rs
+
+#![feature(specialization)]
+
+extern crate specialization_cross_crate_defaults;
+
+use specialization_cross_crate_defaults::*;
+
+struct LocalDefault;
+struct LocalOverride;
+
+impl Foo for LocalDefault {}
+
+impl Foo for LocalOverride {
+    fn foo(&self) -> bool { true }
+}
+
+fn test_foo() {
+    assert!(!0i8.foo());
+    assert!(!0i32.foo());
+    assert!(0i64.foo());
+
+    assert!(!LocalDefault.foo());
+    assert!(LocalOverride.foo());
+}
+
+fn test_bar() {
+    assert!(0u8.bar() == 0);
+    assert!(0i32.bar() == 1);
+    assert!("hello".bar() == 0);
+    assert!(vec![()].bar() == 2);
+    assert!(vec![0i32].bar() == 2);
+    assert!(vec![0i64].bar() == 3);
+}
+
+fn main() {
+    test_foo();
+    test_bar();
+}
diff --git a/src/test/run-pass/specialization/specialization-cross-crate-no-gate.rs b/src/test/run-pass/specialization/specialization-cross-crate-no-gate.rs
new file mode 100644 (file)
index 0000000..b954853
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that specialization works even if only the upstream crate enables it
+
+// aux-build:specialization_cross_crate.rs
+
+extern crate specialization_cross_crate;
+
+use specialization_cross_crate::*;
+
+fn main() {
+    assert!(0u8.foo() == "generic Clone");
+    assert!(vec![0u8].foo() == "generic Vec");
+    assert!(vec![0i32].foo() == "Vec<i32>");
+    assert!(0i32.foo() == "i32");
+    assert!(String::new().foo() == "String");
+    assert!(((), 0).foo() == "generic pair");
+    assert!(((), ()).foo() == "generic uniform pair");
+    assert!((0u8, 0u32).foo() == "(u8, u32)");
+    assert!((0u8, 0u8).foo() == "(u8, u8)");
+}
diff --git a/src/test/run-pass/specialization/specialization-cross-crate.rs b/src/test/run-pass/specialization/specialization-cross-crate.rs
new file mode 100644 (file)
index 0000000..7593ac4
--- /dev/null
@@ -0,0 +1,58 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:specialization_cross_crate.rs
+
+#![feature(specialization)]
+
+extern crate specialization_cross_crate;
+
+use specialization_cross_crate::*;
+
+struct NotClone;
+
+#[derive(Clone)]
+struct MarkedAndClone;
+impl MyMarker for MarkedAndClone {}
+
+struct MyType<T>(T);
+impl<T> Foo for MyType<T> {
+    default fn foo(&self) -> &'static str {
+        "generic MyType"
+    }
+}
+
+impl Foo for MyType<u8> {
+    fn foo(&self) -> &'static str {
+        "MyType<u8>"
+    }
+}
+
+struct MyOtherType;
+impl Foo for MyOtherType {}
+
+fn main() {
+    assert!(NotClone.foo() == "generic");
+    assert!(0u8.foo() == "generic Clone");
+    assert!(vec![NotClone].foo() == "generic");
+    assert!(vec![0u8].foo() == "generic Vec");
+    assert!(vec![0i32].foo() == "Vec<i32>");
+    assert!(0i32.foo() == "i32");
+    assert!(String::new().foo() == "String");
+    assert!(((), 0).foo() == "generic pair");
+    assert!(((), ()).foo() == "generic uniform pair");
+    assert!((0u8, 0u32).foo() == "(u8, u32)");
+    assert!((0u8, 0u8).foo() == "(u8, u8)");
+    assert!(MarkedAndClone.foo() == "generic Clone + MyMarker");
+
+    assert!(MyType(()).foo() == "generic MyType");
+    assert!(MyType(0u8).foo() == "MyType<u8>");
+    assert!(MyOtherType.foo() == "generic");
+}
diff --git a/src/test/run-pass/specialization/specialization-default-methods.rs b/src/test/run-pass/specialization/specialization-default-methods.rs
new file mode 100644 (file)
index 0000000..9cfc6aa
--- /dev/null
@@ -0,0 +1,94 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(specialization)]
+
+// Test that default methods are cascaded correctly
+
+// First, test only use of explicit `default` items:
+
+trait Foo {
+    fn foo(&self) -> bool;
+}
+
+// Specialization tree for Foo:
+//
+//        T
+//       / \
+//    i32   i64
+
+impl<T> Foo for T {
+    default fn foo(&self) -> bool { false }
+}
+
+impl Foo for i32 {}
+
+impl Foo for i64 {
+    fn foo(&self) -> bool { true }
+}
+
+fn test_foo() {
+    assert!(!0i8.foo());
+    assert!(!0i32.foo());
+    assert!(0i64.foo());
+}
+
+// Next, test mixture of explicit `default` and provided methods:
+
+trait Bar {
+    fn bar(&self) -> i32 { 0 }
+}
+
+// Specialization tree for Bar.
+// Uses of $ designate that method is provided
+//
+//           $Bar   (the trait)
+//             |
+//             T
+//            /|\
+//           / | \
+//          /  |  \
+//         /   |   \
+//        /    |    \
+//       /     |     \
+//     $i32   &str  $Vec<T>
+//                    /\
+//                   /  \
+//            Vec<i32>  $Vec<i64>
+
+// use the provided method
+impl<T> Bar for T {}
+
+impl Bar for i32 {
+    fn bar(&self) -> i32 { 1 }
+}
+impl<'a> Bar for &'a str {}
+
+impl<T> Bar for Vec<T> {
+    default fn bar(&self) -> i32 { 2 }
+}
+impl Bar for Vec<i32> {}
+impl Bar for Vec<i64> {
+    fn bar(&self) -> i32 { 3 }
+}
+
+fn test_bar() {
+    assert!(0u8.bar() == 0);
+    assert!(0i32.bar() == 1);
+    assert!("hello".bar() == 0);
+    assert!(vec![()].bar() == 2);
+    assert!(vec![0i32].bar() == 2);
+    assert!(vec![0i64].bar() == 3);
+}
+
+fn main() {
+    test_foo();
+    test_bar();
+}
diff --git a/src/test/run-pass/specialization/specialization-on-projection.rs b/src/test/run-pass/specialization/specialization-on-projection.rs
new file mode 100644 (file)
index 0000000..acf78de
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(specialization)]
+
+// Ensure that specialization works for impls defined directly on a projection
+
+trait Foo<T> {}
+
+trait Assoc {
+    type Item;
+}
+
+impl<T: Assoc> Foo<T::Item> for T {}
+
+struct Struct;
+
+impl Assoc for Struct {
+    type Item = u8;
+}
+
+impl Foo<u8> for Struct {}
+
+fn main() {}
diff --git a/src/test/run-pass/specialization/specialization-out-of-order.rs b/src/test/run-pass/specialization/specialization-out-of-order.rs
new file mode 100644 (file)
index 0000000..2d293f4
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that you can list the more specific impl before the more general one.
+
+#![feature(specialization)]
+
+trait Foo {
+    type Out;
+}
+
+impl Foo for bool {
+    type Out = ();
+}
+
+impl<T> Foo for T {
+    default type Out = bool;
+}
+
+fn main() {}
diff --git a/src/test/run-pass/specialization/specialization-overlap-projection.rs b/src/test/run-pass/specialization/specialization-overlap-projection.rs
new file mode 100644 (file)
index 0000000..20046ee
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that impls on projected self types can resolve overlap, even when the
+// projections involve specialization, so long as the associated type is
+// provided by the most specialized impl.
+
+#![feature(specialization)]
+
+trait Assoc {
+    type Output;
+}
+
+impl<T> Assoc for T {
+    default type Output = bool;
+}
+
+impl Assoc for u8 { type Output = u8; }
+impl Assoc for u16 { type Output = u16; }
+
+trait Foo {}
+impl Foo for u32 {}
+impl Foo for <u8 as Assoc>::Output {}
+impl Foo for <u16 as Assoc>::Output {}
+
+fn main() {}
diff --git a/src/test/run-pass/specialization/specialization-projection-alias.rs b/src/test/run-pass/specialization/specialization-projection-alias.rs
new file mode 100644 (file)
index 0000000..7fce1cc
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(specialization)]
+
+// Regression test for ICE when combining specialized associated types and type
+// aliases
+
+trait Id_ {
+    type Out;
+}
+
+type Id<T> = <T as Id_>::Out;
+
+impl<T> Id_ for T {
+    default type Out = T;
+}
+
+fn test_projection() {
+    let x: Id<bool> = panic!();
+}
+
+fn main() {
+
+}
diff --git a/src/test/run-pass/specialization/specialization-projection.rs b/src/test/run-pass/specialization/specialization-projection.rs
new file mode 100644 (file)
index 0000000..4e0bdec
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(specialization)]
+
+// Make sure we *can* project non-defaulted associated types
+// cf compile-fail/specialization-default-projection.rs
+
+// First, do so without any use of specialization
+
+trait Foo {
+    type Assoc;
+}
+
+impl<T> Foo for T {
+    type Assoc = ();
+}
+
+fn generic_foo<T>() -> <T as Foo>::Assoc {
+    ()
+}
+
+// Next, allow for one layer of specialization
+
+trait Bar {
+    type Assoc;
+}
+
+impl<T> Bar for T {
+    default type Assoc = ();
+}
+
+impl<T: Clone> Bar for T {
+    type Assoc = u8;
+}
+
+fn generic_bar_clone<T: Clone>() -> <T as Bar>::Assoc {
+    0u8
+}
+
+fn main() {
+}
diff --git a/src/test/run-pass/specialization/specialization-super-traits.rs b/src/test/run-pass/specialization/specialization-super-traits.rs
new file mode 100644 (file)
index 0000000..a9b3bfc
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(specialization)]
+
+// Test that you can specialize via an explicit trait hierarchy
+
+// FIXME: this doesn't work yet...
+
+trait Parent {}
+trait Child: Parent {}
+
+trait Foo {}
+
+impl<T: Parent> Foo for T {}
+impl<T: Child> Foo for T {}
+
+fn main() {}
diff --git a/src/test/run-pass/specialization/specialization-translate-projections-with-params.rs b/src/test/run-pass/specialization/specialization-translate-projections-with-params.rs
new file mode 100644 (file)
index 0000000..647d552
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Ensure that provided items are inherited properly even when impls vary in
+// type parameters *and* rely on projections, and the type parameters are input
+// types on the trait.
+
+#![feature(specialization)]
+
+trait Trait<T> {
+    fn convert(&self) -> T;
+}
+trait WithAssoc {
+    type Item;
+    fn as_item(&self) -> &Self::Item;
+}
+
+impl<T, U> Trait<U> for T where T: WithAssoc<Item=U>, U: Clone {
+    fn convert(&self) -> U {
+        self.as_item().clone()
+    }
+}
+
+impl WithAssoc for u8 {
+    type Item = u8;
+    fn as_item(&self) -> &u8 { self }
+}
+
+impl Trait<u8> for u8 {}
+
+fn main() {
+    assert!(3u8.convert() == 3u8);
+}
diff --git a/src/test/run-pass/specialization/specialization-translate-projections.rs b/src/test/run-pass/specialization/specialization-translate-projections.rs
new file mode 100644 (file)
index 0000000..11e1d99
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Ensure that provided items are inherited properly even when impls vary in
+// type parameters *and* rely on projections.
+
+#![feature(specialization)]
+
+use std::convert::Into;
+
+trait Trait {
+    fn to_u8(&self) -> u8;
+}
+trait WithAssoc {
+    type Item;
+    fn to_item(&self) -> Self::Item;
+}
+
+impl<T, U> Trait for T where T: WithAssoc<Item=U>, U: Into<u8> {
+    fn to_u8(&self) -> u8 {
+        self.to_item().into()
+    }
+}
+
+impl WithAssoc for u8 {
+    type Item = u8;
+    fn to_item(&self) -> u8 { *self }
+}
+
+impl Trait for u8 {}
+
+fn main() {
+    assert!(3u8.to_u8() == 3u8);
+}
index 2c29787e128fb7e5359ddfae65c12bf579efaffa..7332f098b96fa770d62726a759120718dbb92554 100644 (file)
@@ -24,7 +24,6 @@ fn main() {
     assert_both::<sync::Mutex<()>>();
     assert_both::<sync::Condvar>();
     assert_both::<sync::RwLock<()>>();
-    assert_both::<sync::Semaphore>();
     assert_both::<sync::Barrier>();
     assert_both::<sync::Arc<()>>();
     assert_both::<sync::Weak<()>>();
index 69ec0a2222ddbc50e84ba5b22c26e129e97e30dc..b764a983a0c0960aa872a4c00c0982f5cc7f0456 100644 (file)
@@ -11,6 +11,7 @@
 // ignore-pretty
 //
 // exec-env:RUST_MIN_STACK=16000000
+// rustc-env:RUST_MIN_STACK=16000000
 //
 // Big stack is needed for pretty printing, a little sad...
 
diff --git a/src/test/run-pass/transmute-from-fn-item-types.rs b/src/test/run-pass/transmute-from-fn-item-types.rs
new file mode 100644 (file)
index 0000000..574a90e
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(transmute_from_fn_item_types)]
+
+use std::mem;
+
+fn main() {
+    unsafe {
+        let u = mem::transmute(main);
+        let p = mem::transmute(main);
+        let f = mem::transmute(main);
+        let tuple: (usize, *mut (), fn()) = (u, p, f);
+        assert_eq!(mem::transmute::<_, [usize; 3]>(tuple), [main as usize; 3]);
+
+        mem::transmute::<_, usize>(main);
+        mem::transmute::<_, *mut ()>(main);
+        mem::transmute::<_, fn()>(main);
+    }
+}
diff --git a/src/test/run-pass/try-macro.rs b/src/test/run-pass/try-macro.rs
new file mode 100644 (file)
index 0000000..a12e207
--- /dev/null
@@ -0,0 +1,57 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::num::{ParseFloatError, ParseIntError};
+
+fn main() {
+    assert_eq!(simple(), Ok(1));
+    assert_eq!(nested(), Ok(2));
+    assert_eq!(merge_ok(), Ok(3.0));
+    assert_eq!(merge_int_err(), Err(Error::Int));
+    assert_eq!(merge_float_err(), Err(Error::Float));
+}
+
+fn simple() -> Result<i32, ParseIntError> {
+    Ok(try!("1".parse()))
+}
+
+fn nested() -> Result<i32, ParseIntError> {
+    Ok(try!(try!("2".parse::<i32>()).to_string().parse::<i32>()))
+}
+
+fn merge_ok() -> Result<f32, Error> {
+    Ok(try!("1".parse::<i32>()) as f32 + try!("2.0".parse::<f32>()))
+}
+
+fn merge_int_err() -> Result<f32, Error> {
+    Ok(try!("a".parse::<i32>()) as f32 + try!("2.0".parse::<f32>()))
+}
+
+fn merge_float_err() -> Result<f32, Error> {
+    Ok(try!("1".parse::<i32>()) as f32 + try!("b".parse::<f32>()))
+}
+
+#[derive(Debug, PartialEq)]
+enum Error {
+    Int,
+    Float,
+}
+
+impl From<ParseIntError> for Error {
+    fn from(_: ParseIntError) -> Error {
+        Error::Int
+    }
+}
+
+impl From<ParseFloatError> for Error {
+    fn from(_: ParseFloatError) -> Error {
+        Error::Float
+    }
+}
diff --git a/src/test/run-pass/try-operator-hygiene.rs b/src/test/run-pass/try-operator-hygiene.rs
new file mode 100644 (file)
index 0000000..ae622df
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// `expr?` expands to:
+//
+// match expr {
+//     Ok(val) => val,
+//     Err(err) => return Err(From::from(err)),
+// }
+//
+// This test verifies that the expansion is hygienic, i.e. it's not affected by other `val` and
+// `err` bindings that may be in scope.
+
+#![feature(question_mark)]
+
+use std::num::ParseIntError;
+
+fn main() {
+    assert_eq!(parse(), Ok(1));
+}
+
+fn parse() -> Result<i32, ParseIntError> {
+    const val: char = 'a';
+    const err: char = 'b';
+
+    Ok("1".parse::<i32>()?)
+}
diff --git a/src/test/run-pass/try-operator.rs b/src/test/run-pass/try-operator.rs
new file mode 100644 (file)
index 0000000..de5ccf0
--- /dev/null
@@ -0,0 +1,200 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(question_mark)]
+
+use std::fs::File;
+use std::io::{Read, self};
+use std::num::ParseIntError;
+use std::str::FromStr;
+
+fn on_method() -> Result<i32, ParseIntError> {
+    Ok("1".parse::<i32>()? + "2".parse::<i32>()?)
+}
+
+fn in_chain() -> Result<String, ParseIntError> {
+    Ok("3".parse::<i32>()?.to_string())
+}
+
+fn on_call() -> Result<i32, ParseIntError> {
+    fn parse<T: FromStr>(s: &str) -> Result<T, T::Err> {
+        s.parse()
+    }
+
+    Ok(parse("4")?)
+}
+
+fn nested() -> Result<i32, ParseIntError> {
+    Ok("5".parse::<i32>()?.to_string().parse()?)
+}
+
+fn on_path() -> Result<i32, ParseIntError> {
+    let x = "6".parse::<i32>();
+
+    Ok(x?)
+}
+
+fn on_macro() -> Result<i32, ParseIntError> {
+    macro_rules! id {
+        ($e:expr) => { $e }
+    }
+
+    Ok(id!("7".parse::<i32>())?)
+}
+
+fn on_parens() -> Result<i32, ParseIntError> {
+    let x = "8".parse::<i32>();
+
+    Ok((x)?)
+}
+
+fn on_block() -> Result<i32, ParseIntError> {
+    let x = "9".parse::<i32>();
+
+    Ok({x}?)
+}
+
+fn on_field() -> Result<i32, ParseIntError> {
+    struct Pair<A, B> { a: A, b: B }
+
+    let x = Pair { a: "10".parse::<i32>(), b: 0 };
+
+    Ok(x.a?)
+}
+
+fn on_tuple_field() -> Result<i32, ParseIntError> {
+    let x = ("11".parse::<i32>(), 0);
+
+    Ok(x.0?)
+}
+
+fn on_try() -> Result<i32, ParseIntError> {
+    let x = "12".parse::<i32>().map(|i| i.to_string().parse::<i32>());
+
+    Ok(x??)
+}
+
+fn on_binary_op() -> Result<i32, ParseIntError> {
+    let x = 13 - "14".parse::<i32>()?;
+    let y = "15".parse::<i32>()? - 16;
+    let z = "17".parse::<i32>()? - "18".parse::<i32>()?;
+
+    Ok(x + y + z)
+}
+
+fn on_index() -> Result<i32, ParseIntError> {
+    let x = [19];
+    let y = "0".parse::<usize>();
+
+    Ok(x[y?])
+}
+
+fn on_args() -> Result<i32, ParseIntError> {
+    fn sub(x: i32, y: i32) -> i32 { x - y }
+
+    let x = "20".parse();
+    let y = "21".parse();
+
+    Ok(sub(x?, y?))
+}
+
+fn on_if() -> Result<i32, ParseIntError> {
+    Ok(if true {
+        "22".parse::<i32>()
+    } else {
+        "23".parse::<i32>()
+    }?)
+}
+
+fn on_if_let() -> Result<i32, ParseIntError> {
+    Ok(if let Ok(..) = "24".parse::<i32>() {
+        "25".parse::<i32>()
+    } else {
+        "26".parse::<i32>()
+    }?)
+}
+
+fn on_match() -> Result<i32, ParseIntError> {
+    Ok(match "27".parse::<i32>() {
+        Err(..) => "28".parse::<i32>(),
+        Ok(..) => "29".parse::<i32>(),
+    }?)
+}
+
+fn tight_binding() -> Result<bool, ()> {
+    fn ok<T>(x: T) -> Result<T, ()> { Ok(x) }
+
+    let x = ok(true);
+    Ok(!x?)
+}
+
+// just type check
+fn merge_error() -> Result<i32, Error> {
+    let mut s = String::new();
+
+    File::open("foo.txt")?.read_to_string(&mut s)?;
+
+    Ok(s.parse::<i32>()? + 1)
+}
+
+fn main() {
+    assert_eq!(Ok(3), on_method());
+
+    assert_eq!(Ok("3".to_string()), in_chain());
+
+    assert_eq!(Ok(4), on_call());
+
+    assert_eq!(Ok(5), nested());
+
+    assert_eq!(Ok(6), on_path());
+
+    assert_eq!(Ok(7), on_macro());
+
+    assert_eq!(Ok(8), on_parens());
+
+    assert_eq!(Ok(9), on_block());
+
+    assert_eq!(Ok(10), on_field());
+
+    assert_eq!(Ok(11), on_tuple_field());
+
+    assert_eq!(Ok(12), on_try());
+
+    assert_eq!(Ok(-3), on_binary_op());
+
+    assert_eq!(Ok(19), on_index());
+
+    assert_eq!(Ok(-1), on_args());
+
+    assert_eq!(Ok(22), on_if());
+
+    assert_eq!(Ok(25), on_if_let());
+
+    assert_eq!(Ok(29), on_match());
+
+    assert_eq!(Ok(false), tight_binding());
+}
+
+enum Error {
+    Io(io::Error),
+    Parse(ParseIntError),
+}
+
+impl From<io::Error> for Error {
+    fn from(e: io::Error) -> Error {
+        Error::Io(e)
+    }
+}
+
+impl From<ParseIntError> for Error {
+    fn from(e: ParseIntError) -> Error {
+        Error::Parse(e)
+    }
+}
index db53fa855f11d401167fcf3ee53b156f5f71947e..4bd82baafeb100c32897ccf9584969dc98902d2d 100644 (file)
@@ -8,13 +8,13 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// aux-build:typeid-intrinsic.rs
-// aux-build:typeid-intrinsic2.rs
+// aux-build:typeid-intrinsic-aux1.rs
+// aux-build:typeid-intrinsic-aux2.rs
 
 #![feature(core_intrinsics)]
 
-extern crate typeid_intrinsic as other1;
-extern crate typeid_intrinsic2 as other2;
+extern crate typeid_intrinsic_aux1 as other1;
+extern crate typeid_intrinsic_aux2 as other2;
 
 use std::hash::{SipHasher, Hasher, Hash};
 use std::any::TypeId;
index 685ce589bfa0349b1065c7dbd40fd01e72986f9f..491045564ce222b6f81d2530311feaf5b1d34e66 100644 (file)
@@ -8,9 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(into_cow)]
-
-use std::borrow::{Cow, IntoCow};
+use std::borrow::{Cow, ToOwned};
 use std::default::Default;
 use std::iter::FromIterator;
 use std::ops::Add;
@@ -25,6 +23,16 @@ pub trait Rand: Default + Sized {
 }
 impl Rand for i32 { }
 
+pub trait IntoCow<'a, B: ?Sized> where B: ToOwned {
+    fn into_cow(self) -> Cow<'a, B>;
+}
+
+impl<'a> IntoCow<'a, str> for String {
+    fn into_cow(self) -> Cow<'a, str> {
+        Cow::Owned(self)
+    }
+}
+
 #[derive(PartialEq, Eq)]
 struct Newt<T>(T);
 
diff --git a/src/test/run-pass/unique-ffi-symbols.rs b/src/test/run-pass/unique-ffi-symbols.rs
new file mode 100644 (file)
index 0000000..81563f4
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// We used to have a __rust_abi shim that resulted in duplicated symbols
+// whenever the item path wasn't enough to disambiguate between them.
+fn main() {
+    let a = {
+        extern fn good() -> i32 { return 0; }
+        good as extern fn() -> i32
+    };
+    let b = {
+        extern fn good() -> i32 { return 5; }
+        good as extern fn() -> i32
+    };
+
+    assert!(a != b);
+    assert_eq!((a(), b()), (0, 5));
+}
index 6351cc76a2e4c9f3e5cdc25f6d66705228cb15c1..abd1709825c974b605f03b011c5113f0bb3afc25 100644 (file)
@@ -45,15 +45,15 @@ pub fn main() {
         let x: unsafe extern fn(*mut c_char, *const c_char, ...) -> c_int = sprintf;
 
         // A function that takes a function pointer
-        unsafe fn call(p: unsafe extern fn(*mut c_char, *const c_char, ...) -> c_int) {
+        unsafe fn call(fp: unsafe extern fn(*mut c_char, *const c_char, ...) -> c_int) {
             // Call with just the named parameter
             let c = CString::new(&b"Hello World\n"[..]).unwrap();
-            check("Hello World\n", |s| sprintf(s, c.as_ptr()));
+            check("Hello World\n", |s| fp(s, c.as_ptr()));
 
             // Call with variable number of arguments
             let c = CString::new(&b"%d %f %c %s\n"[..]).unwrap();
             check("42 42.500000 a %d %f %c %s\n\n", |s| {
-                sprintf(s, c.as_ptr(), 42, 42.5f64, 'a' as c_int, c.as_ptr());
+                fp(s, c.as_ptr(), 42, 42.5f64, 'a' as c_int, c.as_ptr());
             });
         }
 
index 25c1cfe1f4d427678fd2595e6dac9f8e5e87496f..5fe9dd60591cef6b3a15f79dc62eca229455c2d9 100644 (file)
@@ -8,15 +8,13 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(vec_push_all)]
-
 use std::vec;
 
 pub fn main() {
     let a: Vec<isize> = vec!(1, 2, 3, 4, 5);
     let b: Vec<isize> = vec!(6, 7, 8, 9, 0);
     let mut v: Vec<isize> = a;
-    v.push_all(&b);
+    v.extend_from_slice(&b);
     println!("{}", v[9]);
     assert_eq!(v[0], 1);
     assert_eq!(v[7], 8);
index b69bd53cb8c4d739ce13d37ae15ed83fa622d9a1..33f01c5bd41c8b3bf37bd3645c3b5bcfa78c97b5 100644 (file)
@@ -8,6 +8,4 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// pretty-expanded FIXME #23616
-
 pub fn main() { let mut v = vec!(1, 2, 3); v.push(1); }
index a4202217840e64a45bd0251e7ffc92210d9c3f21..911bfc7454c2a9992400604cce85ad8ef4be5ec4 100644 (file)
@@ -10,7 +10,7 @@
 
 // ignore-emscripten no threads support
 
-#![feature(rand, num_bits_bytes)]
+#![feature(rand)]
 #![feature(const_fn)]
 
 use std::sync::atomic::{AtomicUsize, Ordering};
@@ -47,7 +47,6 @@ impl Drop for DropCounter {
 }
 
 pub fn main() {
-    assert!(MAX_LEN <= std::usize::BITS);
     // len can't go above 64.
     for len in 2..MAX_LEN {
         for _ in 0..REPEATS {
diff --git a/src/test/run-pass/volatile-fat-ptr.rs b/src/test/run-pass/volatile-fat-ptr.rs
new file mode 100644 (file)
index 0000000..03ba558
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(volatile)]
+use std::ptr::{read_volatile, write_volatile};
+
+fn main() {
+    let mut x: &'static str = "test";
+    unsafe {
+        let a = read_volatile(&x);
+        assert_eq!(a, "test");
+        write_volatile(&mut x, "foo");
+        assert_eq!(x, "foo");
+    }
+}
index 1b6de3b2f7bcf029a4e1cb236b0f0a1227a60842..2dacbe0966ee37b68d6b7299b4737b2d0001ccf5 100644 (file)
 extern crate xcrate_associated_type_defaults;
 use xcrate_associated_type_defaults::Foo;
 
+struct LocalDefault;
+impl Foo<u32> for LocalDefault {}
+
+struct LocalOverride;
+impl Foo<u64> for LocalOverride {
+    type Out = bool;
+}
+
 fn main() {
-    ().bar(5);
+    assert_eq!(
+        <() as Foo<u32>>::Out::default().to_string(),
+        "0");
+    assert_eq!(
+        <() as Foo<u64>>::Out::default().to_string(),
+        "false");
+
+    assert_eq!(
+        <LocalDefault as Foo<u32>>::Out::default().to_string(),
+        "0");
+    assert_eq!(
+        <LocalOverride as Foo<u64>>::Out::default().to_string(),
+        "false");
 }
index fecbeed407c0359b749a333eff8b00af5354eeea..a663ae650c087574611c8156d098a3b5674abc22 100644 (file)
@@ -8,11 +8,13 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+#![feature(rustc_attrs, unsafe_no_drop_flag)]
 
-#![feature(unsafe_no_drop_flag)]
+// ignore-pretty : (#23623) problems when ending with // comments
 
 static mut destructions : isize = 3;
 
+#[rustc_no_mir] // FIXME #29855 MIR doesn't handle all drops correctly.
 pub fn foo() {
     #[unsafe_no_drop_flag]
     struct Foo;
diff --git a/src/test/rustdoc/inline_local/issue-28537.rs b/src/test/rustdoc/inline_local/issue-28537.rs
new file mode 100644 (file)
index 0000000..b38e104
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[doc(hidden)]
+pub mod foo {
+    pub struct Foo;
+}
+
+mod bar {
+    pub use self::bar::Bar;
+    mod bar {
+        pub struct Bar;
+    }
+}
+
+// @has issue_28537/struct.Foo.html
+pub use foo::Foo;
+
+// @has issue_28537/struct.Bar.html
+pub use self::bar::Bar;
diff --git a/src/test/rustdoc/inline_local/issue-32343.rs b/src/test/rustdoc/inline_local/issue-32343.rs
new file mode 100644 (file)
index 0000000..a045c96
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// @!has issue_32343/struct.Foo.html
+// @has issue_32343/index.html
+// @has - '//code' 'pub use foo::Foo'
+// @!has - '//code/a' 'Foo'
+#[doc(no_inline)]
+pub use foo::Foo;
+
+// @!has issue_32343/struct.Bar.html
+// @has issue_32343/index.html
+// @has - '//code' 'pub use foo::Bar'
+// @has - '//code/a' 'Bar'
+#[doc(no_inline)]
+pub use foo::Bar;
+
+mod foo {
+    pub struct Foo;
+    pub struct Bar;
+}
+
+pub mod bar {
+    // @has issue_32343/bar/struct.Bar.html
+    pub use ::foo::Bar;
+}
diff --git a/src/test/rustdoc/inline_local/please_inline.rs b/src/test/rustdoc/inline_local/please_inline.rs
new file mode 100644 (file)
index 0000000..d237ab8
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub mod foo {
+    pub struct Foo;
+}
+
+// @has please_inline/a/index.html
+pub mod a {
+    // @!has - 'pub use foo::'
+    // @has please_inline/a/struct.Foo.html
+    #[doc(inline)]
+    pub use foo::Foo;
+}
+
+// @has please_inline/b/index.html
+pub mod b {
+    // @has - 'pub use foo::'
+    // @!has please_inline/b/struct.Foo.html
+    #[feature(inline)]
+    pub use foo::Foo;
+}
index 97c37bbc1eda0619f5925aca0ab3a1a106cb69d8..266a30891941dbfde7e88ccc05d2bb4df297cc59 100644 (file)
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// compile-flags:--no-defaults --passes "collapse-docs" --passes "unindent-comments"
+// compile-flags:--no-defaults --passes collapse-docs --passes unindent-comments
 
 // @has issue_15347/fn.foo.html
 #[doc(hidden)]
index 2343b610ce448c30414892a8f13f4b19b9654162..25c97ee2c76a9833af7d76a586bc4e4d1c20f50b 100644 (file)
@@ -17,15 +17,15 @@ pub trait Bar {
     fn quux(self);
 }
 
-impl<T> Foo<T> {
+impl Foo<u8> {
     // @has - '//*[@id="method.pass"]//code' 'fn pass()'
     pub fn pass() {}
 }
-impl<T> Foo<T> {
+impl Foo<u16> {
     // @has - '//*[@id="method.pass-1"]//code' 'fn pass() -> usize'
     pub fn pass() -> usize { 42 }
 }
-impl<T> Foo<T> {
+impl Foo<u32> {
     // @has - '//*[@id="method.pass-2"]//code' 'fn pass() -> isize'
     pub fn pass() -> isize { 42 }
 }
diff --git a/src/test/rustdoc/issue-27104.rs b/src/test/rustdoc/issue-27104.rs
new file mode 100644 (file)
index 0000000..5fa093d
--- /dev/null
@@ -0,0 +1,20 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:--no-defaults --passes strip-priv-imports
+// aux-build:empty.rs
+// ignore-cross-compile
+
+// @has issue_27104/index.html
+// @!has - 'extern crate std'
+// @!has - 'use std::prelude::'
+
+// @has - 'pub extern crate empty'
+pub extern crate empty;
index 1211087e24c7dc7ba61047410ba96285f47d7a86..b28fb7ec47a8c57ad67a897e2f178054a722ef98 100644 (file)
@@ -10,7 +10,7 @@
 
 // aux-build:issue-27362.rs
 // ignore-cross-compile
-// ignore-test This test fails on beta/stable
+// ignore-test This test fails on beta/stable #32019
 
 extern crate issue_27362;
 pub use issue_27362 as quux;
diff --git a/src/test/rustdoc/issue-28478.rs b/src/test/rustdoc/issue-28478.rs
new file mode 100644 (file)
index 0000000..0db92a4
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_type_defaults)]
+#![feature(associated_consts)]
+
+// @has issue_28478/trait.Bar.html
+pub trait Bar {
+    // @has - '//*[@id="associatedtype.Bar"]' 'type Bar = ()'
+    // @has - '//*[@href="#associatedtype.Bar"]' 'Bar'
+    type Bar = ();
+    // @has - '//*[@id="associatedconstant.Baz"]' 'const Baz: usize = 7'
+    // @has - '//*[@href="#associatedconstant.Baz"]' 'Baz'
+    const Baz: usize = 7;
+    // @has - '//*[@id="tymethod.bar"]' 'fn bar'
+    fn bar();
+    // @has - '//*[@id="method.baz"]' 'fn baz'
+    fn baz() { }
+}
+
+// @has issue_28478/struct.Foo.html
+pub struct Foo;
+
+impl Foo {
+    // @has - '//*[@href="#method.foo"]' 'foo'
+    pub fn foo() {}
+}
+
+impl Bar for Foo {
+    // @has - '//*[@href="../issue_28478/trait.Bar.html#associatedtype.Bar"]' 'Bar'
+    // @has - '//*[@href="../issue_28478/trait.Bar.html#associatedconstant.Baz"]' 'Baz'
+    // @has - '//*[@href="../issue_28478/trait.Bar.html#tymethod.bar"]' 'bar'
+    fn bar() {}
+    // @has - '//*[@href="../issue_28478/trait.Bar.html#method.baz"]' 'baz'
+}
diff --git a/src/test/rustdoc/issue-32395.rs b/src/test/rustdoc/issue-32395.rs
new file mode 100644 (file)
index 0000000..672c875
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:variant-struct.rs
+// build-aux-docs
+// ignore-cross-compile
+
+// @has variant_struct/enum.Foo.html
+// @!has - 'pub qux'
+// @!has - 'pub Bar'
+extern crate variant_struct;
+
+// @has issue_32395/enum.Foo.html
+// @!has - 'pub qux'
+// @!has - 'pub Bar'
+pub use variant_struct::Foo;
index 7505d20566dbb45ef209d92c1d86b050393a6d80..00f7d90fabc38d303c57103863a4764c04c06904 100644 (file)
@@ -9,7 +9,6 @@
 // except according to those terms.
 
 #![crate_type = "lib"]
-#![feature(globs)]
 
 mod m {
     pub use self::a::Foo;
index 7505d20566dbb45ef209d92c1d86b050393a6d80..00f7d90fabc38d303c57103863a4764c04c06904 100644 (file)
@@ -9,7 +9,6 @@
 // except according to those terms.
 
 #![crate_type = "lib"]
-#![feature(globs)]
 
 mod m {
     pub use self::a::Foo;
index 62a13f76ca4f08cb0d9433be25b979621714b19a..1d9b903a279206c4799e3b8b5576df2789b3a714 100644 (file)
@@ -8,8 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(globs)]
-
 pub mod longhands {
     pub use super::*;
 
diff --git a/src/test/rustdoc/redirect.rs b/src/test/rustdoc/redirect.rs
new file mode 100644 (file)
index 0000000..98e66e8
--- /dev/null
@@ -0,0 +1,48 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:reexp_stripped.rs
+// build-aux-docs
+// ignore-cross-compile
+
+extern crate reexp_stripped;
+
+pub trait Foo {}
+
+// @has redirect/index.html
+// @has - '//code' 'pub use reexp_stripped::Bar'
+// @has - '//code/a' 'Bar'
+// @has reexp_stripped/hidden/struct.Bar.html
+// @has - '//p/a' '../../reexp_stripped/struct.Bar.html'
+// @has 'reexp_stripped/struct.Bar.html'
+#[doc(no_inline)]
+pub use reexp_stripped::Bar;
+impl Foo for Bar {}
+
+// @has redirect/index.html
+// @has - '//code' 'pub use reexp_stripped::Quz'
+// @has - '//code/a' 'Quz'
+// @has reexp_stripped/private/struct.Quz.html
+// @has - '//p/a' '../../reexp_stripped/struct.Quz.html'
+// @has 'reexp_stripped/struct.Quz.html'
+#[doc(no_inline)]
+pub use reexp_stripped::Quz;
+impl Foo for Quz {}
+
+mod private_no_inline {
+    pub struct Qux;
+    impl ::Foo for Qux {}
+}
+
+// @has redirect/index.html
+// @has - '//code' 'pub use private_no_inline::Qux'
+// @!has - '//code/a' 'Qux'
+#[doc(no_inline)]
+pub use private_no_inline::Qux;
diff --git a/src/test/rustdoc/structfields.rs b/src/test/rustdoc/structfields.rs
new file mode 100644 (file)
index 0000000..c4327f7
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// @has structfields/struct.Foo.html
+pub struct Foo {
+    // @has - //pre "pub a: ()"
+    pub a: (),
+    // @has - //pre "// some fields omitted"
+    // @!has - //pre "b: ()"
+    b: (),
+    // @!has - //pre "c: usize"
+    #[doc(hidden)]
+    c: usize,
+    // @has - //pre "pub d: usize"
+    pub d: usize,
+}
+
+// @has structfields/struct.Bar.html
+pub struct Bar {
+    // @has - //pre "pub a: ()"
+    pub a: (),
+    // @!has - //pre "// some fields omitted"
+}
+
+// @has structfields/enum.Qux.html
+pub enum Qux {
+    Quz {
+        // @has - //pre "a: ()"
+        a: (),
+        // @!has - //pre "b: ()"
+        #[doc(hidden)]
+        b: (),
+        // @has - //pre "c: usize"
+        c: usize,
+        // @has - //pre "// some fields omitted"
+    },
+}
diff --git a/src/tools/cargotest/Cargo.lock b/src/tools/cargotest/Cargo.lock
new file mode 100644 (file)
index 0000000..30b1427
--- /dev/null
@@ -0,0 +1,28 @@
+[root]
+name = "cargotest"
+version = "0.1.0"
+dependencies = [
+ "tempdir 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "rand"
+version = "0.3.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tempdir"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
diff --git a/src/tools/cargotest/Cargo.toml b/src/tools/cargotest/Cargo.toml
new file mode 100644 (file)
index 0000000..2c7e235
--- /dev/null
@@ -0,0 +1,11 @@
+[package]
+name = "cargotest"
+version = "0.1.0"
+authors = ["Brian Anderson <banderson@mozilla.com>"]
+
+[dependencies]
+tempdir = "0.3.4"
+
+[[bin]]
+name = "cargotest"
+path = "main.rs"
diff --git a/src/tools/cargotest/lockfiles/iron-Cargo.lock b/src/tools/cargotest/lockfiles/iron-Cargo.lock
new file mode 100644 (file)
index 0000000..843f2dc
--- /dev/null
@@ -0,0 +1,364 @@
+[root]
+name = "iron"
+version = "0.3.0"
+dependencies = [
+ "conduit-mime-types 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "error 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hyper 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "modifier 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "plugin 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
+ "typemap 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "bitflags"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "conduit-mime-types"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "cookie"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "openssl 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
+ "url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "error"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "traitobject 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "gcc"
+version = "0.3.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "gdi32-sys"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "hpack"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "httparse"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "hyper"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cookie 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "httparse 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "openssl 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "solicit 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
+ "traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "kernel32-sys"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "language-tags"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "lazy_static"
+version = "0.1.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "libc"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "libressl-pnacl-sys"
+version = "2.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "pnacl-build-helper 1.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "log"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "matches"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "mime"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "modifier"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "num_cpus"
+version = "0.2.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "openssl"
+version = "0.7.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bitflags 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.26 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "openssl-sys 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "openssl-sys-extras 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "openssl-sys"
+version = "0.7.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "gdi32-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libressl-pnacl-sys 2.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pkg-config 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "user32-sys 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "openssl-sys-extras"
+version = "0.7.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "gcc 0.3.26 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "openssl-sys 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "pkg-config"
+version = "0.3.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "plugin"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "typemap 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "pnacl-build-helper"
+version = "1.4.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "tempdir 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand"
+version = "0.3.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rustc-serialize"
+version = "0.3.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "rustc_version"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "semver 0.1.20 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "semver"
+version = "0.1.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "solicit"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "hpack 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tempdir"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "time"
+version = "0.1.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "traitobject"
+version = "0.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "traitobject"
+version = "0.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "typeable"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "typemap"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unsafe-any 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "unicase"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "unicode-bidi"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "unicode-normalization"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "unsafe-any"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "traitobject 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "url"
+version = "0.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-bidi 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "uuid 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "user32-sys"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "uuid"
+version = "0.1.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "winapi"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "winapi-build"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
diff --git a/src/tools/cargotest/main.rs b/src/tools/cargotest/main.rs
new file mode 100644 (file)
index 0000000..87a0103
--- /dev/null
@@ -0,0 +1,102 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern crate tempdir;
+
+use tempdir::TempDir;
+use std::env;
+use std::process::Command;
+use std::path::Path;
+use std::fs::File;
+use std::io::Write;
+
+const TEST_REPOS: &'static [(&'static str, &'static str, Option<&'static str>)] = &[
+    ("https://github.com/rust-lang/cargo",
+     "fae9c539388f1b7c70c31fd0a21b5dd9cd071177",
+     None),
+    ("https://github.com/iron/iron",
+     "16c858ec2901e2992fe5e529780f59fa8ed12903",
+     Some(include_str!("lockfiles/iron-Cargo.lock")))
+];
+
+
+fn main() {
+    let ref cargo = env::args().collect::<Vec<_>>()[1];
+    let ref cargo = Path::new(cargo);
+
+    for &(repo, sha, lockfile) in TEST_REPOS.iter().rev() {
+        test_repo(cargo, repo, sha, lockfile);
+    }
+}
+
+fn test_repo(cargo: &Path, repo: &str, sha: &str, lockfile: Option<&str>) {
+    println!("testing {}", repo);
+    let dir = clone_repo(repo, sha);
+    if let Some(lockfile) = lockfile {
+        File::create(&dir.path().join("Cargo.lock")).expect("")
+            .write_all(lockfile.as_bytes()).expect("");
+    }
+    if !run_cargo_test(cargo, dir.path()) {
+        panic!("tests failed for {}", repo);
+    }
+}
+
+fn clone_repo(repo: &str, sha: &str) -> TempDir {
+    let dir = TempDir::new("cargotest").expect("");
+    let status = Command::new("git")
+        .arg("init")
+        .arg(dir.path())
+        .status()
+        .expect("");
+    assert!(status.success());
+
+    // Try progressively deeper fetch depths to find the commit
+    let mut found = false;
+    for depth in &[1, 10, 100, 1000, 100000] {
+        let status = Command::new("git")
+            .arg("fetch")
+            .arg(repo)
+            .arg("master")
+            .arg(&format!("--depth={}", depth))
+            .current_dir(dir.path())
+            .status()
+            .expect("");
+        assert!(status.success());
+
+        let status = Command::new("git")
+            .arg("reset")
+            .arg(sha)
+            .arg("--hard")
+            .current_dir(dir.path())
+            .status()
+            .expect("");
+
+        if status.success() {
+            found = true;
+            break;
+        }
+    }
+
+    if !found { panic!("unable to find commit {}", sha) }
+
+    dir
+}
+
+fn run_cargo_test(cargo_path: &Path, crate_path: &Path) -> bool {
+    let status = Command::new(cargo_path)
+        .arg("test")
+        // Disable rust-lang/cargo's cross-compile tests
+        .env("CFG_DISABLE_CROSS_TESTS", "1")
+        .current_dir(crate_path)
+        .status()
+        .expect("");
+
+    status.success()
+}
diff --git a/src/tools/error_index_generator/Cargo.lock b/src/tools/error_index_generator/Cargo.lock
new file mode 100644 (file)
index 0000000..b7d2cfc
--- /dev/null
@@ -0,0 +1,4 @@
+[root]
+name = "error_index_generator"
+version = "0.0.0"
+
diff --git a/src/tools/error_index_generator/Cargo.toml b/src/tools/error_index_generator/Cargo.toml
new file mode 100644 (file)
index 0000000..5c5ca27
--- /dev/null
@@ -0,0 +1,8 @@
+[package]
+authors = ["The Rust Project Developers"]
+name = "error_index_generator"
+version = "0.0.0"
+
+[[bin]]
+name = "error_index_generator"
+path = "main.rs"
diff --git a/src/tools/error_index_generator/main.rs b/src/tools/error_index_generator/main.rs
new file mode 100644 (file)
index 0000000..2c734c8
--- /dev/null
@@ -0,0 +1,209 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_private, rustdoc)]
+#![feature(question_mark)]
+
+extern crate syntax;
+extern crate rustdoc;
+extern crate serialize as rustc_serialize;
+
+use std::collections::BTreeMap;
+use std::env;
+use std::error::Error;
+use std::fs::{read_dir, File};
+use std::io::{Read, Write};
+use std::path::Path;
+use std::path::PathBuf;
+
+use syntax::diagnostics::metadata::{get_metadata_dir, ErrorMetadataMap, ErrorMetadata};
+
+use rustdoc::html::markdown::Markdown;
+use rustc_serialize::json;
+
+enum OutputFormat {
+    HTML(HTMLFormatter),
+    Markdown(MarkdownFormatter),
+    Unknown(String),
+}
+
+impl OutputFormat {
+    fn from(format: &str) -> OutputFormat {
+        match &*format.to_lowercase() {
+            "html"     => OutputFormat::HTML(HTMLFormatter),
+            "markdown" => OutputFormat::Markdown(MarkdownFormatter),
+            s          => OutputFormat::Unknown(s.to_owned()),
+        }
+    }
+}
+
+trait Formatter {
+    fn header(&self, output: &mut Write) -> Result<(), Box<Error>>;
+    fn title(&self, output: &mut Write) -> Result<(), Box<Error>>;
+    fn error_code_block(&self, output: &mut Write, info: &ErrorMetadata,
+                        err_code: &str) -> Result<(), Box<Error>>;
+    fn footer(&self, output: &mut Write) -> Result<(), Box<Error>>;
+}
+
+struct HTMLFormatter;
+struct MarkdownFormatter;
+
+impl Formatter for HTMLFormatter {
+    fn header(&self, output: &mut Write) -> Result<(), Box<Error>> {
+        write!(output, r##"<!DOCTYPE html>
+<html>
+<head>
+<title>Rust Compiler Error Index</title>
+<meta charset="utf-8">
+<!-- Include rust.css after main.css so its rules take priority. -->
+<link rel="stylesheet" type="text/css" href="main.css"/>
+<link rel="stylesheet" type="text/css" href="rust.css"/>
+<style>
+.error-undescribed {{
+    display: none;
+}}
+</style>
+</head>
+<body>
+"##)?;
+        Ok(())
+    }
+
+    fn title(&self, output: &mut Write) -> Result<(), Box<Error>> {
+        write!(output, "<h1>Rust Compiler Error Index</h1>\n")?;
+        Ok(())
+    }
+
+    fn error_code_block(&self, output: &mut Write, info: &ErrorMetadata,
+                        err_code: &str) -> Result<(), Box<Error>> {
+        // Enclose each error in a div so they can be shown/hidden en masse.
+        let desc_desc = match info.description {
+            Some(_) => "error-described",
+            None => "error-undescribed",
+        };
+        let use_desc = match info.use_site {
+            Some(_) => "error-used",
+            None => "error-unused",
+        };
+        write!(output, "<div class=\"{} {}\">", desc_desc, use_desc)?;
+
+        // Error title (with self-link).
+        write!(output,
+               "<h2 id=\"{0}\" class=\"section-header\"><a href=\"#{0}\">{0}</a></h2>\n",
+               err_code)?;
+
+        // Description rendered as markdown.
+        match info.description {
+            Some(ref desc) => write!(output, "{}", Markdown(desc))?,
+            None => write!(output, "<p>No description.</p>\n")?,
+        }
+
+        write!(output, "</div>\n")?;
+        Ok(())
+    }
+
+    fn footer(&self, output: &mut Write) -> Result<(), Box<Error>> {
+        write!(output, "</body>\n</html>")?;
+        Ok(())
+    }
+}
+
+impl Formatter for MarkdownFormatter {
+    #[allow(unused_variables)]
+    fn header(&self, output: &mut Write) -> Result<(), Box<Error>> {
+        Ok(())
+    }
+
+    fn title(&self, output: &mut Write) -> Result<(), Box<Error>> {
+        write!(output, "# Rust Compiler Error Index\n")?;
+        Ok(())
+    }
+
+    fn error_code_block(&self, output: &mut Write, info: &ErrorMetadata,
+                        err_code: &str) -> Result<(), Box<Error>> {
+        Ok(match info.description {
+            Some(ref desc) => write!(output, "## {}\n{}\n", err_code, desc)?,
+            None => (),
+        })
+    }
+
+    #[allow(unused_variables)]
+    fn footer(&self, output: &mut Write) -> Result<(), Box<Error>> {
+        Ok(())
+    }
+}
+
+/// Load all the metadata files from `metadata_dir` into an in-memory map.
+fn load_all_errors(metadata_dir: &Path) -> Result<ErrorMetadataMap, Box<Error>> {
+    let mut all_errors = BTreeMap::new();
+
+    for entry in read_dir(metadata_dir)? {
+        let path = entry?.path();
+
+        let mut metadata_str = String::new();
+        File::open(&path).and_then(|mut f| f.read_to_string(&mut metadata_str))?;
+
+        let some_errors: ErrorMetadataMap = json::decode(&metadata_str)?;
+
+        for (err_code, info) in some_errors {
+            all_errors.insert(err_code, info);
+        }
+    }
+
+    Ok(all_errors)
+}
+
+/// Output an HTML page for the errors in `err_map` to `output_path`.
+fn render_error_page<T: Formatter>(err_map: &ErrorMetadataMap, output_path: &Path,
+                                   formatter: T) -> Result<(), Box<Error>> {
+    let mut output_file = File::create(output_path)?;
+
+    formatter.header(&mut output_file)?;
+    formatter.title(&mut output_file)?;
+
+    for (err_code, info) in err_map {
+        formatter.error_code_block(&mut output_file, info, err_code)?;
+    }
+
+    formatter.footer(&mut output_file)
+}
+
+fn main_with_result(format: OutputFormat, dst: &Path) -> Result<(), Box<Error>> {
+    let build_arch = env::var("CFG_BUILD")?;
+    let metadata_dir = get_metadata_dir(&build_arch);
+    let err_map = load_all_errors(&metadata_dir)?;
+    match format {
+        OutputFormat::Unknown(s)  => panic!("Unknown output format: {}", s),
+        OutputFormat::HTML(h)     => render_error_page(&err_map, dst, h)?,
+        OutputFormat::Markdown(m) => render_error_page(&err_map, dst, m)?,
+    }
+    Ok(())
+}
+
+fn parse_args() -> (OutputFormat, PathBuf) {
+    let mut args = env::args().skip(1);
+    let format = args.next().map(|a| OutputFormat::from(&a))
+                            .unwrap_or(OutputFormat::from("html"));
+    let dst = args.next().map(PathBuf::from).unwrap_or_else(|| {
+        match format {
+            OutputFormat::HTML(..) => PathBuf::from("doc/error-index.html"),
+            OutputFormat::Markdown(..) => PathBuf::from("doc/error-index.md"),
+            OutputFormat::Unknown(..) => PathBuf::from("<nul>"),
+        }
+    });
+    (format, dst)
+}
+
+fn main() {
+    let (format, dst) = parse_args();
+    if let Err(e) = main_with_result(format, &dst) {
+        panic!("{}", e.description());
+    }
+}
diff --git a/src/tools/linkchecker/Cargo.lock b/src/tools/linkchecker/Cargo.lock
new file mode 100644 (file)
index 0000000..8e94137
--- /dev/null
@@ -0,0 +1,64 @@
+[root]
+name = "linkchecker"
+version = "0.1.0"
+dependencies = [
+ "url 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "matches"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "rand"
+version = "0.3.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rustc-serialize"
+version = "0.3.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "unicode-bidi"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "unicode-normalization"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "url"
+version = "0.5.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-bidi 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "uuid 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "uuid"
+version = "0.1.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
diff --git a/src/tools/linkchecker/Cargo.toml b/src/tools/linkchecker/Cargo.toml
new file mode 100644 (file)
index 0000000..29fc78a
--- /dev/null
@@ -0,0 +1,11 @@
+[package]
+name = "linkchecker"
+version = "0.1.0"
+authors = ["Alex Crichton <alex@alexcrichton.com>"]
+
+[dependencies]
+url = "0.5"
+
+[[bin]]
+name = "linkchecker"
+path = "main.rs"
diff --git a/src/tools/linkchecker/main.rs b/src/tools/linkchecker/main.rs
new file mode 100644 (file)
index 0000000..12419d4
--- /dev/null
@@ -0,0 +1,344 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Script to check the validity of `href` links in our HTML documentation.
+//!
+//! In the past we've been quite prone to writing broken links, as most of
+//! them are added manually rather than automatically. As files move over
+//! time or APIs change, old links become stale or broken. The purpose of this
+//! script is to check all relative links in our documentation to make sure they
+//! actually point to a valid place.
+//!
+//! Currently this doesn't do any real HTML parsing or anything fancy like
+//! that; it just uses a simple "regex"-like search for `href` and `id`
+//! attributes. These values are then translated to file URLs if possible, and
+//! the destination is asserted to exist.
+//!
+//! A few whitelisted exceptions are allowed, as there are known bugs in
+//! rustdoc, but this should catch the majority of "broken link" cases.
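+//!
+//! For example (illustrative paths only): given `<a href="../std/vec/struct.Vec.html#examples">`
+//! inside `book/foo.html`, the checker resolves the `href` against that file's
+//! URL, checks that `std/vec/struct.Vec.html` exists on disk, and then checks
+//! that it contains an `id="examples"` attribute.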
+
+extern crate url;
+
+use std::env;
+use std::fs::File;
+use std::io::prelude::*;
+use std::path::{Path, PathBuf};
+use std::collections::{HashMap, HashSet};
+use std::collections::hash_map::Entry;
+
+use url::{Url, UrlParser};
+
+use Redirect::*;
+
+macro_rules! t {
+    ($e:expr) => (match $e {
+        Ok(e) => e,
+        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
+    })
+}
+
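+// The checker takes a single command-line argument: the root directory of the
+// rendered documentation, resolved against the current directory before walking.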
+fn main() {
+    let docs = env::args().nth(1).unwrap();
+    let docs = env::current_dir().unwrap().join(docs);
+    let mut url = Url::from_file_path(&docs).unwrap();
+    let mut errors = false;
+    walk(&mut HashMap::new(), &docs, &docs, &mut url, &mut errors);
+    if errors {
+        panic!("found some broken links");
+    }
+}
+
+#[derive(Debug)]
+pub enum LoadError {
+    IOError(std::io::Error),
+    BrokenRedirect(PathBuf, std::io::Error),
+    IsRedirect,
+}
+
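+/// Controls how rustdoc-generated redirect pages are treated when loading a file.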
+enum Redirect {
+    SkipRedirect,
+    FromRedirect(bool),
+}
+
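+/// Cached contents of a documentation file along with the set of `id`
+/// attributes found in it.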
+struct FileEntry {
+    source: String,
+    ids: HashSet<String>,
+}
+
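+/// Cache of files already loaded, keyed by their path relative to the
+/// documentation root.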
+type Cache = HashMap<PathBuf, FileEntry>;
+
+impl FileEntry {
+    fn parse_ids(&mut self,
+                 file: &Path,
+                 contents: &str,
+                 errors: &mut bool)
+    {
+        if self.ids.is_empty() {
+            with_attrs_in_source(contents, " id", |fragment, i| {
+                let frag = fragment.trim_left_matches("#").to_owned();
+                if !self.ids.insert(frag) {
+                    *errors = true;
+                    println!("{}:{}: id is not unique: `{}`",
+                             file.display(), i, fragment);
+                }
+            });
+        }
+    }
+}
+
+fn walk(cache: &mut Cache,
+        root: &Path,
+        dir: &Path,
+        url: &mut Url,
+        errors: &mut bool)
+{
+    for entry in t!(dir.read_dir()).map(|e| t!(e)) {
+        let path = entry.path();
+        let kind = t!(entry.file_type());
+        url.path_mut().unwrap().push(entry.file_name().into_string().unwrap());
+        if kind.is_dir() {
+            walk(cache, root, &path, url, errors);
+        } else {
+            let pretty_path = check(cache, root, &path, url, errors);
+            if let Some(pretty_path) = pretty_path {
+                let entry = cache.get_mut(&pretty_path).unwrap();
+                // we don't need the source anymore,
+                // so drop it to reduce memory usage
+                entry.source = String::new();
+            }
+        }
+        url.path_mut().unwrap().pop();
+    }
+}
+
+fn check(cache: &mut Cache,
+         root: &Path,
+         file: &Path,
+         base: &Url,
+         errors: &mut bool) -> Option<PathBuf>
+{
+    // Ignore js files; they are not as prone to errors as the rest of the
+    // documentation and they otherwise bring up false positives.
+    if file.extension().and_then(|s| s.to_str()) == Some("js") {
+        return None;
+    }
+
+    // Unfortunately not all of our links are valid today, so we need a few
+    // whitelists to get this past `make check`.
+    // FIXME(#32129)
+    if file.ends_with("std/string/struct.String.html") {
+        return None;
+    }
+    // FIXME(#32553)
+    if file.ends_with("collections/string/struct.String.html") {
+        return None;
+    }
+    // FIXME(#32130)
+    if file.ends_with("btree_set/struct.BTreeSet.html") ||
+       file.ends_with("collections/struct.BTreeSet.html") ||
+       file.ends_with("collections/btree_map/struct.BTreeMap.html") ||
+       file.ends_with("collections/hash_map/struct.HashMap.html") {
+        return None;
+    }
+
+    if file.ends_with("std/sys/ext/index.html") {
+        return None;
+    }
+
+    if let Some(file) = file.to_str() {
+        // FIXME(#31948)
+        if file.contains("ParseFloatError") {
+            return None;
+        }
+        // weird reexports, but this module is on its way out, so chalk it up to
+        // "rustdoc weirdness" and move on from there
+        if file.contains("scoped_tls") {
+            return None;
+        }
+    }
+
+    let mut parser = UrlParser::new();
+    parser.base_url(base);
+
+    let res = load_file(cache, root, PathBuf::from(file), SkipRedirect);
+    let (pretty_file, contents) = match res {
+        Ok(res) => res,
+        Err(_) => return None,
+    };
+    {
+        cache.get_mut(&pretty_file).unwrap()
+                                   .parse_ids(&pretty_file, &contents, errors);
+    }
+
+    // Search for anything matching the pattern 'href[ ]*=[ ]*".*?"'
+    with_attrs_in_source(&contents, " href", |url, i| {
+        // Once we've plucked out the URL, parse it using our base url and
+        // then try to extract a file path. If either of these fails then we
+        // just keep going.
+        let (parsed_url, path) = match url_to_file_path(&parser, url) {
+            Some((url, path)) => (url, PathBuf::from(path)),
+            None => return,
+        };
+
+        // Alright, if we've found a file name then this file had better
+        // exist! If it doesn't then we register and print an error.
+        if path.exists() {
+            if path.is_dir() {
+                return;
+            }
+            let res = load_file(cache, root, path.clone(), FromRedirect(false));
+            let (pretty_path, contents) = match res {
+                Ok(res) => res,
+                Err(LoadError::IOError(err)) => panic!(format!("{}", err)),
+                Err(LoadError::BrokenRedirect(target, _)) => {
+                    println!("{}:{}: broken redirect to {}",
+                             pretty_file.display(), i + 1, target.display());
+                    return;
+                }
+                Err(LoadError::IsRedirect) => unreachable!(),
+            };
+
+            if let Some(ref fragment) = parsed_url.fragment {
+                // Fragments like `#1-6` are most likely line numbers to be
+                // interpreted by javascript, so we're ignoring these
+                if fragment.splitn(2, '-')
+                           .all(|f| f.chars().all(|c| c.is_numeric())) {
+                    return;
+                }
+
+                let entry = &mut cache.get_mut(&pretty_path).unwrap();
+                entry.parse_ids(&pretty_path, &contents, errors);
+
+                if !entry.ids.contains(fragment) {
+                    *errors = true;
+                    print!("{}:{}: broken link fragment  ",
+                           pretty_file.display(), i + 1);
+                    println!("`#{}` pointing to `{}`",
+                             fragment, pretty_path.display());
+                };
+            }
+        } else {
+            *errors = true;
+            print!("{}:{}: broken link - ", pretty_file.display(), i + 1);
+            let pretty_path = path.strip_prefix(root).unwrap_or(&path);
+            println!("{}", pretty_path.display());
+        }
+    });
+    Some(pretty_file)
+}
+
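+/// Load `file` from the cache or from disk, following rustdoc-generated
+/// redirect pages according to `redirect`, and return its root-relative
+/// "pretty" path together with its contents.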
+fn load_file(cache: &mut Cache,
+             root: &Path,
+             file: PathBuf,
+             redirect: Redirect) -> Result<(PathBuf, String), LoadError> {
+    let mut contents = String::new();
+    let pretty_file = PathBuf::from(file.strip_prefix(root).unwrap_or(&file));
+
+    let maybe_redirect = match cache.entry(pretty_file.clone()) {
+        Entry::Occupied(entry) => {
+            contents = entry.get().source.clone();
+            None
+        },
+        Entry::Vacant(entry) => {
+            let mut fp = try!(File::open(file.clone()).map_err(|err| {
+                if let FromRedirect(true) = redirect {
+                    LoadError::BrokenRedirect(file.clone(), err)
+                } else {
+                    LoadError::IOError(err)
+                }
+            }));
+            try!(fp.read_to_string(&mut contents)
+                   .map_err(|err| LoadError::IOError(err)));
+
+            let maybe = maybe_redirect(&contents);
+            if maybe.is_some() {
+                if let SkipRedirect = redirect {
+                    return Err(LoadError::IsRedirect);
+                }
+            } else {
+                entry.insert(FileEntry {
+                    source: contents.clone(),
+                    ids: HashSet::new(),
+                });
+            }
+            maybe
+        },
+    };
+    let base = Url::from_file_path(&file).unwrap();
+    let mut parser = UrlParser::new();
+    parser.base_url(&base);
+
+    match maybe_redirect.and_then(|url| url_to_file_path(&parser, &url)) {
+        Some((_, redirect_file)) => {
+            let path = PathBuf::from(redirect_file);
+            load_file(cache, root, path, FromRedirect(true))
+        }
+        None => Ok((pretty_file, contents))
+    }
+}
+
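+/// If `source` is a rustdoc-generated redirect page, return the URL it
+/// redirects to. This relies on the `<p>Redirecting to <a href=` marker
+/// appearing on the seventh line of such pages.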
+fn maybe_redirect(source: &str) -> Option<String> {
+    const REDIRECT: &'static str = "<p>Redirecting to <a href=";
+
+    let mut lines = source.lines();
+    let redirect_line = match lines.nth(6) {
+        Some(l) => l,
+        None => return None,
+    };
+
+    redirect_line.find(REDIRECT).map(|i| {
+        let rest = &redirect_line[(i + REDIRECT.len() + 1)..];
+        let pos_quote = rest.find('"').unwrap();
+        rest[..pos_quote].to_owned()
+    })
+}
+
+fn url_to_file_path(parser: &UrlParser, url: &str) -> Option<(Url, PathBuf)> {
+    parser.parse(url).ok().and_then(|parsed_url| {
+        parsed_url.to_file_path().ok().map(|f| (parsed_url, f))
+    })
+}
+
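+/// Scan `contents` line by line for occurrences of `attr` followed by `=` and
+/// a quoted value, calling `f` with each value and its zero-based line number.
+/// This is the simple "regex"-like search mentioned in the module docs; it
+/// assumes attribute values never span multiple lines.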
+fn with_attrs_in_source<F: FnMut(&str, usize)>(contents: &str,
+                                               attr: &str,
+                                               mut f: F)
+{
+    for (i, mut line) in contents.lines().enumerate() {
+        while let Some(j) = line.find(attr) {
+            let rest = &line[j + attr.len() ..];
+            line = rest;
+            let pos_equals = match rest.find("=") {
+                Some(i) => i,
+                None => continue,
+            };
+            if rest[..pos_equals].trim_left_matches(" ") != "" {
+                continue
+            }
+
+            let rest = &rest[pos_equals + 1..];
+
+            let pos_quote = match rest.find(&['"', '\''][..]) {
+                Some(i) => i,
+                None => continue,
+            };
+            let quote_delim = rest.as_bytes()[pos_quote] as char;
+
+            if rest[..pos_quote].trim_left_matches(" ") != "" {
+                continue
+            }
+            let rest = &rest[pos_quote + 1..];
+            let url = match rest.find(quote_delim) {
+                Some(i) => &rest[..i],
+                None => continue,
+            };
+            f(url, i)
+        }
+    }
+}
diff --git a/src/tools/rustbook/Cargo.lock b/src/tools/rustbook/Cargo.lock
new file mode 100644 (file)
index 0000000..e541ce4
--- /dev/null
@@ -0,0 +1,4 @@
+[root]
+name = "rustbook"
+version = "0.0.0"
+
diff --git a/src/tools/rustbook/Cargo.toml b/src/tools/rustbook/Cargo.toml
new file mode 100644 (file)
index 0000000..956392c
--- /dev/null
@@ -0,0 +1,8 @@
+[package]
+authors = ["The Rust Project Developers"]
+name = "rustbook"
+version = "0.0.0"
+
+[[bin]]
+name = "rustbook"
+path = "main.rs"
diff --git a/src/tools/rustbook/book.rs b/src/tools/rustbook/book.rs
new file mode 100644 (file)
index 0000000..36a37db
--- /dev/null
@@ -0,0 +1,171 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Basic data structures for representing a book.
+
+use std::io::prelude::*;
+use std::io::BufReader;
+use std::iter;
+use std::path::{Path, PathBuf};
+
+pub struct BookItem {
+    pub title: String,
+    pub path: PathBuf,
+    pub path_to_root: PathBuf,
+    pub children: Vec<BookItem>,
+}
+
+pub struct Book {
+    pub chapters: Vec<BookItem>,
+}
+
+/// A depth-first iterator over a book.
+pub struct BookItems<'a> {
+    cur_items: &'a [BookItem],
+    cur_idx: usize,
+    stack: Vec<(&'a [BookItem], usize)>,
+}
+
+impl<'a> Iterator for BookItems<'a> {
+    type Item = (String, &'a BookItem);
+
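+    // Yields each item together with its section number string (e.g. "2.1."),
+    // built from the indices stored on the traversal stack.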
+    fn next(&mut self) -> Option<(String, &'a BookItem)> {
+        loop {
+            if self.cur_idx >= self.cur_items.len() {
+                match self.stack.pop() {
+                    None => return None,
+                    Some((parent_items, parent_idx)) => {
+                        self.cur_items = parent_items;
+                        self.cur_idx = parent_idx + 1;
+                    }
+                }
+            } else {
+                let cur = self.cur_items.get(self.cur_idx).unwrap();
+
+                let mut section = "".to_string();
+                for &(_, idx) in &self.stack {
+                    section.push_str(&(idx + 1).to_string()[..]);
+                    section.push('.');
+                }
+                section.push_str(&(self.cur_idx + 1).to_string()[..]);
+                section.push('.');
+
+                self.stack.push((self.cur_items, self.cur_idx));
+                self.cur_items = &cur.children[..];
+                self.cur_idx = 0;
+                return Some((section, cur))
+            }
+        }
+    }
+}
+
+impl Book {
+    pub fn iter(&self) -> BookItems {
+        BookItems {
+            cur_items: &self.chapters[..],
+            cur_idx: 0,
+            stack: Vec::new(),
+        }
+    }
+}
+
+/// Construct a book by parsing a summary (markdown table of contents).
+pub fn parse_summary(input: &mut Read, src: &Path) -> Result<Book, Vec<String>> {
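+    // Pop items off the nesting stack until it is shallower than `to_level`,
+    // attaching each popped item to its parent (or to the top-level list when
+    // the stack is exhausted).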
+    fn collapse(stack: &mut Vec<BookItem>,
+                top_items: &mut Vec<BookItem>,
+                to_level: usize) {
+        loop {
+            if stack.len() < to_level { return }
+            if stack.len() == 1 {
+                top_items.push(stack.pop().unwrap());
+                return;
+            }
+
+            let tip = stack.pop().unwrap();
+            let last = stack.len() - 1;
+            stack[last].children.push(tip);
+        }
+    }
+
+    let mut top_items = vec!();
+    let mut stack = vec!();
+    let mut errors = vec!();
+
+    // always include the introduction
+    top_items.push(BookItem {
+        title: "Introduction".to_string(),
+        path: PathBuf::from("README.md"),
+        path_to_root: PathBuf::from(""),
+        children: vec!(),
+    });
+
+    for line_result in BufReader::new(input).lines() {
+        let line = match line_result {
+            Ok(line) => line,
+            Err(err) => {
+                errors.push(err.to_string());
+                return Err(errors);
+            }
+        };
+
+        let star_idx = match line.find("*") { Some(i) => i, None => continue };
+
+        let start_bracket = star_idx + line[star_idx..].find("[").unwrap();
+        let end_bracket = start_bracket + line[start_bracket..].find("](").unwrap();
+        let start_paren = end_bracket + 1;
+        let end_paren = start_paren + line[start_paren..].find(")").unwrap();
+
+        let given_path = &line[start_paren + 1 .. end_paren];
+        let title = line[start_bracket + 1..end_bracket].to_string();
+        let indent = &line[..star_idx];
+
+        let path_from_root = match src.join(given_path).strip_prefix(src) {
+            Ok(p) => p.to_path_buf(),
+            Err(..) => {
+                errors.push(format!("paths in SUMMARY.md must be relative, \
+                                     but path '{}' for section '{}' is not.",
+                                     given_path, title));
+                PathBuf::new()
+            }
+        };
+        let path_to_root = PathBuf::from(&iter::repeat("../")
+                                         .take(path_from_root.components().count() - 1)
+                                         .collect::<String>());
+        let item = BookItem {
+            title: title,
+            path: path_from_root,
+            path_to_root: path_to_root,
+            children: vec!(),
+        };
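+        // One nesting level per four spaces (or one tab) of indentation before
+        // the `*` bullet.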
+        let level = indent.chars().map(|c| -> usize {
+            match c {
+                ' ' => 1,
+                '\t' => 4,
+                _ => unreachable!()
+            }
+        }).sum::<usize>() / 4 + 1;
+
+        if level > stack.len() + 1 {
+            errors.push(format!("section '{}' is indented too deeply; \
+                                 found {}, expected {} or less",
+                                item.title, level, stack.len() + 1));
+        } else if level <= stack.len() {
+            collapse(&mut stack, &mut top_items, level);
+        }
+        stack.push(item)
+    }
+
+    if errors.is_empty() {
+        collapse(&mut stack, &mut top_items, 1);
+        Ok(Book { chapters: top_items })
+    } else {
+        Err(errors)
+    }
+}
diff --git a/src/tools/rustbook/build.rs b/src/tools/rustbook/build.rs
new file mode 100644 (file)
index 0000000..6014439
--- /dev/null
@@ -0,0 +1,225 @@
+// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation of the `build` subcommand, used to compile a book.
+
+use std::env;
+use std::fs::{self, File};
+use std::io::prelude::*;
+use std::io::{self, BufWriter};
+use std::path::{Path, PathBuf};
+use rustc_back::tempdir::TempDir;
+
+use subcommand::Subcommand;
+use term::Term;
+use error::{err, CliResult, CommandResult};
+use book;
+use book::{Book, BookItem};
+
+use rustdoc;
+
+struct Build;
+
+pub fn parse_cmd(name: &str) -> Option<Box<Subcommand>> {
+    if name == "build" {
+        Some(Box::new(Build))
+    } else {
+        None
+    }
+}
+
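+/// Write the sidebar table of contents as nested HTML lists, marking the entry
+/// for `current_page` as active.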
+fn write_toc(book: &Book, current_page: &BookItem, out: &mut Write) -> io::Result<()> {
+    fn walk_items(items: &[BookItem],
+                  section: &str,
+                  current_page: &BookItem,
+                  out: &mut Write) -> io::Result<()> {
+        for (i, item) in items.iter().enumerate() {
+            walk_item(item, &format!("{}{}.", section, i + 1)[..], current_page, out)?;
+        }
+        Ok(())
+    }
+    fn walk_item(item: &BookItem,
+                 section: &str,
+                 current_page: &BookItem,
+                 out: &mut Write) -> io::Result<()> {
+        let class_string = if item.path == current_page.path {
+            "class='active'"
+        } else {
+            ""
+        };
+
+        writeln!(out, "<li><a {} href='{}'><b>{}</b> {}</a>",
+                 class_string,
+                 current_page.path_to_root.join(&item.path).with_extension("html").display(),
+                 section,
+                 item.title)?;
+        if !item.children.is_empty() {
+            writeln!(out, "<ul class='section'>")?;
+            let _ = walk_items(&item.children[..], section, current_page, out);
+            writeln!(out, "</ul>")?;
+        }
+        writeln!(out, "</li>")?;
+
+        Ok(())
+    }
+
+    writeln!(out, "<div id='toc' class='mobile-hidden'>")?;
+    writeln!(out, "<ul class='chapter'>")?;
+    walk_items(&book.chapters[..], "", &current_page, out)?;
+    writeln!(out, "</ul>")?;
+    writeln!(out, "</div>")?;
+
+    Ok(())
+}
+
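+/// Render every chapter of `book` into `tgt`: preprocess its markdown, wrap it
+/// with the navigation prelude/postlude, and run rustdoc over the result.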
+fn render(book: &Book, tgt: &Path) -> CliResult<()> {
+    let tmp = TempDir::new("rustbook")?;
+
+    for (_section, item) in book.iter() {
+        let out_path = match item.path.parent() {
+            Some(p) => tgt.join(p),
+            None => tgt.to_path_buf(),
+        };
+
+        let src;
+        if env::args().len() < 3 {
+            src = env::current_dir().unwrap().clone();
+        } else {
+            src = PathBuf::from(&env::args().nth(2).unwrap());
+        }
+        // preprocess the markdown, rerouting markdown references to html
+        // references
+        let mut markdown_data = String::new();
+        File::open(&src.join(&item.path)).and_then(|mut f| {
+            f.read_to_string(&mut markdown_data)
+        })?;
+        let preprocessed_path = tmp.path().join(item.path.file_name().unwrap());
+        {
+            let urls = markdown_data.replace(".md)", ".html)");
+            File::create(&preprocessed_path).and_then(|mut f| {
+                f.write_all(urls.as_bytes())
+            })?;
+        }
+
+        // write the prelude to a temporary HTML file for rustdoc inclusion
+        let prelude = tmp.path().join("prelude.html");
+        {
+            let mut buffer = BufWriter::new(File::create(&prelude)?);
+            writeln!(&mut buffer, r#"
+                <div id="nav">
+                    <button id="toggle-nav">
+                        <span class="sr-only">Toggle navigation</span>
+                        <span class="bar"></span>
+                        <span class="bar"></span>
+                        <span class="bar"></span>
+                    </button>
+                </div>"#)?;
+            let _ = write_toc(book, &item, &mut buffer);
+            writeln!(&mut buffer, "<div id='page-wrapper'>")?;
+            writeln!(&mut buffer, "<div id='page'>")?;
+        }
+
+        // write the postlude to a temporary HTML file for rustdoc inclusion
+        let postlude = tmp.path().join("postlude.html");
+        {
+            let mut buffer = BufWriter::new(File::create(&postlude)?);
+            writeln!(&mut buffer, "<script src='rustbook.js'></script>")?;
+            writeln!(&mut buffer, "<script src='playpen.js'></script>")?;
+            writeln!(&mut buffer, "</div></div>")?;
+        }
+
+        fs::create_dir_all(&out_path)?;
+
+        let rustdoc_args: &[String] = &[
+            "".to_string(),
+            preprocessed_path.display().to_string(),
+            format!("-o{}", out_path.display()),
+            format!("--html-before-content={}", prelude.display()),
+            format!("--html-after-content={}", postlude.display()),
+            format!("--markdown-playground-url=https://play.rust-lang.org"),
+            format!("--markdown-css={}", item.path_to_root.join("rustbook.css").display()),
+            "--markdown-no-toc".to_string(),
+        ];
+        let output_result = rustdoc::main_args(rustdoc_args);
+        if output_result != 0 {
+            let message = format!("Could not execute `rustdoc` with {:?}: {}",
+                                  rustdoc_args, output_result);
+            return Err(err(&message));
+        }
+    }
+
+    // create index.html from the root README
+    fs::copy(&tgt.join("README.html"), &tgt.join("index.html"))?;
+
+    // Copy js for playpen
+    let mut playpen = File::create(tgt.join("playpen.js"))?;
+    let js = include_bytes!("../../librustdoc/html/static/playpen.js");
+    playpen.write_all(js)?;
+    Ok(())
+}
+
+impl Subcommand for Build {
+    fn parse_args(&mut self, _: &[String]) -> CliResult<()> {
+        Ok(())
+    }
+    fn usage(&self) {}
+    fn execute(&mut self, term: &mut Term) -> CommandResult<()> {
+        let cwd = env::current_dir().unwrap();
+        let src;
+        let tgt;
+
+        if env::args().len() < 3 {
+            src = cwd.clone();
+        } else {
+            src = PathBuf::from(&env::args().nth(2).unwrap());
+        }
+
+        if env::args().len() < 4 {
+            tgt = cwd.join("_book");
+        } else {
+            tgt = PathBuf::from(&env::args().nth(3).unwrap());
+        }
+
+        // `_book` directory may already exist from previous runs. Check and
+        // delete it if it exists.
+        for entry in fs::read_dir(&cwd)? {
+            let path = entry?.path();
+            if path == tgt { fs::remove_dir_all(&tgt)? }
+        }
+        fs::create_dir(&tgt)?;
+
+        // Copy static files
+        let css = include_bytes!("static/rustbook.css");
+        let js = include_bytes!("static/rustbook.js");
+
+        let mut css_file = File::create(tgt.join("rustbook.css"))?;
+        css_file.write_all(css)?;
+
+        let mut js_file = File::create(tgt.join("rustbook.js"))?;
+        js_file.write_all(js)?;
+
+
+        let mut summary = File::open(&src.join("SUMMARY.md"))?;
+        match book::parse_summary(&mut summary, &src) {
+            Ok(book) => {
+                // execute rustdoc on the whole book
+                render(&book, &tgt)
+            }
+            Err(errors) => {
+                let n = errors.len();
+                for err in errors {
+                    term.err(&format!("error: {}", err)[..]);
+                }
+
+                Err(err(&format!("{} errors occurred", n)))
+            }
+        }
+    }
+}
diff --git a/src/tools/rustbook/error.rs b/src/tools/rustbook/error.rs
new file mode 100644 (file)
index 0000000..e896dee
--- /dev/null
@@ -0,0 +1,36 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Error handling utilities. WIP.
+
+use std::error::Error;
+use std::fmt;
+
+pub type CliError = Box<Error + 'static>;
+pub type CliResult<T> = Result<T, CliError>;
+
+pub type CommandError = Box<Error + 'static>;
+pub type CommandResult<T> = Result<T, CommandError>;
+
+pub fn err(s: &str) -> CliError {
+    #[derive(Debug)]
+    struct E(String);
+
+    impl Error for E {
+        fn description(&self) -> &str { &self.0 }
+    }
+    impl fmt::Display for E {
+        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+            self.0.fmt(f)
+        }
+    }
+
+    Box::new(E(s.to_string()))
+}
diff --git a/src/tools/rustbook/help.rs b/src/tools/rustbook/help.rs
new file mode 100644 (file)
index 0000000..c90c2b9
--- /dev/null
@@ -0,0 +1,46 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation of the `help` subcommand. Currently just prints basic usage info.
+
+use subcommand::Subcommand;
+use error::CliResult;
+use error::CommandResult;
+use term::Term;
+
+struct Help;
+
+pub fn parse_cmd(name: &str) -> Option<Box<Subcommand>> {
+    match name {
+        "help" | "--help" | "-h" | "-?" => Some(Box::new(Help)),
+        _ => None
+    }
+}
+
+impl Subcommand for Help {
+    fn parse_args(&mut self, _: &[String]) -> CliResult<()> {
+        Ok(())
+    }
+    fn usage(&self) {}
+    fn execute(&mut self, _: &mut Term) -> CommandResult<()> {
+        usage();
+        Ok(())
+    }
+}
+
+pub fn usage() {
+    println!("Usage: rustbook <command> [<args>]");
+    println!("");
+    println!("The <command> must be one of:");
+    println!("  help    Print this message.");
+    println!("  build   Build the book in subdirectory _book");
+    println!("  serve   --NOT YET IMPLEMENTED--");
+    println!("  test    --NOT YET IMPLEMENTED--");
+}
diff --git a/src/tools/rustbook/main.rs b/src/tools/rustbook/main.rs
new file mode 100644 (file)
index 0000000..5ad4982
--- /dev/null
@@ -0,0 +1,73 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(warnings)]
+
+#![feature(iter_arith)]
+#![feature(rustc_private)]
+#![feature(rustdoc)]
+#![feature(question_mark)]
+
+extern crate rustdoc;
+extern crate rustc_back;
+
+use std::env;
+use std::error::Error;
+use std::process;
+use std::sync::atomic::{AtomicIsize, ATOMIC_ISIZE_INIT, Ordering};
+use subcommand::Subcommand;
+use term::Term;
+
+mod term;
+mod error;
+mod book;
+
+mod subcommand;
+mod help;
+mod build;
+mod serve;
+mod test;
+
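+// Exit code for the whole process; set to a non-zero value by `Term::err`
+// whenever an error is reported, and read back in `main` just before exiting.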
+static EXIT_STATUS: AtomicIsize = ATOMIC_ISIZE_INIT;
+
+pub fn main() {
+    let mut term = Term::new();
+    let cmd: Vec<_> = env::args().collect();
+
+    if cmd.len() <= 1 {
+        help::usage()
+    } else {
+        match subcommand::parse_name(&cmd[1][..]) {
+            Some(mut subcmd) => {
+                match subcmd.parse_args(&cmd[..cmd.len()-1]) {
+                    Ok(_) => {
+                        match subcmd.execute(&mut term) {
+                            Ok(_) => (),
+                            Err(err) => {
+                                term.err(&format!("error: {}", err));
+                            }
+                        }
+                    }
+                    Err(err) => {
+                        println!("{}", err.description());
+                        println!("");
+                        subcmd.usage();
+                    }
+                }
+            }
+            None => {
+                println!("Unrecognized command '{}'.", cmd[1]);
+                println!("");
+                help::usage();
+            }
+        }
+    }
+    process::exit(EXIT_STATUS.load(Ordering::SeqCst) as i32);
+}
diff --git a/src/tools/rustbook/serve.rs b/src/tools/rustbook/serve.rs
new file mode 100644 (file)
index 0000000..2fa7b7e
--- /dev/null
@@ -0,0 +1,36 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation of the `serve` subcommand. Just a stub for now.
+
+use subcommand::Subcommand;
+use error::CliResult;
+use error::CommandResult;
+use term::Term;
+
+struct Serve;
+
+pub fn parse_cmd(name: &str) -> Option<Box<Subcommand>> {
+    if name == "serve" {
+        Some(Box::new(Serve))
+    } else {
+        None
+    }
+}
+
+impl Subcommand for Serve {
+    fn parse_args(&mut self, _: &[String]) -> CliResult<()> {
+        Ok(())
+    }
+    fn usage(&self) {}
+    fn execute(&mut self, _: &mut Term) -> CommandResult<()> {
+        Ok(())
+    }
+}
diff --git a/src/tools/rustbook/static/rustbook.css b/src/tools/rustbook/static/rustbook.css
new file mode 100644 (file)
index 0000000..ba0151f
--- /dev/null
@@ -0,0 +1,157 @@
+/**
+ * Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
+ * file at the top-level directory of this distribution and at
+ * http://rust-lang.org/COPYRIGHT.
+ *
+ * Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+ * http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+ * <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+ * option. This file may not be copied, modified, or distributed
+ * except according to those terms.
+ */
+
+@import url('../rust.css');
+
+body {
+    max-width: none;
+    font: 16px/1.6 'Source Serif Pro', Georgia, Times, 'Times New Roman', serif;
+    color: #333;
+}
+
+h1, h2, h3, h4, h5, h6 {
+    font-family: 'Open Sans', 'Fira Sans', 'Helvetica Neue', Helvetica, Arial, sans-serif;
+    font-weight: bold;
+    color: #333;
+}
+
+@media only screen {
+    #toc {
+        position: fixed;
+        top: 0;
+        left: 0;
+        bottom: 0;
+        width: 300px;
+        overflow-y: auto;
+        border-right: 1px solid #e8e8e8;
+        padding: 0 15px;
+        font-size: 14px;
+        background-color: #fafafa;
+        -webkit-overflow-scrolling: touch;
+    }
+
+    #page-wrapper {
+        position: absolute;
+        top: 0;
+        left: 300px;
+        right: 0;
+        padding: 0 15px;
+        -webkit-overflow-scrolling: touch;
+    }
+}
+
+@media only print {
+    #toc, #nav {
+        display: none;
+    }
+}
+
+@media only screen and (max-width: 1023px) {
+    #toc {
+        width: 100%;
+        top: 40px;
+    }
+
+    #page-wrapper {
+        top: 40px;
+        left: 0;
+    }
+
+    .mobile-hidden {
+        display: none;
+    }
+}
+
+#page {
+    margin: 0 auto;
+    max-width: 750px;
+    padding-bottom: 50px;
+}
+
+.chapter {
+    list-style: none;
+    padding-left: 0;
+    line-height: 30px;
+}
+
+.section {
+    list-style: none;
+    padding-left: 20px;
+    line-height: 40px;
+}
+
+.section li {
+    text-overflow: ellipsis;
+    overflow: hidden;
+    white-space: nowrap;
+}
+
+.chapter li a {
+    color: #333;
+    padding: 5px 0;
+}
+
+.chapter li a.active,
+.chapter li a:hover {
+    color: #008cff;
+    text-decoration: none;
+}
+
+#toggle-nav {
+    cursor: pointer;
+    margin-top: 5px;
+    width: 30px;
+    height: 30px;
+    background-color: #fff;
+    border: 1px solid #666;
+    border-radius: 3px;
+    padding: 3px 3px 0 3px;
+}
+
+.sr-only {
+    position: absolute;
+    width: 1px;
+    height: 1px;
+    margin: -1px;
+    padding: 0;
+    overflow: hidden;
+    clip: rect(0, 0, 0, 0);
+    border: 0;
+}
+
+.bar {
+    display: block;
+    background-color: #000;
+    border-radius: 2px;
+    width: 100%;
+    height: 2px;
+    margin: 2px 0 3px;
+    padding: 0;
+}
+
+pre {
+    padding: 11px;
+    overflow: auto;
+    font-size: 85%;
+    line-height: 1.45;
+    background-color: #f7f7f7;
+    border: 0;
+    border-radius: 3px;
+}
+
+.left {
+    float: left;
+}
+
+.right {
+    float: right;
+}
diff --git a/src/tools/rustbook/static/rustbook.js b/src/tools/rustbook/static/rustbook.js
new file mode 100644 (file)
index 0000000..d8ab152
--- /dev/null
@@ -0,0 +1,78 @@
+// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*jslint browser: true, es5: true */
+/*globals $: true, rootPath: true */
+
+document.addEventListener('DOMContentLoaded', function() {
+    'use strict';
+
+    document.getElementById('toggle-nav').onclick = function(e) {
+        var toc = document.getElementById('toc');
+        var pagewrapper = document.getElementById('page-wrapper');
+        toggleClass(toc, 'mobile-hidden');
+        toggleClass(pagewrapper, 'mobile-hidden');
+    };
+
+    function toggleClass(el, className) {
+        // from http://youmightnotneedjquery.com/
+        if (el.classList) {
+            el.classList.toggle(className);
+        } else {
+            var classes = el.className.split(' ');
+            var existingIndex = classes.indexOf(className);
+
+            if (existingIndex >= 0) {
+                classes.splice(existingIndex, 1);
+            } else {
+                classes.push(className);
+            }
+
+            el.className = classes.join(' ');
+        }
+    }
+
+    // The below code is used to add prev and next navigation links to the
+    // bottom of each of the sections.
+    // It works by extracting the current page based on the url and iterates
+    // over the menu links until it finds the menu item for the current page. We
+    // then create a copy of the preceding and following menu links and add the
+    // correct css class and insert them into the bottom of the page.
+    var toc = document.getElementById('toc').getElementsByTagName('a');
+    var href = document.location.pathname.split('/').pop();
+
+    if (href === 'index.html' || href === '') {
+        href = 'README.html';
+    }
+
+    for (var i = 0; i < toc.length; i++) {
+        if (toc[i].attributes.href.value.split('/').pop() === href) {
+            var nav = document.createElement('p');
+
+            if (i > 0) {
+                var prevNode = toc[i-1].cloneNode(true);
+                prevNode.className = 'left';
+                prevNode.setAttribute('rel', 'prev');
+                nav.appendChild(prevNode);
+            }
+
+            if (i < toc.length - 1) {
+                var nextNode = toc[i+1].cloneNode(true);
+                nextNode.className = 'right';
+                nextNode.setAttribute('rel', 'next');
+                nav.appendChild(nextNode);
+            }
+
+            document.getElementById('page').appendChild(nav);
+
+            break;
+        }
+    }
+});
diff --git a/src/tools/rustbook/subcommand.rs b/src/tools/rustbook/subcommand.rs
new file mode 100644 (file)
index 0000000..a66c2b4
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Common API for all rustbook subcommands.
+
+use error::CliResult;
+use error::CommandResult;
+use term::Term;
+
+use help;
+use build;
+use serve;
+use test;
+
+pub trait Subcommand {
+    /// Mutate the subcommand by parsing its arguments.
+    ///
+    /// Returns `Err` on a parsing error.
+    fn parse_args(&mut self, args: &[String]) -> CliResult<()>;
+    /// Print the CLI usage information.
+    fn usage(&self);
+    /// Actually execute the subcommand.
+    fn execute(&mut self, term: &mut Term) -> CommandResult<()>;
+}
+
+/// Create a Subcommand object based on its name.
+pub fn parse_name(name: &str) -> Option<Box<Subcommand>> {
+    let cmds: [fn(&str) -> Option<Box<Subcommand>>; 4] = [help::parse_cmd,
+                                                          build::parse_cmd,
+                                                          serve::parse_cmd,
+                                                          test::parse_cmd];
+    for parser in &cmds {
+        let parsed = (*parser)(name);
+        if parsed.is_some() { return parsed }
+    }
+    None
+}
diff --git a/src/tools/rustbook/term.rs b/src/tools/rustbook/term.rs
new file mode 100644 (file)
index 0000000..cdd25e6
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! An abstraction of the terminal. Eventually, provide color and
+//! verbosity support. For now, just a wrapper around stdout/stderr.
+
+use std::io;
+use std::io::prelude::*;
+use std::sync::atomic::Ordering;
+
+pub struct Term {
+    err: Box<Write + 'static>
+}
+
+impl Term {
+    pub fn new() -> Term {
+        Term {
+            err: Box::new(io::stderr())
+        }
+    }
+
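+    /// Write an error message to stderr and record a failing exit status for
+    /// the process.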
+    pub fn err(&mut self, msg: &str) {
+        // swallow any errors
+        let _ = writeln!(&mut self.err, "{}", msg);
+        ::EXIT_STATUS.store(101, Ordering::SeqCst);
+    }
+}
diff --git a/src/tools/rustbook/test.rs b/src/tools/rustbook/test.rs
new file mode 100644 (file)
index 0000000..002c46a
--- /dev/null
@@ -0,0 +1,75 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation of the `test` subcommand. Just a stub for now.
+
+use subcommand::Subcommand;
+use error::{err, CliResult, CommandResult};
+use term::Term;
+use book;
+
+use std::fs::File;
+use std::env;
+use std::process::Command;
+
+struct Test;
+
+pub fn parse_cmd(name: &str) -> Option<Box<Subcommand>> {
+    if name == "test" {
+        Some(Box::new(Test))
+    } else {
+        None
+    }
+}
+
+impl Subcommand for Test {
+    fn parse_args(&mut self, _: &[String]) -> CliResult<()> {
+        Ok(())
+    }
+    fn usage(&self) {}
+    fn execute(&mut self, term: &mut Term) -> CommandResult<()> {
+        let cwd = env::current_dir().unwrap();
+        let src = cwd.clone();
+
+        let mut summary = File::open(&src.join("SUMMARY.md"))?;
+        match book::parse_summary(&mut summary, &src) {
+            Ok(book) => {
+                for (_, item) in book.iter() {
+                    let output_result = Command::new("rustdoc")
+                        .arg(&item.path)
+                        .arg("--test")
+                        .output();
+                    match output_result {
+                        Ok(output) => {
+                            if !output.status.success() {
+                                term.err(&format!("{}\n{}",
+                                         String::from_utf8_lossy(&output.stdout),
+                                         String::from_utf8_lossy(&output.stderr)));
+                                return Err(err("some tests failed"));
+                            }
+
+                        }
+                        Err(e) => {
+                            let message = format!("could not execute `rustdoc`: {}", e);
+                            return Err(err(&message))
+                        }
+                    }
+                }
+            }
+            Err(errors) => {
+                for err in errors {
+                    term.err(&err[..]);
+                }
+                return Err(err("there was an error"))
+            }
+        }
+        Ok(()) // all chapters passed their tests
+    }
+}